code stringlengths 281 23.7M |
|---|
def test_parse_command_only_expands_alias(parser):
    """An aliased command is expanded even when the line has an unbalanced quote."""
    line = 'fake foobar.py "somebody.py'
    statement = parser.parse_command_only(line)
    # The alias 'fake' expands to 'run_pyscript'; the unclosed quote passes through untouched.
    assert statement == 'foobar.py "somebody.py'
    assert statement.args == statement
    assert statement.arg_list == []
    assert statement.command == 'run_pyscript'
    assert statement.command_and_args == 'run_pyscript foobar.py "somebody.py'
    assert statement.raw == line
    # parse_command_only never splits off terminators, redirection or pipes.
    for attr in ('multiline_command', 'terminator', 'suffix', 'pipe_to', 'output', 'output_to'):
        assert getattr(statement, attr) == ''
class PancakeHouseMenu(Menu):
    """Breakfast menu backed by a plain list of item names."""

    # Ordered list of menu item names.
    menuItems: List[str]

    def __init__(self):
        self.menuItems = []
        # Seed the menu with the standard pancake-house offerings.
        for item in ("K&B's Pancake Breakfast",
                     'Regular Pancake Breakfast',
                     'Blueberry Pancakes',
                     'Waffles'):
            self.addItem(item)

    def addItem(self, name: str) -> None:
        """Append a new item name to the end of the menu."""
        self.menuItems.append(name)

    def getMenuItems(self) -> List[str]:
        """Return the underlying list of item names."""
        return self.menuItems

    def createIterator(self) -> Iterator[str]:
        """Return a dedicated iterator over the menu items."""
        return PancakeHouseMenuIterator(self.menuItems)

    def __str__(self) -> str:
        return 'Pancake House Menu'

    def __repr__(self) -> str:
        return str(self)
class EventObject(DefaultObject):
    """Object typeclass that fires in-game callbacks on get/drop/time events."""

    # Event name -> (callback argument names, help text[, extra hooks]).
    _events = {'drop': (['character', 'obj'], OBJECT_DROP), 'get': (['character', 'obj'], OBJECT_GET), 'time': (['object'], OBJECT_TIME, None, time_event)}

    # NOTE(review): the decorator was mangled to `_property` in this copy of
    # the source; restored as `@lazy_property` (the conventional Evennia
    # handler pattern) -- confirm against upstream.
    @lazy_property
    def callbacks(self):
        """Handler storing and executing this object's event callbacks."""
        return CallbackHandler(self)

    def at_get(self, getter):
        """Hook called when `getter` picks this object up; fires the 'get' event."""
        super().at_get(getter)
        self.callbacks.call('get', getter, self)

    def at_drop(self, dropper):
        """Hook called when `dropper` drops this object; fires the 'drop' event."""
        super().at_drop(dropper)
        self.callbacks.call('drop', dropper, self)
def _test():
    """Smoke-test MnasNet: parameter count, forward pass shape and backward pass."""
    import torch
    pretrained = False
    for model in [mnasnet]:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print('m={}, {}'.format(model.__name__, weight_count))
        # Known parameter count for the reference implementation.
        assert model != mnasnet or weight_count == 4308816
        # Single 224x224 RGB image -> 1000-class logits; backward must not raise.
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert tuple(y.size()) == (1, 1000)
# NOTE(review): the decorator line was mangled to `_api()` in this copy of
# the source; restored as streamz's registration decorator -- confirm
# against upstream.
@Stream.register_api()
class rate_limit(Stream):
    """Limit the flow of elements to at most one per `interval` seconds."""
    _graphviz_shape = 'octagon'

    def __init__(self, upstream, interval, **kwargs):
        # `interval` may be a number of seconds or a pandas-style string.
        self.interval = convert_interval(interval)
        # Earliest wall-clock time the next element may be emitted.
        self.next = 0
        kwargs['ensure_io_loop'] = True
        Stream.__init__(self, upstream, **kwargs)

    def update(self, x, who=None, metadata=None):
        """Emit `x` downstream, sleeping first if the rate budget is exhausted."""
        now = time()
        old_next = self.next
        self.next = max(now, self.next) + self.interval
        if now < old_next:
            # We are ahead of schedule: wait out the remaining interval.
            yield gen.sleep(old_next - now)
        yield self._emit(x, metadata=metadata)
def get_linear_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, last_epoch: int=(- 1)):
    """Create an LR schedule with linear warmup followed by linear decay.

    The multiplier rises linearly from ~0 to 1 over `num_warmup_steps`
    (floored at 1e-06), then decays linearly to 0 at `num_training_steps`.

    Returns a torch `LambdaLR` scheduler attached to `optimizer`.
    """
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            warmup_frac = float(current_step) / float(max(1, num_warmup_steps))
            return max(1e-06, warmup_frac)
        remaining = float(num_training_steps - current_step)
        decay_span = float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, remaining / decay_span)
    return optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch)
class Full(BaseElectrolyteConductivity):
    """Full (unsimplified) model of charge conservation in the electrolyte.

    Solves an algebraic equation for the electrolyte potential in every cell
    domain, driven by the divergence of the electrolyte current density.
    """

    def __init__(self, param, options=None):
        super().__init__(param, options=options)

    def get_fundamental_variables(self):
        """Create one electrolyte-potential variable per cell domain."""
        phi_e_dict = {}
        variables = {}
        for domain in self.options.whole_cell_domains:
            Dom = domain.capitalize().split()[0]
            name = f'{Dom} electrolyte potential [V]'
            # Referenced to minus the initial negative-electrode OCP so the
            # variable starts near zero.
            phi_e_k = pybamm.Variable(name, domain=domain, auxiliary_domains={'secondary': 'current collector'}, reference=(- self.param.n.prim.U_init))
            phi_e_k.print_name = f'phi_e_{domain[0]}'
            phi_e_dict[domain] = phi_e_k
        variables['Electrolyte potential [V]'] = pybamm.concatenation(*phi_e_dict.values())
        variables.update(self._get_standard_potential_variables(phi_e_dict))
        return variables

    def get_coupled_variables(self, variables):
        """Compute the electrolyte current density from concentration and potential gradients."""
        param = self.param
        T = variables['Cell temperature [K]']
        tor = variables['Electrolyte transport efficiency']
        c_e = variables['Electrolyte concentration [mol.m-3]']
        phi_e = variables['Electrolyte potential [V]']
        # Current = conductivity * (concentration-driven term - potential gradient).
        i_e = ((param.kappa_e(c_e, T) * tor) * ((param.chiRT_over_Fc(c_e, T) * pybamm.grad(c_e)) - pybamm.grad(phi_e)))
        i_e.print_name = 'i_e'
        variables.update(self._get_standard_current_variables(i_e))
        variables.update(self._get_electrolyte_overpotentials(variables))
        return variables

    def set_algebraic(self, variables):
        """Charge conservation: div(i_e) must equal the summed interfacial current."""
        phi_e = variables['Electrolyte potential [V]']
        i_e = variables['Electrolyte current density [A.m-2]']
        sum_a_j = variables['Sum of volumetric interfacial current densities [A.m-3]']
        sum_a_j.print_name = 'aj'
        # L_x^2 factor rescales the residual -- presumably for solver
        # conditioning; confirm against pybamm conventions.
        self.algebraic = {phi_e: ((self.param.L_x ** 2) * (pybamm.div(i_e) - sum_a_j))}

    def set_initial_conditions(self, variables):
        """Initial electrolyte potential matches the variable's reference value."""
        phi_e = variables['Electrolyte potential [V]']
        self.initial_conditions = {phi_e: (- self.param.n.prim.U_init)}
class L2Loss(Loss):
    """Squared-L2 loss: ||predict - target||^2 per sample."""

    def evaluate(self, predict: np.ndarray, target: np.ndarray) -> np.ndarray:
        """Return the squared L2 distance between predictions and targets."""
        self._validate_shapes(predict, target)
        diff = predict - target
        if predict.ndim <= 1:
            # Scalar/vector case: elementwise squared error.
            return diff ** 2
        # Batched case: norm over all non-batch axes, then square.
        reduce_axes = tuple(range(1, predict.ndim))
        return np.linalg.norm(diff, axis=reduce_axes) ** 2

    def gradient(self, predict: np.ndarray, target: np.ndarray) -> np.ndarray:
        """Return the derivative of the squared L2 loss w.r.t. `predict`."""
        self._validate_shapes(predict, target)
        return 2 * (predict - target)
()
def remove_old_client_ids(days=90):
    """Null out ``client_id`` on offers older than ``days`` days.

    Processes in batches of up to 1000 rows per UPDATE and loops until an
    iteration changes no rows, to keep individual queries small.
    """
    old_cutoff = (get_ad_day() - datetime.timedelta(days=days))
    while True:
        # Grab up to 1000 candidate primary keys per pass.
        offer_ids = Offer.objects.filter(date__lt=old_cutoff, client_id__isnull=False).values('pk')[:1000]
        offers_changed = Offer.objects.filter(pk__in=offer_ids).update(client_id=None)
        if (not offers_changed):
            break
class TutorialReadable(TutorialObject):
    """Object carrying text that can be read via a 'read' command on itself."""

    def at_object_creation(self):
        """Set tutorial info, default readable text and attach the read cmdset."""
        super().at_object_creation()
        default_text = 'There is no text written on %s.' % self.key
        self.db.tutorial_info = "This is an object with a 'read' command defined in a command set on itself."
        self.db.readable_text = default_text
        # Attach the cmdset persistently so 'read' survives server reloads.
        self.cmdset.add_default(CmdSetReadable, permanent=True)
class TestNativeMSGPadder(unittest.TestCase):
    """Unit tests for Padder.pad_data on native MSG datasets."""

    @staticmethod
    def prepare_padder(test_dict):
        """Build a Padder from a fixture dict and return (calculated, expected).

        Declared ``@staticmethod``: the original definition took ``test_dict``
        as its first parameter yet was invoked as ``self.prepare_padder(...)``,
        which would have passed the test instance as ``test_dict``.
        """
        dataset_id = test_dict['dataset_id']
        img_bounds = test_dict['img_bounds']
        is_full_disk = test_dict['is_full_disk']
        dataset = test_dict['dataset']
        final_shape = test_dict['final_shape']
        expected_padded_data = test_dict['expected_padded_data']
        padder = Padder(dataset_id, img_bounds, is_full_disk)
        # Inject the target shape directly rather than deriving it.
        padder._final_shape = final_shape
        calc_padded_data = padder.pad_data(dataset)
        return (calc_padded_data, expected_padded_data)

    def test_padder_rss_roi(self):
        """Padded RSS region-of-interest data matches the expected array."""
        (calculated, expected) = self.prepare_padder(TEST_PADDER_RSS_ROI)
        np.testing.assert_array_equal(calculated, expected)

    def test_padder_fes_hrv(self):
        """Padded FES HRV data matches the expected array."""
        (calculated, expected) = self.prepare_padder(TEST_PADDER_FES_HRV)
        np.testing.assert_array_equal(calculated, expected)
def particle_grid(dim_x, dim_y, dim_z, lower, radius, jitter):
    """Generate an (N, 3) array of particle centres on a jittered 3D grid.

    Grid coordinates are spaced 2*radius apart starting at `lower`; each
    point is then perturbed by uniform noise scaled by radius*jitter.
    """
    axes = [np.linspace(0, extent, extent) for extent in (dim_x, dim_y, dim_z)]
    gx, gy, gz = np.meshgrid(*axes)
    grid = np.array((gx, gy, gz)).T * radius * 2.0 + np.array(lower)
    grid = grid + np.random.rand(*grid.shape) * radius * jitter
    return grid.reshape((-1, 3))
def test_cli_async_reduce_without_curry(runner, reactor, server, capsys):
    """End-to-end CLI test: async-map HTTP fetches, map json.loads, reduce truediv."""
    # NOTE(review): the URL literal below was truncated in this copy of the
    # source (unterminated string) -- restore the original format string
    # (a URL with a '{}' delay placeholder) from version control before use.
    base_url = '
    in_stream = ''.join((base_url.format(i) for i in [6, 2, 1]))
    args = ['async-map', 'await asks.get ! f"{types.SimpleNamespace(**x.json()).delay}"', 'map', 'json.loads', 'reduce', 'operator.truediv']
    # 6 / 2 / 1 == 3.0; responses must be reduced in request order.
    expected = '3.0\n'
    # Timer(7) bounds the total wall-clock time of the pipeline.
    with helpers.Timer(7):
        output = helpers.run(args, input=in_stream.encode()).decode()
    assert (output == expected)
class LinearColormap(ColorMap):
    """Colormap that linearly interpolates RGBA between a list of colors.

    `index` gives the data value anchored to each color; when omitted the
    anchors are spread evenly between `vmin` and `vmax`.
    """

    def __init__(self, colors, index=None, vmin=0.0, vmax=1.0, caption='', max_labels=10, tick_labels=None):
        super().__init__(vmin=vmin, vmax=vmax, caption=caption, max_labels=max_labels)
        self.tick_labels = tick_labels
        n = len(colors)
        if (n < 2):
            raise ValueError('You must provide at least 2 colors.')
        if (index is None):
            # Evenly spaced anchors over [vmin, vmax].
            self.index = [(vmin + ((((vmax - vmin) * i) * 1.0) / (n - 1))) for i in range(n)]
        else:
            self.index = list(index)
        self.colors = [_parse_color(x) for x in colors]

    def rgba_floats_tuple(self, x):
        """Return the interpolated (r, g, b, a) float tuple for value `x`.

        Values outside the anchor range clamp to the first/last color.
        Raises ValueError if the anchors are not sorted.
        """
        if (x <= self.index[0]):
            return self.colors[0]
        if (x >= self.index[(- 1)]):
            return self.colors[(- 1)]
        # i is the index of the first anchor not below x.
        i = len([u for u in self.index if (u < x)])
        if (self.index[(i - 1)] < self.index[i]):
            # Fractional position of x between its two bracketing anchors.
            p = (((x - self.index[(i - 1)]) * 1.0) / (self.index[i] - self.index[(i - 1)]))
        elif (self.index[(i - 1)] == self.index[i]):
            p = 1.0
        else:
            raise ValueError('Thresholds are not sorted.')
        # Linear blend of the two bracketing colors, channel by channel.
        return tuple(((((1.0 - p) * self.colors[(i - 1)][j]) + (p * self.colors[i][j])) for j in range(4)))

    def to_step(self, n=None, index=None, data=None, method=None, quantiles=None, round_method=None, max_labels=10):
        """Convert to a StepColormap with `n` steps or explicit `index` bounds.

        `method` selects how step boundaries are derived from `data`:
        'linear', 'log' (strictly positive data only) or 'quantiles'.
        `round_method` optionally rounds boundaries ('int' or 'log10').
        Raises ValueError when neither `index` nor `n` (nor `quantiles`,
        for the quantile method) is given.
        """
        msg = 'You must specify either `index` or `n`'
        if (index is None):
            if (data is None):
                if (n is None):
                    raise ValueError(msg)
                else:
                    # No data: evenly spaced boundaries over [vmin, vmax].
                    index = [(self.vmin + ((((self.vmax - self.vmin) * i) * 1.0) / n)) for i in range((1 + n))]
                    scaled_cm = self
            else:
                max_ = max(data)
                min_ = min(data)
                # Rescale the colormap to span the data range.
                scaled_cm = self.scale(vmin=min_, vmax=max_)
                # Explicit `quantiles` implies the quantile method; default is linear.
                method = ('quantiles' if (quantiles is not None) else (method if (method is not None) else 'linear'))
                if method.lower().startswith('lin'):
                    if (n is None):
                        raise ValueError(msg)
                    index = [(min_ + (((i * (max_ - min_)) * 1.0) / n)) for i in range((1 + n))]
                elif method.lower().startswith('log'):
                    if (n is None):
                        raise ValueError(msg)
                    if (min_ <= 0):
                        msg = 'Log-scale works only with strictly positive values.'
                        raise ValueError(msg)
                    # Boundaries evenly spaced in log space.
                    index = [math.exp((math.log(min_) + (((i * (math.log(max_) - math.log(min_))) * 1.0) / n))) for i in range((1 + n))]
                elif method.lower().startswith('quant'):
                    if (quantiles is None):
                        if (n is None):
                            msg = 'You must specify either `index`, `n` or`quantiles`.'
                            raise ValueError(msg)
                        else:
                            quantiles = [((i * 1.0) / n) for i in range((1 + n))]
                    p = (len(data) - 1)
                    s = sorted(data)
                    # Linear interpolation between the two nearest order statistics.
                    index = [((s[int((q * p))] * (1.0 - ((q * p) % 1))) + (s[min((int((q * p)) + 1), p)] * ((q * p) % 1))) for q in quantiles]
                else:
                    raise ValueError(f'Unknown method {method}')
        else:
            scaled_cm = self.scale(vmin=min(index), vmax=max(index))
        n = (len(index) - 1)
        if (round_method == 'int'):
            index = [round(x) for x in index]
        if (round_method == 'log10'):
            index = [_base(x) for x in index]
        # Sample one representative color inside each step interval.
        colors = [scaled_cm.rgba_floats_tuple(((index[i] * (1.0 - (i / (n - 1.0)))) + ((index[(i + 1)] * i) / (n - 1.0)))) for i in range(n)]
        caption = self.caption
        return StepColormap(colors, index=index, vmin=index[0], vmax=index[(- 1)], caption=caption, max_labels=max_labels, tick_labels=self.tick_labels)

    def scale(self, vmin=0.0, vmax=1.0, max_labels=10):
        """Return a copy whose anchors are rescaled to span [vmin, vmax]."""
        return LinearColormap(self.colors, index=[(vmin + ((((vmax - vmin) * (x - self.vmin)) * 1.0) / (self.vmax - self.vmin))) for x in self.index], vmin=vmin, vmax=vmax, caption=self.caption, max_labels=max_labels)
class AlgorithmResult(ABC, collections.UserDict):
    """Read-only mapping of algorithm outputs.

    All mutating dict operations raise TypeError; new values arrive only via
    ``combine``, which merges the public attributes of another result.
    """

    def __init__(self, a_dict: Optional[Dict]=None) -> None:
        super().__init__()
        if a_dict:
            self.data.update(a_dict)

    def __setitem__(self, key: object, item: object) -> None:
        raise TypeError("'__setitem__' invalid for this object.")

    def __delitem__(self, key: object) -> None:
        raise TypeError("'__delitem__' invalid for this object.")

    def clear(self) -> None:
        raise TypeError("'clear' invalid for this object.")

    def pop(self, key: object, default: Optional[object]=None) -> object:
        raise TypeError("'pop' invalid for this object.")

    def popitem(self) -> Tuple[(object, object)]:
        raise TypeError("'popitem' invalid for this object.")

    def update(self, *args, **kwargs) -> None:
        raise TypeError("'update' invalid for this object.")

    def combine(self, result: 'AlgorithmResult') -> None:
        """Copy the public, non-callable attributes of `result` into this result.

        A ``None`` attribute value removes the corresponding key.
        Raises TypeError when `result` is None; combining with itself is a no-op.
        """
        if result is None:
            raise TypeError('Argument result expected.')
        if result == self:
            return
        for name, value in inspect.getmembers(result):
            is_public_data = (not name.startswith('_')
                              and name != 'data'
                              and not inspect.ismethod(value)
                              and not inspect.isfunction(value)
                              and hasattr(self, name))
            if not is_public_data:
                continue
            if value is None:
                # None means "unset": drop the key if present.
                if name in self.data:
                    del self.data[name]
            else:
                self.data[name] = value

    def __contains__(self, key: object) -> bool:
        try:
            self.__getitem__(key)
        except KeyError:
            return False
        return True
def numpy_random_mtrand_transform():
    """Return an astroid AST stub for numpy.random.mtrand.

    Every sampler is declared with numpy's real signature but returns
    `uninferable` (except `randint`, which returns a concrete ndarray),
    so static analysis knows the names without inferring values.
    """
    return parse("\n def beta(a, b, size=None): return uninferable\n def binomial(n, p, size=None): return uninferable\n def bytes(length): return uninferable\n def chisquare(df, size=None): return uninferable\n def choice(a, size=None, replace=True, p=None): return uninferable\n def dirichlet(alpha, size=None): return uninferable\n def exponential(scale=1.0, size=None): return uninferable\n def f(dfnum, dfden, size=None): return uninferable\n def gamma(shape, scale=1.0, size=None): return uninferable\n def geometric(p, size=None): return uninferable\n def get_state(): return uninferable\n def gumbel(loc=0.0, scale=1.0, size=None): return uninferable\n def hypergeometric(ngood, nbad, nsample, size=None): return uninferable\n def laplace(loc=0.0, scale=1.0, size=None): return uninferable\n def logistic(loc=0.0, scale=1.0, size=None): return uninferable\n def lognormal(mean=0.0, sigma=1.0, size=None): return uninferable\n def logseries(p, size=None): return uninferable\n def multinomial(n, pvals, size=None): return uninferable\n def multivariate_normal(mean, cov, size=None): return uninferable\n def negative_binomial(n, p, size=None): return uninferable\n def noncentral_chisquare(df, nonc, size=None): return uninferable\n def noncentral_f(dfnum, dfden, nonc, size=None): return uninferable\n def normal(loc=0.0, scale=1.0, size=None): return uninferable\n def pareto(a, size=None): return uninferable\n def permutation(x): return uninferable\n def poisson(lam=1.0, size=None): return uninferable\n def power(a, size=None): return uninferable\n def rand(*args): return uninferable\n def randint(low, high=None, size=None, dtype='l'):\n import numpy\n return numpy.ndarray((1,1))\n def randn(*args): return uninferable\n def random(size=None): return uninferable\n def random_integers(low, high=None, size=None): return uninferable\n def random_sample(size=None): return uninferable\n def rayleigh(scale=1.0, size=None): return uninferable\n def seed(seed=None): return uninferable\n def set_state(state): return uninferable\n def shuffle(x): return uninferable\n def standard_cauchy(size=None): return uninferable\n def standard_exponential(size=None): return uninferable\n def standard_gamma(shape, size=None): return uninferable\n def standard_normal(size=None): return uninferable\n def standard_t(df, size=None): return uninferable\n def triangular(left, mode, right, size=None): return uninferable\n def uniform(low=0.0, high=1.0, size=None): return uninferable\n def vonmises(mu, kappa, size=None): return uninferable\n def wald(mean, scale, size=None): return uninferable\n def weibull(a, size=None): return uninferable\n def zipf(a, size=None): return uninferable\n ")
class TensoredMeasFitter():
    """Fit tensored measurement-calibration matrices from calibration results.

    NOTE(review): the accessors below lost their decorators in this copy of
    the source (residue ``_matrices.setter`` was still visible). They are
    restored as properties, which matches how the class itself uses them
    (e.g. ``self.nqubits`` and ``self.cal_matrices[...]`` without calls).
    """

    def __init__(self, results: Union[(Result, List[Result])], mit_pattern: List[List[int]], substate_labels_list: List[List[str]]=None, circlabel: str=''):
        """Collect calibration results for the given qubit mitigation pattern.

        Raises ValueError when `substate_labels_list` does not match
        `mit_pattern` in length.
        """
        self._result_list = []
        self._cal_matrices = None
        self._circlabel = circlabel
        self._mit_pattern = mit_pattern
        # One calibration matrix per sublist of qubits in the pattern.
        self._qubit_list_sizes = [len(qubit_list) for qubit_list in mit_pattern]
        self._indices_list = []
        if (substate_labels_list is None):
            # Default labels: all bitstrings for each sublist size.
            self._substate_labels_list = []
            for list_size in self._qubit_list_sizes:
                self._substate_labels_list.append(count_keys(list_size))
        else:
            self._substate_labels_list = substate_labels_list
            if (len(self._qubit_list_sizes) != len(substate_labels_list)):
                raise ValueError('mit_pattern does not match substate_labels_list')
        # Map each label to its row/column index, per sublist.
        self._indices_list = []
        for (_, sub_labels) in enumerate(self._substate_labels_list):
            self._indices_list.append({lab: ind for (ind, lab) in enumerate(sub_labels)})
        self.add_data(results)

    @property
    def cal_matrices(self):
        """The fitted calibration matrices, one per qubit sublist."""
        return self._cal_matrices

    @cal_matrices.setter
    def cal_matrices(self, new_cal_matrices):
        """Replace the calibration matrices (stored as a deep copy)."""
        self._cal_matrices = copy.deepcopy(new_cal_matrices)

    @property
    def substate_labels_list(self):
        """The state labels for each qubit sublist."""
        return self._substate_labels_list

    @property
    def filter(self):
        """A measurement filter built from the fitted calibration matrices."""
        return TensoredFilter(self._cal_matrices, self._substate_labels_list, self._mit_pattern)

    @property
    def nqubits(self):
        """Total number of qubits across the mitigation pattern."""
        return sum(self._qubit_list_sizes)

    def add_data(self, new_results, rebuild_cal_matrix=True):
        """Append new calibration results and optionally refit the matrices."""
        if (new_results is None):
            return
        if (not isinstance(new_results, list)):
            new_results = [new_results]
        for result in new_results:
            self._result_list.append(result)
        if rebuild_cal_matrix:
            self._build_calibration_matrices()

    def readout_fidelity(self, cal_index=0, label_list=None):
        """Return the mean assignment fidelity of calibration matrix `cal_index`.

        `label_list` groups state labels; fidelity is averaged within each
        group, then across groups. Raises QiskitError if no matrix is fitted.
        """
        if (self._cal_matrices is None):
            raise QiskitError('Cal matrix has not been set')
        if (label_list is None):
            label_list = [[label] for label in self._substate_labels_list[cal_index]]
        state_labels = self._substate_labels_list[cal_index]
        fidelity_label_list = []
        if (label_list is None):
            fidelity_label_list = [[label] for label in state_labels]
        else:
            # Translate each label group into a group of matrix indices.
            for fid_sublist in label_list:
                fidelity_label_list.append([])
                for fid_statelabl in fid_sublist:
                    for (label_idx, label) in enumerate(state_labels):
                        if (fid_statelabl == label):
                            fidelity_label_list[(- 1)].append(label_idx)
                            continue
        # Average the diagonal-block probabilities within each group.
        assign_fid_list = []
        for fid_label_sublist in fidelity_label_list:
            assign_fid_list.append(0)
            for state_idx_i in fid_label_sublist:
                for state_idx_j in fid_label_sublist:
                    assign_fid_list[(- 1)] += self._cal_matrices[cal_index][state_idx_i][state_idx_j]
            assign_fid_list[(- 1)] /= len(fid_label_sublist)
        return np.mean(assign_fid_list)

    def _build_calibration_matrices(self):
        """Accumulate counts from all results into per-sublist matrices and normalize columns."""
        self._cal_matrices = []
        for list_size in self._qubit_list_sizes:
            self._cal_matrices.append(np.zeros([(2 ** list_size), (2 ** list_size)], dtype=float))
        for result in self._result_list:
            for experiment in result.results:
                circ_name = experiment.header.name
                # Only circuits named '<circlabel>cal_<state>' are calibration runs.
                circ_search = re.search((('(?<=' + self._circlabel) + 'cal_)\\w+'), circ_name)
                if (circ_search is None):
                    continue
                state = circ_search.group(0)
                state_cnts = result.get_counts(circ_name)
                for (measured_state, counts) in state_cnts.items():
                    # Walk the bitstring from the right, one qubit sublist at a time.
                    end_index = self.nqubits
                    for (cal_ind, cal_mat) in enumerate(self._cal_matrices):
                        start_index = (end_index - self._qubit_list_sizes[cal_ind])
                        substate_index = self._indices_list[cal_ind][state[start_index:end_index]]
                        measured_substate_index = self._indices_list[cal_ind][measured_state[start_index:end_index]]
                        end_index = start_index
                        cal_mat[measured_substate_index][substate_index] += counts
        # Normalize each column to a probability distribution (guarding empty columns).
        for (mat_index, _) in enumerate(self._cal_matrices):
            sums_of_columns = np.sum(self._cal_matrices[mat_index], axis=0)
            self._cal_matrices[mat_index] = np.divide(self._cal_matrices[mat_index], sums_of_columns, out=np.zeros_like(self._cal_matrices[mat_index]), where=(sums_of_columns != 0))

    def plot_calibration(self, cal_index=0, ax=None, show_plot=True):
        """Plot calibration matrix `cal_index` as a labelled heatmap.

        Raises QiskitError if no matrix is fitted and ImportError when
        matplotlib is unavailable.
        """
        if (self._cal_matrices is None):
            raise QiskitError('Cal matrix has not been set')
        if (not HAS_MATPLOTLIB):
            raise ImportError('The function plot_rb_data needs matplotlib. Run "pip install matplotlib" before.')
        if (ax is None):
            plt.figure()
            ax = plt.gca()
        axim = ax.matshow(self.cal_matrices[cal_index], cmap=plt.cm.binary, clim=[0, 1])
        ax.figure.colorbar(axim)
        ax.set_xlabel('Prepared State')
        ax.xaxis.set_label_position('top')
        ax.set_ylabel('Measured State')
        ax.set_xticks(np.arange(len(self._substate_labels_list[cal_index])))
        ax.set_yticks(np.arange(len(self._substate_labels_list[cal_index])))
        ax.set_xticklabels(self._substate_labels_list[cal_index])
        ax.set_yticklabels(self._substate_labels_list[cal_index])
        if show_plot:
            plt.show()
def all_dna_locations(game: GameDescription, config: AM2RArtifactConfig):
    """Collect the pickup nodes eligible to hold DNA, per the artifact config."""
    def _eligible(node) -> bool:
        # Metroid drops are identified by object name; bosses by a fixed item set.
        obj_name = node.extra['object_name']
        if config.prefer_metroids and obj_name.startswith('oItemDNA_'):
            return True
        return bool(config.prefer_bosses and (obj_name in _boss_items))

    return [node for node in game.region_list.all_nodes
            if isinstance(node, PickupNode) and _eligible(node)]
def _CKD_priv(parent_privkey: bytes, parent_chaincode: bytes, child_index: bytes, is_hardened_child: bool) -> Tuple[(bytes, bytes)]:
    """Derive a BIP32 child private key and chain code from its parent.

    `child_index` is the serialized index bytes; hardened derivation feeds
    the parent private key into the HMAC, non-hardened uses the compressed
    parent public key. Raises BitcoinException for an invalid parent key and
    InvalidECPointException when the derived child is invalid.
    """
    try:
        keypair = ecc.ECPrivkey(parent_privkey)
    except ecc.InvalidECPointException as e:
        raise BitcoinException('Impossible xprv (not within curve order)') from e
    parent_pubkey = keypair.get_public_key_bytes(compressed=True)
    if is_hardened_child:
        # Hardened: HMAC input is 0x00 || parent_privkey || index.
        data = ((bytes([0]) + parent_privkey) + child_index)
    else:
        # Normal: HMAC input is compressed parent pubkey || index.
        data = (parent_pubkey + child_index)
    I = hmac_oneshot(parent_chaincode, data, hashlib.sha512)
    # Left half tweaks the key; right half becomes the child chain code.
    I_left = ecc.string_to_number(I[0:32])
    child_privkey = ((I_left + ecc.string_to_number(parent_privkey)) % ecc.CURVE_ORDER)
    # Per BIP32, derivation fails if I_left >= curve order or the child key is zero.
    if ((I_left >= ecc.CURVE_ORDER) or (child_privkey == 0)):
        raise ecc.InvalidECPointException()
    child_privkey = int.to_bytes(child_privkey, length=32, byteorder='big', signed=False)
    child_chaincode = I[32:]
    return (child_privkey, child_chaincode)
def remove_all_but_largest_component_from_segmentation(segmentation: np.ndarray, labels_or_regions: Union[(int, Tuple[(int, ...)], List[Union[(int, Tuple[(int, ...)])]])], background_label: int=0) -> np.ndarray:
    """Keep only the largest connected component of the selected label(s)/region(s).

    Voxels that belong to the selected labels but fall outside the largest
    connected component are reset to `background_label`. Returns a copy;
    the input segmentation is not modified.
    """
    if not isinstance(labels_or_regions, list):
        labels_or_regions = [labels_or_regions]
    # Union of the binary masks of every requested label / region.
    combined_mask = np.zeros_like(segmentation, dtype=bool)
    for entry in labels_or_regions:
        combined_mask |= region_or_label_to_mask(segmentation, entry)
    keep = remove_all_but_largest_component(combined_mask)
    result = np.copy(segmentation)
    result[combined_mask & ~keep] = background_label
    return result
def test_on_action_delete_items(view, item):
    """Deleting a selected item empties the scene, dirties the undo stack and cancels crop mode."""
    view.scene.cancel_crop_mode = MagicMock()
    view.scene.addItem(item)
    item.setSelected(True)
    view.on_action_delete_items()
    view.scene.cancel_crop_mode.assert_called_once()
    assert view.scene.items() == []
    # The deletion must be undoable, so the stack may not be clean.
    assert view.undo_stack.isClean() is False
class TestInputGeneration(unittest.TestCase):
    """Checks that the algorithmic envs produce well-formed input tapes/grids."""

    def test_tape_inputs(self):
        """Tape envs emit symbols within base and at most `size` of them."""
        for env_kls in ALL_TAPE_ENVS:
            env = env_kls()
            for size in (2, 3, 4):
                tape = env.generate_input_data(size)
                self.assertTrue(all((0 <= symbol <= env.base) for symbol in tape), 'Invalid input tape from env {}: {}'.format(env_kls, tape))
                self.assertLessEqual(len(tape), size)

    def test_grid_inputs(self):
        """Grid envs emit `size` columns of `env.rows` in-range symbols."""
        for env_kls in ALL_GRID_ENVS:
            env = env_kls()
            for size in (2, 3, 4):
                grid = env.generate_input_data(size)
                self.assertEqual(len(grid), size)
                self.assertTrue(all((len(column) == env.rows) for column in grid))
                self.assertTrue(all((0 <= symbol <= env.base) for symbol in grid[0]))

    def test_duplicatedinput_inputs(self):
        """DuplicatedInput repeats each symbol `duplication` times, rounding size up to a full group."""
        def check(tape, expected_len, equal_pairs):
            self.assertEqual(len(tape), expected_len)
            for (a, b) in equal_pairs:
                self.assertEqual(tape[a], tape[b])

        env = alg.duplicated_input.DuplicatedInputEnv(duplication=2)
        check(env.generate_input_data(4), 4, [(0, 1), (2, 3)])
        check(env.generate_input_data(3), 2, [(0, 1)])
        check(env.generate_input_data(1), 2, [(0, 1)])
        env = alg.duplicated_input.DuplicatedInputEnv(duplication=3)
        check(env.generate_input_data(6), 6, [(0, 1), (1, 2)])
def LSTMCell(prev_cell, prev_out, input_or_inputs=tuple(), num_units=None, peepholes=True, weight_init=init.Normal(), bias_init=init.Constant(), peepholes_W_init=init.Normal(), forgetgate_nonlinearity=lasagne.nonlinearities.sigmoid, inputgate_nonlinearity=lasagne.nonlinearities.sigmoid, outputgate_nonlinearity=lasagne.nonlinearities.sigmoid, cell_nonlinearity=lasagne.nonlinearities.tanh, output_nonlinearity=lasagne.nonlinearities.tanh, dropout=0.0, name=None, grad_clipping=0.0):
    """Build one LSTM step as lasagne layers; returns (new_cell, new_output).

    Computes the four gate pre-activations from `prev_out` plus any extra
    inputs, optionally adds peephole connections from the cell state, and
    applies the standard update c_t = i*g + f*c_{t-1}, h_t = o * act(c_t).

    NOTE(review): the mutable default arguments (init.Normal() etc.) are
    shared across calls -- presumably safe because lasagne init objects are
    stateless specs; confirm.
    """
    # Cell state must be a (batch, units) layer.
    assert (len(prev_cell.output_shape) == 2)
    if (num_units is None):
        num_units = prev_cell.output_shape[1]
    assert (num_units == prev_cell.output_shape[1])
    # One GateLayer produces the pre-activations of all four gates at once.
    gates = GateLayer(([prev_out] + check_list(input_or_inputs)), ([num_units] * 4), channel_names=['to_ingate', 'to_forgetgate', 'to_cell', 'to_outgate'], gate_nonlinearities=None, bias_init=bias_init, weight_init=weight_init, name=(name or ''))
    (ingate, forgetgate, cell_input, outputgate) = gates.values()
    if grad_clipping:
        (ingate, forgetgate, cell_input, outputgate) = [clip_grads(lyr, grad_clipping) for lyr in [ingate, forgetgate, cell_input, outputgate]]
    if peepholes:
        # Peepholes are elementwise (diagonal) connections from the cell state.
        peepholes_W_init = check_list(peepholes_W_init)
        assert (len(peepholes_W_init) in (1, 3))
        if (len(peepholes_W_init) == 1):
            # One init given: reuse it for all three peephole weights.
            peepholes_W_init *= 3
        (W_cell_to_ingate_init, W_cell_to_forgetgate_init) = peepholes_W_init[:2]
        peep_ingate = lasagne.layers.ScaleLayer(prev_cell, W_cell_to_ingate_init, shared_axes=[0], name=((name or '') + '.W_cell_to_ingate_peephole'))
        peep_forgetgate = lasagne.layers.ScaleLayer(prev_cell, W_cell_to_forgetgate_init, shared_axes=[0], name=((name or '') + '.W_cell_to_forgetgate_peephole'))
        ingate = add(ingate, peep_ingate)
        forgetgate = add(forgetgate, peep_forgetgate)
    ingate = NonlinearityLayer(ingate, inputgate_nonlinearity, name=((name or '') + '.inputgate'))
    forgetgate = NonlinearityLayer(forgetgate, forgetgate_nonlinearity, name=((name or '') + '.forgetgate'))
    cell_input = NonlinearityLayer(cell_input, nonlinearity=cell_nonlinearity, name=((name or '') + '.cell_nonlinearity'))
    if (dropout != 0):
        cell_input = DropoutLayer(cell_input, p=dropout)
    # c_t = i * g + f * c_{t-1}
    new_cell = add(mul(cell_input, ingate), mul(prev_cell, forgetgate))
    if peepholes:
        # Output-gate peephole uses the NEW cell state.
        W_cell_to_outgate_init = peepholes_W_init[2]
        peep_outgate = lasagne.layers.ScaleLayer(new_cell, W_cell_to_outgate_init, shared_axes=[0], name=((name or '') + '.W_cell_to_outgate_peephole'))
        outputgate = add(outputgate, peep_outgate)
    outputgate = NonlinearityLayer(outputgate, outputgate_nonlinearity, name=((name or '') + '.outgate'))
    # h_t = o * act(c_t)
    new_output = NonlinearityLayer(new_cell, output_nonlinearity, name=((name or '') + '.outgate_nonlinearity'))
    new_output = mul(outputgate, new_output, name=((name or '') + '.outgate'))
    return (new_cell, new_output)
class Migration(migrations.Migration):
    # Initial migration creating the Commit and Tag models.
    #
    # NOTE(review): the `operations` literal below is corrupted in this copy
    # of the source -- the help_text string of Commit.authors runs into the
    # Tag CreateModel call (closing quote/brackets lost during extraction).
    # Restore it from version control before use.
    initial = True
    dependencies = []
    operations = [migrations.CreateModel(name='Commit', fields=[('sha', models.CharField(help_text='The SHA hash of this commit.', max_length=40, primary_key=True, serialize=False)), ('message', models.TextField(help_text='The commit message.')), ('date', models.DateTimeField(help_text='The date and time the commit was created.')), ('authors', models.TextField(help_text='The person(s) who created the commit. This is a serialized JSON object. Refer to the GitHub documentation on the commit endpoint (schema/commit.author & schema/commit.committer) for more info. migrations.CreateModel(name='Tag', fields=[('last_updated', models.DateTimeField(auto_now=True, help_text='The date and time this data was last fetched.')), ('sha', models.CharField(help_text="The tag's hash, as calculated by GitHub.", max_length=40)), ('name', models.CharField(help_text="The tag's name.", max_length=50, primary_key=True, serialize=False)), ('group', models.CharField(help_text='The group the tag belongs to.', max_length=50, null=True)), ('body', models.TextField(help_text='The content of the tag.')), ('last_commit', models.ForeignKey(help_text='The commit this file was last touched in.', null=True, on_delete=django.db.models.deletion.CASCADE, to='content.commit'))])]
def single_run(E=30000.0, P=25.0, w=0.1, x=0.0):
    """Linear static analysis of a one-bay portal frame in OpenSees.

    Returns 0.15 minus the lateral displacement of node 2, so a root of this
    function corresponds to a target drift of 0.15 units.

    Args:
        E: elastic modulus for all sections.
        P: lateral point load applied at node 2.
        w: uniform load on the girder (applied downward).
        x: horizontal offset of the left column base (node 1).
    """
    ops.wipe()
    ops.model('basic', '-ndm', 2, '-ndf', 3)
    # Nodes: 1 and 4 are fixed column bases, 2 and 3 the girder ends.
    ops.node(1, x, 0)
    ops.node(2, 0, 144)
    ops.node(3, 240, 144)
    ops.node(4, 240, 0)
    ops.fix(1, 1, 1, 1)
    ops.fix(4, 1, 1, 1)
    # Section properties: girder (Ag, Ig) and columns (Ac, Ic).
    Ag = 25.0
    Ig = 1500.0
    Ac = 29.0
    Ic = 2000.0
    gsecTag = 1
    ops.section('Elastic', gsecTag, E, Ag, Ig)
    csecTag = 2
    ops.section('Elastic', csecTag, E, Ac, Ic)
    transfTag = 1
    ops.geomTransf('Linear', transfTag)
    # Three-point Lobatto integration along each member.
    N = 3
    gbiTag = 1
    ops.beamIntegration('Lobatto', gbiTag, gsecTag, N)
    cbiTag = 2
    ops.beamIntegration('Lobatto', cbiTag, csecTag, N)
    # Force-based beam-column elements: two columns plus one girder.
    leftColTag = 1
    ops.element('forceBeamColumn', leftColTag, 1, 2, transfTag, cbiTag)
    girderTag = 2
    ops.element('forceBeamColumn', girderTag, 2, 3, transfTag, gbiTag)
    rightColTag = 3
    ops.element('forceBeamColumn', rightColTag, 3, 4, transfTag, cbiTag)
    # Loads: lateral point load P at node 2 plus uniform girder load w.
    tsTag = 1
    ops.timeSeries('Constant', tsTag)
    patternTag = 1
    ops.pattern('Plain', patternTag, tsTag)
    ops.load(2, P, 0, 0)
    ops.eleLoad('-ele', girderTag, '-type', 'beamUniform', (- w))
    # Standard linear static solution strategy, single load step.
    ops.constraints('Transformation')
    ops.numberer('RCM')
    ops.system('BandGeneral')
    ops.test('NormDispIncr', 1e-06, 6, 2)
    ops.algorithm('Linear')
    ops.integrator('LoadControl', 1)
    ops.analysis('Static')
    ops.analyze(1)
    return (0.15 - ops.nodeDisp(2, 1))
class TestSerializeStream():
    """Tests for qtutils.serialize_stream / deserialize_stream error handling.

    NOTE(review): decorators were mangled in this copy of the source; the
    fixture and the two ``qt_log_ignore`` marks are restored in their
    conventional pytest / pytest-qt form -- confirm against upstream.
    """

    def _set_status(self, stream, status):
        """Make the mocked stream report the given status from now on."""
        stream.status.return_value = status

    @pytest.fixture
    def stream_mock(self):
        """A QDataStream mock that initially reports an Ok status."""
        m = unittest.mock.MagicMock(spec=QDataStream)
        m.status.return_value = QDataStream.Status.Ok
        return m

    def test_serialize_pre_error_mock(self, stream_mock):
        """A stream already in error is never written to."""
        stream_mock.status.return_value = QDataStream.Status.ReadCorruptData
        with pytest.raises(OSError, match='The data stream has read corrupt data.'):
            qtutils.serialize_stream(stream_mock, QPoint())
        assert (not stream_mock.__lshift__.called)

    def test_serialize_post_error_mock(self, stream_mock):
        """An error raised by the write itself is surfaced after one write."""
        obj = QPoint()
        stream_mock.__lshift__.side_effect = (lambda _other: self._set_status(stream_mock, QDataStream.Status.ReadCorruptData))
        with pytest.raises(OSError, match='The data stream has read corrupt data.'):
            qtutils.serialize_stream(stream_mock, obj)
        stream_mock.__lshift__.assert_called_once_with(obj)

    def test_deserialize_pre_error_mock(self, stream_mock):
        """A stream already in error is never read from."""
        stream_mock.status.return_value = QDataStream.Status.ReadCorruptData
        with pytest.raises(OSError, match='The data stream has read corrupt data.'):
            qtutils.deserialize_stream(stream_mock, QPoint())
        assert (not stream_mock.__rshift__.called)

    def test_deserialize_post_error_mock(self, stream_mock):
        """An error raised by the read itself is surfaced after one read."""
        obj = QPoint()
        stream_mock.__rshift__.side_effect = (lambda _other: self._set_status(stream_mock, QDataStream.Status.ReadCorruptData))
        with pytest.raises(OSError, match='The data stream has read corrupt data.'):
            qtutils.deserialize_stream(stream_mock, obj)
        stream_mock.__rshift__.assert_called_once_with(obj)

    def test_round_trip_real_stream(self):
        """Serialize then deserialize a QPoint through a real QDataStream."""
        src_obj = QPoint(23, 42)
        dest_obj = QPoint()
        data = QByteArray()
        write_stream = QDataStream(data, QIODevice.OpenModeFlag.WriteOnly)
        qtutils.serialize_stream(write_stream, src_obj)
        read_stream = QDataStream(data, QIODevice.OpenModeFlag.ReadOnly)
        qtutils.deserialize_stream(read_stream, dest_obj)
        assert (src_obj == dest_obj)

    @pytest.mark.qt_log_ignore('^QIODevice::write.*: ReadOnly device')
    def test_serialize_readonly_stream(self):
        """Writing to a read-only stream raises a write error."""
        data = QByteArray()
        stream = QDataStream(data, QIODevice.OpenModeFlag.ReadOnly)
        with pytest.raises(OSError, match='The data stream cannot write to the underlying device.'):
            qtutils.serialize_stream(stream, QPoint())

    @pytest.mark.qt_log_ignore('QIODevice::read.*: WriteOnly device')
    def test_deserialize_writeonly_stream(self):
        """Reading from a write-only stream raises a past-the-end error."""
        data = QByteArray()
        obj = QPoint()
        stream = QDataStream(data, QIODevice.OpenModeFlag.WriteOnly)
        with pytest.raises(OSError, match='The data stream has read past the end of the data in the underlying device.'):
            qtutils.deserialize_stream(stream, obj)
def calc_time(lower_bound, upper_bound, latitude, longitude, attribute, value, altitude=0, pressure=101325, temperature=12, horizon='+0:00', xtol=1e-12):
    """Find the time between the bounds at which a solar `attribute` equals `value`.

    Sets up an ephem observer/sun pair and root-finds the attribute residual
    with scipy's brentq over the dublin-julian-day interval; brentq raises
    when the root is not bracketed by the bounds.
    """
    (obs, sun) = _ephem_setup(latitude, longitude, altitude, pressure, temperature, horizon)
    def compute_attr(thetime, target, attr):
        # Residual between the sun attribute at `thetime` and the target value.
        obs.date = thetime
        sun.compute(obs)
        return (getattr(sun, attr) - target)
    lb = datetime_to_djd(lower_bound)
    ub = datetime_to_djd(upper_bound)
    djd_root = so.brentq(compute_attr, lb, ub, (value, attribute), xtol=xtol)
    return djd_to_datetime(djd_root)
# NOTE(review): the decorator lines were mangled in this copy of the source;
# restored as a pytest parametrize mark plus the module's enum-workaround
# skip marker -- confirm the exact marker name upstream.
@pytest.mark.parametrize('keys, expected', [([127995], '<>'), ([171510], '<>'), ([Qt.Key.Key_Shift, 171510], '<Shift><>'), ([128104, 8205, 128104, 8205, 128102], '<><\u200d><><\u200d><>')])
@_enum_workaround_skip
def test_surrogate_sequences(keys, expected):
    """Key sequences containing surrogate/emoji codepoints render as '<>' placeholders."""
    infos = [keyutils.KeyInfo(Qt.Key(key)) for key in keys]
    seq = keyutils.KeySequence(*infos)
    assert (str(seq) == expected)
def _run_on_single_gpu(model, batch_list_t, batch_list_v, batch_sequence_output_list, batch_seq_features_list, batch_visual_output_list):
sim_matrix = []
for (idx1, b1) in enumerate(batch_list_t):
(input_mask, segment_ids, *_tmp) = b1
sequence_output = batch_sequence_output_list[idx1]
seq_features = batch_seq_features_list[idx1]
each_row = []
for (idx2, b2) in enumerate(batch_list_v):
(video_mask, *_tmp) = b2
visual_output = batch_visual_output_list[idx2]
(b1b2_logits, *_tmp) = model.get_similarity_logits(sequence_output, seq_features, visual_output, input_mask, video_mask, loose_type=model.loose_type)
b1b2_logits = b1b2_logits.cpu().detach().numpy()
each_row.append(b1b2_logits)
each_row = np.concatenate(tuple(each_row), axis=(- 1))
sim_matrix.append(each_row)
return sim_matrix |
class _RemoteEnv(object):
    """Worker-side wrapper: unpickles an environment/policy pair and runs
    rollouts with them inside a fresh TF session."""

    def __init__(self, env_pkl, policy_pkl):
        self._sess = tf_utils.create_session()
        self._sess.run(tf.global_variables_initializer())
        # NOTE(review): pickle.loads here assumes the payloads come from a
        # trusted parent process; never feed untrusted data through this path.
        self._env = pickle.loads(env_pkl)
        self._policy = pickle.loads(policy_pkl)
        # Some environments need explicit post-unpickle setup.
        if hasattr(self._env, 'initialize'):
            self._env.initialize()

    def rollout(self, policy_params, path_length):
        """Load ``policy_params`` into the policy and run one rollout of at
        most ``path_length`` steps; returns the sampled path."""
        self._policy.set_param_values(policy_params)
        return rollout(self._env, self._policy, path_length)
def main_worker(local_rank, args):
    """Per-process training entry point: set up ranks, config, model,
    dataloaders and the solver, then train."""
    args.local_rank = local_rank
    args.global_rank = args.local_rank + args.node_rank * args.ngpus_per_node
    args.distributed = args.world_size > 1
    print(args)

    # Load config, apply CLI overrides, optionally shrink it for debugging.
    cfg = load_yaml_config(args.config_file)
    cfg = merge_opts_to_config(cfg, args.opts)
    if args.debug:
        cfg = modify_config_for_debug(cfg)

    logger = Logger(args)
    logger.save_config(cfg)

    model = build_model(cfg, args)
    if args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)

    loaders = build_dataloader_fast(cfg, args)
    solver = Solver(config=cfg, args=args, model=model, dataloader=loaders, logger=logger)

    # Explicit checkpoint loads weights only; auto-resume restores everything.
    if args.load_path is not None:
        solver.resume(path=args.load_path, load_optimizer_and_scheduler=False, load_others=False)
    if args.auto_resume:
        solver.resume()
    solver.train()
def write_preprocessing_parameters(params: namedtuple) -> None:
    """Write selected preprocessing parameters to ``preprocessing_params.csv``
    in ``params.dataset_dir``.

    Only the fields in ``keys_to_write`` are saved, one ``field;value`` row
    each.  Fields not in the list (e.g. ``dataset_dir`` itself) are skipped.
    """
    import os

    # os.path.join is robust whether or not dataset_dir ends with a separator
    # (the old string concatenation silently produced a broken filename).
    dict_path = os.path.join(params.dataset_dir, 'preprocessing_params.csv')
    keys_to_write = ['atom_types', 'formal_charge', 'imp_H', 'chirality', 'group_size', 'max_n_nodes', 'use_aromatic_bonds', 'use_chirality', 'use_explicit_H', 'ignore_H']
    # newline='' as required by the csv module, otherwise rows are
    # double-spaced on Windows.
    with open(dict_path, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file, delimiter=';')
        # A namedtuple zips cleanly with its own field names.
        for field, val in zip(params._fields, params):
            if field in keys_to_write:
                writer.writerow([field, val])
class Storage(Resource):
    """A Resource handle specialized for STORAGE-typed resources."""
    # Signature/type-spec residue from the original decorator annotations;
    # kept as-is so the module's structure is unchanged.
    ('StorageResource', rus.optional(str), rus.optional(ss.Session), rus.optional(sab.Base), rus.optional(dict), rus.optional(rus.one_of(SYNC, ASYNC, TASK)))
    (rus.nothing)
    def __init__(self, id=None, session=None, _adaptor=None, _adaptor_state={}, _ttype=None):
        """Initialize via the Resource base and verify the resource type.

        NOTE(review): ``_adaptor_state={}`` is a mutable default argument and
        is shared across calls if ever mutated — confirm callers treat it as
        read-only.
        """
        # Keep a bound super() proxy so this (and possibly sibling methods
        # outside this view) can delegate to Resource.
        self._resrc = super(Storage, self)
        self._resrc.__init__(id, session, _adaptor, _adaptor_state, _ttype)
        # A Storage handle must wrap a STORAGE-typed resource.
        if (self.rtype != c.STORAGE):
            raise se.BadParameter(('Cannot init Storage resource type %s' % self.rtype))
def before_after_plots_for_quantized_model(before_weights_map, after_weights_map):
    """Render comparison plots (boxplot, violin, line, scatter) for each key
    present in ``before_weights_map``, comparing weights before vs. after
    quantization.  ``after_weights_map`` must contain the same keys."""
    for name, before_data in before_weights_map.items():
        after_data = after_weights_map[name]
        compare_boxplots_before_after_quantization(before_data, after_data, tab_name=name, subplot_name='Boxplots')
        # Summary statistics feed the line and scatter plots.
        before_stats = get_necessary_statistics_from_dataframe(before_data)
        after_stats = get_necessary_statistics_from_dataframe(after_data)
        compare_overall_model_changes_violinplot(before_data, after_data, tab_name=name, subplot_name='Violin')
        compare_overall_changes_line_plot(before_stats, after_stats, tab_name=name, subplot_name='Line')
        compare_key_stats_scatter_plot(before_stats, after_stats, tab_name=name, subplot_name='Scatter')
class ListItemWrapper(uiawrapper.UIAWrapper):
    """UIA wrapper for list items and data-grid items."""
    # UIA control types this wrapper handles.
    _control_types = ['DataItem', 'ListItem']
    def __init__(self, elem, container=None):
        """Wrap ``elem``; ``container`` is the owning list/grid wrapper (may be None)."""
        super(ListItemWrapper, self).__init__(elem)
        self.container = container
    def is_checked(self):
        """Return True if the item's toggle state is 'on'.

        NOTE(review): ``toggle_state_on`` is not defined in this block —
        presumably a module-level constant; confirm it exists at import time.
        """
        return (self.iface_toggle.ToggleState_On == toggle_state_on)
    def texts(self):
        """Return the item's texts: child window texts if any, else the base
        wrapper's texts."""
        content = [ch.window_text() for ch in self.children(content_only=True)]
        if content:
            return content
        else:
            return super(ListItemWrapper, self).texts()
def rand_real():
vp = np.random.uniform(low=0, high=360)
vangle = np.random.uniform(low=(- 40), high=(- 70))
cam_dist = np.random.uniform(low=1.5, high=2.5)
distlow = 0.4
distobj = np.random.uniform(low=distlow, high=0.7)
distmult = np.random.uniform(low=1.7, high=2.1)
object_ = [(- (distobj - distlow)), 0.0]
goal = [(- (((distobj * distmult) - distlow) - 0.5)), 0.0]
return dict(vp=vp, vangle=vangle, object=object_, goal=goal, cam_dist=cam_dist, imsize=(36, 64), name='real', meanfile='model/real_inception_Mixed_7c.npz', modeldata='model/vdata_realnew200.npy') |
_edge_encoder('LinearEdge')
class LinearEdgeEncoder(torch.nn.Module):
    """Embed scalar edge features with a single linear layer."""

    def __init__(self, emb_dim):
        super().__init__()
        # Only datasets whose edge-feature dimensionality is known (1) are
        # supported; anything else needs an explicit cfg option.
        if cfg.dataset.name not in ['MNIST', 'CIFAR10']:
            raise ValueError('Input edge feature dim is required to be hardset or refactored to use a cfg option.')
        self.in_dim = 1
        self.encoder = torch.nn.Linear(self.in_dim, emb_dim)

    def forward(self, batch):
        # Flatten edge attributes to (num_edges, in_dim) before encoding.
        batch.edge_attr = self.encoder(batch.edge_attr.view(-1, self.in_dim))
        return batch
class TestLinearMapper(QiskitNatureTestCase):
    """Tests for the LinearMapper (spin operators -> qubit operators)."""
    # Spin-1/2 operator with a squared-Y term on one register site.
    spin_op1 = SpinOp({'Y_0^2': ((- 0.432) + 1.32j)}, 0.5, 1)
    ref_qubit_op1 = SparsePauliOp(['II', 'ZZ'], coeffs=[((- 0.054) + 0.165j), (0.054 - 0.165j)])
    # Mixed X/Z product on the first of two sites.
    spin_op2 = SpinOp({'X_0 Z_0': ((- 1.139) + 0.083j)}, 0.5, 2)
    ref_qubit_op2 = SparsePauliOp(['IIYX', 'IIXY'], coeffs=[(0.010375 + 0.142375j), ((- 0.010375) - 0.142375j)])
    # Longer product spanning three sites.
    spin_op3 = SpinOp({'X_0 Y_0^2 Z_0 X_1 Y_1 Y_2 Z_2': ((- 0.18) + 1.204j)}, 0.5, 3)
    ref_qubit_op3 = SparsePauliOp.from_list([('YYIZYX', (0. + 8.7890625e-05j)), ('XXIZYX', (0. + 8.7890625e-05j)), ('YYZIYX', ((- 0.) - 8.7890625e-05j)), ('XXZIYX', ((- 0.) - 8.7890625e-05j)), ('YYIZXY', ((- 0.) - 8.7890625e-05j)), ('XXIZXY', ((- 0.) - 8.7890625e-05j)), ('YYZIXY', (0. + 8.7890625e-05j)), ('XXZIXY', (0. + 8.7890625e-05j))])
    # Spin-3/2 case: each site maps to four qubits.
    spin_op4 = SpinOp({'Z_1': ((- 0.875) - 0.075j)}, 1.5, 2)
    ref_qubit_op4 = SparsePauliOp.from_list([('ZIIIIIII', ((- 0.65625) - 0.05625j)), ('IZIIIIII', ((- 0.21875) - 0.01875j)), ('IIZIIIII', (0.21875 + 0.01875j)), ('IIIZIIII', (0.65625 + 0.05625j))])
    # Sum with an identity term on a wide (8-site) register.
    spin_op5 = (SpinOp({'X_0': (4 + 0j)}, 0.5, 8) + SpinOp({'': (8 + 0j)}, 0.5, 8))
    ref_qubit_op5 = SparsePauliOp.from_list([('IIIIIIIIIIIIIIII', (8.0 + 0j)), ('IIIIIIIIIIIIIIXX', (1.0 + 0j)), ('IIIIIIIIIIIIIIYY', (1.0 + 0j))])
    ((spin_op1, ref_qubit_op1), (spin_op2, ref_qubit_op2), (spin_op3, ref_qubit_op3), (spin_op4, ref_qubit_op4), (spin_op5, ref_qubit_op5))
    def test_mapping(self, spin_op, ref_qubit_op):
        """Each spin operator maps to its known reference qubit operator."""
        mapper = LinearMapper()
        qubit_op = mapper.map(spin_op)
        self.assertEqualSparsePauliOp(qubit_op, ref_qubit_op)
    def test_mapping_overwrite_reg_len(self):
        """register_length overrides the operator's own register length."""
        op = SpinOp({'Y_0^2': ((- 0.432) + 1.32j)}, 0.5, 1)
        expected = SpinOp({'Y_0^2': ((- 0.432) + 1.32j)}, 0.5, 3)
        mapper = LinearMapper()
        self.assertEqual(mapper.map(op, register_length=3), mapper.map(expected))
def ssim3D(img1, img2, window_size=11, size_average=True):
    """Compute 3-D SSIM between two 5-D volumes (N, C, D, H, W).

    Builds a Gaussian window matching ``img1``'s channel count, moves it onto
    ``img1``'s device/dtype, and delegates to ``_ssim_3D``.
    """
    # Unpacking doubles as a rank check: inputs must be 5-dimensional.
    _, channel, _, _, _ = img1.size()
    window = create_window_3D(window_size, channel)
    if img1.is_cuda:
        window = window.cuda(img1.get_device())
    window = window.type_as(img1)
    return _ssim_3D(img1, img2, window, window_size, channel, size_average)
def load_sentence(path):
    """Load JSON-lines sentences from ``path`` and build a character vocabulary.

    Each line is a JSON array of the form ``[doc_id, sentence_text, ..., tag]``;
    only the sentence text feeds the vocabulary.

    Returns:
        (sentences, dico, char_to_id, id_to_char) — the raw parsed lines, the
        character-frequency dict, and both direction mappings.
    """
    sentences = []
    sentence = []
    for line in codecs.open(path, 'r', 'utf8'):
        line = json.loads(line)
        # line == [doc_id, sentence_text, ..., tag]; only the text is used here.
        sentence.append(line[1])
        sentences.append(line)
    chars = [[x for x in s] for s in sentence]
    dico = create_dico(chars)
    # BUG FIX: these two assignments had no right-hand side (syntax error).
    # Give the padding/unknown sentinels the highest counts so they keep
    # fixed, low ids after frequency sorting in create_mapping.
    dico['<PAD>'] = 10000001
    dico['<UNK>'] = 10000000
    char_to_id, id_to_char = create_mapping(dico)
    return (sentences, dico, char_to_id, id_to_char)
def test_lock_file_should_not_have_mixed_types(locker: Locker, root: ProjectPackage) -> None:
    """A dependency listed both plain and with options must serialize as a
    TOML array of tables, not a mix of string and table forms."""
    package_a = get_package('A', '1.0.0')
    # Same dependency name twice: once as a bare constraint, once optional.
    package_a.add_dependency(Factory.create_dependency('B', '^1.0.0'))
    package_a.add_dependency(Factory.create_dependency('B', {'version': '>=1.0.0', 'optional': True}))
    # Activate the optional requirement and expose it through an extra.
    package_a.requires[(- 1)].activate()
    package_a.extras[canonicalize_name('foo')] = [get_dependency('B', '>=1.0.0')]
    locker.set_lock_data(root, [package_a])
    # The canonical lockfile text the locker is expected to emit.
    expected = f'''# {GENERATED_COMMENT}
[[package]]
name = "A"
version = "1.0.0"
description = ""
optional = false
python-versions = "*"
files = []
[package.dependencies]
B = [
    {{version = "^1.0.0"}},
    {{version = ">=1.0.0", optional = true}},
]
[package.extras]
foo = ["B (>=1.0.0)"]
[metadata]
lock-version = "2.0"
python-versions = "*"
content-hash = "115cf985d932e9bf5f540555bbdd75decbb62cac81e399375fc19f6277f8c1d8"
'''
    with locker.lock.open(encoding='utf-8') as f:
        content = f.read()
    assert (content == expected)
class Model(object):
    """Abstract assignment from formulae to values.

    Subclasses must implement :meth:`get_value`; everything else is derived
    from it.  ``environment`` provides formula-manipulation services.
    """

    def __init__(self, environment):
        self.environment = environment
        self._converter = None

    def get_value(self, formula, model_completion=True):
        """Return the value of ``formula`` in this model (abstract)."""
        raise NotImplementedError

    def get_values(self, formulae, model_completion=True):
        """Evaluate each formula in ``formulae``; return a formula->value dict."""
        res = {}
        for f in formulae:
            v = self.get_value(f, model_completion=model_completion)
            res[f] = v
        return res

    def get_py_value(self, formula, model_completion=True):
        """Return the plain Python value of ``formula`` (must be constant)."""
        res = self.get_value(formula, model_completion=model_completion)
        assert res.is_constant()
        return res.constant_value()

    def get_py_values(self, formulae, model_completion=True):
        """Like :meth:`get_values` but with plain Python values."""
        res = {}
        for f in formulae:
            v = self.get_py_value(f, model_completion=model_completion)
            res[f] = v
        return res

    def satisfies(self, formula, solver=None):
        """Return True iff this model satisfies ``formula``.

        Substitutes the model's values for the formula's free variables and
        simplifies; optionally resolves division-by-zero subterms through the
        given ``solver`` when the environment enables that semantics.
        """
        subs = self.get_values(formula.get_free_variables())
        simp = formula.substitute(subs).simplify()
        if simp.is_true():
            return True
        if simp.is_false():
            return False
        # Any remaining free variable means the formula is not decided.
        free_vars = simp.get_free_variables()
        if len(free_vars) > 0:
            return False
        if self.environment.enable_div_by_0 and (solver is not None):
            # Collect division-by-zero subterms and re-evaluate them.
            stack = [simp]
            div_0 = []
            while stack:
                x = stack.pop()
                if x.is_constant():
                    pass
                elif x.is_div() and x.arg(1).is_zero():
                    div_0.append(x)
                stack += x.args()
            subs = self.get_values(div_0)
            simp = simp.substitute(subs).simplify()
            return simp.is_true()
        return False

    # BUG FIX: ``converter`` was defined twice as plain methods (the stripped
    # @property / @converter.setter decorators), so the getter was shadowed by
    # the setter.  Restore the property pair.
    @property
    def converter(self):
        """Converter associated with this model (None until assigned)."""
        return self._converter

    @converter.setter
    def converter(self, value):
        self._converter = value

    def __getitem__(self, idx):
        return self.get_value(idx, model_completion=True)

    def __str__(self):
        # NOTE(review): iterating ``self`` relies on an __iter__ that is not
        # defined in this class — presumably provided by subclasses; confirm.
        return '\n'.join([('%s := %s' % (var, value)) for (var, value) in self])
def _expand_requires_extra(re):
for (extra, reqs) in sorted(re.items()):
for req in reqs:
if (';' in req):
(name, envmark) = req.split(';', 1)
(yield '{} ; extra == "{}" and ({})'.format(name, extra, envmark))
else:
(yield '{} ; extra == "{}"'.format(req, extra)) |
def cifar10_iterator(cfg, kv):
    """Build CIFAR-10 train/val RecordIO iterators, sharded over kvstore workers.

    Augmentation (random crop/mirror) is applied to the training iterator only
    when ``cfg.dataset.aug_level > 0``; validation is never augmented.
    """
    train_rec = os.path.join(cfg.dataset.data_dir, 'cifar10_train.rec')
    val_rec = os.path.join(cfg.dataset.data_dir, 'cifar10_val.rec')
    # NOTE(review): these per-channel statistics are currently unused here;
    # kept for parity with the original (normalization may happen elsewhere).
    mean = [125.31, 123.01, 113.91]
    std = [63.01, 62.09, 66.71]
    augment = cfg.dataset.aug_level > 0
    train = mx.io.ImageRecordIter(
        path_imgrec=train_rec,
        label_width=1,
        data_name='data',
        label_name='softmax_label',
        data_shape=(3, 32, 32),
        batch_size=cfg.batch_size,
        pad=4,
        fill_value=127,
        rand_crop=augment,
        rand_mirror=augment,
        shuffle=(cfg.dataset.aug_level >= 0),
        num_parts=kv.num_workers,
        part_index=kv.rank)
    val = mx.io.ImageRecordIter(
        path_imgrec=val_rec,
        label_width=1,
        data_name='data',
        label_name='softmax_label',
        batch_size=cfg.batch_size,
        data_shape=(3, 32, 32),
        rand_crop=False,
        rand_mirror=False,
        num_parts=kv.num_workers,
        part_index=kv.rank)
    return (train, val)
def fix_gnu_param(arch, ex):
    """Write a GNU linker version script for the exported symbols and return
    the corresponding linker flags.

    ``ex`` is a list of dicts: either ``{'linux_version': ...}`` (at most one)
    or ``{'lang': ..., 'sym': ...}``.  C++ symbols are expanded into their
    C-level names via ``to_c``.  Returns ``['-Wl,--version-script=<path>']``,
    plus ``'-Wl,--export-dynamic'`` on Android.

    Raises:
        Exception: if more than one ``linux_version`` entry is present.
    """
    by_lang = collections.defaultdict(list)
    version = None
    for item in ex:
        if item.get('linux_version'):
            if version:
                raise Exception('More than one linux_version defined')
            version = item.get('linux_version')
        elif item['lang'] == 'C++':
            by_lang['C'].extend(to_c(item['sym']))
        else:
            by_lang[item['lang']].append(item['sym'])
    # BUG FIX: NamedTemporaryFile defaults to binary mode ('w+b'), so writing
    # str raised TypeError on Python 3.  Open in text mode explicitly.
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
        if version:
            f.write('{} {{\nglobal:\n'.format(version))
        else:
            f.write('{\nglobal:\n')
        for lang, syms in by_lang.items():
            f.write('    extern "' + lang + '" {\n')
            for sym in syms:
                f.write('        ' + sym + ';\n')
            f.write('    };\n')
        f.write('local: *;\n};\n')
    ret = ['-Wl,--version-script=' + f.name]
    if arch == 'ANDROID':
        ret += ['-Wl,--export-dynamic']
    return ret
class ApocalypticMetropolis(pm.Metropolis):
    """A Metropolis sampler that attaches a custom warning to every draw."""

    # Extend the base sampler's stat schema with a warning slot.
    stats_dtypes_shapes = {
        **pm.Metropolis.stats_dtypes_shapes,
        'warning': (SamplerWarning, None),
    }

    def astep(self, q0):
        """Take one Metropolis step and tag its stats with a warning."""
        draw, stats = super().astep(q0)
        warning = SamplerWarning(WarningType.BAD_ENERGY, 'Asteroid incoming!', 'warn')
        stats[0]['warning'] = warning
        return (draw, stats)
def cannot_combine_with_fragment_options(ctx, cache):
    """Raise a click.UsageError if --cache is combined with any
    fragmentation option recorded in the context metadata."""
    if cache is None:
        return
    used_names = ctx.meta[fragment_click.FRAGMENTATION_OPTION_NAMES]
    if not used_names:
        return
    names = sorted(name_to_command_line(name) for name in used_names)
    if len(names) == 1:
        raise click.UsageError(f'Cannot combine {names[0]} with --cache')
    # Multiple offending options: "a, b or c with --cache".
    *head, tail = names
    head_str = ', '.join(head)
    raise click.UsageError(f'Cannot combine {head_str} or {tail} with --cache')
class MatchCase(_base_nodes.MultiLineBlockNode):
    """AST node for a single ``case`` clause of a ``match`` statement."""
    # Child fields in evaluation order.
    _astroid_fields = ('pattern', 'guard', 'body')
    _multi_line_block_fields = ('body',)
    # MatchCase carries no position information of its own; positions live on
    # its pattern/guard/body children.
    lineno: None
    col_offset: None
    end_lineno: None
    end_col_offset: None
    def __init__(self, *, parent: (NodeNG | None)=None) -> None:
        # Declared here for type checkers; postinit() assigns the values.
        self.pattern: Pattern
        self.guard: (NodeNG | None)
        self.body: list[NodeNG]
        super().__init__(parent=parent, lineno=None, col_offset=None, end_lineno=None, end_col_offset=None)
    def postinit(self, *, pattern: Pattern, guard: (NodeNG | None), body: list[NodeNG]) -> None:
        """Attach the child nodes; called after construction."""
        self.pattern = pattern
        self.guard = guard
        self.body = body
def verify_onnx(model, path, force_cpu):
    """Cross-check an exported ONNX model against its PyTorch source.

    Loads ``<path>/<model>.pth`` into the torch model and ``<path>/<model>.onnx``
    into ONNXRuntime, runs both on the same random input and asserts the
    outputs agree within 1%.
    """
    import onnxruntime
    import numpy as np
    model_weight_file = os.path.join(path, model + '.pth')
    # BUG FIX: removed a leftover debug override that hard-coded
    # './weights/GPEN-512.pth' and ignored the requested model/path.
    model_setenv(force_cpu)
    torch_model = get_model(model_weight_file)
    torch_model.eval()
    onnx_file_name = os.path.join(path, model + '.onnx')
    onnxruntime_engine = onnxruntime.InferenceSession(onnx_file_name)

    def to_numpy(tensor):
        # Detach first so grad-tracking tensors can be converted.
        return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()

    # torch.autograd.Variable has been a no-op wrapper since PyTorch 0.4;
    # use the tensor directly.
    dummy_input = torch.randn(1, 3, 512, 512)
    with torch.no_grad():
        torch_output, _ = torch_model(dummy_input)
    onnxruntime_inputs = {onnxruntime_engine.get_inputs()[0].name: to_numpy(dummy_input)}
    onnxruntime_outputs = onnxruntime_engine.run(None, onnxruntime_inputs)
    np.testing.assert_allclose(to_numpy(torch_output), onnxruntime_outputs[0], rtol=0.01, atol=0.01)
    print('Example: Onnx model has been tested with ONNXRuntime, the result looks good !')
def no_envs(monkeypatch):
    """Remove every pypyr-related environment variable for a clean test env."""
    for env_var in ('PYPYR_CMD_ENCODING',
                    'PYPYR_ENCODING',
                    'PYPYR_SKIP_INIT',
                    'PYPYR_CONFIG_GLOBAL',
                    'PYPYR_CONFIG_LOCAL',
                    'PYPYR_NO_CACHE'):
        # raising=False: absent variables are simply ignored.
        monkeypatch.delenv(env_var, raising=False)
def train(args, train_dataset, model, tokenizer):
    """Train ``model`` on ``train_dataset``.

    Handles distributed/multi-GPU setup, gradient accumulation, optional fp16
    (apex), checkpoint resumption, periodic logging/evaluation and checkpoint
    saving.  Returns ``(global_step, average_training_loss)``.
    """
    # Only the main process writes TensorBoard summaries.
    if (args.local_rank in [(- 1), 0]):
        tb_writer = SummaryWriter()
    args.train_batch_size = (args.per_gpu_train_batch_size * max(1, args.n_gpu))
    train_sampler = (RandomSampler(train_dataset) if (args.local_rank == (- 1)) else DistributedSampler(train_dataset))
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    # Total optimization steps: either fixed by max_steps (deriving the epoch
    # count) or computed from the epoch count.
    if (args.max_steps > 0):
        t_total = args.max_steps
        args.num_train_epochs = ((args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps)) + 1)
    else:
        t_total = ((len(train_dataloader) // args.gradient_accumulation_steps) * args.num_train_epochs)
    # Weight decay is applied to all parameters except biases and LayerNorm.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    # Restore optimizer/scheduler state when resuming from a checkpoint dir.
    if (os.path.isfile(os.path.join(args.model_name_or_path, 'optimizer.pt')) and os.path.isfile(os.path.join(args.model_name_or_path, 'scheduler.pt'))):
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'optimizer.pt')))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'scheduler.pt')))
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            # NOTE(review): the install URL appears to have been stripped
            # from this message.
            raise ImportError('Please install apex from to use fp16 training.')
        (model, optimizer) = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # Multi-GPU (single process) and distributed (multi process) wrapping.
    if (args.n_gpu > 1):
        model = nn.DataParallel(model)
    if (args.local_rank != (- 1)):
        model = nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    logger.info('***** Running training *****')
    logger.info('  Num examples = %d', len(train_dataset))
    logger.info('  Num Epochs = %d', args.num_train_epochs)
    logger.info('  Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size)
    logger.info('  Total train batch size (w. parallel, distributed & accumulation) = %d', ((args.train_batch_size * args.gradient_accumulation_steps) * (torch.distributed.get_world_size() if (args.local_rank != (- 1)) else 1)))
    logger.info('  Gradient Accumulation steps = %d', args.gradient_accumulation_steps)
    logger.info('  Total optimization steps = %d', t_total)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # When resuming from "checkpoint-<step>", fast-forward epochs and steps.
    if os.path.exists(args.model_name_or_path):
        global_step = int(args.model_name_or_path.split('-')[(- 1)].split('/')[0])
        epochs_trained = (global_step // (len(train_dataloader) // args.gradient_accumulation_steps))
        steps_trained_in_current_epoch = (global_step % (len(train_dataloader) // args.gradient_accumulation_steps))
        logger.info('  Continuing training from checkpoint, will skip to saved global_step')
        logger.info('  Continuing training from epoch %d', epochs_trained)
        logger.info('  Continuing training from global step %d', global_step)
        logger.info('  Will skip the first %d steps in the first epoch', steps_trained_in_current_epoch)
    (tr_loss, logging_loss) = (0.0, 0.0)
    model.zero_grad()
    train_iterator = trange(epochs_trained, int(args.num_train_epochs), desc='Epoch', disable=(args.local_rank not in [(- 1), 0]))
    set_seed(args)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc='Iteration', disable=(args.local_rank not in [(- 1), 0]))
        for (step, batch) in enumerate(epoch_iterator):
            # Skip batches already seen before the resume point.
            if (steps_trained_in_current_epoch > 0):
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple((t.to(args.device) for t in batch))
            inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
            inputs['token_type_ids'] = batch[2]
            outputs = model(**inputs)
            loss = outputs[0]
            if (args.n_gpu > 1):
                # Average per-GPU losses from DataParallel.
                loss = loss.mean()
            if (args.gradient_accumulation_steps > 1):
                loss = (loss / args.gradient_accumulation_steps)
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Optimizer step only every gradient_accumulation_steps batches.
            if (((step + 1) % args.gradient_accumulation_steps) == 0):
                if args.fp16:
                    nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()
                model.zero_grad()
                global_step += 1
                # Periodic logging (and optional evaluation) on main process.
                if ((args.local_rank in [(- 1), 0]) and (args.logging_steps > 0) and ((global_step % args.logging_steps) == 0)):
                    logs = {}
                    if ((args.local_rank == (- 1)) and args.evaluate_during_training):
                        results = evaluate(args, model, tokenizer)
                        for (key, value) in results.items():
                            eval_key = 'eval_{}'.format(key)
                            logs[eval_key] = value
                    loss_scalar = ((tr_loss - logging_loss) / args.logging_steps)
                    learning_rate_scalar = scheduler.get_lr()[0]
                    logs['learning_rate'] = learning_rate_scalar
                    logs['loss'] = loss_scalar
                    logging_loss = tr_loss
                    for (key, value) in logs.items():
                        tb_writer.add_scalar(key, value, global_step)
                    print(json.dumps({**logs, **{'step': global_step}}))
                # Periodic checkpointing on main process.
                if ((args.local_rank in [(- 1), 0]) and (args.save_steps > 0) and ((global_step % args.save_steps) == 0)):
                    output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                    model_to_save = (model.module if hasattr(model, 'module') else model)
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    logger.info('Saving model checkpoint to %s', output_dir)
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, 'optimizer.pt'))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, 'scheduler.pt'))
                    logger.info('Saving optimizer and scheduler states to %s', output_dir)
            if ((args.max_steps > 0) and (global_step > args.max_steps)):
                epoch_iterator.close()
                break
        if ((args.max_steps > 0) and (global_step > args.max_steps)):
            train_iterator.close()
            break
    if (args.local_rank in [(- 1), 0]):
        tb_writer.close()
    return (global_step, (tr_loss / global_step))
def load_w2v_embedding(word_list, uniform_scale, dimension_size):
    """Look up a GoogleNews word2vec vector for each word in ``word_list``.

    ``<pad>`` maps to the zero vector; out-of-vocabulary words get a random
    vector drawn uniformly from [-uniform_scale, uniform_scale).
    """
    embed_file = '../../../code/embedding/GoogleNews-vectors-negative300.bin'
    model = gensim.models.KeyedVectors.load_word2vec_format(embed_file, binary=True)
    vectors = []
    for token_ in word_list:
        if token_ in model:
            vectors.append(model[token_])
        elif token_ == '<pad>':
            vectors.append(np.zeros(dimension_size, dtype=np.float32))
        else:
            vectors.append(np.random.uniform(-uniform_scale, uniform_scale, dimension_size))
    return vectors
def traverse_imports(names):
    """Walk an import-name subtree, yielding each module name as a string."""
    stack = [names]
    while stack:
        node = stack.pop()
        node_type = node.type
        if node_type == token.NAME:
            yield node.value
        elif node_type == syms.dotted_name:
            yield ''.join(child.value for child in node.children)
        elif node_type == syms.dotted_as_name:
            # "a.b as c" -- only the left-hand name matters.
            stack.append(node.children[0])
        elif node_type == syms.dotted_as_names:
            # Children alternate name, comma, name, ...; take every other
            # child, starting from the end.
            stack.extend(node.children[::(- 2)])
        else:
            raise AssertionError('unknown node type')
class FC3_AutoPart(KickstartCommand):
    """Handler for the FC3-era ``autopart`` kickstart command."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs
    def __init__(self, writePriority=100, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        # Whether autopart was seen in the input kickstart.
        self.autopart = kwargs.get('autopart', False)
    def __str__(self):
        """Emit the kickstart text for this command (empty if unset)."""
        retval = KickstartCommand.__str__(self)
        if self.autopart:
            retval += 'autopart\n'
        return retval
    def parse(self, args):
        """Parse the command; FC3 autopart accepts no arguments."""
        if (len(args) > 0):
            raise KickstartParseError((_('Kickstart command %s does not take any arguments') % 'autopart'), lineno=self.lineno)
        self.autopart = True
        return self
    def _getParser(self):
        # Parser carries only documentation/conflicts; no options in FC3.
        return KSOptionParser(prog='autopart', description='\n                            Automatically create partitions -- a root (``/``) partition,\n                            a swap partition, and an appropriate boot partition\n                            for the architecture. On large enough drives, this\n                            will also create a /home partition.', version=FC3, conflicts=self.conflictingCommands)
_register
class CodecListObject(BaseObject):
    """Parser for the ASF Codec List Object (per-stream codec metadata)."""
    GUID = guid2bytes('86D15240-311D-11D0-A3A4-00A0C90348F6')
    def _parse_entry(self, data, offset):
        """Parse one codec entry starting at ``offset``.

        Entry layout: uint16 type, then name and description as
        uint16-length-prefixed UTF-16-LE strings (length in code units), then
        a uint16-length-prefixed information blob (a uint16 codec id when the
        blob is 2 bytes).  Returns (next_offset, type_, name, desc, codec).
        """
        (type_, offset) = cdata.uint16_le_from(data, offset)
        # Name: length is in UTF-16 code units, i.e. units * 2 bytes.
        (units, offset) = cdata.uint16_le_from(data, offset)
        next_offset = (offset + (units * 2))
        try:
            name = data[offset:next_offset].decode('utf-16-le').strip('\x00')
        except UnicodeDecodeError:
            # Tolerate malformed strings rather than failing the whole parse.
            name = u''
        offset = next_offset
        # Description: same encoding scheme as the name.
        (units, offset) = cdata.uint16_le_from(data, offset)
        next_offset = (offset + (units * 2))
        try:
            desc = data[offset:next_offset].decode('utf-16-le').strip('\x00')
        except UnicodeDecodeError:
            desc = u''
        offset = next_offset
        # Codec information blob: only a 2-byte blob (a codec id) is decoded.
        (bytes_, offset) = cdata.uint16_le_from(data, offset)
        next_offset = (offset + bytes_)
        codec = u''
        if (bytes_ == 2):
            codec_id = cdata.uint16_le_from(data, offset)[0]
            if (codec_id in CODECS):
                codec = CODECS[codec_id]
        offset = next_offset
        return (offset, type_, name, desc, codec)
    def parse(self, asf, data):
        """Parse all codec entries, recording audio codec info on asf.info."""
        super(CodecListObject, self).parse(asf, data)
        # Skip the 16-byte reserved GUID preceding the entry count.
        offset = 16
        (count, offset) = cdata.uint32_le_from(data, offset)
        for i in range(count):
            try:
                (offset, type_, name, desc, codec) = self._parse_entry(data, offset)
            except cdata.error:
                raise ASFError('invalid codec entry')
            # type_ == 2 marks an audio codec; the last audio entry wins.
            if (type_ == 2):
                name = name.strip()
                desc = desc.strip()
                asf.info.codec_type = codec
                asf.info.codec_name = name
                asf.info.codec_description = desc
        return
class F18_TestCase(F17_TestCase):
    """Bootloader command tests for F18 syntax (adds --leavebootorder)."""
    def runTest(self, iscrypted=False):
        # All F17 behaviour still applies.
        F17_TestCase.runTest(self, iscrypted=iscrypted)
        self.assert_parse('bootloader --location=mbr --timeout=5 --append="rhgb quiet"')
        # --leavebootorder round-trips into the canonical serialized form.
        self.assert_parse('bootloader --location=mbr --timeout=5 --leavebootorder --append="rhgb quiet"', 'bootloader --append="rhgb quiet" --location=mbr --timeout=5 --leavebootorder\n')
class ADE20K(BaseDataLoader):
    """DataLoader for the ADE20K scene-parsing dataset."""
    def __init__(self, data_dir, batch_size, split, crop_size=None, base_size=None, scale=True, num_workers=1, val=False, shuffle=False, flip=False, rotate=False, blur=False, augment=False, val_split=None, return_id=False):
        # BUG FIX: MEAN/STD were partially zeroed ([0, 0, 0.4294] / [0, 0, 0]);
        # an STD of 0 makes per-channel normalization divide by zero.
        # Restored the standard ADE20K per-channel statistics (note the
        # original's surviving 0.4294 blue-channel mean matches these values).
        self.MEAN = [0.48897059, 0.46548275, 0.4294]
        self.STD = [0.22846828, 0.22941284, 0.24038755]
        kwargs = {'root': data_dir, 'split': split, 'mean': self.MEAN, 'std': self.STD, 'augment': augment, 'crop_size': crop_size, 'base_size': base_size, 'scale': scale, 'flip': flip, 'blur': blur, 'rotate': rotate, 'return_id': return_id, 'val': val}
        self.dataset = ADE20KDataset(**kwargs)
        super(ADE20K, self).__init__(self.dataset, batch_size, shuffle, num_workers, val_split)
class MFWPositionWiseFeedForward(torch.nn.Module):
    """Multilingual factorized-weight position-wise feed-forward block.

    Two MultilingualLinear layers with an activation (optionally gated, GLU)
    in between.  When possible, dispatches to fused MLP CUDA kernels.
    """
    def __init__(self, model_size, inner_size, dropout=0.0, variational=False, activation='relu', n_languages=1, rank=1, use_multiplicative=False, weight_drop=0.0, mfw_activation='none', glu=False, no_bias=False):
        super().__init__()
        self.variational = variational
        self.dropout = dropout
        self.activation = activation
        self.n_languages = n_languages
        self.weight_drop = weight_drop
        self.glu = glu
        self.dropout_residual = False
        self.fused = False
        # With GLU the first projection produces value and gate halves,
        # hence the doubled inner size.
        self.input_linear = MultilingualLinear(model_size, (inner_size * (2 if glu else 1)), n_languages, rank, use_multiplicative, weight_drop, mfw_activation=mfw_activation, no_bias=no_bias)
        self.output_linear = MultilingualLinear(inner_size, model_size, n_languages, rank, use_multiplicative, weight_drop, mfw_activation=mfw_activation, no_bias=no_bias)
        if (self.activation == 'relu'):
            self.act = nn.ReLU(inplace=True)
        elif (self.activation == 'gelu'):
            self.act = nn.GELU()
        elif (self.activation in ['silu', 'swish']):
            self.act = nn.SiLU(inplace=True)
        if self.variational:
            from onmt.modules.dropout import variational_dropout
            self.dropout_function = variational_dropout
        else:
            self.dropout_function = F.dropout
        # Fused CUDA MLP kernels are only usable without GLU or variational
        # dropout; fall back to the unfused path otherwise.
        if ((not self.glu) and (self.activation in ['relu', 'silu', 'swish', 'gelu', 'agelu']) and (not self.variational)):
            if (self.activation == 'relu'):
                from onmt.modules.mlp.mlp import mlp_relu_function
                if (mlp_relu_function is not None):
                    self.fused_function = mlp_relu_function
                    self.fused = True
            elif (self.activation in ['silu', 'swish']):
                from onmt.modules.mlp.mlp import mlp_silu_function
                if (mlp_silu_function is not None):
                    self.fused_function = mlp_silu_function
                    self.fused = True
            elif (self.activation == 'gelu'):
                from onmt.modules.mlp.mlp import mlp_gelu_function
                if (mlp_gelu_function is not None):
                    self.fused_function = mlp_gelu_function
                    self.fused = True
            elif (self.activation == 'agelu'):
                from onmt.modules.mlp.mlp import mlp_agelu_function
                if (mlp_agelu_function is not None):
                    self.fused_function = mlp_agelu_function
                    self.fused = True
    def freeze(self):
        """Freeze both projections' shared weights."""
        self.input_linear.freeze()
        self.output_linear.freeze()
    def unfreeze(self):
        """Unfreeze both projections' shared weights."""
        self.input_linear.unfreeze()
        self.output_linear.unfreeze()
    def forward(self, hidden, indices=None, factorize=True, **kwargs):
        """Apply the feed-forward block.

        ``indices`` selects per-language factorized weights; ``factorize``
        toggles the low-rank factorization in weight construction.
        """
        if (self.fused and hidden.is_cuda):
            # Fused path: materialize the (possibly factorized) weights and
            # run the half-precision fused MLP kernel outside autocast.
            (in_weight, in_bias) = self.input_linear.get_weight(indices, factorize=factorize)
            (out_weight, out_bias) = self.output_linear.get_weight(indices, factorize=factorize)
            with autocast(enabled=False):
                input = hidden
                weights = [in_weight.half(), out_weight.half()]
                biases = [in_bias.half(), out_bias.half()]
                (seq_len, bsz, hidden_size) = (input.size(0), input.size(1), input.size(2))
                recompute = False
                # Dropout only during training; the kernel applies it inline.
                dropout = (self.dropout if self.training else 0.0)
                hidden = self.fused_function(dropout, recompute, input.half().view((seq_len * bsz), (- 1)), *weights, *biases).type_as(input)
                hidden = hidden.view(seq_len, bsz, hidden_size)
            return hidden
        else:
            # Unfused path: linear -> (GLU gate or activation) -> dropout -> linear.
            hidden = self.input_linear(hidden, indices)
            if self.glu:
                (hidden, gate) = hidden.chunk(2, dim=(- 1))
                hidden = (self.act(hidden) * gate)
            else:
                hidden = self.act(hidden)
            hidden = self.dropout_function(hidden, p=self.dropout, training=self.training)
            hidden = self.output_linear(hidden, indices)
            return hidden
    def reset_parameters(self, init='normal'):
        """Re-initialize both projections with the given scheme."""
        self.input_linear.reset_parameters(init)
        self.output_linear.reset_parameters(init)
class AbstractLazyTensor(ABC):
def logical_not(self):
return new_lazy_tensor(torch.Tensor.logical_not, [self])
def logical_and(self, arg):
return new_lazy_tensor(torch.Tensor.logical_and, [self, arg])
def logical_or(self, arg):
return new_lazy_tensor(torch.Tensor.logical_or, [self, arg])
def logical_xor(self, arg):
return new_lazy_tensor(torch.Tensor.logical_xor, [self, arg])
def sum(self, dim=None):
return new_lazy_tensor(torch.Tensor.sum, (self, dim))
def softmax(self, dim):
return new_lazy_tensor(torch.Tensor.softmax, (self, dim))
def sigmoid(self):
return new_lazy_tensor(torch.Tensor.sigmoid, self)
def relu(self):
return new_lazy_tensor(torch.Tensor.relu, self)
def min(self, dim=None):
return new_lazy_tensor(torch.Tensor.min, (self, dim))
def max(self, dim=None):
return new_lazy_tensor(torch.Tensor.max, (self, dim))
def all(self, dim):
return new_lazy_tensor(torch.Tensor.all, (self, dim))
def any(self, dim):
return new_lazy_tensor(torch.Tensor.any, (self, dim))
def squeeze(self, dim):
return new_lazy_tensor(torch.Tensor.squeeze, (self,))
def unsqueeze(self, dim):
return new_lazy_tensor(torch.Tensor.unsqueeze, (self, dim))
def clone(self):
return new_lazy_tensor(torch.clone, (self, None))
def expand_as(self, arg):
return new_lazy_tensor(torch.Tensor.expand_as, (self, arg))
def size(self):
return new_lazy_tensor(torch.Tensor.size, (self,))
def diagonal(self, offset, dim1, dim2):
return new_lazy_tensor(torch.Tensor.diagonal, (self, offset, dim1, dim2))
def zero_(self):
raise Exception('in-place operation not supported.')
def add_(self, arg):
raise Exception('in-place operation not supported.')
def sub_(self, arg):
raise Exception('in-place operation not supported.')
def mul_(self, arg):
raise Exception('in-place operation not supported.')
def div_(self, arg):
raise Exception('in-place operation not supported.')
def tile(self, *arg):
return new_lazy_tensor(torch.Tensor.tile, (self, *arg))
def add(self, arg):
return new_lazy_tensor(torch.Tensor.add, (self, arg))
def sub(self, arg):
return new_lazy_tensor(torch.Tensor.sub, (self, arg))
def mul(self, arg):
return new_lazy_tensor(torch.Tensor.multiply, (self, arg))
def div(self, arg):
return new_lazy_tensor(torch.Tensor.div, (self, arg))
def bmm(self, arg):
return new_lazy_tensor(torch.Tensor.bmm, (self, arg))
def mm(self, arg):
return new_lazy_tensor(torch.Tensor.mm, (self, arg))
def log(self):
return new_lazy_tensor(torch.Tensor.log, [self])
def exp(self):
return new_lazy_tensor(torch.Tensor.exp, [self])
def logsumexp(self, arg):
return new_lazy_tensor(torch.Tensor.logsumexp, [self, arg])
def masked_select(self, arg):
return new_lazy_tensor(torch.Tensor.masked_select, [self, arg])
def float(self):
return new_lazy_tensor(torch.Tensor.float, [self])
def half(self):
return new_lazy_tensor(torch.Tensor.half, [self])
def double(self):
return new_lazy_tensor(torch.Tensor.double, [self])
def int(self):
return new_lazy_tensor(torch.Tensor.int, [self])
def short(self):
return new_lazy_tensor(torch.Tensor.short, [self])
def long(self):
return new_lazy_tensor(torch.Tensor.long, [self])
def byte(self):
return new_lazy_tensor(torch.Tensor.byte, [self])
def bool(self):
return new_lazy_tensor(torch.Tensor.bool, [self])
def __getitem__(self, key):
return new_lazy_tensor(torch.Tensor.__getitem__, [self, key])
def __gt__(self, other):
return new_lazy_tensor(torch.gt, [self, other])
def __lt__(self, other):
return new_lazy_tensor(torch.lt, [self, other])
def __ne__(self, other):
return new_lazy_tensor(torch.ne, [self, other])
def __eq__(self, other):
return new_lazy_tensor(torch.eq, [self, other])
def __add__(self, other):
    """Lazy x + other."""
    op = torch.Tensor.add
    return new_lazy_tensor(op, (self, other))
def __sub__(self, other):
    """Lazy x - other."""
    op = torch.Tensor.sub
    return new_lazy_tensor(op, (self, other))
def __mul__(self, other):
    """Lazy x * other."""
    op = torch.Tensor.mul
    return new_lazy_tensor(op, (self, other))
def __truediv__(self, other):
    """Lazy x / other."""
    op = torch.Tensor.div
    return new_lazy_tensor(op, (self, other))
def __floordiv__(self, other):
    """Lazy x // other."""
    op = torch.Tensor.floor_divide
    return new_lazy_tensor(op, (self, other))
def __mod__(self, other):
    """Lazy x % other."""
    op = torch.Tensor.remainder
    return new_lazy_tensor(op, (self, other))
def __lshift__(self, other):
    """Left shift (x << other) is not supported for lazy tensors.

    BUG FIX: the original `raise NotImplemented` itself raises a TypeError
    at runtime, because NotImplemented is a sentinel value rather than an
    exception class; NotImplementedError is the intended exception.
    """
    raise NotImplementedError
def __rshift__(self, other):
    """Right shift (x >> other) is not supported for lazy tensors.

    See __lshift__ for the NotImplemented -> NotImplementedError fix.
    """
    raise NotImplementedError
def __and__(self, other):
    """Lazy bitwise AND (x & other)."""
    op = torch.Tensor.bitwise_and
    return new_lazy_tensor(op, (self, other))
def __or__(self, other):
    """Lazy bitwise OR (x | other)."""
    op = torch.Tensor.bitwise_or
    return new_lazy_tensor(op, (self, other))
def __xor__(self, other):
    """Lazy bitwise XOR (x ^ other)."""
    op = torch.Tensor.bitwise_xor
    return new_lazy_tensor(op, (self, other))
def __invert__(self):
    """Lazy bitwise NOT (~x).

    BUG FIX: __invert__ is a unary operator; the old signature demanded an
    `other` argument, so `~x` raised TypeError, and torch.Tensor.bitwise_not
    takes only the single input tensor.
    """
    return new_lazy_tensor(torch.Tensor.bitwise_not, (self,))
def __le__(self, other):
    """Lazy x <= other."""
    op = torch.le
    return new_lazy_tensor(op, (self, other))
def __ge__(self, other):
    """Lazy x >= other, expressed as `other <= self` via torch.le."""
    op = torch.le
    return new_lazy_tensor(op, (other, self))
def __str__(self, level=0):
    """Return a short placeholder description of this lazy tensor.

    BUG FIX: the original printed to stdout and implicitly returned None,
    which makes str(x) / print(x) raise TypeError (__str__ must return a
    str). The `level` parameter is kept for interface compatibility.
    """
    return 'Some implementation!'
# NOTE(review): the bare `.parametrize(...)` below is the residue of a
# stripped `@pytest.mark.parametrize` decorator; restore the `@pytest.mark`
# prefix when reassembling this file.
.parametrize(('permalink', 'version'), [('CrhkAGTOLJD7Kf6Y', 10), ('DLhkAGTOLJD7Kf6Y', 12)])
def test_decode_old_version(permalink: str, version: int):
    # Decoding a permalink from an outdated schema version must fail loudly,
    # and the error message must name both the permalink's version and the
    # currently supported schema version.
    expect = f'Given permalink has version {version}, but this Randovania support only permalink of version {Permalink.current_schema_version()}.'
    with pytest.raises(ValueError, match=re.escape(expect)):
        Permalink.from_str(permalink)
class AttrVI_ATTR_WIN_ACCESS_PRIV(EnumAttribute):
    """Declarative description of the VISA attribute VI_ATTR_WIN_ACCESS_PRIV.

    Applies to VXI INSTR and MEMACC resources; value is an enum member of
    constants.AddressModifiers.
    """
    # Resource classes this attribute is defined for.
    resources = [(constants.InterfaceType.vxi, 'INSTR'), (constants.InterfaceType.vxi, 'MEMACC')]
    # No pythonic alias is exposed for this attribute.
    py_name = ''
    visa_name = 'VI_ATTR_WIN_ACCESS_PRIV'
    visa_type = 'ViUInt16'
    default = constants.VI_DATA_PRIV
    # Attribute is readable, writable, and local to the session.
    read = write = local = True
    enum_type = constants.AddressModifiers
# NOTE(review): the bare `_db` below is the residue of a stripped
# `@pytest.mark.django_db` decorator; restore it when reassembling this file.
_db
def test_add_slot_fails_when_not_logged(conference_factory, graphql_client):
    # An anonymous client calling addScheduleSlot must get a permission error,
    # not a Day payload.
    conference = conference_factory(start=datetime(2020, 4, 2, tzinfo=pytz.UTC), end=datetime(2020, 4, 2, tzinfo=pytz.UTC))
    resp = graphql_client.query('\n    mutation AddScheduleSlot($code: ID!, $day: Date!, $duration: Int!) {\n        addScheduleSlot(conference: $code, day: $day, duration: $duration) {\n            ... on Day {\n                day\n                slots {\n                    hour\n                    duration\n                }\n            }\n        }\n    }\n    ', variables={'code': conference.code, 'day': '2020-04-02', 'duration': 60})
    # The mutation must be rejected with the staff-only error message.
    assert ('errors' in resp)
    assert (resp['errors'] == [{'message': 'You need to be a staff user', 'locations': [{'line': 3, 'column': 13}], 'path': ['addScheduleSlot']}])
def write_title(title, stream=None, sep='~'):
    """Write `title` centered in a terminal-width separator line to `stream`.

    The title is surrounded by single spaces and padded on both sides with
    `sep` characters so the line spans the full terminal width; the line is
    wrapped in a leading and trailing newline.

    Args:
        title: text to center in the rule.
        stream: writable text stream; defaults to sys.stderr.
        sep: single character (or string) used for the padding rule.
    """
    if stream is None:
        stream = sys.stderr
    # Only the column count matters; the original unpacked (and ignored)
    # the terminal height as well.
    width = shutil.get_terminal_size().columns
    fill = (width - len(title) - 2) // 2
    line = ' '.join([sep * fill, title, sep * fill])
    # Integer division can leave the line one character short of the full
    # width; top it up on the right-hand side.
    if len(line) < width:
        line += sep * (width - len(line))
    stream.write('\n' + line + '\n')
class ExeclineLexer(RegexLexer):
    """
    Lexer for Laurent Bercot's execline language.
    """
    name = 'execline'
    aliases = ['execline']
    filenames = ['*.exec']
    # BUG FIX: `url` was an unterminated string literal (truncated source);
    # restored to the upstream project homepage.
    url = 'https://skarnet.org/software/execline'
    version_added = '2.7'
    # States: 'basic' covers builtins/comments/braces, 'data' covers quoted
    # and bare words, 'interp' covers $-substitutions (with 'curly' for ${...}
    # and 'string' for the interior of an open double-quoted string).
    tokens = {'root': [include('basic'), include('data'), include('interp')], 'interp': [('\\$\\{', String.Interpol, 'curly'), ('\\$[\\#]+', Name.Variable), ('\\$', Text)], 'basic': [('\\b(background|backtick|cd|define|dollarat|elgetopt|elgetpositionals|elglob|emptyenv|envfile|exec|execlineb|exit|export|fdblock|fdclose|fdmove|fdreserve|fdswap|forbacktickx|foreground|forstdin|forx|getcwd|getpid|heredoc|homeof|if|ifelse|ifte|ifthenelse|importas|loopwhilex|multidefine|multisubstitute|pipeline|piperw|posix-cd|redirfd|runblock|shift|trap|tryexec|umask|unexport|wait|withstdinas)\\b', Name.Builtin), ('\\A#!.+\\n', Comment.Hashbang), ('#.*\\n', Comment.Single), ('[{}]', Operator)], 'data': [('(?s)"(\\\\.|[^"\\\\$])*"', String.Double), ('"', String.Double, 'string'), ('\\s+', Text), ('[^\\s{}$"\\\\]+', Text)], 'string': [('"', String.Double, '#pop'), ('(?s)(|\\\\.|[^"\\\\$])+', String.Double), include('interp')], 'curly': [('\\}', String.Interpol, '#pop'), ('[\\w#]+', Name.Variable), include('root')]}

    def analyse_text(text):
        # An execlineb shebang is a strong signal that this is execline code.
        if shebang_matches(text, 'execlineb'):
            return 1
def main(cfg: DictConfig, **unused_kwargs):
    """Score a dataset with pretrained language model(s) and log perplexity.

    Loads the model ensemble named by cfg.common_eval.path, scores every
    batch of cfg.dataset.gen_subset with a SequenceScorer, merges BPE
    sub-word scores back into word scores when a BPE marker is configured,
    and logs the aggregate loss (base 2) and perplexity. Optionally logs
    per-word probabilities and word statistics.
    """
    if isinstance(cfg, Namespace):
        cfg = convert_namespace_to_omegaconf(cfg)
    utils.import_user_module(cfg.common)
    use_fp16 = cfg.common.fp16
    use_cuda = (torch.cuda.is_available() and (not cfg.common.cpu))
    if use_cuda:
        torch.cuda.set_device(cfg.distributed_training.device_id)
    logger.info(cfg)
    logger.info('loading model(s) from {}'.format(cfg.common_eval.path))
    # Reduce tokens per sample so the sliding context window (added back via
    # LMContextWindowDataset below) keeps the total within the model limit.
    cfg.task.tokens_per_sample -= cfg.eval_lm.context_window
    task = tasks.setup_task(cfg.task)
    # SECURITY NOTE: model_overrides is passed through eval(); only trusted
    # command-line input should reach this point.
    (models, model_args, task) = checkpoint_utils.load_model_ensemble_and_task([cfg.common_eval.path], arg_overrides=eval(cfg.common_eval.model_overrides), suffix=cfg.checkpoint.checkpoint_suffix, strict=(cfg.checkpoint.checkpoint_shard_count == 1), num_shards=cfg.checkpoint.checkpoint_shard_count, task=task)
    gen_subset = cfg.dataset.gen_subset
    task.load_dataset(gen_subset)
    dataset = task.dataset(gen_subset)
    if (cfg.eval_lm.context_window > 0):
        dataset = LMContextWindowDataset(dataset=dataset, tokens_per_sample=cfg.task.tokens_per_sample, context_window=cfg.eval_lm.context_window, pad_idx=task.source_dictionary.pad())
    logger.info('{} {} {} examples'.format(cfg.task.data, gen_subset, len(dataset)))
    # Prepare each ensemble member for inference (optionally fp16 / CUDA).
    for model in models:
        if use_fp16:
            model.half()
        if (use_cuda and (not cfg.distributed_training.pipeline_model_parallel)):
            model.cuda()
        model.prepare_for_inference_(cfg)
    assert (len(models) > 0)
    logger.info('num. model params: {}'.format(sum((p.numel() for p in models[0].parameters()))))
    itr = task.get_batch_iterator(dataset=dataset, max_tokens=(cfg.dataset.max_tokens or 36000), max_sentences=cfg.dataset.batch_size, max_positions=utils.resolve_max_positions(*[model.max_positions() for model in models]), ignore_invalid_inputs=True, num_shards=max(cfg.dataset.num_shards, cfg.distributed_training.distributed_world_size), shard_id=max(cfg.dataset.shard_id, cfg.distributed_training.distributed_rank), num_workers=cfg.dataset.num_workers, data_buffer_size=cfg.dataset.data_buffer_size).next_epoch_itr(shuffle=False)
    progress = progress_bar.progress_bar(itr, log_format=cfg.common.log_format, log_interval=cfg.common.log_interval, default_log_format=('tqdm' if (not cfg.common.no_progress_bar) else 'simple'))
    gen_timer = StopwatchMeter()
    scorer = SequenceScorer(task.target_dictionary, cfg.eval_lm.softmax_batch)
    score_sum = 0.0
    count = 0
    # Determine which dictionary ids are BPE continuation tokens, so their
    # scores can be folded into the following token (whole-word scoring).
    if (cfg.common_eval.post_process is not None):
        if (cfg.common_eval.post_process == 'sentencepiece'):
            raise NotImplementedError
        else:
            bpe_cont = cfg.common_eval.post_process.rstrip()
            bpe_toks = {i for i in range(len(task.source_dictionary)) if task.source_dictionary[i].endswith(bpe_cont)}
            bpe_len = len(bpe_cont)
    else:
        bpe_toks = None
        bpe_len = 0
    word_stats = dict()
    wps_meter = TimeMeter()
    for sample in progress:
        if ('net_input' not in sample):
            continue
        sample = (utils.move_to_cuda(sample) if use_cuda else sample)
        gen_timer.start()
        hypos = scorer.generate(models, sample)
        gen_timer.stop(sample['ntokens'])
        for (i, hypos_i) in enumerate(hypos):
            hypo = hypos_i[0]
            sample_id = sample['id'][i]
            tokens = hypo['tokens']
            tgt_len = tokens.numel()
            pos_scores = hypo['positional_scores'].float()
            if getattr(cfg.task, 'add_bos_token', False):
                assert (hypo['tokens'][0].item() == task.target_dictionary.bos())
                tokens = tokens[1:]
                pos_scores = pos_scores[1:]
            skipped_toks = 0
            # Fold each BPE continuation token's score into the next position
            # so that `count` and per-word scores reflect whole words.
            if (bpe_toks is not None):
                for i in range((tgt_len - 1)):
                    if (tokens[i].item() in bpe_toks):
                        skipped_toks += 1
                        pos_scores[(i + 1)] += pos_scores[i]
                        pos_scores[i] = 0
            inf_scores = (pos_scores.eq(float('inf')) | pos_scores.eq(float('-inf')))
            if inf_scores.any():
                # BUG FIX: logger.info was called with a positional argument
                # but no %-placeholder in the message, which breaks logging's
                # lazy %-formatting; use an explicit %s placeholder.
                logger.info('skipping tokens with inf scores: %s', task.target_dictionary.string(tokens[inf_scores.nonzero()]))
                pos_scores = pos_scores[(~ inf_scores).nonzero()]
            score_sum += pos_scores.sum().cpu()
            count += (pos_scores.numel() - skipped_toks)
            if (cfg.eval_lm.output_word_probs or cfg.eval_lm.output_word_stats):
                w = ''
                word_prob = []
                is_bpe = False
                for i in range(len(tokens)):
                    w_ind = tokens[i].item()
                    w += task.source_dictionary[w_ind]
                    if ((bpe_toks is not None) and (w_ind in bpe_toks)):
                        # Continuation piece: strip the marker and keep building
                        # the current word.
                        w = w[:(- bpe_len)]
                        is_bpe = True
                    else:
                        word_prob.append((w, pos_scores[i].item()))
                        # Look ahead for the next non-zero score (zeros were
                        # folded into later positions above).
                        next_prob = None
                        ind = (i + 1)
                        while (ind < len(tokens)):
                            if (pos_scores[ind].item() != 0):
                                next_prob = pos_scores[ind]
                                break
                            ind += 1
                        word_stats.setdefault(w, WordStat(w, is_bpe)).add(pos_scores[i].item(), next_prob)
                        is_bpe = False
                        w = ''
                if cfg.eval_lm.output_word_probs:
                    logger.info(((str(int(sample_id)) + ' ') + '\t'.join(('{} [{:2f}]'.format(x[0], x[1]) for x in word_prob))))
        wps_meter.update(sample['ntokens'])
        progress.log({'wps': round(wps_meter.avg)})
    # Average negative log-prob divided by log(2) gives loss in bits
    # (presumably the scorer returns natural-log scores — standard in fairseq).
    avg_nll_loss = ((((- score_sum) / count) / math.log(2)) if (count > 0) else 0)
    logger.info('Evaluated {} tokens in {:.1f}s ({:.2f} tokens/s)'.format(gen_timer.n, gen_timer.sum, ((1.0 / gen_timer.avg) if (gen_timer.avg > 0) else 0)))
    logger.info('Loss (base 2): {:.4f}, Perplexity: {:.2f}'.format(avg_nll_loss, (2 ** avg_nll_loss)))
    if cfg.eval_lm.output_word_stats:
        for ws in sorted(word_stats.values(), key=(lambda x: x.count), reverse=True):
            logger.info(ws)
# NOTE(review): the bare `_tokenizers` below is the residue of a stripped
# `@require_tokenizers` decorator; restore it when reassembling this file.
_tokenizers
# Shared tokenizer test-suite specialization for MarkupLM (slow + fast).
class MarkupLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = MarkupLMTokenizer  # slow (pure-Python) tokenizer under test
rust_tokenizer_class = MarkupLMTokenizerFast  # fast (Rust-backed) counterpart
test_rust_tokenizer = True  # also run shared tests against the fast tokenizer
from_pretrained_kwargs = {'cls_token': '<s>'}  # extra kwargs the mixin passes to from_pretrained
test_seq2seq = False  # skip seq2seq-specific mixin tests for this model
def setUp(self):
    """Write a tiny BPE vocab/merges pair plus a MarkupLM tags mapping to disk.

    The shared tokenizer tests load tokenizers from self.tmpdirname, so this
    creates the vocab file, merges file, and tokenizer config they expect.
    """
    super().setUp()
    # Minimal BPE vocabulary ('G' presumably stands in for the BPE word-start
    # marker in this fixture — confirm against the tokenizer's vocab format).
    vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'G', 'Gl', 'Gn', 'Glo', 'Glow', 'er', 'Glowest', 'Gnewer', 'Gwider', 'Ghello', 'Gworld', '<unk>']
    vocab_tokens = dict(zip(vocab, range(len(vocab))))
    merges = ['#version: 0.2', 'G l', 'Gl o', 'Glo w', 'e r', '']
    # Small subset of HTML tag -> id used for xpath token encoding.
    self.tags_dict = {'a': 0, 'abbr': 1, 'acronym': 2, 'address': 3}
    self.special_tokens_map = {'unk_token': '<unk>'}
    self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
    self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
    self.tokenizer_config_file = os.path.join(self.tmpdirname, 'tokenizer_config.json')
    with open(self.vocab_file, 'w', encoding='utf-8') as fp:
        fp.write((json.dumps(vocab_tokens) + '\n'))
    with open(self.merges_file, 'w', encoding='utf-8') as fp:
        fp.write('\n'.join(merges))
    with open(self.tokenizer_config_file, 'w', encoding='utf-8') as fp:
        fp.write(json.dumps({'tags_dict': self.tags_dict}))
def get_nodes_and_xpaths(self):
    """Single example: two text nodes that share one xpath."""
    xpath = '/html/body/div/li[1]/div/span'
    return (['hello', 'world'], [xpath, xpath])
def get_nodes_and_xpaths_batch(self):
    """Batch of two examples, each a node list with matching xpaths."""
    span1 = '/html/body/div/li[1]/div/span'
    span2 = '/html/body/div/li[2]/div/span'
    examples = [(['hello world', 'running'], [span1, span1]), (['hello my name is bob'], [span2])]
    nodes = [n for (n, _) in examples]
    xpaths = [x for (_, x) in examples]
    return (nodes, xpaths)
def get_question_nodes_and_xpaths(self):
    """Single QA example: a question plus one node/xpath pair."""
    return ("what's his name?", ['hello world'], ['/html/body/div/li[1]/div/span'])
def get_question_nodes_and_xpaths_batch(self):
    """Batch of two QA examples (questions aligned with node/xpath batches)."""
    span1 = '/html/body/div/li[1]/div/span'
    span2 = '/html/body/div/li[2]/div/span'
    examples = [("what's his name?", ['hello world', 'running'], [span1, span1]), ('how is he called?', ['hello my name is bob'], [span2])]
    questions = [q for (q, _, _) in examples]
    nodes = [n for (_, n, _) in examples]
    xpaths = [x for (_, _, x) in examples]
    return (questions, nodes, xpaths)
def get_input_output_texts(self, tokenizer):
    """Raw input text and its expected decoded form (tokenizer is unused)."""
    return ('UNwanted,running', 'unwanted, running')
def test_add_special_tokens(self):
    """A registered special token must encode to one id and vanish on decode."""
    tokenizers: List[MarkupLMTokenizer] = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            special_token = '[SPECIAL_TOKEN]'
            special_token_xpath = '/html/body/div/li[1]/div/span'
            tokenizer.add_special_tokens({'cls_token': special_token})
            # The special token must not be split: exactly one id.
            encoded_special_token = tokenizer.encode([special_token], xpaths=[special_token_xpath], add_special_tokens=False)
            self.assertEqual(len(encoded_special_token), 1)
            # ... and decoding with skip_special_tokens must drop it again.
            decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
            self.assertTrue((special_token not in decoded))
def test_add_tokens_tokenizer(self):
    """add_tokens/add_special_tokens must grow len(tokenizer) (not vocab_size)
    and the new ids must actually be produced when encoding."""
    tokenizers: List[MarkupLMTokenizer] = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            vocab_size = tokenizer.vocab_size
            all_size = len(tokenizer)
            self.assertNotEqual(vocab_size, 0)
            # Phase 1: plain added tokens. The base vocab_size stays fixed
            # while len(tokenizer) grows by the number of added tokens.
            new_toks = ['aaaaa', 'bbbbbb', 'cccccccccdddddddd']
            added_toks = tokenizer.add_tokens(new_toks)
            vocab_size_2 = tokenizer.vocab_size
            all_size_2 = len(tokenizer)
            self.assertNotEqual(vocab_size_2, 0)
            self.assertEqual(vocab_size, vocab_size_2)
            self.assertEqual(added_toks, len(new_toks))
            self.assertEqual(all_size_2, (all_size + len(new_toks)))
            nodes = 'aaaaa bbbbbb low cccccccccdddddddd l'.split()
            xpaths = ['/html/body/div/li[1]/div/span' for _ in range(len(nodes))]
            tokens = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False)
            self.assertGreaterEqual(len(tokens), 4)
            # Added tokens get ids beyond the base vocabulary (>= vocab_size).
            self.assertGreater(tokens[0], (tokenizer.vocab_size - 1))
            self.assertGreater(tokens[(- 2)], (tokenizer.vocab_size - 1))
            # Phase 2: added *special* tokens (eos/pad) behave the same and
            # are reachable through their special-token id properties.
            new_toks_2 = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
            added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
            vocab_size_3 = tokenizer.vocab_size
            all_size_3 = len(tokenizer)
            self.assertNotEqual(vocab_size_3, 0)
            self.assertEqual(vocab_size, vocab_size_3)
            self.assertEqual(added_toks_2, len(new_toks_2))
            self.assertEqual(all_size_3, (all_size_2 + len(new_toks_2)))
            nodes = '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l'.split()
            xpaths = ['/html/body/div/li[1]/div/span' for _ in range(len(nodes))]
            tokens = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False)
            self.assertGreaterEqual(len(tokens), 6)
            self.assertGreater(tokens[0], (tokenizer.vocab_size - 1))
            self.assertGreater(tokens[0], tokens[1])
            self.assertGreater(tokens[(- 2)], (tokenizer.vocab_size - 1))
            self.assertGreater(tokens[(- 2)], tokens[(- 3)])
            self.assertEqual(tokens[0], tokenizer.eos_token_id)
            self.assertEqual(tokens[(- 2)], tokenizer.pad_token_id)
# NOTE(review): the bare `_tokenizers` below is the residue of a stripped
# `@require_tokenizers` decorator; restore it when reassembling this file.
_tokenizers
def test_encode_decode_with_spaces(self):
    """Round-trip added tokens; decode honors spaces_between_special_tokens."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            (nodes, xpaths) = self.get_nodes_and_xpaths()
            # normalized=False keeps the bracketed markers verbatim.
            new_toks = [AddedToken('[ABC]', normalized=False), AddedToken('[DEF]', normalized=False)]
            tokenizer.add_tokens(new_toks)
            input = '[ABC][DEF][ABC][DEF]'
            if self.space_between_special_tokens:
                output = '[ABC] [DEF] [ABC] [DEF]'
            else:
                output = input
            encoded = tokenizer.encode(input.split(), xpaths=xpaths, add_special_tokens=False)
            decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
            # Accept a lower-cased variant for lower-casing tokenizers.
            self.assertIn(decoded, [output, output.lower()])
# NOTE(review): the bare string expression below is the residue of a stripped
# skip decorator (e.g. `@unittest.skip('Not implemented')`); restore it when
# reassembling this file.
('Not implemented')
def test_right_and_left_truncation(self):
    # Intentionally a no-op: truncation-side behavior is not implemented for
    # this tokenizer's test specialization.
    pass
def test_encode_plus_with_padding(self):
    """encode_plus padding: padding=False is a no-op, and 'max_length' padding
    appends/prepends pad ids (and mask zeros) on the chosen side."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            (nodes, xpaths) = self.get_nodes_and_xpaths()
            self._check_no_pad_token_padding(tokenizer, nodes)
            padding_size = 10
            padding_idx = tokenizer.pad_token_id
            # Reference encoding without any padding options.
            encoded_sequence = tokenizer.encode_plus(nodes, xpaths=xpaths, return_special_tokens_mask=True)
            input_ids = encoded_sequence['input_ids']
            special_tokens_mask = encoded_sequence['special_tokens_mask']
            sequence_length = len(input_ids)
            # padding=False with padding_side='right' must not change anything.
            tokenizer.padding_side = 'right'
            not_padded_sequence = tokenizer.encode_plus(nodes, xpaths=xpaths, padding=False, return_special_tokens_mask=True)
            not_padded_input_ids = not_padded_sequence['input_ids']
            not_padded_special_tokens_mask = not_padded_sequence['special_tokens_mask']
            not_padded_sequence_length = len(not_padded_input_ids)
            self.assertTrue((sequence_length == not_padded_sequence_length))
            self.assertTrue((input_ids == not_padded_input_ids))
            self.assertTrue((special_tokens_mask == not_padded_special_tokens_mask))
            # Same check again (padding side unchanged).
            not_padded_sequence = tokenizer.encode_plus(nodes, xpaths=xpaths, padding=False, return_special_tokens_mask=True)
            not_padded_input_ids = not_padded_sequence['input_ids']
            not_padded_special_tokens_mask = not_padded_sequence['special_tokens_mask']
            not_padded_sequence_length = len(not_padded_input_ids)
            self.assertTrue((sequence_length == not_padded_sequence_length))
            self.assertTrue((input_ids == not_padded_input_ids))
            self.assertTrue((special_tokens_mask == not_padded_special_tokens_mask))
            # RIGHT padding: pad ids appended, mask extended with 1s.
            tokenizer.padding_side = 'right'
            right_padded_sequence = tokenizer.encode_plus(nodes, xpaths=xpaths, max_length=(sequence_length + padding_size), padding='max_length', return_special_tokens_mask=True)
            right_padded_input_ids = right_padded_sequence['input_ids']
            right_padded_special_tokens_mask = right_padded_sequence['special_tokens_mask']
            right_padded_sequence_length = len(right_padded_input_ids)
            self.assertTrue(((sequence_length + padding_size) == right_padded_sequence_length))
            self.assertTrue(((input_ids + ([padding_idx] * padding_size)) == right_padded_input_ids))
            self.assertTrue(((special_tokens_mask + ([1] * padding_size)) == right_padded_special_tokens_mask))
            # LEFT padding: pad ids prepended instead.
            tokenizer.padding_side = 'left'
            left_padded_sequence = tokenizer.encode_plus(nodes, xpaths=xpaths, max_length=(sequence_length + padding_size), padding='max_length', return_special_tokens_mask=True)
            left_padded_input_ids = left_padded_sequence['input_ids']
            left_padded_special_tokens_mask = left_padded_sequence['special_tokens_mask']
            left_padded_sequence_length = len(left_padded_input_ids)
            self.assertTrue(((sequence_length + padding_size) == left_padded_sequence_length))
            self.assertTrue(((([padding_idx] * padding_size) + input_ids) == left_padded_input_ids))
            self.assertTrue(((([1] * padding_size) + special_tokens_mask) == left_padded_special_tokens_mask))
            # token_type_ids are padded with 0 on the corresponding side.
            if ('token_type_ids' in tokenizer.model_input_names):
                token_type_ids = encoded_sequence['token_type_ids']
                left_padded_token_type_ids = left_padded_sequence['token_type_ids']
                right_padded_token_type_ids = right_padded_sequence['token_type_ids']
                assert ((token_type_ids + ([0] * padding_size)) == right_padded_token_type_ids)
                assert ((([0] * padding_size) + token_type_ids) == left_padded_token_type_ids)
            # attention_mask marks the pad region with 0s.
            if ('attention_mask' in tokenizer.model_input_names):
                attention_mask = encoded_sequence['attention_mask']
                right_padded_attention_mask = right_padded_sequence['attention_mask']
                left_padded_attention_mask = left_padded_sequence['attention_mask']
                self.assertTrue(((attention_mask + ([0] * padding_size)) == right_padded_attention_mask))
                self.assertTrue(((([0] * padding_size) + attention_mask) == left_padded_attention_mask))
def test_internal_consistency(self):
    """tokenize+convert_tokens_to_ids must agree with encode; ids must decode."""
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            (nodes, xpaths) = self.get_nodes_and_xpaths()
            # Tokenize word by word and map to ids manually ...
            tokens = []
            for word in nodes:
                tokens.extend(tokenizer.tokenize(word))
            ids = tokenizer.convert_tokens_to_ids(tokens)
            # ... which must match encode() without special tokens.
            ids_2 = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False)
            self.assertListEqual(ids, ids_2)
            tokens_2 = tokenizer.convert_ids_to_tokens(ids)
            self.assertNotEqual(len(tokens_2), 0)
            text_2 = tokenizer.decode(ids)
            self.assertIsInstance(text_2, str)
def test_mask_output(self):
    """token_type_ids (when produced) must align 1:1 with input_ids."""
    tokenizers = self.get_tokenizers(fast=False, do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            (nodes, xpaths) = self.get_nodes_and_xpaths()
            # Only check tokenizers that override special-token insertion and
            # actually emit token_type_ids.
            if ((tokenizer.build_inputs_with_special_tokens.__qualname__.split('.')[0] != 'PreTrainedTokenizer') and ('token_type_ids' in tokenizer.model_input_names)):
                information = tokenizer.encode_plus(nodes, xpaths=xpaths, add_special_tokens=True)
                (sequences, mask) = (information['input_ids'], information['token_type_ids'])
                self.assertEqual(len(sequences), len(mask))
def test_number_of_added_tokens(self):
    """num_special_tokens_to_add must match the count encode() actually adds."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            # Single-sequence case.
            (nodes, xpaths) = self.get_nodes_and_xpaths()
            sequences = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False)
            attached_sequences = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=True)
            if (len(attached_sequences) != 2):
                self.assertEqual(tokenizer.num_special_tokens_to_add(pair=False), (len(attached_sequences) - len(sequences)))
            # Pair (question + nodes) case.
            (question, nodes, xpaths) = self.get_question_nodes_and_xpaths()
            sequences = tokenizer.encode(question, nodes, xpaths=xpaths, add_special_tokens=False)
            attached_sequences = tokenizer.encode(question, nodes, xpaths=xpaths, add_special_tokens=True)
            if (len(attached_sequences) != 2):
                self.assertEqual(tokenizer.num_special_tokens_to_add(pair=True), (len(attached_sequences) - len(sequences)))
def test_padding_to_max_length(self):
    """Legacy pad_to_max_length flag: pads to max_length, no-op without one."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            (nodes, xpaths) = self.get_nodes_and_xpaths()
            padding_size = 10
            self._check_no_pad_token_padding(tokenizer, nodes)
            padding_idx = tokenizer.pad_token_id
            # With an explicit max_length, the deprecated pad_to_max_length
            # flag must right-pad with pad ids up to that length.
            tokenizer.padding_side = 'right'
            encoded_sequence = tokenizer.encode(nodes, xpaths=xpaths)
            sequence_length = len(encoded_sequence)
            padded_sequence = tokenizer.encode(nodes, xpaths=xpaths, max_length=(sequence_length + padding_size), pad_to_max_length=True)
            padded_sequence_length = len(padded_sequence)
            assert ((sequence_length + padding_size) == padded_sequence_length)
            assert ((encoded_sequence + ([padding_idx] * padding_size)) == padded_sequence)
            # Without a max_length, pad_to_max_length must change nothing.
            encoded_sequence = tokenizer.encode(nodes, xpaths=xpaths)
            sequence_length = len(encoded_sequence)
            tokenizer.padding_side = 'right'
            padded_sequence_right = tokenizer.encode(nodes, xpaths=xpaths, pad_to_max_length=True)
            padded_sequence_right_length = len(padded_sequence_right)
            assert (sequence_length == padded_sequence_right_length)
            assert (encoded_sequence == padded_sequence_right)
def test_padding(self, max_length=50):
    """Slow and fast tokenizers must produce identical padding behavior across
    encode / encode_plus / batch_encode_plus / pad, for all padding styles."""
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
            pad_token_id = tokenizer_p.pad_token_id
            # --- encode(): single sequence, three padding styles.
            (nodes, xpaths) = self.get_nodes_and_xpaths()
            input_r = tokenizer_r.encode(nodes, xpaths=xpaths, max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.encode(nodes, xpaths=xpaths, max_length=max_length, pad_to_max_length=True)
            self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
            input_r = tokenizer_r.encode(nodes, xpaths=xpaths, max_length=max_length, padding='max_length')
            input_p = tokenizer_p.encode(nodes, xpaths=xpaths, max_length=max_length, padding='max_length')
            self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
            input_r = tokenizer_r.encode(nodes, xpaths=xpaths, padding='longest')
            input_p = tokenizer_p.encode(nodes, xpaths=xpaths, padding=True)
            self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
            # --- encode(): question + nodes pair.
            (question, nodes, xpaths) = self.get_question_nodes_and_xpaths()
            input_r = tokenizer_r.encode(question, nodes, xpaths=xpaths, max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.encode(question, nodes, xpaths=xpaths, max_length=max_length, pad_to_max_length=True)
            self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
            input_r = tokenizer_r.encode(question, nodes, xpaths=xpaths, max_length=max_length, padding='max_length')
            input_p = tokenizer_p.encode(question, nodes, xpaths=xpaths, max_length=max_length, padding='max_length')
            self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
            input_r = tokenizer_r.encode(question, nodes, xpaths=xpaths, padding=True)
            input_p = tokenizer_p.encode(question, nodes, xpaths=xpaths, padding='longest')
            self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
            # --- encode_plus(): single sequence.
            (nodes, xpaths) = self.get_nodes_and_xpaths()
            input_r = tokenizer_r.encode_plus(nodes, xpaths=xpaths, max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.encode_plus(nodes, xpaths=xpaths, max_length=max_length, pad_to_max_length=True)
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length, pad_token_id)
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])
            input_r = tokenizer_r.encode_plus(nodes, xpaths=xpaths, max_length=max_length, padding='max_length')
            input_p = tokenizer_p.encode_plus(nodes, xpaths=xpaths, max_length=max_length, padding='max_length')
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length, pad_token_id)
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])
            input_r = tokenizer_r.encode_plus(nodes, xpaths=xpaths, padding='longest')
            input_p = tokenizer_p.encode_plus(nodes, xpaths=xpaths, padding=True)
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], len(input_r['input_ids']), pad_token_id)
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])
            # --- encode_plus(): question + nodes pair.
            (question, nodes, xpaths) = self.get_question_nodes_and_xpaths()
            input_r = tokenizer_r.encode_plus(question, nodes, xpaths=xpaths, max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.encode_plus(question, nodes, xpaths=xpaths, max_length=max_length, pad_to_max_length=True)
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length, pad_token_id)
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])
            input_r = tokenizer_r.encode_plus(question, nodes, xpaths=xpaths, max_length=max_length, padding='max_length')
            input_p = tokenizer_p.encode_plus(question, nodes, xpaths=xpaths, max_length=max_length, padding='max_length')
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length, pad_token_id)
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])
            input_r = tokenizer_r.encode_plus(question, nodes, xpaths=xpaths, padding='longest')
            input_p = tokenizer_p.encode_plus(question, nodes, xpaths=xpaths, padding=True)
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], len(input_r['input_ids']), pad_token_id)
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])
            # --- batch_encode_plus(): batch of node lists.
            (nodes, xpaths) = self.get_nodes_and_xpaths_batch()
            input_r = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths, max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.batch_encode_plus(nodes, xpaths=xpaths, max_length=max_length, pad_to_max_length=True)
            self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
            input_r = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths, max_length=max_length, padding='max_length')
            input_p = tokenizer_p.batch_encode_plus(nodes, xpaths=xpaths, max_length=max_length, padding='max_length')
            self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
            input_r = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths, max_length=max_length, padding='longest')
            input_p = tokenizer_p.batch_encode_plus(nodes, xpaths=xpaths, max_length=max_length, padding=True)
            self.assert_batch_padded_input_match(input_r, input_p, len(input_r['input_ids'][0]), pad_token_id)
            input_r = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths, padding='longest')
            input_p = tokenizer_p.batch_encode_plus(nodes, xpaths=xpaths, padding=True)
            self.assert_batch_padded_input_match(input_r, input_p, len(input_r['input_ids'][0]), pad_token_id)
            # --- batch_encode_plus(): batch of (question, nodes) pairs.
            (questions, nodes, xpaths) = self.get_question_nodes_and_xpaths_batch()
            input_r = tokenizer_r.batch_encode_plus(list(zip(questions, nodes)), is_pair=True, xpaths=xpaths, max_length=max_length, truncation=True, padding='max_length')
            input_p = tokenizer_p.batch_encode_plus(list(zip(questions, nodes)), is_pair=True, xpaths=xpaths, max_length=max_length, truncation=True, padding='max_length')
            self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
            input_r = tokenizer_r.batch_encode_plus(list(zip(questions, nodes)), is_pair=True, xpaths=xpaths, padding=True)
            input_p = tokenizer_p.batch_encode_plus(list(zip(questions, nodes)), is_pair=True, xpaths=xpaths, padding='longest')
            self.assert_batch_padded_input_match(input_r, input_p, len(input_r['input_ids'][0]), pad_token_id)
            # --- pad(): applied after encode_plus (no padding args).
            (nodes, xpaths) = self.get_nodes_and_xpaths()
            input_r = tokenizer_r.encode_plus(nodes, xpaths=xpaths)
            input_r = tokenizer_r.pad(input_r)
            input_p = tokenizer_r.encode_plus(nodes, xpaths=xpaths)
            input_p = tokenizer_r.pad(input_p)
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], len(input_r['input_ids']), pad_token_id)
            # --- pad() to an explicit max_length.
            input_r = tokenizer_r.encode_plus(nodes, xpaths=xpaths)
            input_r = tokenizer_r.pad(input_r, max_length=max_length, padding='max_length')
            input_p = tokenizer_r.encode_plus(nodes, xpaths=xpaths)
            input_p = tokenizer_r.pad(input_p, max_length=max_length, padding='max_length')
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length, pad_token_id)
            # --- pad() applied after batch_encode_plus.
            (nodes, xpaths) = self.get_nodes_and_xpaths_batch()
            input_r = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths)
            input_r = tokenizer_r.pad(input_r)
            input_p = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths)
            input_p = tokenizer_r.pad(input_p)
            self.assert_batch_padded_input_match(input_r, input_p, len(input_r['input_ids'][0]), pad_token_id)
            # --- pad() on a batch, to an explicit max_length.
            (nodes, xpaths) = self.get_nodes_and_xpaths_batch()
            input_r = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths)
            input_r = tokenizer_r.pad(input_r, max_length=max_length, padding='max_length')
            input_p = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths)
            input_p = tokenizer_r.pad(input_p, max_length=max_length, padding='max_length')
            self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
def test_call(self):
    """tokenizer(...) must be equivalent to encode_plus / batch_encode_plus."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            # Single example.
            (nodes, xpaths) = self.get_nodes_and_xpaths()
            encoded_sequences_1 = tokenizer.encode_plus(nodes, xpaths=xpaths)
            encoded_sequences_2 = tokenizer(nodes, xpaths=xpaths)
            self.assertEqual(encoded_sequences_1, encoded_sequences_2)
            # NOTE(review): `question` fetched below is never passed to the
            # tokenizer — the pair case appears to re-test the nodes-only
            # path. Looks like a copy-paste slip; confirm intended coverage.
            (question, nodes, xpaths) = self.get_question_nodes_and_xpaths()
            encoded_sequences_1 = tokenizer.encode_plus(nodes, xpaths=xpaths)
            encoded_sequences_2 = tokenizer(nodes, xpaths=xpaths)
            self.assertEqual(encoded_sequences_1, encoded_sequences_2)
            # Batch of examples.
            (nodes, xpaths) = self.get_nodes_and_xpaths_batch()
            encoded_sequences_1 = tokenizer.batch_encode_plus(nodes, is_pair=False, xpaths=xpaths)
            encoded_sequences_2 = tokenizer(nodes, xpaths=xpaths)
            self.assertEqual(encoded_sequences_1, encoded_sequences_2)
def test_batch_encode_plus_batch_sequence_length(self):
    """batch_encode_plus must match per-example encode_plus, padded or not."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            (nodes, xpaths) = self.get_nodes_and_xpaths_batch()
            # Unpadded batch must equal the per-example encodings.
            encoded_sequences = [tokenizer.encode_plus(nodes_example, xpaths=xpaths_example) for (nodes_example, xpaths_example) in zip(nodes, xpaths)]
            encoded_sequences_batch = tokenizer.batch_encode_plus(nodes, is_pair=False, xpaths=xpaths, padding=False)
            self.assertListEqual(encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch))
            maximum_length = len(max([encoded_sequence['input_ids'] for encoded_sequence in encoded_sequences], key=len))
            self._check_no_pad_token_padding(tokenizer, nodes)
            # padding=True on the batch must equal per-example 'max_length'
            # padding to the longest sequence in the batch.
            encoded_sequences_padded = [tokenizer.encode_plus(nodes_example, xpaths=xpaths_example, max_length=maximum_length, padding='max_length') for (nodes_example, xpaths_example) in zip(nodes, xpaths)]
            encoded_sequences_batch_padded = tokenizer.batch_encode_plus(nodes, is_pair=False, xpaths=xpaths, padding=True)
            self.assertListEqual(encoded_sequences_padded, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded))
            # 'longest' with a larger max_length must equal padding=True.
            encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(nodes, is_pair=False, xpaths=xpaths, padding=True)
            encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(nodes, is_pair=False, xpaths=xpaths, max_length=(maximum_length + 10), padding='longest')
            for key in encoded_sequences_batch_padded_1.keys():
                self.assertListEqual(encoded_sequences_batch_padded_1[key], encoded_sequences_batch_padded_2[key])
            # padding=False must ignore max_length entirely.
            encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(nodes, is_pair=False, xpaths=xpaths, padding=False)
            encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(nodes, is_pair=False, xpaths=xpaths, max_length=(maximum_length + 10), padding=False)
            for key in encoded_sequences_batch_padded_1.keys():
                self.assertListEqual(encoded_sequences_batch_padded_1[key], encoded_sequences_batch_padded_2[key])
# NOTE(review): the bare string expression below is the residue of a stripped
# skip decorator (e.g. `@unittest.skip('batch_encode_plus does not handle
# overflowing tokens.')`); restore it when reassembling this file.
('batch_encode_plus does not handle overflowing tokens.')
def test_batch_encode_plus_overflowing_tokens(self):
    # Intentionally a no-op: overflowing-token handling is not supported by
    # batch_encode_plus for this tokenizer.
    pass
def test_batch_encode_plus_padding(self):
    """Batch padding must match per-example `encode_plus` padding, on both sides."""

    def _assert_batch_matches_singles(tokenizer, padding_side=None):
        # Optionally force the padding side before encoding (None keeps the default).
        if padding_side is not None:
            tokenizer.padding_side = padding_side
        batch_nodes, batch_xpaths = self.get_nodes_and_xpaths_batch()
        target_length = 100
        self._check_no_pad_token_padding(tokenizer, batch_nodes)
        # Encode each example individually, padded to a fixed max length...
        singles = [
            tokenizer.encode_plus(
                one_nodes, xpaths=one_xpaths, max_length=target_length, padding='max_length'
            )
            for one_nodes, one_xpaths in zip(batch_nodes, batch_xpaths)
        ]
        # ...and as one padded batch; the two must be equivalent.
        batched = tokenizer.batch_encode_plus(
            batch_nodes, is_pair=False, xpaths=batch_xpaths,
            max_length=target_length, padding='max_length',
        )
        self.assertListEqual(
            singles, self.convert_batch_encode_plus_format_to_encode_plus(batched)
        )

    # Default (right) padding.
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            _assert_batch_matches_singles(tokenizer)
    # Left padding.
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            _assert_batch_matches_singles(tokenizer, padding_side='left')
def test_padding_to_multiple_of(self):
    """`pad_to_multiple_of=8` must yield lengths divisible by 8 only when padding is on."""
    for tokenizer in self.get_tokenizers():
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            if tokenizer.pad_token is None:
                self.skipTest('No padding token.')
            else:
                nodes, xpaths = self.get_nodes_and_xpaths()
                # padding=True + multiple-of-8 -> every field length is a multiple of 8.
                padded = tokenizer(nodes, xpaths=xpaths, padding=True, pad_to_multiple_of=8)
                for name, seq in padded.items():
                    self.assertEqual(len(seq) % 8, 0, f'BatchEncoding.{name} is not multiple of 8')
                # Without padding, the multiple-of constraint is not applied.
                unpadded = tokenizer(nodes, xpaths=xpaths, pad_to_multiple_of=8)
                for name, seq in unpadded.items():
                    self.assertNotEqual(len(seq) % 8, 0, f'BatchEncoding.{name} is not multiple of 8')
                # Truncation combined with padding still honours the constraint.
                truncated = tokenizer(
                    nodes, xpaths=xpaths, padding=True, truncation=True, pad_to_multiple_of=8
                )
                for name, seq in truncated.items():
                    self.assertEqual(len(seq) % 8, 0, f'BatchEncoding.{name} is not multiple of 8')
                # A max_length that is not itself a multiple of 8 must be rejected.
                self.assertRaises(
                    ValueError,
                    tokenizer.__call__,
                    nodes,
                    xpaths=xpaths,
                    padding=True,
                    truncation=True,
                    max_length=12,
                    pad_to_multiple_of=8,
                )
def test_tokenizer_slow_store_full_signature(self):
    """Every defaulted `__init__` parameter must be recorded in `tokenizer.init_kwargs`."""
    init_signature = inspect.signature(self.tokenizer_class.__init__)
    tokenizer = self.get_tokenizer()
    defaulted_params = (
        name
        for name, param in init_signature.parameters.items()
        if param.default != inspect.Parameter.empty
    )
    for name in defaulted_params:
        self.assertIn(name, tokenizer.init_kwargs)
def test_build_inputs_with_special_tokens(self):
    """Slow and fast tokenizers must decorate raw ids with identical special tokens."""
    if not self.test_slow_tokenizer:
        return
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            nodes, xpaths = self.get_nodes_and_xpaths()
            # Raw (undecorated) ids; the "pair" input reuses the same sequence.
            input_simple = tokenizer_p.encode(nodes, xpaths=xpaths, add_special_tokens=False)
            input_pair = tokenizer_p.encode(nodes, xpaths=xpaths, add_special_tokens=False)
            # Single-sequence decoration must match between implementations.
            output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple)
            output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple)
            self.assertEqual(output_p, output_r)
            # Pair decoration must match as well.
            output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair)
            output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair)
            self.assertEqual(output_p, output_r)
def test_special_tokens_mask_input_pairs(self):
    """Stripping positions flagged by special_tokens_mask must recover the raw encoding."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            nodes, xpaths = self.get_nodes_and_xpaths()
            plain_ids = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False)
            encoded = tokenizer.encode_plus(
                nodes, xpaths=xpaths, add_special_tokens=True, return_special_tokens_mask=True
            )
            ids_with_special = encoded['input_ids']
            mask = encoded['special_tokens_mask']
            self.assertEqual(len(mask), len(ids_with_special))
            # Two-step filter (None-out specials, then drop them) mirrors the original.
            nulled = [None if mask[pos] else tok for pos, tok in enumerate(ids_with_special)]
            recovered = [tok for tok in nulled if tok is not None]
            self.assertEqual(plain_ids, recovered)
def test_special_tokens_mask(self):
    """special_tokens_mask must mark exactly the positions of added special tokens."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            nodes, xpaths = self.get_nodes_and_xpaths()
            plain_ids = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False)
            encoded = tokenizer.encode_plus(
                nodes, xpaths=xpaths, add_special_tokens=True, return_special_tokens_mask=True
            )
            ids_with_special = encoded['input_ids']
            mask = encoded['special_tokens_mask']
            self.assertEqual(len(mask), len(ids_with_special))
            # Dropping every masked position must reproduce the raw encoding.
            recovered = [tok for pos, tok in enumerate(ids_with_special) if not mask[pos]]
            self.assertEqual(plain_ids, recovered)
def test_save_and_load_tokenizer(self):
    """save_pretrained / from_pretrained must round-trip both tokens and vocabulary.

    First sanity-checks that no tokenizer reports model_max_length == 42 (a
    sentinel value), then encodes, saves, reloads, re-encodes and compares.
    """
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            self.assertNotEqual(tokenizer.model_max_length, 42)
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            (nodes, xpaths) = self.get_nodes_and_xpaths()
            tmpdirname = tempfile.mkdtemp()
            try:
                before_tokens = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False)
                before_vocab = tokenizer.get_vocab()
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False)
                after_vocab = after_tokenizer.get_vocab()
                self.assertListEqual(before_tokens, after_tokens)
                self.assertDictEqual(before_vocab, after_vocab)
            finally:
                # Fix: previously the temp dir leaked whenever an assertion (or the
                # save/reload itself) failed; always clean it up.
                shutil.rmtree(tmpdirname)
def test_right_and_left_padding(self):
    """Check 'max_length' padding on both sides, plus the no-op padding modes.

    For each tokenizer:
      * padding='max_length' with padding_side='right' appends pad ids,
      * padding='max_length' with padding_side='left' prepends pad ids,
      * padding=True/'longest'/False/default on a single sequence changes nothing.
    """
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            (nodes, xpaths) = self.get_nodes_and_xpaths()
            sequence = 'Sequence'
            padding_size = 10
            # NOTE(review): the plain string 'Sequence' (not `nodes`) is handed to the
            # pad-token check here — confirm this is intentional.
            self._check_no_pad_token_padding(tokenizer, sequence)
            padding_idx = tokenizer.pad_token_id
            # RIGHT padding: pad ids appended after the encoded sequence.
            tokenizer.padding_side = 'right'
            encoded_sequence = tokenizer.encode(nodes, xpaths=xpaths)
            sequence_length = len(encoded_sequence)
            padded_sequence = tokenizer.encode(nodes, xpaths=xpaths, max_length=(sequence_length + padding_size), padding='max_length')
            padded_sequence_length = len(padded_sequence)
            assert ((sequence_length + padding_size) == padded_sequence_length)
            assert ((encoded_sequence + ([padding_idx] * padding_size)) == padded_sequence)
            # LEFT padding: pad ids prepended before the encoded sequence.
            tokenizer.padding_side = 'left'
            encoded_sequence = tokenizer.encode(nodes, xpaths=xpaths)
            sequence_length = len(encoded_sequence)
            padded_sequence = tokenizer.encode(nodes, xpaths=xpaths, max_length=(sequence_length + padding_size), padding='max_length')
            padded_sequence_length = len(padded_sequence)
            assert ((sequence_length + padding_size) == padded_sequence_length)
            assert ((([padding_idx] * padding_size) + encoded_sequence) == padded_sequence)
            # padding=True / 'longest' on a single sequence must be a no-op,
            # regardless of padding side.
            encoded_sequence = tokenizer.encode(nodes, xpaths=xpaths)
            sequence_length = len(encoded_sequence)
            tokenizer.padding_side = 'right'
            padded_sequence_right = tokenizer.encode(nodes, xpaths=xpaths, padding=True)
            padded_sequence_right_length = len(padded_sequence_right)
            assert (sequence_length == padded_sequence_right_length)
            assert (encoded_sequence == padded_sequence_right)
            tokenizer.padding_side = 'left'
            padded_sequence_left = tokenizer.encode(nodes, xpaths=xpaths, padding='longest')
            padded_sequence_left_length = len(padded_sequence_left)
            assert (sequence_length == padded_sequence_left_length)
            assert (encoded_sequence == padded_sequence_left)
            # Default (no padding argument) and padding=False are also no-ops.
            tokenizer.padding_side = 'right'
            padded_sequence_right = tokenizer.encode(nodes, xpaths=xpaths)
            padded_sequence_right_length = len(padded_sequence_right)
            assert (sequence_length == padded_sequence_right_length)
            assert (encoded_sequence == padded_sequence_right)
            tokenizer.padding_side = 'left'
            padded_sequence_left = tokenizer.encode(nodes, xpaths=xpaths, padding=False)
            padded_sequence_left_length = len(padded_sequence_left)
            assert (sequence_length == padded_sequence_left_length)
            assert (encoded_sequence == padded_sequence_left)
def test_token_type_ids(self):
    """token_type_ids must align with input_ids/attention_mask and contain segment 0."""
    for tokenizer in self.get_tokenizers():
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            # Single-sequence input: only segment id 0 may appear.
            nodes, xpaths = self.get_nodes_and_xpaths()
            encoding = tokenizer(nodes, xpaths=xpaths, return_token_type_ids=True)
            self.assertEqual(len(encoding['token_type_ids']), len(encoding['input_ids']))
            self.assertEqual(len(encoding['token_type_ids']), len(encoding['attention_mask']))
            self.assertIn(0, encoding['token_type_ids'])
            self.assertNotIn(1, encoding['token_type_ids'])
            # Question + nodes pair: segment 0 must still be present.
            question, nodes, xpaths = self.get_question_nodes_and_xpaths()
            encoding = tokenizer(question, nodes, xpaths, return_token_type_ids=True)
            self.assertEqual(len(encoding['token_type_ids']), len(encoding['input_ids']))
            self.assertEqual(len(encoding['token_type_ids']), len(encoding['attention_mask']))
            self.assertIn(0, encoding['token_type_ids'])
def test_offsets_mapping(self):
    """offset_mapping length and special-token count must match the encoding."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

            def _check(encoding, is_pair):
                # One offset per input id, and exactly the expected number of
                # special tokens flagged in the mask.
                n_special = tokenizer_r.num_special_tokens_to_add(is_pair)
                self.assertEqual(len(encoding['offset_mapping']), len(encoding['input_ids']))
                self.assertEqual(sum(encoding['special_tokens_mask']), n_special)

            # Single sequence of nodes.
            text = ['a', 'wonderful', 'test']
            xpaths = ['html/body'] * len(text)
            _check(
                tokenizer_r.encode_plus(
                    text, xpaths=xpaths, return_special_tokens_mask=True,
                    return_offsets_mapping=True, add_special_tokens=True,
                ),
                is_pair=False,
            )
            # Question paired with nodes.
            text = "what's his name"
            pair = ['a', 'wonderful', 'test']
            xpaths = ['html/body'] * len(pair)
            _check(
                tokenizer_r.encode_plus(
                    text, pair, xpaths=xpaths, return_special_tokens_mask=True,
                    return_offsets_mapping=True, add_special_tokens=True,
                ),
                is_pair=True,
            )
# NOTE(review): `_torch` below looks like the residue of a decorator (e.g.
# `@require_torch`) whose prefix was lost during extraction; as written it is a
# bare name expression. Confirm against the upstream source.
_torch
def test_torch_encode_plus_sent_to_model(self):
    """Feed encode_plus / batch_encode_plus PyTorch tensors into a freshly built model.

    Returns early when the tokenizer has no mapped model class, or when the
    mapped config is encoder-decoder / lacks a pad token. Forward passes only
    need to run without error.
    """
    import torch
    from transformers import MODEL_MAPPING, TOKENIZER_MAPPING
    MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING)
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            if (tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING):
                return
            (config_class, model_class) = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
            config = config_class()
            if (config.is_encoder_decoder or (config.pad_token_id is None)):
                return
            model = model_class(config)
            # The embedding matrix must be able to index every tokenizer id.
            is_using_common_embeddings = hasattr(model.get_input_embeddings(), 'weight')
            assert ((model.get_input_embeddings().weight.shape[0] >= len(tokenizer)) if is_using_common_embeddings else True)
            (nodes, xpaths) = self.get_nodes_and_xpaths()
            encoded_sequence = tokenizer.encode_plus(nodes, xpaths=xpaths, return_tensors='pt')
            batch_encoded_sequence = tokenizer.batch_encode_plus([nodes, nodes], [xpaths, xpaths], return_tensors='pt')
            # No gradients needed — only checking the forward pass accepts the tensors.
            with torch.no_grad():
                model(**encoded_sequence)
                model(**batch_encoded_sequence)
def test_rust_and_python_full_tokenizers(self):
    """Slow (python) and fast (rust) tokenizers must produce identical ids."""
    if not self.test_rust_tokenizer:
        return
    if not self.test_slow_tokenizer:
        return
    slow_tokenizer = self.get_tokenizer()
    fast_tokenizer = self.get_rust_tokenizer()
    nodes, xpaths = self.get_nodes_and_xpaths()
    # Compare both without and with special tokens.
    for add_special in (False, True):
        slow_ids = slow_tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=add_special)
        fast_ids = fast_tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=add_special)
        self.assertListEqual(slow_ids, fast_ids)
def test_tokenization_python_rust_equals(self):
    """Slow (python) and fast (rust) tokenizers must produce equal encodings.

    Compares plain encodings, "pair" encodings, long truncated encodings, and
    truncated encodings with overflow/stride, field by field.
    """
    if (not self.test_slow_tokenizer):
        return
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            (nodes, xpaths) = self.get_nodes_and_xpaths()
            # Simple input: every shared field must match.
            input_p = tokenizer_p.encode_plus(nodes, xpaths=xpaths)
            input_r = tokenizer_r.encode_plus(nodes, xpaths=xpaths)
            for key in filter((lambda x: (x in ['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq'])), input_p.keys()):
                self.assertSequenceEqual(input_p[key], input_r[key])
            # "Pair" input (here the same single sequence again).
            input_pairs_p = tokenizer_p.encode_plus(nodes, xpaths=xpaths)
            input_pairs_r = tokenizer_r.encode_plus(nodes, xpaths=xpaths)
            # NOTE(review): this loop iterates input_p.keys(); input_pairs_p.keys()
            # was probably intended (the key sets likely coincide) — confirm upstream.
            for key in filter((lambda x: (x in ['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq'])), input_p.keys()):
                self.assertSequenceEqual(input_pairs_p[key], input_pairs_r[key])
            # Long input truncated to max_length.
            nodes = ['hello' for _ in range(1000)]
            xpaths = ['html/body' for _ in range(1000)]
            input_p = tokenizer_p.encode_plus(nodes, xpaths=xpaths, max_length=512, truncation=True)
            input_r = tokenizer_r.encode_plus(nodes, xpaths=xpaths, max_length=512, truncation=True)
            for key in filter((lambda x: (x in ['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq'])), input_p.keys()):
                self.assertSequenceEqual(input_p[key], input_r[key])
            # Truncation with overflow: the rust output is indexed with [0]
            # (presumably because its overflow output is batched) — note the asymmetry.
            input_p = tokenizer_p.encode_plus(nodes, xpaths=xpaths, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True)
            input_r = tokenizer_r.encode_plus(nodes, xpaths=xpaths, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True)
            for key in filter((lambda x: (x in ['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq'])), input_p.keys()):
                self.assertSequenceEqual(input_p[key], input_r[key][0])
def test_embeded_special_tokens(self):
    """Fast and slow tokenizers must agree on encodings containing special tokens."""
    if not self.test_slow_tokenizer:
        return
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            nodes, xpaths = self.get_nodes_and_xpaths()
            tokens_r = tokenizer_r.encode_plus(nodes, xpaths=xpaths, add_special_tokens=True)
            tokens_p = tokenizer_p.encode_plus(nodes, xpaths=xpaths, add_special_tokens=True)
            # Every field of the fast encoding must equal the slow one.
            for field in tokens_p.keys():
                self.assertEqual(tokens_r[field], tokens_p[field])
            if 'token_type_ids' in tokens_r:
                self.assertEqual(
                    sum(tokens_r['token_type_ids']), sum(tokens_p['token_type_ids'])
                )
            # The decoded token strings must also match.
            tokens_r = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
            tokens_p = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])
            self.assertSequenceEqual(tokens_r, tokens_p)
def test_compare_add_special_tokens(self):
    """Adding special tokens must grow every output by num_special_tokens_to_add.

    Checked across tokenize(), encode(), encode_plus() and batch_encode_plus().
    """
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            simple_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=False)
            (nodes, xpaths) = self.get_nodes_and_xpaths()
            # tokenize()
            no_special_tokens = tokenizer_r.tokenize(' '.join(nodes), add_special_tokens=False)
            with_special_tokens = tokenizer_r.tokenize(' '.join(nodes), add_special_tokens=True)
            self.assertEqual(len(no_special_tokens), (len(with_special_tokens) - simple_num_special_tokens_to_add))
            # encode()
            no_special_tokens = tokenizer_r.encode(nodes, xpaths=xpaths, add_special_tokens=False)
            with_special_tokens = tokenizer_r.encode(nodes, xpaths=xpaths, add_special_tokens=True)
            self.assertEqual(len(no_special_tokens), (len(with_special_tokens) - simple_num_special_tokens_to_add))
            # encode_plus(): every returned field must grow by the same amount.
            no_special_tokens = tokenizer_r.encode_plus(nodes, xpaths=xpaths, add_special_tokens=False)
            with_special_tokens = tokenizer_r.encode_plus(nodes, xpaths=xpaths, add_special_tokens=True)
            for key in no_special_tokens.keys():
                self.assertEqual(len(no_special_tokens[key]), (len(with_special_tokens[key]) - simple_num_special_tokens_to_add))
            # batch_encode_plus(): check per example within each field.
            (nodes, xpaths) = self.get_nodes_and_xpaths_batch()
            no_special_tokens = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths, add_special_tokens=False)
            with_special_tokens = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths, add_special_tokens=True)
            for key in no_special_tokens.keys():
                for (i_no, i_with) in zip(no_special_tokens[key], with_special_tokens[key]):
                    self.assertEqual(len(i_no), (len(i_with) - simple_num_special_tokens_to_add))
def test_markuplm_truncation_integration_test(self):
    """truncation=True must cap outputs at max_length / model_max_length."""
    nodes, xpaths = self.get_nodes_and_xpaths()
    tokenizer = MarkupLMTokenizer.from_pretrained('microsoft/markuplm-base', model_max_length=512)
    # Explicit max_length values between 12 and 511 are all honoured.
    for limit in range(12, 512):
        truncated = tokenizer.encode(nodes, xpaths=xpaths, max_length=limit, truncation=True)
        self.assertLessEqual(len(truncated), limit)
    # Without an explicit max_length, model_max_length becomes the cap,
    # and repeated calls are deterministic.
    tokenizer.model_max_length = 20
    implicit = tokenizer.encode(nodes, xpaths=xpaths, truncation=True)
    repeated = tokenizer.encode(nodes, xpaths=xpaths, truncation=True)
    self.assertListEqual(implicit, repeated)
    self.assertLessEqual(len(implicit), 20)
# NOTE(review): `_pt_tf_cross_test` below looks like the residue of a decorator
# (e.g. `@require_pt_tf_cross_test`) whose prefix was lost during extraction; as
# written it is a bare name expression. Confirm against the upstream source.
_pt_tf_cross_test
def test_batch_encode_plus_tensors(self):
    """PyTorch and TensorFlow batched tensors must agree with the plain python output.

    Without padding, converting the batch to tensors must raise ValueError
    (presumably due to ragged lengths); with padding, 'pt' and 'tf' tensors
    must hold identical values.
    """
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            (nodes, xpaths) = self.get_nodes_and_xpaths_batch()
            # Tensor conversion without padding must fail for both backends.
            self.assertRaises(ValueError, tokenizer.batch_encode_plus, nodes, xpaths=xpaths, return_tensors='pt')
            self.assertRaises(ValueError, tokenizer.batch_encode_plus, nodes, xpaths=xpaths, return_tensors='tf')
            if (tokenizer.pad_token_id is None):
                # Without a pad token, requesting padding must also fail.
                self.assertRaises(ValueError, tokenizer.batch_encode_plus, nodes, xpaths=xpaths, padding=True, return_tensors='pt')
                self.assertRaises(ValueError, tokenizer.batch_encode_plus, nodes, xpaths=xpaths, padding='longest', return_tensors='tf')
            else:
                pytorch_tensor = tokenizer.batch_encode_plus(nodes, xpaths=xpaths, padding=True, return_tensors='pt')
                tensorflow_tensor = tokenizer.batch_encode_plus(nodes, xpaths=xpaths, padding='longest', return_tensors='tf')
                encoded_sequences = tokenizer.batch_encode_plus(nodes, xpaths=xpaths, padding=True)
                for key in encoded_sequences.keys():
                    pytorch_value = pytorch_tensor[key].tolist()
                    tensorflow_value = tensorflow_tensor[key].numpy().tolist()
                    encoded_value = encoded_sequences[key]
                    self.assertEqual(pytorch_value, tensorflow_value, encoded_value)
def test_sequence_ids(self):
    """sequence_ids must report segment membership (and None for special tokens)."""
    for tokenizer in self.get_tokenizers():
        # sequence_ids is only available on fast tokenizers.
        if not tokenizer.is_fast:
            continue
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            seq_0 = 'Test this method.'
            seq_1 = ['With', 'these', 'inputs.']
            xpaths = ['html/body'] * len(seq_1)
            # Single sequence: segment 0 must appear.
            single = tokenizer(seq_0.split(), xpaths=xpaths)
            self.assertIn(0, single.sequence_ids())
            # Pair: both segments must appear.
            paired = tokenizer(seq_0, seq_1, xpaths=xpaths)
            self.assertIn(0, paired.sequence_ids())
            self.assertIn(1, paired.sequence_ids())
            # Special tokens (when any are added) map to None.
            if tokenizer.num_special_tokens_to_add(pair=True):
                self.assertIn(None, paired.sequence_ids())
def test_special_tokens_initialization(self):
    """Tokenizers initialised with extra special tokens must encode them as single ids.

    Verifies the fast tokenizer, then (when a slow tokenizer exists) that the
    slow tokenizer and a second fast instance produce the same encoding.
    """
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            added_tokens = [AddedToken('<special>', lstrip=True)]
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, additional_special_tokens=added_tokens, **kwargs)
            nodes = 'Hey this is a <special> token'.split()
            xpaths = ['html/body' for _ in range(len(nodes))]
            r_output = tokenizer_r.encode(nodes, xpaths=xpaths)
            # Encode the special token alone to learn its id.
            special_token_id = tokenizer_r.encode(['<special>'], xpaths=['html/body'], add_special_tokens=False)[0]
            self.assertTrue((special_token_id in r_output))
            if self.test_slow_tokenizer:
                # A second fast instance and the slow tokenizer must agree with r_output.
                tokenizer_cr = self.rust_tokenizer_class.from_pretrained(pretrained_name, additional_special_tokens=added_tokens, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, additional_special_tokens=added_tokens, **kwargs)
                nodes = 'Hey this is a <special> token'.split()
                xpaths = ['html/body' for _ in range(len(nodes))]
                p_output = tokenizer_p.encode(nodes, xpaths=xpaths)
                cr_output = tokenizer_cr.encode(nodes, xpaths=xpaths)
                self.assertEqual(p_output, r_output)
                self.assertEqual(cr_output, r_output)
                self.assertTrue((special_token_id in p_output))
                self.assertTrue((special_token_id in cr_output))
def test_training_new_tokenizer(self):
    """train_new_from_iterator must yield a usable tokenizer that inherits special-token config."""
    if (not self.test_rust_tokenizer):
        return
    tokenizer = self.get_rust_tokenizer()
    new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100)
    # Two small examples; the new tokenizer must encode both.
    text = [['this', 'is', 'the'], ['how', 'are', 'you']]
    xpaths = [(['html/body'] * 3), (['html/body'] * 3)]
    inputs = new_tokenizer(text, xpaths=xpaths)
    self.assertEqual(len(inputs['input_ids']), 2)
    decoded_input = new_tokenizer.decode(inputs['input_ids'][0], skip_special_tokens=True)
    # The assertion expects nodes concatenated without separators on decode.
    expected_result = 'thisisthe'
    # Apply the source tokenizer's normalizer, if any, to the expectation.
    if (tokenizer.backend_tokenizer.normalizer is not None):
        expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
    self.assertEqual(expected_result, decoded_input)
    # Special-token configuration must carry over from the source tokenizer.
    self.assertEqual(tokenizer.num_special_tokens_to_add(False), new_tokenizer.num_special_tokens_to_add(False))
    self.assertEqual(tokenizer.num_special_tokens_to_add(True), new_tokenizer.num_special_tokens_to_add(True))
    self.assertEqual(tokenizer.max_len_single_sentence, new_tokenizer.max_len_single_sentence)
    self.assertEqual(tokenizer.max_len_sentences_pair, new_tokenizer.max_len_sentences_pair)
    self.assertSequenceEqual(tokenizer.all_special_tokens_extended, new_tokenizer.all_special_tokens_extended)
    self.assertDictEqual(tokenizer.special_tokens_map, new_tokenizer.special_tokens_map)
def test_training_new_tokenizer_with_special_tokens_change(self):
    """train_new_from_iterator with special_tokens_map must remap special tokens.

    First remaps cls_token alone (when the class accepts one), then remaps every
    configured special token '<tok>' to '<tok>a' and checks attribute strings,
    ids, AddedToken flags, and that the retrained tokenizer still works.
    """
    if (not self.test_rust_tokenizer):
        return
    tokenizer = self.get_rust_tokenizer()
    class_signature = inspect.signature(tokenizer.__class__)
    if ('cls_token' in class_signature.parameters):
        new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100, special_tokens_map={tokenizer.cls_token: '<cls>'})
        cls_id = new_tokenizer.get_vocab()['<cls>']
        self.assertEqual(new_tokenizer.cls_token, '<cls>')
        self.assertEqual(new_tokenizer.cls_token_id, cls_id)
    # Build a remapping '<tok>' -> '<tok>a' for every configured special token
    # (excluding the additional_special_tokens list).
    special_tokens_list = SpecialTokensMixin.SPECIAL_TOKENS_ATTRIBUTES.copy()
    special_tokens_list.remove('additional_special_tokens')
    special_tokens_map = {}
    for token in special_tokens_list:
        # `_<token>` is only set when the attribute was configured.
        if (getattr(tokenizer, f'_{token}') is not None):
            special_token = getattr(tokenizer, token)
            special_tokens_map[special_token] = f'{special_token}a'
    new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100, special_tokens_map=special_tokens_map)
    # Each remapped attribute must expose the new string and a consistent id.
    for token in special_tokens_list:
        if (getattr(tokenizer, f'_{token}') is None):
            continue
        special_token = getattr(tokenizer, token)
        if (special_token in special_tokens_map):
            new_special_token = getattr(new_tokenizer, token)
            self.assertEqual(special_tokens_map[special_token], new_special_token)
            new_id = new_tokenizer.get_vocab()[new_special_token]
            self.assertEqual(getattr(new_tokenizer, f'{token}_id'), new_id)
    # AddedToken instances must keep their flags (lstrip/rstrip/normalized/
    # single_word) across the remapping; plain strings just need to be present.
    for special_token in tokenizer.all_special_tokens_extended:
        if (isinstance(special_token, AddedToken) and (special_token.content not in special_tokens_map)):
            self.assertTrue((special_token in new_tokenizer.all_special_tokens_extended), f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}")
        elif isinstance(special_token, AddedToken):
            special_token_str = special_token.content
            new_special_token_str = special_tokens_map[special_token_str]
            find = False
            for candidate in new_tokenizer.all_special_tokens_extended:
                if (isinstance(candidate, AddedToken) and (candidate.content == new_special_token_str) and (candidate.lstrip == special_token.lstrip) and (candidate.rstrip == special_token.rstrip) and (candidate.normalized == special_token.normalized) and (candidate.single_word == special_token.single_word)):
                    find = True
                    break
            self.assertTrue(find, f"'{new_special_token_str}' doesn't appear in the list '{new_tokenizer.all_special_tokens_extended}' as an AddedToken with the same parameters as '{special_token}' in the list {tokenizer.all_special_tokens_extended}")
        elif (special_token not in special_tokens_map):
            self.assertTrue((special_token in new_tokenizer.all_special_tokens_extended), f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}")
        else:
            self.assertTrue((special_tokens_map[special_token] in new_tokenizer.all_special_tokens_extended))
    # The retrained tokenizer must still encode and decode correctly.
    nodes = [['this', 'is'], ['hello', '']]
    xpaths = [(['html/body'] * 2), (['html/body'] * 2)]
    inputs = new_tokenizer(nodes, xpaths=xpaths)
    self.assertEqual(len(inputs['input_ids']), 2)
    decoded_input = new_tokenizer.decode(inputs['input_ids'][0], skip_special_tokens=True)
    expected_result = 'thisis'
    if (tokenizer.backend_tokenizer.normalizer is not None):
        expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
    self.assertEqual(expected_result, decoded_input)
def test_prepare_for_model(self):
    """prepare_for_model must produce the same output as encode_plus."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        # Skipped for the fast tokenizer (original behaviour preserved).
        if tokenizer.__class__.__name__ == 'MarkupLMTokenizerFast':
            continue
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            nodes, xpaths = self.get_nodes_and_xpaths()
            via_prepare = tokenizer.prepare_for_model(nodes, xpaths=xpaths, add_special_tokens=True)
            via_encode = tokenizer.encode_plus(nodes, xpaths=xpaths, add_special_tokens=True)
            self.assertEqual(via_encode, via_prepare)
def test_padding_different_model_input_name(self):
    """pad() must still work when the main model input is renamed to 'inputs'."""
    if (not self.test_slow_tokenizer):
        return
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
            pad_token_id = tokenizer_p.pad_token_id
            (nodes, xpaths) = self.get_nodes_and_xpaths_batch()
            input_r = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths)
            # NOTE(review): `input_p` is produced — and later padded — with
            # tokenizer_r, not tokenizer_p; possibly a copy/paste slip. Confirm
            # upstream intent before changing.
            input_p = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths)
            # Rename the primary model input to 'inputs' in both encodings.
            input_r['inputs'] = input_r[tokenizer_r.model_input_names[0]]
            del input_r[tokenizer_r.model_input_names[0]]
            input_p['inputs'] = input_p[tokenizer_p.model_input_names[0]]
            del input_p[tokenizer_p.model_input_names[0]]
            # Make the tokenizers treat 'inputs' as their main input name.
            tokenizer_r.model_input_names = (['inputs'] + tokenizer_r.model_input_names[1:])
            tokenizer_p.model_input_names = (['inputs'] + tokenizer_p.model_input_names[1:])
            input_r = tokenizer_r.pad(input_r, padding='longest')
            input_p = tokenizer_r.pad(input_p, padding='longest')
            max_length = len(input_p['inputs'][0])
            self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id, model_main_input_name='inputs')
def test_batch_encode_dynamic_overflowing(self):
    """With return_overflowing_tokens, outputs must be rank-2 (rank-3 for xpath fields).

    The tensor backend is chosen by availability: torch > tf > jax.
    """
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})'):
            if is_torch_available():
                returned_tensor = 'pt'
            elif is_tf_available():
                returned_tensor = 'tf'
            else:
                returned_tensor = 'jax'
            # Single example: max_length=1 forces content into overflow rows.
            (nodes, xpaths) = self.get_nodes_and_xpaths()
            tokens = tokenizer.encode_plus(nodes, xpaths=xpaths, max_length=1, padding=True, truncation=True, return_tensors=returned_tensor, return_overflowing_tokens=True)
            for key in filter((lambda x: ('overflow_to_sample_mapping' not in x)), tokens.keys()):
                if ('xpath' not in key):
                    self.assertEqual(len(tokens[key].shape), 2)
                else:
                    # xpath fields are rank-3 (presumably an extra per-token axis).
                    self.assertEqual(len(tokens[key].shape), 3)
            # Batch of examples, truncating only the first sequence, padded to 6.
            (nodes, xpaths) = self.get_nodes_and_xpaths_batch()
            tokens = tokenizer.batch_encode_plus(nodes, xpaths=xpaths, max_length=6, padding=True, truncation='only_first', return_tensors=returned_tensor, return_overflowing_tokens=True)
            for key in filter((lambda x: ('overflow_to_sample_mapping' not in x)), tokens.keys()):
                if ('xpath' not in key):
                    self.assertEqual(len(tokens[key].shape), 2)
                    self.assertEqual(tokens[key].shape[(- 1)], 6)
                else:
                    self.assertEqual(len(tokens[key].shape), 3)
                    self.assertEqual(tokens[key].shape[(- 2)], 6)
# NOTE(review): the bare string below was almost certainly a skip decorator, e.g.
# `@unittest.skip("TO DO: overwrite this very extensive test.")`, whose `@...`
# wrapper was lost during extraction; as written it is a no-op expression.
('TO DO: overwrite this very extensive test.')
def test_alignement_methods(self):
    """Intentionally empty: the common alignment test is not yet overridden here."""
    pass
def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
    """Build a deterministic (nodes, xpaths, output_ids) fixture from the vocab.

    Keeps only ids whose decoded text is purely alphabetic/spaces and round-trips
    back to that single id, clamps the count to [min_length, max_length], then
    re-encodes the decoded text as space-split nodes with 'html/body' xpaths.
    """
    toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
    toks = list(filter((lambda t: re.match('^[ a-zA-Z]+$', t[1])), toks))
    # NOTE(review): the xpath list length below is len(t[1]) — the number of
    # characters in the decoded string, not the number of split nodes; confirm
    # whether that over-length list is intentional.
    toks = list(filter((lambda t: ([t[0]] == tokenizer.encode(t[1].split(' '), xpaths=(len(t[1]) * ['html/body']), add_special_tokens=False))), toks))
    if ((max_length is not None) and (len(toks) > max_length)):
        toks = toks[:max_length]
    if ((min_length is not None) and (len(toks) < min_length) and (len(toks) > 0)):
        # Double the list until it reaches min_length.
        while (len(toks) < min_length):
            toks = (toks + toks)
    toks_ids = [t[0] for t in toks]
    output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
    if with_prefix_space:
        output_txt = (' ' + output_txt)
    nodes = output_txt.split(' ')
    xpaths = ['html/body' for i in range(len(nodes))]
    output_ids = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False)
    return (nodes, xpaths, output_ids)
def test_maximum_encoding_length_pair_input(self):
    """Check truncation/overflow handling when encoding (question, nodes) pairs.

    Builds a short pair and an over-long pair (beyond the model max of 100),
    then verifies that:
      * each truncation strategy clips input ids and both xpath sequences to
        exactly ``model_max_length`` (single and batched calls);
      * ``truncation=False`` leaves the encoding over-long and logs exactly
        one "Token indices sequence length..." warning;
      * with ``return_overflowing_tokens=True`` the fast tokenizer returns
        the expected truncated + overflowing ids/xpaths, while the slow
        tokenizer raises ``ValueError`` for ``longest_first`` on pairs.

    NOTE(review): indentation was reconstructed from a flattened source; the
    placement of the 'only_second' and no-truncation checks under the padding
    sub-test follows their data-flow (they do not use ``truncation_state``)
    — confirm against the canonical file.
    """
    tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            # Number of tokens carried over into the overflow window.
            stride = 2
            (seq_0, xpaths_0, ids) = self.get_clean_sequence(tokenizer, max_length=20)
            question_0 = ' '.join(map(str, seq_0))
            # Ensure seq_0 produces more than (2 + stride) ids.
            if (len(ids) <= (2 + stride)):
                seq_0 = ((seq_0 + ' ') * (2 + stride))
                ids = None
            seq0_tokens = tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)
            self.assertGreater(len(seq0_tokens['input_ids']), (2 + stride))
            question_1 = 'This is another sentence to be encoded.'
            seq_1 = ['hello', 'world']
            xpaths_1 = ['html/body' for i in range(len(seq_1))]
            seq1_tokens = tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)
            # Make the two members differ in token length by more than 2 so
            # the "longest" member is unambiguous; double seq_1 if needed.
            if (abs((len(seq0_tokens['input_ids']) - len(seq1_tokens['input_ids']))) <= 2):
                seq1_tokens_input_ids = (seq1_tokens['input_ids'] + seq1_tokens['input_ids'])
                seq_1 = tokenizer.decode(seq1_tokens_input_ids, clean_up_tokenization_spaces=False)
                seq_1 = seq_1.split(' ')
                xpaths_1 = ['html/body' for i in range(len(seq_1))]
                seq1_tokens = tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)
            self.assertGreater(len(seq1_tokens['input_ids']), (2 + stride))
            smallest = (seq1_tokens['input_ids'] if (len(seq0_tokens['input_ids']) > len(seq1_tokens['input_ids'])) else seq0_tokens['input_ids'])
            sequence = tokenizer(question_0, seq_1, xpaths=xpaths_1, add_special_tokens=False)
            model_max_length = tokenizer.model_max_length
            self.assertEqual(model_max_length, 100)
            # Over-long inputs: repeat seq_0 so the pair exceeds the model max.
            seq_2 = (seq_0 * model_max_length)
            question_2 = ' '.join(map(str, seq_2))
            xpaths_2 = (xpaths_0 * model_max_length)
            self.assertGreaterEqual(len(seq_2), model_max_length)
            sequence1 = tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)
            total_length1 = len(sequence1['input_ids'])
            sequence2 = tokenizer(question_2, seq_1, xpaths=xpaths_1, add_special_tokens=False)
            total_length2 = len(sequence2['input_ids'])
            self.assertLess(total_length1, model_max_length, 'Issue with the testing sequence, please update it.')
            self.assertGreater(total_length2, model_max_length, 'Issue with the testing sequence, please update it.')
            # Padding modes only apply when the tokenizer has a pad token.
            padding_strategies = ([False, True, 'longest'] if (tokenizer.pad_token and (tokenizer.pad_token_id >= 0)) else [False])
            for padding_state in padding_strategies:
                with self.subTest(f'{tokenizer.__class__.__name__} Padding: {padding_state}'):
                    for truncation_state in [True, 'longest_first', 'only_first']:
                        with self.subTest(f'{tokenizer.__class__.__name__} Truncation: {truncation_state}'):
                            # Single pair: everything clipped to model_max_length.
                            output = tokenizer(question_2, seq_1, xpaths=xpaths_1, padding=padding_state, truncation=truncation_state)
                            self.assertEqual(len(output['input_ids']), model_max_length)
                            self.assertEqual(len(output['xpath_tags_seq']), model_max_length)
                            self.assertEqual(len(output['xpath_subs_seq']), model_max_length)
                            # Batched variant of the same pair.
                            output = tokenizer([question_2], [seq_1], xpaths=[xpaths_1], padding=padding_state, truncation=truncation_state)
                            self.assertEqual(len(output['input_ids'][0]), model_max_length)
                            self.assertEqual(len(output['xpath_tags_seq'][0]), model_max_length)
                            self.assertEqual(len(output['xpath_subs_seq'][0]), model_max_length)
                    # 'only_second' truncation with the long member second.
                    output = tokenizer(question_1, seq_2, xpaths=xpaths_2, padding=padding_state, truncation='only_second')
                    self.assertEqual(len(output['input_ids']), model_max_length)
                    self.assertEqual(len(output['xpath_tags_seq']), model_max_length)
                    self.assertEqual(len(output['xpath_subs_seq']), model_max_length)
                    output = tokenizer([question_1], [seq_2], xpaths=[xpaths_2], padding=padding_state, truncation='only_second')
                    self.assertEqual(len(output['input_ids'][0]), model_max_length)
                    self.assertEqual(len(output['xpath_tags_seq'][0]), model_max_length)
                    self.assertEqual(len(output['xpath_subs_seq'][0]), model_max_length)
                    # No truncation: encoding stays over-long and exactly one
                    # length warning is emitted.
                    tokenizer.deprecation_warnings = {}
                    with self.assertLogs('transformers', level='WARNING') as cm:
                        output = tokenizer(question_1, seq_2, xpaths=xpaths_2, padding=padding_state, truncation=False)
                        self.assertNotEqual(len(output['input_ids']), model_max_length)
                        self.assertNotEqual(len(output['xpath_tags_seq']), model_max_length)
                        self.assertNotEqual(len(output['xpath_subs_seq']), model_max_length)
                    self.assertEqual(len(cm.records), 1)
                    self.assertTrue(cm.records[0].message.startswith('Token indices sequence length is longer than the specified maximum sequence length for this model'))
                    tokenizer.deprecation_warnings = {}
                    with self.assertLogs('transformers', level='WARNING') as cm:
                        output = tokenizer([question_1], [seq_2], xpaths=[xpaths_2], padding=padding_state, truncation=False)
                        self.assertNotEqual(len(output['input_ids'][0]), model_max_length)
                        self.assertNotEqual(len(output['xpath_tags_seq'][0]), model_max_length)
                        self.assertNotEqual(len(output['xpath_subs_seq'][0]), model_max_length)
                    self.assertEqual(len(cm.records), 1)
                    self.assertTrue(cm.records[0].message.startswith('Token indices sequence length is longer than the specified maximum sequence length for this model'))
            # Reference encodings for the overflow checks below: drop / carry
            # the last (2 + stride) ids of whichever member gets truncated.
            truncated_first_sequence = (tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)['input_ids'][:(- 2)] + tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)['input_ids'])
            truncated_second_sequence = (tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)['input_ids'] + tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)['input_ids'][:(- 2)])
            truncated_longest_sequence = (truncated_first_sequence if (len(seq0_tokens) > len(seq1_tokens)) else truncated_second_sequence)
            overflow_first_sequence = (tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)['input_ids'][(- (2 + stride)):] + tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)['input_ids'])
            overflow_second_sequence = (tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)['input_ids'] + tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)['input_ids'][(- (2 + stride)):])
            overflow_longest_sequence = (overflow_first_sequence if (len(seq0_tokens) > len(seq1_tokens)) else overflow_second_sequence)
            # Expected xpath tag-id vectors. [5] * 50 is presumably the padded
            # per-token tag-id vector for the question text — TODO confirm
            # against the tokenizer's pad xpath configuration.
            xpath_tags_seq_first = ([([5] * 50)] * (len(tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)['input_ids']) - 2))
            xpath_tags_seq_first_sequence = (xpath_tags_seq_first + tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)['xpath_tags_seq'])
            overflowing_token_xpath_tags_seq_first_sequence_slow = ([([5] * 50)] * (2 + stride))
            overflowing_token_xpath_tags_seq_first_sequence_fast = (([([5] * 50)] * (2 + stride)) + tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)['xpath_tags_seq'])
            xpath_tags_seq_second = ([([5] * 50)] * len(tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)['input_ids']))
            xpath_tags_seq_second_sequence = (xpath_tags_seq_second + tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)['xpath_tags_seq'][:(- 2)])
            overflowing_token_xpath_tags_seq_second_sequence_slow = tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)['xpath_tags_seq'][(- (2 + stride)):]
            overflowing_token_xpath_tags_seq_second_sequence_fast = (([([5] * 50)] * len(tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)['input_ids'])) + tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)['xpath_tags_seq'][(- (2 + stride)):])
            xpath_tags_seq_longest_sequence = (xpath_tags_seq_first_sequence if (len(seq0_tokens) > len(seq1_tokens)) else xpath_tags_seq_second_sequence)
            overflowing_token_xpath_tags_seq_longest_sequence_fast = (overflowing_token_xpath_tags_seq_first_sequence_fast if (len(seq0_tokens) > len(seq1_tokens)) else overflowing_token_xpath_tags_seq_second_sequence_fast)
            if isinstance(tokenizer, MarkupLMTokenizerFast):
                # Fast tokenizer supports 'longest_first' overflow on pairs:
                # the overflow comes back as a second full encoding.
                information = tokenizer(question_0, seq_1, xpaths=xpaths_1, max_length=(len(sequence['input_ids']) - 2), add_special_tokens=False, stride=stride, truncation='longest_first', return_overflowing_tokens=True)
                truncated_sequence = information['input_ids'][0]
                overflowing_tokens = information['input_ids'][1]
                xpath_tags_seq = information['xpath_tags_seq'][0]
                overflowing_xpath_tags_seq = information['xpath_tags_seq'][1]
                self.assertEqual(len(information['input_ids']), 2)
                self.assertEqual(len(truncated_sequence), (len(sequence['input_ids']) - 2))
                self.assertEqual(truncated_sequence, truncated_longest_sequence)
                self.assertEqual(len(overflowing_tokens), ((2 + stride) + len(smallest)))
                self.assertEqual(overflowing_tokens, overflow_longest_sequence)
                self.assertEqual(xpath_tags_seq, xpath_tags_seq_longest_sequence)
                self.assertEqual(len(overflowing_xpath_tags_seq), ((2 + stride) + len(smallest)))
                self.assertEqual(overflowing_xpath_tags_seq, overflowing_token_xpath_tags_seq_longest_sequence_fast)
            else:
                # Slow tokenizer must refuse 'longest_first' + overflow on pairs.
                with self.assertRaises(ValueError) as context:
                    information = tokenizer(question_0, seq_1, xpaths=xpaths_1, max_length=(len(sequence['input_ids']) - 2), add_special_tokens=False, stride=stride, truncation='longest_first', return_overflowing_tokens=True)
                self.assertTrue(context.exception.args[0].startswith('Not possible to return overflowing tokens for pair of sequences with the `longest_first`. Please select another truncation strategy than `longest_first`, for instance `only_second` or `only_first`.'))
            # truncation=True is exercised the same way as 'longest_first'.
            if isinstance(tokenizer, MarkupLMTokenizerFast):
                information = tokenizer(question_0, seq_1, xpaths=xpaths_1, max_length=(len(sequence['input_ids']) - 2), add_special_tokens=False, stride=stride, truncation=True, return_overflowing_tokens=True)
                truncated_sequence = information['input_ids'][0]
                overflowing_tokens = information['input_ids'][1]
                xpath_tags_seq = information['xpath_tags_seq'][0]
                overflowing_xpath_tags_seq = information['xpath_tags_seq'][1]
                self.assertEqual(len(information['input_ids']), 2)
                self.assertEqual(len(truncated_sequence), (len(sequence['input_ids']) - 2))
                self.assertEqual(truncated_sequence, truncated_longest_sequence)
                self.assertEqual(len(overflowing_tokens), ((2 + stride) + len(smallest)))
                self.assertEqual(overflowing_tokens, overflow_longest_sequence)
                self.assertEqual(xpath_tags_seq, xpath_tags_seq_longest_sequence)
                self.assertEqual(overflowing_xpath_tags_seq, overflowing_token_xpath_tags_seq_longest_sequence_fast)
            else:
                with self.assertRaises(ValueError) as context:
                    information = tokenizer(question_0, seq_1, xpaths=xpaths_1, max_length=(len(sequence['input_ids']) - 2), add_special_tokens=False, stride=stride, truncation=True, return_overflowing_tokens=True)
                self.assertTrue(context.exception.args[0].startswith('Not possible to return overflowing tokens for pair of sequences with the `longest_first`. Please select another truncation strategy than `longest_first`, for instance `only_second` or `only_first`.'))
            # 'only_first': truncation/overflow is taken from the first member.
            information_first_truncated = tokenizer(question_0, seq_1, xpaths=xpaths_1, max_length=(len(sequence['input_ids']) - 2), add_special_tokens=False, stride=stride, truncation='only_first', return_overflowing_tokens=True)
            if isinstance(tokenizer, MarkupLMTokenizerFast):
                truncated_sequence = information_first_truncated['input_ids'][0]
                overflowing_tokens = information_first_truncated['input_ids'][1]
                xpath_tags_seq = information_first_truncated['xpath_tags_seq'][0]
                overflowing_xpath_tags_seq = information_first_truncated['xpath_tags_seq'][1]
                self.assertEqual(len(information_first_truncated['input_ids']), 2)
                self.assertEqual(len(truncated_sequence), (len(sequence['input_ids']) - 2))
                self.assertEqual(truncated_sequence, truncated_first_sequence)
                self.assertEqual(len(overflowing_tokens), ((2 + stride) + len(seq1_tokens['input_ids'])))
                self.assertEqual(overflowing_tokens, overflow_first_sequence)
                self.assertEqual(xpath_tags_seq, xpath_tags_seq_first_sequence)
                self.assertEqual(overflowing_xpath_tags_seq, overflowing_token_xpath_tags_seq_first_sequence_fast)
            else:
                # Slow tokenizer returns overflow in dedicated keys, without
                # the second member appended.
                truncated_sequence = information_first_truncated['input_ids']
                overflowing_tokens = information_first_truncated['overflowing_tokens']
                overflowing_xpath_tags_seq = information_first_truncated['overflowing_xpath_tags_seq']
                xpath_tags_seq = information_first_truncated['xpath_tags_seq']
                self.assertEqual(len(truncated_sequence), (len(sequence['input_ids']) - 2))
                self.assertEqual(truncated_sequence, truncated_first_sequence)
                self.assertEqual(len(overflowing_tokens), (2 + stride))
                self.assertEqual(overflowing_tokens, seq0_tokens['input_ids'][(- (2 + stride)):])
                self.assertEqual(xpath_tags_seq, xpath_tags_seq_first_sequence)
                self.assertEqual(overflowing_xpath_tags_seq, overflowing_token_xpath_tags_seq_first_sequence_slow)
            # 'only_second': truncation/overflow is taken from the second member.
            information_second_truncated = tokenizer(question_0, seq_1, xpaths=xpaths_1, max_length=(len(sequence['input_ids']) - 2), add_special_tokens=False, stride=stride, truncation='only_second', return_overflowing_tokens=True)
            if isinstance(tokenizer, MarkupLMTokenizerFast):
                truncated_sequence = information_second_truncated['input_ids'][0]
                overflowing_tokens = information_second_truncated['input_ids'][1]
                xpath_tags_seq = information_second_truncated['xpath_tags_seq'][0]
                overflowing_xpath_tags_seq = information_second_truncated['xpath_tags_seq'][1]
                self.assertEqual(len(information_second_truncated['input_ids']), 2)
                self.assertEqual(len(truncated_sequence), (len(sequence['input_ids']) - 2))
                self.assertEqual(truncated_sequence, truncated_second_sequence)
                self.assertEqual(len(overflowing_tokens), ((2 + stride) + len(seq0_tokens['input_ids'])))
                self.assertEqual(overflowing_tokens, overflow_second_sequence)
                self.assertEqual(xpath_tags_seq, xpath_tags_seq_second_sequence)
                self.assertEqual(overflowing_xpath_tags_seq, overflowing_token_xpath_tags_seq_second_sequence_fast)
            else:
                truncated_sequence = information_second_truncated['input_ids']
                overflowing_tokens = information_second_truncated['overflowing_tokens']
                xpath_tags_seq = information_second_truncated['xpath_tags_seq']
                overflowing_xpath_tags_seq = information_second_truncated['overflowing_xpath_tags_seq']
                self.assertEqual(len(truncated_sequence), (len(sequence['input_ids']) - 2))
                self.assertEqual(truncated_sequence, truncated_second_sequence)
                self.assertEqual(len(overflowing_tokens), (2 + stride))
                self.assertEqual(overflowing_tokens, seq1_tokens['input_ids'][(- (2 + stride)):])
                self.assertEqual(xpath_tags_seq, xpath_tags_seq_second_sequence)
                self.assertEqual(overflowing_xpath_tags_seq, overflowing_token_xpath_tags_seq_second_sequence_slow)
def test_maximum_encoding_length_single_input(self):
    """Check truncation/overflow handling for a single (non-pair) node list.

    Builds an over-long single sequence, verifies that every truncation
    strategy clips ids and both xpath sequences to ``model_max_length``
    (single and batched), that ``truncation=False`` leaves the encoding
    over-long and logs exactly one length warning, and that
    ``return_overflowing_tokens=True`` round-trips the truncated and
    overflowing slices for both slow and fast tokenizers.

    NOTE(review): indentation reconstructed from a flattened source —
    confirm statement grouping against the canonical file.
    """
    tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            (seq_0, xpaths_0, ids) = self.get_clean_sequence(tokenizer, max_length=20)
            sequence = tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)
            total_length = len(sequence['input_ids'])
            self.assertGreater(total_length, 4, "Issue with the testing sequence, please update it it's too short")
            model_max_length = tokenizer.model_max_length
            self.assertEqual(model_max_length, 100)
            # Repeat the clean sequence so it exceeds the model max.
            seq_1 = (seq_0 * model_max_length)
            xpaths_1 = (xpaths_0 * model_max_length)
            sequence1 = tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)
            total_length1 = len(sequence1['input_ids'])
            self.assertGreater(total_length1, model_max_length, "Issue with the testing sequence, please update it it's too short")
            # Padding modes only apply when the tokenizer has a pad token.
            padding_strategies = ([False, True, 'longest'] if (tokenizer.pad_token and (tokenizer.pad_token_id >= 0)) else [False])
            for padding_state in padding_strategies:
                with self.subTest(f'Padding: {padding_state}'):
                    for truncation_state in [True, 'longest_first', 'only_first']:
                        with self.subTest(f'Truncation: {truncation_state}'):
                            # Single input: everything clipped to model_max_length.
                            output = tokenizer(seq_1, xpaths=xpaths_1, padding=padding_state, truncation=truncation_state)
                            self.assertEqual(len(output['input_ids']), model_max_length)
                            self.assertEqual(len(output['xpath_tags_seq']), model_max_length)
                            self.assertEqual(len(output['xpath_subs_seq']), model_max_length)
                            # Batched variant.
                            output = tokenizer([seq_1], xpaths=[xpaths_1], padding=padding_state, truncation=truncation_state)
                            self.assertEqual(len(output['input_ids'][0]), model_max_length)
                            self.assertEqual(len(output['xpath_tags_seq'][0]), model_max_length)
                            self.assertEqual(len(output['xpath_subs_seq'][0]), model_max_length)
                    # No truncation: encoding stays over-long and exactly one
                    # length warning is emitted.
                    tokenizer.deprecation_warnings = {}
                    with self.assertLogs('transformers', level='WARNING') as cm:
                        output = tokenizer(seq_1, xpaths=xpaths_1, padding=padding_state, truncation=False)
                        self.assertNotEqual(len(output['input_ids']), model_max_length)
                        self.assertNotEqual(len(output['xpath_tags_seq']), model_max_length)
                        self.assertNotEqual(len(output['xpath_subs_seq']), model_max_length)
                    self.assertEqual(len(cm.records), 1)
                    self.assertTrue(cm.records[0].message.startswith('Token indices sequence length is longer than the specified maximum sequence length for this model'))
                    tokenizer.deprecation_warnings = {}
                    with self.assertLogs('transformers', level='WARNING') as cm:
                        output = tokenizer([seq_1], xpaths=[xpaths_1], padding=padding_state, truncation=False)
                        self.assertNotEqual(len(output['input_ids'][0]), model_max_length)
                        self.assertNotEqual(len(output['xpath_tags_seq'][0]), model_max_length)
                        self.assertNotEqual(len(output['xpath_subs_seq'][0]), model_max_length)
                    self.assertEqual(len(cm.records), 1)
                    self.assertTrue(cm.records[0].message.startswith('Token indices sequence length is longer than the specified maximum sequence length for this model'))
            # Overflowing-token round trip at max_length = total_length - 2.
            stride = 2
            information = tokenizer(seq_0, xpaths=xpaths_0, max_length=(total_length - 2), add_special_tokens=False, stride=stride, truncation=True, return_overflowing_tokens=True)
            if isinstance(tokenizer, MarkupLMTokenizerFast):
                # Fast tokenizer: overflow is returned as a second full encoding.
                truncated_sequence = information['input_ids'][0]
                overflowing_tokens = information['input_ids'][1]
                xpath_tags_seq = information['xpath_tags_seq'][0]
                overflowing_xpath_tags_seq = information['xpath_tags_seq'][1]
                self.assertEqual(len(information['input_ids']), 2)
                self.assertEqual(len(truncated_sequence), (total_length - 2))
                self.assertEqual(truncated_sequence, sequence['input_ids'][:(- 2)])
                self.assertEqual(len(overflowing_tokens), (2 + stride))
                self.assertEqual(overflowing_tokens, sequence['input_ids'][(- (2 + stride)):])
                self.assertEqual(xpath_tags_seq, sequence['xpath_tags_seq'][:(- 2)])
                self.assertEqual(overflowing_xpath_tags_seq, sequence['xpath_tags_seq'][(- (2 + stride)):])
            else:
                # Slow tokenizer: overflow comes back in dedicated keys.
                truncated_sequence = information['input_ids']
                overflowing_tokens = information['overflowing_tokens']
                xpath_tags_seq = information['xpath_tags_seq']
                overflowing_xpath_tags_seq = information['overflowing_xpath_tags_seq']
                self.assertEqual(len(truncated_sequence), (total_length - 2))
                self.assertEqual(truncated_sequence, sequence['input_ids'][:(- 2)])
                self.assertEqual(len(overflowing_tokens), (2 + stride))
                self.assertEqual(overflowing_tokens, sequence['input_ids'][(- (2 + stride)):])
                self.assertEqual(xpath_tags_seq, sequence['xpath_tags_seq'][:(- 2)])
                self.assertEqual(overflowing_xpath_tags_seq, sequence['xpath_tags_seq'][(- (2 + stride)):])
('MarkupLM tokenizer requires xpaths besides sequences.')
def test_pretokenized_inputs(self):
pass
('MarkupLM tokenizer always expects pretokenized inputs.')
def test_compare_pretokenized_inputs(self):
pass
('MarkupLM fast tokenizer does not support prepare_for_model')
def test_compare_prepare_for_model(self):
pass
def test_only_label_first_subword(self):
    """Check node-label propagation across subwords for slow and fast tokenizers.

    With the default ``only_label_first_subword=True`` only the first
    subword of a node keeps its label (the rest get -100); with the flag
    off, every subword of the node carries the label.
    """
    nodes = ['hello', 'niels']
    xpaths = ['/html/body/div/li[1]/div/span' for _ in nodes]
    node_labels = [0, 1]
    # 'niels' splits into two subwords; 'hello' stays whole.
    first_subword_labels = [-100, 0, 1, -100, -100]
    all_subword_labels = [-100, 0, 1, 1, -100]
    # Same expectations for the slow and the fast implementation.
    for tokenizer_class in (MarkupLMTokenizer, MarkupLMTokenizerFast):
        tok = tokenizer_class.from_pretrained('microsoft/markuplm-base')
        encoding = tok(nodes, xpaths=xpaths, node_labels=node_labels)
        self.assertListEqual(encoding.labels, first_subword_labels)
        tok = tokenizer_class.from_pretrained('microsoft/markuplm-base', only_label_first_subword=False)
        encoding = tok(nodes, xpaths=xpaths, node_labels=node_labels)
        self.assertListEqual(encoding.labels, all_subword_labels)
def test_markuplm_integration_test(self):
tokenizer_p = MarkupLMTokenizer.from_pretrained('microsoft/markuplm-base')
tokenizer_r = MarkupLMTokenizerFast.from_pretrained('microsoft/markuplm-base')
(nodes, xpaths) = self.get_nodes_and_xpaths()
expected_results = {'input_ids': [0, 42891, 8331, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'xpath_tags_seq': [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], 'xpath_subs_seq': [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}
encoding_p = tokenizer_p(nodes, xpaths=xpaths, padding='max_length', max_length=20)
encoding_r = tokenizer_r(nodes, xpaths=xpaths, padding='max_length', max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
(nodes, xpaths) = self.get_nodes_and_xpaths_batch()
expected_results = {'input_ids': [[0, 42891, 232, 12364, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 42891, 127, 766, 16, 22401, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'xpath_tags_seq': [[[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 
25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]]], 'xpath_subs_seq': [[[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# Encode the same nodes/xpaths with both tokenizer variants (tokenizer_p /
# tokenizer_r — presumably the Python "slow" and Rust "fast" implementations;
# confirm against the fixture setup) and require both to produce exactly the
# pre-computed expected encoding when padded to max_length=20.
encoding_p = tokenizer_p(nodes, xpaths=xpaths, padding='max_length', max_length=20)
encoding_r = tokenizer_r(nodes, xpaths=xpaths, padding='max_length', max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# Next case: a single (non-batched) input together with per-node labels.
(nodes, xpaths) = self.get_nodes_and_xpaths()
node_labels = [1, 2, 3]
expected_results = {'input_ids': [0, 42891, 8331, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'xpath_tags_seq': [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], 'xpath_subs_seq': [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'labels': [(- 100), 1, 2, (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100)], 'attention_mask': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}
# Re-encode with node_labels supplied: both tokenizer variants must again agree
# byte-for-byte with the expected encoding, which now includes a 'labels' list
# (-100 marks positions that should be ignored by the loss).
encoding_p = tokenizer_p(nodes, xpaths=xpaths, node_labels=node_labels, padding='max_length', max_length=20)
encoding_r = tokenizer_r(nodes, xpaths=xpaths, node_labels=node_labels, padding='max_length', max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# Batched case: two examples with differing numbers of nodes and labels.
(nodes, xpaths) = self.get_nodes_and_xpaths_batch()
node_labels = [[1, 2, 3], [2, 46, 17, 22, 3]]
expected_results = {'input_ids': [[0, 42891, 232, 12364, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 42891, 127, 766, 16, 22401, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'xpath_tags_seq': [[[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 
25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]]], 'xpath_subs_seq': [[[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'labels': [[(- 100), 1, (- 100), 2, (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100)], [(- 100), 2, (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100)]], 'attention_mask': [[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
encoding_p = tokenizer_p(nodes, xpaths=xpaths, node_labels=node_labels, padding='max_length', max_length=20)
encoding_r = tokenizer_r(nodes, xpaths=xpaths, node_labels=node_labels, padding='max_length', max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
(question, nodes, xpaths) = self.get_question_nodes_and_xpaths()
expected_results = {'input_ids': [0, 12196, 18, 39, 766, 116, 2, 42891, 232, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'xpath_tags_seq': [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], 'xpath_subs_seq': [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}
encoding_p = tokenizer_p(question, nodes, xpaths, padding='max_length', max_length=20)
encoding_r = tokenizer_r(question, nodes, xpaths, padding='max_length', max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
(questions, nodes, xpaths) = self.get_question_nodes_and_xpaths_batch()
expected_results = {'input_ids': [[0, 12196, 18, 39, 766, 116, 2, 42891, 232, 12364, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 9178, 16, 37, 373, 116, 2, 42891, 127, 766, 16, 22401, 2, 1, 1, 1, 1, 1, 1, 1]], 'xpath_tags_seq': [[[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 
120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]]], 'xpath_subs_seq': [[[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], 
[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]]}
encoding_p = tokenizer_p(questions, nodes, xpaths, padding='max_length', max_length=20)
encoding_r = tokenizer_r(questions, nodes, xpaths, padding='max_length', max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# NOTE(review): the bare string below looks like the message of a stripped
# decorator (most likely `@unittest.skip(...)`) — confirm against upstream.
("Doesn't support another framework than PyTorch")
def test_np_encode_plus_sent_to_model(self):
    # Intentionally a no-op: per the message above, this tokenizer suite
    # only targets PyTorch, so the numpy encode-plus round-trip is skipped.
    pass
def test_padding_warning_message_fast_tokenizer(self):
    """Check the `pad`-after-encode performance warning.

    Fast (Rust) tokenizers should emit exactly one warning recommending
    `__call__` over encode-then-`pad`; slow (Python) tokenizers should stay
    silent (asserted via a dummy warning being the only record).
    """
    if (not self.test_rust_tokenizer):
        return
    (nodes, xpaths) = self.get_nodes_and_xpaths()
    tokenizer_fast = self.get_rust_tokenizer()
    # Skip tokenizers that have no pad token configured.
    self._check_no_pad_token_padding(tokenizer_fast, nodes)
    encoding_fast = tokenizer_fast(nodes, xpaths=xpaths)
    with self.assertLogs('transformers', level='WARNING') as cm:
        tokenizer_fast.pad(encoding_fast)
    # Exactly one warning, and it is the "use __call__" recommendation.
    self.assertEqual(len(cm.records), 1)
    self.assertIn('Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.', cm.records[0].message)
    if (not self.test_slow_tokenizer):
        return
    tokenizer_slow = self.get_tokenizer()
    self._check_no_pad_token_padding(tokenizer_slow, nodes)
    encoding_slow = tokenizer_slow(nodes, xpaths=xpaths)
    with self.assertLogs(level='WARNING') as cm:
        # assertLogs fails if nothing is logged, so emit a dummy warning;
        # the length-1 assertion below then proves `pad` itself logged nothing.
        logger.warning('Dummy warning')
        tokenizer_slow.pad(encoding_slow)
    self.assertEqual(len(cm.records), 1)
    self.assertIn('Dummy warning', cm.records[0].message)
()
('-i', '--input-file', help='The name of the input file containing a molecule to be parameterised.', type=click.Path(exists=True, dir_okay=False, resolve_path=True, readable=True))
('-sm', '--smiles', help='The smiles string of the molecule to be parameterised.', type=click.STRING)
('-m', '--multiplicity', type=click.INT, help='The multiplicity of the molecule used in QM calculations.', default=1)
('-n', '--name', type=click.STRING, help='The name of the molecule, used for fileIO and folder setup.')
('-p', '--protocol', type=protocols, help='The alias of the parametrisation protocol.')
_options
def run(input_file: Optional[str]=None, smiles: Optional[str]=None, name: Optional[str]=None, multiplicity: int=1, end: Optional[str]=None, skip_stages: Optional[List[str]]=None, config: Optional[str]=None, protocol: Optional[str]=None, cores: Optional[int]=None, memory: Optional[int]=None):
    """Run the parametrisation workflow on a single molecule.

    The molecule comes from exactly one of ``input_file`` or ``smiles``
    (``name`` is required with ``smiles``). The workflow runs inside a
    dated ``QUBEKit_<name>_<YYYY_MM_DD>`` working folder.

    Raises:
        RuntimeError: if both or neither of ``input_file``/``smiles`` are
            supplied, or if ``smiles`` is supplied without ``name``.
    """
    if ((input_file is not None) and (smiles is not None)):
        raise RuntimeError('Please supply either the name of the input file or a smiles string not both.')
    # Fail fast with a clear message instead of passing None into
    # Ligand.from_smiles further down.
    if ((input_file is None) and (smiles is None)):
        raise RuntimeError('Please supply either the name of an input file or a smiles string to be parameterised.')
    if (input_file is not None):
        molecule = Ligand.from_file(file_name=input_file, multiplicity=multiplicity)
    else:
        if (name is None):
            raise RuntimeError('Please also pass a name for the molecule when starting from smiles.')
        molecule = Ligand.from_smiles(smiles_string=smiles, name=name, multiplicity=multiplicity)
    # Build the workflow configuration; CLI flags override the config file.
    workflow = prep_config(config_file=config, memory=memory, cores=cores, protocol=protocol)
    with folder_setup(f"QUBEKit_{molecule.name}_{datetime.now().strftime('%Y_%m_%d')}"):
        # Keep a PDB copy of the starting structure alongside the results.
        molecule.to_file(file_name=f'{molecule.name}.pdb')
        workflow.new_workflow(molecule=molecule, skip_stages=skip_stages, end=end)
class TestSelect(BaseTestCase):
    """End-to-end tests for `Page.select` against the static/select.html fixture."""

    def setUp(self):
        super().setUp()
        # Navigate to the fixture page containing the <select> element.
        sync(self.page.goto((self.url + 'static/select.html')))

    async def test_select(self):
        # Selecting a value returns it and fires input/change, bubbling included.
        value = (await self.page.select('select', 'blue'))
        self.assertEqual(value, ['blue'])
        _input = (await self.page.evaluate('result.onInput'))
        self.assertEqual(_input, ['blue'])
        change = (await self.page.evaluate('result.onChange'))
        self.assertEqual(change, ['blue'])
        _input = (await self.page.evaluate('result.onBubblingInput'))
        self.assertEqual(_input, ['blue'])
        change = (await self.page.evaluate('result.onBubblingChange'))
        self.assertEqual(change, ['blue'])

    async def test_select_first_item(self):
        # A single-select keeps only the first of several requested values.
        (await self.page.select('select', 'blue', 'green', 'red'))
        self.assertEqual((await self.page.evaluate('result.onInput')), ['blue'])
        self.assertEqual((await self.page.evaluate('result.onChange')), ['blue'])

    async def test_select_multiple(self):
        # With the multiple attribute set, all requested values are selected.
        (await self.page.evaluate('makeMultiple();'))
        values = (await self.page.select('select', 'blue', 'green', 'red'))
        self.assertEqual(values, ['blue', 'green', 'red'])
        _input = (await self.page.evaluate('result.onInput'))
        self.assertEqual(_input, ['blue', 'green', 'red'])
        change = (await self.page.evaluate('result.onChange'))
        self.assertEqual(change, ['blue', 'green', 'red'])

    async def test_select_not_select_element(self):
        # Calling select on a non-<select> element raises.
        with self.assertRaises(ElementHandleError):
            (await self.page.select('body', ''))

    async def test_select_no_match(self):
        # Unknown option values select nothing.
        values = (await self.page.select('select', 'abc', 'def'))
        self.assertEqual(values, [])

    async def test_return_selected_elements(self):
        # The return value lists every selected option (order not asserted).
        (await self.page.evaluate('makeMultiple()'))
        result = (await self.page.select('select', 'blue', 'black', 'magenta'))
        self.assertEqual(len(result), 3)
        self.assertEqual(set(result), {'blue', 'black', 'magenta'})

    async def test_select_not_multiple(self):
        # Without the multiple attribute, at most one option ends up selected.
        values = (await self.page.select('select', 'blue', 'green', 'red'))
        self.assertEqual(len(values), 1)

    async def test_select_no_value(self):
        # No values given: everything is deselected and nothing returned.
        values = (await self.page.select('select'))
        self.assertEqual(values, [])

    async def test_select_deselect(self):
        # select() with no values clears a previous single selection.
        (await self.page.select('select', 'blue', 'green', 'red'))
        (await self.page.select('select'))
        result = (await self.page.Jeval('select', 'elm => Array.from(elm.options).every(option => !option.selected)'))
        self.assertTrue(result)

    async def test_select_deselect_multiple(self):
        # select() with no values clears a previous multiple selection.
        (await self.page.evaluate('makeMultiple();'))
        (await self.page.select('select', 'blue', 'green', 'red'))
        (await self.page.select('select'))
        result = (await self.page.Jeval('select', 'elm => Array.from(elm.options).every(option => !option.selected)'))
        self.assertTrue(result)

    async def test_select_nonstring(self):
        # Option values must be strings.
        with self.assertRaises(TypeError):
            (await self.page.select('select', 12))
def test_invalid_compute_mask():
    """Conv2D does not support masking: it advertises supports_masking=False,
    passes a None mask through, and rejects any real mask with TypeError."""
    model = Sequential()
    model.add(Conv2D(1, [2, 2], input_shape=[3, 3, 1]))
    conv_layer = model.layers[0]
    assert conv_layer.supports_masking is False
    assert conv_layer.compute_mask([model.input], [None]) is None
    bad_mask = np.array([[0.0, 1.0], [1.0, 0.0]])
    # Both a list-wrapped mask and a bare mask must be rejected.
    for mask_arg in ([bad_mask], bad_mask):
        with pytest.raises(TypeError):
            conv_layer.compute_mask([model.input], mask_arg)
class TopK():
    """Post-processing filter that keeps only the top-k ranked documents.

    Works on a single ranking (list of dicts) or a batch of rankings
    (list of lists of dicts).
    """

    def __init__(self, k: int):
        # Number of documents to keep per ranking.
        self.k = k

    def __repr__(self) -> str:
        # Local name chosen so as not to shadow the builtin `repr`.
        text = f'Filter {self.__class__.__name__}'
        text += f'''
k: {self.k}'''
        return text

    def __call__(self, documents: typing.Union[(typing.List[typing.List[typing.Dict[(str, str)]]], typing.List[typing.Dict[(str, str)]])], **kwargs) -> typing.Union[(typing.List[typing.List[typing.Dict[(str, str)]]], typing.List[typing.Dict[(str, str)]])]:
        """Truncate `documents` to the first k entries (per ranking for a batch)."""
        if (not documents):
            return []
        if isinstance(documents[0], list):
            # Batch of rankings: truncate each one independently.
            return [ranking[:self.k] for ranking in documents]
        return documents[:self.k]
(netloc='fakegitlab', path='/api/v4/projects/4/repository/files/Dockerfile$')
def dockerfile_handler(_, request):
    """Fake GitLab file endpoint: return the Dockerfile blob JSON when the
    bearer token matches, otherwise a 401."""
    if (request.headers.get('Authorization') == 'Bearer foobar'):
        payload = {'file_name': 'Dockerfile', 'file_path': 'Dockerfile', 'size': 10, 'encoding': 'base64', 'content': base64.b64encode(b'hello world').decode('ascii'), 'ref': 'master', 'blob_id': '79f7bbd25901ea9bd021f0e4c83', 'commit_id': 'd5a3ff139356ce33e37e73add446fb50', 'last_commit_id': '570e7b2abdd848b95f2f578043fc23bd6f6fd24d'}
        return {'status_code': 200, 'headers': {'Content-Type': 'application/json'}, 'content': json.dumps(payload)}
    return {'status_code': 401}
class EpisodeDescriptionConfig(object):
    """Holds the sampling configuration for episode descriptions.

    Each "primary" argument (num_ways / num_query / num_support) may be None,
    in which case the corresponding fallback arguments must all be provided
    (typically via gin); otherwise construction fails with RuntimeError.
    """

    def __init__(self, num_ways, num_support, num_query, min_ways, max_ways_upper_bound, max_num_query, max_support_set_size, max_support_size_contrib_per_class, min_log_weight, max_log_weight, ignore_dag_ontology, ignore_bilevel_ontology):
        # Map each primary argument to (value, fallback names, fallback values).
        arg_groups = {
            'num_ways': (num_ways, ('min_ways', 'max_ways_upper_bound'), (min_ways, max_ways_upper_bound)),
            'num_query': (num_query, ('max_num_query',), (max_num_query,)),
            'num_support': (num_support, ('max_support_set_size', 'max_support_size_contrib_per_class', 'min_log_weight', 'max_log_weight'), (max_support_set_size, max_support_size_contrib_per_class, min_log_weight, max_log_weight)),
        }
        for first_arg_name, (first_arg, required_arg_names, required_args) in arg_groups.items():
            if first_arg is not None:
                # Fixed value supplied directly; fallbacks are irrelevant.
                continue
            if all((arg is not None) for arg in required_args):
                continue
            none_arg_names = [name for (var, name) in zip(required_args, required_arg_names) if (var is None)]
            raise RuntimeError(('The following arguments: %s can not be None, since %s is None. Arguments can be set up with gin, for instance by providing `--gin_file=learn/gin/setups/data_config.gin` or calling `gin.parse_config_file(...)` in the code. Please ensure the following gin arguments of EpisodeDescriptionConfig are set: %s' % (none_arg_names, first_arg_name, none_arg_names)))
        self.num_ways = num_ways
        self.num_support = num_support
        self.num_query = num_query
        self.min_ways = min_ways
        self.max_ways_upper_bound = max_ways_upper_bound
        self.max_num_query = max_num_query
        self.max_support_set_size = max_support_set_size
        self.max_support_size_contrib_per_class = max_support_size_contrib_per_class
        self.min_log_weight = min_log_weight
        self.max_log_weight = max_log_weight
        self.ignore_dag_ontology = ignore_dag_ontology
        self.ignore_bilevel_ontology = ignore_bilevel_ontology
def _generate_html(data):
    """Render a reStructuredText string to an HTML body fragment using the
    Pelican HTML translator; raises on docutils errors at level >= 2."""
    settings = {
        'initial_header_level': '2',
        'syntax_highlight': 'short',
        'input_encoding': 'utf-8',
        'exit_status_level': 2,
        'compact_p': False,
        'embed_stylesheet': False,
    }
    publisher = docutils.core.Publisher(source_class=docutils.io.StringInput, destination_class=docutils.io.StringOutput)
    publisher.set_components('standalone', 'restructuredtext', 'html')
    publisher.writer.translator_class = PelicanHTMLTranslator
    publisher.process_programmatic_settings(None, settings, None)
    publisher.set_source(source=data, source_path=None)
    publisher.publish(enable_exit_status=True)
    return publisher.writer.parts['body']
def check_all_auto_mapping_names_in_config_mapping_names():
    """Verify every model type used by the auto mappings is a key of
    `CONFIG_MAPPING_NAMES`; raise with all offending entries otherwise."""
    check_missing_backends()
    # Start from the processor-level mappings, then pull in every
    # `*_MAPPING_NAMES` attribute from the available modeling_auto modules.
    mappings_to_check = {
        'IMAGE_PROCESSOR_MAPPING_NAMES': IMAGE_PROCESSOR_MAPPING_NAMES,
        'FEATURE_EXTRACTOR_MAPPING_NAMES': FEATURE_EXTRACTOR_MAPPING_NAMES,
        'PROCESSOR_MAPPING_NAMES': PROCESSOR_MAPPING_NAMES,
    }
    for module_name in ('modeling_auto', 'modeling_tf_auto', 'modeling_flax_auto'):
        module = getattr(transformers.models.auto, module_name, None)
        if module is None:
            # Backend (TF/Flax) not installed in this environment.
            continue
        mappings_to_check.update({attr: getattr(module, attr) for attr in dir(module) if attr.endswith('_MAPPING_NAMES')})
    failures = [
        f'`{model_type}` appears in the mapping `{name}` but it is not defined in the keys of `CONFIG_MAPPING_NAMES`.'
        for name, mapping in mappings_to_check.items()
        for model_type in mapping
        if model_type not in CONFIG_MAPPING_NAMES
    ]
    if failures:
        raise Exception(f'There were {len(failures)} failures:\n' + '\n'.join(failures))
def get_cfg(cls=CN):
    """Build the example config tree used by the tests: a few nested nodes,
    two deprecated keys, one renamed key and a new-allowed KWARGS subtree."""
    cfg = cls()
    cfg.NUM_GPUS = 8

    train = cls()
    train.HYPERPARAMETER_1 = 0.1
    train.SCALES = (2, 4, 8, 16)
    cfg.TRAIN = train

    model = cls()
    model.TYPE = 'a_foo_model'
    cfg.MODEL = model

    # Three levels of nesting under STR, each with the same two keys.
    bar = cls()
    bar.KEY1 = 1
    bar.KEY2 = 2
    foo = cls()
    foo.KEY1 = 1
    foo.KEY2 = 2
    foo.BAR = bar
    top = cls()
    top.KEY1 = 1
    top.KEY2 = 2
    top.FOO = foo
    cfg.STR = top

    cfg.register_deprecated_key('FINAL_MSG')
    cfg.register_deprecated_key('MODEL.DILATION')
    # (sic) message text kept verbatim from the original fixture.
    cfg.register_renamed_key('EXAMPLE.OLD.KEY', 'EXAMPLE.NEW.KEY', message='Please update your config fil config file.')

    kwargs = cls(new_allowed=True)
    kwargs.z = 0
    y = cls()
    y.X = 1
    kwargs.Y = y
    cfg.KWARGS = kwargs
    return cfg
class DS1000Problem():
    """A single DS-1000 benchmark problem loaded from disk.

    Reads the problem's `.cfg`, text assets and pickled expected answers from
    `problem_path`; can post-process a generated completion and execute it
    against the problem's test cases in an isolated temporary directory.
    """

    def __init__(self, problem_path: Union[(str, Path)]):
        self.problem_path = Path(problem_path)
        # Problem directories are named like "qNNN"; the number is the id.
        self.problem_id = int(self.problem_path.name.replace('q', ''))
        self.data = dict()
        problem_config = configparser.RawConfigParser()
        problem_config.read((self.problem_path / '.cfg'))
        # (section, option) pairs from the config, stored under the option name.
        for args in [('library', 'lib'), ('test', 'test_type'), ('test', 'test_case_cnt'), ('perturbation', 'perturbation_type'), ('perturbation', 'perturbation_origin_id')]:
            self.data[args[1]] = problem_config.get(*args)
        # Text assets keyed by their basename without extension.
        for file_name in ['reference_code.txt', 'test_code.py', 'code_context.txt', 'test_generate_pickle.py', 'prompt.txt']:
            with open((self.problem_path / file_name), 'r', encoding='UTF-8') as f:
                self.data[file_name.split('.')[0]] = f.read()
        # Expected answers: one pickle per test case; None when missing or unreadable.
        self.data['ans'] = []
        test_cnt = max(int(self.data['test_case_cnt']), 1)
        for i in range(1, (test_cnt + 1)):
            if os.path.exists((self.problem_path / f'ans/ans{i}.pkl')):
                try:
                    with open((self.problem_path / 'ans/ans{}.pkl'.format(i)), 'rb') as f:
                        self.data['ans'].append(pickle.load(f))
                except:
                    # NOTE(review): bare except silently maps unreadable pickles to None.
                    self.data['ans'].append(None)
            else:
                self.data['ans'].append(None)
        self.data['source_url'] = ''

    def __getitem__(self, key):
        # Dict-style access to the loaded problem data.
        return self.data[key]

    def keys(self):
        return self.data.keys()

    def values(self):
        return self.data.values()

    def items(self):
        return self.data.items()

    def postprocess(self, generated_code: str):
        """For Matplotlib problems, strip lines that would open GUI windows or
        write figures; other libraries pass through unchanged."""
        if (self.data['lib'] == 'Matplotlib'):
            code_lines = generated_code.split('\n')
            postprocessed_lines = []
            for line in code_lines:
                skip_line_flag = False
                for phrase in ['plt.show()', 'plt.clf()', 'plt.close()', 'savefig']:
                    if (phrase in line):
                        skip_line_flag = True
                        break
                if skip_line_flag:
                    continue
                else:
                    postprocessed_lines.append(line)
            generated_code = '\n'.join(postprocessed_lines)
        return generated_code

    def test(self, generated_code: str):
        """Execute `generated_code` spliced into the problem's scaffold inside a
        temp dir and return True iff every test case's result matches.

        Side effects: temporarily chdirs into the temp dir; runs each test
        case as a subprocess with a 60s timeout.
        """
        cwd = os.getcwd()
        with tempfile.TemporaryDirectory() as tempdir_name:
            # NOTE(review): a fresh TemporaryDirectory is empty, and these names
            # are relative to the *current* cwd, not the temp dir — this loop
            # looks like dead code; confirm against upstream DS-1000.
            for file_name in os.listdir(tempdir_name):
                os.remove(file_name)
            tempdir_name = Path(tempdir_name)
            # Copy the whole problem directory into the sandbox.
            for file_name in os.listdir(self.problem_path):
                if os.path.isfile((self.problem_path / file_name)):
                    shutil.copy((self.problem_path / file_name), (tempdir_name / file_name))
                elif os.path.isdir((self.problem_path / file_name)):
                    shutil.copytree((self.problem_path / file_name), (tempdir_name / file_name))
                else:
                    raise ValueError('impossible.')
            # Start from an empty result/ directory.
            if os.path.exists((tempdir_name / 'result')):
                shutil.rmtree((tempdir_name / 'result'))
            os.mkdir((tempdir_name / 'result'))
            generated_code = self.postprocess(generated_code)
            # Splice the completion into the problem's code scaffold.
            program = self.data['code_context'].replace('[insert]', generated_code)
            with open((tempdir_name / 'program.py'), 'w', encoding='UTF-8') as f:
                f.write(program)
            os.chdir(tempdir_name)
            execution_status = []
            test_cnt = max(1, int(self['test_case_cnt']))
            # Run each test case as a subprocess; results land in result/*.pkl.
            for i in range(1, (test_cnt + 1)):
                cmd_text = f'python program.py --test_case {i}'
                time_limit = 60
                cmd = Command(cmd_text)
                exit_code = cmd.run(timeout=time_limit)
                execution_status.append(exit_code)
            test_module = import_source_file((tempdir_name / 'test_code.py'), 'test_code')
            pass_flag = True
            if (int(self['test_type']) == 3):
                # Test type 3 additionally checks the *text* of the completion
                # (minus print lines) via the problem's stringTest.
                generated_code = generated_code.split('\n')
                for line in generated_code:
                    # NOTE(review): two suspicious constructs here — `'#' not in
                    # line.split('print')` is a *list membership* test (true unless
                    # a split fragment equals '#' exactly), and `remove` mutates
                    # the list being iterated, skipping elements. Confirm intent
                    # against upstream DS-1000 before changing.
                    if (('print' in line) and ('#' not in line.split('print'))):
                        generated_code.remove(line)
                generated_code = '\n'.join(generated_code)
                try:
                    pass_flag = test_module.stringTest(generated_code)
                except:
                    pass_flag = False
            # Compare each produced result pickle against the stored answer.
            for i in range(1, (test_cnt + 1)):
                if (not pass_flag):
                    break
                if (not os.path.exists(f'result/result_{i}.pkl')):
                    pass_flag = False
                else:
                    try:
                        result = pickle.load(open('result/result_{}.pkl'.format(i), 'rb'))
                        expected_result = self.data['ans'][(i - 1)]
                        try:
                            # The per-problem test() returns 1 on success.
                            pass_flag = (test_module.test(result, expected_result) == 1)
                        except:
                            pass_flag = False
                    except:
                        pass_flag = False
            os.chdir(cwd)
        return pass_flag
def sample_info_video(video_frames, time_window, time_stride):
    """Per-video sample counts and their exclusive prefix sums.

    For each video, the number of temporal samples is
    (len(video) - time_window) // time_stride; area_sum_samples[i] is the
    total number of samples in all videos before index i (0 for i == 0).

    Returns:
        (samples, area_sum_samples) as two lists of equal length.
    """
    samples = ([0] * len(video_frames))
    area_sum_samples = ([0] * len(video_frames))
    # Running prefix sum replaces the original O(n^2) `sum(samples[:i])`.
    running_total = 0
    for (i, video) in enumerate(video_frames):
        area_sum_samples[i] = running_total
        samples[i] = ((len(video) - time_window) // time_stride)
        running_total += samples[i]
    return (samples, area_sum_samples)
def composite(*args):
    """Combine Qobjs into a composite system with the appropriate tensor product.

    - All operator-like: plain `tensor`, unless any argument is a
      superoperator, in which case every argument is promoted with
      `to_super` and combined with `super_tensor`.
    - All ket-like: plain `tensor`, unless any argument is an operator-ket,
      in which case plain kets are promoted to operator-kets via their
      projector before `super_tensor`.
    - All bra-like: handled by daggering into the ket case and back.

    Raises:
        TypeError: if any argument is not a Qobj, or the mixture of Qobj
            types matches none of the cases above.
    """
    # Deferred import: avoids a circular import at module load time.
    import qutip.core.superop_reps
    if (not all((isinstance(arg, Qobj) for arg in args))):
        raise TypeError('All arguments must be Qobjs.')
    if all(map(_isoperlike, args)):
        if any((arg.issuper for arg in args)):
            # Mixed operators/superoperators: promote everything to super form.
            return super_tensor(*map(qutip.core.superop_reps.to_super, args))
        return tensor(*args)
    if all(map(_isketlike, args)):
        if any((arg.isoperket for arg in args)):
            # Mixed kets/operator-kets: lift plain kets via their projectors.
            return super_tensor(*((arg if arg.isoperket else operator_to_vector(arg.proj())) for arg in args))
        return tensor(*args)
    if all(map(_isbralike, args)):
        # Reuse the ket logic on the daggered arguments, then dagger back.
        return composite(*(arg.dag() for arg in args)).dag()
    raise TypeError('Unsupported Qobj types [{}].'.format(', '.join((arg.type for arg in args))))
.parametrize(('requirement_string', 'expected'), [('extras_dep', None), ('missing_dep', ('missing_dep',)), ('requireless_dep', None), ('extras_dep[undefined_extra]', None), ('extras_dep[extra-without-associated-deps]', None), ('extras_dep[extra-with-unmet-deps]', ('extras_dep[extra-with-unmet-deps]', 'unmet_dep; extra == "extra-with-unmet-deps"')), ('extras_dep[recursive-extra-with-unmet-deps]', ('extras_dep[recursive-extra-with-unmet-deps]', 'recursive_dep; extra == "recursive-extra-with-unmet-deps"', 'recursive_unmet_dep')), ('extras_dep[extra-with-met-deps]', None), ('missing_dep; python_version>"10"', None), ('missing_dep; python_version<="1"', None), ('missing_dep; python_version>="1"', ('missing_dep; python_version >= "1"',)), ('extras_dep == 1.0.0', None), ('extras_dep == 2.0.0', ('extras_dep==2.0.0',)), ('extras_dep[extra-without-associated-deps] == 1.0.0', None), ('extras_dep[extra-without-associated-deps] == 2.0.0', ('extras_dep[extra-without-associated-deps]==2.0.0',)), ('prerelease_dep >= 1.0.0', None), ('circular_dep', None)])
def test_check_dependency(monkeypatch, requirement_string, expected):
    """check_dependency yields the chain of unmet requirements (or nothing);
    only the first yielded item — or None — is compared to `expected`."""
    monkeypatch.setattr(_importlib.metadata, 'Distribution', MockDistribution)
    first_unmet = next(build.check_dependency(requirement_string), None)
    assert first_unmet == expected
def get_decode_dir_name(ckpt_name):
    """Build the decode output directory name from the dataset split, decoding
    mode and the relevant FLAGS, optionally suffixed with a checkpoint name."""
    # Infer the split from either data path; 'train' is checked first, matching
    # the original precedence.
    for candidate in ('train', 'val', 'test'):
        if (candidate in FLAGS.full_data_path) or (candidate in FLAGS.partial_data_path):
            dataset = candidate
            break
    else:
        raise ValueError(('FLAGS.full_data_path or FLAGS.partial_data_path %s should contain one of train, val or test' % FLAGS.full_data_path))
    full_or_partial = ('partial' if FLAGS.partial_decoding else 'full')
    dirname = ('decode_%s_%s_%s_%imaxenc_%ibeam_%imindec_%imaxdec' % (dataset, full_or_partial, FLAGS.decode_name, FLAGS.max_enc_steps, FLAGS.beam_size, FLAGS.min_dec_steps, FLAGS.max_dec_steps))
    if ckpt_name is not None:
        dirname += ('_%s' % ckpt_name)
    return dirname
class DevDataset(Dataset):
    """Dev-split dataset: a thin Dataset wrapper around MultiTaskWrapper."""

    def __init__(self, meta_args, tasks_dev_data):
        # tasks_dev_data: args-path -> dataset mapping consumed by MultiTaskWrapper.
        self.meta_args = meta_args
        self.meta_dev_data = MultiTaskWrapper(args_path2dataset=tasks_dev_data, meta_args=meta_args, section='dev')

    def __getitem__(self, index) -> T_co:
        # Delegate indexing to the wrapped multi-task data.
        return self.meta_dev_data[index]

    def __len__(self):
        return len(self.meta_dev_data)
class PositionwiseFeedForward(nn.Module):
    """Position-wise feed-forward block: two 1-D convolutions with a ReLU in
    between, followed by dropout, a residual connection and layer norm."""

    def __init__(self, d_in, d_hid, dropout=0.1):
        super().__init__()
        # Kernel sizes/paddings come from the global hyper-parameter module `hp`.
        self.w_1 = nn.Conv1d(d_in, d_hid, kernel_size=hp.fft_conv1d_kernel[0], padding=hp.fft_conv1d_padding[0])
        self.w_2 = nn.Conv1d(d_hid, d_in, kernel_size=hp.fft_conv1d_kernel[1], padding=hp.fft_conv1d_padding[1])
        self.layer_norm = nn.LayerNorm(d_in)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # Conv1d wants (batch, channels, time), so transpose around the convs.
        hidden = F.relu(self.w_1(x.transpose(1, 2)))
        projected = self.w_2(hidden).transpose(1, 2)
        # Residual connection, then layer norm.
        return self.layer_norm(self.dropout(projected) + x)
class TestWindow(pyglet.window.Window):
    """Window showing an HTML document in an incremental layout with a caret;
    supports resizing, scrolling and Tab insertion."""

    def __init__(self, *args, **kwargs):
        super(TestWindow, self).__init__(*args, **kwargs)
        self.batch = pyglet.graphics.Batch()
        self.document = pyglet.text.decode_html(doctext)
        self.margin = 2
        # Layout fills the window minus a margin on every side.
        usable_width = self.width - 2 * self.margin
        usable_height = self.height - 2 * self.margin
        self.layout = layout.IncrementalTextLayout(self.document, usable_width, usable_height, multiline=True, batch=self.batch)
        self.caret = caret.Caret(self.layout)
        self.push_handlers(self.caret)
        self.set_mouse_cursor(self.get_system_mouse_cursor('text'))

    def on_resize(self, width, height):
        super(TestWindow, self).on_resize(width, height)
        # Batch the geometry changes into a single layout update.
        self.layout.begin_update()
        self.layout.x = self.margin
        self.layout.y = self.margin
        self.layout.width = width - 2 * self.margin
        self.layout.height = height - 2 * self.margin
        self.layout.end_update()

    def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
        # 16 pixels of vertical scroll per wheel click.
        self.layout.view_x -= scroll_x
        self.layout.view_y += 16 * scroll_y

    def on_draw(self):
        pyglet.gl.glClearColor(1, 1, 1, 1)
        self.clear()
        self.batch.draw()

    def on_key_press(self, symbol, modifiers):
        super(TestWindow, self).on_key_press(symbol, modifiers)
        if symbol == pyglet.window.key.TAB:
            self.caret.on_text('\t')
def model_info(model, verbose=True):
    """Print a parameter/gradient summary of a torch model; optionally one row
    per named parameter, plus FLOPs via thop when that package is available."""
    param_total = sum(p.numel() for p in model.parameters())
    grad_total = sum(p.numel() for p in model.parameters() if p.requires_grad)
    device = next(model.parameters()).device
    if verbose:
        print('%5s %60s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
        for i, (name, p) in enumerate(model.named_parameters()):
            name = name.replace('module_list.', '')
            print('%5g %60s %9s %12g %20s %10.3g %10.3g' % (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
    try:
        # thop is optional: fall back to an empty FLOPs string if unavailable.
        from thop import profile
        total_ops, total_params = profile(deepcopy(model), inputs=(torch.zeros(1, 3, 640, 640).to(device), None), verbose=False)
        fs = ', Params:%.3f M ,FLOPs:%.3f G' % (total_params / 1000 ** 2, total_ops / 1000 ** 3)
    except Exception as e:
        print(e)
        fs = ''
    print('Input size(640, 640),Model Summary: %g layers, %g parameters, %g gradients%s' % (len(list(model.parameters())), param_total, grad_total, fs))
def info_from_p2p_addr(addr: Multiaddr) -> PeerInfo:
    """Extract a PeerInfo from a multiaddr whose last component is /p2p/<id>.

    Raises InvalidAddrError when addr is None, empty, or does not end with a
    P_P2P component.
    """
    if addr is None:
        raise InvalidAddrError('`addr` should not be `None`')
    parts = addr.split()
    if not parts:
        raise InvalidAddrError(f'`parts`={parts} should at least have a protocol `P_P2P`')
    p2p_part = parts[-1]
    last_protocol_code = p2p_part.protocols()[0].code
    if last_protocol_code != protocols.P_P2P:
        raise InvalidAddrError(f'The last protocol should be `P_P2P` instead of `{last_protocol_code}`')
    peer_id = PeerID.from_base58(p2p_part.value_for_protocol(protocols.P_P2P))
    if len(parts) > 1:
        # Strip the trailing /p2p/<id> to recover the transport address.
        addr = Multiaddr.join(*parts[:-1])
    return PeerInfo(peer_id, [addr])
.parametrize('method, signal, timeout', [('waitSignal', None, None), ('waitSignal', None, 1000), ('waitSignals', [], None), ('waitSignals', [], 1000), ('waitSignals', None, None), ('waitSignals', None, 1000)])
def test_signal_blocker_none(qtbot, method, signal, timeout):
    # waitSignal/waitSignals must reject a None (or empty-list) signal
    # argument with ValueError regardless of the timeout value; the
    # combinations are supplied by the (stripped) parametrize decorator above.
    meth = getattr(qtbot, method)
    with pytest.raises(ValueError):
        meth(signal, timeout=timeout).wait()
class TFVisualization(unittest.TestCase):
    """Smoke tests for the TF weight-range visualization utilities."""

    def _run_single_layer_visualization(self, visualize_fn):
        """Shared fixture: build a ResNet50 graph in a fresh session, run
        `visualize_fn` on the first conv op, and always close the session."""
        tf.compat.v1.reset_default_graph()
        _ = ResNet50(weights=None)
        model = tf.compat.v1.get_default_graph()
        init = tf.compat.v1.global_variables_initializer()
        sess = tf.compat.v1.Session(graph=model)
        try:
            sess.run(init)
            results_dir = 'artifacts'
            if not os.path.exists('artifacts'):
                os.makedirs('artifacts')
            conv_op = sess.graph.get_operation_by_name('conv1_conv/Conv2D')
            visualize_fn(sess, conv_op, results_dir)
        finally:
            # Close even when the visualization raises, so graphs don't leak
            # between tests.
            sess.close()

    def test_visualize_weight_ranges_single_layer(self):
        self._run_single_layer_visualization(plotting_utils.visualize_weight_ranges_single_layer)

    def test_visualize_relative_weight_ranges_single_layer(self):
        self._run_single_layer_visualization(plotting_utils.visualize_relative_weight_ranges_single_layer)
.parametrize('dist_args, size, cm', [pytest.param([set_test_value(pt.dvector(), np.array([100000, 1, 1], dtype=np.float64))], None, contextlib.suppress()), pytest.param([set_test_value(pt.dmatrix(), np.array([[100000, 1, 1], [1, 100000, 1], [1, 1, 100000]], dtype=np.float64))], (10, 3), contextlib.suppress()), pytest.param([set_test_value(pt.dmatrix(), np.array([[100000, 1, 1]], dtype=np.float64))], (5, 4, 3), contextlib.suppress()), pytest.param([set_test_value(pt.dmatrix(), np.array([[100000, 1, 1], [1, 100000, 1], [1, 1, 100000]], dtype=np.float64))], (10, 4), pytest.raises(ValueError, match='objects cannot be broadcast to a single shape'))])
def test_CategoricalRV(dist_args, size, cm):
    # Compare the Numba backend's `categorical` against the Python reference
    # for the parametrized probability vectors/matrices and sizes; `cm` is a
    # context manager that either suppresses nothing or expects the
    # broadcast-mismatch ValueError (see the stripped parametrize above).
    rng = shared(np.random.RandomState(29402))
    g = ptr.categorical(*dist_args, size=size, rng=rng)
    g_fg = FunctionGraph(outputs=[g])
    with cm:
        # Feed only the non-shared, non-constant inputs their test values.
        compare_numba_and_py(g_fg, [i.tag.test_value for i in g_fg.inputs if (not isinstance(i, (SharedVariable, Constant)))])
def create_rand_tensors_given_shapes(input_shape: Union[(Tuple, List[Tuple])]) -> List[np.ndarray]:
    """Create uniform-random numpy tensors for the given shape(s).

    Args:
        input_shape: a single shape tuple, or a list of shape tuples.

    Returns:
        A list of `np.ndarray`, one per shape (a single shape yields a
        one-element list).
    """
    # isinstance against the builtin `list`: isinstance with `typing.List`
    # is deprecated and behaves identically here.
    if isinstance(input_shape, list):
        input_shapes = input_shape
    else:
        input_shapes = [input_shape]
    return [np.random.rand(*shape) for shape in input_shapes]
.parametrize('transform, gcps, rpcs', [((Affine.identity() * Affine.scale(2.0)), None, None), (None, [rasterio.control.GroundControlPoint(0, 0, 0, 0, 0)], None), (None, None, gen_rpcs())])
def test_no_notgeoref_warning(transform, gcps, rpcs):
    # A dataset georeferenced by any one of: an affine transform, GCPs, or
    # RPCs (parametrized above) must not raise any warning — in particular
    # NotGeoreferencedWarning — when re-opened.
    with rasterio.MemoryFile() as mem:
        with mem.open(driver='GTiff', width=10, height=10, dtype='uint8', count=1, transform=transform) as src:
            if gcps:
                src.gcps = (gcps, rasterio.crs.CRS.from_epsg(4326))
            if rpcs:
                src.rpcs = rpcs
        # pytest.warns(None) records all warnings so we can assert none fired.
        # NOTE(review): `pytest.warns(None)` is deprecated in pytest >= 7 —
        # confirm the project's pytest pin before upgrading.
        with pytest.warns(None) as record:
            with mem.open() as dst:
                pass
        assert (len(record) == 0)
.mosaiqdb
def test_get_qcls_by_date(connection: pymedphys.mosaiq.Connection):
    # get_qcls_by_date should return completed QCLs whose completion time
    # falls inside [start, end], and exclude uncompleted ones.
    a_completion_datetime = QCL_COMPLETED_DATETIMES[0]
    # Wide window (+/- 90 days): every completed QCL is included, the
    # uncompleted one is not (it only has a due date).
    large_time_delta = np.timedelta64(90, 'D')
    start = (np.datetime64(a_completion_datetime) - large_time_delta)
    end = (np.datetime64(a_completion_datetime) + large_time_delta)
    qcls_by_date = helpers.get_qcls_by_date(connection, QCL_LOCATION, start, end)
    assert (np.datetime64(AN_UNCOMPLETED_QCL_DUE_DATETIME) not in qcls_by_date['due'].tolist())
    for dt in QCL_COMPLETED_DATETIMES:
        assert (np.datetime64(dt) in qcls_by_date['actual_completed_time'].tolist())
    # Narrow window (+/- 3 seconds): only the one targeted QCL is included.
    small_time_delta = np.timedelta64(3, 's')
    start = (np.datetime64(a_completion_datetime) - small_time_delta)
    end = (np.datetime64(a_completion_datetime) + small_time_delta)
    qcls_by_date = helpers.get_qcls_by_date(connection, QCL_LOCATION, start, end)
    assert (np.datetime64(a_completion_datetime) in qcls_by_date['actual_completed_time'].tolist())
    for dt in list(set(QCL_COMPLETED_DATETIMES).difference({a_completion_datetime})):
        assert (np.datetime64(dt) not in qcls_by_date['actual_completed_time'].tolist())
class SepConvLSTM2DCell(DropoutRNNCellMixin, Layer):
    """Cell for a ConvLSTM2D variant that uses depthwise-separable convolutions.

    Each of the four LSTM gates (i, f, c, o) applies a separable convolution
    (depthwise kernel followed by a 1x1 pointwise kernel) to both the input and
    the recurrent hidden state, instead of a full convolution.

    State is ``[h, c]`` where both tensors have ``filters`` channels.
    """

    def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), depth_multiplier=1, activation='tanh', recurrent_activation='hard_sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, **kwargs):
        super(SepConvLSTM2DCell, self).__init__(**kwargs)
        self.filters = filters
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2, 'dilation_rate')
        self.depth_multiplier = depth_multiplier
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        # Dropout rates clamped to the valid [0, 1] range.
        self.dropout = min(1.0, max(0.0, dropout))
        self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout))
        self.state_size = (self.filters, self.filters)

    def build(self, input_shape):
        """Create the (recurrent) depthwise/pointwise kernels and the bias.

        All four gates are packed into a single weight along the last axis
        (hence the ``* 4`` factors) and split apart in ``call``.
        """
        if (self.data_format == 'channels_first'):
            channel_axis = 1
        else:
            channel_axis = (- 1)
        if (input_shape[channel_axis] is None):
            raise ValueError('The channel dimension of the inputs should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        depth_kernel_shape = (self.kernel_size + (input_dim, (self.depth_multiplier * 4)))
        point_kernel_shape = ((1, 1) + ((input_dim * self.depth_multiplier), (self.filters * 4)))
        # (the original assigned these two attributes twice; the duplicate
        # assignments were removed as dead code)
        self.depth_kernel_shape = depth_kernel_shape
        self.point_kernel_shape = point_kernel_shape
        recurrent_depth_kernel_shape = (self.kernel_size + (self.filters, (self.depth_multiplier * 4)))
        recurrent_point_kernel_shape = ((1, 1) + ((self.filters * self.depth_multiplier), (self.filters * 4)))
        self.depth_kernel = self.add_weight(shape=depth_kernel_shape, initializer=self.kernel_initializer, name='depth_kernel', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint)
        self.point_kernel = self.add_weight(shape=point_kernel_shape, initializer=self.kernel_initializer, name='point_kernel', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint)
        self.recurrent_depth_kernel = self.add_weight(shape=recurrent_depth_kernel_shape, initializer=self.recurrent_initializer, name='recurrent_depth_kernel', regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint)
        self.recurrent_point_kernel = self.add_weight(shape=recurrent_point_kernel_shape, initializer=self.recurrent_initializer, name='recurrent_point_kernel', regularizer=self.recurrent_regularizer, constraint=self.recurrent_constraint)
        if self.use_bias:
            if self.unit_forget_bias:
                # Standard LSTM trick: initialize the forget-gate bias to ones
                # so the cell remembers by default early in training.
                def bias_initializer(_, *args, **kwargs):
                    return K.concatenate([self.bias_initializer((self.filters,), *args, **kwargs), initializers.Ones()((self.filters,), *args, **kwargs), self.bias_initializer(((self.filters * 2),), *args, **kwargs)])
            else:
                bias_initializer = self.bias_initializer
            self.bias = self.add_weight(shape=((self.filters * 4),), name='bias', initializer=bias_initializer, regularizer=self.bias_regularizer, constraint=self.bias_constraint)
        else:
            self.bias = None
        self.built = True

    def call(self, inputs, states, training=None):
        """One LSTM step; returns ``(h, [h, c])``."""
        h_tm1 = states[0]
        c_tm1 = states[1]
        dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)
        rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(h_tm1, training, count=4)
        # Apply an independent dropout mask per gate (only when the rate is
        # strictly inside (0, 1); 0 means disabled, and a rate of exactly 1
        # falls through to the undropped tensors).
        if (0 < self.dropout < 1.0):
            inputs_i = (inputs * dp_mask[0])
            inputs_f = (inputs * dp_mask[1])
            inputs_c = (inputs * dp_mask[2])
            inputs_o = (inputs * dp_mask[3])
        else:
            inputs_i = inputs
            inputs_f = inputs
            inputs_c = inputs
            inputs_o = inputs
        if (0 < self.recurrent_dropout < 1.0):
            h_tm1_i = (h_tm1 * rec_dp_mask[0])
            h_tm1_f = (h_tm1 * rec_dp_mask[1])
            h_tm1_c = (h_tm1 * rec_dp_mask[2])
            h_tm1_o = (h_tm1 * rec_dp_mask[3])
        else:
            h_tm1_i = h_tm1
            h_tm1_f = h_tm1
            h_tm1_c = h_tm1
            h_tm1_o = h_tm1
        # Unpack the per-gate kernels from the packed weights.
        (depth_kernel_i, depth_kernel_f, depth_kernel_c, depth_kernel_o) = array_ops.split(self.depth_kernel, 4, axis=3)
        (recurrent_depth_kernel_i, recurrent_depth_kernel_f, recurrent_depth_kernel_c, recurrent_depth_kernel_o) = array_ops.split(self.recurrent_depth_kernel, 4, axis=3)
        (point_kernel_i, point_kernel_f, point_kernel_c, point_kernel_o) = array_ops.split(self.point_kernel, 4, axis=3)
        (recurrent_point_kernel_i, recurrent_point_kernel_f, recurrent_point_kernel_c, recurrent_point_kernel_o) = array_ops.split(self.recurrent_point_kernel, 4, axis=3)
        if self.use_bias:
            (bias_i, bias_f, bias_c, bias_o) = array_ops.split(self.bias, 4)
        else:
            (bias_i, bias_f, bias_c, bias_o) = (None, None, None, None)
        x_i = self.input_conv(inputs_i, depth_kernel_i, point_kernel_i, bias_i, padding=self.padding)
        x_f = self.input_conv(inputs_f, depth_kernel_f, point_kernel_f, bias_f, padding=self.padding)
        x_c = self.input_conv(inputs_c, depth_kernel_c, point_kernel_c, bias_c, padding=self.padding)
        x_o = self.input_conv(inputs_o, depth_kernel_o, point_kernel_o, bias_o, padding=self.padding)
        h_i = self.recurrent_conv(h_tm1_i, recurrent_depth_kernel_i, recurrent_point_kernel_i)
        h_f = self.recurrent_conv(h_tm1_f, recurrent_depth_kernel_f, recurrent_point_kernel_f)
        h_c = self.recurrent_conv(h_tm1_c, recurrent_depth_kernel_c, recurrent_point_kernel_c)
        h_o = self.recurrent_conv(h_tm1_o, recurrent_depth_kernel_o, recurrent_point_kernel_o)
        # Standard LSTM gate equations.
        i = self.recurrent_activation((x_i + h_i))
        f = self.recurrent_activation((x_f + h_f))
        c = ((f * c_tm1) + (i * self.activation((x_c + h_c))))
        o = self.recurrent_activation((x_o + h_o))
        h = (o * self.activation(c))
        return (h, [h, c])

    def input_conv(self, x, dw, pw, b=None, padding='valid'):
        """Separable conv on the input, with optional bias."""
        conv_out = K.separable_conv2d(x, dw, pw, strides=self.strides, padding=padding, data_format=self.data_format, dilation_rate=self.dilation_rate)
        if (b is not None):
            conv_out = K.bias_add(conv_out, b, data_format=self.data_format)
        return conv_out

    def recurrent_conv(self, x, dw, pw):
        """Separable conv on the hidden state; stride 1 / 'same' so h keeps its spatial size."""
        conv_out = K.separable_conv2d(x, dw, pw, strides=(1, 1), padding='same', data_format=self.data_format)
        return conv_out

    def get_config(self):
        """Return the config dict needed to re-create this cell."""
        config = {'filters': self.filters, 'kernel_size': self.kernel_size, 'strides': self.strides, 'padding': self.padding, 'data_format': self.data_format, 'dilation_rate': self.dilation_rate, 'depth_multiplier': self.depth_multiplier, 'activation': activations.serialize(self.activation), 'recurrent_activation': activations.serialize(self.recurrent_activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'unit_forget_bias': self.unit_forget_bias, 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'recurrent_constraint': constraints.serialize(self.recurrent_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint), 'dropout': self.dropout, 'recurrent_dropout': self.recurrent_dropout}
        base_config = super(SepConvLSTM2DCell, self).get_config()
        return dict((list(base_config.items()) + list(config.items())))
class Prf():
    """HMAC-based pseudo-random function for IKE key derivation.

    Maps an IKEv1 hash id or IKEv2 PRF id to a ``(hash constructor,
    output size in bytes)`` pair and provides ``prf`` (a single HMAC) and
    ``prfplus`` (the iterated prf+ construction of RFC 7296 section 2.13,
    yielding an effectively unbounded keystream one byte at a time).
    """

    # IKEv1 hash algorithm id -> (hashlib constructor, digest length)
    DIGESTS_1 = {enums.HashId_1.MD5: (hashlib.md5, 16), enums.HashId_1.SHA: (hashlib.sha1, 20), enums.HashId_1.SHA2_256: (hashlib.sha256, 32), enums.HashId_1.SHA2_384: (hashlib.sha384, 48), enums.HashId_1.SHA2_512: (hashlib.sha512, 64)}
    # IKEv2 PRF id -> (hashlib constructor, digest length)
    DIGESTS = {enums.PrfId.PRF_HMAC_MD5: (hashlib.md5, 16), enums.PrfId.PRF_HMAC_SHA1: (hashlib.sha1, 20), enums.PrfId.PRF_HMAC_SHA2_256: (hashlib.sha256, 32), enums.PrfId.PRF_HMAC_SHA2_384: (hashlib.sha384, 48), enums.PrfId.PRF_HMAC_SHA2_512: (hashlib.sha512, 64)}

    def __init__(self, transform):
        # isinstance() instead of ``type(...) is``; enum members are direct
        # instances of their enum class, so behavior is unchanged.
        (self.hasher, self.key_size) = (self.DIGESTS[transform] if isinstance(transform, enums.PrfId) else self.DIGESTS_1[transform])

    def prf(self, key, data):
        """Return ``HMAC(key, data)`` with the configured hash."""
        return hmac.HMAC(key, data, digestmod=self.hasher).digest()

    def prfplus(self, key, seed, count=True):
        """Yield bytes of prf+(key, seed); ``count=False`` omits the counter octet."""
        temp = bytes()
        # T(i) = prf(key, T(i-1) | seed | i); the counter octet starts at 1.
        for i in range(1, 1024):
            temp = self.prf(key, ((temp + seed) + (bytes([i]) if count else b'')))
            (yield from temp)
# NOTE(review): the three lines below look like mangled decorators — probably
# a CLI framework's ``@command()`` / ``@argument('project_name')`` and a
# usage-tracking decorator. Confirm against the original source; as written
# they are bare expressions / a NameError.
()
('project_name')
_tracking
def init(project_name):
    """Scaffold a new dbt template project named *project_name*.

    Copies the bundled ``dbt_template`` directory next to this file into a new
    ``project_name`` directory, renders ``dbt_project.yml`` for it, and runs
    ``dbt deps`` inside it, reporting SUCCESS/FAILURE.
    """
    print(f'Creating {project_name} template project')
    dir_path = os.path.dirname(os.path.realpath(__file__))
    shutil.copytree(os.path.join(dir_path, 'dbt_template'), project_name)
    with open(f'{project_name}/dbt_project.yml', 'w') as f:
        f.write(render.render_dbt_project(project_name))
    # os.system returns the shell exit status: 0 (falsy) means success.
    bash_command = f'cd {project_name} && dbt deps'
    response = os.system(bash_command)
    if (not response):
        info = chalk.green('SUCCESS')
    else:
        info = chalk.red('FAILURE')
    print(f'Creating {project_name} template project', info)
    if (not response):
        # Remind the user of the remaining manual setup steps.
        print(f'Setup profile & re_data:schemas var in dbt_project.yml', 'INFO')
def final_i_index_finder(min_switch_ind, i_omega, m_omega):
    """Return the right-insertion index of ``m_omega[min_switch_ind]`` in ``i_omega``.

    :param min_switch_ind: index into ``m_omega`` selecting the pivot value
    :param i_omega: sorted list of values to search in
    :param m_omega: list holding the pivot value
    :return: plain ``int`` index (everything at or before it in ``i_omega``
        is <= the pivot, per ``side='right'``)
    :raises AssertionError: if an argument has the wrong type
    """
    # isinstance() is the idiomatic type check (``type(x) == int`` was used
    # before; the AssertionError contract is preserved).
    assert isinstance(min_switch_ind, int), 'min_switch_ind should be an int.'
    assert isinstance(i_omega, list), 'i_omega should be a list.'
    assert isinstance(m_omega, list), 'm_omega should be a list.'
    # np.searchsorted returns a numpy integer; convert to a builtin int.
    return int(np.searchsorted(i_omega, m_omega[min_switch_ind], side='right'))
class Extension():
    """Base class for CLI extensions.

    Subclasses provide a docstring (used as CLI help text) and implement
    ``activate`` to transform the action list.

    NOTE(review): ``name``, ``flag`` and ``help_text`` are consumed as
    attributes (no call parentheses) in ``flag``, ``augment_cli`` and
    ``activate``, so they are restored here as properties — the ``@property``
    decorators appear to have been stripped from the original text.
    """

    # Presumably whether the extension is persisted in project config —
    # confirm against callers.
    persist = True

    def __init__(self, name: Optional[str]=None):
        # Default name: snake_cased class name.
        self._name = (name or underscore(self.__class__.__name__))

    @property
    def name(self):
        """Canonical (snake_case) extension name."""
        return self._name

    @property
    def flag(self) -> str:
        """Command-line flag derived from the name, e.g. ``--my-extension``."""
        return f'--{dasherize(self.name)}'

    @property
    def help_text(self) -> str:
        """CLI help text: the subclass docstring, dedented, first letter lowered."""
        if (self.__doc__ is None):
            raise NotImplementedError('Please provide a help text for your extension')
        doc = textwrap.dedent(self.__doc__)
        return (doc[0].lower() + doc[1:])

    def augment_cli(self, parser: argparse.ArgumentParser):
        """Register this extension's flag on *parser*; returns self for chaining."""
        parser.add_argument(self.flag, dest='extensions', action='append_const', const=self, help=self.help_text)
        return self

    def activate(self, actions: List[Action]) -> List[Action]:
        """Transform the action list; must be overridden by subclasses."""
        raise NotImplementedError(f'Extension {self.name} has no actions registered')

    # Expose the module-level helpers as static convenience methods.
    register = staticmethod(register)
    unregister = staticmethod(unregister)

    def __call__(self, actions: List[Action]) -> List[Action]:
        return self.activate(actions)
class CostFuncWrapper():
    """Callable wrapper around a log-probability (and optional gradient) function.

    Negates logp (and its gradient) so a minimizer can be used, counts
    evaluations, stops after ``maxeval`` by raising ``StopIteration``, and
    optionally drives a progress bar.
    """

    def __init__(self, maxeval=5000, progressbar=True, logp_func=None, dlogp_func=None):
        self.n_eval = 0
        self.maxeval = maxeval
        self.logp_func = logp_func
        if (dlogp_func is None):
            self.use_gradient = False
            self.desc = 'logp = {:,.5g}'
        else:
            self.dlogp_func = dlogp_func
            self.use_gradient = True
            self.desc = 'logp = {:,.5g}, ||grad|| = {:,.5g}'
        self.previous_x = None
        self.progressbar = progressbar
        if progressbar:
            self.progress = progress_bar(range(maxeval), total=maxeval, display=progressbar)
            self.progress.update(0)
        else:
            self.progress = range(maxeval)

    def __call__(self, x):
        """Return the negated logp at *x* (and its negated gradient when available)."""
        neg_value = np.float64(self.logp_func(pm.floatX(x)))
        value = ((- 1.0) * neg_value)
        # BUGFIX: grad must be defined even when no gradient function was
        # provided — it is passed to update_progress_desc below, and the
        # original raised UnboundLocalError in the gradient-free case.
        grad = None
        if self.use_gradient:
            neg_grad = self.dlogp_func(pm.floatX(x))
            if np.all(np.isfinite(neg_grad)):
                self.previous_x = x
                grad = ((- 1.0) * neg_grad)
                grad = grad.astype(np.float64)
            else:
                # Non-finite gradient: remember x but report no gradient.
                self.previous_x = x
                grad = None
        if ((self.n_eval % 10) == 0):
            self.update_progress_desc(neg_value, grad)
        if (self.n_eval > self.maxeval):
            self.update_progress_desc(neg_value, grad)
            # Sentinel the caller uses to stop the optimization loop.
            raise StopIteration
        self.n_eval += 1
        if self.progressbar:
            assert isinstance(self.progress, ProgressBar)
            self.progress.update_bar(self.n_eval)
        if self.use_gradient:
            return (value, grad)
        else:
            return value

    def update_progress_desc(self, neg_value: float, grad=None) -> None:
        """Refresh the progress-bar caption (no-op when the bar is disabled).

        *grad* is an ndarray or None (the original ``np.float64`` annotation
        was incorrect).
        """
        if self.progressbar:
            if (grad is None):
                self.progress.comment = self.desc.format(neg_value)
            else:
                norm_grad = np.linalg.norm(grad)
                self.progress.comment = self.desc.format(neg_value, norm_grad)
# NOTE(review): both lines below began with bare ``.parametrize`` — the
# ``@pytest.mark`` prefix was evidently lost; restored in the original order.
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_list(db, client, username, password, project_id):
    """Snapshot list endpoint: permitted users get their snapshots, others get 404."""
    client.login(username=username, password=password)
    url = reverse(urlnames['list'], args=[project_id])
    response = client.get(url)
    if (project_id in view_snapshot_permission_map.get(username, [])):
        assert (response.status_code == 200)
        assert isinstance(response.json(), list)
        if (username == 'user'):
            # Plain users see no snapshots at all.
            assert (sorted([item['id'] for item in response.json()]) == [])
        else:
            values_list = Snapshot.objects.filter(project_id=project_id).order_by('id').values_list('id', flat=True)
            assert (sorted([item['id'] for item in response.json()]) == list(values_list))
    else:
        assert (response.status_code == 404)
def solar_holiday_to_number(string) -> str:
    """Replace solar-calendar holiday names in *string* with month-day numbers.

    NOTE(review): every key of ``solar`` is the empty string — the original
    (presumably Korean) holiday names were destroyed by an encoding/extraction
    loss and cannot be reconstructed from this file. As written the joined
    pattern is all empty alternatives, so ``re.sub`` matches the empty string
    at every position; this function is broken until the keys are restored
    from the original source.
    """
    solar = {'': '11', '': '214', '': '22', '': '38', '': '312', '': '322', '': '41', '': '422', '': '423', '': '51', '': '54', '': '54', '': '512', '': '518', '': '519', '': '61', '': '71', '': '711', '': '81', '': '910', '': '918', '': '101', '': '118', '': '1117', '': '121', '': '124', '': '1224', '': '1225'}
    # Alternation of all holiday names; each match is replaced via dict lookup.
    solar_holiday = '|'.join(solar.keys())
    result = re.sub(pattern=solar_holiday, repl=(lambda x: solar[x.group()]), string=string)
    return result
def run(parser, args):
    """Print one line per codes entry (kind, codes, deltat, count) in the squirrel."""
    from pyrocko import squirrel as sq
    squirrel = args.make_squirrel()
    query = args.squirrel_query
    # Default to every supported content kind when none was requested.
    kinds = query.pop('kind', sq.supported_content_kinds())
    codes_query = query.pop('codes', None)
    for kind in kinds:
        entries = squirrel._iter_codes_info(kind=kind, codes=codes_query)
        for kind_id, codes, deltat, _, count in sorted(entries):
            print(to_kind(kind_id), codes, deltat, count)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.