code stringlengths 281 23.7M |
|---|
class RequestScope(Scope):
    """Scope that caches providers per request using context-local storage.

    Each request gets its own ``scope`` dict on a werkzeug-style ``Local``
    object, so cached providers never leak between concurrent requests.
    """
    if False:
        # Type-stub trick: this branch never executes, but it lets static
        # analyzers see the attributes that configure() creates at runtime.
        _local_manager = None
        _locals = None
    def cleanup(self) -> None:
        # Release the context-local storage for the finished request.
        self._local_manager.cleanup()
    def prepare(self) -> None:
        # Start a fresh provider cache for the current request.
        self._locals.scope = {}
    def configure(self) -> None:
        self._locals = Local()
        self._local_manager = LocalManager([self._locals])
        self.prepare()
    def get(self, key: Any, provider: Provider) -> Any:
        """Return the cached provider for *key*, wrapping and caching
        *provider* in a CachedProviderWrapper on first access."""
        try:
            return self._locals.scope[key]
        except KeyError:
            new_provider = self._locals.scope[key] = CachedProviderWrapper(provider)
            return new_provider
def existing_config(file):
    """Write a canned PyScaffold ``setup.cfg`` into *file* and return it."""
    file.write_text(dedent(' [metadata]\n author = John Doe\n author-email = john.\n license = gpl3\n\n [pyscaffold]\n # Comment\n version = 3.78\n extensions =\n namespace\n tox\n cirrus\n namespace = my_namespace.my_sub_namespace\n '))
    return file
def to_official(preds, features):
    """Convert per-pair relation predictions into the official submission
    format: one dict per predicted (non-NA) relation."""
    h_idx, t_idx, title = [], [], []
    # Flatten head/tail index pairs across all features, repeating the title
    # once per pair so the three lists stay aligned with preds' rows.
    for feat in features:
        for h, t in feat['hts']:
            h_idx.append(h)
            t_idx.append(t)
            title.append(feat['title'])
    res = []
    for i in range(preds.shape[0]):
        predicted_labels = np.nonzero(preds[i])[0].tolist()
        # Label 0 is the "no relation" class and is skipped.
        res.extend(
            {'title': title[i], 'h_idx': h_idx[i], 't_idx': t_idx[i], 'r': id2rel[p]}
            for p in predicted_labels if p != 0
        )
    return res
class DateTimeOnCriterion(Criterion):
    """Criterion matching a term against a single ON datetime or a range."""
    def __init__(self, term, criteria):
        super(DateTimeOnCriterion, self).__init__()
        self.term = term
        self.criteria = criteria
    def get_query(self, **kwargs):
        """Render the criterion as a query fragment string."""
        term = self.term.get_query(**kwargs)
        if isinstance(self.criteria, DateTimeOn):
            # Single point in time: term ON value.
            return '{term}ON{criteria}'.format(term=term, criteria=self.criteria.value)
        # Range: delegate start/end rendering to the criteria object.
        start = self.criteria.get_query(date_only=True, extra_param='start')
        end = self.criteria.get_query(date_only=True, extra_param='end')
        return '{term}{start}{end}'.format(term=term, start=start, end=end)
class INR(nn.Module):
    """Implicit neural representation built from 2D complex Gabor layers.

    The interface is backward compatible: same constructor parameters and
    defaults (``pos_encode_configs=None`` behaves exactly like the old
    default dict), same ``forward`` contract.
    """

    def __init__(self, in_features, hidden_features, hidden_layers, out_features,
                 outermost_linear=True, first_omega_0=30, hidden_omega_0=30,
                 sigma=10.0, pos_encode_configs=None):
        super().__init__()
        # FIX: avoid the shared-mutable-default-argument pitfall; the None
        # sentinel reproduces the original default dict.
        if pos_encode_configs is None:
            pos_encode_configs = {'type': None, 'use_nyquist': None, 'scale_B': None, 'mapping_input': None}
        self.pos_encode = pos_encode_configs['type']
        if self.pos_encode in Encoding().encoding_dict.keys():
            self.positional_encoding = Encoding(self.pos_encode).run(in_features=in_features, pos_encode_configs=pos_encode_configs)
            in_features = self.positional_encoding.out_dim
        elif self.pos_encode is None:
            self.pos_encode = False
        else:
            # BUG FIX: the original used `assert 'Invalid pos_encode...'`,
            # which asserts a non-empty (always truthy) string and therefore
            # never fires. Raise a real error for invalid values.
            raise ValueError('Invalid pos_encode. Choose from: [frequency, Gaussian]')
        self.nonlin = ComplexGaborLayer2D
        # Complex-valued features carry two real components each.
        hidden_features = int(hidden_features / 2)
        dtype = torch.cfloat
        self.complex = True
        self.wavelet = 'gabor'
        # NOTE(review): this unconditionally disables the positional encoding
        # configured above (forward() checks self.pos_encode). It looks
        # deliberate for the Gabor variant but should be confirmed.
        self.pos_encode = False
        self.net = []
        self.net.append(self.nonlin(in_features, hidden_features, omega0=first_omega_0, sigma0=sigma, is_first=True, trainable=False))
        for i in range(hidden_layers):
            self.net.append(self.nonlin(hidden_features, hidden_features, omega0=hidden_omega_0, sigma0=sigma))
        final_linear = nn.Linear(hidden_features, out_features, dtype=dtype)
        self.net.append(final_linear)
        self.net = nn.Sequential(*self.net)

    def forward(self, coords):
        """Map input coordinates to outputs; the Gabor net returns the real
        part of the complex-valued network output."""
        if self.pos_encode:
            coords = self.positional_encoding(coords)
        output = self.net(coords)
        if self.wavelet == 'gabor':
            return output.real
        return output
_model
def identityformer_s36(pretrained=False, **kwargs):
    """Build an IdentityFormer-S36 MetaFormer, optionally loading pretrained
    weights from the model's configured URL."""
    norm = partial(LayerNormGeneral, normalized_dim=(1, 2, 3), eps=1e-06, bias=False)
    model = MetaFormer(depths=[6, 6, 18, 6], dims=[64, 128, 320, 512],
                       token_mixers=nn.Identity, norm_layers=norm, **kwargs)
    model.default_cfg = default_cfgs['identityformer_s36']
    if not pretrained:
        return model
    state_dict = torch.hub.load_state_dict_from_url(url=model.default_cfg['url'], map_location='cpu', check_hash=True)
    model.load_state_dict(state_dict)
    return model
def test_dict_from_weights():
    """Every known weight name must map to a dict exposing all required keys."""
    weights = ['mbart50', 'mbart-large-50-many-to-many-mmt', 'facebook/mbart-large-50-many-to-many-mmt', 'm2m100', 'm2m100_418M', 'm2m100_1.2B', 'facebook/m2m100_418M', 'facebook/m2m100_1.2B']
    valid_keys = ['langs', 'codes', 'pairs']
    for w in weights:
        # FIX: call once and reuse — the original rebuilt the dict a second
        # time just to read its keys.
        d = utils._dict_from_weights(w)
        assert type(d) is dict
        for key in valid_keys:
            assert key in d
def copy_var_format(var, as_var):
    """Return *var* converted to match the dtype and variable type of *as_var*.

    Non-symbolic inputs (anything without a ``dtype`` attribute) are returned
    unchanged.
    """
    if (not hasattr(var, 'dtype')):
        return var
    rval = var
    # Cast first so the type filtering below only has to reconcile shapes.
    if (rval.type.dtype != as_var.type.dtype):
        rval = rval.astype(as_var.type.dtype)
    if (rval.ndim == as_var.ndim):
        # Same rank: adopt as_var's type directly.
        rval = as_var.type.filter_variable(rval)
    else:
        # Different rank: build a type with var's leading dimension prepended
        # to as_var's shape (e.g. scan accumulators), then filter into it.
        tmp = as_var.type.clone(shape=(tuple(var.type.shape[:1]) + tuple(as_var.type.shape)))
        rval = tmp.filter_variable(rval)
    return rval
def check_comparative(qdmr_args, i_op, qdmr, change_stage=0):
    """Validate a 'comparative' QDMR op; rewrite 'between X and Y' conditions.

    Returns a tuple ``(ok, corrected)``: *ok* is False when the arity or step
    references are wrong, or when a BETWEEN pattern is found; *corrected* is a
    rewritten QDMR instance in the latter case, otherwise None.
    """
    ok = True
    corrected = None
    # A comparative op takes exactly (ref, ref, condition-text).
    ok = (ok and (len(qdmr_args) == 3))
    ok = (ok and QdmrInstance.is_good_qdmr_ref(qdmr_args[0], i_op))
    ok = (ok and QdmrInstance.is_good_qdmr_ref(qdmr_args[1], i_op))
    matches = re.findall(BETWEEN_RE_PATTERN, qdmr_args[2], flags=re.IGNORECASE)
    if matches:
        # 'between A and B' cannot be expressed as one comparative: rewrite it
        # as two chained comparatives, one per side of the interval.
        ok = False
        group = matches[0]
        corrected = insert_qdmr_op('comparative', [qdmr_args[0], qdmr_args[1], ' '.join([group[0], 'betweenleftside', group[1]])], i_op, qdmr)
        # The following step filters the first step's result by the right side.
        corrected.ops[(i_op + 1)] = 'comparative'
        corrected.args[(i_op + 1)] = [QdmrInstance.index_to_ref(i_op), qdmr_args[1], ' '.join([group[0], 'betweenrightside', group[2]])]
    return (ok, corrected)
def to_melspec(y, sr, n_fft=400, hop_t=0.01, win_t=0.025, window='hamming', preemphasis=0.97, n_mels=80, log=True, norm_mel=None, log_floor=(- 20)):
    """Compute a mel spectrogram from waveform *y*, optionally log-scaled and
    floored at *log_floor*."""
    hop_length = int(sr * hop_t)
    linear_spec = rstft(y, sr, n_fft, hop_t, win_t, window, preemphasis, log=False)
    mel = librosa.feature.melspectrogram(sr=sr, S=linear_spec, n_fft=n_fft, hop_length=hop_length, n_mels=n_mels, norm=norm_mel)
    if log:
        mel = np.log(mel)
        # Clamp -inf / very small values produced by silent frames.
        mel[mel < log_floor] = log_floor
    return mel
.skipif((torch.cuda.device_count() < 2), reason='test requires multi-GPU machine')
.parametrize('num_workers', [1, 2])
def test_load_gpu(tmpdir, ray_start_2_gpus, seed, num_workers):
    """Checkpoint-load smoke test with the GPU-enabled Horovod-on-Ray strategy."""
    model = BoringModel()
    ray_strategy = HorovodRayStrategy(num_workers=num_workers, use_gpu=True)
    load_test(get_trainer(tmpdir, strategy=ray_strategy), model)
def _make_pickup(pickup_category: PickupCategory, generator_params: PickupGeneratorParams):
    """Build a minimal PickupEntry for tests, modeled as the Echoes ETM."""
    model = PickupModel(game=RandovaniaGame.METROID_PRIME_ECHOES, name='EnergyTransferModule')
    return PickupEntry(
        name='Pickup',
        model=model,
        pickup_category=pickup_category,
        broad_category=pickup_category,
        progression=(),
        generator_params=generator_params,
    )
def create_disk_folder_split(annotation_path: str, data_path: str, output_path: str):
    """Extract the middle frame of each annotated clip and save it under
    ``output_path/<category>/<image_name>``.

    Raises AssertionError when the annotation file or data folder is missing.
    """
    assert os.path.exists(annotation_path), f'Could not find annotation path {annotation_path}'
    assert os.path.exists(data_path), f'Could not find data folder {data_path}'
    dataset = _ExtractMiddleFrameDataset(data_path=data_path, annotation_path=annotation_path)
    # batch_size=1 with an identity collate: each "batch" is one sample tuple.
    loader = DataLoader(dataset, num_workers=8, batch_size=1, collate_fn=(lambda x: x[0]))
    for batch in tqdm(loader):
        (mid_frame, image_name, category) = batch
        category_folder = os.path.join(output_path, category)
        os.makedirs(category_folder, exist_ok=True)
        image_path = os.path.join(category_folder, image_name)
        # BUG FIX: images are binary; PIL's Image.save fails on a file object
        # opened in text mode ('w') — open in 'wb' instead.
        with open(image_path, 'wb') as image_file:
            mid_frame.save(image_file)
class upsampleBlock(nn.Module):
    """ReflectionPad -> 3x3 conv -> PixelShuffle(2) -> activation.

    Doubles the spatial resolution; after the shuffle the block emits
    out_channels // 4 feature maps.
    """
    def __init__(self, in_channels, out_channels, activation=relu):
        super(upsampleBlock, self).__init__()
        self.act = activation
        self.pad = nn.ReflectionPad2d(1)
        self.conv = nn.Conv2d(in_channels, out_channels, 3, stride=1, padding=0)
        self.shuffler = nn.PixelShuffle(2)
    def forward(self, x):
        y = self.pad(x)
        y = self.conv(y)
        y = self.shuffler(y)
        return self.act(y)
class MultiHeadedAttention(nn.Module):
    """Transformer-style multi-head attention that forwards optional
    group-constraint probabilities to the underlying attention() helper."""
    def __init__(self, h, d_model, dropout=0.1, no_cuda=False):
        super(MultiHeadedAttention, self).__init__()
        # Model width must split evenly across heads.
        assert ((d_model % h) == 0)
        self.d_k = (d_model // h)
        self.h = h
        # Four projections: query, key, value, and the final output projection.
        self.linears = clones(nn.Linear(d_model, d_model), 4)
        self.attn = None  # last attention weights, kept for inspection
        self.dropout = nn.Dropout(p=dropout)
        self.no_cuda = no_cuda
    def forward(self, query, key, value, group_prob=None, mask=None):
        if (mask is not None):
            # Broadcast the same mask over every head.
            mask = mask.unsqueeze(1)
        nbatches = query.size(0)
        # Project, then reshape to (batch, heads, seq, d_k).
        (query, key, value) = [l(x).view(nbatches, (- 1), self.h, self.d_k).transpose(1, 2) for (l, x) in zip(self.linears, (query, key, value))]
        (x, self.attn) = attention(query, key, value, mask=mask, dropout=self.dropout, group_prob=group_prob, no_cuda=self.no_cuda)
        # Merge heads back to (batch, seq, d_model), then output projection.
        x = x.transpose(1, 2).contiguous().view(nbatches, (- 1), (self.h * self.d_k))
        return self.linears[(- 1)](x)
def get_fixed_samples(env, num_actions, num_samples):
    """Collect roughly *num_samples* next-states by stepping random actions
    across the vectorized environment; returns them stacked as an array."""
    samples = []
    n_envs = env.num_process
    env.reset()
    for _ in range(0, num_samples, n_envs):
        _old, _act, _rew, new_state, _done = env.get_state()
        random_actions = np.random.randint(0, num_actions, size=(n_envs,))
        env.take_action(random_actions)
        samples.extend(new_state)
    return np.array(samples)
.parametrize('trick_levels', [None, MagicMock()])
def test_click_on_link(echoes_game_description, skip_qtbot, trick_levels):
    """Clicking a data-editor:// link must open the visualizer at that area."""
    main_window = QWidget()
    main_window.open_data_visualizer_at = MagicMock()
    skip_qtbot.add_widget(main_window)
    world_name, area_name = 'World', 'Area'
    trick = TrickResourceInfo(1234, 'Nothing', 'Nothing', 'Some description!')
    popup = trick_details_popup.TrickDetailsPopup(main_window, main_window, echoes_game_description, trick, LayoutTrickLevel.EXPERT, trick_levels)
    popup._on_click_link_to_data_editor(f'data-editor://{world_name}/{area_name}')
    main_window.open_data_visualizer_at.assert_called_once_with(world_name, area_name, game=RandovaniaGame.METROID_PRIME_ECHOES, trick_levels=trick_levels)
def main():
    """Poll a pypilot server and print every watched value as it arrives."""
    client = pypilotClient('192.168.14.1')
    client.watch('imu.frequency', 1.0)
    client.watch('ap.heading', 0.25)
    while True:
        messages = client.receive()
        if messages:
            for name, value in messages.items():
                print(name, '=', value)
        else:
            # Nothing pending; back off briefly to avoid busy-waiting.
            time.sleep(0.03)
(frozen=True)
class BenchSchema():
    """Declarative description of one benchmark entry.

    NOTE(review): the decorator line above this class was truncated to
    ``(frozen=True)`` during extraction — presumably ``@dataclass(frozen=True)``.
    """
    entry_point: Union[(Callable, str)]  # callable, or an import path string
    base: str
    tags: Iterable[str]
    kwargs: Mapping[(str, Any)]
    used_distributions: Sequence[str]
    # Optional predicate: the benchmark is skipped when it returns True.
    skip_if: Optional[Callable[([EnvSpec], bool)]] = None
    # Produces per-spec check parameters; defaults to an empty CheckParams.
    check_params: Callable[([EnvSpec], CheckParams)] = (lambda env_spec: CheckParams())
def test_warning(tmpdir, caplog):
    """Unknown creation options must surface as rasterio._env warnings."""
    profile = DefaultGTiffProfile(count=1, height=256, width=256, compression='lolwut', foo='bar')
    with rasterio.Env(GDAL_VALIDATE_CREATION_OPTIONS=True):
        rasterio.open(str(tmpdir.join('test.tif')), 'w', **profile)
    expected = {
        'CPLE_NotSupported in driver GTiff does not support creation option COMPRESSION',
        'CPLE_NotSupported in driver GTiff does not support creation option FOO',
    }
    emitted = {rec.message for rec in caplog.records
               if rec.levelno == logging.WARNING and rec.name == 'rasterio._env'}
    assert expected <= emitted
class Product(Resource):
    """API client for the v2 account product-configuration endpoints."""
    def __init__(self, client=None):
        super(Product, self).__init__(client)
        self.base_url = (URL.V2 + URL.ACCOUNT)
    # FIX (all methods below): replaced the mutable default argument
    # `data={}` with the None sentinel; behavior for callers is unchanged.
    def requestProductConfiguration(self, account_id, data=None, **kwargs):
        """POST a product-configuration request for *account_id*."""
        url = '{}/{}{}'.format(self.base_url, account_id, URL.PRODUCT)
        return self.post_url(url, {} if data is None else data, **kwargs)
    def fetch(self, account_id, product_id, data=None, **kwargs):
        """GET one product belonging to an account."""
        url = '{}/{}{}/{}'.format(self.base_url, account_id, URL.PRODUCT, product_id)
        return self.get_url(url, {} if data is None else data, **kwargs)
    def edit(self, account_id, product_id, data=None, **kwargs):
        """PATCH one product belonging to an account."""
        url = '{}/{}{}/{}'.format(self.base_url, account_id, URL.PRODUCT, product_id)
        return self.patch_url(url, {} if data is None else data, **kwargs)
    def fetchTnc(self, product_name, data=None, **kwargs):
        """GET the terms and conditions for a product family."""
        url = '{}{}/{}{}'.format(URL.V2, URL.PRODUCT, product_name, URL.TNC)
        return self.get_url(url, {} if data is None else data, **kwargs)
def compileType(value):
    """Classify *value* into one of the compiler's primitive type names."""
    if isinstance(value, Data):
        return 'Data'
    # Walk the numeric tower from most to least specific so integers are not
    # reported as doubles.
    if isinstance(value, numbers.Integral):
        return 'int'
    if isinstance(value, numbers.Real):
        return 'double'
    if isinstance(value, numbers.Complex):
        return 'complex'
    if isinstance(value, str):
        return 'str'
    return 'object'
class MBConv(nn.Module):
    """EfficientNet mobile inverted bottleneck: 1x1 expand -> depthwise conv
    -> squeeze-and-excite -> 1x1 project, with a residual skip when shapes
    are preserved."""
    def __init__(self, w_in, exp_r, kernel, stride, se_r, w_out, bn_norm):
        super(MBConv, self).__init__()
        self.exp = None
        w_exp = int((w_in * exp_r))
        # Only build the expansion conv when the ratio actually widens channels.
        if (w_exp != w_in):
            self.exp = nn.Conv2d(w_in, w_exp, 1, stride=1, padding=0, bias=False)
            self.exp_bn = get_norm(bn_norm, w_exp)
            self.exp_swish = Swish()
        dwise_args = {'groups': w_exp, 'padding': ((kernel - 1) // 2), 'bias': False}
        self.dwise = nn.Conv2d(w_exp, w_exp, kernel, stride=stride, **dwise_args)
        self.dwise_bn = get_norm(bn_norm, w_exp)
        self.dwise_swish = Swish()
        # Squeeze-and-excitation reduction is sized from the un-expanded width.
        self.se = SE(w_exp, int((w_in * se_r)))
        self.lin_proj = nn.Conv2d(w_exp, w_out, 1, stride=1, padding=0, bias=False)
        self.lin_proj_bn = get_norm(bn_norm, w_out)
        # Residual add is only valid when spatial size and width are preserved.
        self.has_skip = ((stride == 1) and (w_in == w_out))
    def forward(self, x):
        f_x = x
        if self.exp:
            f_x = self.exp_swish(self.exp_bn(self.exp(f_x)))
        f_x = self.dwise_swish(self.dwise_bn(self.dwise(f_x)))
        f_x = self.se(f_x)
        # No activation after the projection (linear bottleneck).
        f_x = self.lin_proj_bn(self.lin_proj(f_x))
        if self.has_skip:
            # DropConnect regularization, training-time only.
            if (self.training and (effnet_cfg.EN.DC_RATIO > 0.0)):
                f_x = drop_connect(f_x, effnet_cfg.EN.DC_RATIO)
            f_x = (x + f_x)
        return f_x
class ConvBlock1bit(nn.Module):
    """Conv2d1bit -> BatchNorm -> optional ReLU block for binarized networks."""
    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bias=False, bn_affine=True, activate=True, binarized=False):
        super(ConvBlock1bit, self).__init__()
        self.activate = activate
        self.conv = Conv2d1bit(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, binarized=binarized)
        self.bn = nn.BatchNorm2d(num_features=out_channels, affine=bn_affine)
        if self.activate:
            self.activ = nn.ReLU(inplace=True)
    def forward(self, x):
        out = self.bn(self.conv(x))
        if self.activate:
            out = self.activ(out)
        return out
def flatten_state_dict(state_dict: dict) -> np.ndarray:
    """Flatten a (possibly nested) state dict into a single 1-D numpy array.

    Empty containers are skipped; ndarray values of more than 2 dimensions
    are rejected with AssertionError; unknown types raise TypeError.
    """
    collected = []
    for key, value in state_dict.items():
        if isinstance(value, dict):
            flat = flatten_state_dict(value)
            piece = flat if flat.size else None
        elif isinstance(value, (tuple, list)):
            piece = value if value else None
        elif isinstance(value, (bool, np.bool_, int, np.int32, np.int64)):
            piece = int(value)
        elif isinstance(value, (float, np.float32, np.float64)):
            piece = np.float32(value)
        elif isinstance(value, np.ndarray):
            if value.ndim > 2:
                raise AssertionError('The dimension of {} should not be more than 2.'.format(key))
            piece = value if value.size > 0 else None
        else:
            raise TypeError('Unsupported type: {}'.format(type(value)))
        if piece is not None:
            collected.append(piece)
    return np.hstack(collected) if collected else np.empty(0)
def split_at(iterable, pred, maxsplit=(- 1), keep_separator=False):
    """Yield lists of items from *iterable*, splitting wherever pred(item)
    is true.

    At most *maxsplit* splits are performed (a negative value means
    unlimited). When *keep_separator* is set, each separator is emitted as
    its own single-item list between the chunks.
    """
    if maxsplit == 0:
        yield list(iterable)
        return
    chunk = []
    iterator = iter(iterable)
    for element in iterator:
        if not pred(element):
            chunk.append(element)
            continue
        yield chunk
        if keep_separator:
            yield [element]
        if maxsplit == 1:
            # Split budget exhausted: the rest is one final chunk.
            yield list(iterator)
            return
        chunk = []
        maxsplit -= 1
    yield chunk
def children(handle):
    """Return the HWNDs of the child windows of *handle*, in enumeration
    order (EnumChildWindows visits descendants recursively)."""
    child_windows = []
    def enum_child_proc(hwnd, lparam):
        # Collect every window; returning True tells Windows to continue.
        child_windows.append(hwnd)
        return True
    # ctypes prototype for the callback: BOOL CALLBACK (HWND, LPARAM).
    enum_child_proc_t = WINFUNCTYPE(c_int, wintypes.HWND, wintypes.LPARAM)
    proc = enum_child_proc_t(enum_child_proc)
    win32functions.EnumChildWindows(handle, proc, 0)
    return child_windows
def validate_all_op_level_dtype_bw_overrides(op_configs: OpTypeType, default_candidate: QuantDtypeBwInfo):
    """Check every op-level supported_kernels override against the defaults.

    Ops whose supported_kernels already include *default_candidate* need no
    override; otherwise the override must be a valid higher-precision kernel,
    or NotImplementedError is raised. Returns True when all ops pass.
    """
    for (op_name, op_config) in op_configs.items():
        if (ConfigDictKeys.SUPPORTED_KERNELS in op_config):
            op_level_supported_kernels = op_config[ConfigDictKeys.SUPPORTED_KERNELS]
            if current_config_in_supported_kernels(default_candidate, op_level_supported_kernels):
                logger.info(' Default option found in op level supported kernels list, skip op level override needed for op {%s} \n', op_name)
            else:
                override_dtype_bw_info = get_override_from_supported_kernels(op_level_supported_kernels)
                # Only overrides toward higher precision are allowed.
                if (not is_override_dtype_bw_valid(override_dtype_bw_info, default_candidate)):
                    error_msg = f'''Op level supported_kernels override check failed for op {op_name}
 Op level override only with higher precision kernel is supported
 (please check both quantsim defaults and default supported_kernels in config file specified at override index {DEFAULT_OVERRIDE_SUPPORTED_KERNEL_INDEX})
 One way to rectify this is to specify lower precision data type and bit-width as defaults
 ex : (act_bw = {override_dtype_bw_info.act_bw}, param_bw = {override_dtype_bw_info.param_bw}, act_data_type = {override_dtype_bw_info.act_dtype},param_data_type = {override_dtype_bw_info.param_dtype}) and use op level supported_kernels override
 for this op to indicate higher precision kernel that is supported on given target
 ex: (act_bw = {default_candidate.act_bw}, param_bw ={override_dtype_bw_info.param_bw} , act_data_type = {override_dtype_bw_info.act_dtype},param_data_type = {override_dtype_bw_info.param_dtype})
 '''
                    # Logged at info level before raising so the message lands
                    # in logs even when the exception is swallowed upstream.
                    logger.info(error_msg)
                    raise NotImplementedError(error_msg)
    return True
class App_qt(App_base):
    """Pyzo event-loop integration for Qt.

    Hijacks QtGui.QApplication with a subclass whose constructor returns the
    one shared application instance (creating it on first use), so user code
    that instantiates QApplication and calls exec_() keeps working inside
    Pyzo's own event loop.
    """
    def __init__(self):
        import types
        (QtGui, QtCore) = self.importCoreAndGui()
        (self._QtGui, self._QtCore) = (QtGui, QtCore)
        # Keep a reference to the untouched class so we can still build and
        # run a real application ourselves.
        if (not hasattr(QtGui, 'real_QApplication')):
            QtGui.real_QApplication = QtGui.QApplication
        class QApplication_hijacked(QtGui.QApplication):
            """Stand-in QApplication: always yields the shared instance."""
            def __new__(cls, *args, **kwargs):
                theApp = cls.instance()
                # Create the real application only once.
                if (theApp is None):
                    theApp = QtGui.real_QApplication(*args, **kwargs)
                    QtGui.qApp = theApp
                # Graft our overridden methods onto the existing instance,
                # skipping ones the instance's class already provides
                # identically.
                for key in ['__init__', 'exec_', 'quit']:
                    if (not hasattr(cls, key)):
                        continue
                    val = getattr(cls, key)
                    if hasattr(theApp.__class__, key):
                        if (hash(val) == hash(getattr(theApp.__class__, key))):
                            continue
                    if hasattr(val, '__call__'):
                        if hasattr(val, 'im_func'):
                            val = val.im_func
                        val = types.MethodType(val, theApp.__class__)
                        try:
                            setattr(theApp, key, val)
                        except Exception:
                            pass
                # PySide6/PyQt6 spell it exec(); alias it to our exec_.
                setattr(theApp, 'exec', theApp.exec_)
                theApp.__init__(*args, **kwargs)
                return theApp
            def __init__(self, *args, **kwargs):
                # Real initialization happened in __new__; nothing to do.
                pass
            def exec_(self, *args, **kwargs):
                # User code called exec_(): warn instead of blocking, and
                # expose the caller's locals in __main__ for inspection.
                print_mainloop_warning(mainloopWarning_qt)
                import inspect, __main__
                for caller in inspect.stack()[1:]:
                    (frame, name) = (caller[0], caller[3])
                    if name.startswith('<'):
                        break
                    else:
                        __main__.__dict__[(name + '_locals')] = frame.f_locals
                # Scripts often call sys.exit() right after exec_(); swallow
                # that exit and re-enable it on the next event-loop tick.
                sys._pyzoInterpreter.ignore_sys_exit = True
                def reEnableSysExit():
                    sys._pyzoInterpreter.ignore_sys_exit = False
                self._reEnableSysExitTimer = timer = QtCore.QTimer()
                timer.singleShot(0, reEnableSysExit)
            def quit(self, *args, **kwargs):
                # Quitting would kill Pyzo's loop; ignore.
                pass
        # One shared application, kept alive across user scripts.
        self.app = QApplication_hijacked([''])
        self.app.setQuitOnLastWindowClosed(False)
        QtGui.QApplication = QApplication_hijacked
        self.app._in_event_loop = 'Pyzo'
        QtGui._in_event_loop = 'Pyzo'
        # Route uncaught exceptions through our hook to catch Ctrl-C.
        self._original_excepthook = sys.excepthook
        sys.excepthook = self._excepthook
    def _excepthook(self, type, value, traceback):
        # Translate KeyboardInterrupt into Pyzo's interrupt handling;
        # delegate everything else to the previous hook.
        if issubclass(type, KeyboardInterrupt):
            self._keyboard_interrupt()
        elif (self._original_excepthook is not None):
            return self._original_excepthook(type, value, traceback)
    def process_events(self):
        self.app.sendPostedEvents()
        self.app.processEvents()
    def run(self, repl_callback):
        """Run the real Qt event loop, ticking *repl_callback* every 100 ms."""
        timer = self._timer = self._QtCore.QTimer()
        timer.setSingleShot(False)
        timer.setInterval(int((0.1 * 1000)))
        timer.timeout.connect(repl_callback)
        timer.start()
        # Qt6 has exec(); older bindings only exec_().
        exec_ = getattr(self._QtGui.real_QApplication, 'exec', None)
        if (exec_ is None):
            exec_ = self._QtGui.real_QApplication.exec_
        try:
            try_again = False
            exec_(self.app)
        except TypeError:
            # Some bindings expose exec as a bound/no-arg callable.
            try_again = True
        if try_again:
            exec_()
    def quit(self):
        self._QtGui.real_QApplication.quit()
class TestIndex(object):
    """Tests for pysat's time-index sanity checks on the testing instrument."""
    def setup_method(self):
        # Reload so per-test module-level flags start clean.
        reload(pysat.instruments.pysat_testing)
        self.name = 'testing'
        self.ref_time = pysat.instruments.pysat_testing._test_dates['']['']
        return
    def teardown_method(self):
        del self.ref_time, self.name
        return
    # NOTE(review): the '@pytest.mark' prefix of this decorator was lost in
    # extraction; the line is kept as-is.
    .parametrize('kwargs,msg', [({'non_monotonic_index': True}, 'Loaded data is not monotonic'), ({'non_unique_index': True}, 'Loaded data is not unique')])
    def test_index_error_messages(self, kwargs, msg):
        # Loading data with a bad index must raise ValueError with *msg*.
        test_inst = pysat.Instrument(platform='pysat', name=self.name, num_samples=10, clean_level='clean', update_files=True, strict_time_flag=True, use_header=True, **kwargs)
        (year, doy) = pysat.utils.time.getyrdoy(self.ref_time)
        testing.eval_bad_input(test_inst.load, ValueError, msg, input_args=[year, doy])
        return
def read_cn_block(fid, pointer):
    """Read one MDF CN (channel) block at byte offset *pointer* from *fid*.

    Returns a dict of the unpacked fields, or None when *pointer* is 0/None
    (no block linked).
    """
    if ((pointer != 0) and (pointer is not None)):
        temp = dict()
        fid.seek(pointer)
        # cn_struct is the module-level struct layout for a 228-byte CN block.
        (temp['BlockType'], temp['BlockSize'], temp['pointerToNextCNBlock'], temp['pointerToConversionFormula'], temp['pointerToCEBlock'], temp['pointerToCDBlock'], temp['pointerToChannelCommentBlock'], temp['channelType'], temp['signalName'], temp['signalDescription'], temp['numberOfTheFirstBits'], temp['numberOfBits'], temp['signalDataType'], temp['valueRangeKnown'], temp['valueRangeMinimum'], temp['valueRangeMaximum'], temp['rateVariableSampled'], temp['pointerToASAMNameBlock'], temp['pointerToSignalIdentifierBlock'], temp['ByteOffset']) = cn_struct.unpack(fid.read(228))
        # Fixed-width fields are NUL-padded latin1; decode leniently.
        temp['signalName'] = temp['signalName'].rstrip(b'\x00').decode('latin1', 'replace')
        temp['signalDescription'] = temp['signalDescription'].rstrip(b'\x00').decode('latin1', 'replace')
        return temp
    else:
        return None
class FlagList(List):
    """A list of string flags drawn from a fixed set of valid values.

    Duplicates are rejected; completions offer every combination of the
    combinable values.
    """
    combinable_values: Optional[Sequence] = None
    _show_valtype = False
    def __init__(self, *, none_ok: bool=False, completions: _Completions=None, valid_values: ValidValues=None, length: int=None) -> None:
        super().__init__(valtype=String(), none_ok=none_ok, length=length, completions=completions)
        # Valid values live on the inner String valtype, not on self.
        self.valtype.valid_values = valid_values
    def _check_duplicates(self, values: ListType) -> None:
        if (len(set(values)) != len(values)):
            raise configexc.ValidationError(values, 'List contains duplicate values!')
    def to_py(self, value: Union[(usertypes.Unset, ListType)]) -> Union[(usertypes.Unset, ListType)]:
        vals = super().to_py(value)
        if (not isinstance(vals, usertypes.Unset)):
            self._check_duplicates(vals)
        return vals
    def complete(self) -> _Completions:
        """Return completion tuples: singles first, then all combinations."""
        if (self._completions is not None):
            return self._completions
        valid_values = self.valtype.valid_values
        if (valid_values is None):
            return None
        out = []
        for value in valid_values:
            desc = valid_values.descriptions.get(value, '')
            out.append((json.dumps([value]), desc))
        combinables = self.combinable_values
        if (combinables is None):
            combinables = list(valid_values)
        # Offer every size-2..n combination of combinable flags.
        for size in range(2, (len(combinables) + 1)):
            for combination in itertools.combinations(combinables, size):
                out.append((json.dumps(combination), ''))
        return out
    def __repr__(self) -> str:
        # NOTE(review): this reads self.valid_values while __init__ stores the
        # values on self.valtype — presumably an inherited attribute; confirm.
        return utils.get_repr(self, none_ok=self.none_ok, valid_values=self.valid_values, length=self.length)
def make_fast_generalized_attention(qkv_dim, renormalize_attention=True, numerical_stabilizer=0.0, nb_features=256, features_type='deterministic', kernel_fn=jax.nn.relu, kernel_epsilon=0.001, redraw_features=False, unidirectional=False, lax_scan_unroll=1):
    """Build a Performer-style generalized fast attention function.

    *features_type* selects the random projection: 'ortho' (orthogonal
    Gaussian), 'iid' (unstructured Gaussian), or 'deterministic' (none).
    Returns a dot_product_attention-compatible callable.
    """
    logging.info('Fast generalized attention.: %s features and renormalize=%s', nb_features, renormalize_attention)
    if (features_type == 'ortho'):
        matrix_creator = functools.partial(GaussianOrthogonalRandomMatrix, nb_features, qkv_dim, scaling=False)
    elif (features_type == 'iid'):
        matrix_creator = functools.partial(GaussianUnstructuredRandomMatrix, nb_features, qkv_dim)
    elif (features_type == 'deterministic'):
        matrix_creator = None
    else:
        raise ValueError('Unknown feature value type')
    def kernel_feature_creator(data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data=False):
        # Generalized kernel features ignore the query/key distinction and
        # the attention dims; both are accepted for interface compatibility.
        del attention_dims_t
        del is_query
        return generalized_kernel_feature_creator(data, projection_matrix, batch_dims_t, precision, kernel_fn, kernel_epsilon, normalize_data)
    attention_fn = FastAttentionviaLowRankDecomposition(matrix_creator, kernel_feature_creator, renormalize_attention=renormalize_attention, numerical_stabilizer=numerical_stabilizer, redraw_features=redraw_features, unidirectional=unidirectional, lax_scan_unroll=lax_scan_unroll).dot_product_attention
    return attention_fn
class SoundcloudLibrary(SongLibrary[(K, 'SoundcloudFile')]):
    """Song library backed by the Soundcloud API client.

    Listens to client signals for incoming songs, stream URLs and comments,
    and batches metadata changes via a 2-second GLib tick.
    """
    STAR = ['artist', 'title', 'genre', 'tags']
    def __init__(self, client, player=None):
        super().__init__('Soundcloud')
        self.client = client
        # Signal handler ids, disconnected again in destroy().
        self._sids = [self.client.connect('songs-received', self._on_songs_received), self.client.connect('stream-uri-received', self._on_stream_uri_received), self.client.connect('comments-received', self._on_comments_received)]
        self._psid = None
        # Songs whose metadata changed since the last tick.
        self._dirty = set()
        GLib.timeout_add(2000, self._on_tick)
        if player:
            self.player = player
            self._psid = self.player.connect('song-started', self.__song_started)
    def destroy(self):
        super().destroy()
        for sid in self._sids:
            self.client.disconnect(sid)
        if self._psid:
            self.player.disconnect(self._psid)
    def query(self, text, sort=None, star=STAR):
        # NOTE(review): mutable class-attribute default; never mutated here.
        values = self._contents.values()
        try:
            return SoundcloudQuery(text).filter(values)
        except SoundcloudQuery.Error:
            # Unparsable query: fall back to everything.
            return values
    def query_with_refresh(self, query: SoundcloudQuery):
        """Filter current songs and kick off a remote fetch for the query."""
        current = self._contents.values()
        if (not query.is_parsable):
            return current
        self.client.get_tracks(query.terms)
        filtered = query.filter(current)
        print_d(('Filtered %d results to %d' % (len(current), len(filtered))))
        return filtered
    def rename(self, song, newname, changed=None):
        raise TypeError("Can't rename Soundcloud files")
    def _get_stream_urls(self, songs):
        # Cooperative task (driven via copool) resolving stream URLs.
        with Task(_('Soundcloud'), 'Pre-fetching stream URLs') as task:
            total = len(songs)
            for (i, song) in enumerate(songs):
                # Only fetch when missing or still pointing at the API host.
                if (('~uri' not in song) or ('api.soundcloud.com' in song['~uri'])):
                    self.client.get_stream_url(song)
                task.update((i / total))
                (yield)
    def _on_songs_received(self, client, songs):
        print_d(f'Got {len(songs)} songs')
        self.add(songs)
        # Stable funcid so repeated batches of the same songs dedupe.
        funcid = hash(''.join((s['~uri'] for s in songs)))
        copool.add(self._get_stream_urls, songs, timeout=100, funcid=funcid)
    def _on_stream_uri_received(self, client, song: AudioFile, uri: str):
        song['~uri'] = uri
        self._dirty.add(song)
    def _on_tick(self) -> bool:
        # Flush batched changes; returning True keeps the GLib timeout alive.
        if self._dirty:
            self.changed(self._dirty)
            self._dirty.clear()
        return True
    def _on_comments_received(self, client, track_id, comments):
        def bookmark_for(com):
            # Bookmark position is the comment timestamp in whole seconds.
            text = f"{com['body']!r} {com['user']['username']}"
            return (max(0, int(((com.get('timestamp') or 0) / 1000.0))), text)
        try:
            song = self.song_by_track_id(track_id)
        except KeyError:
            print_exc()
            return
        song.bookmarks = [bookmark_for(c) for c in comments]
        print_d(f"Updated song bookmarks for {song('title')}")
    def song_by_track_id(self, track_id):
        """Return the library song with *track_id*; raise KeyError if absent."""
        for song in self.values():
            if (song.track_id == track_id):
                return song
        raise KeyError(f'No track with id {track_id}. Do have {[s.track_id for s in self.values()]}')
    def _changed(self, items):
        super()._changed(items)
        for item in items:
            item.write()
    def __song_started(self, player, song):
        # Lazily fetch comments when one of our songs starts playing.
        if isinstance(song, SoundcloudFile):
            print_d(f"Getting comments for {song('title')} ({song.key})")
            self.client.get_comments(song.track_id)
class LaneSection(XodrBase):
    """One OpenDRIVE laneSection: a center lane plus ordered left/right lanes.

    *s* is the section's start coordinate along the road reference line;
    *centerlane* always receives lane id 0.
    """
    def __init__(self, s, centerlane):
        super().__init__()
        self.s = s
        self.centerlane = centerlane
        self.centerlane._set_lane_id(0)
        self.leftlanes = []
        self.rightlanes = []
        # Left lanes count upward from +1, right lanes downward from -1.
        self._left_id = 1
        self._right_id = (- 1)
    def __eq__(self, other):
        # BUG FIX: the original fell off the end and returned None (not False)
        # for non-LaneSection operands; return a proper bool in every path.
        if (isinstance(other, LaneSection) and super().__eq__(other)):
            if ((self.get_attributes() == other.get_attributes()) and (self.centerlane == other.centerlane) and (self.leftlanes == other.leftlanes) and (self.rightlanes == other.rightlanes)):
                return True
        return False
    def add_left_lane(self, lane):
        """Append *lane* on the left side, assigning the next positive id."""
        lane._set_lane_id(self._left_id)
        self._left_id += 1
        self.leftlanes.append(lane)
        return self
    def add_right_lane(self, lane):
        """Append *lane* on the right side, assigning the next negative id."""
        lane._set_lane_id(self._right_id)
        self._right_id -= 1
        self.rightlanes.append(lane)
        return self
    def get_attributes(self):
        """Return the XML attributes of the laneSection element."""
        retdict = {}
        retdict['s'] = str(self.s)
        return retdict
    def get_element(self):
        """Build the <laneSection> XML element with left/center/right groups."""
        element = ET.Element('laneSection', attrib=self.get_attributes())
        self._add_additional_data_to_element(element)
        if self.leftlanes:
            left = ET.SubElement(element, 'left')
            # OpenDRIVE lists left lanes from outermost (highest id) inward.
            for l in reversed(self.leftlanes):
                left.append(l.get_element())
        center = ET.SubElement(element, 'center')
        center.append(self.centerlane.get_element())
        if self.rightlanes:
            right = ET.SubElement(element, 'right')
            for l in self.rightlanes:
                right.append(l.get_element())
        return element
class OptSimilarity_Albuterol(Molecule):
    """Molecule-optimization task rewarding FCFP4 similarity to Albuterol."""
    def _reward(self):
        # Guacamol-style similarity scorer against the Albuterol SMILES.
        scorer = similarity(smiles='CC(C)(C)NCC(O)c1ccc(O)c(CO)c1', name='Albuterol', fp_type='FCFP4', threshold=0.75)
        s_fn = scorer.wrapped_objective
        molecule = Chem.MolFromSmiles(self._state)
        if (molecule is None):
            # Unparsable SMILES: no reward.
            return 0.0
        # Discount by the remaining step budget so earlier successes score higher.
        return (s_fn.score(self._state) * (self.discount_factor ** (self.max_steps - self.num_steps_taken)))
.unit()
.parametrize('decorator', [pytask.mark.depends_on, pytask.mark.produces])
.parametrize(('values', 'expected'), [('a', ['a']), (['b'], [['b']]), (['e', 'f'], [['e', 'f']])])
def test_extract_args_from_mark(decorator, values, expected):
    """Marker args passed to depends_on/produces must round-trip unchanged."""
    # NOTE(review): this line was '@decorator(values)' before extraction
    # stripped the '@decorator' prefix; kept byte-identical here.
    (values)
    def task_example():
        pass
    parser = (depends_on if (decorator.name == 'depends_on') else produces)
    result = list(_extract_nodes_from_function_markers(task_example, parser))
    assert (result == expected)
class Effect8470(BaseEffect):
    """Capital Industrial Command bonus: boosts drone damage multiplier."""
    # Passive effect: always applied, no activation required.
    type = 'passive'
    def handler(fit, container, context, projectionRange, **kwargs):
        # Boost damageMultiplier of every drone requiring the Drones skill,
        # scaled by the ship's bonus attribute, per Capital Industrial Ships level.
        fit.drones.filteredItemBoost((lambda drone: drone.item.requiresSkill('Drones')), 'damageMultiplier', container.getModifiedItemAttr('capitalIndustrialCommandBonusDroneDamage'), skill='Capital Industrial Ships', **kwargs)
def test_set_mixin(gl):
    """SetMixin.set must PUT and return a wrapped object with key/value set."""
    class M(SetMixin, FakeManager):
        pass
    # BUG FIX: the URL literal was truncated to a bare quote in the extracted
    # source (unterminated string); reconstructed to the FakeManager test
    # endpoint for the 'foo' key. Confirm against the original test suite.
    url = 'http://localhost/api/v4/tests/foo'
    responses.add(method=responses.PUT, url=url, json={'key': 'foo', 'value': 'bar'}, status=200, match=[responses.matchers.query_param_matcher({})])
    mgr = M(gl)
    obj = mgr.set('foo', 'bar')
    assert isinstance(obj, FakeObject)
    assert (obj.key == 'foo')
    assert (obj.value == 'bar')
    assert (responses.assert_call_count(url, 1) is True)
class Discriminator(nn.Module):
    """Two-branch discriminator: a texture branch on the image and a structure
    branch on (edge, gray-image) pairs, with an internal edge detector."""
    def __init__(self, image_in_channels, edge_in_channels):
        super(Discriminator, self).__init__()
        self.texture_branch = TextureBranch(in_channels=image_in_channels)
        self.structure_branch = StructureBranch(in_channels=edge_in_channels)
        self.edge_detector = EdgeDetector()
    def forward(self, output, gray_image, real_edge, is_real):
        """Return (concatenated branch predictions, detected edge).

        Real samples feed the ground-truth edge to the structure branch;
        fake samples feed the detected edge.
        """
        # FIX: the texture prediction and edge detection were duplicated in
        # both branches of the original if/else — computed once here; also
        # replaced the `is_real == True` comparison with plain truthiness.
        texture_pred = self.texture_branch(output)
        fake_edge = self.edge_detector(output)
        edge = real_edge if is_real else fake_edge
        structure_pred = self.structure_branch(torch.cat((edge, gray_image), dim=1))
        return (torch.cat((texture_pred, structure_pred), dim=1), fake_edge)
class TempFile():
    """Context manager yielding the path of a fresh, closed temporary file;
    the file is removed on exit."""
    def __init__(self, suffix='.nc'):
        self.filename = None
        self.suffix = suffix
    def __enter__(self):
        # mkstemp returns an open fd; close it immediately so callers can
        # reopen the path with whatever tool they like.
        fd, path = tempfile.mkstemp(suffix=self.suffix)
        self.handle = fd
        self.filename = path
        os.close(fd)
        return path
    def __exit__(self, *args):
        os.remove(self.filename)
class TestChangeHosts(EndianTest):
    """Round-trip (pack/unpack) tests for the X11 ChangeHosts request."""
    def setUp(self):
        # IPv4 host 183.251.198.200, family 0 (Internet), mode 0 (insert).
        self.req_args_0 = {'host': [183, 251, 198, 200], 'host_family': 0, 'mode': 0}
        # Expected wire bytes for the request above.
        self.req_bin_0 = b'm\x00\x03\x00\x00\x00\x04\x00\xb7\xfb\xc6\xc8'
    def testPackRequest0(self):
        bin = request.ChangeHosts._request.to_binary(*(), **self.req_args_0)
        self.assertBinaryEqual(bin, self.req_bin_0)
    def testUnpackRequest0(self):
        (args, remain) = request.ChangeHosts._request.parse_binary(self.req_bin_0, dummy_display, 1)
        # A well-formed request parses fully with no trailing bytes.
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_0)
class MyFormatter(argparse.RawTextHelpFormatter):
    """Raw-text help formatter that measures sub-action invocations (with
    indentation-width padding) when tracking the longest invocation."""
    def add_argument(self, action):
        if action.help is argparse.SUPPRESS:
            return
        fmt = self._format_action_invocation
        invocations = [fmt(action)]
        base_indent = self._current_indent
        for sub in self._iter_indented_subactions(action):
            # Pad with placeholder chars so the measured width accounts for
            # the sub-action's extra indentation.
            pad = 'x' * (self._current_indent - base_indent)
            invocations.append(pad + fmt(sub))
        widest = max(len(s) for s in invocations)
        self._action_max_length = max(self._action_max_length, widest + self._current_indent)
        self._add_item(self._format_action, [action])
def get_default_smarts_object():
    """Return the default SMARTS input card as a dict of keyword -> value.

    Location defaults to Madrid-area coordinates; the simulated moment is
    2015-05-19 12:30 (decimal HOUR 12.5).  Key order matches the SMARTS
    card layout expected by the writer.
    """
    target_time = datetime(2015, 5, 19, 12, 30)
    # Decimal hour of day, including minutes and seconds.
    decimal_hour = target_time.hour + (target_time.minute + target_time.second / 60) / 60
    albedo = 9                 # used for both IALBDX and IALBDG
    wavelength_min = 280       # shared by WLMN and WPMN
    wavelength_max = 4004      # shared by WLMX and WPMX
    return {
        'LATIT': 40.4966,      # site latitude (deg)
        'LONGIT': -3.462,      # site longitude (deg)
        'ISPR': 1,             # pressure input mode
        'ALTIT': 0.625,        # site altitude (km)
        'HEIGHT': 0.0,
        'ZONE': 0,             # time zone
        'SEASON': 'SUMMER',
        'IALBDX': albedo,
        'IALBDG': albedo,
        'IMASS': 3,            # solar position mode
        'IATMOS': 1,           # atmospheric data flag
        'ATMOS': 'USSA',       # reference atmosphere model
        'IH2O': 0,             # precipitable water flag
        'IO3': 1,              # ozone flag
        'IGAS': 0,             # gas contents flag
        'ILOAD': 2,            # gas scenario
        'qCO2': 370,           # CO2 content (ppm)
        'ISPCTR': 0,           # extraterrestrial spectrum
        'AEROS': 'S&F_RURAL',  # aerosol model
        'ITURB': 0,            # turbidity flag
        'TAU5': 0.085,         # tau at 500 nm
        'IPRT': 2,             # print/output detail
        'IOTOT': 1,            # number of output variables
        'IOUT': '4',           # which output variables
        'WLMN': wavelength_min,
        'WPMN': wavelength_min,
        'WLMX': wavelength_max,
        'WPMX': wavelength_max,
        'INTVL': 0.5,          # wavelength step (nm)
        'ISCAN': 1,            # convolution on
        'IFILT': 1,            # convolution function
        'WV1': 300,            # convolution window min (nm)
        'WV2': 3990,           # convolution window max (nm)
        'FWHM': 4,
        'STEP': 2,
        'ITILT': 1,            # tilted-surface calc on
        'TILT': -999,          # -999 = track the sun
        'WAZIM': -999,
        'SOLARC': 1367,        # solar constant (W/m^2)
        'SUNCOR': 1,           # sun-earth distance correction
        'ICIRC': 1,            # circumsolar calc on
        'SLOPE': 1,
        'APERT': 2.5,
        'LIMIT': 4,
        'ILLUM': 0,            # illuminance calc off
        'IUV': 0,              # UV calc off
        'COMNT': 'Test',
        'SPR': 940,            # surface pressure (mb)
        'TAIR': 17,            # air temperature (C)
        'TDAY': 25,            # mean daily temperature (C)
        'RH': 30,              # relative humidity (%)
        'W': 1,                # water vapour
        'YEAR': target_time.year,
        'MONTH': target_time.month,
        'DAY': target_time.day,
        'HOUR': decimal_hour,
    }
class Model(nn.Module):
    """IQA network: a grouped stem, a shared ResNet trunk, and a score head.

    forward(x, y) concatenates the two single-channel inputs, extracts
    features, re-weights them with a bilinearly-resized copy of ``y``, and
    pools max+avg pyramids down to a single scalar score per sample.
    """

    def __init__(self, args):
        super().__init__()
        self.leaky = 0.1
        # Two-group stem so each input channel gets its own filters initially.
        self.group_layers = nn.Sequential(
            nn.Conv2d(2, 32, 3, stride=1, padding=1, groups=2),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 64, 3, stride=1, padding=1, groups=2),
            nn.ReLU(inplace=True),
        )
        self.shared_layers1 = nn.Sequential(
            nn.Conv2d(64, 96, 3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(96),
            nn.LeakyReLU(self.leaky, inplace=True),
        )
        # ResNet-34-style trunk (project-local ResNet_iqa / BasicBlock).
        self.shared_layers2 = ResNet_iqa(BasicBlock, [3, 4, 6, 3])
        self.shared_layers3 = nn.Sequential(
            nn.Conv2d(512, 256, 3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(self.leaky, inplace=True),
        )
        self.score_layer1 = nn.Sequential(
            nn.Conv2d(256, 128, 3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(self.leaky, inplace=True),
        )
        self.score_layer2 = nn.Sequential(
            nn.Conv2d(256, 64, 3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(self.leaky, inplace=True),
        )
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(nn.Linear(128, 16, bias=False), nn.BatchNorm1d(16), nn.Linear(16, 1))

    def forward(self, x, y):
        feat = torch.cat((x, y), dim=1)
        batch = feat.shape[0]
        feat = self.group_layers(feat)
        feat = self.shared_layers1(feat)
        feat = self.shared_layers2(feat)
        feat = self.shared_layers3(feat)
        # Re-weight features with y resized to the feature resolution.
        guide = nn.functional.interpolate(y, feat.shape[-2:], mode='bilinear')
        feat = feat.mul(guide)
        feat = self.score_layer1(feat)
        # First max/avg pooling pyramid level (128 -> 256 channels).
        pooled = torch.cat(
            (F.max_pool2d(feat, kernel_size=2, stride=2),
             F.avg_pool2d(feat, kernel_size=2, stride=2)), dim=1)
        pooled = self.score_layer2(pooled)
        # Second pyramid level (64 -> 128 channels), then global pooling.
        out = torch.cat(
            (F.max_pool2d(pooled, kernel_size=2, stride=2),
             F.avg_pool2d(pooled, kernel_size=2, stride=2)), dim=1)
        out = self.pool(out)
        out = out.view(batch, -1)
        return self.fc(out)
class ReportGenerator():
    """Render Quark web reports by substituting ``$token$`` placeholders in
    the bundled HTML layout templates.

    json_report: parsed Quark JSON output — a rule-generation report (key
    ``result``) or an analysis report (key ``crimes``), plus sample metadata
    (``apk_filename``, ``md5``, ``size_bytes``).
    """

    def __init__(self, json_report):
        self.json_report = json_report
        rulegenerate_html_path = pkg_resources.resource_filename('quark.webreport', 'genrule_report_layout.html')
        analysis_result_html_path = pkg_resources.resource_filename('quark.webreport', 'analysis_report_layout.html')
        # `with` already closes the files; the previous explicit close() calls
        # were redundant.
        with open(rulegenerate_html_path, 'r') as file:
            self.rulegenerate_layout = file.read()
        with open(analysis_result_html_path, 'r') as file:
            self.analysis_result_layout = file.read()

    def get_rule_generate_editor_html(self):
        """Return the populated rule-generation editor page as HTML."""
        generate_result = self.json_report['result']
        # Sample size in MiB, two decimal places.
        filesize = format((float(self.json_report['size_bytes']) / float((1024 * 1024))), '.2f')
        filename = self.json_report['apk_filename']
        md5 = self.json_report['md5']
        rule_number = len(generate_result)
        self.insert_genrule_report_html(generate_result, filename, md5, filesize, rule_number)
        self.rulegenerate_layout = get_json_report_html(self.rulegenerate_layout, generate_result)
        return self.rulegenerate_layout

    def get_analysis_report_html(self):
        """Return the populated analysis report page as HTML."""
        analysis_result = self.json_report['crimes']
        filesize = format((float(self.json_report['size_bytes']) / float((1024 * 1024))), '.2f')
        filename = self.json_report['apk_filename']
        md5 = self.json_report['md5']
        # Rule counts bucketed per confidence level.
        rule_number_set = {
            'all': len(analysis_result),
            '100%': count_confidence_rule_number(analysis_result, '100%'),
            '80%': count_confidence_rule_number(analysis_result, '80%'),
            '60%': count_confidence_rule_number(analysis_result, '60%'),
            '40%': count_confidence_rule_number(analysis_result, '40%'),
            '20%': count_confidence_rule_number(analysis_result, '20%'),
            '0%': count_confidence_rule_number(analysis_result, '0%'),
        }
        five_stages_labels = get_five_stages_labels(analysis_result)
        all_labels = get_all_labels(analysis_result)
        self.insert_sample_information_html(rule_number_set, filename, md5, filesize, five_stages_labels)
        self.insert_radarchart_html(five_stages_labels, all_labels)
        self.insert_report_html(analysis_result)
        self.analysis_result_layout = get_json_report_html(self.analysis_result_layout, analysis_result)
        return self.analysis_result_layout

    def insert_radarchart_html(self, five_stages_labels, all_labels):
        """Fill the radar-chart label placeholders in the analysis layout."""
        five_labels_html = ''
        for label in five_stages_labels:
            five_labels_html += f'<label class="label-tag">{label}</label>'
        all_labels_html = ''
        for label in all_labels:
            # Shorten a few long label names for display only.
            if (label == 'power manager'):
                label = 'power'
            elif (label == 'accessibility service'):
                label = 'accessibility'
            elif (label == 'dexClassLoader'):
                label = 'dex'
            all_labels_html += f'''
            <label id="collection" class="label-container">{label}
                <input class="rule-label" type="checkbox"
                name="label" value="{label}">
                <span class="checkmark"></span>
            </label>
            '''
        replace_dict = {'$five_labels_html$': five_labels_html, '$all_labels_html$': all_labels_html}
        for (key, replace_str) in replace_dict.items():
            self.analysis_result_layout = self.analysis_result_layout.replace(key, str(replace_str))

    def insert_genrule_report_html(self, data, filename, md5, filesize, rule_number):
        """Fill the generated-rule table and sample info into the layout.

        data: list of rule dicts, each with 'number' and a two-element 'api'
        list of {'class': ..., 'method': ...}.
        """
        contentHTML = ''
        for rule in data:
            api1 = (rule['api'][0]['class'].split('/')[(- 1)] + rule['api'][0]['method'])
            # BUG FIX: the second column previously reused api[0]['class'];
            # it must combine the *second* API's class with its method.
            api2 = (rule['api'][1]['class'].split('/')[(- 1)] + rule['api'][1]['method'])
            # BUG FIX: escape angle brackets so method names like <init> render
            # in HTML; the previous replace('<', '<') calls were no-ops.
            api1 = api1.replace('<', '&lt;').replace('>', '&gt;')
            api2 = api2.replace('<', '&lt;').replace('>', '&gt;')
            contentHTML += f'''
            <tr id="{rule['number']}">
                <td><p class="fw-normal mb-1">{rule['number']}</p></td>
                <td><p class="api-td fw-normal mb-1">{api1}</p></td>
                <td><p class="api-td fw-normal mb-1">{api2}</p></td>
                <td>
                    <a href="#" class="edit-btn btn btn-info btn-sm">
                        Edit
                    </a>
                </td>
            </tr>
            '''
        replace_dict = {'$genrule_report$': contentHTML, '$filename$': filename, '$md5$': md5, '$filesize$': filesize, '$rule_numbers$': rule_number}
        for (key, replace_str) in replace_dict.items():
            self.rulegenerate_layout = self.rulegenerate_layout.replace(key, str(replace_str))

    def insert_report_html(self, data):
        """Fill the per-crime result table into the analysis layout."""
        # Confidence level -> Bootstrap badge class.
        confidence_badge = {'0%': 'badge-secondary', '20%': 'badge-success', '40%': 'badge-info', '60%': 'badge-primary', '80%': 'badge-warning', '100%': 'badge-danger'}
        contentHTML = ''
        for crime in data:
            description = crime['crime']
            confidence = crime['confidence']
            # Rule id = rule filename without its extension.
            rule_number = crime['rule'].split('.')[0]
            contentHTML += f'''
            <tr>
                <td><p class="fw-normal mb-1">{rule_number}</p></td>
                <td><p class="fw-normal mb-1">{description}</p></td>
                <td>
                    <span class="badge {confidence_badge[confidence]}">
                        {confidence}
                    </span>
                </td>
            </tr>
            '''
        self.analysis_result_layout = self.analysis_result_layout.replace('$report_content$', contentHTML)

    def insert_sample_information_html(self, rules_number_set, filename, md5, filesize, labels):
        """Fill the sample summary (file info, per-confidence counts, labels)."""
        five_labels_html = ''
        for label in labels:
            five_labels_html += f'<label class="label-tag">{label}</label>'
        replace_dict = {'$effective_rules_number_100$': rules_number_set['100%'], '$effective_rules_number_80$': rules_number_set['80%'], '$effective_rules_number_60$': rules_number_set['60%'], '$effective_rules_number_40$': rules_number_set['40%'], '$effective_rules_number_20$': rules_number_set['20%'], '$effective_rules_number_0$': rules_number_set['0%'], '$all_rules_number$': rules_number_set['all'], '$filename$': filename, '$md5$': md5, '$filesize$': filesize, '$five_labels_html$': five_labels_html}
        for (key, replace_str) in replace_dict.items():
            self.analysis_result_layout = self.analysis_result_layout.replace(key, str(replace_str))
def sanitize_source(source):
    """Normalize an oscilloscope source name.

    Accepts 'C'/'CH'/'CHAN'/'CHANNEL' followed by a single digit (returned as
    'C<digit>') or the bare names 'MATH'/'LINE' (returned upper-cased), in any
    case and with surrounding whitespace.  Raises ValueError otherwise.
    """
    pattern = r'^\s*(C|CH|CHAN|CHANNEL)\s*(?P<number>\d)\s*$|^\s*(?P<name_only>MATH|LINE)\s*$'
    matched = re.match(pattern, source, re.IGNORECASE)
    if not matched:
        raise ValueError(f'source {source} not recognized')
    channel = matched.group('number')
    if channel is not None:
        return 'C' + channel
    return matched.group('name_only').upper()
# NOTE(review): the three lines below look like decorators whose '@name'
# prefixes were lost in transit (e.g. '@app.route(...)', '@_params(...)');
# kept verbatim — restore against the original file before running.
('/migrate_rooms', methods=['POST'])
_params([], need_username=True)
_wrapper_json
_web_opration_log('migrate_rooms', get_op_info=migrate_rooms_log)
def migrate_rooms(username):
    """Migrate rooms from src to dst for the given ISPs.

    Reads the JSON request body keys 'src_rooms', 'dst_rooms' and 'isps'
    (each defaulting to []) and delegates to migrate.migrate_rooms.
    """
    json_data = request.get_json(force=True)
    src_rooms = json_data.get('src_rooms', [])
    dst_rooms = json_data.get('dst_rooms', [])
    isps = json_data.get('isps', [])
    return migrate.migrate_rooms(src_rooms, dst_rooms, isps, username)
def local_do_test(m):
    """Elaborate *m*, run the L2 behavioral RTLIR gen/type-check passes, and
    compare each update block's generated RTLIR against ``m._rtlir_test_ref``.

    Models without a ``_rtlir_test_ref`` attribute are treated as smoke
    tests: the passes must simply run without error.
    """
    if isinstance(m, type):
        # Allow passing the model class instead of an instance.
        m = m.DUT()
    m.elaborate()
    m.apply(BehavioralRTLIRGenL2Pass(m))
    m.apply(BehavioralRTLIRTypeCheckL2Pass(m))
    # Bug fix: the try previously wrapped the whole comparison loop, so an
    # AttributeError raised *inside* the loop (e.g. by get_metadata) was
    # silently swallowed.  Narrow it to the reference lookup only.
    try:
        ref = m._rtlir_test_ref
    except AttributeError:
        return
    for blk in m.get_update_blocks():
        upblk = m.get_metadata(BehavioralRTLIRGenL2Pass.rtlir_upblks)[blk]
        assert (upblk == ref[blk.__name__])
class KeystoneV3AuthTests(KeystoneAuthTestsMixin, unittest.TestCase):
    """Auth-source tests against a fake Keystone v3 endpoint (emails required)."""

    def fake_keystone(self):
        # The method deliberately shadows (and calls) the module-level helper.
        return fake_keystone(3, requires_email=True)

    def emails(self):
        return True

    def test_query(self):
        with self.fake_keystone() as keystone:
            users, fed_id, err = keystone.query_users('cool')
            self.assertIsNone(err)
            self.assertEqual(1, len(users))
            self.assertEqual('keystone', fed_id)
            self.assertEqual('cool.user', users[0].username)
            # Unknown prefixes yield an empty (but successful) result.
            users, fed_id, err = keystone.query_users('unknown')
            self.assertIsNone(err)
            self.assertEqual(0, len(users))
            self.assertEqual('keystone', fed_id)

    def test_link_user(self):
        with self.fake_keystone() as keystone:
            linked, err = keystone.link_user('cool.user')
            self.assertIsNone(err)
            self.assertIsNotNone(linked)
            self.assertEqual('cool_user', linked.username)
            self.assertEqual('cool.', linked.email)
            # Linking again must be idempotent.
            relinked, _ = keystone.link_user('cool.user')
            self.assertEqual(relinked.id, linked.id)
            confirmed, _ = keystone.confirm_existing_user('cool_user', 'password')
            self.assertIsNotNone(confirmed)
            self.assertEqual('cool_user', confirmed.username)

    def test_check_group_lookup_args(self):
        with self.fake_keystone() as keystone:
            ok, err = keystone.check_group_lookup_args({})
            self.assertFalse(ok)
            self.assertEqual('Missing group_id', err)
            ok, err = keystone.check_group_lookup_args({'group_id': 'unknownid'})
            self.assertFalse(ok)
            self.assertEqual('Group not found', err)
            ok, err = keystone.check_group_lookup_args({'group_id': 'somegroupid'})
            self.assertTrue(ok)
            self.assertIsNone(err)

    def test_iterate_group_members(self):
        with self.fake_keystone() as keystone:
            members_iter, err = keystone.iterate_group_members({'group_id': 'somegroupid'})
            self.assertIsNone(err)
            members = sorted(members_iter)
            self.assertEqual(2, len(members))
            self.assertEqual('adminuser', members[0][0].id)
            self.assertEqual('cool.user', members[1][0].id)
def get_device_class_from_sys_info(info: Dict[(str, Any)]) -> Type[SmartDevice]:
    """Select the SmartDevice subclass matching a sysinfo response.

    Raises SmartDeviceException when the response lacks the expected keys,
    and UnsupportedDeviceException for an unrecognized device type.
    """
    if ('system' not in info) or ('get_sysinfo' not in info['system']):
        raise SmartDeviceException("No 'system' or 'get_sysinfo' in response")
    sysinfo: Dict[(str, Any)] = info['system']['get_sysinfo']
    # Bulbs report their type under 'mic_type' instead of 'type'.
    device_type: Optional[str] = sysinfo.get('type', sysinfo.get('mic_type'))
    if device_type is None:
        raise SmartDeviceException('Unable to find the device type field!')
    # Dimmers are plugs by type, so this check must come first.
    if ('dev_name' in sysinfo) and ('Dimmer' in sysinfo['dev_name']):
        return SmartDimmer
    lowered = device_type.lower()
    if 'smartplug' in lowered:
        # Power strips expose their sockets as 'children'.
        return SmartStrip if ('children' in sysinfo) else SmartPlug
    if 'smartbulb' in lowered:
        # Light strips report their length.
        return SmartLightStrip if ('length' in sysinfo) else SmartBulb
    raise UnsupportedDeviceException(('Unknown device type: %s' % device_type))
class Effect8013(BaseEffect):
    """Implant-set effect: multiplies 'shieldHpBonus' on all applied implants
    that require the Cybernetics skill by the carrier's 'ImplantSetNirvana'
    attribute (falling back to x1 when the attribute is absent or zero)."""
    runTime = 'early'
    type = 'passive'
    # NOTE(review): handler has no `self` — presumably called unbound by the
    # effect framework; confirm against BaseEffect's calling convention.
    def handler(fit, implant, context, projectionRange, **kwargs):
        fit.appliedImplants.filteredItemMultiply((lambda target: target.item.requiresSkill('Cybernetics')), 'shieldHpBonus', (implant.getModifiedItemAttr('ImplantSetNirvana') or 1), **kwargs)
class CB(nn.Module):
    """Conv + BatchNorm block (no activation), preserving spatial size for
    odd kernel sizes at stride 1."""

    def __init__(self, nIn, nOut, kSize, stride=1):
        super().__init__()
        # 'Same' padding for an odd kernel.
        pad = int((kSize - 1) / 2)
        self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride, padding=(pad, pad), bias=False)
        self.bn = nn.BatchNorm2d(nOut, eps=0.001)

    def forward(self, input):
        return self.bn(self.conv(input))
def build_lr_scheduler(optimizer: Optimizer, warmup_epochs: Union[(float, int)], epochs: int, num_lrs: int, train_data_size: int, batch_size: int, init_lr: float, max_lr: float, final_lr: float) -> _LRScheduler:
    """Build a NoamLR learning-rate scheduler: linear warmup to max_lr over
    warmup_epochs, then exponential decay to final_lr.

    Scalar arguments are wrapped in lists because NoamLR takes per-parameter-
    group sequences; `epochs` is replicated num_lrs times for the same reason.
    Note: the return annotation was `Type[_LRScheduler]`, but an *instance*
    is returned — corrected to `_LRScheduler`.
    """
    return NoamLR(optimizer=optimizer, warmup_epochs=[warmup_epochs], total_epochs=([epochs] * num_lrs), steps_per_epoch=(train_data_size // batch_size), init_lr=[init_lr], max_lr=[max_lr], final_lr=[final_lr])
class FloorplanOptions():
    """Parameter bag for procedural floor-plan generation.

    NOTE(review): annotated class attributes with defaults — looks like this
    was written for @dataclass (no decorator visible here); confirm before
    relying on a generated __init__.
    """
    # tw1..tw4 / tl1..tl4: per-side parameters; exact semantics (widths vs
    # lengths per wall) are not shown in this file — TODO confirm.
    tw1: float = 1.0
    tw2: float = 1.0
    tw3: float = 1.0
    tw4: float = 1.0
    tl1: float = 1.0
    tl2: float = 1.0
    tl3: float = 1.0
    tl4: float = 1.0
    # Overall plan shape and RNG seed.
    type: FloorPlanType = FloorPlanType.RECTANGULAR
    seed: int = 1
    # Rectangular dimensions; radius/segments apply to circular plans.
    width: float = 4.0
    length: float = 4.0
    radius: float = 1.0
    segments: int = 32
    tail_angle: float = 0.0
    # Extension controls; random_extension_amount overrides the fixed amount.
    extension_amount: int = 4
    random_extension_amount: bool = True
class ModelType(type):
    """Metaclass for GINO models.

    Delegates selected table-level attributes to the model's __table__,
    rejects table operations on abstract models, and materializes
    declared-attr callables (marked __declared_attr_with_table__) when a
    concrete subclass is created.
    """
    def _check_abstract(self):
        # Models without a __table__ are abstract and must not be queried.
        if (self.__table__ is None):
            raise TypeError('GINO model {} is abstract, no table is defined.'.format(self.__name__))
    def __iter__(self):
        # Iterating the model class yields its table's columns.
        self._check_abstract()
        return iter(self.__table__.columns)
    def __getattr__(self, item):
        # Proxy a whitelisted set of SQLAlchemy table attributes; everything
        # else (and abstract access) surfaces as a uniform AttributeError.
        try:
            if (item in {'insert', 'join', 'outerjoin', 'gino'}):
                self._check_abstract()
                return getattr(self.__table__, item)
            raise AttributeError
        except AttributeError:
            raise AttributeError("type object '{}' has no attribute '{}'".format(self.__name__, item))
    def __prepare__(mcs, name, bases, **kwargs):
        # NOTE(review): Dict comes from elsewhere in this file; presumably an
        # ordered/attr-style mapping used as the class namespace — confirm.
        return Dict()
    def __new__(mcs, name, bases, namespace, **kwargs):
        rv = type.__new__(mcs, name, bases, namespace)
        # Keep the raw namespace so subclasses can inspect inherited entries.
        rv.__namespace__ = namespace
        if (rv.__table__ is None):
            rv.__table__ = getattr(rv, '_init_table')(rv)
        # Walk the MRO once, honoring the most-derived definition of each
        # name, and instantiate declared attributes that need the table.
        visited = set()
        for each_cls in rv.__mro__:
            for (k, v) in getattr(each_cls, '__namespace__', each_cls.__dict__).items():
                if (k in visited):
                    continue
                visited.add(k)
                if (callable(v) and getattr(v, '__declared_attr_with_table__', False)):
                    setattr(rv, k, v(rv))
        return rv
def build_scheduler(cfg_sheduler, optimizer, model, logger):
    """Build the LR scheduler and (optionally) the batch-norm momentum
    scheduler from a config dict.

    Returns (lr_scheduler, bnm_scheduler); bnm_scheduler is None unless
    cfg_sheduler['bnm_scheduler']['enable'] is truthy.
    """
    lr_cfg = cfg_sheduler['lr_scheduler']
    lr_scheduler = build_lr_scheduler(
        optimizer=optimizer,
        lr=lr_cfg['lr'],
        lr_clip=lr_cfg['clip'],
        lr_decay_list=lr_cfg['decay_list'],
        lr_decay_rate=lr_cfg['decay_rate'],
        last_epoch=lr_cfg['last_epoch'],
    )
    bnm_cfg = cfg_sheduler['bnm_scheduler']
    bnm_scheduler = None
    if bnm_cfg['enable']:
        bnm_scheduler = build_bnm_scheduler(
            model=model,
            bnm=bnm_cfg['momentum'],
            bnm_clip=bnm_cfg['clip'],
            bnm_decay_list=bnm_cfg['decay_list'],
            bnm_decay_rate=bnm_cfg['decay_rate'],
            last_epoch=bnm_cfg['last_epoch'],
        )
    return (lr_scheduler, bnm_scheduler)
def main():
    """Survey entry point: print OS language, version and install-date info.

    NOTE(review): relies on project-local `ops` / `dsz` modules; the record
    fields read below (languages, versioninfo, ...) are taken on faith from
    those modules.
    """
    parser = OptionParser()
    parser.add_option('--maxage', dest='maxage', default='3600', help='Maximum age of information to use before re-running commands for this module', type='int')
    (options, args) = parser.parse_args()
    ops.survey.print_header('OS information')
    # All three queries share the same freshness window (--maxage seconds);
    # cached results newer than that are reused instead of re-querying.
    lang_data = ops.system.systemversion.get_os_language(maxage=timedelta(seconds=options.maxage)).languages
    sysver_data = ops.system.systemversion.get_os_version(maxage=timedelta(seconds=options.maxage)).versioninfo
    install_date = ops.system.systemversion.get_os_install_date(maxage=timedelta(seconds=options.maxage))
    ops.survey.print_agestring(lang_data.dszobjage)
    print()
    dsz.ui.Echo(('OS installed on %s' % install_date), dsz.GOOD)
    print('- System language settings')
    print((' Locale: %s' % lang_data.localelanguage.english))
    print((' Installed: %s' % lang_data.installedlanguage.english))
    print((' UI: %s' % lang_data.uilanguage.english))
    print((' OS: %s' % ', '.join(map((lambda x: x.english), lang_data.oslanguages.oslanguage))))
    print('- System version information')
    print((' Version: %d.%d.%d.%d Build %d %s %s %s' % (sysver_data.major, sysver_data.minor, sysver_data.revisionmajor, sysver_data.revisionminor, sysver_data.build, sysver_data.platform, sysver_data.arch, sysver_data.extrainfo)))
class TestSerializer(APITestCase, CassandraTestCase):
    """Check that the Cassandra serializer validates and persists a record."""

    def test_serialize_creates(self):
        created_on = datetime.now()
        payload = {
            'id': str(uuid.uuid4()),
            'first_name': 'Homer',
            'last_name': 'Simpson',
            'is_real': True,
            'favourite_number': 10,
            'favourite_float_number': float(10.1),
            'created_on': created_on,
        }
        serializer = CassandraFamilyMemberSerializer(data=payload)
        serializer.is_valid()
        self.assertEqual(serializer.errors, {})
        self.assertEqual(serializer.is_valid(), True)
        serializer.save()
        # Exactly one row is created, with the submitted field values.
        self.assertEqual(CassandraFamilyMember.objects.all().count(), 1)
        saved = CassandraFamilyMember.objects.all()[0]
        self.assertEqual(saved.first_name, 'Homer')
        self.assertEqual(saved.last_name, 'Simpson')
        self.assertEqual(saved.is_real, True)
        self.assertEqual(saved.favourite_number, 10)
        self.assertEqual(saved.id, uuid.UUID(payload['id']))
class AppliedSponsorshipNotificationToSponsorsTests(TestCase):
    """Tests for the 'new sponsorship application' email sent to sponsors."""
    def setUp(self):
        self.notification = notifications.AppliedSponsorshipNotificationToSponsors()
        # NOTE(review): several emails are empty strings (''); presumably
        # scrubbed fixture data — confirm against the original test suite.
        self.user = baker.make(settings.AUTH_USER_MODEL, email='')
        self.verified_email = baker.make(EmailAddress, verified=True)
        self.unverified_email = baker.make(EmailAddress, verified=False)
        # Three contacts: a primary one, one with a verified address and one
        # with an unverified address (the last should be excluded).
        self.sponsor_contacts = [baker.make('sponsors.SponsorContact', email='', primary=True, sponsor__name='foo'), baker.make('sponsors.SponsorContact', email=self.verified_email.email), baker.make('sponsors.SponsorContact', email=self.unverified_email.email)]
        self.sponsor = baker.make('sponsors.Sponsor', contacts=self.sponsor_contacts)
        self.sponsorship = baker.make('sponsors.Sponsorship', sponsor=self.sponsor, submited_by=self.user)
        self.subject_template = 'sponsors/email/sponsor_new_application_subject.txt'
        self.content_template = 'sponsors/email/sponsor_new_application.txt'
    def test_send_email_using_correct_templates(self):
        # Subject/body must come from the expected templates, sent from the
        # configured notification address to submitter + verified contacts.
        context = {'sponsorship': self.sponsorship}
        expected_subject = render_to_string(self.subject_template, context).strip()
        expected_content = render_to_string(self.content_template, context).strip()
        self.notification.notify(sponsorship=self.sponsorship)
        self.assertTrue(mail.outbox)
        email = mail.outbox[0]
        self.assertEqual(expected_subject, email.subject)
        self.assertEqual(expected_content, email.body)
        self.assertEqual(settings.SPONSORSHIP_NOTIFICATION_FROM_EMAIL, email.from_email)
        self.assertCountEqual([self.user.email, self.verified_email.email], email.to)
    def test_send_email_to_correct_recipients(self):
        # Unverified contact addresses must not be included.
        context = {'user': self.user, 'sponsorship': self.sponsorship}
        expected_contacts = ['', self.verified_email.email]
        self.assertCountEqual(expected_contacts, self.notification.get_recipient_list(context))
    def test_list_required_assets_in_email_context(self):
        # Required-asset benefit features must be surfaced in the context.
        cfg = baker.make(RequiredTextAssetConfiguration, internal_name='input')
        benefit = baker.make(SponsorBenefit, sponsorship=self.sponsorship)
        asset = cfg.create_benefit_feature(benefit)
        request = Mock()
        base_context = {'sponsorship': self.sponsorship, 'request': request}
        context = self.notification.get_email_context(**base_context)
        self.assertEqual(3, len(context))
        self.assertEqual(self.sponsorship, context['sponsorship'])
        self.assertEqual(request, context['request'])
        self.assertEqual(1, len(context['required_assets']))
        self.assertIn(asset, context['required_assets'])
class Geodesic(object):
GEOGRAPHICLIB_GEODESIC_ORDER = 6
nA1_ = GEOGRAPHICLIB_GEODESIC_ORDER
nC1_ = GEOGRAPHICLIB_GEODESIC_ORDER
nC1p_ = GEOGRAPHICLIB_GEODESIC_ORDER
nA2_ = GEOGRAPHICLIB_GEODESIC_ORDER
nC2_ = GEOGRAPHICLIB_GEODESIC_ORDER
nA3_ = GEOGRAPHICLIB_GEODESIC_ORDER
nA3x_ = nA3_
nC3_ = GEOGRAPHICLIB_GEODESIC_ORDER
nC3x_ = ((nC3_ * (nC3_ - 1)) // 2)
nC4_ = GEOGRAPHICLIB_GEODESIC_ORDER
nC4x_ = ((nC4_ * (nC4_ + 1)) // 2)
maxit1_ = 20
maxit2_ = ((maxit1_ + Math.digits) + 10)
tiny_ = math.sqrt(Math.minval)
tol0_ = Math.epsilon
tol1_ = (200 * tol0_)
tol2_ = math.sqrt(tol0_)
tolb_ = (tol0_ * tol2_)
xthresh_ = (1000 * tol2_)
CAP_NONE = GeodesicCapability.CAP_NONE
CAP_C1 = GeodesicCapability.CAP_C1
CAP_C1p = (1 << 1)
CAP_C2 = (1 << 2)
CAP_C3 = (1 << 3)
CAP_C4 = (1 << 4)
CAP_ALL = 31
OUT_ALL = 32640
EMPTY = 0
LATITUDE = ((1 << 7) | CAP_NONE)
LONGITUDE = ((1 << 8) | CAP_C3)
AZIMUTH = ((1 << 9) | CAP_NONE)
DISTANCE = ((1 << 10) | CAP_C1)
DISTANCE_IN = (((1 << 11) | CAP_C1) | CAP_C1p)
REDUCEDLENGTH = (((1 << 12) | CAP_C1) | CAP_C2)
GEODESICSCALE = (((1 << 13) | CAP_C1) | CAP_C2)
AREA = ((1 << 14) | CAP_C4)
ALL = (OUT_ALL | CAP_ALL)
    def SinCosSeries(sinp, sinx, cosx, c, n):
        """Evaluate a trigonometric series by Clenshaw summation.

        With sinx = sin(sigma), cosx = cos(sigma): if sinp, sums the sine
        series c[1]*sin(2*sigma) + ... + c[n]*sin(2*n*sigma); otherwise the
        cosine series c[0]*cos(sigma) + c[1]*cos(3*sigma) + ...
        """
        # One past the last coefficient; sinp (a bool used as 0/1) shifts the base.
        k = (n + sinp)
        ar = ((2 * (cosx - sinx)) * (cosx + sinx))  # 2 * cos(2*sigma)
        y1 = 0
        if (n & 1):
            # Odd count: peel off the top term.
            k -= 1
            y0 = c[k]
        else:
            y0 = 0
        # Unroll the Clenshaw recurrence two terms at a time.
        n = (n // 2)
        while n:
            n -= 1
            k -= 1
            y1 = (((ar * y0) - y1) + c[k])
            k -= 1
            y0 = (((ar * y1) - y0) + c[k])
        return ((((2 * sinx) * cosx) * y0) if sinp else (cosx * (y0 - y1)))
    SinCosSeries = staticmethod(SinCosSeries)
    def AngRound(x):
        """Coarsen x near zero so that tiny values round consistently.

        For |x| < 1/16 the expression z - (z - y) deliberately discards the
        low-order bits of y via float rounding — do NOT 'simplify' it to y.
        The sign of x is preserved.
        """
        z = (1 / 16.0)
        y = abs(x)
        if (y < z):
            y = (z - (z - y))
        return ((- y) if (x < 0) else y)
    AngRound = staticmethod(AngRound)
def SinCosNorm(sinx, cosx):
r = math.hypot(sinx, cosx)
return ((sinx / r), (cosx / r))
SinCosNorm = staticmethod(SinCosNorm)
    def Astroid(x, y):
        """Solve k**4 + 2*k**3 - (x**2 + y**2 - 1)*k**2 - 2*y**2*k - y**2 = 0
        for the positive root k.

        Used by InverseStart to estimate the starting azimuth near the
        antipodal point.
        """
        p = Math.sq(x)
        q = Math.sq(y)
        r = (((p + q) - 1) / 6)
        if (not ((q == 0) and (r <= 0))):
            # Solve the resolvent cubic in closed form, choosing the branch
            # that avoids cancellation.
            S = ((p * q) / 4)
            r2 = Math.sq(r)
            r3 = (r * r2)
            disc = (S * (S + (2 * r3)))
            u = r
            if (disc >= 0):
                T3 = (S + r3)
                # Pick the sign of sqrt(disc) that maximizes |T3|.
                T3 += ((- math.sqrt(disc)) if (T3 < 0) else math.sqrt(disc))
                T = Math.cbrt(T3)
                u += (T + ((r2 / T) if (T != 0) else 0))
            else:
                # Three real roots: trigonometric solution; this angle choice
                # picks the root ensuring u > 0.
                ang = math.atan2(math.sqrt((- disc)), (- (S + r3)))
                u += ((2 * r) * math.cos((ang / 3)))
            v = math.sqrt((Math.sq(u) + q))
            # u + v, computed to avoid cancellation when u < 0.
            uv = ((q / (v - u)) if (u < 0) else (u + v))
            w = ((uv - q) / (2 * v))
            k = (uv / (math.sqrt((uv + Math.sq(w))) + w))
        else:
            # y = 0 with |x| <= 1: the root is k = 0.
            k = 0
        return k
    Astroid = staticmethod(Astroid)
    def A1m1f(eps):
        """Return A1 - 1, where A1 scales the distance integral; a series in
        the expansion parameter eps (Horner form in eps**2)."""
        eps2 = Math.sq(eps)
        t = ((eps2 * ((eps2 * (eps2 + 4)) + 64)) / 256)
        return ((t + eps) / (1 - eps))
    A1m1f = staticmethod(A1m1f)
    def C1f(eps, c):
        """Fill c[1..6] in place with the C1[l] coefficients of the
        sigma -> distance series, each already scaled by eps**l."""
        eps2 = Math.sq(eps)
        d = eps  # running power of eps: eps**l
        c[1] = ((d * (((6 - eps2) * eps2) - 16)) / 32)
        d *= eps
        c[2] = ((d * (((64 - (9 * eps2)) * eps2) - 128)) / 2048)
        d *= eps
        c[3] = ((d * ((9 * eps2) - 16)) / 768)
        d *= eps
        c[4] = ((d * ((3 * eps2) - 5)) / 512)
        d *= eps
        c[5] = (((- 7) * d) / 1280)
        d *= eps
        c[6] = (((- 7) * d) / 2048)
    C1f = staticmethod(C1f)
    def C1pf(eps, c):
        """Fill c[1..6] in place with C1'[l], the coefficients of the inverse
        (distance -> sigma) series, each scaled by eps**l."""
        eps2 = Math.sq(eps)
        d = eps  # running power of eps: eps**l
        c[1] = ((d * ((eps2 * ((205 * eps2) - 432)) + 768)) / 1536)
        d *= eps
        c[2] = ((d * ((eps2 * ((4005 * eps2) - 4736)) + 3840)) / 12288)
        d *= eps
        c[3] = ((d * (116 - (225 * eps2))) / 384)
        d *= eps
        c[4] = ((d * (2695 - (7173 * eps2))) / 7680)
        d *= eps
        c[5] = ((3467 * d) / 7680)
        d *= eps
        c[6] = ((38081 * d) / 61440)
    C1pf = staticmethod(C1pf)
    def A2m1f(eps):
        """Return A2 - 1, where A2 scales the reduced-length integral; a
        series in the expansion parameter eps."""
        eps2 = Math.sq(eps)
        t = ((eps2 * ((eps2 * ((25 * eps2) + 36)) + 64)) / 256)
        return ((t * (1 - eps)) - eps)
    A2m1f = staticmethod(A2m1f)
    def C2f(eps, c):
        """Fill c[1..6] in place with the C2[l] coefficients used for the
        reduced-length computation, each scaled by eps**l."""
        eps2 = Math.sq(eps)
        d = eps  # running power of eps: eps**l
        c[1] = ((d * ((eps2 * (eps2 + 2)) + 16)) / 32)
        d *= eps
        c[2] = ((d * ((eps2 * ((35 * eps2) + 64)) + 384)) / 2048)
        d *= eps
        c[3] = ((d * ((15 * eps2) + 80)) / 768)
        d *= eps
        c[4] = ((d * ((7 * eps2) + 35)) / 512)
        d *= eps
        c[5] = ((63 * d) / 1280)
        d *= eps
        c[6] = ((77 * d) / 2048)
    C2f = staticmethod(C2f)
    def __init__(self, a, f):
        """Construct a Geodesic for an ellipsoid with equatorial radius
        ``a`` (meters) and flattening ``f``.

        A value f > 1 is interpreted as the reciprocal flattening 1/f.
        Raises ValueError if either semi-axis is not positive and finite.
        """
        self._a = float(a)
        self._f = (float(f) if (f <= 1) else (1.0 / f))
        self._f1 = (1 - self._f)
        self._e2 = (self._f * (2 - self._f))  # squared (first) eccentricity
        self._ep2 = (self._e2 / Math.sq(self._f1))  # squared second eccentricity e'^2
        self._n = (self._f / (2 - self._f))  # third flattening
        self._b = (self._a * self._f1)  # polar semi-axis
        # Authalic radius squared; the conditional handles oblate (e2 > 0),
        # prolate (e2 < 0) and spherical (e2 == 0) cases in one expression.
        self._c2 = ((Math.sq(self._a) + (Math.sq(self._b) * (1 if (self._e2 == 0) else ((Math.atanh(math.sqrt(self._e2)) if (self._e2 > 0) else math.atan(math.sqrt((- self._e2)))) / math.sqrt(abs(self._e2)))))) / 2)
        # Tolerance used by InverseStart's short-line test (ssig12 < _etol2).
        self._etol2 = ((0.1 * Geodesic.tol2_) / math.sqrt(((max(0.001, abs(self._f)) * min(1.0, (1 - (self._f / 2)))) / 2)))
        if (not (Math.isfinite(self._a) and (self._a > 0))):
            raise ValueError('Major radius is not positive')
        if (not (Math.isfinite(self._b) and (self._b > 0))):
            raise ValueError('Minor radius is not positive')
        # Preallocate the coefficient arrays, then fill them below.
        self._A3x = list(range(Geodesic.nA3x_))
        self._C3x = list(range(Geodesic.nC3x_))
        self._C4x = list(range(Geodesic.nC4x_))
        self.A3coeff()
        self.C3coeff()
        self.C4coeff()
    def A3coeff(self):
        """Precompute _A3x: the coefficients of A3 as a polynomial in eps,
        each entry itself a polynomial in the third flattening n."""
        _n = self._n
        self._A3x[0] = 1
        self._A3x[1] = ((_n - 1) / 2)
        self._A3x[2] = (((_n * ((3 * _n) - 1)) - 2) / 8)
        self._A3x[3] = (((((- _n) - 3) * _n) - 1) / 16)
        self._A3x[4] = ((((- 2) * _n) - 3) / 64)
        self._A3x[5] = ((- 3) / 128.0)
    def C3coeff(self):
        """Precompute _C3x: the C3 (longitude series) coefficients as
        polynomials in the third flattening n, stored in the packed
        triangular layout consumed by C3f."""
        _n = self._n
        self._C3x[0] = ((1 - _n) / 4)
        self._C3x[1] = ((1 - (_n * _n)) / 8)
        self._C3x[2] = ((((3 - _n) * _n) + 3) / 64)
        self._C3x[3] = (((2 * _n) + 5) / 128)
        self._C3x[4] = (3 / 128.0)
        self._C3x[5] = ((((_n - 3) * _n) + 2) / 32)
        self._C3x[6] = ((((((- 3) * _n) - 2) * _n) + 3) / 64)
        self._C3x[7] = ((_n + 3) / 128)
        self._C3x[8] = (5 / 256.0)
        self._C3x[9] = (((_n * ((5 * _n) - 9)) + 5) / 192)
        self._C3x[10] = ((9 - (10 * _n)) / 384)
        self._C3x[11] = (7 / 512.0)
        self._C3x[12] = ((7 - (14 * _n)) / 512)
        self._C3x[13] = (7 / 512.0)
        self._C3x[14] = (21 / 2560.0)
    def C4coeff(self):
        """Precompute _C4x: the C4 (area series) coefficients as polynomials
        in the third flattening n, stored in the packed triangular layout
        consumed by C4f."""
        _n = self._n
        self._C4x[0] = (((_n * ((_n * ((_n * ((_n * ((100 * _n) + 208)) + 572)) + 3432)) - 12012)) + 30030) / 45045)
        self._C4x[1] = (((_n * ((_n * ((_n * ((64 * _n) + 624)) - 4576)) + 6864)) - 3003) / 15015)
        self._C4x[2] = (((_n * (((14144 - (10656 * _n)) * _n) - 4576)) - 858) / 45045)
        self._C4x[3] = ((((((- 224) * _n) - 4784) * _n) + 1573) / 45045)
        self._C4x[4] = (((1088 * _n) + 156) / 45045)
        self._C4x[5] = (97 / 15015.0)
        self._C4x[6] = (((_n * ((_n * (((((- 64) * _n) - 624) * _n) + 4576)) - 6864)) + 3003) / 135135)
        self._C4x[7] = (((_n * ((_n * ((5952 * _n) - 11648)) + 9152)) - 2574) / 135135)
        self._C4x[8] = (((_n * ((5792 * _n) + 1040)) - 1287) / 135135)
        self._C4x[9] = ((468 - (2944 * _n)) / 135135)
        self._C4x[10] = (1 / 9009.0)
        self._C4x[11] = (((_n * (((4160 - (1440 * _n)) * _n) - 4576)) + 1716) / 225225)
        self._C4x[12] = ((((4992 - (8448 * _n)) * _n) - 1144) / 225225)
        self._C4x[13] = (((1856 * _n) - 936) / 225225)
        self._C4x[14] = (8 / 10725.0)
        self._C4x[15] = (((_n * ((3584 * _n) - 3328)) + 1144) / 315315)
        self._C4x[16] = (((1024 * _n) - 208) / 105105)
        self._C4x[17] = ((- 136) / 63063.0)
        self._C4x[18] = ((832 - (2560 * _n)) / 405405)
        self._C4x[19] = ((- 128) / 135135.0)
        self._C4x[20] = (128 / 99099.0)
def A3f(self, eps):
v = 0
for i in range((Geodesic.nA3x_ - 1), (- 1), (- 1)):
v = ((eps * v) + self._A3x[i])
return v
    def C3f(self, eps, c):
        """Fill c[1..nC3_-1] in place with the C3[l] coefficients evaluated
        at eps (from the packed _C3x table), each scaled by eps**l."""
        # Horner evaluation of each polynomial, walking _C3x from the end.
        j = Geodesic.nC3x_
        k = (Geodesic.nC3_ - 1)
        while k:
            t = 0
            for _ in range((Geodesic.nC3_ - k)):
                j -= 1
                t = ((eps * t) + self._C3x[j])
            c[k] = t
            k -= 1
        # Post-multiply each coefficient by eps**k.
        mult = 1
        for k in range(1, Geodesic.nC3_):
            mult *= eps
            c[k] *= mult
    def C4f(self, eps, c):
        """Fill c[0..nC4_-1] in place with the C4[l] (area series)
        coefficients evaluated at eps, each scaled by eps**l."""
        # Horner evaluation of each polynomial, walking _C4x from the end.
        j = Geodesic.nC4x_
        k = Geodesic.nC4_
        while k:
            t = 0
            for _ in range(((Geodesic.nC4_ - k) + 1)):
                j -= 1
                t = ((eps * t) + self._C4x[j])
            k -= 1
            c[k] = t
        # Post-multiply each coefficient by eps**k (c[0] stays unscaled).
        mult = 1
        for k in range(1, Geodesic.nC4_):
            mult *= eps
            c[k] *= mult
    def Lengths(self, eps, sig12, ssig1, csig1, dn1, ssig2, csig2, dn2, cbet1, cbet2, scalep, C1a, C2a):
        """Return (s12b, m12b, m0, M12, M21) for the arc from sigma1 to sigma2.

        s12b = distance / b; m12b = reduced length / b; m0 = coefficient of
        the linear term in m12b.  The geodesic scales M12/M21 are computed
        only when scalep is true (otherwise NaN).  C1a and C2a are scratch
        coefficient arrays, (re)filled here.
        """
        Geodesic.C1f(eps, C1a)
        Geodesic.C2f(eps, C2a)
        A1m1 = Geodesic.A1m1f(eps)
        AB1 = ((1 + A1m1) * (Geodesic.SinCosSeries(True, ssig2, csig2, C1a, Geodesic.nC1_) - Geodesic.SinCosSeries(True, ssig1, csig1, C1a, Geodesic.nC1_)))
        A2m1 = Geodesic.A2m1f(eps)
        AB2 = ((1 + A2m1) * (Geodesic.SinCosSeries(True, ssig2, csig2, C2a, Geodesic.nC2_) - Geodesic.SinCosSeries(True, ssig1, csig1, C2a, Geodesic.nC2_)))
        m0 = (A1m1 - A2m1)
        J12 = ((m0 * sig12) + (AB1 - AB2))
        # Reduced length: the dn terms cancel in a way that avoids
        # catastrophic cancellation for nearly-coincident points.
        m12b = (((dn2 * (csig1 * ssig2)) - (dn1 * (ssig1 * csig2))) - ((csig1 * csig2) * J12))
        s12b = (((1 + A1m1) * sig12) + AB1)
        if scalep:
            csig12 = ((csig1 * csig2) + (ssig1 * ssig2))
            t = (((self._ep2 * (cbet1 - cbet2)) * (cbet1 + cbet2)) / (dn1 + dn2))
            M12 = (csig12 + ((((t * ssig2) - (csig2 * J12)) * ssig1) / dn1))
            M21 = (csig12 - ((((t * ssig1) - (csig1 * J12)) * ssig2) / dn2))
        else:
            M12 = M21 = Math.nan
        return (s12b, m12b, m0, M12, M21)
    def InverseStart(self, sbet1, cbet1, dn1, sbet2, cbet2, dn2, lam12, C1a, C2a):
        """Return a starting point for Newton's method in the inverse problem.

        Returns (sig12, salp1, calp1, salp2, calp2, dnm).  If sig12 >= 0 the
        (short-line) problem was solved exactly and salp2/calp2/dnm are
        valid; otherwise only the starting azimuth salp1/calp1 is meaningful.
        """
        sig12 = (- 1)
        salp2 = calp2 = dnm = Math.nan
        # sin/cos of beta2 - beta1, and sin of beta2 + beta1 (sbet12a).
        sbet12 = ((sbet2 * cbet1) - (cbet2 * sbet1))
        cbet12 = ((cbet2 * cbet1) + (sbet2 * sbet1))
        sbet12a = (sbet2 * cbet1)
        sbet12a += (cbet2 * sbet1)
        shortline = ((cbet12 >= 0) and (sbet12 < 0.5) and ((cbet2 * lam12) < 0.5))
        omg12 = lam12
        if shortline:
            # Use a spherical approximation on an auxiliary sphere scaled by dnm.
            sbetm2 = Math.sq((sbet1 + sbet2))
            sbetm2 /= (sbetm2 + Math.sq((cbet1 + cbet2)))
            dnm = math.sqrt((1 + (self._ep2 * sbetm2)))
            omg12 /= (self._f1 * dnm)
        somg12 = math.sin(omg12)
        comg12 = math.cos(omg12)
        salp1 = (cbet2 * somg12)
        # Two algebraically equivalent forms, chosen to avoid cancellation.
        calp1 = ((sbet12 + (((cbet2 * sbet1) * Math.sq(somg12)) / (1 + comg12))) if (comg12 >= 0) else (sbet12a - (((cbet2 * sbet1) * Math.sq(somg12)) / (1 - comg12))))
        ssig12 = math.hypot(salp1, calp1)
        csig12 = ((sbet1 * sbet2) + ((cbet1 * cbet2) * comg12))
        if (shortline and (ssig12 < self._etol2)):
            # Really short line: solve directly and report sig12 >= 0.
            salp2 = (cbet1 * somg12)
            calp2 = (sbet12 - ((cbet1 * sbet2) * ((Math.sq(somg12) / (1 + comg12)) if (comg12 >= 0) else (1 - comg12))))
            (salp2, calp2) = Geodesic.SinCosNorm(salp2, calp2)
            sig12 = math.atan2(ssig12, csig12)
        elif ((abs(self._n) >= 0.1) or (csig12 >= 0) or (ssig12 >= (((6 * abs(self._n)) * math.pi) * Math.sq(cbet1)))):
            # The initial guess above is already good enough.
            pass
        else:
            # Near-antipodal case: map to (x, y) coordinates in which the
            # antipodal point sits at the origin.
            if (self._f >= 0):
                k2 = (Math.sq(sbet1) * self._ep2)
                eps = (k2 / ((2 * (1 + math.sqrt((1 + k2)))) + k2))
                lamscale = (((self._f * cbet1) * self.A3f(eps)) * math.pi)
                betscale = (lamscale * cbet1)
                x = ((lam12 - math.pi) / lamscale)
                y = (sbet12a / betscale)
            else:
                # Prolate case: derive the scales from the reduced length.
                cbet12a = ((cbet2 * cbet1) - (sbet2 * sbet1))
                bet12a = math.atan2(sbet12a, cbet12a)
                (dummy, m12b, m0, dummy, dummy) = self.Lengths(self._n, (math.pi + bet12a), sbet1, (- cbet1), dn1, sbet2, cbet2, dn2, cbet1, cbet2, False, C1a, C2a)
                x = ((- 1) + (m12b / (((cbet1 * cbet2) * m0) * math.pi)))
                betscale = ((sbet12a / x) if (x < (- 0.01)) else (((- self._f) * Math.sq(cbet1)) * math.pi))
                lamscale = (betscale / cbet1)
                y = ((lam12 - math.pi) / lamscale)
            if ((y > (- Geodesic.tol1_)) and (x > ((- 1) - Geodesic.xthresh_))):
                # Close to the x-axis: pick the azimuth directly from x.
                if (self._f >= 0):
                    salp1 = min(1.0, (- x))
                    calp1 = (- math.sqrt((1 - Math.sq(salp1))))
                else:
                    calp1 = max((0.0 if (x > (- Geodesic.tol1_)) else (- 1.0)), x)
                    salp1 = math.sqrt((1 - Math.sq(calp1)))
            else:
                # Otherwise solve the astroid equation for the estimate.
                k = Geodesic.Astroid(x, y)
                omg12a = (lamscale * ((((- x) * k) / (1 + k)) if (self._f >= 0) else (((- y) * (1 + k)) / k)))
                somg12 = math.sin(omg12a)
                comg12 = (- math.cos(omg12a))
                salp1 = (cbet2 * somg12)
                calp1 = (sbet12a - (((cbet2 * sbet1) * Math.sq(somg12)) / (1 - comg12)))
        # Sanitize the starting azimuth before returning.
        if (salp1 > 0):
            (salp1, calp1) = Geodesic.SinCosNorm(salp1, calp1)
        else:
            salp1 = 1
            calp1 = 0
        return (sig12, salp1, calp1, salp2, calp2, dnm)
def Lambda12(self, sbet1, cbet1, dn1, sbet2, cbet2, dn2, salp1, calp1, diffp, C1a, C2a, C3a):
    """Solve the "hybrid" geodesic problem for a trial azimuth at point 1.

    Given reduced latitudes (sbet/cbet) of both endpoints and a trial
    azimuth (salp1, calp1), compute the longitude difference lam12 that
    this azimuth produces, plus the spherical arc sig12 and related
    angles.  When diffp is true also return dlam12, the derivative of
    lam12 with respect to the azimuth, used by the Newton iteration in
    GenInverse.  C1a/C2a/C3a are scratch coefficient arrays.

    Returns (lam12, salp2, calp2, sig12, ssig1, csig1, ssig2, csig2,
    eps, domg12, dlam12).
    """
    if ((sbet1 == 0) and (calp1 == 0)):
        # Break the degeneracy of an equatorial line: nudge the azimuth
        # off exactly 90 degrees so the formulas below stay well-defined.
        calp1 = (- Geodesic.tiny_)
    # salp0/calp0: azimuth of the geodesic at the equator crossing.
    salp0 = (salp1 * cbet1)
    calp0 = math.hypot(calp1, (salp1 * sbet1))
    # sig1/omg1: arc length and longitude angles at point 1 on the
    # auxiliary sphere (tan(omg1) = sin(alp0) * tan(sig1)).
    ssig1 = sbet1
    somg1 = (salp0 * sbet1)
    csig1 = comg1 = (calp1 * cbet1)
    (ssig1, csig1) = Geodesic.SinCosNorm(ssig1, csig1)
    # Azimuth at point 2; the guarded forms avoid cancellation when the
    # two latitudes coincide.
    salp2 = ((salp0 / cbet2) if (cbet2 != cbet1) else salp1)
    calp2 = ((math.sqrt((Math.sq((calp1 * cbet1)) + (((cbet2 - cbet1) * (cbet1 + cbet2)) if (cbet1 < (- sbet1)) else ((sbet1 - sbet2) * (sbet1 + sbet2))))) / cbet2) if ((cbet2 != cbet1) or (abs(sbet2) != (- sbet1))) else abs(calp1))
    ssig2 = sbet2
    somg2 = (salp0 * sbet2)
    csig2 = comg2 = (calp2 * cbet2)
    (ssig2, csig2) = Geodesic.SinCosNorm(ssig2, csig2)
    # sig12 = sig2 - sig1, omg12 = omg2 - omg1, clamped to [0, pi].
    sig12 = math.atan2(max(((csig1 * ssig2) - (ssig1 * csig2)), 0.0), ((csig1 * csig2) + (ssig1 * ssig2)))
    omg12 = math.atan2(max(((comg1 * somg2) - (somg1 * comg2)), 0.0), ((comg1 * comg2) + (somg1 * somg2)))
    # Series expansion parameter eps from k2 = ep2 * cos(alp0)^2.
    k2 = (Math.sq(calp0) * self._ep2)
    eps = (k2 / ((2 * (1 + math.sqrt((1 + k2)))) + k2))
    self.C3f(eps, C3a)
    B312 = (Geodesic.SinCosSeries(True, ssig2, csig2, C3a, (Geodesic.nC3_ - 1)) - Geodesic.SinCosSeries(True, ssig1, csig1, C3a, (Geodesic.nC3_ - 1)))
    h0 = ((- self._f) * self.A3f(eps))
    # domg12: correction converting spherical longitude to ellipsoidal.
    domg12 = ((salp0 * h0) * (sig12 + B312))
    lam12 = (omg12 + domg12)
    if diffp:
        if (calp2 == 0):
            # Geodesic tangent to pole: use the limiting form.
            dlam12 = ((((- 2) * self._f1) * dn1) / sbet1)
        else:
            (dummy, dlam12, dummy, dummy, dummy) = self.Lengths(eps, sig12, ssig1, csig1, dn1, ssig2, csig2, dn2, cbet1, cbet2, False, C1a, C2a)
            dlam12 *= (self._f1 / (calp2 * cbet2))
    else:
        dlam12 = Math.nan
    return (lam12, salp2, calp2, sig12, ssig1, csig1, ssig2, csig2, eps, domg12, dlam12)
def GenInverse(self, lat1, lon1, lat2, lon2, outmask):
    """Solve the inverse geodesic problem between two points.

    outmask selects which of the outputs are computed.  Returns the
    tuple (a12, s12, azi1, azi2, m12, M12, M21, S12); values not
    requested remain NaN.  The algorithm (Karney 2013): canonicalize
    the endpoints using symmetries, handle meridional and equatorial
    geodesics directly, and otherwise solve for the azimuth at point 1
    with Newton's method on Lambda12, falling back to bisection.
    """
    a12 = s12 = azi1 = azi2 = m12 = M12 = M21 = S12 = Math.nan
    outmask &= Geodesic.OUT_ALL
    # Canonicalize: lon12 in [0, 180], |lat1| >= |lat2|, lat1 <= 0.
    # The sign/swap flags undo these transformations at the end.
    lon12 = Math.AngDiff(Math.AngNormalize(lon1), Math.AngNormalize(lon2))
    lon12 = Geodesic.AngRound(lon12)
    lonsign = (1 if (lon12 >= 0) else (- 1))
    lon12 *= lonsign
    lat1 = Geodesic.AngRound(lat1)
    lat2 = Geodesic.AngRound(lat2)
    swapp = (1 if (abs(lat1) >= abs(lat2)) else (- 1))
    if (swapp < 0):
        lonsign *= (- 1)
        (lat2, lat1) = (lat1, lat2)
    latsign = (1 if (lat1 < 0) else (- 1))
    lat1 *= latsign
    lat2 *= latsign
    # Reduced latitudes beta1, beta2 (sin/cos pairs, normalized).
    phi = (lat1 * Math.degree)
    sbet1 = (self._f1 * math.sin(phi))
    cbet1 = (Geodesic.tiny_ if (lat1 == (- 90)) else math.cos(phi))
    (sbet1, cbet1) = Geodesic.SinCosNorm(sbet1, cbet1)
    phi = (lat2 * Math.degree)
    sbet2 = (self._f1 * math.sin(phi))
    cbet2 = (Geodesic.tiny_ if (abs(lat2) == 90) else math.cos(phi))
    (sbet2, cbet2) = Geodesic.SinCosNorm(sbet2, cbet2)
    # If nearly antipodal in latitude, force exact symmetry so that the
    # pathological cases are handled consistently.
    if (cbet1 < (- sbet1)):
        if (cbet2 == cbet1):
            sbet2 = (sbet1 if (sbet2 < 0) else (- sbet1))
    elif (abs(sbet2) == (- sbet1)):
        cbet2 = cbet1
    dn1 = math.sqrt((1 + (self._ep2 * Math.sq(sbet1))))
    dn2 = math.sqrt((1 + (self._ep2 * Math.sq(sbet2))))
    lam12 = (lon12 * Math.degree)
    slam12 = (0 if (lon12 == 180) else math.sin(lam12))
    clam12 = math.cos(lam12)
    # Scratch coefficient arrays for the series expansions.
    C1a = list(range((Geodesic.nC1_ + 1)))
    C2a = list(range((Geodesic.nC2_ + 1)))
    C3a = list(range(Geodesic.nC3_))
    meridian = ((lat1 == (- 90)) or (slam12 == 0))
    if meridian:
        # Geodesic runs along a meridian: azimuths are known directly.
        calp1 = clam12
        salp1 = slam12
        calp2 = 1
        salp2 = 0
        ssig1 = sbet1
        csig1 = (calp1 * cbet1)
        ssig2 = sbet2
        csig2 = (calp2 * cbet2)
        sig12 = math.atan2(max(((csig1 * ssig2) - (ssig1 * csig2)), 0.0), ((csig1 * csig2) + (ssig1 * ssig2)))
        (s12x, m12x, dummy, M12, M21) = self.Lengths(self._n, sig12, ssig1, csig1, dn1, ssig2, csig2, dn2, cbet1, cbet2, ((outmask & Geodesic.GEODESICSCALE) != 0), C1a, C2a)
        if ((sig12 < 1) or (m12x >= 0)):
            m12x *= self._b
            s12x *= self._b
            a12 = (sig12 / Math.degree)
        else:
            # m12 < 0: prolate case where the meridian is not the
            # shortest path; fall through to the general solver.
            meridian = False
    if ((not meridian) and (sbet1 == 0) and ((self._f <= 0) or (lam12 <= (math.pi - (self._f * math.pi))))):
        # Both points on the equator and the equatorial arc is shortest.
        calp1 = calp2 = 0
        salp1 = salp2 = 1
        s12x = (self._a * lam12)
        sig12 = omg12 = (lam12 / self._f1)
        m12x = (self._b * math.sin(sig12))
        if (outmask & Geodesic.GEODESICSCALE):
            M12 = M21 = math.cos(sig12)
        a12 = (lon12 / self._f1)
    elif (not meridian):
        # General case: get a starting azimuth (or, for short lines, a
        # complete solution with sig12 >= 0).
        (sig12, salp1, calp1, salp2, calp2, dnm) = self.InverseStart(sbet1, cbet1, dn1, sbet2, cbet2, dn2, lam12, C1a, C2a)
        if (sig12 >= 0):
            # Short-line shortcut from InverseStart.
            s12x = ((sig12 * self._b) * dnm)
            m12x = ((Math.sq(dnm) * self._b) * math.sin((sig12 / dnm)))
            if (outmask & Geodesic.GEODESICSCALE):
                M12 = M21 = math.cos((sig12 / dnm))
            a12 = (sig12 / Math.degree)
            omg12 = (lam12 / (self._f1 * dnm))
        else:
            # Newton's method on Lambda12, maintaining a bracket
            # [salp1a/calp1a, salp1b/calp1b] for bisection fallback.
            numit = 0
            tripn = tripb = False
            salp1a = Geodesic.tiny_
            calp1a = 1
            salp1b = Geodesic.tiny_
            calp1b = (- 1)
            while (numit < Geodesic.maxit2_):
                (nlam12, salp2, calp2, sig12, ssig1, csig1, ssig2, csig2, eps, omg12, dv) = self.Lambda12(sbet1, cbet1, dn1, sbet2, cbet2, dn2, salp1, calp1, (numit < Geodesic.maxit1_), C1a, C2a, C3a)
                v = (nlam12 - lam12)
                # Convergence test; tolerance loosens once tripn is set.
                if (tripb or (not (abs(v) >= ((8 if tripn else 2) * Geodesic.tol0_)))):
                    break
                # Tighten the bracket on the side matching sign(v).
                if ((v > 0) and ((numit > Geodesic.maxit1_) or ((calp1 / salp1) > (calp1b / salp1b)))):
                    salp1b = salp1
                    calp1b = calp1
                elif ((v < 0) and ((numit > Geodesic.maxit1_) or ((calp1 / salp1) < (calp1a / salp1a)))):
                    salp1a = salp1
                    calp1a = calp1
                numit += 1
                if ((numit < Geodesic.maxit1_) and (dv > 0)):
                    # Newton step on the azimuth.
                    dalp1 = ((- v) / dv)
                    sdalp1 = math.sin(dalp1)
                    cdalp1 = math.cos(dalp1)
                    nsalp1 = ((salp1 * cdalp1) + (calp1 * sdalp1))
                    if ((nsalp1 > 0) and (abs(dalp1) < math.pi)):
                        calp1 = ((calp1 * cdalp1) - (salp1 * sdalp1))
                        salp1 = nsalp1
                        (salp1, calp1) = Geodesic.SinCosNorm(salp1, calp1)
                        tripn = (abs(v) <= (16 * Geodesic.tol0_))
                        continue
                # Newton unusable: bisect the bracket instead.
                salp1 = ((salp1a + salp1b) / 2)
                calp1 = ((calp1a + calp1b) / 2)
                (salp1, calp1) = Geodesic.SinCosNorm(salp1, calp1)
                tripn = False
                tripb = (((abs((salp1a - salp1)) + (calp1a - calp1)) < Geodesic.tolb_) or ((abs((salp1 - salp1b)) + (calp1 - calp1b)) < Geodesic.tolb_))
            (s12x, m12x, dummy, M12, M21) = self.Lengths(eps, sig12, ssig1, csig1, dn1, ssig2, csig2, dn2, cbet1, cbet2, ((outmask & Geodesic.GEODESICSCALE) != 0), C1a, C2a)
            m12x *= self._b
            s12x *= self._b
            a12 = (sig12 / Math.degree)
            omg12 = (lam12 - omg12)
    if (outmask & Geodesic.DISTANCE):
        # (0 + x) converts a possible -0.0 to +0.0.
        s12 = (0 + s12x)
    if (outmask & Geodesic.REDUCEDLENGTH):
        m12 = (0 + m12x)
    if (outmask & Geodesic.AREA):
        # Area between the geodesic and the equator (Karney eq. 60 ff).
        salp0 = (salp1 * cbet1)
        calp0 = math.hypot(calp1, (salp1 * sbet1))
        if ((calp0 != 0) and (salp0 != 0)):
            ssig1 = sbet1
            csig1 = (calp1 * cbet1)
            ssig2 = sbet2
            csig2 = (calp2 * cbet2)
            k2 = (Math.sq(calp0) * self._ep2)
            eps = (k2 / ((2 * (1 + math.sqrt((1 + k2)))) + k2))
            A4 = (((Math.sq(self._a) * calp0) * salp0) * self._e2)
            (ssig1, csig1) = Geodesic.SinCosNorm(ssig1, csig1)
            (ssig2, csig2) = Geodesic.SinCosNorm(ssig2, csig2)
            C4a = list(range(Geodesic.nC4_))
            self.C4f(eps, C4a)
            B41 = Geodesic.SinCosSeries(False, ssig1, csig1, C4a, Geodesic.nC4_)
            B42 = Geodesic.SinCosSeries(False, ssig2, csig2, C4a, Geodesic.nC4_)
            S12 = (A4 * (B42 - B41))
        else:
            S12 = 0
        # Spherical-excess term alp12 = alp2 - alp1, computed two ways
        # depending on how close the points are to antipodal.
        if ((not meridian) and (omg12 < (0.75 * math.pi)) and ((sbet2 - sbet1) < 1.75)):
            somg12 = math.sin(omg12)
            domg12 = (1 + math.cos(omg12))
            dbet1 = (1 + cbet1)
            dbet2 = (1 + cbet2)
            alp12 = (2 * math.atan2((somg12 * ((sbet1 * dbet2) + (sbet2 * dbet1))), (domg12 * ((sbet1 * sbet2) + (dbet1 * dbet2)))))
        else:
            salp12 = ((salp2 * calp1) - (calp2 * salp1))
            calp12 = ((calp2 * calp1) + (salp2 * salp1))
            if ((salp12 == 0) and (calp12 < 0)):
                salp12 = (Geodesic.tiny_ * calp1)
                calp12 = (- 1)
            alp12 = math.atan2(salp12, calp12)
        S12 += (self._c2 * alp12)
        S12 *= ((swapp * lonsign) * latsign)
        S12 += 0
    # Undo the canonicalizing transformations.
    if (swapp < 0):
        (salp2, salp1) = (salp1, salp2)
        (calp2, calp1) = (calp1, calp2)
        if (outmask & Geodesic.GEODESICSCALE):
            (M21, M12) = (M12, M21)
    salp1 *= (swapp * lonsign)
    calp1 *= (swapp * latsign)
    salp2 *= (swapp * lonsign)
    calp2 *= (swapp * latsign)
    if (outmask & Geodesic.AZIMUTH):
        azi1 = (0 - (math.atan2((- salp1), calp1) / Math.degree))
        azi2 = (0 - (math.atan2((- salp2), calp2) / Math.degree))
    return (a12, s12, azi1, azi2, m12, M12, M21, S12)
def CheckPosition(lat, lon):
    """Validate a geographic position.

    Raises ValueError for an out-of-range latitude or longitude and
    returns the longitude normalized by Math.AngNormalize.
    """
    if abs(lat) > 90:
        msg = 'latitude ' + str(lat) + ' not in [-90, 90]'
        raise ValueError(msg)
    if lon < -540 or lon >= 540:
        msg = 'longitude ' + str(lon) + ' not in [-540, 540)'
        raise ValueError(msg)
    return Math.AngNormalize(lon)
CheckPosition = staticmethod(CheckPosition)
def CheckAzimuth(azi):
    """Validate an azimuth; return it normalized by Math.AngNormalize."""
    if azi < -540 or azi >= 540:
        msg = 'azimuth ' + str(azi) + ' not in [-540, 540)'
        raise ValueError(msg)
    return Math.AngNormalize(azi)
CheckAzimuth = staticmethod(CheckAzimuth)
def CheckDistance(s):
    """Raise ValueError unless s is a finite number."""
    if not Math.isfinite(s):
        raise ValueError('distance ' + str(s) + ' not a finite number')
CheckDistance = staticmethod(CheckDistance)
def Inverse(self, lat1, lon1, lat2, lon2, outmask=(DISTANCE | AZIMUTH)):
    """Solve the inverse geodesic problem.

    Validates the inputs, delegates to GenInverse, and packages the
    requested outputs (selected by outmask) into a dict.  The input
    coordinates and the arc length 'a12' are always included.
    """
    lon1 = Geodesic.CheckPosition(lat1, lon1)
    lon2 = Geodesic.CheckPosition(lat2, lon2)
    result = {'lat1': lat1, 'lon1': lon1, 'lat2': lat2, 'lon2': lon2}
    (a12, s12, azi1, azi2, m12, M12, M21, S12) = self.GenInverse(lat1, lon1, lat2, lon2, outmask)
    mask = outmask & Geodesic.OUT_ALL
    result['a12'] = a12
    if mask & Geodesic.DISTANCE:
        result['s12'] = s12
    if mask & Geodesic.AZIMUTH:
        result.update(azi1=azi1, azi2=azi2)
    if mask & Geodesic.REDUCEDLENGTH:
        result['m12'] = m12
    if mask & Geodesic.GEODESICSCALE:
        result.update(M12=M12, M21=M21)
    if mask & Geodesic.AREA:
        result['S12'] = S12
    return result
def GenDirect(self, lat1, lon1, azi1, arcmode, s12_a12, outmask):
    """Shared implementation for Direct and ArcDirect.

    Builds a GeodesicLine through (lat1, lon1) with azimuth azi1 and
    evaluates it at s12_a12 (a distance, or an arc length if arcmode).
    """
    from geographiclib.geodesicline import GeodesicLine
    # When stepping by distance we need the DISTANCE_IN capability.
    caps = outmask | (Geodesic.EMPTY if arcmode else Geodesic.DISTANCE_IN)
    geodline = GeodesicLine(self, lat1, lon1, azi1, caps)
    return geodline.GenPosition(arcmode, s12_a12, outmask)
def Direct(self, lat1, lon1, azi1, s12, outmask=((LATITUDE | LONGITUDE) | AZIMUTH)):
    """Solve the direct geodesic problem for a given distance s12.

    Validates the inputs, delegates to GenDirect, and packages the
    requested outputs (selected by outmask) into a dict.  The inputs
    and the arc length 'a12' are always included.
    """
    lon1 = Geodesic.CheckPosition(lat1, lon1)
    azi1 = Geodesic.CheckAzimuth(azi1)
    Geodesic.CheckDistance(s12)
    result = {'lat1': lat1, 'lon1': lon1, 'azi1': azi1, 's12': s12}
    (a12, lat2, lon2, azi2, s12, m12, M12, M21, S12) = self.GenDirect(lat1, lon1, azi1, False, s12, outmask)
    mask = outmask & Geodesic.OUT_ALL
    result['a12'] = a12
    if mask & Geodesic.LATITUDE:
        result['lat2'] = lat2
    if mask & Geodesic.LONGITUDE:
        result['lon2'] = lon2
    if mask & Geodesic.AZIMUTH:
        result['azi2'] = azi2
    if mask & Geodesic.REDUCEDLENGTH:
        result['m12'] = m12
    if mask & Geodesic.GEODESICSCALE:
        result.update(M12=M12, M21=M21)
    if mask & Geodesic.AREA:
        result['S12'] = S12
    return result
def ArcDirect(self, lat1, lon1, azi1, a12, outmask=(((LATITUDE | LONGITUDE) | AZIMUTH) | DISTANCE)):
    """Solve the direct geodesic problem for a given arc length a12.

    Validates the inputs, delegates to GenDirect in arc mode, and
    packages the requested outputs (selected by outmask) into a dict.
    """
    lon1 = Geodesic.CheckPosition(lat1, lon1)
    azi1 = Geodesic.CheckAzimuth(azi1)
    Geodesic.CheckDistance(a12)
    result = {'lat1': lat1, 'lon1': lon1, 'azi1': azi1, 'a12': a12}
    (a12, lat2, lon2, azi2, s12, m12, M12, M21, S12) = self.GenDirect(lat1, lon1, azi1, True, a12, outmask)
    mask = outmask & Geodesic.OUT_ALL
    if mask & Geodesic.DISTANCE:
        result['s12'] = s12
    if mask & Geodesic.LATITUDE:
        result['lat2'] = lat2
    if mask & Geodesic.LONGITUDE:
        result['lon2'] = lon2
    if mask & Geodesic.AZIMUTH:
        result['azi2'] = azi2
    if mask & Geodesic.REDUCEDLENGTH:
        result['m12'] = m12
    if mask & Geodesic.GEODESICSCALE:
        result.update(M12=M12, M21=M21)
    if mask & Geodesic.AREA:
        result['S12'] = S12
    return result
def Line(self, lat1, lon1, azi1, caps=ALL):
    """Return a GeodesicLine through (lat1, lon1) with azimuth azi1.

    caps selects which quantities the line can later compute; the
    DISTANCE_IN capability is always added so the line can be evaluated
    by distance.
    """
    from geographiclib.geodesicline import GeodesicLine
    lon1 = Geodesic.CheckPosition(lat1, lon1)
    azi1 = Geodesic.CheckAzimuth(azi1)
    return GeodesicLine(self, lat1, lon1, azi1, (caps | Geodesic.DISTANCE_IN))
def Area(self, points, polyline=False):
    """Compute the perimeter (and, unless polyline, the area) of a
    polygon whose vertices are dicts with 'lat' and 'lon' keys.
    """
    from geographiclib.polygonarea import PolygonArea
    # Validate every vertex before doing any work.
    for point in points:
        Geodesic.CheckPosition(point['lat'], point['lon'])
    count, perimeter, area = PolygonArea.Area(self, points, polyline)
    result = {'number': count, 'perimeter': perimeter}
    if not polyline:
        result['area'] = area
    return result
def unpack_inline_message_id(inline_message_id: str) -> 'raw.base.InputBotInlineMessageID':
    """Decode a base64url-encoded inline message id into a raw TL object.

    A 20-byte payload is the legacy 32-bit-owner layout; otherwise the
    64-bit layout (InputBotInlineMessageID64) is assumed.
    """
    # Restore the base64 padding stripped from the public id.
    pad_len = (-len(inline_message_id)) % 4
    decoded = base64.urlsafe_b64decode(inline_message_id + '=' * pad_len)
    if len(decoded) == 20:
        dc_id, msg_id, access_hash = struct.unpack('<iqq', decoded)
        return raw.types.InputBotInlineMessageID(dc_id=dc_id, id=msg_id, access_hash=access_hash)
    dc_id, owner_id, msg_id, access_hash = struct.unpack('<iqiq', decoded)
    return raw.types.InputBotInlineMessageID64(dc_id=dc_id, owner_id=owner_id, id=msg_id, access_hash=access_hash)
def test_hamming_matrix():
    """_hamming_matrix(3) must give pairwise Hamming distances between
    all 3-bit states (8x8 matrix, float dtype)."""
    # Expected distances between the binary expansions of 0..7.
    answer = np.array([[0, 1, 1, 2, 1, 2, 2, 3], [1, 0, 2, 1, 2, 1, 3, 2], [1, 2, 0, 1, 2, 3, 1, 2], [2, 1, 1, 0, 3, 2, 2, 1], [1, 2, 2, 3, 0, 1, 1, 2], [2, 1, 3, 2, 1, 0, 2, 1], [2, 3, 1, 2, 1, 2, 0, 1], [3, 2, 2, 1, 2, 1, 1, 0]]).astype(float)
    assert np.array_equal(distribution._hamming_matrix(3), answer)
def main():
    """Train/evaluate a discovered architecture (from global `args`) on
    CIFAR-10/100, logging per-epoch train and validation accuracy.

    Relies on module-level `args`, `logging`, `genotypes`, `utils`,
    `Network`, `train`, `infer`, and CIFAR_CLASSES.
    """
    if (not torch.cuda.is_available()):
        logging.info('no gpu device available')
        sys.exit(1)
    # Seed both numpy and torch (CPU + CUDA) for reproducibility.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info(('gpu device = %d' % args.gpu))
    logging.info('args = %s', args)
    # NOTE(review): eval() on args.arch assumes a trusted command line.
    genotype = eval(('genotypes.%s' % args.arch))
    print('Genotype')
    logging.info(genotype)
    print('')
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
    model = model.cuda()
    logging.info('param size = %fMB', utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
    (train_transform, valid_transform) = utils._data_transforms_cifar10(args)
    if (args.dataset == 'cifar100'):
        train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR100(root=args.data, train=False, download=True, transform=valid_transform)
    else:
        train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
    train_queue = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True)
    valid_queue = torch.utils.data.DataLoader(valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
    best_acc = 0.0
    for epoch in range(args.epochs):
        # NOTE(review): scheduler.get_lr() is deprecated in newer torch
        # in favor of get_last_lr() — confirm the pinned torch version.
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        # Linearly ramp drop-path probability over training.
        model.drop_path_prob = ((args.drop_path_prob * epoch) / args.epochs)
        (train_acc, train_obj) = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)
        (valid_acc, valid_obj) = infer(valid_queue, model, criterion)
        if (valid_acc > best_acc):
            best_acc = valid_acc
        logging.info('valid_acc %f, best_acc %f', valid_acc, best_acc)
        scheduler.step()
        # Checkpoint every epoch (overwrites; last epoch wins).
        utils.save(model, os.path.join(args.save, 'weights.pt'))
class Service(sb.Base, sasync.Async):
    """A SAGA job service: a handle to a resource manager through which
    jobs can be created, submitted, listed, and reconnected.

    The bare tuple expressions before each method look like the
    argument/return signature specs of stripped `@rus.takes` /
    `@rus.returns` decorators — verify against the upstream source.
    """
    ('Service', rus.optional((str, ru.Url)), rus.optional(ss.Session), rus.optional(sab.Base), rus.optional(dict), rus.optional(rus.one_of(SYNC, ASYNC, TASK)))
    (rus.nothing)
    def __init__(self, rm=None, session=None, _adaptor=None, _adaptor_state={}, _ttype=None):
        # NOTE(review): mutable default `_adaptor_state={}` is shared
        # across calls — appears to be read-only here, but confirm.
        try:
            # Best-effort GC before (re)connecting to a backend.
            import gc
            gc.collect()
        except:
            pass
        self.valid = False
        # Default to a local fork job service when rm is not given.
        url = ru.Url(rm)
        if (not url.scheme):
            url.scheme = 'fork'
        if (not url.host):
            url.host = 'localhost'
        if (not session):
            session = ss.Session(default=True)
        scheme = url.scheme.lower()
        # The scheme selects which adaptor the base class binds to.
        self._super = super(Service, self)
        self._super.__init__(scheme, _adaptor, _adaptor_state, url, session, ttype=_ttype)
        self.valid = True
    ('Service', rus.optional((ru.Url, str)), rus.optional(ss.Session), rus.optional(rus.one_of(SYNC, ASYNC, TASK)))
    (st.Task)
    def create(cls, rm=None, session=None, ttype=SYNC):
        # Asynchronous factory: returns the construction task.
        # NOTE(review): takes `cls` — presumably a stripped @classmethod.
        if (not session):
            session = ss.Session(default=True)
        url = ru.Url(rm)
        return cls(url, session, _ttype=ttype)._init_task
    ('Service')
    (str)
    def __str__(self):
        if self.valid:
            return ('[%s]' % self.url)
        return ''
    ('Service')
    (rus.nothing)
    def close(self):
        """Close the service; further use raises IncorrectState."""
        if (not self.valid):
            raise se.IncorrectState('This instance was already closed.')
        self._adaptor.close()
        self.valid = False
    ('Service', descr.Description, rus.optional(rus.one_of(SYNC, ASYNC, TASK)))
    ((j.Job, st.Task))
    def create_job(self, job_desc, ttype=None):
        """Create (but do not run) a job from a job description.

        The description is deep-copied, checked against the adaptor's
        advertised capabilities, and its environment values coerced to
        strings before being handed to the adaptor.
        """
        if (not self.valid):
            raise se.IncorrectState('This instance was already closed.')
        jd_copy = descr.Description()
        job_desc._attributes_deep_copy(jd_copy)
        adaptor_info = self._adaptor._adaptor.get_info()
        if (('capabilities' in adaptor_info) and ('jdes_attributes' in adaptor_info['capabilities'])):
            # Reject attributes the adaptor does not support, but only
            # when they differ from the (lower-cased) default value.
            supported_keys = adaptor_info['capabilities']['jdes_attributes']
            jd_default = descr.Description()
            for key in jd_copy.list_attributes():
                val = jd_copy.get_attribute(key)
                default = jd_default.get_attribute(key)
                if isinstance(val, str):
                    val = val.lower()
                if isinstance(default, str):
                    default = default.lower()
                if ((key not in supported_keys) and (val != default) and val):
                    msg = ("'JobDescription.%s' (%s) not supported by %s" % (key, val, adaptor_info['name']))
                    raise se.BadParameter._log(self._logger, msg)
        if (jd_copy.executable is None):
            raise se.BadParameter('No executable defined')
        if jd_copy.attribute_exists('Environment'):
            # Environment values must be strings for the backends.
            for (key, value) in jd_copy.environment.items():
                jd_copy.environment[key] = str(value)
        return self._adaptor.create_job(jd_copy, ttype=ttype)
    ('Service', str, rus.optional(str), rus.optional(rus.one_of(SYNC, ASYNC, TASK)))
    ((j.Job, st.Task))
    def run_job(self, cmd, host=None, ttype=None):
        """Run a command line as a job; convenience around create_job.

        If the adaptor has a native run_job it is used; otherwise the
        command is split into executable + arguments and submitted.
        """
        if (not self.valid):
            raise se.IncorrectState('This instance was already closed.')
        if (not cmd):
            raise se.BadParameter('run_job needs a command to run. Duh!')
        try:
            # Fast path: adaptor-native implementation.
            return self._adaptor.run_job(cmd, host, ttype=ttype)
        except:
            # Fall back to the generic create/run path below.
            pass
        args = cmd.split()
        jd = descr.Description()
        jd.executable = args[0]
        jd.arguments = args[1:]
        job = self.create_job(jd)
        job.run()
        return job
    ('Service', rus.optional(rus.one_of(SYNC, ASYNC, TASK)))
    ((rus.list_of(str), st.Task))
    def list(self, ttype=None):
        """List the ids of jobs known to this service."""
        if (not self.valid):
            raise se.IncorrectState('This instance was already closed.')
        return self._adaptor.list(ttype=ttype)
    jobs = property(list)
    ('Service', rus.optional(rus.one_of(SYNC, ASYNC, TASK)))
    ((ru.Url, st.Task))
    def get_url(self, ttype=None):
        """Return the resource-manager URL this service is bound to."""
        if (not self.valid):
            raise se.IncorrectState('This instance was already closed.')
        return self._adaptor.get_url(ttype=ttype)
    url = property(get_url)
    ('Service', str, rus.optional(rus.one_of(SYNC, ASYNC, TASK)))
    ((j.Job, st.Task))
    def get_job(self, job_id, ttype=None):
        """Reconnect to an existing job by its id."""
        if (not self.valid):
            raise se.IncorrectState('This instance was already closed.')
        return self._adaptor.get_job(job_id, ttype=ttype)
def make_some_widgets() -> List[Widget]:
    """Construct the deterministic 24-widget test fixture.

    Widgets are generated over the cross product of 3 creators, every
    WidgetKind, and the two boolean feature flags; each widget's
    ancestry is every (creator_id + 1)-th previously created widget.
    """
    widgets = []
    next_id = 0
    for creator in range(3):
        for kind in WidgetKind:
            for knob in (True, False):
                for spinner in (True, False):
                    ancestry = [prior.widget_id for prior in widgets[::(creator + 1)]]
                    widgets.append(Widget(next_id, creator, ancestry, kind, knob, spinner))
                    next_id += 1
    assert (len(widgets) == 24)
    return widgets
class OrganizationTest(TestCase):
    """Unit tests for the Organization model: creation, slug generation,
    uniqueness, membership, cascading deletes, and permission logic."""
    def setUp(self):
        pass
    def test_createOrganization(self):
        # Creating an organization adds exactly one row.
        self.assertEqual(Organization.objects.count(), 0)
        o = Organization.objects.create()
        self.assertEqual(Organization.objects.count(), 1)
    def test_org_autocreate_slug(self):
        # The slugname is derived automatically from the name.
        o = Organization.objects.create(name='RAP')
        self.assertEqual(o.slugname, 'rap')
    def test_name_should_be_uniq(self):
        # A duplicate name must violate the DB unique constraint.
        o = Organization.objects.create(name='RAP')
        self.assertEqual(Organization.objects.count(), 1)
        self.assertRaises(django.db.utils.IntegrityError, Organization.objects.create, name='RAP')
    def test_add_member(self):
        # PytitionUser is created implicitly for each new auth user.
        o = Organization.objects.create(name='RAP')
        User = get_user_model()
        u = User.objects.create_user('julia', password='julia')
        pu = PytitionUser.objects.get(user__username='julia')
        self.assertEqual(o.members.count(), 0)
        o.members.add(pu)
        self.assertEqual(o.members.count(), 1)
    def test_delete_org(self):
        # Deleting the org must cascade to its petitions and templates.
        org = Organization.objects.create(name='RAP')
        p = Petition.objects.create(title='Antipub', org=org)
        pt = PetitionTemplate.objects.create(name='Default', org=org)
        self.assertEqual(org.petition_set.count(), 1)
        self.assertEqual(Petition.objects.count(), 1)
        self.assertEqual(PetitionTemplate.objects.count(), 1)
        org.delete()
        self.assertEqual(Petition.objects.count(), 0)
        self.assertEqual(PetitionTemplate.objects.count(), 0)
    def test_is_last_admin(self):
        # julia stops being the last admin once max can modify perms.
        add_default_data()
        julia = PytitionUser.objects.get(user__username='julia')
        org = Organization.objects.get(name='Les Amis de la Terre')
        self.assertEqual(org.is_last_admin(julia), True)
        max = PytitionUser.objects.get(user__username='max')
        perm = Permission.objects.get(organization=org, user=max)
        perm.can_modify_permissions = True
        perm.save()
        self.assertEqual(org.is_last_admin(julia), False)
    def test_is_allowed_to(self):
        # Default member permissions: create/modify allowed, the
        # destructive and administrative ones denied.
        o = Organization.objects.create(name='RAP')
        User = get_user_model()
        u = User.objects.create_user('julia', password='julia')
        pu = PytitionUser.objects.get(user__username='julia')
        o.members.add(pu)
        self.assertEqual(o.is_allowed_to(pu, 'can_add_members'), False)
        self.assertEqual(o.is_allowed_to(pu, 'can_remove_members'), False)
        self.assertEqual(o.is_allowed_to(pu, 'can_create_petitions'), True)
        self.assertEqual(o.is_allowed_to(pu, 'can_modify_petitions'), True)
        self.assertEqual(o.is_allowed_to(pu, 'can_delete_petitions'), False)
        self.assertEqual(o.is_allowed_to(pu, 'can_create_templates'), True)
        self.assertEqual(o.is_allowed_to(pu, 'can_modify_templates'), True)
        self.assertEqual(o.is_allowed_to(pu, 'can_delete_templates'), False)
        self.assertEqual(o.is_allowed_to(pu, 'can_view_signatures'), False)
        self.assertEqual(o.is_allowed_to(pu, 'can_modify_signatures'), False)
        self.assertEqual(o.is_allowed_to(pu, 'can_delete_signatures'), False)
        self.assertEqual(o.is_allowed_to(pu, 'can_modify_permissions'), False)
class TestMessageSorting(unittest.TestCase):
    """Tests for sort_messages_preserving_file_order: new messages must
    be grouped by file in the order files appeared in the old output."""
    def test_simple_sorting(self) -> None:
        # Old output listed foo/y.py before x.py, so the new messages
        # must come back in that file order regardless of input order.
        msgs = ['x.py:1: error: "int" not callable', 'foo/y.py:123: note: "X" not defined']
        old_msgs = ['foo/y.py:12: note: "Y" not defined', 'x.py:8: error: "str" not callable']
        assert (sort_messages_preserving_file_order(msgs, old_msgs) == list(reversed(msgs)))
        assert (sort_messages_preserving_file_order(list(reversed(msgs)), old_msgs) == list(reversed(msgs)))
    def test_long_form_sorting(self) -> None:
        # Multi-line messages (continuations, carets) must move as a
        # unit with their leading file:line header.
        msg1 = ['x.py:1: error: "int" not callable', 'and message continues (x: y)', ' 1()', ' ^~~']
        msg2 = ['foo/y.py: In function "f":', 'foo/y.py:123: note: "X" not defined', 'and again message continues']
        old_msgs = ['foo/y.py:12: note: "Y" not defined', 'x.py:8: error: "str" not callable']
        assert (sort_messages_preserving_file_order((msg1 + msg2), old_msgs) == (msg2 + msg1))
        assert (sort_messages_preserving_file_order((msg2 + msg1), old_msgs) == (msg2 + msg1))
    def test_mypy_error_prefix(self) -> None:
        # File-less "mypy:" messages sort after file-scoped ones.
        msg1 = 'x.py:1: error: "int" not callable'
        msg2 = 'foo/y:123: note: "X" not defined'
        msg3 = 'mypy: Error not associated with a file'
        old_msgs = ['mypy: Something wrong', 'foo/y:12: note: "Y" not defined', 'x.py:8: error: "str" not callable']
        assert (sort_messages_preserving_file_order([msg1, msg2, msg3], old_msgs) == [msg2, msg1, msg3])
        assert (sort_messages_preserving_file_order([msg3, msg2, msg1], old_msgs) == [msg2, msg1, msg3])
    def test_new_file_at_the_end(self) -> None:
        # Files unseen in the old output keep their relative order and
        # go to the end.
        msg1 = 'x.py:1: error: "int" not callable'
        msg2 = 'foo/y.py:123: note: "X" not defined'
        new1 = 'ab.py:3: error: Problem: error'
        new2 = 'aaa:3: error: Bad'
        old_msgs = ['foo/y.py:12: note: "Y" not defined', 'x.py:8: error: "str" not callable']
        assert (sort_messages_preserving_file_order([msg1, msg2, new1], old_msgs) == [msg2, msg1, new1])
        assert (sort_messages_preserving_file_order([new1, msg1, msg2, new2], old_msgs) == [msg2, msg1, new1, new2])
def test_download_and_cache_classifiers(monkeypatch, tmp_path):
    """_download_and_cache_classifiers must fetch the trove classifier
    list (one per line) and return it as a set, using the mocked cache
    directory."""
    # NOTE(review): the URL literal on this line was corrupted in this
    # copy of the file (unterminated string — a syntax error); restored
    # to PyPI's classifier-listing endpoint.  Confirm the exact URL
    # against fv._download_and_cache_classifiers.
    responses.add(responses.GET, 'https://pypi.org/pypi?%3Aaction=list_classifiers', body='A\nB\nC')

    def mock_get_cache_dir():
        # Redirect the cache to the pytest-provided temp directory.
        return tmp_path
    monkeypatch.setattr(fv, 'get_cache_dir', mock_get_cache_dir)
    classifiers = fv._download_and_cache_classifiers()
    assert (classifiers == {'A', 'B', 'C'})
def lookup_fully_qualified_typeinfo(modules: dict[(str, MypyFile)], name: str, *, allow_missing: bool) -> TypeInfo:
    """Look up a fully qualified name and return its TypeInfo.

    With allow_missing, a missing or non-TypeInfo node yields the
    placeholder from missing_info(); otherwise the underlying lookup
    raises (and a non-TypeInfo node trips the assertion).
    """
    stnode = lookup_fully_qualified(name, modules, raise_on_missing=(not allow_missing))
    node = (stnode.node if stnode else None)
    if not isinstance(node, TypeInfo):
        # Only reachable when missing nodes are tolerated.
        assert allow_missing, 'Should never get here in normal mode, got {}:{} instead of TypeInfo'.format(type(node).__name__, (node.fullname if node else ''))
        return missing_info(modules)
    return node
class _BaseUserscriptRunner(QObject):
    """Shared machinery for running a userscript in a GUIProcess.

    Page text and HTML are dumped to temp files exposed through the
    QUTE_TEXT / QUTE_HTML environment variables; the script is started
    only once both are stored.  Subclasses implement FIFO handling via
    prepare_run / on_proc_finished / on_proc_error.  The bare `()`
    lines before the last two methods look like stripped `@pyqtSlot()`
    decorators — verify against upstream.
    """
    # Emitted for each command the userscript writes to its FIFO.
    got_cmd = pyqtSignal(str)
    # Emitted when the underlying process has finished.
    finished = pyqtSignal(guiprocess.GUIProcess)
    def __init__(self, parent=None):
        super().__init__(parent)
        self._cleaned_up = False
        self._filepath = None
        self.proc = None
        self._env: MutableMapping[(str, str)] = {}
        self._text_stored = False
        self._html_stored = False
        self._args: Tuple[(Any, ...)] = ()
        self._kwargs = {}
    def store_text(self, text):
        """Dump the page text to a temp file; start the script when the
        HTML dump has arrived as well."""
        with tempfile.NamedTemporaryFile(mode='w', encoding='utf-8', suffix='.txt', delete=False) as txt_file:
            txt_file.write(text)
            self._env['QUTE_TEXT'] = txt_file.name
        self._text_stored = True
        log.procs.debug('Text stored from webview')
        if (self._text_stored and self._html_stored):
            log.procs.debug('Both text/HTML stored, kicking off userscript!')
            self._run_process(*self._args, **self._kwargs)
    def store_html(self, html):
        """Dump the page HTML to a temp file; start the script when the
        text dump has arrived as well."""
        with tempfile.NamedTemporaryFile(mode='w', encoding='utf-8', suffix='.html', delete=False) as html_file:
            html_file.write(html)
            self._env['QUTE_HTML'] = html_file.name
        self._html_stored = True
        log.procs.debug('HTML stored from webview')
        if (self._text_stored and self._html_stored):
            log.procs.debug('Both text/HTML stored, kicking off userscript!')
            self._run_process(*self._args, **self._kwargs)
    def _run_process(self, cmd, *args, env=None, verbose=False, output_messages=False):
        """Start the userscript process with the accumulated QUTE_* env."""
        assert (self._filepath is not None)
        self._env['QUTE_FIFO'] = self._filepath
        if (env is not None):
            self._env.update(env)
        self.proc = guiprocess.GUIProcess('userscript', additional_env=self._env, output_messages=output_messages, verbose=verbose, parent=self)
        self.proc.finished.connect(self.on_proc_finished)
        self.proc.error.connect(self.on_proc_error)
        self.proc.start(cmd, args)
    def _cleanup(self):
        """Remove the FIFO and dump files and reset all state.

        Idempotent: a second call is a no-op.
        """
        if self._cleaned_up:
            return
        assert (self._filepath is not None)
        self._cleaned_up = True
        tempfiles = [self._filepath]
        if ('QUTE_HTML' in self._env):
            tempfiles.append(self._env['QUTE_HTML'])
        if ('QUTE_TEXT' in self._env):
            tempfiles.append(self._env['QUTE_TEXT'])
        for fn in tempfiles:
            log.procs.debug('Deleting temporary file {}.'.format(fn))
            try:
                os.remove(fn)
            except OSError as e:
                # Surface (but don't raise on) deletion failures.
                message.error('Failed to delete tempfile {} ({})!'.format(fn, e))
        self._filepath = None
        self.proc = None
        self._env = {}
        self._text_stored = False
        self._html_stored = False
    def prepare_run(self, *args, **kwargs):
        # Subclass hook: set up the FIFO and remember the run args.
        raise NotImplementedError
    ()
    def on_proc_finished(self):
        # Subclass hook: process exited normally.
        raise NotImplementedError
    ()
    def on_proc_error(self):
        # Subclass hook: process failed to start or crashed.
        raise NotImplementedError
def test_pype_use_parent_context_swallow_stop_error(mock_pipe):
    """Stop raised by the child pipeline must propagate (not be
    swallowed by raiseError=False) and must not be logged as an error."""
    mocked_runner = mock_pipe.return_value.load_and_run_pipeline
    mocked_runner.side_effect = Stop()
    context = Context({'pype': {'name': 'pipe name', 'pipeArg': 'argument here', 'useParentContext': True, 'skipParse': True, 'raiseError': False}})
    with patch_logger('pypyr.steps.pype', logging.ERROR) as mock_logger_error:
        with pytest.raises(Stop) as err_info:
            with get_arb_pipeline_scope(context):
                pype.run_step(context)
    assert isinstance(err_info.value, Stop)
    # pipeArg is shlex-split into context_args; skipParse -> parse_input=False.
    mock_pipe.assert_called_once_with(name='pipe name', context_args=['argument', 'here'], parse_input=False, loader=None, groups=None, success_group=None, failure_group=None, py_dir=None)
    # useParentContext: the parent context object is passed straight through.
    mocked_runner.assert_called_once_with(context, None)
    mock_logger_error.assert_not_called()
def do_commit(new_ver, old_ver, dry_run, amend, ver_files):
    """Stage the version files and commit the version bump.

    Generator: yields a 'DRYRUN: ...' or 'EXEC: ...' progress message
    per git command; commands are actually executed only when dry_run
    is false.  With amend, the commit amends HEAD instead.
    """
    import pathlib
    commit_msg = ('chore(ver): bump %s-->%s' % (old_ver, new_ver))
    # Normalize paths to forward slashes so git sees them consistently.
    paths = [pathlib.Path(f).as_posix() for f in ver_files]
    stage_cmd = ['git', 'add'] + paths
    commit_cmd = ['git', 'commit', '-m', commit_msg]
    if amend:
        commit_cmd.append('--amend')
    for cmd in (stage_cmd, commit_cmd):
        pretty = format_syscmd(cmd)
        if dry_run:
            yield ('DRYRUN: %s' % pretty)
        else:
            yield ('EXEC: %s' % pretty)
            exec_cmd(cmd)
def main():
    """Evaluate a pretrained multimodal model on the test split.

    Parses CLI args, sets up a growth-limited TF session, rebuilds the
    test model for the requested modalities, runs prediction, and
    writes predictions plus evaluation metrics next to the model.
    Relies on module-level helpers (get_model_info, ord_rep, rep2mods,
    get_testgen, build_test_model, predict, evaluate) and the globals
    `mod_shape_dict` / `train_mods` — not visible here.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', type=str, help='path where the pretrained model is stored.')
    parser.add_argument('--data_root', type=str, default='data', help='path where the testing data is stored')
    parser.add_argument('--rnn_type', type=str, default='simple', help='path where the testing data is stored')
    parser.add_argument('--device', type=str, default='0', help='specify the GPU device')
    parser.add_argument('--test_mods', type=str, default='VATS', help='modalities available in the test phase')
    args = parser.parse_args()
    # Pin the process to the chosen GPU and avoid grabbing all memory.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    K.set_session(sess)
    model_info = get_model_info(args.model_path)
    model_root = os.path.split(args.model_path)[0]
    # Outputs live under <model dir>/test/<canonical modality string>.
    pred_root = os.path.join(model_root, 'test', ord_rep(args.test_mods))
    if (not os.path.exists(pred_root)):
        os.makedirs(pred_root)
    pred_path = os.path.join(pred_root, 'predict.npy')
    test_mods = rep2mods(ord_rep(args.test_mods))
    target_root = os.path.join(args.data_root, 'len_{}'.format(model_info['length']))
    split_root = os.path.join(args.data_root, 'split', str(model_info['split']))
    test_gen = get_testgen(args.data_root, target_root, split_root, test_mods)
    # Input shapes must match those used at training time.
    train_shapes = ([[mod_shape_dict[mod]] for mod in train_mods] + [[model_info['length'], 1]])
    test_model = build_test_model(args.model_path, train_shapes, test_mods, args.rnn_type)
    (preds, truth) = predict(test_model, test_gen, pred_path)
    eval_path = os.path.join(pred_root, 'eval.txt')
    evaluate(preds, truth, eval_path)
def define_template(title, page):
    """Register template *title* from its wiki source lines in
    options.templates, mirroring MediaWiki transclusion rules.

    Redirect pages only record the target in options.redirects.
    HTML comments and <noinclude> content are stripped; if any
    <onlyinclude> sections exist only their contents are kept,
    otherwise <includeonly> tags are removed (keeping their text).
    An empty page or empty resulting text is a no-op.
    """
    if (not page):
        return
    # Redirect page: record the target template name and stop.
    m = re.match('#REDIRECT.*?\\[\\[([^\\]]*)]]', page[0], re.IGNORECASE)
    if m:
        options.redirects[title] = m.group(1)
        return
    text = unescape(''.join(page))
    # Strip comments and <noinclude> material (including an
    # unterminated trailing <noinclude> and self-closing tags).
    text = comment.sub('', text)
    text = reNoinclude.sub('', text)
    text = re.sub('<noinclude\\s*>.*$', '', text, flags=re.DOTALL)
    text = re.sub('<noinclude/>', '', text)
    # <onlyinclude> wins: concatenate all such sections if present.
    onlyincludeAccumulator = ''
    for m in re.finditer('<onlyinclude>(.*?)</onlyinclude>', text, re.DOTALL):
        onlyincludeAccumulator += m.group(1)
    if onlyincludeAccumulator:
        text = onlyincludeAccumulator
    else:
        text = reIncludeonly.sub('', text)
    if text:
        if (title in options.templates):
            # Fix: logging.warn is a deprecated alias of logging.warning.
            logging.warning('Redefining: %s', title)
        options.templates[title] = text
class CollaborativeCallback(transformers.TrainerCallback):
def __init__(self, dht: hivemind.DHT, optimizer: hivemind.CollaborativeOptimizer, model: torch.nn.Module, local_public_key: bytes, statistics_expiration: float):
super().__init__()
self.model = model
(self.dht, self.collaborative_optimizer) = (dht, optimizer)
self.local_public_key = local_public_key
self.statistics_expiration = statistics_expiration
self.last_reported_collaboration_step = (- 1)
self.previous_state = self.get_current_state()
self.total_samples_processed = 0
self.samples = 0
self.steps = 0
self.loss = 0
def on_train_begin(self, args: TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, **kwargs):
logger.info('Loading state from peers')
self.collaborative_optimizer.load_state_from_peers()
def on_step_end(self, args: TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, **kwargs):
control.should_log = True
if (not self.params_are_finite()):
self.load_from_state(self.previous_state)
return control
self.previous_state = self.get_current_state()
if state.log_history:
self.loss += state.log_history[(- 1)]['loss']
self.steps += 1
if (self.collaborative_optimizer.local_step != self.last_reported_collaboration_step):
self.last_reported_collaboration_step = self.collaborative_optimizer.local_step
self.total_samples_processed += self.samples
samples_per_second = self.collaborative_optimizer.performance_ema.samples_per_second
statistics = metrics_utils.LocalMetrics(step=self.collaborative_optimizer.local_step, samples_per_second=samples_per_second, samples_accumulated=self.samples, loss=self.loss, mini_steps=self.steps)
logger.info(f'Step {self.collaborative_optimizer.local_step}')
logger.info(f'Your current contribution: {self.total_samples_processed} samples')
if self.steps:
logger.info(f'Local loss: {(self.loss / self.steps)}')
self.loss = 0
self.steps = 0
if self.collaborative_optimizer.is_synchronized:
self.dht.store(key=(self.collaborative_optimizer.prefix + '_metrics'), subkey=self.local_public_key, value=statistics.dict(), expiration_time=(hivemind.get_dht_time() + self.statistics_expiration), return_future=True)
self.samples = self.collaborative_optimizer.local_samples_accumulated
return control
@torch.no_grad()  # restored decorator (was mangled to a bare `_grad()` call)
def get_current_state(self) -> Dict[(str, Any)]:
    """Snapshot model and optimizer state dicts for potential rollback."""
    return {'model': self.model.state_dict(), 'opt': self.collaborative_optimizer.opt.state_dict()}
@torch.no_grad()  # restored decorator (was mangled to a bare `_grad()` call)
def load_from_state(self, state):
    """Restore model and optimizer from a snapshot made by get_current_state."""
    self.model.load_state_dict(state['model'])
    self.collaborative_optimizer.opt.load_state_dict(state['opt'])
@torch.no_grad()  # restored decorator (was mangled to a bare `_grad()` call)
def params_are_finite(self):
    """Return True iff every model parameter contains only finite values."""
    for param in self.model.parameters():
        if (not torch.all(torch.isfinite(param))):
            return False
    return True
class PageQuestion(models.Model):
    """Through-model linking a Page to a Question with an explicit ordering."""

    page = models.ForeignKey('Page', on_delete=models.CASCADE, related_name='page_questions')
    question = models.ForeignKey('Question', on_delete=models.CASCADE, related_name='question_pages')
    # Position of the question within its page; lower values come first.
    order = models.IntegerField(default=0)

    class Meta:  # idiom: no empty parentheses on a class statement
        ordering = ('page', 'order')

    def __str__(self):
        return f'{self.page} / {self.question} [{self.order}]'

    def element(self):
        """Return the survey element this row represents (the linked question)."""
        return self.question
class WAEnMMD(base_ae.SingleLatentWithPriorAE):
    """Wasserstein auto-encoder trained with an MMD penalty on the latent space.

    Maximises reconstruction quality minus `lambda_` times the MMD between the
    aggregate posterior and the latent prior (Tolstikhin et al., WAE-MMD).
    """

    def __init__(self, encoder: BaseParameterisedDistribution, decoder: BaseParameterisedDistribution, latent_prior: BaseParameterisedDistribution, kernel: similarity_funcs.BaseSimilarityFunctions, c_function: similarity_funcs.SquaredEuclideanDistSimilarity = None):
        # Fix: the annotation previously *called* SquaredEuclideanDistSimilarity(),
        # instantiating it at definition time; it is now a plain type annotation.
        super().__init__(encoder, decoder, latent_prior)
        self.kernel = kernel
        # Reconstruction cost c(x, x~); defaults to squared Euclidean distance.
        self.c_function = (similarity_funcs.SquaredEuclideanDistSimilarity() if (c_function is None) else c_function)
        self._last_z_sample_on_obj = None  # cached for external inspection/debugging

    def forward(self, x, lambda_):
        return self.objective_to_maximise(x, lambda_)

    def objective_to_maximise(self, x, lambda_=1.0):
        """Return the per-sample WAE objective (larger is better) for batch x."""
        self.encoder.update(x)
        z_sample = self.encoder.sample_via_reparam(1)[0]
        self._last_z_sample_on_obj = z_sample
        self.decoder.update(z_sample)
        # Expected reconstruction cost under the decoder distribution.
        expected_cost = self.decoder.convolve_with_function(x, self.c_function)
        obj = (- expected_cost)
        collect_extra_stats = self._collect_extra_stats_flag
        if collect_extra_stats:
            extra_statistics = {}
        if (lambda_ != 0.0):
            # MMD between the encoded batch and fresh prior samples.
            samples_from_latent_prior = torch.cat(self.latent_prior.sample_no_grad(num_samples=z_sample.shape[0]))
            divergence_term = similarity_funcs.estimate_mmd(self.kernel, z_sample, samples_from_latent_prior)
            obj += ((- lambda_) * divergence_term)
            if collect_extra_stats:
                extra_statistics['sum-divergence_term(no_lambda)(smaller_better)'] = divergence_term.sum().item()
        if collect_extra_stats:
            extra_statistics.update({'sum-reconstruction_term(smaller_better)': expected_cost.sum().item(), 'sum-wae_objective(larger_better)': obj.sum().item(), 'raw-batchsize': expected_cost.shape[0]})
            self._logger_manager.add_statistics(extra_statistics)
        return obj
class RubberbandItem(BaseItemMixin, QtWidgets.QGraphicsRectItem):
    """Semi-transparent selection rectangle drawn while the user drags."""

    def __init__(self):
        super().__init__()
        fill = QtGui.QColor(SELECT_COLOR)
        fill.setAlpha(40)  # translucent so items underneath stay visible
        self.setBrush(QtGui.QBrush(fill))
        outline = QtGui.QPen(QtGui.QColor(0, 0, 0))
        outline.setWidth(1)
        outline.setCosmetic(True)  # constant 1px width regardless of zoom
        self.setPen(outline)

    def __str__(self):
        return f'RubberbandItem {self.width} x {self.height}'

    def fit(self, point1, point2):
        """Resize the rectangle to exactly span the two given points."""
        self.setRect(utils.get_rect_from_points(point1, point2))
        logger.debug(f'Updated rubberband {self}')
def gridsearch_var0(model, hessians, val_loader, ood_loader, interval, lam=1):
    """Grid-search the prior variance ``var0`` minimising combined BCE.

    The score is in-distribution BCE plus ``lam`` times out-of-distribution BCE
    against maximally-uncertain (0.5) targets. Returns the best ``var0``.
    """
    targets = torch.cat([y for (x, y) in val_loader], dim=0).float().cuda()
    targets_out = (torch.ones_like(targets) * 0.5)  # 0.5 = "don't know" for OOD
    losses = []
    candidates = []
    progress = tqdm(interval)
    for var0 in progress:
        (mu, S) = estimate_variance(var0, hessians)
        preds_in = predict(val_loader, model, mu, S)
        preds_ood = predict(ood_loader, model, mu, S)
        loss_in = F.binary_cross_entropy(preds_in, targets).detach().item()
        loss_out = F.binary_cross_entropy(preds_ood, targets_out).detach().item()
        combined = (loss_in + (lam * loss_out))
        losses.append(combined)
        candidates.append(var0)
        progress.set_description(f'var0: {var0:.5f}, Loss-in: {loss_in:.3f}, Loss-out: {loss_out:.3f}, Loss: {combined:.3f}')
    return candidates[np.argmin(losses)]
class BNAfterDynamicMatMul(torch.nn.Module):
    """Toy net: conv branch and fc branch combined by matmul, then BatchNorm1d.

    Expects input of shape (N, 10, 24): the fc branch flattens to 240 features.
    """

    def __init__(self, padding=0, stride=1, dilation=1, groups=1, bias=False):
        super(BNAfterDynamicMatMul, self).__init__()
        # Fix: conv1d was previously constructed twice with identical arguments;
        # the second assignment simply replaced the first. Build it once.
        self.conv1d = torch.nn.Conv1d(10, 20, 3, padding=padding, stride=stride, dilation=dilation, groups=groups, bias=bias)
        self.fc1 = torch.nn.Linear((24 * 10), 20)
        self.flatten = torch.nn.Flatten()
        self.bn1 = torch.nn.BatchNorm1d(1)

    def forward(self, x):
        x1 = self.conv1d(x)                          # (N, 20, L_out)
        x2 = self.fc1(self.flatten(x)).unsqueeze(1)  # (N, 1, 20)
        x = torch.matmul(x2, x1)                     # (N, 1, L_out)
        x = self.bn1(x)
        return x
def __select_backend(backend: Optional[str], use_csc: bool):
    """Return a PIQP solver instance for the requested backend.

    backend: None (auto-select from matrix format), 'dense', or 'sparse'.
    Raises ParamError for any other value.
    """
    if (backend is None):
        # Auto-select: sparse solver for CSC matrices, dense otherwise.
        return (piqp.SparseSolver() if use_csc else piqp.DenseSolver())
    if (backend == 'dense'):
        return piqp.DenseSolver()
    if (backend == 'sparse'):
        return piqp.SparseSolver()
    # Fix: the closing quote after the backend name was missing from the message.
    raise ParamError(f'Unknown PIQP backend "{backend}"')
class Vaihingen(Dataset):
    """ISPRS Vaihingen semantic-segmentation dataset (6 classes).

    Expects <base_dir>/<split>/src/<i>.png images with matching labels in
    <base_dir>/<split>/label/<i>.png, named by consecutive integers.
    """
    NUM_CLASSES = 6

    # NOTE(review): the default base_dir is evaluated once at import time;
    # confirm Path.db_root_dir has no side effects worth avoiding here.
    def __init__(self, args, base_dir=Path.db_root_dir('vaihingen'), split='train'):
        super().__init__()
        self._base_dir = base_dir
        self._image_dir = os.path.join(self._base_dir, split, 'src')
        self._cat_dir = os.path.join(self._base_dir, split, 'label')
        # Normalize split to a sorted list so __getitem__ can iterate it.
        if isinstance(split, str):
            self.split = [split]
        else:
            split.sort()
            self.split = split
        self.args = args
        self.im_ids = []
        self.images = []
        self.categories = []
        # Count files in the image dir; files are assumed to be named 0..n-1 .png.
        n = len([name for name in os.listdir(self._image_dir) if os.path.isfile(os.path.join(self._image_dir, name))])
        print(self.split, n)
        for i in range(n):
            i = str(i)
            _image = os.path.join(self._image_dir, (i + '.png'))
            _cat = os.path.join(self._cat_dir, (i + '.png'))
            assert os.path.isfile(_image)
            assert os.path.isfile(_cat)
            self.im_ids.append(i)
            self.images.append(_image)
            self.categories.append(_cat)
        assert (len(self.images) == len(self.categories))

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        # Returns a transformed {'image', 'label'} dict; note this yields None
        # if self.split contains neither 'train' nor 'val'.
        (_img, _target) = self._make_img_gt_point_pair(index)
        sample = {'image': _img, 'label': _target}
        for split in self.split:
            if (split == 'train'):
                return self.transform_tr(sample)
            elif (split == 'val'):
                return self.transform_val(sample)

    def _make_img_gt_point_pair(self, index):
        """Load the (RGB image, label image) pair at the given index."""
        _img = Image.open(self.images[index]).convert('RGB')
        _target = Image.open(self.categories[index])
        return (_img, _target)

    def transform_tr(self, sample):
        """Training augmentation: flip, scale-crop, blur, normalize, to-tensor."""
        composed_transforms = transforms.Compose([tr.RandomHorizontalFlip(), tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size), tr.RandomGaussianBlur(), tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)), tr.ToTensor()])
        return composed_transforms(sample)

    def transform_val(self, sample):
        """Validation transform: deterministic center scale-crop + normalize."""
        composed_transforms = transforms.Compose([tr.FixScaleCrop(crop_size=self.args.crop_size), tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)), tr.ToTensor()])
        return composed_transforms(sample)

    def __str__(self):
        return (('Vaihingen(split=' + str(self.split)) + ')')
class TestAllocColorCells(EndianTest):
    """Round-trip pack/unpack tests for the X11 AllocColorCells request/reply.

    NOTE(review): the fixture dicts below are corrupted — several literal
    values were stripped (e.g. `{'cmap': ,` and the empty slots in the
    'masks'/'pixels' lists), leaving invalid syntax. The original integer
    values must be recovered from version control before these tests can run.
    """
    def setUp(self):
        # Request fixture: arguments and their expected wire encoding.
        self.req_args_0 = {'cmap': , 'colors': 45892, 'contiguous': 0, 'planes': 25420}
        self.req_bin_0 = b'V\x00\x03\\xc2\xf3[D\xb3Lc'
        # Reply fixture 0: non-empty masks/pixels.
        self.reply_args_0 = {'masks': [, , ], 'pixels': [, , , , , , , , , , , , , , , , ], 'sequence_number': 34200}
        self.reply_bin_0 = b'\x01\x00\x98\x85\x14\x00\x00\x00\x11\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00J\xc0H?1\xd3(3\xda\xe1;\x1f"\x9e\xe4\x12t\xb5\xfaS\xe6\x02\x05XC\xb1\xb6\x00P\xadCr\x9c\x9a `\x9c\xea#n\x96\xdf\x91b\x84\xecWq\n\xa7\xa5\r\xacG\xa4c\xc90l6\x83\xfc\x03\x1cgt\x84\x16]\x12\xeef\x98\xa1\x9fz\x16\xa3\x14Y'
        # Reply fixture 1: empty masks/pixels (edge case).
        self.reply_args_1 = {'masks': [], 'pixels': [], 'sequence_number': 30700}
        self.reply_bin_1 = b'\x01\x00\xecw\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'

    def testPackRequest0(self):
        # Packing the request args must produce the recorded binary exactly.
        bin = request.AllocColorCells._request.to_binary(*(), **self.req_args_0)
        self.assertBinaryEqual(bin, self.req_bin_0)

    def testUnpackRequest0(self):
        # Parsing the recorded binary must consume it fully and reproduce the args.
        (args, remain) = request.AllocColorCells._request.parse_binary(self.req_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_0)

    def testPackReply0(self):
        bin = request.AllocColorCells._reply.to_binary(*(), **self.reply_args_0)
        self.assertBinaryEqual(bin, self.reply_bin_0)

    def testUnpackReply0(self):
        (args, remain) = request.AllocColorCells._reply.parse_binary(self.reply_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.reply_args_0)

    def testPackReply1(self):
        bin = request.AllocColorCells._reply.to_binary(*(), **self.reply_args_1)
        self.assertBinaryEqual(bin, self.reply_bin_1)

    def testUnpackReply1(self):
        (args, remain) = request.AllocColorCells._reply.parse_binary(self.reply_bin_1, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.reply_args_1)
class Metadata:
    """Representation of core metadata with per-field validation.

    Fields are exposed as `_Validator` descriptors that validate lazily on
    first access; `from_raw`/`from_email` optionally validate everything up
    front and raise an ExceptionGroup listing every problem found.
    """
    _raw: RawMetadata

    @classmethod  # restored decorator: the constructor takes `cls`, not `self`
    def from_raw(cls, data: RawMetadata, *, validate: bool=True) -> 'Metadata':
        """Build a Metadata instance from already-parsed raw metadata."""
        ins = cls()
        ins._raw = data.copy()
        if validate:
            exceptions: List[Exception] = []
            try:
                metadata_version = ins.metadata_version
                metadata_age = _VALID_METADATA_VERSIONS.index(metadata_version)
            except InvalidMetadata as metadata_version_exc:
                exceptions.append(metadata_version_exc)
                metadata_version = None
            # Check every provided field plus all required ones; the version
            # field was just handled above.
            fields_to_check = (frozenset(ins._raw) | _REQUIRED_ATTRS)
            fields_to_check -= {'metadata_version'}
            for key in fields_to_check:
                try:
                    if metadata_version:
                        try:
                            field_metadata_version = cls.__dict__[key].added
                        except KeyError:
                            exc = InvalidMetadata(key, f'unrecognized field: {key!r}')
                            exceptions.append(exc)
                            continue
                        field_age = _VALID_METADATA_VERSIONS.index(field_metadata_version)
                        if (field_age > metadata_age):
                            field = _RAW_TO_EMAIL_MAPPING[key]
                            # Fix: this message was missing its f-string prefix,
                            # so the placeholders were never substituted.
                            exc = InvalidMetadata(field, f'{field} introduced in metadata version {field_metadata_version}, not {metadata_version}')
                            exceptions.append(exc)
                            continue
                    # Trigger the descriptor's validation for this field.
                    getattr(ins, key)
                except InvalidMetadata as exc:
                    exceptions.append(exc)
            if exceptions:
                raise ExceptionGroup('invalid metadata', exceptions)
        return ins

    @classmethod  # restored decorator: the constructor takes `cls`, not `self`
    def from_email(cls, data: Union[(bytes, str)], *, validate: bool=True) -> 'Metadata':
        """Parse RFC 822-style metadata (e.g. PKG-INFO/METADATA) into Metadata."""
        (raw, unparsed) = parse_email(data)
        if validate:
            exceptions: List[Exception] = []
            for unparsed_key in unparsed:
                if (unparsed_key in _EMAIL_TO_RAW_MAPPING):
                    message = f'{unparsed_key!r} has invalid data'
                else:
                    message = f'unrecognized field: {unparsed_key!r}'
                exceptions.append(InvalidMetadata(unparsed_key, message))
            if exceptions:
                raise ExceptionGroup('unparsed', exceptions)
        try:
            return cls.from_raw(raw, validate=validate)
        except ExceptionGroup as exc_group:
            # Re-root the group under a single message; suppress chaining.
            raise ExceptionGroup('invalid or unparsed metadata', exc_group.exceptions) from None

    # Validated field descriptors; `added` records the metadata version that
    # introduced the field (unset means present since 1.0).
    metadata_version: _Validator[_MetadataVersion] = _Validator()
    name: _Validator[str] = _Validator()
    version: _Validator[version_module.Version] = _Validator()
    dynamic: _Validator[Optional[List[str]]] = _Validator(added='2.2')
    platforms: _Validator[Optional[List[str]]] = _Validator()
    supported_platforms: _Validator[Optional[List[str]]] = _Validator(added='1.1')
    summary: _Validator[Optional[str]] = _Validator()
    description: _Validator[Optional[str]] = _Validator()
    description_content_type: _Validator[Optional[str]] = _Validator(added='2.1')
    keywords: _Validator[Optional[List[str]]] = _Validator()
    home_page: _Validator[Optional[str]] = _Validator()
    download_url: _Validator[Optional[str]] = _Validator(added='1.1')
    author: _Validator[Optional[str]] = _Validator()
    author_email: _Validator[Optional[str]] = _Validator()
    maintainer: _Validator[Optional[str]] = _Validator(added='1.2')
    maintainer_email: _Validator[Optional[str]] = _Validator(added='1.2')
    license: _Validator[Optional[str]] = _Validator()
    classifiers: _Validator[Optional[List[str]]] = _Validator(added='1.1')
    requires_dist: _Validator[Optional[List[requirements.Requirement]]] = _Validator(added='1.2')
    requires_python: _Validator[Optional[specifiers.SpecifierSet]] = _Validator(added='1.2')
    requires_external: _Validator[Optional[List[str]]] = _Validator(added='1.2')
    project_urls: _Validator[Optional[Dict[(str, str)]]] = _Validator(added='1.2')
    provides_extra: _Validator[Optional[List[utils.NormalizedName]]] = _Validator(added='2.1')
    provides_dist: _Validator[Optional[List[str]]] = _Validator(added='1.2')
    obsoletes_dist: _Validator[Optional[List[str]]] = _Validator(added='1.2')
    requires: _Validator[Optional[List[str]]] = _Validator(added='1.1')
    provides: _Validator[Optional[List[str]]] = _Validator(added='1.1')
    obsoletes: _Validator[Optional[List[str]]] = _Validator(added='1.1')
def extractor_maker(classifier):
    """Wrap a LeNet-style classifier's trunk as a flat feature extractor."""
    def extract_features(imgs):
        import torch.nn.functional as F
        # Conv trunk: two conv+pool+relu stages, then the first fc layer.
        out = F.relu(F.max_pool2d(classifier.conv1(imgs), 2))
        out = F.relu(F.max_pool2d(classifier.conv2(out), 2))
        out = out.view((- 1), 320)
        out = F.relu(classifier.fc1(out))
        # Flatten per image so callers get one feature vector per input.
        return out.view(imgs.shape[0], (- 1))
    return net.ModuleAdapter(extract_features)
class SimpleAverager(hivemind.DecentralizedAverager):
    """Decentralized averager that shares a Trainer's model params and optimizer stats."""

    def __init__(self, trainer: Trainer, **kwargs):
        self.trainer = trainer
        # Ensure optimizer state exists before we snapshot its shape below.
        initialize_optimizer_state(self.trainer.optimizer)
        averaged_tensors = tuple((param.detach().cpu().float().clone() for param in self.trainer.model.parameters()))
        # Second half of the averaged tensors mirrors the parameter shapes
        # (zero-initialized slots, e.g. for gradient/statistics averaging).
        averaged_tensors += tuple((torch.zeros_like(tensor) for tensor in averaged_tensors))
        super().__init__(averaged_tensors=averaged_tensors, **kwargs)

    def get_current_state(self):
        """Return (metadata, tensors) to serve to peers requesting our state."""
        with torch.no_grad():
            model_parameters = [x.cpu() for x in self.trainer.model.parameters()]
            (optimizer_metadata, optimizer_tensors) = dump_optimizer_state(self.trainer.optimizer)
        metadata = dict(step=self.trainer.state.global_step, group_bits=self.get_group_bits(), optimizer_metadata=optimizer_metadata)
        # Model params first, then optimizer tensors — load_state_from_peers
        # relies on this ordering to split them back apart.
        return (metadata, list(chain(model_parameters, optimizer_tensors)))

    def load_state_from_peers(self, **kwargs):
        """Fetch a peer's state and overwrite local model/optimizer/step in place."""
        loadad_state = super().load_state_from_peers(**kwargs)  # sic: variable name kept as-is
        if (loadad_state is None):
            return
        (metadata, flat_tensors) = loadad_state
        # Split the flat tensor list: first the model params, rest is optimizer state.
        num_params = len(list(self.trainer.model.parameters()))
        (model_parameters, opt_tensors) = (flat_tensors[:num_params], flat_tensors[num_params:])
        with torch.no_grad():
            for (local_param, loaded_param) in zip(self.trainer.model.parameters(), model_parameters):
                local_param[...] = loaded_param
            load_optimizer_state(self.trainer.optimizer, metadata['optimizer_metadata'], opt_tensors)
        # Fast-forward our step counter (and LR schedule) to the peer's step.
        collaboration_step = metadata['step']
        while (self.trainer.state.global_step < collaboration_step):
            self.trainer.state.global_step += 1
            self.trainer.lr_scheduler.step()
def codegen_module(kernel, device='cpu'):
    """Render C/CUDA module source for a kernel's forward and adjoint entry points.

    Builds the argument/parameter lists from the kernel's adjoint signature
    (adjoint entries get an extra `adj_` copy of every argument) and fills in
    the per-device module template. Raises ValueError for unknown devices.
    """
    adj = kernel.adj
    forward_args = ['launch_bounds_t dim']
    forward_params = ['dim']
    for arg in adj.args:
        forward_args.append(f'{arg.ctype()} var_{arg.label}')
        forward_params.append(f'var_{arg.label}')
    # Adjoint signature = forward signature plus an adj_ slot per argument.
    reverse_args = list(forward_args)
    reverse_params = list(forward_params)
    for arg in adj.args:
        reverse_args.append(f'{arg.ctype()} adj_{arg.label}')
        reverse_params.append(f'adj_{arg.label}')
    if (device == 'cpu'):
        template = cpu_module_template
    elif (device == 'cuda'):
        template = cuda_module_template
    else:
        raise ValueError('Device {} is not supported'.format(device))
    return template.format(name=kernel.key, forward_args=indent(forward_args), reverse_args=indent(reverse_args), forward_params=indent(forward_params, 3), reverse_params=indent(reverse_params, 3))
@torch.no_grad()  # restored decorator (was mangled to `_grad()`); explains the inner enable_grad
def generate_x_adv_denoised_v2(x, y, diffusion, model, classifier, pgd_conf, device, t):
    """Run a PGD attack against the diffusion-denoised classifier.

    Gradients are taken only through the classifier on the detached denoised
    image (the diffusion pass itself stays gradient-free), so the perturbation
    `delta` is updated by sign-gradient steps and clamped to the eps ball.
    Returns the adversarial example clamped to [0, 1].
    """
    net = Denoised_Classifier(diffusion, model, classifier, t)
    delta = torch.zeros(x.shape).to(x.device)
    loss_fn = torch.nn.CrossEntropyLoss(reduction='sum')
    eps = pgd_conf['eps']
    alpha = pgd_conf['alpha']
    num_steps = pgd_conf['iter']  # renamed from `iter` to avoid shadowing the builtin
    for pgd_iter_id in range(num_steps):
        # Denoise the current adversarial candidate; detach so the diffusion
        # graph is not retained, then re-enable grad just for the classifier.
        x_diff = net.sdedit((x + delta), t).detach()
        x_diff.requires_grad_()
        with torch.enable_grad():
            loss = loss_fn(classifier(x_diff), y)
            loss.backward()
        grad_sign = x_diff.grad.data.sign()
        delta += (grad_sign * alpha)
        delta = torch.clamp(delta, (- eps), eps)
    print('Done')
    x_adv = torch.clamp((x + delta), 0, 1)
    return x_adv.detach()
def loss_coteaching_plus(logits, logits2, labels, forget_rate, ind, noise_or_not, step):
    """Co-teaching+ loss: train each network on samples where the two disagree.

    During warm-up (step < 5000) all samples contribute; afterwards only the
    disagreement set does. Returns (loss_1, loss_2, pure_ratio_1, pure_ratio_2).
    """
    outputs = F.softmax(logits, dim=1)
    outputs2 = F.softmax(logits2, dim=1)
    (_, pred1) = torch.max(logits.data, 1)
    (_, pred2) = torch.max(logits2.data, 1)
    (pred1, pred2) = (pred1.cpu().numpy(), pred2.cpu().numpy())
    # Collect indices where the two networks predict different classes.
    logical_disagree_id = np.zeros(labels.size(), dtype=bool)
    disagree_id = []
    for (idx, p1) in enumerate(pred1):
        if (p1 != pred2[idx]):
            disagree_id.append(idx)
            logical_disagree_id[idx] = True
    # Map disagreement positions back to dataset indices (0s are dropped,
    # which silently discards a genuine dataset index 0 — quirk of the
    # original algorithm implementation, kept as-is).
    temp_disagree = (ind * logical_disagree_id.astype(np.int64))
    ind_disagree = np.asarray([i for i in temp_disagree if (i != 0)]).transpose()
    try:
        assert (ind_disagree.shape[0] == len(disagree_id))
    except AssertionError:  # fix: was a bare `except:` hiding real errors
        disagree_id = disagree_id[:ind_disagree.shape[0]]
    # Warm-up mask: before step 5000 every sample updates both networks.
    _update_step = np.logical_or(logical_disagree_id, (step < 5000)).astype(np.float32)
    update_step = Variable(torch.from_numpy(_update_step)).cuda()
    if (len(disagree_id) > 0):
        update_labels = labels[disagree_id]
        update_outputs = outputs[disagree_id]
        update_outputs2 = outputs2[disagree_id]
        (loss_1, loss_2, pure_ratio_1, pure_ratio_2) = loss_coteaching(update_outputs, update_outputs2, update_labels, forget_rate, ind_disagree, noise_or_not)
    else:
        # No disagreement: fall back to plain cross-entropy on the whole batch.
        update_labels = labels
        update_outputs = outputs
        update_outputs2 = outputs2
        cross_entropy_1 = F.cross_entropy(update_outputs, update_labels)
        cross_entropy_2 = F.cross_entropy(update_outputs2, update_labels)
        loss_1 = (torch.sum((update_step * cross_entropy_1)) / labels.size()[0])
        loss_2 = (torch.sum((update_step * cross_entropy_2)) / labels.size()[0])
        pure_ratio_1 = (np.sum(noise_or_not[ind]) / ind.shape[0])
        pure_ratio_2 = (np.sum(noise_or_not[ind]) / ind.shape[0])
    return (loss_1, loss_2, pure_ratio_1, pure_ratio_2)
class CIFAR10MSDNet(nn.Module):
    """MSDNet for CIFAR-10 with one auxiliary classifier per feature block.

    `channels` is a list (per block) of per-scale channel lists; each block's
    classifier reads the finest-scale (last) output of that block.
    """

    def __init__(self, channels, init_layer_channels, num_feature_blocks, use_bottleneck, bottleneck_factors, in_channels=3, in_size=(32, 32), num_classes=10):
        super(CIFAR10MSDNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.init_layer = CIFAR10MSDInitLayer(in_channels=in_channels, out_channels=init_layer_channels)
        in_channels = init_layer_channels
        self.feature_blocks = nn.Sequential()
        self.classifiers = nn.Sequential()
        for i in range(num_feature_blocks):
            self.feature_blocks.add_module('block{}'.format((i + 1)), MSDFeatureBlock(in_channels=in_channels, out_channels=channels[i], use_bottleneck=use_bottleneck, bottleneck_factors=bottleneck_factors[i]))
            # Fix: keep the per-scale channel list for the next block's input;
            # the previous code stored channels[i][-1] (an int) and then
            # indexed it again with [-1], which would raise TypeError.
            in_channels = channels[i]
            self.classifiers.add_module('classifier{}'.format((i + 1)), CIFAR10MSDClassifier(in_channels=in_channels[(- 1)], num_classes=num_classes))
        self._init_params()

    def _init_params(self):
        # Kaiming init for all conv weights; zero biases where present.
        for (name, module) in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if (module.bias is not None):
                    init.constant_(module.bias, 0)

    def forward(self, x, only_last=True):
        """Run all blocks; return the last classifier's output, or all of them."""
        x = self.init_layer(x)
        outs = []
        for (feature_block, classifier) in zip(self.feature_blocks, self.classifiers):
            x = feature_block(x)
            # Each classifier consumes the finest-scale feature map.
            y = classifier(x[(- 1)])
            outs.append(y)
        if only_last:
            return outs[(- 1)]
        else:
            return outs
@pytest.mark.django_db  # restored decorator (was mangled to a bare `.django_db`)
def test_create_user_with_extra_fields():
    """create_user must persist the extra profile kwargs verbatim."""
    user = User.objects.create_user('', 'johnpassword', name='John', full_name='John Lennon', gender='male', date_birth=datetime.datetime.strptime('09/10/1940', '%d/%m/%Y'))
    assert (user.name == 'John')
    assert (user.full_name == 'John Lennon')
    assert (user.gender == 'male')
    assert (user.date_birth == datetime.datetime.strptime('09/10/1940', '%d/%m/%Y'))
def test_vertical_perspective_operation__defaults():
    """All unspecified Vertical Perspective parameters default to zero."""
    conversion = VerticalPerspectiveConversion(viewpoint_height=10)
    assert (conversion.name == 'unknown')
    assert (conversion.method_name == 'Vertical Perspective')
    expected_params = {'Latitude of topocentric origin': 0.0, 'Longitude of topocentric origin': 0.0, 'Ellipsoidal height of topocentric origin': 0.0, 'Viewpoint height': 10.0, 'False easting': 0.0, 'False northing': 0.0}
    assert (_to_dict(conversion) == expected_params)
class TestAdaroundOptimizer:
    """Unit tests for AdaroundOptimizer (rounding optimization for ONNX models)."""

    @pytest.mark.skipif((not torch.cuda.is_available()), reason='This unit-test is meant to be run on GPU')  # restored decorator
    @pytest.mark.parametrize('warm_start', [1.0, 0.2])  # restored decorator
    def test_optimize_rounding(self, warm_start):
        """adaround_module must train alpha without touching the stored weights."""
        if (version.parse(torch.__version__) >= version.parse('1.13')):
            np.random.seed(0)
            torch.manual_seed(0)
            model = test_models.single_residual_model()
            model_data = ModelData(model.model)
            sim = QuantizationSimModel(copy.deepcopy(model))
            param_to_tq_dict = create_param_to_tensor_quantizer_dict(sim)
            quant_module = model_data.module_to_info['/conv1/Conv']
            old_weights = torch.from_numpy(numpy_helper.to_array(quant_module.params['weight'].tensor)).clone()
            data_loader = dataloader()
            path = './tmp/cached_dataset/'
            cached_dataset = CachedDataset(data_loader, 1, path)
            opt_params = AdaroundHyperParameters(num_iterations=10, reg_param=0.01, beta_range=(20, 2), warm_start=warm_start)
            AdaroundOptimizer.adaround_module(quant_module, 'input_updated', model, sim.model, 'Relu', cached_dataset, opt_params, param_to_tq_dict, True, 0)
            new_weights = torch.from_numpy(numpy_helper.to_array(quant_module.params['weight'].tensor))
            weight_name = quant_module.params['weight'].name
            # Find the quantized copy of the weight inside the sim graph.
            for tensor in sim.model.model.graph.initializer:
                if (tensor.name == weight_name):
                    quantized_weight = torch.from_numpy(numpy_helper.to_array(tensor))
                    break
            # The sim's weight is quantized (differs), but the source model's
            # weight tensor must be untouched, and alpha must be populated.
            assert (not torch.all(quantized_weight.eq(new_weights)))
            assert torch.all(old_weights.eq(new_weights))
            assert torch.all(param_to_tq_dict[quant_module.params['weight'].name].alpha)

    def test_compute_recons_metrics(self):
        """Hard-rounding reconstruction error must exceed the soft-rounding error."""
        if (version.parse(torch.__version__) >= version.parse('1.13')):
            np.random.seed(0)
            torch.manual_seed(0)
            model = test_models.single_residual_model()
            model_data = ModelData(model.model)
            sim = QuantizationSimModel(model)
            param_to_tq_dict = create_param_to_tensor_quantizer_dict(sim)
            quant_module = model_data.module_to_info['/conv1/Conv']
            inp_data = torch.randn(1, 3, 32, 32)
            out_data = torch.randn(1, 32, 18, 18)
            (recon_error_soft, recon_error_hard) = AdaroundOptimizer._compute_recons_metrics(quant_module, None, inp_data, out_data, param_to_tq_dict, False)
            assert (recon_error_hard > recon_error_soft > 1.4)

    def test_compute_output_with_adarounded_weights(self):
        """Adarounded forward must keep grad and produce the expected shapes
        for Conv, Gemm and ConvTranspose modules."""
        if (version.parse(torch.__version__) >= version.parse('1.13')):
            model = test_models.single_residual_model()
            model_data = ModelData(model.model)
            sim = QuantizationSimModel(model)
            param_to_tq_dict = create_param_to_tensor_quantizer_dict(sim)
            quant_module = model_data.module_to_info['/conv2/Conv']
            weights = torch.from_numpy(numpy_helper.to_array(quant_module.params['weight'].tensor))
            inp_data = torch.randn(1, 32, 32, 32)
            out_data = AdaroundOptimizer._compute_output_with_adarounded_weights(weights, quant_module, inp_data, param_to_tq_dict[quant_module.params['weight'].name])
            assert (out_data.requires_grad == True)
            assert (out_data.shape == torch.Size([1, 16, 18, 18]))
            quant_module = model_data.module_to_info['/fc/Gemm']
            weights = torch.from_numpy(numpy_helper.to_array(quant_module.params['weight'].tensor))
            inp_data = torch.randn(1, 72)
            out_data = AdaroundOptimizer._compute_output_with_adarounded_weights(weights, quant_module, inp_data, param_to_tq_dict[quant_module.params['weight'].name])
            assert (out_data.shape == torch.Size([1, 10]))
            model = test_models.transposed_conv_model_without_bn()
            model_data = ModelData(model.model)
            sim = QuantizationSimModel(model)
            param_to_tq_dict = create_param_to_tensor_quantizer_dict(sim)
            quant_module = model_data.module_to_info['/conv1/ConvTranspose']
            weights = torch.from_numpy(numpy_helper.to_array(quant_module.params['weight'].tensor))
            inp_data = torch.randn(10, 10, 4, 4)
            out_data = AdaroundOptimizer._compute_output_with_adarounded_weights(weights, quant_module, inp_data, param_to_tq_dict[quant_module.params['weight'].name])
            assert (out_data.shape == torch.Size([10, 10, 6, 6]))
def test_add_nodes_inbetween_branches():
    """Two parallel branches inserted between two chains share the chains' endpoints."""
    (a, b, c, d, e, f, x, y) = get_pseudo_nodes(8)
    graph = Graph()
    head_chain = ((graph.orphan() >> a) >> b)
    tail_chain = ((graph.orphan() >> x) >> y)
    branch_cd = (((head_chain >> c) >> d) >> tail_chain)
    branch_ef = (((head_chain >> e) >> f) >> tail_chain)
    # Chain ranges stay local; branch ranges span from head start to tail end.
    assert (head_chain.range == graph.indexes_of(a, b, _type=tuple))
    assert (tail_chain.range == graph.indexes_of(x, y, _type=tuple))
    assert (branch_cd.range == graph.indexes_of(a, y, _type=tuple))
    assert (branch_ef.range == graph.indexes_of(a, y, _type=tuple))
    # b fans out into both branches; both branches converge back into x.
    assert (graph.outputs_of(b) == graph.indexes_of(c, e))
    assert (graph.outputs_of(d) == graph.indexes_of(x))
    assert (graph.outputs_of(f) == graph.indexes_of(x))
    assert (graph.outputs_of(y) == set())
class BatchSampler(BaseSampler):
    """Sampler that collects whole batches of trajectories via the parallel sampler."""

    def __init__(self, algo):
        self.algo = algo
        super(BatchSampler, self).__init__(algo)

    def start_worker(self):
        # Register the env/policy with the worker pool under this algo's scope.
        parallel_sampler.populate_task(self.algo.env, self.algo.policy, scope=self.algo.scope)

    def shutdown_worker(self):
        parallel_sampler.terminate_task(scope=self.algo.scope)

    def obtain_samples(self, itr):
        """Roll out up to batch_size samples using the current policy parameters."""
        policy_params = self.algo.policy.get_param_values()
        paths = parallel_sampler.sample_paths(policy_params=policy_params, max_samples=self.algo.batch_size, max_path_length=self.algo.max_path_length, scope=self.algo.scope)
        if self.algo.whole_paths:
            return paths
        # Trim overshoot so the total sample count does not exceed batch_size.
        return parallel_sampler.truncate_paths(paths, self.algo.batch_size)
@require_tokenizers  # restored decorator (was mangled to a bare `_tokenizers`)
@require_pandas  # restored decorator (was mangled to a bare `_pandas`)
class LayoutLMv3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for LayoutLMv3, covering slow and fast tokenizers."""
    tokenizer_class = LayoutLMv3Tokenizer
    rust_tokenizer_class = LayoutLMv3TokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = False
    test_seq2seq = False
    from_pretrained_kwargs = {'cls_token': '<s>'}
def get_words_and_boxes(self):
    """Fixed two-word sample with matching normalized bounding boxes."""
    sample_words = ['lower', 'newer']
    sample_boxes = [[423, 237, 440, 251], [427, 272, 441, 287]]
    return (sample_words, sample_boxes)
def get_words_and_boxes_batch(self):
    """Batched variant of get_words_and_boxes: two examples of two words each."""
    batch_words = [['lower', 'newer'], ['new', 'low']]
    batch_boxes = [[[423, 237, 440, 251], [427, 272, 441, 287]], [[961, 885, 992, 912], [256, 38, 330, 58]]]
    return (batch_words, batch_boxes)
def get_question_words_and_boxes(self):
    """Question-answering fixture: a question plus context words with boxes."""
    question = "what's his name?"
    context_words = ['lower', 'newer']
    context_boxes = [[423, 237, 440, 251], [427, 272, 441, 287]]
    return (question, context_words, context_boxes)
def get_question_words_and_boxes_batch(self):
    """Batched QA fixture: two questions with their context words and boxes."""
    batch_questions = ["what's his name?", 'how is he called?']
    batch_words = [['lower', 'newer'], ['newer', 'lower']]
    batch_boxes = [[[423, 237, 440, 251], [427, 272, 441, 287]], [[256, 38, 330, 58], [256, 38, 330, 58]]]
    return (batch_questions, batch_words, batch_boxes)
def setUp(self):
    """Write a tiny BPE vocab and merges file into this test's tmp directory."""
    super().setUp()
    vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'G', 'Gl', 'Gn', 'Glo', 'Glow', 'er', 'Glowest', 'Gnewer', 'Gwider', '<unk>']
    vocab_tokens = {token: index for (index, token) in enumerate(vocab)}
    merges = ['#version: 0.2', 'G l', 'Gl o', 'Glo w', 'e r', '']
    self.special_tokens_map = {'unk_token': '<unk>'}
    self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
    self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
    with open(self.vocab_file, 'w', encoding='utf-8') as vocab_fp:
        vocab_fp.write((json.dumps(vocab_tokens) + '\n'))
    with open(self.merges_file, 'w', encoding='utf-8') as merges_fp:
        merges_fp.write('\n'.join(merges))
def get_tokenizer(self, **kwargs):
    """Instantiate the slow tokenizer from the tmp dir, merging special-token overrides."""
    kwargs.update(self.special_tokens_map)
    return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def get_rust_tokenizer(self, **kwargs):
    """Instantiate the fast (Rust) tokenizer with the same special-token overrides."""
    kwargs.update(self.special_tokens_map)
    return LayoutLMv3TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
    """Input/expected-output pair for round-trip checks (identical for this model)."""
    text = 'lower newer'
    return (text, text)
def test_full_tokenizer(self):
    """BPE-tokenize a known string and verify tokens and ids against the tiny vocab."""
    tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
    text = 'lower newer'
    expected_bpe = ['Glow', 'er', 'G', 'n', 'e', 'w', 'er']
    produced = tokenizer.tokenize(text)
    self.assertListEqual(produced, expected_bpe)
    # Append an unknown token and check the full id mapping.
    with_unk = (produced + [tokenizer.unk_token])
    expected_ids = [14, 15, 10, 9, 3, 2, 15, 19]
    self.assertListEqual(tokenizer.convert_tokens_to_ids(with_unk), expected_ids)
def test_sequence_builders(self):
    """Pair encoding layout: <s> question </s></s> words </s> (ids 0 and 2)."""
    tokenizer = self.tokenizer_class.from_pretrained('microsoft/layoutlmv3-base')
    (question, words, boxes) = self.get_question_words_and_boxes()
    question_tokens = question.split()
    pad_boxes = [tokenizer.pad_token_box for _ in question_tokens]
    text = tokenizer.encode(question_tokens, boxes=pad_boxes, add_special_tokens=False)
    text_2 = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
    encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
    assert (encoded_pair == ((((([0] + text) + [2]) + [2]) + text_2) + [2]))
def test_add_special_tokens(self):
    """A newly registered cls_token encodes to one id and is hidden by skip_special_tokens."""
    tokenizers: List[LayoutLMv3Tokenizer] = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            special_token = '[SPECIAL_TOKEN]'
            special_token_box = [1000, 1000, 1000, 1000]
            tokenizer.add_special_tokens({'cls_token': special_token})
            encoded = tokenizer.encode([special_token], boxes=[special_token_box], add_special_tokens=False)
            self.assertEqual(len(encoded), 1)
            roundtrip = tokenizer.decode(encoded, skip_special_tokens=True)
            self.assertTrue((special_token not in roundtrip))
def test_add_tokens_tokenizer(self):
    """Adding regular and special tokens must grow len(tokenizer) (but not
    vocab_size), and the new tokens must encode to the newly assigned ids."""
    tokenizers: List[LayoutLMv3Tokenizer] = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            vocab_size = tokenizer.vocab_size
            all_size = len(tokenizer)
            self.assertNotEqual(vocab_size, 0)
            # --- plain added tokens ---
            new_toks = ['aaaaa', 'bbbbbb', 'cccccccccdddddddd']
            added_toks = tokenizer.add_tokens(new_toks)
            vocab_size_2 = tokenizer.vocab_size
            all_size_2 = len(tokenizer)
            self.assertNotEqual(vocab_size_2, 0)
            # vocab_size (base vocab) is unchanged; total length grows.
            self.assertEqual(vocab_size, vocab_size_2)
            self.assertEqual(added_toks, len(new_toks))
            self.assertEqual(all_size_2, (all_size + len(new_toks)))
            words = 'aaaaa bbbbbb low cccccccccdddddddd l'.split()
            boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
            tokens = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
            self.assertGreaterEqual(len(tokens), 4)
            # Added tokens get ids above the base vocab range.
            self.assertGreater(tokens[0], (tokenizer.vocab_size - 1))
            self.assertGreater(tokens[(- 2)], (tokenizer.vocab_size - 1))
            # --- added special tokens ---
            new_toks_2 = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
            added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
            vocab_size_3 = tokenizer.vocab_size
            all_size_3 = len(tokenizer)
            self.assertNotEqual(vocab_size_3, 0)
            self.assertEqual(vocab_size, vocab_size_3)
            self.assertEqual(added_toks_2, len(new_toks_2))
            self.assertEqual(all_size_3, (all_size_2 + len(new_toks_2)))
            words = '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l'.split()
            boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
            tokens = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
            self.assertGreaterEqual(len(tokens), 6)
            self.assertGreater(tokens[0], (tokenizer.vocab_size - 1))
            self.assertGreater(tokens[0], tokens[1])
            self.assertGreater(tokens[(- 2)], (tokenizer.vocab_size - 1))
            self.assertGreater(tokens[(- 2)], tokens[(- 3)])
            # The new special tokens resolve to the freshly registered ids.
            self.assertEqual(tokens[0], tokenizer.eos_token_id)
            self.assertEqual(tokens[(- 2)], tokenizer.pad_token_id)
@require_tokenizers  # restored decorator (was mangled to a bare `_tokenizers`)
def test_encode_decode_with_spaces(self):
    """Decoding added tokens honors the class's space_between_special_tokens flag."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            (words, boxes) = self.get_words_and_boxes()
            new_toks = [AddedToken('[ABC]', normalized=False), AddedToken('[DEF]', normalized=False)]
            tokenizer.add_tokens(new_toks)
            input = '[ABC][DEF][ABC][DEF]'
            if self.space_between_special_tokens:
                output = '[ABC] [DEF] [ABC] [DEF]'
            else:
                output = input
            encoded = tokenizer.encode(input.split(), boxes=boxes, add_special_tokens=False)
            decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
            self.assertIn(decoded, [output, output.lower()])
@unittest.skip('Not implemented')  # restored decorator (was mangled to a bare call expression)
def test_right_and_left_truncation(self):
    pass
def test_encode_plus_with_padding(self):
    """encode_plus padding semantics: padding=False is a no-op, and
    padding='max_length' appends/prepends pad ids per `padding_side`,
    with matching special_tokens_mask / token_type_ids / attention_mask."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            (words, boxes) = self.get_words_and_boxes()
            # skips tokenizers that define no pad token
            self._check_no_pad_token_padding(tokenizer, words)
            padding_size = 10
            padding_idx = tokenizer.pad_token_id
            # reference encoding without any padding arguments
            encoded_sequence = tokenizer.encode_plus(words, boxes=boxes, return_special_tokens_mask=True)
            input_ids = encoded_sequence['input_ids']
            special_tokens_mask = encoded_sequence['special_tokens_mask']
            sequence_length = len(input_ids)
            # padding=False must leave the encoding untouched (right side)
            tokenizer.padding_side = 'right'
            not_padded_sequence = tokenizer.encode_plus(words, boxes=boxes, padding=False, return_special_tokens_mask=True)
            not_padded_input_ids = not_padded_sequence['input_ids']
            not_padded_special_tokens_mask = not_padded_sequence['special_tokens_mask']
            not_padded_sequence_length = len(not_padded_input_ids)
            self.assertTrue((sequence_length == not_padded_sequence_length))
            self.assertTrue((input_ids == not_padded_input_ids))
            self.assertTrue((special_tokens_mask == not_padded_special_tokens_mask))
            # NOTE(review): this repeats the identical padding=False check;
            # presumably padding_side='left' was intended here — verify
            not_padded_sequence = tokenizer.encode_plus(words, boxes=boxes, padding=False, return_special_tokens_mask=True)
            not_padded_input_ids = not_padded_sequence['input_ids']
            not_padded_special_tokens_mask = not_padded_sequence['special_tokens_mask']
            not_padded_sequence_length = len(not_padded_input_ids)
            self.assertTrue((sequence_length == not_padded_sequence_length))
            self.assertTrue((input_ids == not_padded_input_ids))
            self.assertTrue((special_tokens_mask == not_padded_special_tokens_mask))
            # right padding: pad ids appended, mask positions flagged with 1
            tokenizer.padding_side = 'right'
            right_padded_sequence = tokenizer.encode_plus(words, boxes=boxes, max_length=(sequence_length + padding_size), padding='max_length', return_special_tokens_mask=True)
            right_padded_input_ids = right_padded_sequence['input_ids']
            right_padded_special_tokens_mask = right_padded_sequence['special_tokens_mask']
            right_padded_sequence_length = len(right_padded_input_ids)
            self.assertTrue(((sequence_length + padding_size) == right_padded_sequence_length))
            self.assertTrue(((input_ids + ([padding_idx] * padding_size)) == right_padded_input_ids))
            self.assertTrue(((special_tokens_mask + ([1] * padding_size)) == right_padded_special_tokens_mask))
            # left padding: pad ids prepended
            tokenizer.padding_side = 'left'
            left_padded_sequence = tokenizer.encode_plus(words, boxes=boxes, max_length=(sequence_length + padding_size), padding='max_length', return_special_tokens_mask=True)
            left_padded_input_ids = left_padded_sequence['input_ids']
            left_padded_special_tokens_mask = left_padded_sequence['special_tokens_mask']
            left_padded_sequence_length = len(left_padded_input_ids)
            self.assertTrue(((sequence_length + padding_size) == left_padded_sequence_length))
            self.assertTrue(((([padding_idx] * padding_size) + input_ids) == left_padded_input_ids))
            self.assertTrue(((([1] * padding_size) + special_tokens_mask) == left_padded_special_tokens_mask))
            # auxiliary tensors are padded with 0 on the same side
            if ('token_type_ids' in tokenizer.model_input_names):
                token_type_ids = encoded_sequence['token_type_ids']
                left_padded_token_type_ids = left_padded_sequence['token_type_ids']
                right_padded_token_type_ids = right_padded_sequence['token_type_ids']
                assert ((token_type_ids + ([0] * padding_size)) == right_padded_token_type_ids)
                assert ((([0] * padding_size) + token_type_ids) == left_padded_token_type_ids)
            if ('attention_mask' in tokenizer.model_input_names):
                attention_mask = encoded_sequence['attention_mask']
                right_padded_attention_mask = right_padded_sequence['attention_mask']
                left_padded_attention_mask = left_padded_sequence['attention_mask']
                self.assertTrue(((attention_mask + ([0] * padding_size)) == right_padded_attention_mask))
                self.assertTrue(((([0] * padding_size) + attention_mask) == left_padded_attention_mask))
def test_internal_consistency(self):
    """tokenize/convert_tokens_to_ids must agree with encode, and decode
    must reproduce the expected text."""
    for tokenizer in self.get_tokenizers():
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            words, boxes = self.get_words_and_boxes()
            # per-word tokenization, flattened into a single subword list
            subword_tokens = [tok for word in words for tok in tokenizer.tokenize(word)]
            ids = tokenizer.convert_tokens_to_ids(subword_tokens)
            ids_2 = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
            self.assertListEqual(ids, ids_2)
            tokens_2 = tokenizer.convert_ids_to_tokens(ids)
            self.assertNotEqual(len(tokens_2), 0)
            text_2 = tokenizer.decode(ids)
            self.assertIsInstance(text_2, str)
            # fixture text decodes to this exact string
            self.assertEqual(text_2, ' lower newer')
def test_mask_output(self):
    """When a tokenizer produces token_type_ids, they must be exactly as
    long as the input_ids."""
    for tokenizer in self.get_tokenizers(fast=False, do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            words, boxes = self.get_words_and_boxes()
            # only meaningful for tokenizers that override the base
            # build_inputs_with_special_tokens AND emit token_type_ids
            overrides_base = (
                tokenizer.build_inputs_with_special_tokens.__qualname__.split('.')[0]
                != 'PreTrainedTokenizer'
            )
            if overrides_base and 'token_type_ids' in tokenizer.model_input_names:
                information = tokenizer.encode_plus(words, boxes=boxes, add_special_tokens=True)
                sequences = information['input_ids']
                mask = information['token_type_ids']
                self.assertEqual(len(sequences), len(mask))
def test_number_of_added_tokens(self):
    """num_special_tokens_to_add must match the actual difference in length
    between encodings with and without special tokens."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            # single sequence
            (words, boxes) = self.get_words_and_boxes()
            sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
            attached_sequences = tokenizer.encode(words, boxes=boxes, add_special_tokens=True)
            # length-2 results are skipped (ambiguous edge case)
            if (len(attached_sequences) != 2):
                self.assertEqual(tokenizer.num_special_tokens_to_add(pair=False), (len(attached_sequences) - len(sequences)))
            # question/words pair
            (question, words, boxes) = self.get_question_words_and_boxes()
            sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=False)
            attached_sequences = tokenizer.encode(question, words, boxes=boxes, add_special_tokens=True)
            if (len(attached_sequences) != 2):
                self.assertEqual(tokenizer.num_special_tokens_to_add(pair=True), (len(attached_sequences) - len(sequences)))
def test_padding_to_max_length(self):
    """Legacy `pad_to_max_length=True` must pad up to max_length, and be a
    no-op when no max_length is supplied."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            (words, boxes) = self.get_words_and_boxes()
            padding_size = 10
            # skips tokenizers that define no pad token
            self._check_no_pad_token_padding(tokenizer, words)
            padding_idx = tokenizer.pad_token_id
            tokenizer.padding_side = 'right'
            encoded_sequence = tokenizer.encode(words, boxes=boxes)
            sequence_length = len(encoded_sequence)
            # deprecated pad_to_max_length must still right-pad correctly
            padded_sequence = tokenizer.encode(words, boxes=boxes, max_length=(sequence_length + padding_size), pad_to_max_length=True)
            padded_sequence_length = len(padded_sequence)
            assert ((sequence_length + padding_size) == padded_sequence_length)
            assert ((encoded_sequence + ([padding_idx] * padding_size)) == padded_sequence)
            # without max_length, pad_to_max_length must change nothing
            encoded_sequence = tokenizer.encode(words, boxes=boxes)
            sequence_length = len(encoded_sequence)
            tokenizer.padding_side = 'right'
            padded_sequence_right = tokenizer.encode(words, boxes=boxes, pad_to_max_length=True)
            padded_sequence_right_length = len(padded_sequence_right)
            assert (sequence_length == padded_sequence_right_length)
            assert (encoded_sequence == padded_sequence_right)
def test_padding(self, max_length=50):
    """Slow (python) and fast (rust) tokenizers must pad identically.

    Covers encode / encode_plus / batch_encode_plus for single sequences,
    question+words pairs, and batches, with pad_to_max_length,
    padding='max_length', padding='longest'/True, and the standalone
    `pad` method.
    """
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            # both tokenizers must agree on the pad token before comparing
            self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
            pad_token_id = tokenizer_p.pad_token_id
            # --- encode: single sequence ---
            (words, boxes) = self.get_words_and_boxes()
            input_r = tokenizer_r.encode(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.encode(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
            self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
            input_r = tokenizer_r.encode(words, boxes=boxes, max_length=max_length, padding='max_length')
            input_p = tokenizer_p.encode(words, boxes=boxes, max_length=max_length, padding='max_length')
            self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
            # padding='longest' and padding=True must be interchangeable
            input_r = tokenizer_r.encode(words, boxes=boxes, padding='longest')
            input_p = tokenizer_p.encode(words, boxes=boxes, padding=True)
            self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
            # --- encode: question/words pair ---
            (question, words, boxes) = self.get_question_words_and_boxes()
            input_r = tokenizer_r.encode(question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.encode(question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
            self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
            input_r = tokenizer_r.encode(question, words, boxes=boxes, max_length=max_length, padding='max_length')
            input_p = tokenizer_p.encode(question, words, boxes=boxes, max_length=max_length, padding='max_length')
            self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
            input_r = tokenizer_r.encode(question, words, boxes=boxes, padding=True)
            input_p = tokenizer_p.encode(question, words, boxes=boxes, padding='longest')
            self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
            # --- encode_plus: single sequence (also checks attention_mask) ---
            (words, boxes) = self.get_words_and_boxes()
            input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length, pad_token_id)
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])
            input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=max_length, padding='max_length')
            input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=max_length, padding='max_length')
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length, pad_token_id)
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])
            input_r = tokenizer_r.encode_plus(words, boxes=boxes, padding='longest')
            input_p = tokenizer_p.encode_plus(words, boxes=boxes, padding=True)
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], len(input_r['input_ids']), pad_token_id)
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])
            # --- encode_plus: question/words pair ---
            (question, words, boxes) = self.get_question_words_and_boxes()
            input_r = tokenizer_r.encode_plus(question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.encode_plus(question, words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length, pad_token_id)
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])
            input_r = tokenizer_r.encode_plus(question, words, boxes=boxes, max_length=max_length, padding='max_length')
            input_p = tokenizer_p.encode_plus(question, words, boxes=boxes, max_length=max_length, padding='max_length')
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length, pad_token_id)
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])
            input_r = tokenizer_r.encode_plus(question, words, boxes=boxes, padding='longest')
            input_p = tokenizer_p.encode_plus(question, words, boxes=boxes, padding=True)
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], len(input_r['input_ids']), pad_token_id)
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])
            # --- batch_encode_plus: batch of sequences ---
            (words, boxes) = self.get_words_and_boxes_batch()
            input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.batch_encode_plus(words, boxes=boxes, max_length=max_length, pad_to_max_length=True)
            self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
            input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes, max_length=max_length, padding='max_length')
            input_p = tokenizer_p.batch_encode_plus(words, boxes=boxes, max_length=max_length, padding='max_length')
            self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
            # 'longest'/True pads to the longest example, ignoring max_length
            input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes, max_length=max_length, padding='longest')
            input_p = tokenizer_p.batch_encode_plus(words, boxes=boxes, max_length=max_length, padding=True)
            self.assert_batch_padded_input_match(input_r, input_p, len(input_r['input_ids'][0]), pad_token_id)
            input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes, padding='longest')
            input_p = tokenizer_p.batch_encode_plus(words, boxes=boxes, padding=True)
            self.assert_batch_padded_input_match(input_r, input_p, len(input_r['input_ids'][0]), pad_token_id)
            # --- batch_encode_plus: batch of question/words pairs ---
            (questions, words, boxes) = self.get_question_words_and_boxes_batch()
            input_r = tokenizer_r.batch_encode_plus(list(zip(questions, words)), is_pair=True, boxes=boxes, max_length=max_length, truncation=True, padding='max_length')
            input_p = tokenizer_p.batch_encode_plus(list(zip(questions, words)), is_pair=True, boxes=boxes, max_length=max_length, truncation=True, padding='max_length')
            self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
            input_r = tokenizer_r.batch_encode_plus(list(zip(questions, words)), is_pair=True, boxes=boxes, padding=True)
            input_p = tokenizer_p.batch_encode_plus(list(zip(questions, words)), is_pair=True, boxes=boxes, padding='longest')
            self.assert_batch_padded_input_match(input_r, input_p, len(input_r['input_ids'][0]), pad_token_id)
            # --- standalone pad() on encode_plus output ---
            # NOTE(review): only tokenizer_r is exercised below for both
            # operands — presumably intentional (pad-method self-consistency)
            (words, boxes) = self.get_words_and_boxes()
            input_r = tokenizer_r.encode_plus(words, boxes=boxes)
            input_r = tokenizer_r.pad(input_r)
            input_p = tokenizer_r.encode_plus(words, boxes=boxes)
            input_p = tokenizer_r.pad(input_p)
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], len(input_r['input_ids']), pad_token_id)
            input_r = tokenizer_r.encode_plus(words, boxes=boxes)
            input_r = tokenizer_r.pad(input_r, max_length=max_length, padding='max_length')
            input_p = tokenizer_r.encode_plus(words, boxes=boxes)
            input_p = tokenizer_r.pad(input_p, max_length=max_length, padding='max_length')
            self.assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length, pad_token_id)
            # --- standalone pad() on batch_encode_plus output ---
            (words, boxes) = self.get_words_and_boxes_batch()
            input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes)
            input_r = tokenizer_r.pad(input_r)
            input_p = tokenizer_r.batch_encode_plus(words, boxes=boxes)
            input_p = tokenizer_r.pad(input_p)
            self.assert_batch_padded_input_match(input_r, input_p, len(input_r['input_ids'][0]), pad_token_id)
            (words, boxes) = self.get_words_and_boxes_batch()
            input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes)
            input_r = tokenizer_r.pad(input_r, max_length=max_length, padding='max_length')
            input_p = tokenizer_r.batch_encode_plus(words, boxes=boxes)
            input_p = tokenizer_r.pad(input_p, max_length=max_length, padding='max_length')
            self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
def test_padding_warning_message_fast_tokenizer(self):
    """Calling `pad` on a fast tokenizer's output must emit exactly one
    efficiency warning; the slow tokenizer's `pad` must emit none."""
    if (not self.test_rust_tokenizer):
        return
    (words, boxes) = self.get_words_and_boxes_batch()
    tokenizer_fast = self.get_rust_tokenizer()
    encoding_fast = tokenizer_fast(words, boxes=boxes)
    with self.assertLogs('transformers', level='WARNING') as cm:
        tokenizer_fast.pad(encoding_fast)
        self.assertEqual(len(cm.records), 1)
        self.assertIn('Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.', cm.records[0].message)
    if (not self.test_slow_tokenizer):
        return
    tokenizer_slow = self.get_tokenizer()
    encoding_slow = tokenizer_slow(words, boxes=boxes)
    with self.assertLogs(level='WARNING') as cm:
        # assertLogs fails when nothing is logged, so emit a dummy warning
        # and verify it remains the ONLY record (slow pad logs nothing)
        logger.warning('Dummy warning')
        tokenizer_slow.pad(encoding_slow)
        self.assertEqual(len(cm.records), 1)
        self.assertIn('Dummy warning', cm.records[0].message)
def test_call(self):
    """`tokenizer(...)` (__call__) must match encode_plus / batch_encode_plus
    for single sequences, question/words pairs, and batches."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            # single sequence
            (words, boxes) = self.get_words_and_boxes()
            encoded_sequences_1 = tokenizer.encode_plus(words, boxes=boxes)
            encoded_sequences_2 = tokenizer(words, boxes=boxes)
            self.assertEqual(encoded_sequences_1, encoded_sequences_2)
            # question/words pair — the original fetched `question` but never
            # passed it, silently duplicating the single-sequence check; pass
            # it to BOTH calls so the pair path is actually exercised
            (question, words, boxes) = self.get_question_words_and_boxes()
            encoded_sequences_1 = tokenizer.encode_plus(question, words, boxes=boxes)
            encoded_sequences_2 = tokenizer(question, words, boxes=boxes)
            self.assertEqual(encoded_sequences_1, encoded_sequences_2)
            # batch
            (words, boxes) = self.get_words_and_boxes_batch()
            encoded_sequences_1 = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes)
            encoded_sequences_2 = tokenizer(words, boxes=boxes)
            self.assertEqual(encoded_sequences_1, encoded_sequences_2)
def test_batch_encode_plus_batch_sequence_length(self):
    """batch_encode_plus must equal per-example encode_plus, both unpadded
    and when padding to the longest sequence."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            (words, boxes) = self.get_words_and_boxes_batch()
            encoded_sequences = [tokenizer.encode_plus(words_example, boxes=boxes_example) for (words_example, boxes_example) in zip(words, boxes)]
            encoded_sequences_batch = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes, padding=False)
            self.assertListEqual(encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch))
            # length of the longest example in the batch
            maximum_length = len(max([encoded_sequence['input_ids'] for encoded_sequence in encoded_sequences], key=len))
            # skips tokenizers that define no pad token
            self._check_no_pad_token_padding(tokenizer, words)
            # padding=True must equal per-example 'max_length' padding at
            # the longest length
            encoded_sequences_padded = [tokenizer.encode_plus(words_example, boxes=boxes_example, max_length=maximum_length, padding='max_length') for (words_example, boxes_example) in zip(words, boxes)]
            encoded_sequences_batch_padded = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes, padding=True)
            self.assertListEqual(encoded_sequences_padded, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded))
            # 'longest' must ignore a larger max_length ...
            encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes, padding=True)
            encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes, max_length=(maximum_length + 10), padding='longest')
            for key in encoded_sequences_batch_padded_1.keys():
                self.assertListEqual(encoded_sequences_batch_padded_1[key], encoded_sequences_batch_padded_2[key])
            # ... and padding=False must ignore max_length entirely
            encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes, padding=False)
            encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes, max_length=(maximum_length + 10), padding=False)
            for key in encoded_sequences_batch_padded_1.keys():
                self.assertListEqual(encoded_sequences_batch_padded_1[key], encoded_sequences_batch_padded_2[key])
# NOTE(review): the bare string expression above this method was a garbled
# skip decorator — restored (confirm against upstream).
@unittest.skip('batch_encode_plus does not handle overflowing tokens.')
def test_batch_encode_plus_overflowing_tokens(self):
    pass
def test_batch_encode_plus_padding(self):
    """Batch 'max_length' padding must match per-example padding, for both
    the default right side and padding_side='left'."""
    # right padding (default)
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            (words, boxes) = self.get_words_and_boxes_batch()
            max_length = 100
            # skips tokenizers that define no pad token
            self._check_no_pad_token_padding(tokenizer, words)
            encoded_sequences = [tokenizer.encode_plus(words_example, boxes=boxes_example, max_length=max_length, padding='max_length') for (words_example, boxes_example) in zip(words, boxes)]
            encoded_sequences_batch = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes, max_length=max_length, padding='max_length')
            self.assertListEqual(encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch))
    # left padding (fresh tokenizers so the side change does not leak)
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            tokenizer.padding_side = 'left'
            (words, boxes) = self.get_words_and_boxes_batch()
            max_length = 100
            self._check_no_pad_token_padding(tokenizer, words)
            encoded_sequences = [tokenizer.encode_plus(words_example, boxes=boxes_example, max_length=max_length, padding='max_length') for (words_example, boxes_example) in zip(words, boxes)]
            encoded_sequences_batch = tokenizer.batch_encode_plus(words, is_pair=False, boxes=boxes, max_length=max_length, padding='max_length')
            self.assertListEqual(encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch))
def test_padding_to_multiple_of(self):
    """With pad_to_multiple_of=8, padded lengths must be divisible by 8;
    unpadded lengths must not be rounded; an incompatible max_length raises."""
    for tokenizer in self.get_tokenizers():
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            # guard clause: skipTest raises, so nothing below runs
            if tokenizer.pad_token is None:
                self.skipTest('No padding token.')
            words, boxes = self.get_words_and_boxes()
            # explicit padding -> every tensor length is a multiple of 8
            padded = tokenizer(words, boxes=boxes, padding=True, pad_to_multiple_of=8)
            for key, value in padded.items():
                self.assertEqual(len(value) % 8, 0, f'BatchEncoding.{key} is not multiple of 8')
            # no padding requested -> the fixture length is NOT rounded up
            unpadded = tokenizer(words, boxes=boxes, pad_to_multiple_of=8)
            for key, value in unpadded.items():
                self.assertNotEqual(len(value) % 8, 0, f'BatchEncoding.{key} is not multiple of 8')
            # truncation combined with padding still rounds up
            truncated = tokenizer(words, boxes=boxes, padding=True, truncation=True, pad_to_multiple_of=8)
            for key, value in truncated.items():
                self.assertEqual(len(value) % 8, 0, f'BatchEncoding.{key} is not multiple of 8')
            # max_length=12 is not a multiple of 8 -> must be rejected
            self.assertRaises(ValueError, tokenizer.__call__, words, boxes=boxes, padding=True, truncation=True, max_length=12, pad_to_multiple_of=8)
def test_tokenizer_slow_store_full_signature(self):
    """Every defaulted __init__ parameter must be recorded in init_kwargs."""
    signature = inspect.signature(self.tokenizer_class.__init__)
    tokenizer = self.get_tokenizer()
    # only parameters that carry a default are expected to be stored
    defaulted_params = (
        name
        for name, param in signature.parameters.items()
        if param.default != inspect.Parameter.empty
    )
    for parameter_name in defaulted_params:
        self.assertIn(parameter_name, tokenizer.init_kwargs)
def test_build_inputs_with_special_tokens(self):
    """Slow and fast build_inputs_with_special_tokens must agree for both
    single sequences and sequence pairs."""
    if (not self.test_slow_tokenizer):
        # without a slow version there is nothing to compare against
        return
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            (words, boxes) = self.get_words_and_boxes()
            # NOTE(review): the "pair" operand reuses the same encoding as
            # the simple one; any id list suffices for this comparison
            input_simple = tokenizer_p.encode(words, boxes=boxes, add_special_tokens=False)
            input_pair = tokenizer_p.encode(words, boxes=boxes, add_special_tokens=False)
            # single sequence
            output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple)
            output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple)
            self.assertEqual(output_p, output_r)
            # sequence pair
            output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair)
            output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair)
            self.assertEqual(output_p, output_r)
def test_special_tokens_mask_input_pairs(self):
    """Dropping the positions flagged by special_tokens_mask must recover
    the encoding produced without special tokens."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            words, boxes = self.get_words_and_boxes()
            plain_ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
            encoded = tokenizer.encode_plus(words, boxes=boxes, add_special_tokens=True, return_special_tokens_mask=True)
            ids_with_special = encoded['input_ids']
            mask = encoded['special_tokens_mask']
            self.assertEqual(len(mask), len(ids_with_special))
            # two-step filter: mark special positions, then drop them
            marked = [None if flagged else tok for tok, flagged in zip(ids_with_special, mask)]
            filtered = [tok for tok in marked if tok is not None]
            self.assertEqual(plain_ids, filtered)
def test_special_tokens_mask(self):
    """special_tokens_mask must align with input_ids, and filtering by it
    must yield the add_special_tokens=False encoding."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            words, boxes = self.get_words_and_boxes()
            plain_ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
            encoded = tokenizer.encode_plus(words, boxes=boxes, add_special_tokens=True, return_special_tokens_mask=True)
            ids_with_special = encoded['input_ids']
            mask = encoded['special_tokens_mask']
            self.assertEqual(len(mask), len(ids_with_special))
            filtered = [tok for tok, flagged in zip(ids_with_special, mask) if not flagged]
            self.assertEqual(plain_ids, filtered)
def test_save_and_load_tokenizer(self):
    """save_pretrained / from_pretrained must round-trip token ids and the
    full vocabulary."""
    # safety check: model_max_length must not be the sentinel value 42
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            self.assertNotEqual(tokenizer.model_max_length, 42)
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            (words, boxes) = self.get_words_and_boxes()
            # TemporaryDirectory guarantees cleanup even when an assertion
            # fails (the original mkdtemp/rmtree pair leaked the directory
            # on any failure between creation and removal)
            with tempfile.TemporaryDirectory() as tmpdirname:
                before_tokens = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
                before_vocab = tokenizer.get_vocab()
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
                after_vocab = after_tokenizer.get_vocab()
                self.assertListEqual(before_tokens, after_tokens)
                self.assertDictEqual(before_vocab, after_vocab)
def test_right_and_left_padding(self):
    """padding='max_length' must obey padding_side; padding=True/'longest'/
    False without max_length must leave a single sequence unchanged."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            (words, boxes) = self.get_words_and_boxes()
            sequence = 'Sequence'
            padding_size = 10
            # skips tokenizers that define no pad token
            # NOTE(review): passes the dummy string instead of `words`;
            # presumably a copy-paste leftover — verify
            self._check_no_pad_token_padding(tokenizer, sequence)
            padding_idx = tokenizer.pad_token_id
            # right padding appends pad ids
            tokenizer.padding_side = 'right'
            encoded_sequence = tokenizer.encode(words, boxes=boxes)
            sequence_length = len(encoded_sequence)
            padded_sequence = tokenizer.encode(words, boxes=boxes, max_length=(sequence_length + padding_size), padding='max_length')
            padded_sequence_length = len(padded_sequence)
            assert ((sequence_length + padding_size) == padded_sequence_length)
            assert ((encoded_sequence + ([padding_idx] * padding_size)) == padded_sequence)
            # left padding prepends pad ids
            tokenizer.padding_side = 'left'
            encoded_sequence = tokenizer.encode(words, boxes=boxes)
            sequence_length = len(encoded_sequence)
            padded_sequence = tokenizer.encode(words, boxes=boxes, max_length=(sequence_length + padding_size), padding='max_length')
            padded_sequence_length = len(padded_sequence)
            assert ((sequence_length + padding_size) == padded_sequence_length)
            assert ((([padding_idx] * padding_size) + encoded_sequence) == padded_sequence)
            # padding=True / 'longest' on a single sequence is a no-op
            encoded_sequence = tokenizer.encode(words, boxes=boxes)
            sequence_length = len(encoded_sequence)
            tokenizer.padding_side = 'right'
            padded_sequence_right = tokenizer.encode(words, boxes=boxes, padding=True)
            padded_sequence_right_length = len(padded_sequence_right)
            assert (sequence_length == padded_sequence_right_length)
            assert (encoded_sequence == padded_sequence_right)
            tokenizer.padding_side = 'left'
            padded_sequence_left = tokenizer.encode(words, boxes=boxes, padding='longest')
            padded_sequence_left_length = len(padded_sequence_left)
            assert (sequence_length == padded_sequence_left_length)
            assert (encoded_sequence == padded_sequence_left)
            # no padding argument / padding=False is also a no-op
            tokenizer.padding_side = 'right'
            padded_sequence_right = tokenizer.encode(words, boxes=boxes)
            padded_sequence_right_length = len(padded_sequence_right)
            assert (sequence_length == padded_sequence_right_length)
            assert (encoded_sequence == padded_sequence_right)
            tokenizer.padding_side = 'left'
            padded_sequence_left = tokenizer.encode(words, boxes=boxes, padding=False)
            padded_sequence_left_length = len(padded_sequence_left)
            assert (sequence_length == padded_sequence_left_length)
            assert (encoded_sequence == padded_sequence_left)
def test_token_type_ids(self):
    """token_type_ids must align with input_ids/attention_mask; a single
    sequence uses only segment id 0."""
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            # single sequence: segment id 1 must never appear
            (words, boxes) = self.get_words_and_boxes()
            output = tokenizer(words, boxes=boxes, return_token_type_ids=True)
            self.assertEqual(len(output['token_type_ids']), len(output['input_ids']))
            self.assertEqual(len(output['token_type_ids']), len(output['attention_mask']))
            self.assertIn(0, output['token_type_ids'])
            self.assertNotIn(1, output['token_type_ids'])
            # question/words pair: segment 0 must still be present
            (question, words, boxes) = self.get_question_words_and_boxes()
            output = tokenizer(question, words, boxes, return_token_type_ids=True)
            self.assertEqual(len(output['token_type_ids']), len(output['input_ids']))
            self.assertEqual(len(output['token_type_ids']), len(output['attention_mask']))
            self.assertIn(0, output['token_type_ids'])
def test_offsets_mapping(self):
    """Fast-tokenizer offset_mapping must have one entry per input id, and
    special_tokens_mask must sum to num_special_tokens_to_add."""
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            text = ['a', 'wonderful', 'test']
            boxes = [[1, 8, 12, 20] for _ in range(len(text))]
            # single sequence
            tokens_with_offsets = tokenizer_r.encode_plus(text, boxes=boxes, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True)
            added_tokens = tokenizer_r.num_special_tokens_to_add(False)
            offsets = tokens_with_offsets['offset_mapping']
            # one offset pair per token id
            self.assertEqual(len(offsets), len(tokens_with_offsets['input_ids']))
            self.assertEqual(sum(tokens_with_offsets['special_tokens_mask']), added_tokens)
            # question/words pair
            text = "what's his name"
            pair = ['a', 'wonderful', 'test']
            boxes = [[1, 8, 12, 20] for _ in range(len(pair))]
            tokens_with_offsets = tokenizer_r.encode_plus(text, pair, boxes=boxes, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True)
            added_tokens = tokenizer_r.num_special_tokens_to_add(True)
            offsets = tokens_with_offsets['offset_mapping']
            self.assertEqual(len(offsets), len(tokens_with_offsets['input_ids']))
            self.assertEqual(sum(tokens_with_offsets['special_tokens_mask']), added_tokens)
# NOTE(review): the bare `_torch` expression here was a garbled decorator —
# restored to @require_torch (confirm against upstream).
@require_torch
def test_torch_encode_plus_sent_to_model(self):
    """Outputs of encode_plus / batch_encode_plus (plus a pixel_values
    tensor) must be accepted by the matching model's forward pass."""
    import torch
    from transformers import MODEL_MAPPING, TOKENIZER_MAPPING
    MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING)
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING:
                return
            (config_class, model_class) = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
            config = config_class()
            if config.is_encoder_decoder or (config.pad_token_id is None):
                return
            model = model_class(config)
            # the embedding table must be able to index every tokenizer id
            is_using_common_embeddings = hasattr(model.get_input_embeddings(), 'weight')
            assert ((model.get_input_embeddings().weight.shape[0] >= len(tokenizer)) if is_using_common_embeddings else True)
            (words, boxes) = self.get_words_and_boxes()
            encoded_sequence = tokenizer.encode_plus(words, boxes=boxes, return_tensors='pt')
            batch_encoded_sequence = tokenizer.batch_encode_plus([words, words], boxes=[boxes, boxes], return_tensors='pt')
            # the model also expects an image input alongside the text;
            # presumably 224x224 RGB — TODO confirm against the model config
            encoded_sequence['pixel_values'] = torch.randn(1, 3, 224, 224)
            batch_encoded_sequence['pixel_values'] = torch.randn(2, 3, 224, 224)
            with torch.no_grad():
                model(**encoded_sequence)
                model(**batch_encoded_sequence)
def test_rust_and_python_full_tokenizers(self):
    """Slow (python) and fast (rust) tokenizers must produce identical ids
    with and without special tokens."""
    # both implementations must be available to compare
    if not (self.test_rust_tokenizer and self.test_slow_tokenizer):
        return
    slow_tokenizer = self.get_tokenizer()
    fast_tokenizer = self.get_rust_tokenizer()
    words, boxes = self.get_words_and_boxes()
    for add_special in (False, True):
        slow_ids = slow_tokenizer.encode(words, boxes=boxes, add_special_tokens=add_special)
        fast_ids = fast_tokenizer.encode(words, boxes=boxes, add_special_tokens=add_special)
        self.assertListEqual(slow_ids, fast_ids)
def test_tokenization_python_rust_equals(self):
    """Compare slow (python) and fast (rust) tokenizers on encode_plus:
    plain input, truncated long input, and truncation with overflow."""
    if (not self.test_slow_tokenizer):
        return
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            (words, boxes) = self.get_words_and_boxes()
            # Basic input: model-facing keys must match exactly.
            input_p = tokenizer_p.encode_plus(words, boxes=boxes)
            input_r = tokenizer_r.encode_plus(words, boxes=boxes)
            for key in filter((lambda x: (x in ['input_ids', 'token_type_ids', 'attention_mask', 'bbox'])), input_p.keys()):
                self.assertSequenceEqual(input_p[key], input_r[key])
            # NOTE(review): the "pairs" calls below repeat the same
            # single-sequence call, and the key filter iterates input_p
            # rather than input_pairs_p — both look like leftovers from the
            # generic pair-input template; behavior is unaffected since the
            # key sets coincide. Confirm against upstream.
            input_pairs_p = tokenizer_p.encode_plus(words, boxes=boxes)
            input_pairs_r = tokenizer_r.encode_plus(words, boxes=boxes)
            for key in filter((lambda x: (x in ['input_ids', 'token_type_ids', 'attention_mask', 'bbox'])), input_p.keys()):
                self.assertSequenceEqual(input_pairs_p[key], input_pairs_r[key])
            # Long input (1000 words) to exercise truncation at 512.
            words = ['hello' for _ in range(1000)]
            boxes = [[1000, 1000, 1000, 1000] for _ in range(1000)]
            input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=512, truncation=True)
            input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=512, truncation=True)
            for key in filter((lambda x: (x in ['input_ids', 'token_type_ids', 'attention_mask', 'bbox'])), input_p.keys()):
                self.assertSequenceEqual(input_p[key], input_r[key])
            # With overflow the fast tokenizer returns a batch, hence [0]
            # on the rust side when comparing against the slow output.
            input_p = tokenizer_p.encode_plus(words, boxes=boxes, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True)
            input_r = tokenizer_r.encode_plus(words, boxes=boxes, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True)
            for key in filter((lambda x: (x in ['input_ids', 'token_type_ids', 'attention_mask', 'bbox'])), input_p.keys()):
                self.assertSequenceEqual(input_p[key], input_r[key][0])
def test_embeded_special_tokens(self):
    """Fast and slow tokenizers must agree on encodings that contain special
    tokens, both at the id level and after converting back to tokens."""
    if not self.test_slow_tokenizer:
        return
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            fast = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            slow = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            words, boxes = self.get_words_and_boxes()
            enc_fast = fast.encode_plus(words, boxes=boxes, add_special_tokens=True)
            enc_slow = slow.encode_plus(words, boxes=boxes, add_special_tokens=True)
            # Every key of the slow output must match the fast output.
            for key in enc_slow.keys():
                self.assertEqual(enc_fast[key], enc_slow[key])
            if 'token_type_ids' in enc_fast:
                self.assertEqual(sum(enc_fast['token_type_ids']), sum(enc_slow['token_type_ids']))
            # Token strings (including special tokens) must also line up.
            self.assertSequenceEqual(
                fast.convert_ids_to_tokens(enc_fast['input_ids']),
                slow.convert_ids_to_tokens(enc_slow['input_ids']),
            )
def test_compare_add_special_tokens(self):
    """add_special_tokens=False must shorten every output of the fast
    tokenizer by exactly num_special_tokens_to_add, across tokenize/encode/
    encode_plus/batch_encode_plus."""
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            simple_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=False)
            (words, boxes) = self.get_words_and_boxes()
            # tokenize()
            no_special_tokens = tokenizer_r.tokenize(' '.join(words), add_special_tokens=False)
            with_special_tokens = tokenizer_r.tokenize(' '.join(words), add_special_tokens=True)
            self.assertEqual(len(no_special_tokens), (len(with_special_tokens) - simple_num_special_tokens_to_add))
            # encode()
            no_special_tokens = tokenizer_r.encode(words, boxes=boxes, add_special_tokens=False)
            with_special_tokens = tokenizer_r.encode(words, boxes=boxes, add_special_tokens=True)
            self.assertEqual(len(no_special_tokens), (len(with_special_tokens) - simple_num_special_tokens_to_add))
            # encode_plus(): every returned key must shrink consistently.
            no_special_tokens = tokenizer_r.encode_plus(words, boxes=boxes, add_special_tokens=False)
            with_special_tokens = tokenizer_r.encode_plus(words, boxes=boxes, add_special_tokens=True)
            for key in no_special_tokens.keys():
                self.assertEqual(len(no_special_tokens[key]), (len(with_special_tokens[key]) - simple_num_special_tokens_to_add))
            # batch_encode_plus(): check each sequence of the batch.
            (words, boxes) = self.get_words_and_boxes_batch()
            no_special_tokens = tokenizer_r.batch_encode_plus(words, boxes=boxes, add_special_tokens=False)
            with_special_tokens = tokenizer_r.batch_encode_plus(words, boxes=boxes, add_special_tokens=True)
            for key in no_special_tokens.keys():
                for (i_no, i_with) in zip(no_special_tokens[key], with_special_tokens[key]):
                    self.assertEqual(len(i_no), (len(i_with) - simple_num_special_tokens_to_add))
def test_layoutlmv3_truncation_integration_test(self):
    """Truncation must respect both an explicit max_length and, when none
    is given, the tokenizer's model_max_length."""
    words, boxes = self.get_words_and_boxes()
    tokenizer = LayoutLMv3Tokenizer.from_pretrained('microsoft/layoutlmv3-base', model_max_length=512)
    # Any explicit max_length in [12, 512) caps the output length.
    for max_len in range(12, 512):
        truncated = tokenizer.encode(words, boxes=boxes, max_length=max_len, truncation=True)
        self.assertLessEqual(len(truncated), max_len)
    # Without an explicit max_length, model_max_length is the cap,
    # and repeated calls are deterministic.
    tokenizer.model_max_length = 20
    first = tokenizer.encode(words, boxes=boxes, truncation=True)
    second = tokenizer.encode(words, boxes=boxes, truncation=True)
    self.assertListEqual(first, second)
    self.assertLessEqual(len(first), 20)
# NOTE(review): "_pt_tf_cross_test" looks like a mangled decorator —
# presumably "@require_pt_tf_cross_test" in the original source; confirm.
_pt_tf_cross_test
def test_batch_encode_plus_tensors(self):
    """PyTorch and TensorFlow return_tensors outputs must agree with each
    other and with the plain (python list) output."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            (words, boxes) = self.get_words_and_boxes_batch()
            # Unpadded batches have ragged lengths: tensor conversion must fail.
            self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors='pt')
            self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors='tf')
            if (tokenizer.pad_token_id is None):
                # Padding requested without a pad token must also fail.
                self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, padding=True, return_tensors='pt')
                self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, padding='longest', return_tensors='tf')
            else:
                pytorch_tensor = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True, return_tensors='pt')
                tensorflow_tensor = tokenizer.batch_encode_plus(words, boxes=boxes, padding='longest', return_tensors='tf')
                encoded_sequences = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True)
                # Cross-check every key across the three representations.
                for key in encoded_sequences.keys():
                    pytorch_value = pytorch_tensor[key].tolist()
                    tensorflow_value = tensorflow_tensor[key].numpy().tolist()
                    encoded_value = encoded_sequences[key]
                    self.assertEqual(pytorch_value, tensorflow_value, encoded_value)
def test_sequence_ids(self):
    """sequence_ids() must tag tokens with their source sequence (0 or 1)
    and special tokens with None; only fast tokenizers expose it."""
    for tokenizer in self.get_tokenizers():
        if not tokenizer.is_fast:
            continue
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            seq_0 = 'Test this method.'
            seq_1 = ['With', 'these', 'inputs.']
            boxes = [[1000] * 4 for _ in seq_1]
            # Single sequence: sequence id 0 must appear.
            encoding = tokenizer(seq_0.split(), boxes=boxes)
            self.assertIn(0, encoding.sequence_ids())
            # Pair input: both sequence ids must appear.
            encoding = tokenizer(seq_0, seq_1, boxes=boxes)
            self.assertIn(0, encoding.sequence_ids())
            self.assertIn(1, encoding.sequence_ids())
            # Special tokens (if any are added for pairs) map to None.
            if tokenizer.num_special_tokens_to_add(pair=True):
                self.assertIn(None, encoding.sequence_ids())
def test_special_tokens_initialization(self):
    """additional_special_tokens passed at load time must encode to a single
    id, consistently across the fast, slow, and from_slow tokenizers."""
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            added_tokens = [AddedToken('<special>', lstrip=True)]
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, additional_special_tokens=added_tokens, **kwargs)
            words = 'Hey this is a <special> token'.split()
            boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
            r_output = tokenizer_r.encode(words, boxes=boxes)
            # NOTE(review): boxes here is one flat box rather than a list of
            # one box per word ([[1000, 1000, 1000, 1000]]) — inconsistent
            # with every other call in this file; confirm intended upstream.
            special_token_id = tokenizer_r.encode(['<special>'], boxes=[1000, 1000, 1000, 1000], add_special_tokens=False)[0]
            self.assertTrue((special_token_id in r_output))
            if self.test_slow_tokenizer:
                # from_slow forces conversion from the slow tokenizer files.
                tokenizer_cr = self.rust_tokenizer_class.from_pretrained(pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, additional_special_tokens=added_tokens, **kwargs)
                words = 'Hey this is a <special> token'.split()
                boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
                p_output = tokenizer_p.encode(words, boxes=boxes)
                cr_output = tokenizer_cr.encode(words, boxes=boxes)
                self.assertEqual(p_output, r_output)
                self.assertEqual(cr_output, r_output)
                self.assertTrue((special_token_id in p_output))
                self.assertTrue((special_token_id in cr_output))
def test_training_new_tokenizer(self):
    """train_new_from_iterator must yield a usable tokenizer that keeps the
    special-token configuration of the tokenizer it was trained from."""
    if not self.test_rust_tokenizer:
        return
    base = self.get_rust_tokenizer()
    trained = base.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100)
    # The trained tokenizer must encode/decode a small batch round-trip.
    words = [['this', 'is', 'the'], ['how', 'are', 'you']]
    boxes = [[[1, 2, 3, 4], [5, 6, 7, 8], [1, 3, 4, 8]], [[5, 6, 7, 8], [4, 5, 6, 7], [3, 9, 2, 7]]]
    encoding = trained(words, boxes=boxes)
    self.assertEqual(len(encoding['input_ids']), 2)
    decoded = trained.decode(encoding['input_ids'][0], skip_special_tokens=True)
    expected = ' this is the'
    if base.backend_tokenizer.normalizer is not None:
        expected = base.backend_tokenizer.normalizer.normalize_str(expected)
    self.assertEqual(expected, decoded)
    # Special-token bookkeeping is inherited verbatim.
    self.assertEqual(base.num_special_tokens_to_add(False), trained.num_special_tokens_to_add(False))
    self.assertEqual(base.num_special_tokens_to_add(True), trained.num_special_tokens_to_add(True))
    self.assertEqual(base.max_len_single_sentence, trained.max_len_single_sentence)
    self.assertEqual(base.max_len_sentences_pair, trained.max_len_sentences_pair)
    self.assertSequenceEqual(base.all_special_tokens_extended, trained.all_special_tokens_extended)
    self.assertDictEqual(base.special_tokens_map, trained.special_tokens_map)
def test_training_new_tokenizer_with_special_tokens_change(self):
    """train_new_from_iterator with a special_tokens_map must rename the
    special tokens in the trained tokenizer (attribute, id, and the
    AddedToken flags), leaving unmapped special tokens intact."""
    if (not self.test_rust_tokenizer):
        return
    tokenizer = self.get_rust_tokenizer()
    class_signature = inspect.signature(tokenizer.__class__)
    # First: remap only the cls token, if this tokenizer class has one.
    if ('cls_token' in class_signature.parameters):
        new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100, special_tokens_map={tokenizer.cls_token: '<cls>'})
        cls_id = new_tokenizer.get_vocab()['<cls>']
        self.assertEqual(new_tokenizer.cls_token, '<cls>')
        self.assertEqual(new_tokenizer.cls_token_id, cls_id)
    # Second: remap every configured special token "tok" -> "toka".
    special_tokens_list = SpecialTokensMixin.SPECIAL_TOKENS_ATTRIBUTES.copy()
    special_tokens_list.remove('additional_special_tokens')
    special_tokens_map = {}
    for token in special_tokens_list:
        # The private attribute is None when the token is not configured.
        if (getattr(tokenizer, f'_{token}') is not None):
            special_token = getattr(tokenizer, token)
            special_tokens_map[special_token] = f'{special_token}a'
    new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100, special_tokens_map=special_tokens_map)
    # Each remapped token must expose the new string and a matching id.
    for token in special_tokens_list:
        if (getattr(tokenizer, f'_{token}') is None):
            continue
        special_token = getattr(tokenizer, token)
        if (special_token in special_tokens_map):
            new_special_token = getattr(new_tokenizer, token)
            self.assertEqual(special_tokens_map[special_token], new_special_token)
            new_id = new_tokenizer.get_vocab()[new_special_token]
            self.assertEqual(getattr(new_tokenizer, f'{token}_id'), new_id)
    # AddedToken instances must keep their flags (lstrip/rstrip/...)
    # through the rename; plain strings just need to be present.
    for special_token in tokenizer.all_special_tokens_extended:
        if (isinstance(special_token, AddedToken) and (special_token.content not in special_tokens_map)):
            self.assertTrue((special_token in new_tokenizer.all_special_tokens_extended), f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}")
        elif isinstance(special_token, AddedToken):
            special_token_str = special_token.content
            new_special_token_str = special_tokens_map[special_token_str]
            find = False
            for candidate in new_tokenizer.all_special_tokens_extended:
                if (isinstance(candidate, AddedToken) and (candidate.content == new_special_token_str) and (candidate.lstrip == special_token.lstrip) and (candidate.rstrip == special_token.rstrip) and (candidate.normalized == special_token.normalized) and (candidate.single_word == special_token.single_word)):
                    find = True
                    break
            self.assertTrue(find, f"'{new_special_token_str}' doesn't appear in the list '{new_tokenizer.all_special_tokens_extended}' as an AddedToken with the same parameters as '{special_token}' in the list {tokenizer.all_special_tokens_extended}")
        elif (special_token not in special_tokens_map):
            self.assertTrue((special_token in new_tokenizer.all_special_tokens_extended), f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}")
        else:
            self.assertTrue((special_tokens_map[special_token] in new_tokenizer.all_special_tokens_extended))
    # Finally: the retrained tokenizer must still encode/decode correctly.
    words = [['this', 'is'], ['hello', '']]
    boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[1, 2, 3, 4], [5, 6, 7, 8]]]
    inputs = new_tokenizer(words, boxes=boxes)
    self.assertEqual(len(inputs['input_ids']), 2)
    decoded_input = new_tokenizer.decode(inputs['input_ids'][0], skip_special_tokens=True)
    expected_result = ' this is'
    if (tokenizer.backend_tokenizer.normalizer is not None):
        expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
    self.assertEqual(expected_result, decoded_input)
def test_prepare_for_model(self):
    """prepare_for_model must produce the same output as encode_plus
    (the fast tokenizer has no prepare_for_model, so it is skipped)."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        if tokenizer.__class__.__name__ == 'LayoutLMv3TokenizerFast':
            continue
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            words, boxes = self.get_words_and_boxes()
            via_prepare = tokenizer.prepare_for_model(words, boxes=boxes, add_special_tokens=True)
            via_encode = tokenizer.encode_plus(words, boxes=boxes, add_special_tokens=True)
            self.assertEqual(via_encode, via_prepare)
def test_padding_different_model_input_name(self):
    """Padding must produce matching results for the slow and fast
    tokenizers even when the main model input key is renamed to 'inputs'.

    Fixes an apparent copy/paste bug: the "python" side previously both
    encoded and padded with the *rust* tokenizer, so the slow tokenizer was
    never actually exercised and the final comparison was vacuous.
    """
    if (not self.test_slow_tokenizer):
        return
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            # Both tokenizers must agree on the pad token id.
            self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
            pad_token_id = tokenizer_p.pad_token_id
            (words, boxes) = self.get_words_and_boxes_batch()
            input_r = tokenizer_r.batch_encode_plus(words, boxes=boxes)
            input_p = tokenizer_p.batch_encode_plus(words, boxes=boxes)  # was tokenizer_r (bug)
            # Rename the main input (e.g. 'input_ids') to 'inputs' on both sides.
            input_r['inputs'] = input_r[tokenizer_r.model_input_names[0]]
            del input_r[tokenizer_r.model_input_names[0]]
            input_p['inputs'] = input_p[tokenizer_p.model_input_names[0]]
            del input_p[tokenizer_p.model_input_names[0]]
            tokenizer_r.model_input_names = (['inputs'] + tokenizer_r.model_input_names[1:])
            tokenizer_p.model_input_names = (['inputs'] + tokenizer_p.model_input_names[1:])
            # Pad each side with its own tokenizer; results must match.
            input_r = tokenizer_r.pad(input_r, padding='longest')
            input_p = tokenizer_p.pad(input_p, padding='longest')  # was tokenizer_r (bug)
            max_length = len(input_p['inputs'][0])
            self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id, model_main_input_name='inputs')
def test_batch_encode_dynamic_overflowing(self):
    """With return_overflowing_tokens and tensor output, overflow windows
    are stacked along the batch axis: regular keys are rank-2 and 'bbox'
    is rank-3 (last dim 4)."""
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
        with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})'):
            # Pick whichever tensor backend is installed.
            if is_torch_available():
                returned_tensor = 'pt'
            elif is_tf_available():
                returned_tensor = 'tf'
            else:
                returned_tensor = 'jax'
            # Single sequence overflowing into several windows of length 6.
            words = ['HuggingFace', 'is', 'solving', 'NLP', 'one', 'commit', 'at', 'a', 'time']
            boxes = [[i, i, i, i] for i in range(len(words))]
            tokens = tokenizer.encode_plus(words, boxes=boxes, max_length=6, padding=True, truncation=True, return_tensors=returned_tensor, return_overflowing_tokens=True)
            for key in filter((lambda x: ('overflow_to_sample_mapping' not in x)), tokens.keys()):
                if (key != 'bbox'):
                    self.assertEqual(len(tokens[key].shape), 2)
                else:
                    self.assertEqual(len(tokens[key].shape), 3)
            # Batch of two sequences, one overflowing and one short.
            words_batched = [['HuggingFace', 'is', 'solving', 'NLP', 'one', 'commit', 'at', 'a', 'time'], ['Very', 'tiny', 'input']]
            boxes_batched = [[[i, i, i, i] for i in range(len(words_item))] for words_item in words_batched]
            tokens = tokenizer.batch_encode_plus(words_batched, boxes=boxes_batched, max_length=6, padding=True, truncation='only_first', return_tensors=returned_tensor, return_overflowing_tokens=True)
            for key in filter((lambda x: ('overflow_to_sample_mapping' not in x)), tokens.keys()):
                if (key != 'bbox'):
                    self.assertEqual(len(tokens[key].shape), 2)
                    self.assertEqual(tokens[key].shape[(- 1)], 6)
                else:
                    self.assertEqual(len(tokens[key].shape), 3)
                    self.assertEqual(tokens[key].shape[(- 1)], 4)
# NOTE(review): this bare string is almost certainly a mangled decorator —
# likely "@unittest.skip('TO DO: overwrite this very extensive test.')" in
# the original source; as written it is a no-op expression statement.
('TO DO: overwrite this very extensive test.')
def test_alignement_methods(self):
    # Intentionally empty: the inherited alignment-methods test does not
    # apply as-is to this tokenizer and is meant to be overwritten.
    pass
def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
    """Build a (words, boxes, ids) triple of "clean" tokens for *tokenizer*:
    alphabetic tokens that survive an encode/decode round-trip as a single
    id. Length is clamped to [min_length, max_length] where possible."""
    # All (id, decoded-string) pairs of the vocabulary.
    toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
    # Keep only tokens made of letters/spaces ...
    toks = list(filter((lambda t: re.match('^[ a-zA-Z]+$', t[1])), toks))
    # ... that re-encode back to exactly their own single id.
    toks = list(filter((lambda t: ([t[0]] == tokenizer.encode(t[1].split(' '), boxes=(len(t[1]) * [[1, 1, 1, 1]]), add_special_tokens=False))), toks))
    if ((max_length is not None) and (len(toks) > max_length)):
        toks = toks[:max_length]
    # Double the list until min_length is reached (only if non-empty).
    if ((min_length is not None) and (len(toks) < min_length) and (len(toks) > 0)):
        while (len(toks) < min_length):
            toks = (toks + toks)
    toks_ids = [t[0] for t in toks]
    output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
    # Ensure the decoded text contains at least one space so it can be
    # split back into multiple words.
    if ((' ' not in output_txt) and (len(toks_ids) > 1)):
        output_txt = ((tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False) + ' ') + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False))
    if with_prefix_space:
        output_txt = (' ' + output_txt)
    words = output_txt.split(' ')
    # One dummy box per word.
    boxes = [[i, i, i, i] for i in range(len(words))]
    output_ids = tokenizer.encode(words, boxes=boxes, add_special_tokens=False)
    return (words, boxes, output_ids)
def test_added_token_with_space_before(self):
    """Tokens added via add_tokens must tokenize identically in the slow
    and fast tokenizers, with and without a leading space."""
    tokenizer_s = self.get_tokenizer()
    tokenizer_f = self.get_rust_tokenizer()
    tokens_to_add = ['AAA', 'bbb']
    words_with_space = [f' {token}' for token in (tokens_to_add + tokenizer_s.unique_no_split_tokens)]
    words_without_space = (tokens_to_add + tokenizer_s.unique_no_split_tokens)
    boxes = [[i, i, i, i] for i in range(len(words_with_space))]
    # lstrip/rstrip so surrounding spaces are absorbed by the added token.
    tokens_to_add_formated = [AddedToken(token, rstrip=True, lstrip=True, single_word=False) for token in tokens_to_add]
    tokenizer_s.add_tokens(tokens_to_add_formated)
    tokenizer_f.add_tokens(tokens_to_add_formated)
    ids_s = tokenizer_s(words_with_space, boxes=boxes).input_ids
    ids_f = tokenizer_f(words_with_space, boxes=boxes).input_ids
    # NOTE(review): the with-space results computed here are overwritten
    # below without ever being asserted — possibly a missing assertEqual;
    # confirm against upstream before adding one.
    tokens_s = tokenizer_s.convert_ids_to_tokens(ids_s)
    tokens_f = tokenizer_f.convert_ids_to_tokens(ids_f)
    ids_s = tokenizer_s(words_without_space, boxes=boxes).input_ids
    ids_f = tokenizer_f(words_without_space, boxes=boxes).input_ids
    tokens_s = tokenizer_s.convert_ids_to_tokens(ids_s)
    tokens_f = tokenizer_f.convert_ids_to_tokens(ids_f)
    self.assertEqual(tokens_s, tokens_f)
def test_maximum_encoding_length_pair_input(self):
    """Exhaustively check truncation of (question, words) pair inputs:
    output length pinned at model_max_length for every padding/truncation
    strategy, a warning when truncation is disabled, and correct
    overflowing tokens/boxes per strategy. The fast tokenizer returns
    overflow as an extra batch row; the slow one via 'overflowing_tokens'
    and 'overflowing_token_boxes'."""
    tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            # Build a first sequence long enough to truncate (> 2 + stride).
            stride = 2
            (seq_0, boxes_0, ids) = self.get_clean_sequence(tokenizer, max_length=20)
            question_0 = ' '.join(map(str, seq_0))
            if (len(ids) <= (2 + stride)):
                seq_0 = ((seq_0 + ' ') * (2 + stride))
                ids = None
            seq0_tokens = tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)
            seq0_input_ids = seq0_tokens['input_ids']
            self.assertGreater(len(seq0_input_ids), (2 + stride))
            question_1 = 'This is another sentence to be encoded.'
            seq_1 = ['what', 'a', 'weird', 'test', 'weirdly', 'weird']
            boxes_1 = [[i, i, i, i] for i in range(1, (len(seq_1) + 1))]
            seq1_tokens = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)
            # Make the two sequences differ in length by more than 2 so
            # 'longest_first' truncation has a well-defined longest side.
            if (abs((len(seq0_input_ids) - len(seq1_tokens['input_ids']))) <= 2):
                seq1_tokens_input_ids = (seq1_tokens['input_ids'] + seq1_tokens['input_ids'])
                seq_1 = tokenizer.decode(seq1_tokens_input_ids, clean_up_tokenization_spaces=False)
                seq_1 = seq_1.split(' ')
                boxes_1 = [[i, i, i, i] for i in range(1, (len(seq_1) + 1))]
                seq1_tokens = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)
            seq1_input_ids = seq1_tokens['input_ids']
            self.assertGreater(len(seq1_input_ids), (2 + stride))
            smallest = (seq1_input_ids if (len(seq0_input_ids) > len(seq1_input_ids)) else seq0_input_ids)
            sequence = tokenizer(question_0, seq_1, boxes=boxes_1, add_special_tokens=False)
            model_max_length = tokenizer.model_max_length
            self.assertEqual(model_max_length, 100)
            # seq_2/question_2: guaranteed to exceed model_max_length.
            seq_2 = (seq_0 * model_max_length)
            question_2 = ' '.join(map(str, seq_2))
            boxes_2 = (boxes_0 * model_max_length)
            self.assertGreater(len(seq_2), model_max_length)
            sequence1 = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)
            total_length1 = len(sequence1['input_ids'])
            sequence2 = tokenizer(question_2, seq_1, boxes=boxes_1, add_special_tokens=False)
            total_length2 = len(sequence2['input_ids'])
            self.assertLess(total_length1, model_max_length, 'Issue with the testing sequence, please update it.')
            self.assertGreater(total_length2, model_max_length, 'Issue with the testing sequence, please update it.')
            # Padding strategies only apply when a pad token exists.
            padding_strategies = ([False, True, 'longest'] if (tokenizer.pad_token and (tokenizer.pad_token_id >= 0)) else [False])
            for padding_state in padding_strategies:
                with self.subTest(f'{tokenizer.__class__.__name__} Padding: {padding_state}'):
                    for truncation_state in [True, 'longest_first', 'only_first']:
                        with self.subTest(f'{tokenizer.__class__.__name__} Truncation: {truncation_state}'):
                            # Overflowing question: every strategy caps the output.
                            output = tokenizer(question_2, seq_1, boxes=boxes_1, padding=padding_state, truncation=truncation_state)
                            self.assertEqual(len(output['input_ids']), model_max_length)
                            self.assertEqual(len(output['bbox']), model_max_length)
                            output = tokenizer([question_2], [seq_1], boxes=[boxes_1], padding=padding_state, truncation=truncation_state)
                            self.assertEqual(len(output['input_ids'][0]), model_max_length)
                            self.assertEqual(len(output['bbox'][0]), model_max_length)
                    # Overflowing words with truncation='only_second'.
                    output = tokenizer(question_1, seq_2, boxes=boxes_2, padding=padding_state, truncation='only_second')
                    self.assertEqual(len(output['input_ids']), model_max_length)
                    self.assertEqual(len(output['bbox']), model_max_length)
                    output = tokenizer([question_1], [seq_2], boxes=[boxes_2], padding=padding_state, truncation='only_second')
                    self.assertEqual(len(output['input_ids'][0]), model_max_length)
                    self.assertEqual(len(output['bbox'][0]), model_max_length)
                    # truncation=False must warn and exceed model_max_length.
                    tokenizer.deprecation_warnings = {}
                    with self.assertLogs('transformers', level='WARNING') as cm:
                        output = tokenizer(question_1, seq_2, boxes=boxes_2, padding=padding_state, truncation=False)
                        self.assertNotEqual(len(output['input_ids']), model_max_length)
                        self.assertNotEqual(len(output['bbox']), model_max_length)
                    self.assertEqual(len(cm.records), 1)
                    self.assertTrue(cm.records[0].message.startswith('Token indices sequence length is longer than the specified maximum sequence length for this model'))
                    tokenizer.deprecation_warnings = {}
                    with self.assertLogs('transformers', level='WARNING') as cm:
                        output = tokenizer([question_1], [seq_2], boxes=[boxes_2], padding=padding_state, truncation=False)
                        self.assertNotEqual(len(output['input_ids'][0]), model_max_length)
                        self.assertNotEqual(len(output['bbox'][0]), model_max_length)
                    self.assertEqual(len(cm.records), 1)
                    self.assertTrue(cm.records[0].message.startswith('Token indices sequence length is longer than the specified maximum sequence length for this model'))
            # Reference truncated/overflowing sequences for each strategy.
            truncated_first_sequence = (tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)['input_ids'][:(- 2)] + tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)['input_ids'])
            truncated_second_sequence = (tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)['input_ids'] + tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)['input_ids'][:(- 2)])
            truncated_longest_sequence = (truncated_first_sequence if (len(seq0_input_ids) > len(seq1_input_ids)) else truncated_second_sequence)
            overflow_first_sequence = (tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)['input_ids'][(- (2 + stride)):] + tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)['input_ids'])
            overflow_second_sequence = (tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)['input_ids'] + tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)['input_ids'][(- (2 + stride)):])
            overflow_longest_sequence = (overflow_first_sequence if (len(seq0_input_ids) > len(seq1_input_ids)) else overflow_second_sequence)
            # Question tokens all carry the dummy [0, 0, 0, 0] box.
            bbox_first = ([[0, 0, 0, 0]] * (len(seq0_input_ids) - 2))
            bbox_first_sequence = (bbox_first + tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)['bbox'])
            overflowing_token_bbox_first_sequence_slow = ([[0, 0, 0, 0]] * (2 + stride))
            overflowing_token_bbox_first_sequence_fast = (([[0, 0, 0, 0]] * (2 + stride)) + tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)['bbox'])
            bbox_second = ([[0, 0, 0, 0]] * len(seq0_input_ids))
            bbox_second_sequence = (bbox_second + tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)['bbox'][:(- 2)])
            overflowing_token_bbox_second_sequence_slow = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)['bbox'][(- (2 + stride)):]
            overflowing_token_bbox_second_sequence_fast = (([[0, 0, 0, 0]] * len(seq0_input_ids)) + tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)['bbox'][(- (2 + stride)):])
            bbox_longest_sequence = (bbox_first_sequence if (len(seq0_tokens) > len(seq1_tokens)) else bbox_second_sequence)
            overflowing_token_bbox_longest_sequence_fast = (overflowing_token_bbox_first_sequence_fast if (len(seq0_tokens) > len(seq1_tokens)) else overflowing_token_bbox_second_sequence_fast)
            # truncation='longest_first' with overflow: fast only; slow raises.
            if isinstance(tokenizer, LayoutLMv3TokenizerFast):
                information = tokenizer(question_0, seq_1, boxes=boxes_1, max_length=(len(sequence['input_ids']) - 2), add_special_tokens=False, stride=stride, truncation='longest_first', return_overflowing_tokens=True)
                truncated_sequence = information['input_ids'][0]
                overflowing_tokens = information['input_ids'][1]
                bbox = information['bbox'][0]
                overflowing_bbox = information['bbox'][1]
                self.assertEqual(len(information['input_ids']), 2)
                self.assertEqual(len(truncated_sequence), (len(sequence['input_ids']) - 2))
                self.assertEqual(truncated_sequence, truncated_longest_sequence)
                self.assertEqual(len(overflowing_tokens), ((2 + stride) + len(smallest)))
                self.assertEqual(overflowing_tokens, overflow_longest_sequence)
                self.assertEqual(bbox, bbox_longest_sequence)
                self.assertEqual(len(overflowing_bbox), ((2 + stride) + len(smallest)))
                self.assertEqual(overflowing_bbox, overflowing_token_bbox_longest_sequence_fast)
            else:
                with self.assertRaises(ValueError) as context:
                    information = tokenizer(question_0, seq_1, boxes=boxes_1, max_length=(len(sequence['input_ids']) - 2), add_special_tokens=False, stride=stride, truncation='longest_first', return_overflowing_tokens=True)
                self.assertTrue(context.exception.args[0].startswith('Not possible to return overflowing tokens for pair of sequences with the `longest_first`. Please select another truncation strategy than `longest_first`, for instance `only_second` or `only_first`.'))
            # truncation=True behaves like 'longest_first'.
            if isinstance(tokenizer, LayoutLMv3TokenizerFast):
                information = tokenizer(question_0, seq_1, boxes=boxes_1, max_length=(len(sequence['input_ids']) - 2), add_special_tokens=False, stride=stride, truncation=True, return_overflowing_tokens=True)
                truncated_sequence = information['input_ids'][0]
                overflowing_tokens = information['input_ids'][1]
                bbox = information['bbox'][0]
                overflowing_bbox = information['bbox'][1]
                self.assertEqual(len(information['input_ids']), 2)
                self.assertEqual(len(truncated_sequence), (len(sequence['input_ids']) - 2))
                self.assertEqual(truncated_sequence, truncated_longest_sequence)
                self.assertEqual(len(overflowing_tokens), ((2 + stride) + len(smallest)))
                self.assertEqual(overflowing_tokens, overflow_longest_sequence)
                self.assertEqual(bbox, bbox_longest_sequence)
                self.assertEqual(overflowing_bbox, overflowing_token_bbox_longest_sequence_fast)
            else:
                with self.assertRaises(ValueError) as context:
                    information = tokenizer(question_0, seq_1, boxes=boxes_1, max_length=(len(sequence['input_ids']) - 2), add_special_tokens=False, stride=stride, truncation=True, return_overflowing_tokens=True)
                self.assertTrue(context.exception.args[0].startswith('Not possible to return overflowing tokens for pair of sequences with the `longest_first`. Please select another truncation strategy than `longest_first`, for instance `only_second` or `only_first`.'))
            # truncation='only_first'.
            information_first_truncated = tokenizer(question_0, seq_1, boxes=boxes_1, max_length=(len(sequence['input_ids']) - 2), add_special_tokens=False, stride=stride, truncation='only_first', return_overflowing_tokens=True)
            if isinstance(tokenizer, LayoutLMv3TokenizerFast):
                truncated_sequence = information_first_truncated['input_ids'][0]
                overflowing_tokens = information_first_truncated['input_ids'][1]
                bbox = information_first_truncated['bbox'][0]
                # NOTE(review): index [0] here (vs [1] for the overflow row
                # everywhere else) looks like a typo; confirm upstream.
                overflowing_bbox = information_first_truncated['bbox'][0]
                self.assertEqual(len(information_first_truncated['input_ids']), 2)
                self.assertEqual(len(truncated_sequence), (len(sequence['input_ids']) - 2))
                self.assertEqual(truncated_sequence, truncated_first_sequence)
                self.assertEqual(len(overflowing_tokens), ((2 + stride) + len(seq1_input_ids)))
                self.assertEqual(overflowing_tokens, overflow_first_sequence)
                self.assertEqual(bbox, bbox_first_sequence)
                self.assertEqual(overflowing_bbox, overflowing_token_bbox_first_sequence_fast)
            else:
                truncated_sequence = information_first_truncated['input_ids']
                overflowing_tokens = information_first_truncated['overflowing_tokens']
                overflowing_bbox = information_first_truncated['overflowing_token_boxes']
                bbox = information_first_truncated['bbox']
                self.assertEqual(len(truncated_sequence), (len(sequence['input_ids']) - 2))
                self.assertEqual(truncated_sequence, truncated_first_sequence)
                self.assertEqual(len(overflowing_tokens), (2 + stride))
                self.assertEqual(overflowing_tokens, seq0_input_ids[(- (2 + stride)):])
                self.assertEqual(bbox, bbox_first_sequence)
                self.assertEqual(overflowing_bbox, overflowing_token_bbox_first_sequence_slow)
            # truncation='only_second'.
            information_second_truncated = tokenizer(question_0, seq_1, boxes=boxes_1, max_length=(len(sequence['input_ids']) - 2), add_special_tokens=False, stride=stride, truncation='only_second', return_overflowing_tokens=True)
            if isinstance(tokenizer, LayoutLMv3TokenizerFast):
                truncated_sequence = information_second_truncated['input_ids'][0]
                overflowing_tokens = information_second_truncated['input_ids'][1]
                bbox = information_second_truncated['bbox'][0]
                overflowing_bbox = information_second_truncated['bbox'][1]
                self.assertEqual(len(information_second_truncated['input_ids']), 2)
                self.assertEqual(len(truncated_sequence), (len(sequence['input_ids']) - 2))
                self.assertEqual(truncated_sequence, truncated_second_sequence)
                self.assertEqual(len(overflowing_tokens), ((2 + stride) + len(seq0_input_ids)))
                self.assertEqual(overflowing_tokens, overflow_second_sequence)
                self.assertEqual(bbox, bbox_second_sequence)
                self.assertEqual(overflowing_bbox, overflowing_token_bbox_second_sequence_fast)
            else:
                truncated_sequence = information_second_truncated['input_ids']
                overflowing_tokens = information_second_truncated['overflowing_tokens']
                bbox = information_second_truncated['bbox']
                overflowing_bbox = information_second_truncated['overflowing_token_boxes']
                self.assertEqual(len(truncated_sequence), (len(sequence['input_ids']) - 2))
                self.assertEqual(truncated_sequence, truncated_second_sequence)
                self.assertEqual(len(overflowing_tokens), (2 + stride))
                self.assertEqual(overflowing_tokens, seq1_input_ids[(- (2 + stride)):])
                self.assertEqual(bbox, bbox_second_sequence)
                self.assertEqual(overflowing_bbox, overflowing_token_bbox_second_sequence_slow)
    def test_maximum_encoding_length_single_input(self):
        """Single-sequence truncation behavior.

        Checks that (a) every truncation strategy caps both ``input_ids`` and
        ``bbox`` at ``model_max_length``, (b) ``truncation=False`` leaves the
        output over-length and logs exactly one over-length warning, and
        (c) ``return_overflowing_tokens`` with a stride returns the expected
        overflow slice (fast and slow tokenizers expose it differently).
        """
        tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                (seq_0, boxes_0, ids) = self.get_clean_sequence(tokenizer, max_length=20)
                sequence = tokenizer(seq_0, boxes=boxes_0, add_special_tokens=False)
                total_length = len(sequence['input_ids'])
                self.assertGreater(total_length, 4, "Issue with the testing sequence, please update it, it's too short")
                model_max_length = tokenizer.model_max_length
                self.assertEqual(model_max_length, 100)
                # Repeat the words/boxes so the tokenized length exceeds model_max_length.
                seq_1 = (seq_0 * model_max_length)
                boxes_1 = (boxes_0 * model_max_length)
                sequence1 = tokenizer(seq_1, boxes=boxes_1, add_special_tokens=False)
                total_length1 = len(sequence1['input_ids'])
                self.assertGreater(total_length1, model_max_length, "Issue with the testing sequence, please update it, it's too short")
                # Only exercise padding when the tokenizer actually has a usable pad token.
                padding_strategies = ([False, True, 'longest'] if (tokenizer.pad_token and (tokenizer.pad_token_id >= 0)) else [False])
                for padding_state in padding_strategies:
                    with self.subTest(f'Padding: {padding_state}'):
                        for truncation_state in [True, 'longest_first', 'only_first']:
                            with self.subTest(f'Truncation: {truncation_state}'):
                                # Every truncation strategy must cap ids and boxes at
                                # model_max_length, both unbatched and batched.
                                output = tokenizer(seq_1, boxes=boxes_1, padding=padding_state, truncation=truncation_state)
                                self.assertEqual(len(output['input_ids']), model_max_length)
                                self.assertEqual(len(output['bbox']), model_max_length)
                                output = tokenizer([seq_1], boxes=[boxes_1], padding=padding_state, truncation=truncation_state)
                                self.assertEqual(len(output['input_ids'][0]), model_max_length)
                                self.assertEqual(len(output['bbox'][0]), model_max_length)
                        # With truncation disabled the output stays over-length and exactly
                        # one "longer than the specified maximum" warning must be logged.
                        # deprecation_warnings is reset so the warning fires again.
                        tokenizer.deprecation_warnings = {}
                        with self.assertLogs('transformers', level='WARNING') as cm:
                            output = tokenizer(seq_1, boxes=boxes_1, padding=padding_state, truncation=False)
                            self.assertNotEqual(len(output['input_ids']), model_max_length)
                            self.assertNotEqual(len(output['bbox']), model_max_length)
                        self.assertEqual(len(cm.records), 1)
                        self.assertTrue(cm.records[0].message.startswith('Token indices sequence length is longer than the specified maximum sequence length for this model'))
                        # Same over-length check for the batched call.
                        tokenizer.deprecation_warnings = {}
                        with self.assertLogs('transformers', level='WARNING') as cm:
                            output = tokenizer([seq_1], boxes=[boxes_1], padding=padding_state, truncation=False)
                            self.assertNotEqual(len(output['input_ids'][0]), model_max_length)
                            self.assertNotEqual(len(output['bbox'][0]), model_max_length)
                        self.assertEqual(len(cm.records), 1)
                        self.assertTrue(cm.records[0].message.startswith('Token indices sequence length is longer than the specified maximum sequence length for this model'))
                # Overflow check: drop 2 tokens with stride 2, so the overflow
                # window is the last (2 + stride) tokens of the full sequence.
                stride = 2
                information = tokenizer(seq_0, boxes=boxes_0, max_length=(total_length - 2), add_special_tokens=False, stride=stride, truncation=True, return_overflowing_tokens=True)
                if isinstance(tokenizer, LayoutLMv3TokenizerFast):
                    # Fast tokenizers return the overflow as a second encoded sequence.
                    truncated_sequence = information['input_ids'][0]
                    overflowing_tokens = information['input_ids'][1]
                    self.assertEqual(len(information['input_ids']), 2)
                    self.assertEqual(len(truncated_sequence), (total_length - 2))
                    self.assertEqual(truncated_sequence, sequence['input_ids'][:(- 2)])
                    self.assertEqual(len(overflowing_tokens), (2 + stride))
                    self.assertEqual(overflowing_tokens, sequence['input_ids'][(- (2 + stride)):])
                else:
                    # Slow tokenizers return the overflow under a dedicated key.
                    truncated_sequence = information['input_ids']
                    overflowing_tokens = information['overflowing_tokens']
                    self.assertEqual(len(truncated_sequence), (total_length - 2))
                    self.assertEqual(truncated_sequence, sequence['input_ids'][:(- 2)])
                    self.assertEqual(len(overflowing_tokens), (2 + stride))
                    self.assertEqual(overflowing_tokens, sequence['input_ids'][(- (2 + stride)):])
('LayoutLMv3 tokenizer requires boxes besides sequences.')
def test_pretokenized_inputs(self):
pass
('LayoutLMv3 tokenizer always expects pretokenized inputs.')
def test_compare_pretokenized_inputs(self):
pass
('LayoutLMv3 fast tokenizer does not support prepare_for_model')
def test_compare_prepare_for_model(self):
pass
def test_only_label_first_subword(self):
words = ['hello', 'niels']
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(words))]
word_labels = [0, 1]
tokenizer_p = LayoutLMv3Tokenizer.from_pretrained('microsoft/layoutlmv3-base', add_visual_labels=False)
encoding = tokenizer_p(words, boxes=boxes, word_labels=word_labels)
self.assertListEqual(encoding.labels, [(- 100), 0, 1, (- 100), (- 100)])
tokenizer_p = LayoutLMv3Tokenizer.from_pretrained('microsoft/layoutlmv3-base', only_label_first_subword=False, add_visual_labels=False)
encoding = tokenizer_p(words, boxes=boxes, word_labels=word_labels)
self.assertListEqual(encoding.labels, [(- 100), 0, 1, 1, (- 100)])
tokenizer_r = LayoutLMv3TokenizerFast.from_pretrained('microsoft/layoutlmv3-base', add_visual_labels=False)
encoding = tokenizer_r(words, boxes=boxes, word_labels=word_labels)
self.assertListEqual(encoding.labels, [(- 100), 0, 1, (- 100), (- 100)])
tokenizer_r = LayoutLMv3Tokenizer.from_pretrained('microsoft/layoutlmv3-base', only_label_first_subword=False, add_visual_labels=False)
encoding = tokenizer_r(words, boxes=boxes, word_labels=word_labels)
self.assertListEqual(encoding.labels, [(- 100), 0, 1, 1, (- 100)])
    def test_layoutlmv3_integration_test(self):
        """End-to-end parity check between the slow and fast tokenizers.

        Both tokenizers must produce the exact same encodings for four input
        modes — single words+boxes, a batch of words+boxes, words+boxes with
        word_labels, and question+words+boxes (single and batched) — each
        padded to max_length=20. Expected values were generated with
        microsoft/layoutlmv3-base; bbox rows for special and padding tokens
        are [0, 0, 0, 0], and padded label positions are -100.
        """
        tokenizer_p = LayoutLMv3Tokenizer.from_pretrained('microsoft/layoutlmv3-base')
        tokenizer_r = LayoutLMv3TokenizerFast.from_pretrained('microsoft/layoutlmv3-base')
        # Case 1: single sequence of words + boxes.
        (words, boxes) = self.get_words_and_boxes()
        expected_results = {'input_ids': [0, 795, 13964, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'attention_mask': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}
        encoding_p = tokenizer_p(words, boxes=boxes, padding='max_length', max_length=20)
        encoding_r = tokenizer_r(words, boxes=boxes, padding='max_length', max_length=20)
        self.assertDictEqual(dict(encoding_p), expected_results)
        self.assertDictEqual(dict(encoding_r), expected_results)
        # Case 2: batch of two sequences.
        (words, boxes) = self.get_words_and_boxes_batch()
        expected_results = {'input_ids': [[0, 795, 13964, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 92, 614, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [256, 38, 330, 58], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'attention_mask': [[1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        encoding_p = tokenizer_p(words, boxes=boxes, padding='max_length', max_length=20)
        encoding_r = tokenizer_r(words, boxes=boxes, padding='max_length', max_length=20)
        self.assertDictEqual(dict(encoding_p), expected_results)
        self.assertDictEqual(dict(encoding_r), expected_results)
        # Case 3: single sequence with word_labels (token classification).
        (words, boxes) = self.get_words_and_boxes()
        word_labels = [1, 2]
        expected_results = {'input_ids': [0, 795, 13964, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'bbox': [[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'labels': [(- 100), 1, 2, (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100)], 'attention_mask': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}
        encoding_p = tokenizer_p(words, boxes=boxes, word_labels=word_labels, padding='max_length', max_length=20)
        encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding='max_length', max_length=20)
        self.assertDictEqual(dict(encoding_p), expected_results)
        self.assertDictEqual(dict(encoding_r), expected_results)
        # Case 4: batch with word_labels.
        (words, boxes) = self.get_words_and_boxes_batch()
        word_labels = [[1, 2], [2, 46]]
        expected_results = {'input_ids': [[0, 795, 13964, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 92, 614, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'bbox': [[[0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [961, 885, 992, 912], [256, 38, 330, 58], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'labels': [[(- 100), 1, 2, (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100)], [(- 100), 2, 46, (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100), (- 100)]], 'attention_mask': [[1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        encoding_p = tokenizer_p(words, boxes=boxes, word_labels=word_labels, padding='max_length', max_length=20)
        encoding_r = tokenizer_r(words, boxes=boxes, word_labels=word_labels, padding='max_length', max_length=20)
        self.assertDictEqual(dict(encoding_p), expected_results)
        self.assertDictEqual(dict(encoding_r), expected_results)
        # Case 5: question + words + boxes (document QA pair input).
        (question, words, boxes) = self.get_question_words_and_boxes()
        expected_results = {'input_ids': [0, 99, 18, 39, 766, 116, 2, 2, 795, 13964, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'bbox': [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]}
        encoding_p = tokenizer_p(question, words, boxes, padding='max_length', max_length=20)
        encoding_r = tokenizer_r(question, words, boxes, padding='max_length', max_length=20)
        self.assertDictEqual(dict(encoding_p), expected_results)
        self.assertDictEqual(dict(encoding_r), expected_results)
        # Case 6: batch of question + words + boxes pairs.
        (questions, words, boxes) = self.get_question_words_and_boxes_batch()
        expected_results = {'input_ids': [[0, 99, 18, 39, 766, 116, 2, 2, 795, 13964, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 141, 16, 37, 373, 116, 2, 2, 13964, 795, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'bbox': [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [423, 237, 440, 251], [427, 272, 441, 287], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [256, 38, 330, 58], [256, 38, 330, 58], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        encoding_p = tokenizer_p(questions, words, boxes, padding='max_length', max_length=20)
        encoding_r = tokenizer_r(questions, words, boxes, padding='max_length', max_length=20)
        self.assertDictEqual(dict(encoding_p), expected_results)
        self.assertDictEqual(dict(encoding_r), expected_results)
("Doesn't support another framework than PyTorch")
def test_np_encode_plus_sent_to_model(self):
pass
_tf
def test_tf_encode_plus_sent_to_model(self):
from transformers import TF_MODEL_MAPPING, TOKENIZER_MAPPING
MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(TF_MODEL_MAPPING, TOKENIZER_MAPPING)
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
if (tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING):
return
(config_class, model_class) = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
config = config_class()
if (config.is_encoder_decoder or (config.pad_token_id is None)):
return
model = model_class(config)
self.assertGreaterEqual(model.config.vocab_size, len(tokenizer))
first_ten_tokens = list(tokenizer.get_vocab().keys())[:10]
boxes = [[1000, 1000, 1000, 1000] for _ in range(len(first_ten_tokens))]
encoded_sequence = tokenizer.encode_plus(first_ten_tokens, boxes=boxes, return_tensors='tf')
batch_encoded_sequence = tokenizer.batch_encode_plus([first_ten_tokens, first_ten_tokens], boxes=[boxes, boxes], return_tensors='tf')
model(encoded_sequence)
model(batch_encoded_sequence) |
def test_apply_text_edits_multiline(pylsp):
    """Two edits in one call: a full-line replacement on line 2 and a
    mid-line insertion on line 1 must both land in the result."""
    pylsp.workspace.put_document(DOC_URI, '0\n1\n2\n3\n4')
    doc = pylsp.workspace.get_document(DOC_URI)
    # Replaces line 2 (up to the start of line 3, i.e. including its newline).
    replace_line_two = {
        'range': {'start': {'line': 2, 'character': 0}, 'end': {'line': 3, 'character': 0}},
        'newText': 'Hello',
    }
    # Zero-width range: pure insertion after the '1' on line 1.
    insert_on_line_one = {
        'range': {'start': {'line': 1, 'character': 1}, 'end': {'line': 1, 'character': 1}},
        'newText': 'World',
    }
    result = apply_text_edits(doc, [replace_line_two, insert_on_line_one])
    assert result == '0\n1World\nHello3\n4'
class metric_manager(object):
    """Tracks the best EER / minDCF per trial type across evaluation epochs.

    Every reported result is appended to ``<save_dir>results.txt`` (line
    buffered, so results survive a crash mid-run) and, when a new best EER is
    reached, a model checkpoint is saved under ``<save_dir>weights/``.

    NOTE: ``save_dir`` is concatenated directly with file names, so it must
    end with a path separator (e.g. ``'exp/run1/'``).
    """

    def __init__(self, save_dir, model, dic_eval_trial, save_best_only=True):
        """
        :param save_dir: output directory (must end with a path separator)
        :param model: model whose ``state_dict()`` is checkpointed on new bests
        :param dic_eval_trial: dict whose keys are the trial types to track
        :param save_best_only: if True, overwrite one 'checkpoint_best.pt';
            if False, save a per-epoch checkpoint on every new best EER
        """
        self.save_dir = save_dir
        self.model = model
        self.save_best_only = save_best_only
        # 99.0 is a sentinel "worse than any real score" starting value.
        self.best_eer = {}
        self.best_min_dcf = {}
        for key in dic_eval_trial.keys():
            self.best_eer[key] = 99.0
            self.best_min_dcf[key] = 99.0
        self.f_result = open((save_dir + 'results.txt'), 'a', buffering=1)

    def close(self):
        """Close the results log file.

        Fix: the file opened in ``__init__`` was never closed anywhere,
        relying on interpreter shutdown; call this when the run is finished.
        """
        self.f_result.close()

    def update_metric_l(self, epoch, l_eer, l_min_dcf, trial_type):
        """Report four EER/minDCF pairs (original + 3 duration variants) for one epoch.

        ``l_eer`` / ``l_min_dcf`` are 4-element lists; index 0 (the original
        trial) drives the best-so-far tracking and checkpointing.
        """
        line = ('epoch:%d, %s, eval_eer_org:%.4f, eval_min_dcf_org:%.4f, eval_eer_1:%.4f, eval_min_dcf_1:%.4f, eval_eer_2:%.4f, eval_min_dcf_2:%.4f, eval_eer_5:%.4f, eval_min_dcf_5:%.4f\n' % (epoch, trial_type, l_eer[0], l_min_dcf[0], l_eer[1], l_min_dcf[1], l_eer[2], l_min_dcf[2], l_eer[3], l_min_dcf[3]))
        print(('\n' + line))
        self.f_result.write(line)
        if (self.best_eer[trial_type] > l_eer[0]):
            print(('New best eer %s: %f' % (trial_type, float(l_eer[0]))))
            self.best_eer[trial_type] = l_eer[0]
            if self.save_best_only:
                checkpoint = {'model': self.model.state_dict()}
                torch.save(checkpoint, (self.save_dir + 'weights/checkpoint_best.pt'))
        if (self.best_min_dcf[trial_type] > l_min_dcf[0]):
            print(('New best mindcf %s: %f' % (trial_type, float(l_min_dcf[0]))))
            self.best_min_dcf[trial_type] = l_min_dcf[0]
        if (not self.save_best_only):
            # NOTE(review): epoch is formatted with %.2f, yielding names like
            # 'checkpoint_12.00_0.0213.pt'; kept as-is since downstream
            # tooling may match these file names.
            checkpoint = {'model': self.model.state_dict()}
            torch.save(checkpoint, (self.save_dir + ('weights/checkpoint_%.2f_%.4f.pt' % (epoch, l_eer[0]))))

    def update_metric(self, epoch, eer, min_dcf, trial_type):
        """Report a single EER/minDCF result for one epoch and update the
        best-so-far values (no checkpointing on this path)."""
        line = ('epoch:%d, %s, eval_eer:%.4f, eval_min_dcf:%.4f\n' % (epoch, trial_type, eer, min_dcf))
        print(('\n' + line))
        self.f_result.write(line)
        if (self.best_eer[trial_type] > eer):
            print(('New best eer %s: %f' % (trial_type, float(eer))))
            self.best_eer[trial_type] = eer
        if (self.best_min_dcf[trial_type] > min_dcf):
            print(('New best mindcf %s: %f' % (trial_type, float(min_dcf))))
            self.best_min_dcf[trial_type] = min_dcf
def test_multi_marker_union_with_union_multi_is_single_marker() -> None:
    """Union of complementary python_version constraints under a shared
    sys_platform term collapses to the sys_platform term alone, regardless
    of the operand order."""
    base = parse_marker('sys_platform == "darwin" and python_version == "3"')
    complement = parse_marker('sys_platform == "darwin" and (python_version < "3" or python_version > "3")')
    expected = 'sys_platform == "darwin"'
    assert str(base.union(complement)) == expected
    assert str(complement.union(base)) == expected
# NOTE(review): the three lines below are dataset-viewer UI boilerplate that
# leaked into the file during extraction; commented out so the module parses.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.