code stringlengths 281 23.7M |
|---|
def _parameters_conversion(converter: callable, argument: str, parameter) -> typing.Any:
if (converter is bool):
return _convert_to_bool(argument)
try:
return converter(argument)
except Exception as exc:
try:
name = converter.__name__
except AttributeError:
name = converter.__class__.__name__
raise ValueError('Converting to "{}" failed for parameter "{}".'.format(name, parameter)) from exc |
def make_output_filtering_dataframe(spark_context, spark_session):
    """Build a small fixture dataframe for output-filtering tests.

    Five rows for a single id with increasing ts values and a mix of null and
    non-null feature columns; ts is additionally cast onto TIMESTAMP_COLUMN.
    """
    rows = [
        {'id': 1, 'ts': 1, 'feature1': 0, 'feature2': None, 'feature3': 1},
        {'id': 1, 'ts': 2, 'feature1': 0, 'feature2': 1, 'feature3': 1},
        {'id': 1, 'ts': 3, 'feature1': None, 'feature2': None, 'feature3': None},
        {'id': 1, 'ts': 4, 'feature1': 0, 'feature2': 1, 'feature3': 1},
        {'id': 1, 'ts': 6, 'feature1': None, 'feature2': None, 'feature3': None},
    ]
    # Serialize each row so spark can infer the schema from JSON strings.
    frame = spark_session.read.json(spark_context.parallelize(rows).map(json.dumps))
    return frame.withColumn(TIMESTAMP_COLUMN, frame.ts.cast(DataType.TIMESTAMP.spark))
def test_copy_method():
    """Methods aliased onto new class attributes behave like the originals."""
    # Alias add2/add2b under new names on the class.
    m.ExampleMandA.add2c = m.ExampleMandA.add2
    m.ExampleMandA.add2d = m.ExampleMandA.add2b
    obj = m.ExampleMandA(123)
    assert obj.value == 123
    # Each step calls one method (original or alias) and checks the new value.
    for method_name, delta, expected in (
        ('add2', -100, 23),
        ('add2b', 20, 43),
        ('add2c', 6, 49),
        ('add2d', -7, 42),
    ):
        getattr(obj, method_name)(m.ExampleMandA(delta))
        assert obj.value == expected
def _pad_version(left: List[str], right: List[str]) -> Tuple[(List[str], List[str])]:
(left_split, right_split) = ([], [])
left_split.append(list(itertools.takewhile((lambda x: x.isdigit()), left)))
right_split.append(list(itertools.takewhile((lambda x: x.isdigit()), right)))
left_split.append(left[len(left_split[0]):])
right_split.append(right[len(right_split[0]):])
left_split.insert(1, (['0'] * max(0, (len(right_split[0]) - len(left_split[0])))))
right_split.insert(1, (['0'] * max(0, (len(left_split[0]) - len(right_split[0])))))
return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split))) |
def create_tf_mixup_batch_augmentation(heads: List[TrainerHeadInterface], mixup_alpha: float) -> Callable:
    """Build a tf.data map-function that applies mixup augmentation to a batch.

    Mixing coefficients are drawn from Beta(mixup_alpha, mixup_alpha); each
    example is blended with a randomly permuted partner from the same batch,
    and the per-head targets are mixed via _tf_mixup_batch_targets.
    """
    def tf_mixup_batch_augmentation(batch_features, batch_targets):
        batch_features_shape = tf.shape(batch_features)
        batch_size = batch_features_shape[0]
        beta = tfp.distributions.Beta(mixup_alpha, mixup_alpha)
        if BATCH_MIXUP_LAMBDA:
            # One shared lambda for the whole batch, broadcast to batch_size.
            lambdas = (beta.sample(1) * tf.ones(batch_size))
        else:
            # An independent lambda per example.
            lambdas = beta.sample(batch_size)
        # Random permutation pairs each example with its mixing partner.
        indices = tf.range(start=0, limit=batch_size, dtype=tf.int32)
        permutation = tf.random.shuffle(indices)
        shuffled_batch_features = tf.gather(batch_features, permutation)
        # Reshape lambdas to (batch, 1, ..., 1) so they broadcast over all
        # trailing feature dimensions.
        features_lambdas_shape = ((batch_size,) + ((1,) * (len(batch_features_shape) - 1)))
        features_lambdas = tf.reshape(lambdas, features_lambdas_shape)
        # Convex combination of the original and permuted features.
        mixed_features = ((features_lambdas * batch_features) + ((1.0 - features_lambdas) * shuffled_batch_features))
        mixed_targets = _tf_mixup_batch_targets(heads, batch_targets, permutation, lambdas, batch_size)
        return (mixed_features, mixed_targets)
    return tf_mixup_batch_augmentation
def test_dynamic_compile_shows_nicely():
import importlib.util
import sys
src = 'def foo():\n assert 1 == 0\n'
name = 'abc-123'
spec = importlib.util.spec_from_loader(name, loader=None)
module = importlib.util.module_from_spec(spec)
code = compile(src, name, 'exec')
exec(code, module.__dict__)
sys.modules[name] = module
module.foo() |
def evaluate(args, model, tokenizer, prefix=''):
    """Evaluate *model* on the cached eval dataset and report loss/accuracy.

    Writes the metrics to <output_dir>/eval_results.txt and returns them as
    {'eval_loss': ..., 'eval_accuracy': ...}.
    """
    (dataset, examples, features) = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True)
    # Only the main process (local_rank -1 or 0) creates the output directory.
    if ((not os.path.exists(args.output_dir)) and (args.local_rank in [(- 1), 0])):
        os.makedirs(args.output_dir)
    # Effective batch size scales with the number of GPUs in use.
    args.eval_batch_size = (args.per_gpu_eval_batch_size * max(1, args.n_gpu))
    # Sequential sampling for single-process runs, distributed otherwise.
    eval_sampler = (SequentialSampler(dataset) if (args.local_rank == (- 1)) else DistributedSampler(dataset))
    eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    logger.info('***** Running evaluation {} *****'.format(prefix))
    logger.info(' Num examples = %d', len(dataset))
    logger.info(' Batch size = %d', args.eval_batch_size)
    (eval_loss, eval_accuracy) = (0, 0)
    (nb_eval_steps, nb_eval_examples) = (0, 0)
    for batch in tqdm(eval_dataloader, desc='Evaluating'):
        model.eval()
        batch = tuple((t.to(args.device) for t in batch))
        with torch.no_grad():
            # Model returns (loss, logits, ...) when labels are provided.
            inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'token_type_ids': batch[2], 'labels': batch[3]}
            outputs = model(**inputs)
            (tmp_eval_loss, logits) = outputs[:2]
            # .mean() collapses per-GPU losses when running data-parallel.
            eval_loss += tmp_eval_loss.mean().item()
        logits = logits.detach().cpu().numpy()
        label_ids = inputs['labels'].to('cpu').numpy()
        tmp_eval_accuracy = accuracy(logits, label_ids)
        eval_accuracy += tmp_eval_accuracy
        nb_eval_steps += 1
        nb_eval_examples += inputs['input_ids'].size(0)
    # Loss is averaged per batch, accuracy per example.
    eval_loss = (eval_loss / nb_eval_steps)
    eval_accuracy = (eval_accuracy / nb_eval_examples)
    result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy}
    output_eval_file = os.path.join(args.output_dir, 'eval_results.txt')
    with open(output_eval_file, 'w') as writer:
        logger.info('***** Eval results *****')
        for key in sorted(result.keys()):
            logger.info('%s = %s', key, str(result[key]))
            writer.write(('%s = %s\n' % (key, str(result[key]))))
    return result
class DmRaid_TestCase(unittest.TestCase):
    """Equality semantics of FC6_DmRaidData for differing names and devices."""

    def runTest(self):
        first = FC6_DmRaidData()
        second = FC6_DmRaidData()
        # Freshly constructed objects compare equal.
        self.assertEqual(first, second)
        # Differing names break equality symmetrically.
        first.name = ''
        second.name = 'test'
        self.assertNotEqual(first, second)
        self.assertNotEqual(second, first)
        # Reset names; differing device lists also break equality.
        first.name = ''
        second.name = ''
        first.devices = []
        second.devices = ['test']
        self.assertNotEqual(first, second)
        self.assertNotEqual(second, first)
        first.devices = []
        second.devices = []
class Calculator(QWidget):
    """Four-function calculator widget with memory keys (Qt example).

    Two pending-operator slots implement precedence: a pending multiplicative
    operator ('×'/'÷') folds its operand into factorSoFar before a pending
    additive operator ('+'/'-') folds into sumSoFar.
    """

    NumDigitButtons = 10

    def __init__(self, parent=None):
        super(Calculator, self).__init__(parent)
        self.pendingAdditiveOperator = ''        # '+' or '-' awaiting its right operand
        self.pendingMultiplicativeOperator = ''  # '×' or '÷' awaiting its right operand
        self.sumInMemory = 0.0   # value stored by the MS / M+ keys
        self.sumSoFar = 0.0      # additive accumulator
        self.factorSoFar = 0.0   # multiplicative accumulator
        self.waitingForOperand = True
        self.display = QLineEdit('0')
        self.display.setReadOnly(True)
        self.display.setAlignment(Qt.AlignRight)
        self.display.setMaxLength(15)
        font = self.display.font()
        font.setPointSize(font.pointSize() + 8)
        self.display.setFont(font)
        self.digitButtons = []
        for i in range(Calculator.NumDigitButtons):
            self.digitButtons.append(self.createButton(str(i), self.digitClicked))
        self.pointButton = self.createButton('.', self.pointClicked)
        # FIX: the '±', '÷' and '×' glyphs had degraded to empty strings, which
        # made the multiply/divide buttons blank AND indistinguishable inside
        # calculate(); restore the standard Unicode operator symbols.
        self.changeSignButton = self.createButton('\N{PLUS-MINUS SIGN}', self.changeSignClicked)
        self.backspaceButton = self.createButton('Backspace', self.backspaceClicked)
        self.clearButton = self.createButton('Clear', self.clear)
        self.clearAllButton = self.createButton('Clear All', self.clearAll)
        self.clearMemoryButton = self.createButton('MC', self.clearMemory)
        self.readMemoryButton = self.createButton('MR', self.readMemory)
        self.setMemoryButton = self.createButton('MS', self.setMemory)
        self.addToMemoryButton = self.createButton('M+', self.addToMemory)
        self.divisionButton = self.createButton('\N{DIVISION SIGN}', self.multiplicativeOperatorClicked)
        self.timesButton = self.createButton('\N{MULTIPLICATION SIGN}', self.multiplicativeOperatorClicked)
        self.minusButton = self.createButton('-', self.additiveOperatorClicked)
        self.plusButton = self.createButton('+', self.additiveOperatorClicked)
        self.squareRootButton = self.createButton('Sqrt', self.unaryOperatorClicked)
        self.powerButton = self.createButton(u'x2', self.unaryOperatorClicked)
        self.reciprocalButton = self.createButton('1/x', self.unaryOperatorClicked)
        self.equalButton = self.createButton('=', self.equalClicked)
        mainLayout = QGridLayout()
        mainLayout.setSizeConstraint(QLayout.SetFixedSize)
        mainLayout.addWidget(self.display, 0, 0, 1, 6)
        mainLayout.addWidget(self.backspaceButton, 1, 0, 1, 2)
        mainLayout.addWidget(self.clearButton, 1, 2, 1, 2)
        mainLayout.addWidget(self.clearAllButton, 1, 4, 1, 2)
        mainLayout.addWidget(self.clearMemoryButton, 2, 0)
        mainLayout.addWidget(self.readMemoryButton, 3, 0)
        mainLayout.addWidget(self.setMemoryButton, 4, 0)
        mainLayout.addWidget(self.addToMemoryButton, 5, 0)
        for i in range(1, Calculator.NumDigitButtons):
            # FIX: use integer division — '/' yields a float in Python 3 and
            # QGridLayout.addWidget requires integer row/column indices.
            row = ((9 - i) // 3) + 2
            column = ((i - 1) % 3) + 1
            mainLayout.addWidget(self.digitButtons[i], row, column)
        mainLayout.addWidget(self.digitButtons[0], 5, 1)
        mainLayout.addWidget(self.pointButton, 5, 2)
        mainLayout.addWidget(self.changeSignButton, 5, 3)
        mainLayout.addWidget(self.divisionButton, 2, 4)
        mainLayout.addWidget(self.timesButton, 3, 4)
        mainLayout.addWidget(self.minusButton, 4, 4)
        mainLayout.addWidget(self.plusButton, 5, 4)
        mainLayout.addWidget(self.squareRootButton, 2, 5)
        mainLayout.addWidget(self.powerButton, 3, 5)
        mainLayout.addWidget(self.reciprocalButton, 4, 5)
        mainLayout.addWidget(self.equalButton, 5, 5)
        self.setLayout(mainLayout)
        self.setWindowTitle('Calculator')

    def digitClicked(self):
        """Append the clicked digit to the display, starting a new operand if needed."""
        clickedButton = self.sender()
        digitValue = int(clickedButton.text())
        # Ignore a leading zero on an all-zero display.
        if self.display.text() == '0' and digitValue == 0.0:
            return
        if self.waitingForOperand:
            self.display.clear()
            self.waitingForOperand = False
        self.display.setText(self.display.text() + str(digitValue))

    def unaryOperatorClicked(self):
        """Apply Sqrt, x2 (square) or 1/x to the displayed operand in place."""
        clickedButton = self.sender()
        clickedOperator = clickedButton.text()
        operand = float(self.display.text())
        if clickedOperator == 'Sqrt':
            if operand < 0.0:
                self.abortOperation()
                return
            result = math.sqrt(operand)
        elif clickedOperator == u'x2':
            result = math.pow(operand, 2.0)
        elif clickedOperator == '1/x':
            if operand == 0.0:
                self.abortOperation()
                return
            result = 1.0 / operand
        self.display.setText(str(result))
        self.waitingForOperand = True

    def additiveOperatorClicked(self):
        """Handle '+'/'-' — fold any pending operators, then remember this one."""
        clickedButton = self.sender()
        clickedOperator = clickedButton.text()
        operand = float(self.display.text())
        # Multiplicative operators bind tighter, so resolve them first.
        if self.pendingMultiplicativeOperator:
            if not self.calculate(operand, self.pendingMultiplicativeOperator):
                self.abortOperation()
                return
            self.display.setText(str(self.factorSoFar))
            operand = self.factorSoFar
            self.factorSoFar = 0.0
            self.pendingMultiplicativeOperator = ''
        if self.pendingAdditiveOperator:
            if not self.calculate(operand, self.pendingAdditiveOperator):
                self.abortOperation()
                return
            self.display.setText(str(self.sumSoFar))
        else:
            self.sumSoFar = operand
        self.pendingAdditiveOperator = clickedOperator
        self.waitingForOperand = True

    def multiplicativeOperatorClicked(self):
        """Handle '×'/'÷' — fold a pending multiplicative op, then remember this one."""
        clickedButton = self.sender()
        clickedOperator = clickedButton.text()
        operand = float(self.display.text())
        if self.pendingMultiplicativeOperator:
            if not self.calculate(operand, self.pendingMultiplicativeOperator):
                self.abortOperation()
                return
            self.display.setText(str(self.factorSoFar))
        else:
            self.factorSoFar = operand
        self.pendingMultiplicativeOperator = clickedOperator
        self.waitingForOperand = True

    def equalClicked(self):
        """Fold both pending operators and display the final result."""
        operand = float(self.display.text())
        if self.pendingMultiplicativeOperator:
            if not self.calculate(operand, self.pendingMultiplicativeOperator):
                self.abortOperation()
                return
            operand = self.factorSoFar
            self.factorSoFar = 0.0
            self.pendingMultiplicativeOperator = ''
        if self.pendingAdditiveOperator:
            if not self.calculate(operand, self.pendingAdditiveOperator):
                self.abortOperation()
                return
            self.pendingAdditiveOperator = ''
        else:
            self.sumSoFar = operand
        self.display.setText(str(self.sumSoFar))
        self.sumSoFar = 0.0
        self.waitingForOperand = True

    def pointClicked(self):
        """Insert a decimal point, at most once per operand."""
        if self.waitingForOperand:
            self.display.setText('0')
        if '.' not in self.display.text():
            self.display.setText(self.display.text() + '.')
        self.waitingForOperand = False

    def changeSignClicked(self):
        """Toggle the sign of the displayed value."""
        text = self.display.text()
        value = float(text)
        if value > 0.0:
            text = '-' + text
        elif value < 0.0:
            # Negative values start with '-'; strip it.
            text = text[1:]
        self.display.setText(text)

    def backspaceClicked(self):
        """Delete the last character of the current operand."""
        if self.waitingForOperand:
            return
        text = self.display.text()[:-1]
        if not text:
            text = '0'
            self.waitingForOperand = True
        self.display.setText(text)

    def clear(self):
        """Clear only the current operand."""
        if self.waitingForOperand:
            return
        self.display.setText('0')
        self.waitingForOperand = True

    def clearAll(self):
        """Reset all accumulators, pending operators and the display."""
        self.sumSoFar = 0.0
        self.factorSoFar = 0.0
        self.pendingAdditiveOperator = ''
        self.pendingMultiplicativeOperator = ''
        self.display.setText('0')
        self.waitingForOperand = True

    def clearMemory(self):
        """MC: forget the stored memory value."""
        self.sumInMemory = 0.0

    def readMemory(self):
        """MR: show the stored memory value."""
        self.display.setText(str(self.sumInMemory))
        self.waitingForOperand = True

    def setMemory(self):
        """MS: evaluate the current expression and store the result."""
        self.equalClicked()
        self.sumInMemory = float(self.display.text())

    def addToMemory(self):
        """M+: evaluate the current expression and add the result to memory."""
        self.equalClicked()
        self.sumInMemory += float(self.display.text())

    def createButton(self, text, member):
        """Create a calculator Button labelled *text* wired to slot *member*."""
        button = Button(text)
        button.clicked.connect(member)
        return button

    def abortOperation(self):
        """Reset state and signal an error (e.g. division by zero) on the display."""
        self.clearAll()
        self.display.setText('####')

    def calculate(self, rightOperand, pendingOperator):
        """Apply *pendingOperator* to the matching accumulator.

        Returns False (without modifying state) on division by zero.
        """
        if pendingOperator == '+':
            self.sumSoFar += rightOperand
        elif pendingOperator == '-':
            self.sumSoFar -= rightOperand
        elif pendingOperator == '\N{MULTIPLICATION SIGN}':
            self.factorSoFar *= rightOperand
        elif pendingOperator == '\N{DIVISION SIGN}':
            if rightOperand == 0.0:
                return False
            self.factorSoFar /= rightOperand
        return True
class TestLabeledPriceWithoutRequest(TestLabeledPriceBase):
    """Offline tests for LabeledPrice: slots, attribute values, serialization, equality."""

    def test_slot_behaviour(self, labeled_price):
        obj = labeled_price
        # Every declared slot must actually be set on the instance.
        for slot_name in obj.__slots__:
            assert getattr(obj, slot_name, 'err') != 'err', f"got extra slot '{slot_name}'"
        # No slot may be declared twice anywhere in the MRO.
        assert len(mro_slots(obj)) == len(set(mro_slots(obj))), 'duplicate slot'

    def test_expected_values(self, labeled_price):
        assert labeled_price.label == self.label
        assert labeled_price.amount == self.amount

    def test_to_dict(self, labeled_price):
        as_dict = labeled_price.to_dict()
        assert isinstance(as_dict, dict)
        assert as_dict['label'] == labeled_price.label
        assert as_dict['amount'] == labeled_price.amount

    def test_equality(self):
        a = LabeledPrice('label', 100)
        b = LabeledPrice('label', 100)
        c = LabeledPrice('Label', 101)
        d = Location(123, 456)
        # Equal content implies equal objects and hashes.
        assert a == b
        assert hash(a) == hash(b)
        # Different content or type implies inequality in both value and hash.
        assert a != c
        assert hash(a) != hash(c)
        assert a != d
        assert hash(a) != hash(d)
class SampledResponse(FrequencyResponse):
    """Frequency response tabulated at discrete frequencies.

    evaluate() interpolates the real and imaginary parts separately with
    numpy.interp; ``left``/``right`` are the fill values used outside the
    tabulated range (None means numpy's default of clamping to the end values).
    """

    frequencies = Array.T(shape=(None,), dtype=float, serialize_as='list')
    values = Array.T(shape=(None,), dtype=complex, serialize_as='list')
    left = Complex.T(optional=True)
    right = Complex.T(optional=True)

    def __init__(self, frequencies, values, left=None, right=None, **kwargs):
        # BUG FIX: left/right were silently dropped instead of being forwarded,
        # so self.left/self.right always kept their declared defaults and
        # evaluate() ignored the caller's out-of-range fill values.
        FrequencyResponse.__init__(
            self,
            frequencies=asarray_1d(frequencies, float),
            values=asarray_1d(values, complex),
            left=left,
            right=right,
            **kwargs)

    def evaluate(self, freqs):
        """Interpolate the tabulated response at the given frequencies."""
        ereal = num.interp(freqs, self.frequencies, num.real(self.values), left=self.left, right=self.right)
        eimag = num.interp(freqs, self.frequencies, num.imag(self.values), left=self.left, right=self.right)
        transfer = (ereal + (1j * eimag))
        return transfer

    def inverse(self):
        """Return a SampledResponse representing the reciprocal response."""
        def inv_or_none(x):
            # None (use numpy's default fill) stays None under inversion.
            if (x is not None):
                return (1.0 / x)
        return SampledResponse(self.frequencies, (1.0 / self.values), left=inv_or_none(self.left), right=inv_or_none(self.right))

    def summary(self):
        return 'sampled'
def ytest_base(unit, related_prj_dir, related_prj_name, args):
    """Populate the build unit's TEST-* variables for a ytest-style test node."""
    keywords = {'DEPENDS': -1, 'DATA': -1}
    flat_args, spec_args = _common.sort_by_keywords(keywords, args)
    # Basic identity of the tested binary and the test itself.
    unit.set(['TEST-NAME', os.path.basename(flat_args[0])])
    unit.set(['SCRIPT-REL-PATH', flat_args[1]])
    unit.set(['SOURCE-FOLDER-PATH', related_prj_dir])
    unit.set(['BUILD-FOLDER-PATH', os.path.join('$B', related_prj_dir)])
    unit.set(['TESTED-BINARY-PATH', flat_args[0]])
    # Optional DEPENDS / DATA keyword arguments.
    custom_deps = ' '.join(spec_args['DEPENDS']) if 'DEPENDS' in spec_args else ''
    unit.set(['CUSTOM-DEPENDENCIES', custom_deps])
    data_lst = spec_args.get('DATA', []) + (unit.get(['__test_data']) or '').split(' ')
    data = '"' + ';'.join(data_lst) + '"' if data_lst else ''
    unit.set(['TEST-DATA', data])
    ya_root = unit.get('YA_ROOT')
    unit.set(['TEST_RUN_SCRIPT', 'devtools/{}/test/node/run_test.py'.format(ya_root)])
    related_dirs_list = [
        '${ARCADIA_ROOT}/devtools/svn_credentials',
        '{ARCADIA_ROOT}/devtools/${YA_ROOT}',
        '${ARCADIA_ROOT}/devtools/${YA_ROOT}',
        '$RELATED_TARGET_SRCDIR',
    ]
    # Each related dir becomes a '--test-related-path <dir>' argument pair.
    related_dirs_value = [
        token
        for rel in related_dirs_list
        for token in ('--test-related-path', rel)
    ]
    unit.set(['RELATED_DIRS', ' '.join(related_dirs_value)])
    unit.set(['TEST_KV', '${{kv;hide:"test_related_dirs {}"}}'.format(' '.join(related_dirs_list))])
class AppBaseView(social_app.BaseViewClass):
    """Base view that wires the social-auth context into the home template."""

    def render_home(self, **extra):
        # Build the template context from the configured auth backends, the
        # current strategy/user, and any extra values supplied by the caller.
        backends = web.config[setting_name('AUTHENTICATION_BACKENDS')]
        context = common_context(
            backends,
            load_strategy(),
            user=self.get_current_user(),
            plus_id=web.config.get(setting_name('SOCIAL_AUTH_GOOGLE_PLUS_KEY')),
            **extra)
        return render.home(**context)
@require_tf  # NOTE(review): reconstructed from garbled '_tf' residue — confirm against upstream
class TFXLNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model/pipeline test suite for the TF XLNet family of models."""

    # All classes under test; empty when TensorFlow is not installed.
    all_model_classes = ((TFXLNetModel, TFXLNetLMHeadModel, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetForQuestionAnsweringSimple, TFXLNetForMultipleChoice) if is_tf_available() else ())
    all_generative_model_classes = ((TFXLNetLMHeadModel,) if is_tf_available() else ())
    # Maps pipeline task names onto the XLNet class that serves them.
    pipeline_model_mapping = ({'feature-extraction': TFXLNetModel, 'question-answering': TFXLNetForQuestionAnsweringSimple, 'text-classification': TFXLNetForSequenceClassification, 'text-generation': TFXLNetLMHeadModel, 'token-classification': TFXLNetForTokenClassification, 'zero-shot': TFXLNetForSequenceClassification} if is_tf_available() else {})
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        # All pipeline tests are skipped for this model.
        return True

    def setUp(self):
        self.model_tester = TFXLNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLNetConfig, d_inner=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlnet_base_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlnet_base_model(*config_and_inputs)

    def test_xlnet_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlnet_lm_head(*config_and_inputs)

    def test_xlnet_sequence_classif(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlnet_sequence_classif(*config_and_inputs)

    def test_xlnet_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlnet_for_token_classification(*config_and_inputs)

    def test_xlnet_qa(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlnet_qa(*config_and_inputs)

    def test_xlnet_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlnet_for_multiple_choice(*config_and_inputs)

    def test_model_from_pretrained(self):
        # Only the first archive entry is exercised to keep the test fast.
        for model_name in TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXLNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_loss_computation(self):
        """Check loss shape when the model is fed via kwargs, dict, and tuple."""
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, 'hf_compute_loss', None):
                # Determine the expected loss shape from the label tensor that
                # _prepare_for_class adds on top of the plain inputs.
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[sorted((prepared_for_class.keys() - inputs_dict.keys()), reverse=True)[0]]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Case 1: first positional input plus keyword arguments.
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_name = ('input_ids' if ('input_ids' in prepared_for_class) else 'pixel_values')
                input_ids = prepared_for_class.pop(input_name)
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(((loss.shape.as_list() == expected_loss_size) or (loss.shape.as_list() == [1])))
                # Case 2: everything passed as a single input dict.
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(((loss.shape.as_list() == expected_loss_size) or (loss.shape.as_list() == [1])))
                # Case 3: inputs passed as a positional tuple, placing each
                # label at its position in the call signature.
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                label_keys = (prepared_for_class.keys() - inputs_dict.keys())
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())
                tuple_index_mapping = {0: input_name}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Start from the signature defaults and overwrite the mapped slots.
                list_input = []
                for name in signature_names:
                    if (name != 'kwargs'):
                        list_input.append(signature[name].default)
                for (index, value) in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)
                # The trailing element is dropped before the call.
                loss = model(tuple_input[:(- 1)])[0]
                self.assertTrue(((loss.shape.as_list() == expected_loss_size) or (loss.shape.as_list() == [1])))
class InlineQueryResultDocument(InlineQueryResult):
    """Inline query result linking to a file hosted at ``document_url``."""

    __slots__ = ('reply_markup', 'caption_entities', 'document_url', 'thumbnail_width', 'thumbnail_height', 'caption', 'title', 'description', 'parse_mode', 'mime_type', 'thumbnail_url', 'input_message_content')

    def __init__(self, id: str, document_url: str, title: str, mime_type: str, caption: Optional[str]=None, description: Optional[str]=None, reply_markup: Optional[InlineKeyboardMarkup]=None, input_message_content: Optional['InputMessageContent']=None, parse_mode: ODVInput[str]=DEFAULT_NONE, caption_entities: Optional[Sequence[MessageEntity]]=None, thumbnail_url: Optional[str]=None, thumbnail_width: Optional[int]=None, thumbnail_height: Optional[int]=None, *, api_kwargs: Optional[JSONDict]=None):
        super().__init__(InlineQueryResultType.DOCUMENT, id, api_kwargs=api_kwargs)
        # Instances are frozen after construction; attribute assignment is
        # only permitted inside this unfreezing context.
        with self._unfrozen():
            # Required fields.
            self.document_url: str = document_url
            self.title: str = title
            self.mime_type: str = mime_type
            # Optional caption and its formatting.
            self.caption: Optional[str] = caption
            self.parse_mode: ODVInput[str] = parse_mode
            # Entity sequences are normalized to an immutable tuple.
            self.caption_entities: Tuple[(MessageEntity, ...)] = parse_sequence_arg(caption_entities)
            # Optional presentation and behaviour.
            self.description: Optional[str] = description
            self.reply_markup: Optional[InlineKeyboardMarkup] = reply_markup
            self.input_message_content: Optional[InputMessageContent] = input_message_content
            self.thumbnail_url: Optional[str] = thumbnail_url
            self.thumbnail_width: Optional[int] = thumbnail_width
            self.thumbnail_height: Optional[int] = thumbnail_height
def create_embedding(caption_file: str, vocab_file: str, embed_size: int, output: str, **fasttext_kwargs):
    """Train FastText embeddings on caption tokens and save them as a numpy array.

    Rows for the vocabulary's <pad> and <unk> entries are left as zeros.
    Extra keyword arguments are forwarded to the FastText constructor.
    """
    caption_df = pd.read_json(caption_file)
    # Wrap every caption in <start>/<end> sentinel tokens.
    caption_df['tokens'] = caption_df['tokens'].apply(
        lambda tokens: ['<start>'] + list(tokens) + ['<end>'])
    sentences = list(caption_df['tokens'].values)
    vocabulary = torch.load(vocab_file, map_location='cpu')
    epochs = fasttext_kwargs.get('epochs', 10)
    model = FastText(size=embed_size, min_count=1, **fasttext_kwargs)
    model.build_vocab(sentences=sentences)
    model.train(sentences=sentences, total_examples=len(sentences), epochs=epochs)
    word_embeddings = np.zeros((len(vocabulary), embed_size))
    with tqdm(total=len(vocabulary), ascii=True) as pbar:
        for word, idx in vocabulary.word2idx.items():
            # Sentinel rows keep their zero vectors.
            if word in ('<pad>', '<unk>'):
                continue
            word_embeddings[idx] = model.wv[word]
            pbar.update()
    np.save(output, word_embeddings)
    print('Finish writing fasttext embeddings to ' + output)
def get_constant_counts_from_file(infile, has_header, include_count):
    """Parse a whitespace-separated constants file into a {SMILES: count} dict.

    Each line holds a SMILES string, followed by a positive integer count when
    *include_count* is true; otherwise the count defaults to 1. Duplicate
    SMILES and malformed lines abort via die() with the file name and line
    number. When *has_header* is set the first line is skipped.
    """
    constant_counts = {}
    line_iter = iter(infile)
    lineno = 1
    seen = {}
    filename = infile.name

    def oopsie(msg):
        # Abort with a message locating the offending line in the input file.
        die(f'Unable to process constants file: {msg}, {filename!r} line {lineno}')

    if has_header:
        try:
            next(line_iter)
        except StopIteration:
            # Header-only (or empty) file: nothing to collect.
            # BUG FIX: previously returned the undefined name 'constants',
            # raising NameError instead of an empty result.
            return constant_counts
        lineno += 1
    for lineno, line in enumerate(line_iter, lineno):
        fields = line.split()
        num_fields = len(fields)
        if include_count:
            if num_fields == 0:
                oopsie('Missing SMILES and count columns')
            if num_fields == 1:
                oopsie('Missing count column')
            smiles, count_str = fields[:2]
            try:
                count = int(count_str)
                if count < 1:
                    raise ValueError
            except ValueError:
                oopsie(f'Count must be a positive integer (not {count_str!r})')
        else:
            if num_fields == 0:
                oopsie('Missing SMILES column')
            smiles = fields[0]
            count = 1
        if smiles in seen:
            prev_lineno = seen[smiles]
            oopsie(f'SMILES is a duplicate from {prev_lineno}')
        seen[smiles] = lineno
        constant_counts[smiles] = count
    return constant_counts
def pad_shape(shape, must_be_divisible_by):
    """Round each entry of *shape* up to the nearest multiple of its divisor.

    Entries already divisible are left unchanged. The divisor may be a single
    scalar (applied to every axis) or a per-axis sequence of the same length
    as *shape*. Returns an int numpy array.
    """
    if isinstance(must_be_divisible_by, (tuple, list, np.ndarray)):
        assert len(must_be_divisible_by) == len(shape)
    else:
        # Broadcast a scalar divisor across all axes.
        must_be_divisible_by = [must_be_divisible_by] * len(shape)
    padded = []
    for dim, divisor in zip(shape, must_be_divisible_by):
        remainder = dim % divisor
        padded.append(dim if remainder == 0 else dim + divisor - remainder)
    return np.array(padded).astype(int)
class ImageStorage():
    """Snapshot of the target images and guides held by a set of losses.

    Captures them at construction time so restore() can write them back after
    the losses have been temporarily modified.
    """

    def __init__(self, losses: Iterable[Loss]) -> None:
        self.target_images_and_guides: Dict[(ComparisonLoss, Tuple[(torch.Tensor, Optional[torch.Tensor])])] = {}
        self.input_guides: Dict[(Loss, torch.Tensor)] = {}
        for loss in losses:
            # Only comparison losses carry a target image/guide pair.
            if isinstance(loss, ComparisonLoss) and loss.target_image is not None:
                self.target_images_and_guides[loss] = (loss.target_image, loss.target_guide)
            # Input guides may be present on any loss type.
            if loss.input_guide is not None:
                self.input_guides[loss] = loss.input_guide

    def restore(self) -> None:
        """Write the captured images and guides back onto their losses."""
        for loss, (target_image, target_guide) in self.target_images_and_guides.items():
            loss.set_target_image(target_image, guide=target_guide)
        for loss, guide in self.input_guides.items():
            loss.set_input_guide(guide)
@deprecated(msg=DATAREADER_DEPRECATION_WARNING)  # NOTE(review): decorator name reconstructed from garbled '(msg=...)' residue — confirm against upstream
def default_returns_func(symbol, start=None, end=None):
    """Fetch daily returns for *symbol* over [start, end].

    Defaults to the full history (1970 through one business day ago). SPY is
    served from a locally cached CSV; other symbols are fetched from Yahoo.
    """
    start = '1/1/1970' if start is None else start
    end = _1_bday_ago() if end is None else end
    start = get_utc_timestamp(start)
    end = get_utc_timestamp(end)
    if symbol == 'SPY':
        # Cached full history, sliced down to the requested window.
        filepath = data_path('spy.csv')
        rets = get_returns_cached(filepath, get_symbol_returns_from_yahoo, end,
                                  symbol='SPY', start='1/1/1970', end=datetime.now())
        rets = rets[start:end]
    else:
        rets = get_symbol_returns_from_yahoo(symbol, start=start, end=end)
    return rets[symbol]
def get_head(head_name, in_index, idx_to_planes, tasks, task_channel_mapping, atrc_genotype_path=None):
    """Instantiate a prediction head by name.

    *in_index* is a comma-separated string of feature indices (collapsed to a
    single int when only one is given). Relational-context variants are built
    from a task-pair genotype: fixed variants use one attention mode for all
    pairs, while the adaptive variant loads its genotype from JSON.
    """
    in_index = [int(i) for i in in_index.split(',')]
    if len(in_index) == 1:
        in_index = in_index[0]
    if head_name == 'DemtHead':
        from .heads.demt_head import DemtHead
        partial_head = partial(DemtHead)
    elif 'RelationalContextHead' in head_name:
        from .heads.relationalcontext import RelationalContextHead
        # Fixed-genotype variants assign the same attention mode to every task pair.
        fixed_modes = {
            'GlobalRelationalContextHead': 1,
            'LocalRelationalContextHead': 2,
            'TLabelRelationalContextHead': 3,
            'SLabelRelationalContextHead': 4,
        }
        if head_name in fixed_modes:
            mode = fixed_modes[head_name]
            atrc_genotype = {t: {a: mode for a in tasks} for t in tasks}
        elif head_name == 'AdaptiveTaskRelationalContextHead':
            assert os.path.isfile(atrc_genotype_path), 'When using ATRC, a path to a valid genotype json file needs to be supplied via `--model.atrc_genotype_path path/to/genotype.json`'
            with open(atrc_genotype_path) as f:
                atrc_genotype = json.load(f)['data']
        else:
            raise ValueError
        partial_head = partial(RelationalContextHead, atrc_genotype=atrc_genotype)
    elif head_name == 'AdaptiveTaskRelationalContextSearchHead':
        from .heads.relationalcontextsearch import RelationalContextSearchHead
        partial_head = partial(RelationalContextSearchHead)
    head = partial_head(tasks=tasks, in_index=in_index, idx_to_planes=idx_to_planes, task_channel_mapping=task_channel_mapping)
    return head
class BaseImageDataset(torch.utils.data.Dataset):
    """Abstract base class for image datasets.

    Holds bookkeeping state (name, root, loader, image/class lists) and
    defines the accessor interface that concrete datasets implement.
    """

    def __init__(self, name, root, image_loader=jpeg4py_loader):
        self.name = name                  # dataset identifier
        self.root = root                  # dataset root directory
        self.image_loader = image_loader  # callable used to read image files
        self.image_list = []              # filled by subclasses
        self.class_list = []              # filled by subclasses

    def __len__(self):
        """Dataset length is the number of images."""
        return self.get_num_images()

    def __getitem__(self, index):
        # Direct indexing is intentionally a no-op.
        return None

    def get_num_images(self):
        return len(self.image_list)

    def get_num_classes(self):
        return len(self.class_list)

    def get_class_list(self):
        return self.class_list

    def has_class_info(self):
        # Subclasses with class annotations override this.
        return False

    def has_segmentation_info(self):
        # Subclasses with segmentation annotations override this.
        return False

    def get_class_name(self, image_id):
        return None

    def get_name(self):
        raise NotImplementedError

    def get_images_in_class(self, class_name):
        raise NotImplementedError

    def get_image_info(self, seq_id):
        raise NotImplementedError

    def get_image(self, image_id, anno=None):
        raise NotImplementedError
def build_model_base(images, model_name, training, override_params=None):
    """Create an EfficientNet feature extractor (no classification head).

    Returns the feature tensor (aliased as 'global_pool') together with the
    model's endpoints dict.
    """
    assert isinstance(images, tf.Tensor)
    blocks_args, global_params = get_model_params(model_name, override_params)
    # Scope all variables under the model name so checkpoints line up.
    with tf.variable_scope(model_name):
        net = efficientnet_model.Model(blocks_args, global_params)
        features = net(images, training=training, features_only=True)
    features = tf.identity(features, 'global_pool')
    return features, net.endpoints
@parametrize_families  # NOTE(review): reconstructed from garbled '_families' residue — confirm against upstream
def test_root_testsuites_tag(pytester: Pytester, run_and_parse: RunAndParse, xunit_family: str) -> None:
    """The XML report root is <testsuites>, wrapping exactly one <testsuite>."""
    pytester.makepyfile('\n def test_x():\n pass\n ')
    _, dom = run_and_parse(family=xunit_family)
    root = dom.get_unique_child
    assert root.tag == 'testsuites'
    suite = root.get_unique_child
    assert suite.tag == 'testsuite'
@dataclass(order=True)  # NOTE(review): decorator name reconstructed from garbled '(order=True)' residue — confirm against upstream
class TransactionChannelDeposit(State):
    """Record of an on-chain channel deposit made by a participant."""

    participant_address: Address
    contract_balance: TokenAmount
    deposit_block_number: BlockNumber

    def __post_init__(self) -> None:
        # Validate every field's runtime type eagerly at construction.
        for value, expected_type in (
            (self.participant_address, T_Address),
            (self.contract_balance, T_TokenAmount),
            (self.deposit_block_number, T_BlockNumber),
        ):
            typecheck(value, expected_type)
def _create_reader_for_fake_data(observation_type: str, fake_dataset: xr.Dataset, filename_info: Optional[dict]=None):
    """Yield an NC_ABI_L2 reader whose xarray backend returns *fake_dataset*."""
    from satpy.readers.abi_l2_nc import NC_ABI_L2
    if filename_info is None:
        # Default to a plausible GOES-16 CONUS file description.
        filename_info = {'platform_shortname': 'G16', 'scene_abbr': 'C', 'scan_mode': 'M3'}
    filetype_info = {'file_type': 'info', 'observation_type': observation_type}
    # Patch xarray inside the reader so no real file is opened.
    with mock.patch('satpy.readers.abi_base.xr') as fake_xr:
        fake_xr.open_dataset.return_value = fake_dataset
        yield NC_ABI_L2('filename', filename_info, filetype_info)
def process_korQuAD(corpus_fname, output_fname):
    """Flatten a KorQuAD-format JSON corpus into a plain-text file.

    For every article, writes each paragraph context on its own line, followed
    by one "question answer" line per answer of each QA pair.
    """
    # KorQuAD is distributed as UTF-8; be explicit on the input side too so
    # the result does not depend on the platform's default encoding.
    with open(corpus_fname, encoding='utf-8') as f1, \
            open(output_fname, 'w', encoding='utf-8') as f2:
        dataset_json = json.load(f1)
        dataset = dataset_json['data']
        for article in dataset:
            w_lines = []
            for paragraph in article['paragraphs']:
                w_lines.append(paragraph['context'])
                for qa in paragraph['qas']:
                    q_text = qa['question']
                    for a in qa['answers']:
                        w_lines.append(q_text + ' ' + a['text'])
            for line in w_lines:
                # FIX: writelines() on a str iterates it character by
                # character; write() is the intended single-string call
                # (identical output, one call instead of len(line)+1).
                f2.write(line + '\n')
@pytest.mark.skip('KAK instability')  # NOTE(review): decorator reconstructed from garbled ".skip(...)" residue — confirm against upstream
def test_zzswap_as_syc_2():
    """Compile a ZZSwap gate into the three-SYC circuit form and check its diagram.

    NOTE(review): the expected-diagram string below appears to have lost its
    box-drawing / moment-separator characters in transit (gate names run
    together) — verify against the original test before trusting exact
    equality here.
    """
    (q1, q2) = cirq.LineQubit.range(2)
    zzs = ZZSwap(zz_exponent=0.123)
    circuit = zzswap_as_syc(zzs.theta, q1, q2)
    assert (str(circuit) == '0: PhX(0.145)^(0)Z^-0.2SYCPhX(0.214)^0.576Z^-0.131SYCSYCPhX(-0.0833)^0.576Z^-0.548\n \n1: PhX(0.973)^(0)Z^(-1/14)SYCPhX(-0.369)Z^0.869SYCPhX(0.0)^0.52Z^0SYCPhX(-0.394)Z^0.036')
def load_teapot_batch(batch_size=4, target_num=2):
    """Load the teapot mesh and tile it into a minibatch.

    Textures are initialised to all-ones (one 4x4x4 RGB texture cube per face).
    Returns the (vertices, faces, textures) minibatch triple.
    """
    vertices, faces = nr.load_obj(os.path.join(data_dir, 'teapot.obj'))
    textures = torch.ones((faces.shape[0], 4, 4, 4, 3), dtype=torch.float32)
    vertices, faces, textures = to_minibatch((vertices, faces, textures), batch_size, target_num)
    return vertices, faces, textures
def poly1305(cipher_encrypt, nonce, ciphertext):
    """Compute the Poly1305 tag of *ciphertext* (RFC 8439 AEAD layout, empty AAD).

    The one-time key is derived by encrypting 32 zero bytes under *nonce*.
    The MAC input is the ciphertext zero-padded to a 16-byte boundary, then
    8 zero bytes (the empty-AAD length field) and the 8-byte little-endian
    ciphertext length.
    """
    otk = cipher_encrypt(nonce, bytes(32))
    mac_data = (ciphertext
                + bytes(((-len(ciphertext)) % 16) + 8)
                + len(ciphertext).to_bytes(8, 'little'))
    # FIX: restore the r-clamp mask from RFC 8439 section 2.5 — it had been
    # lost after the '&', leaving a syntax error.
    r = int.from_bytes(otk[:16], 'little') & 0x0FFFFFFC0FFFFFFC0FFFFFFC0FFFFFFF
    s = int.from_bytes(otk[16:], 'little')
    prime = (1 << 130) - 5
    acc = 0
    for i in range(0, len(mac_data), 16):
        # Each 16-byte block gets an 0x01 byte appended before being folded in.
        block = int.from_bytes(mac_data[i:(i + 16)] + b'\x01', 'little')
        acc = (r * (acc + block)) % prime
    return ((acc + s) & ((1 << 128) - 1)).to_bytes(16, 'little')
class ConnectionTestCase(TestCase):
    """Unit tests for PynamoDB's TableConnection.

    Every test patches the low-level dispatch method (PATCH_METHOD) and
    asserts the exact request parameters built by the connection, so the
    tests never hit DynamoDB.
    """
    def setUp(self):
        # Fixed table name/region shared by all tests.
        self.test_table_name = 'Thread'
        self.region = 'us-east-1'
    def test_create_connection(self):
        conn = TableConnection(self.test_table_name, meta_table=MetaTable(DESCRIBE_TABLE_DATA[TABLE_KEY]))
        self.assertIsNotNone(conn)
    def test_connection_session_set_credentials(self):
        # Explicit credentials must be visible from the session, including
        # from a worker thread (sessions can be thread-local).
        conn = TableConnection(self.test_table_name, meta_table=MetaTable(DESCRIBE_TABLE_DATA[TABLE_KEY]), aws_access_key_id='access_key_id', aws_secret_access_key='secret_access_key')
        def get_credentials():
            return conn.connection.session.get_credentials()
        credentials = get_credentials()
        self.assertEqual(credentials.access_key, 'access_key_id')
        self.assertEqual(credentials.secret_key, 'secret_access_key')
        with ThreadPoolExecutor() as executor:
            fut = executor.submit(get_credentials)
            credentials = fut.result()
            self.assertEqual(credentials.access_key, 'access_key_id')
            self.assertEqual(credentials.secret_key, 'secret_access_key')
    def test_connection_session_set_credentials_with_session_token(self):
        conn = TableConnection(self.test_table_name, meta_table=MetaTable(DESCRIBE_TABLE_DATA[TABLE_KEY]), aws_access_key_id='access_key_id', aws_secret_access_key='secret_access_key', aws_session_token='session_token')
        credentials = conn.connection.session.get_credentials()
        self.assertEqual(credentials.access_key, 'access_key_id')
        self.assertEqual(credentials.secret_key, 'secret_access_key')
        self.assertEqual(credentials.token, 'session_token')
    def test_create_table(self):
        # Missing attribute_definitions / key_schema must raise ValueError;
        # with both supplied the serialized request is checked exactly.
        conn = TableConnection(self.test_table_name, meta_table=MetaTable(DESCRIBE_TABLE_DATA[TABLE_KEY]))
        kwargs = {'read_capacity_units': 1, 'write_capacity_units': 1}
        self.assertRaises(ValueError, conn.create_table, **kwargs)
        kwargs['attribute_definitions'] = [{'attribute_name': 'key1', 'attribute_type': 'S'}, {'attribute_name': 'key2', 'attribute_type': 'S'}]
        self.assertRaises(ValueError, conn.create_table, **kwargs)
        kwargs['key_schema'] = [{'attribute_name': 'key1', 'key_type': 'hash'}, {'attribute_name': 'key2', 'key_type': 'range'}]
        params = {'TableName': 'Thread', 'ProvisionedThroughput': {'WriteCapacityUnits': 1, 'ReadCapacityUnits': 1}, 'AttributeDefinitions': [{'AttributeType': 'S', 'AttributeName': 'key1'}, {'AttributeType': 'S', 'AttributeName': 'key2'}], 'KeySchema': [{'KeyType': 'HASH', 'AttributeName': 'key1'}, {'KeyType': 'RANGE', 'AttributeName': 'key2'}]}
        with patch(PATCH_METHOD) as req:
            req.return_value = {}
            conn.create_table(**kwargs)
            kwargs = req.call_args[0][1]
            self.assertEqual(kwargs, params)
    def test_create_table_with_tags(self):
        # Tags dict must be serialized into the Key/Value list form.
        conn = TableConnection(self.test_table_name, meta_table=MetaTable(DESCRIBE_TABLE_DATA[TABLE_KEY]))
        kwargs = {'read_capacity_units': 1, 'write_capacity_units': 1, 'attribute_definitions': [{'attribute_name': 'key1', 'attribute_type': 'S'}, {'attribute_name': 'key2', 'attribute_type': 'S'}], 'key_schema': [{'attribute_name': 'key1', 'key_type': 'hash'}, {'attribute_name': 'key2', 'key_type': 'range'}], 'tags': {'tag-key1': 'tag-value1', 'tag-key2': 'tag-value2'}}
        params = {'TableName': 'Thread', 'ProvisionedThroughput': {'WriteCapacityUnits': 1, 'ReadCapacityUnits': 1}, 'AttributeDefinitions': [{'AttributeType': 'S', 'AttributeName': 'key1'}, {'AttributeType': 'S', 'AttributeName': 'key2'}], 'KeySchema': [{'KeyType': 'HASH', 'AttributeName': 'key1'}, {'KeyType': 'RANGE', 'AttributeName': 'key2'}], 'Tags': [{'Key': 'tag-key1', 'Value': 'tag-value1'}, {'Key': 'tag-key2', 'Value': 'tag-value2'}]}
        with patch(PATCH_METHOD) as req:
            req.return_value = {}
            conn.create_table(**kwargs)
            kwargs = req.call_args[0][1]
            self.assertEqual(kwargs, params)
    def test_update_time_to_live(self):
        params = {'TableName': 'Thread', 'TimeToLiveSpecification': {'AttributeName': 'ttl_attr', 'Enabled': True}}
        with patch(PATCH_METHOD) as req:
            req.return_value = (HttpOK(), None)
            conn = TableConnection(self.test_table_name, meta_table=MetaTable(DESCRIBE_TABLE_DATA[TABLE_KEY]))
            conn.update_time_to_live('ttl_attr')
            kwargs = req.call_args[0][1]
            self.assertEqual(kwargs, params)
    def test_delete_table(self):
        params = {'TableName': 'Thread'}
        with patch(PATCH_METHOD) as req:
            req.return_value = (HttpOK(), None)
            conn = TableConnection(self.test_table_name, meta_table=MetaTable(DESCRIBE_TABLE_DATA[TABLE_KEY]))
            conn.delete_table()
            kwargs = req.call_args[0][1]
            self.assertEqual(kwargs, params)
    def test_update_table(self):
        # Plain throughput update, then a GSI throughput update.
        with patch(PATCH_METHOD) as req:
            req.return_value = (HttpOK(), None)
            conn = TableConnection(self.test_table_name, meta_table=MetaTable(DESCRIBE_TABLE_DATA[TABLE_KEY]))
            params = {'ProvisionedThroughput': {'WriteCapacityUnits': 2, 'ReadCapacityUnits': 2}, 'TableName': self.test_table_name}
            conn.update_table(read_capacity_units=2, write_capacity_units=2)
            self.assertEqual(req.call_args[0][1], params)
        with patch(PATCH_METHOD) as req:
            req.return_value = (HttpOK(), None)
            conn = TableConnection(self.test_table_name, meta_table=MetaTable(DESCRIBE_TABLE_DATA[TABLE_KEY]))
            global_secondary_index_updates = [{'index_name': 'foo-index', 'read_capacity_units': 2, 'write_capacity_units': 2}]
            params = {'TableName': self.test_table_name, 'ProvisionedThroughput': {'ReadCapacityUnits': 2, 'WriteCapacityUnits': 2}, 'GlobalSecondaryIndexUpdates': [{'Update': {'IndexName': 'foo-index', 'ProvisionedThroughput': {'ReadCapacityUnits': 2, 'WriteCapacityUnits': 2}}}]}
            conn.update_table(read_capacity_units=2, write_capacity_units=2, global_secondary_index_updates=global_secondary_index_updates)
            self.assertEqual(req.call_args[0][1], params)
    def test_describe_table(self):
        with patch(PATCH_METHOD) as req:
            req.return_value = DESCRIBE_TABLE_DATA
            conn = TableConnection(self.test_table_name, meta_table=MetaTable(DESCRIBE_TABLE_DATA[TABLE_KEY]))
            data = conn.describe_table()
            self.assertEqual(data, DESCRIBE_TABLE_DATA[TABLE_KEY])
            self.assertEqual(req.call_args[0][1], {'TableName': 'Thread'})
    def test_delete_item(self):
        conn = TableConnection(self.test_table_name, meta_table=MetaTable(DESCRIBE_TABLE_DATA[TABLE_KEY]))
        with patch(PATCH_METHOD) as req:
            req.return_value = {}
            conn.delete_item('Amazon DynamoDB', 'How do I update multiple items?')
            params = {'ReturnConsumedCapacity': 'TOTAL', 'Key': {'ForumName': {'S': 'Amazon DynamoDB'}, 'Subject': {'S': 'How do I update multiple items?'}}, 'TableName': self.test_table_name}
            self.assertEqual(req.call_args[0][1], params)
    def test_update_item(self):
        conn = TableConnection(self.test_table_name, meta_table=MetaTable(DESCRIBE_TABLE_DATA[TABLE_KEY]))
        with patch(PATCH_METHOD) as req:
            req.return_value = (HttpOK(), {})
            conn.update_item('foo-key', actions=[Path('Subject').set('foo-subject')], range_key='foo-range-key')
            params = {'Key': {'ForumName': {'S': 'foo-key'}, 'Subject': {'S': 'foo-range-key'}}, 'UpdateExpression': 'SET #0 = :0', 'ExpressionAttributeNames': {'#0': 'Subject'}, 'ExpressionAttributeValues': {':0': {'S': 'foo-subject'}}, 'ReturnConsumedCapacity': 'TOTAL', 'TableName': 'Thread'}
            self.assertEqual(req.call_args[0][1], params)
    def test_get_item(self):
        conn = TableConnection(self.test_table_name, meta_table=MetaTable(DESCRIBE_TABLE_DATA[TABLE_KEY]))
        with patch(PATCH_METHOD) as req:
            req.return_value = GET_ITEM_DATA
            item = conn.get_item('Amazon DynamoDB', 'How do I update multiple items?')
            self.assertEqual(item, GET_ITEM_DATA)
    def test_put_item(self):
        # Plain put, put again (param order irrelevant for dict equality),
        # then a conditional put with an expression.
        conn = TableConnection(self.test_table_name, meta_table=MetaTable(DESCRIBE_TABLE_DATA[TABLE_KEY]))
        with patch(PATCH_METHOD) as req:
            req.return_value = {}
            conn.put_item('foo-key', range_key='foo-range-key', attributes={'ForumName': 'foo-value'})
            params = {'ReturnConsumedCapacity': 'TOTAL', 'TableName': self.test_table_name, 'Item': {'ForumName': {'S': 'foo-value'}, 'Subject': {'S': 'foo-range-key'}}}
            self.assertEqual(req.call_args[0][1], params)
        with patch(PATCH_METHOD) as req:
            req.return_value = {}
            conn.put_item('foo-key', range_key='foo-range-key', attributes={'ForumName': 'foo-value'})
            params = {'ReturnConsumedCapacity': 'TOTAL', 'Item': {'ForumName': {'S': 'foo-value'}, 'Subject': {'S': 'foo-range-key'}}, 'TableName': self.test_table_name}
            self.assertEqual(req.call_args[0][1], params)
        with patch(PATCH_METHOD) as req:
            req.return_value = (HttpOK(), {})
            conn.put_item('foo-key', range_key='foo-range-key', attributes={'ForumName': 'foo-value'}, condition=Path('ForumName').does_not_exist())
            params = {'ReturnConsumedCapacity': 'TOTAL', 'Item': {'ForumName': {'S': 'foo-value'}, 'Subject': {'S': 'foo-range-key'}}, 'TableName': self.test_table_name, 'ConditionExpression': 'attribute_not_exists (#0)', 'ExpressionAttributeNames': {'#0': 'ForumName'}}
            self.assertEqual(req.call_args[0][1], params)
    def test_batch_write_item(self):
        items = []
        conn = TableConnection(self.test_table_name, meta_table=MetaTable(DESCRIBE_TABLE_DATA[TABLE_KEY]))
        for i in range(10):
            items.append({'ForumName': 'FooForum', 'Subject': 'thread-{}'.format(i)})
        with patch(PATCH_METHOD) as req:
            req.return_value = {}
            conn.batch_write_item(put_items=items)
            params = {'ReturnConsumedCapacity': 'TOTAL', 'RequestItems': {self.test_table_name: [{'PutRequest': {'Item': {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-0'}}}}, {'PutRequest': {'Item': {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-1'}}}}, {'PutRequest': {'Item': {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-2'}}}}, {'PutRequest': {'Item': {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-3'}}}}, {'PutRequest': {'Item': {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-4'}}}}, {'PutRequest': {'Item': {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-5'}}}}, {'PutRequest': {'Item': {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-6'}}}}, {'PutRequest': {'Item': {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-7'}}}}, {'PutRequest': {'Item': {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-8'}}}}, {'PutRequest': {'Item': {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-9'}}}}]}}
            self.assertEqual(req.call_args[0][1], params)
    def test_batch_get_item(self):
        items = []
        conn = TableConnection(self.test_table_name, meta_table=MetaTable(DESCRIBE_TABLE_DATA[TABLE_KEY]))
        for i in range(10):
            items.append({'ForumName': 'FooForum', 'Subject': 'thread-{}'.format(i)})
        with patch(PATCH_METHOD) as req:
            req.return_value = {}
            conn.batch_get_item(items)
            params = {'ReturnConsumedCapacity': 'TOTAL', 'RequestItems': {self.test_table_name: {'Keys': [{'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-0'}}, {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-1'}}, {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-2'}}, {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-3'}}, {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-4'}}, {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-5'}}, {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-6'}}, {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-7'}}, {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-8'}}, {'ForumName': {'S': 'FooForum'}, 'Subject': {'S': 'thread-9'}}]}}}
            self.assertEqual(req.call_args[0][1], params)
    def test_query(self):
        conn = TableConnection(self.test_table_name, meta_table=MetaTable(DESCRIBE_TABLE_DATA[TABLE_KEY]))
        with patch(PATCH_METHOD) as req:
            req.return_value = {}
            conn.query('FooForum', Path('Subject').startswith('thread'))
            params = {'ReturnConsumedCapacity': 'TOTAL', 'KeyConditionExpression': '(#0 = :0 AND begins_with (#1, :1))', 'ExpressionAttributeNames': {'#0': 'ForumName', '#1': 'Subject'}, 'ExpressionAttributeValues': {':0': {'S': 'FooForum'}, ':1': {'S': 'thread'}}, 'TableName': self.test_table_name}
            self.assertEqual(req.call_args[0][1], params)
    def test_scan(self):
        conn = TableConnection(self.test_table_name, meta_table=MetaTable(DESCRIBE_TABLE_DATA[TABLE_KEY]))
        with patch(PATCH_METHOD) as req:
            req.return_value = (HttpOK(), {})
            conn.scan()
            params = {'ReturnConsumedCapacity': 'TOTAL', 'TableName': self.test_table_name}
            self.assertEqual(req.call_args[0][1], params)
# NOTE(review): the line below is a truncated decorator (presumably
# @register_model('s2t_transformer')) lost during extraction.
_model('s2t_transformer')
class S2TTransformerModel(FairseqEncoderDecoderModel):
    """Speech-to-text Transformer: convolutional subsampler over input
    frames followed by a standard Transformer encoder/decoder."""
    def __init__(self, encoder, decoder):
        super().__init__(encoder, decoder)
    # Bug fix: these four methods are called through the class
    # (e.g. cls.build_encoder(args) below) but were missing their
    # @staticmethod/@classmethod decorators, which would bind the first
    # positional argument to the parameter named `cls`.
    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the argument parser."""
        parser.add_argument('--conv-kernel-sizes', type=str, metavar='N', help='kernel sizes of Conv1d subsampling layers')
        parser.add_argument('--conv-channels', type=int, metavar='N', help='# of channels in Conv1d subsampling layers')
        parser.add_argument('--activation-fn', type=str, default='relu', choices=utils.get_available_activation_fns(), help='activation function to use')
        parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
        parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights')
        parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.')
        parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension')
        parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN')
        parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers')
        parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads')
        parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
        parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN')
        parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers')
        parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads')
        parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block')
        parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings')
        parser.add_argument('--layernorm-embedding', action='store_true', help='add layernorm to embedding')
        parser.add_argument('--no-scale-embedding', action='store_true', help='if True, dont scale embeddings')
        parser.add_argument('--load-pretrained-encoder-from', type=str, metavar='STR', help='model to take encoder weights from (for initialization)')
        parser.add_argument('--encoder-freezing-updates', type=int, metavar='N', help='freeze encoder for first N updates')
    @classmethod
    def build_encoder(cls, args):
        """Build the speech encoder, optionally loading pretrained weights."""
        encoder = S2TTransformerEncoder(args)
        pretraining_path = getattr(args, 'load_pretrained_encoder_from', None)
        if (pretraining_path is not None):
            if (not Path(pretraining_path).exists()):
                # Missing checkpoint is tolerated: warn and train from scratch.
                logger.warning(f'skipped pretraining because {pretraining_path} does not exist')
            else:
                encoder = checkpoint_utils.load_pretrained_component_from_model(component=encoder, checkpoint=pretraining_path)
                logger.info(f'loaded pretrained encoder from: {pretraining_path}')
        return encoder
    @classmethod
    def build_decoder(cls, args, task, embed_tokens):
        """Build the (scriptable) Transformer decoder."""
        return TransformerDecoderScriptable(args, task.target_dictionary, embed_tokens)
    @classmethod
    def build_model(cls, args, task):
        """Build an encoder/decoder model instance for the given task."""
        base_architecture(args)
        def build_embedding(dictionary, embed_dim):
            num_embeddings = len(dictionary)
            padding_idx = dictionary.pad()
            return Embedding(num_embeddings, embed_dim, padding_idx)
        decoder_embed_tokens = build_embedding(task.target_dictionary, args.decoder_embed_dim)
        encoder = cls.build_encoder(args)
        decoder = cls.build_decoder(args, task, decoder_embed_tokens)
        return cls(encoder, decoder)
    def get_normalized_probs(self, net_output: Tuple[(Tensor, Optional[Dict[(str, List[Optional[Tensor]])]])], log_probs: bool, sample: Optional[Dict[(str, Tensor)]]=None):
        """Return (log-)probabilities over the vocabulary; marks the output
        as batch-first for the sequence generator."""
        lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample)
        lprobs.batch_first = True
        return lprobs
    def forward(self, src_tokens, src_lengths, prev_output_tokens):
        """Encode the source features, then decode teacher-forced targets."""
        encoder_out = self.encoder(src_tokens=src_tokens, src_lengths=src_lengths)
        decoder_out = self.decoder(prev_output_tokens=prev_output_tokens, encoder_out=encoder_out)
        return decoder_out
class ChannelPadding(nn.Module):
    """Grow a 4-D tensor's channel dimension from ``in_planes`` to
    ``out_planes`` by concatenating (out_planes - in_planes) // 2 zero
    channels on each side of the input."""
    def __init__(self, in_planes, out_planes):
        super(ChannelPadding, self).__init__()
        pad_channels = (out_planes - in_planes) // 2
        # Registered as a buffer so it follows the module's device/dtype.
        self.register_buffer('padding', torch.zeros(pad_channels).view(1, -1, 1, 1))
    def forward(self, input):
        assert (len(input.size()) == 4), 'only support for 4-D tensor for now'
        batch, height, width = input.size(0), input.size(2), input.size(3)
        zeros = self.padding.expand(batch, -1, height, width)
        # Zero channels are placed symmetrically around the input channels.
        return torch.cat([zeros, input, zeros], dim=1)
def get_so(atom, a, basis, kmesh):
    """Build a symmorphic periodic cell and return its symmetry-adapted
    basis for the given k-point mesh.

    Returns:
        (sos_ks, irrep_ids_ks) as produced by ``symm_adapted_basis``.
    """
    cell = gto.Cell()
    cell.atom = atom
    cell.a = a
    cell.basis = basis
    # Enable (symmorphic-only) space-group symmetry before building.
    cell.space_group_symmetry = True
    cell.symmorphic = True
    cell.build()
    kpts = cell.make_kpts(kmesh, with_gamma_point=True, space_group_symmetry=True)
    return symm_adapted_basis(cell, kpts)
class TestCreateGC(EndianTest):
    """Round-trip pack/unpack test for the X11 CreateGC request.

    NOTE(review): several values in req_args_0 below were lost during
    extraction (keys followed by a bare comma, e.g. 'plane_mask',
    'foreground', 'tile', 'cid'); as written this dict is not valid
    Python -- restore the literals from the original test file.
    """
    def setUp(self):
        self.req_args_0 = {'attrs': {'function': 7, 'plane_mask': , 'foreground': , 'background': , 'line_width': 61484, 'line_style': 2, 'cap_style': 2, 'join_style': 2, 'fill_style': 0, 'fill_rule': 1, 'tile': , 'stipple': , 'tile_stipple_x_origin': (- 25980), 'tile_stipple_y_origin': (- 23968), 'font': , 'subwindow_mode': 0, 'graphics_exposures': 0, 'clip_x_origin': (- 22581), 'clip_y_origin': (- 14920), 'clip_mask': , 'dash_offset': 46571, 'dashes': 215, 'arc_mode': 0}, 'cid': , 'drawable': }
        self.req_bin_0 = b'7\x00\x1b\x00\xe9\xe7\x00X\x94\xb5/Q\xff\xff\x7f\x00\x07\x00\x00\x00\xe9\xa9M/\x89w{$\\\x0c2\x14,\xf0\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00/\xc8Dw\xe3\xeb\xeaT\x84\x9a\x00\x00`\xa2\x00\x00\xf7\x04\xdb!\x00\x00\x00\x00\x00\x00\x00\x00\xcb\xa7\x00\x00\xb8\xc5\x00\x00\xed\x96\x11$\xeb\xb5\x00\x00\xd7\x00\x00\x00\x00\x00\x00\x00'
    def testPackRequest0(self):
        # Serialize the request args and compare against the expected bytes.
        bin = request.CreateGC._request.to_binary(*(), **self.req_args_0)
        self.assertBinaryEqual(bin, self.req_bin_0)
    def testUnpackRequest0(self):
        # Parse the expected bytes back and compare against the args dict.
        (args, remain) = request.CreateGC._request.parse_binary(self.req_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_0)
class Solution(object):
    """LeetCode 479: Largest Palindrome Product."""
    def largestPalindrome(self, n):
        """Return the largest palindrome that is a product of two n-digit
        numbers, modulo 1337.

        Searches palindromes hi*10^n + lo from the largest downward; a
        palindrome factors into two n-digit numbers iff the quadratic
        x^2 - a*x + lo (with a = 2*10^n - hi... derived discriminant
        a^2 - 4*lo) has a perfect-square discriminant.
        """
        import math  # local import keeps the fix self-contained
        if (n == 1):
            return 9
        # Bug fix: xrange is Python 2 only; use range.
        for a in range(2, (9 * (10 ** (n - 1)))):
            hi = ((10 ** n) - a)
            lo = int(str(hi)[::(- 1)])
            disc = ((a ** 2) - (4 * lo))
            if (disc < 0):
                continue
            # Exact integer perfect-square test instead of the original
            # float-sqrt comparison, which loses precision for large n.
            root = math.isqrt(disc)
            if ((root * root) == disc):
                return ((lo + ((10 ** n) * ((10 ** n) - a))) % 1337)
class UnboundType(ProperType):
    """A type reference that has not yet been bound to a definition
    (name resolution happens in a later semantic-analysis pass)."""
    __slots__ = ('name', 'args', 'optional', 'empty_tuple_index', 'original_str_expr', 'original_str_fallback')
    def __init__(self, name: (str | None), args: (Sequence[Type] | None)=None, line: int=(- 1), column: int=(- 1), optional: bool=False, empty_tuple_index: bool=False, original_str_expr: (str | None)=None, original_str_fallback: (str | None)=None) -> None:
        super().__init__(line, column)
        # Normalize a missing/empty args sequence to an empty tuple.
        if (not args):
            args = []
        assert (name is not None)
        self.name = name
        self.args = tuple(args)
        self.optional = optional
        self.empty_tuple_index = empty_tuple_index
        # Original string-literal expression/fallback, preserved for
        # forward-reference diagnostics.
        self.original_str_expr = original_str_expr
        self.original_str_fallback = original_str_fallback
    def copy_modified(self, args: Bogus[(Sequence[Type] | None)]=_dummy) -> UnboundType:
        """Return a copy, optionally replacing the type arguments.

        The _dummy sentinel distinguishes "not passed" from an explicit
        None/empty value.
        """
        if (args is _dummy):
            args = self.args
        return UnboundType(name=self.name, args=args, line=self.line, column=self.column, optional=self.optional, empty_tuple_index=self.empty_tuple_index, original_str_expr=self.original_str_expr, original_str_fallback=self.original_str_fallback)
    def accept(self, visitor: TypeVisitor[T]) -> T:
        return visitor.visit_unbound_type(self)
    def __hash__(self) -> int:
        # Hash must stay consistent with __eq__ below (same field subset
        # minus original_str_fallback, whose equality is implied).
        return hash((self.name, self.optional, tuple(self.args), self.original_str_expr))
    def __eq__(self, other: object) -> bool:
        if (not isinstance(other, UnboundType)):
            return NotImplemented
        return ((self.name == other.name) and (self.optional == other.optional) and (self.args == other.args) and (self.original_str_expr == other.original_str_expr) and (self.original_str_fallback == other.original_str_fallback))
    def serialize(self) -> JsonDict:
        return {'.class': 'UnboundType', 'name': self.name, 'args': [a.serialize() for a in self.args], 'expr': self.original_str_expr, 'expr_fallback': self.original_str_fallback}
    # NOTE(review): takes `cls` but has no @classmethod decorator -- the
    # decorator was presumably stripped during extraction; confirm upstream.
    def deserialize(cls, data: JsonDict) -> UnboundType:
        assert (data['.class'] == 'UnboundType')
        return UnboundType(data['name'], [deserialize_type(a) for a in data['args']], original_str_expr=data['expr'], original_str_fallback=data['expr_fallback'])
.sphinx(srcdir=srcdir)
def test_lazy_tooltips_notlazy(app, status, warning):
app.build()
path = (app.outdir / '_static/js/hoverxref.js')
assert (path.exists() is True)
content = open(path).read()
chunks = [".each(function () { $(this).removeAttr('title') });"]
for chunk in chunks:
assert (chunk in content)
ignored_chunks = [".one('mouseenter click touchstart tap', function(event) {", ".tooltipster('open');"]
for chunk in ignored_chunks:
assert (chunk not in content) |
.parametrize('username,password', users)
.parametrize('project_id', projects)
def test_project_export_csvsemicolon(db, client, username, password, project_id):
client.login(username=username, password=password)
url = reverse('project_export', args=[project_id, 'csvsemicolon'])
response = client.get(url)
if (project_id in export_project_permission_map.get(username, [])):
assert (response.status_code == 200)
elif password:
assert (response.status_code == 403)
else:
assert (response.status_code == 302) |
def test_zoom_ratio():
    """A ViewBox with a locked 1:1 aspect must keep its view square even
    when asked for a non-square range."""
    def _extents(box):
        # Return (viewRange, width, height) of the current view.
        vr = box.getState()['viewRange']
        return vr, (vr[0][1] - vr[0][0]), (vr[1][1] - vr[1][0])
    vb = pg.ViewBox(lockAspect=1)
    vb.setFixedHeight(10)
    vb.setFixedWidth(10)
    # Square request: honored exactly.
    square = pg.QtCore.QRect(0, 0, 10, 10)
    vb.setRange(square, padding=0)
    expected = [[square.left(), square.right()], [square.top(), square.bottom()]]
    view_range, view_width, view_height = _extents(vb)
    assert (view_width == view_height)
    assert (view_range == expected)
    # Non-square request: aspect lock forces width == height anyway.
    tall = pg.QtCore.QRect(0, 0, 10, 20)
    vb.setRange(tall, padding=0)
    _, view_width, view_height = _extents(vb)
    assert (view_width == view_height)
def validate_subprotocols(subprotocols: Sequence[Subprotocol]) -> None:
    """Validate a sequence of WebSocket subprotocol tokens.

    Raises:
        TypeError: if *subprotocols* is not a sequence, or is a bare str
            (a str is a Sequence, so it must be rejected explicitly).
        ValueError: if any entry is not a valid HTTP token.
    """
    if (not isinstance(subprotocols, Sequence)):
        raise TypeError('subprotocols must be a list')
    if isinstance(subprotocols, str):
        raise TypeError('subprotocols must be a list, not a str')
    for subprotocol in subprotocols:
        if (_token_re.fullmatch(subprotocol) is None):
            raise ValueError(f'invalid subprotocol: {subprotocol}')
class Median(CtrlNode):
    """Flowchart node applying a median filter of configurable size."""
    nodeName = 'MedianFilter'
    # Single integer spin-box control for the filter window size.
    uiTemplate = [('n', 'intSpin', {'min': 1, 'max': 1000000})]
    def processData(self, data):
        """Run scipy's median filter over *data* using the 'n' control."""
        try:
            import scipy.ndimage
        except ImportError:
            # scipy is an optional dependency; fail with a clear message.
            raise Exception('MedianFilter node requires the package scipy.ndimage.')
        window = self.ctrls['n'].value()
        return scipy.ndimage.median_filter(data, window)
def preprocess_fromnpy_save_to_queue(list_of_images: List[np.ndarray], list_of_segs_from_prev_stage: Union[(List[np.ndarray], None)], list_of_image_properties: List[dict], truncated_ofnames: Union[(List[str], None)], plans_manager: PlansManager, dataset_json: dict, configuration_manager: ConfigurationManager, target_queue: Queue, done_event: Event, abort_event: Event, verbose: bool=False):
    """Worker loop: preprocess each case and push it onto *target_queue*.

    For every image, runs the configured preprocessor, optionally stacks a
    one-hot encoding of the previous-stage segmentation onto the data, and
    enqueues a dict with the tensor, its properties, and the output name.
    Sets *done_event* when all items are queued; on any exception sets
    *abort_event* and re-raises so the consumer can stop.
    """
    try:
        label_manager = plans_manager.get_label_manager(dataset_json)
        preprocessor = configuration_manager.preprocessor_class(verbose=verbose)
        for idx in range(len(list_of_images)):
            (data, seg) = preprocessor.run_case_npy(list_of_images[idx], (list_of_segs_from_prev_stage[idx] if (list_of_segs_from_prev_stage is not None) else None), list_of_image_properties[idx], plans_manager, configuration_manager, dataset_json)
            if ((list_of_segs_from_prev_stage is not None) and (list_of_segs_from_prev_stage[idx] is not None)):
                # Cascade mode: append one-hot previous-stage labels as
                # extra input channels.
                seg_onehot = convert_labelmap_to_one_hot(seg[0], label_manager.foreground_labels, data.dtype)
                data = np.vstack((data, seg_onehot))
            data = torch.from_numpy(data).contiguous().float()
            # NOTE: the key 'data_properites' is misspelled upstream; it is
            # preserved here because consumers read exactly this key.
            item = {'data': data, 'data_properites': list_of_image_properties[idx], 'ofile': (truncated_ofnames[idx] if (truncated_ofnames is not None) else None)}
            success = False
            while (not success):
                try:
                    # Poll with a short timeout so an abort is noticed even
                    # while the queue is full.
                    if abort_event.is_set():
                        return
                    target_queue.put(item, timeout=0.01)
                    success = True
                except queue.Full:
                    pass
        done_event.set()
    except Exception as e:
        # Signal the consumer before propagating the failure.
        abort_event.set()
        raise e
# NOTE(review): the line below is a truncated decorator (presumably
# @pytest.mark.parametrize) lost during extraction.
.parametrize('n,blocks,expected_chunks', [(1, 1, [1]), (2, 1, [2]), (2, 2, ([1] * 2)), (3, 1, [3]), (3, 3, ([1] * 3)), (3, 2, [2, 1]), (7, 2, [4, 3]), (7, 3, [3, 2, 2]), (7, 7, ([1] * 7))])
def test_split_array_chunks__precomputed(n: int, blocks: int, expected_chunks: List[int]) -> None:
    # n elements split into `blocks` near-equal chunks, larger chunks first.
    assert (split_array_chunks(n, blocks) == tuple(expected_chunks))
def main(_):
    """Evaluate a TF-Slim image classifier checkpoint on a dataset split.

    Builds the TF1 graph-mode input pipeline, network, and streaming
    Accuracy / Recall@5 metrics, then runs a single evaluation pass with
    slim.evaluation.evaluate_once. Flag-driven; the positional arg is the
    unused argv from tf.app.run.
    """
    if (not FLAGS.dataset_dir):
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        tf_global_step = slim.get_or_create_global_step()
        # Dataset and network selection from flags.
        dataset = dataset_factory.get_dataset(FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
        network_fn = nets_factory.get_network_fn(FLAGS.model_name, num_classes=(dataset.num_classes - FLAGS.labels_offset), is_training=False)
        # Deterministic (unshuffled) provider for evaluation.
        provider = slim.dataset_data_provider.DatasetDataProvider(dataset, shuffle=False, common_queue_capacity=(2 * FLAGS.batch_size), common_queue_min=FLAGS.batch_size)
        [image, label] = provider.get(['image', 'label'])
        label -= FLAGS.labels_offset
        preprocessing_name = (FLAGS.preprocessing_name or FLAGS.model_name)
        image_preprocessing_fn = preprocessing_factory.get_preprocessing(preprocessing_name, is_training=False)
        eval_image_size = (FLAGS.eval_image_size or network_fn.default_image_size)
        image = image_preprocessing_fn(image, eval_image_size, eval_image_size)
        (images, labels) = tf.train.batch([image, label], batch_size=FLAGS.batch_size, num_threads=FLAGS.num_preprocessing_threads, capacity=(5 * FLAGS.batch_size))
        (logits, _) = network_fn(images)
        # Optionally evaluate the exponential-moving-average shadow weights.
        if FLAGS.moving_average_decay:
            variable_averages = tf.train.ExponentialMovingAverage(FLAGS.moving_average_decay, tf_global_step)
            variables_to_restore = variable_averages.variables_to_restore(slim.get_model_variables())
            variables_to_restore[tf_global_step.op.name] = tf_global_step
        else:
            variables_to_restore = slim.get_variables_to_restore()
        predictions = tf.argmax(logits, 1)
        labels = tf.squeeze(labels)
        # Streaming metrics accumulated across the whole eval pass.
        (names_to_values, names_to_updates) = slim.metrics.aggregate_metric_map({'Accuracy': slim.metrics.streaming_accuracy(predictions, labels), 'Recall_5': slim.metrics.streaming_recall_at_k(logits, labels, 5)})
        for (name, value) in names_to_values.items():
            summary_name = ('eval/%s' % name)
            op = tf.summary.scalar(summary_name, value, collections=[])
            # tf.Print also echoes each metric value to stderr.
            op = tf.Print(op, [value], summary_name)
            tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
        if FLAGS.max_num_batches:
            num_batches = FLAGS.max_num_batches
        else:
            # Cover the whole split (last batch may repeat examples).
            num_batches = math.ceil((dataset.num_samples / float(FLAGS.batch_size)))
        if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
            checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
        else:
            checkpoint_path = FLAGS.checkpoint_path
        tf.logging.info(('Evaluating %s' % checkpoint_path))
        slim.evaluation.evaluate_once(master=FLAGS.master, checkpoint_path=checkpoint_path, logdir=FLAGS.eval_dir, num_evals=num_batches, eval_op=list(names_to_updates.values()), variables_to_restore=variables_to_restore)
class TestPassedDrivenMilesCompositeMetric(unittest.TestCase):
    """Tests for PassedDrivenMilesCompositeMetric: miles driven before the
    earliest failed frame reported by any configured validator.

    Each test feeds 20 frames of 1.0 driven miles, so the expected result
    equals the index of the first failure (or the full total).
    """
    def test_failed_frames(self) -> None:
        # Single validator failing at frame 15 -> 15 miles counted.
        validation_results: Dict[(str, validators.ValidatorOutput)] = {'validator_a': validators.ValidatorOutput(is_valid_scene=False, failed_frames=[15])}
        metric_results: Dict[(str, torch.Tensor)] = {metrics.SimulatedDrivenMilesMetric.metric_name: torch.ones(20)}
        simulation_output = mock.Mock()
        pdm_metric = cm.PassedDrivenMilesCompositeMetric('passed_driven_miles', ['validator_a'])
        result = pdm_metric.compute(metric_results, validation_results, simulation_output)
        simulation_output.assert_not_called()
        self.assertEqual(result, 15.0)
    def test_failed_frames_multiple_interventions(self) -> None:
        # Earliest failure across all validators (frame 10) wins.
        validation_results: Dict[(str, validators.ValidatorOutput)] = {'validator_a': validators.ValidatorOutput(is_valid_scene=False, failed_frames=[10, 11]), 'validator_b': validators.ValidatorOutput(is_valid_scene=False, failed_frames=[15]), 'validator_c': validators.ValidatorOutput(is_valid_scene=False, failed_frames=[15, 18]), 'validator_d': validators.ValidatorOutput(is_valid_scene=True, failed_frames=[])}
        metric_results: Dict[(str, torch.Tensor)] = {metrics.SimulatedDrivenMilesMetric.metric_name: torch.ones(20)}
        simulation_output = mock.Mock()
        pdm_metric = cm.PassedDrivenMilesCompositeMetric('passed_driven_miles', ['validator_a', 'validator_b', 'validator_c', 'validator_d'])
        result = pdm_metric.compute(metric_results, validation_results, simulation_output)
        simulation_output.assert_not_called()
        self.assertEqual(result, 10.0)
    def test_no_failed_frames(self) -> None:
        # No failures -> all 20 miles counted.
        validation_results: Dict[(str, validators.ValidatorOutput)] = {'validator_a': validators.ValidatorOutput(is_valid_scene=True, failed_frames=[])}
        metric_results: Dict[(str, torch.Tensor)] = {metrics.SimulatedDrivenMilesMetric.metric_name: torch.ones(20)}
        simulation_output = mock.Mock()
        pdm_metric = cm.PassedDrivenMilesCompositeMetric('passed_driven_miles', ['validator_a'])
        result = pdm_metric.compute(metric_results, validation_results, simulation_output)
        simulation_output.assert_not_called()
        self.assertEqual(result, 20.0)
    def test_all_failed_frames(self) -> None:
        # Failure at frame 0 -> nothing counted.
        timesteps = 20
        validation_results: Dict[(str, validators.ValidatorOutput)] = {'validator_a': validators.ValidatorOutput(is_valid_scene=True, failed_frames=list(range(timesteps)))}
        metric_results: Dict[(str, torch.Tensor)] = {metrics.SimulatedDrivenMilesMetric.metric_name: torch.ones(timesteps)}
        simulation_output = mock.Mock()
        pdm_metric = cm.PassedDrivenMilesCompositeMetric('passed_driven_miles', ['validator_a'])
        result = pdm_metric.compute(metric_results, validation_results, simulation_output)
        simulation_output.assert_not_called()
        self.assertEqual(result, 0.0)
    def test_ignore_entire_scene(self) -> None:
        # ignore_entire_scene=True zeroes the whole scene on any failure.
        validation_results: Dict[(str, validators.ValidatorOutput)] = {'validator_a': validators.ValidatorOutput(is_valid_scene=False, failed_frames=[15])}
        metric_results: Dict[(str, torch.Tensor)] = {metrics.SimulatedDrivenMilesMetric.metric_name: torch.ones(20)}
        simulation_output = mock.Mock()
        pdm_metric = cm.PassedDrivenMilesCompositeMetric('passed_driven_miles', ['validator_a'], ignore_entire_scene=True)
        result = pdm_metric.compute(metric_results, validation_results, simulation_output)
        self.assertEqual(result, 0.0)
def conduct_team_search(username, query, encountered_teams, results):
    """Append up to 5 matching teams for the current user to *results*.

    Teams already present in *encountered_teams* (a set of team ids,
    mutated in place) are skipped so callers can merge several searches
    without duplicates.
    """
    matching_teams = model.team.get_matching_user_teams(query, get_authenticated_user(), limit=5)
    for team in matching_teams:
        if (team.id in encountered_teams):
            continue
        encountered_teams.add(team.id)
        entry = {
            'kind': 'team',
            'name': team.name,
            'organization': search_entity_view(username, team.organization),
            'avatar': avatar.get_data_for_team(team),
            'score': TEAM_SEARCH_SCORE,
            'href': ((('/organization/' + team.organization.username) + '/teams/') + team.name),
        }
        results.append(entry)
def segm2json(dataset, results):
    """Convert per-image (detection, segmentation) results into COCO-style
    bbox and segm result dicts.

    Returns:
        (bbox_json_results, segm_json_results) lists of dicts.
    """
    bbox_json_results = []
    segm_json_results = []
    for idx in range(len(dataset)):
        img_id = dataset.img_ids[idx]
        (det, seg) = results[idx]
        for label, bboxes in enumerate(det):
            # One bbox entry per detection of this class.
            for i in range(bboxes.shape[0]):
                bbox_json_results.append({
                    'image_id': img_id,
                    'bbox': xyxy2xywh(bboxes[i]),
                    'score': float(bboxes[i][4]),
                    'category_id': dataset.cat_ids[label],
                })
            # Segmentation results either carry their own mask scores
            # (len(seg) == 2) or reuse the bbox scores.
            if (len(seg) == 2):
                segms = seg[0][label]
                mask_score = seg[1][label]
            else:
                segms = seg[label]
                mask_score = [bbox[4] for bbox in bboxes]
            for i in range(bboxes.shape[0]):
                entry = {
                    'image_id': img_id,
                    'score': float(mask_score[i]),
                    'category_id': dataset.cat_ids[label],
                }
                # RLE counts arrive as bytes; COCO json expects str.
                segms[i]['counts'] = segms[i]['counts'].decode()
                entry['segmentation'] = segms[i]
                segm_json_results.append(entry)
    return (bbox_json_results, segm_json_results)
def test_manual_add(tmpdir):
    """Smoke test: init a workflow via the manual CLI, then add a second
    workflow to the same file-backed state."""
    base = str(tmpdir)
    workdir_one = os.path.join(base, 'workdir_one')
    workdir_two = os.path.join(base, 'workdir_two')
    metadir = os.path.join(base, 'metadir')
    statefile = os.path.join(base, 'state.json')
    state_arg = ('filebacked:' + statefile)
    runner = CliRunner()
    runner.invoke(yadage.manualcli.init, [workdir_one, 'workflow.yml', '-t', 'tests/testspecs/local-helloworld', '-s', state_arg, '-p', 'par=value', '--metadir', metadir])
    runner.invoke(yadage.manualcli.add, [workdir_two, 'workflow.yml', '-t', 'tests/testspecs/mapreduce', '-s', state_arg])
def test_type_complex_while_labels() -> None:
    """CFG of nested while loops should only carry True/False edge labels."""
    src = "\n    i = 0\n    while i < 10:\n        j = 0\n        while j < 5:\n            j += 1\n        i += 1\n\n        if i > 4:\n            print('hi')\n\n    print('not else')\n    "
    cfg = build_cfg(src)
    assert (_extract_labels(cfg) == {'True', 'False'})
class Request(testprocess.Line):
    """One parsed request-log line emitted by the test webserver.

    ``data`` must be a JSON object with exactly the keys ``path``, ``verb``
    and ``status``; anything else raises ``testprocess.InvalidLine``.
    """

    def __init__(self, data):
        super().__init__(data)
        try:
            parsed = json.loads(data)
        except ValueError:
            raise testprocess.InvalidLine(data)
        assert isinstance(parsed, dict)
        assert (set(parsed.keys()) == {'path', 'verb', 'status'})
        self.verb = parsed['verb']
        path = parsed['path']
        # Strip trailing slashes, but keep the bare root path intact.
        self.path = ('/' if (path == '/') else path.rstrip('/'))
        self.status = parsed['status']
        self._check_status()

    def _check_status(self):
        """Check that the HTTP status matches what we expect for the path."""
        path_to_statuses = {
            '/favicon.ico': [HTTPStatus.OK, HTTPStatus.PARTIAL_CONTENT, HTTPStatus.NOT_MODIFIED],
            '/does-not-exist': [HTTPStatus.NOT_FOUND],
            '/does-not-exist-2': [HTTPStatus.NOT_FOUND],
            '/404': [HTTPStatus.NOT_FOUND],
            '/redirect-later': [HTTPStatus.FOUND],
            '/redirect-self': [HTTPStatus.FOUND],
            '/redirect-to': [HTTPStatus.FOUND],
            '/relative-redirect': [HTTPStatus.FOUND],
            '/absolute-redirect': [HTTPStatus.FOUND],
            # NOTE(review): this entry was garbled in the source
            # ("'/redirect- [HTTPStatus.FOUND],"); key reconstructed from the
            # '/redirect-' prefix — verify against the webserver's routes.
            '/redirect-later-continue': [HTTPStatus.FOUND],
            '/cookies/set': [HTTPStatus.FOUND],
            '/cookies/set-custom': [HTTPStatus.FOUND],
            '/500-inline': [HTTPStatus.INTERNAL_SERVER_ERROR],
            '/500': [HTTPStatus.INTERNAL_SERVER_ERROR],
        }
        for i in range(25):
            path_to_statuses['/redirect/{}'.format(i)] = [HTTPStatus.FOUND]
        for suffix in ['', '1', '2', '3', '4', '5', '6']:
            key = '/basic-auth/user{suffix}/password{suffix}'.format(suffix=suffix)
            path_to_statuses[key] = [HTTPStatus.UNAUTHORIZED, HTTPStatus.OK]
        default_statuses = [HTTPStatus.OK, HTTPStatus.NOT_MODIFIED]
        # The QUrl construction was garbled in the source ("QUrl((' + ...").
        # Restored per upstream: build a full URL so QUrl.path() strips any
        # query string / fragment before the lookup.
        sanitized = QUrl(('http://localhost' + self.path)).path()
        expected_statuses = path_to_statuses.get(sanitized, default_statuses)
        if (self.status not in expected_statuses):
            raise AssertionError('{} loaded with status {} but expected {}'.format(sanitized, self.status, ' / '.join((repr(e) for e in expected_statuses))))

    def __eq__(self, other):
        # Explicitly opt out of value equality; Python falls back to the
        # other operand's __eq__ / identity comparison.
        return NotImplemented
def test_kinesis_subscription_with_starting_position_at_timestamp():
    """A kinesis subscription with starting_position_timestamp is parsed intact."""
    expected = {'stream': 'arn:aws:kinesis:eu-west-1::stream/services', 'batch_size': 10, 'starting_position_timestamp': '2017-11-01T11:00:00Z'}
    cfg = config.Config(EX_CONFIG, (EX_CONFIG + '/lambda-with-subscription_at_ts.json'))
    kinesis_raw = cfg.raw['subscription']['kinesis']
    assert (kinesis_raw['stream'] == expected['stream'])
    assert (kinesis_raw['batch_size'] == expected['batch_size'])
    assert (kinesis_raw['starting_position_timestamp'] == expected['starting_position_timestamp'])
class ClassicalOptimizer():
    """Classical (CPLEX) reference solver for an integer-programming instance.

    NOTE(review): the constraint layout (n^2 binary assignment variables plus
    n-1 continuous auxiliaries) looks like a vehicle-routing / assignment
    formulation — confirm against the surrounding module.
    """

    def __init__(self, instance, n, K):
        # instance: cost matrix, reshaped to n^2 for the objective below.
        # n: problem size (number of nodes); K: presumably number of
        # vehicles/selections — TODO confirm against caller.
        self.instance = instance
        self.n = n
        self.K = K

    def compute_allowed_combinations(self):
        """Return the binomial coefficient C(n, K) (computed via float division)."""
        f = math.factorial
        return ((f(self.n) / f(self.K)) / f((self.n - self.K)))

    def cplex_solution(self):
        """Solve the ILP with CPLEX.

        Returns:
            (x, cost): solution vector as an ndarray and the objective value,
            or None if CPLEX raises an error.
        """
        instance = self.instance
        n = self.n
        K = self.K
        # Objective: flattened n^2 cost matrix, plus zero cost for the (n-1)
        # auxiliary continuous variables.
        my_obj = (list(instance.reshape(1, (n ** 2))[0]) + [0.0 for x in range(0, (n - 1))])
        my_ub = [1 for x in range(0, (((n ** 2) + n) - 1))]
        # Binary-like integer vars (lb 0), auxiliaries bounded below by 0.1.
        my_lb = ([0 for x in range(0, (n ** 2))] + [0.1 for x in range(0, (n - 1))])
        # 'I' = integer for the n^2 assignment entries, 'C' = continuous for the rest.
        my_ctype = (''.join(['I' for x in range(0, (n ** 2))]) + ''.join(['C' for x in range(0, (n - 1))]))
        # Right-hand sides / senses for the four constraint groups added in
        # populatebyrow (row sums, column sums, pairwise aux links, diagonal).
        my_rhs = (((2 * ([K] + [1 for x in range(0, (n - 1))])) + [(1 - 0.1) for x in range(0, (((n - 1) ** 2) - (n - 1)))]) + [0 for x in range(0, n)])
        my_sense = ((''.join(['E' for x in range(0, (2 * n))]) + ''.join(['L' for x in range(0, (((n - 1) ** 2) - (n - 1)))])) + ''.join(['E' for x in range(0, n)]))
        try:
            my_prob = cplex.Cplex()
            self.populatebyrow(my_prob, my_obj, my_ub, my_lb, my_ctype, my_sense, my_rhs)
            my_prob.solve()
        except CplexError as exc:
            # Best-effort: report the CPLEX error and bail out (returns None).
            print(exc)
            return
        x = my_prob.solution.get_values()
        x = np.array(x)
        cost = my_prob.solution.get_objective_value()
        return (x, cost)

    def populatebyrow(self, prob, my_obj, my_ub, my_lb, my_ctype, my_sense, my_rhs):
        """Add variables and all constraint rows to the CPLEX problem *prob*.

        Constraint order must match the my_rhs/my_sense layout built in
        cplex_solution.
        """
        n = self.n
        prob.objective.set_sense(prob.objective.sense.minimize)
        prob.variables.add(obj=my_obj, lb=my_lb, ub=my_ub, types=my_ctype)
        # Silence all CPLEX output streams.
        prob.set_log_stream(None)
        prob.set_error_stream(None)
        prob.set_warning_stream(None)
        prob.set_results_stream(None)
        rows = []
        # Group 1: each row of the assignment matrix sums per my_rhs.
        for ii in range(0, n):
            col = [x for x in range((0 + (n * ii)), (n + (n * ii)))]
            coef = [1 for x in range(0, n)]
            rows.append([col, coef])
        # Group 2: each column of the assignment matrix sums per my_rhs.
        for ii in range(0, n):
            col = [x for x in range((0 + ii), (n ** 2), n)]
            coef = [1 for x in range(0, n)]
            rows.append([col, coef])
        # Group 3: link off-diagonal assignment entries to the auxiliary
        # continuous variables (skip ii==jj and any pair involving index 0).
        for ii in range(0, n):
            for jj in range(0, n):
                if ((ii != jj) and ((ii * jj) > 0)):
                    col = [(ii + (jj * n)), (((n ** 2) + ii) - 1), (((n ** 2) + jj) - 1)]
                    coef = [1, 1, (- 1)]
                    rows.append([col, coef])
        # Group 4: force the diagonal entries (self-assignments) to 0.
        for ii in range(0, n):
            col = [(ii * (n + 1))]
            coef = [1]
            rows.append([col, coef])
        prob.linear_constraints.add(lin_expr=rows, senses=my_sense, rhs=my_rhs)
def main(config: lib.JSONDict, output: Union[(str, Path)], *, force: bool=False) -> Optional[lib.JSONDict]:
    """Train an FT-Transformer on a tabular dataset and evaluate it.

    Args:
        config: experiment configuration (parsed into ``Config`` by lib.make_config).
        output: output directory for checkpoints, predictions and reports.
        force: overwrite an existing output directory.

    Returns:
        The experiment report dict, or None if ``output`` already exists and
        ``force`` is False.
    """
    if (not lib.start(output, force=force)):
        return None
    output = Path(output)
    report = lib.create_report(config)
    C = lib.make_config(Config, config)
    delu.random.seed(C.seed)
    device = lib.get_device()
    dataset = (C.data if isinstance(C.data, lib.Dataset) else lib.build_dataset(**C.data)).to_torch(device)
    # Ensure X_num exists (possibly zero-width) so binary features can be
    # concatenated into it below.
    if (dataset.X_num is None):
        dataset.data['X_num'] = {}
        for part in dataset.parts():
            dataset.data['X_num'][part] = torch.empty(dataset.size(part), 0, device=device)
    if (dataset.X_bin is not None):
        # Fold binary features into the numeric block; the model only
        # distinguishes numeric vs categorical inputs.
        for part in dataset.parts():
            dataset.data['X_num'][part] = torch.cat([dataset.data['X_num'][part], dataset.data['X_bin'][part]], dim=1)
        del dataset.data['X_bin']
    Y_train = dataset.Y['train'].to((torch.long if dataset.is_multiclass else torch.float))
    model_config = deepcopy(C.model)
    # The FFN hidden size is specified as a factor of d_token in the config.
    model_config['ffn_d_hidden'] = int((model_config['d_token'] * model_config.pop('ffn_d_hidden_factor')))
    # Only the CLS token's representation feeds the prediction head.
    model_config['last_layer_query_idx'] = [(- 1)]
    model = rtdl.FTTransformer.make_baseline(n_num_features=dataset.n_num_features, cat_cardinalities=dataset.cat_cardinalities(), d_out=lib.get_d_out(dataset.n_classes()), **model_config)
    report['n_parameters'] = lib.get_n_parameters(model)
    logger.info(f"n_parameters = {report['n_parameters']}")
    report['prediction_type'] = (None if dataset.is_regression else 'logits')
    model.to(device)
    if (torch.cuda.device_count() > 1):
        model = nn.DataParallel(model)
    optimizer = (C.optimizer if isinstance(C.optimizer, torch.optim.Optimizer) else lib.make_optimizer(model, **C.optimizer))
    loss_fn = lib.get_loss_fn(dataset.task_type)
    epoch = 0
    eval_batch_size = 32768
    chunk_size = None
    progress = delu.ProgressTracker(C.patience)
    training_log = []
    writer = torch.utils.tensorboard.SummaryWriter(output)

    def apply_model(part, idx):
        return model(x_num=(None if (dataset.X_num is None) else dataset.X_num[part][idx]), x_cat=(None if (dataset.X_cat is None) else dataset.X_cat[part][idx])).squeeze((- 1))

    # Fix: the decorator was garbled to a bare `_mode()` call in the source;
    # evaluation must run without autograd tracking.
    @torch.inference_mode()
    def evaluate(parts: list[str], eval_batch_size: int):
        """Predict on *parts*, halving eval_batch_size on OOM; returns
        (metrics, predictions, final eval_batch_size)."""
        model.eval()
        predictions = {}
        for part in parts:
            while eval_batch_size:
                try:
                    predictions[part] = torch.cat([apply_model(part, idx) for idx in torch.arange(dataset.size(part), device=device).split(eval_batch_size)]).cpu().numpy()
                except RuntimeError as err:
                    if (not lib.is_oom_exception(err)):
                        raise
                    eval_batch_size //= 2
                    logger.warning(f'eval_batch_size = {eval_batch_size}')
                else:
                    break
            if (not eval_batch_size):
                # Fix: the original constructed the exception without raising it.
                raise RuntimeError('Not enough memory even for eval_batch_size=1')
        metrics = (dataset.calculate_metrics(predictions, report['prediction_type']) if lib.are_valid_predictions(predictions) else {x: {'score': (- 999999.0)} for x in predictions})
        return (metrics, predictions, eval_batch_size)

    def save_checkpoint():
        """Persist model/optimizer/progress state plus the report to *output*."""
        lib.dump_checkpoint({'epoch': epoch, 'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'random_state': delu.random.get_state(), 'progress': progress, 'report': report, 'timer': timer, 'training_log': training_log}, output)
        lib.dump_report(report, output)
        lib.backup_output(output)

    print()
    timer = lib.run_timer()
    while (epoch < C.n_epochs):
        print(f'[...] {lib.try_get_relative_path(output)} | {timer}')
        model.train()
        epoch_losses = []
        for batch_idx in tqdm(lib.make_random_batches(dataset.size('train'), C.batch_size, device), desc=f'Epoch {epoch}'):
            (loss, new_chunk_size) = lib.train_step(optimizer, (lambda x: loss_fn(apply_model('train', x), Y_train[x])), batch_idx, (chunk_size or C.batch_size))
            epoch_losses.append(loss.detach())
            # Shrink the gradient-accumulation chunk if train_step had to.
            if (new_chunk_size and (new_chunk_size < (chunk_size or C.batch_size))):
                chunk_size = new_chunk_size
                logger.warning(f'chunk_size = {chunk_size}')
        (epoch_losses, mean_loss) = lib.process_epoch_losses(epoch_losses)
        (metrics, predictions, eval_batch_size) = evaluate(['val', 'test'], eval_batch_size)
        lib.print_metrics(mean_loss, metrics)
        training_log.append({'epoch-losses': epoch_losses, 'metrics': metrics, 'time': timer()})
        writer.add_scalars('loss', {'train': mean_loss}, epoch, timer())
        for part in metrics:
            writer.add_scalars('score', {part: metrics[part]['score']}, epoch, timer())
        progress.update(metrics['val']['score'])
        if progress.success:
            lib.celebrate()
            report['best_epoch'] = epoch
            report['metrics'] = metrics
            save_checkpoint()
            lib.dump_predictions(predictions, output)
        elif (progress.fail or (not lib.are_valid_predictions(predictions))):
            # Early stopping: patience exhausted or predictions became invalid.
            break
        epoch += 1
        print()
    report['time'] = str(timer)
    # Restore the best checkpoint before the final evaluation.
    model.load_state_dict(lib.load_checkpoint(output)['model'])
    (report['metrics'], predictions, _) = evaluate(['train', 'val', 'test'], eval_batch_size)
    report['chunk_size'] = chunk_size
    report['eval_batch_size'] = eval_batch_size
    lib.dump_predictions(predictions, output)
    lib.dump_summary(lib.summarize(report), output)
    save_checkpoint()
    lib.finish(output, report)
    return report
def execute_fixture(monkeypatch) -> 'brew_file.BrewFile':
    """Stub out BrewFile/BrewHelper methods so invocations just print their calls.

    Fixes the original's incorrect ``-> None`` annotation — the fixture
    returns the constructed BrewFile instance.
    """
    for func in ['check_brew_cmd', 'check_cask', 'set_brewfile_repo', 'set_brewfile_local', 'check_repo', 'repomgr', 'brew_cmd', 'initialize', 'check_input_file', 'edit_brewfile', 'cat_brewfile', 'get_files', 'clean_non_request', 'cleanup', 'install']:
        # Bind the loop variable as a default argument to avoid the
        # late-binding-closure pitfall (replaces the original's inner helper).
        monkeypatch.setattr(brew_file.BrewFile, func, (lambda self, *args, _name=func, **kw: print(_name, args, kw)))
    # check_brew_cmd is re-patched to a silent stub — presumably it must not
    # print during construction; TODO confirm.
    monkeypatch.setattr(brew_file.BrewFile, 'check_brew_cmd', (lambda self: None))
    monkeypatch.setattr(brew_file.BrewHelper, 'brew_val', (lambda self, x: x))
    monkeypatch.setattr(brew_file.BrewHelper, 'proc', (lambda self, *args, **kw: print('proc', args, kw)))
    bf = brew_file.BrewFile({})
    return bf
def set_embedding(z_target, state, nbits, _mol_embedding=mol_fp):
    """Concatenate embeddings of up to two state molecules with the target embedding.

    An empty *state* contributes a zero block of width 2*nbits; a single-element
    state contributes its embedding plus an nbits-wide zero block.
    """
    if (not state):
        empty = np.zeros((1, (2 * nbits)))
        return np.concatenate([empty, z_target], axis=1)
    first = np.expand_dims(_mol_embedding(state[0]), axis=0)
    second = (np.zeros((1, nbits)) if (len(state) == 1) else _mol_embedding(state[1]))
    return np.concatenate([first, second, z_target], axis=1)
class EncoderLayer(nn.Module):
    """Encoder layer combining group attention, self-attention and a feed-forward net.

    Group attention produces constituent probabilities that gate the
    self-attention; both sublayers are wrapped in residual connections.
    """

    def __init__(self, size, self_attn, feed_forward, group_attn, dropout):
        super(EncoderLayer, self).__init__()
        self.size = size
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.group_attn = group_attn
        # Two residual sublayers: index 0 wraps self-attention, index 1 the FFN.
        self.sublayer = clones(SublayerConnection(size, dropout), 2)

    def forward(self, x, mask, group_prob):
        (group_prob, break_prob) = self.group_attn(x, mask, group_prob)
        attend = (lambda inp: self.self_attn(inp, inp, inp, group_prob, mask))
        x = self.sublayer[0](x, attend)
        out = self.sublayer[1](x, self.feed_forward)
        return (out, group_prob, break_prob)
def _get_proposal_section_reviewer_vote_choices(conference):
    """Return (vote_value, description) choices for reviewer votes.

    The +0 vote is omitted when the conference's ALLOW_PLUS_ZERO_REVIEWER_VOTE
    setting exists and is falsy; absent setting defaults to allowing it.
    """
    setting_meta = ConferenceSettingConstants.ALLOW_PLUS_ZERO_REVIEWER_VOTE
    setting = conference.conferencesetting_set.filter(name=setting_meta['name']).first()
    allow_plus_zero = (setting.value if setting else True)
    choices = []
    for vote in ProposalSectionReviewerVoteValue.objects.all().reverse():
        if ((vote.vote_value == 0) and (not allow_plus_zero)):
            continue
        choices.append((vote.vote_value, '{}'.format(vote.description)))
    return choices
class DimShuffle(ExternalCOp):
    """Rearrange the dimensions of a tensor: permute, insert ('x'), or drop axes.

    ``new_order`` lists input axis indices in output order; 'x' entries insert
    new broadcastable axes, and omitted input axes are dropped (only allowed
    when they are broadcastable). Backed by an external C implementation and
    always performed as a view on the input (``inplace`` is forced True).
    """
    _f16_ok = True
    check_input = False
    __props__ = ('input_broadcastable', 'new_order', 'inplace')
    # External C implementation used by ExternalCOp.
    c_func_file = 'c_code/dimshuffle.c'
    c_func_name = 'APPLY_SPECIFIC(cpu_dimshuffle)'

    def params_type(self):
        # Parameters marshalled to the C code.
        return ParamsType(shuffle=lvector, augment=lvector, transposition=lvector, inplace=scalar_bool)

    def __init__(self, input_broadcastable, new_order):
        """Validate *new_order* against *input_broadcastable* and precompute
        the shuffle/augment/drop index lists."""
        super().__init__([self.c_func_file], self.c_func_name)
        self.input_broadcastable = tuple(input_broadcastable)
        if (not all((isinstance(bs, (bool, np.bool_)) for bs in self.input_broadcastable))):
            raise ValueError(f'input_broadcastable must be boolean, {self.input_broadcastable}')
        self.new_order = tuple(new_order)
        # Always a view on the input; see view_map below.
        self.inplace = True
        for (i, j) in enumerate(new_order):
            if (j != 'x'):
                # Each non-'x' entry must be a valid, unique input axis index.
                if (not isinstance(j, (int, np.integer))):
                    raise TypeError(f'DimShuffle indices must be Python ints; got {j} of type {type(j)}.')
                if (j >= len(input_broadcastable)):
                    raise ValueError(f'new_order[{i}] is {j}, but the input only has {len(input_broadcastable)} axes.')
                if (j in new_order[(i + 1):]):
                    raise ValueError(f'The same input dimension may not appear twice in the list of output dimensions: {new_order}')
        # Input axes not mentioned in new_order are dropped — only legal for
        # broadcastable (length-1) axes.
        drop = []
        for (i, b) in enumerate(input_broadcastable):
            if (i not in new_order):
                if (b == 1):
                    drop.append(i)
                else:
                    raise ValueError(f'Cannot drop a non-broadcastable dimension: {input_broadcastable}, {new_order}')
        # shuffle: kept input axes in output order; transposition: same plus
        # the dropped axes at the end (fed to np.transpose in perform()).
        self.shuffle = [x for x in new_order if (x != 'x')]
        self.transposition = (self.shuffle + drop)
        # Output positions where a new broadcast axis is inserted.
        self.augment = sorted([i for (i, x) in enumerate(new_order) if (x == 'x')])
        self.drop = drop
        if self.inplace:
            # Output 0 is a view of input 0.
            self.view_map = {0: [0]}

    def __setstate__(self, state):
        self.__dict__.update(state)
        if (not hasattr(self, 'func_files')):
            # Unpickling an old instance: re-run ExternalCOp setup.
            super().__init__([self.c_func_file], self.c_func_name)

    def make_node(self, _input):
        """Build an Apply node, checking the input's broadcastable pattern."""
        input = as_tensor_variable(_input)
        ib = tuple(((s == 1) for s in input.type.shape))
        if (ib != self.input_broadcastable):
            if (len(ib) != len(self.input_broadcastable)):
                raise TypeError(f'The number of dimensions of the input is incorrect for this op. Expected {self.input_broadcastable}, got {ib}.')
            for (expected, b) in zip(self.input_broadcastable, ib):
                # Expected-broadcastable axes must actually be broadcastable;
                # the reverse mismatch is tolerated.
                if ((expected is True) and (b is False)):
                    raise TypeError(f'The broadcastable pattern of the input is incorrect for this op. Expected {self.input_broadcastable}, got {ib}.')
        out_static_shape = []
        for dim_idx in self.new_order:
            if (dim_idx == 'x'):
                out_static_shape.append(1)
            else:
                out_static_shape.append(input.type.shape[dim_idx])
        output = TensorType(dtype=input.type.dtype, shape=out_static_shape)()
        return Apply(self, [input], [output])

    def __str__(self):
        """Pretty-print as ExpandDims/DropDims/Transpose when only one kind of
        rearrangement happens; generic DimShuffle otherwise."""
        shuffle = (sorted(self.shuffle) != self.shuffle)
        if (self.augment and (not (shuffle or self.drop))):
            if (len(self.augment) == 1):
                return f'ExpandDims{{axis={self.augment[0]}}}'
            return f'ExpandDims{{axes={self.augment}}}'
        if (self.drop and (not (self.augment or shuffle))):
            if (len(self.drop) == 1):
                return f'DropDims{{axis={self.drop[0]}}}'
            return f'DropDims{{axes={self.drop}}}'
        if (shuffle and (not (self.augment or self.drop))):
            return f'Transpose{{axes={self.shuffle}}}'
        return f"DimShuffle{{order=[{','.join(map(str, self.new_order))}]}}"

    def perform(self, node, inp, out):
        """Python fallback: transpose, then reshape to insert broadcast axes."""
        (res,) = inp
        (storage,) = out
        if (not isinstance(res, (np.ndarray, np.memmap))):
            raise TypeError(res)
        # Move kept axes into place (dropped axes end up trailing, then are
        # removed by the reshape below since they have length 1).
        res = res.transpose(self.transposition)
        shape = list(res.shape[:len(self.shuffle)])
        for augm in self.augment:
            shape.insert(augm, 1)
        res = res.reshape(shape)
        if (not self.inplace):
            res = np.copy(res)
        storage[0] = np.asarray(res)

    def infer_shape(self, fgraph, node, shapes):
        """Output shape: input shape permuted by shuffle, with 1s inserted."""
        (ishp,) = shapes
        rval = [ishp[i] for i in self.shuffle]
        for augm in self.augment:
            rval.insert(augm, 1)
        return [rval]

    def R_op(self, inputs, eval_points):
        if (None in eval_points):
            return [None]
        # DimShuffle is linear: apply the same op to the evaluation points.
        return self(*eval_points, return_list=True)

    def grad(self, inp, grads):
        """Gradient: apply the inverse dimshuffle to the output gradient."""
        (x,) = inp
        (gz,) = grads
        gz = as_tensor_variable(gz)
        # Invert new_order: output axis i maps back to input axis v.
        grad_order = (['x'] * x.type.ndim)
        for (i, v) in enumerate(self.new_order):
            if (v != 'x'):
                grad_order[v] = i
        if (inp[0].dtype in discrete_dtypes):
            # No meaningful gradient through discrete inputs.
            return [inp[0].zeros_like(dtype=config.floatX)]
        else:
            return [DimShuffle(tuple(((s == 1) for s in gz.type.shape)), grad_order)(Elemwise(scalar_identity)(gz))]
def qpos_to_pd_joint_vel(controller: PDJointVelController, qpos):
    """Convert a target joint position into a normalized velocity action.

    The velocity needed to reach *qpos* in one control step is computed and
    mapped back into the controller's normalized action range.
    """
    # isinstance instead of `type(...) ==` (idiomatic; also accepts subclasses).
    assert isinstance(controller, PDJointVelController)
    assert controller.config.normalize_action
    delta_qpos = (qpos - controller.qpos)
    # Velocity required to cover delta_qpos within one control period.
    qvel = (delta_qpos * controller._control_freq)
    (low, high) = (controller.config.lower, controller.config.upper)
    return inv_scale_action(qvel, low, high)
class _ModuleProxy():
    """Lazily imports a pyglet submodule on first attribute access.

    The proxy stands in for ``pyglet.<name>``; the first attribute read or
    write imports the real module, caches it on the proxy, and also replaces
    the proxy in this module's globals so later lookups hit the module
    directly.
    """
    # The imported module; None while still lazy.
    _module = None

    def __init__(self, name):
        # Write via __dict__ to bypass our own __setattr__ (which would
        # otherwise trigger the import).
        self.__dict__['_module_name'] = name

    def __getattr__(self, name):
        """Forward attribute reads, importing the module on first use."""
        try:
            # getattr(None, ...) raises AttributeError while still lazy.
            return getattr(self._module, name)
        except AttributeError:
            if (self._module is not None):
                # Module is loaded but genuinely lacks the attribute.
                raise
            import_name = f'pyglet.{self._module_name}'
            __import__(import_name)
            module = sys.modules[import_name]
            # Cache without re-entering __setattr__.
            object.__setattr__(self, '_module', module)
            # Swap the proxy for the real module in this module's namespace.
            globals()[self._module_name] = module
            return getattr(module, name)

    def __setattr__(self, name, value):
        """Forward attribute writes, importing the module on first use."""
        try:
            # setattr(None, ...) raises AttributeError while still lazy.
            setattr(self._module, name, value)
        except AttributeError:
            if (self._module is not None):
                raise
            import_name = f'pyglet.{self._module_name}'
            __import__(import_name)
            module = sys.modules[import_name]
            object.__setattr__(self, '_module', module)
            globals()[self._module_name] = module
            setattr(module, name, value)
def get_failure_msg_from_onion_error(decrypted_error_packet: bytes) -> OnionRoutingFailureMessage:
    """Parse the failure message embedded in a decrypted onion error packet.

    Bytes 32..34 hold the big-endian length of the failure message, which
    starts at offset 34.
    """
    failure_len = int.from_bytes(decrypted_error_packet[32:34], 'big')
    payload = decrypted_error_packet[34:(34 + failure_len)]
    return OnionRoutingFailureMessage.from_bytes(payload)
class SkillTreeView(wx.Panel):
    """Character-editor panel showing the skill tree with per-skill levels.

    Provides alpha-clone selection, skill search, clipboard import/export,
    security-status editing, and a context menu / keyboard shortcuts to set
    pending skill levels.
    """

    def __init__(self, parent):
        wx.Panel.__init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.TAB_TRAVERSAL)
        # parent is expected to be nested two levels under the CharacterEditor.
        self.charEditor = self.Parent.Parent
        self.SetBackgroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW))
        pmainSizer = wx.BoxSizer(wx.VERTICAL)
        hSizer = wx.BoxSizer(wx.HORIZONTAL)
        # Clone selector; first entry (Omega) carries None as client data.
        self.clonesChoice = wx.Choice(self, wx.ID_ANY, style=0)
        i = self.clonesChoice.Append('Omega Clone', None)
        self.clonesChoice.SetSelection(i)
        hSizer.Add(self.clonesChoice, 5, (wx.ALL | wx.EXPAND), 5)
        self.searchInput = wx.SearchCtrl(self, wx.ID_ANY)
        hSizer.Add(self.searchInput, 1, (wx.ALL | wx.EXPAND), 5)
        self.searchInput.Bind(wx.EVT_TEXT, self.delaySearch)
        sChar = Character.getInstance()
        self.alphaClones = sChar.getAlphaCloneList()
        char = self.charEditor.entityEditor.getActiveEntity()
        # Populate alpha clones and select the character's current one.
        for clone in self.alphaClones:
            i = self.clonesChoice.Append(clone.alphaCloneName, clone.ID)
            if (clone.ID == char.alphaCloneID):
                self.clonesChoice.SetSelection(i)
        self.clonesChoice.Bind(wx.EVT_CHOICE, self.cloneChanged)
        self.clonesChoice.SetToolTip(wx.ToolTip(_t("Setting an Alpha clone does not replace the character's skills, but rather caps them to Alpha levels.")))
        pmainSizer.Add(hSizer, 0, (wx.EXPAND | wx.ALL), 5)
        # Debounce timer for the search box (see delaySearch).
        self.searchTimer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.populateSkillTreeSkillSearch, self.searchTimer)
        tree = self.skillTreeListCtrl = TreeListCtrl(self, wx.ID_ANY, style=wx.dataview.TL_DEFAULT_STYLE)
        pmainSizer.Add(tree, 1, (wx.EXPAND | wx.ALL), 5)
        self.imageList = wx.ImageList(16, 16)
        tree.SetImageList(self.imageList)
        # Red skill-book icon marks skills with unsaved ("dirty") levels.
        self.skillBookImageId = self.imageList.Add(wx.Icon(BitmapLoader.getBitmap('skill_small', 'gui')))
        self.skillBookDirtyImageId = self.imageList.Add(wx.Icon(BitmapLoader.getBitmap('skill_small_red', 'gui')))
        tree.AppendColumn(_t('Skill'))
        tree.AppendColumn(_t('Level'))
        self.root = tree.GetRootItem()
        tree.SetColumnWidth(0, 525)
        tree.SetColumnWidth(1, 100)
        self.secStatusLabel = _t('Sec Status: {0:.2f}')
        self.btnSecStatus = wx.Button(self, wx.ID_ANY, self.secStatusLabel.format((char.secStatus or 0.0)))
        self.btnSecStatus.Bind(wx.EVT_BUTTON, self.onSecStatus)
        self.populateSkillTree()
        tree.Bind(wx.dataview.EVT_TREELIST_ITEM_ACTIVATED, self.expand)
        tree.Bind(wx.dataview.EVT_TREELIST_ITEM_EXPANDING, self.expandLookup)
        tree.Bind(wx.dataview.EVT_TREELIST_ITEM_CONTEXT_MENU, self.spawnMenu)
        bSizerButtons = wx.BoxSizer(wx.HORIZONTAL)
        bSizerButtons.Add(self.btnSecStatus, 0, wx.ALL, 5)
        bSizerButtons.AddStretchSpacer()
        # Import/export buttons are built from this spec; attr names become
        # self.importBtn / self.exportBtn and handlers importSkills/exportSkills.
        importExport = ((_t('Import skills from clipboard'), wx.ART_FILE_OPEN, 'import'), (_t('Export skills to clipboard'), wx.ART_FILE_SAVE_AS, 'export'))
        for (tooltip, art, attr) in importExport:
            bitmap = wx.ArtProvider.GetBitmap(art, wx.ART_BUTTON)
            btn = wx.BitmapButton(self, wx.ID_ANY, bitmap)
            btn.SetMinSize(btn.GetSize())
            btn.SetMaxSize(btn.GetSize())
            btn.Layout()
            setattr(self, '{}Btn'.format(attr), btn)
            btn.Enable(True)
            btn.SetToolTip(tooltip)
            bSizerButtons.Add(btn, 0, wx.ALL, 5)
            btn.Bind(wx.EVT_BUTTON, getattr(self, '{}Skills'.format(attr)))
        pmainSizer.Add(bSizerButtons, 0, wx.EXPAND, 5)
        self.charEditor.entityEditor.Bind(wx.EVT_CHOICE, self.charChanged)
        self.charEditor.Bind(GE.CHAR_LIST_UPDATED, self.populateSkillTree)
        # Menu ids: one for "not learned" plus one per level 0-5, mapped both ways.
        self.idUnlearned = wx.NewId()
        self.levelIds = {}
        self.idLevels = {}
        self.levelIds[self.idUnlearned] = _t('Not learned')
        for level in range(6):
            id = wx.NewId()
            self.levelIds[id] = level
            self.idLevels[level] = id
        self.revertID = wx.NewId()
        self.saveID = wx.NewId()
        self.SetSizer(pmainSizer)
        # Custom event used to route keyboard level changes through changeLevel.
        (self.ChangeLevelEvent, CHANGE_LEVEL_EVENT) = wx.lib.newevent.NewEvent()
        self.Bind(wx.EVT_CHAR_HOOK, self.kbEvent)
        self.Bind(CHANGE_LEVEL_EVENT, self.changeLevel)

    def kbEvent(self, event):
        """Set the selected skill's level when a 0-5 key (row or numpad) is pressed."""
        keyLevelMap = {48: 0, 49: 1, 50: 2, 51: 3, 52: 4, 53: 5, wx.WXK_NUMPAD0: 0, wx.WXK_NUMPAD1: 1, wx.WXK_NUMPAD2: 2, wx.WXK_NUMPAD3: 3, wx.WXK_NUMPAD4: 4, wx.WXK_NUMPAD5: 5}
        keycode = event.GetKeyCode()
        if ((keycode in keyLevelMap) and (event.GetModifiers() == wx.MOD_NONE)):
            level = keyLevelMap[keycode]
            selection = self.skillTreeListCtrl.GetSelection()
            if selection:
                (dataType, skillID) = self.skillTreeListCtrl.GetItemData(selection)
                if (dataType == 'skill'):
                    # Re-dispatch as a ChangeLevelEvent carrying the level's menu id.
                    event = self.ChangeLevelEvent()
                    event.SetId(self.idLevels[level])
                    wx.PostEvent(self, event)
                    return
        event.Skip()

    def importSkills(self, evt):
        """Import 'Skill Name <level>' lines from the clipboard as pending levels."""
        with wx.MessageDialog(self, _t('Importing skills into this character will set the skill levels as pending. To save the skills permanently, please click the Save button at the bottom of the window after importing'), _t('Import Skills'), wx.OK) as dlg:
            dlg.ShowModal()
        text = fromClipboard().strip()
        if text:
            sCharacter = Character.getInstance()
            char = self.charEditor.entityEditor.getActiveEntity()
            try:
                lines = text.splitlines()
                for l in lines:
                    s = l.strip()
                    # Last whitespace-separated token is the level (arabic or roman).
                    (skill, level) = (s.rsplit(None, 1)[0], arabicOrRomanToInt(s.rsplit(None, 1)[1]))
                    skill = char.getSkill(skill)
                    if skill:
                        sCharacter.changeLevel(char.ID, skill.item.ID, level)
            except (KeyboardInterrupt, SystemExit):
                raise
            except Exception as e:
                pyfalog.error(e)
                with wx.MessageDialog(self, _t('There was an error importing skills, please see log file'), _t('Error'), wx.ICON_ERROR) as dlg:
                    dlg.ShowModal()
            finally:
                # Always refresh UI state, even on partial imports.
                self.charEditor.btnRestrict()
                self.populateSkillTree()
                self.charEditor.entityEditor.refreshEntityList(char)

    def exportSkills(self, evt):
        """Copy all of the character's skills ('name level' per line) to the clipboard."""
        char = self.charEditor.entityEditor.getActiveEntity()
        skills = sorted(char.__class__.getSkillNameMap().keys())
        list = ''
        for s in skills:
            skill = char.getSkill(s)
            list += '{} {}\n'.format(skill.item.name, skill.level)
        toClipboard(list)

    def onSecStatus(self, event):
        """Open the security-status dialog and apply the chosen value."""
        sChar = Character.getInstance()
        char = self.charEditor.entityEditor.getActiveEntity()
        with SecStatusDialog(self, (char.secStatus or 0.0)) as dlg:
            if (dlg.ShowModal() == wx.ID_OK):
                value = dlg.floatSpin.GetValue()
                sChar.setSecStatus(char, value)
                self.btnSecStatus.SetLabel(self.secStatusLabel.format(value))

    def delaySearch(self, evt):
        """Debounce the search box; empty input restores the full tree."""
        if (self.searchInput.GetValue() == ''):
            self.populateSkillTree()
        else:
            self.searchTimer.Stop()
            self.searchTimer.Start(150, True)

    def cloneChanged(self, event):
        """Apply the selected alpha clone (or None for Omega) to the character."""
        sChar = Character.getInstance()
        sChar.setAlphaClone(self.charEditor.entityEditor.getActiveEntity(), event.ClientData)
        self.populateSkillTree()

    def charChanged(self, event=None):
        """Sync clone selector, sec-status button and tree to the newly active character."""
        self.searchInput.SetValue('')
        char = self.charEditor.entityEditor.getActiveEntity()
        for i in range(self.clonesChoice.GetCount()):
            cloneID = self.clonesChoice.GetClientData(i)
            if (char.alphaCloneID == cloneID):
                self.clonesChoice.SetSelection(i)
        self.btnSecStatus.SetLabel(self.secStatusLabel.format((char.secStatus or 0.0)))
        self.populateSkillTree(event)

    def populateSkillTreeSkillSearch(self, event=None):
        """Rebuild the tree as a flat list of skills matching the search text."""
        sChar = Character.getInstance()
        char = self.charEditor.entityEditor.getActiveEntity()
        try:
            search = self.searchInput.GetLineText(0)
        except AttributeError:
            # Some wx builds lack GetLineText on SearchCtrl.
            search = self.searchInput.GetValue()
        root = self.root
        tree = self.skillTreeListCtrl
        tree.DeleteAllItems()
        for (id, name) in sChar.getSkillsByName(search):
            iconId = self.skillBookImageId
            (level, dirty) = sChar.getSkillLevel(char.ID, id)
            if dirty:
                iconId = self.skillBookDirtyImageId
            childId = tree.AppendItem(root, name, iconId, data=('skill', id))
            tree.SetItemText(childId, 1, (_t('Level {}').format(int(level)) if isinstance(level, float) else level))

    def populateSkillTree(self, event=None):
        """Rebuild the tree with one collapsed group node per skill group."""
        sChar = Character.getInstance()
        char = self.charEditor.entityEditor.getActiveEntity()
        dirtyGroups = set([skill.item.group.ID for skill in char.dirtySkills])
        # The built-in "All 0"/"All 5" characters cannot be customized.
        if (char.name in ('All 0', 'All 5')):
            self.clonesChoice.Disable()
            self.btnSecStatus.Disable()
        else:
            self.clonesChoice.Enable()
            self.btnSecStatus.Enable()
        groups = sChar.getSkillGroups()
        root = self.root
        tree = self.skillTreeListCtrl
        tree.DeleteAllItems()
        for (id, name) in groups:
            imageId = self.skillBookImageId
            if (id in dirtyGroups):
                imageId = self.skillBookDirtyImageId
            childId = tree.AppendItem(root, name, imageId, data=('group', id))
            # Dummy child makes the group expandable; replaced lazily in expandLookup.
            tree.AppendItem(childId, 'dummy')
        if event:
            event.Skip()

    def expand(self, event):
        """Toggle expansion of the activated item."""
        root = event.GetItem()
        tree = self.skillTreeListCtrl
        if tree.IsExpanded(root):
            tree.Collapse(root)
        else:
            tree.Expand(root)

    def expandLookup(self, event):
        """Lazily populate a group's skills on first expansion (replacing the dummy)."""
        root = event.GetItem()
        tree = self.skillTreeListCtrl
        child = tree.GetFirstChild(root)
        if (tree.GetItemText(child) == 'dummy'):
            tree.DeleteItem(child)
            sChar = Character.getInstance()
            char = self.charEditor.entityEditor.getActiveEntity()
            data = tree.GetItemData(root)
            for (id, name) in sChar.getSkills(data[1]):
                iconId = self.skillBookImageId
                (level, dirty) = sChar.getSkillLevel(char.ID, id)
                if dirty:
                    iconId = self.skillBookDirtyImageId
                childId = tree.AppendItem(root, name, iconId, data=('skill', id))
                tree.SetItemText(childId, 1, (_t('Level {}').format(int(level)) if isinstance(level, float) else level))

    def spawnMenu(self, event):
        """Show the context menu for a skill item (leaf nodes only)."""
        item = event.GetItem()
        itemData = self.skillTreeListCtrl.GetItemData(item)
        if (itemData is None):
            return
        self.skillTreeListCtrl.Select(item)
        # Items with children are group nodes — no context menu for those.
        thing = self.skillTreeListCtrl.GetFirstChild(item).IsOk()
        if thing:
            return
        id = itemData[1]
        eveItem = Market.getInstance().getItem(id)
        srcContext = 'skillItem'
        itemContext = _t('Skill')
        context = (srcContext, itemContext)
        menu = ContextMenu.getMenu(self, eveItem, [eveItem], context)
        char = self.charEditor.entityEditor.getActiveEntity()
        if (char.name not in ('All 0', 'All 5')):
            menu.AppendSeparator()
            menu.Append(self.idUnlearned, _t('Unlearn'))
            for level in range(6):
                menu.Append(self.idLevels[level], _t('Level {}').format(level))
            menu.Bind(wx.EVT_MENU, self.changeLevel)
        self.PopupMenu(menu)

    def changeLevel(self, event):
        """Apply a level change / revert / save for the selected skill, then
        refresh level text and dirty icons throughout the visible tree."""
        level = self.levelIds.get(event.Id)
        sChar = Character.getInstance()
        char = self.charEditor.entityEditor.getActiveEntity()
        if (char.name in ('All 0', 'All 5')):
            return
        selection = self.skillTreeListCtrl.GetSelection()
        (dataType, skillID) = self.skillTreeListCtrl.GetItemData(selection)
        if (level is not None):
            sChar.changeLevel(char.ID, skillID, level, persist=True)
        elif (event.Id == self.revertID):
            sChar.revertLevel(char.ID, skillID)
        elif (event.Id == self.saveID):
            sChar.saveSkill(char.ID, skillID)
        child = self.skillTreeListCtrl.GetFirstChild(self.root)

        def _setTreeSkillLevel(treeItem, skillID):
            # Update a skill row's level text and clear the dirty icon if saved.
            (lvl, dirty) = sChar.getSkillLevel(char.ID, skillID)
            self.skillTreeListCtrl.SetItemText(treeItem, 1, (_t('Level {}').format(int(lvl)) if (not isinstance(lvl, str)) else lvl))
            if (not dirty):
                self.skillTreeListCtrl.SetItemImage(treeItem, self.skillBookImageId)
        # Walk both the flat search layout and the group/skill layout.
        while child.IsOk():
            (dataType, id) = self.skillTreeListCtrl.GetItemData(child)
            if (dataType == 'skill'):
                _setTreeSkillLevel(child, id)
            else:
                grand = self.skillTreeListCtrl.GetFirstChild(child)
                while grand.IsOk():
                    if (self.skillTreeListCtrl.GetItemText(grand) != 'dummy'):
                        (_, skillID) = self.skillTreeListCtrl.GetItemData(grand)
                        _setTreeSkillLevel(grand, skillID)
                    grand = self.skillTreeListCtrl.GetNextSibling(grand)
            child = self.skillTreeListCtrl.GetNextSibling(child)
        dirtySkills = sChar.getDirtySkills(char.ID)
        dirtyGroups = set([skill.item.group.ID for skill in dirtySkills])
        parentID = self.skillTreeListCtrl.GetItemParent(selection)
        parent = self.skillTreeListCtrl.GetItemData(parentID)
        if parent:
            # NOTE(review): this resets the group icon to "clean" when the
            # group is still in dirtyGroups — looks inverted, but left as-is.
            if (parent[1] in dirtyGroups):
                self.skillTreeListCtrl.SetItemImage(parentID, self.skillBookImageId)
        event.Skip()
# NOTE(review): the decorator was garbled to a bare `.parametrize(...)` in the
# source (a syntax error); restored as @pytest.mark.parametrize.
@pytest.mark.parametrize('line', ['', ';', ';;', ';; ;', '&', '& &', ' && &', '>', "'", '"', '|'])
def test_parse_command_only_empty(parser, line):
    """parse_command_only() on empty/terminator-only input yields an empty Statement."""
    statement = parser.parse_command_only(line)
    assert (statement == '')
    assert (statement.args == statement)
    assert (statement.arg_list == [])
    assert (statement.command == '')
    assert (statement.command_and_args == '')
    assert (statement.multiline_command == '')
    assert (statement.raw == line)
    assert (statement.multiline_command == '')
    assert (statement.terminator == '')
    assert (statement.suffix == '')
    assert (statement.pipe_to == '')
    assert (statement.output == '')
    assert (statement.output_to == '')
class VectorScaler(SKCMatrixAndWeightTransformerABC):
    """Scale the matrix and/or weights by their vector norm.

    NOTE(review): the source had bare ``_inherit(...)`` call statements where
    decorators belonged (the decoration was lost); restored as
    ``@doc_inherit`` decorators per the scikit-criteria convention — verify
    the decorator name against the project's utils module.
    """

    @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights)
    def _transform_weights(self, weights):
        # Scale the weights as one flat vector.
        return scale_by_vector(weights, axis=None)

    @doc_inherit(SKCMatrixAndWeightTransformerABC._transform_matrix)
    def _transform_matrix(self, matrix):
        # Scale column-wise (axis=0).
        return scale_by_vector(matrix, axis=0)
class PreconditionerTest():
    """Benchmark/debug harness comparing a pure-Python two-level gradient
    quantization pipeline against a fused CUDA implementation.

    NOTE(review): depends on module-level globals defined elsewhere in the
    file (``mconfig``, ``T``, ``time``, ``qmatmul``,
    ``quantize_grad_input_speed``, ``x1_list``, ``y_list``, the
    ``cuda_*_time`` lists and ``IPython``) — not visible in this chunk.
    """
    def __init__(self):
        # Random half-precision operands, scaled down to keep magnitudes small.
        self.x = (torch.randn(mconfig.M, mconfig.K).cuda().half() / 100)
        self.y = (torch.randn(mconfig.K, mconfig.N).cuda().half() / 100)
        # Number of quantization levels minus one for num_bits bits.
        self.num_bins = ((2 ** mconfig.num_bits) - 1)
        # Symmetric per-tensor scale for y; /7 maps the max |value| to the top
        # of the signed range used by the clamp below.
        self.scale_y = (max(abs(self.y.min()), abs(self.y.max())) / 7)
        self.quantize_y = (self.y / self.scale_y)
        self.quantize_y.clamp_((- 8.0), (self.num_bins - 8)).round_()
        self.quantize_y = self.quantize_y.to(torch.int8)
        self.dequantize_y = (self.quantize_y * self.scale_y)
        # Quantization parameters, filled in by the python reference path.
        self.zero_point1 = 0
        self.scale1 = 0
        self.zero_point2 = 0
        self.scale2 = 0
        # Hadamard matrix for the activation transform (T is a global table
        # keyed by group size — TODO confirm).
        self.hadmard = T[mconfig.group_size].half()
        self.activation = (torch.randn(mconfig.M, mconfig.N).cuda().half() / 50)
        self.hadamard_activation = self.activation.view((- 1), mconfig.group_size).matmul(self.hadmard).view(self.activation.shape)
        self.scale_activation = torch.randn(1).cuda().half()
    def TwoLayerQuantizeInput_python(self, input):
        """Pure-Python reference: two-level quantization of ``input``, leverage
        -score sampling of rows, low-precision GEMMs and gradient computation.

        Runs mconfig.testTurn + 1 iterations (the first is warm-up) and prints
        the resulting gradients.
        """
        total_time = 0
        for i in range((mconfig.testTurn + 1)):
            time1 = time.time()
            # --- First-level (coarse) quantization of the full input. ---
            mn = min((input.min() - 1e-08), 0).float()
            mx = max((input.max() + 1e-08), 0).float()
            self.zero_point1 = mn
            self.scale1 = (self.num_bins / (mx - mn))
            qzero = ((- self.zero_point1) * self.scale1)
            iqzero = torch.floor(qzero)
            # Adjust the range so the zero point lands on an integer grid step.
            if (iqzero > 0):
                mx = (((iqzero - self.num_bins) * mn) / iqzero)
            elif (iqzero == 0):
                (self.zero_point1, mn) = (0, 0)
            self.scale1 = (self.num_bins / (mx - mn))
            first_transform = (((input.float() - self.zero_point1) * self.scale1) - 8)
            first_transform.clamp_((- 8.0), (self.num_bins - 8)).round_()
            first_quantize = (((first_transform + 8) / self.scale1) + self.zero_point1).half()
            # --- Second-level quantization of the residual error. ---
            residual = (input - first_quantize)
            mn = min((residual.min() - 1e-08), 0).float()
            mx = max((residual.max() + 1e-08), 0).float()
            self.zero_point2 = mn
            self.scale2 = (self.num_bins / (mx - mn))
            qzero = ((- self.zero_point2) * self.scale2)
            iqzero = torch.floor(qzero)
            if (iqzero > 0):
                mx = (((iqzero - self.num_bins) * mn) / iqzero)
            elif (iqzero == 0):
                (self.zero_point2, mn) = (0, 0)
            self.scale2 = (self.num_bins / (mx - mn))
            second_transform = (((residual.float() - self.zero_point2) * self.scale2) - 8)
            # Stochastic rounding via additive uniform noise.
            noise = second_transform.new(second_transform.shape).uniform_((- 0.5), 0.5)
            second_transform.add_(noise)
            second_transform.clamp_((- 8.0), (self.num_bins - 8)).round_()
            second_quantize = (((second_transform + 8) / self.scale2) + self.zero_point2).half()
            # Stack both quantization levels along the row dimension.
            output = torch.cat([first_transform, second_transform], dim=0)
            output_dequantize = torch.cat([first_quantize, second_quantize], dim=0)
            # --- Leverage-score-style row sampling. ---
            I = torch.eye((output_dequantize.shape[0] // 2)).cuda()
            I2 = torch.cat([I, I], 0)
            x_len = torch.linalg.norm(output_dequantize, dim=1)
            I_len = torch.linalg.norm(I2, dim=1)
            vec_norm = x_len.mul(I_len).float()
            len_norm = len(vec_norm)
            norm_activation = (vec_norm / vec_norm.sum())
            # Split the sampling budget between the two halves, rounded to a
            # multiple of 32 (warp-friendly — TODO confirm rationale).
            small_num = ((norm_activation[:(len_norm // 2)].sum() * len_norm) / 2)
            small_num = ((small_num / 32).round() * 32)
            if (small_num > (len_norm // 2)):
                small_num = (small_num - 32)
            large_num = ((len_norm // 2) - small_num)
            small_num = small_num.int()
            large_num = large_num.int()
            # Gumbel-top-k sampling of rows proportional to their norms.
            norm_activation = torch.log(norm_activation)
            activation_phi = torch.distributions.Gumbel(norm_activation, torch.ones_like(norm_activation)).rsample()
            # Stash so the CUDA path can reuse identical samples.
            self.norm_activation = norm_activation
            self.activation_phi = activation_phi
            (small_values, small_indices) = torch.topk(activation_phi[:(len(norm_activation) // 2)], small_num)
            (large_values, large_indices) = torch.topk(activation_phi[(len(norm_activation) // 2):], large_num)
            index = torch.cat([small_indices, (large_indices + (len(norm_activation) // 2))])
            # Iteratively rescale probabilities so none exceeds 1.
            cnt = 0
            norm_activation_loop = ((vec_norm * len_norm) / (2 * vec_norm.sum()))
            while ((norm_activation_loop.max() > 1) and (cnt < (len_norm / 2))):
                small_index = torch.nonzero((norm_activation_loop < 1)).squeeze()
                small_value = norm_activation_loop[small_index]
                cnt = (len_norm - len(small_index))
                norm_activation_loop = torch.clamp(norm_activation_loop, 0, 1)
                if ((small_value.max() == 0) and (small_value.min() == 0)):
                    break
                small_value = ((small_value * ((len_norm // 2) - cnt)) / small_value.sum())
                norm_activation_loop[small_index] = small_value
            norm_activation_loop[(norm_activation_loop == 0)] = 1e-10
            # Importance-weight the kept rows, zero out the dropped ones.
            output_dequantize = (output_dequantize / norm_activation_loop.unsqueeze(1))
            Ind = torch.zeros_like(output_dequantize)
            Ind[index] = 1
            output_dequantize = output_dequantize.mul(Ind)
            # Recombine the two quantization levels and compute the gradient.
            dequantize_sample_x = (output_dequantize[0:(output_dequantize.shape[0] // 2)] + output_dequantize[(output_dequantize.shape[0] // 2):]).half()
            dequantize_sample_y = (self.quantize_y * self.scale_y)
            grad_output = dequantize_sample_x.matmul(dequantize_sample_y)
            # LSQ-style gradient of the activation quantization scale.
            q_w = (self.hadamard_activation / self.scale_activation)
            indicate_small = (q_w < (- 8)).half()
            indicate_big = (q_w > 7).half()
            indicate_middle = ((1.0 - indicate_small) - indicate_big)
            grad_scale = (1.0 / math.sqrt((self.hadamard_activation.numel() * 7)))
            grad_alpha = (((((indicate_small * (- 8)) + (indicate_big * 7)) + (indicate_middle * ((- q_w) + q_w.round()))) * grad_output) * grad_scale).sum().unsqueeze(dim=0)
            grad_input = (indicate_middle * grad_output)
            # Auxiliary GEMMs on the sampled low-precision halves.
            sample_x1 = output[0:(output.shape[0] // 2)].half()
            sample_x2 = output[(output.shape[0] // 2):].half()
            Ind_small = Ind[:(Ind.shape[0] // 2)]
            Ind_large = Ind[(Ind.shape[0] // 2):]
            Ind_small_index = Ind_small[(small_indices, ...)]
            sample_y = dequantize_sample_y
            sample_y_tmp = self.quantize_y
            sample_x1_tmp = first_transform.mul(Ind_small.half())
            gemm1_tmp = sample_x1_tmp.half().matmul(self.quantize_y.half())
            sample_x2_tmp = second_transform.mul(Ind_large.half())
            gemm2_tmp = sample_x2_tmp.half().matmul(self.quantize_y.half())
            gemm1 = sample_x1.matmul(sample_y)
            gemm2 = sample_x2.matmul(sample_y)
            torch.cuda.synchronize()
            time2 = time.time()
            if (i >= 1):
                # Skip the warm-up iteration when accumulating timings.
                total_time += (time2 - time1)
            x1_list.append(sample_x1_tmp)
            y_list.append(sample_y_tmp)
        print('quantize python:')
        torch.set_printoptions(precision=4)
        print('grad_output is:')
        print(grad_output)
        print('grad of scale_activation is:')
        print(grad_alpha)
    def TwoLayerQuantizeInput_cuda_speed(self, input):
        """Timed run of the fused CUDA quantization kernels; prints gradients
        for comparison with the python path and records per-phase timings in
        the module-level ``cuda_*_time`` lists (left at 0 here).
        """
        total_time = 0
        # Per-phase accumulators; NOTE(review): never updated in this body —
        # the appended values below are all zero.
        hadmard_time = 0
        quantize1_time = 0
        quantize2_time = 0
        leverage_time = 0
        sample_time = 0
        pack_time = 0
        gemm1_time = 0
        gemm2_time = 0
        dequantize_time = 0
        y_shape = self.y.shape
        y_batch = self.y.view((- 1), mconfig.group_size)
        for i in range((mconfig.testTurn + 1)):
            time1 = time.time()
            qmatmul.synchronize()
            time_flag = time.time()
            qmatmul.synchronize()
            time2 = time.time()
            qmatmul.synchronize()
            time3 = time.time()
            # Fused first-level quantization kernel.
            first_out = quantize_grad_input_speed.first_quantize(input, self.dequantize_y, self.num_bins)
            activation_phi = torch.distributions.Gumbel(self.norm_activation, torch.ones_like(self.norm_activation)).rsample()
            # Reuse the python path's Gumbel sample for an apples-to-apples run.
            activation_phi = self.activation_phi
            second_transform = quantize_grad_input_speed.second_quantize(first_out[1], first_out[2], first_out[3], first_out[4], first_out[5], first_out[6], first_out[7], first_out[8], first_out[9], first_out[10], first_out[11], first_out[12], first_out[13], first_out[14], first_out[15], activation_phi, self.quantize_y, self.scale_y, self.hadamard_activation, self.scale_activation)
            qmatmul.synchronize()
            time_flag2 = time.time()
            qmatmul.synchronize()
            time4 = time.time()
        print('quantize cuda speed:')
        torch.set_printoptions(precision=4)
        print('grad_output is:')
        print(second_transform[2])
        print('grad of scale_activation is:')
        print(second_transform[1])
        print()
        cuda_hadmard_time.append(hadmard_time)
        cuda_quantize1_time.append(quantize1_time)
        cuda_quantize2_time.append(quantize2_time)
        cuda_leverage_time.append(leverage_time)
        cuda_sample_time.append(sample_time)
        cuda_pack_time.append(pack_time)
        cuda_gemm1_time.append(gemm1_time)
        cuda_gemm2_time.append(gemm2_time)
        cuda_dequantize_time.append(dequantize_time)
        IPython.embed()
class DesktopWin32WindowSpecificationTests(unittest.TestCase):
    """pywinauto Desktop(backend='win32') specification tests driven against a
    live MFC sample application (CmnCtrl3.exe)."""
    def setUp(self):
        # Reset global timing config and launch the sample app for each test.
        Timings.defaults()
        self.app = Application(backend='win32').start(os.path.join(mfc_samples_folder, u'CmnCtrl3.exe'))
        self.desktop = Desktop(backend='win32')
        self.desktop_no_magic = Desktop(backend='win32', allow_magic_lookup=False)
        self.window_title = 'Common Controls Sample'
    def tearDown(self):
        # Close the app window via WM_CLOSE rather than killing the process.
        self.desktop.window(name=self.window_title, pid=self.app.process).SendMessage(win32defines.WM_CLOSE)
    def test_simple_access_through_desktop(self):
        """Desktop.window() resolves the app window and its child controls."""
        dlg = self.desktop.window(name=self.window_title, pid=self.app.process)
        self.assertEqual(dlg.Pager.Toolbar.button_count(), 12)
    def test_set_backend_to_window_win32(self):
        """window() rejects a backend keyword — even one matching the
        Desktop's own backend (presumably disallowed entirely — confirm)."""
        with self.assertRaises(ValueError):
            self.desktop.window(backend='uia', name=self.window_title, pid=self.app.process)
        with self.assertRaises(ValueError):
            self.desktop.window(backend='win32', name=self.window_title, pid=self.app.process)
    def test_get_list_of_windows_win32(self):
        """windows() lists top-level windows including the sample app's."""
        dlgs = self.desktop.windows()
        self.assertTrue((len(dlgs) > 1))
        window_titles = [win_obj.window_text() for win_obj in dlgs]
        self.assertTrue((self.window_title in window_titles))
    def test_set_backend_to_windows_win32(self):
        """windows() likewise rejects a backend keyword."""
        with self.assertRaises(ValueError):
            self.desktop.windows(backend='win32')
        with self.assertRaises(ValueError):
            self.desktop.windows(backend='uia')
    def test_only_visible_windows_win32(self):
        """visible=True filters to visible windows only."""
        dlgs = self.desktop.windows(visible=True)
        self.assertTrue(all([win.is_visible() for win in dlgs]))
    def test_only_enable_windows_win32(self):
        """enabled=True filters to enabled windows only."""
        dlgs = self.desktop.windows(enabled=True)
        self.assertTrue(all([win.is_enabled() for win in dlgs]))
    def test_from_point_win32(self):
        """from_point() returns the deepest control under the coordinates."""
        combo = self.app.Common_Controls_Sample.ComboBox.find()
        (x, y) = combo.rectangle().mid_point()
        combo_from_point = self.desktop.from_point(x, y)
        self.assertEqual(combo, combo_from_point)
    def test_top_from_point_win32(self):
        """top_from_point() returns the top-level window, not the child."""
        combo = self.app.Common_Controls_Sample.ComboBox.find()
        dlg = self.app.Common_Controls_Sample.find()
        (x, y) = combo.rectangle().mid_point()
        dlg_from_point = self.desktop.top_from_point(x, y)
        self.assertEqual(dlg, dlg_from_point)
    def test_non_magic_desktop(self):
        """allow_magic_lookup=False propagates and disables attribute-based
        child lookup, while explicit by()/find() still works."""
        self.assertEqual(self.desktop.allow_magic_lookup, True)
        self.assertEqual(self.desktop_no_magic.allow_magic_lookup, False)
        window = self.desktop_no_magic.window(name=self.window_title, pid=self.app.process)
        self.assertEqual(window.allow_magic_lookup, False)
        dlg = window.by(class_name='msctls_trackbar32').find()
        self.assertIsInstance(dlg, TrackbarWrapper)
        pos = dlg.get_position()
        self.assertIsInstance(pos, six.integer_types)
        with self.assertRaises(AttributeError):
            getattr(self.desktop_no_magic, self.window_title.replace(' ', '_'))
        with self.assertRaises(AttributeError):
            window.msctls_trackbar32
class AdaBound(Optimizer):
    """Implements AdaBound (Luo et al., "Adaptive Gradient Methods with
    Dynamic Bound of Learning Rate", ICLR 2019).

    Behaves like Adam early in training and smoothly transitions towards SGD
    as the per-step clamp bounds converge to ``final_lr``.

    Args:
        params: iterable of parameters (or parameter-group dicts) to optimize.
        lr: Adam learning rate.
        betas: coefficients for the running averages of the gradient and its
            square.
        final_lr: final (SGD-like) learning rate the bounds converge to.
        gamma: convergence speed of the bound functions.
        eps: term added to the denominator for numerical stability.
        weight_decay: L2 penalty coefficient.
        amsbound: whether to use the AMSBound variant (max of second moments).

    Raises:
        ValueError: if any hyper-parameter is outside its valid range.
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), final_lr=0.1, gamma=0.001, eps=1e-08, weight_decay=0, amsbound=False):
        if not 0.0 <= lr:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if not 0.0 <= eps:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        if not 0.0 <= final_lr:
            raise ValueError('Invalid final learning rate: {}'.format(final_lr))
        if not 0.0 <= gamma < 1.0:
            raise ValueError('Invalid gamma parameter: {}'.format(gamma))
        defaults = dict(lr=lr, betas=betas, final_lr=final_lr, gamma=gamma, eps=eps, weight_decay=weight_decay, amsbound=amsbound)
        super(AdaBound, self).__init__(params, defaults)
        # Remember each group's initial lr so final_lr can be rescaled in
        # step() if a scheduler later changes group['lr'].
        self.base_lrs = list(map(lambda group: group['lr'], self.param_groups))

    def __setstate__(self, state):
        super(AdaBound, self).__setstate__(state)
        for group in self.param_groups:
            # Checkpoints saved before the amsbound option existed.
            group.setdefault('amsbound', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss.

        Returns:
            The loss returned by ``closure``, or ``None``.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group, base_lr in zip(self.param_groups, self.base_lrs):
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsbound = group['amsbound']
                state = self.state[p]
                if len(state) == 0:
                    # Lazy per-parameter state initialization on first use.
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsbound:
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsbound:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                if group['weight_decay'] != 0:
                    # Bug fix: the deprecated/removed positional-alpha overload
                    # grad.add(wd, p.data) is replaced with the keyword form,
                    # matching the add_(..., alpha=...) calls below.
                    grad = grad.add(p.data, alpha=group['weight_decay'])
                # Adam first/second moment estimates.
                exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                if amsbound:
                    # AMSGrad trick: use the running max of second moments.
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = 1 - (beta1 ** state['step'])
                bias_correction2 = 1 - (beta2 ** state['step'])
                step_size = (group['lr'] * math.sqrt(bias_correction2)) / bias_correction1
                # Dynamic bounds: lower/upper both converge to final_lr, so the
                # effective per-element step approaches SGD(final_lr).
                final_lr = (group['final_lr'] * group['lr']) / base_lr
                lower_bound = final_lr * (1 - (1 / ((group['gamma'] * state['step']) + 1)))
                upper_bound = final_lr * (1 + (1 / (group['gamma'] * state['step'])))
                step_size = torch.full_like(denom, step_size)
                step_size.div_(denom).clamp_(lower_bound, upper_bound).mul_(exp_avg)
                p.data.add_(-step_size)
        return loss
class BarthezTokenizer(PreTrainedTokenizer):
    """SentencePiece tokenizer for BARThez with fairseq-style special-token
    remapping: <s>=0, <pad>=1, </s>=2, <unk>=3 and <mask> mapped onto the
    last sentencepiece id.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', sp_model_kwargs: Optional[Dict[(str, Any)]]=None, **kwargs) -> None:
        """Load the sentencepiece model and set up the fairseq id remapping."""
        # Mask token behaves like a normal word preceded by a space.
        mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token)
        self.sp_model_kwargs = ({} if (sp_model_kwargs is None) else sp_model_kwargs)
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Fixed ids for the fairseq-style special tokens.
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        # <mask> occupies the last sentencepiece slot.
        self.fairseq_tokens_to_ids['<mask>'] = (len(self.sp_model) - 1)
        self.fairseq_ids_to_tokens = {v: k for (k, v) in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Build model inputs: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return (((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a 1/0 mask flagging special-token positions."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is None):
            return (([1] + ([0] * len(token_ids_0))) + [1])
        return (((([1] + ([0] * len(token_ids_0))) + [1, 1]) + ([0] * len(token_ids_1))) + [1])
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Token type ids are all zeros for this model (no segment embedding)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return (len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0])
    def vocab_size(self):
        # NOTE(review): get_vocab() reads ``self.vocab_size`` without calling
        # it — upstream this is a @property; the decorator appears to have
        # been stripped during extraction. Confirm against the original file.
        return len(self.sp_model)
    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """Map a token to its id, honoring the fairseq special-token ids."""
        if (token in self.fairseq_tokens_to_ids):
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # sentencepiece returns 0 for unknown pieces; remap to unk_token_id.
        return (spm_id if spm_id else self.unk_token_id)
    def _convert_id_to_token(self, index):
        """Map an id back to its token, honoring the fairseq special ids."""
        if (index in self.fairseq_ids_to_tokens):
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        """Decode a token list back to text, passing special tokens through."""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            if (token in self.all_special_tokens):
                # Flush accumulated pieces, then emit the special token as-is.
                if (not prev_is_special):
                    out_string += ' '
                out_string += (self.sp_model.decode(current_sub_tokens) + token)
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it and reload in
        # __setstate__ from self.vocab_file.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # Backward compatibility with pickles created before sp_model_kwargs.
        if (not hasattr(self, 'sp_model_kwargs')):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Copy (or re-serialize) the sentencepiece model into save_directory."""
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        if ((os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)) and os.path.isfile(self.vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        elif (not os.path.isfile(self.vocab_file)):
            # Original file is gone; write the in-memory serialized model.
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
def download_cub_metadata(to_path):
    """Ensure ACSM's cleaned CUB validation ``.mat`` file exists under
    ``to_path``, downloading it if missing, and return its path.

    NOTE(review): the URL literal below is truncated (unterminated f-string);
    it appears to have been stripped during extraction and must be restored
    from the original source before this function can run.
    """
    acsm_val_mat_path = f'{to_path}/val_cub_cleaned.mat'
    if (not os.path.isfile(acsm_val_mat_path)):
        acsm_val_mat_url = f'
        print("Downloading metadata used to form ACSM's CUB validation set")
        download_url(acsm_val_mat_url, to_path)
    else:
        print('Found pre-existing CUB metadata')
    return acsm_val_mat_path
class COCOInstanceNewBaselineDatasetMapper():
    """Detectron2-style dataset mapper: reads a COCO instance dict, applies
    the configured TransformGens, and produces model-ready tensors plus
    Instances ground truth.
    """
    def __init__(self, is_train=True, *, tfm_gens, image_format, mask_format):
        # tfm_gens: list of TransformGen applied to the image (and, derived
        # from it, to masks/boxes) during __call__.
        self.tfm_gens = tfm_gens
        logging.getLogger(__name__).info('[COCOInstanceNewBaselineDatasetMapper] Full TransformGens used in training: {}'.format(str(self.tfm_gens)))
        self.img_format = image_format
        self.mask_format = mask_format
        self.is_train = is_train
    def from_config(cls, cfg, is_train=True):
        # NOTE(review): takes ``cls`` but has no visible @classmethod (or
        # @configurable) decorator — it appears to have been stripped during
        # extraction; confirm against the original file.
        tfm_gens = build_transform_gen(cfg, is_train)
        ret = {'is_train': is_train, 'tfm_gens': tfm_gens, 'image_format': cfg.INPUT.FORMAT, 'mask_format': cfg.INPUT.MASK_FORMAT}
        return ret
    def __call__(self, dataset_dict):
        """Map one dataset dict to training/inference format (in a deep copy)."""
        dataset_dict = copy.deepcopy(dataset_dict)
        image = utils.read_image(dataset_dict['file_name'], format=self.img_format)
        utils.check_image_size(dataset_dict, image)
        # Track which pixels become padding under the transforms: start with
        # all-ones, transform it like a segmentation, then invert.
        padding_mask = np.ones(image.shape[:2])
        (image, transforms) = T.apply_transform_gens(self.tfm_gens, image)
        padding_mask = transforms.apply_segmentation(padding_mask)
        padding_mask = (~ padding_mask.astype(bool))
        image_shape = image.shape[:2]
        # HWC uint8 -> CHW tensor (no normalization here).
        dataset_dict['image'] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
        dataset_dict['padding_mask'] = torch.as_tensor(np.ascontiguousarray(padding_mask))
        if (not self.is_train):
            dataset_dict.pop('annotations', None)
            return dataset_dict
        if ('annotations' in dataset_dict):
            for anno in dataset_dict['annotations']:
                # Keypoints are not used by this mapper.
                anno.pop('keypoints', None)
            # Transform annotations; drop crowd regions.
            annos = [utils.transform_instance_annotations(obj, transforms, image_shape) for obj in dataset_dict.pop('annotations') if (obj.get('iscrowd', 0) == 0)]
            instances = utils.annotations_to_instances(annos, image_shape, mask_format=self.mask_format)
            # Recompute boxes from the (transformed) masks for tightness.
            instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
            instances = utils.filter_empty_instances(instances)
            (h, w) = instances.image_size
            if hasattr(instances, 'gt_masks'):
                gt_masks = instances.gt_masks
                if (self.mask_format == 'polygons'):
                    # Rasterize polygon masks into dense bitmasks.
                    gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w)
                else:
                    gt_masks = gt_masks.tensor
                instances.gt_masks = gt_masks
            dataset_dict['instances'] = instances
        return dataset_dict
class GRU(nn.Module):
    """GRU cell over point-cloud features, with the update (z), reset (r) and
    candidate (q) gates each realized by a PointNet++ set-abstraction layer.

    Args:
        npoint: number of input points (each gate abstracts npoint // 4).
        hidden_dim: hidden-state channel count.
        input_dim: input-feature channel count.
        use_instance_norm: forwarded to PointNetSetAbstraction.
    """
    def __init__(self, npoint, hidden_dim, input_dim, use_instance_norm):
        super(GRU, self).__init__()
        in_ch = (hidden_dim + input_dim)

        def make_gate():
            # All three gates share the exact same configuration; build them
            # through one factory instead of three copy-pasted constructors.
            return PointNetSetAbstraction(npoint=int((npoint / 4)), radius=None, nsample=4, in_channel=in_ch, mlp=[hidden_dim], group_all=False, use_act=False, use_instance_norm=use_instance_norm)

        self.convz = make_gate()
        self.convr = make_gate()
        self.convq = make_gate()

    def forward(self, h, x, pc):
        """Standard GRU update; each gate consumes [state, input] features
        concatenated along the channel dim, conditioned on point cloud pc."""
        hx = torch.cat([h, x], dim=1)
        z = torch.sigmoid(self.convz(pc, hx)[1])
        r = torch.sigmoid(self.convr(pc, hx)[1])
        q = torch.tanh(self.convq(pc, torch.cat([(r * h), x], dim=1))[1])
        h = (((1 - z) * h) + (z * q))
        return h
def _flatten(dico, prefix=None):
new_dico = OrderedDict()
if isinstance(dico, dict):
prefix = ((prefix + '.') if (prefix is not None) else '')
for (k, v) in dico.items():
if (v is None):
continue
new_dico.update(_flatten(v, (prefix + k)))
elif isinstance(dico, list):
for (i, v) in enumerate(dico):
new_dico.update(_flatten(v, (((prefix + '.[') + str(i)) + ']')))
else:
new_dico = OrderedDict({prefix: dico})
return new_dico |
def inspect_client_error(val_err: ValueError, eth_node: Optional[EthClient]) -> ClientErrorInspectResult:
    """Classify a JSON-RPC ValueError raised by the underlying eth client.

    Returns the matching ClientErrorInspectResult for known geth/parity error
    codes and messages, or PROPAGATE_ERROR when the payload is unknown or
    unparseable.
    """
    # The exception's repr is almost-JSON; massage quotes so it parses.
    json_response = str(val_err).replace("'", '"').replace('("', '(').replace('")', ')')
    try:
        error = json.loads(json_response)
    except json.JSONDecodeError:
        return ClientErrorInspectResult.PROPAGATE_ERROR
    # Robustness fix: non-dict payloads or missing 'code'/'message' keys used
    # to raise TypeError/KeyError here instead of propagating.
    if not isinstance(error, dict):
        return ClientErrorInspectResult.PROPAGATE_ERROR
    code = error.get('code')
    message = error.get('message', '')
    if (eth_node is EthClient.GETH):
        if (code == (- 32000)):
            if ('insufficient funds' in message):
                return ClientErrorInspectResult.INSUFFICIENT_FUNDS
            if (('always failing transaction' in message) or ('execution reverted' in message) or ('invalid opcode: opcode 0xfe not defined' in message)):
                return ClientErrorInspectResult.ALWAYS_FAIL
            if ('replacement transaction underpriced' in message):
                return ClientErrorInspectResult.TRANSACTION_UNDERPRICED
            if ('known transaction:' in message):
                return ClientErrorInspectResult.TRANSACTION_PENDING
            if ('already know' in message):
                return ClientErrorInspectResult.TRANSACTION_PENDING
            if ('nonce too low' in message):
                return ClientErrorInspectResult.TRANSACTION_ALREADY_IMPORTED
    elif (eth_node is EthClient.PARITY):
        if (code == (- 32010)):
            if ('Insufficient funds' in message):
                return ClientErrorInspectResult.INSUFFICIENT_FUNDS
            if ('another transaction with same nonce in the queue' in message):
                return ClientErrorInspectResult.TRANSACTION_UNDERPRICED
            if ('Transaction nonce is too low. Try incrementing the nonce.' in message):
                return ClientErrorInspectResult.TRANSACTION_PENDING_OR_ALREADY_IMPORTED
            if ('Transaction with the same hash was already imported' in message):
                return ClientErrorInspectResult.TRANSACTION_PENDING_OR_ALREADY_IMPORTED
        elif ((code == (- 32015)) and ('Transaction execution error' in message)):
            return ClientErrorInspectResult.ALWAYS_FAIL
    return ClientErrorInspectResult.PROPAGATE_ERROR
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, conv_builder, stride=1, downsample=None):
super(Bottleneck, self).__init__()
midplanes = (((((inplanes * planes) * 3) * 3) * 3) // (((inplanes * 3) * 3) + (3 * planes)))
self.conv1 = nn.Sequential(nn.Conv3d(inplanes, planes, kernel_size=1, bias=False), nn.BatchNorm3d(planes), nn.ReLU(inplace=True))
self.conv2 = nn.Sequential(conv_builder(planes, planes, midplanes, stride), nn.BatchNorm3d(planes), nn.ReLU(inplace=True))
self.conv3 = nn.Sequential(nn.Conv3d(planes, (planes * self.expansion), kernel_size=1, bias=False), nn.BatchNorm3d((planes * self.expansion)))
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
if (self.downsample is not None):
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out |
('/usb_devices.html', methods=['GET'])
def usb_devices():
    """Render the USB devices page from the output of ``lsusb``.

    Each well-formed ``lsusb`` line looks like
    ``Bus 001 Device 002: ID 8087:0024 Intel Corp. ...`` and is parsed into
    bus/device numbers, the 9-char vendor:product id and a description.
    """
    devices = []
    for line in subprocess.getoutput('lsusb').split('\n'):
        # Robustness fix: skip malformed lines instead of raising IndexError;
        # also parse each line once rather than re-splitting it five times.
        if ': ID ' not in line:
            continue
        location, _, identity = line.partition(': ID ')
        fields = location.split(' ')
        devices.append({
            'bus': fields[1],
            'device': fields[3],
            'id': identity[:9],           # vendor:product, e.g. "8087:0024"
            'description': identity[10:],
        })
    return render_template('usb_devices.html', devices=devices)
class MessageView(QWidget):
    """Widget stacking transient UI messages, auto-cleared by a timer.

    NOTE(review): the bare ``_filter(...)``, ``()`` and
    ``(message.MessageInfo)`` lines below look like decorator expressions
    whose ``@``/prefix (config change filter / pyqtSlot) was lost during
    extraction — confirm against the original file.
    """
    # Emitted whenever the widget's height may have changed.
    update_geometry = pyqtSignal()
    def __init__(self, parent=None):
        super().__init__(parent)
        self._messages: MutableSequence[Message] = []
        self._vbox = QVBoxLayout(self)
        self._vbox.setContentsMargins(0, 0, 0, 0)
        self._vbox.setSpacing(0)
        self.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Fixed)
        self._clear_timer = QTimer()
        self._clear_timer.timeout.connect(self.clear_messages)
        config.instance.changed.connect(self._set_clear_timer_interval)
        # Last shown MessageInfo; used to suppress immediate duplicates.
        self._last_info = None
    _filter('messages.timeout')
    def _set_clear_timer_interval(self):
        """Refresh the auto-clear interval from config, scaled by message count."""
        interval = config.val.messages.timeout
        if (interval > 0):
            # Give the user more reading time when messages are stacked.
            interval *= min(5, len(self._messages))
        self._clear_timer.setInterval(interval)
    def _remove_message(self, widget):
        """Detach and destroy a single message widget."""
        self._vbox.removeWidget(widget)
        widget.hide()
        widget.deleteLater()
    ()
    def clear_messages(self):
        """Hide and delete all currently shown messages."""
        for widget in self._messages:
            self._remove_message(widget)
        self._messages = []
        self._last_info = None
        self.hide()
        self._clear_timer.stop()
    (message.MessageInfo)
    def show_message(self, info: message.MessageInfo) -> None:
        """Show a message, deduplicating or replacing an existing one."""
        if (info == self._last_info):
            # Identical message is already the latest; don't stack duplicates.
            return
        if (info.replace is not None):
            # Update a matching replaceable message in place if present.
            existing = [msg for msg in self._messages if (msg.replace == info.replace)]
            if existing:
                assert (len(existing) == 1), existing
                existing[0].update_from_info(info)
                self.update_geometry.emit()
                return
        widget = Message.from_info(info)
        self._vbox.addWidget(widget)
        widget.show()
        self._messages.append(widget)
        self._last_info = info
        self.show()
        self.update_geometry.emit()
        if (config.val.messages.timeout != 0):
            # (Re)start the auto-clear countdown.
            self._set_clear_timer_interval()
            self._clear_timer.start()
    def mousePressEvent(self, e):
        """Dismiss all messages on any main-button click inside the widget."""
        if (e.button() in [Qt.MouseButton.LeftButton, Qt.MouseButton.MiddleButton, Qt.MouseButton.RightButton]):
            self.clear_messages()
class MultiSimilarityLoss(GenericPairLoss):
    """Multi-Similarity loss (Wang et al., CVPR 2019).

    Weighs positive/negative pair margins around a similarity threshold
    ``base`` with temperatures ``alpha`` (positives) and ``beta`` (negatives).
    """

    def __init__(self, alpha=2, beta=50, base=0.5, **kwargs):
        super().__init__(mat_based_loss=True, **kwargs)
        self.alpha = alpha
        self.beta = beta
        self.base = base
        self.add_to_recordable_attributes(list_of_names=['alpha', 'beta', 'base'], is_stat=False)

    def _compute_loss(self, mat, pos_mask, neg_mask):
        # Margins of each pair similarity relative to the base threshold.
        positive_margin = self.distance.margin(mat, self.base)
        negative_margin = self.distance.margin(self.base, mat)
        # Soft-max-style aggregation over each row's masked pairs.
        positive_term = (1.0 / self.alpha) * lmu.logsumexp(self.alpha * positive_margin, keep_mask=pos_mask.bool(), add_one=True)
        negative_term = (1.0 / self.beta) * lmu.logsumexp(self.beta * negative_margin, keep_mask=neg_mask.bool(), add_one=True)
        per_element = positive_term + negative_term
        return {'loss': {'losses': per_element, 'indices': c_f.torch_arange_from_size(mat), 'reduction_type': 'element'}}

    def get_default_distance(self):
        """Cosine similarity is the canonical distance for this loss."""
        return CosineSimilarity()
def test_multiprocessing_showcase():
    """Showcase: measure RSS/USS/PSS of a joblib fan-out with memory_usage,
    including child processes.

    NOTE(review): ``func`` intentionally allocates a large array and each
    worker copies it twice — presumably to create measurable per-process
    memory; the duplicated ``aa = a.copy()`` and the stray ``pass`` look
    deliberate load/structure, confirm before "cleaning" them.
    """
    import numpy as np
    import joblib
    import time
    import datetime
    def func():
        # Fan out n_jobs workers, each holding private copies of ``a``.
        n_jobs = 8
        size = 3000
        print('Creating data: {size}x{size} ... '.format(size=size), end='')
        a = np.random.random((size, size))
        print('done ({size:.02f} Gb). '.format(size=((a.size * a.itemsize) / (1024 ** 3))), end='')
        def subprocess(i):
            # Copy the shared array (twice) and linger so sampling can see it.
            aa = a.copy()
            r = aa[(1, 1)]
            aa = a.copy()
            time.sleep(10)
            return r
        pass
        start = datetime.datetime.now()
        print('Starting processing: n_jobs={n_jobs} ... '.format(n_jobs=n_jobs), end='')
        results = joblib.Parallel(n_jobs=n_jobs)((joblib.delayed(subprocess)(i) for i in range(n_jobs)))
        print('done ({}). '.format((datetime.datetime.now() - start)), end='')
        return results
    # Same workload measured with three different psutil-based backends.
    rss = memory_usage(proc=func, max_usage=True, backend='psutil', include_children=True, multiprocess=True)
    print('RSS: {rss:.02f}'.format(rss=rss))
    uss = memory_usage(proc=func, max_usage=True, backend='psutil_uss', include_children=True, multiprocess=True)
    print('USS: {uss:.02f}'.format(uss=uss))
    pss = memory_usage(proc=func, max_usage=True, backend='psutil_pss', include_children=True, multiprocess=True)
    print('PSS: {pss:.02f}'.format(pss=pss))
class Votes(models.Model):
    """Tortoise-ORM model for per-user vote tracking.

    Field semantics below are inferred from the field names — verify against
    call sites.
    """
    class Meta():
        table = 'votes'
    # Primary key.
    user_id = fields.BigIntField(pk=True)
    # Indexed flag: user currently counts as a voter (presumably).
    is_voter = fields.BooleanField(default=False, index=True)
    # Nullable expiry timestamp for the current vote.
    expire_time = fields.DatetimeField(null=True)
    reminder = fields.BooleanField(default=False)
    # Indexed flag, presumably whether an expiry notification was sent.
    notified = fields.BooleanField(default=False, index=True)
    public_profile = fields.BooleanField(default=True)
    # Running counter, defaults to 0.
    total_votes = fields.IntField(default=0)
def create_unet_backbone(bottom_up_layers: List[BottomUpLayer], layers_start: int, layers_end: int, top_down_stack: TopDownStackInterface) -> Nodes:
    """Build the top-down (decoder) path of a U-Net over the given encoder layers.

    Starting from the deepest selected bottom-up layer, each shallower layer's
    skip tensor is expanded-and-combined into the top-down stream; below
    ``layers_start`` the stream is expanded alone with halving filter counts.
    Returns a node dict keyed by NODE_PENULTIMATE.
    """
    # Seed the decoder with the deepest selected encoder output.
    top_down = bottom_up_layers[layers_end - 1].tensor
    # Walk back up through the remaining selected layers, merging skips.
    for layer_index in range(layers_end - 2, layers_start - 1, -1):
        skip = bottom_up_layers[layer_index]
        top_down = top_down_stack.expand_and_combine(skip.tensor, top_down, skip.num_channels, layer_index)
    # Below layers_start there are no skips: expand only, halving filters
    # once per level.
    base_channels = bottom_up_layers[layers_start].num_channels
    for layer_index in range(layers_start - 1, -1, -1):
        top_down = top_down_stack.expand(top_down, base_channels // (2 ** (layers_start - layer_index)), layer_index)
    top_down = top_down_stack.optional_post_activation(top_down)
    return {NODE_PENULTIMATE: top_down}
class TMPEGInfo(TestCase):
    """MPEGInfo parser edge cases: truncated files, empty input, and a Xing
    'Info' header with an unknown frame count."""
    def test_not_real_file(self):
        # Only the first 20 bytes of a real MP3: header parsing must fail.
        filename = os.path.join(DATA_DIR, 'silence-44-s-v1.mp3')
        with open(filename, 'rb') as h:
            fileobj = BytesIO(h.read(20))
        self.failUnlessRaises(MP3Error, MPEGInfo, fileobj)
    def test_empty(self):
        fileobj = BytesIO(b'')
        self.failUnlessRaises(MP3Error, MPEGInfo, fileobj)
    def test_xing_unknown_framecount(self):
        # Hand-built frame with an 'Info' (Xing) header; bitrate must come
        # from the frame header, length stays positive.
        # NOTE(review): the literal contains '\\x00' (a backslash followed by
        # x00) mid-stream — presumably intentional fixture bytes; confirm
        # against the original test data.
        frame = b'\xff\xfb\xe4\x0c\x00\x0f\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00Info\x00\x00\x00\x02\x00\\x00\xb4R\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        fileobj = BytesIO(frame)
        info = MPEGInfo(fileobj)
        assert (info.bitrate == 320000)
        assert (info.length > 0)
def compute_cell_extents_grid(bounding_rect=(0.03, 0.03, 0.97, 0.97), num_rows=2, num_cols=6, axis_pad=0.01):
    """Compute matplotlib-style axes extents for a padded grid of cells.

    The bounding rectangle ``(left, bottom, width, height)`` is divided into
    ``num_rows x num_cols`` cells with ``axis_pad`` spacing between and
    around them. Returns a list of ``(left, bottom, width, height)`` tuples,
    ordered top row first, each row left to right.
    """
    (left, bottom, width, height) = bounding_rect
    # Cell size after reserving pad on both sides of every row/column.
    height_padding = (axis_pad * (num_rows + 1))
    width_padding = (axis_pad * (num_cols + 1))
    cell_height = float(((height - height_padding) / num_rows))
    cell_width = float(((width - width_padding) / num_cols))
    # Stride from one cell origin to the next.
    step_y = (cell_height + axis_pad)
    step_x = (cell_width + axis_pad)
    return [
        ((left + (col * step_x)), (bottom + (row * step_y)), cell_width, cell_height)
        for row in range((num_rows - 1), (- 1), (- 1))
        for col in range(num_cols)
    ]
class TestHSAFFileHandler(unittest.TestCase):
    """satpy HSAF GRIB file-handler tests against a mocked pygrib module.

    NOTE(review): the bare ``('satpy.readers.hsaf_grib.pygrib.open', ...)``
    lines below look like ``@mock.patch`` decorators whose ``@mock.patch``
    prefix was lost during extraction — confirm against the original file.
    """
    def setUp(self):
        # Replace pygrib (which may not be installed) with a MagicMock,
        # remembering the real module (or None) for tearDown.
        try:
            import pygrib
        except ImportError:
            pygrib = None
        self.orig_pygrib = pygrib
        sys.modules['pygrib'] = mock.MagicMock()
    def tearDown(self):
        # Restore whatever was in sys.modules before the test.
        sys.modules['pygrib'] = self.orig_pygrib
    ('satpy.readers.hsaf_grib.pygrib.open', return_value=FakeGRIB())
    def test_init(self, pg):
        """Handler init reads analysis time and message metadata from GRIB."""
        pg.open.return_value = FakeGRIB()
        correct_dt = datetime(2019, 6, 3, 16, 45, 0)
        from satpy.readers.hsaf_grib import HSAFFileHandler
        fh = HSAFFileHandler('filename', mock.MagicMock(), mock.MagicMock())
        assert (fh._analysis_time == correct_dt)
        assert (fh.metadata['projparams']['lat_0'] == 0.0)
        assert (fh.metadata['shortName'] == 'irrate')
        assert (fh.metadata['nx'] == 3712)
    ('satpy.readers.hsaf_grib.pygrib.open', return_value=FakeGRIB())
    def test_get_area_def(self, pg):
        """Area definition matches the fake GRIB's geostationary grid."""
        pg.open.return_value = FakeGRIB()
        from satpy.readers.hsaf_grib import HSAFFileHandler
        fh = HSAFFileHandler('filename', mock.MagicMock(), mock.MagicMock())
        area_def = HSAFFileHandler.get_area_def(fh, 'H03B')
        assert (area_def.width == 3712)
        assert (area_def.area_extent[0] == approx((- 5569209.3026), abs=0.001))
        assert (area_def.area_extent[3] == approx(5587721.9097, abs=0.001))
    ('satpy.readers.hsaf_grib.pygrib.open', return_value=FakeGRIB())
    def test_get_dataset(self, pg):
        """get_dataset returns the fake 5x5 field for both product names."""
        pg.open.return_value = FakeGRIB()
        from satpy.readers.hsaf_grib import HSAFFileHandler
        fh = HSAFFileHandler('filename', mock.MagicMock(), mock.MagicMock())
        fh.filename = 'H03B'
        ds_id = make_dataid(name='H03B')
        data = fh.get_dataset(ds_id, mock.Mock())
        np.testing.assert_array_equal(data.values, np.arange(25.0).reshape((5, 5)))
        fh = HSAFFileHandler('filename', mock.MagicMock(), mock.MagicMock())
        fh.filename = 'H05B'
        ds_id = make_dataid(name='H05B')
        data = fh.get_dataset(ds_id, mock.Mock())
        np.testing.assert_array_equal(data.values, np.arange(25.0).reshape((5, 5)))
class Layer(aimet_common.layer_database.Layer):
    """TensorFlow-specific specialization of the common layer-database Layer."""

    def _set_type_specific_params(self, module: tf.Operation):
        """For Conv2D ops, record strides/padding/groups; other op types get none."""
        if (module.type != 'Conv2D'):
            return
        (strides, padding, groups) = aimet_tensorflow.utils.op.conv.get_conv2d_op_params(module)
        self.type_specific_params = aimet_common.layer_database.Conv2dTypeSpecificParams(strides, padding, groups)

    def __init__(self, model: tf.compat.v1.Session, op: tf.Operation, output_shape: List):
        """Wrap a TF operation as a database layer.

        :param model: session containing the op's graph
        :param op: the TF operation this layer represents
        :param output_shape: output activation shape of the op
        """
        self.model = model
        shape_of_weights = aimet_tensorflow.utils.op.conv.get_weight_shape(op)
        aimet_common.layer_database.Layer.__init__(self, module=op, name=op.name, weight_shape=shape_of_weights, output_shape=output_shape)
# NOTE(review): the decorator was mangled in the source to a bare
# `_required(...)` call; restored as Django's permission check. Confirm
# `permission_required` is imported from django.contrib.auth.decorators.
@permission_required('wiki.add_wikifile', raise_exception=True)
def wiki_file_list(request):
    """Upload a wiki attachment (POST) or render the attachment list page (GET).

    POST returns a JSON payload describing the stored file; any failure is
    reported as a JSON error (code 500) rather than raising.
    """
    if (request.method == 'POST'):
        try:
            wiki_file = WikiFile.objects.create(upload_user=request.user, wiki_file=request.FILES.get('upload_wiki_file'))
            # Only the basename of the stored path is shown to the user.
            file_name = wiki_file.wiki_file.name.split('/')[(- 1)]
            res = {'code': 200, 'message': '!', 'id': wiki_file.id, 'file_name': file_name, 'upload_time': wiki_file.upload_time, 'upload_user': wiki_file.upload_user.username}
            return JsonResponse(res)
        except Exception as e:
            # Best-effort: surface the failure to the client instead of a 500 page.
            return JsonResponse({'code': 500, 'message': '!{}'.format(e)})
    else:
        wiki_files = WikiFile.objects.all()
        # `locals()` exposes `wiki_files` to the template.
        return render(request, 'wiki/wiki_file.html', locals())
# NOTE(review): the bare `_singleton` line was a mangled decorator; restored
# so the model is constructed only once per process.
@_singleton
def loadModel(device):
    """Download (or reuse the cached) checkpoint and build the compressor.

    :param device: torch device the weights are mapped to and the model moved to
    :return: a ``Compressor`` with the checkpoint's weights loaded
    """
    ckpt = torch.hub.load_state_dict_from_url(MODELS_URL, map_location=device, check_hash=True)
    config = Config.deserialize(ckpt['config'])
    model = Compressor(**config.Model.Params).to(device)
    # Select the qp=2 / MS-SSIM operating point before loading weights.
    model.QuantizationParameter = 'qp_2_msssim'
    model.load_state_dict(ckpt['model'])
    return model
def generate_intermediate_ca(opts, parent_certificate_path=p.root_ca_certificate_path(), parent_key_path=p.root_ca_key_path(), suffix=''):
    """Create an intermediate CA: private key, CSR, and a certificate signed by the parent CA.

    NOTE(review): the default parent paths are computed once at import time
    (mutable-default-style pitfall); confirm that is intended.
    """
    print('Will generate intermediate CA with suffix {}'.format(suffix))
    print('Using parent certificate path at {}'.format(parent_certificate_path))
    print('Using parent key path at {}'.format(parent_key_path))
    prepare_ca_directory(p.intermediate_ca_path(suffix))
    # Step 1: generate the private key (EC or RSA), optionally AES-256 protected.
    if opts.use_ecc:
        print('Will use Elliptic Curve Cryptography...')
        genpkey_args = ['-algorithm', 'EC', '-outform', 'PEM', '-out', p.intermediate_ca_key_path(suffix), '-pkeyopt', 'ec_paramgen_curve:{}'.format(opts.ecc_curve)]
    else:
        print('Will use RSA...')
        genpkey_args = ['-algorithm', 'RSA', '-outform', 'PEM', '-out', p.intermediate_ca_key_path(suffix), '-pkeyopt', 'rsa_keygen_bits:{}'.format(str(opts.key_bits))]
    if (len(opts.password) > 0):
        genpkey_args.extend(['-aes256', '-pass', 'pass:{}'.format(opts.password)])
    openssl_genpkey(*genpkey_args)
    # Step 2: create a CSR for the intermediate CA.
    req_args = ['-new', '-key', p.intermediate_ca_key_path(suffix), '-out', p.intermediate_ca_certificate_csr_path(suffix), '-subj', '/CN={}/O={}/L=$$$$/'.format(opts.common_name, 'Intermediate CA {}'.format(suffix))]
    if (len(opts.password) > 0):
        req_args.extend(['-passin', 'pass:{}'.format(opts.password), '-passout', 'pass:{}'.format(opts.password)])
    else:
        req_args.append('-nodes')
    openssl_req(opts, *req_args)
    # Step 3: have the parent CA sign the CSR with the CA extensions.
    ca_args = ['-days', str(opts.validity_days), '-cert', parent_certificate_path, '-keyfile', parent_key_path, '-in', p.intermediate_ca_certificate_csr_path(suffix), '-out', p.intermediate_ca_certificate_path(suffix), '-outdir', p.intermediate_ca_certs_path(suffix), '-notext', '-batch', '-extensions', 'ca_extensions']
    if (len(opts.password) > 0):
        ca_args.extend(['-passin', 'pass:{}'.format(opts.password)])
    openssl_ca(opts, *ca_args)
def _add_metric_pages(html_file, run_output_paths, report_config, data_frame):
    """Write histogram pages for every metric column: non-SPICE first, then SPICE.

    The two sections were duplicated loop bodies; factored into one helper.
    """
    def _add_section(header, column_names):
        # One header followed by a metric page per column in the section.
        _write_header(html_file, header)
        for column_name in column_names:
            bins = report_config.histogram_bins[column_name]
            metric_data = MetricData(data_frame, column_name, bins)
            _add_metric_page(html_file, run_output_paths, metric_data)

    _add_section('Distribution of different measures (except SPICE-related, which come later below)', COLUMNS_FOR_HISTOGRAM_NON_SPICE)
    _add_section('Distribution of SPICE-related measures', COLUMNS_FOR_HISTOGRAM_SPICE)
class NetModule():
    """Client-side network container for federated training.

    Builds a ResNet9-style Keras model over 32x32x3 inputs and handles
    saving/loading of per-client state as ``.npy`` checkpoints. For the
    'fedmatch' model, every trainable kernel is decomposed into a ``sigma``
    variable plus a ``psi`` variable (psi initialized as sigma * psi_factor).
    """
    def __init__(self, args):
        # args carries the run configuration (num_classes, seed, check_pts,
        # model name, psi_factor, l1_thres, wd, ...).
        self.args = args
        self.input_shape = (32, 32, 3)
        # Kernel shapes: 8 conv layers followed by the final dense layer.
        self.shapes = [(3, 3, 3, 64), (3, 3, 64, 128), (3, 3, 128, 128), (3, 3, 128, 128), (3, 3, 128, 256), (3, 3, 256, 512), (3, 3, 512, 512), (3, 3, 512, 512), (512, self.args.num_classes)]
        self.layers = {}
        # Serializes model construction (build_resnet9 acquires this lock).
        self.lock = threading.Lock()
        self.initializer = tf_initializers.VarianceScaling(seed=self.args.seed)
    def init_state(self, client_id):
        """Start a fresh state dict for the given client."""
        self.state = {'client_id': client_id}
    def load_state(self, client_id):
        """Load the client's checkpoint; restore psi/sigma variables for fedmatch."""
        self.state = np_load(self.args.check_pts, f'{client_id}_net.npy')
        if (self.args.model in ['fedmatch']):
            for (i, psi) in enumerate(self.state['psi']):
                self.psi[i].assign(psi)
                self.sigma[i].assign(self.state['sigma'][i])
    def save_state(self):
        """Persist the client state (including current psi/sigma for fedmatch)."""
        if (self.args.model in ['fedmatch']):
            self.state['psi'] = [psi.numpy() for psi in self.psi]
            self.state['sigma'] = [sigma.numpy() for sigma in self.sigma]
        np_save(self.args.check_pts, '{}_net.npy'.format(self.state['client_id']), self.state)
    def build_resnet9(self, decomposed):
        """Build and return the ResNet9 Keras model.

        When ``decomposed`` is True, conv/dense kernels are Decomposed layers
        parameterized by (sigma, psi); otherwise plain Keras layers are used
        and the resulting weights are scaled by (1 + psi_factor) so both
        variants start from an equivalent effective kernel.
        """
        self.lock.acquire()
        if decomposed:
            # One (sigma, psi) pair per layer shape; psi starts at sigma * psi_factor.
            self.sigma = [self.create_variable(name='sigma_{}'.format(i), shape=shape) for (i, shape) in enumerate(self.shapes)]
            self.psi = [self.create_variable(name='psi_{}'.format(i), shape=shape) for (i, shape) in enumerate(self.shapes)]
            for (i, sigma) in enumerate(self.sigma):
                self.psi[i].assign((sigma.numpy() * self.args.psi_factor))
            # Running layer id used to pick the matching sigma/psi pair.
            self.lid = 0
            def conv_block(in_channels, out_channels, pool=False, pool_no=2):
                # Decomposed conv + ReLU, with optional max-pooling.
                self.layers[self.lid] = self.conv_decomposed(self.lid, out_channels, (3, 3), (1, 1), 'same', None)
                layers = [self.layers[self.lid], tf.keras.layers.ReLU()]
                self.lid += 1
                if pool:
                    layers.append(tf_layers.MaxPooling2D(pool_size=(pool_no, pool_no)))
                return tf_models.Sequential(layers)
            inputs = tf_keras.Input(shape=self.input_shape)
            out = conv_block(self.input_shape[(- 1)], 64)(inputs)
            out = conv_block(64, 128, pool=True, pool_no=2)(out)
            # Residual block: two convs plus the skip connection.
            out = (tf_models.Sequential([conv_block(128, 128), conv_block(128, 128)])(out) + out)
            out = conv_block(128, 256, pool=True)(out)
            out = conv_block(256, 512, pool=True, pool_no=2)(out)
            out = (tf_models.Sequential([conv_block(512, 512), conv_block(512, 512)])(out) + out)
            # Head: pool, flatten, decomposed dense classifier (layer id 8).
            out = tf_models.Sequential([tf_layers.MaxPooling2D(pool_size=4), tf_layers.Flatten(), self.dense_decomposed(8, self.args.num_classes, 'softmax')])(out)
            model = tf_keras.Model(inputs=inputs, outputs=out)
        else:
            def conv_block(in_channels, out_channels, pool=False, pool_no=2):
                # Plain Keras conv + ReLU, with optional max-pooling.
                layers = [tf_layers.Conv2D(out_channels, kernel_size=(3, 3), padding='same', use_bias=False, strides=(1, 1), kernel_initializer=self.initializer, kernel_regularizer=tf_regularizers.l2(self.args.wd)), tf.keras.layers.ReLU()]
                if pool:
                    layers.append(tf_layers.MaxPooling2D(pool_size=(pool_no, pool_no)))
                return tf_models.Sequential(layers)
            inputs = tf_keras.Input(shape=self.input_shape)
            out = conv_block(self.input_shape[(- 1)], 64)(inputs)
            out = conv_block(64, 128, pool=True, pool_no=2)(out)
            out = (tf_models.Sequential([conv_block(128, 128), conv_block(128, 128)])(out) + out)
            out = conv_block(128, 256, pool=True)(out)
            out = conv_block(256, 512, pool=True, pool_no=2)(out)
            out = (tf_models.Sequential([conv_block(512, 512), conv_block(512, 512)])(out) + out)
            out = tf_models.Sequential([tf_layers.MaxPooling2D(pool_size=4), tf_layers.Flatten(), tf_layers.Dense(self.args.num_classes, use_bias=False, activation='softmax')])(out)
            model = tf_keras.Model(inputs=inputs, outputs=out)
            # Scale plain weights by (1 + psi_factor) to mirror the
            # decomposed initialization (sigma + sigma * psi_factor).
            wgts = model.get_weights()
            for (i, w) in enumerate(wgts):
                wgts[i] = (w * (1 + self.args.psi_factor))
            model.set_weights(wgts)
        self.lock.release()
        return model
    def set_init_params(self):
        """Share one sigma initialization across runs via misc/init_sig.npy.

        If the file exists, load it into sigma/psi; otherwise save the
        current sigma values so subsequent runs start identically.
        """
        base_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
        # Parent directory of this module's directory.
        base_path = '/'.join(base_path.split('/')[:(- 1)])
        sys.path.insert(0, base_path)
        if os.path.exists(os.path.join(base_path, 'misc/init_sig.npy')):
            init_sig = np.load(os.path.join(base_path, 'misc/init_sig.npy'), allow_pickle=True)
            for (lid, sig) in enumerate(init_sig):
                self.sigma[lid].assign(sig)
                self.psi[lid].assign((sig * self.args.psi_factor))
        else:
            np_save(base_path, 'misc/init_sig.npy', [sig.numpy() for sig in self.sigma])
    def conv_decomposed(self, lid, filters, kernel_size, strides, padding, acti):
        """Build a DecomposedConv layer bound to sigma/psi pair ``lid``."""
        return DecomposedConv(name='layer-{}'.format(lid), filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, activation=acti, use_bias=False, sigma=self.sigma[lid], psi=self.psi[lid], l1_thres=self.args.l1_thres, kernel_regularizer=tf_regularizers.l2(self.args.wd))
    def dense_decomposed(self, lid, units, acti):
        """Build a DecomposedDense layer bound to sigma/psi pair ``lid``."""
        return DecomposedDense(name='layer-{}'.format(lid), units=units, activation=acti, use_bias=False, sigma=self.sigma[lid], psi=self.psi[lid], l1_thres=self.args.l1_thres)
    def create_variable(self, name, shape):
        """Create a trainable tf.Variable initialized by the seeded initializer."""
        return tf.Variable(self.initializer(shape), name=name)
    def get_psi(self):
        """Return the list of psi (personalized) variables."""
        return self.psi
    def get_sigma(self):
        """Return the list of sigma (shared) variables."""
        return self.sigma
# NOTE(review): the orphaned `.fast` line was a mangled decorator; restored
# as the pytest marker used to tag quick tests.
@pytest.mark.fast
def test_non_air_diluent(verbose=True, plot=False, *args, **kwargs):
    """Lorentzian HWHM must grow monotonically as CO2 replaces air as diluent,
    and an unknown diluent must raise/warn MissingDiluentBroadeningWarning."""
    sf = SpectrumFactory(wavelength_min=4200, wavelength_max=4500, cutoff=1e-23, molecule='CO', isotope='1,2', truncation=5, neighbour_lines=10, path_length=0.1, mole_fraction=0.1, medium='vacuum', optimization=None, verbose=verbose)
    sf.warnings.update({'MissingSelfBroadeningWarning': 'ignore', 'NegativeEnergiesWarning': 'ignore', 'LinestrengthCutoffWarning': 'ignore', 'HighTemperatureWarning': 'ignore', 'AccuracyWarning': 'ignore', 'PerformanceWarning': 'ignore'})
    sf.fetch_databank('hitran', load_columns=['diluent', 'equilibrium'], extra_params='all', db_use_cached='regen')
    sf.warnings['MissingDiluentBroadeningWarning'] = 'error'
    sf.warnings['MissingDiluentBroadeningTdepWarning'] = 'error'
    # Default: remaining 0.9 mole fraction is air.
    sf.eq_spectrum(Tgas=2000)
    wl1 = sf.df1['hwhm_lorentz']
    assert (sf._diluent == {'air': 0.9})
    # Mixed CO2/air diluent.
    sf.eq_spectrum(Tgas=2000, diluent={'CO2': 0.4, 'air': 0.5})
    wl2 = sf.df1['hwhm_lorentz']
    assert (sf._diluent == {'CO2': 0.4, 'air': 0.5})
    # Pure CO2 diluent.
    sf.eq_spectrum(Tgas=2000, diluent='CO2')
    wl3 = sf.df1['hwhm_lorentz']
    assert (sf._diluent == {'CO2': 0.9})
    # CO2 broadens more than air, so HWHM increases with CO2 fraction.
    assert ((wl1 < wl2).all() and (wl2 < wl3).all())
    from radis.misc.warning import MissingDiluentBroadeningWarning
    # Unknown diluent 'X': first configured as an error, then as a warning.
    with pytest.raises(MissingDiluentBroadeningWarning):
        sf.eq_spectrum(Tgas=2000, diluent='X')
    sf.warnings['MissingDiluentBroadeningWarning'] = 'warn'
    sf.warnings['MissingDiluentBroadeningTdepWarning'] = 'warn'
    with pytest.warns(MissingDiluentBroadeningWarning):
        sf.eq_spectrum(Tgas=2000, diluent='X')
class MixNet(nn.Module):
    """MixNet backbone (small/medium/large) with optional quantized first/last layers.

    Each config row is (in_channels, out_channels, kernel_size, expand_ksize,
    project_ksize, stride, expand_ratio, non_linear, se_ratio).
    """
    mixnet_s = [(16, 16, [3], [1], [1], 1, 1, 'ReLU', 0.0), (16, 24, [3], [1, 1], [1, 1], 2, 6, 'ReLU', 0.0), (24, 24, [3], [1, 1], [1, 1], 1, 3, 'ReLU', 0.0), (24, 40, [3, 5, 7], [1], [1], 2, 6, 'Swish', 0.5), (40, 40, [3, 5], [1, 1], [1, 1], 1, 6, 'Swish', 0.5), (40, 40, [3, 5], [1, 1], [1, 1], 1, 6, 'Swish', 0.5), (40, 40, [3, 5], [1, 1], [1, 1], 1, 6, 'Swish', 0.5), (40, 80, [3, 5, 7], [1], [1, 1], 2, 6, 'Swish', 0.25), (80, 80, [3, 5], [1], [1, 1], 1, 6, 'Swish', 0.25), (80, 80, [3, 5], [1], [1, 1], 1, 6, 'Swish', 0.25), (80, 120, [3, 5, 7], [1, 1], [1, 1], 1, 6, 'Swish', 0.5), (120, 120, [3, 5, 7, 9], [1, 1], [1, 1], 1, 3, 'Swish', 0.5), (120, 120, [3, 5, 7, 9], [1, 1], [1, 1], 1, 3, 'Swish', 0.5), (120, 200, [3, 5, 7, 9, 11], [1], [1], 2, 6, 'Swish', 0.5), (200, 200, [3, 5, 7, 9], [1], [1, 1], 1, 6, 'Swish', 0.5), (200, 200, [3, 5, 7, 9], [1], [1, 1], 1, 6, 'Swish', 0.5)]
    mixnet_m = [(24, 24, [3], [1], [1], 1, 1, 'ReLU', 0.0), (24, 32, [3, 5, 7], [1, 1], [1, 1], 2, 6, 'ReLU', 0.0), (32, 32, [3], [1, 1], [1, 1], 1, 3, 'ReLU', 0.0), (32, 40, [3, 5, 7, 9], [1], [1], 2, 6, 'Swish', 0.5), (40, 40, [3, 5], [1, 1], [1, 1], 1, 6, 'Swish', 0.5), (40, 40, [3, 5], [1, 1], [1, 1], 1, 6, 'Swish', 0.5), (40, 40, [3, 5], [1, 1], [1, 1], 1, 6, 'Swish', 0.5), (40, 80, [3, 5, 7], [1], [1], 2, 6, 'Swish', 0.25), (80, 80, [3, 5, 7, 9], [1, 1], [1, 1], 1, 6, 'Swish', 0.25), (80, 80, [3, 5, 7, 9], [1, 1], [1, 1], 1, 6, 'Swish', 0.25), (80, 80, [3, 5, 7, 9], [1, 1], [1, 1], 1, 6, 'Swish', 0.25), (80, 120, [3], [1], [1], 1, 6, 'Swish', 0.5), (120, 120, [3, 5, 7, 9], [1, 1], [1, 1], 1, 3, 'Swish', 0.5), (120, 120, [3, 5, 7, 9], [1, 1], [1, 1], 1, 3, 'Swish', 0.5), (120, 120, [3, 5, 7, 9], [1, 1], [1, 1], 1, 3, 'Swish', 0.5), (120, 200, [3, 5, 7, 9], [1], [1], 2, 6, 'Swish', 0.5), (200, 200, [3, 5, 7, 9], [1], [1, 1], 1, 6, 'Swish', 0.5), (200, 200, [3, 5, 7, 9], [1], [1, 1], 1, 6, 'Swish', 0.5), (200, 200, [3, 5, 7, 9], [1], [1, 1], 1, 6, 'Swish', 0.5)]

    def __init__(self, net_type='mixnet_s', input_size=224, num_classes=1000, stem_channels=16, feature_size=1536, depth_multiplier=1.0, quan_first=False, quan_last=False, constr_activation=None, bw_act=None):
        super(MixNet, self).__init__()
        self.quan_activation = (constr_activation is not None)
        self.quan_first = quan_first
        self.quan_last = quan_last
        if (net_type == 'mixnet_s'):
            config = self.mixnet_s
            stem_channels = 16
            dropout_rate = 0.2
        elif (net_type == 'mixnet_m'):
            config = self.mixnet_m
            stem_channels = 24
            dropout_rate = 0.25
        elif (net_type == 'mixnet_l'):
            # mixnet_l is mixnet_m scaled up by a 1.3 depth multiplier.
            config = self.mixnet_m
            stem_channels = 24
            depth_multiplier *= 1.3
            dropout_rate = 0.25
        else:
            raise TypeError('Unsupported MixNet type')
        assert ((input_size % 32) == 0)
        if (depth_multiplier != 1.0):
            stem_channels = _RoundChannels((stem_channels * depth_multiplier))
            # BUG FIX: `config` aliases a *class* attribute. The original code
            # scaled it in place (config[i] = ...), so constructing a second
            # instance with a multiplier would compound the scaling. Build a
            # fresh per-instance list instead of mutating the shared one.
            config = [((_RoundChannels((conf[0] * depth_multiplier)), _RoundChannels((conf[1] * depth_multiplier))) + tuple(conf[2:])) for conf in config]
        if self.quan_first:
            self.first_act = (LsqActivation(get_constraint(bw_act, 'weight')) if self.quan_activation else _Identity())
            self.stem_conv = Conv3x3Bn(3, stem_channels, 2, constr_activation=constr_activation, quantize_w=True, bw_act=bw_act)
        else:
            self.stem_conv = Conv3x3Bn(3, stem_channels, 2, constr_activation=constr_activation, quantize_w=False, bw_act=bw_act)
        layers = []
        for (in_channels, out_channels, kernel_size, expand_ksize, project_ksize, stride, expand_ratio, non_linear, se_ratio) in config:
            layers.append(MixNetBlock(in_channels, out_channels, kernel_size=kernel_size, expand_ksize=expand_ksize, project_ksize=project_ksize, stride=stride, expand_ratio=expand_ratio, non_linear=non_linear, se_ratio=se_ratio, constr_activation=constr_activation, bw_act=bw_act))
        self.layers = nn.Sequential(*layers)
        self.head_conv = Conv1x1Bn(config[(- 1)][1], feature_size, constr_activation=constr_activation, bw_act=bw_act)
        self.avgpool = nn.AvgPool2d((input_size // 32), stride=1)
        if self.quan_last:
            self.last_act = (LsqActivation(get_constraint(bw_act, 'weight')) if self.quan_activation else _Identity())
            self.classifier = Linear(feature_size, num_classes)
        else:
            self.classifier = nn.Linear(feature_size, num_classes)
        self.dropout = nn.Dropout(dropout_rate)
        self._initialize_weights()

    def forward(self, x):
        """Stem -> MixNet blocks -> head conv -> pool -> classifier."""
        if self.quan_first:
            x = self.first_act(x)
        x = self.stem_conv(x)
        x = self.layers(x)
        x = self.head_conv(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), (- 1))
        if self.quan_last:
            x = self.last_act(x)
        x = self.classifier(x)
        # NOTE(review): dropout is applied *after* the final linear layer here,
        # which is unusual (typically it precedes the classifier). Behavior
        # kept as-is — confirm before changing, as it affects training.
        x = self.dropout(x)
        return x

    def _initialize_weights(self):
        """He-style init for convs, unit BN, N(0, 0.01) for linear layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
                if (m.bias is not None):
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
class TestCLI(TestCase):
    """End-to-end tests for the jsonschema command-line interface.

    ``cli.open`` is stubbed with an in-memory fake filesystem; a stub
    validator injects predetermined errors, and stdout/stderr/exit codes
    are compared against expectations.
    """

    def run_cli(self, argv, files=None, stdin=StringIO(), exit_code=0, **override):
        """Run the CLI with stubbed files and return (stdout, stderr)."""
        arguments = cli.parse_args(argv)
        arguments.update(override)
        self.assertFalse(hasattr(cli, 'open'))
        cli.open = fake_open((files or {}))
        try:
            (stdout, stderr) = (StringIO(), StringIO())
            actual_exit_code = cli.run(arguments, stdin=stdin, stdout=stdout, stderr=stderr)
        finally:
            del cli.open
        self.assertEqual(actual_exit_code, exit_code, msg=dedent(f'''
            Expected an exit code of {exit_code} != {actual_exit_code}.
            stdout: {stdout.getvalue()}
            stderr: {stderr.getvalue()}
            '''))
        return (stdout.getvalue(), stderr.getvalue())

    def assertOutputs(self, stdout='', stderr='', **kwargs):
        self.assertEqual(self.run_cli(**kwargs), (dedent(stdout), dedent(stderr)))

    def test_invalid_instance(self):
        error = ValidationError('I am an error!', instance=12)
        self.assertOutputs(files=dict(some_schema='{"does not": "matter since it is stubbed"}', some_instance=json.dumps(error.instance)), validator=fake_validator([error]), argv=['-i', 'some_instance', 'some_schema'], exit_code=1, stderr='12: I am an error!\n')

    def test_invalid_instance_pretty_output(self):
        error = ValidationError('I am an error!', instance=12)
        self.assertOutputs(files=dict(some_schema='{"does not": "matter since it is stubbed"}', some_instance=json.dumps(error.instance)), validator=fake_validator([error]), argv=['-i', 'some_instance', '--output', 'pretty', 'some_schema'], exit_code=1, stderr=' ===[ValidationError]===(some_instance)===\n\n I am an error!\n \n ')

    def test_invalid_instance_explicit_plain_output(self):
        error = ValidationError('I am an error!', instance=12)
        self.assertOutputs(files=dict(some_schema='{"does not": "matter since it is stubbed"}', some_instance=json.dumps(error.instance)), validator=fake_validator([error]), argv=['--output', 'plain', '-i', 'some_instance', 'some_schema'], exit_code=1, stderr='12: I am an error!\n')

    def test_invalid_instance_multiple_errors(self):
        instance = 12
        first = ValidationError('First error', instance=instance)
        second = ValidationError('Second error', instance=instance)
        self.assertOutputs(files=dict(some_schema='{"does not": "matter since it is stubbed"}', some_instance=json.dumps(instance)), validator=fake_validator([first, second]), argv=['-i', 'some_instance', 'some_schema'], exit_code=1, stderr=' 12: First error\n 12: Second error\n ')

    def test_invalid_instance_multiple_errors_pretty_output(self):
        instance = 12
        first = ValidationError('First error', instance=instance)
        second = ValidationError('Second error', instance=instance)
        self.assertOutputs(files=dict(some_schema='{"does not": "matter since it is stubbed"}', some_instance=json.dumps(instance)), validator=fake_validator([first, second]), argv=['-i', 'some_instance', '--output', 'pretty', 'some_schema'], exit_code=1, stderr=' ===[ValidationError]===(some_instance)===\n\n First error\n \n ===[ValidationError]===(some_instance)===\n\n Second error\n \n ')

    def test_multiple_invalid_instances(self):
        first_instance = 12
        first_errors = [ValidationError('An error', instance=first_instance), ValidationError('Another error', instance=first_instance)]
        second_instance = 'foo'
        second_errors = [ValidationError('BOOM', instance=second_instance)]
        self.assertOutputs(files=dict(some_schema='{"does not": "matter since it is stubbed"}', some_first_instance=json.dumps(first_instance), some_second_instance=json.dumps(second_instance)), validator=fake_validator(first_errors, second_errors), argv=['-i', 'some_first_instance', '-i', 'some_second_instance', 'some_schema'], exit_code=1, stderr=' 12: An error\n 12: Another error\n foo: BOOM\n ')

    def test_multiple_invalid_instances_pretty_output(self):
        first_instance = 12
        first_errors = [ValidationError('An error', instance=first_instance), ValidationError('Another error', instance=first_instance)]
        second_instance = 'foo'
        second_errors = [ValidationError('BOOM', instance=second_instance)]
        self.assertOutputs(files=dict(some_schema='{"does not": "matter since it is stubbed"}', some_first_instance=json.dumps(first_instance), some_second_instance=json.dumps(second_instance)), validator=fake_validator(first_errors, second_errors), argv=['--output', 'pretty', '-i', 'some_first_instance', '-i', 'some_second_instance', 'some_schema'], exit_code=1, stderr=' ===[ValidationError]===(some_first_instance)===\n\n An error\n \n ===[ValidationError]===(some_first_instance)===\n\n Another error\n \n ===[ValidationError]===(some_second_instance)===\n\n BOOM\n \n ')

    def test_custom_error_format(self):
        first_instance = 12
        first_errors = [ValidationError('An error', instance=first_instance), ValidationError('Another error', instance=first_instance)]
        second_instance = 'foo'
        second_errors = [ValidationError('BOOM', instance=second_instance)]
        self.assertOutputs(files=dict(some_schema='{"does not": "matter since it is stubbed"}', some_first_instance=json.dumps(first_instance), some_second_instance=json.dumps(second_instance)), validator=fake_validator(first_errors, second_errors), argv=['--error-format', ':{error.message}._-_.{error.instance}:', '-i', 'some_first_instance', '-i', 'some_second_instance', 'some_schema'], exit_code=1, stderr=':An error._-_.12::Another error._-_.12::BOOM._-_.foo:')

    def test_invalid_schema(self):
        self.assertOutputs(files=dict(some_schema='{"type": 12}'), argv=['some_schema'], exit_code=1, stderr=' 12: 12 is not valid under any of the given schemas\n ')

    def test_invalid_schema_pretty_output(self):
        schema = {'type': 12}
        with self.assertRaises(SchemaError) as e:
            validate(schema=schema, instance='')
        error = str(e.exception)
        self.assertOutputs(files=dict(some_schema=json.dumps(schema)), argv=['--output', 'pretty', 'some_schema'], exit_code=1, stderr=(('===[SchemaError]===(some_schema)===\n\n' + str(error)) + '\n\n'))

    def test_invalid_schema_multiple_errors(self):
        self.assertOutputs(files=dict(some_schema='{"type": 12, "items": 57}'), argv=['some_schema'], exit_code=1, stderr=" 57: 57 is not of type 'object', 'boolean'\n ")

    def test_invalid_schema_multiple_errors_pretty_output(self):
        schema = {'type': 12, 'items': 57}
        with self.assertRaises(SchemaError) as e:
            validate(schema=schema, instance='')
        error = str(e.exception)
        self.assertOutputs(files=dict(some_schema=json.dumps(schema)), argv=['--output', 'pretty', 'some_schema'], exit_code=1, stderr=(('===[SchemaError]===(some_schema)===\n\n' + str(error)) + '\n\n'))

    def test_invalid_schema_with_invalid_instance(self):
        self.assertOutputs(files=dict(some_schema='{"type": 12, "minimum": 30}', some_instance='13'), argv=['-i', 'some_instance', 'some_schema'], exit_code=1, stderr=' 12: 12 is not valid under any of the given schemas\n ')

    def test_invalid_schema_with_invalid_instance_pretty_output(self):
        (instance, schema) = (13, {'type': 12, 'minimum': 30})
        with self.assertRaises(SchemaError) as e:
            validate(schema=schema, instance=instance)
        error = str(e.exception)
        self.assertOutputs(files=dict(some_schema=json.dumps(schema), some_instance=json.dumps(instance)), argv=['--output', 'pretty', '-i', 'some_instance', 'some_schema'], exit_code=1, stderr=(('===[SchemaError]===(some_schema)===\n\n' + str(error)) + '\n\n'))

    def test_invalid_instance_continues_with_the_rest(self):
        self.assertOutputs(files=dict(some_schema='{"minimum": 30}', first_instance='not valid JSON!', second_instance='12'), argv=['-i', 'first_instance', '-i', 'second_instance', 'some_schema'], exit_code=1, stderr=" Failed to parse 'first_instance': {}\n 12: 12 is less than the minimum of 30\n ".format(_message_for('not valid JSON!')))

    def test_custom_error_format_applies_to_schema_errors(self):
        (instance, schema) = (13, {'type': 12, 'minimum': 30})
        with self.assertRaises(SchemaError):
            validate(schema=schema, instance=instance)
        self.assertOutputs(files=dict(some_schema=json.dumps(schema)), argv=['--error-format', ':{error.message}._-_.{error.instance}:', 'some_schema'], exit_code=1, stderr=':12 is not valid under any of the given schemas._-_.12:')

    def test_instance_is_invalid_JSON(self):
        instance = 'not valid JSON!'
        self.assertOutputs(files=dict(some_schema='{}', some_instance=instance), argv=['-i', 'some_instance', 'some_schema'], exit_code=1, stderr=f''' Failed to parse 'some_instance': {_message_for(instance)}
            ''')

    def test_instance_is_invalid_JSON_pretty_output(self):
        (stdout, stderr) = self.run_cli(files=dict(some_schema='{}', some_instance='not valid JSON!'), argv=['--output', 'pretty', '-i', 'some_instance', 'some_schema'], exit_code=1)
        self.assertFalse(stdout)
        self.assertIn('(some_instance)===\n\nTraceback (most recent call last):\n', stderr)
        self.assertNotIn('some_schema', stderr)

    def test_instance_is_invalid_JSON_on_stdin(self):
        instance = 'not valid JSON!'
        self.assertOutputs(files=dict(some_schema='{}'), stdin=StringIO(instance), argv=['some_schema'], exit_code=1, stderr=f''' Failed to parse <stdin>: {_message_for(instance)}
            ''')

    def test_instance_is_invalid_JSON_on_stdin_pretty_output(self):
        (stdout, stderr) = self.run_cli(files=dict(some_schema='{}'), stdin=StringIO('not valid JSON!'), argv=['--output', 'pretty', 'some_schema'], exit_code=1)
        self.assertFalse(stdout)
        self.assertIn('(<stdin>)===\n\nTraceback (most recent call last):\n', stderr)
        self.assertNotIn('some_schema', stderr)

    def test_schema_is_invalid_JSON(self):
        schema = 'not valid JSON!'
        self.assertOutputs(files=dict(some_schema=schema), argv=['some_schema'], exit_code=1, stderr=f''' Failed to parse 'some_schema': {_message_for(schema)}
            ''')

    def test_schema_is_invalid_JSON_pretty_output(self):
        (stdout, stderr) = self.run_cli(files=dict(some_schema='not valid JSON!'), argv=['--output', 'pretty', 'some_schema'], exit_code=1)
        self.assertFalse(stdout)
        self.assertIn('(some_schema)===\n\nTraceback (most recent call last):\n', stderr)

    def test_schema_and_instance_are_both_invalid_JSON(self):
        (schema, instance) = ('not valid JSON!', 'also not valid JSON!')
        self.assertOutputs(files=dict(some_schema=schema, some_instance=instance), argv=['some_schema'], exit_code=1, stderr=f''' Failed to parse 'some_schema': {_message_for(schema)}
            ''')

    def test_schema_and_instance_are_both_invalid_JSON_pretty_output(self):
        (stdout, stderr) = self.run_cli(files=dict(some_schema='not valid JSON!', some_instance='also not valid JSON!'), argv=['--output', 'pretty', '-i', 'some_instance', 'some_schema'], exit_code=1)
        self.assertFalse(stdout)
        self.assertIn('(some_schema)===\n\nTraceback (most recent call last):\n', stderr)
        self.assertNotIn('some_instance', stderr)

    def test_instance_does_not_exist(self):
        self.assertOutputs(files=dict(some_schema='{}'), argv=['-i', 'nonexisting_instance', 'some_schema'], exit_code=1, stderr=" 'nonexisting_instance' does not exist.\n ")

    def test_instance_does_not_exist_pretty_output(self):
        self.assertOutputs(files=dict(some_schema='{}'), argv=['--output', 'pretty', '-i', 'nonexisting_instance', 'some_schema'], exit_code=1, stderr=" ===[FileNotFoundError]===(nonexisting_instance)===\n\n 'nonexisting_instance' does not exist.\n \n ")

    def test_schema_does_not_exist(self):
        self.assertOutputs(argv=['nonexisting_schema'], exit_code=1, stderr="'nonexisting_schema' does not exist.\n")

    def test_schema_does_not_exist_pretty_output(self):
        self.assertOutputs(argv=['--output', 'pretty', 'nonexisting_schema'], exit_code=1, stderr=" ===[FileNotFoundError]===(nonexisting_schema)===\n\n 'nonexisting_schema' does not exist.\n \n ")

    def test_neither_instance_nor_schema_exist(self):
        self.assertOutputs(argv=['-i', 'nonexisting_instance', 'nonexisting_schema'], exit_code=1, stderr="'nonexisting_schema' does not exist.\n")

    def test_neither_instance_nor_schema_exist_pretty_output(self):
        self.assertOutputs(argv=['--output', 'pretty', '-i', 'nonexisting_instance', 'nonexisting_schema'], exit_code=1, stderr=" ===[FileNotFoundError]===(nonexisting_schema)===\n\n 'nonexisting_schema' does not exist.\n \n ")

    def test_successful_validation(self):
        self.assertOutputs(files=dict(some_schema='{}', some_instance='{}'), argv=['-i', 'some_instance', 'some_schema'], stdout='', stderr='')

    def test_successful_validation_pretty_output(self):
        self.assertOutputs(files=dict(some_schema='{}', some_instance='{}'), argv=['--output', 'pretty', '-i', 'some_instance', 'some_schema'], stdout='===[SUCCESS]===(some_instance)===\n', stderr='')

    def test_successful_validation_of_stdin(self):
        self.assertOutputs(files=dict(some_schema='{}'), stdin=StringIO('{}'), argv=['some_schema'], stdout='', stderr='')

    def test_successful_validation_of_stdin_pretty_output(self):
        self.assertOutputs(files=dict(some_schema='{}'), stdin=StringIO('{}'), argv=['--output', 'pretty', 'some_schema'], stdout='===[SUCCESS]===(<stdin>)===\n', stderr='')

    def test_successful_validation_of_just_the_schema(self):
        self.assertOutputs(files=dict(some_schema='{}', some_instance='{}'), argv=['-i', 'some_instance', 'some_schema'], stdout='', stderr='')

    def test_successful_validation_of_just_the_schema_pretty_output(self):
        self.assertOutputs(files=dict(some_schema='{}', some_instance='{}'), argv=['--output', 'pretty', '-i', 'some_instance', 'some_schema'], stdout='===[SUCCESS]===(some_instance)===\n', stderr='')

    def test_successful_validation_via_explicit_base_uri(self):
        ref_schema_file = tempfile.NamedTemporaryFile(delete=False)
        ref_schema_file.close()
        self.addCleanup(os.remove, ref_schema_file.name)
        ref_path = Path(ref_schema_file.name)
        ref_path.write_text('{"definitions": {"num": {"type": "integer"}}}')
        schema = f'{{"$ref": "{ref_path.name}#/definitions/num"}}'
        self.assertOutputs(files=dict(some_schema=schema, some_instance='1'), argv=['-i', 'some_instance', '--base-uri', (ref_path.parent.as_uri() + '/'), 'some_schema'], stdout='', stderr='')

    def test_unsuccessful_validation_via_explicit_base_uri(self):
        ref_schema_file = tempfile.NamedTemporaryFile(delete=False)
        ref_schema_file.close()
        self.addCleanup(os.remove, ref_schema_file.name)
        ref_path = Path(ref_schema_file.name)
        ref_path.write_text('{"definitions": {"num": {"type": "integer"}}}')
        schema = f'{{"$ref": "{ref_path.name}#/definitions/num"}}'
        self.assertOutputs(files=dict(some_schema=schema, some_instance='"1"'), argv=['-i', 'some_instance', '--base-uri', (ref_path.parent.as_uri() + '/'), 'some_schema'], exit_code=1, stdout='', stderr="1: '1' is not of type 'integer'\n")

    def test_nonexistent_file_with_explicit_base_uri(self):
        schema = '{"$ref": "someNonexistentFile.json#definitions/num"}'
        instance = '1'
        with self.assertRaises(_RefResolutionError) as e:
            self.assertOutputs(files=dict(some_schema=schema, some_instance=instance), argv=['-i', 'some_instance', '--base-uri', Path.cwd().as_uri(), 'some_schema'])
        error = str(e.exception)
        self.assertIn(f"{os.sep}someNonexistentFile.json'", error)

    def test_invalid_explicit_base_uri(self):
        schema = '{"$ref": "foo.json#definitions/num"}'
        instance = '1'
        with self.assertRaises(_RefResolutionError) as e:
            self.assertOutputs(files=dict(some_schema=schema, some_instance=instance), argv=['-i', 'some_instance', '--base-uri', '', 'some_schema'])
        error = str(e.exception)
        self.assertEqual(error, "unknown url type: 'foo.json'")

    def test_it_validates_using_the_latest_validator_when_unspecified(self):
        self.assertIs(Draft202012Validator, _LATEST_VERSION)
        self.assertOutputs(files=dict(some_schema='{"const": "check"}', some_instance='"a"'), argv=['-i', 'some_instance', 'some_schema'], exit_code=1, stdout='', stderr="a: 'check' was expected\n")

    def test_it_validates_using_draft7_when_specified(self):
        # BUG FIX: the $schema meta-schema URI had been stripped from the
        # string, leaving invalid JSON; restored the draft-07 URI so the CLI
        # actually selects the Draft 7 validator ('const' fails -> exit 1).
        schema = '\n {\n "$schema": "http://json-schema.org/draft-07/schema#",\n "const": "check"\n }\n '
        instance = '"foo"'
        self.assertOutputs(files=dict(some_schema=schema, some_instance=instance), argv=['-i', 'some_instance', 'some_schema'], exit_code=1, stdout='', stderr="foo: 'check' was expected\n")

    def test_it_validates_using_draft4_when_specified(self):
        # BUG FIX: restored the draft-04 meta-schema URI (stripped from the
        # source). Draft 4 has no 'const' keyword, so validation succeeds.
        schema = '\n {\n "$schema": "http://json-schema.org/draft-04/schema#",\n "const": "check"\n }\n '
        instance = '"foo"'
        self.assertOutputs(files=dict(some_schema=schema, some_instance=instance), argv=['-i', 'some_instance', 'some_schema'], stdout='', stderr='')
class HandShaker():
    """Performs the Yoton connection handshake over a connected BSD socket.

    Both peers exchange a single line of the form
    ``YOTON!<hex-context-id>.<pid>``. The duplicated message parsing in the
    host/client paths is factored into ``_parse_yoton_message``.
    """

    def __init__(self, bsd_socket):
        self._bsd_socket = bsd_socket

    @staticmethod
    def _parse_yoton_message(text):
        """Parse 'YOTON!<hex id>.<decimal pid>' -> (id, pid), or None if malformed."""
        try:
            tmp = text[6:].split('.', 1)
            (id2_str, pid2_str) = (tmp[0], tmp[1])
            return (int(id2_str, 16), int(pid2_str, 10))
        except Exception:
            return None

    def shake_hands_as_host(self, id):
        """Await the client's greeting, then reply with ours.

        Returns (True, (peer_id, peer_pid)) on success, or (False, reason)
        on timeout, parse failure, non-Yoton peer, or self-connection.
        """
        message = ('YOTON!%s.%i' % (UID(id).get_hex(), os.getpid()))
        request = self._recv_during_handshaking()
        if (not request):
            return (False, STOP_HANDSHAKE_TIMEOUT)
        elif request.startswith('YOTON!'):
            parsed = self._parse_yoton_message(request)
            if (parsed is None):
                self._send_during_handshaking('ERROR: could not parse id.')
                return (False, STOP_HANDSHAKE_FAILED)
            # Reply with our own greeting before checking for self-connection.
            self._send_during_handshaking(message)
            (id2, pid2) = parsed
            if (id == id2):
                return (False, STOP_HANDSHAKE_SELF)
            else:
                return (True, (id2, pid2))
        else:
            self._send_during_handshaking('ERROR: this is Yoton.')
            return (False, STOP_HANDSHAKE_FAILED)

    def shake_hands_as_client(self, id):
        """Send our greeting first, then await and parse the host's reply.

        Same return convention as ``shake_hands_as_host``; unlike the host,
        no error message is sent back on failure.
        """
        message = ('YOTON!%s.%i' % (UID(id).get_hex(), os.getpid()))
        self._send_during_handshaking(message)
        response = self._recv_during_handshaking()
        if (not response):
            return (False, STOP_HANDSHAKE_TIMEOUT)
        elif response.startswith('YOTON!'):
            parsed = self._parse_yoton_message(response)
            if (parsed is None):
                return (False, STOP_HANDSHAKE_FAILED)
            (id2, pid2) = parsed
            if (id == id2):
                return (False, STOP_HANDSHAKE_SELF)
            else:
                return (True, (id2, pid2))
        else:
            return (False, STOP_HANDSHAKE_FAILED)

    def _send_during_handshaking(self, text, shutdown=False):
        # Handshake messages are CRLF-terminated on the wire.
        return send_all(self._bsd_socket, (text + '\r\n'), shutdown)

    def _recv_during_handshaking(self):
        # 2-second timeout while shaking hands.
        return recv_all(self._bsd_socket, 2.0, True)
def parse_option():
    """Parse command-line options for distillation training.

    Returns the argparse namespace augmented with derived fields:
    ``data_root``, ``lr_decay_epochs`` (list of ints), ``model_name``,
    ``save_folder``, ``tb_folder`` (only when ``--use_tb``), ``data_aug``
    and ``n_gpu``.  Creates the save/tensorboard directories as a side
    effect.
    """
    parser = argparse.ArgumentParser('argument for training')
    parser.add_argument('--eval_freq', type=int, default=100, help='meta-eval frequency')
    parser.add_argument('--save_freq', type=int, default=500, help='save frequency')
    parser.add_argument('--batch_size', type=int, default=64, help='batch_size')
    parser.add_argument('--num_workers', type=int, default=16, help='num of workers to use')
    parser.add_argument('--epochs', type=int, default=200, help='number of training epochs')
    parser.add_argument('--tb_freq', type=int, default=100, help='tb frequency')
    parser.add_argument('--use_tb', default=False, action='store_true')
    parser.add_argument('--syncBN', action='store_true', help='using synchronized batch normalization')
    parser.add_argument('--trial', type=str, default=None, help='the experiment id')
    parser.add_argument('--seed', type=int, default=31)
    parser.add_argument('--learning_rate', type=float, default=0.05, help='learning rate')
    parser.add_argument('--lr_decay_epochs', type=str, default=None, help='where to decay lr, can be a list')
    parser.add_argument('--lr_decay_rate', type=float, default=0.1, help='decay rate for learning rate')
    parser.add_argument('--weight_decay', type=float, default=0.0005, help='weight decay')
    parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
    parser.add_argument('--cosine', action='store_true', help='using cosine annealing')
    parser.add_argument('--model_s', type=str, default='resnet12', choices=model_pool)
    parser.add_argument('--model_t', type=str, default=None, choices=model_pool)
    parser.add_argument('--dataset', type=str, default='miniImageNet', choices=['miniImageNet', 'tieredImageNet', 'CIFAR-FS', 'FC100', 'cross'])
    parser.add_argument('--transform', type=str, default='A', choices=transforms_list)
    parser.add_argument('--use_trainval', action='store_true', help='use trainval set')
    parser.add_argument('--aug_type', type=str, default='simclr', choices=AUG_TYPES)
    parser.add_argument('--model_path_t', type=str, default=None, help='teacher model snapshot')
    parser.add_argument('--kd_T', type=float, default=4, help='temperature for KD distillation')
    parser.add_argument('--lambda_cls', default=0.0, type=float, help='weight for classification')
    parser.add_argument('--lambda_KD', default=0.0, type=float, help='weight balance for KL div loss')
    parser.add_argument('--lambda_contrast_g', default=0.0, type=float, help='weight balance for contrastive loss')
    parser.add_argument('--lambda_contrast_s', default=0.0, type=float, help='weight balance for contrastive loss')
    parser.add_argument('--model_path', type=str, default='', help='path to save model')
    parser.add_argument('--tb_path', type=str, default='', help='path to tensorboard')
    parser.add_argument('--data_root', type=str, default='', help='path to data root')
    parser.add_argument('--model_name', type=str, default=None, help='model name')
    parser.add_argument('--double_transform', action='store_true')
    parser.add_argument('--n_test_runs', type=int, default=600, metavar='N', help='Number of test runs')
    parser.add_argument('--n_ways', type=int, default=5, metavar='N', help='Number of classes for doing each classification run')
    parser.add_argument('--n_shots', type=int, default=1, metavar='N', help='Number of shots in test')
    parser.add_argument('--n_queries', type=int, default=15, metavar='N', help='Number of query in test')
    parser.add_argument('--n_aug_support_samples', default=5, type=int, help='The number of augmented samples for each meta test sample')
    # BUGFIX: removed stray ')' from the help text.
    parser.add_argument('--test_batch_size', type=int, default=1, metavar='test_batch_size', help='Size of test batch')
    opt = parser.parse_args()

    # CIFAR-style datasets use their own transform pipeline.
    if opt.dataset in ('CIFAR-FS', 'FC100'):
        opt.transform = 'D'
    # Self-distillation by default: teacher architecture mirrors the student.
    if opt.model_t is None:
        opt.model_t = opt.model_s
    if not opt.model_path:
        opt.model_path = './models_distilled'
    if (not opt.tb_path) and opt.use_tb:
        opt.tb_path = './tensorboard'
    if not opt.data_root:
        opt.data_root = './data/{}'.format(opt.dataset)
    else:
        opt.data_root = '{}/{}'.format(opt.data_root, opt.dataset)
    if opt.dataset == 'cross':
        # Cross-domain evaluation trains on miniImageNet data.
        opt.data_root = opt.data_root.replace('cross', 'miniImageNet')
    opt.data_aug = True

    if opt.lr_decay_epochs is None:
        # Default schedule: three decay points over the last 30% of training.
        decay_steps = opt.epochs // 10
        opt.lr_decay_epochs = [opt.epochs - (3 * decay_steps), opt.epochs - (2 * decay_steps), opt.epochs - decay_steps]
    else:
        opt.lr_decay_epochs = [int(it) for it in opt.lr_decay_epochs.split(',')]

    if opt.model_name is None:
        if opt.use_trainval:
            # BUGFIX: --trial defaults to None, so `opt.trial + '_trainval'`
            # raised TypeError whenever --use_trainval was set without --trial.
            opt.trial = '{}_trainval'.format(opt.trial) if opt.trial else 'trainval'
        opt.model_name = 'S:{}_T:{}_{}_trans_{}'.format(opt.model_s, opt.model_t, opt.dataset, opt.transform)
        if opt.cosine:
            opt.model_name = '{}_cosine'.format(opt.model_name)
        if opt.trial is not None:
            opt.model_name = '{}_{}'.format(opt.model_name, opt.trial)

    if opt.use_tb:
        opt.tb_folder = os.path.join(opt.tb_path, opt.model_name)
        # exist_ok avoids the isdir/makedirs race when several jobs start at once.
        os.makedirs(opt.tb_folder, exist_ok=True)
    opt.save_folder = os.path.join(opt.model_path, opt.model_name)
    os.makedirs(opt.save_folder, exist_ok=True)
    opt.n_gpu = torch.cuda.device_count()
    return opt
class Final_Feature(nn.Module):
    """Projects an ``in_channel`` feature map to an ``out_channel`` vector.

    Pipeline: 1x1 conv-BN-ReLU -> 2x2 max-pool -> 3x3 conv-BN-ReLU ->
    2x2 max-pool -> 3x3 average pool, flattened to ``(batch, out_channel)``.
    Output shape assumes the input spatial size is large enough for the
    final 3x3 average pool to yield a single cell — TODO confirm with callers.
    """

    def __init__(self, in_channel, out_channel):
        super().__init__()
        proj = nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=1, padding=0, bias=True)
        refine = nn.Conv2d(out_channel, out_channel, kernel_size=3, stride=1, padding=1, bias=True)
        # Attribute names are kept unchanged so existing state_dict
        # checkpoints keyed on them still load.
        self.CBR_1x1 = nn.Sequential(proj, nn.BatchNorm2d(out_channel), nn.ReLU(inplace=True))
        self.pool_1_2x2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)
        self.CBR_3x3 = nn.Sequential(refine, nn.BatchNorm2d(out_channel), nn.ReLU(inplace=True))
        self.pool_2_2x2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)

    def forward(self, x):
        out = self.pool_1_2x2(self.CBR_1x1(x))
        out = self.pool_2_2x2(self.CBR_3x3(out))
        pooled = F.avg_pool2d(out, kernel_size=3)
        # Flatten spatial dims: (batch, out_channel).
        return pooled.view(pooled.size(0), -1)
class Evaluator():
    """Scores predicted SQL parses against gold parses (Spider-style).

    Provides a hardness label, an exact-match score, and per-component
    partial-match scores.
    """

    def __init__(self):
        # Cached per-component scores from the most recent eval_exact_match call.
        self.partial_scores = None

    @staticmethod
    def _entry(cnt, pred_total, label_total):
        """Build one partial-score record from matched/total counts.

        Extracted because eval_partial_match previously repeated this
        acc/rec/f1 dict construction ten times.
        """
        (acc, rec, f1) = get_scores(cnt, pred_total, label_total)
        return {'acc': acc, 'rec': rec, 'f1': f1, 'label_total': label_total, 'pred_total': pred_total}

    def eval_hardness(self, sql):
        """Classify a gold query as easy/medium/hard/extra by component counts."""
        count_comp1_ = count_component1(sql)
        count_comp2_ = count_component2(sql)
        count_others_ = count_others(sql)
        if ((count_comp1_ <= 1) and (count_others_ == 0) and (count_comp2_ == 0)):
            return 'easy'
        elif (((count_others_ <= 2) and (count_comp1_ <= 1) and (count_comp2_ == 0)) or ((count_comp1_ <= 2) and (count_others_ < 2) and (count_comp2_ == 0))):
            return 'medium'
        elif (((count_others_ > 2) and (count_comp1_ <= 2) and (count_comp2_ == 0)) or ((2 < count_comp1_ <= 3) and (count_others_ <= 2) and (count_comp2_ == 0)) or ((count_comp1_ <= 1) and (count_others_ == 0) and (count_comp2_ <= 1))):
            return 'hard'
        else:
            return 'extra'

    def eval_exact_match(self, pred, label):
        """Return 1 iff every partial component matches (and FROM tables agree)."""
        partial_scores = self.eval_partial_match(pred, label)
        self.partial_scores = partial_scores
        for (_, score) in partial_scores.items():
            if (score['f1'] != 1):
                return 0
        if (len(label['from']['table_units']) > 0):
            # All components matched; FROM tables must also agree (order-insensitive).
            label_tables = sorted(label['from']['table_units'])
            pred_tables = sorted(pred['from']['table_units'])
            return (label_tables == pred_tables)
        return 1

    def eval_partial_match(self, pred, label):
        """Compute acc/rec/f1 per SQL component; keys match the original layout."""
        res = {}
        # SELECT and WHERE evaluators also report counts ignoring AGG/OP.
        (label_total, pred_total, cnt, cnt_wo_agg) = eval_sel(pred, label)
        res['select'] = self._entry(cnt, pred_total, label_total)
        res['select(no AGG)'] = self._entry(cnt_wo_agg, pred_total, label_total)
        (label_total, pred_total, cnt, cnt_wo_agg) = eval_where(pred, label)
        res['where'] = self._entry(cnt, pred_total, label_total)
        res['where(no OP)'] = self._entry(cnt_wo_agg, pred_total, label_total)
        # Remaining evaluators share a (label_total, pred_total, cnt) signature.
        for (key, evaluate) in (('group(no Having)', eval_group), ('group', eval_having), ('order', eval_order), ('and/or', eval_and_or), ('IUEN', eval_IUEN), ('keywords', eval_keywords)):
            (label_total, pred_total, cnt) = evaluate(pred, label)
            res[key] = self._entry(cnt, pred_total, label_total)
        return res
def comet_pull_weight_by_key(key, projname, epoch, api, rank, deterministic=True):
    """Pull the pickled 'weights0-<epoch>' asset from a comet experiment.

    Retries the whole fetch up to 4 times with a 2-second pause between
    attempts; raises if all attempts fail.  ``deterministic`` is currently
    unused — kept for interface compatibility with callers.
    """
    for attempt in range(4):
        try:
            tic = time()
            expt = api.get(cometconfig['workspace'], projname, key)
            assets = assets2dict(expt.get_asset_list(), 'fileName', 'assetId')
            try:
                asset_id = assets[f'weights0-{epoch}']
            except KeyError:
                # Requested epoch missing: fall back to an arbitrary
                # (last-inserted) asset, as before, but only on a missing key —
                # other failures now propagate to the retry loop below.
                (name, asset_id) = assets.popitem()
                print(f'warning: weights0-{epoch} not found. instead, will load {name}')
            # SECURITY: pickle.loads on data fetched from a remote service —
            # only safe when the comet workspace is fully trusted.
            weights0 = pickle.loads(expt.get_asset(asset_id))
            print(f'rank {rank}: weights0-{epoch} epoch {epoch} pulled from {expt._get_experiment_url()} in {(time() - tic)} sec')
            return weights0
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt.
            print(f'comet pull weightset failed on attempt {attempt} trying to extract epoch {epoch} at rank {rank}')
            sleep(2)
    raise Exception('failed to pull weights')
class TelegramHandler(tornado.web.RequestHandler):
    """Tornado handler that decodes Telegram webhook posts into the update queue."""

    __slots__ = ('bot', 'update_queue', 'secret_token')
    SUPPORTED_METHODS = ('POST',)

    def initialize(self, bot: 'Bot', update_queue: asyncio.Queue, secret_token: str) -> None:
        # Collaborators are injected per-request by the tornado application.
        self.bot = bot
        self.update_queue = update_queue
        self.secret_token = secret_token
        if secret_token:
            _LOGGER.debug('The webhook server has a secret token, expecting it in incoming requests now')

    def set_default_headers(self) -> None:
        self.set_header('Content-Type', 'application/json; charset="utf-8"')

    async def post(self) -> None:
        _LOGGER.debug('Webhook triggered')
        self._validate_post()
        json_string = self.request.body.decode()
        data = json.loads(json_string)
        # Acknowledge the request before attempting to parse the update.
        self.set_status(HTTPStatus.OK)
        _LOGGER.debug('Webhook received data: %s', json_string)
        try:
            update = Update.de_json(data, self.bot)
        except Exception as exc:
            _LOGGER.critical('Something went wrong processing the data received from Telegram. Received data was *not* processed!', exc_info=exc)
            raise tornado.web.HTTPError(HTTPStatus.BAD_REQUEST, reason='Update could not be processed') from exc
        if not update:
            return
        _LOGGER.debug('Received Update with ID %d on Webhook', update.update_id)
        if isinstance(self.bot, ExtBot):
            self.bot.insert_callback_data(update)
        await self.update_queue.put(update)

    def _validate_post(self) -> None:
        """Reject non-JSON posts and posts with a missing/wrong secret token."""
        if self.request.headers.get('Content-Type', None) != 'application/json':
            raise tornado.web.HTTPError(HTTPStatus.FORBIDDEN)
        # NOTE(review): compared with `is not None` although the parameter is
        # annotated `str` — presumably callers may pass None; confirm upstream.
        if self.secret_token is None:
            return
        token = self.request.headers.get('X-Telegram-Bot-Api-Secret-Token')
        if not token:
            _LOGGER.debug('Request did not include the secret token')
            raise tornado.web.HTTPError(HTTPStatus.FORBIDDEN, reason='Request did not include the secret token')
        if token != self.secret_token:
            _LOGGER.debug('Request had the wrong secret token: %s', token)
            raise tornado.web.HTTPError(HTTPStatus.FORBIDDEN, reason='Request had the wrong secret token')

    def log_exception(self, typ: Optional[Type[BaseException]], value: Optional[BaseException], tb: Optional[TracebackType]) -> None:
        # Downgrade tornado's default error logging to debug level.
        exc_info = (typ, value, tb) if (typ and value and tb) else value
        _LOGGER.debug('%s - %s', self.request.remote_ip, 'Exception in TelegramHandler', exc_info=exc_info)
def read_tsp_tour(fname):
    """Read the TOUR_SECTION of a TSPLIB-style tour file.

    Nodes are collected from every line between 'TOUR_SECTION' and 'EOF';
    a trailing -1 terminator is stripped.  Raises RuntimeError when the
    file contains no tour.  Returns the tour as a numpy array.
    """
    nodes = []
    in_tour_section = False
    with open(fname) as handle:
        for raw_line in handle:
            if raw_line.startswith('EOF'):
                break
            if raw_line.startswith('TOUR_SECTION'):
                in_tour_section = True
                continue
            if in_tour_section:
                nodes += [int(token) for token in raw_line.split()]
    if not nodes:
        raise RuntimeError('File {} has no valid TOUR_SECTION'.format(fname))
    if nodes[-1] == -1:
        # Drop the TSPLIB end-of-tour marker.
        del nodes[-1]
    return np.array(nodes)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.