code stringlengths 281 23.7M |
|---|
def index() -> pc.Component:
    """Landing page: stock-symbol input, current ticker, price chart and data table."""
    heading = pc.text(
        'Enter a stock symbol',
        background_image='linear-gradient(271.68deg, #EE756A 0.75%, #756AEE 88.52%)',
        background_clip='text',
        font_weight='bold',
        font_size='2em',
    )
    ticker_label = pc.text(State.ticker2, font_family='Silkscreen')
    symbol_input = pc.input(on_change=State.set_ticker)
    update_button = pc.button('update!', on_click=State.ticker_update2, style=text_style)
    chart = pc.plotly(data=State.line_chart, layout={'width': '800', 'height': '400'})
    table = pc.box(
        pc.data_table(data=State.df1, pagination=True, search=True, sort=True, resizable=True),
        width='1000px',
        height='500px',
        font_size='0.3em',
    )
    stack = pc.vstack(
        navbar(State),
        heading,
        ticker_label,
        symbol_input,
        update_button,
        chart,
        table,
        spacing='1.5em',
        font_size='2em',
    )
    return pc.center(stack, padding_top='10%')
class TransfoXLModelTester:
    """Helper that builds tiny TransfoXL configs/inputs and checks output shapes.

    `parent` is the unittest.TestCase that assertions are delegated to.
    """

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        mem_len=30,
        clamp_len=15,
        is_training=False,
        use_labels=True,
        vocab_size=99,
        cutoffs=None,
        hidden_size=32,
        d_embed=32,
        num_attention_heads=4,
        d_head=8,
        d_inner=128,
        div_val=2,
        num_hidden_layers=5,
        scope=None,
        seed=1,
        eos_token_id=0,
        num_labels=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.mem_len = mem_len
        # attention key length = current segment + cached memory
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = clamp_len
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        # fix: was a mutable default argument ([10, 50, 80]) shared across calls
        self.cutoffs = [10, 50, 80] if cutoffs is None else cutoffs
        self.hidden_size = hidden_size
        self.d_embed = d_embed
        self.num_attention_heads = num_attention_heads
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.num_hidden_layers = num_hidden_layers
        self.scope = scope
        self.seed = seed
        self.eos_token_id = eos_token_id
        self.num_labels = num_labels
        # last vocab id is reserved as padding
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return (config, segment-1 ids, segment-2 ids, optional LM labels)."""
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.get_config()
        return (config, input_ids_1, input_ids_2, lm_labels)

    def get_config(self):
        """Build a small TransfoXLConfig from the tester's hyper-parameters."""
        return TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def set_seed(self):
        """Seed python and torch RNGs for reproducible weight init."""
        random.seed(self.seed)
        torch.manual_seed(self.seed)

    def create_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        """Run two consecutive segments through TransfoXLModel, threading mems."""
        model = TransfoXLModel(config)
        model.to(torch_device)
        model.eval()
        outputs1 = model(input_ids_1)
        # the second segment consumes the cached memories of the first
        outputs2 = model(input_ids_2, outputs1['mems'])
        return {
            'hidden_states_1': outputs1['last_hidden_state'],
            'mems_1': outputs1['mems'],
            'hidden_states_2': outputs2['last_hidden_state'],
            'mems_2': outputs2['mems'],
        }

    def check_transfo_xl_model_output(self, result):
        """Assert hidden-state and memory shapes for both segments."""
        expected_mems = [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers
        self.parent.assertEqual(
            result['hidden_states_1'].shape, (self.batch_size, self.seq_length, self.hidden_size)
        )
        self.parent.assertEqual(
            result['hidden_states_2'].shape, (self.batch_size, self.seq_length, self.hidden_size)
        )
        self.parent.assertListEqual([mem.shape for mem in result['mems_1']], expected_mems)
        self.parent.assertListEqual([mem.shape for mem in result['mems_2']], expected_mems)

    def create_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        """Run the LM head over two segments, with and without labels."""
        model = TransfoXLLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        lm_logits_1 = model(input_ids_1)['prediction_scores']
        outputs1 = model(input_ids_1, labels=lm_labels)
        lm_logits_2 = model(input_ids_2, mems=outputs1['mems'])['prediction_scores']
        outputs2 = model(input_ids_2, labels=lm_labels, mems=outputs1['mems'])
        return {
            'loss_1': outputs1['loss'],
            'losses_1': outputs1['losses'],
            'mems_1': outputs1['mems'],
            'lm_logits_1': lm_logits_1,
            'loss_2': outputs2['loss'],
            'losses_2': outputs2['losses'],
            'mems_2': outputs2['mems'],
            'lm_logits_2': lm_logits_2,
        }

    def check_transfo_xl_lm_head_output(self, result):
        """Assert loss/logit/memory shapes for both LM-head segments."""
        expected_mems = [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers
        # per-token losses drop the first position, hence seq_length - 1
        self.parent.assertEqual(result['loss_1'].shape, ())
        self.parent.assertEqual(result['losses_1'].shape, (self.batch_size, self.seq_length - 1))
        self.parent.assertEqual(
            result['lm_logits_1'].shape, (self.batch_size, self.seq_length, self.vocab_size)
        )
        self.parent.assertListEqual([mem.shape for mem in result['mems_1']], expected_mems)
        self.parent.assertEqual(result['loss_2'].shape, ())
        self.parent.assertEqual(result['losses_2'].shape, (self.batch_size, self.seq_length - 1))
        self.parent.assertEqual(
            result['lm_logits_2'].shape, (self.batch_size, self.seq_length, self.vocab_size)
        )
        self.parent.assertListEqual([mem.shape for mem in result['mems_2']], expected_mems)

    def create_transfo_xl_lm_head_trainer_compatible_tuple(self, config, input_ids_1, input_ids_2, lm_labels):
        """Exercise tuple outputs with trainer-compatible ordering (loss first)."""
        config.trainer_compatible = True
        model = TransfoXLLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        lm_logits_1 = model(input_ids_1, return_dict=False)[0]
        outputs1 = model(input_ids_1, labels=lm_labels, return_dict=False)
        loss_1, _, losses_1, mems_1 = outputs1[:4]
        lm_logits_2 = model(input_ids_2, mems=mems_1, return_dict=False)[0]
        outputs2 = model(input_ids_2, labels=lm_labels, mems=mems_1, return_dict=False)
        loss_2, _, losses_2, mems_2 = outputs2[:4]
        outputs = {
            'losses_1': losses_1,
            'mems_1': mems_1,
            'lm_logits_1': lm_logits_1,
            'loss_1': loss_1,
            'losses_2': losses_2,
            'mems_2': mems_2,
            'lm_logits_2': lm_logits_2,
            'loss_2': loss_2,
        }
        config.trainer_compatible = None
        return outputs

    def create_transfo_xl_lm_head_trainer_incompatible_tuple(self, config, input_ids_1, input_ids_2, lm_labels):
        """Exercise tuple outputs with legacy ordering (reduced loss last)."""
        config.trainer_compatible = False
        model = TransfoXLLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        lm_logits_1 = model(input_ids_1, return_dict=False)[0]
        outputs1 = model(input_ids_1, labels=lm_labels, return_dict=False)
        losses_1, _, mems_1 = outputs1[:3]
        loss_1 = outputs1[-1]
        lm_logits_2 = model(input_ids_2, mems=mems_1, return_dict=False)[0]
        # return_dict=False added for consistency with the other calls in this helper
        outputs2 = model(input_ids_2, labels=lm_labels, mems=mems_1, return_dict=False)
        losses_2, _, mems_2 = outputs2[:3]
        loss_2 = outputs2[-1]
        outputs = {
            'losses_1': losses_1,
            'mems_1': mems_1,
            'lm_logits_1': lm_logits_1,
            'loss_1': loss_1,
            'losses_2': losses_2,
            'mems_2': mems_2,
            'lm_logits_2': lm_logits_2,
            'loss_2': loss_2,
        }
        config.trainer_compatible = None
        return outputs

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        """Check the sequence-classification head emits (batch, num_labels) logits."""
        config.num_labels = self.num_labels
        model = TransfoXLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the shared model-tester interface: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids_1}
        return (config, inputs_dict)
def get_configs_from_pipeline_file():
    """Parse the pipeline config named by FLAGS.pipeline_config_path.

    Returns the (SSD model, train, train-input-reader) sub-configs of the
    TrainEvalPipelineConfig proto.
    """
    pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
    with tf.gfile.GFile(FLAGS.pipeline_config_path, 'r') as config_file:
        text_format.Merge(config_file.read(), pipeline_config)
    return (
        pipeline_config.model.ssd,
        pipeline_config.train_config,
        pipeline_config.train_input_reader,
    )
def process_data(csv_file):
    """Merge per-molecule retrosynthesis tree results into one DataFrame.

    For row `i` of `csv_file`, the tree result is expected at
    `<csv stem>_<i>.json`. Rows without a result file are padded with -1.

    Returns a DataFrame with SMILES, tree metrics and SMILES length.
    """
    # fix: split('.')[0] truncated paths containing dots (e.g. './data/x.v2.csv')
    name, _ = os.path.splitext(csv_file)
    df_file = pd.read_csv(csv_file)
    num = len(df_file)
    results = []
    for i in range(num):
        json_file = f'{name}_{i}.json'
        if os.path.exists(json_file):
            with open(json_file) as f:
                results.append(json.load(f))
    num_temp = len(results)
    depths = []
    p_scores = []
    synthesizabilitys = []
    prices = []
    smiles = df_file.SMILES
    for result in results:
        (num_path, status, depth, p_score, synthesizability, price) = tree_analysis(result)
        depths.append(depth)
        p_scores.append(p_score)
        synthesizabilitys.append(synthesizability)
        prices.append(price)
    # pad molecules whose result file was missing with -1 sentinels
    for _ in range(num - num_temp):
        depths.append(-1)
        p_scores.append(-1)
        synthesizabilitys.append(-1)
        prices.append(-1)
    df = pd.DataFrame({
        'SMILES': smiles,
        'tb_synthesizability': synthesizabilitys,
        'tb_depth': depths,
        'tb_plausibility': p_scores,
        'tb_price': prices,
    })
    df['len_smiles'] = df.SMILES.apply(len)
    return df
class UffdOAuth2(BaseOAuth2):
    """social-auth OAuth2 backend for the uffd identity provider."""

    name = 'uffd'
    ACCESS_TOKEN_METHOD = 'POST'
    REFRESH_TOKEN_METHOD = 'POST'
    SCOPE_SEPARATOR = ' '
    STATE_PARAMETER = True
    REDIRECT_STATE = False
    EXTRA_DATA = [('id', 'id')]

    def get_user_details(self, response):
        """Map a userinfo response onto the fields social-auth expects."""
        fullname, first_name, last_name = self.get_user_names(fullname=response.get('name'))
        details = {
            'username': response.get('nickname'),
            'email': response.get('email') or '',
            'fullname': fullname,
            'first_name': first_name,
            'last_name': last_name,
        }
        return details

    def user_data(self, access_token, *args, **kwargs):
        """Fetch userinfo; return None when the endpoint yields unparseable JSON."""
        query = urlencode({'access_token': access_token})
        url = self.userinfo_url() + '?' + query
        try:
            return self.get_json(url)
        except ValueError:
            return None

    def authorization_url(self):
        """Provider authorize endpoint, relative to the configured BASE_URL."""
        return self.setting('BASE_URL') + '/oauth2/authorize'

    def access_token_url(self):
        """Provider token endpoint, relative to the configured BASE_URL."""
        return self.setting('BASE_URL') + '/oauth2/token'

    def userinfo_url(self):
        """Provider userinfo endpoint, relative to the configured BASE_URL."""
        return self.setting('BASE_URL') + '/oauth2/userinfo'
class TestReadmeSample(QiskitChemistryTestCase):
    """Runs the README quick-start example end to end as a regression test."""

    def setUp(self):
        super().setUp()
        # Skip (rather than fail) when the optional PySCF driver is missing.
        try:
            from qiskit.chemistry.drivers import PySCFDriver
            PySCFDriver(atom='Li .0 .0 .0; H .0 .0 1.6')
        except QiskitChemistryError:
            self.skipTest('PYSCF driver does not appear to be installed')
        # Likewise skip when the Aer simulator backend is unavailable.
        try:
            from qiskit import Aer
            _ = Aer.get_backend('statevector_simulator')
        except ImportError as ex:
            self.skipTest("Aer doesn't appear to be installed. Error: '{}'".format(str(ex)))
        return

    def test_readme_sample(self):
        """Reproduce the README's H2 ground-state VQE computation verbatim."""

        # Deliberately shadows the builtin so the README's print() calls are
        # redirected into the test log instead of stdout.
        def print(*args):
            if args:
                self.log.debug(args[0], *args[1:])

        # Imports are kept inline, exactly as the README shows them.
        from qiskit.chemistry import FermionicOperator
        from qiskit.chemistry.drivers import PySCFDriver, UnitsType
        from qiskit.aqua.operators import Z2Symmetries
        driver = PySCFDriver(atom='H .0 .0 .0; H .0 .0 0.735', unit=UnitsType.ANGSTROM, basis='sto3g')
        molecule = driver.run()
        num_particles = (molecule.num_alpha + molecule.num_beta)
        num_spin_orbitals = (molecule.num_orbitals * 2)
        # Second-quantized Hamiltonian -> qubit operator (parity mapping
        # allows the two-qubit reduction below).
        ferm_op = FermionicOperator(h1=molecule.one_body_integrals, h2=molecule.two_body_integrals)
        map_type = 'PARITY'
        qubit_op = ferm_op.mapping(map_type)
        qubit_op = Z2Symmetries.two_qubit_reduction(qubit_op, num_particles)
        num_qubits = qubit_op.num_qubits
        from qiskit.aqua.components.optimizers import L_BFGS_B
        optimizer = L_BFGS_B()
        from qiskit.chemistry.circuit.library import HartreeFock
        init_state = HartreeFock(num_spin_orbitals, num_particles)
        from qiskit.circuit.library import TwoLocal
        var_form = TwoLocal(num_qubits, ['ry', 'rz'], 'cz')
        # Prepend the Hartree-Fock reference state to the variational form.
        var_form.compose(init_state, front=True)
        from qiskit.aqua.algorithms import VQE
        algorithm = VQE(qubit_op, var_form, optimizer)
        from qiskit import Aer
        backend = Aer.get_backend('statevector_simulator')
        result = algorithm.run(backend)
        print(result.eigenvalue.real)
        # Known H2/sto-3g ground-state electronic energy (Hartree).
        self.assertAlmostEqual(result.eigenvalue.real, (- 1.), places=6)
def create_H(kaldi_root: Path, fst_dir: Path, disambig_out_units_file: Path, in_labels: str, vocab: Dictionary, blk_sym: str, silence_symbol: Optional[str]) -> (Path, Path, Path):
    """Build (or reuse from disk) the Kaldi/OpenFST "H" transducer that maps
    acoustic emission labels to output units, CTC-style: every emitting state
    returns to the root, with blank (and optional silence) self-loops.

    Returns (h_graph, h_out_units_file, disambig_in_units_file_int).
    """
    h_graph = (fst_dir / f"H.{in_labels}{(('_' + silence_symbol) if silence_symbol else '')}.fst")
    h_out_units_file = (fst_dir / f'kaldi_dict.h_out.{in_labels}.txt')
    disambig_in_units_file_int = Path((str(h_graph) + 'isym_disambig.int'))
    disambig_out_units_file_int = Path((str(disambig_out_units_file) + '.int'))
    # Artifacts are cached on disk; rebuild only when any of them is missing.
    if ((not h_graph.exists()) or (not h_out_units_file.exists()) or (not disambig_in_units_file_int.exists())):
        logger.info(f'Creating {h_graph}')
        eps_sym = '<eps>'
        num_disambig = 0
        osymbols = []
        # Split the output-unit table into real symbols (osymbols) and
        # '#'-prefixed disambiguation symbols (written as ints to out_f).
        with open(disambig_out_units_file, 'r') as f, open(disambig_out_units_file_int, 'w') as out_f:
            for line in f:
                (symb, id) = line.rstrip().split()
                if line.startswith('#'):
                    num_disambig += 1
                    print(id, file=out_f)
                else:
                    if (len(osymbols) == 0):
                        # first real symbol must be epsilon (id 0)
                        assert (symb == eps_sym), symb
                    osymbols.append((symb, id))
        # Input symbols: epsilon plus every vocabulary symbol, ids 1..N.
        i_idx = 0
        isymbols = [(eps_sym, 0)]
        imap = {}
        for (i, s) in enumerate(vocab.symbols):
            i_idx += 1
            isymbols.append((s, i_idx))
            imap[s] = i_idx
        fst_str = []
        node_idx = 0
        root_node = node_idx
        # Blank (and optional silence) are consumed at the root with no output.
        special_symbols = [blk_sym]
        if (silence_symbol is not None):
            special_symbols.append(silence_symbol)
        for ss in special_symbols:
            fst_str.append('{} {} {} {}'.format(root_node, root_node, ss, eps_sym))
        for (symbol, _) in osymbols:
            if ((symbol == eps_sym) or symbol.startswith('#')):
                continue
            node_idx += 1
            # root -> emitting state, producing the symbol once
            fst_str.append('{} {} {} {}'.format(root_node, node_idx, symbol, symbol))
            # emitting state -> root (epsilon)
            fst_str.append('{} {} {} {}'.format(node_idx, root_node, eps_sym, eps_sym))
            # repeated-label absorption: emitting state -> blank/silence state -> root
            pre_node = node_idx
            node_idx += 1
            for ss in special_symbols:
                fst_str.append('{} {} {} {}'.format(pre_node, node_idx, ss, eps_sym))
            fst_str.append('{} {} {} {}'.format(node_idx, root_node, eps_sym, eps_sym))
        # root is the (only) final state
        fst_str.append('{}'.format(root_node))
        fst_str = '\n'.join(fst_str)
        h_str = str(h_graph)
        isym_file = (h_str + '.isym')
        with open(isym_file, 'w') as f:
            for (sym, id) in isymbols:
                f.write('{} {}\n'.format(sym, id))
        with open(h_out_units_file, 'w') as f:
            for (sym, id) in osymbols:
                f.write('{} {}\n'.format(sym, id))
        # Input-side disambiguation ids continue after the last real isymbol.
        with open(disambig_in_units_file_int, 'w') as f:
            disam_sym_id = len(isymbols)
            for _ in range(num_disambig):
                f.write('{}\n'.format(disam_sym_id))
                disam_sym_id += 1
        fstcompile = (kaldi_root / 'tools/openfst-1.6.7/bin/fstcompile')
        fstaddselfloops = (kaldi_root / 'src/fstbin/fstaddselfloops')
        fstarcsort = (kaldi_root / 'tools/openfst-1.6.7/bin/fstarcsort')
        try:
            # Pipeline: compile text FST -> add disambig self-loops -> sort arcs.
            with open(h_graph, 'wb') as out_f:
                res = subprocess.run([fstcompile, f'--isymbols={isym_file}', f'--osymbols={h_out_units_file}', '--keep_isymbols=false', '--keep_osymbols=false'], input=str.encode(fst_str), capture_output=True, check=True)
                res = subprocess.run([fstaddselfloops, disambig_in_units_file_int, disambig_out_units_file_int], input=res.stdout, capture_output=True, check=True)
                res = subprocess.run([fstarcsort, '--sort_type=olabel'], input=res.stdout, capture_output=True, check=True)
                out_f.write(res.stdout)
        except subprocess.CalledProcessError as e:
            logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}")
            # Remove the partially written graph so the next run rebuilds it.
            os.remove(h_graph)
            raise
    return (h_graph, h_out_units_file, disambig_in_units_file_int)
class EvoqueHtmlLexer(DelegatingLexer):
    """
    Subclass of the `EvoqueLexer` that highlights unlexed data with the
    `HtmlLexer`.
    """
    name = 'HTML+Evoque'
    aliases = ['html+evoque']
    filenames = ['*.html']
    mimetypes = ['text/html+evoque']
    # fix: the url string literal was truncated in the source ("url = '").
    # Restored from the Evoque project homepage — TODO confirm against upstream.
    url = 'https://gizmojo.org/templating'
    version_added = '1.1'

    def __init__(self, **options):
        super().__init__(HtmlLexer, EvoqueLexer, **options)

    # NOTE: defined without `self` on purpose — Pygments treats analyse_text
    # as a plain function attribute on lexer classes.
    def analyse_text(text):
        return EvoqueLexer.analyse_text(text)
class Projection(nn.Module):
    """Two-layer MLP projection head: Linear -> ReLU -> Linear, no biases."""

    def __init__(self, dim, projection_size, hidden_size):
        super().__init__()
        layers = [
            nn.Linear(dim, hidden_size, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_size, projection_size, bias=False),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """Project `x` (…, dim) to (…, projection_size)."""
        return self.net(x)
class SignalDispatcher(object):
    """Registry that binds named signals to handler callables and emits them.

    Handlers may be bound weakly (via weakref proxies) so the dispatcher does
    not keep their owners alive; dead handlers are pruned on emit or by
    signal_garbage_collect().
    """

    def __init__(self):
        # signal name -> list of SignalHandler, kept sorted by priority (desc)
        self._signals = {}

    def signal_clear(self):
        """Drop every handler, breaking their function references first."""
        for handler_list in self._signals.values():
            for handler in handler_list:
                handler.function = None
        self._signals = {}

    def signal_bind(self, signal_name, function, priority=0.5, weak=False, autosort=True):
        """Register `function` for `signal_name` and return its SignalHandler.

        priority: higher runs earlier. weak: hold the callable via weakref
        proxy so binding does not keep its owner alive.
        """
        assert isinstance(signal_name, str)
        assert hasattr(function, '__call__')
        assert hasattr(function, '__code__')
        assert isinstance(priority, (int, float))
        assert isinstance(weak, bool)
        try:
            handlers = self._signals[signal_name]
        except KeyError:
            handlers = self._signals[signal_name] = []
        nargs = function.__code__.co_argcount
        if getattr(function, '__self__', None):
            # bound method: `self` does not count toward handler arity
            nargs -= 1
            if weak:
                # store (unbound func, weak self) so the instance can die
                function = (function.__func__, weakref.proxy(function.__self__))
        elif weak:
            # fix: this branch was unreachable in the flattened source — a
            # plain function is proxied directly
            function = weakref.proxy(function)
        handler = SignalHandler(signal_name, function, priority, nargs > 0)
        handlers.append(handler)
        if autosort:
            handlers.sort(key=lambda handler: -handler.priority)
        return handler

    def signal_force_sort(self, signal_name=None):
        """Re-sort handlers by priority; all signals when signal_name is None.

        Returns None on success, False when signal_name is unknown.
        """
        if signal_name is None:
            for handlers in self._signals.values():
                handlers.sort(key=lambda handler: -handler.priority)
            return None
        elif signal_name in self._signals:
            self._signals[signal_name].sort(key=lambda handler: -handler.priority)
            return None
        return False

    def signal_unbind(self, signal_handler):
        """Remove a handler; silently ignore handlers that are already gone."""
        try:
            handlers = self._signals[signal_handler.signal_name]
        except KeyError:
            pass
        else:
            signal_handler.function = None
            try:
                handlers.remove(signal_handler)
            except ValueError:
                # fix: list.remove raises ValueError (IndexError was never caught)
                pass

    def signal_garbage_collect(self):
        """Drop handlers whose weakly-referenced targets have been collected."""
        for handler_list in self._signals.values():
            i = len(handler_list)
            while i:
                i -= 1
                handler = handler_list[i]
                try:
                    # touching the proxied object raises ReferenceError if dead
                    if isinstance(handler.function, tuple):
                        handler.function[1].__class__
                    else:
                        handler.function.__class__
                except ReferenceError:
                    handler.function = None
                    del handler_list[i]

    def signal_emit(self, signal_name, **kw):
        """Fire `signal_name`, calling handlers in priority order.

        Returns False when a handler stopped the signal, True otherwise.
        Dead weak handlers are pruned as they are encountered.
        """
        assert isinstance(signal_name, str)
        if signal_name not in self._signals:
            return True
        handlers = self._signals[signal_name]
        if not handlers:
            return True
        signal = Signal(origin=self, name=signal_name, **kw)
        # iterate over a snapshot so handlers may unbind during dispatch
        for handler in tuple(handlers):
            if handler.active:
                try:
                    if isinstance(handler.function, tuple):
                        # rebind (func, weak self) into a real bound method
                        fnc = MethodType(*handler.function)
                    else:
                        fnc = handler.function
                    if handler.pass_signal:
                        fnc(signal)
                    else:
                        fnc()
                except ReferenceError:
                    handler.function = None
                    handlers.remove(handler)
                if signal.stopped:
                    return False
        return True
@pytest.fixture(scope='class')  # fix: source had a bare "(scope='class')" — decorator name reconstructed, TODO confirm
def aviary_testing_model():
    """Launch a mocked aviary/vLLM server for the test class and yield its first model.

    Patches VLLMEngine with MockVLLMEngine, starts the runner, polls the SDK
    until a model is listed (up to 20 x 10s), yields it, then shuts serve down.
    """
    test_model_path = get_test_model_path()
    test_model_runner = os.environ.get('AVIARY_TEST_MODEL_LAUNCH_MODULE_PATH', 'rayllm.backend.server.run').lower()
    test_model_patch_target = os.environ.get('AVIARY_TEST_VLLM_PATCH_TARGET', test_model_runner)
    launch_fn = 'run'
    runner_fn = getattr(importlib.import_module(test_model_runner), launch_fn)
    # make sure no stale deployment is running before we start
    serve.shutdown()
    with patch.multiple(target=test_model_patch_target, VLLMEngine=MockVLLMEngine):
        aviary_url = runner_fn(vllm_base_args=[str(test_model_path.absolute())])
        openai_api_base = f'{aviary_url}/v1'
        openai_api_key = 'not_an_actual_key'
        with patch.dict(os.environ, {'AVIARY_URL': aviary_url, 'OPENAI_API_BASE': openai_api_base, 'OPENAI_API_KEY': openai_api_key}):
            model = None  # fix: was unbound (NameError at yield) if every poll attempt failed
            for _i in range(20):
                try:
                    model = rayllm.sdk.models()[0]
                    assert model
                    break
                except Exception as e:
                    print('Error', e)
                    time.sleep(10)
            yield model
    serve.shutdown()
# NOTE(review): the decorator and parameter tuples were mangled in the source
# (URLs stripped, "@pytest.mark" missing). Reconstructed with representative
# invalid URLs for each error message — verify against the original test data.
@pytest.mark.parametrize(
    ('test_input', 'expected'),
    [
        ('https://example.com/', 'not a link to a recognized domain to prettify'),
        ('https://mail.python.org/', 'not a link to a list, message or thread'),
        ('https://discuss.python.org/', 'not a link to a Discourse thread or category'),
    ],
)
def test_process_pretty_url_invalid(test_input, expected):
    """_process_pretty_url must reject URLs it cannot prettify, with a clear message."""
    with pytest.raises(ValueError, match=expected):
        pep_headers._process_pretty_url(test_input)
class MyType(Type):
    """Toy Type whose valid values are strings starting with `thingy`."""

    def __init__(self, thingy):
        self.thingy = thingy

    def __eq__(self, other):
        same_class = type(other) == type(self)
        return same_class and other.thingy == self.thingy

    def __str__(self):
        return str(self.thingy)

    def __repr__(self):
        return str(self.thingy)

    def filter(self, x, strict=False, allow_downcast=None):
        """Validate `x`: must be a str with the required prefix."""
        if not isinstance(x, str):
            raise TypeError('Invalid type')
        if not x.startswith(self.thingy):
            raise ValueError('Invalid value')
        return x

    # NOTE(review): defined without `self` — called with two values; when
    # invoked on an instance, `a` receives the instance. Kept as-is.
    def may_share_memory(a, b):
        return False
class ImageList(MutableList):
    """MutableList of Image items: coerces inserted values and propagates parents."""

    __item_type__ = Image

    def observe_item(self, item):
        """Coerce `item` to the item type and attach this list's parents."""
        item = self.__item_type__.coerce(None, item)
        item._parents = self._parents
        return item

    def __setitem__(self, index, value):
        list.__setitem__(self, index, self.observe_item(value))
        self.changed()

    def __setslice__(self, start, end, value):
        # Python 2 only — never invoked on Python 3; kept for compatibility.
        list.__setslice__(self, start, end, [self.observe_item(i) for i in value])
        self.changed()

    def append(self, x):
        list.append(self, self.observe_item(x))
        self.changed()

    def extend(self, x):
        new_value = []
        for i in x:
            i = self.__item_type__.coerce(None, i)
            i._parents = self._parents
            new_value.append(i)
        list.extend(self, new_value)
        self.changed()

    def insert(self, i, x):
        list.insert(self, i, self.observe_item(x))
        self.changed()

    # fix: the `cls` first parameter shows this was a classmethod whose
    # decorator was lost; without it, `cls()` would receive the instance.
    @classmethod
    def coerce(cls, index, value):
        """Convert `value` into an ImageList, coercing each element."""
        if not isinstance(value, cls):
            if isinstance(value, Iterable):
                result = cls()
                for i in value:
                    item = cls.__item_type__.coerce(index, i)
                    result.append(item)
                return result
            return super().coerce(index, value)
        else:
            return value
class _SpecialForm(typing._Final, _root=True):
    """Internal marker for special typing forms (typing_extensions backport).

    Wraps a `getitem` callable; subscription (`Form[args]`) delegates to it.
    Instances cannot be called, subclassed, or used with isinstance/issubclass.
    """

    __slots__ = ('_name', '__doc__', '_getitem')

    def __init__(self, getitem):
        self._getitem = getitem
        self._name = getitem.__name__
        self.__doc__ = getitem.__doc__

    def __getattr__(self, item):
        if item in {'__name__', '__qualname__'}:
            return self._name
        raise AttributeError(item)

    def __mro_entries__(self, bases):
        raise TypeError(f'Cannot subclass {self!r}')

    def __repr__(self):
        return f'typing_extensions.{self._name}'

    def __reduce__(self):
        # pickle by name; unpickling resolves the module-level singleton
        return self._name

    def __call__(self, *args, **kwds):
        raise TypeError(f'Cannot instantiate {self!r}')

    def __or__(self, other):
        return typing.Union[self, other]

    def __ror__(self, other):
        return typing.Union[other, self]

    def __instancecheck__(self, obj):
        raise TypeError(f'{self} cannot be used with isinstance()')

    def __subclasscheck__(self, cls):
        raise TypeError(f'{self} cannot be used with issubclass()')

    # fix: the source had a bare `_tp_cache` statement — it is the decorator
    # (caches subscription results) and lost its '@' during extraction.
    @_tp_cache
    def __getitem__(self, parameters):
        return self._getitem(self, parameters)
def _has_main_check(line: str) -> bool:
if (line.strip() == ''):
return False
(keyword, *condition) = line.split()
spaceless_condition = ''.join(condition)
return ((keyword == 'if') and line.startswith(keyword) and ((spaceless_condition == "__name__=='__main__':") or (spaceless_condition == '__name__=="__main__":'))) |
def copy(rpin, rpout, compress=0):
    """Copy the filesystem object at rpin to rpout, preserving its type.

    Handles regular files, directories, symlinks, devices, fifos and sockets.
    An existing rpout is deleted first unless it already equals a non-regular
    rpin (then nothing is done). Raises RPathException for unknown types.
    """
    log.Log('Regular copying input path {ip} to output path {op}'.format(ip=rpin, op=rpout), log.DEBUG)
    if (not rpin.lstat()):
        # source vanished: remove any stale destination and stop
        if rpout.lstat():
            rpout.delete()
        return
    if rpout.lstat():
        if (rpin.isreg() or (not cmp(rpin, rpout))):
            # regular files are simply rewritten; differing specials replaced
            rpout.delete()
        else:
            # non-regular and identical — nothing to do
            return
    if rpin.isreg():
        return copy_reg_file(rpin, rpout, compress)
    elif rpin.isdir():
        rpout.mkdir()
    elif rpin.issym():
        # temporarily widen umask so the symlink gets the source's perms
        if Globals.symlink_perms:
            orig_umask = os.umask((511 & (~ rpin.getperms())))
        rpout.symlink(rpin.readlink())
        if Globals.symlink_perms:
            os.umask(orig_umask)
    elif rpin.isdev():
        (dev_type, major, minor) = rpin.getdevnums()
        rpout.makedev(dev_type, major, minor)
    elif rpin.isfifo():
        rpout.mkfifo()
    elif rpin.issock():
        rpout.mksock()
    else:
        raise RPathException("File '{rp!r}' has unknown type.".format(rp=rpin))
def plotly_plt(scatters, title, ylabel, output_file, limits=None, show=False, figsize=None):
    """Render rate-distortion scatter curves to an interactive HTML file via plotly.

    scatters: dicts with 'xs', 'ys', 'name'. limits: (xmin, xmax, ymin, ymax)
    axis ranges. `figsize` is accepted only for interface parity with the
    matplotlib backend; `title`/`show` are currently unused by this backend.
    """
    del figsize
    try:
        import plotly.graph_objs as go
        import plotly.io as pio
    except ImportError as err:
        # chain the cause so the underlying import failure stays visible
        raise SystemExit('Unable to import plotly, install with: pip install pandas plotly') from err
    fig = go.Figure()
    for sc in scatters:
        # add_trace: one trace at a time (add_traces expects a sequence)
        fig.add_trace(go.Scatter(x=sc['xs'], y=sc['ys'], name=sc['name']))
    fig.update_xaxes(title_text='Bit-rate [bpp]')
    fig.update_yaxes(title_text=ylabel)
    if limits is not None:
        fig.update_xaxes(range=[limits[0], limits[1]])
        fig.update_yaxes(range=[limits[2], limits[3]])
    filename = output_file or 'plot.html'
    pio.write_html(fig, file=filename, auto_open=True)
def test_get_recompute(verbose=True, *args, **kwargs):
    """Check get_recompute() returns the right dependency set for a spectrum,
    at thermal equilibrium and after equilibrium is broken."""
    s = load_spec(getTestFile('CO_Tgas1500K_mole_fraction0.01.spec'), binary=True)
    assert (s.get_vars() == ['abscoeff'])
    assert s.conditions['thermal_equilibrium']
    # at equilibrium, radiance can be derived from abscoeff alone
    assert (set(get_recompute(s, ['radiance_noslit'])) == set(('radiance_noslit', 'abscoeff')))
    # setting Tvib != Tgas makes the stored equilibrium flag inconsistent
    s.conditions['Tvib'] = 2000
    with pytest.raises(AssertionError):
        assert (not s.is_at_equilibrium(check='error'))
    s.conditions['thermal_equilibrium'] = False
    # out of equilibrium, emisscoeff must be recomputed as well
    assert (set(get_recompute(s, ['radiance_noslit'])) == set(('abscoeff', 'emisscoeff', 'radiance_noslit')))
class GroupBox(_GroupBase):
    """Bar widget that displays the list of groups and lets the user switch,
    drag-reorder, and cycle through them with the mouse."""

    orientations = base.ORIENTATION_HORIZONTAL
    defaults = [
        ('block_highlight_text_color', None, 'Selected group font colour'),
        ('active', 'FFFFFF', 'Active group font colour'),
        ('inactive', '404040', 'Inactive group font colour'),
        ('highlight_method', 'border', "Method of highlighting ('border', 'block', 'text', or 'line')Uses `*_border` color settings"),
        ('rounded', True, 'To round or not to round box borders'),
        ('this_current_screen_border', '215578', 'Border or line colour for group on this screen when focused.'),
        ('this_screen_border', '215578', 'Border or line colour for group on this screen when unfocused.'),
        ('other_current_screen_border', '404040', 'Border or line colour for group on other screen when focused.'),
        ('other_screen_border', '404040', 'Border or line colour for group on other screen when unfocused.'),
        ('highlight_color', ['000000', '282828'], "Active group highlight color when using 'line' highlight method."),
        ('urgent_alert_method', 'border', "Method for alerting you of WM urgent hints (one of 'border', 'text', 'block', or 'line')"),
        ('urgent_text', 'FF0000', 'Urgent group font color'),
        ('urgent_border', 'FF0000', 'Urgent border or line color'),
        ('disable_drag', False, 'Disable dragging and dropping of group names on widget'),
        ('invert_mouse_wheel', False, 'Whether to invert mouse wheel group movement'),
        ('use_mouse_wheel', True, 'Whether to use mouse wheel events'),
        ('visible_groups', None, 'Groups that will be visible. If set to None or [], all groups will be visible.Visible groups are identified by name not by their displayed label.'),
        ('hide_unused', False, 'Hide groups that have no windows and that are not displayed on any screen.'),
        ('spacing', None, 'Spacing between groups(if set to None, will be equal to margin_x)'),
        ('toggle', True, 'Enable toggling of group when clicking on same group name'),
    ]

    def __init__(self, **config):
        _GroupBase.__init__(self, **config)
        self.add_defaults(GroupBox.defaults)
        self.clicked = None  # group grabbed for drag-reordering
        self.click = None  # x coordinate of the last press/release
        default_callbacks = {'Button1': self.select_group}
        if self.use_mouse_wheel:
            default_callbacks.update({
                ('Button5' if self.invert_mouse_wheel else 'Button4'): self.prev_group,
                ('Button4' if self.invert_mouse_wheel else 'Button5'): self.next_group,
            })
        self.add_callbacks(default_callbacks)

    def _configure(self, qtile, bar):
        _GroupBase._configure(self, qtile, bar)
        if self.spacing is None:
            self.spacing = self.margin_x

    # fix: every caller accesses `self.groups` as an attribute (iteration,
    # membership, len) — the @property decorator was lost in the source.
    @property
    def groups(self):
        """Groups to display after label/hide_unused/visible_groups filtering."""
        groups = filter(lambda g: g.label, self.qtile.groups)
        if self.hide_unused:
            groups = filter(lambda g: g.windows or g.screen, groups)
        if self.visible_groups:
            groups = filter(lambda g: g.name in self.visible_groups, groups)
        return list(groups)

    def get_clicked_group(self):
        """Map the stored click x-coordinate to the group box under it."""
        group = None
        new_width = self.margin_x - self.spacing / 2.0
        width = 0
        for g in self.groups:
            new_width += self.box_width([g]) + self.spacing
            if width <= self.click <= new_width:
                group = g
                break
            width = new_width
        return group

    def button_press(self, x, y, button):
        self.click = x
        _GroupBase.button_press(self, x, y, button)

    def next_group(self):
        """Switch to the next displayed group after the current one."""
        group = None
        current_group = self.qtile.current_group
        i = itertools.cycle(self.qtile.groups)
        # advance the cycle to the current group…
        while next(i) != current_group:
            pass
        # …then take the next group that is actually displayed
        while group is None or group not in self.groups:
            group = next(i)
        self.go_to_group(group)

    def prev_group(self):
        """Switch to the displayed group preceding the current one."""
        group = None
        current_group = self.qtile.current_group
        i = itertools.cycle(reversed(self.qtile.groups))
        while next(i) != current_group:
            pass
        while group is None or group not in self.groups:
            group = next(i)
        self.go_to_group(group)

    def select_group(self):
        """Button1 handler: focus the clicked group (and arm drag if enabled)."""
        self.clicked = None
        group = self.get_clicked_group()
        if not self.disable_drag:
            self.clicked = group
        self.go_to_group(group)

    def go_to_group(self, group):
        if group:
            if self.bar.screen.group != group or not self.disable_drag or not self.toggle:
                self.bar.screen.set_group(group, warp=False)
            else:
                # clicking the already-focused group toggles back
                self.bar.screen.toggle_group(group, warp=False)

    def button_release(self, x, y, button):
        """Complete a drag: swap the pressed group with the one under release."""
        self.click = x
        if button not in (5, 4):
            group = self.get_clicked_group()
            if group and self.clicked:
                group.switch_groups(self.clicked.name)
                self.clicked = None

    def calculate_length(self):
        """Total widget width: margins + inter-group spacing + box widths."""
        width = self.margin_x * 2 + (len(self.groups) - 1) * self.spacing
        for g in self.groups:
            width += self.box_width([g])
        return width

    def group_has_urgent(self, group):
        return any(w.urgent for w in group.windows)

    def draw(self):
        self.drawer.clear(self.background or self.bar.background)
        offset = self.margin_x
        for i, g in enumerate(self.groups):
            to_highlight = False
            is_block = self.highlight_method == 'block'
            is_line = self.highlight_method == 'line'
            bw = self.box_width([g])
            # text colour: urgent > has-windows (active) > inactive
            if self.group_has_urgent(g) and self.urgent_alert_method == 'text':
                text_color = self.urgent_text
            elif g.windows:
                text_color = self.active
            else:
                text_color = self.inactive
            if g.screen:
                if self.highlight_method == 'text':
                    border = None
                    text_color = self.this_current_screen_border
                else:
                    if self.block_highlight_text_color:
                        text_color = self.block_highlight_text_color
                    if self.bar.screen.group.name == g.name:
                        if self.qtile.current_screen == self.bar.screen:
                            border = self.this_current_screen_border
                            to_highlight = True
                        else:
                            border = self.this_screen_border
                    elif self.qtile.current_screen == g.screen:
                        border = self.other_current_screen_border
                    else:
                        border = self.other_screen_border
            elif self.group_has_urgent(g) and self.urgent_alert_method in ('border', 'block', 'line'):
                border = self.urgent_border
                if self.urgent_alert_method == 'block':
                    is_block = True
                elif self.urgent_alert_method == 'line':
                    is_line = True
            else:
                border = None
            self.drawbox(
                offset,
                g.label,
                border,
                text_color,
                highlight_color=self.highlight_color,
                width=bw,
                rounded=self.rounded,
                block=is_block,
                line=is_line,
                highlighted=to_highlight,
            )
            offset += bw + self.spacing
        self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.width)
class Critic(nn.Module):
    """Twin Q-value network (TD3-style): two independent heads over (state, action).

    The action is tiled `action_repeat` times so it occupies roughly a third
    of the joint input width regardless of the state dimension.
    """

    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()
        self.action_repeat = max(1, int((0.5 * state_dim) // action_dim))
        joint_dim = state_dim + action_dim * self.action_repeat
        # first Q head
        self.l1 = nn.Linear(joint_dim, 400)
        self.l2 = nn.Linear(400, 300)
        self.l3 = nn.Linear(300, 1)
        # second Q head
        self.l4 = nn.Linear(joint_dim, 400)
        self.l5 = nn.Linear(400, 300)
        self.l6 = nn.Linear(300, 1)

    def forward(self, x, u):
        """Return (Q1, Q2) estimates for states `x` and actions `u`."""
        joint = torch.cat([x, u.repeat([1, self.action_repeat])], dim=1)
        q1 = self.l3(F.relu(self.l2(F.relu(self.l1(joint)))))
        q2 = self.l6(F.relu(self.l5(F.relu(self.l4(joint)))))
        return (q1, q2)

    def Q1(self, x, u):
        """Return only the first head's Q estimate (used for the actor loss)."""
        joint = torch.cat([x, u.repeat([1, self.action_repeat])], dim=1)
        return self.l3(F.relu(self.l2(F.relu(self.l1(joint)))))
class RelativePositionGraph(object):
    def __init__(self, stopword_ids, tokenizer):
        """stopword_ids: token ids excluded from graph nodes; tokenizer must expose get_vocab()."""
        self.stopword_ids = stopword_ids
        self.tokenizer = tokenizer
        # token string -> token id, taken from the tokenizer's vocabulary
        self.word2id = self.tokenizer.get_vocab()
    def update_node_dict(self, b, new_sentence):
        """Register `new_sentence` (list of token ids) for batch entry `b`.

        Adds the sentence to the sentence dictionaries (if unseen) and inserts
        its non-stopword tokens as graph nodes, evicting the oldest nodes once
        node_capacity is exceeded.
        """
        if (str(new_sentence) not in self.sentence2id[b]):
            self.id2sentence[b].append(new_sentence)
            self.sentence2id[b][str(new_sentence)] = (len(self.id2sentence[b]) - 1)
        token_ids = [item for item in new_sentence if (item not in self.stopword_ids)]
        for tid in token_ids:
            if (tid in self.node2id[b]):
                continue
            self.id2node[b].append(tid)
            if (len(self.id2node[b]) > self.node_capacity):
                # keep only the most recent node_capacity nodes
                self.id2node[b] = self.id2node[b][(- self.node_capacity):]
            # rebuild the id map after every insertion/eviction
            # NOTE(review): nesting reconstructed from flattened source — the
            # rebuild is assumed to run for each new node; confirm upstream.
            self.node2id[b] = {}
            for i in range(len(self.id2node[b])):
                self.node2id[b][self.id2node[b][i]] = i
def push_batch(self, new_sentence_list, prev_action_list, *argv):
assert (len(new_sentence_list) == len(self.triplets))
assert (len(new_sentence_list) == len(self.id2node))
assert (len(new_sentence_list) == len(self.id2relation))
for b in range(len(new_sentence_list)):
self.push_one(b, new_sentence_list[b], prev_action_list[b])
    def push_one(self, b, new_sentence, prev_action, *argv):
        """Add `new_sentence` to batch entry `b` and rebuild its relative-position triplets.

        Each triplet is [head node, tail node, relation id] where the relation
        encodes the token distance clamped to [-radius, radius]. A 'stop'
        prev_action leaves the graph unchanged.
        """
        assert (b < len(self.triplets))
        # 'stop' does not change the observation, so nothing to register
        if ((prev_action is not None) and (prev_action in ['stop'])):
            return
        self.update_node_dict(b, new_sentence)
        triplets = []
        # rebuild triplets over ALL stored sentences, since node eviction in
        # update_node_dict may have invalidated earlier node indices
        for s_id in range(len(self.id2sentence[b])):
            sent = self.id2sentence[b][s_id]
            token_ids = [item for item in sent if (item in self.node2id[b])]
            if (len(token_ids) <= 1):
                continue
            for i in range(len(token_ids)):
                if (token_ids[i] not in self.node2id[b]):
                    continue
                # j starts at i: self-pairs get relation id for distance 0
                for j in range(i, len(token_ids)):
                    if (token_ids[j] not in self.node2id[b]):
                        continue
                    # clamp the positional distance to the radius window
                    dist = min(max((j - i), (- self.radius)), self.radius)
                    triplets.append(np.array([self.node2id[b][token_ids[i]], self.node2id[b][token_ids[j]], self.relation2id[b][str(dist)]]))
        if (len(triplets) > 0):
            self.triplets[b] = np.stack(triplets, axis=0)
        else:
            self.triplets[b] = []
def push_batch_question(self, question_list, *argv):
assert (len(question_list) == len(self.triplets))
assert (len(question_list) == len(self.id2node))
assert (len(question_list) == len(self.id2relation))
for b in range(len(question_list)):
self.push_one(b, question_list[b], None)
def get_adjacency_matrix(self, triplets, *argv):
batch_size = len(triplets)
adj = np.zeros((batch_size, self.relation_capacity, self.node_capacity, self.node_capacity), dtype='float32')
for b in range(batch_size):
for t in triplets[b]:
(node1, node2, relation) = (t[0], t[1], t[2])
adj[b][relation][node1][node2] = 1.0
adj[b][((self.relation_capacity - 1) - relation)][node2][node1] = 1.0
return adj
def get_node_vocabulary(self):
return copy.deepcopy(self.id2node)
def get_relation_vocabulary(self):
return copy.deepcopy(self.id2relation)
def get_triplets(self):
return copy.deepcopy(self.triplets)
def get_observable_node_mask(self, observation_id_matrix, question_id_matrix=None, node_vocabulary=None):
if (node_vocabulary is None):
node_vocabulary = self.id2node
assert (observation_id_matrix.size(0) == len(node_vocabulary))
if (question_id_matrix is not None):
assert (question_id_matrix.size(0) == len(node_vocabulary))
observable_node_mask = np.zeros((observation_id_matrix.size(0), self.node_capacity), dtype='float32')
for b in range(observation_id_matrix.size(0)):
node2id = {}
for (i, w) in enumerate(node_vocabulary[b]):
node2id[w] = i
for w_id in observation_id_matrix[b]:
if (w_id in node2id):
observable_node_mask[b][node2id[w_id]] = 1.0
if (question_id_matrix is not None):
for w_id in question_id_matrix[b]:
if (w_id in node2id):
observable_node_mask[b][node2id[w_id]] = 1.0
return observable_node_mask
def get(self):
triplets = self.get_triplets()
node_vocabulary = self.get_node_vocabulary()
relation_vocabulary = self.get_relation_vocabulary()
adj = self.get_adjacency_matrix(triplets)
return (triplets, node_vocabulary, relation_vocabulary, adj)
def reset(self, node_capacity, relation_capacity, batch_size):
self.node_capacity = node_capacity
self.relation_capacity = relation_capacity
assert (relation_capacity > 1)
assert ((relation_capacity % 2) == 1)
(self.id2node, self.id2relation, self.id2sentence) = ([], [], [])
(self.node2id, self.relation2id, self.sentence2id) = ([], [], [])
self.triplets = []
minus_id = self.word2id['-']
self.radius = int((relation_capacity / 2))
tmp = (([i for i in range(1, (self.radius + 1))][::(- 1)] + [0]) + [i for i in range(1, (self.radius + 1))])
tmp = [str(item) for item in tmp]
relations = copy.deepcopy(tmp)
for i in range(self.radius):
relations[i] = ('-' + relations[i])
id2relation = [[self.word2id[item]] for item in tmp]
for i in range(self.radius):
id2relation[i] = ([minus_id] + id2relation[i])
relation2id = {}
for i in range(len(id2relation)):
relation2id[relations[i]] = i
for _ in range(batch_size):
self.id2node.append([])
self.id2relation.append(id2relation)
self.node2id.append({})
self.relation2id.append(relation2id)
self.sentence2id.append({})
self.id2sentence.append([])
self.triplets.append([]) |
@with_fixtures(WebFixture, SqlAlchemyFixture, OptimisticConcurrencyWithAjaxFixture)
def test_optimistic_concurrency_with_ajax(web_fixture, sql_alchemy_fixture, concurrency_fixture):
    """Ajax refreshes must not trip the optimistic-concurrency check; only a
    full form submit after a concurrent backend change shows the error.

    NOTE(review): the original first line read `_fixtures(...)` with no
    decorator syntax — restored to reahl-tofu's @with_fixtures decorator,
    without which the fixtures are never injected into this test.
    """
    fixture = concurrency_fixture
    with sql_alchemy_fixture.persistent_test_classes(fixture.ModelObject):
        model_object = fixture.model_object
        Session.add(model_object)
        model_object.some_trigger_field = 'some value'
        model_object.some_field = 'some value'
        wsgi_app = web_fixture.new_wsgi_app(child_factory=fixture.MyForm.factory(), enable_js=True)
        web_fixture.reahl_server.set_app(wsgi_app)
        browser = web_fixture.driver_browser
        browser.open('/')
        browser.type(XPath.input_labelled('Some field'), 'something for the fun of it')
        # Changing the trigger field refreshes parts of the page via ajax.
        with browser.refresh_expected_for('#myform', fixture.refresh_expected_for_form), browser.refresh_expected_for('#inner_div', True):
            browser.type(XPath.input_labelled('Some trigger field'), 'something else')
        fixture.make_concurrent_change_in_backend()
        # Ajax refreshes after the concurrent change must NOT raise the error.
        assert (not fixture.is_concurrency_error_displayed())
        browser.type(XPath.input_labelled('Some trigger field'), 'something else again')
        assert (not fixture.is_concurrency_error_displayed())
        # Only the real submit detects the concurrent modification.
        browser.click(XPath.button_labelled('Submit'))
        assert fixture.is_concurrency_error_displayed()
def test_no_capture_preserves_custom_excepthook(testdir):
    """A test marked with qt_no_exception_capture keeps a user-installed
    sys.excepthook, while an unmarked test gets pytest-qt's capturing hook.

    Fix: the generated module previously contained a bare
    `.qt_no_exception_capture` line — a syntax error — instead of the
    `@pytest.mark.qt_no_exception_capture` marker. The module source is also
    re-indented properly (makepyfile dedents it).
    """
    testdir.makepyfile(
        """
        import pytest
        import sys
        from pytestqt.qt_compat import qt_api

        def custom_excepthook(*args):
            sys.__excepthook__(*args)

        sys.excepthook = custom_excepthook

        @pytest.mark.qt_no_exception_capture
        def test_no_capture(qtbot):
            assert sys.excepthook is custom_excepthook

        def test_capture(qtbot):
            assert sys.excepthook is not custom_excepthook
        """
    )
    res = testdir.runpytest()
    res.stdout.fnmatch_lines(['*2 passed*'])
class Migration(migrations.Migration):
    """Add start/end dates and a status field to PublisherPayout."""

    dependencies = [
        ('adserver', '0053_add_regiontopic_index'),
    ]

    operations = [
        migrations.AddField(
            model_name='publisherpayout',
            name='end_date',
            field=models.DateField(
                help_text='Last day of paid period',
                null=True,
                verbose_name='End Date',
            ),
        ),
        migrations.AddField(
            model_name='publisherpayout',
            name='start_date',
            field=models.DateField(
                help_text='First day of paid period',
                null=True,
                verbose_name='Start Date',
            ),
        ),
        migrations.AddField(
            model_name='publisherpayout',
            name='status',
            field=models.CharField(
                choices=[
                    ('pending', 'Pending'),
                    ('hold', 'On hold'),
                    ('emailed', 'Email sent'),
                    ('paid', 'Payment sent'),
                ],
                default='pending',
                help_text='Status of this payout',
                max_length=50,
            ),
        ),
    ]
class CreateSignatureViewTest(TestCase):
    """View tests for the create_signature endpoint."""

    @classmethod
    def setUpTestData(cls):
        # Fix: Django invokes setUpTestData on the CLASS; without
        # @classmethod the call raises TypeError (missing 'cls' argument).
        add_default_data()

    def test_CreateSignaturePOSTOk(self):
        """Valid POST creates an unconfirmed signature and redirects."""
        data = {'first_name': 'Alan', 'last_name': 'John', 'email': '', 'phone': '', 'subscribed_to_mailinglist': False}
        petition = Petition.objects.filter(published=True).first()
        response = self.client.post(reverse('create_signature', args=[petition.id]), data, follow=True)
        self.assertRedirects(response, petition.url)
        signature = Signature.objects.filter(petition=petition).first()
        self.assertEqual(signature.confirmed, False)
        self.assertEqual(signature.email, '')
        # An empty phone is normalized to a bare '+' prefix.
        self.assertEqual(signature.phone, '+')
        self.assertEqual(signature.first_name, 'Alan')
        self.assertEqual(signature.last_name, 'John')
        self.assertEqual(signature.subscribed_to_mailinglist, False)

    def test_CreateSignaturePOSTNok(self):
        """Invalid POST re-renders the detail page with field errors."""
        data = {'first_name': 'Alan', 'last_name': '', 'email': 'wrong-mail.org', 'phone': ''}
        petition = Petition.objects.filter(published=True).first()
        response = self.client.post(reverse('create_signature', args=[petition.id]), data)
        self.assertEqual(Signature.objects.count(), 0)
        self.assertTemplateUsed(response, 'petition/petition_detail.html')
        self.assertContains(response, 'This field is required')
        self.assertContains(response, 'Enter a valid phone number')
        self.assertContains(response, 'Enter a valid email address')

    def test_CreateSignatureGETOk(self):
        """GET simply redirects back to the petition page."""
        petition = Petition.objects.filter(published=True).first()
        response = self.client.get(reverse('create_signature', args=[petition.id]), follow=True)
        self.assertRedirects(response, petition.url)
class MultiLineFormatter(logging.Formatter):
    """Formatter that indents continuation lines of multi-line messages so
    they line up under the message text of the first line."""

    def __init__(self, fmt):
        super().__init__(fmt)
        # Width of the levelname column, read from a '%(levelname)-<N>s'
        # placeholder in the format string; 0 when no such placeholder.
        found = _re.search(r'%\(levelname\)-(\d+)s', fmt)
        self.level_length = int(found.group(1)) if found else 0

    def format(self, record):
        text = super().format(record)
        lines = text.split('\n')
        if len(lines) <= 1:
            return text
        # Without an explicit column width, fall back to the width of the
        # rendered level token at the start of the first line.
        width = self.level_length if self.level_length else len(lines[0].split(' ')[0])
        pad = ' ' * (width + 1)
        return '\n'.join([lines[0]] + [pad + line for line in lines[1:]])
class OpenFitInNewTab(ContextMenuSingle):
    """Context-menu entry that opens the selected fit in a new tab."""

    def __init__(self):
        self.mainFrame = gui.mainFrame.MainFrame.getInstance()

    def display(self, callingWindow, srcContext, mainItem):
        # Offered only for fit-like items in the supported contexts.
        supported = ('projectedFit', 'commandFit', 'graphFitListMisc', 'graphTgtListMisc')
        if srcContext not in supported or mainItem is None:
            return False
        if isinstance(mainItem, BaseWrapper):
            if not mainItem.isFit:
                return False
            mainItem = mainItem.item
        # Hide the entry when the selected fit is already the active one.
        return self.mainFrame.getActiveFit() != mainItem.ID

    def getText(self, callingWindow, itmContext, mainItem):
        return _t('Open Fit in New Tab')

    def activate(self, callingWindow, fullContext, mainItem, i):
        if isinstance(mainItem, BaseWrapper):
            mainItem = mainItem.item
        wx.PostEvent(self.mainFrame, FitSelected(fitID=mainItem.ID, startup=2))
def main():
    """Run every demo/check routine in sequence."""
    demos = (
        enhance_print,
        basic_multivector_operations,
        check_generalized_BAC_CAB_formulas,
        derivatives_in_rectangular_coordinates,
        derivatives_in_spherical_coordinates,
        rounding_numerical_components,
        conformal_representations_of_circles_lines_spheres_and_planes,
        properties_of_geometric_objects,
        extracting_vectors_from_conformal_2_blade,
        reciprocal_frame_test,
    )
    for demo in demos:
        demo()
def run_test(case, m):
    """Elaborate `m`, generate and type-check its behavioral RTLIR, translate
    its single update block to Verilog, and compare against case.REF_UPBLK."""
    m.elaborate()
    m.apply(BehavioralRTLIRGenPass(m))
    m.apply(BehavioralRTLIRTypeCheckPass(m))
    visitor = YosysBehavioralRTLIRToVVisitorL2(lambda name: name in verilog_reserved)
    rtlir_upblks = m.get_metadata(BehavioralRTLIRGenPass.rtlir_upblks)
    update_blocks = m.get_update_blocks()
    # The test cases are written with exactly one update block.
    assert len(update_blocks) == 1
    for blk in update_blocks:
        generated = '\n'.join(visitor.enter(blk, rtlir_upblks[blk]))
        assert generated + '\n' == case.REF_UPBLK
def init_guess_by_chkfile(cell, chkfile_name, project=None, kpt=None):
    """Build a UHF initial-guess density matrix from a previous SCF chkfile.

    Args:
        cell: target Cell whose basis the guess is built in.
        chkfile_name: path of a chkfile holding 'mo_coeff'/'mo_occ' (and
            optionally 'kpt' or 'kpts') from an earlier calculation.
        project: whether to project stored MOs onto `cell`'s basis; when
            None this is decided by comparing the two basis sets.
        kpt: target k-point; defaults to Gamma (zeros(3)).

    Returns:
        Alpha/beta density matrix pair; made real at the Gamma point.
    """
    from pyscf import gto
    (chk_cell, scf_rec) = chkfile.load_scf(chkfile_name)
    if (project is None):
        # Project only when the stored basis differs from the target basis.
        project = (not gto.same_basis_set(chk_cell, cell))
    mo = scf_rec['mo_coeff']
    mo_occ = scf_rec['mo_occ']
    if (kpt is None):
        kpt = np.zeros(3)
    if ('kpt' in scf_rec):
        chk_kpt = scf_rec['kpt']
    elif ('kpts' in scf_rec):
        # k-point-sampled record: pick the stored k-point closest to `kpt`.
        kpts = scf_rec['kpts']
        where = np.argmin(lib.norm((kpts - kpt), axis=1))
        chk_kpt = kpts[where]
        if (getattr(mo[0], 'ndim', None) == 2):
            # One MO matrix per k-point (restricted-style record).
            mo = mo[where]
            mo_occ = mo_occ[where]
        else:
            # Separate alpha/beta MO matrices per k-point.
            mo = [mo[0][where], mo[1][where]]
            mo_occ = [mo_occ[0][where], mo_occ[1][where]]
    else:
        chk_kpt = np.zeros(3)
    if project:
        # Overlap matrix of the target cell, used to re-normalize below.
        s = cell.pbc_intor('int1e_ovlp', kpt=kpt)
    def fproj(mo):
        # Project MOs onto the new basis and re-normalize each orbital.
        if project:
            mo = addons.project_mo_nr2nr(chk_cell, mo, cell, (chk_kpt - kpt))
            norm = np.einsum('pi,pi->i', mo.conj(), s.dot(mo))
            mo /= np.sqrt(norm)
        return mo
    if (getattr(mo, 'ndim', None) == 2):
        # Restricted MOs: split occupations into alpha (1 per occupied
        # orbital) and beta (the remainder) to form a UHF density.
        mo = fproj(mo)
        mo_occa = (mo_occ > 1e-08).astype(np.double)
        mo_occb = (mo_occ - mo_occa)
        dm = mol_uhf.make_rdm1([mo, mo], [mo_occa, mo_occb])
    else:
        dm = mol_uhf.make_rdm1([fproj(mo[0]), fproj(mo[1])], mo_occ)
    if ((kpt is None) or np.allclose(kpt, 0)):
        # At Gamma the density matrix is real.
        dm = dm.real
    return dm
class Bookmarks(SongsMenuPlugin):
    """Song-menu plugin listing each selected song's bookmarks; activating a
    bookmark switches playback to that song and seeks to the mark."""

    PLUGIN_ID = 'Go to Bookmark'
    PLUGIN_NAME = _('Go to Bookmark')
    PLUGIN_DESC = _('Manages bookmarks in the selected files.')
    PLUGIN_ICON = Icons.GO_JUMP
    plugin_handles = any_song(has_bookmark)

    def __init__(self, songs, *args, **kwargs):
        super().__init__(songs, *args, **kwargs)
        self.__menu = Gtk.Menu()
        self.__create_children(self.__menu, songs)
        self.set_submenu(self.__menu)

    class FakePlayer():
        """Minimal player stand-in: seeking first switches playback to the
        wrapped song, then seeks the real player."""

        def __init__(self, song):
            self.song = song

        def seek(self, time):
            if app.player.go_to(self.song._song, explicit=True):
                app.player.seek(time)

        def get_position(self, *args):
            return 0

    def __create_children(self, menu, songs):
        """Build one submenu per bookmarked song."""
        self.__remove_children(menu)
        for song in songs:
            marks = song.bookmarks
            if marks:
                fake_player = self.FakePlayer(song)
                song_item = Gtk.MenuItem(song.comma('title'))
                song_menu = Gtk.Menu()
                song_item.set_submenu(song_menu)
                menu.append(song_item)
                items = qltk.bookmarks.MenuItems(marks, fake_player, True)
                for item in items:
                    song_menu.append(item)
                song_menu.append(SeparatorMenuItem())
                i = qltk.MenuItem(_('_Edit Bookmarks...'), Icons.EDIT)

                # Fix: bind the current song's player as a default argument.
                # A plain closure late-binds `fake_player`, so every song's
                # "Edit Bookmarks..." item would edit the LAST song's
                # bookmarks once the loop finished.
                def edit_bookmarks_cb(menu_item, player=fake_player):
                    window = EditBookmarks(self.plugin_window, app.library, player)
                    window.show()
                i.connect('activate', edit_bookmarks_cb)
                song_menu.append(i)
        if (menu.get_active() is None):
            # No bookmarked songs: show a disabled placeholder entry.
            no_marks = Gtk.MenuItem(_('No Bookmarks'))
            no_marks.set_sensitive(False)
            menu.append(no_marks)
        menu.show_all()

    def __remove_children(self, menu):
        for child in menu.get_children():
            menu.remove(child)

    def plugin_songs(self, songs):
        # All interaction happens through the submenu built in __init__.
        pass
class AttendeeTicket():
    """View over a ticketing order position (an attendee's ticket).

    NOTE(review): decorators appear stripped in this extraction — `role`
    presumably carries a strawberry field decorator in the original;
    confirm against the upstream source.
    """

    id: strawberry.ID
    hashid: strawberry.ID
    name: Optional[str]
    email: Optional[str]
    secret: str
    variation: Optional[strawberry.ID]
    item: TicketItem
    _conference: strawberry.Private[Conference]
    _data: strawberry.Private[Any]

    def role(self, info: Info) -> (ConferenceRole | None):
        """Conference role granted by this ticket; None for non-admission items."""
        if (not self.item.admission):
            return None
        return get_conference_roles_for_ticket_data(conference=self._conference, user_id=info.context.request.user.id, data=self._data)[0]

    @classmethod
    def from_data(cls, data: OrderPositionDict, language: str, categories: Dict[(str, CategoryDict)], questions: List[QuestionDict], conference: Conference):
        """Alternate constructor from a raw order-position dict.

        Fix: the method takes `cls` but was missing @classmethod, so calling
        `AttendeeTicket.from_data(...)` bound the data dict to `cls`.
        """
        data['item']['questions'] = get_questions_with_answers(questions, data)
        return cls(id=data['id'], hashid=encode_hashid(data['id']), name=data['attendee_name'], email=data['attendee_email'], secret=data['secret'], variation=data['variation'], item=TicketItem.from_data(data['item'], language=language, categories=categories, questions=data['item']['questions']), _conference=conference, _data=data)
class SparseMaxPool(SparseModule):
    """Sparse N-dimensional max pooling over spconv.SparseConvTensor inputs.

    Args:
        ndim: number of spatial dimensions.
        kernel_size, stride, padding, dilation: int or per-dimension sequence.
        subm: submanifold mode — output keeps the input's spatial shape.
    """

    def __init__(self, ndim, kernel_size, stride=1, padding=0, dilation=1, subm=False):
        super(SparseMaxPool, self).__init__()
        # Broadcast scalar hyper-parameters to one value per spatial dimension.
        if (not isinstance(kernel_size, (list, tuple))):
            kernel_size = ([kernel_size] * ndim)
        if (not isinstance(stride, (list, tuple))):
            stride = ([stride] * ndim)
        if (not isinstance(padding, (list, tuple))):
            padding = ([padding] * ndim)
        if (not isinstance(dilation, (list, tuple))):
            dilation = ([dilation] * ndim)
        self.ndim = ndim
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.subm = subm
        self.dilation = dilation

    def forward(self, input):
        """Max-pool a SparseConvTensor and return a new SparseConvTensor."""
        assert isinstance(input, spconv.SparseConvTensor)
        features = input.features
        device = features.device
        indices = input.indices
        spatial_shape = input.spatial_shape
        batch_size = input.batch_size
        if (not self.subm):
            # Regular pooling: output shape follows conv output arithmetic.
            out_spatial_shape = ops.get_conv_output_size(spatial_shape, self.kernel_size, self.stride, self.padding, self.dilation)
        else:
            # Submanifold pooling keeps the spatial shape unchanged.
            out_spatial_shape = spatial_shape
        # Build the input/output index pairs the pooling kernel gathers over.
        (outids, indice_pairs, indice_pairs_num) = ops.get_indice_pairs(indices, batch_size, spatial_shape, self.kernel_size, self.stride, self.padding, self.dilation, 0, self.subm)
        out_features = Fsp.indice_maxpool(features, indice_pairs.to(device), indice_pairs_num.to(device), outids.shape[0])
        out_tensor = spconv.SparseConvTensor(out_features, outids, out_spatial_shape, batch_size)
        # Propagate cached index structures so downstream layers can reuse them.
        out_tensor.indice_dict = input.indice_dict
        out_tensor.grid = input.grid
        return out_tensor
class TestProposers(unittest.TestCase):
    """Unit tests for the planner proposers (greedy, uniform, grid-search,
    and embedding-offload scale-up).

    NOTE(review): several numeric literals in this class appear corrupted in
    the extracted source (empty list elements `[, 2000000]`, bare `budget =`
    assignments, truncated `0.` load factors); they must be restored from the
    original file before this module can even parse.
    """

    def setUp(self) -> None:
        topology = Topology(world_size=2, compute_device='cuda')
        self.enumerator = EmbeddingEnumerator(topology=topology, batch_size=BATCH_SIZE)
        self.greedy_proposer = GreedyProposer()
        self.uniform_proposer = UniformProposer()
        self.grid_search_proposer = GridSearchProposer()

    def test_greedy_two_table(self) -> None:
        """Greedy proposer walks sharding options in per-table perf order,
        and stops proposing after repeated non-improving perf ratings."""
        tables = [EmbeddingBagConfig(num_embeddings=100, embedding_dim=10, name='table_0', feature_names=['feature_0']), EmbeddingBagConfig(num_embeddings=100, embedding_dim=10, name='table_1', feature_names=['feature_1'])]
        model = TestSparseNN(tables=tables, sparse_device=torch.device('meta'))
        search_space = self.enumerator.enumerate(module=model, sharders=[cast(ModuleSharder[torch.nn.Module], EmbeddingBagCollectionSharder())])
        self.greedy_proposer.load(search_space)
        output = []
        for _ in range(5):
            proposal = cast(List[ShardingOption], self.greedy_proposer.propose())
            # Sort by (worst shard perf, name) so the expectation is stable.
            proposal.sort(key=(lambda sharding_option: (max([shard.perf.total for shard in sharding_option.shards]), sharding_option.name)))
            output.append([(candidate.name, candidate.sharding_type, candidate.compute_kernel) for candidate in proposal])
            self.greedy_proposer.feedback(partitionable=True)
        expected_output = [[('table_0', 'row_wise', 'fused'), ('table_1', 'row_wise', 'fused')], [('table_0', 'table_row_wise', 'fused'), ('table_1', 'row_wise', 'fused')], [('table_1', 'row_wise', 'fused'), ('table_0', 'data_parallel', 'dense')], [('table_1', 'table_row_wise', 'fused'), ('table_0', 'data_parallel', 'dense')], [('table_0', 'data_parallel', 'dense'), ('table_1', 'data_parallel', 'dense')]]
        self.assertEqual(expected_output, output)
        # After _threshold consecutive proposals with worse perf_rating than
        # the best seen (100), the proposer gives up and returns None.
        self.greedy_proposer._threshold = 10
        self.greedy_proposer.load(search_space)
        proposal = None
        for i in range(13):
            proposal = cast(List[ShardingOption], self.greedy_proposer.propose())
            self.greedy_proposer.feedback(partitionable=True, perf_rating=(100 + i))
        self.assertEqual(self.greedy_proposer._best_perf_rating, 100)
        self.assertEqual(proposal, None)

    def test_uniform_three_table(self) -> None:
        """Uniform proposer applies one sharding type to ALL tables at once."""
        tables = [EmbeddingBagConfig(num_embeddings=(100 * i), embedding_dim=(10 * i), name=('table_' + str(i)), feature_names=[('feature_' + str(i))]) for i in range(1, 4)]
        model = TestSparseNN(tables=tables, sparse_device=torch.device('meta'))
        mock_ebc_sharder = cast(ModuleSharder[torch.nn.Module], EmbeddingBagCollectionSharder())
        # Restrict sharding types so the expected proposals are deterministic.
        mock_ebc_sharder.sharding_types = MagicMock(return_value=[ShardingType.DATA_PARALLEL.value, ShardingType.TABLE_WISE.value, ShardingType.ROW_WISE.value, ShardingType.TABLE_ROW_WISE.value])
        self.maxDiff = None
        search_space = self.enumerator.enumerate(module=model, sharders=[mock_ebc_sharder])
        self.uniform_proposer.load(search_space)
        output = []
        proposal = self.uniform_proposer.propose()
        while proposal:
            proposal.sort(key=(lambda sharding_option: (max([shard.perf.total for shard in sharding_option.shards]), sharding_option.name)))
            output.append([(candidate.name, candidate.sharding_type, candidate.compute_kernel) for candidate in proposal])
            self.uniform_proposer.feedback(partitionable=True)
            proposal = self.uniform_proposer.propose()
        expected_output = [[('table_1', 'data_parallel', 'dense'), ('table_2', 'data_parallel', 'dense'), ('table_3', 'data_parallel', 'dense')], [('table_1', 'table_wise', 'fused'), ('table_2', 'table_wise', 'fused'), ('table_3', 'table_wise', 'fused')], [('table_1', 'row_wise', 'fused'), ('table_2', 'row_wise', 'fused'), ('table_3', 'row_wise', 'fused')], [('table_1', 'table_row_wise', 'fused'), ('table_2', 'table_row_wise', 'fused'), ('table_3', 'table_row_wise', 'fused')]]
        self.assertEqual(expected_output, output)

    def test_grid_search_three_table(self) -> None:
        """Grid search enumerates the full cross-product of pruned options."""
        tables = [EmbeddingBagConfig(num_embeddings=(100 * i), embedding_dim=(10 * i), name=('table_' + str(i)), feature_names=[('feature_' + str(i))]) for i in range(1, 4)]
        model = TestSparseNN(tables=tables, sparse_device=torch.device('meta'))
        search_space = self.enumerator.enumerate(module=model, sharders=[cast(ModuleSharder[torch.nn.Module], EmbeddingBagCollectionSharder())])
        # Each sharding type keeps up to 3 pruned kernel options, except one.
        num_pruned_options = (((len(ShardingType) - 1) * 3) + 1)
        self.grid_search_proposer.load(search_space)
        for sharding_options in self.grid_search_proposer._sharding_options_by_fqn.values():
            self.assertEqual(len(sharding_options), num_pruned_options)
        num_proposals = 0
        proposal = self.grid_search_proposer.propose()
        while proposal:
            self.grid_search_proposer.feedback(partitionable=True)
            proposal = self.grid_search_proposer.propose()
            num_proposals += 1
        self.assertEqual((num_pruned_options ** len(tables)), num_proposals)

    def test_allocate_budget(self) -> None:
        """clf_to_bytes and allocate_budget distribute a byte budget across
        tables proportionally to allocation_priority, capping each at 1.0."""
        model = torch.tensor([[1.0, 0.0], [2.0, 3.0], [4.0, 5.0]])
        got = EmbeddingOffloadScaleupProposer.clf_to_bytes(model, torch.tensor([0, 0.5, 1]))
        torch.testing.assert_close(got, torch.tensor([0, 4, 9]))
        # NOTE(review): the model rows and `budget` values below lost their
        # first numeric literal in extraction — restore before running.
        model = torch.tensor([[, 2000000], [, 2000000], [, 2000000]])
        mins = torch.tensor([0.1, 0.1, 1])
        budget =
        got = EmbeddingOffloadScaleupProposer.allocate_budget(model, clfs=torch.tensor(mins), budget=budget, allocation_priority=torch.tensor([2, 2, 2]))
        torch.testing.assert_close(got, torch.tensor([1.0, 1.0, 1.0]))
        increase = (EmbeddingOffloadScaleupProposer.clf_to_bytes(model, got).sum() - EmbeddingOffloadScaleupProposer.clf_to_bytes(model, mins).sum()).item()
        self.assertLess(increase, budget)
        # NOTE(review): corrupted literals, as above.
        model = torch.tensor([[, 2000000], [, 2000000], [, 2000000]])
        mins = torch.tensor([0.1, 0.1, 1])
        budget =
        got = EmbeddingOffloadScaleupProposer.allocate_budget(model, clfs=mins, budget=budget, allocation_priority=torch.tensor([2, 2, 2]))
        torch.testing.assert_close(got, torch.tensor([0.26667, 0.26667, 1.0]))
        increase = (EmbeddingOffloadScaleupProposer.clf_to_bytes(model, got).sum() - EmbeddingOffloadScaleupProposer.clf_to_bytes(model, mins).sum())
        self.assertEqual(increase, budget)
        # NOTE(review): corrupted literals, as above.
        model = torch.tensor([[, 2000000], [, 2000000], [, 2000000]])
        mins = torch.tensor([0.1, 0.1, 1])
        budget =
        got = EmbeddingOffloadScaleupProposer.allocate_budget(model, clfs=mins, budget=budget, allocation_priority=torch.tensor([2, 4, 2]))
        torch.testing.assert_close(got, torch.tensor([(0.1 + 0.11111), (0.1 + (2 * 0.11111)), 1.0]))
        increase = (EmbeddingOffloadScaleupProposer.clf_to_bytes(model, got).sum() - EmbeddingOffloadScaleupProposer.clf_to_bytes(model, mins).sum())
        self.assertEqual(increase, budget)
        # NOTE(review): corrupted literals, as above.
        model = torch.tensor([[, 2000000], [, 2000000], [, 2000000]])
        mins = torch.tensor([0.1, 0.3, 0.5])
        budget =
        got = EmbeddingOffloadScaleupProposer.allocate_budget(model, clfs=mins, budget=budget, allocation_priority=torch.tensor([1, 2, 100]))
        torch.testing.assert_close(got, torch.tensor([0.56667, 1.0, 1.0]))
        increase = (EmbeddingOffloadScaleupProposer.clf_to_bytes(model, got).sum() - EmbeddingOffloadScaleupProposer.clf_to_bytes(model, mins).sum())
        self.assertEqual(increase, budget)

    def test_scaleup(self) -> None:
        """Scale-up proposer grows uvm-caching load factors within the HBM
        budget, preferring tables with higher cacheability."""
        tables = [EmbeddingBagConfig(num_embeddings=2000000, embedding_dim=10, name=f'table_{i}', feature_names=[f'feature_{i}']) for i in range(3)]
        constraints = {'table_0': ParameterConstraints(compute_kernels=[EmbeddingComputeKernel.FUSED_UVM_CACHING.value], cache_params=CacheParams(load_factor=0.1, stats=MockCacheStatistics(expected_lookups=2, cacheability=0.2))), 'table_1': ParameterConstraints(compute_kernels=[EmbeddingComputeKernel.FUSED_UVM_CACHING.value], cache_params=CacheParams(load_factor=0.1, stats=MockCacheStatistics(expected_lookups=2, cacheability=0.5)))}
        MB = (1024 * 1024)
        storage_constraint = Topology(world_size=2, compute_device='cuda', hbm_cap=(100 * MB), ddr_cap=(1000 * MB))
        model = TestSparseNN(tables=tables, sparse_device=torch.device('meta'))
        enumerator = EmbeddingEnumerator(topology=storage_constraint, batch_size=BATCH_SIZE, constraints=constraints)
        search_space = enumerator.enumerate(module=model, sharders=[cast(ModuleSharder[torch.nn.Module], EmbeddingBagCollectionSharder())])
        proposer = EmbeddingOffloadScaleupProposer()
        proposer.load(search_space, enumerator=enumerator)
        output = []
        proposal = proposer.propose()
        while (proposal is not None):
            output.append([(candidate.name, candidate.compute_kernel, (candidate.cache_params.load_factor if candidate.cache_params else None)) for candidate in proposal])
            proposer.feedback(partitionable=True, plan=proposal, storage_constraint=storage_constraint)
            proposal = proposer.propose()
        # NOTE(review): the `0.` load factors below look truncated in the
        # extracted source — verify the fractional digits upstream.
        expected_output = [[('table_0', 'fused_uvm_caching', 0.1), ('table_1', 'fused_uvm_caching', 0.1), ('table_2', 'fused', None)], [('table_0', 'fused_uvm_caching', 0.), ('table_1', 'fused_uvm_caching', 0.), ('table_2', 'fused', None)], [('table_0', 'fused_uvm_caching', 0.), ('table_1', 'fused_uvm_caching', 0.), ('table_2', 'fused', None)], [('table_0', 'fused_uvm_caching', 0.), ('table_1', 'fused_uvm_caching', 0.), ('table_2', 'fused', None)], [('table_0', 'fused_uvm_caching', 0.), ('table_1', 'fused', None), ('table_2', 'fused', None)], [('table_0', 'fused_uvm_caching', 0.), ('table_1', 'fused', None), ('table_2', 'fused', None)], [('table_0', 'fused_uvm_caching', 0.), ('table_1', 'fused', None), ('table_2', 'fused', None)]]
        self.assertEqual(output, expected_output)

    def test_scaleup_ample_budget_and_deprecated_feature(self) -> None:
        """A table whose stats report zero expected lookups is not scaled up."""
        tables = [EmbeddingBagConfig(num_embeddings=2000000, embedding_dim=10, name=f'table_{i}', feature_names=[f'feature_{i}']) for i in range(3)]
        constraints = {'table_0': ParameterConstraints(compute_kernels=[EmbeddingComputeKernel.FUSED_UVM_CACHING.value], cache_params=CacheParams(load_factor=0.1, stats=MockCacheStatistics(expected_lookups=2, cacheability=0.2))), 'table_1': ParameterConstraints(compute_kernels=[EmbeddingComputeKernel.FUSED_UVM_CACHING.value], cache_params=CacheParams(load_factor=0.1, stats=MockCacheStatistics(expected_lookups=0, cacheability=0)))}
        MB = (1024 * 1024)
        storage_constraint = Topology(world_size=2, compute_device='cuda', hbm_cap=(100 * MB), ddr_cap=(1000 * MB))
        model = TestSparseNN(tables=tables, sparse_device=torch.device('meta'))
        enumerator = EmbeddingEnumerator(topology=storage_constraint, batch_size=BATCH_SIZE, constraints=constraints)
        search_space = enumerator.enumerate(module=model, sharders=[cast(ModuleSharder[torch.nn.Module], EmbeddingBagCollectionSharder())])
        proposer = EmbeddingOffloadScaleupProposer()
        proposer.load(search_space, enumerator=enumerator)
        output = []
        proposal = proposer.propose()
        while (proposal is not None):
            output.append([(candidate.name, candidate.compute_kernel, (candidate.cache_params.load_factor if candidate.cache_params else None)) for candidate in proposal])
            proposer.feedback(partitionable=True, plan=proposal, storage_constraint=storage_constraint)
            proposal = proposer.propose()
        # NOTE(review): `0.` load factor below looks truncated — verify.
        expected_output = [[('table_0', 'fused_uvm_caching', 0.1), ('table_1', 'fused_uvm_caching', 0.1), ('table_2', 'fused', None)], [('table_0', 'fused_uvm_caching', 0.), ('table_1', 'fused_uvm_caching', 0.1), ('table_2', 'fused', None)], [('table_0', 'fused', None), ('table_1', 'fused_uvm_caching', 0.1), ('table_2', 'fused', None)]]
        self.assertEqual(output[0:3], expected_output)

    def test_proposers_to_proposals_list(self) -> None:
        """proposers_to_proposals_list merges proposals from several
        proposers, de-duplicating by sharding-option identity."""
        def make_mock_proposal(name: str) -> List[ShardingOption]:
            return [ShardingOption(name=name, tensor=torch.zeros(1), module=('model', None), input_lengths=[], batch_size=8, sharding_type='row_wise', partition_by='DEVICE', compute_kernel='fused', shards=[])]
        mock_proposer_1 = MockProposer()
        mock_proposer_1_sharding_options = [make_mock_proposal('p1so1'), make_mock_proposal('p1so2'), make_mock_proposal('p1so1'), None]
        mock_proposer_1.propose = MagicMock(side_effect=mock_proposer_1_sharding_options)
        mock_proposer_2 = MockProposer()
        mock_proposer_2_sharding_options = [make_mock_proposal('p2so1'), make_mock_proposal('p2so1'), make_mock_proposal('p1so2'), make_mock_proposal('p2so2'), None]
        mock_proposer_2.propose = MagicMock(side_effect=mock_proposer_2_sharding_options)
        mock_proposer_3 = MockProposer()
        mock_proposer_3_sharding_options = [make_mock_proposal('p3so1'), make_mock_proposal('p2so1'), make_mock_proposal('p3so2'), None]
        mock_proposer_3.propose = MagicMock(side_effect=mock_proposer_3_sharding_options)
        proposers_list: List[Proposer] = [mock_proposer_1, mock_proposer_2, mock_proposer_3]
        proposals_list = proposers_to_proposals_list(proposers_list, search_space=[])
        proposals_list_names = []
        for sharding_option in proposals_list:
            proposals_list_names.append(sharding_option[0].name)
        expected_list_names = ['p1so1', 'p1so2', 'p2so1', 'p2so2', 'p3so1', 'p3so2']
        self.assertEqual(proposals_list_names, expected_list_names)
def test_connect_wr_As_wr_x_conn_At_disjoint():
    """x (24b) is connected to A[8:32]; writers touch the disjoint slice
    A[0:4] and x, so both writes should coexist in A.

    NOTE(review): the nested update blocks below appear to have lost their
    @update decorators in extraction, and the expected value in up_rd_A is
    missing entirely — restore both from the original source.
    """
    class Top(ComponentLevel3):
        def construct(s):
            s.x = Wire(Bits24)
            s.A = Wire(Bits32)
            connect(s.x, s.A[8:32])
            def up_wr_As():
                s.A[0:4] = Bits4(15)
            def up_wr_x():
                s.x = Bits24(6636321)
            def up_rd_A():
                # NOTE(review): right-hand side missing — source corrupted.
                assert (s.A == )
    _test_model(Top)
class BaseSignalExpr():
    """Base class of RTLIR signal expressions: wraps an RTLIR type and
    provides value equality/hashing on the (concrete class, rtype) pair."""

    def __init__(s, rtype):
        assert isinstance(rtype, rt.BaseRTLIRType), f'non-RTLIR type {rtype} encountered!'
        s.rtype = rtype

    def get_rtype(s):
        """Return the wrapped RTLIR type."""
        return s.rtype

    def __eq__(s, other):
        # Equal only to instances of the exact same class with equal rtype.
        if type(s) is not type(other):
            return False
        return s.rtype == other.rtype

    def __hash__(s):
        return hash((type(s), s.rtype))
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
fileMenu = QMenu('&File', self)
newAction = fileMenu.addAction('&New...')
newAction.setShortcut('Ctrl+N')
self.printAction = fileMenu.addAction('&Print...', self.printFile)
self.printAction.setShortcut('Ctrl+P')
self.printAction.setEnabled(False)
quitAction = fileMenu.addAction('E&xit')
quitAction.setShortcut('Ctrl+Q')
self.menuBar().addMenu(fileMenu)
self.letters = QTabWidget()
newAction.triggered.connect(self.openDialog)
quitAction.triggered.connect(self.close)
self.setCentralWidget(self.letters)
self.setWindowTitle('Order Form')
def createLetter(self, name, address, orderItems, sendOffers):
editor = QTextEdit()
tabIndex = self.letters.addTab(editor, name)
self.letters.setCurrentIndex(tabIndex)
cursor = editor.textCursor()
cursor.movePosition(QTextCursor.Start)
topFrame = cursor.currentFrame()
topFrameFormat = topFrame.frameFormat()
topFrameFormat.setPadding(16)
topFrame.setFrameFormat(topFrameFormat)
textFormat = QTextCharFormat()
boldFormat = QTextCharFormat()
boldFormat.setFontWeight(QFont.Bold)
referenceFrameFormat = QTextFrameFormat()
referenceFrameFormat.setBorder(1)
referenceFrameFormat.setPadding(8)
referenceFrameFormat.setPosition(QTextFrameFormat.FloatRight)
referenceFrameFormat.setWidth(QTextLength(QTextLength.PercentageLength, 40))
cursor.insertFrame(referenceFrameFormat)
cursor.insertText('A company', boldFormat)
cursor.insertBlock()
cursor.insertText('321 City Street')
cursor.insertBlock()
cursor.insertText('Industry Park')
cursor.insertBlock()
cursor.insertText('Another country')
cursor.setPosition(topFrame.lastPosition())
cursor.insertText(name, textFormat)
for line in address.split('\n'):
cursor.insertBlock()
cursor.insertText(line)
cursor.insertBlock()
cursor.insertBlock()
date = QDate.currentDate()
cursor.insertText(('Date: %s' % date.toString('d MMMM yyyy')), textFormat)
cursor.insertBlock()
bodyFrameFormat = QTextFrameFormat()
bodyFrameFormat.setWidth(QTextLength(QTextLength.PercentageLength, 100))
cursor.insertFrame(bodyFrameFormat)
cursor.insertText('I would like to place an order for the following items:', textFormat)
cursor.insertBlock()
cursor.insertBlock()
orderTableFormat = QTextTableFormat()
orderTableFormat.setAlignment(Qt.AlignHCenter)
orderTable = cursor.insertTable(1, 2, orderTableFormat)
orderFrameFormat = cursor.currentFrame().frameFormat()
orderFrameFormat.setBorder(1)
cursor.currentFrame().setFrameFormat(orderFrameFormat)
cursor = orderTable.cellAt(0, 0).firstCursorPosition()
cursor.insertText('Product', boldFormat)
cursor = orderTable.cellAt(0, 1).firstCursorPosition()
cursor.insertText('Quantity', boldFormat)
for (text, quantity) in orderItems:
row = orderTable.rows()
orderTable.insertRows(row, 1)
cursor = orderTable.cellAt(row, 0).firstCursorPosition()
cursor.insertText(text, textFormat)
cursor = orderTable.cellAt(row, 1).firstCursorPosition()
cursor.insertText(str(quantity), textFormat)
cursor.setPosition(topFrame.lastPosition())
cursor.insertBlock()
cursor.insertText('Please update my records to take account of the following privacy information:')
cursor.insertBlock()
offersTable = cursor.insertTable(2, 2)
cursor = offersTable.cellAt(0, 1).firstCursorPosition()
cursor.insertText("I want to receive more information about your company's products and special offers.", textFormat)
cursor = offersTable.cellAt(1, 1).firstCursorPosition()
cursor.insertText('I do not want to receive any promotional information from your company.', textFormat)
if sendOffers:
cursor = offersTable.cellAt(0, 0).firstCursorPosition()
else:
cursor = offersTable.cellAt(1, 0).firstCursorPosition()
cursor.insertText('X', boldFormat)
cursor.setPosition(topFrame.lastPosition())
cursor.insertBlock()
cursor.insertText('Sincerely,', textFormat)
cursor.insertBlock()
cursor.insertBlock()
cursor.insertBlock()
cursor.insertText(name)
self.printAction.setEnabled(True)
def createSample(self):
dialog = DetailsDialog('Dialog with default values', self)
self.createLetter('Mr Smith', '12 High Street\nSmall Town\nThis country', dialog.orderItems(), True)
def openDialog(self):
dialog = DetailsDialog('Enter Customer Details', self)
if (dialog.exec_() == QDialog.Accepted):
self.createLetter(dialog.senderName(), dialog.senderAddress(), dialog.orderItems(), dialog.sendOffers())
def printFile(self):
    """Print the currently displayed letter via a system print dialog."""
    editor = self.letters.currentWidget()
    printer = QPrinter()
    print_dialog = QPrintDialog(printer, self)
    print_dialog.setWindowTitle('Print Document')
    # Offer the "print selection" option only when text is selected.
    if editor.textCursor().hasSelection():
        print_dialog.addEnabledOption(QAbstractPrintDialog.PrintSelection)
    if print_dialog.exec_() == QDialog.Accepted:
        editor.print_(printer)
class RHCPEnv(CEnv):
    """Three-player card environment mapping engine player indices to agent names.

    Tracks which agent is the 'lord' (landlord) and which agent currently
    controls the round (last agent to play a non-pass move).
    """

    def __init__(self):
        super().__init__()
        self.agent_names = ['agent1', 'agent2', 'agent3']

    def prepare(self):
        """Start a round; the agent to move first is recorded as lord and controller."""
        super().prepare()
        self.lord = self.agent_names[self.get_current_idx()]
        self.controller = self.lord

    def curr_player(self):
        """Name of the agent whose turn it is."""
        return self.agent_names[self.get_current_idx()]

    def player_cards(self):
        """Map every agent name to its hand as characters.

        The other two hands come from the engine in (next, next-next) order.
        """
        other_two = self.get_last_two_handcards()
        curr_idx = self.get_current_idx()
        hands = {}
        hands[self.agent_names[(curr_idx + 2) % 3]] = to_char(other_two[1])
        hands[self.agent_names[(curr_idx + 1) % 3]] = to_char(other_two[0])
        hands[self.agent_names[curr_idx]] = self.get_curr_handcards()
        return hands

    def get_current_idx(self):
        """Engine index (0-2) of the player to act."""
        return super().get_curr_ID()

    def get_last_outcards(self):
        """Characters of the most recently played cards."""
        return to_char(super().get_last_outcards())

    def get_last_two_cards(self):
        """Characters of the last two plays."""
        return [to_char(cards) for cards in super().get_last_two_cards()]

    def get_curr_handcards(self):
        """Characters of the current player's hand."""
        return to_char(super().get_curr_handcards())

    def step(self, intention):
        """Play `intention` (characters) for the current player.

        Returns (reward, done). A non-pass move (category > 0) makes the
        acting agent the controller.
        """
        acting_idx = self.get_current_idx()
        r, done, category = self.step_manual(to_value(intention))
        if category > 0:
            self.controller = self.agent_names[acting_idx]
        return (r, done)

    def step_auto(self):
        """Let the built-in policy act for the current player.

        Returns (reward, done); done is inferred from a non-zero reward.
        """
        acting_idx = self.get_current_idx()
        intention, r, _ = super().step_auto()
        intention = to_char(intention)
        # A non-empty play (not a pass) transfers control.
        if len(intention) > 0:
            self.controller = self.agent_names[acting_idx]
        # Sanity-check that the state representation stays a probability.
        assert np.all(self.get_state_prob() >= 0) and np.all(self.get_state_prob() <= 1)
        return (r, r != 0)
def check_haskell_requirements():
    """Check that the Haskell toolchain executables are available on PATH.

    Prints a message for the first missing tool and returns False;
    returns True when ghc, cabal and c2hs are all found.
    """
    # (executable looked up on PATH, display name used in the message)
    required_tools = (('ghc', 'GHC'), ('cabal', 'cabal'), ('c2hs', 'c2hs'))
    for executable, display_name in required_tools:
        if which(executable) is None:
            print('{} is not installed!\n'.format(display_name))
            return False
    return True
def get_SVHN(augment, dataroot, download):
    """Build the SVHN train/test datasets with the shared preprocessing.

    Returns (image_shape, num_classes, train_dataset, test_dataset); when
    `augment` is true the training pipeline prepends a small random
    translation.
    """
    image_shape = (32, 32, 3)
    num_classes = 10
    train_steps = []
    if augment:
        train_steps.append(transforms.RandomAffine(0, translate=(0.1, 0.1)))
    train_steps += [transforms.ToTensor(), preprocess]
    train_transform = transforms.Compose(train_steps)
    test_transform = transforms.Compose([transforms.ToTensor(), preprocess])
    path = Path(dataroot) / 'data' / 'SVHN'
    train_dataset = datasets.SVHN(path, split='train', transform=train_transform, target_transform=one_hot_encode, download=download)
    test_dataset = datasets.SVHN(path, split='test', transform=test_transform, target_transform=one_hot_encode, download=download)
    return (image_shape, num_classes, train_dataset, test_dataset)
def test_object():
    """Road objects: printing, equality semantics, outlines, and schema validation."""
    object1 = xodr.Object(s=10.0, t=(- 2), dynamic=xodr.Dynamic.no, orientation=xodr.Orientation.positive, zOffset=0.0, id='1', height=1.0, Type=xodr.ObjectType.pole)
    object2 = xodr.Object(s=20.0, t=(- 2), dynamic=xodr.Dynamic.no, orientation=xodr.Orientation.positive, zOffset=0.0, height=10, id='1', Type=xodr.ObjectType.streetLamp)
    road = xodr.create_road(xodr.Line(100), 0)
    road.add_object([object1, object2])
    prettyprint(road.get_element())
    # object3 is built with the same constructor arguments as object1.
    object3 = xodr.Object(s=10.0, t=(- 2), dynamic=xodr.Dynamic.no, orientation=xodr.Orientation.positive, zOffset=0.0, id='1', height=1.0, Type=xodr.ObjectType.pole)
    outline = xodr.Outline()
    outline.add_corner(xodr.CornerLocal(1, 2, 3, 4))
    outline.add_corner(xodr.CornerLocal(1, 2, 3, 5))
    # Adding an outline to object2 must not make it equal to object1.
    object2.add_outline(outline)
    prettyprint(object2)
    object3.id = object1.id
    assert (object1 == object3)
    assert (object2 != object1)
    # Geometries must be adjusted before the road can validate.
    road.planview.adjust_geometries()
    assert (version_validation('t_road', road, wanted_schema='xodr') == ValidationResponse.OK)
def mol_transform(mols, model, prop, largest_molecule_len, alphabet, upperbound_dr, lr_dream, dreaming_parameters, plot=False):
    """Dream each molecule toward the target property and report the result.

    For every molecule in `mols`, runs `dream_model` and prints the
    start -> end transformation (molecule and property value), plus the
    full molecule trajectory. When `plot` is true, the trajectory is also
    plotted via plot_utils.plot_transform.
    """
    for i, mol in enumerate(mols):
        # dream_model expects a leading batch dimension of size 1.
        mol = torch.reshape(mol, (1, mol.shape[0], mol.shape[1]))
        (track_prop, track_mol, percent_valid_interm, track_loss, epoch_transformed) = dream_model(model=model, prop=prop, largest_molecule_len=largest_molecule_len, alphabet=alphabet, upperbound=upperbound_dr, data_train=mol, lr=lr_dream, **dreaming_parameters, display=False)
        # First and last trajectory entries: starting vs. dreamed molecule.
        mol1_prop = track_prop[0]
        mol2_prop = track_prop[-1]
        mol1 = track_mol[0]
        mol2 = track_mol[-1]
        transform = f'{mol1} --> {mol2}, {mol1_prop} --> {mol2_prop}'
        print(f'Transformation {i + 1}: {transform}')
        print(track_mol)
        if plot:
            plot_utils.plot_transform(prop, track_mol, track_prop, epoch_transformed, track_loss)
def test_gaussian_solvent_template(tmpdir, water):
    """The generated Gaussian input file for a solvated DDEC charge job
    matches the stored reference file byte for byte."""
    with tmpdir.as_cwd():
        charge_engine = DDECCharges()
        # The engine's calculation settings carry the implicit-solvent keywords.
        solvent_settings = charge_engine._get_calculation_settings()
        task = AtomicInput(molecule=water.to_qcschema(), driver='energy', model={'method': 'b3lyp-d3bj', 'basis': '6-311G'}, keywords=solvent_settings)
        gaussian_harness = GaussianHarness()
        config = get_config(task_config={'ncores': 1, 'memory': 1})
        job_inputs = gaussian_harness.build_input(task, config)
        # Compare against the committed example input.
        with open(get_data('gaussian_solvent_example.com')) as g_out:
            assert (g_out.read() == job_inputs['infiles']['gaussian.com'])
class TestSimpleColumns(BaseTestColumns):
    """Tests for flat (non-nested) columns: type inference, null
    bookkeeping, unary/binary arithmetic with numeric type promotion,
    constant columns, construction from Python lists, and Arrow
    import/export round-trips."""

    def test_SimpleColumnInt64(self) -> None:
        """Int64 inference, typed append errors, null tracking, slicing."""
        data = [1, 2, None, 3, 4, None]
        col = infer_column(data)
        self.assertEqual(col[0], 1)
        self.assertEqual(col[1], 2)
        self.assertEqual(col[3], 3)
        self.assertEqual(col[4], 4)
        self.assertEqual(len(col), 6)
        # Appends must match the inferred dtype; None is rejected too.
        with self.assertRaises(TypeError):
            col.append(None)
        with self.assertRaises(TypeError):
            col.append('hello')
        self.assertEqual(col.is_null_at(0), False)
        self.assertEqual(col.is_null_at(1), False)
        self.assertEqual(col.is_null_at(2), True)
        self.assertEqual(col.is_null_at(3), False)
        self.assertEqual(col.is_null_at(4), False)
        self.assertEqual(col.is_null_at(5), True)
        self.assertEqual(col.get_null_count(), 2)
        # slice(offset, length) preserves values and null counts.
        sliced_col = col.slice(1, 3)
        self.assertEqual(len(sliced_col), 3)
        self.assertEqual(sliced_col[0], 2)
        self.assertEqual(sliced_col[2], 3)
        self.assertEqual(sliced_col.get_null_count(), 1)

    def test_SimpleColumnInt64_unary(self) -> None:
        """neg/abs on an int64 column keep the BIGINT kind and propagate nulls."""
        data = [1, (- 2), None, 3, (- 4), None]
        col = infer_column(data)
        self.assertEqual(col.type().kind(), ta.TypeKind.BIGINT)
        neg_col = col.neg()
        self.assert_Column(neg_col, [(- 1), 2, None, (- 3), 4, None])
        self.assertEqual(neg_col.type().kind(), ta.TypeKind.BIGINT)
        # Double negation returns to the original values.
        neg_col2 = neg_col.neg()
        self.assert_Column(neg_col2, [1, (- 2), None, 3, (- 4), None])
        self.assertEqual(neg_col2.type().kind(), ta.TypeKind.BIGINT)
        neg_col3 = neg_col2.neg()
        self.assert_Column(neg_col3, [(- 1), 2, None, (- 3), 4, None])
        self.assertEqual(neg_col3.type().kind(), ta.TypeKind.BIGINT)
        abs_col = col.abs()
        self.assert_Column(abs_col, [1, 2, None, 3, 4, None])
        self.assertEqual(abs_col.type().kind(), ta.TypeKind.BIGINT)

    def test_SimpleColumnInt64_binary(self) -> None:
        """Column/column and column/scalar arithmetic with type promotion.

        A null on either side of a binary op yields a null result;
        mixing BIGINT with REAL (or a float scalar) promotes to REAL.
        """
        data1 = [1, (- 2), None, 3, (- 4), None]
        col1 = infer_column(data1)
        data2 = [None, 1, 2, 3, 4, 5]
        col2 = infer_column(data2)
        sum_col = col1.add(col2)
        self.assert_Column(sum_col, [None, (- 1), None, 6, 0, None])
        self.assertEqual(sum_col.type().kind(), ta.TypeKind.BIGINT)
        self.assert_Column(col1.sub(col2), [None, (- 3), None, 0, (- 8), None])
        self.assert_Column(col1.mul(col2), [None, (- 2), None, 9, (- 16), None])
        self.assert_Column(col1.mod(col2), [None, 0, None, 0, 0, None])
        # BIGINT + REAL promotes to REAL, in either operand order.
        data3 = [None, 1.0, 2.0, 3.0, 4.0, 5.0]
        col3 = infer_column(data3)
        self.assertEqual(col3.type().kind(), ta.TypeKind.REAL)
        sum_col = col1.add(col3)
        self.assertEqual(sum_col.type().kind(), ta.TypeKind.REAL)
        self.assert_Column(sum_col, [None, (- 1.0), None, 6.0, 0.0, None])
        sum_col2 = col3.add(col1)
        self.assertEqual(sum_col2.type().kind(), ta.TypeKind.REAL)
        self.assert_Column(sum_col2, [None, (- 1.0), None, 6.0, 0.0, None])
        # Scalar operands: int scalars keep BIGINT, float scalars promote.
        add_scalar = col1.add(1)
        self.assertEqual(add_scalar.type().kind(), ta.TypeKind.BIGINT)
        self.assert_Column(add_scalar, [2, (- 1), None, 4, (- 3), None])
        add_scalar = col1.add(0.1)
        self.assertEqual(add_scalar.type().kind(), ta.TypeKind.REAL)
        self.assert_Column(add_scalar, [1.1, (- 1.9), None, 3.1, (- 3.9), None])
        add_scalar = col1.radd(1)
        self.assertEqual(add_scalar.type().kind(), ta.TypeKind.BIGINT)
        self.assert_Column(add_scalar, [2, (- 1), None, 4, (- 3), None])
        add_scalar = col1.radd(0.1)
        self.assertEqual(add_scalar.type().kind(), ta.TypeKind.REAL)
        self.assert_Column(add_scalar, [1.1, (- 1.9), None, 3.1, (- 3.9), None])
        sub_scalar = col1.sub(2)
        self.assertEqual(sub_scalar.type().kind(), ta.TypeKind.BIGINT)
        self.assert_Column(sub_scalar, [(- 1), (- 4), None, 1, (- 6), None])
        sub_scalar = col1.sub(0.1)
        self.assertEqual(sub_scalar.type().kind(), ta.TypeKind.REAL)
        self.assert_Column(sub_scalar, [0.9, (- 2.1), None, 2.9, (- 4.1), None])
        # Reflected variants compute scalar OP column.
        sub_scalar = col1.rsub(2)
        self.assertEqual(sub_scalar.type().kind(), ta.TypeKind.BIGINT)
        self.assert_Column(sub_scalar, [1, 4, None, (- 1), 6, None])
        sub_scalar = col1.rsub(0.1)
        self.assertEqual(sub_scalar.type().kind(), ta.TypeKind.REAL)
        self.assert_Column(sub_scalar, [(- 0.9), 2.1, None, (- 2.9), 4.1, None])
        mul_scalar = col1.mul(2)
        self.assertEqual(mul_scalar.type().kind(), ta.TypeKind.BIGINT)
        self.assert_Column(mul_scalar, [2, (- 4), None, 6, (- 8), None])
        mul_scalar = col1.mul((- 2.0))
        self.assertEqual(mul_scalar.type().kind(), ta.TypeKind.REAL)
        self.assert_Column(mul_scalar, [(- 2.0), 4.0, None, (- 6.0), 8.0, None])
        mul_scalar = col1.rmul(2)
        self.assertEqual(mul_scalar.type().kind(), ta.TypeKind.BIGINT)
        self.assert_Column(mul_scalar, [2, (- 4), None, 6, (- 8), None])
        mul_scalar = col1.rmul((- 2.0))
        self.assertEqual(mul_scalar.type().kind(), ta.TypeKind.REAL)
        self.assert_Column(mul_scalar, [(- 2.0), 4.0, None, (- 6.0), 8.0, None])
        # Modulo: the expected results follow the sign of the divisor.
        mod_scalar = col1.mod(3)
        self.assertEqual(mod_scalar.type().kind(), ta.TypeKind.BIGINT)
        self.assert_Column(mod_scalar, [1, 1, None, 0, 2, None])
        mod_scalar = col1.mod((- 3.0))
        self.assertEqual(mod_scalar.type().kind(), ta.TypeKind.REAL)
        self.assert_Column(mod_scalar, [(- 2.0), (- 2.0), None, 0.0, (- 1.0), None])
        mod_scalar = col1.rmod(3)
        self.assertEqual(mod_scalar.type().kind(), ta.TypeKind.BIGINT)
        self.assert_Column(mod_scalar, [0, (- 1), None, 0, (- 1), None])
        mod_scalar = col1.rmod((- 3.0))
        self.assertEqual(mod_scalar.type().kind(), ta.TypeKind.REAL)
        self.assert_Column(mod_scalar, [0.0, (- 1.0), None, 0.0, (- 3.0), None])
        # Booleans are treated as integers in arithmetic.
        add_scalar = col1.add(True)
        self.assertEqual(add_scalar.type().kind(), ta.TypeKind.BIGINT)
        self.assert_Column(add_scalar, [2, (- 1), None, 4, (- 3), None])

    def test_SimpleColumnFloat32_unary(self) -> None:
        """neg/abs/round on a REAL column; round() keeps the REAL kind."""
        data = [1.2, (- 2.3), None, 3.4, (- 4.6), None]
        col = infer_column(data)
        self.assertEqual(col.type().kind(), ta.TypeKind.REAL)
        neg_col = col.neg()
        self.assert_Column(neg_col, [(- 1.2), 2.3, None, (- 3.4), 4.6, None])
        self.assertEqual(neg_col.type().kind(), ta.TypeKind.REAL)
        abs_col = col.abs()
        self.assert_Column(abs_col, [1.2, 2.3, None, 3.4, 4.6, None])
        self.assertEqual(abs_col.type().kind(), ta.TypeKind.REAL)
        round_col = col.round()
        self.assert_Column(round_col, [1.0, (- 2.0), None, 3.0, (- 5.0), None])
        self.assertEqual(round_col.type().kind(), ta.TypeKind.REAL)

    def test_SimpleColumnBoolean(self) -> None:
        """Boolean inference, typed append errors, append_null bookkeeping."""
        data = [True, True, True, True]
        col = infer_column(data)
        for i in range(4):
            self.assertEqual(col[i], True)
        self.assertEqual(len(col), 4)
        with self.assertRaises(TypeError):
            col.append(None)
        with self.assertRaises(TypeError):
            col.append('hello')
        # Nulls are added explicitly via append_null(), not append(None).
        col.append_null()
        self.assertEqual(col.is_null_at(0), False)
        self.assertEqual(col.is_null_at(1), False)
        self.assertEqual(col.is_null_at(2), False)
        self.assertEqual(col.is_null_at(3), False)
        self.assertEqual(col.is_null_at(4), True)

    def test_SimpleColumnBoolean_unary(self) -> None:
        """invert() flips values and propagates nulls."""
        data = [True, False, None, True, False, None]
        col = infer_column(data)
        self.assertEqual(col.type().kind(), ta.TypeKind.BOOLEAN)
        inv_col = col.invert()
        self.assertEqual(inv_col.type().kind(), ta.TypeKind.BOOLEAN)
        self.assert_Column(inv_col, [False, True, None, False, True, None])

    def test_SimpleColumnString(self) -> None:
        """String inference, typed append errors, append_null bookkeeping."""
        data = ['0', '1', '2', '3']
        col = infer_column(data)
        for i in range(4):
            self.assertEqual(col[i], str(i))
        self.assertEqual(len(col), 4)
        with self.assertRaises(TypeError):
            col.append(None)
        with self.assertRaises(TypeError):
            col.append(1)
        col.append_null()
        self.assertEqual(col.is_null_at(0), False)
        self.assertEqual(col.is_null_at(1), False)
        self.assertEqual(col.is_null_at(2), False)
        self.assertEqual(col.is_null_at(3), False)
        self.assertEqual(col.is_null_at(4), True)

    def test_SimpleColumnUTF(self) -> None:
        """Strings with dots/underscores round-trip through inference."""
        s = ['hello.this', 'is.interesting.', 'this.is_24', 'paradise']
        col = infer_column(s)
        for i in range(4):
            self.assertEqual(col[i], s[i])
        self.assertEqual(len(col), 4)

    def test_ConstantColumn(self) -> None:
        """Constant columns: explicit/default typing and arithmetic with
        regular columns (both operand orders)."""
        col = ta.ConstantColumn(42, 6, ta.VeloxType_INTEGER())
        self.assertTrue(isinstance(col.type(), ta.VeloxType_INTEGER))
        self.assert_Column(col, ([42] * 6))
        # Without an explicit type, an int constant defaults to BIGINT.
        col = ta.ConstantColumn(42, 6)
        self.assertTrue(isinstance(col.type(), ta.VeloxType_BIGINT))
        self.assert_Column(col, ([42] * 6))
        data = [1, (- 2), None, 3, (- 4), None]
        num_column = infer_column(data)
        add_result = num_column.add(col)
        self.assertTrue(isinstance(add_result.type(), ta.VeloxType_BIGINT))
        self.assert_Column(add_result, [43, 40, None, 45, 38, None])
        add_result = col.add(num_column)
        self.assertTrue(isinstance(add_result.type(), ta.VeloxType_BIGINT))
        self.assert_Column(add_result, [43, 40, None, 45, 38, None])
        # A float constant defaults to REAL.
        col = ta.ConstantColumn(4.2, 6)
        self.assertTrue(isinstance(col.type(), ta.VeloxType_REAL))
        self.assert_Column(col, ([4.2] * 6))
        data = [1.2, (- 2.3), None, 3.4, (- 4.6), None]
        num_column = infer_column(data)
        add_result = num_column.add(col)
        self.assertTrue(isinstance(add_result.type(), ta.VeloxType_REAL))
        self.assert_Column(add_result, [5.4, 1.9, None, 7.6, (- 0.4), None])
        add_result = col.add(num_column)
        self.assertTrue(isinstance(add_result.type(), ta.VeloxType_REAL))
        self.assert_Column(add_result, [5.4, 1.9, None, 7.6, (- 0.4), None])
        # A string constant defaults to VARCHAR.
        col = ta.ConstantColumn('abc', 6)
        self.assertTrue(isinstance(col.type(), ta.VeloxType_VARCHAR))
        self.assert_Column(col, (['abc'] * 6))

    def test_FromPyList(self) -> None:
        """Column construction from a Python list for every scalar type,
        plus an array-of-varchar nested case."""
        col = ta.Column(ta.VeloxType_BIGINT(), [1, 2, None, 4])
        self.assertTrue(isinstance(col.type(), ta.VeloxType_BIGINT))
        self.assert_Column(col, [1, 2, None, 4])
        col = ta.Column(ta.VeloxType_INTEGER(), [1, 2, None, 4])
        self.assertTrue(isinstance(col.type(), ta.VeloxType_INTEGER))
        self.assert_Column(col, [1, 2, None, 4])
        col = ta.Column(ta.VeloxType_SMALLINT(), [1, 2, None, 4])
        self.assertTrue(isinstance(col.type(), ta.VeloxType_SMALLINT))
        self.assert_Column(col, [1, 2, None, 4])
        col = ta.Column(ta.VeloxType_TINYINT(), [1, 2, None, 4])
        self.assertTrue(isinstance(col.type(), ta.VeloxType_TINYINT))
        self.assert_Column(col, [1, 2, None, 4])
        # Ints are coerced to floats for REAL/DOUBLE target types.
        col = ta.Column(ta.VeloxType_REAL(), [1, 2, None, 4])
        self.assertTrue(isinstance(col.type(), ta.VeloxType_REAL))
        self.assert_Column(col, [1.0, 2.0, None, 4.0])
        col = ta.Column(ta.VeloxType_DOUBLE(), [1, 2, None, 4])
        self.assertTrue(isinstance(col.type(), ta.VeloxType_DOUBLE))
        self.assert_Column(col, [1.0, 2.0, None, 4.0])
        col = ta.Column(ta.VeloxType_BOOLEAN(), [True, False, None, True])
        self.assertTrue(isinstance(col.type(), ta.VeloxType_BOOLEAN))
        self.assert_Column(col, [True, False, None, True])
        col = ta.Column(ta.VeloxType_VARCHAR(), ['foo', 'bar', None, 'abc'])
        self.assertTrue(isinstance(col.type(), ta.VeloxType_VARCHAR))
        self.assert_Column(col, ['foo', 'bar', None, 'abc'])
        col = ta.Column(ta.VeloxArrayType(ta.VeloxType_VARCHAR()), [['foo', 'bar'], None, ['abc', None]])
        self.assertTrue(isinstance(col.type(), ta.VeloxArrayType))
        self.assertTrue(isinstance(col.type().element_type(), ta.VeloxType_VARCHAR))
        self.assert_Column(col, [['foo', 'bar'], None, ['abc', None]])

    def test_NullCount(self) -> None:
        """Null counts on slices; slicing never mutates the source column."""
        col = infer_column([None, 1, 2, None])
        self.assertEqual(col.get_null_count(), 2)
        # Full-range slice keeps both nulls.
        colSlice = col.slice(col.offset, col.length)
        self.assertEqual(colSlice.get_null_count(), 2)
        colSlice = col.slice(1, (col.length - 1))
        self.assertEqual(colSlice.get_null_count(), 1)
        self.assertEqual(col.get_null_count(), 2)
        colSlice = col.slice(1, 2)
        self.assertEqual(colSlice.get_null_count(), 0)
        self.assertEqual(col.get_null_count(), 2)

    def test_ToArrow_Numerical(self) -> None:
        """Export a numeric column (and a slice of it) to the Arrow C data
        interface and inspect the raw ArrowArray struct."""
        c_array = ffi.new('struct ArrowArray*')
        ptr_array = int(ffi.cast('uintptr_t', c_array))
        col = infer_column([None, 1, 2, None])
        col._export_to_arrow(ptr_array)
        self.assertEqual(c_array.length, 4)
        self.assertEqual(c_array.null_count, 2)
        # Buffers: validity bitmap + values.
        self.assertEqual(c_array.n_buffers, 2)
        vals = ffi.cast('int64_t*', c_array.buffers[1])
        self.assertEqual(vals[1], 1)
        self.assertEqual(vals[2], 2)
        self.assertEqual(c_array.n_children, 0)
        # A non-NULL release callback means ownership was transferred.
        self.assertNotEqual(c_array.release, ffi.NULL)
        c_array_slice = ffi.new('struct ArrowArray*')
        ptr_array_slice = int(ffi.cast('uintptr_t', c_array_slice))
        col_slice = col.slice(1, 3)
        col_slice._export_to_arrow(ptr_array_slice)
        self.assertEqual(c_array_slice.length, 3)
        self.assertEqual(c_array_slice.null_count, 1)
        self.assertEqual(c_array_slice.n_buffers, 2)
        vals_slice = ffi.cast('int64_t*', c_array_slice.buffers[1])
        self.assertEqual(vals_slice[0], 1)
        self.assertEqual(vals_slice[1], 2)
        self.assertEqual(c_array_slice.n_children, 0)
        self.assertNotEqual(c_array_slice.release, ffi.NULL)

    def test_ToArrow_Struct(self) -> None:
        """Export a row (struct) column to Arrow and re-import it via pyarrow."""
        c_array = ffi.new('struct ArrowArray*')
        ptr_array = int(ffi.cast('uintptr_t', c_array))
        col = ta.Column(ta.VeloxRowType(['f1', 'f2'], [ta.VeloxType_INTEGER(), ta.VeloxType_INTEGER()]))
        # Rows are built by appending to each child, then bumping the length.
        col.child_at(0).append(1)
        col.child_at(1).append(10)
        col.set_length(1)
        col.child_at(0).append(2)
        col.child_at(1).append(20)
        col.set_length(2)
        col._export_to_arrow(ptr_array)
        self.assertEqual(c_array.length, 2)
        self.assertEqual(c_array.null_count, 0)
        self.assertEqual(c_array.n_buffers, 1)
        self.assertEqual(c_array.n_children, 2)
        self.assertNotEqual(c_array.release, ffi.NULL)
        s = pa.StructArray._import_from_c(ptr_array, pa.struct([pa.field('f1', pa.int32(), nullable=False), pa.field('f2', pa.int32(), nullable=False)]))
        self.assertTrue(isinstance(s, pa.StructArray))
        self.assertEqual(len(s), len(col))
        self.assertEqual(pa.StructArray.field(s, 0).to_pylist(), [1, 2])
        self.assertEqual(pa.StructArray.field(s, 1).to_pylist(), [10, 20])

    def test_FromArrow_Numerical(self) -> None:
        """Import a pyarrow numeric array through the Arrow C data interface."""
        c_schema = ffi.new('struct ArrowSchema*')
        ptr_schema = int(ffi.cast('uintptr_t', c_schema))
        c_array = ffi.new('struct ArrowArray*')
        ptr_array = int(ffi.cast('uintptr_t', c_array))
        a = pa.array([None, 1, 2, None])
        a._export_to_c(ptr_array, ptr_schema)
        col = ta._import_from_arrow(ta.VeloxType_BIGINT(), ptr_array, ptr_schema)
        self.assertEqual(len(col), 4)
        self.assertEqual(col.get_null_count(), 2)
        self.assertTrue(col.is_null_at(0))
        self.assertEqual(col[1], 1)
        self.assertEqual(col[2], 2)
        self.assertTrue(col.is_null_at(3))
        # Import consumes the structs: release callbacks must be cleared.
        self.assertEqual(c_array.release, ffi.NULL)
        self.assertEqual(c_schema.release, ffi.NULL)

    def test_FromArrow_Struct(self) -> None:
        """Import a pyarrow StructArray and check fields, names and nulls."""
        c_schema = ffi.new('struct ArrowSchema*')
        ptr_schema = int(ffi.cast('uintptr_t', c_schema))
        c_array = ffi.new('struct ArrowArray*')
        ptr_array = int(ffi.cast('uintptr_t', c_array))
        f1 = pa.array([1, 2, 3], type=pa.int64())
        f2 = pa.array([True, False, None], type=pa.bool_())
        s = pa.StructArray.from_arrays([f1, f2], fields=[pa.field('f1', f1.type, nullable=False), pa.field('f2', f2.type, nullable=True)])
        s._export_to_c(ptr_array, ptr_schema)
        col = ta._import_from_arrow(ta.VeloxRowType(['f1', 'f2'], [ta.VeloxType_INTEGER(), ta.VeloxType_BOOLEAN()]), ptr_array, ptr_schema)
        self.assertEqual(len(col), 3)
        self.assertEqual(col.get_null_count(), 0)
        self.assertEqual(col.child_at(0).get_null_count(), 0)
        self.assertEqual(col.child_at(1).get_null_count(), 1)
        self.assertEqual(col.type().name_of(0), 'f1')
        self.assertEqual(col.type().name_of(1), 'f2')
        self.assert_Column(col.child_at(0), [1, 2, 3])
        self.assert_Column(col.child_at(1), [True, False, None])
        self.assertEqual(c_array.release, ffi.NULL)
        self.assertEqual(c_schema.release, ffi.NULL)
class InvertedResidual(nn.Module):
    """MobileNetV2-style inverted residual block.

    Optionally expands channels by `expand_ratio` with a pointwise conv,
    applies a depthwise 3x3 conv, then projects back with a linear
    pointwise conv. A residual shortcut is used when stride is 1 and the
    input/output channel counts match.
    """

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert (stride in [1, 2])
        hidden_dim = round(inp * expand_ratio)
        self.use_res_connect = (self.stride == 1) and (inp == oup)
        layers = []
        if expand_ratio != 1:
            # Pointwise expansion (skipped when expand_ratio == 1, where
            # hidden_dim == inp and expansion would be a no-op).
            layers += [nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False), BatchNorm2d(hidden_dim), nn.ReLU6(inplace=True)]
        # Depthwise 3x3, then linear pointwise projection (no activation).
        layers += [nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False), BatchNorm2d(hidden_dim), nn.ReLU6(inplace=True), nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), BatchNorm2d(oup)]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        if self.use_res_connect:
            return x + self.conv(x)
        return self.conv(x)
class ReversibleBlock(nn.Module):
    """Reversible coupling block: y1 = x1 + fm(x2), y2 = x2 + gm(y1).

    Because the input can be reconstructed exactly from the output via
    `inverse` (the coupling equations invert algebraically), forward
    activations need not be kept for backpropagation.
    """

    def __init__(self, fm, gm):
        # fm, gm: arbitrary sub-modules, each operating on half the channels.
        super(ReversibleBlock, self).__init__()
        self.gm = gm
        self.fm = fm
        self.rev_funct = ReversibleBlockFunction.apply

    def forward(self, x):
        # The block splits x channel-wise in two, so the count must be even.
        assert ((x.shape[1] % 2) == 0)
        # Pass all parameters explicitly so autograd tracks them through the
        # custom Function.
        params = ([w for w in self.fm.parameters()] + [w for w in self.gm.parameters()])
        y = self.rev_funct(x, self.fm, self.gm, *params)
        # Free the input's storage -- it is recomputable via `inverse` during
        # the backward pass, which is the memory saving this block exists for.
        x.data.set_()
        return y

    def inverse(self, y):
        """Reconstruct the block input x from its output y."""
        assert ((y.shape[1] % 2) == 0)
        (y1, y2) = torch.chunk(y, chunks=2, dim=1)
        y1 = y1.contiguous()
        y2 = y2.contiguous()
        # Invert the coupling in reverse order: first x2, then x1.
        x2 = (y2 - self.gm(y1))
        x1 = (y1 - self.fm(x2))
        x = torch.cat((x1, x2), dim=1)
        return x
def ql_syscall_connect_attach(ql: Qiling, nd, pid, chid, index, flags, *args, **kw):
    """Emulate the QNX ConnectAttach syscall: allocate a connection id.

    Side-channel connections (index has NTO_SIDE_CHANNEL set) draw ids from
    the high counter, plain connections from the low counter. The new id is
    registered in ql.os.connections mapping to a QnxConn(pid, chid).
    """
    # NOTE(review): only this exact (nd, flags) combination is emulated;
    # asserts are stripped under `python -O`, so this is not real validation.
    assert ((nd, flags) == (ND_LOCAL_NODE, connect_attach_flags['_NTO_COF_CLOEXEC'])), 'syscall_connect_attach parameters are wrong'
    ql.log.debug(f'syscall_connect_attach(nd = ND_LOCAL_NODE, pid = {pid}, chid = {chid}, index = 0x{index:x}, flags = _NTO_COF_CLOEXEC)')
    if (index & NTO_SIDE_CHANNEL):
        # Side-channel connection: allocate from the high id range.
        regreturn = ql.os.connection_id_hi
        ql.os.connection_id_hi += 1
    else:
        regreturn = ql.os.connection_id_lo
        ql.os.connection_id_lo += 1
    assert (not (regreturn in ql.os.connections)), 'Connection Id is already in use'
    ql.os.connections[regreturn] = QnxConn(pid, chid)
    return regreturn
@click.command()
@click.option('--random-seed', envvar='SEED', default=0)
@click.option('--test-on-gt', type=bool, default=True)
@click.option('--only-test', type=bool, default=False)
@click.option('--overfit', type=bool, default=False)
@click.option('--fusion', type=click.Choice(choices=['none', 'avg']), default='none')
@click.option('--weighted-aggregation', type=bool, default=True)
def main(random_seed, test_on_gt, only_test, overfit, fusion, weighted_aggregation):
    """Train (or load) the joint GGNN model, then evaluate it on every task.

    NOTE(review): the decorators above were reconstructed -- the mangled
    source held bare tuples such as ('--random-seed', envvar='SEED',
    default=0), which is not valid Python; the envvar= and click.Choice
    arguments indicate stripped @click.option decorators. Confirm against
    version history.
    """
    # Seed every RNG in play for reproducibility.
    random.seed(random_seed)
    np.random.seed(random_seed)
    torch.manual_seed(random_seed)
    torch.cuda.manual_seed_all(random_seed)
    # Hyper-parameters (fixed for this experiment).
    n_epochs = 3
    lr = 0.01
    wd = 0
    lr_scheduler = True
    h_dim = 128
    x_dim = 128
    c_dim = 90
    phi_dim = 2048
    max_steps = 3
    train_db = JointCocoTasks()
    initializer = InitializerMul(h_dim=h_dim, phi_dim=phi_dim, c_dim=c_dim)
    if weighted_aggregation:
        aggregator = AllLinearAggregatorWeightedWithDetScore(in_features=h_dim, out_features=x_dim)
    else:
        aggregator = AllLinearAggregator(in_features=h_dim, out_features=x_dim)
    output_model = OutputModelFirstLast(h_dim=h_dim, num_tasks=len(TASK_NUMBERS))
    network = GGNNDiscLoss(initializer=initializer, aggregator=aggregator, output_model=output_model, max_steps=max_steps, h_dim=h_dim, x_dim=x_dim, class_dim=c_dim, fusion=fusion)
    optimizer = SGD(network.parameters(), lr=lr, weight_decay=wd)
    experiment = JointGraphExperiment(network=network, optimizer=optimizer, dataset=train_db, tensorboard=True, seed=random_seed)
    train_folder = 'ggnn-full-seed:{s}'.format(s=random_seed)
    folder = os.path.join(SAVING_DIRECTORY, train_folder)
    mkdir_p(folder)
    # Train and persist the model, or reload a previously trained one.
    if (not only_test):
        experiment.train_n_epochs(n_epochs, overfit=overfit, lr_scheduler=lr_scheduler)
        torch.save(network.state_dict(), os.path.join(folder, 'model.mdl'))
    else:
        network.load_state_dict(torch.load(os.path.join(folder, 'model.mdl')))
    for task_number in TASK_NUMBERS:
        if test_on_gt:
            test_db = CocoTasksTestGT(task_number)
        else:
            test_db = CocoTasksTest(task_number)
        print('testing task {}'.format(task_number), '')
        detections = experiment.do_test(test_db, task_number=task_number)
        detection_file_name = 'detections_wa:{}_tn:{}_tgt:{}_f:{}.json'.format(weighted_aggregation, task_number, test_on_gt, fusion)
        with open(os.path.join(folder, detection_file_name), 'w') as f:
            json.dump(detections, f)
        # Run COCO evaluation with its verbose output silenced.
        with redirect_stdout(open(os.devnull, 'w')):
            gtCOCO = test_db.task_coco
            dtCOCO = gtCOCO.loadRes(os.path.join(folder, detection_file_name))
            cocoEval = COCOeval(gtCOCO, dtCOCO, 'bbox')
            cocoEval.params.catIds = 1
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
        print(('mAP:\t\t %1.6f' % cocoEval.stats[0]))
        # NOTE(review): the label for this second metric was lost in the
        # source (empty string); stats[1] is presumably mAP@0.5 -- confirm.
        print((':\t\t %1.6f' % cocoEval.stats[1]))
        result_file_name = 'result_wa:{}_tn:{}_tgt:{}_f:{}.txt'.format(weighted_aggregation, task_number, test_on_gt, fusion)
        with open(os.path.join(folder, result_file_name), 'w') as f:
            f.write(('%1.6f, %1.6f' % (cocoEval.stats[0], cocoEval.stats[1])))
class WildernessExit(DefaultExit):
    """Exit inside a wilderness map: traversing it changes the traverser's
    coordinates inside the wilderness instead of moving between rooms."""

    @property
    def wilderness(self):
        """The wilderness handler of the room this exit belongs to.

        Must be a property: `mapprovider` below reads `self.wilderness`
        as an attribute (the bare method in the mangled original would
        have dereferenced a bound method and failed).
        """
        return self.location.wilderness

    @property
    def mapprovider(self):
        """Map provider of the owning wilderness."""
        return self.wilderness.mapprovider

    def at_traverse_coordinates(self, traversing_object, current_coordinates, new_coordinates):
        """Hook: return True to allow the move between coordinates."""
        return True

    def at_traverse(self, traversing_object, target_location):
        """Move the traverser to new coordinates within the wilderness.

        Returns True on success, False when either the coordinate hook or
        the object's own pre-move hook vetoes the move.
        """
        itemcoordinates = self.location.wilderness.db.itemcoordinates
        current_coordinates = itemcoordinates[traversing_object]
        # The exit's key (e.g. a compass direction) determines the offset.
        new_coordinates = get_new_coordinates(current_coordinates, self.key)
        if (not self.at_traverse_coordinates(traversing_object, current_coordinates, new_coordinates)):
            return False
        if (not traversing_object.at_before_move(None)):
            return False
        traversing_object.location.msg_contents('{} leaves to {}'.format(traversing_object.key, new_coordinates), exclude=[traversing_object])
        self.location.wilderness.move_obj(traversing_object, new_coordinates)
        traversing_object.location.msg_contents('{} arrives from {}'.format(traversing_object.key, current_coordinates), exclude=[traversing_object])
        traversing_object.at_after_move(None)
        return True
def attach(parser):
    """Register the tiling command's CLI arguments on `parser`."""
    add_input(parser, pages=False)
    parser.add_argument('--output', '-o', required=True, type=Path, help='Target path for the new document')
    # NOTE(review): rows are described as horizontal tiles and cols as
    # vertical tiles -- confirm the help text matches the actual tiling axes.
    parser.add_argument('--rows', '-r', type=int, required=True, help='Number of rows (horizontal tiles)')
    parser.add_argument('--cols', '-c', type=int, required=True, help='Number of columns (vertical tiles)')
    parser.add_argument('--width', type=float, required=True, help='Target width')
    parser.add_argument('--height', type=float, required=True, help='Target height')
    # The unit string is parsed case-insensitively into the Units enum.
    parser.add_argument('--unit', '-u', default=Units.MM, type=(lambda string: Units[string.upper()]), help='Unit for target width and height (pt, mm, cm, in)')
def test_add_ord_range_2() -> None:
    """add_ord_range merges a new (start, end) range into a sorted range list."""
    # Disjoint range is inserted between the existing ones.
    assert (add_ord_range([(1, 2), (11, 12)], (5, 6)) == [(1, 2), (5, 6), (11, 12)])
    # Ranges adjacent to or overlapping the left neighbour merge with it.
    assert (add_ord_range([(1, 2), (11, 12)], (3, 6)) == [(1, 6), (11, 12)])
    assert (add_ord_range([(1, 2), (11, 12)], (2, 6)) == [(1, 6), (11, 12)])
    # (5, 9) is not adjacent to (11, 12), so it stays separate ...
    assert (add_ord_range([(1, 2), (11, 12)], (5, 9)) == [(1, 2), (5, 9), (11, 12)])
    # ... while (5, 10) touches (11, 12) and merges with it.
    assert (add_ord_range([(1, 2), (11, 12)], (5, 10)) == [(1, 2), (5, 12)])
    # Insertion before all existing ranges.
    assert (add_ord_range([(1, 2), (11, 12)], ((- 2), (- 1))) == [((- 2), (- 1)), (1, 2), (11, 12)])
    # A range covering everything collapses the whole list.
    assert (add_ord_range([(1, 2), (11, 12)], (0, 20)) == [(0, 20)])
class LocationLimit(IntEnum):
    """Numeric limits for location-related API fields.

    NOTE(review): the values match the Telegram Bot API location limits --
    confirm against the API the surrounding module targets.
    """
    __slots__ = ()
    # Chat-location address length bounds (characters).
    MIN_CHAT_LOCATION_ADDRESS = 1
    MAX_CHAT_LOCATION_ADDRESS = 64
    # Maximum horizontal accuracy radius.
    HORIZONTAL_ACCURACY = 1500
    # Heading bounds (degrees).
    MIN_HEADING = 1
    MAX_HEADING = 360
    # Live-location period bounds (seconds; 60 s to 24 h).
    MIN_LIVE_PERIOD = 60
    MAX_LIVE_PERIOD = 86400
    # Proximity-alert radius bounds.
    MIN_PROXIMITY_ALERT_RADIUS = 1
    MAX_PROXIMITY_ALERT_RADIUS = 100000
class UtilRegexHandler(BaseHandler):
    """Apply a user-supplied regex to user-supplied data, returning the
    numbered matches as JSON.

    GET and POST behave identically; the original bodies were exact
    duplicates, so both now delegate to a single helper.
    """

    async def _do_match(self):
        # NOTE(review): the response keys below are empty strings in the
        # mangled source (the second assignment overwrites the first); they
        # were presumably non-ASCII labels such as data/status -- confirm
        # against version history. Kept byte-identical here.
        Rtv = {}
        try:
            data = self.get_argument('data', '')
            p = self.get_argument('p', '')
            temp = {}
            # Case-insensitive findall; matches are numbered from 1.
            ds = re.findall(p, data, re.IGNORECASE)
            for cnt in range(0, len(ds)):
                temp[(cnt + 1)] = ds[cnt]
            Rtv[u''] = temp
            Rtv[u''] = 'OK'
        except Exception as e:
            # Report the error text instead of failing the request.
            Rtv[u''] = str(e)
        self.set_header('Content-Type', 'application/json; charset=UTF-8')
        self.write(json.dumps(Rtv, ensure_ascii=False, indent=4))

    async def get(self):
        await self._do_match()

    async def post(self):
        await self._do_match()
def version_info():
    """Print version information for pyranges and its optional dependencies."""
    import importlib
    # find_spec lives in the importlib.util submodule; `import importlib`
    # alone does not reliably bind the `util` attribute.
    import importlib.util

    def update_version_info(version_info, library):
        # Record the installed version of `library`, or a placeholder.
        if importlib.util.find_spec(library):
            # getattr guards libraries that do not expose __version__.
            version = getattr(importlib.import_module(library), '__version__', 'unknown')
        else:
            version = 'not installed'
        version_info[library] = version

    version_info = {'pyranges version': pr.__version__, 'pandas version': pd.__version__, 'numpy version': np.__version__, 'python version': sys.version_info}
    update_version_info(version_info, 'ncls')
    update_version_info(version_info, 'sorted_nearest')
    update_version_info(version_info, 'pyrle')
    update_version_info(version_info, 'ray')
    update_version_info(version_info, 'bamread')
    update_version_info(version_info, 'pyranges_db')
    # NOTE(review): the import name of pyBigWig is case-sensitive; looking up
    # 'pybigwig' may report "not installed" even when present -- confirm.
    update_version_info(version_info, 'pybigwig')
    update_version_info(version_info, 'hypothesis')
    print(version_info)
def _create_selecsls(variant, pretrained, **kwargs):
    """Build a SelecSLS model configuration for `variant` and construct it.

    Fills `cfg` (block class, feature stage specs, head conv specs,
    num_features) and the matching `feature_info` list, then delegates to
    build_model_with_cfg. Raises ValueError for unknown variants.

    NOTE(review): the feature tuples appear to be
    (in_chs, skip_chs, mid_chs, out_chs, is_first, stride) -- confirm
    against the SelecSLSBlock constructor.
    """
    cfg = {}
    feature_info = [dict(num_chs=32, reduction=2, module='stem.2')]
    if variant.startswith('selecsls42'):
        cfg['block'] = SelecSLSBlock
        cfg['features'] = [(32, 0, 64, 64, True, 2), (64, 64, 64, 128, False, 1), (128, 0, 144, 144, True, 2), (144, 144, 144, 288, False, 1), (288, 0, 304, 304, True, 2), (304, 304, 304, 480, False, 1)]
        feature_info.extend([dict(num_chs=128, reduction=4, module='features.1'), dict(num_chs=288, reduction=8, module='features.3'), dict(num_chs=480, reduction=16, module='features.5')])
        feature_info.append(dict(num_chs=1024, reduction=32, module='head.1'))
        # The 'b' sub-variant swaps the last two head convs' widths.
        if (variant == 'selecsls42b'):
            cfg['head'] = [(480, 960, 3, 2), (960, 1024, 3, 1), (1024, 1280, 3, 2), (1280, 1024, 1, 1)]
            feature_info.append(dict(num_chs=1024, reduction=64, module='head.3'))
            cfg['num_features'] = 1024
        else:
            cfg['head'] = [(480, 960, 3, 2), (960, 1024, 3, 1), (1024, 1024, 3, 2), (1024, 1280, 1, 1)]
            feature_info.append(dict(num_chs=1280, reduction=64, module='head.3'))
            cfg['num_features'] = 1280
    elif variant.startswith('selecsls60'):
        cfg['block'] = SelecSLSBlock
        cfg['features'] = [(32, 0, 64, 64, True, 2), (64, 64, 64, 128, False, 1), (128, 0, 128, 128, True, 2), (128, 128, 128, 128, False, 1), (128, 128, 128, 288, False, 1), (288, 0, 288, 288, True, 2), (288, 288, 288, 288, False, 1), (288, 288, 288, 288, False, 1), (288, 288, 288, 416, False, 1)]
        feature_info.extend([dict(num_chs=128, reduction=4, module='features.1'), dict(num_chs=288, reduction=8, module='features.4'), dict(num_chs=416, reduction=16, module='features.8')])
        feature_info.append(dict(num_chs=1024, reduction=32, module='head.1'))
        if (variant == 'selecsls60b'):
            cfg['head'] = [(416, 756, 3, 2), (756, 1024, 3, 1), (1024, 1280, 3, 2), (1280, 1024, 1, 1)]
            feature_info.append(dict(num_chs=1024, reduction=64, module='head.3'))
            cfg['num_features'] = 1024
        else:
            cfg['head'] = [(416, 756, 3, 2), (756, 1024, 3, 1), (1024, 1024, 3, 2), (1024, 1280, 1, 1)]
            feature_info.append(dict(num_chs=1280, reduction=64, module='head.3'))
            cfg['num_features'] = 1280
    elif (variant == 'selecsls84'):
        cfg['block'] = SelecSLSBlock
        cfg['features'] = [(32, 0, 64, 64, True, 2), (64, 64, 64, 144, False, 1), (144, 0, 144, 144, True, 2), (144, 144, 144, 144, False, 1), (144, 144, 144, 144, False, 1), (144, 144, 144, 144, False, 1), (144, 144, 144, 304, False, 1), (304, 0, 304, 304, True, 2), (304, 304, 304, 304, False, 1), (304, 304, 304, 304, False, 1), (304, 304, 304, 304, False, 1), (304, 304, 304, 304, False, 1), (304, 304, 304, 512, False, 1)]
        feature_info.extend([dict(num_chs=144, reduction=4, module='features.1'), dict(num_chs=304, reduction=8, module='features.6'), dict(num_chs=512, reduction=16, module='features.12')])
        cfg['head'] = [(512, 960, 3, 2), (960, 1024, 3, 1), (1024, 1024, 3, 2), (1024, 1280, 3, 1)]
        cfg['num_features'] = 1280
        feature_info.extend([dict(num_chs=1024, reduction=32, module='head.1'), dict(num_chs=1280, reduction=64, module='head.3')])
    else:
        raise ValueError((('Invalid net configuration ' + variant) + ' !!!'))
    cfg['feature_info'] = feature_info
    return build_model_with_cfg(SelecSLS, variant, pretrained, model_cfg=cfg, feature_cfg=dict(out_indices=(0, 1, 2, 3, 4), flatten_sequential=True), **kwargs)
class ModelHandler(object):
def __init__(self, config):
    """Set up metrics, logging, device, RNG seeds, datasets and the model.

    Args:
        config (dict): experiment configuration; must contain at least
            'task_type', 'out_dir', 'pretrained', 'no_cuda', 'cuda_id',
            'data_type' and 'batch_size'.

    Raises:
        ValueError: if config['task_type'] is neither 'classification'
            nor 'regression'.
    """
    # Running averages for the scalar train/dev losses.
    self._train_loss = AverageMeter()
    self._dev_loss = AverageMeter()
    if (config['task_type'] == 'classification'):
        self._train_metrics = {'nloss': AverageMeter(), 'acc': AverageMeter()}
        self._dev_metrics = {'nloss': AverageMeter(), 'acc': AverageMeter()}
    elif (config['task_type'] == 'regression'):
        self._train_metrics = {'nloss': AverageMeter(), 'r2': AverageMeter()}
        self._dev_metrics = {'nloss': AverageMeter(), 'r2': AverageMeter()}
    else:
        raise ValueError('Unknown task_type: {}'.format(config['task_type']))
    self.logger = DummyLogger(config, dirname=config['out_dir'], pretrained=config['pretrained'])
    self.dirname = self.logger.dirname
    if ((not config['no_cuda']) and torch.cuda.is_available()):
        print('[ Using CUDA ]')
        # cuda_id < 0 means "use the default CUDA device".
        self.device = torch.device(('cuda' if (config['cuda_id'] < 0) else ('cuda:%d' % config['cuda_id'])))
        cudnn.benchmark = True
    else:
        self.device = torch.device('cpu')
    config['device'] = self.device
    seed = config.get('seed', 42)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # BUGFIX: a torch.device object is ALWAYS truthy, so the previous
    # `if self.device:` seeded the CUDA RNG even on CPU-only runs.
    # Guard on the actual device type instead.
    if (self.device.type == 'cuda'):
        torch.cuda.manual_seed(seed)
    datasets = prepare_datasets(config)
    if (config['data_type'] in ('network', 'uci', 'dgl')):
        # Whole-graph (transductive) setting: a single graph with
        # train/val/test index splits; one "epoch" processes the full graph.
        config['num_feat'] = datasets['features'].shape[(- 1)]
        config['num_class'] = (datasets['labels'].max().item() + 1)
        self.model = Model(config, train_set=datasets.get('train', None))
        self.model.network = self.model.network.to(self.device)
        self._n_test_examples = datasets['idx_test'].shape[0]
        self.run_epoch = (self._scalable_run_whole_epoch if config.get('scalable_run', False) else self._run_whole_epoch)
        # All splits live in the same dataset dict; the epoch runner picks
        # the right index set from its `training`/`is_test` mode.
        self.train_loader = datasets
        self.dev_loader = datasets
        self.test_loader = datasets
    else:
        # Mini-batch (inductive, e.g. text) setting: separate DataStreams.
        train_set = datasets['train']
        dev_set = datasets['dev']
        test_set = datasets['test']
        # Labels are the last element of each example tuple.
        config['num_class'] = (max([x[(- 1)] for x in ((train_set + dev_set) + test_set)]) + 1)
        self.run_epoch = self._run_batch_epoch
        self.model = Model(config, train_set=datasets.get('train', None))
        self.model.network = self.model.network.to(self.device)
        self._n_train_examples = 0
        if train_set:
            self.train_loader = DataStream(train_set, self.model.vocab_model.word_vocab, config=config, isShuffle=True, isLoop=True, isSort=True)
            self._n_train_batches = self.train_loader.get_num_batch()
        else:
            self.train_loader = None
        if dev_set:
            self.dev_loader = DataStream(dev_set, self.model.vocab_model.word_vocab, config=config, isShuffle=False, isLoop=True, isSort=True)
            self._n_dev_batches = self.dev_loader.get_num_batch()
        else:
            self.dev_loader = None
        if test_set:
            self.test_loader = DataStream(test_set, self.model.vocab_model.word_vocab, config=config, isShuffle=False, isLoop=False, isSort=True, batch_size=config['batch_size'])
            self._n_test_batches = self.test_loader.get_num_batch()
            self._n_test_examples = len(test_set)
        else:
            self.test_loader = None
    # Model may normalize/extend the config; keep the canonical copy.
    self.config = self.model.config
    self.is_test = False
def train(self):
    """Main training loop with per-epoch validation and early stopping.

    Runs epochs until `_stop_condition` (driven by config['patience'])
    returns False, logging train/dev metrics every
    config['print_every_epochs'] epochs.

    Returns:
        dict: `self._best_metrics` (one entry per dev metric), or None
        when either the train or the dev loader is missing.
    """
    if ((self.train_loader is None) or (self.dev_loader is None)):
        print('No training set or dev set specified -- skipped training.')
        return
    self.is_test = False
    timer = Timer('Train')
    self._epoch = self._best_epoch = 0
    # Best-so-far dev metrics, initialized to -inf so any real value wins.
    self._best_metrics = {}
    for k in self._dev_metrics:
        self._best_metrics[k] = (- float('inf'))
    self._reset_metrics()
    while self._stop_condition(self._epoch, self.config['patience']):
        self._epoch += 1
        if ((self._epoch % self.config['print_every_epochs']) == 0):
            format_str = '\n>>> Train Epoch: [{} / {}]'.format(self._epoch, self.config['max_epochs'])
            print(format_str)
            self.logger.write_to_file(format_str)
        self.run_epoch(self.train_loader, training=True, verbose=self.config['verbose'])
        if ((self._epoch % self.config['print_every_epochs']) == 0):
            format_str = 'Training Epoch {} -- Loss: {:0.5f}'.format(self._epoch, self._train_loss.mean())
            format_str += self.metric_to_str(self._train_metrics)
            train_epoch_time_msg = timer.interval('Training Epoch {}'.format(self._epoch))
            self.logger.write_to_file(((train_epoch_time_msg + '\n') + format_str))
            print(format_str)
            format_str = '\n>>> Validation Epoch: [{} / {}]'.format(self._epoch, self.config['max_epochs'])
            print(format_str)
            self.logger.write_to_file(format_str)
        # Validation pass; predictions are only collected when requested.
        (dev_output, dev_gold) = self.run_epoch(self.dev_loader, training=False, verbose=self.config['verbose'], out_predictions=self.config['out_predictions'])
        if self.config['out_predictions']:
            dev_metric_score = self.model.score_func(dev_gold, dev_output)
        else:
            dev_metric_score = None
        if ((self._epoch % self.config['print_every_epochs']) == 0):
            format_str = 'Validation Epoch {} -- Loss: {:0.5f}'.format(self._epoch, self._dev_loss.mean())
            format_str += self.metric_to_str(self._dev_metrics)
            if (dev_metric_score is not None):
                format_str += '\n Dev score: {:0.5f}'.format(dev_metric_score)
            dev_epoch_time_msg = timer.interval('Validation Epoch {}'.format(self._epoch))
            self.logger.write_to_file(((dev_epoch_time_msg + '\n') + format_str))
            print(format_str)
        # NOTE: 'eary_stop_metric' (sic) is the config key spelled this way
        # throughout the codebase -- do not "fix" the spelling only here.
        if (not (self.config['data_type'] in ('network', 'uci', 'text'))):
            # ReduceLROnPlateau-style scheduler stepped on the dev metric.
            self.model.scheduler.step(self._dev_metrics[self.config['eary_stop_metric']].mean())
        if ((self.config['eary_stop_metric'] == self.model.metric_name) and (dev_metric_score is not None)):
            cur_dev_score = dev_metric_score
        else:
            cur_dev_score = self._dev_metrics[self.config['eary_stop_metric']].mean()
        # NOTE(review): cur_dev_score is computed but never compared against
        # self._best_metrics here, so the returned best metrics stay at -inf;
        # best-model tracking/checkpointing appears missing or elided -- confirm.
        self._reset_metrics()
    timer.finish()
    format_str = (('Finished Training: {}\nTraining time: {}'.format(self.dirname, timer.total) + '\n') + self.summary())
    print(format_str)
    self.logger.write_to_file(format_str)
    return self._best_metrics
def test(self):
    """Evaluate the trained model on the test split.

    Freezes all network parameters, runs one evaluation epoch over the
    test loader, logs a summary, and closes the logger.

    Returns:
        tuple: (metrics dict with per-metric means -- plus the model's
        score when config['out_predictions'] is set -- and the adjacency
        returned by `run_epoch`), or None when no test loader exists.
    """
    if (self.test_loader is None):
        print('No testing set specified -- skipped testing.')
        return
    self.is_test = True
    self._reset_metrics()
    test_timer = Timer('Test')
    # Inference only: freeze every parameter of the network.
    for weight in self.model.network.parameters():
        weight.requires_grad = False
    (output, gold, adj) = self.run_epoch(self.test_loader, training=False, verbose=0, out_predictions=self.config['out_predictions'])
    dev_metrics = self._dev_metrics
    report = '[test] | test_exs = {} | step: [{} / {}]'.format(self._n_test_examples, 1, 1)
    report += self.metric_to_str(dev_metrics)
    test_score = None
    if self.config['out_predictions']:
        test_score = self.model.score_func(gold, output)
        report += '\nFinal score on the testing set: {:0.5f}\n'.format(test_score)
    print(report)
    self.logger.write_to_file(report)
    test_timer.finish()
    report = 'Finished Testing: {}\nTesting time: {}'.format(self.dirname, test_timer.total)
    print(report)
    self.logger.write_to_file(report)
    self.logger.close()
    result = {name: meter.mean() for (name, meter) in dev_metrics.items()}
    if (test_score is not None):
        result[self.model.metric_name] = test_score
    return (result, adj)
def batch_no_gnn(self, x_batch, step, training, out_predictions=False):
    """One optimization/evaluation step without any graph component.

    Args:
        x_batch (dict): vectorized batch with 'context', 'context_lens'
            and 'targets' entries.
        step (int): batch index within the epoch (drives gradient
            accumulation).
        training (bool): when True, backpropagates and (every
            config['grad_accumulated_steps'] steps) updates the model.
        out_predictions (bool): when True, include detached predictions
            in the result.

    Returns:
        dict: {'loss', 'metrics', optionally 'predictions'}.
    """
    run_mode = ('train' if training else ('test' if self.is_test else 'dev'))
    net = self.model.network
    net.train(training)
    context = x_batch['context']
    context_lens = x_batch['context_lens']
    targets = x_batch['targets']
    # Fetched for interface parity with the GNN code paths; unused here.
    aux_context = x_batch.get('context2', None)
    aux_context_lens = x_batch.get('context2_lens', None)
    logits = net.compute_no_gnn_output(context, context_lens)
    loss = self.model.criterion(logits, targets)
    score = self.model.score_func(targets.cpu(), logits.detach().cpu())
    res = {'loss': loss.item(), 'metrics': {'nloss': (- loss.item()), self.model.metric_name: score}}
    if out_predictions:
        res['predictions'] = logits.detach().cpu()
    if training:
        # Gradient accumulation: scale the loss and step every N batches.
        loss = loss / self.config['grad_accumulated_steps']
        loss.backward()
        if (((step + 1) % self.config['grad_accumulated_steps']) == 0):
            self.model.clip_grad()
            self.model.optimizer.step()
            self.model.optimizer.zero_grad()
    return res
def batch_IGL_stop(self, x_batch, step, training, out_predictions=False):
    """One Iterative Graph Learning (IGL) step on a batch, with a
    per-example dynamic stopping criterion on the refinement loop.

    Pipeline: build an initial graph from the raw input, run a first
    GCN pass, then repeatedly (up to max_iter) re-learn the adjacency
    from the current node embeddings and re-encode, stopping each
    example once its adjacency change falls below eps_adj.

    Returns:
        dict: {'loss', 'metrics', optionally 'predictions'}.
    """
    mode = ('train' if training else ('test' if self.is_test else 'dev'))
    network = self.model.network
    network.train(training)
    (context, context_lens, targets) = (x_batch['context'], x_batch['context_lens'], x_batch['targets'])
    # Fetched for API symmetry; unused in this code path.
    context2 = x_batch.get('context2', None)
    context2_lens = x_batch.get('context2_lens', None)
    # Initial graph and node features derived from the raw input.
    (raw_context_vec, context_vec, context_mask, init_adj) = network.prepare_init_graph(context, context_lens)
    raw_node_vec = raw_context_vec
    init_node_vec = context_vec
    node_mask = context_mask
    # First graph-learning pass + full GCN forward over the learned adjacency.
    (cur_raw_adj, cur_adj) = network.learn_graph(network.graph_learner, raw_node_vec, network.graph_skip_conn, node_mask=node_mask, graph_include_self=network.graph_include_self, init_adj=init_adj)
    node_vec = torch.relu(network.encoder.graph_encoders[0](init_node_vec, cur_adj))
    node_vec = F.dropout(node_vec, network.dropout, training=network.training)
    for encoder in network.encoder.graph_encoders[1:(- 1)]:
        node_vec = torch.relu(encoder(node_vec, cur_adj))
        node_vec = F.dropout(node_vec, network.dropout, training=network.training)
    output = network.encoder.graph_encoders[(- 1)](node_vec, cur_adj)
    output = network.compute_output(output, node_mask=node_mask)
    loss1 = self.model.criterion(output, targets)
    score = self.model.score_func(targets.cpu(), output.detach().cpu())
    if (self.config['graph_learn'] and self.config['graph_learn_regularization']):
        loss1 += self.add_batch_graph_loss(cur_raw_adj, raw_node_vec)
    # Remember the first learned adjacency for interpolation / stop tests.
    (first_raw_adj, first_adj) = (cur_raw_adj, cur_adj)
    # Refinement is disabled during pretraining epochs (train/dev only);
    # at test time the configured max_iter is always used.
    if (not (mode == 'test')):
        if (self._epoch > self.config.get('pretrain_epoch', 0)):
            max_iter_ = self.config.get('max_iter', 10)
            if (self._epoch == (self.config.get('pretrain_epoch', 0) + 1)):
                # First post-pretraining epoch: reset best-metric trackers.
                for k in self._dev_metrics:
                    self._best_metrics[k] = (- float('inf'))
        else:
            max_iter_ = 0
    else:
        max_iter_ = self.config.get('max_iter', 10)
    # Convergence threshold on adjacency change (separate value at eval time).
    eps_adj = (float(self.config.get('eps_adj', 0)) if training else float(self.config.get('test_eps_adj', self.config.get('eps_adj', 0))))
    pre_raw_adj = cur_raw_adj
    pre_adj = cur_adj
    loss = 0
    iter_ = 0
    # Per-example bookkeeping: last active iteration and "still running" flags.
    batch_last_iters = to_cuda(torch.zeros(x_batch['batch_size'], dtype=torch.uint8), self.device)
    batch_stop_indicators = to_cuda(torch.ones(x_batch['batch_size'], dtype=torch.uint8), self.device)
    batch_all_outputs = []
    while (self.config['graph_learn'] and ((iter_ == 0) or (torch.sum(batch_stop_indicators).item() > 0)) and (iter_ < max_iter_)):
        iter_ += 1
        batch_last_iters += batch_stop_indicators
        pre_adj = cur_adj
        pre_raw_adj = cur_raw_adj
        # Re-learn the graph from the current node embeddings.
        (cur_raw_adj, cur_adj) = network.learn_graph(network.graph_learner2, node_vec, network.graph_skip_conn, node_mask=node_mask, graph_include_self=network.graph_include_self, init_adj=init_adj)
        update_adj_ratio = self.config.get('update_adj_ratio', None)
        if (update_adj_ratio is not None):
            # Interpolate with the first adjacency for stability.
            cur_adj = ((update_adj_ratio * cur_adj) + ((1 - update_adj_ratio) * first_adj))
        node_vec = torch.relu(network.encoder.graph_encoders[0](init_node_vec, cur_adj))
        node_vec = F.dropout(node_vec, self.config.get('gl_dropout', 0), training=network.training)
        for encoder in network.encoder.graph_encoders[1:(- 1)]:
            node_vec = torch.relu(encoder(node_vec, cur_adj))
            node_vec = F.dropout(node_vec, self.config.get('gl_dropout', 0), training=network.training)
        tmp_output = network.encoder.graph_encoders[(- 1)](node_vec, cur_adj)
        tmp_output = network.compute_output(tmp_output, node_mask=node_mask)
        batch_all_outputs.append(tmp_output.unsqueeze(1))
        tmp_loss = self.model.criterion(tmp_output, targets, reduction='none')
        if (len(tmp_loss.shape) == 2):
            # Per-token losses: average over the sequence dimension.
            tmp_loss = torch.mean(tmp_loss, 1)
        # Only examples that have not stopped contribute to the loss.
        loss += (batch_stop_indicators.float() * tmp_loss)
        if (self.config['graph_learn'] and self.config['graph_learn_regularization']):
            loss += (batch_stop_indicators.float() * self.add_batch_graph_loss(cur_raw_adj, raw_node_vec, keep_batch_dim=True))
        if (self.config['graph_learn'] and (not (self.config.get('graph_learn_ratio', None) in (None, 0)))):
            # Penalize large adjacency changes between consecutive iterations.
            loss += ((batch_stop_indicators.float() * batch_SquaredFrobeniusNorm((cur_raw_adj - pre_raw_adj))) * self.config.get('graph_learn_ratio'))
        # Keep refining an example only while its adjacency still changes.
        tmp_stop_criteria = (batch_diff(cur_raw_adj, pre_raw_adj, first_raw_adj) > eps_adj)
        batch_stop_indicators = (batch_stop_indicators * tmp_stop_criteria)
    if (iter_ > 0):
        # Average each example's loss over its own iteration count, then
        # add the first-pass loss.
        loss = (torch.mean((loss / batch_last_iters.float())) + loss1)
        batch_all_outputs = torch.cat(batch_all_outputs, 1)
        # Select, per example, the output from its last active iteration.
        selected_iter_index = (batch_last_iters.long().unsqueeze((- 1)) - 1)
        if (len(batch_all_outputs.shape) == 3):
            selected_iter_index = selected_iter_index.unsqueeze((- 1)).expand((- 1), (- 1), batch_all_outputs.size((- 1)))
            output = batch_all_outputs.gather(1, selected_iter_index).squeeze(1)
        else:
            output = batch_all_outputs.gather(1, selected_iter_index)
        score = self.model.score_func(targets.cpu(), output.detach().cpu())
    else:
        loss = loss1
    res = {'loss': loss.item(), 'metrics': {'nloss': (- loss.item()), self.model.metric_name: score}}
    if out_predictions:
        res['predictions'] = output.detach().cpu()
    if training:
        # Gradient accumulation: step the optimizer every N micro-batches.
        loss = (loss / self.config['grad_accumulated_steps'])
        loss.backward()
        if (((step + 1) % self.config['grad_accumulated_steps']) == 0):
            self.model.clip_grad()
            self.model.optimizer.step()
            self.model.optimizer.zero_grad()
    return res
def scalable_batch_IGL_stop(self, x_batch, step, training, out_predictions=False):
    """Anchor-based (scalable) variant of `batch_IGL_stop`.

    Instead of learning a full node-node adjacency, samples a subset of
    anchor nodes and learns a node-anchor adjacency, reducing the cost
    of graph learning on large graphs. Otherwise follows the same
    iterative refinement with per-example dynamic stopping.

    Returns:
        dict: {'loss', 'metrics', optionally 'predictions'}.
    """
    mode = ('train' if training else ('test' if self.is_test else 'dev'))
    network = self.model.network
    network.train(training)
    (context, context_lens, targets) = (x_batch['context'], x_batch['context_lens'], x_batch['targets'])
    # Fetched for API symmetry; unused in this code path.
    context2 = x_batch.get('context2', None)
    context2_lens = x_batch.get('context2_lens', None)
    (raw_context_vec, context_vec, context_mask, init_adj) = network.prepare_init_graph(context, context_lens)
    raw_node_vec = raw_context_vec
    init_node_vec = context_vec
    node_mask = context_mask
    # Sample anchors (a fraction of the nodes) for node-anchor graph learning.
    (init_anchor_vec, anchor_mask, sampled_node_idx, max_num_anchors) = batch_sample_anchors(init_node_vec, network.config.get('ratio_anchors', 0.2), node_mask=node_mask, device=self.device)
    raw_anchor_vec = batch_select_from_tensor(raw_node_vec, sampled_node_idx, max_num_anchors, self.device)
    cur_node_anchor_adj = network.learn_graph(network.graph_learner, raw_node_vec, anchor_features=raw_anchor_vec, node_mask=node_mask, anchor_mask=anchor_mask)
    # Anchor-anchor adjacency derived from the node-anchor one (used for
    # regularization).
    cur_anchor_adj = compute_anchor_adj(cur_node_anchor_adj, anchor_mask=anchor_mask)
    # First encoder layer: mix anchor message passing with the initial graph
    # via the graph_skip_conn coefficient.
    init_agg_vec = network.encoder.graph_encoders[0](init_node_vec, init_adj, anchor_mp=False, batch_norm=False)
    node_vec = (((1 - network.graph_skip_conn) * network.encoder.graph_encoders[0](init_node_vec, cur_node_anchor_adj, anchor_mp=True, batch_norm=False)) + (network.graph_skip_conn * init_agg_vec))
    if (network.encoder.graph_encoders[0].bn is not None):
        node_vec = network.encoder.graph_encoders[0].compute_bn(node_vec)
    node_vec = torch.relu(node_vec)
    node_vec = F.dropout(node_vec, network.dropout, training=network.training)
    anchor_vec = batch_select_from_tensor(node_vec, sampled_node_idx, max_num_anchors, self.device)
    (first_node_anchor_adj, first_anchor_adj) = (cur_node_anchor_adj, cur_anchor_adj)
    first_init_agg_vec = network.encoder.graph_encoders[0](init_node_vec, first_node_anchor_adj, anchor_mp=True, batch_norm=False)
    # Middle encoder layers.
    for encoder in network.encoder.graph_encoders[1:(- 1)]:
        node_vec = (((1 - network.graph_skip_conn) * encoder(node_vec, cur_node_anchor_adj, anchor_mp=True, batch_norm=False)) + (network.graph_skip_conn * encoder(node_vec, init_adj, anchor_mp=False, batch_norm=False)))
        if (encoder.bn is not None):
            node_vec = encoder.compute_bn(node_vec)
        node_vec = torch.relu(node_vec)
        node_vec = F.dropout(node_vec, network.dropout, training=network.training)
        anchor_vec = batch_select_from_tensor(node_vec, sampled_node_idx, max_num_anchors, self.device)
    # Output layer.
    output = (((1 - network.graph_skip_conn) * network.encoder.graph_encoders[(- 1)](node_vec, cur_node_anchor_adj, anchor_mp=True, batch_norm=False)) + (network.graph_skip_conn * network.encoder.graph_encoders[(- 1)](node_vec, init_adj, anchor_mp=False, batch_norm=False)))
    output = network.compute_output(output, node_mask=node_mask)
    loss1 = self.model.criterion(output, targets)
    score = self.model.score_func(targets.cpu(), output.detach().cpu())
    if (self.config['graph_learn'] and self.config['graph_learn_regularization']):
        loss1 += self.add_batch_graph_loss(cur_anchor_adj, raw_anchor_vec)
    # Refinement disabled during pretraining epochs (train/dev only).
    if (not (mode == 'test')):
        if (self._epoch > self.config.get('pretrain_epoch', 0)):
            max_iter_ = self.config.get('max_iter', 10)
            if (self._epoch == (self.config.get('pretrain_epoch', 0) + 1)):
                # First post-pretraining epoch: reset best-metric trackers.
                for k in self._dev_metrics:
                    self._best_metrics[k] = (- float('inf'))
        else:
            max_iter_ = 0
    else:
        max_iter_ = self.config.get('max_iter', 10)
    eps_adj = (float(self.config.get('eps_adj', 0)) if training else float(self.config.get('test_eps_adj', self.config.get('eps_adj', 0))))
    pre_node_anchor_adj = cur_node_anchor_adj
    loss = 0
    iter_ = 0
    # Per-example bookkeeping: last active iteration and "still running" flags.
    batch_last_iters = to_cuda(torch.zeros(x_batch['batch_size'], dtype=torch.uint8), self.device)
    batch_stop_indicators = to_cuda(torch.ones(x_batch['batch_size'], dtype=torch.uint8), self.device)
    batch_all_outputs = []
    while (self.config['graph_learn'] and ((iter_ == 0) or (torch.sum(batch_stop_indicators).item() > 0)) and (iter_ < max_iter_)):
        iter_ += 1
        batch_last_iters += batch_stop_indicators
        pre_node_anchor_adj = cur_node_anchor_adj
        # Re-learn the node-anchor graph from current embeddings.
        cur_node_anchor_adj = network.learn_graph(network.graph_learner2, node_vec, anchor_features=anchor_vec, node_mask=node_mask, anchor_mask=anchor_mask)
        cur_anchor_adj = compute_anchor_adj(cur_node_anchor_adj, anchor_mask=anchor_mask)
        cur_agg_vec = network.encoder.graph_encoders[0](init_node_vec, cur_node_anchor_adj, anchor_mp=True, batch_norm=False)
        update_adj_ratio = self.config.get('update_adj_ratio', None)
        if (update_adj_ratio is not None):
            # Interpolate with the first iteration's aggregation for stability.
            cur_agg_vec = ((update_adj_ratio * cur_agg_vec) + ((1 - update_adj_ratio) * first_init_agg_vec))
        node_vec = (((1 - network.graph_skip_conn) * cur_agg_vec) + (network.graph_skip_conn * init_agg_vec))
        if (network.encoder.graph_encoders[0].bn is not None):
            node_vec = network.encoder.graph_encoders[0].compute_bn(node_vec)
        node_vec = torch.relu(node_vec)
        node_vec = F.dropout(node_vec, self.config.get('gl_dropout', 0), training=network.training)
        anchor_vec = batch_select_from_tensor(node_vec, sampled_node_idx, max_num_anchors, self.device)
        for encoder in network.encoder.graph_encoders[1:(- 1)]:
            mid_cur_agg_vec = encoder(node_vec, cur_node_anchor_adj, anchor_mp=True, batch_norm=False)
            if (update_adj_ratio is not None):
                mid_first_agg_vecc = encoder(node_vec, first_node_anchor_adj, anchor_mp=True, batch_norm=False)
                mid_cur_agg_vec = ((update_adj_ratio * mid_cur_agg_vec) + ((1 - update_adj_ratio) * mid_first_agg_vecc))
            node_vec = (((1 - network.graph_skip_conn) * mid_cur_agg_vec) + (network.graph_skip_conn * encoder(node_vec, init_adj, anchor_mp=False, batch_norm=False)))
            if (encoder.bn is not None):
                node_vec = encoder.compute_bn(node_vec)
            node_vec = torch.relu(node_vec)
            node_vec = F.dropout(node_vec, self.config.get('gl_dropout', 0), training=network.training)
            anchor_vec = batch_select_from_tensor(node_vec, sampled_node_idx, max_num_anchors, self.device)
        cur_agg_vec = network.encoder.graph_encoders[(- 1)](node_vec, cur_node_anchor_adj, anchor_mp=True, batch_norm=False)
        if (update_adj_ratio is not None):
            first_agg_vec = network.encoder.graph_encoders[(- 1)](node_vec, first_node_anchor_adj, anchor_mp=True, batch_norm=False)
            cur_agg_vec = ((update_adj_ratio * cur_agg_vec) + ((1 - update_adj_ratio) * first_agg_vec))
        tmp_output = (((1 - network.graph_skip_conn) * cur_agg_vec) + (network.graph_skip_conn * network.encoder.graph_encoders[(- 1)](node_vec, init_adj, anchor_mp=False, batch_norm=False)))
        tmp_output = network.compute_output(tmp_output, node_mask=node_mask)
        batch_all_outputs.append(tmp_output.unsqueeze(1))
        tmp_loss = self.model.criterion(tmp_output, targets, reduction='none')
        if (len(tmp_loss.shape) == 2):
            # Per-token losses: average over the sequence dimension.
            tmp_loss = torch.mean(tmp_loss, 1)
        # Only examples that have not stopped contribute to the loss.
        loss += (batch_stop_indicators.float() * tmp_loss)
        if (self.config['graph_learn'] and self.config['graph_learn_regularization']):
            loss += (batch_stop_indicators.float() * self.add_batch_graph_loss(cur_anchor_adj, raw_anchor_vec, keep_batch_dim=True))
        if (self.config['graph_learn'] and (not (self.config.get('graph_learn_ratio', None) in (None, 0)))):
            loss += ((batch_stop_indicators.float() * batch_SquaredFrobeniusNorm((cur_node_anchor_adj - pre_node_anchor_adj))) * self.config.get('graph_learn_ratio'))
        # NOTE: the third argument here is cur_node_anchor_adj (not the first
        # iteration's adjacency, unlike batch_IGL_stop) -- intentional? confirm.
        tmp_stop_criteria = (batch_diff(cur_node_anchor_adj, pre_node_anchor_adj, cur_node_anchor_adj) > eps_adj)
        batch_stop_indicators = (batch_stop_indicators * tmp_stop_criteria)
    if (iter_ > 0):
        # Average each example's loss over its own iteration count, then
        # add the first-pass loss.
        loss = (torch.mean((loss / batch_last_iters.float())) + loss1)
        batch_all_outputs = torch.cat(batch_all_outputs, 1)
        # Select, per example, the output from its last active iteration.
        selected_iter_index = (batch_last_iters.long().unsqueeze((- 1)) - 1)
        if (len(batch_all_outputs.shape) == 3):
            selected_iter_index = selected_iter_index.unsqueeze((- 1)).expand((- 1), (- 1), batch_all_outputs.size((- 1)))
            output = batch_all_outputs.gather(1, selected_iter_index).squeeze(1)
        else:
            output = batch_all_outputs.gather(1, selected_iter_index)
        score = self.model.score_func(targets.cpu(), output.detach().cpu())
    else:
        loss = loss1
    res = {'loss': loss.item(), 'metrics': {'nloss': (- loss.item()), self.model.metric_name: score}}
    if out_predictions:
        res['predictions'] = output.detach().cpu()
    if training:
        # Gradient accumulation: step the optimizer every N micro-batches.
        loss = (loss / self.config['grad_accumulated_steps'])
        loss.backward()
        if (((step + 1) % self.config['grad_accumulated_steps']) == 0):
            self.model.clip_grad()
            self.model.optimizer.step()
            self.model.optimizer.zero_grad()
    return res
def _run_whole_epoch(self, data_loader, training=True, verbose=None, out_predictions=False):
    """Run one full-graph (transductive) epoch with iterative graph learning.

    Operates on a single graph stored in `data_loader` (a dict with
    'adj', 'features', 'labels' and per-split index arrays); the split
    is selected from the current mode.

    Returns:
        (output[idx], labels[idx]) in train/dev mode, plus the learned
        raw adjacency (on CPU) in test mode.
    """
    mode = ('train' if training else ('test' if self.is_test else 'dev'))
    self.model.network.train(training)
    (init_adj, features, labels) = (data_loader['adj'], data_loader['features'], data_loader['labels'])
    if (mode == 'train'):
        idx = data_loader['idx_train']
    elif (mode == 'dev'):
        idx = data_loader['idx_val']
    else:
        idx = data_loader['idx_test']
    network = self.model.network
    features = F.dropout(features, network.config.get('feat_adj_dropout', 0), training=network.training)
    init_node_vec = features
    # First graph-learning pass.
    (cur_raw_adj, cur_adj) = network.learn_graph(network.graph_learner, init_node_vec, network.graph_skip_conn, graph_include_self=network.graph_include_self, init_adj=init_adj)
    if (self.config['graph_learn'] and (self.config.get('max_iter', 10) > 0)):
        cur_raw_adj = F.dropout(cur_raw_adj, network.config.get('feat_adj_dropout', 0), training=network.training)
    cur_adj = F.dropout(cur_adj, network.config.get('feat_adj_dropout', 0), training=network.training)
    if (network.graph_module == 'gat'):
        # GAT baseline: no graph learning, single pass over the input graph.
        assert ((self.config['graph_learn'] is False) and (self.config.get('max_iter', 10) == 0))
        node_vec = network.encoder(init_node_vec, init_adj)
        output = F.log_softmax(node_vec, dim=(- 1))
    elif (network.graph_module == 'graphsage'):
        # GraphSAGE baseline: build a DGL graph from the binarized adjacency.
        import dgl
        from scipy import sparse
        binarized_adj = sparse.coo_matrix((init_adj.detach().cpu().numpy() != 0))
        dgl_graph = dgl.DGLGraph(binarized_adj)
        dgl_graph = dgl_graph.to(self.device)
        node_vec = network.encoder(dgl_graph, init_node_vec)
        output = F.log_softmax(node_vec, dim=(- 1))
    else:
        # GCN stack over the learned adjacency.
        node_vec = torch.relu(network.encoder.graph_encoders[0](init_node_vec, cur_adj))
        node_vec = F.dropout(node_vec, network.dropout, training=network.training)
        for encoder in network.encoder.graph_encoders[1:(- 1)]:
            node_vec = torch.relu(encoder(node_vec, cur_adj))
            node_vec = F.dropout(node_vec, network.dropout, training=network.training)
        output = network.encoder.graph_encoders[(- 1)](node_vec, cur_adj)
        output = F.log_softmax(output, dim=(- 1))
    # Loss/score are computed only on the current split's node indices.
    score = self.model.score_func(labels[idx], output[idx])
    loss1 = self.model.criterion(output[idx], labels[idx])
    if (self.config['graph_learn'] and self.config['graph_learn_regularization']):
        loss1 += self.add_graph_loss(cur_raw_adj, init_node_vec)
    (first_raw_adj, first_adj) = (cur_raw_adj, cur_adj)
    # Refinement disabled during pretraining epochs (train/dev only).
    if (not (mode == 'test')):
        if (self._epoch > self.config.get('pretrain_epoch', 0)):
            max_iter_ = self.config.get('max_iter', 10)
            if (self._epoch == (self.config.get('pretrain_epoch', 0) + 1)):
                # First post-pretraining epoch: reset best-metric trackers.
                for k in self._dev_metrics:
                    self._best_metrics[k] = (- float('inf'))
        else:
            max_iter_ = 0
    else:
        max_iter_ = self.config.get('max_iter', 10)
    if training:
        eps_adj = float(self.config.get('eps_adj', 0))
    else:
        eps_adj = float(self.config.get('test_eps_adj', self.config.get('eps_adj', 0)))
    pre_raw_adj = cur_raw_adj
    pre_adj = cur_adj
    loss = 0
    iter_ = 0
    # Whole-graph stopping: one global `diff` test (no per-example flags).
    while (self.config['graph_learn'] and ((iter_ == 0) or (diff(cur_raw_adj, pre_raw_adj, first_raw_adj).item() > eps_adj)) and (iter_ < max_iter_)):
        iter_ += 1
        pre_adj = cur_adj
        pre_raw_adj = cur_raw_adj
        # Re-learn the graph from the current node embeddings.
        (cur_raw_adj, cur_adj) = network.learn_graph(network.graph_learner2, node_vec, network.graph_skip_conn, graph_include_self=network.graph_include_self, init_adj=init_adj)
        update_adj_ratio = self.config.get('update_adj_ratio', None)
        if (update_adj_ratio is not None):
            # Interpolate with the first adjacency for stability.
            cur_adj = ((update_adj_ratio * cur_adj) + ((1 - update_adj_ratio) * first_adj))
        node_vec = torch.relu(network.encoder.graph_encoders[0](init_node_vec, cur_adj))
        node_vec = F.dropout(node_vec, self.config.get('gl_dropout', 0), training=network.training)
        for encoder in network.encoder.graph_encoders[1:(- 1)]:
            node_vec = torch.relu(encoder(node_vec, cur_adj))
            node_vec = F.dropout(node_vec, self.config.get('gl_dropout', 0), training=network.training)
        output = network.encoder.graph_encoders[(- 1)](node_vec, cur_adj)
        output = F.log_softmax(output, dim=(- 1))
        score = self.model.score_func(labels[idx], output[idx])
        loss += self.model.criterion(output[idx], labels[idx])
        if (self.config['graph_learn'] and self.config['graph_learn_regularization']):
            loss += self.add_graph_loss(cur_raw_adj, init_node_vec)
        if (self.config['graph_learn'] and (not (self.config.get('graph_learn_ratio', None) in (None, 0)))):
            loss += (SquaredFrobeniusNorm((cur_raw_adj - pre_raw_adj)) * self.config.get('graph_learn_ratio'))
    if (iter_ > 0):
        # Average the refinement loss over iterations, add the first-pass loss.
        loss = ((loss / iter_) + loss1)
    else:
        loss = loss1
    if training:
        self.model.optimizer.zero_grad()
        loss.backward()
        self.model.clip_grad()
        self.model.optimizer.step()
    self._update_metrics(loss.item(), {'nloss': (- loss.item()), self.model.metric_name: score}, 1, training=training)
    if (mode != 'test'):
        return (output[idx], labels[idx])
    else:
        # Test mode also returns the learned raw adjacency for inspection.
        return (output[idx], labels[idx], cur_raw_adj.cpu())
def _scalable_run_whole_epoch(self, data_loader, training=True, verbose=None, out_predictions=False):
    """Anchor-based (scalable) variant of `_run_whole_epoch`.

    Learns a node-anchor adjacency over a sampled anchor subset instead
    of a full node-node adjacency, making graph learning feasible on
    large single graphs.

    Returns:
        (output[idx], labels[idx]) for the current split.
    """
    mode = ('train' if training else ('test' if self.is_test else 'dev'))
    self.model.network.train(training)
    (init_adj, features, labels) = (data_loader['adj'], data_loader['features'], data_loader['labels'])
    if (mode == 'train'):
        idx = data_loader['idx_train']
    elif (mode == 'dev'):
        idx = data_loader['idx_val']
    else:
        idx = data_loader['idx_test']
    network = self.model.network
    features = F.dropout(features, network.config.get('feat_adj_dropout', 0), training=network.training)
    init_node_vec = features
    # Sample anchors; default count is 20% of the nodes.
    (init_anchor_vec, sampled_node_idx) = sample_anchors(init_node_vec, network.config.get('num_anchors', int((0.2 * init_node_vec.size(0)))))
    cur_node_anchor_adj = network.learn_graph(network.graph_learner, init_node_vec, anchor_features=init_anchor_vec)
    # Anchor-anchor adjacency derived from the node-anchor one (used for
    # regularization).
    cur_anchor_adj = compute_anchor_adj(cur_node_anchor_adj)
    if (self.config['graph_learn'] and (self.config.get('max_iter', 10) > 0)):
        cur_node_anchor_adj = F.dropout(cur_node_anchor_adj, network.config.get('feat_adj_dropout', 0), training=network.training)
    cur_anchor_adj = F.dropout(cur_anchor_adj, network.config.get('feat_adj_dropout', 0), training=network.training)
    # First encoder layer: mix anchor message passing with the initial graph
    # via the graph_skip_conn coefficient.
    init_agg_vec = network.encoder.graph_encoders[0](init_node_vec, init_adj, anchor_mp=False, batch_norm=False)
    node_vec = (((1 - network.graph_skip_conn) * network.encoder.graph_encoders[0](init_node_vec, cur_node_anchor_adj, anchor_mp=True, batch_norm=False)) + (network.graph_skip_conn * init_agg_vec))
    if (network.encoder.graph_encoders[0].bn is not None):
        node_vec = network.encoder.graph_encoders[0].compute_bn(node_vec)
    node_vec = torch.relu(node_vec)
    node_vec = F.dropout(node_vec, network.dropout, training=network.training)
    anchor_vec = node_vec[sampled_node_idx]
    (first_node_anchor_adj, first_anchor_adj) = (cur_node_anchor_adj, cur_anchor_adj)
    first_init_agg_vec = network.encoder.graph_encoders[0](init_node_vec, first_node_anchor_adj, anchor_mp=True, batch_norm=False)
    # Middle encoder layers.
    for encoder in network.encoder.graph_encoders[1:(- 1)]:
        node_vec = (((1 - network.graph_skip_conn) * encoder(node_vec, cur_node_anchor_adj, anchor_mp=True, batch_norm=False)) + (network.graph_skip_conn * encoder(node_vec, init_adj, anchor_mp=False, batch_norm=False)))
        if (encoder.bn is not None):
            node_vec = encoder.compute_bn(node_vec)
        node_vec = torch.relu(node_vec)
        node_vec = F.dropout(node_vec, network.dropout, training=network.training)
        anchor_vec = node_vec[sampled_node_idx]
    # Output layer.
    output = (((1 - network.graph_skip_conn) * network.encoder.graph_encoders[(- 1)](node_vec, cur_node_anchor_adj, anchor_mp=True, batch_norm=False)) + (network.graph_skip_conn * network.encoder.graph_encoders[(- 1)](node_vec, init_adj, anchor_mp=False, batch_norm=False)))
    output = F.log_softmax(output, dim=(- 1))
    score = self.model.score_func(labels[idx], output[idx])
    loss1 = self.model.criterion(output[idx], labels[idx])
    if (self.config['graph_learn'] and self.config['graph_learn_regularization']):
        loss1 += self.add_graph_loss(cur_anchor_adj, init_anchor_vec)
    # Refinement disabled during pretraining epochs (train/dev only).
    if (not (mode == 'test')):
        if (self._epoch > self.config.get('pretrain_epoch', 0)):
            max_iter_ = self.config.get('max_iter', 10)
            if (self._epoch == (self.config.get('pretrain_epoch', 0) + 1)):
                # First post-pretraining epoch: reset best-metric trackers.
                for k in self._dev_metrics:
                    self._best_metrics[k] = (- float('inf'))
        else:
            max_iter_ = 0
    else:
        max_iter_ = self.config.get('max_iter', 10)
    if training:
        eps_adj = float(self.config.get('eps_adj', 0))
    else:
        eps_adj = float(self.config.get('test_eps_adj', self.config.get('eps_adj', 0)))
    pre_node_anchor_adj = cur_node_anchor_adj
    loss = 0
    iter_ = 0
    # Whole-graph stopping: one global `diff` test (no per-example flags).
    while (self.config['graph_learn'] and ((iter_ == 0) or (diff(cur_node_anchor_adj, pre_node_anchor_adj, cur_node_anchor_adj).item() > eps_adj)) and (iter_ < max_iter_)):
        iter_ += 1
        pre_node_anchor_adj = cur_node_anchor_adj
        # Re-learn the node-anchor graph from current embeddings.
        cur_node_anchor_adj = network.learn_graph(network.graph_learner2, node_vec, anchor_features=anchor_vec)
        cur_anchor_adj = compute_anchor_adj(cur_node_anchor_adj)
        cur_agg_vec = network.encoder.graph_encoders[0](init_node_vec, cur_node_anchor_adj, anchor_mp=True, batch_norm=False)
        update_adj_ratio = self.config.get('update_adj_ratio', None)
        if (update_adj_ratio is not None):
            # Interpolate with the first iteration's aggregation for stability.
            cur_agg_vec = ((update_adj_ratio * cur_agg_vec) + ((1 - update_adj_ratio) * first_init_agg_vec))
        node_vec = (((1 - network.graph_skip_conn) * cur_agg_vec) + (network.graph_skip_conn * init_agg_vec))
        if (network.encoder.graph_encoders[0].bn is not None):
            node_vec = network.encoder.graph_encoders[0].compute_bn(node_vec)
        node_vec = torch.relu(node_vec)
        node_vec = F.dropout(node_vec, self.config.get('gl_dropout', 0), training=network.training)
        anchor_vec = node_vec[sampled_node_idx]
        for encoder in network.encoder.graph_encoders[1:(- 1)]:
            mid_cur_agg_vec = encoder(node_vec, cur_node_anchor_adj, anchor_mp=True, batch_norm=False)
            if (update_adj_ratio is not None):
                mid_first_agg_vecc = encoder(node_vec, first_node_anchor_adj, anchor_mp=True, batch_norm=False)
                mid_cur_agg_vec = ((update_adj_ratio * mid_cur_agg_vec) + ((1 - update_adj_ratio) * mid_first_agg_vecc))
            node_vec = (((1 - network.graph_skip_conn) * mid_cur_agg_vec) + (network.graph_skip_conn * encoder(node_vec, init_adj, anchor_mp=False, batch_norm=False)))
            if (encoder.bn is not None):
                node_vec = encoder.compute_bn(node_vec)
            node_vec = torch.relu(node_vec)
            node_vec = F.dropout(node_vec, self.config.get('gl_dropout', 0), training=network.training)
            anchor_vec = node_vec[sampled_node_idx]
        cur_agg_vec = network.encoder.graph_encoders[(- 1)](node_vec, cur_node_anchor_adj, anchor_mp=True, batch_norm=False)
        if (update_adj_ratio is not None):
            first_agg_vec = network.encoder.graph_encoders[(- 1)](node_vec, first_node_anchor_adj, anchor_mp=True, batch_norm=False)
            cur_agg_vec = ((update_adj_ratio * cur_agg_vec) + ((1 - update_adj_ratio) * first_agg_vec))
        output = (((1 - network.graph_skip_conn) * cur_agg_vec) + (network.graph_skip_conn * network.encoder.graph_encoders[(- 1)](node_vec, init_adj, anchor_mp=False, batch_norm=False)))
        output = F.log_softmax(output, dim=(- 1))
        score = self.model.score_func(labels[idx], output[idx])
        loss += self.model.criterion(output[idx], labels[idx])
        if (self.config['graph_learn'] and self.config['graph_learn_regularization']):
            loss += self.add_graph_loss(cur_anchor_adj, init_anchor_vec)
        if (self.config['graph_learn'] and (not (self.config.get('graph_learn_ratio', None) in (None, 0)))):
            loss += (SquaredFrobeniusNorm((cur_node_anchor_adj - pre_node_anchor_adj)) * self.config.get('graph_learn_ratio'))
    # Optionally dump the final learned node-anchor adjacency at test time.
    if ((mode == 'test') and self.config.get('out_raw_learned_adj_path', None)):
        out_raw_learned_adj_path = os.path.join(self.dirname, self.config['out_raw_learned_adj_path'])
        np.save(out_raw_learned_adj_path, cur_node_anchor_adj.cpu())
        print('Saved raw_learned_adj to {}'.format(out_raw_learned_adj_path))
    if (iter_ > 0):
        # Average the refinement loss over iterations, add the first-pass loss.
        loss = ((loss / iter_) + loss1)
    else:
        loss = loss1
    if training:
        self.model.optimizer.zero_grad()
        loss.backward()
        self.model.clip_grad()
        self.model.optimizer.step()
    self._update_metrics(loss.item(), {'nloss': (- loss.item()), self.model.metric_name: score}, 1, training=training)
    return (output[idx], labels[idx])
def _run_batch_epoch(self, data_loader, training=True, rl_ratio=0, verbose=10, out_predictions=False):
    """Run one full pass over `data_loader`, dispatching each batch to the
    configured batch function and accumulating loss/metrics.

    Returns a `(predictions, gold_targets)` pair; both lists stay empty
    unless `out_predictions` is set and we are not training.
    """
    epoch_start = time.time()
    if training:
        mode = 'train'
    else:
        mode = 'test' if self.is_test else 'dev'
    if training:
        self.model.optimizer.zero_grad()
    predictions = []
    gold_targets = []
    for step in range(data_loader.get_num_batch()):
        raw_batch = data_loader.nextBatch()
        x_batch = vectorize_input(raw_batch, self.config, training=training, device=self.device)
        if not x_batch:
            # vectorization produced nothing usable for this batch
            continue
        # Dispatch to the configured batch runner.
        if self.config.get('no_gnn', False):
            res = self.batch_no_gnn(x_batch, step, training=training, out_predictions=out_predictions)
        elif self.config.get('scalable_run', False):
            res = self.scalable_batch_IGL_stop(x_batch, step, training=training, out_predictions=out_predictions)
        else:
            res = self.batch_IGL_stop(x_batch, step, training=training, out_predictions=out_predictions)
        self._update_metrics(res['loss'], res['metrics'], x_batch['batch_size'], training=training)
        if training:
            self._n_train_examples += x_batch['batch_size']
        # Periodic progress report.
        if verbose > 0 and step > 0 and step % verbose == 0:
            summary_str = self.self_report(step, mode)
            self.logger.write_to_file(summary_str)
            print(summary_str)
            print('used_time: {:0.2f}s'.format(time.time() - epoch_start))
        if (not training) and out_predictions:
            predictions.extend(res['predictions'])
            gold_targets.extend(x_batch['targets'])
    return (predictions, gold_targets)
def set_requires_grad(self, nets, requires_grad=False):
    """Toggle gradient tracking for one network or a list of networks.

    `None` entries are silently skipped, so optional sub-networks can be
    passed without guarding at the call site.
    """
    net_list = nets if isinstance(nets, list) else [nets]
    for candidate in net_list:
        if candidate is None:
            continue
        for param in candidate.parameters():
            param.requires_grad = requires_grad
def self_report(self, step, mode='train'):
    """Build a one-line progress summary for the current epoch.

    Args:
        step: current batch index within the epoch.
        mode: one of 'train', 'dev', 'test'.

    Returns:
        Formatted summary string including loss and metrics.

    Raises:
        ValueError: if `mode` is not one of the supported values.
    """
    if (mode == 'train'):
        format_str = '[train-{}] step: [{} / {}] | loss = {:0.5f}'.format(self._epoch, step, self._n_train_batches, self._train_loss.mean())
        format_str += self.metric_to_str(self._train_metrics)
    elif (mode == 'dev'):
        format_str = '[predict-{}] step: [{} / {}] | loss = {:0.5f}'.format(self._epoch, step, self._n_dev_batches, self._dev_loss.mean())
        format_str += self.metric_to_str(self._dev_metrics)
    elif (mode == 'test'):
        format_str = '[test] | test_exs = {} | step: [{} / {}]'.format(self._n_test_examples, step, self._n_test_batches)
        # NOTE(review): test mode reports _dev_metrics, mirroring the original -- confirm intended.
        format_str += self.metric_to_str(self._dev_metrics)
    else:
        # BUG FIX: the original applied '%' to a '{}'-style template
        # ('mode = {} not supported.' % mode), which raises TypeError
        # instead of the intended ValueError.
        raise ValueError('mode = {} not supported.'.format(mode))
    return format_str
def plain_metric_to_str(self, metrics):
    """Render plain scalar metrics as a ' | NAME = value' string."""
    return ''.join(
        ' | {} = {:0.5f}'.format(name.upper(), value)
        for (name, value) in metrics.items()
    )
def metric_to_str(self, metrics):
    """Render running-average metric objects (anything exposing .mean())
    as a ' | NAME = value' string."""
    segments = [
        ' | {} = {:0.5f}'.format(name.upper(), metrics[name].mean())
        for name in metrics
    ]
    return ''.join(segments)
def best_metric_to_str(self, metrics):
    """Render best-so-far metrics, one 'NAME = value' per line, with a
    leading blank line."""
    lines = [
        '{} = {:0.5f}\n'.format(name.upper(), metrics[name])
        for name in metrics
    ]
    return '\n' + ''.join(lines)
def summary(self):
    """Return a short multi-line model summary: best epoch plus its metrics,
    framed by 'MODEL SUMMARY' banner lines."""
    header = '\n MODEL SUMMARY '
    footer = ' MODEL SUMMARY '
    body = f'Best epoch = {self._best_epoch}; ' + self.best_metric_to_str(self._best_metrics)
    return '\n'.join([header, body, footer])
def _update_metrics(self, loss, metrics, batch_size, training=True):
if training:
if loss:
self._train_loss.update(loss)
for k in self._train_metrics:
if (not (k in metrics)):
continue
self._train_metrics[k].update(metrics[k], batch_size)
else:
if loss:
self._dev_loss.update(loss)
for k in self._dev_metrics:
if (not (k in metrics)):
continue
self._dev_metrics[k].update(metrics[k], batch_size)
def _reset_metrics(self):
self._train_loss.reset()
self._dev_loss.reset()
for k in self._train_metrics:
self._train_metrics[k].reset()
for k in self._dev_metrics:
self._dev_metrics[k].reset()
def _stop_condition(self, epoch, patience=10):
no_improvement = (epoch >= (self._best_epoch + patience))
exceeded_max_epochs = (epoch >= self.config['max_epochs'])
return (False if (exceeded_max_epochs or no_improvement) else True)
def add_graph_loss(self, out_adj, features):
    """Regularization loss on a learned adjacency matrix.

    Three weighted terms (weights from self.config):
      * smoothness_ratio: tr(X^T L X) -- penalizes feature differences
        across connected nodes.
      * degree_ratio: -1^T log(A 1 + eps) -- keeps node degrees away
        from zero.
      * sparsity_ratio: ||A||_F^2 -- discourages dense graphs.
    """
    graph_loss = 0
    # Graph Laplacian L = D - A, with D the diagonal degree matrix.
    L = (torch.diagflat(torch.sum(out_adj, (- 1))) - out_adj)
    # Smoothness term, normalized by the number of adjacency entries.
    graph_loss += ((self.config['smoothness_ratio'] * torch.trace(torch.mm(features.transpose((- 1), (- 2)), torch.mm(L, features)))) / int(np.prod(out_adj.shape)))
    ones_vec = to_cuda(torch.ones(out_adj.size((- 1))), self.device)
    # Degree term: VERY_SMALL_NUMBER guards against log(0) on isolated nodes.
    graph_loss += (((- self.config['degree_ratio']) * torch.mm(ones_vec.unsqueeze(0), torch.log((torch.mm(out_adj, ones_vec.unsqueeze((- 1))) + Constants.VERY_SMALL_NUMBER))).squeeze()) / out_adj.shape[(- 1)])
    # Sparsity term: squared Frobenius norm, normalized per entry.
    graph_loss += ((self.config['sparsity_ratio'] * torch.sum(torch.pow(out_adj, 2))) / int(np.prod(out_adj.shape)))
    return graph_loss
def add_batch_graph_loss(self, out_adj, features, keep_batch_dim=False):
    """Batched version of add_graph_loss.

    Args:
        out_adj: batch of learned adjacency matrices; indexed as
            out_adj[i] per graph -- presumably (B, N, N). TODO confirm.
        features: per-graph node features, indexed as features[i].
        keep_batch_dim: if True, return a per-graph loss tensor;
            otherwise return a single scalar aggregated over the batch.
    """
    if keep_batch_dim:
        graph_loss = []
        for i in range(out_adj.shape[0]):
            # Per-graph Laplacian L = D - A.
            L = (torch.diagflat(torch.sum(out_adj[i], (- 1))) - out_adj[i])
            # Per-graph smoothness term tr(X^T L X), normalized per entry.
            graph_loss.append(((self.config['smoothness_ratio'] * torch.trace(torch.mm(features[i].transpose((- 1), (- 2)), torch.mm(L, features[i])))) / int(np.prod(out_adj.shape[1:]))))
        graph_loss = to_cuda(torch.Tensor(graph_loss), self.device)
        ones_vec = to_cuda(torch.ones(out_adj.shape[:(- 1)]), self.device)
        # Degree term, kept per graph via batched matmul.
        graph_loss += (((- self.config['degree_ratio']) * torch.matmul(ones_vec.unsqueeze(1), torch.log((torch.matmul(out_adj, ones_vec.unsqueeze((- 1))) + Constants.VERY_SMALL_NUMBER))).squeeze((- 1)).squeeze((- 1))) / out_adj.shape[(- 1)])
        # Sparsity term, summed over each graph's entries.
        graph_loss += ((self.config['sparsity_ratio'] * torch.sum(torch.pow(out_adj, 2), (1, 2))) / int(np.prod(out_adj.shape[1:])))
    else:
        graph_loss = 0
        for i in range(out_adj.shape[0]):
            L = (torch.diagflat(torch.sum(out_adj[i], (- 1))) - out_adj[i])
            # NOTE(review): this branch normalizes by np.prod(out_adj.shape)
            # (including the batch dim), unlike the keep_batch_dim branch
            # which uses shape[1:] -- confirm intended.
            graph_loss += ((self.config['smoothness_ratio'] * torch.trace(torch.mm(features[i].transpose((- 1), (- 2)), torch.mm(L, features[i])))) / int(np.prod(out_adj.shape)))
        ones_vec = to_cuda(torch.ones(out_adj.shape[:(- 1)]), self.device)
        # Degree term averaged over the batch.
        graph_loss += ((((- self.config['degree_ratio']) * torch.matmul(ones_vec.unsqueeze(1), torch.log((torch.matmul(out_adj, ones_vec.unsqueeze((- 1))) + Constants.VERY_SMALL_NUMBER))).sum()) / out_adj.shape[0]) / out_adj.shape[(- 1)])
        graph_loss += ((self.config['sparsity_ratio'] * torch.sum(torch.pow(out_adj, 2))) / int(np.prod(out_adj.shape)))
    return graph_loss
def _migrate_v21(data: dict) -> dict:
game_modifications = data['game_modifications']
for game in game_modifications:
game_name = game['game']
if (game_name != 'dread'):
continue
dock_weakness = game.get('dock_weakness')
if (dock_weakness is None):
continue
old_new_name = migration_data.get_raw_data(RandovaniaGame(game_name))['dairon_typo']
for (old_name, new_name) in old_new_name.items():
if (old_name in dock_weakness):
dock_weakness[new_name] = dock_weakness.pop(old_name)
return data |
class WizardOTPDialogBase(WizardDialog):
    """Wizard dialog step that collects a 6-digit one-time password (OTP)."""

    def get_otp(self):
        """Return the entered OTP as an int, or None if it is not exactly
        6 characters or not numeric."""
        otp = self.ids.otp.text
        if len(otp) != 6:
            return None
        try:
            return int(otp)
        except ValueError:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only int() parse failures
            # should be treated as "no OTP yet".
            return None

    def on_text(self, dt):
        """Enable the Next button only once a valid OTP is present."""
        self.ids.next.disabled = (self.get_otp() is None)

    def on_enter(self, dt):
        """Pressing Enter triggers Next when it is enabled."""
        next_button = self.ids.next  # renamed from `next` to avoid shadowing the builtin
        if not next_button.disabled:
            next_button.dispatch('on_release')
class BasicBlock3d(nn.Module):
    """BasicBlock for 3D ResNets: two 3-D convolutions plus a residual
    connection, with optional inflation, non-local block and gradient
    checkpointing. Interface and behavior match the original."""
    expansion = 1

    def __init__(self, inplanes, planes, spatial_stride=1, temporal_stride=1, dilation=1, downsample=None, style='pytorch', inflate=True, inflate_style='3x1x1', non_local=False, non_local_cfg=dict(), conv_cfg=dict(type='Conv3d'), norm_cfg=dict(type='BN3d'), act_cfg=dict(type='ReLU'), with_cp=False):
        super().__init__()
        assert style in ['pytorch', 'caffe']
        assert inflate_style in ['3x1x1', '3x3x3']
        self.inplanes = inplanes
        self.planes = planes
        self.spatial_stride = spatial_stride
        self.temporal_stride = temporal_stride
        self.dilation = dilation
        self.style = style
        self.inflate = inflate
        self.inflate_style = inflate_style
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.with_cp = with_cp
        self.non_local = non_local
        self.non_local_cfg = non_local_cfg
        # Only the first conv carries the (temporal, spatial) stride.
        self.conv1_stride_s = spatial_stride
        self.conv2_stride_s = 1
        self.conv1_stride_t = temporal_stride
        self.conv2_stride_t = 1
        if self.inflate:
            # Fully inflated 3x3x3 kernels with matching temporal padding.
            kernel1, pad1 = (3, 3, 3), (1, dilation, dilation)
            kernel2, pad2 = (3, 3, 3), (1, 1, 1)
        else:
            # 2D-style kernels: no temporal extent.
            kernel1, pad1 = (1, 3, 3), (0, dilation, dilation)
            kernel2, pad2 = (1, 3, 3), (0, 1, 1)
        self.conv1 = ConvModule(inplanes, planes, kernel1, stride=(self.conv1_stride_t, self.conv1_stride_s, self.conv1_stride_s), padding=pad1, dilation=(1, dilation, dilation), bias=False, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
        # Second conv has no activation; ReLU is applied after the residual add.
        self.conv2 = ConvModule(planes, planes * self.expansion, kernel2, stride=(self.conv2_stride_t, self.conv2_stride_s, self.conv2_stride_s), padding=pad2, bias=False, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=None)
        self.downsample = downsample
        self.relu = build_activation_layer(self.act_cfg)
        if self.non_local:
            self.non_local_block = NonLocal3d(self.conv2.norm.num_features, **self.non_local_cfg)

    def forward(self, x):
        """Residual forward pass, optionally with gradient checkpointing."""
        def _residual(inp):
            shortcut = inp if self.downsample is None else self.downsample(inp)
            return self.conv2(self.conv1(inp)) + shortcut
        if self.with_cp and x.requires_grad:
            # Trade compute for memory on the residual branch.
            out = cp.checkpoint(_residual, x)
        else:
            out = _residual(x)
        out = self.relu(out)
        if self.non_local:
            out = self.non_local_block(out)
        return out
class OptionSchema(marshmallow.Schema):
    """Schema describing a click.Option; deserializes a dict of option
    attributes into a real click.Option instance."""
    param_decls = OptionNameField(data_key='name', metadata={'description': 'Name of the option. Usually prefixed with - or --.'})
    type = TypeField(metadata={'description': f"Name of the type. {', '.join(TYPES)} accepted."})
    is_flag = fields.Boolean(default=False, metadata={'description': 'Whether the option is a boolean flag.'})
    help = fields.String(default=None, metadata={'description': 'Documentation for the option.'})
    hidden = fields.Boolean(default=False, metadata={'description': 'Whether the option is hidden from help.'})
    required = fields.Boolean(default=False, metadata={'description': 'Whether the option is required.'})
    nargs = fields.Integer(metadata={'description': 'Number of instances expected. Pass -1 for variadic.'})
    multiple = fields.Boolean(metadata={'description': 'Whether multiple values can be passed.'})
    default = AnyField(default=None, metadata={'description': 'Default value.'})
    choices = fields.List(fields.String(), metadata={'description': 'List of allowed string values.'}, default=None)

    # BUG FIX: the original contained a bare `_load()` expression here --
    # almost certainly a mangled `@post_load` decorator. Without it the
    # schema returns the raw dict instead of building a click.Option.
    @marshmallow.post_load
    def make_option(self, validated, partial, many):
        """Turn the validated dict into a click.Option, converting a
        `choices` list into a click.Choice parameter type."""
        choices = validated.pop('choices', None)
        if choices:
            validated['type'] = click.Choice(choices)
        return click.Option(**validated)
def test_render_debug_better_error_message_recursion_error() -> None:
    """Render an ExceptionTrace for a RecursionError at DEBUG verbosity and
    check that repeated frames are collapsed into a single
    '... Previous frame repeated N times' line, via a regex template."""
    io = BufferedIO()
    io.set_verbosity(Verbosity.DEBUG)
    try:
        recursion.recursion_error()
    except RecursionError as e:
        trace = ExceptionTrace(e)
        # NOTE(review): hard-coded line number of the recursion_error() call
        # site within the original test file -- must be updated if the test moves.
        lineno = 83
        trace.render(io)
        # Regex template: `\d+`/`\s*` are regex atoms, `{...}` parts are
        # f-string substitutions; a trailing `\` inside the triple-quoted
        # string continues the line.
        expected = f'''^
Stack trace:
\d+ {re.escape(trace._get_relative_file_path(__file__))}:{lineno} in test_render_debug_better_error_message_recursion_error
{(lineno - 2)}\
{(lineno - 1)}\ try:
{(lineno + 0)}\ recursion.recursion_error\(\)
{(lineno + 1)}\ except RecursionError as e:
{(lineno + 2)}\ trace = ExceptionTrace\(e\)
... Previous frame repeated \d+ times
\s*\d+ {re.escape(trace._get_relative_file_path(recursion.__file__))}:2 in recursion_error
1\ def recursion_error\(\) -> None:
2\ recursion_error\(\)
3\
RecursionError
maximum recursion depth exceeded
at {re.escape(trace._get_relative_file_path(recursion.__file__))}:2 in recursion_error
1\ def recursion_error\(\) -> None:
2\ recursion_error\(\)
3\
'''
        assert (re.match(expected, io.fetch_output()) is not None)
class ElixirOpenIdConnect(OpenIdConnectAuth):
    """Authenticate against the ELIXIR AAI identity provider via OpenID Connect."""
    name = 'elixir'
    # BUG FIX: the endpoint string literal was truncated/unterminated in the
    # original source (a syntax error). Restored from the upstream
    # social-core Elixir backend -- confirm against deployment config.
    OIDC_ENDPOINT = 'https://login.elixir-czech.org/oidc'
    # Extra token fields persisted on the social auth record.
    EXTRA_DATA = [('expires_in', 'expires_in', True), ('refresh_token', 'refresh_token', True), ('id_token', 'id_token', True), ('other_tokens', 'other_tokens', True)]
    DEFAULT_SCOPE = ['openid', 'email']
    # Skip at_hash verification when decoding the JWT id_token.
    JWT_DECODE_OPTIONS = {'verify_at_hash': False}

    def get_user_details(self, response):
        """Map the OIDC userinfo response onto the framework's user fields."""
        username_key = self.setting('USERNAME_KEY', default=self.USERNAME_KEY)
        name = (response.get('name') or '')
        (fullname, first_name, last_name) = self.get_user_names(name)
        return {'username': response.get(username_key), 'email': response.get('email'), 'fullname': fullname, 'first_name': first_name, 'last_name': last_name}
def two_way_teleporter_connections(rng: Random, teleporter_database: tuple[(TeleporterHelper, ...)], between_areas: bool) -> TeleporterConnection:
    """Pair every teleporter with exactly one partner, two-way.

    When `between_areas` is set, pairing is delegated to
    try_randomize_teleporters; otherwise teleporters are shuffled and
    paired off directly. Raises ValueError on an odd-sized database.
    """
    if len(teleporter_database) % 2 != 0:
        raise ValueError('Two-way teleporter shuffle, but odd number of teleporters to shuffle.')
    if between_areas:
        teleporter_database = tuple(try_randomize_teleporters(rng, teleporter_database))
    else:
        pool = list(teleporter_database)
        rng.shuffle(pool)
        # Pop pairs off the shuffled pool and connect each pair both ways.
        while pool:
            pool.pop().connect_to(pool.pop())
    mapping: TeleporterConnection = {}
    for helper in teleporter_database:
        assert helper.connected_teleporter is not None
        mapping[helper.teleporter] = helper.connected_teleporter.teleporter
    return mapping
class GCNLayer(nn.Module):
    """Graph convolution layer with a learned, masked attention-style adjacency.

    The fixed adjacency `A` only supplies the sparsity mask; edge weights are
    learned from node-feature similarity and re-normalized with softmax.
    """

    def __init__(self, input_dim: int, output_dim: int, A: torch.Tensor):
        super(GCNLayer, self).__init__()
        self.A = A
        self.BN = nn.BatchNorm1d(input_dim)
        self.Activition = nn.LeakyReLU()
        # NOTE(review): sigma1 is defined but unused in this layer -- confirm.
        self.sigma1 = torch.nn.Parameter(torch.tensor([0.1], requires_grad=True))
        self.GCN_liner_theta_1 = nn.Sequential(nn.Linear(input_dim, 256))
        self.GCN_liner_out_1 = nn.Sequential(nn.Linear(input_dim, output_dim))
        nodes_count = self.A.shape[0]
        # `device` is expected to be a module-level global -- TODO confirm.
        self.I = torch.eye(nodes_count, nodes_count, requires_grad=False).to(device)
        # Binary mask of A's nonzero entries (ceil of a tiny positive scaling).
        self.mask = torch.ceil((self.A * 1e-05))

    def A_to_D_inv(self, A: torch.Tensor):
        """Return D^(-1/2), the inverse-sqrt diagonal degree matrix of A."""
        D = A.sum(1)
        D_hat = torch.diag(torch.pow(D, (- 0.5)))
        return D_hat

    def forward(self, H, model='normal'):
        H = self.BN(H)
        H_xx1 = self.GCN_liner_theta_1(H)
        # Pairwise similarity scores between projected node features.
        e = torch.sigmoid(torch.matmul(H_xx1, H_xx1.t()))
        # BUG FIX: masked-out entries must receive a large negative value so
        # softmax drives them to ~0. The original multiplied by -0.0 (a
        # garbled literal), leaving masked entries at 0 and giving them
        # non-negligible softmax weight.
        zero_vec = (-9e15) * torch.ones_like(e)
        A = (torch.where((self.mask > 0), e, zero_vec) + self.I)
        if (model != 'normal'):
            # Non-normal mode clamps everything to a 0.1 floor, which also
            # neutralizes the mask (same effect as the original code here).
            A = torch.clamp(A, 0.1)
        A = F.softmax(A, dim=1)
        output = self.Activition(torch.mm(A, self.GCN_liner_out_1(H)))
        return (output, A)
def logdir2df(logdir):
    """Load all scalar series from a TensorBoard log directory into a single
    DataFrame indexed by step (one column per scalar tag, 'val/' prefix
    stripped from column names).

    Args:
        logdir: path to the event-file directory (str or pathlib.Path).

    Returns:
        pandas.DataFrame with one row per step.
    """
    # IDIOM FIX: was `issubclass(type(logdir), Path)` -- isinstance is the
    # correct (and subclass-aware) check.
    if isinstance(logdir, Path):
        logdir = str(logdir)
    ea = EventAccumulator(path=logdir)
    ea.Reload()
    frames = []
    for tag in ea.Tags()['scalars']:
        df = pd.DataFrame(ea.Scalars(tag), columns=['wall_time', 'step', tag.replace('val/', '')])
        # Keep only the value column, indexed by step.
        frames.append(df.set_index('step').drop(columns='wall_time'))
    return pd.concat(frames, axis=1)
class FakeStatResult:
    """Mimics os.stat_result for an entry of a fake filesystem.

    Timestamps are stored internally as integer nanoseconds (like
    os.stat_result's *_ns attributes) and exposed in seconds via properties.

    BUG FIX: the original arrived with property/setter decorators stripped
    (bare `_ctime.setter` expressions, which are NameErrors) and the 1e9
    second<->nanosecond conversion constants garbled to `.0`; both restored.
    """

    def __init__(self, is_windows: bool, user_id: int, group_id: int, initial_time: Optional[float] = None):
        self.st_mode: int = 0
        self.st_ino: Optional[int] = None
        self.st_dev: int = 0
        self.st_nlink: int = 0
        self.st_uid: int = user_id
        self.st_gid: int = group_id
        self._st_size: int = 0
        self.is_windows: bool = is_windows
        # All three timestamps start at `initial_time` (seconds), stored as ns.
        self._st_atime_ns: int = int((initial_time or 0) * 1e9)
        self._st_mtime_ns: int = self._st_atime_ns
        self._st_ctime_ns: int = self._st_atime_ns

    def __eq__(self, other: Any) -> bool:
        return (isinstance(other, FakeStatResult) and (self._st_atime_ns == other._st_atime_ns) and (self._st_ctime_ns == other._st_ctime_ns) and (self._st_mtime_ns == other._st_mtime_ns) and (self.st_size == other.st_size) and (self.st_gid == other.st_gid) and (self.st_uid == other.st_uid) and (self.st_nlink == other.st_nlink) and (self.st_dev == other.st_dev) and (self.st_ino == other.st_ino) and (self.st_mode == other.st_mode))

    def __ne__(self, other: Any) -> bool:
        return not (self == other)

    def copy(self) -> 'FakeStatResult':
        """Return a shallow copy of this stat result."""
        return copy(self)

    def set_from_stat_result(self, stat_result: os.stat_result) -> None:
        """Take over relevant values from a real os.stat_result."""
        self.st_mode = stat_result.st_mode
        self.st_uid = stat_result.st_uid
        self.st_gid = stat_result.st_gid
        self._st_size = stat_result.st_size
        self._st_atime_ns = stat_result.st_atime_ns
        self._st_mtime_ns = stat_result.st_mtime_ns
        self._st_ctime_ns = stat_result.st_ctime_ns

    @property
    def st_ctime(self) -> Union[int, float]:
        """Change time in seconds."""
        return self._st_ctime_ns / 1e9

    @st_ctime.setter
    def st_ctime(self, val: Union[int, float]) -> None:
        self._st_ctime_ns = int(val * 1e9)

    @property
    def st_atime(self) -> Union[int, float]:
        """Access time in seconds."""
        return self._st_atime_ns / 1e9

    @st_atime.setter
    def st_atime(self, val: Union[int, float]) -> None:
        self._st_atime_ns = int(val * 1e9)

    @property
    def st_mtime(self) -> Union[int, float]:
        """Modification time in seconds."""
        return self._st_mtime_ns / 1e9

    @st_mtime.setter
    def st_mtime(self, val: Union[int, float]) -> None:
        self._st_mtime_ns = int(val * 1e9)

    @property
    def st_size(self) -> int:
        # Windows reports size 0 for symlinks.
        if ((self.st_mode & S_IFLNK) == S_IFLNK) and self.is_windows:
            return 0
        return self._st_size

    @st_size.setter
    def st_size(self, val: int) -> None:
        self._st_size = val

    @property
    def st_blocks(self) -> int:
        """Number of 512-byte blocks allocated (POSIX only)."""
        if self.is_windows:
            raise AttributeError("'os.stat_result' object has no attribute 'st_blocks'")
        page_size = 4096
        blocks_in_page = page_size // 512
        pages = self._st_size // page_size
        if self._st_size % page_size:
            pages += 1
        return pages * blocks_in_page

    @property
    def st_file_attributes(self) -> int:
        """Windows file attribute bits derived from st_mode (Windows only)."""
        if not self.is_windows:
            raise AttributeError("module 'os.stat_result' has no attribute 'st_file_attributes'")
        mode = 0
        st_mode = self.st_mode
        if st_mode & stat.S_IFDIR:
            mode |= stat.FILE_ATTRIBUTE_DIRECTORY
        if st_mode & stat.S_IFREG:
            mode |= stat.FILE_ATTRIBUTE_NORMAL
        if st_mode & (stat.S_IFCHR | stat.S_IFBLK):
            mode |= stat.FILE_ATTRIBUTE_DEVICE
        if st_mode & stat.S_IFLNK:
            mode |= stat.FILE_ATTRIBUTE_REPARSE_POINT
        return mode

    @property
    def st_reparse_tag(self) -> int:
        """Windows reparse tag (Windows, Python >= 3.8 only)."""
        if (not self.is_windows) or (sys.version_info < (3, 8)):
            raise AttributeError("module 'os.stat_result' has no attribute 'st_reparse_tag'")
        if self.st_mode & stat.S_IFLNK:
            return stat.IO_REPARSE_TAG_SYMLINK
        return 0

    def __getitem__(self, item: int) -> Optional[int]:
        """Support tuple-style indexing with the stat.ST_* constants."""
        import stat
        if item == stat.ST_MODE:
            return self.st_mode
        if item == stat.ST_INO:
            return self.st_ino
        if item == stat.ST_DEV:
            return self.st_dev
        if item == stat.ST_NLINK:
            return self.st_nlink
        if item == stat.ST_UID:
            return self.st_uid
        if item == stat.ST_GID:
            return self.st_gid
        if item == stat.ST_SIZE:
            return self.st_size
        if item == stat.ST_ATIME:
            return int(self.st_atime)
        if item == stat.ST_MTIME:
            return int(self.st_mtime)
        if item == stat.ST_CTIME:
            return int(self.st_ctime)
        raise ValueError('Invalid item')

    @property
    def st_atime_ns(self) -> int:
        """Access time in nanoseconds."""
        return self._st_atime_ns

    @st_atime_ns.setter
    def st_atime_ns(self, val: int) -> None:
        self._st_atime_ns = val

    @property
    def st_mtime_ns(self) -> int:
        """Modification time in nanoseconds."""
        return self._st_mtime_ns

    @st_mtime_ns.setter
    def st_mtime_ns(self, val: int) -> None:
        self._st_mtime_ns = val

    @property
    def st_ctime_ns(self) -> int:
        """Change time in nanoseconds."""
        return self._st_ctime_ns

    @st_ctime_ns.setter
    def st_ctime_ns(self, val: int) -> None:
        self._st_ctime_ns = val
def _init_weights(module, name=None, head_init_scale=1.0):
if isinstance(module, nn.Conv2d):
trunc_normal_(module.weight, std=0.02)
if (module.bias is not None):
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Linear):
trunc_normal_(module.weight, std=0.02)
nn.init.zeros_(module.bias)
if (name and ('head.' in name)):
module.weight.data.mul_(head_init_scale)
module.bias.data.mul_(head_init_scale) |
class UrlFetcher(BinFetcher):
    """Fetcher that downloads a binary artifact over HTTP(S) via urllib."""

    def __fetch_impl__(self, bin_info, bin_file):
        """Download `bin_info['url']` to `bin_file`, logging progress and the
        final size. Raises if no URL is present in the bin description."""
        app = self.app
        app.print('DL {}'.format(bin_file))
        if 'url' not in bin_info:
            raise Exception('Missing URL for bin')
        download_url = bin_info['url']
        app.detail(download_url)
        urllib.request.urlretrieve(download_url, bin_file)
        size = Path(bin_file).stat().st_size
        app.detail('{:,d} bytes downloaded'.format(size))
def fold_high_bias_next_conv_qt(activation_is_relu, beta, gamma, weight, bias_curr_layer, bias_prev_layer):
    """Absorb part of the previous layer's bias into the current conv's bias
    (cross-layer high-bias folding for quantization).

    Returns (new_prev_layer_bias, curr_layer_bias); note that
    `bias_curr_layer` is updated in place (+=) and returned.
    """
    if activation_is_relu:
        # With a ReLU in between, only the part of beta safely above the
        # 3-sigma band can be absorbed.
        absorb_bias = np.maximum(0, beta - 3 * np.abs(gamma))
    else:
        absorb_bias = beta
    # Collapse the conv kernel over its spatial dims -> (out_ch, in_ch).
    weight_matrix = weight.sum(axis=(2, 3))
    if weight_matrix.shape[1] == 1:
        # Single input channel: element-wise product per output channel.
        bias_correction = np.multiply(weight_matrix.reshape(weight_matrix.shape[0]), absorb_bias)
    else:
        bias_correction = np.matmul(weight_matrix, absorb_bias)
    bias_curr_layer += bias_correction  # in-place, mirrors original behavior
    new_prev_bias = bias_prev_layer - absorb_bias
    return (new_prev_bias, bias_curr_layer)
def get_reorient_poses(env):
    """Sample candidate table-top reorientation poses for env.fg_object_id.

    Builds a grid of (x, y) placements inside the workspace bounds, filters
    out cells that would collide with other objects, samples a grid of Euler
    orientations, and for each orientation computes the z that rests the
    object on the table. Returns an (M, 7) array of [x, y, z, qx, qy, qz, qw].
    """
    # Workspace bounds differ between the real robot and simulation.
    if env._real:
        bounds = ((0.15, (- 0.5), (env.TABLE_OFFSET + 0.001)), (0.3, (- 0.35), (env.TABLE_OFFSET + 0.001)))
    else:
        bounds = ((0.25, (- 0.55), (env.TABLE_OFFSET + 0.001)), (0.75, (- 0.25), (env.TABLE_OFFSET + 0.001)))
    if 0:
        # debug visualization, intentionally disabled
        pp.draw_aabb(bounds)
    # 10x8 grid of candidate (x, y) positions within the bounds.
    XY = np.array(list(itertools.product(np.linspace(bounds[0][0], bounds[1][0], num=10), np.linspace(bounds[0][1], bounds[1][1], num=8))))
    # 8^3 grid of candidate (alpha, beta, gamma) Euler angles over [-pi, pi).
    ABG = np.array(list(itertools.product(np.linspace((- np.pi), np.pi, num=8, endpoint=False), np.linspace((- np.pi), np.pi, num=8, endpoint=False), np.linspace((- np.pi), np.pi, num=8, endpoint=False))))
    aabb = pp.get_aabb(env.fg_object_id)
    # Largest extent of the object's AABB; used as a conservative footprint.
    max_extent = max((aabb[1] - aabb[0]))
    with pp.LockRenderer(), pp.WorldSaver():
        XY_valid = []
        for (x, y) in XY:
            # Probe each cell with a box of the object's max footprint to
            # reject placements colliding with other scene objects.
            box = pp.create_box(w=max_extent, l=max_extent, h=0.5)
            pp.set_pose(box, ((x, y, 0), (0, 0, 0, 1)))
            obstacles = env.object_ids[:]
            obstacles.remove(env.fg_object_id)
            if reorientbot.pybullet.is_colliding(box, ids2=obstacles):
                pp.remove_body(box)
                continue
            pp.remove_body(box)
            if env.debug:
                pp.draw_point((x, y, (env.TABLE_OFFSET + 0.001)), color=(0, 1, 0, 1))
            XY_valid.append((x, y))
    XY = XY_valid
    reorient_poses = []
    with pp.LockRenderer(), pp.WorldSaver():
        for (a, b, g) in ABG:
            # Compute the resting z once per orientation (at the first valid
            # XY cell), then reuse it for all cells.
            (x, y) = XY[0]
            c = reorientbot.geometry.Coordinate(position=(x, y, 0), quaternion=_utils.get_canonical_quaternion(class_id=_utils.get_class_id(env.fg_object_id)))
            c.rotate([a, b, g], wrt='world')
            pp.set_pose(env.fg_object_id, c.pose)
            # Lift so the object's AABB bottom sits on the table surface.
            c.position[2] = ((- pp.get_aabb(env.fg_object_id)[0][2]) + env.TABLE_OFFSET)
            pp.set_pose(env.fg_object_id, c.pose)
            # Refine z using the actual closest distance to the plane.
            points = pp.body_collision_info(env.fg_object_id, env.plane, max_distance=0.2)
            distance_to_plane = min((point[8] for point in points))
            assert (distance_to_plane > 0)
            c.position[2] += (- distance_to_plane)
            if (_utils.get_class_id(env.fg_object_id) == 11):
                # NOTE(review): class 11 gets an extra 3 cm z offset --
                # presumably an object-specific fudge; confirm.
                c.position[2] += 0.03
            else:
                c.position[2] += 0.0
            for (x, y) in XY:
                reorient_poses.append([x, y, c.position[2], *c.quaternion])
    return np.array(reorient_poses)
class InitiatorMixin():
address_to_client: dict
block_number: BlockNumber
def __init__(self):
super().__init__()
self.used_secrets: Set[Secret] = set()
self.processed_secret_request_secrethashes: Set[SecretHash] = set()
self.initiated: Set[Secret] = set()
def _available_amount(self, route):
client = self.address_to_client[route.initiator]
netting_channel = client.address_to_channel[route.hops[1]]
return channel.get_distributable(netting_channel.our_state, netting_channel.partner_state)
def _is_expired(self, secrethash, initiator):
expiry = self.address_to_client[initiator].expected_expiry[secrethash]
return (self.block_number >= (expiry + DEFAULT_WAIT_BEFORE_LOCK_REMOVAL))
def _is_removed(self, action):
return self._is_expired(action.transfer.secrethash, action.transfer.initiator)
def _action_init_initiator(self, route: Route, transfer: TransferDescriptionWithSecretState):
client = self.address_to_client[route.initiator]
channel = client.address_to_channel[route.hops[1]]
if (transfer.secrethash not in client.expected_expiry):
client.expected_expiry[transfer.secrethash] = (self.block_number + 10)
return ActionInitInitiator(transfer, [factories.make_route_from_channel(channel)])
def _new_transfer_description(self, route, payment_id, amount, secret):
self.used_secrets.add(secret)
return TransferDescriptionWithSecretState(token_network_registry_address=self.token_network_registry_address, payment_identifier=payment_id, amount=amount, token_network_address=self.token_network_address, initiator=route.initiator, target=route.target, secret=secret)
(target=send_locked_transfers, route=routes, payment_id=payment_id(), amount=integers(min_value=1, max_value=100), secret=secret())
def valid_init_initiator(self, route, payment_id, amount, secret):
assume((amount <= self._available_amount(route)))
assume((secret not in self.used_secrets))
transfer = self._new_transfer_description(route, payment_id, amount, secret)
action = self._action_init_initiator(route, transfer)
client = self.address_to_client[route.initiator]
result = node.state_transition(client.chain_state, action)
assert event_types_match(result.events, SendLockedTransfer)
self.initiated.add(transfer.secret)
client.expected_expiry[transfer.secrethash] = (self.block_number + 10)
self.transfer_order.initiated.append(secret)
return utils.SendLockedTransferInNode(event=result.events[0], action=action, node=route.initiator, private_key=self.address_to_privkey[route.initiator])
(route=routes, payment_id=payment_id(), excess_amount=integers(min_value=1), secret=secret())
def exceeded_capacity_init_initiator(self, route, payment_id, excess_amount, secret):
amount = (self._available_amount(route) + excess_amount)
transfer = self._new_transfer_description(route, payment_id, amount, secret)
action = self._action_init_initiator(route, transfer)
client = self.address_to_client[route.initiator]
result = node.state_transition(client.chain_state, action)
assert event_types_match(result.events, EventPaymentSentFailed)
self.event('ActionInitInitiator failed: Amount exceeded')
(previous=send_locked_transfers, route=routes, payment_id=payment_id(), amount=integers(min_value=1))
def used_secret_init_initiator(self, previous, route, payment_id, amount):
assume((not self._is_removed(previous.action)))
client = self.address_to_client[previous.node]
secret = previous.action.transfer.secret
transfer = self._new_transfer_description(route, payment_id, amount, secret)
action = self._action_init_initiator(route, transfer)
result = node.state_transition(client.chain_state, action)
assert (not result.events)
self.event('ActionInitInitiator failed: Secret already in use.')
(previous=send_locked_transfers)
def replay_init_initiator(self, previous):
assume((not self._is_removed(previous.action)))
client = self.address_to_client[previous.node]
result = node.state_transition(client.chain_state, previous.action)
assert (not result.events)
self.event('Replayed init_initiator action ignored')
(target=send_secret_reveals_forward, source=consumes(send_secret_requests))
def process_valid_secret_request(self, source: utils.SendSecretRequestInNode) -> utils.SendSecretRevealInNode:
initiator_address = source.event.recipient
initiator_client = self.address_to_client[initiator_address]
state_change = utils.send_secret_request_to_receive_secret_request(source)
assume((state_change.secrethash not in self.processed_secret_request_secrethashes))
result = node.state_transition(initiator_client.chain_state, state_change)
if (state_change.secrethash in self.processed_secret_request_secrethashes):
assert (not result.events)
self.event('Valid SecretRequest dropped due to previous one with same secrethash.')
return multiple()
elif self._is_expired(state_change.secrethash, initiator_address):
assert (not result.events)
self.event('Otherwise valid SecretRequest dropped due to expired lock.')
return multiple()
else:
assert event_types_match(result.events, SendSecretReveal)
self.event('Valid SecretRequest accepted.')
self.processed_secret_request_secrethashes.add(state_change.secrethash)
return utils.SendSecretRevealInNode(node=initiator_address, event=result.events[0])
(source=send_secret_requests, wrong_amount=integers())
def process_secret_request_with_wrong_amount(self, source: utils.SendSecretRequestInNode, wrong_amount):
initiator_address = source.event.recipient
initiator_client = self.address_to_client[initiator_address]
state_change = utils.send_secret_request_to_receive_secret_request(source)
assume((wrong_amount != state_change.amount))
state_change = replace(state_change, amount=wrong_amount)
result = node.state_transition(initiator_client.chain_state, state_change)
transfer_expired = self._is_expired(state_change.secrethash, initiator_address)
secrethash_known = (state_change.secrethash in self.processed_secret_request_secrethashes)
if (transfer_expired or secrethash_known):
assert (not result.events)
self.event('Invalid secret request dropped silently (wrong amount)')
else:
self.processed_secret_request_secrethashes.add(state_change.secrethash)
(source=send_secret_requests, wrong_secret=secret())
def process_secret_request_with_wrong_secrethash(self, source: utils.SendSecretRequestInNode, wrong_secret):
initiator_address = source.event.recipient
initiator_client = self.address_to_client[initiator_address]
state_change = utils.send_secret_request_to_receive_secret_request(source)
wrong_secrethash = sha256_secrethash(wrong_secret)
assume((wrong_secrethash != state_change.secrethash))
state_change = replace(state_change, secrethash=wrong_secrethash)
result = node.state_transition(initiator_client.chain_state, state_change)
assert (not result.events)
self.event('Invalid secret request dropped (wrong secrethash)')
(source=send_secret_requests, wrong_payment_identifier=integers())
def process_secret_request_with_wrong_payment_identifier(self, source: utils.SendSecretRequestInNode, wrong_payment_identifier):
initiator_address = source.event.recipient
initiator_client = self.address_to_client[initiator_address]
state_change = utils.send_secret_request_to_receive_secret_request(source)
assume((wrong_payment_identifier != state_change.payment_identifier))
state_change = replace(state_change, payment_identifier=wrong_payment_identifier)
result = node.state_transition(initiator_client.chain_state, state_change)
assert (not result.events)
self.event('Invalid secret request dropped (wrong payment identifier)')
(target=send_unlocks, source=consumes(send_secret_reveals_backward))
def process_secret_reveal_as_initiator(self, source: utils.SendSecretRevealInNode) -> utils.SendUnlockInNode:
initiator_address = source.event.recipient
private_key = self.address_to_privkey[initiator_address]
initiator_client = self.address_to_client[initiator_address]
state_change = utils.send_secret_reveal_to_recieve_secret_reveal(source)
result = node.state_transition(initiator_client.chain_state, state_change)
assert event_types_match(result.events, SendUnlock, EventPaymentSentSuccess, EventUnlockSuccess)
self.event('Valid secret reveal processed in initiator node.')
return utils.SendUnlockInNode(node=initiator_address, private_key=private_key, event=result.events[0])
(source=send_secret_reveals_backward, wrong_secret=secret())
def process_secret_reveal_with_mismatched_secret_as_initiator(self, source: utils.SendSecretRevealInNode, wrong_secret: Secret):
initiator_address = source.event.recipient
initiator_client = self.address_to_client[initiator_address]
state_change = utils.send_secret_reveal_to_recieve_secret_reveal(source)
assume((state_change.secret != wrong_secret))
state_change = replace(state_change, secret=wrong_secret)
result = node.state_transition(initiator_client.chain_state, state_change)
assert (not result.events)
self.event('Secret reveal with wrong secret dropped in initiator node.')
(source=send_secret_reveals_backward, wrong_secret=secret())
def process_secret_reveal_with_unknown_secrethash_as_initiator(self, source: utils.SendSecretRevealInNode, wrong_secret: Secret):
initiator_address = source.event.recipient
initiator_client = self.address_to_client[initiator_address]
state_change = utils.send_secret_reveal_to_recieve_secret_reveal(source)
assume((state_change.secret != wrong_secret))
wrong_secrethash = sha256_secrethash(wrong_secret)
state_change = replace(state_change, secret=wrong_secret, secrethash=wrong_secrethash)
result = node.state_transition(initiator_client.chain_state, state_change)
assert (not result.events)
self.event('Secret reveal with unknown secrethash dropped in initiator node.')
@rule(source=send_secret_reveals_backward, wrong_channel_id=integers())
def process_secret_reveal_with_wrong_channel_identifier_as_initiator(self, source: utils.SendSecretRevealInNode, wrong_channel_id):
    """A secret reveal addressed to an unknown channel id must be dropped.

    NOTE(review): decorator restored from a garbled ``(source=..., ...)``
    remnant; ``@rule`` matches the hypothesis stateful pattern used here.
    """
    initiator_address = source.event.recipient
    initiator_client = self.address_to_client[initiator_address]
    state_change = utils.send_secret_reveal_to_recieve_secret_reveal(source)
    assume(state_change.canonical_id.channel_id != wrong_channel_id)
    # Swap in a canonical id pointing at a channel the node does not know.
    wrong_canonical_id = replace(state_change.canonical_id, channel_id=wrong_channel_id)
    state_change = replace(state_change, canonical_id=wrong_canonical_id)
    result = node.state_transition(initiator_client.chain_state, state_change)
    assert not result.events
    self.event('Secret reveal with unknown channel id dropped in initiator node.')
@rule(source=send_secret_reveals_backward, wrong_channel_id=integers(), wrong_recipient=address())
def process_secret_reveal_with_wrong_queue_identifier_as_initiator(self, source: utils.SendSecretRevealInNode, wrong_channel_id, wrong_recipient):
    """A secret reveal with an unknown queue identifier must be dropped.

    NOTE(review): decorator restored from a garbled ``(source=..., ...)``
    remnant; ``@rule`` matches the hypothesis stateful pattern used here.
    """
    initiator_address = source.event.recipient
    initiator_client = self.address_to_client[initiator_address]
    state_change = utils.send_secret_reveal_to_recieve_secret_reveal(source)
    assume(state_change.canonical_id.channel_id != wrong_channel_id)
    # Corrupt both the channel id and the recipient inside the queue identifier.
    wrong_canonical_id = replace(state_change.queue_id.canonical_id, channel_id=wrong_channel_id)
    wrong_queue_id = replace(state_change.queue_id, canonical_id=wrong_canonical_id, recipient=wrong_recipient)
    state_change = replace(state_change, queue_id=wrong_queue_id)
    result = node.state_transition(initiator_client.chain_state, state_change)
    assert not result.events
    self.event('Secret reveal with unknown queue id dropped in initiator node.')
@pytest.mark.pydicom
def test_copy():
    """DicomBase copies its dataset by default; copy=False keeps a live view.

    NOTE(review): the marker line was garbled to ``.pydicom`` in the source;
    ``@pytest.mark.pydicom`` is the matching pytest marker — confirm upstream.
    """
    dont_change_string = "don't change me"
    to_be_changed_string = 'do change me'
    new_manufacturer = 'george'
    dataset_to_be_copied = dicom_dataset_from_dict({'Manufacturer': dont_change_string})
    dataset_to_be_viewed = dicom_dataset_from_dict({'Manufacturer': to_be_changed_string})
    # Default constructor copies; copy=False wraps the original dataset.
    dicom_base_copy = DicomBase(dataset_to_be_copied)
    dicom_base_view = DicomBase(dataset_to_be_viewed, copy=False)
    dicom_base_copy.dataset.Manufacturer = new_manufacturer
    dicom_base_view.dataset.Manufacturer = new_manufacturer
    # The copied source is untouched; the viewed source sees the mutation.
    assert dataset_to_be_copied.Manufacturer == dont_change_string
    assert dataset_to_be_viewed.Manufacturer == new_manufacturer
def pooling_type_to_pooling_mode(pooling_type: PoolingType) -> PoolingMode:
    """Translate a ``PoolingType`` into the equivalent ``PoolingMode``.

    Comparison is done on the enum *values* (as in the original code), so
    value-compatible enum members declared in another module still map.

    Args:
        pooling_type: the pooling type to translate.

    Returns:
        The matching ``PoolingMode`` member.

    Raises:
        ValueError: if ``pooling_type`` matches no known pooling type.
            (ValueError subclasses Exception, so callers catching the old
            bare ``Exception`` are unaffected.)
    """
    if pooling_type.value == PoolingType.SUM.value:
        return PoolingMode.SUM
    if pooling_type.value == PoolingType.MEAN.value:
        return PoolingMode.MEAN
    if pooling_type.value == PoolingType.NONE.value:
        return PoolingMode.NONE
    raise ValueError(f'Invalid pooling type {pooling_type}')
def eval_one_epoch(cfg, model, dataloader, epoch_id, logger, dist_test=False, save_to_file=False, result_dir=None):
    """Run one evaluation epoch and return a dict of recall/eval metrics.

    Args:
        cfg: config node; reads MODEL.POST_PROCESSING.* and LOCAL_RANK.
        model: detection model; wrapped in DistributedDataParallel when dist_test.
        dataloader: evaluation dataloader.
        epoch_id: identifier used for logging only.
        logger: logger instance.
        dist_test: run distributed evaluation and merge results across ranks.
        save_to_file: also write per-sample predictions under
            result_dir/final_result/data.
        result_dir: output directory (a pathlib-like Path); must not be None
            since it is used unconditionally.

    Returns:
        dict of metrics; an empty dict on every rank other than 0.
    """
    result_dir.mkdir(parents=True, exist_ok=True)
    final_output_dir = ((result_dir / 'final_result') / 'data')
    if save_to_file:
        final_output_dir.mkdir(parents=True, exist_ok=True)
    # Accumulators: total ground-truth count plus per-threshold recall counters.
    metric = {'gt_num': 0}
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        metric[('recall_roi_%s' % str(cur_thresh))] = 0
        metric[('recall_rcnn_%s' % str(cur_thresh))] = 0
    dataset = dataloader.dataset
    class_names = dataset.class_names
    det_annos = []
    logger.info((' EPOCH %s EVALUATION ' % epoch_id))
    if dist_test:
        # Map this process onto its local GPU before wrapping in DDP.
        num_gpus = torch.cuda.device_count()
        local_rank = (cfg.LOCAL_RANK % num_gpus)
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], broadcast_buffers=False)
    model.eval()
    # Progress bar only on rank 0 to avoid interleaved output.
    if (cfg.LOCAL_RANK == 0):
        progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval', dynamic_ncols=True)
    start_time = time.time()
    for (i, batch_dict) in enumerate(dataloader):
        load_data_to_gpu(batch_dict)
        with torch.no_grad():
            (pred_dicts, ret_dict) = model(batch_dict)
        disp_dict = {}
        # Accumulate recall counters from this batch into `metric`.
        statistics_info(cfg, ret_dict, metric, disp_dict)
        annos = dataset.generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=(final_output_dir if save_to_file else None))
        det_annos += annos
        if (cfg.LOCAL_RANK == 0):
            progress_bar.set_postfix(disp_dict)
            progress_bar.update()
    if (cfg.LOCAL_RANK == 0):
        progress_bar.close()
    if dist_test:
        # Gather per-rank annotations and metric dicts onto every rank.
        (rank, world_size) = common_utils.get_dist_info()
        det_annos = common_utils.merge_results_dist(det_annos, len(dataset), tmpdir=(result_dir / 'tmpdir'))
        metric = common_utils.merge_results_dist([metric], world_size, tmpdir=(result_dir / 'tmpdir'))
    logger.info((' Performance of EPOCH %s ' % epoch_id))
    sec_per_example = ((time.time() - start_time) / len(dataloader.dataset))
    logger.info(('Generate label finished(sec_per_example: %.4f second).' % sec_per_example))
    # Only rank 0 computes and reports the final metrics.
    if (cfg.LOCAL_RANK != 0):
        return {}
    ret_dict = {}
    if dist_test:
        # After merge_results_dist, `metric` is a list of per-rank dicts;
        # sum them into the rank-0 dict.
        for (key, val) in metric[0].items():
            for k in range(1, world_size):
                metric[0][key] += metric[k][key]
        metric = metric[0]
    gt_num_cnt = metric['gt_num']
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        # max(gt_num_cnt, 1) guards against division by zero on empty datasets.
        cur_roi_recall = (metric[('recall_roi_%s' % str(cur_thresh))] / max(gt_num_cnt, 1))
        cur_rcnn_recall = (metric[('recall_rcnn_%s' % str(cur_thresh))] / max(gt_num_cnt, 1))
        logger.info(('recall_roi_%s: %f' % (cur_thresh, cur_roi_recall)))
        logger.info(('recall_rcnn_%s: %f' % (cur_thresh, cur_rcnn_recall)))
        ret_dict[('recall/roi_%s' % str(cur_thresh))] = cur_roi_recall
        ret_dict[('recall/rcnn_%s' % str(cur_thresh))] = cur_rcnn_recall
    total_pred_objects = 0
    for anno in det_annos:
        total_pred_objects += anno['name'].__len__()
    logger.info(('Average predicted number of objects(%d samples): %.3f' % (len(det_annos), (total_pred_objects / max(1, len(det_annos))))))
    # Persist raw detections for later offline evaluation.
    with open((result_dir / 'result.pkl'), 'wb') as f:
        pickle.dump(det_annos, f)
    (result_str, result_dict) = dataset.evaluation(det_annos, class_names, eval_metric=cfg.MODEL.POST_PROCESSING.EVAL_METRIC, output_path=final_output_dir)
    logger.info(result_str)
    ret_dict.update(result_dict)
    logger.info(('Result is save to %s' % result_dir))
    logger.info('Evaluation done.')
    return ret_dict
def test_majorana_operator_divide():
    """`/` returns a scaled copy, `/=` scales in place; non-numeric divisors raise TypeError."""
    a = (MajoranaOperator((0, 1, 5), 1.5) + MajoranaOperator((1, 2, 7), (- 0.5)))
    # True division halves every coefficient without mutating `a`.
    assert ((a / 2).terms == {(0, 1, 5): 0.75, (1, 2, 7): (- 0.25)})
    a /= 2
    assert (a.terms == {(0, 1, 5): 0.75, (1, 2, 7): (- 0.25)})
    # Dividing by a non-numeric type must raise, in both forms.
    with pytest.raises(TypeError):
        _ = (a / 'a')
    with pytest.raises(TypeError):
        a /= 'a'
def qflags_key(base: Type[sip.simplewrapper], value: _EnumValueType, klass: Type[_EnumValueType]=None) -> str:
    """Return the key names of all flags set in ``value``, joined by '|'.

    Falls back to ``value.__class__`` when ``klass`` is not given; a plain
    int is rejected because the enum class cannot be guessed from it.
    """
    if klass is None:
        klass = value.__class__
    if klass == int:
        raise TypeError("Can't guess enum class of an int!")
    # No flag set: delegate directly to the single-value lookup.
    if not value:
        return qenum_key(base, value, klass)
    intval = qtutils.extract_enum_val(value)
    names = []
    bit = 1
    # Walk each bit position up to the highest set bit, naming every set flag.
    while bit <= intval:
        if intval & bit:
            names.append(qenum_key(base, klass(bit), klass))
        bit <<= 1
    return '|'.join(names)
def generate_texture_mipmaps(target):
    """Generate mipmaps for a Texture or GfxTextureView.

    A 2D texture with multiple layers is handled by recursing over a
    single-layer view per layer.

    Args:
        target: a Texture or a GfxTextureView.

    Raises:
        TypeError: if ``target`` is neither type. (Previously this case fell
        through to the final call with ``texture``/``layer`` unbound, raising
        a confusing UnboundLocalError.)
    """
    if (isinstance(target, Texture) and (target.dim == 2) and (target.size[2] > 1)):
        # Array texture: generate mipmaps for each layer separately.
        for i in range(target.size[2]):
            generate_texture_mipmaps(GfxTextureView(target, layer_range=(i, (i + 1))))
        return
    if isinstance(target, Texture):
        texture = target
        layer = 0
    elif isinstance(target, GfxTextureView):
        texture = target.texture
        layer = target.layer_range[0]
    else:
        raise TypeError(f'Expected Texture or GfxTextureView, got {type(target).__name__}')
    generate_mipmaps(texture, layer)
@admin.register(Infraction)
class InfractionAdmin(admin.ModelAdmin):
    """Read-only Django admin view of moderation infractions.

    NOTE(review): the decorator line was garbled to ``(Infraction)`` in the
    source; ``@admin.register(Infraction)`` is the standard ModelAdmin
    registration this matches — confirm upstream.
    """

    fieldsets = (('Members', {'fields': ('user', 'actor')}), ('Action', {'fields': ('type', 'hidden', 'active')}), ('Dates', {'fields': ('inserted_at', 'expires_at')}), ('Reason', {'fields': ('reason',)}))
    # Every field is read-only: infractions are managed through the API.
    readonly_fields = ('user', 'actor', 'type', 'inserted_at', 'expires_at', 'active', 'hidden')
    list_display = ('type', 'active', 'user', 'inserted_at', 'reason')
    search_fields = ('id', 'user__name', 'user__id', 'actor__name', 'actor__id', 'reason', 'type')
    list_filter = ('type', 'hidden', 'active', InfractionActorFilter)

    def has_add_permission(self, *args) -> bool:
        """Disallow creating infractions from the admin site."""
        return False
def model_with_reused_layer():
    """Build a small functional Keras model that applies one shared ReLU layer twice."""
    shared_relu = tf.keras.layers.ReLU()
    inputs = tf.keras.layers.Input(shape=(5,))
    # The same ReLU instance is used before and after the Dense layer.
    hidden = shared_relu(inputs)
    hidden = tf.keras.layers.Dense(units=2)(hidden)
    hidden = shared_relu(hidden)
    outputs = tf.keras.layers.Softmax()(hidden)
    return tf.keras.Model(inputs=inputs, outputs=outputs, name='model_with_reused_layer')
def given(name: (str | StepParser), converters: (dict[(str, Callable[([str], Any)])] | None)=None, target_fixture: (str | None)=None, stacklevel: int=1) -> Callable[([Callable[(P, T)]], Callable[(P, T)])]:
    """Decorator factory for a BDD "Given" step; delegates to step() with the GIVEN type.

    Args:
        name: step name or a step parser instance.
        converters: optional mapping of argument name -> conversion callable.
        target_fixture: optional fixture name the step's return value is injected as.
        stacklevel: stack frame offset used when registering the step.

    Returns:
        A decorator for the step function.
    """
    return step(name, GIVEN, converters=converters, target_fixture=target_fixture, stacklevel=stacklevel)
def test():
    """Bouncing-sprite demo on an ILI9341 display; runs under CircuitPython only."""
    # Guard: the board/busio APIs below exist only on CircuitPython.
    if (implementation.name != 'circuitpython'):
        print()
        print('This demo is for CircuitPython only!')
        exit()
    try:
        # Display control pins: chip-select, data/command, reset.
        cs_pin = DigitalInOut(board.P0_15)
        dc_pin = DigitalInOut(board.P0_17)
        rst_pin = DigitalInOut(board.P0_20)
        spi = SPI(clock=board.P0_24, MOSI=board.P0_22)
        display = Display(spi, dc=dc_pin, cs=cs_pin, rst=rst_pin)
        display.clear()
        fixed = XglcdFont('fonts/FixedFont5x8.c', 5, 8, letter_count=96)
        WIDTH = 128
        text = 'CircuitPython Demo'
        length = fixed.measure_text(text)
        # Center the banner text horizontally.
        x = int(((WIDTH / 2) - (length / 2)))
        display.draw_text(x, 6, text, fixed, color565(255, 255, 0))
        display.draw_rectangle(0, 0, 127, 20, color565(0, 255, 0))
        logo = BouncingSprite('images/blinka45x48.raw', 45, 48, 239, 319, 1, display)
        # Animation loop: move and redraw the sprite each frame.
        while True:
            timer = monotonic()
            logo.update_pos()
            logo.draw()
            # NOTE(review): with a frame budget of 0. seconds, timer_dif can
            # never be positive, so the sleep below never fires; similar demos
            # use a budget like 0.1 here — confirm the intended frame cap.
            timer_dif = (0. - (monotonic() - timer))
            if (timer_dif > 0):
                sleep(timer_dif)
    except KeyboardInterrupt:
        # Ctrl-C: release the display cleanly.
        display.cleanup()
class main(list):
    """Interactive shell entry for the letsencrypt module.

    Constructing an instance publishes the campaign/module selection via
    module-level globals (read by the cmd_main command handlers) and then
    blocks inside the command loop.
    """
    def __init__(self, campaign, mod, project_id):
        # Share state with cmd_main's handlers through globals.
        global campaign_list
        global module
        campaign_list = campaign
        if (mod is not None):
            module = mod
        i = cmd_main()
        # Prompt renders as "(Overlord : <project_id>/letsencrypt)$> " with ANSI colors.
        i.prompt = (((((('(' + cmd2.ansi.style('Overlord', fg=Fg.RED, bg=None, bold=True, underline=False)) + ' : ') + cmd2.ansi.style(project_id, fg=Fg.DARK_GRAY, bg=None, bold=True, underline=False)) + cmd2.ansi.style('/letsencrypt', fg=Fg.BLUE, bg=None, bold=True, underline=False)) + ')') + '$> ')
        # Blocks until the user exits the shell.
        i.cmdloop()
class InlineTest(unittest.TestCase):
def setUp(self):
    """Create a sample project with two empty modules ('mod', 'mod2') for inlining tests."""
    super().setUp()
    self.project = testutils.sample_project()
    self.pycore = self.project.pycore
    self.mod = testutils.create_module(self.project, 'mod')
    self.mod2 = testutils.create_module(self.project, 'mod2')
def tearDown(self):
    """Remove the sample project created in setUp."""
    testutils.remove_project(self.project)
    super().tearDown()
def _inline(self, code, offset, **kwds):
    """Write ``code`` into self.mod, inline at ``offset``, and return the refactored source."""
    self.mod.write(code)
    # _inline2 already returns self.mod.read() after applying the changes.
    return self._inline2(self.mod, offset, **kwds)
def _inline2(self, resource, offset, **kwds):
    """Apply the inline refactoring to ``resource`` at ``offset`` and return self.mod's source."""
    refactoring = inline.create_inline(self.project, resource, offset)
    self.project.do(refactoring.get_changes(**kwds))
    return self.mod.read()
def test_simple_case(self):
    """Inlining a variable replaces its single use with the assigned value."""
    code = dedent(' a_var = 10\n another_var = a_var\n ')
    offset = code.index('a_var') + 1
    self.assertEqual('another_var = 10\n', self._inline(code, offset))
def test_empty_case(self):
    """Inlining a variable with no remaining uses removes its definition entirely."""
    code = 'a_var = 10\n'
    offset = code.index('a_var') + 1
    self.assertEqual('', self._inline(code, offset))
def test_long_definition(self):
code = dedent(' a_var = 10 + (10 + 10)\n another_var = a_var\n ')
refactored = self._inline(code, (code.index('a_var') + 1))
self.assertEqual('another_var = 10 + (10 + 10)\n', refactored)
def test_explicit_continuation(self):
code = dedent(' a_var = (10 +\n 10)\n another_var = a_var\n ')
refactored = self._inline(code, (code.index('a_var') + 1))
self.assertEqual(dedent(' another_var = (10 +\n 10)\n '), refactored)
def test_implicit_continuation(self):
code = dedent(' a_var = 10 +\\\n 10\n another_var = a_var\n ')
refactored = self._inline(code, (code.index('a_var') + 1))
self.assertEqual(dedent(' another_var = 10 +\\\n 10\n '), refactored)
def test_inlining_at_the_end_of_input(self):
code = dedent(' a = 1\n b = a')
refactored = self._inline(code, (code.index('a') + 1))
self.assertEqual('b = 1', refactored)
def test_on_classes(self):
code = dedent(' class AClass(object):\n pass\n ')
with self.assertRaises(rope.base.exceptions.RefactoringError):
self._inline(code, (code.index('AClass') + 1))
def test_multiple_assignments(self):
code = dedent(' a_var = 10\n a_var = 20\n ')
with self.assertRaises(rope.base.exceptions.RefactoringError):
self._inline(code, (code.index('a_var') + 1))
def test_tuple_assignments(self):
code = 'a_var, another_var = (20, 30)\n'
with self.assertRaises(rope.base.exceptions.RefactoringError):
self._inline(code, (code.index('a_var') + 1))
def test_on_unknown_vars(self):
code = 'a_var = another_var\n'
with self.assertRaises(rope.base.exceptions.RefactoringError):
self._inline(code, (code.index('another_var') + 1))
def test_attribute_inlining(self):
code = dedent(' class A(object):\n def __init__(self):\n self.an_attr = 3\n range(self.an_attr)\n ')
refactored = self._inline(code, (code.index('an_attr') + 1))
expected = dedent(' class A(object):\n def __init__(self):\n range(3)\n ')
self.assertEqual(expected, refactored)
def test_attribute_inlining2(self):
code = dedent(' class A(object):\n def __init__(self):\n self.an_attr = 3\n range(self.an_attr)\n a = A()\n range(a.an_attr)')
refactored = self._inline(code, (code.index('an_attr') + 1))
expected = dedent(' class A(object):\n def __init__(self):\n range(3)\n a = A()\n range(3)')
self.assertEqual(expected, refactored)
def test_a_function_with_no_occurrence(self):
self.mod.write(dedent(' def a_func():\n pass\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual('', self.mod.read())
def test_a_function_with_no_occurrence2(self):
self.mod.write(dedent(' a_var = 10\n def a_func():\n pass\n print(a_var)\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual(dedent(' a_var = 10\n print(a_var)\n '), self.mod.read())
def test_replacing_calls_with_function_definition_in_other_modules(self):
self.mod.write(dedent(' def a_func():\n print(1)\n '))
mod1 = testutils.create_module(self.project, 'mod1')
mod1.write(dedent(' import mod\n mod.a_func()\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual(dedent(' import mod\n print(1)\n '), mod1.read())
def test_replacing_calls_with_function_definition_in_other_modules2(self):
self.mod.write(dedent(' def a_func():\n print(1)\n '))
mod1 = testutils.create_module(self.project, 'mod1')
mod1.write(dedent(' import mod\n if True:\n mod.a_func()\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual(dedent(' import mod\n if True:\n print(1)\n '), mod1.read())
def test_replacing_calls_with_method_definition_in_other_modules(self):
self.mod.write(dedent(' class A(object):\n var = 10\n def a_func(self):\n print(1)\n '))
mod1 = testutils.create_module(self.project, 'mod1')
mod1.write(dedent(' import mod\n mod.A().a_func()\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual(dedent(' import mod\n print(1)\n '), mod1.read())
self.assertEqual(dedent(' class A(object):\n var = 10\n '), self.mod.read())
def test_replacing_calls_with_function_definition_in_defining_module(self):
self.mod.write(dedent(' def a_func():\n print(1)\n a_func()\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual('print(1)\n', self.mod.read())
def test_replac_calls_with_function_definition_in_defining_module2(self):
self.mod.write(dedent(' def a_func():\n for i in range(10):\n print(1)\n a_func()\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual(dedent(' for i in range(10):\n print(1)\n '), self.mod.read())
def test_replacing_calls_with_method_definition_in_defining_modules(self):
self.mod.write(dedent(' class A(object):\n var = 10\n def a_func(self):\n print(1)\n A().a_func()'))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual(dedent(' class A(object):\n var = 10\n print(1)\n '), self.mod.read())
def test_parameters_with_the_same_name_as_passed(self):
self.mod.write(dedent(' def a_func(var):\n print(var)\n var = 1\n a_func(var)\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual(dedent(' var = 1\n print(var)\n '), self.mod.read())
def test_parameters_with_the_same_name_as_passed2(self):
self.mod.write(dedent(' def a_func(var):\n print(var)\n var = 1\n a_func(var=var)\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual(dedent(' var = 1\n print(var)\n '), self.mod.read())
def test_simple_parameters_renaming(self):
self.mod.write(dedent(' def a_func(param):\n print(param)\n var = 1\n a_func(var)\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual(dedent(' var = 1\n print(var)\n '), self.mod.read())
def test_simple_parameters_renaming_for_multiple_params(self):
self.mod.write(dedent(' def a_func(param1, param2):\n p = param1 + param2\n var1 = 1\n var2 = 1\n a_func(var1, var2)\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual(dedent(' var1 = 1\n var2 = 1\n p = var1 + var2\n '), self.mod.read())
def test_parameters_renaming_for_passed_constants(self):
self.mod.write(dedent(' def a_func(param):\n print(param)\n a_func(1)\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual('print(1)\n', self.mod.read())
def test_parameters_renaming_for_passed_statements(self):
self.mod.write(dedent(' def a_func(param):\n print(param)\n a_func((1 + 2) / 3)\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual(dedent(' print((1 + 2) / 3)\n '), self.mod.read())
def test_simple_parameters_renam_for_multiple_params_using_keywords(self):
self.mod.write(dedent(' def a_func(param1, param2):\n p = param1 + param2\n var1 = 1\n var2 = 1\n a_func(param2=var1, param1=var2)\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual(dedent(' var1 = 1\n var2 = 1\n p = var2 + var1\n '), self.mod.read())
def test_simple_params_renam_for_multi_params_using_mixed_keywords(self):
self.mod.write(dedent(' def a_func(param1, param2):\n p = param1 + param2\n var1 = 1\n var2 = 1\n a_func(var2, param2=var1)\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual(dedent(' var1 = 1\n var2 = 1\n p = var2 + var1\n '), self.mod.read())
def test_simple_putting_in_default_arguments(self):
self.mod.write(dedent(' def a_func(param=None):\n print(param)\n a_func()\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual('print(None)\n', self.mod.read())
def test_overriding_default_arguments(self):
self.mod.write(dedent(' def a_func(param1=1, param2=2):\n print(param1, param2)\n a_func(param2=3)\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual('print(1, 3)\n', self.mod.read())
def test_arguments_containing_comparisons(self):
self.mod.write(dedent(' def a_func(param1, param2, param3):\n param2.name\n a_func(2 <= 1, item, True)\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual('item.name\n', self.mod.read())
def test_badly_formatted_text(self):
self.mod.write(dedent(' def a_func ( param1 = 1 ,param2 = 2 ) :\n print(param1, param2)\n a_func ( param2\n = 3 )\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual('print(1, 3)\n', self.mod.read())
def test_passing_first_arguments_for_methods(self):
a_class = dedent(' class A(object):\n def __init__(self):\n self.var = 1\n self.a_func(self.var)\n def a_func(self, param):\n print(param)\n ')
self.mod.write(a_class)
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
expected = dedent(' class A(object):\n def __init__(self):\n self.var = 1\n print(self.var)\n ')
self.assertEqual(expected, self.mod.read())
def test_passing_first_arguments_for_methods2(self):
a_class = dedent(' class A(object):\n def __init__(self):\n self.var = 1\n def a_func(self, param):\n print(param, self.var)\n an_a = A()\n an_a.a_func(1)\n ')
self.mod.write(a_class)
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
expected = dedent(' class A(object):\n def __init__(self):\n self.var = 1\n an_a = A()\n print(1, an_a.var)\n ')
self.assertEqual(expected, self.mod.read())
def test_passing_first_arguments_for_methods3(self):
a_class = dedent(' class A(object):\n def __init__(self):\n self.var = 1\n def a_func(self, param):\n print(param, self.var)\n an_a = A()\n A.a_func(an_a, 1)\n ')
self.mod.write(a_class)
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
expected = dedent(' class A(object):\n def __init__(self):\n self.var = 1\n an_a = A()\n print(1, an_a.var)\n ')
self.assertEqual(expected, self.mod.read())
def test_inlining_staticmethods(self):
a_class = dedent(' class A(object):\n \n def a_func(param):\n print(param)\n A.a_func(1)\n ')
self.mod.write(a_class)
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
expected = dedent(' class A(object):\n pass\n print(1)\n ')
self.assertEqual(expected, self.mod.read())
def test_static_methods2(self):
a_class = dedent(' class A(object):\n var = 10\n \n def a_func(param):\n print(param)\n an_a = A()\n an_a.a_func(1)\n A.a_func(2)\n ')
self.mod.write(a_class)
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
expected = dedent(' class A(object):\n var = 10\n an_a = A()\n print(1)\n print(2)\n ')
self.assertEqual(expected, self.mod.read())
def test_inlining_classmethods(self):
a_class = dedent(' class A(object):\n \n def a_func(cls, param):\n print(param)\n A.a_func(1)\n ')
self.mod.write(a_class)
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
expected = dedent(' class A(object):\n pass\n print(1)\n ')
self.assertEqual(expected, self.mod.read())
def test_inlining_classmethods2(self):
a_class = dedent(' class A(object):\n \n def a_func(cls, param):\n return cls\n print(A.a_func(1))\n ')
self.mod.write(a_class)
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
expected = dedent(' class A(object):\n pass\n print(A)\n ')
self.assertEqual(expected, self.mod.read())
def test_simple_return_values_and_inlining_functions(self):
self.mod.write(dedent(' def a_func():\n return 1\n a = a_func()\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual('a = 1\n', self.mod.read())
def test_simple_return_values_and_inlining_lonely_functions(self):
self.mod.write(dedent(' def a_func():\n return 1\n a_func()\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual('1\n', self.mod.read())
def test_empty_returns_and_inlining_lonely_functions(self):
self.mod.write(dedent(' def a_func():\n if True:\n return\n a_func()\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual(dedent(' if True:\n pass\n '), self.mod.read())
def test_multiple_returns(self):
self.mod.write(dedent(' def less_than_five(var):\n if var < 5:\n return True\n return False\n a = less_than_five(2)\n '))
with self.assertRaises(rope.base.exceptions.RefactoringError):
self._inline2(self.mod, (self.mod.read().index('less') + 1))
def test_multiple_returns_and_not_using_the_value(self):
self.mod.write(dedent(' def less_than_five(var):\n if var < 5:\n return True\n return False\n less_than_five(2)\n '))
self._inline2(self.mod, (self.mod.read().index('less') + 1))
self.assertEqual(dedent(' if 2 < 5:\n True\n False\n '), self.mod.read())
def test_raising_exception_for_list_arguments(self):
self.mod.write(dedent(' def a_func(*args):\n print(args)\n a_func(1)\n '))
with self.assertRaises(rope.base.exceptions.RefactoringError):
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
def test_raising_exception_for_list_keywods(self):
self.mod.write(dedent(' def a_func(**kwds):\n print(kwds)\n a_func(n=1)\n '))
with self.assertRaises(rope.base.exceptions.RefactoringError):
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
def test_function_parameters_and_returns_in_other_functions(self):
code = dedent(' def a_func(param1, param2):\n return param1 + param2\n range(a_func(20, param2=abs(10)))\n ')
self.mod.write(code)
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual('range(20 + abs(10))\n', self.mod.read())
def test_function_references_other_than_call(self):
self.mod.write(dedent(' def a_func(param):\n print(param)\n f = a_func\n '))
with self.assertRaises(rope.base.exceptions.RefactoringError):
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
def test_function_referencing_itself(self):
self.mod.write(dedent(' def a_func(var):\n func = a_func\n '))
with self.assertRaises(rope.base.exceptions.RefactoringError):
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
def test_recursive_functions(self):
self.mod.write(dedent(' def a_func(var):\n a_func(var)\n '))
with self.assertRaises(rope.base.exceptions.RefactoringError):
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
def xxx_test_inlining_function_default_parameters(self):
self.mod.write(dedent(' def a_func(p1=1):\n pass\n a_func()\n '))
self._inline2(self.mod, (self.mod.read().index('p1') + 1))
self.assertEqual(dedent(' def a_func(p1=1):\n pass\n a_func()\n '), self.mod.read())
def test_simple_inlining_after_extra_indented_lines(self):
self.mod.write(dedent(' def a_func():\n for i in range(10):\n pass\n if True:\n pass\n a_func()\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual(dedent(' if True:\n pass\n for i in range(10):\n pass\n '), self.mod.read())
def test_inlining_a_function_with_pydoc(self):
self.mod.write(dedent(' def a_func():\n """docs"""\n a = 1\n a_func()'))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual('a = 1\n', self.mod.read())
def test_inlining_methods(self):
self.mod.write(dedent(" class A(object):\n name = 'hey'\n def get_name(self):\n return self.name\n a = A()\n name = a.get_name()\n "))
self._inline2(self.mod, (self.mod.read().rindex('get_name') + 1))
self.assertEqual(dedent(" class A(object):\n name = 'hey'\n a = A()\n name = a.name\n "), self.mod.read())
def test_simple_returns_with_backslashes(self):
    """Inlining a function whose return value spans a backslash continuation."""
    self.mod.write(dedent(' def a_func():\n return 1\\\n + 2\n a = a_func()\n '))
    self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
    # BUG FIX: the old call passed three positional arguments to assertEqual,
    # so self.mod.read() was consumed as the failure *message* and the
    # refactoring output was never actually compared against anything.
    self.assertEqual('a = 1\\\n + 2\n', self.mod.read())
def test_a_function_with_pass_body(self):
self.mod.write(dedent(' def a_func():\n print(1)\n a = a_func()\n '))
self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
self.assertEqual(dedent(' print(1)\n a = None\n '), self.mod.read())
def test_inlining_the_last_method_of_a_class(self):
self.mod.write(dedent(' class A(object):\n def a_func(self):\n pass\n '))
self._inline2(self.mod, (self.mod.read().rindex('a_func') + 1))
self.assertEqual(dedent(' class A(object):\n pass\n '), self.mod.read())
def test_adding_needed_imports_in_the_dest_module(self):
self.mod.write(dedent(' import sys\n\n def ver():\n print(sys.version)\n '))
self.mod2.write(dedent(' import mod\n\n mod.ver()'))
self._inline2(self.mod, (self.mod.read().index('ver') + 1))
self.assertEqual(dedent(' import mod\n import sys\n\n print(sys.version)\n '), self.mod2.read())
def test_adding_needed_imports_in_the_dest_module_removing_selfs(self):
self.mod.write(dedent(' import mod2\n\n def f():\n print(mod2.var)\n '))
self.mod2.write(dedent(' import mod\n\n var = 1\n mod.f()\n '))
self._inline2(self.mod, (self.mod.read().index('f(') + 1))
self.assertEqual(dedent(' import mod\n\n var = 1\n print(var)\n '), self.mod2.read())
def test_handling_relative_imports_when_inlining(self):
pkg = testutils.create_package(self.project, 'pkg')
mod3 = testutils.create_module(self.project, 'mod3', pkg)
mod4 = testutils.create_module(self.project, 'mod4', pkg)
mod4.write('var = 1\n')
mod3.write(dedent(' from . import mod4\n\n def f():\n print(mod4.var)\n '))
self.mod.write(dedent(' import pkg.mod3\n\n pkg.mod3.f()\n '))
self._inline2(self.mod, (self.mod.read().index('f(') + 1))
self.assertTrue(('\n\nprint(mod4.var)\n' in self.mod.read()))
def test_adding_needed_imports_for_elements_in_source(self):
self.mod.write(dedent(' def f1():\n return f2()\n def f2():\n return 1\n '))
self.mod2.write(dedent(' import mod\n\n print(mod.f1())\n '))
self._inline2(self.mod, (self.mod.read().index('f1') + 1))
self.assertEqual(dedent(' import mod\n from mod import f2\n\n print(f2())\n '), self.mod2.read())
def test_relative_imports_and_changing_inlining_body(self):
pkg = testutils.create_package(self.project, 'pkg')
mod3 = testutils.create_module(self.project, 'mod3', pkg)
mod4 = testutils.create_module(self.project, 'mod4', pkg)
mod4.write('var = 1\n')
mod3.write(dedent(' import mod4\n\n def f():\n print(mod4.var)\n '))
self.mod.write(dedent(' import pkg.mod3\n\n pkg.mod3.f()\n '))
self._inline2(self.mod, (self.mod.read().index('f(') + 1))
self.assertEqual(dedent(' import pkg.mod3\n import pkg.mod4\n\n print(pkg.mod4.var)\n '), self.mod.read())
def test_inlining_with_different_returns(self):
self.mod.write(dedent(' def f(p):\n return p\n print(f(1))\n print(f(2))\n print(f(1))\n '))
self._inline2(self.mod, (self.mod.read().index('f(') + 1))
self.assertEqual(dedent(' print(1)\n print(2)\n print(1)\n '), self.mod.read())
def test_not_removing_definition_for_variables(self):
code = dedent(' a_var = 10\n another_var = a_var\n ')
refactored = self._inline(code, (code.index('a_var') + 1), remove=False)
self.assertEqual(dedent(' a_var = 10\n another_var = 10\n '), refactored)
def test_not_removing_definition_for_methods(self):
code = dedent(' def func():\n print(1)\n\n func()\n ')
refactored = self._inline(code, (code.index('func') + 1), remove=False)
self.assertEqual(dedent(' def func():\n print(1)\n\n print(1)\n '), refactored)
def test_only_current_for_methods(self):
code = dedent(' def func():\n print(1)\n\n func()\n func()\n ')
refactored = self._inline(code, (code.rindex('func') + 1), remove=False, only_current=True)
self.assertEqual(dedent(' def func():\n print(1)\n\n func()\n print(1)\n '), refactored)
def test_only_current_for_variables(self):
code = dedent(' one = 1\n\n a = one\n b = one\n ')
refactored = self._inline(code, (code.rindex('one') + 1), remove=False, only_current=True)
self.assertEqual(dedent(' one = 1\n\n a = one\n b = 1\n '), refactored)
def test_inlining_one_line_functions(self):
code = dedent(' def f(): return 1\n var = f()\n ')
refactored = self._inline(code, code.rindex('f'))
self.assertEqual('var = 1\n', refactored)
def test_inlining_one_line_functions_with_breaks(self):
code = dedent(' def f(\n p): return p\n var = f(1)\n ')
refactored = self._inline(code, code.rindex('f'))
self.assertEqual('var = 1\n', refactored)
def test_inlining_one_line_functions_with_breaks2(self):
code = dedent(' def f(\n ): return 1\n var = f()\n ')
refactored = self._inline(code, code.rindex('f'))
self.assertEqual('var = 1\n', refactored)
def test_resources_parameter(self):
self.mod.write(dedent(' def a_func():\n print(1)\n '))
mod1 = testutils.create_module(self.project, 'mod1')
mod1.write(dedent(' import mod\n mod.a_func()\n '))
self._inline2(self.mod, self.mod.read().index('a_func'), resources=[self.mod])
self.assertEqual('', self.mod.read())
self.assertEqual(dedent(' import mod\n mod.a_func()\n '), mod1.read())
def test_inlining_parameters(self):
code = dedent(' def f(p=1):\n pass\n f()\n ')
result = self._inline(code, code.index('p'))
self.assertEqual(dedent(' def f(p=1):\n pass\n f(1)\n '), result)
def test_inlining_function_with_line_breaks_in_args(self):
code = dedent(' def f(p): return p\n var = f(1 +\n 1)\n ')
refactored = self._inline(code, code.rindex('f'))
self.assertEqual('var = 1 + 1\n', refactored)
def test_inlining_variables_before_comparison(self):
code = 'start = 1\nprint(start <= 2)\n'
refactored = self._inline(code, code.index('start'))
self.assertEqual('print(1 <= 2)\n', refactored)
def test_inlining_variables_in_other_modules(self):
self.mod.write('myvar = 1\n')
self.mod2.write(dedent(' import mod\n print(mod.myvar)\n '))
self._inline2(self.mod, 2)
self.assertEqual(dedent(' import mod\n print(1)\n '), self.mod2.read())
def test_inlining_variables_and_back_importing(self):
    # When the inlined value references another name from mod, an import of
    # that name must be added to the client module.
    self.mod.write(dedent(' mainvar = 1\n myvar = mainvar\n '))
    self.mod2.write(dedent(' import mod\n print(mod.myvar)\n '))
    self._inline2(self.mod, self.mod.read().index('myvar'))
    expected = dedent(' import mod\n from mod import mainvar\n print(mainvar)\n ')
    self.assertEqual(expected, self.mod2.read())
def test_inlining_variables_and_importing_used_imports(self):
    # Imports used by the inlined expression are carried into the client module.
    self.mod.write(dedent(' import sys\n myvar = sys.argv\n '))
    self.mod2.write(dedent(' import mod\n print(mod.myvar)\n '))
    self._inline2(self.mod, self.mod.read().index('myvar'))
    expected = dedent(' import mod\n import sys\n print(sys.argv)\n ')
    self.assertEqual(expected, self.mod2.read())
def test_inlining_variables_and_removing_old_froms(self):
    # After inlining, the now-unused 'from mod import var' is removed.
    self.mod.write('var = 1\n')
    self.mod2.write(dedent(' from mod import var\n print(var)\n '))
    self._inline2(self.mod2, self.mod2.read().rindex('var'))
    self.assertEqual('print(1)\n', self.mod2.read())
def test_inlining_method_and_removing_old_froms(self):
    # Same as the variable case, but for an imported function.
    self.mod.write(dedent(' def f(): return 1\n '))
    self.mod2.write(dedent(' from mod import f\n print(f())\n '))
    self._inline2(self.mod2, self.mod2.read().rindex('f'))
    self.assertEqual('print(1)\n', self.mod2.read())
def test_inlining_functions_in_other_modules_and_only_current(self):
    # only_current=True inlines just the occurrence at the offset; the
    # definition and all other call sites stay intact.
    code1 = dedent(' def f():\n return 1\n print(f())\n ')
    code2 = dedent(' import mod\n print(mod.f())\n print(mod.f())\n ')
    self.mod.write(code1)
    self.mod2.write(code2)
    self._inline2(self.mod2, self.mod2.read().rindex('f'), remove=False, only_current=True)
    expected2 = dedent(' import mod\n print(mod.f())\n print(1)\n ')
    self.assertEqual(code1, self.mod.read())
    self.assertEqual(expected2, self.mod2.read())
def test_inlining_variables_in_other_modules_and_only_current(self):
    # only_current=True for a variable: only the last usage is replaced.
    code1 = dedent(' var = 1\n print(var)\n ')
    code2 = dedent(' import mod\n print(mod.var)\n print(mod.var)\n ')
    self.mod.write(code1)
    self.mod2.write(code2)
    self._inline2(self.mod2, self.mod2.read().rindex('var'), remove=False, only_current=True)
    expected2 = 'import mod\nprint(mod.var)\nprint(1)\n'
    self.assertEqual(code1, self.mod.read())
    self.assertEqual(expected2, self.mod2.read())
def test_inlining_does_not_change_string_constants(self):
    # With docs=False, an occurrence inside a string literal is left alone.
    code = dedent(' var = 1\n print("var\\\n ")\n ')
    expected = dedent(' var = 1\n print("var\\\n ")\n ')
    refactored = self._inline(code, code.rindex('var'), remove=False, only_current=True, docs=False)
    self.assertEqual(expected, refactored)
def test_inlining_does_change_string_constants_if_docs_is_set(self):
    # With docs=True, occurrences inside strings/docstrings are rewritten too.
    code = dedent(' var = 1\n print("var\\\n ")\n ')
    expected = dedent(' var = 1\n print("1\\\n ")\n ')
    refactored = self._inline(code, code.rindex('var'), remove=False, only_current=True, docs=True)
    self.assertEqual(expected, refactored)
@testutils.only_for_versions_higher('3.6')
def test_inlining_into_format_string(self):
    # f-string interpolation fields are code, so inlining must rewrite them.
    # NOTE(review): the decorator line was mangled in this copy (bare
    # "_for_versions_higher('3.6')" — a NameError at class-creation time);
    # restored to the testutils version guard the test clearly intends.
    code = dedent(' var = 123\n print(f"{var}")\n ')
    expected = dedent(' print(f"{123}")\n ')
    refactored = self._inline(code, code.rindex('var'))
    self.assertEqual(expected, refactored)
@testutils.only_for_versions_higher('3.6')
def test_inlining_into_format_string_containing_quotes(self):
    # Inlining inside f-strings must respect all quoting styles.
    # NOTE(review): decorator restored — the original line was the mangled
    # bare expression "_for_versions_higher('3.6')" (NameError).
    code = dedent(' var = 123\n print(f" \'{var}\' ")\n print(f""" "{var}" """)\n print(f\' "{var}" \')\n ')
    expected = dedent(' print(f" \'{123}\' ")\n print(f""" "{123}" """)\n print(f\' "{123}" \')\n ')
    refactored = self._inline(code, code.rindex('var'))
    self.assertEqual(expected, refactored)
def test_parameters_with_the_same_name_as_passed_with_type_hints(self):
    # Annotated parameter sharing the argument's name: no renaming needed.
    self.mod.write(dedent(' def a_func(var: int):\n print(var)\n var = 1\n a_func(var)\n '))
    self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
    self.assertEqual(dedent(' var = 1\n print(var)\n '), self.mod.read())
def test_parameters_with_the_same_name_as_passed_as_kwargs_with_type_hints(self):
    # Same as above, but the argument is passed as a keyword (var=var).
    self.mod.write(dedent(' def a_func(var: int):\n print(var)\n var = 1\n a_func(var=var)\n '))
    self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
    self.assertEqual(dedent(' var = 1\n print(var)\n '), self.mod.read())
def test_simple_parameters_renaming_with_type_hints(self):
    # Annotated parameter 'param' is renamed to the passed variable 'var'.
    self.mod.write(dedent(' def a_func(param: int):\n print(param)\n var = 1\n a_func(var)\n '))
    self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
    self.assertEqual(dedent(' var = 1\n print(var)\n '), self.mod.read())
def test_simple_parameters_renaming_for_multiple_params_with_type_hints(self):
    # Multiple parameters (one annotated) map positionally onto the arguments.
    self.mod.write(dedent(' def a_func(param1, param2: int):\n p = param1 + param2\n var1 = 1\n var2 = 1\n a_func(var1, var2)\n '))
    self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
    self.assertEqual(dedent(' var1 = 1\n var2 = 1\n p = var1 + var2\n '), self.mod.read())
def test_parameters_renaming_for_passed_constants_with_type_hints(self):
    # A literal argument is substituted directly for the annotated parameter.
    self.mod.write(dedent(' def a_func(param: int):\n print(param)\n a_func(1)\n '))
    self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
    self.assertEqual('print(1)\n', self.mod.read())
def test_parameters_renaming_for_passed_statements_with_type_hints(self):
    # A compound argument expression is substituted verbatim (kept parenthesized).
    self.mod.write(dedent(' def a_func(param: int):\n print(param)\n a_func((1 + 2) / 3)\n '))
    self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
    self.assertEqual(dedent(' print((1 + 2) / 3)\n '), self.mod.read())
def test_simple_parameters_renaming_for_multiple_params_using_keywords_with_type_hints(self):
    # Keyword arguments map by name, not position (param2=var1, param1=var2).
    self.mod.write(dedent(' def a_func(param1, param2: int):\n p = param1 + param2\n var1 = 1\n var2 = 1\n a_func(param2=var1, param1=var2)\n '))
    self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
    self.assertEqual(dedent(' var1 = 1\n var2 = 1\n p = var2 + var1\n '), self.mod.read())
def test_simple_params_renaming_for_multi_params_using_mixed_keywords_with_type_hints(self):
    # Mixed positional + keyword arguments resolve to the right parameters.
    self.mod.write(dedent(' def a_func(param1, param2: int):\n p = param1 + param2\n var1 = 1\n var2 = 1\n a_func(var2, param2=var1)\n '))
    self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
    self.assertEqual(dedent(' var1 = 1\n var2 = 1\n p = var2 + var1\n '), self.mod.read())
def test_simple_putting_in_default_arguments_with_type_hints(self):
    # A missing argument falls back to the annotated default (None).
    self.mod.write(dedent(' def a_func(param: Optional[int] = None):\n print(param)\n a_func()\n '))
    self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
    self.assertEqual('print(None)\n', self.mod.read())
def test_overriding_default_arguments_with_type_hints(self):
    # An explicit keyword overrides its default; the other default is kept.
    self.mod.write(dedent(' def a_func(param1=1, param2: int = 2):\n print(param1, param2)\n a_func(param2=3)\n '))
    self._inline2(self.mod, (self.mod.read().index('a_func') + 1))
    self.assertEqual('print(1, 3)\n', self.mod.read())
def test_dictionary_with_inline_comment(self):
    # A multi-line dict literal with a trailing '# noqa' comment must be
    # inlined without dropping or corrupting the comment.
    code = dedent(' myvar = {\n "key": "value", # noqa\n }\n print(myvar)\n ')
    refactored = self._inline(code, (code.index('myvar') + 1))
    expected = dedent(' print({\n "key": "value", # noqa\n })\n ')
    self.assertEqual(expected, refactored)
class InvertedResidual(nn.Module):
    """MobileNetV2-style inverted residual block.

    Expand (1x1) -> depthwise 3x3 -> project (1x1), with an identity skip
    connection when stride is 1 and the channel count is unchanged.
    """

    def __init__(self, inp, oup, stride, expand_ratio, dilation=1):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        # Residual path is only valid when the spatial size and channel
        # count are preserved.
        self.use_res_connect = stride == 1 and inp == oup
        # stride 1 -> pad 1, stride 2 -> pad 0; dilation > 1 overrides the
        # padding so the receptive field stays aligned.
        padding = dilation if dilation > 1 else 2 - stride
        hidden = inp * expand_ratio
        self.conv = nn.Sequential(
            # 1x1 pointwise expansion
            nn.Conv2d(inp, hidden, 1, 1, 0, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU6(inplace=True),
            # 3x3 depthwise (groups == channels)
            nn.Conv2d(hidden, hidden, 3, stride, padding,
                      dilation=dilation, groups=hidden, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU6(inplace=True),
            # 1x1 linear projection (no activation)
            nn.Conv2d(hidden, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        )

    def forward(self, x):
        out = self.conv(x)
        return x + out if self.use_res_connect else out
def test_td(dataloader, model, loss_fn):
size = len(dataloader.dataset)
num_batches = len(dataloader)
model.eval()
(test_loss, correct) = (0, 0)
with torch.no_grad():
for batch in dataloader:
(X, y) = (batch['images'].contiguous(), batch['targets'].contiguous())
pred = model(X)
test_loss += loss_fn(pred, y).item()
correct += (pred.argmax(1) == y).type(torch.float).sum().item()
test_loss /= num_batches
correct /= size
print(f'''Test Error:
Accuracy: {(100 * correct):>0.1f}%, Avg loss: {test_loss:>8f}
''') |
def test_system_task_crash_ExceptionGroup() -> None:
    """A system task whose nursery collects several crashes must surface
    them as a TrioInternalError caused by an ExceptionGroup of both."""

    async def raise_key_error() -> NoReturn:
        raise KeyError

    async def raise_value_error() -> NoReturn:
        raise ValueError

    async def system_task() -> None:
        # Run both crashers concurrently so the nursery aggregates them.
        async with _core.open_nursery() as nursery:
            nursery.start_soon(raise_key_error)
            nursery.start_soon(raise_value_error)

    async def main() -> None:
        _core.spawn_system_task(system_task)
        await sleep_forever()

    with pytest.raises(_core.TrioInternalError) as excinfo:
        _core.run(main)

    cause = excinfo.value.__cause__
    assert isinstance(cause, ExceptionGroup)
    assert len(cause.exceptions) == 2
    for sub_exc in cause.exceptions:
        assert isinstance(sub_exc, (KeyError, ValueError))
def _n(solver, partInfo, subname, shape, retAll=False):
    # Return (creating and caching on first use) the solver entity for the
    # normal of the given element.  When called with no solver it acts as a
    # validator: returns an error-message string for unsupported shapes, or
    # None when the shape is acceptable.
    if (not solver):
        if ((not utils.isPlanar(shape)) and (not utils.isCylindricalPlane(shape))):
            return 'an edge or face with a planar or cylindrical surface'
        if utils.isDraftWire(partInfo):
            # Draft wires cannot carry a transformable placement; warn only.
            logger.warn(translate('asm3', 'Use draft wire {} for normal. Draft wire placement is not transformable'), partInfo.PartName)
        return
    # Entities are cached per element under '<subname>.n'.
    key = (subname + '.n')
    h = partInfo.EntityMap.get(key, None)
    system = solver.system
    if h:
        system.log('cache {}: {}', key, h)
    else:
        if utils.isDraftCircle(partInfo.Part):
            _prepareDraftCircle(solver, partInfo)
        rot = utils.getElementRotation(shape)
        nameTag = ((partInfo.PartName + '.') + key)
        # NOTE: system.NameTag is stateful — each assignment below labels the
        # *next* entity added, so the ordering of these statements matters.
        system.NameTag = nameTag
        e = system.addNormal3dV(*utils.getNormal(rot))
        system.NameTag += 't'
        nz = system.addTransform(e, *partInfo.Params, group=partInfo.Group)
        # p0: origin point of the element; v0 is that point in local frame.
        p0 = _p(solver, partInfo, subname, shape, True)
        v0 = rot.inverted().multVec(p0.vector)
        # p1: point one unit along the local z axis, to define a direction line.
        vz = rot.multVec(FreeCAD.Vector(v0.x, v0.y, (v0.z + 1)))
        system.NameTag = (nameTag + 'p1')
        e = system.addPoint3dV(*vz)
        system.NameTag += 't'
        p1 = system.addTransform(e, *partInfo.Params, group=partInfo.Group)
        system.NameTag = (nameTag + 'l')
        ln = system.addLineSegment(p0.entity, p1, group=partInfo.Group)
        # px: point one unit along the local x axis, used for orientation.
        vx = rot.multVec(FreeCAD.Vector((v0.x + 1), v0.y, v0.z))
        system.NameTag = (nameTag + 'px')
        e = system.addPoint3dV(*vx)
        system.NameTag += 't'
        px = system.addTransform(e, *partInfo.Params, group=partInfo.Group)
        h = NormalInfo(entity=nz, rot=rot, params=partInfo.Params, p0=p0.entity, ln=ln, p1=p1, px=px, vx=vx, pla=partInfo.Placement)
        system.log('{}: {},{}', system.NameTag, h, partInfo.Group)
        partInfo.EntityMap[key] = h
    # retAll=True returns the full NormalInfo record; otherwise just the entity.
    return (h if retAll else h.entity)
class RayRetriever():
    """Lazily constructed wrapper around RagRetriever for use inside a
    Ray worker: build once, load the index explicitly, then retrieve."""

    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        """Build the underlying retriever exactly once (idempotent)."""
        if self.initialized:
            return
        self.retriever = RagRetriever(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.initialized = True

    def init_retrieval(self):
        """Load the retrieval index into memory."""
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Return (doc_ids, retrieved_doc_embeds) for the query states."""
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class PlPgsqlLexer(PostgresBase, RegexLexer):
    """Lexer for the PL/pgSQL procedural language, derived from the plain
    PostgreSQL lexer by extending its keyword set and adding PL/pgSQL
    syntax (variables, assignment, labels, compile options)."""

    name = 'PL/pgSQL'
    aliases = ['plpgsql']
    mimetypes = ['text/x-plpgsql']
    # BUG FIX: this literal was truncated to an unterminated quote
    # ("url = '"), a SyntaxError; restored to the PostgreSQL docs link.
    url = 'https://www.postgresql.org/docs/current/plpgsql.html'
    version_added = '1.5'
    flags = re.IGNORECASE

    # Copy the parent lexer's states (shallow-copying each state list) so the
    # in-place edits below do not mutate PostgresLexer.tokens.
    tokens = {name: state[:] for (name, state) in PostgresLexer.tokens.items()}

    # Replace the generic SQL keyword rule with one that also matches the
    # PL/pgSQL keywords.
    for (i, pattern) in enumerate(tokens['root']):
        if (pattern[1] == Keyword):
            tokens['root'][i] = (words((KEYWORDS + PLPGSQL_KEYWORDS), suffix='\\b'), Keyword)
            del i
            break
    else:
        assert 0, 'SQL keywords not found'

    # PL/pgSQL extras, checked before the inherited rules:
    # %type markers, := assignment, <<labels>>, and #option pseudo-keywords.
    tokens['root'][:0] = [('\\%[a-z]\\w*\\b', Name.Builtin), (':=', Operator), ('\\<\\<[a-z]\\w*\\>\\>', Name.Label), ('\\#[a-z]\\w*\\b', Keyword.Pseudo)]
def learn_embeddings(split=10):
    """Train the heterogeneous skip-gram model on the pre-dumped context
    pairs and return its learned embeddings.

    split: number of pickled input/output shards to load and stack.
    """

    def _load_shards(suffix):
        # BUG FIX: the original used pickle.load(open(path)) — text mode,
        # which fails on Python 3 (pickle requires binary) and leaked the
        # file handles.  Open in 'rb' inside a context manager instead.
        shards = []
        for i in range(split):
            path = ((args.temp_dir + args.data_name) + suffix) + str(i)
            with open(path, 'rb') as fh:
                shards.append(pickle.load(fh))
        return t.LongTensor(np.vstack(shards))

    # Pre-trained node embeddings, rescaled; half of `dimensions` per view.
    _data = (args.rescale * utils.load_emb(args.temp_dir, args.data_name, args.pre_train_path, int((args.dimensions / 2)), config['nodes']))
    _network = tdata.TensorDataset(_load_shards('_input.p.'), _load_shards('_output.p.'))
    model = SkipGram({'emb_size': int((args.dimensions / 2)), 'window_size': 1, 'batch_size': args.batch_size, 'iter': args.iter, 'neg_ratio': 5, 'lr_ratio': args.lrr, 'lr': args.lr, 'network': _network, 'pre_train': _data, 'node_types': config['nodes'], 'edge_types': config['edges'], 'graph_name': args.data_name, 'dump_timer': args.dump_timer, 'data_dir': args.temp_dir, 'mode': args.op, 'map_mode': args.map_func, 'fine_tune': args.fine_tune, 'model_dir': args.model_dir, 'log_dir': args.log_dir})
    model.train()
    return model.output()
(7)
# NOTE(review): the bare `(7)` above is a no-op expression statement — it
# looks like the remnant of a mangled decorator (e.g. `@utils.cached(7)`);
# confirm against the original source before removing it.
def real_code(source):
    """Return *source* with comments and string literals blanked out and
    newlines inside brackets flattened, so naive textual scans only see
    real code at its original offsets."""
    # Pass 1: overwrite comments and non-f-string literals with spaces of
    # the same length (f-strings are kept — they contain executable code).
    changes = codeanalyze.ChangeCollector(source)
    for start, end, matchgroups in ignored_regions(source):
        if source[start] == '#':
            blanked = ' ' * (end - start)
        elif 'f' in matchgroups.get('prefix', '').lower():
            blanked = None
        else:
            # Keep the surrounding quotes so offsets stay stable.
            blanked = '"%s"' % (' ' * (end - start - 2))
        if blanked is not None:
            changes.add_change(start, end, blanked)
    source = changes.get_changed() or source

    # Pass 2: replace newlines that occur inside (), [] or {} with spaces.
    changes = codeanalyze.ChangeCollector(source)
    depth = 0
    for match in _parens.finditer(source):
        token = match.group()
        if token in '({[':
            depth += 1
        elif token in ')}]':
            depth -= 1
        elif token == '\n' and depth > 0:
            changes.add_change(match.start(), match.start() + 1, ' ')
    source = changes.get_changed() or source

    # Normalise explicit continuations, tabs and statement separators.
    return source.replace('\\\n', ' ').replace('\t', ' ').replace(';', '\n')
@pytest.mark.end_to_end()
@pytest.mark.parametrize('definition', [" = PythonNode(value=data['dependency'], hash=True)", ": Annotated[Any, PythonNode(value=data['dependency'], hash=True)]"])
def test_task_with_hashed_python_node(runner, tmp_path, definition):
    # NOTE(review): the two decorator lines were mangled in this copy
    # (".end_to_end()" / ".parametrize(...)" with the "@pytest.mark" prefix
    # stripped — a SyntaxError at module level); restored.
    # A hashed PythonNode dependency must trigger a re-run when the backing
    # data file changes.
    source = f'''
    import json
    from pathlib import Path
    from pytask import Product, PythonNode
    from typing import Any
    from typing_extensions import Annotated

    data = json.loads(Path(__file__).parent.joinpath("data.json").read_text())

    def task_example(
        dependency{definition},
        path: Annotated[Path, Product] = Path("out.txt")
    ) -> None:
        path.write_text(dependency)
    '''
    tmp_path.joinpath('task_module.py').write_text(textwrap.dedent(source))
    tmp_path.joinpath('data.json').write_text('{"dependency": "hello"}')
    result = runner.invoke(cli, [tmp_path.as_posix()])
    assert (result.exit_code == ExitCode.OK)
    assert (tmp_path.joinpath('out.txt').read_text() == 'hello')
    tmp_path.joinpath('data.json').write_text('{"dependency": "world"}')
    result = runner.invoke(cli, [tmp_path.as_posix()])
    assert (result.exit_code == ExitCode.OK)
    assert (tmp_path.joinpath('out.txt').read_text() == 'world')
def _parse_panond(fbuf):
    # Parse an indentation-structured "key = value" file (2 spaces per level)
    # from the open file object *fbuf* into a nested dict.
    component_info = {}
    # dict_levels[d] is the dict currently open at indent depth d.
    dict_levels = [component_info]
    lines = fbuf.read().splitlines()
    # NOTE(review): the loop stops at len(lines) - 1 because it peeks at
    # lines[i + 1]; the final line is therefore never stored — confirm the
    # format guarantees a trailing blank/sentinel line.
    for i in range(0, (len(lines) - 1)):
        if (lines[i] == ''):
            continue
        # Indent depth of the current and the next line (2 spaces per level).
        indent_lvl_1 = ((len(lines[i]) - len(lines[i].lstrip(' '))) // 2)
        indent_lvl_2 = ((len(lines[(i + 1)]) - len(lines[(i + 1)].lstrip(' '))) // 2)
        line_data = lines[i].split('=')
        key = line_data[0].strip()
        if (len(line_data) > 1):
            value = _element_type(line_data[1].strip())
        else:
            # Bare key with no '=' acts as a section header.
            value = None
        if (indent_lvl_2 > indent_lvl_1):
            # Next line is deeper: open a new child dict under this key.
            current_level = dict_levels[indent_lvl_1]
            new_level = {}
            current_level[key] = new_level
            # Truncate deeper levels and push the new child.
            dict_levels = (dict_levels[:(indent_lvl_1 + 1)] + [new_level])
            current_level = dict_levels[(indent_lvl_1 + 1)]
            # NOTE(review): this also stores the key (with its scalar value)
            # *inside* the freshly created child dict — looks suspicious but
            # may be intentional self-reference; verify before changing.
            current_level[key] = value
        elif (indent_lvl_2 <= indent_lvl_1):
            # Same depth or dedent: plain assignment at the current level.
            current_level = dict_levels[indent_lvl_1]
            current_level[key] = value
    return component_info
class TwitterShell(Action):
    """Interactive shell action: reads command lines, feeds them through the
    normal command-line parser, and dispatches until exit or EOF."""

    def render_prompt(self, prompt):
        """Expand [colour] and [R] markup in the prompt into ANSI codes."""
        prompt = prompt.strip("'").replace("\\'", "'")
        for colour in ansi.COLOURS_NAMED:
            token = ('[%s]' % colour)
            if token in prompt:
                prompt = prompt.replace(token, ansiFormatter.cmdColourNamed(colour))
        return prompt.replace('[R]', ansiFormatter.cmdReset())

    def __call__(self, twitter, options):
        prompt = self.render_prompt(options.get('prompt', 'twitter> '))
        while True:
            options['action'] = ''
            try:
                parse_args(input(prompt).split(), options)
                action = options['action']
                if not action:
                    continue
                if action == 'exit':
                    raise SystemExit(0)
                if action == 'shell':
                    print('Sorry Xzibit does not work here!', file=sys.stderr)
                    continue
                if action == 'help':
                    print('\ntwitter> `action`\n\n The Shell accepts all the command line actions along with:\n\n exit Leave the twitter shell (^D may also be used)\n\n Full CMD Line help is appended below for your convenience.', file=sys.stderr)
                # 'help' (and every other action) is dispatched normally.
                Action()(twitter, options)
                options['action'] = ''
            except NoSuchActionError as e:
                print(e, file=sys.stderr)
            except KeyboardInterrupt:
                print('\n[Keyboard Interrupt]', file=sys.stderr)
            except EOFError:
                print(file=sys.stderr)
                if self.ask(subject='Leave'):
                    raise SystemExit(0)
                print('Excellent!', file=sys.stderr)
class _buffer_reader():
    """Adapter exposing a seekable Python buffer through a C-style read
    callback: called as (opaque, position, dest_pointer, size) and returns
    1 to signal success."""

    def __init__(self, buffer):
        self.buffer = buffer

    def __call__(self, _, position, p_buf, size):
        # Reinterpret the raw pointer as a fixed-size char array and fill it
        # in place from the requested offset of the backing buffer.
        dest = ctypes.cast(p_buf, ctypes.POINTER(ctypes.c_char * size))
        self.buffer.seek(position)
        self.buffer.readinto(dest.contents)
        return 1
class TextEncoder(torch.nn.Module):
    """BERT-based text encoder: contextual word embeddings -> multi-head
    self-attention -> additive-attention pooling into one text vector."""

    def __init__(self, bert_model, word_embedding_dim, num_attention_heads, query_vector_dim, dropout_rate, enable_gpu=True):
        super(TextEncoder, self).__init__()
        self.bert_model = bert_model
        self.dropout_rate = dropout_rate
        # Per-head key/value size is fixed at 20, so the pooled input width
        # is num_attention_heads * 20.
        self.multihead_attention = MultiHeadAttention(word_embedding_dim, num_attention_heads, 20, 20, enable_gpu)
        self.additive_attention = AdditiveAttention(num_attention_heads * 20, query_vector_dim)

    def forward(self, text, mask=None):
        # `text` packs [token ids | token-type ids | attention mask] along
        # dim 1 in three equal thirds.
        batch_size, total_len = text.shape
        seq_len = total_len // 3
        ids = torch.narrow(text, 1, 0, seq_len)
        type_ids = torch.narrow(text, 1, seq_len, seq_len)
        att_mask = torch.narrow(text, 1, 2 * seq_len, seq_len)
        # Hidden states of layer 8.  NOTE(review): assumes bert_model is
        # configured to return all hidden states at index 2 — confirm.
        hidden = self.bert_model(ids, type_ids, att_mask)[2][8]
        hidden = F.dropout(hidden, p=self.dropout_rate, training=self.training)
        attended = self.multihead_attention(hidden, hidden, hidden, mask)
        attended = F.dropout(attended, p=self.dropout_rate, training=self.training)
        return self.additive_attention(attended, mask)
def test_scene_to_pixmap_exporter_default_size_and_margin(view):
    """Exporter defaults: default size equals scene size plus a margin on
    each side (300x100 scene -> 318x118 with margin 9)."""
    item1 = BeePixmapItem(QtGui.QImage(100, 100, QtGui.QImage.Format.Format_RGB32))
    item1.setPos(QtCore.QPointF(0, 0))
    view.scene.addItem(item1)
    item2 = BeePixmapItem(QtGui.QImage(100, 100, QtGui.QImage.Format.Format_RGB32))
    # BUG FIX: the original repositioned item1 a second time (copy-paste
    # slip), leaving item2 at the origin; the scene extents happened to be
    # identical either way, but position the second item as intended.
    item2.setPos(QtCore.QPointF(200, 0))
    view.scene.addItem(item2)
    exporter = SceneToPixmapExporter(view.scene)
    assert (view.scene.sceneRect().size().toSize() == QtCore.QSize(300, 100))
    # BUG FIX: the original asserted (margin - 9) < 1e-06, which passes for
    # ANY margin <= 9; take the absolute difference.
    assert (abs(exporter.margin - 9) < 1e-06)
    assert (exporter.default_size == QtCore.QSize(318, 118))
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_create_email(db, client, username, password, project_id):
    # NOTE(review): the two decorator lines were mangled in this copy (bare
    # ".parametrize(...)" without the "@pytest.mark" prefix — a SyntaxError);
    # restored.
    # Posting an empty invite email: the expected status code depends on the
    # logged-in user's permission on the project.
    client.login(username=username, password=password)
    url = reverse(urlnames['list'], args=[project_id])
    data = {'email': '', 'role': 'guest'}
    response = client.post(url, data)
    if (project_id in add_invite_permission_map.get(username, [])):
        assert (response.status_code == 201)
    elif (project_id in view_invite_permission_map.get(username, [])):
        assert (response.status_code == 403)
    else:
        assert (response.status_code == 404)
def test_parametrize_with_shared(testdir):
    # NOTE(review): the embedded module source was mangled in this copy —
    # bare ".parametrize", "_like(a_duck)" instead of "@behaves_like(a_duck)",
    # and a blank line where "@fixture" stood — which is invalid Python for
    # the generated test module and would error at collection.  Reconstructed
    # so the parametrized shared-behavior suite yields 2 x 3 passing tests.
    testdir.makepyfile("""
        import pytest
        from pytest import fixture
        from pytest_describe import behaves_like

        def a_duck():
            def it_quacks(sound):
                assert sound == int(sound)


        @pytest.mark.parametrize('foo', (1, 2, 3))
        @behaves_like(a_duck)
        def describe_something_that_quacks():
            @fixture
            def sound(foo):
                return foo

        @pytest.mark.parametrize('foo', (1, 2, 3))
        @behaves_like(a_duck)
        def describe_something_that_barks():
            @fixture
            def sound(foo):
                return foo
    """)
    result = testdir.runpytest()
    result.assert_outcomes(passed=6)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.