code stringlengths 281 23.7M |
|---|
def request_wrap_timeout(func, url):
    """Invoke *func* with escalating timeouts, retrying on request timeouts.

    func -- a callable accepting a ``timeout=`` keyword (e.g. a partial of requests.get)
    url -- used only for logging and the final error message

    Returns whatever *func* returns on the first successful attempt.
    Raises RuntimeError when every attempt in the schedule timed out.
    """
    import requests
    # One attempt per entry; timeouts grow to give slow servers a chance.
    # BUG FIX: enumerate previously started at 0, so the first retry was
    # logged as "attempt 0"; start=1 makes the log human-accurate.
    for attempt, timeout in enumerate([10, 20, 40, 60, 60], start=1):
        try:
            return func(timeout=timeout)
        except requests.exceptions.Timeout as e:
            logger.warning('Request for %s timed-out (attempt %d). Retrying with a timeout of %d secs', url, attempt, timeout, exc_info=e)
    raise RuntimeError(f'Unable to fetch file {url}')
class SolverProcess:
    """Abstract wrapper running an external constraint solver as a subprocess.

    Subclasses override parse_general_options() to translate generic options
    into solver-specific command-line arguments. solve() runs the solver,
    parses its textual/XML output and records status, last solution, bound,
    wall-clock time and log file on the instance.
    """

    automatic_call = False  # set when solving was triggered by the -solve flag

    def __init__(self, *, name, command, cp):
        self.name = name
        self.command = command  # NOTE(review): this attribute shadows the command() method below
        self.cp = cp
        self.options = ''  # extra solver options accumulated via setting()
        self.stdout = None
        self.stderr = None
        self.last_command_wck = None  # wall-clock time of the last run (string from Stopwatch)
        self.log_filename_suffix = None
        self.n_executions = 0
        self.last_log = None  # path of the last log file
        self.last_solution = None  # last Instantiation parsed from solver output
        self.n_solutions = None
        self.bound = None  # best objective bound seen (COP only)
        self.status = None
        self.core = None  # extracted unsat core (extraction mode only)

    def command(self, _command):
        # NOTE(review): unreachable through instances -- __init__ assigns
        # self.command, which shadows this method; kept only for interface stability.
        self.command = _command

    def get_logger(self):
        return self.last_log

    def setting(self, value):
        """Append *value* (stripped, space-separated) to the option string; None is ignored."""
        if value is not None:
            value = str(value).strip()
            # BUG FIX: was `self.options = ...`, which silently discarded any
            # previously recorded options; the conditional clearly intends accumulation.
            self.options += (' ' + value) if self.options != '' else value

    def log_suffix(self, _extend_filename_logger):
        self.log_filename_suffix = _extend_filename_logger

    def parse_general_options(self, string_options, dict_options, dict_simplified_options):
        """Translate user-level options into solver-specific arguments (abstract)."""
        raise NotImplementedError('Must be overridden')

    def _solve(self, instance, string_options='', dict_options=None, dict_simplified_options=None, compiler=False, *, verbose=0, automatic=False, extraction=False):
        """Run the solver on (model, cop) *instance*; return a TypeStatus or None."""
        # BUG FIX: the dict() defaults were shared across calls and mutated below.
        dict_options = {} if dict_options is None else dict_options
        dict_simplified_options = {} if dict_simplified_options is None else dict_simplified_options
        model, cop = instance
        if extraction:
            self.switch_to_extraction()

        def _int_from(s, left):
            # Parse the integer that ends the line beginning at s[left:].
            right = left + s[left:].find('\n')
            left = right - 1
            while s[left].isdigit():
                left -= 1
            return int(s[left + 1:right])

        def _record_solution(roots, i):
            # Decode one <instantiation> element: child 0 lists variable names,
            # child 1 the corresponding values (with "v x k" run-length tokens).
            variables = []
            for token in roots[i][0].text.split():
                r = VarEntities.get_item_with_name(token)
                if isinstance(r, EVar):
                    variables.append(r.variable)
                elif isinstance(r, Variable):
                    variables.append(r)
                else:
                    for x in flatten(r.variables, keep_none=True):
                        variables.append(x)
            if i == 0:
                # First solution: reset the per-variable value histories.
                for x in variables:
                    if x:
                        x.values = []
            values = []
            for tok in roots[i][1].text.split():
                if 'x' in tok:
                    # Run-length token "v x k": value v repeated k times.
                    vk = tok.split('x')
                    assert len(vk) == 2
                    for _ in range(int(vk[1])):
                        values.append(vk[0])
                else:
                    values.append(tok)
            assert len(variables) == len(values)
            for i, _ in enumerate(values):
                if variables[i]:
                    if isinstance(variables[i], VariableInteger):
                        values[i] = int(values[i]) if values[i] != '*' else ANY
                    variables[i].value = values[i]
                    variables[i].values.append(values[i])
            return variables, values

        def extract_result_and_solution(stdout):
            # Map raw solver output to a TypeStatus, recording solution state.
            if extraction:
                left = stdout.rfind('c CORE')
                if left == -1:
                    return TypeStatus.UNKNOWN
                right = left + stdout[left:].find('\n')
                self.core = stdout[left:right]
                return TypeStatus.CORE
            if stdout.find('<unsatisfiable') != -1 or stdout.find('s UNSATISFIABLE') != -1:
                return TypeStatus.UNSAT
            if stdout.find('<instantiation') == -1 or stdout.find('</instantiation>') == -1:
                print(' Actually, the instance was not solved')
                return TypeStatus.UNKNOWN
            if 'limit=no' in string_options or ('limit_sols' in dict_simplified_options and int(dict_simplified_options['limit_sols']) > 1):
                # Several solutions requested: parse every <instantiation> block.
                roots = [etree.fromstring(('<instantiation' + tok + '</instantiation>').replace('\nv', ''), etree.XMLParser(remove_blank_text=True)) for tok in re.findall('<instantiation(.*?)</instantiation>', stdout)]
            else:
                # Only the last solution matters.
                left, right = stdout.rfind('<instantiation'), stdout.rfind('</instantiation>')
                roots = [etree.fromstring(stdout[left:right + len('</instantiation>')].replace('\nv', ''), etree.XMLParser(remove_blank_text=True))]
            for i in range(len(roots) - 1):
                _record_solution(roots, i)
            root = roots[-1]
            variables, values = _record_solution(roots, len(roots) - 1)
            optimal = stdout.find('s OPTIMUM') != -1
            if cop:
                root.attrib['type'] = 'optimum' if optimal else 'solution'
                if 'cost' not in root.attrib:
                    # NOTE(review): assigning an int to an lxml attrib may raise
                    # TypeError -- confirm whether str() conversion is needed here.
                    root.attrib['cost'] = _int_from(stdout, stdout.rfind('o ') + 2)
                self.bound = int(root.attrib['cost'])
            if 'id' in root.attrib:
                del root.attrib['id']

            def _array_values(t):
                # Recursively replace variables by their current values.
                if t is None:
                    return None
                if isinstance(t, Variable):
                    return t.value
                t.values = [_array_values(v) for v in t]
                return t.values

            for array in Variable.arrays:
                _array_values(array)
            pretty_solution = etree.tostring(root, pretty_print=True, xml_declaration=False).decode('UTF-8').strip()
            self.last_solution = Instantiation(root, variables, values, pretty_solution)
            j = stdout.find('d FOUND SOLUTIONS')
            if j != -1:
                self.n_solutions = _int_from(stdout, j)
            return TypeStatus.OPTIMUM if optimal else TypeStatus.SAT

        def execute(command):
            # Run the solver, tee its stdout to a log file, and report whether
            # the user interrupted it with Ctrl-C.
            if not is_windows():
                p = subprocess.Popen(command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, preexec_fn=os.setsid)
            else:
                p = subprocess.Popen(command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
            stopped = False
            handler = signal.getsignal(signal.SIGINT)

            def new_handler(signum, frame):
                # BUG FIX: parameters were declared (frame, signum) -- Python
                # calls handlers as handler(signum, frame).
                # BUG FIX: was `global stopped`, which set a module-level name and
                # left the local flag (returned below)永 False; nonlocal is required.
                nonlocal stopped
                stopped = True
                # NOTE(review): os.killpg is POSIX-only; presumably SIGINT is never
                # delivered this way on Windows -- confirm.
                os.killpg(os.getpgid(p.pid), signal.SIGINT)

            signal.signal(signal.SIGINT, new_handler)
            end_prefix = self.log_filename_suffix if self.log_filename_suffix is not None else str(self.n_executions)
            log = Logger(end_prefix, verbose, Compilation.pathname)
            self.last_log = log.log_file
            for line in p.stdout:
                if verbose == 2:
                    sys.stdout.write(line)
                log.write(line)
            p.wait()
            p.terminate()
            log.close()
            signal.signal(signal.SIGINT, handler)  # restore the previous handler
            return log.read(), stopped

        if model is not None and len(VarEntities.items) == 0:
            print('\n The instance has no variable, so the solver is not run.')
            print('Did you forget to indicate the variant of the model?')
            return None
        if automatic is False and SolverProcess.automatic_call:
            print('\n You attempt to solve the instance with both -solve and the function solve().')
            return None
        SolverProcess.automatic_call = automatic
        if compiler is False:
            # Normalise options into the bracketed "[solver,opts]" form and merge.
            if len(string_options) == 0 or string_options[0] != '[':
                string_options = '[' + self.name.lower() + (',' + string_options if len(string_options) > 0 else '') + ']'
            solver, tmp_dict_options, tmp_dict_simplified_options = process_options(string_options)
            dict_simplified_options.update(tmp_dict_simplified_options)
            dict_options.update(tmp_dict_options)
        stopwatch = Stopwatch()
        solver_args = self.parse_general_options(string_options, dict_options, dict_simplified_options)
        solver_args += (' ' + dict_options['args']) if 'args' in dict_options else ''
        solver_args += ' ' + self.options
        verbose = 2 if options.solve or 'verbose' in dict_simplified_options else verbose
        command = self.command + ' ' + (model if model is not None else '') + ' ' + solver_args
        if verbose > 0:
            print('\n * Solving by ' + self.name + ' in progress ... ')
            print(' - command:', command)
        out_err, stopped = execute(command)
        missing = out_err is not None and out_err.find('Missing Implementation') != -1
        self.last_command_wck = stopwatch.elapsed_time()
        if verbose > 0:
            if stopped:
                print(' * Solving process stopped (SIGINT) by ' + self.name + ' after ' + GREEN + self.last_command_wck + WHITE + ' seconds')
            else:
                print('\n * Solved by ' + self.name + ' in ' + GREEN + self.last_command_wck + WHITE + ' seconds')
            if missing:
                print('\n This is due to a missing implementation')
            if automatic and verbose < 2:
                print('\n NB: use the solver option v, as in -solver=[choco,v] or -solver=[ace,v] to see directly the output of the solver.')
            else:
                print()
        self.n_executions += 1
        return extract_result_and_solution(out_err) if out_err else TypeStatus.UNKNOWN

    def solve(self, instance, string_options='', dict_options=None, dict_simplified_options=None, compiler=False, *, verbose=0, automatic=False, extraction=False):
        """Public entry point: run _solve(), cache and return the resulting status."""
        # BUG FIX: mutable dict() defaults replaced by None sentinels.
        dict_options = {} if dict_options is None else dict_options
        dict_simplified_options = {} if dict_simplified_options is None else dict_simplified_options
        self.status = self._solve(instance, string_options, dict_options, dict_simplified_options, compiler, verbose=verbose, automatic=automatic, extraction=extraction)
        return self.status

    def switch_to_extraction(self):
        # Hook for solvers supporting unsat-core extraction; default: no-op.
        pass
def transform_with_items(schema, template):
    """Expand the template's `with_items` loop into *schema* (mutated in place and returned).

    `template['with_items']` may be: an iterable of items, a zero-argument
    callable producing one, or a dict of exactly {'using': items} or
    {'from_stdout': command}.

    Raises RuntimeError when the resolved items are not iterable.
    """
    items = template['with_items']
    if isinstance(items, dict):
        if set(items) == {'using'}:
            items = items['using']
        elif set(items) == {'from_stdout'}:
            items = from_stdout(items['from_stdout'])
    # Idiom fix: callable() instead of hasattr(items, '__call__').
    if callable(items):
        items = items()
    if not isinstance(items, Iterable):
        raise RuntimeError('bad with_items template: {}'.format(items))
    # Tuple instead of a set literal: deterministic processing order.
    for key in ('operators', 'sensors', 'flow'):
        if key not in template:
            continue
        # Fold all per-item expansions of this section into one sub-schema.
        subschema = reduce(merge, transform_schema_with_items(template[key], items), {})
        schema.setdefault(key, {})
        schema[key] = merge(schema[key], subschema)
    return schema
def min_freItem():
    """Count item occurrences over the sequence database and split items into
    candidate/frequent sets by a utility-weighted support threshold.

    Relies entirely on module globals: my_dict (per-symbol utility), NumbS,
    sDB (sequence database), minsup, canArr, freArr, unum, ww.
    NOTE(review): all of these are undeclared in this view -- confirm they are
    initialised before this is called.
    """
    global ww
    counter = dict()  # item -> occurrence count across all sequences
    mine = ''
    # Hard-coded per-nucleotide utility values (a/g/c/t).
    my_dict['a'] = 3
    my_dict['g'] = 6
    my_dict['c'] = 6
    my_dict['t'] = 4
    for t in range(NumbS):
        S = sDB[t].S
        for s in S:
            mine = s
            # NOTE(review): `== None` should be `is None` (kept as-is here).
            if (counter.get(mine) == None):
                counter[mine] = 1
            else:
                counter[mine] = (counter[mine] + 1)
    for iterator in counter.items():
        pp = list(iterator)[0]  # the item (key of the counter entry)
        hupval = my_dict[pp[0]]  # utility of the item's first symbol
        if ((list(iterator)[1] * hupval) < minsup):
            # Below threshold with real utility; retry with the maximum utility 6.
            uphupval = (list(iterator)[1] * 6)
            if (uphupval >= minsup):
                # NOTE(review): string concatenation here vs .append() below --
                # canArr[0] is used both as str and list; likely a latent bug.
                canArr[0] = (canArr[0] + list(iterator)[0])
        else:
            # Frequent item: record in both frequent and candidate sets.
            p0 = list(iterator)[0]
            freArr[0].append(p0)
            canArr[0].append(list(iterator)[0])
            unum[ww] = hupval
            ww = (ww + 1)
class TestGetImage(EndianTest):
    """Round-trip pack/unpack tests for the X11 GetImage request and reply.

    NOTE(review): the literals in setUp lost several values during extraction
    ({'drawable': , ...} is not valid Python) -- restore the missing integers
    from the upstream generated test file before running.
    """
    def setUp(self):
        # Fixture: request arguments and their expected wire encoding.
        self.req_args_0 = {'drawable': , 'format': 2, 'height': 20170, 'plane_mask': , 'width': 282, 'x': (- 14814), 'y': (- 5449)}
        self.req_bin_0 = b'I\x02\x00\x053\xfbEj\xc6"\xea\xb7\x01\x1aN\xca$\xba\x96\xb6'
        # Fixture: reply arguments and their expected wire encoding.
        self.reply_args_0 = {'data': b'\xeb?:\xa7\xc6\x8b\xc2\x96o-S\xe6\xd6z6\x94\xd7v\xd2R.\xa2\xeaw\t\x13\x95\x85', 'depth': 181, 'sequence_number': 28429, 'visual': }
        self.reply_bin_0 = b'\x01\xb5o\r\x00\x00\x00\x07d\x94\xbe\xcd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xeb?:\xa7\xc6\x8b\xc2\x96o-S\xe6\xd6z6\x94\xd7v\xd2R.\xa2\xeaw\t\x13\x95\x85'

    def testPackRequest0(self):
        # Args -> binary must match the recorded encoding.
        bin = request.GetImage._request.to_binary(*(), **self.req_args_0)
        self.assertBinaryEqual(bin, self.req_bin_0)

    def testUnpackRequest0(self):
        # Binary -> args must round-trip with nothing left over.
        (args, remain) = request.GetImage._request.parse_binary(self.req_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_0)

    def testPackReply0(self):
        bin = request.GetImage._reply.to_binary(*(), **self.reply_args_0)
        self.assertBinaryEqual(bin, self.reply_bin_0)

    def testUnpackReply0(self):
        (args, remain) = request.GetImage._reply.parse_binary(self.reply_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.reply_args_0)
class CollectionConfig(BaseModel, extra='forbid'):
    """Configuration snapshot of a collection (unknown fields rejected).

    NOTE(review): all Field descriptions are empty strings in the source --
    presumably stripped by the generator; confirm against the API schema.
    """
    params: 'CollectionParams' = Field(..., description='')
    hnsw_config: 'HnswConfig' = Field(..., description='')
    optimizer_config: 'OptimizersConfig' = Field(..., description='')
    wal_config: 'WalConfig' = Field(..., description='')
    # Optional: absent when the collection uses no quantization.
    quantization_config: Optional['QuantizationConfig'] = Field(default=None, description='')
def convert_all_pt_checkpoints_to_tf(args_model_type, tf_dump_path, model_shortcut_names_or_path=None, config_shortcut_names_or_path=None, compare_with_pt_model=False, use_cached_models=False, remove_cached_files=False, only_convert_finetuned_models=False):
    """Convert PyTorch checkpoints to TF .h5 files for one or all model types.

    args_model_type -- a MODEL_CLASSES key, or None to convert every registered type
    tf_dump_path -- directory receiving the `<shortcut>-tf_model.h5` outputs
    model_shortcut_names_or_path / config_shortcut_names_or_path -- explicit
        checkpoints/configs; by default all AWS-hosted ones for each type
    Raises ValueError for an unknown model type.
    """
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]
    for j, model_type in enumerate(model_types, start=1):
        print('=' * 100)
        print(f' Converting model type {j}/{len(model_types)}: {model_type}')
        print('=' * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.')
        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]
        # BUG FIX: the function previously reassigned its own parameters, so
        # the first model type's checkpoint list leaked into every later type.
        # Local names keep each type's defaults independent.
        if model_shortcut_names_or_path is None:
            model_names = list(aws_model_maps.keys())
        else:
            model_names = model_shortcut_names_or_path
        if config_shortcut_names_or_path is None:
            config_names = model_names
        else:
            config_names = config_shortcut_names_or_path
        for i, (model_shortcut_name, config_shortcut_name) in enumerate(zip(model_names, config_names), start=1):
            print('-' * 100)
            if '-squad' in model_shortcut_name or '-mrpc' in model_shortcut_name or '-mnli' in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f' Skipping finetuned checkpoint {model_shortcut_name}')
                    continue
                # Finetuned checkpoints encode their own type in the shortcut name.
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f' Skipping not finetuned checkpoint {model_shortcut_name}')
                continue
            # BUG FIX: the progress denominator now matches the list actually
            # iterated (was len(aws_config_map), wrong for explicit name lists).
            print(f' Converting checkpoint {i}/{len(model_names)}: {model_shortcut_name} - model_type {model_type}')
            print(('-' * 100))
            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=(not use_cached_models))
            else:
                config_file = config_shortcut_name
            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=(not use_cached_models))
            else:
                model_file = model_shortcut_name
            if os.path.isfile(model_shortcut_name):
                # Local file paths get a generic output name.
                model_shortcut_name = 'converted_model'
            convert_pt_checkpoint_to_tf(model_type=model_type, pytorch_checkpoint_path=model_file, config_file=config_file, tf_dump_path=os.path.join(tf_dump_path, (model_shortcut_name + '-tf_model.h5')), compare_with_pt_model=compare_with_pt_model)
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
def test_molecule_and_vsite_water(coumarin, tmpdir, water, rfree_data):
    """Combine a coumarin molecule with a 4-site water model (tip4p-fb) into one
    offxml and check constraints, water nonbonded terms, coumarin parameters and
    the virtual-site particle of the resulting OpenMM system.

    Fixtures: coumarin/water (molecules), tmpdir, rfree_data (free parameters).
    """
    coumarin_copy = coumarin.copy(deep=True)
    MBISCharges.apply_symmetrisation(coumarin_copy)
    with tmpdir.as_cwd():
        # Fit LJ terms with alpha/beta removed from the free-parameter dict,
        # then restore them for the combine step below.
        alpha = rfree_data.pop('alpha')
        beta = rfree_data.pop('beta')
        lj = LennardJones612(free_parameters=rfree_data, alpha=alpha, beta=beta)
        lj.run(coumarin_copy)
        rfree_data['alpha'] = alpha
        rfree_data['beta'] = beta
        # NOTE(review): `elements` is not defined anywhere in this view --
        # presumably a module-level constant; confirm.
        _combine_molecules_offxml(molecules=[coumarin_copy], parameters=elements, rfree_data=rfree_data, filename='combined.offxml', water_model='tip4p-fb', h_constraints=False)
        combinded_ff = ForceField('combined.offxml', load_plugins=True, allow_cosmetic_attributes=True)
        handlers = combinded_ff.registered_parameter_handlers
        assert ('LocalCoordinateVirtualSites' not in handlers)
        assert ('VirtualSites' in handlers)
        mixed_top = Topology.from_molecules(molecules=[Molecule.from_rdkit(water.to_rdkit()), Molecule.from_rdkit(coumarin_copy.to_rdkit())])
        system = combinded_ff.create_openmm_system(topology=mixed_top)
        # Rigid water only: two O-H bonds plus the H-H distance.
        assert (system.getNumConstraints() == 3)
        reference_constraints = [[0, 1, unit.Quantity(0.9572, unit=unit.angstroms)], [0, 2, unit.Quantity(0.9572, unit=unit.angstroms)], [1, 2, unit.Quantity(1., unit=unit.angstroms)]]
        for i in range(3):
            (a, b, constraint) = system.getConstraintParameters(i)
            assert (a == reference_constraints[i][0])
            assert (b == reference_constraints[i][1])
            assert (constraint == reference_constraints[i][2].in_units_of(unit.nanometers))
        forces = dict(((force.__class__.__name__, force) for force in system.getForces()))
        nonbonded_force: openmm.NonbondedForce = forces['NonbondedForce']
        # tip4p-fb: O carries LJ but no charge; H atoms carry charge, no LJ.
        water_reference = [[unit.Quantity(0.0, unit.elementary_charge), unit.Quantity(3.1655, unit=unit.angstroms), unit.Quantity(0.179082, unit=unit.kilocalorie_per_mole)], [unit.Quantity(0.52587, unit.elementary_charge), unit.Quantity(1, unit=unit.angstroms), unit.Quantity(0, unit=unit.kilocalorie_per_mole)], [unit.Quantity(0.52587, unit.elementary_charge), unit.Quantity(1, unit=unit.angstroms), unit.Quantity(0, unit=unit.kilocalorie_per_mole)]]
        for i in range(3):
            (charge, sigma, epsilon) = nonbonded_force.getParticleParameters(i)
            assert (charge == water_reference[i][0])
            assert (sigma.in_units_of(unit.nanometer) == water_reference[i][1].in_units_of(unit.nanometer))
            assert (epsilon.in_units_of(unit.kilocalorie_per_mole) == water_reference[i][2])
        # Coumarin particles follow the three water atoms in the topology.
        for i in range(coumarin_copy.n_atoms):
            ref_params = coumarin_copy.NonbondedForce[(i,)]
            (charge, sigma, epsilon) = nonbonded_force.getParticleParameters((i + 3))
            assert (charge.value_in_unit(unit.elementary_charge) == float(ref_params.charge))
            assert (sigma.value_in_unit(unit.nanometers) == ref_params.sigma)
            assert (epsilon.value_in_unit(unit.kilojoule_per_mole) == ref_params.epsilon)
        # Last particle: the water virtual site, carrying the balancing charge.
        (charge, sigma, epsilon) = nonbonded_force.getParticleParameters((coumarin_copy.n_atoms + 3))
        assert (charge == unit.Quantity((- 1.05174), unit.elementary_charge))
        assert (sigma == unit.Quantity(1.0, unit.nanometer))
        assert (epsilon == unit.Quantity(0.0, unit.kilojoule_per_mole))
def ft_setup(workers: List[int], num_rounds: int, die_round_factor: float = 0.25, comeback_round_factor: float = 0.75):
    """Schedule fault-tolerance events: *workers* are killed at a fraction of
    the training rounds and rank 1's returns are delayed until near comeback.

    workers -- ranks to kill, or None to disable fault tolerance (returns None)
    num_rounds -- total number of training rounds
    die_round_factor / comeback_round_factor -- fractions of num_rounds at
        which workers die and come back

    Returns the remote FaultToleranceManager actor, or None.
    """
    # BUG FIX: the factor parameters were written `die_round_factor: 0.25`,
    # i.e. a literal used as an *annotation* with no default value; they are
    # now proper keyword defaults (backward-compatible for existing callers).
    if workers is None:
        return None
    ft_manager = FaultToleranceManager.remote()
    die_round = int(die_round_factor * num_rounds)
    comeback_round = int(comeback_round_factor * num_rounds)
    for worker in workers:
        ft_manager.schedule_kill.remote(rank=worker, boost_round=die_round)
    # Rank 1 is slowed down between just before the kill and the comeback.
    ft_manager.delay_return.remote(rank=1, start_boost_round=(die_round - 2), end_boost_round=(comeback_round - 1))
    print(f'Scheduled workers {list(workers)} to die at round {die_round} and to come back at round {comeback_round} (total {num_rounds} training rounds)')
    return ft_manager
class ExtraDuplicatesSettings(BaseModel):
    """Settings for the duplicate-message filter."""
    # Help texts exposed alongside the fields (class-level, not per-instance).
    interval_description: ClassVar[str] = 'Look for rule violations in messages from the last `interval` number of seconds.'
    threshold_description: ClassVar[str] = 'Maximum number of duplicate messages before the filter is triggered.'
    # Sliding window length in seconds.
    interval: int = 10
    # Duplicates allowed within the window before triggering.
    threshold: int = 3
class KeyboardButton(TelegramObject):
    """One button of a Telegram reply keyboard.

    Mirrors the Bot API object: plain text plus optional request flags
    (contact/location/poll/user/chat) or a web-app launcher. Instances are
    frozen after construction; equality is based on all fields.
    """

    __slots__ = ('request_location', 'request_contact', 'request_poll', 'text', 'web_app', 'request_user', 'request_chat')

    def __init__(self, text: str, request_contact: Optional[bool]=None, request_location: Optional[bool]=None, request_poll: Optional[KeyboardButtonPollType]=None, web_app: Optional[WebAppInfo]=None, request_user: Optional[KeyboardButtonRequestUser]=None, request_chat: Optional[KeyboardButtonRequestChat]=None, *, api_kwargs: Optional[JSONDict]=None):
        super().__init__(api_kwargs=api_kwargs)
        self.text: str = text
        self.request_contact: Optional[bool] = request_contact
        self.request_location: Optional[bool] = request_location
        self.request_poll: Optional[KeyboardButtonPollType] = request_poll
        self.web_app: Optional[WebAppInfo] = web_app
        self.request_user: Optional[KeyboardButtonRequestUser] = request_user
        self.request_chat: Optional[KeyboardButtonRequestChat] = request_chat
        # All fields participate in equality/hashing.
        self._id_attrs = (self.text, self.request_contact, self.request_location, self.request_poll, self.web_app, self.request_user, self.request_chat)
        self._freeze()

    # BUG FIX: the decorator was missing although the method takes `cls` and is
    # invoked on the class -- without @classmethod, KeyboardButton.de_json(data, bot)
    # would bind `data` to cls.
    @classmethod
    def de_json(cls, data: Optional[JSONDict], bot: 'Bot') -> Optional['KeyboardButton']:
        """Build a KeyboardButton from a decoded JSON dict (None for empty data)."""
        data = cls._parse_data(data)
        if not data:
            return None
        # Deserialize nested objects before delegating to the base class.
        data['request_poll'] = KeyboardButtonPollType.de_json(data.get('request_poll'), bot)
        data['request_user'] = KeyboardButtonRequestUser.de_json(data.get('request_user'), bot)
        data['request_chat'] = KeyboardButtonRequestChat.de_json(data.get('request_chat'), bot)
        data['web_app'] = WebAppInfo.de_json(data.get('web_app'), bot)
        return super().de_json(data=data, bot=bot)
class CorruptionLayoutEditor(QtWidgets.QWidget, Ui_CorruptionLayoutEditor):
    """Editor widget mapping each Metroid Prime 3 pickup location to a combo box;
    regenerates the layout string whenever any selection changes."""

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.game_description = default_database.game_description_for(RandovaniaGame.METROID_PRIME_CORRUPTION)
        pickup_database = default_database.pickup_database_for_game(RandovaniaGame.METROID_PRIME_CORRUPTION)
        region_list = self.game_description.region_list
        self._index_to_combo = {}  # PickupIndex -> QComboBox
        # NOTE(review): the asset-id literals were lost in extraction
        # ("[, , , ]" is not valid Python) -- restore from the upstream source.
        ids_to_merge = [, , , ]
        nodes_to_merge = []  # pickup nodes from merged regions, grouped under "Seeds"
        region_count = 0
        for (i, region) in enumerate(region_list.regions):
            if (region.extra['asset_id'] in ids_to_merge):
                # Defer these regions' pickup nodes to the shared "Seeds" group.
                nodes_to_merge.extend((node for area in region.areas for node in area.nodes if isinstance(node, PickupNode)))
                continue
            # One group box per remaining region, one row per pickup node.
            group = QtWidgets.QGroupBox(self.scroll_area_contents)
            group.setTitle(region.name)
            layout = QtWidgets.QGridLayout(group)
            area_count = 0
            for area in region.areas:
                for node in area.nodes:
                    if (not isinstance(node, PickupNode)):
                        continue
                    node_label = QtWidgets.QLabel(region_list.node_name(node), group)
                    layout.addWidget(node_label, area_count, 0)
                    node_combo = QtWidgets.QComboBox(group)
                    _fill_combo(pickup_database, node_combo)
                    node_combo.currentIndexChanged.connect(self.update_layout_string)
                    layout.addWidget(node_combo, area_count, 1)
                    self._index_to_combo[node.pickup_index] = node_combo
                    area_count += 1
            self.scroll_area_layout.addWidget(group)
            region_count += 1
        # Shared group for the merged regions' nodes.
        group = QtWidgets.QGroupBox(self.scroll_area_contents)
        group.setTitle('Seeds')
        layout = QtWidgets.QGridLayout(group)
        area_count = 0
        for node in nodes_to_merge:
            if (not isinstance(node, PickupNode)):
                continue
            node_label = QtWidgets.QLabel(region_list.node_name(node), group)
            layout.addWidget(node_label, area_count, 0)
            node_combo = QtWidgets.QComboBox(group)
            _fill_combo(pickup_database, node_combo)
            node_combo.currentIndexChanged.connect(self.update_layout_string)
            layout.addWidget(node_combo, area_count, 1)
            self._index_to_combo[node.pickup_index] = node_combo
            area_count += 1
        self.scroll_area_layout.addWidget(group)
        self.update_layout_string()

    def update_layout_string(self):
        # Rebuild the layout string from every combo's current selection.
        item_names = [combo.currentData() for combo in self._index_to_combo.values()]
        self.layout_edit.setText(layout_string_for_items(item_names))
def gen_back_to_back_test():
    """Return RISC-V assembly exercising back-to-back taken branches.

    Four chained sequences of always-taken `bne` hops (x3 == 1, so every
    `bne x3, x0, ...` is taken) walking backwards through labels, with
    csrr/csrw manager handshakes verifying the values. The literal below is
    test data consumed by the assembler and must stay byte-identical.
    """
    return '\n    # Test backwards walk (back to back branch taken)\n\n    csrr x3, mngr2proc < 1\n    csrr x1, mngr2proc < 1\n\n    bne x3, x0, X0\n    csrw proc2mngr, x0\n    nop\n    a0:\n    csrw proc2mngr, x1 > 1\n    bne x3, x0, y0\n    b0:\n    bne x3, x0, a0\n    c0:\n    bne x3, x0, b0\n    d0:\n    bne x3, x0, c0\n    e0:\n    bne x3, x0, d0\n    f0:\n    bne x3, x0, e0\n    g0:\n    bne x3, x0, f0\n    h0:\n    bne x3, x0, g0\n    i0:\n    bne x3, x0, h0\n    X0:\n    bne x3, x0, i0\n    y0:\n\n    bne x3, x0, X1\n    csrw x0, proc2mngr\n    nop\n    a1:\n    csrw proc2mngr, x1 > 1\n    bne x3, x0, y1\n    b1:\n    bne x3, x0, a1\n    c1:\n    bne x3, x0, b1\n    d1:\n    bne x3, x0, c1\n    e1:\n    bne x3, x0, d1\n    f1:\n    bne x3, x0, e1\n    g1:\n    bne x3, x0, f1\n    h1:\n    bne x3, x0, g1\n    i1:\n    bne x3, x0, h1\n    X1:\n    bne x3, x0, i1\n    y1:\n\n    bne x3, x0, X2\n    csrw proc2mngr, x0\n    nop\n    a2:\n    csrw proc2mngr, x1 > 1\n    bne x3, x0, y2\n    b2:\n    bne x3, x0, a2\n    c2:\n    bne x3, x0, b2\n    d2:\n    bne x3, x0, c2\n    e2:\n    bne x3, x0, d2\n    f2:\n    bne x3, x0, e2\n    g2:\n    bne x3, x0, f2\n    h2:\n    bne x3, x0, g2\n    i2:\n    bne x3, x0, h2\n    X2:\n    bne x3, x0, i2\n    y2:\n\n    bne x3, x0, X3\n    csrw proc2mngr, x0\n    nop\n    a3:\n    csrw proc2mngr, x1 > 1\n    bne x3, x0, y3\n    b3:\n    bne x3, x0, a3\n    c3:\n    bne x3, x0, b3\n    d3:\n    bne x3, x0, c3\n    e3:\n    bne x3, x0, d3\n    f3:\n    bne x3, x0, e3\n    g3:\n    bne x3, x0, f3\n    h3:\n    bne x3, x0, g3\n    i3:\n    bne x3, x0, h3\n    X3:\n    bne x3, x0, i3\n    y3:\n    nop\n    nop\n    nop\n    nop\n    nop\n    nop\n    nop\n  '
class FeatQueue(nn.Module):
def __init__(self, max_queue_size=30000):
super(FeatQueue, self).__init__()
self.max_queue_size = max_queue_size
def append(self, queue, feat):
if isinstance(feat, np.ndarray):
queue = np.concatenate([queue, feat], axis=0)
queue_size = queue.shape[0]
else:
queue = torch.cat([queue, feat], dim=0)
queue_size = queue.size(0)
if (queue_size > self.max_queue_size):
queue = self.pop(queue, (queue_size - self.max_queue_size))
return queue
def pop(self, queue, num_item):
queue = queue[num_item:]
return queue
def update_queue_size(self, size):
try:
curr_queue_size = getattr(self, 'curr_queue_size')
except:
curr_queue_size = 0
curr_queue_size += size
if (curr_queue_size >= self.max_queue_size):
curr_queue_size = self.max_queue_size
setattr(self, 'curr_queue_size', curr_queue_size)
def update(self, name, feat):
try:
queue = getattr(self, name)
queue = self.append(queue, feat)
setattr(self, name, queue)
except:
setattr(self, name, feat)
def update_all(self, feats, names):
for (name, feat) in zip(names, feats):
self.update(name, feat)
def sample(self, name, indices):
queue = getattr(self, name)
out = queue[indices]
return out
def batch_sample(self, indices_list, name):
list_items = []
for indices in indices_list:
list_items.append(self.sample(name, indices).unsqueeze(0))
list_items = torch.cat(list_items, dim=0)
return list_items
def batch_sample_all(self, indices_list, names):
results = []
for name in names:
results.append(self.batch_sample(indices_list, name))
return results
def select_indices(self, dataset_names, dataset_indices, sample_size=8192):
indices_list = []
length_list = []
for (name, dataset_index) in zip(dataset_names, dataset_indices):
dataset_names = getattr(self, 'dataset_names')
dataset_indices = getattr(self, 'dataset_indices')
condition = np.logical_and((dataset_names == name), (dataset_indices == dataset_index))
same_indices = np.where(condition)[0]
all_indices = np.arange(getattr(self, 'curr_queue_size'))
if (same_indices.size != 0):
diff_indices = np.delete(all_indices, same_indices)
else:
diff_indices = all_indices
perm_index = np.random.choice(diff_indices, size=min(sample_size, len(diff_indices)), replace=False)
indices_list.append(perm_index)
length_list.append(len(perm_index))
minimum_length = np.min(length_list)
indices_list = [indices[:minimum_length] for indices in indices_list]
return indices_list |
def directed_hausdorff(point_cloud_A, point_cloud_B):
    """Batched directed Hausdorff distance max_i min_j ||A_i - B_j||.

    Both inputs are (batch, npoint, dim) tensors with the same npoint.
    Returns a squeezed tensor of per-batch distances.
    """
    num_points = point_cloud_A.shape[1]
    # Broadcast both clouds to (batch, npoint, npoint, dim) so that entry
    # [b, i, j] holds A_i paired with B_j.
    tiled_a = tf.tile(tf.expand_dims(point_cloud_A, axis=2), (1, 1, num_points, 1))
    tiled_b = tf.tile(tf.expand_dims(point_cloud_B, axis=1), (1, num_points, 1, 1))
    pairwise = tf.sqrt(tf.reduce_sum(tf.squared_difference(tiled_b, tiled_a), axis=(- 1)))
    # top_k of the negated distances picks the minimum over j for each i...
    neg_min_dists, _ = tf.nn.top_k((- pairwise))
    min_dists = tf.squeeze((- neg_min_dists))
    # ...and top_k again picks the maximum of those minima.
    max_of_min, _ = tf.nn.top_k(min_dists)
    return tf.squeeze(max_of_min)
class MCHManagedCollisionModule(ManagedCollisionModule):
    """Managed-collision module keeping the `zch_size` most valuable raw ids
    (per the eviction policy) in a sorted buffer and remapping them to stable
    output ids; optional secondary hashing for non-tracked ids."""

    def __init__(self, zch_size: int, device: torch.device, eviction_policy: MCHEvictionPolicy, eviction_interval: int, input_hash_size: int=(2 ** 63), input_hash_func: Optional[Callable[([torch.Tensor, int], torch.Tensor)]]=None, mch_size: Optional[int]=None, mch_hash_func: Optional[Callable[([torch.Tensor, int], torch.Tensor)]]=None, name: Optional[str]=None, output_global_offset: int=0) -> None:
        """zch_size -- number of tracked (zero-collision) slots; eviction_policy /
        eviction_interval control when slots are recycled; mch_size + mch_hash_func
        optionally hash untracked ids into an extra range after the zch range."""
        super().__init__(device)
        self._name = name
        # Lazily sized on the first profile() call (-1 == not yet initialized).
        self._input_history_buffer_size: int = (- 1)
        self._input_hash_size = input_hash_size
        self._zch_size: int = zch_size
        assert (self._zch_size > 0), 'zch_size must be > 0'
        self._mch_size: int = 0
        if (mch_size is not None):
            self._mch_size = mch_size
            assert (mch_hash_func is not None), 'mch_hash_func must be provided if mch_size is provided'
        self._output_global_offset: int = output_global_offset
        self._mch_hash_func = mch_hash_func
        self._input_hash_func = input_hash_func
        self._eviction_interval = eviction_interval
        # NOTE(review): the message says "> 1" but the check enforces > 0 -- confirm intent.
        assert (self._eviction_interval > 0), 'eviction_interval must be > 1'
        self._eviction_policy = eviction_policy
        # -1 means "resume from _current_iter_tensor on the first profile() call".
        self._current_iter: int = (- 1)
        # Persisted iteration counter (survives checkpointing).
        self._current_iter_tensor = torch.nn.Parameter(torch.zeros(1, dtype=torch.int32, device=self.device), requires_grad=False)
        self._init_buffers()
        self._mch_metadata: Dict[(str, torch.Tensor)] = {}
        self._history_metadata: Dict[(str, torch.Tensor)] = {}
        self._init_metadata_buffers()
        self._current_history_buffer_offset: int = 0
        self._evicted: bool = False
        self._last_eviction_iter: int = (- 1)
    def _init_buffers(self) -> None:
        """Create the core id-tracking buffers."""
        # Sorted raw ids; empty slots hold int64-max so they sort to the end.
        self.register_buffer('_mch_sorted_raw_ids', torch.full((self._zch_size,), torch.iinfo(torch.int64).max, dtype=torch.int64, device=self.device))
        # Output (remapped) id for each slot of _mch_sorted_raw_ids; permuted alongside it.
        self.register_buffer('_mch_remapped_ids_mapping', torch.arange(self._zch_size, dtype=torch.int64, device=self.device))
        # Placeholder; overwritten on first eviction (intentionally not a registered buffer).
        self._evicted_emb_indices: torch.Tensor = torch.empty((1,), device=self.device)
    def _init_metadata_buffers(self) -> None:
        """Register one per-slot int64 buffer for each MCH metadata field the
        eviction policy declares, and index them in _mch_metadata."""
        eviction_metadata_info = self._eviction_policy.metadata_info
        for metadata in eviction_metadata_info:
            (metadata_name, is_mch_metadata, is_history_metadata) = metadata
            if is_mch_metadata:
                buffer_name = ('_mch_' + metadata_name)
                self.register_buffer(buffer_name, torch.zeros((self._zch_size,), dtype=torch.int64, device=self.device))
                self._mch_metadata[metadata_name] = getattr(self, buffer_name)
    def _init_history_buffers(self, features: Dict[(str, JaggedTensor)]) -> None:
        """Size the history accumulator from the first observed batch
        (total values per batch * eviction_interval, with 25% headroom) and
        register matching non-persistent history metadata buffers."""
        input_batch_value_size_cumsum = 0
        for (_, feature) in features.items():
            input_batch_value_size_cumsum += feature.values().numel()
        # 1.25 headroom factor: batches may vary in size between evictions.
        self._input_history_buffer_size = int(((input_batch_value_size_cumsum * self._eviction_interval) * 1.25))
        self._history_accumulator: torch.Tensor = torch.empty(self._input_history_buffer_size, dtype=torch.int64, device=self.device)
        eviction_metadata_info = self._eviction_policy.metadata_info
        for metadata in eviction_metadata_info:
            (metadata_name, is_mch_metadata, is_history_metadata) = metadata
            if is_history_metadata:
                buffer_name = ('_history_' + metadata_name)
                # persistent=False: history is transient and not checkpointed.
                self.register_buffer(buffer_name, torch.zeros(self._input_history_buffer_size, dtype=torch.int64, device=self.device), persistent=False)
                self._history_metadata[metadata_name] = getattr(self, buffer_name)
_grad()
def preprocess(self, features: Dict[(str, JaggedTensor)]) -> Dict[(str, JaggedTensor)]:
if (self._input_hash_func is None):
return features
preprocessed_features: Dict[(str, JaggedTensor)] = {}
for (name, feature) in features.items():
preprocessed_features[name] = JaggedTensor(values=self._input_hash_func(feature.values(), self._input_hash_size), lengths=feature.lengths(), offsets=feature.offsets(), weights=feature.weights_or_none())
return preprocessed_features
_grad()
def _match_indices(self, sorted_sequence: torch.Tensor, search_values: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]:
searched_indices = torch.searchsorted(sorted_sequence[:(- 1)], search_values)
retrieved_ids = sorted_sequence[searched_indices]
matching_eles = (retrieved_ids == search_values)
matched_indices = searched_indices[matching_eles]
return (matching_eles, matched_indices)
_grad()
def _sort_mch_buffers(self) -> None:
mch_sorted_raw_ids = self._mch_sorted_raw_ids
argsorted_sorted_raw_ids = torch.argsort(mch_sorted_raw_ids, stable=True)
mch_sorted_raw_ids.copy_(mch_sorted_raw_ids[argsorted_sorted_raw_ids])
self._mch_remapped_ids_mapping.copy_(self._mch_remapped_ids_mapping[argsorted_sorted_raw_ids])
for mch_metadata_buffer in self._mch_metadata.values():
mch_metadata_buffer.copy_(mch_metadata_buffer[argsorted_sorted_raw_ids])
_grad()
def _update_and_evict(self, uniq_ids: torch.Tensor, uniq_ids_counts: torch.Tensor, uniq_ids_metadata: Dict[(str, torch.Tensor)]) -> None:
argsorted_uniq_ids_counts = torch.argsort(uniq_ids_counts, descending=True, stable=True)
frequency_sorted_uniq_ids = uniq_ids[argsorted_uniq_ids_counts]
frequency_sorted_uniq_ids_counts = uniq_ids_counts[argsorted_uniq_ids_counts]
(matching_eles, matched_indices) = self._match_indices(self._mch_sorted_raw_ids, frequency_sorted_uniq_ids)
new_frequency_sorted_uniq_ids = frequency_sorted_uniq_ids[(~ matching_eles)]
(evicted_indices, selected_new_indices) = self._eviction_policy.update_metadata_and_generate_eviction_scores(self._current_iter, self._zch_size, argsorted_uniq_ids_counts, frequency_sorted_uniq_ids_counts, matching_eles, matched_indices, self._mch_metadata, uniq_ids_metadata)
self._mch_sorted_raw_ids[evicted_indices] = new_frequency_sorted_uniq_ids[selected_new_indices]
if self._evicted:
self._evicted_emb_indices = torch.unique(torch.cat([self._evicted_emb_indices, self._mch_remapped_ids_mapping[evicted_indices]]))
else:
self._evicted_emb_indices = self._mch_remapped_ids_mapping[evicted_indices]
self._evicted = True
self._sort_mch_buffers()
_grad()
def _coalesce_history(self) -> None:
current_history_accumulator = self._history_accumulator[:self._current_history_buffer_offset]
(uniq_ids, uniq_inverse_mapping, uniq_ids_counts) = torch.unique(current_history_accumulator, return_inverse=True, return_counts=True)
if (self._eviction_policy._threshold_filtering_func is not None):
(threshold_mask, threshold) = self._eviction_policy._threshold_filtering_func(uniq_ids_counts)
else:
threshold_mask = None
coalesced_eviction_history_metadata = self._eviction_policy.coalesce_history_metadata(self._current_iter, {metadata_name: metadata_buffer[:self._current_history_buffer_offset] for (metadata_name, metadata_buffer) in self._history_metadata.items()}, uniq_ids_counts, uniq_inverse_mapping, threshold_mask=threshold_mask)
if (threshold_mask is not None):
uniq_ids = uniq_ids[threshold_mask]
uniq_ids_counts = uniq_ids_counts[threshold_mask]
self._update_and_evict(uniq_ids, uniq_ids_counts, coalesced_eviction_history_metadata)
self._current_history_buffer_offset = 0
_grad()
def profile(self, features: Dict[str, JaggedTensor]) -> Dict[str, JaggedTensor]:
    """Record incoming feature ids into the history buffer (training only)
    and trigger history coalescing every ``_eviction_interval`` iterations.

    Returns ``features`` unchanged; this method only updates internal state.
    """
    if (not self.training):
        return features
    # Lazily restore the iteration counter from its persisted tensor form
    # (-1 marks "not yet initialized", e.g. after checkpoint load).
    if (self._current_iter == (- 1)):
        self._current_iter = int(self._current_iter_tensor.item())
        self._last_eviction_iter = self._current_iter
    self._current_iter += 1
    self._current_iter_tensor.data += 1
    # Lazily size the history buffers from the first batch seen.
    if (self._input_history_buffer_size == (- 1)):
        self._init_history_buffers(features)
    for (_, feature) in features.items():
        values = feature.values()
        free_elements = (self._input_history_buffer_size - self._current_history_buffer_offset)
        # Ids that do not fit into the remaining buffer space are dropped.
        values = values[:free_elements]
        self._history_accumulator[self._current_history_buffer_offset:(self._current_history_buffer_offset + values.shape[0])] = values
        self._eviction_policy.record_history_metadata(self._current_iter, values, {metadata_name: metadata_buffer[self._current_history_buffer_offset:(self._current_history_buffer_offset + values.shape[0])] for (metadata_name, metadata_buffer) in self._history_metadata.items()})
        self._current_history_buffer_offset += values.shape[0]
    if ((self._current_iter - self._last_eviction_iter) == self._eviction_interval):
        self._coalesce_history()
        self._last_eviction_iter = self._current_iter
    return features
_grad()
def remap(self, features: Dict[str, JaggedTensor]) -> Dict[str, JaggedTensor]:
    """Map raw feature ids to managed-collision slot ids.

    Ids present in ``_mch_sorted_raw_ids`` receive their learned ZCH slot;
    misses fall back to a hashed MCH slot when an MCH region exists, or to
    the last ZCH slot otherwise.
    """
    remapped_features: Dict[str, JaggedTensor] = {}
    for (name, feature) in features.items():
        values = feature.values()
        remapped_ids = torch.empty_like(values)
        # Binary-search the sorted raw ids; the last element is excluded so
        # searchsorted cannot return an out-of-range index below.
        searched_indices = torch.searchsorted(self._mch_sorted_raw_ids[:(- 1)], values)
        retrieved_indices = self._mch_sorted_raw_ids[searched_indices]
        # Exact hits are remapped through the learned mapping.
        matching_indices = (retrieved_indices == values)
        remapped_ids[matching_indices] = self._mch_remapped_ids_mapping[searched_indices[matching_indices]]
        if self._mch_size:
            # Misses hash into the MCH region, offset past the ZCH range.
            non_matching_values = values[(~ matching_indices)]
            hashed_non_matching = self._mch_hash_func(non_matching_values, self._mch_size).add(self._zch_size)
            remapped_ids[(~ matching_indices)] = hashed_non_matching
        else:
            # No MCH region: send every miss to the last ZCH slot.
            remapped_ids[(~ matching_indices)] = (self._zch_size - 1)
        remapped_features[name] = JaggedTensor(values=remapped_ids, lengths=feature.lengths(), offsets=feature.offsets(), weights=feature.weights_or_none())
    return remapped_features
_grad()
def forward(self, features: Dict[str, JaggedTensor]) -> Dict[str, JaggedTensor]:
    """Run training-time profiling over the features, then return their
    remapped ids."""
    profiled = self.profile(features)
    return self.remap(profiled)
def output_size(self) -> int:
    """Total number of output ids: the ZCH range plus the MCH fallback range."""
    total = self._zch_size + self._mch_size
    return total
def input_size(self) -> int:
    """Size of the raw input id space handled by this module."""
    return self._input_hash_size
_grad()
def evict(self) -> Optional[torch.Tensor]:
    """Return pending evicted embedding indices and clear the flag, or None
    when no eviction is pending."""
    if not self._evicted:
        return None
    # Clear the flag so repeated calls do not hand back stale indices.
    self._evicted = False
    return self._evicted_emb_indices
def rebuild_with_output_id_range(self, output_id_range: Tuple[int, int], device: Optional[torch.device]=None) -> 'MCHManagedCollisionModule':
    """Construct a new module of the same concrete type that covers only
    ``output_id_range``.

    The original module's ZCH/MCH split is preserved proportionally; an MCH
    region that would shrink to zero is dropped entirely.
    """
    start, end = output_id_range
    new_output_size = end - start
    # Scale the ZCH portion proportionally to the shrunken output range.
    scale = new_output_size / self.output_size()
    new_zch_size = int(self._zch_size * scale)
    new_mch_size = new_output_size - new_zch_size
    return type(self)(
        name=self._name,
        zch_size=new_zch_size,
        device=device or self.device,
        eviction_policy=self._eviction_policy,
        eviction_interval=self._eviction_interval,
        input_hash_size=self._input_hash_size,
        input_hash_func=self._input_hash_func,
        mch_size=new_mch_size if new_mch_size > 0 else None,
        mch_hash_func=self._mch_hash_func,
        output_global_offset=start,
    )
# NOTE(review): the line below is a mangled decorator — it lost its
# "@pytest.mark" prefix during extraction and presumably should read
# "@pytest.mark.parametrize(...)". Left byte-identical pending confirmation.
.parametrize('method, url, expected_result, strict', [('ls', 'bigquery://bigquery-url/path1/path2', ('bigquery://', ['path1', 'path2', None]), False), ('schema', 'bigquery://bigquery-url/path1/path2/path3', ('bigquery://', ['path1', 'path2', 'path3']), False), ('ls', 'invalidscheme://invalid-url', pytest.raises(ValueError, match='No clients available for scheme'), False), ('invalid_method', 'bigquery://bigquery-url', pytest.raises(ValueError, match='Invalid method'), False)])
def test_parse_url(method, url, expected_result, strict):
    """Parametrized check of parse_url: tuple-valued params assert the parsed
    result, context-manager params assert that the expected error is raised."""
    if isinstance(expected_result, tuple):
        result = parse_url(method, url, strict)
        assert (result == expected_result)
    else:
        # expected_result is a pytest.raises context manager here.
        with expected_result:
            parse_url(method, url, strict)
class SynonymProcessor(DataProcessor):
    """Processor for a 5-choice multiple-choice synonym dataset stored as CSV.

    Each CSV row is: example id, shared context, five candidate endings,
    and the gold label (one of '0'..'4').
    """

    def get_train_examples(self, data_dir):
        """Return train examples read from mctrain.csv in ``data_dir``."""
        logger.info('LOOKING AT {} train'.format(data_dir))
        return self._create_examples(self._read_csv(os.path.join(data_dir, 'mctrain.csv')), 'train')

    def get_dev_examples(self, data_dir):
        """Return dev examples read from mchp.csv in ``data_dir``."""
        logger.info('LOOKING AT {} dev'.format(data_dir))
        return self._create_examples(self._read_csv(os.path.join(data_dir, 'mchp.csv')), 'dev')

    def get_test_examples(self, data_dir):
        """Return test examples read from mctest.csv in ``data_dir``."""
        # Bug fix: previously logged 'dev' while reading the test split.
        logger.info('LOOKING AT {} test'.format(data_dir))
        return self._create_examples(self._read_csv(os.path.join(data_dir, 'mctest.csv')), 'test')

    def get_labels(self):
        """Return the five possible answer labels."""
        return ['0', '1', '2', '3', '4']

    def _read_csv(self, input_file):
        # Materialize all rows so the file handle can be closed eagerly.
        with open(input_file, 'r', encoding='utf-8') as f:
            return list(csv.reader(f))

    def _create_examples(self, lines: List[List[str]], type: str):
        """Convert raw CSV rows into InputExample objects.

        The shared context (column 1) is repeated for all five endings
        (columns 2-6); column 7 is the label. NOTE: ``type`` shadows the
        builtin but is kept for interface compatibility.
        """
        examples = [InputExample(example_id=line[0], question='', contexts=[line[1]] * 5, endings=[line[2], line[3], line[4], line[5], line[6]], label=line[7]) for line in lines]
        return examples
class Ksboolean_TestCase(ParserTest):
    """ksboolean(): accepts the documented truthy/falsy spellings and raises
    ArgumentTypeError for anything else (including non-strings)."""

    def runTest(self):
        for text in ('ON', 'On', 'YES', 'Yes', 'TRUE', 'True', '1'):
            self.assertTrue(ksboolean(text))
        for text in ('OFF', 'Off', 'NO', 'No', 'FALSE', 'False', '0'):
            self.assertFalse(ksboolean(text))
        # Unrecognized strings and non-string inputs must raise.
        for bad in (True, False, 'YesSir', 'NoWay', None, [], {}):
            self.assertRaises(ArgumentTypeError, ksboolean, bad)
def write_html(filename, it, img_save_it, img_dir, all_size=1536):
    """Write an HTML page showing the current generated image followed by one
    row per saved iteration, newest first.

    Args:
        filename: output HTML path; its basename becomes the page title.
        it: current iteration number.
        img_save_it: oldest saved iteration to include (inclusive).
        img_dir: directory containing the gen_train_*.jpg images.
        all_size: image row size forwarded to _write_row.
    """
    # Context manager guarantees the file is closed even if _write_row raises
    # (the original leaked the handle on exceptions).
    with open(filename, 'w') as html_file:
        html_file.write(('\n <!DOCTYPE html>\n <html>\n <head>\n <title>Experiment name = %s</title>\n <meta content="30">\n </head>\n <body>\n ' % os.path.basename(filename)))
        html_file.write('<h3>current</h3>')
        _write_row(html_file, it, ('%s/gen_train_current.jpg' % img_dir), all_size)
        # Iterate from the newest saved iteration down to img_save_it.
        for j in range(it, (img_save_it - 1), (- 1)):
            _write_row(html_file, j, ('%s/gen_train_%08d.jpg' % (img_dir, j)), all_size)
        html_file.write('</body></html>')
class ConfigTestUtils(unittest.TestCase):
    """Tests for shared PretrainedConfig utilities: update_from_string
    parsing, completeness of config_common_kwargs, and the offline cache
    fallback."""

    def test_config_from_string(self):
        """update_from_string must parse 'k=v,...' and coerce each value to
        the attribute's existing type (int, float, bool, str)."""
        c = GPT2Config()
        # Derive values guaranteed to differ from the current defaults.
        n_embd = (c.n_embd + 1)
        resid_pdrop = (c.resid_pdrop + 1.0)
        scale_attn_weights = (not c.scale_attn_weights)
        summary_type = (c.summary_type + 'foo')
        c.update_from_string(f'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}')
        self.assertEqual(n_embd, c.n_embd, 'mismatch for key: n_embd')
        self.assertEqual(resid_pdrop, c.resid_pdrop, 'mismatch for key: resid_pdrop')
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, 'mismatch for key: scale_attn_weights')
        self.assertEqual(summary_type, c.summary_type, 'mismatch for key: summary_type')

    def test_config_common_kwargs_is_complete(self):
        """config_common_kwargs must cover every PretrainedConfig attribute
        except three known exclusions, and must not repeat default values."""
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if (key not in config_common_kwargs)]
        # These three are intentionally absent from config_common_kwargs.
        self.assertListEqual(missing_keys, ['is_encoder_decoder', '_name_or_path', 'transformers_version'])
        keys_with_defaults = [key for (key, value) in config_common_kwargs.items() if (value == getattr(base_config, key))]
        if (len(keys_with_defaults) > 0):
            raise ValueError(f"The following keys are set with the default values in `test_configuration_common.config_common_kwargs` pick another value for them: {', '.join(keys_with_defaults)}.")

    def test_cached_files_are_used_when_internet_is_down(self):
        """With HEAD requests failing (HTTP 500), from_pretrained must fall
        back to the locally cached files instead of raising."""
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = []
        response_mock.raise_for_status.side_effect = HTTPError
        # First fetch warms the local cache.
        _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')
        # Under a mocked failing connection the cached copy must be used.
        with mock.patch('transformers.utils.hub.requests.head', return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')
            mock_head.assert_called()
class TestSlonyCollector(CollectorTestCase):
    """Tests for SlonyCollector: replication-lag stats must be fetched per
    configured database/schema and published as metrics."""

    def setUp(self):
        config = get_collector_config('SlonyCollector', {})
        self.collector = SlonyCollector(config, None)

    def test_import(self):
        self.assertTrue(SlonyCollector)

    # NOTE(review): the three bare expressions below appear to be mangled
    # decorators — presumably "@run_only_if_psycopg2_is_available" plus two
    # "@patch.object(SlonyCollector, ...)" calls whose prefixes were lost in
    # extraction (the test signatures expect the injected mocks). Left as-is.
    _only_if_psycopg2_is_available
    (SlonyCollector, '_get_stats_by_database')
    (SlonyCollector, 'publish')
    def test_default(self, publish, _get_stats_by_database):
        """Default config queries localhost postgres/_postgres and publishes."""
        _get_stats_by_database.return_value = [('foo', 7)]
        self.collector.collect()
        _get_stats_by_database.assert_called_with('localhost', 5432, 'postgres', 'postgres', 'postgres', '_postgres', 'Node [0-9]+ - ')
        self.assertPublished(publish, 'foo', 7)

    _only_if_psycopg2_is_available
    (SlonyCollector, '_get_stats_by_database')
    (SlonyCollector, 'publish')
    def test_instances(self, publish, _get_stats_by_database):
        """Per-instance config publishes stats from every instance."""
        def side_effect(host, port, user, pwd, slony_db, slony_schema, node):
            # Return distinct stats per (db, schema) pair.
            if ((slony_db, slony_schema) == ('postgres', '_postgres')):
                return [('foo', 7)]
            elif ((slony_db, slony_schema) == ('data', '_data')):
                return [('bar', 14)]
        _get_stats_by_database.side_effect = side_effect
        config = get_collector_config('SlonyCollector', {'instances': {'alpha': {'slony_db': 'postgres', 'slony_schema': '_postgres'}, 'beta': {'slony_db': 'data', 'slony_schema': '_data'}}})
        collector = SlonyCollector(config, None)
        collector.collect()
        self.assertPublished(publish, 'foo', 7)
        self.assertPublished(publish, 'bar', 14)

    _only_if_psycopg2_is_available
    (SlonyCollector, '_get_stats_by_database')
    def test_override_user_password_nodestr(self, _get_stats_by_database):
        """Instance-level user/password/node-string overrides reach the query."""
        config = get_collector_config('SlonyCollector', {'instances': {'alpha': {'slony_db': 'postgres', 'slony_schema': '_postgres', 'user': 'postgres', 'password': 'postgres', 'slony_node_string': '(.*)'}, 'beta': {'slony_db': 'data', 'slony_schema': '_data', 'user': 'data', 'password': 'data', 'slony_node_string': 'Node (.*)'}}})
        collector = SlonyCollector(config, None)
        collector.collect()
        _get_stats_by_database.assert_any_call('localhost', 5432, 'postgres', 'postgres', 'postgres', '_postgres', '(.*)')
        _get_stats_by_database.assert_any_call('localhost', 5432, 'data', 'data', 'data', '_data', 'Node (.*)')
class PyramidNet(nn.Module):
    """PyramidNet: a ResNet-style network whose channel width grows linearly
    by ``addrate`` per residual block (additive pyramid) instead of doubling
    at stage boundaries.

    Two layouts are supported:
      * 'cifar*'  : conv3x3 stem, 3 stages, 8x8 average pool.
      * 'imagenet': conv7x7 stem + maxpool, 4 stages, 7x7 average pool.
    """

    def __init__(self, dataset, depth, alpha, num_classes, bottleneck=False):
        super(PyramidNet, self).__init__()
        self.dataset = dataset
        if self.dataset.startswith('cifar'):
            self.inplanes = 16
            # Blocks per stage: Bottleneck has 3 convs, BasicBlock 2 — hence
            # divisors 9 and 6 for the 3 stages.
            if (bottleneck == True):
                n = int(((depth - 2) / 9))
                block = Bottleneck
            else:
                n = int(((depth - 2) / 6))
                block = BasicBlock
            # Linear widening: the total width gain over all 3n blocks is alpha.
            self.addrate = (alpha / ((3 * n) * 1.0))
            self.input_featuremap_dim = self.inplanes
            self.conv1 = nn.Conv2d(3, self.input_featuremap_dim, kernel_size=3, stride=1, padding=1, bias=False)
            self.bn1 = nn.BatchNorm2d(self.input_featuremap_dim)
            # featuremap_dim tracks the (fractional) running width and is
            # advanced inside pyramidal_make_layer.
            self.featuremap_dim = self.input_featuremap_dim
            self.layer1 = self.pyramidal_make_layer(block, n)
            self.layer2 = self.pyramidal_make_layer(block, n, stride=2)
            self.layer3 = self.pyramidal_make_layer(block, n, stride=2)
            self.final_featuremap_dim = self.input_featuremap_dim
            self.bn_final = nn.BatchNorm2d(self.final_featuremap_dim)
            self.relu_final = nn.ReLU(inplace=True)
            self.avgpool = nn.AvgPool2d(8)
            self.fc = nn.Linear(self.final_featuremap_dim, num_classes)
        elif (dataset == 'imagenet'):
            # Standard ResNet depth -> (block type, per-stage counts) tables.
            blocks = {18: BasicBlock, 34: BasicBlock, 50: Bottleneck, 101: Bottleneck, 152: Bottleneck, 200: Bottleneck}
            layers = {18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3], 200: [3, 24, 36, 3]}
            if (layers.get(depth) is None):
                # Unknown depth: derive a uniform 4-stage configuration.
                if (bottleneck == True):
                    blocks[depth] = Bottleneck
                    temp_cfg = int(((depth - 2) / 12))
                else:
                    blocks[depth] = BasicBlock
                    temp_cfg = int(((depth - 2) / 8))
                layers[depth] = [temp_cfg, temp_cfg, temp_cfg, temp_cfg]
                print('=> the layer configuration for each stage is set to', layers[depth])
            self.inplanes = 64
            self.addrate = (alpha / (sum(layers[depth]) * 1.0))
            self.input_featuremap_dim = self.inplanes
            self.conv1 = nn.Conv2d(3, self.input_featuremap_dim, kernel_size=7, stride=2, padding=3, bias=False)
            self.bn1 = nn.BatchNorm2d(self.input_featuremap_dim)
            self.relu = nn.ReLU(inplace=True)
            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            self.featuremap_dim = self.input_featuremap_dim
            self.layer1 = self.pyramidal_make_layer(blocks[depth], layers[depth][0])
            self.layer2 = self.pyramidal_make_layer(blocks[depth], layers[depth][1], stride=2)
            self.layer3 = self.pyramidal_make_layer(blocks[depth], layers[depth][2], stride=2)
            self.layer4 = self.pyramidal_make_layer(blocks[depth], layers[depth][3], stride=2)
            self.final_featuremap_dim = self.input_featuremap_dim
            self.bn_final = nn.BatchNorm2d(self.final_featuremap_dim)
            self.relu_final = nn.ReLU(inplace=True)
            self.avgpool = nn.AvgPool2d(7)
            self.fc = nn.Linear(self.final_featuremap_dim, num_classes)
        # He-style initialization for convs; BN scale/shift to 1/0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def pyramidal_make_layer(self, block, block_depth, stride=1):
        """Build one pyramid stage of ``block_depth`` blocks, widening the
        running ``featuremap_dim`` by ``addrate`` per block.

        Downsampling (stride != 1) is done with average pooling rather than
        a strided 1x1 convolution.
        """
        downsample = None
        if (stride != 1):
            downsample = nn.AvgPool2d((2, 2), stride=(2, 2), ceil_mode=True)
        layers = []
        self.featuremap_dim = (self.featuremap_dim + self.addrate)
        layers.append(block(self.input_featuremap_dim, int(round(self.featuremap_dim)), stride, downsample))
        for i in range(1, block_depth):
            temp_featuremap_dim = (self.featuremap_dim + self.addrate)
            # Input width of each block accounts for the block's expansion.
            layers.append(block((int(round(self.featuremap_dim)) * block.outchannel_ratio), int(round(temp_featuremap_dim)), 1))
            self.featuremap_dim = temp_featuremap_dim
        self.input_featuremap_dim = (int(round(self.featuremap_dim)) * block.outchannel_ratio)
        return nn.Sequential(*layers)

    def forward(self, x):
        """Stem -> pyramid stages -> final BN/ReLU -> pool -> linear head."""
        if ((self.dataset == 'cifar10') or (self.dataset == 'cifar100')):
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.layer1(x)
            x = self.layer2(x)
            x = self.layer3(x)
            x = self.bn_final(x)
            x = self.relu_final(x)
            x = self.avgpool(x)
            x = x.view(x.size(0), (- 1))
            x = self.fc(x)
        elif (self.dataset == 'imagenet'):
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.relu(x)
            x = self.maxpool(x)
            x = self.layer1(x)
            x = self.layer2(x)
            x = self.layer3(x)
            x = self.layer4(x)
            x = self.bn_final(x)
            x = self.relu_final(x)
            x = self.avgpool(x)
            x = x.view(x.size(0), (- 1))
            x = self.fc(x)
        return x
class Dialog(QDialog):
    """Main window of the threaded fortune server example: starts the server,
    displays the address/port it listens on, and offers a Quit button."""

    def __init__(self, parent=None):
        super(Dialog, self).__init__(parent)
        self.server = FortuneServer()
        statusLabel = QLabel()
        statusLabel.setWordWrap(True)
        quitButton = QPushButton('Quit')
        quitButton.setAutoDefault(False)
        if (not self.server.listen()):
            # Server could not bind; report and abort window construction.
            QMessageBox.critical(self, 'Threaded Fortune Server', ('Unable to start the server: %s.' % self.server.errorString()))
            self.close()
            return
        # Pick the first non-localhost IPv4 address; fall back to loopback
        # via the for/else when none is found.
        for ipAddress in QNetworkInterface.allAddresses():
            if ((ipAddress != QHostAddress.LocalHost) and (ipAddress.toIPv4Address() != 0)):
                break
        else:
            ipAddress = QHostAddress(QHostAddress.LocalHost)
        ipAddress = ipAddress.toString()
        statusLabel.setText(('The server is running on\n\nIP: %s\nport: %d\n\nRun the Fortune Client example now.' % (ipAddress, self.server.serverPort())))
        quitButton.clicked.connect(self.close)
        buttonLayout = QHBoxLayout()
        buttonLayout.addStretch(1)
        buttonLayout.addWidget(quitButton)
        buttonLayout.addStretch(1)
        mainLayout = QVBoxLayout()
        mainLayout.addWidget(statusLabel)
        mainLayout.addLayout(buttonLayout)
        self.setLayout(mainLayout)
        self.setWindowTitle('Threaded Fortune Server')
class Multiply(ImageOnlyTransform):
    """Test-time-augmentation transform that multiplies the image by each of
    the given factors.

    The identity factor (1) is prepended when missing so that the original,
    unmodified image is always part of the augmentation set.
    """

    identity_param = 1

    def __init__(self, factors: List[float]):
        # Guarantee the identity factor is part of the parameter list.
        if self.identity_param not in factors:
            factors = [self.identity_param, *factors]
        super().__init__('factor', factors)

    def apply_aug_image(self, image, factor=1, **kwargs):
        # The identity factor leaves the image untouched.
        if factor == self.identity_param:
            return image
        return F.multiply(image, factor)
def encrypt(key: bytes, nonce: bytes, initial_block_counter: int, plaintext: bytes) -> bytes:
    """ChaCha20-encrypt ``plaintext`` and return the ciphertext hex-encoded.

    The 64-bit block counter is packed little-endian in front of the nonce.
    If encryption would run past MAX_COUNTER, the keystream is split: the
    first part runs until the counter overflows, then a fresh cipher
    continues with the counter wrapped to 0 (matching ChaCha20's counter
    wrap-around behavior).
    """
    full_nonce = (struct.pack('<Q', initial_block_counter) + nonce)
    encryptor = Cipher(algorithms.ChaCha20(key, full_nonce), mode=None).encryptor()
    plaintext_len_blocks = math.ceil((len(plaintext) / BLOCK_SIZE))
    blocks_until_overflow = ((MAX_COUNTER - initial_block_counter) + 1)
    if (plaintext_len_blocks <= blocks_until_overflow):
        # Everything fits before the counter overflows.
        return binascii.hexlify(encryptor.update(plaintext))
    else:
        # Consistency fix: use BLOCK_SIZE here instead of a duplicated
        # literal 64 (it must agree with the block count computed above).
        bytes_until_overflow = min((blocks_until_overflow * BLOCK_SIZE), len(plaintext))
        first_batch = binascii.hexlify(encryptor.update(plaintext[:bytes_until_overflow]))
        # Restart with the counter wrapped to 0 for the remainder.
        full_nonce = (struct.pack('<Q', 0) + nonce)
        encryptor = Cipher(algorithms.ChaCha20(key, full_nonce), mode=None).encryptor()
        second_batch = binascii.hexlify(encryptor.update(plaintext[bytes_until_overflow:]))
        return (first_batch + second_batch)
def available_instruments(inst_loc=None):
    """Collect a nested dict describing importable pysat instrument modules.

    When ``inst_loc`` is None the instruments registered under
    ``pysat.params['user_modules']`` are listed; otherwise the modules named
    in ``inst_loc.__all__`` are scanned. The result maps
    platform -> name -> {'inst_module': ..., 'inst_ids_tags': ...}.
    """

    def get_inst_id_dict(inst_module_name):
        # Import errors are reported in-band so one broken module does not
        # hide the rest of the listing.
        try:
            module = importlib.import_module(inst_module_name)
            inst_ids = {inst_id: {tag: module.tags[tag] for tag in module.inst_ids[inst_id]} for inst_id in module.inst_ids.keys()}
        except ImportError as ierr:
            inst_ids = {'ERROR': {'ERROR': str(ierr)}}
        return inst_ids

    user_modules = pysat.params['user_modules']
    inst_info = dict()
    if inst_loc is None:
        for platform, names in user_modules.items():
            inst_info[platform] = {
                name: {'inst_module': mod_path, 'inst_ids_tags': get_inst_id_dict(mod_path)}
                for name, mod_path in names.items()
            }
    else:
        for inst_mod in inst_loc.__all__:
            # Module names are '<platform>_<name>'; split on the first '_'.
            platform, _, name = inst_mod.partition('_')
            mod_name = '.'.join([inst_loc.__name__, inst_mod])
            inst_info.setdefault(platform, dict())[name] = {'inst_module': mod_name, 'inst_ids_tags': get_inst_id_dict(mod_name)}
    return inst_info
def test_third2oct_2darray():
    """third2oct must collapse third-octave bands to octave bands row-wise
    when applied along axis=1 of a 2-D array."""
    row = [100, 95, 80, 55, 65, 85, 75, 70, 90, 95, 105, 110]
    levels = np.array([row, row])
    expected = np.array([[101.0, 85.0, 90.0, 111.0]] * 2)
    assert_array_almost_equal(third2oct(levels, axis=1), expected)
def _f1_score_param_check(num_classes: Optional[int], average: Optional[str]) -> None:
average_options = ('micro', 'macro', 'weighted', None)
if (average not in average_options):
raise ValueError(f'`average` was not in the allowed value of {average_options}, got {average}.')
if ((average != 'micro') and ((num_classes is None) or (num_classes <= 0))):
raise ValueError(f'num_classes should be a positive number when average={average}, got num_classes={num_classes}.') |
def valid_tile_size(value, arg_name, min_power=4, logger=None):
    """Validate that ``value`` is an integer power of two in
    [2**min_power, 2**11].

    Errors are reported through ``logger`` when given, otherwise printed to
    stdout. Both checks run, so a bad value may produce two messages.

    Returns:
        True when the value is valid, False otherwise.
    """
    error = False
    # Hoisted: the acceptable sizes were recomputed up to three times before.
    valid_sizes = [(2 ** i) for i in range(min_power, 12)]
    if (not isinstance(value, int)):
        if logger:
            logger.error(f'''Invalid value for the argument {arg_name}: {value}. Enter an integer.
''')
        else:
            print(f'''ERROR: Invalid value for the argument {arg_name}: {value}. Enter an integer.
''')
        error = True
    if (value not in valid_sizes):
        if logger:
            logger.error(f'''Invalid value for the argument {arg_name}: {value}. Choose among {valid_sizes}.
''')
        else:
            print(f'''ERROR: Invalid value for the argument {arg_name}: {value}. Choose among {valid_sizes}.
''')
        error = True
    return (not error)
class DataPrefetcher():
    """Overlaps host-to-GPU copies with compute by preloading the next batch
    on a dedicated CUDA stream."""

    def __init__(self, loader):
        self.loader = iter(loader)
        # Dedicated stream so copies run concurrently with the default stream.
        self.stream = torch.cuda.Stream()
        self.input_cuda = self._input_cuda_for_image
        self.record_stream = DataPrefetcher._record_stream_for_image
        self.preload()

    def preload(self):
        """Fetch the next batch and start its asynchronous copy to the GPU."""
        try:
            # The loader yields (input, target, _, _); the last two are unused.
            (self.next_input, self.next_target, _, _) = next(self.loader)
        except StopIteration:
            # Exhaustion is signalled with None sentinels.
            self.next_input = None
            self.next_target = None
            return
        with torch.cuda.stream(self.stream):
            self.input_cuda()
            # The target is a pair of tensors; copy both asynchronously.
            self.next_target = (self.next_target[0].cuda(non_blocking=True), self.next_target[1].cuda(non_blocking=True))

    def next(self):
        """Return the prefetched (input, (target0, target1)) pair and kick
        off the next preload."""
        # Ensure the copy stream finished before the batch is consumed.
        torch.cuda.current_stream().wait_stream(self.stream)
        input = self.next_input
        target = self.next_target
        if (input is not None):
            self.record_stream(input)
        if (target is not None):
            # record_stream stops the caching allocator from reusing this
            # memory while the current stream may still read it.
            target[0].record_stream(torch.cuda.current_stream())
            target[1].record_stream(torch.cuda.current_stream())
        self.preload()
        # NOTE(review): when the loader is exhausted, `target` is None and
        # indexing it raises TypeError — presumably callers stop iterating
        # via the epoch length before that happens; confirm.
        return (input, (target[0], target[1]))

    def _input_cuda_for_image(self):
        self.next_input = self.next_input.cuda(non_blocking=True)

    def _record_stream_for_image(input):
        # NOTE(review): no `self` parameter — used as an unbound function
        # through the class-attribute assignment in __init__.
        input.record_stream(torch.cuda.current_stream())
def _migrate_v5(preset: dict) -> dict:
excluded_item = {'include_copy_in_original_location': False, 'num_shuffled_pickups': 0, 'num_included_in_starting_items': 0, 'included_ammo': [], 'allowed_as_random_starting_item': True}
included_item = {**excluded_item, 'num_included_in_starting_items': 1}
shuffled_item = {**excluded_item, 'num_shuffled_pickups': 1}
default_items_state = {'Progressive Suit': {**excluded_item, 'num_shuffled_pickups': 2}, 'Dark Beam': {**shuffled_item, 'included_ammo': [50]}, 'Light Beam': {**shuffled_item, 'included_ammo': [50]}, 'Annihilator Beam': {**shuffled_item, 'included_ammo': [0, 0]}, 'Power Bomb': {**shuffled_item, 'included_ammo': [2]}, 'Progressive Grapple': {**excluded_item, 'num_shuffled_pickups': 2}, 'Missile Launcher': {**shuffled_item, 'included_ammo': [5]}, 'Seeker Launcher': {**shuffled_item, 'included_ammo': [5]}, 'Energy Tank': {**excluded_item, 'num_shuffled_pickups': 14}}
for item in ['Combat Visor', 'Scan Visor', 'Varia Suit', 'Power Beam', 'Charge Beam', 'Morph Ball']:
default_items_state[item] = included_item
for item in ['Dark Visor', 'Echo Visor', 'Morph Ball Bomb', 'Boost Ball', 'Spider Ball', 'Space Jump Boots', 'Gravity Boost', 'Super Missile', 'Sunburst', 'Darkburst', 'Sonic Boom', 'Violet Translator', 'Amber Translator', 'Emerald Translator', 'Cobalt Translator']:
default_items_state[item] = shuffled_item
major_items = preset['layout_configuration']['major_items_configuration']['items_state']
for item in default_items_state.keys():
if (item not in major_items):
major_items[item] = default_items_state[item]
preset['layout_configuration']['major_items_configuration'].pop('progressive_suit')
preset['layout_configuration']['major_items_configuration'].pop('progressive_grapple')
preset['layout_configuration'].pop('split_beam_ammo')
specific_levels: dict[(str, str)] = preset['layout_configuration']['trick_level']['specific_levels']
tricks_to_remove = [trick_name for (trick_name, level) in specific_levels.items() if (level == 'no-tricks')]
for trick in tricks_to_remove:
specific_levels.pop(trick)
preset['game'] = preset['layout_configuration'].pop('game')
preset['configuration'] = preset.pop('layout_configuration')
preset['configuration'].update(preset.pop('patcher_configuration'))
preset['configuration']['varia_suit_damage'] = max(preset['configuration']['varia_suit_damage'], 0.1)
return preset |
class _Metadata(_PrimitiveTemplateBase):
    """Template primitive for Metadata values: they pass through unchanged
    and must already be ``metadata.Metadata`` instances."""

    _valid_predicates = set()  # no predicates apply to Metadata

    def is_element(self, value):
        """Return True when ``value`` is a ``metadata.Metadata`` instance."""
        return isinstance(value, metadata.Metadata)

    def decode(self, metadata):
        """Pass ``metadata`` through, rejecting anything that is not Metadata.

        NOTE: the parameter shadows the ``metadata`` module; ``is_element``
        resolves the module in its own scope, so the check still works.
        """
        if not self.is_element(metadata):
            raise TypeError('`Metadata` must be provided by the interface directly.')
        return metadata

    def encode(self, value):
        """Identity encoding."""
        return value
def unpack_archive(filename, extract_dir, progress_filter=default_filter, drivers=None):
    """Unpack ``filename`` into ``extract_dir`` using the first driver that
    recognizes the archive format.

    Each driver is tried in order; a driver signals "not my format" by
    raising UnrecognizedFormat, and the first successful driver wins.

    Raises:
        UnrecognizedFormat: if no driver recognizes the file.
    """
    for driver in (drivers or extraction_drivers):
        try:
            driver(filename, extract_dir, progress_filter)
        except UnrecognizedFormat:
            continue  # try the next driver
        else:
            return
    # Simplification: the original used a for/else here, but the loop body
    # contains no break, so the else always ran on fall-through.
    raise UnrecognizedFormat(('Not a recognized archive type: %s' % filename))
def create_playlists(_request: WSGIRequest) -> HttpResponse:
    """Kick off asynchronous playlist creation for the local library.

    Requires the 'local_library' symlink inside the songs cache directory to
    exist; responds 400 otherwise. The actual work runs in the
    ``_create_playlists`` background task.
    """
    library_link = os.path.join(conf.SONGS_CACHE_DIR, 'local_library')
    if (not os.path.islink(library_link)):
        return HttpResponseBadRequest('No library set')
    # Reset the scan progress display before the task starts.
    _set_scan_progress('0 / 0 / 0')
    _create_playlists.delay()
    return HttpResponse('started creating playlists. This could take a while')
def _selfdestruct(computation: ComputationAPI, beneficiary: Address) -> None:
    """Implement the SELFDESTRUCT opcode's balance transfer and halt.

    Sends the executing contract's entire balance to ``beneficiary``, zeroes
    the contract's balance, records the deletion, then halts execution.
    """
    local_balance = computation.state.get_balance(computation.msg.storage_address)
    beneficiary_balance = computation.state.get_balance(beneficiary)
    # Credit the beneficiary first; if beneficiary == the contract itself,
    # the subsequent zeroing destroys the ether (standard EVM behavior).
    computation.state.set_balance(beneficiary, (local_balance + beneficiary_balance))
    computation.state.set_balance(computation.msg.storage_address, 0)
    # NOTE(review): the beneficiary is passed here — presumably the callee
    # records msg.storage_address for deletion keyed by the beneficiary;
    # confirm against register_account_for_deletion's contract.
    computation.register_account_for_deletion(beneficiary)
    # Halt is control flow, not an error: SELFDESTRUCT ends execution.
    raise Halt('SELFDESTRUCT')
class FillLouver(bpy.types.PropertyGroup):
    """Property group describing a louver fill pattern for a face."""

    # Number of louver slats to create on the face.
    louver_count: IntProperty(name='Louver Count', min=0, max=100, default=10, description='Number of louvers on to create face')
    # Offset of the louver area from the face border.
    louver_margin: FloatProperty(name='Louver Margin', step=1, min=get_scaled_unit(0.001), max=get_scaled_unit(100.0), default=get_scaled_unit(0.1), unit='LENGTH', description='Offset of louvers from face border')
    # Extrusion depth of each louver slat.
    louver_depth: FloatProperty(name='Louver Depth', step=1, min=get_scaled_unit(0.01), max=get_scaled_unit(100.0), default=get_scaled_unit(0.05), unit='LENGTH', description='Depth of each louver')
    # Gap between adjacent louvers.
    louver_border: FloatProperty(name='Louver Border', step=1, min=get_scaled_unit(0.0), max=get_scaled_unit(1.0), default=get_scaled_unit(0.01), unit='LENGTH', description='Distance between louvers')

    def draw(self, box):
        """Draw the louver properties into the given UI box."""
        box.prop(self, 'louver_margin')
        col = box.column(align=True)
        col.prop(self, 'louver_count')
        col.prop(self, 'louver_depth')
        col.prop(self, 'louver_border')
class ReportFormatter(Formatter):
    """Logging formatter that renders 'activity report' records as an aligned
    activity column followed by subject/target/context parts, and falls back
    to plain formatting (with nesting indent) for ordinary records."""

    # Width of the activity column; shorter activities are left-padded.
    ACTIVITY_MAXLEN = 12
    SPACING = ' '
    # Prefix words for the context ("from ...") and target ("to ...") parts.
    CONTEXT_PREFIX = 'from'
    TARGET_PREFIX = 'to'

    def format(self, record):
        # Records carrying an 'activity' attribute use the report layout.
        if hasattr(record, 'activity'):
            return self.format_report(record)
        return self.format_default(record)

    def create_padding(self, activity):
        """Return the spaces left-padding ``activity`` to ACTIVITY_MAXLEN."""
        actual = len(activity)
        count = max((self.ACTIVITY_MAXLEN - actual), 0)
        return (' ' * count)

    def format_path(self, path):
        """Abbreviate a path to its relative form when that is shorter."""
        from .file_system import is_pathname_valid
        path = str(path)
        if (is_pathname_valid(path) and (pathsep in path)):
            abbrev = relpath(path)
            if (len(abbrev) < len(path)):
                path = abbrev
        return path

    def format_activity(self, activity):
        # Hook for subclasses; identity by default.
        return activity

    def format_subject(self, subject, _activity=None):
        return (self.format_path(subject) if subject else '')

    def format_target(self, target, _activity=None):
        # Targets pointing at the current path add no information — skip.
        if (target and (not _is_current_path(target))):
            return f"{self.TARGET_PREFIX} '{self.format_path(target)}'"
        return ''

    def format_context(self, context, _activity=None):
        if (context and (not _is_current_path(context))):
            return f"{self.CONTEXT_PREFIX} '{self.format_path(context)}'"
        return ''

    def format_default(self, record):
        # NOTE: mutates record.msg in place before delegating to Formatter.
        record.msg = ((self.SPACING * max(record.nesting, 0)) + record.msg)
        return super().format(record)

    def format_report(self, record):
        """Compose: <padding><activity><indent> subject [to target] [from context]."""
        activity = record.activity
        record.msg = (((self.create_padding(activity) + self.format_activity(activity)) + (self.SPACING * max((record.nesting + 1), 0))) + ' '.join([text for text in [self.format_subject(record.subject, activity), self.format_target(record.target, activity), self.format_context(record.context, activity)] if text]))
        return super().format(record)
def _get_aws_ip_ranges():
try:
path = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(path, 'aws-ip-ranges.json')
with open(file_path, 'r') as f:
return json.loads(f.read())
except IOError:
logger.exception('Could not load AWS IP Ranges')
return None
except ValueError:
logger.exception('Could not load AWS IP Ranges')
return None
except TypeError:
logger.exception('Could not load AWS IP Ranges')
return None |
class FBCNet_old(nn.Module):
    """FBCNet variant for filter-bank EEG classification: a per-band spatial
    convolution block, a variance-style temporal aggregation layer (selected
    by name), and a norm-constrained linear classifier head."""

    def SCB(self, m, nChan, nBands, doWeightNorm=True, *args, **kwargs):
        """Spatial Convolution Block: ``m`` depthwise spatial filters per
        band (grouped conv), followed by batch-norm and ELU."""
        return nn.Sequential(Conv2dWithConstraint(nBands, (m * nBands), (nChan, 1), groups=nBands, max_norm=2, doWeightNorm=doWeightNorm, padding=0), nn.BatchNorm2d((m * nBands)), nn.ELU())

    def LastBlock(self, inF, outF, doWeightNorm=True, *args, **kwargs):
        """Classifier head: max-norm-constrained linear layer + log-softmax."""
        return nn.Sequential(LinearWithConstraint(inF, outF, *args, max_norm=0.5, doWeightNorm=doWeightNorm, **kwargs), nn.LogSoftmax(dim=1))

    def __init__(self, nChan, nTime, nClass=2, nBands=9, m=4, temporalLayer='VarLayer', doWeightNorm=True, *args, **kwargs):
        super(FBCNet_old, self).__init__()
        self.nBands = nBands
        self.m = m
        self.scb = self.SCB(m, nChan, self.nBands, doWeightNorm=doWeightNorm)
        # The temporal layer class is looked up by name in this module.
        self.temporalLayer = current_module.__dict__[temporalLayer](dim=3)
        self.lastLayer = self.LastBlock((self.m * self.nBands), nClass, doWeightNorm=doWeightNorm)

    def forward(self, x):
        # Move the band axis into channel position and drop the resulting
        # singleton trailing axis. Assumes x is 5-D with bands on the last
        # axis — TODO confirm against the dataloader.
        x = torch.squeeze(x.permute((0, 4, 2, 3, 1)), dim=4)
        x = self.scb(x)
        x = self.temporalLayer(x)
        x = torch.flatten(x, start_dim=1)
        x = self.lastLayer(x)
        return x
_datapipe('load_from_zip')
class ZipArchiveLoaderIterDataPipe(IterDataPipe[Tuple[str, BufferedIOBase]]):
    """Iterable DataPipe that opens zip archives coming from a
    (pathname, byte-stream) datapipe and yields one
    (inner_pathname, member_stream) tuple per archive member.

    Directories inside the archive are skipped. A corrupted archive emits a
    warning and re-raises the underlying error.
    """

    def __init__(self, datapipe: Iterable[Tuple[str, BufferedIOBase]], length: int=(- 1)) -> None:
        super().__init__()
        self.datapipe: Iterable[Tuple[str, BufferedIOBase]] = datapipe
        # Optional pre-declared length; -1 means "unknown".
        self.length: int = length

    def __iter__(self) -> Iterator[Tuple[str, BufferedIOBase]]:
        for data in self.datapipe:
            validate_pathname_binary_tuple(data)
            (pathname, data_stream) = data
            try:
                zips = zipfile.ZipFile(cast(IO[bytes], data_stream))
                for zipinfo in zips.infolist():
                    # Bug fix: the old check `sys.version_info[1] >= 6`
                    # compared only the minor version. ZipInfo.is_dir()
                    # exists since Python 3.6, so compare the full tuple.
                    if (sys.version_info >= (3, 6)):
                        if zipinfo.is_dir():
                            continue
                    elif zipinfo.filename.endswith('/'):
                        continue
                    extracted_fobj = zips.open(zipinfo)
                    inner_pathname = os.path.normpath(os.path.join(pathname, zipinfo.filename))
                    (yield (inner_pathname, StreamWrapper(extracted_fobj, data_stream, name=inner_pathname)))
            except Exception as e:
                warnings.warn(f'Unable to extract files from corrupted zipfile stream {pathname} due to: {e}, abort!')
                raise e
            finally:
                # Let the source stream close itself once all members
                # extracted from it are exhausted.
                if isinstance(data_stream, StreamWrapper):
                    data_stream.autoclose()

    def __len__(self) -> int:
        """Return the declared length; raise when it was never provided."""
        if (self.length == (- 1)):
            raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
        return self.length
class BehavioralRTLIRGenL1Pass(RTLIRPass):
    """PyMTL pass that generates level-1 behavioral RTLIR for every update
    block of a component and stores the result in the component's metadata."""

    # Metadata key under which the per-block RTLIR dict is stored.
    rtlir_upblks = MetadataKey()

    def __init__(s, translation_top):
        c = s.__class__
        s.tr_top = translation_top
        # NOTE(review): c.rtlir_getter is not declared in this class —
        # presumably inherited from RTLIRPass; confirm.
        if (not translation_top.has_metadata(c.rtlir_getter)):
            translation_top.set_metadata(c.rtlir_getter, RTLIRGetter(cache=True))

    def __call__(s, m):
        """Generate RTLIR for all comb/seq update blocks of component ``m``."""
        c = s.__class__
        # Reuse an existing metadata dict if present, otherwise create one.
        if m.has_metadata(c.rtlir_upblks):
            rtlir_upblks = m.get_metadata(c.rtlir_upblks)
        else:
            rtlir_upblks = {}
            m.set_metadata(c.rtlir_upblks, rtlir_upblks)
        visitor = s.get_rtlir_generator_class()(m)
        upblks = {bir.CombUpblk: get_ordered_upblks(m), bir.SeqUpblk: get_ordered_update_ff(m)}
        # Sort by block name for deterministic generation order.
        upblks[bir.CombUpblk].sort(key=(lambda x: x.__name__))
        upblks[bir.SeqUpblk].sort(key=(lambda x: x.__name__))
        for upblk_type in (bir.CombUpblk, bir.SeqUpblk):
            for blk in upblks[upblk_type]:
                visitor._upblk_type = upblk_type
                # upblk_info layout (by index use below): 0=is_lambda,
                # 1=src, 2=lino, 3=filename, last=AST handed to the visitor.
                upblk_info = m.get_update_block_info(blk)
                upblk = visitor.enter(blk, upblk_info[(- 1)])
                upblk.is_lambda = upblk_info[0]
                upblk.src = upblk_info[1]
                upblk.lino = upblk_info[2]
                upblk.filename = upblk_info[3]
                rtlir_upblks[blk] = upblk

    def get_rtlir_generator_class(s):
        """Hook for subclasses to substitute a higher-level generator."""
        return BehavioralRTLIRGeneratorL1
# NOTE(review): the line below looks like a mangled decorator — presumably
# "@register_tokenizer('moses')" lost its prefix during extraction.
_tokenizer('moses')
class MosesTokenizer(object):
    """Tokenizer wrapper around sacremoses' MosesTokenizer/MosesDetokenizer."""

    def add_args(parser):
        # NOTE(review): no `self`/`cls` parameter — presumably a
        # @staticmethod whose decorator was lost in extraction; confirm.
        parser.add_argument('--moses-source-lang', metavar='SRC', help='source language')
        parser.add_argument('--moses-target-lang', metavar='TARGET', help='target language')
        parser.add_argument('--moses-no-dash-splits', action='store_true', default=False, help="don't apply dash split rules")
        parser.add_argument('--moses-no-escape', action='store_true', default=False, help="don't perform HTML escaping on apostrophy, quotes, etc.")

    def __init__(self, args):
        self.args = args
        # Fall back to the task's language pair, defaulting to English.
        if (getattr(args, 'moses_source_lang', None) is None):
            args.moses_source_lang = getattr(args, 'source_lang', 'en')
        if (getattr(args, 'moses_target_lang', None) is None):
            args.moses_target_lang = getattr(args, 'target_lang', 'en')
        try:
            from sacremoses import MosesTokenizer, MosesDetokenizer
            self.tok = MosesTokenizer(args.moses_source_lang)
            self.detok = MosesDetokenizer(args.moses_target_lang)
        except ImportError:
            raise ImportError('Please install Moses tokenizer with: pip install sacremoses')

    def encode(self, x: str) -> str:
        """Tokenize ``x`` with the configured dash-split/escape options."""
        return self.tok.tokenize(x, aggressive_dash_splits=(not self.args.moses_no_dash_splits), return_str=True, escape=(not self.args.moses_no_escape))

    def decode(self, x: str) -> str:
        """Detokenize a whitespace-separated token string."""
        return self.detok.detokenize(x.split())
class MixConvBlock(nn.Module):
    """Convolution block: MixConv -> optional BatchNorm2d -> optional activation.

    Parameters mirror ``MixConv``. ``use_bn`` toggles batch normalization
    (with epsilon ``bn_eps``); ``activation`` is a factory returning an
    activation module (resolved via ``get_activation_layer``), or None to
    disable the activation entirely.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bias=False, use_bn=True, bn_eps=1e-05, activation=(lambda : nn.ReLU(inplace=True))):
        super(MixConvBlock, self).__init__()
        self.activate = (activation is not None)
        self.use_bn = use_bn
        self.conv = MixConv(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        if self.use_bn:
            self.bn = nn.BatchNorm2d(num_features=out_channels, eps=bn_eps)
        if self.activate:
            self.activ = get_activation_layer(activation)

    def forward(self, x):
        """Apply conv, then (optionally) batch norm and activation."""
        x = self.conv(x)
        if self.use_bn:
            x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        return x
def test_create_forbidden(db, client, settings):
    """A plain user must get 403 when PROJECT_CREATE_RESTRICTED is enabled."""
    settings.PROJECT_CREATE_RESTRICTED = True
    client.login(username='user', password='user')
    url = reverse(urlnames['list'])
    # catalog_id is a module-level fixture value shared by these tests.
    data = {'title': 'Lorem ipsum dolor sit amet', 'description': 'At vero eos et accusam et justo duo dolores et ea rebum.', 'catalog': catalog_id}
    response = client.post(url, data)
    assert (response.status_code == 403)
def parse_date(datestring, default_timezone=UTC):
    """Parse an ISO 8601 date string into a timezone-aware ``datetime``.

    Args:
        datestring: the ISO 8601 string to parse.
        default_timezone: tzinfo applied when the string carries no explicit
            UTC offset (defaults to the module's UTC).

    Returns:
        A ``datetime.datetime`` with ``tzinfo`` set.

    Raises:
        ParseError: if the input is not a string, does not match the ISO 8601
            grammar, or describes an out-of-range date/time.
    """
    if not isinstance(datestring, _basestring):
        raise ParseError('Expecting a string %r' % datestring)
    m = ISO8601_REGEX.match(datestring)
    if not m:
        raise ParseError('Unable to parse date string %r' % datestring)
    groups = m.groupdict()
    tz = parse_timezone(groups, default_timezone=default_timezone)
    # Convert the fractional-seconds field (e.g. '123' in '12:00:01.123')
    # into integer microseconds via Decimal to avoid float rounding.
    groups['second_fraction'] = int(Decimal('0.%s' % (groups['second_fraction'] or 0)) * Decimal('1000000.0'))
    try:
        return datetime.datetime(
            year=to_int(groups, 'year'),
            # The 'monthdash'/'daydash' groups cover the dashed YYYY-MM /
            # YYYY-MM-DD forms; missing components default to 1.
            month=to_int(groups, 'month', default=to_int(groups, 'monthdash', required=False, default=1)),
            day=to_int(groups, 'day', default=to_int(groups, 'daydash', required=False, default=1)),
            hour=to_int(groups, 'hour', default_to_zero=True),
            minute=to_int(groups, 'minute', default_to_zero=True),
            second=to_int(groups, 'second', default_to_zero=True),
            microsecond=groups['second_fraction'],
            tzinfo=tz,
        )
    except Exception as e:
        # BUG FIX: chain the original exception so the real cause (e.g. the
        # ValueError for an out-of-range day) survives in the traceback.
        raise ParseError(e) from e
class cached_property(Generic[R]):
    """Descriptor that computes a value once and caches it on the instance.

    On first access the wrapped function runs and its result is stored on the
    instance under the function's own name, shadowing this non-data
    descriptor, so later accesses are plain attribute reads with no call.
    """

    def __init__(self, wrapped: Callable[([Any], R)]):
        self.wrapped = wrapped
        # Copy __name__/__doc__ etc.; __name__ doubles as the cache key below.
        functools.update_wrapper(self, wrapped)

    def __get__(self, instance: T, owner: Type[Any]) -> R:
        # Class-level access returns the descriptor itself.
        if (instance is None):
            return self
        ret = self.wrapped(instance)
        # Cache on the instance so this descriptor is bypassed next time.
        setattr(instance, self.wrapped.__name__, ret)
        return ret
class InventoryCommand(Command):
    """Adventure-game command that lists the items the player is carrying."""

    def __init__(self, quals):
        # ``quals`` is accepted for interface parity with other commands but
        # is not used here.
        super().__init__('INV', 'taking inventory')

    # NOTE(review): no ``self`` parameter — probably a stripped @staticmethod
    # decorator; confirm against upstream.
    def help_description():
        return 'INVENTORY or INV or I - lists what items you have'

    def _do_command(self, player):
        # Natural-language listing of the player's inventory.
        print(('You have %s.' % enumerate_items(player.inv)))
class TransposeLast(nn.Module):
    """Swap the last two dimensions of the input tensor.

    If ``deconstruct_idx`` is given, the input is first indexed with it —
    useful when the upstream module yields a tuple/list and only one element
    should be transposed.
    """

    def __init__(self, deconstruct_idx=None):
        super().__init__()
        self.deconstruct_idx = deconstruct_idx

    def forward(self, x):
        idx = self.deconstruct_idx
        if idx is not None:
            x = x[idx]
        return x.transpose(-2, -1)
def main():
    """Entry point: launch training with the hard-coded hyperparameters below."""
    config = dict(
        num_of_snip=1,            # snippets per video
        saved_weights=None,       # start from scratch
        class_limit=None,         # use every class
        image_shape=(224, 224),
        load_to_memory=False,     # stream batches from disk
        batch_size=512,
        nb_epoch=500,
        name_str=None,
    )
    train(**config)
class Fighter(HandledItem, HandledCharge, ItemAttrShortcut, ChargeAttrShortcut):
    """A fighter squadron fitted to a ship or structure.

    Wraps an eos item of category 'Fighter'; tracks squadron size, per-ability
    activation state, projection state, and provides volley/DPS/cycle-time
    and tank calculations.

    NOTE(review): many zero-argument accessors below (slot, amount, abilities,
    charge, item, hp, ehp, ...) and the paired ``amount``/``amount``
    definitions read like @property/@setter pairs whose decorators were
    stripped from this copy of the source — confirm against upstream.
    """

    DAMAGE_TYPES = ('em', 'kinetic', 'explosive', 'thermal')
    DAMAGE_TYPES2 = ('EM', 'Kin', 'Exp', 'Therm')

    def __init__(self, item):
        """Wrap ``item`` as a fighter squadron and activate default abilities.

        Raises:
            ValueError: if ``item`` is not of category 'Fighter'.
        """
        self.__item = item
        if self.isInvalid:
            raise ValueError('Passed item is not a Fighter')
        self.itemID = (item.ID if (item is not None) else None)
        self.projected = False
        self.projectionRange = None
        self.active = True
        # -1 is a sentinel meaning "full squadron" (fighterSquadronMaxSize).
        self._amount = (- 1)
        self.__abilities = self.__getAbilities()
        self.build()
        standardAttackActive = False
        for ability in self.abilities:
            if (ability.effect.isImplemented and (ability.effect.name == 'fighterAbilityAttackM')):
                # Prefer the standard attack ability when implemented.
                ability.active = True
                standardAttackActive = True
            elif (ability.effect.isImplemented and (standardAttackActive is False) and (ability.effect.name != 'fighterAbilityMicroWarpDrive') and (ability.effect.name != 'fighterAbilityEvasiveManeuvers')):
                # Otherwise fall back to any implemented non-movement ability.
                ability.active = True

    def init(self):
        """Re-initialize after a database load: resolve the item id, rebuild."""
        self.__item = None
        if self.itemID:
            self.__item = eos.db.getItem(self.itemID)
            if (self.__item is None):
                pyfalog.error('Item (id: {0}) does not exist', self.itemID)
                return
        if self.isInvalid:
            pyfalog.error('Item (id: {0}) is not a Fighter', self.itemID)
            return
        self.build()

    def build(self):
        """(Re)build cached state: abilities, modified attributes, slot, charge."""
        self.__charge = None
        self.__baseVolley = None
        self.__miningyield = None
        self.__ehp = None
        self.__itemModifiedAttributes = ModifiedAttributeDict()
        self.__chargeModifiedAttributes = ModifiedAttributeDict()
        # Rebuild abilities when they no longer match the item's effect set
        # (e.g. after the item was rebased onto a different type).
        if ({a.effectID for a in self.abilities} != {e.ID for e in self.item.effects.values()}):
            self.__abilities = []
            for ability in self.__getAbilities():
                self.__abilities.append(ability)
        if self.__item:
            self.__itemModifiedAttributes.original = self.__item.attributes
            self.__itemModifiedAttributes.overrides = self.__item.overrides
            self.__slot = self.__calculateSlot(self.__item)
            # Bomb-launching fighters reference their charge item by attribute.
            chargeID = self.getModifiedItemAttr('fighterAbilityLaunchBombType')
            if chargeID:
                charge = eos.db.getItem(int(chargeID))
                self.__charge = charge
                self.__chargeModifiedAttributes.original = charge.attributes
                self.__chargeModifiedAttributes.overrides = charge.overrides

    def __getAbilities(self):
        """Build one FighterAbility per effect present on the item."""
        return [FighterAbility(effect) for effect in list(self.item.effects.values())]

    def __calculateSlot(self, item):
        """Map the item's fighterSquadronIs* attribute to a fitting slot type.

        Returns None when no squadron-type attribute is set.
        """
        types = {'Light': FittingSlot.F_LIGHT, 'Support': FittingSlot.F_SUPPORT, 'Heavy': FittingSlot.F_HEAVY, 'StandupLight': FittingSlot.FS_LIGHT, 'StandupSupport': FittingSlot.FS_SUPPORT, 'StandupHeavy': FittingSlot.FS_HEAVY}
        for (t, slot) in types.items():
            if self.getModifiedItemAttr('fighterSquadronIs{}'.format(t)):
                return slot

    def slot(self):
        """Fitting slot type, computed in build()."""
        return self.__slot

    def amount(self):
        """Squadron size; the -1 sentinel resolves to the max squadron size."""
        return (int(self.getModifiedItemAttr('fighterSquadronMaxSize')) if (self._amount == (- 1)) else self._amount)

    def amount(self, amount):
        """Setter counterpart: clamp to >= 0, store full squadron as -1."""
        amount = max(0, int(amount))
        if (amount >= self.getModifiedItemAttr('fighterSquadronMaxSize')):
            amount = (- 1)
        self._amount = amount

    def fighterSquadronMaxSize(self):
        """Maximum number of fighters in this squadron."""
        return int(self.getModifiedItemAttr('fighterSquadronMaxSize'))

    def abilities(self):
        """List of FighterAbility objects (never None)."""
        return (self.__abilities or [])

    def charge(self):
        """Charge item (e.g. a bomb) or None."""
        return self.__charge

    def itemModifiedAttributes(self):
        return self.__itemModifiedAttributes

    def chargeModifiedAttributes(self):
        return self.__chargeModifiedAttributes

    def isInvalid(self):
        """True when there is no item or it is not of category 'Fighter'."""
        return ((self.__item is None) or (self.__item.category.name != 'Fighter'))

    def item(self):
        return self.__item

    def hasAmmo(self):
        return (self.charge is not None)

    def isDealingDamage(self):
        """True if any active ability currently produces a non-zero volley."""
        volleyParams = self.getVolleyParametersPerEffect()
        for effectData in volleyParams.values():
            for volley in effectData.values():
                if (volley.total > 0):
                    return True
        return False

    def getVolleyParametersPerEffect(self, targetProfile=None):
        """Return {effectID: {time: DmgTypes}} volleys, resists applied.

        The unresisted base volley is computed once and cached in
        ``__baseVolley``; the target profile's resistances (if any) are
        applied on every call.
        """
        if ((not self.active) or (self.amount <= 0)):
            return {}
        if (self.__baseVolley is None):
            self.__baseVolley = {}
            for ability in self.abilities:
                self.__baseVolley[ability.effectID] = {0: ability.getVolley()}
        adjustedVolley = {}
        for (effectID, effectData) in self.__baseVolley.items():
            adjustedVolley[effectID] = {}
            for (volleyTime, volleyValue) in effectData.items():
                # Scale each damage type by (1 - resist) from the profile;
                # a missing profile attribute counts as zero resistance.
                adjustedVolley[effectID][volleyTime] = DmgTypes(em=(volleyValue.em * (1 - getattr(targetProfile, 'emAmount', 0))), thermal=(volleyValue.thermal * (1 - getattr(targetProfile, 'thermalAmount', 0))), kinetic=(volleyValue.kinetic * (1 - getattr(targetProfile, 'kineticAmount', 0))), explosive=(volleyValue.explosive * (1 - getattr(targetProfile, 'explosiveAmount', 0))))
        return adjustedVolley

    def getVolleyPerEffect(self, targetProfile=None):
        """Flatten the per-time volley map to one DmgTypes per effect (t=0)."""
        volleyParams = self.getVolleyParametersPerEffect(targetProfile=targetProfile)
        volleyMap = {}
        for (effectID, volleyData) in volleyParams.items():
            volleyMap[effectID] = volleyData[0]
        return volleyMap

    def getVolley(self, targetProfile=None):
        """Total volley across all abilities, summed per damage type."""
        volleyParams = self.getVolleyParametersPerEffect(targetProfile=targetProfile)
        em = 0
        therm = 0
        kin = 0
        exp = 0
        for volleyData in volleyParams.values():
            em += volleyData[0].em
            therm += volleyData[0].thermal
            kin += volleyData[0].kinetic
            exp += volleyData[0].explosive
        return DmgTypes(em, therm, kin, exp)

    def getDps(self, targetProfile=None):
        """Total DPS across all abilities, summed per damage type."""
        em = 0
        thermal = 0
        kinetic = 0
        explosive = 0
        for dps in self.getDpsPerEffect(targetProfile=targetProfile).values():
            em += dps.em
            thermal += dps.thermal
            kinetic += dps.kinetic
            explosive += dps.explosive
        return DmgTypes(em=em, thermal=thermal, kinetic=kinetic, explosive=explosive)

    def getDpsPerEffect(self, targetProfile=None):
        """Per-effect DPS using cycle times optimized for maximum total DPS."""
        if ((not self.active) or (self.amount <= 0)):
            return {}
        cycleParams = self.getCycleParametersPerEffectOptimizedDps(targetProfile=targetProfile)
        dpsMap = {}
        for ability in self.abilities:
            if (ability.effectID in cycleParams):
                cycleTime = cycleParams[ability.effectID].averageTime
                dpsMap[ability.effectID] = ability.getDps(targetProfile=targetProfile, cycleTimeOverride=cycleTime)
        return dpsMap

    def getCycleParametersPerEffectOptimizedDps(self, targetProfile=None, reloadOverride=None):
        """Pick whichever cycling strategy yields more total DPS.

        Compares running only the never-reloading abilities forever against
        running everything with reload pauses, and returns the cycle map of
        the better option.
        """
        cycleParamsInfinite = self.getCycleParametersPerEffectInfinite()
        cycleParamsReload = self.getCycleParametersPerEffect(reloadOverride=reloadOverride)
        dpsMapOnlyInfinite = {}
        dpsMapAllWithReloads = {}
        for ability in self.abilities:
            if (ability.effectID in cycleParamsInfinite):
                cycleTime = cycleParamsInfinite[ability.effectID].averageTime
                dpsMapOnlyInfinite[ability.effectID] = ability.getDps(targetProfile=targetProfile, cycleTimeOverride=cycleTime)
            if (ability.effectID in cycleParamsReload):
                cycleTime = cycleParamsReload[ability.effectID].averageTime
                dpsMapAllWithReloads[ability.effectID] = ability.getDps(targetProfile=targetProfile, cycleTimeOverride=cycleTime)
        totalOnlyInfinite = sum((i.total for i in dpsMapOnlyInfinite.values()))
        totalAllWithReloads = sum((i.total for i in dpsMapAllWithReloads.values()))
        return (cycleParamsInfinite if (totalOnlyInfinite >= totalAllWithReloads) else cycleParamsReload)

    def getCycleParametersPerEffectInfinite(self):
        """Cycle info for abilities with unlimited shots (no reload ever)."""
        return {a.effectID: CycleInfo(a.cycleTime, 0, math.inf, False) for a in self.abilities if ((a.numShots == 0) and (a.cycleTime > 0))}

    def getCycleParametersPerEffect(self, reloadOverride=None):
        """Cycle info per ability, accounting for refueling/reload pauses.

        The ability that exhausts its shots first dictates when the whole
        squadron returns to refuel; other abilities' cycles are fitted into
        that window, possibly with one shortened final shot.
        """
        factorReload = (reloadOverride if (reloadOverride is not None) else self.owner.factorReload)
        if (not factorReload):
            return {a.effectID: CycleInfo(a.cycleTime, 0, math.inf, False) for a in self.abilities if (a.cycleTime > 0)}
        limitedAbilities = [a for a in self.abilities if ((a.numShots > 0) and (a.cycleTime > 0))]
        # With no shot-limited abilities there is never a reason to refuel.
        if (len(limitedAbilities) == 0):
            return {a.effectID: CycleInfo(a.cycleTime, 0, math.inf, False) for a in self.abilities if (a.cycleTime > 0)}
        validAbilities = [a for a in self.abilities if (a.cycleTime > 0)]
        if (len(validAbilities) == 0):
            return {}
        # The ability whose ammo runs out soonest forces the refuel trip.
        mostLimitedAbility = min(limitedAbilities, key=(lambda a: (a.cycleTime * a.numShots)))
        durationToRefuel = (mostLimitedAbility.cycleTime * mostLimitedAbility.numShots)
        cyclesUntilRefuel = {mostLimitedAbility.effectID: (mostLimitedAbility.numShots, None)}
        for ability in (a for a in validAbilities if (a is not mostLimitedAbility)):
            # Fit this ability's cycles into the refuel window; a non-zero
            # remainder becomes one truncated extra shot.
            fullCycles = int(floatUnerr((durationToRefuel / ability.cycleTime)))
            extraShotTime = floatUnerr((durationToRefuel - (fullCycles * ability.cycleTime)))
            if (extraShotTime == 0):
                extraShotTime = None
            cyclesUntilRefuel[ability.effectID] = (fullCycles, extraShotTime)
        refuelTimes = {}
        for ability in validAbilities:
            (spentShots, extraShotTime) = cyclesUntilRefuel[ability.effectID]
            if (extraShotTime is not None):
                spentShots += 1
            refuelTimes[ability.effectID] = ability.getReloadTime(spentShots)
        # All abilities wait for the slowest reload before cycling resumes.
        refuelTime = max(refuelTimes.values())
        cycleParams = {}
        for ability in validAbilities:
            (regularShots, extraShotTime) = cyclesUntilRefuel[ability.effectID]
            sequence = []
            if (extraShotTime is not None):
                if (regularShots > 0):
                    sequence.append(CycleInfo(ability.cycleTime, 0, regularShots, False))
                sequence.append(CycleInfo(extraShotTime, refuelTime, 1, True))
            else:
                # No truncated shot: the last full cycle absorbs the reload.
                regularShotsNonReload = (regularShots - 1)
                if (regularShotsNonReload > 0):
                    sequence.append(CycleInfo(ability.cycleTime, 0, regularShotsNonReload, False))
                sequence.append(CycleInfo(ability.cycleTime, refuelTime, 1, True))
            cycleParams[ability.effectID] = CycleSequence(sequence, math.inf)
        return cycleParams

    def maxRange(self):
        """Best-guess maximum range from item attrs, else charge flight range."""
        attrs = ('shieldTransferRange', 'powerTransferRange', 'energyDestabilizationRange', 'empFieldRange', 'ecmBurstRange', 'maxRange')
        for attr in attrs:
            maxRange = self.getModifiedItemAttr(attr, None)
            if (maxRange is not None):
                return maxRange
        if (self.charge is not None):
            delay = self.getModifiedChargeAttr('explosionDelay', None)
            speed = self.getModifiedChargeAttr('maxVelocity', None)
            if ((delay is not None) and (speed is not None)):
                # Flight range = flight time (ms -> s) * velocity.
                return ((delay / 1000.0) * speed)

    def falloff(self):
        """Falloff distance, from whichever falloff attribute is present."""
        attrs = ('falloff', 'falloffEffectiveness')
        for attr in attrs:
            falloff = self.getModifiedItemAttr(attr, None)
            if (falloff is not None):
                return falloff

    def hp(self):
        """Raw hitpoints per layer: {'shield': ..., 'armor': ..., 'hull': ...}."""
        hp = {}
        for (type, attr) in (('shield', 'shieldCapacity'), ('armor', 'armorHP'), ('hull', 'hp')):
            hp[type] = self.getModifiedItemAttr(attr)
        return hp

    def ehp(self):
        """Effective HP against the owner's damage pattern (cached)."""
        if (self.__ehp is None):
            if ((self.owner is None) or (self.owner.damagePattern is None)):
                ehp = self.hp
            else:
                ehp = self.owner.damagePattern.calculateEhp(self)
            self.__ehp = ehp
        return self.__ehp

    def calculateShieldRecharge(self):
        # Passive shield recharge; sqrt(0.25) corresponds to evaluating the
        # recharge curve at 25% shield — presumably the peak rate (confirm).
        capacity = self.getModifiedItemAttr('shieldCapacity')
        rechargeRate = (self.getModifiedItemAttr('shieldRechargeRate') / 1000.0)
        return ((((10 / rechargeRate) * math.sqrt(0.25)) * (1 - math.sqrt(0.25))) * capacity)

    # NOTE(review): orphaned tuple below looks like the argument list of a
    # stripped decorator (e.g. sqlalchemy @validates); confirm upstream.
    ('ID', 'itemID', 'chargeID', 'amount')

    def validator(self, key, val):
        """Validate persisted fields; amount additionally allows the -1 sentinel."""
        map = {'ID': (lambda _val: isinstance(_val, int)), 'itemID': (lambda _val: isinstance(_val, int)), 'chargeID': (lambda _val: isinstance(_val, int)), 'amount': (lambda _val: (isinstance(_val, int) and (_val >= (- 1))))}
        if (not map[key](val)):
            raise ValueError(((str(val) + ' is not a valid value for ') + key))
        else:
            return val

    def clear(self):
        """Drop all cached/calculated values so they get recomputed."""
        self.__baseVolley = None
        self.__miningyield = None
        self.__ehp = None
        self.itemModifiedAttributes.clear()
        self.chargeModifiedAttributes.clear()
        [x.clear() for x in self.abilities]

    def canBeApplied(self, projectedOnto):
        """Whether this fighter may be projected onto ``projectedOnto``'s ship."""
        item = self.item
        if (item.offensive and (projectedOnto.ship.getModifiedItemAttr('disallowOffensiveModifiers') == 1)):
            # These offensive effects are exempt from the disallow flag.
            offensiveNonModifiers = {'energyDestabilizationNew', 'leech', 'energyNosferatuFalloff', 'energyNeutralizerFalloff'}
            if (not offensiveNonModifiers.intersection(set(item.effects))):
                return False
        if (item.assistive and (projectedOnto.ship.getModifiedItemAttr('disallowAssistance') == 1)):
            return False
        else:
            return True

    def calculateModifiedAttributes(self, fit, runTime, forceProjected=False, forcedProjRange=DEFAULT):
        """Run all matching ability effect handlers against ``fit``.

        Ungrouped effects are applied once per fighter in the squadron.
        """
        if (not self.active):
            return
        if (self.projected or forceProjected):
            context = ('projected', 'fighter')
            projected = True
        else:
            context = ('fighter',)
            projected = False
        projectionRange = (self.projectionRange if (forcedProjRange is DEFAULT) else forcedProjRange)
        for ability in self.abilities:
            if (not ability.active):
                continue
            effect = ability.effect
            if ((effect.runTime == runTime) and effect.activeByDefault and ((projected and effect.isType('projected')) or (not projected))):
                if ability.grouped:
                    effect.handler(fit, self, context, projectionRange, effect=effect)
                else:
                    # Apply once per fighter in the squadron.
                    i = 0
                    while (i != self.amount):
                        effect.handler(fit, self, context, projectionRange, effect=effect)
                        i += 1

    def __deepcopy__(self, memo):
        """Copy squadron size, active state, ability states and projection range."""
        copy = Fighter(self.item)
        copy._amount = self._amount
        copy.active = self.active
        for ability in self.abilities:
            copyAbility = next(filter((lambda a: (a.effectID == ability.effectID)), copy.abilities))
            copyAbility.active = ability.active
        copy.projectionRange = self.projectionRange
        return copy

    def rebase(self, item):
        """Re-init on a different item while preserving user-set state."""
        amount = self._amount
        active = self.active
        abilityEffectStates = {a.effectID: a.active for a in self.abilities}
        projectionRange = self.projectionRange
        Fighter.__init__(self, item)
        self._amount = amount
        self.active = active
        for ability in self.abilities:
            if (ability.effectID in abilityEffectStates):
                ability.active = abilityEffectStates[ability.effectID]
        self.projectionRange = projectionRange

    def fits(self, fit):
        """True when the fit has at least one slot of this fighter's type."""
        if (fit.getNumSlots(self.slot) == 0):
            return False
        return True

    def canDealDamage(self, ignoreState=False, ignoreAbilityState=False):
        """True if any (optionally: active) ability has a damaging effect."""
        if (self.item is None):
            return False
        if ((not self.active) and (not ignoreState)):
            return False
        for ability in self.abilities:
            if ((not ability.active) and (not ignoreAbilityState)):
                continue
            if ability.effect.dealsDamage:
                return True
        return False
def test_twocopy_seperates(tmpdir):
    """End-to-end check: saved Pauli measurement files must yield a mean
    expectation prediction of at least 0.5."""
    learn_states_q.run_and_save(n=5, n_paulis=10, n_sweeps=250, n_shots=250, save_dir=tmpdir, use_engine=False)
    # Skip the basis files; each remaining file encodes one Pauli string.
    pauli_files = [f for f in os.listdir(tmpdir) if (os.path.isfile(os.path.join(tmpdir, f)) and ('basis' not in f))]
    exp_predictions = []
    for fname in pauli_files:
        t = np.load(os.path.join(tmpdir, fname))
        # Pauli label = last dash-separated chunk minus the 4-char extension.
        pauli = fname.split('-')[(- 1)][:(- 4)]
        exp_predictions.append(_predict_exp(t, pauli))
    assert (np.mean(exp_predictions) >= 0.5)
class TestMarkersWithParametrization():
    """Tests for the interaction of pytest marks with @pytest.mark.parametrize.

    NOTE(review): both the decorators on these methods and the decorator text
    inside the embedded test-source strings look mangled (e.g. bare
    ``.parametrize`` / ``.foo`` where ``@pytest.mark....`` is expected) —
    this copy of the source appears to have had decorator prefixes stripped;
    confirm against upstream before running.
    """

    def test_simple_mark(self, pytester: Pytester) -> None:
        """A mark on a parametrized test applies to all items; per-param marks only to theirs."""
        s = '\n import pytest\n\n .foo\n .parametrize(("n", "expected"), [\n (1, 2),\n pytest.param(1, 3, marks=pytest.mark.bar),\n (2, 3),\n ])\n def test_increment(n, expected):\n assert n + 1 == expected\n '
        items = pytester.getitems(s)
        assert (len(items) == 3)
        for item in items:
            assert ('foo' in item.keywords)
        assert ('bar' not in items[0].keywords)
        assert ('bar' in items[1].keywords)
        assert ('bar' not in items[2].keywords)

    def test_select_based_on_mark(self, pytester: Pytester) -> None:
        """`-m` selects only the parametrized item carrying the mark."""
        s = '\n import pytest\n\n .parametrize(("n", "expected"), [\n (1, 2),\n pytest.param(2, 3, marks=pytest.mark.foo),\n (3, 4),\n ])\n def test_increment(n, expected):\n assert n + 1 == expected\n '
        pytester.makepyfile(s)
        rec = pytester.inline_run('-m', 'foo')
        (passed, skipped, fail) = rec.listoutcomes()
        assert (len(passed) == 1)
        assert (len(skipped) == 0)
        assert (len(fail) == 0)

    def test_simple_xfail(self, pytester: Pytester) -> None:
        """An xfail mark on one param set produces one xfailed (skipped) item."""
        s = '\n import pytest\n\n .parametrize(("n", "expected"), [\n (1, 2),\n pytest.param(1, 3, marks=pytest.mark.xfail),\n (2, 3),\n ])\n def test_increment(n, expected):\n assert n + 1 == expected\n '
        pytester.makepyfile(s)
        reprec = pytester.inline_run()
        reprec.assertoutcome(passed=2, skipped=1)

    def test_simple_xfail_single_argname(self, pytester: Pytester) -> None:
        """Same as above but with a single (non-tuple) argname."""
        s = '\n import pytest\n\n .parametrize("n", [\n 2,\n pytest.param(3, marks=pytest.mark.xfail),\n 4,\n ])\n def test_isEven(n):\n assert n % 2 == 0\n '
        pytester.makepyfile(s)
        reprec = pytester.inline_run()
        reprec.assertoutcome(passed=2, skipped=1)

    def test_xfail_with_arg(self, pytester: Pytester) -> None:
        """xfail with a positional condition string works on a param."""
        s = '\n import pytest\n\n .parametrize(("n", "expected"), [\n (1, 2),\n pytest.param(1, 3, marks=pytest.mark.xfail("True")),\n (2, 3),\n ])\n def test_increment(n, expected):\n assert n + 1 == expected\n '
        pytester.makepyfile(s)
        reprec = pytester.inline_run()
        reprec.assertoutcome(passed=2, skipped=1)

    def test_xfail_with_kwarg(self, pytester: Pytester) -> None:
        """xfail with a reason= keyword works on a param."""
        s = '\n import pytest\n\n .parametrize(("n", "expected"), [\n (1, 2),\n pytest.param(1, 3, marks=pytest.mark.xfail(reason="some bug")),\n (2, 3),\n ])\n def test_increment(n, expected):\n assert n + 1 == expected\n '
        pytester.makepyfile(s)
        reprec = pytester.inline_run()
        reprec.assertoutcome(passed=2, skipped=1)

    def test_xfail_with_arg_and_kwarg(self, pytester: Pytester) -> None:
        """xfail with both a condition and a reason works on a param."""
        s = '\n import pytest\n\n .parametrize(("n", "expected"), [\n (1, 2),\n pytest.param(1, 3, marks=pytest.mark.xfail("True", reason="some bug")),\n (2, 3),\n ])\n def test_increment(n, expected):\n assert n + 1 == expected\n '
        pytester.makepyfile(s)
        reprec = pytester.inline_run()
        reprec.assertoutcome(passed=2, skipped=1)

    # NOTE(review): orphaned line below is a stripped parametrize decorator.
    .parametrize('strict', [True, False])
    def test_xfail_passing_is_xpass(self, pytester: Pytester, strict: bool) -> None:
        """A passing xfail item is XPASS: failed under strict, passed otherwise."""
        s = '\n import pytest\n\n m = pytest.mark.xfail("sys.version_info > (0, 0, 0)", reason="some bug", strict={strict})\n\n .parametrize(("n", "expected"), [\n (1, 2),\n pytest.param(2, 3, marks=m),\n (3, 4),\n ])\n def test_increment(n, expected):\n assert n + 1 == expected\n '.format(strict=strict)
        pytester.makepyfile(s)
        reprec = pytester.inline_run()
        (passed, failed) = ((2, 1) if strict else (3, 0))
        reprec.assertoutcome(passed=passed, failed=failed)

    def test_parametrize_called_in_generate_tests(self, pytester: Pytester) -> None:
        """metafunc.parametrize in pytest_generate_tests honors per-param marks."""
        s = '\n import pytest\n\n\n def pytest_generate_tests(metafunc):\n passingTestData = [(1, 2),\n (2, 3)]\n failingTestData = [(1, 3),\n (2, 2)]\n\n testData = passingTestData + [pytest.param(*d, marks=pytest.mark.xfail)\n for d in failingTestData]\n metafunc.parametrize(("n", "expected"), testData)\n\n\n def test_increment(n, expected):\n assert n + 1 == expected\n '
        pytester.makepyfile(s)
        reprec = pytester.inline_run()
        reprec.assertoutcome(passed=2, skipped=2)

    def test_parametrize_ID_generation_string_int_works(self, pytester: Pytester) -> None:
        """Mixing int and str values (0 vs '0') must still yield distinct ids."""
        pytester.makepyfile("\n import pytest\n\n \n def myfixture():\n return 'example'\n .parametrize(\n 'limit', (0, '0'))\n def test_limit(limit, myfixture):\n return\n ")
        reprec = pytester.inline_run()
        reprec.assertoutcome(passed=2)

    # NOTE(review): orphaned line below is a stripped parametrize decorator.
    .parametrize('strict', [True, False])
    def test_parametrize_marked_value(self, pytester: Pytester) -> None:
        """marks= accepts a single mark or a list of marks equivalently."""
        s = '\n import pytest\n\n .parametrize(("n", "expected"), [\n pytest.param(\n 2,3,\n marks=pytest.mark.xfail("sys.version_info > (0, 0, 0)", reason="some bug", strict={strict}),\n ),\n pytest.param(\n 2,3,\n marks=[pytest.mark.xfail("sys.version_info > (0, 0, 0)", reason="some bug", strict={strict})],\n ),\n ])\n def test_increment(n, expected):\n assert n + 1 == expected\n '.format(strict=strict)
        pytester.makepyfile(s)
        reprec = pytester.inline_run()
        (passed, failed) = ((0, 2) if strict else (2, 0))
        reprec.assertoutcome(passed=passed, failed=failed)

    def test_pytest_make_parametrize_id(self, pytester: Pytester) -> None:
        """A conftest pytest_make_parametrize_id hook controls test ids."""
        pytester.makeconftest('\n def pytest_make_parametrize_id(config, val):\n return str(val * 2)\n ')
        pytester.makepyfile('\n import pytest\n\n .parametrize("x", range(2))\n def test_func(x):\n pass\n ')
        result = pytester.runpytest('-v')
        result.stdout.fnmatch_lines(['*test_func*0*PASS*', '*test_func*2*PASS*'])

    def test_pytest_make_parametrize_id_with_argname(self, pytester: Pytester) -> None:
        """The hook's argname parameter allows per-argument id rules."""
        pytester.makeconftest("\n def pytest_make_parametrize_id(config, val, argname):\n return str(val * 2 if argname == 'x' else val * 10)\n ")
        pytester.makepyfile('\n import pytest\n\n .parametrize("x", range(2))\n def test_func_a(x):\n pass\n\n .parametrize("y", [1])\n def test_func_b(y):\n pass\n ')
        result = pytester.runpytest('-v')
        result.stdout.fnmatch_lines(['*test_func_a*0*PASS*', '*test_func_a*2*PASS*', '*test_func_b*10*PASS*'])

    def test_parametrize_positional_args(self, pytester: Pytester) -> None:
        """parametrize accepts its indirect flag positionally."""
        pytester.makepyfile('\n import pytest\n\n .parametrize("a", [1], False)\n def test_foo(a):\n pass\n ')
        result = pytester.runpytest()
        result.assert_outcomes(passed=1)

    def test_parametrize_iterator(self, pytester: Pytester) -> None:
        """Generator-based ids work and non-string ids get converted to str."""
        pytester.makepyfile('\n import itertools\n import pytest\n\n id_parametrize = pytest.mark.parametrize(\n ids=("param%d" % i for i in itertools.count())\n )\n\n _parametrize(\'y\', [\'a\', \'b\'])\n def test1(y):\n pass\n\n _parametrize(\'y\', [\'a\', \'b\'])\n def test2(y):\n pass\n\n .parametrize("a, b", [(1, 2), (3, 4)], ids=itertools.count())\n def test_converted_to_str(a, b):\n pass\n ')
        result = pytester.runpytest('-vv', '-s')
        result.stdout.fnmatch_lines(['test_parametrize_iterator.py::test1[param0] PASSED', 'test_parametrize_iterator.py::test1[param1] PASSED', 'test_parametrize_iterator.py::test2[param0] PASSED', 'test_parametrize_iterator.py::test2[param1] PASSED', 'test_parametrize_iterator.py::test_converted_to_str[0] PASSED', 'test_parametrize_iterator.py::test_converted_to_str[1] PASSED', '*= 6 passed in *'])
def propagate_changes_from_baseline(baseline_dir, alternatives_dir, combi_dir, version_id='', comments=''):
    """Rebuild every alternative/combination model after the baseline changed.

    Walks ``alternatives_dir`` (two levels: alternative / implementation
    level) plus ``combi_dir``, updates each model's newest build-instructions
    file with the new baseline parent and a log entry, then rebuilds the
    model's INP from the baseline.

    Args:
        baseline_dir: directory of the baseline Model.
        alternatives_dir: root of per-alternative model directories.
        combi_dir: root of combined-model directories.
        version_id: label for this propagation; a timestamp is appended.
        comments: free-text note recorded in the build-instructions log.
    """
    # Timestamp suffix makes each propagation's version id unique.
    version_id += ('_' + datetime.now().strftime('%y%m%d%H%M%S'))
    model_dirs = []
    for alt in os.listdir(alternatives_dir):
        # Each alternative holds one subdirectory per implementation level.
        for imp_level in os.listdir(os.path.join(alternatives_dir, alt)):
            model_dirs.append(os.path.join(alternatives_dir, alt, imp_level))
    model_dirs += [os.path.join(combi_dir, x) for x in os.listdir(combi_dir)]
    baseline = Model(baseline_dir)
    base_inp_path = baseline.inp.path
    for model_dir in model_dirs:
        model = Model(model_dir)
        vc_directory = os.path.join(model_dir, 'vc')
        # The most recent build-instructions file drives the rebuild.
        latest_bi = vc_utils.newest_file(vc_directory)
        bi = inp.BuildInstructions(latest_bi)
        # Record the baseline INP (keyed by path, valued by mtime) as parent.
        bi.metadata['Parent Models']['Baseline'] = {base_inp_path: vc_utils.modification_date(base_inp_path)}
        bi.metadata['Log'].update({version_id: comments})
        bi.save(vc_directory, (version_id + '.txt'))
        print('rebuilding {} with changes to baseline'.format(model.name))
        bi.build(baseline_dir, model.inp.path)
(tryfirst=True)
def pytest_cmdline_main(config: Config) -> Optional[Union[(int, ExitCode)]]:
    """Handle ``--markers``: print all registered markers and exit with 0.

    Returns None when ``--markers`` was not given, so the normal test run
    proceeds.
    """
    import _pytest.config
    if config.option.markers:
        config._do_configure()
        tw = _pytest.config.create_terminal_writer(config)
        for line in config.getini('markers'):
            # Each ini entry is 'name: description'; split on the first colon only.
            parts = line.split(':', 1)
            name = parts[0]
            rest = (parts[1] if (len(parts) == 2) else '')
            # NOTE(review): the '.%s:' prefix looks truncated (upstream pytest
            # prints '@pytest.mark.%s:'); confirm against upstream.
            tw.write(('.%s:' % name), bold=True)
            tw.line(rest)
            tw.line()
        config._ensure_unconfigure()
        return 0
    return None
class LinearDecayEnvelope(_Envelope):
    """Amplitude envelope that decays linearly from ``peak`` to zero over the
    note's duration, then stays at zero indefinitely."""

    def __init__(self, peak=1.0):
        # Clamp the peak amplitude into [0, 1].
        self.peak = max(min(1.0, peak), 0)

    def get_generator(self, sample_rate, duration):
        """Yield one amplitude factor per sample; zeros after the duration."""
        amp = self.peak
        n_samples = int(sample_rate * duration)
        # First sample is at full peak, last is at peak / n_samples.
        for remaining in range(n_samples, 0, -1):
            yield (remaining / n_samples) * amp
        while True:
            yield 0
('/config', methods=['GET', 'OPTIONS'])
(anonymous=False)
def config():
    """Return the frontend-visible application configuration as JSON.

    NOTE(review): the route/auth decorators for this view appear to have been
    stripped from this copy of the source (see the orphaned tuples above it).
    """
    response = jsonify({'config': frontend_visible_config(app.config), 'features': features.get_features(), 'oauth': get_oauth_config(), 'external_login': get_external_login_config(), 'registry_state': app.config.get('REGISTRY_STATE', 'normal'), 'account_recovery_mode': app.config.get('ACCOUNT_RECOVERY_MODE', False)})
    return response
def sensitivity(tp: torch.LongTensor, fp: torch.LongTensor, fn: torch.LongTensor, tn: torch.LongTensor, reduction: Optional[str]=None, class_weights: Optional[List[float]]=None, zero_division: Union[(str, float)]=1.0) -> torch.Tensor:
    """Compute sensitivity (recall) from confusion-matrix counts.

    Thin wrapper that delegates to ``_compute_metric`` with the
    ``_sensitivity`` metric function; see that helper for the reduction,
    class-weighting and zero-division semantics.
    """
    return _compute_metric(_sensitivity, tp, fp, fn, tn, reduction=reduction, class_weights=class_weights, zero_division=zero_division)
def sigmoid_focal_loss(pred, target, weight=None, gamma=2.0, alpha=0.25, reduction='mean', avg_factor=None):
    """Sigmoid focal loss with optional per-sample weighting.

    Args:
        pred: raw logits.
        target: targets matching ``pred``.
        weight: optional per-sample weights; reshaped to a column vector so
            one weight broadcasts across the class dimension.
        gamma: focusing parameter.
        alpha: positive/negative balance parameter.
        reduction: reduction mode handled by ``weight_reduce_loss``.
        avg_factor: optional normalizer used instead of the element count.
    """
    loss = _sigmoid_focal_loss(pred, target, gamma, alpha)
    if (weight is not None):
        weight = weight.view((- 1), 1)
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss
class NamespaceGCWorker(QueueWorker):
    """Queue worker that garbage-collects an entire namespace."""

    def process_queue_item(self, job_details):
        """Run GC under a global lock; sleep the worker if another GC holds it."""
        try:
            with GlobalLock('LARGE_GARBAGE_COLLECTION', lock_ttl=(NAMESPACE_GC_TIMEOUT + LOCK_TIMEOUT_PADDING)):
                self._perform_gc(job_details)
        except LockNotAcquiredException:
            logger.debug('Could not acquire global lock for garbage collection')
            raise WorkerSleepException

    def _perform_gc(self, job_details):
        """Delete the namespace identified by the queued marker id."""
        logger.debug('Got namespace GC queue item: %s', job_details)
        marker_id = job_details['marker_id']
        if (not model.user.delete_namespace_via_marker(marker_id, all_queues)):
            # Deletion was interrupted; raising makes the queue retry the item.
            raise Exception('GC interrupted; will retry')
        gc_namespaces_purged.inc()
def get_contractreceivechannelnew_data_from_event(chain_state: ChainState, event: DecodedEvent) -> Optional[NewChannelDetails]:
    """Build NewChannelDetails from a channel-opened blockchain event.

    Returns None when neither participant is our address, i.e. the new
    channel does not involve this node.

    NOTE(review): the asserts assume the token network (and its registry) are
    already tracked in ``chain_state`` — confirm callers guarantee this.
    """
    token_network_address = TokenNetworkAddress(event.originating_contract)
    data = event.event_data
    args = data['args']
    participant1 = args['participant1']
    participant2 = args['participant2']
    our_address = chain_state.our_address
    # Work out which side of the channel is the counterparty.
    if (our_address == participant1):
        partner_address = participant2
    elif (our_address == participant2):
        partner_address = participant1
    else:
        return None
    token_network_registry = views.get_token_network_registry_by_token_network_address(chain_state, token_network_address)
    assert (token_network_registry is not None), 'Token network registry missing'
    token_network = views.get_token_network_by_address(chain_state=chain_state, token_network_address=token_network_address)
    assert (token_network is not None), 'Token network missing'
    token_network_registry_address = token_network_registry.address
    token_address = token_network.token_address
    return NewChannelDetails(chain_id=event.chain_id, token_network_registry_address=token_network_registry_address, token_address=token_address, token_network_address=token_network_address, our_address=our_address, partner_address=partner_address)
class Effect5308(BaseEffect):
    """Passive hull bonus: boosts 'aoeVelocity' of Light Missile charges,
    scaled by the Caldari Destroyer skill via the shipBonusCD2 attribute."""

    type = 'passive'

    # NOTE(review): no ``self`` parameter — likely a stripped @staticmethod
    # decorator; confirm against upstream.
    def handler(fit, ship, context, projectionRange, **kwargs):
        fit.modules.filteredChargeBoost((lambda mod: mod.charge.requiresSkill('Light Missiles')), 'aoeVelocity', ship.getModifiedItemAttr('shipBonusCD2'), skill='Caldari Destroyer', **kwargs)
class SchedulePreviewSection():
    """Schedule-preview section with a title and optional primary/secondary CTAs.

    NOTE(review): the field annotations plus ``from_block(cls, ...)`` suggest
    stripped @strawberry.type / @classmethod decorators in this copy of the
    source — confirm against upstream.
    """

    id: strawberry.ID
    title: str
    primary_cta: (CTA | None)
    secondary_cta: (CTA | None)

    def from_block(cls, block) -> Self:
        """Build a section from a CMS block; CTAs with empty labels become None."""
        primary_cta = block.value['primary_cta']
        secondary_cta = block.value['secondary_cta']
        return cls(id=block.id, title=block.value['title'], primary_cta=(CTA.from_block(primary_cta) if primary_cta['label'] else None), secondary_cta=(CTA.from_block(secondary_cta) if secondary_cta['label'] else None))
class ModelRes512(nn.Module):
    """ResNet-backed image encoder producing 768-dim per-location embeddings.

    Wraps a torchvision ResNet, drops its avgpool/fc head to keep the spatial
    feature map, and projects each spatial location through two linear layers
    into a 768-dim space, returning both the per-location embeddings and
    their mean-pooled summary.
    """

    def __init__(self, res_base_model):
        """
        Args:
            res_base_model: backbone name; only keys of ``self.resnet_dict``
                (currently 'resnet50') are valid.

        Raises:
            ValueError: if ``res_base_model`` is not a registered backbone.
        """
        super(ModelRes512, self).__init__()
        self.resnet_dict = {'resnet50': models.resnet50(pretrained=True)}
        self.resnet = self._get_res_basemodel(res_base_model)
        num_ftrs = int(self.resnet.fc.in_features)
        # All children except the final avgpool + fc: keeps the spatial map.
        self.res_features = nn.Sequential(*list(self.resnet.children())[:(-2)])
        self.res_l1 = nn.Linear(num_ftrs, num_ftrs)
        self.res_l2 = nn.Linear(num_ftrs, 768)

    def _get_res_basemodel(self, res_model_name):
        """Look up the backbone by name; raise ValueError for unknown names."""
        try:
            res_model = self.resnet_dict[res_model_name]
        except KeyError:
            # BUG FIX: the original did ``raise '<string>'`` from a bare
            # except, which is a TypeError in Python 3 and masked the cause.
            # Raise a proper exception for the one failure mode (bad key).
            raise ValueError(
                'Invalid model name. Check the config file and pass one of: resnet18 or resnet50'
            ) from None
        print('Image feature extractor:', res_model_name)
        return res_model

    def forward(self, img):
        """Encode a batch of images.

        Args:
            img: float tensor of shape (batch, channels, H, W).

        Returns:
            Tuple ``(out_emb, out_pool)``: per-location embeddings of shape
            (batch, n_locations, 768) and their mean over locations
            (batch, 768).
        """
        batch_size = img.shape[0]
        res_fea = self.res_features(img)
        # Flatten the spatial grid into a token axis: (b, d, h, w) -> (b, h*w, d).
        res_fea = rearrange(res_fea, 'b d n1 n2 -> b (n1 n2) d')
        h = rearrange(res_fea, 'b n d -> (b n) d')
        x = self.res_l1(h)
        x = F.relu(x)
        x = self.res_l2(x)
        out_emb = rearrange(x, '(b n) d -> b n d', b=batch_size)
        out_pool = torch.mean(out_emb, dim=1)
        return (out_emb, out_pool)
def test_jsonify_roundtrip_sequence():
    """A !jsonify-tagged YAML sequence parses to a Jsonify wrapper, renders as
    compact JSON, and round-trips back to the original YAML text."""
    yaml_string = " a: 1\n b: '1'\n c: !jsonify\n - v1\n - 22\n - 123.45\n - a: a value\n b: 123\n d: False\n "
    yaml = get_yaml_with_jsonify(yaml_string)
    assert (type(yaml['c']) is Jsonify)
    assert (type(yaml['c'].value) is CommentedSeq)
    assert (repr(yaml['c']) == f"Jsonify({yaml['c'].value!r})")
    assert (yaml['c'].value == ['v1', 22, 123.45, {'a': 'a value', 'b': 123}])
    # get_value must serialize the wrapped sequence as compact JSON.
    assert (yaml['c'].get_value(Context()) == '["v1", 22, 123.45, {"a": "a value", "b": 123}]')
    roundtripped_string = get_string_from_yaml_with_jsonify(yaml)
    expected = "a: 1\nb: '1'\nc: !jsonify\n- v1\n- 22\n- 123.45\n- a: a value\n b: 123\nd: false\n"
    assert (roundtripped_string == expected)
def add_data_to_storage(storage_list, subject_data, brain_width, tumor_width, truth_dtype, modality_names):
    """Append one subject's modality volumes and labels to the storages.

    Args:
        storage_list: tuple of (modality_storage_list, truth_storage,
            brain_width_storage, tumor_width_storage); each storage exposes
            ``.name`` and ``.append``.
        subject_data: sequence whose first ``len(modality_names)`` entries are
            the modality volumes and whose LAST entry is the ground truth.
        brain_width: brain bounding-box array for the subject.
        tumor_width: tumor bounding-box array for the subject.
        truth_dtype: numpy dtype for the truth and width arrays.
        modality_names: expected names, in order, of the modality storages.

    Returns:
        0 on success, 1 on a storage-name mismatch (integer error-code
        convention kept for backward compatibility with existing callers).
    """
    (modality_storage_list, truth_storage, brain_width_storage, tumor_width_storage) = storage_list
    # Pair each storage with its expected name and data (replaces the
    # index-based range(len(...)) loop).
    for (modality_storage, modality_name, data) in zip(modality_storage_list, modality_names, subject_data):
        if modality_storage.name != modality_name:
            print_red('modality_storage.name != modality_name')
            return 1
        # [np.newaxis][np.newaxis] prepends two singleton axes: (1, 1, ...).
        modality_storage.append(np.asarray(data)[np.newaxis][np.newaxis])
    if truth_storage.name != 'truth':
        print_red('truth_storage.name != truth')
        return 1
    truth_storage.append(np.asarray(subject_data[-1], dtype=truth_dtype)[np.newaxis][np.newaxis])
    brain_width_storage.append(np.asarray(brain_width, dtype=truth_dtype)[np.newaxis])
    tumor_width_storage.append(np.asarray(tumor_width, dtype=truth_dtype)[np.newaxis])
    return 0
.parametrize('projdir_type', [str, Path])
def test_get_data_dir__from_user(projdir_type, tmp_path):
    """set_data_dir() must take precedence over both the PROJ_DATA env var
    and the internal proj directory when resolving the data directory."""
    tmpdir = (tmp_path / 'proj')          # directory supplied via set_data_dir
    tmpdir.mkdir()
    tmpdir_env = (tmp_path / 'proj_env')  # competing env-var directory
    tmpdir_env.mkdir()
    # Patch the module location and sys.prefix so the internal lookup paths
    # also resolve to candidate directories, then seed each with a proj.db.
    with proj_env(), patch.dict(os.environ, {'PROJ_DATA': str(tmpdir_env)}, clear=True), patch('pyproj.datadir.Path.absolute', return_value=(tmpdir / 'datadir.py')), patch('pyproj.datadir.sys.prefix', str(tmpdir_env)):
        create_projdb(tmpdir)
        create_projdb(tmpdir_env)
        # projdir_type exercises both str and Path arguments.
        set_data_dir(projdir_type(tmpdir))
        internal_proj_dir = (((tmpdir / 'proj_dir') / 'share') / 'proj')
        internal_proj_dir.mkdir(parents=True)
        create_projdb(internal_proj_dir)
        # The explicitly set directory wins over every other candidate.
        assert (get_data_dir() == str(tmpdir))
def align_and_update_state_dicts(model_state_dict, loaded_state_dict):
    """Copy checkpoint tensors into *model_state_dict* by suffix matching.

    A model key matches a checkpoint key when it ends with it (e.g.
    'module.backbone.w' matches 'backbone.w'); the longest matching checkpoint
    key wins.  Model keys with no match are left untouched.  Mutates
    model_state_dict in place and returns None.
    """
    current_keys = sorted(list(model_state_dict.keys()))
    loaded_keys = sorted(list(loaded_state_dict.keys()))
    # match_matrix[i, j] = len(loaded_keys[j]) when current_keys[i] ends with
    # it, else 0 — so the max over j picks the most specific (longest) match.
    match_matrix = [(len(j) if i.endswith(j) else 0) for i in current_keys for j in loaded_keys]
    match_matrix = torch.as_tensor(match_matrix).view(len(current_keys), len(loaded_keys))
    (max_match_size, idxs) = match_matrix.max(1)
    # Mark keys without any match so they are skipped below.
    idxs[(max_match_size == 0)] = (- 1)
    # Column widths for aligned logging of each key -> key assignment.
    max_size = (max([len(key) for key in current_keys]) if current_keys else 1)
    max_size_loaded = (max([len(key) for key in loaded_keys]) if loaded_keys else 1)
    log_str_template = '{: <{}} loaded from {: <{}} of shape {}'
    logger = logging.getLogger('monoflex.loading')
    for (idx_new, idx_old) in enumerate(idxs.tolist()):
        if (idx_old == (- 1)):
            continue
        key = current_keys[idx_new]
        key_old = loaded_keys[idx_old]
        model_state_dict[key] = loaded_state_dict[key_old]
        # Fix: the template/widths/logger above were computed but never used —
        # emit the intended per-key mapping record.
        logger.info(log_str_template.format(key, max_size, key_old, max_size_loaded, tuple(loaded_state_dict[key_old].shape)))
.parametrize('val, offset', [(set_test_value(pt.matrix(), np.arange((10 * 10), dtype=config.floatX).reshape((10, 10))), 0), (set_test_value(pt.matrix(), np.arange((10 * 10), dtype=config.floatX).reshape((10, 10))), (- 1)), (set_test_value(pt.vector(), np.arange(10, dtype=config.floatX)), 0)])
def test_ExtractDiag(val, offset):
    """The numba backend of pt.diag must agree with the Python implementation
    for the parametrized (val, offset) cases."""
    g = pt.diag(val, offset)
    g_fg = FunctionGraph(outputs=[g])
    # Supply test values only for free inputs; shared vars and constants
    # already carry their own data.
    compare_numba_and_py(g_fg, [i.tag.test_value for i in g_fg.inputs if (not isinstance(i, (SharedVariable, Constant)))])
def train(args, model, rank, world_size, train_loader, optimizer, epoch, sampler=None):
    """Run one distributed (FSDP-style) training epoch and return the mean loss.

    rank identifies this process; only rank 0 drives a progress bar and prints
    the epoch summary.  The loss statistic is all-reduced across ranks.
    """
    model.train()
    local_rank = int(os.environ['LOCAL_RANK'])
    # fsdp_loss[0] accumulates summed loss, fsdp_loss[1] the divisor count;
    # keeping both in one tensor lets a single all_reduce aggregate them.
    fsdp_loss = torch.zeros(2).to(local_rank)
    if sampler:
        sampler.set_epoch(epoch)  # reshuffle the distributed shards each epoch
    if (rank == 0):
        inner_pbar = tqdm.tqdm(range(len(train_loader)), colour='blue', desc='r0 Training Epoch')
    for batch in train_loader:
        # Move every tensor in the batch dict onto this rank's device.
        for key in batch.keys():
            batch[key] = batch[key].to(local_rank)
        optimizer.zero_grad()
        output = model(input_ids=batch['source_ids'], attention_mask=batch['source_mask'], labels=batch['target_ids'])
        loss = output['loss']
        loss.backward()
        optimizer.step()
        fsdp_loss[0] += loss.item()
        # NOTE(review): batch is a dict, so len(batch) is the number of KEYS
        # (a constant per batch), not the number of samples — presumably the
        # batch size was intended; confirm before relying on this metric.
        fsdp_loss[1] += len(batch)
        if (rank == 0):
            inner_pbar.update(1)
    # Sum loss and count over all ranks, then take the global mean.
    dist.all_reduce(fsdp_loss, op=dist.ReduceOp.SUM)
    train_accuracy = (fsdp_loss[0] / fsdp_loss[1])
    if (rank == 0):
        inner_pbar.close()
        print(f'Train Epoch: {epoch}, Loss: {train_accuracy:.4f}')
    return train_accuracy
(scope='session')
def browser_instance_getter(browser_patches, splinter_session_scoped_browser, splinter_browser_load_condition, splinter_browser_load_timeout, splinter_download_file_types, splinter_driver_kwargs, splinter_file_download_dir, splinter_firefox_profile_preferences, splinter_firefox_profile_directory, splinter_make_screenshot_on_failure, splinter_remote_url, splinter_screenshot_dir, splinter_selenium_implicit_wait, splinter_wait_time, splinter_selenium_socket_timeout, splinter_selenium_speed, splinter_webdriver_executable, splinter_window_size, splinter_browser_class, splinter_clean_cookies_urls, splinter_screenshot_getter_html, splinter_screenshot_getter_png, splinter_screenshot_encoding, splinter_headless, session_tmpdir, browser_pool):
    """Session fixture returning prepare_browser(request, parent): a factory
    that yields a configured splinter browser, reusing pooled instances for
    session-scoped browsers and retrying creation/preparation on failure."""

    def get_browser(splinter_webdriver, retry_count=3):
        # Instantiate the browser class, retrying on any startup failure.
        kwargs = get_args(driver=splinter_webdriver, download_dir=splinter_file_download_dir, download_ftypes=splinter_download_file_types, firefox_pref=splinter_firefox_profile_preferences, firefox_prof_dir=splinter_firefox_profile_directory, remote_url=splinter_remote_url, executable=splinter_webdriver_executable, headless=splinter_headless, driver_kwargs=splinter_driver_kwargs)
        try:
            return splinter_browser_class(splinter_webdriver, visit_condition=splinter_browser_load_condition, visit_condition_timeout=splinter_browser_load_timeout, wait_time=splinter_wait_time, **kwargs)
        except Exception:
            if (retry_count > 1):
                return get_browser(splinter_webdriver, (retry_count - 1))
            else:
                raise

    def prepare_browser(request, parent, retry_count=3):
        splinter_webdriver = request.getfixturevalue('splinter_webdriver')
        splinter_session_scoped_browser = request.getfixturevalue('splinter_session_scoped_browser')
        splinter_close_browser = request.getfixturevalue('splinter_close_browser')
        # One pooled browser per requesting fixture object.
        browser_key = id(parent)
        browser = browser_pool.get(browser_key)
        if (not splinter_session_scoped_browser):
            # Function-scoped: always a fresh browser, optionally closed at teardown.
            browser = get_browser(splinter_webdriver)
            if splinter_close_browser:
                request.addfinalizer(browser.quit)
        elif (not browser):
            browser = browser_pool[browser_key] = get_browser(splinter_webdriver)
        if (request.scope == 'function'):
            def _take_screenshot_on_failure():
                # splinter_failure defaults to True so unknown states still shoot.
                if (splinter_make_screenshot_on_failure and getattr(request.node, 'splinter_failure', True)):
                    _take_screenshot(request=request, fixture_name=parent.__name__, session_tmpdir=session_tmpdir, browser_instance=browser, splinter_screenshot_dir=splinter_screenshot_dir, splinter_screenshot_getter_html=splinter_screenshot_getter_html, splinter_screenshot_getter_png=splinter_screenshot_getter_png, splinter_screenshot_encoding=splinter_screenshot_encoding)
            request.addfinalizer(_take_screenshot_on_failure)
        try:
            # A pooled browser may have been created for a different driver.
            if (splinter_webdriver not in browser.driver_name.lower()):
                raise IOError(f'webdriver does not match (requested: {splinter_webdriver} , available: {browser.driver_name.lower()})')
            if hasattr(browser, 'driver'):
                # Apply selenium timeouts/speed to the underlying driver.
                browser.driver.implicitly_wait(splinter_selenium_implicit_wait)
                browser.driver.set_speed(splinter_selenium_speed)
                browser.driver.command_executor.set_timeout(splinter_selenium_socket_timeout)
                browser.driver.command_executor._conn.timeout = splinter_selenium_socket_timeout
                if (splinter_window_size and (splinter_webdriver != 'chrome')):
                    browser.driver.set_window_size(*splinter_window_size)
            # Clear cookies (best effort), including any extra URLs configured.
            try:
                browser.cookies.delete()
            except (IOError, HTTPException, WebDriverException):
                LOGGER.warning('Error cleaning browser cookies', exc_info=True)
            for url in splinter_clean_cookies_urls:
                browser.visit(url)
                browser.cookies.delete()
            if hasattr(browser, 'driver'):
                browser.visit_condition = splinter_browser_load_condition
                browser.visit_condition_timeout = splinter_browser_load_timeout
                browser.visit('about:blank')
        except (IOError, HTTPException, WebDriverException, MaxRetryError):
            # Preparation failed: discard this browser and retry with a new one.
            try:
                browser.quit()
            except Exception:
                pass
            LOGGER.warning('Error preparing the browser', exc_info=True)
            if (retry_count < 1):
                raise
            else:
                browser = browser_pool[browser_key] = get_browser(splinter_webdriver)
                prepare_browser(request, parent, (retry_count - 1))
        return browser
    return prepare_browser
class Window(QWidget):
    """Painter Paths demo window: renders nine QPainterPath shapes in a grid
    with controls for fill rule, fill gradient, pen width/color and rotation.

    Fix: the grid row index passed to QGridLayout.addWidget was computed with
    true division (i / 3), which yields a float and raises a TypeError under
    Python 3 bindings — replaced with floor division (i // 3).
    """
    NumRenderAreas = 9

    def __init__(self):
        super(Window, self).__init__()
        # -- Build the nine demo paths -------------------------------------
        rectPath = QPainterPath()
        rectPath.moveTo(20.0, 30.0)
        rectPath.lineTo(80.0, 30.0)
        rectPath.lineTo(80.0, 70.0)
        rectPath.lineTo(20.0, 70.0)
        rectPath.closeSubpath()
        roundRectPath = QPainterPath()
        roundRectPath.moveTo(80.0, 35.0)
        roundRectPath.arcTo(70.0, 30.0, 10.0, 10.0, 0.0, 90.0)
        roundRectPath.lineTo(25.0, 30.0)
        roundRectPath.arcTo(20.0, 30.0, 10.0, 10.0, 90.0, 90.0)
        roundRectPath.lineTo(20.0, 65.0)
        roundRectPath.arcTo(20.0, 60.0, 10.0, 10.0, 180.0, 90.0)
        roundRectPath.lineTo(75.0, 70.0)
        roundRectPath.arcTo(70.0, 60.0, 10.0, 10.0, 270.0, 90.0)
        roundRectPath.closeSubpath()
        ellipsePath = QPainterPath()
        ellipsePath.moveTo(80.0, 50.0)
        ellipsePath.arcTo(20.0, 30.0, 60.0, 40.0, 0.0, 360.0)
        piePath = QPainterPath()
        piePath.moveTo(50.0, 50.0)
        piePath.lineTo(65.0, 32.6795)
        piePath.arcTo(20.0, 30.0, 60.0, 40.0, 60.0, 240.0)
        piePath.closeSubpath()
        polygonPath = QPainterPath()
        polygonPath.moveTo(10.0, 80.0)
        polygonPath.lineTo(20.0, 10.0)
        polygonPath.lineTo(80.0, 30.0)
        polygonPath.lineTo(90.0, 70.0)
        polygonPath.closeSubpath()
        # Two subpaths in one path: a circle and a square (fill-rule demo).
        groupPath = QPainterPath()
        groupPath.moveTo(60.0, 40.0)
        groupPath.arcTo(20.0, 20.0, 40.0, 40.0, 0.0, 360.0)
        groupPath.moveTo(40.0, 40.0)
        groupPath.lineTo(40.0, 80.0)
        groupPath.lineTo(80.0, 80.0)
        groupPath.lineTo(80.0, 40.0)
        groupPath.closeSubpath()
        textPath = QPainterPath()
        timesFont = QFont('Times', 50)
        timesFont.setStyleStrategy(QFont.ForceOutline)
        textPath.addText(10, 70, timesFont, 'Qt')
        bezierPath = QPainterPath()
        bezierPath.moveTo(20, 30)
        bezierPath.cubicTo(80, 0, 50, 50, 80, 80)
        starPath = QPainterPath()
        starPath.moveTo(90, 50)
        for i in range(1, 5):
            starPath.lineTo((50 + (40 * cos(((0.8 * i) * pi)))), (50 + (40 * sin(((0.8 * i) * pi)))))
        starPath.closeSubpath()
        self.renderAreas = [RenderArea(rectPath), RenderArea(roundRectPath), RenderArea(ellipsePath), RenderArea(piePath), RenderArea(polygonPath), RenderArea(groupPath), RenderArea(textPath), RenderArea(bezierPath), RenderArea(starPath)]
        assert (len(self.renderAreas) == 9)
        # -- Controls -------------------------------------------------------
        self.fillRuleComboBox = QComboBox()
        self.fillRuleComboBox.addItem('Odd Even', Qt.OddEvenFill)
        self.fillRuleComboBox.addItem('Winding', Qt.WindingFill)
        fillRuleLabel = QLabel('Fill &Rule:')
        fillRuleLabel.setBuddy(self.fillRuleComboBox)
        self.fillColor1ComboBox = QComboBox()
        self.populateWithColors(self.fillColor1ComboBox)
        self.fillColor1ComboBox.setCurrentIndex(self.fillColor1ComboBox.findText('mediumslateblue'))
        self.fillColor2ComboBox = QComboBox()
        self.populateWithColors(self.fillColor2ComboBox)
        self.fillColor2ComboBox.setCurrentIndex(self.fillColor2ComboBox.findText('cornsilk'))
        fillGradientLabel = QLabel('&Fill Gradient:')
        fillGradientLabel.setBuddy(self.fillColor1ComboBox)
        fillToLabel = QLabel('to')
        fillToLabel.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self.penWidthSpinBox = QSpinBox()
        self.penWidthSpinBox.setRange(0, 20)
        penWidthLabel = QLabel('&Pen Width:')
        penWidthLabel.setBuddy(self.penWidthSpinBox)
        self.penColorComboBox = QComboBox()
        self.populateWithColors(self.penColorComboBox)
        self.penColorComboBox.setCurrentIndex(self.penColorComboBox.findText('darkslateblue'))
        penColorLabel = QLabel('Pen &Color:')
        penColorLabel.setBuddy(self.penColorComboBox)
        self.rotationAngleSpinBox = QSpinBox()
        self.rotationAngleSpinBox.setRange(0, 359)
        self.rotationAngleSpinBox.setWrapping(True)
        self.rotationAngleSpinBox.setSuffix(u'')
        rotationAngleLabel = QLabel('&Rotation Angle:')
        rotationAngleLabel.setBuddy(self.rotationAngleSpinBox)
        # -- Signal wiring --------------------------------------------------
        self.fillRuleComboBox.activated.connect(self.fillRuleChanged)
        self.fillColor1ComboBox.activated.connect(self.fillGradientChanged)
        self.fillColor2ComboBox.activated.connect(self.fillGradientChanged)
        self.penColorComboBox.activated.connect(self.penColorChanged)
        for i in range(Window.NumRenderAreas):
            self.penWidthSpinBox.valueChanged.connect(self.renderAreas[i].setPenWidth)
            self.rotationAngleSpinBox.valueChanged.connect(self.renderAreas[i].setRotationAngle)
        # -- Layout ---------------------------------------------------------
        topLayout = QGridLayout()
        for i in range(Window.NumRenderAreas):
            # FIX: row/column must be ints; i / 3 produced a float in Python 3.
            topLayout.addWidget(self.renderAreas[i], (i // 3), (i % 3))
        mainLayout = QGridLayout()
        mainLayout.addLayout(topLayout, 0, 0, 1, 4)
        mainLayout.addWidget(fillRuleLabel, 1, 0)
        mainLayout.addWidget(self.fillRuleComboBox, 1, 1, 1, 3)
        mainLayout.addWidget(fillGradientLabel, 2, 0)
        mainLayout.addWidget(self.fillColor1ComboBox, 2, 1)
        mainLayout.addWidget(fillToLabel, 2, 2)
        mainLayout.addWidget(self.fillColor2ComboBox, 2, 3)
        mainLayout.addWidget(penWidthLabel, 3, 0)
        mainLayout.addWidget(self.penWidthSpinBox, 3, 1, 1, 3)
        mainLayout.addWidget(penColorLabel, 4, 0)
        mainLayout.addWidget(self.penColorComboBox, 4, 1, 1, 3)
        mainLayout.addWidget(rotationAngleLabel, 5, 0)
        mainLayout.addWidget(self.rotationAngleSpinBox, 5, 1, 1, 3)
        self.setLayout(mainLayout)
        # Apply the initial control values to all render areas.
        self.fillRuleChanged()
        self.fillGradientChanged()
        self.penColorChanged()
        self.penWidthSpinBox.setValue(2)
        self.setWindowTitle('Painter Paths')

    def fillRuleChanged(self):
        """Propagate the selected fill rule to every render area."""
        rule = Qt.FillRule(self.currentItemData(self.fillRuleComboBox))
        for i in range(Window.NumRenderAreas):
            self.renderAreas[i].setFillRule(rule)

    def fillGradientChanged(self):
        """Propagate the selected gradient color pair to every render area."""
        color1 = QColor(self.currentItemData(self.fillColor1ComboBox))
        color2 = QColor(self.currentItemData(self.fillColor2ComboBox))
        for i in range(Window.NumRenderAreas):
            self.renderAreas[i].setFillGradient(color1, color2)

    def penColorChanged(self):
        """Propagate the selected pen color to every render area."""
        color = QColor(self.currentItemData(self.penColorComboBox))
        for i in range(Window.NumRenderAreas):
            self.renderAreas[i].setPenColor(color)

    def populateWithColors(self, comboBox):
        """Fill *comboBox* with all predefined Qt color names."""
        colorNames = QColor.colorNames()
        for name in colorNames:
            comboBox.addItem(name, name)

    def currentItemData(self, comboBox):
        """Return the item data associated with the current combo selection."""
        return comboBox.itemData(comboBox.currentIndex())
class ChannelRounder(CompRatioRounder):
    """Rounds compression ratios so the number of kept input channels is a
    multiple of a configured multiplicity."""

    def __init__(self, multiplicity: int):
        self._multiplicity = multiplicity

    def round(self, layer: Layer, comp_ratio: Decimal, cost_metric: CostMetric) -> Decimal:
        """Return the smallest ratio >= comp_ratio whose implied kept-channel
        count is a multiple of the multiplicity (identity when it is 1)."""
        if (self._multiplicity != 1):
            total_in_channels = layer.weight_shape[1]
            kept_channels = utils.round_up_to_multiplicity(self._multiplicity, (total_in_channels * comp_ratio), total_in_channels)
            rounded_ratio = (Decimal(kept_channels) / Decimal(total_in_channels))
        else:
            rounded_ratio = comp_ratio
        # Rounding only ever moves the ratio up, and it stays a valid ratio.
        assert (comp_ratio <= rounded_ratio)
        assert (0 <= rounded_ratio <= 1)
        return rounded_ratio
class RTorrentMethod(object):
    """Proxy for a single rTorrent XMLRPC method.

    Attribute access extends the dotted method name (so ``proxy.d.multicall``
    yields the method 'd.multicall'); calling the object serializes the
    request, sends it over SCGI, records traffic/latency statistics on the
    owning proxy, and decodes the response.

    Fix: the bad-packet diagnostic was a mangled Python 2 print statement
    ('print >> sys.stderr, ...') that raises a TypeError on Python 3; it is
    now a print(..., file=sys.stderr) call, and the dump file uses a context
    manager instead of try/finally.
    """
    # Methods that need a fake target value prepended to their arguments.
    NEEDS_FAKE_TARGET = set(('ui.current_view.set', 'view_filter'))

    def __init__(self, proxy, method_name):
        self._proxy = proxy
        self._method_name = method_name

    def __getattr__(self, attr):
        # Accumulate dotted name parts; returning self keeps chaining cheap.
        self._method_name += ('.' + attr)
        return self

    def __str__(self):
        # Human-readable traffic/latency stats for the most recent call.
        return ('out %s, in %s, took %.3fms/%.3fms' % (fmt.human_size(self._outbound).strip(), fmt.human_size(self._inbound).strip(), (self._net_latency * 1000.0), (self._latency * 1000.0)))

    def __call__(self, *args, **kwargs):
        """Perform the remote call and return the decoded result.

        Keyword options: raw_xml returns the undecoded response; flatten
        concatenates multicall result lists; fail_silently suppresses the
        debug dump of bad request/response pairs.
        """
        self._proxy._requests += 1
        start = time.time()
        raw_xml = kwargs.get('raw_xml', False)
        flatten = kwargs.get('flatten', False)
        fail_silently = kwargs.get('fail_silently', False)
        try:
            # Map deprecated method names and inject fake targets as needed.
            if (not self._proxy._use_deprecated):
                if (self._method_name.endswith('.multicall') or self._method_name.endswith('.multicall.filtered')):
                    if (self._method_name in ('d.multicall', 'd.multicall.filtered')):
                        args = ((0,) + args)
                    if config.debug:
                        self._proxy.LOG.debug(('BEFORE MAPPING: %r' % (args,)))
                    if (self._method_name == 'system.multicall'):
                        for call in args[0]:
                            call['methodName'] = self._proxy._map_call(call['methodName'])
                    else:
                        args = (args[0:2] + tuple((self._proxy._map_call(i) for i in args[2:])))
                    if config.debug:
                        self._proxy.LOG.debug(('AFTER MAPPING: %r' % (args,)))
                elif (self._method_name in self.NEEDS_FAKE_TARGET):
                    args = ((0,) + args)
            # Serialize and send the request, tracking traffic statistics.
            xmlreq = xmlrpclib.dumps(args, self._proxy._map_call(self._method_name))
            self._outbound = len(xmlreq)
            self._proxy._outbound += self._outbound
            self._proxy._outbound_max = max(self._proxy._outbound_max, self._outbound)
            if config.debug:
                self._proxy.LOG.debug(('XMLRPC raw request: %r' % xmlreq))
            scgi_req = xmlrpc2scgi.SCGIRequest(self._proxy._transport)
            xmlresp = scgi_req.send(xmlreq)
            self._inbound = len(xmlresp)
            self._proxy._inbound += self._inbound
            self._proxy._inbound_max = max(self._proxy._inbound_max, self._inbound)
            self._net_latency = scgi_req.latency
            self._proxy._net_latency += self._net_latency
            if raw_xml:
                return xmlresp
            # Downgrade 64-bit int tags so the parser accepts them.
            xmlresp = xmlresp.replace('<i8>', '<i4>').replace('</i8>', '</i4>')
            try:
                result = xmlrpclib.loads(xmlresp)[0][0]
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                (exc_type, exc) = sys.exc_info()[:2]
                # Translate rTorrent's "unknown hash" fault into a typed error.
                if ((exc_type is xmlrpclib.Fault) and (exc.faultCode == (- 501)) and (exc.faultString == 'Could not find info-hash.')):
                    raise HashNotFound('Unknown hash for {}({}) {}', self._method_name, (args[0] if args else ''), self._proxy._url)
                if (not fail_silently):
                    # Dump the offending request/response pair for debugging.
                    filename = ('/tmp/xmlrpc2scgi-%s.xml' % os.getuid())
                    with open(filename, 'w') as handle:
                        handle.write('REQUEST\n')
                        handle.write(xmlreq)
                        handle.write('\nRESPONSE\n')
                        handle.write(xmlresp)
                    print(('INFO: Bad data packets written to %r' % filename), file=sys.stderr)
                raise
            else:
                try:
                    return (sum(result, []) if flatten else result)
                except TypeError:
                    # A failed multicall comes back as a list of fault dicts.
                    if (result and isinstance(result, list) and isinstance(result[0], dict) and ('faultCode' in result[0])):
                        raise error.LoggableError(('XMLRPC error in multicall: ' + repr(result[0])))
                    else:
                        raise
        finally:
            self._latency = (time.time() - start)
            self._proxy._latency += self._latency
            if config.debug:
                self._proxy.LOG.debug(('%s(%s) took %.3f secs' % (self._method_name, ', '.join((repr(i) for i in args)), self._latency)))
class PlayerOptions(GObject.Object):
    """Expose a player window's shuffle/repeat/single/stop-after state as
    GObject properties, delegating reads and writes to the window's order
    widget and stop-after toggle.

    Fix: the @property/@setter decorators were missing, so each second
    accessor definition silently shadowed the first and the stray
    '_after.setter' line raised a NameError at class-creation time.  The
    decorators are restored, matching the getattr/setattr delegation in
    do_get_property/do_set_property.
    """
    __gproperties__ = {'shuffle': (bool, '', '', False, (GObject.ParamFlags.READABLE | GObject.ParamFlags.WRITABLE)), 'repeat': (bool, '', '', False, (GObject.ParamFlags.READABLE | GObject.ParamFlags.WRITABLE)), 'single': (bool, '', '', False, (GObject.ParamFlags.READABLE | GObject.ParamFlags.WRITABLE)), 'stop-after': (bool, '', '', False, (GObject.ParamFlags.READABLE | GObject.ParamFlags.WRITABLE))}

    def __init__(self, window):
        super().__init__()
        self._stop_after = window.stop_after
        # Re-emit GObject notifications when the underlying widgets change.
        self._said = self._stop_after.connect('toggled', (lambda *x: self.notify('stop-after')))

        def order_changed(*args):
            self.notify('shuffle')
            self.notify('single')
        self._order_widget = window.order
        self._oid = self._order_widget.connect('changed', order_changed)
        window.connect('destroy', self._window_destroy)

    def _window_destroy(self, window):
        self.destroy()

    def destroy(self):
        # Disconnect handlers and drop widget references exactly once.
        if self._order_widget:
            self._order_widget.disconnect(self._oid)
            self._order_widget = None
        if self._stop_after:
            self._stop_after.disconnect(self._said)
            self._stop_after = None

    def do_get_property(self, param):
        # GObject property access delegates to the Python properties below.
        return getattr(self, param.name.replace('-', '_'))

    def do_set_property(self, param, value):
        setattr(self, param.name.replace('-', '_'), value)

    @property
    def single(self):
        # "Single" means: repeat exactly one song forever.
        return (self._order_widget and self._order_widget.repeated and (self._order_widget.repeater is RepeatSongForever))

    @single.setter
    def single(self, value):
        if value:
            self.repeat = True
            self._order_widget.repeater = RepeatSongForever
        else:
            self.repeat = False
            self._order_widget.repeater = RepeatListForever

    @property
    def shuffle(self):
        return self._order_widget.shuffled

    @shuffle.setter
    def shuffle(self, value):
        self._order_widget.shuffled = value

    @property
    def repeat(self):
        return self._order_widget.repeated

    @repeat.setter
    def repeat(self, value):
        print_d(('setting repeated to %s' % value))
        self._order_widget.repeated = value

    @property
    def stop_after(self):
        return self._stop_after.get_active()

    @stop_after.setter
    def stop_after(self, value):
        self._stop_after.set_active(value)
def highlight_x(ax, highlight_range, highlight_color='magenta', label=None):
    """Shade the x-interval [highlight_range[0], highlight_range[1]] on *ax*
    from y=0 up to the current top y-limit, optionally adding a rotated label
    near the band's left edge."""
    x_start = highlight_range[0]
    band_width = (highlight_range[1] - x_start)
    # Height is taken from the axis limits at draw time (before the patch is added).
    top = ax.get_ylim()[1]
    band = patches.Rectangle((x_start, 0), band_width, top, facecolor=highlight_color, edgecolor='none', alpha=0.5)
    ax.add_patch(band)
    if (label is not None):
        # Vertical label 10% into the band, at 90% of the (possibly updated) axis height.
        ax.text((x_start + (band_width / 10)), (0.9 * ax.get_ylim()[1]), label, rotation=90, color=highlight_color)
def monitor(subcommand, dormant_after: float, dormant_signal: int, kill_after: float, kill_signal: int) -> int:
    """Run *subcommand* and watch its combined stdout/stderr for activity.

    When no output arrives within dormant_after seconds, kill_child is invoked
    with dormant_signal (escalating per kill_after/kill_signal).  Returns the
    child's exit status.
    """
    # One pipe carries both streams: dup the write end so stderr shares it.
    (parent_read, child_stdout_write) = os.pipe()
    child_stderr_write = os.dup(child_stdout_write)
    process = subprocess.Popen(subcommand, stdin=subprocess.DEVNULL, stdout=child_stdout_write, stderr=child_stderr_write)
    # Close the parent's copies of the write ends so EOF appears on exit.
    os.close(child_stderr_write)
    os.close(child_stdout_write)
    # Non-blocking reads let read_until_exhaustion drain without stalling.
    make_async(parent_read)
    read_targets = [parent_read]
    empty_targets: List[Any] = []
    while (process.poll() is None):
        # Wait for output or the dormancy timeout, whichever comes first.
        (read_list, _, _) = select.select(read_targets, empty_targets, empty_targets, dormant_after)
        if (not read_list):
            kill_child(process, dormant_signal, kill_after, kill_signal)
        for fd in read_list:
            read_until_exhaustion(fd)
    # Drain whatever the child wrote between the last select and its exit.
    for fd in read_targets:
        read_until_exhaustion(fd)
    os.close(parent_read)
    return process.poll()
class UserViewSet(UserViewSetMixin, ReadOnlyModelViewSet):
    """Read-only user API endpoint with per-requester visibility filtering."""
    # Access requires either model-level or object-level permission.
    permission_classes = ((HasModelPermission | HasObjectPermission),)
    serializer_class = UserSerializer
    filter_backends = (DjangoFilterBackend,)
    filterset_fields = ('username', 'first_name', 'last_name', 'email', 'groups')

    def get_queryset(self):
        # Restrict rows to users visible to the requester; prefetch the
        # relations the serializer renders to avoid N+1 queries.
        return self.get_users_for_user(self.request.user).prefetch_related('groups', 'role__member', 'role__manager', 'role__editor', 'role__reviewer', 'memberships')
def extract_declaration_for(function_name):
    """Recover the assignment target(s) of the most recent source line calling
    *function_name* (e.g. 'x = Var(...)' -> 'x').

    Returns a string, a list of strings for comma-separated VarArray targets,
    or None when no assignment involving the function is found.
    """
    # Scan the captured source bottom-up so the latest assignment wins.
    code = list(reversed(_extract_code(function_name)))
    for line in code:
        if ((function_name in line) and (not is_comment_line(line))):
            pos = line.find(function_name)
            if ('=' in line[:pos]):
                break  # found 'lhs = ... function_name(...)'
    else:
        return None  # no assignment uses this function
    declaration = line[:pos].strip()
    # NOTE(review): 'pycsp3.' is 7 chars but 8 are sliced off — this also
    # drops the character just before the qualifier; confirm this is the
    # intended handling of the preceding space/'='.
    if declaration.endswith('pycsp3.'):
        declaration = declaration[:(- 8)]
    if (declaration[(- 1)] == '='):
        declaration = declaration[:(- 1)].strip()
    assert (declaration.count('=') < 2)
    if ('=' in declaration):
        # Chained/keyword form: keep whichever side holds the target name(s).
        t = declaration.split('=')
        declaration = (t[0] if (',' in t[0]) else t[1])
    if (function_name == 'Var'):
        assert ((',' not in declaration) and (')' not in declaration)), "Every simple declaration must be on its own line. For example, 'x, y = Var(dom={0,1}), Var(dom={0,1})' is not allowed."
        return declaration
    elif (function_name == 'VarArray'):
        assert (')' not in declaration)
        return (declaration if (',' not in declaration) else [v.strip() for v in declaration.split(',')])
def test_param_storage(tmpdir):
    """Force-field parameters must validate on creation and support symmetric
    lookup and in-place mutation."""
    with tmpdir.as_cwd():
        mol = Ligand.from_file(get_data('chloromethane.pdb'))
        OpenFF().run(mol)
        # Creating a nonbonded parameter without epsilon/sigma must fail validation.
        with pytest.raises(ValidationError):
            mol.NonbondedForce.create_parameter(atoms=(0,), charge=0.1)
        mol.NonbondedForce.create_parameter(atoms=(0,), charge=0.1, epsilon=0.2, sigma=0.3)
        assert (float(mol.NonbondedForce[(0,)].charge) == 0.1)
        assert (mol.NonbondedForce[(0,)].epsilon == 0.2)
        assert (mol.NonbondedForce[(0,)].sigma == 0.3)
        # Stored parameters are mutable in place.
        mol.NonbondedForce[(0,)].charge = 5
        assert (float(mol.NonbondedForce[(0,)].charge) == 5)
        # Bond lookup is symmetric in the atom pair.
        assert (mol.BondForce[(0, 1)].k == mol.BondForce[(1, 0)].k)
def train_image_parse_function(filename, *argv):
    """Load, flip, (optionally) random-crop, and scale a training image.

    Up to two extra positional values (e.g. labels) are passed through after
    the image; with zero or more than two extras only the image is returned.
    """
    image = tf.image.random_flip_left_right(read_image(filename))
    if FLAGS.augmentation:
        print('data augmentation')
        resized_image = resize_and_random_crop_image(image)
    else:
        resized_image = resize_image(image)
    resized_image = scale_image_value(resized_image)
    n_extra = len(argv)
    if (n_extra == 1):
        return (resized_image, argv[0])
    if (n_extra == 2):
        return (resized_image, argv[0], argv[1])
    return resized_image
def reorder_image(img, input_order='HWC'):
    """Return *img* in HWC layout, promoting 2-D input to single-channel.

    input_order describes the layout of *img*; only 'HWC' and 'CHW' are
    accepted, anything else raises ValueError.
    """
    if input_order not in ('HWC', 'CHW'):
        raise ValueError(f"Wrong input_order {input_order}. Supported input_orders are 'HWC' and 'CHW'")
    if len(img.shape) == 2:
        # Grayscale (H, W) gains a trailing channel axis before any shuffling.
        img = img[..., None]
    if input_order == 'CHW':
        img = img.transpose(1, 2, 0)
    return img
class MultiResolutionExtractor(ExtractorBase):
    """Multi-resolution feature extractor.

    Wraps a set of feature modules and computes their features on image
    patches sampled at one or more scales.  Feature modules can be restricted
    to color or grayscale input via set_is_color.

    Fix: extract() and extract_hist_depth_mask() duplicated the whole
    stack-patches / run-features / return tail — that logic now lives in the
    shared _compute_features helper.
    """

    def __init__(self, features, patch_mode='replicate', max_scale_change=None):
        super().__init__(features)
        self.patch_mode = patch_mode                # padding mode for patch sampling
        self.max_scale_change = max_scale_change    # limit on patch rescaling
        self.is_color = None                        # unknown until set_is_color()

    def stride(self):
        return torch.Tensor(TensorList([f.stride() for f in self.features if self._return_feature(f)]).unroll().list())

    def size(self, input_sz):
        return TensorList([f.size(input_sz) for f in self.features if self._return_feature(f)]).unroll()

    def dim(self):
        return TensorList([f.dim() for f in self.features if self._return_feature(f)]).unroll()

    def get_fparams(self, name: str = None):
        """Return feature parameters, or the named attribute from each fparams."""
        if (name is None):
            return [f.fparams for f in self.features if self._return_feature(f)]
        return TensorList([getattr(f.fparams, name) for f in self.features if self._return_feature(f)]).unroll()

    def get_attribute(self, name: str, ignore_missing: bool = False):
        """Collect attribute *name* from the active features; with
        ignore_missing, features lacking it are skipped instead of yielding None."""
        if ignore_missing:
            return TensorList([getattr(f, name) for f in self.features if (self._return_feature(f) and hasattr(f, name))])
        else:
            return TensorList([getattr(f, name, None) for f in self.features if self._return_feature(f)])

    def get_unique_attribute(self, name: str):
        """Return attribute *name* from the single feature that defines it;
        raises RuntimeError when zero or several features have it."""
        feat = None
        for f in self.features:
            if (self._return_feature(f) and hasattr(f, name)):
                if (feat is not None):
                    raise RuntimeError('The attribute was not unique.')
                feat = f
        if (feat is None):
            raise RuntimeError('The attribute did not exist')
        return getattr(feat, name)

    def _return_feature(self, f):
        # Before set_is_color() is called, every feature is active.
        return ((self.is_color is None) or (self.is_color and f.use_for_color) or ((not self.is_color) and f.use_for_gray))

    def set_is_color(self, is_color: bool):
        self.is_color = is_color

    def _compute_features(self, patch_and_coord_iter, return_patches):
        # Shared tail of the extract* methods: stack the sampled patches and
        # coordinates, run every feature module, optionally return the patches.
        (patch_iter, coord_iter) = zip(*patch_and_coord_iter)
        im_patches = torch.cat(list(patch_iter))
        patch_coords = torch.cat(list(coord_iter))
        feature_map = TensorList([f.get_feature(im_patches) for f in self.features]).unroll()
        if return_patches:
            return (feature_map, patch_coords, im_patches)
        return (feature_map, patch_coords)

    def extract(self, im, pos, scales, image_sz, return_patches=False):
        """Extract features from patches centered at *pos*, one per scale."""
        if isinstance(scales, (int, float)):
            scales = [scales]
        return self._compute_features(
            (sample_patch(im, pos, (s * image_sz), image_sz, mode=self.patch_mode, max_scale_change=self.max_scale_change) for s in scales),
            return_patches)

    def extract_hist_depth_mask(self, im, depth, pos, scales, image_sz, return_patches=False):
        """Like extract(), but patches are sampled jointly with a depth map
        via sample_patch_hist_depth_mask."""
        if isinstance(scales, (int, float)):
            scales = [scales]
        return self._compute_features(
            (sample_patch_hist_depth_mask(im, depth, pos, (s * image_sz), image_sz, mode=self.patch_mode, max_scale_change=self.max_scale_change) for s in scales),
            return_patches)

    def extract_transformed(self, im, pos, scale, image_sz, transforms):
        """Extract features for a set of augmented versions of a single patch."""
        (im_patch, _) = sample_patch(im, pos, (scale * image_sz), image_sz)
        im_patches = torch.cat([T(im_patch) for T in transforms])
        return TensorList([f.get_feature(im_patches) for f in self.features]).unroll()
def test_cross_compiled_build(tmp_path):
    """Building cp39 wheels with CIBW_ARCHS='x86_64, universal2, arm64' on a
    sufficiently new macOS toolchain must yield exactly the expected wheels."""
    if (utils.platform != 'macos'):
        pytest.skip('this test is only relevant to macos')
    if (get_xcode_version() < (12, 2)):
        pytest.skip('this test only works with Xcode 12.2 or greater')
    # Generate a minimal throwaway project to build.
    project_dir = (tmp_path / 'project')
    basic_project.generate(project_dir)
    built = utils.cibuildwheel_run(project_dir, add_env={'CIBW_BUILD': 'cp39-*', 'CIBW_ARCHS': 'x86_64, universal2, arm64'})
    wanted = {w for w in ALL_MACOS_WHEELS if ('cp39' in w)}
    assert (set(built) == wanted)
def uc_refine_hardcode(binary_mask, uncertainty_map, img, threshold_uc=0.2, fn_alpha=0.5, fn_beta=0.7, fp_alpha=0.5, fp_beta=0.7):
    """Refine a binary segmentation mask using an uncertainty map and the
    image's mean channel intensity.

    High-uncertainty pixels outside the mask whose intensity lies inside
    (fn_alpha, fn_beta) are added (false-negative recovery); high-uncertainty
    pixels inside the mask whose intensity falls outside [fp_alpha, fp_beta]
    are removed (false-positive suppression).

    Returns (FN_output_mask, FN_UH, FN_xUH, FN_condition_mask,
             FP_output_mask, FP_UH, FP_xUH, FP_condition_mask).
    """
    img_avg = np.mean(img, axis=2)
    mean_value = np.mean((img_avg * binary_mask[0]))
    print('the mean value is:', mean_value)
    # Binarize the uncertainty map at threshold_uc of its dynamic range.
    u_lo, u_hi = np.min(uncertainty_map), np.max(uncertainty_map)
    U_thre = (uncertainty_map > (u_lo + (threshold_uc * (u_hi - u_lo))))
    # --- False-negative recovery ---
    FN_UH = (U_thre * (1 - binary_mask)[0])
    FN_xUH = (img_avg * FN_UH)
    FN_condition_mask = ((fn_alpha < FN_xUH) & (FN_xUH < fn_beta))
    FN_output_mask = np.logical_or(binary_mask, FN_condition_mask)
    # --- False-positive suppression ---
    FP_UH = (U_thre * binary_mask[0])
    FP_xUH = (img_avg * FP_UH)
    FP_condition_mask = (((fp_alpha > FP_xUH) | (FP_xUH > fp_beta)) & (FP_UH > 0))
    FP_output_mask = np.logical_and(FN_output_mask, np.logical_not(FP_condition_mask)).astype(int)
    return (FN_output_mask, FN_UH, FN_xUH, FN_condition_mask, FP_output_mask, FP_UH, FP_xUH, FP_condition_mask)
def _build_line(colwidths, colaligns, linefmt):
if (not linefmt):
return None
if hasattr(linefmt, '__call__'):
return linefmt(colwidths, colaligns)
else:
(begin, fill, sep, end) = linefmt
cells = [(fill * w) for w in colwidths]
return _build_simple_row(cells, (begin, sep, end)) |
.parametrize('input_type', [tuple, list])
def test_run_model_from_effective_irradiance_arrays(sapm_dc_snl_ac_system_Array, location, weather, total_irrad, input_type):
    """run_model_from_effective_irradiance must accept per-array weather as a
    tuple or list and produce per-array DC results."""
    data = weather.copy()
    data[['poa_global', 'poa_diffuse', 'poa_direct']] = total_irrad
    data['effective_irradiance'] = data['poa_global']
    data['cell_temperature'] = 40
    mc = ModelChain(sapm_dc_snl_ac_system_Array, location)
    # Identical inputs for both arrays -> identical DC output frames.
    mc.run_model_from_effective_irradiance(input_type((data, data)))
    assert_frame_equal(mc.results.dc[0], mc.results.dc[1])
    data_two = data.copy()
    data_two['effective_irradiance'] = (data['poa_global'] * 0.5)
    # Halving one array's irradiance must change every DC value for that array.
    mc.run_model_from_effective_irradiance(input_type((data, data_two)))
    assert (mc.results.dc[0] != mc.results.dc[1]).all().all()
def pipeThroughEspeak(inpt):
    """Feed *inpt* (bytes) through ``espeak -q -x`` and return the phoneme
    transcription as bytes.

    Inputs longer than the pipe buffer are split on a newline (preferred) or
    space boundary and processed recursively to avoid deadlocking the child.

    Fix: *inpt* is asserted to be bytes, but the split searches and response
    checks used str literals ('\\n', ' ', 'command'), which raise TypeError on
    bytes in Python 3 — they are now bytes literals.
    """
    assert (type(inpt) == bytes)
    bufsize = 8192
    ret = []
    while (len(inpt) > bufsize):
        # Prefer splitting on a newline, then a space, so words stay whole.
        splitAt = (inpt.rfind(b'\n', 0, bufsize) + 1)
        if (not splitAt):
            splitAt = (inpt.rfind(b' ', 0, bufsize) + 1)
        if (not splitAt):
            sys.stderr.write("Note: had to split eSpeak input and couldn't find a newline or space to do it on\n")
            splitAt = bufsize
        response = pipeThroughEspeak(inpt[:splitAt])
        # A single-line response mentioning 'command' looks like an espeak
        # error message — propagate it instead of accumulating output.
        if ((not (b'\n' in response.rstrip())) and (b'command' in response)):
            return response.strip()
        ret.append(response)
        inpt = inpt[splitAt:]
    try:
        # Python 2 path; os.popen4 no longer exists on Python 3.
        (w, r) = os.popen4('espeak -q -x', bufsize=bufsize)
    except AttributeError:
        import subprocess
        proc = subprocess.Popen(['espeak', '-q', '-x'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        w = proc.stdin
        r = None
    if r:
        getBuf(w).write(inpt)
        w.close()
        r = getBuf(r).read()
    else:
        # subprocess path: write, then let communicate() close stdin and
        # collect everything (inpt is at most bufsize here, so no deadlock).
        w.write(inpt)
        (out, err) = proc.communicate()
        r = as_utf8('')
        if out:
            r += out
        if err:
            r += err
    return (as_utf8('\n').join(ret) + r)
def drop_path(x, drop_prob=0.0, training=False):
    """Stochastic depth: zero out whole samples of *x* with probability
    drop_prob, rescaling survivors by 1/keep_prob so the expectation is
    unchanged.  A no-op when drop_prob is 0 or when not training."""
    if (drop_prob == 0.0) or (not training):
        return x
    keep_prob = 1 - drop_prob
    # One random value per sample, broadcast across all remaining dims.
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    keep_mask = (keep_prob + torch.rand(mask_shape, dtype=x.dtype, device=x.device)).floor()
    return x.div(keep_prob) * keep_mask
def test_one_item_host_limit(capsys, root_dir):
    """With room for exactly one proxy on device and one on host, inserting
    and accessing proxies must spill in LRU order: device -> host -> disk,
    and accessing a spilled proxy must promote it back to device."""
    memory_limit = sizeof(asproxy(one_item_array(), serializers=('dask', 'pickle')))
    dhf = ProxifyHostFile(worker_local_directory=root_dir, device_memory_limit=one_item_nbytes, memory_limit=memory_limit)
    a1 = (one_item_array() + 1)
    a2 = (one_item_array() + 2)
    dhf['k1'] = a1
    dhf['k2'] = a2
    dhf.manager.validate()
    k1 = dhf['k1']
    k2 = dhf['k2']
    # Inserting k2 evicted k1 from device memory (serialized to host).
    assert k1._pxy_get().is_serialized()
    assert (not k2._pxy_get().is_serialized())
    dhf.manager.validate()
    assert is_proxies_equal(dhf.manager._disk.get_proxies(), [])
    assert is_proxies_equal(dhf.manager._host.get_proxies(), [k1])
    assert is_proxies_equal(dhf.manager._dev.get_proxies(), [k2])
    # A third item pushes k1 to disk and k2 to host.
    dhf['k3'] = (one_item_array() + 3)
    k3 = dhf['k3']
    dhf.manager.validate()
    assert is_proxies_equal(dhf.manager._disk.get_proxies(), [k1])
    assert is_proxies_equal(dhf.manager._host.get_proxies(), [k2])
    assert is_proxies_equal(dhf.manager._dev.get_proxies(), [k3])
    dhf.manager.validate()
    # Accessing k2 deserializes it back to device, displacing k3 to host.
    k2_val = k2[0]
    assert (k2_val == 2)
    dhf.manager.validate()
    assert is_proxies_equal(dhf.manager._disk.get_proxies(), [k1])
    assert is_proxies_equal(dhf.manager._host.get_proxies(), [k3])
    assert is_proxies_equal(dhf.manager._dev.get_proxies(), [k2])
    # A fourth item: k3 joins k1 on disk, k2 spills to host, k4 takes device.
    dhf['k4'] = (one_item_array() + 4)
    k4 = dhf['k4']
    dhf.manager.validate()
    assert is_proxies_equal(dhf.manager._disk.get_proxies(), [k1, k3])
    assert is_proxies_equal(dhf.manager._host.get_proxies(), [k2])
    assert is_proxies_equal(dhf.manager._dev.get_proxies(), [k4])
    # Reading k1 from disk promotes it all the way back to device.
    k1_val = k1[0]
    assert (k1_val == 1)
    dhf.manager.validate()
    assert is_proxies_equal(dhf.manager._disk.get_proxies(), [k2, k3])
    assert is_proxies_equal(dhf.manager._host.get_proxies(), [k4])
    assert is_proxies_equal(dhf.manager._dev.get_proxies(), [k1])
    del k1, k2, k3, k4
    dhf.clear()
    assert (len(dhf.manager) == 0)
class ResNet(nn.Module):
    """ResNet backbone with configurable per-stage strides and dilations whose
    forward pass also exposes the intermediate feature maps."""

    def __init__(self, block, layers, strides, dilations, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        # Stem: 7x7 conv + BN + ReLU + 3x3 max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; base widths double at each stage.
        for stage_idx, planes in enumerate((64, 128, 256, 512)):
            stage = self._make_layer(block, planes, layers[stage_idx], stride=strides[stage_idx], dilation=dilations[stage_idx])
            setattr(self, ('layer%d' % (stage_idx + 1)), stage)
        self.avgpool = nn.AvgPool2d((7 * max(dilations)), stride=1)
        self.fc = nn.Linear((512 * block.expansion), num_classes)

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        """Build one residual stage; a 1x1-conv downsample path is added when
        the spatial stride or channel count changes."""
        out_channels = (planes * block.expansion)
        downsample = None
        if (stride != 1) or (self.inplanes != out_channels):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, out_channels, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(out_channels))
        stage_blocks = [block(self.inplanes, planes, stride, dilation, downsample)]
        self.inplanes = out_channels
        stage_blocks.extend(block(self.inplanes, planes, dilation=dilation) for _ in range(blocks - 1))
        return nn.Sequential(*stage_blocks)

    def forward(self, x):
        """Return (logits, conv1_f, layer1_f, layer2_f, layer3_f, layer4_f)."""
        conv1_f = self.relu(self.bn1(self.conv1(x)))
        layer1_f = self.layer1(self.maxpool(conv1_f))
        layer2_f = self.layer2(layer1_f)
        layer3_f = self.layer3(layer2_f)
        layer4_f = self.layer4(layer3_f)
        pooled = self.avgpool(layer4_f)
        fc_f = self.fc(pooled.view(pooled.size(0), -1))
        return (fc_f, conv1_f, layer1_f, layer2_f, layer3_f, layer4_f)
class NYStylePepperoniPizza(Pizza):
    """NY-style pepperoni pizza: thin crust, marinara sauce, fixed toppings.

    Same contents and topping order as the original append-based version.
    """

    def __init__(self):
        self.name = 'NY Style Pepperoni Pizza'
        self.dough = 'Thin Crust Dough'
        self.sauce = 'Marinara Sauce'
        # Toppings listed in the order the original appended them.
        self.toppings = [
            'Grated Reggiano Cheese',
            'Sliced Pepperoni',
            'Garlic',
            'Onion',
            'Mushrooms',
            'Red Pepper',
        ]
class TestMultiProcessingReadingService(TestCase):
    """Tests for DataLoader2 with MultiProcessingReadingService.

    NOTE(review): the bare ``_ctx_parametrize`` / ``_parametrize`` lines and
    the free-standing argument tuples below look like pytest-style decorator
    applications whose leading ``@`` (and call syntax) was lost during
    extraction — restore from the original file.  The names ``dp``,
    ``n_elements`` and ``double_pause_dp`` used in the bodies are presumably
    supplied by those parametrizations or by module scope; verify.
    """
    _ctx_parametrize
    ('dp_fn', [subtest(_non_dispatching_dp, 'non_dispatch'), subtest(_dispatching_dp, 'dispatch')])
    ('main_prefetch', [0, 10])
    ('worker_prefetch', [0, 10])
    def test_early_exit(self, ctx, dp_fn, main_prefetch, worker_prefetch) -> None:
        """Consume a few items, then shut down before exhausting the pipe."""
        dp = dp_fn(1000)
        rs = MultiProcessingReadingService(num_workers=2, main_prefetch_cnt=main_prefetch, worker_prefetch_cnt=worker_prefetch, multiprocessing_context=ctx)
        dl: DataLoader2 = DataLoader2(dp, reading_service=rs)
        it = iter(dl)
        for _ in range(10):
            _ = next(it)
        dl.shutdown()
    _ctx_parametrize
    ('dp_fn', [subtest(_non_dispatching_dp, 'non_dispatch'), subtest(_dispatching_dp, 'dispatch')])
    ('main_prefetch', [0, 10])
    ('worker_prefetch', [0, 10])
    def test_exit(self, ctx, dp_fn, main_prefetch, worker_prefetch) -> None:
        """Fully exhaust the pipe, then shut down cleanly."""
        dp = dp_fn(1000)
        rs = MultiProcessingReadingService(num_workers=2, main_prefetch_cnt=main_prefetch, worker_prefetch_cnt=worker_prefetch, multiprocessing_context=ctx)
        dl: DataLoader2 = DataLoader2(dp, reading_service=rs)
        _ = list(dl)
        dl.shutdown()
    _ctx_parametrize
    _parametrize
    ('n_workers,worker_prefetch_cnt,main_prefetch_cnt', [(1, 0, 0), (1, 0, 2), (2, 0, 0), (2, 2, 0), (2, 0, 2), (2, 2, 2)])
    def test_reading_service_pause_resume(self, ctx, dp, n_workers, worker_prefetch_cnt, main_prefetch_cnt) -> None:
        """Pause/resume mid-iteration must not lose or duplicate elements."""
        rs = MultiProcessingReadingService(num_workers=n_workers, worker_prefetch_cnt=worker_prefetch_cnt, main_prefetch_cnt=main_prefetch_cnt, multiprocessing_context=ctx)
        dl: DataLoader2 = DataLoader2(dp, reading_service=rs)
        res = []
        for (i, x) in enumerate(dl):
            res.append(x)
            # Pause/resume once near the start and once near the end.
            if (i in {2, (n_elements - 2)}):
                dl._pause()
                dl._resume()
        # Order may differ across workers, so compare sorted results.
        self.assertEqual(list(range(n_elements)), sorted(res), msg=f"The test is failing with '{ctx}', num_workers = {rs.num_workers}, worker_prefetch_cnt = {rs.worker_prefetch_cnt}, main_prefetch_cnt = {rs.main_prefetch_cnt}")
        dl.shutdown()
    _ctx_parametrize
    _parametrize
    ('n_workers,worker_prefetch_cnt,main_prefetch_cnt', [(2, 0, 1), (2, 1, 0), (2, 0, 0)])
    def test_reading_service_pause_stop_yield(self, ctx, dp, n_workers, worker_prefetch_cnt, main_prefetch_cnt) -> None:
        """Pausing without resuming should stop iteration after the pause point."""
        rs = MultiProcessingReadingService(num_workers=n_workers, worker_prefetch_cnt=worker_prefetch_cnt, main_prefetch_cnt=main_prefetch_cnt, multiprocessing_context=ctx)
        dl: DataLoader2 = DataLoader2(dp, reading_service=rs)
        res = []
        for (i, x) in enumerate(dl):
            res.append(x)
            if (i in {2}):
                dl._pause()
        # Exactly the three items consumed before the pause are expected.
        self.assertEqual(3, len(res), msg=f"The test is failing with '{ctx}', num_workers = {rs.num_workers}, worker_prefetch_cnt = {rs.worker_prefetch_cnt}, main_prefetch_cnt = {rs.main_prefetch_cnt}")
        dl.shutdown()
    _parametrize
    ('n_workers,worker_prefetch_cnt,main_prefetch_cnt', [(1, 0, 0), (1, 0, 2), (2, 0, 0), (2, 2, 2)])
    def test_reading_service_limit(self, dp, n_workers, worker_prefetch_cnt, main_prefetch_cnt) -> None:
        """``it.limit(k)`` caps items per run; ``limit(None)`` lifts the cap.

        NOTE(review): ``double_pause_dp`` in the second half is not defined in
        this file's visible scope — presumably a module-level fixture; verify.
        """
        rs = MultiProcessingReadingService(num_workers=n_workers, worker_prefetch_cnt=worker_prefetch_cnt, main_prefetch_cnt=main_prefetch_cnt)
        dl: DataLoader2 = DataLoader2(dp, reading_service=rs)
        res = []
        cumulative_res = []
        n_limit = 3
        it: DataLoader2Iterator = iter(dl)
        it.limit(n_limit)
        for x in it:
            res.append(x)
        self.assertEqual(n_limit, len(res), msg=f'The test is failing with default multiprocessing method, num_workers = {rs.num_workers}, worker_prefetch_cnt = {rs.worker_prefetch_cnt}, main_prefetch_cnt = {rs.main_prefetch_cnt}')
        cumulative_res.extend(res)
        # Once the limit is reached, the iterator must stay exhausted.
        with self.assertRaises(StopIteration):
            next(it)
        it.resume()
        res = []
        for x in it:
            res.append(x)
        self.assertEqual(n_limit, len(res), msg=f'The test is failing with default multiprocessing method, num_workers = {rs.num_workers}, worker_prefetch_cnt = {rs.worker_prefetch_cnt}, main_prefetch_cnt = {rs.main_prefetch_cnt}')
        cumulative_res.extend(res)
        # Removing the limit lets the remaining elements through.
        it.limit(None)
        it.resume()
        res = []
        for x in it:
            res.append(x)
        self.assertEqual((n_elements - (2 * n_limit)), len(res), msg=f'The test is failing with default multiprocessing method, num_workers = {rs.num_workers}, worker_prefetch_cnt = {rs.worker_prefetch_cnt}, main_prefetch_cnt = {rs.main_prefetch_cnt}')
        cumulative_res.extend(res)
        self.assertEqual(list(range(n_elements)), sorted(cumulative_res))
        # Second phase: changing the limit between resumes (3 + 4 + 2 items).
        dl2: DataLoader2 = DataLoader2(double_pause_dp, reading_service=rs)
        res = []
        it2: DataLoader2Iterator = iter(dl2)
        it2.limit(3)
        for x in it2:
            res.append(x)
        it2.limit(4)
        it2.resume()
        for x in it2:
            res.append(x)
        self.assertEqual(7, len(res))
        it2.resume()
        it2.limit(2)
        for x in it2:
            res.append(x)
        self.assertEqual(9, len(res))
    def test_initial_epoch_checkpointing(self):
        """Restoring a beginning-of-epoch checkpoint reproduces the epoch.

        Covers three capture points: before ``iter``, after ``iter`` but
        before the first element, and after one element has been consumed.
        """
        dp = IterableWrapper(range(20)).shuffle().sharding_filter()
        dp = NonShardableDataPipe(dp).shuffle()
        rs = MultiProcessingReadingService(num_workers=2)
        # Functional Test: Saving state before iterator is created
        dl: DataLoader2 = DataLoader2(datapipe=dp, reading_service=rs)
        dl.seed(1)
        initial_state = dl.state_dict()
        it1 = iter(dl)
        restored_dl: DataLoader2 = DataLoader2.from_state(initial_state, rs)
        restored_dl._restore_checkpoint_beginning_of_epoch()
        self.assertEqual(list(it1), list(restored_dl))
        dl.shutdown()
        restored_dl.shutdown()
        # Functional Test: Saving state after iterator is created
        dl = DataLoader2(datapipe=dp, reading_service=rs)
        dl.seed(1)
        it1 = iter(dl)
        initial_state = dl.state_dict()
        restored_dl = DataLoader2.from_state(initial_state, rs)
        restored_dl._restore_checkpoint_beginning_of_epoch()
        self.assertEqual(list(it1), list(restored_dl))
        dl.shutdown()
        restored_dl.shutdown()
        # Functional Test: Saving state mid-epoch still restores from the
        # beginning of that epoch (the consumed element is replayed).
        dl = DataLoader2(datapipe=dp, reading_service=rs)
        dl.seed(1)
        it1 = iter(dl)
        temp = next(it1)
        initial_state = dl.state_dict()
        restored_dl = DataLoader2.from_state(initial_state, rs)
        restored_dl._restore_checkpoint_beginning_of_epoch()
        self.assertEqual(([temp] + list(it1)), list(restored_dl))
        dl.shutdown()
        restored_dl.shutdown()
# NOTE(review): the three lines below look like decorator applications whose
# leading '@' and callable names were lost during extraction (presumably a
# click/hatch command decorator with short_help, a click option, and
# @click.pass_obj supplying `app`) — restore from the original file.
(short_help='Show the contents of the config file')
('--all', '-a', 'all_keys', is_flag=True, help='Do not scrub secret fields')
_obj
def show(app, all_keys):
    """Print the config file contents, scrubbing secrets unless --all is set."""
    if (not app.config_file.path.is_file()):
        app.display_critical('No config file found! Please try `hatch config restore`.')
    else:
        from rich.syntax import Syntax
        # Secret fields are scrubbed by default; --all shows everything.
        text = (app.config_file.read() if all_keys else app.config_file.read_scrubbed())
        app.output(Syntax(text.rstrip(), 'toml', background_color='default'))
class Discriminator(nn.Module):
def __init__(self, sigmoid=False):
super(Discriminator, self).__init__()
self.sigmoid = sigmoid
self.conv1 = nn.Conv2d(3, 64, 3, stride=1, padding=1)
self.conv2 = nn.Conv2d(64, 64, 3, stride=2, padding=1)
self.bn2 = nn.BatchNorm2d(64)
self.conv3 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.bn3 = nn.BatchNorm2d(128)
self.conv4 = nn.Conv2d(128, 128, 3, stride=2, padding=1)
self.bn4 = nn.BatchNorm2d(128)
self.conv5 = nn.Conv2d(128, 256, 3, stride=1, padding=1)
self.bn5 = nn.BatchNorm2d(256)
self.conv6 = nn.Conv2d(256, 256, 3, stride=2, padding=1)
self.bn6 = nn.BatchNorm2d(256)
self.conv7 = nn.Conv2d(256, 512, 3, stride=1, padding=1)
self.bn7 = nn.BatchNorm2d(512)
self.conv8 = nn.Conv2d(512, 512, 3, stride=2, padding=1)
self.bn8 = nn.BatchNorm2d(512)
self.conv9 = nn.Conv2d(512, 1, 1, stride=1, padding=1)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = F.relu(self.bn4(self.conv4(x)))
x = F.relu(self.bn5(self.conv5(x)))
x = F.relu(self.bn6(self.conv6(x)))
x = F.relu(self.bn7(self.conv7(x)))
x = F.relu(self.bn8(self.conv8(x)))
x = self.conv9(x)
if self.sigmoid:
return F.sigmoid(F.avg_pool2d(x, x.size()[2:])).view(x.size()[0], (- 1))
else:
return F.avg_pool2d(x, x.size()[2:]).view(x.size()[0], (- 1)) |
class AdditiveAttention2D(AdditiveAttention):
    """Additive (Bahdanau-style) attention applied along an extra dimension.

    Uses the parent's ``w_attention_matrix``, ``u_attention_matrix``,
    ``v_attention_vector`` and ``dropout``; computes
    score = v^T tanh(W s + U h) and returns the dropout of the
    attention-weighted sum of ``h``.
    """

    def forward(self, s, h):
        # Project both inputs into the shared attention space and insert
        # singleton dims so the addition below broadcasts pairwise.
        projected_s = s.matmul(self.w_attention_matrix).unsqueeze(2)
        projected_h = h.matmul(self.u_attention_matrix).unsqueeze(1)
        steps = h.size(1)
        projected_h = projected_h.expand(-1, steps, -1, -1)
        # Additive score per (query, key) pair; squeeze() drops trailing
        # singleton dims as in the original (no explicit dim given).
        energies = torch.nn.functional.tanh(projected_s + projected_h)
        energies = energies.matmul(self.v_attention_vector).squeeze()
        weights = torch.nn.functional.softmax(energies, -1)
        # Attention-weighted combination of h, then dropout.
        return self.dropout(weights.unsqueeze(2).matmul(h).squeeze())
_model_architecture('hf_gpt2', 'hf_gpt2')
def default_architecture(args):
    """Fill in GPT-2-base default hyper-parameters on ``args``.

    Mutates ``args`` in place; any attribute already present keeps its
    value, and missing ones receive the hf_gpt2 defaults.
    """
    if getattr(args, 'max_target_positions', None) is None:
        # Fall back to tokens_per_sample, then the module-level default.
        args.max_target_positions = getattr(args, 'tokens_per_sample', DEFAULT_MAX_TARGET_POSITIONS)
    # (attribute, default) table for the remaining knobs.
    defaults = (
        ('embed_dim', 768),
        ('num_attention_heads', 12),
        ('num_layers', 12),
        ('dropout', 0.1),
        ('attention_dropout', 0.1),
    )
    for attr, fallback in defaults:
        setattr(args, attr, getattr(args, attr, fallback))
def capsule_sdf_grad(radius: float, half_width: float, p: wp.vec3):
    """Gradient (unit outward direction) of a capsule signed-distance field.

    The capsule is aligned with the x-axis with hemispherical caps centred
    at x = +/-half_width.  NOTE(review): ``radius`` is unused — the SDF
    gradient of a capsule is independent of its radius, so this looks
    intentional, but confirm.  This also appears to be a Warp function whose
    ``@wp.func`` decorator may have been stripped during extraction.
    """
    # Beyond the +x cap: direction away from that cap's centre.
    if (p[0] > half_width):
        return normalize(wp.vec3((p[0] - half_width), p[1], p[2]))
    # Beyond the -x cap: direction away from the opposite cap's centre.
    if (p[0] < (0.0 - half_width)):
        return normalize(wp.vec3((p[0] + half_width), p[1], p[2]))
    # Alongside the cylindrical body: purely radial in the y-z plane.
    return normalize(wp.vec3(0.0, p[1], p[2]))
class Subscrib_signup_repos_Handler(BaseHandler):
    """Tornado handler for the admin-only subscription/template repo page.

    NOTE(review): the bare ``.authenticated`` lines below are decorator
    residue (presumably ``@tornado.web.authenticated``) whose prefix was
    lost during extraction — as written they are invalid syntax; restore
    from the original file.  Several user-facing strings also appear to
    have had their non-ASCII text stripped (leaving bare commas); string
    contents are deliberately left untouched here.
    """
    .authenticated
    async def get(self, userid):
        """Render the repo registration page for the matching admin user."""
        user = self.current_user
        # Only the admin whose id matches the URL may view the page.
        if ((user['id'] == int(userid)) and (user['role'] == u'admin')):
            (await self.render('pubtpl_register.html', userid=userid))
        else:
            (await self.render('utils_run_result.html', log=',', title=u'', flg='danger'))
            logger_Web_Handler.error('UserID: %s browse Subscrib_signup_repos failed! Reason: ,', userid)
            return
    .authenticated
    async def post(self, userid):
        """Add a new repo entry to the site-wide repos JSON (admin only)."""
        try:
            user = self.current_user
            if ((user['id'] == int(userid)) and (user['role'] == u'admin')):
                # Collect form fields; 'true'/'false' strings become booleans.
                envs = {}
                for key in self.request.body_arguments:
                    envs[key] = self.get_body_arguments(key)
                env = {}
                for (k, v) in envs.items():
                    if ((v[0] == 'false') or (v[0] == 'true')):
                        env[k] = (True if (v[0] == 'true') else False)
                    else:
                        env[k] = v[0]
                # Name, URL and branch are all mandatory.
                if ((env['reponame'] != '') and (env['repourl'] != '') and (env['repobranch'] != '')):
                    repos = json.loads((await self.db.site.get(1, fields=('repos',)))['repos'])
                    tmp = repos['repos']
                    # Reject duplicates by repo name.
                    inflg = False
                    for repo in repos['repos']:
                        if (repo['reponame'] == env['reponame']):
                            inflg = True
                            break
                    if inflg:
                        raise Exception('')
                    else:
                        tmp.append(env)
                        repos['repos'] = tmp
                        # Reset lastupdate so the repo list is refreshed.
                        repos['lastupdate'] = 0
                        (await self.db.site.mod(1, repos=json.dumps(repos, ensure_ascii=False, indent=4)))
                else:
                    raise Exception('/url/')
            else:
                raise Exception(',')
        except Exception as e:
            if config.traceback_print:
                traceback.print_exc()
            (await self.render('utils_run_result.html', log=str(e), title=u'', flg='danger'))
            logger_Web_Handler.error('UserID: %s modify Subscribe_signup_repos failed! Reason: %s', userid, str(e).replace('\\r\\n', '\r\n'))
            return
        (await self.render('utils_run_result.html', log=u',', title=u'', flg='success'))
        return
class TestQcQuantizeOp():
    """Unit tests for the QcQuantizeOp custom ONNX quantization operator.

    NOTE(review): the bare ``.cuda`` and ``.parametrize(...)`` lines below
    are decorator residue (presumably ``@pytest.mark.cuda`` /
    ``@pytest.mark.parametrize``) whose prefix was lost during extraction —
    as written they are invalid syntax; restore from the original file.
    ``op_name``, ``per_channel_op_name``, ``op_domain``,
    ``available_providers`` and helpers such as ``create_model_from_node``,
    ``build_session``, ``create_quant_info`` and
    ``create_per_channel_quant_info`` are expected to come from module scope.
    """
    def test_update_stats_with_pymo(self):
        """Running the op in updateStats mode should yield a valid encoding."""
        input_arr = np.random.rand(1, 3, 4, 4).astype(np.float32)
        tensor_quantizer = libpymo.TensorQuantizer(MAP_QUANT_SCHEME_TO_PYMO[QuantScheme.post_training_tf], MAP_ROUND_MODE_TO_PYMO['stochastic'])
        cpp_encodings = libpymo.TfEncoding()
        quant_info = create_quant_info(cpp_encodings, tensor_quantizer, OpMode.updateStats, useSymmetricEncoding=False)
        quant_node = helper.make_node(op_name, inputs=['input'], outputs=['output'], domain=op_domain, quant_info=libpymo.PtrToInt64(quant_info))
        model = create_model_from_node(quant_node, input_arr.shape)
        session = build_session(model, available_providers)
        session.run(None, {'input': input_arr})
        encodings = tensor_quantizer.computeEncoding(cpp_encodings.bw, quant_info.useSymmetricEncoding)
        print('Encoding returned: min={}, max={}, offset={}. delta={}, bw={}'.format(encodings.min, encodings.max, encodings.offset, encodings.delta, encodings.bw))
        assert (encodings is not None)
        assert (quant_info.tensorQuantizerRef[0].isEncodingValid is True)
    def test_quantize_dequantize_with_pymo(self):
        """Quantize-dequantize output must be clipped to the given encoding."""
        input_arr = np.asarray([[[[(- 7), (- 5), (- 3), 0, 0.1, 2.5]]]]).astype(np.float32)
        quant_info = libquant_info.QcQuantizeInfo()
        quant_info.isIntDataType = True
        quant_node = helper.make_node(op_name, inputs=['input'], outputs=['output'], domain=op_domain, quant_info=libpymo.PtrToInt64(quant_info))
        model = create_model_from_node(quant_node, input_arr.shape)
        session = build_session(model, available_providers)
        qc_op = QcQuantizeOp(quant_info=quant_info, quant_scheme=QuantScheme.post_training_tf, rounding_mode='nearest', encodings=None, op_mode=OpMode.oneShotQuantizeDequantize, bitwidth=8, use_symmetric_encodings=False)
        session.run(None, {'input': input_arr})
        # Hand-set encoding [-5, 1]; outputs must land within (with rounding slack).
        encodings = libpymo.TfEncoding()
        encodings.bw = 8
        encodings.max = 1
        encodings.min = (- 5.0)
        qc_op.encodings = [encodings]
        qc_op.op_mode = OpMode.quantizeDequantize
        output = session.run(None, {'input': input_arr})[0]
        assert (np.max(output) <= 1.1)
        assert (np.min(output) >= (- 5.1))
    def test_quantize_dequantize_fp16(self):
        """With float data type the op should behave like an fp16 round-trip."""
        input_arr = np.asarray([[[[(- 7), (- 5), (- 3), 0, 0.1, 2.5]]]]).astype(np.float32)
        # Reference: fp32 -> fp16 -> fp32 conversion done with numpy.
        intermediate_output = input_arr.astype(np.float16)
        fp32_array = intermediate_output.astype(np.float32)
        quant_info = libquant_info.QcQuantizeInfo()
        quant_node = helper.make_node(op_name, inputs=['input'], outputs=['output'], domain=op_domain, quant_info=libpymo.PtrToInt64(quant_info))
        model = create_model_from_node(quant_node, input_arr.shape)
        session = build_session(model, available_providers)
        qc_op = QcQuantizeOp(quant_info=quant_info, quant_scheme=QuantScheme.post_training_tf, rounding_mode='nearest', encodings=None, op_mode=OpMode.oneShotQuantizeDequantize, bitwidth=8, use_symmetric_encodings=False)
        qc_op.op_mode = OpMode.quantizeDequantize
        output = session.run(None, {'input': input_arr})[0]
        assert np.allclose(output, fp32_array)
    def test_update_stats_quantize_dequantize(self):
        """Stats collected on one input should drive QDQ of another input."""
        input_arr = np.asarray([[[[(- 7), (- 5), (- 3), 0, 0.1, 2.5]]]]).astype(np.float32)
        input_arr2 = (np.random.randn(*input_arr.shape).astype(np.float32) * 10)
        quant_info = libquant_info.QcQuantizeInfo()
        quant_info.isIntDataType = True
        quant_node = helper.make_node(op_name, inputs=['input'], outputs=['output'], domain=op_domain, quant_info=libpymo.PtrToInt64(quant_info))
        model = create_model_from_node(quant_node, input_arr.shape)
        session = build_session(model, available_providers)
        qc_op = QcQuantizeOp(quant_info=quant_info, quant_scheme=QuantScheme.post_training_tf, rounding_mode='nearest', encodings=None, op_mode=OpMode.updateStats, bitwidth=8, use_symmetric_encodings=False)
        session.run(None, {'input': input_arr})[0]
        qc_op.compute_encodings()
        # Encoding range should match the observed input min/max.
        assert math.isclose(qc_op.encodings[0].max, 2.5, rel_tol=0.01)
        assert math.isclose(qc_op.encodings[0].min, (- 7), rel_tol=0.01)
        qc_op.op_mode = OpMode.quantizeDequantize
        output = session.run(None, {'input': input_arr2})[0]
        # The second input exceeds the range, so it must be clipped/changed.
        assert (np.max(output) <= 2.6)
        assert (np.min(output) >= (- 7.1))
        assert (not np.allclose(output, input_arr2))
    def test_compare_one_shot_with_pymo(self):
        """One-shot QDQ through the op must match direct libpymo results."""
        input_arr = np.random.randn(2, 3, 5, 1).astype(np.float32)
        quant_info = libquant_info.QcQuantizeInfo()
        quant_info.isIntDataType = True
        quant_node = helper.make_node(op_name, inputs=['input'], outputs=['output'], domain=op_domain, quant_info=libpymo.PtrToInt64(quant_info))
        model = create_model_from_node(quant_node, input_arr.shape)
        session = build_session(model, available_providers)
        qc_op = QcQuantizeOp(quant_info=quant_info, quant_scheme=QuantScheme.post_training_tf, rounding_mode='nearest', encodings=None, op_mode=OpMode.oneShotQuantizeDequantize, bitwidth=8, use_symmetric_encodings=False)
        # Reference path: drive the libpymo quantizer by hand.
        quantizer = libpymo.TensorQuantizer(MAP_QUANT_SCHEME_TO_PYMO[QuantScheme.post_training_tf], MAP_ROUND_MODE_TO_PYMO['nearest'])
        out_tensor = np.zeros(input_arr.shape).astype(np.float32)
        quantizer.updateStats(input_arr, False)
        enc = quantizer.computeEncoding(8, False)
        quantizer.quantizeDequantize(input_arr.copy(), out_tensor, enc.min, enc.max, 8, False)
        output = session.run(None, {'input': input_arr})[0]
        assert (quant_info.encoding[0].max == enc.max)
        assert (quant_info.encoding[0].min == enc.min)
        assert np.allclose(output, out_tensor)
    def test_one_shot_quantize_dequantize_asymmetric_cpu(self):
        """One-shot output must equal QDQ with the expected asymmetric encoding."""
        input_arr = np.asarray([[[[(- 7), (- 5), (- 3), 0, 0.1, 2.5]]]]).astype(np.float32)
        quant_info = libquant_info.QcQuantizeInfo()
        quant_node = helper.make_node(op_name, inputs=['input'], outputs=['output'], domain=op_domain, quant_info=libpymo.PtrToInt64(quant_info))
        model = create_model_from_node(quant_node, input_arr.shape)
        session = build_session(model, available_providers)
        qc_op = QcQuantizeOp(quant_info=quant_info, quant_scheme=QuantScheme.post_training_tf, rounding_mode='nearest', encodings=None, op_mode=OpMode.oneShotQuantizeDequantize, bitwidth=8, use_symmetric_encodings=False)
        output_oneshot = session.run(None, {'input': input_arr})[0]
        encodings = libpymo.TfEncoding()
        encodings.bw = 8
        encodings.max = 2.5
        encodings.min = (- 7)
        encodings.offset = (- 188)
        qc_op.encodings = [encodings]
        qc_op.op_mode = OpMode.quantizeDequantize
        output_qdq = session.run(None, {'input': input_arr})
        assert np.allclose(output_oneshot, output_qdq)
    def test_one_shot_quantize_dequantize_symmetric_signed_cpu(self):
        """One-shot output must equal QDQ with the signed-symmetric encoding."""
        input_arr = np.asarray([[[[(- 7), (- 5), (- 3), 0, 0.1, 2.5]]]]).astype(np.float32)
        quant_info = libquant_info.QcQuantizeInfo()
        quant_node = helper.make_node(op_name, inputs=['input'], outputs=['output'], domain=op_domain, quant_info=libpymo.PtrToInt64(quant_info))
        model = create_model_from_node(quant_node, input_arr.shape)
        session = build_session(model, available_providers)
        qc_op = QcQuantizeOp(quant_info=quant_info, quant_scheme=QuantScheme.post_training_tf, rounding_mode='nearest', op_mode=OpMode.oneShotQuantizeDequantize, bitwidth=8, use_symmetric_encodings=True)
        output_oneshot = session.run(None, {'input': input_arr})
        encodings = libpymo.TfEncoding()
        encodings.bw = 8
        encodings.max = 7
        encodings.min = (- 7)
        encodings.offset = (- 128)
        qc_op.encodings = [encodings]
        qc_op.op_mode = OpMode.quantizeDequantize
        output_qdq = session.run(None, {'input': input_arr})
        assert np.allclose(output_oneshot, output_qdq)
    def test_one_shot_quantize_dequantize_symmetric_unsigned_cpu(self):
        """All-positive input + unsigned symmetric: encoding [0, max]."""
        input_arr = np.asarray([[[[0, 1.2, 1.5, 4.0, 4.9, 5.3]]]]).astype(np.float32)
        quant_info = libquant_info.QcQuantizeInfo()
        quant_node = helper.make_node(op_name, inputs=['input'], outputs=['output'], domain=op_domain, quant_info=libpymo.PtrToInt64(quant_info))
        model = create_model_from_node(quant_node, input_arr.shape)
        session = build_session(model, available_providers)
        qc_op = QcQuantizeOp(quant_info=quant_info, quant_scheme=QuantScheme.post_training_tf, rounding_mode='nearest', op_mode=OpMode.oneShotQuantizeDequantize, bitwidth=8, use_symmetric_encodings=True)
        qc_op.use_unsigned_symmetric = True
        output_oneshot = session.run(None, {'input': input_arr})
        encodings = libpymo.TfEncoding()
        encodings.bw = 8
        encodings.max = 5.3
        encodings.min = 0.0
        encodings.offset = 0
        qc_op.encodings = [encodings]
        qc_op.op_mode = OpMode.quantizeDequantize
        output_qdq = session.run(None, {'input': input_arr})
        assert np.allclose(output_oneshot, output_qdq)
    .cuda
    def test_one_shot_quantize_dequantize_cpu_vs_gpu(self):
        """CPU and CUDA custom-op domains must produce identical outputs."""
        input_arr = np.asarray([[[[0, 1.2, 1.5, 4.0, 4.9, 5.3]]]]).astype(np.float32)
        quant_info_cpu = libquant_info.QcQuantizeInfo()
        quant_node_cpu = helper.make_node(op_name, inputs=['input'], outputs=['output'], domain='aimet.customop.cpu', quant_info=libpymo.PtrToInt64(quant_info_cpu))
        model_cpu = create_model_from_node(quant_node_cpu, input_arr.shape)
        session_cpu = build_session(model_cpu, available_providers)
        qc_op_cpu = QcQuantizeOp(quant_info=quant_info_cpu, quant_scheme=QuantScheme.post_training_tf, rounding_mode='nearest', op_mode=OpMode.oneShotQuantizeDequantize, bitwidth=8, use_symmetric_encodings=True)
        output_cpu = session_cpu.run(None, {'input': input_arr})
        quant_info_gpu = libquant_info.QcQuantizeInfo()
        quant_node_gpu = helper.make_node(op_name, inputs=['input'], outputs=['output'], domain='aimet.customop.cuda', quant_info=libpymo.PtrToInt64(quant_info_gpu))
        model_gpu = create_model_from_node(quant_node_gpu, input_arr.shape)
        session_gpu = build_session(model_gpu, available_providers)
        qc_op_gpu = QcQuantizeOp(quant_info=quant_info_gpu, quant_scheme=QuantScheme.post_training_tf, rounding_mode='nearest', op_mode=OpMode.oneShotQuantizeDequantize, bitwidth=8, use_symmetric_encodings=True)
        output_gpu = session_gpu.run(None, {'input': input_arr})
        assert np.alltrue((output_gpu[0] == output_cpu[0]))
    def test_set_get_properties(self):
        """Python-side property setters must propagate to the C++ quantizer."""
        quant_info = libquant_info.QcQuantizeInfo()
        quant_node = helper.make_node(op_name, inputs=['input'], outputs=['output'], domain=op_domain, quant_info=libpymo.PtrToInt64(quant_info))
        qc_op = QcQuantizeOp(quant_info=quant_info, quant_scheme=QuantScheme.post_training_tf, rounding_mode='nearest', op_mode=OpMode.oneShotQuantizeDequantize, bitwidth=8, use_symmetric_encodings=True)
        qc_op.use_strict_symmetric = True
        assert (quant_info.tensorQuantizerRef[0].getStrictSymmetric() == True)
        qc_op.use_unsigned_symmetric = False
        assert (quant_info.tensorQuantizerRef[0].getUnsignedSymmetric() == False)
        qc_op.use_unsigned_symmetric = True
        assert (quant_info.tensorQuantizerRef[0].getUnsignedSymmetric() == True)
        qc_op.data_type = QuantizationDataType.float
        assert (qc_op.data_type == QuantizationDataType.float)
        assert (qc_op.quant_info.isIntDataType == False)
    .parametrize('quant_axis', [0, 1])
    .parametrize('use_symmetric,strict_symmetric,unsigned_symmetric', [(True, True, False), (True, False, True), (False, False, False)])
    def test_per_channel_one_shot_quantize_dequantize(self, use_symmetric, strict_symmetric, unsigned_symmetric, quant_axis):
        """Per-channel one-shot QDQ must equal per-tensor QDQ run per slice."""
        input_shape = (12, 6, 3, 3)
        input_arr = np.random.randn(*input_shape).astype(np.float32)
        expected_output_arr = []
        tensor_quantizers = []
        encodings = []
        for idx in range(input_shape[quant_axis]):
            tensor_quantizer = libpymo.TensorQuantizer(MAP_QUANT_SCHEME_TO_PYMO[QuantScheme.post_training_tf], MAP_ROUND_MODE_TO_PYMO['nearest'])
            tensor_quantizer.setStrictSymmetric(strict_symmetric)
            tensor_quantizer.setUnsignedSymmetric(unsigned_symmetric)
            tensor_quantizers.append(tensor_quantizer)
            encodings.append(libpymo.TfEncoding())
        quant_info = create_per_channel_quant_info(encodings, tensor_quantizers, OpMode.oneShotQuantizeDequantize, useSymmetricEncoding=use_symmetric, ch_idx=quant_axis)
        quant_node = helper.make_node(op_name, inputs=['input'], outputs=['output'], domain=op_domain, quant_info=libpymo.PtrToInt64(quant_info))
        # Build the per-tensor reference by running each channel slice alone.
        quant_info.usePerChannelMode = False
        per_tensor_model = create_model_from_node(quant_node, input_arr.take(indices=0, axis=quant_axis).shape)
        session = build_session(per_tensor_model, available_providers)
        for idx in range(input_shape[quant_axis]):
            channel_input = input_arr.take(indices=idx, axis=quant_axis)
            output = session.run(None, {'input': channel_input})[0]
            expected_output_arr.append(np.expand_dims(output, quant_axis))
            # Reset to one-shot mode for the next channel (a run flips it).
            quant_info.opMode = OpMode.oneShotQuantizeDequantize
        expected_output_arr = np.concatenate(expected_output_arr, axis=quant_axis)
        quant_info.usePerChannelMode = True
        per_channel_quant_node = helper.make_node(per_channel_op_name, inputs=['input'], outputs=['output'], domain=op_domain, quant_info=libpymo.PtrToInt64(quant_info))
        per_channel_model = create_model_from_node(per_channel_quant_node, input_arr.shape)
        session = build_session(per_channel_model, available_providers)
        output_per_channel = session.run(None, {'input': input_arr})[0]
        assert np.allclose(output_per_channel, expected_output_arr)
    def test_per_channel_quantize_dequantize(self):
        """Per-channel QDQ with hand-set encodings (rows 0-2 vs row 3).

        NOTE(review): only 3 tensor quantizers are appended for 4 encodings/
        channels — verify against the original whether a 4th quantizer is
        expected or the helper tolerates the mismatch.
        """
        inp_array = np.array([[(- 7), (- 5), (- 3), 0, 0.1, 2.5], [(- 7), (- 5), (- 3), 0, 0.1, 2.5], [(- 7), (- 5), (- 3), 0, 0.1, 2.5], [(- 7), (- 5), (- 3), 0, 0.1, 2.5]]).astype(np.float32)
        tensor_quantizers = []
        encodings = [libpymo.TfEncoding() for _ in range(4)]
        for index in range(3):
            tensor_quantizer = libpymo.TensorQuantizer(MAP_QUANT_SCHEME_TO_PYMO[QuantScheme.post_training_tf], MAP_ROUND_MODE_TO_PYMO['nearest'])
            tensor_quantizer.isEncodingValid = True
            tensor_quantizers.append(tensor_quantizer)
            encodings[index].bw = 8
            encodings[index].max = 3.81
            encodings[index].min = (- 3.84)
            encodings[index].delta = 0.03
            encodings[index].offset = (- 128)
        # The last channel gets a wider range than the first three.
        encodings[3].bw = 8
        encodings[3].max = 6.35
        encodings[3].min = (- 6.4)
        encodings[3].delta = 0.05
        encodings[3].offset = (- 128)
        quant_info = create_per_channel_quant_info(encodings, tensor_quantizers, OpMode.quantizeDequantize, useSymmetricEncoding=True, ch_idx=0)
        per_channel_quant_node = helper.make_node(per_channel_op_name, inputs=['input'], outputs=['output'], domain=op_domain, quant_info=libpymo.PtrToInt64(quant_info))
        per_channel_model = create_model_from_node(per_channel_quant_node, inp_array.shape)
        per_channel_session = build_session(per_channel_model, available_providers)
        expected_out = np.array([[(- 3.84), (- 3.84), (- 3), 0, 0., 2.49], [(- 3.84), (- 3.84), (- 3), 0, 0., 2.49], [(- 3.84), (- 3.84), (- 3), 0, 0., 2.49], [(- 6.4), (- 5), (- 3), 0, 0.1, 2.5]]).astype(np.float32)
        output = per_channel_session.run(None, {'input': inp_array})[0]
        assert np.allclose(output, expected_out)
# NOTE(review): dataset-viewer UI residue, not source code — remove:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.