code stringlengths 281 23.7M |
|---|
class TestNumberAttribute():
    """Tests for NumberAttribute / NumberSetAttribute (de)serialization."""

    def test_number_attribute(self):
        # The attribute advertises the NUMBER DynamoDB type and honors defaults.
        attr = NumberAttribute()
        assert (attr.attr_type == NUMBER)
        attr = NumberAttribute(default=1)
        assert (attr.default == 1)

    def test_number_serialize(self):
        attr = NumberAttribute()
        assert (attr.serialize(3.141) == '3.141')
        assert (attr.serialize(1) == '1')
        assert (attr.serialize() == '')

    def test_number_deserialize(self):
        attr = NumberAttribute()
        assert (attr.deserialize('1') == 1)
        assert (attr.deserialize('3.141') == 3.141)
        # NOTE(review): this assertion was garbled in the source ("== )",
        # a syntax error); restored as a None check — confirm the intended
        # expected value for deserializing an empty string.
        assert (attr.deserialize('') is None)

    def test_number_set_deserialize(self):
        attr = NumberSetAttribute()
        assert (attr.attr_type == NUMBER_SET)
        assert (attr.deserialize([json.dumps(val) for val in sorted({1, 2})]) == {1, 2})

    def test_number_set_serialize(self):
        attr = NumberSetAttribute()
        assert (attr.serialize({1, 2}) == [json.dumps(val) for val in sorted({1, 2})])
        # An empty set serializes to None (attribute omitted).
        assert (attr.serialize({}) is None)

    def test_number_set_attribute(self):
        # Callable defaults are invoked to produce the default value.
        attr = NumberSetAttribute(default=(lambda : {1, 2}))
        assert (attr.default() == {1, 2})
class Perpendicular(Base):
    """Constraint making planar faces or linear edges of two parts perpendicular."""
    _id = 28
    _entityDef = (_lw, _lw)
    _workplane = True
    _iconName = 'Assembly_ConstraintPerpendicular.svg'
    _tooltip = QT_TRANSLATE_NOOP('asm3', 'Add a "{}" constraint to make planar faces or linear edges of two\nparts perpendicular.')

    def prepare(cls, obj, solver):
        """Add the solver constraint for the two entities of *obj*."""
        system = solver.system
        first, second = cls.getEntities(obj, solver)[:2]
        first_is_plane = isinstance(first, PlaneInfo)
        second_is_plane = isinstance(second, PlaneInfo)
        if first_is_plane and second_is_plane:
            # Plane vs plane: make the two normals perpendicular.
            return system.addPerpendicular(first.normal.entity, second.normal.entity, group=solver.group)
        if not (first_is_plane or second_is_plane):
            # Edge vs edge: directly perpendicular.
            return system.addPerpendicular(first, second, group=solver.group)
        if first_is_plane:
            # Plane vs edge: the edge parallel to the plane normal.
            return system.addParallel(first.normal.entity, second, group=solver.group)
        # Edge vs plane: the edge parallel to the plane normal.
        return system.addParallel(first, second.normal.entity, group=solver.group)
def getBatches(data, batch_size):
    """Shuffle *data* in place and turn it into a list of batches.

    Each slice of up to *batch_size* samples is passed through createBatch.
    """
    random.shuffle(data)
    total = len(data)

    def sample_windows():
        # Yield successive batch_size-sized windows over the shuffled data.
        for start in range(0, total, batch_size):
            yield data[start:min(start + batch_size, total)]

    return [createBatch(window) for window in sample_windows()]
def try_to_load_from_cache(cache_dir, repo_id, filename, revision=None, commit_hash=None):
    """Return the cached path of *filename* for *repo_id*, or None if absent.

    At most one of *revision* / *commit_hash* may be given; with neither,
    the symbolic revision 'main' is resolved via the cache's refs directory.
    """
    if (revision is not None) and (commit_hash is not None):
        raise ValueError('`commit_hash` and `revision` are mutually exclusive, pick one only.')
    if (revision is None) and (commit_hash is None):
        revision = 'main'
    model_cache = os.path.join(cache_dir, 'models--{}'.format(repo_id.replace('/', '--')))
    refs_dir = os.path.join(model_cache, 'refs')
    snapshots_dir = os.path.join(model_cache, 'snapshots')
    # The model dir plus both sub-directories must exist to have a hit.
    for required_dir in (model_cache, refs_dir, snapshots_dir):
        if not os.path.isdir(required_dir):
            return None
    if commit_hash is None:
        # Resolve the symbolic revision to a commit hash via the refs file.
        if revision in os.listdir(refs_dir):
            with open(os.path.join(refs_dir, revision)) as ref_file:
                commit_hash = ref_file.read()
    if commit_hash not in os.listdir(snapshots_dir):
        return None
    candidate = os.path.join(model_cache, 'snapshots', commit_hash, filename)
    return candidate if os.path.isfile(candidate) else None
def _get_area_extent_from_cf_axis(x, y):
(ll_x, ll_y) = (x['first'], y['last'])
(ur_x, ur_y) = (x['last'], y['first'])
ll_x -= ((x['sign'] * 0.5) * x['spacing'])
ur_x += ((x['sign'] * 0.5) * x['spacing'])
ll_y += ((y['sign'] * 0.5) * y['spacing'])
ur_y -= ((y['sign'] * 0.5) * y['spacing'])
return (ll_x, ll_y, ur_x, ur_y) |
def test_ohem_sampler():
    """OHEM sampler: shape mismatch raises; thresh/min_kept control kept pixels."""

    def make_inputs(label_size):
        logits = torch.randn(1, 19, 45, 45)
        labels = torch.randint(0, 19, size=(1, 1, label_size, label_size))
        return logits, labels

    # Mismatched logit/label spatial sizes must trigger an assertion.
    with pytest.raises(AssertionError):
        sampler = OHEMPixelSampler(context=_context_for_ohem())
        seg_logit, seg_label = make_inputs(89)
        sampler.sample(seg_logit, seg_label)

    # With a threshold, at least min_kept pixels are selected.
    sampler = OHEMPixelSampler(context=_context_for_ohem(), thresh=0.7, min_kept=200)
    seg_logit, seg_label = make_inputs(45)
    seg_weight = sampler.sample(seg_logit, seg_label)
    assert seg_weight.shape[0] == seg_logit.shape[0]
    assert seg_weight.shape[1:] == seg_logit.shape[2:]
    assert seg_weight.sum() > 200

    # Without a threshold, exactly min_kept pixels are selected.
    sampler = OHEMPixelSampler(context=_context_for_ohem(), min_kept=200)
    seg_logit, seg_label = make_inputs(45)
    seg_weight = sampler.sample(seg_logit, seg_label)
    assert seg_weight.shape[0] == seg_logit.shape[0]
    assert seg_weight.shape[1:] == seg_logit.shape[2:]
    assert seg_weight.sum() == 200
def _prompt_user_for_file(window: QtWidgets.QWidget, caption: str, filter: str, dir: (str | None)=None, new_file: bool=False) -> (Path | None):
    """Show a Qt open/save file dialog and return the chosen path.

    Returns None when the user cancels the dialog.
    """
    dialog = (QtWidgets.QFileDialog.getSaveFileName if new_file
              else QtWidgets.QFileDialog.getOpenFileName)
    result = dialog(window, caption=caption, dir=dir, filter=filter)
    # Qt returns ('', '') when the dialog is cancelled.
    if (not result) or (result == ('', '')):
        return None
    return Path(result[0])
class Effect6526(BaseEffect):
    """Force Auxiliary role bonus A1: boosts remote cap transfer and remote
    armor repair amounts per level of Amarr Carrier."""
    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        # Both boosts use the same hull attribute, applied per Amarr Carrier level.
        fit.modules.filteredItemBoost(
            (lambda mod: mod.item.requiresSkill('Capital Capacitor Emission Systems')),
            'powerTransferAmount',
            src.getModifiedItemAttr('shipBonusForceAuxiliaryA1'),
            skill='Amarr Carrier', **kwargs)
        fit.modules.filteredItemBoost(
            (lambda mod: mod.item.requiresSkill('Capital Remote Armor Repair Systems')),
            'armorDamageAmount',
            src.getModifiedItemAttr('shipBonusForceAuxiliaryA1'),
            skill='Amarr Carrier', **kwargs)
def import_chrome(profile, bookmark_types, output_format):
    """Print Chrome/Chromium bookmarks or search engines from *profile*.

    One line per entry is printed in the requested *output_format*.
    """
    templates = {'bookmark': '{url} {name}', 'quickmark': '{name} {url}', 'search': "c.url.searchengines['{keyword}'] = '{url}'", 'oldsearch': '{keyword} {url}'}
    if 'search' in bookmark_types:
        # Search engines live in the profile's 'Web Data' sqlite database.
        webdata = sqlite3.connect(os.path.join(profile, 'Web Data'))
        cursor = webdata.cursor()
        cursor.execute('SELECT keyword,url FROM keywords;')
        for (keyword, url) in cursor:
            try:
                print(templates[output_format].format(keyword=keyword, url=opensearch_convert(url)))
            except KeyError:
                print('# Unsupported parameter in url for {}; skipping....'.format(keyword))
    else:
        with open(os.path.join(profile, 'Bookmarks'), encoding='utf-8') as f:
            bookmarks = json.load(f)

        def walk(node, template):
            # Depth-first walk over the bookmark tree, printing url nodes
            # (chrome:// internal urls are skipped).
            if not isinstance(node, dict):
                return
            assert 'type' in node, node
            if node['type'] == 'url':
                if urllib.parse.urlparse(node['url']).scheme != 'chrome':
                    print(template.format(**node))
            elif node['type'] == 'folder':
                for child in node['children']:
                    walk(child, template)

        for root in bookmarks['roots'].values():
            walk(root, templates[output_format])
class PPM(nn.ModuleList):
    """Pyramid Pooling Module: pools the input at several scales and
    bilinearly upsamples each pooled map back to the input resolution."""

    def __init__(self, pool_scales, in_channels, channels, conv_cfg, norm_cfg, act_cfg, align_corners):
        super(PPM, self).__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        # One branch per scale: adaptive average pool then a 1x1 conv block.
        for scale in pool_scales:
            branch = nn.Sequential(
                nn.AdaptiveAvgPool2d(scale),
                ConvModule(self.in_channels, self.channels, 1,
                           conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg,
                           act_cfg=self.act_cfg))
            self.append(branch)

    def forward(self, x):
        """Return a list of per-scale feature maps, upsampled to x's size."""
        target_size = x.size()[2:]
        outputs = []
        for branch in self:
            pooled = branch(x)
            outputs.append(resize(pooled, size=target_size, mode='bilinear',
                                  align_corners=self.align_corners))
        return outputs
@unittest.skip('Not ready for production yet')
class SshConfig(Config):
    """Exercises pynag.Parsers.SshConfig against a live SSH target.

    NOTE(review): the skip decorator was garbled into a bare string
    expression statement in the source; restored here.
    """

    def setUp(self):
        self.instance = pynag.Parsers.SshConfig(host='localhost', username='palli')

    def tearDown(self):
        pass

    def testParseMaincfg(self):
        self.instance.parse_maincfg()

    def testParse(self):
        self.instance.parse()
        host = self.instance.get_host('localhost')
        self.instance.item_edit_field(host, '__test', (host['__test'] + '+'))

    def testOpenFile(self):
        self.instance.open('/etc/nagios3/nagios.cfg').read()

    def testPathWrappers(self):
        # Exercise the ftp/stat/isdir path wrappers over SSH.
        ftp = self.instance.ftp
        ftp.stat('/')
        self.assertTrue(self.instance.isdir('/'))
class Packages_Contains_Environment_1_TestCase(ParserTest):
    """A '%packages' section containing an '@^env' line must set
    handler.packages.environment to that environment name."""

    def __init__(self, *args, **kwargs):
        ParserTest.__init__(self, *args, **kwargs)
        # NOTE(review): the kickstart text was garbled in the source
        # ("\^whatever-environment"); an environment is spelled
        # '@^<name>' on its own line inside %packages, so the missing
        # newline and '@' are restored here.
        self.ks = '\n%packages\n@^whatever-environment\n%end\n'

    def runTest(self):
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always')
            self.parser.readKickstartFromString(self.ks)
            self.assertEqual(self.handler.packages.environment, 'whatever-environment')
def params_to_string(num_params: float, units: Optional[str]=None, precision: int=2) -> str:
    """Format a parameter count as a human-readable string.

    With units=None the scale ('M'/'k') is chosen automatically; units='M'
    or 'K' forces mega/kilo scaling. Any other unit string falls back to
    the raw number.
    """
    if units == 'M':
        return str(round(num_params / (10.0 ** 6), precision)) + ' ' + units
    if units == 'K':
        return str(round(num_params / (10.0 ** 3), precision)) + ' ' + units
    if units is None:
        if (num_params // (10 ** 6)) > 0:
            return str(round(num_params / (10 ** 6), precision)) + ' M'
        if num_params // (10 ** 3):
            return str(round(num_params / (10 ** 3), precision)) + ' k'
        return str(num_params)
    return str(num_params)
def unique_in_window(iterable, n, key=None):
    """Yield the items of *iterable* whose key is not present among the
    *n* most recent items.

    *key* maps each item to the value used for the uniqueness test
    (defaults to the item itself).

    >>> list(unique_in_window([0, 1, 0, 2, 3, 0], 3))
    [0, 1, 2, 3, 0]

    Raises ValueError if *n* is not positive.
    """
    if n <= 0:
        raise ValueError('n must be greater than 0')
    window = deque(maxlen=n)
    # Multiplicity of each key currently inside the window. A plain set is
    # not enough: the previous implementation skipped duplicates without
    # sliding the window, so e.g. [1, 2, 1] with n=2 wrongly dropped the
    # final 1 even though it is outside the 2-item window of the first.
    counts = defaultdict(int)
    use_key = (key is not None)
    for item in iterable:
        # Age the window by one slot for EVERY incoming item (the append
        # below pushes out window[0] once the deque is full).
        if len(window) == n:
            to_discard = window[0]
            if counts[to_discard] == 1:
                del counts[to_discard]
            else:
                counts[to_discard] -= 1
        k = (key(item) if use_key else item)
        if k not in counts:
            yield item
        counts[k] += 1
        window.append(k)
class SessionStore(object):
    """Registry of live debugging sessions keyed by uuid.

    Each session pairs one tracer (the traced process' command socket) with
    any number of websocket clients. A background gevent greenlet reaps
    sessions idle for more than *inactivity_timeout* minutes.
    """

    def __init__(self, inactivity_timeout=10, sweep_time=1, attach_timeout=60, timeout_disable_mode='soft'):
        """
        inactivity_timeout: minutes without traffic before a session is killed.
        sweep_time: seconds between garbage-collection passes.
        attach_timeout: seconds to wait for the peer side to attach
            (ALLOW_ORPHANS disables the wait).
        timeout_disable_mode: 'soft' or 'hard' disable sent on timeout kill.
        """
        if timeout_disable_mode not in ['soft', 'hard']:
            raise ValueError("timeout_disable_mode must be 'hard' or 'soft'")
        self.inactivity_timeout = inactivity_timeout
        self.sweep_time = sweep_time
        self.attach_timeout = attach_timeout
        self.timeout_disable_mode = timeout_disable_mode
        self._sessions = {}
        # BUG FIX: was `self.gc_glet = None`, but start()/stop() use
        # `self._gc_glet`; calling stop() before start() raised AttributeError.
        self._gc_glet = None
        self._lock = RLock()

    def __contains__(self, uuid):
        return uuid in self._sessions

    def has_client(self, uuid):
        """True if *uuid* exists and has at least one client attached."""
        return (uuid in self._sessions) and self._sessions[uuid].has_client()

    def has_tracer(self, uuid):
        """True if *uuid* exists and has a tracer attached.

        BUG FIX: `tracer` is an attribute (see send_to_tracer/slaughter);
        the original called it (`.tracer()`), which would have invoked the
        tracer socket object.
        """
        return (uuid in self._sessions) and (self._sessions[uuid].tracer is not None)

    def _run_gc(self):
        """Periodically kill sessions idle longer than inactivity_timeout."""
        def gc_pass():
            now = time()
            for uuid in list(self._sessions):
                last_message = self._sessions[uuid].timestamp
                # Timestamps are seconds; the timeout is minutes.
                if ((now - last_message) / 60) > self.inactivity_timeout:
                    log.info('Session %s was marked inactive, killing' % uuid)
                    self.slaughter(uuid, self.timeout_disable_mode)
        while True:
            gc_pass()
            gevent.sleep(self.sweep_time)

    def start(self):
        """Spawn the gc greenlet (no-op when the inactivity timeout is falsy)."""
        log.info('Starting qdb.server.session_store')
        if self.inactivity_timeout:
            self._gc_glet = gevent.spawn(self._run_gc)

    def stop(self):
        """Kill the gc greenlet and terminate every session."""
        log.info('Stopping qdb.server.session_store')
        if self._gc_glet:
            self._gc_glet.kill(timeout=5)
        self.slaughter_all(self.timeout_disable_mode)

    def attach_tracer(self, uuid, socket, local_pid, pause_signal):
        """Attach a tracer socket to session *uuid*, creating it if needed.

        Returns True on success; False if no client attached within
        attach_timeout seconds (the session is then killed).
        """
        log.info('Attaching a tracer for session %s' % uuid)
        if uuid in self._sessions:
            session = self._sessions[uuid].update_timestamp()
        else:
            session = DebuggingSession()
        self._sessions[uuid] = session.attach_tracer(socket, local_pid, pause_signal)
        # Consistency: use the ALLOW_ORPHANS sentinel, as attach_client does,
        # instead of the magic literal 0.
        if self.attach_timeout == ALLOW_ORPHANS:
            log.info('Attached %s%stracer for session %s' % (('local ' if local_pid else ''), ('' if self._sessions[uuid].clients else 'orphaned '), uuid))
            return True
        if not self._sessions[uuid].both_sides_event.wait(self.attach_timeout):
            self._send_to_socket(socket, fmt_err_msg('client', 'No client', serial=json.dumps))
            self.slaughter(uuid, self.timeout_disable_mode)
            log.warn('No client came to debug %s' % uuid)
            return False
        log.info('Session %s has started' % uuid)
        return True

    def attach_client(self, uuid, ws):
        """Attach a websocket client to session *uuid*, creating it if needed.

        Returns True on success; False if no tracer attached within
        attach_timeout seconds (the session is then killed).
        """
        log.info('Attaching a client for session %s' % uuid)
        if uuid in self._sessions:
            session = self._sessions[uuid].update_timestamp()
        else:
            session = DebuggingSession()
        self._sessions[uuid] = session.attach_client(ws)
        if self.attach_timeout == ALLOW_ORPHANS:
            log.info('Attached %sclient for session %s' % (('' if self._sessions[uuid].tracer else 'orphaned '), uuid))
            return True
        if not self._sessions[uuid].both_sides_event.wait(self.attach_timeout):
            ws.send(fmt_err_msg('tracer', 'No tracer', serial=json.dumps))
            # Consistency: attach_tracer kills with the configured mode.
            self.slaughter(uuid, self.timeout_disable_mode)
            log.warn('No tracer attached for %s' % uuid)
            return False
        return True

    def _update_timestamp(self, uuid):
        # Refresh the session's last-activity timestamp.
        self._sessions[uuid] = self._sessions[uuid].update_timestamp()

    def _send_to_socket(self, sck, msg):
        """Send *msg* over *sck*, length-prefixed with a big-endian int32.

        BUG FIX: the original signature omitted `self` even though every
        call site invokes this as a bound method (TypeError at runtime).
        """
        msg_to_send = str_to_bytes(msg, encoding='utf-8')
        sck.sendall(pack('>i', len(msg_to_send)))
        sck.sendall(msg_to_send)

    def is_local(self, uuid):
        """Return the tracer's local pid (truthy) if it runs on this host."""
        return (uuid in self._sessions) and self._sessions[uuid].local_pid

    def pause_tracer(self, uuid):
        """Signal a server-local tracer to pause; True on success."""
        session = self._sessions.get(uuid)
        if not session:
            log.warn('Attempted to pause non-existing session %s' % uuid)
            return False
        if not session.local_pid:
            log.warn('Attempted to pause non-local session %s' % uuid)
            return False
        try:
            os.kill(session.local_pid, session.pause_signal)
            return True
        except OSError:
            return False

    def send_to_tracer(self, uuid, event):
        """Forward *event* to the session's tracer socket."""
        if uuid not in self._sessions:
            log.warn('send_to_tracer failed: session %s does not exist' % uuid)
            return
        try:
            if (event['e'] == 'pause') and self.is_local(uuid):
                # Local tracers are paused with a signal instead of a message.
                self.pause_tracer(uuid)
                log.info('Raising pause signal (%d) in server local session %s' % (self._sessions[uuid].pause_signal, uuid))
                self._update_timestamp(uuid)
                return
            msg = fmt_msg(event['e'], event.get('p'), serial=json.dumps)
        except (ValueError, KeyError) as e:
            log.warn('send_to_tracer(uuid=%s, event=%s) failed: %s' % (uuid, event, e))
            raise
        sck = self._sessions[uuid].tracer
        if sck:
            self._send_to_socket(sck, msg)
        else:
            log.warn('No client session is alive for %s' % uuid)
        self._update_timestamp(uuid)

    def send_to_clients(self, uuid, event):
        """Broadcast *event* to all clients, dropping dead websockets."""
        if uuid not in self._sessions:
            log.warn('send_to_clients failed: session %s does not exist' % uuid)
            return
        try:
            msg = fmt_msg(event['e'], event.get('p'), serial=json.dumps)
        except (KeyError, ValueError) as e:
            log.warn('send_to_clients(uuid=%s, event=%s) failed: %s' % (uuid, event, e))
            raise
        clients = self._sessions[uuid].clients
        with self._lock:
            for client in set(clients):
                try:
                    client.send(msg)
                except Exception:
                    log.info('Client was closed for debug session: %s' % uuid)
                    clients.remove(client)
        self._update_timestamp(uuid)

    def slaughter(self, uuid, mode='soft'):
        """Tear down session *uuid*: disable clients and tracer, then forget it."""
        session = self._sessions.get(uuid)
        if not session:
            return
        disable_event = fmt_msg('disable')
        self.send_to_clients(uuid, event=disable_event)
        for client in session.clients:
            try:
                client.close()
            except WebSocketError as e:
                if (str(e) != 'Socket is dead') or (e.errno not in safe_errnos):
                    log.exception('Exception caught while killing client for session %s:' % uuid)
        if session.tracer:
            try:
                disable_event['p'] = mode
                self.send_to_tracer(uuid, event=disable_event)
                session.tracer.close()
            except socket.error as e:
                if e.errno not in safe_errnos:
                    log.exception('Exception caught while killing tracer for session %s:' % uuid)
        del self._sessions[uuid]
        log.info('Debugging session %s has been terminated' % uuid)

    def slaughter_all(self, mode='soft'):
        """Slaughter every known session with the given disable *mode*."""
        for uuid in list(self._sessions):
            self.slaughter(uuid, mode)
def check_model_compatibilty(config: AttrDict, state_dict: Dict[(str, Any)]):
    """Raise if *state_dict* layer names don't carry the prefixes VISSL expects.

    Feature-extractor models wrap the trunk, so their expected prefix differs.
    """
    from vissl.models import is_feature_extractor_model
    heads_append_prefix = 'heads.'
    if is_feature_extractor_model(config.MODEL):
        trunk_append_prefix = 'trunk.base_model._feature_blocks.'
    else:
        trunk_append_prefix = 'trunk._feature_blocks.'
    valid_prefixes = (trunk_append_prefix, heads_append_prefix)
    is_compatible = all(layername.startswith(valid_prefixes) for layername in state_dict.keys())
    if not is_compatible:
        raise Exception(f'Model provided in config.MODEL.WEIGHTS_INIT.PARAMS_FILE is not compatible with VISSL. Please set config.MODEL.WEIGHTS_INIT.APPEND_PREFIX and config.MODEL.WEIGHTS_INIT.REMOVE_PREFIX for making model compatible. Expected trunk prefix: {trunk_append_prefix}')
class KnownValues(unittest.TestCase):
    """EA-ADC regression tests against reference energies/pole strengths."""

    def _check_roots(self, energies, pole_strengths, nroots):
        # All reference values in this fixture are 0 to 6 decimal places.
        for root in range(nroots):
            self.assertAlmostEqual(energies[root], 0., 6)
        for root in range(nroots):
            self.assertAlmostEqual(pole_strengths[root], 0., 6)

    def test_ea_adc2(self):
        myadc.method_type = 'ea'
        (e, v, p, x) = myadc.kernel(nroots=3)
        self.assertAlmostEqual(myadc.e_corr, (- 0.), 6)
        self._check_roots(e, p, 3)

    def test_ea_adc2_oneroot(self):
        myadc.method_type = 'ea'
        (e, v, p, x) = myadc.kernel()
        self._check_roots(e, p, 1)

    def test_ea_adc2x(self):
        myadc.method = 'adc(2)-x'
        myadc.method_type = 'ea'
        (e, v, p, x) = myadc.kernel(nroots=4)
        self._check_roots(e, p, 4)

    def test_ea_adc3(self):
        myadc.method = 'adc(3)'
        (e, t_amp1, t_amp2) = myadc.kernel_gs()
        self.assertAlmostEqual(e, (- 0.), 6)
        myadc.method_type = 'ea'
        (e, v, p, x, adc_es) = myadc.ea_adc(nroots=3)
        myadc.analyze()
        self._check_roots(e, p, 3)
def get_geolocation(request, force=False):
    """Return geolocation data for *request*.

    Prefers the data a middleware already attached as `request.geo`; with
    force=True the GeoIP database is always queried directly. Falls back
    to an empty GeolocationData (with a warning) when nothing is attached.
    """
    if force:
        return get_geoipdb_geolocation(request)
    _missing = object()
    geo = getattr(request, 'geo', _missing)
    # Mirror hasattr semantics: an attached value of None is still returned.
    if geo is not _missing:
        return geo
    log.warning('No geolocation data set by middleware (see CloudflareGeoIpMiddleware). Consider enabling a GeoIp middleware for ad targeting.')
    return GeolocationData()
class CCCVFunctionControl(FunctionControl):
    """CC-CV charging implemented as a 'differential with max' function control."""

    def __init__(self, param, options):
        super().__init__(param, self.cccv, options, control='differential with max')
        pybamm.citations.register('Mohtat2021')

    def cccv(self, variables):
        """Control residual combining a current-tracking term and a
        voltage-target term (gains K_aw and K_V are both 1)."""
        # K_aw: gain on the current term — presumably anti-windup; confirm.
        K_aw = 1
        # K_V: gain on the voltage term.
        K_V = 1
        Q = self.param.Q
        I_var = variables['Current variable [A]']
        I = variables['Current [A]']
        V = variables['Voltage [V]']
        V_CCCV = pybamm.Parameter('Voltage function [V]')
        current_term = ((- K_aw) / Q) * (I_var - I)
        voltage_term = K_V * (V - V_CCCV)
        return current_term + voltage_term
def warn_explicit_for(method: FunctionType, message: PytestWarning) -> None:
    """Issue *message* attributed to the definition site of *method*.

    If a warnings filter escalates the warning into an error, re-raise it
    with the line number appended and the internal context suppressed.
    """
    lineno = method.__code__.co_firstlineno
    filename = inspect.getfile(method)
    module = method.__module__
    registry = method.__globals__.setdefault('__warningregistry__', {})
    try:
        warnings.warn_explicit(message, type(message), filename=filename,
                               module=module, registry=registry, lineno=lineno)
    except Warning as w:
        raise type(w)(f'''{w}
at (unknown):{lineno}''') from None
@pytest.mark.parametrize('num_spin_orb, num_bits_rot_aa', ((8, 3), (20, 3), (57, 3)))
def test_sparse_costs_against_openfermion(num_spin_orb, num_bits_rot_aa):
    """Compare qualtran T-counts for the sparse Hamiltonian select/prepare
    against OpenFermion's cost_sparse, adjusting for known accounting
    differences (a reflection cost and a swap-cost delta).

    NOTE(review): the '@pytest.mark' prefix of the parametrize decorator
    was garbled away in the source (leaving a bare '.parametrize(...)',
    a syntax error); restored here.
    """
    num_bits_state_prep = 12
    cost = 0
    # SELECT T-count.
    bloq = SelectSparse(num_spin_orb)
    (_, sigma) = bloq.call_graph()
    cost += sigma[TGate()]
    # PREPARE T-count.
    (bloq, num_non_zero) = make_prep_sparse(num_spin_orb, num_bits_state_prep, num_bits_rot_aa)
    (_, sigma) = bloq.call_graph()
    cost += sigma[TGate()]
    # PREPARE^dagger with the QROAM block size the reference assumes.
    bloq = attrs.evolve(bloq, adjoint=True, qroam_block_size=(2 ** QI(num_non_zero)[0]))
    (_, sigma) = bloq.call_graph()
    cost += sigma[TGate()]
    # These inputs do not affect the single-step cost we compare.
    unused_lambda = 10
    unused_de = 0.001
    unused_stps = 10
    # Reflection cost that the reference includes but the bloqs above do not.
    logd = (num_non_zero - 1).bit_length()
    refl_cost = (4 * ((num_bits_state_prep + logd) + 4))
    # Swap-cost delta between the two accounting conventions.
    delta_swap = ((8 * (7 - 4)) * ((num_spin_orb // 2) - 1).bit_length())
    cost_of = cost_sparse(num_spin_orb, unused_lambda, num_non_zero, unused_de, num_bits_state_prep, unused_stps)[0]
    adjusted_cost_qualtran = (((cost + refl_cost) - delta_swap) // 4)
    assert (adjusted_cost_qualtran == cost_of)
def get_print_full(x):
    """Return the full printed repr of *x* with pandas display.max_rows
    set to len(x), minus the trailing newline.

    stdout and the pandas option are always restored, even on error.
    """
    old_stdout = sys.stdout
    sys.stdout = buffer = StringIO()
    try:
        pd.set_option('display.max_rows', len(x))
        print(x)
        printed = buffer.getvalue()
    finally:
        # BUG FIX (cleanup): removed the pointless bare `except: raise`;
        # try/finally alone already propagates errors while restoring state.
        sys.stdout = old_stdout
        pd.reset_option('display.max_rows')
    return printed[:(- 1)]  # drop the newline appended by print()
@given(strat=unstructure_strats, detailed_validation=..., prefer_attrib=..., dict_factory=one_of(just(dict), just(OrderedDict)))
def test_copy_func_hooks(converter_cls: Type[BaseConverter], strat: UnstructureStrategy, prefer_attrib: bool, detailed_validation: bool, dict_factory: Callable):
    """Function-dispatch un/structure hooks must survive Converter.copy().

    NOTE(review): the '@given(' prefix of the Hypothesis decorator was
    garbled away in the source, leaving a bare keyword tuple (a syntax
    error); restored here. The '...' entries tell Hypothesis to infer
    strategies from the parameter annotations.
    """
    c = converter_cls(unstruct_strat=strat, prefer_attrib_converters=prefer_attrib, detailed_validation=detailed_validation, dict_factory=dict_factory)
    c.register_unstructure_hook_func((lambda t: (t is Simple)), (lambda s: s.a))
    c.register_structure_hook_func((lambda t: (t is Simple)), (lambda v, _: Simple(v)))
    assert (c.unstructure(Simple(1)) == 1)
    assert (c.structure(1, Simple) == Simple(1))
    copy = c.copy()
    assert (c is not copy)
    # The copy keeps both hooks and all configuration flags.
    assert (copy.unstructure(Simple(1)) == 1)
    assert (copy.structure(copy.unstructure(Simple(1)), Simple) == Simple(1))
    assert (c.detailed_validation == copy.detailed_validation)
    assert (c._prefer_attrib_converters == copy._prefer_attrib_converters)
    assert (c._dict_factory == copy._dict_factory)
def get_elec_ddpm_discrete_config():
    """Build the discrete-time DDPM config for the electricity dataset."""
    config = get_default_configs()
    # Optimization / training loop.
    config.weight_decay = None
    config.reduce_mean = True
    config.likelihood_weighting = False
    config.batch_size = 64
    config.epochs = 20
    # SDE / model settings.
    model_cfg = config.modeling
    model_cfg.num_scales = 100
    model_cfg.beta_min = 0.01
    model_cfg.beta_max = 10
    model_cfg.md_type = 'vpsde'
    # Predictor-corrector sampler with ancestral sampling, no corrector.
    sampler_cfg = config.sampling
    sampler_cfg.method = 'pc'
    sampler_cfg.predictor = 'ancestral_sampling'
    sampler_cfg.corrector = 'none'
    # Discrete-time training.
    train_cfg = config.training
    train_cfg.continuous = False
    train_cfg.seed = 123
    # Run flags and checkpoint path.
    config.train = True
    config.save = True
    config.path = './model/ddpm_d.pkl'
    return config
def netting_channel_state(chain_state, token_network_state, token_network_registry_state, partner):
    """Create a netting channel between our node and *partner* (balance 10
    each) and register it in *token_network_state*.

    A random partner address is generated when *partner* is None.
    """
    if partner is None:
        partner = factories.make_address()
    canonical_identifier = factories.make_canonical_identifier(token_network_address=token_network_state.address)
    our_end = factories.NettingChannelEndStateProperties(balance=TokenAmount(10), address=chain_state.our_address)
    partner_end = factories.NettingChannelEndStateProperties(balance=TokenAmount(10), address=partner)
    properties = factories.NettingChannelStateProperties(
        our_state=our_end,
        partner_state=partner_end,
        token_address=token_network_state.token_address,
        token_network_registry_address=token_network_registry_state.address,
        canonical_identifier=canonical_identifier,
    )
    channel_state = factories.create(properties)
    channel_id = canonical_identifier.channel_identifier
    # Index the new channel by partner address and by channel id.
    token_network_state.partneraddresses_to_channelidentifiers[partner].append(channel_id)
    token_network_state.channelidentifiers_to_channels[channel_id] = channel_state
    return channel_state
@pytest.mark.supported(only_if=(lambda backend: backend.cipher_supported(algorithms._IDEAInternal((b'\x00' * 16)), modes.CBC((b'\x00' * 8)))), skip_message='Does not support IDEA CBC')
class TestIDEAModeCBC():
    """NIST vector tests for IDEA in CBC mode.

    NOTE(review): the '@pytest.mark' prefix of the supported marker was
    garbled away in the source, leaving a syntactically invalid bare
    '.supported(...)' line; restored here.
    """
    test_cbc = generate_encrypt_test(load_nist_vectors, os.path.join('ciphers', 'IDEA'), ['idea-cbc.txt'], (lambda key, **kwargs: algorithms._IDEAInternal(binascii.unhexlify(key))), (lambda iv, **kwargs: modes.CBC(binascii.unhexlify(iv))))
class SKConvBlock(nn.Module):
    """Selective-Kernel convolution block: several dilated 3x3 branches
    fused by a learned softmax attention over branches."""

    def __init__(self, in_channels, out_channels, stride, groups=32, num_branches=2, reduction=16, min_channels=32):
        super(SKConvBlock, self).__init__()
        self.num_branches = num_branches
        self.out_channels = out_channels
        mid_channels = max(in_channels // reduction, min_channels)
        self.branches = Concurrent(stack=True)
        for i in range(num_branches):
            dilation = i + 1
            # NOTE: branch names start at 'branch2' — presumably to match
            # pretrained checkpoint keys; confirm before renaming.
            self.branches.add_module('branch{}'.format(i + 2), conv3x3_block(in_channels=in_channels, out_channels=out_channels, stride=stride, padding=dilation, dilation=dilation, groups=groups))
        self.pool = nn.AdaptiveAvgPool2d(output_size=1)
        self.fc1 = conv1x1_block(in_channels=out_channels, out_channels=mid_channels)
        self.fc2 = conv1x1(in_channels=mid_channels, out_channels=(out_channels * num_branches))
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        branch_maps = self.branches(x)
        # Global descriptor from the sum of all branches.
        fused = branch_maps.sum(dim=1)
        descriptor = self.pool(fused)
        attention = self.fc2(self.fc1(descriptor))
        attention = attention.view(attention.size(0), self.num_branches, self.out_channels)
        attention = self.softmax(attention)
        attention = attention.unsqueeze(-1).unsqueeze(-1)
        # Attention-weighted sum over branches.
        weighted = branch_maps * attention
        return weighted.sum(dim=1)
class ScenarioTestCase(unittest.TestCase):
    """End-to-end tests for pyrocko.scenario generators and collections.

    NOTE(review): several decorators were garbled into bare expression
    statements in the source — '(*have_gf_store(...))' is not even valid
    syntax, and tearDownClass had lost '@classmethod' and gained a dead
    'continue' before rmtree. They are restored below.
    """
    store_id = 'crust2_mf'
    store_id_static = 'ak135_static'
    tempdirs = []

    @classmethod
    def tearDownClass(cls):
        # Remove every scratch directory created by the tests.
        for d in cls.tempdirs:
            shutil.rmtree(d)

    @unittest.skipUnless(*have_gf_store(store_id))
    def test_scenario_waveforms(self):
        tempdir = mkdtemp(prefix='pyrocko-scenario')
        self.tempdirs.append(tempdir)
        vmin = 2500.0
        generator = scenario.ScenarioGenerator(seed=20, center_lat=42.6, center_lon=13.3, radius=(60 * km), target_generators=[targets.WaveformGenerator(store_id=ScenarioTestCase.store_id, station_generators=[targets.RandomStationGenerator(nstations=5, avoid_water=False)], noise_generator=targets.waveform.WhiteNoiseGenerator(), seismogram_quantity='velocity')], source_generator=scenario.DCSourceGenerator(time_min=util.str_to_time('2017-01-01 00:00:00'), time_max=util.str_to_time('2017-01-01 02:00:00'), radius=(10 * km), depth_min=(1 * km), depth_max=(10 * km), magnitude_min=3.0, magnitude_max=4.5, strike=120.0, dip=45.0, rake=90.0, perturbation_angle_std=15.0, nevents=3))

        def twin(source):
            # Time window: origin time plus travel time over 100 km at vmin.
            tmin = source.time
            tmax = (source.time + ((100 * km) / vmin))
            return (tmin, tmax)
        engine = get_gf_engine()
        generator.init_modelling(engine)
        ref_sources = generator.get_sources()
        ref_trs_list = []
        for source in ref_sources:
            trs = generator.get_waveforms(*twin(source))
            trs.sort(key=(lambda tr: tr.nslc_id))
            ref_trs_list.append(trs)
        collection = scenario.ScenarioCollection(tempdir, engine)
        collection.add_scenario('one', generator)
        # Duplicate scenario ids must be rejected.
        with self.assertRaises(scenario.ScenarioError):
            collection.add_scenario('one', generator)
        assert (len(collection.list_scenarios()) == 1)
        assert (collection.list_scenarios()[0].scenario_id == 'one')
        s = collection.get_scenario('one')
        for (ref_trs, source) in zip(ref_trs_list, s.get_generator().get_sources()):
            trs = generator.get_waveforms(*twin(source))
            trs.sort(key=(lambda tr: tr.nslc_id))
            self.assert_traces_almost_equal(trs, ref_trs)
        # A second collection over the same directory sees the same scenario.
        collection2 = scenario.ScenarioCollection(tempdir, engine)
        assert (len(collection2.list_scenarios()) == 1)
        assert (collection2.list_scenarios()[0].scenario_id == 'one')
        s = collection2.get_scenario('one')
        for (ref_trs, source) in zip(ref_trs_list, s.get_generator().get_sources()):
            trs = generator.get_waveforms(*twin(source))
            trs.sort(key=(lambda tr: tr.nslc_id))
            self.assert_traces_almost_equal(trs, ref_trs)
        (tmin, tmax) = s.get_time_range()
        s.ensure_data(tmin, tmax)
        p = s.get_waveform_pile()
        for (ref_trs, source) in zip(ref_trs_list, s.get_generator().get_sources()):
            (tmin, tmax) = twin(source)
            trs = p.all(tmin=tmin, tmax=tmax, include_last=False)
            trs.sort(key=(lambda tr: tr.nslc_id))
            self.assert_traces_almost_equal(trs, ref_trs)

    @unittest.skipUnless(have_kite(), 'Kite is not available')
    @unittest.skipUnless(*have_gf_store(store_id))
    @unittest.skipUnless(*have_gf_store(store_id_static))
    def test_scenario_insar(self):
        tempdir = mkdtemp(prefix='pyrocko-scenario')
        self.tempdirs.append(tempdir)
        generator = scenario.ScenarioGenerator(seed=20, center_lat=42.6, center_lon=13.3, radius=(60 * km), target_generators=[targets.InSARGenerator(resolution=(20, 20), noise_generator=targets.insar.AtmosphericNoiseGenerator(amplitude=1e-05))], source_generator=scenario.DCSourceGenerator(time_min=util.str_to_time('2017-01-01 00:00:00'), time_max=util.str_to_time('2017-01-01 02:00:00'), radius=(10 * km), depth_min=(1 * km), depth_max=(10 * km), magnitude_min=3.0, magnitude_max=4.5, strike=120.0, dip=45.0, rake=90.0, perturbation_angle_std=15.0, nevents=3))
        engine = get_gf_engine()
        generator.init_modelling(engine)
        collection = scenario.ScenarioCollection(tempdir, engine)
        collection.add_scenario('insar', generator)
        s = collection.get_scenario('insar')
        s.ensure_data()

    @unittest.skipUnless(*have_gf_store(store_id_static))
    def test_scenario_gnss(self):
        tempdir = mkdtemp(prefix='pyrocko-scenario')
        self.tempdirs.append(tempdir)
        generator = scenario.ScenarioGenerator(seed=20, center_lat=42.6, center_lon=13.3, radius=(60 * km), target_generators=[targets.GNSSCampaignGenerator(station_generators=[targets.RandomStationGenerator(avoid_water=False, channels=None)])], source_generator=scenario.DCSourceGenerator(time_min=util.str_to_time('2017-01-01 00:00:00'), time_max=util.str_to_time('2017-01-01 02:00:00'), radius=(10 * km), depth_min=(1 * km), depth_max=(10 * km), magnitude_min=3.0, magnitude_max=4.5, strike=120.0, dip=45.0, rake=90.0, perturbation_angle_std=15.0, nevents=3))
        engine = get_gf_engine()
        generator.init_modelling(engine)
        collection = scenario.ScenarioCollection(tempdir, engine)
        collection.add_scenario('gnss', generator)
        s = collection.get_scenario('gnss')
        assert (len(s.get_gnss_campaigns()) == 1)

    @unittest.skipUnless(have_kite(), 'Kite is not available')
    @unittest.skipUnless(*have_gf_store(store_id))
    @unittest.skipUnless(*have_gf_store(store_id_static))
    def test_scenario_combinations(self):
        generator = scenario.ScenarioGenerator(seed=20, center_lat=42.6, center_lon=13.3, radius=(60 * km), target_generators=[targets.WaveformGenerator(store_id=ScenarioTestCase.store_id, station_generators=[targets.RandomStationGenerator(avoid_water=False)], noise_generator=targets.waveform.WhiteNoiseGenerator(), seismogram_quantity='velocity'), targets.InSARGenerator(resolution=(20, 20), noise_generator=targets.insar.AtmosphericNoiseGenerator(amplitude=1e-05)), targets.GNSSCampaignGenerator(station_generators=[targets.RandomStationGenerator(avoid_water=False, channels=None)])], source_generator=scenario.DCSourceGenerator(time_min=util.str_to_time('2017-01-01 00:00:00'), time_max=util.str_to_time('2017-01-01 02:00:00'), radius=(10 * km), depth_min=(1 * km), depth_max=(10 * km), magnitude_min=3.0, magnitude_max=4.5, strike=120.0, dip=45.0, rake=90.0, perturbation_angle_std=15.0, nevents=3))
        engine = get_gf_engine()
        generator.init_modelling(engine)
        # Exercise every available source generator with the same targets.
        for src in scenario.sources.AVAILABLE_SOURCES:
            generator.source_generator = src(time_min=util.str_to_time('2017-01-01 00:00:00'), time_max=util.str_to_time('2017-01-01 02:00:00'), radius=(1 * km), depth_min=(1.5 * km), depth_max=(5 * km), magnitude_min=3.0, magnitude_max=4.5)
            generator.source_generator.update_hierarchy(generator)
            generator.get_stations()
            generator.get_waveforms()
            generator.get_insar_scenes()
            generator.get_gnss_campaigns()

    @unittest.skipUnless(*have_gf_store(store_id_static))
    @unittest.skipUnless(gmtpy.have_gmt(), 'GMT not available')
    def test_scenario_map(self):
        tempdir = mkdtemp(prefix='pyrocko-scenario')
        self.tempdirs.append(tempdir)
        generator = scenario.ScenarioGenerator(seed=20, center_lat=42.6, center_lon=13.3, radius=(60 * km), target_generators=[targets.WaveformGenerator(store_id=ScenarioTestCase.store_id, station_generators=[targets.RandomStationGenerator(avoid_water=False)], noise_generator=targets.waveform.WhiteNoiseGenerator(), seismogram_quantity='velocity'), targets.InSARGenerator(resolution=(20, 20), noise_generator=targets.insar.AtmosphericNoiseGenerator(amplitude=1e-05)), targets.GNSSCampaignGenerator(station_generators=[targets.RandomStationGenerator(avoid_water=False, channels=None)])], source_generator=scenario.DCSourceGenerator(time_min=util.str_to_time('2017-01-01 00:00:00'), time_max=util.str_to_time('2017-01-01 02:00:00'), radius=(10 * km), depth_min=(1 * km), depth_max=(10 * km), magnitude_min=3.0, magnitude_max=4.5, strike=120.0, dip=45.0, rake=90.0, perturbation_angle_std=15.0, nevents=3))
        engine = get_gf_engine()
        collection = scenario.ScenarioCollection(tempdir, engine)
        collection.add_scenario('plot', generator)
        s = collection.get_scenario('plot')
        s.get_map()

    def assert_traces_almost_equal(self, trs1, trs2):
        """Pairwise near-equality check for two equally long trace lists."""
        assert (len(trs1) == len(trs2))
        for (tr1, tr2) in zip(trs1, trs2):
            tr1.assert_almost_equal(tr2)
def create_shortcut(downsample_type, layers: LayerFn, in_chs, out_chs, stride, dilation, **kwargs):
    """Build the residual shortcut: identity when shapes already match,
    otherwise an avg-pool or 1x1-conv downsample — or None when
    *downsample_type* is '' (no projection shortcut)."""
    assert downsample_type in ('avg', 'conv1x1', '')
    shapes_match = (in_chs == out_chs) and (stride == 1) and (dilation[0] == dilation[1])
    if shapes_match:
        return nn.Identity()
    if not downsample_type:
        return None
    if downsample_type == 'avg':
        return DownsampleAvg(in_chs, out_chs, stride=stride, dilation=dilation[0], **kwargs)
    return layers.conv_norm_act(in_chs, out_chs, kernel_size=1, stride=stride, dilation=dilation[0], **kwargs)
def test_wcs_comparison():
    """check_equality on WCS objects: dtype-insensitive match, mismatch,
    and tolerance-controlled near-match."""

    def make_wcs(crpix, dtype):
        w = WCS(naxis=3)
        w.wcs.crpix = np.array(crpix, dtype=dtype)
        return w

    wcs1 = make_wcs([50.0, 45.0, 30.0], 'float32')
    wcs2 = make_wcs([50.0, 45.0, 30.0], 'float64')
    wcs3 = make_wcs([50.0, 45.0, 31.0], 'float64')
    wcs4 = make_wcs([50.0, 45.0, 30.0001], 'float64')
    # Same values in different dtypes compare equal.
    assert check_equality(wcs1, wcs2)
    # A whole-unit difference is unequal unless the tolerance allows it.
    assert not check_equality(wcs1, wcs3)
    assert check_equality(wcs1, wcs3, wcs_tolerance=10.0)
    # A tiny difference is unequal unless the tolerance allows it.
    assert not check_equality(wcs1, wcs4)
    assert check_equality(wcs1, wcs4, wcs_tolerance=0.001)
def aimet_spatial_svd(model: torch.nn.Module, evaluator: aimet_common.defs.EvalFunction):
    """Compress `model` with AIMET spatial SVD in auto mode.

    Uses greedy compression-ratio selection toward a 0.75 target (10
    candidates), ignoring the first conv layer, with MAC count as the cost
    metric. Returns the (compressed model, statistics) result from AIMET.
    """
    selection = aimet_torch.defs.GreedySelectionParameters(
        target_comp_ratio=Decimal(0.75), num_comp_ratio_candidates=10)
    auto_mode = aimet_torch.defs.SpatialSvdParameters.AutoModeParams(
        selection, modules_to_ignore=[model.conv1])
    svd_params = aimet_torch.defs.SpatialSvdParameters(
        aimet_torch.defs.SpatialSvdParameters.Mode.auto, auto_mode)
    return ModelCompressor.compress_model(
        model=model,
        eval_callback=evaluator,
        eval_iterations=10,
        input_shape=(1, 3, 224, 224),
        compress_scheme=CompressionScheme.spatial_svd,
        cost_metric=CostMetric.mac,
        parameters=svd_params)
def downloadFileWithJSONPost(url, file, post_json_str, descriptor):
    """POST `post_json_str` (as application/json) to `url` and save the response to `file`.

    Creates parent directories as needed and skips the request entirely when
    `file` already exists, which makes batch downloads resumable.
    """
    global PROXY
    if '/' in file:
        makeDirs(os.path.dirname(file))
    if os.path.exists(file):
        logging.debug(f'Skipping json post to url: {url} ({descriptor}) as already downloaded')
        # BUG FIX: previously fell through and re-downloaded despite logging the skip
        return
    opener = getUrlOpener(PROXY)
    opener.addheaders.append(('Content-Type', 'application/json'))
    req = urllib.request.Request(url)
    for header in opener.addheaders:
        req.add_header(header[0], header[1])
    body_bytes = bytes(post_json_str, 'utf-8')
    req.add_header('Content-Length', len(body_bytes))
    resp = urllib.request.urlopen(req, body_bytes)
    with open(file, 'w', encoding='UTF-8') as the_file:
        the_file.write(resp.read().decode('UTF-8'))
    logging.debug(f'Successfully downloaded w/ JSON post to: {url} ({descriptor}) to: {file}')
def get_prep_freqs(receptacle, df_objects):
    """Mask-fill a spatial preposition for every object relative to `receptacle`.

    For each template in `rec_templates`, fills "<mask>" via `unmasker` for
    every object in `df_objects['entity']` and keeps the first prediction that
    is a known spatial preposition.

    Returns {template: {'preps': [(prep, obj), ...],
                        'object_list': [...],
                        'top_prep': most common preposition}}.
    """
    template_prep_dict = {}
    for tem in rec_templates:
        template_list = []
        object_list = []
        for idx, row in df_objects.iterrows():
            template_list.append(tem.format(row['entity'], '<mask>', receptacle))
            object_list.append(row['entity'])
        filled_template_list = unmasker(template_list)
        filled_preps = []
        for preds, obj in zip(filled_template_list, object_list):
            for item in preds:
                # token_str carries a leading marker character; strip it before matching
                if item['token_str'][1:].lower() in spatial_prepositions:
                    filled_preps.append((item['token_str'][1:].lower(), obj))
                    break
            else:
                # BUG FIX: was a pdb.set_trace() debugging leftover; warn and continue
                import warnings
                warnings.warn(f'No spatial preposition predicted for object {obj!r}')
        template_prep_dict[tem] = {
            'preps': filled_preps,
            'object_list': object_list,
            'top_prep': Counter(prep_obj[0] for prep_obj in filled_preps).most_common(1)[0][0],
        }
    return template_prep_dict
@dataclass(frozen=True)  # decorator restored: the bare "(frozen=True)" line was a truncation artifact (SyntaxError as written)
class Timezone(AnnotatedTypesCheck):
    """Check that a datetime's tzinfo satisfies the constraint in `value`.

    value semantics:
      None              -> datetime must be naive
      ... (Ellipsis)    -> datetime must be aware (any timezone)
      timezone instance -> datetime's tzinfo must equal it
      str               -> not handled here; predicate returns False
    """

    value: Union[str, timezone, type(...), None]

    def predicate(self, value: Any) -> bool:
        """Return True when `value` is a datetime matching the tz constraint."""
        if not isinstance(value, datetime):
            return False
        if self.value is None:
            return value.tzinfo is None
        elif self.value is ...:
            return value.tzinfo is not None
        elif isinstance(self.value, timezone):
            return value.tzinfo == self.value
        else:
            return False

    def is_compatible_metadata(self, metadata: AnnotatedTypesCheck) -> bool:
        """An "aware" (...) requirement is satisfied by any metadata pinning a concrete timezone."""
        if self.value is ... and isinstance(metadata, Timezone) and metadata.value is not None:
            return True
        return False
def add_interactive_args(parser):
    """Register the 'Interactive' argument group on `parser`."""
    interactive = parser.add_argument_group('Interactive')
    interactive.add_argument(
        '--buffer-size',
        default=0,
        type=int,
        metavar='N',
        help='read this many sentences into a buffer before processing them',
    )
    interactive.add_argument(
        '--input',
        default='-',
        type=str,
        metavar='FILE',
        help='file to read from; use - for stdin',
    )
class EmbeddingSimilarityEvaluator(SentenceEvaluator):
    """Evaluate a sentence-embedding model on sentence pairs with gold similarity scores.

    Embeds both sentence lists and reports Pearson/Spearman correlation of the
    gold scores with cosine, Manhattan, Euclidean and dot-product similarity.
    Optionally appends one row per evaluation to a CSV file under output_path.
    """

    def __init__(self, sentences1: List[str], sentences2: List[str], scores: List[float], batch_size: int = 16, main_similarity: SimilarityFunction = None, name: str = '', show_progress_bar: bool = False, write_csv: bool = True):
        """
        :param sentences1: first sentence of each pair
        :param sentences2: second sentence of each pair (parallel to sentences1)
        :param scores: gold similarity score per pair
        :param main_similarity: which Spearman score __call__ returns; None = max over all metrics
        :param write_csv: append results to a CSV file when an output_path is given
        """
        self.sentences1 = sentences1
        self.sentences2 = sentences2
        self.scores = scores
        self.write_csv = write_csv
        assert len(self.sentences1) == len(self.sentences2)
        assert len(self.sentences1) == len(self.scores)
        self.main_similarity = main_similarity
        self.name = name
        self.batch_size = batch_size
        if show_progress_bar is None:
            # default to showing progress when logging is at INFO/DEBUG verbosity
            show_progress_bar = logger.getEffectiveLevel() in (logging.INFO, logging.DEBUG)
        self.show_progress_bar = show_progress_bar
        self.csv_file = 'similarity_evaluation' + ('_' + name if name else '') + '_results.csv'
        self.csv_headers = ['epoch', 'steps', 'cosine_pearson', 'cosine_spearman', 'euclidean_pearson', 'euclidean_spearman', 'manhattan_pearson', 'manhattan_spearman', 'dot_pearson', 'dot_spearman']

    @classmethod  # BUG FIX: decorator was missing although the method takes `cls` and is an alternate constructor
    def from_input_examples(cls, examples: List[InputExample], **kwargs):
        """Build an evaluator from InputExample objects (texts[0], texts[1], label)."""
        sentences1 = []
        sentences2 = []
        scores = []
        for example in examples:
            sentences1.append(example.texts[0])
            sentences2.append(example.texts[1])
            scores.append(example.label)
        return cls(sentences1, sentences2, scores, **kwargs)

    def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
        """Run the evaluation and return the Spearman score selected by main_similarity."""
        if epoch != -1:
            if steps == -1:
                out_txt = ' after epoch {}:'.format(epoch)
            else:
                out_txt = ' in epoch {} after {} steps:'.format(epoch, steps)
        else:
            out_txt = ':'
        logger.info('EmbeddingSimilarityEvaluator: Evaluating the model on ' + self.name + ' dataset' + out_txt)
        embeddings1 = model.encode(self.sentences1, batch_size=self.batch_size, show_progress_bar=self.show_progress_bar, convert_to_numpy=True)
        embeddings2 = model.encode(self.sentences2, batch_size=self.batch_size, show_progress_bar=self.show_progress_bar, convert_to_numpy=True)
        labels = self.scores
        # distances are negated so "larger = more similar" holds for every metric
        cosine_scores = 1 - paired_cosine_distances(embeddings1, embeddings2)
        manhattan_distances = -paired_manhattan_distances(embeddings1, embeddings2)
        euclidean_distances = -paired_euclidean_distances(embeddings1, embeddings2)
        dot_products = [np.dot(emb1, emb2) for emb1, emb2 in zip(embeddings1, embeddings2)]
        eval_pearson_cosine, _ = pearsonr(labels, cosine_scores)
        eval_spearman_cosine, _ = spearmanr(labels, cosine_scores)
        eval_pearson_manhattan, _ = pearsonr(labels, manhattan_distances)
        eval_spearman_manhattan, _ = spearmanr(labels, manhattan_distances)
        eval_pearson_euclidean, _ = pearsonr(labels, euclidean_distances)
        eval_spearman_euclidean, _ = spearmanr(labels, euclidean_distances)
        eval_pearson_dot, _ = pearsonr(labels, dot_products)
        eval_spearman_dot, _ = spearmanr(labels, dot_products)
        logger.info('Cosine-Similarity :\tPearson: {:.4f}\tSpearman: {:.4f}'.format(eval_pearson_cosine, eval_spearman_cosine))
        logger.info('Manhattan-Distance:\tPearson: {:.4f}\tSpearman: {:.4f}'.format(eval_pearson_manhattan, eval_spearman_manhattan))
        logger.info('Euclidean-Distance:\tPearson: {:.4f}\tSpearman: {:.4f}'.format(eval_pearson_euclidean, eval_spearman_euclidean))
        logger.info('Dot-Product-Similarity:\tPearson: {:.4f}\tSpearman: {:.4f}'.format(eval_pearson_dot, eval_spearman_dot))
        if output_path is not None and self.write_csv:
            csv_path = os.path.join(output_path, self.csv_file)
            output_file_exists = os.path.isfile(csv_path)
            # BUG FIX: csv files must be opened with newline='' (csv module requirement)
            with open(csv_path, mode='a' if output_file_exists else 'w', encoding='utf-8', newline='') as f:
                writer = csv.writer(f)
                if not output_file_exists:
                    writer.writerow(self.csv_headers)
                writer.writerow([epoch, steps, eval_pearson_cosine, eval_spearman_cosine, eval_pearson_euclidean, eval_spearman_euclidean, eval_pearson_manhattan, eval_spearman_manhattan, eval_pearson_dot, eval_spearman_dot])
        if self.main_similarity == SimilarityFunction.COSINE:
            return eval_spearman_cosine
        elif self.main_similarity == SimilarityFunction.EUCLIDEAN:
            return eval_spearman_euclidean
        elif self.main_similarity == SimilarityFunction.MANHATTAN:
            return eval_spearman_manhattan
        elif self.main_similarity == SimilarityFunction.DOT_PRODUCT:
            return eval_spearman_dot
        elif self.main_similarity is None:
            return max(eval_spearman_cosine, eval_spearman_manhattan, eval_spearman_euclidean, eval_spearman_dot)
        else:
            raise ValueError('Unknown main_similarity value')
def list_jobs(session, inprogress='False'):
    """Return pending (not complete) scansweep jobs for `session`.

    Each job is a dict with 'job' and 'target' keys. `inprogress` is the
    string flag stored in the queue table ('False' by default).
    """
    conn = get_database_conn()
    cursor = query_execute_wrapper(
        conn,
        query_string=("SELECT * FROM scansweep_queue WHERE session=? AND inprogress=? "
                      "AND complete='False'"),
        query_list=[session, inprogress],
        no_return=False,
    )
    return [{'job': row['job'], 'target': row['target']} for row in cursor]
class EoctConv(nn.Module):
    """Extended octave convolution over up to three frequency groups (high / low / lower-low).

    `in_channels` and `num_channels` may each be an int (high group only), a
    2-tuple (high, low) or a 3-tuple (high, low, lower-low). Every present
    input group is convolved toward every requested output group; avg-pooling
    and nearest upsampling move data between the 1x, 1/2x and 1/4x resolutions.
    """

    def __init__(self, in_channels, num_channels, kernel_size=3, stride=1, padding=1, bias=True, name=None):
        super(EoctConv, self).__init__()
        self.stride = stride
        # unpack per-group input channel counts (absent groups -> None)
        if ((type(in_channels) is tuple) and (len(in_channels) == 3)):
            (in_h, in_l, in_ll) = in_channels
        elif ((type(in_channels) is tuple) and (len(in_channels) == 2)):
            (in_h, in_l) = in_channels
            in_ll = None
        else:
            (in_h, in_l, in_ll) = (in_channels, None, None)
        # unpack per-group output channel counts (absent groups -> 0)
        if ((type(num_channels) is tuple) and (len(num_channels) == 3)):
            (num_high, num_low, num_ll) = num_channels
        elif ((type(num_channels) is tuple) and (len(num_channels) == 2)):
            (num_high, num_low) = num_channels
            num_ll = 0
        else:
            (num_high, num_low, num_ll) = (num_channels, 0, 0)
        self.num_high = num_high
        self.num_low = num_low
        self.num_ll = num_ll
        # NOTE(review): the `kernel_size`/`padding` parameters are ignored below —
        # every conv is hard-coded to kernel_size=3, padding=1. Confirm intended.
        # conv2d1..3: high-frequency input toward high / low / lower-low outputs
        if (in_h is not None):
            self.conv2d1 = (nn.Conv2d(in_h, num_high, kernel_size=3, stride=1, padding=1, bias=bias) if (self.num_high > 0) else None)
            self.conv2d2 = (nn.Conv2d(in_h, num_low, kernel_size=3, stride=1, padding=1, bias=bias) if (self.num_low > 0) else None)
            self.conv2d3 = (nn.Conv2d(in_h, num_ll, kernel_size=3, stride=1, padding=1, bias=bias) if (self.num_ll > 0) else None)
        # conv2d4..6: low-frequency input toward low / high / lower-low outputs
        if (in_l is not None):
            self.conv2d4 = (nn.Conv2d(in_l, num_low, kernel_size=3, stride=1, padding=1, bias=bias) if (self.num_low > 0) else None)
            self.conv2d5 = (nn.Conv2d(in_l, num_high, kernel_size=3, stride=1, padding=1, bias=bias) if (self.num_high > 0) else None)
            self.conv2d6 = (nn.Conv2d(in_l, num_ll, kernel_size=3, stride=1, padding=1, bias=bias) if (self.num_ll > 0) else None)
        # conv2d7..9: lower-low input toward lower-low / high / low outputs
        if (in_ll is not None):
            self.conv2d7 = (nn.Conv2d(in_ll, num_ll, kernel_size=3, stride=1, padding=1, bias=bias) if (self.num_ll > 0) else None)
            self.conv2d8 = (nn.Conv2d(in_ll, num_high, kernel_size=3, stride=1, padding=1, bias=bias) if (self.num_high > 0) else None)
            self.conv2d9 = (nn.Conv2d(in_ll, num_low, kernel_size=3, stride=1, padding=1, bias=bias) if (self.num_low > 0) else None)
        self.upsample1 = nn.Upsample(scale_factor=2, mode='nearest')
        self.upsample2 = nn.Upsample(scale_factor=4, mode='nearest')
        self.pooling1 = nn.AvgPool2d(kernel_size=2, stride=2, padding=0)
        self.pooling2 = nn.AvgPool2d(kernel_size=4, stride=4, padding=0)
        # He-style initialization for all convs
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
                # NOTE(review): nn.init.constant is the deprecated alias of
                # constant_, and this crashes when bias=False (m.bias is None)
                # — verify against the torch version in use.
                nn.init.constant(m.bias, 0)

    def forward(self, data):
        """Route each input frequency group to each output group.

        `data` mirrors the constructor: a tensor, 2-tuple or 3-tuple.
        Returns a tensor / tuple with only the non-empty output groups.
        """
        stride = self.stride
        if ((type(data) is tuple) and (len(data) == 3)):
            (data_h, data_l, data_ll) = data
        elif ((type(data) is tuple) and (len(data) == 2)):
            (data_h, data_l) = data
            data_ll = None
        else:
            (data_h, data_l, data_ll) = (data, None, None)
        (data_h2l, data_h2h, data_h2ll, data_l2l, data_l2h, data_l2ll, data_ll2ll, data_ll2h, data_ll2l) = (None, None, None, None, None, None, None, None, None)
        # high-frequency group: pool down before feeding the low/lower-low paths
        if (data_h is not None):
            data_h = (self.pooling1(data_h) if (stride == 2) else data_h)
            data_h2h = (self.conv2d1(data_h) if (self.num_high > 0) else None)
            data_h2l = (self.pooling1(data_h) if (self.num_low > 0) else data_h)
            data_h2l = (self.conv2d2(data_h2l) if (self.num_low > 0) else None)
            data_h2ll = (self.pooling2(data_h) if (self.num_ll > 0) else data_h)
            data_h2ll = (self.conv2d3(data_h2ll) if (self.num_ll > 0) else None)
        'processing low frequency group'
        if (data_l is not None):
            data_l2l = (self.pooling1(data_l) if ((self.num_low > 0) and (stride == 2)) else data_l)
            data_l2l = (self.conv2d4(data_l2l) if (self.num_low > 0) else None)
            # NOTE(review): for stride==2 the upsample is skipped and l->h becomes
            # None, discarding the conv2d5 output — confirm this is intentional.
            data_l2h = (self.conv2d5(data_l) if (self.num_high > 0) else data_l)
            data_l2h = (self.upsample1(data_l2h) if ((self.num_high > 0) and (stride == 1)) else None)
            data_l2ll = (self.pooling1(data_l) if (self.num_ll > 0) else data_l)
            data_l2ll = (self.conv2d6(data_l2ll) if (self.num_ll > 0) else None)
        'processing lower frequency group'
        if (data_ll is not None):
            data_ll2ll = (self.pooling1(data_ll) if ((self.num_ll > 0) and (stride == 2)) else data_ll)
            data_ll2ll = (self.conv2d7(data_ll2ll) if (self.num_ll > 0) else None)
            data_ll2h = (self.conv2d8(data_ll) if (self.num_high > 0) else data_ll)
            data_ll2h = (self.upsample2(data_ll2h) if ((self.num_high > 0) and (stride == 1)) else None)
            data_ll2l = (self.conv2d9(data_ll) if (self.num_low > 0) else data_ll)
            data_ll2l = (self.upsample1(data_ll2l) if ((self.num_low > 0) and (stride == 1)) else None)
        'you can force to disable the interaction paths'
        # sum contributions per output group; dataSum ignores None operands
        output = (dataSum(dataSum(data_h2h, data_l2h), data_ll2h), dataSum(dataSum(data_h2l, data_l2l), data_ll2l), dataSum(dataSum(data_h2ll, data_l2ll), data_ll2ll))
        # drop empty groups so callers get a tensor / 2-tuple / 3-tuple as appropriate
        if (output[2] is None):
            if (output[1] is None):
                return output[0]
            else:
                return output[0:2]
        elif (output[1] is None):
            return output[0::2]
        else:
            return output
def M_eq(mu_new, C, mu, m, n):
    """Build the n+1 residual equations for the mu update system.

    eqs[j] = mu[j] * sum_h(mu_new[h] * csums[h]) - mu_new[j] * csums[j] - mu_new[n]
    for j < n, and eqs[n] = sum(mu_new[:n]) - 1 (normalization constraint),
    where csums[h] is the h-th column sum of the m x n matrix C.
    """
    csums = [sum(C[i][h] for i in range(m)) for h in range(n)]
    eqs = [0] * (n + 1)
    # hoisted out of the loop: this weighted sum does not depend on j
    # (it was recomputed on every iteration, making the loop O(n^2))
    temp = sum(mu_new[h] * csums[h] for h in range(n))
    for j in range(n):
        eqs[j] = (mu[j] * temp) - (mu_new[j] * csums[j]) - mu_new[n]
    eqs[n] = sum(mu_new[:n]) - 1
    return eqs
def test_analytical_azimuth():
    """Analytical azimuth should track spa_python during daytime and never be NaN."""
    times = pd.date_range(start='1/1/2015 0:00', end='12/31/2015 23:00',
                          freq='H').tz_localize('Etc/GMT+8')
    lat, lon = 37.8, -122.25
    lat_rad = np.deg2rad(lat)
    output = solarposition.spa_python(times, lat, lon, 100)
    solar_azimuth = np.deg2rad(output['azimuth'])
    solar_zenith = np.deg2rad(output['zenith'])

    def analytical_azimuth(eot, decl):
        # shared pipeline: hour angle -> zenith -> azimuth
        ha = np.deg2rad(solarposition.hour_angle(times, lon, eot))
        zen = solarposition.solar_zenith_analytical(lat_rad, ha, decl)
        return solarposition.solar_azimuth_analytical(lat_rad, ha, decl, zen)

    azimuth_1 = analytical_azimuth(
        solarposition.equation_of_time_spencer71(times.dayofyear),
        solarposition.declination_spencer71(times.dayofyear))
    azimuth_2 = analytical_azimuth(
        solarposition.equation_of_time_pvcdrom(times.dayofyear),
        solarposition.declination_cooper69(times.dayofyear))

    daytime = np.where(solar_zenith < (np.pi / 2))
    assert np.allclose(azimuth_1[daytime], solar_azimuth.values[daytime], atol=0.01)
    assert np.allclose(azimuth_2[daytime], solar_azimuth.values[daytime], atol=0.017)

    # latitude / hour angle / declination triples covering both hemispheres
    # and the +/-180 degree hour-angle edges
    test_angles = np.radians(np.array([
        [0.0, -180.0, -20.0], [0.0, 0.0, -5.0], [0.0, 0.0, 0.0],
        [0.0, 0.0, 15.0], [0.0, 180.0, 20.0],
        [30.0, 0.0, -20.0], [30.0, 0.0, -5.0], [30.0, 0.0, 0.0],
        [30.0, 180.0, 5.0], [30.0, 0.0, 10.0],
        [-30.0, 0.0, -20.0], [-30.0, 0.0, -15.0], [-30.0, 0.0, 0.0],
        [-30.0, -180.0, 5.0], [-30.0, 180.0, 10.0]]))
    zeniths = solarposition.solar_zenith_analytical(*test_angles.T)
    azimuths = solarposition.solar_azimuth_analytical(*test_angles.T, zenith=zeniths)
    assert not np.isnan(azimuths).any()
class BackgroundGenerator(threading.Thread):
    """Wrap a generator so its items are prefetched on a background daemon thread.

    Items are pulled from `generator` on the worker thread and parked in a
    bounded queue (`max_prefetch` deep); a trailing None sentinel marks
    exhaustion. The wrapper itself is iterable.
    """

    def __init__(self, generator, local_rank, max_prefetch=6):
        # local_rank: CUDA device the worker thread binds to before iterating
        super(BackgroundGenerator, self).__init__()
        self.queue = Queue.Queue(max_prefetch)
        self.generator = generator
        self.local_rank = local_rank
        self.daemon = True  # don't keep the process alive for the prefetcher
        self.start()  # begin prefetching immediately

    def run(self):
        # bind the worker thread to the same CUDA device as the consumer
        torch.cuda.set_device(self.local_rank)
        for item in self.generator:
            self.queue.put(item)  # blocks when the buffer is full
        self.queue.put(None)  # sentinel: source generator exhausted

    def next(self):
        # blocks until the worker has produced the next item
        next_item = self.queue.get()
        if (next_item is None):
            raise StopIteration
        return next_item

    def __next__(self):
        return self.next()

    def __iter__(self):
        return self
class MaxViT_CASCADE_Small(nn.Module):
    """MaxViT-Small backbone + CASCADE decoder producing four multi-scale prediction maps.

    NOTE(review): `decoder_aggregation` is read from module scope rather than
    taken as a parameter — confirm it is defined wherever this class is used.
    """

    def __init__(self, n_class=1, img_size=224):
        super(MaxViT_CASCADE_Small, self).__init__()
        # project 1-channel (grayscale) inputs to 3 channels for the pretrained backbone
        self.conv = nn.Sequential(nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True))
        # backbone + checkpoint are selected by the supported input resolution
        if (img_size == 224):
            self.backbone = maxvit_rmlp_small_rw_224_4out()
            print('Loading:', './pretrained_pth/maxvit/maxvit_rmlp_small_rw_224_sw-6ef0ae4f.pth')
            state_dict = torch.load('./pretrained_pth/maxvit/maxvit_rmlp_small_rw_224_sw-6ef0ae4f.pth')
        elif (img_size == 256):
            self.backbone = maxxvit_rmlp_small_rw_256_4out()
            print('Loading:', './pretrained_pth/maxvit/maxxvit_rmlp_small_rw_256_sw-37e217ff.pth')
            state_dict = torch.load('./pretrained_pth/maxvit/maxxvit_rmlp_small_rw_256_sw-37e217ff.pth')
        else:
            sys.exit((str(img_size) + ' is not a valid image size! Currently supported image sizes are 224 and 256.'))
        self.backbone.load_state_dict(state_dict, strict=False)
        print('Pretrain weights loaded.')
        # decoder channel widths, deepest stage first
        channels = [768, 384, 192, 96]
        if (decoder_aggregation == 'additive'):
            self.decoder = CASCADE_Add(channels=channels)
        elif (decoder_aggregation == 'concatenation'):
            self.decoder = CASCADE_Cat(channels=channels)
        else:
            sys.exit((("'" + decoder_aggregation) + "' is not a valid decoder aggregation! Currently supported aggregations are 'additive' and 'concatenation'."))
        # one 1x1 prediction head per decoder stage
        self.out_head1 = nn.Conv2d(channels[0], n_class, 1)
        self.out_head2 = nn.Conv2d(channels[1], n_class, 1)
        self.out_head3 = nn.Conv2d(channels[2], n_class, 1)
        self.out_head4 = nn.Conv2d(channels[3], n_class, 1)

    def forward(self, x):
        # grayscale -> 3-channel projection when needed
        if (x.size()[1] == 1):
            x = self.conv(x)
        f = self.backbone(x)
        # decoder consumes the deepest feature plus skip features in reverse order
        (x1_o, x2_o, x3_o, x4_o) = self.decoder(f[3], [f[2], f[1], f[0]])
        p1 = self.out_head1(x1_o)
        p2 = self.out_head2(x2_o)
        p3 = self.out_head3(x3_o)
        p4 = self.out_head4(x4_o)
        # upsample each prediction back to the input resolution
        p1 = F.interpolate(p1, scale_factor=32, mode='bilinear')
        p2 = F.interpolate(p2, scale_factor=16, mode='bilinear')
        p3 = F.interpolate(p3, scale_factor=8, mode='bilinear')
        p4 = F.interpolate(p4, scale_factor=4, mode='bilinear')
        return (p1, p2, p3, p4)
# NOTE(review): the bare name below looks like a truncated decorator (e.g.
# "@register_dataframe_method") lost during extraction — restore before use.
_dataframe_method
def fill_missing_timestamps(df: pd.DataFrame, frequency: str, first_time_stamp: pd.Timestamp=None, last_time_stamp: pd.Timestamp=None) -> pd.DataFrame:
    """Reindex `df` onto a complete date_range at `frequency`.

    Timestamps absent from the original index become rows of NaN. Bounds
    default to the index min/max when not supplied.
    """
    check('frequency', frequency, [str])
    check('first_time_stamp', first_time_stamp, [pd.Timestamp, type(None)])
    check('last_time_stamp', last_time_stamp, [pd.Timestamp, type(None)])
    if (first_time_stamp is None):
        first_time_stamp = df.index.min()
    if (last_time_stamp is None):
        last_time_stamp = df.index.max()
    expected_timestamps = pd.date_range(start=first_time_stamp, end=last_time_stamp, freq=frequency)
    return df.reindex(expected_timestamps)
def getBoundingBoxes():
    """Read ground-truth and detection .txt files into one BoundingBoxes collection.

    Ground-truth lines are "<class> <x> <y> <w> <h>"; detection lines add a
    confidence: "<class> <conf> <x> <y> <w> <h>". Coordinates are absolute
    XYWH against a fixed 200x200 image size.
    """
    import glob
    import os

    def _load_folder(folder, bb_type):
        # Parse every .txt file in `folder` into BoundingBox objects.
        # NOTE: keeps the original os.chdir() side effect so relative paths
        # elsewhere continue to see the same final working directory.
        os.chdir(folder)
        boxes = []
        for fname in sorted(glob.glob('*.txt')):
            image_name = fname.replace('.txt', '')
            with open(fname, 'r') as fh:
                for line in fh:
                    line = line.replace('\n', '')
                    if line.replace(' ', '') == '':
                        continue
                    parts = line.split(' ')
                    id_class = parts[0]
                    if bb_type == BBType.GroundTruth:
                        x, y, w, h = (float(v) for v in parts[1:5])
                        boxes.append(BoundingBox(image_name, id_class, x, y, w, h,
                                                 CoordinatesType.Absolute, (200, 200),
                                                 bb_type, format=BBFormat.XYWH))
                    else:
                        confidence = float(parts[1])
                        x, y, w, h = (float(v) for v in parts[2:6])
                        boxes.append(BoundingBox(image_name, id_class, x, y, w, h,
                                                 CoordinatesType.Absolute, (200, 200),
                                                 bb_type, confidence, format=BBFormat.XYWH))
        return boxes

    currentPath = os.path.dirname(os.path.abspath(__file__))
    # previously constructed twice; once is enough
    allBoundingBoxes = BoundingBoxes()
    for bb in _load_folder(os.path.join(currentPath, 'groundtruths'), BBType.GroundTruth):
        allBoundingBoxes.addBoundingBox(bb)
    for bb in _load_folder(os.path.join(currentPath, 'detections'), BBType.Detected):
        allBoundingBoxes.addBoundingBox(bb)
    return allBoundingBoxes
def hierarchical_subsequence(sequential, first, last, after, upto, share_weights=False, depth=0):
    """Return the sub-network of a (possibly nested) nn.Sequential between named layers.

    `first`/`last` are inclusive bounds and `after`/`upto` exclusive ones, each
    given as a sequence of layer names (one name per nesting level) or None.
    At most one of (last, upto) and one of (first, after) may be set. When
    share_weights is False the selected layers are deep copies.
    Raises ValueError when a named bound is never matched.
    """
    assert ((last is None) or (upto is None))
    assert ((first is None) or (after is None))
    if (first is last is after is upto is None):
        # no bounds at all: the whole sequential is the answer
        return (sequential if share_weights else copy.deepcopy(sequential))
    assert isinstance(sequential, torch.nn.Sequential), ('.'.join(((first or last or after or upto)[:depth] or 'arg')) + ' not Sequential')
    # inclusion starts immediately unless we are still waiting for first/after
    including_children = ((first is None) and (after is None))
    included_children = OrderedDict()
    # split each bound into (name at this depth, remaining path or None if it ends here)
    ((F, FN), (L, LN), (A, AN), (U, UN)) = [((d[depth], (None if (len(d) == (depth + 1)) else d)) if (d is not None) else (None, None)) for d in [first, last, after, upto]]
    for (name, layer) in sequential._modules.items():
        # bounds that open inclusion are handled before visiting the child...
        if (name == F):
            first = None
            including_children = True
        if ((name == A) and (AN is not None)):
            after = None
            including_children = True
        if ((name == U) and (UN is None)):
            upto = None
            including_children = False
        if including_children:
            # recurse with only the bounds whose path goes through this child
            (FR, LR, AR, UR) = [(n if ((n is None) or (n[depth] == name)) else None) for n in [FN, LN, AN, UN]]
            chosen = hierarchical_subsequence(layer, first=FR, last=LR, after=AR, upto=UR, share_weights=share_weights, depth=(depth + 1))
            if (chosen is not None):
                included_children[name] = chosen
        # ...and bounds that close/open after the child are handled afterwards
        if (name == L):
            last = None
            including_children = False
        if ((name == U) and (UN is not None)):
            upto = None
            including_children = False
        if ((name == A) and (AN is None)):
            after = None
            including_children = True
    # any bound still set was never matched by a layer name
    for name in [first, last, after, upto]:
        if (name is not None):
            raise ValueError(('Layer %s not found' % '.'.join(name)))
    # empty nested selections collapse to None so parents can skip them
    if ((not len(included_children)) and (depth > 0)):
        return None
    result = torch.nn.Sequential(included_children)
    result.training = sequential.training
    return result
class TestDataset(Dataset):
    """MultiWOZ test-split wrapper that builds seq2seq examples (with on-disk caching).

    Modes (args.seq2seq.mode): falsy or 'sequential' emits one example per turn
    whose target is the whole belief state; 'separate' emits one example per
    slot whose target is that slot's value.
    """

    def __init__(self, args, raw_datasets, cache_root):
        self.raw_datasets = raw_datasets
        cache_path = os.path.join(cache_root, 'multiwoz_test.cache')
        if (os.path.exists(cache_path) and args.dataset.use_cache):
            # reuse previously constructed examples
            self.extended_data = torch.load(cache_path)
        else:
            self.extended_data = []
            for raw_data in tqdm(self.raw_datasets):
                extend_data = copy.deepcopy(raw_data)
                history = get_constructed_history_and_golden_response(extend_data['dialog']['usr'], extend_data['dialog']['sys'])
                # serialize the ontology as "slot: v1, v2; slot2: none; ..."
                slot_ontology_values_str = ''
                for (ontology_slot, ontology_values) in zip(extend_data['ontology_slots'], extend_data['ontology_values']):
                    if (not ontology_values):
                        ontology_item = '{}: {}'.format(ontology_slot, 'none')
                    else:
                        ontology_item = '{}: {}'.format(ontology_slot, ', '.join(ontology_values))
                    slot_ontology_values_str += '{}; '.format(ontology_item)
                if ((not args.seq2seq.mode) or (args.seq2seq.mode == 'sequential')):
                    # one example per turn; target is the full belief state as
                    # "slot value, slot value, ..." (dashes collapsed to spaces)
                    output_text = ', '.join(['{}-{}'.format(slot, value).replace('-', ' ') for (slot, value) in zip(extend_data['expanded_turn_belief']['slot'], extend_data['expanded_turn_belief']['value'])])
                    extend_data.update({'struct_in': slot_ontology_values_str.lower(), 'text_in': history.lower(), 'seq_out': output_text.lower()})
                    self.extended_data.append(extend_data)
                elif (args.seq2seq.mode == 'separate'):
                    # one example per slot; history is prefixed with the slot name
                    for (slot, value) in zip(extend_data['expanded_turn_belief']['slot'], extend_data['expanded_turn_belief']['value']):
                        slot_history = '{}: {}'.format(slot, history)
                        output_text = value
                        extend_extend_data = copy.deepcopy(extend_data)
                        # drop bulky fields not needed per-slot
                        del extend_extend_data['expanded_turn_belief']
                        del extend_extend_data['ontology_slots']
                        del extend_extend_data['ontology_values']
                        extend_extend_data.update({'struct_in': slot_ontology_values_str.lower(), 'text_in': slot_history.lower(), 'seq_out': output_text.lower(), 'slot': slot})
                        self.extended_data.append(extend_extend_data)
                else:
                    raise ValueError('Other seq2seq method not support yet!')
            if args.dataset.use_cache:
                torch.save(self.extended_data, cache_path)

    def __getitem__(self, index) -> T_co:
        return self.extended_data[index]

    def __len__(self):
        return len(self.extended_data)
def weights_init_orthogonal(m):
    """Module-apply hook: orthogonal init for Conv/Linear weights, N(1, 0.02) for BatchNorm2d.

    Intended for use as `net.apply(weights_init_orthogonal)`. The stray
    per-module debug print was removed, and the deprecated/removed
    `init.orthogonal` / `init.normal` / `init.constant` aliases were replaced
    with their in-place underscore variants (torch >= 0.4).
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.orthogonal_(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.orthogonal_(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def test_load_spectrum(plot=False, verbose=True, warnings=True, *args, **kwargs):
    """Round-trip a calculated CO2 spectrum through store/load and compare."""
    setup_test_line_databases()
    temp_file_name = '_test_database_co2_tempfile.spec'
    assert not exists(temp_file_name)
    try:
        factory = SpectrumFactory(wavelength_min=4190, wavelength_max=4200,
                                  mole_fraction=0.0004, path_length=0.1,
                                  isotope=[1], cutoff=1e-20, verbose=verbose)
        factory.warnings['MissingSelfBroadeningWarning'] = 'ignore'
        factory.load_databank('HITRAN-CO2-TEST')
        s_calc = factory.eq_spectrum(Tgas=300)
        s_calc.apply_slit(2, verbose=False)
        s_calc.update()
        # store compressed, then load straight back from disk
        s_loaded = load_spec(s_calc.store(temp_file_name, compress=True))
        s_loaded.update()
        if plot:
            fig = plt.figure(fig_prefix + 'Calculated vs stored+retrieved')
            s_calc.plot('absorbance', nfig=fig.number, lw=3, label='calculated')
            s_loaded.plot('absorbance', nfig=fig.number, color='r',
                          label='stored (compressed) and retrieved')
            plt.legend()
        assert s_calc.compare_with(s_loaded, spectra_only=True, plot=False)
    finally:
        # always remove the temporary file, even when the comparison fails
        if exists(temp_file_name):
            os.remove(temp_file_name)
    return True
def configure_tagged_union(union: Any, converter: Converter, tag_generator: Callable[([Type], str)]=default_tag_generator, tag_name: str='_type', default: Optional[Type]=NOTHING) -> None:
    """Teach `converter` to (un)structure `union` via an embedded tag field.

    Unstructuring adds `tag_name` (set to tag_generator(cls)) to the member's
    dict; structuring pops it and dispatches to the matching member's hook.
    When `default` is given, unknown or missing tags fall back to structuring
    the payload as `default`.
    """
    args = union.__args__
    tag_to_hook = {}
    exact_cl_unstruct_hooks = {}
    for cl in args:
        tag = tag_generator(cl)
        struct_handler = converter.get_structure_hook(cl)
        unstruct_handler = converter.get_unstructure_hook(cl)
        # default-argument binding freezes cl/handler per loop iteration
        # (avoids the classic late-binding closure bug)
        def structure_union_member(val: dict, _cl=cl, _h=struct_handler) -> cl:
            return _h(val, _cl)
        def unstructure_union_member(val: union, _h=unstruct_handler) -> dict:
            return _h(val)
        tag_to_hook[tag] = structure_union_member
        exact_cl_unstruct_hooks[cl] = unstructure_union_member
    cl_to_tag = {cl: tag_generator(cl) for cl in args}
    if (default is not NOTHING):
        default_handler = converter.get_structure_hook(default)
        def structure_default(val: dict, _cl=default, _h=default_handler):
            return _h(val, _cl)
        # unknown tags fall through to the default member
        tag_to_hook = defaultdict((lambda : structure_default), tag_to_hook)
        cl_to_tag = defaultdict((lambda : default), cl_to_tag)
    def unstructure_tagged_union(val: union, _exact_cl_unstruct_hooks=exact_cl_unstruct_hooks, _cl_to_tag=cl_to_tag, _tag_name=tag_name) -> Dict:
        # unstructure via the exact class, then embed the tag
        res = _exact_cl_unstruct_hooks[val.__class__](val)
        res[_tag_name] = _cl_to_tag[val.__class__]
        return res
    if (default is NOTHING):
        def structure_tagged_union(val: dict, _, _tag_to_cl=tag_to_hook, _tag_name=tag_name) -> union:
            # copy before pop so the caller's dict is not mutated
            val = val.copy()
            return _tag_to_cl[val.pop(_tag_name)](val)
    else:
        def structure_tagged_union(val: dict, _, _tag_to_hook=tag_to_hook, _tag_name=tag_name, _dh=default_handler, _default=default) -> union:
            if (_tag_name in val):
                val = val.copy()
                return _tag_to_hook[val.pop(_tag_name)](val)
            # no tag at all: structure directly as the default type
            return _dh(val, _default)
    converter.register_unstructure_hook(union, unstructure_tagged_union)
    converter.register_structure_hook(union, structure_tagged_union)
class TopKCompressor():
    """Gradient sparsifier keeping the top-k largest-magnitude entries of a flat tensor."""

    def __init__(self):
        self.residuals = {}        # name -> residual tensor (error feedback)
        self.sparsities = []
        self.zero_conditions = {}  # name -> mask: 0 at selected indices, 1 elsewhere
        self.values = {}           # name -> last selected values
        self.indexes = {}          # name -> last selected indices
        self.c = 0
        self.t = 0.0
        self.name = 'topk'
        self.zc = None
        self.current_ratio = 1
        self.shapes = {}           # name -> original tensor shape (set by flatten)

    def _process_data_before_selecting(self, name, data):
        pass

    def _process_data_after_residual(self, name, data):
        # refresh the zero/one mask for the last selection
        if (name not in self.zero_conditions):
            self.zero_conditions[name] = torch.ones(data.numel(), dtype=torch.float32, device=data.device)
        zero_condition = self.zero_conditions[name]
        zero_condition.fill_(1.0)
        zero_condition[self.indexes[name]] = 0.0
        self.zc = zero_condition

    def clear(self):
        """Drop all per-tensor state."""
        self.residuals = {}
        self.sparsities = []
        self.zero_conditions = {}
        self.values = {}
        self.indexes = {}

    def compress(self, tensor, name=None, sigma_scale=2.5, ratio=0.05):
        """Select the k = max(numel * ratio, 1) largest-magnitude entries.

        Returns (tensor, indexes, values); `values` keeps the original signs.
        `sigma_scale` is unused here (kept for interface compatibility).
        """
        start = time.time()
        with torch.no_grad():
            numel = tensor.numel()
            k = max(int(numel * ratio), 1)
            self.current_ratio = ratio
            (values, indexes) = torch.topk(torch.abs(tensor.data), k=k)
            values = tensor.data[indexes]  # re-read to restore signs
            self.values[name] = values
            self.indexes[name] = indexes
            return (tensor, indexes, values)

    def decompress(self, tensor, original_tensor_size):
        return tensor

    def decompress_new(self, tensor, indexes, name=None, shape=None):
        """Scatter `tensor` back into a flat zero tensor of the original size."""
        if (shape is None):
            decompress_tensor = torch.zeros(self.shapes[name], dtype=tensor.dtype, device=tensor.device).view(-1)
        else:
            # BUG FIX: the explicit `shape` argument was ignored (both branches
            # used self.shapes[name]); honor it when provided
            decompress_tensor = torch.zeros(shape, dtype=tensor.dtype, device=tensor.device).view(-1)
        decompress_tensor[indexes] = tensor
        return decompress_tensor

    def flatten(self, tensor, name=None):
        """Remember the original shape and return a flat view."""
        self.shapes[name] = tensor.shape
        return tensor.view(-1)

    def unflatten(self, tensor, name=None, shape=None):
        """Restore the shape remembered by flatten (or an explicit `shape`)."""
        if (shape is None):
            return tensor.view(self.shapes[name])
        else:
            return tensor.view(shape)

    def update_shapes_dict(self, tensor, name):
        self.shapes[name] = tensor.shape

    def get_residuals(self, name, like_tensor):
        """Return (creating on first use) the residual buffer for `name`."""
        if (name not in self.residuals):
            self.residuals[name] = torch.zeros_like(like_tensor.data)
        return self.residuals[name]

    def add_residuals(self, included_indexes, name):
        """Zero the values that were applied; accumulate the rest as residual."""
        with torch.no_grad():
            residuals = self.residuals[name]
            if (type(included_indexes) is np.ndarray):
                indexes_t = torch.from_numpy(included_indexes).to(device=residuals.device).long()
            else:
                indexes_t = included_indexes
            values = self.values[name]
            values.data[indexes_t] = 0.0
            residuals.data[self.indexes[name]] += values.data
def getLESTurbulencePropertiesTemplate(LESModel='dynamicKEqn'):
    """Return an OpenFOAM turbulenceProperties dictionary body for the given LES model.

    Only the LESModel entry is parameterized; all model coefficient
    sub-dictionaries are emitted with their fixed defaults.
    """
    return ('\n simulationType LES;\n\n LES\n {\n LESModel %s;\n\n turbulence on;\n\n printCoeffs on;\n\n delta cubeRootVol;\n\n dynamicKEqnCoeffs\n {\n filter simple;\n }\n\n cubeRootVolCoeffs\n {\n deltaCoeff 1;\n }\n\n PrandtlCoeffs\n {\n delta cubeRootVol;\n cubeRootVolCoeffs\n {\n deltaCoeff 1;\n }\n\n smoothCoeffs\n {\n delta cubeRootVol;\n cubeRootVolCoeffs\n {\n deltaCoeff 1;\n }\n\n maxDeltaRatio 1.1;\n }\n\n Cdelta 0.158;\n }\n\n vanDriestCoeffs\n {\n delta cubeRootVol;\n cubeRootVolCoeffs\n {\n deltaCoeff 1;\n }\n\n smoothCoeffs\n {\n delta cubeRootVol;\n cubeRootVolCoeffs\n {\n deltaCoeff 1;\n }\n\n maxDeltaRatio 1.1;\n }\n\n Aplus 26;\n Cdelta 0.158;\n }\n\n smoothCoeffs\n {\n delta cubeRootVol;\n cubeRootVolCoeffs\n {\n deltaCoeff 1;\n }\n\n maxDeltaRatio 1.1;\n }\n }\n ' % LESModel)
# NOTE(review): the three lines below look like truncated pytest decorators
# (e.g. "@raise_on_failure" and "@pytest.mark.parametrize(...)") lost during
# extraction — restore the "@..." prefixes before running this test.
_on_failure
.parametrize('number_of_nodes', [3])
.parametrize('channels_per_node', [CHAIN])
def test_secret_revealed_on_chain(raiden_chain: List[RaidenService], deposit, settle_timeout, token_addresses, retry_interval_initial):
    """A held unlock on the mediator forces on-chain secret reveal; verify the
    target (app2) performs the on-chain unlock after channel close/settle."""
    (app0, app1, app2) = raiden_chain
    token_address = token_addresses[0]
    token_network_address = views.get_token_network_address_by_token_address(views.state_from_raiden(app0), app0.default_registry.address, token_address)
    assert token_network_address
    amount = PaymentAmount(10)
    identifier = PaymentID(1)
    target = TargetAddress(app2.address)
    (secret, secrethash) = factories.make_secret_with_hash()
    app1_hold_event_handler = app1.raiden_event_handler
    msg = 'test apps must have HoldRaidenEventHandler set'
    assert isinstance(app1_hold_event_handler, HoldRaidenEventHandler), msg
    # hold the unlock on the mediator so the secret must be revealed on-chain
    app1_hold_event_handler.hold_unlock_for(secrethash=secrethash)
    app0.mediated_transfer_async(token_network_address=token_network_address, amount=amount, target=target, identifier=identifier, secret=secret, route_states=[create_route_state_for_route(apps=raiden_chain, token_address=token_address, fee_estimate=FeeAmount(round((INTERNAL_ROUTING_DEFAULT_FEE_PERC * amount))))])
    with watch_for_unlock_failures(*raiden_chain), block_offset_timeout(app0):
        wait_for_state_change(app2, ReceiveSecretReveal, {'secrethash': secrethash}, retry_interval_initial)
    channel_state2_1 = get_channelstate(app2, app1, token_network_address)
    pending_lock = channel_state2_1.partner_state.secrethashes_to_unlockedlocks.get(secrethash)
    msg = 'The lock must be registered in unlocked locks since the secret is known'
    assert (pending_lock is not None), msg
    # record which node actually performs the on-chain unlock
    unlockers = []
    orig_unlock = TokenNetwork.unlock
    def _mocked_unlock(self: TokenNetwork, *args, **kwargs):
        unlockers.append(self.node_address)
        return orig_unlock(self, *args, **kwargs)
    with patch.object(TokenNetwork, 'unlock', _mocked_unlock):
        balance_proof = channel_state2_1.partner_state.balance_proof
        assert isinstance(balance_proof, BalanceProofSignedState)
        channel_close_event = ContractSendChannelClose(canonical_identifier=channel_state2_1.canonical_identifier, balance_proof=balance_proof, triggered_by_block_hash=app0.rpc_client.blockhash_from_blocknumber(BLOCK_ID_LATEST))
        assert app2.wal, 'test apps must be started by the fixture.'
        current_state = views.state_from_raiden(app2)
        app2.raiden_event_handler.on_raiden_events(raiden=app2, chain_state=current_state, events=[channel_close_event])
        # wait past the settlement window before asserting final balances
        settle_expiration = ((app0.rpc_client.block_number() + settle_timeout) + DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS)
        app0.proxy_manager.client.wait_until_block(target_block_number=settle_expiration)
        assert_synced_channel_state(token_network_address, app0, (deposit - amount), [], app1, (deposit + amount), [])
        with watch_for_unlock_failures(*raiden_chain), gevent.Timeout(40):
            wait_for_state_change(app1, ContractReceiveChannelBatchUnlock, {}, retry_interval_initial)
        # only the target should have unlocked on-chain
        assert (unlockers == [app2.address])
def query_client_id(display, wid):
    """Return the local client PID the X-Resource extension reports for
    window *wid*, or ``None`` when no PID is available."""
    query = display.res_query_client_ids([{'client': wid, 'mask': XRes.LocalClientPIDMask}])
    for client_id in query.ids:
        matches = (client_id.spec.client > 0) and (client_id.spec.mask == XRes.LocalClientPIDMask)
        if not matches:
            continue
        # The first reported value is the PID.
        for pid in client_id.value:
            return pid
    return None
class Road():
    """Procedural road mesh builder (Blender bpy/bmesh).

    Builds a 2D cross-section outline, extrudes it along a straight array
    or a bezier curve, assigns material groups, and finally bakes UVs.

    NOTE(review): the bare ``_safe`` lines and the ``cls``-first method
    signatures look like the residue of stripped decorators (something like
    ``@crash_safe`` plus ``@classmethod``) — confirm against the original
    add-on source before relying on call semantics.
    """
    _safe
    def build(cls, context, prop):
        """Create a new road object from the property group *prop* and return it."""
        # Zero-padded running index keeps object names unique, e.g. 'road_004'.
        name = ('road_' + str('{:0>3}'.format((len(bpy.data.objects) + 1))))
        obj = create_object(name, create_mesh((name + '_mesh')))
        link_obj(obj)
        bm = bm_from_obj(obj)
        vertex_count = cls.create_vertex_outline(bm, prop)
        cls.create_curve(context)
        cls.extrude_road(context, prop, bm)
        bm_to_obj(bm, obj)
        # Remember the outline size for finalize_road()'s UV pass.
        obj['VertexCount'] = vertex_count
        return obj
    def create_vertex_outline(cls, bm, prop):
        """Create the cross-section vertices/edges of the road at y = 0.

        Returns the number of vertices created (one section's worth).
        """
        # Horizontal/vertical drop of a slanted shoulder extension.
        shoulder_width = (sin(prop.shoulder_angle) * prop.shoulder_height)
        shoulder_height = (cos(prop.shoulder_angle) * prop.shoulder_height)
        total_width_left = (prop.width / 2)
        if prop.generate_shoulders:
            total_width_left += prop.shoulder_width
        total_width_right = total_width_left
        if prop.generate_left_sidewalk:
            total_width_left += prop.sidewalk_width
        if prop.generate_right_sidewalk:
            total_width_right += prop.sidewalk_width
        # Left rim: sloped shoulder extension, or the outer sidewalk edge.
        if (not prop.generate_left_sidewalk):
            bm.verts.new(Vector((((- total_width_left) - shoulder_width), 0, (- shoulder_height))))
        if prop.generate_left_sidewalk:
            bm.verts.new(Vector(((- total_width_left), 0, prop.sidewalk_height)))
        if prop.generate_shoulders:
            if prop.generate_left_sidewalk:
                bm.verts.new(Vector(((((- prop.width) / 2) - prop.shoulder_width), 0, prop.sidewalk_height)))
            bm.verts.new(Vector(((((- prop.width) / 2) - prop.shoulder_width), 0, 0)))
        elif prop.generate_left_sidewalk:
            bm.verts.new(Vector((((- prop.width) / 2), 0, prop.sidewalk_height)))
        # Road surface edges are always present.
        bm.verts.new(Vector((((- prop.width) / 2), 0, 0)))
        bm.verts.new(Vector(((prop.width / 2), 0, 0)))
        if prop.generate_shoulders:
            bm.verts.new(Vector((((prop.width / 2) + prop.shoulder_width), 0, 0)))
            if prop.generate_right_sidewalk:
                bm.verts.new(Vector((((prop.width / 2) + prop.shoulder_width), 0, prop.sidewalk_height)))
        elif prop.generate_right_sidewalk:
            bm.verts.new(Vector(((prop.width / 2), 0, prop.sidewalk_height)))
        if prop.generate_right_sidewalk:
            bm.verts.new(Vector((total_width_right, 0, prop.sidewalk_height)))
        if (not prop.generate_right_sidewalk):
            bm.verts.new(Vector(((total_width_right + shoulder_width), 0, (- shoulder_height))))
        bm.verts.ensure_lookup_table()
        # Chain consecutive vertices into the outline edge strip.
        for i in range((len(bm.verts) - 1)):
            bm.edges.new((bm.verts[i], bm.verts[(i + 1)]))
        return len(bm.verts)
    def create_curve(cls, context):
        """Create a two-point bezier curve parented to the active object.

        The curve later drives the ARRAY/CURVE modifiers for curved roads.
        """
        name = ('curve_' + str('{:0>3}'.format((len(bpy.data.objects) + 1))))
        curve_data = bpy.data.curves.new(name=name, type='CURVE')
        curve_data.dimensions = '3D'
        curve_data.resolution_u = 500
        spline = curve_data.splines.new(type='BEZIER')
        spline.bezier_points.add(1)
        spline.bezier_points[1].co = (0, 10, 0)
        spline.bezier_points[0].handle_left_type = spline.bezier_points[0].handle_right_type = 'AUTO'
        spline.bezier_points[1].handle_left_type = spline.bezier_points[1].handle_right_type = 'AUTO'
        curve_obj = bpy.data.objects.new(name=name, object_data=curve_data)
        curve_obj.parent = context.object
        context.collection.objects.link(curve_obj)
    _safe
    def extrude_road(cls, context, prop, bm):
        """Extrude the outline one interval and tag faces with material groups."""
        geom = bmesh.ops.extrude_face_region(bm, geom=bm.edges)
        verts = filter_geom(geom['geom'], bmesh.types.BMVert)
        bmesh.ops.transform(bm, matrix=Matrix.Translation((0, prop.interval, 0)), verts=verts)
        # Register only the material groups this configuration needs.
        groups = [MaterialGroup.ROAD]
        if (prop.generate_left_sidewalk or prop.generate_right_sidewalk):
            groups.append(MaterialGroup.SIDEWALK)
            groups.append(MaterialGroup.SIDEWALK_SIDE)
        if ((not prop.generate_left_sidewalk) or (not prop.generate_right_sidewalk)):
            groups.append(MaterialGroup.SHOULDER_EXTENSION)
        if prop.generate_shoulders:
            groups.append(MaterialGroup.SHOULDER)
        add_material_group(groups)
        bm.edges.ensure_lookup_table()
        # Walk the edge strip left-to-right; face_count tracks the next edge index.
        if (not prop.generate_left_sidewalk):
            add_faces_to_group(bm, (bm.edges[0].link_faces[0],), MaterialGroup.SHOULDER_EXTENSION)
            face_count = 1
            if prop.generate_shoulders:
                add_faces_to_group(bm, (bm.edges[1].link_faces[0],), MaterialGroup.SHOULDER)
                face_count += 1
        else:
            add_faces_to_group(bm, (bm.edges[0].link_faces[0],), MaterialGroup.SIDEWALK)
            add_faces_to_group(bm, (bm.edges[1].link_faces[0],), MaterialGroup.SIDEWALK_SIDE)
            face_count = 2
            if prop.generate_shoulders:
                add_faces_to_group(bm, (bm.edges[2].link_faces[0],), MaterialGroup.SHOULDER)
                face_count += 1
        add_faces_to_group(bm, (bm.edges[face_count].link_faces[0],), MaterialGroup.ROAD)
        face_count += 1
        if prop.generate_shoulders:
            add_faces_to_group(bm, (bm.edges[face_count].link_faces[0],), MaterialGroup.SHOULDER)
            face_count += 1
        if prop.generate_right_sidewalk:
            add_faces_to_group(bm, (bm.edges[face_count].link_faces[0],), MaterialGroup.SIDEWALK_SIDE)
            add_faces_to_group(bm, (bm.edges[(face_count + 1)].link_faces[0],), MaterialGroup.SIDEWALK)
        else:
            add_faces_to_group(bm, (bm.edges[face_count].link_faces[0],), MaterialGroup.SHOULDER_EXTENSION)
        if (prop.extrusion_type == 'STRAIGHT'):
            cls.extrude_straight(context, prop, bm)
        else:
            cls.extrude_curved(context, prop, bm)
        return {'FINISHED'}
    _safe
    def extrude_straight(cls, context, prop, bm):
        """Repeat the section along a straight line via an ARRAY modifier."""
        if (not context.object.modifiers):
            bpy.ops.object.modifier_add(type='ARRAY')
        modifier = context.object.modifiers['Array']
        modifier.show_in_editmode = True
        modifier.show_on_cage = True
        modifier.fit_type = 'FIT_LENGTH'
        modifier.fit_length = prop.length
        modifier.use_merge_vertices = True
        modifier.relative_offset_displace = [0, 1, 0]
        return {'FINISHED'}
    _safe
    def extrude_curved(cls, context, prop, bm):
        """Repeat the section along the helper bezier curve (ARRAY + CURVE)."""
        curve = context.object.children[0]
        # Align the section with the curve's deform axis.
        bmesh.ops.rotate(bm, matrix=Matrix.Rotation(math.radians(90.0), 3, 'Y'), verts=bm.verts)
        if (not context.object.modifiers):
            bpy.ops.object.modifier_add(type='ARRAY')
            modifier = context.object.modifiers['Array']
            modifier.fit_type = 'FIT_CURVE'
            modifier.use_merge_vertices = True
            modifier.curve = curve
            modifier.relative_offset_displace = [0, 1, 0]
            bpy.ops.object.modifier_add(type='CURVE')
            modifier = context.object.modifiers['Curve']
            modifier.show_in_editmode = True
            modifier.show_on_cage = True
            modifier.object = curve
            modifier.deform_axis = 'POS_Y'
        return {'FINISHED'}
    _safe
    def finalize_road(cls, context):
        """Apply the modifiers, remove the helper curve, and bake UVs."""
        if (context.active_object is None):
            return {'FINISHED'}
        bpy.ops.object.modifier_apply(modifier='Array')
        bpy.ops.object.modifier_apply(modifier='Curve')
        if ((len(context.active_object.children) > 0) and (context.active_object.children[0].type == 'CURVE')):
            bpy.data.objects.remove(context.active_object.children[0])
        bm = bm_from_obj(context.active_object)
        count = int(context.active_object['VertexCount'])
        uv_layer = bm.loops.layers.uv.new()
        sections = (len(bm.verts) // count)
        total_distance = 0
        uv_coords = []
        bm.verts.ensure_lookup_table()
        bm.verts.index_update()
        last_position = ((bm.verts[0].co + bm.verts[count].co) / 2)
        texture_scale = 0.1
        for i in range(sections):
            # Section midpoint; accumulated travel distance drives the V coordinate.
            current_position = ((bm.verts[(i * count)].co + bm.verts[(((i + 1) * count) - 1)].co) / 2)
            total_distance += (last_position - current_position).length
            for j in range(count):
                # U alternates 0/1 across the section; V scales with distance.
                uv_coords.append(((j % 2), (total_distance * texture_scale)))
            last_position = current_position
        for f in bm.faces:
            for l in f.loops:
                if (l.vert.index < len(uv_coords)):
                    l[uv_layer].uv = uv_coords[l.vert.index]
        bm_to_obj(bm, context.active_object)
        return {'FINISHED'}
def test_align_right_multiline():
    """align_right pads every line of a multiline string on the left."""
    # Plain (unstyled) case.
    plain = cu.align_right('foo\nshoes', fill_char='-', width=7)
    assert plain == '----foo\n--shoes'
    # Styled case: alignment must re-anchor styles per line with RESET_ALL
    # and keep the active foreground color across the line break.
    reset_all = str(ansi.TextStyle.RESET_ALL)
    blue = str(ansi.Fg.BLUE)
    red = str(ansi.Fg.RED)
    green = str(ansi.Fg.GREEN)
    fg_reset = str(ansi.Fg.RESET)
    styled_text = f'{blue}foo{red}moo\nshoes{fg_reset}'
    styled_fill = f'{green}-{fg_reset}'
    aligned = cu.align_right(styled_text, fill_char=styled_fill, width=7)
    expected = (
        f'{reset_all}{green}-{fg_reset}{reset_all}{blue}foo{red}moo{reset_all}\n'
        f'{reset_all}{green}--{fg_reset}{reset_all}{red}shoes{fg_reset}{reset_all}'
    )
    assert aligned == expected
def find_subtitle(title, delimiters=DEFAULT_SUB_SPLITTERS):
    """Split *title* into ``(main_title, subtitle)``.

    A subtitle is the text enclosed by the last occurrence of a delimiter
    pair that closes at the very end of the title.  Returns
    ``(title, None)`` when no pair applies.
    """
    if isinstance(title, bytes):
        title = title.decode('utf-8', 'replace')
    for pair in delimiters:
        if len(pair) != 2:
            continue
        (opener, closer) = pair
        # The opener must appear before the final character and the title
        # must end with the closer for this pair to be considered.
        if (opener not in title[:(- 1)]) or (not title.endswith(closer)):
            continue
        tail = len(closer)
        split_at = title[0:(- tail)].rindex(opener)
        # split_at == 0 would make the whole title a subtitle; skip it.
        if split_at:
            subtitle = title[(split_at + len(opener)):(- tail)]
            return (title[:split_at].rstrip(), subtitle)
    return (title, None)
def gen_train_txt(txt_path):
    """Write one line per usable training image to *txt_path*.

    Each line is "<running index> <image path> <object fields...>".
    Relies on the module globals ``train_cnt``, ``trainval_path``,
    ``anno_path``, ``img_path`` and the ``parse_xml`` helper.

    Fix: use context managers so every file handle is closed even on
    error — the original leaked each image-list handle
    (``open(path).readlines()``) and, on an exception, the output handle.
    """
    global train_cnt
    with open(txt_path, 'w') as f:
        for (i, path) in enumerate(trainval_path):
            with open(path, 'r') as name_file:
                img_names = name_file.readlines()
            for img_name in img_names:
                img_name = img_name.strip()
                xml_path = (((anno_path[i] + '/') + img_name) + '.xml')
                objects = parse_xml(xml_path)
                if objects:
                    # Replace the first field with the image path; only keep
                    # entries whose image actually exists on disk.
                    objects[0] = (((img_path[i] + '/') + img_name) + '.jpg')
                    if os.path.exists(objects[0]):
                        objects.insert(0, str(train_cnt))
                        train_cnt += 1
                        f.write(' '.join(objects) + '\n')
def urun(b, mo0=None, dm0=None):
    """Run a UHF + CASSCF(12, 8) calculation for O2 at bond length *b*.

    mo0/dm0 are optional orbital and density-matrix initial guesses.
    Returns the (mean-field, CASSCF) solver pair.
    """
    molecule = gto.Mole()
    molecule.build(
        verbose=5,
        output=('o2uhf-%3.2f.out' % b),
        atom=[['O', (0, 0, (b / 2))], ['O', (0, 0, ((- b) / 2))]],
        basis='cc-pvdz',
        spin=2,
    )
    mean_field = scf.UHF(molecule)
    mean_field.scf(dm0)
    casscf = mcscf.CASSCF(mean_field, 12, 8)
    if mo0 is not None:
        # Project the previous geometry's orbitals onto the new basis.
        mo0 = mcscf.project_init_guess(casscf, mo0)
    casscf.kernel(mo0)
    casscf.analyze()
    return (mean_field, casscf)
@skip_if_mysql
class ModelTaggedQuerysetOptionsSingleTest(TagTestManager, TestCase):
    """Filtering/excluding on single tag fields with case-sensitivity options.

    Skipped on MySQL because its default collation is case-insensitive,
    which would break the case-sensitive expectations below.
    Fix: restored the decorator from the mangled ``_if_mysql`` line.
    """
    manage_models = [test_models.SingleTagFieldOptionsModel]
    def setUpExtra(self):
        # Three instances: 'Mr', 'mr', 'Mr' on both option variants.
        self.test_model = test_models.SingleTagFieldOptionsModel
        self.test_model.objects.create(name='Test 1', case_sensitive_true='Mr', case_sensitive_false='Mr')
        self.test_model.objects.create(name='Test 2', case_sensitive_true='mr', case_sensitive_false='mr')
        self.test_model.objects.create(name='Test 3', case_sensitive_true='Mr', case_sensitive_false='Mr')
    def test_setup(self):
        # Case-sensitive field keeps 'Mr' and 'mr' distinct; insensitive merges.
        self.assertTagModel(self.test_model.case_sensitive_true, {'Mr': 2, 'mr': 1})
        self.assertTagModel(self.test_model.case_sensitive_false, {'Mr': 3})
    def test_case_sensitive_filter(self):
        qs1 = self.test_model.objects.filter(case_sensitive_true='Mr')
        self.assertEqual(qs1.count(), 2)
        self.assertEqual(str(qs1[0].name), 'Test 1')
        self.assertEqual(str(qs1[1].name), 'Test 3')
    def test_case_sensitive_exclude_matches(self):
        qs1 = self.test_model.objects.exclude(case_sensitive_true='Mr')
        self.assertEqual(qs1.count(), 1)
        self.assertEqual(str(qs1[0].name), 'Test 2')
    def test_case_insensitive_filter(self):
        qs1 = self.test_model.objects.filter(case_sensitive_false='mr')
        self.assertEqual(qs1.count(), 3)
        self.assertEqual(str(qs1[0].name), 'Test 1')
        self.assertEqual(str(qs1[1].name), 'Test 2')
        self.assertEqual(str(qs1[2].name), 'Test 3')
    def test_case_insensitive_exclude(self):
        qs1 = self.test_model.objects.exclude(case_sensitive_false='mr')
        self.assertEqual(qs1.count(), 0)
def read_gda(file_in, tokenizer, max_seq_length=1024):
    """Read a GDA (gene-disease association) TSV file into model features.

    Each line holds a pmid, a '|'-separated document text, then flattened
    17-field relation records.  Entity mentions are wrapped in '*' markers
    before word-piece tokenization, and token offsets are remapped to the
    word-piece sequence.  Returns a list of feature dicts with keys
    ``input_ids``, ``entity_pos``, ``labels``, ``hts`` and ``title``.
    """
    pmids = set()
    features = []
    maxlen = 0
    with open(file_in, 'r') as infile:
        lines = infile.readlines()
        for (i_l, line) in enumerate(tqdm(lines)):
            line = line.rstrip().split('\t')
            pmid = line[0]
            # Process each document only once.
            if (pmid not in pmids):
                pmids.add(pmid)
                text = line[1]
                # Relation records come in fixed-size chunks of 17 fields.
                # NOTE(review): prs is iterated twice below — assumes chunks()
                # returns a list, not a one-shot generator; confirm.
                prs = chunks(line[2:], 17)
                ent2idx = {}
                train_triples = {}
                entity_pos = set()
                # First pass: collect every mention span (start, end, type)
                # for both sides of each relation record.
                for p in prs:
                    es = list(map(int, p[8].split(':')))
                    ed = list(map(int, p[9].split(':')))
                    tpy = p[7]
                    for (start, end) in zip(es, ed):
                        entity_pos.add((start, end, tpy))
                    es = list(map(int, p[14].split(':')))
                    ed = list(map(int, p[15].split(':')))
                    tpy = p[13]
                    for (start, end) in zip(es, ed):
                        entity_pos.add((start, end, tpy))
                sents = [t.split(' ') for t in text.split('|')]
                new_sents = []
                sent_map = {}
                i_t = 0
                # Word-piece tokenize, inserting '*' around entity mentions
                # and recording original-token -> word-piece index in sent_map.
                for sent in sents:
                    for token in sent:
                        tokens_wordpiece = tokenizer.tokenize(token)
                        for (start, end, tpy) in list(entity_pos):
                            if (i_t == start):
                                tokens_wordpiece = (['*'] + tokens_wordpiece)
                            if ((i_t + 1) == end):
                                tokens_wordpiece = (tokens_wordpiece + ['*'])
                        sent_map[i_t] = len(new_sents)
                        new_sents.extend(tokens_wordpiece)
                        i_t += 1
                sent_map[i_t] = len(new_sents)
                sents = new_sents
                entity_pos = []
                # Second pass: build (head, tail) relation triples.
                for p in prs:
                    if (p[0] == 'not_include'):
                        continue
                    # Head/tail assignment follows the annotated direction.
                    if (p[1] == 'L2R'):
                        (h_id, t_id) = (p[5], p[11])
                        (h_start, t_start) = (p[8], p[14])
                        (h_end, t_end) = (p[9], p[15])
                    else:
                        (t_id, h_id) = (p[5], p[11])
                        (t_start, h_start) = (p[8], p[14])
                        (t_end, h_end) = (p[9], p[15])
                    h_start = map(int, h_start.split(':'))
                    h_end = map(int, h_end.split(':'))
                    t_start = map(int, t_start.split(':'))
                    t_end = map(int, t_end.split(':'))
                    # Remap token offsets to word-piece offsets.
                    h_start = [sent_map[idx] for idx in h_start]
                    h_end = [sent_map[idx] for idx in h_end]
                    t_start = [sent_map[idx] for idx in t_start]
                    t_end = [sent_map[idx] for idx in t_end]
                    if (h_id not in ent2idx):
                        ent2idx[h_id] = len(ent2idx)
                        entity_pos.append(list(zip(h_start, h_end)))
                    if (t_id not in ent2idx):
                        ent2idx[t_id] = len(ent2idx)
                        entity_pos.append(list(zip(t_start, t_end)))
                    (h_id, t_id) = (ent2idx[h_id], ent2idx[t_id])
                    r = gda_rel2id[p[0]]
                    if ((h_id, t_id) not in train_triples):
                        train_triples[(h_id, t_id)] = [{'relation': r}]
                    else:
                        train_triples[(h_id, t_id)].append({'relation': r})
                # Multi-hot relation label vector per (head, tail) pair.
                (relations, hts) = ([], [])
                for (h, t) in train_triples.keys():
                    relation = ([0] * len(gda_rel2id))
                    for mention in train_triples[(h, t)]:
                        relation[mention['relation']] = 1
                    relations.append(relation)
                    hts.append([h, t])
                maxlen = max(maxlen, len(sents))
                # Reserve two positions for the special tokens added below.
                sents = sents[:(max_seq_length - 2)]
                input_ids = tokenizer.convert_tokens_to_ids(sents)
                input_ids = tokenizer.build_inputs_with_special_tokens(input_ids)
                if (len(hts) > 0):
                    feature = {'input_ids': input_ids, 'entity_pos': entity_pos, 'labels': relations, 'hts': hts, 'title': pmid}
                    features.append(feature)
    print('Number of documents: {}.'.format(len(features)))
    print('Max document length: {}.'.format(maxlen))
    return features
def dist_factory(path_item, entry, only):
    """Return the appropriate distribution-discovery callable for *entry*.

    Metadata directories (.egg-info / .dist-info) yield
    ``distributions_from_metadata``; otherwise egg paths and .egg-link files
    are only honored when *only* is false.  Falls back to a ``NoDists()``
    no-op producer.
    """
    lower = entry.lower()
    is_egg_info = lower.endswith('.egg-info')
    is_dist_info = lower.endswith('.dist-info') and os.path.isdir(os.path.join(path_item, entry))
    if is_egg_info or is_dist_info:
        return distributions_from_metadata
    if (not only) and _is_egg_path(entry):
        return find_distributions
    if (not only) and lower.endswith('.egg-link'):
        return resolve_egg_link
    return NoDists()
def test_asyncio_marker_compatibility_with_xfail(pytester: Pytester):
    """A strict ``xfail`` marker must compose with the ``asyncio`` marker.

    Fix: the embedded test module had mangled decorator lines ('.xfail',
    '.asyncio') which are not valid Python; restored the ``@pytest.mark.``
    prefixes so the inner module collects and actually xfails.
    """
    pytester.makepyfile(dedent('''\
        import pytest

        pytest_plugins = "pytest_asyncio"

        @pytest.mark.xfail(reason="need a failure", strict=True)
        @pytest.mark.asyncio
        async def test_asyncio_marker_fail():
            raise AssertionError
        '''))
    result = pytester.runpytest('--asyncio-mode=strict')
    result.assert_outcomes(xfailed=1)
def test_time_tracking_mixin():
    """Subclasses of TimeTrackingMixin expose the time-tracking API."""
    class TestClass(TimeTrackingMixin):
        pass

    instance = TestClass()
    expected_api = (
        'time_stats',
        'time_estimate',
        'reset_time_estimate',
        'add_spent_time',
        'reset_spent_time',
    )
    for member in expected_api:
        assert hasattr(instance, member)
def test_context_share_texture():
    """A texture created in one window's GL context must be visible from a
    second window's context, and deleting it must affect both contexts."""
    w1 = window.Window(200, 200)
    w1.switch_to()
    # Create a 1x1 RGBA texture in the first context.
    textures = c_uint()
    glGenTextures(1, byref(textures))
    texture = textures.value
    glBindTexture(GL_TEXTURE_2D, texture)
    data = (c_ubyte * 4)()
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE, data)
    assert glIsTexture(texture)
    # Second window: object space is shared, so the texture must be visible.
    w2 = window.Window(200, 200)
    w2.switch_to()
    assert glIsTexture(texture)
    # Deleting from the second context must remove it from the first too.
    glDeleteTextures(1, byref(textures))
    assert (not glIsTexture(texture))
    w1.switch_to()
    assert (not glIsTexture(texture))
    w1.close()
    w2.close()
class ANSI_input(unittest.TestCase):
    """End-to-end check of the '-A' (ANSI time string) input option."""
    def test(self):
        # NOTE(review): '-A' with an empty value apparently resolves to the
        # fixed timestamp shown below — confirm against run_test's semantics.
        # The expected output is compared verbatim.
        run_test(self, ['-A', ''], ' Month/Day/Year H:M:S 06/11/2013 20:46:11 GPS\n Modified Julian Date 56454. GPS\n GPSweek DayOfWeek SecOfWeek 720 2 247571.000000\n FullGPSweek Zcount 1744 165047\n Year DayOfYear SecondOfDay 2013 162 74771.000000\n Unix: Second Microsecond 0\n Zcount: 29-bit (32-bit) ()\n')
def test_adding_entry_points_affect_entry_point_map(easter_fixture):
    """Entry points registered on the stub egg appear in its entry-point map
    and are removed again by clear()."""
    group = easter_fixture.group_name
    stub = easter_fixture.stub_egg
    # Register one entry point from a spec line and one from a class object.
    stub.add_entry_point_from_line(group, 'test1 = reahl.stubble_dev.test_easteregg:TestClass1')
    stub.add_entry_point(group, 'test2', TestClass2)
    entry_map = stub.get_entry_map()
    assert list(entry_map.keys()) == [group]
    by_name = list(entry_map.values())[0]
    assert len(list(by_name.keys())) == 2
    # Both registrations resolve to real EntryPoint objects that load the
    # expected classes.
    for entry_name, expected_class in (('test1', TestClass1), ('test2', TestClass2)):
        assert isinstance(by_name[entry_name], pkg_resources.EntryPoint)
        assert by_name[entry_name].load() is expected_class
    stub.clear()
    assert not stub.get_entry_map()
class MSatBoolUFRewriter(IdentityDagWalker):
    """Rewrite uninterpreted-function applications with Boolean arguments.

    For a UF application f(b1..bk, o1..om) with k Boolean arguments, builds
    an ITE tree over 2**k fresh function symbols 'f#i' that take only the
    non-Boolean arguments, selecting among them by the Boolean arguments.
    """
    def __init__(self, environment):
        IdentityDagWalker.__init__(self, environment)
        self.get_type = self.env.stc.get_type
        self.mgr = self.env.formula_manager
    def walk_function(self, formula, args, **kwargs):
        from pysmt.typing import FunctionType
        # Split the (already rewritten) arguments into Boolean and others.
        bool_args = []
        other_args = []
        for a in args:
            if self.get_type(a).is_bool_type():
                bool_args.append(a)
            else:
                other_args.append(a)
        if (len(bool_args) == 0):
            # No Boolean arguments: nothing to rewrite.
            return IdentityDagWalker.walk_function(self, formula, args, **kwargs)
        # Replacement functions keep the return type but drop the Boolean
        # parameters; with no remaining parameters they become constants.
        rtype = formula.function_name().symbol_type().return_type
        ptype = [self.get_type(a) for a in other_args]
        if (len(ptype) == 0):
            ftype = rtype
        else:
            ftype = FunctionType(rtype, ptype)
        # One candidate per assignment of the Boolean arguments (2**k).
        stack = []
        for i in range((2 ** len(bool_args))):
            fname = self.mgr.Symbol(('%s#%i' % (formula.function_name(), i)), ftype)
            if (len(ptype) == 0):
                stack.append(fname)
            else:
                stack.append(self.mgr.Function(fname, tuple(other_args)))
        # Fold candidates pairwise into an ITE tree, one level per Boolean
        # argument; constant True/False arguments prune the corresponding half.
        for b in bool_args:
            tmp = []
            while (len(stack) > 0):
                lhs = stack.pop()
                rhs = stack.pop()
                if b.is_true():
                    tmp.append(lhs)
                elif b.is_false():
                    tmp.append(rhs)
                else:
                    ite = self.mgr.Ite(b, lhs, rhs)
                    tmp.append(ite)
            stack = tmp
        # One element remains: the fully folded replacement term.
        res = stack[0]
        return res
class Keithley2600(Instrument):
    """Represents a Keithley 2600 series SourceMeter with two channels."""
    def __init__(self, adapter, name='Keithley 2600 SourceMeter', **kwargs):
        super().__init__(adapter, name, **kwargs)
        self.ChA = Channel(self, 'a')
        self.ChB = Channel(self, 'b')
    @property
    def error(self):
        """Read and return the next ``(code, message)`` pair from the
        instrument error queue.

        Fix: restored the ``@property`` decorator — ``check_errors`` reads
        ``self.error`` without calling it, which only works as a property.
        """
        err = self.ask('print(errorqueue.next())')
        err = err.split('\t')
        if (len(err) > 1):
            err = (int(float(err[0])), err[1])
            code = err[0]
            message = err[1].replace('"', '')
        else:
            # Malformed/short response: report the raw payload for both.
            code = message = err[0]
        log.info(f'ERROR {str(code)},{str(message)} - len {str(len(err))}')
        return (code, message)
    def check_errors(self):
        """Drain the error queue, logging each entry; warn when one
        retrieval iteration takes longer than 10 seconds."""
        (code, message) = self.error
        while (code != 0):
            t = time.time()
            log.info(('Keithley 2600 reported error: %d, %s' % (code, message)))
            (code, message) = self.error
            if ((time.time() - t) > 10):
                log.warning('Timed out for Keithley 2600 error retrieval.')
def get_logger(name: Optional[str]=None, rank: Optional[int]=None, **kwargs):
    """Return a logger whose root format carries a '[Rank N]' prefix when
    running under a distributed launcher.

    :param name: logger name passed to ``logging.getLogger``.
    :param rank: process rank; defaults to the ``RANK`` env var (-1 if unset).
    :param kwargs: forwarded to ``logging.basicConfig``.
    """
    if (rank is None):
        rank = int(os.environ.get('RANK', (- 1)))
    logger = logging.getLogger(name)
    level = logging.INFO
    # Only show the rank prefix for real distributed ranks (rank >= 0).
    log_format = LOG_FORMAT.format(rank=(f'[Rank {rank}]' if (rank > (- 1)) else ''))
    # NOTE(review): basicConfig is a no-op once the root logger is configured,
    # so later calls reuse the first format — confirm this is intended.
    logging.basicConfig(level=level, format=log_format, **kwargs)
    return logger
def colour_path(image, path) -> None:
    """Paint *path* onto *image* with a red-to-blue gradient.

    *path* is ordered finish -> start (``path[0]`` is the finish node);
    each node exposes ``coords`` (x, y) and an integer ``distance``.
    Assumes the start node's distance is 0 so the gradient indices stay in
    range — TODO confirm against the path builder.
    """
    start_node = path[(- 1)]
    finish_node = path[0]
    pixels = image.load()
    # One gradient step per unit of distance along the full path.
    red_fade = np.linspace(255, 0, (finish_node.distance + 1)).astype(int)
    blue_fade = np.linspace(0, 255, (finish_node.distance + 1)).astype(int)
    step = 0
    for (node1, node2) in zip(path[:(- 1)], path[1:]):
        (x1, y1) = node1.coords
        (x2, y2) = node2.coords
        distance = (node1.distance - node2.distance)
        # Interpolate pixel positions between consecutive nodes; drop the last
        # point so it is not painted twice as the next segment's first point.
        x_change = np.linspace(x1, x2, (distance + 1))[:(- 1)]
        y_change = np.linspace(y1, y2, (distance + 1))[:(- 1)]
        for (path_x, path_y) in zip(x_change, y_change):
            pixels[(path_x, path_y)] = (red_fade[step], blue_fade[step], 0)
            step += 1
    # Paint the final (start) pixel, which the loop above skipped.
    pixels[start_node.coords] = (red_fade[step], blue_fade[step], 0)
class PerceiverOnnxConfig(OnnxConfig):
    """ONNX export configuration for Perceiver models.

    Fix: restored the ``@property`` decorators on ``inputs`` and
    ``atol_for_validation`` — ``OnnxConfig`` exposes both as properties and
    the decorators appear to have been stripped from this copy.
    """
    @property
    def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        """Dynamic-axis names for the exported 'inputs' and 'attention_mask'."""
        if (self.task == 'multiple-choice'):
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict([('inputs', dynamic_axis), ('attention_mask', dynamic_axis)])
    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 0.0001
    def generate_dummy_inputs(self, preprocessor: Union[('PreTrainedTokenizerBase', 'FeatureExtractionMixin')], batch_size: int=(- 1), seq_length: int=(- 1), num_choices: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None, num_channels: int=3, image_width: int=40, image_height: int=40) -> Mapping[(str, Any)]:
        """Build dummy inputs for tracing; renames the model-specific input
        ('input_ids' or 'pixel_values') to the Perceiver 'inputs' key."""
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # Text model: fabricate a batch of 'a'-token sequences sized to
            # account for the special tokens the tokenizer will add.
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
            dummy_input = ([(' '.join(['a']) * seq_length)] * batch_size)
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs['inputs'] = inputs.pop('input_ids')
            return inputs
        elif (isinstance(preprocessor, FeatureExtractionMixin) and (preprocessor.model_input_names[0] == 'pixel_values')):
            # Vision model: fabricate random images of the requested size.
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs['inputs'] = inputs.pop('pixel_values')
            return inputs
        else:
            raise ValueError('Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.')
def channel_pruning_auto_mode():
    """Compress a VGG16 graph with AIMET channel pruning in auto mode.

    Builds an untrained VGG16, configures greedy compression-ratio
    selection (target ratio 0.8), runs ModelCompressor.compress_model and
    prints the compressed session and statistics.
    """
    sess = tf.compat.v1.Session()
    with sess.graph.as_default():
        _ = VGG16(weights=None, input_shape=(224, 224, 3))
        init = tf.compat.v1.global_variables_initializer()
    sess.run(init)
    # Exclude the first conv layer from pruning.
    conv2d = sess.graph.get_operation_by_name('block1_conv1/Conv2D')
    modules_to_ignore = [conv2d]
    greedy_params = GreedySelectionParameters(target_comp_ratio=Decimal(0.8), num_comp_ratio_candidates=2, use_monotonic_fit=True, saved_eval_scores_dict=None)
    auto_params = ChannelPruningParameters.AutoModeParams(greedy_select_params=greedy_params, modules_to_ignore=modules_to_ignore)
    # Random data stands in for a real calibration dataset.
    batch_size = 1
    input_data = np.random.rand(100, 224, 224, 3)
    dataset = tf.data.Dataset.from_tensor_slices(input_data)
    dataset = dataset.batch(batch_size=batch_size)
    params = ChannelPruningParameters(input_op_names=['input_1'], output_op_names=['predictions/Softmax'], data_set=dataset, batch_size=32, num_reconstruction_samples=50, allow_custom_downsample_ops=False, mode=ChannelPruningParameters.Mode.auto, params=auto_params, multiplicity=8)
    results = ModelCompressor.compress_model(sess, working_dir=None, eval_callback=evaluate_model, eval_iterations=10, input_shape=(32, 224, 224, 3), compress_scheme=CompressionScheme.channel_pruning, cost_metric=CostMetric.mac, parameters=params)
    (compressed_model, stats) = results
    print(compressed_model)
    print(stats)
class PaymentMockDriver(PaymentTerminalDriver):
    """Interactive mock payment terminal: the transaction outcome is typed
    on stdin instead of coming from real hardware."""
    def __init__(self):
        super(PaymentMockDriver, self).__init__()
        # A single fake terminal '0' is always reported as connected.
        self._set_terminal_status(terminal_id='0', status='connected')
    def transaction_start(self, data):
        """Start a transaction and settle it from an interactive prompt."""
        info = data['payment_info']
        tx_id = data['transaction_id']
        terminal = info.get('terminal_id', '0')
        app.logger.info('payment mock driver transaction start for terminal %s: %s', terminal, info)
        answer = input("status: enter 'ok' for sucess, or any other status: ")
        if answer == 'ok':
            self.end_transaction(terminal, tx_id, success=True)
            return
        self.end_transaction(terminal, tx_id, success=False, status=answer, reference='')
class NonLinearProgram():
    """Per-phase container of an optimal control program: symbolic variables,
    dynamics, bounds, initial guesses, objectives and constraints.

    NOTE(review): several members are used as if decorated —
    ``self.n_states_nodes`` is read without parentheses in
    ``n_algebraic_states_nodes``; ``add``, ``__setattr``, ``mx_to_cx`` and
    ``to_casadi_func`` take no ``self``; ``n_algebraic_states_decision_steps``
    has no ``self`` parameter at all.  The ``@property`` / ``@staticmethod``
    decorators appear stripped from this copy; confirm against the original.
    """
    def __init__(self, phase_dynamics: PhaseDynamics):
        """Create an empty phase; contents are filled in by the OCP setup."""
        # -- casadi helpers and dynamics --
        self.casadi_func = {}
        self.contact_forces_func = None
        self.soft_contact_forces_func = None
        self.control_type = ControlType.CONSTANT
        self.cx = None
        self.dt = None
        self.dynamics = None
        self.extra_dynamics = []
        self.dynamics_evaluation = DynamicsEvaluation()
        self.dynamics_func: list = []
        self.implicit_dynamics_func: list = []
        self.dynamics_type = None
        self.external_forces: (list[(list[(Any, ...)], ...)] | None) = None
        # -- constraints (g*) and objectives (J*) --
        self.g = []
        self.g_internal = []
        self.g_implicit = []
        self.J = []
        self.J_internal = []
        self.model: ((((BioModel | StochasticBioModel) | HolonomicBioModel) | VariationalBioModel) | None) = None
        self.n_threads = None
        # ns is the number of shooting intervals of this phase.
        self.ns = None
        self.ode_solver = OdeSolver.RK4()
        self.parameters = []
        self.par_dynamics = None
        self.phase_idx = None
        self.phase_mapping = None
        self.plot = {}
        self.plot_mapping = {}
        self.T = None
        self.variable_mappings = {}
        # -- controls (u/U) --
        self.u_bounds = BoundsList()
        self.u_init = InitialGuessList()
        self.U_scaled = None
        self.u_scaling = None
        self.U = None
        # -- cross-phase node mappings --
        self.use_states_from_phase_idx = NodeMapping()
        self.use_controls_from_phase_idx = NodeMapping()
        self.use_states_dot_from_phase_idx = NodeMapping()
        # -- states (x/X) --
        self.x_bounds = BoundsList()
        self.x_init = InitialGuessList()
        self.X_scaled = None
        self.x_scaling = None
        self.X = None
        # -- algebraic states (a/A) --
        self.a_bounds = BoundsList()
        self.a_init = InitialGuessList()
        self.A = None
        self.A_scaled = None
        self.a_scaling = None
        self.phase_dynamics = phase_dynamics
        # -- time-related symbols --
        self.time_index = None
        self.time_cx = None
        self.time_mx = None
        self.dt = None
        self.dt_mx = None
        self.tf = None
        self.tf_mx = None
        # -- optimization variable containers --
        self.states = OptimizationVariableContainer(self.phase_dynamics)
        self.states_dot = OptimizationVariableContainer(self.phase_dynamics)
        self.controls = OptimizationVariableContainer(self.phase_dynamics)
        self.algebraic_states = OptimizationVariableContainer(self.phase_dynamics)
        self.integrated_values = OptimizationVariableContainer(self.phase_dynamics)
    def initialize(self, cx: ((MX | SX) | Callable)=None):
        """Reset penalties and initialize every variable container for the
        phase's ns+1 shooting nodes using the casadi type *cx*."""
        self.plot = {}
        self.cx = cx
        self.J = []
        self.g = []
        self.g_internal = []
        self.casadi_func = {}
        self.states.initialize_from_shooting(n_shooting=(self.ns + 1), cx=self.cx)
        self.states_dot.initialize_from_shooting(n_shooting=(self.ns + 1), cx=self.cx)
        self.controls.initialize_from_shooting(n_shooting=(self.ns + 1), cx=self.cx)
        self.algebraic_states.initialize_from_shooting(n_shooting=(self.ns + 1), cx=self.cx)
        self.integrated_values.initialize_from_shooting(n_shooting=(self.ns + 1), cx=self.cx)
    def n_states_nodes(self) -> int:
        """Number of state nodes (shooting intervals + 1)."""
        return (self.ns + 1)
    def n_states_decision_steps(self, node_idx) -> int:
        """Number of decision steps for the states at *node_idx*."""
        if (node_idx >= self.ns):
            return 1
        return (self.dynamics[node_idx].shape_xf[1] + (1 if self.ode_solver.duplicate_starting_point else 0))
    def n_states_stepwise_steps(self, node_idx) -> int:
        """Number of stepwise integration steps for the states at *node_idx*."""
        if (node_idx >= self.ns):
            return 1
        return self.dynamics[node_idx].shape_xall[1]
    def n_controls_nodes(self) -> int:
        """Number of control nodes; one extra node for control types that
        also define a control at the last node."""
        mod = (1 if (self.control_type in (ControlType.LINEAR_CONTINUOUS, ControlType.CONSTANT_WITH_LAST_NODE)) else 0)
        return (self.ns + mod)
    def n_controls_steps(self, node_idx) -> int:
        """Number of control values per interval for the control type."""
        if (self.control_type == ControlType.CONSTANT):
            return 1
        elif (self.control_type == ControlType.CONSTANT_WITH_LAST_NODE):
            return 1
        elif (self.control_type == ControlType.LINEAR_CONTINUOUS):
            # Linear interpolation needs the value at both interval ends.
            return 2
        else:
            raise RuntimeError('Not implemented yet')
    def n_algebraic_states_nodes(self) -> int:
        """Number of algebraic-state nodes (mirrors the state nodes)."""
        # NOTE(review): n_states_nodes is read without calling — only valid
        # if it is a property (decorator apparently stripped).
        return self.n_states_nodes
    def n_algebraic_states_decision_steps(node_idx) -> int:
        """Algebraic states always have a single decision step per node."""
        return 1
    def add(ocp, param_name: str, param: Any, duplicate_singleton: bool, _type: Any=None, name: str=None):
        """Spread *param* over every nlp of *ocp*.

        A list/OptionList is distributed per phase (a singleton is reused
        for all phases only when *duplicate_singleton* is True); a scalar is
        copied to every phase.  When *_type* is given, stored list values
        are type-checked.
        """
        if isinstance(param, (OptionList, list, tuple)):
            if ((len(param) == 1) and (ocp.n_phases != 1) and (not duplicate_singleton)):
                raise RuntimeError(f'{param_name} size({len(param)}) does not correspond to the number of phases({ocp.n_phases}).')
            for i in range(ocp.n_phases):
                # Reuse index 0 when a singleton is being duplicated.
                cmp = (0 if (len(param) == 1) else i)
                NonLinearProgram.__setattr(ocp.nlp[i], name, param_name, param[cmp])
        else:
            if ((ocp.n_phases != 1) and (not duplicate_singleton)):
                raise RuntimeError(f'{param_name} size({(1 if isinstance(param, int) else len(param))}) does not correspond to the number of phases({ocp.n_phases}).List length of model, final time and node shooting must be equivalent to phase number')
            for i in range(ocp.n_phases):
                NonLinearProgram.__setattr(ocp.nlp[i], name, param_name, param)
        if (_type is not None):
            for nlp in ocp.nlp:
                # NOTE(review): the final membership test is an awkward
                # "some element is not of _type" check — preserved as-is.
                if ((((name is None) and (getattr(nlp, param_name) is not None)) or ((name is not None) and (param is not None))) and (not isinstance(param, _type)) and isinstance(param, (list, tuple)) and (False in [False for i in param if (not isinstance(i, _type))])):
                    raise RuntimeError(f'Parameter {param_name} must be a {str(_type)}')
    def __setattr(nlp, name: (str | None), param_name: str, param: Any):
        """Set ``nlp.param_name`` directly, or ``nlp.name[param_name]`` when
        *name* is given (dict-style storage)."""
        if (name is None):
            setattr(nlp, param_name, param)
        else:
            getattr(nlp, name)[param_name] = param
    def add_casadi_func(self, name: str, function: ((Callable | SX) | MX), *all_param: Any) -> casadi.Function:
        """Memoize and return the casadi Function *name* built from *function*."""
        if (name in self.casadi_func):
            return self.casadi_func[name]
        else:
            # Use the MX form of OptimizationVariable parameters.
            mx = [(var.mx if isinstance(var, OptimizationVariable) else var) for var in all_param]
            self.casadi_func[name] = self.to_casadi_func(name, function, *mx)
            return self.casadi_func[name]
    def mx_to_cx(name: str, symbolic_expression: ((SX | MX) | Callable), *all_param: Any) -> Function:
        """Evaluate *symbolic_expression* (built on MX) on the cx form of the
        given optimization variables/parameters."""
        from ..optimization.optimization_variable import OptimizationVariable, OptimizationVariableList
        from ..optimization.parameters import Parameter, ParameterList
        cx_types = (OptimizationVariable, OptimizationVariableList, Parameter, ParameterList)
        mx = [(var.mx if isinstance(var, cx_types) else var) for var in all_param]
        # Collect the cx counterpart of each parameter, applying variable
        # mappings where available.
        cx = []
        for var in all_param:
            if hasattr(var, 'mapping'):
                cx += [var.mapping.to_second.map(var.cx_start)]
            elif hasattr(var, 'cx_start'):
                cx += [var.cx_start]
            else:
                cx += [var.cx]
        return NonLinearProgram.to_casadi_func(name, symbolic_expression, *mx)(*cx)
    def to_casadi_func(name, symbolic_expression: ((MX | SX) | Callable), *all_param, expand=True) -> Function:
        """Build (and optionally expand) a casadi Function named *name* from a
        symbolic expression or a callable evaluated on *all_param*."""
        # Only symbolic (MX/SX) parameters become Function inputs.
        cx_param = []
        for p in all_param:
            if isinstance(p, (MX, SX)):
                cx_param.append(p)
        if isinstance(symbolic_expression, (MX, SX, Function)):
            func_evaluated = symbolic_expression
        else:
            func_evaluated = symbolic_expression(*all_param)
        # Normalize the result to a single MX expression.
        if isinstance(func_evaluated, (list, tuple)):
            func_evaluated = horzcat(*[(val if isinstance(val, MX) else val.to_mx()) for val in func_evaluated])
        elif (not isinstance(func_evaluated, MX)):
            func_evaluated = func_evaluated.to_mx()
        func = Function(name, cx_param, [func_evaluated])
        if expand:
            try:
                func = func.expand()
            except Exception as me:
                raise RuntimeError(f'''An error occurred while executing the 'expand()' function for {name}. Please review the following casadi error message for more details.
Several factors could be causing this issue. If you are creating your own casadi function, it is possible that you have free variables. Another possibility, if you are using a predefined function, the error might be due to the inability to use expand=True at all. In that case, try adding expand=False to the dynamics or the penalty.
Original casadi error message:
{me}''')
        # NOTE(review): when expand is True the function is expanded a second
        # time here — looks redundant but is preserved verbatim.
        return (func.expand() if expand else func)
def test_incompatible_ok(hatch, helpers, temp_dir_data, config_file):
    """`hatch env prune` succeeds silently even when the default environment
    declares a platform the current host does not match."""
    # Generate the project without the tests template so only the default env exists.
    config_file.model.template.plugins['default']['tests'] = False
    config_file.save()

    with temp_dir_data.as_cwd():
        creation = hatch('new', 'My.App')
    assert creation.exit_code == 0, creation.output

    project_path = temp_dir_data / 'my-app'
    project = Project(project_path)
    # An unknown platform ('foo') makes the environment incompatible everywhere.
    helpers.update_project_environment(
        project, 'default', {'skip-install': True, 'platforms': ['foo'], **project.config.envs['default']}
    )

    with project_path.as_cwd():
        pruning = hatch('env', 'prune')
    assert pruning.exit_code == 0, pruning.output
    assert not pruning.output
class PersistentController(YadageController):
    """Yadage controller whose workflow state lives in a persistent model.

    Every mutating operation runs inside a load -> mutate -> validate ->
    commit transaction so the persisted state stays consistent.
    """

    # Class-scope import keeps this block self-contained (module-level
    # imports are outside this block).
    from contextlib import contextmanager as _contextmanager

    def __init__(self, model, backend=None):
        """:param model: persistence object exposing load()/commit().
        :param backend: adage task backend forwarded to the base controller."""
        self.model = model
        super(PersistentController, self).__init__(self.model.load(), backend)

    @_contextmanager
    def transaction(self, sync=True):
        """Context manager: load fresh state, optionally sync the backend,
        yield to the caller's block, then validate and commit the state.

        BUG FIX: restored the ``@contextmanager`` decorator — every caller in
        this class uses ``with self.transaction():``, which fails on a plain
        generator function.
        """
        self.adageobj = self.model.load()
        if sync:
            log.debug('syncing to setup tx %s', self)
            super(PersistentController, self).sync_backend()
        yield
        isvalid = self.validate()
        if not isvalid:
            # Original behavior: warn on invalid state but still commit.
            log.warning('commit is invalid %s', isvalid)
        if sync:
            log.debug('syncing to teardown tx %s', self)
            super(PersistentController, self).sync_backend()
        self.model.commit(self.adageobj)

    def submit_nodes(self, nodeids):
        """Submit the DAG nodes with the given identifiers to the backend."""
        log.debug('transaction to submit')
        with self.transaction():
            nodes = [self.adageobj.dag.getNode(nodeid) for nodeid in nodeids]
            super(PersistentController, self).submit_nodes(nodes)

    def apply_rules(self, ruleids):
        """Apply the workflow rules with the given identifiers."""
        log.debug('transaction to apply')
        with self.transaction():
            rules = [r for r in self.adageobj.rules if (r.identifier in ruleids)]
            super(PersistentController, self).apply_rules(rules)

    def sync_backend(self):
        """Sync backend state inside a transaction (skipping the transaction's
        own pre/post sync to avoid doing it twice)."""
        log.debug('transaction to sync but (without sync in tx)')
        with self.transaction(sync=False):
            super(PersistentController, self).sync_backend()

    def applicable_rules(self):
        """Return identifiers (not objects) of currently applicable rules."""
        applicable_rules = super(PersistentController, self).applicable_rules()
        return [x.identifier for x in applicable_rules]

    def submittable_nodes(self):
        """Return identifiers (not objects) of currently submittable nodes."""
        submittable_nodes = super(PersistentController, self).submittable_nodes()
        return [x.identifier for x in submittable_nodes]

    def add_rules(self, rulespecs, dataarg, offset='', groupname=None, dataopts=None):
        """Add JSON-defined stages backed by a state provider built from
        *dataarg*/*dataopts*, attached at *offset* under *groupname*."""
        log.debug('adding %s rules', len(rulespecs))
        from .stages import JsonStage
        from .state_providers import state_provider_from_string
        sp = state_provider_from_string(dataarg, dataopts)
        rules = [JsonStage(json, sp) for json in rulespecs]
        with self.transaction():
            self.adageobj.view(offset).addWorkflow(rules, groupname)

    def patch_rule(self, ruleid, patchspec):
        """Replace the stagespec of the rule with the given identifier."""
        with self.transaction():
            rule = [x for x in self.adageobj.rules if (x.identifier == ruleid)][0]
            rule.rule.stagespec = patchspec

    def undo_rules(self, ruleids):
        """Undo the effects of the given rules (delegates to the module-level helper)."""
        with self.transaction():
            undo_rules(self.adageobj, ruleids)

    def remove_rules(self, ruleids):
        """Remove the given rules from the workflow (module-level helper)."""
        with self.transaction():
            remove_rules(self.adageobj, ruleids)

    def reset_nodes(self, nodeids):
        """Reset the given nodes plus everything downstream of them."""
        to_reset = (nodeids + collective_downstream(self.adageobj, nodeids))
        with self.transaction():
            reset_steps(self.adageobj, to_reset)
class Class_Decode():
def func_url(self, encode_type, source_text):
try:
result_text = str(urllib.parse.unquote(source_text, encode_type))
except Exception as e:
return [0, '', 'Url']
return [1, result_text.strip(), 'Url']
def func_unicode(self, encode_type, source_text):
try:
result_text = bytes(source_text, encoding=encode_type).decode('unicode_escape')
except Exception as e:
return [0, '', 'Unicode']
return [1, result_text.strip(), 'Unicode']
def func_escape_u(self, encode_type, source_text):
try:
text = source_text.replace('%u', '\\u').replace('%U', '\\u')
result_text = bytes(text, encoding=encode_type).decode('unicode_escape')
except Exception as e:
return [0, '', 'Escape_u']
return [1, result_text.strip(), 'Escape_u']
def func_html(self, encode_type, source_text):
try:
result_text = html.unescape(source_text)
except Exception as e:
return [0, '', 'html']
return [1, result_text.strip(), 'html']
def get_split_data(self, text):
if (':' in text):
text = text.split(':')
elif (' ' in text):
text = text.split(' ')
elif (';' in text):
text = text.split(';')
elif (',' in text):
text = text.split(',')
else:
list22 = []
list22.append(text)
text = list22
return text
def func_ASCII_2(self, encode_type, source_text):
try:
text = self.get_split_data(source_text)
result = ''
for i in text:
if (i != ''):
result = (result + chr(int(i, 2)))
except Exception as e:
return [0, '', 'ASCII(2)']
return [1, result, 'ASCII(2)']
def func_ASCII_8(self, encode_type, source_text):
try:
text = self.get_split_data(source_text)
result = ''
for i in text:
if (i != ''):
result = (result + chr(int(i, 8)))
except Exception as e:
return [0, '', 'ASCII(8)']
return [1, result, 'ASCII(8)']
def func_ASCII_10(self, encode_type, source_text):
try:
text = self.get_split_data(source_text)
result = ''
for i in text:
if (i != ''):
result = (result + chr(int(i)))
except Exception as e:
return [0, '', 'ASCII(10)']
return [0, result, 'ASCII(10)']
def func_ASCII_16(self, encode_type, source_text):
try:
text = self.get_split_data(source_text)
result = ''
for i in text:
if (i != ''):
result = (result + chr(int(i, 16)))
except Exception as e:
return [0, '', 'ASCII(16)']
return [1, result.strip(), 'ASCII(16)']
def func_jsfuck(self, encode_type, source_text):
    """Decode JSFuck by eval-ing it with an embedded JS helper through execjs.

    *encode_type* is unused. NOTE(review): unlike the sibling decoders there
    is no try/except here — any execjs failure propagates to the caller;
    confirm whether that is intentional.
    """
    ctx = execjs.compile("\n function decode(source) {\n output = '' \n if (source.length > 0) \n {\n l = ''\n\n if (source.length > 3 && source.slice(source.length-3) == ')()')\n {\n //eval-ed\n s = source.slice(0, source.length - 2)\n i = s.length\n\n //first try\n while (i--) {\n //if ((l = s.slice(i)).split(')').length == l.split('(').length) break\n l = s.slice(i)\n if (l.split(')').length == l.split('(').length) {\n break;\n }\n }\n //\n }\n else\n {\n l = source;\n }\n\n txtResult = eval(l)\n return txtResult\n\n }\n }\n ")
    return [1, ctx.call('decode', source_text), 'Jsfuck']
def func_jjencode(self, encode_type, source_text):
js = '\n\n var result =\'\'\n function jjdecode(t)\n { \n //get string from src\n\n\n //clean it\n t.replace(/^\\s+|\\s+$/g, "");\n\n var startpos;\n var endpos;\n var gv;\n var gvl;\t\n\n if (t.indexOf("\\"\\\'\\\\\\"+\\\'+\\",") == 0) //palindrome check\n {\n //locate jjcode\n startpos\t= t.indexOf(\'$$+"\\\\""+\') + 8;\n endpos\t\t= t.indexOf(\'"\\\\"")())()\');\n\n //get gv\n gv\t= t.substring((t.indexOf(\'"\\\'\\\\"+\\\'+",\')+9), t.indexOf("=~[]"));\n gvl\t= gv.length;\n }\n else\n {\n //get gv\n gv\t= t.substr(0, t.indexOf("="));\n gvl\t= gv.length;\n\n //locate jjcode\n startpos\t= t.indexOf(\'"\\\\""+\') + 5;\n endpos\t\t= t.indexOf(\'"\\\\"")())()\');\t\n }\n\n if (startpos == endpos)\n {\n alert("No data !");\n return;\n }\n\n //start decoding\n var data = t.substring(startpos, endpos);\n\n //hex decode string\n var b=[ "___+", "__$+", "_$_+", "_$$+", "$__+", "$_$+", "$$_+", "$$$+", "$___+", "$__$+", "$_$_+", "$_$$+", "$$__+", "$$_$+", "$$$_+", "$$$$+" ];\n\n //lotu\n var str_l = "(![]+\\"\\")[" + gv + "._$_]+";\n var str_o = gv + "._$+";\n var str_t = gv + ".__+";\n var str_u = gv + "._+";\n\n //abcdef\n var str_hex = gv + ".";\n\n //s\n var str_s = \'"\';\n var gvsig = gv + ".";\n\n var str_quote = \'"\';\n var str_slash = \'';\n\n var str_lower = ""+";\n var str_upper = ""+" + gv + "._+";\n\n var str_end\t= \'"+\'; //end of s loop\n\n\n\n while(data != "")\n {\n //l o t u\n if (0 == data.indexOf(str_l))\n {\n data = data.substr(str_l.length);\n out("l");\n continue;\n }\n else if (0 == data.indexOf(str_o))\n {\n data = data.substr(str_o.length);\n out("o");\n continue;\n }\n else if (0 == data.indexOf(str_t))\n {\n data = data.substr(str_t.length);\n out("t");\n continue;\n }\n else if (0 == data.indexOf(str_u))\n {\n data = data.substr(str_u.length);\n out("u");\n continue;\n }\n\n //abcdef\n if (0 == data.indexOf(str_hex))\n {\n data = data.substr(str_hex.length);\n\n //check every element of hex decode string for a match \n var i = 0;\t\t\t\t\t\t\n 
for (i = 0; i < b.length; i++)\n {\n if (0 == data.indexOf(b[i]))\n {\n data = data.substr( (b[i]).length );\n out(i.toString(16));\n break;\n }\n }\n continue;\n }\n\n //start of s block\n if (0 == data.indexOf(str_s))\n {\n data = data.substr(str_s.length);\n\n //check if "R\n if (0 == data.indexOf(str_upper)) // r4 n >= 128\n {\n data = data.substr(str_upper.length); //skip sig\n\n var ch_str = "";\t\t\t\t\n for (j = 0; j < 2; j++) //shouldn\'t be more than 2 hex chars\n {\n //gv + "."+b[ c ]\t\t\t\t\n if (0 == data.indexOf(gvsig))\n {\n data = data.substr(gvsig.length); //skip gvsig\t\n\n for (k = 0; k < b.length; k++)\t//for every entry in b\n {\t\t\t\t\t\t\n if (0 == data.indexOf(b[k]))\n {\n data = data.substr(b[k].length);\n ch_str += k.toString(16) + "";\t\t\t\t\t\t\t\n break;\n }\n }\t\t\t\t\t\t\n }\n else\n {\n break; //done\n }\t\t\t\t\t\t\t\t\n }\n\n out(String.fromCharCode(parseInt(ch_str,16)));\n continue;\n }\n else if (0 == data.indexOf(str_lower)) //r3 check if "R // n < 128\n {\n data = data.substr(str_lower.length); //skip sig\n\n var ch_str = "";\n var ch_lotux = ""\n var temp = "";\n var b_checkR1 = 0;\n for (j = 0; j < 3; j++) //shouldn\'t be more than 3 octal chars\n {\n\n if (j > 1) //lotu check\n {\t\t\t\t\t\t\t\t\n if (0 == data.indexOf(str_l))\n {\n data = data.substr(str_l.length);\n ch_lotux = "l";\n break;\n }\n else if (0 == data.indexOf(str_o))\n {\n data = data.substr(str_o.length);\n ch_lotux = "o";\n break;\n }\n else if (0 == data.indexOf(str_t))\n {\n data = data.substr(str_t.length);\n ch_lotux = "t";\n break;\n }\n else if (0 == data.indexOf(str_u))\n {\n data = data.substr(str_u.length);\n ch_lotux = "u";\n break;\n }\t\t\t\t\t\t\n }\n\n //gv + "."+b[ c ]\t\t\t\t\t\t\t\n if (0 == data.indexOf(gvsig))\n {\n temp = data.substr(gvsig.length); \n for (k = 0; k < 8; k++)\t//for every entry in b octal\n {\t\t\t\t\t\t\n if (0 == temp.indexOf(b[k]))\n {\n if (parseInt(ch_str + k + "",8) > 128)\n {\n b_checkR1 = 1;\n break;\n 
}\t\t\t\t\t\t\t\t\n\n ch_str += k + "";\t\t\t\t\t\t\t\t\t\t\n data = data.substr(gvsig.length); //skip gvsig\n data = data.substr(b[k].length);\n break;\n }\n }\n\n if (1 == b_checkR1)\n {\n if (0 == data.indexOf(str_hex)) //abcdef\n {\n data = data.substr(str_hex.length);\n\n //check every element of hex decode string for a match \n var i = 0;\t\t\t\t\t\t\n for (i = 0; i < b.length; i++)\n {\n if (0 == data.indexOf(b[i]))\n {\n data = data.substr( (b[i]).length );\n ch_lotux = i.toString(16);\n break;\n }\n }\n\n break;\n }\n }\t\t\t\t\t\t\t\t\n }\n else\n {\t\t\t\t\t\t\t\t\n break; //done\n }\t\t\t\t\t\t\t\t\n }\n\n out(String.fromCharCode(parseInt(ch_str,8)) + ch_lotux);\n continue; //step out of the while loop\n }\n else //"S ----> "SR or "S+\n {\n\n // if there is, loop s until R 0r +\n // if there is no matching s block, throw error\n\n var match = 0;\n var n;\n\n //searching for mathcing pure s block\n while(true)\n {\n n = data.charCodeAt( 0 );\t\t\t\t\n if (0 == data.indexOf(str_quote))\n {\n data = data.substr(str_quote.length);\n out(\'"\');\n match += 1;\n continue;\n }\n else if (0 == data.indexOf(str_slash))\n {\n data = data.substr(str_slash.length);\n out(\'\\\\\');\n match += 1;\n continue;\n }\n else if (0 == data.indexOf(str_end))\t//reached end off S block ? +\n {\n if (match == 0)\n {\n return("+ no match S block: "+data);\n return;\n }\n data = data.substr(str_end.length);\n\n break; //step out of the while loop\n }\n else if (0 == data.indexOf(str_upper)) //r4 reached end off S block ? 
- check if "R n >= 128\n {\t\t\t\t\t\t\n if (match == 0)\n {\n return("no match S block n>128: "+data);\n return;\n }\n\n data = data.substr(str_upper.length); //skip sig\n\n var ch_str = "";\n var ch_lotux = "";\n for (j = 0; j < 10; j++) //shouldn\'t be more than 10 hex chars\n {\n\n if (j > 1) //lotu check\n {\t\t\t\t\t\t\t\t\n if (0 == data.indexOf(str_l))\n {\n data = data.substr(str_l.length);\n ch_lotux = "l";\n break;\n }\n else if (0 == data.indexOf(str_o))\n {\n data = data.substr(str_o.length);\n ch_lotux = "o";\n break;\n }\n else if (0 == data.indexOf(str_t))\n {\n data = data.substr(str_t.length);\n ch_lotux = "t";\n break;\n }\n else if (0 == data.indexOf(str_u))\n {\n data = data.substr(str_u.length);\n ch_lotux = "u";\n break;\n }\n }\n\n //gv + "."+b[ c ]\t\t\t\t\n if (0 == data.indexOf(gvsig))\n {\n data = data.substr(gvsig.length); //skip gvsig\n\n for (k = 0; k < b.length; k++)\t//for every entry in b\n {\t\t\t\t\t\t\n if (0 == data.indexOf(b[k]))\n {\n data = data.substr(b[k].length);\n ch_str += k.toString(16) + "";\t\t\t\t\t\t\t\n break;\n }\n }\t\t\t\t\t\t\n }\n else\n {\n break; //done\n }\t\t\t\t\t\t\t\t\n }\n\n out(String.fromCharCode(parseInt(ch_str,16)));\n break; //step out of the while loop\n }\n else if (0 == data.indexOf(str_lower)) //r3 check if "R // n < 128\n {\n if (match == 0)\n {\n return("no match S block n<128: "+data);\n return;\n }\n\n data = data.substr(str_lower.length); //skip sig\n\n var ch_str = "";\n var ch_lotux = ""\n var temp = "";\n var b_checkR1 = 0;\n for (j = 0; j < 3; j++) //shouldn\'t be more than 3 octal chars\n {\n\n if (j > 1) //lotu check\n {\t\t\t\t\t\t\t\t\n if (0 == data.indexOf(str_l))\n {\n data = data.substr(str_l.length);\n ch_lotux = "l";\n break;\n }\n else if (0 == data.indexOf(str_o))\n {\n data = data.substr(str_o.length);\n ch_lotux = "o";\n break;\n }\n else if (0 == data.indexOf(str_t))\n {\n data = data.substr(str_t.length);\n ch_lotux = "t";\n break;\n }\n else if (0 == 
data.indexOf(str_u))\n {\n data = data.substr(str_u.length);\n ch_lotux = "u";\n break;\n }\t\t\t\t\t\t\t\t\n }\n\n //gv + "."+b[ c ]\t\t\t\t\t\t\t\n if (0 == data.indexOf(gvsig))\n {\n temp = data.substr(gvsig.length); \n for (k = 0; k < 8; k++)\t//for every entry in b octal\n {\t\t\t\t\t\t\n if (0 == temp.indexOf(b[k]))\n {\n if (parseInt(ch_str + k + "",8) > 128)\n {\n b_checkR1 = 1;\n break;\n }\t\t\t\t\t\t\t\t\n\n ch_str += k + "";\t\t\t\t\t\t\t\t\t\t\n data = data.substr(gvsig.length); //skip gvsig\n data = data.substr(b[k].length);\n break;\n }\n }\n\n if (1 == b_checkR1)\n {\n if (0 == data.indexOf(str_hex)) //abcdef\n {\n data = data.substr(str_hex.length);\n\n //check every element of hex decode string for a match \n var i = 0;\t\t\t\t\t\t\n for (i = 0; i < b.length; i++)\n {\n if (0 == data.indexOf(b[i]))\n {\n data = data.substr( (b[i]).length );\n ch_lotux = i.toString(16);\n break;\n }\n }\n }\n }\t\t\t\t\t\t\t\t\n }\n else\n {\t\t\t\t\t\t\t\t\n break; //done\n }\t\t\t\t\t\t\t\t\n }\n\n out(String.fromCharCode(parseInt(ch_str,8)) + ch_lotux);\n break; //step out of the while loop\n }\t \n else if( (0x21 <= n && n <= 0x2f) || (0x3A <= n && n <= 0x40) || ( 0x5b <= n && n <= 0x60 ) || ( 0x7b <= n && n <= 0x7f ) )\n {\n out(data.charAt( 0 ));\n data = data.substr( 1 );\n match += 1;\n }\n\n }\t\t\t\n continue;\t\t\t\n }\n }\n\n return("no match : "+data);\n break;\n }\n return result\n\n }\n\n function out(s)\n {\n result+=s;\n\n }'
js_dr = js2py.EvalJs()
js_dr.execute(js)
result = js_dr.jjdecode(source_text)
if ('no match :' in result):
return [0, result.strip(), 'JJEncode']
return [1, result.strip(), 'JJEncode']
def func_aaencode(self, encode_type, source_text):
    """Decode AAEncode ("Japanese emoticon" JS obfuscation) via an embedded JS
    helper evaluated with js2py.

    *encode_type* is unused. NOTE(review): no try/except here — a js2py error
    (e.g. input not actually aaencoded) propagates to the caller; confirm
    whether that is intentional, as the sibling decoders swallow failures.
    """
    js = '\n function aadecode( text )\n {\n var evalPreamble = "() [\'_\'] ( () [\'_\'] (";\n var decodePreamble = "( () [\'_\'] (";\n var evalPostamble = ") ()) (\'_\');";\n var decodePostamble = ") ());";\n\n // strip beginning/ending space.\n text = text.replace(/^\\s*/, "").replace(/\\s*$/, "");\n\n // returns empty text for empty input.\n if (/^\\s*$/.test(text)) {\n return "";\n }\n // check if it is encoded.\n if (text.lastIndexOf(evalPreamble) < 0) {\n throw new Error("Given code is not encoded as aaencode.");\n }\n if (text.lastIndexOf(evalPostamble) != text.length - evalPostamble.length) {\n throw new Error("Given code is not encoded as aaencode.");\n }\n\n var decodingScript = text.replace(evalPreamble, decodePreamble).replace(evalPostamble, decodePostamble);\n return eval(decodingScript);\n }'
    js_dr = js2py.EvalJs()
    js_dr.execute(js)
    result = js_dr.aadecode(source_text)
    return [1, result.strip(), 'AAEncode']
def func_base16(self, encode_type, source_text):
try:
text = source_text.upper()
text = base64.b16decode(text.encode(encode_type))
result_text = str(text, encoding=encode_type)
except Exception as e:
return [0, '', 'Base16']
return [1, result_text.strip(), 'Base16']
def func_base32(self, encode_type, source_text):
try:
text = base64.b32decode(source_text.encode(encode_type))
result_text = str(text, encoding=encode_type)
except Exception as e:
return [0, '', 'Base32']
return [1, result_text.strip(), 'Base32']
def func_base36(self, encode_type, source_text):
try:
text = base36.dumps(int(source_text))
result_text = str(text)
except Exception as e:
return [0, '', 'Base36']
return [1, result_text.strip(), 'Base36']
def func_base58(self, encode_type, source_text):
try:
result_text = base58.b58decode(source_text).decode(encode_type)
except Exception as e:
return [0, '', 'Base58']
return [1, result_text.strip(), 'Base58']
def func_base62(self, encode_type, source_text):
try:
result_text = base62.decode(source_text)
except:
return [0, '', 'Base62']
return [1, str(result_text).strip(), 'Base62']
def func_base64(self, encode_type, source_text):
try:
text = base64.b64decode(source_text.encode(encode_type))
result_text = str(text, encoding=encode_type)
except Exception as e:
return [0, '', 'Base64']
return [1, result_text.strip(), 'Base64']
def func_base64_zidingyi(self, encode_type, source_text, n):
    """Base64-decode with a custom alphabet *n*, translated back to the
    standard alphabet first ("zidingyi" = custom).

    NOTE(review): STANDARD_ALPHABET here has only 54 characters (digits 0-9
    are missing), so maketrans requires *n* to be 54 chars long and digit
    positions are never remapped — confirm this is intentional for the
    target cipher. NOTE(review): on failure this returns str(e) in the text
    slot, unlike the sibling decoders which return '' — left as-is since
    callers may rely on it. *encode_type* is unused.
    """
    try:
        STANDARD_ALPHABET = b'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz+/'
        CUSTOM_ALPHABET = n.encode()
        # Map each custom-alphabet byte back onto the standard alphabet.
        DECODE_TRANS = bytes.maketrans(CUSTOM_ALPHABET, STANDARD_ALPHABET)
        result_text = base64.b64decode(source_text.encode().translate(DECODE_TRANS)).decode()
        return [1, result_text.strip(), 'Base64']
    except Exception as e:
        return [0, str(e), 'Base64']
def func_bae85_ASCII85(self, encode_type, source_text):
try:
result_text = base64.a85decode(source_text).decode(encode_type)
except Exception as e:
return [0, '', 'Base85(ASCII85)']
return [1, result_text.strip(), 'Base85(ASCII85)']
def func_bae85_RFC1924(self, encode_type, source_text):
try:
result_text = base64.b85decode(source_text).decode(encode_type)
except Exception as e:
return [0, '', 'Base85(RFC1924)']
return [1, result_text.strip(), 'Base85(RFC1924)']
def func_base91(self, encode_type, source_text):
try:
result_text = base91.decode(source_text).decode(encode_type)
except Exception as e:
return [0, '', 'Base91']
return [1, result_text.strip(), 'Base91']
def func_base92(self, encode_type, source_text):
try:
result_text = py3base92.decode(source_text)
except Exception as e:
return [0, '', 'Base92']
return [1, result_text.strip(), 'Base92']
def func_Hex_Str(self, encode_type, source_text):
try:
text = source_text.replace('0x', '').replace('0X', '')
result_text = str(bytes.fromhex(text), encoding=encode_type)
except Exception as e:
return [0, '', 'Hex->Str']
return [1, result_text.strip(), 'Hex-Str']
def func_shellcode(self, encode_type, source_text):
try:
text = source_text.lower()
if (('0x' in text) and ('\\x' not in text)):
text = text.split('0x')
elif (('\\x' in text) and ('0x' not in text)):
text = text.split('\\x')
else:
result_text = ',:\n\\x61\\x00\\x62\\x00\\x63\n0x610x000x620x000x63'
return [0, result_text, 'Shellcode']
result = ''
for i in text:
if (i != ''):
result = (result + chr(int(i, 16)))
result_text = result
except Exception as e:
return [0, '', 'Shellcode']
return [1, result_text.strip(), 'Shellcode']
def func_qwerty(self, encode_type, source_text):
try:
letter = {'q': 'a', 'w': 'b', 'e': 'c', 'r': 'd', 't': 'e', 'y': 'f', 'u': 'g', 'i': 'h', 'o': 'i', 'p': 'j', 'a': 'k', 's': 'l', 'd': 'm', 'f': 'n', 'g': 'o', 'h': 'p', 'j': 'q', 'k': 'r', 'l': 's', 'z': 't', 'x': 'u', 'c': 'v', 'v': 'w', 'b': 'x', 'n': 'y', 'm': 'z', 'Q': 'A', 'W': 'B', 'E': 'C', 'R': 'D', 'T': 'E', 'Y': 'F', 'U': 'G', 'I': 'H', 'O': 'I', 'P': 'J', 'A': 'K', 'S': 'L', 'D': 'M', 'F': 'N', 'G': 'O', 'H': 'P', 'J': 'Q', 'K': 'R', 'L': 'S', 'Z': 'T', 'X': 'U', 'C': 'V', 'V': 'W', 'B': 'X', 'N': 'Y', 'M': 'Z'}
result_text = ''
for i in range(0, len(source_text)):
if (source_text[i] != ' '):
result_text = (result_text + letter.get(source_text[i]))
else:
result_text = (result_text + ' ')
except Exception as e:
return [0, '', 'qwerty']
return [1, result_text.strip(), 'qwerty']
def func_Socialism(sellf, encode_type, source_text):
    """Decode the "core socialist values" cipher: map each input character to
    its index in the values string, fold the indices into hex digits, and
    URL-decode the reassembled %XX sequence.

    NOTE(review): the parameter is spelled ``sellf`` (sic) in the original;
    kept as-is. NOTE(review): ``values`` is empty here — it almost certainly
    held the non-ASCII values alphabet and was stripped during extraction;
    with '' any non-empty input raises an uncaught ValueError from
    ``.index()``. Restore it from the upstream source. *encode_type* unused.
    """
    values = ''
    duo = []
    for i in source_text:
        num = values.index(i)
        if (num == (- 1)):
            # Dead branch: str.index raises ValueError rather than returning -1.
            continue
        elif (num & 1):
            continue
        else:
            duo.append((num >> 1))
    # Re-fold the half-indices into hex nibbles (10 and 11+ act as shift markers).
    hexs = []
    i = 0
    while (i < len(duo)):
        if (duo[i] < 10):
            hexs.append(duo[i])
        elif (duo[i] == 10):
            i += 1
            hexs.append((duo[i] + 10))
        else:
            i += 1
            hexs.append((duo[i] + 6))
        i += 1
    res = ''.join([hex(i)[2:].upper() for i in hexs])
    if (len(res) == 0):
        return [0, '', '']
    # Interleave '%' before every nibble pair and URL-decode the result.
    splited = []
    for i in range(len(res)):
        if ((i & 1) == 0):
            splited.append('%')
        splited.append(res[i])
    result = urllib.parse.unquote(''.join(splited))
    return [1, result.strip(), '']
def func_jother(sellf, encode_type, source_text):
result = 'Jother,F12console,,'
return [0, result, 'jother']
def func_baijiaxing(sellf, encode_type, source_text):
    """Decode the "baijiaxing" (hundred family surnames) cipher into a magnet
    link's btih hash.

    NOTE(review): the parameter is spelled ``sellf`` (sic); kept as-is.
    NOTE(review): every key of CODE is '' — the original keys were Chinese
    surname characters and were stripped during extraction, so the dict
    literal collapses to a single entry and decoding cannot work until the
    keys are restored. The regex below appears to have lost its character
    class the same way ('[^-]+' was presumably '[^<surnames>]+'). Confirm
    against the upstream source. *encode_type* is unused.
    """
    CODE = {'': '0', '': '1', '': '2', '': '3', '': '4', '': '5', '': '6', '': '7', '': '8', '': '9', '': 'a', '': 'b', '': 'c', '': 'd', '': 'e', '': 'f', '': 'g', '': 'h', '': 'i', '': 'j', '': 'k', '': 'l', '': 'm', '': 'n', '': 'o', '': 'p', '': 'q', '': 'r', '': 's', '': 't', '': 'u', '': 'v', '': 'w', '': 'x', '': 'y', '': 'z', '': 'A', '': 'B', '': 'C', '': 'D', '': 'E', '': 'F', '': 'G', '': 'H', '': 'I', '': 'J', '': 'K', '': 'L', '': 'M', '': 'N', '': 'O', '': 'P', '': 'Q', '': 'R', '': 'S', '': 'T', '': 'U', '': 'V', '': 'W', '': 'X', '': 'Y', '': 'Z', '': '.', '': '-', '': '_', '': '+', '': '=', '': '/', '': '?', '': '#', '': '%', '': '&', '': '*'}
    # Strip everything outside the cipher alphabet, then map char-by-char.
    source_text = re.sub('[^-]+', '', source_text)
    cc = [CODE[i] for i in source_text]
    dd = ''.join(cc)
    if dd:
        # Present the decoded hash as a magnet link.
        return [1, ('magnet:?xt=urn:btih:' + dd), '']
    else:
        return [0, '', '']
def train_pipeline(root_path):
    """BasicSR-style training entry point: parse options, build dataloaders,
    model and loggers, run the epoch/iteration loop with periodic logging,
    checkpointing and validation, then save the final ("latest") model."""
    (opt, args) = parse_options(root_path, is_train=True)
    opt['root_path'] = root_path
    # Let cudnn benchmark kernels for fixed-size inputs.
    torch.backends.cudnn.benchmark = True
    # resume_state is None on a fresh run.
    resume_state = load_resume_state(opt)
    if (resume_state is None):
        make_exp_dirs(opt)
        # Only rank 0 creates/rotates the tensorboard directory, and not in debug runs.
        if (opt['logger'].get('use_tb_logger') and ('debug' not in opt['name']) and (opt['rank'] == 0)):
            mkdir_and_rename(osp.join(opt['root_path'], 'tb_logger', opt['name']))
    # Archive the option file next to the experiment for reproducibility.
    copy_opt_file(args.opt, opt['path']['experiments_root'])
    log_file = osp.join(opt['path']['log'], f"train_{opt['name']}_{get_time_str()}.log")
    logger = get_root_logger(logger_name='basicsr', log_level=logging.INFO, log_file=log_file)
    logger.info(get_env_info())
    logger.info(dict2str(opt))
    tb_logger = init_tb_loggers(opt)
    result = create_train_val_dataloader(opt, logger)
    (train_loader, train_sampler, val_loaders, total_epochs, total_iters) = result
    model = build_model(opt)
    if resume_state:
        # Restores optimizers/schedulers as well as counters.
        model.resume_training(resume_state)
        logger.info(f"Resuming training from epoch: {resume_state['epoch']}, iter: {resume_state['iter']}.")
        start_epoch = resume_state['epoch']
        current_iter = resume_state['iter']
    else:
        start_epoch = 0
        current_iter = 0
    msg_logger = MessageLogger(opt, current_iter, tb_logger)
    # Choose the dataloader prefetcher: CPU (default) or CUDA.
    prefetch_mode = opt['datasets']['train'].get('prefetch_mode')
    if ((prefetch_mode is None) or (prefetch_mode == 'cpu')):
        prefetcher = CPUPrefetcher(train_loader)
    elif (prefetch_mode == 'cuda'):
        prefetcher = CUDAPrefetcher(train_loader, opt)
        logger.info(f'Use {prefetch_mode} prefetch dataloader')
        # CUDA prefetching requires pinned host memory.
        if (opt['datasets']['train'].get('pin_memory') is not True):
            raise ValueError('Please set pin_memory=True for CUDAPrefetcher.')
    else:
        raise ValueError(f"Wrong prefetch_mode {prefetch_mode}.Supported ones are: None, 'cuda', 'cpu'.")
    logger.info(f'Start training from epoch: {start_epoch}, iter: {current_iter}')
    (data_timer, iter_timer) = (AvgTimer(), AvgTimer())
    start_time = time.time()
    for epoch in range(start_epoch, (total_epochs + 1)):
        # Reshuffle per epoch (matters in distributed mode).
        train_sampler.set_epoch(epoch)
        prefetcher.reset()
        train_data = prefetcher.next()
        while (train_data is not None):
            data_timer.record()
            current_iter += 1
            if (current_iter > total_iters):
                break
            model.update_learning_rate(current_iter, warmup_iter=opt['train'].get('warmup_iter', (- 1)))
            model.feed_data(train_data)
            model.optimize_parameters(current_iter)
            iter_timer.record()
            if (current_iter == 1):
                # Exclude warm-up from the message logger's ETA estimate.
                msg_logger.reset_start_time()
            # Periodic console/tensorboard logging.
            if ((current_iter % opt['logger']['print_freq']) == 0):
                log_vars = {'epoch': epoch, 'iter': current_iter}
                log_vars.update({'lrs': model.get_current_learning_rate()})
                log_vars.update({'time': iter_timer.get_avg_time(), 'data_time': data_timer.get_avg_time()})
                log_vars.update(model.get_current_log())
                msg_logger(log_vars)
            # Periodic checkpointing.
            if ((current_iter % opt['logger']['save_checkpoint_freq']) == 0):
                logger.info('Saving models and training states.')
                model.save(epoch, current_iter)
            # Periodic validation.
            if ((opt.get('val') is not None) and ((current_iter % opt['val']['val_freq']) == 0)):
                if (len(val_loaders) > 1):
                    logger.warning('Multiple validation datasets are *only* supported by SRModel.')
                for val_loader in val_loaders:
                    model.validation(val_loader, current_iter, tb_logger, opt['val']['save_img'])
            data_timer.start()
            iter_timer.start()
            train_data = prefetcher.next()
    consumed_time = str(datetime.timedelta(seconds=int((time.time() - start_time))))
    logger.info(f'End of training. Time consumed: {consumed_time}')
    logger.info('Save the latest model.')
    # epoch/current_iter of -1 means "latest" in model.save.
    model.save(epoch=(- 1), current_iter=(- 1))
    if (opt.get('val') is not None):
        for val_loader in val_loaders:
            model.validation(val_loader, current_iter, tb_logger, opt['val']['save_img'])
    if tb_logger:
        tb_logger.close()
def test_get_tag_by_manifest_id_multiple_tags_returns_latest(initialized_db):
    """When a manifest has both an old (expired) tag and a newer temporary
    tag, get_tag_by_manifest_id must return the newer one."""
    repo = model.repository.create_repository('devtable', 'newrepo', None)
    (manifest, _) = create_manifest_for_testing(repo, '1')
    # Backdate the manifest's existing tag by 24h and end it 5ms after it started.
    before_ms = (get_epoch_timestamp_ms() - (timedelta(hours=24).total_seconds() * 1000))
    count = Tag.update(lifetime_start_ms=before_ms, lifetime_end_ms=(before_ms + 5)).where((Tag.manifest == manifest.id)).execute()
    assert (count == 1)
    expired_tag = get_tag_by_manifest_id(repo.id, manifest.id)
    # Add a temporary tag valid for one hour from now.
    new_tag = create_temporary_tag_if_necessary(manifest, (get_epoch_timestamp_ms() + (3600 * 1000)))
    tag = get_tag_by_manifest_id(repo.id, manifest.id)
    assert (tag is not None)
    # The lookup must prefer the newer temporary tag over the expired one.
    assert (tag.id == new_tag.id)
    assert (tag.lifetime_end_ms > expired_tag.lifetime_end_ms)
class TestTracer(unittest.TestCase):
    def test_trace_async_module(self) -> None:
        """Symbolically trace a module whose submodule returns a LazyAwaitable
        and check the traced graph computes the same result as eager mode."""
        class NeedWait(LazyAwaitable[torch.Tensor]):
            # Awaitable whose wait() adds 3 to the wrapped tensor.
            def __init__(self, obj: torch.Tensor) -> None:
                super().__init__()
                self._obj = obj

            def _wait_impl(self) -> torch.Tensor:
                return (self._obj + 3)

        class MyAsyncModule(torch.nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, input) -> LazyAwaitable[torch.Tensor]:
                return NeedWait((input + 2))

        class AutoModel(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.sparse = MyAsyncModule()

            def forward(self, input: torch.Tensor) -> torch.Tensor:
                # Using the awaitable in torch.add forces it to resolve (+3).
                return torch.add(self.sparse(input), (input * 10))

        auto_model = AutoModel()
        auto_gm = symbolic_trace(auto_model)
        # The generated code must contain the traced operations in order.
        FileCheck().check('+ 2').check('NeedWait').check('* 10').run(auto_gm.code)
        input = torch.randn(3, 4)
        ref_out = auto_model(input)
        traced_out = auto_gm(input)
        self.assertTrue(torch.equal(ref_out, traced_out))
def test_L4_subcomp_index():
    """Structural RTLIR L4: a connection through an indexed subcomponent's
    port must appear as (SubCompAttr(ComponentIndex(...), 'out'), CurCompAttr(..., 'out'))."""
    a = CaseBits32ArrayConnectSubCompAttrComp.DUT()
    a.elaborate()
    a.apply(StructuralRTLIRGenL4Pass(gen_connections(a)))
    # Connection metadata is registered under the L2 pass key.
    connections = a.get_metadata(StructuralRTLIRGenL2Pass.connections)
    comp = CurComp(a, 's')
    # connections[10] is the b[1].out -> s.out connection under test.
    assert (connections[10] == (SubCompAttr(ComponentIndex(CurCompAttr(comp, 'b'), 1), 'out'), CurCompAttr(comp, 'out')))
def download(platforms, version, use_v8, max_workers, robust):
    """Fetch packages for *platforms* concurrently.

    :param platforms: iterable of platform names passed to ``_get_package``.
    :param max_workers: thread count; falsy means one thread per platform.
    :returns: dict mapping platform name -> downloaded file path for every
        platform ``_get_package`` reported as successful (non-None name).

    BUG FIX: with an empty *platforms* list the fallback produced
    ``ThreadPoolExecutor(max_workers=0)``, which raises ValueError; the
    fallback now has a floor of 1.
    """
    if not max_workers:
        # ThreadPoolExecutor requires max_workers >= 1.
        max_workers = max(len(platforms), 1)
    fetch = functools.partial(_get_package, version=version, robust=robust, use_v8=use_v8)
    archives = {}
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        for (pl_name, file_path) in pool.map(fetch, platforms):
            # _get_package signals a failed/skipped platform with a None name.
            if pl_name is not None:
                archives[pl_name] = file_path
    return archives
def contractreceivesecretreveal_from_event(event: DecodedEvent) -> ContractReceiveSecretReveal:
    """Translate a decoded SecretRevealed blockchain event into the
    corresponding ContractReceiveSecretReveal state change."""
    args = event.event_data['args']
    registry_address = SecretRegistryAddress(event.originating_contract)
    return ContractReceiveSecretReveal(
        secret_registry_address=registry_address,
        secrethash=args['secrethash'],
        secret=args['secret'],
        transaction_hash=event.transaction_hash,
        block_number=event.block_number,
        block_hash=event.block_hash,
    )
class CompoundModelProcessor(SourceProcessor):
    # Processor plugin key for 'CompoundModel' sources.
    __implements__ = 'CompoundModel'

    def process(self, sources, sandbox, nthreads=0):
        """Accumulate the surface displacement of all compound sources over
        the sandbox frame.

        :param sources: EllipsoidSource or PointCompoundSource instances;
            anything else raises AttributeError.
        :param nthreads: accepted for interface compatibility but unused here.
        :returns: dict with east/north/down displacement arrays (one value
            per frame pixel) and an empty 'processor_profile'.
        """
        result = {'processor_profile': dict(), 'displacement.e': np.zeros(sandbox.frame.npixel), 'displacement.n': np.zeros(sandbox.frame.npixel), 'displacement.d': np.zeros(sandbox.frame.npixel)}
        coords = sandbox.frame.coordinatesMeter
        for src in sources:
            # Dispatch on the concrete source type to the matching ce kernel.
            if isinstance(src, EllipsoidSource):
                res = ce.ECM(coords, **src.ECMParameters())
            elif isinstance(src, PointCompoundSource):
                res = ce.pointCDM(coords, **src.pointCDMParameters())
            else:
                raise AttributeError('Source of wrong type!')
            # res[0..2] are treated as (east, north, down) components —
            # assumed from the keys below; confirm against the ce API.
            result['displacement.e'] += res[0]
            result['displacement.n'] += res[1]
            result['displacement.d'] += res[2]
        return result
def get_best_routes(chain_state: ChainState, token_network_address: TokenNetworkAddress, one_to_n_address: Optional[OneToNAddress], from_address: InitiatorAddress, to_address: TargetAddress, amount: PaymentAmount, previous_address: Optional[Address], privkey: PrivateKey, our_address_metadata: AddressMetadata, pfs_proxy: PFSProxy) -> Tuple[(Optional[str], List[RouteState], Optional[UUID])]:
    """Return (error_msg, routes, pfs_feedback_token) for a payment.

    Tries a direct usable channel to the target first; otherwise falls back
    to querying the Pathfinding Service (which requires *one_to_n_address*).
    Exactly one of error_msg / routes is meaningful in the result.
    """
    token_network = views.get_token_network_by_address(chain_state, token_network_address)
    assert token_network, 'The token network must be validated and exist.'
    # Fast path: a direct, usable channel with the target short-circuits the PFS.
    if (Address(to_address) in token_network.partneraddresses_to_channelidentifiers.keys()):
        for channel_id in token_network.partneraddresses_to_channelidentifiers[Address(to_address)]:
            channel_state = token_network.channelidentifiers_to_channels[channel_id]
            payment_with_fee_amount = PaymentWithFeeAmount(amount)
            is_usable = channel.is_channel_usable_for_new_transfer(channel_state, payment_with_fee_amount, None)
            if (is_usable is channel.ChannelUsability.USABLE):
                address_to_address_metadata = {Address(from_address): our_address_metadata}
                # The target's metadata is still fetched from the PFS even for
                # a direct route (needed for transport addressing).
                try:
                    address_metadata = pfs_proxy.query_address_metadata(to_address)
                except ServiceRequestFailed as ex:
                    msg = f'''PFS returned an error while trying to fetch user information:
{ex}'''
                    log.error(msg)
                    return (msg, [], None)
                else:
                    address_to_address_metadata[Address(to_address)] = address_metadata
                # RouteState validates its inputs and may raise ValueError.
                try:
                    direct_route = RouteState(route=[Address(from_address), Address(to_address)], estimated_fee=FeeAmount(0), address_to_metadata=address_to_address_metadata)
                    return (None, [direct_route], None)
                except ValueError as ex:
                    return (str(ex), [], None)
    # No direct route: the PFS path needs the OneToN contract for IOU fees.
    if (one_to_n_address is None):
        msg = 'Pathfinding Service could not be used.'
        log.warning(msg)
        return (msg, [], None)
    # Flatten all our channels in this token network.
    channels = [token_network.channelidentifiers_to_channels[channel_id] for channels_to_partner in token_network.partneraddresses_to_channelidentifiers.values() for channel_id in channels_to_partner]
    for channel_state in channels:
        payment_with_fee_amount = PaymentWithFeeAmount(amount)
        is_usable = channel.is_channel_usable_for_new_transfer(channel_state, payment_with_fee_amount, None)
        if (is_usable is channel.ChannelUsability.USABLE):
            break
    else:
        # for/else: no usable channel at all, the PFS query would be pointless.
        return ('You have no suitable channel to initiate this payment.', [], None)
    # Have the PFS wait until it has seen our most recently opened channel.
    latest_channel_opened_at = 0
    for channel_state in token_network.channelidentifiers_to_channels.values():
        latest_channel_opened_at = max(latest_channel_opened_at, channel_state.open_transaction.finished_block_number)
    (pfs_error_msg, pfs_routes, pfs_feedback_token) = get_best_routes_pfs(chain_state=chain_state, token_network_address=token_network_address, one_to_n_address=one_to_n_address, from_address=from_address, to_address=to_address, amount=amount, previous_address=previous_address, privkey=privkey, pfs_wait_for_block=BlockNumber(latest_channel_opened_at), pfs_proxy=pfs_proxy)
    if pfs_error_msg:
        log.warning('Request to Pathfinding Service was not successful. No routes to the target were found.', pfs_message=pfs_error_msg)
        return (pfs_error_msg, [], None)
    if (not pfs_routes):
        return ('PFS could not find any routes', [], None)
    log.info('Received route(s) from PFS', routes=pfs_routes, feedback_token=pfs_feedback_token)
    return (pfs_error_msg, pfs_routes, pfs_feedback_token)
class PreconditionerTest():
    """Micro-benchmark harness for quantized CUDA matmul kernels.

    Builds half-precision test tensors on the GPU, pre-quantizes them, and
    times several kernel paths (two-layer quantized input/weight gradient
    GEMMs, Hadamard-quantized forward GEMM, and a plain fp16 GEMM baseline),
    appending the measured TFLOPS and per-stage timings to module-level
    result lists.

    NOTE(review): depends on module-level globals defined elsewhere in this
    file: ``mconfig`` (M/K/N dimensions, num_bits, group_size, testTurn),
    the Hadamard matrix table ``T``, the compiled extensions
    ``quantize_grad_input_speed`` / ``quantize_grad_weight_speed`` /
    ``quantize_forward_easy``, and the ``*_tflops`` / ``cuda_*_time``
    accumulator lists -- confirm they are initialized before instantiation.
    """

    def __init__(self):
        # Activation tensor: random top half, zero bottom half, concatenated
        # along dim 0 so that ``x`` has shape (K, M).
        self.x1 = (torch.randn(int((mconfig.K / 2)), mconfig.M).cuda().half() / 100)
        self.x2 = torch.zeros_like(self.x1)
        self.x = torch.cat([self.x1, self.x2], 0)
        self.y = (torch.randn(mconfig.K, mconfig.N).cuda().half() / 100)
        self.y_input = (torch.randn(mconfig.M, mconfig.N).cuda().half() / 100)
        # Number of quantization levels: 2**num_bits - 1.
        self.num_bins = ((2 ** mconfig.num_bits) - 1)
        self.num_bits = mconfig.num_bits
        # Symmetric int4-style quantization of y: scale maps the largest
        # magnitude to 7; values are clamped to [-8, num_bins - 8] and
        # rounded before the int8 cast.
        self.scale_y = (max(abs(self.y.min()), abs(self.y.max())) / 7)
        self.quantize_y = (self.y / self.scale_y)
        self.quantize_y.clamp_((- 8.0), (self.num_bins - 8)).round_()
        self.quantize_y = self.quantize_y.to(torch.int8)
        self.dequantize_y = (self.quantize_y * self.scale_y)
        # Same quantization recipe applied to y_input.
        self.scale_yinput = (max(abs(self.y_input.min()), abs(self.y_input.max())) / 7)
        self.quantize_yinput = (self.y_input / self.scale_yinput)
        self.quantize_yinput.clamp_((- 8.0), (self.num_bins - 8)).round_()
        self.quantize_yinput = self.quantize_yinput.to(torch.int8)
        self.dequantize_yinput = (self.quantize_yinput * self.scale_yinput)
        # Hadamard matrix for the configured group size; weights and inputs
        # are transformed group-wise (view -> matmul -> view back).
        self.hadamard = T[mconfig.group_size].half()
        self.weight = (torch.randn(mconfig.M, mconfig.N).cuda().half() / 50)
        self.hadamard_weight = self.weight.view((- 1), mconfig.group_size).matmul(self.hadamard).view(self.weight.shape)
        self.scale_weight = torch.randn(1).cuda().half()
        self.lsq_weight = (self.weight / self.scale_weight)
        self.input = (torch.randn(mconfig.K, mconfig.N).cuda().half() / 50)
        self.hadamard_input = self.input.view((- 1), mconfig.group_size).matmul(self.hadamard).view(self.input.shape)
        self.scale_input = torch.randn(1).cuda().half()
        self.lsq_input = (self.input / self.scale_input)

    def TwoLayerQuantizeInput_cuda_speed(self, input, inputList):
        """Time the fused CUDA kernel for the quantized input-gradient GEMM.

        Runs ``mconfig.testTurn + 1`` iterations, treating the first as a
        warm-up that is excluded from every accumulator.  Per-stage timings
        are read from the kernel's returned timing vector
        (``activation_out[4]``) and appended, together with the achieved
        TFLOPS, to the module-level result lists.
        """
        total_time = 0
        quantize_time = 0
        leverage_time = 0
        sample_time = 0
        pack_time = 0
        gemm_time = 0
        dequantize_time = 0
        LSQ_time = 0
        for i in range((mconfig.testTurn + 1)):
            # Synchronize so the host timer brackets only this iteration's
            # GPU work.
            torch.cuda.synchronize()
            time1 = time.time()
            activation_out = quantize_grad_input_speed.quantize(input, self.num_bits, self.quantize_yinput, self.scale_yinput, self.lsq_input, inputList[0], inputList[1], inputList[2], inputList[3], inputList[4])
            torch.cuda.synchronize()
            time2 = time.time()
            # Skip the warm-up iteration (i == 0).
            if (i >= 1):
                quantize_time += activation_out[4][0]
                leverage_time += activation_out[4][1]
                sample_time += activation_out[4][2]
                pack_time += activation_out[4][3]
                gemm_time += activation_out[4][4]
                dequantize_time += activation_out[4][5]
                LSQ_time += activation_out[4][6]
                total_time += (time2 - time1)
        print('quantize cuda speed:')
        # 2*M*K*N FLOPs per GEMM, over testTurn timed iterations.
        print('     Tflops is:', ((((((1e-12 * mconfig.M) * mconfig.K) * mconfig.N) * mconfig.testTurn) * 2) / total_time))
        print('total_time is:')
        print(total_time)
        print('gemm_time is:')
        print(gemm_time)
        twolayerInput_cuda_speed_tflops.append(((((((1e-12 * mconfig.M) * mconfig.K) * mconfig.N) * mconfig.testTurn) * 2) / total_time))
        cuda_input_quantize_time.append(quantize_time)
        cuda_input_leverage_time.append(leverage_time)
        cuda_input_sample_time.append(sample_time)
        cuda_input_pack_time.append(pack_time)
        cuda_input_gemm_time.append(gemm_time)
        cuda_input_dequantize_time.append(dequantize_time)
        cuda_input_LSQ_time.append(LSQ_time)

    def TwoLayerQuantizeWeight_cuda_speed(self, input):
        """Time the fused CUDA kernel for the quantized weight-gradient GEMM.

        Same warm-up/accumulation scheme as the input-gradient benchmark;
        per-stage timings come from ``weight_out[3]``.  Returns the last
        five elements of the kernel's output tuple (intermediate tensors
        reused by the caller -- exact semantics defined by the extension).
        """
        total_time = 0
        method2_time = 0
        method3_time = 0
        quantize_time = 0
        leverage_time = 0
        sample_time = 0
        pack_time = 0
        gemm_time = 0
        dequantize_time = 0
        LSQ_time = 0
        for i in range((mconfig.testTurn + 1)):
            torch.cuda.synchronize()
            time1 = time.time()
            weight_out = quantize_grad_weight_speed.quantize(input, self.num_bits, self.quantize_y, self.scale_y, self.lsq_weight)
            torch.cuda.synchronize()
            time2 = time.time()
            # Skip the warm-up iteration (i == 0).
            if (i >= 1):
                total_time += (time2 - time1)
                quantize_time += weight_out[3][0]
                leverage_time += weight_out[3][1]
                sample_time += weight_out[3][2]
                pack_time += weight_out[3][3]
                gemm_time += weight_out[3][4]
                dequantize_time += weight_out[3][5]
                LSQ_time += weight_out[3][6]
                # NOTE(review): index 7 of the timing vector is skipped here
                # -- presumably unused by design; confirm against the kernel.
                method2_time += weight_out[3][8]
                method3_time += weight_out[3][9]
        print('LSS cuda MM speed:')
        print('     Tflops is:', ((((((1e-12 * mconfig.M) * mconfig.K) * mconfig.N) * mconfig.testTurn) * 2) / total_time))
        print('total_time is:')
        print(total_time)
        print('gemm_time is:')
        print(gemm_time)
        twolayer_cuda_speed_tflops.append(((((((1e-12 * mconfig.M) * mconfig.K) * mconfig.N) * mconfig.testTurn) * 2) / total_time))
        cuda_quantize_time.append(quantize_time)
        cuda_leverage_time.append(leverage_time)
        cuda_sample_time.append(sample_time)
        cuda_pack_time.append(pack_time)
        cuda_gemm_time.append(gemm_time)
        cuda_dequantize_time.append(dequantize_time)
        cuda_LSQ_time.append(LSQ_time)
        return (weight_out[(- 5)], weight_out[(- 4)], weight_out[(- 3)], weight_out[(- 2)], weight_out[(- 1)])

    def Gemm_ordinary_python(self, x, y):
        """Time a plain fp16 ``x.t() @ y`` as the un-quantized baseline."""
        total_time = 0
        for i in range((mconfig.testTurn + 1)):
            time1 = time.time()
            out = x.t().matmul(y)
            torch.cuda.synchronize()
            time2 = time.time()
            # Skip the warm-up iteration (i == 0).
            if (i >= 1):
                total_time += (time2 - time1)
        print('fp16 gemm speed:')
        print('     Tflops is:', ((((((1e-12 * mconfig.M) * mconfig.K) * mconfig.N) * mconfig.testTurn) * 2) / total_time))
        print()
        python_ordgemm_flops.append(((((((1e-12 * mconfig.M) * mconfig.K) * mconfig.N) * mconfig.testTurn) * 2) / total_time))

    def HadmardQuantize_cuda_speed(self, x, y):
        """Time the Hadamard-transform + quantized forward GEMM path.

        The Hadamard transforms run in PyTorch (timed separately as
        ``hadamard_time``); quantize/pack/gemm/dequantize stage timings are
        read from the extension's returned vector ``out2[3]``.
        """
        hadamard_time = 0
        quantize_time = 0
        pack_time = 0
        gemm_time = 0
        dequantize_time = 0
        hadamard = T[mconfig.group_size].half()
        x_shape = x.shape
        x_batch = x.view((- 1), mconfig.group_size)
        y_shape = y.shape
        y_batch = y.view((- 1), mconfig.group_size)
        total_time = 0
        # Pre-compute the transforms once just to derive the quantization
        # scales used by every timed iteration.
        h_x = x_batch.matmul(hadamard).view(x_shape)
        h_y = y_batch.matmul(hadamard).view(y_shape)
        scale_hx = (max(abs(h_x.max()), abs(h_x.min())) / 7)
        scale_hy = (max(abs(h_y.max()), abs(h_y.min())) / 7)
        for i in range((mconfig.testTurn + 1)):
            time1 = time.time()
            h_x = x_batch.matmul(hadamard).view(x_shape)
            h_y = y_batch.matmul(hadamard).view(y_shape)
            torch.cuda.synchronize()
            time_flag = time.time()
            out2 = quantize_forward_easy.quantize(h_x, h_y, scale_hx, scale_hy)
            torch.cuda.synchronize()
            time2 = time.time()
            # Skip the warm-up iteration (i == 0).
            if (i >= 1):
                hadamard_time += (time_flag - time1)
                quantize_time += out2[3][0]
                pack_time += out2[3][1]
                gemm_time += out2[3][2]
                dequantize_time += out2[3][3]
                total_time += (time2 - time1)
        print('HQ cuda MM speed:')
        print('     Tflops is:', ((((((1e-12 * mconfig.M) * mconfig.K) * mconfig.N) * mconfig.testTurn) * 2) / total_time))
        print('total_time is:')
        print(total_time)
        print('gemm_time is:')
        print(gemm_time)
        hadamard_cuda_speed_tflops.append(((((((1e-12 * mconfig.M) * mconfig.K) * mconfig.N) * mconfig.testTurn) * 2) / total_time))
def parse_mit_splits():
    """Parse Moments in Time (MIT) annotation files into split lists.

    Reads the category mapping and the training/validation CSVs under
    ``data/mit/annotations/`` and returns a one-element tuple of
    ``(train_list, val_list, test_list)``, where each list holds
    ``(video_name_without_extension, int_label)`` pairs.  MIT ships no
    labeled test set, so the validation list doubles as the test list.

    Returns:
        tuple: ``((train_list, val_list, test_list),)``.
    """
    class_mapping = {}
    with open('data/mit/annotations/moments_categories.txt') as f_cat:
        for line in f_cat:
            (cat, digit) = line.rstrip().split(',')
            class_mapping[cat] = int(digit)

    def line_to_map(x):
        # x[0] looks like '<category>/<video>.<ext>'; the directory part is
        # the category name used to look up the integer label.
        video = osp.splitext(x[0])[0]
        label = class_mapping[osp.dirname(x[0])]
        return (video, label)

    # Use context managers so the CSV file handles are closed promptly;
    # the original passed bare open() calls to csv.reader and leaked them
    # until garbage collection.
    with open('data/mit/annotations/trainingSet.csv') as f_train:
        train_list = [line_to_map(x) for x in csv.reader(f_train)]
    with open('data/mit/annotations/validationSet.csv') as f_val:
        val_list = [line_to_map(x) for x in csv.reader(f_val)]
    test_list = val_list
    splits = ((train_list, val_list, test_list),)
    return splits
def calculate_metrics(y_true: np.ndarray, y_pred: np.ndarray, task_type: Union[(str, TaskType)], prediction_type: Optional[Union[(str, PredictionType)]], y_info: dict[(str, Any)]) -> dict[(str, Any)]:
    """Compute evaluation metrics for a regression or classification task.

    For regression, returns ``{'rmse': ...}`` (requires ``y_info['std']``
    and no prediction type).  For classification, returns scikit-learn's
    ``classification_report`` dict, augmented with ``'roc_auc'`` for
    binary tasks.
    """
    task_type = TaskType(task_type)
    if prediction_type is not None:
        prediction_type = PredictionType(prediction_type)

    if task_type == TaskType.REGRESSION:
        # Regression predictions are raw values; only RMSE (rescaled by the
        # target std) is reported.
        assert prediction_type is None
        assert 'std' in y_info
        return {'rmse': calculate_rmse(y_true, y_pred, y_info['std'])}

    # Classification: derive hard labels and probabilities, then build the
    # standard per-class report.
    (labels, probs) = _get_labels_and_probs(y_pred, task_type, prediction_type)
    report = skm.classification_report(y_true, labels, output_dict=True)
    result = cast(dict[(str, Any)], report)
    if task_type == TaskType.BINCLASS:
        result['roc_auc'] = skm.roc_auc_score(y_true, probs)
    return result
class _PatchWithDescription(codemod.Patch):
    """A ``codemod.Patch`` that carries an optional human-readable description."""

    def __init__(self, start_line_number: int, end_line_number: Optional[int]=None, new_lines: Optional[List[str]]=None, path: Optional[str]=None, description: Optional[str]=None) -> None:
        """Forward the patch fields to the base class and keep the description."""
        super().__init__(start_line_number, end_line_number, new_lines, path)
        self.description = description

    def render_range(self) -> str:
        """Return the base range text, suffixed with the description if set."""
        base = super().render_range()
        if self.description is None:
            return base
        return f'{base}: {self.description}'
class GRAFLoss(BaseLoss):
    """GAN loss for GRAF-style patch-based training.

    Implements the non-saturating logistic GAN loss with an optional R1
    gradient penalty on real images (controlled by ``r1_gamma``).

    ``run_G``, ``run_D`` and ``compute_grad_penalty`` are static methods:
    they take ``runner`` (or plain tensors) as their first explicit
    parameter and are invoked as ``self.run_G(runner, ...)`` /
    ``self.compute_grad_penalty(images=..., scores=...)``.  Without
    ``@staticmethod`` those calls would mis-bind ``self`` to the first
    parameter (and the keyword call would raise "multiple values for
    argument"), so the decorators are required for correctness.
    """

    def __init__(self, runner, d_loss_kwargs=None, g_loss_kwargs=None):
        """Read loss settings and register running statistics.

        Args:
            runner: Training runner providing models, stats and logging.
            d_loss_kwargs: Optional dict of discriminator-loss settings;
                supports ``r1_gamma`` (default 10.0, 0 disables the penalty).
            g_loss_kwargs: Optional dict of generator-loss settings.

        Raises:
            NotImplementedError: If AMP training is enabled on the runner.
        """
        if runner.enable_amp:
            raise NotImplementedError('GRAF loss does not support automatic mixed precision training yet.')
        self.d_loss_kwargs = (d_loss_kwargs or dict())
        self.r1_gamma = self.d_loss_kwargs.get('r1_gamma', 10.0)
        assert (self.r1_gamma >= 0.0)
        runner.running_stats.add('Loss/D Fake', log_name='loss_d_fake', log_format='.3f', log_strategy='AVERAGE')
        runner.running_stats.add('Loss/D Real', log_name='loss_d_real', log_format='.3f', log_strategy='AVERAGE')
        if (self.r1_gamma > 0.0):
            runner.running_stats.add('Loss/Real Gradient Penalty', log_name='loss_gp', log_format='.1e', log_strategy='AVERAGE')
        self.g_loss_kwargs = (g_loss_kwargs or dict())
        runner.running_stats.add('Loss/G', log_name='loss_g', log_format='.3f', log_strategy='AVERAGE')
        runner.logger.info('gradient penalty (D regularizer):', indent_level=1)
        runner.logger.info(f'r1_gamma: {self.r1_gamma}', indent_level=2)

    @staticmethod
    def run_G(runner, patch_grid=None, batch_size=None, sync=True, requires_grad=False):
        """Sample random latents (and labels, if conditional) and run G.

        ``sync`` controls DDP gradient synchronization via ``ddp_sync``.
        """
        batch_size = (batch_size or runner.batch_size)
        latent_dim = runner.models['generator'].z_dim
        label_dim = runner.models['generator'].label_dim
        latents = torch.randn((batch_size, latent_dim), device=runner.device, requires_grad=requires_grad)
        labels = None
        if (label_dim > 0):
            # Conditional generator: draw uniform random class labels.
            rnd_labels = torch.randint(0, label_dim, (batch_size,), device=runner.device)
            labels = F.one_hot(rnd_labels, num_classes=label_dim)
        G = runner.ddp_models['generator']
        G_kwargs = runner.model_kwargs_train['generator']
        with ddp_sync(G, sync=sync):
            return G(latents, labels, patch_grid, **G_kwargs)

    @staticmethod
    def run_D(runner, images, labels=None, sync=True):
        """Augment ``images`` and run the discriminator on them."""
        images = runner.augment(images, **runner.augment_kwargs)
        D = runner.ddp_models['discriminator']
        D_kwargs = runner.model_kwargs_train['discriminator']
        with ddp_sync(D, sync=sync):
            return D(images, labels, **D_kwargs)

    @staticmethod
    def compute_grad_penalty(images, scores):
        """Return the per-sample R1 penalty: squared grad norm of D w.r.t. images."""
        with conv2d_gradfix.no_weight_gradients():
            image_grad = torch.autograd.grad(outputs=[scores.sum()], inputs=[images], create_graph=True, retain_graph=True, only_inputs=True)[0]
        grad_penalty = image_grad.square().sum((1, 2, 3))
        return grad_penalty

    def g_loss(self, runner, _data, patch_grid=None, sync=True):
        """Non-saturating generator loss: softplus(-D(G(z)))."""
        fake_results = self.run_G(runner, patch_grid, sync=sync)
        fake_scores = self.run_D(runner, images=fake_results['image'], labels=None, sync=False)['score']
        g_loss = F.softplus((- fake_scores))
        runner.running_stats.update({'Loss/G': g_loss})
        return g_loss.mean()

    def d_fake_loss(self, runner, _data, patch_grid=None, sync=True):
        """Discriminator loss on generated samples: softplus(D(G(z)))."""
        fake_results = self.run_G(runner, patch_grid, sync=False)
        fake_scores = self.run_D(runner, images=fake_results['image'], labels=None, sync=sync)['score']
        d_fake_loss = F.softplus(fake_scores)
        runner.running_stats.update({'Loss/D Fake': d_fake_loss})
        return d_fake_loss.mean()

    def d_real_loss(self, runner, data, sync=True):
        """Discriminator loss on real samples: softplus(-D(x))."""
        real_images = data['image'].detach()
        real_scores = self.run_D(runner, images=real_images, labels=None, sync=sync)['score']
        d_real_loss = F.softplus((- real_scores))
        runner.running_stats.update({'Loss/D Real': d_real_loss})
        if hasattr(runner.augment, 'prob_tracker'):
            # Adaptive augmentation: track the sign of real scores.
            runner.augment.prob_tracker.update(real_scores.sign())
        return d_real_loss.mean()

    def d_reg(self, runner, data, sync=True):
        """Lazy R1 regularizer on real images; None when disabled."""
        if (self.r1_gamma == 0.0):
            return None
        real_images = data['image'].detach().requires_grad_(True)
        real_scores = self.run_D(runner, images=real_images, labels=None, sync=sync)['score']
        r1_penalty = self.compute_grad_penalty(images=real_images, scores=real_scores)
        runner.running_stats.update({'Loss/Real Gradient Penalty': r1_penalty})
        r1_penalty = (r1_penalty * (self.r1_gamma * 0.5))
        if hasattr(runner.augment, 'prob_tracker'):
            runner.augment.prob_tracker.update(real_scores.sign())
        # `real_scores * 0` keeps the discriminator outputs in the autograd
        # graph so DDP sees gradients for every parameter.
        return ((real_scores * 0) + r1_penalty).mean()
def snooze_issue(hostname, issue_name, snooze_until):
    """Snooze matching open issues until ``snooze_until``.

    Selects issues that are not closed and are either not snoozed yet or
    snoozed for a shorter period than requested; ``hostname`` and
    ``issue_name`` narrow the selection when truthy.

    Returns:
        list: The ``_id`` values of the issues that were updated (empty if
        nothing matched).
    """
    db = get_db()
    query = {
        'closed_at': {'$exists': False},
        '$or': [
            {'unsnooze_at': {'$exists': False}},
            {'unsnooze_at': {'$lt': snooze_until}},
        ],
    }
    if hostname:
        query['hostname'] = hostname
    if issue_name:
        query['name'] = issue_name
    matching_ids = [doc['_id'] for doc in db.issues.find(query, projection=['_id'])]
    if not matching_ids:
        return []
    # Record when the snooze was applied and when it should lift.
    update = {'$set': {'snoozed_at': datetime.datetime.now(), 'unsnooze_at': snooze_until}}
    db.issues.update_many({'_id': {'$in': matching_ids}}, update)
    return matching_ids
def wavelet_color_fix(target: Image, source: Image):
    """Transfer color statistics from ``source`` onto ``target`` via wavelets.

    Both PIL images are converted to (1, C, H, W) tensors and blended by
    ``wavelet_reconstruction``; the result is clamped to [0, 1] and
    converted back to a PIL image.
    """
    # PIL -> (1, C, H, W) float tensors.
    tensorize = ToTensor()
    target_batch = tensorize(target).unsqueeze(0)
    source_batch = tensorize(source).unsqueeze(0)
    # Combine target detail with source color content.
    blended = wavelet_reconstruction(target_batch, source_batch)
    return ToPILImage()(blended.squeeze(0).clamp_(0.0, 1.0))
class ObjectDeleteView(LoginRequiredMixin, ObjectDetailView, EvenniaDeleteView):
    """Confirm-and-delete view for in-game objects (requires login)."""

    # Typeclass backing this view, resolved from settings.
    model = class_from_module(settings.BASE_OBJECT_TYPECLASS)
    template_name = 'website/object_confirm_delete.html'
    access_type = 'delete'

    def delete(self, request, *args, **kwargs):
        """Delete the object, then flash a success message naming it."""
        # Capture the display name before deletion invalidates the object.
        name = str(self.get_object())
        response = super().delete(request, *args, **kwargs)
        messages.success(request, ("Successfully deleted '%s'." % name))
        return response
class WeightedAvgMetricTest(unittest.TestCase):
    """Distributed value tests for ``WeightedAvgMetric`` across compute modes.

    Each test delegates to ``rec_metric_value_test_launcher``, which spawns
    ``WORLD_SIZE`` ranks and compares the metric against the reference
    implementation ``TestWeightedAvgMetric``.
    """

    # Class-level configuration shared by the test cases below.
    target_clazz: Type[RecMetric] = WeightedAvgMetric
    target_compute_mode: RecComputeMode = RecComputeMode.UNFUSED_TASKS_COMPUTATION
    task_name: str = 'weighted_avg'

    def test_weighted_avg_unfused(self) -> None:
        """Value test with per-task (unfused) computation and no update fusion."""
        rec_metric_value_test_launcher(target_clazz=WeightedAvgMetric, target_compute_mode=RecComputeMode.UNFUSED_TASKS_COMPUTATION, test_clazz=TestWeightedAvgMetric, metric_name=WeightedAvgMetricTest.task_name, task_names=['t1', 't2', 't3'], fused_update_limit=0, compute_on_all_ranks=False, should_validate_update=False, world_size=WORLD_SIZE, entry_point=metric_test_helper)

    def test_weighted_avg_fused(self) -> None:
        """Value test with fused (stacked) multi-task computation."""
        rec_metric_value_test_launcher(target_clazz=WeightedAvgMetric, target_compute_mode=RecComputeMode.FUSED_TASKS_COMPUTATION, test_clazz=TestWeightedAvgMetric, metric_name=WeightedAvgMetricTest.task_name, task_names=['t1', 't2', 't3'], fused_update_limit=0, compute_on_all_ranks=False, should_validate_update=False, world_size=WORLD_SIZE, entry_point=metric_test_helper)

    def test_weighted_avg_update_fused(self) -> None:
        """Value test with fused *updates* (buffered every 5, then every 100
        with an explicit batch window) while keeping unfused computation."""
        rec_metric_value_test_launcher(target_clazz=WeightedAvgMetric, target_compute_mode=RecComputeMode.UNFUSED_TASKS_COMPUTATION, test_clazz=TestWeightedAvgMetric, metric_name=WeightedAvgMetricTest.task_name, task_names=['t1', 't2', 't3'], fused_update_limit=5, compute_on_all_ranks=False, should_validate_update=False, world_size=WORLD_SIZE, entry_point=metric_test_helper)
        rec_metric_value_test_launcher(target_clazz=WeightedAvgMetric, target_compute_mode=RecComputeMode.UNFUSED_TASKS_COMPUTATION, test_clazz=TestWeightedAvgMetric, metric_name=WeightedAvgMetricTest.task_name, task_names=['t1', 't2', 't3'], fused_update_limit=100, compute_on_all_ranks=False, should_validate_update=False, world_size=WORLD_SIZE, entry_point=metric_test_helper, batch_window_size=10)
class _FoldArrow(QWidget):
    """Small widget drawing a fold indicator (right arrow = folded, down = open)."""

    def __init__(self, parent=None):
        super().__init__(parent)
        # Start in the collapsed state.
        self._folded = True

    def fold(self, folded):
        """Set the folded state and schedule a repaint."""
        self._folded = folded
        self.update()

    def paintEvent(self, _event):
        """Paint the arrow using the current widget style."""
        option = QStyleOption()
        option.initFrom(self)
        painter = QPainter(self)
        element = (QStyle.PrimitiveElement.PE_IndicatorArrowRight
                   if self._folded
                   else QStyle.PrimitiveElement.PE_IndicatorArrowDown)
        style = self.style()
        assert (style is not None)
        style.drawPrimitive(element, option, painter, self)

    def minimumSizeHint(self):
        """The indicator needs at least an 8x8 pixel area."""
        return QSize(8, 8)
# NOTE(review): the two leading attribute fragments below look like pytest
# markers (e.g. ``@pytest.mark.allow_backend_process`` /
# ``@pytest.mark.requires_internet``) whose prefix was lost during
# extraction -- confirm against the original test module.
.allow_backend_process
.requires_internet
def test_build_dependencies(hatch, temp_dir, helpers):
    """End-to-end check that ``hatch build -t custom`` installs the target's
    declared build dependency (``binary``) and runs the custom builder.

    The custom builder writes ``test.txt`` using the dependency, proving it
    was importable inside the isolated build environment.
    """
    project_name = 'My.App'
    # Scaffold a fresh project in a temporary working directory.
    with temp_dir.as_cwd():
        result = hatch('new', project_name)
    assert (result.exit_code == 0), result.output
    project_path = (temp_dir / 'my-app')
    data_path = (temp_dir / 'data')
    data_path.mkdir()
    # Custom build script: subclasses WheelBuilder and uses the `binary`
    # package (declared below as a build dependency) as a side effect.
    build_script = (project_path / DEFAULT_BUILD_SCRIPT)
    build_script.write_text(helpers.dedent("\n import pathlib\n\n import binary\n from hatchling.builders.wheel import WheelBuilder\n\n def get_builder():\n return CustomWheelBuilder\n\n class CustomWheelBuilder(WheelBuilder):\n def build(self, **kwargs):\n pathlib.Path('test.txt').write_text(str(binary.convert_units(1024)))\n yield from super().build(**kwargs)\n "))
    # Register the custom target with its dependency in the project config.
    project = Project(project_path)
    config = dict(project.raw_config)
    config['tool']['hatch']['build'] = {'targets': {'custom': {'dependencies': ['binary'], 'path': DEFAULT_BUILD_SCRIPT}}}
    project.save_config(config)
    # Build with an isolated data dir and verify the artifact plus the
    # side-effect file written by the builder.
    with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
        result = hatch('build', '-t', 'custom')
    assert (result.exit_code == 0), result.output
    build_directory = (project_path / 'dist')
    assert build_directory.is_dir()
    artifacts = list(build_directory.iterdir())
    assert (len(artifacts) == 1)
    output_file = (project_path / 'test.txt')
    assert output_file.is_file()
    # binary.convert_units(1024) == (1.0, 'KiB')
    assert (str(output_file.read_text()) == "(1.0, 'KiB')")
    assert (result.output == helpers.dedent('\n custom \n Setting up build environment\n '))
class SquadFeatures():
    """Container for the features of one SQuAD example, ready for a model.

    Pure data holder: the constructor stores every argument on the instance
    under the same name (input ids/masks, token bookkeeping, answer span
    positions, the unanswerable flag, and the optional tokenizer encoding).
    """

    def __init__(self, input_ids, attention_mask, token_type_ids, cls_index, p_mask, example_index, unique_id, paragraph_len, token_is_max_context, tokens, token_to_orig_map, start_position, end_position, is_impossible, qas_id: str=None, encoding: BatchEncoding=None):
        # Capture every constructor parameter verbatim as an attribute of
        # the same name; only `self` is excluded.
        for attr_name, attr_value in locals().items():
            if attr_name != 'self':
                setattr(self, attr_name, attr_value)
def filter_args_by_frequency(args_list: List[EventPredictedArgs], similarity_threshold: float=0.7) -> Tuple[(EventPredictedArgs, List[str])]:
    """Majority-vote filter over multiple generations of predicted event args.

    For each role, argument strings from all generations are clustered by
    fuzzy string similarity; only clusters supported by a strict majority of
    generations survive, and each surviving cluster is reduced to a single
    (entity_type, tokens) representative via ``_reduce_cluster``.

    NOTE(review): the declared return annotation says the second element is
    ``List[str]``, but the value actually returned is a dict mapping each
    role to its similarity clusters -- confirm and fix the annotation
    upstream.
    """
    n_generations = len(args_list)
    # role -> list of (entity_type, joined-token string) across generations.
    role_to_type_str_pairs = defaultdict(list)
    _prev_event_type = None
    for (generation_id, predicted_args) in enumerate(args_list):
        predicted_args: EventPredictedArgs
        if (predicted_args is None):
            continue
        # All non-empty generations must agree on the event type.
        if (_prev_event_type is not None):
            assert (_prev_event_type == predicted_args.event_type)
        _prev_event_type = predicted_args.event_type
        for (role, type_tokens_pairs) in predicted_args.items():
            for (entity_type, tokens) in type_tokens_pairs:
                role_to_type_str_pairs[role].append((entity_type, ' '.join(tokens)))
    role_to_cluster_type_str_pairs = {}
    role_to_output = EventPredictedArgs(event_type=_prev_event_type)
    for (role, type_str_pairs) in role_to_type_str_pairs.items():
        n = len(type_str_pairs)
        # Pairwise fuzzy similarity in [0, 1]; symmetric by construction.
        similarity_mat = np.zeros((n, n))
        for i in range(n):
            for j in range(n):
                similarity_mat[(i, j)] = similarity_mat[(j, i)] = (fuzz.ratio(type_str_pairs[i][1], type_str_pairs[j][1]) / 100.0)
        assert (similarity_mat.shape == (n, n))
        # Each argument's neighborhood (itself included, since self-similarity
        # is 1.0) becomes a candidate cluster; duplicates collapse in the set.
        equivalent_clusters = set()
        for arg_id in range(n):
            similar_args = np.where((similarity_mat[arg_id] > similarity_threshold))[0]
            equivalent_clusters.add(tuple(sorted(similar_args)))
        role_to_cluster_type_str_pairs[role] = [[type_str_pairs[str_idx] for str_idx in cluster] for cluster in equivalent_clusters]
        # Keep only clusters backed by a strict majority of generations.
        filtered_equivalent_clusters = [cluster for cluster in equivalent_clusters if (len(cluster) > (n_generations // 2))]
        role_to_output[role] = [_reduce_cluster(cluster, type_str_pairs) for cluster in filtered_equivalent_clusters]
    return (role_to_output, role_to_cluster_type_str_pairs)
# NOTE(review): this bare call looks like a class decorator (likely
# ``@register_optimizer('adam', dataclass=FairseqAdamConfig)``) whose '@'
# was lost during extraction -- confirm against the original module.
_optimizer('adam', dataclass=FairseqAdamConfig)
class FairseqAdam(FairseqOptimizer):
    """Adam optimizer wrapper that prefers the fused CUDA kernel.

    Falls back to the plain ``Adam`` implementation on TPU or when the
    fused kernel is unavailable (or explicitly disabled via
    ``args.use_old_adam``).
    """

    def __init__(self, args, params):
        super().__init__(args)
        fused_adam_cls = get_fused_adam_class()
        use_fused_adam = ((not getattr(args, 'use_old_adam', False)) and (fused_adam_cls is not None) and torch.cuda.is_available())
        if getattr(args, 'tpu', False):
            # On TPU the fused CUDA kernel cannot be used.
            self._optimizer = Adam(params, **self.optimizer_config)
        elif use_fused_adam:
            logger.info('using FusedAdam')
            self._optimizer = fused_adam_cls(params, **self.optimizer_config)
        else:
            self._optimizer = Adam(params, **self.optimizer_config)

    @property
    def optimizer_config(self):
        """Keyword arguments for the underlying optimizer.

        Must be a property: ``__init__`` splats it as
        ``**self.optimizer_config``, which would fail on a bound method.
        """
        # NOTE: eval() parses the betas tuple from the config string
        # (fairseq convention); the value comes from trusted local config.
        return {'lr': self.args.lr[0], 'betas': eval(self.args.adam_betas), 'eps': self.args.adam_eps, 'weight_decay': self.args.weight_decay}

    def average_params(self):
        """Average the exp_avg / exp_avg_sq buffers across all workers."""
        state_dict = self.optimizer.state_dict()
        total_gpus = float(dist.get_world_size())
        for (_, value) in state_dict['state'].items():
            # Divide before the sum-reduce so the result is the mean.
            value['exp_avg'] /= total_gpus
            value['exp_avg_sq'] /= total_gpus
            dist.all_reduce(value['exp_avg'], op=dist.ReduceOp.SUM)
            dist.all_reduce(value['exp_avg_sq'], op=dist.ReduceOp.SUM)
def val(net, dataset, criterion, max_iter=2):
    """Run a short validation pass of the CRNN and return (loss, accuracy).

    Args:
        net: Model to switch into eval mode.
        dataset: Dataset to sample validation batches from.
        criterion: CTC-style loss taking (preds, text, preds_size, length).
        max_iter: Maximum number of batches to evaluate.

    Returns:
        tuple: (average loss over evaluated batches, exact-match accuracy).

    NOTE(review): relies on module-level globals set up by the training
    script (``crnn``, ``opt``, ``converter``, ``image``, ``text``,
    ``length``, ``ifUnicode``) -- confirm they are initialized first.
    """
    print('Start val')
    for p in crnn.parameters():
        p.requires_grad = False
    net.eval()
    data_loader = torch.utils.data.DataLoader(dataset, shuffle=True, batch_size=opt.batchSize, num_workers=int(opt.workers))
    val_iter = iter(data_loader)
    n_correct = 0
    loss_avg = utils.averager()
    max_iter = min(max_iter, len(data_loader))
    for _ in range(max_iter):
        # Python 3: iterators have no .next() method; use the builtin.
        (cpu_images, cpu_texts) = next(val_iter)
        batch_size = cpu_images.size(0)
        utils.loadData(image, cpu_images)
        if ifUnicode:
            cpu_texts = [clean_txt(tx.decode('utf-8')) for tx in cpu_texts]
        (t, l) = converter.encode(cpu_texts)
        utils.loadData(text, t)
        utils.loadData(length, l)
        preds = crnn(image)
        preds_size = Variable(torch.IntTensor(([preds.size(0)] * batch_size)))
        cost = (criterion(preds, text, preds_size, length) / batch_size)
        loss_avg.add(cost)
        (_, preds) = preds.max(2)
        # Old torch versions kept the reduced dim after max(); squeeze only
        # if it is present so this also works on modern PyTorch, where an
        # unconditional squeeze(2) raises on the 2-D indices tensor.
        if preds.dim() > 2:
            preds = preds.squeeze(2)
        preds = preds.transpose(1, 0).contiguous().view((- 1))
        sim_preds = converter.decode(preds.data, preds_size.data, raw=False)
        # Count exact (whitespace-insensitive) transcript matches.
        for (pred, target) in zip(sim_preds, cpu_texts):
            if (pred.strip() == target.strip()):
                n_correct += 1
    # NOTE(review): assumes every evaluated batch is full (opt.batchSize);
    # the final batch may be smaller, slightly deflating accuracy -- confirm
    # whether drop_last semantics are intended.
    accuracy = (n_correct / float((max_iter * opt.batchSize)))
    testLoss = loss_avg.val()
    return (testLoss, accuracy)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.