class TestSceneRun():
class TestStartScripts():
class TestAudioListeners(SceneTestCase):
def setUp(self):
super().setUp()
os.environ['PYUNITY_INTERACTIVE'] = '0'
config.audio = True
def testCase1(self):
scene = SceneManager.AddScene('Scene')
scene.startScripts()
assert (scene.audioListener is not None)
def testCase2(self):
scene = SceneManager.AddScene('Scene')
scene.mainCamera.RemoveComponent(AudioListener)
with Logger.TempRedirect(silent=True) as r:
scene.startScripts()
assert (r.get() == 'Warning: No enabled AudioListeners found, audio is disabled\n')
assert (scene.audioListener is None)
def testCase3(self):
scene = SceneManager.AddScene('Scene')
gameObject = GameObject('Listener')
gameObject.AddComponent(AudioListener)
scene.Add(gameObject)
with Logger.TempRedirect(silent=True) as r:
scene.startScripts()
assert (r.get() == 'Warning: Ambiguity in AudioListeners, 2 enabled\n')
assert (scene.audioListener is None)
class TestBehaviours(SceneTestCase):
def setUp(self):
super().setUp()
os.environ['PYUNITY_INTERACTIVE'] = '0'
pytest.skip('Scene loading requires a Runner')
def testCase1(self):
scene = SceneManager.AddScene('Scene')
gameObject = GameObject('Test')
gameObject.AddComponent(TestBehaviour1)
scene.Add(gameObject)
with Logger.TempRedirect(silent=True) as r:
scene.startScripts()
assert (r.get() == 'Start\n')
class TestMeshBuffers(SceneTestCase):
def setUp(self):
super().setUp()
os.environ['PYUNITY_INTERACTIVE'] = '1'
class TestCameraBuffers(SceneTestCase):
def setUp(self):
super().setUp()
os.environ['PYUNITY_INTERACTIVE'] = '1'
class TestCollManager(SceneTestCase):
def setUp(self):
super().setUp()
os.environ['PYUNITY_INTERACTIVE'] = '0'
def testRigidbodies(self):
scene = SceneManager.AddScene('Scene')
gameObject1 = GameObject('With Rigidbody')
rb = gameObject1.AddComponent(Rigidbody)
coll1 = gameObject1.AddComponent(Collider)
coll2 = gameObject1.AddComponent(Collider)
scene.Add(gameObject1)
gameObject2 = GameObject('Without Rigidbody')
coll3 = gameObject2.AddComponent(Collider)
scene.Add(gameObject2)
scene.startScripts()
assert hasattr(scene, 'physics')
assert scene.physics
assert hasattr(scene, 'collManager')
assert (scene.collManager.rigidbodies[rb] == [coll1, coll2])
assert (scene.collManager.rigidbodies[scene.collManager.dummyRigidbody] == [coll3]) |
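# Fetch weather data from an ACIS web service endpoint: POST a JSON query for the
# requested date range, return a date-indexed DataFrame plus station metadata with
# the elevation converted from feet to meters (factor 0.3048).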
def _get_acis(start, end, params, map_variables, url, **kwargs):
params = {'sdate': pd.to_datetime(start).strftime('%Y-%m-%d'), 'edate': pd.to_datetime(end).strftime('%Y-%m-%d'), 'output': 'json', **params}
response = requests.post(url, json=params, headers={'Content-Type': 'application/json'}, **kwargs)
response.raise_for_status()
payload = response.json()
if ('error' in payload):
raise requests.HTTPError(payload['error'], response=response)
columns = (['date'] + [e['name'] for e in params['elems']])
df = pd.DataFrame(payload['data'], columns=columns)
df = df.set_index('date')
df.index = pd.to_datetime(df.index)
df.index.name = None
metadata = payload['meta']
try:
(metadata['lon'], metadata['lat']) = metadata.pop('ll')
except KeyError:
pass
try:
metadata['elev'] = (metadata['elev'] * 0.3048)
except KeyError:
pass
if map_variables:
df = df.rename(columns=VARIABLE_MAP)
for key in list(metadata.keys()):
if (key in VARIABLE_MAP):
metadata[VARIABLE_MAP[key]] = metadata.pop(key)
return (df, metadata) |
def modify_commandline_options(parser, is_train):
(opt, _) = parser.parse_known_args()
netG_cls = find_network_using_name(opt.netG, 'generator')
parser = netG_cls.modify_commandline_options(parser, is_train)
if is_train:
netD_cls = find_network_using_name(opt.netD, 'discriminator')
parser = netD_cls.modify_commandline_options(parser, is_train)
netE_cls = find_network_using_name('conv', 'encoder')
parser = netE_cls.modify_commandline_options(parser, is_train)
return parser |
@PublicAPI(stability='beta')
class RayXGBRegressor(XGBRegressor, RayXGBMixin):
__init__ = _xgboost_version_warn(XGBRegressor.__init__)
@_deprecate_positional_args
def fit(self, X, y, *, sample_weight=None, base_margin=None, eval_set=None, eval_metric=None, early_stopping_rounds=None, verbose=True, xgb_model: Optional[Union[(Booster, str, 'XGBModel')]]=None, sample_weight_eval_set=None, base_margin_eval_set=None, feature_weights=None, callbacks=None, ray_params: Union[(None, RayParams, Dict)]=None, _remote: Optional[bool]=None, ray_dmatrix_params: Optional[Dict]=None):
evals_result = {}
ray_dmatrix_params = (ray_dmatrix_params or {})
(train_dmatrix, evals) = _check_if_params_are_ray_dmatrix(X, sample_weight, base_margin, eval_set, sample_weight_eval_set, base_margin_eval_set)
if (train_dmatrix is None):
(train_dmatrix, evals) = _wrap_evaluation_matrices(missing=self.missing, X=X, y=y, group=None, qid=None, sample_weight=sample_weight, base_margin=base_margin, feature_weights=feature_weights, eval_set=eval_set, sample_weight_eval_set=sample_weight_eval_set, base_margin_eval_set=base_margin_eval_set, eval_group=None, eval_qid=None, create_dmatrix=(lambda **kwargs: RayDMatrix(**{**kwargs, **ray_dmatrix_params})), **self._ray_get_wrap_evaluation_matrices_compat_kwargs())
params = self.get_xgb_params()
if callable(self.objective):
obj = _objective_decorator(self.objective)
params['objective'] = 'reg:squarederror'
else:
obj = None
try:
(model, feval, params) = self._configure_fit(xgb_model, eval_metric, params)
except TypeError:
(model, feval, params, early_stopping_rounds, callbacks) = self._configure_fit(xgb_model, eval_metric, params, early_stopping_rounds, callbacks)
params.pop('n_jobs', None)
params.pop('nthread', None)
ray_params = self._ray_set_ray_params_n_jobs(ray_params, self.n_jobs)
additional_results = {}
self._Booster = train(params, train_dmatrix, self.get_num_boosting_rounds(), evals=evals, early_stopping_rounds=early_stopping_rounds, evals_result=evals_result, obj=obj, feval=feval, verbose_eval=verbose, xgb_model=model, callbacks=callbacks, additional_results=additional_results, ray_params=ray_params, _remote=_remote)
self.additional_results_ = additional_results
self._set_evaluation_result(evals_result)
return self
fit.__doc__ = (_treat_X_doc(_get_doc(XGBRegressor.fit)) + _RAY_PARAMS_DOC)
def _can_use_inplace_predict(self) -> bool:
return False
def predict(self, X, output_margin=False, validate_features=True, base_margin=None, iteration_range=None, ray_params: Union[(None, RayParams, Dict)]=None, _remote: Optional[bool]=None, ray_dmatrix_params: Optional[Dict]=None, **kwargs):
return self._ray_predict(X, output_margin=output_margin, validate_features=validate_features, base_margin=base_margin, iteration_range=iteration_range, ray_params=ray_params, _remote=_remote, ray_dmatrix_params=ray_dmatrix_params, **kwargs)
predict.__doc__ = (_treat_X_doc(_get_doc(XGBRegressor.predict)) + _RAY_PARAMS_DOC)
def load_model(self, fname):
if (not hasattr(self, '_Booster')):
self._Booster = Booster()
return super().load_model(fname) |
def test_initial_private_browsing(request, quteproc_new):
args = (_base_args(request.config) + ['--temp-basedir', '-s', 'content.private_browsing', 'true'])
quteproc_new.start(args)
quteproc_new.compare_session('\n windows:\n - private: True\n tabs:\n - history:\n - url: about:blank\n ')
quteproc_new.send_cmd(':quit')
quteproc_new.wait_for_quit() |
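# DB-API 2.0 cursor implemented on top of py-postgresql prepared statements.
# Open result sets are kept on a stack of Portal objects; a closed cursor is
# signalled by setting the portal stack to None.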
class Cursor(object):
rowcount = (- 1)
arraysize = 1
description = None
def __init__(self, C):
self.database = self.connection = C
self.description = ()
self.__portals = []
def _portal():
def fget(self):
if (self.__portals is None):
raise Error('cursor is closed', source='CLIENT', creator=self.database)
try:
p = self.__portals[0]
except IndexError:
raise InterfaceError('no portal on stack')
return p
def fdel(self):
if (self.__portals is None):
raise Error('cursor is closed', source='CLIENT', creator=self.database)
try:
del self.__portals[0]
except IndexError:
raise InterfaceError('no portal on stack')
return locals()
_portal = property(**_portal())
def setinputsizes(self, sizes):
if (self.__portals is None):
raise Error('cursor is closed', source='CLIENT', creator=self.database)
def setoutputsize(self, sizes, columns=None):
if (self.__portals is None):
raise Error('cursor is closed', source='CLIENT', creator=self.database)
def callproc(self, proname, args):
if (self.__portals is None):
raise Error('cursor is closed', source='CLIENT', creator=self.database)
p = self.database.prepare(('SELECT %s(%s)' % (proname, ','.join([('$' + str(x)) for x in range(1, (len(args) + 1))]))))
self.__portals.insert(0, Portal(p.chunks(*args)))
return args
def fetchone(self):
try:
return next(self._portal)
except StopIteration:
return None
def __next__(self):
return next(self._portal)
next = __next__
def __iter__(self):
return self
def fetchmany(self, arraysize=None):
return self._portal.read((arraysize or self.arraysize or 1))
def fetchall(self):
return self._portal.readall()
def nextset(self):
del self._portal
return (len(self.__portals) or None)
def fileno(self):
return self.database.fileno()
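# Translate DB-API paramstyles ('%s' positional, '%(name)s' keyword) into
# PostgreSQL-native $1..$n placeholders; mixing both styles in one statement
# raises TypeError.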
def _convert_query(self, string):
parts = list(pg_str.split(string))
style = None
count = 0
keys = []
kmap = {}
transformer = tuple
rparts = []
for part in parts:
if (part.__class__ is ().__class__):
rparts.append(part)
else:
r = percent_parameters(part)
pcount = 0
for x in r:
if (x == 's'):
pcount += 1
else:
x = x[1:(- 2)]
if (x not in keys):
kmap[x] = ('$' + str((len(keys) + 1)))
keys.append(x)
if r:
if pcount:
params = tuple([('$' + str((i + 1))) for i in range(count, (count + pcount))])
count += pcount
rparts.append((part % params))
else:
rparts.append((part % kmap))
else:
rparts.append(part)
if keys:
if count:
raise TypeError('keyword parameters and positional parameters used in query')
transformer = partial(convert_keywords, keys)
count = len(keys)
return ((pg_str.unsplit(rparts) if rparts else string), transformer, count)
def execute(self, statement, parameters=()):
if (self.__portals is None):
raise Error('cursor is closed', source='CLIENT', creator=self.database)
(sql, pxf, nparams) = self._convert_query(statement)
if ((nparams != (- 1)) and (len(parameters) != nparams)):
raise TypeError(('statement require %d parameters, given %d' % (nparams, len(parameters))))
ps = self.database.prepare(sql)
c = ps.chunks(*pxf(parameters))
if ((ps._output is not None) and (len(ps._output) > 0)):
self.rowcount = (- 1)
self.description = tuple([(self.database.typio.decode(x[0]), dbapi_type(x[3]), None, None, None, None, None) for x in ps._output])
self.__portals.insert(0, Portal(c))
else:
self.rowcount = c.count()
if (self.rowcount is None):
self.rowcount = (- 1)
self.description = None
if self.__portals:
del self._portal
return self
def executemany(self, statement, parameters):
if (self.__portals is None):
raise Error('cursor is closed', source='CLIENT', creator=self.database)
(sql, pxf, nparams) = self._convert_query(statement)
ps = self.database.prepare(sql)
if (ps._input is not None):
ps.load_rows(map(pxf, parameters))
else:
ps.load_rows(parameters)
self.rowcount = (- 1)
return self
def close(self):
if (self.__portals is None):
raise Error('cursor is closed', source='CLIENT', creator=self.database)
self.description = None
self.__portals = None |
def main():
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N', help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N', help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=14, metavar='N', help='number of epochs to train (default: 14)')
parser.add_argument('--lr', type=float, default=1.0, metavar='LR', help='learning rate (default: 1.0)')
parser.add_argument('--gamma', type=float, default=0.7, metavar='M', help='Learning rate step gamma (default: 0.7)')
parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--no-mps', action='store_true', default=False, help='disables macOS GPU training')
parser.add_argument('--dry-run', action='store_true', default=False, help='quickly check a single pass')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N', help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=False, help='For Saving the current Model')
args = parser.parse_args()
use_cuda = ((not args.no_cuda) and torch.cuda.is_available())
use_mps = ((not args.no_mps) and torch.backends.mps.is_available())
torch.manual_seed(args.seed)
if use_cuda:
device = torch.device('cuda')
elif use_mps:
device = torch.device('mps')
else:
device = torch.device('cpu')
train_kwargs = {'batch_size': args.batch_size}
test_kwargs = {'batch_size': args.test_batch_size}
if use_cuda:
cuda_kwargs = {'num_workers': 1, 'pin_memory': True, 'shuffle': True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
dataset1 = datasets.MNIST('../data', train=True, download=True, transform=transform)
dataset2 = datasets.MNIST('../data', train=False, transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
for epoch in range(1, (args.epochs + 1)):
train(args, model, device, train_loader, optimizer, epoch)
test(model, device, test_loader)
scheduler.step()
if args.save_model:
torch.save(model.state_dict(), 'mnist_cnn.pt') |
def copy_presets(old_presets: dict[(str, VersionedPreset)], gd: GameDescription, pickup_db: PickupDatabase):
new_game = gd.game
for (path, preset) in old_presets.items():
config = preset.get_preset().configuration
new_preset = dataclasses.replace(preset.get_preset(), uuid=uuid.uuid4(), game=new_game, configuration=dataclasses.replace(config, starting_location=StartingLocationList((gd.starting_location,), new_game), standard_pickup_configuration=StandardPickupConfiguration(game=new_game, pickups_state={pickup: StandardPickupState(num_shuffled_pickups=1) for pickup in pickup_db.standard_pickups.values()}, default_pickups={}, minimum_random_starting_pickups=0, maximum_random_starting_pickups=0), ammo_pickup_configuration=AmmoPickupConfiguration(pickups_state={}), dock_rando=DockRandoConfiguration(game=new_game, mode=DockRandoMode.VANILLA, types_state={dock_type: DockTypeState(new_game, dock_type.short_name, set(), set()) for dock_type in gd.dock_weakness_database.dock_types})))
VersionedPreset.with_preset(new_preset).save_to_file(_GAMES_PATH.joinpath(new_game.value, 'presets', path)) |
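# Scan the training log for the first and last 'Start Epoch' markers, then
# regex-extract per-epoch win rates for the lord and both farmer agents into
# log_info; epochs whose paragraph cannot be parsed are skipped.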
def get_log_info(log_info):
with open('./train_log/DQN-60-MA-SELF_PLAY/log.log', 'r') as file:
content = file.read()
lines = content.splitlines()
front_idx = 0
end_idx = 1
while True:
if ('Start Epoch' in lines[front_idx]):
break
front_idx += 1
while True:
if ('Start Epoch' in lines[end_idx]):
break
end_idx -= 1
start_epoch = int(re.findall('Epoch (.*) \\.\\.\\.', lines[front_idx])[0])
end_epoch = int(re.findall('Epoch (.*) \\.\\.\\.', lines[end_idx])[0])
assert (start_epoch <= end_epoch)
for epoch in range(start_epoch, (end_epoch + 1)):
try:
current_paragraph = re.findall('Epoch {}(.*?)param-summary/agent1/dqn_comb/block0/fc/W-rms:'.format(epoch), content, re.S)[0]
log_info['lord']['baseline_wr'].append(float(re.findall('\\[2\\]_lord_win_rate: (.*?)\n', current_paragraph)[0]))
log_info['farmer_up']['baseline_wr'].append(float(re.findall('\\[1\\]_farmer_win_rate: (.*?)\n', current_paragraph)[0]))
log_info['farmer_down']['baseline_wr'].append(float(re.findall('\\[3\\]_farmer_win_rate: (.*?)\n', current_paragraph)[0]))
log_info['lord']['training_wr'].append(float(re.findall('lord_win_rate: (.*?)\n', current_paragraph)[3]))
log_info['farmer_up']['training_wr'].append(float(re.findall('farmer_win_rate: (.*?)\n', current_paragraph)[3]))
log_info['farmer_down']['training_wr'].append(float(re.findall('farmer_win_rate: (.*?)\n', current_paragraph)[3]))
log_info['epoch'].append(epoch)
except (IndexError, ValueError):
pass
return log_info |
def test__irrad_for_celltemp():
total_irrad = pd.DataFrame(index=[0, 1], columns=['poa_global'], data=[10.0, 20.0])
empty = total_irrad.drop('poa_global', axis=1)
effect_irrad = pd.Series(index=total_irrad.index, data=[5.0, 8.0])
poa = modelchain._irrad_for_celltemp(total_irrad, effect_irrad)
assert_series_equal(poa, total_irrad['poa_global'])
poa = modelchain._irrad_for_celltemp(empty, effect_irrad)
assert_series_equal(poa, effect_irrad)
poa = modelchain._irrad_for_celltemp((total_irrad, total_irrad), (effect_irrad, effect_irrad))
assert (len(poa) == 2)
assert_series_equal(poa[0], total_irrad['poa_global'])
assert_series_equal(poa[1], total_irrad['poa_global'])
poa = modelchain._irrad_for_celltemp((empty, empty), (effect_irrad, effect_irrad))
assert (len(poa) == 2)
assert_series_equal(poa[0], effect_irrad)
assert_series_equal(poa[1], effect_irrad) |
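# Mixed second partial derivative of the covariance with respect to x[m] and x[n],
# presumably for an ARD squared-exponential kernel
# k(x, y) = sigma^2 * exp(-sum_i (x_i - y_i)^2 / (2 * l_i^2)); the m == n case
# picks up the extra -k/l_n^2 term from differentiating the same coordinate twice.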
def cov_devdevX_y(x, y, sigma, l, m, n):
result = 0
if (m == n):
result = (((- covariance(x, y, sigma, l)) / (l[n] ** 2)) - (((x[m] - y[m]) / (l[m] ** 2)) * cov_devX_y(x, y, sigma, l, n)))
else:
result = ((- ((x[m] - y[m]) / (l[m] ** 2))) * cov_devX_y(x, y, sigma, l, n))
return result |
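# PyTensor-style Op that returns a vector both as a 1 x N row matrix and as an
# N x 1 column matrix, reusing the output buffers in perform() when their
# shapes already match.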
class VecAsRowAndCol(Op):
__props__ = ()
def make_node(self, v):
if (not isinstance(v, Variable)):
v = pt.as_tensor_variable(v)
assert (v.type.ndim == 1)
type_class = type(v.type)
out_r_type = type_class(dtype=v.dtype, shape=(1, None))
out_c_type = type_class(dtype=v.dtype, shape=(None, 1))
return Apply(self, [v], [out_r_type(), out_c_type()])
def perform(self, node, inp, out):
(v,) = inp
(r, c) = out
lv = v.shape[0]
if ((r[0] is None) or (r[0].shape != (1, lv))):
r[0] = np.empty((1, lv), dtype=node.outputs[0].type.dtype)
if ((c[0] is None) or (c[0].shape != (lv, 1))):
c[0] = np.empty((lv, 1), dtype=node.outputs[0].type.dtype)
for i in range(lv):
r[0][(0, i)] = v[i]
c[0][(i, 0)] = v[i] |
def _param_docs(docstring, param_name):
for line in docstring.splitlines():
for regex in DOC_REGEX:
m = regex.match(line)
if (not m):
continue
if (m.group('param') != param_name):
continue
return (m.group('doc') or '') |
class _TAppDataFileMixin():
PATH = None
def test_filename(self):
self.assertTrue(self.PATH.endswith('.appdata.xml.in'))
def test_validate(self):
try:
subprocess.check_output(['appstreamcli', 'validate', '--no-net', self.PATH], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
raise Exception(e.output) from e |
def configure(app_config):
logger.debug('Configuring log model')
model_name = app_config.get('LOGS_MODEL', 'database')
model_config = app_config.get('LOGS_MODEL_CONFIG', {})
def should_skip_logging(kind_name, namespace_name, is_free_namespace):
if (namespace_name and (namespace_name in app_config.get('DISABLED_FOR_AUDIT_LOGS', {}))):
return True
if (kind_name in _PULL_LOG_KINDS):
if (namespace_name and (namespace_name in app_config.get('DISABLED_FOR_PULL_LOGS', {}))):
return True
if app_config.get('FEATURE_DISABLE_PULL_LOGS_FOR_FREE_NAMESPACES'):
if is_free_namespace:
return True
return False
model_config['should_skip_logging'] = should_skip_logging
logs_model.initialize(_LOG_MODELS[model_name](**model_config)) |
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--result', default='/disk/scratch/XingxingZhang/summarization/experiments/extract_baseline_rouge_lbl/base_ms30/run.save.1layer.sh.models/3.test.txt')
parser.add_argument('--article', default='/disk/scratch/XingxingZhang/summarization/dataset/cnn_dailymail_rouge_lbl_sseg/test.article')
parser.add_argument('--summary', default='/disk/scratch/XingxingZhang/summarization/dataset/cnn_dailymail_rouge_lbl_sseg/test.summary')
parser.add_argument('--entity_map')
parser.add_argument('--out_dir', default='.')
parser.add_argument('--add_full_stop', action='store_true')
return parser.parse_args() |
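# Enumerate the active uniforms of a linked GL program, collapsing '[0]' array
# suffixes into the bare name and skipping uniforms the driver optimized out
# (location -1).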
def _introspect_uniforms(program_id: int, have_dsa: bool) -> dict:
uniforms = {}
for index in range(_get_number(program_id, GL_ACTIVE_UNIFORMS)):
(u_name, u_type, u_size) = _query_uniform(program_id, index)
array_count = u_name.count('[0]')
if (array_count > 1):
raise ShaderException('Multidimensional arrays are not currently supported.')
loc = glGetUniformLocation(program_id, create_string_buffer(u_name.encode('utf-8')))
if (loc == (- 1)):
continue
if (array_count == 1):
u_name = u_name.strip('[0]')
assert (u_name not in uniforms), f'{u_name} exists twice in the shader. Possible name clash with an array.'
uniforms[u_name] = _Uniform(program_id, u_name, u_type, u_size, loc, have_dsa)
if _debug_gl_shaders:
for uniform in uniforms.values():
print(f' Found uniform: {uniform}')
return uniforms |
class TestDiversity():
def setup_method(self):
test_file_path = mm.datasets.get_path('bubenec')
self.df_buildings = gpd.read_file(test_file_path, layer='buildings')
self.df_streets = gpd.read_file(test_file_path, layer='streets')
self.df_tessellation = gpd.read_file(test_file_path, layer='tessellation')
self.df_buildings['height'] = np.linspace(10.0, 30.0, 144)
self.df_tessellation['area'] = mm.Area(self.df_tessellation).series
self.sw = sw_high(k=3, gdf=self.df_tessellation, ids='uID')
self.sw.neighbors[100] = []
self.sw_drop = sw_high(k=3, gdf=self.df_tessellation[2:], ids='uID')
def test_Range(self):
full_sw = mm.Range(self.df_tessellation, 'area', self.sw, 'uID').series
assert (full_sw[0] == pytest.approx(8255.372, rel=0.001))
area = self.df_tessellation['area']
full2 = mm.Range(self.df_tessellation, area, self.sw, 'uID').series
assert (full2[0] == pytest.approx(8255.372, rel=0.001))
limit = mm.Range(self.df_tessellation, 'area', self.sw, 'uID', rng=(10, 90)).series
assert (limit[0] == pytest.approx(4122.139, rel=0.001))
assert mm.Range(self.df_tessellation, 'area', self.sw_drop, 'uID').series.isna().any()
def test_Theil(self):
full_sw = mm.Theil(self.df_tessellation, 'area', self.sw, 'uID').series
assert (full_sw[0] == pytest.approx(0.))
limit = mm.Theil(self.df_tessellation, self.df_tessellation.area, self.sw, 'uID', rng=(10, 90)).series
assert (limit[0] == pytest.approx(0.1330295))
zeros = mm.Theil(self.df_tessellation, np.zeros(len(self.df_tessellation)), self.sw, 'uID').series
assert (zeros[0] == 0)
assert mm.Theil(self.df_tessellation, 'area', self.sw_drop, 'uID').series.isna().any()
def test_Simpson(self):
ht_sw = mm.Simpson(self.df_tessellation, 'area', self.sw, 'uID').series
assert (ht_sw[0] == 0.385)
quan_sw = mm.Simpson(self.df_tessellation, self.df_tessellation.area, self.sw, 'uID', binning='quantiles', k=3).series
assert (quan_sw[0] == 0.395)
with pytest.raises(ValueError):
ht_sw = mm.Simpson(self.df_tessellation, 'area', self.sw, 'uID', binning='nonexistent')
assert mm.Simpson(self.df_tessellation, 'area', self.sw_drop, 'uID').series.isna().any()
gs = mm.Simpson(self.df_tessellation, 'area', self.sw, 'uID', gini_simpson=True).series
assert (gs[0] == (1 - 0.385))
inv = mm.Simpson(self.df_tessellation, 'area', self.sw, 'uID', inverse=True).series
assert (inv[0] == (1 / 0.385))
self.df_tessellation['cat'] = (list(range(8)) * 18)
cat = mm.Simpson(self.df_tessellation, 'cat', self.sw, 'uID', categorical=True).series
assert (cat[0] == pytest.approx(0.15))
cat2 = mm.Simpson(self.df_tessellation, 'cat', self.sw, 'uID', categorical=True).series
assert (cat2[0] == pytest.approx(0.15))
def test_Gini(self):
full_sw = mm.Gini(self.df_tessellation, 'area', self.sw, 'uID').series
assert (full_sw[0] == pytest.approx(0.3945388))
limit = mm.Gini(self.df_tessellation, 'area', self.sw, 'uID', rng=(10, 90)).series
assert (limit[0] == pytest.approx(0.))
self.df_tessellation['negative'] = (self.df_tessellation.area - self.df_tessellation.area.mean())
with pytest.raises(ValueError):
mm.Gini(self.df_tessellation, 'negative', self.sw, 'uID').series
assert mm.Gini(self.df_tessellation, 'area', self.sw_drop, 'uID').series.isna().any()
def test_Shannon(self):
ht_sw = mm.Shannon(self.df_tessellation, 'area', self.sw, 'uID').series
assert (ht_sw[0] == 1.)
quan_sw = mm.Shannon(self.df_tessellation, self.df_tessellation.area, self.sw, 'uID', binning='quantiles', k=3).series
assert (quan_sw[0] == 0.)
with pytest.raises(ValueError):
ht_sw = mm.Shannon(self.df_tessellation, 'area', self.sw, 'uID', binning='nonexistent')
assert mm.Shannon(self.df_tessellation, 'area', self.sw_drop, 'uID').series.isna().any()
self.df_tessellation['cat'] = (list(range(8)) * 18)
cat = mm.Shannon(self.df_tessellation, 'cat', self.sw, 'uID', categorical=True).series
assert (cat[0] == pytest.approx(1.973))
cat2 = mm.Shannon(self.df_tessellation, 'cat', self.sw, 'uID', categorical=True, categories=range(15)).series
assert (cat2[0] == pytest.approx(1.973))
def test_Unique(self):
self.df_tessellation['cat'] = (list(range(8)) * 18)
un = mm.Unique(self.df_tessellation, 'cat', self.sw, 'uID').series
assert (un[0] == 8)
un = mm.Unique(self.df_tessellation, (list(range(8)) * 18), self.sw, 'uID').series
assert (un[0] == 8)
un = mm.Unique(self.df_tessellation, 'cat', self.sw_drop, 'uID').series
assert un.isna().any()
assert (un[5] == 8)
self.df_tessellation.loc[(0, 'cat')] = np.nan
un = mm.Unique(self.df_tessellation, 'cat', self.sw, 'uID', dropna=False).series
assert (un[0] == 9)
un = mm.Unique(self.df_tessellation, 'cat', self.sw, 'uID', dropna=True).series
assert (un[0] == 8)
def test_Percentile(self):
perc = mm.Percentiles(self.df_tessellation, 'area', self.sw, 'uID').frame
assert np.all(((perc.loc[0].values - np.array([1085., 2623.9962661, 4115.])) < 1e-05))
perc = mm.Percentiles(self.df_tessellation, (list(range(8)) * 18), self.sw, 'uID').frame
assert np.all((perc.loc[0].values == np.array([1.0, 3.5, 6.0])))
perc = mm.Percentiles(self.df_tessellation, 'area', self.sw, 'uID', percentiles=[30, 70]).frame
assert np.all(((perc.loc[0].values - np.array([1218., 3951.])) < 1e-05))
perc = mm.Percentiles(self.df_tessellation, 'area', self.sw, 'uID', weighted='linear').frame
assert np.all(((perc.loc[0].values - np.array([, 2598., 4107.])) < 1e-05))
perc = mm.Percentiles(self.df_tessellation, 'area', self.sw, 'uID', percentiles=[30, 70], weighted='linear').frame
assert np.all(((perc.loc[0].values - np.array([1211., 3839.])) < 1e-05))
_data = {'uID': [9999], 'area': 1.0}
_pgon = [Polygon(((0, 0), (0, 1), (1, 1), (1, 0)))]
_gdf = gpd.GeoDataFrame(_data, index=[9999], geometry=_pgon)
perc = mm.Percentiles(pd.concat([self.df_tessellation, _gdf]), 'area', self.sw, 'uID').frame
np.testing.assert_array_equal(np.isnan(perc.loc[9999]), np.ones(3, dtype=bool))
perc = mm.Percentiles(pd.concat([_gdf, self.df_tessellation]), 'area', self.sw, 'uID', weighted='linear').frame
np.testing.assert_array_equal(np.isnan(perc.loc[9999]), np.ones(3, dtype=bool))
with pytest.raises(ValueError, match="'nonsense' is not a valid"):
mm.Percentiles(self.df_tessellation, 'area', self.sw, 'uID', weighted='nonsense') |
class R2Plus1DStem(ResNeXt3DStem):
def __init__(self, temporal_kernel, spatial_kernel, input_planes, stem_planes, maxpool):
super(R2Plus1DStem, self).__init__(temporal_kernel, spatial_kernel, input_planes, stem_planes, maxpool)
def _construct_stem(self, temporal_kernel, spatial_kernel, input_planes, stem_planes, maxpool):
self.stem = R2Plus1DStemMultiPathway([input_planes], [stem_planes], [[temporal_kernel, spatial_kernel, spatial_kernel]], [[1, 2, 2]], [[(temporal_kernel // 2), (spatial_kernel // 2), (spatial_kernel // 2)]], maxpool=[maxpool]) |
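# Raiden target-side handler for an incoming locked transfer: validate it against
# the channel state, and if the lock still has a safe margin before expiry, queue
# a SendSecretRequest back to the initiator; an invalid transfer produces an
# EventUnlockClaimFailed instead.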
def handle_inittarget(state_change: ActionInitTarget, channel_state: NettingChannelState, pseudo_random_generator: random.Random, block_number: BlockNumber) -> TransitionResult[Optional[TargetTransferState]]:
iteration: TransitionResult[Optional[TargetTransferState]]
transfer = state_change.transfer
from_hop = state_change.from_hop
assert (channel_state.identifier == transfer.balance_proof.channel_identifier), 'channel_id mismatch in handle_inittarget'
(is_valid, channel_events, errormsg) = channel.handle_receive_lockedtransfer(channel_state, transfer, transfer.payer_address_metadata)
if is_valid:
target_state = TargetTransferState(from_hop, transfer)
if state_change.received_valid_secret:
return TransitionResult(target_state, channel_events)
safe_to_wait = is_safe_to_wait(transfer.lock.expiration, channel_state.reveal_timeout, block_number)
if safe_to_wait:
message_identifier = message_identifier_from_prng(pseudo_random_generator)
recipient = transfer.initiator
secret_request = SendSecretRequest(recipient=Address(recipient), recipient_metadata=transfer.initiator_address_metadata, message_identifier=message_identifier, payment_identifier=transfer.payment_identifier, amount=PaymentAmount(transfer.lock.amount), expiration=transfer.lock.expiration, secrethash=transfer.lock.secrethash, canonical_identifier=CANONICAL_IDENTIFIER_UNORDERED_QUEUE)
channel_events.append(secret_request)
iteration = TransitionResult(target_state, channel_events)
else:
assert errormsg, 'handle_receive_lockedtransfer should return error msg if not valid'
unlock_failed = EventUnlockClaimFailed(identifier=transfer.payment_identifier, secrethash=transfer.lock.secrethash, reason=errormsg)
channel_events.append(unlock_failed)
iteration = TransitionResult(None, channel_events)
return iteration |
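# Reversible residual block (RevNet-style): y1 = x1 + F(x2), y2 = x2 + G(y1).
# The forward pass frees the input activations with set_(), and backward()
# reconstructs x1/x2 from the outputs before re-running F and G under autograd,
# trading extra compute for O(1) activation memory per block.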
class ReversibleBlockFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, x, fm, gm, *params):
with torch.no_grad():
(x1, x2) = torch.chunk(x, chunks=2, dim=1)
x1 = x1.contiguous()
x2 = x2.contiguous()
y1 = (x1 + fm(x2))
y2 = (x2 + gm(y1))
y = torch.cat((y1, y2), dim=1)
x1.set_()
x2.set_()
y1.set_()
y2.set_()
del x1, x2, y1, y2
ctx.save_for_backward(x, y)
ctx.fm = fm
ctx.gm = gm
return y
@staticmethod
def backward(ctx, grad_y):
fm = ctx.fm
gm = ctx.gm
(x, y) = ctx.saved_variables
(y1, y2) = torch.chunk(y, chunks=2, dim=1)
y1 = y1.contiguous()
y2 = y2.contiguous()
with torch.no_grad():
y1_z = Variable(y1.data, requires_grad=True)
x2 = (y2 - gm(y1_z))
x1 = (y1 - fm(x2))
with set_grad_enabled(True):
x1_ = Variable(x1.data, requires_grad=True)
x2_ = Variable(x2.data, requires_grad=True)
y1_ = (x1_ + fm.forward(x2_))
y2_ = (x2_ + gm(y1_))
y = torch.cat((y1_, y2_), dim=1)
dd = torch.autograd.grad(y, (((x1_, x2_) + tuple(gm.parameters())) + tuple(fm.parameters())), grad_y)
gm_params_len = len([p for p in gm.parameters()])
gm_params_grads = dd[2:(2 + gm_params_len)]
fm_params_grads = dd[(2 + gm_params_len):]
grad_x = torch.cat((dd[0], dd[1]), dim=1)
y1_.detach_()
y2_.detach_()
del y1_, y2_
x.data.set_(torch.cat((x1, x2), dim=1).data.contiguous())
return (((grad_x, None, None) + fm_params_grads) + gm_params_grads) |
class StatViewSettings():
_instance = None
@classmethod
def getInstance(cls):
if (cls._instance is None):
cls._instance = StatViewSettings()
return cls._instance
def __init__(self):
serviceStatViewDefaultSettings = {'resources': 2, 'resistances': 2, 'recharge': 2, 'firepower': 2, 'capacitor': 2, 'targetingMisc': 1, 'price': 2, 'miningyield': 2, 'drones': 2, 'outgoing': 2}
self.serviceStatViewDefaultSettings = SettingsProvider.getInstance().getSettings('pyfaServiceStatViewSettings', serviceStatViewDefaultSettings)
def get(self, type):
return self.serviceStatViewDefaultSettings[type]
def set(self, type, value):
self.serviceStatViewDefaultSettings[type] = value |
class CmdGive(MuxCommand):
key = 'give'
locks = 'cmd:all()'
arg_regex = '\\s|$'
def func(self):
caller = self.caller
if ((not self.args) or (not self.rhs)):
caller.msg('Usage: give <inventory object> = <target>')
return
to_give = caller.search(self.lhs, location=caller, nofound_string=("You aren't carrying %s." % self.lhs), multimatch_string=('You carry more than one %s:' % self.lhs))
target = caller.search(self.rhs)
if (not (to_give and target)):
return
if (target == caller):
caller.msg(('You keep %s to yourself.' % to_give.key))
return
if (not (to_give.location == caller)):
caller.msg(('You are not holding %s.' % to_give.key))
return
if to_give.db.covered_by:
caller.msg(("You can't give that away because it's covered by %s." % to_give.db.covered_by))
return
if to_give.db.worn:
to_give.remove(caller)
to_give.move_to(caller.location, quiet=True)
caller.msg(('You give %s to %s.' % (to_give.key, target.key)))
to_give.move_to(target, quiet=True)
target.msg(('%s gives you %s.' % (caller.key, to_give.key)))
to_give.at_give(caller, target) |
class ComposeTypesTestCase(ZiplineTestCase):
def test_identity(self):
assert_is(compose_types(C), C, msg='compose_types of a single class should be identity')
def test_compose(self):
composed = compose_types(C, D)
assert_is_subclass(composed, C)
assert_is_subclass(composed, D)
def test_compose_mro(self):
composed = compose_types(C, D)
assert_equal(composed.f(), C.f())
assert_equal(composed.g(), D.g())
assert_equal(composed().delegate(), ('C.delegate', 'D.delegate')) |
class TstWindow(MainWindow):
def __init__(self, parent=None, show=True, off_screen=True):
MainWindow.__init__(self, parent)
self.frame = QFrame()
vlayout = QVBoxLayout()
self.vtk_widget = QtInteractor(parent=self.frame, off_screen=off_screen, stereo=False)
vlayout.addWidget(self.vtk_widget.interactor)
self.frame.setLayout(vlayout)
self.setCentralWidget(self.frame)
mainMenu = _create_menu_bar(parent=self)
fileMenu = mainMenu.addMenu('File')
self.exit_action = QAction('Exit', self)
self.exit_action.setShortcut('Ctrl+Q')
self.exit_action.triggered.connect(self.close)
fileMenu.addAction(self.exit_action)
meshMenu = mainMenu.addMenu('Mesh')
self.add_sphere_action = QAction('Add Sphere', self)
self.add_sphere_action.setShortcut('Ctrl+A')
self.add_sphere_action.triggered.connect(self.add_sphere)
meshMenu.addAction(self.add_sphere_action)
self.signal_close.connect(self.vtk_widget.close)
if show:
self.show()
def add_sphere(self):
sphere = pyvista.Sphere(phi_resolution=6, theta_resolution=6)
self.vtk_widget.add_mesh(sphere)
self.vtk_widget.reset_camera() |
class Vec3():
__slots__ = ('x', 'y', 'z')
def __init__(self, x: float=0.0, y: float=0.0, z: float=0.0) -> None:
self.x = x
self.y = y
self.z = z
def __iter__(self) -> _Iterator[float]:
(yield self.x)
(yield self.y)
(yield self.z)
@_typing.overload
def __getitem__(self, item: int) -> float:
...
@_typing.overload
def __getitem__(self, item: slice) -> tuple[(float, ...)]:
...
def __getitem__(self, item):
return (self.x, self.y, self.z)[item]
def __setitem__(self, key, value):
if (type(key) is slice):
for (i, attr) in enumerate(['x', 'y', 'z'][key]):
setattr(self, attr, value[i])
else:
setattr(self, ['x', 'y', 'z'][key], value)
def __len__(self) -> int:
return 3
def mag(self) -> float:
return self.__abs__()
def __add__(self, other: Vec3) -> Vec3:
return Vec3((self.x + other.x), (self.y + other.y), (self.z + other.z))
def __sub__(self, other: Vec3) -> Vec3:
return Vec3((self.x - other.x), (self.y - other.y), (self.z - other.z))
def __mul__(self, scalar: float) -> Vec3:
return Vec3((self.x * scalar), (self.y * scalar), (self.z * scalar))
def __truediv__(self, scalar: float) -> Vec3:
return Vec3((self.x / scalar), (self.y / scalar), (self.z / scalar))
def __floordiv__(self, scalar: float) -> Vec3:
return Vec3((self.x // scalar), (self.y // scalar), (self.z // scalar))
def __abs__(self) -> float:
return _math.sqrt((((self.x ** 2) + (self.y ** 2)) + (self.z ** 2)))
def __neg__(self) -> Vec3:
return Vec3((- self.x), (- self.y), (- self.z))
def __round__(self, ndigits: (int | None)=None) -> Vec3:
return Vec3(*(round(v, ndigits) for v in self))
def __radd__(self, other: (Vec3 | int)) -> Vec3:
if (other == 0):
return self
else:
return self.__add__(_typing.cast(Vec3, other))
def __eq__(self, other: object) -> bool:
return (isinstance(other, Vec3) and (self.x == other.x) and (self.y == other.y) and (self.z == other.z))
def __ne__(self, other: object) -> bool:
return ((not isinstance(other, Vec3)) or (self.x != other.x) or (self.y != other.y) or (self.z != other.z))
def from_magnitude(self, magnitude: float) -> Vec3:
return (self.normalize() * magnitude)
def limit(self, maximum: float) -> Vec3:
if (((self.x ** 2) + (self.y ** 2)) + (self.z ** 2)) > (maximum * maximum):
return self.from_magnitude(maximum)
return self
def cross(self, other: Vec3) -> Vec3:
return Vec3(((self.y * other.z) - (self.z * other.y)), ((self.z * other.x) - (self.x * other.z)), ((self.x * other.y) - (self.y * other.x)))
def dot(self, other: Vec3) -> float:
return (((self.x * other.x) + (self.y * other.y)) + (self.z * other.z))
def lerp(self, other: Vec3, alpha: float) -> Vec3:
return Vec3((self.x + (alpha * (other.x - self.x))), (self.y + (alpha * (other.y - self.y))), (self.z + (alpha * (other.z - self.z))))
def distance(self, other: Vec3) -> float:
return _math.sqrt(((((other.x - self.x) ** 2) + ((other.y - self.y) ** 2)) + ((other.z - self.z) ** 2)))
def normalize(self) -> Vec3:
try:
d = self.__abs__()
return Vec3((self.x / d), (self.y / d), (self.z / d))
except ZeroDivisionError:
return self
def clamp(self, min_val: float, max_val: float) -> Vec3:
return Vec3(clamp(self.x, min_val, max_val), clamp(self.y, min_val, max_val), clamp(self.z, min_val, max_val))
def __getattr__(self, attrs: str) -> ((Vec2 | Vec3) | Vec4):
try:
vec_class = {2: Vec2, 3: Vec3, 4: Vec4}[len(attrs)]
return vec_class(*(self['xyz'.index(c)] for c in attrs))
except Exception:
raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{attrs}'") from None
def __repr__(self) -> str:
return f'Vec3({self.x}, {self.y}, {self.z})' |
@_register
class ContentDescriptionObject(BaseObject):
GUID = guid2bytes('75B22633-668E-11CF-A6D9-00AA0062CE6C')
NAMES = [u'Title', u'Author', u'Copyright', u'Description', u'Rating']
def parse(self, asf, data):
super(ContentDescriptionObject, self).parse(asf, data)
lengths = struct.unpack('<HHHHH', data[:10])
texts = []
pos = 10
for length in lengths:
end = (pos + length)
if (length > 0):
texts.append(data[pos:end].decode('utf-16-le').strip(u'\x00'))
else:
texts.append(None)
pos = end
for (key, value) in zip(self.NAMES, texts):
if (value is not None):
value = ASFUnicodeAttribute(value=value)
asf._tags.setdefault(self.GUID, []).append((key, value))
def render(self, asf):
def render_text(name):
value = asf.to_content_description.get(name)
if (value is not None):
return (str(value).encode('utf-16-le') + b'\x00\x00')
else:
return b''
texts = [render_text(x) for x in self.NAMES]
data = (struct.pack('<HHHHH', *map(len, texts)) + b''.join(texts))
return ((self.GUID + struct.pack('<Q', (24 + len(data)))) + data) |
def build_dataset(cfg):
args = copy.deepcopy(cfg)
transform = build_transform(args.trans_dict)
ds_dict = args.ds_dict
ds_name = ds_dict.pop('type')
ds_dict['transform'] = transform
if hasattr(torchvision.datasets, ds_name):
ds = getattr(torchvision.datasets, ds_name)(**ds_dict)
else:
ds = datasets.__dict__[ds_name](**ds_dict)
return ds |
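# Minimax Entropy (MME) trainer for semi-supervised domain adaptation:
# cross-entropy on labeled batches, plus an entropy term on unlabeled batches
# routed through ReverseGrad, so the classifier and the feature extractor play
# a minimax game over the entropy (note the leading minus in loss_u).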
@TRAINER_REGISTRY.register()
class MME(TrainerXU):
def __init__(self, cfg):
super().__init__(cfg)
self.lmda = cfg.TRAINER.MME.LMDA
def build_model(self):
cfg = self.cfg
print('Building F')
self.F = SimpleNet(cfg, cfg.MODEL, 0)
self.F.to(self.device)
print('# params: {:,}'.format(count_num_param(self.F)))
self.optim_F = build_optimizer(self.F, cfg.OPTIM)
self.sched_F = build_lr_scheduler(self.optim_F, cfg.OPTIM)
self.register_model('F', self.F, self.optim_F, self.sched_F)
print('Building C')
self.C = Prototypes(self.F.fdim, self.num_classes)
self.C.to(self.device)
print('# params: {:,}'.format(count_num_param(self.C)))
self.optim_C = build_optimizer(self.C, cfg.OPTIM)
self.sched_C = build_lr_scheduler(self.optim_C, cfg.OPTIM)
self.register_model('C', self.C, self.optim_C, self.sched_C)
self.revgrad = ReverseGrad()
def forward_backward(self, batch_x, batch_u):
(input_x, label_x, input_u) = self.parse_batch_train(batch_x, batch_u)
feat_x = self.F(input_x)
logit_x = self.C(feat_x)
loss_x = F.cross_entropy(logit_x, label_x)
self.model_backward_and_update(loss_x)
feat_u = self.F(input_u)
feat_u = self.revgrad(feat_u)
logit_u = self.C(feat_u)
prob_u = F.softmax(logit_u, 1)
loss_u = (- ((- prob_u) * torch.log((prob_u + 1e-05))).sum(1).mean())
self.model_backward_and_update((loss_u * self.lmda))
loss_summary = {'loss_x': loss_x.item(), 'acc_x': compute_accuracy(logit_x, label_x)[0].item(), 'loss_u': loss_u.item()}
if ((self.batch_idx + 1) == self.num_batches):
self.update_lr()
return loss_summary
def model_inference(self, input):
return self.C(self.F(input)) |
class FC3_SkipX(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.op = self._getParser()
self.skipx = kwargs.get('skipx', False)
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.skipx:
retval += '# Do not configure the X Window System\nskipx\n'
return retval
def _getParser(self):
op = KSOptionParser(prog='skipx', description='\n If present, X is not configured on the installed\n system.', version=FC3)
return op
def parse(self, args):
self.op.parse_args(args=args, lineno=self.lineno)
self.skipx = True
return self |
class MemoryGraph(_Graph):
orientations = base.ORIENTATION_HORIZONTAL
fixed_upper_bound = True
def __init__(self, **config):
_Graph.__init__(self, **config)
val = self._getvalues()
self.maxvalue = val['MemTotal']
mem = (((val['MemTotal'] - val['MemFree']) - val['Buffers']) - val['Cached'])
self.fulfill(mem)
def _getvalues(self):
val = {}
mem = psutil.virtual_memory()
val['MemTotal'] = int(((mem.total / 1024) / 1024))
val['MemFree'] = int(((mem.free / 1024) / 1024))
val['Buffers'] = int(((mem.buffers / 1024) / 1024))
val['Cached'] = int(((mem.cached / 1024) / 1024))
return val
def update_graph(self):
val = self._getvalues()
self.push((((val['MemTotal'] - val['MemFree']) - val['Buffers']) - val['Cached'])) |
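# Recover the versor (rotor) R mapping orthogonal frame A onto frame B by
# accumulating one reflection per basis vector (essentially Cartan-Dieudonne);
# a uniform scaling between the frames is detected via the frame volumes and
# folded back into R as sqrt(alpha).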
def orthoFrames2Versor(B, A=None, delta: float=0.001, eps: Optional[float]=None, det=None, remove_scaling: bool=False):
if (A is None):
A = B[0].layout.basis_vectors_lst
A = A[:]
B = B[:]
if (len(A) != len(B)):
raise ValueError('len(A)!=len(B)')
if (eps is None):
eps = global_eps()
spinor = False
try:
B = Frame(B)
B_En = B.En
except Exception:
pass
N = len(A)
if remove_scaling:
lam = omoh(A, B)
B = Frame([(B[k] * lam[k]) for k in range(N)])
try:
A = Frame(A[:])
B = Frame(B[:])
alpha = (abs((B.En / A.En)) ** (1.0 / N))
if (abs((alpha - 1)) > eps):
spinor = True
B = [(b / alpha) for b in B]
except Exception:
pass
r_list = []
for k in range(N):
(a, b) = (A[0], B[0])
r = (a - b)
if (abs((b ** 2)) > eps):
d = (abs((r ** 2)) / abs((b ** 2)))
else:
d = abs((r ** 2))
if (d >= delta):
r_list.append(r)
A = A[1:]
B = B[1:]
for j in range(len(A)):
A[j] = (((- r) * A[j]) * r.inv())
else:
R = (b * (a + b))
if (abs(R) > eps):
r_list.append(R)
A = A[1:]
B = B[1:]
for j in range(len(A)):
A[j] = ((R * A[j]) * R.inv())
R = reduce(gp, r_list[::(- 1)])
if (det is not None):
I = R.pseudoScalar
our_det = (((R * I) * (~ R)) * I.inv())(0)
if (sign(float(our_det)) != det):
R = (B_En.dual() * R)
if (abs(R) < eps):
warn('abs(R)<eps. likely to be inaccurate')
R = (R / abs(R))
if spinor:
R = (R * sqrt(alpha))
return (R, r_list) |
def init_distributed_mode(args):
if (('RANK' in os.environ) and ('WORLD_SIZE' in os.environ)):
args.rank = int(os.environ['RANK'])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif ('SLURM_PROCID' in os.environ):
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = (args.rank % torch.cuda.device_count())
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
print('GPU:{}, rank:{}'.format(args.gpu, args.rank))
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(args.rank, args.dist_url), flush=True)
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
dist.barrier() |
def _migrate_v66(preset: dict) -> dict:
if (preset['game'] == 'cave_story'):
excluded = set(preset['configuration']['available_locations']['excluded_indices'])
excluded = excluded.union((30, 31))
preset['configuration']['available_locations']['excluded_indices'] = sorted(excluded)
return preset |
class Effect6510(BaseEffect):
type = 'passive'
@staticmethod
def handler(fit, src, context, projectionRange, **kwargs):
fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Capital Projectile Turret')), 'speed', src.getModifiedItemAttr('shipBonusDreadnoughtM2'), skill='Minmatar Dreadnought', **kwargs) |
class AssetConfigurationMixin():
def create_benefit_feature(self, sponsor_benefit, **kwargs):
if (not self.ASSET_CLASS):
raise NotImplementedError('Subclasses of AssetConfigurationMixin must define an ASSET_CLASS attribute.')
benefit_feature = super().create_benefit_feature(sponsor_benefit, **kwargs)
content_object = sponsor_benefit.sponsorship
if (self.related_to == AssetsRelatedTo.SPONSOR.value):
content_object = sponsor_benefit.sponsorship.sponsor
asset_qs = content_object.assets.filter(internal_name=self.internal_name)
if (not asset_qs.exists()):
asset = self.ASSET_CLASS(content_object=content_object, internal_name=self.internal_name)
asset.save()
return benefit_feature
def get_clone_kwargs(self, new_benefit):
kwargs = super().get_clone_kwargs(new_benefit)
kwargs['internal_name'] = f'{self.internal_name}_{new_benefit.year}'
due_date = kwargs.get('due_date')
if due_date:
kwargs['due_date'] = due_date.replace(year=new_benefit.year)
return kwargs
class Meta():
abstract = True |
class ColoredCommand(Command):
def parse_args(self, ctx: Context, args: list[str]) -> list[str]:
if ((not args) and self.no_args_is_help and (not ctx.resilient_parsing)):
click.echo(ctx.get_help(), color=ctx.color)
ctx.exit()
parser = self.make_parser(ctx)
(opts, args, param_order) = parser.parse_args(args=args)
for param in _iter_params_for_processing(param_order, self.get_params(ctx)):
(value, args) = param.handle_parse_result(ctx, opts, args)
if (args and (not ctx.allow_extra_args) and (not ctx.resilient_parsing)):
ctx.fail(ngettext('Got unexpected extra argument ({args})', 'Got unexpected extra arguments ({args})', len(args)).format(args=' '.join(map(str, args))))
ctx.args = args
ctx._opt_prefixes.update(parser._opt_prefixes)
return args
def format_help(self: Command, ctx: Context, formatter: Any) -> None:
console.print(f'''[b]pytask[/b] [dim]v{version}[/]
''', justify='center', highlight=False)
console.print(f'''Usage: [b]pytask[/b] [b]{self.name}[/b] [b][OPTIONS][/b] [b][PATHS][/b]
''')
console.print(self.help, style='dim')
console.print()
_print_options(self, ctx)
console.print('[bold #FF0000]♥[/] [#f2f2f2]https://pytask-dev.readthedocs.io[/]', justify='right') |
def test_use_function_in_current_file(config, workspace, code_action_context):
document = create_document(workspace, 'function.py')
document2 = create_document(workspace, 'method_object.py')
line = 0
pos = (document.lines[line].index('def add') + 4)
selection = Range((line, pos), (line, pos))
response = plugin.pylsp_code_actions(config=config, workspace=workspace, document=document, range=selection, context=code_action_context)
expected: typing.CodeAction = {'title': 'Use function for current file only', 'kind': 'refactor', 'command': {'title': 'Use function for current file only', 'command': commands.COMMAND_REFACTOR_USE_FUNCTION, 'arguments': [{'document_uri': document.uri, 'position': selection['start'], 'documents': [document.uri]}]}}
assert (expected in response)
assert (expected['command'] is not None)
command = expected['command']['command']
arguments = expected['command']['arguments']
response = plugin.pylsp_execute_command(config=config, workspace=workspace, command=command, arguments=arguments)
edit_request = workspace._endpoint.request.call_args
workspace_edit = assert_is_apply_edit_request(edit_request)
assert_modified_documents(workspace_edit, {document.uri})
new_text = assert_text_edits(workspace_edit['changes'][document.uri], target='use_function.py')
assert ('{add(a, b)}' in new_text)
assert_unmodified_document(workspace_edit, document2.uri) |
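# Scores a molecule by how closely its composition matches a target molecular
# formula: one Gaussian-modified atom-count term per element plus a term for the
# total atom count, combined with an arithmetic or geometric mean.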
class IsomerScoringFunction(MoleculewiseScoringFunction):
def __init__(self, molecular_formula: str, mean_function='geometric') -> None:
super().__init__()
self.mean_function = self.determine_mean_function(mean_function)
self.scoring_functions = self.determine_scoring_functions(molecular_formula)
@staticmethod
def determine_mean_function(mean_function: str) -> Callable[([List[float]], float)]:
if (mean_function == 'arithmetic'):
return arithmetic_mean
if (mean_function == 'geometric'):
return geometric_mean
raise ValueError(f'Invalid mean function: "{mean_function}"')
@staticmethod
def determine_scoring_functions(molecular_formula: str) -> List[RdkitScoringFunction]:
element_occurrences = parse_molecular_formula(molecular_formula)
total_number_atoms = sum((element_tuple[1] for element_tuple in element_occurrences))
functions = [RdkitScoringFunction(descriptor=AtomCounter(element), score_modifier=GaussianModifier(mu=n_atoms, sigma=1.0)) for (element, n_atoms) in element_occurrences]
functions.append(RdkitScoringFunction(descriptor=num_atoms, score_modifier=GaussianModifier(mu=total_number_atoms, sigma=2.0)))
return functions
def raw_score(self, smiles: str) -> float:
scores = [f.score(smiles) for f in self.scoring_functions]
if (self.corrupt_score in scores):
return self.corrupt_score
return self.mean_function(scores) |
class _ClassInitVisitor(_AssignVisitor):
def __init__(self, scope_visitor, self_name):
super().__init__(scope_visitor)
self.self_name = self_name
def _Attribute(self, node):
if (not isinstance(node.ctx, ast.Store)):
return
if (isinstance(node.value, ast.Name) and (node.value.id == self.self_name)):
if (node.attr not in self.scope_visitor.names):
self.scope_visitor.names[node.attr] = pynamesdef.AssignedName(lineno=node.lineno, module=self.scope_visitor.get_module())
if (self.assigned_ast is not None):
pyname = self.scope_visitor.names[node.attr]
if isinstance(pyname, pynamesdef.AssignedName):
pyname.assignments.append(pynamesdef.AssignmentValue(self.assigned_ast))
def _Tuple(self, node):
if (not isinstance(node.ctx, ast.Store)):
return
for child in ast.iter_child_nodes(node):
self.visit(child)
def _Name(self, node):
pass
def _FunctionDef(self, node):
pass
def _ClassDef(self, node):
pass
def _For(self, node):
pass
def _With(self, node):
pass |
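# Resume a social-auth partial pipeline: load the stored partial by token and
# only continue with it when it belongs to the current backend and, if the
# request carries an ID, when that ID matches the stored uid; otherwise the
# stale partial is discarded.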
def partial_pipeline_data(backend, user=None, partial_token=None, *args, **kwargs):
request_data = backend.strategy.request_data()
partial_argument_name = backend.setting('PARTIAL_PIPELINE_TOKEN_NAME', 'partial_token')
partial_token = (partial_token or request_data.get(partial_argument_name) or backend.strategy.session_get(PARTIAL_TOKEN_SESSION_NAME, None))
if partial_token:
partial = backend.strategy.partial_load(partial_token)
partial_matches_request = False
if (partial and (partial.backend == backend.name)):
partial_matches_request = True
if (backend.ID_KEY in request_data):
id_from_partial = partial.kwargs.get('uid')
id_from_request = request_data.get(backend.ID_KEY)
if (id_from_partial != id_from_request):
partial_matches_request = False
if partial_matches_request:
if user:
kwargs.setdefault('user', user)
kwargs.setdefault('request', request_data)
partial.extend_kwargs(kwargs)
return partial
else:
backend.strategy.clean_partial_pipeline(partial_token) |
class KnownValues(unittest.TestCase):
def test_ip_adc2(self):
myadc.max_memory = 20
myadc.incore_complete = False
(e, t_amp1, t_amp2) = myadc.kernel_gs()
self.assertAlmostEqual(e, (- 0.), 6)
(e, v, p, x) = myadc.kernel(nroots=3)
self.assertAlmostEqual(e[0], 0., 6)
self.assertAlmostEqual(e[1], 0., 6)
self.assertAlmostEqual(e[2], 0., 6)
self.assertAlmostEqual(p[0], 1., 6)
self.assertAlmostEqual(p[1], 1., 6)
self.assertAlmostEqual(p[2], 1., 6)
def test_ip_adc2x(self):
myadc.max_memory = 20
myadc.incore_complete = False
myadc.method = 'adc(2)-x'
(e, t_amp1, t_amp2) = myadc.kernel_gs()
self.assertAlmostEqual(e, (- 0.), 6)
(e, v, p, x) = myadc.kernel(nroots=3)
self.assertAlmostEqual(e[0], 0., 6)
self.assertAlmostEqual(e[1], 0., 6)
self.assertAlmostEqual(e[2], 0., 6)
self.assertAlmostEqual(p[0], 1., 6)
self.assertAlmostEqual(p[1], 1., 6)
self.assertAlmostEqual(p[2], 1., 6)
def test_ip_adc3(self):
myadc.max_memory = 20
myadc.incore_complete = False
myadc.method = 'adc(3)'
(e, t_amp1, t_amp2) = myadc.kernel_gs()
self.assertAlmostEqual(e, (- 0.), 6)
(e, v, p, x) = myadc.kernel(nroots=4)
self.assertAlmostEqual(e[0], 0., 6)
self.assertAlmostEqual(e[1], 0., 6)
self.assertAlmostEqual(e[2], 0., 6)
self.assertAlmostEqual(e[3], 1., 6)
self.assertAlmostEqual(p[0], 1., 6)
self.assertAlmostEqual(p[1], 1., 6)
self.assertAlmostEqual(p[2], 1., 6)
self.assertAlmostEqual(p[3], 0., 6) |
(persist=eval(os.getenv('PERSISTENT')))
def compute_acc_between_years(keywords, model_path_1, model_path_2, all_model_vectors=False, top_k_acc=200, skip_same_word_pairs=True, skip_duplicates=True):
(kw1, em1, sim_matrix_1) = compute_similarity_matrix_keywords(model_path_1, all_model_vectors=all_model_vectors, keywords=keywords)
(kw2, em2, sim_matrix_2) = compute_similarity_matrix_keywords(model_path_2, all_model_vectors=all_model_vectors, keywords=keywords)
word_pairs = compute_acceleration_matrix(keywords, sim_matrix_1=sim_matrix_1, sim_matrix_2=sim_matrix_2, top_k_acc=top_k_acc, skip_same_word_pairs=skip_same_word_pairs, skip_duplicates=skip_duplicates)
return (word_pairs, em1, em2) |
def collect_files(img_dir, gt_dir):
assert isinstance(img_dir, str)
assert img_dir
assert isinstance(gt_dir, str)
assert gt_dir
(ann_list, imgs_list) = ([], [])
for gt_file in os.listdir(gt_dir):
ann_list.append(osp.join(gt_dir, gt_file))
imgs_list.append(osp.join(img_dir, gt_file.replace('.json', '.png')))
files = list(zip(sorted(imgs_list), sorted(ann_list)))
assert len(files), f'No images found in {img_dir}'
print(f'Loaded {len(files)} images from {img_dir}')
return files |
def sort_verts_by_loops(face):
start_loop = max(face.loops, key=(lambda loop: loop.vert.co.to_tuple()[:2]))
verts = []
current_loop = start_loop
while (len(verts) < len(face.loops)):
verts.append(current_loop.vert)
current_loop = current_loop.link_loop_prev
return verts |
def test_get_update_appearance(gl, resp_application_appearance):
appearance = gl.appearance.get()
assert (appearance.title == title)
assert (appearance.description == description)
appearance.title = new_title
appearance.description = new_description
appearance.save()
assert (appearance.title == new_title)
assert (appearance.description == new_description) |
class Command(BaseCommand):
def help(self):
return _('Creates a generic user')
def add_arguments(self, parser):
parser.add_argument('user-type', type=str, help=_('The type of the user to be created'))
parser.add_argument('password', type=str, help=_('The password of the user to be created'))
parser.add_argument('email', type=str, help=_('E-mail address of the user to bo be created'))
parser.add_argument('--no-input', action='store_true')
def handle(self, **options):
available_types = ('private', 'superuser')
user_type = options.get('user-type')
email = options.get('email')
no_input = options.get('no_input')
validate_email(email)
if (user_type not in available_types):
raise ValueError(f"{_('Invalid user type, available types:')} {available_types}.")
is_private = (user_type == 'private')
username = (settings.GENERIC_PRIVATEUSER_USERNAME if is_private else settings.GENERIC_SUPERUSER_USERNAME)
confirmation = ('y' if no_input else input((_('A user with username %(username)s, generic type %(user_type)s and email %(email)s will be created. Continue? y/N: ') % {'username': username, 'user_type': user_type, 'email': email})))
if (confirmation != 'y'):
self.stdout.write(_('Command aborted.'))
return
try:
guser = Author.objects.create_user(username=username, email=email, is_active=True, is_novice=False, is_private=is_private, application_status='AP', message_preference='DS', password=options.get('password'))
self.stdout.write((_('generic_%(user_type)s has been created with the username %(username)s and email %(email)s. You can edit the details of this user via admin page if you wish.') % {'username': guser.username, 'user_type': user_type, 'email': guser.email}))
except IntegrityError:
self.stdout.write(_('Error: either there is an existing user with given username or e-mail is in use.')) |
class AmazonOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.amazon.AmazonOAuth2'
user_data_url = 'https://api.amazon.com/user/profile'
expected_username = 'FooBar'
access_token_body = json.dumps({'access_token': 'foobar', 'token_type': 'bearer'})
user_data_body = json.dumps({'user_id': 'amzn1.account.ABCDE1234', 'email': '', 'name': 'Foo Bar'})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline() |
def test_top_level_non_blocking_ifc_in_deep_net():
class Top(Component):
def construct(s):
s.push = CalleeIfcCL()
s.pull = CalleeIfcCL()
s.inner = Top_less_less_inner()
s.inner.push //= s.push
s.inner.pull //= s.pull
def line_trace(s):
return s.inner.line_trace()
def done(s):
return True
class Top_less_less_inner(Component):
def construct(s):
s.push = CalleeIfcCL()
s.pull = CalleeIfcCL()
s.inner = Top_less_inner()
s.inner.push //= s.push
s.inner.pull //= s.pull
def line_trace(s):
return s.inner.line_trace()
def done(s):
return True
class Top_less_inner(Component):
def construct(s):
s.push = CalleeIfcCL()
s.pull = CalleeIfcCL()
s.inner = TestModuleNonBlockingIfc()
s.inner.push //= s.push
s.inner.pull //= s.pull
def line_trace(s):
return s.inner.line_trace()
def done(s):
return True
num_cycles = _test_TestModuleNonBlockingIfc(Top)
assert (num_cycles == (3 + 10)) |
def _parse_args(kwargs, excludes=()):
kwargs = {k: v for (k, v) in kwargs.items() if ((v is not None) and (k not in excludes))}
check_dom_name_value(kwargs.get('name', ''), '`name`')
kwargs.update(kwargs.get('other_html_attrs', {}))
kwargs.pop('other_html_attrs', None)
if kwargs.get('validate'):
kwargs['onblur'] = True
valid_func = kwargs.pop('validate', (lambda _: None))
if kwargs.get('onchange'):
onchange_func = kwargs['onchange']
kwargs['onchange'] = True
else:
onchange_func = (lambda _: None)
return (kwargs, valid_func, onchange_func) |
class ToolTipsTestCases(unittest.TestCase):
def setUp(self):
Timings.fast()
self.texts = [u'', u'New', u'Open', u'Save', u'Cut', u'Copy', u'Paste', u'Print', u'About', u'Help']
app = Application()
app.start(os.path.join(mfc_samples_folder, 'CmnCtrl1.exe'))
self.app = app
self.dlg = app.Common_Controls_Sample
self.dlg.move_mouse_input(coords=((- 100), (- 100)), absolute=True)
self.dlg.TabControl.select(u'CToolBarCtrl')
self.ctrl = self.dlg.Toolbar.get_tool_tips_control()
def tearDown(self):
self.app.kill()
def testFriendlyClass(self):
self.assertEqual(self.ctrl.friendly_class_name(), 'ToolTips')
def testGetProperties(self):
props = self.ctrl.get_properties()
self.assertEqual(self.ctrl.friendly_class_name(), props['friendly_class_name'])
self.assertEqual(self.ctrl.texts(), props['texts'])
for prop_name in props:
self.assertEqual(getattr(self.ctrl, prop_name)(), props[prop_name])
def test_get_tip(self):
self.assertRaises(IndexError, self.ctrl.get_tip, 99)
tip = self.ctrl.get_tip(1)
self.assertEqual(tip.text, self.texts[1])
def test_tool_count(self):
self.assertEqual(10, self.ctrl.tool_count())
def test_get_tip_text(self):
self.assertEqual(self.texts[1], self.ctrl.get_tip_text(1))
def test_texts(self):
self.dlg.move_mouse_input(coords=(0, 0), absolute=False)
ActionLogger().log(('ToolTips texts = ' + ';'.join(self.ctrl.texts())))
self.assertEqual(self.ctrl.texts()[0], '')
self.assertEqual(self.ctrl.texts()[1:], self.texts) |
class Effect6563(BaseEffect):
type = 'passive'
@staticmethod
def handler(fit, src, context, projectionRange, **kwargs):
lvl = src.level
fit.fighters.filteredItemBoost((lambda mod: mod.item.requiresSkill('Heavy Fighters')), 'fighterAbilityMissilesDamageMultiplier', (src.getModifiedItemAttr('damageMultiplierBonus') * lvl), **kwargs)
fit.fighters.filteredItemBoost((lambda mod: mod.item.requiresSkill('Heavy Fighters')), 'fighterAbilityAttackMissileDamageMultiplier', (src.getModifiedItemAttr('damageMultiplierBonus') * lvl), **kwargs)
fit.fighters.filteredItemBoost((lambda mod: mod.item.requiresSkill('Heavy Fighters')), 'fighterAbilityAttackTurretDamageMultiplier', (src.getModifiedItemAttr('damageMultiplierBonus') * lvl), **kwargs) |
class ChannelLock3(AsyncContextManagerMixin):
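# A lock sketched on top of a zero-capacity memory channel: the first
# acquirer only flips the flag, later acquirers block on send(None) until
# release() drains the channel via receive_nowait().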
def __init__(self) -> None:
(self.s, self.r) = open_memory_channel[None](0)
self.acquired = False
def acquire_nowait(self) -> None:
assert (not self.acquired)
self.acquired = True
async def acquire(self) -> None:
if self.acquired:
(await self.s.send(None))
else:
self.acquired = True
(await _core.checkpoint())
def release(self) -> None:
try:
self.r.receive_nowait()
except _core.WouldBlock:
assert self.acquired
self.acquired = False |
class FakeHDF5FileHandler2(FakeHDF5FileHandler):
num_scans = 2
num_cols = 2048
@property
def _rows_per_scan(self):
return self.filetype_info.get('rows_per_scan', 10)
def get_test_content(self, filename, filename_info, filetype_info):
global_attrs = {'/attr/Observing Beginning Date': '2019-01-01', '/attr/Observing Ending Date': '2019-01-01', '/attr/Observing Beginning Time': '18:27:39.720', '/attr/Observing Ending Time': '18:38:36.728'}
global_attrs = self._set_sensor_attrs(global_attrs)
self._add_tbb_coefficients(global_attrs)
data = self._get_data_file_content()
test_content = {}
test_content.update(global_attrs)
test_content.update(data)
test_content.update(_get_calibration(self.num_scans))
return test_content
def _set_sensor_attrs(self, global_attrs):
if ('mersi2_l1b' in self.filetype_info['file_type']):
global_attrs['/attr/Satellite Name'] = 'FY-3D'
global_attrs['/attr/Sensor Identification Code'] = 'MERSI'
elif ('mersi_ll' in self.filetype_info['file_type']):
global_attrs['/attr/Satellite Name'] = 'FY-3E'
global_attrs['/attr/Sensor Identification Code'] = 'MERSI LL'
return global_attrs
def _get_data_file_content(self):
if ('_geo' in self.filetype_info['file_type']):
return self._add_geo_data_file_content()
return self._add_band_data_file_content()
def _add_geo_data_file_content(self):
num_scans = self.num_scans
rows_per_scan = self._rows_per_scan
return _get_geo_data(num_scans, rows_per_scan, self._num_cols_for_file_type, self._geo_prefix_for_file_type)
def _add_band_data_file_content(self):
num_cols = self._num_cols_for_file_type
num_scans = self.num_scans
rows_per_scan = self._rows_per_scan
is_mersi2 = self.filetype_info['file_type'].startswith('mersi2_')
is_1km = ('_1000' in self.filetype_info['file_type'])
data_func = (_get_1km_data if is_1km else (_get_250m_data if is_mersi2 else _get_250m_ll_data))
return data_func(num_scans, rows_per_scan, num_cols)
def _add_tbb_coefficients(self, global_attrs):
if (not self.filetype_info['file_type'].startswith('mersi2_')):
return
if ('_1000' in self.filetype_info['file_type']):
global_attrs['/attr/TBB_Trans_Coefficient_A'] = np.array(([1.0] * 6))
global_attrs['/attr/TBB_Trans_Coefficient_B'] = np.array(([0.0] * 6))
else:
global_attrs['/attr/TBB_Trans_Coefficient_A'] = np.array(([0.0] * 6))
global_attrs['/attr/TBB_Trans_Coefficient_B'] = np.array(([0.0] * 6))
@property
def _num_cols_for_file_type(self):
return (self.num_cols if ('1000' in self.filetype_info['file_type']) else (self.num_cols * 2))
@property
def _geo_prefix_for_file_type(self):
return ('Geolocation/' if ('1000' in self.filetype_info['file_type']) else '') |
@_state_transitions.register
def _handle_receive_withdraw_expired(action: ReceiveWithdrawExpired, channel_state: NettingChannelState, block_number: BlockNumber, **kwargs: Optional[Dict[(Any, Any)]]) -> TransitionResult:
events: List[Event] = []
withdraw_state = channel_state.partner_state.withdraws_pending.get(action.total_withdraw)
if (not withdraw_state):
invalid_withdraw_expired_msg = f'Withdraw expired of {action.total_withdraw} did not correspond to previous withdraw request'
return TransitionResult(channel_state, [EventInvalidReceivedWithdrawExpired(attempted_withdraw=action.total_withdraw, reason=invalid_withdraw_expired_msg)])
is_valid = is_valid_withdraw_expired(channel_state=channel_state, state_change=action, withdraw_state=withdraw_state, block_number=block_number)
if is_valid:
del channel_state.partner_state.withdraws_pending[withdraw_state.total_withdraw]
channel_state.partner_state.nonce = action.nonce
coop_settle = channel_state.partner_state.initiated_coop_settle
if (coop_settle is not None):
if ((coop_settle.total_withdraw_initiator == withdraw_state.total_withdraw) and (coop_settle.expiration == withdraw_state.expiration)):
channel_state.partner_state.initiated_coop_settle = None
send_processed = SendProcessed(recipient=channel_state.partner_state.address, recipient_metadata=withdraw_state.recipient_metadata, message_identifier=action.message_identifier, canonical_identifier=CANONICAL_IDENTIFIER_UNORDERED_QUEUE)
events = [send_processed]
else:
error_msg = is_valid.as_error_message
assert error_msg, 'is_valid_withdraw_expired should return error msg if not valid'
invalid_withdraw_expired = EventInvalidReceivedWithdrawExpired(attempted_withdraw=action.total_withdraw, reason=error_msg)
events = [invalid_withdraw_expired]
return TransitionResult(channel_state, events) |
def create_task(coro: Coroutine) -> (asyncio.Task | None):
try:
# get_running_loop() raises RuntimeError when no loop is running; it never returns a falsy value
asyncio.get_running_loop()
except RuntimeError:
return None
def tidy(task: asyncio.Task) -> None:
TASKS.remove(task)
task = asyncio.create_task(coro)
TASKS.append(task)
task.add_done_callback(tidy)
return task |
class AvoidObstaclesBaseEnv(BaseEnv):
DEFAULT_EPISODE_JSON: str
ASSET_UID: str
tcp: sapien.Link
def __init__(self, episode_json=None, **kwargs):
if (episode_json is None):
episode_json = self.DEFAULT_EPISODE_JSON
episode_json = format_path(episode_json)
if (not Path(episode_json).exists()):
raise FileNotFoundError(f'Episode json ({episode_json}) is not found. To download the default json: `python -m mani_skill2.utils.download_asset {self.ASSET_UID}`.')
self.episodes = load_json(episode_json)
self.episode_idx = None
self.episode_config = None
super().__init__(**kwargs)
def _get_default_scene_config(self):
scene_config = super()._get_default_scene_config()
scene_config.contact_offset = 0.01
return scene_config
def reset(self, *args, seed=None, episode_idx=None, reconfigure=False, **kwargs):
self.set_episode_rng(seed)
if (episode_idx is None):
episode_idx = self._episode_rng.choice(len(self.episodes))
if (episode_idx != self.episode_idx):
reconfigure = True
self.episode_idx = episode_idx
self.episode_config = self.episodes[episode_idx]
return super().reset(*args, seed=self._episode_seed, reconfigure=reconfigure, **kwargs)
def _build_cube(self, half_size, color=(1, 0, 0), name='cube', static=True, render_material: sapien.RenderMaterial=None):
if (render_material is None):
render_material = self._renderer.create_material()
render_material.set_base_color(np.hstack([color, 1.0]))
builder = self._scene.create_actor_builder()
builder.add_box_collision(half_size=half_size)
builder.add_box_visual(half_size=half_size, material=render_material)
if static:
return builder.build_static(name)
else:
return builder.build(name)
def _build_coord_frame_site(self, scale=0.1, name='coord_frame'):
builder = self._scene.create_actor_builder()
radius = (scale * 0.05)
half_length = (scale * 0.5)
builder.add_capsule_visual(sapien.Pose(p=[(scale * 0.5), 0, 0], q=[1, 0, 0, 0]), radius=radius, half_length=half_length, color=[1, 0, 0], name='x')
builder.add_capsule_visual(sapien.Pose(p=[0, (scale * 0.5), 0], q=[0.707, 0, 0, 0.707]), radius=radius, half_length=half_length, color=[0, 1, 0], name='y')
builder.add_capsule_visual(sapien.Pose(p=[0, 0, (scale * 0.5)], q=[0.707, 0, (- 0.707), 0]), radius=radius, half_length=half_length, color=[0, 0, 1], name='z')
actor = builder.build_static(name)
actor.hide_visual()
return actor
def _load_actors(self):
self._add_ground(render=(self.bg_name is None))
if ('wall' in self.episode_config):
cfg = self.episode_config['wall']
self.wall = self._build_cube(cfg['half_size'], color=(1, 1, 1), name='wall')
self.wall.set_pose(Pose(cfg['pose'][:3], cfg['pose'][3:]))
self.obstacles = []
for (i, cfg) in enumerate(self.episode_config['obstacles']):
actor = self._build_cube(cfg['half_size'], self._episode_rng.rand(3), name=f'obstacle_{i}')
actor.set_pose(Pose(cfg['pose'][:3], cfg['pose'][3:]))
self.obstacles.append(actor)
self.goal_site = self._build_coord_frame_site(scale=0.05)
def _initialize_agent(self):
qpos = self.episode_config['start_qpos']
self.agent.reset(qpos)
self.agent.robot.set_pose(Pose([0, 0, 0]))
def _update_goal_to_obstacle_dist(self):
obstacle_pos = [actor.pose.p for actor in self.obstacles]
goal_pos = self.goal_pose.p
goal_to_obstacle_dist = [np.linalg.norm((goal_pos - x)) for x in obstacle_pos]
self.goal_to_obstacle_dist = np.sort(goal_to_obstacle_dist)
def _initialize_task(self):
end_pose = self.episode_config['end_pose']
self.goal_pose = Pose(end_pose[:3], end_pose[3:])
self.goal_site.set_pose(self.goal_pose)
self._update_goal_to_obstacle_dist()
def _get_obs_agent(self):
obs = self.agent.get_proprioception()
obs['base_pose'] = vectorize_pose(self.agent.robot.pose)
return obs
def _get_obs_extra(self) -> OrderedDict:
tcp_pose = self.tcp.pose
goal_pose = self.goal_pose
return OrderedDict(tcp_pose=vectorize_pose(tcp_pose), goal_pose=vectorize_pose(goal_pose))
def evaluate(self, **kwargs) -> dict:
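# Orientation error from the relative quaternion: angle = 2 * arccos(q_w),
# wrapped into [0, pi] and converted to degrees; success requires the TCP
# within 2.5 cm and 15 degrees of the goal pose.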
tcp_pose_at_goal = (self.goal_pose.inv() * self.tcp.pose)
pos_dist = np.linalg.norm(tcp_pose_at_goal.p)
ang_dist = (np.arccos(tcp_pose_at_goal.q[0]) * 2)
if (ang_dist > np.pi):
ang_dist = (ang_dist - (2 * np.pi))
ang_dist = np.abs(ang_dist)
ang_dist = np.rad2deg(ang_dist)
success = ((pos_dist <= 0.025) and (ang_dist <= 15))
return dict(pos_dist=pos_dist, ang_dist=ang_dist, success=success)
def compute_dense_reward(self, info, **kwargs):
if info['success']:
return 10.0
pos_threshold = 0.025
ang_threshold = 15
reward = 0.0
(pos_dist, ang_dist) = (info['pos_dist'], info['ang_dist'])
num_obstacles = len(self.obstacles)
close_to_goal_reward = ((4.0 * np.sum((pos_dist < self.goal_to_obstacle_dist))) / num_obstacles)
angular_reward = 0.0
smallest_g2o_dist = self.goal_to_obstacle_dist[0]
if (pos_dist < smallest_g2o_dist):
angular_reward = (3.0 * (1 - np.tanh((np.maximum((ang_dist - ang_threshold), 0.0) / 180))))
if (ang_dist <= 25):
close_to_goal_reward += (2.0 * (1 - np.tanh((np.maximum((pos_dist - pos_threshold), 0.0) / smallest_g2o_dist))))
contacts = self._scene.get_contacts()
max_impulse_norm = get_articulation_max_impulse_norm(contacts, self.agent.robot)
reward = ((close_to_goal_reward + angular_reward) - (50.0 * max_impulse_norm))
return reward
def _register_cameras(self):
pose = look_at([(- 0.25), 0, 1.2], [0.6, 0, 0.6])
return CameraConfig('base_camera', pose.p, pose.q, 128, 128, (np.pi / 2), 0.01, 10)
def _register_render_cameras(self):
pose = look_at([1.5, 0, 1.5], [0.0, 0.0, 0.5])
return CameraConfig('render_camera', pose.p, pose.q, 512, 512, 1, 0.01, 10)
def _setup_viewer(self):
super()._setup_viewer()
self._viewer.set_camera_xyz(1.5, 0.0, 1.5)
self._viewer.set_camera_rpy(0, (- 0.6), 3.14)
def render(self, mode='human'):
if (mode in ['human', 'rgb_array']):
self.goal_site.unhide_visual()
ret = super().render(mode=mode)
self.goal_site.hide_visual()
else:
ret = super().render(mode=mode)
return ret |
def test_pretend_move(tmpfolder):
tmpfolder.join('a-file.txt').write('text')
tmpfolder.join('another-file.txt').write('text')
tmpfolder.join('a-dir').ensure_dir()
fs.move('a-file.txt', target='a-dir')
assert tmpfolder.join('a-dir/a-file.txt').check()
fs.move('another-file.txt', target='a-dir', pretend=True)
assert (not tmpfolder.join('a-dir/another-file.txt').check())
assert tmpfolder.join('another-file.txt').check() |
def scan_setup_py():
found = set()
setters = False
errors = 0
with open('setup.py', 'r') as f:
for line in f.readlines():
if ('import versioneer' in line):
found.add('import')
if ('versioneer.get_cmdclass()' in line):
found.add('cmdclass')
if ('versioneer.get_version()' in line):
found.add('get_version')
if ('versioneer.VCS' in line):
setters = True
if ('versioneer.versionfile_source' in line):
setters = True
if (len(found) != 3):
print('')
print('Your setup.py appears to be missing some important items')
print('(but I might be wrong). Please make sure it has something')
print('roughly like the following:')
print('')
print(' import versioneer')
print(' setup( version=versioneer.get_version(),')
print(' cmdclass=versioneer.get_cmdclass(), ...)')
print('')
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print('now lives in setup.cfg, and should be removed from setup.py')
print('')
errors += 1
return errors |
class BernoulliLikelihood(NewtonOneDimensionalLikelihood):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, function_samples, nobs=None, **kwargs):
return base_distributions.Bernoulli(logits=function_samples)
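# For Bernoulli targets y with logits f: d log p / df = y - sigmoid(f),
# and the negative Hessian is diag(sigmoid(f) * (1 - sigmoid(f))).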
def grad_f(self, f, targets):
exp_f = f.exp()
return (targets - (exp_f / (1 + exp_f))).unsqueeze((- 1))
def neg_hessian_f(self, f, targets=None):
exp_f = f.exp()
prob_f = (exp_f / (1 + exp_f))
return DiagLazyTensor((prob_f * (1 - prob_f)))
def main(argv):
pycaffe_dir = os.path.dirname(__file__)
parser = argparse.ArgumentParser()
parser.add_argument('input_file', help="Input txt/csv filename. If .txt, must be list of filenames. If .csv, must be comma-separated file with header 'filename, xmin, ymin, xmax, ymax'")
parser.add_argument('output_file', help='Output h5/csv filename. Format depends on extension.')
parser.add_argument('--model_def', default=os.path.join(pycaffe_dir, '../models/bvlc_reference_caffenet/deploy.prototxt'), help='Model definition file.')
parser.add_argument('--pretrained_model', default=os.path.join(pycaffe_dir, '../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'), help='Trained model weights file.')
parser.add_argument('--crop_mode', default='selective_search', choices=CROP_MODES, help='How to generate windows for detection.')
parser.add_argument('--gpu', action='store_true', help='Switch for gpu computation.')
parser.add_argument('--mean_file', default=os.path.join(pycaffe_dir, 'caffe/imagenet/ilsvrc_2012_mean.npy'), help=('Data set image mean of H x W x K dimensions (numpy array). ' + "Set to '' for no mean subtraction."))
parser.add_argument('--input_scale', type=float, help='Multiply input features by this scale to finish preprocessing.')
parser.add_argument('--raw_scale', type=float, default=255.0, help='Multiply raw input by this scale before preprocessing.')
parser.add_argument('--channel_swap', default='2,1,0', help=('Order to permute input channels. The default converts ' + 'RGB -> BGR since BGR is the Caffe default by way of OpenCV.'))
parser.add_argument('--context_pad', type=int, default=16, help='Amount of surrounding context to collect in input window.')
args = parser.parse_args()
(mean, channel_swap) = (None, None)
if args.mean_file:
mean = np.load(args.mean_file)
if (mean.shape[1:] != (1, 1)):
mean = mean.mean(1).mean(1)
if args.channel_swap:
channel_swap = [int(s) for s in args.channel_swap.split(',')]
if args.gpu:
caffe.set_mode_gpu()
print('GPU mode')
else:
caffe.set_mode_cpu()
print('CPU mode')
detector = caffe.Detector(args.model_def, args.pretrained_model, mean=mean, input_scale=args.input_scale, raw_scale=args.raw_scale, channel_swap=channel_swap, context_pad=args.context_pad)
t = time.time()
print('Loading input...')
if args.input_file.lower().endswith('txt'):
with open(args.input_file) as f:
inputs = [_.strip() for _ in f.readlines()]
elif args.input_file.lower().endswith('csv'):
inputs = pd.read_csv(args.input_file, sep=',', dtype={'filename': str})
inputs.set_index('filename', inplace=True)
else:
raise Exception('Unknown input file type: not in txt or csv.')
if (args.crop_mode == 'list'):
images_windows = [(ix, inputs.iloc[np.where((inputs.index == ix))][COORD_COLS].values) for ix in inputs.index.unique()]
detections = detector.detect_windows(images_windows)
else:
detections = detector.detect_selective_search(inputs)
print('Processed {} windows in {:.3f} s.'.format(len(detections), (time.time() - t)))
df = pd.DataFrame(detections)
df.set_index('filename', inplace=True)
df[COORD_COLS] = pd.DataFrame(data=np.vstack(df['window']), index=df.index, columns=COORD_COLS)
del df['window']
t = time.time()
if args.output_file.lower().endswith('csv'):
class_cols = ['class{}'.format(x) for x in range(NUM_OUTPUT)]
df[class_cols] = pd.DataFrame(data=np.vstack(df['feat']), index=df.index, columns=class_cols)
df.to_csv(args.output_file, columns=(COORD_COLS + class_cols))
else:
df.to_hdf(args.output_file, 'df', mode='w')
print('Saved to {} in {:.3f} s.'.format(args.output_file, (time.time() - t))) |
class PyzoSourceStructure(QtWidgets.QWidget):
def __init__(self, parent):
QtWidgets.QWidget.__init__(self, parent)
toolId = self.__class__.__name__.lower()
self._config = pyzo.config.tools[toolId]
if (not hasattr(self._config, 'showTypes')):
self._config.showTypes = ['class', 'def', 'cell', 'todo']
if (not hasattr(self._config, 'level')):
self._config.level = 2
self._nav = {}
self._navbut_back = QtWidgets.QToolButton(self)
self._navbut_back.setIcon(pyzo.icons.arrow_left)
self._navbut_back.setIconSize(QtCore.QSize(16, 16))
self._navbut_back.setStyleSheet('QToolButton { border: none; padding: 0px; }')
self._navbut_back.clicked.connect(self.onNavBack)
self._navbut_forward = QtWidgets.QToolButton(self)
self._navbut_forward.setIcon(pyzo.icons.arrow_right)
self._navbut_forward.setIconSize(QtCore.QSize(16, 16))
self._navbut_forward.setStyleSheet('QToolButton { border: none; padding: 0px; }')
self._navbut_forward.clicked.connect(self.onNavForward)
self._slider = QtWidgets.QSlider(QtCore.Qt.Horizontal, self)
self._slider.setTickPosition(QtWidgets.QSlider.TicksBelow)
self._slider.setSingleStep(1)
self._slider.setPageStep(1)
self._slider.setRange(1, 5)
self._slider.setValue(self._config.level)
self._slider.valueChanged.connect(self.updateStructure)
self._options = QtWidgets.QToolButton(self)
self._options.setIcon(pyzo.icons.filter)
self._options.setIconSize(QtCore.QSize(16, 16))
self._options.setPopupMode(self._options.InstantPopup)
self._options.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self._options._menu = QtWidgets.QMenu()
self._options.setMenu(self._options._menu)
self._tree = QtWidgets.QTreeWidget(self)
self._tree.setHeaderHidden(True)
self._tree.itemCollapsed.connect(self.updateStructure)
self._tree.itemClicked.connect(self.onItemClick)
self._sizer1 = QtWidgets.QVBoxLayout(self)
self._sizer2 = QtWidgets.QHBoxLayout()
self._sizer1.setSpacing(2)
margin = pyzo.config.view.widgetMargin
self._sizer1.setContentsMargins(margin, margin, margin, margin)
self._sizer1.addLayout(self._sizer2, 0)
self._sizer1.addWidget(self._tree, 1)
self._sizer2.addWidget(self._navbut_back, 0)
self._sizer2.addWidget(self._navbut_forward, 0)
self._sizer2.addStretch(1)
self._sizer2.addWidget(self._slider, 6)
self._sizer2.addStretch(1)
self._sizer2.addWidget(self._options, 0)
self.setLayout(self._sizer1)
self._currentEditorId = 0
pyzo.editors.currentChanged.connect(self.onEditorsCurrentChanged)
pyzo.editors.parserDone.connect(self.updateStructure)
self._options.pressed.connect(self.onOptionsPress)
self._options._menu.triggered.connect(self.onOptionMenuTriggered)
self.onOptionsPress()
self.onEditorsCurrentChanged()
def onOptionsPress(self):
menu = self._options._menu
menu.clear()
for type in ['class', 'def', 'cell', 'todo', 'import', 'attribute']:
checked = (type in self._config.showTypes)
action = menu.addAction(('Show %s' % type))
action.setCheckable(True)
action.setChecked(checked)
def onOptionMenuTriggered(self, action):
type = action.text().split(' ', 1)[1]
if (type in self._config.showTypes):
while (type in self._config.showTypes):
self._config.showTypes.remove(type)
else:
self._config.showTypes.append(type)
self.updateStructure()
def onEditorsCurrentChanged(self):
editor = pyzo.editors.getCurrentEditor()
self._tree.clear()
if (editor is None):
self._currentEditorId = 0
if (editor is not None):
self._currentEditorId = id(editor)
text = ((translate('pyzoSourceStructure', 'Parsing ') + editor._name) + ' ...')
QtWidgets.QTreeWidgetItem(self._tree, [text])
self.updateStructure()
def _getCurrentNav(self):
if (not self._currentEditorId):
return None
if (self._currentEditorId not in self._nav):
self._nav[self._currentEditorId] = Navigation()
return self._nav[self._currentEditorId]
def onNavBack(self):
nav = self._getCurrentNav()
if ((not nav) or (not nav.back)):
return
linenr = nav.back.pop((- 1))
old_linenr = self._navigate_to_line(linenr)
if (old_linenr is not None):
nav.forward.append(old_linenr)
def onNavForward(self):
nav = self._getCurrentNav()
if ((not nav) or (not nav.forward)):
return
linenr = nav.forward.pop((- 1))
old_linenr = self._navigate_to_line(linenr)
if (old_linenr is not None):
nav.back.append(old_linenr)
def onItemClick(self, item):
if (not item.linenr):
item = item.parent()
old_linenr = self._navigate_to_line(item.linenr)
if (old_linenr is not None):
nav = self._getCurrentNav()
if (nav and ((not nav.back) or (nav.back[(- 1)] != old_linenr))):
nav.back.append(old_linenr)
nav.forward = []
def _navigate_to_line(self, linenr):
editor = pyzo.editors.getCurrentEditor()
if (not editor):
return None
old_linenr = (editor.textCursor().blockNumber() + 1)
editor.gotoLine(linenr)
pyzo.callLater(editor.setFocus)
return old_linenr
def updateStructure(self):
editor = pyzo.editors.getCurrentEditor()
if (not editor):
return
result = pyzo.parser._getResult()
if (result is None):
return
(id0, id1, id2) = (self._currentEditorId, id(editor), result.editorId)
if ((id0 != id1) or (id0 != id2)):
return
ln = editor.textCursor().blockNumber()
ln += 1
def get_color(name, sub='fore'):
parts = [part.partition(':') for part in theme[name].split(',')]
colors = {k.strip(): v.strip() for (k, _, v) in parts}
return colors[sub]
try:
theme = pyzo.themes[pyzo.config.settings.theme.lower()]['data']
colours = {'cell': get_color('syntax.python.cellcomment'), 'class': get_color('syntax.classname'), 'def': get_color('syntax.functionname'), 'attribute': get_color('syntax.comment'), 'import': get_color('syntax.keyword'), 'todo': get_color('syntax.todocomment'), 'nameismain': get_color('syntax.keyword'), 'background': get_color('editor.text', 'back'), 'currentline': get_color('editor.highlightcurrentline', 'back')}
except Exception as err:
print('Reverting to default source structure colors:', str(err))
colours = {'cell': '#b58900', 'class': '#cb4b16', 'def': '#073642', 'attribute': '#657b83', 'import': '#268bd2', 'todo': '#d33682', 'nameismain': '#859900', 'background': '#fff', 'currentline': '#ccc'}
showTypes = self._config.showTypes
showLevel = int(self._slider.value())
self._config.level = showLevel
showLevel = (showLevel if (showLevel < 5) else 99)
selectedItem = [None]
def SetItems(parentItem, fictiveObjects, level):
level += 1
for object in fictiveObjects:
type = object.type
if ((type not in showTypes) and (type != 'nameismain')):
continue
if (type == 'import'):
text = (' %s (%s)' % (object.name, object.text))
elif (type == 'todo'):
text = object.name
elif (type == 'nameismain'):
text = object.text
elif (type == 'class'):
text = object.name
elif (type == 'def'):
text = (object.name + '()')
elif (type == 'attribute'):
text = ('- ' + object.name)
elif (type in ('cell', '##', '#%%', '# %%')):
type = 'cell'
text = (('## ' + object.name) + (' ' * 120))
else:
text = ('%s %s' % (type, object.name))
thisItem = QtWidgets.QTreeWidgetItem(parentItem, [text])
color = QtGui.QColor(colours[object.type])
thisItem.setForeground(0, QtGui.QBrush(color))
font = thisItem.font(0)
font.setBold(True)
if (type == 'cell'):
font.setUnderline(True)
thisItem.setFont(0, font)
thisItem.linenr = object.linenr
if (ln and (object.linenr <= ln) and (object.linenr2 > ln)):
selectedItem[0] = thisItem
if object.children:
SetItems(thisItem, object.children, level)
thisItem.setExpanded(bool((level < showLevel)))
self._tree.setStyleSheet((('background-color: ' + colours['background']) + ';'))
self._tree.setUpdatesEnabled(False)
self._tree.clear()
SetItems(self._tree, result.rootItem.children, 0)
self._tree.setUpdatesEnabled(True)
selectedItem = selectedItem[0]
if selectedItem:
selectedItem.setBackground(0, QtGui.QBrush(QtGui.QColor(colours['currentline'])))
self._tree.scrollToItem(selectedItem) |
def fillin_tokens4gts(generator_tokens, mlm_tgts):
size = len(generator_tokens)
data_out = []
for lidx in range(size):
tokens = generator_tokens[lidx]
tgts = mlm_tgts[lidx]
if (len(tgts) < 1):
data_out.append(tokens)
continue
(counter, token) = (0, [])
for idx in range(len(tokens)):
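# 103 is assumed to be the [MASK] token id of a BERT-style vocabulary;
# each masked position is filled with the next MLM target token.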
if (tokens[idx] == 103):
try:
token.append(tgts[counter])
except IndexError:
print(token, counter)
counter += 1
else:
token.append(tokens[idx])
data_out.append(token)
return data_out |
class MultiSimilarityLoss(torch.nn.Module):
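# Thin wrapper over pytorch-metric-learning: MultiSimilarityMiner selects
# hard positive/negative pairs that are then scored by MultiSimilarityLoss.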
def __init__(self):
super(MultiSimilarityLoss, self).__init__()
self.thresh = 0.5
self.epsilon = 0.1
self.scale_pos = 2
self.scale_neg = 50
self.miner = miners.MultiSimilarityMiner(epsilon=self.epsilon)
self.loss_func = losses.MultiSimilarityLoss(self.scale_pos, self.scale_neg, self.thresh)
def forward(self, embeddings, labels):
hard_pairs = self.miner(embeddings, labels)
loss = self.loss_func(embeddings, labels, hard_pairs)
return loss |
class _ModelTrainingRound():
class EarlyStopNecessary(Exception):
pass
def __init__(self, model_trainer: SmilesRnnTrainer, training_data, test_data, n_epochs, batch_size, print_every, valid_every, num_workers=0) -> None:
self.model_trainer = model_trainer
self.training_data = training_data
self.test_data = test_data
self.n_epochs = n_epochs
self.batch_size = batch_size
self.print_every = print_every
self.valid_every = valid_every
self.num_workers = num_workers
self.start_time = time()
self.unprocessed_train_losses: List[float] = []
self.all_train_losses: List[float] = []
self.all_valid_losses: List[float] = []
self.n_molecules_so_far = 0
self.has_run = False
self.min_valid_loss = np.inf
self.min_avg_train_loss = np.inf
def run(self):
if self.has_run:
raise Exception('_ModelTrainingRound.run() can be called only once.')
try:
for epoch_index in range(1, (self.n_epochs + 1)):
self._train_one_epoch(epoch_index)
self._validation_on_final_model()
except _ModelTrainingRound.EarlyStopNecessary:
logger.error('Probable explosion during training. Stopping now.')
self.has_run = True
return (self.all_train_losses, self.all_valid_losses)
def _train_one_epoch(self, epoch_index: int):
logger.info(f'EPOCH {epoch_index}')
data_loader = DataLoader(self.training_data, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, pin_memory=True)
epoch_t0 = time()
self.unprocessed_train_losses.clear()
for (batch_index, batch) in enumerate(data_loader):
self._train_one_batch(batch_index, batch, epoch_index, epoch_t0)
def _train_one_batch(self, batch_index, batch, epoch_index, train_t0):
(loss, size) = self.model_trainer.train_on_batch(batch)
self.unprocessed_train_losses += [loss]
self.n_molecules_so_far += size
if ((batch_index > 0) and ((batch_index % self.print_every) == 0)):
self._report_training_progress(batch_index, epoch_index, epoch_start=train_t0)
if ((batch_index >= 0) and ((batch_index % self.valid_every) == 0)):
self._report_validation_progress(epoch_index)
def _report_training_progress(self, batch_index, epoch_index, epoch_start):
mols_sec = self._calculate_mols_per_second(batch_index, epoch_start)
avg_train_loss = np.array(self.unprocessed_train_losses).mean()
self.all_train_losses += [avg_train_loss]
self.unprocessed_train_losses.clear()
logger.info(f'TRAIN | elapsed: {time_since(self.start_time)} | epoch|batch : {epoch_index}|{batch_index} ({self._get_overall_progress():.1f}%) | molecules: {self.n_molecules_so_far} | mols/sec: {mols_sec:.2f} | train_loss: {avg_train_loss:.4f}')
self.model_trainer.train_extra_log(self.n_molecules_so_far)
self._check_early_stopping_train_loss(avg_train_loss)
def _calculate_mols_per_second(self, batch_index, epoch_start):
train_time_in_current_epoch = (time() - epoch_start)
processed_batches = (batch_index + 1)
molecules_in_current_epoch = (self.batch_size * processed_batches)
return (molecules_in_current_epoch / train_time_in_current_epoch)
def _report_validation_progress(self, epoch_index):
avg_valid_loss = self._validate_current_model()
self._log_validation_step(epoch_index, avg_valid_loss)
self._check_early_stopping_validation(avg_valid_loss)
if self.model_trainer.log_dir:
if (avg_valid_loss <= min(self.all_valid_losses)):
self._save_current_model(self.model_trainer.log_dir, epoch_index, avg_valid_loss)
def _validate_current_model(self):
test_loader = DataLoader(self.test_data, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers, pin_memory=True)
avg_valid_loss = self.model_trainer.validate(test_loader, self.n_molecules_so_far)
self.all_valid_losses += [avg_valid_loss]
return avg_valid_loss
def _log_validation_step(self, epoch_index, avg_valid_loss):
logger.info(f'VALID | elapsed: {time_since(self.start_time)} | epoch: {epoch_index}/{self.n_epochs} ({self._get_overall_progress():.1f}%) | molecules: {self.n_molecules_so_far} | valid_loss: {avg_valid_loss:.4f}')
self.model_trainer.valid_extra_log(self.n_molecules_so_far)
logger.info('')
def _get_overall_progress(self):
total_mols = (self.n_epochs * len(self.training_data))
return ((100.0 * self.n_molecules_so_far) / total_mols)
def _validation_on_final_model(self):
valid_loss = self._validate_current_model()
logger.info(f'VALID | FINAL_MODEL | elapsed: {time_since(self.start_time)} | molecules: {self.n_molecules_so_far} | valid_loss: {valid_loss:.4f}')
if self.model_trainer.log_dir:
self._save_model(self.model_trainer.log_dir, 'final', valid_loss)
def _save_current_model(self, base_dir, epoch, valid_loss):
for f in glob(os.path.join(base_dir, 'model_*')):
os.remove(f)
self._save_model(base_dir, epoch, valid_loss)
def _save_model(self, base_dir, info, valid_loss):
base_name = f'model_{info}_{valid_loss:.3f}'
logger.info(base_name)
save_model(self.model_trainer.model, base_dir, base_name)
def _check_early_stopping_train_loss(self, avg_train_loss):
threshold = (10 * self.min_avg_train_loss)
if (avg_train_loss > threshold):
raise _ModelTrainingRound.EarlyStopNecessary()
if (avg_train_loss < self.min_avg_train_loss):
self.min_avg_train_loss = avg_train_loss
def _check_early_stopping_validation(self, avg_valid_loss):
threshold = (2 * self.min_valid_loss)
if (avg_valid_loss > threshold):
raise _ModelTrainingRound.EarlyStopNecessary()
if (avg_valid_loss < self.min_valid_loss):
self.min_valid_loss = avg_valid_loss |
def similarity_of_words(qdmr_w, sql_w):
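# F1 score of the longest common substring: precision/recall are its length
# relative to each word, combined as 2pr/(p + r); matches of size <= 1 score 0.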
match = SequenceMatcher(None, qdmr_w, sql_w).find_longest_match(0, len(qdmr_w), 0, len(sql_w))
if (match.size > 1):
p = (match.size / len(qdmr_w))
r = (match.size / len(sql_w))
return (((2 * p) * r) / (p + r))
else:
return 0.0 |
class RangeAttribute(IntAttribute):
min_value: int
max_value: int
values: Optional[List[int]] = None
@classmethod
def redoc(cls) -> None:
assert (cls.__doc__ is not None)
super(RangeAttribute, cls).redoc()
cls.__doc__ += ('\n:range: %s <= value <= %s' % (cls.min_value, cls.max_value))
if cls.values:
cls.__doc__ += (' or in %s' % cls.values)
def pre_set(self, value: int) -> int:
if (not (self.min_value <= value <= self.max_value)):
if (not self.values):
raise ValueError(('%r is an invalid value for attribute %s, should be between %r and %r' % (value, self.visa_name, self.min_value, self.max_value)))
elif (value not in self.values):
raise ValueError(('%r is an invalid value for attribute %s, should be between %r and %r or %r' % (value, self.visa_name, self.min_value, self.max_value, self.values)))
return value |
class HeaderDict(MultiDict):
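# Case-insensitive header storage: keys are normalized through _hkey and
# values through _hval; __getitem__ returns the most recently added value.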
def __init__(self, *a, **ka):
self.dict = {}
if (a or ka):
self.update(*a, **ka)
def __contains__(self, key):
return (_hkey(key) in self.dict)
def __delitem__(self, key):
del self.dict[_hkey(key)]
def __getitem__(self, key):
return self.dict[_hkey(key)][(- 1)]
def __setitem__(self, key, value):
self.dict[_hkey(key)] = [_hval(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(_hval(value))
def replace(self, key, value):
self.dict[_hkey(key)] = [_hval(value)]
def getall(self, key):
return (self.dict.get(_hkey(key)) or [])
def get(self, key, default=None, index=(- 1)):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in (_hkey(n) for n in names):
if (name in self.dict):
del self.dict[name] |
def test_get_formatted_immutable_mapping():
class ReadOnlyMapping(typing.Mapping):
def __init__(self, *args, **kwargs):
self._data = dict(*args, **kwargs)
def __getitem__(self, key):
return self._data[key]
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
input_obj = {'key': '{ctx}'}
context = Context({'ctx': ReadOnlyMapping({'arb': 1})})
output = context.get_formatted_value(input_obj)
assert (output is not input_obj)
assert isinstance(output['key'], ReadOnlyMapping)
assert (output['key'] == {'arb': 1}) |
def check_importable(dist, attr, value):
try:
ep = metadata.EntryPoint(value=value, name=None, group=None)
assert (not ep.extras)
except (TypeError, ValueError, AttributeError, AssertionError) as e:
raise DistutilsSetupError(("%r must be importable 'module:attrs' string (got %r)" % (attr, value))) from e |
class SeparableConv2d(nn.Module):
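# Depthwise-separable convolution: a per-channel (grouped) depthwise conv,
# normalization, then a 1x1 pointwise conv that mixes channels.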
def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, norm_layer=None, norm_kwargs=None):
super(SeparableConv2d, self).__init__()
norm_kwargs = (norm_kwargs if (norm_kwargs is not None) else {})
self.kernel_size = kernel_size
self.dilation = dilation
padding = get_padding(kernel_size, stride, dilation)
self.conv_dw = nn.Conv2d(inplanes, inplanes, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=inplanes, bias=bias)
self.bn = norm_layer(num_features=inplanes, **norm_kwargs)
self.conv_pw = nn.Conv2d(inplanes, planes, kernel_size=1, bias=bias)
def forward(self, x):
x = self.conv_dw(x)
x = self.bn(x)
x = self.conv_pw(x)
return x |
@pytest.mark.skipif((not HAVE_DEPS_FOR_RESOURCE_ESTIMATES), reason='pyscf and/or jax not installed.')
@pytest.mark.slow
def test_generate_costing_table_thc():
mf = make_diamond_113_szv()
thc_rank_params = np.array([2, 4, 6])
table = generate_costing_table(mf, thc_rank_params=thc_rank_params, chi=10, beta=22, dE_for_qpe=0.001, bfgs_maxiter=10, adagrad_maxiter=10, fft_df_mesh=([11] * 3))
assert np.allclose(table.dE, 0.001)
assert np.allclose(table.chi, 10)
assert np.allclose(table.beta, 22)
assert np.allclose(table.cutoff, thc_rank_params) |
class TestChannels(unittest.TestCase):
def test_wav(self):
actual = file_info.channels(INPUT_FILE)
expected = 1
self.assertEqual(expected, actual)
def test_wav_pathlib(self):
actual = file_info.channels(Path(INPUT_FILE))
expected = 1
self.assertEqual(expected, actual)
def test_aiff(self):
actual = file_info.channels(INPUT_FILE2)
expected = 3
self.assertEqual(expected, actual)
def test_empty(self):
actual = file_info.channels(EMPTY_FILE)
expected = 1
self.assertEqual(expected, actual) |
class TestVisibilityNotify(EndianTest):
def setUp(self):
# window id 0x37cc1441 (936121409) recovered from the big-endian bytes of evt_bin_0
self.evt_args_0 = {'sequence_number': 38616, 'state': 253, 'type': 174, 'window': 936121409}
self.evt_bin_0 = b'\xae\x00\x96\xd87\xcc\x14A\xfd\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
def testPack0(self):
bin = event.VisibilityNotify._fields.to_binary(*(), **self.evt_args_0)
self.assertBinaryEqual(bin, self.evt_bin_0)
def testUnpack0(self):
(args, remain) = event.VisibilityNotify._fields.parse_binary(self.evt_bin_0, dummy_display, 1)
self.assertBinaryEmpty(remain)
self.assertEqual(args, self.evt_args_0) |
def _put_config(key: str, value: str, config: Dict[(str, Any)]) -> None:
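# Recursively expands dotted keys into nested dicts, e.g.
# _put_config('a.b.c', '1', cfg) leaves cfg == {'a': {'b': {'c': '1'}}}.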
idx = key.find('.')
if (idx < 0):
config[key] = value
elif (idx == (len(key) - 1)):
raise ValueError(f'Illegal config key `{key}`. Key should not have a `.` suffix')
else:
first_key = key[:idx]
rest_keys = key[(idx + 1):]
nested_config = config.setdefault(first_key, {})
_put_config(rest_keys, value, nested_config) |
def test_invalid_unicode_2(app):
token = '4JPCOLIVMAY32Q3XGVPHC4CBF8SKII5FWNYMASOFDIVSXTC5I5NBU'.encode('utf-8')
header = ('basic ' + b64encode((b'devtable+somerobot:%s' % token)).decode('ascii'))
result = validate_basic_auth(header)
assert (result == ValidateResult(AuthKind.basic, error_message='Could not find robot with username: devtable+somerobot and supplied password.')) |
def test_load_rsa_nist_pss_verification_vectors():
vector_data = textwrap.dedent('\n # CAVS 11.0\n # "SigVer PKCS#1 RSASSA-PSS" information\n # Mod sizes selected: 1024 1536 2048 3072 4096\n # SHA Algorithm selected:SHA1 SHA224 SHA256 SHA384 SHA512\n # Salt len: 10\n # Generated on Wed Mar 02 00:25:22 2011\n\n [mod = 1024]\n\n n = be499b5e7f06c83fa0293e31465c8eb6b5\n\n p = e7a80c5d211c06acbf26d365f\n q = d248aa248000fda67b711940c\n\n SHAAlg = SHA1\n e = \n d = c8e26acf49b3422a07c4d834ba\n Msg = 6b9cfac0ba1c7890b13e381ce752195c\n S = 562d87b5781c01d166fef3972669a0495c\n SaltVal = \n Result = F (3 - Signature changed )\n\n SHAAlg = SHA384\n e = 000003\n d = 0d0f17362bdad181db4e1fe03e8de1a320\n Msg = 2a67c70ff14f9b34ddb42e6f89d59710\n S = 2b91c6ae2b3c46ff18d5b7abe239634cb7\n SaltVal = \n Result = P\n ').splitlines()
vectors = load_rsa_nist_vectors(vector_data)
assert (vectors == [{'modulus': int('be499b5e7f06c83fa0293e31465c8eb6b5', 16), 'p': int('e7a80c5d211c06acbf26d365f', 16), 'q': int('d248aa248000fda67b711940c', 16), 'public_exponent': 17, 'algorithm': 'SHA1', 'private_exponent': int('c8e26acf49b3422a07c4d834ba', 16), 'msg': b'6b9cfac0ba1c7890b13e381ce752195c', 's': b'562d87b5781c01d166fef3972669a0495c', 'saltval': b'', 'salt_length': 10, 'fail': True}, {'modulus': int('be499b5e7f06c83fa0293e31465c8eb6b5', 16), 'p': int('e7a80c5d211c06acbf26d365f', 16), 'q': int('d248aa248000fda67b711940c', 16), 'public_exponent': 3, 'algorithm': 'SHA384', 'private_exponent': int('0d0f17362bdad181db4e1fe03e8de1a320', 16), 'msg': b'2a67c70ff14f9b34ddb42e6f89d59710', 's': b'2b91c6ae2b3c46ff18d5b7abe239634cb7', 'saltval': b'', 'salt_length': 10, 'fail': False}]) |
class _libusb_device_descriptor(Structure):
_fields_ = [('bLength', c_uint8), ('bDescriptorType', c_uint8), ('bcdUSB', c_uint16), ('bDeviceClass', c_uint8), ('bDeviceSubClass', c_uint8), ('bDeviceProtocol', c_uint8), ('bMaxPacketSize0', c_uint8), ('idVendor', c_uint16), ('idProduct', c_uint16), ('bcdDevice', c_uint16), ('iManufacturer', c_uint8), ('iProduct', c_uint8), ('iSerialNumber', c_uint8), ('bNumConfigurations', c_uint8)] |
@require_vision
class GitProcessorTest(unittest.TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
image_processor = CLIPImageProcessor()
tokenizer = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel', model_input_names=['input_ids', 'attention_mask'])
processor = GitProcessor(image_processor, tokenizer)
processor.save_pretrained(self.tmpdirname)
def get_tokenizer(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
def get_image_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def prepare_image_inputs(self):
image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
image_inputs = [Image.fromarray(np.moveaxis(x, 0, (- 1))) for x in image_inputs]
return image_inputs
def test_save_load_pretrained_additional_features(self):
processor = GitProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
processor = GitProcessor.from_pretrained(self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
def test_image_processor(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = GitProcessor(tokenizer=tokenizer, image_processor=image_processor)
image_input = self.prepare_image_inputs()
input_feat_extract = image_processor(image_input, return_tensors='np')
input_processor = processor(images=image_input, return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=0.01)
def test_tokenizer(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = GitProcessor(tokenizer=tokenizer, image_processor=image_processor)
input_str = 'lower newer'
encoded_processor = processor(text=input_str)
encoded_tok = tokenizer(input_str, return_token_type_ids=False)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def test_processor(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = GitProcessor(tokenizer=tokenizer, image_processor=image_processor)
input_str = 'lower newer'
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input)
self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])
with pytest.raises(ValueError):
processor()
def test_tokenizer_decode(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = GitProcessor(tokenizer=tokenizer, image_processor=image_processor)
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids)
decoded_tok = tokenizer.batch_decode(predicted_ids)
self.assertListEqual(decoded_tok, decoded_processor)
def test_model_input_names(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = GitProcessor(tokenizer=tokenizer, image_processor=image_processor)
input_str = 'lower newer'
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input)
self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values']) |
def test_digit_version():
assert (digit_version('0.2.16') == (0, 2, 16, 0, 0, 0))
assert (digit_version('1.2.3') == (1, 2, 3, 0, 0, 0))
assert (digit_version('1.2.3rc0') == (1, 2, 3, 0, (- 1), 0))
assert (digit_version('1.2.3rc1') == (1, 2, 3, 0, (- 1), 1))
assert (digit_version('1.0rc0') == (1, 0, 0, 0, (- 1), 0))
assert (digit_version('1.0') == digit_version('1.0.0'))
assert (digit_version('1.5.0+cuda90_cudnn7.6.3_lms') == digit_version('1.5'))
assert (digit_version('1.0.0dev') < digit_version('1.0.0a'))
assert (digit_version('1.0.0a') < digit_version('1.0.0a1'))
assert (digit_version('1.0.0a') < digit_version('1.0.0b'))
assert (digit_version('1.0.0b') < digit_version('1.0.0rc'))
assert (digit_version('1.0.0rc1') < digit_version('1.0.0'))
assert (digit_version('1.0.0') < digit_version('1.0.0post'))
assert (digit_version('1.0.0post') < digit_version('1.0.0post1'))
assert (digit_version('v1') == (1, 0, 0, 0, 0, 0))
assert (digit_version('v1.1.5') == (1, 1, 5, 0, 0, 0)) |
class Registry():
def __init__(self, name):
self._name = name
self._module_dict = dict()
def __len__(self):
return len(self._module_dict)
def __contains__(self, key):
return (self.get(key) is not None)
def __repr__(self):
format_str = (self.__class__.__name__ + f'(name={self._name}, items={self._module_dict})')
return format_str
@property
def name(self):
return self._name
@property
def module_dict(self):
return self._module_dict
def get(self, key):
return self._module_dict.get(key, None)
def _register_module(self, module_class, module_name=None, force=False):
if (not (inspect.isclass(module_class) or inspect.isfunction(module_class))):
raise TypeError(f'module must be a class or a function, but got {type(module_class)}')
if (module_name is None):
module_name = module_class.__name__
if ((not force) and (module_name in self._module_dict)):
raise KeyError(f'{module_name} is already registered in {self.name}')
self._module_dict[module_name] = module_class
def register_module(self, name=None, force=False, module=None):
if (not isinstance(force, bool)):
raise TypeError(f'force must be a boolean, but got {type(force)}')
if (not ((name is None) or isinstance(name, str))):
raise TypeError(f'name must be a str, but got {type(name)}')
if (module is not None):
self._register_module(module_class=module, module_name=name, force=force)
return module
def _register(cls):
self._register_module(module_class=cls, module_name=name, force=force)
return cls
return _register |
def modify(struct: Structure, path: PathLike, modifier: Callable[([AbstractContent, FileOp], ResolvedLeaf)]) -> Structure:
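# Returns a deep copy of `struct` in which the leaf at `path` is replaced by
# modifier(old_content, old_file_op); missing parent dicts are created on the way.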
path_parts = Path(path).parts
root = deepcopy(struct)
last_parent: dict = root
name = path_parts[(- 1)]
for parent in path_parts[:(- 1)]:
last_parent = last_parent.setdefault(parent, {})
old_value = resolve_leaf(last_parent.get(name))
new_value = modifier(*old_value)
last_parent[name] = _merge_leaf(old_value, new_value)
return root |
@click.command()
def main() -> None:
tool_versions = parse_pre_commit_config_tool_versions(find_pre_commit_config_file())
project_requirements = parse_requirements_dev(find_requirements_dev_file())
errors = check_pre_commit_tool_versions(tool_versions, project_requirements)
if errors:
click.secho('pre-commit config is out of sync!', fg='red')
click.echo(' - ', nl=False)
click.echo('\n - '.join((click.style(error, fg='yellow') for error in errors)))
sys.exit(1) |
def NMC_entropic_change_PeymanMPM(sto, c_s_max):
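# Entropic change dU/dT of an NMC electrode (Peyman MPM parameter set):
# u_eq is the fitted open-circuit potential and du_dT a quartic polynomial
# in it, presumably fitted in mV/K, hence the 1e-3 scaling.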
u_eq = (((((4.3452 - (1.6518 * sto)) + (1.6225 * (sto ** 2))) - (2.0843 * (sto ** 3))) + (3.5146 * (sto ** 4))) - ((0.5623 * (10 ** (- 4))) * np.exp(((109.451 * sto) - 100.006))))
du_dT = ((((((- 800) + (779 * u_eq)) - (284 * (u_eq ** 2))) + (46 * (u_eq ** 3))) - (2.8 * (u_eq ** 4))) * (10 ** (- 3)))
return du_dT |
class AttnScoreBertSelfAttention(BertSelfAttention):
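# Essentially BertSelfAttention.forward, except that the masked, pre-softmax
# attention scores are returned alongside the context layer when
# self.output_attentions is set.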
def forward(self, hidden_states, attention_mask, head_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose((- 1), (- 2)))
attention_scores = (attention_scores / math.sqrt(self.attention_head_size))
attention_scores = (attention_scores + attention_mask)
attention_probs = nn.Softmax(dim=(- 1))(attention_scores)
attention_probs = self.dropout(attention_probs)
if (head_mask is not None):
attention_probs = (attention_probs * head_mask)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = (context_layer.size()[:(- 2)] + (self.all_head_size,))
context_layer = context_layer.view(*new_context_layer_shape)
outputs = ((context_layer, attention_scores) if self.output_attentions else (context_layer,))
return outputs |
@dataclasses.dataclass(frozen=True)
class Endpoint():
id: typing.Union[str]
name: str = dataclasses.field(hash=False)
email: str = dataclasses.field(hash=False)
full_name: str = dataclasses.field(hash=False)
is_staff: bool = dataclasses.field(hash=False)
has_sent_submission_to: typing.List[str] = dataclasses.field(hash=False)
has_item_in_schedule: typing.List[str] = dataclasses.field(hash=False)
has_cancelled_talks: typing.List[str] = dataclasses.field(hash=False)
has_ticket: typing.List[str] = dataclasses.field(hash=False)
talks_by_conference: typing.Dict[(str, typing.List[str])] = dataclasses.field(hash=False)
def to_item(self):
conferences_talks = {f'{code}_items_in_schedule': items for (code, items) in self.talks_by_conference.items()}
return {'ChannelType': 'EMAIL', 'Address': self.email, 'Id': self.id, 'User': {'UserId': self.id, 'UserAttributes': {'Name': [self.name], 'FullName': [self.full_name], 'is_staff': [str(self.is_staff)], 'has_item_in_schedule': self.has_item_in_schedule, 'has_cancelled_talks': self.has_cancelled_talks, 'has_ticket': self.has_ticket, **conferences_talks}}} |
def test_storyboard_mangr_input():
egoname = 'Ego'
targetname = 'target'
init = OSC.Init()
step_time = OSC.TransitionDynamics(OSC.DynamicsShapes.step, OSC.DynamicsDimension.time, 1)
egospeed = OSC.AbsoluteSpeedAction(0, step_time)
egostart = OSC.TeleportAction(OSC.LanePosition(25, 0, (- 3), 0))
targetspeed = OSC.AbsoluteSpeedAction(0, step_time)
targetstart = OSC.TeleportAction(OSC.LanePosition(15, 0, (- 2), 0))
init.add_init_action(egoname, egospeed)
init.add_init_action(egoname, egostart)
init.add_init_action(targetname, targetspeed)
init.add_init_action(targetname, targetstart)
trigger = OSC.ValueTrigger('starttrigger', 0, OSC.ConditionEdge.rising, OSC.SimulationTimeCondition(3, OSC.Rule.greaterThan))
event = OSC.Event('myfirstevent', OSC.Priority.overwrite)
event.add_trigger(trigger)
action = OSC.LongitudinalDistanceAction(egoname, max_deceleration=3, max_speed=50, distance=(- 4))
event.add_action('newspeed', action)
man = OSC.Maneuver('my_maneuver')
man.add_event(event)
mangr = OSC.ManeuverGroup('mangroup')
mangr.add_actor(targetname)
mangr.add_maneuver(man)
sb = OSC.StoryBoard(init)
sb.add_maneuver_group(mangr)
prettyprint(sb.get_element())
sb2 = OSC.StoryBoard(init)
sb2.add_maneuver_group(mangr)
sb3 = OSC.StoryBoard(init)
sb3.add_maneuver_group(mangr)
sb3.add_maneuver_group(mangr)
assert (sb == sb2)
assert (sb != sb3)
sb4 = OSC.StoryBoard.parse(sb3.get_element())
assert (sb3 == sb4)
assert (version_validation('Storyboard', sb, 0) == ValidationResponse.OK)
assert (version_validation('Storyboard', sb, 1) == ValidationResponse.OK)
assert (version_validation('Storyboard', sb, 2) == ValidationResponse.OK) |
class Provider():
UNSAFE_PACKAGES: ClassVar[set[str]] = set()
def __init__(self, package: Package, pool: RepositoryPool, io: IO, *, installed: (list[Package] | None)=None, locked: (list[Package] | None)=None) -> None:
self._package = package
self._pool = pool
self._direct_origin = DirectOrigin(self._pool.artifact_cache)
self._io = io
self._env: (Env | None) = None
self._python_constraint = package.python_constraint
self._is_debugging: bool = (self._io.is_debug() or self._io.is_very_verbose())
self._overrides: dict[(DependencyPackage, dict[(str, Dependency)])] = {}
self._deferred_cache: dict[(Dependency, Package)] = {}
self._load_deferred = True
self._source_root: (Path | None) = None
self._installed_packages = (installed if (installed is not None) else [])
self._direct_origin_packages: dict[(str, Package)] = {}
self._locked: dict[(NormalizedName, list[DependencyPackage])] = defaultdict(list)
self._use_latest: Collection[NormalizedName] = []
for package in (locked or []):
self._locked[package.name].append(DependencyPackage(package.to_dependency(), package))
for dependency_packages in self._locked.values():
dependency_packages.sort(key=(lambda p: p.package.version), reverse=True)
@property
def pool(self) -> RepositoryPool:
return self._pool
@property
def use_latest(self) -> Collection[NormalizedName]:
return self._use_latest
@property
def is_debugging(self) -> bool:
return self._is_debugging
def set_overrides(self, overrides: dict[(DependencyPackage, dict[(str, Dependency)])]) -> None:
self._overrides = overrides
def load_deferred(self, load_deferred: bool) -> None:
self._load_deferred = load_deferred
@contextmanager
def use_source_root(self, source_root: Path) -> Iterator[Provider]:
original_source_root = self._source_root
self._source_root = source_root
try:
(yield self)
finally:
self._source_root = original_source_root
@contextmanager
def use_environment(self, env: Env) -> Iterator[Provider]:
original_python_constraint = self._python_constraint
self._env = env
self._python_constraint = Version.parse(env.marker_env['python_full_version'])
try:
(yield self)
finally:
self._env = None
self._python_constraint = original_python_constraint
@contextmanager
def use_latest_for(self, names: Collection[NormalizedName]) -> Iterator[Provider]:
self._use_latest = names
try:
(yield self)
finally:
self._use_latest = []
@staticmethod
def validate_package_for_dependency(dependency: Dependency, package: Package) -> None:
if (dependency.name != package.name):
raise RuntimeError(f"The dependency name for {dependency.name} does not match the actual package's name: {package.name}")
def search_for_installed_packages(self, dependency: Dependency) -> list[Package]:
if (not self._installed_packages):
return []
logger.debug('Falling back to installed packages to discover metadata for <c2>%s</>', dependency.complete_name)
packages = [package for package in self._installed_packages if package.satisfies(dependency, ignore_source_type=True)]
logger.debug('Found <c2>%d</> compatible packages for <c2>%s</>', len(packages), dependency.complete_name)
return packages
def search_for_direct_origin_dependency(self, dependency: Dependency) -> Package:
package = self._deferred_cache.get(dependency)
if (package is not None):
pass
elif dependency.is_vcs():
dependency = cast('VCSDependency', dependency)
package = self._search_for_vcs(dependency)
elif dependency.is_file():
dependency = cast('FileDependency', dependency)
package = self._search_for_file(dependency)
elif dependency.is_directory():
dependency = cast('DirectoryDependency', dependency)
package = self._search_for_directory(dependency)
elif dependency.is_url():
dependency = cast('URLDependency', dependency)
package = self._search_for_url(dependency)
else:
raise RuntimeError(f'{dependency}: unknown direct dependency type {dependency.source_type}')
if dependency.is_vcs():
dependency._source_reference = package.source_reference
dependency._source_resolved_reference = package.source_resolved_reference
dependency._source_subdirectory = package.source_subdirectory
dependency._constraint = package.version
dependency._pretty_constraint = package.version.text
self._deferred_cache[dependency] = package
return package
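# Entry point for candidate lookup: the root package and direct-origin
# dependencies short-circuit; everything else is queried from the pool,
# sorted to prefer non-yanked, stable (unless prereleases are allowed),
# newest versions, with installed packages as a last resort.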
def search_for(self, dependency: Dependency) -> list[DependencyPackage]:
if dependency.is_root:
return PackageCollection(dependency, [self._package])
if dependency.is_direct_origin():
package = self.search_for_direct_origin_dependency(dependency)
self._direct_origin_packages[dependency.name] = package
return PackageCollection(dependency, [package])
direct_origin_package = self._direct_origin_packages.get(dependency.name)
if (direct_origin_package and direct_origin_package.satisfies(dependency)):
packages = [direct_origin_package]
return PackageCollection(dependency, packages)
packages = self._pool.find_packages(dependency)
packages.sort(key=(lambda p: ((not p.yanked), ((not p.is_prerelease()) and (not dependency.allows_prereleases())), p.version)), reverse=True)
if (not packages):
packages = self.search_for_installed_packages(dependency)
return PackageCollection(dependency, packages)
def _search_for_vcs(self, dependency: VCSDependency) -> Package:
package = self._direct_origin.get_package_from_vcs(dependency.vcs, dependency.source, branch=dependency.branch, tag=dependency.tag, rev=dependency.rev, subdirectory=dependency.source_subdirectory, source_root=(self._source_root or (self._env.path.joinpath('src') if self._env else None)))
self.validate_package_for_dependency(dependency=dependency, package=package)
package.develop = dependency.develop
return package
def _search_for_file(self, dependency: FileDependency) -> Package:
dependency.validate(raise_error=True)
package = self._direct_origin.get_package_from_file(dependency.full_path)
self.validate_package_for_dependency(dependency=dependency, package=package)
if (dependency.base is not None):
package.root_dir = dependency.base
package.files = [{'file': dependency.path.name, 'hash': ('sha256:' + get_file_hash(dependency.full_path))}]
return package
def _search_for_directory(self, dependency: DirectoryDependency) -> Package:
dependency.validate(raise_error=True)
package = self._direct_origin.get_package_from_directory(dependency.full_path)
self.validate_package_for_dependency(dependency=dependency, package=package)
package.develop = dependency.develop
if (dependency.base is not None):
package.root_dir = dependency.base
return package
def _search_for_url(self, dependency: URLDependency) -> Package:
package = self._direct_origin.get_package_from_url(dependency.url)
self.validate_package_for_dependency(dependency=dependency, package=package)
for extra in dependency.extras:
if (extra in package.extras):
for dep in package.extras[extra]:
dep.activate()
for extra_dep in package.extras[extra]:
package.add_dependency(extra_dep)
return package
def _get_dependencies_with_overrides(self, dependencies: list[Dependency], package: DependencyPackage) -> list[Dependency]:
overrides = self._overrides.get(package, {})
_dependencies = []
overridden = []
for dep in dependencies:
if (dep.name in overrides):
if (dep.name in overridden):
continue
if (not overrides[dep.name].constraint.is_empty()):
_dependencies.append(overrides[dep.name])
overridden.append(dep.name)
continue
_dependencies.append(dep)
return _dependencies
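# Translate a package's requirements into PubGrub incompatibilities: either a
# single PythonCause (the package's python constraint excludes part of the
# transitive python range) or one DependencyCause per surviving dependency.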
def incompatibilities_for(self, dependency_package: DependencyPackage) -> list[Incompatibility]:
package = dependency_package.package
if package.is_root():
dependencies = package.all_requires
else:
dependencies = package.requires
if (not package.python_constraint.allows_all(self._python_constraint)):
transitive_python_constraint = get_python_constraint_from_marker(dependency_package.dependency.transitive_marker)
intersection = package.python_constraint.intersect(transitive_python_constraint)
difference = transitive_python_constraint.difference(intersection)
difference = difference.intersect(self._python_constraint)
if (transitive_python_constraint.is_any() or self._python_constraint.intersect(dependency_package.dependency.python_constraint).is_empty() or intersection.is_empty() or (not difference.is_empty())):
return [Incompatibility([Term(package.to_dependency(), True)], PythonCause(package.python_versions, str(self._python_constraint)))]
_dependencies = [dep for dep in dependencies if ((dep.name not in self.UNSAFE_PACKAGES) and self._python_constraint.allows_any(dep.python_constraint) and ((not self._env) or dep.marker.validate(self._env.marker_env)))]
dependencies = self._get_dependencies_with_overrides(_dependencies, dependency_package)
return [Incompatibility([Term(package.to_dependency(), True), Term(dep, False)], DependencyCause()) for dep in dependencies]
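# Resolve the full metadata for a package: fetch it from the pool if needed,
# expand requested extras, filter dependencies by python constraint, markers
# and extras, then merge or split duplicate requirements per marker region.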
def complete_package(self, dependency_package: DependencyPackage) -> DependencyPackage:
package = dependency_package.package
dependency = dependency_package.dependency
if package.is_root():
dependency_package = dependency_package.clone()
package = dependency_package.package
dependency = dependency_package.dependency
requires = package.all_requires
elif package.is_direct_origin():
requires = package.requires
else:
try:
dependency_package = DependencyPackage(dependency, self._pool.package(package.pretty_name, package.version, extras=list(dependency.extras), repository_name=dependency.source_name))
except PackageNotFound as e:
try:
dependency_package = next((DependencyPackage(dependency, pkg) for pkg in self.search_for_installed_packages(dependency)))
except StopIteration:
raise e from e
package = dependency_package.package
dependency = dependency_package.dependency
requires = package.requires
optional_dependencies = []
_dependencies = []
if dependency.extras:
for extra in dependency.extras:
if (extra not in package.extras):
continue
optional_dependencies += [d.name for d in package.extras[extra]]
dependency_package = dependency_package.with_features(list(dependency.extras))
package = dependency_package.package
dependency = dependency_package.dependency
new_dependency = package.without_features().to_dependency()
if ((not new_dependency.source_name) and dependency.source_name):
new_dependency.source_name = dependency.source_name
_dependencies.append(new_dependency)
for dep in requires:
if (not self._python_constraint.allows_any(dep.python_constraint)):
continue
if (dep.name in self.UNSAFE_PACKAGES):
continue
if (self._env and (not dep.marker.validate(self._env.marker_env))):
continue
if ((not package.is_root()) and ((dep.is_optional() and (dep.name not in optional_dependencies)) or (dep.in_extras and (not set(dep.in_extras).intersection(dependency.extras))))):
continue
_dependencies.append(dep)
if self._load_deferred:
for dep in _dependencies:
if dep.is_direct_origin():
locked = self.get_locked(dep)
if ((locked is not None) and locked.package.is_same_package_as(dep)):
continue
self.search_for_direct_origin_dependency(dep)
dependencies = self._get_dependencies_with_overrides(_dependencies, dependency_package)
duplicates: dict[(str, list[Dependency])] = defaultdict(list)
for dep in dependencies:
duplicates[dep.complete_name].append(dep)
dependencies = []
for (dep_name, deps) in duplicates.items():
if (len(deps) == 1):
dependencies.append(deps[0])
continue
self.debug(f'<debug>Duplicate dependencies for {dep_name}</debug>')
active_extras = (None if package.is_root() else dependency.extras)
deps = self._resolve_overlapping_markers(package, deps, active_extras)
if (len(deps) == 1):
self.debug(f'<debug>Merging requirements for {dep_name}</debug>')
dependencies.append(deps[0])
continue
def fmt_warning(d: Dependency) -> str:
dependency_marker = (d.marker if (not d.marker.is_any()) else '*')
return f'<c1>{d.name}</c1> <fg=default>(<c2>{d.pretty_constraint}</c2>)</> with markers <b>{dependency_marker}</b>'
warnings = ', '.join((fmt_warning(d) for d in deps[:(- 1)]))
warnings += f' and {fmt_warning(deps[(- 1)])}'
self.debug(f'<warning>Different requirements found for {warnings}.</warning>')
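# The duplicates could not be merged into one requirement; build one override
# set per still-reachable duplicate and restart resolution for each of them
# via OverrideNeeded.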
overrides = []
overrides_marker_intersection: BaseMarker = AnyMarker()
for dep_overrides in self._overrides.values():
for dep in dep_overrides.values():
overrides_marker_intersection = overrides_marker_intersection.intersect(dep.marker)
for dep in deps:
if (not overrides_marker_intersection.intersect(dep.marker).is_empty()):
current_overrides = self._overrides.copy()
package_overrides = current_overrides.get(dependency_package, {}).copy()
package_overrides.update({dep.name: dep})
current_overrides.update({dependency_package: package_overrides})
overrides.append(current_overrides)
if overrides:
raise OverrideNeeded(*overrides)
clean_dependencies = []
for dep in dependencies:
if (not dependency.transitive_marker.without_extras().is_any()):
transitive_marker_intersection = dependency.transitive_marker.without_extras().intersect(dep.marker.without_extras())
if transitive_marker_intersection.is_empty():
continue
dep.transitive_marker = transitive_marker_intersection
if (not dependency.python_constraint.is_any()):
python_constraint_intersection = dep.python_constraint.intersect(dependency.python_constraint)
if python_constraint_intersection.is_empty():
continue
clean_dependencies.append(dep)
package = package.with_dependency_groups([], only=True)
dependency_package = DependencyPackage(dependency, package)
for dep in clean_dependencies:
package.add_dependency(dep)
return dependency_package
def get_locked(self, dependency: Dependency) -> (DependencyPackage | None):
if (dependency.name in self._use_latest):
return None
locked = self._locked.get(dependency.name, [])
for dependency_package in locked:
package = dependency_package.package
if package.satisfies(dependency):
return DependencyPackage(dependency, package)
return None
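# Colorize PubGrub trace lines (fact / selecting / derived / conflict) with
# console markup before writing them to the IO.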
def debug(self, message: str, depth: int=0) -> None:
if (not (self._io.is_very_verbose() or self._io.is_debug())):
return
if message.startswith('fact:'):
if ('depends on' in message):
m = re.match('fact: (.+?) depends on (.+?) \\((.+?)\\)', message)
if (m is None):
raise ValueError(f'Unable to parse fact: {message}')
m2 = re.match('(.+?) \\((.+?)\\)', m.group(1))
if m2:
name = m2.group(1)
version = f' (<c2>{m2.group(2)}</c2>)'
else:
name = m.group(1)
version = ''
message = f'<fg=blue>fact</>: <c1>{name}</c1>{version} depends on <c1>{m.group(2)}</c1> (<c2>{m.group(3)}</c2>)'
elif (' is ' in message):
message = re.sub('fact: (.+) is (.+)', '<fg=blue>fact</>: <c1>\\1</c1> is <c2>\\2</c2>', message)
else:
message = re.sub('(?<=: )(.+?) \\((.+?)\\)', '<c1>\\1</c1> (<c2>\\2</c2>)', message)
message = f"<fg=blue>fact</>: {message.split('fact: ')[1]}"
elif message.startswith('selecting '):
message = re.sub('selecting (.+?) \\((.+?)\\)', '<fg=blue>selecting</> <c1>\\1</c1> (<c2>\\2</c2>)', message)
elif message.startswith('derived:'):
m = re.match('derived: (.+?) \\((.+?)\\)$', message)
if m:
message = f'<fg=blue>derived</>: <c1>{m.group(1)}</c1> (<c2>{m.group(2)}</c2>)'
else:
message = f"<fg=blue>derived</>: <c1>{message.split('derived: ')[1]}</c1>"
elif message.startswith('conflict:'):
m = re.match('conflict: (.+?) depends on (.+?) \\((.+?)\\)', message)
if m:
m2 = re.match('(.+?) \\((.+?)\\)', m.group(1))
if m2:
name = m2.group(1)
version = f' (<c2>{m2.group(2)}</c2>)'
else:
name = m.group(1)
version = ''
message = f'<fg=red;options=bold>conflict</>: <c1>{name}</c1>{version} depends on <c1>{m.group(2)}</c1> (<c2>{m.group(3)}</c2>)'
else:
message = f"<fg=red;options=bold>conflict</>: {message.split('conflict: ')[1]}"
message = message.replace('! ', '<error>!</error> ')
if self.is_debugging():
debug_info = str(message)
debug_info = ('\n'.join([f'<debug>{str(depth).rjust(4)}:</debug> {s}' for s in debug_info.split('\n')]) + '\n')
self._io.write(debug_info)
def _group_by_source(self, dependencies: Iterable[Dependency]) -> list[list[Dependency]]:
groups: list[list[Dependency]] = []
for dep in dependencies:
for group in groups:
if (dep.is_same_source_as(group[0]) and (dep.source_name == group[0].source_name)):
group.append(dep)
break
else:
groups.append([dep])
return groups
def _merge_dependencies_by_constraint(self, dependencies: Iterable[Dependency]) -> list[Dependency]:
dep_groups = self._group_by_source(dependencies)
merged_dependencies = []
for group in dep_groups:
by_constraint: dict[(VersionConstraint, list[Dependency])] = defaultdict(list)
for dep in group:
by_constraint[dep.constraint].append(dep)
for deps in by_constraint.values():
dep = deps[0]
if (len(deps) > 1):
new_markers = (dep.marker for dep in deps)
dep.marker = marker_union(*new_markers)
merged_dependencies.append(dep)
return merged_dependencies
def _is_relevant_marker(self, marker: BaseMarker, active_extras: (Collection[NormalizedName] | None)) -> bool:
return ((not marker.is_empty()) and self._python_constraint.allows_any(get_python_constraint_from_marker(marker)) and ((active_extras is None) or marker.validate({'extra': active_extras})) and ((not self._env) or marker.validate(self._env.marker_env)))
def _resolve_overlapping_markers(self, package: Package, dependencies: list[Dependency], active_extras: (Collection[NormalizedName] | None)) -> list[Dependency]:
dependencies = self._merge_dependencies_by_constraint(dependencies)
new_dependencies = []
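# Enumerate all 2^n combinations of "dependency applies / does not apply":
# each combination defines a disjoint marker region, and the constraints of
# the dependencies that apply there are intersected into a single requirement.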
for uses in itertools.product([True, False], repeat=len(dependencies)):
markers = ((dep.marker if use else dep.marker.invert()) for (use, dep) in sorted(zip(uses, dependencies), key=(lambda ud: ud[0]), reverse=True))
used_marker_intersection: BaseMarker = AnyMarker()
for m in markers:
used_marker_intersection = used_marker_intersection.intersect(m)
if (not self._is_relevant_marker(used_marker_intersection, active_extras)):
continue
constraint: VersionConstraint = VersionRange()
specific_source_dependency = None
used_dependencies = list(itertools.compress(dependencies, uses))
for dep in used_dependencies:
if (dep.is_direct_origin() or dep.source_name):
if (specific_source_dependency and ((not dep.is_same_source_as(specific_source_dependency)) or (dep.source_name != specific_source_dependency.source_name))):
raise IncompatibleConstraintsError(package, dep, specific_source_dependency, with_sources=True)
specific_source_dependency = dep
constraint = constraint.intersect(dep.constraint)
if constraint.is_empty():
raise IncompatibleConstraintsError(package, *used_dependencies)
if (not any(uses)):
constraint = EmptyConstraint()
used_dependencies = dependencies
new_dep = (specific_source_dependency if specific_source_dependency else used_dependencies[0]).with_constraint(constraint)
new_dep.marker = used_marker_intersection
new_dependencies.append(new_dep)
return self._merge_dependencies_by_constraint(new_dependencies) |
class TestBaseHungarianTracker(unittest.TestCase):
def setUp(self):
self._img_size = np.array([600, 800])
self._prev_boxes = np.array([[101, 101, 200, 200], [301, 301, 450, 450]]).astype(np.float32)
self._prev_scores = np.array([0.9, 0.9])
self._prev_classes = np.array([1, 1])
self._prev_masks = np.ones((2, 600, 800)).astype('uint8')
self._curr_boxes = np.array([[302, 303, 451, 452], [101, 102, 201, 203]]).astype(np.float32)
self._curr_scores = np.array([0.95, 0.85])
self._curr_classes = np.array([1, 1])
self._curr_masks = np.ones((2, 600, 800)).astype('uint8')
self._prev_instances = {'image_size': self._img_size, 'pred_boxes': self._prev_boxes, 'scores': self._prev_scores, 'pred_classes': self._prev_classes, 'pred_masks': self._prev_masks}
self._prev_instances = self._convertDictPredictionToInstance(self._prev_instances)
self._curr_instances = {'image_size': self._img_size, 'pred_boxes': self._curr_boxes, 'scores': self._curr_scores, 'pred_classes': self._curr_classes, 'pred_masks': self._curr_masks}
self._curr_instances = self._convertDictPredictionToInstance(self._curr_instances)
self._max_num_instances = 200
self._max_lost_frame_count = 0
self._min_box_rel_dim = 0.02
self._min_instance_period = 1
self._track_iou_threshold = 0.5
def _convertDictPredictionToInstance(self, prediction: Dict) -> Instances:
res = Instances(image_size=torch.IntTensor(prediction['image_size']), pred_boxes=Boxes(torch.FloatTensor(prediction['pred_boxes'])), pred_masks=torch.IntTensor(prediction['pred_masks']), pred_classes=torch.IntTensor(prediction['pred_classes']), scores=torch.FloatTensor(prediction['scores']))
return res
def test_init(self):
cfg = {'_target_': 'detectron2.tracking.hungarian_tracker.BaseHungarianTracker', 'video_height': self._img_size[0], 'video_width': self._img_size[1], 'max_num_instances': self._max_num_instances, 'max_lost_frame_count': self._max_lost_frame_count, 'min_box_rel_dim': self._min_box_rel_dim, 'min_instance_period': self._min_instance_period, 'track_iou_threshold': self._track_iou_threshold}
tracker = instantiate(cfg)
self.assertTrue((tracker._video_height == self._img_size[0]))
def test_initialize_extra_fields(self):
cfg = {'_target_': 'detectron2.tracking.hungarian_tracker.BaseHungarianTracker', 'video_height': self._img_size[0], 'video_width': self._img_size[1], 'max_num_instances': self._max_num_instances, 'max_lost_frame_count': self._max_lost_frame_count, 'min_box_rel_dim': self._min_box_rel_dim, 'min_instance_period': self._min_instance_period, 'track_iou_threshold': self._track_iou_threshold}
tracker = instantiate(cfg)
instances = tracker._initialize_extra_fields(self._curr_instances)
self.assertTrue(instances.has('ID'))
self.assertTrue(instances.has('ID_period'))
self.assertTrue(instances.has('lost_frame_count')) |
def test_continue_on_collection_errors_maxfail(pytester: Pytester) -> None:
pytester.makepyfile(**COLLECTION_ERROR_PY_FILES)
res = pytester.runpytest('--continue-on-collection-errors', '--maxfail=3')
assert (res.ret == 1)
res.stdout.fnmatch_lines(['collected 2 items / 2 errors', '*1 failed, 2 errors*']) |
class Convert():
def __init__(self, arguments):
logger.debug('Initializing %s: (args: %s)', self.__class__.__name__, arguments)
self.args = arguments
Utils.set_verbosity(self.args.loglevel)
self.patch_threads = None
self.images = Images(self.args)
self.validate()
self.alignments = Alignments(self.args, False, self.images.is_video)
Legacy(self.alignments, self.images.input_images, arguments.input_aligned_dir)
self.opts = OptionalActions(self.args, self.images.input_images, self.alignments)
self.add_queues()
self.disk_io = DiskIO(self.alignments, self.images, arguments)
self.predictor = Predict(self.disk_io.load_queue, self.queue_size, arguments)
configfile = (self.args.configfile if hasattr(self.args, 'configfile') else None)
self.converter = Converter(get_folder(self.args.output_dir), self.predictor.output_size, self.predictor.has_predicted_mask, self.disk_io.draw_transparent, self.disk_io.pre_encode, arguments, configfile=configfile)
logger.debug('Initialized %s', self.__class__.__name__)
@property
def queue_size(self):
if self.args.singleprocess:
retval = 16
else:
retval = 32
logger.debug(retval)
return retval
@property
def pool_processes(self):
if self.args.singleprocess:
retval = 1
elif (self.args.jobs > 0):
retval = min(self.args.jobs, total_cpus(), self.images.images_found)
else:
retval = min(total_cpus(), self.images.images_found)
retval = (1 if (retval == 0) else retval)
logger.debug(retval)
return retval
def validate(self):
if ((self.args.writer == 'ffmpeg') and (not self.images.is_video) and (self.args.reference_video is None)):
raise FaceswapError("Output as video selected, but using frames as input. You must provide a reference video ('-ref', '--reference-video').")
output_dir = get_folder(self.args.output_dir)
logger.info('Output Directory: %s', output_dir)
def add_queues(self):
logger.debug('Adding queues. Queue size: %s', self.queue_size)
for qname in ('convert_in', 'convert_out', 'patch'):
queue_manager.add_queue(qname, self.queue_size, multiprocessing_queue=False)
def process(self):
logger.debug('Starting Conversion')
try:
self.convert_images()
self.disk_io.save_thread.join()
queue_manager.terminate_queues()
Utils.finalize(self.images.images_found, self.predictor.faces_count, self.predictor.verify_output)
logger.debug('Completed Conversion')
except MemoryError as err:
msg = "Faceswap ran out of RAM running convert. Conversion is very system RAM heavy, so this can happen in certain circumstances when you have a lot of cpus but not enough RAM to support them all.\nYou should lower the number of processes in use by either setting the 'singleprocess' flag (-sp) or lowering the number of parallel jobs (-j)."
raise FaceswapError(msg) from err
def convert_images(self):
logger.debug('Converting images')
save_queue = queue_manager.get_queue('convert_out')
patch_queue = queue_manager.get_queue('patch')
self.patch_threads = MultiThread(self.converter.process, patch_queue, save_queue, thread_count=self.pool_processes, name='patch')
self.patch_threads.start()
while True:
self.check_thread_error()
if self.disk_io.completion_event.is_set():
logger.debug('DiskIO completion event set. Joining Pool')
break
sleep(1)
self.patch_threads.join()
logger.debug('Putting EOF')
save_queue.put('EOF')
logger.debug('Converted images')
def check_thread_error(self):
for thread in (self.predictor.thread, self.disk_io.load_thread, self.disk_io.save_thread, self.patch_threads):
thread.check_and_raise_error() |
class BasicLayer(GenericLayer):
def __init__(self, in_planes, out_planes, stride=1, mid_planes_and_cardinality=None, reduction=1, final_bn_relu=True, use_se=False, se_reduction_ratio=16):
assert (is_pos_int(in_planes) and is_pos_int(out_planes))
assert ((is_pos_int(stride) or is_pos_int_tuple(stride)) and is_pos_int(reduction))
convolutional_block = nn.Sequential(conv3x3(in_planes, out_planes, stride=stride), nn.BatchNorm2d(out_planes), nn.ReLU(inplace=INPLACE), conv3x3(out_planes, out_planes))
super().__init__(convolutional_block, in_planes, out_planes, stride=stride, reduction=reduction, final_bn_relu=final_bn_relu, use_se=use_se, se_reduction_ratio=se_reduction_ratio) |
@pytest.hookimpl(optionalhook=True)
def pytest_selenium_capture_debug(item, report, extra):
provider = SauceLabs(item.config.getini('saucelabs_data_center'))
if (not provider.uses_driver(item.config.getoption('driver'))):
return
pytest_html = item.config.pluginmanager.getplugin('html')
extra.append(pytest_html.extras.html(_video_html(item._driver.session_id))) |
def test_run_with_optional_and_python_restricted_dependencies(installer: Installer, locker: Locker, repo: Repository, package: ProjectPackage) -> None:
package.python_versions = '~2.7 || ^3.4'
package_a = get_package('A', '1.0')
package_b = get_package('B', '1.1')
package_c12 = get_package('C', '1.2')
package_c13 = get_package('C', '1.3')
package_d = get_package('D', '1.4')
package_c13.add_dependency(Factory.create_dependency('D', '^1.2'))
repo.add_package(package_a)
repo.add_package(package_b)
repo.add_package(package_c12)
repo.add_package(package_c13)
repo.add_package(package_d)
package.extras = {canonicalize_name('foo'): [get_dependency('A', '~1.0')]}
package.add_dependency(Factory.create_dependency('A', {'version': '~1.0', 'optional': True}))
package.add_dependency(Factory.create_dependency('B', {'version': '^1.0', 'python': '~2.4'}))
package.add_dependency(Factory.create_dependency('C', {'version': '^1.0', 'python': '~2.7 || ^3.4'}))
result = installer.run()
assert (result == 0)
expected = fixture('with-optional-dependencies')
assert (locker.written_data == expected)
assert isinstance(installer.executor, Executor)
assert (installer.executor.installations_count == 2)
assert (installer.executor.installations[0].name == 'd')
assert (installer.executor.installations[1].name == 'c') |
class RC2(object):
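# RC2 ("relaxable cardinality constraints") core-guided MaxSAT solver: soft
# clauses get selector literals, and unsatisfiable cores are iteratively
# relaxed with cardinality sums until the formula becomes satisfiable.
#
# Typical usage (sketch, assuming a WCNF instance `wcnf`):
#     with RC2(wcnf) as rc2:
#         model = rc2.compute()  # optimal model, or None if hard part is UNSAT
#         print(rc2.cost)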
def __init__(self, formula, solver='g3', adapt=False, exhaust=False, incr=False, minz=False, trim=0, verbose=0):
self.verbose = verbose
self.exhaust = exhaust
self.solver = solver
self.adapt = adapt
self.minz = minz
self.trim = trim
(self.sels, self.smap, self.sall, self.s2cl, self.sneg) = ([], {}, [], {}, set([]))
self.pool = IDPool(start_from=(formula.nv + 1))
self.wght = {}
self.sums = []
self.bnds = {}
self.tobj = {}
self.swgt = {}
self.cost = 0
VariableMap = collections.namedtuple('VariableMap', ['e2i', 'i2e'])
self.vmap = VariableMap(e2i={}, i2e={})
self.init(formula, incr=incr)
wght = self.wght.values()
if ((not formula.hard) and (len(self.sels) > 100000) and (min(wght) == max(wght))):
self.minz = False
def __del__(self):
self.delete()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.delete()
def init(self, formula, incr=False):
self.oracle = Solver(name=self.solver, bootstrap_with=formula.hard, incr=incr, use_timer=True)
if (isinstance(formula, WCNFPlus) and formula.atms):
assert self.oracle.supports_atmost(), '{0} does not support native cardinality constraints. Make sure you use the right type of formula.'.format(self.solver)
for atm in formula.atms:
self.oracle.add_atmost(*atm)
for (i, cl) in enumerate(formula.soft):
selv = cl[0]
if (len(cl) > 1):
selv = self.pool.id()
self.s2cl[selv] = cl[:]
cl.append((- selv))
self.oracle.add_clause(cl)
if (selv not in self.wght):
self.sels.append(selv)
self.wght[selv] = formula.wght[i]
self.smap[selv] = i
else:
self.wght[selv] += formula.wght[i]
self.sels_set = set(self.sels)
self.sall = self.sels[:]
for v in range(1, (formula.nv + 1)):
self.vmap.e2i[v] = v
self.vmap.i2e[v] = v
if (self.verbose > 1):
print('c formula: {0} vars, {1} hard, {2} soft'.format(formula.nv, len(formula.hard), len(formula.soft)))
def add_clause(self, clause, weight=None):
cl = list(map((lambda l: self._map_extlit(l)), (clause if ((not (len(clause) == 2)) or (not (type(clause[0]) in (list, tuple, set)))) else clause[0])))
if (not weight):
if ((not (len(clause) == 2)) or (not (type(clause[0]) in (list, tuple, set)))):
self.oracle.add_clause(cl)
else:
assert self.oracle.supports_atmost(), '{0} does not support native cardinality constraints. Make sure you use the right type of formula.'.format(self.solver)
self.oracle.add_atmost(cl, clause[1])
else:
selv = cl[0]
if (len(cl) > 1):
selv = self.pool.id()
self.s2cl[selv] = cl[:]
cl.append((- selv))
self.oracle.add_clause(cl)
if (selv not in self.wght):
self.sels.append(selv)
self.wght[selv] = weight
self.smap[selv] = (len(self.sels) - 1)
else:
self.wght[selv] += weight
self.sall.append(selv)
self.sels_set.add(selv)
def delete(self):
if self.oracle:
if (not self.oracle.supports_atmost()):
for t in six.itervalues(self.tobj):
t.delete()
self.oracle.delete()
self.oracle = None
def compute(self):
res = self.compute_()
if res:
self.model = self.oracle.get_model()
if ((self.model is None) and (self.pool.top == 0)):
self.model = []
self.model = filter((lambda l: (abs(l) in self.vmap.i2e)), self.model)
self.model = map((lambda l: int(copysign(self.vmap.i2e[abs(l)], l))), self.model)
self.model = sorted(self.model, key=(lambda l: abs(l)))
return self.model
def enumerate(self, block=0):
done = False
while (not done):
model = self.compute()
if (model != None):
if (block == 1):
(m, cl) = (set(self.oracle.get_model()), [])
for selv in self.sall:
if (selv in m):
cl.append((- selv))
if ((selv in self.s2cl) and (not (selv in self.sneg))):
self.sneg.add(selv)
for il in self.s2cl[selv]:
self.oracle.add_clause([selv, (- il)])
self.oracle.add_clause(cl)
elif (block == (- 1)):
m = set(self.oracle.get_model())
self.oracle.add_clause([l for l in filter((lambda l: ((- l) in m)), self.sall)])
else:
self.add_clause([(- l) for l in model])
(yield model)
else:
done = True
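# Main core-guided loop: while solving under the current assumptions fails,
# extract an unsatisfiable core and relax it; an empty core means the hard
# clauses themselves are unsatisfiable.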
def compute_(self):
if self.adapt:
self.adapt_am1()
while (not self.oracle.solve(assumptions=(self.sels + self.sums))):
self.get_core()
if (not self.core):
return False
self.process_core()
if (self.verbose > 1):
print('c cost: {0}; core sz: {1}; soft sz: {2}'.format(self.cost, len(self.core), (len(self.sels) + len(self.sums))))
return True
def get_core(self):
self.core = self.oracle.get_core()
if self.core:
self.trim_core()
self.minimize_core()
if (not self.core):
return
self.minw = min(map((lambda l: self.wght[l]), self.core))
(iter1, iter2) = itertools.tee(self.core)
self.core_sels = list((l for l in iter1 if (l in self.sels_set)))
self.core_sums = list((l for l in iter2 if (l not in self.sels_set)))
def process_core(self):
self.cost += self.minw
self.garbage = set()
if ((len(self.core_sels) != 1) or (len(self.core_sums) > 0)):
self.process_sels()
self.process_sums()
if (len(self.rels) > 1):
t = self.create_sum()
b = (self.exhaust_core(t) if self.exhaust else 1)
if b:
self.set_bound(t, b, self.minw)
else:
for relv in self.rels:
self.oracle.add_clause([relv])
else:
self.oracle.add_clause([(- self.core_sels[0])])
self.garbage.add(self.core_sels[0])
self.filter_assumps()
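# Detect at-most-1 constraints among the selectors by unit propagation and
# process them up front, which often lifts the lower bound before the main
# loop starts.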
def adapt_am1(self):
conns = collections.defaultdict((lambda : set([])))
confl = []
for l1 in self.sels:
(st, props) = self.oracle.propagate(assumptions=[l1], phase_saving=2)
if st:
for l2 in props:
if ((- l2) in self.sels_set):
conns[l1].add((- l2))
conns[(- l2)].add(l1)
else:
confl.append(l1)
if confl:
ccopy = {}
confl = set(confl)
for l in conns:
if (l not in confl):
cc = conns[l].difference(confl)
if cc:
ccopy[l] = cc
conns = ccopy
confl = list(confl)
for l in confl:
(self.core, self.minw) = ([l], self.wght[l])
(self.core_sels, self.core_sums) = ([l], [])
self.process_core()
if (self.verbose > 1):
print('c unit cores found: {0}; cost: {1}'.format(len(confl), self.cost))
nof_am1 = 0
len_am1 = []
lits = set(conns.keys())
while lits:
am1 = [min(lits, key=(lambda l: len(conns[l])))]
for l in sorted(conns[am1[0]], key=(lambda l: len(conns[l]))):
if (l in lits):
for l_added in am1[1:]:
if (l_added not in conns[l]):
break
else:
am1.append(l)
lits.difference_update(set(am1))
for l in conns:
conns[l] = conns[l].difference(set(am1))
if (len(am1) > 1):
self.process_am1(am1)
nof_am1 += 1
len_am1.append(len(am1))
self.sels_set = set(self.sels)
if ((self.verbose > 1) and nof_am1):
print('c am1s found: {0}; avgsz: {1:.1f}; cost: {2}'.format(nof_am1, (sum(len_am1) / float(nof_am1)), self.cost))
def process_am1(self, am1):
self.garbage = set()
while (len(am1) > 1):
self.minw = min(map((lambda l: self.wght[l]), am1))
(self.core_sels, b) = (am1, (len(am1) - 1))
self.cost += (b * self.minw)
self.process_sels()
am1 = list(filter((lambda l: (l not in self.garbage)), am1))
selv = self.pool.id()
self.oracle.add_clause(([(- l) for l in self.rels] + [(- selv)]))
self.sels.append(selv)
self.wght[selv] = self.minw
self.smap[selv] = (len(self.wght) - 1)
self.filter_assumps()
def trim_core(self):
for i in range(self.trim):
self.oracle.solve(assumptions=self.core)
new_core = self.oracle.get_core()
if (len(new_core) == len(self.core)):
break
self.core = new_core
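# Deletion-based core minimization under a conflict budget: try dropping each
# literal in turn and keep the reduced set whenever it stays unsatisfiable.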
def minimize_core(self):
if (self.minz and (len(self.core) > 1)):
self.core = sorted(self.core, key=(lambda l: self.wght[l]))
self.oracle.conf_budget(1000)
i = 0
while (i < len(self.core)):
to_test = (self.core[:i] + self.core[(i + 1):])
if (self.oracle.solve_limited(assumptions=to_test) == False):
self.core = to_test
elif (self.oracle.get_status() == True):
i += 1
else:
break
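# Core exhaustion: keep increasing the bound of the new cardinality sum while
# the formula stays unsatisfiable, adding minw to the cost at each step.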
def exhaust_core(self, tobj):
if self.oracle.solve(assumptions=[(- tobj.rhs[1])]):
return 1
else:
self.cost += self.minw
for i in range(2, len(self.rels)):
self.tobj[(- tobj.rhs[(i - 1)])] = tobj
self.bnds[(- tobj.rhs[(i - 1)])] = (i - 1)
self.update_sum((- tobj.rhs[(i - 1)]))
if self.oracle.solve(assumptions=[(- tobj.rhs[i])]):
return i
self.cost += self.minw
return None
def process_sels(self):
self.rels = []
for l in self.core_sels:
if (self.wght[l] == self.minw):
self.garbage.add(l)
else:
self.wght[l] -= self.minw
self.rels.append((- l))
def process_sums(self):
for l in self.core_sums:
if (self.wght[l] == self.minw):
self.garbage.add(l)
else:
self.wght[l] -= self.minw
(t, b) = self.update_sum(l)
if (b < len(t.rhs)):
lnew = (- t.rhs[b])
if (lnew not in self.swgt):
self.set_bound(t, b, self.swgt[l])
self.rels.append((- l))
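# Build a cardinality sum over the relaxation literals, either as an
# incremental totalizer encoding or, if the oracle supports it, as a native
# at-most constraint.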
def create_sum(self, bound=1):
if (not self.oracle.supports_atmost()):
t = ITotalizer(lits=self.rels, ubound=bound, top_id=self.pool.top)
self.pool.top = t.top_id
for cl in t.cnf.clauses:
self.oracle.add_clause(cl)
else:
t = ITotalizer()
t.lits = self.rels
bvar = self.pool.id()
t.rhs = ([None] * len(t.lits))
t.rhs[bound] = bvar
rhs = len(t.lits)
amb = [(([(- bvar)] * (rhs - bound)) + t.lits), rhs]
self.oracle.add_atmost(*amb)
return t
def update_sum(self, assump):
t = self.tobj[assump]
b = (self.bnds[assump] + 1)
if (not self.oracle.supports_atmost()):
t.increase(ubound=b, top_id=self.pool.top)
self.pool.top = t.top_id
if t.nof_new:
for cl in t.cnf.clauses[(- t.nof_new):]:
self.oracle.add_clause(cl)
else:
rhs = len(t.lits)
if (b < rhs):
if (not t.rhs[b]):
t.rhs[b] = self.pool.id()
amb = [(([(- t.rhs[b])] * (rhs - b)) + t.lits), rhs]
self.oracle.add_atmost(*amb)
return (t, b)
def set_bound(self, tobj, rhs, weight=None):
if (weight is None):
weight = self.minw
self.tobj[(- tobj.rhs[rhs])] = tobj
self.bnds[(- tobj.rhs[rhs])] = rhs
self.wght[(- tobj.rhs[rhs])] = weight
self.swgt[(- tobj.rhs[rhs])] = weight
self.sums.append((- tobj.rhs[rhs]))
def filter_assumps(self):
self.sels = list(filter((lambda x: (x not in self.garbage)), self.sels))
self.sums = list(filter((lambda x: (x not in self.garbage)), self.sums))
self.bnds = {l: b for (l, b) in six.iteritems(self.bnds) if (l not in self.garbage)}
self.wght = {l: w for (l, w) in six.iteritems(self.wght) if (l not in self.garbage)}
self.sels_set.difference_update(set(self.garbage))
self.garbage.clear()
def oracle_time(self):
return self.oracle.time_accum()
def _map_extlit(self, l):
v = abs(l)
if (v in self.vmap.e2i):
return int(copysign(self.vmap.e2i[v], l))
else:
i = self.pool.id()
self.vmap.e2i[v] = i
self.vmap.i2e[i] = v
return int(copysign(i, l)) |
class TestIncidenceRateRatio():
def test_incidence_rate_ratio_reference_equal_to_1(self, time_data):
irr = IncidenceRateRatio()
irr.fit(time_data, exposure='exp', outcome='dis', time='t')
assert (irr.incidence_rate_ratio[0] == 1)
def test_incidence_rate_ratio_equal_to_expected(self, time_data):
sas_irr = 1.
sas_se = 0.
sas_ci = (0., 4.)
irr = IncidenceRateRatio()
irr.fit(time_data, exposure='exp', outcome='dis', time='t')
npt.assert_allclose(irr.incidence_rate_ratio[1], sas_irr, rtol=0.0001)
rf = irr.results
npt.assert_allclose(rf.loc[(rf.index == '1')][['IRR_LCL', 'IRR_UCL']], [sas_ci], rtol=0.0001)
npt.assert_allclose(rf.loc[(rf.index == '1')][['SD(IRR)']], sas_se, rtol=0.0001)
def test_multiple_exposures(self):
df = pd.DataFrame()
df['exp'] = ((([1] * 50) + ([0] * 50)) + ([2] * 50))
df['dis'] = (((((([1] * 25) + ([0] * 25)) + ([1] * 25)) + ([0] * 25)) + ([1] * 25)) + ([0] * 25))
df['t'] = 2
irr = IncidenceRateRatio()
irr.fit(df, exposure='exp', outcome='dis', time='t')
assert (irr.results.shape[0] == 3)
assert (list(irr.results.index) == ['Ref:0', '1', '2'])
def test_match_sas_sampledata(self):
sas_irr = 0.753956
sas_se = 0.
sas_ci = (0.390146, 1.457017)
df = ze.load_sample_data(False)
irr = IncidenceRateRatio()
irr.fit(df, exposure='art', outcome='dead', time='t')
npt.assert_allclose(irr.incidence_rate_ratio[1], sas_irr, rtol=1e-05)
rf = irr.results
npt.assert_allclose(rf.loc[(rf.index == '1')][['IRR_LCL', 'IRR_UCL']], [sas_ci], rtol=1e-05)
npt.assert_allclose(rf.loc[(rf.index == '1')][['SD(IRR)']], sas_se, rtol=1e-05) |
class VGG16(nn.Module):
def __init__(self, embedding_dim=1024, pretrained=False):
super(VGG16, self).__init__()
seed_model = imagemodels.__dict__['vgg16'](pretrained=pretrained).features
seed_model = nn.Sequential(*list(seed_model.children())[:(- 1)])
last_layer_index = len(list(seed_model.children()))
seed_model.add_module(str(last_layer_index), nn.Conv2d(512, embedding_dim, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)))
self.image_model = seed_model
def forward(self, x):
x = self.image_model(x)
return x |
class Effect8053(BaseEffect):
type = 'passive'
@staticmethod
def handler(fit, ship, context, projectionRange, **kwargs):
fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Medium Vorton Projector')), 'maxRange', ship.getModifiedItemAttr('shipBonusUC1'), skill='EDENCOM Cruiser', **kwargs) |
class CoordinateConverter(commands.Converter):
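# Accepts grid coordinates in either "a1" or "1a" form and converts them to
# a 0-indexed (x, y) pair on a 10x10 board.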
@staticmethod
async def convert(ctx: commands.Context, coordinate: str) -> tuple[(int, int)]:
if (len(coordinate) not in (2, 3)):
raise commands.BadArgument('Invalid co-ordinate provided.')
coordinate = coordinate.lower()
if coordinate[0].isalpha():
digit = coordinate[1:]
letter = coordinate[0]
else:
digit = coordinate[:(- 1)]
letter = coordinate[(- 1)]
if (not digit.isdecimal()):
raise commands.BadArgument
x = (ord(letter) - ord('a'))
y = (int(digit) - 1)
if ((not (0 <= x <= 9)) or (not (0 <= y <= 9))):
raise commands.BadArgument
return (x, y) |
class BatchThreader():
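# Thread-pool batcher: keeps up to (prefetch_size + 1) * batch_size tasks in
# flight and hands back results one batch at a time.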
def __init__(self, func, args_list, batch_size, prefetch_size=4, processes=12):
self.batch_size = batch_size
self.prefetch_size = prefetch_size
self.pool = ThreadPool(processes=processes)
self.async_result = []
self.func = func
self.left_args_list = args_list
self.n_tasks = len(args_list)
self.__start_works(self.__get_n_pending_works())
def __start_works(self, times):
for _ in range(times):
args = self.left_args_list.pop(0)
self.async_result.append(self.pool.apply_async(self.func, args))
def __get_n_pending_works(self):
return min((((self.prefetch_size + 1) * self.batch_size) - len(self.async_result)), len(self.left_args_list))
def pop_results(self):
n_inwork = len(self.async_result)
n_fetch = min(n_inwork, self.batch_size)
rtn = [self.async_result.pop(0).get() for _ in range(n_fetch)]
to_fill = self.__get_n_pending_works()
if (to_fill == 0):
self.pool.close()
else:
self.__start_works(to_fill)
return rtn |