code stringlengths 281 23.7M |
|---|
def test_multinomial_blocks_cutting_plane():
    """End-to-end check: an n-slack SSVM on a GridCRF should fit a small
    multinomial blocks dataset to zero training error."""
    (X, Y) = generate_blocks_multinomial(n_samples=40, noise=0.5, seed=0)
    n_labels = len(np.unique(Y))
    # NOTE(review): `inference_method` must come from module scope (likely a
    # parametrized fixture/global) — confirm against the full test module.
    crf = GridCRF(n_states=n_labels, inference_method=inference_method)
    clf = NSlackSSVM(model=crf, max_iter=100, C=100, check_constraints=False, batch_size=1)
    clf.fit(X, Y)
    Y_pred = clf.predict(X)
    # The model must reproduce the training labels exactly.
    assert_array_equal(Y, Y_pred)
def test_index_names():
    """pivot_wider must spread 'stat' values into columns, keyed by 'var'."""
    # Long-format input: one row per (var, stat) pair.
    df = pd.DataFrame([{'stat': 'mean', 'score': 4, 'var': 'var1'}, {'stat': 'sd', 'score': 7, 'var': 'var1'}, {'stat': 'mean', 'score': 1, 'var': 'var2'}, {'stat': 'sd', 'score': 2, 'var': 'var2'}, {'stat': 'mean', 'score': 11, 'var': 'var3'}, {'stat': 'sd', 'score': 14, 'var': 'var3'}])
    # Wide-format expectation: one row per var, one column per stat.
    expected_output = pd.DataFrame({'var': ['var1', 'var2', 'var3'], 'mean': [4, 1, 11], 'sd': [7, 2, 14]})
    result = df.pivot_wider(index='var', names_from='stat', values_from='score')
    assert_frame_equal(result, expected_output)
class SplitOperatorTrotterStep(TrotterStep):
    """Trotter step that pre-diagonalizes the one-body part of the Hamiltonian.

    The Bogoliubov transform of the quadratic (one-body) term is computed
    once at construction, so each step can rotate into the diagonal basis.
    """

    def __init__(self, hamiltonian: 'openfermion.DiagonalCoulombHamiltonian') -> None:
        quad_ham = ops.QuadraticHamiltonian(hamiltonian.one_body)
        # Orbital energies plus the basis change realizing the diagonalization.
        (self.orbital_energies, self.basis_change_matrix, _) = quad_ham.diagonalizing_bogoliubov_transform()
        super().__init__(hamiltonian)
def chp_hash_table_ref_cont(ht, old, env, cont, _vals):
    """Continuation run with the two results of a chaperone's hash-ref handler.

    The handler must return exactly (replacement-key, post-proc); the key is
    chaperone-checked and then used for the actual lookup, with `post`
    applied to the looked-up value afterwards.
    """
    if (_vals.num_values() != 2):
        raise SchemeException('hash-ref handler produced the wrong number of results')
    (key, post) = _vals.get_all_values()
    # Re-wrap the key as a single-value result for the chaperone check.
    val = values.Values.make1(key)
    # After the key passes the chaperone check, look it up and run `post`.
    after = check_chaperone_results(val, env, imp_hash_table_post_ref_cont(post, ht, old, env, cont))
    return ht.hash_ref(key, env, after)
class SawyerReachPushPickPlaceEnv(SawyerXYZEnv):
    """Sawyer environment with three selectable tasks: reach, push, pick_place.

    The active task (set via ``_set_task_inner``) decides goal placement, the
    success criterion in ``step`` and which reward function
    ``compute_reward`` dispatches to.
    """

    def __init__(self):
        liftThresh = 0.04
        goal_low = (-0.1, 0.8, 0.05)
        goal_high = (0.1, 0.9, 0.3)
        hand_low = (-0.5, 0.4, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = (-0.1, 0.6, 0.02)
        obj_high = (0.1, 0.7, 0.02)
        self.task_types = ['pick_place', 'reach', 'push']
        super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
        self.task_type = None
        self.init_config = {'obj_init_angle': 0.3, 'obj_init_pos': np.array([0, 0.6, 0.02]), 'hand_init_pos': np.array([0, 0.6, 0.2])}
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.hand_init_pos = self.init_config['hand_init_pos']
        self.liftThresh = liftThresh
        # Object start position and goal are sampled jointly on reset.
        self._random_reset_space = Box(np.hstack((obj_low, goal_low)), np.hstack((obj_high, goal_high)))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))
        self.num_resets = 0

    def _set_task_inner(self, *, task_type, **kwargs):
        """Select the active task and install its default (non-random) goal."""
        super()._set_task_inner(**kwargs)
        self.task_type = task_type
        if self.task_type == 'pick_place':
            self.goal = np.array([0.1, 0.8, 0.2])
        elif self.task_type == 'reach':
            self.goal = np.array([-0.1, 0.8, 0.2])
        elif self.task_type == 'push':
            self.goal = np.array([0.1, 0.8, 0.02])
        else:
            raise NotImplementedError

    # BUG FIX: restored @property — __init__ passes self.model_name as a
    # value (not a call) to super().__init__, so this must be a property.
    @property
    def model_name(self):
        return full_v1_path_for('sawyer_xyz/sawyer_reach_push_pick_and_place.xml')

    # BUG FIX: restored the decorator that was mangled into a bare
    # '_assert_task_is_set' expression. NOTE(review): assumed to be imported
    # at module level — confirm against the original source.
    @_assert_task_is_set
    def step(self, action):
        """Step the simulation and report task-dependent success/diagnostics."""
        ob = super().step(action)
        (reward, _, reachDist, _, pushDist, pickRew, _, placingDist) = self.compute_reward(action, ob)
        goal_dist = placingDist if (self.task_type == 'pick_place') else pushDist
        if self.task_type == 'reach':
            success = float(reachDist <= 0.05)
        else:
            success = float(goal_dist <= 0.07)
        info = {'reachDist': reachDist, 'pickRew': pickRew, 'epRew': reward, 'goalDist': goal_dist, 'success': success}
        return (ob, reward, False, info)

    # BUG FIX: restored @property to match accessor-style usage elsewhere.
    @property
    def _target_site_config(self):
        # Park the goal markers of inactive tasks far outside the workspace.
        far_away = np.array([10.0, 10.0, 10.0])
        return [(('goal_' + t), (self._target_pos if (t == self.task_type) else far_away)) for t in self.task_types]

    def _get_pos_objects(self):
        return self.data.get_geom_xpos('objGeom')

    def adjust_initObjPos(self, orig_init_pos):
        # Correct for the body/geom offset so the geom lands at the requested
        # XY; keep the geom's current height as z.
        diff = self.get_body_com('obj')[:2] - self.data.get_geom_xpos('objGeom')[:2]
        adjustedPos = orig_init_pos[:2] + diff
        return [adjustedPos[0], adjustedPos[1], self.data.get_geom_xpos('objGeom')[-1]]

    def reset_model(self):
        """Reset hand/object/goal and precompute the reward normalizers."""
        self._reset_hand()
        self._target_pos = self._get_state_rand_vec()
        self.obj_init_pos = self.adjust_initObjPos(self.init_config['obj_init_pos'])
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.objHeight = self.data.get_geom_xpos('objGeom')[2]
        self.heightTarget = self.objHeight + self.liftThresh
        if self.random_init:
            goal_pos = self._get_state_rand_vec()
            self._target_pos = goal_pos[3:]
            # Re-sample until object and goal are at least 15 cm apart in XY.
            while np.linalg.norm(goal_pos[:2] - self._target_pos[:2]) < 0.15:
                goal_pos = self._get_state_rand_vec()
                self._target_pos = goal_pos[3:]
            if self.task_type == 'push':
                # Push goals stay at the object's height (2-D task).
                self._target_pos = np.concatenate((goal_pos[-3:-1], [self.obj_init_pos[-1]]))
                self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[-1]]))
            else:
                self._target_pos = goal_pos[-3:]
                self.obj_init_pos = goal_pos[:3]
        self._set_obj_xyz(self.obj_init_pos)
        # Baseline distances used to scale the dense rewards.
        self.maxReachDist = np.linalg.norm(self.init_fingerCOM - np.array(self._target_pos))
        self.maxPushDist = np.linalg.norm(self.obj_init_pos[:2] - np.array(self._target_pos)[:2])
        self.maxPlacingDist = np.linalg.norm(np.array([self.obj_init_pos[0], self.obj_init_pos[1], self.heightTarget]) - np.array(self._target_pos)) + self.heightTarget
        # Best achievable rewards, indexed [pick_place, reach, push].
        self.target_rewards = [(1000 * self.maxPlacingDist) + (1000 * 2), (1000 * self.maxReachDist) + (1000 * 2), (1000 * self.maxPushDist) + (1000 * 2)]
        if self.task_type == 'reach':
            idx = 1
        elif self.task_type == 'push':
            idx = 2
        elif self.task_type == 'pick_place':
            idx = 0
        else:
            raise NotImplementedError
        self.target_reward = self.target_rewards[idx]
        self.num_resets += 1
        return self._get_obs()

    def _reset_hand(self):
        super()._reset_hand(10)
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        self.init_fingerCOM = (rightFinger + leftFinger) / 2
        self.pickCompleted = False

    def compute_reward(self, actions, obs):
        """Dispatch to the active task's reward.

        Returns an 8-element list:
        [reward, reachRew, reachDist, pushRew, pushDist, pickRew, placeRew,
        placingDist]; entries not meaningful for the task are None.
        """
        objPos = obs[3:6]
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        fingerCOM = (rightFinger + leftFinger) / 2
        heightTarget = self.heightTarget
        goal = self._target_pos

        def compute_reward_reach(actions, obs):
            # Dense reward shaped by distance-to-goal plus sharp exponentials.
            del actions
            del obs
            c1 = 1000
            c2 = 0.01
            c3 = 0.001
            reachDist = np.linalg.norm(fingerCOM - goal)
            reachRew = (c1 * (self.maxReachDist - reachDist)) + (c1 * (np.exp((-(reachDist ** 2)) / c2) + np.exp((-(reachDist ** 2)) / c3)))
            reachRew = max(reachRew, 0)
            reward = reachRew
            return [reward, reachRew, reachDist, None, None, None, None, None]

        def compute_reward_push(actions, obs):
            c1 = 1000
            c2 = 0.01
            c3 = 0.001
            del actions
            del obs
            assert np.all(goal == self._get_site_pos('goal_push'))
            reachDist = np.linalg.norm(fingerCOM - objPos)
            pushDist = np.linalg.norm(objPos[:2] - goal[:2])
            reachRew = -reachDist
            # The push bonus only kicks in once the hand is at the object.
            if reachDist < 0.05:
                pushRew = (1000 * (self.maxPushDist - pushDist)) + (c1 * (np.exp((-(pushDist ** 2)) / c2) + np.exp((-(pushDist ** 2)) / c3)))
                pushRew = max(pushRew, 0)
            else:
                pushRew = 0
            reward = reachRew + pushRew
            return [reward, reachRew, reachDist, pushRew, pushDist, None, None, None]

        def compute_reward_pick_place(actions, obs):
            del obs
            reachDist = np.linalg.norm(objPos - fingerCOM)
            placingDist = np.linalg.norm(objPos - goal)
            assert np.all(goal == self._get_site_pos('goal_pick_place'))

            def reachReward():
                # Approach in XY first; penalize vertical motion until aligned.
                reachRew = -reachDist
                reachDistxy = np.linalg.norm(objPos[:-1] - fingerCOM[:-1])
                zRew = np.linalg.norm(fingerCOM[-1] - self.init_fingerCOM[-1])
                if reachDistxy < 0.05:
                    reachRew = -reachDist
                else:
                    reachRew = -reachDistxy - (2 * zRew)
                if reachDist < 0.05:
                    # Encourage closing the gripper when at the object.
                    reachRew = -reachDist + (max(actions[-1], 0) / 50)
                return (reachRew, reachDist)

            def pickCompletionCriteria():
                tolerance = 0.01
                if objPos[2] >= (heightTarget - tolerance):
                    return True
                else:
                    return False

            # pickCompleted latches True until the next reset.
            if pickCompletionCriteria():
                self.pickCompleted = True

            def objDropped():
                # Object back near the table while neither placed nor held.
                return (objPos[2] < (self.objHeight + 0.005)) and (placingDist > 0.02) and (reachDist > 0.02)

            def orig_pickReward():
                hScale = 100
                if self.pickCompleted and (not objDropped()):
                    return hScale * heightTarget
                elif (reachDist < 0.1) and (objPos[2] > (self.objHeight + 0.005)):
                    return hScale * min(heightTarget, objPos[2])
                else:
                    return 0

            def placeReward():
                c1 = 1000
                c2 = 0.01
                c3 = 0.001
                cond = self.pickCompleted and (reachDist < 0.1) and (not objDropped())
                if cond:
                    placeRew = (1000 * (self.maxPlacingDist - placingDist)) + (c1 * (np.exp((-(placingDist ** 2)) / c2) + np.exp((-(placingDist ** 2)) / c3)))
                    placeRew = max(placeRew, 0)
                    return [placeRew, placingDist]
                else:
                    return [0, placingDist]

            (reachRew, reachDist) = reachReward()
            pickRew = orig_pickReward()
            (placeRew, placingDist) = placeReward()
            assert (placeRew >= 0) and (pickRew >= 0)
            reward = reachRew + pickRew + placeRew
            return [reward, reachRew, reachDist, None, None, pickRew, placeRew, placingDist]

        if self.task_type == 'reach':
            return compute_reward_reach(actions, obs)
        elif self.task_type == 'push':
            return compute_reward_push(actions, obs)
        elif self.task_type == 'pick_place':
            return compute_reward_pick_place(actions, obs)
        else:
            raise NotImplementedError
def safe_cacher(maxsize):
    """Return a decorator like ``functools.lru_cache`` that degrades gracefully.

    Calls are served from an LRU cache when the arguments are hashable; a
    TypeError from the cache machinery (unhashable arguments) falls through
    to the uncached function instead of raising.

    Args:
        maxsize: forwarded to ``functools.lru_cache``.

    Returns:
        A decorator producing the cache-or-fallback wrapper (which also
        exposes ``cache_clear``).
    """
    from functools import wraps

    def safewrap(uncached):
        cached = lru_cache(maxsize=maxsize)(uncached)

        @wraps(uncached)
        def mucked_up_func(*arg, **kwarg):
            try:
                return cached(*arg, **kwarg)
            except TypeError:
                # BUG FIX: was a bare `except:`, which re-ran the uncached
                # function (duplicating side effects) whenever the wrapped
                # function itself raised. Only unhashable-argument failures
                # should fall back to the direct call.
                return uncached(*arg, **kwarg)
        mucked_up_func.cache_clear = cached.cache_clear
        return mucked_up_func
    return safewrap
class CheckUpdateWorker(QThread):
    """Background thread that queries a release API (GitHub, with a gitee
    mirror fallback) and emits signals when a newer version exists.

    NOTE(review): several string literals in this block are truncated in
    this copy (``self._api``, ``self._api_mirror``, the ``re.findall``
    pattern, and various user-facing messages). They must be restored from
    the original source before this code can run.
    """
    # (tag_name, message) emitted for any completed check
    infos = pyqtSignal(object, object)
    # same payload, emitted only for background (non-manual) checks
    bg_update_infos = pyqtSignal(object, object)

    def __init__(self, parent=None):
        super(CheckUpdateWorker, self).__init__(parent)
        self._ver = ''
        self._manual = False
        self._mutex = QMutex()
        self._is_work = False
        self._folder_id = None
        self._api = '
        self._api_mirror = '

    def set_values(self, ver: str, manual: bool=False):
        # Record the local version / manual flag, then start run() in the
        # worker thread.
        self._ver = ver
        self._manual = manual
        self.start()

    def __del__(self):
        # Block destruction until the worker thread has finished.
        self.wait()

    def stop(self):
        self._mutex.lock()
        self._is_work = False
        self._mutex.unlock()

    def run(self):
        # Guard against overlapping checks; the mutex is held for the whole
        # request/parse cycle.
        if (not self._is_work):
            self._mutex.lock()
            self._is_work = True
            resp = None
            try:
                resp = requests.get(self._api).json()
            except (requests.RequestException, TimeoutError, requests.exceptions.ConnectionError):
                logger.debug('chcek update from github error')
                # Primary API failed — try the mirror.
                try:
                    resp = requests.get(self._api_mirror).json()
                except:
                    logger.debug('chcek update from gitee error')
            except Exception as e:
                logger.error(f'CheckUpdateWorker error: e={e}')
            if resp:
                try:
                    (tag_name, msg) = (resp['tag_name'], resp['body'])
                    # Version scheme: 'vX.Y.Z' collapsed to X*100 + Y*10 + Z.
                    # NOTE(review): this breaks for components >= 10 — confirm
                    # the project's versioning keeps each part single-digit.
                    ver = self._ver.replace('v', '').split('-')[0].split('.')
                    ver2 = tag_name.replace('v', '').split('-')[0].split('.')
                    local_version = (((int(ver[0]) * 100) + (int(ver[1]) * 10)) + int(ver[2]))
                    remote_version = (((int(ver2[0]) * 100) + (int(ver2[1]) * 10)) + int(ver2[2]))
                    if (remote_version > local_version):
                        # Linkify URLs in the release notes for rich display.
                        urls = re.findall(' msg)
                        for url in urls:
                            new_url = f'<a href="{url}">{url}</a>'
                            msg = msg.replace(url, new_url)
                        msg = msg.replace('\n', '<br />')
                        self.infos.emit(tag_name, msg)
                        if (not self._manual):
                            self.bg_update_infos.emit(tag_name, msg)
                    elif self._manual:
                        self.infos.emit('0', '!')
                except AttributeError:
                    if self._manual:
                        self.infos.emit('v0.0.0', ',!')
                except Exception as e:
                    logger.error(f'Check Update Version error: e={e}')
            elif self._manual:
                self.infos.emit('v0.0.0', f" <a href='{self._api}'>api.github.com</a><a href='{self._api_mirror}'>gitee.com</a> ,!")
            self._manual = False
            self._is_work = False
            self._mutex.unlock()
        elif self._manual:
            self.infos.emit('v0.0.0', ',!')
def mainopt_trymac(i):
    """Convert phoneme input to Mac 'say' markup and speak each response.

    ``sys.argv[i+1]`` names the source lexFormat; each input line is
    converted to 'mac' format, printed, and piped to the macOS ``say``
    command. Returns an error string when the format is unknown.
    """
    format = sys.argv[(i + 1)]
    if (not (format in lexFormats)):
        return (('No such format ' + repr(format)) + ' (use --formats to see a list of formats)')
    for resp in getInputText((i + 2), (('phonemes in ' + format) + ' format'), 'maybe'):
        mac = convert(resp, format, 'mac')
        toSay = markup_inline_word('mac', mac)
        print(as_printable(toSay))
        w = os.popen((macSayCommand() + ' -v Vicki'), 'w')
        getBuf(w).write(toSay)
        # BUG FIX: close the pipe so `say` receives EOF and actually speaks;
        # the original leaked the popen handle on every iteration.
        w.close()
class Timer(object):
    """Accumulating stopwatch: tic()/toc() pairs maintain total elapsed
    time, the call count and a running average."""

    def __init__(self):
        self.reset()

    def tic(self):
        # Wall-clock start, mirroring MATLAB's tic.
        self.start_time = time.time()

    def toc(self):
        # Fold the interval since the last tic into the statistics.
        now = time.time()
        self.diff = now - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls

    def reset(self):
        # Zero every statistic.
        self.total_time = 0.0
        self.calls = 0
        self.start_time = 0.0
        self.diff = 0.0
        self.average_time = 0.0
# NOTE(review): this bare parenthesized expression looks like a mangled
# decorator (e.g. a skip-guard or parametrize taking ATTRS_WITH_ALIAS) —
# restore the '@...' form from the original source.
(ATTRS_WITH_ALIAS)
def test_alias_old_style():
    """get_attrs_shape must honor attr.ib(alias=...) for both public and
    underscore-prefixed fields declared old-style (class attributes)."""
    class WithAliases():
        foo = attr.ib(type=int, alias='foo1')
        _foo = attr.ib(type=int, alias='foo2')
    # Both input params must use the alias names while the field ids keep
    # the attribute names (including the leading underscore).
    assert (get_attrs_shape(WithAliases) == Shape(input=InputShape(constructor=WithAliases, kwargs=None, fields=(InputField(type=int, id='foo', default=NoDefault(), is_required=True, metadata=MappingProxyType({}), original=ANY), InputField(type=int, id='_foo', default=NoDefault(), is_required=True, metadata=MappingProxyType({}), original=ANY)), params=(Param(field_id='foo', name='foo1', kind=ParamKind.POS_OR_KW), Param(field_id='_foo', name='foo2', kind=ParamKind.POS_OR_KW)), overriden_types=frozenset({'foo', '_foo'})), output=OutputShape(fields=(OutputField(type=int, id='foo', default=NoDefault(), accessor=create_attr_accessor('foo', is_required=True), metadata=MappingProxyType({}), original=ANY), OutputField(type=int, id='_foo', default=NoDefault(), accessor=create_attr_accessor('_foo', is_required=True), metadata=MappingProxyType({}), original=ANY)), overriden_types=frozenset({'foo', '_foo'}))))
class FeatureMapResampler(nn.Module):
    """Match a feature map to a target channel count and optionally halve
    its spatial resolution.

    A 1x1 conv is built only when the channel counts differ; ``stride`` may
    be 1 (spatial identity) or 2 (3x3 max-pool, padding 1).
    """

    def __init__(self, in_channels, out_channels, stride, norm=''):
        super(FeatureMapResampler, self).__init__()
        if in_channels == out_channels:
            self.reduction = None
        else:
            # Bias only when no norm layer follows the conv.
            self.reduction = Conv2d(
                in_channels,
                out_channels,
                kernel_size=1,
                bias=(norm == ''),
                norm=get_norm(norm, out_channels),
                activation=None,
            )
        assert stride <= 2
        self.stride = stride

    def forward(self, x):
        if self.reduction is not None:
            x = self.reduction(x)
        if self.stride == 1:
            return x
        if self.stride == 2:
            return F.max_pool2d(x, kernel_size=self.stride + 1, stride=self.stride, padding=1)
        raise NotImplementedError()
def read_array(dirpath: (pathlib.Path | str)) -> cunumeric.ndarray:
    """Load a Zarr array stored at ``dirpath`` into a cunumeric ndarray.

    Compressed arrays are rejected. When the chunk grid does not divide the
    shape evenly, tiles are read into a padded buffer and the result is
    sliced back down to the true shape.
    """
    dirpath = pathlib.Path(dirpath)
    zarr_ary = zarr.open_array(dirpath, mode='r')
    if zarr_ary.compressor is not None:
        raise NotImplementedError("compressor isn't supported")
    padded_ary = get_padded_array(zarr_ary)
    if padded_ary is not None:
        read_tiles(padded_ary, dirpath=dirpath, tile_shape=zarr_ary.chunks)
        # Trim the padding back to the array's true extents.
        trim = tuple(slice(extent) for extent in zarr_ary.shape)
        return padded_ary[trim]
    out = cunumeric.empty(shape=zarr_ary.shape, dtype=zarr_ary.dtype)
    read_tiles(out, dirpath=dirpath, tile_shape=zarr_ary.chunks)
    return out
class Migration(migrations.Migration):
    """Redeclare Question.value_type with the full set of value-type choices
    (text, url, integer, float, boolean, datetime, email, phone, option,
    file)."""
    dependencies = [('questions', '0065_data_migration')]
    operations = [migrations.AlterField(model_name='question', name='value_type', field=models.CharField(choices=[('text', 'Text'), ('url', 'URL'), ('integer', 'Integer'), ('float', 'Float'), ('boolean', 'Boolean'), ('datetime', 'Datetime'), ('email', 'Email'), ('phone', 'Phone'), ('option', 'Option'), ('file', 'File')], help_text='Type of value for this question.', max_length=8, verbose_name='Value type'))]
def rtn_strcpy(se: 'SymbolicExecutor', pstate: 'ProcessState'):
    """Hook emulating ``strcpy(dst, src)`` under symbolic execution.

    Pins the concrete string length by constraining every observed source
    byte to be non-null and the terminator to be null, then copies the
    symbolic bytes (terminator included) to ``dst``. Returns ``dst``,
    matching strcpy's return value.
    """
    logger.debug('strcpy hooked')
    dst = pstate.get_argument_value(0)
    src = pstate.get_argument_value(1)
    src_str = pstate.memory.read_string(src)
    size = len(src_str)
    # Every byte of the concretely-read string must remain non-null...
    for (i, c) in enumerate(src_str):
        pstate.push_constraint((pstate.read_symbolic_memory_byte((src + i)).getAst() != 0))
    # ...and the byte right after it must remain the NUL terminator.
    pstate.push_constraint((pstate.read_symbolic_memory_byte((src + size)).getAst() == 0))
    # Copy size+1 bytes (string plus terminator) symbolically.
    for index in range((size + 1)):
        sym_c = pstate.read_symbolic_memory_byte((src + index))
        pstate.write_symbolic_memory_byte((dst + index), sym_c)
    return dst
class PreResNet164Drop():
    """Model configuration: 164-layer PreResNet with dropout for 32x32
    (CIFAR-style) inputs, with the matching torchvision transform
    pipelines."""
    base = PreResNetDrop
    args = list()
    kwargs = {'depth': 164}
    # Standard CIFAR augmentation: resize, pad-crop, horizontal flip,
    # normalize with the CIFAR-10 channel statistics.
    transform_train = transforms.Compose([transforms.Resize(32), transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
    # Test-time: no augmentation, identical normalization.
    transform_test = transforms.Compose([transforms.Resize(32), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
def cal_rouge_path(pred_name, ref_name):
    """Score predictions against references with files2rouge.

    Sentences from both files are mapped to a shared word-id vocabulary,
    the id sequences are written under logs/, and files2rouge is run on the
    two id files.
    """
    with open(pred_name, 'r') as fin:
        pred_sents = get_sents_str(fin)
    with open(ref_name, 'r') as fin:
        ref_sents = get_sents_str(fin)
    ref_ids = []
    pred_ids = []
    # Pairs must be converted together so both sides share one id mapping.
    for ref_sent, pred_sent in zip(ref_sents, pred_sents):
        ref_line, pred_line = change_word2id(ref_sent, pred_sent)
        ref_ids.append(ref_line)
        pred_ids.append(pred_line)
    with open('logs/ref_ids.txt', 'w') as fout:
        fout.writelines(line + '\n' for line in ref_ids)
    with open('logs/pred_ids.txt', 'w') as fout:
        fout.writelines(line + '\n' for line in pred_ids)
    files2rouge.run('logs/pred_ids.txt', 'logs/ref_ids.txt')
class BSTLexer(RegexLexer):
    """Pygments lexer for BibTeX .bst bibliography-style files.

    NOTE(review): the ``url`` string literal is truncated in this copy —
    restore the project URL from the original source.
    """
    name = 'BST'
    aliases = ['bst', 'bst-pybtex']
    filenames = ['*.bst']
    version_added = '2.2'
    flags = (re.IGNORECASE | re.MULTILINE)
    url = '
    # State machine: top-level commands open one or more nested brace
    # 'group's; 'body' tokenizes quoted functions ('name), builtins (name$),
    # variables, "strings" and #-prefixed integers.
    tokens = {'root': [include('whitespace'), (words(['read', 'sort']), Keyword), (words(['execute', 'integers', 'iterate', 'reverse', 'strings']), Keyword, 'group'), (words(['function', 'macro']), Keyword, ('group', 'group')), (words(['entry']), Keyword, ('group', 'group', 'group'))], 'group': [include('whitespace'), ('\\{', Punctuation, ('#pop', 'group-end', 'body'))], 'group-end': [include('whitespace'), ('\\}', Punctuation, '#pop')], 'body': [include('whitespace'), ('\\\'[^#\\"\\{\\}\\s]+', Name.Function), ('[^#\\"\\{\\}\\s]+\\$', Name.Builtin), ('[^#\\"\\{\\}\\s]+', Name.Variable), ('"[^\\"]*"', String), ('#-?\\d+', Number), ('\\{', Punctuation, ('group-end', 'body')), default('#pop')], 'whitespace': [('\\s+', Whitespace), ('%.*?$', Comment.Single)]}
class DealStack(StackBase):
    """Stack of cards waiting to be dealt, drawn over a translucent black
    backdrop.

    Cards from index ``spread_from`` onward fan out horizontally by
    ``offset_x``; the stack accepts no drops and only its top card is free.
    """

    offset_x = 20
    offset_y = 0
    spread_from = 0  # index of the first card drawn with a horizontal offset

    def setup(self):
        # No outline; translucent black fill.
        self.setPen(QPen(Qt.NoPen))
        shade = QColor(Qt.black)
        shade.setAlpha(50)
        self.setBrush(QBrush(shade))

    def reset(self):
        super(DealStack, self).reset()
        self.spread_from = 0

    def is_valid_drop(self, card):
        # Nothing may ever be dropped onto the deal stack.
        return False

    def is_free_card(self, card):
        # Only the top-most card can be taken.
        return card == self.cards[-1]

    def update(self):
        shift = 0
        for depth, card in enumerate(self.cards):
            card.setPos(self.pos() + QPointF(shift, 0))
            card.setZValue(depth)
            # Start fanning out only once past spread_from.
            if depth >= self.spread_from:
                shift += self.offset_x
class PIOSPI():
    """SPI master built on an RP2040 PIO state machine.

    Only SPI mode 0 (CPOL=0, CPHA=0) is supported; the state machine is
    clocked at 4x the requested SCK frequency. Bytes are shifted out from
    the top of the 32-bit FIFO word, hence the ``<< 24``.
    """

    def __init__(self, sm_id, pin_mosi, pin_miso, pin_sck, cpha=False, cpol=False, freq=1000000):
        assert (not (cpol or cpha))
        self._sm = rp2.StateMachine(sm_id, spi_cpha0, freq=(4 * freq), sideset_base=Pin(pin_sck), out_base=Pin(pin_mosi), in_base=Pin(pin_sck))
        self._sm.active(1)

    # BUG FIX: the three I/O methods below were missing the `self`
    # parameter while referencing `self._sm`, so any call raised an error.
    def write_blocking(self, wdata):
        """Transmit every byte of ``wdata`` (received bytes are discarded)."""
        for b in wdata:
            self._sm.put((b << 24))

    def read_blocking(self, n):
        """Receive and return ``n`` bytes as a list of ints."""
        data = []
        for i in range(n):
            data.append((self._sm.get() & 255))
        return data

    def write_read_blocking(self, wdata):
        """Full-duplex transfer: send ``wdata``, return the bytes read back."""
        rdata = []
        for b in wdata:
            self._sm.put((b << 24))
            rdata.append((self._sm.get() & 255))
        return rdata
class ImageAndLogitsFolder(datasets.ImageFolder):
    """ImageFolder that additionally loads precomputed (teacher) logits.

    For each image the logits are read from
    ``<logits_prefix><basename>.npy`` and returned as a ``(1, C)`` tensor
    alongside the usual (image, target) pair.
    """

    def __init__(self, *args, logits_prefix, **kwargs):
        super().__init__(*args, **kwargs)
        # Directory/prefix prepended to each image's basename to locate its
        # .npy logits file.
        self.logits_prefix = logits_prefix

    # BUG FIX: declared as a plain function in the class body; it takes no
    # self/cls, so it must be a staticmethod (instance access would
    # otherwise mis-bind the first argument).
    @staticmethod
    def logits_path_create(prefix, path):
        """Map an image path to its logits file: basename up to '.j' + '.npy'."""
        return ((prefix + path[(path.rfind('/') + 1):path.find('.j')]) + '.npy')

    def get_image_path(self, index):
        # imgs[index] is a (path, class_index) tuple from ImageFolder.
        return self.imgs[index]

    def __getitem__(self, index):
        (img, target) = super().__getitem__(index)
        logits_path = ImageAndLogitsFolder.logits_path_create(self.logits_prefix, self.get_image_path(index)[0])
        logits = np.load(logits_path)
        # Reshape to a (1, C) row vector regardless of stored shape.
        logits = torch.reshape(torch.Tensor(logits), (1, (- 1)))
        return (img, target, logits)
def get_job_links():
    """Collect {job name: html_url} for all jobs of the current GitHub
    Actions run, following pagination (100 jobs per page).

    NOTE(review): the f-string assigned to ``url`` is truncated in this
    copy — restore the full GitHub API jobs URL from the original source.

    Returns an empty dict on any fetch/parse failure.
    """
    run_id = os.environ['GITHUB_RUN_ID']
    url = f'
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job['name']: job['html_url'] for job in result['jobs']})
        # First request covered up to 100 jobs; fetch the remaining pages
        # (page numbering starts at 2 for the second page).
        pages_to_iterate_over = math.ceil(((result['total_count'] - 100) / 100))
        for i in range(pages_to_iterate_over):
            result = requests.get((url + f'&page={(i + 2)}')).json()
            jobs.update({job['name']: job['html_url'] for job in result['jobs']})
        return jobs
    except Exception as e:
        print('Unknown error, could not fetch links.', e)
        return {}
def get_veff(ks_grad, mol=None, dm=None):
    """Assemble the effective-potential contribution to UKS nuclear
    gradients: XC (plus optional NLC), Coulomb, and exact exchange for
    hybrid functionals, with optional grid and auxiliary-basis responses.

    Returns ``vxc`` tagged with ``exc1_grid`` (grid response energy) and,
    when the auxbasis response is requested, ``aux`` (auxiliary-basis
    energy derivative).
    """
    if (mol is None):
        mol = ks_grad.mol
    if (dm is None):
        dm = ks_grad.base.make_rdm1()
    t0 = (logger.process_clock(), logger.perf_counter())
    mf = ks_grad.base
    ni = mf._numint
    (grids, nlcgrids) = rks_grad._initialize_grids(ks_grad)
    mem_now = lib.current_memory()[0]
    # Leave 10% headroom under the configured memory cap, floor of 2 GB.
    max_memory = max(2000, ((ks_grad.max_memory * 0.9) - mem_now))
    if ks_grad.grid_response:
        # Include the response of the integration grid in exc/vxc.
        (exc, vxc) = uks_grad.get_vxc_full_response(ni, mol, grids, mf.xc, dm, max_memory=max_memory, verbose=ks_grad.verbose)
        if (mf.nlc or ni.libxc.is_nlc(mf.xc)):
            # Non-local correlation (e.g. VV10); evaluated on the total
            # (alpha + beta) density.
            if ni.libxc.is_nlc(mf.xc):
                xc = mf.xc
            else:
                xc = mf.nlc
            (enlc, vnlc) = rks_grad.get_nlc_vxc_full_response(ni, mol, nlcgrids, xc, (dm[0] + dm[1]), max_memory=max_memory, verbose=ks_grad.verbose)
            exc += enlc
            vxc += vnlc
        logger.debug1(ks_grad, 'sum(grids response) %s', exc.sum(axis=0))
    else:
        (exc, vxc) = uks_grad.get_vxc(ni, mol, grids, mf.xc, dm, max_memory=max_memory, verbose=ks_grad.verbose)
        if (mf.nlc or ni.libxc.is_nlc(mf.xc)):
            if ni.libxc.is_nlc(mf.xc):
                xc = mf.xc
            else:
                xc = mf.nlc
            (enlc, vnlc) = rks_grad.get_nlc_vxc(ni, mol, nlcgrids, xc, (dm[0] + dm[1]), max_memory=max_memory, verbose=ks_grad.verbose)
            vxc += vnlc
    t0 = logger.timer(ks_grad, 'vxc', *t0)
    if (not ni.libxc.is_hybrid_xc(mf.xc)):
        # Pure functional: Coulomb only, summed over both spins.
        vj = ks_grad.get_j(mol, dm)
        vxc += (vj[0] + vj[1])
        if ks_grad.auxbasis_response:
            e1_aux = vj.aux.sum((0, 1))
    else:
        # Hybrid: scale K by hyb and add a long-range correction when a
        # range-separation parameter omega is present.
        (omega, alpha, hyb) = ni.rsh_and_hybrid_coeff(mf.xc, spin=mol.spin)
        (vj, vk) = ks_grad.get_jk(mol, dm)
        if ks_grad.auxbasis_response:
            vk.aux = (vk.aux * hyb)
        vk[:] *= hyb
        if (omega != 0):
            vk_lr = ks_grad.get_k(mol, dm, omega=omega)
            vk[:] += (vk_lr * (alpha - hyb))
            if ks_grad.auxbasis_response:
                vk.aux[:] += (vk_lr.aux * (alpha - hyb))
        vxc += ((vj[0] + vj[1]) - vk)
        if ks_grad.auxbasis_response:
            e1_aux = vj.aux.sum((0, 1))
            e1_aux -= numpy.trace(vk.aux, axis1=0, axis2=1)
    if ks_grad.auxbasis_response:
        logger.debug1(ks_grad, 'sum(auxbasis response) %s', e1_aux.sum(axis=0))
        vxc = lib.tag_array(vxc, exc1_grid=exc, aux=e1_aux)
    else:
        vxc = lib.tag_array(vxc, exc1_grid=exc)
    return vxc
def train_model(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None):
    """mmcv-style training entry point: build dataloaders, wrap the model
    for (distributed) data parallelism, register hooks and run an
    EpochBasedRunner.

    Args:
        model: the model to train.
        dataset: a dataset or list of datasets (one loader is built each).
        cfg: mmcv Config with data/optimizer/runner/hook settings.
        distributed: wrap in MMDistributedDataParallel when True.
        validate: register an evaluation hook when True.
        timestamp: stamped onto the runner (used for log naming).
        meta: extra metadata passed through to the runner.
    """
    logger = get_root_logger(cfg.log_level)
    dataset = (dataset if isinstance(dataset, (list, tuple)) else [dataset])
    # Loader defaults; parrots builds get prefetch/pin_memory extras, and
    # whitelisted keys from cfg.data override everything.
    loader_cfg = {**dict(seed=cfg.get('seed'), drop_last=False, dist=distributed, num_gpus=len(cfg.gpu_ids)), **({} if (torch.__version__ != 'parrots') else dict(prefetch_num=2, pin_memory=False)), **dict(((k, cfg.data[k]) for k in ['samples_per_gpu', 'workers_per_gpu', 'shuffle', 'seed', 'drop_last', 'prefetch_num', 'pin_memory', 'persistent_workers'] if (k in cfg.data)))}
    train_loader_cfg = dict(loader_cfg, **cfg.data.get('train_dataloader', {}))
    data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset]
    use_adverserial_train = cfg.get('use_adversarial_train', False)
    if distributed:
        find_unused_parameters = cfg.get('find_unused_parameters', True)
        if use_adverserial_train:
            # Adversarial training needs the wrapper that supports multiple
            # optimizers/models.
            model = DistributedDataParallelWrapper(model, device_ids=[torch.cuda.current_device()], broadcast_buffers=False, find_unused_parameters=find_unused_parameters)
        else:
            model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False, find_unused_parameters=find_unused_parameters)
    else:
        model = MMDataParallel(model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
    optimizer = build_optimizers(model, cfg.optimizer)
    runner = EpochBasedRunner(model, optimizer=optimizer, work_dir=cfg.work_dir, logger=logger, meta=meta)
    runner.timestamp = timestamp
    if use_adverserial_train:
        # The optimizer step is driven inside the model for adversarial
        # training, so no optimizer hook is registered.
        optimizer_config = None
    else:
        fp16_cfg = cfg.get('fp16', None)
        if (fp16_cfg is not None):
            optimizer_config = Fp16OptimizerHook(**cfg.optimizer_config, **fp16_cfg, distributed=distributed)
        elif (distributed and ('type' not in cfg.optimizer_config)):
            optimizer_config = OptimizerHook(**cfg.optimizer_config)
        else:
            optimizer_config = cfg.optimizer_config
    runner.register_training_hooks(cfg.lr_config, optimizer_config, cfg.checkpoint_config, cfg.log_config, cfg.get('momentum_config', None))
    if distributed:
        runner.register_hook(DistSamplerSeedHook())
    if validate:
        eval_cfg = cfg.get('evaluation', {})
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
        # Validation always runs single-sample per GPU, unshuffled.
        dataloader_setting = dict(samples_per_gpu=1, workers_per_gpu=cfg.data.get('workers_per_gpu', 1), num_gpus=len(cfg.gpu_ids), dist=distributed, drop_last=False, shuffle=False)
        dataloader_setting = dict(dataloader_setting, **cfg.data.get('val_dataloader', {}))
        val_dataloader = build_dataloader(val_dataset, **dataloader_setting)
        eval_hook = (DistEvalHook if distributed else EvalHook)
        runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
    # resume_from takes precedence over load_from.
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
def extract_bilibili_video_id(link: str):
    """Extract the video id (e.g. ``av170001``) from a bilibili URL.

    The id is taken as the last path segment shaped like
    ``<char>v<digits>`` (covers av-style ids). When ``link`` is not a
    bilibili address, or no id can be parsed, the original link is returned
    unchanged.

    BUG FIX: the original docstring literal was broken (unterminated
    string); restored as a proper docstring.
    """
    if ('bilibili.com' in link):
        try:
            video_id = re.match('.*\\/(.v[0-9]*)', link).group(1)
            return video_id
        except Exception:
            # re.match returned None (no id in the path) — fall back.
            print('Not a valid video url', link)
            return link
    else:
        print('Not a valid bilibili url', link)
        return link
class ChangeRahPattern(ContextMenuSingle, DamagePatternMixin):
    """Context-menu entry letting the user override the damage pattern a
    Reactive Armor Hardener (RAH) module adapts to, or disable adaptation
    entirely."""

    def __init__(self):
        self.mainFrame = gui.mainFrame.MainFrame.getInstance()

    def display(self, callingWindow, srcContext, mainItem):
        # Shown only for a fitted RAH module within an active fit.
        if (srcContext != 'fittingModule'):
            return False
        if (self.mainFrame.getActiveFit() is None):
            return False
        if (((mainItem is None) or getattr(mainItem, 'isEmpty', False)) and (srcContext != 'fittingShip')):
            return False
        if (mainItem.item.group.name != 'Armor Resistance Shift Hardener'):
            return False
        self.module = mainItem
        # Maps generated wx menu-item ids to their damage pattern.
        self.patternEventMap = {}
        self.patterns = self._getPatterns()
        self.items = self._getItems(self.patterns)
        return True

    def getText(self, callingWindow, itmContext, mainItem):
        return _t('RAH Damage Pattern')

    def _addPattern(self, parentMenu, pattern, name):
        # Checkable entry; checked when it is the module's current override.
        id = ContextMenuSingle.nextID()
        self.patternEventMap[id] = pattern
        menuItem = wx.MenuItem(parentMenu, id, name, kind=wx.ITEM_CHECK)
        parentMenu.Bind(wx.EVT_MENU, self.handlePatternSwitch, menuItem)
        checked = (self.module.rahPatternOverride is pattern)
        return (menuItem, checked)

    def _addCategory(self, parentMenu, name):
        # Non-checkable entry used as a submenu header.
        id = ContextMenuSingle.nextID()
        menuItem = wx.MenuItem(parentMenu, id, name)
        parentMenu.Bind(wx.EVT_MENU, self.handlePatternSwitch, menuItem)
        return menuItem

    def getSubMenu(self, callingWindow, context, mainItem, rootMenu, i, pitem):
        # On wxMSW events must be bound to the root menu; elsewhere to the
        # immediate parent.
        msw = ('wxMSW' in wx.PlatformInfo)
        def makeMenu(container, parentMenu, root=False):
            # container is (patterns_dict, subcategories_dict); recurse into
            # subcategories as nested menus.
            menu = wx.Menu()
            if root:
                # Fixed root entries: follow the fit's pattern (None) or
                # disable adaptation ('disable').
                (menuItem, checked) = self._addPattern((rootMenu if msw else parentMenu), None, 'Fit Pattern')
                menu.Append(menuItem)
                menuItem.Check(checked)
                (menuItem, checked) = self._addPattern((rootMenu if msw else parentMenu), 'disable', 'Do Not Adapt')
                menu.Append(menuItem)
                menuItem.Check(checked)
                menu.AppendSeparator()
            for (name, subcontainer) in container[1].items():
                menuItem = self._addCategory((rootMenu if msw else parentMenu), name)
                subMenu = makeMenu(subcontainer, menu)
                menuItem.SetSubMenu(subMenu)
                menu.Append(menuItem)
            for (name, pattern) in container[0].items():
                (menuItem, checked) = self._addPattern((rootMenu if msw else parentMenu), pattern, name)
                menu.Append(menuItem)
                menuItem.Check(checked)
            menu.Bind(wx.EVT_MENU, self.handlePatternSwitch)
            return menu
        subMenu = makeMenu(self.items, rootMenu, root=True)
        return subMenu

    def handlePatternSwitch(self, event):
        # False sentinel: the id is not ours — let other handlers see it.
        pattern = self.patternEventMap.get(event.Id, False)
        if (pattern is False):
            event.Skip()
            return
        sFit = Fit.getInstance()
        fitID = self.mainFrame.getActiveFit()
        sFit.setRahPattern(fitID, self.module, pattern)
        wx.PostEvent(self.mainFrame, GE.FitChanged(fitIDs=(fitID,)))
def runas(args):
    """Spawn a copy of this tool as another user via CreateProcessWithLogonW.

    Args:
        args: dict with 'username', 'password' and 'domain' keys.

    Returns:
        False when process creation fails; None otherwise (the new console
        process continues independently).
    """
    # Idiom fix: identity comparison with None instead of `== None`.
    if (args['username'] is None) or (args['password'] is None):
        logging.error('username or password has to be given')
    else:
        printT('Try to run as via creds...')
        startupInfo = STARTUPINFO()
        startupInfo.cb = sizeof(startupInfo)
        processInformation = PROCESS_INFORMATION()
        processInformation.wShowWindow = 1
        processInformation.dwFlags = 1
        # Launch the current executable under the target credentials.
        applicationName = getFullCmdPath()
        status = CreateProcessWithLogonW(args['username'], args['domain'], args['password'], LOGON_WITH_PROFILE, applicationName, None, CREATE_NEW_CONSOLE, None, None, byref(startupInfo), byref(processInformation))
        if status == 0:
            logging.error('Impossible to create new process: {0}'.format(getLastErrorMessage()))
            return False
        printT('New process created')
# BUG FIX: the decorator line was mangled to '.overload(MultiVector)' (a
# syntax error). NOTE(review): assumed to be numba.extending.overload
# imported as `overload` — confirm against the original source.
@overload(MultiVector)
def MultiVector_ctor(layout, value=None, dtype=None):
    """Numba overload implementing the MultiVector constructor.

    Dispatches on argument types: an array ``value`` is wrapped directly;
    otherwise a non-None ``dtype`` selects a zero-filled coefficient array
    sized to the layout's dimension. Returns None (no overload) for
    unsupported signatures so numba reports a typing error.
    """
    if (not isinstance(layout, LayoutType)):
        return
    if isinstance(value, types.Array):
        def impl(layout, value=None, dtype=None):
            return MultiVector_basic_ctor(layout, value)
        return impl
    elif (dtype is not None):
        # gaDims is resolved at compile time and closed over by impl.
        n = layout.obj.gaDims
        def impl(layout, value=None, dtype=None):
            return MultiVector_basic_ctor(layout, np.zeros(n, dtype))
        return impl
def handle_speaker_voucher_email_sent(data):
    """Email a speaker their voucher code, then stamp the voucher with the
    send time.

    ``data`` must contain 'speaker_voucher_id'.
    """
    # Local import to avoid a circular dependency at module load.
    from conferences.models import SpeakerVoucher
    speaker_voucher = SpeakerVoucher.objects.get(id=data['speaker_voucher_id'])
    speaker = speaker_voucher.user
    voucher_code = speaker_voucher.voucher_code
    conference_name = speaker_voucher.conference.name.localize('en')
    send_email(template=EmailTemplate.SPEAKER_VOUCHER_CODE, to=speaker.email, subject=f'[{conference_name}] Your Speaker Voucher Code', variables={'firstname': get_name(speaker, 'there'), 'voucherCode': voucher_code, 'is_speaker_voucher': (speaker_voucher.voucher_type == SpeakerVoucher.VoucherType.SPEAKER)}, reply_to=[settings.SPEAKERS_EMAIL_ADDRESS])
    # Record when (and that) the email went out.
    speaker_voucher.voucher_email_sent_at = timezone.now()
    speaker_voucher.save()
def get_msdnet(blocks, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
    """Build an MSDNet (multi-scale dense network).

    Only the 22-layer configuration is supported; the per-block channel
    schedule and bottleneck factors are derived here and handed to MSDNet.

    Args:
        blocks: total layer selector; must be 22.
        model_name: weight-store key, required when ``pretrained``.
        pretrained: load weights via ``model_store.download_model``.
        root: local weight-cache directory.
        **kwargs: forwarded to MSDNet.

    Returns:
        The constructed (optionally pretrained) MSDNet.
    """
    assert (blocks == 22)
    # Fixed MSDNet-22 hyperparameters.
    num_scales = 4
    num_feature_blocks = 10
    base = 4
    step = 2
    reduction_rate = 0.5
    growth = 6
    growth_factor = [1, 2, 4, 4]
    use_bottleneck = True
    bottleneck_factor_per_scales = [1, 2, 4, 4]
    assert (reduction_rate > 0.0)
    init_layer_channels = [(64 * c) for c in growth_factor[:num_scales]]
    step_mode = 'even'
    # Layers per sub-network: `base` for the first, then `step` for each.
    layers_per_subnets = [base]
    for i in range((num_feature_blocks - 1)):
        layers_per_subnets.append((step if (step_mode == 'even') else ((step * i) + 1)))
    total_layers = sum(layers_per_subnets)
    # One scale is retired roughly every `interval` layers of total depth.
    interval = math.ceil((total_layers / num_scales))
    global_layer_ind = 0
    channels = []
    bottleneck_factors = []
    in_channels_tmp = init_layer_channels
    in_scales = num_scales
    for i in range(num_feature_blocks):
        layers_per_subnet = layers_per_subnets[i]
        scales_i = []
        channels_i = []
        bottleneck_factors_i = []
        for j in range(layers_per_subnet):
            # Scales still active at this depth.
            out_scales = int((num_scales - math.floor((global_layer_ind / interval))))
            global_layer_ind += 1
            scales_i += [out_scales]
            scale_offset = (num_scales - out_scales)
            in_dec_scales = (num_scales - len(in_channels_tmp))
            # Dense growth: each remaining scale widens by growth * factor.
            out_channels = [(in_channels_tmp[((scale_offset - in_dec_scales) + k)] + (growth * growth_factor[(scale_offset + k)])) for k in range(out_scales)]
            in_dec_scales = (num_scales - len(in_channels_tmp))
            bottleneck_factors_ij = bottleneck_factor_per_scales[in_dec_scales:][:len(in_channels_tmp)]
            in_channels_tmp = out_channels
            channels_i += [out_channels]
            bottleneck_factors_i += [bottleneck_factors_ij]
            if (in_scales > out_scales):
                # A scale was just dropped: insert a transition layer that
                # shrinks channels by `reduction_rate`.
                assert ((in_channels_tmp[0] % growth_factor[scale_offset]) == 0)
                out_channels1 = int(math.floor(((in_channels_tmp[0] / growth_factor[scale_offset]) * reduction_rate)))
                out_channels = [(out_channels1 * growth_factor[(scale_offset + k)]) for k in range(out_scales)]
                in_channels_tmp = out_channels
                channels_i += [out_channels]
                bottleneck_factors_i += [[]]
            in_scales = out_scales
        in_scales = scales_i[(- 1)]
        channels += [channels_i]
        bottleneck_factors += [bottleneck_factors_i]
    net = MSDNet(channels=channels, init_layer_channels=init_layer_channels, num_feature_blocks=num_feature_blocks, use_bottleneck=use_bottleneck, bottleneck_factors=bottleneck_factors, **kwargs)
    if pretrained:
        if ((model_name is None) or (not model_name)):
            raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
def multipleOf(validator, dB, instance, schema):
    """Yield a ValidationError unless *instance* is an exact multiple of *dB*."""
    # The keyword only applies to numeric instances.
    if not validator.is_type(instance, 'number'):
        return
    if not isinstance(dB, float):
        # Integer divisor: plain modulo decides it.
        failed = instance % dB
    else:
        quotient = instance / dB
        try:
            failed = int(quotient) != quotient
        except OverflowError:
            # int(inf) overflows for huge instances; settle it with exact
            # rational arithmetic instead.
            failed = (Fraction(instance) / Fraction(dB)).denominator != 1
    if failed:
        yield ValidationError(f'{instance!r} is not a multiple of {dB}')
def delete_all_daemonset_namespace(kubecli: KrknKubernetes, namespace: str):
    """Delete every daemonset in *namespace*.

    :param kubecli: Kubernetes client wrapper used to list and delete daemonsets
    :param namespace: namespace whose daemonsets are removed
    :return: the list of daemonset names that were deleted
    :raises Exception: re-raises any client error after logging it
    """
    try:
        daemonsets = kubecli.get_daemonset(namespace)
        for daemonset in daemonsets:
            # Lazy %-style args; the original concatenation also lacked the
            # space between the message and the name.
            logging.info("Deleting daemonset %s", daemonset)
            kubecli.delete_daemonset(daemonset, namespace)
    except Exception as e:
        logging.error("Exception when calling delete_all_daemonset_namespace: %s\n", str(e))
        # Bare raise preserves the original traceback (raise e would rewrite it).
        raise
    return daemonsets
def embedding_lookup(inputs, voca_size, initializer, reuse=False, trainable=True, scope='Embedding'):
    """Embed *inputs* using a table whose PAD row (row 0) is frozen.

    Row 0 of *initializer* seeds a non-trainable PAD embedding; the remaining
    rows seed the (optionally) trainable part of the table.

    Returns a tuple of (embedded inputs, full embedding table).
    """
    with tf.variable_scope(scope, reuse=reuse) as scope:
        # PAD row stays fixed regardless of `trainable`.
        pad_row = tf.get_variable('embedPAD', initializer=initializer[0:1], trainable=False, dtype=tf.float32)
        other_rows = tf.get_variable('embedLast', initializer=initializer[1:], trainable=trainable, dtype=tf.float32)
        table = tf.concat([pad_row, other_rows], axis=0, name='embed')
        embedded = tf.nn.embedding_lookup(table, inputs)
    return (embedded, table)
class StructuralTranslatorL3(StructuralTranslatorL2):
    """Level-3 structural translator: extends L2 with interface declarations.

    Walks each component's packed interface views, translates their port
    declarations, and records the result in ``s.structural.decl_ifcs``.
    Backend-specific ``rtlir_tr_*`` hooks are left abstract for subclasses.
    """

    def _get_structural_rtlir_gen_pass(s):
        # Use the L3 RTLIR generator so interface metadata is available.
        return StructuralRTLIRGenL3Pass

    def translate_structural(s, tr_top):
        """Initialize the per-component interface-declaration map, then
        run the inherited structural translation."""
        s.structural.decl_ifcs = {}
        super().translate_structural(tr_top)

    def translate_decls(s, m):
        """Translate interface declarations of component *m*, then delegate
        the remaining declarations to the L2 implementation."""
        m_rtype = m.get_metadata(StructuralRTLIRGenL3Pass.rtlir_type)
        ifc_decls = []
        for (ifc_id, rtype) in m_rtype.get_ifc_views_packed():
            # An interface view may be an array of interfaces; unwrap one
            # level so `ifc_rtype` is always the element type.
            if isinstance(rtype, rt.Array):
                array_rtype = rtype
                ifc_rtype = rtype.get_sub_type()
            else:
                array_rtype = None
                ifc_rtype = rtype
            ports = []
            all_properties = ifc_rtype.get_all_properties_packed()
            for (p_id, _p_rtype) in all_properties:
                # Same unwrapping for each (possibly arrayed) interface port.
                if isinstance(_p_rtype, rt.Array):
                    p_array_rtype = _p_rtype
                    p_rtype = _p_rtype.get_sub_type()
                else:
                    p_array_rtype = None
                    p_rtype = _p_rtype
                ports.append(s.rtlir_tr_interface_port_decl(m, s.rtlir_tr_var_id(p_id), p_rtype, s.rtlir_tr_unpacked_array_type(p_array_rtype)))
            ifc_decls.append(s.rtlir_tr_interface_decl(ifc_id, ifc_rtype, s.rtlir_tr_unpacked_array_type(array_rtype), s.rtlir_tr_interface_port_decls(ports)))
        s.structural.decl_ifcs[m] = s.rtlir_tr_interface_decls(ifc_decls)
        super().translate_decls(m)

    def rtlir_signal_expr_translation(s, expr, m, status='intermediate'):
        """Translate interface attribute/index expressions; defer everything
        else to the L2 translator."""
        if isinstance(expr, sexp.InterfaceAttr):
            return s.rtlir_tr_interface_attr(s.rtlir_signal_expr_translation(expr.get_base(), m), expr.get_attr(), status)
        elif isinstance(expr, sexp.InterfaceViewIndex):
            return s.rtlir_tr_interface_array_index(s.rtlir_signal_expr_translation(expr.get_base(), m), expr.get_index(), status)
        else:
            return super().rtlir_signal_expr_translation(expr, m, status)

    # Backend hooks: concrete translators (e.g. Verilog) must implement these.
    def rtlir_tr_interface_port_decls(s, port_decls):
        raise NotImplementedError()

    def rtlir_tr_interface_port_decl(s, m, port_id, port_rtype, port_array_type):
        raise NotImplementedError()

    def rtlir_tr_interface_decls(s, ifc_decls):
        raise NotImplementedError()

    def rtlir_tr_interface_decl(s, ifc_id, ifc_rtype, array_type, port_decls):
        raise NotImplementedError()

    def rtlir_tr_interface_array_index(s, base_signal, index, status):
        raise NotImplementedError()

    def rtlir_tr_interface_attr(s, base_signal, attr, status):
        raise NotImplementedError()
def _convert_examples_to_ner_features(examples: List[NERExample], tokenizer: BertTokenizer, args: NERTrainArguments, label_list: List[str], cls_token_at_end: Optional[bool]=False):
    """Convert NER examples into model-ready features.

    Builds label<->id maps from *label_list*, encodes each example with the
    tokenizer, aligns its label sequence, and logs the first few conversions
    for manual inspection.
    """
    # Build both direction maps in a single pass over the label list.
    label_map = {}
    id_to_label = {}
    for idx, label in enumerate(label_list):
        label_map[label] = idx
        id_to_label[idx] = label

    features = []
    for example in examples:
        tokens = tokenizer.tokenize(example.text)
        inputs = tokenizer._encode_plus(tokens, max_length=args.max_seq_length, truncation_strategy=TruncationStrategy.LONGEST_FIRST, padding_strategy=PaddingStrategy.MAX_LENGTH)
        label_ids = _process_target_sentence(tokens=tokens, origin_sentence=example.text, target_sentence=example.label, max_length=args.max_seq_length, label_map=label_map, tokenizer=tokenizer, cls_token_at_end=cls_token_at_end)
        features.append(NERFeatures(**inputs, label_ids=label_ids))

    # Log up to five converted examples for eyeballing.
    for i, example in enumerate(examples[:5]):
        feature = features[i]
        logger.info('*** Example ***')
        logger.info('sentence: %s' % example.text)
        logger.info('target: %s' % example.label)
        logger.info('tokens: %s' % ' '.join(tokenizer.convert_ids_to_tokens(feature.input_ids)))
        logger.info('label: %s' % ' '.join([id_to_label[label_id] for label_id in feature.label_ids]))
        logger.info('features: %s' % feature)
    return features
class Pin_():
    """Attribute/item-style accessor for pin widget values.

    ``pin.foo`` / ``pin['foo']`` reads the pin named ``foo``;
    ``pin.foo = v`` / ``pin['foo'] = v`` pushes a value update.
    """
    # When True, reads of unset pins behave strictly (see get_pin_value).
    _strict = False

    def use_strict(self):
        """Switch this accessor into strict mode."""
        # object.__setattr__ bypasses this class's own __setattr__, which
        # would otherwise validate '_strict' as a pin widget name.
        object.__setattr__(self, '_strict', True)

    def __getattr__(self, name: str):
        """Read a pin value via attribute access."""
        # Leave dunder lookups (e.g. __deepcopy__, __iter__) to the normal
        # protocol so introspection doesn't trigger pin reads.
        if name.startswith('__'):
            raise AttributeError(('Pin object has no attribute %r' % name))
        return self.__getitem__(name)

    def __getitem__(self, name: str):
        """Read a pin value via item access; validates the pin name first."""
        check_dom_name_value(name, 'pin `name`')
        return get_pin_value(name, self._strict)

    def __setattr__(self, name: str, value):
        """Write a pin value via attribute access."""
        # 'use_strict' is a method on this class; assigning to it would
        # shadow the API, so it is reserved.
        assert (name != 'use_strict'), "'use_strict' is a reserve name, can't use as pin widget name"
        check_dom_name_value(name, 'pin `name`')
        self.__setitem__(name, value)

    def __setitem__(self, name: str, value):
        """Send a pin_update message carrying the new value."""
        send_msg('pin_update', spec=dict(name=name, attributes={'value': value}))
class F28_ClearPart(F25_ClearPart):
    """Fedora 28 clearpart command: adds the ``--cdl`` flag on top of F25."""

    def __init__(self, *args, **kwargs):
        super(F28_ClearPart, self).__init__(*args, **kwargs)
        # Whether LDL DASDs should be reformatted to CDL format.
        self.cdl = kwargs.get('cdl', False)

    def __str__(self):
        retval = super(F28_ClearPart, self).__str__()
        if retval and self.cdl:
            # Append the flag to the command line emitted by the parent.
            retval = retval.rstrip() + ' --cdl\n'
        return retval

    def _getParser(self):
        op = super(F28_ClearPart, self)._getParser()
        op.add_argument('--cdl', dest='cdl', default=False, version=F28, action='store_true', help='\n Reformat any LDL DASDs to CDL format.')
        return op
def analysis(file_path, chaos_tests_config):
    """Profile telemetry data, log outlier/heatmap findings and recommend chaos tests.

    :param file_path: path of the telemetry data file to analyze
    :param chaos_tests_config: mapping with 'CPU', 'MEM' and 'NETWORK' test lists
    """
    data = load_telemetry_data(file_path)
    zscores = calculate_zscores(data)
    (outliers_cpu, outliers_memory, outliers_network) = identify_outliers(zscores)
    (cpu_services, mem_services) = get_services_above_heatmap_threshold(data, heatmap_cpu_threshold, heatmap_mem_threshold)
    logging.info(' Profiling ')
    logging.info(f'CPU outliers: {outliers_cpu}')
    logging.info(f'Memory outliers: {outliers_memory}')
    logging.info(f'Network outliers: {outliers_network}')
    logging.info(' HeatMap Analysis ')
    if cpu_services:
        # Fix: logging.info does not take print-style extra positional args;
        # the original call raised a formatting error inside logging.
        logging.info('Services with CPU_HEATMAP above threshold: %s', cpu_services)
    else:
        logging.info('There are no services that are using significant CPU compared to their assigned limits (infinite in case no limits are set).')
    if mem_services:
        logging.info('Services with MEM_HEATMAP above threshold: %s', mem_services)
    else:
        logging.info('There are no services that are using significant MEMORY compared to their assigned limits (infinite in case no limits are set).')
    time.sleep(2)
    logging.info(' Recommendations ')
    if cpu_services:
        logging.info(f'''Recommended tests for {str(cpu_services)} :
{chaos_tests_config['CPU']}''')
        logging.info('\n')
    if mem_services:
        logging.info(f'''Recommended tests for {str(mem_services)} :
{chaos_tests_config['MEM']}''')
        logging.info('\n')
    if outliers_network:
        # Fix: the original f-string was missing braces around
        # str(outliers_network), so the literal text was logged.
        logging.info(f'''Recommended tests for {str(outliers_network)} :
{chaos_tests_config['NETWORK']}''')
        logging.info('\n')
    logging.info('\n')
    logging.info('Please check data in utilisation.txt for further analysis')
def disable_existing_mirrors(func):
    """Decorator: disable every RepoMirrorConfig row while *func* runs,
    then re-enable them all afterwards.

    The re-enable step runs in a ``finally`` block so mirrors are restored
    even when *func* raises (the original left them disabled on failure).
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's metadata
    def wrapper(*args, **kwargs):
        for mirror in RepoMirrorConfig.select():
            mirror.is_enabled = False
            mirror.save()
        try:
            func(*args, **kwargs)
        finally:
            for mirror in RepoMirrorConfig.select():
                mirror.is_enabled = True
                mirror.save()
    return wrapper
def web_history(fake_save_manager, tmpdir, database, config_stub, stubs, monkeypatch):
    """Provide a WebHistory instance wired to the test database and patch it
    into the history module.

    NOTE(review): this looks like a pytest fixture whose ``@pytest.fixture``
    decorator was lost — confirm against version control.
    """
    # Deterministic timestamps and unlimited completion items for tests.
    config_stub.val.completion.timestamp_format = '%Y-%m-%d'
    config_stub.val.completion.web_history.max_items = (- 1)
    web_history = history.WebHistory(database, stubs.FakeHistoryProgress())
    # Make module-level consumers see this instance.
    monkeypatch.setattr(history, 'web_history', web_history)
    return web_history
class TestMatchChromeUrls():
    """Tests for matching chrome:// URL patterns."""

    # NOTE(review): the fixture and parametrize decorators were garbled in
    # the source; reconstructed here — confirm the parametrized URLs
    # against version control.
    @pytest.fixture
    def up(self):
        return urlmatch.UrlPattern('chrome://favicon/*')

    def test_attrs(self, up):
        assert (up._scheme == 'chrome')
        assert (up.host == 'favicon')
        assert (not up._match_subdomains)
        assert (not up._match_all)
        assert (up._path is None)

    @pytest.mark.parametrize('url, expected', [
        ('chrome://favicon/', True),
        ('chrome://favicon/x', True),
        ('chrome://history', False),
    ])
    def test_urls(self, up, url, expected):
        assert (up.matches(QUrl(url)) == expected)
def conduct_admined_team_search(username, query, encountered_teams, results):
    """Append search entries for admined teams matching *query*.

    Teams already present in *encountered_teams* are skipped; both
    *encountered_teams* and *results* are mutated in place.
    """
    matching_teams = model.team.get_matching_admined_teams(query, get_authenticated_user(), limit=5)
    for team in matching_teams:
        if (team.id in encountered_teams):
            continue
        encountered_teams.add(team.id)
        entry = {
            'kind': 'team',
            'name': team.name,
            'organization': search_entity_view(username, team.organization),
            'avatar': avatar.get_data_for_team(team),
            'score': TEAM_SEARCH_SCORE,
            'href': '/organization/' + team.organization.username + '/teams/' + team.name,
        }
        results.append(entry)
class LmdbDataset(Dataset):
    """Dataset of images (and optionally labels) stored in an LMDB archive.

    Labels are filtered at construction time by charset, length and —
    optionally — minimum image dimension; the LMDB indices of surviving
    samples are kept in ``filtered_index_list``.
    """

    def __init__(self, root: str, charset: str, max_label_len: int, min_image_dim: int=0, remove_whitespace: bool=True, normalize_unicode: bool=True, unlabelled: bool=False, transform: Optional[Callable]=None):
        self._env = None  # opened lazily (see `env`) so instances pickle cleanly
        self.root = root
        self.unlabelled = unlabelled
        self.transform = transform
        self.labels = []
        self.filtered_index_list = []
        self.num_samples = self._preprocess_labels(charset, remove_whitespace, normalize_unicode, max_label_len, min_image_dim)

    def __del__(self):
        # Close the LMDB environment if it was ever opened.
        if (self._env is not None):
            self._env.close()
            self._env = None

    def _create_env(self):
        """Open the LMDB environment read-only, tuned for DataLoader workers."""
        return lmdb.open(self.root, max_readers=1, readonly=True, create=False, readahead=False, meminit=False, lock=False)

    @property
    def env(self):
        # BUG FIX: __getitem__ uses `self.env.begin()`, so `env` must be a
        # property — as a plain method the attribute access returned a bound
        # method object and `.begin` failed.
        if (self._env is None):
            self._env = self._create_env()
        return self._env

    def _preprocess_labels(self, charset, remove_whitespace, normalize_unicode, max_label_len, min_image_dim):
        """Filter labels by charset/length (and image size when requested).

        Returns the number of usable samples.
        """
        charset_adapter = CharsetAdapter(charset)
        with self._create_env() as env, env.begin() as txn:
            num_samples = int(txn.get('num-samples'.encode()))
            if self.unlabelled:
                # No labels to filter; every sample is usable.
                return num_samples
            for index in range(num_samples):
                index += 1  # LMDB keys are 1-indexed
                label_key = f'label-{index:09d}'.encode()
                label = txn.get(label_key).decode()
                if remove_whitespace:
                    label = ''.join(label.split())
                if normalize_unicode:
                    # Decompose unicode composites and drop non-ASCII residue.
                    label = unicodedata.normalize('NFKD', label).encode('ascii', 'ignore').decode()
                if (len(label) > max_label_len):
                    continue
                label = charset_adapter(label)
                # Drop labels emptied by charset filtering.
                if (not label):
                    continue
                if (min_image_dim > 0):
                    img_key = f'image-{index:09d}'.encode()
                    buf = io.BytesIO(txn.get(img_key))
                    (w, h) = Image.open(buf).size
                    # BUG FIX: `min_image_dim` is a parameter of this method;
                    # the original read the never-assigned self.min_image_dim.
                    if ((w < min_image_dim) or (h < min_image_dim)):
                        continue
                self.labels.append(label)
                self.filtered_index_list.append(index)
        return len(self.labels)

    def __len__(self):
        return self.num_samples

    def __getitem__(self, index):
        # Unlabelled mode returns the raw index as the "label".
        if self.unlabelled:
            label = index
        else:
            label = self.labels[index]
            index = self.filtered_index_list[index]
        img_key = f'image-{index:09d}'.encode()
        with self.env.begin() as txn:
            imgbuf = txn.get(img_key)
        buf = io.BytesIO(imgbuf)
        img = Image.open(buf).convert('RGB')
        if (self.transform is not None):
            img = self.transform(img)
        return (img, label)
# NOTE(review): the original called _start_docstrings(...) as a bare
# statement, discarding the decorator it returns; restored to decorator
# form — confirm against version control.
@_start_docstrings('\n CamemBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear\n layers on top of the hidden-states output to compute `span start logits` and `span end logits`\n ', CAMEMBERT_START_DOCSTRING)
class CamembertForQuestionAnswering(RobertaForQuestionAnswering):
    """CamemBERT QA model: reuses the RoBERTa head with CamemBERT config."""
    config_class = CamembertConfig
class AesEncyptionSession():
    """AES-CBC session with PKCS7 padding and base64-encoded ciphertext."""

    # NOTE(review): defined without `self`, so this was almost certainly a
    # @staticmethod whose decorator was lost; restored (class-level calls
    # keep working, and instance-level calls now work too).
    @staticmethod
    def create_from_keypair(handshake_key: str, keypair):
        """Build a session from a base64 handshake key decrypted with *keypair*.

        The decrypted blob is key (16 bytes) followed by IV (16 bytes).

        :raises ValueError: if RSA decryption yields nothing
        """
        handshake_key_bytes: bytes = base64.b64decode(handshake_key.encode('UTF-8'))
        private_key_data = base64.b64decode(keypair.get_private_key().encode('UTF-8'))
        private_key = serialization.load_der_private_key(private_key_data, None, None)
        key_and_iv = private_key.decrypt(handshake_key_bytes, asymmetric_padding.PKCS1v15())
        if (key_and_iv is None):
            raise ValueError('Decryption failed!')
        return AesEncyptionSession(key_and_iv[:16], key_and_iv[16:])

    def __init__(self, key, iv):
        self.cipher = Cipher(algorithms.AES(key), modes.CBC(iv))
        self.padding_strategy = padding.PKCS7(algorithms.AES.block_size)

    def encrypt(self, data) -> bytes:
        """PKCS7-pad and AES-CBC-encrypt *data*; return base64 ciphertext."""
        encryptor = self.cipher.encryptor()
        padder = self.padding_strategy.padder()
        padded_data = (padder.update(data) + padder.finalize())
        encrypted = (encryptor.update(padded_data) + encryptor.finalize())
        return base64.b64encode(encrypted)

    def decrypt(self, data) -> str:
        """Decrypt base64 ciphertext, strip PKCS7 padding, decode to str."""
        decryptor = self.cipher.decryptor()
        unpadder = self.padding_strategy.unpadder()
        decrypted = (decryptor.update(base64.b64decode(data)) + decryptor.finalize())
        unpadded_data = (unpadder.update(decrypted) + unpadder.finalize())
        return unpadded_data.decode()
class Agent():
    """DDPG agent: actor-critic with target networks and Gaussian exploration."""

    max_grad_norm = 0.5  # gradient-clipping threshold for both networks

    def __init__(self):
        self.training_step = 0
        self.var = 1.0  # exploration std-dev, annealed each update()
        (self.eval_cnet, self.target_cnet) = (CriticNet().float(), CriticNet().float())
        (self.eval_anet, self.target_anet) = (ActorNet().float(), ActorNet().float())
        self.memory = Memory(2000)
        self.optimizer_c = optim.Adam(self.eval_cnet.parameters(), lr=0.001)
        self.optimizer_a = optim.Adam(self.eval_anet.parameters(), lr=0.0003)

    def select_action(self, state):
        """Sample an exploratory action around the actor output, clipped to [-2, 2]."""
        state = torch.from_numpy(state).float().unsqueeze(0)
        mu = self.eval_anet(state)
        dist = Normal(mu, torch.tensor(self.var, dtype=torch.float))
        action = dist.sample()
        # BUG FIX: Tensor.clamp is NOT in-place; the original discarded the
        # clamped result and returned unclipped actions.
        action = action.clamp((- 2.0), 2.0)
        return (action.item(),)

    def save_param(self):
        """Persist actor and critic weights."""
        torch.save(self.eval_anet.state_dict(), 'param/ddpg_anet_params.pkl')
        torch.save(self.eval_cnet.state_dict(), 'param/ddpg_cnet_params.pkl')

    def store_transition(self, transition):
        self.memory.update(transition)

    def update(self):
        """One DDPG training step on a sampled minibatch; returns mean Q."""
        self.training_step += 1
        transitions = self.memory.sample(32)
        s = torch.tensor([t.s for t in transitions], dtype=torch.float)
        a = torch.tensor([t.a for t in transitions], dtype=torch.float).view((- 1), 1)
        r = torch.tensor([t.r for t in transitions], dtype=torch.float).view((- 1), 1)
        s_ = torch.tensor([t.s_ for t in transitions], dtype=torch.float)
        # Bootstrapped TD target from the frozen target networks.
        with torch.no_grad():
            q_target = (r + (args.gamma * self.target_cnet(s_, self.target_anet(s_))))
        q_eval = self.eval_cnet(s, a)
        # Critic step: Huber loss against the TD target.
        self.optimizer_c.zero_grad()
        c_loss = F.smooth_l1_loss(q_eval, q_target)
        c_loss.backward()
        nn.utils.clip_grad_norm_(self.eval_cnet.parameters(), self.max_grad_norm)
        self.optimizer_c.step()
        # Actor step: maximize the critic's value of the actor's actions.
        self.optimizer_a.zero_grad()
        a_loss = (- self.eval_cnet(s, self.eval_anet(s)).mean())
        a_loss.backward()
        nn.utils.clip_grad_norm_(self.eval_anet.parameters(), self.max_grad_norm)
        self.optimizer_a.step()
        # Staggered hard target updates (critic at step%200, actor at step%201).
        if ((self.training_step % 200) == 0):
            self.target_cnet.load_state_dict(self.eval_cnet.state_dict())
        if ((self.training_step % 201) == 0):
            self.target_anet.load_state_dict(self.eval_anet.state_dict())
        # Anneal exploration noise toward a floor of 0.01.
        self.var = max((self.var * 0.999), 0.01)
        return q_eval.mean().item()
def test_ancestor_with_generic() -> None:
    """Ancestors of generic subclasses resolve through Generic[T]."""
    tree = builder.parse('\n from typing import TypeVar, Generic\n T = TypeVar("T")\n class A(Generic[T]):\n def a_method(self):\n print("hello")\n class B(A[T]): pass\n class C(B[str]): pass\n ')
    klass_b = next(tree['B'].infer())
    ancestor_names_b = [ancestor.name for ancestor in klass_b.ancestors()]
    assert ancestor_names_b == ['A', 'Generic', 'object']
    klass_c = next(tree['C'].infer())
    ancestor_names_c = [ancestor.name for ancestor in klass_c.ancestors()]
    assert ancestor_names_c == ['B', 'A', 'Generic', 'object']
class GetBuiltinSourceTest(unittest.TestCase):
    """Round-trip builtin component sources through copies on disk and run them."""

    def setUp(self) -> None:
        self.test_dir = Path(tempfile.mkdtemp('torchx_specs_finder_test'))
        self.orig_cwd = os.getcwd()
        # Run relative to this test module so builtin lookup resolves.
        os.chdir(os.path.dirname(__file__))

    def tearDown(self) -> None:
        os.chdir(self.orig_cwd)
        shutil.rmtree(self.test_dir)

    def test_get_builtin_source_with_echo(self) -> None:
        src = finder.get_builtin_source('utils.echo')
        copy_path = self.test_dir / 'echo_copy.py'
        copy_path.write_text(src)
        runner = get_runner()
        app_handle = runner.run_component(scheduler='local_cwd', component=f'{str(copy_path)}:echo', component_args=['--msg', 'hello world'])
        status = runner.wait(app_handle, wait_interval=0.1)
        self.assertIsNotNone(status)
        self.assertEqual(AppState.SUCCEEDED, status.state)

    def test_get_builtin_source_with_booth(self) -> None:
        src = finder.get_builtin_source('utils.booth')
        copy_path = self.test_dir / 'booth_copy.py'
        copy_path.write_text(src)
        runner = get_runner()
        trial_idx = 0
        tracker_base = str(self.test_dir / 'tracking')
        app_handle = runner.run_component(scheduler='local_cwd', cfg={'prepend_cwd': True}, component=f'{str(copy_path)}:booth', component_args=['--x1=1', '--x2=3', f'--trial_idx={trial_idx}', f'--tracker_base={tracker_base}'])
        status = runner.wait(app_handle, wait_interval=0.1)
        self.assertIsNotNone(status)
        self.assertEqual(AppState.SUCCEEDED, status.state)
        # booth(1, 3) == 0 is the global minimum; verify the tracked result.
        tracker = FsspecResultTracker(tracker_base)
        self.assertEqual(0, tracker[trial_idx]['booth_eval'])
def test_basics(testdir, tmp_path, pytestconfig):
    """End-to-end check of --report-log output: session records, warning
    record, and round-tripping reports through the serialization hook."""
    p = testdir.makepyfile('\n import warnings\n\n def test_ok():\n pass\n\n def test_fail():\n assert 0\n\n def test_warning():\n warnings.warn("message", UserWarning)\n ')
    log_file = tmp_path / 'log.json'
    result = testdir.runpytest('--report-log', str(log_file))
    assert result.ret == pytest.ExitCode.TESTS_FAILED
    result.stdout.fnmatch_lines([f'* generated report log file: {log_file}*'])
    json_objs = [json.loads(line) for line in log_file.read_text().splitlines()]
    assert len(json_objs) == 14
    # First and last records bracket the session.
    session_start = json_objs[0]
    assert session_start == {'pytest_version': pytest.__version__, '$report_type': 'SessionStart'}
    session_finish = json_objs[-1]
    assert session_finish == {'exitstatus': pytest.ExitCode.TESTS_FAILED, '$report_type': 'SessionFinish'}
    # Separate the single warning record from the report records.
    warning_objs = [obj for obj in json_objs if obj['$report_type'] == 'WarningMessage']
    json_objs = [obj for obj in json_objs if obj['$report_type'] != 'WarningMessage']
    [warning] = warning_objs
    assert warning == {'$report_type': 'WarningMessage', 'category': 'UserWarning', 'when': 'runtest', 'message': 'message', 'lineno': 10, 'location': None, 'filename': str(p)}
    # Every serialized report must deserialize back into a BaseReport.
    pm = pytestconfig.pluginmanager
    for json_obj in json_objs[1:-1]:
        rep = pm.hook.pytest_report_from_serializable(config=pytestconfig, data=json_obj)
        assert isinstance(rep, BaseReport)
class Stl10Dataset(dataset_mixin.DatasetMixin):
    """Chainer dataset over STL-10 JPEGs found at a hard-coded glob path."""

    def __init__(self, resize=64):
        # Target square size passed to the preprocessing step.
        self.resize = resize
        self.image_files = glob('/home/users/ntu/yasin001/project/stl10/*.jpg')
        print(len(self.image_files))

    def __len__(self):
        return len(self.image_files)

    def get_example(self, i):
        """Load image *i*, retrying on read failure, and preprocess it.

        NOTE(review): re-seeds NumPy's RNG on every call (presumably for
        multiprocess loaders — makes sampling non-reproducible), and retries
        the SAME file forever if Image.open keeps failing; confirm intended.
        """
        np.random.seed()
        img = None
        while (img is None):
            try:
                fn = ('%s' % self.image_files[i])
                img = Image.open(fn)
            except Exception as e:
                print(i, fn, str(e))
        return preprocess_image_no_crop(img, resize=self.resize)
class DataViewer():
def __init__(self, app, dt=0.01, time_window_length=30, plot_period=0.2, data_recording_period=0.1):
self._dt = dt
self._data_window_length = (time_window_length / data_recording_period)
self._update_counter = 0
self._plots_per_row = 4
self._plotter = Plotter(app=app, plots_per_row=self._plots_per_row)
self._plot_period = plot_period
self._data_recording_period = data_recording_period
self._plot_delay = 0
self._data_recording_delay = 0
self._time = 0
truth_color = (0, 255, 0)
truth_color_2 = (160, 202, 111)
truth_color_3 = (124, 230, 167)
estimate_color = (255, 0, 0)
estimate_color_2 = (255, 150, 150)
estimate_color_3 = (255, 154, 111)
control_color = (0, 0, 255)
self._plotter.create_plot_widget(plot_id='pn', xlabel='Time (s)', ylabel='pn (m)', window_length=self._data_window_length)
self._plotter.create_plot_widget(plot_id='pe', xlabel='Time (s)', ylabel='pe (m)', window_length=self._data_window_length)
self._plotter.create_plot_widget(plot_id='h', xlabel='Time (s)', ylabel='h (m)', window_length=self._data_window_length)
self._plotter.create_plot_widget(plot_id='wind', xlabel='Time (s)', ylabel='wind (m/s)', window_length=self._data_window_length)
self._plotter.create_data_set(plot_id='pn', data_label='pn', data_color=truth_color)
self._plotter.create_data_set(plot_id='pn', data_label='pn_e', data_color=estimate_color)
self._plotter.create_data_set(plot_id='pe', data_label='pe', data_color=truth_color)
self._plotter.create_data_set(plot_id='pe', data_label='pe_e', data_color=estimate_color)
self._plotter.create_data_set(plot_id='h', data_label='h', data_color=truth_color)
self._plotter.create_data_set(plot_id='h', data_label='h_e', data_color=estimate_color)
self._plotter.create_data_set(plot_id='h', data_label='h_c', data_color=control_color)
self._plotter.create_data_set(plot_id='wind', data_label='wn', data_color=truth_color)
self._plotter.create_data_set(plot_id='wind', data_label='wn_e', data_color=estimate_color)
self._plotter.create_data_set(plot_id='wind', data_label='we', data_color=truth_color_2)
self._plotter.create_data_set(plot_id='wind', data_label='we_e', data_color=estimate_color_2)
self._plotter.create_plot_widget(plot_id='Va', xlabel='Time (s)', ylabel='Va (m/s)', window_length=self._data_window_length)
self._plotter.create_plot_widget(plot_id='alpha', xlabel='Time (s)', ylabel='alpha (deg)', window_length=self._data_window_length)
self._plotter.create_plot_widget(plot_id='beta', xlabel='Time (s)', ylabel='beta (deg)', window_length=self._data_window_length)
self._plotter.create_plot_widget(plot_id='Vg', xlabel='Time (s)', ylabel='Vg (m/s)', window_length=self._data_window_length)
self._plotter.create_data_set(plot_id='Va', data_label='Va', data_color=truth_color)
self._plotter.create_data_set(plot_id='Va', data_label='Va_e', data_color=estimate_color)
self._plotter.create_data_set(plot_id='Va', data_label='Va_c', data_color=control_color)
self._plotter.create_data_set(plot_id='alpha', data_label='alpha', data_color=truth_color)
self._plotter.create_data_set(plot_id='alpha', data_label='alpha_e', data_color=estimate_color)
self._plotter.create_data_set(plot_id='beta', data_label='beta', data_color=truth_color)
self._plotter.create_data_set(plot_id='beta', data_label='beta_e', data_color=estimate_color)
self._plotter.create_data_set(plot_id='Vg', data_label='Vg', data_color=truth_color)
self._plotter.create_data_set(plot_id='Vg', data_label='Vg_e', data_color=estimate_color)
self._plotter.create_plot_widget(plot_id='phi', xlabel='Time (s)', ylabel='phi (deg)', window_length=self._data_window_length)
self._plotter.create_plot_widget(plot_id='theta', xlabel='Time (s)', ylabel='theta (deg)', window_length=self._data_window_length)
self._plotter.create_plot_widget(plot_id='psi', xlabel='Time (s)', ylabel='psi (deg)', window_length=self._data_window_length)
self._plotter.create_plot_widget(plot_id='chi', xlabel='Time (s)', ylabel='chi (deg)', window_length=self._data_window_length)
self._plotter.create_data_set(plot_id='phi', data_label='phi', data_color=truth_color)
self._plotter.create_data_set(plot_id='phi', data_label='phi_e', data_color=estimate_color)
self._plotter.create_data_set(plot_id='phi', data_label='phi_c', data_color=control_color)
self._plotter.create_data_set(plot_id='theta', data_label='theta', data_color=truth_color)
self._plotter.create_data_set(plot_id='theta', data_label='theta_e', data_color=estimate_color)
self._plotter.create_data_set(plot_id='theta', data_label='theta_c', data_color=control_color)
self._plotter.create_data_set(plot_id='psi', data_label='psi', data_color=truth_color)
self._plotter.create_data_set(plot_id='psi', data_label='psi_e', data_color=estimate_color)
self._plotter.create_data_set(plot_id='psi', data_label='psi_c', data_color=control_color)
self._plotter.create_data_set(plot_id='chi', data_label='chi', data_color=truth_color)
self._plotter.create_data_set(plot_id='chi', data_label='chi_e', data_color=estimate_color)
self._plotter.create_data_set(plot_id='chi', data_label='chi_c', data_color=control_color)
self._plotter.create_plot_widget(plot_id='p', xlabel='Time (s)', ylabel='p (deg/s)', window_length=self._data_window_length)
self._plotter.create_plot_widget(plot_id='q', xlabel='Time (s)', ylabel='q (deg/s)', window_length=self._data_window_length)
self._plotter.create_plot_widget(plot_id='r', xlabel='Time (s)', ylabel='r (deg/s)', window_length=self._data_window_length)
self._plotter.create_plot_widget(plot_id='bias', xlabel='Time (s)', ylabel='bias (deg/s)', window_length=self._data_window_length)
self._plotter.create_data_set(plot_id='p', data_label='p', data_color=truth_color)
self._plotter.create_data_set(plot_id='p', data_label='p_e', data_color=estimate_color)
self._plotter.create_data_set(plot_id='q', data_label='q', data_color=truth_color)
self._plotter.create_data_set(plot_id='q', data_label='q_e', data_color=estimate_color)
self._plotter.create_data_set(plot_id='r', data_label='r', data_color=truth_color)
self._plotter.create_data_set(plot_id='r', data_label='r_e', data_color=estimate_color)
self._plotter.create_data_set(plot_id='bias', data_label='bx', data_color=truth_color)
self._plotter.create_data_set(plot_id='bias', data_label='bx_e', data_color=estimate_color)
self._plotter.create_data_set(plot_id='bias', data_label='by', data_color=truth_color_2)
self._plotter.create_data_set(plot_id='bias', data_label='by_e', data_color=estimate_color_2)
self._plotter.create_data_set(plot_id='bias', data_label='bz', data_color=truth_color_3)
self._plotter.create_data_set(plot_id='bias', data_label='bz_e', data_color=estimate_color_3)
self._plotter.create_plot_widget(plot_id='delta_e', xlabel='Time (s)', ylabel='delta_e (deg)', window_length=self._data_window_length)
self._plotter.create_plot_widget(plot_id='delta_a', xlabel='Time (s)', ylabel='delta_a (deg)', window_length=self._data_window_length)
self._plotter.create_plot_widget(plot_id='delta_r', xlabel='Time (s)', ylabel='delta_r (deg)', window_length=self._data_window_length)
self._plotter.create_plot_widget(plot_id='delta_t', xlabel='Time (s)', ylabel='delta_t (deg)', window_length=self._data_window_length)
self._plotter.create_data_set(plot_id='delta_e', data_label='delta_e', data_color=control_color)
self._plotter.create_data_set(plot_id='delta_a', data_label='delta_a', data_color=control_color)
self._plotter.create_data_set(plot_id='delta_r', data_label='delta_r', data_color=control_color)
self._plotter.create_data_set(plot_id='delta_t', data_label='delta_t', data_color=control_color)
self._plotter.show_window()
def update(self, true_state, estimated_state, commanded_state, delta):
if (self._data_recording_delay >= self._data_recording_period):
self.__update_data(true_state, estimated_state, commanded_state, delta, self._time)
self._data_recording_delay = 0
if (self._plot_delay >= self._plot_period):
self.__update_plot()
self._plot_delay = 0
self._plot_delay += self._dt
self._data_recording_delay += self._dt
self._time += self._dt
def __update_data(self, true_state, estimated_state, commanded_state, delta, t):
if (commanded_state != None):
self._plotter.add_data_point(plot_id='h', data_label='h_c', xvalue=t, yvalue=commanded_state.altitude)
self._plotter.add_data_point(plot_id='Va', data_label='Va_c', xvalue=t, yvalue=commanded_state.Va)
self._plotter.add_data_point(plot_id='phi', data_label='phi_c', xvalue=t, yvalue=self.__rad_to_deg(commanded_state.phi))
self._plotter.add_data_point(plot_id='theta', data_label='theta_c', xvalue=t, yvalue=self.__rad_to_deg(commanded_state.theta))
self._plotter.add_data_point(plot_id='chi', data_label='chi_c', xvalue=t, yvalue=self.__rad_to_deg(commanded_state.chi))
if (true_state != None):
self._plotter.add_data_point(plot_id='pn', data_label='pn', xvalue=t, yvalue=true_state.north)
self._plotter.add_data_point(plot_id='pe', data_label='pe', xvalue=t, yvalue=true_state.east)
self._plotter.add_data_point(plot_id='h', data_label='h', xvalue=t, yvalue=true_state.altitude)
self._plotter.add_data_point(plot_id='Va', data_label='Va', xvalue=t, yvalue=true_state.Va)
self._plotter.add_data_point(plot_id='alpha', data_label='alpha', xvalue=t, yvalue=true_state.alpha)
self._plotter.add_data_point(plot_id='beta', data_label='beta', xvalue=t, yvalue=true_state.beta)
self._plotter.add_data_point(plot_id='phi', data_label='phi', xvalue=t, yvalue=self.__rad_to_deg(true_state.phi))
self._plotter.add_data_point(plot_id='theta', data_label='theta', xvalue=t, yvalue=self.__rad_to_deg(true_state.theta))
self._plotter.add_data_point(plot_id='psi', data_label='psi', xvalue=t, yvalue=self.__rad_to_deg(true_state.psi))
self._plotter.add_data_point(plot_id='chi', data_label='chi', xvalue=t, yvalue=self.__rad_to_deg(true_state.chi))
self._plotter.add_data_point(plot_id='p', data_label='p', xvalue=t, yvalue=self.__rad_to_deg(true_state.p))
self._plotter.add_data_point(plot_id='q', data_label='q', xvalue=t, yvalue=self.__rad_to_deg(true_state.q))
self._plotter.add_data_point(plot_id='r', data_label='r', xvalue=t, yvalue=self.__rad_to_deg(true_state.r))
self._plotter.add_data_point(plot_id='Vg', data_label='Vg', xvalue=t, yvalue=true_state.Vg)
self._plotter.add_data_point(plot_id='wind', data_label='wn', xvalue=t, yvalue=true_state.wn)
self._plotter.add_data_point(plot_id='wind', data_label='we', xvalue=t, yvalue=true_state.we)
self._plotter.add_data_point(plot_id='bias', data_label='bx', xvalue=t, yvalue=self.__rad_to_deg(true_state.bx))
self._plotter.add_data_point(plot_id='bias', data_label='by', xvalue=t, yvalue=self.__rad_to_deg(true_state.by))
self._plotter.add_data_point(plot_id='bias', data_label='bz', xvalue=t, yvalue=self.__rad_to_deg(true_state.bz))
if (estimated_state != None):
self._plotter.add_data_point(plot_id='pn', data_label='pn_e', xvalue=t, yvalue=estimated_state.north)
self._plotter.add_data_point(plot_id='pe', data_label='pe_e', xvalue=t, yvalue=estimated_state.east)
self._plotter.add_data_point(plot_id='h', data_label='h_e', xvalue=t, yvalue=estimated_state.altitude)
self._plotter.add_data_point(plot_id='Va', data_label='Va_e', xvalue=t, yvalue=estimated_state.Va)
self._plotter.add_data_point(plot_id='alpha', data_label='alpha_e', xvalue=t, yvalue=estimated_state.alpha)
self._plotter.add_data_point(plot_id='beta', data_label='beta_e', xvalue=t, yvalue=estimated_state.beta)
self._plotter.add_data_point(plot_id='phi', data_label='phi_e', xvalue=t, yvalue=self.__rad_to_deg(estimated_state.phi))
self._plotter.add_data_point(plot_id='theta', data_label='theta_e', xvalue=t, yvalue=self.__rad_to_deg(estimated_state.theta))
self._plotter.add_data_point(plot_id='psi', data_label='psi_e', xvalue=t, yvalue=self.__rad_to_deg(estimated_state.psi))
self._plotter.add_data_point(plot_id='chi', data_label='chi_e', xvalue=t, yvalue=self.__rad_to_deg(estimated_state.chi))
self._plotter.add_data_point(plot_id='p', data_label='p_e', xvalue=t, yvalue=self.__rad_to_deg(estimated_state.p))
self._plotter.add_data_point(plot_id='q', data_label='q_e', xvalue=t, yvalue=self.__rad_to_deg(estimated_state.q))
self._plotter.add_data_point(plot_id='r', data_label='r_e', xvalue=t, yvalue=self.__rad_to_deg(estimated_state.r))
self._plotter.add_data_point(plot_id='Vg', data_label='Vg_e', xvalue=t, yvalue=estimated_state.Vg)
self._plotter.add_data_point(plot_id='wind', data_label='wn_e', xvalue=t, yvalue=estimated_state.wn)
self._plotter.add_data_point(plot_id='wind', data_label='we_e', xvalue=t, yvalue=estimated_state.we)
self._plotter.add_data_point(plot_id='bias', data_label='bx_e', xvalue=t, yvalue=self.__rad_to_deg(estimated_state.bx))
self._plotter.add_data_point(plot_id='bias', data_label='by_e', xvalue=t, yvalue=self.__rad_to_deg(estimated_state.by))
self._plotter.add_data_point(plot_id='bias', data_label='bz_e', xvalue=t, yvalue=self.__rad_to_deg(estimated_state.bz))
if (delta != None):
self._plotter.add_data_point(plot_id='delta_e', data_label='delta_e', xvalue=t, yvalue=self.__rad_to_deg(delta.elevator))
self._plotter.add_data_point(plot_id='delta_a', data_label='delta_a', xvalue=t, yvalue=self.__rad_to_deg(delta.aileron))
self._plotter.add_data_point(plot_id='delta_r', data_label='delta_r', xvalue=t, yvalue=self.__rad_to_deg(delta.rudder))
self._plotter.add_data_point(plot_id='delta_t', data_label='delta_t', xvalue=t, yvalue=self.__rad_to_deg(delta.throttle))
    def process_app(self):
        """Pump the plotter's GUI event loop once without blocking (0 ms wait)."""
        self._plotter.process_app(0)
    def __update_plot(self):
        """Redraw all plots with the data points accumulated so far."""
        self._plotter.update_plots()
    def close_data_viewer(self):
        """Close the plotter window."""
        self._plotter.close_window()
    def save_plot_image(self, plot_name):
        """Save the current plot window to an image file named *plot_name*."""
        self._plotter.save_image(plot_name)
    def __rad_to_deg(self, radians):
        """Wrap *radians* (via the module-level wrap() helper) and convert to degrees.

        NOTE(review): wrap()'s exact wrapping interval is defined elsewhere in
        this module — confirm. Callers also pass non-angular values (e.g.
        throttle), which are simply scaled by 180/pi.
        """
        rad = wrap(radians, 0)
        return ((rad * 180) / np.pi)
def get_lasso_pen_max(X, y, loss, fit_intercept, weights=None, sample_weight=None, offsets=None, multi_task=False, groups=None, nuc=False):
    """Largest penalty value at which the (structured) lasso solution is all zeros.

    The GLM loss gradient evaluated at coef == 0 determines the smallest
    penalty killing every coefficient. The norm applied to that gradient
    depends on which structured-penalty flag is set; at most one of
    multi_task / groups / nuc may be requested.
    """
    # Only one structured penalty can be active at a time.
    n_structured = sum([multi_task, groups is not None, nuc])
    assert n_structured <= 1

    loss_func = get_glm_loss_func(config=loss, X=X, y=y,
                                  fit_intercept=fit_intercept,
                                  sample_weight=sample_weight,
                                  offsets=offsets)
    # Gradient of the loss at the all-zeros coefficient vector.
    grad = loss_func.grad_at_coef_eq0()

    if multi_task:
        return mult_task_lasso_max(grad, weights=weights)
    if nuc:
        return nuclear_norm_max(grad, weights=weights)
    if groups is not None:
        return group_lasso_max(grad, groups=groups, weights=weights)
    return lasso_max(grad, weights=weights)
class ProductOfExpertGaussian(layers.Layer):
    """Combine Gaussian experts (mu, std) into their product distribution.

    A product of Gaussians is Gaussian: its precision is the sum of the
    experts' precisions and its mean is the precision-weighted average of
    the experts' means.
    """

    def __init__(self, **kwargs):
        super(ProductOfExpertGaussian, self).__init__(**kwargs)

    def call(self, inputs):
        # inputs is a sequence of (mu, std) pairs, one per expert.
        mus, stds = zip(*inputs)
        precisions = [(1 / (s ** 2)) for s in stds]
        total_precision = K.sum(precisions, axis=0)
        # Precision-weighted mean: sum_i (mu_i * prec_i) / total_precision.
        poe_mu = K.sum([((m * p) / total_precision) for (m, p) in zip(mus, precisions)], axis=0)
        poe_std = K.sqrt((1 / total_precision))
        return [poe_mu, poe_std]
class CNN16(nn.Module):
    """Small CNN head: three conv/GroupNorm/ReLU/maxpool stages (8x spatial
    downsampling in total) followed by a conv block mapping to 2 channels."""

    def __init__(self, input_dim=256):
        super(CNN16, self).__init__()

        def down_block(in_ch, out_ch):
            # conv -> GroupNorm (8 channels per group) -> ReLU -> 2x downsample
            return nn.Sequential(
                nn.Conv2d(in_ch, out_ch, 3, padding=1, stride=1),
                nn.GroupNorm(num_groups=(out_ch // 8), num_channels=out_ch),
                nn.ReLU(),
                nn.MaxPool2d(kernel_size=2, stride=2),
            )

        # Attribute names (layer1/layer3/layer4/layer10) kept so existing
        # checkpoints' state_dict keys still match.
        self.layer1 = down_block(IN_DIM, input_dim)
        self.layer3 = down_block(input_dim, input_dim)
        self.layer4 = down_block(input_dim, input_dim)
        # Head: conv/GN/ReLU then a 1x1 conv to 2 output channels, no pooling.
        self.layer10 = nn.Sequential(
            nn.Conv2d(input_dim, input_dim, 3, padding=1, stride=1),
            nn.GroupNorm(num_groups=(input_dim // 8), num_channels=input_dim),
            nn.ReLU(),
            nn.Conv2d(input_dim, 2, 1),
        )

    def forward(self, x):
        for stage in (self.layer1, self.layer3, self.layer4, self.layer10):
            x = stage(x)
        return x
def adjust_learning_rate(args, optimizer, epoch):
    """Set the learning rate for *epoch* on every parameter group of *optimizer*.

    Two schedules: cosine annealing (args.cosine) from args.learning_rate down
    to lr * lr_decay_rate**3 over args.epochs, or step decay that multiplies by
    args.lr_decay_rate once per milestone in args.lr_decay_epochs already passed.
    """
    new_lr = args.learning_rate
    if args.cosine:
        # Cosine schedule: eta_min is the floor the rate anneals towards.
        eta_min = new_lr * (args.lr_decay_rate ** 3)
        new_lr = eta_min + ((new_lr - eta_min) * (1 + math.cos((math.pi * epoch) / args.epochs))) / 2
    else:
        # Step schedule: count how many decay milestones lie behind this epoch.
        passed = np.sum(epoch > np.asarray(args.lr_decay_epochs))
        if passed > 0:
            new_lr = new_lr * (args.lr_decay_rate ** passed)
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def test_get_prefixed_keys(orchestrator):
    """Keys under a prefix are enumerated, shrink after deletes, empty at the end."""
    total = 10
    prefix = 'building/'
    created = set()
    for idx in range(total):
        full_key = slash_join(prefix, str(idx))
        orchestrator.set_key(full_key, 'test_val')
        created.add(full_key)
    assert (len(orchestrator.get_prefixed_keys(prefix)) == total)

    # Remove a random strict subset and check the count tracks it.
    removed = randrange(1, total)
    for idx in range(removed):
        full_key = slash_join(prefix, str(idx))
        orchestrator.delete_key(full_key)
        created.remove(full_key)
    assert (len(orchestrator.get_prefixed_keys(prefix)) == (total - removed))

    # Clean up the rest; the prefix must then be empty.
    for leftover in created:
        orchestrator.delete_key(leftover)
    assert (len(orchestrator.get_prefixed_keys(prefix)) == 0)
def on_event(args):
    """Hook callback: report keyboard/mouse events and unhook on U+M / U+K."""
    if isinstance(args, KeyboardEvent):
        key_down = (args.event_type == 'key down')
        if key_down and (args.current_key == 'A') and ('Lcontrol' in args.pressed_key):
            print('Ctrl + A was pressed')
        if key_down and (args.current_key == 'K'):
            print('K was pressed')
        # U held together with M/K tears down the corresponding hook.
        if key_down and (args.current_key == 'M') and ('U' in args.pressed_key):
            hk.unhook_mouse()
            print('Unhook mouse')
        if key_down and (args.current_key == 'K') and ('U' in args.pressed_key):
            hk.unhook_keyboard()
            print('Unhook keyboard')
    if isinstance(args, MouseEvent):
        if args.event_type == 'key down':
            if args.current_key == 'RButton':
                print('Right button pressed')
            elif args.current_key == 'WheelButton':
                print('Wheel button pressed')
class MoselLexer(RegexLexer):
    """Pygments lexer for the Mosel optimization-modelling language (FICO Xpress).

    Fix: the `url` attribute was an unterminated string literal (`url = '`),
    which made the module unparseable; restored to the Xpress Mosel reference
    documentation URL.
    """
    name = 'Mosel'
    aliases = ['mosel']
    filenames = ['*.mos']
    url = 'https://www.fico.com/fico-xpress-optimization/docs/latest/mosel/mosel_lang/dhtml/moselreflang.html'
    version_added = '2.6'
    tokens = {'root': [('\\n', Text), ('\\s+', Text.Whitespace), ('!.*?\\n', Comment.Single), ('\\(!(.|\\n)*?!\\)', Comment.Multiline), (words(('and', 'as', 'break', 'case', 'count', 'declarations', 'do', 'dynamic', 'elif', 'else', 'end-', 'end', 'evaluation', 'false', 'forall', 'forward', 'from', 'function', 'hashmap', 'if', 'imports', 'include', 'initialisations', 'initializations', 'inter', 'max', 'min', 'model', 'namespace', 'next', 'not', 'nsgroup', 'nssearch', 'of', 'options', 'or', 'package', 'parameters', 'procedure', 'public', 'prod', 'record', 'repeat', 'requirements', 'return', 'sum', 'then', 'to', 'true', 'union', 'until', 'uses', 'version', 'while', 'with'), prefix='\\b', suffix='\\b'), Keyword.Builtin), (words(('range', 'array', 'set', 'list', 'mpvar', 'mpproblem', 'linctr', 'nlctr', 'integer', 'string', 'real', 'boolean', 'text', 'time', 'date', 'datetime', 'returned', 'Model', 'Mosel', 'counter', 'xmldoc', 'is_sos1', 'is_sos2', 'is_integer', 'is_binary', 'is_continuous', 'is_free', 'is_semcont', 'is_semint', 'is_partint'), prefix='\\b', suffix='\\b'), Keyword.Type), ('(\\+|\\-|\\*|/|=|<=|>=|\\||\\^|<|>|<>|\\.\\.|\\.|:=|::|:|in|mod|div)', Operator), ('[()\\[\\]{},;]+', Punctuation), (words(FUNCTIONS, prefix='\\b', suffix='\\b'), Name.Function), ('(\\d+\\.(?!\\.)\\d*|\\.(?!.)\\d+)([eE][+-]?\\d+)?', Number.Float), ('\\d+([eE][+-]?\\d+)?', Number.Integer), ('[+-]?Infinity', Number.Integer), ('0[xX][0-9a-fA-F]+', Number), ('"', String.Double, 'double_quote'), ("\\'", String.Single, 'single_quote'), ('(\\w+|(\\.(?!\\.)))', Text)], 'single_quote': [("\\'", String.Single, '#pop'), ("[^\\']+", String.Single)], 'double_quote': [('(\\\\"|\\\\[0-7]{1,3}\\D|\\\\[abfnrtv]|)', String.Escape), ('\\"', String.Double, '#pop'), ('[^"\\\\]+', String.Double)]}
class SvgStop(Tag):
    """An SVG gradient <stop> element exposing offset, stop-color and
    stop-opacity as editor-configurable attributes.

    NOTE(review): the bare `_attribute_decorator(...)`, `_stop_color.setter`,
    `_stop_color.deleter`, `_stop_opactity.*` and `_offset.setter` statements
    below look like decorators that lost their leading '@' (and possibly an
    accompanying '@property') in this copy of the source; as written they are
    invalid/no-op statements and the property wiring is broken — restore the
    decorators against the upstream project before use. ('opactity' typo is
    preserved as it may be part of the public attribute name.)
    """
    # Intended: editor metadata + getter for the 'stop-color' style entry.
    _attribute_decorator('WidgetSpecific', 'Gradient color', 'ColorPicker', {})
    def css_stop_color(self):
        return self.style.get('stop-color', None)
    # Intended: setter for 'stop-color'.
    _stop_color.setter
    def css_stop_color(self, value):
        self.style['stop-color'] = str(value)
    # Intended: deleter for 'stop-color'.
    _stop_color.deleter
    def css_stop_color(self):
        del self.style['stop-color']
    # Intended: editor metadata + getter for 'stop-opacity' (0.0-1.0).
    _attribute_decorator('WidgetSpecific', 'The opacity property sets the opacity level for the gradient.\n The opacity-level describes the transparency-level, where 1 is not transparent at all, 0.5 is 50% see-through, and 0 is completely transparent.', float, {'possible_values': '', 'min': 0.0, 'max': 1.0, 'default': 1.0, 'step': 0.1})
    def css_stop_opactity(self):
        return self.style.get('stop-opacity', None)
    _stop_opactity.setter
    def css_stop_opactity(self, value):
        self.style['stop-opacity'] = str(value)
    _stop_opactity.deleter
    def css_stop_opactity(self):
        del self.style['stop-opacity']
    # Intended: editor metadata + getter for the 'offset' attribute (percent).
    _attribute_decorator('WidgetSpecific', 'The offset value for the gradient stop. It is in percentage', float, {'possible_values': '', 'min': 0, 'max': 100, 'default': 0, 'step': 1})
    def attr_offset(self):
        return self.attributes.get('offset', None)
    _offset.setter
    def attr_offset(self, value):
        self.attributes['offset'] = str(value)
    def __init__(self, offset='0%', color='rgb(255,255,0)', opacity=1.0, *args, **kwargs):
        """Create a <stop> tag and seed offset/color/opacity via the accessors."""
        super(SvgStop, self).__init__(*args, **kwargs)
        self.type = 'stop'
        self.attr_offset = offset
        self.css_stop_color = color
        self.css_stop_opactity = opacity
def get_win_folder_from_registry(csidl_name: str) -> str:
    """Resolve a CSIDL folder name to its path via the Windows registry.

    Looks the folder up under HKCU\\...\\Explorer\\Shell Folders.

    Raises:
        ValueError: for a CSIDL name outside the supported set.
        NotImplementedError: when not running on Windows.
    """
    known_folders = {
        'CSIDL_APPDATA': 'AppData',
        'CSIDL_COMMON_APPDATA': 'Common AppData',
        'CSIDL_LOCAL_APPDATA': 'Local AppData',
        'CSIDL_PERSONAL': 'Personal',
    }
    shell_folder_name = known_folders.get(csidl_name)
    if shell_folder_name is None:
        raise ValueError(f'Unknown CSIDL name: {csidl_name}')
    if sys.platform != 'win32':
        raise NotImplementedError
    import winreg  # local import: only available/meaningful on Windows

    key = winreg.OpenKey(
        winreg.HKEY_CURRENT_USER,
        'Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders',
    )
    directory, _ = winreg.QueryValueEx(key, shell_folder_name)
    return str(directory)
def parse_measurement_systems(data, tree):
    """Collect measurementSystemName entries from the CLDR XML *tree* into
    data['measurement_systems'], skipping drafts/alternates per the shared
    _should_skip_elem policy."""
    systems = data.setdefault('measurement_systems', {})
    for elem in tree.findall('.//measurementSystemNames/measurementSystemName'):
        system_type = elem.attrib['type']
        if _should_skip_elem(elem, type=system_type, dest=systems):
            continue
        _import_type_text(systems, elem, type=system_type)
class Effect298(BaseEffect):
    """Passive effect: boosts the falloff of all Gunnery-requiring modules by
    the container's falloffBonus, scaled by skill level when skill-sourced."""

    type = 'passive'

    def handler(fit, container, context, projectionRange, **kwargs):
        # Handlers in this codebase are invoked unbound (no self by design).
        level = container.level if ('skill' in context) else 1

        def affects_gunnery(mod):
            return mod.item.requiresSkill('Gunnery')

        fit.modules.filteredItemBoost(
            affects_gunnery, 'falloff',
            (container.getModifiedItemAttr('falloffBonus') * level), **kwargs)
def _pil_interp_torch10(method):
    """Map an interpolation-method name to torchvision's InterpolationMode
    (torch >= 1.10 API); unknown names fall back to bilinear."""
    named_modes = {
        'bicubic': InterpolationMode.BICUBIC,
        'lanczos': InterpolationMode.LANCZOS,
        'hamming': InterpolationMode.HAMMING,
    }
    return named_modes.get(method, InterpolationMode.BILINEAR)
def compute_trans_list(theta, img_data, use_voronoi):
    """Align every image against every model and store the resulting
    transforms in theta['trans_list'][image][model].

    Tasks are dispatched through the Redis-backed runner (TPRJB) when a Redis
    host is passed as the first CLI argument, otherwise through the local
    multiprocessing runner (TPMU). Optionally recomputes Voronoi weights
    afterwards.
    """
    N = theta['N']
    K = theta['K']
    n = theta['n']
    # Result grid: trans_list[j][k_] filled in from task results below.
    theta['trans_list'] = [[None for _ in range(K)] for _ in range(N)]
    tasks = {}
    # One alignment task per (model k_, image j) pair; keyed by a fresh uuid.
    for k_ in range(K):
        A_k = theta['A'][k_]
        for (j, d) in enumerate(img_data['dj']):
            t = dict()
            t['uuid'] = str(uuid.uuid4())
            t['module'] = 'aitom.average.ml.faml.faml'
            t['method'] = 'model_based_align_help'
            t['kwargs'] = {'img_db_path': img_data['db_path'], 'd': d, 'A_k': A_k, 'n': n, 'i': j, 'k': k_}
            tasks[t['uuid']] = t
    # SECURITY NOTE(review): hard-coded Redis password committed in source —
    # move it to configuration/environment and rotate the credential.
    if (len(sys.argv) > 1):
        rt_s = [_ for _ in TPRJB.run_iterator(tasks, redis_host=sys.argv[1], redis_port=6379, redis_password='2os43FR0Y1NVxAsy6k10A5to3oltsAl6vVeplZ9ktODQ88cs')]
    else:
        rt_s = [_ for _ in TPMU.run_iterator(tasks, worker_num=MULTIPROCESSING_WORKER_NUM)]
    # Results may arrive in any order; scatter them back by their (i, k) tags.
    for rt in rt_s:
        j = rt['result']['i']
        k_ = rt['result']['k']
        result = rt['result']['transforms']
        theta['trans_list'][j][k_] = result
    if use_voronoi:
        compute_voronoi_weights(theta)
def raw_hyperprior(quality, metric='mse', pretrained=False, progress=True, **kwargs):
    """Build the raw hyperprior model at the requested quality level.

    Args:
        quality: integer quality level in [1, 8].
        metric: 'mse' or 'ms-ssim' — selects which pretrained weights to use.
        pretrained, progress, **kwargs: forwarded to the model loader.

    Raises:
        ValueError: on an unknown metric or an out-of-range quality.
    """
    if metric not in ('mse', 'ms-ssim'):
        raise ValueError(f'Invalid metric "{metric}"')
    if quality < 1 or quality > 8:
        raise ValueError(f'Invalid quality "{quality}", should be between (1, 8)')
    return _load_model('raw_hyperprior', metric, quality, pretrained, progress, **kwargs)
def test_replace_node(game_editor):
    """replace_node keeps existing connections and updates docks in the
    neighbouring area that point at the renamed node."""
    regions = game_editor.game.region_list
    landing_loc = AreaIdentifier('Temple Grounds', 'Landing Site')
    service_loc = AreaIdentifier('Temple Grounds', 'Service Access')
    landing_area = regions.area_by_area_location(landing_loc)
    save_station = landing_area.node_with_name('Save Station')
    old_door = landing_area.node_with_name('Door to Service Access')
    assert isinstance(old_door, DockNode)
    requirement = landing_area.connections[save_station][old_door]

    renamed_door = dataclasses.replace(old_door, identifier=old_door.identifier.renamed('FooBar'))
    game_editor.replace_node(landing_area, old_door, renamed_door)

    # Connection to the replaced node must survive with the same requirement.
    assert regions.area_by_area_location(landing_loc).connections[save_station][renamed_door] is requirement
    # The dock on the other side must now target the renamed node.
    far_door = regions.area_by_area_location(service_loc).node_with_name('Door to Landing Site')
    assert isinstance(far_door, DockNode)
    assert far_door.default_connection.node_name == 'FooBar'
def generator_sampler(opt):
    """Sample images from a trained generator into
    <outf>/samples/<data>/<model><epoch>/<subfolder>/NNNNNNN.png, then extract
    features and run peek() on the results.

    A 'mark' file in the output directory flags a previously completed run:
    if present, sampling is skipped; if the directory exists without it, the
    run is redone from scratch. Requires CUDA (netG and noise are moved to GPU).
    """
    opt.batchSize = 64
    opt.folderSize = 600
    opt.overWrite = False
    opt.outf = g.default_repo_dir
    opt = addDataInfo(opt)
    netG = get_generator_model(opt)
    netG.load_state_dict(torch.load(get_generator_loc(opt)))
    netG.eval()
    # Output directory name encodes dataset, model and epoch.
    opt.name = (((((opt.outf + 'samples/') + opt.data) + '/') + opt.model) + str(opt.epoch))
    print_prop(opt)
    mkdir((opt.outf + 'samples'))
    mkdir(((opt.outf + 'samples/') + opt.data))
    if (os.path.exists(opt.name) and (not opt.overWrite)):
        if os.path.exists((opt.name + '/mark')):
            # Completed earlier: just refresh the features and stop.
            print('Sampling already finished before. Now pass.')
            saveFeature(opt.name, opt, opt.feature_model)
            return
        else:
            print('Partially finished. Now rerun. ')
    mkdir(opt.name)
    netG.cuda()
    # NOTE: torch.autograd.Variable is the legacy (pre-0.4) API.
    noise = Variable(torch.FloatTensor(opt.batchSize, 100, 1, 1).cuda())
    def giveName(iter):
        # Zero-pad the running sample index to 7 digits.
        ans = str(iter)
        return (('0' * (7 - len(ans))) + ans)
    # NOTE(review): 'iter' shadows the builtin within this function.
    iter = 0
    # Write samples in sub-folders of at most folderSize images each.
    for subfolder in range(0, (1 + (opt.sampleSize // opt.folderSize))):
        mkdir(((opt.name + '/') + str(subfolder)))
        for i in range(0, (1 + (opt.folderSize // opt.batchSize))):
            noise.data.normal_(0, 1)
            fake = netG(noise)
            for j in range(0, len(fake.data)):
                saveImage(fake.data[j], (((((opt.name + '/') + str(subfolder)) + '/') + giveName(iter)) + '.png'))
                iter += 1
                # Stop the batch when the folder is full or the quota is met.
                if (((iter % opt.folderSize) == 0) or (iter >= opt.sampleSize)):
                    break
            if (((iter % opt.folderSize) == 0) or (iter >= opt.sampleSize)):
                break
        if (iter >= opt.sampleSize):
            break
    if (opt.dataset == 'mnist_s'):
        print('Warning: subclass experiment.. Not saving features..')
    else:
        saveFeature(opt.name, opt, opt.feature_model)
    peek(opt.data, (opt.model + str(opt.epoch)))
    # Completion flag checked at the top of this function on re-runs.
    with open((opt.name + '/mark'), 'w') as f:
        f.write('')
class TestSendEvent(EndianTest):
    """Round-trip pack/unpack test for the X11 SendEvent request.

    NOTE(review): several literal values in req_args_0 were lost from this
    copy of the source — 'destination', the Expose event's window=, and
    'event_mask' all have missing right-hand sides, so this module does NOT
    currently parse. The missing values are encoded inside req_bin_0 (which
    itself shows signs of mangling, e.g. a literal backslash-x sequence);
    restore them from the upstream generated test file.
    """
    def setUp(self):
        self.req_args_0 = {'destination': , 'event': event.Expose(count=50227, height=24760, sequence_number=0, type=12, width=10272, window=, x=40165, y=13291), 'event_mask': , 'propagate': 0}
        self.req_bin_0 = b'\x19\x00\x0b\x001_\x0bE\x9fG\rz\x0c\x00\x00\x00\xea\x18\\xe5\x9c\xeb3 (\xb8`3\xc4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    def testPackRequest0(self):
        # Packing the argument dict must reproduce the recorded wire bytes.
        bin = request.SendEvent._request.to_binary(*(), **self.req_args_0)
        self.assertBinaryEqual(bin, self.req_bin_0)
    def testUnpackRequest0(self):
        # Parsing the wire bytes must consume everything and round-trip the args.
        (args, remain) = request.SendEvent._request.parse_binary(self.req_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_0)
def test_macro_create_with_alias_name(base_app):
    """Creating a macro whose name already belongs to an alias must be rejected."""
    shared_name = 'my_macro'
    run_cmd(base_app, 'alias create {} help'.format(shared_name))
    out, err = run_cmd(base_app, 'macro create {} help'.format(shared_name))
    assert 'Macro cannot have the same name as an alias' in err[0]
    assert base_app.last_result is False
def transform_return_stmt(builder: IRBuilder, stmt: ReturnStmt) -> None:
    """Lower a `return` statement: evaluate the expression (defaulting to
    None for a bare return), coerce it to the function's declared return
    type, and emit the return through the innermost nonlocal control."""
    value = builder.accept(stmt.expr) if stmt.expr else builder.builder.none()
    coerced = builder.coerce(value, builder.ret_types[-1], stmt.line)
    builder.nonlocal_control[-1].gen_return(builder, coerced, stmt.line)
class MenuManager(QObject):
    """Singleton controller of the Qt demo launcher: builds the menus from
    examples.xml, drives Score animations between them, and launches examples
    and Assistant documentation."""

    # Action codes handed to TextButton instances and dispatched in itemSelected().
    (ROOT, MENU1, MENU2, LAUNCH, DOCUMENTATION, QUIT, FULLSCREEN, UP, DOWN, BACK, LAUNCH_QML) = range(11)
    # Lazily created singleton instance (see instance()).
    pInstance = None
    def __init__(self):
        """Initialise state and parse examples.xml; widgets are wired later in init()."""
        super(MenuManager, self).__init__()
        self.contentsDoc = None          # DOM of examples.xml (set by readXmlDocument)
        self.assistantProcess = QProcess()  # long-lived Qt Assistant child process
        self.helpRootUrl = ''
        self.docDir = QDir()
        self.imgDir = QDir()
        self.info = {}                   # per-example metadata keyed by name
        # Widgets/animations created in init() once the window exists.
        self.window = None
        self.ticker = None
        self.tickerInAnim = None
        self.upButton = None
        self.downButton = None
        self.score = Score()
        # Bookkeeping of what is currently on screen (movie name strings).
        self.currentMenu = '[no menu visible]'
        self.currentCategory = '[no category visible]'
        self.currentMenuButtons = '[no menu buttons visible]'
        self.currentInfo = '[no info visible]'
        self.currentMenuCode = (- 1)
        self.readXmlDocument()
def instance(cls):
if (cls.pInstance is None):
cls.pInstance = cls()
return cls.pInstance
    def getResource(self, name):
        """Fetch a resource by URL; always returns an empty QByteArray here.

        NOTE(review): appears to be a stub — getImage()/getHtml() treat an
        empty result as 'not found'. Confirm against the full project.
        """
        return QByteArray()
def readXmlDocument(self):
root = QFileInfo(__file__).absolutePath()
xml_file = QFile((root + '/examples.xml'))
xml_file.open((QFile.ReadOnly | QFile.Text))
contents = xml_file.readAll().data()
xml_file.close()
self.contentsDoc = parseString(contents)
    def itemSelected(self, userCode, menuName):
        """Central dispatcher for button presses: queues the Score movies that
        animate menu transitions, or launches examples/documentation.

        userCode is one of the class action codes; menuName names the target
        menu for the navigation codes.
        """
        if (userCode == MenuManager.LAUNCH):
            self.launchExample(self.currentInfo)
        elif (userCode == MenuManager.LAUNCH_QML):
            self.launchQml(self.currentInfo)
        elif (userCode == MenuManager.DOCUMENTATION):
            self.showDocInAssistant(self.currentInfo)
        elif (userCode == MenuManager.QUIT):
            QApplication.quit()
        elif (userCode == MenuManager.FULLSCREEN):
            self.window.toggleFullscreen()
        elif (userCode == MenuManager.ROOT):
            # Back to the root menu: animate current widgets out...
            self.score.queueMovie((self.currentMenu + ' -out'), Score.FROM_START, Score.LOCK_ITEMS)
            self.score.queueMovie((self.currentMenuButtons + ' -out'), Score.FROM_START, Score.LOCK_ITEMS)
            self.score.queueMovie((self.currentInfo + ' -out'))
            self.score.queueMovie((self.currentInfo + ' -buttons -out'), Score.NEW_ANIMATION_ONLY)
            self.score.queueMovie('back -out', Score.ONLY_IF_VISIBLE)
            # ...then bring the root menu's widgets in.
            self.currentMenuCode = MenuManager.ROOT
            self.currentMenu = (menuName + ' -menu1')
            self.currentMenuButtons = (menuName + ' -buttons')
            self.currentInfo = (menuName + ' -info')
            self.score.queueMovie('upndown -shake')
            self.score.queueMovie(self.currentMenu, Score.FROM_START, Score.UNLOCK_ITEMS)
            self.score.queueMovie(self.currentMenuButtons, Score.FROM_START, Score.UNLOCK_ITEMS)
            self.score.queueMovie(self.currentInfo)
            if (not Colors.noTicker):
                self.ticker.doIntroTransitions = True
                self.tickerInAnim.setStartDelay(2000)
                self.ticker.useGuideQt()
                self.score.queueMovie('ticker', Score.NEW_ANIMATION_ONLY)
        elif (userCode == MenuManager.MENU1):
            # Descend into a category sub-menu.
            self.score.queueMovie((self.currentMenu + ' -out'), Score.FROM_START, Score.LOCK_ITEMS)
            self.score.queueMovie((self.currentMenuButtons + ' -out'), Score.FROM_START, Score.LOCK_ITEMS)
            self.score.queueMovie((self.currentInfo + ' -out'))
            self.currentMenuCode = MenuManager.MENU1
            self.currentCategory = menuName
            self.currentMenu = (menuName + ' -menu1')
            self.currentInfo = (menuName + ' -info')
            self.score.queueMovie('upndown -shake')
            self.score.queueMovie('back -in')
            self.score.queueMovie(self.currentMenu, Score.FROM_START, Score.UNLOCK_ITEMS)
            self.score.queueMovie(self.currentInfo)
            if (not Colors.noTicker):
                self.ticker.useGuideTt()
        elif (userCode == MenuManager.MENU2):
            # Show a leaf example's info page (menu itself stays, just shakes).
            self.score.queueMovie((self.currentInfo + ' -out'), Score.NEW_ANIMATION_ONLY)
            self.score.queueMovie((self.currentInfo + ' -buttons -out'), Score.NEW_ANIMATION_ONLY)
            self.currentMenuCode = MenuManager.MENU2
            self.currentInfo = menuName
            self.score.queueMovie('upndown -shake')
            self.score.queueMovie('back -shake')
            self.score.queueMovie((self.currentMenu + ' -shake'))
            self.score.queueMovie(self.currentInfo, Score.NEW_ANIMATION_ONLY)
            self.score.queueMovie((self.currentInfo + ' -buttons'), Score.NEW_ANIMATION_ONLY)
            if (not Colors.noTicker):
                self.score.queueMovie('ticker -out', Score.NEW_ANIMATION_ONLY)
        elif (userCode == MenuManager.UP):
            # Scroll to the previous page of the current (chained) menu.
            backMenu = self.info[self.currentMenu]['back']
            if backMenu:
                self.score.queueMovie((self.currentMenu + ' -top_out'), Score.FROM_START, Score.LOCK_ITEMS)
                self.score.queueMovie((backMenu + ' -bottom_in'), Score.FROM_START, Score.UNLOCK_ITEMS)
                self.currentMenu = backMenu
        elif (userCode == MenuManager.DOWN):
            # Scroll to the next page of the current (chained) menu.
            moreMenu = self.info[self.currentMenu]['more']
            if moreMenu:
                self.score.queueMovie((self.currentMenu + ' -bottom_out'), Score.FROM_START, Score.LOCK_ITEMS)
                self.score.queueMovie((moreMenu + ' -top_in'), Score.FROM_START, Score.UNLOCK_ITEMS)
                self.currentMenu = moreMenu
        elif (userCode == MenuManager.BACK):
            if (self.currentMenuCode == MenuManager.MENU2):
                # From an example's info page back to its category.
                self.score.queueMovie((self.currentInfo + ' -out'), Score.NEW_ANIMATION_ONLY)
                self.score.queueMovie((self.currentInfo + ' -buttons -out'), Score.NEW_ANIMATION_ONLY)
                self.currentMenuCode = MenuManager.MENU1
                self.currentMenuButtons = (self.currentCategory + ' -buttons')
                self.currentInfo = (self.currentCategory + ' -info')
                self.score.queueMovie('upndown -shake')
                self.score.queueMovie((self.currentMenu + ' -shake'))
                self.score.queueMovie(self.currentInfo, Score.NEW_ANIMATION_ONLY)
                self.score.queueMovie((self.currentInfo + ' -buttons'), Score.NEW_ANIMATION_ONLY)
                if (not Colors.noTicker):
                    self.ticker.doIntroTransitions = False
                    self.tickerInAnim.setStartDelay(500)
                    self.score.queueMovie('ticker', Score.NEW_ANIMATION_ONLY)
            elif (self.currentMenuCode != MenuManager.ROOT):
                self.itemSelected(MenuManager.ROOT, Colors.rootMenuName)
        # Enable/disable the up & down scroll buttons for the current menu.
        if self.info.setdefault(self.currentMenu, {}).get('back'):
            back_state = TextButton.OFF
        else:
            back_state = TextButton.DISABLED
        if self.info[self.currentMenu].get('more'):
            more_state = TextButton.OFF
        else:
            more_state = TextButton.DISABLED
        self.upButton.setState(back_state)
        self.downButton.setState(more_state)
        # Play everything queued above and reset the FPS history.
        if self.score.hasQueuedMovies():
            self.score.playQue()
            self.window.fpsHistory = []
    def showDocInAssistant(self, name):
        """Open the example's documentation page in Qt Assistant
        (started on demand in remote-control mode)."""
        url = self.resolveDocUrl(name)
        Colors.debug('Sending URL to Assistant:', url)
        # Start Assistant only if it is not already running.
        if (self.assistantProcess.state() != QProcess.Running):
            app = (QLibraryInfo.location(QLibraryInfo.BinariesPath) + QDir.separator())
            if (sys.platform == 'darwin'):
                app += 'Assistant.app/Contents/MacOS/Assistant'
            else:
                app += 'assistant'
            args = ['-enableRemoteControl']
            self.assistantProcess.start(app, args)
            if (not self.assistantProcess.waitForStarted()):
                QMessageBox.critical(None, 'PyQt Demo', ('Could not start %s.' % app))
                return
        # Drive Assistant through its stdin remote-control protocol.
        cmd_str = QTextStream(self.assistantProcess)
        (((cmd_str << 'SetSource ') << url) << '\n')
    def launchExample(self, name):
        """Run the example script in a child Python interpreter process."""
        executable = self.resolveExeFile(name)
        process = QProcess(self)
        # NOTE(review): QProcess.error is the old-style signal name — confirm
        # against the Qt binding version in use.
        process.error.connect(self.launchError)
        if (sys.platform == 'win32'):
            # Make sure the child process can locate the Qt DLLs.
            env = QProcessEnvironment.systemEnvironment()
            env.insert('PATH', ((QLibraryInfo.location(QLibraryInfo.BinariesPath) + ';') + env.value('PATH')))
            process.setProcessEnvironment(env)
        # Examples may request to run from their own data directory.
        if (self.info[name]['changedirectory'] != 'false'):
            workingDirectory = self.resolveDataDir(name)
            process.setWorkingDirectory(workingDirectory)
            Colors.debug('Setting working directory:', workingDirectory)
        Colors.debug('Launching:', executable)
        process.start(sys.executable, [executable])
    def launchQml(self, name):
        """Display a QML example by running qmlscene with the example's
        directory on the QML import path."""
        import_path = self.resolveDataDir(name)
        qml = self.resolveQmlFile(name)
        process = QProcess(self)
        process.error.connect(self.launchError)
        env = QProcessEnvironment.systemEnvironment()
        env.insert('QML2_IMPORT_PATH', import_path)
        process.setProcessEnvironment(env)
        executable = (QLibraryInfo.location(QLibraryInfo.BinariesPath) + '/qmlscene')
        Colors.debug('Launching:', executable)
        process.start(executable, [qml])
def launchError(self, error):
if (error != QProcess.Crashed):
QMessageBox.critical(None, 'Failed to launch the example', 'Could not launch the example. Ensure that it has been built.', QMessageBox.Cancel)
    def init(self, window):
        """Build every menu and button from the parsed XML; called once the
        main window exists."""
        self.window = window
        self.createTicker()
        self.createUpnDownButtons()
        self.createBackButton()
        # Walk the two-level XML tree: root -> categories -> examples.
        rootElement = self.contentsDoc.documentElement
        self.createRootMenu(rootElement)
        level2Menu = self._first_element(rootElement)
        while (level2Menu is not None):
            self.createSubMenu(level2Menu)
            example = self._first_element(level2Menu)
            while (example is not None):
                self.readInfoAboutExample(example)
                self.createLeafMenu(example)
                example = self._next_element(example)
            level2Menu = self._next_element(level2Menu)
def _first_element(cls, node):
return cls._skip_nonelements(node.firstChild)
def _next_element(cls, node):
return cls._skip_nonelements(node.nextSibling)
def _skip_nonelements(node):
while ((node is not None) and (node.nodeType != node.ELEMENT_NODE)):
node = node.nextSibling
return node
def readInfoAboutExample(self, example):
name = example.getAttribute('name')
if (name in self.info):
Colors.debug('__WARNING: MenuManager.readInfoAboutExample: Demo/example with name', name, 'appears twice in the xml-file!__')
self.info.setdefault(name, {})['filename'] = example.getAttribute('filename')
self.info[name]['dirname'] = example.parentNode.getAttribute('dirname')
self.info[name]['changedirectory'] = example.getAttribute('changedirectory')
self.info[name]['image'] = example.getAttribute('image')
self.info[name]['qml'] = example.getAttribute('qml')
    def resolveDir(self, name):
        """Return a QDir positioned at the example's own directory: up one
        level from this script, into dirname, then through the filename's
        path components (the last component is a directory of the same name)."""
        dirName = self.info[name]['dirname']
        fileName = self.info[name]['filename'].split('/')
        dir = QFileInfo(__file__).dir()
        dir.cdUp()
        dir.cd(dirName)
        if (len(fileName) > 1):
            # Intermediate path components of a nested filename.
            dir.cd('/'.join(fileName[:(- 1)]))
        dir.cd(fileName[(- 1)])
        return dir
    def resolveDataDir(self, name):
        """Absolute path of the example's directory (see resolveDir)."""
        return self.resolveDir(name).absolutePath()
def resolveExeFile(self, name):
dir = self.resolveDir(name)
fileName = self.info[name]['filename'].split('/')[(- 1)]
pyFile = QFile((((dir.path() + '/') + fileName) + '.py'))
if pyFile.exists():
return pyFile.fileName()
pywFile = QFile((((dir.path() + '/') + fileName) + '.pyw'))
if pywFile.exists():
return pywFile.fileName()
Colors.debug('- WARNING: Could not resolve executable:', dir.path(), fileName)
return '__executable not found__'
def resolveQmlFile(self, name):
dir = self.resolveDir(name)
fileName = self.info[name]['filename'].split('/')[(- 1)]
qmlFile = QFile((((dir.path() + '/') + fileName) + '.qml'))
if qmlFile.exists():
return qmlFile.fileName()
Colors.debug('- WARNING: Could not resolve QML file:', dir.path(), fileName)
return '__QML not found__'
def resolveDocUrl(self, name):
dirName = self.info[name]['dirname']
fileName = self.info[name]['filename']
return ((((self.helpRootUrl + dirName.replace('/', '-')) + '-') + fileName) + '.html')
def resolveImageUrl(self, name):
return ((self.helpRootUrl + 'images/') + name)
    def getHtml(self, name):
        """Fetch the raw HTML documentation for an example (via getResource)."""
        return self.getResource(self.resolveDocUrl(name))
    def getImage(self, name):
        """Return image bytes for an example, trying conventional names.

        Fallback chain when no explicit 'image' attribute is set:
        <file>-example.png, then <file>.png, then <file>example.png.
        QML examples get a 'qml-' prefix on the base name.
        """
        imageName = self.info[name]['image']
        fileName = self.info[name]['filename']
        if (self.info[name]['qml'] == 'true'):
            fileName = ('qml-' + fileName.split('/')[(- 1)])
        if (not imageName):
            imageName = (fileName + '-example.png')
            if self.getResource(self.resolveImageUrl(imageName)).isEmpty():
                imageName = (fileName + '.png')
            if self.getResource(self.resolveImageUrl(imageName)).isEmpty():
                imageName = (fileName + 'example.png')
        return self.getResource(self.resolveImageUrl(imageName))
    def createRootMenu(self, el):
        """Create the top-level menu plus its Quit and fullscreen buttons."""
        name = el.getAttribute('name')
        self.createMenu(el, MenuManager.MENU1)
        self.createInfo(MenuContentItem(el, self.window.mainSceneRoot), (name + ' -info'))
        menuButtonsIn = self.score.insertMovie((name + ' -buttons'))
        menuButtonsOut = self.score.insertMovie((name + ' -buttons -out'))
        self.createLowLeftButton('Quit', MenuManager.QUIT, menuButtonsIn, menuButtonsOut, None)
        self.createLowRightButton('Toggle fullscreen', MenuManager.FULLSCREEN, menuButtonsIn, menuButtonsOut, None)
    def createSubMenu(self, el):
        """Create a category menu and its info panel."""
        name = el.getAttribute('name')
        self.createMenu(el, MenuManager.MENU2)
        self.createInfo(MenuContentItem(el, self.window.mainSceneRoot), (name + ' -info'))
    def createLeafMenu(self, el):
        """Create an example's info page plus its Documentation and
        Launch/Display buttons (Display for QML-only examples)."""
        name = el.getAttribute('name')
        self.createInfo(ExampleContent(name, self.window.mainSceneRoot), name)
        infoButtonsIn = self.score.insertMovie((name + ' -buttons'))
        infoButtonsOut = self.score.insertMovie((name + ' -buttons -out'))
        self.createLowRightLeafButton('Documentation', 600, MenuManager.DOCUMENTATION, infoButtonsIn, infoButtonsOut, None)
        if (el.getAttribute('executable') != 'false'):
            self.createLowRightLeafButton('Launch', 405, MenuManager.LAUNCH, infoButtonsIn, infoButtonsOut, None)
        elif (el.getAttribute('qml') == 'true'):
            self.createLowRightLeafButton('Display', 405, MenuManager.LAUNCH_QML, infoButtonsIn, infoButtonsOut, None)
    def createMenu(self, category, type):
        """Build the animated button list for *category*.

        Creates one TextButton per child element, with Score movies for:
        slide-in ('-in'), slide-out ('-out'), shake ('-shake'), and the
        page-scroll transitions ('-top_out'/'-bottom_out'/'-top_in'/
        '-bottom_in'). When a category holds more than Colors.menuCount
        entries the menu is chained into numbered pages linked through
        self.info[...]['more'] and ['back'].
        """
        sw = self.window.scene.sceneRect().width()
        xOffset = 15
        yOffset = 10
        maxExamples = Colors.menuCount
        menuIndex = 1
        name = category.getAttribute('name')
        currentNode = self._first_element(category)
        currentMenu = ('%s -menu%d' % (name, menuIndex))
        while (currentNode is not None):
            # Movies for this page of the menu.
            movieIn = self.score.insertMovie(currentMenu)
            movieOut = self.score.insertMovie((currentMenu + ' -out'))
            movieNextTopOut = self.score.insertMovie((currentMenu + ' -top_out'))
            movieNextBottomOut = self.score.insertMovie((currentMenu + ' -bottom_out'))
            movieNextTopIn = self.score.insertMovie((currentMenu + ' -top_in'))
            movieNextBottomIn = self.score.insertMovie((currentMenu + ' -bottom_in'))
            movieShake = self.score.insertMovie((currentMenu + ' -shake'))
            i = 0
            while ((currentNode is not None) and (i < maxExamples)):
                # One button per example; i is its row on the page.
                label = currentNode.getAttribute('name')
                item = TextButton(label, TextButton.LEFT, type, self.window.mainSceneRoot)
                item.setRecursiveVisible(False)
                item.setZValue(10)
                ih = item.sceneBoundingRect().height()
                iw = item.sceneBoundingRect().width()
                ihp = (ih + 3)
                # Slide in from above with a small bounce.
                anim = DemoItemAnimation(item, DemoItemAnimation.ANIM_IN)
                anim.setDuration((1000 + (i * 20)))
                anim.setStartValue(QPointF(xOffset, (- ih)))
                anim.setKeyValueAt(0.2, QPointF(xOffset, (- ih)))
                anim.setKeyValueAt(0.5, QPointF(xOffset, ((((i * ihp) + yOffset) + Colors.contentStartY) + (10 * float((i / 4.0))))))
                anim.setKeyValueAt(0.6, QPointF(xOffset, (((i * ihp) + yOffset) + Colors.contentStartY)))
                anim.setKeyValueAt(0.7, QPointF(xOffset, ((((i * ihp) + yOffset) + Colors.contentStartY) + (5 * float((i / 4.0))))))
                anim.setKeyValueAt(0.8, QPointF(xOffset, (((i * ihp) + yOffset) + Colors.contentStartY)))
                anim.setKeyValueAt(0.9, QPointF(xOffset, ((((i * ihp) + yOffset) + Colors.contentStartY) + (2 * float((i / 4.0))))))
                anim.setEndValue(QPointF(xOffset, (((i * ihp) + yOffset) + Colors.contentStartY)))
                movieIn.append(anim)
                # Slide out towards the lower right, then hide.
                anim = DemoItemAnimation(item, DemoItemAnimation.ANIM_OUT)
                anim.setHideOnFinished(True)
                anim.setDuration((700 + (30 * i)))
                anim.setStartValue(QPointF(xOffset, (((i * ihp) + yOffset) + Colors.contentStartY)))
                anim.setKeyValueAt(0.6, QPointF(xOffset, ((600 - ih) - ih)))
                anim.setKeyValueAt(0.65, QPointF((xOffset + 20), (600 - ih)))
                anim.setEndValue(QPointF((sw + iw), (600 - ih)))
                movieOut.append(anim)
                # In-place shake used when re-focusing the same menu.
                anim = DemoItemAnimation(item)
                anim.setDuration(700)
                anim.setStartValue(QPointF(xOffset, (((i * ihp) + yOffset) + Colors.contentStartY)))
                anim.setKeyValueAt(0.55, QPointF(xOffset, ((((i * ihp) + yOffset) + Colors.contentStartY) - (i * 2.0))))
                anim.setKeyValueAt(0.7, QPointF((xOffset - 10), ((((i * ihp) + yOffset) + Colors.contentStartY) - (i * 1.5))))
                anim.setKeyValueAt(0.8, QPointF(xOffset, ((((i * ihp) + yOffset) + Colors.contentStartY) - (i * 1.0))))
                anim.setKeyValueAt(0.9, QPointF((xOffset - 2), ((((i * ihp) + yOffset) + Colors.contentStartY) - (i * 0.5))))
                anim.setEndValue(QPointF(xOffset, (((i * ihp) + yOffset) + Colors.contentStartY)))
                movieShake.append(anim)
                # Page-scroll: exit through the top edge.
                anim = DemoItemAnimation(item, DemoItemAnimation.ANIM_OUT)
                anim.setHideOnFinished(True)
                anim.setDuration((200 + (30 * i)))
                anim.setStartValue(QPointF(xOffset, (((i * ihp) + yOffset) + Colors.contentStartY)))
                anim.setKeyValueAt(0.7, QPointF(xOffset, (yOffset + Colors.contentStartY)))
                anim.setEndValue(QPointF((- iw), (yOffset + Colors.contentStartY)))
                movieNextTopOut.append(anim)
                # Page-scroll: exit through the bottom edge.
                anim = DemoItemAnimation(item, DemoItemAnimation.ANIM_OUT)
                anim.setHideOnFinished(True)
                anim.setDuration((200 + (30 * i)))
                anim.setStartValue(QPointF(xOffset, (((i * ihp) + yOffset) + Colors.contentStartY)))
                anim.setKeyValueAt(0.7, QPointF(xOffset, (((maxExamples * ihp) + yOffset) + Colors.contentStartY)))
                anim.setEndValue(QPointF((- iw), (((maxExamples * ihp) + yOffset) + Colors.contentStartY)))
                movieNextBottomOut.append(anim)
                # Page-scroll: enter from the top edge.
                anim = DemoItemAnimation(item, DemoItemAnimation.ANIM_IN)
                anim.setDuration((700 - (30 * i)))
                anim.setStartValue(QPointF((- iw), (yOffset + Colors.contentStartY)))
                anim.setKeyValueAt(0.3, QPointF(xOffset, (yOffset + Colors.contentStartY)))
                anim.setEndValue(QPointF(xOffset, (((i * ihp) + yOffset) + Colors.contentStartY)))
                movieNextTopIn.append(anim)
                # Page-scroll: enter from the bottom edge (staggered in reverse).
                reverse = (maxExamples - i)
                anim = DemoItemAnimation(item, DemoItemAnimation.ANIM_IN)
                anim.setDuration((1000 - (30 * reverse)))
                anim.setStartValue(QPointF((- iw), (((maxExamples * ihp) + yOffset) + Colors.contentStartY)))
                anim.setKeyValueAt(0.3, QPointF(xOffset, (((maxExamples * ihp) + yOffset) + Colors.contentStartY)))
                anim.setEndValue(QPointF(xOffset, (((i * ihp) + yOffset) + Colors.contentStartY)))
                movieNextBottomIn.append(anim)
                i += 1
                currentNode = self._next_element(currentNode)
            # Page full but entries remain: chain a new page and link the two.
            if ((currentNode is not None) and (i == maxExamples)):
                menuIndex += 1
                self.info.setdefault(currentMenu, {})['more'] = ('%s -menu%d' % (name, menuIndex))
                currentMenu = ('%s -menu%d' % (name, menuIndex))
                self.info.setdefault(currentMenu, {})['back'] = ('%s -menu%d' % (name, (menuIndex - 1)))
def createLowLeftButton(self, label, type, movieIn, movieOut, movieShake, menuString=''):
    """Create a panel button in the lower-left corner and register its
    slide-in/slide-out (and optional shake) animations with the given movies.
    """
    button = TextButton(label, TextButton.RIGHT, type,
                        self.window.mainSceneRoot, TextButton.PANEL)
    if menuString:
        button.setMenuString(menuString)
    button.setRecursiveVisible(False)
    button.setZValue(10)
    button_width = button.sceneBoundingRect().width()
    x_offset = 15
    base_y = Colors.contentStartY + Colors.contentHeight

    # Slide in from off-screen left, settling 9px lower at the end.
    buttonIn = DemoItemAnimation(button, DemoItemAnimation.ANIM_IN)
    buttonIn.setDuration(1800)
    buttonIn.setStartValue(QPointF(-button_width, base_y - 35))
    buttonIn.setKeyValueAt(0.5, QPointF(-button_width, base_y - 35))
    buttonIn.setKeyValueAt(0.7, QPointF(x_offset, base_y - 35))
    buttonIn.setEndValue(QPointF(x_offset, base_y - 26))
    movieIn.append(buttonIn)

    # Slide back out to the left and hide.
    buttonOut = DemoItemAnimation(button, DemoItemAnimation.ANIM_OUT)
    buttonOut.setHideOnFinished(True)
    buttonOut.setDuration(400)
    buttonOut.setStartValue(QPointF(x_offset, base_y - 26))
    buttonOut.setEndValue(QPointF(-button_width, base_y - 26))
    movieOut.append(buttonOut)

    if movieShake is not None:
        # Small horizontal wiggle around the resting position.
        shakeAnim = DemoItemAnimation(button, DemoItemAnimation.ANIM_UNSPECIFIED)
        shakeAnim.setDuration(650)
        rest = buttonIn.endValue()
        shakeAnim.setStartValue(rest)
        for step, dx in ((0.6, 0), (0.7, -3), (0.8, 2), (0.9, -1)):
            shakeAnim.setKeyValueAt(step, rest + QPointF(dx, 0))
        shakeAnim.setEndValue(rest)
        movieShake.append(shakeAnim)
def createLowRightButton(self, label, type, movieIn, movieOut, movieShake):
    """Create a panel button toward the lower-right and register its
    slide-in/slide-out animations.  `movieShake` is accepted for signature
    parity with the sibling helpers but no shake animation is built here.
    """
    item = TextButton(label, TextButton.RIGHT, type,
                      self.window.mainSceneRoot, TextButton.PANEL)
    item.setRecursiveVisible(False)
    item.setZValue(10)
    scene_width = self.window.scene.sceneRect().width()
    target_x = 70 + 535
    base_y = Colors.contentStartY + Colors.contentHeight

    # Slide in from off-screen right, settling 9px lower at the end.
    slide_in = DemoItemAnimation(item, DemoItemAnimation.ANIM_IN)
    slide_in.setDuration(1800)
    slide_in.setStartValue(QPointF(scene_width, base_y - 35))
    slide_in.setKeyValueAt(0.5, QPointF(scene_width, base_y - 35))
    slide_in.setKeyValueAt(0.7, QPointF(target_x, base_y - 35))
    slide_in.setEndValue(QPointF(target_x, base_y - 26))
    movieIn.append(slide_in)

    # Slide back out to the right edge and hide.
    slide_out = DemoItemAnimation(item, DemoItemAnimation.ANIM_OUT)
    slide_out.setHideOnFinished(True)
    slide_out.setDuration(400)
    slide_out.setStartValue(QPointF(target_x, base_y - 26))
    slide_out.setEndValue(QPointF(scene_width, base_y - 26))
    movieOut.append(slide_out)
def createLowRightLeafButton(self, label, xOffset, type, movieIn, movieOut, movieShake):
    """Create a lower-right 'leaf' button at the given x offset.

    Registers a bouncing slide-in animation in `movieIn` and a drop-off-
    the-bottom slide-out in `movieOut`.  `movieShake` is accepted for
    signature parity with the sibling create*Button helpers but is unused.
    """
    item = TextButton(label, TextButton.RIGHT, type, self.window.mainSceneRoot, TextButton.PANEL)
    item.setRecursiveVisible(False)
    item.setZValue(10)
    # Scene bounds: start off-screen right; exit off-screen bottom.
    sw = self.window.scene.sceneRect().width()
    sh = self.window.scene.sceneRect().height()
    # Slide in from the right, then bounce around xOffset (overshoot by
    # 30px, then 5px) before settling 9px lower.
    anim = DemoItemAnimation(item, DemoItemAnimation.ANIM_IN)
    anim.setDuration(1050)
    anim.setStartValue(QPointF(sw, ((Colors.contentStartY + Colors.contentHeight) - 35)))
    anim.setKeyValueAt(0.1, QPointF(sw, ((Colors.contentStartY + Colors.contentHeight) - 35)))
    anim.setKeyValueAt(0.3, QPointF(xOffset, ((Colors.contentStartY + Colors.contentHeight) - 35)))
    anim.setKeyValueAt(0.35, QPointF((xOffset + 30), ((Colors.contentStartY + Colors.contentHeight) - 35)))
    anim.setKeyValueAt(0.4, QPointF(xOffset, ((Colors.contentStartY + Colors.contentHeight) - 35)))
    anim.setKeyValueAt(0.45, QPointF((xOffset + 5), ((Colors.contentStartY + Colors.contentHeight) - 35)))
    anim.setKeyValueAt(0.5, QPointF(xOffset, ((Colors.contentStartY + Colors.contentHeight) - 35)))
    anim.setEndValue(QPointF(xOffset, ((Colors.contentStartY + Colors.contentHeight) - 26)))
    movieIn.append(anim)
    # Drop straight down off the bottom of the scene and hide.
    anim = DemoItemAnimation(item, DemoItemAnimation.ANIM_OUT)
    anim.setHideOnFinished(True)
    anim.setDuration(300)
    anim.setStartValue(QPointF(xOffset, ((Colors.contentStartY + Colors.contentHeight) - 26)))
    anim.setEndValue(QPointF(xOffset, sh))
    movieOut.append(anim)
def createInfo(self, item, name):
    """Register show/hide movies (named `name` / `name + ' -out'`) for an
    info page item: a bouncing slide-in from the right, and a quick
    ease-in slide-out to the left.
    """
    movie_in = self.score.insertMovie(name)
    movie_out = self.score.insertMovie(name + ' -out')
    item.setZValue(8)
    item.setRecursiveVisible(False)
    x_offset = 230.0
    top_y = Colors.contentStartY

    # Slide in from the right edge with a 20px/7px bounce before settling.
    info_in = DemoItemAnimation(item, DemoItemAnimation.ANIM_IN)
    info_in.setDuration(650)
    info_in.setStartValue(QPointF(self.window.scene.sceneRect().width(), top_y))
    for step, dx in ((0.6, 0), (0.7, 20), (0.8, 0), (0.9, 7)):
        info_in.setKeyValueAt(step, QPointF(x_offset + dx, top_y))
    info_in.setEndValue(QPointF(x_offset, top_y))
    movie_in.append(info_in)

    # Accelerate off-screen to the left and hide.
    info_out = DemoItemAnimation(item, DemoItemAnimation.ANIM_OUT)
    info_out.setCurveShape(QEasingCurve.InQuad)
    info_out.setDuration(300)
    info_out.setHideOnFinished(True)
    info_out.setStartValue(QPointF(x_offset, top_y))
    info_out.setEndValue(QPointF(-600, top_y))
    movie_out.append(info_out)
def createTicker(self):
    """Create the scrolling ticker item and its four movies: in/out for
    menu transitions, and activate/deactivate for toggling it."""
    if Colors.noTicker:
        # Ticker disabled by configuration; build nothing.
        return
    movie_in = self.score.insertMovie('ticker')
    movie_out = self.score.insertMovie('ticker -out')
    movie_activate = self.score.insertMovie('ticker -activate')
    movie_deactivate = self.score.insertMovie('ticker -deactivate')
    self.ticker = ItemCircleAnimation()
    self.ticker.setZValue(50)
    self.ticker.hide()
    # Resting x position and vertical offset below the content top.
    qtendpos = 485
    qtPosY = 120
    # Slide in from the right with a small 30px/5px bounce before settling.
    self.tickerInAnim = DemoItemAnimation(self.ticker, DemoItemAnimation.ANIM_IN)
    self.tickerInAnim.setDuration(500)
    self.tickerInAnim.setStartValue(QPointF(self.window.scene.sceneRect().width(), (Colors.contentStartY + qtPosY)))
    self.tickerInAnim.setKeyValueAt(0.6, QPointF(qtendpos, (Colors.contentStartY + qtPosY)))
    self.tickerInAnim.setKeyValueAt(0.7, QPointF((qtendpos + 30), (Colors.contentStartY + qtPosY)))
    self.tickerInAnim.setKeyValueAt(0.8, QPointF(qtendpos, (Colors.contentStartY + qtPosY)))
    self.tickerInAnim.setKeyValueAt(0.9, QPointF((qtendpos + 5), (Colors.contentStartY + qtPosY)))
    self.tickerInAnim.setEndValue(QPointF(qtendpos, (Colors.contentStartY + qtPosY)))
    movie_in.append(self.tickerInAnim)
    # Slide out well past the right edge of the scene, then hide.
    qtOut = DemoItemAnimation(self.ticker, DemoItemAnimation.ANIM_OUT)
    qtOut.setHideOnFinished(True)
    qtOut.setDuration(500)
    qtOut.setStartValue(QPointF(qtendpos, (Colors.contentStartY + qtPosY)))
    qtOut.setEndValue(QPointF((self.window.scene.sceneRect().width() + 700), (Colors.contentStartY + qtPosY)))
    movie_out.append(qtOut)
    # Re-entry animation (same bounce) used when the ticker is toggled on.
    qtActivate = DemoItemAnimation(self.ticker)
    qtActivate.setDuration(400)
    qtActivate.setStartValue(QPointF(self.window.scene.sceneRect().width(), (Colors.contentStartY + qtPosY)))
    qtActivate.setKeyValueAt(0.6, QPointF(qtendpos, (Colors.contentStartY + qtPosY)))
    qtActivate.setKeyValueAt(0.7, QPointF((qtendpos + 30), (Colors.contentStartY + qtPosY)))
    qtActivate.setKeyValueAt(0.8, QPointF(qtendpos, (Colors.contentStartY + qtPosY)))
    qtActivate.setKeyValueAt(0.9, QPointF((qtendpos + 5), (Colors.contentStartY + qtPosY)))
    qtActivate.setEndValue(QPointF(qtendpos, (Colors.contentStartY + qtPosY)))
    movie_activate.append(qtActivate)
    # Drop straight down (to y=800, off-screen) when toggled off, then hide.
    qtDeactivate = DemoItemAnimation(self.ticker)
    qtDeactivate.setHideOnFinished(True)
    qtDeactivate.setDuration(400)
    qtDeactivate.setStartValue(QPointF(qtendpos, (Colors.contentStartY + qtPosY)))
    qtDeactivate.setEndValue(QPointF(qtendpos, 800))
    movie_deactivate.append(qtDeactivate)
def createUpnDownButtons(self):
    """Create the up/down scroll buttons and a shared shake movie that
    wiggles both when a scroll request cannot be honoured."""
    xOffset = 15.0
    yOffset = 450.0
    self.upButton = TextButton('', TextButton.LEFT, MenuManager.UP, self.window.mainSceneRoot, TextButton.UP)
    self.upButton.prepare()
    self.upButton.setPos(xOffset, yOffset)
    # Starts disabled: there is nothing to scroll up to initially.
    self.upButton.setState(TextButton.DISABLED)
    self.downButton = TextButton('', TextButton.LEFT, MenuManager.DOWN, self.window.mainSceneRoot, TextButton.DOWN)
    self.downButton.prepare()
    # Place the down button 10px to the right of its own width.
    self.downButton.setPos(((xOffset + 10) + self.downButton.sceneBoundingRect().width()), yOffset)
    movieShake = self.score.insertMovie('upndown -shake')
    # Horizontal wiggle for the up button around its resting position.
    shakeAnim = DemoItemAnimation(self.upButton, DemoItemAnimation.ANIM_UNSPECIFIED)
    shakeAnim.setDuration(650)
    shakeAnim.setStartValue(self.upButton.pos())
    shakeAnim.setKeyValueAt(0.6, self.upButton.pos())
    shakeAnim.setKeyValueAt(0.7, (self.upButton.pos() + QPointF((- 2), 0)))
    shakeAnim.setKeyValueAt(0.8, (self.upButton.pos() + QPointF(1, 0)))
    shakeAnim.setKeyValueAt(0.9, (self.upButton.pos() + QPointF((- 1), 0)))
    shakeAnim.setEndValue(self.upButton.pos())
    movieShake.append(shakeAnim)
    # Same wiggle (different amplitudes) for the down button.
    shakeAnim = DemoItemAnimation(self.downButton, DemoItemAnimation.ANIM_UNSPECIFIED)
    shakeAnim.setDuration(650)
    shakeAnim.setStartValue(self.downButton.pos())
    shakeAnim.setKeyValueAt(0.6, self.downButton.pos())
    shakeAnim.setKeyValueAt(0.7, (self.downButton.pos() + QPointF((- 5), 0)))
    shakeAnim.setKeyValueAt(0.8, (self.downButton.pos() + QPointF((- 3), 0)))
    shakeAnim.setKeyValueAt(0.9, (self.downButton.pos() + QPointF((- 1), 0)))
    shakeAnim.setEndValue(self.downButton.pos())
    movieShake.append(shakeAnim)
def createBackButton(self):
    """Create the lower-left 'Back' button and wire it into the root
    menu's in/out/shake movies."""
    in_movie = self.score.insertMovie('back -in')
    out_movie = self.score.insertMovie('back -out')
    shake_movie = self.score.insertMovie('back -shake')
    self.createLowLeftButton(
        'Back', MenuManager.ROOT, in_movie, out_movie, shake_movie,
        Colors.rootMenuName)
class RangeProperty(Property):
    """A numeric property constrained to the interval [min_value, max_value]."""

    def __init__(self, name, initial, min_value, max_value, **kwargs):
        """Create the property; warns (but does not raise) when `initial`
        lies outside [min_value, max_value]."""
        self.min_value = min_value
        self.max_value = max_value
        if initial < min_value or initial > max_value:
            print(_('invalid initial value for range property'), name, initial)
        super(RangeProperty, self).__init__(name, initial, **kwargs)
        self.info['type'] = 'RangeProperty'
        self.info['min'] = self.min_value
        self.info['max'] = self.max_value

    def get_msg(self):
        """Return the current value formatted to four decimal places."""
        return '%.4f' % self.value

    def set(self, value):
        """Set the value if it parses as a float within range.

        Out-of-range or non-numeric input is silently ignored (best-effort
        setter).  The former bare `except:` is narrowed to the exceptions
        float() can actually raise, so unrelated errors are not swallowed.
        """
        try:
            value = float(value)
        except (TypeError, ValueError):
            return
        if self.min_value <= value <= self.max_value:
            super(RangeProperty, self).set(value)

    def set_max(self, max_value):
        """Change the upper bound, clamping the current value to it.

        Also keeps self.info['max'] in sync — previously it was left stale
        after the bound changed, so clients reading `info` saw the old max.
        """
        if self.value > max_value:
            self.value = max_value
        self.max_value = max_value
        self.info['max'] = max_value
def test_use_correct_python_version_string(tmpdir, tmpdir_cwd, monkeypatch):
    """easy_install must derive the py_version* config vars from
    sys.version_info and handle two-digit minor versions (3.10) without
    truncating them to '3.1'."""
    dist = Distribution()
    cmd = dist.get_command_obj('easy_install')
    cmd.args = ['ok']
    cmd.optimize = 0
    cmd.user = True
    cmd.install_userbase = str(tmpdir)
    cmd.install_usersite = None
    install_cmd = dist.get_command_obj('install')
    install_cmd.install_userbase = str(tmpdir)
    install_cmd.install_usersite = None

    fake_version = '3.10.1 (main, Dec 21 2021, 09:17:12) [GCC 10.2.1 ]'
    fake_info = VersionStub(3, 10, 1, 'final', 0)
    with monkeypatch.context() as patch, warnings.catch_warnings():
        warnings.simplefilter('ignore')
        patch.setattr('site.ENABLE_USER_SITE', True)
        patch.setattr('sys.version', fake_version)
        patch.setattr('sys.version_info', fake_info)
        patch.setattr(cmd, 'create_home_path', mock.Mock())
        cmd.finalize_options()

    impl = 'pypy' if hasattr(sys, 'pypy_version_info') else 'python'
    install_dir = cmd.install_dir.lower()
    # When the install dir embeds the version at all, it must carry the
    # full "310"-style digits, not a truncated "31".
    if re.search(impl + '3\\.?1', install_dir):
        assert re.search(impl + '3\\.?1\\d', install_dir)
    assert cmd.config_vars['py_version'] == '3.10.1'
    assert cmd.config_vars['py_version_short'] == '3.10'
    assert cmd.config_vars['py_version_nodot'] == '310'
def get_users_handler(config, _, override_config_dir):
    """Instantiate the federated-users backend selected by the
    AUTHENTICATION_TYPE config key (defaults to 'Database').

    Raises RuntimeError for an unrecognized authentication type.
    """
    auth_kind = config.get('AUTHENTICATION_TYPE', 'Database')

    if auth_kind == 'Database':
        return DatabaseUsers()

    if auth_kind == 'LDAP':
        # Positional order matches the LDAPUsers constructor.
        return LDAPUsers(
            config.get('LDAP_URI', 'ldap://localhost'),
            config.get('LDAP_BASE_DN'),
            config.get('LDAP_ADMIN_DN'),
            config.get('LDAP_ADMIN_PASSWD'),
            config.get('LDAP_USER_RDN', []),
            config.get('LDAP_UID_ATTR', 'uid'),
            config.get('LDAP_EMAIL_ATTR', 'mail'),
            config.get('LDAP_MEMBEROF_ATTR', 'memberOf'),
            config.get('LDAP_ALLOW_INSECURE_FALLBACK', False),
            secondary_user_rdns=config.get('LDAP_SECONDARY_USER_RDNS', []),
            requires_email=features.MAILING,
            timeout=config.get('LDAP_TIMEOUT'),
            network_timeout=config.get('LDAP_NETWORK_TIMEOUT'),
            ldap_user_filter=config.get('LDAP_USER_FILTER', None),
            ldap_superuser_filter=config.get('LDAP_SUPERUSER_FILTER', None),
            ldap_restricted_user_filter=config.get('LDAP_RESTRICTED_USER_FILTER', None),
            ldap_referrals=int(config.get('LDAP_FOLLOW_REFERRALS', True)),
        )

    if auth_kind == 'JWT':
        return ExternalJWTAuthN(
            config.get('JWT_VERIFY_ENDPOINT'),
            config.get('JWT_QUERY_ENDPOINT', None),
            config.get('JWT_GETUSER_ENDPOINT', None),
            config.get('JWT_AUTH_ISSUER'),
            override_config_dir,
            config['HTTPCLIENT'],
            config.get('JWT_AUTH_MAX_FRESH_S', 300),
            requires_email=features.MAILING,
        )

    if auth_kind == 'Keystone':
        return get_keystone_users(
            int(config.get('KEYSTONE_AUTH_VERSION', 2)),
            config.get('KEYSTONE_AUTH_URL'),
            config.get('KEYSTONE_ADMIN_USERNAME'),
            config.get('KEYSTONE_ADMIN_PASSWORD'),
            config.get('KEYSTONE_ADMIN_TENANT'),
            config.get('KEYSTONE_AUTH_TIMEOUT'),
            requires_email=features.MAILING,
        )

    if auth_kind == 'AppToken':
        # AppToken is mutually exclusive with direct login and requires
        # app-specific token support.
        if features.DIRECT_LOGIN:
            raise Exception('Direct login feature must be disabled to use AppToken internal auth')
        if not features.APP_SPECIFIC_TOKENS:
            raise Exception('AppToken internal auth requires app specific token support to be enabled')
        return AppTokenInternalAuth()

    raise RuntimeError('Unknown authentication type: %s' % auth_kind)
class FilterTests(unittest.TestCase):
    """Tests for filter-list validation entries: role bypasses, channel
    scoping, DM filtering, and infraction merging."""
    def setUp(self) -> None:
        # Fresh MESSAGE filter context per test; tests overwrite
        # .author / .channel as needed.
        member = MockMember(id=123)
        channel = MockTextChannel(id=345)
        message = MockMessage(author=member, channel=channel)
        self.ctx = FilterContext(Event.MESSAGE, member, channel, '', message)
    def test_role_bypass_is_off_for_user_without_roles(self):
        # No bypass role on the member -> the entry still triggers
        # (filtering applies).
        member = MockMember()
        self.ctx.author = member
        bypass_entry = RoleBypass(bypass_roles=['123'])
        result = bypass_entry.triggers_on(self.ctx)
        self.assertTrue(result)
    def test_role_bypass_is_on_for_a_user_with_the_right_role(self):
        # Any overlap between the member's roles and the bypass list
        # suppresses triggering.
        cases = (([123], ['123']), ([123, 234], ['123']), ([123], ['123', '234']), ([123, 234], ['123', '234']))
        for (user_role_ids, bypasses) in cases:
            with self.subTest(user_role_ids=user_role_ids, bypasses=bypasses):
                user_roles = [MockRole(id=role_id) for role_id in user_role_ids]
                member = MockMember(roles=user_roles)
                self.ctx.author = member
                bypass_entry = RoleBypass(bypass_roles=bypasses)
                result = bypass_entry.triggers_on(self.ctx)
                self.assertFalse(result)
    def test_context_doesnt_trigger_for_empty_channel_scope(self):
        # With no enable/disable lists at all, every channel is in scope.
        channel = MockTextChannel()
        scope = ChannelScope(disabled_channels=None, disabled_categories=None, enabled_channels=None, enabled_categories=None)
        self.ctx.channel = channel
        result = scope.triggers_on(self.ctx)
        self.assertTrue(result)
    def test_context_doesnt_trigger_for_disabled_channel(self):
        channel = MockTextChannel(id=123)
        scope = ChannelScope(disabled_channels=['123'], disabled_categories=None, enabled_channels=None, enabled_categories=None)
        self.ctx.channel = channel
        result = scope.triggers_on(self.ctx)
        self.assertFalse(result)
    def test_context_doesnt_trigger_in_disabled_category(self):
        channel = MockTextChannel(category=MockCategoryChannel(id=456))
        scope = ChannelScope(disabled_channels=None, disabled_categories=['456'], enabled_channels=None, enabled_categories=None)
        self.ctx.channel = channel
        result = scope.triggers_on(self.ctx)
        self.assertFalse(result)
    def test_context_triggers_in_enabled_channel_in_disabled_category(self):
        # An explicitly enabled channel wins over its disabled category.
        channel = MockTextChannel(id=123, category=MockCategoryChannel(id=234))
        scope = ChannelScope(disabled_channels=None, disabled_categories=['234'], enabled_channels=['123'], enabled_categories=None)
        self.ctx.channel = channel
        result = scope.triggers_on(self.ctx)
        self.assertTrue(result)
    def test_context_triggers_inside_enabled_category(self):
        channel = MockTextChannel(id=123, category=MockCategoryChannel(id=234))
        scope = ChannelScope(disabled_channels=None, disabled_categories=None, enabled_channels=None, enabled_categories=['234'])
        self.ctx.channel = channel
        result = scope.triggers_on(self.ctx)
        self.assertTrue(result)
    def test_context_doesnt_trigger_outside_enabled_category(self):
        # When enabled_categories is set, channels outside it are out of scope.
        channel = MockTextChannel(id=123, category=MockCategoryChannel(id=234))
        scope = ChannelScope(disabled_channels=None, disabled_categories=None, enabled_channels=None, enabled_categories=['789'])
        self.ctx.channel = channel
        result = scope.triggers_on(self.ctx)
        self.assertFalse(result)
    def test_context_doesnt_trigger_inside_disabled_channel_in_enabled_category(self):
        # An explicitly disabled channel wins over its enabled category.
        channel = MockTextChannel(id=123, category=MockCategoryChannel(id=234))
        scope = ChannelScope(disabled_channels=['123'], disabled_categories=None, enabled_channels=None, enabled_categories=['234'])
        self.ctx.channel = channel
        result = scope.triggers_on(self.ctx)
        self.assertFalse(result)
    def test_filtering_dms_when_necessary(self):
        # Cases: (filter_dm setting, channel type, expected trigger).
        # Guild channels always trigger; DMs only when filter_dm is True.
        cases = ((True, MockDMChannel(), True), (False, MockDMChannel(), False), (True, MockTextChannel(), True), (False, MockTextChannel(), True))
        for (apply_in_dms, channel, expected) in cases:
            with self.subTest(apply_in_dms=apply_in_dms, channel=channel):
                filter_dms = FilterDM(filter_dm=apply_in_dms)
                self.ctx.channel = channel
                result = filter_dms.triggers_on(self.ctx)
                self.assertEqual(expected, result)
    def test_infraction_merge_of_same_infraction_type(self):
        # Same infraction type: the second infraction's fields take precedence.
        infraction1 = InfractionAndNotification(infraction_type='TIMEOUT', infraction_reason='hi', infraction_duration=InfractionDuration(10), dm_content='how', dm_embed='what is', infraction_channel=0)
        infraction2 = InfractionAndNotification(infraction_type='TIMEOUT', infraction_reason='there', infraction_duration=InfractionDuration(20), dm_content='are you', dm_embed='your name', infraction_channel=0)
        result = infraction1.union(infraction2)
        self.assertDictEqual(result.model_dump(), {'infraction_type': Infraction.TIMEOUT, 'infraction_reason': 'there', 'infraction_duration': InfractionDuration(20.0), 'dm_content': 'are you', 'dm_embed': 'your name', 'infraction_channel': 0})
    def test_infraction_merge_of_different_infraction_types(self):
        # Different types: the merged result takes the more severe
        # infraction (BAN) wholesale.
        infraction1 = InfractionAndNotification(infraction_type='TIMEOUT', infraction_reason='hi', infraction_duration=InfractionDuration(20), dm_content='', dm_embed='', infraction_channel=0)
        infraction2 = InfractionAndNotification(infraction_type='BAN', infraction_reason='', infraction_duration=InfractionDuration(10), dm_content='there', dm_embed='', infraction_channel=0)
        result = infraction1.union(infraction2)
        self.assertDictEqual(result.model_dump(), {'infraction_type': Infraction.BAN, 'infraction_reason': '', 'infraction_duration': InfractionDuration(10), 'dm_content': 'there', 'dm_embed': '', 'infraction_channel': 0})
# NOTE(review): the decorator was garbled in the source (".skipif");
# restored as @pytest.mark.skipif — confirm against VCS history.
@pytest.mark.skipif('sys.platform == "win32" or platform.python_implementation() == "PyPy"')
def test_dist_combine_racecondition(testdir):
    """Regression test: combining coverage data from many xdist workers
    must not race or drop data.

    Generates a 1000-case parametrized test file and runs it across five
    workers, then checks full coverage and the absence of worker/internal
    errors.  The generated file's parametrize decorator was also garbled
    ("\\.parametrize") and is restored here — without it the generated
    module would not even parse.
    """
    script = testdir.makepyfile(('\nimport pytest\n@pytest.mark.parametrize("foo", range(1000))\ndef test_foo(foo):\n' + '\n'.join((f'''
    if foo == {i}:
        assert True
    ''' for i in range(1000)))))
    result = testdir.runpytest('-v', f'--cov={script.dirpath()}', '--cov-report=term-missing', '-n', '5', '-s', script)
    result.stdout.fnmatch_lines(['test_dist_combine_racecondition* 0 * 100%*', '*1000 passed*'])
    for line in chain(result.stdout.lines, result.stderr.lines):
        assert ('The following workers failed to return coverage data' not in line)
        assert ('INTERNALERROR' not in line)
    assert (result.ret == 0)
def downgrade(op, tables, tester):
    """Reverse the migration: drop the organizationrhskus indexes (in the
    same order as before), then drop the table itself."""
    table = 'organizationrhskus'
    for index_name in (
        'organizationrhskus_subscription_id',
        'organizationrhskus_subscription_id_org_id',
        'organizationrhskus_subscription_id_org_id_user_id',
    ):
        op.drop_index(index_name, table_name=table)
    op.drop_table(table)
class GRUencoder(nn.Module):
    """Bidirectional GRU sentence encoder.

    Takes a padded batch of embedded sentences and returns per-token
    hidden states of size 2 * d_out (forward and backward directions
    concatenated).
    """

    def __init__(self, d_emb, d_out, num_layers):
        super(GRUencoder, self).__init__()
        self.gru = nn.GRU(input_size=d_emb, hidden_size=d_out,
                          bidirectional=True, num_layers=num_layers,
                          dropout=0.3)

    def forward(self, sent, sent_lens):
        """Encode a padded batch.

        :param sent: float tensor of shape (batch, max_len, d_emb)
        :param sent_lens: 1-D numpy array of true sentence lengths
        :return: tensor of shape (batch, max_len, 2 * d_out); positions
                 beyond a sentence's true length are zero.
        """
        device = sent.device
        # nn.GRU expects (seq, batch, feat).
        sent_embs = sent.transpose(0, 1)
        # Sort by length, descending, as pack_padded_sequence requires.
        # .copy() is needed because np.sort(...)[::-1] is a negative-stride
        # view, which torch cannot convert to a tensor.
        s_lens = np.sort(sent_lens)[::-1].copy()
        idx_sort = np.argsort(-sent_lens)
        idx_unsort = np.argsort(idx_sort)
        # Use .to(device) rather than .cuda(device): the original forced
        # CUDA and crashed when the input tensor lived on the CPU.
        idx_sort = torch.from_numpy(idx_sort).to(device)
        s_embs = sent_embs.index_select(1, idx_sort)
        sent_packed = pack_padded_sequence(s_embs, s_lens)
        sent_output = self.gru(sent_packed)[0]
        # Re-pad to the full input length so the output aligns with `sent`.
        sent_output = pad_packed_sequence(sent_output, total_length=sent.size(1))[0]
        # Restore the original batch order.
        idx_unsort = torch.from_numpy(idx_unsort).to(device)
        sent_output = sent_output.index_select(1, idx_unsort)
        return sent_output.transpose(0, 1)
def test_get_arguments_from_argument_str():
    """The comma-separated argument string should be parsed into typed
    Python values according to the method descriptor (class ref, int I,
    String, byte-array [B marker, float F, boolean Z)."""
    raw = 'LClass;,10,String,new-array(),3.14,1'
    method_descriptor = '(I Ljava/lang/String; [B F Z)'
    parsed = get_arguments_from_argument_str(raw, method_descriptor)
    expected = ['LClass;', 10, 'String', 'new-array()', 3.14, True]
    assert parsed == expected
# NOTE(review): the decorator was garbled in the source (".parametrize");
# restored as @pytest.mark.parametrize — confirm against VCS history.
@pytest.mark.parametrize(('ctype', 'vconv'), (('VELO-F2W', u.doppler_optical), ('VELO-F2V', u.doppler_relativistic), ('VRAD', u.doppler_radio), ('VOPT', u.doppler_optical), ('VELO', u.doppler_relativistic), ('WAVE', u.doppler_optical), ('WAVE-F2W', u.doppler_optical), ('WAVE-V2W', u.doppler_optical), ('FREQ', u.doppler_radio), ('FREQ-V2F', u.doppler_radio), ('FREQ-W2F', u.doppler_radio)))
def test_vconv_determinator(ctype, vconv):
    """Each spectral CTYPE must map to its conventional velocity/Doppler
    convention equivalency."""
    assert (determine_vconv_from_ctype(ctype) == vconv)
def test_save_options(skip_qtbot, tmp_path):
    """Checking the Atmosphere radio button and saving should persist the
    Atmosphere target platform into the Dread per-game options."""
    options = Options(tmp_path)
    dialog = DreadGameExportDialog(options, {}, 'MyHash', True, [])
    dialog.atmosphere_radio.setChecked(True)
    dialog.save_options()
    saved = options.options_for_game(RandovaniaGame.METROID_DREAD)
    assert isinstance(saved, DreadPerGameOptions)
    assert saved.target_platform == DreadModPlatform.ATMOSPHERE
def test_basic_notification_endtoend(initialized_db):
    """A queued test notification should surface as a local notification
    for the target user once the worker processes the queue item."""
    assert not model.user_has_local_notifications('public')
    uuid = model.create_notification_for_testing('public')
    worker = NotificationWorker(None)
    worker.process_queue_item({'notification_uuid': uuid, 'event_data': {}})
    assert model.user_has_local_notifications('public')
class PasswdUtilsTest(unittest.TestCase):
    """Tests for the password salt/hash helpers.

    NOTE(review): the original `check_*` method names are not collected by
    unittest (discovery only picks up `test_*`), so these tests never ran.
    The bodies are kept under their original names for backward
    compatibility and exposed to the runner through `test_*` aliases below.
    """

    def check_if_two_generated_salts_are_different(self):
        # Two independently generated salts should (virtually) never collide.
        self.assertNotEqual(random_salt_function(), random_salt_function())

    def check_random_add_function_output_is_as_specified(self):
        # salt_len controls the exact length of the generated salt.
        self.assertEqual(len(random_salt_function(salt_len=125)), 125)

    def check_crypt_function_gives_expected_output_for_known_magic_and_salt(self):
        # Known SHA-512 crypt vector ($6$ magic) for a fixed salt.
        password = 'foobarbaz'
        expected_hash = '$6$SqAoXRvk$spgLlL/WL/vcb16ZZ4cMdF5uN90IjH0PpYKdMhqyW.BxXJEVc5RyvnpWcT.OKKJO2vsp32.CWDEd45K6r05bL0'
        salt = 'SqAoXRvk'
        self.assertEqual(expected_hash, hash_password_function(password, salt))

    def check_crypt_function_uses_random_salt(self):
        # When no salt is given, the helper must fetch one from
        # random_salt_function.
        password = 'foobarbaz'
        expected_hash = '$6$SqAoXRvk$spgLlL/WL/vcb16ZZ4cMdF5uN90IjH0PpYKdMhqyW.BxXJEVc5RyvnpWcT.OKKJO2vsp32.CWDEd45K6r05bL0'
        salt = 'SqAoXRvk'
        with patch('provy.more.debian.users.passwd_utils.random_salt_function') as rnd:
            rnd.return_value = salt
            self.assertEqual(expected_hash, hash_password_function(password))
            self.assertTrue(rnd.called)

    # unittest-discoverable aliases for the misnamed tests above.
    test_if_two_generated_salts_are_different = check_if_two_generated_salts_are_different
    test_random_add_function_output_is_as_specified = check_random_add_function_output_is_as_specified
    test_crypt_function_gives_expected_output_for_known_magic_and_salt = check_crypt_function_gives_expected_output_for_known_magic_and_salt
    test_crypt_function_uses_random_salt = check_crypt_function_uses_random_salt
class TestCmdLineTools(BaseTestCase):
    """Tests for the pyvisa command-line entry points."""
    def test_visa_main_argument_handling(self):
        # visa_main must reject an unknown backend name with ValueError.
        from pyvisa.cmd_line_tools import visa_main
        old = sys.argv = ['python']
        try:
            with pytest.raises(ValueError):
                visa_main('unknown')
        finally:
            # Restore argv even if the assertion fails.
            sys.argv = old
    def test_visa_info(self):
        # Only the first five lines of the report are stable across
        # environments, so compare just those against the util helper.
        result = run('pyvisa-info', stdout=PIPE, universal_newlines=True)
        details = util.system_details_to_str(util.get_system_details())
        assert (result.stdout.strip().split('\n')[:5] == details.strip().split('\n')[:5])
    # NOTE(review): "_visa_lib" looks like a truncated decorator (perhaps a
    # skip/requires marker for an installed VISA library) — confirm against
    # upstream; as written it is a bare name lookup at class-body execution.
    _visa_lib
    def test_visa_shell(self):
        # The shell must print its welcome banner exactly once, then exit.
        with Popen(['pyvisa-shell'], stdin=PIPE, stdout=PIPE) as p:
            (stdout, stderr) = p.communicate(b'exit')
            assert (stdout.count(b'Welcome to the VISA shell') == 1)
def test_typed_value() -> None:
    """Exercise TypedValue: display, type checks, assignability, the
    literal_only (LiteralString) flag, and int -> float promotion."""
    val = TypedValue(str)
    assert (val.typ is str)
    assert (str(val) == 'str')
    assert val.is_type(str)
    assert (not val.is_type(int))
    # Plain TypedValue(str) accepts itself and concrete string literals...
    assert_can_assign(val, val)
    assert_cannot_assign(val, TypedValue(int))
    assert_can_assign(val, KnownValue('x'))
    # ...and a union only when every member is assignable.
    assert_can_assign(val, MultiValuedValue([val, KnownValue('x')]))
    assert_cannot_assign(val, MultiValuedValue([KnownValue('x'), TypedValue(int)]))
    # literal_only=True models typing.LiteralString: narrower than str,
    # so str -> LiteralString is rejected but literals are accepted.
    literal_string = TypedValue(str, literal_only=True)
    assert (literal_string.typ is str)
    assert (str(literal_string) == 'LiteralString')
    assert_can_assign(val, literal_string)
    assert_cannot_assign(literal_string, val)
    assert_can_assign(literal_string, KnownValue('x'))
    # float accepts int values per the numeric tower, but not str.
    float_val = TypedValue(float)
    assert (str(float_val) == 'float')
    assert_can_assign(float_val, KnownValue(1.0))
    assert_can_assign(float_val, KnownValue(1))
    assert_cannot_assign(float_val, KnownValue(''))
    assert_can_assign(float_val, TypedValue(float))
    assert_can_assign(float_val, TypedValue(int))
    assert_cannot_assign(float_val, TypedValue(str))
    # Mock is compatible with anything.
    assert_can_assign(float_val, TypedValue(mock.Mock))
    # A class object (SubclassValue) is not a float instance, but it is
    # assignable where a `type` is expected.
    assert_cannot_assign(float_val, SubclassValue(TypedValue(float)))
    assert_can_assign(TypedValue(type), SubclassValue(TypedValue(float)))
# NOTE(review): "_test" looks like a truncated decorator (e.g. @keras_test)
# — confirm against upstream history; as written it is a bare name lookup.
_test
def test_unit_norm():
    """The unit_norm constraint should rescale each column of the input
    to (approximately) unit L2 norm."""
    unit_norm_instance = constraints.unit_norm()
    normalized = unit_norm_instance(K.variable(get_example_array()))
    # Column-wise L2 norms of the constrained array.
    norm_of_normalized = np.sqrt(np.sum((K.eval(normalized) ** 2), axis=0))
    difference = (norm_of_normalized - 1.0)
    largest_difference = np.max(np.abs(difference))
    # Every column norm must be within 1e-4 of 1.
    assert (np.abs(largest_difference) < 0.0001)
def ZFNetBody(net, from_layer, need_fc=True, fully_conv=False, reduced=False, dilated=False, dropout=True, need_fc8=False, freeze_layers=None):
    """Append the ZFNet layer stack to `net`, starting from `from_layer`.

    :param net: caffe NetSpec-like container of layers
    :param from_layer: name of the existing layer to build on
    :param need_fc: append the fc6/fc7 head
    :param fully_conv: express fc6/fc7 as convolutions instead of InnerProduct
    :param reduced: use the reduced (1024-channel) fc head
    :param dilated: keep spatial resolution in pool5 (stride 1) / dilate fc6
    :param dropout: insert dropout after fc6/fc7
    :param need_fc8: append the fc8 classifier and softmax
    :param freeze_layers: names of layers whose learning rates are zeroed
    :return: the mutated `net`
    """
    # freeze_layers previously defaulted to a mutable [] shared between
    # calls; normalize a None default instead.
    if freeze_layers is None:
        freeze_layers = []
    kwargs = {'param': [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)], 'weight_filler': dict(type='xavier'), 'bias_filler': dict(type='constant', value=0)}
    assert (from_layer in net.keys())
    # Convolutional trunk: conv1..conv5 with LRN + max-pooling after the
    # first two stages.
    net.conv1 = L.Convolution(net[from_layer], num_output=96, pad=3, kernel_size=7, stride=2, **kwargs)
    net.relu1 = L.ReLU(net.conv1, in_place=True)
    net.norm1 = L.LRN(net.relu1, local_size=3, alpha=5e-05, beta=0.75, norm_region=P.LRN.WITHIN_CHANNEL, engine=P.LRN.CAFFE)
    net.pool1 = L.Pooling(net.norm1, pool=P.Pooling.MAX, pad=1, kernel_size=3, stride=2)
    net.conv2 = L.Convolution(net.pool1, num_output=256, pad=2, kernel_size=5, stride=2, **kwargs)
    net.relu2 = L.ReLU(net.conv2, in_place=True)
    net.norm2 = L.LRN(net.relu2, local_size=3, alpha=5e-05, beta=0.75, norm_region=P.LRN.WITHIN_CHANNEL, engine=P.LRN.CAFFE)
    net.pool2 = L.Pooling(net.norm2, pool=P.Pooling.MAX, pad=1, kernel_size=3, stride=2)
    net.conv3 = L.Convolution(net.pool2, num_output=384, pad=1, kernel_size=3, **kwargs)
    net.relu3 = L.ReLU(net.conv3, in_place=True)
    net.conv4 = L.Convolution(net.relu3, num_output=384, pad=1, kernel_size=3, **kwargs)
    net.relu4 = L.ReLU(net.conv4, in_place=True)
    net.conv5 = L.Convolution(net.relu4, num_output=256, pad=1, kernel_size=3, **kwargs)
    net.relu5 = L.ReLU(net.conv5, in_place=True)
    if need_fc:
        # pool5 keeps resolution (stride 1) in the dilated configuration.
        if dilated:
            name = 'pool5'
            net[name] = L.Pooling(net.relu5, pool=P.Pooling.MAX, pad=1, kernel_size=3, stride=1)
        else:
            name = 'pool5'
            net[name] = L.Pooling(net.relu5, pool=P.Pooling.MAX, pad=1, kernel_size=3, stride=2)
        if fully_conv:
            # fc6 as a convolution; kernel/pad/dilation depend on the
            # reduced/dilated combination.
            if dilated:
                if reduced:
                    net.fc6 = L.Convolution(net[name], num_output=1024, pad=5, kernel_size=3, dilation=5, **kwargs)
                else:
                    net.fc6 = L.Convolution(net[name], num_output=4096, pad=5, kernel_size=6, dilation=2, **kwargs)
            elif reduced:
                net.fc6 = L.Convolution(net[name], num_output=1024, pad=2, kernel_size=3, dilation=2, **kwargs)
            else:
                net.fc6 = L.Convolution(net[name], num_output=4096, pad=2, kernel_size=6, **kwargs)
            net.relu6 = L.ReLU(net.fc6, in_place=True)
            if dropout:
                net.drop6 = L.Dropout(net.relu6, dropout_ratio=0.5, in_place=True)
            if reduced:
                net.fc7 = L.Convolution(net.relu6, num_output=1024, kernel_size=1, **kwargs)
            else:
                net.fc7 = L.Convolution(net.relu6, num_output=4096, kernel_size=1, **kwargs)
            net.relu7 = L.ReLU(net.fc7, in_place=True)
            if dropout:
                net.drop7 = L.Dropout(net.relu7, dropout_ratio=0.5, in_place=True)
        else:
            # Classic InnerProduct head.
            net.fc6 = L.InnerProduct(net.pool5, num_output=4096)
            net.relu6 = L.ReLU(net.fc6, in_place=True)
            if dropout:
                net.drop6 = L.Dropout(net.relu6, dropout_ratio=0.5, in_place=True)
            net.fc7 = L.InnerProduct(net.relu6, num_output=4096)
            net.relu7 = L.ReLU(net.fc7, in_place=True)
            if dropout:
                net.drop7 = L.Dropout(net.relu7, dropout_ratio=0.5, in_place=True)
    if need_fc8:
        # list(...) keeps this working on Python 3, where dict views are
        # not indexable (net.keys()[-1] raised TypeError there).
        from_layer = list(net.keys())[-1]
        if fully_conv:
            net.fc8 = L.Convolution(net[from_layer], num_output=1000, kernel_size=1, **kwargs)
        else:
            net.fc8 = L.InnerProduct(net[from_layer], num_output=1000)
        net.prob = L.Softmax(net.fc8)
    # Zero learning/decay rates for the layers requested to be frozen.
    kwargs['param'] = [dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)]
    layers = net.keys()
    for freeze_layer in freeze_layers:
        if (freeze_layer in layers):
            net.update(freeze_layer, kwargs)
    return net
# NOTE(review): the decorator was garbled in the source ("_required");
# restored as Django's permission_required — confirm against VCS history.
@permission_required('task.add_ansibleplaybook', raise_exception=True)
def playbook_upload(request):
    """POST handler that either stores an uploaded playbook file or updates
    an existing playbook's description.

    NOTE(review): non-POST requests (and POSTs with neither field) fall
    through and return None — presumably unreachable via URL routing, but
    worth confirming.
    """
    if request.method == 'POST':
        playbook_file = request.FILES.get('playbook_file')
        playbook_name = request.POST.get('playbook_name')
        if playbook_file:
            playbook = AnsiblePlaybook.objects.create(
                playbook_name=playbook_file.name,
                playbook_file=playbook_file,
                playbook_user=request.user)
            # Read the stored file back in one call rather than
            # concatenating line by line (quadratic string building).
            with open(playbook.playbook_file.path, 'r') as f:
                playbook.playbook_content = f.read()
            playbook.save()
            return JsonResponse({'code': 200, 'msg': '!'})
        elif playbook_name:
            # Update only the description of an existing playbook.
            playbook_desc = request.POST.get('playbook_desc')
            playbook_obj = AnsiblePlaybook.objects.select_related('playbook_user').get(playbook_name=playbook_name)
            playbook_obj.playbook_desc = playbook_desc
            playbook_obj.save()
            return JsonResponse({'code': 200, 'msg': '!', 'playbook_time': playbook_obj.playbook_time, 'id': playbook_obj.id})
class TruncatedDataset(Dataset):
    """Wraps a dataset, exposing only a random fraction `pc` of its items.

    The subset is sampled once, without replacement, at construction time
    (using the global numpy RNG state).
    """

    def __init__(self, base_dataset, pc):
        """
        :param base_dataset: dataset to truncate (needs __len__/__getitem__)
        :param pc: fraction of items to keep, in (0, 1]
        """
        self.base_dataset = base_dataset
        self.len = int(len(self.base_dataset) * pc)
        self.random_order = np.random.choice(len(self.base_dataset), size=self.len, replace=False)

    def __getitem__(self, item):
        # Raise IndexError rather than using `assert`: assertions vanish
        # under `python -O`, and IndexError is what iteration protocols
        # and DataLoader-style consumers expect for out-of-range indices.
        if item >= self.len:
            raise IndexError('index %d out of range for truncated dataset of length %d' % (item, self.len))
        return self.base_dataset.__getitem__(self.random_order[item])

    def __len__(self):
        return self.len
def test_param_scaling():
    """ParameterList.add must validate the scaling argument: type (must be
    a VariableScaling), strictly positive factors, dimensionality, row
    count matching `size`, and a single column.

    Note: the `f` prefixes previously on two of the match strings were
    no-ops (no placeholders) and have been removed.
    """
    param = ParameterList()
    # Wrong type entirely: str, float, list.
    with pytest.raises(ValueError, match='Parameter scaling must be a VariableScaling'):
        param.add('gravity_z', my_parameter_function, size=1, scaling='a')
    with pytest.raises(ValueError, match='Parameter scaling must be a VariableScaling'):
        param.add('gravity_z', my_parameter_function, size=1, scaling=1.0)
    with pytest.raises(ValueError, match='Parameter scaling must be a VariableScaling'):
        param.add('gravity_z', my_parameter_function, size=1, scaling=[])
    # Non-positive factors are rejected.
    with pytest.raises(ValueError, match='Scaling factors must be strictly greater than zero.'):
        param.add('gravity_z', my_parameter_function, size=1, scaling=VariableScaling('gravity_z', np.array([(- 1)])))
    # 3-D arrays are rejected.
    with pytest.raises(ValueError, match='Scaling must be a 1- or 2- dimensional numpy array'):
        param.add('gravity_z', my_parameter_function, size=1, scaling=VariableScaling('gravity_z', np.array([[[1]]])))
    # Row count must match the declared size.
    with pytest.raises(ValueError, match='Parameter scaling must be of size 3, not 2.'):
        param.add('gravity_z', my_parameter_function, size=3, scaling=VariableScaling('gravity_z', np.array([1, 2])))
    # Exactly one column is required.
    with pytest.raises(ValueError, match='Parameter scaling must have exactly one column, not 2.'):
        param.add('gravity_z', my_parameter_function, size=3, scaling=VariableScaling('gravity_z', np.ones((3, 2))))
class Snake(Converter):
    """Convert a user-supplied snake name to a canonical (scientific) name.

    Name data is loaded lazily from the resource JSON files and cached on
    the class, so every instance shares one copy.
    """

    # Cached list of {'name': ..., 'scientific': ...} entries; None until loaded.
    snakes = None
    # Cached mapping of lowercase name -> special-case entry; None until loaded.
    special_cases = None

    async def convert(self, ctx: Context, name: str) -> str:
        """Resolve *name* to a scientific snake name, disambiguating via fuzzy match."""
        await self.build_list()
        name = name.lower()
        if name == 'python':
            # Easter egg: the programming language, not the snake.
            return 'Python (programming language)'

        def get_potential(iterable: Iterable, *, threshold: int = 80) -> list[str]:
            """Return candidate names whose fuzzy-match score meets *threshold*."""
            nonlocal name
            potential = []
            for item in iterable:
                original, item = item, item.lower()
                if name == item:
                    # Exact match short-circuits the search.
                    return [original]
                a, b = fuzz.ratio(name, item), fuzz.partial_ratio(name, item)
                if a >= threshold or b >= threshold:
                    potential.append(original)
            return potential

        # `name` is already lowercase here, so no extra .lower() calls are needed.
        if name in self.special_cases:
            return self.special_cases.get(name, name)
        names = {snake['name']: snake['scientific'] for snake in self.snakes}
        all_names = names.keys() | names.values()
        timeout = len(all_names) * (3 / 4)
        embed = discord.Embed(title='Found multiple choices. Please choose the correct one.', colour=5871663)
        embed.set_author(name=ctx.author.display_name, icon_url=ctx.author.display_avatar.url)
        name = await disambiguate(ctx, get_potential(all_names), timeout=timeout, embed=embed)
        return names.get(name, name)

    # BUG FIX: these two methods took `cls` but were missing @classmethod, so
    # Snake.random() raised a TypeError and instance calls cached data on the
    # instance instead of the class.
    @classmethod
    async def build_list(cls) -> None:
        """Load and cache the snake name data from resource files (idempotent)."""
        if cls.snakes is None:
            cls.snakes = json.loads((SNAKE_RESOURCES / 'snake_names.json').read_text('utf8'))
        if cls.special_cases is None:
            special_cases = json.loads((SNAKE_RESOURCES / 'special_snakes.json').read_text('utf8'))
            cls.special_cases = {snake['name'].lower(): snake for snake in special_cases}

    @classmethod
    async def random(cls) -> str:
        """Return a random scientific snake name (builds the cache if needed)."""
        await cls.build_list()
        names = [snake['scientific'] for snake in cls.snakes]
        return random.choice(names)
def select_device(device='', batch_size=None):
    """Select and return the torch device to run on.

    Args:
        device: '' (auto), 'cpu', or CUDA device id string such as '0' or '0,1,2,3'.
        batch_size: optional total batch size; with multiple GPUs it must be
            divisible by the GPU count.

    Returns:
        torch.device: 'cuda:0' when CUDA is used, otherwise 'cpu'.
    """
    cpu_request = (device.lower() == 'cpu')
    if (device and (not cpu_request)):
        # Restrict visible GPUs before CUDA availability is checked.
        os.environ['CUDA_VISIBLE_DEVICES'] = device
        assert torch.cuda.is_available(), ('CUDA unavailable, invalid device %s requested' % device)
    cuda = (False if cpu_request else torch.cuda.is_available())
    if cuda:
        c = (1024 ** 2)  # bytes -> megabytes
        ng = torch.cuda.device_count()
        if ((ng > 1) and batch_size):
            # Each GPU must receive an equal share of the batch.
            assert ((batch_size % ng) == 0), ('batch-size %g not multiple of GPU count %g' % (batch_size, ng))
        x = [torch.cuda.get_device_properties(i) for i in range(ng)]
        s = 'Using CUDA '
        for i in range(0, ng):
            if (i == 1):
                # Blank out the prefix so subsequent lines align under the first.
                s = (' ' * len(s))
            logger.info(("%sdevice%g _CudaDeviceProperties(name='%s', total_memory=%dMB)" % (s, i, x[i].name, (x[i].total_memory / c))))
    else:
        logger.info('Using CPU')
    logger.info('')
    return torch.device(('cuda:0' if cuda else 'cpu'))
class TesttestPurePyShpWrapper():
    """Round-trip, seek and random-access tests for PurePyShpWrapper on 10740.shp."""

    def setup_method(self):
        # Source shapefile shipped with the pysal examples.
        test_file = pysal_examples.get_path('10740.shp')
        self.test_file = test_file
        self.shp_obj = PurePyShpWrapper(test_file, 'r')
        # Reserve a temporary .shp path; only the name is needed for test_write.
        tmp = tempfile.NamedTemporaryFile(suffix='.shp')
        self.shpcopy = tmp.name
        tmp.close()
        self.shxcopy = self.shpcopy.replace('.shp', '.shx')

    def test_len(self):
        assert len(self.shp_obj) == 195

    def test_tell(self):
        # Position advances by the number of shapes consumed.
        assert self.shp_obj.tell() == 0
        self.shp_obj.read(1)
        assert self.shp_obj.tell() == 1
        self.shp_obj.read(50)
        assert self.shp_obj.tell() == 51
        self.shp_obj.read()
        assert self.shp_obj.tell() == 195

    def test_seek(self):
        for position in (0, 55):
            self.shp_obj.seek(position)
            assert self.shp_obj.tell() == position
        self.shp_obj.read(1)
        assert self.shp_obj.tell() == 56

    def test_read(self):
        # Bulk read and iteration must yield the same geometries.
        self.shp_obj.seek(0)
        via_read = self.shp_obj.read()
        assert len(via_read) == 195
        self.shp_obj.seek(0)
        via_iter = list(self.shp_obj)
        assert len(via_iter) == 195
        for shp_a, shp_b in zip(via_read, via_iter, strict=True):
            assert shp_a.vertices == shp_b.vertices

    def test_random_access(self):
        self.shp_obj.seek(57)
        shp57 = self.shp_obj.read(1)[0]
        self.shp_obj.seek(32)
        shp32 = self.shp_obj.read(1)[0]
        # Re-reading after seeking back must yield identical geometry.
        for position, expected in ((57, shp57), (32, shp32)):
            self.shp_obj.seek(position)
            assert self.shp_obj.read(1)[0].vertices == expected.vertices

    def test_write(self):
        out = PurePyShpWrapper(self.shpcopy, 'w')
        self.shp_obj.seek(0)
        for shp in self.shp_obj:
            out.write(shp)
        out.close()
        # The copy must be byte-identical, for both the .shp and .shx files.
        with open(self.test_file, 'rb') as orig, open(self.shpcopy, 'rb') as copy:
            assert orig.read() == copy.read()
        with open(self.test_file.replace('.shp', '.shx'), 'rb') as oshx, open(self.shxcopy, 'rb') as cshx:
            assert oshx.read() == cshx.read()
        os.remove(self.shpcopy)
        os.remove(self.shxcopy)
def create_patterns_for_ops():
    """Build and register a matcher subgraph for every templated op type.

    Templates that do not support the running TensorFlow major version are
    skipped. For most ops a second, placeholder-input variant is registered
    as well; batch-norm ops are excluded from that variant.
    """
    # The TF version is constant for the process, so parse it once.
    current_major = version.parse(tf.version.VERSION).major
    for pattern_op_type, info_dict in op_type_templates.items():
        input_shape = info_dict['input_shape']
        constructor_string = info_dict['constructor']
        additional_starting_ops = info_dict.get('additional_starting_ops', [])
        op_type = info_dict['op_type']
        supported_tf_versions = info_dict['supported_tf_versions']
        if current_major not in supported_tf_versions:
            continue
        default_subgraph = create_subgraph_for_op_default(input_shape, constructor_string)
        store_op_type_pattern_info(default_subgraph, additional_starting_ops, pattern_op_type, info_dict)
        if op_type not in ('FusedBatchNormV3', 'BatchNorm'):
            placeholder_subgraph = create_subgraph_for_op_with_placeholder(input_shape, constructor_string, op_type)
            store_op_type_pattern_info(placeholder_subgraph, additional_starting_ops, pattern_op_type + '_placeholder', info_dict)
class MockProposer(Proposer):
    """Inert Proposer stub for tests: every hook does nothing and proposes nothing."""

    def load(self, search_space: List[ShardingOption], enumerator: Optional[Enumerator] = None) -> None:
        """Ignore the search space."""

    def feedback(self, partitionable: bool, plan: Optional[List[ShardingOption]] = None, perf_rating: Optional[float] = None, storage_constraint: Optional[Topology] = None) -> None:
        """Discard all feedback."""

    def propose(self) -> Optional[List[ShardingOption]]:
        """Return None: no sharding options are proposed."""
class Canvas(object):
    """An RGB raster backed by a flat byte array, writable as a binary PPM.

    Pixels start black with the blue channel at full intensity. ``plot``
    addresses pixels with y increasing upward (y=0 is the bottom row).
    """

    def __init__(self, width, height):
        self.width = width
        self.height = height
        n_pixels = width * height
        self.bytes = array.array('B', [0] * (n_pixels * 3))
        # Initialise every pixel's blue channel to 255 (blue background).
        for pixel in range(n_pixels):
            self.bytes[pixel * 3 + 2] = 255

    def plot(self, x, y, r, g, b):
        """Set pixel (x, y) from float channel values; each is clamped to [0, 255]."""
        # Flip y so that row 0 of the buffer is the top of the image.
        offset = ((self.height - y - 1) * self.width + x) * 3
        for channel, value in enumerate((r, g, b)):
            self.bytes[offset + channel] = max(0, min(255, int(value * 255)))

    def write_ppm(self, filename):
        """Write the canvas to *filename* as a binary (P6) PPM image."""
        header = 'P6 %d %d 255\n' % (self.width, self.height)
        with open(filename, 'wb') as fp:
            fp.write(header.encode('ascii'))
            fp.write(self.bytes.tobytes())
class PFGeneralPref(PreferenceView):
    """Preference pane showing the database/user-path settings plus destructive
    cleanup buttons.

    The path and database fields are read-only: they can only be changed via
    command-line switches, so this pane merely displays their current values.
    """

    def populatePanel(self, panel):
        """Build the pane's widgets and lay them out inside *panel*."""
        self.title = _t('Database')
        self.dirtySettings = False
        mainSizer = wx.BoxSizer(wx.VERTICAL)
        # Pane title and explanatory subtitle.
        self.stTitle = wx.StaticText(panel, wx.ID_ANY, self.title, wx.DefaultPosition, wx.DefaultSize, 0)
        self.stTitle.Wrap((- 1))
        self.stTitle.SetFont(wx.Font(12, 70, 90, 90, False, wx.EmptyString))
        mainSizer.Add(self.stTitle, 0, (wx.EXPAND | wx.ALL), 5)
        self.stSubTitle = wx.StaticText(panel, wx.ID_ANY, _t('(Cannot be changed while pyfa is running. Set via command line switches.)'), wx.DefaultPosition, wx.DefaultSize, 0)
        self.stSubTitle.Wrap((- 1))
        mainSizer.Add(self.stSubTitle, 0, wx.ALL, 3)
        self.m_staticline1 = wx.StaticLine(panel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
        mainSizer.Add(self.m_staticline1, 0, ((wx.EXPAND | wx.TOP) | wx.BOTTOM), 5)
        # Read-only display of the current save location and databases.
        self.cbsaveInRoot = wx.CheckBox(panel, wx.ID_ANY, _t('Using Executable Path for Saved Fit Database and Settings'), wx.DefaultPosition, wx.DefaultSize, 0)
        mainSizer.Add(self.cbsaveInRoot, 0, (wx.ALL | wx.EXPAND), 5)
        self.stSetUserPath = wx.StaticText(panel, wx.ID_ANY, _t('pyfa User Path:'), wx.DefaultPosition, wx.DefaultSize, 0)
        self.stSetUserPath.Wrap((- 1))
        mainSizer.Add(self.stSetUserPath, 0, wx.ALL, 5)
        self.inputUserPath = wx.TextCtrl(panel, wx.ID_ANY, config.savePath, wx.DefaultPosition, wx.DefaultSize, 0)
        self.inputUserPath.SetEditable(False)
        # Grey background signals the field is not editable.
        self.inputUserPath.SetBackgroundColour((200, 200, 200))
        mainSizer.Add(self.inputUserPath, 0, (wx.ALL | wx.EXPAND), 5)
        self.stFitDB = wx.StaticText(panel, wx.ID_ANY, _t('Fitting Database:'), wx.DefaultPosition, wx.DefaultSize, 0)
        self.stFitDB.Wrap((- 1))
        mainSizer.Add(self.stFitDB, 0, wx.ALL, 5)
        self.inputFitDB = wx.TextCtrl(panel, wx.ID_ANY, config.saveDB, wx.DefaultPosition, wx.DefaultSize, 0)
        self.inputFitDB.SetEditable(False)
        self.inputFitDB.SetBackgroundColour((200, 200, 200))
        mainSizer.Add(self.inputFitDB, 0, (wx.ALL | wx.EXPAND), 5)
        self.stGameDB = wx.StaticText(panel, wx.ID_ANY, _t('Game Database:'), wx.DefaultPosition, wx.DefaultSize, 0)
        self.stGameDB.Wrap((- 1))
        mainSizer.Add(self.stGameDB, 0, wx.ALL, 5)
        self.inputGameDB = wx.TextCtrl(panel, wx.ID_ANY, config.gameDB, wx.DefaultPosition, wx.DefaultSize, 0)
        self.inputGameDB.SetEditable(False)
        self.inputGameDB.SetBackgroundColour((200, 200, 200))
        mainSizer.Add(self.inputGameDB, 0, (wx.ALL | wx.EXPAND), 5)
        self.cbsaveInRoot.SetValue(config.saveInRoot)
        self.cbsaveInRoot.Bind(wx.EVT_CHECKBOX, self.onCBsaveInRoot)
        self.m_staticline3 = wx.StaticLine(panel, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
        mainSizer.Add(self.m_staticline3, 0, ((wx.EXPAND | wx.TOP) | wx.BOTTOM), 5)
        # Destructive-action buttons (each confirms before deleting).
        btnSizer = wx.BoxSizer(wx.VERTICAL)
        btnSizer.AddStretchSpacer()
        self.btnDeleteDamagePatterns = wx.Button(panel, wx.ID_ANY, _t('Delete All Damage Pattern Profiles'), wx.DefaultPosition, wx.DefaultSize, 0)
        btnSizer.Add(self.btnDeleteDamagePatterns, 0, wx.ALL, 5)
        self.btnDeleteDamagePatterns.Bind(wx.EVT_BUTTON, self.DeleteDamagePatterns)
        self.btnDeleteTargetProfiles = wx.Button(panel, wx.ID_ANY, _t('Delete All Target Profiles'), wx.DefaultPosition, wx.DefaultSize, 0)
        btnSizer.Add(self.btnDeleteTargetProfiles, 0, wx.ALL, 5)
        self.btnDeleteTargetProfiles.Bind(wx.EVT_BUTTON, self.DeleteTargetProfiles)
        self.btnPrices = wx.Button(panel, wx.ID_ANY, _t('Delete All Prices'), wx.DefaultPosition, wx.DefaultSize, 0)
        btnSizer.Add(self.btnPrices, 0, wx.ALL, 5)
        self.btnPrices.Bind(wx.EVT_BUTTON, self.DeletePrices)
        mainSizer.Add(btnSizer, 0, wx.EXPAND, 5)
        panel.SetSizer(mainSizer)
        panel.Layout()

    def DeleteDamagePatterns(self, event):
        """Delete all damage pattern profiles after user confirmation."""
        question = _t('This is a destructive action that will delete all damage pattern profiles.\nAre you sure you want to do this?')
        if wxHelpers.YesNoDialog(question, _t('Confirm')):
            clearDamagePatterns()

    def DeleteTargetProfiles(self, event):
        """Delete all target profiles after user confirmation."""
        question = _t('This is a destructive action that will delete all target profiles.\nAre you sure you want to do this?')
        if wxHelpers.YesNoDialog(question, _t('Confirm')):
            clearTargetProfiles()

    def DeletePrices(self, event):
        """Delete all cached prices after user confirmation."""
        question = _t('This is a destructive action that will delete all cached prices out of the database.\nAre you sure you want to do this?')
        if wxHelpers.YesNoDialog(question, _t('Confirm')):
            clearPrices()

    def onCBsaveInRoot(self, event):
        """Re-assert the stored value, making the checkbox effectively read-only."""
        self.cbsaveInRoot.SetValue(config.saveInRoot)

    def getImage(self):
        """Return the bitmap shown for this pane in the preferences list."""
        return BitmapLoader.getBitmap('settings_database', 'gui')
@pytest.mark.unit
@pytest.mark.parametrize('decorator', [pytask.mark.depends_on, pytask.mark.produces])
@pytest.mark.parametrize(('values', 'expected'), [({'objects': 'a'}, ['a']), ({'objects': ['b']}, [['b']]), ({'objects': ['e', 'f']}, [['e', 'f']])])
def test_extract_kwargs_from_mark(decorator, values, expected):
    """Keyword arguments passed to a depends_on/produces marker are extracted intact."""
    # NOTE(review): the decorator lines above and the `@decorator(**values)`
    # application below were garbled in the original (bare `.unit()`,
    # `.parametrize(...)` and `(**values)` lines); reconstructed here.
    @decorator(**values)
    def task_example():
        pass

    # Pick the parser matching the marker under test.
    parser = depends_on if decorator.name == 'depends_on' else produces
    result = list(_extract_nodes_from_function_markers(task_example, parser))
    assert result == expected
def init_summary_writer(config):
    """Create the summary/checkpoint directories and return (logger, writer).

    The logger snapshots the model, main and pruner source files alongside
    the log; the writer is a TensorBoard SummaryWriter on the summary dir.
    """
    makedirs(config.summary_dir)
    makedirs(config.checkpoint_dir)
    # Debug print retained from the original: shows whether the checkpoint
    # directory already exists before it is created.
    print(config.checkpoint, os.path.exists(config.checkpoint))
    if not os.path.exists(config.checkpoint):
        os.makedirs(config.checkpoint)
    here = os.path.dirname(os.path.abspath(__file__))
    model_file = os.path.join(here, 'models/%s.py' % config.network)
    main_file = os.path.join(here, 'main_prune.py')
    pruner_file = os.path.join(here, 'pruner/%s.py' % config.pruner)
    logger = get_logger(f'log{running_time}.log_time', logpath=config.saving_log, filepath=model_file, package_files=[main_file, pruner_file])
    logger.info(dict(config))
    writer = SummaryWriter(config.summary_dir)
    return (logger, writer)
def get_metrics(image_features, text_features, logit_scale):
    """Compute image<->text retrieval metrics from paired feature matrices.

    Args:
        image_features: (N, D) tensor of image embeddings; row i pairs with
            row i of ``text_features``.
        text_features: (N, D) tensor of text embeddings.
        logit_scale: scalar multiplier applied to the similarity logits.

    Returns:
        dict with keys '{d}_mean_rank', '{d}_median_rank' and '{d}_{k}'
        (recall@k for k in 1, 5, 10) for d in 'image_to_text' and
        'text_to_image'.
    """
    metrics = {}
    # BUG FIX: the matrix-multiplication operator '@' was missing between the
    # scaled image features and the transposed text features.
    logits_per_image = (logit_scale * image_features @ text_features.t()).detach().cpu()
    logits_per_text = logits_per_image.t().detach().cpu()
    logits = {'image_to_text': logits_per_image, 'text_to_image': logits_per_text}
    # The matching pair for row i sits at column i.
    ground_truth = torch.arange(len(text_features)).view(-1, 1)
    for name, logit in logits.items():
        # Rank (0 == best) of the ground-truth column within each sorted row.
        ranking = torch.argsort(logit, descending=True)
        preds = torch.where(ranking == ground_truth)[1]
        preds = preds.detach().cpu().numpy()
        metrics[f'{name}_mean_rank'] = preds.mean() + 1
        metrics[f'{name}_median_rank'] = np.floor(np.median(preds)) + 1
        for k in [1, 5, 10]:
            metrics[f'{name}_{k}'] = np.mean(preds < k)
    return metrics
def command_tttextract(args):
    """CLI handler: extract traveltimes for given phases over a grid selection.

    Consumes, from the end of *args*: a <selection> grid spec and a
    comma-separated <phase> list; the remainder identifies the GF store
    directory. Results go to stdout, or are appended to per-coordinate text
    files when --output is given.
    """

    def setup(parser):
        parser.add_option('--output', dest='output_fn', metavar='TEMPLATE', help='output to text files instead of stdout (example TEMPLATE: "extracted/%(args)s.txt")')
    (parser, options, args) = cl_parse('tttextract', args, setup=setup)
    try:
        sdef = args.pop()
    except Exception:
        parser.error('cannot get <selection> argument')
    try:
        sphase = args.pop()
    except Exception:
        parser.error('cannot get <phase> argument')
    try:
        # Comma-separated phase definitions, e.g. 'P,S'.
        phases = [gf.meta.Timing(x.strip()) for x in sphase.split(',')]
    except gf.meta.InvalidTimingSpecification:
        parser.error(('invalid phase specification: "%s"' % sphase))
    try:
        gdef = gf.meta.parse_grid_spec(sdef)
    except gf.meta.GridSpecError as e:
        die(e)
    store_dir = get_store_dir(args)
    try:
        store = gf.Store(store_dir)
        # NOTE: the loop variable deliberately shadows the outer `args`;
        # inside the loop it is a tuple of grid coordinates, not CLI args.
        for args in store.config.iter_extraction(gdef, level=(- 1)):
            s = [('%e' % x) for x in args]
            for phase in phases:
                t = store.t(phase, args)
                if (t is not None):
                    s.append(('%e' % t))
                else:
                    # No arrival for this phase at these coordinates.
                    s.append('nan')
            if options.output_fn:
                d = dict(args='_'.join((('%e' % x) for x in args)), extension='txt')
                fn = (options.output_fn % d)
                util.ensuredirs(fn)
                # Append mode: repeated runs accumulate rows in the same file.
                with open(fn, 'a') as f:
                    f.write(' '.join(s))
                    f.write('\n')
            else:
                print(' '.join(s))
    except (gf.meta.GridSpecError, gf.StoreError, gf.meta.OutOfBounds) as e:
        die(e)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.