code stringlengths 281 23.7M |
|---|
class ListCategory(QSortFilterProxyModel):
    """A sortable, filterable completion category backed by a QStandardItemModel."""

    def __init__(self, name: str, items: Iterable[Tuple[(str, ...)]], sort: bool=True, delete_func: util.DeleteFuncType=None, parent: QWidget=None):
        super().__init__(parent)
        self.name = name
        self.srcmodel = QStandardItemModel(parent=self)
        self._pattern = ''
        # Filter against every column; -1 asks Qt to match all of them.
        self.columns_to_filter = [0, 1, 2]
        self.setFilterKeyColumn(-1)
        for row in items:
            self.srcmodel.appendRow([QStandardItem(cell) for cell in row])
        self.setSourceModel(self.srcmodel)
        self.delete_func = delete_func
        self._sort = sort

    def set_pattern(self, val):
        """Set the filter pattern; every whitespace-separated term must match."""
        if len(val) > 5000:
            log.completion.warning(f'Trimming {len(val)}-char pattern to 5000')
            val = val[:5000]
        self._pattern = val
        # One lookahead per term so all terms must appear, in any order.
        escaped_terms = (re.escape(term) for term in val.split())
        regex = '(?=.*' + ')(?=.*'.join(escaped_terms) + ')'
        rx = QRegularExpression(regex, QRegularExpression.PatternOption.CaseInsensitiveOption)
        qtutils.ensure_valid(rx)
        self.setFilterRegularExpression(rx)
        self.invalidate()
        self.sort(0)

    def lessThan(self, lindex, rindex):
        """Custom sort: rows starting with the pattern come first, then lexical order."""
        qtutils.ensure_valid(lindex)
        qtutils.ensure_valid(rindex)
        left = self.srcmodel.data(lindex)
        right = self.srcmodel.data(rindex)
        if left is None or right is None:
            log.completion.warning('Got unexpected None value, left={!r} right={!r} lindex={!r} rindex={!r}'.format(left, right, lindex, rindex))
            return False
        leftstart = left.startswith(self._pattern)
        rightstart = right.startswith(self._pattern)
        if leftstart != rightstart:
            # Exactly one side starts with the pattern; that side sorts first.
            return leftstart
        if self._sort:
            return left < right
        return False
def test_to_tensor():
    """ToTensor converts all targeted value kinds and rejects unsupported ones."""
    # An unsupported value type ('str') must raise TypeError.
    to_tensor = ToTensor(['str'])
    with pytest.raises(TypeError):
        results = dict(str='0')
        to_tensor(results)

    target_keys = ['tensor', 'numpy', 'sequence', 'int', 'float']
    to_tensor = ToTensor(target_keys)

    def _check(sample):
        converted = to_tensor(sample)
        assert assert_dict_has_keys(converted, target_keys)
        for key in target_keys:
            assert isinstance(converted[key], torch.Tensor)
            assert torch.equal(converted[key].data, sample[key])

    _check(dict(tensor=torch.randn(2, 3), numpy=np.random.randn(2, 3), sequence=list(range(10)), int=1, float=0.1))
    # Extra keys that are not targeted (str) must be tolerated.
    _check(dict(tensor=torch.randn(2, 3), numpy=np.random.randn(2, 3), sequence=list(range(10)), int=1, float=0.1, str='test'))
    assert repr(to_tensor) == (to_tensor.__class__.__name__ + f'(keys={target_keys})')
def process(num_to_process, path_list, target_file_path, search_item):
    """Scan the archives in the first `num_to_process` directories of `path_list`.

    For each directory, every file matching the module-global `options.file_mask`
    is run through 7za against `target_file_path` and the output is scanned for
    `search_item`.  Processed directories are popped from the front of
    `path_list` (the list is mutated in place).

    Returns the remaining, unprocessed tail of `path_list`.
    """
    # for-range replaces the original manual while-counter; `file` renamed to
    # avoid shadowing the builtin; unused `file_store` local dropped.
    for _ in range(num_to_process):
        directory = path_list[0]
        # NOTE(review): relies on the module-global `options` for the file mask.
        for file_name in get_files(options.file_mask, directory):
            file_path = os.path.join(directory, file_name)
            output = run_7za(file_path, target_file_path)
            parse_return(output, file_path, search_item)
        path_list.pop(0)
    return path_list
def _upgrade_venv(venv_dir: Path, pip_args: List[str], verbose: bool, *, include_injected: bool, upgrading_all: bool, force: bool) -> int:
    """Upgrade a venv's main package and, optionally, its injected packages.

    Returns the number of packages whose version actually changed.
    Raises PipxError when the venv directory is missing or lacks pipx metadata.
    """
    if not venv_dir.is_dir():
        raise PipxError(f'''
Package is not installed. Expected to find {str(venv_dir)}, but it
does not exist.
''')
    venv = Venv(venv_dir, verbose=verbose)
    if not venv.package_metadata:
        raise PipxError(f'''Not upgrading {red(bold(venv_dir.name))}. It has missing internal pipx metadata.
It was likely installed using a pipx version before 0.15.0.0.
Please uninstall and install this package to fix.''', wrap_message=False)
    venv.upgrade_packaging_libraries(pip_args)
    # Main package first, then any injected packages if requested.
    updated = _upgrade_package(venv, venv.main_package_name, pip_args, is_main_package=True, force=force, upgrading_all=upgrading_all)
    if include_injected:
        for pkg in venv.package_metadata:
            if pkg == venv.main_package_name:
                continue
            updated += _upgrade_package(venv, pkg, pip_args, is_main_package=False, force=force, upgrading_all=upgrading_all)
    return updated
class CalcRemoveCommandFitCommand(wx.Command):
    """Undoable wx command that unlinks a command fit from a fit.

    Do() removes the command fit from the target fit's commandFitDict,
    remembering whether the link was active so Undo() can restore it.
    """

    def __init__(self, fitID, commandFitID):
        wx.Command.__init__(self, True, 'Remove Command Fit')
        # IDs of the fit being modified and of the command fit to unlink.
        self.fitID = fitID
        self.commandFitID = commandFitID
        # Active/inactive state of the link, captured in Do() for Undo().
        self.savedState = None

    def Do(self):
        """Remove the link; returns False (meaning: nothing done) if data is missing."""
        pyfalog.debug('Doing removal of command fit {} for fit {}'.format(self.commandFitID, self.fitID))
        sFit = Fit.getInstance()
        fit = sFit.getFit(self.fitID)
        commandFit = sFit.getFit(self.commandFitID)
        if (commandFit is None):
            pyfalog.debug('Command fit is not available')
            return False
        commandInfo = commandFit.getCommandInfo(self.fitID)
        if (commandInfo is None):
            pyfalog.warning('Fit command info is not available')
            return False
        # Remember the link's active flag so Undo() can re-add it in the same state.
        self.savedState = commandInfo.active
        if (commandFit.ID not in fit.commandFitDict):
            pyfalog.warning('Unable to find commanding fit in command dict')
            return False
        del fit.commandFitDict[commandFit.ID]
        return True

    def Undo(self):
        """Re-create the link with the state captured by Do()."""
        pyfalog.debug('Undoing removal of command fit {} for fit {}'.format(self.commandFitID, self.fitID))
        # Imported here rather than at module level -- presumably to avoid a
        # circular import between the command modules; confirm before moving.
        from .add import CalcAddCommandCommand
        cmd = CalcAddCommandCommand(fitID=self.fitID, commandFitID=self.commandFitID, state=self.savedState)
        return cmd.Do()
def load_and_map_checkpoint(model, model_dir, remap):
    """Copy selected checkpoint parameters into `model`.

    `remap` maps destination parameter names (in `model`) to source names
    (inside the checkpoint's 'model' state dict).
    """
    path = os.path.join(model_dir, 'model_checkpoint')
    print(('Loading parameters %s from %s' % (remap.keys(), model_dir)))
    checkpoint = torch.load(path)
    state = model.state_dict()
    for target_name, source_name in remap.items():
        state[target_name] = checkpoint['model'][source_name]
    model.load_state_dict(state)
def test_upload_generic_package_select(tmp_path, project):
    """Uploading with select='package_file' returns the stored file's metadata."""
    path = tmp_path / file_name2
    path.write_text(file_content)
    uploaded = project.generic_packages.upload(package_name=package_name, package_version=package_version, file_name=file_name2, path=path, select='package_file')
    assert isinstance(uploaded, GenericPackage)
    assert uploaded.file_name == file_name2
    assert uploaded.size == path.stat().st_size
def load_operator(file_name=None, data_directory=None, plain_text=False):
    """Load a previously saved operator from disk.

    Args:
        file_name: name of the saved file (resolved via get_file_path).
        data_directory: optional directory containing the file.
        plain_text: if True, parse the human-readable 'Type:\\nterms' format;
            otherwise unmarshal the binary (type, terms-dict) tuple.

    Returns:
        The reconstructed Fermion/Boson/Qubit/Quad operator.

    Raises:
        TypeError: if the stored operator type is not one of the known kinds.
    """
    file_path = get_file_path(file_name, data_directory)
    # Dispatch table replaces the duplicated if/elif chains of the original.
    operator_classes = {
        'FermionOperator': FermionOperator,
        'BosonOperator': BosonOperator,
        'QubitOperator': QubitOperator,
        'QuadOperator': QuadOperator,
    }
    if plain_text:
        with open(file_path, 'r') as f:
            data = f.read()
        (operator_type, operator_terms) = data.split(':\n')
        if operator_type not in operator_classes:
            raise TypeError('Operator of invalid type.')
        return operator_classes[operator_type](operator_terms)
    with open(file_path, 'rb') as f:
        data = marshal.load(f)
    operator_type = data[0]
    operator_terms = data[1]
    if operator_type not in operator_classes:
        raise TypeError('Operator of invalid type.')
    operator_class = operator_classes[operator_type]
    operator = operator_class()
    # Rebuild term by term; marshal stored a {term: coefficient} dict.
    for (term, coefficient) in operator_terms.items():
        operator += operator_class(term, coefficient)
    return operator
def set_default_configs(cfgs, display=False):
    """Fill in missing attributes of `cfgs` with project-wide defaults.

    Every key of `default_dict` that is not already present on `cfgs` is set
    via setattr and echoed to stdout; existing values are left untouched.

    NOTE(review): `display` is accepted but never read -- confirm intent.
    """
    # Canonical defaults for training / adversarial-robustness configuration.
    default_dict = dict(lr_decline=(60, 75, 90), resume_epoch=0, load_from=None, log_level='INFO', white_box_attack=True, source_model_path=None, target_model_path=None, freeze_set=(), sampler=None, lr_dict=None, backbone='WideResNet', classifier='FC', feature_ext=False, centroids=False, begin_save=40, eval_rob=False, denoise=(), existing_ratio=1, imbalance_ratio=0.0, beta=1.0, step_bar=2.0, method='', load_modules=(), targeted=False, remark='', alpha=1.0, margin_opt=None, target_opt=None, adv_loss_opt=None, nat_loss_opt=None, activation='relu', cpu_data=False, eval_freq=5, free_bn=False, other_params=dict(), load_model=None, deffer_opt=None, warmup_epochs=0)
    # Hard-coded sanity check on the attack step size (0.0078, presumably ~2/255).
    # NOTE(review): an assert inside a "set defaults" helper is unusual -- confirm.
    assert (cfgs.test_step_size == 0.0078)
    print('[Default Configs]: ')
    for (k, v) in default_dict.items():
        # `k not in cfgs` presumably relies on cfgs supporting __contains__
        # over attribute names (e.g. an argparse.Namespace) -- verify.
        if (k not in cfgs):
            setattr(cfgs, k, v)
            print(' {} : {}'.format(k, v), end=' | ')
    print()
# NOTE(review): the decorator was garbled in the source (only '.parametrize'
# survived); restored as a pytest parametrization, which matches the signature.
@pytest.mark.parametrize('text', ('`a`', '`1`', '``', '``', '`select`', '`concat(`', '`-- `', '`/*`', '`#`'))
def test_schema_object_names_quoted(lexer, text):
    """Backtick-quoted schema object names must lex entirely as Name.Quoted."""
    tokens = list(lexer.get_tokens(text))[:(- 1)]  # drop the trailing newline token
    assert (tokens[0] == (Name.Quoted, '`'))
    assert (tokens[1] == (Name.Quoted, text[1:(- 1)]))
    assert (tokens[2] == (Name.Quoted, '`'))
    # Round-trip: concatenated token values reproduce the input exactly.
    assert (''.join((token[1] for token in tokens)) == text)
class Meteor():
    """Wrapper around the METEOR-1.5 jar, driven over stdin/stdout pipes.

    Fixes over the original: the subprocess pipes carry *bytes*, so every
    write is encoded and flushed and every read decoded (the original wrote
    str objects and never flushed); the whitespace normalisation was a no-op
    ``replace(' ', ' ')`` and is restored to collapsing double spaces; pipe
    access is guarded with ``with self.lock`` so exceptions can't leave the
    lock held.
    """

    def __init__(self):
        self.meteor_cmd = ['java', '-jar', '-Xmx2G', METEOR_JAR, '-', '-', '-stdio', '-l', 'en', '-norm']
        self.meteor_p = subprocess.Popen(self.meteor_cmd, cwd=os.path.dirname(os.path.abspath(__file__)), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # The stdio protocol is stateful (write EVAL, read scores): serialize it.
        self.lock = threading.Lock()

    def _write_line(self, line):
        # The jar speaks bytes; encode and flush so it sees the line immediately.
        self.meteor_p.stdin.write('{}\n'.format(line).encode())
        self.meteor_p.stdin.flush()

    def _read_line(self):
        return self.meteor_p.stdout.readline().decode().strip()

    def compute_score(self, gts, res):
        """Score every image; returns (corpus_score, per-image score list)."""
        assert (gts.keys() == res.keys())
        imgIds = gts.keys()
        scores = []
        eval_line = 'EVAL'
        with self.lock:
            for i in imgIds:
                assert (len(res[i]) == 1)
                stat = self._stat(res[i][0], gts[i])
                eval_line += ' ||| {}'.format(stat)
            self._write_line(eval_line)
            for i in range(0, len(imgIds)):
                scores.append(float(self._read_line()))
            # After the per-image scores the jar emits the corpus-level score.
            score = float(self._read_line())
        return (score, scores)

    def method(self):
        return 'METEOR'

    def _stat(self, hypothesis_str, reference_list):
        # Protocol line: SCORE ||| ref1 ||| ref2 ... ||| hypothesis.
        # Strip the delimiter from the hypothesis and collapse double spaces.
        hypothesis_str = hypothesis_str.replace('|||', '').replace('  ', ' ')
        score_line = ' ||| '.join(('SCORE', ' ||| '.join(reference_list), hypothesis_str))
        self._write_line(score_line)
        return self._read_line()

    def _score(self, hypothesis_str, reference_list):
        """Score a single hypothesis against its references."""
        with self.lock:
            hypothesis_str = hypothesis_str.replace('|||', '').replace('  ', ' ')
            score_line = ' ||| '.join(('SCORE', ' ||| '.join(reference_list), hypothesis_str))
            self._write_line(score_line)
            stats = self._read_line()
            eval_line = 'EVAL ||| {}'.format(stats)
            self._write_line(eval_line)
            # The jar emits two lines for this EVAL; the second carries the score.
            score = float(self._read_line())
            score = float(self._read_line())
        return score

    def __del__(self):
        with self.lock:
            self.meteor_p.stdin.close()
            self.meteor_p.kill()
            self.meteor_p.wait()
class WebBrowsingExecutor():
    """Executes web-browsing steps for an instruction using an LLM chain.

    Fixes over the original: the history lists were only created when
    mode == 'react', so 'basic' mode crashed with AttributeError on first use;
    and run() assigned the chain result to `action_element` in basic mode but
    then read `webot_chain_return`, a NameError.  Histories are now always
    initialised and both modes feed the same result variable.
    """

    def __init__(self, instruction: str, plan: str='', mode: str='react') -> None:
        self.instruction: str = instruction
        self.mode: str = mode
        # Kept for every mode (basic mode previously had none of these).
        self.thoughts_taken: List[str] = []
        self.actions_taken: List[str] = []
        self.pages_viewed: List[Any] = []
        self.plan: str = plan

    def _last_action_contains(self, token: str) -> bool:
        # True when at least one action exists and the latest mentions `token`.
        return len(self.actions_taken) > 0 and token in self.actions_taken[-1]

    def finish(self):
        """Did the most recent action declare completion?"""
        return self._last_action_contains('finish')

    def interrupt(self):
        """Did the most recent action request an interruption?"""
        return self._last_action_contains('interrupt')

    def error(self):
        """Did the most recent action report an error?"""
        return self._last_action_contains('error')

    def fail(self):
        """Did the most recent action report failure?"""
        return self._last_action_contains('fail')

    def action_history(self):
        """Render the taken actions (and thoughts, in react mode) as one string."""
        if (self.mode == 'basic'):
            action_history = 'Action: '
            for action in self.actions_taken:
                action_history += (action + ' -> ')
            return action_history
        elif (self.mode == 'react'):
            action_history = ''
            for (thought, action) in zip(self.thoughts_taken, self.actions_taken):
                action_history += (((thought + ' -> ') + action) + ' -> ')
            return action_history
        else:
            raise ValueError(f'The mode {self.mode} is not supported')

    def run(self, page_info: Any, llm: BaseLanguageModel) -> Dict[(str, Any)]:
        """Take one browsing step: render the page for the LLM, query the chain,
        record the resulting thought/action, and return the chain output."""
        model = HTMLDataModel.from_raw_data(raw_data=page_info)
        processed_html = model.get_llm_side_data()
        self.pages_viewed.append(processed_html)
        if (self.mode == 'basic'):
            chain = WebotChain.from_llm(llm)
            webot_chain_return = chain({'user_query': self.instruction, 'previous_actions': self.actions_taken, 'page_info': processed_html})
        elif (self.mode == 'react'):
            chain = ReActWebotChain.from_llm(llm)
            print('self.plan:', self.plan)
            webot_chain_return = chain({'user_query': self.instruction, 'plan': self.plan, 'previous_actions': self.actions_taken, 'previous_thoughts': self.thoughts_taken, 'page_info': processed_html})
        else:
            raise ValueError(f'The mode {self.mode} is not supported')
        # NOTE(review): assumes both chains return a dict with an 'action' key;
        # 'thought' appears to be produced only by the ReAct chain -- confirm.
        if 'thought' in webot_chain_return:
            self.thoughts_taken.append(webot_chain_return['thought'])
        self.actions_taken.append(webot_chain_return['action'])
        print('actions_taken:', self.actions_taken)
        return webot_chain_return
def nms(dets, thresh, force_cpu=False):
    """Non-maximum suppression dispatcher.

    dets: detection array; 9 columns are treated as quadrilateral boxes
    (presumably 8 coords + score -- confirm), otherwise axis-aligned boxes.
    thresh: overlap threshold for suppression.

    Returns indices of kept detections ([] for empty input).

    NOTE(review): when the GPU path is disabled (cfg.USE_GPU_NMS is False or
    force_cpu=True) the function falls through and implicitly returns None --
    a CPU fallback appears to be missing; confirm against callers.
    """
    if (dets.shape[0] == 0):
        return []
    if (cfg.USE_GPU_NMS and (not force_cpu)):
        # 9 columns => quadrilateral detections; use the quad NMS kernel.
        if (dets.shape[1] == 9):
            return gpu_nms_quad(dets, thresh, device_id=cfg.GPU_ID)
        else:
            return gpu_nms(dets, thresh, device_id=cfg.GPU_ID)
class Profile(models.Model):
    """Per-user profile data, linked one-to-one to the auth User."""

    # Deleting the User cascades to (deletes) the profile.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    bio = models.CharField(max_length=240, blank=True)
    city = models.CharField(max_length=30, blank=True)
    # null=True keeps the DB column nullable when no avatar was uploaded.
    avatar = models.ImageField(null=True, blank=True)

    def __str__(self):
        return self.user.username
def check_request(headers: Headers) -> str:
    """Validate a WebSocket opening-handshake request, returning Sec-WebSocket-Key.

    Raises InvalidUpgrade / InvalidHeader / InvalidHeaderValue when any
    handshake header is missing, duplicated, or malformed.
    """
    # Flatten all Connection header values into one option list.
    connection: List[ConnectionOption] = [
        option
        for value in headers.get_all('Connection')
        for option in parse_connection(value)
    ]
    if all(option.lower() != 'upgrade' for option in connection):
        raise InvalidUpgrade('Connection', ', '.join(connection))
    upgrade: List[UpgradeProtocol] = [
        protocol
        for value in headers.get_all('Upgrade')
        for protocol in parse_upgrade(value)
    ]
    # Exactly one Upgrade protocol is allowed, and it must be 'websocket'.
    if len(upgrade) != 1 or upgrade[0].lower() != 'websocket':
        raise InvalidUpgrade('Upgrade', ', '.join(upgrade))
    try:
        s_w_key = headers['Sec-WebSocket-Key']
    except KeyError as exc:
        raise InvalidHeader('Sec-WebSocket-Key') from exc
    except MultipleValuesError as exc:
        raise InvalidHeader('Sec-WebSocket-Key', 'more than one Sec-WebSocket-Key header found') from exc
    try:
        raw_key = base64.b64decode(s_w_key.encode(), validate=True)
    except binascii.Error as exc:
        raise InvalidHeaderValue('Sec-WebSocket-Key', s_w_key) from exc
    # A valid key is exactly 16 random bytes, base64-encoded.
    if len(raw_key) != 16:
        raise InvalidHeaderValue('Sec-WebSocket-Key', s_w_key)
    try:
        s_w_version = headers['Sec-WebSocket-Version']
    except KeyError as exc:
        raise InvalidHeader('Sec-WebSocket-Version') from exc
    except MultipleValuesError as exc:
        raise InvalidHeader('Sec-WebSocket-Version', 'more than one Sec-WebSocket-Version header found') from exc
    if s_w_version != '13':
        raise InvalidHeaderValue('Sec-WebSocket-Version', s_w_version)
    return s_w_key
class EqualHeaviside(_Heaviside):
    """Expression-tree node for a `<=` comparison."""

    def __init__(self, left, right):
        super().__init__('<=', left, right)

    def __str__(self):
        """Human-readable form, e.g. 'x <= y'."""
        return f'{self.left!s} <= {self.right!s}'

    def _binary_evaluate(self, left, right):
        """Elementwise left <= right, suppressing NaN-comparison warnings."""
        with np.errstate(invalid='ignore'):
            return left <= right
def mobilenet_v1_base(inputs, final_endpoint='Conv2d_13_pointwise', min_depth=8, depth_multiplier=1.0, conv_defs=None, output_stride=None, use_explicit_padding=False, scope=None):
    """Builds the MobilenetV1 backbone up to `final_endpoint`.

    Args:
        inputs: input image tensor.
        final_endpoint: name of the layer at which to stop and return.
        min_depth: floor on per-layer channel count after multiplier scaling.
        depth_multiplier: multiplier applied to every layer's channel count.
        conv_defs: optional list of Conv/DepthSepConv layer definitions;
            defaults to MOBILENETV1_CONV_DEFS.
        output_stride: if set (must be 8, 16 or 32), switch to atrous
            convolution once the cumulative stride reaches this value so the
            output resolution is preserved.
        use_explicit_padding: pad inputs manually and use 'VALID' convolutions.
        scope: optional variable scope name.

    Returns:
        (net, end_points): the tensor at `final_endpoint` and a dict of all
        intermediate endpoint tensors.

    Raises:
        ValueError: for a non-positive depth_multiplier, a disallowed
            output_stride, an unknown conv type, or an unreachable endpoint.
    """
    # Scale a channel count by the multiplier, but never below min_depth.
    depth = (lambda d: max(int((d * depth_multiplier)), min_depth))
    end_points = {}
    if (depth_multiplier <= 0):
        raise ValueError('depth_multiplier is not greater than zero.')
    if (conv_defs is None):
        conv_defs = MOBILENETV1_CONV_DEFS
    if ((output_stride is not None) and (output_stride not in [8, 16, 32])):
        raise ValueError('Only allowed output_stride values are 8, 16, 32.')
    padding = 'SAME'
    if use_explicit_padding:
        padding = 'VALID'
    with tf.variable_scope(scope, 'MobilenetV1', [inputs]):
        with slim.arg_scope([slim.conv2d, slim.separable_conv2d], padding=padding):
            # current_stride tracks the cumulative stride; rate is the atrous
            # dilation accumulated after output_stride has been reached.
            current_stride = 1
            rate = 1
            net = inputs
            for (i, conv_def) in enumerate(conv_defs):
                end_point_base = ('Conv2d_%d' % i)
                if ((output_stride is not None) and (current_stride == output_stride)):
                    # Target stride reached: keep stride 1 and dilate instead.
                    layer_stride = 1
                    layer_rate = rate
                    rate *= conv_def.stride
                else:
                    layer_stride = conv_def.stride
                    layer_rate = 1
                    current_stride *= conv_def.stride
                if isinstance(conv_def, Conv):
                    end_point = end_point_base
                    if use_explicit_padding:
                        net = _fixed_padding(net, conv_def.kernel)
                    net = slim.conv2d(net, depth(conv_def.depth), conv_def.kernel, stride=conv_def.stride, scope=end_point)
                    end_points[end_point] = net
                    if (end_point == final_endpoint):
                        return (net, end_points)
                elif isinstance(conv_def, DepthSepConv):
                    # Depthwise stage, then a 1x1 pointwise stage.
                    end_point = (end_point_base + '_depthwise')
                    if use_explicit_padding:
                        net = _fixed_padding(net, conv_def.kernel, layer_rate)
                    net = slim.separable_conv2d(net, None, conv_def.kernel, depth_multiplier=1, stride=layer_stride, rate=layer_rate, scope=end_point)
                    end_points[end_point] = net
                    if (end_point == final_endpoint):
                        return (net, end_points)
                    end_point = (end_point_base + '_pointwise')
                    net = slim.conv2d(net, depth(conv_def.depth), [1, 1], stride=1, scope=end_point)
                    end_points[end_point] = net
                    if (end_point == final_endpoint):
                        return (net, end_points)
                else:
                    raise ValueError(('Unknown convolution type %s for layer %d' % (conv_def.ltype, i)))
    raise ValueError(('Unknown final endpoint %s' % final_endpoint))
class DontReadFromInput(TextIO):
    """Stand-in for stdin while output is captured.

    Any attempt to actually read raises OSError, so tests that accidentally
    wait on stdin fail loudly instead of hanging; every other file operation
    is either a no-op or an UnsupportedOperation.
    """

    def encoding(self) -> str:
        return sys.__stdin__.encoding

    def read(self, size: int = -1) -> str:
        raise OSError('pytest: reading from stdin while output is captured! Consider using `-s`.')

    # readline shares read's behavior (always raises).
    readline = read

    def readlines(self, hint: Optional[int] = -1) -> List[str]:
        raise OSError('pytest: reading from stdin while output is captured! Consider using `-s`.')

    def __next__(self) -> str:
        return self.readline()

    def __iter__(self) -> Iterator[str]:
        return self

    # --- capability flags: this pseudo-file can do nothing ---

    def readable(self) -> bool:
        return False

    def writable(self) -> bool:
        return False

    def seekable(self) -> bool:
        return False

    def isatty(self) -> bool:
        return False

    def close(self) -> None:
        pass

    # --- everything else is an explicit UnsupportedOperation ---

    def flush(self) -> None:
        raise UnsupportedOperation('redirected stdin is pseudofile, has no flush()')

    def fileno(self) -> int:
        raise UnsupportedOperation('redirected stdin is pseudofile, has no fileno()')

    def seek(self, offset: int, whence: int = 0) -> int:
        raise UnsupportedOperation('redirected stdin is pseudofile, has no seek(int)')

    def tell(self) -> int:
        raise UnsupportedOperation('redirected stdin is pseudofile, has no tell()')

    def truncate(self, size: Optional[int] = None) -> int:
        raise UnsupportedOperation('cannot truncate stdin')

    def write(self, data: str) -> int:
        raise UnsupportedOperation('cannot write to stdin')

    def writelines(self, lines: Iterable[str]) -> None:
        raise UnsupportedOperation('Cannot write to stdin')

    def __enter__(self) -> 'DontReadFromInput':
        return self

    def __exit__(self, type: Optional[Type[BaseException]], value: Optional[BaseException], traceback: Optional[TracebackType]) -> None:
        pass

    def buffer(self) -> BinaryIO:
        return self
class RunCommand(BaseTransformersCLICommand):
    """CLI command that streams entries from a data file through a pipeline."""

    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach the 'run' subcommand and its arguments to `parser`.

        Declared static: it configures the parser before any instance exists
        (the original definition lacked the decorator, so calling it through
        the class would have bound `parser` as `self`).
        """
        run_parser = parser.add_parser('run', help='Run a pipeline through the CLI')
        run_parser.add_argument('--task', choices=SUPPORTED_TASKS.keys(), help='Task to run')
        run_parser.add_argument('--input', type=str, help='Path to the file to use for inference')
        run_parser.add_argument('--output', type=str, help='Path to the file that will be used post to write results.')
        run_parser.add_argument('--model', type=str, help='Name or path to the model to instantiate.')
        run_parser.add_argument('--config', type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument('--tokenizer', type=str, help='Name of the tokenizer to use. (default: same as the model name)')
        run_parser.add_argument('--column', type=str, help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)')
        run_parser.add_argument('--format', type=str, default='infer', choices=PipelineDataFormat.SUPPORTED_FORMATS, help='Input format to read from')
        run_parser.add_argument('--device', type=int, default=(- 1), help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)')
        run_parser.add_argument('--overwrite', action='store_true', help='Allow overwriting the output file.')
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        """Feed every reader entry through the pipeline and persist the outputs."""
        (nlp, outputs) = (self._nlp, [])
        for entry in self._reader:
            output = (nlp(**entry) if self._reader.is_multi_columns else nlp(entry))
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output
        if self._nlp.binary_output:
            # Some pipelines emit non-JSON-serializable results; persist as binary.
            binary_path = self._reader.save_binary(outputs)
            logger.warning('Current pipeline requires output to be in binary format, saving at {}'.format(binary_path))
        else:
            self._reader.save(outputs)
class FitbitOAuth2(BaseOAuth2):
    """Fitbit OAuth2 authentication backend.

    NOTE(review): the endpoint URL string literals were stripped from this
    file (bare opening quotes, leaving invalid syntax); they are restored
    here from the Fitbit OAuth 2.0 API documentation -- verify before release.
    """
    name = 'fitbit'
    AUTHORIZATION_URL = 'https://www.fitbit.com/oauth2/authorize'
    ACCESS_TOKEN_URL = 'https://api.fitbit.com/oauth2/token'
    ACCESS_TOKEN_METHOD = 'POST'
    REFRESH_TOKEN_URL = 'https://api.fitbit.com/oauth2/token'
    DEFAULT_SCOPE = ['profile']
    # Fitbit identifies users by their encodedId.
    ID_KEY = 'encodedId'
    REDIRECT_STATE = False
    EXTRA_DATA = [('expires_in', 'expires'), ('refresh_token', 'refresh_token', True), ('encodedId', 'id'), ('displayName', 'username')]

    def get_user_details(self, response):
        """Map the Fitbit profile payload to the common user-details dict.

        Fitbit does not expose an email address through this scope.
        """
        return {'username': response.get('displayName'), 'email': ''}

    def user_data(self, access_token, *args, **kwargs):
        """Fetch the authenticated user's profile from the Fitbit API."""
        auth_header = {'Authorization': ('Bearer %s' % access_token)}
        return self.get_json('https://api.fitbit.com/1/user/-/profile.json', headers=auth_header)['user']

    def auth_headers(self):
        """HTTP Basic auth header built from the client id/secret pair."""
        tokens = '{}:{}'.format(*self.get_key_and_secret())
        tokens = base64.urlsafe_b64encode(tokens.encode())
        tokens = tokens.decode()
        return {'Authorization': f'Basic {tokens}'}
def test(model, dataloader):
    """Evaluate keypoint transfer over `dataloader`, timing each forward pass."""
    average_meter = AverageMeter(dataloader.dataset.benchmark)
    elapsed_ms = []
    for idx, batch in enumerate(dataloader):
        src_img = batch['src_img'].cuda()
        trg_img = batch['trg_img'].cuda()
        src_kps = batch['src_kps'].cuda()
        n_pts = batch['n_pts'].cuda()
        cls_id = batch['category_id']
        # Time the correspondence + keypoint-transfer computation (ms).
        tic = time.time_ns()
        corr_matrix, scale_sels = model(src_img, trg_img)
        prd_kps = Geometry.transfer_kps_diff(corr_matrix, src_kps, n_pts, normalized=False)
        elapsed_ms.append((time.time_ns() - tic) // 1000000)
        eval_result = Evaluator.evaluate(Geometry.unnormalize_kps(prd_kps), batch)
        average_meter.update(eval_result)
        average_meter.write_test_process(idx, len(dataloader))
    print('Average time per epoch:', sum(elapsed_ms) / len(elapsed_ms))
    return average_meter.get_test_result()
class TestUtils(TestCase):
    """Unit tests for the _build_date/_build_time/_build_offset helpers."""

    # NOTE(review): the decorator below was garbled in the source (only its
    # argument list survived); restored as parameterized.expand, which matches
    # the (self, name, f) test signature -- confirm against the file's imports.
    @parameterized.expand([('_build_date', _build_date), ('_build_time', _build_time)])
    def test_build_none(self, name, f):
        """Builders must reject a call with neither a value nor kwargs."""
        with self.assertRaises(ValueError):
            f(None, {})

    def test_build_offset_default(self):
        """With no value and no kwargs, the provided default is returned as-is."""
        default = object()
        self.assertIs(default, _build_offset(None, {}, default))

    def test_build_offset_both(self):
        """Supplying both a timedelta and kwargs is ambiguous and must raise."""
        with self.assertRaises(ValueError):
            _build_offset(datetime.timedelta(minutes=1), {'minutes': 1}, None)

    def test_build_offset_exc(self):
        """A non-timedelta value must raise TypeError."""
        with self.assertRaises(TypeError):
            _build_offset(object(), {}, None)

    def test_build_offset_kwargs(self):
        kwargs = {'minutes': 1}
        self.assertEqual(_build_offset(None, kwargs, None), datetime.timedelta(**kwargs))

    def test_build_offset_td(self):
        td = datetime.timedelta(minutes=1)
        self.assertEqual(_build_offset(td, {}, None), td)

    def test_build_date_both(self):
        """Supplying both a date and kwargs is ambiguous and must raise."""
        with self.assertRaises(ValueError):
            _build_date(datetime.date(year=2014, month=9, day=25), {'year': 2014, 'month': 9, 'day': 25})

    def test_build_date_kwargs(self):
        kwargs = {'year': 2014, 'month': 9, 'day': 25}
        self.assertEqual(_build_date(None, kwargs), datetime.date(**kwargs))

    def test_build_date_date(self):
        date = datetime.date(year=2014, month=9, day=25)
        self.assertEqual(_build_date(date, {}), date)

    def test_build_time_both(self):
        """Supplying both a time and kwargs is ambiguous and must raise."""
        with self.assertRaises(ValueError):
            _build_time(datetime.time(hour=1, minute=5), {'hour': 1, 'minute': 5})

    def test_build_time_kwargs(self):
        kwargs = {'hour': 1, 'minute': 5}
        self.assertEqual(_build_time(None, kwargs), datetime.time(**kwargs))
class ReadInputProtein():
    """Container for protein structure data parsed from a PDB file."""

    def __init__(self, atoms: List[Atom], bonds: Optional[List[Bond]]=None, coords: Optional[np.ndarray]=None, residues: Optional[List[str]]=None, name: Optional[str]=None):
        self.atoms = atoms
        self.bonds = bonds
        self.coords = coords
        self.name = name
        self.residues = residues

    @classmethod
    def from_pdb(cls, file_name: str, name: Optional[str]=None):
        """Parse ATOM/HETATM/CONECT records of a PDB file into a ReadInputProtein.

        Declared as a classmethod: the original definition took `cls` but
        lacked the decorator, so ``ReadInputProtein.from_pdb(path)`` would have
        bound the path to `cls`.
        """
        with open(file_name, 'r') as pdb:
            lines = pdb.readlines()
        coords = []
        atoms = []
        bonds = []
        residue_names = []
        atom_count = 0
        for line in lines:
            if (('ATOM' in line) or ('HETATM' in line)):
                # PDB columns 77-78 hold the element symbol; fall back to the
                # atom-name field when the element column is blank.
                atomic_symbol = str(line[76:78])
                atomic_symbol = re.sub('[0-9]+', '', atomic_symbol).strip()
                if not atomic_symbol:
                    atomic_symbol = str(line.split()[2])
                    atomic_symbol = re.sub('[0-9]+', '', atomic_symbol)
                # Keep two-letter halogens intact; otherwise take the first letter.
                if ((atomic_symbol.lower() != 'cl') and (atomic_symbol.lower() != 'br')):
                    atomic_symbol = atomic_symbol[0]
                atom_name = f'{atomic_symbol}{atom_count}'
                qube_atom = Atom(atomic_number=Element().number(atomic_symbol), atom_index=atom_count, atom_name=atom_name, formal_charge=0, aromatic=False, bonds=[])
                atoms.append(qube_atom)
                residue_names.append(str(line.split()[3]))
                atom_count += 1
                # Fixed-width x/y/z coordinate columns.
                coords.append([float(line[30:38]), float(line[38:46]), float(line[46:54])])
            elif ('CONECT' in line):
                conect_terms = line.split()
                # First index is the source atom; the rest are bonded partners.
                for atom in conect_terms[2:]:
                    if int(atom):
                        bond = Bond(atom1_index=(int(conect_terms[1]) - 1), atom2_index=(int(atom) - 1), bond_order=1, aromatic=False)
                        bonds.append(bond)
                        atoms[(int(conect_terms[1]) - 1)].bonds.append((int(atom) - 1))
        coords = np.array(coords)
        # Collapse consecutive duplicates so each residue appears once, in order.
        residues = [res for (res, group) in groupby(residue_names)]
        if name is None:
            name = Path(file_name).stem
        return cls(atoms=atoms, bonds=bonds, coords=coords, residues=residues, name=name)
def translate_pattern(glob):
    """Compile a setuptools-style glob into a compiled regex over full paths.

    '*' and '?' never cross a path separator; '**' matches any number of
    nested directories (everything, when it is the last chunk); '[...]' and
    '[!...]' are character classes.
    """
    sep = re.escape(os.sep)
    # Any single character except the path separator.
    valid_char = '[^%s]' % (sep,)
    chunks = glob.split(os.path.sep)
    last_index = len(chunks) - 1
    pat = ''
    for c, chunk in enumerate(chunks):
        is_last = (c == last_index)
        if chunk == '**':
            # Trailing '**' swallows everything; otherwise it matches zero or
            # more whole directory components.
            pat += '.*' if is_last else '(?:%s+%s)*' % (valid_char, sep)
            continue
        pos = 0
        chunk_len = len(chunk)
        while pos < chunk_len:
            ch = chunk[pos]
            if ch == '*':
                pat += valid_char + '*'
            elif ch == '?':
                pat += valid_char
            elif ch == '[':
                # Find the closing ']', honoring a leading '!' (negation) and a
                # literal ']' appearing first inside the class.
                scan = pos + 1
                if scan < chunk_len and chunk[scan] == '!':
                    scan += 1
                if scan < chunk_len and chunk[scan] == ']':
                    scan += 1
                while scan < chunk_len and chunk[scan] != ']':
                    scan += 1
                if scan >= chunk_len:
                    # Unterminated class: treat the '[' as a literal.
                    pat += re.escape(ch)
                else:
                    inner = chunk[pos + 1:scan]
                    if inner[0] == '!':
                        char_class = '^' + re.escape(inner[1:])
                    else:
                        char_class = re.escape(inner)
                    pat += '[%s]' % (char_class,)
                    pos = scan
            else:
                pat += re.escape(ch)
            pos += 1
        if not is_last:
            pat += sep
    pat += '\\Z'
    return re.compile(pat, flags=(re.MULTILINE | re.DOTALL))
class ParameterValidator(KeywordValidator):
    """Validates an OpenAPI parameter object's inline schema."""

    @property
    def schema_validator(self) -> SchemaValidator:
        """The shared 'schema' keyword validator from the registry.

        Declared as a property: the original bare method took no arguments,
        yet __call__ invoked ``self.schema_validator(schema)`` with one,
        which would raise TypeError at runtime.
        """
        return cast(SchemaValidator, self.registry['schema'])

    def __call__(self, parameter: SchemaPath) -> Iterator[ValidationError]:
        """Yield validation errors for the parameter's 'schema' member, if present."""
        if ('schema' in parameter):
            schema = (parameter / 'schema')
            (yield from self.schema_validator(schema))
# NOTE(review): the decorator was garbled in the source (only '.functions'
# survived); restored as the pytest marker used throughout this test suite.
@pytest.mark.functions
def test_round_to_nearest_half(dataframe):
    """round_to_fraction with denominator 2 rounds Bell__Chart to the nearest half."""
    df = dataframe.round_to_fraction('Bell__Chart', 2)
    assert (df.iloc[(0, 1)] == 1.0)
    assert (df.iloc[(1, 1)] == 2.5)
    assert (df.iloc[(2, 1)] == 3.0)
    assert (df.iloc[(3, 1)] == 1.0)
    assert (df.iloc[(4, 1)] == 2.5)
    assert (df.iloc[(5, 1)] == 3.0)
    assert (df.iloc[(6, 1)] == 1.0)
    assert (df.iloc[(7, 1)] == 2.5)
    assert (df.iloc[(8, 1)] == 3.0)
# NOTE(review): the decorator was garbled in the source (only '.parametrize'
# survived); restored as a pytest parametrization matching the signature.
@pytest.mark.parametrize('category', ['linear', 'linear_per_time', 'angular', 'angular_per_time', 'scale', 'scale_per_time', 'time'])
def test_units_map__category(category):
    """Every category yields a non-trivial map whose entries share that category."""
    units_map = get_units_map(category=category)
    assert (len(units_map) > 1)
    for item in units_map.values():
        assert (item.category == category)
def move_out_64(library, session, space, offset, length, data, extended=False):
    """Write `length` 64-bit values from `data` to the given VISA address space.

    Dispatches to viMoveOut64Ex when extended addressing is requested.
    """
    # Marshal the Python sequence into a ctypes array of ViUInt64.
    out_buffer = (ViUInt64 * length)(*tuple(data))
    if extended:
        return library.viMoveOut64Ex(session, space, offset, length, out_buffer)
    return library.viMoveOut64(session, space, offset, length, out_buffer)
def test_is_engineering():
    """A CRS built from an ENGCRS WKT definition must report is_engineering."""
    eng_wkt = 'ENGCRS["A construction site CRS",\nEDATUM["P1",ANCHOR["Peg in south corner"]],\nCS[Cartesian,2],\nAXIS["site east",southWest,ORDER[1]],\nAXIS["site north",southEast,ORDER[2]],\nLENGTHUNIT["metre",1.0],\nTIMEEXTENT["date/time t1","date/time t2"]]'
    crs = CRS(eng_wkt)
    assert crs.is_engineering
class DjangoRoleTest(ProvyTestCase):
def setUp(self):
super(DjangoRoleTest, self).setUp()
self.role = DjangoRole(prov=None, context={'owner': 'some-owner'})
self.supervisor_role = SupervisorRole(prov=None, context=self.role.context)
def installs_necessary_packages_to_provision(self):
with self.using_stub(AptitudeRole) as aptitude, self.mock_role_method('register_template_loader'), self.using_stub(PipRole) as pip:
self.role.provision()
self.role.register_template_loader.assert_called_with('provy.more.debian.web')
aptitude.ensure_package_installed.assert_called_with('python-mysqldb')
self.assertEqual(pip.ensure_package_installed.mock_calls, [call('django'), call('gunicorn')])
def installs_necessary_packages_to_provision_with_version(self):
with self.using_stub(AptitudeRole) as aptitude, self.mock_role_method('register_template_loader'), self.using_stub(PipRole) as pip:
self.role.context['django-version'] = '1.5.1'
self.role.provision()
self.role.register_template_loader.assert_called_with('provy.more.debian.web')
aptitude.ensure_package_installed.assert_called_with('python-mysqldb')
self.assertEqual(pip.ensure_package_installed.mock_calls, [call('django', version=self.role.context['django-version']), call('gunicorn')])
def requires_a_settings_path_to_create_site(self):
def create_site():
with self.role.create_site('some-site') as site:
site.settings_path = None
self.assertRaises(RuntimeError, create_site)
def prepares_site_under_supervisor(self):
with self.using_stub(SupervisorRole), self.role.using(SupervisorRole) as supervisor_role:
supervisor_role.log_folder = '/supervisor/log/folder'
with self.role.create_site('some-site') as site:
site.settings_path = '/some/settings.path'
self.assertTrue(site.use_supervisor)
self.assertEqual(site.supervisor_log_folder, supervisor_role.log_folder)
self.assertFalse(site.daemon)
self.assertFalse(site.auto_start)
def prepares_site_without_supervisor(self):
with self.role.create_site('some-site') as site:
site.settings_path = '/some/settings.path'
self.assertFalse(site.use_supervisor)
self.assertEqual(site.supervisor_log_folder, '/var/log')
self.assertTrue(site.daemon)
self.assertTrue(site.auto_start)
def guarantees_that_site_is_prepared_for_supervisor(self):
with self.using_stub(SupervisorRole), self.role.using(SupervisorRole) as supervisor_role:
supervisor_role.log_folder = '/supervisor/log/folder'
with self.role.create_site('some-site') as site:
site.settings_path = '/some/settings.path'
self.assertIn(site, self.role.context[SITES_KEY])
self.assertTrue(self.role.restart_supervisor_on_changes)
def guarantees_that_site_is_prepared_for_standalone(self):
with self.role.create_site('some-site') as site:
site.settings_path = '/some/settings.path'
self.assertIn(site, self.role.context[SITES_KEY])
self.assertFalse(self.role.restart_supervisor_on_changes)
def does_nothing_on_cleanup_if_nothing_done(self):
self.role.cleanup()
def installs_each_configured_site(self):
with self.using_stub(SupervisorRole), self.role.using(SupervisorRole) as supervisor_role:
supervisor_role.log_folder = '/supervisor/log/folder'
with self.role.create_site('foo_site') as foo_site:
foo_site.settings_path = '/some/settings.path'
with self.role.create_site('bar_site') as bar_site:
bar_site.settings_path = '/some/settings.path'
with self.mock_role_methods('_update_init_script', '_update_settings', '_update_supervisor_program', '_restart'), self.using_stub(SupervisorRole) as supervisor_role:
self.role._update_init_script.return_value = True
self.role._update_settings.return_value = True
self.role.cleanup()
self.assertEqual(self.role._update_init_script.mock_calls, [call(foo_site), call(bar_site)])
self.assertEqual(self.role._update_settings.mock_calls, [call(foo_site), call(bar_site)])
self.assertEqual(self.role._update_supervisor_program.mock_calls, [call(foo_site), call(bar_site)])
self.assertEqual(self.role._restart.mock_calls, [call(foo_site), call(bar_site)])
supervisor_role.ensure_restart.assert_called_with()
def installs_each_configured_site_without_supervisor(self):
    """Without supervisor in use, cleanup() restarts the site directly and
    never touches supervisor programs or supervisor restart."""
    with self.role.create_site('foo_site') as foo_site:
        foo_site.settings_path = '/some/settings.path'
    with self.mock_role_methods('_update_init_script', '_update_settings', '_update_supervisor_program', '_restart'), self.using_stub(SupervisorRole) as supervisor_role:
        self.role._update_init_script.return_value = True
        self.role._update_settings.return_value = True
        self.role.cleanup()
        self.role._restart.assert_called_once_with(foo_site)
        self.assertFalse(self.role._update_supervisor_program.called)
        self.assertFalse(supervisor_role.ensure_restart.called)
def doesnt_restart_on_cleanup_if_settings_not_updated(self):
    """No restart should happen when neither the init script nor the
    settings file actually changed."""
    with self.role.create_site('foo_site') as foo_site:
        foo_site.settings_path = '/some/settings.path'
    with self.mock_role_methods('_update_init_script', '_update_settings', '_restart'):
        self.role._update_init_script.return_value = False
        self.role._update_settings.return_value = False
        self.role.cleanup()
        self.assertFalse(self.role._restart.called)
def updates_supervisor_program_with_site(self):
    """One supervisor program is registered per site process, with ports
    allocated sequentially from starting_port (8000, 8001, ...)."""
    website = self.role.create_site('foo-site')
    website.starting_port = 8000
    website.processes = 2
    website.settings_path = '/some/settings/path/settings.conf'
    website.user = 'some-user'
    website.supervisor_log_folder = '/some/log/folder'
    programs = [MagicMock(), MagicMock()]
    with self.using_stub(SupervisorRole) as supervisor_role:
        # Each with_program context yields the next mock program.
        mock_with_program = supervisor_role.with_program.return_value
        mock_with_program.__enter__.side_effect = programs
        self.role._update_supervisor_program(website)
    # First process: port 8000.
    self.assertEqual(programs[0].directory, '/some/settings/path')
    self.assertEqual(programs[0].command, '/etc/init.d/foo-site-8000 start')
    self.assertEqual(programs[0].name, 'foo-site-8000')
    self.assertEqual(programs[0].number_of_processes, 1)
    self.assertEqual(programs[0].user, website.user)
    self.assertEqual(programs[0].log_folder, website.supervisor_log_folder)
    # Second process: port 8001.
    self.assertEqual(programs[1].directory, '/some/settings/path')
    self.assertEqual(programs[1].command, '/etc/init.d/foo-site-8001 start')
    self.assertEqual(programs[1].name, 'foo-site-8001')
    self.assertEqual(programs[1].number_of_processes, 1)
    self.assertEqual(programs[1].user, website.user)
    self.assertEqual(programs[1].log_folder, website.supervisor_log_folder)
def restarts_site_when_running(self):
    """_restart stops then starts the init script when a pid file exists."""
    website = self.role.create_site('bar-site')
    website.pid_file_path = '/foo/'
    with self.mock_role_methods('execute', 'remote_exists'):
        self.role.remote_exists.return_value = True
        self.role._restart(website)
        self.assertEqual(self.role.remote_exists.mock_calls, [call('/foo/bar-site_8000.pid')])
        self.assertEqual(self.role.execute.mock_calls, [call('/etc/init.d/bar-site-8000 stop', stdout=False, sudo=True), call('/etc/init.d/bar-site-8000 start', stdout=False, sudo=True)])
def restarts_site_when_not_running(self):
    """_restart only issues 'start' when no pid file is found remotely."""
    website = self.role.create_site('bar-site')
    website.pid_file_path = '/foo/'
    with self.mock_role_methods('execute', 'remote_exists'):
        self.role.remote_exists.return_value = False
        self.role._restart(website)
        self.assertEqual(self.role.remote_exists.mock_calls, [call('/foo/bar-site_8000.pid')])
        self.assertEqual(self.role.execute.mock_calls, [call('/etc/init.d/bar-site-8000 start', stdout=False, sudo=True)])
def doesnt_restart_when_not_autostarting(self):
    """_restart is a complete no-op for sites with auto_start disabled."""
    website = self.role.create_site('bar-site')
    website.auto_start = False
    with self.mock_role_methods('execute', 'remote_exists'):
        self.role._restart(website)
        self.assertFalse(self.role.remote_exists.called)
        self.assertFalse(self.role.execute.called)
def updates_settings(self):
    """_update_settings renders local_settings.py next to the site settings
    via update_file and propagates update_file's result."""
    with self.role.create_site('bar-site') as website:
        website.settings_path = '/foo/settings.py'
    with self.mock_role_method('update_file'):
        self.role.update_file.return_value = 'some result'
        result = self.role._update_settings(website)
        self.assertEqual(result, 'some result')
        self.role.update_file.assert_called_once_with('local.settings.template', '/foo/local_settings.py', owner=None, sudo=True, options={'settings': {}, 'settings_file': 'settings'})
def updates_init_script(self):
    """_update_init_script renders the init script, makes it executable and
    registers it with update-rc.d when auto-start is enabled."""
    with self.role.create_site('bar-site') as website:
        website.settings_path = '/foo/settings.py'
    with self.mock_role_methods('execute', 'update_file'):
        self.role.update_file.return_value = True
        result = self.role._update_init_script(website)
        self.assertTrue(result)
        self.role.update_file.assert_called_once_with('website.init.template', '/etc/init.d/bar-site-8000', options={'pid_file_path': '/var/run', 'name': 'bar-site', 'threads': 1, 'host': '0.0.0.0', 'settings_directory': '/foo', 'port': 8000, 'user': None, 'daemon': True}, sudo=True, owner=None)
        self.assertEqual(self.role.execute.mock_calls, [call('chmod +x /etc/init.d/bar-site-8000', sudo=True, stdout=False), call('update-rc.d bar-site-8000 defaults', sudo=True, stdout=False)])
def updates_init_script_without_auto_start(self):
    """With auto_start off, the init script is installed and chmod'ed but
    NOT registered with update-rc.d."""
    with self.role.create_site('bar-site') as website:
        website.settings_path = '/foo/settings.py'
        website.auto_start = False
    with self.mock_role_methods('execute', 'update_file'):
        self.role.update_file.return_value = True
        result = self.role._update_init_script(website)
        self.assertTrue(result)
        self.role.update_file.assert_called_once_with('website.init.template', '/etc/init.d/bar-site-8000', options={'pid_file_path': '/var/run', 'name': 'bar-site', 'threads': 1, 'host': '0.0.0.0', 'settings_directory': '/foo', 'port': 8000, 'user': None, 'daemon': True}, sudo=True, owner=None)
        self.assertEqual(self.role.execute.mock_calls, [call('chmod +x /etc/init.d/bar-site-8000', sudo=True, stdout=False)])
def doesnt_update_init_script_if_update_file_fails(self):
    """If the template render reports no change, no shell commands run and
    _update_init_script returns False."""
    with self.role.create_site('bar-site') as website:
        website.settings_path = '/foo/settings.py'
    with self.mock_role_methods('execute', 'update_file'):
        self.role.update_file.return_value = False
        result = self.role._update_init_script(website)
        self.assertFalse(result)
        self.role.update_file.assert_called_once_with('website.init.template', '/etc/init.d/bar-site-8000', options={'pid_file_path': '/var/run', 'name': 'bar-site', 'threads': 1, 'host': '0.0.0.0', 'settings_directory': '/foo', 'port': 8000, 'user': None, 'daemon': True}, sudo=True, owner=None)
        self.assertFalse(self.role.execute.called)
class DSDChunk(DSFChunk):
    """The leading 'DSD ' chunk of a DSF file: fixed 28 bytes holding the
    header tag, chunk size, total file size and metadata-chunk offset."""

    CHUNK_SIZE = 28

    total_size = 0
    # (sic) 'metdata' spelling kept: external code may reference this name.
    offset_metdata_chunk = 0

    def __init__(self, fileobj, create=False):
        super(DSDChunk, self).__init__(fileobj, create)
        if create:
            # Fresh chunks get a canonical header and fixed size.
            self.chunk_header = b'DSD '
            self.chunk_size = DSDChunk.CHUNK_SIZE

    def load(self):
        """Parse the 28-byte DSD chunk from the current file position."""
        raw = self.fileobj.read(DSDChunk.CHUNK_SIZE)
        if len(raw) != DSDChunk.CHUNK_SIZE:
            raise error('DSF chunk truncated')
        self.chunk_header = raw[0:4]
        if self.chunk_header != b'DSD ':
            raise error('DSF dsd header not found')
        self.chunk_size = cdata.ulonglong_le(raw[4:12])
        if self.chunk_size != DSDChunk.CHUNK_SIZE:
            raise error('DSF dsd header size mismatch')
        self.total_size = cdata.ulonglong_le(raw[12:20])
        self.offset_metdata_chunk = cdata.ulonglong_le(raw[20:28])

    def write(self):
        """Serialize the chunk back to its recorded offset (little-endian)."""
        payload = self.chunk_header + struct.pack(
            '<QQQ', DSDChunk.CHUNK_SIZE, self.total_size,
            self.offset_metdata_chunk)
        self.fileobj.seek(self.chunk_offset)
        self.fileobj.write(payload)

    def pprint(self):
        """Human-readable one-line summary."""
        return (u'DSD Chunk (Total file size = %d, Pointer to Metadata chunk = %d)'
                % (self.total_size, self.offset_metdata_chunk))
class Menu(MenuComponent):
    """Composite node of the menu tree: a named menu holding child
    MenuComponents (items or sub-menus)."""

    iterator: Iterator[MenuComponent] = None
    menuComponents: List[MenuComponent]
    name: str
    description: str

    def __init__(self, name: str, description: str):
        self.name = name
        self.description = description
        self.menuComponents = My_List([])

    def add(self, menuComponent: MenuComponent) -> None:
        """Append a child component."""
        self.menuComponents.append(menuComponent)

    def remove(self, menuComponent: MenuComponent) -> None:
        """Remove a child component."""
        self.menuComponents.remove(menuComponent)

    def getChild(self, i: int) -> MenuComponent:
        """Return the i-th child."""
        return self.menuComponents[i]

    def getName(self) -> str:
        return self.name

    def getDescription(self) -> str:
        return self.description

    def createIterator(self) -> Iterator[MenuComponent]:
        """Lazily build and cache the composite iterator.

        Fix: use identity comparison ``is None`` instead of ``== None``.
        NOTE(review): the cached iterator is returned exhausted on repeated
        calls -- confirm that callers only iterate once.
        """
        if self.iterator is None:
            self.iterator = CompositeIterator(iter(self.menuComponents))
        return self.iterator

    def print(self) -> None:
        """Print this menu's header, then every direct child."""
        print(f'\n{self.getName()}', end='')
        print(f', {self.getDescription()}')
        print('')
        # Fresh iterator per call; stop at the None sentinel, as before
        # (fix: ``is not None`` instead of ``!= None``).
        iterator: Iterator[MenuComponent] = iter(self.menuComponents)
        menuComponent = next(iterator, None)
        while menuComponent is not None:
            menuComponent.print()
            menuComponent = next(iterator, None)
def test_ito_int_vjp():
    """Validate vjp_ito_integrate against finite differences.

    Integrates an SDE with the Milstein scheme, then checks that (a) the
    reconstructed initial state matches y0 and (b) the exact VJP gradients
    match a numerical gradient of sum(ys[1]) to ~1% tolerance.
    """
    (D, ts, y0, args, f, g) = make_sde()
    (flat_args, _) = ravel_pytree(args)
    bm = make_brownian_motion(ts[0], np.zeros(y0.shape), ts[1], random.PRNGKey(0))
    dt = 0.0001
    eps = 1e-06
    method = 'milstein'

    def ito_int(argv):
        # Scalar functional of the integrated path, for finite differencing.
        (y0, args) = argv
        ys = ito_integrate(f, g, y0, ts, bm, dt, args, method=method)
        return np.sum(ys[1])
    ys = ito_integrate(f, g, y0, ts, bm, dt, args, method=method)
    v_yt = np.ones_like(y0)
    v_argst = np.zeros_like(flat_args)
    # Reverse pass starts from the final state ys[-1].
    (y0_rec, exact_grad) = vjp_ito_integrate(v_yt, v_argst, ys[(- 1)], f, g, ts, bm, dt, args, method=method)
    numerical_grad = numerical_gradient(ito_int, (y0, args), eps=eps)
    print('states:', y0, y0_rec)
    assert np.allclose(y0, y0_rec, rtol=0.01, atol=0.01)
    (flat_grads, unravel) = ravel_pytree(exact_grad)
    print('numerical grad: ', unravel(numerical_grad))
    print(' exact grad: ', exact_grad)
    assert np.allclose(numerical_grad, flat_grads, rtol=0.01, atol=0.01)
class GroupMetric(BaseMetric):
    """Base class for group-based retrieval metrics: accumulates embeddings
    with their group ids, then computes a metric over pairwise distances."""

    def __init__(self, distance_metric_name: Distance = Distance.COSINE):
        super().__init__(distance_metric_name=distance_metric_name)
        self.accumulator = GroupAccumulator()

    def update(self, embeddings: Tensor, groups: torch.LongTensor, device=None) -> None:
        """Accumulate a batch of embeddings and their group labels."""
        self.accumulator.update(embeddings, groups, device)

    def reset(self):
        """Drop all accumulated state."""
        self.accumulator.reset()

    @staticmethod
    def prepare_labels(groups: Tensor):
        """Build a boolean same-group matrix with the diagonal cleared.

        Fix: was declared as an instance method without ``self`` (and without
        ``@staticmethod``), so calling it on an instance mis-bound the first
        argument. ``@staticmethod`` keeps class-level calls working too.
        """
        group_matrix = groups.repeat(groups.shape[0], 1)
        group_mask = (group_matrix == groups.unsqueeze(1)).bool()
        # A sample never counts as its own match.
        group_mask[torch.eye(group_mask.shape[0], dtype=torch.bool)] = False
        return group_mask

    def compute(self, embeddings: torch.Tensor, groups: torch.Tensor) -> torch.Tensor:
        """Compute the metric for the given embeddings/groups."""
        (labels, distance_matrix) = self.precompute(embeddings, groups=groups)
        return self.raw_compute(distance_matrix, labels)

    def evaluate(self) -> torch.Tensor:
        """Compute the metric over everything accumulated so far."""
        return self.compute(**self.accumulator.state)

    def raw_compute(self, distance_matrix: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
        """Subclasses implement the actual metric over distances/labels."""
        raise NotImplementedError()
def gen_target_seq(input_file, target_train_file, target_vali_file, target_test_file, user_seq_file, database_file, context_dict_train_file, context_dict_vali_file, context_dict_test_file):
    """Split per-user interaction logs into train/validation/test targets.

    For every user with more than 3 interactions, the 3rd-, 2nd- and last
    items become the train, validation and test targets respectively, each
    paired with one negative sample drawn from outside the history prefix.
    The remaining prefix forms the user-history file and the retrieval
    database. Context dicts map uid -> the two context fields of the
    corresponding target line, and are pickled.
    NOTE(review): relies on a module-level ``neg_sample`` helper.
    """
    line_dict = {}
    user_seq_dict = {}
    context_dict_train = {}
    context_dict_vali = {}
    context_dict_test = {}
    with open(input_file, 'r') as f:
        for line in f:
            # Fields per line (comma separated); exact semantics of the
            # middle columns are not visible here -- confirm upstream.
            (uid, iid, cid, sid, sea_id, ud_id, ts) = line[:(- 1)].split(',')
            if (uid not in line_dict):
                line_dict[uid] = [line]
                user_seq_dict[uid] = [iid]
            else:
                line_dict[uid].append(line)
                user_seq_dict[uid].append(iid)
    target_train_lines = []
    target_vali_lines = []
    target_test_lines = []
    user_seq_lines = []
    database_lines = []
    for uid in user_seq_dict:
        if (len(user_seq_dict[uid]) > 3):
            # Positive target, then a negative sample for each split.
            target_train_lines += [(','.join([uid, user_seq_dict[uid][(- 3)]]) + '\n')]
            target_train_lines += [(','.join([uid, neg_sample(user_seq_dict[uid][:(- 3)])]) + '\n')]
            context_dict_train[uid] = list(map(int, line_dict[uid][(- 3)][:(- 1)].split(',')[(- 3):(- 1)]))
            target_vali_lines += [(','.join([uid, user_seq_dict[uid][(- 2)]]) + '\n')]
            target_vali_lines += [(','.join([uid, neg_sample(user_seq_dict[uid][:(- 3)])]) + '\n')]
            context_dict_vali[uid] = list(map(int, line_dict[uid][(- 2)][:(- 1)].split(',')[(- 3):(- 1)]))
            target_test_lines += [(','.join([uid, user_seq_dict[uid][(- 1)]]) + '\n')]
            target_test_lines += [(','.join([uid, neg_sample(user_seq_dict[uid][:(- 3)])]) + '\n')]
            context_dict_test[uid] = list(map(int, line_dict[uid][(- 1)][:(- 1)].split(',')[(- 3):(- 1)]))
            user_seq = user_seq_dict[uid][:(- 3)]
            # History written twice: once per (positive, negative) pair.
            user_seq_lines += ([(','.join(user_seq) + '\n')] * 2)
            database_lines += line_dict[uid][:(- 3)]
    with open(target_train_file, 'w') as f:
        f.writelines(target_train_lines)
    with open(target_vali_file, 'w') as f:
        f.writelines(target_vali_lines)
    with open(target_test_file, 'w') as f:
        f.writelines(target_test_lines)
    with open(user_seq_file, 'w') as f:
        f.writelines(user_seq_lines)
    with open(database_file, 'w') as f:
        f.writelines(database_lines)
    with open(context_dict_train_file, 'wb') as f:
        pkl.dump(context_dict_train, f)
    with open(context_dict_vali_file, 'wb') as f:
        pkl.dump(context_dict_vali, f)
    with open(context_dict_test_file, 'wb') as f:
        pkl.dump(context_dict_test, f)
# NOTE(review): the next line looks like a mangled decorator (missing '@',
# probably '@with_fixtures(...)') -- confirm against the original source.
_fixtures(WebFixture, UserInterfaceErrorScenarios)
def test_ui_slots_map_error(web_fixture, user_interface_error_scenarios):
    """Installing a UserInterface with a broken slot map must raise
    ProgrammerError when the view is opened."""
    class SimpleUserInterface(UserInterface):
        def assemble(self):
            root = self.define_view('/', title='View')
            root.set_slot('name', P.factory())

    class MainUI(UserInterface):
        def assemble(self):
            self.define_page(HTML5Page)
            self.define_user_interface('/a_ui', SimpleUserInterface, user_interface_error_scenarios.slot_map, name='test_ui')
    wsgi_app = web_fixture.new_wsgi_app(site_root=MainUI)
    browser = Browser(wsgi_app)
    with expected(ProgrammerError):
        browser.open('/a_ui/')
def test_lshift():
    """Left-shift semantics of Bits: results wrap within the fixed width,
    mixed-width operands raise ValueError, as does an out-of-range shift."""
    x = Bits(8, 12)
    y = Bits(8, 4)
    # (lhs, shift, expected) triples, in the original assertion order.
    cases = [
        (x, y, 192),
        (x, 4, 192),
        (x, 6, 0),
        (y, x, 0),
        (y, 0, 4),
        (y, 1, 8),
        (x, 255, 0),
    ]
    for lhs, amount, expected in cases:
        assert (lhs << amount) == expected
    # Operands of different widths are rejected.
    with pytest.raises(ValueError):
        Bits(4, 3) << Bits(3, 1)
    # Shift amounts beyond the representable range are rejected.
    with pytest.raises(ValueError):
        x << 256
def process_glove(glove_file, vocab_dict, save_path, random_init=True):
    """Build and pickle a {word: vector} dict for *vocab_dict* from a GloVe
    text file.

    Words absent from the GloVe file get a random (``random_init=True``) or
    zero 300-d vector. Returns the freshly pickled dict.

    Fixes: file handles are now closed via context managers, and the
    deprecated ``np.fromstring`` is replaced with an explicit parse.
    NOTE(review): when the pickle already exists the function returns None
    (original behavior, kept) -- confirm callers expect that.
    """
    pkl_path = save_path + '.pkl'
    if os.path.isfile(pkl_path):
        print(('Glove file already exists at %s' % pkl_path))
    else:
        glove_path = os.path.join(glove_file)
        if random_init:
            glove = {v: np.random.randn(300) for v in vocab_dict}
        else:
            glove = {v: np.zeros(300) for v in vocab_dict}
        found = 0  # vocab words covered by pretrained vectors (diagnostic)
        with open(glove_path, 'r') as glove_f:
            for line in glove_f:
                (word, vec) = line.split(' ', 1)
                if (word in vocab_dict):
                    glove[word] = np.array(vec.split(), dtype=np.float64)
                    found += 1
        with open(pkl_path, 'wb') as out_f:
            pickle.dump(glove, out_f)
        print('saved glove data to: {}'.format(save_path))
        with open(pkl_path, 'rb') as in_f:
            return pickle.load(in_f)
# NOTE(review): the next line is a stray expression -- probably a mangled
# caching decorator such as "@cachedQuery(1, 'itemID')"; confirm upstream.
(1, 'itemID')
def getDynamicItem(itemID, eager=None):
    """Fetch a DynamicItem by integer ID, returning None when not found.

    ``eager`` selects SQLAlchemy eager-load options via processEager.
    Raises TypeError for non-integer IDs.
    """
    try:
        if isinstance(itemID, int):
            if (eager is None):
                result = get_gamedata_session().query(DynamicItem).filter((DynamicItem.ID == itemID)).one()
            else:
                result = get_gamedata_session().query(DynamicItem).options(*processEager(eager)).filter((DynamicItem.ID == itemID)).one()
        else:
            raise TypeError('Need integer as argument')
    except exc.NoResultFound:
        # Missing rows are reported as None rather than an exception.
        result = None
    return result
class TestBuildValidation():
    """Builder.build() must validate requested versions and project metadata."""

    def test_unknown_version(self, isolation):
        """Requesting versions the target does not provide fails fast."""
        config = {'project': {'name': 'foo', 'version': '0.1.0'}, 'tool': {'hatch': {'build': {'targets': {'foo': {'versions': ['1']}}}}}}
        builder = MockBuilder(str(isolation), config=config)
        builder.PLUGIN_NAME = 'foo'
        builder.get_version_api = (lambda: {'1': str})
        with pytest.raises(ValueError, match='Unknown versions for target `foo`: 42, 9000'):
            next(builder.build(directory=str(isolation), versions=['9000', '42']))

    def test_invalid_metadata(self, isolation):
        """'version' both statically defined and listed as dynamic is
        contradictory metadata and must be rejected."""
        config = {'project': {'name': 'foo', 'version': '0.1.0', 'dynamic': ['version']}, 'tool': {'hatch': {'build': {'targets': {'foo': {'versions': ['1']}}}}}}
        builder = MockBuilder(str(isolation), config=config)
        builder.PLUGIN_NAME = 'foo'
        builder.get_version_api = (lambda: {'1': (lambda *_args, **_kwargs: '')})
        with pytest.raises(ValueError, match='Metadata field `version` cannot be both statically defined and listed in field `project.dynamic`'):
            next(builder.build(directory=str(isolation)))
def view_feedback(request, schedule_item_id):
    """Render the feedback page for one schedule item.

    Returns 403 when the current user may not view this item's feedback,
    404 when the schedule item does not exist.
    """
    schedule_item = get_object_or_404(ScheduleItem, pk=schedule_item_id)
    if not can_view_feedback(user=request.user, schedule_item=schedule_item):
        return HttpResponseForbidden('Access Denied')
    template_context = {
        'feedback': feedback_service.get_feedback(schedule_item=schedule_item),
        'schedule_item': schedule_item,
    }
    return render(request, 'feedback/detail.html', template_context)
def test_ces_ordering(s):
    """CauseEffectStructure comparisons: equal contents compare equal, and a
    CES holding a higher-phi concept compares greater."""
    assert (models.CauseEffectStructure([concept(subsystem=s)], subsystem=s) == models.CauseEffectStructure([concept(subsystem=s)], subsystem=s))
    assert (models.CauseEffectStructure([concept(phi=1, subsystem=s)], subsystem=s) > models.CauseEffectStructure([concept(phi=0, subsystem=s)], subsystem=s))
class FakeHostnameResolver(HostnameResolver):
    """Test resolver that answers every lookup with a fixed list of
    (address family, address) pairs."""

    family_addr_pairs: Sequence[tuple[(AddressFamily, str)]] = attr.ib()

    async def getaddrinfo(self, host: ((bytes | str) | None), port: (((bytes | str) | int) | None), family: int=0, type: int=0, proto: int=0, flags: int=0) -> list[tuple[(AddressFamily, SocketKind, int, str, (tuple[(str, int)] | tuple[(str, int, int, int)]))]]:
        # Port must already be numeric; each configured pair becomes one
        # SOCK_STREAM result (host/family/type args are ignored).
        assert isinstance(port, int)
        return [(family, tsocket.SOCK_STREAM, 0, '', (addr, port)) for (family, addr) in self.family_addr_pairs]

    async def getnameinfo(self, sockaddr: (tuple[(str, int)] | tuple[(str, int, int, int)]), flags: int) -> tuple[(str, str)]:
        # Reverse lookups are unsupported by this fake.
        raise NotImplementedError()
def check_filename_length(data_root, verbose=False):
    """Exit with status 1 if any file under *data_root* has a name of 140 or
    more UTF-8 bytes; otherwise return None.

    NOTE(review): ``verbose`` is currently unused -- confirm intent.
    """
    offenders = [
        entry for entry in Path(data_root).glob('**/*')
        if len(entry.name.encode('UTF-8')) >= 140
    ]
    if offenders:
        print('Files that have an unreasonably long name:')
        for entry in offenders:
            print(f' - {entry}')
        sys.exit(1)
class PublisherImpression(BasePublisherImpression):
    """Impression counts aggregated per publisher per date."""

    # PROTECT: impression rows must not be cascade-deleted with a publisher.
    publisher = models.ForeignKey(Publisher, related_name='publisher_impressions', on_delete=models.PROTECT, null=True)

    class Meta():
        ordering = ('-date',)
        # At most one row per publisher per day.
        unique_together = ('publisher', 'date')
        verbose_name_plural = _('Publisher impressions')
# NOTE(review): the next line looks like a mangled registration decorator
# (missing '@', probably '@register_transform(...)') -- confirm upstream.
_transform('ImgPilColorDistortion')
class ImgPilColorDistortion(ClassyTransform):
    """Randomly color-jitter (p=0.8) and/or convert to grayscale (p=0.2) a
    PIL image; *strength* scales the jitter magnitudes."""

    def __init__(self, strength):
        self.strength = strength
        self.color_jitter = pth_transforms.ColorJitter((0.8 * self.strength), (0.8 * self.strength), (0.8 * self.strength), (0.2 * self.strength))
        self.rnd_color_jitter = pth_transforms.RandomApply([self.color_jitter], p=0.8)
        self.rnd_gray = pth_transforms.RandomGrayscale(p=0.2)
        self.transforms = pth_transforms.Compose([self.rnd_color_jitter, self.rnd_gray])

    def __call__(self, image):
        """Apply the composed distortion pipeline to *image*."""
        return self.transforms(image)

    @classmethod
    def from_config(cls, config: Dict[(str, Any)]) -> 'ImgPilColorDistortion':
        """Instantiate from a config dict (key: 'strength', default 1.0).

        Fix: was missing ``@classmethod``, so calling it on the class
        mis-bound ``cls`` to the config argument.
        """
        strength = config.get('strength', 1.0)
        logging.info(f'ImgPilColorDistortion | Using strength: {strength}')
        return cls(strength=strength)
class RatingWorkflowT1(BaseWorkflowVisualQC, ABC):
    """Rating workflow for T1w MRI: per-subject slice collages with an
    intensity histogram, contrast helpers (saturate / background-only /
    tails-trimmed) and optional outlier alerts."""

    def __init__(self, id_list, in_dir, out_dir, issue_list, mri_name, in_dir_type, images_for_id, outlier_method, outlier_fraction, outlier_feat_types, disable_outlier_detection, vis_type, saturate_perc, views, num_slices_per_view, num_rows_per_view, screenshot_only=cfg.default_screenshot_only):
        super().__init__(id_list, in_dir, out_dir, outlier_method, outlier_fraction, outlier_feat_types, disable_outlier_detection, screenshot_only=screenshot_only)
        self.vis_type = vis_type
        self.saturate_perc = saturate_perc  # percentile for the saturation helper
        self.issue_list = issue_list
        self.mri_name = mri_name
        self.in_dir_type = in_dir_type
        self.images_for_id = images_for_id
        self.expt_id = 'rate_mri_{}'.format(self.mri_name)
        self.suffix = self.expt_id
        self.current_alert_msg = None
        self.init_layout(views, num_rows_per_view, num_slices_per_view)
        self.init_getters()
        self.__module_type__ = 't1_mri'

    def preprocess(self):
        """Run feature extraction and outlier detection unless disabled."""
        if (not self.disable_outlier_detection):
            print('Preprocessing data - please wait .. \n\t(or contemplate the vastness of universe! )')
            self.extract_features()
            self.detect_outliers()

    def prepare_UI(self):
        """Open the figure and attach widgets plus the histogram panel."""
        self.open_figure()
        self.add_UI()
        self.add_histogram_panel()

    def init_layout(self, views, num_rows_per_view, num_slices_per_view, padding=cfg.default_padding):
        """Create the slice-collage figure (dark background, grayscale)."""
        plt.style.use('dark_background')
        self.display_params = dict(interpolation='none', aspect='equal', origin='lower', cmap='gray', vmin=0.0, vmax=1.0)
        self.figsize = cfg.default_review_figsize
        self.collage = Collage(view_set=views, num_slices=num_slices_per_view, num_rows=num_rows_per_view, display_params=self.display_params, bounding_rect=cfg.bounding_box_review, figsize=self.figsize)
        self.fig = self.collage.fig
        set_fig_window_title(self.fig, f'VisualQC T1w MRI : {self.in_dir} {self.mri_name}')
        self.padding = padding

    def init_getters(self):
        """Pick the feature extractor and the per-subject path resolver for
        the input layout (freesurfer / BIDS / flat directory)."""
        from visualqc.features import extract_T1_features
        self.feature_extractor = extract_T1_features
        if (self.in_dir_type.lower() in ('freesurfer',)):
            self.path_getter_inputs = (lambda sub_id: realpath(pjoin(self.in_dir, sub_id, 'mri', self.mri_name)))
        elif (self.in_dir_type.upper() in ('BIDS',)):
            self.path_getter_inputs = (lambda sub_id: self.images_for_id[sub_id]['image'])
        else:
            self.path_getter_inputs = (lambda sub_id: realpath(pjoin(self.in_dir, sub_id, self.mri_name)))

    def open_figure(self):
        """Figure is already created in init_layout; nothing further here."""
        pass

    def add_UI(self):
        """Wire the rating interface and keyboard/mouse shortcuts."""
        # alt+<key> and <key>+alt both map to the same helper view.
        map_key_to_callback = {'alt+s': self.show_saturated, 's+alt': self.show_saturated, 'alt+b': self.show_background_only, 'b+alt': self.show_background_only, 'alt+t': self.show_tails_trimmed, 't+alt': self.show_tails_trimmed, 'alt+o': self.show_original, 'o+alt': self.show_original}
        self.UI = T1MriInterface(self.collage.fig, self.collage.flat_grid, self.issue_list, next_button_callback=self.next, quit_button_callback=self.quit, processing_choice_callback=self.process_and_display, map_key_to_callback=map_key_to_callback)
        self.con_id_click = self.fig.canvas.mpl_connect('button_press_event', self.UI.on_mouse)
        self.con_id_keybd = self.fig.canvas.mpl_connect('key_press_event', self.UI.on_keyboard)
        self.fig.set_size_inches(self.figsize)

    def add_histogram_panel(self):
        """Add the intensity-histogram axes to the figure."""
        self.ax_hist = plt.axes(cfg.position_histogram_t1_mri)
        self.ax_hist.set_xticks(cfg.xticks_histogram_t1_mri)
        self.ax_hist.set_yticks([])
        self.ax_hist.set_autoscaley_on(True)
        self.ax_hist.set_prop_cycle('color', cfg.color_histogram_t1_mri)
        self.ax_hist.set_title(cfg.title_histogram_t1_mri, fontsize='small')

    def update_histogram(self, img):
        """Redraw the histogram over the nonzero voxels of *img*."""
        nonzero_values = img.ravel()[np.flatnonzero(img)]
        (_, _, patches_hist) = self.ax_hist.hist(nonzero_values, density=True, bins=cfg.num_bins_histogram_display)
        self.ax_hist.relim(visible_only=True)
        self.ax_hist.autoscale_view(scalex=False)
        # Track artists so the UI can clear them for the next unit.
        self.UI.data_handles.extend(patches_hist)

    def update_alerts(self):
        """Render the current outlier-alert text on the figure, if any."""
        if (self.current_alert_msg is not None):
            h_alert_text = self.fig.text(cfg.position_outlier_alert_t1_mri[0], cfg.position_outlier_alert_t1_mri[1], self.current_alert_msg, **cfg.alert_text_props)
            self.UI.data_handles.append(h_alert_text)

    def add_alerts(self):
        """Compose the alert message for units flagged as outliers."""
        flagged_as_outlier = (self.current_unit_id in self.by_sample)
        if flagged_as_outlier:
            alerts_list = self.by_sample.get(self.current_unit_id, None)
            print('\n\tFlagged as a possible outlier by these measures:\n\t\t{}'.format('\t'.join(alerts_list)))
            strings_to_show = (['Flagged as an outlier:'] + alerts_list)
            self.current_alert_msg = '\n'.join(strings_to_show)
            self.update_alerts()
        else:
            self.current_alert_msg = None

    def load_unit(self, unit_id):
        """Load one subject's image; return True when it should be skipped
        (i.e. the image is empty)."""
        # Drop cached per-subject images from the previous unit.
        for attr in ('current_img_raw', 'current_img', 'saturated_img', 'tails_trimmed_img', 'background_img'):
            if hasattr(self, attr):
                delattr(self, attr)
        t1_mri_path = self.path_getter_inputs(unit_id)
        self.current_img_raw = read_image(t1_mri_path, error_msg='T1 mri')
        self.current_img = scale_0to1(crop_image(self.current_img_raw, self.padding))
        self.currently_showing = None
        skip_subject = False
        if (np.count_nonzero(self.current_img) == 0):
            skip_subject = True
            print('MR image is empty!')
        return skip_subject

    def display_unit(self):
        """Show the current image collage and its histogram."""
        self.collage.attach(self.current_img)
        self.update_histogram(self.current_img)

    def process_and_display(self, user_choice):
        """Dispatch the contrast-mode radio choice to the matching view."""
        if (user_choice in ('Saturate',)):
            self.show_saturated(no_toggle=True)
        elif (user_choice in ('Background_only',)):
            self.show_background_only(no_toggle=True)
        elif (user_choice in ('Tails_trimmed', 'Tails trimmed')):
            self.show_tails_trimmed(no_toggle=True)
        elif (user_choice in ('Original',)):
            self.show_original()
        else:
            print('Chosen option seems to be not implemented!')

    def show_saturated(self, no_toggle=False):
        """Toggle a view with the brightest intensities saturated; the
        processed image is computed once and cached."""
        if ((not (self.currently_showing in ['saturated'])) or no_toggle):
            if (not hasattr(self, 'saturated_img')):
                self.saturated_img = saturate_brighter_intensities(self.current_img, percentile=self.saturate_perc)
            self.collage.attach(self.saturated_img)
            self.currently_showing = 'saturated'
        else:
            self.show_original()

    def show_background_only(self, no_toggle=False):
        """Toggle a view showing only the (rescaled) background voxels."""
        if ((not (self.currently_showing in ['Background_only'])) or no_toggle):
            self._compute_background()
            self.collage.attach(self.background_img)
            self.currently_showing = 'Background_only'
        else:
            self.show_original()

    def _compute_background(self):
        """Compute and cache the background-only image (foreground zeroed)."""
        if (not hasattr(self, 'background_img')):
            self.foreground_mask = mask_image(self.current_img, out_dtype=bool)
            temp_background_img = np.copy(self.current_img)
            temp_background_img[self.foreground_mask] = 0.0
            self.background_img = scale_0to1(temp_background_img, exclude_outliers_below=1, exclude_outliers_above=1)

    def show_tails_trimmed(self, no_toggle=False):
        """Toggle a view with the intensity tails excluded from scaling."""
        if ((not (self.currently_showing in ['tails_trimmed'])) or no_toggle):
            if (not hasattr(self, 'tails_trimmed_img')):
                self.tails_trimmed_img = scale_0to1(self.current_img, exclude_outliers_below=1, exclude_outliers_above=0.05)
            self.collage.attach(self.tails_trimmed_img)
            self.currently_showing = 'tails_trimmed'
        else:
            self.show_original()

    def show_original(self):
        """Show the unmodified (cropped, rescaled) image."""
        self.collage.attach(self.current_img)
        self.currently_showing = 'original'

    def close_UI(self):
        """Disconnect event handlers and close all figures."""
        self.fig.canvas.mpl_disconnect(self.con_id_click)
        self.fig.canvas.mpl_disconnect(self.con_id_keybd)
        plt.close('all')
class WordEmbeddingTuner(Tuner):
    """Fine-tunes pretrained word embeddings with a classification head.

    Supports fasttext/glove/swivel text formats, gensim word2vec models, or
    randomly initialised embeddings built from corpus frequencies. Two extra
    rows (UNK, PAD) are appended to every embedding matrix.
    """

    def __init__(self, train_corpus_fname, test_corpus_fname, model_save_path, embedding_name, embedding_fname=None, embedding_size=100, batch_size=128, learning_rate=0.0001, num_labels=2):
        super().__init__(train_corpus_fname=train_corpus_fname, tokenized_train_corpus_fname=(train_corpus_fname + '.word-embedding-tokenized'), test_corpus_fname=test_corpus_fname, batch_size=batch_size, tokenized_test_corpus_fname=(test_corpus_fname + '.word-embedding-tokenized'), model_name=embedding_name, model_save_path=model_save_path)
        self.lr = learning_rate
        self.embedding_size = embedding_size
        (self.embeddings, self.vocab) = self.load_embeddings(embedding_name, embedding_fname)
        self.unk_idx = len(self.vocab)  # embedding row reserved for unknown tokens
        self.pad_idx = (len(self.vocab) + 1)  # embedding row reserved for padding
        # Graph size is len(vocab)+2 to account for the UNK and PAD rows.
        (self.ids_placeholder, self.input_lengths, self.labels_placeholder, self.dropout_keep_prob, self.embedding_placeholder, self.embed_init, self.logits, self.loss) = make_word_embedding_graph(num_labels, (len(self.vocab) + 2), self.embedding_size, tune=True)

    def tune(self):
        """Build the Adam training op, initialise the embedding table from
        the pretrained matrix, and run the training loop."""
        global_step = tf.train.get_or_create_global_step()
        optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
        grads_and_vars = optimizer.compute_gradients(self.loss)
        train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
        output_feed = [train_op, global_step, self.logits, self.loss]
        saver = tf.train.Saver(max_to_keep=1)
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        # Copy the pretrained matrix into the graph variable before training.
        sess.run(self.embed_init, feed_dict={self.embedding_placeholder: self.embeddings})
        self.train(sess, saver, global_step, output_feed)

    def make_input(self, sentences, labels, is_training):
        """Convert tokenized sentences into right-padded id arrays and a TF
        feed dict (dropout keep-prob 0.9 for training, 1.0 otherwise)."""
        (input_ids, lengths) = ([], [])
        max_token_length = self.get_max_token_length_this_batch(sentences)
        for tokens in sentences:
            token_ids = []
            tokens_length = len(tokens)
            for token in tokens:
                if (token in self.vocab):
                    token_ids.append(self.vocab[token])
                else:
                    token_ids.append(self.unk_idx)
            if (len(tokens) < max_token_length):
                # Right-pad up to the longest sentence in this batch.
                token_ids.extend(([self.pad_idx] * (max_token_length - tokens_length)))
            input_ids.append(token_ids)
            lengths.append(len(token_ids))
        if is_training:
            input_feed = {self.ids_placeholder: np.array(input_ids), self.input_lengths: np.array(lengths), self.labels_placeholder: np.array(labels), self.dropout_keep_prob: 0.9}
        else:
            input_feed = {self.ids_placeholder: np.array(input_ids), self.input_lengths: np.array(lengths), self.labels_placeholder: np.array(labels), self.dropout_keep_prob: 1.0}
        input_feed = [input_feed, labels]
        return input_feed

    def get_max_token_length_this_batch(self, sentences):
        """Longest sentence length within the current batch."""
        return max((len(sentence) for sentence in sentences))

    def get_truncated_normal(self, mean=0, sd=1, low=(- 1), upp=1):
        """Truncated-normal sampler used for random embedding rows."""
        return truncnorm(((low - mean) / sd), ((upp - mean) / sd), loc=mean, scale=sd)

    def load_embeddings(self, embedding_name, embedding_fname):
        """Load (embeddings, vocab) for *embedding_name*, then append two
        random rows for UNK and PAD at indices len(vocab), len(vocab)+1."""
        random_generator = self.get_truncated_normal()
        if (embedding_name in ['fasttext', 'glove', 'swivel']):
            (embeddings, words) = ([], [])
            with open(embedding_fname, 'r') as f:
                if (embedding_name == 'fasttext'):
                    next(f)  # fasttext files start with a header line
                for line in f:
                    # swivel is tab-separated; glove/fasttext whitespace.
                    if (embedding_name == 'swivel'):
                        splitedLine = line.strip().split('\t')
                    else:
                        splitedLine = line.strip().split()
                    word = splitedLine[0]
                    embedding = [float(el) for el in splitedLine[1:]]
                    words.append(word)
                    embeddings.append(embedding)
            embeddings = np.array(embeddings)
            vocab = {word: idx for (idx, word) in enumerate(words)}
        elif (embedding_name == 'word2vec'):
            model = Word2Vec.load(embedding_fname)
            embeddings = model.wv.vectors
            vocab = {word: idx for (idx, word) in enumerate(model.wv.index2word)}
        else:
            # No pretrained file: random vectors for the 50k most frequent
            # training-corpus tokens.
            words_count = defaultdict(int)
            for (tokens, _) in self.train_data:
                for token in tokens:
                    words_count[token] += 1
            sorted_words = sorted(words_count.items(), key=(lambda x: x[1]), reverse=True)[:50000]
            words = [word for (word, _) in sorted_words]
            vocab = {word: idx for (idx, word) in enumerate(words)}
            random_embeddings = random_generator.rvs((len(vocab) * self.embedding_size))
            embeddings = random_embeddings.reshape(len(vocab), self.embedding_size)
        added_embeddings = random_generator.rvs((self.embedding_size * 2))
        embeddings = np.append(embeddings, added_embeddings.reshape(2, self.embedding_size), axis=0)
        return (embeddings, vocab)
# NOTE(review): the next line looks like a mangled '@functional_datapipe'
# decorator (missing '@') -- confirm against the original source.
_datapipe('dataframe')
class DataFrameMakerIterDataPipe(IterDataPipe):
    """Batches a source datapipe and converts each batch into a torcharrow
    DataFrame.

    __new__ returns the composed batch+map datapipe rather than an instance
    of this class.
    """
    def __new__(cls, source_dp: IterDataPipe[T_co], dataframe_size: int=1000, dtype=None, dtype_generator=None, columns: Optional[List[str]]=None, device: str=''):
        if (torcharrow is None):
            raise ImportError("The library 'torcharrow' is necessary for this DataPipe but it is not available.Please visit to install it.")
        # One DataFrame per dataframe_size rows.
        batch_dp = source_dp.batch(dataframe_size)
        df_dp = batch_dp.map(partial(_construct_dataframe, dtype=dtype, dtype_generator=dtype_generator, columns=columns, device=device))
        return df_dp
.requires_internet
class TestOtherBackend():
    """`hatch build` must delegate correctly to non-hatchling PEP 517
    backends (flit-core, legacy setuptools)."""

    def test_standard(self, hatch, temp_dir, helpers):
        """Building with flit-core produces wheel and sdist; -t limits the
        build to a single target."""
        project_name = 'My.App'
        with temp_dir.as_cwd():
            result = hatch('new', project_name)
        assert (result.exit_code == 0), result.output
        path = (temp_dir / 'my-app')
        data_path = (temp_dir / 'data')
        data_path.mkdir()
        project = Project(path)
        config = dict(project.raw_config)
        # Swap the generated hatchling backend for flit-core and pin static
        # metadata so flit can build without extra configuration.
        config['build-system']['requires'] = ['flit-core']
        config['build-system']['build-backend'] = 'flit_core.buildapi'
        config['project']['version'] = '0.0.1'
        config['project']['dynamic'] = []
        del config['project']['license']
        project.save_config(config)
        build_directory = (path / 'dist')
        assert (not build_directory.is_dir())
        # Default build: both targets.
        with path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
            result = hatch('build')
        assert (result.exit_code == 0), result.output
        assert (result.output == helpers.dedent('\n Creating environment: hatch-build\n Checking dependencies\n Syncing dependencies\n '))
        assert build_directory.is_dir()
        assert (build_directory / 'my_app-0.0.1-py3-none-any.whl').is_file()
        assert (build_directory / 'my_app-0.0.1.tar.gz').is_file()
        build_directory.remove()
        # Wheel only.
        with path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
            result = hatch('build', '-t', 'wheel')
        assert (result.exit_code == 0), result.output
        assert (not result.output)
        assert build_directory.is_dir()
        assert (build_directory / 'my_app-0.0.1-py3-none-any.whl').is_file()
        assert (not (build_directory / 'my_app-0.0.1.tar.gz').is_file())
        build_directory.remove()
        # Sdist only.
        with path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
            result = hatch('build', '-t', 'sdist')
        assert (result.exit_code == 0), result.output
        assert (not result.output)
        assert build_directory.is_dir()
        assert (not (build_directory / 'my_app-0.0.1-py3-none-any.whl').is_file())
        assert (build_directory / 'my_app-0.0.1.tar.gz').is_file()

    def test_legacy(self, hatch, temp_dir, helpers):
        """A legacy setuptools project (setup.py) also builds wheel+sdist."""
        path = (temp_dir / 'tmp')
        path.mkdir()
        data_path = (temp_dir / 'data')
        data_path.mkdir()
        (path / 'pyproject.toml').write_text('[build-system]\nrequires = ["setuptools"]\nbuild-backend = "setuptools.build_meta"\n')
        (path / 'setup.py').write_text('import setuptools\nsetuptools.setup(name="tmp", version="0.0.1")\n')
        (path / 'tmp.py').write_text('print("Hello World!")\n')
        build_directory = (path / 'dist')
        assert (not build_directory.is_dir())
        with path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
            result = hatch('build')
        assert (result.exit_code == 0), result.output
        assert (result.output == helpers.dedent('\n Creating environment: hatch-build\n Checking dependencies\n Syncing dependencies\n '))
        assert build_directory.is_dir()
        assert (build_directory / 'tmp-0.0.1-py3-none-any.whl').is_file()
        assert (build_directory / 'tmp-0.0.1.tar.gz').is_file()
def test_exceptions2exit_verbose(capsys, monkeypatch):
    """At DEBUG log level, the exceptions2exit wrapper prints the full
    traceback (including the 'raise RuntimeError' source line) to stderr
    before converting the exception into SystemExit.

    NOTE(review): the bare '([RuntimeError])' below looks like a mangled
    '@exceptions2exit([RuntimeError])' decorator — without it func() would
    raise RuntimeError instead of SystemExit; confirm against the original.
    """
    ([RuntimeError])
    def func(_):
        raise RuntimeError('Exception raised')
    # Force verbose mode so the wrapper emits the traceback.
    monkeypatch.setattr('pyscaffold.cli.get_log_level', (lambda : logging.DEBUG))
    with pytest.raises(SystemExit):
        func(1)
    error = capsys.readouterr().err
    match = re.search('raise RuntimeError', error)
    assert match
class JsonEncodedValueAdapter(construct.Adapter):
def _decode(self, obj: str, context: construct.Container, path: str) -> typing.Any:
return json.loads(obj)
def _encode(self, obj: typing.Any, context: construct.Container, path: str) -> str:
return json.dumps(obj, separators=(',', ':')) |
# NOTE(review): '_message(...)' looks like a mangled
# '@<client>.on_message(...)' pyrogram handler decorator — confirm.
_message(((pyrogram.filters.command(commands=['check']) & pyrogram.filters.private) & tools.is_admin))
def check(bot: AutoPoster, message: Message):
    """Admin-only /check command: for every configured VK domain, try to
    fetch one raw post and reply with a per-domain status report.

    NOTE(review): both branches below append identical text; status markers
    (e.g. success/failure emoji) may have been lost in extraction — confirm.
    """
    result = ''
    for domain in bot.config.get('domains', {}).keys():
        group = Group(domain=domain, session=bot.vk_session)
        if group.get_raw_posts(1):
            result += f'''`{domain}`
'''
        else:
            result += f'''`{domain}`
'''
    message.reply(messages.CHECK_RESULTS.format(result))
# NOTE(review): this bare call looks like a mangled
# "@_utils.require_version('3.9')" class decorator — confirm.
_utils.require_version('3.9')
class TypeBrain(unittest.TestCase):
    """astroid inference of PEP 585 builtin subscripting (Python 3.9+)."""

    def test_type_subscript(self):
        """``type[int]`` is inferable: ``type`` has ``__class_getitem__``."""
        src = builder.extract_node('\n a: type[int] = int\n ')
        val_inf = src.annotation.value.inferred()[0]
        self.assertIsInstance(val_inf, astroid.ClassDef)
        self.assertEqual(val_inf.name, 'type')
        meth_inf = val_inf.getattr('__class_getitem__')[0]
        self.assertIsInstance(meth_inf, astroid.FunctionDef)

    def test_invalid_type_subscript(self):
        """``str[int]`` is invalid: ``str`` lacks ``__class_getitem__``."""
        src = builder.extract_node('\n a: str[int] = "abc"\n ')
        val_inf = src.annotation.value.inferred()[0]
        self.assertIsInstance(val_inf, astroid.ClassDef)
        self.assertEqual(val_inf.name, 'str')
        with self.assertRaises(AttributeInferenceError):
            val_inf.getattr('__class_getitem__')[0]

    # NOTE(review): likely a mangled "@_utils.require_version(minver='3.9')"
    # decorator for the method below — confirm.
    _utils.require_version(minver='3.9')
    def test_builtin_subscriptable(self):
        """Each subscriptable builtin container infers to an iterable ClassDef."""
        for typename in ('tuple', 'list', 'dict', 'set', 'frozenset', 'enumerate'):
            src = f'''
{typename:s}[int]
'''
            right_node = builder.extract_node(src)
            inferred = next(right_node.infer())
            self.assertIsInstance(inferred, nodes.ClassDef)
            self.assertIsInstance(inferred.getattr('__iter__')[0], nodes.FunctionDef)
def test_area_with_invalid_connections():
    """Reading an area whose connection requires an item resource that is
    absent from the database raises MissingResource with a message naming
    the area, the endpoints, and the missing resource."""
    # Empty resource database: any item requirement ('Dark') cannot resolve.
    db = ResourceDatabase(RandovaniaGame.METROID_PRIME_ECHOES, [], [], [], [], [], [], {}, damage_reductions={}, energy_tank_item=MagicMock())
    reader = RegionReader(db, MagicMock())
    reader.current_region_name = 'World'
    with pytest.raises(MissingResource) as e:
        reader.read_area('Broken Area', {'extra': {}, 'nodes': {'A': {'heal': True, 'coordinates': None, 'node_type': 'generic', 'valid_starting_location': False, 'connections': {}, 'extra': {}, 'layers': ['default'], 'description': ''}, 'Broken': {'heal': True, 'coordinates': None, 'node_type': 'generic', 'valid_starting_location': False, 'layers': ['default'], 'extra': {}, 'description': '', 'connections': {'A': {'type': 'resource', 'data': {'type': 'items', 'name': 'Dark', 'amount': 1, 'negate': False}}}}}})
    assert (str(e.value) == "In area Broken Area, connection from Broken to A got error: ITEM Resource with short_name 'Dark' not found in 0 resources")
class TestTime(TestCase):
    """Round-trip serialization of ``datetime.time`` values via jsons."""

    def test_dump_time(self):
        """A time value serializes to its ISO 'HH:MM:SS' string."""
        moment = datetime(year=2018, month=7, day=8, hour=21, minute=34).time()
        self.assertEqual('21:34:00', jsons.dump(moment))

    def test_load_time(self):
        """The ISO 'HH:MM:SS' string deserializes back to the same time."""
        expected = datetime(year=2018, month=7, day=8, hour=21, minute=34).time()
        self.assertEqual(expected, jsons.load('21:34:00', time))
class Migration(migrations.Migration):
    """Second 'grants' migration: constrain 'gender' to a fixed choice set
    and make 'notes' an optional free-text field."""
    dependencies = [('grants', '0001_initial')]
    operations = [migrations.AlterField(model_name='grant', name='gender', field=models.CharField(blank=True, choices=[('male', 'Male'), ('female', 'Female'), ('other', 'Other'), ('not_say', 'Prefer not to say')], max_length=10, verbose_name='gender')), migrations.AlterField(model_name='grant', name='notes', field=models.TextField(blank=True, verbose_name='Notes'))]
def _edit_distance(prediction_tokens: List[str], reference_tokens: List[str]) -> int:
dp = [([0] * (len(reference_tokens) + 1)) for _ in range((len(prediction_tokens) + 1))]
for i in range((len(prediction_tokens) + 1)):
dp[i][0] = i
for j in range((len(reference_tokens) + 1)):
dp[0][j] = j
for i in range(1, (len(prediction_tokens) + 1)):
for j in range(1, (len(reference_tokens) + 1)):
if (prediction_tokens[(i - 1)] == reference_tokens[(j - 1)]):
dp[i][j] = dp[(i - 1)][(j - 1)]
else:
dp[i][j] = (min(dp[(i - 1)][j], dp[i][(j - 1)], dp[(i - 1)][(j - 1)]) + 1)
return dp[(- 1)][(- 1)] |
class FunDefFindingVisitor(ast.NodeVisitor):
    """AST visitor that extracts the function definition (lambda or def)
    from a module of the form ``x = constraint(<callable>)`` or a plain
    function definition.

    ``visit`` returns the found ``ast.Lambda`` / ``ast.FunctionDef`` node.
    """

    def __init__(self):
        # BUG FIX: the original called super(ast.NodeVisitor).__init__(),
        # which creates an *unbound* super proxy and initializes that proxy
        # object rather than this instance. The zero-argument form is the
        # correct idiom and initializes self via the MRO.
        super().__init__()

    def visit_Module(self, node):
        """Descend into the module's single top-level statement."""
        return self.visit(node.body[0])

    def visit_Assign(self, node):
        """For ``x = <expr>``, descend into the assigned expression."""
        return self.visit(node.value)

    def visit_Call(self, node):
        """Only ``constraint(...)`` calls are expected; descend into the
        first argument, which wraps the callable of interest."""
        assert (node.func.id == 'constraint')
        return self.visit(node.args[0])

    def visit_Lambda(self, node):
        """A lambda is itself the definition being searched for."""
        return node

    def visit_FunctionDef(self, node):
        """A ``def`` is itself the definition being searched for."""
        return node
# NOTE(review): '.parametrize(...)' looks like a mangled
# '@pytest.mark.parametrize(...)' decorator — confirm.
.parametrize('domain, expected', [('known.domain', "'1.2.3.4'"), ('bogus.domain.foobar', 'null')])
def test_dnsResolve(monkeypatch, domain, expected):
    """PAC dnsResolve(): a resolvable host yields its IPv4 string, an
    unresolvable one yields null."""
    def mock_fromName(host):
        # Only 'known.domain' gets an address; any other host resolves empty.
        info = QHostInfo()
        if (host == 'known.domain'):
            info.setAddresses([QHostAddress('1.2.3.4')])
        return info
    monkeypatch.setattr(QHostInfo, 'fromName', mock_fromName)
    _pac_equality_test("dnsResolve('{}')".format(domain), expected)
class AutoRating(EventPlugin):
    """Event plugin that nudges a song's rating after every playback:
    toward 1.0 on a full play, toward 0.0 on a skip."""

    PLUGIN_ID = 'Automatic Rating'
    PLUGIN_NAME = _('Automatic Rating')
    PLUGIN_DESC = _("Rates songs automatically when they are played or skipped. This uses the 'accelerated' algorithm from vux (Vacillating Utilitarian eXtemporizer) by Brian Nelson.")
    PLUGIN_ICON = Icons.USER_BOOKMARKS

    def plugin_on_song_ended(self, song, skipped):
        """Adjust the song's rating by half the distance to the nearer
        rating bound (0 or 1), downward when skipped, upward otherwise."""
        if song is None:
            return
        rating = song('~#rating')
        # Step size: half the distance to whichever extreme is closer,
        # so ratings approach but never overshoot 0.0 / 1.0.
        delta = min(rating, 1.0 - rating) / 2.0
        song['~#rating'] = rating - delta if skipped else rating + delta
def distribution() -> Optional[DistributionInfo]:
    """Detect the running Linux distribution from os-release data.

    Returns ``None`` when no os-release information is available; otherwise
    a ``DistributionInfo`` with the parsed ``Distribution`` enum member (or
    ``unknown``), a human-readable name, and the raw distro id.
    """
    info = _parse_os_release()
    if info is None:
        return None
    # Prefer PRETTY_NAME unless it is missing or the useless generic "Linux".
    pretty = info.get('PRETTY_NAME', None)
    if pretty is None or pretty == 'Linux':
        pretty = info.get('NAME', 'Unknown')
    assert pretty is not None
    dist_id = info.get('ID', None)
    # Map derivative distros onto the base distro we actually handle.
    aliases = {'funtoo': 'gentoo', 'artix': 'arch', 'org.kde.Platform': 'kde_flatpak'}
    candidates = []
    if dist_id is not None:
        candidates.append(aliases.get(dist_id, dist_id))
    # ID_LIKE lists space-separated parent distros as fallbacks.
    candidates.extend(info.get('ID_LIKE', '').split())
    parsed = Distribution.unknown
    for candidate in candidates:
        try:
            parsed = Distribution[candidate]
        except KeyError:
            continue
        break
    return DistributionInfo(parsed=parsed, pretty=pretty, id=dist_id)
(symbol('['))
def led(self, left, parser):
    """Parse a bracketed expression list following *left* (e.g. ``x[a, b]``).

    Collects comma-separated expressions into ``self.second`` until the
    closing ``]`` (a trailing comma is tolerated), stores *left* in
    ``self.first``, consumes the ``]``, and returns this token node.
    """
    self.first = left
    self.second = []
    while parser.token.name != ']':
        self.second.append(parser.expression())
        # No comma after the expression means the list is finished.
        if parser.token.name != ',':
            break
        parser.advance(',')
    parser.advance(']')
    return self
class BeamCandidate():
def __init__(self, args, all_entities_dict, infer_attributes_string, outline, model=None, opt_model=None, controllers=None, step=0, alignment_score=(- .0), best_alignment_so_far=(- .0), alignment_history=None, all_paragraphs=None, outline_sections=None, detailed_outline_section_history=None, paragraphs_by_outline_section=None):
self.args = args
self.all_entities_dict = all_entities_dict
self.infer_attributes_string = infer_attributes_string
self.outline = outline
self.model = model
self.opt_model = opt_model
self.controllers = controllers
self.step = step
self.alignment_score = alignment_score
self.best_alignment_so_far = best_alignment_so_far
self.alignment_history = (alignment_history if (alignment_history is not None) else [])
self.all_paragraphs = (all_paragraphs if (all_paragraphs is not None) else [])
self.outline_sections = (outline_sections if (outline_sections is not None) else [])
self.detailed_outline_section_history = (detailed_outline_section_history if (detailed_outline_section_history is not None) else [])
self.paragraphs_by_outline_section = (paragraphs_by_outline_section if (paragraphs_by_outline_section is not None) else {})
self.is_consistent = False
def story(self, demarcated_outline_section=None):
out = ''
for p in self.all_paragraphs:
if ((demarcated_outline_section is not None) and (demarcated_outline_section in self.paragraphs_by_outline_section) and (p in self.paragraphs_by_outline_section[demarcated_outline_section])):
out += (('<SECTION START>' + p) + '<SECTION END>')
else:
out += p
return out
def previous_passage(self, max_tokens, suffix=None):
if (len(self.all_paragraphs) == 0):
return ''
passage = self.story()
if (len(self.story().strip()) == 0):
return ''
if (suffix is not None):
passage = passage[:(len(passage) - len(suffix))].rstrip()
if (len(passage.strip()) == 0):
return ''
passage = self.model.tokenizer.decode(self.model.tokenizer.encode(passage)[(- max_tokens):])
return cut_first_sentence(passage)
def print_section(self, section_idx):
return ''.join(self.paragraphs_by_outline_section[self.outline_sections[section_idx]])
def create_updated_entities(self, new_passage, cached_update_dict=None):
new_entities_dict = {k: v for (k, v) in self.all_entities_dict.items()}
entities = [str(ent) for ent in detect_entities(new_passage)]
(matched_entities, new_entities, _) = deduplicate_match_entities(entities, self.all_entities_dict.keys())
new_entities_dict = {k: v for (k, v) in self.all_entities_dict.items()}
for ent in new_entities:
entity = Entity(ent)
entity.infer_description(new_passage, self.model, max_length=self.args.entity_description_max_length)
entity.infer_is_character(new_passage, self.model)
entity.infer_attributes(new_passage, self.model, other_names=([name for name in matched_entities if (name != entity.name)] + [name for name in new_entities if (name != entity.name)]))
new_entities_dict[ent] = entity
for ent in matched_entities:
if ((cached_update_dict is not None) and (ent in cached_update_dict)):
new_entities_dict[ent] = cached_update_dict[ent]
else:
new_entities_dict[ent].infer_attributes(new_passage, self.model, other_names=([name for name in matched_entities if (name != ent)] + list(new_entities)), detect_contradictions=False)
complete_mutual_relations(new_entities_dict, self.model)
return new_entities_dict
def detect_attribute_contradictions(self, completion, detect_contradictions=True):
(matched_entities, new_entities, _) = deduplicate_match_entities(detect_entities(completion, add_dpr_entities=False, all_entities_dict=self.all_entities_dict), self.all_entities_dict.keys())
matched_entities = list(matched_entities)
contradictions = {}
cached_update_dict = {}
copied_entities = {k: v for (k, v) in self.all_entities_dict.items()}
for ent in matched_entities:
entity = copied_entities[ent]
contradictions[ent] = entity.infer_attributes(completion, self.model, detect_contradictions=detect_contradictions, other_names=([name for name in matched_entities if (name != entity.name)] + list(new_entities)))
cached_update_dict[ent] = entity
(_, additional_contradictions) = complete_mutual_relations(copied_entities, self.model)
for ent in additional_contradictions:
for key in additional_contradictions[ent]:
if (ent not in contradictions):
contradictions[ent] = {}
contradictions[ent][key] = additional_contradictions[ent][key]
return (matched_entities, contradictions, cached_update_dict)
def condense_outline_sections(self, outline, section_list, i):
logging.log(23, 'CONDENSING OUTLINE')
logging.log(23, 'BEFORE')
logging.log(23, str([n.text for n in self.outline_sections]))
current_leaf = section_list[i]
assert (current_leaf.text in self.outline_sections[(- 1)].text)
keep_nodes = ([current_leaf] if (i == 0) else [current_leaf, section_list[(i - 1)]])
self.outline_sections = [node for node in outline.collapse_around(keep_nodes, up_to=current_leaf)]
logging.log(23, 'AFTER')
logging.log(23, str([n.text for n in self.outline_sections]))
def construct_prompt(self, node, selected_entities=[]):
presumed_max_prompt_length = (self.args.max_context_length - self.args.max_tokens)
if (len(self.all_paragraphs) == 0):
prompt = (((('Premise: ' + self.all_entities_dict['Premise'].description.replace('Premise:', '').strip()) + ' ') + self.all_entities_dict['Setting'].description.replace('Setting:', '').strip()) + '\n\n\n\n')
else:
prompt = ''
parent_texts = []
current_node = node
current_scene = current_node.scene
while True:
parent = current_node.parent
if ((parent is not None) and (len(parent.text.strip()) > 0)):
parent_texts.append(parent.text)
current_node = parent
else:
break
if (len(parent_texts) > 0):
parent_texts = reversed(parent_texts)
prompt = (('Premise: ' + ' '.join(parent_texts)) + '\n\n\n\n')
tokenizer = (self.opt_model.tokenizer if (self.opt_model is not None) else self.model.tokenizer)
prompt += 'This book was authored by a well-known novelist, and received glowing reviews from critics, who praised the interesting dialogue and interactions between characters.'
if (len(selected_entities) > 0):
selected_entity_strings = []
for ent in selected_entities:
desc = self.all_entities_dict[ent].get_outline_description_up_to_node(node, max_tokens=128, tokenizer=tokenizer)
assert (len(tokenizer.encode(desc)) <= 128)
selected_entity_strings.append(desc)
while (sum([len(tokenizer.encode(desc)) for desc in selected_entity_strings]) > self.args.max_entity_context_tokens):
selected_entity_strings = selected_entity_strings[:(- 1)]
logging.log(22, ('SELECTED ENTITIES: ' + str(selected_entities)))
logging.log(22, ('SELECTED ENTITY STRINGS: ' + str(selected_entity_strings)))
prompt += ('\n\n\n\nRelevant Context:\n\n' + '\n\n'.join(selected_entity_strings))
else:
logging.warning('No selected entities')
if (self.step > 1):
prompt += ('\n\n\n\nPrevious story summary: ' + ' '.join([n.text for n in self.outline_sections[:(- 1)]]))
previous_text = self.previous_passage(self.args.previous_prompt_length)
if (len(self.all_paragraphs) > 0):
previous_passage = self.previous_passage(int((self.args.max_context_length / 2)), suffix=previous_text)
if (len(tokenizer.encode(previous_passage)) > int((self.args.max_context_length / 4))):
max_preceding_summary_tokens = int((self.args.previous_prompt_length / 2))
preceding_summary = self.model([(previous_passage + '\n\nSummarize the events in this passage.')], max_tokens=max_preceding_summary_tokens, model_string='text-curie-001', cut_sentence=True)[0].strip().replace('\n\n', ' ')
if (len(tokenizer.encode(preceding_summary)) == max_preceding_summary_tokens):
logging.warning('Warning: preceding events summary is too long, truncating')
prompt += ('\n\n\n\nEvents immediately prior to the upcoming passage: ' + preceding_summary)
next_node = node.successor()
if ((next_node is not None) and self.args.include_future_context):
next_text = (' ' + next_node.text.strip())
else:
next_text = ''
if (self.step == 1):
prompt += (('\n\n\n\nChapter 1 Summary: ' + node.text.strip()) + next_text)
else:
previous_node = self.detailed_outline_section_history[(- 2)]
previous_scene = previous_node.scene
(previous_text_entities, _, _) = deduplicate_match_entities(detect_entities(previous_text), self.all_entities_dict.keys())
prompt += (('\n\n\n\nThe characters currently in the scene are ' + ', '.join((list(previous_text_entities) + [e for e in previous_node.selected_entities if (e not in previous_text_entities)]))) + '.')
prompt += (((('\n\n\n\nIn the upcoming passage, ' + self.detailed_outline_section_history[(- 2)].text) + ' ') + node.text.strip()) + next_text)
logging.log(22, ('PREVIOUS SCENE: ' + str(previous_scene)))
logging.log(22, ('CURRENT SCENE: ' + str(current_scene)))
if (not is_same_scene(previous_scene, current_scene)):
prompt += ((('\n\n\n\nThis part of the story initially takes place in ' + previous_scene) + ' The characters then move to ') + current_scene)
else:
prompt += ('\n\n\n\nThis part of the story takes place in ' + current_scene)
prompt += '\n\n\n\nFull text below:\n\n\n\n'
if (len(self.all_paragraphs) == 0):
prompt = (prompt + 'Chapter 1\n\n')
prompt = (prompt + previous_text)
prompt = prompt.replace('\n\n\n\n', '\n\n')
if (len(tokenizer.encode(prompt)) > presumed_max_prompt_length):
logging.log(22, 'WARNING: CONTEXT TOO LONG, TRUNCATING')
prompt = tokenizer.decode(tokenizer.encode(prompt)[(- presumed_max_prompt_length):])
return prompt
_grad()
def edit_update_contradictions(self):
assert (not self.is_consistent)
completion = self.all_paragraphs[(- 1)]
autoregressive_context = (self.all_paragraphs[(- 2)].lstrip(string.punctuation) if (len(self.all_paragraphs) > 1) else '')
(matched_entities, contradictions, cached_update_dict) = self.detect_attribute_contradictions(completion.strip(), detect_contradictions=True)
edited_sentences = set()
if (any([(len(contradictions[ent]) > 0) for ent in matched_entities]) and (len(autoregressive_context) > 0)):
logging.log(23, 'editing completion based on contradictions')
logging.log(23, ('AUTOREGRESSIVE CONTEXT ' + autoregressive_context))
logging.log(23, ('BEFORE ' + completion))
for ent in matched_entities:
for contradiction_key in contradictions[ent]:
for contradicted_sentence in contradictions[ent][contradiction_key][0]['text'].strip().split('\n'):
if (contradicted_sentence in edited_sentences):
continue
edited_sentences.add(contradicted_sentence)
instruction = (('Edit so that ' + contradicted_sentence) + ' Keep the text unchanged as much as possible.')
logging.log(23, ('INSTRUCTION ' + instruction))
completion = gpt3_edit(completion, instruction, prefix=(None if (len(autoregressive_context.strip()) == 0) else autoregressive_context)).strip()
if (len(self.model.tokenizer.encode(completion)) > (self.args.max_tokens + 64)):
logging.warning('WARNING: completion is too long after editing. Truncating...')
completion = self.model.tokenizer.decode(self.model.tokenizer.encode(completion)[:(self.args.max_tokens + 64)])
completion = cut_last_sentence(completion)
logging.log(23, ('AFTER ' + completion))
(_, _, cached_update_dict) = self.detect_attribute_contradictions(completion.strip(), detect_contradictions=False)
self.all_paragraphs[(- 1)] = completion
self.paragraphs_by_outline_section[self.outline_sections[(- 1)]][(- 1)] = completion
self.all_entities_dict = self.create_updated_entities(completion.strip(), cached_update_dict=cached_update_dict)
self.is_consistent = True
_grad()
def extend(self, node):
logging.log(25, ('extension step ' + str(self.step)))
logging.log(23, ('outline section: ' + node.text))
self.step += 1
self.alignment_score = (- .0)
self.best_alignment_so_far = (- .0)
self.alignment_history = []
self.outline_sections.append(node)
self.detailed_outline_section_history.append(node)
self.paragraphs_by_outline_section[node] = []
completed_candidates = []
beam = [self]
substep = 0
while (len(completed_candidates) < self.args.max_beam_size):
logging.log(25, ('substep ' + str(substep)))
next_candidates = []
for (beam_idx, prev_candidate) in enumerate(beam):
candidates = []
for candidate in prev_candidate.extend_single(node, batch_size=self.args.max_candidates, top_p=self.args.draft_top_p, substep=substep):
candidates.append(candidate)
logging.log(25, ((('beam idx ' + str(beam_idx)) + ' single extension with score ') + str(candidates[(- 1)].alignment_score)))
if (max([c.alignment_score for c in candidates]) < self.args.skip_threshold):
for candidate in prev_candidate.extend_single(node, batch_size=self.args.max_candidates, top_p=self.args.draft_top_p, substep=substep):
candidates.append(candidate)
logging.log(25, ((('beam idx ' + str(beam_idx)) + ' extra single extension with score ') + str(candidates[(- 1)].alignment_score)))
candidates = sorted(candidates, key=(lambda x: x.alignment_score), reverse=True)
logging.log(25, ((('best candidate with score ' + str(candidates[0].alignment_score)) + ':\n') + candidates[0].all_paragraphs[(- 1)]))
if ((candidates[0].alignment_score < (prev_candidate.best_alignment_so_far - self.args.continuation_threshold)) and (prev_candidate.best_alignment_so_far >= self.args.early_stop_threshold)):
logging.log(25, ((((('beam idx ' + str(beam_idx)) + ' adding completed candidate with early stop score ') + str(prev_candidate.alignment_score)) + ' and best alignment score ') + str(prev_candidate.best_alignment_so_far)))
assert (self.args.no_editor or prev_candidate.is_consistent)
completed_candidates.append(prev_candidate)
elif (candidates[0].alignment_score < self.args.skip_threshold):
logging.log(25, ((((('beam idx ' + str(beam_idx)) + ' adding acceptable candidate with score ') + str(prev_candidate.alignment_score)) + ' and best alignment score ') + str(prev_candidate.best_alignment_so_far)))
assert (self.args.no_editor or prev_candidate.is_consistent)
completed_candidates.append(prev_candidate)
else:
if (candidates[0].alignment_score < prev_candidate.best_alignment_so_far):
logging.log(25, 'continuation with slightly worse score')
next_candidates.extend(candidates)
next_candidates = sorted(next_candidates, key=(lambda x: x.alignment_score), reverse=True)[:self.args.max_beam_size]
beam = next_candidates
if (len(completed_candidates) > 0):
beam = []
if (not self.args.no_editor):
for c in beam:
c.edit_update_contradictions()
substep += 1
if (substep >= self.args.max_continuation_substeps):
for c in beam:
logging.log(25, ((((('beam idx ' + str(beam_idx)) + ' adding completed candidate with score ') + str(c.alignment_score)) + ' and best alignment score ') + str(c.best_alignment_so_far)))
assert (self.args.no_editor or c.is_consistent)
completed_candidates.append(c)
break
completed_candidates = [c for c in completed_candidates if (c is not None)]
if (len(completed_candidates) == 0):
completed_candidates = [self]
return sorted(completed_candidates, key=(lambda x: ((x.best_alignment_so_far * 10000) + x.alignment_score)), reverse=True)[:self.args.max_beam_size]
def calculate_alignment(self, completions, prompt, node):
if (self.args.max_candidates == 1):
return np.zeros(len(completions))
unstripped_completions = completions
completions = [c.strip() for c in completions]
repetition_penalty = np.array([calculate_repetition_length_penalty(c, [prompt]) for c in completions])
last_prompt_paragraph = split_paragraphs(prompt, mode='newline')[(- 1)]
is_first_person = np.array([(1 if (detect_first_second_person((last_prompt_paragraph + c)) - detect_first_second_person(last_prompt_paragraph)) else 0) for c in completions])
repetition_penalty += is_first_person
alignment_score = 0
if (self.args.controller[0] == 'longformer_classifier'):
previous_outline_section = (self.detailed_outline_section_history[(- 2)] if (len(self.detailed_outline_section_history) > 1) else None)
if ((previous_outline_section is not None) and (len(self.paragraphs_by_outline_section[previous_outline_section]) > 0)):
previous_text = self.paragraphs_by_outline_section[previous_outline_section][(- 1)]
else:
previous_text = ''
alignment_input = [create_prefix_completion((''.join(self.paragraphs_by_outline_section[node]) + c), node.text)[1] for c in unstripped_completions]
prefix_alignment_input = [create_prefix_completion(((previous_text + ''.join(self.paragraphs_by_outline_section[node])) + c), node.text)[1] for c in unstripped_completions]
logging.log(22, ('prefix alignment input 0: ' + str(prefix_alignment_input[0])))
else:
raise NotImplementedError
relevance_scores = self.controllers[0].evaluate_overall_texts(alignment_input).cpu().numpy()
logging.log(22, ('relevance scores: ' + str([('%.2f' % score) for score in relevance_scores])))
prefix_relevance_scores = self.controllers[0].evaluate_overall_texts(prefix_alignment_input).cpu().numpy()
logging.log(22, ('prefix relevance scores: ' + str([('%.2f' % score) for score in prefix_relevance_scores])))
relevance_scores = np.array([min(rs, prs) for (rs, prs) in zip(relevance_scores, prefix_relevance_scores)])
logging.log(22, ('min relevance scores: ' + str([('%.2f' % score) for score in relevance_scores])))
alignment_score += (relevance_scores * self.args.control_strength[0])
current_node = node
if (len(self.detailed_outline_section_history) > 1):
previous_node = self.detailed_outline_section_history[(- 2)]
extra_relevance_strings = []
if (not is_same_scene(current_node.scene, previous_node.scene)):
extra_relevance_strings.append((('The characters move to ' + current_node.scene), (self.args.control_strength[0] * 0.5)))
for character in current_node.selected_entities:
if (character not in previous_node.selected_entities):
extra_relevance_strings.append(((character + ' enters the scene.'), (self.args.control_strength[0] * 0.2)))
for (ers, cs) in extra_relevance_strings:
logging.log(22, ('scene/char relevance string: ' + ers))
extra_alignment_input = [create_prefix_completion((''.join(self.paragraphs_by_outline_section[node]) + c), ers)[1] for c in unstripped_completions]
extra_prefix_alignment_input = [create_prefix_completion(((previous_text + ''.join(self.paragraphs_by_outline_section[node])) + c), ers)[1] for c in unstripped_completions]
extra_relevance_scores = self.controllers[0].evaluate_overall_texts(extra_alignment_input).cpu().numpy()
logging.log(22, ('scene/char relevance scores: ' + str([('%.2f' % score) for score in extra_relevance_scores])))
extra_prefix_relevance_scores = self.controllers[0].evaluate_overall_texts(extra_prefix_alignment_input).cpu().numpy()
logging.log(22, ('scene/char prefix relevance scores: ' + str([('%.2f' % score) for score in extra_prefix_relevance_scores])))
extra_relevance_scores = np.array([min(rs, prs) for (rs, prs) in zip(extra_relevance_scores, extra_prefix_relevance_scores)])
logging.log(22, ('min scene/char relevance scores: ' + str([('%.2f' % score) for score in extra_relevance_scores])))
alignment_score += (extra_relevance_scores * cs)
if (len(self.story().strip()) > 0):
coherence_scores = self.controllers[1]([self.previous_passage(1000) for _ in range(len(completions))], completions).cpu().numpy()
logging.log(22, ('coherence scores: ' + str([('%.2f' % score) for score in coherence_scores])))
alignment_score += (coherence_scores * self.args.control_strength[1])
else:
alignment_score += ((- 1) * self.args.control_strength[1])
logging.log(22, ('repetition: ' + str([('%.2f' % score) for score in ((- repetition_penalty) * self.args.repetition_penalty_weight)])))
alignment_score += ((- repetition_penalty) * self.args.repetition_penalty_weight)
return alignment_score
def extend_single(self, node, batch_size=1, top_p=None, substep=0):
if ((self.args.generation_outline_levels is not None) and (self.args.generation_outline_levels == 1)):
assert (self.step == len(self.outline_sections))
selected_entities = node.selected_entities
prompt = self.construct_prompt(node, selected_entities=selected_entities)
logging.log(22, 'PROMPT')
logging.log(22, prompt)
if (self.args.extension_method == 'gpt3'):
completions = self.model([prompt], model_string=self.args.draft_model_string, num_completions=batch_size, top_p=top_p, temperature=self.args.summarizer_temperature, cut_sentence=self.args.cut_sentence, logit_bias={50256: (- 100)})
elif (self.args.extension_method == 'opt-control'):
current_control_strength = min((self.args.control_strength[2] + (substep * self.args.control_strength_substep_increment)), self.args.max_control_strength)
exclude_strings = (((stopwords.words('english') + list('!"\'(),-.:;?')) + ['\n', '\n\n']) + selected_entities)
assert ('\n\nFull text below:\n\n' in prompt)
previous_paragraph = prompt.split('\n\nFull text below:\n\n')[(- 1)].strip()
opt_control_logit_bias = self.opt_model.create_logit_bias_for_prompt(previous_paragraph, bias=(- self.args.summarizer_frequency_penalty), decay=self.args.summarizer_frequency_penalty_decay)
prompt_logit_bias_string = prompt[:(len(prompt) - len(previous_paragraph))]
for character in self.all_entities_dict:
prompt_logit_bias_string = prompt_logit_bias_string.replace(self.all_entities_dict[character].description, '')
opt_control_logit_bias_prompt = self.opt_model.create_logit_bias_for_prompt(prompt_logit_bias_string, bias=(- self.args.summarizer_prompt_penalty), exclude_strings=exclude_strings)
for key in opt_control_logit_bias_prompt:
if (key in opt_control_logit_bias):
opt_control_logit_bias[key] = min(opt_control_logit_bias[key], opt_control_logit_bias_prompt[key])
else:
opt_control_logit_bias[key] = opt_control_logit_bias_prompt[key]
opt_control_logit_bias[2] = (- .0)
current_controllers = [self.controllers[2]]
current_control_texts = [concatenate_summary_text(node.text, '')]
current_control_strengths = [current_control_strength]
current_node = node
if (len(self.detailed_outline_section_history) > 1):
previous_node = self.detailed_outline_section_history[(- 2)]
if (not is_same_scene(current_node.scene, previous_node.scene)):
current_controllers.append(self.controllers[2])
current_control_texts.append(concatenate_summary_text(('The characters move to ' + current_node.scene), ''))
current_control_strengths.append((current_control_strength * 0.5))
(section_entities, _, _) = deduplicate_match_entities(detect_entities(''.join(self.paragraphs_by_outline_section[node])), self.all_entities_dict.keys(), prioritized_names=current_node.selected_entities)
logging.log(22, ('section entities: ' + str(section_entities)))
for character in current_node.selected_entities:
if ((character not in previous_node.selected_entities) and (character not in section_entities)):
current_controllers.append(self.controllers[2])
current_control_texts.append(concatenate_summary_text((character + ' enters the scene.'), ''))
current_control_strengths.append((current_control_strength * 0.2))
logging.log(22, ('control texts: ' + str(current_control_texts)))
logging.log(22, ('control strengths' + str(current_control_strengths)))
completions = self.opt_model.generate_with_controller(current_controllers, current_control_texts, prompt, control_strengths=current_control_strengths, max_tokens=self.args.max_tokens, temperature=self.args.opt_summarizer_temperature, logit_bias=opt_control_logit_bias, num_completions=batch_size, cut_sentence=self.args.cut_sentence, logit_bias_decay=self.args.summarizer_frequency_penalty_decay)
else:
raise NotImplementedError
for i in range(len(completions)):
logging.log(22, ('COMPLETION: ' + completions[i]))
while ('\n\n\n' in completions[i]):
completions[i] = completions[i].replace('\n\n\n', '\n\n')
for i in range(len(completions)):
(_, _, replacements) = deduplicate_match_entities(detect_entities(completions[i].strip()), self.all_entities_dict.keys())
if (not self.args.no_editor):
for (key, value) in replacements.items():
completions[i] = completions[i].replace(key, value)
alignment_score = self.calculate_alignment(completions, prompt, node)
new_candidates = []
for (c, s) in zip(completions, alignment_score):
new_paragraphs_by_outline_section = deepcopy(self.paragraphs_by_outline_section)
new_paragraphs_by_outline_section[node].append(c)
new_candidates.append(BeamCandidate(self.args, self.all_entities_dict, self.infer_attributes_string, self.outline, model=self.model, opt_model=self.opt_model, controllers=self.controllers, step=self.step, alignment_score=s, best_alignment_so_far=max(s, self.best_alignment_so_far), alignment_history=(self.alignment_history + [s]), all_paragraphs=(deepcopy(self.all_paragraphs) + [c]), outline_sections=[o for o in self.outline_sections], detailed_outline_section_history=[o for o in self.detailed_outline_section_history], paragraphs_by_outline_section=new_paragraphs_by_outline_section))
return new_candidates
def complete_ending(self):
    """Generate the story's ending for the final outline section.

    Samples ending candidates via GPT-3 insertion against an explicit end
    marker, keeps the candidate with the best alignment score, and wraps it
    in a new BeamCandidate.

    Returns:
        (BeamCandidate, bool): the updated candidate, and whether generation
        used every available token (i.e. the ending was likely truncated and
        the story should continue).
    """
    node = self.outline_sections[(- 1)]
    if (node not in self.paragraphs_by_outline_section):
        self.paragraphs_by_outline_section[node] = []
    selected_entities = node.selected_entities
    prompt = self.construct_prompt(node, selected_entities=selected_entities)
    # Insert text before the end-of-story marker so the model writes toward it.
    completions = gpt3_insert(prompt, ('\n\n\n\n' + GPT3_END), top_p=self.args.draft_top_p, temperature=self.args.summarizer_temperature, n=self.args.max_candidates, max_tokens=self.args.max_tokens, frequency_penalty=self.args.summarizer_frequency_penalty, presence_penalty=self.args.summarizer_presence_penalty)
    # Collapse the end-marker padding back down to a paragraph break.
    completions = [c.replace('\n\n\n\n', '\n\n') for c in completions]
    alignment_score = self.calculate_alignment(completions, prompt, node)
    logging.log(23, ('ENDING ALIGNMENT SCORES ' + str(alignment_score)))
    # Rank candidates by alignment score, best first.
    ranked_completions = sorted(zip(completions, alignment_score), key=(lambda x: x[1]), reverse=True)
    ending = ranked_completions[0][0]
    # Hitting the token limit exactly suggests the ending got cut off mid-thought.
    should_continue = (len(self.model.tokenizer.encode(ending)) == self.args.max_tokens)
    ending = cut_last_sentence(ending)
    logging.log(23, (('ENDING' + ' ') + ending))
    new_paragraphs_by_outline_section = deepcopy(self.paragraphs_by_outline_section)
    new_paragraphs_by_outline_section[node].append(ending)
    new_candidate = BeamCandidate(self.args, self.all_entities_dict, self.infer_attributes_string, self.outline, model=self.model, opt_model=self.opt_model, controllers=self.controllers, step=self.step, alignment_score=self.alignment_score, best_alignment_so_far=self.best_alignment_so_far, alignment_history=(self.alignment_history + [self.alignment_score]), all_paragraphs=(deepcopy(self.all_paragraphs) + [ending]), outline_sections=[o for o in self.outline_sections], detailed_outline_section_history=[o for o in self.detailed_outline_section_history], paragraphs_by_outline_section=new_paragraphs_by_outline_section)
    if (not self.args.no_editor):
        # Run the contradiction editor unless explicitly disabled.
        new_candidate.edit_update_contradictions()
    return (new_candidate, should_continue)
# NOTE(review): the leading ".supported(...)" line appears to be a
# "@pytest.mark.supported(...)" decorator whose "@pytest.mark" prefix was
# lost in extraction — restore the prefix before running.
.supported(only_if=(lambda backend: backend.cipher_supported(algorithms._SEEDInternal((b'\x00' * 16)), modes.CBC((b'\x00' * 16)))), skip_message='Does not support SEED CBC')
class TestSEEDModeCBC():
    # Table-driven encrypt tests for SEED/CBC, generated from the RFC 4196 vectors.
    test_cbc = generate_encrypt_test(load_nist_vectors, os.path.join('ciphers', 'SEED'), ['rfc-4196.txt'], (lambda key, **kwargs: algorithms._SEEDInternal(binascii.unhexlify(key))), (lambda iv, **kwargs: modes.CBC(binascii.unhexlify(iv))))
class KeyAccessLoggerAtomicDB(BaseAtomicDB):
    """Atomic-DB wrapper that records every key read through it.

    Useful for auditing which parts of the database a piece of code touched;
    all operations are delegated to the wrapped database.
    """

    logger = logging.getLogger('eth.db.KeyAccessLoggerAtomicDB')

    def __init__(self, wrapped_db: AtomicDatabaseAPI, log_missing_keys: bool=True) -> None:
        # Database that every read/write/delete is forwarded to.
        self.wrapped_db = wrapped_db
        self._keys_read: Set[bytes] = set()
        # When True, lookups that miss are recorded as reads too.
        self._log_missing_keys = log_missing_keys

    def keys_read(self) -> FrozenSet[bytes]:
        """Return an immutable snapshot of every key read so far."""
        return frozenset(self._keys_read)

    def __getitem__(self, key: bytes) -> bytes:
        try:
            value = self.wrapped_db.__getitem__(key)
        except KeyError:
            # Missing keys are only tracked when requested at construction.
            if self._log_missing_keys:
                self._keys_read.add(key)
            raise
        self._keys_read.add(key)
        return value

    def __setitem__(self, key: bytes, value: bytes) -> None:
        self.wrapped_db[key] = value

    def __delitem__(self, key: bytes) -> None:
        del self.wrapped_db[key]

    def _exists(self, key: bytes) -> bool:
        found = key in self.wrapped_db
        # A membership probe counts as a read (hits always; misses optionally).
        if found or self._log_missing_keys:
            self._keys_read.add(key)
        return found

    def atomic_batch(self) -> Iterator[AtomicWriteBatchAPI]:
        # Delegate batching to the wrapped DB; reads on the batch itself
        # are not tracked here.
        with self.wrapped_db.atomic_batch() as batch:
            yield batch
class UpdateWorker(QRunnable):
    """Background worker that fetches historical exchange rates for a base currency.

    Emits per-offset rate dicts and progress percentages via WorkerSignals and
    supports cooperative cancellation through the signals' `cancel` signal.
    """

    signals = WorkerSignals()
    # Cooperative cancellation flag, checked once per request in run().
    is_interrupted = False

    def __init__(self, base_currency):
        super(UpdateWorker, self).__init__()
        self.base_currency = base_currency
        self.signals.cancel.connect(self.cancel)
        # NOTE(review): the bare "()" below and the truncated `url = '` line in
        # run() look like artifacts of a lossy extraction (likely a stripped
        # decorator/URL) — confirm against the original source.
        ()

    def run(self):
        try:
            today = date.today()
            total_requests = len(DATE_REQUEST_OFFSETS)
            # Offsets are days back from today; enumerate from 1 for progress math.
            for (n, offset) in enumerate(DATE_REQUEST_OFFSETS, 1):
                when = (today - timedelta(days=offset))
                url = '
                r = requests.get(url, params={'base': self.base_currency})
                r.raise_for_status()
                data = r.json()
                rates = data['rates']
                # The base currency's own rate is defined as 1.0.
                rates[self.base_currency] = 1.0
                self.signals.data.emit(offset, rates)
                self.signals.progress.emit(int(((100 * n) / total_requests)))
                if (not r.from_cache):
                    # Only throttle when the response actually hit the network.
                    time.sleep(1)
                if self.is_interrupted:
                    break
        except Exception as e:
            print(e)
            # Forward the exception details to the GUI thread via the error signal.
            (exctype, value) = sys.exc_info()[:2]
            self.signals.error.emit((exctype, value, traceback.format_exc()))
            return
        self.signals.finished.emit()

    def cancel(self):
        # Request cooperative cancellation; honored between requests in run().
        self.is_interrupted = True
def fetch_data(collection, patient_ids=None, modalities=None, nifti=True, output_directory='./tcia'):
    """Download DICOM series from TCIA for a collection, optionally converting to NIfTI.

    Args:
        collection: TCIA collection name.
        patient_ids: patients to fetch; all patients in the collection when falsy.
        modalities: modalities to fetch; all available modalities when None.
            Raises ValueError if any requested modality is unavailable.
        nifti: when True, convert each patient's DICOM tree to NIfTI as well.
        output_directory: root directory; data lands under <root>/<collection>.

    Returns:
        dict mapping patient id -> {'DICOM': {modality: {series_uid: path}},
        'NIFTI': ...} describing where the data was written.
    """
    result = {}
    if isinstance(output_directory, str):
        output_directory = Path(output_directory)
    output_directory = output_directory.joinpath(collection)
    output_directory.mkdir(exist_ok=True, parents=True)
    modalities_available = get_modalities_in_collection(collection)
    logger.debug('Modalities available: %s', modalities_available)
    if modalities is None:
        logger.debug('Will fetch all modalities in collection')
        modalities = modalities_available
    else:
        # Validate every requested modality up front so we fail before downloading.
        missing = [m for m in modalities if m not in modalities_available]
        for modality in missing:
            logger.error('Modality not available in collection: %s', modality)
        if missing:
            raise ValueError("Modalities aren't all available in collection")
    if not patient_ids:
        patient_ids = get_patients_in_collection(collection)
    for pid in patient_ids:
        patient_directory = output_directory.joinpath(pid)
        dicom_directory = patient_directory.joinpath('DICOM')
        nifti_directory = patient_directory.joinpath('NIFTI')
        result[pid] = {}
        result[pid]['DICOM'] = {}
        logger.debug('Fetching data for Patient: %s', pid)
        for modality in modalities:
            res = requests.get(series_endpoint, params={'Collection': collection, 'PatientID': pid, 'Modality': modality})
            series = json.loads(res.text)
            # BUG FIX: the original tested `modality in result[pid]` (always
            # false, since result[pid] only holds the 'DICOM' key); check the
            # per-modality dict that is actually being initialized.
            if modality not in result[pid]['DICOM']:
                result[pid]['DICOM'][modality] = {}
            for obj in series:
                series_uid = obj['SeriesInstanceUID']
                target_directory = dicom_directory.joinpath(series_uid)
                result[pid]['DICOM'][modality][series_uid] = target_directory
                if target_directory.exists():
                    # Treat an existing directory as already-downloaded data.
                    logger.warning("Series directory exists: %s, won't fetch data", target_directory)
                    continue
                logger.debug('Downloading Series: %s', series_uid)
                target_directory.mkdir(parents=True)
                save_path = target_directory.joinpath(f'{pid}.zip')
                response = requests.get(download_series_endpoint, stream=True, params={'SeriesInstanceUID': obj['SeriesInstanceUID']})
                with open(save_path, 'wb') as file_obj:
                    # Stream in 8 KiB chunks; the default iter_content() yields
                    # one byte at a time, which is pathologically slow.
                    for chunk in response.iter_content(chunk_size=8192):
                        file_obj.write(chunk)
                # Unzip the series in place, then drop the archive.
                with zipfile.ZipFile(save_path, 'r') as zip_ref:
                    zip_ref.extractall(target_directory)
                os.remove(save_path)
        if nifti:
            logger.info('Converting data for %s to Nifti', pid)
            nifti_results = process_dicom_directory(dicom_directory, output_directory=nifti_directory)
            result[pid]['NIFTI'] = nifti_results[pid]
    return result
def read_rank(path):
    """Read a ranking file of "<id> <prob>" lines into two parallel numpy arrays.

    Returns:
        (ids, probs): int ids and float probabilities, in file order.
    """
    ids = []
    probs = []
    with open(path, 'r', encoding='utf-8') as handle:
        # tqdm wraps the file iterator to show read progress.
        for raw_line in tqdm(handle):
            identifier, prob = raw_line.strip('\n').split()
            ids.append(int(identifier))
            probs.append(float(prob))
    return (np.array(ids), np.array(probs))
class ToolManager(QtCore.QObject):
    """Discovers, loads, and tracks pyzo tool plugins (dock widgets)."""

    # Emitted whenever the set of instantiated tools changes.
    toolInstanceChange = QtCore.Signal()

    def __init__(self, parent=None):
        QtCore.QObject.__init__(self, parent)
        self._toolInfo = None   # lazily-built, sorted list of ToolDescription
        self._activeTools = {}  # toolId -> ToolDockWidget for loaded tools

    def loadToolInfo(self):
        """Scan the tool directories and (re)build the list of available tools."""
        # Tools may live in the installation dir or the per-user app-data dir.
        toolDir1 = os.path.join(pyzo.pyzoDir, 'tools')
        toolDir2 = os.path.join(pyzo.appDataDir, 'tools')
        toolfiles = []
        for toolDir in [toolDir1, toolDir2]:
            tmp = [os.path.join(toolDir, f) for f in os.listdir(toolDir)]
            toolfiles.extend(tmp)
        newlist = []
        for file in toolfiles:
            modulePath = file
            if os.path.isdir(file):
                # Package-style tool: read metadata from its __init__.py.
                file = os.path.join(file, '__init__.py')
                if (not os.path.isfile(file)):
                    continue
            elif (file.endswith('__.py') or (not file.endswith('.py'))):
                # Skip dunder files and anything that is not a Python file.
                continue
            elif file.endswith('pyzoFileBrowser.py'):
                # The file browser is handled specially, not as a generic tool.
                continue
            toolName = ''
            toolSummary = ''
            # Only scan the first 50 lines for tool_name / tool_summary metadata.
            linecount = 0
            with open(file, 'rt', encoding='utf-8') as fd:
                for line in fd:
                    linecount += 1
                    if (linecount > 50):
                        break
                    if line.startswith('tool_name'):
                        i = line.find('=')
                        if (i < 0):
                            continue
                        line = line.rstrip('\n').rstrip('\r')
                        line = line[(i + 1):].strip(' ')
                        # NOTE(review): eval of tool-file content; assumed
                        # trusted local plugin files.
                        toolName = eval(line)
                    elif line.startswith('tool_summary'):
                        i = line.find('=')
                        if (i < 0):
                            continue
                        line = line.rstrip('\n').rstrip('\r')
                        line = line[(i + 1):].strip(' ')
                        toolSummary = line.strip("'").strip('"')
                    else:
                        pass
            tmp = ToolDescription(modulePath, toolName, toolSummary)
            newlist.append(tmp)
        # Sort descriptions by id for stable menus, then refresh instance links.
        self._toolInfo = sorted(newlist, key=(lambda x: x.id))
        self.updateToolInstances()
        return self._toolInfo

    def updateToolInstances(self):
        """Attach live widget instances to their descriptions and notify listeners."""
        for toolDes in self.getToolInfo():
            if (toolDes.id in self._activeTools):
                toolDes.instance = self._activeTools[toolDes.id]
            else:
                toolDes.instance = None
        self.toolInstanceChange.emit()

    def getToolInfo(self):
        """Return the tool descriptions, scanning the directories on first use."""
        if (self._toolInfo is None):
            self.loadToolInfo()
        return self._toolInfo

    def getToolClass(self, toolId):
        """Import the tool's module and return its QWidget subclass, or None."""
        if (self._toolInfo is None):
            self.loadToolInfo()
        for toolDes in self._toolInfo:
            if (toolDes.id == toolId):
                moduleName = toolDes.moduleName
                modulePath = toolDes.modulePath
                break
        else:
            print('WARNING: could not find module for tool', repr(toolId))
            return None
        # Drop any previously imported version so a reload picks up changes.
        for key in [key for key in sys.modules]:
            if (key and key.startswith(('pyzo.tools.' + moduleName))):
                del sys.modules[key]
        try:
            modulePyFilepath = modulePath
            if (not modulePath.endswith('.py')):
                modulePyFilepath = os.path.join(modulePyFilepath, '__init__.py')
            # Import the tool module from its file path under the pyzo.tools namespace.
            moduleNameFull = ('pyzo.tools.' + moduleName)
            spec = importlib.util.spec_from_file_location(moduleNameFull, modulePyFilepath)
            mod = importlib.util.module_from_spec(spec)
            sys.modules[moduleNameFull] = mod
            spec.loader.exec_module(mod)
        except Exception as why:
            print((('Invalid tool ' + toolId) + ':'), why)
            return None
        # By convention the tool class name matches the module name (case-insensitive).
        className = ''
        for member in dir(mod):
            if (member.lower() == toolId):
                className = member
                break
        else:
            print(("Invalid tool, Classname must match module name '%s'!" % toolId))
            return None
        plug = mod.__dict__[className]
        if (not (isinstance(plug, type) and issubclass(plug, QtWidgets.QWidget))):
            print('Invalid tool, tool class must inherit from QWidget!')
            return None
        return plug

    def loadTool(self, toolId, splitWith=None):
        """Load (or reload) a tool into a dock widget, optionally splitting with another."""
        toolId = toolId.lower()
        # If already loaded, detach the old widget so it can be replaced cleanly.
        if (toolId in self._activeTools):
            old = self._activeTools[toolId].widget()
            self._activeTools[toolId].setWidget(QtWidgets.QWidget(pyzo.main))
            if old:
                old.close()
                old.deleteLater()
        toolClass = self.getToolClass(toolId)
        if (toolClass is None):
            return
        if (toolId in self._activeTools):
            # Existing dock: just swap the tool class in.
            self._activeTools[toolId].reload(toolClass)
            return
        # Resolve the display name; fall back to the id.
        for toolDes in self._toolInfo:
            if (toolDes.id == toolId):
                name = toolDes.name
                break
        else:
            name = toolId
        # Ensure a per-tool config section exists.
        if (not hasattr(pyzo.config.tools, toolId)):
            pyzo.config.tools[toolId] = ssdf.new()
        dock = ToolDockWidget(pyzo.main, self)
        dock.setTool(toolId, name, toolClass)
        if (splitWith and (splitWith in self._activeTools)):
            otherDock = self._activeTools[splitWith]
            pyzo.main.splitDockWidget(otherDock, dock, QtCore.Qt.Horizontal)
        else:
            pyzo.main.addDockWidget(QtCore.Qt.RightDockWidgetArea, dock)
        self._activeTools[toolId] = dock
        self.updateToolInstances()

    def reloadTools(self):
        """Rescan tool metadata and reload every currently loaded tool."""
        self.loadToolInfo()
        for id in self.getLoadedTools():
            self.loadTool(id)

    def closeTool(self, toolId):
        """Close the tool's dock widget if it is loaded."""
        if (toolId in self._activeTools):
            dock = self._activeTools[toolId]
            dock.close()

    def getTool(self, toolId):
        """Return the tool's widget instance, or None when not loaded."""
        if (toolId in self._activeTools):
            return self._activeTools[toolId].widget()
        else:
            return None

    def onToolClose(self, toolId):
        # Called by the dock when it closes; forget the instance and notify.
        self._activeTools.pop(toolId, None)
        self.updateToolInstances()

    def getLoadedTools(self):
        """Return the ids of currently loaded tools, in description order."""
        tmp = []
        for toolDes in self.getToolInfo():
            if (toolDes.id in self._activeTools):
                tmp.append(toolDes.id)
        return tmp
def _interpret_power_of_two_units(value, unit):
SUFFIXES = ' KMGTPEZY'
if (unit is None):
unit = ' '
if (unit not in SUFFIXES):
raise ValueError(f"Could not interpret unit '{unit}'. Allowed suffixes are 'K', 'M', 'G', 'T', 'P', 'E', 'Z', and 'Y'.")
exponent = SUFFIXES.find(unit)
value *= (1024 ** exponent)
return value |
class DSRCNN_Denoise_BN_Large(nn.Module):
    """Denoising CNN with a reference branch, 1x1 fusion, and a residual BN stack.

    forward() splits the input along the channel dim into 2-channel groups,
    sums each group to one channel, runs the first group through a dedicated
    "reference" conv and the rest through a shared conv, concatenates the 32
    features per group (conv2 expects 128 = 4 groups x 32), then applies a
    3-conv batch-normalized residual block.
    """

    def __init__(self):
        super(DSRCNN_Denoise_BN_Large, self).__init__()
        # Reference branch: processes only the first split group.
        self.conv1_ref = nn.Conv2d(1, 32, 5, padding=2)
        self.relu1_ref = nn.PReLU(num_parameters=32)
        # Shared branch for the remaining split groups.
        self.conv1 = nn.Conv2d(1, 32, 5, padding=2)
        self.relu1 = nn.PReLU(num_parameters=32)
        # 1x1 fusion over the concatenated group features (128 -> 64 channels).
        self.conv2 = nn.Conv2d(128, 64, 1, padding=0)
        self.relu2 = nn.PReLU(num_parameters=64)
        self.conv3 = nn.Conv2d(64, 64, 3, padding=1)
        self.conv3_bn = nn.BatchNorm2d(num_features=64, eps=1e-05, momentum=0.1, affine=True)
        self.relu3 = nn.PReLU(num_parameters=64)
        self.conv4 = nn.Conv2d(64, 64, 3, padding=1)
        self.conv4_bn = nn.BatchNorm2d(num_features=64, eps=1e-05, momentum=0.1, affine=True)
        self.relu4 = nn.PReLU(num_parameters=64)
        self.conv5 = nn.Conv2d(64, 64, 3, padding=1)
        self.conv5_bn = nn.BatchNorm2d(num_features=64, eps=1e-05, momentum=0.1, affine=True)
        # Debug blobs captured during forward() for inspection.
        self.blob_conv1_ref = []
        self.blob_eletsum = []

    def forward(self, x):
        split_dim = 1
        # FIX: pass the split size positionally — newer torch renamed the
        # `split_size` keyword to `split_size_or_sections`, so the old keyword
        # raises TypeError.
        split_x = torch.split(x, 2, dim=split_dim)
        split_sum = torch.sum(split_x[0], dim=split_dim, keepdim=True)
        conv1_x = []
        # FIX: compute the reference-branch activation once; the original ran
        # conv1_ref/relu1_ref twice (once for the debug blob, once for the
        # feature list) with identical results.
        ref_feat = self.relu1_ref(self.conv1_ref(split_sum))
        self.blob_conv1_ref = ref_feat
        conv1_x.append(ref_feat)
        for i in range(1, len(split_x)):
            split_sum = torch.sum(split_x[i], dim=split_dim, keepdim=True)
            conv1_x.append(self.relu1(self.conv1(split_sum)))
        x = torch.cat(conv1_x, dim=split_dim)
        x = self.relu2(self.conv2(x))
        # Residual block: conv -> BN -> PReLU twice, then conv -> BN.
        x_res = self.relu3(self.conv3_bn(self.conv3(x)))
        x_res = self.relu4(self.conv4_bn(self.conv4(x_res)))
        x_res = self.conv5_bn(self.conv5(x_res))
        x = (x + x_res)
        self.blob_eletsum = x
        return x
class BotCommandScopeChatMember(BotCommandScope):
    """Command scope covering a single member of a specific chat."""

    def __init__(self, chat_id: Union[int, str], user_id: Union[int, str]):
        super().__init__('chat_member')
        # Target chat and user, each either a numeric id or a username.
        self.chat_id = chat_id
        self.user_id = user_id

    async def write(self, client: 'pyrogram.Client') -> 'raw.base.BotCommandScope':
        # Resolve the chat peer first, then the user peer (same order as the
        # original keyword-argument evaluation).
        peer = await client.resolve_peer(self.chat_id)
        user = await client.resolve_peer(self.user_id)
        return raw.types.BotCommandScopePeerUser(peer=peer, user_id=user)
def test_get_operation_times():
    """get_operation_times() parses the TIMERS? response into per-counter minutes.

    NOTE(review): the protocol strings below read 'PSU Time = Mins' with no
    digits while the expected dict has concrete values — the numbers appear to
    have been stripped during extraction; restore them before running.
    """
    with expected_protocol(Fpu60, [('TIMERS?', 'PSU Time = Mins'), (None, 'Laser Enabled Time = Mins'), (None, 'Laser Threshold Time = Mins'), (None, '')]) as inst:
        assert (inst.get_operation_times() == {'psu': 594820, 'laser': 196700, 'laser_above_1A': 196500})
# NOTE(review): the leading ".parametrize(...)" line appears to be a
# "@pytest.mark.parametrize(...)" decorator whose prefix was lost in
# extraction — restore it before running.
.parametrize('padding', ['SAME', 'same'])
def test_Conv2dTF_same_padding(padding):
    """Conv2dTF with 'SAME' padding must preserve the spatial dimensions."""
    input_size = (1, 513, 88)
    # Batch of 10 single-channel spectrogram-shaped tensors.
    batch_size = tuple(((10,) + input_size[:]))
    batch_of_spects = torch.rand(batch_size)
    n_filters = 64
    kernel_size = (5, 5)
    conv2dtf = tweetynet.network.Conv2dTF(in_channels=input_size[0], out_channels=n_filters, kernel_size=kernel_size, padding=padding)
    out = conv2dtf(batch_of_spects)
    # Only the spatial dims (H, W) must match; channel count changes to n_filters.
    assert (tuple(out.shape)[2:] == batch_size[2:])
def rebalance(context, data):
    """Daily rebalance: equal-weight longs at +1/3, shorts at -1/3, close the rest."""
    pipeline_data = context.pipeline_data
    universe = pipeline_data.index
    # Boolean pipeline columns select the long and short books.
    long_assets = universe[pipeline_data.longs]
    short_assets = universe[pipeline_data.shorts]
    # Record the universe size for inspection in the backtest results.
    record(universe_size=len(universe))
    target_weight = 1.0 / 3.0
    for asset in long_assets:
        order_target_percent(asset, target_weight)
    for asset in short_assets:
        order_target_percent(asset, -target_weight)
    # Close any open position that is no longer in the target portfolio.
    desired = long_assets | short_assets
    held = context.portfolio.positions
    for asset in viewkeys(held) - set(desired):
        if data.can_trade(asset):
            order_target_percent(asset, 0)
def test_force_install(pipx_temp_env, capsys):
    """`pipx install --force` must reinstall into an existing venv."""
    # First install succeeds.
    run_pipx_cli(['install', 'pycowsay'])
    captured = capsys.readouterr()
    assert ('installed package' in captured.out)
    # Second plain install is a no-op and reports the existing install.
    run_pipx_cli(['install', 'pycowsay'])
    captured = capsys.readouterr()
    assert ('installed package' not in captured.out)
    assert ("'pycowsay' already seems to be installed" in captured.out)
    # --force reuses the existing venv instead of refusing.
    run_pipx_cli(['install', 'pycowsay', '--force'])
    captured = capsys.readouterr()
    assert ('Installing to existing venv' in captured.out)
class RHEL6_TestCase(F12_TestCase):
    """Kickstart `logvol` parser tests for the RHEL 6 option set.

    Extends the F12 cases with --cipher, --hibernation, and thin-provisioning
    options; each assert_parse pairs an input line with its canonical output.
    """

    def runTest(self):
        F12_TestCase.runTest(self)
        # --fsprofile and --cipher normalization.
        self.assert_parse('logvol / --size=1024 --fsprofile "FS_PROFILE" --name=NAME --vgname=VGNAME', 'logvol / --size=1024 --fsprofile="FS_PROFILE" --name=NAME --vgname=VGNAME\n')
        self.assert_parse('logvol / --encrypted --cipher=3-rot13 --name=NAME --vgname=VGNAME', 'logvol / --encrypted --cipher="3-rot13" --name=NAME --vgname=VGNAME\n')
        # --cipher without --encrypted is silently dropped from the output.
        self.assert_parse('logvol / --cipher=3-rot13 --name=NAME --vgname=VGNAME', 'logvol / --name=NAME --vgname=VGNAME\n')
        self.assert_parse_error('logvol / --cipher --name=NAME --vgname=VGNAME', regex='argument --cipher: expected one argument')
        # Swap sizing options.
        self.assert_parse('logvol swap --hibernation --name=NAME --vgname=VGNAME', 'logvol swap --hibernation --name=NAME --vgname=VGNAME\n')
        self.assert_parse('logvol swap --recommended --hibernation --name=NAME --vgname=VGNAME', 'logvol swap --recommended --hibernation --name=NAME --vgname=VGNAME\n')
        # Thin-provisioning: pools and thin volumes.
        self.assert_parse('logvol none --name=pool1 --vgname=vg --thinpool', 'logvol none --thinpool --name=pool1 --vgname=vg\n')
        self.assert_parse('logvol none --name=pool1 --vgname=vg --thinpool --chunksize=512', 'logvol none --thinpool --chunksize=512 --name=pool1 --vgname=vg\n')
        self.assert_parse('logvol none --name=pool1 --vgname=vg --thinpool --metadatasize=4 --chunksize=1024', 'logvol none --thinpool --metadatasize=4 --chunksize=1024 --name=pool1 --vgname=vg\n')
        self.assert_parse('logvol /home --name=home --vgname=vg --thin --poolname=pool1', 'logvol /home --thin --poolname=pool1 --name=home --vgname=vg\n')
        self.assert_parse('logvol none --name=pool1 --vgname=vg --thinpool --profile=performance --size=500', 'logvol none --size=500 --thinpool --profile=performance --name=pool1 --vgname=vg\n')
        # Invalid thin-provisioning combinations must be rejected.
        self.assert_parse_error('logvol /home --name=home --vgname=vg --thin')
        self.assert_parse_error('logvol none --name=pool1 --vgname=vg --thinpool --chunksize=foo')
        self.assert_parse_error('logvol /home --name=home --thin --thinpool --vgname=vg --size=10000')
        self.assert_parse_error('logvol none --name=pool1 --vgname=vg --chunksize=512')
class TrainDataset(Dataset):
    """Torch dataset of MTOP training examples, with optional disk caching.

    Each raw example is copied (optionally `expansion` times) and given
    `struct_in`, `text_in`, and `seq_out` fields for seq2seq training.
    """

    def __init__(self, args, raw_datasets, cache_root):
        self.raw_datasets = raw_datasets
        cache_path = os.path.join(cache_root, 'mtop_train.cache')
        if (os.path.exists(cache_path) and args.dataset.use_cache):
            # Reuse the preprocessed examples from a previous run.
            self.extended_data = torch.load(cache_path)
        else:
            self.extended_data = []
            # Replicate the dataset `expansion` times (defaulting to 1).
            expansion = (args.seq2seq.expansion if args.seq2seq.expansion else 1)
            for expand_id in range(expansion):
                for raw_data in tqdm(self.raw_datasets):
                    extend_data = copy.deepcopy(raw_data)
                    # `struct_in` carries the structure/API context only when enabled.
                    extend_data.update({'struct_in': (extend_data['structure'] if args.seq2seq.use_apis else ''), 'text_in': extend_data['input'], 'seq_out': extend_data['target']})
                    self.extended_data.append(extend_data)
            if args.dataset.use_cache:
                # Persist the expanded examples for the next run.
                torch.save(self.extended_data, cache_path)

    def __getitem__(self, index) -> T_co:
        return self.extended_data[index]

    def __len__(self):
        return len(self.extended_data)
class TestStepScheduler(unittest.TestCase):
    """Unit tests for StepParamScheduler and its config-driven construction."""

    _num_epochs = 12

    def _get_valid_config(self):
        # Minimal config accepted by StepParamScheduler.from_config.
        return {'name': 'step', 'num_epochs': self._num_epochs, 'values': [0.1, 0.01, 0.001, 0.0001]}

    def test_invalid_config(self):
        base = self._get_valid_config()
        broken = copy.deepcopy(base)
        # A negative epoch count must be rejected.
        broken['num_epochs'] = -1
        with self.assertRaises(ValueError):
            StepParamScheduler.from_config(broken)
        broken['num_epochs'] = base['num_epochs']
        # A missing 'values' key must be rejected.
        del broken['values']
        with self.assertRaises(TypeError):
            StepParamScheduler.from_config(broken)
        # 'values' of the wrong type must be rejected.
        broken['values'] = {'a': 'b'}
        with self.assertRaises(ValueError):
            StepParamScheduler.from_config(broken)
        # An empty 'values' list must be rejected.
        broken['values'] = []
        with self.assertRaises(ValueError):
            StepParamScheduler.from_config(broken)

    def test_scheduler(self):
        # Each of the 4 values should be held for num_epochs / 4 = 3 epochs.
        scheduler = StepParamScheduler.from_config(self._get_valid_config())
        schedule = [scheduler(epoch / self._num_epochs) for epoch in range(self._num_epochs)]
        expected = [0.1, 0.1, 0.1, 0.01, 0.01, 0.01, 0.001, 0.001, 0.001, 0.0001, 0.0001, 0.0001]
        self.assertEqual(schedule, expected)

    def test_build_step_scheduler(self):
        # The generic builder must dispatch to StepParamScheduler for name='step'.
        scheduler = build_param_scheduler(self._get_valid_config())
        self.assertTrue(isinstance(scheduler, StepParamScheduler))
class MemcachedCollector(diamond.collector.Collector):
    """Diamond collector that gathers stats from one or more memcached servers.

    Note: this is Python 2 era code (`basestring`, str socket payloads).
    """

    # Stats published as gauges; every other stat is published as a counter.
    GAUGES = ['bytes', 'connection_structures', 'curr_connections', 'curr_items', 'threads', 'reserved_fds', 'limit_maxbytes', 'hash_power_level', 'hash_bytes', 'hash_is_expanding', 'uptime']

    def get_default_config_help(self):
        """Return help text for the configurable options."""
        config_help = super(MemcachedCollector, self).get_default_config_help()
        config_help.update({'publish': (("Which rows of 'status' you would like to publish." + " Telnet host port' and type stats and hit enter to see the") + ' list of possibilities. Leave unset to publish all.'), 'hosts': ('List of hosts, and ports to collect. Set an alias by ' + ' prefixing the host:port with ')})
        return config_help

    def get_default_config(self):
        """Return the default collector configuration."""
        config = super(MemcachedCollector, self).get_default_config()
        config.update({'path': 'memcached', 'hosts': ['localhost:11211']})
        return config

    def get_raw_stats(self, host, port):
        """Return the raw response to the 'stats' command, or '' on failure.

        When `port` is None, `host` is treated as a unix domain socket path.
        """
        data = ''
        sock = None
        try:
            if (port is None):
                sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                # FIX: set the timeout BEFORE connecting so a hung server
                # cannot block connect() forever (original set it after).
                sock.settimeout(3)
                sock.connect(host)
            else:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.settimeout(3)
                sock.connect((host, int(port)))
            sock.send('stats\n')
            while True:
                received = sock.recv(4096)
                if (not received):
                    break
                data += received
                if data.endswith('END\r\n'):
                    # Full stats response received.
                    break
        except socket.error:
            self.log.exception('Failed to get stats from %s:%s', host, port)
        finally:
            # FIX: always release the socket, even when connect()/recv()
            # raised; the original's unconditional close could itself raise
            # NameError if socket creation failed.
            if (sock is not None):
                sock.close()
        return data

    def get_stats(self, host, port):
        """Parse the raw stats output into a dict of numeric values."""
        # Keys that are non-numeric or otherwise not useful as metrics.
        ignored = ('libevent', 'pointer_size', 'time', 'version', 'repcached_version', 'replication', 'accepting_conns', 'pid')
        pid = None
        cmdline = None
        stats = {}
        data = self.get_raw_stats(host, port)
        for line in data.splitlines():
            pieces = line.split(' ')
            if ((pieces[0] != 'STAT') or (pieces[1] in ignored)):
                continue
            elif (pieces[1] == 'pid'):
                pid = pieces[2]
                continue
            elif (pieces[1] == 'commandargs'):
                cmdline = pieces[2]
                continue
            # Preserve float vs int typing based on the presence of a dot.
            if ('.' in pieces[2]):
                stats[pieces[1]] = float(pieces[2])
            else:
                stats[pieces[1]] = int(pieces[2])
        self.log.debug('pid %s', pid)
        try:
            if (cmdline is None):
                # Fall back to reading the daemon's command line from /proc.
                with open(('/proc/%s/cmdline' % pid), 'r') as f:
                    cmdline = f.readline()
            m = re.search('-(?:c|-max-conns)\x00(\\d+)', cmdline)
            if (m is not None):
                self.log.debug('limit connections %s', m.group(1))
                stats['limit_maxconn'] = m.group(1)
        except Exception:
            # FIX: narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed; /proc may be unavailable.
            self.log.debug('Cannot parse command line options for memcached')
        return stats

    def collect(self):
        """Collect and publish stats for every configured host."""
        hosts = self.config.get('hosts')
        if isinstance(hosts, basestring):
            # Accept a single host string as well as a list (Python 2 code path).
            hosts = [hosts]
        for host in hosts:
            # NOTE(review): this pattern looks garbled (the alias separator
            # appears to have been stripped); verify group numbering against
            # the upstream source before changing.
            matches = re.search('((.+)\\)?([^:]+)(:(\\d+))?', host)
            alias = matches.group(2)
            hostname = matches.group(3)
            port = matches.group(5)
            if (alias is None):
                alias = hostname
            stats = self.get_stats(hostname, port)
            # Publish only the configured subset, defaulting to everything found.
            desired = self.config.get('publish', stats.keys())
            for stat in desired:
                if (stat in stats):
                    if (stat in self.GAUGES):
                        self.publish_gauge(((alias + '.') + stat), stats[stat])
                    else:
                        self.publish_counter(((alias + '.') + stat), stats[stat])
                else:
                    self.log.error("No such key '%s' available, issue 'stats' for a full list", stat)
class TweetSauceCache(db.Entity):
    """Pony ORM entity caching SauceNao lookup results per (tweet, media index).

    NOTE(review): the bare `_session` lines below look like decorators whose
    "@" prefix / module path was stripped in extraction (likely Pony's
    db_session), and `fetch`/`set`/`sauce_count` take no `self` — presumably
    stripped @staticmethod decorators as well. Confirm against the original.
    """

    tweet_id = Required(int, size=64)
    index_no = Required(int, size=8, index=True)
    # Raw SauceNao response pieces, stored so the container can be rebuilt.
    sauce_header = Optional(Json)
    sauce_data = Optional(Json)
    sauce_class = Optional(str, 255)
    sauce_index = Optional(str, 255)
    media_id = Optional(int, size=64)
    trigger = Optional(str, 50)
    created_at = Required(int, size=64, index=True)

    _session
    def fetch(tweet_id: int, index_no: int=0, cutoff: int=86400) -> typing.Optional['TweetSauceCache']:
        """Return the cached entry for (tweet, index), or None if absent/expired."""
        now = int(time.time())
        # cutoff=0/None disables expiry entirely.
        cutoff_ts = (0 if (not cutoff) else (now - cutoff))
        sauce = TweetSauceCache.get(tweet_id=tweet_id, index_no=index_no)
        if sauce:
            log.debug(f'[SYSTEM] Sauce cache hit on index {index_no} for tweet {tweet_id}')
            if (sauce.created_at < cutoff_ts):
                log.info(f'[SYSTEM] Sauce cache query on index {index_no} for tweet {tweet_id} has expired')
                return None
        return sauce

    _session
    def set(tweet: TweetCache, sauce_results: typing.Optional[SauceNaoResults]=None, index_no: int=0, trigger: str=TRIGGER_MENTION, media_id: typing.Optional[int]=None) -> 'TweetSauceCache':
        """Cache a lookup result (or a failed lookup) for a tweet's media item."""
        # Replace any existing entry for this (tweet, index) pair.
        cache = TweetSauceCache.get(tweet_id=tweet.tweet_id, index_no=index_no)
        if cache:
            log.info(f'[SYSTEM] Overwriting sauce cache entry for tweet {tweet.tweet_id}')
            cache.delete()
            commit()

        def no_results():
            # A row with no sauce_* fields marks a failed lookup (negative cache).
            log.info(f'[SYSTEM] Logging a failed Sauce lookup for tweet {tweet.tweet_id} on indice {index_no}')
            _cache = TweetSauceCache(tweet_id=tweet.tweet_id, index_no=index_no, trigger=trigger, media_id=(media_id or 0), created_at=int(time.time()))
            return _cache
        if (not sauce_results):
            return no_results()
        # Per-trigger minimum similarity threshold from config.
        similarity_cutoff = int(config.getfloat('Twitter', f'min_similarity_{trigger}', fallback=50.0))
        sauce = sauce_results.results[0]
        if (sauce.similarity < similarity_cutoff):
            log.debug(f"[SYSTEM] Sauce potentially found for tweet {tweet.tweet_id}, but it didn't meet the minimum {trigger} similarity requirements")
            return no_results()
        log.info(f'[SYSTEM] Caching a successful sauce lookup query for tweet {tweet.tweet_id} on indice {index_no}')
        cache = TweetSauceCache(tweet_id=tweet.tweet_id, index_no=index_no, sauce_header=sauce.header, sauce_data=sauce.data, sauce_class=type(sauce).__name__, sauce_index=(sauce.index or f'Unknown index ({sauce.index_id})'), trigger=trigger, media_id=(media_id or 0), created_at=int(time.time()))
        return cache

    _session
    def sauce_count(cutoff: typing.Optional[int]=None, found_only: bool=True) -> int:
        """Count cache entries newer than `cutoff` seconds; optionally hits only."""
        now = int(time.time())
        cutoff_ts = (0 if (not cutoff) else (now - cutoff))
        if found_only:
            # Rows with a sauce_class are successful lookups.
            sauce_count = count((s for s in TweetSauceCache if (s.sauce_class and (s.created_at >= cutoff_ts))))
        else:
            sauce_count = count((s for s in TweetSauceCache if (s.created_at >= cutoff_ts)))
        return sauce_count

    def sauce(self) -> typing.Optional[GenericSource]:
        """Rebuild the pysaucenao container from the stored header/data, or None."""
        if (not all([self.sauce_class, self.sauce_header, self.sauce_data])):
            return None
        # Resolve the container class by its stored name.
        container = getattr(pysaucenao.containers, self.sauce_class)
        sauce = container(self.sauce_header, self.sauce_data)
        return sauce
class TestDict():
    """Tests for configtypes.Dict conversion, validation, and documentation.

    NOTE(review): the bare ".parametrize(...)" and "(val=strategies...)" lines
    below look like "@pytest.mark.parametrize" / "@hypothesis.given" decorators
    whose prefixes were stripped in extraction, and `klass` is presumably a
    @pytest.fixture — restore the decorators before running.
    """

    def klass(self):
        return configtypes.Dict

    .parametrize('val', ['{"foo": "bar"}', '{"foo": "bar", "baz": "fish"}', '{}'])
    def test_from_str_valid(self, klass, val):
        # Valid JSON objects round-trip through from_str.
        d = klass(keytype=configtypes.String(), valtype=configtypes.String(), none_ok=True)
        assert (d.from_str(val) == json.loads(val))

    .parametrize('val', ['["foo"]', '{"hello": 23}', '[invalid', '{}'])
    def test_from_str_invalid(self, klass, val):
        # Non-objects, wrong value types, and malformed JSON are rejected.
        d = klass(keytype=configtypes.String(), valtype=configtypes.String())
        with pytest.raises(configexc.ValidationError):
            d.from_str(val)

    def test_from_str_int(self):
        # Values are converted via the valtype (Int here).
        typ = configtypes.Dict(keytype=configtypes.String(), valtype=configtypes.Int())
        assert (typ.from_str('{"answer": 42}') == {'answer': 42})

    .parametrize('obj, expected', [({'a': 'b'}, {'a': 'b'}), ({}, {}), (None, {})])
    def test_from_obj(self, klass, obj, expected):
        # None is normalized to an empty dict when none_ok is set.
        d = klass(keytype=configtypes.String(), valtype=configtypes.String(), none_ok=True)
        assert (d.from_obj(obj) == expected)

    .parametrize('keytype, valtype, val', [(configtypes.String(), configtypes.String(), {'hello': 'world'}), (configtypes.String(), configtypes.Int(), {'hello': 42})])
    def test_to_py_valid(self, klass, keytype, valtype, val):
        assert (klass(keytype=keytype, valtype=valtype).to_py(val) == val)

    .parametrize('val', [{'': 'foo'}, {'foo': ''}, {0: 'foo'}, {'foo': 0}])
    def test_to_py_invalid(self, klass, val):
        # Empty strings and wrong key/value types fail validation.
        typ = klass(keytype=configtypes.String(), valtype=configtypes.String())
        with pytest.raises(configexc.ValidationError):
            typ.to_py(val)

    .parametrize('kind, val, ok', [('fixed', {'one': '1'}, True), ('fixed', {'one': '1', 'two': '2', 'three': '3'}, False), ('fixed', {'one': '1', 'two': '2'}, True), ('required', {'one': '1'}, False), ('required', {'one': '1', 'two': '2', 'three': '3'}, True), ('required', {'one': '1', 'two': '2'}, True)])
    .parametrize('from_str', [True, False])
    def test_keys(self, klass, kind, val, ok, from_str):
        """fixed_keys limits allowed keys; required_keys mandates a minimum set."""
        if (kind == 'fixed'):
            d = klass(keytype=configtypes.String(), valtype=configtypes.String(none_ok=True), fixed_keys=['one', 'two'])
            message = 'Expected keys .*'
        elif (kind == 'required'):
            d = klass(keytype=configtypes.String(), valtype=configtypes.String(), required_keys=['one', 'two'])
            message = 'Required keys .*'
        if ok:
            expectation = testutils.nop_contextmanager()
        else:
            expectation = pytest.raises(configexc.ValidationError, match=message)
        with expectation:
            if from_str:
                d.from_str(json.dumps(val))
            else:
                d.to_py(val)

    (val=strategies.dictionaries(strategies.text(min_size=1, alphabet=strategies.characters(blacklist_categories=['Cc', 'Cs'], max_codepoint=65535)), strategies.booleans()))
    def test_hypothesis(self, klass, val):
        # Round-trip property: to_py -> to_str -> from_str is stable.
        d = klass(keytype=configtypes.String(), valtype=configtypes.Bool(), none_ok=True)
        converted = d.to_py(val)
        expected = (converted if converted else None)
        assert (d.from_str(d.to_str(converted)) == expected)

    (val=strategies.dictionaries(strategies.text(min_size=1), strategies.booleans()))
    def test_hypothesis_text(self, klass, val):
        # Round-trip via JSON text, skipping unparsable or multi-key cases.
        text = json.dumps(val)
        d = klass(keytype=configtypes.String(), valtype=configtypes.Bool())
        try:
            converted = d.from_str(text)
        except configexc.ValidationError:
            return
        if (len(converted) > 1):
            return
        assert ((d.to_str(converted) == '') if (not val) else text)

    .parametrize('valtype, val, expected', [(configtypes.String(), {'foo': 'bar'}, '\n\n- +pass:[foo]+: +pass:[bar]+'), (configtypes.Dict(keytype=configtypes.String(), valtype=configtypes.String()), {'foo': {'bar': 'baz'}}, '\n\n- +pass:[foo]+:\n\n* +pass:[bar]+: +pass:[baz]+'), (configtypes.String(), {}, 'empty')])
    def test_to_doc(self, klass, valtype, val, expected):
        # AsciiDoc rendering, including nested dicts and the empty case.
        typ = klass(keytype=configtypes.String(), valtype=valtype)
        doc = typ.to_doc(val)
        print(doc)
        assert (doc == expected)

    def test_from_obj_sub(self):
        # from_obj must recurse into the valtype's own from_obj.
        typ = configtypes.Dict(keytype=configtypes.String(), valtype=FromObjType())
        value = typ.from_obj({'1': '2'})
        assert (value == {'1': 2})

    def test_to_str(self, klass):
        typ = klass(keytype=configtypes.String(), valtype=configtypes.String())
        d = {'a': 'b', 'c': 'd'}
        assert (typ.to_str(d) == '{"a": "b", "c": "d"}')
class PingOpenIdConnect(OpenIdConnectAuth):
    """PingFederate OpenID Connect backend for python-social-auth."""

    name = 'ping'
    # NOTE(review): empty endpoint — the URL appears to have been stripped in
    # extraction; confirm against deployment configuration.
    OIDC_ENDPOINT = ''
    REDIRECT_STATE = False
    ACCESS_TOKEN_METHOD = 'POST'
    RESPONSE_TYPE = 'code'
    # Claim used as the username; overridable via the USERNAME_KEY setting.
    USERNAME_KEY = 'preferred_username'

    def find_valid_key(self, id_token):
        """Return the JWKS key whose RSA signature matches the id_token, or None."""
        for key in self.get_jwks_keys():
            rsakey = jwk.construct(key, algorithm='RS256')
            # JWS compact form: "<header>.<payload>.<signature>".
            (message, encoded_sig) = id_token.rsplit('.', 1)
            decoded_sig = base64url_decode(encoded_sig.encode('utf-8'))
            if rsakey.verify(message.encode('utf-8'), decoded_sig):
                return key

    def validate_and_return_id_token(self, id_token, access_token):
        """Verify the id_token's signature and claims; return the claims dict.

        Raises AuthTokenError on any signature or claim-validation failure.
        """
        (client_id, client_secret) = self.get_key_and_secret()
        key = self.find_valid_key(id_token)
        if (not key):
            raise AuthTokenError(self, 'Signature verification failed')
        rsakey = jwk.construct(key, algorithm='RS256')
        try:
            claims = jwt.decode(id_token, rsakey.to_pem().decode('utf-8'), algorithms=self.JWT_ALGORITHMS, audience=client_id, issuer=self.id_token_issuer(), access_token=access_token, options=self.JWT_DECODE_OPTIONS)
        except ExpiredSignatureError:
            # Most-specific jose exceptions first; order matters for subclasses.
            raise AuthTokenError(self, 'Signature has expired')
        except JWTClaimsError as error:
            raise AuthTokenError(self, str(error))
        except JWTError:
            raise AuthTokenError(self, 'Invalid signature')
        self.validate_claims(claims)
        return claims

    def get_user_details(self, response):
        """Map OIDC claims onto the social-auth user-details dict."""
        username_key = self.setting('USERNAME_KEY', default=self.USERNAME_KEY)
        (fullname, first_name, last_name) = self.get_user_names(first_name=response.get('given_name'), last_name=response.get('family_name'))
        return {'username': response.get(username_key), 'email': response.get('email'), 'fullname': fullname, 'first_name': first_name, 'last_name': last_name}
def test_fixtures_in_conftest(pytester: Pytester) -> None:
    """--fixtures-per-test must list conftest fixtures with their docstrings.

    arg3 depends on arg1 and arg2, so test_arg3's section must show all three.
    """
    pytester.makeconftest('\n    import pytest\n    \n    def arg1():\n        """arg1 docstring"""\n    \n    def arg2():\n        """arg2 docstring"""\n    \n    def arg3(arg1, arg2):\n        """arg3\n        docstring\n        """\n    ')
    p = pytester.makepyfile('\n        def test_arg2(arg2):\n            pass\n        def test_arg3(arg3):\n            pass\n    ')
    result = pytester.runpytest('--fixtures-per-test', p)
    assert (result.ret == 0)
    # Each test section lists its fixtures with source location and docstring.
    result.stdout.fnmatch_lines(['*fixtures used by test_arg2*', '*(test_fixtures_in_conftest.py:2)*', 'arg2 -- conftest.py:6', '    arg2 docstring', '*fixtures used by test_arg3*', '*(test_fixtures_in_conftest.py:4)*', 'arg1 -- conftest.py:3', '    arg1 docstring', 'arg2 -- conftest.py:6', '    arg2 docstring', 'arg3 -- conftest.py:9', '    arg3'])
class SocialAdminTest(TestCase):
    """Smoke tests for the Python Social Auth Django admin integration."""

    @classmethod
    def setUpTestData(cls):
        # Fixed: Django invokes setUpTestData on the *class*, so it must be
        # a classmethod; without the decorator the hook raised TypeError.
        User = get_user_model()
        User._default_manager.create_superuser(username='admin', email='', first_name='Admin', password='super-duper-test')

    def test_admin_app_name(self):
        """The admin index page mentions the app's verbose name."""
        self.client.login(username='admin', password='super-duper-test')
        response = self.client.get(reverse('admin:index'))
        self.assertContains(response, 'Python Social Auth')

    def test_social_auth_changelist(self):
        """The UserSocialAuth changelist view renders without raising."""
        self.client.login(username='admin', password='super-duper-test')
        meta = UserSocialAuth._meta
        url_name = f'admin:{meta.app_label}_{meta.model_name}_changelist'
        self.client.get(reverse(url_name))
def ftdi_to_clkbits(baudrate):
    """Convert a requested baud rate into FTDI divisor register values.

    Mirrors libftdi's ftdi_to_clkbits for classic (non-H) chips.

    Returns:
        (actual_baud, value, index): the baud rate the hardware will really
        produce, plus the wValue/wIndex halves of the encoded divisor for
        the SET_BAUDRATE control transfer.
    """
    # Fixed: the base clock constant was missing (syntax error).  libftdi
    # uses a 48 MHz base clock (C_CLK) with a fixed /16 pre-divider for
    # classic FTDI chips, giving the 3 MBaud maximum below.
    clk = 48000000
    clk_div = 16
    # Encoding of the 3 fractional (1/8th) divisor bits, per FTDI AN232B-05.
    frac_code = [0, 3, 2, 4, 1, 5, 6, 7]
    actual_baud = 0
    if (baudrate >= (clk / clk_div)):
        # Special case: encoded divisor 0 selects the maximum rate (3 MBaud).
        encoded_divisor = 0
        actual_baud = (clk // clk_div)
    elif (baudrate >= (clk / (clk_div + (clk_div / 2)))):
        # Special case: encoded divisor 1 selects 2 MBaud.
        encoded_divisor = 1
        actual_baud = (clk // (clk_div + (clk_div // 2)))
    elif (baudrate >= (clk / (2 * clk_div))):
        # Special case: encoded divisor 2 selects 1.5 MBaud.
        encoded_divisor = 2
        actual_baud = (clk // (2 * clk_div))
    else:
        # General case: compute the divisor in 1/16th resolution, then
        # round to the nearest half to get the best 1/8-step divisor.
        divisor = (((clk * 16) // clk_div) // baudrate)
        best_divisor = ((divisor + 1) // 2)
        if (best_divisor > 131072):  # divisor field is 17 bits wide
            best_divisor = 131071
        actual_baud = (((clk * 16) // clk_div) // best_divisor)
        actual_baud = ((actual_baud + 1) // 2)
        # Integer part goes in the low bits, fractional code in bits 14-16.
        encoded_divisor = ((best_divisor >> 3) + (frac_code[(best_divisor & 7)] << 14))
    value = (encoded_divisor & 65535)  # low 16 bits -> wValue
    index = (encoded_divisor >> 16)    # remaining bits -> wIndex
    return (actual_baud, value, index)
# NOTE(review): the two parametrize decorators below had lost their
# "@pytest.mark" prefix and the URL literals were corrupted in this file
# (only the expected booleans survived).  The URLs here are plausible
# reconstructions matching the original 2x True / 6x False pattern --
# confirm against upstream history before relying on them.
@pytest.mark.parametrize('url, has_secret', [
    ('http://example.com/secret', True),           # secret in the path
    ('http://example.com?secret=yes', True),       # secret in the query
    ('http://secret:password@example.com', False), # secret in the user
    ('http://user:secret@example.com', False),     # secret in the password
    ('http://example.com#secret', False),          # secret in the fragment
    ('http://example.com/', False),
    ('https://example.com/', False),
    ('http://example.org/', False),
])
@pytest.mark.parametrize('from_file', [True, False])
def test_secret_url(url, has_secret, from_file):
    """PAC resolution must strip secret URL components unless loaded from file.

    The embedded PAC script throws if the domain it receives contains
    "secret" when it should have been sanitized (and vice versa).
    """
    test_str = '\n    function FindProxyForURL(domain, host) {{\n        has_secret = domain.indexOf("secret") !== -1;\n        expected_secret = {};\n        if (has_secret !== expected_secret) {{\n            throw new Error("Expected secret: " + expected_secret + ", found: " + has_secret + " in " + domain);\n        }}\n        return "DIRECT";\n    }}\n    '.format(('true' if (has_secret or from_file) else 'false'))
    res = pac.PACResolver(test_str)
    res.resolve(QNetworkProxyQuery(QUrl(url)), from_file=from_file)
# Fixed: the route decorator had lost its "@plugin" prefix (a bare
# ".route(...)" line is a syntax error); "plugin" is the object used
# throughout the body, so it is the router here.
@plugin.route('/play/<item_id>')
def play(item_id: str) -> None:
    """Resolve a playable item by id, hand it to Kodi, and track playback.

    Reads optional 'season_index'/'index' query kwargs to pick an episode,
    then polls once a second while playing so watch position is recorded.
    """
    item = plugin.items.instantiate_from_item_id(item_id)
    season_index = plugin.kwargs.get('season_index')
    index = plugin.kwargs.get('index')
    playable_li = plugin.items.get_playable(item, season_index=season_index, index=index).playable_list_item
    player = Player(list_item=playable_li)
    # Tell Kodi which ListItem actually resolves this plugin:// URL.
    xbmcplugin.setResolvedUrl(plugin.handle, True, playable_li)
    # Poll while playback is active so the mark-time can be persisted.
    while player.is_playing:
        player.set_marktime()
        xbmc.sleep(1000)
def get_bleu_score(args, trainer, subset, src_target_hypo_strs):
    """Write hypotheses for *subset* to disk and score them with multi-bleu.perl.

    Returns the BLEU score as a float parsed from the perl script's output.
    """
    # Flatten the per-shard sentence lists, dropping empty/missing entries.
    src_target_hypo_strs_filter = []
    for sents in src_target_hypo_strs:
        for sent in sents:
            if (sent is None) or (len(sent) == 0):
                continue
            src_target_hypo_strs_filter.append(sent)
    # Each entry starts with the sentence id; restore corpus order by id so
    # hypotheses line up with the reference files.
    src_target_hypo_strs_filter = sorted(src_target_hypo_strs_filter, key=(lambda elem: int(elem[0])), reverse=False)
    # NOTE(review): os.path.join raises TypeError when valid_decoding_path
    # is None, so the warning branch below is unreachable in practice.
    valid_sys_path = os.path.join(args.valid_decoding_path, '{}_{}.txt'.format(subset, trainer.get_num_updates()))
    if (args.valid_decoding_path is not None):
        os.makedirs(args.valid_decoding_path, exist_ok=True)
        with open(valid_sys_path, 'w', encoding='utf-8') as f:
            for sent in src_target_hypo_strs_filter:
                if (len(sent) == 0):
                    continue
                # The hypothesis string is the last element of each entry.
                f.write((sent[(- 1)] + '\n'))
    else:
        print('| WARNING: Validation Output Path not Specified!')
    # Only a single reference count per language pair is supported.
    if (len(list(args.num_ref.values())) == 1):
        num_ref = int(list(args.num_ref.values())[0])
    else:
        raise NotImplementedError
    ref_path = []
    if (num_ref == 1):
        ref_path.append(os.path.join(args.valid_decoding_path, '{}.{}'.format(subset, args.target_lang)))
    else:
        # Multiple references use numbered suffixes: subset.lang0, .lang1, ...
        for i in range(num_ref):
            ref_path.append(os.path.join(args.valid_decoding_path, '{}.{}{}'.format(subset, args.target_lang, str(i))))
    # Feed the hypothesis file to multi-bleu.perl on stdin.  (Removed a dead
    # "if True:" wrapper that added nothing but indentation.)
    # NOTE(review): shell=True with a %-built command string; these paths
    # come from local config, not untrusted input, but quoting is fragile.
    with open(valid_sys_path) as out_file:
        out_file.seek(0)
        res = subprocess.check_output(('perl %s/multi-bleu.perl %s' % (args.multi_bleu_path, ' '.join(ref_path))), stdin=out_file, shell=True).decode('utf-8')
    # multi-bleu prints "BLEU = 33.10, ..."; extract the numeric score.
    return float(res.split(',')[0].split('=')[1])
class DateValue(Value):
    """A (possibly partial) date value; -1 marks an unknown year/month/day."""

    def __init__(self, year, month, day, original_string=None):
        """Create a new DateValue. Placeholder (-1) fields are allowed,
        but not all three at once."""
        assert isinstance(year, int)
        assert (isinstance(month, int) and ((month == (- 1)) or (1 <= month <= 12)))
        assert (isinstance(day, int) and ((day == (- 1)) or (1 <= day <= 31)))
        assert (not (year == month == day == (- 1)))
        self._year = year
        self._month = month
        self._day = day
        if (not original_string):
            # Fixed: the day comparison was against the *string* '-1', which
            # is never equal to an int, so an unknown day was rendered as
            # "-1" instead of "xx".
            self._normalized = '{}-{}-{}'.format(
                (year if (year != (- 1)) else 'xx'),
                (month if (month != (- 1)) else 'xx'),
                (day if (day != (- 1)) else 'xx'))
        else:
            self._normalized = normalize(original_string)
        self._hash = hash((self._year, self._month, self._day))

    @property
    def ymd(self):
        """(year, month, day) tuple; -1 for unknown fields."""
        # Fixed: without @property, self.ymd == other.ymd compared bound
        # *methods* (always False), breaking __eq__ and match().
        return (self._year, self._month, self._day)

    def __eq__(self, other):
        return (isinstance(other, DateValue) and (self.ymd == other.ymd))

    def __hash__(self):
        return self._hash

    def __str__(self):
        return (('D(%d,%d,%d)' % (self._year, self._month, self._day)) + str([self._normalized]))
    __repr__ = __str__

    def match(self, other):
        """True if *other* has the same normalized string or the same ymd."""
        assert isinstance(other, Value)
        if (self.normalized == other.normalized):
            return True
        if isinstance(other, DateValue):
            return (self.ymd == other.ymd)
        return False

    @staticmethod
    def parse(text):
        """Parse 'yyyy-mm-dd' (with 'xx'/'xxxx' placeholders) into a
        (year, month, day) tuple, or None if *text* is not a valid date.

        Fixed: marked @staticmethod (it takes no self) and narrowed the
        bare except so KeyboardInterrupt/SystemExit are not swallowed.
        """
        try:
            ymd = text.lower().split('-')
            assert (len(ymd) == 3)
            year = ((- 1) if (ymd[0] in ('xx', 'xxxx')) else int(ymd[0]))
            month = ((- 1) if (ymd[1] == 'xx') else int(ymd[1]))
            day = ((- 1) if (ymd[2] == 'xx') else int(ymd[2]))
            assert (not (year == month == day == (- 1)))
            assert ((month == (- 1)) or (1 <= month <= 12))
            assert ((day == (- 1)) or (1 <= day <= 31))
            return (year, month, day)
        except (ValueError, AssertionError, AttributeError):
            return None
def train_model(input_dim, output_dim, activation='leaky_relu', dropout=0.2):
    """Build a CRNN: normalized input -> 9 residual stages -> BiLSTM -> softmax.

    The extra output unit (output_dim + 1) is the CTC blank class.
    """
    inputs = layers.Input(shape=input_dim, name='input')
    # Scale raw pixel values into [0, 1].
    normalized = layers.Lambda((lambda t: (t / 255)))(inputs)
    # (filters, skip_conv, strides) for each residual stage, in order.
    stage_configs = [
        (16, True, 1),
        (16, True, 2),
        (16, False, 1),
        (32, True, 2),
        (32, False, 1),
        (64, True, 2),
        (32, True, 1),
        (64, True, 2),
        (64, False, 1),
    ]
    x = normalized
    for filters, skip, stride in stage_configs:
        x = residual_block(x, filters, activation=activation, skip_conv=skip, strides=stride, dropout=dropout)
    # Collapse the spatial dims into a single sequence axis for the RNN.
    squeezed = layers.Reshape(((x.shape[(- 3)] * x.shape[(- 2)]), x.shape[(- 1)]))(x)
    blstm = layers.Bidirectional(layers.LSTM(128, return_sequences=True))(squeezed)
    blstm = layers.Dropout(dropout)(blstm)
    output = layers.Dense((output_dim + 1), activation='softmax', name='output')(blstm)
    return Model(inputs=inputs, outputs=output)
class LoginManager(gui.Tag, gui.EventSource):
    """Tracks a per-user session via a 'user_uid' cookie with a timeout.

    A cookie-refresh timer polls the client every timeout/10 seconds;
    renew_session() re-issues the cookie and re-arms the expiry timer.
    """

    def __init__(self, cookieInterface, session_timeout_seconds=60, **kwargs):
        super(LoginManager, self).__init__(**kwargs)
        gui.EventSource.__init__(self)
        self.expired = True
        # Fixed: random.randint was missing its upper bound (syntax error);
        # 999999999 matches the remi examples -- TODO confirm upstream.
        self.session_uid = str(random.randint(1, 999999999))
        self.cookieInterface = cookieInterface
        self.session_timeout_seconds = session_timeout_seconds
        self.timer_request_cookies()
        self.timeout_timer = None

    def timer_request_cookies(self):
        """Ask the client for its cookies, then re-arm this poll timer."""
        self.cookieInterface.request_cookies()
        self.cookie_timer = threading.Timer((self.session_timeout_seconds / 10.0), self.timer_request_cookies)
        self.cookie_timer.daemon = True  # don't block interpreter shutdown
        self.cookie_timer.start()

    # NOTE(review): the original had a bare "_event" line here (a NameError
    # at class creation); remi exposes event hooks via @gui.decorate_event,
    # which is restored here -- confirm against the project's gui module.
    @gui.decorate_event
    def on_session_expired(self):
        """Event fired when the session cookie no longer matches or times out."""
        self.expired = True
        return ()

    def renew_session(self):
        """Validate the client's cookie and (re-)issue the session cookie."""
        # If the client lost the cookie or presents a stale uid, expire first.
        if (((not ('user_uid' in self.cookieInterface.cookies)) or (self.cookieInterface.cookies['user_uid'] != self.session_uid)) and (not self.expired)):
            self.on_session_expired()
        if self.expired:
            # Fixed: same missing randint upper bound as in __init__.
            self.session_uid = str(random.randint(1, 999999999))
        self.cookieInterface.set_cookie('user_uid', self.session_uid, str(self.session_timeout_seconds))
        # Restart the expiry countdown from now.
        if self.timeout_timer:
            self.timeout_timer.cancel()
        self.timeout_timer = threading.Timer(self.session_timeout_seconds, self.on_session_expired)
        self.timeout_timer.daemon = True
        self.expired = False
        self.timeout_timer.start()
class Attacker():
    """Applies a configured adversarial text attack to tagging examples.

    Character-level attacks perturb every word; other attack types are
    applied per word with probability equal to the severity.
    """

    def __init__(self, severity: float, args: argparse.Namespace) -> None:
        self.type_of_attack = args.attack
        self.severity_of_attack = severity
        self.attack = AdversarialAttacker()
        # Fixed seed keeps the sampled perturbations reproducible.
        self.rng = np.random.default_rng(42)

    def __call__(self, example: InputExample) -> InputExample:
        attack_spec = [(self.type_of_attack, self.severity_of_attack)]
        char_level = self.type_of_attack in ['visual', 'phonetic', 'intrude', 'confusable']
        if char_level:
            # Character-level attacks internally scale with severity, so
            # every word is passed through the attacker.
            perturbed = [self.attack.multiattack(w, attack_spec) for w in example.words]
        else:
            # Otherwise severity is the per-word probability of attacking.
            perturbed = [
                self.attack.multiattack(w, attack_spec)
                if self.rng.random() < self.severity_of_attack
                else w
                for w in example.words
            ]
        example.words = perturbed
        return example
class XJSEExchangeCalendar(TradingCalendar):
    """Johannesburg Stock Exchange (XJSE) trading calendar.

    Open 09:01, close 17:00, Africa/Johannesburg time.  South African
    public holidays falling on a Sunday are observed the following Monday.
    """
    name = 'XJSE'
    tz = timezone('Africa/Johannesburg')
    open_times = ((None, time(9, 1)),)
    close_times = ((None, time(17, 0)),)

    # Fixed: regular_holidays/adhoc_holidays are defined as properties on
    # the TradingCalendar base class; the decorators were missing here, so
    # the base-class machinery would have received bound methods instead
    # of calendars.
    @property
    def regular_holidays(self):
        """Recurring South African public holidays observed by the JSE."""
        return HolidayCalendar([new_years_day(observance=sunday_to_monday), Holiday('Human Rights Day', month=3, day=21, observance=sunday_to_monday), GoodFriday, Holiday('Family Day', month=1, day=1, offset=[Easter(), Day(1)]), Holiday('Freedom Day', month=4, day=27, observance=sunday_to_monday), Holiday("Workers' Day", month=5, day=1, observance=sunday_to_monday), Holiday('Youth Day', month=6, day=16, observance=sunday_to_monday), Holiday("National Women's Day", month=8, day=9, observance=sunday_to_monday), Holiday('Heritage Day', month=9, day=24, observance=sunday_to_monday), Holiday('Day of Reconciliation', month=12, day=16, observance=sunday_to_monday), Holiday('Christmas', month=12, day=25, observance=sunday_to_monday), Holiday('Day of Goodwill', month=12, day=26, observance=sunday_to_monday)])

    @property
    def adhoc_holidays(self):
        """One-off closures (election days and special public holidays)."""
        return [Timestamp(date, tz=UTC) for date in ['2004-04-14', '2006-03-01', '2009-04-22', '2011-05-18', '2014-05-07', '2016-08-03', '2019-05-08', '2008-05-02', '2011-12-27', '2016-12-27']]
def orchestration(config):
    """Run the full logfile pipeline: fetch linac diagnostics, extract, index.

    Builds the per-site Mosaiq SQL map and the linac name -> IP map from the
    TOML-style config, skipping any site or linac with missing keys.
    """
    data_root = pathlib.Path(config['trf_logfiles']['root_directory'])
    print('Data directory used:\n    {}\n'.format(data_root))
    linac_details = {}
    machine_ip_map = {}
    mosaiq_sql = {}
    for site in config['site']:
        # Sites lacking any required Mosaiq key are skipped entirely.
        try:
            centre = site['name']
            mosaiq_conf = site['mosaiq']
            tz = mosaiq_conf['timezone']
            sql_server = f"{mosaiq_conf['hostname']}:{mosaiq_conf['port']}"
        except KeyError:
            continue
        mosaiq_sql[centre] = {'timezone': tz, 'mosaiq_sql_server': sql_server}
        for linac in site['linac']:
            # Likewise skip linacs with incomplete entries.
            try:
                linac_name = linac['name']
                samba_ip = linac['samba_ip']
            except KeyError:
                continue
            machine_ip_map[linac_name] = samba_ip
            linac_details[linac_name] = {'centre': centre, 'ip': samba_ip}
    diagnostics_directory = data_root.joinpath('diagnostics')
    print('Fetching diagnostic zip files from Linacs...')
    fetch_system_diagnostics_multi_linac(machine_ip_map, diagnostics_directory)
    print('Extracting trf logfiles from diagnostics zip files...')
    extract_diagnostic_zips_and_archive(data_root)
    print('Indexing logfiles...')
    index_logfiles(mosaiq_sql, linac_details, data_root)
# Fixed: a Warp kernel must be decorated with @wp.kernel to be compiled and
# launched; the decorator was missing here (it appears to have been stripped,
# like other decorators in this file) -- confirm against the calling code.
@wp.kernel
def sample_mesh_query(mesh: wp.uint64, query_points: wp.array(dtype=wp.vec3), query_faces: wp.array(dtype=int), query_signs: wp.array(dtype=float), query_dist: wp.array(dtype=float)):
    """For each query point, record the closest mesh face, side sign, and distance."""
    tid = wp.tid()
    # Output slots for the mesh query (passed by reference in this Warp API).
    face_index = int(0)
    face_u = float(0.0)
    face_v = float(0.0)
    sign = float(0.0)
    # Search radius; presumably chosen to exceed the scene extent -- TODO confirm.
    max_dist = 10012.0
    p = query_points[tid]
    wp.mesh_query_point(mesh, p, max_dist, sign, face_index, face_u, face_v)
    # Reconstruct the closest surface point from the barycentric hit.
    cp = wp.mesh_eval_position(mesh, face_index, face_u, face_v)
    query_signs[tid] = sign
    query_faces[tid] = face_index
    query_dist[tid] = wp.length((cp - p))
def simxGetObjectSelection(clientID, operationMode):
    """Retrieve the handles of the currently selected scene objects.

    Returns (return_code, handles): handles is empty unless the remote API
    call succeeded (return code 0).
    """
    selection_count = ct.c_int()
    selection_ptr = ct.POINTER(ct.c_int)()
    ret = c_GetObjectSelection(clientID, ct.byref(selection_ptr), ct.byref(selection_count), operationMode)
    if ret != 0:
        # Call failed; report the code with no handles.
        return (ret, [])
    # Copy the C int array into a Python list.
    handles = [selection_ptr[i] for i in range(selection_count.value)]
    return (ret, handles)
def lr_write_tables(modulename=tab_module, outputdir=''):
    """Write the generated LR parsing tables to <outputdir>/<modulename>.py.

    Emits the method, grammar signature, action/goto tables (in a compact
    item-list encoding when 'smaller' is set) and the production list.
    Errors creating or writing the file are reported, not raised.
    """
    filename = (os.path.join(outputdir, modulename) + '.py')
    try:
        # Fixed: use a context manager so the file is closed even when a
        # write fails (the original only called f.close() on success).
        with open(filename, 'w') as f:
            f.write(('\n# %s\n# This file is automatically generated. Do not edit.\n\n_lr_method = %s\n\n_lr_signature = %s\n' % (filename, repr(_lr_method), repr(Signature.digest()))))
            # Compact encoding groups (state, symbol) -> value entries by
            # symbol, shrinking the generated file considerably.
            smaller = 1
            if smaller:
                items = {}
                for (k, v) in _lr_action.items():
                    i = items.get(k[1])
                    if (not i):
                        i = ([], [])
                        items[k[1]] = i
                    i[0].append(k[0])
                    i[1].append(v)
                f.write('\n_lr_action_items = {')
                for (k, v) in items.items():
                    f.write(('%r:([' % k))
                    for i in v[0]:
                        f.write(('%r,' % i))
                    f.write('],[')
                    for i in v[1]:
                        f.write(('%r,' % i))
                    f.write(']),')
                f.write('}\n')
                # Emitted code re-expands the items back into a flat dict.
                f.write('\n_lr_action = { }\nfor _k, _v in _lr_action_items.items():\n   for _x,_y in zip(_v[0],_v[1]):\n      _lr_action[(_x,_k)] = _y\ndel _lr_action_items\n')
            else:
                f.write('\n_lr_action = { ')
                for (k, v) in _lr_action.items():
                    f.write(('(%r,%r):%r,' % (k[0], k[1], v)))
                f.write('}\n')
            if smaller:
                # Same compact encoding for the goto table.
                items = {}
                for (k, v) in _lr_goto.items():
                    i = items.get(k[1])
                    if (not i):
                        i = ([], [])
                        items[k[1]] = i
                    i[0].append(k[0])
                    i[1].append(v)
                f.write('\n_lr_goto_items = {')
                for (k, v) in items.items():
                    f.write(('%r:([' % k))
                    for i in v[0]:
                        f.write(('%r,' % i))
                    f.write('],[')
                    for i in v[1]:
                        f.write(('%r,' % i))
                    f.write(']),')
                f.write('}\n')
                f.write('\n_lr_goto = { }\nfor _k, _v in _lr_goto_items.items():\n   for _x,_y in zip(_v[0],_v[1]):\n      _lr_goto[(_x,_k)] = _y\ndel _lr_goto_items\n')
            else:
                f.write('\n_lr_goto = { ')
                for (k, v) in _lr_goto.items():
                    f.write(('(%r,%r):%r,' % (k[0], k[1], v)))
                f.write('}\n')
            # Production list: (name, length, func name, file, line) tuples.
            f.write('_lr_productions = [\n')
            for p in Productions:
                if p:
                    if p.func:
                        f.write(('  (%r,%d,%r,%r,%d),\n' % (p.name, p.len, p.func.__name__, p.file, p.line)))
                    else:
                        f.write(('  (%r,%d,None,None,None),\n' % (p.name, p.len)))
                else:
                    f.write('  None,\n')
            f.write(']\n')
    except IOError as e:
        print(("Unable to create '%s'" % filename))
        print(e)
        return
class Effect4038(BaseEffect):
    """Projected/passive effect: overheating bonus to ECM jam strength.

    Multiplies 'overloadECMStrengthBonus' on every fitted module that has
    that attribute by this module's 'overloadBonusMultiplier'.
    """
    # Applied in the 'early' pass of effect processing.
    runTime = 'early'
    type = ('projected', 'passive')

    # NOTE(review): defined without self/@staticmethod -- this matches the
    # surrounding effect classes' convention of calling handler on the class.
    def handler(fit, module, context, projectionRange, **kwargs):
        fit.modules.filteredItemMultiply((lambda mod: ('overloadECMStrengthBonus' in mod.itemModifiedAttributes)), 'overloadECMStrengthBonus', module.getModifiedItemAttr('overloadBonusMultiplier'), **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.