def parse_access(register, name, write, value, byte_order, silent):
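    """Parse a register definition string into an Access: defaults the Modbus
    type to holding registers ('h') and the struct pack format to 'B'/'H'
    depending on the Modbus type, and prefixes the pack format with an
    endianness marker derived from byte_order."""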
(modbus_type, address, pack_type, presenter) = re.match(REGISTER_RE, register).groups()
if (not address):
        logging.warning('%r is not a known named register nor a valid register definition. Skipping it.', register)
return None
if (not modbus_type):
modbus_type = 'h'
else:
modbus_type = modbus_type[:(- 1)]
if (not pack_type):
if (modbus_type in 'cCd'):
pack_type = 'B'
else:
pack_type = 'H'
else:
pack_type = pack_type[1:]
address = int(address)
if (pack_type[0] not in '=<>!'):
if (byte_order in ('le', 'mixed')):
pack_type = ('<' + pack_type)
elif (byte_order == 'be'):
pack_type = ('!' + pack_type)
if (modbus_type not in 'cCdhHi'):
raise ValueError("Invalid Modbus type '{}'. Valid ones are 'cCdhHi'".format(modbus_type))
if (write and (modbus_type not in 'cChH')):
raise ValueError("Invalid Modbus type '{}'. Only coils and holding registers are writable".format(modbus_type))
return Access(modbus_type, [address], [pack_type], [value], names=[name], presenters=[presenter], byte_order=byte_order, silent=silent) |
def graphs_no_ep_in_address():
x = vars('x', 6)
y = vars('y', 6)
z = vars('z', 6, aliased=True)
c = const(10)
in_n0 = BasicBlock(0, [_assign(x[0], x[2]), _assign(z[0], x[0]), _assign(y[0], _addr(z[0])), _assign(x[1], c[0]), _assign(z[1], x[1]), _assign(y[1], _add(_addr(z[1]), x[0])), _assign(y[2], _add(_addr(c[2]), z[1]))])
in_cfg = ControlFlowGraph()
in_cfg.add_node(in_n0)
out_n0 = BasicBlock(0, [_assign(x[0], x[2]), _assign(z[0], x[2]), _assign(y[0], _addr(z[0])), _assign(x[1], c[0]), _assign(z[1], c[0]), _assign(y[1], _add(_addr(z[1]), x[2])), _assign(y[2], _add(_addr(c[2]), z[1]))])
out_cfg = ControlFlowGraph()
out_cfg.add_node(out_n0)
return (in_cfg, out_cfg) |
class TriggerReqBody(BaseModel):
messages: str = Field(..., description='User input messages')
command: Optional[str] = Field(default='fix', description='Command name')
model: Optional[str] = Field(default='gpt-3.5-turbo', description='Model name')
    stream: Optional[bool] = Field(default=False, description='Whether to return a stream')
    language: Optional[str] = Field(default='hive', description='Language')
    target_language: Optional[str] = Field(default='hive', description='Target language, used in translation')
context: Optional[ReqContext] = Field(default=None, description='The context of the model request.') |
@register_custom_acc_mapper_fn(op_and_target=('call_method', 'repeat_interleave'), arg_replacement_tuples=[('input', 'input'), ('repeats', 'repeats'), ('dim', 'dim', this_arg_is_optional), ('output_size', 'output_size', this_arg_is_optional)], skip_normalization_if_none=True)
@register_custom_acc_mapper_fn(op_and_target=('call_function', torch.repeat_interleave), arg_replacement_tuples=[('input', 'input'), ('repeats', 'repeats'), ('dim', 'dim', this_arg_is_optional), ('output_size', 'output_size', this_arg_is_optional)], skip_normalization_if_none=True)
def repeat_interleave_mapper(node: torch.fx.Node, _: nn.Module):
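    """Rewrite `repeat_interleave` (int repeats only) as unsqueeze -> tile ->
    reshape: a new axis inserted after the repeat dim is tiled `repeats`
    times, then collapsed back with -1 (plus the original sizes) in the new
    shape."""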
input_node = node.kwargs['input']
repeats = cast(int, node.kwargs['repeats'])
dim = node.kwargs['dim']
    if type(repeats) is not int:
        logger.info('Not mapping repeat_interleave to an acc op. We currently only support `repeat_interleave` with int repeats')
        return
rank = node.meta['tensor_rank']
if (dim is None):
repeat_dim = (rank - 1)
else:
assert (type(dim) is int), 'dim should be an int'
repeat_dim = dim
tile_dims = ([1] * (rank + 1))
tile_dims[(repeat_dim + 1)] = repeats
with node.graph.inserting_before(node):
unsqueeze_node = node.graph.create_node('call_function', unsqueeze, kwargs={'input': input_node, 'dim': (repeat_dim + 1)}, name=f'{node.name}_unsqueeze')
tile_node = node.graph.create_node('call_function', tile, kwargs={'input': unsqueeze_node, 'dims': tuple(tile_dims)}, name=f'{node.name}_repeat_interleave_map_tile')
new_shape = []
if (dim is not None):
if (dim < 0):
repeat_dim = (dim + rank)
else:
repeat_dim = dim
size_node = node.graph.create_node('call_function', size, kwargs={'input': input_node}, name=f'{node.name}_size')
size_node.meta['type'] = torch.Size
for i in range(rank):
shape_i = node.graph.create_node('call_function', getitem, kwargs={'input': size_node, 'idx': i}, name=f'{node.name}_size_{i}')
if (i == repeat_dim):
new_shape.append((- 1))
else:
new_shape.append(shape_i)
else:
new_shape.append((- 1))
reshaped_node = node.graph.create_node('call_function', reshape, kwargs={'input': tile_node, 'acc_out_ty': acc_utils.build_raw_tensor_meta(shape=new_shape)}, name=f'{node.name}_reshape')
reshaped_node.meta = node.meta.copy()
return reshaped_node |
def extractTracantranslationBlogspotCom(item):
(vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
if ((not (chp or vol)) or ('preview' in item['title'].lower())):
return None
tagmap = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
for (tagname, name, tl_type) in tagmap:
if (tagname in item['tags']):
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
def setup_to_pass():
shutil.copy('/etc/pam.d/system-auth', '/etc/pam.d/system-auth.bak')
shutil.copy('/etc/pam.d/password-auth', '/etc/pam.d/password-auth.bak')
shellexec("sed -i '/password\\s*sufficient\\s*pam_unix.so/ s/sha512/sha512 remember=5/' /etc/pam.d/system-auth")
shellexec("sed -i '/password\\s*sufficient\\s*pam_unix.so/ s/sha512/sha512 remember=5/' /etc/pam.d/password-auth")
(yield None)
shutil.move('/etc/pam.d/system-auth.bak', '/etc/pam.d/system-auth')
shutil.move('/etc/pam.d/password-auth.bak', '/etc/pam.d/password-auth') |
@pytest.mark.param_file(FIXTURE_PATH / 'mock_include.md')
def test_render(file_params, tmp_path, monkeypatch):
monkeypatch.chdir(tmp_path)
tmp_path.joinpath('other.md').write_text('a\nb\nc')
tmp_path.joinpath('fmatter.md').write_text('---\na: 1\n---\nb')
doctree = publish_doctree(file_params.content, parser=Parser(), settings_overrides={'myst_highlight_code_blocks': False})
doctree['source'] = 'tmpdir/test.md'
output = doctree.pformat().replace((str(tmp_path) + os.sep), 'tmpdir/').rstrip()
file_params.assert_expected(output, rstrip=True) |
def xls_for_macs2(macs_xls, row_limit=None, cell_char_limit=None):
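    """Convert MACS 2.0 XLS output into an XLSWorkBook: one or more data
    sheets (split when row_limit is hit) with extra coordinate columns, a
    notes sheet carrying the MACS header (long command lines chunked across
    cells), and a legends sheet describing each column."""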
if macs_xls.macs_version.startswith('1.'):
raise Exception('Only handles output from MACS 2.0*')
macs_xls.sort_on('fold_enrichment', reverse=True)
if (row_limit is None):
row_limit = Limits.MAX_NUMBER_ROWS_PER_WORKSHEET
if (len(macs_xls.data) > row_limit):
logging.warning('Data will be split over multiple worksheets on output')
if (cell_char_limit is None):
cell_char_limit = Limits.MAX_LEN_WORKSHEET_CELL_VALUE
sheet_title_limit = Limits.MAX_LEN_WORKSHEET_TITLE
    legends_text = {
        'order': 'Sorting order FE',
        'chr': 'Chromosome location of binding region',
        'start': 'Start coordinate of binding region',
        'end': 'End coordinate of binding region',
        'summit+100': 'Summit + 100bp',
        'summit-1': 'Summit of binding region - 1',
        'summit': 'Summit of binding region',
        'abs_summit+100': 'Summit + 100bp',
        'abs_summit-100': 'Summit of binding region - 100bp',
        'abs_summit': 'Coordinate of region summit',
        'length': 'Length of binding region',
        'pileup': 'Number of non-degenerate and position corrected reads at summit',
        '-log10(pvalue)': 'Transformed Pvalue -log10(Pvalue) for the binding region (e.g. if Pvalue=1e-10, then this value should be 10)',
        'fold_enrichment': 'Fold enrichment for this region against random Poisson distribution with local lambda',
        '-log10(qvalue)': 'Transformed Qvalue -log10(Qvalue) for the binding region (e.g. if Qvalue=0.05, then this value should be 1.3)',
        'name': 'Name',
    }
xls = XLSWorkBook()
boldstyle = XLSStyle(bold=True)
data_sheets = []
sheet_number = 1
data = xls.add_work_sheet('data', macs_xls.name)
for line in macs_xls.data:
if (data.last_row == row_limit):
sheet_number += 1
name = ('data%d' % sheet_number)
title = ('%s(%d)' % (macs_xls.name[:(sheet_title_limit - 4)], sheet_number))
print(("Making additional data sheet '%s'" % title))
data = xls.add_work_sheet(name, title)
if (data.next_row == 1):
data.write_row(1, data=macs_xls.columns_as_xls_header)
data_sheets.append(data)
data.append_row(line)
for data in data_sheets:
if (not macs_xls.with_broad_option):
data.insert_column('E', text='chr')
data.write_column('E', fill='=B?', from_row=2)
data.insert_column('F', text='abs_summit-100')
data.write_column('F', fill='=L?-100', from_row=2)
data.insert_column('G', text='abs_summit+100')
data.write_column('G', fill='=L?+100', from_row=2)
data.insert_column('H', text='chr')
data.write_column('H', fill='=B?', from_row=2)
data.insert_column('I', text='summit-1')
data.write_column('I', fill='=L?-1', from_row=2)
data.insert_column('J', text='summit')
data.write_column('J', fill='=L?', from_row=2)
else:
data.insert_column('E', text='chr')
data.write_column('E', fill='=B?', from_row=2)
notes = xls.add_work_sheet('notes', 'Notes')
notes.write_row(1, text='MACS RUN NOTES:', style=boldstyle)
notes.write_column('A', macs_xls.header, from_row=notes.next_row)
notes.append_row(text='ADDITIONAL NOTES:', style=boldstyle)
notes.append_row(text='By default regions are sorted by fold enrichment (in descending order)')
for row in range(1, (notes.last_row + 1)):
if notes['A'][row].startswith('# Command line:'):
command_line = notes['A'][row]
if (len(command_line) > cell_char_limit):
logging.warning('Splitting command line over multiple cells')
row_data = chunk(command_line, cell_char_limit, delimiter=' ')
notes.write_row(row, data=row_data)
data = data_sheets[0]
legends = xls.add_work_sheet('legends', 'Legends')
for col in ColumnRange(data.last_column):
name = data[col][1].lstrip('#')
try:
legends.append_row(data=(name, legends_text[name]))
except KeyError:
logging.warning(("No legend description found for column '%s'" % name))
legends.append_row(data=(name, name.title()))
for data in data_sheets:
data.freeze_panes = 'A2'
return xls |
def train(model, train_loader, val_loader, optimizer, init_lr=0.002, checkpoint_dir=None, checkpoint_interval=None, nepochs=None, clip_thresh=1.0):
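    """Training loop: per-step learning-rate decay, gradient clipping,
    optional exponential moving average of parameters, periodic
    checkpointing, per-epoch loss logging, and validation recall."""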
if use_cuda:
model = model.cuda()
criterion = nn.CrossEntropyLoss()
criterion_reconstruction = DiscretizedMixturelogisticLoss()
global global_step, global_epoch
if (hparams.exponential_moving_average is not None):
ema = ExponentialMovingAverage(hparams.ema_decay)
for (name, param) in model.named_parameters():
if param.requires_grad:
ema.register(name, param.data)
else:
ema = None
while (global_epoch < nepochs):
model.train()
h = open(logfile_name, 'a')
running_loss = 0.0
running_loss_vq = 0.0
running_loss_encoder = 0.0
running_entropy = 0.0
running_loss_lid = 0.0
running_loss_reconstruction = 0.0
for (step, (mfcc, mfcc_lengths, mel, mol, lid, fnames)) in tqdm(enumerate(train_loader)):
current_lr = learning_rate_decay(init_lr, global_step)
for param_group in optimizer.param_groups:
param_group['lr'] = current_lr
optimizer.zero_grad()
(sorted_lengths, indices) = torch.sort(mfcc_lengths.view((- 1)), dim=0, descending=True)
sorted_lengths = sorted_lengths.long().numpy()
mfcc = mfcc[indices]
mel = mel[indices]
mol = mol[indices]
lid = lid[indices]
(mfcc, mel, mol, lid) = (Variable(mfcc), Variable(mel), Variable(mol), Variable(lid))
if use_cuda:
(mfcc, mel, mol, lid) = (mfcc.cuda(), mel.cuda(), mol.cuda(), lid.cuda().long())
logits = model(mfcc, mfcc_lengths=sorted_lengths)
lid_loss = criterion(logits, lid)
loss = lid_loss
loss.backward(retain_graph=False)
grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), clip_thresh)
optimizer.step()
model.quantizer.after_update()
if (ema is not None):
for (name, param) in model.named_parameters():
if (name in ema.shadow):
ema.update(name, param.data)
if ((global_step % checkpoint_interval) == 0):
save_checkpoint(model, optimizer, global_step, checkpoint_dir, global_epoch, ema=ema)
log_value('Training Loss', float(loss.item()), global_step)
log_value('gradient norm', grad_norm, global_step)
log_value('learning rate', current_lr, global_step)
global_step += 1
running_loss += loss.item()
averaged_loss = (running_loss / len(train_loader))
log_value('loss (per epoch)', averaged_loss, global_epoch)
        h.write('Loss after epoch {}: {}\n'.format(global_epoch, running_loss / len(train_loader)))
h.close()
(recall, model) = validate_model(model, val_loader)
log_value('Unweighted Recall per epoch', recall, global_epoch)
global_epoch += 1
return (model, ema) |
def string_to_script_template(text: str) -> ScriptTemplate:
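    """Return a script template for the given text: BIP276-encoded script
    payloads are decoded and classified; anything else is parsed as an
    address."""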
if text.startswith(PREFIX_BIP276_SCRIPT):
(prefix, version, network, data) = bip276_decode(text, Net.BIP276_VERSION)
assert (network == Net.BIP276_VERSION), 'incompatible network'
return classify_output_script(Script(data), Net.COIN)
return Address.from_string(text, Net.COIN) |
def create_diagram(cnarr, segarr, threshold, min_probes, outfname, show_range=None, title=None, show_labels=True):
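    """Draw a whole-genome chromosome diagram (PDF) of copy number bins
    and/or segments coloured by log2 ratio, labelling genes that pass the
    threshold/min_probes filters; optionally restricted to one chromosome."""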
if (cnarr and segarr):
do_both = True
cnarr_is_seg = False
else:
if cnarr:
cnarr_is_seg = False
elif segarr:
cnarr = segarr
cnarr_is_seg = True
else:
raise ValueError('Must provide argument cnarr or segarr, or both. ')
do_both = False
if show_range:
(chrom, start, end) = unpack_range(show_range)
if (not ((start is None) and (end is None))):
raise ValueError("Must provide chromosome only (genomic-range not allowed for 'diagram').")
if cnarr:
cnarr = cnarr.in_range(chrom=chrom, start=None, end=None)
if segarr:
segarr = segarr.in_range(chrom=chrom, start=None, end=None)
gene_labels = _get_gene_labels(cnarr, segarr, cnarr_is_seg, threshold, min_probes)
seen_genes = set()
features = collections.defaultdict(list)
strand = (1 if do_both else None)
chrom_sizes = plots.chromosome_sizes(cnarr)
if (not cnarr_is_seg):
cnarr = cnarr.squash_genes()
for row in cnarr:
if (((row.start - 1) >= 0) and (row.end <= chrom_sizes[row.chromosome])):
if (show_labels and (row.gene in gene_labels) and (row.gene not in seen_genes)):
seen_genes.add(row.gene)
feat_name = row.gene
if (',' in feat_name):
feat_name = feat_name.replace(',', ', ')
else:
feat_name = None
features[row.chromosome].append(((row.start - 1), row.end, strand, feat_name, colors.Color(*plots.cvg2rgb(row.log2, (not cnarr_is_seg)))))
if do_both:
for (chrom, segrows) in segarr.by_chromosome():
for srow in segrows:
if (((srow.start - 1) >= 0) and (srow.end <= chrom_sizes[chrom])):
features[chrom].append(((srow.start - 1), srow.end, (- 1), None, colors.Color(*plots.cvg2rgb(srow.log2, False))))
if (not outfname):
outfname = (cnarr.sample_id + '-diagram.pdf')
drawing = build_chrom_diagram(features, chrom_sizes, cnarr.sample_id, title)
cvs = canvas.Canvas(outfname, pagesize=PAGE_SIZE)
renderPDF.draw(drawing, cvs, 0, 0)
cvs.showPage()
cvs.save()
return outfname |
def _create_model_start_listener(system_app: SystemApp):
from dbgpt.datasource.manages.connection_manager import ConnectManager
cfg = Config()
def startup_event(wh):
print('begin run _add_app_startup_event')
conn_manage = ConnectManager(system_app)
cfg.LOCAL_DB_MANAGE = conn_manage
async_db_summary(system_app)
return startup_event |
@pytest.mark.django_db
def test_limit_values(client, create_idv_test_data):
_test_post(client, {'award_id': 2, 'limit': 1}, (400002, 1, 1, False, 14))
_test_post(client, {'award_id': 2, 'limit': 5}, (400002, 5, 1, False, 14, 13, 12, 11, 10))
_test_post(client, {'award_id': 2, 'limit': 0}, expected_status_code=status.HTTP_422_UNPROCESSABLE_ENTITY)
    _test_post(client, {'award_id': 2, 'limit': None}, expected_status_code=status.HTTP_422_UNPROCESSABLE_ENTITY)
_test_post(client, {'award_id': 2, 'limit': {'BOGUS': 'LIMIT'}}, expected_status_code=status.HTTP_400_BAD_REQUEST) |
class OptionSeriesBubbleSonification(Options):
    @property
    def contextTracks(self) -> 'OptionSeriesBubbleSonificationContexttracks':
        return self._config_sub_data('contextTracks', OptionSeriesBubbleSonificationContexttracks)
    @property
    def defaultInstrumentOptions(self) -> 'OptionSeriesBubbleSonificationDefaultinstrumentoptions':
        return self._config_sub_data('defaultInstrumentOptions', OptionSeriesBubbleSonificationDefaultinstrumentoptions)
    @property
    def defaultSpeechOptions(self) -> 'OptionSeriesBubbleSonificationDefaultspeechoptions':
        return self._config_sub_data('defaultSpeechOptions', OptionSeriesBubbleSonificationDefaultspeechoptions)
    @property
    def enabled(self):
        return self._config_get(True)
    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def pointGrouping(self) -> 'OptionSeriesBubbleSonificationPointgrouping':
        return self._config_sub_data('pointGrouping', OptionSeriesBubbleSonificationPointgrouping)
    @property
    def tracks(self) -> 'OptionSeriesBubbleSonificationTracks':
        return self._config_sub_data('tracks', OptionSeriesBubbleSonificationTracks) |
def get_erpnext_item_code(integration: str, integration_item_code: str, variant_id: Optional[str]=None, has_variants: Optional[int]=0) -> Optional[str]:
filters = {'integration': integration, 'integration_item_code': integration_item_code}
if variant_id:
filters.update({'variant_id': variant_id})
elif has_variants:
filters.update({'has_variants': 1})
return frappe.db.get_value('Ecommerce Item', filters, fieldname='erpnext_item_code') |
def test_router_add_websocket_route(client):
with client.websocket_connect('/ws') as session:
text = session.receive_text()
assert (text == 'Hello, world!')
with client.websocket_connect('/ws/test') as session:
text = session.receive_text()
assert (text == 'Hello, test!') |
@pytest.fixture(scope='function')
def experience_config_tcf_overlay(db: Session) -> Generator:
    config = PrivacyExperienceConfig.create(db=db, data={'accept_button_label': 'Accept all', 'acknowledge_button_label': 'Confirm', 'banner_enabled': 'enabled_where_required', 'component': 'tcf_overlay', 'description': 'On this page you can opt in and out of these data uses cases', 'disabled': False, 'privacy_preferences_link_label': 'Manage preferences', 'privacy_policy_link_label': "View our company's privacy policy", 'privacy_policy_url': '', 'reject_button_label': 'Reject all', 'save_button_label': 'Save', 'title': 'Manage your consent'})  # privacy_policy_url elided in source
(yield config)
for history in config.histories:
history.delete(db)
config.delete(db) |
def run_trace(filename, out):
with open(filename) as trace_file:
sequence = parse_trace(trace_file, None)
with open(out, 'w') as trace_file:
for (command, out) in sequence:
trace_file.write('$ {}\n'.format(command))
(status, stdout, stderr) = run_command(command)
if ((status != 0) and (stdout == '')):
print('[Error from command]')
print(command)
trace_file.write(stderr)
else:
trace_file.write(stdout) |
def extractZetrotranslationWordpressCom(item):
(vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
if ((not (chp or vol)) or ('preview' in item['title'].lower())):
return None
tagmap = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
for (tagname, name, tl_type) in tagmap:
if (tagname in item['tags']):
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
def bm25(rawMatchInfo):
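    """Okapi BM25 rank over an SQLite FTS matchinfo('pcnalx') blob: P/C/N/A
    give term count, column count, total docs and average column lengths;
    L holds this row's column lengths and X holds per term/column triples of
    (term frequency, total hits, docs-with-term)."""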
match_info = _parseMatchInfo(rawMatchInfo)
K = 0.5
B = 0.75
score = 0.0
(P_O, C_O, N_O, A_O) = range(4)
term_count = match_info[P_O]
col_count = match_info[C_O]
total_docs = match_info[N_O]
L_O = (A_O + col_count)
X_O = (L_O + col_count)
weights = ([1] * col_count)
for i in range(term_count):
for j in range(col_count):
weight = weights[j]
if (weight == 0):
continue
avg_length = float(match_info[(A_O + j)])
doc_length = float(match_info[(L_O + j)])
if (avg_length == 0):
D = 0
else:
D = ((1 - B) + (B * (doc_length / avg_length)))
            x = (X_O + (3 * ((i * col_count) + j)))
term_frequency = float(match_info[x])
docs_with_term = float(match_info[(x + 2)])
idf = max(math.log((((total_docs - docs_with_term) + 0.5) / (docs_with_term + 0.5))), 0)
denom = (term_frequency + (K * D))
if (denom == 0):
rhs = 0
else:
rhs = ((term_frequency * (K + 1)) / denom)
score += ((idf * rhs) * weight)
return score |
class _InnerEnv(BaseEnv, _EnvInterfaceInner):
action_space = None
observation_space = None
reward_range = None
metadata = None
def seed(self, seed: int) -> None:
pass
def close(self) -> None:
pass
def reset(self) -> Any:
pass
def step(self, action):
pass
def method_inner(self) -> int:
return 41 |
class _BaseVenvFactory():
venvs_directory: Path
def venv(self, project: str, setup_commands: list[str]) -> Generator[(VirtualEnvironment, None, None)]:
venv_path = (self.venvs_directory / project)
old_cwd = Path.cwd()
virtual_env = VirtualEnvironment(venv_path, project)
virtual_env.setup(setup_commands, old_cwd)
os.chdir((venv_path / 'project'))
try:
(yield virtual_env)
finally:
os.chdir(old_cwd) |
@pytest.mark.parametrize('host_in,port_in,expected_host_out,expected_port_out', ((None, None, 'localhost', 5432), (None, 5432, 'localhost', 5432), ('localhost', '5432', 'localhost', 5432), ('foo.bar', '5432', 'foo.bar', 5432), ('localhost,foo.bar', '5432,1234', 'localhost', 5432)))
def test_get_destination(host_in, port_in, expected_host_out, expected_port_out):
(host, port) = get_destination_info(host_in, port_in)
assert (host == expected_host_out)
assert (port == expected_port_out) |
@ait_converter(acc_ops.tile)
def acc_ops_tile(target: Target, args: Tuple[(Argument, ...)], kwargs: Dict[(str, Argument)], name: str) -> ConverterOutput:
input_val = kwargs['input']
if (not isinstance(input_val, AITTensor)):
raise RuntimeError(f'Unexpected input for {name}: {input_val}')
shape_dims = list(kwargs['dims'])
input_dim_len = len(input_val.shape())
result = input_val
if (len(shape_dims) < input_dim_len):
for _ in range((input_dim_len - len(shape_dims))):
shape_dims.insert(0, 1)
if (input_dim_len < len(shape_dims)):
new_shape = list(input_val.shape())
for _ in range((len(shape_dims) - input_dim_len)):
new_shape.insert(0, IntImm(1))
result = reshape()(input_val, new_shape)
for (i, shape) in enumerate(shape_dims):
if (result.shape()[i]._attrs['name'] is not None):
continue
cat_groups = ([result] * shape)
result = concatenate()(cat_groups, dim=i)
return result |
class DataFile(AbstractLayout):
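    """Serves SNMP var-binds from an indexed simulation data file: looks up
    OIDs through the record index, walks records for GETNEXT, delegates value
    evaluation to variation modules, and keeps at most max_queue_entries
    data files open at once."""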
layout = 'text'
opened_queue = []
max_queue_entries = 31
def __init__(self, textFile, textParser, variationModules):
self._record_index = RecordIndex(textFile, textParser)
self._text_parser = textParser
self._text_file = textFile
self._variation_modules = variationModules
def index_text(self, forceIndexBuild=False, validateData=False):
self._record_index.create(forceIndexBuild, validateData)
return self
def close(self):
self._record_index.close()
def get_handles(self):
if (not self._record_index.is_open()):
if (len(DataFile.opened_queue) > self.max_queue_entries):
log.info(('Closing %s' % self))
DataFile.opened_queue[0].close()
del DataFile.opened_queue[0]
DataFile.opened_queue.append(self)
log.info(('Opening %s' % self))
return self._record_index.get_handles()
def process_var_binds(self, var_binds, **context):
rsp_var_binds = []
if context.get('nextFlag'):
error_status = exval.endOfMib
else:
error_status = exval.noSuchInstance
try:
(text, db) = self.get_handles()
except SnmpsimError as exc:
log.error(('Problem with data file or its index: %s' % exc))
ReportingManager.update_metrics(data_file=self._text_file, datafile_failure_count=1, transport_call_count=1, **context)
return [(vb[0], error_status) for vb in var_binds]
vars_remaining = vars_total = len(var_binds)
err_total = 0
log.info(('Request var-binds: %s, flags: %s, %s' % (', '.join([('%s=<%s>' % (vb[0], vb[1].prettyPrint())) for vb in var_binds]), ((context.get('nextFlag') and 'NEXT') or 'EXACT'), ((context.get('setFlag') and 'SET') or 'GET'))))
for (oid, val) in var_binds:
text_oid = str(univ.OctetString('.'.join([('%s' % x) for x in oid])))
try:
line = self._record_index.lookup(str(univ.OctetString('.'.join([('%s' % x) for x in oid]))))
except KeyError:
offset = search_record_by_oid(oid, text, self._text_parser)
subtree_flag = exact_match = False
else:
(offset, subtree_flag, prev_offset) = line.split(str2octs(','), 2)
(subtree_flag, exact_match) = (int(subtree_flag), True)
offset = int(offset)
text.seek(offset)
vars_remaining -= 1
(line, _, _) = get_record(text)
while True:
if exact_match:
if (context.get('nextFlag') and (not subtree_flag)):
(_next_line, _, _) = get_record(text)
if _next_line:
(_next_oid, _) = self._text_parser.evaluate(_next_line, oidOnly=True)
try:
(_, subtree_flag, _) = self._record_index.lookup(str(_next_oid)).split(str2octs(','), 2)
except KeyError:
log.error(('data error for %s at %s, index broken?' % (self, _next_oid)))
line = ''
else:
subtree_flag = int(subtree_flag)
line = _next_line
else:
line = _next_line
else:
if line:
(_oid, _) = self._text_parser.evaluate(line, oidOnly=True)
else:
_oid = 'last'
try:
(_, _, _prev_offset) = self._record_index.lookup(str(_oid)).split(str2octs(','), 2)
except KeyError:
log.error(('data error for %s at %s, index broken?' % (self, _oid)))
line = ''
else:
_prev_offset = int(_prev_offset)
if (_prev_offset >= 0):
text.seek(_prev_offset)
(_prev_line, _, _) = get_record(text)
(_prev_oid, _) = self._text_parser.evaluate(_prev_line, oidOnly=True)
if _prev_oid.isPrefixOf(oid):
line = _prev_line
subtree_flag = True
if (not line):
_oid = oid
_val = error_status
break
call_context = context.copy()
call_context.update((), origOid=oid, origValue=val, dataFile=self._text_file, subtreeFlag=subtree_flag, exactMatch=exact_match, errorStatus=error_status, varsTotal=vars_total, varsRemaining=vars_remaining, variationModules=self._variation_modules)
try:
(_oid, _val) = self._text_parser.evaluate(line, **call_context)
if (_val is exval.endOfMib):
exact_match = True
subtree_flag = False
continue
except NoDataNotification:
raise
except MibOperationError:
raise
except Exception as exc:
_oid = oid
_val = error_status
err_total += 1
log.error(('data error at %s for %s: %s' % (self, text_oid, exc)))
break
rsp_var_binds.append((_oid, _val))
log.info(('Response var-binds: %s' % ', '.join([('%s=<%s>' % (vb[0], vb[1].prettyPrint())) for vb in rsp_var_binds])))
ReportingManager.update_metrics(data_file=self._text_file, varbind_count=vars_total, datafile_call_count=1, datafile_failure_count=err_total, transport_call_count=1, **context)
return rsp_var_binds
def __str__(self):
return ('%s controller' % self._text_file) |
class OptionPlotoptionsDependencywheelSonificationTracksMappingTime(Options):
    @property
    def mapFunction(self):
        return self._config_get(None)
    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    @property
    def mapTo(self):
        return self._config_get(None)
    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    @property
    def max(self):
        return self._config_get(None)
    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)
    @property
    def min(self):
        return self._config_get(None)
    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)
    @property
    def within(self):
        return self._config_get(None)
    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False) |
class OptionPlotoptionsCylinderSonificationTracksMappingLowpassFrequency(Options):
    @property
    def mapFunction(self):
        return self._config_get(None)
    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    @property
    def mapTo(self):
        return self._config_get(None)
    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    @property
    def max(self):
        return self._config_get(None)
    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)
    @property
    def min(self):
        return self._config_get(None)
    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)
    @property
    def within(self):
        return self._config_get(None)
    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False) |
class ViolationAttributes(BaseModel):
data_categories: List[str] = Field(description='A list of data categories which led to an evaluation violation.')
data_subjects: List[str] = Field(description='A list of data subjects which led to an evaluation violation.')
data_uses: List[str] = Field(description='A list of data uses which led to an evaluation violation.') |
def test_periodic_interval_div_free():
m = PeriodicUnitIntervalMesh(50)
mesh = ExtrudedMesh(m, 50)
V = VectorFunctionSpace(mesh, 'CG', 3)
(x, y) = SpatialCoordinate(mesh)
u = Function(V)
u.interpolate(as_vector([sin(((2 * np.pi) * x)), ((((- 2) * np.pi) * y) * cos(((2 * np.pi) * x)))]))
assert np.allclose(assemble((div(u) * dx)), 0)
L2 = FunctionSpace(mesh, 'DG', 2)
v = TestFunction(L2)
f = assemble((inner(div(u), v) * dx))
assert np.allclose(f.dat.data, 0) |
def test_sampling_array_dims():
raw = [1, 2, 3, 4, 5, 6]
dimensions = [3]
samples = sampling(raw, dimensions)
assert np.array_equal(samples[0], np.array(raw[0:3]))
assert np.array_equal(samples[1], np.array(raw[3:6]))
dimensions = [6]
samples = sampling(raw, dimensions)
assert np.array_equal(samples[0], np.array(raw))
dimensions = [2, 3]
samples = sampling(raw, dimensions)
assert np.array_equal(samples[0][0], np.array(raw[0:3]))
assert np.array_equal(samples[0][1], np.array(raw[3:6]))
dimensions = [3, 2]
samples = sampling(raw, dimensions)
assert np.array_equal(samples[0][0], np.array(raw[0:2]))
assert np.array_equal(samples[0][1], np.array(raw[2:4]))
assert np.array_equal(samples[0][2], np.array(raw[4:6])) |
def cfrp(agent: Agent, state: ShortDeckPokerState, i: int, t: int, c: int, locks: Dict[(str, mp.synchronize.Lock)]={}):
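    """CFR with regret-based pruning: at traverser i's decision nodes, only
    actions whose cumulative regret exceeds threshold c are explored this
    iteration; at other players' nodes a single action is sampled from the
    current strategy sigma."""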
ph = state.player_i
player_not_in_hand = (not state.players[i].is_active)
if (state.is_terminal or player_not_in_hand):
return state.payout[i]
elif (ph == i):
this_info_sets_regret = agent.regret.get(state.info_set, state.initial_regret)
sigma = calculate_strategy(this_info_sets_regret)
vo = 0.0
voa: Dict[(str, float)] = dict()
explored: Dict[(str, bool)] = {action: False for action in state.legal_actions}
this_info_sets_regret = agent.regret.get(state.info_set, state.initial_regret)
for action in state.legal_actions:
if (this_info_sets_regret[action] > c):
new_state: ShortDeckPokerState = state.apply_action(action)
voa[action] = cfrp(agent, new_state, i, t, c, locks)
explored[action] = True
vo += (sigma[action] * voa[action])
if locks:
locks['regret'].acquire()
this_info_sets_regret = agent.regret.get(state.info_set, state.initial_regret)
for action in state.legal_actions:
if explored[action]:
this_info_sets_regret[action] += (voa[action] - vo)
agent.regret[state.info_set] = this_info_sets_regret
if locks:
locks['regret'].release()
return vo
else:
this_info_sets_regret = agent.regret.get(state.info_set, state.initial_regret)
sigma = calculate_strategy(this_info_sets_regret)
available_actions: List[str] = list(sigma.keys())
action_probabilities: List[float] = list(sigma.values())
action: str = np.random.choice(available_actions, p=action_probabilities)
new_state: ShortDeckPokerState = state.apply_action(action)
return cfrp(agent, new_state, i, t, c, locks) |
def test_adding_a_extra_init_container_as_yaml():
config = "\nextraInitContainers:\n - name: do-something\n image: busybox\n command: ['do', 'something']\n"
r = helm_template(config)
extraInitContainer = r['statefulset'][uname]['spec']['template']['spec']['initContainers']
assert ({'name': 'do-something', 'image': 'busybox', 'command': ['do', 'something']} in extraInitContainer) |
class OptionSeriesPyramidDataDragdropGuideboxDefault(Options):
    @property
    def className(self):
        return self._config_get('highcharts-drag-box-default')
    @className.setter
    def className(self, text: str):
        self._config(text, js_type=False)
    @property
    def color(self):
        return self._config_get('rgba(0, 0, 0, 0.1)')
    @color.setter
    def color(self, text: str):
        self._config(text, js_type=False)
    @property
    def cursor(self):
        return self._config_get('move')
    @cursor.setter
    def cursor(self, text: str):
        self._config(text, js_type=False)
    @property
    def lineColor(self):
        return self._config_get('#888')
    @lineColor.setter
    def lineColor(self, text: str):
        self._config(text, js_type=False)
    @property
    def lineWidth(self):
        return self._config_get(1)
    @lineWidth.setter
    def lineWidth(self, num: float):
        self._config(num, js_type=False)
    @property
    def zIndex(self):
        return self._config_get(900)
    @zIndex.setter
    def zIndex(self, num: float):
        self._config(num, js_type=False) |
def sequential_graph_fixer(fixers: List[GraphFixer]) -> GraphFixer:
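    """Compose several graph fixers into one fixer that applies them in
    order, accumulating whether any made progress and stopping at the first
    fixer that reports errors."""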
def sequential(bmg: BMGraphBuilder) -> GraphFixerResult:
made_progress = False
errors = ErrorReport()
current = bmg
for fixer in fixers:
(current, fixer_made_progress, errors) = fixer(current)
made_progress |= fixer_made_progress
if errors.any():
break
return (current, made_progress, errors)
return sequential |
def Fstiffness_k(k: int, kappa0: float, kappa1: float, innerimagelist: np.ndarray, outerimagelist: np.ndarray, Eholo_vertical: np.ndarray):
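    """Vertical stiffness contribution at image k: compares segment lengths
    to neighbours k+1 and k-1 between the inner and outer image paths and
    weights each length mismatch by Eholo_vertical[k] and the spring
    constants kappa0 (forward) and kappa1 (backward)."""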
v0inner = (innerimagelist[k] - innerimagelist[(k + 1)])
tdelta0inner = np.linalg.norm(v0inner)
v0outer = (outerimagelist[k] - outerimagelist[(k + 1)])
tdelta0outer = np.linalg.norm(v0outer)
tdeltadelta = (tdelta0inner - tdelta0outer)
Vvert = ((((- Eholo_vertical[k]) * tdeltadelta) * kappa0) * 0.5)
v1inner = (innerimagelist[k] - innerimagelist[(k - 1)])
tdelta1inner = np.linalg.norm(v1inner)
v1outer = (outerimagelist[k] - outerimagelist[(k - 1)])
tdelta1outer = np.linalg.norm(v1outer)
tdeltadelta = (tdelta1inner - tdelta1outer)
Vvert -= (((Eholo_vertical[k] * tdeltadelta) * kappa1) * 0.5)
return Vvert |
class TestBasicLazy(AllTypes):
    @staticmethod
    @pytest.fixture
    def arg_builder(monkeypatch):
with monkeypatch.context() as m:
m.setattr(sys, 'argv', ['', '--config', './tests/conf/yaml/test.yaml'])
config = ConfigArgBuilder(TypeConfig, TypeOptConfig, lazy=True)
return config.generate() |
def on_window_new(layouts: Layouts, state: State):
def _on_window_new(i3l: Connection, e: WindowEvent):
logger.debug(f'[ipc] window new event - container:{e.container.id}:{e.container.window}')
context = state.sync_context(i3l)
if ((not layouts.exists_for(context.workspace.name)) or (context.workspace_sequence is None)):
logger.debug(' [ipc] window new event - no workspace layout')
return
if (not is_layout_container(e.container)):
logger.debug(' [ipc] window new event - not a layout container')
return
if (len(context.containers) == 0):
logger.debug(' [ipc] window new event - no container to handle')
return
context.workspace_sequence.set_order(e.container)
logger.debug(' [ipc] window new event - update layout')
layout = layouts.get(context.workspace.name)
layout.update(context, e.container)
state.handle_rebuild(context, e.container)
return _on_window_new |
class TaskStatusCondition(Condition):
def __init__(self, workflow_name: str, task_name: str, namespace: str, expect_status: TaskStatus):
key = TaskStatusChangedEvent.generate_task_status_changed_event_key(namespace, workflow_name, task_name)
super().__init__([key])
self.expect_status = expect_status
def is_met(self, event: Event, context: Context) -> bool:
context_dict = json.loads(event.context)
task_status = context_dict[EventContextConstant.TASK_STATUS]
if (self.expect_status == task_status):
return True
else:
return False |
def upgrade():
op.execute('delete from users_events_roles where event_id is null or role_id is null or user_id is null')
op.alter_column('users_events_roles', 'event_id', existing_type=sa.INTEGER(), nullable=False)
op.alter_column('users_events_roles', 'role_id', existing_type=sa.INTEGER(), nullable=False)
op.alter_column('users_events_roles', 'user_id', existing_type=sa.INTEGER(), nullable=False) |
class EvaluatorStaticTest(unittest.TestCase, EvaluatorTestCases):
grammar_scopes = AttributeGrammar.from_modules(scopes, rule_extractor=Parser())
grammar_arithmetic = AttributeGrammar.from_modules(arithmetic, rule_extractor=Parser())
evaluator_scopes = StaticEvaluator(grammar_scopes)
evaluator_arithmetic = StaticEvaluator(grammar_arithmetic)
def test_arithmetic(self):
for (e, r, t, d) in self.expressions:
ast = arithmetic.parse_rpn(e)
ast.depth = 0
ast.index = None
ast = self.evaluator_arithmetic.for_tree(ast).content
self.assertEqual(ast.value, r)
self.assertEqual(ast.type, t)
self.assertEqual(ast.subtree_depth, d)
def test_scopes(self):
for e in self.ok:
ast = scopes_parser.parse(e)
ast.same = []
ast.env = {}
self.assertTrue(self.evaluator_scopes.for_tree(ast).ok)
for e in self.not_ok:
ast = scopes_parser.parse(e)
ast.same = []
ast.env = {}
self.assertFalse(self.evaluator_scopes.for_tree(ast).ok, e) |
class Deployer(object):
def __init__(self, cloud='heroku', config_json=None, credentials_json=None):
self.cloud = cloud
self.dep = None
if (cloud == 'local'):
self.dep = Local(config_json=config_json, credentials_json=credentials_json)
if (cloud == 'heroku'):
self.dep = Heroku(config_json=config_json, credentials_json=credentials_json)
if (cloud == 'aws'):
self.dep = Aws(config_json=config_json, credentials_json=credentials_json)
if (cloud == 'googlecloud'):
self.dep = GoogleCloud(config_json=config_json, credentials_json=credentials_json)
if (cloud == 'azure'):
self.dep = Azure(config_json=config_json, credentials_json=credentials_json)
def deploy(self, model_id):
self.dep.deploy(model_id) |
class OptionPlotoptionsPyramidSonificationDefaultinstrumentoptionsMappingVolume(Options):
    @property
    def mapFunction(self):
        return self._config_get(None)
    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    @property
    def mapTo(self):
        return self._config_get(None)
    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    @property
    def max(self):
        return self._config_get(None)
    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)
    @property
    def min(self):
        return self._config_get(None)
    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)
    @property
    def within(self):
        return self._config_get(None)
    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False) |
def test_get_experiment(poly_example_tmp_dir, dark_storage_client):
resp: Response = dark_storage_client.get('/experiments')
answer_json = resp.json()
assert (len(answer_json) == 1)
assert ('ensemble_ids' in answer_json[0])
assert (len(answer_json[0]['ensemble_ids']) == 2)
assert ('name' in answer_json[0])
assert (answer_json[0]['name'] == 'default') |
def rotate_alphabet(text, key):
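    """Caesar-shift each letter by `key` positions (mod 26), preserving
    case; non-alphabetic characters pass through unchanged."""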
result = ''
for letter in text:
if letter.isalpha():
if letter.isupper():
                result += ALPHABET[((ALPHABET.index(letter.lower()) + key) % 26)].upper()
            else:
                result += ALPHABET[((ALPHABET.index(letter) + key) % 26)]
else:
result += letter
return result |
def test_write_long(la: LogicAnalyzer, slave: I2CSlave):
la.capture(2, block=False)
slave.write_long(WRITE_DATA, REGISTER_ADDRESS)
la.stop()
(scl, sda) = la.fetch_data()
assert (len(scl) == ((SCL_START + (SCL_WRITE * 6)) + SCL_STOP))
assert (len(sda) == ((((SDA_START + SDA_DEVICE_ADDRESS) + SDA_REGISTER_ADDRESS) + (SDA_WRITE * 4)) + SDA_ACK)) |
@pytest.fixture(scope='function')
def exit_manager(manager_nospawn, monkeypatch, temp_output):
def no_op(self, *args, **kwargs):
def _():
self.is_counting = False
return _
def new_config(self, qtile, bar):
QuickExit._configure(self, qtile, bar)
self.qtile.stop = no_op(self)
monkeypatch.setattr('qtile_extras.widget.ScriptExit._configure', new_config)
class ExitConfig(libqtile.confreader.Config):
auto_fullscreen = True
keys = []
mouse = []
groups = [libqtile.config.Group('a')]
layouts = [libqtile.layout.Max()]
floating_layout = libqtile.resources.default_config.floating_layout
screens = [libqtile.config.Screen(top=libqtile.bar.Bar([qtile_extras.widget.ScriptExit(timer_interval=0.05, exit_script=temp_output)], 50))]
manager_nospawn.start(ExitConfig)
(yield manager_nospawn) |
def test_data_integrity_test_all_unique_values() -> None:
test_dataset = pd.DataFrame({'feature1': [1, 1, 2, 3], 'feature2': [1, 2, np.nan, 4], 'target': ['1', '2', '3', '']})
suite = TestSuite(tests=[TestColumnAllUniqueValues(column_name='not_exists_feature')])
suite.run(current_data=test_dataset, reference_data=None, column_mapping=ColumnMapping())
assert (not suite)
suite = TestSuite(tests=[TestColumnAllUniqueValues(column_name='feature2')])
suite.run(current_data=test_dataset, reference_data=None, column_mapping=ColumnMapping())
suite._inner_suite.raise_for_error()
assert suite
assert suite.show()
assert suite.json() |
def test_object_many_objects_nameonly(tmpdir_factory, merge_files_manyLR):
fpath = str(tmpdir_factory.mktemp('lf').join('same-id.dlis'))
content = ['data/chap4-7/eflr/envelope.dlis.part', 'data/chap4-7/eflr/file-header.dlis.part', 'data/chap4-7/eflr/match/T.CHANNEL-I.MATCH1-O.16-C.0.dlis.part', 'data/chap4-7/eflr/match/T.CHANNEL-I.MATCH1-O.127-C.0.dlis.part']
merge_files_manyLR(fpath, content)
with dlis.load(fpath) as (f, *_):
with pytest.raises(ValueError) as exc:
_ = f.object('CHANNEL', 'MATCH1')
assert ('Candidates are' in str(exc.value))
assert ('origin=16, copy=0' in str(exc.value))
assert ('origin=127, copy=0' in str(exc.value)) |
class OptionPlotoptionsErrorbarSonificationContexttracksMappingTime(Options):
    @property
    def mapFunction(self):
        return self._config_get(None)
    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    @property
    def mapTo(self):
        return self._config_get(None)
    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    @property
    def max(self):
        return self._config_get(None)
    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)
    @property
    def min(self):
        return self._config_get(None)
    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)
    @property
    def within(self):
        return self._config_get(None)
    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False) |
class TreeNode():
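    """A node in the class-inheritance graph: records parent/child links,
    computes its level lazily as one more than the deepest parent level,
    and can list all transitive ancestors."""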
def __init__(self, klass):
self.klass = klass
self.name = klass.__name__
self.children = []
self.parents = []
self.level = None
def add_parent(self, parent):
assert isinstance(parent, TreeNode)
if (parent not in self.parents):
self.parents.append(parent)
def add_child(self, child):
assert isinstance(child, TreeNode)
if (child not in self.children):
self.children.append(child)
def get_level(self):
if (not self.level):
if self.parents:
self.level = (max([x.get_level() for x in self.parents]) + 1)
else:
self.level = 0
return self.level
def get_ancestors(self):
def _get_ancestors(node, ancestors):
ancestors.extend(node.parents)
for p in node.parents:
_get_ancestors(p, ancestors)
ancestors = []
_get_ancestors(self, ancestors)
return ancestors |
def output_excel_footer(worksheet, row, no_file_comments, qn, elapsed_time):
if (not no_file_comments):
worksheet.write(row, 0, f'## {qn} queries scanned')
worksheet.write((row + 1), 0, f'## Total time (seconds): {elapsed_time}')
worksheet.write((row + 2), 0, f'## Rate: {(float(qn) / elapsed_time):.2f} q/s')
return |
class RoleDetail(ResourceDetail):
def before_get_object(self, view_kwargs):
if (view_kwargs.get('role_invite_id') is not None):
role_invite = safe_query_kwargs(RoleInvite, view_kwargs, 'role_invite_id')
view_kwargs['id'] = role_invite.role_id
if (view_kwargs.get('users_events_roles_id') is not None):
users_events_role = safe_query_kwargs(UsersEventsRoles, view_kwargs, 'users_events_roles_id')
view_kwargs['id'] = users_events_role.role_id
if (view_kwargs.get('users_groups_roles_id') is not None):
users_groups_role = safe_query_kwargs(UsersGroupsRoles, view_kwargs, 'users_groups_roles_id')
view_kwargs['id'] = users_groups_role.role_id
def before_update_object(self, role, data, view_kwargs):
if data.get('name'):
if (data['name'] in ['owner', 'organizer', 'coorganizer', 'registrar', 'moderator', 'attendee', 'track_organizer']):
raise UnprocessableEntityError({'data': 'name'}, 'The given name cannot be updated')
def before_delete_object(self, obj, kwargs):
if (obj.name in ['owner', 'organizer', 'coorganizer', 'registrar', 'moderator', 'attendee', 'track_organizer']):
raise UnprocessableEntityError({'data': 'name'}, 'The resource with given name cannot be deleted')
decorators = (api.has_permission('is_admin', methods='PATCH,DELETE'),)
schema = RoleSchema
data_layer = {'session': db.session, 'model': Role, 'methods': {'before_get_object': before_get_object, 'before_delete_object': before_delete_object}} |
def get_prices(games: Iterable['Game'], country: str) -> Iterator[Tuple[(str, Price)]]:
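    """Yield (nsuid, price) pairs for the given games, querying the price
    API in chunks of at most 50 nsuids."""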
prices = {}
chunk = []
for game in games:
chunk.append(game)
if (len(chunk) == 50):
fetched = {nsuid: price for (nsuid, price) in fetch_prices(country=country, nsuids=[game.nsuid for game in chunk])}
prices.update(fetched)
chunk = []
if chunk:
fetched = {nsuid: price for (nsuid, price) in fetch_prices(country=country, nsuids=[game.nsuid for game in chunk])}
prices.update(fetched)
(yield from prices.items()) |
@pytest.mark.gui()
@pytest.mark.parametrize(('show_on_startup', 'expected_return_code'), [(False, introduction.Choice.SHOW), (True, introduction.Choice.DONT_SHOW)])
def test_introduction_toggle_checkbox_changes_return_code(qtbot, show_on_startup, expected_return_code):
dialog = introduction.IntroductionDialog(show_on_startup=show_on_startup)
qtbot.addWidget(dialog)
def close_dialog():
while (not dialog.isVisible()):
...
dialog.show_on_startup_checkbox.click()
dialog.ok_button.click()
QtCore.QTimer.singleShot(0, close_dialog)
return_code = dialog.exec()
assert (return_code == expected_return_code) |
@pytest.mark.skipif((('raspberrypi' != os.uname()[1]) and ('raspizero' != os.uname()[1])), reason='RPi only (ePhat) test')
class TestE2eRPiEphatEgress():
def get_ping_status(self, fledge_url):
        conn = http.client.HTTPConnection(fledge_url)
conn.request('GET', '/fledge/ping')
r = conn.getresponse()
assert (200 == r.status)
r = r.read().decode()
jdoc = json.loads(r)
return jdoc
def get_statistics_map(self, fledge_url):
        conn = http.client.HTTPConnection(fledge_url)
conn.request('GET', '/fledge/statistics')
r = conn.getresponse()
assert (200 == r.status)
r = r.read().decode()
jdoc = json.loads(r)
return utils.serialize_stats_map(jdoc)
    @pytest.fixture
    def start_south_north(self, reset_and_start_fledge, add_south, south_branch, disable_schedule, remove_data_file, skip_verify_north_interface, remove_directories, enable_schedule, fledge_url, start_north_pi_server_c, pi_host, pi_port, pi_token, wait_time):
add_south(SOUTH_PLUGIN, south_branch, fledge_url, service_name=SVC_NAME)
if (not skip_verify_north_interface):
start_north_pi_server_c(fledge_url, pi_host, pi_port, pi_token, taskname=TASK_NAME, start_task=False)
time.sleep(wait_time)
disable_schedule(fledge_url, SVC_NAME)
if (not skip_verify_north_interface):
enable_schedule(fledge_url, TASK_NAME)
(yield self.start_south_north)
remove_directories('/tmp/fledge-south-{}'.format(SOUTH_PLUGIN))
def test_end_to_end(self, start_south_north, read_data_from_pi, fledge_url, pi_host, pi_admin, pi_passwd, pi_db, wait_time, retries, skip_verify_north_interface):
time.sleep((wait_time * 2))
self._verify_ping_and_statistics(fledge_url, skip_verify_north_interface)
self._verify_ingest(fledge_url)
if (not skip_verify_north_interface):
self._verify_egress(read_data_from_pi, pi_host, pi_admin, pi_passwd, pi_db, wait_time, retries)
def _verify_ping_and_statistics(self, fledge_url, skip_verify_north_interface):
ping_response = self.get_ping_status(fledge_url)
assert ping_response['dataRead']
if (not skip_verify_north_interface):
assert ping_response['dataSent']
actual_stats_map = self.get_statistics_map(fledge_url)
assert actual_stats_map['{}{}'.format(ASSET_PREFIX.upper(), ASSET_NAME_W.upper())]
assert actual_stats_map['{}{}'.format(ASSET_PREFIX.upper(), ASSET_NAME_M.upper())]
assert actual_stats_map['{}{}'.format(ASSET_PREFIX.upper(), ASSET_NAME_A.upper())]
assert actual_stats_map['{}{}'.format(ASSET_PREFIX.upper(), ASSET_NAME_C.upper())]
assert actual_stats_map['READINGS']
if (not skip_verify_north_interface):
assert actual_stats_map[TASK_NAME]
assert actual_stats_map['Readings Sent']
def _verify_ingest(self, fledge_url):
asset_name_with_prefix_w = '{}{}'.format(ASSET_PREFIX, ASSET_NAME_W)
asset_name_with_prefix_m = '{}{}'.format(ASSET_PREFIX, ASSET_NAME_M)
asset_name_with_prefix_a = '{}{}'.format(ASSET_PREFIX, ASSET_NAME_A)
asset_name_with_prefix_c = '{}{}'.format(ASSET_PREFIX, ASSET_NAME_C)
        conn = http.client.HTTPConnection(fledge_url)
conn.request('GET', '/fledge/asset')
r = conn.getresponse()
assert (200 == r.status)
r = r.read().decode()
jdoc = json.loads(r)
assert len(jdoc), 'No asset found'
actual_assets = [i['assetCode'] for i in jdoc]
assert (asset_name_with_prefix_w in actual_assets)
assert (asset_name_with_prefix_m in actual_assets)
assert (asset_name_with_prefix_a in actual_assets)
assert (asset_name_with_prefix_c in actual_assets)
assert jdoc[0]['count']
expected_assets = Counter([asset_name_with_prefix_w, asset_name_with_prefix_m, asset_name_with_prefix_a, asset_name_with_prefix_c])
assert (Counter(actual_assets) == expected_assets)
conn.request('GET', '/fledge/asset/{}'.format(quote(asset_name_with_prefix_w, safe='')))
r = conn.getresponse()
assert (200 == r.status)
r = r.read().decode()
jdoc_asset = json.loads(r)
for _sensor in SENSOR_READ_KEY_W:
assert len(jdoc_asset), "No data found for asset '{}'".format(asset_name_with_prefix_w)
assert (jdoc_asset[0]['reading'][_sensor] is not None)
conn.request('GET', '/fledge/asset/{}/{}'.format(quote(asset_name_with_prefix_w, safe=''), _sensor))
r = conn.getresponse()
assert (200 == r.status)
r = r.read().decode()
jdoc = json.loads(r)
assert len(jdoc), "No data found for asset '{}' and datapoint '{}'".format(asset_name_with_prefix_w, _sensor)
conn.request('GET', '/fledge/asset/{}'.format(quote(asset_name_with_prefix_m, safe='')))
r = conn.getresponse()
assert (200 == r.status)
r = r.read().decode()
jdoc_asset = json.loads(r)
for _sensor in SENSOR_READ_KEY_M:
assert len(jdoc_asset), "No data found for asset '{}'".format(asset_name_with_prefix_m)
assert (jdoc_asset[0]['reading'][_sensor] is not None)
conn.request('GET', '/fledge/asset/{}/{}'.format(quote(asset_name_with_prefix_m, safe=''), _sensor))
r = conn.getresponse()
assert (200 == r.status)
r = r.read().decode()
jdoc = json.loads(r)
assert len(jdoc), "No data found for asset '{}' and datapoint '{}'".format(asset_name_with_prefix_m, _sensor)
conn.request('GET', '/fledge/asset/{}'.format(quote(asset_name_with_prefix_a, safe='')))
r = conn.getresponse()
assert (200 == r.status)
r = r.read().decode()
jdoc_asset = json.loads(r)
for _sensor in SENSOR_READ_KEY_A:
assert len(jdoc_asset), "No data found for asset '{}'".format(asset_name_with_prefix_a)
assert (jdoc_asset[0]['reading'][_sensor] is not None)
conn.request('GET', '/fledge/asset/{}/{}'.format(quote(asset_name_with_prefix_a, safe=''), _sensor))
r = conn.getresponse()
assert (200 == r.status)
r = r.read().decode()
jdoc = json.loads(r)
assert len(jdoc), "No data found for asset '{}' and datapoint '{}'".format(asset_name_with_prefix_a, _sensor)
conn.request('GET', '/fledge/asset/{}'.format(quote(asset_name_with_prefix_c, safe='')))
r = conn.getresponse()
assert (200 == r.status)
r = r.read().decode()
jdoc_asset = json.loads(r)
for _sensor in SENSOR_READ_KEY_C:
assert len(jdoc_asset), "No data found for asset '{}'".format(asset_name_with_prefix_c)
assert (jdoc_asset[0]['reading'][_sensor] is not None)
conn.request('GET', '/fledge/asset/{}/{}'.format(quote(asset_name_with_prefix_c, safe=''), _sensor))
r = conn.getresponse()
assert (200 == r.status)
r = r.read().decode()
jdoc = json.loads(r)
assert len(jdoc), "No data found for asset '{}' and datapoint '{}'".format(asset_name_with_prefix_c, _sensor)
def _verify_egress(self, read_data_from_pi, pi_host, pi_admin, pi_passwd, pi_db, wait_time, retries):
retry_count = 0
data_from_pi_w = None
data_from_pi_m = None
data_from_pi_a = None
data_from_pi_c = None
asset_name_with_prefix_w = '{}{}'.format(ASSET_PREFIX, ASSET_NAME_W)
asset_name_with_prefix_a = '{}{}'.format(ASSET_PREFIX, ASSET_NAME_A)
asset_name_with_prefix_m = '{}{}'.format(ASSET_PREFIX, ASSET_NAME_M)
asset_name_with_prefix_c = '{}{}'.format(ASSET_PREFIX, ASSET_NAME_C)
while (((data_from_pi_w is None) or (data_from_pi_w == [])) and (retry_count < retries)):
data_from_pi_w = read_data_from_pi(pi_host, pi_admin, pi_passwd, pi_db, asset_name_with_prefix_w, SENSOR_READ_KEY_W)
data_from_pi_m = read_data_from_pi(pi_host, pi_admin, pi_passwd, pi_db, asset_name_with_prefix_m, SENSOR_READ_KEY_M)
data_from_pi_a = read_data_from_pi(pi_host, pi_admin, pi_passwd, pi_db, asset_name_with_prefix_a, SENSOR_READ_KEY_A)
data_from_pi_c = read_data_from_pi(pi_host, pi_admin, pi_passwd, pi_db, asset_name_with_prefix_c, SENSOR_READ_KEY_C)
retry_count += 1
time.sleep((wait_time * 2))
if ((data_from_pi_w is None) or (data_from_pi_m is None) or (data_from_pi_a is None) or (data_from_pi_c is None) or (retry_count == retries)):
assert False, 'Failed to read data from PI'
print('Data read from PI System:\nWeather={}\nMagnetometer={}\nAccelerometer={}\nrgbColor={}\n'.format(data_from_pi_w, data_from_pi_m, data_from_pi_a, data_from_pi_c))
for w in SENSOR_READ_KEY_W:
assert (w in data_from_pi_w)
abs_sum_w = sum([abs(n) for n in data_from_pi_w[w]])
print('Weather (sum of {} absolute values), Sensor={}'.format(len(data_from_pi_w[w]), w), abs_sum_w)
assert abs_sum_w, 'Sum of weather sensor absolute values is 0'
for a in SENSOR_READ_KEY_A:
assert (a in data_from_pi_a)
abs_sum_a = sum([abs(n) for n in data_from_pi_a[a]])
print('Accelerometer (sum of {} absolute values, Sensor={}'.format(len(data_from_pi_a[a]), a), abs_sum_a)
assert abs_sum_a, 'Sum of accelerometer sensor absolute values is 0'
for m in SENSOR_READ_KEY_M:
assert (m in data_from_pi_m)
abs_sum_m = sum([abs(n) for n in data_from_pi_m[m]])
print('Magnetometer (sum of {} absolute values), Sensor={}'.format(len(data_from_pi_m[m]), m), abs_sum_m)
assert abs_sum_m, 'Sum of magnetometer sensor absolute values is 0'
for c in SENSOR_READ_KEY_C:
assert (c in data_from_pi_c)
abs_sum_c = sum([abs(n) for n in data_from_pi_c[c]])
print('RGB colors (sum of {} absolute values), Sensor={}'.format(len(data_from_pi_c[c]), c), abs_sum_c)
assert abs_sum_c, 'Sum of rgb sensors absolute values is 0' |
def construct_article_list(modules_and_symbols):
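    """Map each module and each of its symbols to a unique lower-case
    article name, appending numeric suffixes on collisions; returns
    (articles, symbol_to_article)."""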
articles = {}
symbol_to_article = {}
for (mod_name, (module, symbols)) in modules_and_symbols.items():
if len(symbols):
article_name = mod_name.lower()
if (article_name in articles):
suffix = 1
while ((article_name + str(suffix)) in articles):
suffix += 1
article_name = (article_name + str(suffix))
articles[article_name] = (mod_name, module)
symbol_to_article[mod_name] = article_name
suffix = 0
for (symbol_name, symbol) in symbols:
full_name = ((mod_name + '.') + symbol_name)
article_name = full_name.lower()
if (article_name in articles):
suffix += 1
while ((article_name + str(suffix)) in articles):
suffix += 1
article_name = (article_name + str(suffix))
articles[article_name] = (full_name, symbol)
symbol_to_article[full_name] = article_name
return (articles, symbol_to_article) |
class SnowflakeAdapterTeleport(TeleportAdapter):
def __init__(self, db_adapter: BaseAdapter, teleport_credentials: TeleportCredentials):
from dbt.adapters.fal_experimental.adapter_support import new_connection
self._db_adapter = db_adapter
self._credentials = teleport_credentials
url = f's3://{teleport_credentials.s3_bucket}/teleport'
with new_connection(self._db_adapter, 'fal-snowflake:setup-teleport') as conn:
cur = conn.handle.cursor()
create_stage_query = f'''CREATE OR REPLACE STAGE falstage
URL = '{url}' CREDENTIALS = (
aws_key_id='{self._credentials.s3_access_key_id}',
aws_secret_key='{self._credentials.s3_access_key}');'''
create_format_query = "CREATE OR REPLACE FILE FORMAT falparquet type = 'PARQUET';"
cur.execute(create_stage_query)
cur.execute(create_format_query)
    @classmethod
    def storage_formats(cls):
return ['parquet']
def teleport_from_external_storage(self, relation: BaseRelation, relation_path: str, teleport_info: TeleportInfo) -> None:
assert (teleport_info.format == 'parquet'), 'snowflake only supports parquet format for Teleport'
location = f'/{relation_path}'
with self._db_adapter.connection_named('teleport:copy_from'):
create_macro = self._db_adapter.execute_macro('snowflake__create_table_from_parquet', kwargs={'relation': relation, 'location': location})
self._db_adapter.execute(create_macro)
copy_macro = self._db_adapter.execute_macro('snowflake__copy_from_parquet', kwargs={'relation': relation, 'location': location})
self._db_adapter.execute(copy_macro)
def teleport_to_external_storage(self, relation: BaseRelation, teleport_info: TeleportInfo) -> str:
assert (teleport_info.format == 'parquet'), 'snowflake only supports parquet format for Teleport'
relation_path = teleport_info.build_relation_path(relation)
location = f'/{relation_path}'
rendered_macro = self._db_adapter.execute_macro('snowflake__copy_to_parquet', kwargs={'relation': relation, 'location': location})
with self._db_adapter.connection_named('teleport:copy_to'):
self._db_adapter.execute(rendered_macro)
return relation_path |
def largest_optimized(min_factor, max_factor):
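    """Find the largest palindromic product of two factors within
    [min_factor, max_factor]; returns (product, list of factor pairs),
    breaking out early once no remaining product can beat the best."""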
if (min_factor > max_factor):
raise ValueError('min must be <= max')
result = 0
answer = []
for number_a in range(max_factor, (min_factor - 1), (- 1)):
was_bigger = False
for number_b in range(max_factor, (number_a - 1), (- 1)):
if ((number_a * number_b) >= result):
was_bigger = True
test_value = str((number_a * number_b))
if (test_value == test_value[::(- 1)]):
if ((number_a * number_b) > result):
answer = []
result = int(test_value)
answer.append([number_a, number_b])
if (not was_bigger):
break
if (result == 0):
result = None
return (result, answer) |
class TestRefreshingToken():
def test_repr(self):
low_token = make_token_obj('token', False)
cred = MagicMock()
auto_token = RefreshingToken(low_token, cred)
assert repr(auto_token).startswith('RefreshingToken(')
def test_fresh_token_returned(self):
low_token = make_token_obj('token', False)
cred = MagicMock()
auto_token = RefreshingToken(low_token, cred)
assert (auto_token.access_token == 'token')
def test_expiring_token_refreshed(self):
expiring = make_token_obj('expiring', True)
refreshed = make_token_obj('refreshed', False)
cred = MagicMock()
cred.refresh.return_value = refreshed
auto_token = RefreshingToken(expiring, cred)
assert (auto_token.access_token == 'refreshed')
def test_refreshing_token_has_same_attributes_as_regular(self):
token_info = MagicMock()
token = Token(token_info, uses_pkce=False)
token._expires_at = 3000
auto_token = RefreshingToken(token, MagicMock())
token_attributes = [a for a in dir(token) if (not a.startswith('_'))]
auto_attributes = [a for a in dir(auto_token) if (not a.startswith('_'))]
for attribute in token_attributes:
auto_token.__getattribute__(attribute)
assert (attribute in auto_attributes)
def test_refreshing_token_expiration_attributes(self):
token_info = MagicMock()
token = Token(token_info, uses_pkce=False)
token._expires_at = 0
auto_token = RefreshingToken(token, MagicMock())
assert (auto_token.is_expiring is False)
assert (auto_token.expires_in is None)
assert (auto_token.expires_at is None) |
class PKManyToManyTests(TestCase):
def setUp(self):
for idx in range(1, 4):
target = ManyToManyTarget(name=('target-%d' % idx))
target.save()
source = ManyToManySource(name=('source-%d' % idx))
source.save()
for target in ManyToManyTarget.objects.all():
source.targets.add(target)
def test_many_to_many_retrieve(self):
queryset = ManyToManySource.objects.all()
serializer = ManyToManySourceSerializer(queryset, many=True)
expected = [{'id': 1, 'name': 'source-1', 'targets': [1]}, {'id': 2, 'name': 'source-2', 'targets': [1, 2]}, {'id': 3, 'name': 'source-3', 'targets': [1, 2, 3]}]
with self.assertNumQueries(4):
assert (serializer.data == expected)
def test_many_to_many_retrieve_prefetch_related(self):
queryset = ManyToManySource.objects.all().prefetch_related('targets')
serializer = ManyToManySourceSerializer(queryset, many=True)
with self.assertNumQueries(2):
serializer.data
def test_reverse_many_to_many_retrieve(self):
queryset = ManyToManyTarget.objects.all()
serializer = ManyToManyTargetSerializer(queryset, many=True)
expected = [{'id': 1, 'name': 'target-1', 'sources': [1, 2, 3]}, {'id': 2, 'name': 'target-2', 'sources': [2, 3]}, {'id': 3, 'name': 'target-3', 'sources': [3]}]
with self.assertNumQueries(4):
assert (serializer.data == expected)
def test_many_to_many_update(self):
data = {'id': 1, 'name': 'source-1', 'targets': [1, 2, 3]}
instance = ManyToManySource.objects.get(pk=1)
serializer = ManyToManySourceSerializer(instance, data=data)
assert serializer.is_valid()
serializer.save()
assert (serializer.data == data)
queryset = ManyToManySource.objects.all()
serializer = ManyToManySourceSerializer(queryset, many=True)
expected = [{'id': 1, 'name': 'source-1', 'targets': [1, 2, 3]}, {'id': 2, 'name': 'source-2', 'targets': [1, 2]}, {'id': 3, 'name': 'source-3', 'targets': [1, 2, 3]}]
assert (serializer.data == expected)
def test_reverse_many_to_many_update(self):
data = {'id': 1, 'name': 'target-1', 'sources': [1]}
instance = ManyToManyTarget.objects.get(pk=1)
serializer = ManyToManyTargetSerializer(instance, data=data)
assert serializer.is_valid()
serializer.save()
assert (serializer.data == data)
queryset = ManyToManyTarget.objects.all()
serializer = ManyToManyTargetSerializer(queryset, many=True)
expected = [{'id': 1, 'name': 'target-1', 'sources': [1]}, {'id': 2, 'name': 'target-2', 'sources': [2, 3]}, {'id': 3, 'name': 'target-3', 'sources': [3]}]
assert (serializer.data == expected)
def test_many_to_many_create(self):
data = {'id': 4, 'name': 'source-4', 'targets': [1, 3]}
serializer = ManyToManySourceSerializer(data=data)
assert serializer.is_valid()
obj = serializer.save()
assert (serializer.data == data)
assert (obj.name == 'source-4')
queryset = ManyToManySource.objects.all()
serializer = ManyToManySourceSerializer(queryset, many=True)
expected = [{'id': 1, 'name': 'source-1', 'targets': [1]}, {'id': 2, 'name': 'source-2', 'targets': [1, 2]}, {'id': 3, 'name': 'source-3', 'targets': [1, 2, 3]}, {'id': 4, 'name': 'source-4', 'targets': [1, 3]}]
assert (serializer.data == expected)
def test_many_to_many_unsaved(self):
source = ManyToManySource(name='source-unsaved')
serializer = ManyToManySourceSerializer(source)
expected = {'id': None, 'name': 'source-unsaved', 'targets': []}
with self.assertNumQueries(0):
assert (serializer.data == expected)
def test_reverse_many_to_many_create(self):
data = {'id': 4, 'name': 'target-4', 'sources': [1, 3]}
serializer = ManyToManyTargetSerializer(data=data)
assert serializer.is_valid()
obj = serializer.save()
assert (serializer.data == data)
assert (obj.name == 'target-4')
queryset = ManyToManyTarget.objects.all()
serializer = ManyToManyTargetSerializer(queryset, many=True)
expected = [{'id': 1, 'name': 'target-1', 'sources': [1, 2, 3]}, {'id': 2, 'name': 'target-2', 'sources': [2, 3]}, {'id': 3, 'name': 'target-3', 'sources': [3]}, {'id': 4, 'name': 'target-4', 'sources': [1, 3]}]
assert (serializer.data == expected) |
class OptionPlotoptionsLineSonificationTracksMappingLowpass(Options):
    @property
    def frequency(self) -> 'OptionPlotoptionsLineSonificationTracksMappingLowpassFrequency':
        return self._config_sub_data('frequency', OptionPlotoptionsLineSonificationTracksMappingLowpassFrequency)
    @property
    def resonance(self) -> 'OptionPlotoptionsLineSonificationTracksMappingLowpassResonance':
        return self._config_sub_data('resonance', OptionPlotoptionsLineSonificationTracksMappingLowpassResonance) |
class TestCu2QuPen(unittest.TestCase, _TestPenMixin):
def __init__(self, *args, **kwargs):
super(TestCu2QuPen, self).__init__(*args, **kwargs)
self.Glyph = DummyGlyph
self.Pen = DummyPen
self.Cu2QuPen = Cu2QuPen
self.pen_getter_name = 'getPen'
self.draw_method_name = 'draw'
def test_qCurveTo_1_point(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.qCurveTo((1, 1))
self.assertEqual(str(pen).splitlines(), ['pen.moveTo((0, 0))', 'pen.qCurveTo((1, 1))'])
def test_qCurveTo_more_than_1_point(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.qCurveTo((1, 1), (2, 2))
self.assertEqual(str(pen).splitlines(), ['pen.moveTo((0, 0))', 'pen.qCurveTo((1, 1), (2, 2))'])
def test_curveTo_1_point(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.curveTo((1, 1))
self.assertEqual(str(pen).splitlines(), ['pen.moveTo((0, 0))', 'pen.qCurveTo((1, 1))'])
def test_curveTo_2_points(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.curveTo((1, 1), (2, 2))
self.assertEqual(str(pen).splitlines(), ['pen.moveTo((0, 0))', 'pen.qCurveTo((1, 1), (2, 2))'])
def test_curveTo_3_points(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.curveTo((1, 1), (2, 2), (3, 3))
self.assertEqual(str(pen).splitlines(), ['pen.moveTo((0, 0))', 'pen.qCurveTo((0.75, 0.75), (2.25, 2.25), (3, 3))'])
def test_curveTo_more_than_3_points(self):
pen = DummyPen()
quadpen = Cu2QuPen(pen, MAX_ERR)
quadpen.moveTo((0, 0))
quadpen.curveTo((1, 1), (2, 2), (3, 3), (4, 4))
self.assertEqual(str(pen).splitlines(), ['pen.moveTo((0, 0))', 'pen.qCurveTo((0.75, 0.75), (1.625, 1.625), (2, 2))', 'pen.qCurveTo((2.375, 2.375), (3.25, 3.25), (4, 4))']) |
@provides(IStatusBarManager)
class StatusBarManager(HasTraits):
message = Property
messages = List(Str)
status_bar = Any()
def create_status_bar(self, parent):
if (self.status_bar is None):
self.status_bar = wx.StatusBar(parent)
self.status_bar._pyface_control = self
if (len(self.messages) > 1):
self.status_bar.SetFieldsCount(len(self.messages))
for i in range(len(self.messages)):
self.status_bar.SetStatusText(self.messages[i], i)
else:
self.status_bar.SetStatusText(self.message)
return self.status_bar
def destroy(self):
if (self.status_bar is not None):
self.status_bar.Destroy()
self.status_bar._pyface_control = None
self.status_bar = None
def _get_message(self):
if (len(self.messages) > 0):
message = self.messages[0]
else:
message = ''
return message
def _set_message(self, value):
if (len(self.messages) > 0):
old = self.messages[0]
self.messages[0] = value
else:
old = ''
self.messages.append(value)
self.trait_property_changed('message', old, value)
return
def _messages_changed(self):
if (self.status_bar is not None):
for i in range(len(self.messages)):
self.status_bar.SetStatusText(self.messages[i], i)
def _messages_items_changed(self):
if (self.status_bar is not None):
for i in range(len(self.messages)):
self.status_bar.SetStatusText(self.messages[i], i)
return |
class GreetingWorkflowImpl(GreetingWorkflow):
async def get_greeting(self):
global version_found_in_step_1_0, version_found_in_step_1_1
global version_found_in_step_2_0, version_found_in_step_2_1
version_found_in_step_1_0 = Workflow.get_version('first-item', DEFAULT_VERSION, 2)
version_found_in_step_1_1 = Workflow.get_version('first-item', DEFAULT_VERSION, 2)
(await Workflow.sleep(60))
version_found_in_step_2_0 = Workflow.get_version('first-item', DEFAULT_VERSION, 2)
version_found_in_step_2_1 = Workflow.get_version('first-item', DEFAULT_VERSION, 2) |
def setup_logging(verbosity: int, color_logging: bool=False, apprise_notifiers: List[str]=[]) -> None:
add_logging_level('EXTRA_DEBUG', (logging.DEBUG - 1))
add_logging_level('WEBSOCKET_DATA', (logging.DEBUG - 2))
format = '{asctime} [{levelname:^11s}] {name:<42} : {message}'
sh = create_logging_handler(format, color_logging)
logger = logging.getLogger('unifi_protect_backup')
logger.addHandler(sh)
logger.propagate = False
if (verbosity == 0):
logging.basicConfig(level=logging.WARN, handlers=[sh])
logger.setLevel(logging.INFO)
elif (verbosity == 1):
logging.basicConfig(level=logging.WARN, handlers=[sh])
logger.setLevel(logging.DEBUG)
elif (verbosity == 2):
logging.basicConfig(level=logging.WARN, handlers=[sh])
logger.setLevel(logging.EXTRA_DEBUG)
elif (verbosity == 3):
logging.basicConfig(level=logging.INFO, handlers=[sh])
logger.setLevel(logging.EXTRA_DEBUG)
elif (verbosity == 4):
logging.basicConfig(level=logging.INFO, handlers=[sh])
logger.setLevel(logging.WEBSOCKET_DATA)
elif (verbosity >= 5):
logging.basicConfig(level=logging.DEBUG, handlers=[sh])
logger.setLevel(logging.WEBSOCKET_DATA) |
class TestMlabNullEngineBase(unittest.TestCase):
def setUp(self):
e = Engine()
e.start()
self._non_null_engine = e
mlab.set_engine(e)
def tearDown(self):
if (not (mlab.get_engine() is self._non_null_engine)):
raise AssertionError('The NullEngine has overridden the default one')
engine_manager.current_engine = None
registry.unregister_engine(self._non_null_engine)
for engine in list(registry.engines):
registry.unregister_engine(engine) |
class KiwoomOpenApiPlusServiceServer(Logging):
def __init__(self, control, host=None, port=None, credentials=None, **kwargs):
if (host is None):
host = config.get_string('koapy.backend.kiwoom_open_api_plus.grpc.host', 'localhost')
host = config.get_string('koapy.backend.kiwoom_open_api_plus.grpc.server.host', host)
if (port is None):
port = config.get_int('koapy.backend.kiwoom_open_api_plus.grpc.port', 0)
port = config.get_int('koapy.backend.kiwoom_open_api_plus.grpc.server.port', port)
if (port == 0):
port = find_free_port_for_host(host)
            self.logger.info('Using a free port; the final address will be %s:%d', host, port)
self._control = control
self._host = host
self._port = port
self._credentials = credentials
self._kwargs = dict(kwargs)
self._servicer = KiwoomOpenApiPlusServiceServicer(self._control)
self._address = ((self._host + ':') + str(self._port))
grpc_server_signature = inspect.signature(grpc.server)
grpc_server_params = list(grpc_server_signature.parameters.keys())
grpc_server_kwargs = {k: v for (k, v) in self._kwargs.items() if (k in grpc_server_params)}
grpc_server_bound_arguments = grpc_server_signature.bind_partial(**grpc_server_kwargs)
if (grpc_server_bound_arguments.arguments.get('thread_pool') is None):
thread_pool_signature = inspect.signature(ThreadPoolExecutor)
thread_pool_params = list(thread_pool_signature.parameters.keys())
thread_pool_kwargs = {k: v for (k, v) in self._kwargs.items() if (k in thread_pool_params)}
thread_pool_bound_arguments = thread_pool_signature.bind(**thread_pool_kwargs)
if (thread_pool_bound_arguments.arguments.get('max_workers') is None):
max_workers = config.get_int('koapy.backend.kiwoom_open_api_plus.grpc.server.max_workers', 8)
thread_pool_bound_arguments.arguments['max_workers'] = max_workers
thread_pool = ThreadPoolExecutor(*thread_pool_bound_arguments.args, **thread_pool_bound_arguments.kwargs)
grpc_server_bound_arguments.arguments['thread_pool'] = thread_pool
self._thread_pool = thread_pool
else:
self._thread_pool = grpc_server_bound_arguments.arguments['thread_pool']
self._grpc_server_bound_arguments = grpc_server_bound_arguments
self._server = None
self._server_started = False
self._server_stopped = False
self.reinitialize_server()
def reinitialize_server(self):
if (self._server is not None):
self.stop()
self.wait_for_termination()
self._server = grpc.server(*self._grpc_server_bound_arguments.args, **self._grpc_server_bound_arguments.kwargs)
self._server_started = False
self._server_stopped = False
KiwoomOpenApiPlusService_pb2_grpc.add_KiwoomOpenApiPlusServiceServicer_to_server(self._servicer, self._server)
if (self._credentials is None):
if (not is_in_private_network(self._host)):
self.logger.warning('Adding insecure port %s to server, but the address is not private.', self._address)
self._server.add_insecure_port(self._address)
else:
self._server.add_secure_port(self._address, self._credentials)
def get_host(self):
return self._host
def get_port(self):
return self._port
def start(self):
if (self._server_started and self._server_stopped):
self.reinitialize_server()
if (not self._server_started):
self._server.start()
self._server_started = True
def wait_for_termination(self, timeout=None):
return self._server.wait_for_termination(timeout)
def is_running(self):
return self.wait_for_termination(1)
def stop(self, grace=None):
event = self._server.stop(grace)
self._server_stopped = True
return event
def __getattr__(self, name):
return getattr(self._server, name)
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.stop()
self.wait_for_termination() |
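# The server implements the context-manager protocol; a typical lifecycle looks like
# the sketch below, where `control` stands in for the Kiwoom OpenAPI control object.
with KiwoomOpenApiPlusServiceServer(control) as server:
    print('serving on {}:{}'.format(server.get_host(), server.get_port()))
    server.wait_for_termination() |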
class Arm(BaseArch):
bits = 32
gpr_registers = ['r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10', 'r11', 'r12', 'sp', 'lr', 'pc']
gpr_key = 'general'
flag_register = 'cpsr'
    # CPSR bit positions: N=31, Z=30, C=29, V=28, Q=27, J=24, GE=19:16, E=9, A=8, I=7, F=6, T=5
    flag_register_bit_masks = {'n': 2147483648, 'z': 1073741824, 'c': 536870912, 'v': 268435456, 'q': 134217728, 'j': 16777216, 'ge': 983040, 'e': 512, 'a': 256, 'i': 128, 'f': 64, 't': 32} |
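# Standalone helper (not part of the class) showing how the masks decode a raw CPSR value:
def decode_cpsr(value, masks=Arm.flag_register_bit_masks):
    return {name for (name, mask) in masks.items() if (value & mask)}

assert (decode_cpsr(0x60000000) == {'z', 'c'}) |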
def get_curr_and_ref_df(data: InputData, min_rel_score: Optional[int]=None, no_feedback_users: bool=False, bin_data: bool=True):
target_column = data.data_definition.get_target_column()
prediction = data.data_definition.get_prediction_columns()
if ((target_column is None) or (prediction is None)):
raise ValueError('Target and prediction were not found in data.')
(_, target_current, target_reference) = data.get_data(target_column.column_name)
recommendations_type = (data.column_mapping.recom_type or RecomType.SCORE)
if (prediction.prediction_probas is not None):
pred_name = prediction.prediction_probas[0].column_name
    elif (prediction.predicted_values is not None):
        pred_name = prediction.predicted_values.column_name
    else:
        raise ValueError('Neither prediction probabilities nor predicted values were found in data.')
(_, prediction_current, prediction_reference) = data.get_data(pred_name)
user_column = data.column_mapping.user_id
if (user_column is None):
raise ValueError('User_id was not found in data.')
(_, user_current, user_reference) = data.get_data(user_column)
curr = collect_dataset(user_current, target_current, prediction_current, recommendations_type, min_rel_score, no_feedback_users, bin_data)
ref: Optional[pd.DataFrame] = None
if ((user_reference is not None) and (target_reference is not None) and (prediction_reference is not None)):
ref = collect_dataset(user_reference, target_reference, prediction_reference, recommendations_type, min_rel_score, no_feedback_users, bin_data)
return (curr, ref) |
class M12L16161A(SDRModule):
nbanks = 2
nrows = 2048
ncols = 256
    technology_timings = _TechnologyTimings(tREFI=(64e6 / 4096), tWTR=(2, None), tCCD=(1, None), tRRD=(None, 10))
speedgrade_timings = {'default': _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 55), tFAW=None, tRAS=40)} |
def extractIdidmybesttranslationsWordpressCom(item):
(vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
if ((not (chp or vol)) or ('preview' in item['title'].lower())):
return None
tagmap = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
for (tagname, name, tl_type) in tagmap:
if (tagname in item['tags']):
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
if (item['tags'] == ['Uncategorized']):
titlemap = [('After Being Transported into a Book, I Adopted the Villain Chapter ', 'After Being Transported into a Book, I Adopted the Villain', 'translated'), ('After Being Transported into a Book, I Adopted the Villain Ch. ', 'After Being Transported into a Book, I Adopted the Villain', 'translated'), ('After Being Transported into a Book, I Adopted the Villain Ch.', 'After Being Transported into a Book, I Adopted the Villain', 'translated'), ('Tensei Shoujo no Rirekisho', 'Tensei Shoujo no Rirekisho', 'translated'), ('Master of Dungeon', 'Master of Dungeon', 'oel')]
for (titlecomponent, name, tl_type) in titlemap:
if (titlecomponent.lower() in item['title'].lower()):
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
class UserDeleteView(MenuItemMixin, FormView):
form_class = UserDeletionForm
template_name = 'registration/unregister.html'
menu_parameters = 'account'
    @method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(UserDeleteView, self).dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super(UserDeleteView, self).get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
def form_valid(self, form):
user = self.request.user
user.delete()
logout(self.request)
messages.success(self.request, _('Your account has been removed'))
return HttpResponseRedirect(reverse('forum:index')) |
@pytest.mark.parametrize('ops', ALL_OPS)
@pytest.mark.parametrize('dtype', FLOAT_TYPES)
def test_reduce_mean(ops, dtype):
X = ops.asarray2f([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [1.0, 2.0], [3.0, 4.0]], dtype=dtype)
lengths = ops.asarray1i([3, 2])
ops.xp.testing.assert_allclose(ops.reduce_mean(X, lengths), [[3.0, 4.0], [2.0, 3.0]])
lengths = ops.asarray1i([3, 0, 2])
ops.xp.testing.assert_allclose(ops.reduce_mean(X, lengths), [[3.0, 4.0], [0.0, 0.0], [2.0, 3.0]])
X = ops.asarray2f([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=dtype)
lengths = ops.asarray1i([3, 0])
ops.xp.testing.assert_allclose(ops.reduce_mean(X, lengths), [[3.0, 4.0], [0.0, 0.0]])
with pytest.raises(IndexError):
ops.reduce_mean(X, ops.xp.array([3, 3], dtype='i'))
with pytest.raises(ValueError):
ops.reduce_mean(X, ops.xp.array([(- 1), 5], dtype='i')) |
class OptionPlotoptionsAreasplineSonificationContexttracksMappingFrequency(Options):
    @property
    def mapFunction(self):
        return self._config_get(None)
    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    @property
    def mapTo(self):
        return self._config_get(None)
    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    @property
    def max(self):
        return self._config_get(None)
    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)
    @property
    def min(self):
        return self._config_get(None)
    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)
    @property
    def within(self):
        return self._config_get(None)
    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False) |
@click.command()
@click.option('--batch-size', type=int, default=0, help='Inference batch size')
@click.option('--seq-length', type=int, default=0, help='Inference sequence length')
@click.option('--activation', type=str, default='fast_gelu', help='Activation function applied on BERT, currently only support fast_gelu on Rocm. CUDA supports both gelu and fast_gelu. No effect if framework is pt.')
@click.option('--graph-mode', type=bool, default=True, help='Use CUDA graph or not. hipGraph is not supported yet. No effect if framework is pt.')
@click.option('--use-fp16-acc', type=bool, default=True, help='Use fp16 accumulation or not (TensorRT is using fp16_acc). No effect if framework is pt.')
@click.option('--use-pretrained-pt-model', type=bool, default=True, help='Whether or not to use the pretrained BERT model weights.')
@click.option('--encoders-only', type=bool, default=True, help='Whether or not to run the BERT benchmark with encoders only. If enabled, only the transformer blocks without BERT embeddings are benchmarked.')
def compile_and_benchmark(batch_size: int, seq_length: int, activation: str, graph_mode: bool, use_fp16_acc: bool, use_pretrained_pt_model: bool, encoders_only: bool):
if (detect_target().name() == 'rocm'):
graph_mode = False
        assert (activation == 'fast_gelu'), f'Unsupported activation: {activation} on rocm'
pt_model = BertPt(pretrained=use_pretrained_pt_model)._model
pt_model.eval()
hidden_size = pt_model.config.hidden_size
if (batch_size < 1):
batch_sizes = [1, 2, 4, 8, 16, 32, 64, 128, 256]
else:
batch_sizes = [batch_size]
if (seq_length < 1):
seq_lengths = ([64, 128, 384, 512, 1024, 4096] if encoders_only else [64, 128, 384, 512])
else:
seq_lengths = [seq_length]
for seq_length in seq_lengths:
for bs in batch_sizes:
mod = compile_module(bs, seq_length, hidden_size, activation, use_fp16_acc, encoders_only, pt_model)
benchmark(bs, seq_length, hidden_size, mod, graph_mode, encoders_only) |
def test_alias_argument_1():
f = alias_argument()(func_x)
assert (f(1) == 1)
assert (f(x=1) == 1)
with pytest.raises(TypeError):
assert (f(unk=1) == 1)
f = alias_argument(x=['alias'])(func_x)
assert (f(alias=1) == 1)
assert (f(x=1) == 1)
f = alias_argument(x='alias')(func_x)
assert (f(alias=1) == 1)
assert (f(x=1) == 1)
with pytest.raises(ValueError):
f = alias_argument(x={'alias': 'value'})(func_x)
f = alias_argument(x=['alias', 'blias'])(func_x)
assert (f(alias=1) == 1)
with pytest.raises(TypeError):
f = alias_argument(unk=['x'])(func_x)
assert (f(x=1) == 1) |
def request_counter(metrics_name: str) -> Callable:
def wrap(f: Callable):
        @wraps(f)
def wrapper_sync(self: MetricsGetter, *args, **kwargs):
if self.has_metrics():
self.get_metrics().count(metrics_name, 1)
return f(self, *args, **kwargs)
        @wraps(f)
async def wrapper_async(self: MetricsGetter, *args, **kwargs):
if self.has_metrics():
self.get_metrics().count(metrics_name, 1)
return (await f(self, *args, **kwargs))
return (wrapper_async if asyncio.iscoroutinefunction(f) else wrapper_sync)
return wrap |
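# The decorator dispatches on the wrapped function's type, so it fits both sync and
# async methods of any object exposing has_metrics()/get_metrics(); the class and
# metric names below are illustrative.
class ApiClient(MetricsGetter):
    @request_counter('api.get_user')
    def get_user(self, user_id):
        ...
    @request_counter('api.fetch')
    async def fetch(self, url):
        ... |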
@pytest.mark.xfail(raises=ImageComparisonFailure, reason='Matplotlib renders a different image size for unknown reasons.')
@pytest.mark.skipif((MID_MEMORY > memory), reason='Travis has too little memory to run it.')
def test_hicPlotMatrix_region_region2_log1p_clearMaskedBins_and_bigwig_vmin_vmax_vertical():
outfile = NamedTemporaryFile(suffix='.png', prefix='hicexplorer_test_h5_', delete=False)
args = '--matrix {0}/Li_et_al_2015.h5 --region chrX:3000000-3500000 --region2 chrX:3100000-3600000 --outFileName {1} --log1p --clearMaskedBins --bigwig {2} --vMinBigwig {3} --vMaxBigwig {4} --bigwigAdditionalVerticalAxis'.format(ROOT, outfile.name, (ROOT + 'bigwig_chrx_2e6_5e6.bw'), 0, 1).split()
test_image_path = ((ROOT + 'hicPlotMatrix') + '/Li_chrX30-35-chrX31-36_log1p_clearmaskedbins_vbigwigmin_vbigwigmax_vertical.png')
compute(hicexplorer.hicPlotMatrix.main, args, 5)
res = compare_images(test_image_path, outfile.name, tolerance)
assert (res is None), res |
class OptionPlotoptionsTreemapSonificationContexttracks(Options):
    @property
    def activeWhen(self) -> 'OptionPlotoptionsTreemapSonificationContexttracksActivewhen':
        return self._config_sub_data('activeWhen', OptionPlotoptionsTreemapSonificationContexttracksActivewhen)
    @property
    def instrument(self):
        return self._config_get('piano')
    @instrument.setter
    def instrument(self, text: str):
        self._config(text, js_type=False)
    @property
    def mapping(self) -> 'OptionPlotoptionsTreemapSonificationContexttracksMapping':
        return self._config_sub_data('mapping', OptionPlotoptionsTreemapSonificationContexttracksMapping)
    @property
    def midiName(self):
        return self._config_get(None)
    @midiName.setter
    def midiName(self, text: str):
        self._config(text, js_type=False)
    @property
    def pointGrouping(self) -> 'OptionPlotoptionsTreemapSonificationContexttracksPointgrouping':
        return self._config_sub_data('pointGrouping', OptionPlotoptionsTreemapSonificationContexttracksPointgrouping)
    @property
    def roundToMusicalNotes(self):
        return self._config_get(True)
    @roundToMusicalNotes.setter
    def roundToMusicalNotes(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def showPlayMarker(self):
        return self._config_get(True)
    @showPlayMarker.setter
    def showPlayMarker(self, flag: bool):
        self._config(flag, js_type=False)
    @property
    def timeInterval(self):
        return self._config_get(None)
    @timeInterval.setter
    def timeInterval(self, num: float):
        self._config(num, js_type=False)
    @property
    def type(self):
        return self._config_get('instrument')
    @type.setter
    def type(self, text: str):
        self._config(text, js_type=False)
    @property
    def valueInterval(self):
        return self._config_get(None)
    @valueInterval.setter
    def valueInterval(self, num: float):
        self._config(num, js_type=False)
    @property
    def valueMapFunction(self):
        return self._config_get('linear')
    @valueMapFunction.setter
    def valueMapFunction(self, value: Any):
        self._config(value, js_type=False)
    @property
    def valueProp(self):
        return self._config_get('"x"')
    @valueProp.setter
    def valueProp(self, text: str):
        self._config(text, js_type=False) |
class TestLeakyReluConverter(AITTestCase):
def test_leaky_relu(self):
class TestModule(torch.nn.Module):
def forward(self, x: torch.Tensor) -> torch.Tensor:
return torch.nn.functional.leaky_relu(x, negative_slope=0.05)
model = TestModule().cuda().half()
inputs = [torch.randn(2, 3).half().cuda()]
self.run_test(model, inputs, expected_ops={acc_ops.leaky_relu}) |
@pytest.mark.scheduler
def test_surface_init_fails_during_forward_model_callback(copy_case, monkeypatch, try_queue_and_scheduler):
copy_case('snake_oil_field')
rng = np.random.default_rng()
Path('./surface').mkdir()
nx = 5
ny = 10
surf = xtgeo.RegularSurface(ncol=nx, nrow=ny, xinc=1.0, yinc=1.0, values=rng.standard_normal(size=(nx, ny)))
surf.to_file('surface/surf_init_0.irap', fformat='irap_ascii')
config_file_name = 'snake_oil_surface.ert'
parameter_name = 'TOP'
with open(config_file_name, mode='r+', encoding='utf-8') as config_file_handler:
content_lines = config_file_handler.read().splitlines()
index_line_with_surface_top = [index for (index, line) in enumerate(content_lines) if line.startswith(f'SURFACE {parameter_name}')][0]
line_with_surface_top = content_lines[index_line_with_surface_top]
        # Break the config by pointing the SURFACE keyword at a missing init file (replacement filename is illustrative).
        breaking_line_with_surface_top = line_with_surface_top.replace('surf_init_0.irap', 'surf_init_0_does_not_exist.irap')
content_lines[index_line_with_surface_top] = breaking_line_with_surface_top
config_file_handler.seek(0)
config_file_handler.write('\n'.join(content_lines))
try:
run_ert_test_run(config_file_name)
except ErtCliError as err:
assert (f'Failed to initialize parameter {parameter_name!r}' in str(err)) |
class NotifierTest(ForsetiTestCase):
def test_can_convert_created_at_datetime_to_timestamp_string(self):
violations = [dict(created_at_datetime=datetime(1999, 12, 25, 1, 2, 3)), dict(created_at_datetime=datetime(2010, 6, 8, 4, 5, 6))]
expected_timestamps = ['1999-12-25T01:02:03Z', '2010-06-08T04:05:06Z']
violations_with_converted_timestamp = notifier.convert_to_timestamp(violations)
converted_timestamps = []
for i in violations_with_converted_timestamp:
converted_timestamps.append(i['created_at_datetime'])
self.assertEqual(expected_timestamps, converted_timestamps)
    @mock.patch('google.cloud.forseti.notifier.notifier.find_notifiers', autospec=True)
    @mock.patch('google.cloud.forseti.notifier.notifier.scanner_dao', autospec=True)
def test_no_notifications_for_empty_violations(self, mock_dao, mock_find_notifiers):
mock_dao.map_by_resource.return_value = dict()
mock_service_cfg = mock.MagicMock()
mock_service_cfg.get_global_config.return_value = fake_violations.GLOBAL_CONFIGS
mock_service_cfg.get_notifier_config.return_value = fake_violations.NOTIFIER_CONFIGS
notifier.run('iid-1-2-3', None, mock.MagicMock(), mock_service_cfg)
self.assertFalse(mock_find_notifiers.called)
    @mock.patch('google.cloud.forseti.notifier.notifiers.email_violations.EmailViolations', autospec=True)
    @mock.patch('google.cloud.forseti.notifier.notifiers.gcs_violations.GcsViolations', autospec=True)
    @mock.patch('google.cloud.forseti.notifier.notifier.find_notifiers', autospec=True)
    @mock.patch('google.cloud.forseti.notifier.notifier.scanner_dao', autospec=True)
def test_notifications_for_nonempty_violations(self, mock_dao, mock_find_notifiers, mock_gcs_violations_cls, mock_email_violations_cls):
mock_dao.map_by_resource.return_value = fake_violations.VIOLATIONS
mock_service_cfg = mock.MagicMock()
mock_service_cfg.get_global_config.return_value = fake_violations.GLOBAL_CONFIGS
mock_service_cfg.get_notifier_config.return_value = fake_violations.NOTIFIER_CONFIGS
mock_email_violations = mock.MagicMock(spec=email_violations.EmailViolations)
mock_email_violations_cls.return_value = mock_email_violations
mock_email_violations = mock_email_violations_cls.return_value
mock_gcs_violations = mock_gcs_violations_cls.return_value
mock_find_notifiers.side_effect = [mock_email_violations_cls, mock_gcs_violations_cls]
notifier.run('iid-1-2-3', None, mock.MagicMock(), mock_service_cfg)
self.assertTrue(mock_find_notifiers.called)
self.assertEqual(1, mock_email_violations_cls.call_count)
self.assertEqual('iam_policy_violations', mock_email_violations_cls.call_args[0][0])
self.assertEqual(1, mock_email_violations.run.call_count)
self.assertEqual(1, mock_gcs_violations_cls.call_count)
self.assertEqual('iam_policy_violations', mock_gcs_violations_cls.call_args[0][0])
self.assertEqual(1, mock_gcs_violations.run.call_count)
    @mock.patch('google.cloud.forseti.notifier.notifiers.email_violations.EmailViolations', autospec=True)
    @mock.patch('google.cloud.forseti.notifier.notifiers.gcs_violations.GcsViolations', autospec=True)
    @mock.patch('google.cloud.forseti.notifier.notifier.find_notifiers', autospec=True)
    @mock.patch('google.cloud.forseti.notifier.notifier.scanner_dao', autospec=True)
    @mock.patch('google.cloud.forseti.notifier.notifier.LOGGER', autospec=True)
def test_notifications_are_not_sent_without_valid_scanner_index_id(self, mock_logger, mock_dao, mock_find_notifiers, mock_gcs_violations_cls, mock_email_violations_cls):
mock_dao.get_latest_scanner_index_id.return_value = None
mock_service_cfg = mock.MagicMock()
mock_service_cfg.get_global_config.return_value = fake_violations.GLOBAL_CONFIGS
mock_service_cfg.get_notifier_config.return_value = fake_violations.NOTIFIER_CONFIGS
mock_email_violations = mock.MagicMock(spec=email_violations.EmailViolations)
mock_email_violations_cls.return_value = mock_email_violations
mock_email_violations = mock_email_violations_cls.return_value
mock_gcs_violations = mock_gcs_violations_cls.return_value
mock_find_notifiers.side_effect = [mock_email_violations_cls, mock_gcs_violations_cls]
notifier.run('iid-1-2-3', None, mock.MagicMock(), mock_service_cfg)
self.assertFalse(mock_find_notifiers.called)
self.assertFalse(mock_dao.map_by_resource.called)
self.assertTrue(mock_logger.error.called)
    @mock.patch('google.cloud.forseti.notifier.notifier.InventorySummary', autospec=True)
    @mock.patch('google.cloud.forseti.notifier.notifier.find_notifiers', autospec=True)
    @mock.patch('google.cloud.forseti.notifier.notifier.scanner_dao', autospec=True)
def test_inventory_summary_is_called(self, mock_dao, mock_find_notifiers, mock_inventor_summary):
mock_dao.map_by_resource.return_value = dict()
mock_service_cfg = mock.MagicMock()
mock_service_cfg.get_global_config.return_value = fake_violations.GLOBAL_CONFIGS
mock_service_cfg.get_notifier_config.return_value = fake_violations.NOTIFIER_CONFIGS
notifier.run('iid-1-2-3', None, mock.MagicMock(), mock_service_cfg)
self.assertFalse(mock_find_notifiers.called)
self.assertTrue(mock_inventor_summary.called) |
class LinkableContract(Contract):
unlinked_references: Optional[Tuple[Dict[(str, Any)]]] = None
linked_references: Optional[Tuple[Dict[(str, Any)]]] = None
needs_bytecode_linking = None
def __init__(self, address: bytes, **kwargs: Any) -> None:
if self.needs_bytecode_linking:
raise BytecodeLinkingError('Contract cannot be instantiated until its bytecode is linked.')
validate_address(address)
super().__init__(address=address, **kwargs)
    @classmethod
    def factory(cls, w3: 'Web3', class_name: str=None, **kwargs: Any) -> Type[Self]:
dep_link_refs = kwargs.get('unlinked_references')
bytecode = kwargs.get('bytecode')
needs_bytecode_linking = False
if (dep_link_refs and bytecode):
if (not is_prelinked_bytecode(to_bytes(hexstr=bytecode), dep_link_refs)):
needs_bytecode_linking = True
kwargs = assoc(kwargs, 'needs_bytecode_linking', needs_bytecode_linking)
return super().factory(w3, class_name, **kwargs)
    @classmethod
    def constructor(cls, *args: Any, **kwargs: Any) -> ContractConstructor:
if cls.needs_bytecode_linking:
raise BytecodeLinkingError('Contract cannot be deployed until its bytecode is linked.')
return super().constructor(*args, **kwargs)
    @classmethod
    def link_bytecode(cls, attr_dict: Dict[(str, str)]) -> Type['LinkableContract']:
if ((not cls.unlinked_references) and (not cls.linked_references)):
raise BytecodeLinkingError('Contract factory has no linkable bytecode.')
if (not cls.needs_bytecode_linking):
raise BytecodeLinkingError('Bytecode for this contract factory does not require bytecode linking.')
cls.validate_attr_dict(attr_dict)
bytecode = apply_all_link_refs(cls.bytecode, cls.unlinked_references, attr_dict)
runtime = apply_all_link_refs(cls.bytecode_runtime, cls.linked_references, attr_dict)
linked_class = cls.factory(cls.w3, bytecode_runtime=runtime, bytecode=bytecode)
if linked_class.needs_bytecode_linking:
raise BytecodeLinkingError('Expected class to be fully linked, but class still needs bytecode linking.')
return linked_class
    @classmethod
    def validate_attr_dict(self, attr_dict: Dict[(str, str)]) -> None:
attr_dict_names = attr_dict.keys()
if ((not self.unlinked_references) and (not self.linked_references)):
raise BytecodeLinkingError('Unable to validate attr dict, this contract has no linked/unlinked references.')
unlinked_refs = (self.unlinked_references or ({},))
linked_refs = (self.linked_references or ({},))
all_link_refs = (unlinked_refs + linked_refs)
all_link_names = {ref['name'] for ref in all_link_refs if ref}
if (attr_dict_names != all_link_names):
raise BytecodeLinkingError('All link references must be defined when calling `link_bytecode` on a contract factory.')
for address in attr_dict.values():
validate_address(address) |
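# Sketch of the intended linking flow; the bytecode, link references, and address
# placeholders below are illustrative, not real values.
factory = LinkableContract.factory(w3, class_name='Escrow', bytecode=unlinked_bytecode, unlinked_references=link_refs)
assert factory.needs_bytecode_linking
linked = factory.link_bytecode({'SafeMath': '0x1111111111111111111111111111111111111111'})
assert (not linked.needs_bytecode_linking) |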
class MsgStub(object):
def __init__(self, channel):
self.VerifyInvariant = channel.unary_unary('/cosmos.crisis.v1beta1.Msg/VerifyInvariant', request_serializer=cosmos_dot_crisis_dot_v1beta1_dot_tx__pb2.MsgVerifyInvariant.SerializeToString, response_deserializer=cosmos_dot_crisis_dot_v1beta1_dot_tx__pb2.MsgVerifyInvariantResponse.FromString) |
class DPSet(app_manager.RyuApp):
def __init__(self, *args, **kwargs):
super(DPSet, self).__init__(*args, **kwargs)
self.name = 'dpset'
self.dps = {}
self.port_state = {}
def _register(self, dp):
LOG.debug('DPSET: register datapath %s', dp)
assert (dp.id is not None)
send_dp_reconnected = False
if (dp.id in self.dps):
self.logger.warning('DPSET: Multiple connections from %s', dpid_to_str(dp.id))
self.logger.debug('DPSET: Forgetting datapath %s', self.dps[dp.id])
self.dps[dp.id].close()
self.logger.debug('DPSET: New datapath %s', dp)
send_dp_reconnected = True
self.dps[dp.id] = dp
if (dp.id not in self.port_state):
self.port_state[dp.id] = PortState()
ev = EventDP(dp, True)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
for port in dp.ports.values():
self._port_added(dp, port)
ev.ports.append(port)
self.send_event_to_observers(ev)
if send_dp_reconnected:
ev = EventDPReconnected(dp)
ev.ports = self.port_state.get(dp.id, {}).values()
self.send_event_to_observers(ev)
def _unregister(self, dp):
if (dp not in self.dps.values()):
return
LOG.debug('DPSET: unregister datapath %s', dp)
assert (self.dps[dp.id] == dp)
ev = EventDP(dp, False)
for port in list(self.port_state.get(dp.id, {}).values()):
self._port_deleted(dp, port)
ev.ports.append(port)
self.send_event_to_observers(ev)
del self.dps[dp.id]
del self.port_state[dp.id]
def get(self, dp_id):
return self.dps.get(dp_id)
def get_all(self):
return list(self.dps.items())
def _port_added(self, datapath, port):
self.port_state[datapath.id].add(port.port_no, port)
def _port_deleted(self, datapath, port):
self.port_state[datapath.id].remove(port.port_no)
    @set_ev_cls(ofp_event.EventOFPStateChange, [handler.MAIN_DISPATCHER, handler.DEAD_DISPATCHER])
def dispatcher_change(self, ev):
datapath = ev.datapath
assert (datapath is not None)
if (ev.state == handler.MAIN_DISPATCHER):
self._register(datapath)
elif (ev.state == handler.DEAD_DISPATCHER):
self._unregister(datapath)
    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, handler.CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
if (datapath.ofproto.OFP_VERSION < 4):
datapath.ports = msg.ports
    @set_ev_cls(ofp_event.EventOFPPortStatus, handler.MAIN_DISPATCHER)
def port_status_handler(self, ev):
msg = ev.msg
reason = msg.reason
datapath = msg.datapath
port = msg.desc
ofproto = datapath.ofproto
if (reason == ofproto.OFPPR_ADD):
LOG.debug(('DPSET: A port was added.' + '(datapath id = %s, port number = %s)'), dpid_to_str(datapath.id), port.port_no)
self._port_added(datapath, port)
self.send_event_to_observers(EventPortAdd(datapath, port))
elif (reason == ofproto.OFPPR_DELETE):
LOG.debug(('DPSET: A port was deleted.' + '(datapath id = %s, port number = %s)'), dpid_to_str(datapath.id), port.port_no)
self._port_deleted(datapath, port)
self.send_event_to_observers(EventPortDelete(datapath, port))
else:
assert (reason == ofproto.OFPPR_MODIFY)
LOG.debug(('DPSET: A port was modified.' + '(datapath id = %s, port number = %s)'), dpid_to_str(datapath.id), port.port_no)
self.port_state[datapath.id].modify(port.port_no, port)
self.send_event_to_observers(EventPortModify(datapath, port))
def get_port(self, dpid, port_no):
try:
return self.port_state[dpid][port_no]
except KeyError:
raise ryu_exc.PortNotFound(dpid=dpid, port=port_no, network_id=None)
def get_ports(self, dpid):
return list(self.port_state[dpid].values()) |
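# DPSet is normally consumed through Ryu's _CONTEXTS mechanism, which injects the
# shared instance into dependent apps; minimal consumer sketch:
class MyApp(app_manager.RyuApp):
    _CONTEXTS = {'dpset': DPSet}
    def __init__(self, *args, **kwargs):
        super(MyApp, self).__init__(*args, **kwargs)
        self.dpset = kwargs['dpset']
    def dump_ports(self, dpid):
        return self.dpset.get_ports(dpid) |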
class OefSearchDialogue(Dialogue):
INITIAL_PERFORMATIVES: FrozenSet[Message.Performative] = frozenset({OefSearchMessage.Performative.REGISTER_SERVICE, OefSearchMessage.Performative.UNREGISTER_SERVICE, OefSearchMessage.Performative.SEARCH_SERVICES})
TERMINAL_PERFORMATIVES: FrozenSet[Message.Performative] = frozenset({OefSearchMessage.Performative.OEF_ERROR, OefSearchMessage.Performative.SEARCH_RESULT, OefSearchMessage.Performative.SUCCESS})
VALID_REPLIES: Dict[(Message.Performative, FrozenSet[Message.Performative])] = {OefSearchMessage.Performative.OEF_ERROR: frozenset(), OefSearchMessage.Performative.REGISTER_SERVICE: frozenset({OefSearchMessage.Performative.SUCCESS, OefSearchMessage.Performative.OEF_ERROR}), OefSearchMessage.Performative.SEARCH_RESULT: frozenset(), OefSearchMessage.Performative.SEARCH_SERVICES: frozenset({OefSearchMessage.Performative.SEARCH_RESULT, OefSearchMessage.Performative.OEF_ERROR}), OefSearchMessage.Performative.SUCCESS: frozenset(), OefSearchMessage.Performative.UNREGISTER_SERVICE: frozenset({OefSearchMessage.Performative.SUCCESS, OefSearchMessage.Performative.OEF_ERROR})}
class Role(Dialogue.Role):
AGENT = 'agent'
OEF_NODE = 'oef_node'
class EndState(Dialogue.EndState):
SUCCESSFUL = 0
FAILED = 1
def __init__(self, dialogue_label: DialogueLabel, self_address: Address, role: Dialogue.Role, message_class: Type[OefSearchMessage]=OefSearchMessage) -> None:
Dialogue.__init__(self, dialogue_label=dialogue_label, message_class=message_class, self_address=self_address, role=role) |
class TestDialogues(SimpleDataRequestTestCase):
    @classmethod
    def setup(cls):
        super().setup()
        cls.http_dialogues = cast(HttpDialogues, cls._skill.skill_context.http_dialogues)
    def test_http_dialogues(self):
        'Test the HttpDialogues class.'
        (_, dialogue) = self.http_dialogues.create(counterparty='some_counterparty', performative=HttpMessage.Performative.REQUEST, method='some_method', url='some_url', version='some_version', headers='some_headers', body=b'some_body')
assert (dialogue.role == HttpDialogue.Role.CLIENT)
assert (dialogue.self_address == str(self.skill.skill_context.skill_id)) |
class ESRolloutWorkerWrapper(Wrapper[Union[(StructuredEnv, LogStatsEnv)]]):
def __init__(self, env: Union[(StructuredEnv, LogStatsEnv)], shared_noise: SharedNoiseTable, agent_instance_seed: int):
super().__init__(env)
self.shared_noise = shared_noise
self.abort = False
self.wrapper_rng = np.random.RandomState(agent_instance_seed)
def set_abort(self):
self.abort = True
def clear_abort(self):
self.abort = False
T = TypeVar('T')
def rollout(self, policy: Union[(Policy, TorchModel)]) -> None:
observation = self.reset()
start_time = time.time()
for _ in itertools.count():
if self.abort:
raise ESAbortException()
with torch.no_grad():
action = policy.compute_action(observation=observation, actor_id=self.actor_id(), maze_state=(self.get_maze_state() if policy.needs_state() else None), env=(self if policy.needs_env() else None), deterministic=False)
(observation, reward, done, _) = self.step(convert_to_numpy(action, cast=None, in_place=False))
if done:
break
self.reset()
logger.debug(f'Rollout took {(time.time() - start_time):.1f} seconds')
def generate_evaluation(self, policy: Union[(Policy, TorchModel)]) -> ESRolloutResult:
self.rollout(policy)
r = ESRolloutResult(is_eval=True)
aggregator = self.get_stats(LogStatsLevel.EPISODE)
r.episode_stats.append(aggregator.last_stats)
return r
def generate_training(self, policy: Union[(Policy, TorchModel)], noise_stddev: float) -> ESRolloutResult:
r = ESRolloutResult(is_eval=False)
noise_idx = self.shared_noise.sample_index(self.wrapper_rng)
v = (noise_stddev * self.shared_noise.get(noise_idx, policy.num_params))
v = torch.from_numpy(v).to(policy._device)
params = get_flat_parameters(policy)
aggregator = self.get_stats(LogStatsLevel.EPISODE)
r.noise_indices.append(noise_idx)
self.seed(noise_idx)
set_flat_parameters(policy, (params + v))
self.rollout(policy)
r.episode_stats.append(aggregator.last_stats)
self.seed(noise_idx)
set_flat_parameters(policy, (params - v))
self.rollout(policy)
r.episode_stats.append(aggregator.last_stats)
set_flat_parameters(policy, params)
return r |
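# generate_training uses mirrored (antithetic) sampling: each noise vector v is evaluated
# at params + v and params - v. The trainer-side gradient estimate those paired returns
# feed into is, as a minimal numpy sketch with hypothetical names:
import numpy as np

def es_gradient_estimate(noise, returns_pos, returns_neg, noise_stddev):
    # noise: (n, num_params); returns_pos/returns_neg: (n,) paired episode returns
    return (noise.T.dot((returns_pos - returns_neg)) / ((2.0 * noise_stddev) * len(returns_pos))) |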
class Default(Plugin):
def __init__(self, parser, cfg):
super().__init__(parser, cfg)
group = parser.add_argument_group('Azure authentication options')
group.add_argument('--ad-authority', metavar='NAME', default=self.cfg('authority', type=Str, default='login.microsoftonline.com'), help='Azure AD authority host')
def instantiate(self, args):
return CredsViaAzureDefault(authority=args.ad_authority) |
def test_vertical_alignment_enum():
r = ft.Row(vertical_alignment=ft.CrossAxisAlignment.STRETCH)
assert isinstance(r.vertical_alignment, ft.CrossAxisAlignment)
assert isinstance(r._get_attr('verticalAlignment'), str)
cmd = r._build_add_commands()
assert (cmd[0].attrs['verticalalignment'] == 'stretch') |
def test_prepare_transaction_replacement_gas_price_defaulting_when_strategy_higher(w3):
def higher_gas_price_strategy(w3, txn):
return 20
w3.eth.set_gas_price_strategy(higher_gas_price_strategy)
current_transaction = SIMPLE_CURRENT_TRANSACTION
new_transaction = {'value': 2}
replacement_transaction = prepare_replacement_transaction(w3, current_transaction, new_transaction)
assert (replacement_transaction['gasPrice'] == 20) |
class LightenerConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
VERSION = 2
def __init__(self) -> None:
self.lightener_flow = LightenerFlow(self, steps={'name': 'user'})
super().__init__()
async def async_step_user(self, user_input: (dict[(str, Any)] | None)=None):
return (await self.lightener_flow.async_step_name(user_input))
async def async_step_lights(self, user_input: (dict[(str, Any)] | None)=None) -> FlowResult:
return (await self.lightener_flow.async_step_lights(user_input))
async def async_step_light_configuration(self, user_input: (dict[(str, Any)] | None)=None) -> FlowResult:
return (await self.lightener_flow.async_step_light_configuration(user_input))
    @staticmethod
    def async_get_options_flow(config_entry: config_entries.ConfigEntry) -> config_entries.OptionsFlow:
return LightenerOptionsFlow(config_entry) |
def test():
ind = GraphApi()
item1 = (1, 'aaaa1')
item2 = (2, 'aaaa2')
item3 = (3, 'aaaa1')
item4 = (4, 'aaaa2')
item5 = (5, 'aaaa1')
item6 = (6, 'aaaa2')
item7 = (7, 'aaaa1')
item8 = (8, 'aaaa2')
ind.insert_link(item1, item2)
ind.insert_link(item2, item3)
ind.insert_link(item3, item4)
ind.insert_link(item4, item5)
ind.insert_link(item5, item6)
ind.insert_link(item6, item7)
ind.insert_link(item7, item8)
ind.insert_link(item8, item1) |
def create_data_format_readers(patch_reader, ffrom, to_size):
dfpatch_size = unpack_size(patch_reader)
if (dfpatch_size > 0):
data_format = unpack_size(patch_reader)
patch = patch_reader.decompress(dfpatch_size)
(dfdiff, ffrom) = create_readers(data_format, ffrom, patch, to_size)
ffrom.seek(0)
else:
dfdiff = None
return (dfdiff, ffrom) |
class ConfigLoader():
_config = {}
_config_last_modified_time = None
    @staticmethod
    def _config_modified():
modified_time = os.path.getmtime(CONFIG_FILE)
if (ConfigLoader._config_last_modified_time is None):
ConfigLoader._config_last_modified_time = modified_time
return True
return (modified_time > ConfigLoader._config_last_modified_time)
    @staticmethod
    def load_config():
if ConfigLoader._config_modified():
with open(CONFIG_FILE, mode='r') as f:
ConfigLoader._config = json.load(f)
ConfigLoader._config_last_modified_time = os.path.getmtime(CONFIG_FILE)
    @staticmethod
    def get(*keys):
ConfigLoader.load_config()
try:
result = reduce((lambda d, key: d[key]), keys, ConfigLoader._config)
except KeyError:
with open('config.json.template', 'r') as f:
config_dict_template = json.load(f)
result = reduce((lambda d, key: d[key]), keys, config_dict_template)
return result |
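# Lookups walk the nested dict, falling back to config.json.template on missing keys;
# the key names here are illustrative.
db_host = ConfigLoader.get('database', 'host')
retries = ConfigLoader.get('network', 'retries') |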
def optPortLongOnly(cov, mu=None):
inv = np.linalg.inv(cov)
ones = np.ones(shape=(inv.shape[0], 1))
if (mu is None):
mu = ones
w = np.dot(inv, mu)
w /= np.dot(ones.T, w)
w = w.flatten()
threshold = (w < 0)
wpluss = w.copy()
wpluss[threshold] = 0
wpluss = (wpluss / np.sum(wpluss))
return wpluss |
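# The long-only step is a heuristic: negative unconstrained minimum-variance weights are
# clipped to zero and the rest renormalized, which is not the exact constrained optimum.
cov = np.array([[0.04, 0.006], [0.006, 0.09]])
w = optPortLongOnly(cov)
assert np.isclose(np.sum(w), 1.0)
assert np.all((w >= 0)) |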
class Testing(Module):
def timeTravel(self, timestamp: int) -> None:
return self.w3.manager.request_blocking(RPC.testing_timeTravel, [timestamp])
def mine(self, num_blocks: int=1) -> None:
return self.w3.manager.request_blocking(RPC.evm_mine, [num_blocks])
def snapshot(self) -> int:
self.last_snapshot_idx = self.w3.manager.request_blocking(RPC.evm_snapshot, [])
return self.last_snapshot_idx
def reset(self) -> None:
return self.w3.manager.request_blocking(RPC.evm_reset, [])
def revert(self, snapshot_idx: Optional[int]=None) -> None:
if (snapshot_idx is None):
revert_target = self.last_snapshot_idx
else:
revert_target = snapshot_idx
return self.w3.manager.request_blocking(RPC.evm_revert, [revert_target]) |
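# snapshot/revert gives per-test state isolation on a dev chain; assuming the module is
# mounted as w3.testing, a typical sequence is:
snapshot_id = w3.testing.snapshot()
w3.testing.mine(3)
w3.testing.revert(snapshot_id)
w3.testing.timeTravel(1700000000) |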
def envelope_from_bytes(bytes_: bytes, separator: bytes=SEPARATOR, logger: Logger=_default_logger) -> Optional[Envelope]:
logger.debug('processing: {!r}'.format(bytes_))
envelope = None
try:
envelope = _decode(bytes_, separator=separator)
except ValueError as e:
logger.error('Bad formatted input: {!r}. {}'.format(bytes_, e))
except Exception as e:
        logger.exception('Error when processing an input. Message: {}'.format(str(e)))
return envelope |
def downgrade():
op.drop_index(op.f('ix_accessmanualwebhook_id'), table_name='accessmanualwebhook')
op.drop_table('accessmanualwebhook')
op.execute('alter type connectiontype rename to connectiontype_old')
op.execute("create type connectiontype as enum('postgres', 'mongodb', 'mysql', ' 'snowflake', 'redshift', 'mssql', 'mariadb', 'bigquery', 'saas', 'manual', 'email')")
op.execute('alter table connectionconfig alter column connection_type type connectiontype using connection_type::text::connectiontype')
op.execute('drop type connectiontype_old')
op.execute('alter type privacyrequeststatus rename to privacyrequeststatus_old')
op.execute("create type privacyrequeststatus as enum('in_processing', 'complete', 'pending', 'error', 'paused', 'approved', 'denied', 'canceled', 'identity_unverified')")
op.execute('alter table privacyrequest alter column status type privacyrequeststatus using status::text::privacyrequeststatus')
op.execute('drop type privacyrequeststatus_old') |
@pytest.mark.django_db
def test_program_activity_list_success(client, monkeypatch, agency_account_data, helpers):
helpers.mock_current_fiscal_year(monkeypatch)
resp = client.get(url.format(code='007', query_params=''))
expected_result = {'fiscal_year': helpers.get_mocked_current_fiscal_year(), 'toptier_code': '007', 'messages': [], 'page_metadata': {'hasNext': False, 'hasPrevious': False, 'limit': 10, 'next': None, 'page': 1, 'previous': None, 'total': 4}, 'results': [{'gross_outlay_amount': 100000.0, 'name': 'NAME 3', 'obligated_amount': 100.0}, {'gross_outlay_amount': 1000000.0, 'name': 'NAME 2', 'obligated_amount': 10.0}, {'gross_outlay_amount': 1000000.0, 'name': 'NAME 5', 'obligated_amount': 10.0}, {'gross_outlay_amount': .0, 'name': 'NAME 1', 'obligated_amount': 1.0}]}
assert (resp.status_code == status.HTTP_200_OK)
assert (resp.json() == expected_result)
query_params = '?fiscal_year=2017'
resp = client.get(url.format(code='007', query_params=query_params))
expected_result = {'fiscal_year': 2017, 'toptier_code': '007', 'messages': [], 'page_metadata': {'hasNext': False, 'hasPrevious': False, 'limit': 10, 'next': None, 'page': 1, 'previous': None, 'total': 0}, 'results': []}
assert (resp.status_code == status.HTTP_200_OK)
assert (resp.json() == expected_result)
query_params = '?fiscal_year=2016'
resp = client.get(url.format(code='010', query_params=query_params))
expected_result = {'fiscal_year': 2016, 'toptier_code': '010', 'messages': ['Account data powering this endpoint were first collected in FY2017 Q2 under the DATA Act; as such, there are no data available for prior fiscal years.'], 'page_metadata': {'hasNext': False, 'hasPrevious': False, 'limit': 10, 'next': None, 'page': 1, 'previous': None, 'total': 0}, 'results': []}
assert (resp.status_code == status.HTTP_200_OK)
assert (resp.json() == expected_result) |