code stringlengths 281 23.7M |
|---|
def _get_num_els_in_scene_range(zarr_dataset: ChunkedDataset, scene_index_start: int, scene_index_end: int) -> dict:
    """Count the elements spanned by the half-open scene range [start, end).

    Relies on the zarr layout storing scenes/frames/agents/tl_faces
    contiguously, so each count is just the difference between the end of the
    last interval and the start of the first.

    Args:
        zarr_dataset: chunked dataset whose `scenes` and `frames` are indexable
            records carrying `*_index_interval` pairs.
        scene_index_start: first scene index (inclusive).
        scene_index_end: last scene index (exclusive); must be > start.

    Returns:
        dict with keys num_scenes, num_frames, num_agents, num_tl_faces.
    """
    assert scene_index_end > scene_index_start
    first_scene = zarr_dataset.scenes[scene_index_start]
    last_scene = zarr_dataset.scenes[scene_index_end - 1]
    # frame interval covered by the whole scene range
    frame_lo = first_scene['frame_index_interval'][0]
    frame_hi = last_scene['frame_index_interval'][1]
    first_frame = zarr_dataset.frames[frame_lo]
    last_frame = zarr_dataset.frames[frame_hi - 1]
    return {
        'num_scenes': scene_index_end - scene_index_start,
        'num_frames': frame_hi - frame_lo,
        'num_agents': last_frame['agent_index_interval'][1] - first_frame['agent_index_interval'][0],
        'num_tl_faces': last_frame['traffic_light_faces_index_interval'][1] - first_frame['traffic_light_faces_index_interval'][0],
    }
def main(args):
    """Run ensemble inference over a checkpoint list and write translations.

    For each checkpoint in ``args.checkpoints`` the matching model class from
    ``args.models`` is instantiated under a unique name suffix (``_<i>``), its
    variables are restored by explicit assign ops, and all model inference
    functions are combined into one data-parallel prediction graph.

    Side effects: reads ``args.input``, writes decoded text to ``args.output``.
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    model_cls_list = [models.get_model(model) for model in args.models]
    # one parameter set per model: defaults -> model-specific -> checkpoint -> CLI overrides
    params_list = [default_parameters() for _ in range(len(model_cls_list))]
    params_list = [merge_parameters(params, model_cls.get_parameters()) for (params, model_cls) in zip(params_list, model_cls_list)]
    params_list = [import_params(args.checkpoints[i], args.models[i], params_list[i]) for i in range(len(args.checkpoints))]
    params_list = [override_parameters(params_list[i], args) for i in range(len(model_cls_list))]
    with tf.Graph().as_default():
        # Load every checkpoint's tensors into memory, keeping only the
        # variables belonging to the corresponding model (skip loss averages).
        model_var_lists = []
        for (i, checkpoint) in enumerate(args.checkpoints):
            tf.logging.info(('Loading %s' % checkpoint))
            var_list = tf.train.list_variables(checkpoint)
            values = {}
            reader = tf.train.load_checkpoint(checkpoint)
            for (name, shape) in var_list:
                if (not name.startswith(model_cls_list[i].get_name())):
                    continue
                if (name.find('losses_avg') >= 0):
                    continue
                tensor = reader.get_tensor(name)
                values[name] = tensor
            model_var_lists.append(values)
        # Build one inference function per model, each under a distinct scope
        # name so their variables do not collide in the shared graph.
        model_fns = []
        for i in range(len(args.checkpoints)):
            name = model_cls_list[i].get_name()
            model = model_cls_list[i](params_list[i], (name + ('_%d' % i)))
            model_fn = model.get_inference_func()
            model_fns.append(model_fn)
        # decoding parameters are taken from the first model
        params = params_list[0]
        # inputs are sorted (presumably by length) for batching efficiency;
        # sorted_keys maps back to the original order
        (sorted_keys, sorted_inputs) = dataset.sort_input_file(args.input)
        features = dataset.get_inference_input(sorted_inputs, params)
        # one placeholder pair per device for data parallelism
        placeholders = []
        for i in range(len(params.device_list)):
            placeholders.append({'source': tf.placeholder(tf.int32, [None, None], ('source_%d' % i)), 'source_length': tf.placeholder(tf.int32, [None], ('source_length_%d' % i))})
        predictions = parallel.data_parallelism(params.device_list, (lambda f: inference.create_inference_graph(model_fns, f, params)), placeholders)
        # Map each model's trainable variables to the checkpoint values via
        # explicit assign ops (rather than a Saver) because variable scopes
        # were renamed with the _<i> suffix.
        assign_ops = []
        all_var_list = tf.trainable_variables()
        for i in range(len(args.checkpoints)):
            un_init_var_list = []
            name = model_cls_list[i].get_name()
            for v in all_var_list:
                if v.name.startswith((name + ('_%d' % i))):
                    un_init_var_list.append(v)
            ops = set_variables(un_init_var_list, model_var_lists[i], (name + ('_%d' % i)))
            assign_ops.extend(ops)
        assign_op = tf.group(*assign_ops)
        results = []
        with tf.Session(config=session_config(params)) as sess:
            sess.run(assign_op)
            sess.run(tf.tables_initializer())
            start = time.time()
            # consume the input dataset until exhausted
            while True:
                try:
                    feats = sess.run(features)
                    (op, feed_dict) = shard_features(feats, placeholders, predictions)
                    results.append(sess.run(predictions, feed_dict=feed_dict))
                except tf.errors.OutOfRangeError:
                    break
            elapsed = (time.time() - start)
            tf.logging.log(tf.logging.INFO, ('total time: %d' % elapsed))
            vocab = params.vocabulary['target']
            # result[0] = token-id hypotheses, result[1] = their scores
            outputs = []
            scores = []
            for result in results:
                for item in result[0]:
                    outputs.append(item.tolist())
                for item in result[1]:
                    scores.append(item.tolist())
            outputs = list(itertools.chain(*outputs))
            scores = list(itertools.chain(*scores))
            # undo the length-sort so output lines match input order
            restored_inputs = []
            restored_outputs = []
            restored_scores = []
            for index in range(len(sorted_inputs)):
                restored_inputs.append(sorted_inputs[sorted_keys[index]])
                restored_outputs.append(outputs[sorted_keys[index]])
                restored_scores.append(scores[sorted_keys[index]])
            with open(args.output, 'w') as outfile:
                count = 0
                # note: the loop variables deliberately shadow the outer
                # `outputs`/`scores` lists, which are no longer needed here
                for (outputs, scores) in zip(restored_outputs, restored_scores):
                    for (output, score) in zip(outputs, scores):
                        decoded = []
                        for idx in output:
                            # stop at the first end-of-sentence token
                            if (idx == params.mapping['target'][params.eos]):
                                break
                            decoded.append(vocab[idx])
                        decoded = ' '.join(decoded)
                        if (not args.verbose):
                            # non-verbose: only the best hypothesis per sentence
                            outfile.write(('%s\n' % decoded))
                            break
                        else:
                            # verbose: n-best list with index, source and score
                            pattern = '%d ||| %s ||| %s ||| %f\n'
                            source = restored_inputs[count]
                            values = (count, source, decoded, score)
                            outfile.write((pattern % values))
                    count += 1
class TestPattern():
    """Tests for the VersionBuildHook `pattern` option.

    Both tests install a custom metadata hook that sets the project version to
    1.2.3, then check that initializing the build hook rewrites the version
    file in place and registers it as a build artifact.
    """

    def test_default(self, temp_dir, helpers):
        # pattern=True selects the hook's built-in `__version__ = '...'` pattern
        config = {'path': 'baz.py', 'pattern': True}
        metadata = ProjectMetadata(str(temp_dir), PluginManager(), {'project': {'name': 'foo', 'dynamic': ['version']}, 'tool': {'hatch': {'metadata': {'hooks': {'custom': {}}}}}})
        file_path = (temp_dir / DEFAULT_BUILD_SCRIPT)
        file_path.write_text(helpers.dedent("\n from hatchling.metadata.plugin.interface import MetadataHookInterface\n\n class CustomHook(MetadataHookInterface):\n def update(self, metadata):\n metadata['version'] = '1.2.3'\n "))
        version_file = (temp_dir / 'baz.py')
        version_file.write_text(helpers.dedent("\n __version__ = '0.0.0'\n "))
        build_data = {'artifacts': []}
        hook = VersionBuildHook(str(temp_dir), config, None, metadata, '', '')
        hook.initialize([], build_data)
        # the version file is rewritten with the hook-provided version
        assert (version_file.read_text() == helpers.dedent("\n __version__ = '1.2.3'\n "))
        assert (build_data['artifacts'] == ['/baz.py'])

    def test_custom(self, temp_dir, helpers):
        # an explicit regex with a `version` named group replaces the default
        config = {'path': 'baz.py', 'pattern': 'v = "(?P<version>.+)"'}
        metadata = ProjectMetadata(str(temp_dir), PluginManager(), {'project': {'name': 'foo', 'dynamic': ['version']}, 'tool': {'hatch': {'metadata': {'hooks': {'custom': {}}}}}})
        file_path = (temp_dir / DEFAULT_BUILD_SCRIPT)
        file_path.write_text(helpers.dedent("\n from hatchling.metadata.plugin.interface import MetadataHookInterface\n\n class CustomHook(MetadataHookInterface):\n def update(self, metadata):\n metadata['version'] = '1.2.3'\n "))
        version_file = (temp_dir / 'baz.py')
        version_file.write_text(helpers.dedent('\n v = "0.0.0"\n '))
        build_data = {'artifacts': []}
        hook = VersionBuildHook(str(temp_dir), config, None, metadata, '', '')
        hook.initialize([], build_data)
        assert (version_file.read_text() == helpers.dedent('\n v = "1.2.3"\n '))
        assert (build_data['artifacts'] == ['/baz.py'])
def collect_results_gpu(result_part, size):
    """Collect per-rank results on rank 0 using GPU all_gather.

    Each rank pickles its partial results into a uint8 CUDA tensor, pads it to
    the largest size across ranks (all_gather needs equal shapes), and gathers
    everything. Rank 0 unpickles, interleaves the parts, and truncates.

    Args:
        result_part: this rank's picklable partial result list.
        size: total number of results; the merged list is cut to this length
            (drops tail duplicates introduced by padded distributed samplers).

    Returns:
        The ordered result list on rank 0; ``None`` on all other ranks.
    """
    (rank, world_size) = get_dist_info()
    # serialize this rank's part to a byte tensor on GPU
    part_tensor = torch.tensor(bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
    # first gather every rank's byte length
    shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
    shape_list = [shape_tensor.clone() for _ in range(world_size)]
    dist.all_gather(shape_list, shape_tensor)
    # pad to the max length so all_gather sees identical shapes
    shape_max = torch.tensor(shape_list).max()
    part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
    part_send[:shape_tensor[0]] = part_tensor
    part_recv_list = [part_tensor.new_zeros(shape_max) for _ in range(world_size)]
    dist.all_gather(part_recv_list, part_send)
    if (rank == 0):
        # strip padding using the true per-rank lengths, then unpickle
        part_list = []
        for (recv, shape) in zip(part_recv_list, shape_list):
            part_list.append(pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
        # interleave parts to restore sampler order (rank-strided), then trim
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        ordered_results = ordered_results[:size]
        return ordered_results
    return None
def get_dummy_input(T=100, D=80, B=5, K=100):
    """Create a random speech-to-text forward-input dict for tests.

    Args:
        T: max source length (frames).
        D: feature dimension.
        B: batch size.
        K: vocabulary size for target tokens.

    Returns:
        dict with 'src_tokens' (B, T, D), 'src_lengths' (B, descending), and
        'prev_output_tokens' (collated, pad=1, eos=2).
    """
    feature = torch.randn(B, T, D)
    src_lengths = torch.from_numpy(np.random.randint(low=1, high=T, size=B, dtype=np.int64))
    # guarantee at least one example of full length T
    src_lengths[0] = T
    token_tensors = []
    for length in src_lengths:
        n_tokens = np.random.randint(low=1, high=(length.item() + 1))
        token_tensors.append(torch.from_numpy(np.random.randint(low=0, high=K, size=n_tokens, dtype=np.int64)))
    prev_output_tokens = fairseq_data_utils.collate_tokens(token_tensors, pad_idx=1, eos_idx=2, left_pad=False, move_eos_to_beginning=False)
    # encoders expect batches sorted by descending source length
    (src_lengths, sorted_order) = src_lengths.sort(descending=True)
    return {
        'src_tokens': feature.index_select(0, sorted_order),
        'src_lengths': src_lengths,
        'prev_output_tokens': prev_output_tokens,
    }
class Describe_MarkerFactory():
    """Unit tests for the JPEG ``_MarkerFactory``.

    NOTE(review): the ``@pytest.fixture`` decorators below were missing from
    the source (only a stray ``(params=[...])`` remnant remained on the
    parametrized fixture), which made the fixtures plain methods and broke the
    test; restored here. Requires ``pytest`` to be imported at module top.
    """

    def it_constructs_the_appropriate_marker_object(self, call_fixture):
        (marker_code, stream_, offset_, marker_cls_) = call_fixture
        marker = _MarkerFactory(marker_code, stream_, offset_)
        # the factory must delegate to the selected class's from_stream()
        marker_cls_.from_stream.assert_called_once_with(stream_, marker_code, offset_)
        assert (marker is marker_cls_.from_stream.return_value)

    # fixtures -------------------------------------------------------

    @pytest.fixture(params=[JPEG_MARKER_CODE.APP0, JPEG_MARKER_CODE.APP1, JPEG_MARKER_CODE.SOF0, JPEG_MARKER_CODE.SOF7, JPEG_MARKER_CODE.SOS])
    def call_fixture(self, request, stream_, offset_, _App0Marker_, _App1Marker_, _SofMarker_, _Marker_):
        # map each marker code onto the marker class the factory should pick
        marker_code = request.param
        if (marker_code == JPEG_MARKER_CODE.APP0):
            marker_cls_ = _App0Marker_
        elif (marker_code == JPEG_MARKER_CODE.APP1):
            marker_cls_ = _App1Marker_
        elif (marker_code in JPEG_MARKER_CODE.SOF_MARKER_CODES):
            marker_cls_ = _SofMarker_
        else:
            marker_cls_ = _Marker_
        return (marker_code, stream_, offset_, marker_cls_)

    @pytest.fixture
    def _App0Marker_(self, request):
        return class_mock(request, 'docx.image.jpeg._App0Marker')

    @pytest.fixture
    def _App1Marker_(self, request):
        return class_mock(request, 'docx.image.jpeg._App1Marker')

    @pytest.fixture
    def _Marker_(self, request):
        return class_mock(request, 'docx.image.jpeg._Marker')

    @pytest.fixture
    def offset_(self, request):
        return instance_mock(request, int)

    @pytest.fixture
    def _SofMarker_(self, request):
        return class_mock(request, 'docx.image.jpeg._SofMarker')

    @pytest.fixture
    def stream_(self, request):
        return instance_mock(request, io.BytesIO)
class MinDistanceHandle(SliderHandle):
    """Slider handle bound to the player's ``min_distance`` attribute.

    The attribute lives in [0, 5]; the slider works in [0, 1], so values are
    scaled by 5 in both directions.
    """

    tip = 'min_distance'

    def __init__(self, window, player):
        # slider position arguments (1, 0.6) are fixed for this handle
        super().__init__(window, player, 1, 0.6)

    def get_value(self):
        # attribute range [0, 5] -> slider range [0, 1]
        return self.player.min_distance / 5.0

    def set_value(self, value):
        # slider range [0, 1] -> attribute range [0, 5]
        self.player.min_distance = value * 5.0
def dump(state):
    """Print a pre-error dump of *state* to stdout, if enabled.

    No-op unless ``options.DUMP_PRE_ERROR_STATE`` is set. Streams are flushed
    before and after so the dump lands in the right place relative to any
    error output that follows.
    """
    if not options.DUMP_PRE_ERROR_STATE:
        return
    stdout.flush()
    stderr.flush()
    stdout.write('\n--- Pre-error state dump: \n')
    try:
        state.dump()
    finally:
        # trailing newlines/flushes happen even if state.dump() raises
        stdout.write('\n')
        stderr.write('\n')
        stdout.flush()
        stderr.flush()
class MainWindow(QMainWindow):
    """Main browser window: a QWebEngineView plus navigation/file/help UI.

    NOTE(review): several string literals and the HTTPS-icon attribute were
    corrupted in the source (everything containing "http"/"https" was cut
    mid-line). They are reconstructed below as `self.httpsicon`, the
    'http://www.google.com' home URL, the 'http' default scheme and the
    'https' scheme check — confirm the exact home/homepage URLs against the
    original project.
    """

    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)
        # central browser widget
        self.browser = QWebEngineView()
        self.browser.setUrl(QUrl('http://www.google.com'))  # TODO confirm home URL
        self.browser.urlChanged.connect(self.update_urlbar)
        self.browser.loadFinished.connect(self.update_title)
        self.setCentralWidget(self.browser)
        self.status = QStatusBar()
        self.setStatusBar(self.status)
        # navigation toolbar: back / forward / reload / home
        navtb = QToolBar('Navigation')
        navtb.setIconSize(QSize(16, 16))
        self.addToolBar(navtb)
        back_btn = QAction(QIcon(os.path.join('images', 'arrow-180.png')), 'Back', self)
        back_btn.setStatusTip('Back to previous page')
        back_btn.triggered.connect(self.browser.back)
        navtb.addAction(back_btn)
        next_btn = QAction(QIcon(os.path.join('images', 'arrow-000.png')), 'Forward', self)
        next_btn.setStatusTip('Forward to next page')
        next_btn.triggered.connect(self.browser.forward)
        navtb.addAction(next_btn)
        reload_btn = QAction(QIcon(os.path.join('images', 'arrow-circle-315.png')), 'Reload', self)
        reload_btn.setStatusTip('Reload page')
        reload_btn.triggered.connect(self.browser.reload)
        navtb.addAction(reload_btn)
        home_btn = QAction(QIcon(os.path.join('images', 'home.png')), 'Home', self)
        home_btn.setStatusTip('Go home')
        home_btn.triggered.connect(self.navigate_home)
        navtb.addAction(home_btn)
        navtb.addSeparator()
        # padlock icon indicating whether the current page is served over HTTPS
        self.httpsicon = QLabel()
        self.httpsicon.setPixmap(QPixmap(os.path.join('images', 'lock-nossl.png')))
        navtb.addWidget(self.httpsicon)
        self.urlbar = QLineEdit()
        self.urlbar.returnPressed.connect(self.navigate_to_url)
        navtb.addWidget(self.urlbar)
        stop_btn = QAction(QIcon(os.path.join('images', 'cross-circle.png')), 'Stop', self)
        stop_btn.setStatusTip('Stop loading current page')
        stop_btn.triggered.connect(self.browser.stop)
        navtb.addAction(stop_btn)
        # File menu: open / save / print
        file_menu = self.menuBar().addMenu('&File')
        open_file_action = QAction(QIcon(os.path.join('images', 'disk--arrow.png')), 'Open file...', self)
        open_file_action.setStatusTip('Open from file')
        open_file_action.triggered.connect(self.open_file)
        file_menu.addAction(open_file_action)
        save_file_action = QAction(QIcon(os.path.join('images', 'disk--pencil.png')), 'Save Page As...', self)
        save_file_action.setStatusTip('Save current page to file')
        save_file_action.triggered.connect(self.save_file)
        file_menu.addAction(save_file_action)
        print_action = QAction(QIcon(os.path.join('images', 'printer.png')), 'Print...', self)
        print_action.setStatusTip('Print current page')
        print_action.triggered.connect(self.print_page)
        file_menu.addAction(print_action)
        # Help menu: about dialog and project homepage
        help_menu = self.menuBar().addMenu('&Help')
        about_action = QAction(QIcon(os.path.join('images', 'question.png')), 'About MooseAche', self)
        about_action.setStatusTip('Find out more about MooseAche')
        about_action.triggered.connect(self.about)
        help_menu.addAction(about_action)
        navigate_mozarella_action = QAction(QIcon(os.path.join('images', 'lifebuoy.png')), 'MooseAche Homepage', self)
        navigate_mozarella_action.setStatusTip('Go to MooseAche Homepage')
        navigate_mozarella_action.triggered.connect(self.navigate_mozarella)
        help_menu.addAction(navigate_mozarella_action)
        self.show()
        self.setWindowIcon(QIcon(os.path.join('images', 'ma-icon-64.png')))

    def update_title(self):
        """Mirror the page title into the window title."""
        title = self.browser.page().title()
        self.setWindowTitle(('%s - MooseAche' % title))

    def navigate_mozarella(self):
        """Open the project homepage."""
        self.browser.setUrl(QUrl('https://www.pythonguis.com/'))  # TODO confirm homepage URL

    def about(self):
        dlg = AboutDialog()
        dlg.exec_()

    def open_file(self):
        """Load a local HTML file into the browser."""
        (filename, _) = QFileDialog.getOpenFileName(self, 'Open file', '', 'Hypertext Markup Language (*.htm *.html);;All files (*.*)')
        if filename:
            with open(filename, 'r') as f:
                html = f.read()
            self.browser.setHtml(html)
            self.urlbar.setText(filename)

    def save_file(self):
        """Save the current page's HTML to a local file.

        NOTE(review): QWebEnginePage.toHtml is asynchronous and takes a
        callback; this synchronous usage looks like a carry-over from QtWebKit
        and may not write real page content — confirm against the Qt version
        in use.
        """
        (filename, _) = QFileDialog.getSaveFileName(self, 'Save Page As', '', 'Hypertext Markup Language (*.htm *html);;All files (*.*)')
        if filename:
            html = self.browser.page().toHtml()
            with open(filename, 'w') as f:
                f.write(html)

    def print_page(self):
        """Show a print-preview dialog for the current page."""
        dlg = QPrintPreviewDialog()
        dlg.paintRequested.connect(self.browser.print_)
        dlg.exec_()

    def navigate_home(self):
        self.browser.setUrl(QUrl('http://www.google.com'))  # TODO confirm home URL

    def navigate_to_url(self):
        """Navigate to the URL typed into the url bar, defaulting to http."""
        q = QUrl(self.urlbar.text())
        if (q.scheme() == ''):
            q.setScheme('http')
        self.browser.setUrl(q)

    def update_urlbar(self, q):
        """Reflect the current URL and SSL state in the toolbar."""
        if (q.scheme() == 'https'):
            self.httpsicon.setPixmap(QPixmap(os.path.join('images', 'lock-ssl.png')))
        else:
            self.httpsicon.setPixmap(QPixmap(os.path.join('images', 'lock-nossl.png')))
        self.urlbar.setText(q.toString())
        self.urlbar.setCursorPosition(0)
def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'):
    """Download a Google Drive file by id via curl, unzipping .zip results.

    Uses the cookie/confirm-token dance required for large Drive files.
    Returns curl's exit status (0 on success); on failure the partial file is
    removed.

    NOTE(review): shell commands are built by f-string interpolation through
    os.system — safe only for trusted `id`/`file` values.
    """
    started = time.time()
    file = Path(file)
    cookie = Path('cookie')
    print(f'Downloading as {file}... ', end='')
    # start from a clean slate
    file.unlink(missing_ok=True)
    cookie.unlink(missing_ok=True)
    out = 'NUL' if platform.system() == 'Windows' else '/dev/null'
    # first request stores the session cookie; output is discarded
    os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}')
    if os.path.exists('cookie'):
        # large file: a confirm token from the cookie is required
        s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}'
    else:
        # small file: direct download works
        s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"'
    r = os.system(s)
    cookie.unlink(missing_ok=True)
    if r != 0:
        file.unlink(missing_ok=True)
        print('Download error ')
        return r
    if file.suffix == '.zip':
        print('unzipping... ', end='')
        os.system(f'unzip -q {file}')
        file.unlink()
    print(f'Done ({(time.time() - started):.1f}s)')
    return r
class GATModule(nn.Module):
    """GAT-style graph attention layer followed by a feed-forward block.

    Attention logits are computed as per-head scalars for source (u) and
    destination (v) nodes, summed per edge, LeakyReLU-activated and
    softmax-normalized over each node's incoming edges.
    """

    def __init__(self, dim, hidden_dim_multiplier, num_heads, dropout, **kwargs):
        super().__init__()
        # dim must split evenly across heads
        _check_dim_and_num_heads_consistency(dim, num_heads)
        self.dim = dim
        self.num_heads = num_heads
        self.head_dim = (dim // num_heads)
        self.input_linear = nn.Linear(in_features=dim, out_features=dim)
        # one scalar logit per head for source (u) and destination (v) roles
        self.attn_linear_u = nn.Linear(in_features=dim, out_features=num_heads)
        self.attn_linear_v = nn.Linear(in_features=dim, out_features=num_heads, bias=False)
        self.attn_act = nn.LeakyReLU(negative_slope=0.2)
        self.feed_forward_module = FeedForwardModule(dim=dim, hidden_dim_multiplier=hidden_dim_multiplier, dropout=dropout)

    def forward(self, graph, x):
        x = self.input_linear(x)
        attn_scores_u = self.attn_linear_u(x)
        attn_scores_v = self.attn_linear_v(x)
        # per-edge score: u-logit of the source plus v-logit of the destination
        attn_scores = ops.u_add_v(graph, attn_scores_u, attn_scores_v)
        attn_scores = self.attn_act(attn_scores)
        # normalize over each node's incoming edges
        attn_probs = edge_softmax(graph, attn_scores)
        # NOTE(review): axes are (nodes, head_dim, num_heads) — reversed from
        # the usual (num_heads, head_dim); presumably matched to how
        # u_mul_e_sum broadcasts attn_probs — confirm against the ops library
        x = x.reshape((- 1), self.head_dim, self.num_heads)
        # weighted sum of source features over incoming edges
        x = ops.u_mul_e_sum(graph, x, attn_probs)
        x = x.reshape((- 1), self.dim)
        x = self.feed_forward_module(graph, x)
        return x
@pytest.mark.usefixtures('save_env')
class TestInstallData(support.TempdirManager):
    """Tests for the `install_data` command.

    NOTE(review): the source began with a bare ``.usefixtures('save_env')``
    remnant — the ``@pytest.mark`` prefix had been stripped; restored here
    (requires ``pytest`` imported at module top).
    """

    def test_simple_run(self):
        (pkg_dir, dist) = self.create_dist()
        cmd = install_data(dist)
        cmd.install_dir = inst = os.path.join(pkg_dir, 'inst')
        # data_files entries: plain filename -> install_dir, or (dir, [files])
        one = os.path.join(pkg_dir, 'one')
        self.write_file(one, 'xxx')
        inst2 = os.path.join(pkg_dir, 'inst2')
        two = os.path.join(pkg_dir, 'two')
        self.write_file(two, 'xxx')
        cmd.data_files = [one, (inst2, [two])]
        assert (cmd.get_inputs() == [one, (inst2, [two])])
        cmd.ensure_finalized()
        cmd.run()
        # both files installed
        assert (len(cmd.get_outputs()) == 2)
        rtwo = os.path.split(two)[(- 1)]
        assert os.path.exists(os.path.join(inst2, rtwo))
        rone = os.path.split(one)[(- 1)]
        assert os.path.exists(os.path.join(inst, rone))
        cmd.outfiles = []
        # rerun with warn_dir enabled: results must be identical
        cmd.warn_dir = 1
        cmd.ensure_finalized()
        cmd.run()
        assert (len(cmd.get_outputs()) == 2)
        assert os.path.exists(os.path.join(inst2, rtwo))
        assert os.path.exists(os.path.join(inst, rone))
        cmd.outfiles = []
        # rerun with a root set and extra entries (including an empty dir)
        cmd.root = os.path.join(pkg_dir, 'root')
        inst4 = os.path.join(pkg_dir, 'inst4')
        three = os.path.join(cmd.install_dir, 'three')
        self.write_file(three, 'xx')
        cmd.data_files = [one, (inst2, [two]), ('inst3', [three]), (inst4, [])]
        cmd.ensure_finalized()
        cmd.run()
        assert (len(cmd.get_outputs()) == 4)
        assert os.path.exists(os.path.join(inst2, rtwo))
        assert os.path.exists(os.path.join(inst, rone))
class Effect5484(BaseEffect):
    """Implant-set effect: scales armorHpBonus2 on Special Edition implants.

    NOTE(review): the handler takes no ``self`` — presumably the effect
    framework calls handlers as plain functions; confirm against BaseEffect.
    """

    # runs in the 'early' pass, as a passive (always-on) effect
    runTime = 'early'
    type = 'passive'

    def handler(fit, implant, context, projectionRange, **kwargs):
        # multiply armorHpBonus2 on every 'Special Edition Implant' item by
        # this implant's implantSetChristmas set-bonus attribute
        fit.appliedImplants.filteredItemMultiply((lambda mod: (mod.item.group.name == 'Special Edition Implant')), 'armorHpBonus2', implant.getModifiedItemAttr('implantSetChristmas'), **kwargs)
def eth_nodes_to_cmds(nodes_configuration: List[Dict[(str, Any)]], eth_node_descs: List[EthNodeDescription], base_datadir: str, genesis_file: str, chain_id: ChainID, verbosity: str) -> List[Command]:
    """Build the launch command for every configured Ethereum node.

    Pairs each node config with its description and dispatches on the
    blockchain client type ('geth' prepares its datadir first; 'parity'
    receives the genesis file directly).

    Returns:
        One command line per node, in input order.

    Raises:
        AssertionError: if a description carries an unknown blockchain type.
    """
    cmds = []
    for (config, node_desc) in zip(nodes_configuration, eth_node_descs):
        datadir = eth_node_to_datadir(config['address'], base_datadir)
        if (node_desc.blockchain_type == 'geth'):
            geth_prepare_datadir(datadir, genesis_file)
            commandline = geth_to_cmd(config, datadir, chain_id, verbosity)
        elif (node_desc.blockchain_type == 'parity'):
            commandline = parity_to_cmd(config, datadir, chain_id, genesis_file, verbosity)
        else:
            # bugfix: report the value that was actually tested
            # (node_desc.blockchain_type), not config['blockchain_type'],
            # which may be absent or stale
            assert False, f'Invalid blockchain type {node_desc.blockchain_type}'
        cmds.append(commandline)
    return cmds
def main():
    """CLI entry point for goal-directed molecule generation with ChemGE.

    Parses options, seeds numpy's RNG, dumps the parameters to
    goal_directed_params.json in the output directory, and runs the GuacaMol
    goal-directed benchmark suite with a ChemGEGenerator.
    """
    parser = argparse.ArgumentParser()
    # (flag, kwargs) table keeps the original option order and defaults
    option_specs = [
        ('--smiles_file', dict(default='data/guacamol_v1_all.smiles')),
        ('--seed', dict(type=int, default=42)),
        ('--population_size', dict(type=int, default=100)),
        ('--n_mutations', dict(type=int, default=200)),
        ('--gene_size', dict(type=int, default=300)),
        ('--generations', dict(type=int, default=1000)),
        ('--n_jobs', dict(type=int, default=32)),
        ('--random_start', dict(action='store_true')),
        ('--output_dir', dict(type=str, default=None)),
        ('--patience', dict(type=int, default=5)),
        ('--suite', dict(default='test')),
    ]
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
    args = parser.parse_args()
    np.random.seed(args.seed)
    setup_default_logger()
    # default the output directory to this script's location
    if args.output_dir is None:
        args.output_dir = os.path.dirname(os.path.realpath(__file__))
    # record the run parameters alongside the results
    with open(os.path.join(args.output_dir, 'goal_directed_params.json'), 'w') as jf:
        json.dump(vars(args), jf, sort_keys=True, indent=4)
    optimiser = ChemGEGenerator(smi_file=args.smiles_file, population_size=args.population_size, n_mutations=args.n_mutations, gene_size=args.gene_size, generations=args.generations, n_jobs=args.n_jobs, random_start=args.random_start, patience=args.patience)
    json_file_path = os.path.join(args.output_dir, 'goal_directed_results.json')
    assess_goal_directed_generation(optimiser, json_output_file=json_file_path, benchmark_version=args.suite)
def setup_environment():
    """Perform one-time, per-process environment setup.

    Configures libraries, and if the FASTREID_ENV_MODULE environment variable
    names a custom setup module, runs it too. Guarded by the module-level
    _ENV_SETUP_DONE flag, so repeated calls are no-ops.
    """
    global _ENV_SETUP_DONE
    if _ENV_SETUP_DONE:
        return
    # set the flag first so re-entrant calls during setup don't recurse
    _ENV_SETUP_DONE = True
    _configure_libraries()
    custom_module_path = os.environ.get('FASTREID_ENV_MODULE')
    # (dead `else: pass` branch removed)
    if custom_module_path:
        setup_custom_environment(custom_module_path)
def node_mssp_start(wizard):
    """Wizard node: explain MSSP and point the user at the template file.

    Bugfix: `filename` (the path of the MSSP meta module) was computed but
    never shown — the text contained a placeholder where the path belongs.
    It is now interpolated so the user knows which file to edit.
    """
    mssp_module = mod_import(settings.MSSP_META_MODULE)
    filename = mssp_module.__file__
    text = f'''
    MSSP (Mud Server Status Protocol) allows online MUD-listing sites/crawlers
    to continuously monitor your game and list information about it. Some of
    this, like active player-count, Evennia will automatically add for you,
    whereas many fields are manually added info about your game.

    To use MSSP you should generally have a publicly open game that external
    players can connect to. You also need to register at a MUD listing site to
    tell them to list your game.

    MSSP has a large number of configuration options and we found it was simply
    a lot easier to set them in a file rather than using this wizard. So to
    configure MSSP, edit the empty template listing found here:

        {filename}
    '''
    wizard.display(text)
    wizard.ask_continue()
    node_start(wizard)
def gather_container(container, dst, group=None, cat_dim=0):
    """Gather a tensor container from all ranks onto rank *dst*.

    Supports a bare tensor, a dict of tensors, or a list/tuple of tensors;
    the container structure is preserved and each leaf becomes the
    concatenation (along *cat_dim*) of that leaf from every rank.

    Args:
        container: tensor, dict, list or tuple of tensors to gather.
        dst: destination rank that receives the gathered result.
        group: process group (defaults to the world group).
        cat_dim: dimension along which per-rank tensors are concatenated.

    Returns:
        The gathered container on rank *dst*; on every other rank the leaves
        are ``None`` (``dist.gather`` only delivers data to *dst*).
    """
    group = (group or dist.group.WORLD)
    world_size = dist.get_world_size(group)
    this_rank = dist.get_rank(group)

    def _do_gather(tensor):
        # only the destination rank allocates receive buffers
        if (this_rank == dst):
            tensor_list = [torch.empty_like(tensor) for _ in range(world_size)]
        else:
            tensor_list = None
        dist.gather(tensor, tensor_list, dst=dst, group=group)
        # bugfix: non-dst ranks previously fell through to
        # torch.cat(None, ...), raising TypeError; they have no data to return
        if (this_rank != dst):
            return None
        return torch.cat(tensor_list, dim=cat_dim)

    if isinstance(container, dict):
        gathered = dict()
        for (k, v) in container.items():
            v = _do_gather(v)
            gathered[k] = v
        return gathered
    elif isinstance(container, (list, tuple)):
        gathered = [_do_gather(v) for v in container]
        # preserve tuple-ness of the input container
        if isinstance(container, tuple):
            gathered = tuple(gathered)
        return gathered
    else:
        assert isinstance(container, torch.Tensor)
        return _do_gather(container)
def loads(__s: str, *, parse_float: ParseFloat=float) -> dict[(str, Any)]:
    """Parse a TOML document from a string into a plain dict.

    Statement-oriented main loop: each iteration consumes one line-level
    construct — a key/value pair, a ``[table]`` / ``[[array-of-tables]]``
    header, a comment, or a blank line — and enforces that nothing but a
    comment or newline follows it.

    Args:
        __s: the TOML source text.
        parse_float: callable used for float values (wrapped so it cannot
            return illegal substitutes for ints/strings).

    Raises:
        A suffixed error (with position info) on any invalid statement.
    """
    # normalize newlines so only '\n' needs handling below
    src = __s.replace('\r\n', '\n')
    pos = 0
    out = Output(NestedDict(), Flags())
    # key of the table currently being filled; () means the document root
    header: Key = ()
    parse_float = make_safe_parse_float(parse_float)
    while True:
        # 1. skip leading whitespace of the statement
        pos = skip_chars(src, pos, TOML_WS)
        # 2. dispatch on the first character (EOF ends the loop)
        try:
            char = src[pos]
        except IndexError:
            break
        if (char == '\n'):
            pos += 1
            continue
        if (char in KEY_INITIAL_CHARS):
            pos = key_value_rule(src, pos, out, header, parse_float)
            pos = skip_chars(src, pos, TOML_WS)
        elif (char == '['):
            # peek one char to distinguish [[array-of-tables]] from [table]
            try:
                second_char: (str | None) = src[(pos + 1)]
            except IndexError:
                second_char = None
            out.flags.finalize_pending()
            if (second_char == '['):
                (pos, header) = create_list_rule(src, pos, out)
            else:
                (pos, header) = create_dict_rule(src, pos, out)
            pos = skip_chars(src, pos, TOML_WS)
        elif (char != '#'):
            raise suffixed_err(src, pos, 'Invalid statement')
        # 3. only a comment and a newline may follow the statement
        pos = skip_comment(src, pos)
        try:
            char = src[pos]
        except IndexError:
            break
        if (char != '\n'):
            raise suffixed_err(src, pos, 'Expected newline or end of document after a statement')
        pos += 1
    return out.data.dict
class HCaptcha(CaptchaService):
__name__ = 'HCaptcha'
__type__ = 'anticaptcha'
__version__ = '0.04'
__status__ = 'testing'
__description__ = 'hCaptcha captcha service plugin'
__license__ = 'GPLv3'
__authors__ = [('GammaC0de', 'nitzo2001[AT]yahoo[DOT]com')]
KEY_PATTERN = '(?:data-sitekey=["\\\']|["\\\']sitekey["\\\']\\s*:\\s*["\\\'])((?:[\\w\\-]|%[0-9a-fA-F]{2})+)'
KEY_FORMAT_PATTERN = '^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$'
HCAPTCHA_INTERACTIVE_SIG = (((((('602d66a1db89d74c9d9d69afee01960fd55e684a969e0788fd1f10cb45a04c6bdc2d3c50b0370d7' + '578795f59239ee9764ef1c6f2c9f56ce6fc2e1d2358de8fd29f2c7138b1c68bb8aacda8e170b032') + 'e014c61beca34fc0cdd89ec39cca501e5aeb9c3ac938aeb09de3cc1d11673c812b2c9ea51acbbcf') + 'd443f97ae5b5d3b2b031367e19d3213aee12de6717eb0901529c3ebb3ac681fa183c6d14f664568') + '3fc5a5b8655798faa80afcf1a3423451f076cba1d573ccf0b6ab0e7cf4fbf31ade86d7322e7a9f5') + '5077cd4099bed8bc13908f4e0ca1d891b228cbdcf4eabeab14af0d8b45a0b9297ece4270d4cf347') + '71f46d04eb7904d4d4fddccf7eb0dc9301c0cf')
HCAPTCHA_INTERACTIVE_JS = '\n\t\t\twhile(document.children[0].childElementCount > 0) {\n\t\t\t\tdocument.children[0].removeChild(document.children[0].children[0]);\n\t\t\t}\n\t\t\tdocument.children[0].innerHTML = \'<html><head></head><body style="display:inline-block;"><div id="captchadiv" style="display: inline-block;"></div></body></html>\';\n\n\t\t\tgpyload.data.sitekey = request.params.sitekey;\n\n\t\t\tgpyload.getFrameSize = function() {\n\t\t\t\tvar rectAnchor = {top: 0, right: 0, bottom: 0, left: 0},\n\t\t\t\t\trectPopup = {top: 0, right: 0, bottom: 0, left: 0},\n\t\t\t\t\trect;\n\t\t\t\tvar anchor = document.body.querySelector("iframe[src*=\'/hcaptcha.html#frame=checkbox\']");\n\t\t\t\tif (anchor !== null && gpyload.isVisible(anchor)) {\n\t\t\t\t\trect = anchor.getBoundingClientRect();\n\t\t\t\t\trectAnchor = {top: rect.top, right: rect.right, bottom: rect.bottom, left: rect.left};\n\t\t\t\t}\n\t\t\t\tvar popup = document.body.querySelector("iframe[src*=\'/hcaptcha.html\'][src*=\'frame=challenge\']");\n\t\t\t\tif (popup !== null && gpyload.isVisible(popup)) {\n\t\t\t\t\trect = popup.getBoundingClientRect();\n\t\t\t\t\trectPopup = {top: rect.top, right: rect.right, bottom: rect.bottom, left: rect.left};\n\t\t\t\t}\n\t\t\t\tvar left = Math.round(Math.min(rectAnchor.left, rectAnchor.right, rectPopup.left, rectPopup.right));\n\t\t\t\tvar right = Math.round(Math.max(rectAnchor.left, rectAnchor.right, rectPopup.left, rectPopup.right));\n\t\t\t\tvar top = Math.round(Math.min(rectAnchor.top, rectAnchor.bottom, rectPopup.top, rectPopup.bottom));\n\t\t\t\tvar bottom = Math.round(Math.max(rectAnchor.top, rectAnchor.bottom, rectPopup.top, rectPopup.bottom));\n\t\t\t\treturn {top: top, left: left, bottom: bottom, right: right};\n\t\t\t};\n\n\t\t\t// function that is called when the captcha finished loading and is ready to interact\n\t\t\twindow.pyloadCaptchaOnLoadCallback = function() {\n\t\t\t\tvar widgetID = hcaptcha.render (\n\t\t\t\t\t"captchadiv",\n\t\t\t\t\t{size: 
"compact",\n\t\t\t\t\t \'sitekey\': gpyload.data.sitekey,\n\t\t\t\t\t \'callback\': function() {\n\t\t\t\t\t\tvar hcaptchaResponse = hcaptcha.getResponse(widgetID); // get captcha response\n\t\t\t\t\t\tgpyload.submitResponse(hcaptchaResponse);\n\t\t\t\t\t }}\n\t\t\t\t);\n\t\t\t\tgpyload.activated();\n\t\t\t};\n\n\t\t\tif(typeof hcaptcha !== \'undefined\' && hcaptcha) {\n\t\t\t\twindow.pyloadCaptchaOnLoadCallback();\n\t\t\t} else {\n\t\t\t\tvar js_script = document.createElement(\'script\');\n\t\t\t\tjs_script.type = "text/javascript";\n\t\t\t\tjs_script.src = "//hcaptcha.com/1/api.js?onload=pyloadCaptchaOnLoadCallback&render=explicit";\n\t\t\t\tjs_script.async = true;\n\t\t\t\tdocument.getElementsByTagName(\'head\')[0].appendChild(js_script);\n\t\t\t}'
    def detect_key(self, data=None):
        """Extract and validate the hCaptcha sitekey from page HTML.

        Searches *data* (or freshly retrieved page data) with KEY_PATTERN,
        URL-unquotes the candidate and checks it against the UUID-shaped
        KEY_FORMAT_PATTERN before accepting it.

        Returns:
            The sitekey string (also stored on ``self.key``), or ``None`` if
            no valid key was found.
        """
        html = (data or self.retrieve_data())
        m = re.search(self.KEY_PATTERN, html)
        if (m is not None):
            key = urllib.parse.unquote(m.group(1).strip())
            # a real hCaptcha key is UUID-formatted; reject look-alikes
            m = re.search(self.KEY_FORMAT_PATTERN, key)
            if (m is not None):
                self.key = key
                self.log_debug('Key: {}'.format(self.key))
                return self.key
            else:
                self.log_debug(key, 'Wrong key format, this probably because it is not a hCaptcha key')
        self.log_warning(self._('Key pattern not found'))
        return None
    def challenge(self, key=None, data=None):
        """Solve the captcha for *key* (detected from *data* if omitted)."""
        key = (key or self.retrieve_key(data))
        return self._challenge_js(key)
    def _challenge_js(self, key):
        """Run the interactive in-browser hCaptcha challenge.

        Sends the signed HCAPTCHA_INTERACTIVE_JS snippet plus the sitekey to
        the interactive captcha solver and waits up to 300s for the response
        token.
        """
        self.log_debug('Challenge hCaptcha interactive')
        params = {'url': self.pyfile.url, 'sitekey': key, 'script': {'signature': self.HCAPTCHA_INTERACTIVE_SIG, 'code': self.HCAPTCHA_INTERACTIVE_JS}}
        result = self.decrypt_interactive(params, timeout=300)
        return result
def test_formatting():
    """Dates are formatted according to the currently active locale."""
    _ = Catalogue('reahl-component')
    sample_date = datetime.date(2012, 1, 10)
    # (locale, expected long-format rendering) pairs, checked in order
    expectations = [
        ('en_gb', '10 January 2012'),
        ('af', '10 Januarie 2012'),
    ]
    with LocaleContextStub() as context:
        for locale, expected in expectations:
            context.test_locale = locale
            assert _.current_locale == locale
            formatted = babel.dates.format_date(sample_date, format='long', locale=_.current_locale)
            assert formatted == expected
class UNet3D(Abstract3DUNet):
    """Standard 3D U-Net: Abstract3DUNet specialized to DoubleConv blocks.

    All constructor arguments are forwarded unchanged; the only fixed choice
    is ``basic_module=DoubleConv``.
    """

    def __init__(self, in_channels, out_channels, final_sigmoid=True, f_maps=64, layer_order='gcr', num_groups=8, num_levels=4, is_segmentation=True, **kwargs):
        super(UNet3D, self).__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            final_sigmoid=final_sigmoid,
            basic_module=DoubleConv,
            f_maps=f_maps,
            layer_order=layer_order,
            num_groups=num_groups,
            num_levels=num_levels,
            is_segmentation=is_segmentation,
            **kwargs,
        )
@node_rewriter([gemm_no_inplace])
def local_gemm_to_ger(fgraph, node):
    """Rewrite ``gemm(z, a, x, y, b)`` into a rank-1 ``ger`` update.

    Applies when x is a column vector (shape (M, 1)) and y is a row vector
    (shape (1, N)), so the matrix product is an outer product. Only b == 1
    (keep z) and b == 0 (ignore z, start from zeros) are handled; other b
    values would require an extra scaling and are left alone.

    NOTE(review): the decorator line arrived corrupted as
    ``_rewriter([gemm_no_inplace])``; restored as ``@node_rewriter`` — confirm
    the decorator name imported at module top.
    """
    if (node.op == gemm_no_inplace):
        (z, a, x, y, b) = node.inputs
        if (x.broadcastable[1] and y.broadcastable[0]):
            # drop the broadcastable dims: (M, 1) -> (M,), (1, N) -> (N,)
            xv = x.dimshuffle(0)
            yv = y.dimshuffle(1)
            try:
                bval = ptb.get_underlying_scalar_constant_value(b)
            except NotScalarConstantError:
                # b must be a compile-time constant to pick a rewrite
                return
            if (bval == 1):
                rval = ger(z, a, xv, yv)
                new_out = [rval]
            elif (bval == 0):
                zeros = ptb.zeros([x.shape[0], y.shape[1]], x.dtype)
                rval = ger(zeros, a, xv, yv)
                new_out = [rval]
            else:
                # other scalings need an extra multiply; leave to other rewrites
                return
            copy_stack_trace(node.outputs, new_out)
            return new_out
def construct_onion_error(reason: OnionRoutingFailureMessage, onion_packet: OnionPacket, our_onion_private_key: bytes) -> bytes:
    """Build an encrypted onion error packet reporting *reason* (BOLT-04 style).

    Layout before encryption: hmac(32) || failure_len(2) || failure_msg ||
    pad_len(2) || pad. The failure field is padded to a fixed 256 bytes so the
    packet length does not leak which failure occurred. Keys are derived from
    the ECDH shared secret with the packet's ephemeral public key.

    Raises:
        AssertionError: if the failure message exceeds 256 bytes.
    """
    failure_msg = reason.to_bytes()
    failure_len = len(failure_msg)
    # pad the failure field to a fixed 256 bytes
    pad_len = (256 - failure_len)
    assert (pad_len >= 0)
    error_packet = failure_len.to_bytes(2, byteorder='big')
    error_packet += failure_msg
    error_packet += pad_len.to_bytes(2, byteorder='big')
    error_packet += bytes(pad_len)
    # authenticate the plaintext with the 'um' key, prepending the HMAC
    shared_secret = get_ecdh(our_onion_private_key, onion_packet.public_key)
    um_key = get_bolt04_onion_key(b'um', shared_secret)
    hmac_ = hmac_oneshot(um_key, msg=error_packet, digest=hashlib.sha256)
    error_packet = (hmac_ + error_packet)
    # obfuscate the whole packet with the 'ammag' cipher stream (XOR)
    ammag_key = get_bolt04_onion_key(b'ammag', shared_secret)
    stream_bytes = generate_cipher_stream(ammag_key, len(error_packet))
    error_packet = xor_bytes(error_packet, stream_bytes)
    return error_packet
@pytest.mark.parametrize('x, mode, exc', [(set_test_value(pt.dmatrix(), (lambda x: x.T.dot(x))(rng.random(size=(3, 3)).astype('float64'))), 'reduced', None), (set_test_value(pt.dmatrix(), (lambda x: x.T.dot(x))(rng.random(size=(3, 3)).astype('float64'))), 'r', None), (set_test_value(pt.lmatrix(), (lambda x: x.T.dot(x))(rng.integers(1, 10, size=(3, 3)).astype('int64'))), 'reduced', None), (set_test_value(pt.lmatrix(), (lambda x: x.T.dot(x))(rng.integers(1, 10, size=(3, 3)).astype('int64'))), 'complete', UserWarning)])
def test_QRFull(x, mode, exc):
    """Check the numba backend of QRFull against the python reference.

    NOTE(review): the source began with a bare ``.parametrize(...)`` — the
    ``@pytest.mark`` prefix had been stripped; restored here.
    """
    g = nlinalg.QRFull(mode)(x)
    # QRFull may return a single variable or a list of variables
    if isinstance(g, list):
        g_fg = FunctionGraph(outputs=g)
    else:
        g_fg = FunctionGraph(outputs=[g])
    # expect the given warning when exc is set, otherwise expect silence
    cm = (contextlib.suppress() if (exc is None) else pytest.warns(exc))
    with cm:
        compare_numba_and_py(g_fg, [i.tag.test_value for i in g_fg.inputs if (not isinstance(i, (SharedVariable, Constant)))])
@with_fixtures(WebFixture, FormLayoutFixture)
def test_adding_checkboxes(web_fixture, form_layout_fixture):
    """A BooleanField rendered via CheckboxInput gets custom-checkbox markup
    and no separate label element in its form group.

    NOTE(review): the decorator arrived corrupted as ``_fixtures(...)``;
    restored as reahl's ``@with_fixtures`` — confirm against module imports.
    """
    class DomainObjectWithBoolean():
        fields = ExposedNames()
        fields.an_attribute = (lambda i: BooleanField(label='Some input', required=True))

    fixture = form_layout_fixture
    fixture.domain_object = DomainObjectWithBoolean()

    class FormWithInputWithCheckbox(Form):
        def __init__(self, view):
            super().__init__(view, 'aform')
            self.use_layout(FormLayout())
            self.layout.add_input(CheckboxInput(self, fixture.domain_object.fields.an_attribute))

    browser = Browser(web_fixture.new_wsgi_app(child_factory=FormWithInputWithCheckbox.factory()))
    browser.open('/')
    # checkboxes carry their own label; the form group must not add another
    assert (not any(((child.tag == 'label') for child in fixture.get_form_group_children(browser))))
    # the single child div wraps one custom-styled checkbox
    [div] = fixture.get_form_group_children(browser)
    [checkbox] = div.getchildren()
    checkbox_classes = checkbox.attrib['class'].split(' ')
    assert ('custom-control' in checkbox_classes)
    assert ('custom-checkbox' in checkbox_classes)
class Resource():
    """Base class for Recurly API resources.

    Provides helpers that turn HTTP responses / parsed JSON into typed
    resource instances from ``recurly.resources`` and typed errors from
    ``recurly.errors``.

    NOTE(review): ``cast_file``, ``cast_error`` and ``cast_json`` take
    ``cls`` as their first parameter but carry no ``@classmethod``
    decorator in this copy -- the decorators appear to have been lost;
    confirm against upstream.
    """

    # Resolve a resource class in recurly.resources by name
    # (pydoc.locate returns None when the class does not exist).
    locator = (lambda class_name: locate(('recurly.resources.%s' % class_name)))

    def cast_file(cls, response):
        """Wrap a binary response body in a BinaryFile resource."""
        klass = cls.locator('BinaryFile')
        resource = klass()
        setattr(resource, 'data', response.body)
        return resource

    def cast_error(cls, response):
        """Convert an error HTTP response into the matching recurly.errors class.

        JSON bodies are parsed and mapped by their error ``type`` field;
        non-JSON bodies are mapped purely from the HTTP status code.
        """
        if ('application/json' in response.content_type):
            json_body = json.loads(response.body.decode('utf-8'))
            error_json = json_body['error']
            # Tag the payload so cast_json keeps any transaction details.
            error_json['object'] = 'error_may_have_transaction'
            error = cls.cast_json(error_json, response=response)
            error_type = error.type
            # e.g. "invalid_api_key" -> "InvalidApiKey"
            name_parts = error_type.split('_')
            class_name = ''.join((x.title() for x in name_parts))
            msg = ((error.message + '. Recurly Request Id: ') + response.request_id)
        else:
            class_name = recurly.RecurlyError.error_from_status(response.status)
            error = None
            msg = ('Unexpected %i Error. Recurly Request Id: %s' % (response.status, response.request_id))
        if (not class_name.endswith('Error')):
            class_name += 'Error'
        klass = locate(('recurly.errors.%s' % class_name))
        if klass:
            return klass(msg, error)
        else:
            # Unknown error class: fall back to the generic ApiError.
            return recurly.ApiError(msg, error)

    def cast_json(cls, properties, class_name=None, response=None):
        """Recursively convert parsed JSON into typed resource objects.

        Args:
            properties: dict parsed from a JSON response body.
            class_name: explicit class-name override; otherwise derived
                from the JSON ``object`` field.
            response: originating HTTP response; when given it is attached
                to the resulting resource (see get_response()).
        """
        if ((class_name is None) and ('object' in properties)):
            # List envelopes become Page objects with their items cast.
            if ((properties['object'] == 'list') and ('data' in properties) and ('has_more' in properties)):
                properties['data'] = [Resource.cast_json(i) for i in properties['data']]
                return Page(properties)
            # e.g. "billing_info" -> "BillingInfo"
            name_parts = properties['object'].split('_')
            class_name = ''.join((x.title() for x in name_parts))
        klass = cls.locator(class_name)
        # NOTE(review): this compares a string to the Empty class object,
        # which can never be equal -- possibly meant class_name == 'Empty';
        # confirm against upstream.
        if (class_name == Empty):
            klass = Empty
        if (klass is None):
            if recurly.STRICT_MODE:
                raise ValueError(('Class could not be found for json: %s' % properties))
            else:
                # Lenient mode: hand back the raw dict unchanged.
                return properties
        resource = klass()
        for (k, v) in properties.items():
            # The synthetic "object" tag set in cast_error is not a real
            # attribute of the error resource.
            if ((k == 'object') and (class_name == 'ErrorMayHaveTransaction')):
                continue
            attr = None
            attr_type = klass.schema.get(k)
            if attr_type:
                if (v is None):
                    attr = None
                elif (type(attr_type) == type):
                    # Primitive schema types (str, int, ...) cast directly.
                    attr = attr_type(v)
                elif (attr_type == datetime):
                    attr = datetime.datetime.strptime(v, DT_FORMAT)
                elif (isinstance(attr_type, str) and isinstance(v, dict)):
                    # Named nested resource.
                    attr = Resource.cast_json(v, class_name=attr_type)
                elif (isinstance(attr_type, list) and isinstance(attr_type[0], str) and isinstance(v, list)):
                    # List of named nested resources.
                    attr = [Resource.cast_json(r, class_name=attr_type[0]) for r in v]
            if (recurly.STRICT_MODE and (attr_type is None)):
                raise ValueError(('%s could not find property %s in schema %s given value %s' % (klass.__name__, k, klass.schema, v)))
            else:
                setattr(resource, k, attr)
        if response:
            response.body = properties
            resource.__response = response
        return resource

    def __repr__(self):
        return str(vars(self))

    def get_response(self):
        """Return the HTTP response this resource was created from."""
        return self.__response
def collect_bn_params(model, bn_candidate_layers):
    """Collect the affine parameters (weight/bias) of all batch-norm-like
    modules in *model*.

    Args:
        model: a torch.nn.Module whose submodules are scanned.
        bn_candidate_layers: iterable of module classes to treat as
            batch-norm layers.

    Returns:
        (params, names): parallel lists of parameter tensors and their
        fully qualified names ("<module>.<param>").
    """
    params = []
    names = []
    # isinstance accepts a tuple of classes, so one check replaces the
    # original inner loop over candidates -- which could also append the
    # same parameters twice when a module matched several candidate classes.
    candidates = tuple(bn_candidate_layers)
    for module_name, module in model.named_modules():
        if isinstance(module, candidates):
            # `np` in the original shadowed the conventional numpy alias.
            for param_name, param in module.named_parameters():
                if param_name in ('weight', 'bias'):
                    params.append(param)
                    names.append(f'{module_name}.{param_name}')
    return (params, names)
class SeedMixin(BaseMixin):
    """Mixin that equips a class with a seedable random-number generator.

    The generator is stored on ``self._rng`` and initialized from
    ``random_seed`` (None means an unseeded / entropy-seeded RNG).
    """

    def __init__(self, random_seed=None, *args, **kwargs):
        super(SeedMixin, self).__init__(*args, **kwargs)
        self.random_seed = random_seed
        self._rng = RNG(seed=self.random_seed)

    def make_random_seed(self):
        """Draw a fresh seed in [0, 2**31 - 1) from the internal RNG."""
        max_seed = 2 ** 31 - 1
        return self._rng.randint(max_seed)
class Site(Object):
    """Declarative description of a site (where an installation is located).

    Each attribute is a free-text string mapped to the correspondingly
    named XML tag; only ``name`` is mandatory (defaulting to '').
    """
    name = Unicode.T(default='', xmltagname='Name')
    description = Unicode.T(optional=True, xmltagname='Description')
    town = Unicode.T(optional=True, xmltagname='Town')
    county = Unicode.T(optional=True, xmltagname='County')
    region = Unicode.T(optional=True, xmltagname='Region')
    country = Unicode.T(optional=True, xmltagname='Country')
class open_with(Command):
    """:open_with [app] [flags] [mode]

    Open the selected files with the given application, flags and mode.
    The three tokens may appear in any order and any of them may be
    omitted; tokens are classified by shape (digits = mode, all-flag
    characters = flags, anything else = app).
    """

    def execute(self):
        (app, flags, mode) = self._get_app_flags_mode(self.rest(1))
        self.fm.execute_file(files=self.fm.thistab.get_selection(), app=app, flags=flags, mode=mode)

    def tab(self, tabnum):
        return self._tab_through_executables()

    def _get_app_flags_mode(self, string):
        """Extract the application, flags and mode from a string.

        Examples:
            "mplayer f 1" => ("mplayer", "f", 1)
            "atool 4" => ("atool", "", 4)
            "p" => ("", "p", 0)
            "" => ("", "", 0)
        """
        app = ''
        flags = ''
        mode = 0
        split = string.split()
        if (len(split) == 1):
            part = split[0]
            if self._is_app(part):
                app = part
            elif self._is_flags(part):
                flags = part
            elif self._is_mode(part):
                mode = part
        elif (len(split) == 2):
            part0 = split[0]
            part1 = split[1]
            if self._is_app(part0):
                app = part0
                if self._is_flags(part1):
                    flags = part1
                elif self._is_mode(part1):
                    mode = part1
            elif self._is_flags(part0):
                flags = part0
                if self._is_mode(part1):
                    mode = part1
            elif self._is_mode(part0):
                mode = part0
                if self._is_flags(part1):
                    flags = part1
        elif (len(split) >= 3):
            part0 = split[0]
            part1 = split[1]
            part2 = split[2]
            if self._is_app(part0):
                app = part0
                if self._is_flags(part1):
                    flags = part1
                    if self._is_mode(part2):
                        mode = part2
                elif self._is_mode(part1):
                    mode = part1
                    if self._is_flags(part2):
                        flags = part2
            elif self._is_flags(part0):
                flags = part0
                if self._is_mode(part1):
                    mode = part1
            elif self._is_mode(part0):
                mode = part0
                if self._is_flags(part1):
                    flags = part1
        return (app, flags, int(mode))

    def _is_app(self, arg):
        # An app is anything that is neither a flags token nor a number.
        return ((not self._is_flags(arg)) and (not arg.isdigit()))

    # FIX: these two helpers take no `self`; without @staticmethod a call
    # via `self._is_flags(part)` would pass the *instance* as `arg`.
    @staticmethod
    def _is_flags(arg):
        from ranger.core.runner import ALLOWED_FLAGS
        return all(((x in ALLOWED_FLAGS) for x in arg))

    @staticmethod
    def _is_mode(arg):
        # FIX: the digit charset had been corrupted to '' (so no token could
        # ever be recognized as a mode); restored to the decimal digits.
        return all(((x in '0123456789') for x in arg))
def test_dist_name(copy_sample):
    """Building a wheel honors a distribution name that differs from the
    module name (sample project 'altdistname')."""
    sample_dir = copy_sample('altdistname')
    make_wheel_in(sample_dir / 'pyproject.toml', sample_dir)
    wheel_path = sample_dir / 'package_dist1-0.1-py2.py3-none-any.whl'
    assert_isfile(wheel_path)
    with unpack(wheel_path) as unpacked:
        # The dist-info directory must use the declared dist name.
        assert_isdir(Path(unpacked, 'package_dist1-0.1.dist-info'))
class UnzipWrapper():
    """File-like wrapper that inflates a raw-deflate stream (a gzip body)
    read from an underlying file object, verifying the gzip trailer's
    CRC32 once the compressed data is exhausted.
    """

    def __init__(self, fp):
        # Negative wbits => raw deflate stream (no zlib/gzip header).
        self.__decoder = zlib.decompressobj((- zlib.MAX_WBITS))
        self.__data = b''  # decompressed bytes buffered beyond what was requested
        self.__crc = (zlib.crc32(self.__data) & CRC_MASK)
        self.__fp = fp
        self.__size = 0  # total decompressed byte count
        self.__is_fully_read = False

    def read(self, sz=(- 1)):
        """Read up to *sz* decompressed bytes (everything if sz < 0)."""
        amt_read = 0
        ans = []
        # Serve buffered overshoot from a previous read first.
        if self.__data:
            if ((sz < 0) or (len(self.__data) < sz)):
                ans.append(self.__data)
                amt_read += len(self.__data)
                self.__data = b''
            else:
                (self.__data, ret) = (self.__data[sz:], self.__data[:sz])
                return ret
        if (not self.__is_fully_read):
            # Pull compressed chunks until we have enough output or reach
            # the end of the deflate stream.
            while ((not self.__decoder.unused_data) and ((sz < 0) or (amt_read < sz))):
                chunk = self.__fp.read(1024)
                if chunk:
                    if self.__decoder.unconsumed_tail:
                        chunk = (self.__decoder.unconsumed_tail + chunk)
                    chunk = self.__decoder.decompress(chunk)
                    ans.append(chunk)
                    amt_read += len(chunk)
                    self.__size += len(chunk)
                    self.__crc = zlib.crc32(chunk, self.__crc)
                else:
                    if (not self.__decoder.unused_data):
                        raise ValueError('unexpected end of compressed gzip data, before reading trailer')
                    break
            if self.__decoder.unused_data:
                # End of deflate data: validate the 8-byte gzip trailer
                # (CRC32 + uncompressed size, both little-endian).
                tail = self.__decoder.unused_data
                if (len(tail) < 8):
                    tail += read_amt(self.__fp, (8 - len(tail)))
                self.__fp.read()  # drain anything after the trailer
                # FIX: unused_data can hold more than 8 bytes (reads happen
                # in 1 KiB chunks); unpack only the trailer itself.
                (crc, size) = struct.unpack(b'<LL', tail[:8])
                if ((crc & CRC_MASK) != (self.__crc & CRC_MASK)):
                    raise ValueError('gzip stream is corrupted, CRC does not match')
                self.__is_fully_read = True
        ans = b''.join(ans)
        if ((len(ans) > sz) and (sz > (- 1))):
            # Keep the overshoot for the next read.
            (ans, self.__data) = (ans[:sz], ans[sz:])
        return ans

    def readline(self, sz=(- 1)):
        """Read one line, up to *sz* bytes when sz >= 0."""
        data = self.read()
        idx = data.find(b'\n')
        # FIX: was `idx > 0`, which mishandled a newline at position 0
        # (the whole remaining buffer was returned as a single "line").
        if (idx >= 0):
            if ((sz < 0) or (idx < sz)):
                (line, self.__data) = (data[:(idx + 1)], data[(idx + 1):])
            else:
                (line, self.__data) = (data[:sz], data[sz:])
        elif (sz > (- 1)):
            (line, self.__data) = (data[:sz], data[sz:])
        else:
            line = data
        return line

    def close(self):
        self.__fp.close()

    def fileno(self):
        return self.__fp.fileno()

    def __iter__(self):
        # FIX: was an `if`, so iteration stopped after the first line.
        while True:
            line = self.readline()
            if (not line):
                return
            (yield line)

    def next(self):
        # Python-2-style iteration support kept for compatibility.
        ans = self.readline()
        if (not ans):
            raise StopIteration()
        return ans
def open_url(url: str, cache_dir: str=None, num_attempts: int=10, verbose: bool=True) -> Any:
    """Download the given URL and return it as a binary file object.

    Results are optionally cached on disk under *cache_dir*, keyed by the
    MD5 of the URL. Google Drive download-confirmation and quota pages are
    detected and handled by retrying with the confirmed link.

    Args:
        url: HTTP(S) URL to fetch; must satisfy is_url().
        cache_dir: directory for the on-disk cache, or None to disable it.
        num_attempts: number of tries before giving up.
        verbose: print progress dots to stdout.

    Returns:
        A binary file-like object (open cache file, or in-memory buffer).
    """
    assert is_url(url)
    assert (num_attempts >= 1)

    # Check the on-disk cache first.
    url_md5 = hashlib.md5(url.encode('utf-8')).hexdigest()
    if (cache_dir is not None):
        cache_files = glob.glob(os.path.join(cache_dir, (url_md5 + '_*')))
        if (len(cache_files) == 1):
            return open(cache_files[0], 'rb')

    # Download, retrying on failure until attempts are exhausted.
    url_name = None
    url_data = None
    with requests.Session() as session:
        if verbose:
            print(('Downloading %s ...' % url), end='', flush=True)
        for attempts_left in reversed(range(num_attempts)):
            try:
                with session.get(url) as res:
                    res.raise_for_status()
                    if (len(res.content) == 0):
                        raise IOError('No data received')
                    if (len(res.content) < 8192):
                        # Small responses may be Google Drive interstitials.
                        content_str = res.content.decode('utf-8')
                        if ('download_warning' in res.headers.get('Set-Cookie', '')):
                            links = [html.unescape(link) for link in content_str.split('"') if ('export=download' in link)]
                            if (len(links) == 1):
                                # Retry with the confirmed download link.
                                url = requests.compat.urljoin(url, links[0])
                                raise IOError('Google Drive virus checker nag')
                        if ('Google Drive - Quota exceeded' in content_str):
                            raise IOError('Google Drive quota exceeded')
                    match = re.search(r'filename="([^"]*)"', res.headers.get('Content-Disposition', ''))
                    url_name = (match[1] if match else url)
                    url_data = res.content
                    if verbose:
                        print(' done')
                    break
            except Exception:
                # FIX: was a bare `except:` -- KeyboardInterrupt/SystemExit
                # now abort immediately instead of being retried.
                if (not attempts_left):
                    if verbose:
                        print(' failed')
                    raise
                if verbose:
                    print('.', end='', flush=True)

    # Save to cache atomically (write to a temp file, then rename).
    if (cache_dir is not None):
        safe_name = re.sub(r'[^0-9a-zA-Z-._]', '_', url_name)
        cache_file = os.path.join(cache_dir, ((url_md5 + '_') + safe_name))
        temp_file = os.path.join(cache_dir, ((((('tmp_' + uuid.uuid4().hex) + '_') + url_md5) + '_') + safe_name))
        os.makedirs(cache_dir, exist_ok=True)
        with open(temp_file, 'wb') as f:
            f.write(url_data)
        os.replace(temp_file, cache_file)  # atomic on the same filesystem
    return io.BytesIO(url_data)
def regex_match_score(prediction, pattern):
    """Return True iff *pattern* matches at the start of *prediction*.

    The pattern is compiled case-insensitively with Unicode and multiline
    semantics. An invalid pattern is reported and scored as False rather
    than raising.
    """
    try:
        # Flags are combined with `|` (bitwise OR); the original's `+`
        # only worked because the flag values are distinct bits.
        compiled = re.compile(pattern, flags=(re.IGNORECASE | re.UNICODE | re.MULTILINE))
    except re.error:
        # Narrowed from BaseException: only a malformed pattern should be
        # swallowed; programming errors still propagate.
        print(('Regular expression failed to compile: %s' % pattern))
        return False
    return (compiled.match(prediction) is not None)
# NOTE(review): the decorator below appears truncated -- likely
# "@override_settings(MEDIA_ROOT=...)" lost its prefix; confirm upstream.
_settings(MEDIA_ROOT=tempfile.mkdtemp())
class TestReviewWavefront(SetUpTest, TestCase):
    """Tests for the staff review workflow of an uploaded Wavefront model."""

    fixtures = ['fixtures/simplemenu.json']

    def setUp(self):
        """Upload a wavefront model as the 'creator' user so the review
        views have an object to act on."""
        super(TestReviewWavefront, self).setUp()
        login = self.client.login(username='creator', password='password')
        self.assertTrue(login)
        url = reverse('wavefront_create')
        uploaded_thumbnail = SimpleUploadedFile(self.thumbnail_content.name, self.thumbnail_content.read())
        uploaded_file = SimpleUploadedFile(self.zipfile_content.name, self.zipfile_content.read())
        data = {'name': 'odm texturing', 'description': 'Test upload a wavefront', 'thumbnail_image': uploaded_thumbnail, 'file': uploaded_file}
        self.client.post(url, data, follow=True)
        self.object = Wavefront.objects.first()
        self.client.logout()

    def test_approve_wavefront(self):
        """Approving sends a notification mail and the detail page shows the
        approved state without leaking the review comment."""
        login = self.client.login(username='staff', password='password')
        self.assertTrue(login)
        url = reverse('wavefront_review', kwargs={'pk': self.object.id})
        response = self.client.post(url, {'approval': 'approve', 'comment': 'This should be in Approve page.'})
        self.assertIn('\n3D Model odm texturing approved by staff.\nThis should be in Approve page.', mail.outbox[(- 1)].body)
        url = reverse('wavefront_detail', kwargs={'pk': self.object.id})
        self.assertRedirects(response, url)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        # The review comment is mailed, not rendered on the public page.
        self.assertNotContains(response, 'This should be in Approve page.')
        self.assertContains(response, 'Approved Date')
        self.client.logout()

    def test_reject_model(self):
        """Rejecting mails the creator, shows the comment on the detail page,
        and lists the model under the creator's 'requires action' view."""
        login = self.client.login(username='staff', password='password')
        self.assertTrue(login)
        url = reverse('wavefront_review', kwargs={'pk': self.object.id})
        response = self.client.post(url, {'approval': 'reject', 'comment': 'This should be in requiring update page.'})
        self.assertIn('\n3D Model odm texturing rejected by staff.\nThis should be in requiring update page.', mail.outbox[(- 1)].body)
        url = reverse('wavefront_detail', kwargs={'pk': self.object.id})
        self.assertRedirects(response, url)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'This should be in requiring update page.')
        self.assertContains(response, 'Reviewed by Staff now')
        self.client.logout()
        # The creator should now see the model in the "requires action" list.
        self.client.login(username='creator', password='password')
        url = reverse('wavefront_require_action')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, '1 record found.')
        self.assertContains(response, 'odm texturing')
@_optionals.HAS_CPLEX.require_in_instance
class CplexOptimizer(OptimizationAlgorithm):
    """Optimization algorithm that delegates a QuadraticProgram to CPLEX.

    NOTE(review): the class/property/setter/staticmethod decorators below
    were missing (apparently stripped) in this copy and have been restored
    -- without them the second bare ``disp`` definition silently overwrote
    the first, and the accessors were plain methods; confirm upstream.
    """

    def __init__(self, disp: bool = False, cplex_parameters: Optional[Dict[str, Any]] = None) -> None:
        """
        Args:
            disp: whether CPLEX should print its log to stdout.
            cplex_parameters: optional dict of CPLEX parameter overrides.
        """
        self._disp = disp
        self._cplex_parameters = cplex_parameters

    @staticmethod
    def is_cplex_installed():
        """Return True when the optional CPLEX dependency is available."""
        return _optionals.HAS_CPLEX

    @property
    def disp(self) -> bool:
        """Whether the CPLEX log is printed during solve()."""
        return self._disp

    @disp.setter
    def disp(self, disp: bool):
        self._disp = disp

    @property
    def cplex_parameters(self) -> Optional[Dict[str, Any]]:
        """CPLEX parameter overrides forwarded to the model's solve()."""
        return self._cplex_parameters

    @cplex_parameters.setter
    def cplex_parameters(self, parameters: Optional[Dict[str, Any]]):
        self._cplex_parameters = parameters

    def get_compatibility_msg(self, problem: QuadraticProgram) -> str:
        """CPLEX can handle any QuadraticProgram, so always compatible."""
        return ''

    def solve(self, problem: QuadraticProgram) -> OptimizationResult:
        """Solve *problem* with CPLEX via its docplex representation.

        Returns a FAILURE result with an all-zero point (instead of
        raising) when CPLEX cannot solve the model.
        """
        mod = to_docplex_mp(problem)
        sol = mod.solve(log_output=self._disp, cplex_parameters=self._cplex_parameters)
        if sol is None:
            warn('CPLEX cannot solve the model')
            x = [0.0] * mod.number_of_variables
            return OptimizationResult(x=x, fval=problem.objective.evaluate(x), variables=problem.variables, status=OptimizationResultStatus.FAILURE, raw_results=None)
        x = sol.get_values(mod.iter_variables())
        return OptimizationResult(x=x, fval=sol.get_objective_value(), variables=problem.variables, status=self._get_feasibility_status(problem, x), raw_results=sol)
# NOTE(review): the original decorator line was corrupted -- the "@given"
# prefix and the numeric literals were stripped ("min_int=(- ), max_int=").
# BSON's native integer type is int64, so the bounds are reconstructed as
# the signed 64-bit range; confirm against upstream.
@given(everythings(min_int=-(2 ** 63), max_int=(2 ** 63) - 1, allow_null_bytes_in_keys=False, allow_datetime_microseconds=False), booleans())
def test_bson_converter(everything: Everything, detailed_validation: bool):
    """Round-trip an arbitrary Everything instance through the BSON
    converter (dumps then loads) and require equality."""
    converter = bson_make_converter(detailed_validation=detailed_validation)
    raw = converter.dumps(everything, codec_options=CodecOptions(tz_aware=True))
    assert (converter.loads(raw, Everything, codec_options=CodecOptions(tz_aware=True)) == everything)
class IndexedDataset(FairseqDataset):
    """Loader for a TorchNet IndexedDataset (.idx/.bin pair); the payload
    file is opened lazily on first item access.

    NOTE(review): the bare ``_cache(maxsize=8)`` line before __getitem__
    looks like a truncated ``@lru_cache(maxsize=8)`` decorator, and
    ``exists`` takes ``path`` with no ``self`` (probably a stripped
    ``@staticmethod``); confirm against upstream.
    """

    # Magic bytes identifying the index file format.
    _HDR_MAGIC = b'TNTIDX\x00\x00'

    def __init__(self, path, fix_lua_indexing=False):
        super().__init__()
        self.path = path
        # Lua tensors are 1-based; when set, 1 is subtracted from every element.
        self.fix_lua_indexing = fix_lua_indexing
        self.data_file = None  # opened lazily in read_data()
        self.read_index(path)

    def read_index(self, path):
        """Parse the .idx header: dtype code, element size, lengths, offsets."""
        with open(index_file_path(path), 'rb') as f:
            magic = f.read(8)
            assert (magic == self._HDR_MAGIC), "Index file doesn't match expected format. Make sure that --dataset-impl is configured properly."
            version = f.read(8)
            assert (struct.unpack('<Q', version) == (1,))
            (code, self.element_size) = struct.unpack('<QQ', f.read(16))
            self.dtype = _code_to_dtype[code]
            (self._len, self.s) = struct.unpack('<QQ', f.read(16))
            self.dim_offsets = read_longs(f, (self._len + 1))
            self.data_offsets = read_longs(f, (self._len + 1))
            self.sizes = read_longs(f, self.s)

    def read_data(self, path):
        """Open the .bin payload unbuffered (items are read with seek)."""
        self.data_file = open(data_file_path(path), 'rb', buffering=0)

    def check_index(self, i):
        # Raise for indices outside [0, len).
        if ((i < 0) or (i >= self._len)):
            raise IndexError('index out of range')

    def __del__(self):
        if self.data_file:
            self.data_file.close()

    _cache(maxsize=8)
    def __getitem__(self, i) -> torch.Tensor:
        """Read item *i* from the payload file as a LongTensor."""
        if (not self.data_file):
            self.read_data(self.path)
        self.check_index(i)
        tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[(i + 1)]]
        a = np.empty(tensor_size, dtype=self.dtype)
        self.data_file.seek((self.data_offsets[i] * self.element_size))
        self.data_file.readinto(a)
        item = torch.from_numpy(a).long()
        if self.fix_lua_indexing:
            item -= 1
        return item

    def __len__(self):
        return self._len

    def num_tokens(self, index):
        return self.sizes[index]

    def size(self, index):
        return self.sizes[index]

    def exists(path):
        """True when both the .idx and .bin files are present."""
        return (PathManager.exists(index_file_path(path)) and PathManager.exists(data_file_path(path)))

    def supports_prefetch(self):
        return False
def distorted_inputs(data_dir, batch_size):
    """Build the distorted-input pipeline for CIFAR-10 training.

    Uses the TF1 queue API (and Python 2 ``xrange``) to read the five
    binary training batches and apply random augmentation.

    Args:
        data_dir: directory containing data_batch_1.bin .. data_batch_5.bin.
        batch_size: number of images per returned batch.

    Returns:
        (images, labels) batches produced by a shuffling queue.
    """
    filenames = [os.path.join(data_dir, ('data_batch_%d.bin' % i)) for i in xrange(1, 6)]
    for f in filenames:
        if (not tf.gfile.Exists(f)):
            raise ValueError(('Failed to find file: ' + f))
    filename_queue = tf.train.string_input_producer(filenames)
    read_input = read_cifar10(filename_queue)
    reshaped_image = tf.cast(read_input.uint8image, tf.float32)
    height = IMAGE_SIZE
    width = IMAGE_SIZE
    # Augmentation: random crop, horizontal flip, brightness/contrast jitter.
    distorted_image = tf.random_crop(reshaped_image, [height, width, 3])
    distorted_image = tf.image.random_flip_left_right(distorted_image)
    distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)
    distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8)
    float_image = None
    # per_image_whitening was renamed per_image_standardization after TF 0.11.
    if (tf.__version__ == '0.11.0rc2'):
        float_image = tf.image.per_image_whitening(distorted_image)
    else:
        float_image = tf.image.per_image_standardization(distorted_image)
    # Keep a sizeable backlog in the queue so shuffling mixes well.
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int((NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * min_fraction_of_examples_in_queue))
    return _generate_image_and_label_batch(float_image, read_input.label, min_queue_examples, batch_size, shuffle=True)
# NOTE(review): the original decorator line was corrupted to the bare
# expression "(cache={}, maxmem=None)" (a syntax error). It is restored
# here as a memoizing "@cache(...)" call, which matches the visible
# arguments; confirm the decorator name against upstream.
@cache(cache={}, maxmem=None)
def bipartition_indices(N):
    """Return all bipartitions of the indices 0..N-1.

    Each bipartition is a pair of tuples (part1, part0); the enumeration
    runs over 2**(N-1) subsets so complementary pairs are not duplicated.

    Args:
        N: number of indices; N <= 0 yields an empty list.
    """
    result = []
    if (N <= 0):
        return result
    for i in range((2 ** (N - 1))):
        part = [[], []]
        for n in range(N):
            # Bit n of i decides which side index n falls on.
            bit = ((i >> n) & 1)
            part[bit].append(n)
        result.append((tuple(part[1]), tuple(part[0])))
    return result
class Dragon(Monster):
    """A fire-breathing monster supporting prototype cloning via copy()."""

    def __init__(self, name: str, hasWings: bool):
        self.name = name
        self.hasWings = hasWings
        # Every dragon breathes fire regardless of wings.
        self.canBreatheFire = True

    def copy(self) -> Monster:
        """Return a deep copy of this dragon.

        Raises:
            CloneNotSupportedException: if the instance cannot be deep-copied.
        """
        try:
            return deepcopy(self)
        except Exception as exc:
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt); the cause is chained for debuggability.
            raise CloneNotSupportedException from exc
# NOTE(review): the decorators below appear truncated -- "_on_failure"
# looks like "@raise_on_failure" and the ".parametrize" lines lost their
# "@pytest.mark" prefix; confirm against upstream before running.
_on_failure
.parametrize('channels_per_node', [CHAIN])
.parametrize('number_of_nodes', [3, 4, 5])
def test_mediated_transfer(raiden_network: List[RaidenService], number_of_nodes, deposit, token_addresses, network_wait, bench):
    """Benchmark a mediated transfer along a chain of nodes and assert the
    per-channel balance invariants for every hop afterwards."""
    apps = raiden_network
    token_address = token_addresses[0]
    chain_state = views.state_from_raiden(apps[0])
    token_network_registry_address = apps[0].default_registry.address
    token_network_address = views.get_token_network_address_by_token_address(chain_state, token_network_registry_address, token_address)
    with bench():
        amount = PaymentAmount(10)
        with bench('transfer'):
            secrethash = transfer(initiator_app=apps[0], target_app=apps[(- 1)], token_address=token_address, amount=amount, identifier=PaymentID(1), timeout=(network_wait * number_of_nodes), routes=[apps])
        # Walk the chain pairwise: each hop's sender lost `amount` and its
        # partner gained it.
        while (len(apps) > 1):
            app1 = apps.pop(0)
            app2 = apps[0]
            with block_timeout_for_transfer_by_secrethash(app2, secrethash):
                wait_assert(assert_succeeding_transfer_invariants, token_network_address, app1, (deposit - amount), [], app2, (deposit + amount), [])
class DescribeRenderedPageBreak():
    """Behavior specs for RenderedPageBreak: splitting a paragraph into the
    fragments before and after a ``w:lastRenderedPageBreak`` element.

    Each test builds a paragraph from a compact cxml expression, wraps a
    selected page-break element, and checks the produced fragment XML.
    """

    def it_raises_on_preceding_fragment_when_page_break_is_not_first_in_paragrah(self, fake_parent: t.ProvidesStoryPart):
        """Only the first rendered page-break defines a preceding fragment."""
        p_cxml = 'w:p/(w:r/(w:t"abc",w:lastRenderedPageBreak,w:lastRenderedPageBreak))'
        p = cast(CT_P, element(p_cxml))
        lrpb = p.lastRenderedPageBreaks[(- 1)]
        page_break = RenderedPageBreak(lrpb, fake_parent)
        with pytest.raises(ValueError, match='only defined on first rendered page-br'):
            page_break.preceding_paragraph_fragment

    def it_produces_None_for_preceding_fragment_when_page_break_is_leading(self, fake_parent: t.ProvidesStoryPart):
        """A page-break with no content before it has no preceding fragment."""
        p_cxml = 'w:p/(w:pPr/w:ind,w:r/(w:lastRenderedPageBreak,w:t"foo",w:t"bar"))'
        p = cast(CT_P, element(p_cxml))
        lrpb = p.lastRenderedPageBreaks[0]
        page_break = RenderedPageBreak(lrpb, fake_parent)
        preceding_fragment = page_break.preceding_paragraph_fragment
        assert (preceding_fragment is None)

    def it_can_split_off_the_preceding_paragraph_content_when_in_a_run(self, fake_parent: t.ProvidesStoryPart):
        """Content before the break (inside a run) forms its own paragraph."""
        p_cxml = 'w:p/( w:pPr/w:ind ,w:r/(w:t"foo",w:lastRenderedPageBreak,w:t"bar") ,w:r/w:t"barfoo")'
        p = cast(CT_P, element(p_cxml))
        lrpb = p.lastRenderedPageBreaks[0]
        page_break = RenderedPageBreak(lrpb, fake_parent)
        preceding_fragment = page_break.preceding_paragraph_fragment
        expected_cxml = 'w:p/(w:pPr/w:ind,w:r/w:t"foo")'
        assert (preceding_fragment is not None)
        assert (preceding_fragment._p.xml == xml(expected_cxml))

    def and_it_can_split_off_the_preceding_paragraph_content_when_in_a_hyperlink(self, fake_parent: t.ProvidesStoryPart):
        """A break inside a hyperlink keeps the whole hyperlink in the
        preceding fragment (hyperlinks are not split)."""
        p_cxml = 'w:p/( w:pPr/w:ind ,w:hyperlink/w:r/(w:t"foo",w:lastRenderedPageBreak,w:t"bar") ,w:r/w:t"barfoo")'
        p = cast(CT_P, element(p_cxml))
        lrpb = p.lastRenderedPageBreaks[0]
        page_break = RenderedPageBreak(lrpb, fake_parent)
        preceding_fragment = page_break.preceding_paragraph_fragment
        expected_cxml = 'w:p/(w:pPr/w:ind,w:hyperlink/w:r/(w:t"foo",w:t"bar"))'
        assert (preceding_fragment is not None)
        assert (preceding_fragment._p.xml == xml(expected_cxml))

    def it_raises_on_following_fragment_when_page_break_is_not_first_in_paragrah(self, fake_parent: t.ProvidesStoryPart):
        """Only the first rendered page-break defines a following fragment."""
        p_cxml = 'w:p/(w:r/(w:lastRenderedPageBreak,w:lastRenderedPageBreak,w:t"abc"))'
        p = cast(CT_P, element(p_cxml))
        lrpb = p.lastRenderedPageBreaks[(- 1)]
        page_break = RenderedPageBreak(lrpb, fake_parent)
        with pytest.raises(ValueError, match='only defined on first rendered page-br'):
            page_break.following_paragraph_fragment

    def it_produces_None_for_following_fragment_when_page_break_is_trailing(self, fake_parent: t.ProvidesStoryPart):
        """A page-break with no content after it has no following fragment."""
        p_cxml = 'w:p/(w:pPr/w:ind,w:r/(w:t"foo",w:t"bar",w:lastRenderedPageBreak))'
        p = cast(CT_P, element(p_cxml))
        lrpb = p.lastRenderedPageBreaks[0]
        page_break = RenderedPageBreak(lrpb, fake_parent)
        following_fragment = page_break.following_paragraph_fragment
        assert (following_fragment is None)

    def it_can_split_off_the_following_paragraph_content_when_in_a_run(self, fake_parent: t.ProvidesStoryPart):
        """Content after the break (inside a run) forms its own paragraph."""
        p_cxml = 'w:p/( w:pPr/w:ind ,w:r/(w:t"foo",w:lastRenderedPageBreak,w:t"bar") ,w:r/w:t"foo")'
        p = cast(CT_P, element(p_cxml))
        lrpb = p.lastRenderedPageBreaks[0]
        page_break = RenderedPageBreak(lrpb, fake_parent)
        following_fragment = page_break.following_paragraph_fragment
        expected_cxml = 'w:p/(w:pPr/w:ind,w:r/w:t"bar",w:r/w:t"foo")'
        assert (following_fragment is not None)
        assert (following_fragment._p.xml == xml(expected_cxml))

    def and_it_can_split_off_the_following_paragraph_content_when_in_a_hyperlink(self, fake_parent: t.ProvidesStoryPart):
        """A break inside a hyperlink drops the whole hyperlink from the
        following fragment (hyperlinks are not split)."""
        p_cxml = 'w:p/( w:pPr/w:ind ,w:hyperlink/w:r/(w:t"foo",w:lastRenderedPageBreak,w:t"bar") ,w:r/w:t"baz" ,w:r/w:t"qux")'
        p = cast(CT_P, element(p_cxml))
        lrpb = p.lastRenderedPageBreaks[0]
        page_break = RenderedPageBreak(lrpb, fake_parent)
        following_fragment = page_break.following_paragraph_fragment
        expected_cxml = 'w:p/(w:pPr/w:ind,w:r/w:t"baz",w:r/w:t"qux")'
        assert (following_fragment is not None)
        assert (following_fragment._p.xml == xml(expected_cxml))
class RotationInvariantPooling(nn.Module):
    """Max-pool over the orientation axis of an oriented-response feature map.

    The channel dimension is treated as (nInputPlane // nOrientation)
    groups of nOrientation rotated responses; per group, only the largest
    response over the orientations is kept, making the output invariant
    to rotation of the input.
    """

    def __init__(self, nInputPlane, nOrientation=8):
        super().__init__()
        self.nInputPlane = nInputPlane
        self.nOrientation = nOrientation

    def forward(self, x):
        batch, _, height, width = x.size()
        # Expose the orientation axis, then reduce it with a max.
        grouped = x.view(batch, -1, self.nOrientation, height, width)
        pooled = grouped.max(dim=2, keepdim=False).values
        return pooled
class RFC822Name(GeneralName):
    """X.509 ``rfc822Name`` general name (a bare e-mail address).

    NOTE(review): the ``@property`` / ``@classmethod`` decorators below
    were missing in this copy and have been restored -- without
    ``@property``, ``__eq__``/``__hash__`` compared bound methods instead
    of the stored string. The return annotation on
    ``_init_without_validation`` is quoted because the class name is not
    yet bound while the class body executes.
    """

    def __init__(self, value: str) -> None:
        if isinstance(value, str):
            try:
                # Values must be A-label (pure ASCII); IDNA-encoding
                # unicode addresses is the caller's responsibility.
                value.encode('ascii')
            except UnicodeEncodeError:
                raise ValueError('RFC822Name values should be passed as an A-label string. This means unicode characters should be encoded via a library like idna.')
        else:
            raise TypeError('value must be string')
        (name, address) = parseaddr(value)
        if (name or (not address)):
            # A display name or an unparsable address is not a valid
            # bare rfc822name.
            raise ValueError('Invalid rfc822name value')
        self._value = value

    @property
    def value(self) -> str:
        return self._value

    @classmethod
    def _init_without_validation(cls, value: str) -> "RFC822Name":
        """Construct an instance bypassing __init__ validation (internal)."""
        instance = cls.__new__(cls)
        instance._value = value
        return instance

    def __repr__(self) -> str:
        return f'<RFC822Name(value={self.value!r})>'

    def __eq__(self, other: object) -> bool:
        if (not isinstance(other, RFC822Name)):
            return NotImplemented
        return (self.value == other.value)

    def __hash__(self) -> int:
        return hash(self.value)
def test_loop(dataloader, model, loss_fn):
    """Evaluate *model* over *dataloader* and print accuracy / average loss.

    Accuracy is computed from the argmax over class logits; the loss is
    averaged per batch.
    """
    n_samples = len(dataloader.dataset)
    n_batches = len(dataloader)
    test_loss = 0
    correct = 0
    # Inference only: no gradients needed.
    with torch.no_grad():
        for features, labels in dataloader:
            outputs = model(features)
            test_loss += loss_fn(outputs, labels).item()
            correct += (outputs.argmax(1) == labels).type(torch.float).sum().item()
    test_loss /= n_batches
    correct /= n_samples
    print(f'''Test Error:
 Accuracy: {(100 * correct):>0.1f}%, Avg loss: {test_loss:>8f}
''')
class PreferencesButton(Gtk.HBox):
    """Menu button for the album browser: lets the user choose the album
    sort order (persisted in config key "browsers.album_sort") and open
    the browser preferences dialog."""

    def __init__(self, browser, model):
        super().__init__()
        # (label, comparison function) pairs; entry i maps to sort id 100 + i.
        sort_orders = [(_('_Title'), self.__compare_title), (_('_People'), self.__compare_people), (_('_Date'), self.__compare_date), (_('_Date Added'), self.__compare_date_added), (_('_Original Date'), self.__compare_original_date), (_('_Genre'), self.__compare_genre), (_('_Rating'), self.__compare_rating), (_('Play_count'), self.__compare_avgplaycount)]
        menu = Gtk.Menu()
        sort_item = Gtk.MenuItem(label=_('Sort _by...'), use_underline=True)
        sort_menu = Gtk.Menu()
        # Restore the previously chosen sort order (defaults to 1).
        active = config.getint('browsers', 'album_sort', 1)
        item = None
        for (i, (label, func)) in enumerate(sort_orders):
            # Radio items share one group; `item` carries the group forward.
            item = RadioMenuItem(group=item, label=label, use_underline=True)
            model.set_sort_func((100 + i), func)
            if (i == active):
                model.set_sort_column_id((100 + i), Gtk.SortType.ASCENDING)
                item.set_active(True)
            item.connect('toggled', util.DeferredSignal(self.__sort_toggled_cb), model, i)
            sort_menu.append(item)
        sort_item.set_submenu(sort_menu)
        menu.append(sort_item)
        pref_item = MenuItem(_('_Preferences'), Icons.PREFERENCES_SYSTEM)
        menu.append(pref_item)
        connect_obj(pref_item, 'activate', Preferences, browser)
        menu.show_all()
        button = MenuButton(SymbolicIconImage(Icons.EMBLEM_SYSTEM, Gtk.IconSize.MENU), arrow=True)
        button.set_menu(menu)
        self.pack_start(button, False, False, 0)

    def __sort_toggled_cb(self, item, model, num):
        """Persist the chosen sort order and apply it to the model."""
        if item.get_active():
            config.set('browsers', 'album_sort', str(num))
            model.set_sort_column_id((100 + num), Gtk.SortType.ASCENDING)

    # The callbacks below unwrap the two model rows and delegate to the
    # matching album comparison helper.
    def __compare_title(self, model, i1, i2, data):
        (a1, a2) = (model.get_value(i1), model.get_value(i2))
        return compare_title(a1, a2)

    def __compare_people(self, model, i1, i2, data):
        (a1, a2) = (model.get_value(i1), model.get_value(i2))
        return compare_people(a1, a2)

    def __compare_date(self, model, i1, i2, data):
        (a1, a2) = (model.get_value(i1), model.get_value(i2))
        return compare_date(a1, a2)

    def __compare_date_added(self, model, i1, i2, data):
        (a1, a2) = (model.get_value(i1), model.get_value(i2))
        return compare_date_added(a1, a2)

    def __compare_original_date(self, model, i1, i2, data):
        (a1, a2) = (model.get_value(i1), model.get_value(i2))
        return compare_original_date(a1, a2)

    def __compare_genre(self, model, i1, i2, data):
        (a1, a2) = (model.get_value(i1), model.get_value(i2))
        return compare_genre(a1, a2)

    def __compare_rating(self, model, i1, i2, data):
        (a1, a2) = (model.get_value(i1), model.get_value(i2))
        return compare_rating(a1, a2)

    def __compare_avgplaycount(self, model, i1, i2, data):
        (a1, a2) = (model.get_value(i1), model.get_value(i2))
        return compare_avgplaycount(a1, a2)
class Callback_Functions():
    """Hook callbacks for a Qiling instrumentation script. Each callback
    receives the Qiling instance plus positional args whose last element
    is the ``user_data`` dict configured for the hook.

    NOTE(review): the methods take ``ql`` first with no ``self`` -- they
    are presumably meant to be ``@staticmethod``s (the decorators appear
    to have been stripped); confirm against upstream.
    """

    def read_mem(ql: Qiling, *args):
        """Log *bytes_size* bytes of memory at *address*."""
        user_data = args[(- 1)]
        buff = ql.mem.read(user_data['address'], user_data['bytes_size'])
        ql.log.info(f"Hook was triggered at -> {user_data['address']}")
        ql.log.info(buff)

    def read_reg(ql: Qiling, *args):
        """Log the current value of *register_name*."""
        user_data = args[(- 1)]
        buff = ql.reg.read(user_data['register_name'])
        ql.log.info(f"Hook was triggered at -> {user_data['register_name']}")
        ql.log.info(buff)

    def write_mem(ql: Qiling, *args):
        """Write *value* to memory at *address* and log the result."""
        user_data = args[(- 1)]
        buff = ql.mem.write(user_data['address'], user_data['value'])
        ql.log.info(f"Hook was triggered at -> {user_data['address']}")
        ql.log.info(buff)

    def write_reg(ql: Qiling, *args):
        """Write *value* into *register_name* and log the result."""
        user_data = args[(- 1)]
        buff = ql.reg.write(user_data['register_name'], user_data['value'])
        ql.log.info(f"Hook was triggered at -> {user_data['register_name']}")
        ql.log.info(buff)

    def emu_start(ql: Qiling, *args):
        """Start emulation over the [start, end) range from user_data."""
        user_data = args[(- 1)]
        ql.emu_start(begin=user_data['start'], end=user_data['end'])

    def emu_stop(ql: Qiling, *args):
        """Stop emulation (used as a kill-switch hook)."""
        ql.log.info('killer switch found, stopping')
        ql.emu_stop()

    def save(ql: Qiling, *args):
        """Snapshot the current emulation state."""
        ql.save()
# NOTE(review): this decorator appears truncated -- likely a
# "@pytest.mark.parametrize(" whose prefix was lost. The u'' literals in
# the first two cases also look like they lost non-ASCII payload
# characters (an empty string would not encode to b'\x04\x10\x04O' under
# UCS2); confirm against upstream.
('plaintext, encoding, expected_parts, expected_encoding', [(u'', consts.SMPP_ENCODING_DEFAULT, [b'\x00'], consts.SMPP_ENCODING_DEFAULT), (u'', consts.SMPP_ENCODING_DEFAULT, [b'\x04\x10\x04O'], consts.SMPP_ENCODING_ISO10646), (u'e', consts.SMPP_ENCODING_ISO88591, [b'\xe9'], consts.SMPP_ENCODING_ISO88591)])
def test_make_parts_single(plaintext, encoding, expected_parts, expected_encoding):
    """A message that fits in one SMS part keeps (or upgrades) its encoding
    and yields exactly the expected single encoded part."""
    assert (make_parts(plaintext, encoding) == (expected_parts, expected_encoding, consts.SMPP_MSGTYPE_DEFAULT))
def nth_product(index, *args):
    """Equivalent to ``list(itertools.product(*args))[index]`` without
    materializing the full Cartesian product.

    Negative indices count from the end; an out-of-range *index* raises
    IndexError.
    """
    # Work from the last (fastest-varying) pool outward.
    pools = [tuple(pool) for pool in args][::-1]
    sizes = [len(pool) for pool in pools]
    total = reduce(mul, sizes)
    if index < 0:
        index += total
    if not 0 <= index < total:
        raise IndexError
    selection = []
    for pool, size in zip(pools, sizes):
        index, position = divmod(index, size)
        selection.append(pool[position])
    selection.reverse()
    return tuple(selection)
def on_vid_button_clicked():
    """Toggle video recording in the Picamera2 preview UI.

    Starts an H.264 encode to the filename/filetype chosen in the video
    tab (ffmpeg muxing for container formats, raw file output otherwise),
    or stops the running encoder on the second press. Updates the record
    button label and disables the mode tabs while recording.
    """
    global recording
    if (not recording):
        # Lock the mode tabs so settings can't change mid-recording.
        mode_tabs.setEnabled(False)
        encoder = H264Encoder()
        # Container formats need ffmpeg; bare streams go straight to a file.
        if (vid_tab.filetype.currentText() in ['mp4', 'mkv', 'mov', 'ts', 'avi']):
            output = FfmpegOutput(f"{(vid_tab.filename.text() if vid_tab.filename.text() else 'test')}.{vid_tab.filetype.currentText()}")
        else:
            output = FileOutput(f"{(vid_tab.filename.text() if vid_tab.filename.text() else 'test')}.{vid_tab.filetype.currentText()}")
        picam2.start_encoder(encoder, output, vid_tab.quality)
        rec_button.setText('Stop recording')
        recording = True
    else:
        picam2.stop_encoder()
        rec_button.setText('Start recording')
        mode_tabs.setEnabled(True)
        recording = False
def CheckProcList():
    """Scan the live process list for security products and classify them.

    Process entries tagged SECURITY_PRODUCT carry a friendly name of the
    form "!!! name [or name2 ...] !!!"; each name's first word (lowercased)
    is checked against our handler scripts and fingerprint files.

    Returns:
        (toRun, toCreate): dict key-views of PSP short-names we have a
        handler/fingerprint for, and those we still need to create.
    """
    # Dicts used as insertion-ordered, de-duplicating sets.
    toRun = dict()
    toCreate = dict()
    procs = ops.processes.processlist.get_processlist()
    for secproduct in filter((lambda x: (x.proctype == 'SECURITY_PRODUCT')), procs):
        match = re.search(r'^!!! (.*) !!!$', secproduct.friendlyname)
        if match is None:
            # FIX: the original called .group() unconditionally and crashed
            # with AttributeError when a friendly name did not match the
            # "!!! ... !!!" banner form; skip such entries instead.
            continue
        for psp in re.split(r'\sor\s', match.group(1)):
            psp = psp.split(' ')[0].lower()
            if (os.path.exists(os.path.join(pyScriptsDir, 'lib', 'ops', 'psp', '{0}.py'.format(psp))) or os.path.exists(os.path.join(ops.DATA, 'pspFPs', '{0}-fp.xml'.format(psp)))):
                toRun[psp] = None
            else:
                toCreate[psp] = None
    return (toRun.keys(), toCreate.keys())
class BNAfterConvTranspose(torch.nn.Module):
    """ConvTranspose2d -> BatchNorm2d -> ReLU module (10 -> 10 channels,
    3x3 kernel); the transposed-convolution hyper-parameters are forwarded
    from the constructor."""

    def __init__(self, padding=0, stride=1, dilation=1, groups=1, output_padding=0):
        super().__init__()
        self.conv1 = torch.nn.ConvTranspose2d(
            10, 10, 3,
            padding=padding,
            stride=stride,
            dilation=dilation,
            groups=groups,
            output_padding=output_padding,
        )
        self.bn1 = torch.nn.BatchNorm2d(10)
        self.relu1 = torch.nn.ReLU()

    def forward(self, x):
        # Same three-stage pipeline as the original, as a chained call.
        return self.relu1(self.bn1(self.conv1(x)))
class Effect3995(BaseEffect):
    """Projected beacon effect: multiplies the target ship's signature
    radius by the beacon's signatureRadiusMultiplier attribute, with
    stacking penalties applied (group 'postMul').

    NOTE(review): ``handler`` takes no ``self``/``cls`` -- it is presumably
    meant to be a ``@staticmethod``; the decorator may have been stripped.
    """
    runTime = 'early'  # applied early in the fit-calculation pass
    type = ('projected', 'passive')

    def handler(fit, beacon, context, projectionRange, **kwargs):
        fit.ship.multiplyItemAttr('signatureRadius', beacon.getModifiedItemAttr('signatureRadiusMultiplier'), stackingPenalties=True, penaltyGroup='postMul', **kwargs)
def evaluate_metrics(all_prediction, SLOT_LIST):
    """Score dialogue-state-tracking predictions over all dialogues.

    Args:
        all_prediction: mapping of dialogue id -> dict with a 'turns'
            mapping, each turn holding 'turn_belief' and 'pred_belief'.
        SLOT_LIST: slot names passed through to compute_acc.

    Returns:
        (joint_acc_score, F1_score, turn_acc_score) -- each 0 when the
        corresponding denominator is zero.
    """
    total = 0
    joint_acc = 0
    turn_acc = 0
    F1_pred = 0
    F1_count = 0
    for dial in all_prediction.values():
        for cv in dial['turns'].values():
            gold = set(cv['turn_belief'])
            pred = set(cv['pred_belief'])
            # Joint accuracy requires the full belief state to match.
            if gold == pred:
                joint_acc += 1
            total += 1
            turn_acc += compute_acc(gold, pred, SLOT_LIST)
            f1, _, _, count = compute_prf(gold, pred)
            F1_pred += f1
            F1_count += count
    joint_acc_score = joint_acc / float(total) if total != 0 else 0
    turn_acc_score = turn_acc / float(total) if total != 0 else 0
    F1_score = F1_pred / float(F1_count) if F1_count != 0 else 0
    return (joint_acc_score, F1_score, turn_acc_score)
class MyOp(COp):
    """Minimal test Op: *nin* double inputs, one double output named
    "<name>_R". The numeric behavior is delegated to ``self.impl``, which
    is expected to be supplied by subclasses / instances.
    """

    # Attributes that define the identity of an Op instance (hash/eq).
    __props__ = ('nin', 'name')

    def __init__(self, nin, name):
        self.nin = nin
        self.name = name

    def make_node(self, *inputs):
        """Validate arity and dtype, then build the Apply node."""
        assert (len(inputs) == self.nin)
        inputs = list(map(as_variable, inputs))
        for input in inputs:
            if (input.type is not tdouble):
                raise Exception('Error 1')
        outputs = [double((self.name + '_R'))]
        return Apply(self, inputs, outputs)

    def __str__(self):
        return self.name

    def perform(self, node, inputs, out_):
        # Delegate the actual computation to impl (not defined on MyOp).
        (out,) = out_
        out[0] = self.impl(*inputs)

    def c_code_cache_version(self):
        return (1,)
class CollectReport(BaseReport):
    """Report object for the collection phase of a node.

    NOTE(review): ``location`` was missing its ``@property`` decorator in
    this copy (restored here) -- callers read ``report.location`` as an
    attribute, which previously yielded a bound method instead of the
    tuple.
    """

    # Collection reports are always for the "collect" phase.
    when = 'collect'

    def __init__(self, nodeid: str, outcome: "Literal['passed', 'failed', 'skipped']", longrepr: Union[(None, ExceptionInfo[BaseException], Tuple[(str, int, str)], str, TerminalRepr)], result: Optional[List[Union[(Item, Collector)]]], sections: Iterable[Tuple[(str, str)]]=(), **extra) -> None:
        self.nodeid = nodeid  # normalized id of the collected node
        self.outcome = outcome
        self.longrepr = longrepr  # failure representation, if collection failed
        self.result = (result or [])  # collected child items/collectors
        self.sections = list(sections)  # captured-output sections (name, content)
        # Extra keyword attributes (e.g. fspath) are attached verbatim;
        # `location` below relies on fspath being provided this way.
        self.__dict__.update(extra)

    @property
    def location(self) -> Optional[Tuple[(str, Optional[int], str)]]:
        """(fspath, lineno, domain) triple; collectors carry no line number."""
        return (self.fspath, None, self.fspath)

    def __repr__(self) -> str:
        return '<CollectReport {!r} lenresult={} outcome={!r}>'.format(self.nodeid, len(self.result), self.outcome)
class TestPredictor(unittest.TestCase):
    """Evaluator/Seq2seq interaction tests.

    BUG FIX: the two bare tuple statements that preceded
    ``test_set_eval_mode`` were mangled ``@patch.object`` decorators (the
    ``mock_eval``/``mock_call`` parameters require exactly two patches,
    applied bottom-up); they are restored below.
    """

    def setUp(self):
        # Build a tiny LSTM seq2seq model over the bundled eng-fra dataset.
        test_path = os.path.dirname(os.path.realpath(__file__))
        src = SourceField()
        tgt = TargetField()
        self.dataset = torchtext.data.TabularDataset(path=os.path.join(test_path, 'data/eng-fra.txt'), format='tsv', fields=[('src', src), ('tgt', tgt)])
        src.build_vocab(self.dataset)
        tgt.build_vocab(self.dataset)
        encoder = EncoderRNN(len(src.vocab), 10, 10, rnn_cell='lstm')
        decoder = DecoderRNN(len(tgt.vocab), 10, 10, tgt.sos_id, tgt.eos_id, rnn_cell='lstm')
        self.seq2seq = Seq2seq(encoder, decoder)
        for param in self.seq2seq.parameters():
            param.data.uniform_((- 0.08), 0.08)

    @patch.object(Seq2seq, '__call__', return_value=([], None, dict(inputs=[], length=([10] * 64), sequence=MagicMock())))
    @patch.object(Seq2seq, 'eval')
    def test_set_eval_mode(self, mock_eval, mock_call):
        """Evaluator must switch the model to eval mode before any forward call."""
        mock_mgr = MagicMock()
        mock_mgr.attach_mock(mock_eval, 'eval')
        mock_mgr.attach_mock(mock_call, 'call')
        evaluator = Evaluator(batch_size=64)
        with patch('seq2seq.evaluator.evaluator.torch.stack', return_value=None), patch('seq2seq.loss.NLLLoss.eval_batch', return_value=None):
            evaluator.evaluate(self.seq2seq, self.dataset)
        num_batches = int(math.ceil((len(self.dataset) / evaluator.batch_size)))
        # One eval() call, then one forward call per batch.
        expected_calls = ([call.eval()] + (num_batches * [call.call(ANY)]))
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(expected_calls, mock_mgr.mock_calls)
class TypeAreaMultiHeadAttention(nn.Module):
    """Multi-head attention over paired "area" and "semantic/type" streams.

    Both streams get their own Q/K/V projections, are attended jointly by
    TypeAreaScaledDotProductAttention, and the output adds residuals from
    BOTH input streams before layer norm.

    Note: each head keeps the full model width (dim_per_head == d_model),
    so every projection maps d_model -> n_head * d_model.
    """
    def __init__(self, n_head: int, d_model: int, dropout: float=0.1):
        super().__init__()
        # Heads are not carved out of d_model; each head uses full width.
        self.dim_per_head = d_model
        self.n_head = n_head
        # "_s" projections: semantic/type stream.
        self.linear_qs = nn.Linear(d_model, (n_head * d_model), bias=False)
        self.linear_ks = nn.Linear(d_model, (n_head * d_model), bias=False)
        self.linear_vs = nn.Linear(d_model, (n_head * d_model), bias=False)
        # "_a" projections: area stream.
        self.linear_qa = nn.Linear(d_model, (n_head * d_model), bias=False)
        self.linear_ka = nn.Linear(d_model, (n_head * d_model), bias=False)
        self.linear_va = nn.Linear(d_model, (n_head * d_model), bias=False)
        self.linear_final = nn.Linear((n_head * d_model), d_model, bias=False)
        # Scale = sqrt(4 * d_model); passed to the scaled dot-product attention.
        scaling_factor = ((4 * d_model) ** 0.5)
        self.attention = TypeAreaScaledDotProductAttention(scale=scaling_factor)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)
    def forward(self, q_a: torch.Tensor, k_a: torch.Tensor, v_a: torch.Tensor, q_s: torch.Tensor, k_s: torch.Tensor, v_s: torch.Tensor, mask: torch.Tensor=None) -> torch.Tensor:
        # Inputs are assumed (batch, len, d_model) — TODO confirm with callers.
        n_head = self.n_head
        dim_per_head = self.dim_per_head
        (sz_b, len_q, len_k, len_v) = (q_a.size(0), q_a.size(1), k_a.size(1), v_a.size(1))
        residual_a = q_a
        residual_s = q_s
        # NOTE(review): projections are viewed straight to
        # (batch, n_head, len, dim) instead of the conventional
        # view-to-(batch, len, n_head, dim)-then-transpose; this distributes
        # positions across heads differently — confirm it is what
        # TypeAreaScaledDotProductAttention expects.
        q_a = self.linear_qa(q_a).view(sz_b, n_head, len_q, dim_per_head)
        k_a = self.linear_ka(k_a).view(sz_b, n_head, len_k, dim_per_head)
        v_a = self.linear_va(v_a).view(sz_b, n_head, len_v, dim_per_head)
        q_s = self.linear_qs(q_s).view(sz_b, n_head, len_q, dim_per_head)
        k_s = self.linear_ks(k_s).view(sz_b, n_head, len_k, dim_per_head)
        v_s = self.linear_vs(v_s).view(sz_b, n_head, len_v, dim_per_head)
        if (mask is not None):
            # Broadcast the mask over the head dimension.
            mask = mask.unsqueeze(1)
        output = self.attention(q_a, k_a, v_a, q_s, k_s, v_s, mask=mask)
        # Re-merge heads: (batch, len, n_head * dim_per_head).
        output = output.transpose(1, 2).contiguous().view(sz_b, len_q, (- 1))
        output = self.dropout(self.linear_final(output))
        # Residual connection from both input streams.
        output += (residual_a + residual_s)
        output = self.layer_norm(output)
        return output
class AugMixAugment():
    """AugMix-style augmentation: mix several randomly augmented versions of
    an image back together with Dirichlet weights.

    ops: pool of augmentation callables to sample from.
    alpha: Dirichlet/Beta concentration for the mixing weights.
    width: number of parallel augmentation chains.
    depth: ops per chain; <= 0 draws a random depth in [1, 3].
    blended: use the sequential pairwise-blend variant instead of the
        summed-canvas variant.
    """
    def __init__(self, ops, alpha=1.0, width=3, depth=(- 1), blended=False):
        self.ops = ops
        self.alpha = alpha
        self.width = width
        self.depth = depth
        self.blended = blended
    def _calc_blended_weights(self, ws, m):
        # Convert absolute mixing weights into successive Image.blend alphas
        # so that sequential pairwise blending reproduces the weighted sum.
        ws = (ws * m)
        cump = 1.0
        rws = []
        for w in ws[::(- 1)]:
            alpha = (w / cump)
            cump *= (1 - alpha)
            rws.append(alpha)
        return np.array(rws[::(- 1)], dtype=np.float32)
    def _apply_blended(self, img, mixing_weights, m):
        # Blend each augmented chain into the running image one at a time.
        img_orig = img.copy()
        ws = self._calc_blended_weights(mixing_weights, m)
        for w in ws:
            depth = (self.depth if (self.depth > 0) else np.random.randint(1, 4))
            ops = np.random.choice(self.ops, depth, replace=True)
            img_aug = img_orig
            for op in ops:
                img_aug = op(img_aug)
            img = Image.blend(img, img_aug, w)
        return img
    def _apply_basic(self, img, mixing_weights, m):
        # Accumulate the weighted sum of augmented chains on a float canvas,
        # then blend the canvas with the original image by factor m.
        img_shape = (img.size[0], img.size[1], len(img.getbands()))
        mixed = np.zeros(img_shape, dtype=np.float32)
        for mw in mixing_weights:
            depth = (self.depth if (self.depth > 0) else np.random.randint(1, 4))
            ops = np.random.choice(self.ops, depth, replace=True)
            img_aug = img
            for op in ops:
                img_aug = op(img_aug)
            mixed += (mw * np.asarray(img_aug, dtype=np.float32))
        np.clip(mixed, 0, 255.0, out=mixed)
        mixed = Image.fromarray(mixed.astype(np.uint8))
        return Image.blend(img, mixed, m)
    def __call__(self, img):
        # Chain weights ~ Dirichlet(alpha); final mix factor ~ Beta(alpha, alpha).
        mixing_weights = np.float32(np.random.dirichlet(([self.alpha] * self.width)))
        m = np.float32(np.random.beta(self.alpha, self.alpha))
        if self.blended:
            mixed = self._apply_blended(img, mixing_weights, m)
        else:
            mixed = self._apply_basic(img, mixing_weights, m)
        return mixed
def load_dataset(path, split, add_targets=False, split_and_preprocess=False, batch_size=1, prefetch_factor=2):
    """Build an unshuffled, single-process DataLoader over the flag dataset."""
    dataset = FlagSimpleDatasetIterative(path=path, split=split, add_targets=add_targets, split_and_preprocess=split_and_preprocess)
    return DataLoader(dataset, batch_size=batch_size, prefetch_factor=prefetch_factor, shuffle=False, num_workers=0)
class spherical_caps_pdf(PDF):
    """PDF over ray directions drawn uniformly from the spherical caps
    subtended (from `origin`) by a list of importance-sampled objects.

    value() relies on self.ax_w_list and self.cosmax_list, which are only
    cached by generate() — call generate() before value().
    """
    def __init__(self, shape, origin, importance_sampled_list):
        self.shape = shape
        self.origin = origin
        self.importance_sampled_list = importance_sampled_list
        # Number of caps in the mixture.
        self.l = len(importance_sampled_list)
    def value(self, ray_dir):
        # Mixture density: a uniform cap has density 1 / (2*pi*(1 - cos_max))
        # inside its cone and 0 outside; average over the l caps.
        PDF_value = 0.0
        for i in range(self.l):
            PDF_value += np.where((ray_dir.dot(self.ax_w_list[i]) > self.cosmax_list[i]), (1 / (((1 - self.cosmax_list[i]) * 2) * np.pi)), 0.0)
        PDF_value = (PDF_value / self.l)
        return PDF_value
    def generate(self):
        shape = self.shape
        origin = self.origin
        importance_sampled_list = self.importance_sampled_list
        l = self.l
        # For each generated ray, pick one of the l caps uniformly at random.
        mask = (np.random.rand(shape) * l).astype(int)
        mask_list = ([None] * l)
        cosmax_list = ([None] * l)
        ax_u_list = ([None] * l)
        ax_v_list = ([None] * l)
        ax_w_list = ([None] * l)
        for i in range(l):
            # Orthonormal basis (u, v, w) with w pointing at the object center.
            ax_w_list[i] = (importance_sampled_list[i].center - origin).normalize()
            a = vec3.where((np.abs(ax_w_list[i].x) > 0.9), vec3(0, 1, 0), vec3(1, 0, 0))
            ax_v_list[i] = ax_w_list[i].cross(a).normalize()
            ax_u_list[i] = ax_w_list[i].cross(ax_v_list[i])
            mask_list[i] = (mask == i)
            target_distance = np.sqrt((importance_sampled_list[i].center - origin).dot((importance_sampled_list[i].center - origin)))
            # Cap half-angle cosine from the object's bounding-sphere radius.
            cosmax_list[i] = np.sqrt((1 - (np.clip((importance_sampled_list[i].bounded_sphere_radius / target_distance), 0.0, 1.0) ** 2)))
        # Cache per-cap data for value().
        self.cosmax_list = cosmax_list
        self.ax_w_list = ax_w_list
        # Uniform sampling inside each cap: z in [cosmax, 1], phi in [0, 2*pi).
        phi = ((np.random.rand(shape) * 2) * np.pi)
        r2 = np.random.rand(shape)
        cosmax = np.select(mask_list, cosmax_list)
        ax_w = vec3.select(mask_list, ax_w_list)
        ax_v = vec3.select(mask_list, ax_v_list)
        ax_u = vec3.select(mask_list, ax_u_list)
        z = (1.0 + (r2 * (cosmax - 1.0)))
        x = (np.cos(phi) * np.sqrt((1.0 - (z ** 2))))
        y = (np.sin(phi) * np.sqrt((1.0 - (z ** 2))))
        ray_dir = (((ax_u * x) + (ax_v * y)) + (ax_w * z))
        return ray_dir
def _create_effnet(model_kwargs, variant, pretrained=False):
    """Instantiate an EfficientNet (or its feature-extraction variant).

    When 'features_only' is set in model_kwargs, classifier-head kwargs are
    dropped, EfficientNetFeatures is built, and its default_cfg is adapted.
    """
    features_only = model_kwargs.pop('features_only', False)
    if features_only:
        # Feature extractors have no classifier head; discard head kwargs.
        model_kwargs.pop('num_classes', 0)
        model_kwargs.pop('num_features', 0)
        model_kwargs.pop('head_conv', None)
        model_cls = EfficientNetFeatures
    else:
        model_cls = EfficientNet
    model = build_model_with_cfg(model_cls, variant, pretrained, default_cfg=default_cfgs[variant], pretrained_strict=(not features_only), **model_kwargs)
    if features_only:
        model.default_cfg = default_cfg_for_features(model.default_cfg)
    return model
def contractreceivechannelbatchunlock_from_event(canonical_identifier: CanonicalIdentifier, event: DecodedEvent) -> ContractReceiveChannelBatchUnlock:
    """Translate a decoded BatchUnlock blockchain event into a state change."""
    args = event.event_data['args']
    return ContractReceiveChannelBatchUnlock(
        canonical_identifier=canonical_identifier,
        receiver=args['receiver'],
        sender=args['sender'],
        locksroot=args['locksroot'],
        unlocked_amount=args['unlocked_amount'],
        returned_tokens=args['returned_tokens'],
        transaction_hash=event.transaction_hash,
        block_number=event.block_number,
        block_hash=event.block_hash,
    )
class ErrorMessageBox(ErrorWidget):
    """Danger alert showing the current error message with an Ok button."""

    def __init__(self, view):
        super().__init__(view)
        alert_box = self.add_child(Alert(view, _('An error occurred:'), 'danger'))
        alert_box.add_child(HTMLElement(view, 'hr'))
        alert_box.add_child(P(view, text=self.error_message))
        # "Ok" navigates back to wherever the error originated.
        ok_link = alert_box.add_child(A(view, Url(self.error_source_href), description='Ok'))
        ok_link.use_layout(ButtonLayout(style='primary'))
class _cupy_lombscargle_wrapper(object):
def __init__(self, grid, block, kernel):
if isinstance(grid, int):
grid = (grid,)
if isinstance(block, int):
block = (block,)
self.grid = grid
self.block = block
self.kernel = kernel
def __call__(self, x, y, freqs, pgram, y_dot):
kernel_args = (x.shape[0], freqs.shape[0], x, y, freqs, pgram, y_dot)
self.kernel(self.grid, self.block, kernel_args) |
def test_cuda_mig_visible_devices_and_memory_limit_and_nthreads(loop):
    """Workers restricted via MIG UUIDs must each see all MIG devices, with
    the device order rotated across workers."""
    uuids = get_gpu_count_mig(return_uuids=True)[1]
    if (len(uuids) > 0):
        # UUIDs come back as bytes; CUDA_VISIBLE_DEVICES wants a csv string.
        cuda_visible_devices = ','.join([i.decode('utf-8') for i in uuids])
    else:
        pytest.skip('No MIG devices found')
    with patch.dict(os.environ, {'CUDA_VISIBLE_DEVICES': cuda_visible_devices}):
        # NOTE(review): this is len() of the joined *string*, not of the uuid
        # list — surprising as a thread count; confirm it is intentional.
        nthreads = len(cuda_visible_devices)
        with popen(['dask', 'scheduler', '--port', '9359', '--no-dashboard']):
            with popen(['dask', 'cuda', 'worker', '127.0.0.1:9359', '--host', '127.0.0.1', '--nthreads', str(nthreads), '--no-dashboard', '--worker-class', 'dask_cuda.utils_test.MockWorker']):
                with Client('127.0.0.1:9359', loop=loop) as client:
                    assert wait_workers(client, n_gpus=len(uuids))
                    def get_visible_devices():
                        return os.environ['CUDA_VISIBLE_DEVICES']
                    result = client.run(get_visible_devices)
                    wait(result)
                    # Every worker must see all MIG devices...
                    assert all(((len(v.split(',')) == len(uuids)) for v in result.values()))
                    # ...and at each list position the workers collectively
                    # cover the full UUID set (rotation check).
                    for i in range(len(uuids)):
                        assert (set((bytes(v.split(',')[i], 'utf-8') for v in result.values())) == set(uuids))
def test_update_mixin_missing_attrs(gl):
    """validate_attrs accepts complete data and raises on a missing required attr."""

    class M(UpdateMixin, FakeManager):
        _update_attrs = gl_types.RequiredOptional(required=('foo',), optional=('bar', 'baz'))

    mgr = M(gl)
    # Required attribute present: must not raise.
    mgr._update_attrs.validate_attrs(data={'foo': 'bar', 'baz': 'blah'})
    # Required attribute missing: must raise and name it.
    with pytest.raises(AttributeError) as error:
        mgr._update_attrs.validate_attrs(data={'baz': 'blah'})
    assert 'foo' in str(error.value)
def test_process_search(s1_product: SentinelOne):
    """process_search stores one raw full-query entry under the given tag."""
    s1_product.log = logging.getLogger('pytest_surveyor')
    s1_product._queries = {}
    raw_query = 'FileName containsCIS "svchost.exe"'
    s1_product.process_search(Tag('test_query'), {}, raw_query)
    stored = s1_product._queries[Tag('test_query')]
    assert len(stored) == 1
    entry = stored[0]
    # Raw queries carry no parsed parameter/operator/value, only full_query.
    assert entry.parameter is None
    assert entry.operator is None
    assert entry.search_value is None
    assert entry.full_query == raw_query
    # Default search window spans the last 14 days.
    assert entry.end_date - timedelta(days=14) == entry.start_date
_callback_query((tools.option_filter('reposts') & tools.is_admin))
# NOTE(review): the line above appears to be a truncated decorator (its
# "@<handler>." prefix lost) registering this admin-only callback for the
# 'reposts' option — confirm against the original handler registration.
def reposts_config(bot: AutoPoster, callback_query: CallbackQuery):
    """Toggle repost forwarding globally or per domain from a callback query."""
    # Callback data layout: "<option> <domain|'global'> <value|'reset'>".
    data = callback_query.data.split()
    # Digit payloads ("0"/"1") become booleans; other payloads (e.g. 'reset')
    # are kept as raw strings.
    value = (bool(int(data[2])) if data[2].isdigit() else data[2])
    if (data[1] == 'global'):
        bot.config['settings']['send_reposts'] = value
    elif ((data[2] == 'reset') or (bot.config['settings']['send_reposts'] == value)):
        # Reset requested, or the domain value equals the global default:
        # drop the per-domain override entirely.
        if ('send_reposts' in bot.config['domains'][data[1]].keys()):
            bot.config['domains'][data[1]].pop('send_reposts')
    else:
        bot.config['domains'][data[1]]['send_reposts'] = value
    bot.save_config()
    (info, reply_markup) = tools.generate_setting_info(bot, data[1])
    callback_query.edit_message_text(info, reply_markup=reply_markup, disable_web_page_preview=True)
def get_aggregation_strategies(aggregation_strategies):
    """Build per-component aggregation strategies for spatial clustering.

    aggregation_strategies: dict of overrides, with optional sub-dicts under
    the 'buses' and 'generators' keys.

    Returns (bus_strategies, generator_strategies).
    """
    import numpy as np

    # BUG FIX: the original wrapped this import in try/except with a
    # byte-identical import in both branches, making the fallback dead code
    # (a failed import would simply be retried and fail again).
    from pypsa.clustering.spatial import _make_consense

    bus_strategies = dict(country=_make_consense('Bus', 'country'))
    bus_strategies.update(aggregation_strategies.get('buses', {}))
    # Clustered generators get a fresh build year and an unlimited lifetime.
    generator_strategies = {'build_year': (lambda x: 0), 'lifetime': (lambda x: np.inf)}
    generator_strategies.update(aggregation_strategies.get('generators', {}))
    return (bus_strategies, generator_strategies)
def setUpModule():
    """Build the shared helium-dimer test cell and one random k-point.

    Populates the module-level `cell` and `kpts` used by the tests.
    """
    global cell, kpts
    L = 4
    n = 15
    cell = pgto.Cell()
    # Cubic box of side L (Bohr units), 15^3 mesh, two He atoms offset along
    # x/z, with a small three-exponent s-only basis.
    cell.build(unit='B', verbose=5, output='/dev/null', a=((L, 0, 0), (0, L, 0), (0, 0, L)), mesh=[n, n, n], atom=[['He', (((L / 2.0) - 0.5), (L / 2.0), ((L / 2.0) - 0.5))], ['He', ((L / 2.0), (L / 2.0), ((L / 2.0) + 0.5))]], basis={'He': [[0, (0.8, 1.0)], [0, (1.0, 1.0)], [0, (1.2, 1.0)]]})
    # Fixed seed makes the "random" k-point reproducible across runs.
    numpy.random.seed(4)
    kpts = numpy.random.random((1, 3))
class Migration(migrations.Migration):
    # Adds a `locked` boolean to Condition; when set, the condition is
    # protected from further edits.
    dependencies = [('conditions', '0021_related_name')]
    operations = [migrations.AddField(model_name='condition', name='locked', field=models.BooleanField(default=False, help_text='Designates whether this condition can be changed.', verbose_name='Locked'))]
def sane_samples_from_playlist(pathserv, playlist_file):
    """Return playlist samples whose files exist and look like media.

    Raises ExceptionBadSampleInPlaylist listing every rejected sample when
    any entry has a known non-media extension or a missing file.
    """
    non_media_extensions = {'.dbg', '.htm', '.html', '.json', '.log', '.pkl', '.py', '.txt'}
    samples = []
    rejected = []
    for (sample, filename) in pathserv.playlist_generator_from_file(playlist_file):
        extension = os.path.splitext(sample)[1]
        if extension in non_media_extensions or not os.path.isfile(filename):
            rejected.append(sample)
        else:
            samples.append(sample)
    if rejected:
        raise mpexceptions.ExceptionBadSampleInPlaylist(rejected)
    return samples
class InMemoryLogRotationContext(LogRotationContextInterface):
    """Rotation context that drops expired logs from an in-memory list."""

    def __init__(self, expired_logs, all_logs):
        self.expired_logs = expired_logs
        self.all_logs = all_logs

    def __enter__(self):
        return self

    def __exit__(self, ex_type, ex_value, ex_traceback):
        # Only remove the expired entries when the body completed cleanly.
        exited_cleanly = (ex_type is None) and (ex_value is None) and (ex_traceback is None)
        if exited_cleanly:
            for expired in self.expired_logs:
                self.all_logs.remove(expired)

    def yield_logs_batch(self):
        """Yield all expired stored logs as one batch with a placeholder name."""
        batch_filename = '.'.join(('inmemory_model_filename_placeholder', 'txt.gz'))
        yield ([entry.stored_log for entry in self.expired_logs], batch_filename)
class AutoConfig(object):
    """Factory that selects a config class from a pretrained-model name.

    Not instantiable; use ``AutoConfig.from_pretrained(...)``.
    """

    def __init__(self):
        raise EnvironmentError('AutoConfig is designed to be instantiated using the `AutoConfig.from_pretrained(pretrained_model_name_or_path)` method.')

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the config class inferred from the model identifier.

        BUG FIX: the original defined this with a ``cls`` first parameter but
        no ``@classmethod`` decorator, so ``AutoConfig.from_pretrained(name)``
        bound ``name`` to ``cls`` and failed with a TypeError.

        Raises ValueError when no known model keyword is found in the name.
        Note: 'distilbert' and 'roberta' must be checked before 'bert'
        because they contain 'bert' as a substring.
        """
        if ('distilbert' in pretrained_model_name_or_path):
            return DistilBertConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        elif ('roberta' in pretrained_model_name_or_path):
            return RobertaConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        elif ('bert' in pretrained_model_name_or_path):
            return BertConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        elif ('openai-gpt' in pretrained_model_name_or_path):
            return OpenAIGPTConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        elif ('gpt2' in pretrained_model_name_or_path):
            return GPT2Config.from_pretrained(pretrained_model_name_or_path, **kwargs)
        elif ('transfo-xl' in pretrained_model_name_or_path):
            return TransfoXLConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        elif ('xlnet' in pretrained_model_name_or_path):
            return XLNetConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        elif ('xlm' in pretrained_model_name_or_path):
            return XLMConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        raise ValueError("Unrecognized model identifier in {}. Should contains one of 'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', 'xlm', 'roberta'".format(pretrained_model_name_or_path))
@pytest.mark.parametrize('has_artifacts', [False, True])
def test_msr_format_params(has_artifacts: bool):
    """Preset describer output for MSR, with and without DNA artifacts.

    BUG FIX: the decorator line was truncated to ``.parametrize(...)`` (a
    syntax error); the ``@pytest.mark`` prefix is restored.
    """
    preset = PresetManager(None).default_preset_for_game(RandovaniaGame.METROID_SAMUS_RETURNS).get_preset()
    assert isinstance(preset.configuration, MSRConfiguration)
    # Require 20 DNA when artifacts are enabled, otherwise none.
    configuration = dataclasses.replace(preset.configuration, artifacts=MSRArtifactConfig(prefer_metroids=True, prefer_stronger_metroids=True, prefer_bosses=False, required_artifacts=(20 if has_artifacts else 0)))
    result = RandovaniaGame.METROID_SAMUS_RETURNS.data.layout.preset_describer.format_params(configuration)
    assert (dict(result) == {'Logic Settings': ['All tricks disabled'], 'Item Pool': [('Size: 194 of 211' if has_artifacts else 'Size: 174 of 211'), 'Starts with Scan Pulse', 'Progressive Beam, Progressive Jump, Progressive Suit', 'Energy Reserve Tank, Aeion Reserve Tank, Missile Reserve Tank'], 'Gameplay': ['Starts at Surface - East - Landing Site'], 'Difficulty': [], 'Goal': (['20 Metroid DNA', 'Prefers Standard Metroids, Prefers Stronger Metroids'] if has_artifacts else ['Defeat Ridley']), 'Game Changes': ['Super Missile needs Launcher, Power Bomb needs Main', 'Charge Door Buff, Beam Door Buff', 'Open Area 3 Interior East Shortcut, Remove Area Exit Path Grapple Blocks, Remove Surface Scan Pulse Crumble Blocks, Remove Area 1 Chozo Seal Crumble Blocks']})
def initialize(security_class, cmdpairs, security_level='read-write', restrict_path=None):
    """Initialise the module-level security policy and path restriction."""
    global _allowed_requests, _security_level
    (resolved_level, resolved_path) = _set_security_level(security_class, security_level, restrict_path, cmdpairs)
    _security_level = resolved_level
    if resolved_path:
        # Constrain all operations to the resolved path on the local connection.
        reset_restrict_path(rpath.RPath(Globals.local_connection, resolved_path))
    _allowed_requests = _set_allowed_requests(security_class, resolved_level)
def test_arrange_items(view):
    """ArrangeItems moves items to the given positions on redo and back on undo."""
    item1 = BeePixmapItem(QtGui.QImage())
    item1.do_flip()
    view.scene.addItem(item1)
    item2 = BeePixmapItem(QtGui.QImage())
    item2.setRotation(90)
    view.scene.addItem(item2)
    item3 = BeePixmapItem(QtGui.QImage())
    view.scene.addItem(item3)
    large_rect = QtCore.QRectF(0, 0, 100, 80)
    small_rect = QtCore.QRectF(5, 5, 20, 30)
    target_positions = [QtCore.QPointF(1, 2), QtCore.QPointF(203, 204), QtCore.QPointF(307, 308)]
    with patch.object(item1, 'bounding_rect_unselected', return_value=large_rect), \
            patch.object(item2, 'bounding_rect_unselected', return_value=large_rect), \
            patch.object(item3, 'bounding_rect_unselected', return_value=small_rect):
        command = commands.ArrangeItems(view.scene, [item1, item2, item3], target_positions)
        command.redo()
        assert item1.pos() == QtCore.QPointF(101, 2)
        assert item2.pos() == QtCore.QPointF(283, 204)
        assert item3.pos() == QtCore.QPointF(302, 303)
        command.undo()
        assert item1.pos() == QtCore.QPointF(0, 0)
        assert item2.pos() == QtCore.QPointF(0, 0)
        assert item3.pos() == QtCore.QPointF(0, 0)
def get_activations(files, data_type, model, batch_size, size, length, dims, device):
    """Run `model` over all clips and return a (len(files), dims) numpy array.

    files: clip sources; interpreted by `data_type` ('video' or 'frame').
    size/length: spatial resize and clip length used during preprocessing.
    dims: channel count of the model's pooled output.
    Raises NotImplementedError for unknown data_type values.
    """
    model.eval()
    if (batch_size > len(files)):
        print('Warning: batch size is bigger than the data size. Setting batch size to data size')
        batch_size = len(files)
    # Resize, to-tensor, then mean subtraction (std of 1 leaves scale intact).
    transform = torchvision.transforms.Compose([transforms_vid.ClipResize((size, size)), transforms_vid.ClipToTensor(), transforms_vid.ClipNormalize(mean=[114.7748, 107.7354, 99.475], std=[1, 1, 1])])
    if (data_type == 'video'):
        ds = VideoDataset(files, length, transform)
    elif (data_type == 'frame'):
        ds = FrameDataset(files, length, transform)
    else:
        raise NotImplementedError
    dl = torch.utils.data.DataLoader(ds, batch_size=batch_size, drop_last=False, num_workers=cpu_count())
    # Accumulate activations on `device`, move to CPU once at the end.
    pred_arr = torch.zeros(len(files), dims).to(device)
    start_idx = 0
    for batch in tqdm(dl):
        batch = batch.to(device)
        with torch.no_grad():
            pred = model(batch)
        # Pool any remaining spatio-temporal extent down to 1x1x1.
        if ((pred.size(2) != 1) or (pred.size(3) != 1) or (pred.size(4) != 1)):
            pred = adaptive_avg_pool3d(pred, output_size=(1, 1, 1))
        pred = pred.squeeze(4).squeeze(3).squeeze(2)
        pred_arr[start_idx:(start_idx + pred.shape[0])] = pred
        start_idx = (start_idx + pred.shape[0])
    pred_arr = pred_arr.cpu().numpy()
    return pred_arr
def import_CSV(filename: os.PathLike) -> list[btypes.PyTrackObject]:
    """Read tracking objects from a CSV file.

    Every column is parsed as a float and each row becomes one
    PyTrackObject, with its row index assigned as the object ID.
    """
    objects = []
    with open(filename, 'r') as csv_file:
        reader = csv.DictReader(csv_file, delimiter=',', quotechar='|')
        for (row_index, row) in enumerate(reader):
            fields = {key: float(value) for (key, value) in row.items()}
            fields['ID'] = row_index
            objects.append(btypes.PyTrackObject.from_dict(fields))
    return objects
def get_num_layer_layer_wise(var_name, num_max_layer=12):
    """Map a backbone parameter name to its layer-wise decay layer id.

    Embedding tokens map to layer 0, downsample/stage parameters map by
    stage (and, within stage 2, by block), and any other parameter lands
    past the last layer (num_max_layer + 1).
    """
    if var_name in ('backbone.cls_token', 'backbone.mask_token', 'backbone.pos_embed'):
        return 0
    if var_name.startswith('backbone.downsample_layers'):
        stage_id = int(var_name.split('.')[2])
        # Downsample layers sit at the boundary entering each stage.
        return {0: 0, 1: 2, 2: 3, 3: num_max_layer}[stage_id]
    if var_name.startswith('backbone.stages'):
        name_parts = var_name.split('.')
        stage_id = int(name_parts[2])
        block_id = int(name_parts[3])
        if stage_id == 0:
            return 1
        if stage_id == 1:
            return 2
        if stage_id == 2:
            # Stage 2 is deep: group every three blocks into one layer.
            return 3 + (block_id // 3)
        return num_max_layer
    return num_max_layer + 1
def test_cmd_list_input_with_simple_cmd_strings():
    """Two templated cmd strings run in sequence without saving output."""
    args_cmd = get_cmd('tests/testfiles/cmds/args.sh', 'tests\\testfiles\\cmds\\args.bat')
    args2_cmd = get_cmd('tests/testfiles/cmds/args2.sh', 'tests\\testfiles\\cmds\\args2.bat')
    context = Context({
        'a': 'one',
        'b': 'two two',
        'c': 'three',
        'd': args_cmd,
        'e': args2_cmd,
        'cmd': ['{d} {a} "{b}" {c}', '{e} four "five six" seven'],
    })
    pypyr.steps.cmd.run_step(context)
    # Without save=True the step must not write cmdOut into the context.
    assert 'cmdOut' not in context
def get_valid_reader_names(reader):
    """Resolve reader names, rejecting removed ones and warning on pending ones.

    reader: iterable of reader names.
    Raises ValueError for names in OLD_READER_NAMES. Names in
    PENDING_OLD_READER_NAMES are replaced by their new name and trigger a
    FutureWarning; all other names pass through unchanged.
    """
    new_readers = []
    for reader_name in reader:
        if (reader_name in OLD_READER_NAMES):
            raise ValueError("Reader name '{}' has been deprecated, use '{}' instead.".format(reader_name, OLD_READER_NAMES[reader_name]))
        if (reader_name in PENDING_OLD_READER_NAMES):
            new_name = PENDING_OLD_READER_NAMES[reader_name]
            # BUG FIX: the warning message was missing the space between
            # sentences ("soon.Please").
            warnings.warn("Reader name '{}' is being deprecated and will be removed soon. Please use '{}' instead.".format(reader_name, new_name), FutureWarning, stacklevel=2)
            new_readers.append(new_name)
        else:
            new_readers.append(reader_name)
    return new_readers
def main(base_dir, lang_pair):
    """Train (if needed) a joint BPE model and encode all tokenized files.

    base_dir: directory holding 'train.tok.<lang>' and other '*tok*' files.
    lang_pair: e.g. 'en-de'; both sides share one SentencePiece BPE model.
    Every file containing 'tok' is re-encoded into a sibling 'bpe' file.
    """
    (src, tgt) = lang_pair.split('-')
    files = [os.path.join(base_dir, name) for name in os.listdir(base_dir)]
    files = [path for path in files if 'tok' in path]
    vocab_size = 32003
    model_prefix = os.path.join(base_dir, 'm_bpe')
    model_path = model_prefix + '.model'
    src_data_path = os.path.join(base_dir, 'train.tok.{}'.format(src))
    tgt_data_path = os.path.join(base_dir, 'train.tok.{}'.format(tgt))
    # Train the shared BPE model once, on both training files jointly.
    if not os.path.exists(model_path):
        input_files = '{},{}'.format(src_data_path, tgt_data_path)
        spm.SentencePieceTrainer.train('--input={} --model_prefix={} --vocab_size={} --model_type=bpe'.format(input_files, model_prefix, vocab_size))
    sp_bpe = spm.SentencePieceProcessor()
    sp_bpe.load(model_path)
    # The original duplicated the encode loop for src and tgt; factored out.
    for language_suffix in (src, tgt):
        _encode_files_with_bpe(sp_bpe, [path for path in files if path.endswith(language_suffix)])

def _encode_files_with_bpe(sp_bpe, paths):
    """Encode each 'tok' file into its 'bpe' sibling, line by line."""
    for org_f in paths:
        pro_f = org_f.replace('tok', 'bpe')
        print('Processing {} -> {}'.format(org_f, pro_f))
        with open(org_f) as source_file, open(pro_f, 'w') as target_file:
            for line in source_file:
                pieces = sp_bpe.encode_as_pieces(line.strip())
                target_file.write(' '.join(pieces))
                target_file.write('\n')
def gcd1(u, v):
    """Binary (Stein) GCD of two positive integers."""
    assert u > 0
    assert v > 0
    # Factor out the powers of two common to both operands.
    common_twos = 0
    while not ((u | v) & 1):
        common_twos += 1
        u >>= 1
        v >>= 1
    # Make u odd; from here on u stays odd.
    while not (u & 1):
        u >>= 1
    while v:
        # Strip v's factors of two, keep the smaller value in u, subtract.
        while not (v & 1):
            v >>= 1
        if u > v:
            (u, v) = (v, u)
        v -= u
    # Restore the shared powers of two.
    return u << common_twos
class T5Config(PretrainedConfig):
    """Configuration for T5 models.

    BUG FIX: the four common-config accessors at the bottom were defined
    without ``@property``, so e.g. ``config.hidden_size`` returned a bound
    method instead of an int; the decorators are restored so they behave as
    read-only attribute aliases.
    """
    pretrained_config_archive_map = T5_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self, vocab_size=32128, n_positions=512, d_model=512, d_kv=64, d_ff=2048, num_layers=6, num_heads=8, relative_attention_num_buckets=32, dropout_rate=0.1, layer_norm_epsilon=1e-06, initializer_factor=1.0, **kwargs):
        super(T5Config, self).__init__(**kwargs)
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.d_model = d_model
        # d_kv: size of each attention key/value projection per head.
        self.d_kv = d_kv
        # d_ff: inner dimension of the feed-forward layers.
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor

    @property
    def max_position_embeddings(self):
        return self.n_positions

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
def match_context(ds: List[Tuple[(str, str, int)]], docs: List[List[str]], sample_context=2) -> List[ContextualizedExample]:
    """Attach up to `sample_context` textual contexts to each phrase pair.

    ds: (phrase1, phrase2, label) triples.
    docs: tokenized documents used as the context corpus.
    Returns one ContextualizedExample per sampled context combination; a
    phrase that never occurs in the corpus gets a single empty context.
    """
    # FIX: the original collected phrases with a loop variable named
    # ``tuple``, shadowing the builtin; gather the unique phrases directly.
    phrases = list({phrase for pair in ds for phrase in pair[:2]})
    raw_texts = [' '.join(doc) for doc in docs]
    phrase2context: Dict[(str, List[EntityContext])] = defaultdict(list)
    kw_processor = KeywordProcessor()
    kw_processor.add_keywords_from_list(phrases)
    for raw_text in raw_texts:
        # Record every occurrence of any phrase, with its left/right context.
        for (kw, start, end) in kw_processor.extract_keywords(raw_text, span_info=True):
            left_ctx = raw_text[:start].strip()
            right_ctx = raw_text[end:].strip()
            phrase2context[kw].append(EntityContext(left_ctx, kw, right_ctx))
    phrase2context = dict(phrase2context)

    def _sample_contextualized(phrase, contexts, k):
        # Empty-context fallback for phrases never seen in the corpus.
        if (len(contexts) == 0):
            return [EntityContext('', phrase, '')]
        elif (len(contexts) < k):
            return contexts
        else:
            return random.sample(contexts, k)

    contextualized_ds = []
    for (p1, p2, label) in ds:
        p1_samples = _sample_contextualized(p1, phrase2context.get(p1, []), sample_context)
        p2_samples = _sample_contextualized(p2, phrase2context.get(p2, []), sample_context)
        # Cartesian product of sampled contexts for the two phrases.
        for p1s in p1_samples:
            for p2s in p2_samples:
                contextualized_ds.append(ContextualizedExample([p1s, p2s], label))
    return contextualized_ds
class TFAgent(RLAgent):
    """RL agent backed by a private TensorFlow (TF1) graph and session.

    Subclasses implement _build_nets/_build_losses/_build_solvers; this base
    class owns graph construction, normalizer setup and checkpointing.
    """
    RESOURCE_SCOPE = 'resource'
    SOLVER_SCOPE = 'solvers'

    def __init__(self, world, id, json_data):
        self.tf_scope = 'agent'
        # Each agent owns its graph/session so multiple agents don't clash.
        self.graph = tf.Graph()
        self.sess = tf.Session(graph=self.graph)
        super().__init__(world, id, json_data)
        self._build_graph(json_data)
        self._init_normalizers()
        return

    def __del__(self):
        self.sess.close()
        return

    def save_model(self, out_path):
        """Write a checkpoint of the saver-managed variables to out_path."""
        with self.sess.as_default(), self.graph.as_default():
            try:
                save_path = self.saver.save(self.sess, out_path, write_meta_graph=False, write_state=False)
                Logger.print(('Model saved to: ' + save_path))
            except Exception:
                # BUG FIX: the original logged `save_path` here, which is
                # unbound when saver.save() itself raised (NameError inside
                # the handler); report the requested path instead. Also
                # narrowed the bare `except:` to Exception.
                Logger.print(('Failed to save model to: ' + out_path))
        return

    def load_model(self, in_path):
        """Restore a checkpoint and re-sync the normalizers from it."""
        with self.sess.as_default(), self.graph.as_default():
            self.saver.restore(self.sess, in_path)
            self._load_normalizers()
            Logger.print(('Model loaded from: ' + in_path))
        return

    def _get_output_path(self):
        assert (self.output_dir != '')
        file_path = (((self.output_dir + '/agent') + str(self.id)) + '_model.ckpt')
        return file_path

    def _get_int_output_path(self):
        # Intermediate checkpoints are versioned by iteration number.
        assert (self.int_output_dir != '')
        file_path = (self.int_output_dir + '/agent{:d}_models/agent{:d}_int_model_{:010d}.ckpt'.format(self.id, self.id, self.iter))
        return file_path

    def _build_graph(self, json_data):
        """Build nets, losses and solvers inside this agent's graph/scope."""
        with self.sess.as_default(), self.graph.as_default():
            with tf.variable_scope(self.tf_scope):
                self._build_nets(json_data)
                with tf.variable_scope(self.SOLVER_SCOPE):
                    self._build_losses(json_data)
                    self._build_solvers(json_data)
                self._initialize_vars()
                self._build_saver()
        return

    def _init_normalizers(self):
        with self.sess.as_default(), self.graph.as_default():
            self._s_norm.update()
            self._g_norm.update()
            self._a_norm.update()
        return

    # Hooks for subclasses.
    def _build_nets(self, json_data):
        pass

    def _build_losses(self, json_data):
        pass

    def _build_solvers(self, json_data):
        pass

    def _tf_vars(self, scope=''):
        """Trainable variables under this agent's scope (non-empty)."""
        with self.sess.as_default(), self.graph.as_default():
            res = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=((self.tf_scope + '/') + scope))
            assert (len(res) > 0)
        return res

    def _build_normalizers(self):
        """Create state/goal/action normalizers seeded from env offsets/scales."""
        with self.sess.as_default(), self.graph.as_default(), tf.variable_scope(self.tf_scope):
            with tf.variable_scope(self.RESOURCE_SCOPE):
                self._s_norm = TFNormalizer(self.sess, 's_norm', self.get_state_size(), self.world.env.build_state_norm_groups(self.id))
                self._s_norm.set_mean_std((- self.world.env.build_state_offset(self.id)), (1 / self.world.env.build_state_scale(self.id)))
                self._g_norm = TFNormalizer(self.sess, 'g_norm', self.get_goal_size(), self.world.env.build_goal_norm_groups(self.id))
                self._g_norm.set_mean_std((- self.world.env.build_goal_offset(self.id)), (1 / self.world.env.build_goal_scale(self.id)))
                self._a_norm = TFNormalizer(self.sess, 'a_norm', self.get_action_size())
                self._a_norm.set_mean_std((- self.world.env.build_action_offset(self.id)), (1 / self.world.env.build_action_scale(self.id)))
        return

    def _load_normalizers(self):
        self._s_norm.load()
        self._g_norm.load()
        self._a_norm.load()
        return

    def _update_normalizers(self):
        super()._update_normalizers()
        return

    def _initialize_vars(self):
        self.sess.run(tf.global_variables_initializer())
        return

    def _build_saver(self):
        vars = self._get_saver_vars()
        self.saver = tf.train.Saver(vars, max_to_keep=0)
        return

    def _get_saver_vars(self):
        """Global variables to checkpoint, excluding solver state."""
        with self.sess.as_default(), self.graph.as_default():
            vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.tf_scope)
            vars = [v for v in vars if ((('/' + self.SOLVER_SCOPE) + '/') not in v.name)]
            assert (len(vars) > 0)
        return vars

    def _weight_decay_loss(self, scope):
        """Sum of L2 losses over non-bias trainable variables in scope."""
        vars = self._tf_vars(scope)
        vars_no_bias = [v for v in vars if ('bias' not in v.name)]
        loss = tf.add_n([tf.nn.l2_loss(v) for v in vars_no_bias])
        return loss

    def _train(self):
        with self.sess.as_default(), self.graph.as_default():
            super()._train()
        return
class Migration(migrations.Migration):
    # Tweaks SponsorshipPackage.logo_dimension (blank-able, default 175),
    # an internal value controlling logo sizes on the sponsors page.
    dependencies = [('sponsors', '0041_auto__1313')]
    operations = [migrations.AlterField(model_name='sponsorshippackage', name='logo_dimension', field=models.PositiveIntegerField(blank=True, default=175, help_text='Internal value used to control logos dimensions at sponsors page'))]
def _process_image_files_batch(coder, thread_index, ranges, name, all_sets, vocab, num_shards):
    """Worker: write this thread's share of `all_sets` into TFRecord shards.

    ranges[thread_index] is the [start, end) slice of all_sets this thread
    handles; the slice is split evenly into num_shards/num_threads shard
    files named '<name>-XXXXX-of-NNNNN' in FLAGS.output_directory. Sets that
    fail conversion are reported and skipped.
    (Uses Python 2 / TF1 APIs: xrange, tf.python_io.)
    """
    num_threads = len(ranges)
    # Shards must divide evenly among the worker threads.
    assert (not (num_shards % num_threads))
    num_shards_per_batch = int((num_shards / num_threads))
    # Evenly spaced file-index boundaries for this thread's shards.
    shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1], (num_shards_per_batch + 1)).astype(int)
    num_files_in_thread = (ranges[thread_index][1] - ranges[thread_index][0])
    counter = 0
    for s in xrange(num_shards_per_batch):
        # Global shard index across all threads.
        shard = ((thread_index * num_shards_per_batch) + s)
        output_filename = ('%s-%.5d-of-%.5d' % (name, shard, num_shards))
        output_file = os.path.join(FLAGS.output_directory, output_filename)
        writer = tf.python_io.TFRecordWriter(output_file)
        shard_counter = 0
        files_in_shard = np.arange(shard_ranges[s], shard_ranges[(s + 1)], dtype=int)
        for i in files_in_shard:
            sequence_example = _to_sequence_example(all_sets[i], coder, vocab)
            if (not sequence_example):
                # Conversion failed; skip this set but keep going.
                print(('fail for set: ' + all_sets[i]['set_id']))
                continue
            writer.write(sequence_example.SerializeToString())
            shard_counter += 1
            counter += 1
            if (not (counter % 100)):
                print(('%s [thread %d]: Processed %d of %d images in thread batch.' % (datetime.now(), thread_index, counter, num_files_in_thread)))
                sys.stdout.flush()
        writer.close()
        print(('%s [thread %d]: Wrote %d images to %s' % (datetime.now(), thread_index, shard_counter, output_file)))
        sys.stdout.flush()
        shard_counter = 0
    print(('%s [thread %d]: Wrote %d images to %d shards.' % (datetime.now(), thread_index, counter, num_files_in_thread)))
    sys.stdout.flush()
class TestQuantSimRangeLearning:
    """Tests for QuantizationSimModel with range-learning quant schemes:
    quantize-op parameter wiring, the custom gradient math against a NumPy
    reference, QAT training loops, overflow checks and export behavior.

    NOTE(review): the ``@pytest.mark.cuda`` / ``@pytest.mark.tf1`` /
    ``@pytest.mark.parametrize`` decorators were garbled to bare ``.cuda`` /
    ``.tf1`` / ``.parametrize`` lines (a syntax error); they are restored here.
    """

    def test_cpu_model_quantize_op_input_params_update(self):
        """CPU: quantize ops must expose the configured bitwidths, symmetry
        flags and non-trivial encodings after compute_encodings()."""
        tf.compat.v1.reset_default_graph()
        with tf.device('/cpu:0'):
            model = tf.keras.Sequential()
            model.add(tf.keras.layers.Conv2D(32, kernel_size=3, input_shape=(28, 28, 3), activation='relu'))
            model.add(tf.keras.layers.MaxPooling2D((2, 2)))
            model.add(tf.keras.layers.Conv2D(64, kernel_size=3, activation='relu'))
            model.summary()
        sess = tf.compat.v1.Session()
        initialize_uninitialized_vars(sess)
        sim = QuantizationSimModel(sess, ['conv2d_input'], ['conv2d_1/Relu'], use_cuda=False,
                                   default_output_bw=6,
                                   quant_scheme=QuantScheme.training_range_learning_with_tf_init)

        def dummy_forward_pass(sess, args):
            model_output = sess.graph.get_tensor_by_name('conv2d_1/Relu_quantized:0')
            model_input = sess.graph.get_tensor_by_name('conv2d_input:0')
            np.random.seed(0)
            dummy_input = (7 * np.random.randn(20, 28, 28, 3)) + 8
            sess.run(model_output, feed_dict={model_input: dummy_input})

        sim.compute_encodings(dummy_forward_pass, None)
        # Input/bias quantizers stay disabled; every other activation
        # quantizer must have computed a valid encoding.
        for name, quantizer in sim._activation_quantizers.items():
            if name not in ['conv2d_input_quantized', 'conv2d/BiasAdd_quantized', 'conv2d_1/BiasAdd_quantized']:
                assert quantizer.tensor_quantizer.isEncodingValid
        with sim.session.graph.as_default():
            conv2d_weight_quant_op = sim.session.graph.get_operation_by_name('conv2d/Conv2D/ReadVariableOp_quantized')
            conv2d_weight_encoding_min = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_min])
            conv2d_weight_encoding_max = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_max])
            conv2d_output_quant_op = sim.session.graph.get_operation_by_name('conv2d/Relu_quantized')
            conv2d_output_encoding_min = sim.session.run(conv2d_output_quant_op.inputs[QuantizeOpIndices.encoding_min])
            conv2d_output_encoding_max = sim.session.run(conv2d_output_quant_op.inputs[QuantizeOpIndices.encoding_max])
        # Params keep the default 8-bit symmetric config; outputs use bw=6.
        assert 8 == sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.bit_width])
        assert 6 == sim.session.run(conv2d_output_quant_op.inputs[QuantizeOpIndices.bit_width])
        assert sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.use_symmetric_encoding])
        assert not sim.session.run(conv2d_output_quant_op.inputs[QuantizeOpIndices.use_symmetric_encoding])
        assert conv2d_weight_encoding_min != 0.0
        assert conv2d_weight_encoding_max != 0.0
        # Relu output is non-negative, so its min encoding stays at zero.
        assert conv2d_output_encoding_min == 0.0
        assert conv2d_output_encoding_max != 0.0
        sess.close()
        sim.session.close()

    @pytest.mark.cuda
    def test_gpu_model_quantize_op_input_params_update(self):
        """GPU variant of the quantize-op parameter check (param bw=6, output bw=4)."""
        tf.compat.v1.reset_default_graph()
        with tf.device('/gpu:0'):
            model = tf.keras.Sequential()
            model.add(tf.keras.layers.Conv2D(32, kernel_size=3, input_shape=(28, 28, 3), activation='relu'))
            model.add(tf.keras.layers.MaxPooling2D((2, 2)))
            model.add(tf.keras.layers.Conv2D(64, kernel_size=3, activation='relu'))
            model.summary()
        sess = tf.compat.v1.Session()
        initialize_uninitialized_vars(sess)
        sim = QuantizationSimModel(sess, ['conv2d_input'], ['conv2d_1/Relu'], use_cuda=True,
                                   default_output_bw=4, default_param_bw=6,
                                   quant_scheme=QuantScheme.training_range_learning_with_tf_init)

        def dummy_forward_pass(sess, args):
            model_output = sess.graph.get_tensor_by_name('conv2d_1/Relu_quantized:0')
            model_input = sess.graph.get_tensor_by_name('conv2d_input:0')
            np.random.seed(0)
            dummy_input = (7 * np.random.randn(20, 28, 28, 3)) + 8
            sess.run(model_output, feed_dict={model_input: dummy_input})

        sim.compute_encodings(dummy_forward_pass, None)
        for name, quantizer in sim._activation_quantizers.items():
            if name not in ['conv2d_input_quantized', 'conv2d/BiasAdd_quantized', 'conv2d_1/BiasAdd_quantized']:
                assert quantizer.tensor_quantizer.isEncodingValid
        with sim.session.graph.as_default():
            conv2d_weight_quant_op = sim.session.graph.get_operation_by_name('conv2d/Conv2D/ReadVariableOp_quantized')
            conv2d_weight_encoding_min = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_min])
            conv2d_weight_encoding_max = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_max])
            conv2d_output_quant_op = sim.session.graph.get_operation_by_name('conv2d/Relu_quantized')
            conv2d_output_encoding_min = sim.session.run(conv2d_output_quant_op.inputs[QuantizeOpIndices.encoding_min])
            conv2d_output_encoding_max = sim.session.run(conv2d_output_quant_op.inputs[QuantizeOpIndices.encoding_max])
        assert 6 == sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.bit_width])
        assert 4 == sim.session.run(conv2d_output_quant_op.inputs[QuantizeOpIndices.bit_width])
        assert sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.use_symmetric_encoding])
        assert not sim.session.run(conv2d_output_quant_op.inputs[QuantizeOpIndices.use_symmetric_encoding])
        assert conv2d_weight_encoding_min != 0.0
        assert conv2d_weight_encoding_max != 0.0
        assert conv2d_output_encoding_min == 0.0
        assert conv2d_output_encoding_max != 0.0
        sess.close()
        sim.session.close()

    @staticmethod
    def _compute_gradients_numpy(x, encoding_min, encoding_max, bitwidth, use_symmetric_encoding):
        """NumPy reference for the range-learning quantizer gradient.

        Returns a tuple ``(dq_by_dx, d/d_min, d/d_max)`` for input ``x`` and
        the given encoding range, to compare against the TF custom gradient.
        """
        steps = (np.power(2, bitwidth) - 1).astype(np.float32)
        scaling = ((encoding_max - encoding_min) / steps).astype(np.float32)
        offset = np.round(encoding_min / scaling).astype(np.float32)
        r_x_by_s_plus_round_o = (np.round(x / scaling) + np.round(offset)).astype(np.float32)
        r_x_by_s_minus_x_by_s = (np.round(x / scaling) - (x / scaling)).astype(np.float32)
        n = 0.0
        if use_symmetric_encoding:
            # NOTE(review): lower clamp for the symmetric case as originally
            # written; the tests here exercise the asymmetric path.
            n = (-1) * (np.power(2, bitwidth) + 1).astype(np.float32)
        p = (np.power(2, bitwidth) - 1).astype(np.float32)
        dq_by_dmax = []
        # Fixed-size 4 matches the 2x2 conv kernel used by the caller —
        # generalize before reusing with larger tensors.
        dq_by_dx = [0.0, 0.0, 0.0, 0.0]
        r_x_by_s_plus_round_o_flat = np.ndarray.flatten(r_x_by_s_plus_round_o)
        r_x_by_s_minus_x_by_s_flat = np.ndarray.flatten(r_x_by_s_minus_x_by_s)
        for i, each_elem in enumerate(r_x_by_s_plus_round_o_flat):
            if n <= each_elem <= p:
                # Inside the representable range: STE passes the input grad.
                dq_by_dmax.append((r_x_by_s_minus_x_by_s_flat[i] * 1.0) / steps)
                dq_by_dx[i] = 1.0
            elif each_elem < n:
                dq_by_dmax.append((n * 1.0) / steps)
            else:
                dq_by_dmax.append((p * 1.0) / steps)
        dq_by_dmax_reduced = np.sum(dq_by_dmax).astype(np.float32)
        return (dq_by_dx, -dq_by_dmax_reduced.astype(np.float32), dq_by_dmax_reduced.astype(np.float32))

    @pytest.mark.tf1
    def test_qc_custom_gradient_backward_pass(self):
        """The TF custom gradient of a weight quantize op must match the
        NumPy reference for d/dx, d/d_min and d/d_max."""
        tf.compat.v1.reset_default_graph()
        tf.compat.v1.set_random_seed(0)
        with tf.device('/cpu:0'):
            inputs = tf.keras.Input(shape=(2, 2, 1))
            conv_op = tf.keras.layers.Conv2D(1, (2, 2), kernel_initializer=tf.random_uniform_initializer(-1, 2), bias_initializer='random_uniform')(inputs)
            _ = tf.nn.relu(conv_op)
        sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph())
        initialize_uninitialized_vars(sess)
        sim = QuantizationSimModel(sess, ['input_1'], ['Relu'], use_cuda=False, quant_scheme=QuantScheme.training_range_learning_with_tf_init)
        np.random.seed(0)

        def dummy_forward_pass(sess, args):
            model_output = sess.graph.get_tensor_by_name('Relu_quantized:0')
            model_input = sess.graph.get_tensor_by_name('input_1:0')
            shape = model_input.shape
            dummy_input = np.random.randn(1, shape[1], shape[2], shape[3])
            sess.run(model_output, feed_dict={model_input: dummy_input})

        sim.compute_encodings(dummy_forward_pass, None)
        with sim.session.graph.as_default():
            conv2d_weight_quant_op = sim.session.graph.get_operation_by_name('conv2d/Conv2D/ReadVariableOp_quantized')
            conv2d_weight_quant_op_output = sim.session.graph.get_tensor_by_name('conv2d/Conv2D/ReadVariableOp_quantized:0')
            inp_tensor = conv2d_weight_quant_op.inputs[0]
            np.random.seed(0)
            w_shape = inp_tensor.shape
            inp_data = np.random.rand(2, w_shape[1], w_shape[2], w_shape[3])
            grads = tf.gradients(conv2d_weight_quant_op_output, [inp_tensor, conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_min], conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_max]])
        assert len(grads) == 3
        dlossbydx, dlossbydmin, dlossbydmax = grads
        assert dlossbydx is not None
        assert dlossbydmin is not None
        assert dlossbydmax is not None
        with sim.session.graph.as_default():
            enc_min = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_min])
            enc_max = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_max])
            bw = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.bit_width])
            use_symmetric_encoding = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.use_symmetric_encoding])
            min_gradient = sim.session.run([dlossbydmin], feed_dict={inp_tensor: inp_data})[0]
            max_gradient = sim.session.run([dlossbydmax], feed_dict={inp_tensor: inp_data})[0]
            input_gradient = sim.session.run([dlossbydx], feed_dict={inp_tensor: inp_data})[0]
        numpy_dq_by_dx, numpy_min_grad, numpy_max_grad = TestQuantSimRangeLearning._compute_gradients_numpy(inp_data.reshape(-1), enc_min, enc_max, bw, use_symmetric_encoding)
        assert np.isclose(numpy_min_grad.astype(float), min_gradient, atol=1e-06)
        assert np.isclose(numpy_max_grad.astype(float), max_gradient, atol=1e-06)
        assert np.allclose(numpy_dq_by_dx, input_gradient)
        sess.close()
        sim.session.close()

    @pytest.mark.tf1
    def test_qat_fp16(self, iterations=5):
        """QAT with fp16 (float data type) quantizers: weights must move under
        SGD, but only by a small amount per step."""
        tf.compat.v1.reset_default_graph()
        tf.compat.v1.set_random_seed(0)
        np.random.seed(0)
        with tf.device('/cpu:0'):
            inputs = tf.keras.Input(shape=(32, 32, 4))
            conv_op = tf.keras.layers.Conv2D(2, (3, 3), kernel_initializer=tf.random_uniform_initializer(-1, 2), bias_initializer='random_uniform', padding='SAME')(inputs)
            relu_op = tf.nn.relu(conv_op)
            reshape = tf.keras.layers.Flatten()(relu_op)
            _ = tf.keras.layers.Dense(10, bias_initializer='random_uniform')(reshape)
        sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph())
        initialize_uninitialized_vars(sess)
        sim = QuantizationSimModel(sess, ['input_1'], ['dense/BiasAdd'], use_cuda=False,
                                   quant_scheme=QuantScheme.post_training_tf,
                                   default_output_bw=16, default_param_bw=16,
                                   default_data_type=QuantizationDataType.float)

        def dummy_forward_pass(sess, _):
            model_output = sess.graph.get_tensor_by_name('dense/BiasAdd_quantized:0')
            model_input = sess.graph.get_tensor_by_name('input_1:0')
            shape = model_input.shape
            dummy_input = np.random.randn(1, shape[1], shape[2], shape[3])
            sess.run(model_output, feed_dict={model_input: dummy_input})

        conv2d_weight_quant_op = sim.session.graph.get_operation_by_name('conv2d/Conv2D/ReadVariableOp_quantized')
        sim.compute_encodings(dummy_forward_pass, None)
        inp_tensor = sim.session.graph.get_tensor_by_name('input_1:0')
        w_shape = inp_tensor.shape
        batches = 32
        inp_data = np.random.rand(batches, w_shape[1], w_shape[2], w_shape[3])
        logits = sim.session.graph.get_tensor_by_name('dense/BiasAdd_quantized:0')
        labels = np.random.randint(10, size=batches)
        one_hot_labels = np.eye(10)[labels]
        with sim.session.graph.as_default():
            var_list = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
            labels_placeholder = tf.compat.v1.placeholder(tf.float32, [None, 10], name='labels')
            loss = tf.compat.v1.losses.softmax_cross_entropy(onehot_labels=labels_placeholder, logits=logits)
            update_ops = []
            global_step = tf.compat.v1.train.create_global_step()
            initialize_uninitialized_vars(sim.session)
            optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.001)
            gradients = optimizer.compute_gradients(loss, var_list)
            grad_updates = optimizer.apply_gradients(gradients, global_step=global_step)
            update_ops.append(grad_updates)
            update_op = tf.group(*update_ops)
            with tf.control_dependencies([update_op]):
                train_op = tf.identity(loss, name='train_op')
            for _ in range(iterations):
                weights_before_train = sim.session.run(conv2d_weight_quant_op.inputs[0])
                _ = sim.session.run(train_op, feed_dict={inp_tensor: inp_data, labels_placeholder: one_hot_labels})
                weights_after_train = sim.session.run(conv2d_weight_quant_op.inputs[0])
                # Weights changed, but by less than 0.01 per SGD step.
                assert np.allclose(weights_before_train, weights_after_train, atol=0.01)
                assert not np.allclose(weights_before_train, weights_after_train, atol=0.001)

    def test_qc_custom_gradient_training_loop_range_learning(self, iterations=1):
        """Time a short training loop through the range-learning custom
        gradient and return the average step time."""
        tf.compat.v1.reset_default_graph()
        tf.compat.v1.set_random_seed(0)
        with tf.device('/cpu:0'):
            inputs = tf.keras.Input(shape=(32, 32, 1))
            conv_op = tf.keras.layers.Conv2D(1, (2, 2), kernel_initializer=tf.random_uniform_initializer(-1, 2), bias_initializer='random_uniform')(inputs)
            relu_op = tf.nn.relu(conv_op)
            reshape = tf.keras.layers.Flatten()(relu_op)
            _ = tf.keras.layers.Dense(10)(reshape)
        sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph())
        initialize_uninitialized_vars(sess)
        sim = QuantizationSimModel(sess, ['input_1'], ['dense/BiasAdd'], use_cuda=False, quant_scheme=QuantScheme.training_range_learning_with_tf_init)
        np.random.seed(0)

        def dummy_forward_pass(sess, args):
            model_output = sess.graph.get_tensor_by_name('dense/MatMul:0')
            model_input = sess.graph.get_tensor_by_name('input_1:0')
            shape = model_input.shape
            dummy_input = np.random.randn(1, shape[1], shape[2], shape[3])
            sess.run(model_output, feed_dict={model_input: dummy_input})

        sim.compute_encodings(dummy_forward_pass, None)
        inp_tensor = sim.session.graph.get_tensor_by_name('input_1:0')
        np.random.seed(0)
        w_shape = inp_tensor.shape
        batches = 32
        inp_data = np.random.rand(batches, w_shape[1], w_shape[2], w_shape[3])
        logits = sim.session.graph.get_tensor_by_name('dense/MatMul:0')
        labels = np.random.randint(10, size=batches)
        one_hot_labels = np.eye(10)[labels]
        with sim.session.graph.as_default():
            var_list = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
            labels_placeholder = tf.compat.v1.placeholder(tf.float32, [None, 10], name='labels')
            loss = tf.compat.v1.losses.softmax_cross_entropy(onehot_labels=labels_placeholder, logits=logits)
            update_ops = []
            global_step = tf.compat.v1.train.create_global_step()
            optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.001)
            gradients = optimizer.compute_gradients(loss, var_list)
            init_global = tf.compat.v1.global_variables_initializer()
            init_local = tf.compat.v1.local_variables_initializer()
            init = tf.group(init_global, init_local)
            sim.session.run(init)
            grad_updates = optimizer.apply_gradients(gradients, global_step=global_step)
            update_ops.append(grad_updates)
            update_op = tf.group(*update_ops)
            with tf.control_dependencies([update_op]):
                train_op = tf.identity(loss, name='train_op')
            time_taken_by_default_grad = 0
            for i in range(iterations):
                start_time = time.perf_counter()
                _ = sim.session.run(train_op, feed_dict={inp_tensor: inp_data, labels_placeholder: one_hot_labels})
                exec_time = time.perf_counter() - start_time
                time_taken_by_default_grad = time_taken_by_default_grad + exec_time
            default_grad_avg_time = time_taken_by_default_grad / iterations
        print('Avg time taken by custom grad', default_grad_avg_time)
        sess.close()
        sim.session.close()
        return default_grad_avg_time

    def test_qc_custom_gradient_training_loop_pass_through(self, iterations=1):
        """Same timing loop as the range-learning test, but with the
        tf-enhanced-init scheme (pass-through gradient baseline)."""
        tf.compat.v1.reset_default_graph()
        tf.compat.v1.set_random_seed(0)
        with tf.device('/cpu:0'):
            inputs = tf.keras.Input(shape=(32, 32, 1))
            conv_op = tf.keras.layers.Conv2D(1, (2, 2), kernel_initializer=tf.random_uniform_initializer(-1, 2), bias_initializer='random_uniform')(inputs)
            relu_op = tf.nn.relu(conv_op)
            reshape = tf.keras.layers.Flatten()(relu_op)
            _ = tf.keras.layers.Dense(10)(reshape)
        sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph())
        initialize_uninitialized_vars(sess)
        sim = QuantizationSimModel(sess, ['input_1'], ['dense/BiasAdd'], use_cuda=False, quant_scheme=QuantScheme.training_range_learning_with_tf_enhanced_init)
        np.random.seed(0)

        def dummy_forward_pass(sess, args):
            model_output = sess.graph.get_tensor_by_name('dense/MatMul:0')
            model_input = sess.graph.get_tensor_by_name('input_1:0')
            shape = model_input.shape
            dummy_input = np.random.randn(1, shape[1], shape[2], shape[3])
            sess.run(model_output, feed_dict={model_input: dummy_input})

        sim.compute_encodings(dummy_forward_pass, None)
        inp_tensor = sim.session.graph.get_tensor_by_name('input_1:0')
        np.random.seed(0)
        w_shape = inp_tensor.shape
        batches = 32
        inp_data = np.random.rand(batches, w_shape[1], w_shape[2], w_shape[3])
        logits = sim.session.graph.get_tensor_by_name('dense/MatMul:0')
        labels = np.random.randint(10, size=batches)
        one_hot_labels = np.eye(10)[labels]
        with sim.session.graph.as_default():
            var_list = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
            labels_placeholder = tf.compat.v1.placeholder(tf.float32, [None, 10], name='labels')
            loss = tf.compat.v1.losses.softmax_cross_entropy(onehot_labels=labels_placeholder, logits=logits)
            update_ops = []
            global_step = tf.compat.v1.train.create_global_step()
            optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.001)
            gradients = optimizer.compute_gradients(loss, var_list)
            init_global = tf.compat.v1.global_variables_initializer()
            init_local = tf.compat.v1.local_variables_initializer()
            init = tf.group(init_global, init_local)
            sim.session.run(init)
            grad_updates = optimizer.apply_gradients(gradients, global_step=global_step)
            update_ops.append(grad_updates)
            update_op = tf.group(*update_ops)
            with tf.control_dependencies([update_op]):
                train_op = tf.identity(loss, name='train_op')
            time_taken_by_default_grad = 0
            for i in range(iterations):
                start_time = time.perf_counter()
                _ = sim.session.run(train_op, feed_dict={inp_tensor: inp_data, labels_placeholder: one_hot_labels})
                exec_time = time.perf_counter() - start_time
                time_taken_by_default_grad = time_taken_by_default_grad + exec_time
            default_grad_avg_time = time_taken_by_default_grad / iterations
        print('Avg time taken by custom grad', default_grad_avg_time)
        sess.close()
        sim.session.close()
        return default_grad_avg_time

    def test_accumulator_overflow(self):
        """check_accumulator_overflow must flag the conv layer built with
        saturated (255) weights and report the expected range usage."""
        tf.compat.v1.reset_default_graph()
        sess = tf.compat.v1.Session()
        with sess.graph.as_default():
            inputs = tf.keras.Input(shape=(8, 8, 3))
            _ = tf.keras.layers.Conv2D(10, (2, 2), kernel_initializer=tf.constant_initializer(255))(inputs)
            init = tf.compat.v1.global_variables_initializer()
            sess.run(init)
        layer, range_used = check_accumulator_overflow(sess, 8, 27)
        assert 'conv2d/Conv2D' == layer
        assert (100 * range_used) == pytest.approx(0.5836, 0.001)
        sess.close()

    def _compare_range_learning_with_default_grad(self):
        """Helper: print the % step-time overhead of range learning vs the
        pass-through gradient (not collected as a test)."""
        iterations = 10
        pass_through_grad_avg_time = self.test_qc_custom_gradient_training_loop_pass_through(iterations)
        range_learning_avg_time = self.test_qc_custom_gradient_training_loop_range_learning(iterations)
        print('% increase ', ((range_learning_avg_time - pass_through_grad_avg_time) / pass_through_grad_avg_time) * 100)

    def test_qc_custom_gradient_training_loop_param_learning(self):
        """One training step with range learning: weights AND both weight and
        activation encodings must all move; export must not change outputs."""
        tf.compat.v1.reset_default_graph()
        tf.compat.v1.set_random_seed(0)
        np.random.seed(0)
        with tf.device('/cpu:0'):
            inputs = tf.keras.Input(shape=(32, 32, 1))
            conv_op = tf.keras.layers.Conv2D(1, (2, 2), kernel_initializer=tf.random_uniform_initializer(-1, 2), bias_initializer='random_uniform', padding='SAME')(inputs)
            relu_op = tf.nn.relu(conv_op)
            reshape = tf.keras.layers.Flatten()(relu_op)
            _ = tf.keras.layers.Dense(10)(reshape)
        sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph())
        initialize_uninitialized_vars(sess)
        sim = QuantizationSimModel(sess, ['input_1'], ['dense/BiasAdd'], use_cuda=False, quant_scheme=QuantScheme.training_range_learning_with_tf_init)
        for quant_op_name in sim._param_quantizers.keys():
            print(sim._param_quantizers[quant_op_name])
        for quant_op_name in sim._activation_quantizers.keys():
            print(sim._activation_quantizers[quant_op_name])

        def dummy_forward_pass(sess, args):
            model_output = sess.graph.get_tensor_by_name('dense/MatMul:0')
            model_input = sess.graph.get_tensor_by_name('input_1:0')
            shape = model_input.shape
            dummy_input = np.random.randn(1, shape[1], shape[2], shape[3])
            sess.run(model_output, feed_dict={model_input: dummy_input})

        conv2d_weight_quant_op = sim.session.graph.get_operation_by_name('conv2d/Conv2D/ReadVariableOp_quantized')
        relu_output_quant_op = sim.session.graph.get_operation_by_name('Relu_quantized')
        sim.compute_encodings(dummy_forward_pass, None)
        inp_tensor = sim.session.graph.get_tensor_by_name('input_1:0')
        np.random.seed(0)
        w_shape = inp_tensor.shape
        batches = 32
        inp_data = np.random.rand(batches, w_shape[1], w_shape[2], w_shape[3])
        logits = sim.session.graph.get_tensor_by_name('dense/MatMul:0')
        labels = np.random.randint(10, size=batches)
        one_hot_labels = np.eye(10)[labels]
        with sim.session.graph.as_default():
            var_list = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
            labels_placeholder = tf.compat.v1.placeholder(tf.float32, [None, 10], name='labels')
            loss = tf.compat.v1.losses.softmax_cross_entropy(onehot_labels=labels_placeholder, logits=logits)
            update_ops = []
            global_step = tf.compat.v1.train.create_global_step()
            initialize_uninitialized_vars(sim.session)
            optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.001)
            gradients = optimizer.compute_gradients(loss, var_list)
            sim.compute_encodings(dummy_forward_pass, None)
            grad_updates = optimizer.apply_gradients(gradients, global_step=global_step)
            update_ops.append(grad_updates)
            update_op = tf.group(*update_ops)
            # Smoke-check that gradients through the quantize op evaluate.
            conv_inp_tensor = conv2d_weight_quant_op.inputs[0]
            grads = tf.gradients(loss, [conv_inp_tensor, conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_min], conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_max]])
            dqbydx, dqbydmin, dqbydmax = grads
            input_gradient = sim.session.run([dqbydx], feed_dict={inp_tensor: inp_data, labels_placeholder: one_hot_labels})[0]
            min_gradient = sim.session.run([dqbydmin], feed_dict={inp_tensor: inp_data, labels_placeholder: one_hot_labels})[0]
            max_gradient = sim.session.run([dqbydmax], feed_dict={inp_tensor: inp_data, labels_placeholder: one_hot_labels})[0]
            weights_before_train = sim.session.run(conv2d_weight_quant_op.inputs[0])
            encoding_min_before_train = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_min])
            encoding_max_before_train = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_max])
            relu_output_encoding_min_before_train = sim.session.run(relu_output_quant_op.inputs[QuantizeOpIndices.encoding_min])
            relu_output_encoding_max_before_train = sim.session.run(relu_output_quant_op.inputs[QuantizeOpIndices.encoding_max])
            with tf.control_dependencies([update_op]):
                train_op = tf.identity(loss, name='train_op')
            for quant_op_name in sim._param_quantizers.keys():
                print((quant_op_name + '_min_before_train = ') + str(sim.session.run(sim.session.graph.get_operation_by_name(quant_op_name).inputs[QuantizeOpIndices.encoding_min])))
                print((quant_op_name + '_max_before_train = ') + str(sim.session.run(sim.session.graph.get_operation_by_name(quant_op_name).inputs[QuantizeOpIndices.encoding_max])))
            _ = sim.session.run(train_op, feed_dict={inp_tensor: inp_data, labels_placeholder: one_hot_labels})
            for quant_op_name in sim._param_quantizers.keys():
                print((quant_op_name + '_min = ') + str(sim.session.run(sim.session.graph.get_operation_by_name(quant_op_name).inputs[QuantizeOpIndices.encoding_min])))
                print((quant_op_name + '_max = ') + str(sim.session.run(sim.session.graph.get_operation_by_name(quant_op_name).inputs[QuantizeOpIndices.encoding_max])))
            weights_after_train = sim.session.run(conv2d_weight_quant_op.inputs[0])
            relu_output_encoding_min_after_train = sim.session.run(relu_output_quant_op.inputs[QuantizeOpIndices.encoding_min])
            relu_output_encoding_max_after_train = sim.session.run(relu_output_quant_op.inputs[QuantizeOpIndices.encoding_max])
            encoding_min_after_train = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_min])
            encoding_max_after_train = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_max])
            # Training must have updated the weights and all learned encodings.
            assert not np.allclose(weights_before_train, weights_after_train, atol=1e-06)
            assert encoding_min_before_train != encoding_min_after_train
            assert encoding_max_before_train != encoding_max_after_train
            assert relu_output_encoding_min_before_train != relu_output_encoding_min_after_train
            assert relu_output_encoding_max_before_train != relu_output_encoding_max_after_train
            # Export must leave the simulated outputs unchanged.
            baseline = sim.session.run(logits, feed_dict={inp_tensor: inp_data})
            sim.export('/tmp', 'quant_sim_model')
            after_sim_export = sim.session.run(logits, feed_dict={inp_tensor: inp_data})
            assert np.allclose(baseline, after_sim_export)
        sess.close()
        sim.session.close()

    @pytest.mark.cuda
    @pytest.mark.parametrize('quant_scheme', [QuantScheme.post_training_tf, QuantScheme.training_range_learning_with_tf_init, QuantScheme.post_training_tf_enhanced, QuantScheme.training_range_learning_with_tf_enhanced_init])
    def test_initialization_and_export_non_strict_symmetric(self, quant_scheme) -> None:
        """Non-strict symmetric per-tensor: range-learning schemes initialize
        min == -max; export always yields min == -max - scale, offset == -128."""
        tf.compat.v1.reset_default_graph()
        with tf.device('/gpu:0'):
            inputs = tf.keras.Input(shape=(32, 32, 4))
            conv_op = tf.keras.layers.Conv2D(2, (3, 3), kernel_initializer=tf.random_uniform_initializer(-1, 2), bias_initializer='random_uniform', padding='SAME')(inputs)
            relu_op = tf.nn.relu(conv_op)
            reshape = tf.keras.layers.Flatten()(relu_op)
            _ = tf.keras.layers.Dense(10, bias_initializer='random_uniform')(reshape)
        sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph())
        initialize_uninitialized_vars(sess)
        sim = QuantizationSimModel(sess, ['input_1'], ['dense/BiasAdd'], use_cuda=True, quant_scheme=quant_scheme)

        def dummy_forward_pass(_sess, _):
            model_output = _sess.graph.get_tensor_by_name('dense/BiasAdd_quantized:0')
            model_input = _sess.graph.get_tensor_by_name('input_1:0')
            shape = model_input.shape
            dummy_input = np.random.randn(1, shape[1], shape[2], shape[3])
            _sess.run(model_output, feed_dict={model_input: dummy_input})

        conv2d_weight_quant_op = sim.session.graph.get_operation_by_name('conv2d/Conv2D/ReadVariableOp_quantized')
        sim.compute_encodings(dummy_forward_pass, None)
        initialized_encoding_min = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_min])
        initialized_encoding_max = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_max])
        if quant_scheme in RANGE_LEARNING_SCHEMES:
            assert initialized_encoding_min == -initialized_encoding_max
        else:
            assert initialized_encoding_min != -initialized_encoding_max
        sim.export('/tmp/', 'quant_sim_model')
        with open('/tmp/quant_sim_model.encodings') as json_file:
            encoding_data = json.load(json_file)
        param_encodings = encoding_data['param_encodings']
        for encodings in param_encodings.values():
            for encoding_info in encodings:
                encoding_min = encoding_info['min']
                encoding_max = encoding_info['max']
                scale = encoding_info['scale']
                offset = encoding_info['offset']
                if quant_scheme in RANGE_LEARNING_SCHEMES:
                    assert encoding_min == (-encoding_max) - scale
                else:
                    assert np.isclose(encoding_min, (-encoding_max) - scale)
                assert offset == -128
                assert np.isclose(encoding_min, scale * offset, atol=1e-06)
                assert np.isclose(encoding_max, encoding_min + (scale * 255), atol=1e-06)

    @pytest.mark.cuda
    @pytest.mark.parametrize('quant_scheme', [QuantScheme.post_training_tf, QuantScheme.training_range_learning_with_tf_init, QuantScheme.post_training_tf_enhanced, QuantScheme.training_range_learning_with_tf_enhanced_init])
    def test_initialization_and_export_non_strict_symmetric_per_channel(self, quant_scheme) -> None:
        """Per-channel variant of the non-strict symmetric init/export test,
        driven by a generated quantsim config file."""
        tf.compat.v1.reset_default_graph()
        quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True'}, 'params': {'is_quantized': 'True', 'is_symmetric': 'True'}, 'strict_symmetric': 'False', 'per_channel_quantization': 'True'}, 'params': {'bias': {'is_quantized': 'False'}}, 'op_type': {'Squeeze': {'is_output_quantized': 'False'}, 'Pad': {'is_output_quantized': 'False'}, 'Mean': {'is_output_quantized': 'False'}, 'Gemm': {'per_channel_quantization': 'False'}}, 'supergroups': [{'op_list': ['Conv', 'Relu']}, {'op_list': ['Conv', 'Clip']}, {'op_list': ['Add', 'Relu']}, {'op_list': ['Gemm', 'Relu']}], 'model_input': {'is_input_quantized': 'True'}, 'model_output': {}}
        with open('./quantsim_config.json', 'w') as f:
            json.dump(quantsim_config, f)
        with tf.device('/gpu:0'):
            inputs = tf.keras.Input(shape=(32, 32, 4))
            conv_op = tf.keras.layers.Conv2D(2, (3, 3), kernel_initializer=tf.random_uniform_initializer(-1, 2), bias_initializer='random_uniform', padding='SAME')(inputs)
            relu_op = tf.nn.relu(conv_op)
            reshape = tf.keras.layers.Flatten()(relu_op)
            _ = tf.keras.layers.Dense(10, bias_initializer='random_uniform')(reshape)
        sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph())
        initialize_uninitialized_vars(sess)
        sim = QuantizationSimModel(sess, ['input_1'], ['dense/BiasAdd'], use_cuda=True, quant_scheme=quant_scheme, config_file='./quantsim_config.json')

        def dummy_forward_pass(_sess, _):
            model_output = _sess.graph.get_tensor_by_name('dense/BiasAdd_quantized:0')
            model_input = _sess.graph.get_tensor_by_name('input_1:0')
            shape = model_input.shape
            dummy_input = np.random.randn(1, shape[1], shape[2], shape[3])
            _sess.run(model_output, feed_dict={model_input: dummy_input})

        conv2d_weight_quant_op = sim.session.graph.get_operation_by_name('conv2d/Conv2D/ReadVariableOp_quantized')
        sim.compute_encodings(dummy_forward_pass, None)
        initialized_encoding_min = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_min])
        initialized_encoding_max = sim.session.run(conv2d_weight_quant_op.inputs[QuantizeOpIndices.encoding_max])
        # Per-channel: encodings are vectors, one entry per output channel.
        if quant_scheme in RANGE_LEARNING_SCHEMES:
            assert all(initialized_encoding_min == -initialized_encoding_max)
        else:
            assert not all(initialized_encoding_min == -initialized_encoding_max)
        sim.export('/tmp/', 'quant_sim_model')
        with open('/tmp/quant_sim_model.encodings') as json_file:
            encoding_data = json.load(json_file)
        param_encodings = encoding_data['param_encodings']
        for encodings in param_encodings.values():
            for encoding_info in encodings:
                encoding_min = encoding_info['min']
                encoding_max = encoding_info['max']
                scale = encoding_info['scale']
                offset = encoding_info['offset']
                if quant_scheme in RANGE_LEARNING_SCHEMES:
                    assert encoding_min == (-encoding_max) - scale
                else:
                    assert np.isclose(encoding_min, (-encoding_max) - scale)
                assert offset == -128
                assert np.isclose(encoding_min, scale * offset, atol=1e-06)
                assert np.isclose(encoding_max, encoding_min + (scale * 255), atol=1e-06)
def filter_protocol(hostmap, *, allowed_protocols: Iterable[str]=None) -> Sequence[ServerAddr]:
    """Collect server addresses from *hostmap* that speak an allowed protocol.

    Each hostmap entry maps a host to a protocol->port map; for every allowed
    protocol with a truthy port, a ServerAddr is produced. When no protocols
    are given, only the preferred network protocol is considered.
    """
    if allowed_protocols is None:
        allowed_protocols = {PREFERRED_NETWORK_PROTOCOL}
    return [
        ServerAddr(host, port, protocol=proto)
        for host, portmap in hostmap.items()
        for proto in allowed_protocols
        # wrap in a 1-tuple so the looked-up port can be filtered and reused
        for port in (portmap.get(proto),)
        if port
    ]
def get_args():
    """Parse the command-line options for an STPM anomaly-detection run."""
    parser = argparse.ArgumentParser(description='STPM anomaly detection')
    # Table-driven registration: (flag, add_argument keyword arguments).
    option_spec = (
        ('--phase', dict(default='train')),
        ('--data_path', dict(type=str, default='D:/dataset/mvtec_anomaly_detection')),
        ('--obj', dict(type=str, default='zipper')),
        ('--img_resize', dict(type=int, default=256)),
        ('--img_cropsize', dict(type=int, default=224)),
        ('--validation_ratio', dict(type=float, default=0.2)),
        ('--num_epochs', dict(type=int, default=100)),
        ('--lr', dict(type=float, default=0.4)),
        ('--batch_size', dict(type=int, default=32)),
        # `type=eval` turns the literal strings "True"/"False" into booleans.
        ('--vis', dict(type=eval, choices=[True, False], default=True)),
        ('--save_path', dict(type=str, default='./mvtec_results')),
    )
    for flag, kwargs in option_spec:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
def test(model, path, dataset):
    """Evaluate `model` on one dataset split; return (mean Dice, mean IoU, mean MAE).

    Expects `<path>/<dataset>/images/` and `<path>/<dataset>/masks/` on disk
    and runs every sample through the model on CUDA at size 352 via
    `test_dataset`.
    """
    data_path = os.path.join(path, dataset)
    image_root = '{}/images/'.format(data_path)
    gt_root = '{}/masks/'.format(data_path)
    model.eval()
    num1 = len(os.listdir(gt_root))
    test_loader = test_dataset(image_root, gt_root, 352)
    maes = []
    dscs = []
    ious = []
    for i in range(num1):
        (image, gt, name) = test_loader.load_data()
        gt = np.asarray(gt, np.float32)
        # Normalize ground truth to [0, 1]; epsilon guards an all-zero mask.
        gt /= (gt.max() + 1e-08)
        image = image.cuda()
        (res, res1) = model(image)
        # Fuse both prediction heads and resize to the GT resolution.
        # NOTE(review): F.upsample is deprecated in favor of F.interpolate.
        res = F.upsample((res + res1), size=gt.shape, mode='bilinear', align_corners=False)
        res = res.sigmoid().data.cpu().numpy().squeeze()
        # Min-max normalize the prediction map to [0, 1].
        res = ((res - res.min()) / ((res.max() - res.min()) + 1e-08))
        input = res  # NOTE(review): shadows the `input` builtin
        target = np.array(gt)
        N = gt.shape  # NOTE(review): unused
        smooth = 1  # NOTE(review): unused
        input_flat = np.reshape(input, (- 1))
        target_flat = np.reshape(target, (- 1))
        # Binarize the prediction at 0.5 before scoring.
        input_flat = (input_flat >= 0.5)
        (dice, iou, mae) = calcuate_score(target_flat, input_flat)
        maes.append(mae)
        dscs.append(dice)
        ious.append(iou)
    return (np.mean(dscs), np.mean(ious), np.mean(maes))
class _State():
    """Accumulates clock and caption state while scanning a caption stream.

    NOTE(review): the ``field(default_factory=...)`` defaults only take effect
    under a ``@dataclass`` decorator, which is not visible here — presumably it
    exists at the declaration site and was stripped by extraction; confirm.
    """
    # First observed clock value, in _CLOCK_FREQ ticks; None until initialised.
    clock_init: float = None
    # Most recent clock value; used as the end timestamp of the last caption.
    clock_now: float = None
    # Stream PID carrying captions; None until discovered.
    caption_pid: int = None
    # Program-map-table PIDs collected so far.
    pmt_pids: list = field(default_factory=list)
    # Accumulated (clock_timestamp, caption_text) pairs, in arrival order.
    captions: list = field(default_factory=list)
    def seconds(self, ts):
        """Convert absolute clock timestamp `ts` to seconds since clock_init.

        A negative delta is treated as a single clock wrap-around and
        compensated by adding one full clock period (_CLOCK_FREQ ticks).
        """
        n = (ts - self.clock_init)
        if (n < 0):
            n += _CLOCK_FREQ
        return float((n / _CLOCK_FREQ))
    def done(self):
        """Finalize parsing and return the list of Caption objects.

        Appends a sentinel empty caption at clock_now so the last real caption
        gets an end time, then pairs each caption with its successor's
        timestamp, emitting one Caption per non-empty text span.
        """
        ret = []
        self.captions.append((self.clock_now, ''))
        for (cur, nex) in zip(self.captions, self.captions[1:]):
            if cur[1]:
                start = self.seconds(cur[0])
                end = self.seconds(nex[0])
                ret.append(Caption(start, end, cur[1]))
        return ret
def calculate_metric_for_tensor(cal_func, tensor1, tensor2=None, LP_list=None):
    """Apply `cal_func` to the tensor(s), optionally once per Lp norm order.

    Args:
        cal_func: metric callable; invoked as ``cal_func(tensor1, tensor2)``
            or, when ``LP_list`` is given, ``cal_func(tensor1, tensor2, p=Lp)``.
        tensor1: first operand, forwarded unchanged.
        tensor2: optional second operand, forwarded unchanged.
        LP_list: optional list of norm orders as strings (e.g. ['1', '2', 'inf']).

    Returns:
        The single metric value when ``LP_list`` is None; otherwise a dict
        mapping each entry of ``LP_list`` (the original string key) to the
        metric computed with ``p=float(entry)`` ('inf' -> float('inf')).
    """
    if (LP_list is None):
        return cal_func(tensor1, tensor2)
    # isinstance is the idiomatic type check (was `type(LP_list) is list`).
    assert isinstance(LP_list, list)
    return {
        p: cal_func(tensor1, tensor2, p=(float('inf') if p == 'inf' else float(p)))
        for p in LP_list
    }
class InputsWidget(QtWidgets.QWidget):
    """Qt widget exposing one input element per requested procedure parameter.

    Each name in `inputs` becomes an attribute of this widget holding the input
    element built for the matching parameter of `procedure_class`. Parameters
    declaring `group_by` conditions are shown/hidden (or enabled/disabled,
    depending on `hide_groups`) based on the live value of the input they are
    grouped under.
    """
    # Input classes that render their own caption, so no separate QLabel is made.
    NO_LABEL_INPUTS = (BooleanInput,)
    def __init__(self, procedure_class, inputs=(), parent=None, hide_groups=True, inputs_in_scrollarea=False):
        """Build input elements for `inputs` of `procedure_class`.

        :param procedure_class: procedure class whose parameters are edited.
        :param inputs: iterable of parameter names to expose in the UI.
        :param parent: optional parent widget.
        :param hide_groups: hide (True) or disable (False) grouped inputs whose
            group condition is not met.
        :param inputs_in_scrollarea: wrap the inputs in a vertical scroll area.
        """
        super().__init__(parent)
        self._procedure_class = procedure_class
        self._procedure = procedure_class()
        self._inputs = inputs
        self._setup_ui()
        self._layout(inputs_in_scrollarea)
        self._hide_groups = hide_groups
        self._setup_visibility_groups()
    def _setup_ui(self):
        """Create one input element per requested parameter and attach it to
        `self` under the parameter's name."""
        parameter_objects = self._procedure.parameter_objects()
        for name in self._inputs:
            parameter = parameter_objects[name]
            # A parameter may supply its own UI class; otherwise dispatch on the
            # parameter type, most specific first.
            if (parameter.ui_class is not None):
                element = parameter.ui_class(parameter)
            elif isinstance(parameter, parameters.FloatParameter):
                element = ScientificInput(parameter)
            elif isinstance(parameter, parameters.IntegerParameter):
                element = IntegerInput(parameter)
            elif isinstance(parameter, parameters.BooleanParameter):
                element = BooleanInput(parameter)
            elif isinstance(parameter, parameters.ListParameter):
                element = ListInput(parameter)
            elif isinstance(parameter, parameters.Parameter):
                element = StringInput(parameter)
            # NOTE(review): if no branch above matched, `element` would be
            # unbound here — assumes every parameter derives from
            # parameters.Parameter; confirm.
            setattr(self, name, element)
    def _layout(self, inputs_in_scrollarea):
        """Lay out labels and input elements vertically, optionally wrapped in
        a scroll area."""
        vbox = QtWidgets.QVBoxLayout(self)
        vbox.setSpacing(6)
        vbox.setContentsMargins(0, 0, 0, 0)
        self.labels = {}
        parameters = self._procedure.parameter_objects()
        for name in self._inputs:
            # Inputs in NO_LABEL_INPUTS draw their own caption; skip the QLabel.
            if (not isinstance(getattr(self, name), self.NO_LABEL_INPUTS)):
                label = QtWidgets.QLabel(self)
                label.setText(('%s:' % parameters[name].name))
                vbox.addWidget(label)
                self.labels[name] = label
            vbox.addWidget(getattr(self, name))
        if inputs_in_scrollarea:
            scroll_area = QtWidgets.QScrollArea()
            scroll_area.setWidgetResizable(True)
            scroll_area.setFrameStyle(QtWidgets.QScrollArea.Shape.NoFrame)
            scroll_area.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOff)
            # Re-parent the populated layout onto an inner widget managed by the
            # scroll area; `vbox` is then rebound to a fresh outer layout below.
            inputs = QtWidgets.QWidget(self)
            inputs.setLayout(vbox)
            inputs.setSizePolicy(QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Fixed)
            scroll_area.setWidget(inputs)
            # NOTE(review): constructing a second QVBoxLayout(self) can make Qt
            # warn about an existing layout; presumably harmless because the
            # first layout was re-parented by setLayout above — confirm.
            vbox = QtWidgets.QVBoxLayout(self)
            vbox.setContentsMargins(0, 0, 0, 0)
            vbox.addWidget(scroll_area, 1)
        self.setLayout(vbox)
    def _setup_visibility_groups(self):
        """Wire value-change signals of group-controlling inputs so dependent
        inputs toggle their visibility/enabled state."""
        groups = {}
        parameters = self._procedure.parameter_objects()
        for name in self._inputs:
            parameter = parameters[name]
            # Shared mutable state: one flag per controlling group. The same
            # dict instance is stored for each of this parameter's groups, so a
            # dependent input is visible only when all its conditions hold.
            group_state = {g: True for g in parameter.group_by}
            for (group_name, condition) in parameter.group_by.items():
                # Ignore controlling inputs that are not shown here, and
                # self-references.
                if ((group_name not in self._inputs) or (group_name == name)):
                    continue
                # Boolean inputs emit bool states; normalise the condition.
                if isinstance(getattr(self, group_name), BooleanInput):
                    condition = bool(condition)
                if (group_name not in groups):
                    groups[group_name] = []
                groups[group_name].append((name, condition, group_state))
        for (group_name, group) in groups.items():
            toggle = partial(self.toggle_group, group_name=group_name, group=group)
            group_el = getattr(self, group_name)
            # Connect the input type's change signal and apply the initial state.
            if isinstance(group_el, BooleanInput):
                group_el.toggled.connect(toggle)
                toggle(group_el.isChecked())
            elif isinstance(group_el, StringInput):
                group_el.textChanged.connect(toggle)
                toggle(group_el.text())
            elif isinstance(group_el, (IntegerInput, ScientificInput)):
                group_el.valueChanged.connect(toggle)
                toggle(group_el.value())
            elif isinstance(group_el, ListInput):
                group_el.currentTextChanged.connect(toggle)
                toggle(group_el.currentText())
            else:
                raise NotImplementedError(f'Grouping based on {group_name} ({group_el}) is not implemented.')
    def toggle_group(self, state, group_name, group):
        """Update visibility of every input in `group` after the controlling
        input `group_name` changed to `state`.

        :param state: new value of the controlling input.
        :param group_name: name of the controlling input.
        :param group: list of (name, condition, group_state) tuples.
        """
        for (name, condition, group_state) in group:
            # A condition may be a predicate over the state or a literal value.
            if callable(condition):
                group_state[group_name] = condition(state)
            else:
                group_state[group_name] = (state == condition)
            visible = all(group_state.values())
            if self._hide_groups:
                getattr(self, name).setHidden((not visible))
            else:
                getattr(self, name).setDisabled((not visible))
            if (name in self.labels):
                if self._hide_groups:
                    self.labels[name].setHidden((not visible))
                else:
                    self.labels[name].setDisabled((not visible))
    def set_parameters(self, parameter_objects):
        """Push new parameter objects into the matching input elements."""
        for name in self._inputs:
            element = getattr(self, name)
            element.set_parameter(parameter_objects[name])
    def get_procedure(self):
        """Return a fresh procedure instance populated with the current values
        of all input elements."""
        self._procedure = self._procedure_class()
        parameter_values = {}
        for name in self._inputs:
            element = getattr(self, name)
            parameter_values[name] = element.parameter.value
        self._procedure.set_parameters(parameter_values)
        return self._procedure
def _make_link_replacements() -> List[Tuple[(str, str)]]:
top_level = ['Bloq', 'CompositeBloq', 'BloqBuilder', 'Register', 'Signature', 'Side', 'BloqInstance', 'Connection', 'Soquet']
replacements = [(f'`{name}`', f'[`{name}`](/reference/qualtran/{name}.md)') for name in top_level]
return replacements |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.