code stringlengths 281 23.7M |
|---|
def forward_step(model, inputs, labels, criterion):
    """Run one forward pass and compute the symmetric matching loss.

    ``inputs`` is a ``(tweet_input_ids, gif_inputs, gif_ids)`` triple.  The
    score matrix is scored against ``labels`` along both axes (``score`` and
    ``score.T``) and the two losses are averaged.

    Returns:
        ``(loss, y_pred, y_true)`` where ``y_pred`` is the raw score matrix
        and ``y_true`` the (possibly GPU-moved) labels.
    """
    tweet_input_ids, gif_inputs, gif_ids = inputs
    if opt.use_gpu:
        # Move everything the model and criterion consume onto the GPU.
        tweet_input_ids = tweet_input_ids.cuda()
        gif_inputs = [tensor.cuda() for tensor in gif_inputs]
        labels = labels.cuda()
    score = model(tweet_input_ids, gif_inputs)
    # Symmetric loss: both row-wise and column-wise matching count equally.
    loss = 0.5 * (criterion(score, labels) + criterion(score.T, labels))
    return loss, score, labels
class rpctouch(scan):
    """Scan module that runs the Rpctouch tool against a target's MSRPC service.

    The target port (139 NBT or 445 SMB) is reached through a local TCP
    tunnel provided by the ``scan`` base class; the tool's screen log is then
    parsed for the remote OS string reported over SMB.
    """

    def __init__(self, job, timeout=60):
        # Base class performs the common job bookkeeping (target, etc.).
        scan.__init__(self, job)
        if (len(job) > 1):
            # job[0] is formatted '<target>|<port>'; keep the port half.
            self.port = job[0].split('|')[1]
        self.scan_type = _whats_your_name()
        self.timeout = timeout

    def execute_scan(self, verbose):
        """Tunnel to the target, run Rpctouch through it, parse the screen log."""
        # Local tunnel so the tool can talk to 127.0.0.1:<lplisten>.
        redir_cmd = scan.gettunnel(self, self.target, 'tcp', self.port)
        PATH_TO_RPCTOUCH = scan.find_newest_touch(self, 'Rpctouch', 'exe')
        PATH_TO_RPCXML = scan.find_newest_touch(self, 'Rpctouch', 'xml')
        rpccmd = ops.cmd.getDszCommand('run', dszquiet=(not verbose))
        rpc_cmd_list = []
        rpc_cmd_list.append(('--InConfig %s' % PATH_TO_RPCXML))
        # Traffic goes to the local tunnel endpoint, not the target directly.
        rpc_cmd_list.append(('--TargetIp %s' % '127.0.0.1'))
        rpc_cmd_list.append(('--TargetPort %s' % redir_cmd.lplisten))
        rpc_cmd_list.append(('--NetworkTimeout %s' % self.timeout))
        # Protocol is selected from the well-known port number.
        if (int(self.port) == 445):
            rpc_cmd_list.append(('--Protocol %s' % 'SMB'))
        elif (int(self.port) == 139):
            rpc_cmd_list.append(('--Protocol %s' % 'NBT'))
            rpc_cmd_list.append(('--NetBIOSName %s' % '*SMBSERVER'))
        rpc_cmd_list.append(('--TouchLanguage %s' % 'False'))
        rpc_cmd_list.append(('--TouchArchitecture %s' % 'False'))
        # Timestamped output file keeps repeated runs from clobbering each other.
        outconfig = os.path.join(ops.LOGDIR, 'Logs', ('%s_%s_%s.xml' % (os.path.basename(PATH_TO_RPCTOUCH), self.target, dsz.Timestamp())))
        rpc_cmd_list.append(('--OutConfig %s' % outconfig))
        rpc_cmd_string = ((PATH_TO_RPCTOUCH + ' ') + ' '.join(rpc_cmd_list))
        rpccmd.command = ('cmd /C %s' % rpc_cmd_string)
        rpccmd.arglist.append('-redirect')
        rpccmd.arglist.append(('-directory %s' % os.path.join(ops.DSZDISKSDIR, 'lib', 'x86-Windows')))
        rpccmd.prefixes.append('local')
        rpccmd.prefixes.append('log')
        rpcobject = rpccmd.execute()
        # Tear the tunnel down regardless of what the tool reported.
        ops.networking.redirect.stop_tunnel(dsz_cmd=redir_cmd)
        cmd_output = {}
        cmd_output['error'] = None
        # The OS banner is recovered from the tool's captured screen log.
        screenlog = os.path.join(ops.PROJECTLOGDIR, rpcobject.commandmetadata.screenlog)
        f = open(screenlog, 'r')
        screenlog_lines = f.readlines()
        f.close()
        error = False
        for line in screenlog_lines:
            re_out = re.search('] SMB String:', line.strip())
            if (re_out is not None):
                # Line looks like '... ] SMB String: <os name>'.
                self.os = line.split(':')[(- 1)].strip()
                if ((self.os is None) or (self.os == '(none)')):
                    error = True
        # NOTE(review): if no 'SMB String' line appears, self.os is never set
        # here -- downstream access assumes a prior default; confirm.
        self.timestamp = dsz.Timestamp()
        if (error == False):
            self.success = True

    def return_success_message(self):
        return ('RPCtouch response for %s' % self.target)

    def verify_escalation(self, escalation_rule):
        """Return True when *escalation_rule* evaluates to a boolean.

        WARNING: the rule string is passed to ``eval`` -- it must come from
        trusted operator configuration only, never remote input.
        """
        rpctouch = self  # exposed so rules may reference 'rpctouch.<field>'
        try:
            eval_res = eval(escalation_rule)
            if ((eval_res == True) or (eval_res == False)):
                return True
            else:
                return False
        except:
            return False

    def check_escalation(self, escalation_rule):
        """Evaluate *escalation_rule* (trusted input; uses ``eval``)."""
        rpctouch = self  # exposed so rules may reference 'rpctouch.<field>'
        try:
            if eval(escalation_rule):
                return True
            else:
                return False
        except:
            return False

    def return_data(self):
        # Base-class implementation already serializes the fields below.
        return scan.return_data(self)

    def get_display_headers(self):
        # Column titles, parallel to get_data_fields().
        return ['Targeted Address', 'Port', 'OS', 'Time Stamp']

    def get_data_fields(self):
        return ['target', 'port', 'os', 'timestamp']

    def get_raw_fields(self):
        return (self.get_data_fields() + ['success'])

    def verify_job(self, job):
        """A job is valid only as two elements with port 139 or 445."""
        if ((not (len(job) == 2)) or (not (int(job[1]) in [139, 445]))):
            return False
        return True

    def min_time(self):
        # Minimum seconds to allow for one scan.
        return 30

    def min_range(self):
        return 5
class ArchiveTestingMixin():
    """Assertion helpers for tests that build or extract zip archives."""

    def assertArchiveMembers(self, archive_filepath, root_dir, expected):
        """Assert the zip at *archive_filepath* holds exactly *expected* members under *root_dir*."""
        with zipfile.ZipFile(str(archive_filepath), mode='r') as archive:
            found = set(archive.namelist())
        # Archive member names always use forward slashes.
        wanted = {'{}/{}'.format(str(root_dir), member) for member in expected}
        self.assertEqual(found, wanted)

    def assertExtractedArchiveMembers(self, extract_dir, root_dir, expected):
        """Assert extraction produced exactly *expected* files under *extract_dir*/*root_dir*."""
        extract_dir = str(extract_dir)
        found = set()
        for current_root, _, names in os.walk(extract_dir):
            found.update(os.path.join(current_root, name) for name in names)
        wanted = {os.path.join(extract_dir, str(root_dir), member) for member in expected}
        self.assertEqual(found, wanted)
class MLPFunction(Parameterized, Serializable):
    """TF1 multi-layer-perceptron function approximator over placeholder inputs.

    The output tensor is created eagerly at construction from ``input_pls``
    (a sequence of ``tf.placeholder``s) so that :meth:`eval` can feed values
    straight into it.
    """

    def __init__(self, name, input_pls, hidden_layer_sizes, output_nonlinearity=None):
        Parameterized.__init__(self)
        # Serializable bookkeeping: records ctor args for re-instantiation.
        Serializable.quick_init(self, locals())
        self._name = name
        self._input_pls = input_pls
        # Trailing None entry is a sentinel consumed by the external ``mlp``
        # helper. NOTE(review): its exact meaning (e.g. infer output width)
        # is defined by ``mlp`` -- confirm there.
        self._layer_sizes = (list(hidden_layer_sizes) + [None])
        self._output_nonlinearity = output_nonlinearity
        self._output_t = self.get_output_for(*self._input_pls)

    def get_output_for(self, *inputs, reuse=False):
        # Build (or reuse, when reuse=True) the MLP subgraph in this
        # function's variable scope.
        with tf.variable_scope(self._name, reuse=reuse):
            value_t = mlp(inputs=inputs, output_nonlinearity=self._output_nonlinearity, layer_sizes=self._layer_sizes)
        return value_t

    def eval(self, *inputs):
        """Run the output tensor with *inputs* bound to the stored placeholders."""
        feeds = {pl: val for (pl, val) in zip(self._input_pls, inputs)}
        return tf_utils.get_default_session().run(self._output_t, feeds)

    def get_params_internal(self, **tags):
        """Return the trainable variables created under this function's scope."""
        if (len(tags) > 0):
            raise NotImplementedError
        scope = tf.get_variable_scope().name
        # Qualify with the enclosing scope if any, e.g. 'outer/name/'.
        scope += ((('/' + self._name) + '/') if len(scope) else (self._name + '/'))
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)

    def output_t(self):
        # NOTE(review): reads like an accessor; a ``@property`` decorator may
        # have been lost upstream -- confirm against callers.
        return self._output_t
def run_evaluation(labelmap, groundtruth, detections, exclusions):
    """Score AVA-style detections against ground truth with the PASCAL evaluator.

    Args:
        labelmap: Label-map file; also yields the class whitelist.
        groundtruth: CSV of ground-truth boxes and labels.
        detections: CSV of detected boxes, labels and scores.
        exclusions: File listing image/timestamp keys to skip entirely.

    Prints per-phase timings and the final metrics dictionary.
    """
    (categories, class_whitelist) = read_labelmap(labelmap)
    logging.info('CATEGORIES (%d):\n%s', len(categories), pprint.pformat(categories, indent=2))
    excluded_keys = read_exclusions(exclusions)
    pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator(categories)
    # Third read_csv argument differs between ground truth (0) and
    # detections (50). NOTE(review): assumed to be a per-image detection
    # cap/score filter -- confirm in read_csv.
    (boxes, labels, _) = read_csv(groundtruth, class_whitelist, 0)
    start = time.time()
    for image_key in boxes:
        if (image_key in excluded_keys):
            logging.info('Found excluded timestamp in ground truth: %s. It will be ignored.', image_key)
            continue
        pascal_evaluator.add_single_ground_truth_image_info(image_key, {standard_fields.InputDataFields.groundtruth_boxes: np.array(boxes[image_key], dtype=float), standard_fields.InputDataFields.groundtruth_classes: np.array(labels[image_key], dtype=int), standard_fields.InputDataFields.groundtruth_difficult: np.zeros(len(boxes[image_key]), dtype=bool)})
    print_time('convert groundtruth', start)
    (boxes, labels, scores) = read_csv(detections, class_whitelist, 50)
    start = time.time()
    for image_key in boxes:
        if (image_key in excluded_keys):
            logging.info('Found excluded timestamp in detections: %s. It will be ignored.', image_key)
            continue
        pascal_evaluator.add_single_detected_image_info(image_key, {standard_fields.DetectionResultFields.detection_boxes: np.array(boxes[image_key], dtype=float), standard_fields.DetectionResultFields.detection_classes: np.array(labels[image_key], dtype=int), standard_fields.DetectionResultFields.detection_scores: np.array(scores[image_key], dtype=float)})
    print_time('convert detections', start)
    start = time.time()
    metrics = pascal_evaluator.evaluate()
    print_time('run_evaluator', start)
    pprint.pprint(metrics, indent=2)
class TestVersion():
    """Tests for the package version constants and the ``Version`` tuple type.

    Fix: the two parametrize decorators had been stripped down to bare
    ``.parametrize(...)`` expression statements (a syntax error); restored as
    ``@pytest.mark.parametrize`` (requires ``pytest`` imported at module top).
    """

    def test_bot_api_version_and_info(self):
        # Re-exported names must be the exact constant objects (identity).
        assert (__bot_api_version__ is constants.BOT_API_VERSION)
        assert (__bot_api_version_info__ is constants.BOT_API_VERSION_INFO)

    def test_version_and_info(self):
        assert (__version__ == str(__version_info__))

    @pytest.mark.parametrize(('version', 'expected'), [(Version(1, 2, 3, 'alpha', 4), '1.2.3a4'), (Version(2, 3, 4, 'beta', 5), '2.3.4b5'), (Version(1, 2, 3, 'candidate', 4), '1.2.3rc4'), (Version(1, 2, 0, 'alpha', 4), '1.2a4'), (Version(2, 3, 0, 'beta', 5), '2.3b5'), (Version(1, 2, 0, 'candidate', 4), '1.2rc4'), (Version(1, 2, 3, 'final', 0), '1.2.3'), (Version(1, 2, 0, 'final', 0), '1.2')])
    def test_version_str(self, version, expected):
        # PEP 440-style rendering: micro==0 dropped, 'final' has no suffix.
        assert (str(version) == expected)

    @pytest.mark.parametrize('use_tuple', [True, False])
    def test_version_info(self, use_tuple):
        version = Version(1, 2, 3, 'beta', 4)
        # Version is a named tuple: fields mirror positional access.
        assert isinstance(version, tuple)
        assert (version.major == version[0])
        assert (version.minor == version[1])
        assert (version.micro == version[2])
        assert (version.releaselevel == version[3])
        assert (version.serial == version[4])

        class TestClass():
            # Produces either a plain tuple or a Version for comparison tests.
            def __new__(cls, *args):
                if use_tuple:
                    return tuple(args)
                return Version(*args)

        assert isinstance(TestClass(1, 2, 3, 'beta', 4), (tuple if use_tuple else Version))
        # Ordering follows tuple semantics: 'alpha' < 'beta' < 'candidate' < 'final'.
        assert (version == TestClass(1, 2, 3, 'beta', 4))
        assert (not (version < TestClass(1, 2, 3, 'beta', 4)))
        assert (version > TestClass(1, 2, 3, 'beta', 3))
        assert (version > TestClass(1, 2, 3, 'alpha', 4))
        assert (version < TestClass(1, 2, 3, 'candidate', 0))
        assert (version < TestClass(1, 2, 3, 'final', 0))
        assert (version < TestClass(1, 2, 4, 'final', 0))
        assert (version < TestClass(1, 3, 4, 'final', 0))
        assert (version < (1, 3))
        assert (version >= (1, 2, 3, 'alpha'))
        assert (version > (1, 1))
        assert (version <= (1, 2, 3, 'beta', 4))
        assert (version < (1, 2, 3, 'candidate', 4))
        assert (not (version > (1, 2, 3, 'candidate', 4)))
        assert (version < (1, 2, 4))
        assert (version > (1, 2, 2))
def create_exp_dir(path, scripts_to_save=None):
    """Create an experiment directory and optionally snapshot scripts into it.

    Args:
        path: Directory to create (including parents) if it does not exist.
        scripts_to_save: Optional iterable of file paths; each is copied into
            ``<path>/scripts`` so the run is reproducible.
    """
    # exist_ok avoids the check-then-create race and makes re-runs safe.
    os.makedirs(path, exist_ok=True)
    print('Experiment dir : {}'.format(path))
    if scripts_to_save is not None:
        scripts_dir = os.path.join(path, 'scripts')
        # Was os.mkdir: raised FileExistsError when re-running an experiment.
        os.makedirs(scripts_dir, exist_ok=True)
        for script in scripts_to_save:
            dst_file = os.path.join(scripts_dir, os.path.basename(script))
            shutil.copyfile(script, dst_file)
def from_numpy(array, dtype, scope=None, device: str=''):
    """Convert a 1-D numpy (or masked) array into the backend array type.

    Args:
        array: ``np.ndarray`` or ``np.ma.MaskedArray``; must be 1-D.
        dtype: Target element type understood by the backend converters.
        scope: Execution scope; defaults to ``Scope.default``.
        device: Device string; defaults to the scope's device.

    Raises:
        TypeError: For non-1-D input or unsupported array types.
    """
    scope = (scope or Scope.default)
    device = (device or scope.device)
    # Masked arrays are tested first: a MaskedArray is also an ndarray, so
    # the ordering matters to keep the mask information.
    if (isinstance(array, ma.core.MaskedArray) and (array.ndim == 1)):
        return _from_numpy_ma(array.data, array.mask, dtype, scope, device)
    elif (isinstance(array, np.ndarray) and (array.ndim == 1)):
        return _from_numpy_nd(array, dtype, scope, device)
    else:
        raise TypeError(f'cannot convert numpy array of type {array.dtype}')
class ExplicitNamespacePackageFinder(ImportlibFinder):
    """Finder that resolves already-imported namespace packages from sys.modules."""

    def find_module(self, modname: str, module_parts: Sequence[str], processed: list[str], submodule_path: (Sequence[str] | None)) -> (ModuleSpec | None):
        """Return a namespace ModuleSpec for *modname*, or None if not applicable."""
        if processed:
            # Re-qualify the name with the package parts handled so far.
            modname = '.'.join([*processed, modname])
        if not (util.is_namespace(modname) and (modname in sys.modules)):
            return None
        # The live module object already knows its search locations.
        submodule_path = sys.modules[modname].__path__
        return ModuleSpec(name=modname, location='', origin='namespace', type=ModuleType.PY_NAMESPACE, submodule_search_locations=submodule_path)

    def contribute_to_path(self, spec: ModuleSpec, processed: list[str]) -> (Sequence[str] | None):
        """Namespace packages contribute their recorded search locations."""
        return spec.submodule_search_locations
class MemTimer(Process):
    """Child process that periodically samples the memory usage of a pid.

    Pipe protocol (order matters): the child sends ``0`` once sampling has
    started; the parent signals "stop" by writing anything to the pipe; the
    child then sends the collected measurements followed by the count.
    """

    def __init__(self, monitor_pid, interval, pipe, backend, max_usage=False, *args, **kw):
        self.monitor_pid = monitor_pid  # pid whose memory is sampled
        self.interval = interval        # seconds between samples
        self.pipe = pipe                # duplex channel to the parent
        self.cont = True
        self.backend = backend          # passed through to _get_memory
        self.max_usage = max_usage      # True: keep only the running maximum
        self.n_measurements = 1
        # Pop our own keyword options before forwarding the rest to Process.
        self.timestamps = kw.pop('timestamps', False)
        self.include_children = kw.pop('include_children', False)
        # Seed with one measurement so mem_usage is never empty (and gives
        # the max-tracking branch a starting value).
        self.mem_usage = [_get_memory(self.monitor_pid, self.backend, timestamps=self.timestamps, include_children=self.include_children)]
        super(MemTimer, self).__init__(*args, **kw)

    def run(self):
        """Sample until the parent writes to the pipe, then report results."""
        self.pipe.send(0)  # handshake: sampling has started
        stop = False
        while True:
            cur_mem = _get_memory(self.monitor_pid, self.backend, timestamps=self.timestamps, include_children=self.include_children)
            if (not self.max_usage):
                self.mem_usage.append(cur_mem)
            else:
                self.mem_usage[0] = max(cur_mem, self.mem_usage[0])
            self.n_measurements += 1
            if stop:
                break
            # poll() doubles as the inter-sample sleep; it returns True once
            # the parent has written, so one final sample is taken above
            # before the loop exits.
            stop = self.pipe.poll(self.interval)
        self.pipe.send(self.mem_usage)
        self.pipe.send(self.n_measurements)
def perform_qat(config: argparse.Namespace):
    """End-to-end quantization-aware training (QAT) of ResNet50 with AIMET.

    Pipeline: evaluate the FP32 baseline -> fold batch norms -> build a
    QuantSim model and evaluate it -> fine-tune (QAT) -> evaluate again ->
    export the quantized graph to ``config.logdir``.
    """
    data_pipeline = ImageNetDataPipeline(config)
    input_shape = (image_net_config.dataset['image_width'], image_net_config.dataset['image_height'], image_net_config.dataset['image_channels'])
    # Fresh TF1 session with on-demand GPU memory growth.
    tf.keras.backend.clear_session()
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    tf.keras.backend.set_session(tf.Session(config=tf_config))
    model = ResNet50(weights='imagenet', input_shape=input_shape)
    # Capture BN update-op names before freezing; they are re-run during QAT.
    update_ops_name = [op.name for op in model.updates]
    model = update_keras_bn_ops_trainable_flag(model, trainable=False, load_save_path=config.logdir)
    sess = tf.keras.backend.get_session()
    add_image_net_computational_nodes_in_graph(sess, model.output.name, image_net_config.dataset['images_classes'])
    accuracy = data_pipeline.evaluate(sess)
    logger.info('Original Model Top-1 accuracy = %.2f', accuracy)
    logger.info('Starting Model QuantSim...')
    # Fold BN into preceding convs so quantization sees the fused weights.
    (BN_folded_sess, _) = aimet_bnf.fold_all_batch_norms(sess, input_op_names=['input_1'], output_op_names=[model.output.name.split(':')[0]])
    quant_sim = create_quant_sim_model(sess=BN_folded_sess, start_op_names=['input_1'], output_op_names=[model.output.name.split(':')[0]], use_cuda=config.use_cuda, parity_config_file=config.parity_config_file, evaluator=data_pipeline.evaluate)
    accuracy = data_pipeline.evaluate(quant_sim.session)
    logger.info('Model Top-1 Accuracy on Quant Simulator = %.2f', accuracy)
    logger.info('Model QuantSim Done')
    logger.info('Starting Model QAT')
    # Fine-tune within the simulated-quantization graph.
    data_pipeline.train(quant_sim.session, update_ops_name=update_ops_name)
    accuracy = data_pipeline.evaluate(quant_sim.session)
    logger.info('Quantization aware trained model Top-1 Accuracy on Quant Simulator = %.2f', accuracy)
    logger.info('Saving Quantized model graph')
    quant_sim.export(path=config.logdir, filename_prefix='quantized_model')
    logger.info('Quantized model graph is saved!')
    logger.info('Model QAT Done')
class VGG(nn.Module):
    """VGG-style backbone with ``number_net`` parallel heads.

    The stem (conv1..layer2) is shared; layer3/layer4/classifier are
    replicated per head so ``forward`` returns one logit tensor and one
    embedding tensor per head.

    Args:
        num_classes: Output classes per classifier head.
        depth: VGG depth; one of 8, 16 or 19 (selects blocks per stage).
        dropout: NOTE(review): accepted but never used; kept for interface
            compatibility with callers.
        number_net: Number of parallel layer3/layer4/classifier branches.
    """

    def __init__(self, num_classes=10, depth=16, dropout=0.0, number_net=4):
        super(VGG, self).__init__()
        self.inplances = 64
        self.number_net = number_net
        # Shared stem: two 64-channel convs + BN/ReLU, then a 2x pool.
        self.conv1 = nn.Conv2d(3, self.inplances, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(self.inplances)
        self.conv2 = nn.Conv2d(self.inplances, self.inplances, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(self.inplances)
        self.relu = nn.ReLU(True)
        self.layer1 = self._make_layers(128, 2)
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        if (depth == 16):
            num_layer = 3
        elif (depth == 8):
            num_layer = 1
        elif (depth == 19):
            num_layer = 4
        else:
            # Fix: an unsupported depth previously left num_layer unbound and
            # crashed later with a confusing NameError.
            raise ValueError('depth must be one of 8, 16 or 19, got {}'.format(depth))
        self.layer2 = self._make_layers(256, num_layer)
        # Each branch starts from the same channel count; reset before build.
        fix_planes = self.inplances
        for i in range(self.number_net):
            self.inplances = fix_planes
            setattr(self, ('layer3_' + str(i)), self._make_layers(512, num_layer))
            setattr(self, ('layer4_' + str(i)), self._make_layers(512, num_layer))
            setattr(self, ('classifier_' + str(i)), nn.Linear(512, num_classes))
        # Standard VGG-style initialization.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if (m.bias is not None):
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)

    def _make_layers(self, input, num_layer):
        """Stack ``num_layer`` conv-BN-ReLU blocks at width *input*, end with a 2x pool."""
        layers = []
        for i in range(num_layer):
            conv2d = nn.Conv2d(self.inplances, input, kernel_size=3, padding=1)
            layers += [conv2d, nn.BatchNorm2d(input), nn.ReLU(inplace=True)]
            self.inplances = input
        layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return ``(logits, embedding)`` lists with one entry per head."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        logits = []
        embedding = []
        # Every head consumes the same shared feature map.
        input = x
        for i in range(self.number_net):
            x = getattr(self, ('layer3_' + str(i)))(input)
            x = getattr(self, ('layer4_' + str(i)))(x)
            embedding.append(x)
            x = x.view(x.size(0), (- 1))
            x = getattr(self, ('classifier_' + str(i)))(x)
            logits.append(x)
        return (logits, embedding)
def get_article(article_id: str, links: bool=False, url: str=URL) -> str:
    """Fetch one feed entry by index and render it as markdown-ish text.

    Exits with an error message when *article_id* is not a valid index;
    hyperlinks are stripped unless *links* is true.
    """
    entries = _feed(url).entries
    try:
        entry = entries[int(article_id)]
    except (IndexError, ValueError):
        # Bad or out-of-range id: abort with a helpful range hint.
        raise SystemExit(f'Error: Unknown article ID, use ID from 0 to {len(entries) - 1}')
    try:
        html = entry.content[0].value
    except AttributeError:
        # Some feeds provide only a summary, not full content.
        html = entry.summary
    converter = html2text.HTML2Text()
    converter.ignore_links = not links
    body = converter.handle(html)
    return f'# {entry.title}\n{body}'
def add_footer(finished_epoch, merged_filename):
    """Append the closing nmap runstats and </nmaprun> tags to a merged XML file."""
    print('Adding footer')
    footer = (
        f'<runstats><finished time="{finished_epoch}" timestr="" elapsed="" '
        f'summary="" exit="success"/></runstats>'
        '</nmaprun>'
    )
    with open(merged_filename, 'a') as out:
        out.write(footer)
class TestLayers():
    """Fault-injection regression tests against a pretrained AlexNet.

    ``setup_class`` runs one clean ("golden") inference; each test then
    injects neuron faults via ``FaultInjection`` and pins the resulting
    top-1 labels, which are deterministic given the fixed torch seed.
    """

    def setup_class(self):
        torch.manual_seed(5)  # fixes the random input, hence the pinned labels
        self.Cin = 3
        self.Hin = 224
        self.Win = 224
        self.batch_size = 4
        self.use_gpu = False
        self.IMAGE = torch.rand((self.batch_size, self.Cin, self.Hin, self.Win))
        self.softmax = torch.nn.Softmax(dim=1)
        self.model = models.alexnet(pretrained=True)
        self.model.eval()
        # Fault-free baseline prediction for the first batch element.
        self.output = self.model(self.IMAGE)
        self.golden_softmax = self.softmax(self.output)
        self.golden_label = list(torch.argmax(self.golden_softmax, dim=1))[0].item()

    def test_golden_inference(self):
        # Regression pin: the clean model must predict class 556.
        assert (self.golden_label == 556)

    def test_single_conv_neuron(self):
        """One large-value fault in a conv layer flips the prediction."""
        p = FaultInjection(self.model, self.batch_size, layer_types=[torch.nn.Conv2d], use_cuda=self.use_gpu)
        # (batch idx, layer, channel, row, col, injected value)
        (b, layer, C, H, W, err_val) = ([0], [3], [4], [2], [4], [10000])
        inj = p.declare_neuron_fault_injection(batch=b, layer_num=layer, dim1=C, dim2=H, dim3=W, value=err_val)
        inj_output = inj(self.IMAGE)
        inj_softmax = self.softmax(inj_output)
        inj_label = list(torch.argmax(inj_softmax, dim=1))[0].item()
        assert (inj_label == 578)

    def test_single_linear_layer(self):
        """Layer bookkeeping when only Linear layers are traversed (AlexNet has 3)."""
        p = FaultInjection(self.model, self.batch_size, layer_types=[torch.nn.Linear], use_cuda=self.use_gpu)
        assert (p.get_total_layers() == 3)
        assert (p.get_layer_dim(2) == 2)
        assert (p.get_layer_type(2) == torch.nn.Linear)

    def test_inj_all_layers(self):
        """Layer bookkeeping when every layer type is traversed (21 in AlexNet)."""
        p = FaultInjection(self.model, self.batch_size, layer_types=['all'], use_cuda=self.use_gpu)
        assert (p.get_total_layers() == 21)
        assert (p.get_layer_dim(2) == 4)
        assert (p.get_layer_type(2) == torch.nn.MaxPool2d)
        assert (p.get_layer_type(20) == torch.nn.Linear)

    def test_inj_all_layers_injections(self):
        """Simultaneous faults in different layers/batch elements stay independent."""
        p = FaultInjection(self.model, self.batch_size, layer_types=['all'], use_cuda=self.use_gpu)
        # Last entry targets a Linear layer, so H/W dims are None.
        (b, layer, C, H, W, err_val) = ([0, 1, 2, 3], [0, 1, 2, 20], [4, 4, 0, 4], [2, 2, 2, None], [4, 2, 2, None], [10000, 10000, 10000, 1000])
        inj = p.declare_neuron_fault_injection(batch=b, layer_num=layer, dim1=C, dim2=H, dim3=W, value=err_val)
        inj_output = inj(self.IMAGE)
        inj_softmax = self.softmax(inj_output)
        inj_label_0 = list(torch.argmax(inj_softmax, dim=1))[0].item()
        inj_label_1 = list(torch.argmax(inj_softmax, dim=1))[1].item()
        inj_label_2 = list(torch.argmax(inj_softmax, dim=1))[2].item()
        inj_label_3 = list(torch.argmax(inj_softmax, dim=1))[3].item()
        assert (inj_label_0 == 722)
        assert (inj_label_1 == 723)
        assert (inj_label_2 == 968)
        assert (inj_label_3 == 4)

    def test_single_linear_neuron_inj(self):
        """A huge fault on output neuron 888 forces prediction 888."""
        p = FaultInjection(self.model, self.batch_size, layer_types=[torch.nn.Linear], use_cuda=self.use_gpu)
        (b, layer, C, H, W, err_val) = (0, 2, 888, None, None, 10000)
        inj = p.declare_neuron_fault_injection(batch=[b], layer_num=[layer], dim1=[C], dim2=[H], dim3=[W], value=[err_val])
        inj_output = inj(self.IMAGE)
        inj_softmax = self.softmax(inj_output)
        inj_label = list(torch.argmax(inj_softmax, dim=1))[0].item()
        assert (inj_label == 888)

    def test_combo_layers(self):
        """Conv + Linear traversal together: 8 layers, both faults take effect."""
        p = FaultInjection(self.model, self.batch_size, layer_types=[torch.nn.Conv2d, torch.nn.Linear], use_cuda=self.use_gpu)
        (b, layer, C, H, W, err_val) = ([0, 1], [1, 7], [5, 888], [5, None], [3, None], [20000, 10000])
        inj = p.declare_neuron_fault_injection(batch=b, layer_num=layer, dim1=C, dim2=H, dim3=W, value=err_val)
        inj_output = inj(self.IMAGE)
        inj_softmax = self.softmax(inj_output)
        inj_label_1 = list(torch.argmax(inj_softmax, dim=1))[0].item()
        inj_label_2 = list(torch.argmax(inj_softmax, dim=1))[1].item()
        assert (p.get_total_layers() == 8)
        assert (inj_label_1 == 695)
        assert (inj_label_2 == 888)
class XcelIfcCL2FLAdapter(Component):
    """Adapter that lets a CL master drive an FL accelerator interface.

    A single-entry buffer holds the incoming CL request; an update block
    services it through the FL ``read``/``write`` calls and sends the
    response back on the CL side once that side is ready.
    """

    def recv_rdy(s):
        # Single-element buffer: ready to accept only while empty.
        return (s.entry is None)

    def recv(s, msg):
        assert (s.entry is None)
        s.entry = msg

    def construct(s, ReqType, RespType):
        s.left = XcelMinionIfcCL(ReqType, RespType, req=s.recv, req_rdy=s.recv_rdy)
        s.right = XcelMasterIfcFL()
        s.entry = None

        # NOTE(review): this decorator had been stripped to a bare '_once'
        # expression, so the block was never registered with the simulator;
        # restored as PyMTL3's @update_once. Confirm against the project's
        # PyMTL version (older releases used @s.update instead).
        @update_once
        def up_xcelifc_cl_fl_blk():
            if ((s.entry is not None) and s.left.resp.rdy()):
                req = s.entry
                s.entry = None  # free the buffer before servicing
                if (req.type_ == XcelMsgType.READ):
                    resp = RespType(req.type_, s.right.read(req.addr))
                elif (req.type_ == XcelMsgType.WRITE):
                    s.right.write(req.addr, req.data)
                    resp = RespType(req.type_, 0)
                assert s.left.resp.rdy()
                s.left.resp(resp)

        # The request method port must fire before the update block each cycle.
        s.add_constraints((M(s.left.req) > U(up_xcelifc_cl_fl_blk)))
class CarliniL2():
    """Carlini & Wagner L2 attack (TF1 graph implementation).

    The attack graph is built once at construction; :meth:`attack` then runs
    a per-image binary search over the distortion/objective trade-off
    constant.  The sentinel ``1e10`` marks "no adversarial example found yet"
    in the best-distance bookkeeping (these constants had been mangled to
    ``.0`` in this copy, which made the search a no-op; restored from the
    C&W reference implementation).
    """

    def __init__(self, sess, model, image_size, num_channels, num_labels, batch_size=100, confidence=L2_CONFIDENCE, targeted=L2_TARGETED, learning_rate=L2_LEARNING_RATE, binary_search_steps=L2_BINARY_SEARCH_STEPS, max_iterations=L2_MAX_ITERATIONS, abort_early=L2_ABORT_EARLY, initial_const=L2_INITIAL_CONST):
        self.model = model
        self.sess = sess
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_labels = num_labels
        self.TARGETED = targeted
        self.LEARNING_RATE = learning_rate
        self.MAX_ITERATIONS = max_iterations
        self.BINARY_SEARCH_STEPS = binary_search_steps
        self.ABORT_EARLY = abort_early
        self.CONFIDENCE = confidence
        self.initial_const = initial_const
        self.batch_size = batch_size
        # With >= 10 search steps, the final step is re-run at the upper bound.
        self.repeat = (binary_search_steps >= 10)
        shape = (self.batch_size, self.image_size, self.image_size, self.num_channels)
        # The perturbation (in tanh space) is the only variable we optimize.
        modifier = tf.Variable(np.zeros(shape, dtype=np.float32))
        self.max_mod = tf.reduce_max(modifier)
        self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32)
        self.tlab = tf.Variable(np.zeros((self.batch_size, self.num_labels)), dtype=tf.float32)
        self.const = tf.Variable(np.zeros(self.batch_size), dtype=tf.float32)
        self.assign_timg = tf.placeholder(tf.float32, shape)
        self.assign_tlab = tf.placeholder(tf.float32, (self.batch_size, self.num_labels))
        self.assign_const = tf.placeholder(tf.float32, [self.batch_size])
        # tanh change-of-variables keeps adversarial images inside the valid box.
        self.newimg = (tf.tanh((modifier + self.timg)) / 2)
        self.output = self.model(self.newimg)
        self.l2dist = tf.reduce_sum(tf.square((self.newimg - (tf.tanh(self.timg) / 2))), [1, 2, 3])
        # 'real' = logit of the (target) label; 'other' = best non-target logit.
        real = tf.reduce_sum((self.tlab * self.output), 1)
        other = tf.reduce_max((((1 - self.tlab) * self.output) - (self.tlab * 10000)), 1)
        if self.TARGETED:
            loss1 = tf.maximum(0.0, ((other - real) + self.CONFIDENCE))
        else:
            loss1 = tf.maximum(0.0, ((real - other) + self.CONFIDENCE))
        self.loss2 = tf.reduce_sum(self.l2dist)
        self.loss1 = tf.reduce_sum((self.const * loss1))
        self.loss = (self.loss1 + self.loss2)
        self.grads = tf.reduce_max(tf.gradients(self.loss, [modifier]))
        # Track variables created by the optimizer so init only resets those.
        start_vars = set((x.name for x in tf.global_variables()))
        optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE)
        self.train = optimizer.minimize(self.loss, var_list=[modifier])
        end_vars = tf.global_variables()
        new_vars = [x for x in end_vars if (x.name not in start_vars)]
        self.setup = []
        self.setup.append(self.timg.assign(self.assign_timg))
        self.setup.append(self.tlab.assign(self.assign_tlab))
        self.setup.append(self.const.assign(self.assign_const))
        self.init = tf.variables_initializer(var_list=([modifier] + new_vars))

    def attack(self, X, Y):
        """Attack all of ``X`` batch-by-batch; returns the adversarial images."""
        nb_classes = Y.shape[1]
        y_target = np.copy(Y)
        if self.TARGETED:
            # Pick a random wrong class as the target for each sample.
            for i in range(Y.shape[0]):
                current = int(np.argmax(Y[i]))
                target = np.random.choice(other_classes(nb_classes, current))
                y_target[i] = np.eye(nb_classes)[target]
        X_adv = np.zeros_like(X)
        for i in tqdm(range(0, X.shape[0], self.batch_size)):
            start = i
            end = (i + self.batch_size)
            end = np.minimum(end, X.shape[0])
            X_adv[start:end] = self.attack_batch(X[start:end], y_target[start:end])
        return X_adv

    def attack_batch(self, imgs, labs):
        """Binary-search the trade-off constant for one batch of images."""

        def compare(x, y):
            # Accepts either raw logits or an already-chosen label.
            if (not isinstance(x, (float, int, np.int64))):
                x = np.copy(x)
                x[y] -= self.CONFIDENCE
                x = np.argmax(x)
            if self.TARGETED:
                return (x == y)
            else:
                return (x != y)

        batch_size = imgs.shape[0]
        # Map images (assumed in [-0.5, 0.5]) into arctanh space; the 1.999999
        # factor avoids the infinite endpoints of arctanh.
        imgs = np.arctanh((imgs * 1.999999))
        lower_bound = np.zeros(batch_size)
        CONST = (np.ones(batch_size) * self.initial_const)
        # 1e10 == "not found yet" sentinel (restored; had been mangled to .0).
        upper_bound = (np.ones(batch_size) * 1e10)
        o_bestl2 = ([1e10] * batch_size)
        o_bestscore = ([(- 1)] * batch_size)
        o_bestattack = ([np.zeros(imgs[0].shape)] * batch_size)
        for outer_step in range(self.BINARY_SEARCH_STEPS):
            # Reset the modifier and optimizer state for each search step.
            self.sess.run(self.init)
            batch = imgs[:batch_size]
            batchlab = labs[:batch_size]
            bestl2 = ([1e10] * batch_size)
            bestscore = ([(- 1)] * batch_size)
            if ((self.repeat == True) and (outer_step == (self.BINARY_SEARCH_STEPS - 1))):
                # Final repeat: push the constant to its proven upper bound.
                CONST = upper_bound
            self.sess.run(self.setup, {self.assign_timg: batch, self.assign_tlab: batchlab, self.assign_const: CONST})
            prev = 1000000.0
            for iteration in range(self.MAX_ITERATIONS):
                (_, l, l2s, scores, nimg) = self.sess.run([self.train, self.loss, self.l2dist, self.output, self.newimg], feed_dict={K.learning_phase(): 0})
                if (self.ABORT_EARLY and ((iteration % (self.MAX_ITERATIONS // 10)) == 0)):
                    # Stop once the loss has plateaued.
                    if (l > (prev * 0.9999)):
                        break
                    prev = l
                for (e, (l2, sc, ii)) in enumerate(zip(l2s, scores, nimg)):
                    if ((l2 < bestl2[e]) and compare(sc, np.argmax(batchlab[e]))):
                        bestl2[e] = l2
                        bestscore[e] = np.argmax(sc)
                    if ((l2 < o_bestl2[e]) and compare(sc, np.argmax(batchlab[e]))):
                        o_bestl2[e] = l2
                        o_bestscore[e] = np.argmax(sc)
                        o_bestattack[e] = ii
            for e in range(batch_size):
                if (compare(bestscore[e], np.argmax(batchlab[e])) and (bestscore[e] != (- 1))):
                    # Success at this const: try a smaller one.
                    upper_bound[e] = min(upper_bound[e], CONST[e])
                    if (upper_bound[e] < 1e9):
                        CONST[e] = ((lower_bound[e] + upper_bound[e]) / 2)
                else:
                    # Failure: raise the constant (bisect if bounded above,
                    # otherwise grow exponentially).
                    lower_bound[e] = max(lower_bound[e], CONST[e])
                    if (upper_bound[e] < 1e9):
                        CONST[e] = ((lower_bound[e] + upper_bound[e]) / 2)
                    else:
                        CONST[e] *= 10
        o_bestl2 = np.array(o_bestl2)
        # Entries still at the sentinel never found an adversarial example.
        # NOTE(review): denominator uses self.batch_size even for a partial
        # final batch -- confirm whether that skew is intended.
        print(('sucess rate: %.4f' % (1 - (np.sum((o_bestl2 == 1e10)) / self.batch_size))))
        return o_bestattack
class Migration(migrations.Migration):
    """Alter ``Company.about_markup_type``: markup-format choices with a
    'restructuredtext' default (blank allowed)."""
    # Applies on top of the initial companies schema.
    dependencies = [('companies', '0001_initial')]
    operations = [migrations.AlterField(model_name='company', name='about_markup_type', field=models.CharField(max_length=30, choices=[('', '--'), ('html', 'HTML'), ('plain', 'Plain'), ('markdown', 'Markdown'), ('restructuredtext', 'Restructured Text')], default='restructuredtext', blank=True), preserve_default=True)]
class AddressBookPage(HTML5Page):
    """Top-level page with a branded dark navbar, shared by the address book UI."""

    def __init__(self, view):
        super().__init__(view)
        self.body.use_layout(Container())
        # Dark, primary-coloured responsive navbar collapsing at 'md'.
        nav_layout = ResponsiveLayout('md', colour_theme='dark', bg_scheme='primary')
        nav = Navbar(view, css_id='my_nav').use_layout(nav_layout)
        nav.layout.set_brand_text('Address book')
        nav.layout.add(TextNode(view, 'All your addresses in one place'))
        self.body.add_child(nav)
def get_enabled_activation_quantizers(sim: QuantizationSimModel) -> List[TensorQuantizer]:
    """Collect every enabled activation (input/output) quantizer in *sim*.

    Walks all quant wrappers and keeps the quantizers whose ``is_enabled()``
    is true, inputs before outputs for each wrapper.
    """
    enabled = []
    for wrapper in sim.quant_wrappers():
        # Activation quantizers sit on both sides of each wrapped op.
        for candidate in (*wrapper.input_quantizers, *wrapper.output_quantizers):
            if candidate.is_enabled():
                enabled.append(candidate)
    return enabled
class MyStringList(UserList[str]):
    """UserList subclass that wraps the caller's list directly (no copy).

    Unlike normal containers, item assignment via ``__setitem__`` returns
    the string that was replaced.
    """

    data: List[str]  # backing storage, intentionally shared with the caller

    def __init__(self, strings: List[str]):
        # Deliberately no copy and no super().__init__: mutations are visible
        # through the original list object.
        self.data = strings

    def __getitem__(self, index: int) -> str:
        return self.data[index]

    def __setitem__(self, index: int, item: str) -> str:
        previous = self.data[index]
        self.data[index] = item
        return previous

    def __len__(self) -> int:
        return len(self.data)
def test_editable_wheel_src_module(copy_sample):
    """An editable wheel for a src-layout module ships a .pth pointing at src/."""
    sample_dir = copy_sample('module3')
    make_wheel_in(sample_dir / 'pyproject.toml', sample_dir, editable=True)
    wheel = sample_dir / 'module3-0.1-py2.py3-none-any.whl'
    assert_isfile(wheel)
    with unpack(wheel) as contents:
        pth = Path(contents, 'module3.pth')
        assert_isfile(pth)
        # The .pth adds <sample>/src to sys.path so the module imports in place.
        assert pth.read_text() == str(sample_dir / 'src')
        assert_isdir(Path(contents, 'module3-0.1.dist-info'))
# Fix: the decorator had been stripped to a bare '.parametrize(...)'
# expression (a syntax error); restored as @pytest.mark.parametrize
# (requires 'pytest' imported at module top).
@pytest.mark.parametrize('max_labels,expected', [(10, [0.0, '', 2.0, '', 4.0, '', 6.0, '', 8.0, '', 10.0, '']), (5, [0.0, '', '', 3.0, '', '', 6.0, '', '', 9.0, '', '']), (3, [0.0, '', '', '', 4.0, '', '', '', 8.0, '', '', ''])])
def test_max_labels_step(max_labels, expected):
    """StepColormap thins its tick labels so at most ``max_labels`` are shown."""
    colorbar = cm.StepColormap((['red', 'blue'] * 5), vmin=0, vmax=10, max_labels=max_labels)
    try:
        colorbar.render()
    except AssertionError:
        # render() may require a parent figure; tick labels are computed anyway.
        pass
    assert (colorbar.tick_labels == expected)
def validator(func: Callable[(..., Any)]):
    """Wrap *func* so it returns ``True`` on truthy results and a
    ``ValidationError`` (carrying the call arguments, and the exception
    message if one was raised) otherwise.
    """
    # Local import keeps this fix self-contained in the block.
    from functools import wraps

    # Fix: the decorator had been stripped to a bare '(func)' expression;
    # restored so the wrapper keeps func's name/docstring.
    @wraps(func)
    def wrapper(*args: Any, **kwargs: Any):
        try:
            return (True if func(*args, **kwargs) else ValidationError(func, _func_args_as_dict(func, *args, **kwargs)))
        except Exception as exp:
            # Any raised exception also counts as a validation failure.
            return ValidationError(func, _func_args_as_dict(func, *args, **kwargs), str(exp))

    return wrapper
def typed_dict_get_signature_callback(ctx: MethodSigContext) -> CallableType:
    """Refine the signature of ``TypedDict.get`` for a literal string key.

    For ``td.get('key', default)`` with a known key, the key's value type is
    substituted into the generic signature so the inferred return type is
    ``value_type | type(default)`` rather than the loose default.
    """
    signature = ctx.default_signature
    # Only rewrite the exact two-argument form with a literal string key and
    # the expected generic shape (2 arg types, 1 type variable).
    if (isinstance(ctx.type, TypedDictType) and (len(ctx.args) == 2) and (len(ctx.args[0]) == 1) and isinstance(ctx.args[0][0], StrExpr) and (len(signature.arg_types) == 2) and (len(signature.variables) == 1) and (len(ctx.args[1]) == 1)):
        key = ctx.args[0][0].value
        value_type = get_proper_type(ctx.type.items.get(key))
        ret_type = signature.ret_type
        if value_type:
            default_arg = ctx.args[1][0]
            # ``td.get('k', {})`` where the value is itself a TypedDict:
            # treat the empty-dict default as that TypedDict with no
            # required keys so the union stays precise.
            if (isinstance(value_type, TypedDictType) and isinstance(default_arg, DictExpr) and (len(default_arg.items) == 0)):
                value_type = value_type.copy_modified(required_keys=set())
            tv = signature.variables[0]
            assert isinstance(tv, TypeVarType)
            return signature.copy_modified(arg_types=[signature.arg_types[0], make_simplified_union([value_type, tv])], ret_type=ret_type)
    # Anything else: leave the default signature untouched.
    return signature
def make_commitment_output_to_local_witness_script(revocation_pubkey: bytes, to_self_delay: int, delayed_pubkey: bytes) -> bytes:
    """Build the BOLT-3 to_local witness script as raw bytes.

    IF branch: spendable immediately with the revocation key;
    ELSE branch: spendable by the delayed key after ``to_self_delay`` (CSV).
    """
    script_ops = [
        opcodes.OP_IF,
        revocation_pubkey,
        opcodes.OP_ELSE,
        to_self_delay, opcodes.OP_CHECKSEQUENCEVERIFY, opcodes.OP_DROP,
        delayed_pubkey,
        opcodes.OP_ENDIF,
        opcodes.OP_CHECKSIG,
    ]
    return bfh(construct_script(script_ops))
def KPbands(material, host_material, return_edges_only=False, plot_result=False, t=0, p=(np.pi / 2), fraction=0.2, points=50, vin=None):
    """Eight-band k.p band structure of *material* strained to *host_material*.

    Args:
        material: Bulk material whose bands are computed (supplies Luttinger
            parameters, band gap, deformation potentials, ...).
        host_material: Substrate; its lattice constant sets the biaxial strain.
        return_edges_only: If True, return only the zone-centre band edges
            ``(c, hh, lh, so)`` instead of full dispersions.
        plot_result: If True, plot E(k) for every band.
        t, p: Polar/azimuthal angles selecting the k-space direction sampled.
        fraction, points, vin: Forwarded to ``kvector`` to build the k grid.

    Returns:
        Band-edge tuple, or an array ``[|k|, band_0(k), band_1(k), ...]``.
    """
    Delta = material.spin_orbit_splitting
    Eg = material.band_gap
    Ev0 = material.valence_band_offset
    Ec0 = (material.valence_band_offset + Eg)
    a0 = material.lattice_constant
    g1 = material.gamma1
    g2 = material.gamma2
    g3 = material.gamma3
    ac = material.a_c
    av = material.a_v
    b = material.b
    Ep = material.interband_matrix_element
    me_eff = (material.eff_mass_electron_Gamma * constants.electron_mass)
    # In-plane biaxial strain from lattice mismatch; out-of-plane strain
    # follows from the elastic constants (c12/c11).
    exx = ((host_material.lattice_constant - a0) / a0)
    ezz = ((((- 2) * material.c12) / material.c11) * exx)
    if return_edges_only:
        # Hamiltonian at k = 0; every second eigenvalue picks one member of
        # each doubly-degenerate pair.
        result = eight_band_strain_hamiltonian(0, 0, 0, Ev0, Ec0, exx, ezz, me_eff, g1, g2, g3, a0, Delta, ac, av, b, Ep)
        (so, lh, hh, c) = result[::2]
        if (a0 < host_material.lattice_constant):
            # Tensile strain reverses the light-/heavy-hole ordering.
            (lh, hh) = (hh, lh)
        return (c, hh, lh, so)
    allk = kvector(a=a0, t=t, p=p, fraction=fraction, points=points, vin=vin)
    bands = []
    for (kx, ky, kz) in allk:
        bands.append(eight_band_strain_hamiltonian(kx, ky, kz, Ev0, Ec0, exx, ezz, me_eff, g1, g2, g3, a0, Delta, ac, av, b, Ep))
    # First row: |k|; each following row is one energy branch along the path.
    output = [np.array([np.linalg.norm(vect) for vect in allk])]
    for (i, band) in enumerate(zip(*bands)):
        output.append(np.array(band))
    if plot_result:
        import matplotlib.pyplot as plt
        for i in range(1, len(output)):
            # NOTE(review): 'q' (presumably the elementary charge, for the
            # J -> eV conversion) must be a module-level constant -- confirm.
            plt.plot((((output[0] * a0) / 2) / np.pi), (output[i] / q), 'k')
        plt.xlabel('k (2*pi/a)')
        plt.ylabel('Energy (eV)')
        plt.show()
    output = np.array(output)
    return output
def generate_random_search_bash(model_name, data_name, task_num, bash_save_path='../fb_jobs/'):
    """Write ``task_num`` one-line bash job scripts for a KGE random search.

    Existing scripts under *bash_save_path* are wiped first; each generated
    ``run_<task_id>.sh`` invokes ``bash run.sh <parameter_id>``.
    """
    # Start from a clean directory: remove stale scripts, then (re)create it.
    if os.path.exists(bash_save_path):
        remove_all_files(bash_save_path)
    if bash_save_path and not os.path.exists(bash_save_path):
        os.makedirs(bash_save_path)
    space = HypeParameterSpace(model_name=model_name)
    search_job = KGERandomSearchJob(data_name=data_name, search_space=space, graph_on=1, mask_on=1)
    for index in range(task_num):
        # Trials are numbered from 1.
        task_id, parameter_id = search_job.single_task_trial(index + 1)
        script_path = '{}run_{}.sh'.format(bash_save_path, task_id)
        with open(script_path, 'w') as handle:
            handle.write('bash run.sh ' + parameter_id)
    print('{} jobs are generated in {}'.format(task_num, bash_save_path))
def test_lane():
    """Exercise xodr.Lane: userdata, equality semantics and schema validation."""
    # A default lane with attached user data serializes without error.
    lane = xodr.Lane()
    lane._set_lane_id(1)
    lane.add_userdata(xodr.UserData('key', 'value'))
    prettyprint(lane.get_element())
    # Fully parameterised driving lane.
    lane = xodr.Lane(xodr.LaneType.driving, 1, 1, 1, 1, 2)
    lane._set_lane_id(1)
    prettyprint(lane.get_element())
    lane2 = xodr.Lane(xodr.LaneType.driving, 1, 1, 1, 1, 2)
    lane2._set_lane_id(1)
    lane3 = xodr.Lane(xodr.LaneType.driving, 1, 1, 1, 3, 2)
    lane3._set_lane_id(1)
    # Identical parameters compare equal; one differing parameter breaks equality.
    assert lane == lane2
    assert lane != lane3
    # Positive id validates as a left lane.
    assert version_validation('t_road_lanes_laneSection_left_lane', lane, wanted_schema='xodr') == ValidationResponse.OK
    # Negative and zero ids validate as right and center lanes respectively.
    for lane_id, schema_name in ((-1, 't_road_lanes_laneSection_right_lane'), (0, 't_road_lanes_laneSection_center_lane')):
        lane3._set_lane_id(lane_id)
        assert version_validation(schema_name, lane3, wanted_schema='xodr') == ValidationResponse.OK
def test_ConnectionState_keepalive_protocol_switch_interaction() -> None:
    """Keep-alive disabled + pending Upgrade proposal: client must end up in
    MIGHT_SWITCH_PROTOCOL after its message, then MUST_CLOSE once the server
    declines the switch by sending a regular Response."""
    cs = ConnectionState()
    # Client proposes a protocol switch (Upgrade) before sending the request.
    cs.process_client_switch_proposal(_SWITCH_UPGRADE)
    cs.process_event(CLIENT, Request)
    cs.process_keep_alive_disabled()
    cs.process_event(CLIENT, Data)
    assert (cs.states == {CLIENT: SEND_BODY, SERVER: SEND_RESPONSE})
    # Request finished: client parks waiting for the server's switch decision.
    cs.process_event(CLIENT, EndOfMessage)
    assert (cs.states == {CLIENT: MIGHT_SWITCH_PROTOCOL, SERVER: SEND_RESPONSE})
    # Plain Response = switch denied; keep-alive is off, so client must close.
    cs.process_event(SERVER, Response)
    assert (cs.states == {CLIENT: MUST_CLOSE, SERVER: SEND_BODY})
def test_first_last_marks(item_names_for):
tests_content = '\n import pytest\n\n .order("last")\n def test_1(): pass\n\n .order("first")\n def test_2(): pass\n\n def test_3(): pass\n '
assert (item_names_for(tests_content) == ['test_2', 'test_3', 'test_1']) |
class Input(Widget):
    """Abstract HTML ``<input>`` widget.

    Renders an ``<input>`` of the given *input_type* (text, password,
    checkbox, ...). The current value is mirrored in the 'value' attribute
    and pushed back to the server on every client-side change event.

    Fix: the two decorator lines on ``onchange`` had been corrupted to
    ``_set_on_listener(...)`` / ``_event``; restored to remi's
    ``@decorate_set_on_listener`` / ``@decorate_event``.
    """

    def __init__(self, input_type='', default_value='', *args, **kwargs):
        """
        Args:
            input_type (str): HTML input type; also used as the default CSS class.
            default_value (str): initial value of the field.
        """
        if ('_class' not in kwargs):
            kwargs['_class'] = input_type
        super(Input, self).__init__(*args, **kwargs)
        self.type = 'input'
        self.attributes['value'] = str(default_value)
        self.attributes['type'] = input_type
        # Disable browser autofill so the server-side value stays authoritative.
        self.attributes['autocomplete'] = 'off'
        # Client-side JS: on change, send the field's value through remi's
        # callback channel so onchange() fires server-side.
        self.attributes[Widget.EVENT_ONCHANGE] = ("var params={};params['value']=document.getElementById('%(emitter_identifier)s').value;remi.sendCallbackParam('%(emitter_identifier)s','%(event_name)s',params);" % {'emitter_identifier': str(self.identifier), 'event_name': Widget.EVENT_ONCHANGE})

    def set_value(self, value):
        """Set the field's value (stringified)."""
        self.attributes['value'] = str(value)

    def get_value(self):
        """Return the current value string."""
        return self.attributes['value']

    @decorate_set_on_listener('(self, emitter, value)')
    @decorate_event
    def onchange(self, value):
        """Event fired when the user edits the input; syncs the new value."""
        self.attributes['value'] = value
        return (value,)

    def set_read_only(self, readonly):
        """Toggle the HTML 'readonly' attribute on the element."""
        if readonly:
            self.attributes['readonly'] = None
        else:
            try:
                del self.attributes['readonly']
            except KeyError:
                pass
class SubGraphView(object):
    """A view on a subgraph of an existing graph.

    An instance holds a list of "inside" ops plus derived lists of input,
    output and passthrough tensors computed from the ops' boundary.
    Instances are cheap, immutable-in-spirit views: the remap/copy methods
    return modified copies and never touch the underlying graph.

    NOTE(review): the accessor methods near the bottom (graph, ops, inputs,
    outputs, ...) are plain methods here; in the upstream TensorFlow
    graph_editor they are @property — the decorators appear to have been
    stripped from this copy. Confirm before changing call sites.
    """

    def __init__(self, inside_ops=(), passthrough_ts=()):
        """Create a subgraph view from ops and passthrough tensors.

        Args:
            inside_ops: ops (or objects convertible to ops) forming the body.
            passthrough_ts: tensors that flow straight through the view.
        """
        inside_ops = util.make_list_of_op(inside_ops)
        passthrough_ts = util.make_list_of_t(passthrough_ts)
        ops_and_ts = (inside_ops + passthrough_ts)
        if ops_and_ts:
            # All ops and tensors must come from one graph.
            self._graph = util.get_unique_graph(ops_and_ts)
            self._ops = inside_ops
            # Boundary tensors: inputs produced outside, outputs consumed outside.
            (inputs, outputs, insides) = select.compute_boundary_ts(inside_ops)
            all_tensors = frozenset(((inputs + outputs) + list(insides)))
            # A "passthrough" that already touches the boundary is dropped.
            self._passthrough_ts = [t for t in passthrough_ts if (t not in all_tensors)]
            self._input_ts = (inputs + self._passthrough_ts)
            self._output_ts = (outputs + self._passthrough_ts)
        else:
            # Empty view; __bool__ keys off _graph being None.
            self._graph = None
            self._passthrough_ts = []
            self._input_ts = []
            self._output_ts = []
            self._ops = []

    def __copy__(self):
        """Shallow-copy: share the graph object, copy every list field."""
        cls = self.__class__
        result = cls.__new__(cls)
        for (k, v) in iteritems(self.__dict__):
            if (k == '_graph'):
                setattr(result, k, v)
            else:
                # Lists are copied so mutating the copy never aliases the original.
                setattr(result, k, list(v))
        return result

    def _assign_from(self, other):
        """In-place overwrite of this view's state from another SubGraphView."""
        if (not isinstance(other, SubGraphView)):
            raise TypeError('Expected SubGraphView, got: {}'.format(type(other)))
        self._graph = other._graph
        self._ops = list(other._ops)
        self._passthrough_ts = list(other._passthrough_ts)
        self._input_ts = list(other._input_ts)
        self._output_ts = list(other._output_ts)

    def copy(self):
        """Return a copy of this view (delegates to __copy__)."""
        return copy.copy(self)

    def _remap_default(self, remove_input_map=True, remove_output_map=True):
        """In-place: recompute inputs/outputs from the boundary, discarding remaps."""
        if ((not remove_input_map) and (not remove_output_map)):
            return
        (inputs, outputs, _) = select.compute_boundary_ts(self._ops)
        if remove_input_map:
            self._input_ts = (list(inputs) + self._passthrough_ts)
        if remove_output_map:
            self._output_ts = (list(outputs) + self._passthrough_ts)

    def remap_default(self, remove_input_map=True, remove_output_map=True):
        """Return a copy with inputs/outputs reset to the computed boundary."""
        res = self.copy()
        res._remap_default(remove_input_map, remove_output_map)
        return res

    def _remap_inputs(self, new_input_indices):
        """In-place reorder/subset of inputs; repetition is not allowed."""
        new_input_indices = _finalize_indices(new_input_indices, self._input_ts)
        _check_within_range(new_input_indices, len(self._input_ts), repetition=False)
        self._input_ts = [self._input_ts[i] for i in new_input_indices]

    def _remap_outputs(self, new_output_indices):
        """In-place reorder/subset of outputs; repetition IS allowed."""
        new_output_indices = _finalize_indices(new_output_indices, self._output_ts)
        _check_within_range(new_output_indices, len(self._output_ts), repetition=True)
        self._output_ts = [self._output_ts[i] for i in new_output_indices]

    def _remap_outputs_make_unique(self):
        """In-place de-duplication of the output tensors (order-preserving)."""
        output_ts = list(self._output_ts)
        self._output_ts = []
        util.concatenate_unique(self._output_ts, output_ts)

    def _remap_outputs_to_consumers(self):
        """In-place: repeat each output once per consumer op."""
        self._remap_outputs_make_unique()
        output_ts = list(self._output_ts)
        self._output_ts = []
        for t in output_ts:
            self._output_ts += ([t] * len(t.consumers()))

    def remap_outputs_make_unique(self):
        """Return a copy with duplicate outputs removed."""
        res = copy.copy(self)
        res._remap_outputs_make_unique()
        return res

    def remap_outputs_to_consumers(self):
        """Return a copy with outputs repeated once per consumer."""
        res = copy.copy(self)
        res._remap_outputs_to_consumers()
        return res

    def _remove_unused_ops(self, control_inputs=True):
        """In-place: keep only ops reachable between connected inputs and outputs."""
        ops = select.get_walks_union_ops(self.connected_inputs, self.connected_outputs, within_ops=self._ops, control_inputs=control_inputs)
        self._ops = [op for op in self._ops if (op in ops)]

    def remove_unused_ops(self, control_inputs=True):
        """Return a copy with unreachable ops pruned."""
        res = copy.copy(self)
        res._remove_unused_ops(control_inputs)
        return res

    def remap_inputs(self, new_input_indices):
        """Return a copy with inputs remapped to the given indices."""
        res = self.copy()
        res._remap_inputs(new_input_indices)
        return res

    def remap_outputs(self, new_output_indices):
        """Return a copy with outputs remapped to the given indices."""
        res = copy.copy(self)
        res._remap_outputs(new_output_indices)
        return res

    def remap(self, new_input_indices=None, new_output_indices=None):
        """Return a copy with inputs and/or outputs remapped (None = keep)."""
        res = copy.copy(self)
        if (new_input_indices is not None):
            res._remap_inputs(new_input_indices)
        if (new_output_indices is not None):
            res._remap_outputs(new_output_indices)
        return res

    def find_op_by_name(self, op_name):
        """Return the unique inside-op with the given name.

        Raises:
            ValueError: no op with that name in the subgraph.
            AssertionError: more than one op carries the name.
        """
        res = [op for op in self._ops if (op.name == op_name)]
        if (not res):
            raise ValueError('{} not in subgraph.'.format(op_name))
        if (len(res) > 1):
            raise AssertionError('More than 1 op named: {}!'.format(op_name))
        return res[0]

    def __str__(self):
        """Human-readable dump: ops, inputs, outputs ('*' marks passthroughs)."""
        if (not self):
            return 'SubGraphView: empty'
        def op_name(op):
            return op.name
        def tensor_name(t):
            # Passthrough tensors are flagged with a trailing '*'.
            if (t in self._passthrough_ts):
                return '{} *'.format(t.name)
            else:
                return t.name
        def print_list(name, iterable, get_name):
            # Closes over `res`, which is bound below before any call.
            if iterable:
                print('** {}[{}]:'.format(name, len(iterable)), file=res)
                print('\n'.join(['  {}'.format(get_name(elem)) for elem in iterable]), file=res)
            else:
                print('** {}: empty'.format(name), file=res)
        res = StringIO()
        print('SubGraphView (graphid={}):'.format(id(self.graph)), file=res)
        print_list('ops', self._ops, op_name)
        print_list('inputs', self._input_ts, tensor_name)
        print_list('outputs', self._output_ts, tensor_name)
        return res.getvalue()

    def graph(self):
        """The underlying graph (None for an empty view)."""
        return self._graph

    def ops(self):
        """The list of inside ops."""
        return self._ops

    def inputs(self):
        """Read-only view of the input tensors."""
        return util.ListView(self._input_ts)

    def connected_inputs(self):
        """Input tensors excluding passthroughs."""
        return [t for t in self._input_ts if (t not in self._passthrough_ts)]

    def outputs(self):
        """Read-only view of the output tensors."""
        return util.ListView(self._output_ts)

    def connected_outputs(self):
        """Output tensors excluding passthroughs."""
        return [t for t in self._output_ts if (t not in self._passthrough_ts)]

    def passthroughs(self):
        """Read-only view of the passthrough tensors."""
        return util.ListView(self._passthrough_ts)

    def __bool__(self):
        """A view is truthy iff it is non-empty (has a graph)."""
        return (self._graph is not None)
    # Python 2 compatibility alias.
    __nonzero__ = __bool__

    def op(self, op_id):
        """Return the inside op at index op_id."""
        return self._ops[op_id]

    def is_passthrough(self, t):
        """True if tensor t is one of this view's passthroughs."""
        return (t in self._passthrough_ts)

    def __enter__(self):
        # Context-manager support so views can be used in `with` blocks.
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        pass

    def input_index(self, t):
        """Return the index of tensor t within the inputs.

        NOTE(review): the bare `except:` masks any error as ValueError, and
        `self.name` is not defined on this class, so the error path itself
        would raise AttributeError — confirm against upstream.
        """
        try:
            subgraph_id = self._input_ts.index(t)
        except:
            raise ValueError("Can't find {} in inputs of subgraph {}.".format(t.name, self.name))
        return subgraph_id

    def output_index(self, t):
        """Return the index of tensor t within the outputs.

        NOTE(review): same bare-except / undefined `self.name` caveat as
        input_index above.
        """
        try:
            subgraph_id = self._output_ts.index(t)
        except:
            raise ValueError("Can't find {} in outputs of subgraph {}.".format(t.name, self.name))
        return subgraph_id

    def consumers(self):
        """Return the unique ops outside the view that consume its outputs."""
        ops_set = frozenset(self._ops)
        res = []
        for output in self._output_ts:
            consumers = [op for op in output.consumers() if (op not in ops_set)]
            util.concatenate_unique(res, consumers)
        return res
# Fix: the decorator line had been corrupted to a bare `.usefixtures(...)`;
# restored to pytest's marker form.
@pytest.mark.usefixtures('needs_assert_rewrite')
def test_assert_called_kwargs_with_introspection(mocker: MockerFixture) -> None:
    """Failed kwargs assertions on a stub produce introspected diffs."""
    stub = mocker.stub()
    complex_kwargs = dict(foo={'bar': 1, 'baz': 'spam'})
    wrong_kwargs = dict(foo={'goo': 1, 'baz': 'bran'})
    stub(**complex_kwargs)
    # Matching kwargs pass both assertion flavours.
    stub.assert_called_with(**complex_kwargs)
    stub.assert_called_once_with(**complex_kwargs)
    # Mismatching kwargs must fail with an argument-introspection report.
    with assert_argument_introspection(complex_kwargs, wrong_kwargs):
        stub.assert_called_with(**wrong_kwargs)
        stub.assert_called_once_with(**wrong_kwargs)
class DisplayNotebook(ttk.Notebook):
    """Notebook widget in the GUI's display area.

    Holds the permanently available 'analysis' tab plus dynamic tabs
    (extract/convert preview, training graph and preview) that are rebuilt
    whenever the running command changes.
    """

    def __init__(self, parent):
        logger.debug('Initializing %s', self.__class__.__name__)
        super().__init__(parent)
        parent.add(self)
        tk_vars = get_config().tk_vars
        # 'display' names the currently running command; 'runningtask' flags activity.
        self.wrapper_var = tk_vars['display']
        self.runningtask = tk_vars['runningtask']
        self.set_wrapper_var_trace()
        self.add_static_tabs()
        # Tabs present at init time survive remove_tabs().
        self.static_tabs = [child for child in self.tabs()]
        logger.debug('Initialized %s', self.__class__.__name__)

    def set_wrapper_var_trace(self):
        """Rebuild the dynamic tabs whenever the running command changes."""
        logger.debug('Setting wrapper var trace')
        self.wrapper_var.trace('w', self.update_displaybook)

    def add_static_tabs(self):
        """Add the always-present tabs (currently only 'analysis')."""
        logger.debug('Adding static tabs')
        for tab in ('job queue', 'analysis'):
            if (tab == 'job queue'):
                # Not yet implemented.
                continue
            if (tab == 'analysis'):
                helptext = {'stats': 'Summary statistics for each training session'}
                # Analysis registers itself as a tab.
                frame = Analysis(self, tab, helptext)
            else:
                frame = self.add_frame()
                self.add(frame, text=tab.title())

    def add_frame(self):
        """Return a new packed frame to host a static tab."""
        logger.debug('Adding frame')
        frame = ttk.Frame(self)
        frame.pack(side=tk.LEFT, fill=tk.BOTH, expand=True, padx=5, pady=5)
        return frame

    def command_display(self, command):
        """Dispatch to the '<command>_tabs' builder method by name."""
        build_tabs = getattr(self, '{}_tabs'.format(command))
        build_tabs()

    def extract_tabs(self, command='extract'):
        """Build the display tabs for an extraction (or conversion) run."""
        logger.debug('Build extract tabs')
        helptext = 'Updates preview from output every 5 seconds to limit disk contention'
        # Preview polls every 5000 ms.
        PreviewExtract(self, 'preview', helptext, 5000, command)
        logger.debug('Built extract tabs')

    def train_tabs(self):
        """Build the display tabs for a training run (loss graph + preview)."""
        logger.debug('Build train tabs')
        for tab in ('graph', 'preview'):
            if (tab == 'graph'):
                helptext = 'Graph showing Loss vs Iterations'
                GraphDisplay(self, 'graph', helptext, 5000)
            elif (tab == 'preview'):
                helptext = 'Training preview. Updated on every save iteration'
                PreviewTrain(self, 'preview', helptext, 1000)
        logger.debug('Built train tabs')

    def convert_tabs(self):
        """Build the display tabs for a conversion run (reuses extract tabs)."""
        logger.debug('Build convert tabs')
        self.extract_tabs(command='convert')
        logger.debug('Built convert tabs')

    def remove_tabs(self):
        """Destroy and forget every non-static tab."""
        for child in self.tabs():
            if (child in self.static_tabs):
                continue
            logger.debug('removing child: %s', child)
            # Map the Tk widget path (last path component) back to the object
            # so it can release its resources before the tab is forgotten.
            child_name = child.split('.')[(- 1)]
            child_object = self.children[child_name]
            child_object.close()
            self.forget(child)

    def update_displaybook(self, *args):
        """Trace callback: tear down old tabs and build those for the new command."""
        command = self.wrapper_var.get()
        self.remove_tabs()
        if ((not command) or (command not in ('extract', 'train', 'convert'))):
            return
        self.command_display(command)
def test_dependency_constraints_file(tmp_path, build_frontend_env):
    """CIBW_DEPENDENCY_VERSIONS pointing at a constraints file must pin the
    build tools to exactly the listed versions (verified inside the build)."""
    if utils.platform == 'linux':
        pytest.skip("linux doesn't pin individual tool versions, it pins manylinux images instead")
    project_dir = tmp_path / 'project'
    project_with_expected_version_checks.generate(project_dir)
    tool_versions = {'pip': '23.1.2', 'setuptools': '67.7.2', 'wheel': '0.38.3', 'virtualenv': '20.23.0'}
    # Filename contains a space on purpose, to exercise path quoting.
    constraints_file = tmp_path / 'constraints file.txt'
    constraints_file.write_text(textwrap.dedent('\n pip=={pip}\n setuptools=={setuptools}\n wheel=={wheel}\n virtualenv=={virtualenv}\n importlib-metadata<3,>=0.12; python_version < "3.8"\n '.format(**tool_versions)))
    # The generated project asserts EXPECTED_<TOOL>_VERSION at build time.
    build_environment = {f'EXPECTED_{name.upper()}_VERSION': version for name, version in tool_versions.items()}
    cibw_environment_option = ' '.join(f'{k}={v}' for k, v in build_environment.items())
    actual_wheels = utils.cibuildwheel_run(project_dir, add_env={'CIBW_ENVIRONMENT': cibw_environment_option, 'CIBW_DEPENDENCY_VERSIONS': str(constraints_file), 'CIBW_SKIP': 'cp36-*', **build_frontend_env})
    expected_wheels = [w for w in utils.expected_wheels('spam', '0.1.0') if '-cp36' not in w]
    assert set(actual_wheels) == set(expected_wheels)
class DistributionFinder(MetaPathFinder):
    """A MetaPathFinder capable of discovering installed distributions."""

    class Context():
        """Keyword arguments presented by the caller to find_distributions()."""

        # Specific distribution name to match; None means "all distributions".
        name = None

        def __init__(self, **kwargs):
            vars(self).update(kwargs)

        def path(self) -> List[str]:
            # Directory paths to search; defaults to sys.path.
            # NOTE(review): in importlib.metadata this is a @property — the
            # decorator appears to have been stripped from this copy; confirm.
            return vars(self).get('path', sys.path)

    def find_distributions(self, context=Context()) -> Iterable[Distribution]:
        """Find distributions matching ``context`` and return an iterable of
        Distribution instances.

        Abstract in the upstream implementation (its @abstractmethod decorator
        and body appear to have been lost here); subclasses must override.
        """
class SEResNetBlock(nn.Module):
    """Basic two-conv ResNet block with a Squeeze-and-Excitation module.

    Attribute names/order are part of the interface (state_dict keys), so the
    structure is kept exactly as declared.
    """
    # Basic blocks do not expand the channel count.
    expansion = 1

    def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None):
        super(SEResNetBlock, self).__init__()
        # Downsampling (if any) happens in the first 3x3 conv via `stride`.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, padding=1, stride=stride, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        # Channel-wise recalibration applied to the residual branch output.
        self.se_module = SEModule(planes, reduction=reduction)
        # Optional projection to match the identity path's shape/channels.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        if (self.downsample is not None):
            residual = self.downsample(x)
        # SE-scaled branch + (possibly projected) identity, then final ReLU.
        out = (self.se_module(out) + residual)
        out = self.relu(out)
        return out
class TimedLoop(BlockingLoop):
    """Rollout loop that advances the environment on a fixed wall-clock period
    (the env's move_duration), acting and grabbing observations in real time."""

    def __init__(self, hyperparams):
        self._hp = self._default_hparams()
        self._override_defaults(hyperparams)
        super(TimedLoop, self).__init__(hyperparams)

    def _default_hparams(self):
        """Parent defaults plus confirmation / grasp / traj-ok prompt flags."""
        default_dict = AttrDict(ask_confirmation=True, absolute_grasp_action=True, ask_traj_ok=True)
        parent_params = super(TimedLoop, self)._default_hparams()
        parent_params.update(default_dict)
        return parent_params

    def rollout(self, policy, i_trial, i_traj):
        """Run one timed trajectory.

        Returns:
            (agent_data, obs, policy_outputs); on an environment failure,
            ({'traj_ok': False}, None, None).
        """
        self._init()
        (agent_data, policy_outputs) = ({}, [])
        done = False
        policy.reset()
        initial_env_obs = self.reset_env(i_traj)
        obs = self._post_process_obs(initial_env_obs, agent_data, True)
        # Per-trajectory verbose logging directory.
        self.traj_log_dir = (self._hp.log_dir + '/verbose/traj{}'.format(i_traj))
        if (not os.path.exists(self.traj_log_dir)):
            os.makedirs(self.traj_log_dir)
        policy.set_log_dir(self.traj_log_dir)
        self.env.start()
        step_duration = self.env._hp.move_duration
        last_tstep = time.time()
        while ((self._cache_cntr < self._hp.T) and (not done)):
            # Busy-wait: only act once a full control period has elapsed.
            if (time.time() > (last_tstep + step_duration)):
                # Warn when a cycle overran its budget by more than 5%.
                if ((time.time() - last_tstep) > (step_duration * 1.05)):
                    print('')
                    print('Warning, loop takes too long: {}s!!!'.format((time.time() - last_tstep)))
                    print('')
                last_tstep = time.time()
                print('tstep', self._cache_cntr)
                pi_t = policy.act(**get_policy_args(policy, obs, self._cache_cntr, i_traj, agent_data))
                if ('done' in pi_t):
                    done = pi_t['done']
                try:
                    # Request the observation slightly before the period ends so
                    # sensor capture lines up with the end of the step.
                    TIME_FOR_GET_OBS = 0.05
                    tstamp_get_obs = ((last_tstep + step_duration) - TIME_FOR_GET_OBS)
                    obs = self.env.step(pi_t['actions'], tstamp_get_obs, blocking=False)
                    obs = self._post_process_obs(obs, agent_data)
                except Environment_Exception as e:
                    # Abort the trajectory; caller sees traj_ok=False.
                    print(e)
                    return ({'traj_ok': False}, None, None)
                policy_outputs.append(pi_t)
                # Stop at the horizon or when the env reports it is done.
                if (((self._hp.T - 1) == self._cache_cntr) or obs['env_done'][(- 1)]):
                    done = True
        self.env.finish()
        traj_ok = self.env.valid_rollout()
        if self._hp.rejection_sample:
            assert self.env.has_goal(), 'Rejection sampling enabled but env has no goal'
            traj_ok = self.env.goal_reached()
            print('goal_reached', traj_ok)
        if self._hp.ask_confirmation:
            # Human-in-the-loop acceptance overrides automatic validity.
            traj_ok = self.env.ask_confirmation()
        agent_data['traj_ok'] = traj_ok
        if ('images' in obs):
            agent_data['camera_info'] = self.env.camera_info
        if ('depth_images' in obs):
            agent_data['depth_camera_info'] = self.env.depth_camera_info
        self._required_rollout_metadata(agent_data, self._cache_cntr, traj_ok)
        return (agent_data, obs, policy_outputs)
def test_flicker_hhd13():
    """Parse a HHD 1.3 flicker code and check its decoded fields and rendering.

    NOTE(review): the input literal ',00' and several expectations below look
    truncated/garbled (empty startcode with lc 29, render 'C30303B'); confirm
    against the original fixture before relying on these values.
    """
    flicker = parse(',00')
    assert (flicker.version == HHD_VERSION_13)
    assert (flicker.lc == 29)
    assert (flicker.startcode.data == '')
    assert (flicker.de1.data == '')
    assert (flicker.de2.data == '15,00')
    assert (flicker.de3.data is None)
    assert (flicker.render() == 'C30303B')
class StickSlipOscillator(DynSys):
    """Stick-slip friction oscillator dynamical system.

    NOTE(review): _rhs and _postprocessing take no `self`; upstream (dysts)
    declares them with a @staticjit decorator that appears to have been
    stripped from this copy — confirm before calling them as bound methods.
    """

    def _t(self, v):
        # Friction torque: Coulomb term (sign) minus viscous (alpha) plus cubic (beta).
        return (((self.t0 * np.sign(v)) - (self.alpha * v)) + (self.beta * (v ** 3)))

    def _rhs(x, v, th, t, a, alpha, b, beta, eps, gamma, t0, vs, w):
        # Torque evaluated relative to the belt speed vs.
        tq = (((t0 * np.sign((v - vs))) - (alpha * v)) + (beta * ((v - vs) ** 3)))
        xdot = v
        vdot = (((eps * ((gamma * np.cos(th)) - tq)) + (a * x)) - (b * (x ** 3)))
        thdot = w
        return (xdot, vdot, thdot)

    def _postprocessing(x, v, th):
        # Report the drive phase as cos(theta) to keep the coordinate bounded.
        return (x, v, np.cos(th))
class Logger(object):
    """Tee messages to stdout and to an INFO-level log file at ``path``."""

    def __init__(self, path):
        # Root logger, so records from other modules also land in the file.
        self.logger = logging.getLogger()
        self.path = path
        self.setup_file_logger()
        print('Logging to file: ', self.path)

    def setup_file_logger(self):
        """Attach a truncating ('w+') file handler for ``self.path``."""
        file_handler = logging.FileHandler(self.path, 'w+')
        self.logger.addHandler(file_handler)
        self.logger.setLevel(logging.INFO)

    def log(self, message):
        """Echo ``message`` to stdout and record it at INFO level."""
        print(message)
        self.logger.info(message)
def parse_paths(x: (Any | None)) -> (list[Path] | None):
    """Normalize a user-supplied path option into resolved Path objects.

    Accepts None (passed through), a single string (deprecated; warns), or a
    list of path-likes. Each entry must exist; entries are then expanded with
    glob and resolved.
    """
    if x is None:
        return None
    if isinstance(x, str):
        msg = f"""Specifying paths as a string in 'pyproject.toml' is deprecated and will result in an error in v0.5. Please use a list of strings instead: ["{x}"]."""
        warnings.warn(msg, category=FutureWarning, stacklevel=1)
        x = [x]
    candidates = [Path(entry) for entry in to_list(x)]
    # Validate existence before any glob expansion.
    for candidate in candidates:
        if candidate.exists():
            continue
        msg = f"The path '{candidate}' does not exist."
        raise FileNotFoundError(msg)
    resolved: list[Path] = []
    for candidate in candidates:
        resolved.extend(Path(match).resolve() for match in glob.glob(candidate.as_posix()))
    return resolved
def info_nce(query, positive_key, negative_keys=None, temperature=0.1, reduction='mean', negative_mode='unpaired'):
    """InfoNCE contrastive loss.

    Fix: the three similarity computations had been corrupted to
    ``(query transpose(...))``; restored to the ``@`` matrix product
    ``query @ transpose(...)``. Also fixed the duplicated word in the
    sample-count error message.

    Args:
        query: (N, D) query embeddings.
        positive_key: (N, D) positive sample per query.
        negative_keys: optional negatives — (M, D) for 'unpaired' mode
            (shared across queries) or (N, M, D) for 'paired' mode. When
            None, the other in-batch positives serve as negatives.
        temperature: divisor applied to the logits.
        reduction: forwarded to F.cross_entropy.
        negative_mode: 'unpaired' or 'paired'.

    Returns:
        The cross-entropy loss (scalar unless reduction='none').
    """
    # --- Validate shapes --------------------------------------------------
    if (query.dim() != 2):
        raise ValueError('<query> must have 2 dimensions.')
    if (positive_key.dim() != 2):
        raise ValueError('<positive_key> must have 2 dimensions.')
    if (negative_keys is not None):
        if ((negative_mode == 'unpaired') and (negative_keys.dim() != 2)):
            raise ValueError("<negative_keys> must have 2 dimensions if <negative_mode> == 'unpaired'.")
        if ((negative_mode == 'paired') and (negative_keys.dim() != 3)):
            raise ValueError("<negative_keys> must have 3 dimensions if <negative_mode> == 'paired'.")
    if (len(query) != len(positive_key)):
        raise ValueError('<query> and <positive_key> must have the same number of samples.')
    if (negative_keys is not None):
        if ((negative_mode == 'paired') and (len(query) != len(negative_keys))):
            raise ValueError("If negative_mode == 'paired', then <negative_keys> must have the same number of samples as <query>.")
    if (query.shape[(- 1)] != positive_key.shape[(- 1)]):
        raise ValueError('Vectors of <query> and <positive_key> should have the same number of components.')
    if (negative_keys is not None):
        if (query.shape[(- 1)] != negative_keys.shape[(- 1)]):
            raise ValueError('Vectors of <query> and <negative_keys> should have the same number of components.')
    # --- Compute logits ---------------------------------------------------
    # Normalize to unit vectors so dot products are cosine similarities.
    (query, positive_key, negative_keys) = normalize(query, positive_key, negative_keys)
    if (negative_keys is not None):
        # Per-query similarity with its own positive: (N, 1).
        positive_logit = torch.sum((query * positive_key), dim=1, keepdim=True)
        if (negative_mode == 'unpaired'):
            # (N, D) @ (D, M) -> (N, M): every query against every negative.
            negative_logits = query @ transpose(negative_keys)
        elif (negative_mode == 'paired'):
            query = query.unsqueeze(1)
            # (N, 1, D) @ (N, D, M) -> (N, 1, M): per-query negatives.
            negative_logits = query @ transpose(negative_keys)
            negative_logits = negative_logits.squeeze(1)
        # The positive sits in column 0, so the target class is always 0.
        logits = torch.cat([positive_logit, negative_logits], dim=1)
        labels = torch.zeros(len(logits), dtype=torch.long, device=query.device)
    else:
        # No explicit negatives: other in-batch positives act as negatives;
        # the i-th positive is the correct class for the i-th query.
        logits = query @ transpose(positive_key)
        labels = torch.arange(len(query), device=query.device)
    return F.cross_entropy((logits / temperature), labels, reduction=reduction)
class ChannetDwsConvBlock(nn.Module):
    """Depthwise-separable convolution block for ChannelNet: a 3x3 depthwise
    conv followed by an (optionally grouped) 1x1 pointwise conv with dropout."""

    def __init__(self, in_channels, out_channels, stride, groups=1, dropout_rate=0.0):
        super(ChannetDwsConvBlock, self).__init__()
        # Depthwise stage keeps the channel count; `stride` handles downsampling.
        self.dw_conv = dwconv3x3(
            in_channels=in_channels,
            out_channels=in_channels,
            stride=stride)
        # Pointwise stage mixes channels (grouped when groups > 1).
        self.pw_conv = channet_conv1x1(
            in_channels=in_channels,
            out_channels=out_channels,
            groups=groups,
            dropout_rate=dropout_rate)

    def forward(self, x):
        return self.pw_conv(self.dw_conv(x))
class TestSEVIRICalibrationAlgorithm(unittest.TestCase):
    """Unit tests for the SEVIRI calibration algorithm (counts -> radiance,
    IR -> brightness temperature, VIS -> reflectance)."""

    def setUp(self):
        self.algo = SEVIRICalibrationAlgorithm(platform_id=PLATFORM_ID, scan_time=datetime(2020, 8, 15, 13, 0, 40))

    def test_convert_to_radiance(self):
        """Counts convert to the expected radiances and stay float32."""
        result = self.algo.convert_to_radiance(COUNTS_INPUT, GAIN, OFFSET)
        xr.testing.assert_allclose(result, RADIANCES_OUTPUT)
        assert (result.dtype == np.float32)

    def test_ir_calibrate(self):
        """Both IR calibration modes yield the expected brightness temperatures;
        an unknown calibration mode raises NotImplementedError."""
        result = self.algo.ir_calibrate(RADIANCES_OUTPUT, CHANNEL_NAME, CAL_TYPE1)
        xr.testing.assert_allclose(result, TBS_OUTPUT1, rtol=1e-05)
        assert (result.dtype == np.float32)
        result = self.algo.ir_calibrate(RADIANCES_OUTPUT, CHANNEL_NAME, CAL_TYPE2)
        xr.testing.assert_allclose(result, TBS_OUTPUT2, rtol=1e-05)
        with pytest.raises(NotImplementedError):
            self.algo.ir_calibrate(RADIANCES_OUTPUT, CHANNEL_NAME, CAL_TYPEBAD)

    def test_vis_calibrate(self):
        """VIS008 radiance calibrates to reflectance with the sun-earth
        distance correction applied and float32 dtype."""
        result = self.algo.vis_calibrate(VIS008_RADIANCE, VIS008_SOLAR_IRRADIANCE)
        xr.testing.assert_allclose(result, VIS008_REFLECTANCE)
        assert result.sun_earth_distance_correction_applied
        assert (result.dtype == np.float32)
class G4FHandler(LLMHandler):
    """LLM handler backed by the g4f library.

    Fixes: removed an earlier duplicate definition of ``set_history`` that was
    silently shadowed by the later one; removed unreachable code after the
    ``return ''`` stub in ``get_suggestions``; corrected the return annotation
    of ``convert_history`` (it returns a list, not a dict).
    """

    def __init__(self, settings, path, llm):
        self.history = []
        self.prompts = []
        self.key = 'g4f'
        self.settings = settings
        self.llm = llm
        self.path = path

    def send_message(self, window, message):
        """Record the system prompt as a user turn, then generate a reply."""
        self.history.append({'User': 'User', 'Message': ((window.bot_prompt + '\n') + '\n'.join(self.prompts))})
        return self.generate_response(window, message)

    def send_message_stream(self, window, message, on_update, extra_args):
        """Streaming variant of send_message."""
        self.history.append({'User': 'User', 'Message': ((window.bot_prompt + '\n') + '\n'.join(self.prompts))})
        return self.generate_response_stream(window, message, on_update, extra_args)

    def get_suggestions(self, window, message):
        # Suggestions are deliberately disabled for this backend; the original
        # generate_response fallback after this return was unreachable and has
        # been removed.
        return ''

    def convert_history(self, history: dict) -> list:
        """Convert internal {'User', 'Message'} records to role/content dicts."""
        result = []
        for message in history:
            result.append({'role': message['User'].lower(), 'content': message['Message']})
        return result

    def set_history(self, prompts, window):
        """Take the last `memory` chat turns (minus the newest) plus prompts.

        Note: an earlier string-building duplicate of this method existed in
        the original file but was shadowed by this definition; it was removed.
        """
        self.history = window.chat[(len(window.chat) - window.memory):(len(window.chat) - 1)]
        self.prompts = prompts
# NOTE(review): the four lines below appear to be decorators whose leading
# '@<name>' tokens were corrupted when this file was exported (a POST route
# registration, a parameter-validation decorator, a JSON response wrapper and
# an operation-log decorator). Restore them from the original source before
# running this module.
('/update/view_domain', methods=['POST'])
_params([dict(name='rooms', type=dict, required=False, nullable=False), dict(name='domain_name', type=str, required=True, nullable=False), dict(name='cnames', type=dict, required=False, nullable=False)], need_username=True)
_wrapper_json
_web_opration_log('update_view_domain', get_op_info=add_view_domain_log)
def update_view_domain(username, domain_name, rooms=None, cnames=None):
    """Update an existing view domain's rooms/cnames on behalf of *username*."""
    return ViewRecordDal.upsert_view_domain(username, domain_name, rooms, cnames, 'update')
class Test_threaded(unittest.TestCase):
    """Integration tests for serial.threaded protocols against a real/loopback
    port (PORT); each test round-trips data through a ReaderThread."""

    def test_line_reader(self):
        """Lines written through a LineReader protocol are received back intact."""

        class TestLines(serial.threaded.LineReader):
            """LineReader that collects every received line."""

            def __init__(self):
                super(TestLines, self).__init__()
                self.received_lines = []

            def handle_line(self, data):
                self.received_lines.append(data)

        ser = serial.serial_for_url(PORT, baudrate=115200, timeout=1)
        with serial.threaded.ReaderThread(ser, TestLines) as protocol:
            protocol.write_line('hello')
            protocol.write_line('world')
            # Give the loopback + reader thread time to deliver both lines.
            time.sleep(1)
            self.assertEqual(protocol.received_lines, ['hello', 'world'])

    def test_framed_packet(self):
        """Packets framed with START/STOP markers are reassembled correctly."""

        class TestFramedPacket(serial.threaded.FramedPacket):
            """FramedPacket that collects received packets and can send them."""

            def __init__(self):
                super(TestFramedPacket, self).__init__()
                self.received_packets = []

            def handle_packet(self, packet):
                self.received_packets.append(packet)

            def send_packet(self, packet):
                # Wrap the payload in the protocol's framing bytes.
                self.transport.write(self.START)
                self.transport.write(packet)
                self.transport.write(self.STOP)

        ser = serial.serial_for_url(PORT, baudrate=115200, timeout=1)
        with serial.threaded.ReaderThread(ser, TestFramedPacket) as protocol:
            protocol.send_packet(b'1')
            protocol.send_packet(b'2')
            protocol.send_packet(b'3')
            # Allow time for all three frames to loop back.
            time.sleep(1)
            self.assertEqual(protocol.received_packets, [b'1', b'2', b'3'])
# Fix: the decorator line had been corrupted to a bare `.script`; restored to
# TorchScript compilation as used upstream.
@torch.jit.script
def _batch_detection(batch_size: int, class_out, box_out, anchor_boxes, indices, classes, img_scale: Optional[torch.Tensor]=None, img_size: Optional[torch.Tensor]=None):
    """Generate detections per image and stack them into one batch tensor.

    img_scale/img_size are optional per-image tensors; None is forwarded to
    generate_detections unchanged.
    """
    batch_detections = []
    for i in range(batch_size):
        img_scale_i = None if img_scale is None else img_scale[i]
        img_size_i = None if img_size is None else img_size[i]
        detections = generate_detections(class_out[i], box_out[i], anchor_boxes, indices[i], classes[i], img_scale_i, img_size_i)
        batch_detections.append(detections)
    return torch.stack(batch_detections, dim=0)
def _get_config_directory():
    """Locate the repository's ``configs`` directory.

    Falls back to the installed mmseg package location when ``__file__`` is
    unavailable (e.g. in some interactive sessions).
    """
    try:
        repo_root = dirname(dirname(__file__))
    except NameError:
        import mmseg
        repo_root = dirname(dirname(mmseg.__file__))
    config_dir = join(repo_root, 'configs')
    if exists(config_dir):
        return config_dir
    raise Exception('Cannot find config path')
def train(epoch, criterion_list, optimizer):
    """Run one training epoch of the multi-branch self-distillation model.

    Uses module-level globals: net, args, trainloader. criterion_list holds
    (classification, divergence/KL, contrastive-KD) losses. Appends epoch
    statistics to result/<script><arch>.txt.
    """
    train_loss = 0.0
    train_loss_cls = 0.0
    train_loss_div = 0.0
    train_loss_kd = 0.0
    # Per-branch running accuracy counters.
    correct = ([0] * args.num_branches)
    total = ([0] * args.num_branches)
    lr = adjust_lr(optimizer, epoch)
    start_time = time.time()
    criterion_cls = criterion_list[0]
    criterion_div = criterion_list[1]
    criterion_kd = criterion_list[2]
    net.train()
    batch_start_time = 0.0
    for (batch_idx, (input, target, index, contrast_idx)) in enumerate(trainloader):
        # NOTE: 'input' shadows the builtin; kept as-is to avoid drift.
        input = input.float()
        input = input.cuda()
        target = target.cuda()
        index = index.cuda()
        contrast_idx = contrast_idx.cuda()
        optimizer.zero_grad()
        (logits, embedding) = net(input)
        loss_cls = 0.0
        loss_div = 0.0
        loss_kd = 0.0
        ensemble_logits = 0.0
        # Cross-entropy on every branch head.
        for i in range(len(logits)):
            loss_cls = (loss_cls + criterion_cls(logits[i], target))
        # Mean of all branch logits acts as the distillation teacher;
        # detached so no gradients flow back through the teacher.
        for i in range(len(logits)):
            ensemble_logits = (ensemble_logits + logits[i])
        ensemble_logits = (ensemble_logits / len(logits))
        ensemble_logits = ensemble_logits.detach()
        # Divergence loss only on the last branch vs the ensemble.
        loss_div = (loss_div + criterion_div(logits[(- 1)], ensemble_logits))
        loss_kd = (loss_kd + criterion_kd(embedding, index, contrast_idx))
        # Weighted total: gamma*cls + alpha*div + beta*kd.
        loss = (((args.gamma * loss_cls) + (args.alpha * loss_div)) + (args.beta * loss_kd))
        loss.backward()
        optimizer.step()
        # Accumulate per-batch averages (divide by batch count up front).
        train_loss += (loss.item() / len(trainloader))
        train_loss_cls += ((args.gamma * loss_cls.item()) / len(trainloader))
        train_loss_div += ((args.alpha * loss_div.item()) / len(trainloader))
        train_loss_kd += ((args.beta * loss_kd.item()) / len(trainloader))
        for i in range(len(logits)):
            (_, predicted) = logits[i].max(1)
            correct[i] += predicted.eq(target).sum().item()
            total[i] += target.size(0)
        print('Batch:{}, Time:{:.3f}'.format(batch_idx, (time.time() - batch_start_time)))
        batch_start_time = time.time()
    acc = []
    for i in range(args.num_branches):
        acc.append((correct[i] / total[i]))
    # Append this epoch's summary to the per-script/arch result file.
    with open(((('result/' + str(os.path.basename(__file__).split('.')[0])) + args.arch) + '.txt'), 'a+') as f:
        f.write('Epoch:{0}\t lr:{1:.3f}\t duration:{2:.3f}\n train_loss:{3:.5f}\t train_loss_cls:{4:.5f}\t train_loss_div:{5:.5f}\t train_loss_kd:{6:.5f}\n accuracy: {7} \n'.format(epoch, lr, (time.time() - start_time), train_loss, train_loss_cls, train_loss_div, train_loss_kd, str(acc)))
class VersionField(SemVerField):
    """Django model field storing a semantic version (base.Version)."""

    default_error_messages = {'invalid': _('Enter a valid version number in X.Y.Z format.')}
    description = _('Version')

    def __init__(self, *args, **kwargs):
        # Accept (and strip) our two extra options before handing off to Django.
        self.partial = kwargs.pop('partial', False)
        if self.partial:
            warnings.warn('Use of `partial=True` will be removed in 3.0.', DeprecationWarning, stacklevel=2)
        self.coerce = kwargs.pop('coerce', False)
        super(VersionField, self).__init__(*args, **kwargs)

    def deconstruct(self):
        """Expose partial/coerce so migrations can reconstruct the field."""
        name, path, args, kwargs = super(VersionField, self).deconstruct()
        kwargs['partial'] = self.partial
        kwargs['coerce'] = self.coerce
        return (name, path, args, kwargs)

    def to_python(self, value):
        """Convert stored/user input to base.Version, passing through None/''."""
        if value is None or value == '':
            return value
        if isinstance(value, base.Version):
            return value
        # coerce mode repairs loosely-formatted version strings.
        factory = base.Version.coerce if self.coerce else base.Version
        return factory(value, partial=self.partial)
class PrimaryBroadcastToEdges(PrimaryBroadcast):
    """Primary broadcast whose result lives on the edges of the broadcast domain."""

    def __init__(self, child, broadcast_domain, name=None):
        # Fall back to a descriptive default when no name is supplied.
        if not name:
            name = 'broadcast to edges'
        super().__init__(child, broadcast_domain, name)
        self.broadcast_type = 'primary to edges'

    def _evaluates_on_edges(self, dimension):
        """This symbol always evaluates on edges, whatever the dimension."""
        return True
class TestHyperCubic(QiskitNatureTestCase):
    """Tests for HyperCubicLattice on a 2x2x2 lattice with complex edge
    weights, mixed boundary conditions and a uniform onsite term."""

    def test_init(self):
        """Graph, node set, weighted edges and adjacency matrix all match the
        hand-built expectations (self-loops encode the onsite parameter)."""
        size = (2, 2, 2)
        # One edge weight per lattice axis (x, y, z).
        edge_parameter = ((1.0 + 1j), 0.0, ((- 2.0) - 2j))
        onsite_parameter = 5.0
        boundary_condition = (BoundaryCondition.OPEN, BoundaryCondition.PERIODIC, BoundaryCondition.OPEN)
        hyper_cubic = HyperCubicLattice(size, edge_parameter, onsite_parameter, boundary_condition)
        with self.subTest('Check the graph.'):
            target_graph = PyGraph(multigraph=False)
            target_graph.add_nodes_from(range(8))
            # Axis-x, axis-y, axis-z edges, then onsite self-loops.
            weighted_edge_list = [(0, 1, (1.0 + 1j)), (2, 3, (1.0 + 1j)), (4, 5, (1.0 + 1j)), (6, 7, (1.0 + 1j)), (0, 2, 0.0), (1, 3, 0.0), (4, 6, 0.0), (5, 7, 0.0), (0, 4, ((- 2.0) - 2j)), (1, 5, ((- 2.0) - 2j)), (2, 6, ((- 2.0) - 2j)), (3, 7, ((- 2.0) - 2j)), (0, 0, 5.0), (1, 1, 5.0), (2, 2, 5.0), (3, 3, 5.0), (4, 4, 5.0), (5, 5, 5.0), (6, 6, 5.0), (7, 7, 5.0)]
            target_graph.add_edges_from(weighted_edge_list)
            self.assertTrue(is_isomorphic(hyper_cubic.graph, target_graph, edge_matcher=(lambda x, y: (x == y))))
        with self.subTest('Check the number of nodes.'):
            self.assertEqual(hyper_cubic.num_nodes, 8)
        with self.subTest('Check the set of nodes.'):
            self.assertSetEqual(set(hyper_cubic.node_indexes), set(range(8)))
        with self.subTest('Check the set of weights.'):
            target_set = {(0, 1, (1.0 + 1j)), (2, 3, (1.0 + 1j)), (4, 5, (1.0 + 1j)), (6, 7, (1.0 + 1j)), (0, 2, 0.0), (1, 3, 0.0), (4, 6, 0.0), (5, 7, 0.0), (0, 4, ((- 2.0) - 2j)), (1, 5, ((- 2.0) - 2j)), (2, 6, ((- 2.0) - 2j)), (3, 7, ((- 2.0) - 2j)), (0, 0, 5.0), (1, 1, 5.0), (2, 2, 5.0), (3, 3, 5.0), (4, 4, 5.0), (5, 5, 5.0), (6, 6, 5.0), (7, 7, 5.0)}
            self.assertSetEqual(set(hyper_cubic.weighted_edge_list), target_set)
        with self.subTest('Check the adjacency matrix.'):
            # Hermitian: lower triangle carries the conjugated edge weights.
            target_matrix = np.array([[5.0, (1.0 + 1j), 0.0, 0.0, ((- 2.0) - 2j), 0.0, 0.0, 0.0], [(1.0 - 1j), 5.0, 0.0, 0.0, 0.0, ((- 2.0) - 2j), 0.0, 0.0], [0.0, 0.0, 5.0, (1.0 + 1j), 0.0, 0.0, ((- 2.0) - 2j), 0.0], [0.0, 0.0, (1.0 - 1j), 5.0, 0.0, 0.0, 0.0, ((- 2.0) - 2j)], [((- 2.0) + 2j), 0.0, 0.0, 0.0, 5.0, (1.0 + 1j), 0.0, 0.0], [0.0, ((- 2.0) + 2j), 0.0, 0.0, (1.0 - 1j), 5.0, 0.0, 0.0], [0.0, 0.0, ((- 2.0) + 2j), 0.0, 0.0, 0.0, 5.0, (1.0 + 1j)], [0.0, 0.0, 0.0, ((- 2.0) + 2j), 0.0, 0.0, (1.0 - 1j), 5.0]])
            assert_array_equal(hyper_cubic.to_adjacency_matrix(weighted=True), target_matrix)
class Conv4(nn.Module):
    """Four-conv CIFAR-style network whose 'linear' head is built from 1x1
    convs (so the builder's conv masking/pruning applies to it too)."""

    def __init__(self):
        super(Conv4, self).__init__()
        builder = get_builder()
        # Two 3x3 conv pairs, each followed by 2x2 max-pool: 32x32 -> 8x8.
        self.convs = nn.Sequential(builder.conv3x3(3, 64, first_layer=True), nn.ReLU(), builder.conv3x3(64, 64), nn.ReLU(), nn.MaxPool2d((2, 2)), builder.conv3x3(64, 128), nn.ReLU(), builder.conv3x3(128, 128), nn.ReLU(), nn.MaxPool2d((2, 2)))
        # Head over the flattened 8192-dim features (for 32x32 inputs:
        # 128 channels * 8 * 8; written here as (32*32)*8 = same count).
        self.linear = nn.Sequential(builder.conv1x1(((32 * 32) * 8), 256), nn.ReLU(), builder.conv1x1(256, 256), nn.ReLU(), builder.conv1x1(256, 10))

    def forward(self, x):
        out = self.convs(x)
        # Flatten to (N, 8192, 1, 1) so 1x1 convs act as fully-connected layers.
        out = out.view(out.size(0), 8192, 1, 1)
        out = self.linear(out)
        return out.squeeze()
def advance_iter_and_group_samples(train_iterator, num_samples, max_seq_length):
    """Pull tokenized batches off ``train_iterator`` until ``num_samples *
    max_seq_length`` tokens are collected, then re-chunk every tracked key
    into back-to-back windows of ``max_seq_length`` tokens each.
    """
    token_budget = max_seq_length * num_samples
    tracked_keys = ['input_ids', 'attention_mask', 'special_tokens_mask']
    collected = defaultdict(list)
    consumed = 0
    while consumed < token_budget:
        batch = next(train_iterator)
        consumed += len(batch['input_ids'])
        # Concatenate the new batch onto what we already have for each key.
        collected = {key: collected[key] + batch[key] for key in tracked_keys}

    def chunk(sequences):
        # Slice each concatenated stream into fixed-length windows.
        return {key: [stream[start:(start + max_seq_length)] for start in range(0, token_budget, max_seq_length)] for (key, stream) in sequences.items()}

    return chunk(collected)
def test_handler_bad_request(mocker):
    """An invalid order payload must produce HTTP 400 with a generic error body."""
    mock_dynamic_configuration(mocker, MOCKED_SCHEMA)
    event = generate_api_gw_event({'order_item_count': 5})
    response = call_create_order(event)
    assert response['statusCode'] == HTTPStatus.BAD_REQUEST
    assert json.loads(response['body']) == {'error': 'invalid input'}
# NOTE(review): the bare '_test' below looks like the remnant of a stripped
# decorator (probably '@keras_test') — confirm against the original source.
_test
def test_locallyconnected_2d():
    """Smoke-test LocallyConnected2D in both data formats with two stride settings."""
    num_samples = 5
    filters = 3
    stack_size = 4
    num_row = 6
    num_col = 8
    padding = 'valid'
    for strides in [(1, 1), (2, 2)]:
        # channels_last with a scalar kernel_size.
        layer_test(local.LocallyConnected2D, kwargs={'filters': filters, 'kernel_size': 3, 'padding': padding, 'kernel_regularizer': 'l2', 'bias_regularizer': 'l2', 'activity_regularizer': 'l2', 'strides': strides, 'data_format': 'channels_last'}, input_shape=(num_samples, num_row, num_col, stack_size))
        # channels_first with an explicit (3, 3) kernel.
        layer_test(local.LocallyConnected2D, kwargs={'filters': filters, 'kernel_size': (3, 3), 'padding': padding, 'kernel_regularizer': 'l2', 'bias_regularizer': 'l2', 'activity_regularizer': 'l2', 'strides': strides, 'data_format': 'channels_first'}, input_shape=(num_samples, stack_size, num_row, num_col))
class BarFactory(factory.Factory):
    """factory_boy factory for Bar that links each created Bar back to its Foo."""

    foo = factory.SubFactory(FooFactory)

    # NOTE(review): this overrides factory.Factory._create and takes 'cls',
    # but the '@classmethod' decorator is absent here — it appears to have
    # been stripped from the original source; confirm.
    def _create(cls, model_class: type[Bar], foo: Foo) -> Bar:
        # Sanity-check the SubFactory-produced Foo before building Bar.
        assert (foo.value == foo.expected)
        bar: Bar = super()._create(model_class, foo=foo)
        # Establish the bidirectional association.
        foo.bar = bar
        return bar

    class Meta():
        model = Bar
class TransformLayer(nn.Module):
    """Thin wrapper selecting a linear or convolutional transform by name.

    Raises NotImplementedError for any ``transform_type`` other than
    'linear' or 'conv'. Exposes the wrapped module's ``out_dim``.
    """

    def __init__(self, transform_type, in_dim, out_dim, hidden_dim=None):
        super(TransformLayer, self).__init__()
        if transform_type == 'linear':
            self.module = LinearTransform(in_dim, out_dim)
        elif transform_type == 'conv':
            self.module = ConvTransform(in_dim, out_dim, hidden_dim)
        else:
            raise NotImplementedError('Unknown post combine transform type: %s' % transform_type)
        # Mirror the selected transform's output width.
        self.out_dim = self.module.out_dim

    def forward(self, *args, **kwargs):
        # Pure delegation to the wrapped transform.
        return self.module(*args, **kwargs)
class RPCTransformer(RPCTransformerBase, GDALTransformerBase):
    """GDAL coordinate transformer driven by Rational Polynomial Coefficients."""

    def __init__(self, rpcs, **rpc_options):
        # Accept either an RPC object or a plain coefficient mapping.
        if not isinstance(rpcs, (RPC, dict)):
            raise ValueError('RPCTransformer requires RPC')
        super().__init__(rpcs, **rpc_options)

    def __repr__(self):
        state = 'closed' if self.closed else 'open'
        return '<{} RPCTransformer>'.format(state)
class Directory(entry.Entry):
    """CPI-side adaptor base class for SAGA Directory objects.

    Every operation below is a stub (``pass``) meant to be overridden by a
    concrete adaptor implementation; each synchronous method has an
    ``*_async`` twin that additionally takes a task type (``ttype``).
    """

    def __init__(self, api, adaptor):
        # Route construction through the namespace-entry base class.
        self._cpi_nsentry = super(Directory, self)
        self._cpi_nsentry.__init__(api, adaptor)

    # -- initialization -------------------------------------------------
    def init_instance(self, url, flags, session):
        pass

    def init_instance_async(self, url, flags, session):
        pass

    # -- navigation and listing -----------------------------------------
    def change_dir(self, url, flags, ttype):
        pass

    def change_dir_async(self, url, flags, ttype):
        pass

    def list(self, npat, ttype):
        pass

    def list_async(self, npat, ttype):
        pass

    def find(self, npat, flags, ttype):
        pass

    def find_async(self, npat, flags, ttype):
        pass

    # -- inspection -----------------------------------------------------
    def exists(self, name, ttype):
        pass

    def exists_async(self, name, ttype):
        pass

    def is_dir(self, name, ttype):
        pass

    def is_dir_async(self, name, ttype):
        pass

    def is_entry(self, name, ttype):
        pass

    def is_entry_async(self, name, ttype):
        pass

    def is_link(self, name, ttype):
        pass

    def is_link_async(self, name, ttype):
        pass

    def read_link(self, name, ttype):
        pass

    def read_link_async(self, name, ttype):
        pass

    def get_num_entries(self, ttype):
        pass

    def get_num_entries_async(self, ttype):
        pass

    def get_entry(self, num, ttype):
        pass

    def get_entry_async(self, num, ttype):
        pass

    # -- manipulation ---------------------------------------------------
    def copy(self, src, tgt, flags, ttype):
        pass

    def copy_async(self, src, tgt, flags, ttype):
        pass

    def link(self, src, tgt, flags, ttype):
        pass

    def link_async(self, src, tgt, flags, ttype):
        pass

    def move(self, src, tgt, flags, ttype):
        pass

    def move_async(self, src, tgt, flags, ttype):
        pass

    def remove(self, tgt, flags, ttype):
        pass

    def remove_async(self, tgt, flags, ttype):
        pass

    def make_dir(self, tgt, flags, ttype):
        pass

    def make_dir_async(self, tgt, flags, ttype):
        pass
def test_sha256_secrethash():
    """sha256_secrethash must equal the raw SHA-256 digest of the secret bytes."""
    # Digest of the empty byte string.
    empty_digest = b"\xe3\xb0\xc4B\x98\xfc\x1c\x14\x9a\xfb\xf4\xc8\x99o\xb9$'\xaeA\xe4d\x9b\x93L\xa4\x95\x99\x1bxR\xb8U"
    assert sha256_secrethash(Secret(b'')) == empty_digest
    # Digest of a single byte.
    single_digest = b'\xca\x97\x81\x12\xca\x1b\xbd\xca\xfa\xc21\xb3\x9a#\xdcM\xa7\x86\xef\xf8\x14|Nr\xb9\x80w\x85\xaf\xeeH\xbb'
    assert sha256_secrethash(Secret(b'a')) == single_digest
    # Digest of a full 32-byte secret.
    secret = Secret(b'secretsecretsecretsecretsecretse')
    full_digest = b'\xd4h:"\xc1\xce9\x82M\x93\x1e\xed\xc6\x8e\xa8\xfaRY\xce\xb05(\xb1\xa2/pu\x86>\xf8\xba\xf0'
    assert sha256_secrethash(secret) == full_digest
def test_func_cyclic_invalid():
    """Cyclic calls between construct-time functions must raise
    InvalidFuncCallError during model elaboration.

    NOTE(review): the nested defs and ``up_write`` were presumably decorated
    in the original PyMTL source (e.g. ``@s.func`` / ``@update``); the
    decorators appear to have been stripped — confirm.
    """
    class Top(ComponentLevel2):

        def construct(s):
            s.a = Wire(Bits32)
            s.b = Wire(Bits32)
            s.c = Wire(Bits32)

            # assignc -> assign -> assignb -> assignc forms the cycle.
            def assignc(a, b):
                s.c = a
                assign(a, b)

            def assignb(a, b):
                s.b = b
                assignc(a, b)

            def assign(a, b):
                s.a = b
                assignb(b, a)

            def up_write():
                assign(1, 2)

        def done(s):
            return False

        def line_trace(s):
            return '{} {}'.format(s.a, s.b)

    try:
        _test_model(Top)
    except InvalidFuncCallError as e:
        # Expected path: elaboration detects the cyclic function calls.
        print('{} is thrown\n{}'.format(e.__class__.__name__, e))
        return
    raise Exception("Should've thrown InvalidFuncCallError.")
def main():
    """Layer-by-layer fine-tuning of a quantized SNN against its ANN teacher.

    Loads a pre-trained quantized checkpoint into three model copies (ANN
    teacher 'model', tuner host 'net', spiking copy 'snn'), then for each
    conv layer of each residual block trains a small "tuner" sub-module by
    MSE-matching the ANN feature maps, keeping the best-performing weights.

    NOTE(review): the loop nesting below was reconstructed from flattened
    source — confirm block boundaries against the original file.
    """
    global args, best_prec
    use_gpu = torch.cuda.is_available()
    args.device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    print(args.device)
    print('=> Building model...')
    model = None
    # Three copies of the same architecture: ANN teacher, tuner host, SNN.
    model = models.__dict__[args.arch](bit=args.bit)
    net = models.__dict__[args.arch](bit=args.bit)
    snn = models.__dict__[args.arch](spike=True, bit=args.bit)
    criterion = nn.CrossEntropyLoss()
    if use_gpu:
        model = model.cuda()
        net = net.cuda()
        snn = snn.cuda()
        cudnn.benchmark = True
    else:
        print('Cuda is not available!')
    # Output directory for the fine-tuned checkpoint.
    if (not os.path.exists('result')):
        os.makedirs('result')
    fdir = (((('result/' + str(args.arch)) + '_') + str(args.bit)) + 'bit_ft')
    if (not os.path.exists(fdir)):
        os.makedirs(fdir)
    # Default to the non-fine-tuned checkpoint when no init path is given.
    if (not args.init):
        args.init = (((('result/' + str(args.arch)) + '_') + str(args.bit)) + 'bit/model_best.pth.tar')
    if args.init:
        if os.path.isfile(args.init):
            print('=> loading pre-trained model')
            checkpoint = torch.load(args.init, map_location='cpu')
            # All three copies start from identical weights.
            model.load_state_dict(checkpoint['state_dict'])
            snn.load_state_dict(checkpoint['state_dict'])
            net.load_state_dict(checkpoint['state_dict'])
        else:
            print('No pre-trained model found !')
            exit()
    print('=> loading cifar10 data...')
    normalize = transforms.Normalize(mean=[0.491, 0.482, 0.447], std=[0.247, 0.243, 0.262])
    train_dataset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
    trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=2)
    test_dataset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), normalize]))
    testloader = torch.utils.data.DataLoader(test_dataset, batch_size=100, shuffle=False, num_workers=2)
    if args.evaluate:
        validate(testloader, snn, criterion)
        model.show_params()
        return
    # Number of SNN timesteps representable with the given bit width.
    duration = ((2 ** args.bit) - 1)
    # e.g. 'resnet20' -> 20 layers -> (20 - 2) / 6 = 3 blocks per segment.
    num_layers = int(args.arch[6:])
    num_blocks = ((num_layers - 2) // 6)
    model.eval()
    snn.eval()
    best_acc = 0
    acc = 0
    if (not args.force):
        best_acc = validate(trainloader, snn, nn.CrossEntropyLoss())
    # Start with every block bypassed; blocks are re-enabled one at a time.
    bypass_blocks(model, num_blocks)
    model.layer4.idem = True
    bypass_blocks(snn, num_blocks)
    snn.layer4.idem = True
    # Feature-map matching loss for the per-layer tuning.
    criterion = nn.MSELoss()
    for layer_id in range((num_layers - 2)):
        # Map the flat layer index to (segment, block, first/second half).
        segment_id = (((layer_id // 2) // num_blocks) + 1)
        block_id = ((layer_id // 2) % num_blocks)
        is_odd = (layer_id % 2)
        print(('=======We are tuning Layer %d Segment %d Block %d' % (layer_id, segment_id, block_id)))
        # Enable the current block in the ANN teacher; 'inter' selects
        # whether the teacher emits the block's intermediate map.
        m = getattr(model, ('layer' + str(segment_id)))
        m = getattr(m, str(block_id))
        m.idem = False
        if is_odd:
            m.inter = False
        else:
            m.inter = True
        # The tuner is the corresponding half-block from the 'net' copy.
        m = getattr(net, ('layer' + str(segment_id)))
        m = getattr(m, str(block_id))
        if is_odd:
            tuner = m.part2
        else:
            tuner = m.part1
        tuner.idem = False
        # Keep the activation clipping threshold frozen during tuning.
        tuner.relu.act_alpha.requires_grad_(False)
        optimizer = torch.optim.SGD(filter((lambda p: p.requires_grad), tuner.parameters()), lr=0.001, momentum=0.9, weight_decay=0.0001)
        # Snapshot the current SNN block so we can roll back if tuning hurts.
        m = getattr(snn, ('layer' + str(segment_id)))
        m = getattr(m, str(block_id))
        record = m.state_dict()
        for (k, v) in record.items():
            record[k] = v.cpu()
        for epoch in range(8):
            batch_time = AverageMeter()
            data_time = AverageMeter()
            losses = AverageMeter()
            end = time.time()
            for (i, (input, target)) in enumerate(trainloader):
                data_time.update((time.time() - end))
                input = input.to(args.device)
                target = target.to(args.device)
                with torch.no_grad():
                    # The ANN teacher's feature map is the regression target.
                    target_map = model(input)
                # Run the SNN with the current block bypassed to obtain the
                # block's spiking input maps.
                m = getattr(snn, ('layer' + str(segment_id)))
                m = getattr(m, str(block_id))
                m.idem = True
                in_maps = snn(input)
                if is_odd:
                    part1 = m.part1
                    part2 = m.part2
                    mid_maps = part1(in_maps)
                    out_maps = part2(mid_maps, in_maps)
                    # Average spike trains over time to get rate-coded maps.
                    in_maps = in_maps.sum(1).div(duration)
                    mid_maps = mid_maps.sum(1).div(duration)
                    out_maps = out_maps.sum(1).div(duration)
                    output = tuner(mid_maps, in_maps)
                else:
                    part1 = m.part1
                    out_maps = part1(in_maps)
                    in_maps = in_maps.sum(1).div(duration)
                    out_maps = out_maps.sum(1).div(duration)
                    output = tuner(in_maps)
                # Straight-through trick: the forward value comes from the
                # SNN, while gradients flow through the differentiable tuner.
                output.data = out_maps.data
                loss = criterion(output, target_map)
                losses.update(loss.item(), input.size(0))
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                # Copy the freshly tuned weights back into the SNN half-block.
                if is_odd:
                    part2.load_state_dict(tuner.state_dict())
                else:
                    part1.load_state_dict(tuner.state_dict())
                m = getattr(snn, ('layer' + str(segment_id)))
                m = getattr(m, str(block_id))
                m.idem = False
                m.inter = False
                batch_time.update((time.time() - end))
                end = time.time()
                if ((i % args.print_freq) == 0):
                    print('Epoch: [{0}][{1}/{2}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tData {data_time.val:.3f} ({data_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})'.format(epoch, i, len(trainloader), batch_time=batch_time, data_time=data_time, loss=losses))
            if (not args.force):
                # Temporarily enable the remaining blocks to measure accuracy.
                for i in range(((layer_id // 2) + 1), ((num_layers - 2) // 2)):
                    switch_on(snn, i, num_blocks)
                snn.layer4.idem = False
                acc = validate(trainloader, snn, nn.CrossEntropyLoss())
                for i in range(((layer_id // 2) + 1), ((num_layers - 2) // 2)):
                    switch_off(snn, i, num_blocks)
                snn.layer4.idem = True
            if ((acc > best_acc) or args.force):
                # This epoch improved accuracy: refresh the rollback snapshot.
                print('Update...')
                best_acc = acc
                m = getattr(snn, ('layer' + str(segment_id)))
                m = getattr(m, str(block_id))
                record = m.state_dict()
                for (k, v) in record.items():
                    record[k] = v.cpu()
        # Restore the best (or original) weights for this block.
        m = getattr(snn, ('layer' + str(segment_id)))
        m = getattr(m, str(block_id))
        m.load_state_dict(record)
    snn.layer4.idem = False
    torch.save({'state_dict': snn.state_dict()}, os.path.join(fdir, 'model_best.pth.tar'))
    validate(testloader, snn, nn.CrossEntropyLoss())
def test_derived_projected_crs():
    """A DERIVEDPROJCRS WKT2 string must parse as a Derived Projected CRS."""
    # WKT2 for a derived projected CRS based on WGS 84 / UTM zone 31N with an
    # unimplemented deriving conversion.
    wkt = 'DERIVEDPROJCRS["derived projectedCRS",\n    BASEPROJCRS["WGS 84 / UTM zone 31N",\n        BASEGEOGCRS["WGS 84",\n            DATUM["World Geodetic System 1984",\n                ELLIPSOID["WGS 84",6378137,298.,\n                    LENGTHUNIT["metre",1]]],\n            PRIMEM["Greenwich",0,\n                ANGLEUNIT["degree",0.]]],\n        CONVERSION["UTM zone 31N",\n            METHOD["Transverse Mercator",\n                ID["EPSG",9807]],\n            PARAMETER["Latitude of natural origin",0,\n                ANGLEUNIT["degree",0.],\n                ID["EPSG",8801]],\n            PARAMETER["Longitude of natural origin",3,\n                ANGLEUNIT["degree",0.],\n                ID["EPSG",8802]],\n            PARAMETER["Scale factor at natural origin",0.9996,\n                SCALEUNIT["unity",1],\n                ID["EPSG",8805]],\n            PARAMETER["False easting",500000,\n                LENGTHUNIT["metre",1],\n                ID["EPSG",8806]],\n            PARAMETER["False northing",0,\n                LENGTHUNIT["metre",1],\n                ID["EPSG",8807]]]],\n    DERIVINGCONVERSION["unnamed",\n        METHOD["PROJ unimplemented"],\n        PARAMETER["foo",1.0,UNIT["metre",1]]],\n    CS[Cartesian,2],\n        AXIS["(E)",east,\n            ORDER[1],\n            LENGTHUNIT["metre",1,\n                ID["EPSG",9001]]],\n        AXIS["(N)",north,\n            ORDER[2],\n            LENGTHUNIT["metre",1,\n                ID["EPSG",9001]]]]'
    crs = CRS(wkt)
    assert crs.is_derived
    assert (crs.type_name == 'Derived Projected CRS')
class ModelWithSharedParameter(nn.Module):
    """Toy model whose FC2 layer is initialized from FC1's weight and bias.

    NOTE(review): ``nn.Parameter(self.FC1.weight)`` creates a *new* Parameter
    object that shares storage with FC1's weight but is registered as a
    distinct parameter; if true weight tying (one shared Parameter object)
    was intended, assign ``self.FC2.weight = self.FC1.weight`` instead —
    confirm the intent before changing, as it alters the parameter count.
    """

    def __init__(self):
        super(ModelWithSharedParameter, self).__init__()
        self.embedding = nn.Embedding(1000, 200)
        self.FC1 = nn.Linear(200, 200)
        self.FC2 = nn.Linear(200, 200)
        self.FC2.weight = nn.Parameter(self.FC1.weight)
        self.FC2.bias = nn.Parameter(self.FC1.bias)
        self.relu = nn.ReLU()

    def forward(self, input):
        # Residual-style combination: FC2(relu(FC1(x))) + FC1(x).
        # Bug fix: the original referenced ``self.ReLU`` which does not exist
        # (the registered module is ``self.relu``) and raised AttributeError.
        return self.FC2(self.relu(self.FC1(input))) + self.FC1(input)
class AIPSW():
    """Augmented inverse probability of sampling weighting (AIPSW) estimator.

    Combines a sampling (selection) model, an optional treatment model, and
    an outcome model to generalize or transport treatment effects from a
    study sample to a target population. Typical usage: call
    ``sampling_model()``, optionally ``treatment_model()``, then
    ``outcome_model()`` and ``fit()`` before ``summary()``.
    """

    def __init__(self, df, exposure, outcome, selection, generalize=True, weights=None):
        self.df = df.copy()
        # selection==1 marks the study sample, selection==0 the target pop.
        self.sample = (df[selection] == 1)
        self.target = (df[selection] == 0)
        self.generalize = generalize
        self.exposure = exposure
        self.outcome = outcome
        self.selection = selection
        # External weights are deliberately rejected until stabilized.
        if (weights is not None):
            raise ValueError('AIPSW is not stable with `weights`. This functionality is currently unavailable')
        self.weight = weights
        self.ipsw = None
        self.iptw = None
        self._denominator_model = False
        self._outcome_model = False
        self._YA1 = None
        self._YA0 = None
        self.risk_difference = None
        self.risk_ratio = None

    def sampling_model(self, model_denominator, model_numerator='1', stabilized=True, print_results=True):
        """Fit the selection propensity model and construct IPSW weights."""
        dmodel = propensity_score(self.df, ((self.selection + ' ~ ') + model_denominator), print_results=print_results)
        self.df['__denom__'] = dmodel.predict(self.df)
        self._denominator_model = True
        if stabilized:
            nmodel = propensity_score(self.df, ((self.selection + ' ~ ') + model_numerator), print_results=print_results)
            self.df['__numer__'] = np.where(self.sample, nmodel.predict(self.df), 0)
        else:
            self.df['__numer__'] = np.where(self.sample, 1, 0)
        # Generalizability uses P(S=1|W) directly; transportability uses the
        # odds of non-selection.
        if self.generalize:
            self.df['__ipsw__'] = (self.df['__numer__'] / self.df['__denom__'])
        elif stabilized:
            self.df['__ipsw__'] = (((1 - self.df['__denom__']) / self.df['__denom__']) * (self.df['__numer__'] / (1 - self.df['__numer__'])))
        else:
            self.df['__ipsw__'] = ((1 - self.df['__denom__']) / self.df['__denom__'])
        self.ipsw = self.df['__ipsw__']

    def treatment_model(self, model_denominator, model_numerator='1', bound=None, stabilized=True, print_results=False):
        """Fit the treatment propensity model and store IPTW weights."""
        (d, n, self.iptw) = iptw_calculator(df=self.df, treatment=self.exposure, model_denom=model_denominator, model_numer=model_numerator, weight=self.weight, stabilized=stabilized, standardize='population', bound=bound, print_results=print_results)

    def outcome_model(self, model, outcome_type='binary', print_results=True):
        """Fit the outcome GLM in the sample and predict potential outcomes.

        ``outcome_type`` selects the family ('binary', 'normal', 'poisson');
        predictions under exposure=1 and exposure=0 are cached for fit().
        """
        if (self.exposure not in model):
            warnings.warn((("It looks like '" + self.exposure) + "' is not included in the outcome model."))
        if (outcome_type == 'binary'):
            linkdist = sm.families.family.Binomial()
        elif (outcome_type == 'normal'):
            linkdist = sm.families.family.Gaussian()
        elif (outcome_type == 'poisson'):
            linkdist = sm.families.family.Poisson()
        else:
            raise ValueError("Only 'binary', 'normal', and 'poisson' distributed outcomes are available")
        # The outcome model is fit in the study sample only.
        df = self.df[self.sample].copy()
        if (self.weight is None):
            m = smf.glm(((self.outcome + ' ~ ') + model), df, family=linkdist)
            self._outcome_model = m.fit()
        else:
            m = smf.glm(((self.outcome + ' ~ ') + model), df, family=linkdist, freq_weights=df[self.weight])
            self._outcome_model = m.fit()
        if print_results:
            print('')
            print('Outcome Model')
            print(self._outcome_model.summary())
            print('')
        # Predicted outcome for everyone under exposure (Y^{a=1}) ...
        dfa = self.df.copy()
        dfa[self.exposure] = 1
        self._YA1 = self._outcome_model.predict(dfa)
        # ... and under no exposure (Y^{a=0}).
        dfn = self.df.copy()
        dfn[self.exposure] = 0
        self._YA0 = self._outcome_model.predict(dfn)

    def fit(self):
        """Compute the AIPSW risk difference (r1 - r0) and risk ratio (r1 / r0)."""
        if (not self._denominator_model):
            raise ValueError('sampling_model() function must be specified before effect measure estimation')
        if (not self._outcome_model):
            raise ValueError('outcome_model() function must be specified before effect measure estimation')
        # Combine sampling, treatment, and (currently unsupported) external weights.
        if (self.weight is not None):
            if (self.iptw is None):
                self.df['__ipw__'] = (self.ipsw * self.df[self.weight])
            else:
                self.df['__ipw__'] = ((self.ipsw * self.iptw) * self.df[self.weight])
        elif (self.iptw is None):
            self.df['__ipw__'] = self.ipsw
        else:
            self.df['__ipw__'] = (self.ipsw * self.iptw)
        if self.generalize:
            # Generalizability: model prediction plus a weighted residual
            # correction among treated (resp. untreated) sample members.
            part1 = self._YA1
            part2 = np.where((self.sample & (self.df[self.exposure] == 1)), (self.df['__ipw__'] * (self.df[self.outcome] - self._YA1)), 0)
            r1 = np.mean((part1 + part2))
            part1 = self._YA0
            part2 = np.where((self.sample & (self.df[self.exposure] == 0)), (self.df['__ipw__'] * (self.df[self.outcome] - self._YA0)), 0)
            r0 = np.mean((part1 + part2))
        else:
            # Transportability: averages are taken over the target population.
            part1 = np.where((self.sample & (self.df[self.exposure] == 1)), (self.df['__ipw__'] * (self.df[self.outcome] - self._YA1)), 0)
            part2 = ((1 - self.df[self.selection]) * self._YA1)
            r1 = (np.sum((part1 + part2)) / np.sum((1 - self.df[self.selection])))
            part1 = np.where((self.sample & (self.df[self.exposure] == 0)), (self.df['__ipw__'] * (self.df[self.outcome] - self._YA0)), 0)
            part2 = ((1 - self.df[self.selection]) * self._YA0)
            r0 = (np.sum((part1 + part2)) / np.sum((1 - self.df[self.selection])))
        self.risk_difference = (r1 - r0)
        self.risk_ratio = (r1 / r0)

    def summary(self, decimal=4):
        """Print a formatted summary of the estimated effect measures."""
        print('')
        print(' Augmented Inverse Probability of Sampling Weights ')
        print('')
        fmt = 'Treatment: {:<15} Sample Observations: {:<20}'
        print(fmt.format(self.exposure, self.df[self.sample].shape[0]))
        fmt = 'Outcome: {:<15} Target Observations: {:<20}'
        print(fmt.format(self.outcome, self.df[self.target].shape[0]))
        fmt = 'Target estimate: {:<15}'
        if self.generalize:
            print(fmt.format('Generalize'))
        else:
            print(fmt.format('Transport'))
        print('')
        print('Risk Difference: ', round(float(self.risk_difference), decimal))
        print('Risk Ratio: ', round(float(self.risk_ratio), decimal))
        print('')
def setup(parser):
    """Attach squirrel selection/query options plus channel-priority and
    sample-rate filters to *parser*."""
    parser.add_squirrel_selection_arguments()
    parser.add_squirrel_query_arguments()
    # Ordered fallback list of band/instrument code prefixes.
    parser.add_argument('--channel-priorities', dest='channel_priorities', metavar='CHA', help='\nList of 2-character band/instrument code combinations to try. For example,\ngiving ```HH,BH``` would first try to get ```HH?``` channels and then fallback\nto ```BH?``` if these are not available. The first matching waveforms are\nreturned. Use in combination with ``--sample-rate-min`` and\n``--sample-rate-max`` to constrain the sample rate.\n'.strip())
    parser.add_argument('--sample-rate-min', dest='sample_rate_min', metavar='FLOAT', type=float, help='Minimum sample rate [Hz] to consider.')
    parser.add_argument('--sample-rate-max', dest='sample_rate_max', metavar='FLOAT', type=float, help='Maximum sample rate [Hz] to consider.')
class IterationTimeLoggerTest(unittest.TestCase):
    """Unit tests for the IterationTimeLogger callback."""

    def test_iteration_time_logger_test_on_train_step_end(self) -> None:
        """Step-end hooks must log the moving-average iteration time for train
        and predict; eval has no recorded durations here, so nothing is
        expected for it."""
        logger = MagicMock(spec=TensorBoardLogger)
        logger.writer = MagicMock(spec=SummaryWriter)
        state = MagicMock(spec=State)
        recorded_durations = {'train_iteration_time': [1, 3, 5, 7, 9], 'eval_iteration_time': [], 'predict_iteration_time': [11, 13, 15, 17, 19]}
        state.train_state.iteration_timer.recorded_durations = recorded_durations.copy()
        state.eval_state.iteration_timer.recorded_durations = recorded_durations.copy()
        state.predict_state.iteration_timer.recorded_durations = recorded_durations.copy()
        # NOTE(review): this first callback instance is shadowed by the one
        # created below and appears redundant — confirm before removing.
        callback = IterationTimeLogger(logger=logger, moving_avg_window=4)
        train_unit = DummyTrainUnit(input_dim=2)
        train_unit.train_progress.increment_step()
        train_unit.train_progress.increment_step()
        eval_unit = DummyEvalUnit(input_dim=2)
        eval_unit.eval_progress.increment_step()
        eval_unit.eval_progress.increment_step()
        predict_unit = DummyPredictUnit(input_dim=2)
        predict_unit.predict_progress.increment_step()
        predict_unit.predict_progress.increment_step()
        callback = IterationTimeLogger(logger=logger, moving_avg_window=4)
        callback.on_train_step_end(state, train_unit)
        callback.on_eval_step_end(state, eval_unit)
        callback.on_predict_step_end(state, predict_unit)
        # Means of the last 4 durations: (3+5+7+9)/4 = 6.0 and
        # (13+15+17+19)/4 = 16.0, each logged at step 2.
        logger.writer.add_scalar.assert_has_calls([call('Train Iteration Time (seconds)', 6.0, 2), call('Prediction Iteration Time (seconds)', 16.0, 2)])

    def test_with_train_epoch(self) -> None:
        """With log_every_n_steps=3 over 2 epochs of 6 steps each, the logger
        should receive 4 scalar writes."""
        my_unit = DummyTrainUnit(input_dim=2)
        logger = MagicMock(spec=TensorBoardLogger)
        logger.writer = MagicMock(spec=SummaryWriter)
        callback = IterationTimeLogger(logger, moving_avg_window=1, log_every_n_steps=3)
        dataloader = generate_random_dataloader(num_samples=12, input_dim=2, batch_size=2)
        train(my_unit, dataloader, max_epochs=2, callbacks=[callback])
        self.assertEqual(logger.writer.add_scalar.call_count, 4)
class TimeBudgetRetryPolicy(RetryPolicy):
    """Retry policy that caps another policy's attempts with a total time budget.

    Each yielded value is the number of seconds remaining in the budget;
    iteration stops once the wrapped policy is exhausted or the budget has
    been spent.
    """

    def __init__(self, policy: RetryPolicy, budget: float):
        assert budget >= 0, 'The time budget must not be negative.'
        self.subpolicy = policy
        self.budget = budget

    def yield_attempts(self) -> Iterator[Optional[float]]:
        started = time.time()
        # The first attempt always receives the full budget.
        yield self.budget
        for _ in self.subpolicy:
            remaining = self.budget - (time.time() - started)
            if remaining <= 0:
                break
            yield remaining
def migrate_tests_in_file(file_path: str) -> None:
    """Rewrite migratable test definitions in *file_path* in place.

    Applies MIGRATE_REGEX to the file's text; when anything changed, the
    result is normalized to end in exactly one newline and written back.
    OS-level errors (unreadable/unwritable paths) are silently ignored.
    """
    try:
        with open(file_path, 'r+') as fd:
            content = fd.read()
            new_content = MIGRATE_REGEX.sub('\\(\\2)\\ndef \\1():\\n    pass\\n', content)
            if (new_content != content):
                # Normalize to a single trailing newline.
                new_content = (new_content.rstrip('\n') + '\n')
                fd.seek(0)
                fd.write(new_content)
                # Bug fix: without truncating, a rewrite shorter than the
                # original left stale trailing bytes in the file.
                fd.truncate()
                print(f'migrated: {file_path}')
            else:
                print(f'skipped: {file_path}')
    except OSError:
        # Best effort: unreadable/unwritable files are skipped deliberately.
        pass
def get_argparser():
    """Build the DermoSegDiff command-line argument parser.

    Only -c/--config_file is required; every other option, when supplied,
    overrides the corresponding value from the YAML config.
    """
    cli = argparse.ArgumentParser(prog='DermoSegDiff', description='DermoSegDiff: A Boundary-aware Segmentation Diffusion Model for Skin Lesion Delineation', epilog='')
    # Core run options.
    cli.add_argument('-c', '--config_file', type=str, required=True, help='')
    cli.add_argument('-n', '--model_name', type=str, help='')
    cli.add_argument('-s', '--input_size', type=int, help='')
    cli.add_argument('-b', '--batch_size', type=int, help='')
    cli.add_argument('-l', '--learning_rate', type=float, help='')
    cli.add_argument('-t', '--timesteps', type=int, help='')
    cli.add_argument('-S', '--ds_mode', type=str, choices=['linear', 'quadratic', 'cosine', 'sigmoid'], help='linear, quadratic, cosine, sigmoid')
    cli.add_argument('-e', '--epochs', type=int, help='')
    # Diffusion schedule endpoints.
    cli.add_argument('--beta_start', type=float, help='')
    cli.add_argument('--beta_end', type=float, help='')
    # Network dimensions and multipliers.
    cli.add_argument('-D', '--model_dim_mults', type=int, nargs='*', help='1 2 4')
    cli.add_argument('-E', '--ensemble', type=int, help='')
    cli.add_argument('--model_dim_x', type=int, help='128')
    cli.add_argument('--model_dim_g', type=int, help='64')
    cli.add_argument('--model_dim_x_mults', type=int, nargs='*', help='1 2 3 4 05 06')
    cli.add_argument('--model_dim_g_mults', type=int, nargs='*', help='1 2 4 8 16 32')
    # Optimizer / scheduler / augmentation knobs.
    cli.add_argument('--training_optimizer_betas', type=float, nargs='*', help='0.7 0.98')
    cli.add_argument('--training_scheduler_factor', type=float, help='0.5')
    cli.add_argument('--training_scheduler_patience', type=int, help='5')
    cli.add_argument('--augmentation_p', type=float, help='0.5')
    cli.add_argument('-v', '--verbose', action='store_true')
    return cli
def main():
    """Verify that the running interpreter matches an expected version spec.

    ``sys.argv[1]`` may be 'X.Y' or 'X.Y.Z' (CPython), or 'pypy', 'pypy3',
    'pypyX.Y' (PyPy). Exits with status 1 for a bad/unknown spec, 2 for an
    implementation or version mismatch; returns normally on success.
    """
    if (len(sys.argv) != 2):
        print(('Usage: %s <expected-python-version>' % sys.argv[0]))
        exit(1)
    expected_impl = None
    expected_major = None
    expected_minor = None
    expected_version_string = sys.argv[1]
    # Plain 'X.Y' means CPython X.Y.
    match = re.match('^(\\d+)\\.(\\d+)$', expected_version_string)
    if (match is not None):
        expected_impl = 'CPython'
        expected_major = match.group(1)
        expected_minor = match.group(2)
    # 'X.Y.Z' also means CPython X.Y (the patch level is ignored).
    # Bug fix: the separator before the patch number was an unescaped '.'
    # which matched ANY character (e.g. '3x9q1' would have matched); it is
    # now escaped.
    match = re.match('^(\\d+)\\.(\\d+)\\.\\d+$', expected_version_string)
    if (match is not None):
        expected_impl = 'CPython'
        expected_major = match.group(1)
        expected_minor = match.group(2)
    if (expected_version_string == 'pypy'):
        expected_impl = 'PyPy'
        expected_major = '2'
        expected_minor = '7'
    if (expected_version_string == 'pypy3'):
        expected_impl = 'PyPy'
        expected_major = '3'
        # '*' means any minor version is acceptable.
        expected_minor = '*'
    match = re.match('^pypy(\\d+)\\.(\\d+).*?$', expected_version_string)
    if (match is not None):
        expected_impl = 'PyPy'
        expected_major = match.group(1)
        expected_minor = match.group(2)
    if (expected_impl is None):
        print(('Unknown python version specified: %s' % expected_version_string))
        exit(1)
    impl = platform.python_implementation()
    version = sys.version_info
    if (impl != expected_impl):
        print(('Wrong platform detected. %s was expected, but this script is running on %s!' % (expected_impl, impl)))
        exit(2)
    if (((expected_minor != '*') and (expected_minor != str(version.minor))) or (expected_major != str(version.major))):
        print(('Wrong version detected. %s.%s was expected, but this script is running on %s.%s!' % (expected_major, expected_minor, version.major, version.minor)))
        exit(2)
    print(('All OK. The detected python version is %s %s.%s' % (impl, version.major, version.minor)))
# NOTE(review): the bare parenthesized keyword list below is the remnant of a
# stripped decorator call (e.g. '@action(id=..., ...)'); as written it is not
# valid standalone Python — restore the decorator name from the original.
(id='run_python', name='Run a Python script', description='Run a specified Python script', outputs={'success': RunPythonFileOutput, 'error': RunPythonFileError})
def run_python_file(params: RunPythonFileInput) -> typing.Tuple[(str, typing.Union[(RunPythonFileOutput, RunPythonFileError)])]:
    """Run the given script with the current interpreter.

    Returns ('success', RunPythonFileOutput) when the script exits with code
    0, otherwise ('error', RunPythonFileError) carrying the return code;
    stdout/stderr are decoded as UTF-8 in both cases.
    """
    run_results = subprocess.run([sys.executable, params.filename], capture_output=True)
    if (run_results.returncode == 0):
        return ('success', RunPythonFileOutput(str(run_results.stdout, 'utf-8'), str(run_results.stderr, 'utf-8')))
    return ('error', RunPythonFileError(run_results.returncode, str(run_results.stdout, 'utf-8'), str(run_results.stderr, 'utf-8')))
# NOTE(review): "(scope='session')" is the argument list of a stripped
# decorator (most likely "@pytest.fixture(scope='session')"); as written it
# is not valid standalone Python — restore the decorator name.
(scope='session')
def vcr_config():
    """VCR settings: scrub auth-related request headers and drop all response
    headers before cassettes are recorded."""
    def remove_headers(response: dict):
        # Recorded responses keep no headers at all.
        response['headers'] = {}
        return response
    return {'filter_headers': [('authorization', 'secret_...'), ('user-agent', None), ('cookie', None)], 'before_record_response': remove_headers, 'match_on': ['method', 'remove_page_id_for_matches']}
class DE():
    """A single HHD data element: a length prefix (LDE) plus payload.

    The element auto-detects BCD vs ASCII encoding from its payload and can
    re-render its length field and data for serialization.
    """

    def __init__(self, lde_len):
        self.length = 0
        self.lde = 0
        self.lde_length = lde_len
        self.encoding = None
        self.data = None

    def parse(self, data, version):
        """Consume this element from *data*; return the unconsumed remainder."""
        self.version = version
        if not data:
            return data
        # The length-of-data-element field comes first.
        self.lde = int(data[0:self.lde_length])
        remainder = data[self.lde_length:]
        # Payload length lives in the low 5 bits of the LDE.
        self.length = bit_sum(self.lde, 5)
        self.data = remainder[0:self.length]
        return remainder[self.length:]

    def set_encoding(self):
        """Pick BCD for purely numeric payloads, ASCII otherwise (sticky)."""
        if self.data is None:
            self.encoding = ENCODING_BCD
        elif self.encoding is None:
            if re.match('^[0-9]{1,}$', self.data):
                self.encoding = ENCODING_BCD
            else:
                self.encoding = ENCODING_ASC

    def render_length(self):
        """Render the length field for the current payload and HHD version."""
        self.set_encoding()
        if self.data is None:
            return ''
        nbytes = len(self.render_data()) // 2
        if self.encoding == ENCODING_BCD:
            return h(nbytes, 2)
        # ASCII payloads flag the encoding differently per HHD version.
        if self.version == HHD_VERSION_14:
            return h(nbytes + (1 << BIT_ENCODING), 2)
        return '1' + h(nbytes, 1)

    def render_data(self):
        """Render the payload, ASCII-encoding or BCD-padding as needed."""
        self.set_encoding()
        if self.data is None:
            return ''
        if self.encoding == ENCODING_ASC:
            return asciicode(self.data)
        # BCD: pad odd-length numeric strings with a filler nibble.
        if len(self.data) % 2 == 1:
            return self.data + 'F'
        return self.data
def delete_migrated_content(apps, schema_editor):
    """Re-attach Release objects to their pages and clear migrated content.

    For every Release that still carries migrated markup (content starts
    with MARKER) and has no release_page yet, look up the matching Page by
    title ("<name> Release" unless the name already contains "Release"),
    link it, and wipe the duplicated content. Releases without exactly one
    matching page are left untouched.
    """
    Release = apps.get_model('downloads', 'Release')
    Page = apps.get_model('pages', 'Page')
    db_alias = schema_editor.connection.alias
    candidates = Release.objects.using(db_alias).filter(release_page__isnull=True, content__startswith=MARKER)
    for release in candidates:
        title = release.name
        if 'Release' not in title:
            title = release.name + ' Release'
        try:
            page = Page.objects.get(title=title)
        except (Page.DoesNotExist, Page.MultipleObjectsReturned):
            # No unambiguous page to link; skip this release.
            continue
        release.release_page = page
        release.content = ''
        release.save()
class SaltBackend(base.BaseBackend):
    """Command-execution backend that targets a minion through salt's LocalClient."""

    HAS_RUN_SALT = True
    NAME = 'salt'

    def __init__(self, host: str, *args: Any, **kwargs: Any):
        self.host = host
        # Lazily created LocalClient (see client below).
        self._client: Optional[salt.client.LocalClient] = None
        super().__init__(self.host, *args, **kwargs)

    # NOTE(review): run_salt accesses this as `self.client.cmd(...)` without
    # calling it, so this was almost certainly decorated with '@property' in
    # the original source — the decorator appears to have been stripped.
    def client(self) -> salt.client.LocalClient:
        if (self._client is None):
            self._client = salt.client.LocalClient()
        return self._client

    def run(self, command: str, *args: str, **kwargs: Any) -> base.CommandResult:
        """Run *command* on the target minion via salt's cmd.run_all."""
        command = self.get_command(command, *args)
        out = self.run_salt('cmd.run_all', [command])
        return self.result(out['retcode'], self.encode(command), stdout=out['stdout'], stderr=out['stderr'])

    def run_salt(self, func: str, args: Any=None) -> Any:
        """Invoke a salt execution-module function on self.host.

        Raises RuntimeError when the minion did not answer (host missing
        from the returned mapping).
        """
        out = self.client.cmd(self.host, func, (args or []))
        if (self.host not in out):
            raise RuntimeError('Error while running {}({}): {}. Minion not connected ?'.format(func, args, out))
        return out[self.host]

    # NOTE(review): takes 'cls' — presumably a stripped '@classmethod'; confirm.
    def get_hosts(cls, host: str, **kwargs: Any) -> list[str]:
        if (host is None):
            host = '*'
        # Expand glob patterns against the currently connected minions.
        if any(((c in host) for c in '*[?')):
            client = salt.client.LocalClient()
            # NOTE(review): `'' in host` is always True for strings, making
            # the plain-target branch below unreachable — this looks like a
            # mangled condition (perhaps a space or compound-matcher test);
            # confirm against the original source.
            if ('' in host):
                hosts = client.cmd(host, 'test.true', tgt_type='compound').keys()
            else:
                hosts = client.cmd(host, 'test.true').keys()
            if (not hosts):
                raise RuntimeError("No host matching '{}'".format(host))
            return sorted(hosts)
        return super().get_hosts(host, **kwargs)
class TestCreateCursor(EndianTest):
    """Round-trip pack/unpack test for the X11 CreateCursor request."""

    def setUp(self):
        # NOTE(review): three values in this fixture dict are missing
        # ('cid': , 'mask': , 'source': ,) — the original resource-id values
        # appear to have been lost during extraction; as written this line is
        # a syntax error. Restore the ids from the generating script.
        self.req_args_0 = {'back_blue': 49245, 'back_green': 35528, 'back_red': 27716, 'cid': , 'fore_blue': 55026, 'fore_green': 62740, 'fore_red': 58690, 'mask': , 'source': , 'x': 48400, 'y': 36047}
        # Expected on-the-wire bytes for the request above.
        self.req_bin_0 = b']\x00\x08\x00~\xdfr`\x1c\x15\xec1J\xc8>mB\xe5\x14\xf5\xf2\xd6Dl\xc8\x8a]\xc0\x10\xbd\xcf\x8c'

    def testPackRequest0(self):
        """Packing the args must reproduce the expected binary request."""
        bin = request.CreateCursor._request.to_binary(*(), **self.req_args_0)
        self.assertBinaryEqual(bin, self.req_bin_0)

    def testUnpackRequest0(self):
        """Parsing the binary request must reproduce the args with no remainder."""
        (args, remain) = request.CreateCursor._request.parse_binary(self.req_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_0)
class SimpleSchedulePass(BasePass):
    """Produce a flat simulation schedule for an elaborated component tree.

    Fills ``top._sched`` with three pieces: intra-cycle update blocks in a
    valid topological order, the update_ff blocks, and the posedge "flip"
    callables that swap double-buffered signals.
    """

    def __call__(self, top):
        # Requires the constraint DAG built by a previous pass.
        if (not hasattr(top._dag, 'all_constraints')):
            raise PassOrderError('all_constraints')
        top._sched = PassMetadata()
        self.schedule_intra_cycle(top)
        self.schedule_ff(top)
        self.schedule_posedge_flip(top)

    def schedule_intra_cycle(self, top):
        """Topologically sort the non-FF update blocks under the DAG constraints."""
        if (not hasattr(top, '_sched')):
            raise Exception('Please create top._sched pass metadata namespace first!')
        # Vertices: all final update blocks except update_ff blocks.
        V = (top._dag.final_upblks - top.get_all_update_ff())
        E = set()
        Es = {v: [] for v in V}
        InD = {v: 0 for v in V}
        # Keep only constraints whose endpoints are both intra-cycle blocks.
        for (u, v) in top._dag.all_constraints:
            if ((u in V) and (v in V)):
                InD[v] += 1
                Es[u].append(v)
                E.add((u, v))
        import os
        if ('MAMBA_DAG' in os.environ):
            dump_dag(top, V, E)
        # Kahn's algorithm; the ready set is shuffled so that accidental
        # reliance on a particular schedule order shows up during testing.
        top._sched.update_schedule = update_schedule = []
        Q = [v for v in V if (not InD[v])]
        import random
        while Q:
            random.shuffle(Q)
            u = Q.pop()
            update_schedule.append(u)
            for v in Es[u]:
                InD[v] -= 1
                if (not InD[v]):
                    Q.append(v)
        check_schedule(top, update_schedule, V, E, InD)

    def schedule_ff(self, top):
        """Collect all update_ff blocks (relative order among them is free)."""
        if (not hasattr(top, '_sched')):
            raise Exception('Please create top._sched pass metadata namespace first!')
        top._sched.schedule_ff = list(top.get_all_update_ff().copy())

    def schedule_posedge_flip(self, top):
        """Generate one callable that flips every double-buffered signal.

        Signals are grouped per host component — lone signals are hoisted
        toward the top so the generated code can cache component lookups —
        then a flip function is compiled with exec and registered in
        linecache for debuggability.
        """
        if (not hasattr(top, '_sched')):
            raise Exception('Please create top._sched pass metadata namespace first!')
        hostobj_signals = defaultdict(list)
        # Visit deepest components first.
        for x in reversed(sorted(top._dsl.all_signals, key=(lambda x: x.get_host_component().get_component_level()))):
            if x._dsl.needs_double_buffer:
                hostobj_signals[x.get_host_component()].append(x)
        # Repeatedly hoist components that own a single flip signal to their
        # parent until every group has >1 signal or lives at top.
        done = False
        while (not done):
            next_hostobj_signals = defaultdict(list)
            done = True
            for (x, y) in hostobj_signals.items():
                if (len(y) > 1):
                    next_hostobj_signals[x].extend(y)
                elif (x is top):
                    next_hostobj_signals[x].extend(y)
                else:
                    x = x.get_parent_object()
                    next_hostobj_signals[x].append(y[0])
                    done = False
            hostobj_signals = next_hostobj_signals
        strs = []
        for (x, y) in hostobj_signals.items():
            if (len(y) == 1):
                strs.append(f'  {repr(y[0])}._flip()')
            elif (x is top):
                for z in sorted(y, key=repr):
                    strs.append(f'  {repr(z)}._flip()')
            else:
                # Cache the host component in a local to shorten the
                # generated attribute paths.
                repr_x = repr(x)
                pos = (len(repr_x) + 1)
                strs.append(f'  x = {repr_x}')
                for z in sorted(y, key=repr):
                    strs.append(f'  x.{repr(z)[pos:]}._flip()')
        if (not strs):
            # Nothing is double-buffered: install a no-op.
            def no_double_buffer():
                pass
            top._sched.schedule_posedge_flip = [no_double_buffer]
        else:
            lines = (((['def compile_double_buffer( s ):'] + [' def double_buffer():']) + strs) + [' return double_buffer'])
            l = locals()
            custom_exec(compile('\n'.join(lines), filename='ff_flips', mode='exec'), globals(), l)
            # Register the generated source so tracebacks can display it.
            linecache.cache['ff_flips'] = (1, None, lines, 'ff_flips')
            top._sched.schedule_posedge_flip = [l['compile_double_buffer'](top)]
class BaseSubModel(pybamm.BaseModel):
    """Base class for battery submodels.

    Holds the shared parameter set, the (optional) electrode domain, model
    options, and the particle phase. Subclasses override the get_*/set_*
    hooks below to contribute variables and equations to the full model.

    NOTE(review): indentation in this chunk was flattened by extraction;
    the nesting reconstructed here should be confirmed against upstream.
    """

    def __init__(self, param, domain=None, name='Unnamed submodel', external=False, options=None, phase=None):
        super().__init__(name)
        self.domain = domain
        self.name = name
        self.external = external
        # Normalize raw option dicts into a BatteryModelOptions object.
        if ((options is None) or (type(options) == dict)):
            options = pybamm.BatteryModelOptions(options)
        self.options = options
        self.param = param
        if ((param is None) or (domain is None)):
            self.domain_param = None
        else:
            self.domain_param = param.domain_params[self.domain]
            if (phase is not None):
                self.phase_param = self.domain_param.phase_params[phase]
        self.set_phase(phase)

    def set_phase(self, phase):
        """Validate *phase* against the 'particle phases' option and store
        self.phase / self.phase_name (used as a variable-name prefix)."""
        if (phase is not None):
            if (self.domain is None):
                raise ValueError('Phase must be None if domain is None')
            options_phase = getattr(self.options, self.domain)['particle phases']
            if ((options_phase == '1') and (phase != 'primary')):
                raise ValueError("Phase must be 'primary' if there is only one phase")
            elif ((options_phase == '2') and (phase not in ['primary', 'secondary'])):
                raise ValueError("Phase must be either 'primary' or 'secondary' if there are two phases")
            # Omit the prefix entirely in the single-phase case.
            if ((options_phase == '1') and (phase == 'primary')):
                self.phase_name = ''
            else:
                self.phase_name = (phase + ' ')
        self.phase = phase

    # NOTE(review): the two 'domain' defs below are a property getter/setter
    # pair in the original ('@property' / '@domain.setter' appear stripped).
    def domain(self):
        return self._domain

    def domain(self, domain):
        """Validate and store the domain, caching its capitalized form."""
        if (domain is not None):
            domain = domain.lower()
        ok_domain_list = ['negative', 'separator', 'positive', None]
        if (domain in ok_domain_list):
            self._domain = domain
            if (domain is not None):
                self._Domain = domain.capitalize()
        else:
            raise pybamm.DomainError(f"Domain '{domain}' not recognised (must be one of {ok_domain_list})")

    # NOTE(review): likely also a '@property' in the original source.
    def domain_Domain(self):
        return (self._domain, self._Domain)

    def get_fundamental_variables(self):
        """Hook: variables this submodel defines independently of others."""
        return {}

    def get_coupled_variables(self, variables):
        """Hook: variables that depend on other submodels' variables."""
        return {}

    def set_rhs(self, variables):
        """Hook: differential equations contributed by this submodel."""
        pass

    def set_algebraic(self, variables):
        """Hook: algebraic equations contributed by this submodel."""
        pass

    def set_boundary_conditions(self, variables):
        """Hook: boundary conditions contributed by this submodel."""
        pass

    def set_initial_conditions(self, variables):
        """Hook: initial conditions contributed by this submodel."""
        pass

    def set_events(self, variables):
        """Hook: termination/monitoring events contributed by this submodel."""
        pass
def sidexside_plot(df1, df2, col, cmap, supt, subt1, subt2, figsize=(12, 12)):
    """Plot two (Geo)DataFrames side by side with one shared-style legend.

    The left axes gets a colour-bar axis appended on its right; the right
    axes is drawn without a legend. Both use the same column/colormap.

    Parameters
    ----------
    df1, df2 : dataframes with a ``.plot`` accepting ax/cax/column/cmap.
    col : column to colour by.
    cmap : matplotlib colormap name/object.
    supt : figure suptitle; subt1/subt2 : per-axes titles.
    figsize : overall figure size.
    """
    fig, axes = matplotlib.pyplot.subplots(
        1, 2, gridspec_kw={'width_ratios': [1, 0.86]}, figsize=figsize)
    plot_kwargs = {'column': col, 'cmap': cmap, 'lw': 6, 'alpha': 0.9, 'legend': True}
    for idx, (frame, subtitle) in enumerate(zip((df1, df2), (subt1, subt2))):
        if idx == 0:
            # Left panel carries the colour bar in an appended axis.
            cax = make_axes_locatable(axes[idx]).append_axes('right', size='10%', pad=0.3)
        else:
            plot_kwargs['legend'] = False
            cax = None
        frame.plot(ax=axes[idx], cax=cax, **plot_kwargs)
        axes[idx].set_title(subtitle, fontsize=20)
        carto_elements(axes[idx])
    fig.suptitle(supt, y=0.8, fontsize=25)
    fig.tight_layout()
class Effect4135(BaseEffect):
    """Projected passive effect: boosts the target ship's EM shield resonance.

    Applies the beacon's 'shieldEmDamageResistanceBonus' to the fitted
    ship's 'shieldEmDamageResonance' attribute, with stacking penalties.
    """

    # Evaluated during the 'early' pass of effect processing.
    runTime = 'early'
    type = ('projected', 'passive')

    def handler(fit, beacon, context, projectionRange, **kwargs):
        # NOTE(review): defined without `self`; presumably invoked as an
        # unbound/static callable by the effect framework -- confirm.
        fit.ship.boostItemAttr('shieldEmDamageResonance', beacon.getModifiedItemAttr('shieldEmDamageResistanceBonus'), stackingPenalties=True, **kwargs)
class Tol_matrix():
    """Pairwise inter-atomic distance tolerance matrix, indexed by atomic number.

    Entry ``matrix[i][j]`` is the minimum allowed distance (in Angstrom)
    between species with atomic numbers ``i`` and ``j``, computed as
    ``f * (radius_i + radius_j)`` where the radius column and base scaling
    depend on `prototype`. Individual pairs can be overridden with custom
    values.

    Parameters
    ----------
    tuples : optional ``(specie1, specie2, value)`` triples of custom
        tolerances (value in Angstrom).
    prototype : 'atomic', 'molecular', 'metallic' or 'vdW'; selects the
        radius type and default scaling.
    factor : extra multiplicative scaling applied on top of the prototype's.
    """

    def __init__(self, *tuples, prototype='atomic', factor=1.0):
        f = factor
        self.prototype = prototype
        # Pick the element-table column holding the radius, and the base
        # scaling, for the requested prototype.
        if prototype == 'atomic':
            f *= 0.5
            attrindex = 5
            self.radius_type = 'covalent'
        elif prototype == 'molecular':
            attrindex = 5
            self.radius_type = 'covalent'
            f *= 1.2
        elif prototype == 'metallic':
            attrindex = 7
            self.radius_type = 'metallic'
            f *= 0.5
        elif prototype == 'vdW':
            attrindex = 6
            self.radius_type = 'vdW'
        else:
            self.radius_type = 'N/A'
        self.f = f
        H = Element('H')
        # Row/column 0 is padding so matrix indices line up with atomic numbers.
        m = [[0.0] * (len(H.elements_list) + 1)]
        for i, tup1 in enumerate(H.elements_list):
            m.append([0.0])
            for j, tup2 in enumerate(H.elements_list):
                # Fall back to the covalent radius (column 5) when the
                # prototype-specific radius is unavailable.
                val1 = tup1[attrindex] if tup1[attrindex] is not None else tup1[5]
                # BUG FIX: the fallback previously read tup1[5] (the *row*
                # element's radius) instead of tup2[5].
                val2 = tup2[attrindex] if tup2[attrindex] is not None else tup2[5]
                if (val1 is not None) and (val2 is not None):
                    m[-1].append(f * (val1 + val2))
                else:
                    m[-1].append(None)
        self.matrix = np.array(m)
        self.custom_values = []
        try:
            for tup in tuples:
                self.set_tol(*tup)
        except Exception:
            msg = 'Error: Cannot set custom tolerance value(s).\n'
            msg += 'All entries should be entered using the following form:\n'
            msg += '(specie1, specie2, value), where the value is in Angstrom.'
            raise RuntimeError(msg)
        # Diagonal entries double as per-species "radii" (f * 2 * radius).
        self.radius_list = [self.get_tol(i, i) for i in range(1, len(self.matrix))]

    def get_tol(self, specie1, specie2):
        """Return the tolerance between two species, or None if unknown.

        Species may be given as anything Element.number_from_specie accepts.
        """
        if self.prototype == 'single value':
            return self.matrix[0][0]
        index1 = Element.number_from_specie(specie1)
        index2 = Element.number_from_specie(specie2)
        if (index1 is not None) and (index2 is not None):
            return self.matrix[index1][index2]
        return None

    def set_tol(self, specie1, specie2, value):
        """Set a custom (symmetric) tolerance between two species, in Angstrom."""
        index1 = Element.number_from_specie(specie1)
        index2 = Element.number_from_specie(specie2)
        if (index1 is None) or (index2 is None):
            return
        self.matrix[index1][index2] = float(value)
        if index1 != index2:
            self.matrix[index2][index1] = float(value)
        # Record each customised pair once, smallest index first.
        if ((index1, index2) not in self.custom_values) and ((index2, index1) not in self.custom_values):
            larger = max(index1, index2)
            smaller = min(index1, index2)
            self.custom_values.append((smaller, larger))

    def from_matrix(self, matrix, prototype='atomic', factor=1.0, begin_with=0):
        """Build a Tol_matrix from an explicit matrix of tolerances.

        Only the lower triangle is used (the matrix is assumed symmetric).
        NOTE(review): behaves like an alternate constructor; presumably meant
        to carry a @classmethod decorator -- confirm against callers.
        """
        tups = []
        for i, row in enumerate(matrix):
            for j, value in enumerate(row):
                if j > i:
                    continue  # symmetric: skip the upper triangle
                tups.append((i + 1 - begin_with, j + 1 - begin_with, matrix[i][j]))
        return Tol_matrix(*tups, prototype=prototype, factor=factor)

    def from_radii(self, radius_list, prototype='atomic', factor=1.0, begin_with=0):
        """Build a Tol_matrix from per-species radii: tol = 0.5*factor*(r1+r2).

        NOTE(review): alternate constructor; presumably a @classmethod.
        """
        tups = []
        f = factor * 0.5
        for i, r1 in enumerate(radius_list):
            for j, r2 in enumerate(radius_list):
                if j > i:
                    continue
                tups.append((i + 1 - begin_with, j + 1 - begin_with, f * (r1 + r2)))
        return Tol_matrix(*tups, prototype=prototype, factor=factor)

    def from_single_value(self, value):
        """Build a Tol_matrix applying one tolerance value to every pair."""
        tm = Tol_matrix()
        tm.prototype = 'single value'
        tm.matrix = np.array([[value]])
        tm.custom_values = [(1, 1)]
        tm.radius_type = 'N/A'
        return tm

    def __getitem__(self, index):
        """Return the row of tolerances for a specie (number, name or Element)."""
        new_index = Element.number_from_specie(index)
        # BUG FIX: previously indexed with the raw `index`, which fails for
        # non-integer species; the resolved atomic number was computed but unused.
        return self.matrix[new_index]

    def __str__(self):
        s = '\n--Tol_matrix class object--'
        s += '\nPrototype: ' + str(self.prototype)
        s += '\nAtomic radius type: ' + str(self.radius_type)
        s += '\nRadius scaling factor: ' + str(self.f)
        if self.prototype == 'single value':
            # BUG FIX: previously written `self.matrix([0][0])`, which *called*
            # the array and raised TypeError.
            s += '\nCustom tolerance value: ' + str(self.matrix[0][0])
        elif self.custom_values == []:
            s += '\nCustom tolerance values: None'
        else:
            s += '\nCustom tolerance values:'
            for tup in self.custom_values:
                name1 = str(Element(tup[0]).short_name)
                name2 = str(Element(tup[1]).short_name)
                s += '\n{:s}-{:s}: {:6.3f}'.format(name1, name2, self.get_tol(tup[0], tup[1]))
        return s

    def to_file(self, filename=None):
        """Save this Tol_matrix via np.save; returns a status message string."""
        if filename is None:
            filename = 'custom_tol_matrix'
        # Avoid clobbering an existing file by appending _1, _2, ...
        if os.path.exists(filename):
            i = 1
            while True:
                outdir = filename + '_' + str(i)
                if not os.path.exists(outdir):
                    break
                i += 1
                if i > 10000:
                    return 'Cannot create file: too many files already created.'
        else:
            outdir = filename
        try:
            # BUG FIX: previously saved to `filename` even after computing the
            # collision-free name `outdir`, contradicting the returned message.
            np.save(outdir, [self])
            return 'Output file to ' + outdir + '.npy'
        except Exception:
            return 'Error: Could not save Tol_matrix to file.'

    def from_file(self, filename):
        """Load a Tol_matrix previously written by to_file.

        Raises RuntimeError if the file cannot be read or does not contain a
        Tol_matrix.
        """
        try:
            # allow_pickle is required to load object arrays on NumPy >= 1.16.3.
            tm = np.load(filename, allow_pickle=True)[0]
            if isinstance(tm, Tol_matrix):
                return tm
            raise RuntimeError('invalid file for Tol_matrix: ', filename)
        except Exception:
            raise RuntimeError('Could not load Tol_matrix from file: ', filename)
def transform_dictionary_comprehension(builder: IRBuilder, o: DictionaryComprehension) -> Value:
    """Lower a dict comprehension to IR.

    Allocates an empty dict register, then emits the comprehension's loop
    nest with an innermost statement that stores each key/value pair.
    """
    result = builder.maybe_spill(builder.call_c(dict_new_op, [], o.line))
    comp_params = list(zip(o.indices, o.sequences, o.condlists, o.is_async))

    def set_entry() -> None:
        # Innermost body: evaluate key and value, then write into the dict.
        key_reg = builder.accept(o.key)
        val_reg = builder.accept(o.value)
        builder.call_c(dict_set_item_op, [builder.read(result), key_reg, val_reg], o.line)

    comprehension_helper(builder, comp_params, set_entry, o.line)
    return builder.read(result)
def _update_grant(graphql_client, grant, **kwargs):
    """Send an ``updateGrant`` GraphQL mutation for the given grant.

    Builds the mutation input from the grant's current field values, letting
    `kwargs` override individual fields; ``conference`` and ``instance`` are
    always pinned to the grant being updated. Returns the raw response.
    """
    query = '\n mutation updateGrant($input: UpdateGrantInput!){\n updateGrant(input: $input) {\n __typename\n\n ... on Grant {\n id\n }\n\n ... on GrantErrors {\n errors {\n validationConference: conference\n validationName: name\n validationFullName: fullName\n validationGender: gender\n validationGrantType: grantType\n validationOccupation: occupation\n validationAgeGroup: ageGroup\n validationPythonUsage: pythonUsage\n validationCommunityContribution: communityContribution\n validationBeenToOtherEvents: beenToOtherEvents\n validationInterestedInVolunteering: interestedInVolunteering\n validationNeedsFundsForTravel: needsFundsForTravel\n validationNeedVisa: needVisa\n validationNeedAccommodation: needAccommodation\n validationWhy: why\n validationNotes: notes\n validationTravellingFrom: travellingFrom\n validationWebsite: website\n validationTwitterHandle: twitterHandle\n validationGithubHandle: githubHandle\n validationLinkedinUrl: linkedinUrl\n validationMastodonHandle: mastodonHandle\n nonFieldErrors\n }\n }\n }\n }\n '
    # Current model values serve as defaults for every mutation input field.
    defaults = {'name': grant.name, 'fullName': grant.full_name, 'conference': grant.conference.code, 'ageGroup': grant.age_group, 'gender': grant.gender, 'occupation': grant.occupation, 'grantType': grant.grant_type, 'pythonUsage': grant.python_usage, 'communityContribution': grant.community_contribution, 'beenToOtherEvents': grant.been_to_other_events, 'interestedInVolunteering': grant.interested_in_volunteering, 'needsFundsForTravel': grant.needs_funds_for_travel, 'needVisa': grant.need_visa, 'needAccommodation': grant.need_accommodation, 'why': grant.why, 'notes': grant.notes, 'travellingFrom': grant.travelling_from, 'website': grant.website, 'twitterHandle': grant.twitter_handle, 'githubHandle': grant.github_handle, 'linkedinUrl': grant.linkedin_url, 'mastodonHandle': grant.mastodon_handle}
    # kwargs may override any default; conference/instance are re-applied last
    # so callers cannot accidentally point the mutation at another grant.
    variables = {**defaults, **kwargs, 'conference': grant.conference.code, 'instance': grant.id}
    response = graphql_client.query(query, variables={'input': variables})
    return response
class Net(nn.Module):
    """Small LeNet-style CNN for 1x28x28 inputs (e.g. MNIST).

    Two conv+pool stages followed by two fully-connected layers; returns
    log-probabilities over 10 classes.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Conv stack: 1->10 and 10->20 channels, 5x5 kernels each.
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        # After both conv+pool stages the feature map flattens to 320 values.
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        out = F.relu(F.max_pool2d(self.conv1(x), 2))
        out = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(out)), 2))
        out = out.view(-1, 320)
        # Dropout is active only in training mode.
        out = F.dropout(F.relu(self.fc1(out)), training=self.training)
        return F.log_softmax(self.fc2(out), dim=1)
class GumballMachine():
    """State-pattern context: a gumball vending machine.

    Delegates all user actions to the current State object; states call
    setState() to transition the machine (Head First Design Patterns).
    """

    # One shared instance per concrete state.
    soldOutState: State
    noQuarterState: State
    hasQuarterState: State
    soldState: State
    # Current state and remaining gumball inventory.
    state: State
    count: int = 0

    def __init__(self, numberGumballs: int):
        self.soldOutState = SoldOutState(self)
        self.noQuarterState = NoQuarterState(self)
        self.hasQuarterState = HasQuarterState(self)
        self.soldState = SoldState(self)
        self.count = numberGumballs
        # Start accepting quarters only if stocked.
        if (numberGumballs > 0):
            self.state = self.noQuarterState
        else:
            self.state = self.soldOutState

    def insertQuarter(self) -> None:
        """Delegate quarter insertion to the current state."""
        self.state.insertQuarter()

    def ejectQuarter(self) -> None:
        """Delegate quarter ejection to the current state."""
        self.state.ejectQuarter()

    def turnCrank(self) -> None:
        # A crank turn both registers the action and triggers dispensing.
        self.state.turnCrank()
        self.state.dispense()

    def releaseBall(self) -> None:
        """Dispense one gumball (called by states); never goes below zero."""
        print('A gumball comes rolling out the slot...')
        if (self.count > 0):
            self.count -= 1

    def getCount(self) -> int:
        return self.count

    def refill(self, count: int) -> None:
        """Add gumballs and let the current state react (e.g. leave SoldOut)."""
        self.count += count
        print(f'The gumball machine was just refilled; its new count is: {self.count}')
        self.state.refill()

    def setState(self, state: State) -> None:
        # Called by State objects to transition the machine.
        self.state = state

    def getState(self) -> State:
        return self.state

    # Accessors used by states to pick their transition targets.
    def getSoldOutState(self) -> State:
        return self.soldOutState

    def getNoQuarterState(self) -> State:
        return self.noQuarterState

    def getHasQuarterState(self) -> State:
        return self.hasQuarterState

    def getSoldState(self) -> State:
        return self.soldState

    def __str__(self) -> str:
        """Render the machine's inventory report and current state."""
        result: StringBuffer = StringBuffer()
        result.append('\nMighty Gumball, Inc.')
        result.append('\nJava-enabled Standing Gumball Model #2004')
        result.append(f'''
Inventory: {self.count} gumball''')
        if (self.count != 1):
            result.append('s')  # pluralise
        result.append('\n')
        result.append(f'''Machine is {self.state}
''')
        return str(result)

    def __repr__(self) -> str:
        return str(self)
class Effect3417(BaseEffect):
    """Passive ship role bonus: boosts large projectile turret damage.

    Boosts 'damageMultiplier' on every fitted module requiring the
    'Large Projectile Turret' skill by the hull's
    'eliteBonusViolatorsRole1' attribute.
    """

    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        # NOTE(review): defined without `self`; presumably invoked as an
        # unbound/static callable by the effect framework -- confirm.
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Large Projectile Turret')), 'damageMultiplier', ship.getModifiedItemAttr('eliteBonusViolatorsRole1'), **kwargs)
def regional_task(population_grid_points, transport_network, departure_datetime):
    """Fixture-style generator yielding a configured r5py RegionalTask.

    Yields the task, then on teardown drops the Python reference and nudges
    the JVM garbage collector so the Java-side resources are released.
    """
    import r5py
    # Origin: the geometry of grid point 1; destinations: all grid points.
    regional_task = r5py.RegionalTask(transport_network, population_grid_points.at[(1, 'geometry')], population_grid_points, departure=departure_datetime)
    (yield regional_task)
    # Teardown: release the Java-backed object and force a JVM GC pass.
    del regional_task
    time.sleep(0.5)  # brief pause before asking the JVM to collect
    jpype.java.lang.System.gc()
class SampleBuffer(object):
    """Tracks which indices of a fixed-size buffer are 'valid', supporting
    O(1) add/free and uniform random sampling over the valid set.

    Maintains two inverse mappings:
      idx_to_slot[i] -- dense slot owned by index i (INVALID_IDX if free)
      slot_to_idx[s] -- index stored at dense slot s
    Valid entries always occupy the contiguous slot range [0, count).
    """

    def __init__(self, size):
        # size: total number of addressable indices (fixed capacity).
        self.idx_to_slot = np.empty(shape=[size], dtype=int)
        self.slot_to_idx = np.empty(shape=[size], dtype=int)
        self.count = 0
        self.clear()
        return

    def clear(self):
        """Mark every index as free."""
        self.idx_to_slot.fill(MathUtil.INVALID_IDX)
        self.slot_to_idx.fill(MathUtil.INVALID_IDX)
        self.count = 0
        return

    def is_valid(self, idx):
        # An index is valid iff it currently owns a slot.
        return (self.idx_to_slot[idx] != MathUtil.INVALID_IDX)

    def get_size(self):
        """Total capacity (number of addressable indices)."""
        return self.idx_to_slot.shape[0]

    def add(self, idx):
        """Mark each index in `idx` as valid (no-op for already-valid ones)."""
        for i in idx:
            if (not self.is_valid(i)):
                # Append at the end of the dense slot region.
                new_slot = self.count
                assert (new_slot >= 0)
                self.idx_to_slot[i] = new_slot
                self.slot_to_idx[new_slot] = i
                self.count += 1
        return

    def free(self, idx):
        """Invalidate each index in `idx` via swap-with-last removal.

        The statement order below matters: the last slot's occupant is
        relocated into the vacated slot *before* the freed entries are
        cleared, which also handles the case where the freed index itself
        occupies the last slot.
        """
        for i in idx:
            if self.is_valid(i):
                slot = self.idx_to_slot[i]
                last_slot = (self.count - 1)
                last_idx = self.slot_to_idx[last_slot]
                # Move the last slot's occupant into the freed slot.
                self.idx_to_slot[last_idx] = slot
                self.slot_to_idx[slot] = last_idx
                # Clear the freed index and the now-unused last slot.
                self.idx_to_slot[i] = MathUtil.INVALID_IDX
                self.slot_to_idx[last_slot] = MathUtil.INVALID_IDX
                self.count -= 1
        return

    def sample(self, n):
        """Return n valid indices sampled uniformly with replacement
        (empty int array if nothing is valid)."""
        if (self.count > 0):
            slots = np.random.randint(0, self.count, size=n)
            idx = self.slot_to_idx[slots]
        else:
            idx = np.empty(shape=[0], dtype=int)
        return idx

    def check_consistency(self):
        """Debug helper: verify the two mappings are mutual inverses."""
        valid = True
        if (self.count < 0):
            valid = False
        if valid:
            for i in range(self.get_size()):
                if self.is_valid(i):
                    # idx -> slot -> idx must round-trip.
                    s = self.idx_to_slot[i]
                    if (self.slot_to_idx[s] != i):
                        valid = False
                        break
                # slot -> idx -> slot must round-trip (i reused as a slot here).
                s2i = self.slot_to_idx[i]
                if (s2i != MathUtil.INVALID_IDX):
                    i2s = self.idx_to_slot[s2i]
                    if (i2s != i):
                        valid = False
                        break
            # Both arrays must contain the same number of free entries.
            count0 = np.sum((self.idx_to_slot == MathUtil.INVALID_IDX))
            count1 = np.sum((self.slot_to_idx == MathUtil.INVALID_IDX))
            valid &= (count0 == count1)
        return valid
def node_home(caller):
    """OLC menu node for editing an object's 'home' location.

    Builds the node text (showing the global default and the current value)
    plus a help page, and returns them with the option list. A catch-all
    '_default' option stores whatever the user enters on the 'home' property.
    """
    text = '\n The |cHome|n location of an object is often only used as a backup - this is where the object\n will be moved to if its location is deleted. The home location can also be used as an actual\n home for characters to quickly move back to.\n\n If unset, the global home default (|w{default}|n) will be used.\n\n {current}\n '.format(default=settings.DEFAULT_HOME, current=_get_current_value(caller, 'home'))
    helptext = "\n The home can be given as a #dbref but can also be specified using the protfunc\n '$obj(name)'. Use |wSE|nearch to find objects in the database.\n\n The home location is commonly not used except as a backup; using the global default is often\n enough.\n\n |c$protfuncs|n\n {pfuncs}\n ".format(pfuncs=_format_protfuncs())
    # Menu nodes may return (text, helptext) tuples.
    text = (text, helptext)
    options = _wizard_options('home', 'location', 'destination', search=True)
    # Any other input: strip it and store it as the 'home' property.
    options.append({'key': '_default', 'goto': (_set_property, dict(prop='home', processor=(lambda s: s.strip())))})
    return (text, options)
def gumbel_softmax_conditional_sample(logits, temperature, one_hot_z, eps=1e-20, detach=False):
    """Draw a Gumbel-softmax sample conditioned on an observed category.

    Given `one_hot_z` marking the selected category per row, constructs
    conditional Gumbel noise consistent with that selection and returns its
    temperature-scaled softmax relaxation.

    Parameters
    ----------
    logits : unnormalised category scores.
    temperature : softmax relaxation temperature.
    one_hot_z : one-hot tensor of the conditioned-on category.
    eps : numerical-stability constant.
    detach : if True, gradients do not flow through `logits`.
    """
    uniform = torch.rand(logits.shape).to(device)
    log_u = torch.log(uniform + eps)
    # Log-uniform noise of the selected category k (one scalar per row).
    log_u_k = (one_hot_z * log_u).sum(dim=-1, keepdim=True)
    working_logits = logits.detach() if detach else logits
    # Non-selected categories get noise scaled by their (inverse) probability.
    non_selected = ((-log_u) / (torch.exp(working_logits) + eps)) * (1 - one_hot_z)
    conditional_gumbel = -torch.log((-log_u_k) + non_selected)
    return F.softmax(conditional_gumbel / temperature, dim=-1)
class WSGISOAPHandler(object):
    """Minimal WSGI wrapper around a SOAP dispatcher.

    GET requests serve the service WSDL (bare URL) or per-method help
    documents; POST requests are forwarded to the dispatcher for actual
    SOAP dispatch. Any other HTTP method yields a 405 response.
    """

    def __init__(self, dispatcher):
        self.dispatcher = dispatcher

    def __call__(self, environ, start_response):
        # WSGI entry point: delegate to the method-specific handlers.
        return self.handler(environ, start_response)

    def handler(self, environ, start_response):
        """Route the request by HTTP method."""
        method = environ['REQUEST_METHOD']
        if method == 'GET':
            return self.do_get(environ, start_response)
        if method == 'POST':
            return self.do_post(environ, start_response)
        start_response('405 Method not allowed', [('Content-Type', 'text/plain')])
        return ['Method not allowed']

    def do_get(self, environ, start_response):
        """Serve the WSDL or a method's help document.

        Path '' -> WSDL; path '<method>' -> request doc (default or
        '?request') or response doc (any other query string).
        """
        path = environ.get('PATH_INFO').lstrip('/')
        query = environ.get('QUERY_STRING')
        if path and path not in self.dispatcher.methods.keys():
            start_response('404 Not Found', [('Content-Type', 'text/plain')])
            return ['Method not found: %s' % path]
        if not path:
            response = self.dispatcher.wsdl()
        else:
            req, res, doc = self.dispatcher.help(path)
            response = req if (len(query) == 0 or query == 'request') else res
        start_response('200 OK', [('Content-Type', 'text/xml'), ('Content-Length', str(len(response)))])
        return [response]

    def do_post(self, environ, start_response):
        """Read the SOAP envelope from the body and dispatch it."""
        length = int(environ['CONTENT_LENGTH'])
        soap_request = environ['wsgi.input'].read(length)
        soap_response = self.dispatcher.dispatch(soap_request)
        start_response('200 OK', [('Content-Type', 'text/xml'), ('Content-Length', str(len(soap_response)))])
        return [soap_response]
def pytask_execute_log_end(session: Session, reports: list[ExecutionReport]) -> bool:
    """Log the end of the execution phase.

    Prints failure tracebacks (when enabled), the summary panel and the
    session footer; raises ExecutionError if any task failed.
    """
    session.execution_end = time.time()
    counts = count_outcomes(reports, TaskOutcome)
    has_failures = bool(counts[TaskOutcome.FAIL])

    if session.config['show_traceback']:
        console.print()
        if has_failures:
            console.rule(Text('Failures', style=TaskOutcome.FAIL.style), style=TaskOutcome.FAIL.style)
            console.print()
        for report in reports:
            failed = report.outcome == TaskOutcome.FAIL
            # Skipped-after-failure tasks are only shown at verbosity >= 2.
            skipped_after_fail = (
                report.outcome == TaskOutcome.SKIP_PREVIOUS_FAILED
                and session.config['verbose'] >= 2
            )
            if failed or skipped_after_fail:
                console.print(report)

    console.rule(style='dim')
    description = 'Collected task' if len(reports) == 1 else 'Collected tasks'
    console.print(create_summary_panel(counts, TaskOutcome, description))
    session.hook.pytask_log_session_footer(
        session=session,
        duration=session.execution_end - session.execution_start,
        outcome=TaskOutcome.FAIL if has_failures else TaskOutcome.SUCCESS,
    )
    if has_failures:
        raise ExecutionError
    return True
class SourceHandlerMixin():
    """Mixin collecting source tables/columns of a query and, at end of
    statement, registering table reads and column-level lineage into the
    lineage holder. UNION branches are separated by `union_barriers`.
    """

    # Source tables (or paths/subqueries) seen so far in the statement.
    tables: List[Union[(Path, SubQuery, Table)]]
    # Select-list columns seen so far.
    columns: List[Column]
    # (column-count, table-count) checkpoints marking UNION boundaries.
    union_barriers: List[Tuple[(int, int)]]

    def end_of_query_cleanup(self, holder: SubQueryLineageHolder) -> None:
        """Flush accumulated tables/columns into `holder`.

        Registers every source table as a read, then for each UNION segment
        maps select-list columns onto the (single) write target, resolving
        lateral alias references along the way.
        """
        for (i, tbl) in enumerate(self.tables):
            holder.add_read(tbl)
        # Close the final segment so the loop below covers all columns/tables.
        self.union_barriers.append((len(self.columns), len(self.tables)))
        for (i, (col_barrier, tbl_barrier)) in enumerate(self.union_barriers):
            # Slice out this UNION segment's columns and tables.
            (prev_col_barrier, prev_tbl_barrier) = ((0, 0) if (i == 0) else self.union_barriers[(i - 1)])
            col_grp = self.columns[prev_col_barrier:col_barrier]
            tbl_grp = self.tables[prev_tbl_barrier:tbl_barrier]
            if holder.write:
                # Column lineage needs exactly one write target.
                if (len(holder.write) > 1):
                    raise SQLLineageException
                tgt_tbl = list(holder.write)[0]
                lateral_aliases = set()
                for (idx, tgt_col) in enumerate(col_grp):
                    tgt_col.parent = tgt_tbl
                    # A column is a "lateral alias" if a *later* select item
                    # references it by name.
                    for lateral_alias_ref in col_grp[(idx + 1):]:
                        if any(((src_col[0] == tgt_col.raw_name) for src_col in lateral_alias_ref.source_columns)):
                            lateral_aliases.add(tgt_col.raw_name)
                            break
                    for src_col in tgt_col.to_source_columns(self.get_alias_mapping_from_table_group(tbl_grp, holder)):
                        # INSERT INTO tbl (cols...) SELECT ...: when the write
                        # column list matches the select list in length, remap
                        # the target column positionally (note: rebinds tgt_col).
                        if (len((write_columns := holder.write_columns)) == len(col_grp)):
                            tgt_col = write_columns[idx]
                        is_lateral_alias_ref = False
                        for wc in holder.write_columns:
                            if (wc.raw_name == '*'):
                                continue
                            # If the source refers to a lateral alias, link the
                            # alias's own sources to the target instead.
                            if ((src_col.raw_name == wc.raw_name) and (src_col.raw_name in lateral_aliases)):
                                is_lateral_alias_ref = True
                                for lateral_alias_col in holder.get_source_columns(wc):
                                    holder.add_column_lineage(lateral_alias_col, tgt_col)
                                break
                        if is_lateral_alias_ref:
                            continue
                        holder.add_column_lineage(src_col, tgt_col)

    # NOTE(review): first parameter is named `cls`; presumably this carries a
    # @classmethod decorator not visible here -- confirm before calling.
    def get_alias_mapping_from_table_group(cls, table_group: List[Union[(Path, Table, SubQuery)]], holder: SubQueryLineageHolder) -> Dict[(str, Union[(Path, Table, SubQuery)])]:
        """Map alias names (and raw/qualified table names) to their tables,
        restricted to tables within `table_group`."""
        return {**{tgt: src for (src, tgt, attr) in holder.graph.edges(data=True) if ((attr.get('type') == EdgeType.HAS_ALIAS) and (src in table_group))}, **{table.raw_name: table for table in table_group if isinstance(table, Table)}, **{str(table): table for table in table_group if isinstance(table, Table)}}
# NOTE(review): the leading lines below appear to be pytest/test-harness marker
# decorators whose '@pytest.mark' / '@' prefixes were lost in extraction (e.g.
# '@pytest.mark.skipif(...)', '@_with_pet(nproc=2)') -- restore before running.
.skipif((not torch.cuda.is_available()), reason='The test requires GPUs to run.')
.gpu_only
.usefixtures('toggle_batching')
.parametrize('src_sharding_type', _sharding_types())
.parametrize('dst_sharding_type', _sharding_types())
.parametrize('use_async', [True, False])
_with_pet(nproc=2)
def test_torchrec(src_sharding_type: str, dst_sharding_type: str, use_async: bool, tmp_path: Path) -> None:
    """End-to-end test: snapshot a sharded TorchRec DMP and restore it into a
    DMP with a (possibly different) sharding type, verifying tensor equality
    both via restore() and via per-tensor read_object().
    """
    dist.init_process_group(backend='nccl')
    local_rank = int(os.environ['LOCAL_RANK'])
    device = torch.device(f'cuda:{local_rank}')
    torch.cuda.set_device(device)
    # Seed differently per rank so each rank's shards hold distinct values.
    torch.manual_seed((42 + dist.get_rank()))
    src_dmp = _initialize_dmp(device=device, sharding_type=src_sharding_type)
    # Find the smallest local shard (in bytes) across all sharded tensors...
    smallest_shard_sz = sys.maxsize
    for v in src_dmp.state_dict().values():
        if (not isinstance(v, ShardedTensor)):
            continue
        for shard in v.local_shards():
            smallest_shard_sz = min(smallest_shard_sz, (shard.tensor.nelement() * shard.tensor.element_size()))
    # ...then cap the max shard size below it to force chunked serialization.
    # NOTE(review): the context manager is entered but never exited; presumably
    # intentional for the process lifetime of the test -- confirm.
    override_max_shard_size_bytes(((smallest_shard_sz // 2) - 1)).__enter__()
    if use_async:
        future = torchsnapshot.Snapshot.async_take(path=str(tmp_path), app_state={'dmp': src_dmp})
        snapshot = future.wait()
    else:
        snapshot = torchsnapshot.Snapshot.take(path=str(tmp_path), app_state={'dmp': src_dmp})
    # Build a destination DMP with different random weights.
    torch.manual_seed((777 + dist.get_rank()))
    dst_dmp = _initialize_dmp(device=device, sharding_type=dst_sharding_type)
    src_gathered = _gather_state_dict(src_dmp.state_dict(), 'dmp')
    dst_gathered = _gather_state_dict(dst_dmp.state_dict(), 'dmp')
    # Sanity check: before restore, source and destination must differ.
    for (key, src_tensor) in src_gathered.items():
        assert (not torch.allclose(src_tensor, dst_gathered[key]))
    snapshot.restore(app_state={'dmp': dst_dmp})
    dst_gathered = _gather_state_dict(dst_dmp.state_dict(), 'dmp')
    # After restore, every gathered tensor must match the source exactly.
    for (key, src_tensor) in src_gathered.items():
        assert torch.allclose(src_tensor, dst_gathered[key]), key
    # read_object path: each stored tensor can also be read back individually.
    for (key, src) in src_gathered.items():
        dst = torch.rand_like(src)
        assert (not torch.allclose(src, dst))
        snapshot.read_object(path=key, obj_out=dst)
        assert torch.allclose(src, dst)
class Collection():
    """Lookup of named entries in a bundled JSON data file.

    Loads ``<name>.json`` from this module's directory at construction time
    and resolves entries on demand: 'molecules' entries become Molecule
    objects (case-insensitive name match); 'clusters' entries are returned
    as raw dicts keyed by integer name.
    """

    def __init__(self, name='molecules'):
        self.name = name
        self._data = {}
        self.filename = op.join(op.dirname(__file__), name + '.json')
        with open(self.filename, 'r') as f:
            self.content = json.load(f)

    def __getitem__(self, name):
        """Return the entry for `name`; raise NameError if not found."""
        self._read(name)
        if len(self._data) == 0:
            # Build a readable list of everything that IS available.
            available = ''
            for entry in self.content:
                available += entry['name'] + ', '
            msg = name + ' is not supported\n'
            msg += 'Available molecules are:\n'
            msg += available
            raise NameError(msg)
        return self._data

    def __iter__(self):
        """Yield the name of every entry in the collection."""
        for entry in self.content:
            yield entry['name']

    def _read(self, name):
        """Resolve `name` into self._data (left unchanged when not found)."""
        if self.name == 'molecules':
            for entry in self.content:
                if entry['name'].lower() == name.lower():
                    self._data = Molecule(entry['elements'], entry['xyz'])
        elif self.name == 'clusters':
            for entry in self.content:
                if entry['name'] == int(name):
                    self._data = entry

    def show_names(self):
        """Print the list of all entry names."""
        print([entry['name'] for entry in self.content])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.