code stringlengths 281 23.7M |
|---|
def test_protocol_not_resolved():
    """An envelope carrying only a protocol_specification_id (no resolvable
    protocol id) must make the multiplexer raise ValueError."""
    mux = Multiplexer([Mock()])
    env = Envelope(
        to='1',
        sender='2',
        protocol_specification_id=FipaMessage.protocol_specification_id,
        message=b'some bytes',
    )
    with pytest.raises(ValueError):
        mux._get_protocol_id_for_envelope(env)
def _get_samples_from_metrics(metrics, metric_name, label_matches, nonzero_only=False):
result = []
for metric in metrics:
if ((metric_name is None) or (metric.name == metric_name)):
for sample in metric.samples:
labels = sample.labels
value = sample.value
if ((label_matches is None) or set(label_matches.items()).issubset(set(labels.items()))):
if (nonzero_only and (int(value) == 0)):
continue
result.append(sample)
return result |
def pytest_collection_modifyitems(session: pytest.Session, config: pytest.Config, items: List[pytest.Item]) -> None:
    """Auto-tag collected tests with resource markers.

    Adds 'database', 'elasticsearch' and 'spark' markers based on the
    fixtures, markers and class attributes each test uses, so runs can be
    filtered by required infrastructure.
    """
    db_fixtures = ['db', 'broker_db_setup', 'transactional_db', 'django_db_reset_sequences']
    for item in items:
        fixtures = getattr(item, 'fixturenames', ())
        # A test needs the database if it uses a DB fixture, carries the
        # django_db marker, or its class declares a non-empty `databases`.
        needs_db = (
            any(fx in db_fixtures for fx in fixtures)
            or 'django_db' in [m.name for m in item.own_markers]
            or (item.cls and hasattr(item.cls, 'databases') and len(item.cls.databases) > 0)
        )
        if needs_db:
            item.add_marker('database')
        # Fixture names like 'elasticsearch_index' imply an ES dependency.
        if any('elasticsearch' in fx and 'index' in fx for fx in fixtures):
            item.add_marker('elasticsearch')
        if 'spark' in fixtures:
            item.add_marker('spark')
class TestUtils(unittest.TestCase):
    # Tests for bench's App helpers: URL parsing, branch validation, and the
    # on-disk app-state bookkeeping.
    # NOTE(review): several string literals in this copy appear truncated
    # (empty/unterminated URLs at the `git_url = '` line and in the
    # is_valid_frappe_branch calls) — restore the original remote URLs; the
    # file will not parse as-is.
    def test_app_utils(self):
        # App built from a git URL should expose name/branch/tag/org metadata.
        git_url = '
        branch = 'develop'
        app = App(name=git_url, branch=branch, bench=Bench('.'))
        self.assertTrue(all([(app.name == git_url), (app.branch == branch), (app.tag == branch), (app.is_url is True), (app.on_disk is False), (app.org == 'frappe'), (app.url == git_url)]))
    def test_is_valid_frappe_branch(self):
        # A nonexistent branch must raise; known branches/tags must not.
        with self.assertRaises(InvalidRemoteException):
            is_valid_frappe_branch(' frappe_branch='random-branch')
        is_valid_frappe_branch(' frappe_branch='random-branch')
        is_valid_frappe_branch(' frappe_branch='develop')
        is_valid_frappe_branch(' frappe_branch='v13.29.0')
    def test_app_states(self):
        # Build a sandbox bench, verify stale state entries are pruned, then
        # create a minimal committed frappe app and verify its version is
        # picked up by update_apps_states.
        bench_dir = './sandbox'
        sites_dir = os.path.join(bench_dir, 'sites')
        if (not os.path.exists(sites_dir)):
            os.makedirs(sites_dir)
        fake_bench = Bench(bench_dir)
        self.assertTrue(hasattr(fake_bench.apps, 'states'))
        # Seed a state for an app that doesn't exist on disk; it should be
        # cleared by the update below.
        fake_bench.apps.states = {'frappe': {'resolution': {'branch': 'develop', 'commit_hash': '234rwefd'}, 'version': '14.0.0-dev'}}
        fake_bench.apps.update_apps_states()
        self.assertEqual(fake_bench.apps.states, {})
        frappe_path = os.path.join(bench_dir, 'apps', 'frappe')
        os.makedirs(os.path.join(frappe_path, 'frappe'))
        subprocess.run(['git', 'init'], cwd=frappe_path, capture_output=True, check=True)
        with open(os.path.join(frappe_path, 'frappe', '__init__.py'), 'w+') as f:
            f.write("__version__ = '11.0'")
        subprocess.run(['git', 'add', '.'], cwd=frappe_path, capture_output=True, check=True)
        # git identity is required for the commit to succeed in a clean env.
        subprocess.run(['git', 'config', 'user.email', 'bench-test_app_'], cwd=frappe_path, capture_output=True, check=True)
        subprocess.run(['git', 'config', 'user.name', 'App States Test'], cwd=frappe_path, capture_output=True, check=True)
        subprocess.run(['git', 'commit', '-m', 'temp'], cwd=frappe_path, capture_output=True, check=True)
        fake_bench.apps.update_apps_states(app_name='frappe')
        self.assertIn('frappe', fake_bench.apps.states)
        self.assertIn('version', fake_bench.apps.states['frappe'])
        self.assertEqual('11.0', fake_bench.apps.states['frappe']['version'])
        shutil.rmtree(bench_dir)
    def test_ssh_ports(self):
        # SSH-style app spec with an explicit port should parse org/repo/name.
        app = App(':22:frappe/frappe')
        self.assertEqual((app.use_ssh, app.org, app.repo, app.app_name), (True, 'frappe', 'frappe', 'frappe'))
_type(BGP_MSG_ROUTE_REFRESH)
class BGPRouteRefresh(BGPMessage):
    """BGP ROUTE-REFRESH message (RFC 2918 / RFC 7313).

    Body layout is ``!HBB``: AFI (2 bytes), demarcation/reserved (1 byte),
    SAFI (1 byte).
    NOTE(review): the bare ``_type(...)`` call above looks like a decorator
    whose ``@`` prefix was stripped in this copy — confirm against upstream.
    """
    _PACK_STR = '!HBB'
    _MIN_LEN = (BGPMessage._HDR_LEN + struct.calcsize(_PACK_STR))

    def __init__(self, afi, safi, demarcation=0, type_=BGP_MSG_ROUTE_REFRESH, len_=None, marker=None):
        super(BGPRouteRefresh, self).__init__(marker=marker, len_=len_, type_=type_)
        self.afi = afi
        self.safi = safi
        self.demarcation = demarcation
        # Tracks whether End-of-RIB was sent for this refresh.
        self.eor_sent = False

    # fix: parser takes `cls` and reads `cls._PACK_STR` but was missing the
    # @classmethod decorator, so calling BGPRouteRefresh.parser(buf) would
    # misbind the arguments.
    @classmethod
    def parser(cls, buf):
        """Unpack the message body and return kwargs for __init__."""
        (afi, demarcation, safi) = struct.unpack_from(cls._PACK_STR, six.binary_type(buf))
        return {'afi': afi, 'safi': safi, 'demarcation': demarcation}

    def serialize_tail(self):
        """Pack the body (everything after the common BGP header)."""
        return bytearray(struct.pack(self._PACK_STR, self.afi, self.demarcation, self.safi))
def get_infix_items(tokens, callback=infix_error):
    """Fold parsed infix tokens into a single (func, args) call form.

    ``tokens`` alternates argument groups and operator functions; each extra
    operator wraps everything seen so far via ``callback``.
    """
    internal_assert(len(tokens) >= 3, 'invalid infix tokens', tokens)
    arg1, func, arg2 = tokens[0], tokens[1], tokens[2]
    tokens = tokens[3:]
    args = list(arg1) + list(arg2)
    while tokens:
        # Wrap the accumulated call as the sole argument of the next operator.
        args = [callback([args, func, []])]
        func, newarg = tokens[0], tokens[1]
        tokens = tokens[2:]
        args += list(newarg)
    return (func, args)
class IRLogServiceFactory():
    """Factory that materializes IRLogService resources from the 'log_services'
    section of the Ambassador config into the IR."""

    # fix: load_all takes `cls` but was missing the @classmethod decorator, so
    # IRLogServiceFactory.load_all(ir, aconf) would misbind the arguments.
    @classmethod
    def load_all(cls, ir: 'IR', aconf: Config) -> None:
        """Create an IRLogService per config entry, posting an error on
        duplicate names and keeping only active services."""
        services = aconf.get_config('log_services')
        if services is not None:
            for config in services.values():
                srv = IRLogService(ir, config)
                extant_srv = ir.log_services.get(srv.name, None)
                if extant_srv:
                    # First definition wins; report the duplicate.
                    ir.post_error(('Duplicate LogService %s; keeping definition from %s' % (srv.name, extant_srv.location)))
                elif srv.is_active():
                    ir.log_services[srv.name] = srv
                    ir.save_resource(srv)
class Solution():
    """LeetCode 399 'Evaluate Division' via a weighted graph + iterative DFS."""

    def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:
        """Answer each (a, b) query with a/b, or -1.0 when not derivable.

        Edges carry quotients; a path product gives the answer. Discovered
        ratios are cached back onto the source node as new edges.
        """
        # Build the bidirectional quotient graph.
        graph = {}
        for (num, den), quotient in zip(equations, values):
            graph.setdefault(num, {})
            graph.setdefault(den, {})
            graph[num][den] = quotient
            if quotient != 0.0:
                graph[den][num] = 1 / quotient

        def resolve(src, dst):
            # Iterative DFS from src, multiplying edge weights along the way.
            if src not in graph:
                return -1.0
            seen = set()
            pending = [(src, 1.0)]
            while pending:
                node, ratio = pending.pop()
                if node in seen:
                    continue
                seen.add(node)
                if node != src:
                    # Memoize the discovered ratio for later queries.
                    graph[src][node] = ratio
                if node == dst:
                    return ratio
                for nxt, weight in graph.get(node, {}).items():
                    if nxt not in seen:
                        pending.append((nxt, ratio * weight))
            return -1.0

        answers = []
        for src, dst in queries:
            direct = graph.get(src, {}).get(dst)
            if direct is not None:
                answers.append(direct)
            else:
                answers.append(resolve(src, dst))
        return answers
def test_keystore_init_container():
    """The 'keystore' init container is rendered only when keystore entries
    are present in the chart values."""
    # No keystore configured: last init container must not be the keystore one.
    rendered = helm_template('')
    last_init = rendered['statefulset'][uname]['spec']['template']['spec']['initContainers'][-1]
    assert last_init['name'] != 'keystore'
    # With a keystore secret, the keystore init container is appended last.
    rendered = helm_template('\nkeystore:\n - secretName: test\n ')
    last_init = rendered['statefulset'][uname]['spec']['template']['spec']['initContainers'][-1]
    assert last_init['name'] == 'keystore'
def check_performance(thr_and_double, shape_and_axes):
    """Time an FFTShift kernel and validate it against numpy.fft.fftshift.

    :param thr_and_double: (thread, use_double_precision) pair
    :param shape_and_axes: (array shape, shift axes) pair
    :return: (best wall-clock time over 10 runs, data size in bytes)
    """
    thr, double = thr_and_double
    shape, axes = shape_and_axes
    dtype = dtypes.normalize_type(numpy.complex128 if double else numpy.complex64)
    data = numpy.arange(product(shape)).reshape(shape).astype(dtype)
    shift_comp = FFTShift(data, axes=axes).compile(thr)
    data_dev = thr.to_device(data)
    res_dev = thr.empty_like(data)
    attempts = 10
    timings = []
    for _ in range(attempts):
        start = time.time()
        shift_comp(res_dev, data_dev)
        # Wait for the kernel to actually finish before stopping the clock.
        thr.synchronize()
        timings.append(time.time() - start)
    # Correctness check against the reference implementation.
    reference = numpy.fft.fftshift(data, axes=axes)
    assert diff_is_negligible(res_dev.get(), reference)
    return (min(timings), product(shape) * dtype.itemsize)
def test_bind_callbacks_and_overlap(manager):
    # Builds a popup with a slider and a circular progress bar that fully
    # overlap, binds Button1 callbacks on both, clicks once inside the overlap,
    # and verifies BOTH controls' callbacks fired (overlap doesn't swallow
    # events for the control underneath).
    layout = textwrap.dedent('\n    from libqtile import widget\n    from qtile_extras.popup.toolkit import (\n        PopupCircularProgress,\n        PopupRelativeLayout,\n        PopupText,\n        PopupSlider\n    )\n    self.popup = PopupRelativeLayout(\n        self,\n        controls=[\n            PopupSlider(\n                pos_x=0.1,\n                pos_y=0.1,\n                width=0.8,\n                height=0.8,\n                value=0.5,\n                name="slider1"\n            ),\n            PopupCircularProgress(\n                pos_x=0.1,\n                pos_y=0.1,\n                width=0.8,\n                height=0.8,\n                value=0.25,\n                name="progress1"\n            )\n        ],\n        margin=0\n    )\n\n    self.popup.show()\n    ')
    manager.c.eval(layout)
    (_, info) = manager.c.eval('self.popup.info()')
    # NOTE(review): eval() on the repr returned by the manager — acceptable in
    # a test, but assumes info() reprs to a literal dict.
    info = eval(info)
    # Initial values as configured above.
    assert (info['controls'][0]['value'] == 0.5)
    assert (info['controls'][1]['value'] == 0.25)
    # Bind Button1 on both overlapping controls to update their own values.
    (_, out) = manager.c.eval("self.popup.bind_callbacks(slider1={'Button1': lambda p=self.popup: p.update_controls(slider1=0.1)},progress1={'Button1': lambda p=self.popup: p.update_controls(progress1=0.9)})")
    # Single click at (110, 20) lands inside both controls' bounding areas.
    (_, out) = manager.c.eval('self.popup.process_button_click(110, 20, 1)')
    (_, info) = manager.c.eval('self.popup.info()')
    info = eval(info)
    # Both callbacks must have run.
    assert (info['controls'][0]['value'] == 0.1)
    assert (info['controls'][1]['value'] == 0.9)
def main():
    """CLI entry point for the PyTorch micro-benchmark driver.

    Parses the command line, assembles ``run_options``, optionally attaches
    profiling/trace collection (Kineto, CUPTI, NCU, NSYS, execution trace),
    runs the benchmark, and streams JSON-line results to
    ``<output_prefix>_<pid>_<timestamp>.json``.
    """
    parser = argparse.ArgumentParser(description='PyTorch Microbenchmarks')
    parser.add_argument('-c', '--config', type=str, help='The benchmark config file.')
    parser.add_argument('-w', '--warmup', type=int, default=1, help='Number of warm up iterations.')
    parser.add_argument('-i', '--iteration', type=int, default=1, help='Number of benchmark iterations.')
    parser.add_argument('-b', '--backward', action='store_true', help='Include backward pass.')
    parser.add_argument('-d', '--device', type=str, default='cpu', help='Target device for benchmark.')
    parser.add_argument('-o', '--output-prefix', type=str, default='benchmark_result', help='File name prefix to write benchmark results.')
    parser.add_argument('-r', '--resume-id', type=str, default=None, help='Define a resume op_run_id to continue benchmark, skip all previous configs.')
    parser.add_argument('-s', '--stop_id', type=str, default=None, help='Define a stop op_run_id (exclusive) to stop benchmark, skip remaining configs.')
    parser.add_argument('-a', '--append', action='store_true', help='Append to output file, rather than overwrite.')
    parser.add_argument('--cuda-l2-cache', default='off', nargs='?', choices=['on', 'off'], help='Set option for CUDA GPU L2 cache between iterations in discrete mode.')
    parser.add_argument('--ncu', action='store_true', help='Run NSight Compute to collect metrics.')
    parser.add_argument('--ncu-bin', type=str, default=None, help='Path to the NSight Compute (ncu) binary.')
    parser.add_argument('--ncu-args-file', type=str, default=None, help='NSight Compute extra command line options (metrics etc.).')
    # fix: these two help strings previously said "NSight Systems" (copy/paste
    # from the --nsys options below); they configure NSight Compute.
    parser.add_argument('--ncu-warmup', type=int, default=None, help='NSight Compute number of warmup runs.')
    parser.add_argument('--ncu-iteration', type=int, default=None, help='NSight Compute number of measured iteration runs.')
    parser.add_argument('--nsys', action='store_true', help='Run NSight Systems to collect metrics.')
    parser.add_argument('--nsys-bin', type=str, default=None, help='Path to the NSight Systems (nsys) binary.')
    parser.add_argument('--nsys-args-file', type=str, default=None, help='NSight Systems extra command line options (metrics etc.).')
    parser.add_argument('--nsys-warmup', type=int, default=None, help='NSight Systems number of warmup runs.')
    parser.add_argument('--nsys-iteration', type=int, default=None, help='NSight Systems number of measured iteration runs.')
    parser.add_argument('--run-batch-size', type=int, default=50, help='Batch run input size (number of input configs to run in one launch), used by both NCU and NSYS.')
    parser.add_argument('--batch-cuda-device', type=int, default=1, help='CUDA GPU device ID to run batch job.')
    parser.add_argument('--batch-cmd', type=str, default=None, help='Run batch job command.')
    parser.add_argument('--exec-mode', type=str, default='discrete', nargs='?', choices=['discrete', 'continuous', 'continuous_events'], help='Set execution mode of the operators (discrete, continuous, continuous_events). Default=discrete')
    parser.add_argument('-p', '--profile', action='store_true', help='Enable profiler and tracing.')
    parser.add_argument('--cupti-profiler', action='store_true', help='Run CUPTI Profiler to measure performance events directly,The measurements will be written to the profile trace file.See --cupti_profiler_metrics for supported metrics.')
    parser.add_argument('--cupti-profiler-metrics', type=str, default='kineto__cuda_core_flops', help='Comma separated list of metrics to measure on the CUDA deviceYou can use any metrics available here: eg: L2 misses, L1 bank conflicts.\n Additionally, Two special metrics are useful for measuring FLOPS\n- kineto__cuda_core_flops = CUDA floating point op counts\n- kineto__tensor_core_insts = Tensor core op counts\n')
    parser.add_argument('--cupti-profiler-measure-per-kernel', action='store_true', help='Run CUPTI Profiler measurements for every GPU kernelWarning : this can be slow')
    parser.add_argument('--et', action='store_true', help='Collect execution trace.')
    parser.add_argument('-l', '--log-level', default='INFO', help='Log output verbosity.')
    parser.add_argument('--version', action='store_true', help='Print version.')
    args = parser.parse_args()
    logger = init_logging(getattr(logging, args.log_level.upper(), logging.INFO))
    if args.version:
        logger.info(f'PARAM train compute version: {__version__}')
        return
    elif (not args.config):
        parser.print_usage()
        return
    # Register available operator libraries and workloads.
    load_modules(lib_pytorch)
    load_modules(workloads_pytorch)
    # Assemble run options from defaults + command line.
    run_options = get_benchmark_options()
    run_options['warmup'] = args.warmup
    run_options['iteration'] = args.iteration
    run_options['device'] = args.device
    run_options['cuda_l2_cache'] = (args.cuda_l2_cache == 'on')
    run_options['resume_op_run_id'] = args.resume_id
    run_options['stop_op_run_id'] = args.stop_id
    run_options['run_batch_size'] = args.run_batch_size
    run_options['batch_cuda_device'] = args.batch_cuda_device
    if args.backward:
        run_options['pass_type'] = ExecutionPass.BACKWARD
    else:
        run_options['pass_type'] = ExecutionPass.FORWARD
    run_options['op_exec_mode'] = OpExecutionMode(args.exec_mode)
    run_options['run_ncu'] = args.ncu
    run_options['run_nsys'] = args.nsys
    # Output file name embeds pid + timestamp so parallel runs don't collide.
    pid = os.getpid()
    start_time = datetime.now()
    timestamp = int(datetime.timestamp(start_time))
    out_file_prefix = f'{args.output_prefix}_{pid}_{timestamp}'
    out_file_name = f'{out_file_prefix}.json'
    write_option = ('a' if args.append else 'w')
    if args.batch_cmd:
        run_options['batch_cmd'] = args.batch_cmd
    if args.ncu_bin:
        run_options['ncu_bin'] = args.ncu_bin
    # fix: compare the integer options against None so an explicit 0 (e.g.
    # "no warmup") is honored rather than silently ignored.
    if args.ncu_warmup is not None:
        run_options['ncu_warmup'] = args.ncu_warmup
    if args.ncu_iteration is not None:
        run_options['ncu_iteration'] = args.ncu_iteration
    if args.ncu_args_file:
        with open(args.ncu_args_file, 'r') as ncu_file:
            run_options['ncu_args'] = ncu_file.read().strip()
    if args.nsys_bin:
        run_options['nsys_bin'] = args.nsys_bin
    if args.nsys_warmup is not None:
        run_options['nsys_warmup'] = args.nsys_warmup
    if args.nsys_iteration is not None:
        run_options['nsys_iteration'] = args.nsys_iteration
    if args.nsys_args_file:
        with open(args.nsys_args_file, 'r') as nsys_file:
            run_options['nsys_args'] = nsys_file.read().strip()
    # CUPTI profiling requires a CUDA device and implies the pytorch profiler.
    if (args.cupti_profiler and (not run_options['device'].startswith('cuda'))):
        logger.warning('Cannot use --cupti_profiler when not running on cuda device')
        args.cupti_profiler = False
    if (args.cupti_profiler and (not args.profile)):
        logger.warning('Enabling pytorch profiler as --cupti_profiler was added')
        args.profile = True
    run_options['cmd_args'] = args.__dict__
    with open(out_file_name, write_option) as out_file:
        run_options['out_file_prefix'] = args.output_prefix
        run_options['out_stream'] = out_file
        # First JSON line records the full setup for reproducibility.
        benchmark_setup = {'run_options': run_options, 'sys_info': get_sys_info(), 'start_time': start_time.isoformat(timespec='seconds')}
        print(json.dumps(benchmark_setup, default=str), file=out_file)
        if args.cupti_profiler:
            # A throwaway profiler session warms up CUPTI before measuring.
            with torch.autograd.profiler.profile(enabled=True, use_cuda=True, use_kineto=True) as _:
                logger.info('Running dummy profiler warmup for CUPTI.')
        bench_config = BenchmarkConfig(run_options)
        bench_config.load_json_file(args.config)
        benchmark = make_default_benchmark(bench_config)
        use_cuda = False
        if run_options['device'].startswith('cuda'):
            use_cuda = True
        # Optional execution-trace collection around the benchmark run.
        et = None
        if args.et:
            et_file = f'{out_file_prefix}_et.json'
            et = ExecutionTraceObserver()
            et.register_callback(et_file)
            et.start()
        cupti_profiler_config = (_ExperimentalConfig(profiler_metrics=args.cupti_profiler_metrics.split(','), profiler_measure_per_kernel=args.cupti_profiler_measure_per_kernel) if args.cupti_profiler else None)
        with torch.autograd.profiler.profile(args.profile, use_cuda=use_cuda, use_kineto=True, record_shapes=False, experimental_config=cupti_profiler_config, use_cpu=((not args.cupti_profiler) or args.cupti_profiler_measure_per_kernel)) as prof:
            with record_function(f"[param|{run_options['device']}]"):
                benchmark.run()
        if et:
            et.stop()
            et.unregister_callback()
            # fix: corrected 'Exeution' typo in the log message.
            logger.info(f'Execution trace: {et_file}')
        print(json.dumps({'finish_time': datetime.now().isoformat(timespec='seconds')}), file=out_file)
        if (args.profile and prof):
            trace_file = f'{out_file_prefix}_trace.json'
            logger.info(f'Kineto trace: {trace_file}')
            prof.export_chrome_trace(trace_file)
            print(json.dumps({'trace_file': trace_file}), file=out_file)
    logger.info(f'Benchmark result: {out_file_name}')
def extractMindlesstlWordpressCom(item):
    """Parse a feed item from mindlesstl.wordpress.com into a release message.

    Returns None for previews/unparseable titles, a release message when a
    known tag matches, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Bail out on items with no chapter/volume info or preview posts.
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
# NOTE(review): the four lines below look like click decorator remnants whose
# '@<group>.command' / '@pass_context' / '@click.argument' / '@click.option'
# prefixes were stripped in this copy — restore the decorators; this does not
# parse as-is.
(name='export', help='Export a filter from database')
_context
('filter_name', type=str)
('--output-path', '-o', type=Path(writable=True), help='Path where you want to save the exported filter to')
def export_filter(ctx: Context, filter_name: str, output_path: str) -> None:
    """Export the named filter from the database, optionally to a given path."""
    if output_path:
        filters.export_filter(ctx.database, filter_name, pathlib.Path(output_path))
    else:
        # No path given: exporter falls back to its default destination.
        filters.export_filter(ctx.database, filter_name)
class Benchmark():
    """Accumulates wall-clock time per named section.

    fix: ``measure`` was a bare generator (its ``@contextmanager`` decorator
    appears to have been stripped), so ``with benchmark.measure(...)`` would
    fail — generators have no ``__enter__``. It now returns an explicit
    context-manager object with identical semantics (time is accumulated even
    when the body raises).
    """

    def __init__(self):
        # Maps section name -> accumulated seconds.
        self.total = {}

    def measure(self, name):
        """Return a context manager that adds elapsed time under ``name``."""
        if name not in self.total:
            self.total[name] = 0
        owner = self

        class _Timer:
            def __enter__(self):
                self._start = time.time()
                return None

            def __exit__(self, exc_type, exc, tb):
                # Accumulate on both normal exit and exception (like try/finally).
                owner.total[name] += time.time() - self._start
                return False

        return _Timer()

    def measure_func(self, f):
        """Decorator: time every call of ``f`` under its function name."""
        import functools

        @functools.wraps(f)
        def _f(*args, **kwargs):
            # NOTE(review): times against the module-level `benchmark`
            # singleton, not `self` — preserved from the original; confirm
            # that is intended.
            with benchmark.measure(f.__name__):
                return f(*args, **kwargs)
        return _f

    def reset(self):
        """Drop all accumulated timings."""
        self.total = {}

    def print(self):
        """Print sections sorted by total time, descending."""
        scores = [(total, name) for (name, total) in self.total.items()]
        scores.sort(reverse=True)
        print('---')
        for (total, name) in scores:
            print(('%.4f\t%s' % (total, name)))
# NOTE(review): `field(default_factory=list)` only has meaning under a
# @dataclass decorator, which appears to have been stripped from this copy —
# restore it before use.
class RunGridArgs():
    """Options controlling a grid run (selection, monitoring, and actions)."""
    # Glob-style patterns selecting which experiments to act on.
    patterns: tp.List[str] = field(default_factory=list)
    # Keep monitoring jobs after launch, polling every `interval` minutes.
    monitor: bool = True
    interval: float = 5
    # Limit metric history length to this many entries (None = no trim).
    trim: tp.Optional[int] = None
    trim_last: bool = False
    silent: bool = False
    # Report what would happen without scheduling anything.
    dry_run: bool = False
    cancel: bool = False
    clear: bool = False
    init: tp.Optional[bool] = False
    jupyter: bool = False
    # Indexes into the selected jobs for folder/log/tail inspection.
    folder: tp.Optional[int] = None
    log: tp.Optional[int] = None
    tail: tp.Optional[int] = None
    # Set internally when the args were built from the command line.
    _from_commandline: bool = False
class Command():
    """Thin wrapper building an argparse.ArgumentParser from flag specs.

    Each flag exposes ``required``, ``dtype`` ('bool'/'str'/'int'),
    ``aliases`` and ``description``.
    """

    def __init__(self, description=None, commands=None, flags=None):
        self.parser = ArgumentParser()
        self.description = description
        self.commands = commands
        self.flags = flags
        self.has_flags = flags is not None
        if flags is None:
            return
        for flag in flags:
            kwargs = {}
            # Optional non-bool flags may appear without a value.
            if not flag.required and flag.dtype not in ['bool']:
                kwargs['nargs'] = '?'
            # Booleans become presence flags; everything else stores a value.
            if flag.dtype == 'bool':
                kwargs['action'] = 'store_true'
            else:
                kwargs['action'] = 'store'
            if flag.dtype == 'bool':
                pass
            elif flag.dtype == 'str':
                kwargs['type'] = str
            elif flag.dtype == 'int':
                kwargs['type'] = int
            else:
                error('Unsupported dtype: {}'.format(flag.dtype))
                return
            self.parser.add_argument(*flag.aliases, help=flag.description, **kwargs)
def jacobian_add(p: 'PlainPoint3D', q: 'PlainPoint3D') -> 'PlainPoint3D':
if (not p[1]):
return q
if (not q[1]):
return p
U1 = ((p[0] * (q[2] ** 2)) % P)
U2 = ((q[0] * (p[2] ** 2)) % P)
S1 = ((p[1] * (q[2] ** 3)) % P)
S2 = ((q[1] * (p[2] ** 3)) % P)
if (U1 == U2):
if (S1 != S2):
return cast('PlainPoint3D', (0, 0, 1))
return jacobian_double(p)
H = (U2 - U1)
R = (S2 - S1)
H2 = ((H * H) % P)
H3 = ((H * H2) % P)
U1H2 = ((U1 * H2) % P)
nx = ((((R ** 2) - H3) - (2 * U1H2)) % P)
ny = (((R * (U1H2 - nx)) - (S1 * H3)) % P)
nz = (((H * p[2]) * q[2]) % P)
return cast('PlainPoint3D', (nx, ny, nz)) |
class ItemFetcher(common.LogBase.LoggerMixin):
    """Fetches one target URL through the remote fetch RPC interface and routes
    the retrieved content through the registered filter and processor plugins.
    """
    loggerPath = 'Main.SiteArchiver'
    # NOTE(review): unit unclear from here (bytes? link budget?) — confirm.
    FETCH_DISTANCE = (1000 * 1000)

    def sync_wg_proxy(self):
        """Lazily create and cache a local WebGetRobust fetch interface.

        fix: the original used getattr(self, '__wg', None), which can never
        match the name-mangled attribute written by `self.__wg = ...`, so a
        fresh WebGetRobust was constructed on every call. Use a non-mangled
        attribute name so caching actually works.
        """
        if getattr(self, '_wg', None) is None:
            self._wg = WebRequest.WebGetRobust()
        return self._wg

    def __init__(self, rules, target_url, db_sess, start_url, job, cookie_lock=None, wg_proxy=None, response_queue=None):
        """Bind the fetcher to a target URL, selecting the matching ruleset.

        :param rules: list of ruleset dicts; exactly one with netlocs=None is
            the catch-all base ruleset.
        :param wg_proxy: optional externally-supplied fetch interface factory;
            defaults to the local sync_wg_proxy.
        """
        super().__init__()
        self.response_queue = response_queue
        self.job = job
        self.db_sess = db_sess
        if wg_proxy:
            self.wg_proxy = wg_proxy
        else:
            self.wg_proxy = self.sync_wg_proxy
        for item in PLUGINS:
            assert issubclass(item, WebMirror.processor.ProcessorBase.PageProcessor), ("Item '%s' does not inherit from '%s'" % (item, WebMirror.processor.ProcessorBase.PageProcessor))
        # Processor plugins bucketed by priority; filters always run first.
        self.plugin_modules = {}
        for item in PLUGINS:
            key = item.want_priority
            if key in self.plugin_modules:
                self.plugin_modules[key].append(item)
            else:
                self.plugin_modules[key] = [item]
        self.filter_modules = []
        for item in FILTERS:
            self.filter_modules.append(item)
        # Split the catch-all base ruleset from the netloc-specific ones.
        baseRules = [ruleset for ruleset in rules if (ruleset['netlocs'] is None)].pop(0)
        rules = [ruleset for ruleset in rules if (ruleset['netlocs'] is not None)]
        rules.sort(key=(lambda x: x['netlocs']))
        self.ruleset = rules
        # Domains whose links are eligible for rewriting during processing.
        self.relinkable = set()
        for item in self.ruleset:
            if item['fileDomains']:
                self.relinkable.update(item['fileDomains'])
            if item['netlocs']:
                self.relinkable.update(item['netlocs'])
        # Pick the ruleset matching this URL's netloc, else the base ruleset.
        netloc = urllib.parse.urlsplit(target_url).netloc
        self.rules = None
        for ruleset in self.ruleset:
            if netloc in ruleset['netlocs']:
                self.rules = ruleset
        if not self.rules:
            self.log.warn("Using base ruleset for URL: '%s'!", target_url)
            self.rules = baseRules
        assert self.rules
        self.target_url = target_url
        self.start_url = start_url
        self.mon_con = statsd.StatsClient(host=settings.GRAPHITE_DB_IP, port=8125, prefix='ReadableWebProxy.Processing')

    def getEmptyRet(self, mimetype, message):
        """Build the error-placeholder return structure for undispatchable content."""
        return {'plainLinks': [], 'rsrcLinks': [], 'title': 'Error: Unknown dispatch type', 'contents': ("Error. Dispatch did not return content! File mimetype '%s'.<br><br>%s" % (mimetype, message)), 'mimeType': 'text/html'}

    def plugin_dispatch(self, plugin, url, content, fName, mimeType, no_ret=False):
        """Run one plugin's process() over the fetched content.

        :param no_ret: when True (filters), the plugin's return is discarded.
        :return: the plugin's result dict (validated), or None when no_ret.
        """
        self.log.info("Dispatching file '%s' with mime-type '%s' to plugin: '%s'", fName, mimeType, plugin)
        assert isinstance(content, (str, bytes)), ("Content must be a string/bytes. It's currently type: '%s'" % type(content))
        params = {'pageUrl': url, 'pgContent': content, 'mimeType': mimeType, 'db_sess': self.db_sess, 'baseUrls': self.start_url, 'loggerPath': self.loggerPath, 'badwords': self.rules['badwords'], 'decompose': self.rules['decompose'], 'decomposeBefore': self.rules['decomposeBefore'], 'fileDomains': self.rules['fileDomains'], 'allImages': self.rules['allImages'], 'decompose_svg': self.rules['decompose_svg'], 'ignoreBadLinks': self.rules['IGNORE_MALFORMED_URLS'], 'stripTitle': self.rules['stripTitle'], 'relinkable': self.relinkable, 'destyle': self.rules['destyle'], 'preserveAttrs': self.rules['preserveAttrs'], 'type': self.rules['type'], 'message_q': self.response_queue, 'job': self.job, 'wg_proxy': self.wg_proxy}
        ret = plugin.process(params)
        if no_ret:
            return
        assert ret is not None, ('Return from %s was None!' % plugin)
        assert (('mimeType' in ret) or ('file' in ret)), ("Neither mimetype or file in ret for url '%s', plugin '%s'" % (url, plugin))
        return ret

    def _rpc_fetch(self, call, itemUrl):
        """Shared RPC fetch path for both plain and chromium-rendered fetches.

        Dispatches a fetch job over the remote interface and normalizes the
        result. Raises DownloadException when no content/mimetype came back.

        :return: (content, fileN, mType) tuple.
        """
        itemUrl = itemUrl.strip().replace(' ', '%20')
        error = None
        ret = {'success': False}
        try:
            rpc_interface = common.get_rpyc.RemoteFetchInterface()
            rpc_interface.check_ok()
            raw_job = WebMirror.JobUtils.buildjob(module='SmartWebRequest', call=call, dispatchKey='fetcher', jobid=(-1), args=[itemUrl], kwargs={}, additionalData={'mode': 'fetch'}, postDelay=0)
            ret = rpc_interface.dispatch_request(raw_job)
            rpc_interface.close()
        except Exception:
            # Log for context, then let the caller see the original error.
            self.log.error('Failure fetching content!')
            raise
        if ret['success']:
            (content, fileN, mType) = ret['ret']
        else:
            self.log.error('Failed to fetch page!')
            for line in ret['traceback']:
                self.log.error(line)
            error = '\n'.join(ret['traceback'])
            (content, fileN, mType) = (None, None, None)
        if (not content) or (not mType):
            if error:
                raise DownloadException(("Failed to retreive file from page '%s'!\n\nFetch traceback:\n%s\n\nEnd fetch traceback." % (itemUrl, error)))
            raise DownloadException(("Failed to retreive file from page '%s'!" % itemUrl))
        # Strip charset parameters and undo %2F escaping in the mimetype.
        if ';' in mType:
            mType = mType.split(';')[0].strip()
        if '%2F' in mType:
            mType = mType.replace('%2F', '/')
        self.log.info("Retreived file of type '%s', name of '%s' with a size of %0.3f K", mType, fileN, (len(content) / 1000.0))
        return (content, fileN, mType)

    def cr_fetch(self, itemUrl):
        """Fetch a URL through the rendered-chromium RPC path."""
        (content, fileN, mType) = self._rpc_fetch('chromiumGetRenderedItem', itemUrl)
        if 'debug' in sys.argv:
            # fix: the original logged an undefined `title` variable here,
            # which raised NameError whenever debug logging was enabled.
            self.log.info('Mime: %s', mType)
            self.log.info('Fname: %s', fileN)
            self.log.info('Content: ')
            self.log.info('%s', content)
        return (content, fileN, mType)

    def getItem(self, itemUrl):
        """Fetch a URL, choosing chromium rendering for special-cased sites."""
        spc = WebMirror.rules.load_special_case_sites()
        casehandler = WebMirror.SpecialCase.getSpecialCaseHandler(specialcase=spc, joburl=itemUrl)
        if casehandler == ['chrome_render_fetch']:
            self.log.info('Synchronous rendered chromium fetch!')
            return self.cr_fetch(itemUrl)
        else:
            return self.__plain_local_fetch(itemUrl)

    def __plain_local_fetch(self, itemUrl):
        """Fetch a URL through the plain (non-rendered) RPC path."""
        return self._rpc_fetch('smartGetItem', itemUrl)

    def dispatchContent(self, content, fName, mimeType):
        """Run filters over the content, then the first matching processor.

        Filters always run (no return value); processors are tried in
        descending priority order and the first interested one wins.
        """
        assert bool(content)
        assert mimeType, ("Mimetype must not be none. URL: '%s'" % self.target_url)
        for filter_plg in self.filter_modules:
            if (((mimeType.lower() in filter_plg.wanted_mimetypes) or filter_plg.mimetype_catchall) and filter_plg.wantsUrl(self.target_url) and filter_plg.wantsFromContent(content)):
                self.plugin_dispatch(filter_plg, self.target_url, content, fName, mimeType, no_ret=True)
        keys = list(self.plugin_modules.keys())
        keys.sort(reverse=True)
        for key in keys:
            for plugin in self.plugin_modules[key]:
                if ((mimeType.lower() in plugin.wanted_mimetypes) and plugin.wantsUrl(self.target_url) and plugin.wantsFromContent(content)):
                    ret = self.plugin_dispatch(plugin, self.target_url, content, fName, mimeType)
                    if 'file' not in ret:
                        ret['rawcontent'] = content
                    return ret
        self.log.error("Did not know how to dispatch request for url: '%s', mimetype: '%s'!", self.target_url, mimeType)
        return self.getEmptyRet(mimeType, 'No plugin wanted content!')

    def fetch(self, preretrieved):
        """Main entry: fetch (or accept preretrieved) content and dispatch it.

        :param preretrieved: optional (content, fName, mimeType) tuple to skip
            the network fetch.
        :return: the processor's result dict.
        """
        if not preretrieved:
            (content, fName, mimeType) = self.getItem(self.target_url)
        else:
            (content, fName, mimeType) = preretrieved
        if content and (not mimeType):
            mimeType = 'application/octet-stream'
        started_at = time.time()
        try:
            ret = self.dispatchContent(content, fName, mimeType)
        except Exception:
            self.log.error(("Failure processing content from url '%s', mimetype '%s', filename: '%s'" % (self.target_url, mimeType, fName)))
            raise
        # Report processing time (ms) to statsd, keyed by sanitized mimetype.
        fetchtime = ((time.time() - started_at) * 1000)
        cleaned_mime = mimeType
        for replace in ['/', '\\', ':', '.']:
            cleaned_mime = cleaned_mime.replace(replace, '-')
        self.mon_con.timing('{}'.format(cleaned_mime), fetchtime)
        return ret
class SelectionSet(object):
    """Stores a Maya selection list and mirrors it into a pm.sets() node.

    fix: the two consecutive ``name`` defs were clearly a stripped
    ``@property`` / ``@name.setter`` pair (the second silently overwrote the
    first, and ``save()`` uses ``self.name`` as a string value) — the
    decorators are restored here.
    """

    def __init__(self):
        self.__stored_selection_list__ = []
        self.__selection_set__ = None
        self.__name__ = 'SelectionSet#'

    @property
    def name(self):
        """Display name of this selection set."""
        return self.__name__

    @name.setter
    def name(self, new_name):
        self.__name__ = new_name
        # Keep the Maya set node's name in sync once it exists.
        if self.__selection_set__:
            pm.rename(self.__selection_set__, new_name)

    def replace(self):
        """Replace the current Maya selection with the stored list."""
        try:
            pm.select(self.__stored_selection_list__, replace=True)
        except pm.MayaNodeError:
            # Stored nodes may have been deleted; ignore.
            pass

    def add(self):
        """Add the stored list to the current selection."""
        pm.select(self.__stored_selection_list__, add=1)

    def subtract(self):
        """Deselect the stored list from the current selection."""
        pm.select(self.__stored_selection_list__, d=1)

    def and_(self):
        """Select the intersection of the stored list and current selection."""
        pm.select(set(self.__stored_selection_list__).intersection(pm.ls(sl=1)))

    def or_(self):
        """Select the union of the stored list and current selection.

        fix: the original called set.remove(pm.ls(sl=1)), which mutates in
        place and returns None — so pm.select(None) was called. The union
        (parallel to and_'s intersection) is the intended behavior.
        """
        pm.select(set(self.__stored_selection_list__).union(pm.ls(sl=1)))

    def save(self):
        """Create (or recreate) the Maya set node and stamp version metadata."""
        if self.__stored_selection_list__:
            self.__selection_set__ = pm.sets(self.__stored_selection_list__, name=self.name)
        else:
            self.__selection_set__ = pm.sets(name=self.name)
        if self.__selection_set__.hasAttr('selectionManagerData'):
            pass
        else:
            self.__selection_set__.addAttr('selectionManagerData', at='compound', nc=1)
            self.__selection_set__.addAttr('version', dt='string', p='selectionManagerData')
            self.__selection_set__.selectionManagerData.version.set(__version__, type='string')

    def restore(self, selection_set):
        """Load state (members + name) from an existing Maya set node."""
        self.__selection_set__ = selection_set
        self.__stored_selection_list__ = pm.sets(selection_set, q=True)
        self.__name__ = selection_set.name()

    def update(self):
        """Replace the stored list with the current selection and re-save."""
        self.__stored_selection_list__ = pm.ls(sl=1)
        if self.__selection_set__:
            pm.delete(self.__selection_set__)
        self.save()
def generateGenericMessage(type, t, v, name, iteration):
    """Assemble a generic message dict with meta, custom data, and links.

    ``meta`` comes from generateGenericMeta; ``data.customData`` records the
    run name and iteration; ``links`` starts empty.
    """
    return {
        'meta': generateGenericMeta(type, t, v),
        'data': {
            'customData': [
                {'key': 'name', 'value': name},
                {'key': 'iteration', 'value': iteration},
            ]
        },
        'links': [],
    }
def upgrade():
    """Disable enabled privacy notices whose data uses no longer overlap any
    system's data uses, bumping their version and writing a historical record.
    """
    bind: Connection = op.get_bind()
    enabled_notices: ResultProxy = bind.execute(text('SELECT id, data_uses, version FROM privacynotice WHERE disabled = false;'))
    expanded_system_data_uses: Set = get_expanded_system_data_uses(bind)
    for row in enabled_notices:
        notice_uses = set(row['data_uses'] or [])
        # Any overlap with a system's data uses keeps the notice enabled.
        if notice_uses.intersection(expanded_system_data_uses):
            continue
        disable_privacy_notice_and_bump_version(bind, row['id'], row['version'])
        create_corresponding_historical_record(bind, row['id'])
def to_absolute_path(path: AnyPath) -> AnyPath:
    """Resolve ``path`` to an absolute path, preserving the input's type.

    When Dora recorded the original launch directory, relative paths are
    resolved against it; otherwise defer to hydra when available, falling
    back to the current working directory.
    """
    klass = type(path)
    resolved = Path(path)
    if '_DORA_ORIGINAL_DIR' in os.environ:
        base = Path(os.environ['_DORA_ORIGINAL_DIR'])
        if resolved.is_absolute():
            return klass(resolved)
        return klass(base / resolved)
    try:
        import hydra.utils
    except ImportError:
        # No hydra: resolve relative paths against the current directory.
        if not resolved.is_absolute():
            resolved = Path(os.getcwd()) / resolved
    else:
        # Hydra rewrites paths relative to the original launch directory.
        resolved = Path(hydra.utils.to_absolute_path(str(resolved)))
    return klass(resolved)
class Settings():
    """All configuration for the game: static values plus dynamic speeds."""

    def __init__(self):
        """Initialize static settings and seed the dynamic ones."""
        # Screen.
        self.screen_width = 1200
        self.screen_height = 800
        self.bg_color = (230, 230, 230)
        # Ship.
        self.ship_limit = 3
        # Bullets.
        self.bullet_width = 15
        self.bullet_height = 3
        self.bullet_color = (60, 60, 60)
        self.bullets_allowed = 3
        # Target.
        self.target_height = 120
        self.target_width = 15
        self.target_color = (180, 60, 10)
        self.miss_limit = 3
        # Difficulty progression.
        self.speedup_scale = 2.1
        self.levelup_hits = 10
        self.initialize_dynamic_settings()

    def initialize_dynamic_settings(self):
        """Reset the values that change as the game speeds up."""
        self.ship_speed = 3.0
        self.bullet_speed = 12.0
        self.target_speed = 1.5

    def increase_speed(self):
        """Multiply every speed by the speedup factor."""
        self.ship_speed *= self.speedup_scale
        self.bullet_speed *= self.speedup_scale
        self.target_speed *= self.speedup_scale
class TestDoublewallet(JSONTestCase):
    """Round-trip test of an asset transfer between two mocked Cardano wallets.

    All HTTP traffic is stubbed with the ``responses`` library using JSON
    fixtures read from ``data_subdir``.
    NOTE(review): '@responses.activate' decorators appear to have been
    stripped from ``setUp``/test methods - confirm and restore.
    """
    service = None
    # Wallet IDs and passphrases of the two test wallets.
    wida = '04aebef49c24086f603db7a6d157f915c5c9411a'
    widb = '5e27c10c9cb253c93a771732fd7dcbb56d34bc47'
    passphrasea = 'pass.'
    passphraseb = 'pass.'
    # Subdirectory holding the JSON response fixtures.
    data_subdir = 'test_rest_backend'

    def setUp(self):
        """Register the wallet-metadata stubs and open both wallets."""
        self.service = WalletService(WalletREST())
        responses.add(responses.GET, self._url('wallets/{:s}'.format(self.wida)), json=self._read('test_transfer_asset-30-GET_wallets_{:s}.json'.format(self.wida)), status=200)
        responses.add(responses.GET, self._url('wallets/{:s}'.format(self.widb)), json=self._read('test_transfer_asset-60-GET_wallets_{:s}.json'.format(self.widb)), status=200)
        super(TestDoublewallet, self).setUp()
        self.wala = self.service.wallet(self.wida, passphrase=self.passphrasea)
        self.walb = self.service.wallet(self.widb, passphrase=self.passphraseb)

    def _url(self, path):
        # Build a full URL against the mocked backend base URL.
        return ''.join([self.service.backend.base_url, path])

    def test_transfer_asset(self):
        """Transfer 2 ADA + 1 native asset from wallet A and verify both sides."""
        # Stub the transfer POST and the follow-up state queries for both wallets.
        responses.add(responses.POST, self._url('wallets/{:s}/transactions'.format(self.wala.wid)), json=self._read('test_transfer_asset-10-POST_transfer_{:s}.json'.format(self.wala.wid)), status=200)
        responses.add(responses.GET, self._url('wallets/{:s}/addresses'.format(self.wala.wid)), json=self._read('test_transfer_asset-20-GET_addresses_{:s}.json'.format(self.wala.wid)), status=200)
        responses.add(responses.GET, self._url('wallets/{:s}'.format(self.wala.wid)), json=self._read('test_transfer_asset-30-GET_wallets_{:s}.json'.format(self.wala.wid)), status=200)
        responses.add(responses.GET, self._url('wallets/{:s}/transactions'.format(self.walb.wid)), json=self._read('test_transfer_asset-40-GET_transactions_{:s}.json'.format(self.walb.wid)), status=200)
        responses.add(responses.GET, self._url('wallets/{:s}/addresses'.format(self.walb.wid)), json=self._read('test_transfer_asset-50-GET_addresses_{:s}.json'.format(self.walb.wid)), status=200)
        responses.add(responses.GET, self._url('wallets/{:s}'.format(self.walb.wid)), json=self._read('test_transfer_asset-60-GET_wallets_{:s}.json'.format(self.walb.wid)), status=200)
        asset_id = AssetID('', '6b8d07d69639e9413dd637a1a815a7323c69c86abbafb66dbfdb1aa7')
        tx_out = self.wala.transfer('addr_test1qqpwa4lv202c9q4fag5kepr0jjnreq8yxrjgau7u4ulppa9c69u4ed55s8p7nuef3z65fkjjxcslwdu3h75zl7zeuzgqv3l7cc', 2, assets=[(asset_id, 1)])
        # Outgoing transaction as seen from wallet A (the sender).
        self.assertEqual(tx_out.amount_in, 0)
        self.assertEqual(tx_out.amount_out, 2)
        self.assertEqual(len(tx_out.inputs), 2)
        self.assertEqual(len(tx_out.local_inputs), 2)
        self.assertEqual(len(tx_out.outputs), 2)
        self.assertEqual(len(tx_out.local_outputs), 1)
        # Wallet A still holds the (change) asset after the transfer fixture.
        assetsa = self.wala.assets()
        self.assertEqual(len(assetsa), 1)
        self.assertIn(asset_id, assetsa)
        self.assertEqual(assetsa[asset_id].total, 1)
        self.assertEqual(assetsa[asset_id].available, 1)
        # Incoming transaction as seen from wallet B (the receiver).
        tx_in = self.walb.transactions()[0]
        self.assertEqual(tx_in.amount_in, 2)
        self.assertEqual(tx_in.amount_out, 0)
        self.assertEqual(len(tx_in.inputs), 2)
        self.assertEqual(len(tx_in.local_inputs), 0)
        self.assertEqual(len(tx_in.outputs), 2)
        self.assertEqual(len(tx_in.local_outputs), 1)
        assetsb = self.walb.assets()
# NOTE(review): the bare "(name='generate_data')" line below looks like a
# decorator whose "@pytest.fixture" prefix was lost - confirm and restore.
(name='generate_data')
def fixture_generate_data() -> pd.DataFrame:
    """Build a small well-log DataFrame fixture; -999 marks missing values."""
    data = {'X_UTME': [1.3, 2.0, 3.0, 4.0, 5.2, 6.0, 9.0], 'Y_UTMN': [11.0, 21.0, 31.0, 41.1, 51.0, 61.0, 91.0], 'Z_TVDSS': [21.0, 22.0, 23.0, 24.0, 25.3, 26.0, 29.0], 'MDEPTH': [13.0, 23.0, 33.0, 43.0, 53.2, 63.0, 93.0], 'GR': [133.0, 2234.0, (- 999), 1644.0, 2225.5, 6532.0, 92.0], 'FACIES': [1, (- 999), 3, 4, 4, 1, 1], 'ZONES': [1, 2, 3, 3, 3, 4, (- 999)]}
    return pd.DataFrame(data)
class MemcachedCache(BaseCache):
    """A cache client backed by memcached.

    ``servers`` may be a list/tuple of ``host:port`` addresses (or ``None``
    for the local default server), in which case a client library is imported
    automatically, or an already constructed memcache client object.
    ``key_prefix`` is prepended to every key so several applications can
    share one memcached server.
    """

    def __init__(self, servers=None, default_timeout=300, key_prefix=None):
        BaseCache.__init__(self, default_timeout)
        if ((servers is None) or isinstance(servers, (list, tuple))):
            if (servers is None):
                # Fall back to a memcached instance on the default local port.
                servers = ['127.0.0.1:11211']
            self._client = self.import_preferred_memcache_lib(servers)
            if (self._client is None):
                raise RuntimeError('no memcache module found')
        else:
            # ``servers`` is assumed to be an already configured client object.
            self._client = servers
        self.key_prefix = to_native(key_prefix)

    def _normalize_key(self, key):
        # Memcached keys must be native strings; apply the shared prefix.
        key = to_native(key, 'utf-8')
        if self.key_prefix:
            key = (self.key_prefix + key)
        return key

    def _normalize_timeout(self, timeout):
        # Positive timeouts become an absolute unix timestamp, which memcached
        # requires for expirations longer than 30 days; 0 means "never expire".
        if (timeout is None):
            timeout = self.default_timeout
        if (timeout > 0):
            timeout = (int(time()) + timeout)
        return timeout

    def get(self, key):
        """Return the value stored for ``key``, or ``None`` if absent/invalid."""
        key = self._normalize_key(key)
        # Silently ignore keys memcached would reject (too long, whitespace, ...).
        if _test_memcached_key(key):
            return self._client.get(key)

    def get_dict(self, *keys):
        """Fetch several keys at once and return a dict of key -> value."""
        key_mapping = {}
        have_encoded_keys = False
        for key in keys:
            encoded_key = self._normalize_key(key)
            if (not isinstance(key, str)):
                have_encoded_keys = True
            # NOTE(review): validity is tested on the *original* key, not the
            # encoded one - confirm this is intentional.
            if _test_memcached_key(key):
                key_mapping[encoded_key] = key
        d = rv = self._client.get_multi(key_mapping.keys())
        if (have_encoded_keys or self.key_prefix):
            # Translate the encoded keys back to the caller's original keys.
            rv = {}
            for (key, value) in iteritems(d):
                rv[key_mapping[key]] = value
        if (len(rv) < len(keys)):
            # Missing keys are reported with an explicit None value.
            for key in keys:
                if (key not in rv):
                    rv[key] = None
        return rv

    def add(self, key, value, timeout=None):
        """Store ``value`` only if ``key`` is not already present."""
        key = self._normalize_key(key)
        timeout = self._normalize_timeout(timeout)
        return self._client.add(key, value, timeout)

    def set(self, key, value, timeout=None):
        """Store ``value`` under ``key``, overwriting any existing entry."""
        key = self._normalize_key(key)
        timeout = self._normalize_timeout(timeout)
        return self._client.set(key, value, timeout)

    def get_many(self, *keys):
        """Return the values for ``keys`` in the same order as requested."""
        d = self.get_dict(*keys)
        return [d[key] for key in keys]

    def set_many(self, mapping, timeout=None):
        """Store all items of ``mapping``; return True iff no key failed."""
        new_mapping = {}
        for (key, value) in _items(mapping):
            key = self._normalize_key(key)
            new_mapping[key] = value
        timeout = self._normalize_timeout(timeout)
        failed_keys = self._client.set_multi(new_mapping, timeout)
        return (not failed_keys)

    def delete(self, key):
        """Remove ``key`` from the cache if it is a valid memcached key."""
        key = self._normalize_key(key)
        if _test_memcached_key(key):
            return self._client.delete(key)

    def delete_many(self, *keys):
        """Remove several keys at once, silently skipping invalid ones."""
        new_keys = []
        for key in keys:
            key = self._normalize_key(key)
            if _test_memcached_key(key):
                new_keys.append(key)
        return self._client.delete_multi(new_keys)

    def has(self, key):
        """Return True if ``key`` exists.

        Implemented by appending an empty string: append only succeeds for
        existing keys and does not change the stored value.
        """
        key = self._normalize_key(key)
        if _test_memcached_key(key):
            return self._client.append(key, '')
        return False

    def clear(self):
        """Flush the entire memcached server (affects every user of it)."""
        return self._client.flush_all()

    def inc(self, key, delta=1):
        """Atomically increment ``key`` by ``delta``."""
        key = self._normalize_key(key)
        return self._client.incr(key, delta)

    def dec(self, key, delta=1):
        """Atomically decrement ``key`` by ``delta``."""
        key = self._normalize_key(key)
        return self._client.decr(key, delta)

    def import_preferred_memcache_lib(self, servers):
        """Return a client from the first importable memcache library.

        Preference order: pylibmc, Google App Engine memcache,
        python-memcached.  Returns ``None`` when no library is available.
        """
        try:
            import pylibmc
        except ImportError:
            pass
        else:
            return pylibmc.Client(servers)
        try:
            from google.appengine.api import memcache
        except ImportError:
            pass
        else:
            return memcache.Client()
        try:
            import memcache
        except ImportError:
            pass
        else:
            return memcache.Client(servers)
class TestSeek():
    """Tests for ``PyK4APlayback.seek``.

    NOTE(review): the methods take a ``playback`` fixture argument in place of
    ``self`` - decorators (likely ``@staticmethod``) appear to have been
    stripped - confirm and restore.
    """

    def test_validate_if_record_opened(playback: PyK4APlayback):
        # Seeking before open() must fail with a clear error message.
        with pytest.raises(K4AException, match='Playback not opened.'):
            playback.seek(1)

    def test_bad_file(playback_bad: PyK4APlayback):
        # A corrupt recording opens but cannot be seeked.
        playback_bad.open()
        with pytest.raises(K4AException):
            playback_bad.seek(10)

    def test_good_file(playback: PyK4APlayback):
        # Seeking within the recording should succeed without raising.
        playback.open()
        playback.seek(10)

    def test_seek_eof(playback: PyK4APlayback):
        # Seeking past the end of the recording raises EOFError.
        playback.open()
        with pytest.raises(EOFError):
            playback.seek(9999)
def get_scene_name_from_task(task):
    """Walk up the task hierarchy and return the name of the enclosing scene.

    A real ``Scene`` entity wins immediately; a task merely *typed* as
    ``Scene`` is remembered while the walk continues upward.  The walk stops
    at a ``Sequence`` entity or at the hierarchy root; when nothing matches,
    the default name ``'001'`` is returned.
    """
    found = None
    current = task
    while current is not None:
        if current.entity_type == 'Scene':
            # An actual Scene entity takes precedence and ends the search.
            found = current
            break
        if current.type and current.type.name == 'Scene':
            # Remember a Scene-typed task but keep walking upward.
            found = current
        if current.entity_type == 'Sequence':
            break
        current = current.parent
    return found.name if found else '001'
def blend_frame(temp_frame: Frame, paste_frame: Frame) -> Frame:
    """Blend the enhanced frame with the pasted frame.

    The global ``frame_enhancer_blend`` percentage controls how much of the
    pasted frame shows through; the two blend weights always sum to 1.
    """
    # Convert the 0-100 user setting into the weight of the enhanced frame.
    enhancer_weight = 1 - (frame_processors_globals.frame_enhancer_blend / 100)
    target_height, target_width = paste_frame.shape[:2]
    # Match the enhanced frame's size to the paste frame before blending.
    resized = cv2.resize(temp_frame, (target_width, target_height))
    return cv2.addWeighted(resized, enhancer_weight, paste_frame, 1 - enhancer_weight, 0)
class OptionPlotoptionsVariablepieStatesSelectMarker(Options):
    """Highcharts ``plotOptions.variablepie.states.select.marker`` options.

    NOTE(review): every option appears as a getter/setter pair sharing one
    name; the ``@property`` / ``@x.setter`` decorators appear to have been
    stripped (as written, the later definition shadows the earlier one) -
    confirm and restore.
    """

    def enabled(self):
        # Whether the point marker is enabled; default inherited (None).
        return self._config_get(None)

    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    def enabledThreshold(self):
        # Minimum pixel distance between points before markers auto-enable.
        return self._config_get(2)

    def enabledThreshold(self, num: float):
        self._config(num, js_type=False)

    def fillColor(self):
        # Marker fill color; default inherited (None).
        return self._config_get(None)

    def fillColor(self, text: str):
        self._config(text, js_type=False)

    def height(self):
        # Explicit marker height in pixels; default inherited (None).
        return self._config_get(None)

    def height(self, num: float):
        self._config(num, js_type=False)

    def lineColor(self):
        # Marker border color; defaults to white.
        return self._config_get('#ffffff')

    def lineColor(self, text: str):
        self._config(text, js_type=False)

    def lineWidth(self):
        # Marker border width in pixels; defaults to 0.
        return self._config_get(0)

    def lineWidth(self, num: float):
        self._config(num, js_type=False)

    def radius(self):
        # Marker radius in pixels; defaults to 4.
        return self._config_get(4)

    def radius(self, num: float):
        self._config(num, js_type=False)

    def width(self):
        # Explicit marker width in pixels; default inherited (None).
        return self._config_get(None)

    def width(self, num: float):
        self._config(num, js_type=False)
class ScheduleEEfile(BaseRawItemized):
    """Raw electronic-filing Schedule E rows (independent expenditures),
    backed by the Form 57 database view."""
    __tablename__ = 'real_efile_se_f57_vw'
    line_number = db.Column('line_num', db.String)
    filing_form = db.Column('filing_form', db.String)
    is_notice = db.Column(db.Boolean, index=True)
    # Composite primary key: filing (report) id + line number within it.
    file_number = db.Column('repid', db.Integer, index=True, primary_key=True)
    related_line_number = db.Column('rel_lineno', db.Integer, primary_key=True)
    committee_id = db.Column('comid', db.String, doc=docs.COMMITTEE_ID)
    # Payee name and address fields.
    payee_prefix = db.Column('prefix', db.String)
    payee_first_name = db.Column('fname', db.String)
    payee_middle_name = db.Column('mname', db.String)
    payee_last_name = db.Column('lname', db.String)
    payee_suffix = db.Column('suffix', db.String)
    payee_street_1 = db.Column('str1', db.String)
    payee_street_2 = db.Column('str2', db.String)
    payee_city = db.Column('city', db.String)
    payee_state = db.Column('state', db.String)
    payee_zip = db.Column('zip', db.String)
    # NOTE(review): filer_first_name maps to 'pcf_lname' and filer_last_name
    # to 'pcf_fname' - this looks swapped; confirm against the view schema.
    filer_first_name = db.Column('pcf_lname', db.String)
    filer_middle_name = db.Column('pcf_mname', db.String)
    filer_last_name = db.Column('pcf_fname', db.String)
    filer_suffix = db.Column('pcf_suffix', db.String)
    filer_prefix = db.Column('pcf_prefix', db.String)
    # Supported/opposed candidate details.
    candidate_id = db.Column('so_canid', db.String)
    candidate_name = db.Column('so_can_name', db.String, doc=docs.CANDIDATE_NAME)
    candidate_prefix = db.Column('so_can_prefix', db.String)
    candidate_first_name = db.Column('so_can_fname', db.String)
    candidate_middle_name = db.Column('so_can_mname', db.String)
    candidate_suffix = db.Column('so_can_suffix', db.String)
    candidate_office = db.Column('so_can_off', db.String, doc=docs.OFFICE)
    candidate_office_state = db.Column('so_can_state', db.String, doc=docs.STATE_GENERIC)
    candidate_office_district = db.Column('so_can_dist', db.String, doc=docs.DISTRICT)
    # Expenditure details.
    expenditure_description = db.Column('exp_desc', db.String)
    expenditure_date = db.Column('exp_date', db.Date)
    expenditure_amount = db.Column('amount', db.Integer)
    office_total_ytd = db.Column('ytd', db.Float)
    category_code = db.Column('cat_code', db.String)
    support_oppose_indicator = db.Column('supop', db.String, doc=docs.SUPPORT_OPPOSE_INDICATOR)
    notary_sign_date = db.Column('not_date', db.Date)
    dissemination_date = db.Column('dissem_dt', db.Date, doc=docs.DISSEMINATION_DATE)
    # Full-text search vector over candidate fields.
    cand_fulltxt = db.Column(TSVECTOR, doc=docs.CANDIDATE_FULL_SEARCH)
    candidate_party = db.Column('cand_pty_affiliation', db.String, doc=docs.PARTY)
    most_recent = db.Column('most_recent', db.Boolean, doc=docs.MOST_RECENT)
    # NOTE(review): innerjoin='True' passes a *string*, not the boolean True - confirm.
    filing = db.relationship('EFilings', primaryjoin='and_(\n ScheduleEEfile.file_number == EFilings.file_number,\n )', foreign_keys=file_number, lazy='joined', innerjoin='True')
    # Join to committee history for the matching two-year cycle.
    committee = db.relationship('CommitteeHistory', primaryjoin="and_(\n ScheduleEEfile.committee_id == CommitteeHistory.committee_id,\n extract('year', ScheduleEEfile.load_timestamp) +cast(extract('year',\n ScheduleEEfile.load_timestamp), Integer) % 2 == CommitteeHistory.cycle,\n )", foreign_keys=committee_id, lazy='joined')

    # NOTE(review): the bare '_property' line looks like a stripped
    # '@property' decorator - confirm and restore.
    _property
    def payee_name(self):
        """Full payee name assembled from its parts, or None when empty."""
        name = name_generator(self.payee_last_name, self.payee_prefix, self.payee_first_name, self.payee_middle_name, self.payee_suffix)
        name = (name if name else None)
        return name
class OptionPlotoptionsTreemapSonificationDefaultspeechoptionsMapping(Options):
    """Highcharts treemap sonification ``defaultSpeechOptions.mapping`` options.

    Sub-option accessors lazily create their configuration sub-objects.
    NOTE(review): ``text`` appears as a getter/setter pair sharing one name -
    the '@property' / setter decorators appear to have been stripped; confirm.
    """

    def pitch(self) -> 'OptionPlotoptionsTreemapSonificationDefaultspeechoptionsMappingPitch':
        # Sub-options controlling the speech pitch mapping.
        return self._config_sub_data('pitch', OptionPlotoptionsTreemapSonificationDefaultspeechoptionsMappingPitch)

    def playDelay(self) -> 'OptionPlotoptionsTreemapSonificationDefaultspeechoptionsMappingPlaydelay':
        # Sub-options controlling the delay before speech plays.
        return self._config_sub_data('playDelay', OptionPlotoptionsTreemapSonificationDefaultspeechoptionsMappingPlaydelay)

    def rate(self) -> 'OptionPlotoptionsTreemapSonificationDefaultspeechoptionsMappingRate':
        # Sub-options controlling the speech rate mapping.
        return self._config_sub_data('rate', OptionPlotoptionsTreemapSonificationDefaultspeechoptionsMappingRate)

    def text(self):
        # The text to announce; default inherited (None).
        return self._config_get(None)

    def text(self, text: str):
        self._config(text, js_type=False)

    def time(self) -> 'OptionPlotoptionsTreemapSonificationDefaultspeechoptionsMappingTime':
        # Sub-options controlling when the speech plays on the timeline.
        return self._config_sub_data('time', OptionPlotoptionsTreemapSonificationDefaultspeechoptionsMappingTime)

    def volume(self) -> 'OptionPlotoptionsTreemapSonificationDefaultspeechoptionsMappingVolume':
        # Sub-options controlling the speech volume mapping.
        return self._config_sub_data('volume', OptionPlotoptionsTreemapSonificationDefaultspeechoptionsMappingVolume)
# NOTE(review): the bare '_required' / '_POST' lines below look like stripped
# decorators (e.g. login_required, admin_required(UserAdminPermission),
# ajax_required, require_POST) - confirm and restore.
_required
_required
_required(UserAdminPermission)
_required
_POST
def dc_user_profile_form(request, username):
    """Handle the admin user-profile form: bind-and-save on the 'update'
    action, otherwise render the initial (unbound) forms."""
    user = get_edited_user(request, username, sr=('dc_bound', 'userprofile'))
    profile = user.userprofile
    if (request.POST['action'] == 'update'):
        uform = AdminUserForm(request, user, request.POST)
        upform = AdminUserProfileForm(request, profile, request.POST)
        if (uform.is_valid() and upform.is_valid()):
            args = (uform.cleaned_data['username'],)
            ustatus = uform.save(action='update', args=args)
            upstatus = upform.save(action='update', args=args)
            # Success requires one form returning 200 (changed) while the
            # other returns 200 or 204 (unchanged is also acceptable).
            if (((ustatus == 200) and (upstatus in (200, 204))) or ((upstatus == 200) and (ustatus in (200, 204)))):
                messages.success(request, _('User profile was successfully updated'))
                # Re-fetch the user since the save may have changed it.
                user = user.__class__.objects.get(pk=user.pk)
                if ((request.user == user) and (not user.is_admin(dc=request.dc))):
                    # The user removed their own admin rights - leave admin pages.
                    redirect_to = '/'
                else:
                    redirect_to = 'dc_user_list'
                return redirect(redirect_to)
    else:
        uform = AdminUserForm(request, user, init=True)
        upform = AdminUserProfileForm(request, profile, init=True)
    context = {'uform': uform, 'upform': upform, 'user': user, 'profile': profile, 'ssh_keys': user.usersshkey_set.all().order_by('id')}
    if (not request.user.is_staff):
        # Non-staff users must not see/manage the API key widget.
        context['disabled_api_key'] = True
    return render(request, 'gui/dc/profile/profile_page.html', context)
def compile_shader(complete_path: str, shader_type: ShaderType, include_dirs: Optional[List[str]]=()) -> str:
    """Compile a shader source file with the bundled ``shaderc`` binary.

    :param complete_path: path to the shader source file.
    :param shader_type: kind of shader being compiled (e.g. vertex/compute).
    :param include_dirs: extra include directories passed to the compiler.
    :return: path of the compiled output (a temp file the caller must clean up).
    :raises RuntimeError: if a path is invalid or the compiler exits non-zero.
    """
    if (not os.path.exists(complete_path)):
        raise RuntimeError('Shader {} does not exists!'.format(complete_path))
    # The output survives this function (delete=False); close the handle right
    # away so shaderc can (re)open and write the file on all platforms.
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    temp_file.close()
    options = []
    options.extend(('-f', complete_path))
    options.extend(('-o', temp_file.name))
    options.extend(('-i', str(_default_include_dir)))
    for include_dir in include_dirs:
        # BUG FIX: the original condition raised whenever the path *was* a
        # directory; a valid include path must exist and be a directory.
        if (not os.path.isdir(include_dir)):
            raise RuntimeError('{} does not exists or is not a directory!'.format(include_dir))
        options.extend(['-i', include_dir])
    options.extend(('--platform', _get_platform()))
    options.extend(('--profile', _get_profile(shader_type)))
    options.extend(('--type', shader_type.value))
    if (platform.system() == 'Windows'):
        options.extend(['-O', ('1' if (shader_type == ShaderType.COMPUTE) else '3')])
    shaderc_bin = str((_default_bin_path / 'shadercRelease{}'.format(_os_exe_suffix)))
    # Ensure the bundled compiler binary is executable (0o774).
    os.chmod(shaderc_bin, 508)
    run_args = ([shaderc_bin] + options)
    run_info = subprocess.run(run_args, capture_output=True, text=True)
    if (run_info.returncode != 0):
        # Include both streams: shaderc reports errors on stdout and stderr.
        raise RuntimeError('Error compiling shader {}:\n{}\n{}'.format(complete_path, run_info.stdout, run_info.stderr))
    return temp_file.name
class ValueNet(nn.Module):
    """Value network: a dense perception trunk followed by a linear value head."""

    def __init__(self, obs_shapes, non_lin=nn.Tanh):
        super().__init__()
        # Perception trunk: maps the raw observation to a latent representation.
        self.perception_network = DenseBlock(in_keys='observation', out_keys='latent', in_shapes=obs_shapes['observation'], hidden_units=[32, 32], non_lin=non_lin)
        self.perception_network.apply(make_module_init_normc(std=1.0))
        # Value head: projects the latent vector onto a single scalar.
        self.value_head = LinearOutputBlock(in_keys='latent', out_keys='value', in_shapes=self.perception_network.out_shapes(), output_units=1)
        # A small init std keeps the initial value predictions near zero.
        self.value_head.apply(make_module_init_normc(std=0.01))
        # Compose trunk and head into a single inference graph.
        self.net = InferenceBlock(in_keys='observation', out_keys='value', in_shapes=list(obs_shapes.values()), perception_blocks={'latent': self.perception_network, 'value': self.value_head})

    def forward(self, x):
        """Run the composed network and return the value estimate."""
        return self.net(x)
def get_settings_rules():
    """Return the color rules from settings with user rules merged in.

    A user rule whose ``name`` matches a built-in rule overrides/extends that
    rule via ``merge_rules``; unmatched user rules are appended as new rules.
    """
    settings = sublime.load_settings('color_helper.sublime-settings')
    base_rules = settings.get('color_rules', [])
    # Index named built-in rules so user overrides can locate them.
    index_by_name = {}
    for position, rule in enumerate(base_rules):
        if 'name' in rule:
            index_by_name[rule['name']] = position
    for user_rule in settings.get('user_color_rules', []):
        rule_name = user_rule.get('name')
        if rule_name is not None and rule_name in index_by_name:
            position = index_by_name[rule_name]
            base_rules[position] = merge_rules(base_rules[position], user_rule)
        else:
            base_rules.append(user_rule)
    return base_rules
class PythonPackageVersionBumper():
    """Bump a Python package's version across a repository.

    Rewrites the package's own version files, any extra files listed in
    ``files_to_pattern``, and every matching version-specifier occurrence in
    the repository (skipping ``ignore_dirs``).
    """
    # Directories never scanned for specifier replacement.
    IGNORE_DIRS = (Path('.git'),)

    def __init__(self, root_dir: Path, python_pkg_dir: Path, new_version: Version, files_to_pattern: PatternByPath, specifier_set_patterns: Sequence[str], package_name: Optional[str]=None, ignore_dirs: Sequence[Path]=()):
        """
        :param root_dir: repository root (must be a git repository).
        :param python_pkg_dir: directory of the package being bumped.
        :param new_version: the version to write.
        :param files_to_pattern: extra files mapped to their version regex template.
        :param specifier_set_patterns: regex templates locating specifier sets.
        :param package_name: defaults to the package directory's name.
        :param ignore_dirs: directories to skip (defaults to IGNORE_DIRS).
        """
        self.root_dir = root_dir
        self.python_pkg_dir = python_pkg_dir
        self.new_version = new_version
        self.files_to_pattern = files_to_pattern
        self.specifier_set_patterns = specifier_set_patterns
        self.package_name = (package_name or self.python_pkg_dir.name)
        self.ignore_dirs = (ignore_dirs or self.IGNORE_DIRS)
        self.repo = Repo(self.root_dir)
        # Filled in lazily while running.
        self._current_version: Optional[str] = None
        self._executed: bool = False
        self._result: Optional[bool] = None

    def is_executed(self) -> bool:
        """Whether :meth:`run` has completed at least once.

        NOTE(review): looks like this was a '@property' whose decorator was
        stripped - confirm (callers below use it without parentheses).
        """
        return self._executed

    def result(self) -> bool:
        """Return the result of :meth:`run`.

        NOTE(review): ``self.is_executed`` is a bound method here (always
        truthy), so this guard can never raise as written - confirm the
        original used a property or a call.
        """
        if (not self.is_executed):
            raise ValueError('not executed yet')
        return cast(bool, self._result)

    # NOTE(review): bare '_executed' looks like a stripped decorator that
    # marks the bumper as executed - confirm and restore.
    _executed
    def run(self) -> bool:
        """Perform the bump; return True when any specifier was updated."""
        if (not self.is_different_from_latest_tag()):
            logging.info(f'The package {self.python_pkg_dir} has no changes since last tag.')
            return False
        new_version_string = str(self.new_version)
        # Update the package's own version files first and learn the current version.
        current_version_str = self.update_version_for_package(new_version_string)
        # Normalize through Version so equivalent spellings compare equal.
        current_version: Version = Version(current_version_str)
        current_version_str = str(current_version)
        self._current_version = current_version_str
        self.update_version_for_files()
        return self.update_version_specifiers(current_version, self.new_version)

    def update_version_for_files(self) -> None:
        """Rewrite the version in every explicitly listed extra file."""
        for (filepath, regex_template) in self.files_to_pattern.items():
            self.update_version_for_file(filepath, cast(str, self._current_version), str(self.new_version), version_regex_template=regex_template)

    def update_version_for_package(self, new_version: str) -> str:
        """Rewrite ``__version__.py`` / ``pyproject.toml``; return the old version."""
        version_path = (self.python_pkg_dir / Path('__version__.py'))
        path_regexp = []
        if version_path.exists():
            regex_template = '(?<=__version__ = [\'"])({version})(?=")'
            path = version_path
            path_regexp.append((path, regex_template))
        if PYPROJECT_TOML.exists():
            regex_template = '(?<=\\nversion = \\")(?P<version>{version})(?=\\"\\n)'
            path = PYPROJECT_TOML
            path_regexp.append((path, regex_template))
        for (path, regex_template) in path_regexp:
            content = path.read_text()
            # Probe with a wildcard to discover the version currently on disk.
            pattern = regex_template.format(version='.*')
            current_version_candidates = re.findall(pattern, content)
            more_than_one_match = (len(current_version_candidates) > 1)
            if more_than_one_match:
                raise ValueError(f'find more than one match for current version in {path}: {current_version_candidates}')
            current_version = current_version_candidates[0]
            self.update_version_for_file(path, current_version, new_version, version_regex_template=regex_template)
        return current_version

    def update_version_for_file(self, path: Path, current_version: str, new_version: str, version_regex_template: Optional[str]=None) -> None:
        """Replace ``current_version`` with ``new_version`` inside one file."""
        if (version_regex_template is not None):
            regex_str = version_regex_template.format(package_name=self.package_name, version=current_version)
        else:
            # Without a template, fall back to a plain substring regex.
            regex_str = current_version
        pattern = re.compile(regex_str)
        content = path.read_text()
        content = pattern.sub(new_version, content)
        path.write_text(content)

    def update_version_specifiers(self, old_version: Version, new_version: Version) -> bool:
        """Replace version specifier sets repo-wide; return True if changed."""
        old_specifier_set = compute_specifier_from_version_custom(old_version)
        new_specifier_set = compute_specifier_from_version_custom(new_version)
        logging.info(f'Old version specifier: {old_specifier_set}')
        logging.info(f'New version specifier: {new_specifier_set}')
        if (old_specifier_set == new_specifier_set):
            logging.info("Not updating version specifier - they haven't changed.")
            return False
        for file in filter((lambda p: (not p.is_dir())), self.root_dir.rglob('*')):
            # NOTE(review): only the first path component is checked against
            # ignore_dirs - nested ignore entries would not match; confirm.
            dir_root = Path(file.parts[0])
            if (dir_root in self.ignore_dirs):
                logging.info(f"Skipping '{file}'...")
                continue
            logging.info(f"Replacing '{old_specifier_set}' with '{new_specifier_set}' in '{file}'... ")
            try:
                content = file.read_text()
            except UnicodeDecodeError as e:
                # Binary file: skip it rather than failing the whole run.
                logging.info(f'Cannot read {file}: {str(e)}. Continue...')
            else:
                content = self._replace_specifier_sets(old_specifier_set, new_specifier_set, content)
                file.write_text(content)
        return True

    def _replace_specifier_sets(self, old_specifier_set: str, new_specifier_set: str, content: str) -> str:
        """Apply every configured specifier pattern to ``content``."""
        old_specifier_set_regex = get_regex_from_specifier_set(old_specifier_set)
        for pattern_template in self.specifier_set_patterns:
            regex = pattern_template.format(package_name=self.package_name, specifier_set=old_specifier_set_regex)
            pattern = re.compile(regex)
            if (pattern.search(content) is not None):
                content = pattern.sub(new_specifier_set, content)
        return content

    def is_different_from_latest_tag(self) -> bool:
        """True when the package directory changed since the latest git tag."""
        assert (len(self.repo.tags) > 0), 'no git tags found'
        latest_tag_str = str(self.repo.tags[(- 1)])
        args = (latest_tag_str, '--', str(self.python_pkg_dir))
        logging.info(f"Running 'git diff {' '.join(args)}'")
        diff = self.repo.git.diff(*args)
        return (diff != '')
def generate_code(means, precision, offset, name='my_elliptic', modifiers='static const'):
    """Generate C source for an elliptic-envelope anomaly detector.

    :param means: per-feature mean vector (1-D, length n_features).
    :param precision: precision matrix (n_features x n_features), flattened
        row-major into the generated array.
    :param offset: decision boundary offset of the fitted model.
    :param name: C identifier prefix for all generated symbols.
    :param modifiers: storage qualifiers for the generated arrays/struct.
    :return: the complete C source code as a string.
    """
    # The prefix must be a valid C identifier.
    cgen.assert_valid_identifier(name)
    n_features = means.shape[0]
    decision_boundary = offset
    classifier_name = f'{name}_classifier'
    means_name = f'{name}_means'
    precisions_name = f'{name}_precisions'
    predict_function_name = f'{name}_predict'
    includes = '\n // This code is generated by emlearn\n\n #include <eml_distance.h>\n '
    # Data arrays first, then the struct/function that reference them.
    pre = '\n\n'.join([includes, cgen.array_declare(means_name, n_features, modifiers=modifiers, values=means), cgen.array_declare(precisions_name, (n_features * n_features), modifiers=modifiers, values=precision.flatten(order='C'))])
    # NOTE(review): the generated 'const int class' identifier is valid C but
    # breaks C++ compilation ('class' is a keyword) - confirm C-only targets.
    main = f'''
#include <stdio.h>
// Data definitions
{modifiers} EmlEllipticEnvelope {classifier_name} = {{
{n_features},
{decision_boundary},
{means_name},
{precisions_name}
}};
// Prediction function
float {predict_function_name}(const float *features, int n_features) {{
float dist = 0.0;
const int class = eml_elliptic_envelope_predict(&{classifier_name},
features, n_features, &dist);
return dist;
}}
'''
    code = (pre + main)
    return code
def getModelName(model):
    """Derive a filesystem-friendly model name from a model description dict.

    Preference order: the caffe2 predict-net filename, then the generic model
    filename, then the model's display name, then the literal ``'model'``.
    The file extension is dropped and spaces become underscores.
    """
    if model['framework'] == 'caffe2':
        source_name = model['files']['predict']['filename']
    else:
        source_name = model.get('files', {}).get('model', {}).get('filename', None)
        if not source_name:
            if 'name' in model:
                source_name = model['name']
            else:
                source_name = 'model'
    base, _ext = os.path.splitext(source_name)
    return base.replace(' ', '_')
class OptionPlotoptionsFunnel3dSonificationDefaultinstrumentoptionsActivewhen(Options):
    """Highcharts funnel3d sonification ``defaultInstrumentOptions.activeWhen``
    options, controlling when a sonification track is active.

    NOTE(review): every option appears as a getter/setter pair sharing one
    name - the '@property' / setter decorators appear to have been stripped;
    confirm and restore.
    """

    def crossingDown(self):
        # Activate when the mapped property crosses this value downwards.
        return self._config_get(None)

    def crossingDown(self, num: float):
        self._config(num, js_type=False)

    def crossingUp(self):
        # Activate when the mapped property crosses this value upwards.
        return self._config_get(None)

    def crossingUp(self, num: float):
        self._config(num, js_type=False)

    def max(self):
        # Upper bound of the active range; default inherited (None).
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        # Lower bound of the active range; default inherited (None).
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def prop(self):
        # Name of the point property driving the activation checks.
        return self._config_get(None)

    def prop(self, text: str):
        self._config(text, js_type=False)
def test_create_start_workflow_request_override_workflow_options():
    """Explicit WorkflowOptions must override the WorkflowMethod defaults when
    building a start-workflow request."""
    client = WorkflowClient(None, 'the-namespace', None, DEFAULT_DATA_CONVERTER_INSTANCE)
    wm = WorkflowMethod()
    options = WorkflowOptions(workflow_id='workflow-id', workflow_id_reuse_policy=WorkflowIdReusePolicy.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE, workflow_run_timeout=timedelta(seconds=200), workflow_execution_timeout=timedelta(seconds=100), workflow_task_timeout=timedelta(seconds=60), task_queue='task-queue', cron_schedule='cron-schedule', memo={'name': 'bob'}, search_attributes={'name': 'alex'})
    request = create_start_workflow_request(client, wm, [], options)
    # Every overridable field must carry the value supplied via options.
    assert (request.workflow_id == 'workflow-id')
    assert (request.workflow_id_reuse_policy == WorkflowIdReusePolicy.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE)
    assert (request.workflow_run_timeout == timedelta(seconds=200))
    assert (request.workflow_execution_timeout == timedelta(seconds=100))
    assert (request.workflow_task_timeout == timedelta(seconds=60))
    assert (request.task_queue == 'task-queue')
    assert (request.cron_schedule == 'cron-schedule')
    # Memo and search attributes are JSON-encoded payloads.
    assert (request.memo.fields['name'].data == b'"bob"')
    assert (request.search_attributes.indexed_fields['name'].data == b'"alex"')
def filter_dlp_sensitivity_data(json):
    """Keep only the supported DLP-sensitivity attributes from *json*.

    Invalid fields are stripped first; attributes that are missing or have a
    ``None`` value are dropped from the result.
    """
    json = remove_invalid_fields(json)
    allowed_attributes = ['name']
    return {attribute: json[attribute] for attribute in allowed_attributes if attribute in json and json[attribute] is not None}
# NOTE(review): the bare "(scope='function')" line below looks like a
# decorator whose "@pytest.fixture" prefix was lost - confirm and restore.
(scope='function')
def create_maya_test_db():
    """Set up an in-memory Stalker database plus a temporary repository path.

    Yields the temp repository path; tears down the session and removes the
    temp directory afterwards.
    """
    import anima
    anima.stalker_server_internal_address = 'internal'
    anima.stalker_server_external_address = 'external'
    logger.debug('initializing db')
    db.setup({'sqlalchemy.url': 'sqlite:///:memory:'})
    db.init()
    logger.debug('initializing db complete')
    logger.debug('creating temp repository path')
    temp_repo_path = tempfile.mkdtemp()
    (yield temp_repo_path)
    # Teardown: drop the scoped session and delete the temp repository.
    DBSession.remove()
    shutil.rmtree(temp_repo_path, ignore_errors=True)
class Pairwise(Op):
    """Operator that emits consecutive ``(previous, current)`` value pairs.

    The first incoming value is only stored; from the second value onward,
    every emission carries the previous value together with the current one.
    """
    __slots__ = ('_prev', '_has_prev')

    def __init__(self, source=None):
        Op.__init__(self, source)
        self._has_prev = False

    def on_source(self, *args):
        # Normalize the callback payload: a single argument is unwrapped,
        # multiple arguments stay a tuple, no arguments become NO_VALUE.
        if len(args) == 1:
            current = args[0]
        elif args:
            current = args
        else:
            current = NO_VALUE
        if self._has_prev:
            self.emit(self._prev, current)
        else:
            # First value: nothing to pair with yet.
            self._has_prev = True
        self._prev = current
class MenuService():
    """Service layer for menu CRUD operations and menu-tree assembly.

    NOTE(review): the async methods take no ``self`` - '@staticmethod'
    decorators appear to have been stripped; confirm and restore.
    """

    async def get(*, pk: int) -> Menu:
        """Return the menu with primary key ``pk`` or raise NotFoundError."""
        async with async_db_session() as db:
            menu = (await MenuDao.get(db, menu_id=pk))
            if (not menu):
                raise errors.NotFoundError(msg='')
            return menu

    async def get_menu_tree(*, title: (str | None)=None, status: (int | None)=None) -> list[dict[(str, Any)]]:
        """Return the full menu tree, optionally filtered by title/status."""
        async with async_db_session() as db:
            menu_select = (await MenuDao.get_all(db, title=title, status=status))
            menu_tree = (await get_tree_data(menu_select))
            return menu_tree

    async def get_role_menu_tree(*, pk: int) -> list[dict[(str, Any)]]:
        """Return the menu tree visible to the role with primary key ``pk``."""
        async with async_db_session() as db:
            role = (await RoleDao.get_with_relation(db, pk))
            if (not role):
                raise errors.NotFoundError(msg='')
            menu_ids = [menu.id for menu in role.menus]
            menu_select = (await MenuDao.get_role_menus(db, False, menu_ids))
            menu_tree = (await get_tree_data(menu_select))
            return menu_tree

    async def get_user_menu_tree(*, request: Request) -> list[dict[(str, Any)]]:
        """Return the menu tree for the requesting user.

        Menu ids are collected from every role of the user; superusers get
        all menus regardless of role assignments.
        """
        async with async_db_session() as db:
            roles = request.user.roles
            menu_ids = []
            menu_tree = []
            if roles:
                for role in roles:
                    menu_ids.extend([menu.id for menu in role.menus])
                menu_select = (await MenuDao.get_role_menus(db, request.user.is_superuser, menu_ids))
                menu_tree = (await get_tree_data(menu_select))
            return menu_tree

    async def create(*, obj: CreateMenu) -> None:
        """Create a menu; the title must be unique and any parent must exist."""
        async with async_db_session.begin() as db:
            title = (await MenuDao.get_by_title(db, obj.title))
            if title:
                raise errors.ForbiddenError(msg='')
            if obj.parent_id:
                parent_menu = (await MenuDao.get(db, obj.parent_id))
                if (not parent_menu):
                    raise errors.NotFoundError(msg='')
            (await MenuDao.create(db, obj))

    async def update(*, pk: int, obj: UpdateMenu) -> int:
        """Update menu ``pk``; return the number of updated rows.

        A changed title must stay unique; a given parent must exist.
        """
        async with async_db_session.begin() as db:
            menu = (await MenuDao.get(db, pk))
            if (not menu):
                raise errors.NotFoundError(msg='')
            if (menu.title != obj.title):
                if (await MenuDao.get_by_title(db, obj.title)):
                    raise errors.ForbiddenError(msg='')
            if obj.parent_id:
                parent_menu = (await MenuDao.get(db, obj.parent_id))
                if (not parent_menu):
                    raise errors.NotFoundError(msg='')
            count = (await MenuDao.update(db, pk, obj))
            return count

    async def delete(*, pk: int) -> int:
        """Delete menu ``pk``; refuse when it still has child menus."""
        async with async_db_session.begin() as db:
            children = (await MenuDao.get_children(db, pk))
            if children:
                raise errors.ForbiddenError(msg=',')
            count = (await MenuDao.delete(db, pk))
            return count
def loadCommandsInDirectory(commandsDirectory):
    """Load every ``.py`` file in *commandsDirectory* as an lldb command module.

    Each module may expose ``lldbinit()`` (run once at load time) and
    ``lldbcommands()`` (returning command objects to register via loadCommand).
    """
    for entry in os.listdir(commandsDirectory):
        moduleName, extension = os.path.splitext(entry)
        if extension != '.py':
            continue
        module = imp.load_source(moduleName, os.path.join(commandsDirectory, entry))
        if hasattr(module, 'lldbinit'):
            module.lldbinit()
        if hasattr(module, 'lldbcommands'):
            # Registry of command callbacks installed for this module.
            module._loadedFunctions = {}
            for command in module.lldbcommands():
                loadCommand(module, command, commandsDirectory, moduleName, extension)
def treino_teste_split(dataset, porcentagem):
    """Split *dataset* into train/test partitions.

    ``porcentagem`` percent of the rows (integer floor) are sampled at random
    for training; every remaining row goes to the test partition.  Each row
    is expected as ``[label, feature1, feature2, ...]``: columns 1-2 become
    the features ``x`` and column 0 the target ``y``.
    """
    train_size = (porcentagem * len(dataset)) // 100
    train_rows = random.sample(dataset, train_size)
    # NOTE(review): this membership test is O(n^2) and drops duplicates that
    # also appear in the train sample - confirm rows are unique.
    test_rows = [row for row in dataset if row not in train_rows]

    def split_xy(rows):
        # Separate features (columns 1-2) from the target (column 0).
        features = [row[1:3] for row in rows]
        targets = [row[0] for row in rows]
        return (features, targets)

    x_train, y_train = split_xy(train_rows)
    x_test, y_test = split_xy(test_rows)
    return (x_train, y_train, x_test, y_test)
def test_condition_cfg(condition_cfg, a, b, c, func):
    """Reaching-definitions analysis over the conditional CFG fixture.

    NOTE(review): several assertions compare one analysis result against two
    different literal sets (e.g. {s0, s1} and {s0, s11}); they can only both
    hold because Assignment compares by value (s1 == s5 == s11) - confirm.
    """
    # Statement definitions; names sN mirror statement positions in the CFG.
    s0 = Assignment(b, Constant(7))
    s1 = Assignment(a, b)
    s4 = Assignment(b, Constant(8))
    s5 = Assignment(a, b)
    s6 = Assignment(ListOperation([c]), Call(func, [b]))
    s7 = Assignment(a, c)
    s10 = Assignment(b, Constant(6))
    s11 = Assignment(a, b)
    rd = ReachingDefinitions(condition_cfg)
    (n0, n1, n2, n3, n4, n5, n6) = (n for n in condition_cfg.nodes)
    # Block-level reach-in/reach-out sets.
    assert (rd.reach_in_block(n0) == set())
    assert (rd.reach_out_block(n0) == {s0, s1})
    assert (rd.reach_in_block(n1) == {s0, s1})
    assert (rd.reach_out_block(n1) == {s10, s11})
    assert (rd.reach_in_block(n2) == {s0, s1})
    assert (rd.reach_out_block(n2) == {s0, s1})
    assert (rd.reach_in_block(n3) == {s0, s1})
    assert (rd.reach_out_block(n3) == {s4, s5})
    assert (rd.reach_out_block(n3) == {s4, s1})
    assert (rd.reach_in_block(n4) == {s0, s1})
    assert (rd.reach_out_block(n4) == {s6, s7, s0})
    assert (rd.reach_in_block(n5) == {s0, s1, s4, s6, s7})
    assert (rd.reach_out_block(n5) == {s0, s1, s4, s6, s7})
    assert (rd.reach_in_block(n6) == {s0, s1, s4, s6, s7, s10})
    assert (rd.reach_in_block(n6) == {s0, s11, s4, s6, s7, s10})
    assert (rd.reach_out_block(n6) == {s0, s1, s4, s6, s7, s10})
    # Statement-level reach-in/reach-out sets.
    assert (rd.reach_in_stmt(n0, 0) == set())
    assert (rd.reach_out_stmt(n0, 0) == {s0})
    assert (rd.reach_in_stmt(n0, 1) == {s0})
    assert (rd.reach_out_stmt(n0, 1) == {s0, s1})
    assert (rd.reach_in_stmt(n1, 0) == {s0, s1})
    assert (rd.reach_out_stmt(n1, 0) == {s1, s10})
    assert (rd.reach_in_stmt(n1, 1) == {s1, s10})
    assert (rd.reach_out_stmt(n1, 1) == {s1, s10} == {s11, s10})
    assert (rd.reach_in_stmt(n2, 0) == {s0, s1})
    assert (rd.reach_out_stmt(n2, 0) == {s0, s1})
    assert (rd.reach_in_stmt(n3, 0) == {s0, s1})
    assert (rd.reach_out_stmt(n3, 0) == {s1, s4})
    assert (rd.reach_out_stmt(n3, 1) == {s1, s4} == {s4, s5})
    assert (rd.reach_in_stmt(n4, 0) == {s0, s1})
    assert (rd.reach_out_stmt(n4, 0) == {s0, s1, s6})
    assert (rd.reach_in_stmt(n4, 1) == {s0, s1, s6})
    assert (rd.reach_out_stmt(n4, 1) == {s0, s6, s7})
    assert (rd.reach_in_stmt(n5, 0) == {s0, s1, s4, s6, s7})
    assert (rd.reach_out_stmt(n5, 0) == {s0, s1, s4, s6, s7})
    assert (rd.reach_in_stmt(n6, 0) == {s0, s1, s4, s6, s7, s10})
    assert (rd.reach_out_stmt(n6, 0) == {s0, s1, s4, s6, s7, s10})
def process_video() -> None:
    """Run the full video pipeline on the globally configured target.

    Steps: content-analysis gate, temp-dir creation, frame extraction,
    per-processor frame processing, video merge, audio restore/skip and
    temp cleanup. All configuration is read from ``facefusion.globals``.
    """
    state = facefusion.globals
    scope = __name__.upper()
    if analyse_video(state.target_path, state.trim_frame_start, state.trim_frame_end):
        # Target was flagged by the content analyser -- abort silently.
        return
    # Keep the source frame rate only when requested; otherwise normalise to 25 fps.
    fps = detect_fps(state.target_path) if state.keep_fps else 25.0
    logger.info(wording.get('creating_temp'), scope)
    create_temp(state.target_path)
    logger.info(wording.get('extracting_frames_fps').format(fps=fps), scope)
    extract_frames(state.target_path, fps)
    temp_frame_paths = get_temp_frame_paths(state.target_path)
    if not temp_frame_paths:
        logger.error(wording.get('temp_frames_not_found'), scope)
        return
    for frame_processor_module in get_frame_processors_modules(state.frame_processors):
        logger.info(wording.get('processing'), frame_processor_module.NAME)
        frame_processor_module.process_video(state.source_paths, temp_frame_paths)
        frame_processor_module.post_process()
    logger.info(wording.get('merging_video_fps').format(fps=fps), scope)
    if not merge_video(state.target_path, fps):
        logger.error(wording.get('merging_video_failed'), scope)
        return
    if state.skip_audio:
        logger.info(wording.get('skipping_audio'), scope)
        move_temp(state.target_path, state.output_path)
    else:
        logger.info(wording.get('restoring_audio'), scope)
        if not restore_audio(state.target_path, state.output_path):
            # Audio restore failed: fall back to the silent merged video.
            logger.warn(wording.get('restoring_audio_skipped'), scope)
            move_temp(state.target_path, state.output_path)
    logger.info(wording.get('clearing_temp'), scope)
    clear_temp(state.target_path)
    if is_video(state.output_path):
        logger.info(wording.get('processing_video_succeed'), scope)
    else:
        logger.error(wording.get('processing_video_failed'), scope)
def register_sub_dataset_with_given_images(*args, **kwargs):
    """Generator-based context manager that registers a temporary COCO subset
    built from explicitly given images, yields its name, and always
    unregisters it on exit.

    All positional/keyword arguments are forwarded to
    ``COCOSubsetWithGivenImages``.
    """
    new_dataset = COCOSubsetWithGivenImages(*args, **kwargs)
    # Register exactly once -- the original called add() twice back to back,
    # which was at best redundant and at worst double-registered the dataset.
    AdhocDatasetManager.add(new_dataset)
    try:
        yield new_dataset.new_ds_name
    finally:
        # Guarantee cleanup even if the caller's block raises.
        AdhocDatasetManager.remove(new_dataset)
def find_best_ordering(obj, num_perms_to_eval=None):
    """Search for the ordering of ``obj.exprs`` with the lowest estimated
    evaluation time.

    num_perms_to_eval: ``None`` selects a default (exhaustive search for
    <= 10 exprs, otherwise 100000 heuristic orderings); ``True`` forces
    exhaustive search; an int evaluates that many blended heuristic
    orderings in addition to three fixed seeds.

    Returns ``(best_ordering, best_time)`` where best_ordering is a list of
    ``(original_index, parsed_repr)`` pairs.
    """
    if (num_perms_to_eval is None):
        # True acts as a sentinel meaning "evaluate every permutation".
        num_perms_to_eval = (True if (len(obj.exprs) <= 10) else 100000)
    best_ordering = None
    best_time = float('inf')
    # Bundle (usage count, average timing, expr) triples so they can be
    # permuted and sorted together.
    stats_zip = tuple(zip(obj.expr_usage_stats, obj.expr_timing_aves, obj.exprs))
    if (num_perms_to_eval is True):
        # Exhaustive: lazily iterate every permutation.
        perms_to_eval = permutations(stats_zip)
    else:
        # Heuristic seeds: original order, most-used first, fastest first.
        perms_to_eval = [stats_zip, sorted(stats_zip, key=(lambda u_t_e: ((- u_t_e[0]), u_t_e[1]))), sorted(stats_zip, key=(lambda u_t_e: (u_t_e[1], (- u_t_e[0]))))]
    if num_perms_to_eval:
        # Add orderings sorted by a usage/timing blend, one per weight a.
        # NOTE: when num_perms_to_eval is True, range(1, True) == range(1, 1)
        # is empty, so nothing is ever appended to the permutations generator
        # above (which has no append()); the branch is effectively skipped.
        max_usage = max(obj.expr_usage_stats)
        max_time = max(obj.expr_timing_aves)
        for i in range(1, num_perms_to_eval):
            a = (i / num_perms_to_eval)
            # Closing over `a` is safe: sorted() evaluates the key eagerly.
            perms_to_eval.append(sorted(stats_zip, key=(lambda u_t_e: ((((- a) * u_t_e[0]) / max_usage) + (((1 - a) * u_t_e[1]) / max_time)))))
    for perm in perms_to_eval:
        (perm_expr_usage_stats, perm_expr_timing_aves) = zip(*[(usage, timing) for (usage, timing, expr) in perm])
        perm_time = time_for_ordering(perm_expr_usage_stats, perm_expr_timing_aves)
        if (perm_time < best_time):
            best_time = perm_time
            # Record the winner as (index into obj.exprs, parsed repr) pairs.
            best_ordering = [(obj.exprs.index(expr), parse_expr_repr(expr)) for (usage, timing, expr) in perm]
    return (best_ordering, best_time)
class MetricSet(object):
    """A set of metrics (counters, gauges, timers, histograms) that share a
    registry and are collected and serialised together.

    Metrics are keyed by ``(name, sorted-label-tuple)`` and created lazily
    under a lock. A metric whose name matches one of the registry's ignore
    patterns, or that would exceed ``DISTINCT_LABEL_LIMIT`` distinct metric
    objects, is silently replaced by ``noop_metric``.
    """
    def __init__(self, registry) -> None:
        self._lock = threading.Lock()  # guards lazy metric creation
        self._counters = {}
        self._gauges = {}
        self._timers = {}
        self._histograms = {}
        self._registry = registry
        # Ensure the "limit reached" warning is only logged once.
        self._label_limit_logged = False
    def counter(self, name, reset_on_collect=False, **labels):
        """Return (creating on first use) the Counter for name/labels."""
        return self._metric(self._counters, Counter, name, reset_on_collect, labels)
    def gauge(self, name, reset_on_collect=False, **labels):
        """Return (creating on first use) the Gauge for name/labels."""
        return self._metric(self._gauges, Gauge, name, reset_on_collect, labels)
    def timer(self, name, reset_on_collect=False, unit=None, **labels):
        """Return (creating on first use) the Timer for name/labels."""
        return self._metric(self._timers, Timer, name, reset_on_collect, labels, unit)
    def histogram(self, name, reset_on_collect=False, unit=None, buckets=None, **labels):
        """Return (creating on first use) the Histogram for name/labels."""
        return self._metric(self._histograms, Histogram, name, reset_on_collect, labels, unit, buckets=buckets)
    def _metric(self, container, metric_class, name, reset_on_collect, labels, unit=None, **kwargs):
        """Look up or lazily create a metric of metric_class in container.

        Returns ``noop_metric`` when the name is ignored by the registry or
        the distinct-metric limit has been reached.
        """
        labels = self._labels_to_key(labels)
        key = (name, labels)
        with self._lock:
            if (key not in container):
                if any((pattern.match(name) for pattern in self._registry.ignore_patterns)):
                    metric = noop_metric
                elif ((((len(self._gauges) + len(self._counters)) + len(self._timers)) + len(self._histograms)) >= DISTINCT_LABEL_LIMIT):
                    if (not self._label_limit_logged):
                        self._label_limit_logged = True
                        logger.warning(('The limit of %d metricsets has been reached, no new metricsets will be created.' % DISTINCT_LABEL_LIMIT))
                    metric = noop_metric
                else:
                    metric = metric_class(name, reset_on_collect=reset_on_collect, unit=unit, **kwargs)
                container[key] = metric
            return container[key]
    def collect(self):
        """Yield one payload dict per distinct label set.

        Each payload is ``{"samples": {...}, "timestamp": <microseconds>}``
        plus ``"tags"`` when labels exist. Metrics flagged
        ``reset_on_collect`` are reset after being read; zero-valued
        resettable metrics are skipped entirely.
        """
        self.before_collect()
        # Timestamp in microseconds since the epoch.
        timestamp = int((time.time() * 1000000))
        samples = defaultdict(dict)
        if self._counters:
            # Iterate over a copy so concurrent metric creation cannot
            # invalidate the iteration.
            for ((name, labels), counter) in self._counters.copy().items():
                if (counter is not noop_metric):
                    val = counter.val
                    if (val or (not counter.reset_on_collect)):
                        samples[labels].update({name: {'value': val}})
                    if counter.reset_on_collect:
                        counter.reset()
        if self._gauges:
            for ((name, labels), gauge) in self._gauges.copy().items():
                if (gauge is not noop_metric):
                    val = gauge.val
                    if (val or (not gauge.reset_on_collect)):
                        samples[labels].update({name: {'value': val, 'type': 'gauge'}})
                    if gauge.reset_on_collect:
                        gauge.reset()
        if self._timers:
            for ((name, labels), timer) in self._timers.copy().items():
                if (timer is not noop_metric):
                    (val, count) = timer.val
                    if (val or (not timer.reset_on_collect)):
                        # Timers expand to "<name>.sum[.<unit>]" + "<name>.count".
                        sum_name = '.sum'
                        if timer._unit:
                            sum_name += ('.' + timer._unit)
                        samples[labels].update({(name + sum_name): {'value': val}})
                        samples[labels].update({(name + '.count'): {'value': count}})
                    if timer.reset_on_collect:
                        timer.reset()
        if self._histograms:
            for ((name, labels), histo) in self._histograms.copy().items():
                if (histo is not noop_metric):
                    counts = histo.val
                    if (counts or (not histo.reset_on_collect)):
                        # Report each bucket by its midpoint: halve the first
                        # upper bound (if positive), reuse the previous bound
                        # for the final (overflow) bucket, and take the
                        # midpoint of adjacent bounds otherwise.
                        bucket_midpoints = []
                        for (i, bucket_le) in enumerate(histo.buckets):
                            if (i == 0):
                                if (bucket_le > 0):
                                    bucket_le /= 2.0
                            elif (i == (len(histo.buckets) - 1)):
                                bucket_le = histo.buckets[(i - 1)]
                            else:
                                bucket_le = (histo.buckets[(i - 1)] + ((bucket_le - histo.buckets[(i - 1)]) / 2.0))
                            bucket_midpoints.append(bucket_le)
                        samples[labels].update({name: {'counts': counts, 'values': bucket_midpoints, 'type': 'histogram'}})
                    if histo.reset_on_collect:
                        histo.reset()
        if samples:
            for (labels, sample) in samples.items():
                result = {'samples': sample, 'timestamp': timestamp}
                if labels:
                    result['tags'] = {k: v for (k, v) in labels}
                (yield self.before_yield(result))
    def before_collect(self) -> None:
        """Hook run at the start of collect(); subclasses may refresh gauges here."""
        pass
    def before_yield(self, data):
        """Hook letting subclasses post-process each payload before it is yielded."""
        return data
    def _labels_to_key(self, labels):
        # Canonical hashable form: sorted (key, str(value)) pairs.
        return tuple(((k, str(v)) for (k, v) in sorted(labels.items())))
def extractLovelyxDay(item):
    """Build a release message for a LovelyxDay feed item.

    Returns None for previews or items without chapter/volume/fragment
    information, a release message for known series tags, and False for
    anything unrecognised.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip previews and items carrying no chapter/volume/fragment info.
    if 'preview' in item['title'].lower() or not (chp or vol or frag):
        return None
    if 'WSU' in item['tags']:
        return buildReleaseMessageWithType(item, "Because I'm a Weapon Shop Uncle", vol, chp, frag=frag, postfix=postfix)
    return False
def create_lab_test_template(medical_department):
    """Fetch or create the 'Blood Test' Lab Test Template test fixture.

    Idempotent: an existing template is returned untouched.
    """
    if frappe.db.exists('Lab Test Template', 'Blood Test'):
        # Reuse the existing fixture instead of recreating it.
        return frappe.get_doc('Lab Test Template', 'Blood Test')
    template = frappe.new_doc('Lab Test Template')
    for field, value in (
        ('lab_test_name', 'Blood Test'),
        ('lab_test_code', 'Blood Test'),
        ('lab_test_group', 'Services'),
        ('department', medical_department),
        ('is_billable', 1),
        ('lab_test_rate', 2000),
    ):
        setattr(template, field, value)
    template.save()
    return template
def _upload_file(source_path: str, target_path: str, unzip: bool=False):
    """Upload a local file to *target_path* via the REST client.

    :param source_path: local path of the file to send.
    :param target_path: remote destination path.
    :param unzip: ask the server to unzip the payload after upload.
    :raises Exception: if the server responds with a non-200 status.
    """
    file_name = os.path.basename(source_path)
    with open(source_path, 'rb') as stream:
        multipart = upload_file_model.BodyUploadLocalFile(rest_types.File(payload=stream, file_name=file_name))
        response = upload_local_file_api.sync_detailed(target_path, client=REST_CLIENT, unzip=unzip, multipart_data=multipart)
    if response.status_code != 200:
        raise Exception(f'Failed to upload file. Server returned status code {response.status_code} and message {response.parsed}')
def list_providers(feature: Optional[str]=None, subfeature: Optional[str]=None) -> List[str]:
    """Return the distinct providers offering *feature*/*subfeature*.

    Either filter may be None, meaning "any". Order of the returned list is
    unspecified (it is materialised from a set).
    """
    matching = {
        provider
        for provider, feature_name, subfeature_name, *_rest in list_features()
        if (not feature or feature_name == feature)
        and (not subfeature or subfeature_name == subfeature)
    }
    return list(matching)
def test_handles_multi_step_scenarios():
    """Six env steps on a 2-sub-step structured env must yield 3 recorded
    flat steps, each carrying 2 actions and 2 observations."""
    env = SpacesRecordingWrapper.wrap(build_dummy_structured_env(), output_dir='space_records')
    env.reset()
    for _ in range(6):
        env.step(env.action_space.sample())
    records = env.episode_record.step_records
    assert len(records) == 3
    for record in records:
        assert len(record.actions) == 2
        assert len(record.observations) == 2
class BridgeRoom(WeatherRoom):
    """A weather-affected room representing a bridge.

    Exit targets are stored on the room so the bridge command set can move
    characters appropriately; characters entering from the east gate start
    at the east end (position 4), everyone else at the west end (position 0).
    """

    def at_object_creation(self):
        """Configure exits, attach the bridge command set and hide the room."""
        super().at_object_creation()
        self.db.west_exit = 'cliff'
        self.db.east_exit = 'gate'
        self.db.fall_exit = 'cliffledge'
        self.cmdset.add(BridgeCmdSet, persistent=True)
        self.locks.add('view:false()')

    def update_weather(self, *args, **kwargs):
        """Echo a random bridge-weather message most of the time.

        BUGFIX: the original tested ``random.random() < 80``, which is always
        true because random() returns a float in [0.0, 1.0) -- the check was
        a no-op. Restored the evident 80%% probability.
        """
        if random.random() < 0.8:
            self.msg_contents('|w%s|n' % random.choice(BRIDGE_WEATHER))

    def at_object_receive(self, character, source_location, move_type='move', **kwargs):
        """Place an arriving account-controlled character at a bridge end."""
        if character.has_account:
            wexit = search_object(self.db.west_exit)
            eexit = search_object(self.db.east_exit)
            fexit = search_object(self.db.fall_exit)
            if not (wexit and eexit and fexit):
                # Misconfigured exits: degrade gracefully to the west end.
                character.msg("The bridge's exits are not properly configured. Contact an admin. Forcing west-end placement.")
                character.db.tutorial_bridge_position = 0
                return
            # Entering from the east gate starts at position 4 (east end);
            # any other entry point starts at position 0 (west end).
            if source_location == eexit[0]:
                character.db.tutorial_bridge_position = 4
            else:
                character.db.tutorial_bridge_position = 0
            character.execute_cmd('look')

    def at_object_leave(self, character, target_location, move_type='move', **kwargs):
        """Drop the per-character bridge position when leaving the room."""
        if character.has_account:
            del character.db.tutorial_bridge_position
class VariableStore(dict):
    """A dict-backed variable table with convenience accessors.

    ``nxt`` is an extra slot (initialised to None) available for chaining
    stores together.
    """

    def __init__(self):
        super(VariableStore, self).__init__()
        # Optional link to another store; unused until a caller sets it.
        self.nxt = None

    def create_variable(self, name, variable):
        """Bind *name* to *variable*, overwriting any existing binding."""
        self[name] = variable

    def get_variable(self, name):
        """Return the value bound to *name*, or None when unbound."""
        return self.get(name)

    def increment_variable(self, name, amount=1):
        """Increase the value bound to *name* by *amount* (in place)."""
        self[name] += amount

    def decrement_variable(self, name, amount=1):
        """Decrease the value bound to *name* by *amount* (in place)."""
        self[name] -= amount
def main():
    """Ansible module entry point: configure FortiOS firewall SSH settings.

    Builds the argument spec from the versioned schema, talks to FortiOS over
    the httpapi connection, applies the configuration and exits with
    changed/meta/diff information (or fails with details).
    """
    module_spec = schema_to_module_spec(versioned_schema)
    # firewall_ssh_setting is a singleton endpoint, so there is no member key.
    mkeyname = None
    fields = {'access_token': {'required': False, 'type': 'str', 'no_log': True}, 'enable_log': {'required': False, 'type': 'bool', 'default': False}, 'vdom': {'required': False, 'type': 'str', 'default': 'root'}, 'member_path': {'required': False, 'type': 'str'}, 'member_state': {'type': 'str', 'required': False, 'choices': ['present', 'absent']}, 'firewall_ssh_setting': {'required': False, 'type': 'dict', 'default': None, 'options': {}}}
    # Mirror every schema-derived option into the nested module spec.
    for attribute_name in module_spec['options']:
        fields['firewall_ssh_setting']['options'][attribute_name] = module_spec['options'][attribute_name]
        if (mkeyname and (mkeyname == attribute_name)):
            # Would mark the member key required; dead here since mkeyname is None
            # (kept for parity with other generated fortios modules).
            fields['firewall_ssh_setting']['options'][attribute_name]['required'] = True
    module = AnsibleModule(argument_spec=fields, supports_check_mode=False)
    check_legacy_fortiosapi(module)
    is_error = False
    has_changed = False
    result = None
    diff = None
    versions_check_result = None
    if module._socket_path:
        # httpapi connection available: forward options and run the change.
        connection = Connection(module._socket_path)
        if ('access_token' in module.params):
            connection.set_option('access_token', module.params['access_token'])
        if ('enable_log' in module.params):
            connection.set_option('enable_log', module.params['enable_log'])
        else:
            connection.set_option('enable_log', False)
        fos = FortiOSHandler(connection, module, mkeyname)
        # Compare the playbook's schema against the connected FortiOS version.
        versions_check_result = check_schema_versioning(fos, versioned_schema, 'firewall_ssh_setting')
        (is_error, has_changed, result, diff) = fortios_firewall_ssh(module.params, fos)
    else:
        module.fail_json(**FAIL_SOCKET_MSG)
    if (versions_check_result and (versions_check_result['matched'] is False)):
        module.warn('Ansible has detected version mismatch between FortOS system and your playbook, see more details by specifying option -vvv')
    if (not is_error):
        if (versions_check_result and (versions_check_result['matched'] is False)):
            module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result, diff=diff)
        else:
            module.exit_json(changed=has_changed, meta=result, diff=diff)
    elif (versions_check_result and (versions_check_result['matched'] is False)):
        module.fail_json(msg='Error in repo', version_check_warning=versions_check_result, meta=result)
    else:
        module.fail_json(msg='Error in repo', meta=result)
def node_id_to_name(client, node_id):
    """Resolve an Elasticsearch node_id to its node name.

    Queries the nodes stats API; returns the node's name, or None (after
    logging an error) when the id is unknown.
    """
    logger = logging.getLogger(__name__)
    name = None
    node_entry = client.nodes.stats()['nodes'].get(node_id)
    if node_entry is not None:
        name = node_entry['name']
    else:
        logger.error('No node_id found matching: "%s"', node_id)
    logger.debug('Name associated with node_id "%s": %s', node_id, name)
    return name
def version_dialog(logging_level=logging.WARNING, mode=2):
    """Open the Anima version dialog inside Blender.

    Sets up the database connection, forces the PySide2 UI bindings, then
    launches the dialog bound to a Blender DCC environment.

    :param logging_level: logging level applied to the module logger.
    :param mode: forwarded to version_dialog.UI; semantics defined by
        anima.ui.dialogs (presumably open/save mode -- confirm upstream).
    """
    # Imports are deferred so Blender startup doesn't pay for them, and the
    # order matters: DB setup and toolkit selection must precede UI imports.
    from anima.utils import do_db_setup
    do_db_setup()
    from anima import ui
    # Select the PySide2 bindings before any UI modules are imported.
    ui.SET_PYSIDE2()
    from anima.ui.dialogs import version_dialog
    from anima.dcc import blender as blender_dcc
    b = blender_dcc.Blender()
    logger.setLevel(logging_level)
    version_dialog.UI(environment=b, parent=None, mode=mode)
class Round2Test(unittest.TestCase):
    """Exercises round2(), a round-half-away-from-zero replacement for the
    built-in round() (which rounds halves to even)."""

    def test_second_argument_type(self):
        # ndigits must be an integer; a float is rejected.
        self.assertRaises(TypeError, round2, 3.14159, 2.0)

    def test_halfway_cases(self):
        # Exact halfway values must round away from zero, never to even.
        two_digit_cases = [
            (0.125, 0.13), (0.375, 0.38), (0.625, 0.63), (0.875, 0.88),
            (-0.125, -0.13), (-0.375, -0.38), (-0.625, -0.63), (-0.875, -0.88),
        ]
        for value, expected in two_digit_cases:
            self.assertAlmostEqual(round2(value, 2), expected)
        one_digit_cases = [(0.25, 0.3), (0.75, 0.8), (-0.25, -0.3), (-0.75, -0.8)]
        for value, expected in one_digit_cases:
            self.assertAlmostEqual(round2(value, 1), expected)
        half_cases = [
            (-6.5, -7.0), (-5.5, -6.0), (-1.5, -2.0), (-0.5, -1.0),
            (0.5, 1.0), (1.5, 2.0), (2.5, 3.0), (3.5, 4.0),
            (4.5, 5.0), (5.5, 6.0), (6.5, 7.0),
        ]
        for value, expected in half_cases:
            # Explicit ndigits of 0 and the ndigits default must agree.
            self.assertEqual(round2(value, 0), expected)
            self.assertEqual(round2(value), expected)
        tens_cases = [
            (-25.0, -30.0), (-15.0, -20.0), (-5.0, -10.0), (5.0, 10.0),
            (15.0, 20.0), (25.0, 30.0), (35.0, 40.0), (45.0, 50.0),
            (55.0, 60.0), (65.0, 70.0), (75.0, 80.0), (85.0, 90.0),
            (95.0, 100.0), (12325.0, 12330.0), (0, 0.0),
        ]
        for value, expected in tens_cases:
            self.assertEqual(round2(value, -1), expected)
        self.assertEqual(round2(350.0, -2), 400.0)
        self.assertEqual(round2(450.0, -2), 500.0)
        # Very large magnitudes with negative ndigits.
        huge_cases = [
            (5e+20, -21, 1e+21), (1.5e+21, -21, 2e+21), (2.5e+21, -21, 3e+21),
            (5.5e+21, -21, 6e+21), (8.5e+21, -21, 9e+21),
            (-1.5e+22, -22, -2e+22), (-5e+21, -22, -1e+22),
            (5e+21, -22, 1e+22), (1.5e+22, -22, 2e+22),
        ]
        for value, ndigits, expected in huge_cases:
            self.assertAlmostEqual(round2(value, ndigits), expected)
def test_one_mathematical_operation(df_vartypes):
    """func='sum' and func=['sum'] must produce the same 'sum_Age_Marks' column."""
    expected = pd.DataFrame.from_dict({
        'Name': ['tom', 'nick', 'krish', 'jack'],
        'City': ['London', 'Manchester', 'Liverpool', 'Bristol'],
        'Age': [20, 21, 19, 18],
        'Marks': [0.9, 0.8, 0.7, 0.6],
        'dob': pd.date_range('2020-02-24', periods=4, freq='T'),
        'sum_Age_Marks': [20.9, 21.8, 19.7, 18.6],
    })
    # The scalar and one-element-list forms of `func` are equivalent.
    for func_spec in ('sum', ['sum']):
        transformer = MathFeatures(variables=['Age', 'Marks'], func=func_spec)
        result = transformer.fit_transform(df_vartypes)
        pd.testing.assert_frame_equal(result, expected)
def test_num_sources():
    """Simulation must accept up to MAX_NUM_SOURCES sources and reject one more.

    NOTE(review): the original built the source with ``freq0=.0`` and
    ``fwidth=.0``; GaussianPulse requires positive freq0/fwidth, so that
    spec would fail validation before the source-count limit was ever
    exercised. Replaced with a physically valid pulse -- confirm the values
    against the project's other tests.
    """
    src = td.PlaneWave(
        source_time=td.GaussianPulse(freq0=2e14, fwidth=1e13),
        center=(0, 0, 0),
        size=(td.inf, td.inf, 0),
        direction='+',
    )
    # At exactly the limit the simulation must validate...
    _ = td.Simulation(size=(5, 5, 5), run_time=1e-12, sources=([src] * MAX_NUM_SOURCES))
    # ...and one source beyond it must be rejected.
    with pytest.raises(pydantic.ValidationError):
        _ = td.Simulation(size=(5, 5, 5), run_time=1e-12, sources=([src] * (MAX_NUM_SOURCES + 1)))
class Createrepo(Action):
    """Action that (re)creates RPM repository metadata for every requested
    project directory / chroot combination."""

    def run(self):
        """Create each repo directory (if missing) and regenerate its metadata.

        Returns ActionResult.SUCCESS only when every createrepo run
        succeeded; processing continues past individual failures so every
        repo still gets a chance.
        """
        self.log.info('Action createrepo')
        data = json.loads(self.data['data'])
        ownername = data['ownername']
        # projectname is read for payload validation parity but unused below.
        projectname = data['projectname']
        project_dirnames = data['project_dirnames']
        chroots = data['chroots']
        appstream = data['appstream']
        devel = data['devel']
        result = ActionResult.SUCCESS
        # (removed the original's unused `done_count` local)
        for project_dirname in project_dirnames:
            for chroot in chroots:
                self.log.info('Creating repo for: %s/%s/%s', ownername, project_dirname, chroot)
                repo = os.path.join(self.destdir, ownername, project_dirname, chroot)
                try:
                    os.makedirs(repo)
                    self.log.info('Empty repo so far, directory created')
                except FileExistsError:
                    # Repo directory already exists -- nothing to create.
                    pass
                if not call_copr_repo(repo, appstream=appstream, devel=devel, logger=self.log):
                    result = ActionResult.FAILURE
        return result
class PhaseGrating(PhaseApodizer):
    """A phase grating apodizer with configurable period, amplitude, profile
    and orientation.

    NOTE(review): the original defined each getter/setter pair without
    @property decorators, so the second definition silently replaced the
    first and the getters were unreachable; restored as properties.
    """

    def __init__(self, grating_period, grating_amplitude, grating_profile=None, orientation=0):
        self._grating_period = grating_period
        self._orientation = orientation
        self._grating_amplitude = grating_amplitude
        if grating_profile is None:
            # Default profile: a sinusoid along the (rotated, scaled) y axis.
            def sinusoidal_grating_profile(grid):
                return np.sin((2 * np.pi) * grid.y)
            grating_profile = sinusoidal_grating_profile
        self._grating_profile = grating_profile
        super().__init__(self.grating_pattern)

    def grating_pattern(self, grid):
        """Evaluate the grating phase on *grid*, rotated by the orientation
        and scaled so one profile period spans the grating period."""
        return (self._grating_amplitude * Field(self._grating_profile(grid.rotated(self._orientation).scaled(1 / self._grating_period)), grid))

    @property
    def orientation(self):
        """Grating orientation (rotation applied to the grid)."""
        return self._orientation

    @orientation.setter
    def orientation(self, new_orientation):
        self._orientation = new_orientation
        # Re-bind the phase so the new orientation takes effect.
        self.phase = self.grating_pattern

    @property
    def period(self):
        """Grating period."""
        return self._grating_period

    @period.setter
    def period(self, new_period):
        self._grating_period = new_period
        self.phase = self.grating_pattern

    @property
    def amplitude(self):
        """Grating amplitude.

        BUGFIX: the original read/wrote ``self._amplitude``, which __init__
        never sets (it stores ``_grating_amplitude``); the getter would have
        raised AttributeError until the setter ran. Unified on
        ``_grating_amplitude``.
        """
        return self._grating_amplitude

    @amplitude.setter
    def amplitude(self, new_amplitude):
        self._grating_amplitude = new_amplitude
        self.phase = self.grating_pattern
class Google():
    """Shapes query-result rows into the dict payload expected by the
    Google Charts JavaScript wrappers.

    NOTE(review): methods are declared without ``self`` -- presumably they
    are invoked directly on the class (``Google.y(...)``) as namespaced
    helpers; confirm against callers before instantiating this class.
    """
    def y(data: List[dict], y_columns: List[str], x_axis: str, options: dict=None) -> dict:
        """Build labels/datasets/series for an XY-style chart.

        :param data: result records (list of dicts); None yields an empty payload.
        :param y_columns: names of the value columns.
        :param x_axis: column supplying the x values.
        :param options: with ``options['agg'] == 'distinct'`` records are
            emitted one-by-one; otherwise values are summed per x value.
        """
        is_data = {'labels': [], 'datasets': [], 'series': [], 'python': True}
        if (data is None):
            return is_data
        if ((options is not None) and (options.get('agg') == 'distinct')):
            labels = OrderedSet()
            for rec in data:
                labels.add(rec[x_axis])
                for y in y_columns:
                    if (y in rec):
                        # One [x, y] pair per record/column; no aggregation.
                        is_data['datasets'].append([str(rec[x_axis]), rec[y]])
            is_data['labels'] = sorted(list(labels))
        else:
            agg_data = {}
            for rec in data:
                for y in y_columns:
                    if (y in rec):
                        # Sum values per (column, x) pair.
                        agg_data.setdefault(y, {})[rec[x_axis]] = (agg_data.get(y, {}).get(rec[x_axis], 0) + float(rec[y]))
            (labels, data) = (OrderedSet(), [])
            for c in y_columns:
                for (x, y) in agg_data.get(c, {}).items():
                    labels.add(x)
            # NOTE(review): labels stays an OrderedSet here, while the
            # 'distinct' branch stores a sorted list -- confirm consumers
            # accept both shapes.
            is_data['labels'] = labels
            for x in labels:
                # Missing (column, x) pairs come through as None.
                is_data['datasets'].append(([str(x)] + [agg_data.get(y, {}).get(x) for y in y_columns]))
        is_data['series'] = y_columns
        is_data['x'] = x_axis
        return is_data
    def table(data: List[dict], rows: List[str], cols: List[str]) -> dict:
        """Build a row-per-record table payload; missing cells become ''."""
        is_data = {'rows': rows, 'datasets': [], 'cols': cols, 'python': True}
        if (data is None):
            return is_data
        for rec in data:
            is_data['datasets'].append([rec.get(c, '') for c in (rows + cols)])
        return is_data
def recursive_update_all_values(config_type: Type[BaseModel], config_values: dict, name_prefixes: List[str], index_prefixes: Optional[list]=None, value_storage=None) -> dict:
    """Recursively overlay values from *value_storage* (default: os.environ)
    onto *config_values* for every field of the pydantic model *config_type*.

    ``name_prefixes`` build the ENV-style variable name; ``index_prefixes``
    track the nested position inside the result dict. Union-typed fields
    ("branches") are resolved via a required ``choose_function`` in the
    field's extras.

    Returns a nested dict of the effective values.
    """
    if (index_prefixes is None):
        index_prefixes = []
    if (value_storage is None):
        value_storage = os.environ
    result_dict: dict = {}
    for (name, model_field) in config_type.__fields__.items():
        is_nesting = False
        is_branching = False
        branches = []
        try:
            # Nested pydantic model => recurse into it below.
            is_nesting = issubclass(model_field.type_, BaseModel)
        except TypeError:
            # Non-class annotation; a Union means one branch must be chosen.
            # NOTE(review): typing._UnionGenericAlias is a private typing API
            # and may change between Python versions -- confirm supported
            # interpreter versions.
            if isinstance(model_field.type_, typing._UnionGenericAlias):
                is_branching = True
                branches = model_field.type_.__args__
        if ((not is_nesting) and (not is_branching)):
            # Plain leaf: the storage (env) value wins over the config value.
            value = get_nested_key(config_values, (index_prefixes + [name]), None)
            env_name = make_env_name(name_prefixes, name)
            new_value = value_storage.get(env_name, value)
            set_nested_key(result_dict, index_prefixes, name, new_value)
        elif is_branching:
            # Resolve every Union member, then let choose_function pick one.
            branch_dicts = []
            for branch in branches:
                snake_name = camel_to_snake_case(branch.__name__)
                cur = recursive_update_all_values(branch, config_values, (name_prefixes + [snake_name]), index_prefixes=(index_prefixes + [camel_to_snake_case(name)]), value_storage=value_storage)
                # Tag each candidate so choose_function can discriminate.
                cur['branch_name'] = branch.__name__
                branch_dicts += [cur]
            try:
                choose_function = model_field.field_info.extra['choose_function']
            except KeyError as exc:
                raise ValueError(f'''You should provide choose_function to your field.
For example: {name}: {model_field.type_} = Field(choose_function=lambda x: True)''') from exc
            # First candidate accepted by choose_function wins.
            result_branch_dict = next(filter(choose_function, branch_dicts))
            del result_branch_dict['branch_name']
            set_nested_key(result_dict, index_prefixes, name, result_branch_dict[name])
        else:
            # Nested model: recurse and merge its leaves into our result.
            snake_name = camel_to_snake_case(name)
            nested_config = recursive_update_all_values(model_field.type_, config_values, (name_prefixes + [snake_name]), index_prefixes=(index_prefixes + [snake_name.lower()]), value_storage=value_storage)
            merge_leafs_dicts(result_dict, nested_config)
    return result_dict
class Layout3D(Layout):
    """Layout specialisation for 3D plots, exposing the 'scene' sub-config
    with chainable color helpers."""

    @property
    def scene(self):
        """The 3D scene sub-configuration (LayoutScene).

        NOTE(review): grid_colors/axis_colors access ``self.scene.xaxis``,
        which only works if ``scene`` is a property; as a plain method the
        attribute access would hit a bound method object. Restored the
        evidently-stripped @property decorator.
        """
        return self.sub_data('scene', LayoutScene)

    def grid_colors(self, x_color, y_color=None, z_color=None):
        """Set grid and zero-line colors per axis; returns self for chaining.

        NOTE(review): unlike axis_colors, y/z here do NOT fall back to
        x_color when omitted (None is assigned) -- confirm intended.
        """
        self.scene.xaxis.gridcolor = x_color
        self.scene.xaxis.zerolinecolor = x_color
        self.scene.yaxis.gridcolor = y_color
        self.scene.yaxis.zerolinecolor = y_color
        self.scene.zaxis.gridcolor = z_color
        self.scene.zaxis.zerolinecolor = z_color
        return self

    def axis_colors(self, x_color, y_color=None, z_color=None):
        """Set axis colors; y/z default to x_color. Returns self for chaining."""
        self.scene.xaxis.set_color(x_color)
        self.scene.yaxis.set_color(y_color or x_color)
        self.scene.zaxis.set_color(z_color or x_color)
        return self
def test_clip_behavior_enum():
    """clip_behavior accepts None, ClipBehavior enum members and raw strings."""
    container = ft.Container()
    # Default: unset on both the Python object and the wire attribute.
    assert container.clip_behavior is None
    assert container._get_attr('clipBehavior') is None
    container = ft.Container(clip_behavior=ft.ClipBehavior.ANTI_ALIAS)
    # Enum values are kept as enums and serialised to their camelCase string.
    assert isinstance(container.clip_behavior, ft.ClipBehavior)
    assert container.clip_behavior == ft.ClipBehavior.ANTI_ALIAS
    assert container._get_attr('clipBehavior') == 'antiAlias'
    container = ft.Container(clip_behavior='none')
    # Raw strings pass through untouched.
    assert isinstance(container.clip_behavior, str)
    assert container._get_attr('clipBehavior') == 'none'
class FullTextDataGenerator(ContextAwareLayoutTokenModelDataGenerator):
    """Data generator emitting the full-text model's feature vector for each
    layout token (text forms, affixes, layout status and style flags)."""

    def iter_model_data_for_context_layout_token_features(self, token_features: ContextAwareLayoutTokenFeatures) -> Iterable[LayoutModelData]:
        features = token_features
        # Feature order matters: it must match the trained model's template.
        feature_values = [
            features.token_text,
            features.get_lower_token_text(),
            features.get_prefix(1),
            features.get_prefix(2),
            features.get_prefix(3),
            features.get_prefix(4),
            features.get_suffix(1),
            features.get_suffix(2),
            features.get_suffix(3),
            features.get_suffix(4),
            features.get_block_status_with_blockstart_for_single_token(),
            features.get_line_status_with_linestart_for_single_token(),
            features.get_alignment_status(),
            features.get_token_font_status(),
            features.get_token_font_size_feature(),
            features.get_str_is_bold(),
            features.get_str_is_italic(),
            features.get_capitalisation_status_using_allcap(),
            features.get_digit_status_using_containsdigits(),
            features.get_str_is_single_char(),
            features.get_punctuation_type_feature(),
            features.get_dummy_str_relative_document_position(),
            features.get_dummy_str_relative_page_position(),
            features.get_dummy_str_is_bitmap_around(),
            features.get_dummy_callout_type(),
            features.get_dummy_str_is_callout_known(),
            features.get_str_is_superscript(),
        ]
        yield features.get_layout_model_data(feature_values)
def create_oidc_provider_config(provider_id, client_id, issuer, display_name=None, enabled=None, client_secret=None, id_token_response_type=None, code_response_type=None, app=None):
    """Create a new OIDC provider configuration via the client for *app*
    (or the default app when *app* is None). All other arguments are
    forwarded unchanged to the client call."""
    client = _get_client(app)
    return client.create_oidc_provider_config(
        provider_id,
        client_id=client_id,
        issuer=issuer,
        display_name=display_name,
        enabled=enabled,
        client_secret=client_secret,
        id_token_response_type=id_token_response_type,
        code_response_type=code_response_type,
    )
class DataframeScatterOverlay(TextBoxOverlay):
    """Overlay that renders a text box describing the scatter point currently
    reported by a ScatterInspector (hover/selection)."""

    # Inspector whose events drive the overlay text.
    inspector = Instance(ScatterInspector)

    # Maps a data index to the text displayed; defaults to a dump of the
    # row's columns (see _message_for_data_default).
    message_for_data = Callable

    # NOTE(review): the bare string below looks like the remnant of a
    # stripped trait-notification decorator, presumably
    # @on_trait_change('inspector:inspector_event'); as written it is a
    # no-op expression and the handler is never auto-wired -- confirm
    # against the upstream source.
    ('inspector:inspector_event')
    def scatter_point_found(self, event):
        """Show details for the newly inspected point, or hide when none."""
        inspector_event = event.new
        data_idx = inspector_event.event_index
        if (data_idx is not None):
            self.text = self.message_for_data(data_idx)
        else:
            self.text = ''
        # Only show the box when there is something to display.
        self.visible = (len(self.text) > 0)
        self.component.request_redraw()

    def _message_for_data_default(self):
        # Traits default initializer for message_for_data.
        def show_data(data_idx):
            data = self.inspector.data.iloc[data_idx]
            elements = ['idx: {}'.format(data_idx)]
            for col in data.index:
                elements.append('{}: {}'.format(col, data[col]))
            text = '\n'.join(elements)
            return text
        return show_data
def fetch_agency_tier_id_by_agency(agency_name: str, is_subtier: bool=False) -> Optional[int]:
    """Look up the Agency row id matching *agency_name* at the requested tier.

    Searches subtier agencies when is_subtier is True, otherwise toptier
    agencies flagged as toptier. Returns the id, or None (after logging a
    warning) when no match exists.
    """
    tier = 'subtier_agency' if is_subtier else 'toptier_agency'
    filters = {'{}__name'.format(tier): agency_name}
    if not is_subtier:
        # Toptier lookups are restricted to rows flagged as toptier.
        filters['toptier_flag'] = True
    columns = ['id']
    match = Agency.objects.filter(**filters).values(*columns).first()
    if match:
        return match[columns[0]]
    logger.warning('{} not found for agency_name: {}'.format(','.join(columns), agency_name))
    return None
@pytest.mark.parametrize(
    'feature_func,expected',
    [
        (None, {'feature1': None, 'feature2': None}),
        ('st1', {'feature1': 'st1', 'feature2': 'st1'}),
        ({'feature1': _custom_stattest}, {'feature1': _custom_stattest, 'feature2': None}),
        ({'feature2': _custom_stattest}, {'feature1': None, 'feature2': _custom_stattest}),
        ({'feature1': _another_stattest, 'feature2': _custom_stattest}, {'feature1': _another_stattest, 'feature2': _custom_stattest}),
        ({'feature1': 'st1'}, {'feature1': 'st1', 'feature2': None}),
        ({'feature2': 'st2'}, {'feature1': None, 'feature2': 'st2'}),
        ({'feature1': 'st1', 'feature2': 'st2'}, {'feature1': 'st1', 'feature2': 'st2'}),
        ({'feature1': _another_stattest, 'feature2': 'st2'}, {'feature1': _another_stattest, 'feature2': 'st2'}),
    ],
)
def test_stattest_function_valid(feature_func, expected):
    """Each per-feature stattest spec must resolve to the expected function/name.

    BUGFIX: the original began with a bare ``.parametrize(...)`` expression
    -- a syntax error; the ``@pytest.mark`` decorator prefix had evidently
    been lost. Restored.
    """
    options = DataDriftOptions(feature_stattest_func=feature_func)
    for feature, expected_func in expected.items():
        assert options.get_feature_stattest_func(feature, 'cat') == expected_func
class OptionSeriesSolidgaugeSonificationContexttracksMappingRate(Options):
    """Config accessors for the solid-gauge sonification context-track
    ``mapping.rate`` options, backed by the generic _config_get/_config store.

    BUGFIX: the original defined every getter/setter pair without @property
    decorators, so each second definition silently replaced the first and
    the getters were unreachable. Restored as properties.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def restore_materials(objs):
    """Restore previously backed-up materials onto *objs*, reassigning each
    face's material index.

    Objects with no entry in the module-level ``stored_materials`` mapping
    get all their material slots removed instead. Relies on
    ``stored_materials`` and ``stored_material_faces`` populated by the
    matching backup step. Leaves every object in OBJECT mode.
    """
    if (len(objs) == 0):
        return
    else:
        for obj in objs:
            if (stored_materials.get(obj) == None):
                # No backup recorded for this object: strip all its slots.
                bpy.ops.object.mode_set(mode='OBJECT')
                bpy.ops.object.select_all(action='DESELECT')
                obj.select_set(state=True, view_layer=None)
                bpy.context.view_layer.objects.active = obj
                count = len(obj.material_slots)
                for i in range(count):
                    bpy.ops.object.material_slot_remove()
    # Only objects with a backup are processed below.
    objs = [obj for obj in objs if (obj in stored_materials)]
    for obj in objs:
        bpy.context.view_layer.objects.active = obj
        bpy.ops.object.mode_set(mode='EDIT')
        bm = bmesh.from_edit_mesh(obj.data)
        for index in range(len(stored_materials[obj])):
            material = stored_materials[obj][index]
            faces = stored_material_faces[obj][index]
            if material:
                # Drop the backup prefix and put the material back in its slot.
                material.name = material.name.replace('backup_', '')
                obj.material_slots[index].material = material
                material.use_fake_user = False
                # Reassign the faces that used this slot before the backup.
                for face in bm.faces:
                    if (face.index in faces):
                        face.material_index = index
        bpy.ops.object.mode_set(mode='OBJECT')
        if (len(stored_materials[obj]) == 0):
            # Backup was empty: the object originally had no materials.
            for i in range(len(obj.material_slots)):
                bpy.ops.object.material_slot_remove()
class NetworkSimulator():
    """Discrete-time simulation of a p2p network with random message latency,
    probabilistic delivery, and offline/partition fault injection."""

    def __init__(self):
        self.agents = []
        # Per-message latency in ticks: N(50, 20) clipped at zero.
        self.latency_distribution_sample = transform(normal_distribution(50, 20), (lambda x: max(x, 0)))
        self.time = 0
        # Messages in flight: delivery tick -> [(recipient, obj), ...].
        self.objqueue = {}
        # agent id -> list of peer agents (duplicate links possible).
        self.peers = {}
        # Probability that a queued message is actually delivered.
        self.reliability = 0.9

    def generate_peers(self, num_peers=5):
        """(Re)build a random peer graph with reciprocal links.

        Each agent samples peers until it holds more than ``num_peers // 2``
        non-self entries (a self-pick is dropped and redrawn). Every link is
        mirrored on the chosen peer, so the effective degree is roughly
        num_peers; duplicate links are possible.
        """
        self.peers = {}
        for a in self.agents:
            p = []
            while (len(p) <= (num_peers // 2)):
                p.append(random.choice(self.agents))
                if (p[(- 1)] == a):
                    # Drop self-links; the while condition retries the draw.
                    p.pop()
            self.peers[a.id] = (self.peers.get(a.id, []) + p)
            for peer in p:
                # Mirror the link on the peer's side.
                self.peers[peer.id] = (self.peers.get(peer.id, []) + [a])

    def tick(self):
        """Advance one time step: deliver due messages (subject to the
        reliability probability), then tick every agent."""
        if (self.time in self.objqueue):
            for (recipient, obj) in self.objqueue[self.time]:
                if (random.random() < self.reliability):
                    recipient.on_receive(obj)
            del self.objqueue[self.time]
        for a in self.agents:
            a.tick()
        self.time += 1

    def run(self, steps):
        """Run the simulation for *steps* ticks."""
        for i in range(steps):
            self.tick()

    def broadcast(self, sender, obj):
        """Queue *obj* for each of sender's peers, each with its own latency."""
        for p in self.peers[sender.id]:
            recv_time = (self.time + self.latency_distribution_sample())
            if (recv_time not in self.objqueue):
                self.objqueue[recv_time] = []
            self.objqueue[recv_time].append((p, obj))

    def direct_send(self, to_id, obj):
        """Queue *obj* for the agent with id *to_id* (linear scan of agents)."""
        for a in self.agents:
            if (a.id == to_id):
                recv_time = (self.time + self.latency_distribution_sample())
                if (recv_time not in self.objqueue):
                    self.objqueue[recv_time] = []
                self.objqueue[recv_time].append((a, obj))

    def knock_offline_random(self, n):
        """Disconnect *n* distinct random agents by severing all their links."""
        ko = {}
        while (len(ko) < n):
            c = random.choice(self.agents)
            ko[c.id] = c
        for c in ko.values():
            self.peers[c.id] = []
        for a in self.agents:
            self.peers[a.id] = [x for x in self.peers[a.id] if (x.id not in ko)]

    def partition(self):
        """Split the network into two halves with no cross-partition links."""
        a = {}
        while (len(a) < (len(self.agents) / 2)):
            c = random.choice(self.agents)
            a[c.id] = c
        for c in self.agents:
            if (c.id in a):
                self.peers[c.id] = [x for x in self.peers[c.id] if (x.id in a)]
            else:
                self.peers[c.id] = [x for x in self.peers[c.id] if (x.id not in a)]
def split_and_convert(args):
    """Convert a HuggingFace GPT-J checkpoint to FasterTransformer binary weights.

    Writes one ``.bin`` file per tensor into ``<args.saved_dir>/<N>-gpu/``
    (N = ``args.infer_gpu_num``) plus a ``config.ini`` describing the model.
    Q/K/V projections are fused into a single ``query_key_value`` tensor and
    GEMM weights are transposed into FasterTransformer's layout.
    """
    saved_dir = args.saved_dir + ('/%d-gpu/' % args.infer_gpu_num)
    # exist_ok avoids the check-then-create race of exists()/makedirs().
    os.makedirs(saved_dir, exist_ok=True)
    t_gpu_num = args.trained_gpu_num
    i_gpu_num = args.infer_gpu_num
    # Each trained partition must split evenly across the inference GPUs.
    assert (i_gpu_num % t_gpu_num) == 0
    factor = i_gpu_num // t_gpu_num
    model = GPTJForCausalLM.from_pretrained(args.in_file)
    try:
        # Record the CLI args plus the HF model config so the loader can rebuild it.
        config = configparser.ConfigParser()
        config['gpt'] = {}
        for key in vars(args):
            config['gpt'][key] = f'{vars(args)[key]}'
        for (k, v) in vars(model.config).items():
            config['gpt'][k] = f'{v}'
        config['gpt']['weight_data_type'] = args.weight_data_type
        with open((Path(saved_dir) / 'config.ini').as_posix(), 'w') as configfile:
            config.write(configfile)
    except Exception:
        # Best effort: the converted weights remain usable without the ini file.
        print('Fail to save the config in config.ini.')
    np_weight_data_type = get_weight_data_type(args.weight_data_type)
    # HF parameter-name fragments and their FT counterparts, kept index-aligned.
    huggingface_model_name_pattern = ['ln_1.bias', 'ln_1.weight', 'attn.q_proj.weight', 'attn.out_proj.weight', 'mlp.fc_in.bias', 'mlp.fc_in.weight', 'mlp.fc_out.bias', 'mlp.fc_out.weight']
    ft_model_name_pattern = ['input_layernorm.bias', 'input_layernorm.weight', 'attention.query_key_value.weight', 'attention.dense.weight', 'mlp.dense_h_to_4h.bias', 'mlp.dense_h_to_4h.weight', 'mlp.dense_4h_to_h.bias', 'mlp.dense_4h_to_h.weight']
    torch.multiprocessing.set_start_method('spawn')
    pool = multiprocessing.Pool(args.processes)
    # Hoisted out of the loop: state_dict() rebuilds its mapping on every call.
    state_dict = model.state_dict()
    for (name, param) in model.named_parameters():
        if name.find('weight') == -1 and name.find('bias') == -1:
            continue
        print(name)
        # Non-layer tensors are written whole; layer tensors are renamed/split below.
        if name == 'transformer.wte.weight':
            param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + 'model.wte.bin')
        elif name == 'transformer.ln_f.bias':
            param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + 'model.final_layernorm.bias.bin')
        elif name == 'transformer.ln_f.weight':
            param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + 'model.final_layernorm.weight.bin')
        elif name == 'lm_head.weight':
            param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + 'model.lm_head.weight.bin')
        elif name == 'lm_head.bias':
            param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + 'model.lm_head.bias.bin')
        else:
            for i in range(len(huggingface_model_name_pattern)):
                if name.find(huggingface_model_name_pattern[i]) != -1:
                    if name.find('attn.q_proj.weight') != -1:
                        # Fuse Q, K, V into one (hidden, 3, hidden) tensor for FT.
                        layer = name.split('.')[2]
                        base_k = f'transformer.h.{layer}.'
                        QKV_w = torch.stack([state_dict[base_k + 'attn.q_proj.weight'], state_dict[base_k + 'attn.k_proj.weight'], state_dict[base_k + 'attn.v_proj.weight']])
                        QKV_w = QKV_w.permute(2, 0, 1)
                        weights = QKV_w.detach().cpu().numpy().astype(np_weight_data_type)
                    else:
                        weights = param.detach().cpu().numpy().astype(np_weight_data_type)
                    if name.find('mlp.fc_in.weight') != -1 or name.find('mlp.fc_out.weight') != -1 or name.find('attn.out_proj.weight') != -1:
                        # FT expects GEMM weights transposed relative to HF.
                        weights = weights.T
                    new_name = name.replace('transformer.h.', 'layers.').replace(huggingface_model_name_pattern[i], ft_model_name_pattern[i])
                    pool.starmap(split_and_convert_process, [(0, saved_dir, factor, new_name, weights)])
    pool.close()
    pool.join()
class TelemetryMessageVehicleBrakingSystem(object):
    """Swagger-generated model for a vehicle braking-system telemetry message.

    Fix: the ``@property`` / ``@parking.setter`` decorators had been lost, so
    the second ``def parking`` shadowed the first and reading ``obj.parking``
    on an unset instance returned a bound method instead of the value.
    Also replaced the Python-2 compat ``six.iteritems`` with plain ``.items()``.
    """

    # Attribute name -> swagger type, and attribute name -> JSON key.
    swagger_types = {'parking': 'bool'}
    attribute_map = {'parking': 'parking'}

    def __init__(self, parking=None):
        """Create the model; ``parking`` is optional."""
        self._parking = None
        self.discriminator = None
        if parking is not None:
            self.parking = parking

    @property
    def parking(self):
        """Whether the parking brake is engaged (None if never set)."""
        return self._parking

    @parking.setter
    def parking(self, parking):
        self._parking = parking

    def to_dict(self):
        """Return the model's properties as a plain dict (recursing into models)."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [x.to_dict() if hasattr(x, 'to_dict') else x for x in value]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: (v.to_dict() if hasattr(v, 'to_dict') else v) for (k, v) in value.items()}
            else:
                result[attr] = value
        if issubclass(TelemetryMessageVehicleBrakingSystem, dict):
            for (key, value) in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        return self.to_str()

    def __eq__(self, other):
        if not isinstance(other, TelemetryMessageVehicleBrakingSystem):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
def read_where_def(line: str) -> tuple[Literal['where'], bool] | None:
    """Detect a Fortran WHERE statement/construct on ``line``.

    Returns ``('where', flag)`` when the line opens a WHERE, with the boolean
    distinguishing the two forms the parser handles, or None otherwise.
    """
    # Mask string contents so '!' inside strings isn't mistaken for a comment.
    masked = strip_strings(line, maintain_len=True)
    code_only = masked.split('!')[0].rstrip()
    match = FRegex.WHERE.match(code_only)
    if match is None:
        return None
    tail = line[match.end(0):]
    close_paren = find_paren_match(tail)
    if close_paren < 0:
        # Mask's closing paren not on this line.
        return ('where', True)
    if FRegex.WORD.match(tail[close_paren + 1:].strip()):
        # Something follows the mask on the same line.
        return ('where', True)
    return ('where', False)
def test_icons() -> None:
    """Icon command writes the grid plus per-name and per-id icon sets."""
    expected_log = (
        b'INFO Icons are written to out/icons_by_name and out/icons_by_id.\n'
        b'INFO Icon grid is written to out/icon_grid.svg.\n'
        b'INFO Icon grid is written to doc/grid.svg.\n'
    )
    run(COMMAND_LINES['icons'], expected_log)
    by_name = OUTPUT_PATH / 'icons_by_name'
    by_id = OUTPUT_PATH / 'icons_by_id'
    assert (OUTPUT_PATH / 'icon_grid.svg').is_file()
    assert by_name.is_dir()
    assert by_id.is_dir()
    assert (by_name / 'Rontgen apple.svg').is_file()
    assert (by_id / 'apple.svg').is_file()
def test_reaction_python_only():
    """Reactions are Python-side descriptors: type-checked, read-only, introspectable."""
    m = MyObject1()
    # event.reaction() rejects non-function arguments.
    with raises(TypeError):
        event.reaction(3)
    with raises(TypeError):
        event.reaction(isinstance)
    assert isinstance(m.r1, event._reaction.Reaction)
    # The reaction attribute can be neither reassigned nor deleted.
    with raises(AttributeError):
        m.r1 = 3
    with raises(AttributeError):
        del m.r1
    # Both the class-level descriptor and the instance repr identify themselves.
    assert 'reaction' in repr(m.__class__.r1).lower()
    assert 'reaction' in repr(m.r1).lower()
    assert 'r1' in repr(m.r1)
def get_exchange_cache_path(security_type='future', exchange='shfe', the_date=None, data_type='day_kdata'):
    """Build (and ensure the directory for) the cache-file path of one
    exchange/date/data-type combination.

    Fix: ``the_date`` previously defaulted to ``datetime.today()`` evaluated
    once at import time, so a long-running process kept using a stale date.
    It now defaults to None and resolves to *today at call time*; callers that
    pass an explicit date are unaffected.
    """
    if the_date is None:
        the_date = datetime.today()
    the_dir = get_exchange_cache_dir(security_type=security_type, exchange=exchange, the_year=the_date.year, data_type=data_type)
    # exist_ok avoids the exists()/makedirs() race between processes.
    os.makedirs(the_dir, exist_ok=True)
    return os.path.join(the_dir, fooltrader.utils.utils.to_time_str(the_time=the_date, time_fmt='%Y%m%d'))
def convert_to_info_str(shapes: List[Union[IntImm, IntVar]], is_constant=False) -> str:
    """Render a shape list as a bracketed, comma-separated info string.

    With ``is_constant`` every dim must be an IntImm and only its value is
    emitted; otherwise each dim is rendered as an IntImm/IntVar constructor.
    """
    parts = []
    for dim in shapes:
        if is_constant:
            if not isinstance(dim, IntImm):
                raise RuntimeError(f"Constant got type {type(dim)} can't have non-IntImm input!")
            parts.append(str(dim.value()))
            continue
        if isinstance(dim, IntImm):
            parts.append(f"""IntImm(value={dim.value()}, name="{dim._attrs['name']}")""")
        else:
            parts.append(f"""IntVar(values={dim._attrs['values']}, name="{dim._attrs['name']}")""")
    return f"[{', '.join(parts)}]"
# NOTE(review): a decorator head appears stripped here (the bare ``()`` below
# is a no-op expression) — presumably ``@pytest.fixture()``; confirm against
# the original elastic-apm sanic test suite.
()
def sanic_elastic_app(elasticapm_client):
    """Factory fixture: builds an ElasticAPM-instrumented Sanic test app.

    Each call to the returned ``_generate`` yields ``(app, apm)``; global
    instrumentation is removed when the generator is finalized.
    """
    def _generate(error_handler=None, elastic_client=None, elastic_client_cls=None, config=None, transaction_name_callback=None, user_context_callback=None, custom_context_callback=None, label_info_callback=None):
        Sanic.test_mode = True
        args = {'name': 'elastic-apm-test-app'}
        if error_handler:
            args['error_handler'] = error_handler
        app = Sanic(**args)
        # Forward only the ElasticAPM options that were explicitly provided.
        apm_args = {}
        for (key, value) in {'client': elastic_client, 'client_cls': elastic_client_cls, 'config': config, 'transaction_name_callback': transaction_name_callback, 'user_context_callback': user_context_callback, 'custom_context_callback': custom_context_callback, 'label_info_callback': label_info_callback}.items():
            if (value is not None):
                apm_args[key] = value
        apm = ElasticAPM(app=app, **apm_args)
        # Newer Sanic ships TestManager in sanic_testing; older versions bundle a client.
        try:
            from sanic_testing import TestManager
        except ImportError:
            from sanic.testing import SanicTestClient as TestManager
        TestManager(app=app)
        bp = Blueprint(name='test', url_prefix='/apm', version='v1')
        # NOTE(review): decorator head stripped — presumably
        # ``@bp.exception(ValueError)``; as written this line is a no-op.
        (ValueError)
        async def handle_value_error(request, exception):
            return json({'source': 'value-error-custom'}, status=500)
        async def attribute_error_handler(request, expception):
            return json({'source': 'custom-handler'}, status=500)
        app.error_handler.add(AttributeError, attribute_error_handler)
        # NOTE(review): the bare tuple/str expressions before each handler below
        # are stripped route decorators — presumably ``@bp.route(...)`` for the
        # handlers before ``app.blueprint`` and ``@app.route(...)`` after;
        # without them the handlers are never registered. Confirm upstream.
        ('/unhandled-exception')
        async def raise_something(request):
            raise CustomException('Unhandled')
        ('/', methods=['GET', 'POST'])
        def default_route(request: Request):
            with async_capture_span('test'):
                pass
            return json({'response': 'ok'})
        ('/greet/<name:str>')
        async def greet_person(request: Request, name: str):
            return json({'response': f'Hello {name}'})
        ('/capture-exception')
        async def capture_exception(request):
            try:
                (1 / 0)
            except ZeroDivisionError:
                (await apm.capture_exception())
            return json({'response': 'invalid'}, 500)
        app.blueprint(blueprint=bp)
        ('/raise-exception')
        async def raise_exception(request):
            raise AttributeError
        ('/fallback-default-error')
        async def raise_default_error(request):
            raise CustomException
        ('/raise-value-error')
        async def raise_value_error(request):
            raise ValueError
        ('/add-custom-headers')
        async def custom_headers(request):
            return json({'data': 'message'}, headers={'sessionid': 1234555})
        try:
            (yield (app, apm))
        finally:
            # Undo global instrumentation so other tests start from a clean slate.
            elasticapm.uninstrument()
    return _generate
class OptionPlotoptionsPieSonificationContexttracksMappingLowpassResonance(Options):
    """Generated Highcharts option wrapper: lowpass-filter resonance mapping
    for pie-series sonification context tracks.

    Fix: the ``@property`` / ``@<name>.setter`` decorators had been lost, so
    each second ``def`` silently shadowed its getter, leaving only setter-like
    methods. Restored so each option reads/writes the shared config store.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def get_scale(scale: int, ds_root: Path):
    """Resolve the training downscale factor.

    A positive ``scale`` is returned unchanged; otherwise it is auto-detected
    from the short side of the first image under ``ds_root/rgb_1x``.
    """
    if scale > 0:
        return scale
    first_image = multi_glob_sorted(ds_root / 'rgb_1x', ['*.png', '*.jpg'])[0]
    img = read_image(first_image)
    short_side = min(img.width, img.height)
    # Thresholds: >=800px -> 4x, >=400px -> 2x, otherwise no downscaling.
    scale = 1
    if short_side >= 800:
        scale = 4
    elif short_side >= 400:
        scale = 2
    logger.info(f'Training auto-scale: image resolution {img.width}x{img.height}, scale {scale}')
    return scale
def _import_provider_cls(provider_cls_name: str) -> Optional[Type[providers.Provider]]:
    """Resolve a provider class from its dotted import path.

    Returns None when the target attribute does not exist; raises SchemaError
    when the module cannot be imported or the resolved class is not a
    ``providers.Provider`` subclass.
    """
    try:
        cls = _import_string(provider_cls_name)
    except AttributeError:
        return None
    except (ImportError, ValueError) as exception:
        raise SchemaError(f'Can not import provider "{provider_cls_name}"') from exception
    if isinstance(cls, type) and not issubclass(cls, providers.Provider):
        raise SchemaError(f'Provider class "{cls}" is not a subclass of providers base class')
    return cls
def main(instantiate_gui=True):
    """Open an ivtk viewer showing a cone and attach a pipeline browser.

    Returns the (viewer, browser, actor) triple so callers can keep references
    alive while the GUI runs.
    """
    from tvtk.tools import ivtk
    viewer = ivtk.viewer(browser=False, instantiate_gui=instantiate_gui)
    cone = tvtk.ConeSource()
    mapper = tvtk.PolyDataMapper(input_connection=cone.output_port)
    actor = tvtk.Actor(mapper=mapper)
    viewer.scene.add_actor(actor)
    viewer.scene.reset_zoom()
    browser = PipelineBrowser(viewer.scene)
    browser.show()
    return (viewer, browser, actor)
class Lexer(object):
    """Regex-driven tokenizer.

    All (regex, type) rules are combined into one alternation with a named
    group per rule, so a single ``match`` finds both the lexeme and its type.
    """

    def __init__(self, rules, skip_whitespace=True):
        parts = []
        self.group_type = {}
        for idx, (regex, type) in enumerate(rules, start=1):
            groupname = 'GROUP%s' % idx
            parts.append('(?P<%s>%s)' % (groupname, regex))
            self.group_type[groupname] = type
        self.regex = re.compile('|'.join(parts))
        self.skip_whitespace = skip_whitespace
        self.re_ws_skip = re.compile('\\S')

    def input(self, buf):
        """Load a new buffer and reset the read position."""
        self.buf = buf
        self.pos = 0

    def token(self):
        """Return the next Token, or None at end of buffer.

        Raises LexerError when no rule matches at the current position.
        """
        if self.pos >= len(self.buf):
            return None
        if self.skip_whitespace:
            # Jump to the next non-whitespace character.
            ws = self.re_ws_skip.search(self.buf, self.pos)
            if not ws:
                return None
            self.pos = ws.start()
        m = self.regex.match(self.buf, self.pos)
        if not m:
            raise LexerError(self.pos)
        groupname = m.lastgroup
        tok = Token(self.group_type[groupname], m.group(groupname), self.pos)
        self.pos = m.end()
        return tok

    def tokens(self):
        """Yield tokens until the buffer is exhausted."""
        while True:
            tok = self.token()
            if tok is None:
                return
            yield tok
# Fix: the two skipif lines began with a bare ``.skipif(`` (a syntax error) —
# the ``@pytest.mark`` decorator prefix was evidently stripped; restored.
@pytest.mark.skipif(not has_torch, reason='needs PyTorch')
@pytest.mark.skipif(not has_torch_cuda_gpu, reason='needs a GPU')
def test_issue564():
    """A CuPy array built from a CUDA torch tensor keeps its shape and zeros."""
    import torch

    if CupyOps.xp is not None:
        ops = CupyOps()
        t = torch.zeros((10, 2)).cuda()
        a = ops.asarray(t)
        assert a.shape == t.shape
        # Compare against a freshly-allocated all-zero float32 array.
        ops.xp.testing.assert_allclose(a, ops.alloc2f(10, 2))
class EmbedStoreRetriever():
    """Retrieves context documents for a prompt from a remote embed store."""

    def __init__(self, dataset_id, num_docs, retrieving_method='similarity_score') -> None:
        """Validate ``dataset_id`` and record the retrieval parameters."""
        self._validate_dataset_id(dataset_id)
        self.dataset_id = dataset_id
        self.num_docs = num_docs
        self.retrieving_method = retrieving_method

    def _validate_dataset_id(self, dataset_id):
        """Raise InvalidInput unless ``dataset_id`` names a known dataset."""
        available_dataset_id = ['podcasts_01', 'arxiv_01', 'wikipedia_01']
        if dataset_id not in available_dataset_id:
            raise InvalidInput(f'Invalid dataset_id : Has to be one of the following - {available_dataset_id}')

    def query(self, prompt, post_processing_config=None) -> list:
        """Query the embed store; returns the contexts list on success.

        Fix: ``post_processing_config`` previously defaulted to a shared
        mutable ``{}``; it now defaults to None with a fresh dict per call.
        """
        if post_processing_config is None:
            post_processing_config = {}
        request_params = {'dataset_id': self.dataset_id, 'filters': post_processing_config.get('filters', {}), 'num_docs': self.num_docs}
        embed_store_response = call_embed_store(prompt, request_params)
        if embed_store_response.get('success'):
            return embed_store_response.get('contexts')
        # NOTE(review): falls through to None on failure despite the ``list``
        # annotation — preserved for caller compatibility.
        return None
# Fix: the line began with a bare ``.parametrize(`` (a syntax error) — the
# ``@pytest.mark`` decorator prefix was evidently stripped; restored.
@pytest.mark.parametrize('argset', primal_test_argsets() + primal_trial_argsets())
def test_replace_arg_primal(primal_form, argset):
    """replace_function swaps the labelled form's argument/function in place;
    invalid replacements raise the argset's expected error."""
    new_arg = argset.new_arg
    idxs = argset.idxs
    error = argset.error
    replace_function = argset.replace_function
    arg_idx = argset.arg_idx
    # Put a TrialFunction into the subject slot so there is something to replace.
    primal_form = primal_form.label_map(lambda t: t.has_label(subject), replace_subject(TrialFunction(V0)), drop)
    if error is None:
        new_form = primal_form.label_map(all_terms, map_if_true=replace_function(new_arg, **idxs))
        if 'new_idx' in idxs:
            # Indexed replacement: the form holds the selected component's operand.
            split_arg = new_arg if type(new_arg) is tuple else split(new_arg)
            new_arg = split_arg[idxs['new_idx']].ufl_operands[0]
        if isinstance(new_arg, Argument):
            assert new_form.form.arguments()[arg_idx] is new_arg
        elif type(new_arg) is Function:
            assert new_form.form.coefficients()[0] is new_arg
    else:
        with pytest.raises(error):
            new_form = primal_form.label_map(all_terms, map_if_true=replace_function(new_arg, **idxs))
class Not(ConstraintExpr):
    """Logical negation of a constraint expression."""

    __slots__ = ('constraint',)

    def __init__(self, constraint: ConstraintExpr) -> None:
        """Wrap the constraint expression to negate."""
        self.constraint = constraint

    def check(self, description: Description) -> bool:
        """True iff the wrapped constraint does NOT hold for ``description``."""
        return not self.constraint.check(description)

    def is_valid(self, data_model: DataModel) -> bool:
        """Validity of the negation equals validity of the wrapped constraint."""
        return self.constraint.is_valid(data_model)

    def __eq__(self, other: Any) -> bool:
        return isinstance(other, Not) and (self.constraint == other.constraint)

    def encode(self) -> models_pb2.Query.ConstraintExpr.Not:
        """Serialize into the protobuf ``Not`` message."""
        not_pb = models_pb2.Query.ConstraintExpr.Not()
        constraint_expression_pb = ConstraintExpr._encode(self.constraint)
        not_pb.expression.CopyFrom(constraint_expression_pb)
        return not_pb

    @classmethod
    def decode(cls, not_pb: Any) -> 'Not':
        """Deserialize from the protobuf ``Not`` message.

        Fix: restored the stripped ``@classmethod`` decorator — the method
        takes ``cls`` and is clearly an alternate constructor.
        """
        expression = ConstraintExpr._decode(not_pb.expression)
        return cls(expression)
def test_item_group_set_offset_negative(monkeypatch: MonkeyPatch):
    """With offset=-1, edited values are stored as entered-value + 1."""
    scripted_inputs = ['1 2 3', '5', '6', '7']
    # Feed the scripted answers to every input() prompt, in order.
    monkeypatch.setattr('builtins.input', lambda: scripted_inputs.pop(0))
    group = item.create_item_group(['name_1', 'name_2', 'name_3'], [0, 1, 5], [50, 50, 50], 'value', 'name', offset=(-1))
    group.edit()
    assert group.values == [6, 7, 8]
def _decode_ds_note(log, contract):
    """Decode a DSNote-style event log into a call description, or None.

    topic0 must start with a known 4-byte function selector and be
    zero-padded; the call arguments are recovered from the log data.
    """
    selector, tail = log.topics[0][:4], log.topics[0][4:]
    # Unknown selector, or non-zero padding after it: not a DSNote for us.
    if selector.hex() not in contract.selectors or sum(tail):
        return None
    name = contract.selectors[selector.hex()]
    data = bytes.fromhex(log.data[2:])
    try:
        func, args = contract.decode_input(data[data.index(selector):])
    except ValueError:
        return None
    abi_inputs = contract.get_method_object(selector.hex()).abi['inputs']
    decoded_args = [
        {'name': abi['name'], 'type': abi['type'], 'value': arg, 'decoded': True}
        for arg, abi in zip(args, abi_inputs)
    ]
    return {'name': name, 'address': log.address, 'decoded': True, 'data': decoded_args}
def dipole3d_12(ax, da, A, bx, db, B, R):
    """Machine-generated (CSE-optimized) Cartesian dipole-moment integrals for
    a Gaussian shell pair, evaluated about origin ``R``.

    Args:
        ax, bx: Gaussian exponents of the shells on centers A and B.
        da, db: contraction coefficients of the two shells.
        A, B: 3-element coordinates of the two centers.
        R: 3-element dipole-operator origin.

    Returns:
        numpy array of shape (3, 3, 6) of summed (contracted) integrals.
        Exact axis semantics come from the code generator; confirm against
        the sibling ``dipole3d_*`` routines.

    NOTE(review): several float literals look truncated by extraction
    (e.g. ``x16 = 1.``, the many ``0. * ...`` products, ``5. * da``); they
    were presumably full-precision constants in the generated original —
    verify against the generator before trusting numeric output.
    """
    result = numpy.zeros((3, 3, 6), dtype=float)
    # x0 = 1 / (ax + bx): reciprocal of the combined exponent.
    x0 = ((ax + bx) ** (- 1.0))
    x1 = (3.0 * x0)
    # x2: Gaussian product center coordinate along x.
    x2 = (x0 * ((ax * A[0]) + (bx * B[0])))
    x3 = (- x2)
    x4 = (x3 + A[0])
    x5 = (x3 + B[0])
    x6 = (x4 * x5)
    x7 = (2.0 * x6)
    x8 = (x3 + R[0])
    x9 = (x4 * x8)
    x10 = (x5 * x8)
    x11 = (2.0 * x10)
    x12 = (((- 2.0) * x2) + B[0])
    x13 = (x0 * (x12 + R[0]))
    x14 = (x0 + x11)
    x15 = (x13 + (x14 * x4))
    x16 = 1.
    # x17 = ax*bx/(ax+bx): reduced exponent; x18 carries the Gaussian-product
    # prefactor exp(-x17 * |A-B|^2) times the contraction coefficients.
    x17 = ((ax * bx) * x0)
    x18 = (((5. * da) * db) * numpy.exp(((- x17) * ((((A[0] - B[0]) ** 2) + ((A[1] - B[1]) ** 2)) + ((A[2] - B[2]) ** 2)))))
    x19 = (numpy.sqrt(x0) * x18)
    x20 = (x0 * x19)
    x21 = (x16 * x20)
    x22 = (0. * x21)
    # x23: Gaussian product center coordinate along y.
    x23 = (x0 * ((ax * A[1]) + (bx * B[1])))
    x24 = (- x23)
    x25 = (x24 + B[1])
    x26 = (0.5 * x0)
    x27 = (x19 * x26)
    x28 = (x15 * x27)
    # x29: Gaussian product center coordinate along z.
    x29 = (x0 * ((ax * A[2]) + (bx * B[2])))
    x30 = (- x29)
    x31 = (x30 + B[2])
    x32 = ((0. * (x25 ** 2)) + (0. * x26))
    x33 = (x26 + x9)
    x34 = ((x0 ** 1.5) * x18)
    x35 = (x16 * x34)
    x36 = (x33 * x35)
    x37 = (x20 * x31)
    x38 = ((0. * x26) + (0. * (x31 ** 2)))
    x39 = (x24 + A[1])
    x40 = (0. * x21)
    x41 = (x40 * (x13 + (x14 * x5)))
    x42 = (x25 * x39)
    x43 = (x26 + x42)
    x44 = (x10 + x26)
    x45 = (x34 * x44)
    x46 = (((- 2.0) * x23) + B[1])
    x47 = (2.0 * x42)
    x48 = ((x0 * (x46 + A[1])) + (x25 * (x0 + x47)))
    x49 = (x40 * x8)
    x50 = (x21 * x8)
    x51 = (x30 + A[2])
    x52 = (x20 * x25)
    x53 = (x31 * x51)
    x54 = (x26 + x53)
    x55 = (((- 2.0) * x29) + B[2])
    x56 = (2.0 * x53)
    x57 = ((x0 * (x55 + A[2])) + (x31 * (x0 + x56)))
    x58 = ((x0 * (x12 + A[0])) + (x5 * (x0 + x7)))
    x59 = (x24 + R[1])
    x60 = (x40 * x59)
    x61 = (x26 + x6)
    x62 = (x25 * x59)
    x63 = (x26 + x62)
    x64 = (x34 * x63)
    x65 = (x0 * (x46 + R[1]))
    x66 = (2.0 * x62)
    x67 = (x0 + x66)
    x68 = (x40 * ((x25 * x67) + x65))
    x69 = (x21 * x59)
    x70 = (x39 * x59)
    x71 = (x26 + x70)
    x72 = (x35 * x71)
    x73 = ((0. * x26) + (0. * (x5 ** 2)))
    x74 = ((x39 * x67) + x65)
    x75 = (x27 * x74)
    x76 = (x20 * x5)
    x77 = (x30 + R[2])
    x78 = (x40 * x77)
    x79 = (x31 * x77)
    x80 = (x26 + x79)
    x81 = (x34 * x80)
    x82 = (x21 * x77)
    x83 = (x0 * (x55 + R[2]))
    x84 = (2.0 * x79)
    x85 = (x0 + x84)
    x86 = (x40 * ((x31 * x85) + x83))
    x87 = (x51 * x77)
    x88 = (x26 + x87)
    x89 = (x35 * x88)
    x90 = ((x51 * x85) + x83)
    x91 = (x27 * x90)
    # Contracted integrals: numpy.sum collapses any contraction dimensions
    # carried by da/db into a scalar per (component, component, pair) slot.
    result[(0, 0, 0)] = numpy.sum((x22 * ((x0 * (((x1 + x11) + x7) + (2.0 * x9))) + ((2.0 * x15) * x5))))
    result[(0, 0, 1)] = numpy.sum((x25 * x28))
    result[(0, 0, 2)] = numpy.sum((x28 * x31))
    result[(0, 0, 3)] = numpy.sum((x32 * x36))
    result[(0, 0, 4)] = numpy.sum(((x25 * x33) * x37))
    result[(0, 0, 5)] = numpy.sum((x36 * x38))
    result[(0, 1, 0)] = numpy.sum((x39 * x41))
    result[(0, 1, 1)] = numpy.sum((x43 * x45))
    result[(0, 1, 2)] = numpy.sum(((x37 * x39) * x44))
    result[(0, 1, 3)] = numpy.sum((x48 * x49))
    result[(0, 1, 4)] = numpy.sum(((x37 * x43) * x8))
    result[(0, 1, 5)] = numpy.sum(((x38 * x39) * x50))
    result[(0, 2, 0)] = numpy.sum((x41 * x51))
    result[(0, 2, 1)] = numpy.sum(((x44 * x51) * x52))
    result[(0, 2, 2)] = numpy.sum((x45 * x54))
    result[(0, 2, 3)] = numpy.sum(((x32 * x50) * x51))
    result[(0, 2, 4)] = numpy.sum(((x52 * x54) * x8))
    result[(0, 2, 5)] = numpy.sum((x49 * x57))
    result[(1, 0, 0)] = numpy.sum((x58 * x60))
    result[(1, 0, 1)] = numpy.sum((x61 * x64))
    result[(1, 0, 2)] = numpy.sum(((x37 * x59) * x61))
    result[(1, 0, 3)] = numpy.sum((x4 * x68))
    result[(1, 0, 4)] = numpy.sum(((x37 * x4) * x63))
    result[(1, 0, 5)] = numpy.sum(((x38 * x4) * x69))
    result[(1, 1, 0)] = numpy.sum((x72 * x73))
    result[(1, 1, 1)] = numpy.sum((x5 * x75))
    result[(1, 1, 2)] = numpy.sum(((x37 * x5) * x71))
    result[(1, 1, 3)] = numpy.sum((x22 * ((x0 * (((x1 + x47) + x66) + (2.0 * x70))) + ((2.0 * x25) * x74))))
    result[(1, 1, 4)] = numpy.sum((x31 * x75))
    result[(1, 1, 5)] = numpy.sum((x38 * x72))
    result[(1, 2, 0)] = numpy.sum(((x51 * x69) * x73))
    result[(1, 2, 1)] = numpy.sum(((x51 * x63) * x76))
    result[(1, 2, 2)] = numpy.sum(((x54 * x59) * x76))
    result[(1, 2, 3)] = numpy.sum((x51 * x68))
    result[(1, 2, 4)] = numpy.sum((x54 * x64))
    result[(1, 2, 5)] = numpy.sum((x57 * x60))
    result[(2, 0, 0)] = numpy.sum((x58 * x78))
    result[(2, 0, 1)] = numpy.sum(((x52 * x61) * x77))
    result[(2, 0, 2)] = numpy.sum((x61 * x81))
    result[(2, 0, 3)] = numpy.sum(((x32 * x4) * x82))
    result[(2, 0, 4)] = numpy.sum(((x4 * x52) * x80))
    result[(2, 0, 5)] = numpy.sum((x4 * x86))
    result[(2, 1, 0)] = numpy.sum(((x39 * x73) * x82))
    result[(2, 1, 1)] = numpy.sum(((x43 * x76) * x77))
    result[(2, 1, 2)] = numpy.sum(((x39 * x76) * x80))
    result[(2, 1, 3)] = numpy.sum((x48 * x78))
    result[(2, 1, 4)] = numpy.sum((x43 * x81))
    result[(2, 1, 5)] = numpy.sum((x39 * x86))
    result[(2, 2, 0)] = numpy.sum((x73 * x89))
    result[(2, 2, 1)] = numpy.sum(((x5 * x52) * x88))
    result[(2, 2, 2)] = numpy.sum((x5 * x91))
    result[(2, 2, 3)] = numpy.sum((x32 * x89))
    result[(2, 2, 4)] = numpy.sum((x25 * x91))
    result[(2, 2, 5)] = numpy.sum((x22 * ((x0 * (((x1 + x56) + x84) + (2.0 * x87))) + ((2.0 * x31) * x90))))
    return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.