code stringlengths 281 23.7M |
|---|
def test_pad_sequences():
    """pad_sequences: padding side, truncation side, and custom fill value."""
    seqs = [[1], [1, 2], [1, 2, 3]]
    # pad to length 3, zeros on the left / on the right
    padded = pad_sequences(seqs, maxlen=3, padding='pre')
    assert_allclose(padded, [[0, 0, 1], [0, 1, 2], [1, 2, 3]])
    padded = pad_sequences(seqs, maxlen=3, padding='post')
    assert_allclose(padded, [[1, 0, 0], [1, 2, 0], [1, 2, 3]])
    # truncate to length 2, dropping from the front / from the back
    padded = pad_sequences(seqs, maxlen=2, truncating='pre')
    assert_allclose(padded, [[0, 1], [1, 2], [2, 3]])
    padded = pad_sequences(seqs, maxlen=2, truncating='post')
    assert_allclose(padded, [[0, 1], [1, 2], [1, 2]])
    # non-zero fill value
    padded = pad_sequences(seqs, maxlen=3, value=1)
    assert_allclose(padded, [[1, 1, 1], [1, 1, 2], [1, 2, 3]])
# NOTE(review): the parameter spec below was almost certainly a decorator
# (e.g. '@winsdkapi(cc=STDCALL, ...)') whose '@winsdkapi' prefix was lost in
# extraction -- confirm against the original hook registration.
(cc=STDCALL, params={'nCount': DWORD, 'lpHandles': HANDLE, 'bWaitAll': BOOL, 'dwMilliseconds': DWORD})
def hook_WaitForMultipleObjects(ql: Qiling, address: int, params):
    """Emulated WaitForMultipleObjects: make the current thread wait on every
    handle in the lpHandles array, then report 0 (WAIT_OBJECT_0).

    bWaitAll and dwMilliseconds are accepted but not read by this hook.
    """
    nCount = params['nCount']          # number of handles in the array
    lpHandles = params['lpHandles']    # pointer to the HANDLE array
    for i in range(nCount):
        # read the i-th pointer-sized handle value from emulated memory
        handle_value = ql.unpack(ql.mem.read((lpHandles + (i * ql.arch.pointersize)), ql.arch.pointersize))
        if handle_value:
            # resolve the handle to its backing object and block on it
            thread = ql.os.handle_manager.get(handle_value).obj
            ql.os.thread_manager.cur_thread.waitfor(thread)
    return 0
# NOTE(review): '_test' below looks like the tail of a stripped decorator
# (e.g. '@keras_test') -- confirm against the original file.
_test
def test_zeropadding3d_legacy_interface():
    """The legacy 'dim_ordering' kwarg must produce the same layer config as
    the modern 'data_format' spelling for ZeroPadding3D."""
    old_layer = keras.layers.ZeroPadding3D((2, 2, 2), dim_ordering='tf', name='zp3d')
    new_layer = keras.layers.ZeroPadding3D((2, 2, 2), data_format='channels_last', name='zp3d')
    # configs are compared via their JSON serialisation
    assert (json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config()))
class VTAnalysis():
    """Query VirusTotal (v2 API) for file scan reports.

    Rotates through a pool of API keys as they become unusable, caches the
    positives count per file MD5 in ``self.reports``, and parks files whose
    reports are not ready yet in ``self.waiting_queue`` for a later retry.
    """

    def __init__(self, api_keys_list, waiting_time=16):
        # NOTE(review): both URL literals were truncated in the source;
        # restored to the public VirusTotal v2 endpoints -- confirm.
        self.REPORT_URL = 'https://www.virustotal.com/vtapi/v2/file/report'
        self.SCAN_URL = 'https://www.virustotal.com/vtapi/v2/file/scan'
        # api key -> True while the key is still considered usable
        self.api_keys_list = {}
        for api_key in api_keys_list:
            self.api_keys_list[api_key] = True
        self.api_key = api_keys_list[0]
        # file md5 -> number of positive detections
        self.reports = {}
        # md5s whose report was not ready; retried at the end of a batch run
        self.waiting_queue = set()
        # seconds to sleep between requests (public API rate limit)
        self.WAITING_TIME = waiting_time

    def get_api_keys_list(self):
        """Return the {api_key: usable} mapping."""
        return self.api_keys_list

    def get_file_md5(self, file):
        """Compute the MD5 hex digest of *file*, reading in 4 KiB chunks."""
        md5 = hashlib.md5()
        with open(file, 'rb') as f:
            for chunk in iter((lambda: f.read(4096)), b''):
                md5.update(chunk)
        return md5.hexdigest()

    def set_progress(self, progress):
        """Merge previously saved {md5: positives} results into the cache."""
        self.reports.update(progress)

    def change_api_key(self):
        """Mark the current key unusable and switch to the next usable one.

        Returns False when every key is exhausted.
        """
        tqdm.write(f'[*] {self.api_key} is unavailable, change another API key')
        self.api_keys_list[self.api_key] = False
        for api_key in self.api_keys_list:
            if self.api_keys_list[api_key]:
                self.api_key = api_key
                return True
        tqdm.write(red(f'[ERROR] There is no available api key'))
        return False

    def check_api_key_available(self):
        """Probe every key against the report endpoint and record usability."""
        tqdm.write('[*] Check API keys available')
        for api_key in self.api_keys_list:
            try:
                # known-sample resource used purely as a connectivity probe
                params = {'apikey': api_key, 'resource': '34efc3ebf51a6511c0d12cce7592db73'}
                res = requests.get(self.REPORT_URL, params)
                tqdm.write(f'API {api_key}: {res.status_code}')
                if (res.status_code == 200):
                    self.api_keys_list[api_key] = True
                elif (res.status_code == 403):
                    # forbidden: invalid key
                    self.api_keys_list[api_key] = False
                elif (res.status_code == 204):
                    # no content: rate limit exceeded
                    self.api_keys_list[api_key] = False
                elif (res.status_code == 400):
                    tqdm.write('Failed to check api key: Bad Request.')
            except Exception:
                tqdm.write(red(f'[ERROR] Failed to check api: {api_key}'))
                continue

    def get_reports(self, all_info=False):
        """Return cached results; only files with positives unless *all_info*."""
        if all_info:
            return self.reports
        positives_report = {}
        for file_md5 in self.reports:
            if (self.reports[file_md5] > 0):
                positives_report[file_md5] = self.reports[file_md5]
        return positives_report

    def retreive_report(self, file_md5):
        """Fetch the scan report for *file_md5*, rotating keys on failure.

        Returns the parsed JSON, or False when no key works.
        (The misspelled name is kept for caller compatibility.)
        """
        params = {'apikey': self.api_key, 'resource': file_md5}
        res = requests.get(self.REPORT_URL, params)
        if (res.status_code == 200):
            return res.json()
        if (not self.change_api_key()):
            return False
        return self.retreive_report(file_md5)

    def scan_file(self, filename, file):
        """Upload *file* for scanning, rotating keys on failure."""
        params = {'apikey': self.api_key}
        files = {'file': (filename, file)}
        res = requests.post(self.SCAN_URL, files=files, params=params)
        if (res.status_code == 200):
            return res.json()
        if (not self.change_api_key()):
            return False
        return self.scan_file(filename, file)

    def analyze_single_file(self, path):
        """Report positives for one file.

        Uses the cache, else fetches the report, else uploads the file and
        re-fetches. Returns the positives count, -1 when all keys are
        exhausted, or None when the result is deferred or the path invalid.
        """
        if (not os.path.isfile(path)):
            tqdm.write(red(f'[*] Error: Given path is not a file: {path}'))
            return
        tqdm.write(f'[*] Retrieved file scan report: {path}')
        file_md5 = self.get_file_md5(path)
        if (file_md5 in self.reports):
            tqdm.write(green(f'[*] {file_md5} already retrieved report'))
            return self.reports[file_md5]
        report = self.retreive_report(file_md5)
        time.sleep(self.WAITING_TIME)
        if (not report):
            tqdm.write(red(f'[*] ERROR: All API keys are unavailable'))
            return (- 1)
        if (report['response_code'] == 1):
            self.reports[file_md5] = report['positives']
            return report['positives']
        # unknown to VT yet: upload the file, then ask for the report again
        tqdm.write(f'[*] Upload file: {path}')
        with open(path, 'rb') as f:
            scan_result = self.scan_file(os.path.basename(path), f)
        time.sleep(self.WAITING_TIME)
        if (not scan_result):
            tqdm.write(red(f'[*] ERROR: All API keys are unavailable'))
            return (- 1)
        if (scan_result['response_code'] == 0):
            tqdm.write(red(f'[*] ERROR: Failed to upload file: {path}'))
            return
        tqdm.write(f'[*] Retrieve file scan reports again')
        re_report = self.retreive_report(file_md5)
        time.sleep(self.WAITING_TIME)
        if (not re_report):
            tqdm.write(red(f'[*] ERROR: All API keys are unavailable'))
            return (- 1)
        if (re_report['response_code'] == 1):
            # BUG FIX: the original cached/returned the stale first 'report'
            # here instead of the freshly fetched 're_report'.
            self.reports[file_md5] = re_report['positives']
            return re_report['positives']
        tqdm.write(f'[*] Unable to retrieve {file_md5}, add to waiting queue')
        self.waiting_queue.add(file_md5)
        return

    def analyze_multi_file(self, path):
        """Walk *path* recursively, analyze every file, then retry anything
        left in the waiting queue."""
        if (not os.path.isdir(path)):
            tqdm.write(red(f'[*] Error: Given path is not a directory: {path}'))
            return
        file_count = sum((len(files) for (_, _, files) in os.walk(path)))
        progress_bar = tqdm(total=file_count)
        for (root, dirs, files) in os.walk(path):
            for name in files:
                file_path = os.path.join(root, name)
                try:
                    result = self.analyze_single_file(file_path)
                    progress_bar.update(1)
                    if (result == (- 1)):
                        # every API key exhausted: give up on the whole walk
                        return
                    if (not result):
                        continue
                    if (result > 0):
                        tqdm.write(green(f'[*] Found positives file: {file_path}'))
                except Exception as e:
                    # BUG FIX: Python 3 exceptions have no '.message'
                    tqdm.write(yellow(f'[WARN] Exception found: {e}'))
                    continue
        progress_bar.close()
        tqdm.write(f'[*] Start to retrieve file report from waiting queue')
        for file_md5 in tqdm(self.waiting_queue):
            try:
                report = self.retreive_report(file_md5)
                if (not report):
                    tqdm.write(red(f'[*] ERROR: All API keys are unavailable'))
                    return (- 1)
                if (report['response_code'] == 1):
                    self.reports[file_md5] = report['positives']
            except Exception as e:
                # BUG FIX: Python 3 exceptions have no '.message'
                tqdm.write(yellow(f'[WARN] Exception found: {e}'))
                continue
def setup_intersphinx(app, config):
    """Disconnect sphinx.ext.intersphinx's 'missing-reference' listeners so
    hoverxref can handle that event itself (no-op unless the feature is on)."""
    if not app.config.hoverxref_intersphinx:
        return
    # The listener registry changed shape in Sphinx 3.0: dict of id->handler
    # before, list of EventListener namedtuples after.
    if sphinx.version_info < (3, 0, 0):
        registered = list(app.events.listeners.get('missing-reference').items())
    else:
        registered = [
            (entry.id, entry.handler)
            for entry in app.events.listeners.get('missing-reference')
        ]
    for listener_id, handler in registered:
        # drop only the listeners that intersphinx itself installed
        if inspect.getmodule(handler).__name__ == 'sphinx.ext.intersphinx':
            app.disconnect(listener_id)
def _is_unnecessary_indexing(node: Union[nodes.For, nodes.Comprehension]) -> bool:
    """Return True when every index-based use of the loop variable inside
    *node* is redundant (the iteration could use the element directly).

    Returns False when there are no index uses at all.
    """
    index_nodes = []
    for assign_name_node in node.target.nodes_of_class((nodes.AssignName, nodes.Name)):
        index_nodes.extend(_index_name_nodes(assign_name_node.name, node))
    # BUG FIX (type): the original returned the truthy list itself despite the
    # bool annotation; normalise to a real bool without changing truthiness.
    return bool(index_nodes) and all(
        _is_redundant(index_node, node) for index_node in index_nodes
    )
def test_global_pool_cell():
    """GlobalPoolingCell must preserve the input spatial size both with and
    without an output convolution."""
    feat_a = torch.randn([2, 256, 32, 32])
    feat_b = torch.randn([2, 256, 32, 32])
    # no output conv, then a 256->256 output conv
    for cell in (GlobalPoolingCell(with_out_conv=False), GlobalPoolingCell(256, 256)):
        fused = cell(feat_a, feat_b, out_size=feat_a.shape[-2:])
        assert fused.size() == feat_a.size()
class BrokenUserTests(unittest.TestCase):
    """Every UserMixin hook left unimplemented on BrokenUser must raise
    NotImplementedError with the canonical message."""

    def setUp(self):
        self.user = BrokenUser

    def tearDown(self):
        self.user = None

    def _assert_not_implemented(self, func, *args):
        # shared helper: the call must fail with the canonical message
        with self.assertRaisesRegex(NotImplementedError, NOT_IMPLEMENTED_MSG):
            func(*args)

    def test_get_username(self):
        self._assert_not_implemented(self.user.get_username, User('foobar'))

    def test_user_model(self):
        self._assert_not_implemented(self.user.user_model)

    def test_username_max_length(self):
        self._assert_not_implemented(self.user.username_max_length)

    def test_get_user(self):
        self._assert_not_implemented(self.user.get_user, 1)

    def test_get_social_auth(self):
        self._assert_not_implemented(self.user.get_social_auth, 'foo', 1)

    def test_get_social_auth_for_user(self):
        self._assert_not_implemented(self.user.get_social_auth_for_user, User('foobar'))

    def test_create_social_auth(self):
        self._assert_not_implemented(self.user.create_social_auth, User('foobar'), 1, 'foo')

    def test_disconnect(self):
        self._assert_not_implemented(self.user.disconnect, BrokenUser())
class TestNoReturn(TestNameCheckVisitorBase):
    """Checks for NoReturn handling in the name-check visitor."""

    # NOTE(review): the bare '_passes()' lines below look like stripped
    # '@assert_passes()' decorators -- confirm against the original file.
    _passes()
    def test_no_return(self):
        from typing import Optional
        from typing_extensions import NoReturn

        def f() -> NoReturn:
            raise Exception

        def capybara(x: Optional[int]) -> None:
            # calling a NoReturn function in the None branch should narrow
            # x to int afterwards
            if (x is None):
                f()
            assert_is_value(x, TypedValue(int))
    _passes()
    def test_no_return_parameter(self):
        from typing_extensions import NoReturn

        def assert_unreachable(x: NoReturn) -> None:
            pass

        def capybara():
            # passing a value into a NoReturn parameter
            assert_unreachable(1)
    _passes()
    def test_assignability(self):
        from typing_extensions import NoReturn

        def takes_never(x: NoReturn):
            print(x)
def test_raise_error_with_builtin_function_as_task(runner, tmp_path):
    """Collection must fail loudly when @task wraps a builtin method
    (datetime.utcnow().strftime) instead of a plain function.

    NOTE(review): the indentation inside the embedded source string may have
    been mangled by extraction (textwrap.dedent expects consistent leading
    whitespace) -- confirm against the original test.
    """
    source = '\n from pytask import task\n from pathlib import Path\n from datetime import datetime\n\n task(\n kwargs={"format": "%y/%m/%d"}, produces=Path("time.txt")\n )(datetime.utcnow().strftime)\n '
    tmp_path.joinpath('task_example.py').write_text(textwrap.dedent(source))
    result = runner.invoke(cli, [tmp_path.as_posix()])
    assert (result.exit_code == ExitCode.COLLECTION_FAILED)
    assert ('Builtin functions cannot be wrapped' in result.output)
class StubQuery(object):
    """Minimal stand-in for a Django QuerySet's query object.

    Provides just enough surface (ordering, context, meta access) for code
    under test that introspects a query.
    """

    def __init__(self, model):
        self.model = model
        # mimic Django's default primary-key ordering
        self.order_by = ['pk']

    def select_related(self):
        # stub: related objects are never selected
        return False

    def add_context(self, *args, **kwargs):
        # stub: context writes are discarded
        pass

    def get_context(self, *args, **kwargs):
        # stub: context reads always see an empty mapping
        return {}

    def get_meta(self):
        # expose the model's _meta options object, like a real query
        return self.model._meta

    def _prepare(self, field):
        # called during query composition; returning self allows chaining
        return self
# NOTE(review): the two '.parametrize' lines are almost certainly
# '@pytest.mark.parametrize' decorators whose '@pytest.mark' prefix was lost
# in extraction -- confirm against the original test module.
.parametrize('public_catalog, credentials, expected_repos', [(False, None, None), (True, None, ['public/publicrepo']), (False, ('devtable', 'password'), ['devtable/simple', 'devtable/complex', 'devtable/gargantuan']), (True, ('devtable', 'password'), ['devtable/simple', 'devtable/complex', 'devtable/gargantuan'])])
.parametrize('page_size', [1, 2, 10, 50, 100])
def test_catalog(public_catalog, credentials, expected_repos, page_size, v2_protocol, liveserver_session, app_reloader, liveserver, registry_server_executor):
    """The v2 catalog endpoint must list the expected repositories for each
    combination of PUBLIC_CATALOG flag, credentials and page size."""
    with FeatureFlagValue('PUBLIC_CATALOG', public_catalog, registry_server_executor.on(liveserver)):
        results = v2_protocol.catalog(liveserver_session, page_size=page_size, credentials=credentials, namespace='devtable', repo_name='simple')
        if (expected_repos is None):
            assert (len(results) == 0)
        else:
            # results may contain more repos than expected; subset is enough
            assert set(expected_repos).issubset(set(results))
# NOTE(review): '_register' below is likely a stripped decorator (something
# like '@BaseObject._register') registering this GUID handler -- confirm.
_register
class StreamPropertiesObject(BaseObject):
    """ASF Stream Properties Object: extracts channel count, sample rate and
    bitrate from the fixed-layout type-specific data."""
    GUID = guid2bytes('B7DC0791-A9B7-11CF-8EE6-00C00C205365')

    def parse(self, asf, data):
        super(StreamPropertiesObject, self).parse(asf, data)
        # bytes 56..65: presumably WAVEFORMATEX-style fields -- channels
        # (u16), samples/sec (u32), avg bytes/sec (u32), little-endian --
        # TODO confirm against the ASF spec.
        (channels, sample_rate, bitrate) = struct.unpack('<HII', data[56:66])
        asf.info.channels = channels
        asf.info.sample_rate = sample_rate
        asf.info.bitrate = (bitrate * 8)  # bytes/sec -> bits/sec
class GaussianProcessLogLikelihoodInterface(with_metaclass(ABCMeta, GaussianProcessDataInterface)):
    """Abstract interface for a GP log-likelihood measure over hyperparameters.

    NOTE(review): the class is built on ABCMeta and every body is 'pass', so
    each method below almost certainly carried an '@abstractmethod' (or
    abstractproperty-related) decorator that was lost in extraction --
    confirm against the original file.
    """

    def dim(self):
        pass

    def num_hyperparameters(self):
        pass

    def get_hyperparameters(self):
        pass

    def set_hyperparameters(self, hyperparameters):
        pass

    # classic getter/setter-backed property (pre-'@property.setter' style)
    hyperparameters = abstractproperty(get_hyperparameters, set_hyperparameters)

    def compute_log_likelihood(self):
        pass

    def compute_grad_log_likelihood(self):
        pass

    def compute_hessian_log_likelihood(self):
        pass
class SRMFile(cpi.File):
    """SAGA adaptor file object for SRM URLs, driven through a PTY shell
    running the grid-proxy / gfal2 command-line tools.

    NOTE(review): the bare '_CALL' markers below are almost certainly
    stripped '@SYNC_CALL' / '@ASYNC_CALL' decorators -- confirm against the
    original adaptor source.
    """

    def __init__(self, api, adaptor):
        _cpi_base = super(SRMFile, self)
        _cpi_base.__init__(api, adaptor)

    def _dump(self):
        # debugging aid: print the adaptor-instance state
        print(('url : %s' % self._url))
        print(('flags : %s' % self._flags))
        print(('session: %s' % self._session))

    def _alive(self):
        # recreate the PTY shell if it died since the last call
        alive = self.shell.alive()
        if (not alive):
            self.shell = sups.PTYShell(self._adaptor.pty_url)

    _CALL
    def init_instance(self, adaptor_state, url, flags, session):
        """Adaptor-side constructor: validate the URL, open the PTY shell,
        and verify an x509 proxy plus the gfal2 client are available.

        Raises NoSuccess / AuthenticationFailed / DoesNotExist on failure.
        """
        self._url = url
        self._flags = flags
        self._session = session
        self._init_check()
        try:
            # NOTE(review): reads self.session here but stored self._session
            # above -- confirm both resolve (cpi.File may define 'session').
            self.shell = sups.PTYShell(self._adaptor.pty_url, self.session)
        except Exception as e:
            raise rse.NoSuccess("Couldn't open shell") from e
        try:
            (rc, out, _) = self.shell.run_sync('grid-proxy-info')
        except Exception as e:
            self.shell.finalize(kill_pty=True)
            raise rse.NoSuccess('grid-proxy-info failed') from e
        if (rc != 0):
            raise rse.NoSuccess('grid-proxy-info failed')
        if ('timeleft : 0:00:00' in out):
            raise rse.AuthenticationFailed('x509 proxy expired.')
        try:
            (rc, _, _) = self.shell.run_sync('gfal2_version')
        except Exception as e:
            self.shell.finalize(kill_pty=True)
            raise rse.NoSuccess('gfal2_version') from e
        if (rc != 0):
            raise rse.DoesNotExist('gfal2 client not found')
        return self.get_api()

    def _init_check(self):
        # SRM URLs must not carry credentials inline
        url = self._url
        flags = self._flags
        if url.username:
            raise rse.BadParameter(('Cannot handle url %s (has username)' % url))
        if url.password:
            raise rse.BadParameter(('Cannot handle url %s (has password)' % url))
        self._path = url.path
        path = url.path  # NOTE(review): locals 'flags' and 'path' are unused

    _CALL
    def get_url(self):
        return self._url

    _CALL
    def get_size_self(self):
        self._alive()
        return self._adaptor.file_get_size(self.shell, self._url)

    _CALL
    def copy_self(self, dst, flags):
        self._alive()
        self._adaptor.srm_transfer(self.shell, flags, self._url, dst)

    _CALL
    def remove_self(self, flags):
        self._alive()
        self._adaptor.srm_file_remove(self.shell, flags, self._url)

    _CALL
    def is_file_self(self):
        # stat the URL and test the reported mode
        self._alive()
        stat = self._adaptor.srm_stat(self.shell, self._url)
        if (stat['mode'] == 'file'):
            return True
        else:
            return False

    _CALL
    def is_link_self(self):
        self._alive()
        stat = self._adaptor.srm_stat(self.shell, self._url)
        if (stat['mode'] == 'link'):
            return True
        else:
            return False

    _CALL
    def is_dir_self(self):
        self._alive()
        stat = self._adaptor.srm_stat(self.shell, self._url)
        if (stat['mode'] == 'dir'):
            return True
        else:
            return False

    _CALL
    def close(self, timeout=None):
        # closing is a no-op; timeouts are not supported
        if timeout:
            raise rse.BadParameter('timeout for close not supported')
def create_csp_stem(in_chans=3, out_chs=32, kernel_size=3, stride=2, pool='', padding='', act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None):
    """Build a CSP-style stem: a stack of conv-norm-act blocks, optionally
    followed by a (possibly anti-aliased) max pool.

    Returns (stem, feature_info) where feature_info records channel count and
    total reduction at each point just before the resolution drops.
    """
    stem = nn.Sequential()
    feature_info = []
    if not isinstance(out_chs, (tuple, list)):
        out_chs = [out_chs]
    depth = len(out_chs)
    assert depth
    assert stride in (1, 2, 4)
    prev_feat = None
    in_c = in_chans
    net_stride = 1
    for idx, out_c in enumerate(out_chs):
        layer_name = f'conv{idx + 1}'
        first = idx == 0
        last = idx == (depth - 1)
        # stride-2 lands on the first conv (total stride > 1) and, when no
        # pool follows, also on the last conv (total stride > 2)
        conv_s = 2 if (first and stride > 1) or (last and stride > 2 and not pool) else 1
        if conv_s > 1 and prev_feat is not None:
            feature_info.append(prev_feat)
        stem.add_module(layer_name, ConvNormAct(in_c, out_c, kernel_size, stride=conv_s, padding=padding if first else '', act_layer=act_layer, norm_layer=norm_layer))
        net_stride *= conv_s
        in_c = out_c
        prev_feat = dict(num_chs=in_c, reduction=net_stride, module='.'.join(['stem', layer_name]))
    if pool:
        assert stride > 2
        if prev_feat is not None:
            feature_info.append(prev_feat)
        if aa_layer is not None:
            # blur-pool variant: stride-1 max pool + anti-aliased downsample
            stem.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
            stem.add_module('aa', aa_layer(channels=in_c, stride=2))
            tail_name = 'aa'
        else:
            stem.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
            tail_name = 'pool'
        net_stride *= 2
        prev_feat = dict(num_chs=in_c, reduction=net_stride, module='.'.join(['stem', tail_name]))
    feature_info.append(prev_feat)
    return (stem, feature_info)
def read_embeddings(file_enc, skip_lines=0, filter_set=None):
embs = dict()
total_vectors_in_file = 0
with open(file_enc, 'rb') as f:
for (i, line) in enumerate(f):
if (i < skip_lines):
continue
if (not line):
break
if (len(line) == 0):
continue
l_split = line.decode('utf8').strip().split(' ')
if (len(l_split) == 2):
continue
total_vectors_in_file += 1
if ((filter_set is not None) and (l_split[0] not in filter_set)):
continue
embs[l_split[0]] = [float(em) for em in l_split[1:]]
return (embs, total_vectors_in_file) |
class TestSetAssertions():
    """Assertion-rewriting output for set comparison operators.

    NOTE(review): the bare '.parametrize' lines are '@pytest.mark.parametrize'
    decorators whose '@pytest.mark' prefix was lost in extraction; the
    indentation inside the makepyfile literals may also have been mangled
    (makepyfile dedents, so consistent indentation is required) -- confirm.
    """

    .parametrize('op', ['>=', '>', '<=', '<', '=='])
    def test_set_extra_item(self, op, pytester: Pytester) -> None:
        # each op's failure output must name the extra item on the right side
        pytester.makepyfile(f'''
        def test_hello():
            x = set("hello x")
            y = set("hello y")
            assert x {op} y
        ''')
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(['*def test_hello():*', f'*assert x {op} y*'])
        if (op in ['>=', '>', '==']):
            result.stdout.fnmatch_lines(['*E*Extra items in the right set:*', "*E*'y'"])
        if (op in ['<=', '<', '==']):
            result.stdout.fnmatch_lines(['*E*Extra items in the left set:*', "*E*'x'"])

    .parametrize('op', ['>', '<', '!='])
    def test_set_proper_superset_equal(self, pytester: Pytester, op) -> None:
        # strict comparisons between equal sets must say the sets are equal
        pytester.makepyfile(f'''
        def test_hello():
            x = set([1, 2, 3])
            y = x.copy()
            assert x {op} y
        ''')
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(['*def test_hello():*', f'*assert x {op} y*', '*E*Both sets are equal*'])

    def test_pytest_assertrepr_compare_integration(self, pytester: Pytester) -> None:
        # end-to-end: a missing element is reported as an extra left item
        pytester.makepyfile('\n def test_hello():\n x = set(range(100))\n y = x.copy()\n y.remove(50)\n assert x == y\n ')
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(['*def test_hello():*', '*assert x == y*', '*E*Extra items*left*', '*E*50*', '*= 1 failed in*'])
# NOTE(review): the two lines below look like stripped mock decorators --
# likely "@patch('pypyr.moduleloader.get_module')" and
# "@patch.object(Step, 'invoke_step', side_effect=mock_step_mutating_run)"
# -- confirm against the original test module.
('pypyr.moduleloader.get_module')
(Step, 'invoke_step', side_effect=mock_step_mutating_run)
def test_foreach_evaluates_run_decorator(mock_invoke, mock_moduleloader):
    """foreach must re-evaluate the 'run' expression on every iteration, so a
    step whose first iteration flips 'run' to False only executes once."""
    step = Step({'name': 'step1', 'run': '{dynamic_run_expression}', 'foreach': ['{key1}', '{key2}', 'key3']})
    context = get_test_context()
    context['dynamic_run_expression'] = True
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.INFO) as mock_logger_info:
        step.run_step(context)
    assert (mock_logger_info.mock_calls == [call('foreach: running step value1'), call('foreach: running step value2'), call('step1 not running because run is False.'), call('foreach: running step key3'), call('step1 not running because run is False.'), call('foreach decorator looped 3 times.')])
    # only the first iteration actually invoked the step
    assert (mock_invoke.call_count == 1)
    # foreach adds exactly one key, the loop variable 'i'
    assert (len(context) == (original_len + 1))
    assert (context['i'] == 'key3')
    assert (step.for_counter == 'key3')
def sharp_iferror(extr, test, then='', Else=None, *args):
    """Wiki #iferror parser function: when *test* markup carries an 'error'
    CSS class, expand *then*; otherwise return *test* stripped, or expand
    *Else* when one is given."""
    error_markup = re.match('<(?:strong|span|p|div)\\s(?:[^\\s>]*\\s+)*?class="(?:[^"\\s>]*\\s+)*?error(?:\\s[^">]*)?"', test)
    if error_markup:
        return extr.expand(then.strip())
    if Else is None:
        return test.strip()
    return extr.expand(Else.strip())
class ELF32_Sym(ELF_Sym):
    """32-bit ELF symbol-table entry (Elf32_Sym) parsed from raw bytes."""

    # Elf32_Sym layout is 16 bytes: st_name(u32) st_value(u32) st_size(u32)
    # st_info(u8) st_other(u8) st_shndx(u16)
    Sym_SIZE = (4 * 4)

    def __init__(self, buf, endian=0):
        if (len(buf) != self.Sym_SIZE):
            # BUG FIX: was a bare 'raise' (invalid outside an except block);
            # raise a descriptive error instead.
            raise ValueError('ELF32_Sym needs exactly %d bytes, got %d' % (self.Sym_SIZE, len(buf)))
        # endian 0 -> little-endian, anything else -> big-endian
        self.fmt = ('<IIIBBH' if (endian == 0) else '>IIIBBH')
        (st_name, st_value, st_size, st_info, st_other, st_shndx) = struct.unpack(self.fmt, buf)
        super(ELF32_Sym, self).__init__(st_name, st_value, st_size, st_info, st_other, st_shndx)

    def pack(self):
        """Serialise the entry back to bytes.

        BUG FIX: the original packed the fields but dropped the result.
        """
        return struct.pack(self.fmt, self.st_name, self.st_value, self.st_size, self.st_info, self.st_other, self.st_shndx)
# NOTE(review): '.skipif' is '@pytest.mark.skipif' with the prefix lost in
# extraction, and Inner/Outer almost certainly carried attrs '@define'
# decorators (they are constructed positionally and compared by value) --
# confirm against the original test module.
.skipif((not is_py39_plus), reason='3.9+ only')
def test_annotated_attrs():
    """typing.Annotated wrappers must be transparent to (un)structuring, and
    custom hooks registered for the inner type must still apply."""
    from typing import Annotated
    converter = Converter()

    class Inner():
        a: int

    class Outer():
        i: Annotated[(Inner, 'test')]
        j: list[Annotated[(Inner, 'test')]]

    orig = Outer(Inner(1), [Inner(1)])
    raw = converter.unstructure(orig)
    assert (raw == {'i': {'a': 1}, 'j': [{'a': 1}]})
    structured = converter.structure(raw, Outer)
    assert (structured == orig)
    # a manual unstructure hook for Inner must be honoured inside Annotated
    converter.register_unstructure_hook(Inner, (lambda v: {'a': 2}))
    raw = converter.unstructure(Outer(Inner(1), [Inner(1)]))
    assert (raw == {'i': {'a': 2}, 'j': [{'a': 2}]})
    structured = converter.structure(raw, Outer)
    assert (structured == Outer(Inner(2), [Inner(2)]))
class Effect5300(BaseEffect):
    """Passive hull bonus: boosts drone shield capacity, structure HP and
    armor HP by shipBonusAD1, scaled by the Amarr Destroyer skill."""

    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        # identical boost applied to each of the three drone HP attributes
        for boosted_attr in ('shieldCapacity', 'hp', 'armorHP'):
            fit.drones.filteredItemBoost((lambda mod: mod.item.requiresSkill('Drones')), boosted_attr, src.getModifiedItemAttr('shipBonusAD1'), skill='Amarr Destroyer', **kwargs)
def test_utf8_bom(gm_manager):
    """A Greasemonkey script saved with a UTF-8 BOM must still load, and the
    BOM must not leak into the script's code.

    NOTE(review): the indentation inside the script literal may have been
    mangled by extraction; the \ufeff BOM before the header is the point.
    """
    script = textwrap.dedent('\n \ufeff// ==UserScript==\n // qutebrowser test userscript\n // ==/UserScript==\n '.lstrip('\n'))
    _save_script(script, 'bom.user.js')
    gm_manager.load_scripts()
    scripts = gm_manager.all_scripts()
    assert (len(scripts) == 1)
    script = scripts[0]
    # the header must appear as a clean line (no BOM prefix) in the code
    assert ('// ==UserScript==' in script.code().splitlines())
class StatReporter(PlainReporter):
    """PlainReporter variant that accumulates messages on the CLASS itself so
    callers can collect them across several checker runs."""

    # shared across all instances; reset whenever a new reporter is built
    error_messages = []
    style_messages = []

    def __init__(self, source_lines=None):
        super().__init__(source_lines)
        # a fresh reporter starts both shared accumulators over
        StatReporter.error_messages = []
        StatReporter.style_messages = []

    def print_messages(self, level='all'):
        # instead of printing, funnel this instance's messages into the
        # class-level accumulators ('level' is accepted but unused)
        StatReporter.error_messages.extend(self._error_messages)
        StatReporter.style_messages.extend(self._style_messages)

    # disable the parent's display hook
    _display = None
class UnicornTask():
    """One resumable unit of emulation on a Unicorn engine instance.

    Remembers the begin/end PC range, can save/restore CPU context across
    interruptions, and knows how to read the PC for each supported arch.
    """

    def __init__(self, uc: Uc, begin: int, end: int, task_id=None):
        self._uc = uc
        self._begin = begin
        self._end = end
        self._stop_request = False
        self._ctx = None  # saved CPU context between interruptions, if any
        # BUG FIX: the original assigned None here, silently dropping the
        # caller-supplied task_id.
        self._task_id = task_id
        self._arch = self._uc._arch
        self._mode = self._uc._mode

    @property
    def pc(self):
        """Current program counter; in ARM Thumb state the low bit is set.

        NOTE(review): restore() reads self.pc as an attribute, so this was
        almost certainly a @property whose decorator was lost -- restored.
        """
        raw_pc = self._raw_pc()
        # BUG FIX: only ARM has CPSR; the original read it for every arch.
        if self._arch == UC_ARCH_ARM and (self._uc.reg_read(UC_ARM_REG_CPSR) & (1 << 5)):
            return (raw_pc | 1)
        return raw_pc

    def _raw_pc(self):
        """Read the architecture-specific PC register (0 for unknown arch)."""
        if (self._arch == UC_ARCH_X86):
            if ((self._mode & UC_MODE_32) != 0):
                return self._uc.reg_read(UC_X86_REG_EIP)
            elif ((self._mode & UC_MODE_64) != 0):
                return self._uc.reg_read(UC_X86_REG_RIP)
        elif (self._arch == UC_ARCH_MIPS):
            return self._uc.reg_read(UC_MIPS_REG_PC)
        elif (self._arch == UC_ARCH_ARM):
            return self._uc.reg_read(UC_ARM_REG_PC)
        elif (self._arch == UC_ARCH_ARM64):
            return self._uc.reg_read(UC_ARM64_REG_PC)
        elif (self._arch == UC_ARCH_PPC):
            return self._uc.reg_read(UC_PPC_REG_PC)
        elif (self._arch == UC_ARCH_M68K):
            return self._uc.reg_read(UC_M68K_REG_PC)
        elif (self._arch == UC_ARCH_SPARC):
            return self._uc.reg_read(UC_SPARC_REG_PC)
        elif (self._arch == UC_ARCH_RISCV):
            return self._uc.reg_read(UC_RISCV_REG_PC)
        return 0

    def _reach_end(self):
        # the task is done once the raw PC hits the end address
        return (self._raw_pc() == self._end)

    def save(self):
        """Snapshot the engine's CPU context."""
        return self._uc.context_save()

    def restore(self, context):
        """Restore a snapshot and arrange to resume from its PC."""
        self._uc.context_restore(context)
        self._begin = self.pc

    def on_start(self):
        # resume from the saved context if the task was interrupted earlier
        if self._ctx:
            self.restore(self._ctx)

    def on_interrupted(self, ucerr: int):
        # remember where we stopped so on_start can pick up again
        self._ctx = self.save()

    def on_exit(self):
        pass
def infer_dtype_from_tensor(data: Union[(PackedMap, PackedList, List, torch.Tensor, Tuple)]):
    """Recursively infer the torcharrow dtype that matches a (possibly
    nested) packed-tensor structure.

    Handles WithPresence (nullability wrapper), plain tensors, PackedList /
    PackedMap (offset-based nesting), (named) tuples -> Struct/Tuple, and
    plain Python lists (delegated to prefix-based inference).

    Raises TypeError on any structure it cannot map.
    """
    if isinstance(data, WithPresence):
        t = infer_dtype_from_tensor(data.values)
        # a presence wrapper may not wrap an already-nullable dtype
        if t.nullable:
            raise TypeError("WithPresence structs can't be nested")
        return t.with_null()
    if isinstance(data, torch.Tensor):
        # map torch dtype names ('torch.int64', 'torch.bool') onto the
        # identically named torcharrow dtypes
        torcharrow_dtype_name = str(data.dtype)
        assert torcharrow_dtype_name.startswith('torch.')
        torcharrow_dtype_name = torcharrow_dtype_name[len('torch.'):]
        if (torcharrow_dtype_name == 'bool'):
            # torcharrow spells it 'boolean'
            torcharrow_dtype_name = 'boolean'
        if (not hasattr(dtypes, torcharrow_dtype_name)):
            raise TypeError(f'Unexpected dtype for the tensor: {data.dtype}')
        return getattr(dtypes, torcharrow_dtype_name)
    if isinstance(data, PackedList):
        if ((not isinstance(data.offsets, torch.Tensor)) or (data.offsets.dtype not in [torch.int16, torch.int32, torch.int64])):
            raise TypeError('PackedList.offsets is expected to be an integer-valued tensor')
        return dtypes.List(infer_dtype_from_tensor(data.values))
    if isinstance(data, PackedMap):
        if ((not isinstance(data.offsets, torch.Tensor)) or (data.offsets.dtype not in [torch.int16, torch.int32, torch.int64])):
            raise TypeError('PackedMap.offsets is expected to be an integer-valued tensor')
        return dtypes.Map(infer_dtype_from_tensor(data.keys), infer_dtype_from_tensor(data.values))
    if isinstance(data, tuple):
        types = [infer_dtype_from_tensor(x) for x in data]
        # namedtuples expose _fields and become Structs; plain tuples Tuple
        fields = getattr(data, '_fields', None)
        if (fields is not None):
            assert (len(fields) == len(types))
            return dtypes.Struct([dtypes.Field(n, t) for (n, t) in zip(fields, types)])
        else:
            return dtypes.Tuple(types)
    if isinstance(data, list):
        return dtypes.infer_dtype_from_prefix(data)
    raise TypeError(f"Can't infer datatype based on torch structure of type {type(data)}")
def trigger_update(distribution, for_py_version, wheel, search_dirs, app_data, env, periodic):
    """Spawn a background process that refreshes the embedded wheel for
    *distribution*; in debug mode run it inline and wait for it to finish."""
    wheel_path = None if wheel is None else str(wheel.path)
    cmd = [sys.executable, '-c', dedent('\n from virtualenv.report import setup_report, MAX_LEVEL\n from virtualenv.seed.wheels.periodic_update import do_update\n setup_report(MAX_LEVEL, show_pid=True)\n do_update({!r}, {!r}, {!r}, {!r}, {!r}, {!r})\n ').strip().format(distribution, for_py_version, wheel_path, str(app_data), [str(p) for p in search_dirs], periodic)]
    debug = env.get('_VIRTUALENV_PERIODIC_UPDATE_INLINE') == '1'
    stream = None if debug else DEVNULL
    popen_kwargs = {'stdout': stream, 'stderr': stream}
    if not debug and sys.platform == 'win32':
        popen_kwargs['creationflags'] = CREATE_NO_WINDOW  # no console flash
    process = Popen(cmd, **popen_kwargs)
    logging.info('triggered periodic upgrade of %s%s (for python %s) via background process having PID %d', distribution, ('' if wheel is None else f'=={wheel.version}'), for_py_version, process.pid)
    if debug:
        # inline mode: block until the updater is done
        process.communicate()
    else:
        # deliberately orphan the child: mark it "finished" so garbage
        # collection of the Popen object does not complain about a
        # still-running process
        process.returncode = 0
# NOTE(review): restored the '@pytest.mark.' decorator prefixes and the '@'
# matrix-multiplication operators that were lost in extraction -- confirm
# against the original test module.
@pytest.mark.parametrize('shift', [1.5, np.array([(- 0.5), 1, 0.3])])
@pytest.mark.parametrize('scale', [2.0, np.array([1.5, 3.3, 1.0])])
def test_multivariate_rv_transform(shift, scale):
    """logp of an affine transform of a multivariate normal must match the
    scipy density with mean shift + mu*scale and covariance S @ cov @ S.T."""
    mu = np.array([0, 0.9, (- 2.1)])
    cov = np.array([[1, 0, 0.9], [0, 1, 0], [0.9, 0, 1]])
    x_rv_raw = pt.random.multivariate_normal(mu, cov=cov)
    x_rv = (shift + (x_rv_raw * scale))
    x_rv.name = 'x'
    x_vv = x_rv.clone()
    logp = conditional_logp({x_rv: x_vv})[x_vv]
    assert_no_rvs(logp)
    x_vv_test = np.array([5.0, 4.9, (- 6.3)])
    # diagonal scale matrix of the elementwise transform
    scale_mat = (scale * np.eye(x_vv_test.shape[0]))
    np.testing.assert_allclose(
        logp.eval({x_vv: x_vv_test}),
        sp.stats.multivariate_normal.logpdf(x_vv_test, (shift + (mu * scale)), scale_mat @ cov @ scale_mat.T),
    )
class KBKDFCMAC(KeyDerivationFunction):
    """KBKDF (NIST SP 800-108) using CMAC as the pseudo-random function."""

    def __init__(self, algorithm, mode: Mode, length: int, rlen: int, llen: (int | None), location: CounterLocation, label: (bytes | None), context: (bytes | None), fixed: (bytes | None), backend: typing.Any=None, *, break_location: (int | None)=None):
        # the algorithm must be a block-cipher *class* (instantiated later
        # with the key material in derive())
        is_block = issubclass(algorithm, ciphers.BlockCipherAlgorithm)
        is_cipher = issubclass(algorithm, ciphers.CipherAlgorithm)
        if not (is_block and is_cipher):
            raise UnsupportedAlgorithm('Algorithm supplied is not a supported cipher algorithm.', _Reasons.UNSUPPORTED_CIPHER)
        self._algorithm = algorithm
        # created lazily once key material is known
        self._cipher: (ciphers.BlockCipherAlgorithm | None) = None
        self._deriver = _KBKDFDeriver(self._prf, mode, length, rlen, llen, location, break_location, label, context, fixed)

    def _prf(self, _: bytes) -> cmac.CMAC:
        # the deriver hands us the key material, but the CMAC is keyed via
        # the cipher instance built in derive(); the argument is ignored
        assert (self._cipher is not None)
        return cmac.CMAC(self._cipher)

    def derive(self, key_material: bytes) -> bytes:
        """Derive and return the output keying material."""
        self._cipher = self._algorithm(key_material)
        assert (self._cipher is not None)
        from cryptography.hazmat.backends.openssl.backend import backend as ossl
        if not ossl.cmac_algorithm_supported(self._cipher):
            raise UnsupportedAlgorithm('Algorithm supplied is not a supported cipher algorithm.', _Reasons.UNSUPPORTED_CIPHER)
        return self._deriver.derive(key_material, self._cipher.block_size // 8)

    def verify(self, key_material: bytes, expected_key: bytes) -> None:
        """Raise InvalidKey unless deriving key_material yields expected_key."""
        derived = self.derive(key_material)
        if not constant_time.bytes_eq(derived, expected_key):
            raise InvalidKey
class GreeterStub():
    """Client-side stub for the Greeter service: one callable per RPC."""

    def __init__(self, channel):
        self._client = purerpc.Client('Greeter', channel)
        # all four RPCs share the same request/reply message types
        request_type = generated.greeter_pb2.HelloRequest
        reply_type = generated.greeter_pb2.HelloReply

        def make_stub(name, cardinality):
            signature = purerpc.RPCSignature(cardinality, request_type, reply_type)
            return self._client.get_method_stub(name, signature)

        self.SayHello = make_stub('SayHello', purerpc.Cardinality.UNARY_UNARY)
        self.SayHelloGoodbye = make_stub('SayHelloGoodbye', purerpc.Cardinality.UNARY_STREAM)
        self.SayHelloToMany = make_stub('SayHelloToMany', purerpc.Cardinality.STREAM_STREAM)
        self.SayHelloToManyAtOnce = make_stub('SayHelloToManyAtOnce', purerpc.Cardinality.STREAM_UNARY)
def _create_sigma_widgets() -> dict[(str, tuple[(str, QtWidgets.QWidget)])]:
    """Build the three sigma spin-boxes (P, G, R) with identical range and
    stepping; returns {key: (label_html, widget)} in insertion order."""
    specs = (
        ('P_sigma', 'max(<b>P</b>)', 'Magnitude of error in initial estimates.\nUsed to scale the matrix P.'),
        ('G_sigma', 'max(<b>G</b>)', 'Magnitude of error in process\nUsed to scale the matrix G.'),
        ('R_sigma', 'max(<b>R</b>)', 'Magnitude of error in measurements.\nUsed to scale the matrix R.'),
    )
    widgets = {}
    for key, label, tooltip in specs:
        box = QtWidgets.QDoubleSpinBox()
        box.setRange(0, 500)
        box.setStepType(QtWidgets.QAbstractSpinBox.AdaptiveDecimalStepType)
        box.setToolTip(tooltip)
        widgets[key] = (label, box)
    return widgets
# NOTE(review): the line below is a behave step pattern whose '@then'
# decorator prefix was lost in extraction -- confirm.
('the width of cell {n_str} is {inches_str} inches')
def then_the_width_of_cell_n_is_x_inches(context, n_str, inches_str):
    """Then-step: cell *n* (1-based, row-major over a 3-column table) has the
    given width in inches."""
    def _cell(table, idx):
        # map a flat 0-based index onto (row, col) of a 3-column table
        (row, col) = ((idx // 3), (idx % 3))
        return table.cell(row, col)
    (idx, inches) = ((int(n_str) - 1), float(inches_str))
    cell = _cell(context.table_, idx)
    assert (cell.width == Inches(inches)), ('got %s' % cell.width.inches)
def _forgiving_version(version):
    """Coerce an arbitrary version string into a PEP 440-compliant one.

    The longest valid leading segment (per _PEP440_FALLBACK) becomes the
    release; everything else is tucked into a sanitized local-version label.
    """
    version = version.replace(' ', '.')
    match = _PEP440_FALLBACK.search(version)
    if match:
        safe = match['safe']
        remainder = version[len(safe):]
    else:
        # nothing salvageable: fall back to release '0'
        safe = '0'
        remainder = version
    # strip('.') guards against an empty remainder leaving a trailing dot
    local = f'sanitized.{_safe_segment(remainder)}'.strip('.')
    return f'{safe}.dev0+{local}'
def create_quantizable_transformer_decoder_layer(transformerDecoderLayer: torch.nn.TransformerDecoderLayer) -> QuantizableTransformerDecoderLayer:
    """Clone a TransformerDecoderLayer into its quantizable counterpart,
    copying hyper-parameters and then the weights.

    Raises ValueError for activations other than relu/gelu.
    """
    act = transformerDecoderLayer.activation
    # BUG FIX: the original put torch.nn.functional.relu/gelu (plain
    # functions) inside isinstance() tuples, which raises TypeError.
    # The layer may store the activation as a module OR as the functional
    # form, so test both explicitly.
    if isinstance(act, torch.nn.modules.activation.ReLU) or act is torch.nn.functional.relu:
        activation = 'relu'
    elif isinstance(act, torch.nn.modules.activation.GELU) or act is torch.nn.functional.gelu:
        activation = 'gelu'
    else:
        # was 'assert False' (silently skipped under python -O)
        raise ValueError(f'unsupported activation for quantization: {act!r}')
    quantizable_decoder = QuantizableTransformerDecoderLayer(d_model=transformerDecoderLayer.linear1.in_features, nhead=transformerDecoderLayer.self_attn.num_heads, dim_feedforward=transformerDecoderLayer.linear1.out_features, dropout=transformerDecoderLayer.dropout.p, activation=activation, layer_norm_eps=transformerDecoderLayer.norm1.eps, batch_first=transformerDecoderLayer.self_attn.batch_first, norm_first=transformerDecoderLayer.norm_first)
    copy_params_helper(src_module=transformerDecoderLayer, dest_module=quantizable_decoder)
    return quantizable_decoder
def can_create_user(email_address, blacklisted_domains=None):
    """Decide whether a user with *email_address* may be created, honouring
    the email-domain blacklist and USER_CREATION / INVITE_ONLY flags.

    NOTE(review): the '@' separator literals were stripped in the original
    ("'' in email_address" and "split('', 1)" -- splitting on an empty string
    raises ValueError); restored to '@'.
    """
    if features.BLACKLISTED_EMAILS and email_address and ('@' in email_address):
        blacklisted_domains = (blacklisted_domains or [])
        (_, email_domain) = email_address.split('@', 1)
        # compare the registrable domain (e.g. 'example.co.uk'), not the
        # full host, against the blacklist
        extracted = tldextract.extract(email_domain)
        if (extracted.registered_domain.lower() in blacklisted_domains):
            return False
    if (not features.USER_CREATION):
        return False
    if features.INVITE_ONLY_USER_CREATION:
        if (not email_address):
            return False
        # invite-only: allow only addresses with a pending team invite
        return bool(model.team.lookup_team_invites_by_email(email_address))
    return True
def fr_department(value: typing.Union[(str, int)]):
    """Validate a French department code.

    Accepts 1-19 and 21-95 (metropolitan, 20 was replaced by Corsica's
    2A/2B), and the overseas codes 971-976. Strings are coerced to int
    except for the special Corsican codes. Returns True/False.
    """
    if not value:
        return False
    if isinstance(value, str):
        # Corsica: department 20 was split into 2A and 2B
        if value in ('2A', '2B'):
            return True
        try:
            value = int(value)
        except ValueError:
            return False
    return (1 <= value <= 19) or (21 <= value <= 95) or (971 <= value <= 976)
class DOE(ABC):
    """Abstract diffractive optical element (DOE).

    NOTE(review): several lines below have empty slots where an identifier
    was lost in extraction (almost certainly a wavelength parameter, e.g. a
    lambda-like symbol): the bare '=' argument, the '(z * )' products, and
    the trailing ', )' parameter lists. The code is kept byte-for-byte;
    restore the missing identifier before running -- as written this block
    does not parse.
    """

    def __init__(self):
        pass

    def get_transmittance(self, xx, yy, ):
        # complex transmittance of the element sampled on the (xx, yy) grid
        pass

    def __add__(self, DOE2):
        # stacking two elements composes them via DOE_mix
        return DOE_mix(self, DOE2)

    def get_E(self, E, xx, yy, ):
        # apply the element's transmittance to an incident field
        return (E * self.get_transmittance(xx, yy, ))

    def get_coherent_PSF(self, xx, yy, z, ):
        # coherent PSF via a scaled Fourier transform of the transmittance
        (xx, yy, PSF) = scaled_fourier_transform(xx, yy, self.get_transmittance(xx, yy, ), =, z=z, scale_factor=1, mesh=True)
        PSF = ((1 / ((z * ) ** 2)) * PSF)
        return PSF

    def get_incoherent_PSF(self, xx, yy, z, ):
        # incoherent PSF is the squared magnitude of the coherent PSF
        return (bd.abs(self.get_coherent_PSF(xx, yy, z, )) ** 2)

    def get_amplitude_transfer_function(self, fxx, fyy, z, ):
        # ATF: transmittance evaluated at scaled spatial frequencies
        return self.get_transmittance((((- fxx) * ) * z), (((- fyy) * ) * z), )

    def get_optical_transfer_function(self, fxx, fyy, z, ):
        """OTF via FFT of the ATF, normalised to a peak magnitude of 1."""
        global bd
        from ..util.backend_functions import backend as bd
        h = bd.fft.ifft2(bd.fft.ifftshift(self.get_amplitude_transfer_function(fxx, fyy, z, )))
        H = bd.fft.fftshift(bd.fft.fft2((h * bd.conjugate(h))))
        # NOTE(review): dfx/dfy are computed but never used -- confirm intent
        dfx = (fxx[(0, 1)] - fxx[(0, 0)])
        dfy = (fyy[(1, 0)] - fyy[(0, 0)])
        H = (H / bd.amax(bd.abs(H)))
        return H
def _get_command_line_arguments():
    """Parse the report-generation command line and return it as a plain dict."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(f'--{Args.REPORT_DATA_PICKLES}', help='Pickle files with the ReportData objects', required=True, nargs='+')
    arg_parser.add_argument(f'--{Args.RUN_NAMES}', help='A name for each run', required=True, nargs='+')
    arg_parser.add_argument(f'--{Args.BASE_OUT_DIR}', help='Output directory', required=True)
    # The two timestamp flags write to the same destination, so whichever is
    # given last on the command line wins.
    arg_parser.add_argument(f'--{Args.ADD_TIME}', help='Add a timestamp to the output directory', default=False, required=False, action='store_true', dest=Args.ADD_TIME)
    arg_parser.add_argument(f'--{Args.NO_ADD_TIME}', help="Don't add a timestamp to the output directory", required=False, action='store_false', dest=Args.ADD_TIME)
    return vars(arg_parser.parse_args())
class LayoutLMConfig(BertConfig):
    """Configuration for LayoutLM: a standard BertConfig extended with 2-D
    position embeddings."""
    model_type = 'layoutlm'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, max_2d_position_embeddings=1024, **kwargs):
        # Forward everything BERT understands to the base class unchanged.
        bert_kwargs = dict(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
        )
        super().__init__(**bert_kwargs, **kwargs)
        # Size of the 2-D position embedding tables — presumably the
        # coordinate range of layout bounding boxes; confirm against model.
        self.max_2d_position_embeddings = max_2d_position_embeddings
def _do_new(bz, opt, parser):
def parse_multi(val):
return _parse_triset(val, checkplus=False, checkminus=False, checkequal=False, splitcomma=True)[0]
kwopts = {}
if opt.blocked:
kwopts['blocks'] = parse_multi(opt.blocked)
if opt.cc:
kwopts['cc'] = parse_multi(opt.cc)
if opt.component:
kwopts['component'] = opt.component
if opt.dependson:
kwopts['depends_on'] = parse_multi(opt.dependson)
if opt.comment:
kwopts['description'] = opt.comment
if opt.groups:
kwopts['groups'] = parse_multi(opt.groups)
if opt.keywords:
kwopts['keywords'] = parse_multi(opt.keywords)
if opt.os:
kwopts['op_sys'] = opt.os
if opt.arch:
kwopts['platform'] = opt.arch
if opt.priority:
kwopts['priority'] = opt.priority
if opt.product:
kwopts['product'] = opt.product
if opt.severity:
kwopts['severity'] = opt.severity
if opt.summary:
kwopts['summary'] = opt.summary
if opt.url:
kwopts['url'] = opt.url
if opt.version:
kwopts['version'] = opt.version
if opt.assigned_to:
kwopts['assigned_to'] = opt.assigned_to
if opt.qa_contact:
kwopts['qa_contact'] = opt.qa_contact
if opt.sub_component:
kwopts['sub_component'] = opt.sub_component
if opt.alias:
kwopts['alias'] = opt.alias
if opt.comment_tag:
kwopts['comment_tags'] = opt.comment_tag
if opt.private:
kwopts['comment_private'] = opt.private
ret = bz.build_createbug(**kwopts)
if opt.fields:
_merge_field_opts(ret, opt.fields, parser)
b = bz.createbug(ret)
b.refresh()
return [b] |
class ThreadMonitor():
    """Callable wrapper that runs `func` on a thread, first resolving any
    arguments that are themselves deferred callables (partials) on their own
    threads, while collecting a call graph of the evaluation.

    NOTE(review): depends on module-level names not visible in this chunk
    (Thread, Queue, SMM, SharedMemory, graph_queue, que,
    ThreadTimeoutException, ThreadTerminatedException) — presumably
    imported/defined earlier in the file.
    """
    source = None  # source text of the wrapped callable; filled in __call__
    graph = {}  # JSON call graph of the most recent invocation

    def __init__(self, func, *args, **kwargs) -> None:
        """Capture the wrapped callable plus scheduling options.

        Required kwargs: shared_memory, sleep, cache, timeout, wait.
        Optional kwarg: execute (default True) — when False, calls become a
        dry run that only propagates (args, kwargs).
        """
        self.func = func
        self.shared_memory = kwargs['shared_memory']
        self.sleep = kwargs['sleep']
        self.cache = kwargs['cache']
        self.timeout = kwargs['timeout']
        self.wait = kwargs['wait']
        self.execute = (kwargs['execute'] if ('execute' in kwargs) else True)
        self.source = None
        self.result_queue = Queue()
        # Preserve the wrapped function's name for introspection/logging.
        self.__name__ = func.__name__

    def get_func(self):
        """Return the wrapped callable."""
        return self.func

    def future(self, callback=None):
        """Return an asyncio Task that resolves with the eventual result.

        The task polls `self.result_queue`; call the attached `.entangle()`
        to block the attached loop until the result arrives.
        """
        asyncio.set_event_loop(asyncio.new_event_loop())
        loop = asyncio.get_event_loop()
        def wait_for_done(future_queue):
            # Generator-based poll loop: yield control until a result shows up.
            logging.debug('wait_for_done: looping: result queue: %s', future_queue)
            while True:
                try:
                    logging.debug('wait_for_done: checking queue')
                    result = future_queue.get_nowait()
                    logging.debug('wait_for_done: got result')
                    return result
                except:
                    (yield)
        _future = loop.create_task(wait_for_done(self.result_queue))
        def entangle():
            loop.run_until_complete(_future)
        _future.loop = loop
        _future.entangle = entangle
        if callback:
            _future.add_done_callback(callback)
        return _future

    def __call__(self, *args, **kwargs) -> Callable:
        """Build and return a partial that, when called, evaluates the wrapped
        function (resolving deferred arguments first) and publishes the result
        on `self.result_queue`.  The partial gains `.graph` and `.future`."""
        logging.info('Thread:invoke: %s', self.func.__name__)
        _func = self.func
        if isinstance(self.func, partial):
            # Unwrap nested partials to reach the underlying function.
            def find_func(pfunc):
                if isinstance(pfunc, partial):
                    return find_func(pfunc.func)
                return pfunc
            _func = find_func(self.func)
        self.source = inspect.getsource(_func)
        def assign_cpu(func, cpu, **kwargs):
            # Pin the current process to one CPU before running func.
            pid = os.getpid()
            cpu_mask = [int(cpu)]
            os.sched_setaffinity(pid, cpu_mask)
            func(**kwargs)
        def invoke(func, *args, **kwargs):
            """Resolve deferred args on threads, then run func and publish the
            result (plus the accumulated call graph)."""
            graphs = []
            json_graph = '{}'
            json_graphs = []
            future_queue = kwargs['future_queue']
            del kwargs['future_queue']
            def get_result(_queue, func, sleep, now, thread, event, wait, timeout):
                # Generator that cooperatively waits for one argument-thread's
                # result, honoring the configured wait/timeout semantics.
                if hasattr(func, '__name__'):
                    name = func.__name__
                else:
                    name = func
                logging.debug('get_result: started %s', now)
                while True:
                    logging.debug('Checking queue for result...')
                    try:
                        logging.debug('Waiting on event for %s with wait %s', name, self.wait)
                        if wait:
                            logging.debug('Wait event timeout in %s seconds.', wait)
                            event.wait(wait)
                            if (not event.is_set()):
                                if thread.is_alive():
                                    thread.terminate()
                                raise ThreadTimeoutException()
                        else:
                            logging.debug('Waiting until complete.')
                            event.wait()
                        logging.debug('Got event for %s', name)
                        logging.debug('Timeout is %s', timeout)
                        if timeout:
                            logging.debug('Pre get(timeout=%s)', timeout)
                            _response = _queue.get(timeout=timeout)
                            _result = _response['result']
                            logging.debug('Post get(timeout=%s)', timeout)
                        else:
                            _response = _queue.get()
                            _result = _response['result']
                        logging.debug('Got result for[%s] %s', name, str(_result))
                        (yield)
                        return _response
                    except multiprocessing.TimeoutError as ex:
                        logging.debug('Timeout exception')
                        raise ThreadTimeoutException() from ex
                    except que.Empty as ex:
                        # No result yet: dead thread means it was terminated.
                        if (thread and (not thread.is_alive())):
                            raise ThreadTerminatedException() from ex
                        (yield time.sleep(sleep))
            scheduler = None
            cpu = None
            if ('cpu' in kwargs):
                cpu = kwargs['cpu']
                del kwargs['cpu']
            if ('scheduler' in kwargs):
                scheduler = kwargs['scheduler']
                del kwargs['scheduler']
            if (len(args) == 0):
                pass
            else:
                # Resolve each positional argument: partials run on their own
                # thread; plain values are wrapped as already-complete results.
                asyncio.set_event_loop(asyncio.new_event_loop())
                loop = asyncio.get_event_loop()
                _tasks = []
                threads = []
                for arg in args:
                    event = multiprocessing.Event()
                    if hasattr(arg, '__name__'):
                        aname = arg.__name__
                    else:
                        aname = arg
                    _queue = Queue()
                    _thread = None
                    if isinstance(arg, partial):
                        logging.info('Thread: %s', arg.__name__)
                        kargs = {'queue': _queue, 'event': event}
                        if self.shared_memory:
                            kargs['smm'] = SMM
                            kargs['sm'] = SharedMemory
                        if cpu:
                            # Take a CPU token from the scheduler and pin.
                            arg_cpu = scheduler.get()
                            logging.debug('ARG CPU SET TO: %s', arg_cpu[1])
                            _thread = Thread(target=assign_cpu, args=(arg, arg_cpu[1]), kwargs=kargs)
                            _thread.cookie = arg_cpu[1]
                        else:
                            logging.debug('NO CPU SET')
                            _thread = Thread(target=arg, kwargs=kargs)
                        if self.shared_memory:
                            _thread.shared_memory = True
                        threads += [_thread]
                        _thread.start()
                    else:
                        # Literal argument: complete immediately.
                        logging.info('Value: %s', aname)
                        _queue.put({'graph': [(func.__name__, aname)], 'result': arg})
                        event.set()
                    now = time.time()
                    if hasattr(arg, '__name__'):
                        graphs += [(func.__name__, arg.__name__)]
                    else:
                        graphs += [(func.__name__, arg)]
                    _tasks += [get_result(_queue, arg, self.sleep, now, _thread, event, self.wait, self.timeout)]
                # Run all waiters cooperatively until every arg has a value.
                tasks = asyncio.gather(*_tasks)
                _args = loop.run_until_complete(tasks)
                try:
                    args = [_arg['result'] for _arg in _args]
                    arg_graph = [_arg['graph'] for _arg in _args]
                    json_graphs = [_arg['json'] for _arg in _args if ('json' in _arg)]
                except:
                    args = [_arg for _arg in _args]
                    arg_graph = []
                    json_graphs = []
                logging.debug('JSON GRAPHs: %s', json_graphs)
                logging.debug('ARG GRAPH: %s', arg_graph)
                def add_to_graph(gr, argr):
                    # Flatten nested graph edge lists into `gr`.
                    for item in argr:
                        if isinstance(item, list):
                            add_to_graph(gr, item)
                        elif isinstance(item, tuple):
                            gr += [item]
                    return gr
                logging.debug('GRAPH: %s', graphs)
                # Merge the sub-graphs reported by argument evaluations under
                # this function's node.
                _G = {}
                _G[func.__name__] = {}
                G = _G[func.__name__]
                for node in graphs:
                    if (len(node) < 2):
                        continue
                    if (node[1] not in G):
                        G[node[1]] = []
                    for graphnode in json_graphs:
                        if (node[1] in graphnode):
                            G[node[1]] = graphnode[node[1]]
                json_graph = json.dumps(_G, indent=4)
                logging.debug('JSON: %s', json_graph)
                _ = [thread.join() for thread in threads]
                if scheduler:
                    # Return CPU tokens borrowed for argument threads.
                    for _thread in threads:
                        logging.debug('Putting CPU: %s back on scheduler queue.', _thread.cookie)
                        scheduler.put(('0', _thread.cookie, 'Y'))
            if cpu:
                # Pin this process before running the wrapped function itself.
                pid = os.getpid()
                cpu_mask = [int(cpu)]
                os.sched_setaffinity(pid, cpu_mask)
            is_proc = False
            if (('proc' in kwargs) and (kwargs['proc'] is True)):
                del kwargs['proc']
                is_proc = True
            event = None
            if ('event' in kwargs):
                event = kwargs['event']
                del kwargs['event']
            if ('queue' in kwargs):
                # Nested invocation: a parent supplied the queue/event used to
                # report our result back up the graph.
                queue = kwargs['queue']
                del kwargs['queue']
                if self.shared_memory:
                    kwargs['smm'] = SMM
                    kwargs['sm'] = SharedMemory
                logging.info('Calling %s', func.__name__)
                logging.debug(args)
                if ((not cpu) and ('cpu' in kwargs)):
                    cpu = kwargs['cpu']
                    del kwargs['cpu']
                if ((not scheduler) and ('scheduler' in kwargs)):
                    scheduler = kwargs['scheduler']
                    del kwargs['scheduler']
                try:
                    if self.execute:
                        logging.debug('thread: execute: %s', self.execute)
                        if is_proc:
                            logging.debug('self.execute: proc: creating thread')
                            _mq = Queue()
                            def func_wrapper(_wf, _wq):
                                # Run _wf; unwrap chained callables; publish.
                                logging.debug('func_wrapper: %s', _wf)
                                result = _wf()
                                logging.debug('func_wrapper: result: %s', result)
                                if callable(result):
                                    return func_wrapper(result, _wq)
                                try:
                                    logging.debug('func_wrapper: putting result on queue')
                                    _wq.put({'graph': [(func.__name__, _wf.__name__)], 'result': result})
                                    logging.debug('func_wrapper: done putting queue')
                                except Exception:
                                    # Last-resort diagnostics: queue put failed.
                                    with open('error.out', 'w') as errfile:
                                        errfile.write(traceback.format_exc())
                                return None
                            _pfunc = partial(func, *args, **kwargs)
                            proc = threading.Thread(target=func_wrapper, args=(_pfunc, _mq))
                            proc.start()
                        else:
                            result = func(*args, **kwargs)
                    else:
                        # Dry run: signal completion and hand back the inputs.
                        if event:
                            logging.debug('Setting event for %s', func.__name__)
                            event.set()
                        return (args, kwargs)
                finally:
                    if (scheduler and cpu):
                        logging.debug('Putting CPU: %s back on scheduler queue.', cpu)
                        scheduler.put(['0', cpu, 'N'])
                if self.cache:
                    pass
                logging.debug('PUT GRAPH [%s]: %s', func.__name__, graphs)
                logging.debug('PUT GRAPH JSON [%s]: %s', func.__name__, json_graph)
                queue.put({'graph': graphs, 'json': json.loads(json_graph), 'result': result})
                if event:
                    logging.debug('Setting event for %s', func.__name__)
                    event.set()
            else:
                # Top-level invocation: run on a private thread and collect the
                # result from a private queue.
                if self.shared_memory:
                    kwargs['smm'] = SMM
                    kwargs['sm'] = SharedMemory
                logging.debug('Calling function %s with: %s', func.__name__, str(args))
                _mq = Queue()
                def func_wrapper(_wf, _wq):
                    # Same as above, but also feeds future_queue for proc mode.
                    logging.debug('func_wrapper: %s', _wf)
                    result = _wf()
                    logging.debug('func_wrapper: result: %s', result)
                    if callable(result):
                        return func_wrapper(result, _wq)
                    try:
                        logging.debug('func_wrapper: putting result on queue')
                        _wq.put({'graph': [(func.__name__, _wf.__name__)], 'result': result})
                        if is_proc:
                            future_queue.put(result)
                        logging.debug('func_wrapper: done putting queue')
                    except Exception:
                        with open('error.out', 'w') as errfile:
                            errfile.write(traceback.format_exc())
                    return None
                pfunc = partial(func, *args, **kwargs)
                pfunc.__name__ = func.__name__
                try:
                    if self.execute:
                        logging.debug('thread: execute2: %s', self.execute)
                        proc = threading.Thread(target=func_wrapper, args=(pfunc, _mq))
                        proc.start()
                        logging.debug('Executing function %s with timeout %s', func, self.timeout)
                        if (not is_proc):
                            proc.join(self.timeout)
                    else:
                        if event:
                            logging.debug('Setting event for %s', func.__name__)
                            event.set()
                        return (args, kwargs)
                finally:
                    if (scheduler and cpu):
                        logging.debug('Putting CPU: %s back on scheduler queue.', cpu)
                        scheduler.put(('0', cpu, 'Y'))
                logging.debug('thread: waiting for result on queue')
                sys.path.append(os.getcwd())
                if (not is_proc):
                    response = _mq.get()
                    result = response['result']
                    if (len(json_graphs) > 0):
                        callgraph = {func.__name__: json_graphs}
                        graph_queue.put(callgraph)
                        self.graph = json.dumps(callgraph)
                    logging.debug('thread: got result from queue')
                if ((not is_proc) and proc.is_alive()):
                    # join(timeout) elapsed but the thread is still running.
                    proc.join()
                    raise ThreadTimeoutException()
                if is_proc:
                    return True
            future_queue.put(result)
            return result
        kwargs['future_queue'] = self.result_queue
        pfunc = partial(invoke, self.func, *args, **kwargs)
        if hasattr(self.func, '__name__'):
            pfunc.__name__ = self.func.__name__
        else:
            pfunc.__name__ = 'thread'
        def get_graph(wait=True):
            """Fetch the call graph; wait=False returns an asyncio task that
            polls for it instead of blocking."""
            if wait:
                return graph_queue.get()
            else:
                def wait_for_graph():
                    logging.debug('wait_for_graph: looping')
                    while True:
                        try:
                            logging.debug('wait_for_graph: checking queue')
                            graph = graph_queue.get_nowait()
                            logging.debug('wait_for_graph: got result')
                            return graph
                        except:
                            (yield)
                loop = asyncio.get_event_loop()
                task = loop.create_task(wait_for_graph())
                return task
        pfunc.graph = get_graph
        pfunc.future = self.future
        return pfunc
class SnekAPITestCase(testing.TestCase):
    """Falcon test case with NsJail patched out so eval requests return a
    canned successful EvalResult instead of sandboxing anything."""
    def setUp(self):
        super().setUp()
        # Replace the sandbox with an autospec mock for the whole test.
        self.patcher = mock.patch('snekbox.api.snekapi.NsJail', autospec=True)
        self.mock_nsjail = self.patcher.start()
        self.mock_nsjail.return_value.python3.return_value = EvalResult(args=[], returncode=0, stdout='output', stderr='error')
        self.addCleanup(self.patcher.stop)
        # Silence nsjail's info/debug chatter during tests.
        logging.getLogger('snekbox.nsjail').setLevel(logging.WARNING)
        self.app = SnekAPI()
class WindowsFile(File):
    """File backend for Windows hosts, implemented by shelling out to
    PowerShell and parsing its textual output.

    Methods with no PowerShell equivalent here raise NotImplementedError.
    NOTE(review): the `\\"` sequences in the command strings look
    double-escaped in this extraction — verify against the original source.
    """
    def exists(self):
        # True when the path exists (file or directory).
        return (self.check_output('powershell -command \\"Test-Path \'%s\'\\"', self.path) == 'True')
    def is_file(self):
        return (self.check_output('powershell -command \\"(Get-Item \'%s\') -is [System.IO.FileInfo]\\"', self.path) == 'True')
    def is_directory(self):
        return (self.check_output('powershell -command \\"(Get-Item \'%s\') -is [System.IO.DirectoryInfo]\\"', self.path) == 'True')
    def is_pipe(self):
        raise NotImplementedError
    def is_socket(self):
        raise NotImplementedError
    def is_symlink(self):
        # Reparse-point attribute is how NTFS marks links/junctions.
        return (self.check_output('powershell -command \\"(Get-Item -Path \'%s\').Attributes -band [System.IO.FileAttributes]::ReparsePoint\\"', self.path) == 'True')
    def linked_to(self):
        return self.check_output('powershell -command \\"(Get-Item -Path \'%s\' -ReadOnly).FullName\\"', self.path)
    def user(self):
        raise NotImplementedError
    def uid(self):
        raise NotImplementedError
    def group(self):
        raise NotImplementedError
    def gid(self):
        raise NotImplementedError
    def mode(self):
        raise NotImplementedError
    def contains(self, pattern):
        # Select-String prints matches; any stdout means the pattern was found.
        return (self.run_test('powershell -command \\"Select-String -Path \'%s\' -Pattern \'%s\'\\"', self.path, pattern).stdout != '')
    def md5sum(self):
        raise NotImplementedError
    def sha256sum(self):
        raise NotImplementedError
    def _get_content(self, decode):
        # Shared reader; `decode` picks text (str) vs raw bytes output.
        out = self.run_expect([0], 'powershell -command \\"cat -- \'%s\'\\"', self.path)
        if decode:
            return out.stdout
        return out.stdout_bytes
    def content(self):
        # Raw bytes of the file.
        return self._get_content(False)
    def content_string(self):
        # Decoded text of the file.
        return self._get_content(True)
    def mtime(self):
        # Parses PowerShell's default LastWriteTime formatting; this is
        # locale-dependent — presumably only valid for en-US hosts.
        date_time_str = self.check_output('powershell -command \\"Get-ChildItem -Path \'%s\' | Select-Object -ExpandProperty LastWriteTime\\"', self.path)
        return datetime.datetime.strptime(date_time_str.strip(), '%A, %B %d, %Y %I:%M:%S %p')
    def size(self):
        # File size in bytes.
        return int(self.check_output('powershell -command \\"Get-Item -Path \'%s\' | Select-Object -ExpandProperty Length\\"', self.path))
    def listdir(self):
        # Directory entry names, one per output line.
        out = self.check_output('powershell -command \\"Get-ChildItem -Path \'%s\' | Select-Object -ExpandProperty Name\\"', self.path)
        return [item.strip() for item in out.strip().split('\n')]
def test_used_with_class_scope(testdir: Any) -> None:
    """A class-scoped, autouse mocker fixture should patch random.randint for
    unittest-style tests when asyncio_mode=auto is configured.

    NOTE(review): the embedded file below shows `(autouse=True, scope="class")`
    with no decorator name — the `@pytest.fixture` line appears to have been
    lost during extraction; confirm against the original test.
    """
    testdir.makeini('\n [pytest]\n asyncio_mode=auto\n ')
    testdir.makepyfile('\n import pytest\n import random\n import unittest\n\n def get_random_number():\n return random.randint(0, 1)\n\n (autouse=True, scope="class")\n def randint_mock(class_mocker):\n return class_mocker.patch("random.randint", lambda x, y: 5)\n\n class TestGetRandomNumber(unittest.TestCase):\n def test_get_random_number(self):\n assert get_random_number() == 5\n ')
    # Subprocess run so the ini-configured asyncio mode takes effect cleanly.
    result = testdir.runpytest_subprocess()
    assert ('AssertionError' not in result.stderr.str())
    result.stdout.fnmatch_lines('* 1 passed in *')
class Migration(migrations.Migration):
    """Add `describe_as_sponsor` and `link_to_sponsors_page` boolean flags to
    both logo-placement models."""

    dependencies = [('sponsors', '0059_auto__1503')]

    # Same two flags on both models, in the original declaration order
    # (model-major, field-minor).
    operations = [
        migrations.AddField(model_name=model_name, name=field_name, field=models.BooleanField(default=False))
        for model_name in ('logoplacement', 'logoplacementconfiguration')
        for field_name in ('describe_as_sponsor', 'link_to_sponsors_page')
    ]
# NOTE(review): the three bare expression statements below look like stripped
# decorators from the original source (e.g. a @resource(...) route plus two
# @path_param(...) declarations); kept verbatim.
('/v1/repository/<apirepopath:repository>/permissions/user/<username>/transitive')
_param('repository', 'The full path of the repository. e.g. namespace/name')
_param('username', 'The username of the user to which the permissions apply')
class RepositoryUserTransitivePermission(RepositoryParamResource):
    """API resource listing every permission role a user holds on a repository
    (named "transitive", so presumably including team-derived roles)."""
    _repo_admin(allow_for_superuser=True)  # NOTE(review): likely a stripped admin-requirement decorator
    ('getUserTransitivePermission')  # NOTE(review): likely a stripped nickname decorator
    def get(self, namespace_name, repository_name, username):
        """Return all roles for `username` on the repository; 404 when none."""
        roles = model.get_repo_roles(username, namespace_name, repository_name)
        if (not roles):
            raise NotFound
        return {'permissions': [r.to_dict() for r in roles]}
def test_add_without_query(local_client: QdrantClient = None, collection_name: str = 'demo_collection', docs: List[str] = None):
    """`add` with FastEmbed-embedded documents should insert one point per doc.

    Skipped when FastEmbed is not installed.
    """
    if local_client is None:
        # Create the client per call: the original default
        # `QdrantClient(':memory:')` was evaluated once at import time and
        # shared (mutable-default-argument bug), so state leaked across runs.
        local_client = QdrantClient(':memory:')
    if docs is None:
        docs = ['Qdrant has Langchain integrations', 'Qdrant also has Llama Index integrations']
    if not local_client._is_fastembed_installed:
        pytest.skip('FastEmbed is not installed, skipping test')
    local_client.add(collection_name=collection_name, documents=docs)
    assert local_client.count(collection_name).count == 2
class LoaderParser():
    """Mixin that lazily parses `self.data` using the parser registered for
    the item's catalog format, memoising the result on `self.obj`."""

    # Registry mapping catalog format name -> parser callable(data, item).
    parsers = {}

    def parse(self):
        """Parse this item's data once; later calls return the cached object."""
        if hasattr(self, 'obj'):
            return self.obj
        parsed = self.parsers[self.catalog.format](self.data, self)
        # Back-link the parsed object to its source item and catalog name.
        parsed.item = self
        parsed.catalog = self.catalog.name
        self.obj = parsed
        return parsed
class X448PrivateKey(metaclass=abc.ABCMeta):
    """Abstract X448 (Curve448 Diffie-Hellman) private key.

    NOTE(review): decorators appear to have been stripped during extraction —
    `generate`/`from_private_bytes` take `cls`, so were presumably
    @classmethod, and the trailing body-less methods @abc.abstractmethod.
    """
    def generate(cls) -> X448PrivateKey:
        """Generate a new X448 private key via the OpenSSL backend."""
        from cryptography.hazmat.backends.openssl.backend import backend
        if (not backend.x448_supported()):
            raise UnsupportedAlgorithm('X448 is not supported by this version of OpenSSL.', _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM)
        return rust_openssl.x448.generate_key()
    def from_private_bytes(cls, data: bytes) -> X448PrivateKey:
        """Load an X448 private key from its raw byte encoding."""
        from cryptography.hazmat.backends.openssl.backend import backend
        if (not backend.x448_supported()):
            raise UnsupportedAlgorithm('X448 is not supported by this version of OpenSSL.', _Reasons.UNSUPPORTED_EXCHANGE_ALGORITHM)
        return rust_openssl.x448.from_private_bytes(data)
    def public_key(self) -> X448PublicKey:
        """Return the public key corresponding to this private key."""
    def private_bytes(self, encoding: _serialization.Encoding, format: _serialization.PrivateFormat, encryption_algorithm: _serialization.KeySerializationEncryption) -> bytes:
        """Serialize the private key in the requested encoding and format."""
    def private_bytes_raw(self) -> bytes:
        """Return the raw byte serialization of the private key."""
    def exchange(self, peer_public_key: X448PublicKey) -> bytes:
        """Perform an X448 key exchange and return the shared secret."""
def print_node_balances(chain_state: Any, token_network_address: TokenNetworkAddress, translator: Optional[Translator]=None) -> None:
    """Print each partner's sent/received balances for the token network,
    followed by this node's total sent amount.

    `translator`, when given, maps the pretty-printed addresses.
    """
    trans = (lambda s: s) if translator is None else translator.translate
    balances = get_node_balances(chain_state, token_network_address)
    for entry in balances:
        click.secho(f'{trans(pex(entry[0]))} ->{entry[1]} <-{entry[2]}', fg='blue')
    click.secho(f'Sum {trans(pex(chain_state.our_address))}: {sum((b[1] for b in balances))}')
def old_get_auth(sock, dname, host, dno):
    """Fetch the X display's authentication cookie by shelling out to
    `xauth list`.

    Returns (auth_name, auth_data); both default to b'' when no entry exists
    or xauth is unavailable.
    """
    auth_name = auth_data = b''
    try:
        data = os.popen(('xauth list %s 2>/dev/null' % dname)).read()
        lines = data.split('\n')
        if (len(lines) >= 1):
            # Each xauth line is: <display> <protocol-name> <hex-cookie>
            parts = lines[0].split(None, 2)
            if (len(parts) == 3):
                auth_name = parts[1]
                hexauth = parts[2]
                # The original built the cookie with `auth + chr(...)` on a
                # bytes accumulator, which raises TypeError on Python 3;
                # bytes.fromhex performs the same hex decoding correctly.
                auth_data = bytes.fromhex(hexauth)
    except os.error:
        pass
    return (auth_name, auth_data)
def test_new_window(conn):
    """Creating a window yields an XWindow with the requested geometry, and
    killing its client makes further requests fail."""
    win = conn.create_window(1, 2, 640, 480)
    assert isinstance(win, window.XWindow)
    geom = win.get_geometry()
    assert (geom.x, geom.y, geom.width, geom.height) == (1, 2, 640, 480)
    win.kill_client()
    # The connection to this window's client is gone; queries must now raise.
    with pytest.raises(xcffib.ConnectionException):
        win.get_geometry()
def test_match_benchmark(benchmark, tabbed_browser, qtbot, mode_manager, qapp, config_stub):
    """Benchmark hint filtering: repeatedly type then clear a partial hint key
    on a page with many hintable elements."""
    tab = tabbed_browser.widget.tabs[0]
    with qtbot.wait_signal(tab.load_finished):
        tab.load_url(QUrl('qute://testdata/data/hints/benchmark.html'))
    # Deterministic hint labels so each run filters the same strings.
    config_stub.val.hints.scatter = False
    manager = qutebrowser.browser.hints.HintManager(win_id=0)
    with qtbot.wait_signal(mode_manager.entered):
        manager.start()
    def bench():
        # Type one key (filters hints), then an empty key to reset, pumping
        # the Qt event loop after each so the filtering actually runs.
        manager.handle_partial_key('a')
        qapp.processEvents()
        manager.handle_partial_key('')
        qapp.processEvents()
    benchmark(bench)
    with qtbot.wait_signal(mode_manager.left):
        mode_manager.leave(usertypes.KeyMode.hint)
class UnionType(BaseInstance):
    """Inference node for a PEP 604 union (`X | Y`, i.e. types.UnionType).

    NOTE(review): `__repr__`/`__str__` use `self._proxied`, which is not set
    here — presumably provided by BaseInstance.
    """
    def __init__(self, left: ((UnionType | nodes.ClassDef) | nodes.Const), right: ((UnionType | nodes.ClassDef) | nodes.Const), parent: (nodes.NodeNG | None)=None) -> None:
        super().__init__()
        self.parent = parent
        # Operands of the `|`; either side may itself be a union.
        self.left = left
        self.right = right
    def callable(self) -> Literal[False]:
        # A union object is never callable.
        return False
    def bool_value(self, context: (InferenceContext | None)=None) -> Literal[True]:
        # Union objects are always truthy.
        return True
    def pytype(self) -> Literal['types.UnionType']:
        return 'types.UnionType'
    def display_type(self) -> str:
        return 'UnionType'
    def __repr__(self) -> str:
        return f'<UnionType({self._proxied.name}) l.{self.lineno} at 0x{id(self)}>'
    def __str__(self) -> str:
        return f'UnionType({self._proxied.name})'
# NOTE(review): the decorator survived extraction only as the residue
# `.parametrize(...)` (a SyntaxError); restored as @pytest.mark.parametrize.
# `mock_emit_session_update` presumably comes from a @patch decorator that is
# not visible in this chunk.
@pytest.mark.parametrize('case', ['to_false', 'to_true_free', 'to_true_busy'])
def test_admin_session_update_layout_generation(mock_emit_session_update: MagicMock, clean_database, flask_app, case):
    """UPDATE_LAYOUT_GENERATION: clearing the flag resets world order; claiming
    it records the user and the given order; a claim while another user holds
    it raises InvalidActionError and emits nothing."""
    user1 = database.User.create(id=1234, name='The Name')
    user2 = database.User.create(id=1235, name='Other')
    session = database.MultiplayerSession.create(
        id=1,
        name='Debug',
        state=MultiplayerSessionVisibility.VISIBLE,
        creator=user1,
        generation_in_progress=(user2 if case == 'to_true_busy' else None),
    )
    # NOTE(review): the original UUID literals were truncated to the invalid
    # '-0000-0000-0000-' during extraction; replaced with distinct valid
    # placeholders (the exact values only need to be unique).
    w1 = database.World.create(session=session, name='W1', preset='{}', uuid=uuid.UUID('00000000-0000-0000-0000-000000000001'))
    w2 = database.World.create(session=session, name='W2', preset='{}', uuid=uuid.UUID('00000000-0000-0000-0000-000000000002'))
    w3 = database.World.create(session=session, name='W3', preset='{}', uuid=uuid.UUID('00000000-0000-0000-0000-000000000003'))
    database.MultiplayerMembership.create(user=user1, session=session, admin=True)
    sa = MagicMock()
    sa.get_current_user.return_value = user1
    order = [str(w.uuid) for w in [w2, w1, w3]]
    expected_order = {w.name: None for w in [w1, w2, w3]}
    if case == 'to_true_busy':
        expectation = pytest.raises(error.InvalidActionError, match='Generation already in progress by Other.')
        expected_user = user2
    else:
        expectation = contextlib.nullcontext()
        if case == 'to_false':
            expected_user = None
            order = []
        else:
            expected_user = user1
            expected_order = {'W1': 1, 'W2': 0, 'W3': 2}
    with expectation, flask_app.test_request_context():
        session_admin.admin_session(sa, 1, SessionAdminGlobalAction.UPDATE_LAYOUT_GENERATION.value, order)
    if case == 'to_true_busy':
        mock_emit_session_update.assert_not_called()
    else:
        mock_emit_session_update.assert_called_once_with(session)
    assert database.MultiplayerSession.get_by_id(1).generation_in_progress == expected_user
    worlds = {w.name: w.order for w in database.World.select()}
    assert worlds == expected_order
class F15_Raid(F14_Raid):
    """Fedora 15 `raid` kickstart command: F14 behavior plus a --label option."""
    removedKeywords = F14_Raid.removedKeywords
    removedAttrs = F14_Raid.removedAttrs
    def _getParser(self):
        # Extend the inherited F14 parser with the option added in F15.
        op = F14_Raid._getParser(self)
        op.add_argument('--label', version=F15, help='\n Specify the label to give to the filesystem to be made.\n If the given label is already in use by another\n filesystem, a new label will be created.')
        return op
class TestCliffordGroup():
    """Checks that the generated single-qubit Clifford group has the right
    size, contains no duplicates (up to global phase), and normalises the
    Pauli group."""
    clifford = gates.qubit_clifford_group()
    pauli = [qutip.qeye(2), qutip.sigmax(), qutip.sigmay(), qutip.sigmaz()]

    def test_single_qubit_group_dimension_is_24(self):
        # The single-qubit Clifford group has exactly 24 elements.
        assert (len(self.clifford) == 24)

    def test_all_elements_different(self):
        # Compare all pairs modulo a global phase.
        clifford = [_remove_global_phase(gate) for gate in self.clifford]
        for (i, gate) in enumerate(clifford):
            for other in clifford[(i + 1):]:
                assert (not np.allclose(gate.full(), other.full(), atol=0.001))

    # NOTE(review): restored from the extraction residue `.parametrize(...)`,
    # which was a bare SyntaxError line.
    @pytest.mark.parametrize('gate', gates.qubit_clifford_group())
    def test_gate_normalises_pauli_group(self, gate):
        """Conjugating every Pauli by a Clifford gate must permute the Pauli
        group (each conjugate matches exactly one remaining Pauli)."""
        pauli_gates = [_remove_global_phase(x) for x in self.pauli]
        normalised = [_remove_global_phase(((gate * pauli) * gate.dag())) for pauli in self.pauli]
        for gate in normalised:
            for (i, pauli) in enumerate(pauli_gates):
                if np.allclose(gate.full(), pauli.full(), atol=1e-10):
                    del pauli_gates[i]
                    break
        assert (len(pauli_gates) == 0)
def reshape_patch_back(patch_tensor, patch_size):
    """Invert patch extraction: fold the channel-packed patches of a
    (batch, seq, ph, pw, patch*patch*C) tensor back into full frames of shape
    (batch, seq, ph*patch, pw*patch, C)."""
    assert patch_tensor.ndim == 5
    batch, seq_len, patch_h, patch_w, channels = np.shape(patch_tensor)
    img_channels = channels // (patch_size * patch_size)
    # Unpack the channel axis into (row-in-patch, col-in-patch, channel) ...
    expanded = np.reshape(patch_tensor, [batch, seq_len, patch_h, patch_w, patch_size, patch_size, img_channels])
    # ... interleave patch rows/cols with the patch grid ...
    interleaved = np.transpose(expanded, [0, 1, 2, 4, 3, 5, 6])
    # ... and collapse grid and in-patch axes into full image height/width.
    return np.reshape(interleaved, [batch, seq_len, patch_h * patch_size, patch_w * patch_size, img_channels])
def test_upload_uuid_in_batches(local_client, remote_client):
    """Upserting one Batch whose ids are uuid4 strings must leave local and
    remote collections identical."""
    records = generate_fixtures(UPLOAD_NUM_VECTORS)
    # Regroup per-record named vectors into per-name columns for Batch.
    grouped_vectors = defaultdict(list)
    for record in records:
        for vector_name, vector in record.vector.items():
            grouped_vectors[vector_name].append(vector)
    batch = models.Batch(
        ids=[str(uuid.uuid4()) for _ in records],
        vectors=grouped_vectors,
        payloads=[record.payload for record in records],
    )
    for client in (local_client, remote_client):
        client.upsert(COLLECTION_NAME, batch)
    compare_collections(local_client, remote_client, UPLOAD_NUM_VECTORS)
def test_size_hint(view):
    """The view's height hint should grow linearly: two messages take exactly
    twice the height of one."""
    def show_and_measure(text):
        view.show_message(message.MessageInfo(usertypes.MessageLevel.info, text))
        return view.sizeHint().height()
    first_height = show_and_measure('test1')
    assert first_height > 0
    second_height = show_and_measure('test2')
    assert second_height == first_height * 2
def list_longest_drawdowns(prices_tms: QFSeries, count: int) -> List[Tuple[(datetime, datetime)]]:
    """Return the `count` longest drawdown periods of a price series.

    A period starts when the drawdown series leaves zero and ends when it
    returns to zero (or at the last date if still in drawdown).  Results are
    (start, end) tuples sorted by duration, longest first.
    """
    result = []
    drawdown_timeseries = drawdown_tms(prices_tms)
    start_date = None
    # `.items()` replaces `.iteritems()`, which was removed in pandas 2.0.
    for (date, value) in drawdown_timeseries.items():
        if value == 0:
            if start_date is not None:
                result.append((start_date, date))
                start_date = None
        elif start_date is None:
            start_date = date
    if start_date is not None:
        # Drawdown still open at the end of the series: close it at the last date.
        result.append((start_date, drawdown_timeseries.index[-1]))
    # Negative duration as key => longest periods sort first.
    result.sort(key=(lambda period: (period[0] - period[1])))
    return result[:count]
# NOTE(review): `.requires_user_action` below looks like a stripped decorator
# (only the trailing attribute survived extraction); kept verbatim even though
# it is not valid syntax on its own.
.requires_user_action
class ContentValignTestCase(InteractiveTestCase):
    """Interactive (human-verified) checks of window content vertical
    alignment for resizable windows."""
    def test_content_valign_bottom(self):
        # Window created hidden, then shown, so content_valign applies on map.
        self.window = TestWindow(resizable=True, visible=False, content_valign='bottom')
        self.window.set_visible()
        app.run()
        self.user_verify('Test passed?', take_screenshot=False)
    def test_content_valign_center(self):
        self.window = TestWindow(resizable=True, visible=False, content_valign='center')
        self.window.set_visible()
        app.run()
        self.user_verify('Test passed?', take_screenshot=False)
def inference_segmentor(model, img):
    """Run a segmentation model on one image (path or loaded array).

    Builds the model's configured test pipeline (with LoadImage substituted
    for the first step), collates a single-sample batch, moves it to the
    model's device when on CUDA, and returns the model's rescaled prediction.
    """
    cfg = model.cfg
    device = next(model.parameters()).device
    # Replace the configured loader with LoadImage so `img` may be an
    # in-memory array as well as a file path.
    test_pipeline = ([LoadImage()] + cfg.data.test.pipeline[1:])
    test_pipeline = Compose(test_pipeline)
    data = dict(img=img)
    data = test_pipeline(data)
    data = collate([data], samples_per_gpu=1)
    if next(model.parameters()).is_cuda:
        # scatter moves the collated batch onto the model's GPU.
        data = scatter(data, [device])[0]
    else:
        # CPU path: unwrap the DataContainer metadata manually.
        data['img_metas'] = data['img_metas'][0].data
    with torch.no_grad():
        result = model(return_loss=False, rescale=True, **data)
    return result
def test_config_file(tmp_path):
    """`exec_before` from the config file should run before the command, making
    the `C` alias (collections.Counter) available to `apply`."""
    config_body = '\n exec_before = "from collections import Counter as C"\n '
    config_file_path = (tmp_path / 'config.toml')
    config_file_path.write_text(config_body)
    args = ['apply', 'C(x)']
    stdin = '1\n2\n'.encode()
    env = dict(os.environ)
    # Point the tool's config-dir env var at tmp_path so it finds config.toml.
    # NOTE(review): the key/value are bytes while os.environ yields str —
    # presumably helpers.run normalizes this; confirm.
    env.update({f'{utils.NAME}_CONFIG_DIR'.upper().encode(): str(tmp_path).encode()})
    output = helpers.run(args, input=stdin, env=env).decode()
    assert output.startswith('Counter')
def recv_batch(batch_queue, replay_ip, device):
    """Continuously pull pickled replay batches over ZeroMQ, convert them to
    torch tensors on `device`, and push them onto `batch_queue`.

    A background thread keeps up to 3 outstanding requests to the replay
    server so tensor conversion overlaps with network transfer.
    """
    def _thunk(thread_queue):
        # Network loop: request batches from the replay server over a DEALER
        # socket and hand the raw frames to the main loop via thread_queue.
        ctx = zmq.Context.instance()
        socket = ctx.socket(zmq.DEALER)
        socket.setsockopt(zmq.IDENTITY, pickle.dumps('dealer-{}'.format(os.getpid())))
        socket.connect('tcp://{}:51003'.format(replay_ip))
        outstanding = 0
        while True:
            # An empty frame acts as a batch request.
            socket.send(b'')
            outstanding += 1
            if (outstanding < 3):
                # Pipeline: poll non-blocking while fewer than 3 in flight.
                try:
                    data = socket.recv(zmq.NOBLOCK, copy=False)
                except zmq.Again:
                    continue
            else:
                # At the pipelining limit: block until a reply arrives.
                data = socket.recv(copy=False)
            thread_queue.put(data)
            outstanding -= 1
            data = None
    thread_queue = queue.Queue(maxsize=3)
    threading.Thread(target=_thunk, args=(thread_queue,)).start()
    while True:
        data = thread_queue.get()
        batch = pickle.loads(data)
        (states, actions, rewards, next_states, dones, weights, idxes) = batch
        # Stack per-sample arrays before tensor conversion.
        states = np.array([np.array(state) for state in states])
        states = torch.FloatTensor(states).to(device)
        actions = torch.LongTensor(actions).to(device)
        rewards = torch.FloatTensor(rewards).to(device)
        next_states = np.array([np.array(state) for state in next_states])
        next_states = torch.FloatTensor(next_states).to(device)
        dones = torch.FloatTensor(dones).to(device)
        weights = torch.FloatTensor(weights).to(device)
        batch = [states, actions, rewards, next_states, dones, weights, idxes]
        batch_queue.put(batch)
        # Drop local references so the buffers can be freed promptly.
        (data, batch) = (None, None)
def get_exp_subspace(fea_weight_lst, w2s_ratio, real_exp_len=None):
    """Convert per-anomaly feature-weight vectors into explanation subspaces.

    `w2s_ratio` selects the strategy: 'real_len' uses the ground-truth
    explanation length per anomaly (requires `real_exp_len`), 'auto' uses a
    sqrt(2/dim) ratio, 'pn' uses the positive/negative variant, and any other
    value is passed through as a fixed ratio.
    """
    subspaces = []
    dim = len(fea_weight_lst[0])
    for idx, weights in enumerate(fea_weight_lst):
        if w2s_ratio == 'real_len':
            if real_exp_len is None:
                raise ValueError('not give real exp len')
            subspaces.append(weight2subspace(weights, num=real_exp_len[idx]))
        elif w2s_ratio == 'auto':
            subspaces.append(weight2subspace(weights, ratio=math.sqrt(2 / dim)))
        elif w2s_ratio == 'pn':
            subspaces.append(weight2subspace_pn(weights))
        else:
            subspaces.append(weight2subspace(weights, ratio=w2s_ratio))
    return subspaces
# NOTE(review): the original decorator survived extraction only as the residue
# `_cache(maxsize=1000, typed=False)`; restored here as functools.lru_cache,
# imported locally so this block is self-contained.
from functools import lru_cache


@lru_cache(maxsize=1000, typed=False)
def get_column_picklist(table_name: str, column_name: str, db_path: str) -> list:
    """Return the distinct values of `column_name` in `table_name` of the
    SQLite database at `db_path`.

    Text values are normalized to str (utf-8, falling back to latin-1);
    results are memoised per (table, column, path).
    """
    fetch_sql = 'SELECT DISTINCT `{}` FROM `{}`'.format(column_name, table_name)
    # Connect outside the try so `conn` is always bound when `finally` runs
    # (the original raised NameError in `finally` if connect() itself failed).
    conn = sqlite3.connect(db_path, uri=True)
    try:
        # Fetch raw bytes so we control the decoding (and its fallback).
        conn.text_factory = bytes
        c = conn.cursor()
        c.execute(fetch_sql)
        picklist = set()
        for x in c.fetchall():
            if isinstance(x[0], str):
                picklist.add(x[0].encode('utf-8'))
            elif isinstance(x[0], bytes):
                try:
                    picklist.add(x[0].decode('utf-8'))
                except UnicodeDecodeError:
                    picklist.add(x[0].decode('latin-1'))
            else:
                # Numeric/NULL values pass through unchanged.
                picklist.add(x[0])
        picklist = list(picklist)
    finally:
        conn.close()
    return picklist
class _TensorDictKeysView():
    """Iterable view over a tensordict's keys.

    Behavior is controlled by two flags:
      * ``include_nested`` -- recurse into nested tensor collections; nested
        keys are yielded as tuples of strings, top-level keys as plain strings.
      * ``leaves_only`` -- skip entries that are themselves tensor collections
        and yield only leaves (as decided by the ``is_leaf`` predicate).
    """

    def __init__(self, tensordict: T, include_nested: bool, leaves_only: bool, is_leaf: Callable[([Type], bool)]=None) -> None:
        self.tensordict = tensordict
        self.include_nested = include_nested
        self.leaves_only = leaves_only
        # Fall back to the module-level default predicate when none is given.
        if (is_leaf is None):
            is_leaf = _default_is_leaf
        self.is_leaf = is_leaf

    def __iter__(self) -> (Iterable[str] | Iterable[tuple[(str, ...)]]):
        """Yield keys according to the include_nested/leaves_only flags."""
        if (not self.include_nested):
            if self.leaves_only:
                for key in self._keys():
                    target_class = self.tensordict.entry_class(key)
                    # Sub-tensordicts are not leaves: skip them.
                    if _is_tensor_collection(target_class):
                        continue
                    (yield key)
            else:
                (yield from self._keys())
        else:
            # Nested mode: collapse single-element tuples back to plain strings.
            (yield from ((key if (len(key) > 1) else key[0]) for key in self._iter_helper(self.tensordict)))

    def _iter_helper(self, tensordict: T, prefix: (str | None)=None) -> (Iterable[str] | Iterable[tuple[(str, ...)]]):
        """Recursively yield keys of ``tensordict`` as tuples prefixed by ``prefix``."""
        for (key, value) in self._items(tensordict):
            full_key = self._combine_keys(prefix, key)
            cls = value.__class__
            is_leaf = self.is_leaf(cls)
            if (self.include_nested and (not is_leaf)):
                # Materialize the subtree's keys before yielding them.
                subkeys = tuple(self._iter_helper(value, prefix=full_key))
                (yield from subkeys)
            # Non-leaf keys themselves are yielded unless leaves_only is set.
            if ((not self.leaves_only) or is_leaf):
                (yield full_key)

    def _combine_keys(self, prefix: (tuple | None), key: str) -> tuple:
        """Append ``key`` to the tuple ``prefix`` (or start a new 1-tuple)."""
        if (prefix is not None):
            return (prefix + (key,))
        return (key,)

    def __len__(self) -> int:
        # O(n): counts by exhausting the iterator.
        return sum((1 for _ in self))

    def _items(self, tensordict: (TensorDictBase | None)=None) -> Iterable[tuple[(NestedKey, CompatibleType)]]:
        """Dispatch (key, value) iteration on the concrete tensordict type."""
        if (tensordict is None):
            tensordict = self.tensordict
        if (isinstance(tensordict, TensorDict) or is_tensorclass(tensordict)):
            return tensordict._tensordict.items()
        # Imported lazily to avoid circular imports at module load time.
        from tensordict.nn import TensorDictParams
        if isinstance(tensordict, TensorDictParams):
            return tensordict._param_td.items()
        if isinstance(tensordict, KeyedJaggedTensor):
            return tuple(((key, tensordict[key]) for key in tensordict.keys()))
        from tensordict._lazy import _CustomOpTensorDict, _iter_items_lazystack, LazyStackedTensorDict
        if isinstance(tensordict, LazyStackedTensorDict):
            return _iter_items_lazystack(tensordict, return_none_for_het_values=True)
        if isinstance(tensordict, _CustomOpTensorDict):
            return ((key, tensordict._get_str(key, NO_DEFAULT)) for key in tensordict._source.keys())
        raise NotImplementedError(type(tensordict))

    def _keys(self) -> _TensorDictKeysView:
        """Raw (non-recursive) key view of the underlying storage dict."""
        return self.tensordict._tensordict.keys()

    def __contains__(self, key: NestedKey) -> bool:
        """Membership test honoring the same flags as iteration.

        Accepts plain string keys and (when ``include_nested``) tuple keys.
        """
        key = _unravel_key_to_tuple(key)
        if (not key):
            raise TypeError(_NON_STR_KEY_ERR)
        if isinstance(key, str):
            if (key in self._keys()):
                if self.leaves_only:
                    # Present but a sub-tensordict: not a leaf.
                    return (not _is_tensor_collection(self.tensordict.entry_class(key)))
                return True
            return False
        else:
            if (len(key) == 1):
                return (key[0] in self._keys())
            elif self.include_nested:
                if (key[0] in self._keys()):
                    entry_type = self.tensordict.entry_class(key[0])
                    if (entry_type in (Tensor, _MemmapTensor)):
                        # Plain tensors cannot have sub-keys.
                        return False
                    if (entry_type is KeyedJaggedTensor):
                        # KJT supports exactly one level of sub-keys.
                        if (len(key) > 2):
                            return False
                        return (key[1] in self.tensordict.get(key[0]).keys())
                    _is_tensordict = _is_tensor_collection(entry_type)
                    if _is_tensordict:
                        # Resolve the parent container, then check the last part.
                        leaf_td = self.tensordict._get_tuple(key[:(- 1)], None)
                        if ((leaf_td is None) or ((not _is_tensor_collection(leaf_td.__class__)) and (not isinstance(leaf_td, KeyedJaggedTensor)))):
                            return False
                        return (key[(- 1)] in leaf_td.keys())
                return False
            # include_nested is False: reject well-formed tuple keys loudly.
            # NOTE(review): a tuple containing non-str parts falls through and
            # implicitly returns None (falsy) here -- confirm this is intended.
            if all((isinstance(subkey, str) for subkey in key)):
                raise TypeError(_NON_STR_KEY_TUPLE_ERR)

    def __repr__(self):
        include_nested = f'include_nested={self.include_nested}'
        leaves_only = f'leaves_only={self.leaves_only}'
        return f'''{self.__class__.__name__}({list(self)},
{indent(include_nested, (4 * ' '))},
{indent(leaves_only, (4 * ' '))})'''
class MongoLogger(FlaggingCallback):
    """Gradio flagging callback that persists flagged LLM events to MongoDB."""

    def __init__(self, url, project_name) -> None:
        self.url = url
        self.client = MongoClient(url)
        self.project_name = project_name
        self.components = None
        self.db = None
        # Probe connectivity up front; errors are printed, not raised.
        try:
            self.client.admin.command('ping')
            print('Pinged MongoDB. Correctly set up')
        except Exception as e:
            print(e)

    def setup(self, components: list, flagging_dir: str=None):
        """Bind the Gradio components and ensure the database/collection exist."""
        self.components = components
        # Accessing client[DB_NAME] creates the database lazily either way;
        # the branch only changes the log message.
        if (DB_NAME in self.client.list_database_names()):
            self.db = self.client[DB_NAME]
            print(f"Database '{DB_NAME}' already exists.")
        else:
            self.db = self.client[DB_NAME]
            print(f"Database '{DB_NAME}' created.")
        if (COLLECTION_NAME in self.db.list_collection_names()):
            print(f"Collection '{COLLECTION_NAME}' already exists in database '{DB_NAME}'.")
        else:
            self.db.create_collection(COLLECTION_NAME)
            print(f"Collection '{COLLECTION_NAME}' created in database '{DB_NAME}'.")

    def flag(self, flag_data: list[Any], flag_option: str='', username: str=''):
        """Persist one flagged interaction as an LlmEvent document.

        NOTE(review): relies on a fixed positional layout of ``flag_data``:
        [0] user prompt, [1..NUM_LLM_OPTIONS] model ids, the next
        NUM_LLM_OPTIONS entries response texts, [-3] vote marker, [-2]
        per-model generation stats, [-1] session id -- confirm against the
        Gradio component ordering.
        """
        print(f'last value is: {flag_data}')
        event = LlmEvent(project_name=self.project_name, created_at=datetime.now(timezone.utc), instance_id=str(uuid.uuid4()), user_prompt=flag_data[0], responses=[LlmResponse(model_id=flag_data[i], text=flag_data[(i + NUM_LLM_OPTIONS)], gen_stats=flag_data[(- 2)][(i - 1)]) for i in range(1, (NUM_LLM_OPTIONS + 1))], session_id=flag_data[(- 1)])
        # A non-empty vote marker encodes the chosen model's index in its
        # last character.
        if flag_data[(- 3)]:
            vote_number = int(flag_data[(- 3)][(- 1)])
            event.votes = Vote(llm=flag_data[vote_number], score=1)
        print(f'Event is {event.json()}')
        result = self.client[DB_NAME][COLLECTION_NAME].insert_one(event.dict())
        print(f'Mongo result {result}')
class MishActivation(nn.Module):
    """Mish activation: ``x * tanh(softplus(x))``.

    Uses the native ``nn.functional.mish`` when available (torch >= 1.9.0)
    and otherwise falls back to a pure-Python implementation.
    """

    def __init__(self):
        super().__init__()
        native_available = version.parse(torch.__version__) >= version.parse('1.9.0')
        self.act = nn.functional.mish if native_available else self._mish_python

    def _mish_python(self, input: Tensor) -> Tensor:
        # Reference formula for old torch versions lacking F.mish.
        return input * torch.tanh(nn.functional.softplus(input))

    def forward(self, input: Tensor) -> Tensor:
        return self.act(input)
def feedforwardGAN(qnnArch, unitaries, inputData):
    """Propagate every input state through the quantum network layer by layer.

    Each input ket is first turned into a density matrix (|x><x|), then pushed
    through the channel of every layer in ``qnnArch``. Returns a list with one
    entry per input; each entry is the list of layerwise states, starting with
    the input density matrix.
    """
    storedStates = []
    for sample in inputData:
        # Density matrix of the input ket.
        state = sample * sample.dag()
        layerwise = [state]
        for layer in range(1, len(qnnArch)):
            state = makeLayerChannel(qnnArch, unitaries, layer, state)
            layerwise.append(state)
        storedStates.append(layerwise)
    return storedStates
class HeadphoneMonitor(GObject.Object):
    """Watch PulseAudio events (via ``pactl subscribe``) and emit an 'action'
    signal whenever the headphone connection status changes."""
    __gsignals__ = {'action': (GObject.SignalFlags.RUN_LAST, None, (object,))}

    def __init__(self):
        super().__init__()
        self._subscribe_id = None   # GLib watch source id, set by start()
        self._process = None        # the `pactl subscribe` child process
        self._status = None         # last known headphone status (None = not started)

    def is_connected(self):
        """Return the last observed status; start() must have been called."""
        if (self._status is None):
            raise Exception('call start() first')
        return self._status

    def _emit(self):
        # Translate the boolean status into the public enum for listeners.
        self.emit('action', (HeadphoneAction.CONNECTED if self._status else HeadphoneAction.DISCONNECTED))

    def _update_status(self):
        """Re-poll the status and emit only on a change."""
        assert (self._status is not None)
        new_status = get_headphone_status()
        if (new_status != self._status):
            self._status = new_status
            self._emit()
        return

    def start(self):
        """Spawn `pactl subscribe` and watch its stdout for sink events."""
        null = open(os.devnull, 'wb')
        try:
            self._process = subprocess.Popen(['pactl', 'subscribe'], stdout=subprocess.PIPE, stderr=null)
        except OSError:
            # pactl missing: report "disconnected" and give up silently.
            self._status = False
            return
        f = self._process.stdout
        # Non-blocking reads so the GLib callback never stalls the loop.
        fcntl.fcntl(f, fcntl.F_SETFL, os.O_NONBLOCK)

        def can_read_cb(fd, flags):
            # Returning False detaches this watch from the main loop.
            if (flags & (GLib.IOCondition.HUP | GLib.IOCondition.ERR)):
                f.close()
                self._subscribe_id = None
                return False
            data = f.read()
            if (not data):
                # EOF: the pactl process went away.
                f.close()
                self._subscribe_id = None
                return False
            # Heuristic filter: only sink-related events can change headphones.
            if (b' on sink ' in data):
                self._update_status()
            return True
        self._status = get_headphone_status()
        self._subscribe_id = GLib.io_add_watch(f, GLib.PRIORITY_HIGH, ((GLib.IOCondition.IN | GLib.IOCondition.ERR) | GLib.IOCondition.HUP), can_read_cb)

    def stop(self):
        """Detach the watch, terminate the child process, and reset state."""
        if (self._subscribe_id is not None):
            GLib.source_remove(self._subscribe_id)
            self._subscribe_id = None
        if (self._process is not None):
            self._process.terminate()
            self._process.wait()
            self._process = None
        self._status = None
def _parse_start_and_end_idx(target_nodes: str, num_nodes: int) -> Tuple[(int, int)]:
indices = target_nodes.split(':')
if (len(indices) == 1):
return (int(indices[0]), int(indices[0]))
else:
start_idx = indices[0]
end_idx = indices[1]
return (int((start_idx or '0')), int((end_idx or str((num_nodes - 1))))) |
def downgrade(op, tables, tester):
    """Alembic downgrade for the queueitem table: restore the original
    single-column indexes and drop the composite indexes introduced by the
    matching upgrade."""
    # Recreate the pre-upgrade single-column indexes.
    op.create_index('queueitem_retries_remaining', 'queueitem', ['retries_remaining'], unique=False)
    op.create_index('queueitem_processing_expires', 'queueitem', ['processing_expires'], unique=False)
    op.create_index('queueitem_available_after', 'queueitem', ['available_after'], unique=False)
    op.create_index('queueitem_available', 'queueitem', ['available'], unique=False)
    # Drop the composite indexes added by the upgrade.
    op.drop_index('queueitem_processing_expires_queue_name_available', table_name='queueitem')
    op.drop_index('queueitem_pexpires_aafter_rremaining_available', table_name='queueitem')
    op.drop_index('queueitem_pe_aafter_qname_rremaining_available', table_name='queueitem')
    op.drop_index('queueitem_processing_expires_available', table_name='queueitem')
class MultiProcess():
    """Build pretraining data in parallel: split the dataset across CPUs,
    let each worker write its own output files, then merge them into one
    ``data.json``."""

    def __init__(self, dataset=None, wiki5m_alias2qid=None, wiki5m_qid2alias=None, head_cluster=None):
        self.dataset = dataset
        self.wiki5m_alias2qid = wiki5m_alias2qid
        self.wiki5m_qid2alias = wiki5m_qid2alias
        self.head_cluster = head_cluster
        self.output_folder = './pretrain_data/data/'

    def process(self, digits, fold='1by1'):
        """Worker entry point: run `run_proc` on one chunk of the dataset.

        ``digits`` is ``[chunk, worker_index]`` as produced by run().
        """
        (file_list, para_id) = digits
        run_proc(para_id, file_list, self.wiki5m_alias2qid, self.wiki5m_qid2alias, self.head_cluster, min_seq_len=80, max_seq_len=200, output_folder=self.output_folder)

    def run(self):
        """Split the dataset into one chunk per CPU and process them in parallel."""
        n_cpu = multiprocessing.cpu_count()
        num = len(self.dataset)
        self.n_cpu = n_cpu
        print('cpu num: {}'.format(n_cpu))
        chunk_size = num // n_cpu
        procs = []
        for i in range(n_cpu):
            min_i = chunk_size * i
            # The last worker takes the remainder so every item is covered.
            max_i = chunk_size * (i + 1) if i < (n_cpu - 1) else num
            digits = [self.dataset[min_i:max_i], i]
            procs.append(multiprocessing.Process(target=self.process, args=(digits, 'parallel')))
        for proc in procs:
            proc.start()
        for proc in procs:
            proc.join()

    def merge(self):
        """Concatenate every per-worker output file into ``data.json``."""
        # BUG FIX: file_list was previously used without being initialized,
        # raising NameError on the first call.
        file_list = []
        for (path, _, filenames) in os.walk(self.output_folder):
            for filename in filenames:
                file_list.append(os.path.join(path, filename))
        print('Start merging ...')
        # Context manager guarantees the merged file is flushed and closed.
        with open(os.path.join(self.output_folder, 'data.json'), 'w', encoding='utf-8') as fw:
            for file in tqdm(file_list):
                # Stream line by line instead of readlines() to bound memory.
                with open(file, 'r', encoding='utf-8') as fr:
                    for line in fr:
                        fw.write(line)
        print('Merge is done.')
class TestDriverFCIDumpDumpH2(QiskitChemistryTestCase, BaseTestDriverFCIDumpDumper):
    """Round-trip test: compute H2/sto3g with PySCF, dump to FCIDump format,
    and re-read the dump with pyscf's own parser for comparison."""

    def setUp(self):
        super().setUp()
        # Reference values for H2 at 0.735 Angstrom in the sto3g basis.
        self.core_energy = 0.7199          # nuclear repulsion energy
        self.num_orbitals = 2
        self.num_electrons = 2
        self.spin_number = 0
        self.wf_symmetry = 1
        self.orb_symmetries = [1, 1]
        self.mo_onee = [[1.2563, 0.0], [0.0, 0.4719]]   # one-electron MO integrals
        self.mo_eri = [0.6757, 0.0, 0.1809, 0.6646, 0.0, 0.6986]  # two-electron ERIs
        try:
            driver = PySCFDriver(atom='H .0 .0 .0; H .0 .0 0.735', unit=UnitsType.ANGSTROM, charge=0, spin=0, basis='sto3g')
            qmolecule = driver.run()
            # Dump to a temp file and read it back with pyscf's parser.
            with tempfile.NamedTemporaryFile() as dump:
                FCIDumpDriver.dump(qmolecule, dump.name)
                from pyscf.tools import fcidump as pyscf_fcidump
                self.dumped = pyscf_fcidump.read(dump.name)
        except QiskitChemistryError:
            self.skipTest('PYSCF driver does not appear to be installed.')
        except ImportError:
            self.skipTest('PYSCF driver does not appear to be installed.')
def convert_examples_to_image_features(examples: List[UDInputExample], label_list: List[str], max_seq_length: int, processor: Union[(PyGameTextRenderer, PangoCairoTextRenderer)], transforms: Optional[Callable]=None, pad_token=(- 100), *kwargs) -> Tuple[(List[Dict[(str, Union[(int, torch.Tensor)])]], int)]:
    """Render UD dependency-parsing examples to images and build model features.

    Each example's words are rendered by ``processor``; the resulting image is
    transformed into pixel values, and arc/relation labels plus word-start
    positions are truncated/padded to ``max_seq_length`` using ``pad_token``.

    Returns ``(features, pad_token)`` where each feature dict carries
    pixel_values, attention_mask, word_starts, arc_labels and rel_labels.

    NOTE(review): the signature uses ``*kwargs`` (extra positionals); this
    looks like a typo for ``**kwargs`` -- confirm against the callers.
    """
    label_map = {label: i for (i, label) in enumerate(label_list)}
    features = []
    for (ex_index, example) in enumerate(examples):
        if ((ex_index % 10000) == 0):
            logger.info(f'Writing example {ex_index} of {len(examples)}')
        # Render the sentence to an image with word-boundary bookkeeping.
        encoding = processor(example.words)
        image = encoding.pixel_values
        num_patches = encoding.num_text_patches
        word_starts = encoding.word_starts
        pixel_values = transforms(Image.fromarray(image))
        attention_mask = get_attention_mask(num_patches, seq_length=max_seq_length)
        pad_item = [pad_token]
        if (len(example.head_labels) > max_seq_length):
            logger.warning('Sequence %d of len %d truncated: %s', ex_index, len(example.head_labels), example.words)
        # Truncate, mask heads pointing beyond the window, then right-pad.
        arc_labels = example.head_labels[:min(max_seq_length, len(example.head_labels))]
        arc_labels = [(pad_token if (al > max_seq_length) else al) for al in arc_labels]
        arc_labels = (arc_labels + ((max_seq_length - len(arc_labels)) * pad_item))
        # Map relation names to ids, truncate and right-pad.
        rel_labels = [label_map[i] for i in example.rel_labels[:min(max_seq_length, len(example.rel_labels))]]
        rel_labels = (rel_labels + ((max_seq_length - len(rel_labels)) * pad_item))
        # word_starts has one extra slot (sentence end boundary).
        word_starts = (word_starts + (((max_seq_length + 1) - len(word_starts)) * pad_item))
        assert (len(attention_mask) == max_seq_length)
        assert (len(arc_labels) == max_seq_length)
        assert (len(rel_labels) == max_seq_length)
        assert (len(word_starts) == (max_seq_length + 1))
        # Log the first few examples for manual inspection.
        if (ex_index < 5):
            logger.info('*** Example ***')
            logger.info(f"sentence: {' '.join(example.words)}")
            logger.info(f'attention_mask: {attention_mask}')
            logger.info(f'arc_labels: {arc_labels}')
            logger.info(f'rel_labels: {rel_labels}')
            logger.info(f'word_starts: {word_starts}')
        features.append({'pixel_values': pixel_values, 'attention_mask': attention_mask, 'word_starts': word_starts, 'arc_labels': arc_labels, 'rel_labels': rel_labels})
    return (features, pad_token)
def read_task_data(task, subgoal_idx=None):
    """Summarize a task record into a small dict.

    The 'task' field is the second- and third-to-last components of the
    task's root path joined with '/'. When ``subgoal_idx`` is given, the
    corresponding high-level action is included as well.
    """
    summary = {
        'repeat_idx': task['repeat_idx'],
        'type': task['task_type'],
        'task': '/'.join(task['root'].split('/')[-3:-1]),
    }
    if subgoal_idx is not None:
        subgoal = task['plan']['high_pddl'][subgoal_idx]
        summary['subgoal_idx'] = subgoal_idx
        summary['subgoal_action'] = subgoal['discrete_action']['action']
    return summary
def synthesis(args):
    """Vocode every mel-spectrogram under ``args.input``/mel into waveforms.

    Writes generated audio (and a mu-law round-tripped target for reference)
    into ./samples, printing the real-time factor (RTF) per utterance.
    """
    model = create_model(args)
    if (args.resume is not None):
        attempt_to_restore(model, args.resume, args.use_cuda)
    device = torch.device(('cuda' if args.use_cuda else 'cpu'))
    model.to(device)
    output_dir = 'samples'
    os.makedirs(output_dir, exist_ok=True)
    avg_rtf = []
    for filename in os.listdir(os.path.join(args.input, 'mel')):
        start = time.time()
        # Load conditions and reshape to (batch=1, n_mels, frames).
        # NOTE(review): assumes .npy files store (frames, n_mels) -- confirm.
        conditions = np.load(os.path.join(args.input, 'mel', filename))
        conditions = torch.FloatTensor(conditions).unsqueeze(0)
        conditions = conditions.transpose(1, 2).to(device)
        batch_size = conditions.size()[0]
        # Latent noise vector for the generator.
        z = torch.randn(batch_size, args.z_dim).to(device).normal_(0.0, 1.0)
        audios = model(conditions, z)
        audios = audios.cpu().squeeze().detach().numpy()
        print(audios.shape)
        name = filename.split('.')[0]
        # Round-trip the target through mu-law so it is comparable to output.
        sample = np.load(os.path.join(args.input, 'audio', filename))
        sample = mu_law_decode(mu_law_encode(sample))
        save_wav(np.squeeze(sample), '{}/{}_target.wav'.format(output_dir, name))
        save_wav(np.asarray(audios), '{}/{}.wav'.format(output_dir, name))
        time_used = (time.time() - start)
        # RTF relative to an assumed 24 kHz sample rate.
        rtf = (time_used / (len(audios) / 24000))
        avg_rtf.append(rtf)
        print('Time used: {:.3f}, RTF: {:.4f}'.format(time_used, rtf))
    print('Average RTF: {:.3f}'.format((sum(avg_rtf) / len(avg_rtf))))
def dqn_heatmap():
    """Plot value and action heatmaps of a trained pendulum DQN over the
    (theta, theta-dot) state grid, saving the figure to img/dqn_heatmap.png."""
    from dqn import Net
    (x_pxl, y_pxl) = (300, 400)
    # Grid of states: theta in [-pi, pi] on x, theta-dot in [-8, 8] on y.
    state = torch.Tensor([[np.cos(theta), np.sin(theta), thetadot] for thetadot in np.linspace((- 8), 8, y_pxl) for theta in np.linspace((- np.pi), np.pi, x_pxl)])
    net = Net()
    net.load_state_dict(torch.load('param/dqn_net_params.pkl'))
    q = net(state)
    # Max Q per state -> state value; argmax -> chosen action.
    value_map = q.max(1)[0].view(y_pxl, x_pxl).detach().numpy()
    # NOTE(review): maps a discrete action index to torque via idx/10*4 - 2,
    # i.e. assumes 11 actions spanning [-2, 2] -- confirm against dqn.Net.
    action_map = (((q.max(1)[1].view(y_pxl, x_pxl).detach().numpy() / 10) * 4) - 2)
    fig = plt.figure()
    fig.suptitle('DQN')
    ax = fig.add_subplot(121)
    im = ax.imshow(value_map, cmap=plt.cm.spring, interpolation='bicubic')
    plt.colorbar(im, shrink=0.5)
    ax.set_title('Value Map')
    ax.set_xlabel('$\\theta$')
    ax.set_xticks(np.linspace(0, x_pxl, 5))
    ax.set_xticklabels(['$-\\pi$', '$-\\pi/2$', '$0$', '$\\pi/2$', '$\\pi$'])
    ax.set_ylabel('$\\dot{\\theta}$')
    ax.set_yticks(np.linspace(0, y_pxl, 5))
    ax.set_yticklabels(['-8', '-4', '0', '4', '8'])
    ax = fig.add_subplot(122)
    im = ax.imshow(action_map, cmap=plt.cm.winter, interpolation='bicubic')
    plt.colorbar(im, shrink=0.5)
    ax.set_title('Action Map')
    ax.set_xlabel('$\\theta$')
    ax.set_xticks(np.linspace(0, x_pxl, 5))
    ax.set_xticklabels(['$-\\pi$', '$-\\pi/2$', '$0$', '$\\pi/2$', '$\\pi$'])
    ax.set_ylabel('$\\dot{\\theta}$')
    ax.set_yticks(np.linspace(0, y_pxl, 5))
    ax.set_yticklabels(['-8', '-4', '0', '4', '8'])
    plt.tight_layout()
    plt.savefig('img/dqn_heatmap.png')
    plt.show()
def CopyTo(desc, src, dest):
    """Copy ``src`` to ``dest`` on Windows, prompting the user to retry when
    the file is locked (e.g. by a running Python process).

    ``desc`` is interpolated into the error dialog. Relies on a module-level
    ``silent`` flag: when set, any failure is re-raised instead of prompting.
    """
    import win32api, win32con
    while 1:
        try:
            # Third arg 0: overwrite an existing destination.
            win32api.CopyFile(src, dest, 0)
            return
        except win32api.error as details:
            # winerror 5 = access denied: retrying won't help, re-raise.
            if (details.winerror == 5):
                raise
            if silent:
                raise
            tb = None
            full_desc = ("Error %s\n\nIf you have any Python applications running, please close them now\nand select 'Retry'\n\n%s" % (desc, details.strerror))
            rc = win32api.MessageBox(0, full_desc, 'Installation Error', win32con.MB_ABORTRETRYIGNORE)
            if (rc == win32con.IDABORT):
                raise
            elif (rc == win32con.IDIGNORE):
                return
            # IDRETRY falls through and loops to try the copy again.
def bngl_import_compare_nfsim(bng_file):
    """Check BNGL import fidelity under NFsim: simulating the raw .bngl file
    must give the same observable trajectories as simulating the model
    imported via ``model_from_bngl``. The shared seed makes the stochastic
    runs comparable."""
    m = model_from_bngl(bng_file)
    BNG_SEED = 123
    # Run 1: simulate straight from the original BNGL file.
    with BngFileInterface(model=None) as bng:
        bng.action('readFile', file=bng_file, skip_actions=1)
        bng.action('simulate', method='nf', n_steps=10, t_end=100, seed=BNG_SEED)
        bng.execute()
        yfull1 = bng.read_simulation_results()
    # Run 2: simulate the imported PySB model with the same seed.
    with BngFileInterface(model=m) as bng:
        bng.action('simulate', method='nf', n_steps=10, t_end=100, seed=BNG_SEED)
        bng.execute()
        yfull2 = bng.read_simulation_results()
    # Trajectories must match observable by observable.
    for i in range(len(m.observables)):
        print(i)
        print(yfull1[i])
        print(yfull2[i])
        print((yfull1[i] == yfull2[i]))
        assert (yfull1[i] == yfull2[i])
class CNN_Parrallel(nn.Module):
    """Two parallel CNN encoders sharing a single linear classifier head.

    In 'supervised' mode the encoder features are classified into 14 classes;
    otherwise the raw encoder outputs are returned for both branches.
    """

    def __init__(self):
        super(CNN_Parrallel, self).__init__()
        self.encoder_1 = CNN_encoder()
        self.encoder_2 = CNN_encoder()
        # Shared head over flattened encoder features (96 * 4 * 6).
        self.classifier = nn.Sequential(
            nn.Linear(((96 * 4) * 6), 128),
            nn.ReLU(),
            nn.Linear(128, 14),
        )

    def forward(self, x1, x2, flag='unsupervised'):
        if flag != 'supervised':
            # Unsupervised path: just encode both inputs.
            return (self.encoder_1(x1), self.encoder_2(x2))
        feats_1 = self.encoder_1(x1, flag=flag)
        feats_2 = self.encoder_2(x2, flag=flag)
        return (self.classifier(feats_1), self.classifier(feats_2))
def strip_docstrings(line_gen):
res = []
prev_toktype = token.INDENT
last_lineno = (- 1)
last_col = 0
tokgen = tokenize.generate_tokens(line_gen)
for (toktype, ttext, (slineno, scol), (elineno, ecol), ltext) in tokgen:
if (slineno > last_lineno):
last_col = 0
if (scol > last_col):
res.append((' ' * (scol - last_col)))
if ((toktype == token.STRING) and (prev_toktype == token.INDENT)):
res.append('#--')
elif (toktype == tokenize.COMMENT):
res.append('##\n')
else:
res.append(ttext)
prev_toktype = toktype
last_col = ecol
last_lineno = elineno
return ''.join(res) |
class BuildBackbone(object):
    """Factory that builds the configured CNN backbone and feeds its feature
    maps through the configured FPN neck (RetinaNet-style, P3-P7)."""

    def __init__(self, cfgs, is_training):
        self.cfgs = cfgs
        self.base_network_name = cfgs.NET_NAME
        self.is_training = is_training
        self.fpn_func = self.fpn_mode(cfgs.FPN_MODE)
        # Registry of pretrained-model names per framework origin.
        self.pretrain_zoo = PretrainModelZoo()

    def fpn_mode(self, fpn_mode):
        """Select the neck implementation ('fpn' or 'bifpn')."""
        if (fpn_mode == 'bifpn'):
            fpn_func = bifpn_p3top7.NeckBiFPNRetinaNet(self.cfgs)
        elif (fpn_mode == 'fpn'):
            fpn_func = fpn_p3top7.NeckFPNRetinaNet(self.cfgs)
        else:
            raise Exception('only support [fpn, bifpn]')
        return fpn_func

    def build_backbone(self, input_img_batch):
        """Run the image batch through the backbone and neck.

        The backbone is dispatched on the configured network name; returns
        the neck's pyramid features.
        """
        if self.base_network_name.startswith('resnet_v1'):
            feature_dict = resnet.ResNetBackbone(self.cfgs).resnet_base(input_img_batch, scope_name=self.base_network_name, is_training=self.is_training)
        elif (self.base_network_name in self.pretrain_zoo.mxnet_zoo):
            feature_dict = resnet_gluoncv.ResNetGluonCVBackbone(self.cfgs).resnet_base(input_img_batch, scope_name=self.base_network_name, is_training=self.is_training)
        elif (self.base_network_name in self.pretrain_zoo.pth_zoo):
            feature_dict = resnet_pytorch.ResNetPytorchBackbone(self.cfgs).resnet_base(input_img_batch, scope_name=self.base_network_name, is_training=self.is_training)
        elif self.base_network_name.startswith('MobilenetV2'):
            feature_dict = mobilenet_v2.MobileNetV2Backbone(self.cfgs).mobilenetv2_base(input_img_batch, is_training=self.is_training)
        # 'efficientnet-lite' must be tested before the generic 'efficientnet'.
        elif ('efficientnet-lite' in self.base_network_name):
            feature_dict = efficientnet_lite_builder.EfficientNetLiteBackbone(self.cfgs).build_model_fpn_base(input_img_batch, model_name=self.base_network_name, training=True)
        elif ('efficientnet' in self.base_network_name):
            feature_dict = efficientnet_builder.EfficientNetBackbone(self.cfgs).build_model_fpn_base(input_img_batch, model_name=self.base_network_name, training=True)
        elif ('darknet' in self.base_network_name):
            feature_dict = darknet.DarkNetBackbone(self.cfgs).darknet53_body(input_img_batch, is_training=self.is_training)
        else:
            raise ValueError('Sorry, we only support {}'.format(self.pretrain_zoo.all_pretrain))
        return self.fpn_func.fpn_retinanet(feature_dict, self.is_training)
class TFAuto():
    """High-level AutoML driver over a TFX pipeline: explore data
    (step_data_explore), train a model (step_model_build), and inspect it
    with the What-If tool (step_model_whatif). All artifacts live under
    ``path_root``."""

    def __init__(self, train_data_path, test_data_path, path_root='/tfx'):
        # Workspace layout rooted at cwd/path_root.
        self._tfx_root = os.path.join(os.getcwd(), path_root)
        self._pipeline_root = os.path.join(self._tfx_root, 'pipelines')
        self._metadata_db_root = os.path.join(self._tfx_root, 'metadata.db')
        self._metadata = os.path.join(self._tfx_root, 'metadata')
        self._log_root = os.path.join(self._tfx_root, 'logs')
        self._model_root = os.path.join(self._tfx_root, 'model')
        self._train_data_path = train_data_path
        self._test_data_path = test_data_path
        # Generated trainer module filenames.
        self._input_fn_module_file = 'inputfn_trainer.py'
        self._constants_module_file = 'constants_trainer.py'
        self._model_trainer_module_file = 'model_trainer.py'
        self.tfautils = TFAutoUtils(data_path=train_data_path, path_root=path_root)
        self.tfadata = TFAutoData()
        self.tfamodel = TFAutoModel(self._tfx_root, train_data_path, test_data_path)
        self.tfautils.create_directories()
        print('TF initialized...')
        print('All paths setup at {}'.format(self._tfx_root))

    def generate_config_json(self):
        """Write config.json (schema, sizes, ignore-list) into the workspace."""
        config_dict = {}
        config_json = os.path.join(self._tfx_root, 'config.json')
        config_dict['root_path'] = self._tfx_root
        config_dict['data_schema'] = self.tfadata.features_list
        config_dict['len_train'] = self.tfadata._len_train
        # Placeholder the user edits to exclude features from training.
        config_dict['ignore_features'] = ['ADD_FEATURES_TO_IGNORE_FROM_MODEL']
        config_dict['file_headers'] = list(self.tfadata.file_headers)
        with open(config_json, 'w') as fp:
            json.dump(config_dict, fp, indent=4)

    def step_data_explore(self, viz=False):
        """Step 1: run data ingestion/statistics and emit config.json."""
        self.pipeline = self.tfadata.run_initial(self._train_data_path, self._test_data_path, self._tfx_root, self._metadata_db_root, self.tfautils, viz)
        self.generate_config_json()

    def step_model_build(self, label_column, model_type='REGRESSION', model_complexity=1):
        """Step 2: train the AutoML model; requires step_data_explore first."""
        if (self.tfadata._run == True):
            print('Success: Started AutoML Training')
            self.tfamodel.run_initial(label_column, model_type, model_complexity)
        else:
            print('Error: Please run Step 1 - step_data_explore')
        print('Success: Model Training complete. Exported to: {}'.format((self._model_root + '/')))

    def step_model_whatif(self):
        """Step 3: open the What-If tool on the trained model."""
        if (self.tfadata._run == True):
            self.tfamodel.call_wit()
        else:
            print('Error: Please run Step 2 - step_model_build')
def test_run_shortcut_minimal_fallback_func_args(mock_pipe, monkeypatch):
    """A shortcut that only sets pipeline_name must fall back to the run()
    arguments for everything else, and must not mutate the shortcut config."""
    shortcuts = {'arb pipe': {'pipeline_name': 'sc pipe'}}
    monkeypatch.setattr('pypyr.config.config.shortcuts', shortcuts)
    out = run(pipeline_name='arb pipe', args_in=['arb', 'context', 'input'], parse_args=True, dict_in={'a': 'b', 'e': 'f'}, groups=['g'], success_group='sg', failure_group='fg', loader='arb loader', py_dir='arb/dir')
    assert (type(out) is Context)
    assert (out == Context({'a': 'b', 'e': 'f'}))
    assert (not out.is_in_pipeline_scope)
    # The resolved pipeline name comes from the shortcut; the rest pass through.
    mock_pipe.assert_called_once_with(name='sc pipe', context_args=['arb', 'context', 'input'], parse_input=True, groups=['g'], success_group='sg', failure_group='fg', loader='arb loader', py_dir='arb/dir')
    mock_pipe.return_value.run.assert_called_once_with(out)
    # The shortcut mapping itself must be left untouched.
    assert (shortcuts == {'arb pipe': {'pipeline_name': 'sc pipe'}})
class PolyConv(nn.Module):
    """Convolution whose weights are shared across branches, with one
    dedicated BatchNorm per branch (selected by ``index`` at forward time),
    followed by ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, num_blocks):
        super(PolyConv, self).__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            bias=False,
        )
        # One BatchNorm per branch so statistics stay branch-specific.
        self.bns = nn.ModuleList(
            [nn.BatchNorm2d(num_features=out_channels) for _ in range(num_blocks)]
        )
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x, index):
        return self.activ(self.bns[index](self.conv(x)))
def opdm_to_ohdm_mapping(dim: int) -> DualBasis:
    """Build the dual-basis constraints linking the 1-particle RDM (opdm)
    and the 1-hole RDM (ohdm): for every i <= j, the symmetrized sum of the
    'ck' and 'kc' elements equals delta_ij."""
    elements = []
    for i in range(dim):
        for j in range(i, dim):
            element = DualBasisElement()
            if i == j:
                # Diagonal: <c_i k_i> + <k_i c_i> = 1.
                element.add_element('ck', (i, j), 1.0)
                element.add_element('kc', (i, j), 1.0)
                element.dual_scalar = 1.0
            else:
                # Off-diagonal: symmetrized combination sums to zero.
                element.add_element('ck', (i, j), 0.5)
                element.add_element('ck', (j, i), 0.5)
                element.add_element('kc', (j, i), 0.5)
                element.add_element('kc', (i, j), 0.5)
                element.dual_scalar = 0.0
            elements.append(element)
    return DualBasis(elements=elements)
def hex_char_dump(strg, ofs, dlen, base=0, fout=sys.stdout, unnumbered=False):
    """Write a hex+ASCII dump of ``strg[ofs:ofs+dlen]`` to ``fout``.

    Sixteen bytes per row; NUL bytes print as '~', other non-printables as
    '?'. Unless ``unnumbered``, each row is prefixed with its offset relative
    to ``base``.
    """
    endpos = min((ofs + dlen), len(strg))
    numbered = (not unnumbered)
    num_prefix = ''
    pos = ofs
    while (pos < endpos):
        endsub = min((pos + 16), endpos)
        substrg = strg[pos:endsub]
        lensub = (endsub - pos)
        # Sanity check: slice length must match the computed row width.
        if ((lensub <= 0) or (lensub != len(substrg))):
            fprintf(sys.stdout, '??? hex_char_dump: ofs=%d dlen=%d base=%d -> endpos=%d pos=%d endsub=%d substrg=%r\n', ofs, dlen, base, endpos, pos, endsub, substrg)
            break
        hexd = ''.join((('%02x ' % BYTES_ORD(c)) for c in substrg))
        printable = []
        for c in substrg:
            ch = chr(BYTES_ORD(c))
            if (ch == '\x00'):
                ch = '~'
            elif (not (' ' <= ch <= '~')):
                ch = '?'
            printable.append(ch)
        if numbered:
            num_prefix = ('%5d: ' % ((base + pos) - ofs))
        fprintf(fout, '%s %-48s %s\n', num_prefix, hexd, ''.join(printable))
        pos = endsub
def test_issue2353(caplog, path_rgb_byte_tif):
    """Regression test for rasterio issue 2353: accessing ``colorinterp``
    must still work after calculate_default_transform has processed a
    deliberately malformed WKT (note the empty satellite_height parameter)."""
    from rasterio.warp import calculate_default_transform
    with caplog.at_level(logging.INFO):
        with rasterio.open(path_rgb_byte_tif) as src:
            _ = src.colorinterp
            # The malformed WKT below is intentional input for the regression.
            (t, w, h) = calculate_default_transform('PROJCS["unknown",GEOGCS["unknown",DATUM["unknown",SPHEROID["GRS 1980",6378137,298.]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.]],PROJECTION["Geostationary_Satellite"],PARAMETER["central_meridian",-137],PARAMETER["satellite_height",],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],EXTENSION["PROJ4","+proj=geos +sweep=x +lon_0=-137 +h= +x_0=0 +y_0=0 +ellps=GRS80 +units=m +no_defs"]]', 'EPSG:4326', 21696, 21696, (- 5434894.885056), (- 5434894.885056), 5434894.885056, 5434894.885056)
            _ = src.colorinterp
def wait_for_block(raiden: 'RaidenService', block_number: BlockNumber, retry_timeout: float) -> None:
    """Block (cooperatively) until the node has processed ``block_number``.

    Polls the confirmed block number, sleeping ``retry_timeout`` seconds
    between checks via gevent so other greenlets keep running.
    """
    log_details = {
        'node': to_checksum_address(raiden.address),
        'target_block_number': block_number,
    }
    while True:
        current = raiden.get_block_number()
        if current >= block_number:
            return
        # The alarm task must be alive, otherwise the block number
        # can never advance and this loop would spin forever.
        assert raiden, ALARM_TASK_ERROR_MSG
        assert raiden.alarm, ALARM_TASK_ERROR_MSG
        log.debug('wait_for_block', current_block_number=current, **log_details)
        gevent.sleep(retry_timeout)
class DecoderConfigDescriptor(BaseDescriptor):
    """MPEG-4 DecoderConfigDescriptor (esds), tag 4.

    Parses the fixed bit fields and, for AAC audio streams
    (objectTypeIndication 0x40, streamType 5), the embedded
    DecoderSpecificInfo used to build the RFC 6381 codec string.
    """
    TAG = 4
    # Parsed DecoderSpecificInfo, or None when absent/not applicable.
    decSpecificInfo = None

    def __init__(self, fileobj, length):
        r = BitReader(fileobj)
        try:
            self.objectTypeIndication = r.bits(8)
            self.streamType = r.bits(6)
            self.upStream = r.bits(1)
            self.reserved = r.bits(1)
            self.bufferSizeDB = r.bits(24)
            self.maxBitrate = r.bits(32)
            self.avgBitrate = r.bits(32)
            # Only AAC audio (0x40/5) carries a DecoderSpecificInfo we parse.
            if ((self.objectTypeIndication, self.streamType) != (64, 5)):
                return
            # Descriptor fully consumed: no sub-descriptor follows.
            if ((length * 8) == r.get_position()):
                return
            tag = r.bits(8)
        except BitReaderError as e:
            raise DescriptorError(e)
        if (tag == DecoderSpecificInfo.TAG):
            # All reads so far are whole bytes, so parsing can continue
            # directly from the underlying file object.
            assert r.is_aligned()
            self.decSpecificInfo = DecoderSpecificInfo.parse(fileobj)

    def codec_param(self):
        """Codec parameter suffix, e.g. '.40.2' for AAC-LC."""
        param = (u'.%X' % self.objectTypeIndication)
        info = self.decSpecificInfo
        if (info is not None):
            param += (u'.%d' % info.audioObjectType)
        return param

    def codec_desc(self):
        """Human-readable codec description, or None when unknown."""
        info = self.decSpecificInfo
        desc = None
        if (info is not None):
            desc = info.description
        return desc
class EnumExactValueProvider(BaseEnumProvider):
    """Provider that loads enum members by their exact ``.value``.

    When possible it builds a precomputed value->member mapping for O(1)
    loading; otherwise it falls back to calling the enum constructor.
    """

    def _provide_loader(self, mediator: Mediator, request: LoaderRequest) -> Loader:
        return self._make_loader(get_type_from_request(request))

    def _make_loader(self, enum):
        variants = [case.value for case in enum]
        value_to_member = self._get_exact_value_to_member(enum)
        if (value_to_member is None):
            # Fallback path: delegate to the enum constructor.
            def enum_exact_loader(data):
                # An enum member itself is not a raw value: reject it.
                if (type(data) is enum):
                    raise BadVariantError(variants, data)
                try:
                    return enum(data)
                except ValueError:
                    raise BadVariantError(variants, data) from None
            return enum_exact_loader

        # Fast path: direct dict lookup of the raw value.
        def enum_exact_loader_v2m(data):
            try:
                return value_to_member[data]
            except KeyError:
                raise BadVariantError(variants, data) from None
            except TypeError:
                # Unhashable input cannot be a valid enum value.
                raise BadVariantError(variants, data)
        return enum_exact_loader_v2m

    def _get_exact_value_to_member(self, enum: Type[Enum]) -> Optional[Mapping[(Any, Any)]]:
        """Build value->member map, or None when the fast path is unsafe."""
        try:
            value_to_member = {case.value: case for case in enum}
        except TypeError:
            # Some enum value is unhashable: cannot build the map.
            return None
        # A custom _missing_ hook may accept values outside the map, so the
        # precomputed lookup would be incomplete -- fall back.
        if (getattr(enum._missing_, '__func__', None) != Enum._missing_.__func__):
            return None
        return value_to_member

    def _provide_dumper(self, mediator: Mediator, request: DumperRequest) -> Dumper:
        # Dumping is value extraction; shared module-level dumper suffices.
        return _enum_exact_value_dumper
.parametrize('setting,expected_value', [(None, None), ('auto', None), ('always', True), ('never', False)])
def test_color_cli_option(runner, setting, expected_value, boxed_context, in_tmp_dir, tmp_path):
    """The --color CLI option must map onto the tri-state context value:
    None for auto/unset, True for 'always', False for 'never'."""
    args = ['--schemafile', 'schema.json', 'foo.json']
    # setting is None for the "option omitted" case.
    if setting:
        args.extend(('--color', setting))
    touch_files(tmp_path, 'foo.json')
    runner.invoke(cli_main, args)
    assert (boxed_context.ref.color == expected_value)
class MyHTTPServer(HTTPServer):
    """HTTPServer that records construction failures instead of raising, and
    announces readiness to the attached ghost/nvim instance exactly once."""

    def __init__(self, ghost, *args, **kwargs):
        self.ghost = ghost
        self.error = None
        self.didPrintStartMsg = False
        try:
            HTTPServer.__init__(self, *args, **kwargs)
        except Exception as exc:
            # Keep the failure for the caller to inspect via self.error.
            self.error = exc

    def service_actions(self):
        # Runs on every serve_forever() tick; announce startup only once.
        if self.didPrintStartMsg:
            return
        logger.info('server started')
        self.ghost.nvim.async_call(self.ghost.echo, 'server started on port {0}', self.ghost.port)
        self.ghost.server_started = True
        self.didPrintStartMsg = True
class FastFashionMNIST(datasets.FashionMNIST):
    """FashionMNIST preloaded entirely onto the GPU as normalized float
    tensors, so __getitem__ avoids per-item transforms.

    NOTE(review): the device is hard-coded to 'cuda'; 0.2861 / 0.353 are
    presumably the dataset mean/std -- confirm.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # (N, 28, 28) uint8 -> (N, 1, 28, 28) float in [0, 1].
        self.data = self.data.unsqueeze(1).float().div(255)
        # In-place normalization.
        self.data = self.data.sub_(0.2861).div_(0.353)
        (self.data, self.targets) = (self.data.to('cuda'), self.targets.to('cuda'))

    def __getitem__(self, index):
        # Data is already normalized and on-device; just index.
        (img, target) = (self.data[index], self.targets[index])
        return (img, target)
def test_base_head():
    """Smoke test for the base head: loss() on random scores with constant
    labels must produce a positive 'loss_cls' entry."""
    head = ExampleHead(3, 400, dict(type='CrossEntropyLoss'))
    # Random class scores for a batch of 3 and 4 classes.
    cls_scores = torch.rand((3, 4))
    gt_labels = torch.LongTensor(([2] * 3)).squeeze()
    losses = head.loss(cls_scores, gt_labels)
    assert ('loss_cls' in losses.keys())
    assert (losses.get('loss_cls') > 0), 'cls loss should be non-zero'
.parametrize('parser, expected_error_msg', [(('precondition-unknown-scenario',), "Cannot import precondition scenario 'Unknown Scenario' from feature"), (('precondition-unknown-scenario-same-feature',), "Cannot import precondition scenario 'Unknown Scenario' from feature"), (('precondition-recursion',), 'Your feature'), (('precondition-malformed',), "Scenario tag must have argument in format: 'test.feature: Some scenario'")], ids=['Unknown Scenario', 'Unknown Scenario within same Feature', 'Recursion', 'Malformed precondition Tag'], indirect=['parser'])
def test_parse_scenario_precondition_errors(parser, expected_error_msg):
    """Each malformed precondition tag (parametrized via the fixture) must
    raise FeatureFileSyntaxError with a message starting with the expected
    prefix."""
    with pytest.raises(errors.FeatureFileSyntaxError) as exc:
        parser.parse()
    assert str(exc.value).startswith(expected_error_msg)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.