code stringlengths 281 23.7M |
|---|
def apply_adaround_example():
    """Demonstrate AIMET AdaRound on a small Keras model.

    Runs AdaRound weight rounding, then builds a QuantizationSimModel that
    reuses (and freezes) the parameter encodings AdaRound produced.
    """
    AimetLogger.set_level_for_all_areas(logging.DEBUG)
    tf.keras.backend.clear_session()
    model = keras_model()

    # Tiny random calibration set: 32 samples in batches of 16 (2 batches).
    num_samples = 32
    samples_per_batch = 16
    calibration_data = np.random.rand(num_samples, 16, 16, 3)
    dataset = tf.data.Dataset.from_tensor_slices(calibration_data).batch(
        batch_size=samples_per_batch)

    params = AdaroundParameters(data_set=dataset,
                                num_batches=num_samples // samples_per_batch,
                                default_num_iterations=10)

    # 4-bit weights, 8-bit activations.
    weight_bw = 4
    activation_bw = 8
    scheme = QuantScheme.post_training_tf_enhanced
    adarounded_model = Adaround.apply_adaround(
        model, params, path='./', filename_prefix='dummy',
        default_param_bw=weight_bw, default_quant_scheme=scheme)

    sim = QuantizationSimModel(adarounded_model, scheme,
                               default_output_bw=activation_bw,
                               default_param_bw=weight_bw)
    # Reuse the AdaRound parameter encodings and keep them frozen while
    # computing the remaining (activation) encodings.
    sim.set_and_freeze_param_encodings(encoding_path='./dummy.encodings')
    sim.compute_encodings(dummy_forward_pass, None)
class Logging():
    """Singleton-style logger factory.

    On first use, configures the *root* logger with a timed-rotating file
    handler plus a console handler, using settings from the module-level
    `log_info` mapping.
    """

    # Lazily-created singleton instance (see getLogger).
    __instance = None
    # Maps config strings to stdlib logging levels.
    __logger_level_dic = {'ERROR': logging.ERROR, 'WARNING': logging.WARNING, 'WARN': logging.WARN, 'INFO': logging.INFO, 'DEBUG': logging.DEBUG}

    def __init__(self):
        # Reads the module-level `log_info` dict; assumes keys
        # log_dir, log_level, log_backupCount, log_interval — TODO confirm.
        self.__logger = logging.getLogger()
        self.__filename = 'train.log'
        __log_info = log_info
        self.__log_dir = __log_info['log_dir']
        self.__log_level = __log_info['log_level']
        log_backupCount = __log_info['log_backupCount']
        log_interval = __log_info['log_interval']
        # Keep 7 rotated files when the configured count is missing/invalid.
        if ((log_backupCount is None) or (not isinstance(log_backupCount, int))):
            log_backupCount = 7
        elif (log_backupCount < 0):
            log_backupCount = 7
        # Rotation interval defaults to 1 when missing/invalid.
        if ((log_interval is None) or (not isinstance(log_interval, int))):
            log_interval = 1
        elif (log_interval < 0):
            # NOTE(review): a negative interval falls back to 7 while the
            # missing-value default is 1 — confirm this asymmetry is intended.
            log_interval = 7
        # Resolve the textual level; unknown or empty values become INFO.
        if ((self.__log_level == None) or (self.__log_level == '')):
            self.__level = logging.INFO
        elif (self.__log_level.upper() not in self.__logger_level_dic.keys()):
            self.__level = logging.INFO
        else:
            self.__level = self.__logger_level_dic[self.__log_level.upper()]
        # Default log dir: <project root>/logs, created on demand.
        if ((self.__log_dir == None) or (self.__log_dir == '')):
            project_root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../'))
            self.__log_dir = os.path.join(project_root_path, 'logs')
        if (not os.path.exists(self.__log_dir)):
            os.makedirs(self.__log_dir)
        # Rotate by day ('D') every `log_interval` days, keeping
        # `log_backupCount` old files named with the suffix below.
        fh = logging.handlers.TimedRotatingFileHandler(os.path.join(self.__log_dir, self.__filename), 'D', log_interval, log_backupCount)
        fh.suffix = '%Y%m%d-%H%M.log'
        fh.setLevel(self.__level)
        ch = logging.StreamHandler()
        ch.setLevel(self.__level)
        formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(filename)s[line:%(lineno)d] - %(message)s')
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        self.__logger.setLevel(self.__level)
        self.__logger.addHandler(fh)
        self.__logger.addHandler(ch)

    def getLogger(cls) -> logging.Logger:
        # Returns the (shared) configured root logger, creating the
        # singleton on first call.
        # NOTE(review): takes `cls` but carries no @classmethod decorator —
        # presumably lost in extraction; confirm against the original file.
        if (not cls.__instance):
            cls.__instance = Logging()
        return cls.__instance.__logger
class TestEnvVarHandling(BaseTestCase):
    """Checks that the PYVISA_WRAP_HANDLER env var drives ctwrapper.WRAP_HANDLER."""

    def test_reading_wrap_handler(self):
        script = b'from pyvisa import ctwrapper;print(ctwrapper.WRAP_HANDLER);exit()'

        # Default environment: handler wrapping reads back as True.
        with Popen([sys.executable], stdin=PIPE, stdout=PIPE) as proc:
            out, _ = proc.communicate(script)
        assert out.rstrip() == b'True'

        # With PYVISA_WRAP_HANDLER=0 the flag must read back as False.
        child_env = os.environ.copy()
        child_env['PYVISA_WRAP_HANDLER'] = '0'
        with Popen([sys.executable], stdin=PIPE, stdout=PIPE, env=child_env) as proc:
            out, _ = proc.communicate(script)
        assert out.rstrip() == b'False'
class StateSwitcher(QState):
    """QState that, on entry, posts a StateSwitchEvent selecting a random
    registered state different from the one chosen last time."""

    def __init__(self, machine):
        super(StateSwitcher, self).__init__(machine)
        self.m_stateCount = 0   # number of states registered via addState()
        self.m_lastIndex = 0    # 1-based index chosen on the previous entry

    def onEntry(self, event):
        # Draw a random index in 1..m_stateCount, re-rolling until it
        # differs from the previous choice.
        # NOTE(review): `qrand() % m_stateCount` raises ZeroDivisionError if
        # entered before any addState() call — confirm callers always
        # register states first.
        n = ((qrand() % self.m_stateCount) + 1)
        while (n == self.m_lastIndex):
            n = ((qrand() % self.m_stateCount) + 1)
        self.m_lastIndex = n
        self.machine().postEvent(StateSwitchEvent(n))

    def onExit(self, event):
        # Nothing to clean up on exit.
        pass

    def addState(self, state, animation):
        # Register `state` as target number m_stateCount and attach the
        # transition (with its animation) that reacts to that number.
        self.m_stateCount += 1
        trans = StateSwitchTransition(self.m_stateCount)
        trans.setTargetState(state)
        self.addTransition(trans)
        trans.addAnimation(animation)
()
('--input', '-i', 'image_path', help='Path to an image')
('--lsb-count', '-n', default=2, show_default=2, type=int, help='How many LSBs to display')
_context
def stegdetect(ctx: click.Context, image_path: str, lsb_count: int) -> None:
    """Display the least-significant-bit planes of an image.

    Falls back to printing the command's help text when no image path
    was supplied.
    """
    # Guard clause: no input image means nothing to analyse.
    if not image_path:
        click.echo(ctx.get_help())
        return
    StegDetect.show_lsb(image_path, lsb_count)
def test_format_descriptors():
    """Check the buffer-protocol format strings pybind11 reports for the
    test structs, accounting for platform-dependent long-double padding."""
    # A type never registered with PYBIND11_NUMPY_DTYPE must raise.
    with pytest.raises(RuntimeError) as excinfo:
        m.get_format_unbound()
    assert re.match('^NumPy type info missing for .*UnboundStruct.*$', str(excinfo.value))
    ld = np.dtype('longdouble')
    # Pad before the 'ldbl_' member when long double is more than 4-aligned.
    ldbl_fmt = (('4x' if (ld.alignment > 4) else '') + ld.char)
    ss_fmt = (('^T{?:bool_:3xI:uint_:f:float_:' + ldbl_fmt) + ':ldbl_:}')
    dbl = np.dtype('double')
    # Partial struct: padding bytes depend on double/long-double alignment.
    partial_fmt = (('^T{?:bool_:3xI:uint_:f:float_:' + str((((4 * (dbl.alignment > 4)) + dbl.itemsize) + (8 * (ld.alignment > 8))))) + 'xg:ldbl_:}')
    nested_extra = str(max(8, ld.alignment))
    assert (m.print_format_descriptors() == [ss_fmt, '^T{?:bool_:I:uint_:f:float_:g:ldbl_:}', (('^T{' + ss_fmt) + ':a:^T{?:bool_:I:uint_:f:float_:g:ldbl_:}:b:}'), partial_fmt, (((((('^T{' + nested_extra) + 'x') + partial_fmt) + ':a:') + nested_extra) + 'x}'), '^T{3s:a:3s:b:}', '^T{(3)4s:a:(2)i:b:(3)B:c:1x(4, 2)f:d:}', '^T{q:e1:B:e2:}', '^T{Zf:cflt:Zd:cdbl:}'])
def _parse_assignments(lvalue: Expression, stmt: AssignmentStmt) -> tuple[list[NameExpr], list[Expression]]:
    """Split an assignment statement into parallel target/value lists.

    Returns ([name_targets], [rvalues]); both lists are empty when the
    left-hand side is not a plain name or a tuple/list of plain names.
    """
    names: list[NameExpr] = []
    values: list[Expression] = []
    if isinstance(lvalue, NameExpr):
        # Simple `x = expr` assignment.
        names = [lvalue]
        values = [stmt.rvalue]
    elif isinstance(lvalue, (TupleExpr, ListExpr)):
        # `a, b = ...` — only usable when every target is a bare name.
        if all(isinstance(item, NameExpr) for item in lvalue.items):
            names = cast(List[NameExpr], lvalue.items)
            # Values pair up only when the right side is a literal sequence.
            if isinstance(stmt.rvalue, (TupleExpr, ListExpr)):
                values = stmt.rvalue.items
    return names, values
def graphite_electrolyte_exchange_current_density_Ecker2015(c_e, c_s_surf, c_s_max, T):
    """Exchange-current density for the graphite/electrolyte interface.

    Butler-Volmer style expression with an Arrhenius temperature
    correction, using parameter values attributed to Ecker (2015).
    Arguments are electrolyte concentration, surface/particle max
    concentrations and temperature (pybamm symbols or compatible values).
    """
    # Reference reaction rate constant and its current-density prefactor.
    k_ref = 1.11 * 1e-10
    m_ref = pybamm.constants.F * k_ref
    # Activation energy, referenced to 296.15 K.
    E_r = 53400
    arrhenius = pybamm.exp((-E_r) / (pybamm.constants.R * T)) * pybamm.exp(E_r / (pybamm.constants.R * 296.15))
    # i0 ∝ sqrt(c_e) * sqrt(c_s_surf) * sqrt(c_s_max - c_s_surf)
    return m_ref * arrhenius * (c_e ** 0.5) * (c_s_surf ** 0.5) * ((c_s_max - c_s_surf) ** 0.5)
.network
def test_installer_with_pypi_repository(package: ProjectPackage, locker: Locker, installed: CustomInstalledRepository, config: Config, env: NullEnv) -> None:
    """Install a dev dependency through the mocked PyPI repository and
    compare the resulting lock data with a recorded fixture."""
    pool = RepositoryPool()
    pool.add_repository(MockRepository())
    installer = Installer(NullIO(), env, package, locker, pool, config, installed=installed)
    package.python_versions = '>=3.7'
    package.add_dependency(Factory.create_dependency('pytest', '^3.5', groups=['dev']))
    result = installer.run()
    # Installer.run() returns 0 on success.
    assert (result == 0)
    expected = fixture('with-pypi-repository')
    assert (expected == locker.written_data)
class AgentRLConfig():
    """Hyper-parameters and file locations for the RL dialogue agent."""

    def __init__(self):
        # --- model / device ---
        self.use_gpu = False
        # Concatenated state-feature width (sum of the individual blocks).
        self.input_dim = 189 + 11 + 189 + 4 + 9
        self.hidden1_dim = 100
        self.hidden2_dim = 6
        self.dp = 0.2  # dropout probability

        # --- model and data paths (relative to the project root) ---
        self.DPN_model_path = root_path + '/agents/agent_rl_model/'
        self.DPN_model_name = 'TwoLayer'
        self.all_facet_list = ['categories', 'state', 'city', 'price', 'stars']
        self.utt_gen_dict_path = root_path + '/data/agents/utt_gen_dict.json'
        self.pretrain_data_path = root_path + '/data/RL_data/RL_pretrain_data.pkl'

        # --- supervised pre-training ---
        self.pretrain_epoch_num = 200
        self.pretrain_lr = 0.003
        self.pretrain_batch_size = 64
        self.pretrain_log_path = root_path + '/agents/'
        self.pretrain_save_step = 200

        # --- policy-gradient fine-tuning ---
        self.PG_discount_rate = 0.95
        self.PG_batch_size = 100
        self.PG_data_path = root_path + '/data/RL_data/RL_data.pkl'
        self.PG_epoch_num = 100
        self.PG_lr = 0.001
        self.PG_save_step = 10
        self.PG_silence = True
        self.PG_log_path = root_path + '/agents/'
.skipif((sys.version_info[:2] <= (3, 7)), reason="regex doesn't work well in py36 (for some edge cases)")
def test_split():
    """deps.split must accept newline-, semicolon- and mixed-separated
    requirement lists, while keeping environment markers attached."""
    # Newline-separated entries with leading indentation.
    assert (deps.split('\n pyscaffold>=42.1.0,<43.0\n platformdirs==1\n cookiecutter<8\n mypkg~=9.0') == ['pyscaffold>=42.1.0,<43.0', 'platformdirs==1', 'cookiecutter<8', 'mypkg~=9.0'])
    # Mixed newline and ';' separators, trailing blank lines.
    assert (deps.split('\n pyscaffold>=42.1.0,<43.0;platformdirs==1\n cookiecutter<8;mypkg~=9.0\n\n') == ['pyscaffold>=42.1.0,<43.0', 'platformdirs==1', 'cookiecutter<8', 'mypkg~=9.0'])
    # Pure ';'-separated entries with a trailing separator.
    assert (deps.split('pyscaffold>=42.1.0,<43.0; platformdirs==1; cookiecutter<8; mypkg~=9.0; ') == ['pyscaffold>=42.1.0,<43.0', 'platformdirs==1', 'cookiecutter<8', 'mypkg~=9.0'])
    # A ';python_version...' marker must stay part of its requirement.
    assert (deps.split("\n pyscaffold>=42.1.0,<43.0;python_version>='3.4'; platformdirs==1") == ["pyscaffold>=42.1.0,<43.0;python_version>='3.4'", 'platformdirs==1'])
    # Same with a space after the marker separator.
    assert (deps.split("\n pyscaffold>=42.1.0,<43.0; python_version>='3.4'; platformdirs==1") == ["pyscaffold>=42.1.0,<43.0; python_version>='3.4'", 'platformdirs==1'])
.skip(reason="Rewrite disabled as it don't support unsorted indices")
.skipif((not pytensor.config.cxx), reason='G++ not available, so we need to skip this test.')
def test_local_csm_grad_c():
    """The 'local_csm_grad_c' rewrite must remove every CSMGrad node from
    the gradient graph, for both CSC and CSR sparse formats."""
    data = vector()
    (indices, indptr, shape) = (ivector(), ivector(), ivector())
    mode = get_default_mode()
    if (pytensor.config.mode == 'FAST_COMPILE'):
        # FAST_COMPILE would skip the rewrite; force a mode that applies it.
        mode = Mode(linker='c|py', optimizer='fast_compile')
    mode = mode.including('specialize', 'local_csm_grad_c')
    for (CS, cast) in [(sparse.CSC, sp.sparse.csc_matrix), (sparse.CSR, sp.sparse.csr_matrix)]:
        # Sum of the densified sparse matrix gives a scalar cost to grad.
        cost = pt_sum(sparse.DenseFromSparse()(CS(data, indices, indptr, shape)))
        f = pytensor.function([data, indices, indptr, shape], pytensor.grad(cost, data), mode=mode)
        # After the rewrite, no CSMGrad op may remain in the graph.
        assert (not any((isinstance(node.op, sparse.CSMGrad) for node in f.maker.fgraph.toposort())))
        # Smoke-run the compiled function on a random sparse matrix.
        v = cast(random_lil((10, 40), config.floatX, 3))
        f(v.data, v.indices, v.indptr, v.shape)
def flatten_dictionary(input, sep='.', prefix=None):
    """Yield (joined_key, value) pairs for every leaf of a nested dict.

    Keys at each level are visited in sorted order; nested dict keys are
    joined with `sep` (empty path components are dropped). `prefix`, when
    given, is prepended to every emitted key.
    """
    for key, value in sorted(input.items()):
        qualified = sep.join(filter(None, [prefix, key]))
        if isinstance(value, dict):
            # Recurse into sub-dicts, carrying the accumulated key path.
            yield from flatten_dictionary(value, sep, qualified)
        else:
            yield qualified, value
.parametrize('constraint_parts,expected', [(['3.8'], Version.from_parts(3, 8)), (['=', '3.8'], Version.from_parts(3, 8)), (['==', '3.8'], Version.from_parts(3, 8)), (['>', '3.8'], VersionRange(min=Version.from_parts(3, 8))), (['>=', '3.8'], VersionRange(min=Version.from_parts(3, 8), include_min=True)), (['<', '3.8'], VersionRange(max=Version.from_parts(3, 8))), (['<=', '3.8'], VersionRange(max=Version.from_parts(3, 8), include_max=True)), (['^', '3.8'], VersionRange(min=Version.from_parts(3, 8), max=Version.from_parts(4, 0), include_min=True)), (['~', '3.8'], VersionRange(min=Version.from_parts(3, 8), max=Version.from_parts(3, 9), include_min=True)), (['~=', '3.8'], VersionRange(min=Version.from_parts(3, 8), max=Version.from_parts(4, 0), include_min=True)), (['3.8.*'], VersionRange(min=Version.parse('3.8.0.dev0'), max=Version.parse('3.9.0.dev0'), include_min=True)), (['==', '3.8.*'], VersionRange(min=Version.parse('3.8.0.dev0'), max=Version.parse('3.9.0.dev0'), include_min=True)), (['!=', '3.8.*'], VersionRange(max=Version.parse('3.8.dev0')).union(VersionRange(Version.parse('3.9.dev0'), include_min=True))), (['>', '3.8', ',', '<=', '6.5'], VersionRange(min=Version.from_parts(3, 8), max=Version.from_parts(6, 5), include_max=True)), (['>=', '2.7', ',', '!=', '3.0.*', ',', '!=', '3.1.*'], VersionUnion(VersionRange(Version.parse('2.7'), Version.parse('3.0.dev0'), True, False), VersionRange(Version.parse('3.2.dev0'), None, True, False))), (['~', '2.7', '||', '~', '3.8'], VersionUnion(VersionRange(min=Version.from_parts(2, 7), max=Version.from_parts(2, 8), include_min=True), VersionRange(min=Version.from_parts(3, 8), max=Version.from_parts(3, 9), include_min=True))), (['~', '2.7', '||', '~', '3.8', '|', '>=', '3.10', ',', '<', '3.12'], VersionUnion(VersionRange(min=Version.from_parts(2, 7), max=Version.from_parts(2, 8), include_min=True), VersionRange(min=Version.from_parts(3, 8), max=Version.from_parts(3, 9), include_min=True), VersionRange(min=Version.from_parts(3, 
10), max=Version.from_parts(3, 12), include_min=True)))])
.parametrize(('with_whitespace_padding',), [(True,), (False,)])
def test_parse_constraint_with_white_space_padding(constraint_parts: list[str], expected: VersionConstraint, with_whitespace_padding: bool) -> None:
    """parse_constraint must tolerate (optional) whitespace around and
    between every token of a version-constraint expression."""
    # Either four spaces or nothing between/around the constraint tokens.
    padding = (' ' * (4 if with_whitespace_padding else 0))
    constraint = padding.join(['', *constraint_parts, ''])
    assert (parse_constraint(constraint) == expected)
('/')
class TodoList(Resource):
    """Flask-RESTful resource for the TODO collection.

    NOTE(review): the bare '(...)'/'_with' lines around the methods look
    like decorators truncated by extraction (presumably @api.route('/'),
    @api.marshal_list_with, @api.expect, @api.marshal_with) — restore them
    from the original source.
    """
    _list_with(listed_todo)
    def get(self):
        # Return every todo as a list of {'id', 'todo'} mappings.
        return [{'id': id, 'todo': todo} for (id, todo) in TODOS.items()]
    (parser=parser)
    _with(todo, code=201)
    def post(self):
        # Create a new todo under a sequential 'todoN' key; respond 201.
        args = parser.parse_args()
        todo_id = ('todo%d' % (len(TODOS) + 1))
        TODOS[todo_id] = {'task': args['task']}
        return (TODOS[todo_id], 201)
def load_and_merge_dbfs(database_directory: pathlib.Path, refresh_cache: bool) -> 'pd.DataFrame':
    """Load the iView DBF tables, let the user pick an imaging date via a
    Streamlit selectbox, and return a merged table for that date.

    Args:
        database_directory: directory containing the iView DBF databases.
        refresh_cache: forwarded to load_iview_dbf to bypass its cache.

    Returns:
        DataFrame with one row per image on the selected date, joined with
        port, treatment and patient details.
    """
    patimg = load_iview_dbf(database_directory, refresh_cache, 'patimg')
    # IMG_DATE is stored as yyyymmdd; offer dates newest-first.
    dates = pd.to_datetime(patimg['IMG_DATE'], format='%Y%m%d').dt.date
    date_options = dates.sort_values(ascending=False).unique()
    selected_date = st.selectbox('Date', options=date_options)
    patimg_filtered_by_date = patimg.loc[(dates == selected_date)]
    merged = patimg_filtered_by_date
    # Join the auxiliary tables via their shared foreign-key columns.
    for (database_key, merge_key) in [('port', 'PORT_DBID'), ('trtmnt', 'TRT_DBID'), ('patient', 'PAT_DBID')]:
        dbf_to_be_merged = load_iview_dbf(database_directory, refresh_cache, database_key)
        merged = merged.merge(dbf_to_be_merged, left_on=merge_key, right_on=merge_key)
    # Append '000' so IMG_TIME parses with a %f (microseconds) field.
    timestamps_string = (((merged['IMG_DATE'].astype('str') + 'T') + merged['IMG_TIME'].astype('str')) + '000')
    merged['datetime'] = pd.to_datetime(timestamps_string, format='%Y%m%dT%H%M%S%f')
    table = merged[['machine_id', 'patient_id', 'treatment', 'port', 'datetime', 'PIMG_DBID', 'DICOM_UID', 'LAST_NAME', 'FIRST_NAME']]
    return table
class torrentproject(object):
    """qBittorrent search-engine plugin for TorrentProject.

    NOTE(review): the `url` literal below is truncated to a bare quote by
    extraction — restore the site URL from the original plugin file.
    """
    url = '
    name = 'TorrentProject'
    supported_categories = {'all': '0'}

    class MyHTMLParser(HTMLParser):
        """Streaming parser that scrapes one result page into dicts shaped
        for qBittorrent's prettyPrinter."""

        def __init__(self, url):
            HTMLParser.__init__(self)
            self.url = url
            self.insideResults = False   # inside the results container div
            self.insideDataDiv = False   # inside one result's data div
            self.pageComplete = False    # navigation div seen => page done
            self.spanCount = (- 1)       # index of the current result column
            # Column index of each field within a result row.
            self.infoMap = {'name': 0, 'torrLink': 0, 'size': 5, 'seeds': 2, 'leech': 3}
            self.fullResData = []        # all parsed results (all pages)
            self.pageRes = []            # results of the current page only
            self.singleResData = self.get_single_data()

        def get_single_data(self):
            # Fresh result record; '-1' marks a still-unknown field.
            return {'name': '-1', 'seeds': '-1', 'leech': '-1', 'size': '-1', 'link': '-1', 'desc_link': '-1', 'engine_url': self.url}

        def handle_starttag(self, tag, attrs):
            attributes = dict(attrs)
            # The pagination/nav div marks the end of the result list.
            if ((tag == 'div') and ('nav' in attributes.get('id', ''))):
                self.pageComplete = True
            if ((tag == 'div') and (attributes.get('id', '') == 'similarfiles')):
                self.insideResults = True
            if ((tag == 'div') and self.insideResults and ('gac_bb' not in attributes.get('class', ''))):
                self.insideDataDiv = True
            elif ((tag == 'span') and self.insideDataDiv and ('verified' != attributes.get('title', ''))):
                self.spanCount += 1
            # Anchors in the name column carry both the torrent link and
            # the description-page link.
            if (self.insideDataDiv and (tag == 'a') and (len(attrs) > 0)):
                if ((self.infoMap['torrLink'] == self.spanCount) and ('href' in attributes)):
                    self.singleResData['link'] = (self.url + attributes['href'])
                if ((self.infoMap['name'] == self.spanCount) and ('href' in attributes)):
                    self.singleResData['desc_link'] = (self.url + attributes['href'])

        def handle_endtag(self, tag):
            if (not self.pageComplete):
                if (tag == 'div'):
                    # Leaving a result div: flush the accumulated record if
                    # it looks like a real result, then reset for the next.
                    self.insideDataDiv = False
                    self.spanCount = (- 1)
                    if (len(self.singleResData) > 0):
                        if ((self.singleResData['name'] != '-1') and (self.singleResData['size'] != '-1') and (self.singleResData['name'].lower() != 'nome')):
                            if ((self.singleResData['desc_link'] != '-1') or (self.singleResData['link'] != '-1')):
                                try:
                                    prettyPrinter(self.singleResData)
                                except Exception:
                                    # Best effort: fall back to a raw dump.
                                    print(self.singleResData)
                                self.pageRes.append(self.singleResData)
                                self.fullResData.append(self.singleResData)
                        self.singleResData = self.get_single_data()

        def handle_data(self, data):
            if self.insideDataDiv:
                for (key, val) in self.infoMap.items():
                    if (self.spanCount == val):
                        curr_key = key
                        # First chunk fills the field; later chunks are
                        # appended (except for the name, kept as-is).
                        if ((curr_key in self.singleResData) and (data.strip() != '')):
                            if (self.singleResData[curr_key] == '-1'):
                                self.singleResData[curr_key] = data.strip()
                            elif (curr_key != 'name'):
                                self.singleResData[curr_key] += data.strip()

        def feed(self, html):
            # Parse one page, then reset per-page parsing state.
            HTMLParser.feed(self, html)
            self.pageComplete = False
            self.insideResults = False
            self.insideDataDiv = False
            self.spanCount = (- 1)

    def search(self, what, cat='all'):
        """Scrape up to five result pages for the query `what`."""
        parser = self.MyHTMLParser(self.url)
        what = what.replace('%20', '+')
        for currPage in range(0, 5):
            url = (self.url + '?t={0}&p={1}'.format(what, currPage))
            html = retrieve_url(url)
            parser.feed(html)
            # Stop paging as soon as a page yields no results.
            if (len(parser.pageRes) <= 0):
                break
            del parser.pageRes[:]
        parser.close()

    def download_torrent(self, info):
        """Print 'magnet-link page-url' for the given description page."""
        html = retrieve_url(info)
        m = re.search('href=[\'"].*?(magnet.+?)[\'"]', html)
        if (m and (len(m.groups()) > 0)):
            magnet = unquote(m.group(1))
            print(((magnet + ' ') + info))
def xserver_start(display, executable='Xvfb', authfile=None):
    """Fork and exec an X server for `display`.

    Returns the child's pid in the parent process; the child never
    returns (it is replaced by the server via exec, so any exec failure
    raises inside the child).
    """
    child_pid = os.fork()
    if child_pid != 0:
        # Parent: hand the server's pid back to the caller.
        return child_pid

    # --- child process from here on ---
    if authfile is None:
        authfile = os.devnull
    # Ignoring SIGUSR1 before exec — presumably so the X server notifies
    # its parent when ready (standard X server convention); TODO confirm.
    signal.signal(signal.SIGUSR1, signal.SIG_IGN)
    cmd = [executable, '-auth', authfile, '-noreset', display]
    print('starting xserver: `{0}`'.format(' '.join(cmd)))
    os.execlp(cmd[0], *cmd)
class Effect6039(BaseEffect):
    """Passive effect: applies the module's tracking post-divider to all
    modules requiring the Small Projectile Turret skill."""

    type = 'passive'

    def handler(fit, module, context, projectionRange, **kwargs):
        # The multiplier is the inverse of the mode's tracking post-divider.
        multiplier = 1 / module.getModifiedItemAttr('modeTrackingPostDiv')
        fit.modules.filteredItemMultiply(
            lambda mod: mod.item.requiresSkill('Small Projectile Turret'),
            'trackingSpeed',
            multiplier,
            stackingPenalties=True,
            penaltyGroup='postDiv',
            **kwargs)
class BenchmarkSpec(namedtuple('BenchmarkSpec', 'name version origin')):
    """Immutable (name, version, origin) triple identifying a benchmark."""

    __slots__ = ()  # keep namedtuple's no-__dict__ memory profile

    def from_raw(cls, raw):
        # Accept an existing spec (returned unchanged, with no extra data)
        # or a string handed off to parse_benchmark().
        # NOTE(review): takes `cls` but has no @classmethod decorator —
        # presumably lost in extraction; confirm against the original file.
        if isinstance(raw, BenchmarkSpec):
            return (raw, None)
        elif isinstance(raw, str):
            return parse_benchmark(raw)
        else:
            raise ValueError(f'unsupported raw spec {raw!r}')

    def __new__(cls, name, version=None, origin=None):
        # Normalize falsy version/origin (e.g. '') to None.
        self = super().__new__(cls, name, (version or None), (origin or None))
        return self
def load(name):
    """Build an 80/20 train/test split from 'datasets/<name>_LLMs.csv'.

    Each usable CSV row contributes seven (text, label) examples: label 0
    is the human text, labels 1..6 index the machine generators below.
    Rows where any text has <= 1 whitespace token are discarded.
    """
    f = pd.read_csv(f'datasets/{name}_LLMs.csv')
    a_human = f['human'].tolist()
    mgt_text_list = []
    # One column per machine generator, in fixed label order 1..6.
    for detectLLM in ['ChatGLM', 'Dolly', 'ChatGPT-turbo', 'GPT4All', 'StableLM', 'Claude']:
        mgt_text_list.append(f[f'{detectLLM}'].fillna('').tolist())
    res = []
    # Keep only rows where the human text AND every generated text have
    # more than one whitespace-separated token.
    for i in range(len(a_human)):
        flag = 1
        if (len(a_human[i].split()) <= 1):
            flag = 0
        for mgt_text in mgt_text_list:
            if (len(mgt_text[i].split()) <= 1):
                flag = 0
                break
        if flag:
            res.append([a_human[i], mgt_text_list[0][i], mgt_text_list[1][i], mgt_text_list[2][i], mgt_text_list[3][i], mgt_text_list[4][i], mgt_text_list[5][i]])
    data_new = {'train': {'text': [], 'label': []}, 'test': {'text': [], 'label': []}}
    # Deterministic shuffle (seed 0) before the 80/20 partition.
    index_list = list(range(len(res)))
    random.seed(0)
    random.shuffle(index_list)
    total_num = len(res)
    for i in tqdm.tqdm(range(total_num), desc='parsing data'):
        if (i < (total_num * 0.8)):
            data_partition = 'train'
        else:
            data_partition = 'test'
        # Column j of the row becomes one example labelled j
        # (0 = human, 1..6 = generators in the order listed above).
        for j in range(0, 7):
            data_new[data_partition]['text'].append(process_spaces(res[index_list[i]][j]))
            data_new[data_partition]['label'].append(j)
    return data_new
class s3fd(nn.Module):
    """S3FD face detector: a VGG16-style backbone with extra layers and
    per-scale confidence/localization heads."""

    def __init__(self):
        super(s3fd, self).__init__()
        # VGG16 backbone, blocks 1-5 (3x3 convs, max-pool between blocks).
        self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1)
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        # fc6/fc7 as convolutions (converted VGG fully-connected layers).
        self.fc6 = nn.Conv2d(512, 1024, kernel_size=3, stride=1, padding=3)
        self.fc7 = nn.Conv2d(1024, 1024, kernel_size=1, stride=1, padding=0)
        # Extra feature layers (stride-2 convs shrink the feature map).
        self.conv6_1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)
        self.conv6_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1)
        self.conv7_1 = nn.Conv2d(512, 128, kernel_size=1, stride=1, padding=0)
        self.conv7_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1)
        # L2 normalization of the shallow feature maps before their heads.
        self.conv3_3_norm = L2Norm(256, scale=10)
        self.conv4_3_norm = L2Norm(512, scale=8)
        self.conv5_3_norm = L2Norm(512, scale=5)
        # Detection heads: *_conf = class scores, *_loc = box offsets.
        # conv3_3 head has 4 conf channels for max-out background labels.
        self.conv3_3_norm_mbox_conf = nn.Conv2d(256, 4, kernel_size=3, stride=1, padding=1)
        self.conv3_3_norm_mbox_loc = nn.Conv2d(256, 4, kernel_size=3, stride=1, padding=1)
        self.conv4_3_norm_mbox_conf = nn.Conv2d(512, 2, kernel_size=3, stride=1, padding=1)
        self.conv4_3_norm_mbox_loc = nn.Conv2d(512, 4, kernel_size=3, stride=1, padding=1)
        self.conv5_3_norm_mbox_conf = nn.Conv2d(512, 2, kernel_size=3, stride=1, padding=1)
        self.conv5_3_norm_mbox_loc = nn.Conv2d(512, 4, kernel_size=3, stride=1, padding=1)
        self.fc7_mbox_conf = nn.Conv2d(1024, 2, kernel_size=3, stride=1, padding=1)
        self.fc7_mbox_loc = nn.Conv2d(1024, 4, kernel_size=3, stride=1, padding=1)
        self.conv6_2_mbox_conf = nn.Conv2d(512, 2, kernel_size=3, stride=1, padding=1)
        self.conv6_2_mbox_loc = nn.Conv2d(512, 4, kernel_size=3, stride=1, padding=1)
        self.conv7_2_mbox_conf = nn.Conv2d(256, 2, kernel_size=3, stride=1, padding=1)
        self.conv7_2_mbox_loc = nn.Conv2d(256, 4, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        """Return [cls_i, reg_i] pairs for the six detection scales."""
        # Backbone, keeping the feature maps the heads attach to.
        h = F.relu(self.conv1_1(x))
        h = F.relu(self.conv1_2(h))
        h = F.max_pool2d(h, 2, 2)
        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        h = F.max_pool2d(h, 2, 2)
        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        f3_3 = h
        h = F.max_pool2d(h, 2, 2)
        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        f4_3 = h
        h = F.max_pool2d(h, 2, 2)
        h = F.relu(self.conv5_1(h))
        h = F.relu(self.conv5_2(h))
        h = F.relu(self.conv5_3(h))
        f5_3 = h
        h = F.max_pool2d(h, 2, 2)
        h = F.relu(self.fc6(h))
        h = F.relu(self.fc7(h))
        ffc7 = h
        h = F.relu(self.conv6_1(h))
        h = F.relu(self.conv6_2(h))
        f6_2 = h
        h = F.relu(self.conv7_1(h))
        h = F.relu(self.conv7_2(h))
        f7_2 = h
        # L2-normalize the shallow maps before applying their heads.
        f3_3 = self.conv3_3_norm(f3_3)
        f4_3 = self.conv4_3_norm(f4_3)
        f5_3 = self.conv5_3_norm(f5_3)
        cls1 = self.conv3_3_norm_mbox_conf(f3_3)
        reg1 = self.conv3_3_norm_mbox_loc(f3_3)
        cls2 = self.conv4_3_norm_mbox_conf(f4_3)
        reg2 = self.conv4_3_norm_mbox_loc(f4_3)
        cls3 = self.conv5_3_norm_mbox_conf(f5_3)
        reg3 = self.conv5_3_norm_mbox_loc(f5_3)
        cls4 = self.fc7_mbox_conf(ffc7)
        reg4 = self.fc7_mbox_loc(ffc7)
        cls5 = self.conv6_2_mbox_conf(f6_2)
        reg5 = self.conv6_2_mbox_loc(f6_2)
        cls6 = self.conv7_2_mbox_conf(f7_2)
        reg6 = self.conv7_2_mbox_loc(f7_2)
        # Max-out background label for the smallest anchors: collapse the
        # three background channels of cls1 to their element-wise maximum.
        chunk = torch.chunk(cls1, 4, 1)
        bmax = torch.max(torch.max(chunk[0], chunk[1]), chunk[2])
        cls1 = torch.cat([bmax, chunk[3]], dim=1)
        return [cls1, reg1, cls2, reg2, cls3, reg3, cls4, reg4, cls5, reg5, cls6, reg6]
class PublisherGroup(TimeStampedModel):
    """A named, advertiser-targetable collection of publishers, with
    change history tracked via django-simple-history."""
    # Human-readable name shown to advertisers.
    name = models.CharField(_('Name'), max_length=200, help_text=_('Visible to advertisers'))
    # Unique URL-safe identifier for the group.
    slug = models.SlugField(_('Publisher group slug'), max_length=200, unique=True)
    publishers = models.ManyToManyField(Publisher, related_name='publisher_groups', blank=True, help_text=_('A group of publishers that can be targeted by advertisers'))
    # Records every change to this model.
    history = HistoricalRecords()

    class Meta():
        ordering = ('name',)

    def __str__(self):
        return self.name
class EditDistance(object):
    """Levenshtein-style aligner between reference and hypothesis token
    sequences, with optional time-mediated costs.

    The DP matrices are kept on the instance between align() and
    get_result(); substitution confusions are tallied across calls.
    """

    def __init__(self, time_mediated):
        # When True, costs derive from token start/end times instead of
        # the fixed 0/3/4 match/indel/substitution costs.
        self.time_mediated_ = time_mediated
        self.scores_ = np.nan       # DP cost matrix, filled by align()
        self.backtraces_ = np.nan   # flat-offset backpointers per cell
        self.confusion_pairs_ = {}  # 'ref -> hyp' substitution counts

    def cost(self, ref, hyp, code):
        """Cost of pairing `ref` with `hyp` under edit type `code`."""
        if self.time_mediated_:
            if (code == Code.match):
                # Distance between the two tokens' time boundaries.
                return (abs((ref.start - hyp.start)) + abs((ref.end - hyp.end)))
            elif (code == Code.insertion):
                return (hyp.end - hyp.start)
            elif (code == Code.deletion):
                return (ref.end - ref.start)
            else:
                # Substitution: boundary distance plus a small penalty so
                # a true match is preferred at equal timing.
                return ((abs((ref.start - hyp.start)) + abs((ref.end - hyp.end))) + 0.1)
        elif (code == Code.match):
            return 0
        elif ((code == Code.insertion) or (code == Code.deletion)):
            return 3
        else:
            return 4

    def get_result(self, refs, hyps):
        """Backtrace through backtraces_ and build the AlignmentResult."""
        res = AlignmentResult(refs=deque(), hyps=deque(), codes=deque(), score=np.nan)
        (num_rows, num_cols) = self.scores_.shape
        res.score = self.scores_[((num_rows - 1), (num_cols - 1))]
        curr_offset = coordinate_to_offset((num_rows - 1), (num_cols - 1), num_cols)
        # Walk backpointers from the bottom-right corner to the origin.
        while (curr_offset != 0):
            curr_row = offset_to_row(curr_offset, num_cols)
            curr_col = offset_to_col(curr_offset, num_cols)
            prev_offset = self.backtraces_[(curr_row, curr_col)]
            prev_row = offset_to_row(prev_offset, num_cols)
            prev_col = offset_to_col(prev_offset, num_cols)
            res.refs.appendleft((curr_row - 1))
            res.hyps.appendleft((curr_col - 1))
            # Classify the step by which coordinate moved.
            if (((curr_row - 1) == prev_row) and (curr_col == prev_col)):
                res.codes.appendleft(Code.deletion)
            elif ((curr_row == prev_row) and ((curr_col - 1) == prev_col)):
                res.codes.appendleft(Code.insertion)
            else:
                # Diagonal move: match or substitution by label equality.
                ref_str = refs[res.refs[0]].label
                hyp_str = hyps[res.hyps[0]].label
                if (ref_str == hyp_str):
                    res.codes.appendleft(Code.match)
                else:
                    res.codes.appendleft(Code.substitution)
                    confusion_pair = ('%s -> %s' % (ref_str, hyp_str))
                    if (confusion_pair not in self.confusion_pairs_):
                        self.confusion_pairs_[confusion_pair] = 1
                    else:
                        self.confusion_pairs_[confusion_pair] += 1
            curr_offset = prev_offset
        return res

    def align(self, refs, hyps):
        """Fill the DP matrices for refs x hyps and return the alignment."""
        if ((len(refs) == 0) and (len(hyps) == 0)):
            return np.nan
        # (len(refs)+1) x (len(hyps)+1) matrices; row/col 0 are prefixes.
        self.scores_ = np.zeros(((len(refs) + 1), (len(hyps) + 1)))
        self.backtraces_ = np.zeros(((len(refs) + 1), (len(hyps) + 1)))
        (num_rows, num_cols) = self.scores_.shape
        for i in range(num_rows):
            for j in range(num_cols):
                if ((i == 0) and (j == 0)):
                    # Origin cell: zero cost, self-referencing backpointer.
                    self.scores_[(i, j)] = 0.0
                    self.backtraces_[(i, j)] = 0
                    continue
                if (i == 0):
                    # First row: hypothesis-only prefix => insertions.
                    self.scores_[(i, j)] = (self.scores_[(i, (j - 1))] + self.cost(None, hyps[(j - 1)], Code.insertion))
                    self.backtraces_[(i, j)] = coordinate_to_offset(i, (j - 1), num_cols)
                    continue
                if (j == 0):
                    # First column: reference-only prefix => deletions.
                    self.scores_[(i, j)] = (self.scores_[((i - 1), j)] + self.cost(refs[(i - 1)], None, Code.deletion))
                    self.backtraces_[(i, j)] = coordinate_to_offset((i - 1), j, num_cols)
                    continue
                ref = refs[(i - 1)]
                hyp = hyps[(j - 1)]
                # Diagonal (match/substitution) candidate first...
                best_score = (self.scores_[((i - 1), (j - 1))] + (self.cost(ref, hyp, Code.match) if (ref.label == hyp.label) else self.cost(ref, hyp, Code.substitution)))
                prev_row = (i - 1)
                prev_col = (j - 1)
                # ...then strictly-better insertion / deletion moves.
                ins = (self.scores_[(i, (j - 1))] + self.cost(None, hyp, Code.insertion))
                if (ins < best_score):
                    best_score = ins
                    prev_row = i
                    prev_col = (j - 1)
                delt = (self.scores_[((i - 1), j)] + self.cost(ref, None, Code.deletion))
                if (delt < best_score):
                    best_score = delt
                    prev_row = (i - 1)
                    prev_col = j
                self.scores_[(i, j)] = best_score
                self.backtraces_[(i, j)] = coordinate_to_offset(prev_row, prev_col, num_cols)
        return self.get_result(refs, hyps)
def parse_threshold(threshold):
    """Parse a comma-separated 'key=value' threshold specification.

    Keys matching a pynag plugin state are collected (as (state, value)
    tuples) under results['thresholds']; every other key is stored
    directly on the result dict. Keys are compared case-insensitively.

    Raises:
        InvalidThreshold: if an item is not of the form key=value.
    """
    parsed_thresholds = []
    results = {'thresholds': parsed_thresholds}
    for item in threshold.split(','):
        # '=' must exist and must not be the first character of the item.
        if item.find('=') < 1:
            raise InvalidThreshold("Invalid input: '%s' is not of the format key=value" % item)
        key, value = item.split('=', 1)
        key = key.lower()
        if key in pynag.Plugins.state:
            parsed_thresholds.append((pynag.Plugins.state[key], value))
        else:
            results[key] = value
    return results
class LxMounts(gdb.Command):
    """lx-mounts [PID]: list the mounts of a process's mount namespace
    (PID defaults to 1), similar in shape to /proc/<pid>/mounts."""

    def __init__(self):
        super(LxMounts, self).__init__('lx-mounts', gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        argv = gdb.string_to_argv(arg)
        if (len(argv) >= 1):
            try:
                pid = int(argv[0])
            except gdb.error:
                # NOTE(review): int() raises ValueError, not gdb.error, so
                # a non-numeric argument would propagate uncaught — confirm.
                raise gdb.GdbError('Provide a PID as integer value')
        else:
            pid = 1
        task = tasks.get_task_by_pid(pid)
        if (not task):
            raise gdb.GdbError("Couldn't find a process with PID {}".format(pid))
        namespace = task['nsproxy']['mnt_ns']
        if (not namespace):
            raise gdb.GdbError('No namespace for current process')
        gdb.write('{:^18} {:^15} {:>9} {} {} options\n'.format('mount', 'super_block', 'devname', 'pathname', 'fstype'))
        for vfs in lists.list_for_each_entry(namespace['list'], mount_ptr_type, 'mnt_list'):
            devname = vfs['mnt_devname'].string()
            devname = (devname if devname else 'none')
            pathname = ''
            parent = vfs
            # Walk up the mount tree, prepending each mountpoint's dentry
            # name, until we reach the self-parented root mount.
            while True:
                mntpoint = parent['mnt_mountpoint']
                pathname = (utils.dentry_name(mntpoint) + pathname)
                if (parent == parent['mnt_parent']):
                    break
                parent = parent['mnt_parent']
            if (pathname == ''):
                pathname = '/'
            superblock = vfs['mnt']['mnt_sb']
            fstype = superblock['s_type']['name'].string()
            s_flags = int(superblock['s_flags'])
            m_flags = int(vfs['mnt']['mnt_flags'])
            # ro/rw from the superblock, followed by per-superblock and
            # per-mount option strings.
            rd = ('ro' if (s_flags & constants.LX_SB_RDONLY) else 'rw')
            gdb.write('{} {} {} {} {} {}{}{} 0 0\n'.format(vfs.format_string(), superblock.format_string(), devname, pathname, fstype, rd, info_opts(FS_INFO, s_flags), info_opts(MNT_INFO, m_flags)))
class TestConstantsWithoutRequest():
    """Offline checks for telegram.constants: __all__ completeness, enum
    mixin behaviour (str/int), and Bot API version bookkeeping."""

    def test__all__(self):
        """Everything defined in telegram.constants must be in __all__."""
        expected = {key for (key, member) in constants.__dict__.items() if ((not key.startswith('_')) and (getattr(member, '__module__', 'telegram.constants') == 'telegram.constants') and (key != 'sys'))}
        actual = set(constants.__all__)
        assert (actual == expected), f'Members {(expected - actual)} were not listed in constants.__all__'

    def test_message_attachment_type(self):
        """Every MessageAttachmentType must also exist in MessageType."""
        assert all((getattr(constants.MessageType, x.name, False) for x in constants.MessageAttachmentType)), 'All MessageAttachmentType members should be in MessageType'

    def test_to_json(self):
        """Enum members must serialize like their underlying values."""
        assert (json.dumps(StrEnumTest.FOO) == json.dumps('foo'))
        assert (json.dumps(IntEnumTest.FOO) == json.dumps(1))

    def test_string_representation(self):
        """repr keeps the enum form; str/format use the string value."""
        assert (repr(StrEnumTest.FOO) == '<StrEnumTest.FOO>')
        assert (f'{StrEnumTest.FOO} this {StrEnumTest.BAR}' == 'foo this bar')
        assert (f'{StrEnumTest.FOO:*^10}' == '***foo****')
        assert (str(StrEnumTest.FOO) == 'foo')

    def test_int_representation(self):
        """repr keeps the enum form; str/format use the int value."""
        assert (repr(IntEnumTest.FOO) == '<IntEnumTest.FOO>')
        assert (f'{IntEnumTest.FOO}/0 is undefined!' == '1/0 is undefined!')
        assert (f'{IntEnumTest.FOO:*^10}' == '****1*****')
        assert (str(IntEnumTest.FOO) == '1')

    def test_string_inheritance(self):
        """StrEnum members behave like plain strings."""
        assert isinstance(StrEnumTest.FOO, str)
        assert ((StrEnumTest.FOO + StrEnumTest.BAR) == 'foobar')
        assert (StrEnumTest.FOO.replace('o', 'a') == 'faa')
        assert (StrEnumTest.FOO == StrEnumTest.FOO)
        assert (StrEnumTest.FOO == 'foo')
        assert (StrEnumTest.FOO != StrEnumTest.BAR)
        assert (StrEnumTest.FOO != 'bar')
        assert (object() != StrEnumTest.FOO)
        assert (hash(StrEnumTest.FOO) == hash('foo'))

    def test_int_inheritance(self):
        """IntEnum members behave like plain ints."""
        assert isinstance(IntEnumTest.FOO, int)
        assert ((IntEnumTest.FOO + IntEnumTest.BAR) == 3)
        assert (IntEnumTest.FOO == IntEnumTest.FOO)
        assert (IntEnumTest.FOO == 1)
        assert (IntEnumTest.FOO != IntEnumTest.BAR)
        assert (IntEnumTest.FOO != 2)
        assert (object() != IntEnumTest.FOO)
        assert (hash(IntEnumTest.FOO) == hash(1))

    def test_bot_api_version_and_info(self):
        """The version string and the version-info tuple must agree."""
        assert (str(constants.BOT_API_VERSION_INFO) == constants.BOT_API_VERSION)
        assert (tuple((int(x) for x in constants.BOT_API_VERSION.split('.'))) == constants.BOT_API_VERSION_INFO)

    def test_bot_api_version_info(self):
        """BOT_API_VERSION_INFO acts as an ordered (major, minor) tuple."""
        vi = constants.BOT_API_VERSION_INFO
        assert isinstance(vi, tuple)
        assert (repr(vi) == f'BotAPIVersion(major={vi[0]}, minor={vi[1]})')
        assert (vi == (vi[0], vi[1]))
        assert (not (vi < (vi[0], vi[1])))
        assert (vi < (vi[0], (vi[1] + 1)))
        assert (vi < ((vi[0] + 1), vi[1]))
        assert (vi < ((vi[0] + 1), (vi[1] + 1)))
        assert (vi[0] == vi.major)
        assert (vi[1] == vi.minor)
class S3ObjectStore(IObjectStore):
    """Object store backed by S3: pickles objects under random UUID keys."""

    def __init__(self, bucket_prefix: str) -> None:
        self.bucket = bucket_prefix
        super().__init__()

    def put_many(self, objects: List[object], *args, **kwargs) -> List[Any]:
        """Serialize and upload each object; return the generated refs."""
        refs = []
        for item in objects:
            payload = cloudpickle.dumps(item)
            key = uuid.uuid4()
            s3_utils.upload(f's3://{self.bucket}/{key}', payload)
            refs.append(key)
        return refs

    def get_many(self, refs: List[Any], *args, **kwargs) -> List[object]:
        """Download and unpickle every ref, logging total wall time."""
        loaded_objects = []
        start = time.monotonic()
        for key in refs:
            response = s3_utils.download(f's3://{self.bucket}/{key}')
            loaded_objects.append(cloudpickle.loads(response['Body'].read()))
        end = time.monotonic()
        logger.info(f'The total time taken to read all objects is: {(end - start)}')
        return loaded_objects
class TwoLayerBidirectionalGRUModel(nn.Module):
    """Stacked two-layer bidirectional GRU (3 input features, 5 hidden units).

    Because the GRU is bidirectional, the per-step output width is
    2 * hidden_size = 10 and h_n has 2 * num_layers = 4 direction-layers.
    """

    def __init__(self):
        super(TwoLayerBidirectionalGRUModel, self).__init__()
        self.recurrent = torch.nn.GRU(
            input_size=3, hidden_size=5, num_layers=2, bidirectional=True)

    def forward(self, x, hx=None):
        # Pass through unchanged: returns the GRU's (output, h_n) pair.
        return self.recurrent(x, hx)
class YahooOAuth(BaseOAuth1):
    """Yahoo OAuth1 authentication backend.

    NOTE(review): the endpoint URL string literals below appear truncated in
    this source (unterminated quotes); they must be restored before this
    module can even be parsed.
    """
    name = 'yahoo-oauth'
    ID_KEY = 'guid'
    AUTHORIZATION_URL = '
    REQUEST_TOKEN_URL = '
    ACCESS_TOKEN_URL = '
    EXTRA_DATA = [('guid', 'id'), ('access_token', 'access_token'), ('expires', 'expires')]
    def get_user_details(self, response):
        """Map a Yahoo profile payload to the common user-details dict."""
        (fullname, first_name, last_name) = self.get_user_names(first_name=response.get('givenName'), last_name=response.get('familyName'))
        # Keep only emails that have a 'handle'; sort so primary emails come first.
        emails = [email for email in response.get('emails', []) if email.get('handle')]
        emails.sort(key=(lambda e: e.get('primary', False)), reverse=True)
        return {'username': response.get('nickname'), 'email': (emails[0]['handle'] if emails else ''), 'fullname': fullname, 'first_name': first_name, 'last_name': last_name}
    def user_data(self, access_token, *args, **kwargs):
        """Fetch the profile document for the GUID tied to this access token."""
        url = '
        return self.get_json(url.format(self._get_guid(access_token)), auth=self.oauth_auth(access_token))['profile']
    def _get_guid(self, access_token):
        # Yahoo wraps the user GUID as {'guid': {'value': ...}}.
        return self.get_json(' auth=self.oauth_auth(access_token))['guid']['value']
def update_rule_buffer(rule_buffer_list, rule_path_list):
    """Load every JSON rule file and append RuleObject entries to rule_buffer_list.

    Each file may contain either a single rule dict or a list of rule dicts.
    """
    for path in rule_path_list:
        with open(path, 'r') as handle:
            parsed = json.loads(handle.read())
        if isinstance(parsed, list):
            rule_buffer_list.extend(RuleObject(path, entry) for entry in parsed)
        else:
            rule_buffer_list.append(RuleObject(path, parsed))
def entropy_loss(output, pooling, softmax, logsoftmax):
    """Mean entropy of pooled logits, summed over the HF and LF branches.

    Every argument is a (high-frequency, low-frequency) pair. For each branch
    the loss is -mean(softmax(pool(out)) * logsoftmax(pool(out))).
    """
    def _branch_entropy(out, pool, sm, lsm):
        pooled = pool(out)
        return -torch.mean(torch.mul(sm(pooled), lsm(pooled)))

    loss_hf = _branch_entropy(output[0], pooling[0], softmax[0], logsoftmax[0])
    loss_lf = _branch_entropy(output[1], pooling[1], softmax[1], logsoftmax[1])
    return loss_hf + loss_lf
class FusionOptimizer(GraphRewriter):
    """Graph rewriter that fuses chains of single-output Elemwise nodes into
    one Composite Elemwise node, reducing Python/graph overhead per element.
    """

    def add_requirements(self, fgraph):
        # Replacements are validated so a bad fusion can be rolled back.
        fgraph.attach_feature(ReplaceValidate())

    def elemwise_to_scalar(inputs, outputs):
        """Convert an Elemwise subgraph (inputs -> outputs) into the equivalent
        scalar-op subgraph, returning matching (scalar_inputs, scalar_outputs).

        NOTE(review): defined without ``self`` but called as
        ``self.elemwise_to_scalar`` below — presumably a ``@staticmethod``
        whose decorator was lost; confirm against upstream.
        """
        # Clone the inputs so the scalar graph does not alias the original graph.
        replace_inputs = [(inp, inp.clone()) for inp in inputs]
        outputs = clone_replace(outputs, replace=replace_inputs)
        inputs = [inp for (_, inp) in replace_inputs]
        fg = FunctionGraph(inputs=inputs, outputs=outputs, clone=False)
        middle_inputs = []
        scalar_inputs = [ps.get_scalar_type(inp.type.dtype).make_variable() for inp in inputs]
        middle_scalar_inputs = []
        # Walk the subgraph in topological order, mirroring each Elemwise node
        # with its underlying scalar op applied to scalar variables.
        for node in fg.toposort():
            node_scalar_inputs = []
            for inp in node.inputs:
                if (inp in inputs):
                    node_scalar_inputs.append(scalar_inputs[inputs.index(inp)])
                elif (inp in middle_inputs):
                    node_scalar_inputs.append(middle_scalar_inputs[middle_inputs.index(inp)])
                else:
                    # Intermediate variable seen for the first time.
                    new_scalar_input = ps.get_scalar_type(inp.type.dtype).make_variable()
                    node_scalar_inputs.append(new_scalar_input)
                    middle_scalar_inputs.append(new_scalar_input)
                    middle_inputs.append(inp)
            new_scalar_node = node.op.scalar_op.make_node(*node_scalar_inputs)
            middle_scalar_inputs.append(new_scalar_node.outputs[0])
            middle_inputs.append(node.outputs[0])
        scalar_outputs = [middle_scalar_inputs[middle_inputs.index(out)] for out in fg.outputs]
        return (scalar_inputs, scalar_outputs)

    def apply(self, fgraph):
        """Repeatedly find a fuseable Elemwise subgraph and replace it with a
        single Composite Elemwise node; returns the rewriter profile tuple.
        """
        nb_replacement = 0
        if fgraph.profile:
            validate_before = fgraph.profile.validate_time
            callbacks_before = fgraph.execute_callbacks_times.copy()
            callback_before = fgraph.execute_callbacks_time
        # Backend-dependent cap on how many operands one fused kernel may take.
        max_operands = elemwise_max_operands_fct(None)

        def find_next_fuseable_subgraph(fg: FunctionGraph) -> Generator[(tuple[(list[Variable], list[Variable])], None, None)]:
            """Yield (inputs, outputs) of maximal fuseable Elemwise subgraphs,
            updating its bookkeeping after each fusion is applied by the caller.
            """
            # out -> Elemwise clients that could be fused with out's owner.
            FUSEABLE_MAPPING = DefaultDict[(Variable, list[Apply])]
            # out -> clients that can never be fused (incl. the 'output' string).
            UNFUSEABLE_MAPPING = DefaultDict[(Variable, set[ApplyOrOutput])]

            def initialize_fuseable_mappings(*, fg: FunctionGraph) -> tuple[(FUSEABLE_MAPPING, UNFUSEABLE_MAPPING)]:
                # NOTE(review): the bare call below looks like a stripped
                # ``@lru_cache(maxsize=None)`` / ``@cache`` decorator; confirm
                # against upstream before relying on this module.
                _cache(maxsize=None)
                def elemwise_scalar_op_has_c_code(node: Apply) -> bool:
                    # Fusion requires a C implementation of the scalar op.
                    if node.op.scalar_op.supports_c_code(node.inputs, node.outputs):
                        return True
                    else:
                        warn(f'Optimization Warning: The Op {node.op.scalar_op} does not provide a C implementation. As well as being potentially slow, this also disables loop fusion.')
                        return False
                fuseable_clients: FUSEABLE_MAPPING = defaultdict(list)
                unfuseable_clients: UNFUSEABLE_MAPPING = defaultdict(set)
                for (out, clients) in fg.clients.items():
                    if (not clients):
                        continue
                    # A variable is a fusion candidate only if produced by a
                    # single-output Elemwise with C code.
                    out_maybe_fuseable = (out.owner and isinstance(out.owner.op, Elemwise) and (len(out.owner.outputs) == 1) and elemwise_scalar_op_has_c_code(out.owner))
                    for (client, _) in clients:
                        # Client must also be a compatible Elemwise with the same
                        # broadcast pattern; otherwise the edge is unfuseable.
                        if (out_maybe_fuseable and (not isinstance(client, str)) and isinstance(client.op, Elemwise) and (len(client.outputs) == 1) and (out.type.broadcastable == client.outputs[0].type.broadcastable) and elemwise_scalar_op_has_c_code(client)):
                            if (client not in fuseable_clients[out]):
                                fuseable_clients[out].append(client)
                        else:
                            unfuseable_clients[out].add(client)
                return (fuseable_clients, unfuseable_clients)

            def find_fuseable_subgraph(*, fg: FunctionGraph, visited_nodes: set[Apply], fuseable_clients: FUSEABLE_MAPPING, unfuseable_clients: UNFUSEABLE_MAPPING) -> tuple[(list[Variable], list[Variable])]:
                """Grow one fuseable subgraph from an unvisited seed node.

                Raises ValueError when no further subgraph can be found.
                """
                KT = TypeVar('KT')
                VT = TypeVar('VT', list, set)

                def shallow_clone_defaultdict(d: DefaultDict[(KT, VT)]) -> DefaultDict[(KT, VT)]:
                    # Copy the dict and each container value so tentative edits
                    # during subgraph growth do not corrupt the real mappings.
                    new_dict: DefaultDict[(KT, VT)] = defaultdict(d.default_factory)
                    new_dict.update({k: v.copy() for (k, v) in d.items()})
                    return new_dict

                def variables_depend_on(variables, depend_on, stop_search_at=None) -> bool:
                    return any(((a in depend_on) for a in ancestors(variables, blockers=stop_search_at)))

                toposort = fg.toposort()
                for starting_node in toposort:
                    if (starting_node in visited_nodes):
                        continue
                    starting_out = starting_node.outputs[0]
                    # Seeds must have at least one fuseable client.
                    if (not fuseable_clients.get(starting_out)):
                        visited_nodes.add(starting_node)
                        continue
                    subgraph_inputs: list[Variable] = []
                    subgraph_outputs: list[Variable] = []
                    # Variables outside the subgraph that transitively consume it;
                    # used to detect dependency cycles a fusion would create.
                    unfuseable_clients_subgraph: set[Variable] = set()
                    # Work on tentative copies; commit only via visited_nodes.
                    fuseable_clients_temp = shallow_clone_defaultdict(fuseable_clients)
                    unfuseable_clients_clone = shallow_clone_defaultdict(unfuseable_clients)
                    fuseable_nodes_to_visit = deque([starting_node])
                    while fuseable_nodes_to_visit:
                        next_node = fuseable_nodes_to_visit.popleft()
                        visited_nodes.add(next_node)
                        next_out = next_node.outputs[0]
                        # The output must leave the fused node if some client
                        # cannot be fused (or it has no fuseable clients left).
                        must_become_output = ((next_out not in fuseable_clients_temp) or (next_out in unfuseable_clients_clone))
                        if (must_become_output and (next_out in subgraph_outputs)):
                            subgraph_outputs.remove(next_out)
                        required_unfuseable_inputs = [inp for inp in next_node.inputs if (next_node in unfuseable_clients_clone.get(inp, ()))]
                        new_required_unfuseable_inputs = [inp for inp in required_unfuseable_inputs if (inp not in subgraph_inputs)]
                        must_backtrack = False
                        # Adding this node must not make a subgraph input depend
                        # on a subgraph output (which would create a cycle).
                        if (new_required_unfuseable_inputs and subgraph_outputs):
                            if variables_depend_on([next_out], depend_on=unfuseable_clients_subgraph, stop_search_at=subgraph_outputs):
                                must_backtrack = True
                        if (not must_backtrack):
                            implied_unfuseable_clients = {c for client in unfuseable_clients_clone.get(next_out, ()) if (not isinstance(client, str)) for c in client.outputs}
                            new_implied_unfuseable_clients = (implied_unfuseable_clients - unfuseable_clients_subgraph)
                            if (new_implied_unfuseable_clients and subgraph_inputs):
                                if variables_depend_on(subgraph_inputs, depend_on=new_implied_unfuseable_clients):
                                    must_backtrack = True
                        if must_backtrack:
                            # Undo: demote edges touching this node to unfuseable
                            # and re-queue the affected neighbours.
                            for inp in next_node.inputs:
                                if ((inp.owner in visited_nodes) and (next_node in fuseable_clients_temp[inp])):
                                    fuseable_clients_temp[inp].remove(next_node)
                                    unfuseable_clients_clone[inp].add(next_node)
                                    fuseable_nodes_to_visit.appendleft(inp.owner)
                            for client in fuseable_clients_temp[next_out]:
                                if (client in visited_nodes):
                                    fuseable_clients_temp[next_out].remove(client)
                                    unfuseable_clients_clone[next_out].add(client)
                                    fuseable_nodes_to_visit.appendleft(client)
                            visited_nodes.remove(next_node)
                            continue
                        for inp in new_required_unfuseable_inputs:
                            if (inp not in subgraph_inputs):
                                subgraph_inputs.append(inp)
                        if must_become_output:
                            subgraph_outputs.append(next_out)
                            unfuseable_clients_subgraph.update(new_implied_unfuseable_clients)
                        # Expand towards fuseable ancestors (depth-first) ...
                        for inp in sorted((inp for inp in next_node.inputs if ((inp not in required_unfuseable_inputs) and (inp.owner not in visited_nodes))), key=(lambda inp: toposort.index(inp.owner)), reverse=True):
                            fuseable_nodes_to_visit.appendleft(inp.owner)
                        # ... and towards fuseable clients (breadth-first).
                        for next_node in sorted((node for node in fuseable_clients_temp.get(next_out, ()) if (node not in visited_nodes)), key=(lambda node: toposort.index(node))):
                            fuseable_nodes_to_visit.append(next_node)
                    # Reject trivial "subgraphs" that are just the seed node.
                    if ((len(subgraph_outputs) == 1) and (set(subgraph_outputs[0].owner.inputs) == set(subgraph_inputs))):
                        for inp in starting_node.inputs:
                            if (starting_node in fuseable_clients.get(inp, ())):
                                fuseable_clients[inp].remove(starting_node)
                                unfuseable_clients[inp].add(starting_node)
                        unfuseable_clients[starting_out].update(fuseable_clients.pop(starting_out, ()))
                        continue
                    return (subgraph_inputs, subgraph_outputs)
                # No seed produced a non-trivial subgraph: signal exhaustion.
                raise ValueError

            def update_fuseable_mappings_after_fg_replace(*, fg: FunctionGraph, visited_nodes: set[Apply], fuseable_clients: FUSEABLE_MAPPING, unfuseable_clients: UNFUSEABLE_MAPPING, starting_nodes: set[Apply]) -> None:
                """Refresh the mappings after the caller replaced the fused
                subgraph: drop dead nodes and register the new Composite node.
                """
                next_nodes = fg.apply_nodes
                (new_composite_node,) = (next_nodes - starting_nodes)
                dropped_nodes = (starting_nodes - next_nodes)
                for dropped_node in dropped_nodes:
                    (dropped_out,) = dropped_node.outputs
                    fuseable_clients.pop(dropped_out, None)
                    unfuseable_clients.pop(dropped_out, None)
                    visited_nodes.remove(dropped_node)
                # NOTE: reads subgraph_inputs from the enclosing generator scope.
                for inp in subgraph_inputs:
                    if (inp in fuseable_clients):
                        new_fuseable_clients = [client for client in fuseable_clients[inp] if (client not in dropped_nodes)]
                        if new_fuseable_clients:
                            fuseable_clients[inp] = new_fuseable_clients
                        else:
                            fuseable_clients.pop(inp)
                    unfuseable_clients[inp] = ((unfuseable_clients[inp] - dropped_nodes) | {new_composite_node})
                for out in new_composite_node.outputs:
                    unfuseable_clients[out] = {client for (client, _) in fg.clients[out]}
                visited_nodes.add(new_composite_node)
                return

            (fuseable_clients, unfuseable_clients) = initialize_fuseable_mappings(fg=fg)
            visited_nodes: set[Apply] = set()
            while True:
                starting_nodes = fg.apply_nodes.copy()
                try:
                    (subgraph_inputs, subgraph_outputs) = find_fuseable_subgraph(fg=fg, visited_nodes=visited_nodes, fuseable_clients=fuseable_clients, unfuseable_clients=unfuseable_clients)
                except ValueError:
                    return
                else:
                    # The caller performs the replacement between yield and the
                    # mapping update below.
                    (yield (subgraph_inputs, subgraph_outputs))
                    update_fuseable_mappings_after_fg_replace(fg=fg, visited_nodes=visited_nodes, fuseable_clients=fuseable_clients, unfuseable_clients=unfuseable_clients, starting_nodes=starting_nodes)

        for (inputs, outputs) in find_next_fuseable_subgraph(fgraph):
            if ((len(inputs) + len(outputs)) > max_operands):
                warn('Loop fusion failed because the resulting node would exceed the kernel argument limit.')
                break
            (scalar_inputs, scalar_outputs) = self.elemwise_to_scalar(inputs, outputs)
            composite_outputs = Elemwise(ps.Composite(scalar_inputs, scalar_outputs))(*inputs)
            if (not isinstance(composite_outputs, list)):
                composite_outputs = [composite_outputs]
            # Preserve user-visible names across the replacement.
            for (old_out, composite_out) in zip(outputs, composite_outputs):
                if old_out.name:
                    composite_out.name = old_out.name
            fgraph.replace_all_validate(list(zip(outputs, composite_outputs)), reason=self.__class__.__name__)
            nb_replacement += 1
        if fgraph.profile:
            validate_time = (fgraph.profile.validate_time - validate_before)
            callback_time = (fgraph.execute_callbacks_time - callback_before)
            callbacks_time = {}
            for (k, v) in fgraph.execute_callbacks_times.items():
                if (k in callbacks_before):
                    callbacks_time[k] = (v - callbacks_before[k])
                else:
                    callbacks_time[k] = v
        else:
            validate_time = None
            callback_time = None
            callbacks_time = {}
        # Profile tuple: (rewriter, nb_iter, nb_replacement,
        # nb_inconsistency_replace, validate_time, callback_time,
        # callbacks_time, time_toposort).
        return (self, 1, nb_replacement, 0, validate_time, callback_time, callbacks_time, (- 1))

    def print_profile(stream, prof, level=0):
        """Pretty-print the profile tuple returned by apply().

        NOTE(review): defined without ``self``/``@staticmethod`` — the
        decorator was presumably stripped; confirm against upstream.
        """
        blanc = (' ' * level)
        print(blanc, 'FusionOptimizer', file=stream)
        print(blanc, ' nb_iter', prof[1], file=stream)
        print(blanc, ' nb_replacement', prof[2], file=stream)
        print(blanc, ' nb_inconsistency_replace', prof[3], file=stream)
        print(blanc, ' validate_time', prof[4], file=stream)
        print(blanc, ' callback_time', prof[5], file=stream)
        if ((prof[5] is not None) and (prof[5] > 1)):
            print(blanc, ' callbacks_time', file=stream)
            # Largest callback costs first; zero-cost entries are skipped.
            for i in sorted(prof[6].items(), key=(lambda a: a[1]))[::(- 1)]:
                if (i[1] > 0):
                    print(blanc, ' ', i)
        print(blanc, ' time_toposort', prof[7], file=stream)
# NOTE(review): the bare tuples below look like stripped click decorators
# (e.g. @click.group()/@click.option(...)/@click.pass_context); they must be
# restored for this CLI entry point to work.
()
('--color', default='auto', help='use colored output (no|auto|always)')
('--log-level', default='WARN', help='log level (debug|info|warn|error)')
('-I', '--include', multiple=True, help='include a path in the Specstrom module search paths')
_context
def root(ctx, color, log_level, include):
    """Root CLI command: configure colored output, module include paths, and logging."""
    if (color.lower() == 'auto'):
        ctx.color = None
    elif (color.lower() == 'always'):
        ctx.color = True
    elif (color.lower() == 'no'):
        ctx.color = False
    else:
        raise click.UsageError(f'Invalid color option: `{color}`')
    global_options['includes'] = include
    logging.basicConfig(format='%(asctime)s.%(msecs)03d %(name)-24s %(levelname)-8s %(message)s', level=getattr(logging, log_level.upper()), datefmt='%Y-%m-%d %H:%M:%S')
    # Quiet down chatty third-party loggers regardless of the chosen level.
    logging.getLogger('urllib3').setLevel(logging.INFO)
    logging.getLogger('selenium.webdriver.remote').setLevel(logging.INFO)
def pytest_configure(config: Config) -> None:
    """Enable faulthandler on a duplicate of the real stderr file descriptor,
    remembering the original fd when faulthandler was already enabled.

    NOTE(review): the indentation of the last two statements relative to the
    ``if`` is ambiguous in this source; assumed here that only the original-fd
    stash belongs to the branch — confirm against upstream pytest.
    """
    import faulthandler
    stderr_fileno = get_stderr_fileno()
    if faulthandler.is_enabled():
        # Remember where faulthandler was pointing so it can be restored later.
        config.stash[fault_handler_original_stderr_fd_key] = stderr_fileno
    # Duplicate the fd so capture plugins replacing stderr don't break dumps.
    config.stash[fault_handler_stderr_fd_key] = os.dup(stderr_fileno)
    faulthandler.enable(file=config.stash[fault_handler_stderr_fd_key])
class TestCaseHandshakeLoss(TestCase):
    """Interop test: handshakes must complete under severe packet loss.

    NOTE(review): name/testname/abbreviation/desc/timeout/scenario take no
    ``self`` — their ``@staticmethod`` decorators were presumably stripped.
    """
    # Number of independent handshake attempts (one small transfer each).
    _num_runs = 50
    def name():
        return 'handshakeloss'
    def testname(p: Perspective):
        return 'multiconnect'
    def abbreviation():
        return 'L1'
    def desc():
        return 'Handshake completes under extreme packet loss.'
    def timeout() -> int:
        # Generous timeout: loss makes each handshake slow.
        return 300
    def scenario() -> str:
        # drop-rate network scenario; rate_to_* presumably a percentage — confirm.
        return 'drop-rate --delay=15ms --bandwidth=10Mbps --queue=25 --rate_to_server=30 --rate_to_client=30'
    def get_paths(self):
        # One random 1 KB file per handshake attempt.
        for _ in range(self._num_runs):
            self._files.append(self._generate_random_file((1 * KB)))
        return self._files
    def check(self) -> TestResult:
        """Pass only if every attempted handshake completed and files transferred."""
        num_handshakes = self._count_handshakes()
        if (num_handshakes != self._num_runs):
            logging.info('Expected %d handshakes. Got: %d', self._num_runs, num_handshakes)
            return TestResult.FAILED
        if (not self._check_version_and_files()):
            return TestResult.FAILED
        return TestResult.SUCCEEDED
def get_effective_match_source(s: str, start: int, end: int) -> Match:
    """Widen [start, end) to nearby span separators, then trim them off.

    Looks at most two characters left of ``start`` and three characters from
    ``end - 1`` rightwards for a separator or a string boundary. Returns None
    when no anchor is found on either side; otherwise a Match covering the
    separator-trimmed span.
    """
    left = -1
    for idx in (start, start - 1):
        if idx < 0:
            left = idx + 1
            break
        if is_span_separator(s[idx]):
            left = idx
            break
    if left < 0:
        return None
    right = -1
    for idx in range(end - 1, end + 3):
        if idx >= len(s):
            right = idx - 1
            break
        if is_span_separator(s[idx]):
            right = idx
            break
    if right < 0:
        return None
    # Strip separator characters from both edges of the widened span.
    while left < len(s) and is_span_separator(s[left]):
        left += 1
    while right >= 0 and is_span_separator(s[right]):
        right -= 1
    return Match(left, right - left + 1)
class LoraInjectedConv2d(nn.Module):
    """Conv2d augmented with a low-rank (LoRA) residual branch.

    The base convolution is ``self.conv``; the trainable update is
    ``lora_up(selector(lora_down(x))) * scale`` with rank ``r``. ``lora_up``
    is zero-initialized, so the module starts out equivalent to plain conv.
    """

    def __init__(self, in_channels: int, out_channels: int, kernel_size, stride=1, padding=0, dilation=1, groups: int = 1, bias: bool = True, r: int = 4, dropout_p: float = 0.1, scale: float = 1.0):
        super().__init__()
        if r > min(in_channels, out_channels):
            raise ValueError(f'LoRA rank {r} must be less or equal than {min(in_channels, out_channels)}')
        self.r = r
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        # lora_down mirrors the base conv geometry; lora_up is a 1x1 projection back up.
        self.lora_down = nn.Conv2d(in_channels=in_channels, out_channels=r, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=False)
        self.dropout = nn.Dropout(dropout_p)
        self.lora_up = nn.Conv2d(in_channels=r, out_channels=out_channels, kernel_size=1, stride=1, padding=0, bias=False)
        self.selector = nn.Identity()
        self.scale = scale
        nn.init.normal_(self.lora_down.weight, std=(1 / r))
        # Zero init: the LoRA branch contributes nothing until trained.
        nn.init.zeros_(self.lora_up.weight)

    def forward(self, input):
        return self.conv(input) + (self.dropout(self.lora_up(self.selector(self.lora_down(input)))) * self.scale)

    def realize_as_lora(self):
        """Return (scale-multiplied up-projection weights, down-projection weights)."""
        return ((self.lora_up.weight.data * self.scale), self.lora_down.weight.data)

    def set_selector_from_diag(self, diag: torch.Tensor):
        """Replace the selector with a fixed diagonal 1x1 conv built from ``diag``.

        Bug fix: Conv2d weights must be 4-D (out, in, kH, kW). The previous
        code assigned the raw 2-D ``torch.diag(diag)`` matrix, which made every
        subsequent forward() fail with a weight-shape error.
        """
        assert (diag.shape == (self.r,))
        self.selector = nn.Conv2d(in_channels=self.r, out_channels=self.r, kernel_size=1, stride=1, padding=0, bias=False)
        self.selector.weight.data = torch.diag(diag).view(self.r, self.r, 1, 1)
        self.selector.weight.data = self.selector.weight.data.to(self.lora_up.weight.device).to(self.lora_up.weight.dtype)
class TestLeavesScope(TestNameCheckVisitorBase):
    """Scope-analysis tests: a name assigned on only some branches is still
    usable afterwards when every other branch provably leaves the scope
    (return/raise/assert False).

    NOTE(review): the bare ``_passes()`` lines look like stripped
    ``@assert_passes()`` decorators and must be restored for these to run.
    """
    _passes()
    def test_leaves_scope(self):
        # x is defined on the only branch that falls through.
        def capybara(cond):
            if cond:
                return
            else:
                x = 3
            print(x)
    _passes()
    def test_try_always_leaves_scope(self):
        # Both paths of the except handler leave the scope, so x is defined.
        def capybara(cond):
            try:
                x = 3
            except ValueError:
                if cond:
                    raise
                else:
                    return None
            print(x)
    _passes()
    def test_try_may_leave_scope(self):
        # One except path falls through without defining x; still expected to pass.
        def capybara(cond):
            try:
                x = 3
            except ValueError:
                if cond:
                    pass
                else:
                    return None
            print(x)
    _passes()
    def test_assert_false(self):
        # assert False terminates the branch like a raise.
        def capybara(cond):
            if cond:
                assert False
            else:
                x = 3
            print(x)
    _passes()
    def test_after_assert_false(self):
        # Code after an unconditional assert False; inferred values unchanged.
        def capybara(cond):
            assert False
            if cond:
                x = True
            else:
                x = None
            y = None
            assert_is_value(y, KnownValue(None))
            assert_is_value(x, MultiValuedValue([KnownValue(True), KnownValue(None)]))
    _passes()
    def test_elif_assert_false(self):
        # assert 0 (falsy constant) in the final else also terminates the branch.
        def capybara(cond):
            if (cond == 1):
                x = 3
            elif (cond == 2):
                x = 4
            else:
                assert 0
            print(x)
    _passes()
    def test_visit_assert_message(self):
        # The assert message expression must be visited (needs_int(x) sees the
        # pre-narrowing type); after the assert, x is narrowed to str.
        from typing import Union
        def needs_int(x: int) -> None:
            pass
        def capybara(x: Union[(int, str)]) -> None:
            assert_is_value(x, MultiValuedValue([TypedValue(int), TypedValue(str)]))
            assert isinstance(x, str), needs_int(x)
            assert_is_value(x, TypedValue(str))
    _passes()
    def test_no_cross_function_propagation(self):
        # Narrowing inside capybara must not leak into the call in kerodon.
        def capybara(cond):
            if (cond == 1):
                x = 3
            else:
                pass
            return x
        def kerodon():
            y = capybara(2)
            print(y)
class SolidfilesCom(SimpleDownloader):
    """Solidfiles.com downloader plugin for pyLoad.

    NOTE(review): the ``__pattern__`` string literal appears truncated in this
    source and must be restored to a valid URL regex.
    """
    __name__ = 'SolidfilesCom'
    __type__ = 'downloader'
    __version__ = '0.09'
    __status__ = 'testing'
    __pattern__ = '
    __config__ = [('enabled', 'bool', 'Activated', True), ('use_premium', 'bool', 'Use premium account if available', True), ('fallback', 'bool', 'Fallback to free download if premium fails', True), ('chk_filesize', 'bool', 'Check file size', True), ('max_wait', 'int', 'Reconnect if waiting time is greater than minutes', 10)]
    __description__ = 'Solidfiles.com downloader plugin'
    __license__ = 'GPLv3'
    __authors__ = [('sraedler', 'simon.'), ('GammaC0de', 'nitzo2001[AT]yahoo[DOT]com')]
    # Regexes used by SimpleDownloader to scrape name/size/offline state.
    NAME_PATTERN = '<h1 class="node-name">(?P<N>.+?)</h1>'
    SIZE_PATTERN = '</copy-button>\\s*(?P<S>[\\d.,]+) (?P<U>[\\w_^]+)'
    OFFLINE_PATTERN = '<h1>404'
    def setup(self):
        # Allow parallel downloads but limit each transfer to one chunk.
        self.multi_dl = True
        self.chunk_limit = 1
    def handle_free(self, pyfile):
        # The direct link is embedded as JSON in the page source.
        m = re.search('"downloadUrl":"(.+?)"', self.data)
        if (m is not None):
            self.link = m.group(1)
class GraphicsLayout(GraphicsWidget):
    """Grid-layout widget for graphics items, with an internal cursor
    (currentRow/currentCol) so items can be added sequentially, and optional
    per-item border rectangles.
    """
    def __init__(self, parent=None, border=None):
        GraphicsWidget.__init__(self, parent)
        # border=True selects a default gray; False/None disables borders.
        if (border is True):
            border = (100, 100, 100)
        elif (border is False):
            border = None
        self.border = border
        self.layout = QtWidgets.QGraphicsGridLayout()
        self.setLayout(self.layout)
        # item -> list of (row, col) cells it occupies.
        self.items = {}
        # row -> {col -> item} reverse lookup.
        self.rows = {}
        # item -> QGraphicsRectItem drawn around it.
        self.itemBorders = {}
        self.currentRow = 0
        self.currentCol = 0
        self.setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding))
    def setBorder(self, *args, **kwds):
        """Set the pen used for all item borders (args passed to fn.mkPen)."""
        self.border = fn.mkPen(*args, **kwds)
        for borderRect in self.itemBorders.values():
            borderRect.setPen(self.border)
    def nextRow(self):
        """Advance the cursor to the first free column of the next row."""
        self.currentRow += 1
        self.currentCol = (- 1)
        self.nextColumn()
    def nextColumn(self):
        """Advance the cursor to the next unoccupied column in the current row."""
        self.currentCol += 1
        while (self.getItem(self.currentRow, self.currentCol) is not None):
            self.currentCol += 1
    def nextCol(self, *args, **kargs):
        # Alias kept for backward compatibility.
        return self.nextColumn(*args, **kargs)
    def addPlot(self, row=None, col=None, rowspan=1, colspan=1, **kargs):
        """Create a PlotItem, add it at (row, col), and return it."""
        plot = PlotItem(**kargs)
        self.addItem(plot, row, col, rowspan, colspan)
        return plot
    def addViewBox(self, row=None, col=None, rowspan=1, colspan=1, **kargs):
        """Create a ViewBox, add it at (row, col), and return it."""
        vb = ViewBox(**kargs)
        self.addItem(vb, row, col, rowspan, colspan)
        return vb
    def addLabel(self, text=' ', row=None, col=None, rowspan=1, colspan=1, **kargs):
        """Create a LabelItem, add it at (row, col), and return it."""
        text = LabelItem(text, **kargs)
        self.addItem(text, row, col, rowspan, colspan)
        return text
    def addLayout(self, row=None, col=None, rowspan=1, colspan=1, **kargs):
        """Create a nested GraphicsLayout, add it at (row, col), and return it."""
        layout = GraphicsLayout(**kargs)
        self.addItem(layout, row, col, rowspan, colspan)
        return layout
    def addItem(self, item, row=None, col=None, rowspan=1, colspan=1):
        """Add an existing graphics item; defaults to the current cursor cell."""
        if (row is None):
            row = self.currentRow
        if (col is None):
            col = self.currentCol
        self.items[item] = []
        # Record every cell covered by the item's span.
        for i in range(rowspan):
            for j in range(colspan):
                row2 = (row + i)
                col2 = (col + j)
                if (row2 not in self.rows):
                    self.rows[row2] = {}
                self.rows[row2][col2] = item
                self.items[item].append((row2, col2))
        # Border rect tracks the item's geometry via its geometryChanged signal.
        borderRect = QtWidgets.QGraphicsRectItem()
        borderRect.setParentItem(self)
        borderRect.setZValue(1000.0)
        borderRect.setPen(fn.mkPen(self.border))
        self.itemBorders[item] = borderRect
        item.geometryChanged.connect(self._updateItemBorder)
        self.layout.addItem(item, row, col, rowspan, colspan)
        # Forces layout recompute; helps with initial window sizing.
        self.layout.activate()
        self.nextColumn()
    def getItem(self, row, col):
        """Return the item at (row, col), or None."""
        return self.rows.get(row, {}).get(col, None)
    def boundingRect(self):
        return self.rect()
    def itemIndex(self, item):
        """Return the layout index of ``item``; raises ValueError if absent."""
        for i in range(self.layout.count()):
            if (self.layout.itemAt(i).graphicsItem() is item):
                return i
        raise ValueError(f'Could not determine index of item {item}')
    def removeItem(self, item):
        """Remove ``item`` from the layout, the scene, and all bookkeeping."""
        ind = self.itemIndex(item)
        self.layout.removeAt(ind)
        self.scene().removeItem(item)
        for (r, c) in self.items[item]:
            del self.rows[r][c]
        del self.items[item]
        item.geometryChanged.disconnect(self._updateItemBorder)
        itemBorder = self.itemBorders.pop(item)
        self.scene().removeItem(itemBorder)
        self.update()
    def clear(self):
        """Remove all items and reset the cursor to (0, 0)."""
        for i in list(self.items.keys()):
            self.removeItem(i)
        self.currentRow = 0
        self.currentCol = 0
    def setContentsMargins(self, *args):
        self.layout.setContentsMargins(*args)
    def setSpacing(self, *args):
        self.layout.setSpacing(*args)
    def _updateItemBorder(self):
        # Slot for item.geometryChanged: resize that item's border rect.
        if (self.border is None):
            return
        item = self.sender()
        if (item is None):
            return
        r = item.mapRectToParent(item.boundingRect())
        self.itemBorders[item].setRect(r)
class NativeTorchQuantWrapper(nn.Module):
    """Wraps one quantized module, re-expressing its input/output/parameter
    quantizers as native-PyTorch TorchQuantizer objects so the module can run
    quantize-dequantize simulation without the original wrapper machinery.
    """
    def __init__(self, post_training_module: Union[(StaticGridQuantWrapper, LearnedGridQuantWrapper)], module_name: str, device: torch.device):
        super(NativeTorchQuantWrapper, self).__init__()
        # The actual (unquantized) module being simulated.
        self._module_to_wrap = getattr(post_training_module, module_name)
        if isinstance(post_training_module, StaticGridQuantWrapper):
            # Encodings are only valid once the wrapper has been activated.
            if (post_training_module._mode != QcQuantizeOpMode.ACTIVE):
                raise ValueError('Only ACTIVE QcQuantizeOpMode is supported while using StaticGridQuantWrapper')
        self.output_quantizers = [TorchQuantizer(quantizer, device) for quantizer in post_training_module.output_quantizers]
        self.input_quantizers = [TorchQuantizer(quantizer, device) for quantizer in post_training_module.input_quantizers]
        self.param_quantizers = {}
        for (name, quantizer) in post_training_module.param_quantizers.items():
            self.param_quantizers[name] = TorchQuantizer(quantizer, device)
    def _quantize_dequantize(tensor_quantizers, tensors_to_quantize):
        """Quantize-dequantize each tensor with its positional quantizer.

        Tensors with dtypes in the ignore list pass through unchanged. Returns
        a single tensor when only one output is produced, else a list.

        NOTE(review): defined without ``self`` but called as
        ``self._quantize_dequantize`` — presumably a stripped @staticmethod.
        """
        outputs = []
        for (index, input_tensor) in enumerate(tensors_to_quantize):
            if (not isinstance(input_tensor, torch.Tensor)):
                _logger.error('Expecting quantize activation input of type torch.Tensor but got %s', type(input_tensor))
                raise AssertionError
            if (input_tensor.dtype in utils.torch_dtypes_to_ignore_for_quantization):
                # e.g. integer/bool tensors are not quantized.
                outputs.append(input_tensor)
                continue
            assert (len(tensor_quantizers) > index), f'Not enough tensor quantizers ({len(tensor_quantizers)}) allocated'
            output = tensor_quantizers[index].quantize_dequantize(input_tensor)
            outputs.append(output)
        if (len(outputs) == 1):
            outputs = outputs[0]
        return outputs
    def forward(self, *inputs, **kwargs):
        """Quantize inputs and parameters, run the wrapped module, then
        quantize its outputs (when the first output quantizer is enabled).
        """
        quantized_inputs = self._quantize_dequantize(self.input_quantizers, inputs)
        if isinstance(quantized_inputs, torch.Tensor):
            quantized_inputs = [quantized_inputs]
        # Replace each parameter in-place with its quantize-dequantized value.
        for (name, param) in self._module_to_wrap.named_parameters():
            param_quantizer = self.param_quantizers[name]
            if param_quantizer.enabled:
                setattr(self._module_to_wrap, name, torch.nn.Parameter(param_quantizer.quantize_dequantize(param), requires_grad=True))
        wrapped_output = self._module_to_wrap(*quantized_inputs, **kwargs)
        if (not self.output_quantizers[0].enabled):
            output = wrapped_output
        else:
            if isinstance(wrapped_output, torch.Tensor):
                wrapped_output = [wrapped_output]
            output = self._quantize_dequantize(self.output_quantizers, wrapped_output)
        return output
class TFMobileViTSelfOutput(tf.keras.layers.Layer):
    """Projects attention output back to ``hidden_size``, then applies dropout."""

    def __init__(self, config: MobileViTConfig, hidden_size: int, **kwargs) -> None:
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(hidden_size, name='dense')
        # Dropout is only active when called with training=True.
        self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)

    def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
        projected = self.dense(hidden_states)
        return self.dropout(projected, training=training)
class Migration(migrations.Migration):
    """Convert the blog Post text fields (content, excerpt, slug, title) to
    their i18n-aware field types.
    """
    dependencies = [('blog', '0002_auto__2128')]
    operations = [migrations.AlterField(model_name='post', name='content', field=i18n.fields.I18nTextField(blank=True, verbose_name='content')), migrations.AlterField(model_name='post', name='excerpt', field=i18n.fields.I18nTextField(verbose_name='excerpt')), migrations.AlterField(model_name='post', name='slug', field=i18n.fields.I18nCharField(blank=True, max_length=200, verbose_name='slug')), migrations.AlterField(model_name='post', name='title', field=i18n.fields.I18nCharField(max_length=200, verbose_name='title'))]
def optimize_one_inter_rep(inter_rep, layer_name, target, probe, lr=0.001, max_epoch=256, loss_func=nn.HuberLoss(), smoothness_loss_func=None, image=None, verbose=False):
    """Gradient-optimize an intermediate representation so ``probe`` maps it to ``target``.

    Runs Adam for ``max_epoch`` steps with autocast explicitly disabled, and
    returns a clone of the optimized tensor. ``smoothness_loss_func``, when
    given, adds a regularization term computed from the probe output and ``image``.
    """
    with autocast('cuda', enabled=False):
        # Target as a (1, 1, ...) float tensor on the optimization device.
        target_tensor = torch.Tensor(target).to(torch.float).to(torch_device).unsqueeze(0).unsqueeze(0)
        opt_tensor = inter_rep.clone().to(torch_device).requires_grad_(True).to(torch.float)
        rep_f = (lambda : opt_tensor)
        optimizer = torch.optim.Adam([opt_tensor], lr=lr)
        progress = tqdm(range(max_epoch), leave=False) if verbose else range(max_epoch)
        for epoch in progress:
            optimizer.zero_grad()
            prediction = probe(rep_f())
            loss = loss_func(prediction, target_tensor)
            if smoothness_loss_func:
                loss += smoothness_loss_func(prediction, image)
            loss.backward()
            optimizer.step()
            if verbose:
                progress.set_description(f'At layer {layer_name} [{epoch + 1}/{max_epoch}]; Loss: {loss.item():.3f}')
        return rep_f().clone()
def get_coeffs_for_poly3(length, lane_offset, zero_start, lane_width_end=None):
    """Solve for cubic coefficients [a, b, c, d] of a lane width/offset taper.

    The cubic f(s) = a + b*s + c*s^2 + d*s^3 has zero slope at both ends
    (s=0 and s=length). ``zero_start`` selects whether the value grows from 0
    to ``lane_offset`` or shrinks from ``lane_offset`` to 0; an explicit
    ``lane_width_end`` overrides the endpoint values to
    (lane_offset, lane_width_end).
    """
    s_start, s_end = 0, length
    # Constraint rows: f'(s_start), f'(s_end), f(s_start), f(s_end).
    A = np.array([
        [0, 1, 2 * s_start, 3 * s_start ** 2],
        [0, 1, 2 * s_end, 3 * s_end ** 2],
        [1, s_start, s_start ** 2, s_start ** 3],
        [1, s_end, s_end ** 2, s_end ** 3],
    ])
    if lane_width_end is not None:
        B = [0, 0, lane_offset, lane_width_end]
    elif zero_start:
        B = [0, 0, 0, lane_offset]
    else:
        B = [0, 0, lane_offset, 0]
    return np.linalg.solve(A, B)
class SwishLayerNorm(nn.Module):
    """Computes x * sigmoid(LayerNorm(x)) — a LayerNorm-gated Swish activation."""

    def __init__(self, input_dims: Union[(int, List[int], torch.Size)], device: Optional[torch.device]=None) -> None:
        super().__init__()
        # Gate pipeline: normalize, then squash to (0, 1).
        self.norm: torch.nn.modules.Sequential = nn.Sequential(nn.LayerNorm(input_dims, device=device), nn.Sigmoid())

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        gate = self.norm(input)
        return input * gate
def test_create_questionset_page(db):
    """A locked page must make QuestionSetLockedValidator reject unlocking its set."""
    questionset = QuestionSet.objects.exclude(pages=None).first()
    locked_page = questionset.pages.first()
    locked_page.locked = True
    locked_page.save()
    with pytest.raises(ValidationError):
        QuestionSetLockedValidator()({'parents': [questionset], 'locked': False})
def _build(isolation: bool, srcdir: PathType, outdir: PathType, distribution: str, config_settings: (ConfigSettingsType | None), skip_dependency_check: bool) -> str:
    """Build ``distribution`` from ``srcdir`` into ``outdir``.

    Dispatches to an isolated-environment build or a current-environment build
    (the latter optionally skipping the dependency check); returns whatever the
    selected builder returns.
    """
    if not isolation:
        return _build_in_current_env(srcdir, outdir, distribution, config_settings, skip_dependency_check)
    return _build_in_isolated_env(srcdir, outdir, distribution, config_settings)
def _build_call_graph(bloq: Bloq, generalizer: GeneralizerT, ssa: SympySymbolAllocator, keep: Callable[([Bloq], bool)], max_depth: Optional[int], g: nx.DiGraph, depth: int) -> None:
    """Recursively add ``bloq`` and its callees to the call graph ``g``.

    Expansion stops at already-present bloqs, bloqs selected by ``keep``, the
    ``max_depth`` cap, and bloqs that raise on decomposition. Each edge's ``n``
    attribute accumulates the call count.
    """
    if bloq in g:
        return  # already expanded
    g.add_node(bloq)
    # Leaf conditions: caller wants this bloq kept opaque, or depth cap reached.
    if keep(bloq) or (max_depth is not None and depth >= max_depth):
        return
    try:
        callee_counts = _generalize_callees(bloq.build_call_graph(ssa), generalizer)
    except (DecomposeNotImplementedError, DecomposeTypeError):
        return  # no decomposition available; treat as a leaf
    for callee, n in callee_counts:
        _build_call_graph(callee, generalizer, ssa, keep, max_depth, g, depth + 1)
        if (bloq, callee) in g.edges:
            g.edges[bloq, callee]['n'] += n
        else:
            g.add_edge(bloq, callee, n=n)
def example_dconv_offset():
    """Demo: run 3-D deformable convolution with explicit random offsets and backprop."""
    print('using extra offsets')
    x = torch.randn(B, inC, inT, inH, inW).cuda()
    # One (t, y, x) offset triplet per kernel element per output location.
    offsets = torch.randn(B, kT * kH * kW * 3, inT, inH, inW).cuda()
    dcn = DeformConv(inC, outC, kernel_size=[kT, kH, kW], stride=[sT, sH, sW], padding=[pT, pH, pW], dilation=[dT, dH, dW]).cuda()
    print('input.shape: ', x.shape)
    print('offset.shape: ', offsets.shape)
    output = dcn(x, offsets)
    # Random target just to produce a scalar error for the backward pass.
    target = output.new(*output.size())
    target.data.uniform_(-0.01, 0.01)
    error = (target - output).mean()
    error.backward()
    print('output.shape: ', output.shape)
class _RunAlgoError(click.ClickException, ValueError):
    """Error with separate Python-facing and CLI-facing messages.

    ``pyfunc_msg`` is what str() returns when raised from Python code;
    ``cmdline_msg`` (defaulting to the same text) is what click prints.
    """
    exit_code = 1

    def __init__(self, pyfunc_msg, cmdline_msg=None):
        super(_RunAlgoError, self).__init__(pyfunc_msg if cmdline_msg is None else cmdline_msg)
        self.pyfunc_msg = pyfunc_msg

    def __str__(self):
        return self.pyfunc_msg
def assert_args_presence(args, doc, member, name):
    """Validate that every argument of ``member`` is documented in ``doc``.

    Three checks, each raising ValueError on failure: every arg appears in the
    docstring; every arg is styled as ``arg:``; the documented order matches
    the signature order.
    """
    missing = [arg not in doc for arg in args]
    if any(missing):
        raise ValueError('{} {} arguments are not present in documentation '.format(name, list(compress(args, missing))), member.__module__)
    words = doc.replace('*', '').split()
    badly_styled = [(arg + ':') not in words for arg in args]
    if any(badly_styled):
        raise ValueError("{} {} are not style properly 'argument': documentation".format(name, list(compress(args, badly_styled))), member.__module__)
    # Documented positions must be non-decreasing, i.e. in signature order.
    positions = [words.index(arg + ':') for arg in args]
    if positions != sorted(positions):
        raise ValueError('{} arguments order is different from the documentation'.format(name), member.__module__)
def main():
    """Visualize CPU vs GPU max-voxelization of one masked YCB-Video object."""
    dataset = morefusion.datasets.YCBVideoDataset(split='train')
    class_names = morefusion.datasets.ycb_video.class_names
    example = dataset[1000]
    rgb = example['color']
    depth = example['depth']
    K = example['meta']['intrinsic_matrix']
    # Back-project the depth map into a per-pixel XYZ point cloud.
    pcd = morefusion.geometry.pointcloud_from_depth(depth, fx=K[(0, 0)], fy=K[(1, 1)], cx=K[(0, 2)], cy=K[(1, 2)])
    class_id = example['meta']['cls_indexes'][0]
    mask = (example['label'] == class_id)
    # Keep only pixels of the target object with valid (non-NaN) depth.
    mask = ((~ np.isnan(pcd).any(axis=2)) & mask)
    points = pcd[mask].astype(np.float32)
    values = (rgb[mask].astype(np.float32) / 255)
    models = morefusion.datasets.YCBVideoModels()
    pitch = models.get_voxel_pitch(dimension=32, class_id=class_id)
    # Center the 32^3 voxel grid on the point-cloud centroid.
    centroid = points.mean(axis=0)
    origin = (centroid - ((pitch * 32) / 2.0))
    print(f'class_id: {class_id}')
    print(f'class_name: {class_names[class_id]}')
    print(f'origin: {origin}')
    print(f'pitch: {pitch}')
    # gpu=-1 runs on CPU; gpu=0 uses the first CUDA device.
    check_max_voxelization_3d(origin, pitch, points, values, gpu=(- 1), start_loop=False, caption='Voxelization3D (CPU)', resolution=(400, 400))
    check_max_voxelization_3d(origin, pitch, points, values, gpu=0, start_loop=False, caption='Voxelization3D (GPU)', resolution=(400, 400))
    pyglet.app.run()
class DoubleBarrier(object):
    """ZooKeeper double barrier: clients block on enter() until ``num_clients``
    have arrived, and block on leave() until all have finished.
    """
    def __init__(self, client, path, num_clients, identifier=None):
        self.client = client
        self.path = path
        # Number of participants required before the barrier opens.
        self.num_clients = num_clients
        self._identifier = (identifier or ('%s-%s' % (socket.getfqdn(), os.getpid())))
        self.participating = False
        self.assured_path = False
        # Unique ephemeral node name representing this participant.
        self.node_name = uuid.uuid4().hex
        self.create_path = ((self.path + '/') + self.node_name)
    def enter(self):
        """Join the barrier; blocks until all participants have entered.

        On ZooKeeper failure, cleans up best-effort and marks this client as
        not participating (no exception is propagated).
        """
        try:
            self.client.retry(self._inner_enter)
            self.participating = True
        except KazooException:
            self._best_effort_cleanup()
            self.participating = False
    def _inner_enter(self):
        if (not self.assured_path):
            self.client.ensure_path(self.path)
            self.assured_path = True
        ready = self.client.handler.event_object()
        try:
            # Announce this participant with an ephemeral node.
            self.client.create(self.create_path, self._identifier.encode('utf-8'), ephemeral=True)
        except NodeExistsError:
            pass
        def created(event):
            if (event.type == EventType.CREATED):
                ready.set()
        # Watch for the 'ready' marker the last arriver will create.
        self.client.exists(((self.path + '/') + 'ready'), watch=created)
        children = self.client.get_children(self.path)
        if (len(children) < self.num_clients):
            # Not everyone is here yet: wait for the 'ready' node to appear.
            ready.wait()
        else:
            # We are the last arriver: open the barrier for everyone.
            self.client.ensure_path((self.path + '/ready'))
        return True
    def leave(self):
        """Leave the barrier; blocks until all participants have left."""
        try:
            self.client.retry(self._inner_leave)
        except KazooException:
            self._best_effort_cleanup()
        self.participating = False
    def _inner_leave(self):
        # Remove the 'ready' marker so the barrier can be reused.
        try:
            self.client.delete((self.path + '/ready'))
        except NoNodeError:
            pass
        while True:
            children = self.client.get_children(self.path)
            if (not children):
                return True
            if ((len(children) == 1) and (children[0] == self.node_name)):
                # Only our own node remains: delete it and finish.
                self.client.delete(self.create_path)
                return True
            children.sort()
            ready = self.client.handler.event_object()
            def deleted(event):
                if (event.type == EventType.DELETED):
                    ready.set()
            if (self.node_name == children[0]):
                # Lowest node waits for the highest node to disappear.
                if (not self.client.exists(((self.path + '/') + children[(- 1)]), watch=deleted)):
                    continue
                ready.wait()
                continue
            # Everyone else deletes their node, then waits on the lowest node.
            self.client.delete(self.create_path)
            if (not self.client.exists(((self.path + '/') + children[0]), watch=deleted)):
                continue
            ready.wait()
    def _best_effort_cleanup(self):
        # Remove our node if it exists; ignore the case where it is already gone.
        try:
            self.client.retry(self.client.delete, self.create_path)
        except NoNodeError:
            pass
class Walker(object, metaclass=MetaNodeTypeHandler):
    """Base class for pySMT formula walkers.

    Each node type is dispatched to a ``walk_<type>`` method; node types
    without a handler fall back to :meth:`walk_error`.
    """

    def __init__(self, env=None):
        # Use the global pySMT environment unless one is provided.
        if (env is None):
            import pysmt.environment
            env = pysmt.environment.get_env()
        self.env = env
        # Per-instance dispatch table: node type -> bound handler.
        self.functions = {}
        for o in op.all_types():
            try:
                self.functions[o] = getattr(self, nt_to_fun(o))
            except AttributeError:
                self.functions[o] = self.walk_error

    def set_function(self, function, *node_types):
        """Deprecated: register a handler on this instance only."""
        from warnings import warn
        warn('Instance-based walkers (<=0.6.0) walkers are deprecated. You should use new-style/class based walkers', stacklevel=2)
        for nt in node_types:
            self.functions[nt] = function

    def set_handler(cls, function, *node_types):
        # NOTE(review): takes ``cls`` first -- presumably a @classmethod whose
        # decorator was stripped in this copy; verify against upstream pySMT.
        for nt in node_types:
            setattr(cls, nt_to_fun(nt), function)

    def super(cls, self, formula, *args, **kwargs):
        # NOTE(review): also looks like a stripped @classmethod. Invokes the
        # handler defined on ``cls`` for the formula's node type.
        f = getattr(cls, nt_to_fun(formula.node_type()))
        return f(self, formula, *args, **kwargs)

    # NOTE(review): the bare expression below appears to be the argument of a
    # stripped ``@handles(...)`` decorator for walk_error; left untouched.
    (op.ALL_TYPES)
    def walk_error(self, formula, **kwargs):
        """Fallback handler: consult dynamically-registered walker functions
        (env.dwf) before raising UnsupportedOperatorError."""
        node_type = formula.node_type()
        if (node_type in self.env.dwf):
            dwf = self.env.dwf[node_type]
            walker_class = type(self)
            if (type(self) in dwf):
                # Cache the dynamic handler for subsequent walks.
                self.functions[node_type] = partial(dwf[walker_class], self)
                return self.functions[node_type](formula, **kwargs)
        node_type = formula.node_type()
        raise pysmt.exceptions.UnsupportedOperatorError(node_type=node_type, expression=formula)
class Progbar(object):
    """Text-mode progress bar with running averages of tracked values.

    Args:
        target: total number of steps expected, or None when unknown
            (stored internally as -1, which disables the bar/ETA display).
        width: bar width in characters.
        verbose: 0 = silent, 1 = live redrawn bar, 2 = single summary line
            once the target is reached.
        interval: minimum seconds between visual updates (verbose=1 only).
    """

    def __init__(self, target, width=30, verbose=1, interval=0.05):
        self.width = width
        if target is None:
            target = -1
        self.target = target
        # name -> [weighted sum, total weight] for running averages.
        self.sum_values = {}
        # Metric names in first-seen order, for stable display order.
        self.unique_values = []
        self.start = time.time()
        self.last_update = 0
        self.interval = interval
        self.total_width = 0
        self.seen_so_far = 0
        self.verbose = verbose

    def update(self, current, values=None, force=False):
        """Advance the bar to step ``current``.

        Args:
            current: index of the current step.
            values: list of ``(name, value)`` pairs; each value is folded
                into a running average weighted by the steps advanced.
            force: bypass the ``interval`` rate limit (verbose=1 only).
        """
        values = values or []
        step = current - self.seen_so_far  # weight for the running averages
        for k, v in values:
            if k not in self.sum_values:
                self.sum_values[k] = [v * step, step]
                self.unique_values.append(k)
            else:
                self.sum_values[k][0] += v * step
                self.sum_values[k][1] += step
        self.seen_so_far = current
        now = time.time()
        if self.verbose == 1:
            # Rate-limit redraws unless forced.
            if not force and (now - self.last_update) < self.interval:
                return
            prev_total_width = self.total_width
            sys.stdout.write('\x08' * prev_total_width)
            sys.stdout.write('\r')
            # BUGFIX: compare with != instead of `is not` -- identity
            # comparison against an int literal is implementation-defined.
            if self.target != -1:
                numdigits = int(np.floor(np.log10(self.target))) + 1
                barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)
                bar = barstr % (current, self.target)
                prog = float(current) / self.target
                prog_width = int(self.width * prog)
                if prog_width > 0:
                    bar += '=' * (prog_width - 1)
                    if current < self.target:
                        bar += '>'
                    else:
                        bar += '='
                bar += '.' * (self.width - prog_width)
                bar += ']'
                sys.stdout.write(bar)
                self.total_width = len(bar)
            if current:
                time_per_unit = (now - self.start) / current
            else:
                time_per_unit = 0
            eta = time_per_unit * (self.target - current)
            info = ''
            # BUGFIX: `!=` instead of `is not` here as well.
            if current < self.target and self.target != -1:
                info += ' - ETA: %ds' % eta
            else:
                info += ' - %ds' % (now - self.start)
            for k in self.unique_values:
                info += ' - %s:' % k
                if isinstance(self.sum_values[k], list):
                    avg = np.mean(self.sum_values[k][0] / max(1, self.sum_values[k][1]))
                    if abs(avg) > 0.001:
                        info += ' %.4f' % avg
                    else:
                        info += ' %.4e' % avg
                else:
                    info += ' %s' % self.sum_values[k]
            self.total_width += len(info)
            # Pad with spaces to fully overwrite the previous, longer line.
            if prev_total_width > self.total_width:
                info += (prev_total_width - self.total_width) * ' '
            sys.stdout.write(info)
            sys.stdout.flush()
            if current >= self.target:
                sys.stdout.write('\n')
        if self.verbose == 2:
            if current >= self.target:
                info = '%ds' % (now - self.start)
                for k in self.unique_values:
                    info += ' - %s:' % k
                    avg = np.mean(self.sum_values[k][0] / max(1, self.sum_values[k][1]))
                    # BUGFIX: use abs(avg) like the verbose=1 branch so small
                    # negative averages also switch to scientific notation.
                    if abs(avg) > 0.001:
                        info += ' %.4f' % avg
                    else:
                        info += ' %.4e' % avg
                sys.stdout.write(info + '\n')
        self.last_update = now

    def add(self, n, values=None):
        """Advance the bar by ``n`` steps."""
        self.update(self.seen_so_far + n, values)
class BotogramStyle(style.Style):
    """Pygments style used to render botogram's highlighted code blocks."""
    # Page background and line-highlight colours.
    background_color = '#fff'
    highlight_color = '#f3f3f3'
    default_style = ''
    # Token colour map. NAMES/COMMENTS/KEYWORD/OPERATORS/NUMBERS/STRING are
    # colour constants defined elsewhere in this file.
    styles = {token.Whitespace: 'underline #f8f8f8', token.Error: '#a40000 border:#ef2929', token.Other: NAMES, token.Comment: COMMENTS, token.Keyword: KEYWORD, token.Operator: OPERATORS, token.Operator.Word: KEYWORD, token.Punctuation: NAMES, token.Name: NAMES, token.Name.Decorator: ('bold ' + OPERATORS), token.Name.Entity: '#ce5c00', token.Name.Tag: KEYWORD, token.Number: NUMBERS, token.Literal: NAMES, token.String: STRING, token.String.Doc: COMMENTS, token.Generic: NAMES, token.Generic.Deleted: '#a40000', token.Generic.Emph: 'italic #000000', token.Generic.Error: '#ef2929', token.Generic.Heading: 'bold #000080', token.Generic.Inserted: '#00A000', token.Generic.Output: '#888', token.Generic.Prompt: '#745334', token.Generic.Strong: 'bold #000000', token.Generic.Subheading: 'bold #800080', token.Generic.Traceback: 'bold #a40000'}
# NOTE(review): the line below looks like a stripped ``@pytest.mark.parametrize``
# decorator; left untouched.
.parametrize('node', ['Path(__file__).parent', 'PathNode(path=Path(__file__).parent)', 'PickleNode(path=Path(__file__).parent)'])
def test_error_when_path_dependency_is_directory(runner, tmp_path, node):
    """Collection must fail when a task declares a directory as a path dependency."""
    source = f'''
    from pathlib import Path
    from pytask import PickleNode, PathNode
    def task_example(path = {node}): ...
    '''
    tmp_path.joinpath('task_example.py').write_text(textwrap.dedent(source))
    result = runner.invoke(cli, [tmp_path.as_posix()])
    assert (result.exit_code == ExitCode.COLLECTION_FAILED)
    # The error message must mention that only files are allowed.
    assert all(((i in result.output) for i in ('only', 'files', 'are', 'allowed')))
class QuadraticProgramElement():
    """Base class for elements that belong to a ``QuadraticProgram``."""

    def __init__(self, quadratic_program: 'problems.QuadraticProgram') -> None:
        # Local import avoids a circular dependency with quadratic_program.
        from .quadratic_program import QuadraticProgram
        if (not isinstance(quadratic_program, QuadraticProgram)):
            raise TypeError('QuadraticProgram instance expected')
        self._quadratic_program = quadratic_program

    def quadratic_program(self) -> 'problems.QuadraticProgram':
        # NOTE(review): presumably a @property whose decorator was stripped
        # in this copy; verify against the upstream source.
        return self._quadratic_program

    # NOTE(review): the line below appears to be a mangled
    # '@quadratic_program.setter' decorator; left untouched.
    _program.setter
    def quadratic_program(self, quadratic_program: 'problems.QuadraticProgram') -> None:
        from .quadratic_program import QuadraticProgram
        if (not isinstance(quadratic_program, QuadraticProgram)):
            raise TypeError('QuadraticProgram instance expected')
        self._quadratic_program = quadratic_program
def postprocess_text(preds, responses, metric_name):
    """Normalize predictions and references into the shape a metric expects.

    All entries are stripped; 'rouge' additionally joins sentences with
    newlines, 'sacrebleu' wraps each reference in a list, and 'bleu'
    tokenizes on spaces (with references doubly nested). Other metric
    names get only the stripping.
    """
    preds = [p.strip() for p in preds]
    responses = [r.strip() for r in responses]
    if metric_name == 'rouge':
        # rougeLsum expects newline-separated sentences.
        preds = ['\n'.join(nltk.sent_tokenize(p)) for p in preds]
        responses = ['\n'.join(nltk.sent_tokenize(r)) for r in responses]
    elif metric_name == 'sacrebleu':
        responses = [[r] for r in responses]
    elif metric_name == 'bleu':
        preds = [p.split(' ') for p in preds]
        responses = [[r.split(' ')] for r in responses]
    return preds, responses
class ThrottleTest(EvenniaTest):
    """Exercise the Throttle rate limiter: limit enforcement and bounded
    per-IP history growth."""

    def test_throttle(self):
        ips = ('94.100.176.153', '45.56.148.77', '5.196.1.129')
        # Allow 5 attempts per 15-minute window.
        kwargs = {'limit': 5, 'timeout': (15 * 60)}
        throttle = Throttle(**kwargs)
        for ip in ips:
            # A fresh IP must not be throttled yet.
            self.assertFalse(throttle.check(ip))
            for x in range(50):
                obj = throttle.update(ip)
                self.assertFalse(obj)
            # After exceeding the limit the IP must be throttled.
            self.assertTrue(throttle.check(ip))
            # Flood well past the cache size; still throttled afterwards.
            for x in range((throttle.cache_size * 2)):
                obj = throttle.update(ip)
                self.assertFalse(obj)
            self.assertTrue(throttle.check(ip))
            # The per-IP history must be capped at cache_size entries.
            self.assertEqual(throttle.cache_size, len(throttle.get(ip)))
        cache = throttle.get()
        # One capped entry list per distinct IP.
        self.assertEqual(len(ips), len(cache.keys()))
        self.assertEqual(sum([len(cache[x]) for x in cache.keys()]), (throttle.cache_size * len(ips)))
def get_your_qr_page(request, qr_secret):
    """Render the 'your QR' page for ``qr_secret``, or 404 if unknown."""
    ri = get_ri_from_secret(qr_secret)
    if ri is None:
        # No record matches this secret.
        return HttpResponseNotFound('<h1>QR code not found</h1>')
    context = {
        'qr_image': reverse('get_qr_from_secret', args=[qr_secret]),
        'qr_image_svg': reverse('get_qr_from_secret_svg', args=[qr_secret]),
        'qr_image_eps': reverse('get_qr_from_secret_eps', args=[qr_secret]),
        'qr_url': ri.to_url,
    }
    return render(request, 'qr_app/your-qr-page.html', context=context)
class TimeBuildModelLossActiveMaterial():
    """ASV benchmark: time building SPM/DFN models with each
    'loss of active material' option."""
    # Benchmark parameter axes (model class x LAM option).
    param_names = ['model', 'model option']
    params = ([pybamm.lithium_ion.SPM, pybamm.lithium_ion.DFN], ['none', 'stress-driven', 'reaction-driven', 'stress and reaction-driven'])

    def setup(self, _model, _params):
        # Fix the RNG seed so benchmark runs are reproducible.
        set_random_seed()

    def time_setup_model(self, model, params):
        # Timed body: build the model with the Ai2020 parameter set.
        build_model('Ai2020', model, 'loss of active material', params)
class wz_input_before_epoch(unittest.TestCase):
    """'-w' (week/zcount) input one count before the epoch rollover must
    produce exactly the expected conversion listing."""

    def test(self):
        # run_test compares the tool's stdout verbatim against the expected string.
        run_test(self, ['-w 1023 604799'], ' Month/Day/Year H:M:S 01/16/1980 11:59:58 GPS\n Modified Julian Date 44254. GPS\n GPSweek DayOfWeek SecOfWeek 1 3 302398.500000\n FullGPSweek Zcount 1 201599\n Year DayOfYear SecondOfDay 1980 016 43198.500000\n Unix: Second Microsecond 500000\n Zcount: 29-bit (32-bit) 725887 (725887)\n')
class KeyedOptimizer(optim.Optimizer):
    """Optimizer that keys parameters and state by fully-qualified name.

    state_dict()/load_state_dict() operate by parameter key (not parameter
    order), with ShardedTensor-aware in-place state loading.
    """

    def __init__(self, params: Mapping[(str, Union[(torch.Tensor, ShardedTensor)])], state: Mapping[(Any, Any)], param_groups: Collection[Mapping[(str, Any)]]) -> None:
        torch._C._log_api_usage_once(f'torchrec.optim.{self.__class__.__name__}')
        # Step hooks, keyed by registration id, in insertion order.
        self._optimizer_step_pre_hooks: Dict[(int, Callable)] = OrderedDict()
        self._optimizer_step_post_hooks: Dict[(int, Callable)] = OrderedDict()
        self.state: Mapping[(Any, Any)] = state
        self.param_groups: Collection[Mapping[(str, Any)]] = param_groups
        self.params = params
        # param_groups are NOT saved in state_dict unless save_param_groups(True).
        self.defaults: Dict[(str, Any)] = {'_save_param_groups': False}
        # Sanity check: every state key must be one of the tracked params.
        params_set = set(params.values())
        non_param_state_keys = [key for key in self.state if (key not in params_set)]
        if (len(non_param_state_keys) > 0):
            raise ValueError('All state keys must be params. The following keys are not: {}.'.format(non_param_state_keys))

    def _extract_state_dict_content(input_dict: Dict[(str, Any)]) -> Dict[(str, Any)]:
        # NOTE(review): no ``self`` and called via the class -- presumably a
        # @staticmethod whose decorator was stripped in this copy.
        # Recursively serialize: dicts are recursed, objects exposing
        # state_dict() are serialized, everything else is kept as-is.
        result = {}
        for (k, v) in input_dict.items():
            if isinstance(v, dict):
                result[k] = KeyedOptimizer._extract_state_dict_content(v)
            elif (hasattr(v, 'state_dict') and callable(v.state_dict)):
                result[k] = v.state_dict()
            else:
                result[k] = v
        return result

    def _update_param_state_dict_object(current_param_state_dict: Dict[(str, Any)], param_state_dict_to_load: Dict[(str, Any)], parent_keys: List[Union[(str, int, float, bool, None)]]) -> None:
        # NOTE(review): also looks like a stripped @staticmethod.
        # Load new state into the current state dict IN PLACE: tensors are
        # copied shard-by-shard (ShardedTensor) or value-by-value (Tensor);
        # parent_keys tracks the path for error messages.
        for (k, v) in current_param_state_dict.items():
            new_v = param_state_dict_to_load[k]
            parent_keys.append(k)
            if isinstance(v, dict):
                KeyedOptimizer._update_param_state_dict_object(v, new_v, parent_keys)
            elif (hasattr(v, 'load_state_dict') and callable(v.load_state_dict)):
                v.load_state_dict(new_v)
            elif isinstance(v, ShardedTensor):
                assert isinstance(new_v, ShardedTensor)
                num_shards = len(v.local_shards())
                num_new_shards = len(new_v.local_shards())
                if (num_shards != num_new_shards):
                    raise ValueError(f'Different number of shards {num_shards} vs {num_new_shards} for the path of {json.dumps(parent_keys)}')
                for (shard, new_shard) in zip(v.local_shards(), new_v.local_shards()):
                    shard.tensor.detach().copy_(new_shard.tensor)
            elif isinstance(v, torch.Tensor):
                v.detach().copy_(new_v)
            else:
                # Non-tensor leaf: replace with a deep copy of the new value.
                current_param_state_dict[k] = deepcopy(new_v)

    def state_dict(self) -> Dict[(str, Any)]:
        """Return state keyed by parameter name; param_groups are included
        only when save_param_groups(True) has been called."""
        param_groups = self.param_groups
        params = self.params
        param_to_key = {param: key for (key, param) in params.items()}
        ret_state = {param_to_key[param]: self._extract_state_dict_content(param_state) for (param, param_state) in self.state.items()}
        ret_groups = []
        for group in param_groups:
            param_keys = []
            for param in group['params']:
                param_keys.append(param_to_key[param])
            # Sorted keys give a deterministic group representation.
            ret_group = {'params': sorted(param_keys)}
            for (k, v) in group.items():
                if (k != 'params'):
                    ret_group[k] = deepcopy(v)
            ret_groups.append(ret_group)
        ret: Dict[(str, object)] = {'state': ret_state}
        if self.defaults['_save_param_groups']:
            ret['param_groups'] = ret_groups
        return ret

    def post_load_state_dict(self) -> None:
        # Hook invoked at the end of load_state_dict; subclasses may override.
        pass

    def load_state_dict(self, state_dict: Mapping[(str, Any)]) -> None:
        """Load a state dict produced by :meth:`state_dict`.

        Strict: raises ValueError on any mismatch in parameter count,
        parameter keys, per-param state size, or param_group contents.
        """
        new_state = state_dict['state']
        state = self.state
        params = self.params
        if (len(state) != len(new_state)):
            raise ValueError(f'Different parameter count: {len(state)} vs {len(new_state)}')
        for (param_key, param) in params.items():
            if (param not in state):
                continue
            if (param_key not in new_state):
                raise ValueError(f'Parameter {param_key} not found')
            if (len(state[param]) != len(new_state[param_key])):
                raise ValueError(f'Different state size: {len(state[param])} vs {len(new_state[param_key])}')
            KeyedOptimizer._update_param_state_dict_object(current_param_state_dict=state[param], param_state_dict_to_load=new_state[param_key], parent_keys=[param_key])
        if self.defaults['_save_param_groups']:
            new_param_groups = state_dict['param_groups']
            param_groups = self.param_groups
            if (len(param_groups) != len(new_param_groups)):
                raise ValueError(f'Different param_groups count: {len(param_groups)} vs {len(new_param_groups)}')
            param_to_key = {param: key for (key, param) in params.items()}
            # Match groups by the sorted, '/'-joined set of their param keys.
            group_map = {}
            for group in param_groups:
                param_keys = []
                for param in group['params']:
                    param_keys.append(param_to_key[param])
                group_map['/'.join(sorted(param_keys))] = group
            new_group_map = {}
            for new_group in new_param_groups:
                param_keys = []
                for param_key in new_group['params']:
                    param_keys.append(param_key)
                new_group_map['/'.join(sorted(param_keys))] = new_group
            for (group_key, group) in group_map.items():
                if (group_key not in new_group_map):
                    raise ValueError(f'Group {group_key} not found')
                new_group = new_group_map[group_key]
                if (len(group) != len(new_group)):
                    raise ValueError(f'Different param_group size: {len(group)} vs {len(new_group)}')
                for k in group:
                    if (k not in new_group):
                        raise ValueError(f'Group key {k} not found for group {group_key}')
                    if (k != 'params'):
                        group[k] = deepcopy(new_group[k])
        self.post_load_state_dict()

    def add_param_group(self, param_group: Any) -> None:
        # Adding groups after construction is not supported for keyed optimizers.
        raise NotImplementedError()

    def init_state(self, sparse_grad_parameter_names: Optional[Set[str]]=None) -> None:
        """Run a zero-gradient step() so lazily-initialized optimizers
        materialize their state (e.g. before loading a checkpoint)."""
        for (key, param) in self.params.items():
            if param.requires_grad:
                t = torch.zeros_like(param)
                if ((sparse_grad_parameter_names is not None) and (key in sparse_grad_parameter_names)):
                    # These params receive sparse gradients in training.
                    t = t.to_sparse()
                param.grad = torch.autograd.Variable(t)
        self.step(closure=None)

    def save_param_groups(self, save: bool) -> None:
        # Toggle inclusion of param_groups in state_dict()/load_state_dict().
        self.defaults['_save_param_groups'] = save

    def __getstate__(self) -> Dict[(str, Any)]:
        # Plain __dict__ pickling, bypassing optim.Optimizer's custom logic.
        return self.__dict__
# NOTE(review): the line below appears to be a stripped
# ``@pytest.mark.parametrize`` decorator; left untouched.
.parametrize(('test_input', 'expected'), [('pep-0008.rst', {'authors': 'Guido van Rossum, Barry Warsaw, Alyssa Coghlan', 'number': 8, 'shorthand': ':abbr:`PA (Process, Active)`', 'title': 'Style Guide for Python Code', 'python_version': ''}), ('pep-0719.rst', {'authors': 'Thomas Wouters', 'number': 719, 'shorthand': ':abbr:`IA (Informational, Active)`', 'title': 'Python 3.13 Release Schedule', 'python_version': '3.13'})])
def test_pep_details(test_input, expected):
    """Parsing a PEP file must expose the expected metadata in .details."""
    pep = parser.PEP((PEP_ROOT / test_input))
    assert (pep.details == expected)
def affine(inp, units, bias=True, W_initializer=None, b_initializer=None, W_name=WEIGHT_DEFAULT_NAME, bias_name=BIAS_DEFAULT_NAME):
    """Affine (fully-connected) layer: ``inp @ W`` plus an optional bias.

    Weight shape is inferred from the last dimension of ``inp``; the
    variables are created via the module's _weight_variable/_bias_variable
    helpers with the given initializers and names.
    """
    in_dim = inp.get_shape()[-1].value
    weights = _weight_variable([in_dim, units], initializer=W_initializer, name=W_name)
    result = tf.matmul(inp, weights)
    if not bias:
        return result
    offsets = _bias_variable((units,), initializer=b_initializer, name=bias_name)
    return result + offsets
class EpisodeDescrSamplerTest(tf.test.TestCase):
    """Tests for sampling.EpisodeDescriptionSampler on the VALID split."""
    dataset_spec = test_utils.DATASET_SPEC
    split = Split.VALID

    def setUp(self):
        super(EpisodeDescrSamplerTest, self).setUp()
        self.sampler = self.make_sampler()

    def make_sampler(self):
        """Build a fresh sampler for this class's dataset spec and split."""
        return sampling.EpisodeDescriptionSampler(self.dataset_spec, self.split, config.EpisodeDescriptionConfig())

    def test_max_examples(self):
        # Support + query examples per class must not exceed that class's total.
        class_set = self.dataset_spec.get_classes(self.split)
        for _ in range(10):
            episode_description = self.sampler.sample_episode_description()
            self.assertTrue(all((((s + q) <= self.dataset_spec.get_total_images_per_class(class_set[cid])) for (cid, s, q) in episode_description)))

    def test_min_examples(self):
        # Every sampled class gets at least one support and one query example.
        for _ in range(10):
            episode_description = self.sampler.sample_episode_description()
            self.assertTrue(all((((s >= 1) and (q >= 1)) for (cid, s, q) in episode_description)))

    def test_non_deterministic(self):
        # Independently-constructed samplers should not all replay the same episode.
        reference_sample = self.sampler.sample_episode_description()
        for _ in range(10):
            sampler = self.make_sampler()
            sample = sampler.sample_episode_description()
            if (sample != reference_sample):
                break
        else:
            raise AssertionError('Different EpisodeDescriptionSamplers generate the same sequence of episode descriptions.')

    def test_setting_randomstate(self):
        """Seeding sampling.RNG identically must reproduce the same episode."""
        init_rng = sampling.RNG
        # BUGFIX: the seed assignment had no value (syntax error). Any fixed
        # integer works for this test; 20181113 is arbitrary but deterministic.
        seed = 20181113
        try:
            sampling.RNG = np.random.RandomState(seed)
            sampler = self.make_sampler()
            reference_sample = sampler.sample_episode_description()
            for _ in range(10):
                sampling.RNG = np.random.RandomState(seed)
                sampler = self.make_sampler()
                sample = sampler.sample_episode_description()
                self.assertEqual(reference_sample, sample)
        finally:
            # Always restore the module-level RNG for other tests.
            sampling.RNG = init_rng

    def assert_expected_chunk_sizes(self, expected_support_chunk_size, expected_query_chunk_size):
        # Helper: flush chunk is the sum of support and query chunks.
        rval = self.sampler.compute_chunk_sizes()
        (flush_chunk_size, support_chunk_size, query_chunk_size) = rval
        expected_flush_chunk_size = (expected_support_chunk_size + expected_query_chunk_size)
        self.assertEqual(flush_chunk_size, expected_flush_chunk_size)
        self.assertEqual(support_chunk_size, expected_support_chunk_size)
        self.assertEqual(query_chunk_size, expected_query_chunk_size)

    def test_correct_chunk_sizes(self):
        self.assert_expected_chunk_sizes(test_utils.MAX_SUPPORT_SET_SIZE, (test_utils.MAX_WAYS_UPPER_BOUND * test_utils.MAX_NUM_QUERY))
def resp_create_commit():
    """Yield a RequestsMock that stubs the 'create commit' POST endpoint,
    returning a canned commit payload."""
    content = {'id': 'ed899a2f4b50b4370feeeab42383c746', 'short_id': 'ed899a2f', 'title': 'Commit message'}
    with responses.RequestsMock() as rsps:
        # BUGFIX: the url= literal was corrupted/truncated in this copy
        # (`url=' json=content ...`). Restored to the conventional local
        # test endpoint -- TODO confirm against the original fixture.
        rsps.add(method=responses.POST, url='http://localhost/api/v4/projects/1/repository/commits', json=content, content_type='application/json', status=200)
        yield rsps
class Module(object):
    """Locate a module or package by name within a project directory.

    Accepts ``<name>.py`` or ``<name>/`` either at the top level or under
    ``src/``; raises ValueError when zero or multiple candidates exist.
    """
    # Set in __init__ when the module name is dotted (namespace package).
    in_namespace_package = False
    namespace_package_name = None

    def __init__(self, name, directory=Path()):
        self.name = name
        # 'a.b' -> 'a/b' so dotted names map to nested directories.
        name_as_path = name.replace('.', os.sep)
        pkg_dir = (directory / name_as_path)
        py_file = (directory / (name_as_path + '.py'))
        src_pkg_dir = ((directory / 'src') / name_as_path)
        src_py_file = ((directory / 'src') / (name_as_path + '.py'))
        existing = set()
        if pkg_dir.is_dir():
            self.path = pkg_dir
            self.is_package = True
            self.prefix = ''
            existing.add(pkg_dir)
        if py_file.is_file():
            self.path = py_file
            self.is_package = False
            self.prefix = ''
            existing.add(py_file)
        if src_pkg_dir.is_dir():
            self.path = src_pkg_dir
            self.is_package = True
            self.prefix = 'src'
            existing.add(src_pkg_dir)
        if src_py_file.is_file():
            self.path = src_py_file
            self.is_package = False
            self.prefix = 'src'
            existing.add(src_py_file)
        # Exactly one candidate may exist; otherwise the layout is ambiguous.
        if (len(existing) > 1):
            raise ValueError('Multiple files or folders could be module {}: {}'.format(name, ', '.join([str(p) for p in sorted(existing)])))
        elif (not existing):
            raise ValueError('No file/folder found for module {}'.format(name))
        self.source_dir = (directory / self.prefix)
        if ('.' in name):
            self.namespace_package_name = name.rpartition('.')[0]
            self.in_namespace_package = True

    def file(self):
        # NOTE(review): likely a stripped @property. Returns the file that
        # defines the module: __init__.py for packages, the .py file otherwise.
        if self.is_package:
            return (self.path / '__init__.py')
        else:
            return self.path

    def version_files(self):
        # Candidate files that may carry the version, most specific first.
        if self.is_package:
            paths = [(self.path / '__init__.py')]
            for filename in ('version.py', '_version.py', '__version__.py'):
                if (self.path / filename).is_file():
                    paths.insert(0, (self.path / filename))
            return paths
        else:
            return [self.path]

    def iter_files(self):
        """Yield paths of all files in the module, skipping __pycache__
        directories and .pyc files; traversal order is sorted."""
        def _include(path):
            name = os.path.basename(path)
            if ((name == '__pycache__') or name.endswith('.pyc')):
                return False
            return True
        if self.is_package:
            for (dirpath, dirs, files) in os.walk(str(self.path)):
                for file in sorted(files):
                    full_path = os.path.join(dirpath, file)
                    if _include(full_path):
                        (yield full_path)
                # In-place edit prunes excluded dirs and fixes walk order.
                dirs[:] = [d for d in sorted(dirs) if _include(d)]
        else:
            (yield str(self.path))
def create_video_folders(dataset, output_dir, tmp_dir):
    """Create one sub-directory of ``output_dir`` per unique label.

    Args:
        dataset: table with a 'label-name' column exposing ``.unique()``
            (e.g. a pandas DataFrame).
        output_dir: root directory for the per-label folders.
        tmp_dir: scratch directory; created if missing.

    Returns:
        Dict mapping each label name to its directory path.
    """
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(output_dir, exist_ok=True)
    os.makedirs(tmp_dir, exist_ok=True)
    label_to_dir = {}
    for label_name in dataset['label-name'].unique():
        this_dir = os.path.join(output_dir, label_name)
        os.makedirs(this_dir, exist_ok=True)
        label_to_dir[label_name] = this_dir
    return label_to_dir
def _read_yaml_area_file_content(area_file_name):
    """Load one or more YAML area definitions into a single merged dict.

    Accepts a single path/stream (or a list of them); later sources are
    merged into earlier ones via recursive_dict_update.
    """
    from pyresample.utils import recursive_dict_update
    # Normalize the single-source case to a one-element list.
    if isinstance(area_file_name, (str, pathlib.Path, io.IOBase)):
        area_file_name = [area_file_name]
    merged = {}
    for source in area_file_name:
        if isinstance(source, io.IOBase):
            loaded = yaml.safe_load(source)
        elif isinstance(source, str) and '\n' in source:
            # A raw multi-line string is deprecated input; still parsed.
            warnings.warn('It looks like you passed a YAML string directly. This is deprecated since pyresample 1.14.1, please use load_area_from_string or pass a stream or a path to a file instead', DeprecationWarning, stacklevel=3)
            loaded = yaml.safe_load(source)
        else:
            with open(source) as file_obj:
                loaded = yaml.safe_load(file_obj)
        merged = recursive_dict_update(merged, loaded)
    return merged
class AutoapiPropertyDocumenter(AutoapiDocumenter, autodoc.PropertyDocumenter):
    """autodoc documenter for autoapi PythonProperty objects."""
    objtype = 'apiproperty'
    directivetype = 'property'
    # Outrank the stock PropertyDocumenter so autoapi members use this class.
    priority = ((autodoc.PropertyDocumenter.priority * 100) + 100)

    def can_document_member(cls, member, membername, isattr, parent):
        # NOTE(review): takes ``cls`` first -- presumably a stripped @classmethod.
        return isinstance(member, PythonProperty)

    def add_directive_header(self, sig):
        # Use the class-level header logic, bypassing PropertyDocumenter's.
        autodoc.ClassLevelDocumenter.add_directive_header(self, sig)
        sourcename = self.get_sourcename()
        if (self.options.annotation and (self.options.annotation is not autodoc.SUPPRESS)):
            self.add_line(f' :type: {self.options.annotation}', sourcename)
        # Propagate abstractmethod/classmethod flags onto the directive.
        for property_type in ('abstractmethod', 'classmethod'):
            if (property_type in self.object.properties):
                self.add_line(f' :{property_type}:', sourcename)
class Plugin(TrezorPlugin, QtPlugin):
    """Qt glue for the Trezor plugin: icons plus handler/PIN-widget factories."""
    icon_unpaired = 'trezor_unpaired.png'
    icon_paired = 'trezor.png'

    def create_handler(self, window):
        # One QtHandler per wallet window, wired to the PIN matrix widget.
        return QtHandler(window, self.pin_matrix_widget_class(), self.device)

    def pin_matrix_widget_class(self):
        # Lazy import so trezorlib's Qt dependency is only required here.
        from trezorlib.qt.pinmatrix import PinMatrixWidget
        return PinMatrixWidget
# NOTE(review): a bare `.pydicom` line preceded this test -- apparently a
# stripped `@pytest.mark.pydicom` decorator; preserved as a comment so the
# module stays syntactically valid. TODO: restore the real decorator.
def test_sum_doses_in_datasets():
    """sum_doses_in_datasets must add dose grids (honoring DoseGridScaling),
    propagate DoseType, and reject mismatched or unsupported datasets."""
    # Two grids whose scaled values sum to exactly 1.0 everywhere.
    scale1 = 0.01
    data1 = (np.array([[[0.9, 0.8, 0.7], [0.6, 0.5, 0.4]], [[0.91, 0.81, 0.71], [0.61, 0.51, 0.41]], [[0.92, 0.82, 0.72], [0.62, 0.52, 0.42]], [[0.93, 0.83, 0.73], [0.63, 0.53, 0.43]]]) / scale1).astype(np.uint32)
    scale2 = 5e-09
    data2 = (np.array([[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], [[0.09, 0.19, 0.29], [0.39, 0.49, 0.59]], [[0.08, 0.18, 0.28], [0.38, 0.48, 0.58]], [[0.07, 0.17, 0.27], [0.37, 0.47, 0.57]]]) / scale2).astype(np.uint32)
    expected_sum = np.ones((4, 2, 3))
    bits_allocated = 32
    test_dicom_dict = {'PatientID': 'PMP', 'Modality': 'RTDOSE', 'ImagePositionPatient': [(- 1.0), (- 1.0), (- 1.0)], 'ImageOrientationPatient': [1, 0, 0, 0, 1, 0], 'BitsAllocated': bits_allocated, 'BitsStored': bits_allocated, 'Rows': 2, 'Columns': 3, 'PixelRepresentation': 0, 'SamplesPerPixel': 1, 'PhotometricInterpretation': 'MONOCHROME2', 'PixelSpacing': [1.0, 1.0], 'GridFrameOffsetVector': [0, 1, 2, 3], 'PixelData': data1.tobytes(), 'DoseGridScaling': scale1, 'DoseSummationType': 'PLAN', 'DoseType': 'PHYSICAL', 'DoseUnits': 'GY'}
    ds1 = create.dicom_dataset_from_dict(test_dicom_dict)
    ds1.fix_meta_info(enforce_standard=False)
    ds2 = copy.deepcopy(ds1)
    ds2.PixelData = data2.tobytes()
    ds2.DoseGridScaling = scale2
    ds_summed = dose.sum_doses_in_datasets([ds1, ds2])
    assert np.allclose(dose.dose_from_dataset(ds_summed), expected_sum)
    assert (ds_summed.DoseType == 'PHYSICAL')
    # Mixed dose types: EFFECTIVE wins.
    ds2.DoseType = 'EFFECTIVE'
    ds_summed = dose.sum_doses_in_datasets([ds1, ds2])
    assert (ds_summed.DoseType == 'EFFECTIVE')
    # Summing a single dataset is the identity.
    ds_summed = dose.sum_doses_in_datasets([ds1])
    assert np.allclose(dose.dose_from_dataset(ds_summed), dose.dose_from_dataset(ds1))
    # Duplicated inputs simply double the sum.
    ds_summed = dose.sum_doses_in_datasets([ds1, ds1, ds2, ds2])
    assert np.allclose(dose.dose_from_dataset(ds_summed), (2 * expected_sum))
    # Each mismatch below must raise; the attribute is restored afterwards.
    with pytest.raises(ValueError):
        ds2.PatientID = 'PMX'
        dose.sum_doses_in_datasets([ds1, ds2])
    ds2.PatientID = 'PMP'
    with pytest.raises(ValueError):
        ds2.Modality = 'CT'
        dose.sum_doses_in_datasets([ds1, ds2])
    ds2.Modality = 'RTDOSE'
    with pytest.raises(IndexError):
        dose.sum_doses_in_datasets([])
    with pytest.raises(ValueError):
        ds2.DoseSummationType = 'BEAM'
        dose.sum_doses_in_datasets([ds1, ds2])
    ds2.DoseSummationType = 'PLAN'
    with pytest.raises(ValueError):
        ds2.DoseUnits = 'RELATIVE'
        dose.sum_doses_in_datasets([ds1, ds2])
    # BUGFIX: the original reset `ds2.Modality = 'GY'` here, which corrupted
    # Modality and left DoseUnits == 'RELATIVE' for the following check.
    ds2.DoseUnits = 'GY'
    with pytest.raises(ValueError):
        ds2.ImagePositionPatient = [(- 1), (- 1.1), (- 1)]
        dose.sum_doses_in_datasets([ds1, ds2])
    ds2.ImagePositionPatient = [(- 1), (- 1), (- 1)]
def grant_superuser_rights(proj: Project) -> Project:
    """Elevate the project's first exported user with record-management
    rights and re-import that user; returns the same project."""
    admin = proj.export_users()[0]
    # Grant each record-management permission flag.
    for right in ('record_delete', 'record_rename', 'lock_records_all_forms', 'lock_records'):
        admin[right] = 1
    imported = proj.import_users([admin])
    # Exactly one user row should have been (re)imported.
    assert imported == 1
    return proj
def delete_oldest_logs(file_list: List[Path], keep_number: int) -> None:
    """Delete all but the newest ``keep_number`` files from ``file_list``.

    "Oldest" is determined by lexicographic path order -- this assumes log
    filenames embed a sortable timestamp (oldest names sort first); TODO
    confirm against the actual log-naming scheme.

    Args:
        file_list: candidate log files (they need not all still exist).
        keep_number: how many of the newest files to keep.
    """
    ordered = sorted(file_list)
    # BUGFIX: the original sliced with [:-keep_number], which is an empty
    # slice when keep_number == 0 (nothing would ever be deleted); compute
    # the number of excess files explicitly instead.
    excess = len(ordered) - keep_number
    if excess > 0:
        for stale in ordered[:excess]:
            # missing_ok tolerates files already removed by another process
            # (replaces the old try/except FileNotFoundError).
            stale.unlink(missing_ok=True)
def apply_optimizer_in_backward(optimizer_class: Type[torch.optim.Optimizer], params: Iterable[torch.nn.Parameter], optimizer_kwargs: Dict[(str, Any)]) -> None:
    """Deprecated shim: forwards to torch.distributed.optim's
    _apply_optimizer_in_backward and warns callers to migrate."""
    from torch.distributed.optim import _apply_optimizer_in_backward
    warn("This API is deprecated. Please use Pytorch Distributed's _apply_optimizer_in_backward API instead.", DeprecationWarning)
    _apply_optimizer_in_backward(optimizer_class=optimizer_class, params=params, optimizer_kwargs=optimizer_kwargs)
class TestStats(BaseTestCase):
simple_benchmark = pd.Series((np.array([0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0]) / 100), index=pd.date_range('2000-1-30', periods=9, freq='D'))
positive_returns = pd.Series((np.array([1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]) / 100), index=pd.date_range('2000-1-30', periods=9, freq='D'))
negative_returns = pd.Series((np.array([0.0, (- 6.0), (- 7.0), (- 1.0), (- 9.0), (- 2.0), (- 6.0), (- 8.0), (- 5.0)]) / 100), index=pd.date_range('2000-1-30', periods=9, freq='D'))
all_negative_returns = pd.Series((np.array([(- 2.0), (- 6.0), (- 7.0), (- 1.0), (- 9.0), (- 2.0), (- 6.0), (- 8.0), (- 5.0)]) / 100), index=pd.date_range('2000-1-30', periods=9, freq='D'))
mixed_returns = pd.Series((np.array([np.nan, 1.0, 10.0, (- 4.0), 2.0, 3.0, 2.0, 1.0, (- 10.0)]) / 100), index=pd.date_range('2000-1-30', periods=9, freq='D'))
flat_line_1 = pd.Series((np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]) / 100), index=pd.date_range('2000-1-30', periods=9, freq='D'))
weekly_returns = pd.Series((np.array([0.0, 1.0, 10.0, (- 4.0), 2.0, 3.0, 2.0, 1.0, (- 10.0)]) / 100), index=pd.date_range('2000-1-30', periods=9, freq='W'))
monthly_returns = pd.Series((np.array([0.0, 1.0, 10.0, (- 4.0), 2.0, 3.0, 2.0, 1.0, (- 10.0)]) / 100), index=pd.date_range('2000-1-30', periods=9, freq='M'))
one_return = pd.Series((np.array([1.0]) / 100), index=pd.date_range('2000-1-30', periods=1, freq='D'))
empty_returns = pd.Series((np.array([]) / 100), index=pd.date_range('2000-1-30', periods=0, freq='D'))
noise = pd.Series(rand.normal(0, 0.001, 1000), index=pd.date_range('2000-1-30', periods=1000, freq='D', tz='UTC'))
noise_uniform = pd.Series(rand.uniform((- 0.01), 0.01, 1000), index=pd.date_range('2000-1-30', periods=1000, freq='D', tz='UTC'))
inv_noise = noise.multiply((- 1))
flat_line_0 = pd.Series(np.linspace(0, 0, num=1000), index=pd.date_range('2000-1-30', periods=1000, freq='D', tz='UTC'))
flat_line_1_tz = pd.Series(np.linspace(0.01, 0.01, num=1000), index=pd.date_range('2000-1-30', periods=1000, freq='D', tz='UTC'))
pos_line = pd.Series(np.linspace(0, 1, num=1000), index=pd.date_range('2000-1-30', periods=1000, freq='D', tz='UTC'))
neg_line = pd.Series(np.linspace(0, (- 1), num=1000), index=pd.date_range('2000-1-30', periods=1000, freq='D', tz='UTC'))
replace_nan = rand.choice(noise.index.tolist(), rand.randint(1, 10))
sparse_noise = noise.replace(replace_nan, np.nan)
replace_nan = rand.choice(noise.index.tolist(), rand.randint(1, 10))
sparse_flat_line_1_tz = flat_line_1_tz.replace(replace_nan, np.nan)
one = [(- 0.), 0., 0., (- 0.), (- 0.), 0., (- 0.), 0.]
two = [0., 0., (- 0.), 0., (- 0.), 0., 0.0151531, 0.]
df_index_simple = pd.date_range('2000-1-30', periods=8, freq='D')
df_index_week = pd.date_range('2000-1-30', periods=8, freq='W')
df_index_month = pd.date_range('2000-1-30', periods=8, freq='M')
df_simple = pd.DataFrame({'one': pd.Series(one, index=df_index_simple), 'two': pd.Series(two, index=df_index_simple)})
df_week = pd.DataFrame({'one': pd.Series(one, index=df_index_week), 'two': pd.Series(two, index=df_index_week)})
df_month = pd.DataFrame({'one': pd.Series(one, index=df_index_month), 'two': pd.Series(two, index=df_index_month)})
([(flat_line_1, ([0.0] * (flat_line_1.shape[0] - 1))), (pos_line, ([np.inf] + [(1 / n) for n in range(1, 999)]))])
def test_simple_returns(self, prices, expected):
simple_returns = self.empyrical.simple_returns(prices)
assert_almost_equal(np.array(simple_returns), expected, 4)
self.assert_indexes_match(simple_returns, prices.iloc[1:])
([(empty_returns, 0, []), (mixed_returns, 0, [0.0, 0.01, 0.111, 0.066559, 0.08789, 0.12052, 0.14293, 0.15436, 0.03893]), (mixed_returns, 100, [100.0, 101.0, 111.1, 106.65599, 108.78912, 112.05279, 114.29384, 115.43678, 103.8931]), (negative_returns, 0, [0.0, (- 0.06), (- 0.1258), (- 0.13454), (- 0.21243), (- 0.22818), (- 0.27449), (- 0.33253), (- 0.3659)])])
def test_cum_returns(self, returns, starting_value, expected):
cum_returns = self.empyrical.cum_returns(returns, starting_value=starting_value)
for i in range(returns.size):
assert_almost_equal(cum_returns[i], expected[i], 4)
self.assert_indexes_match(cum_returns, returns)
([(empty_returns, 0, np.nan), (one_return, 0, one_return[0]), (mixed_returns, 0, 0.03893), (mixed_returns, 100, 103.8931), (negative_returns, 0, (- 0.3659))])
def test_cum_returns_final(self, returns, starting_value, expected):
cum_returns_final = self.empyrical.cum_returns_final(returns, starting_value=starting_value)
assert_almost_equal(cum_returns_final, expected, 4)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(simple_benchmark, empyrical.WEEKLY, [0.0, 0., 0.0]), (simple_benchmark, empyrical.MONTHLY, [0.01, 0.]), (simple_benchmark, empyrical.QUARTERLY, [0.]), (simple_benchmark, empyrical.YEARLY, [0.]), (weekly_returns, empyrical.MONTHLY, [0.0, 0., (- 0.)]), (weekly_returns, empyrical.YEARLY, [0.]), (monthly_returns, empyrical.YEARLY, [0.]), (monthly_returns, empyrical.QUARTERLY, [0., 0., (- 0.07282)])])
def test_aggregate_returns(self, returns, convert_to, expected):
    """aggregate_returns: resampled values match expected (pandas-only proxy)."""
    returns = self.empyrical(pandas_only=True).aggregate_returns(returns, convert_to).values.tolist()
    for (i, v) in enumerate(returns):
        assert_almost_equal(v, expected[i], DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, np.nan), (one_return, 0.0), (simple_benchmark, 0.0), (mixed_returns, (- 0.1)), (positive_returns, (- 0.0)), (negative_returns, empyrical.cum_returns_final(negative_returns)), (all_negative_returns, empyrical.cum_returns_final(all_negative_returns)), (pd.Series((np.array([10, (- 10), 10]) / 100), index=pd.date_range('2000-1-30', periods=3, freq='D')), (- 0.1))])
def test_max_drawdown(self, returns, expected):
    """max_drawdown: scalar drawdown matches expected for each fixture."""
    assert_almost_equal(self.empyrical.max_drawdown(returns), expected, DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(noise, 0.0001), (noise, 0.001), (noise_uniform, 0.01), (noise_uniform, 0.1)])
def test_max_drawdown_translation(self, returns, constant):
    """Shifting returns by a constant moves max drawdown monotonically."""
    depressed_returns = (returns - constant)
    raised_returns = (returns + constant)
    max_dd = self.empyrical.max_drawdown(returns)
    depressed_dd = self.empyrical.max_drawdown(depressed_returns)
    raised_dd = self.empyrical.max_drawdown(raised_returns)
    # Lower returns can only deepen the drawdown; higher can only shrink it.
    assert (max_dd <= raised_dd)
    assert (depressed_dd <= max_dd)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(mixed_returns, empyrical.DAILY, 1.), (weekly_returns, empyrical.WEEKLY, 0.), (monthly_returns, empyrical.MONTHLY, 0.)])
def test_annual_ret(self, returns, period, expected):
    """annual_return: annualized value for daily/weekly/monthly periods."""
    assert_almost_equal(self.empyrical.annual_return(returns, period=period), expected, DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(flat_line_1_tz, empyrical.DAILY, 0.0), (mixed_returns, empyrical.DAILY, 0.), (weekly_returns, empyrical.WEEKLY, 0.), (monthly_returns, empyrical.MONTHLY, 0.)])
def test_annual_volatility(self, returns, period, expected):
    """annual_volatility: annualized std-dev for each sampling period."""
    assert_almost_equal(self.empyrical.annual_volatility(returns, period=period), expected, DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, empyrical.DAILY, np.nan), (one_return, empyrical.DAILY, np.nan), (mixed_returns, empyrical.DAILY, 19.), (weekly_returns, empyrical.WEEKLY, 2.), (monthly_returns, empyrical.MONTHLY, 0.)])
def test_calmar(self, returns, period, expected):
    """calmar_ratio: annual return over max drawdown; NaN on degenerate input."""
    assert_almost_equal(self.empyrical.calmar_ratio(returns, period=period), expected, DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, 0.0, 0.0, np.nan), (one_return, 0.0, 0.0, np.nan), (mixed_returns, 0.0, 10.0, 0.), (mixed_returns, 0.0, (- 10.0), np.nan), (mixed_returns, flat_line_1, 0.0, 0.8125), (positive_returns, 0.01, 0.0, np.nan), (positive_returns, 0.011, 0.0, 1.125), (positive_returns, 0.02, 0.0, 0.0), (negative_returns, 0.01, 0.0, 0.0)])
def test_omega(self, returns, risk_free, required_return, expected):
    """omega_ratio: exact values for combinations of risk_free / required_return."""
    assert_almost_equal(self.empyrical.omega_ratio(returns, risk_free=risk_free, required_return=required_return), expected, DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(noise_uniform, 0.0, 0.001), (noise, 0.001, 0.002)])
def test_omega_returns(self, returns, required_return_less, required_return_more):
    """omega_ratio decreases as the required return threshold rises."""
    assert (self.empyrical.omega_ratio(returns, required_return_less) > self.empyrical.omega_ratio(returns, required_return_more))
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, 0.0, np.nan), (one_return, 0.0, np.nan), (mixed_returns, mixed_returns, np.nan), (mixed_returns, 0.0, 1.), (mixed_returns, simple_benchmark, 0.), (positive_returns, 0.0, 52.), (negative_returns, 0.0, (- 24.)), (flat_line_1, 0.0, np.inf)])
def test_sharpe_ratio(self, returns, risk_free, expected):
    """sharpe_ratio: exact values, including inf for zero-volatility returns."""
    assert_almost_equal(self.empyrical.sharpe_ratio(returns, risk_free=risk_free), expected, DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(noise_uniform, 0, 0.005), (noise_uniform, 0.005, 0.005)])
def test_sharpe_translation_same(self, returns, required_return, translation):
    """Sharpe is invariant when returns and benchmark shift by the same amount."""
    sr = self.empyrical.sharpe_ratio(returns, required_return)
    sr_depressed = self.empyrical.sharpe_ratio((returns - translation), (required_return - translation))
    sr_raised = self.empyrical.sharpe_ratio((returns + translation), (required_return + translation))
    assert_almost_equal(sr, sr_depressed, DECIMAL_PLACES)
    assert_almost_equal(sr, sr_raised, DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(noise_uniform, 0, 0.0002, 0.0001), (noise_uniform, 0.005, 0.0001, 0.0002)])
def test_sharpe_translation_diff(self, returns, required_return, translation_returns, translation_required):
    """Sharpe changes when returns and benchmark shift by different amounts."""
    sr = self.empyrical.sharpe_ratio(returns, required_return)
    sr_depressed = self.empyrical.sharpe_ratio((returns - translation_returns), (required_return - translation_required))
    sr_raised = self.empyrical.sharpe_ratio((returns + translation_returns), (required_return + translation_required))
    assert (sr != sr_depressed)
    assert (sr != sr_raised)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(noise_uniform, 0, 0.005), (noise, 0, 0.005)])
def test_sharpe_translation_1(self, returns, required_return, translation):
    """Sharpe decreases monotonically as only the required return rises."""
    sr = self.empyrical.sharpe_ratio(returns, required_return)
    sr_depressed = self.empyrical.sharpe_ratio(returns, (required_return - translation))
    sr_raised = self.empyrical.sharpe_ratio(returns, (required_return + translation))
    assert (sr_depressed > sr)
    assert (sr > sr_raised)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(0.001, 0.002), (0.01, 0.02)])
def test_sharpe_noise(self, small, large):
    """Higher volatility at the same mean yields a lower Sharpe ratio."""
    index = pd.date_range('2000-1-30', periods=1000, freq='D')
    smaller_normal = pd.Series(rand.normal(0.01, small, 1000), index=index)
    larger_normal = pd.Series(rand.normal(0.01, large, 1000), index=index)
    assert (self.empyrical.sharpe_ratio(smaller_normal, 0.001) > self.empyrical.sharpe_ratio(larger_normal, 0.001))
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, 0.0, empyrical.DAILY, np.nan), (one_return, 0.0, empyrical.DAILY, 0.0), (mixed_returns, mixed_returns, empyrical.DAILY, 0.0), (mixed_returns, 0.0, empyrical.DAILY, 0.), (mixed_returns, 0.1, empyrical.DAILY, 1.), (weekly_returns, 0.0, empyrical.WEEKLY, 0.), (weekly_returns, 0.1, empyrical.WEEKLY, 0.), (monthly_returns, 0.0, empyrical.MONTHLY, 0.), (monthly_returns, 0.1, empyrical.MONTHLY, 0.), (df_simple, 0.0, empyrical.DAILY, pd.Series([0., 0.], index=['one', 'two'])), (df_week, 0.0, empyrical.WEEKLY, pd.Series([0., 0.], index=['one', 'two'])), (df_month, 0.0, empyrical.MONTHLY, pd.Series([0., 0.], index=['one', 'two']))])
def test_downside_risk(self, returns, required_return, period, expected):
    """downside_risk: scalar for Series input, per-column Series for DataFrames."""
    downside_risk = self.empyrical.downside_risk(returns, required_return=required_return, period=period)
    if isinstance(downside_risk, float):
        assert_almost_equal(downside_risk, expected, DECIMAL_PLACES)
    else:
        # DataFrame input returns one value per column; compare element-wise.
        for i in range(downside_risk.size):
            assert_almost_equal(downside_risk[i], expected[i], DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(noise, flat_line_0), (noise_uniform, flat_line_0)])
def test_downside_risk_noisy(self, noise, flat_line):
    """More noisy observations mixed into a flat line raise downside risk."""
    noisy_returns_1 = noise[0:250].add(flat_line[250:], fill_value=0)
    noisy_returns_2 = noise[0:500].add(flat_line[500:], fill_value=0)
    noisy_returns_3 = noise[0:750].add(flat_line[750:], fill_value=0)
    dr_1 = self.empyrical.downside_risk(noisy_returns_1, flat_line)
    dr_2 = self.empyrical.downside_risk(noisy_returns_2, flat_line)
    dr_3 = self.empyrical.downside_risk(noisy_returns_3, flat_line)
    assert (dr_1 <= dr_2)
    assert (dr_2 <= dr_3)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(noise, 0.005), (noise_uniform, 0.005)])
def test_downside_risk_trans(self, returns, required_return):
    """Raising the required return can only increase downside risk."""
    dr_0 = self.empyrical.downside_risk(returns, (- required_return))
    dr_1 = self.empyrical.downside_risk(returns, 0)
    dr_2 = self.empyrical.downside_risk(returns, required_return)
    assert (dr_0 <= dr_1)
    assert (dr_1 <= dr_2)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(0.001, 0.002), (0.001, 0.01), (0, 0.001)])
def test_downside_risk_std(self, smaller_std, larger_std):
    """Noise with larger std-dev produces strictly larger downside risk."""
    less_noise = pd.Series((rand.normal(0, smaller_std, 1000) if (smaller_std != 0) else np.full(1000, 0)), index=pd.date_range('2000-1-30', periods=1000, freq='D'))
    more_noise = pd.Series((rand.normal(0, larger_std, 1000) if (larger_std != 0) else np.full(1000, 0)), index=pd.date_range('2000-1-30', periods=1000, freq='D'))
    assert (self.empyrical.downside_risk(less_noise) < self.empyrical.downside_risk(more_noise))
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, 0.0, empyrical.DAILY, np.nan), (one_return, 0.0, empyrical.DAILY, np.nan), (mixed_returns, mixed_returns, empyrical.DAILY, np.nan), (mixed_returns, 0.0, empyrical.DAILY, 2.), (mixed_returns, flat_line_1, empyrical.DAILY, (- 1.)), (positive_returns, 0.0, empyrical.DAILY, np.inf), (negative_returns, 0.0, empyrical.DAILY, (- 13.)), (simple_benchmark, 0.0, empyrical.DAILY, np.inf), (weekly_returns, 0.0, empyrical.WEEKLY, 1.), (monthly_returns, 0.0, empyrical.MONTHLY, 0.), (df_simple, 0.0, empyrical.DAILY, pd.Series([3., 38.], index=['one', 'two'])), (df_week, 0.0, empyrical.WEEKLY, pd.Series([1., 17.], index=['one', 'two'])), (df_month, 0.0, empyrical.MONTHLY, pd.Series([0., 8.], index=['one', 'two']))])
def test_sortino(self, returns, required_return, period, expected):
    """sortino_ratio: scalar for Series input, per-column Series for DataFrames."""
    sortino_ratio = self.empyrical.sortino_ratio(returns, required_return=required_return, period=period)
    if isinstance(sortino_ratio, float):
        assert_almost_equal(sortino_ratio, expected, DECIMAL_PLACES)
    else:
        # DataFrame input returns one value per column; compare element-wise.
        for i in range(sortino_ratio.size):
            assert_almost_equal(sortino_ratio[i], expected[i], DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(noise_uniform, 0), (noise, 0)])
def test_sortino_add_noise(self, returns, required_return):
    """Converting upside observations into losses strictly lowers Sortino."""
    # Copy so the mutation below does not leak into the shared fixture.
    returns = returns.copy()
    sr_1 = self.empyrical.sortino_ratio(returns, required_return)
    upside_values = returns[(returns > required_return)].index.tolist()
    loss_loc = rand.choice(upside_values, 2)
    returns[loss_loc[0]] = (- 0.01)
    sr_2 = self.empyrical.sortino_ratio(returns, required_return)
    returns[loss_loc[1]] = (- 0.01)
    sr_3 = self.empyrical.sortino_ratio(returns, required_return)
    assert (sr_1 > sr_2)
    assert (sr_2 > sr_3)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(noise_uniform, 0), (noise, 0)])
def test_sortino_sub_noise(self, returns, required_return):
    """Neutralizing downside observations cannot lower the Sortino ratio."""
    # Copy so the mutation below does not leak into the shared fixture.
    returns = returns.copy()
    sr_1 = self.empyrical.sortino_ratio(returns, required_return)
    downside_values = returns[(returns < required_return)].index.tolist()
    loss_loc = rand.choice(downside_values, 2)
    returns[loss_loc[0]] = required_return
    sr_2 = self.empyrical.sortino_ratio(returns, required_return)
    returns[loss_loc[1]] = required_return
    sr_3 = self.empyrical.sortino_ratio(returns, required_return)
    assert (sr_1 <= sr_2)
    assert (sr_2 <= sr_3)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(noise_uniform, 0, 0.005), (noise_uniform, 0.005, 0.005)])
def test_sortino_translation_same(self, returns, required_return, translation):
    """Sortino is invariant when returns and threshold shift by the same amount."""
    sr = self.empyrical.sortino_ratio(returns, required_return)
    sr_depressed = self.empyrical.sortino_ratio((returns - translation), (required_return - translation))
    sr_raised = self.empyrical.sortino_ratio((returns + translation), (required_return + translation))
    assert_almost_equal(sr, sr_depressed, DECIMAL_PLACES)
    assert_almost_equal(sr, sr_raised, DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(noise_uniform, 0, 0, 0.001), (noise_uniform, 0.005, 0.001, 0)])
def test_sortino_translation_diff(self, returns, required_return, translation_returns, translation_required):
    """Sortino changes when returns and threshold shift by different amounts."""
    sr = self.empyrical.sortino_ratio(returns, required_return)
    sr_depressed = self.empyrical.sortino_ratio((returns - translation_returns), (required_return - translation_required))
    sr_raised = self.empyrical.sortino_ratio((returns + translation_returns), (required_return + translation_required))
    assert (sr != sr_depressed)
    assert (sr != sr_raised)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, 0.0, np.nan), (one_return, 0.0, np.nan), (pos_line, pos_line, np.nan), (mixed_returns, 0.0, 0.), (mixed_returns, flat_line_1, (- 0.))])
def test_excess_sharpe(self, returns, factor_returns, expected):
    """excess_sharpe: exact values against scalar and series benchmarks."""
    assert_almost_equal(self.empyrical.excess_sharpe(returns, factor_returns), expected, DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(flat_line_0, pos_line), (flat_line_1_tz, pos_line), (noise, pos_line)])
def test_excess_sharpe_noisy(self, noise_line, benchmark):
    """|excess_sharpe| grows as more of the series deviates from the benchmark."""
    noisy_returns_1 = noise_line[0:250].add(benchmark[250:], fill_value=0)
    noisy_returns_2 = noise_line[0:500].add(benchmark[500:], fill_value=0)
    noisy_returns_3 = noise_line[0:750].add(benchmark[750:], fill_value=0)
    ir_1 = self.empyrical.excess_sharpe(noisy_returns_1, benchmark)
    ir_2 = self.empyrical.excess_sharpe(noisy_returns_2, benchmark)
    ir_3 = self.empyrical.excess_sharpe(noisy_returns_3, benchmark)
    assert (abs(ir_1) < abs(ir_2))
    assert (abs(ir_2) < abs(ir_3))
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(pos_line, noise, flat_line_1_tz), (pos_line, inv_noise, flat_line_1_tz), (neg_line, noise, flat_line_1_tz), (neg_line, inv_noise, flat_line_1_tz)])
def test_excess_sharpe_trans(self, returns, add_noise, translation):
    """Translating the noisy series up/down moves excess_sharpe monotonically."""
    ir = self.empyrical.excess_sharpe((returns + add_noise), returns)
    raised_ir = self.empyrical.excess_sharpe(((returns + add_noise) + translation), returns)
    depressed_ir = self.empyrical.excess_sharpe(((returns + add_noise) - translation), returns)
    assert (ir < raised_ir)
    assert (depressed_ir < ir)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, simple_benchmark, (np.nan, np.nan)), (one_return, one_return, (np.nan, np.nan)), (mixed_returns, negative_returns[1:], ((- 0.), (- 0.))), (mixed_returns, mixed_returns, (0.0, 1.0)), (mixed_returns, (- mixed_returns), (0.0, (- 1.0)))])
def test_alpha_beta(self, returns, benchmark, expected):
    """alpha_beta: expected (alpha, beta) pair; pandas-only when lengths differ."""
    (alpha, beta) = self.empyrical(pandas_only=(len(returns) != len(benchmark)), return_types=np.ndarray).alpha_beta(returns, benchmark)
    assert_almost_equal(alpha, expected[0], DECIMAL_PLACES)
    assert_almost_equal(beta, expected[1], DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, simple_benchmark, np.nan), (one_return, one_return, np.nan), (mixed_returns, flat_line_1, np.nan), (mixed_returns, mixed_returns, 0.0), (mixed_returns, (- mixed_returns), 0.0)])
def test_alpha(self, returns, benchmark, expected):
    """alpha: matches expected and cross-checks against a linregress intercept."""
    observed = self.empyrical.alpha(returns, benchmark)
    assert_almost_equal(observed, expected, DECIMAL_PLACES)
    if (len(returns) == len(benchmark)):
        returns_arr = returns.values
        benchmark_arr = benchmark.values
        # Drop NaNs from both series before the independent regression.
        mask = ((~ np.isnan(returns_arr)) & (~ np.isnan(benchmark_arr)))
        (slope, intercept, _, _, _) = stats.linregress(benchmark_arr[mask], returns_arr[mask])
        # 252: daily annualization factor used by the cross-check.
        assert_almost_equal(observed, (intercept * 252), DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(0, 0.001), (0.01, 0.001)])
def test_alpha_beta_translation(self, mean_returns, translation):
    """Translating returns shifts (de-annualized) alpha by exactly that amount
    and leaves beta unchanged, on correlated synthetic return/benchmark pairs."""
    std_returns = 0.01
    correlation = 0.8
    std_bench = 0.001
    # Joint normal draw with the desired cross-correlation.
    means = [mean_returns, 0.001]
    covs = [[(std_returns ** 2), ((std_returns * std_bench) * correlation)], [((std_returns * std_bench) * correlation), (std_bench ** 2)]]
    (ret, bench) = rand.multivariate_normal(means, covs, 1000).T
    returns = pd.Series(ret, index=pd.date_range('2000-1-30', periods=1000, freq='D'))
    benchmark = pd.Series(bench, index=pd.date_range('2000-1-30', periods=1000, freq='D'))
    returns_depressed = (returns - translation)
    returns_raised = (returns + translation)
    alpha_beta = self.empyrical(return_types=np.ndarray).alpha_beta
    (alpha_depressed, beta_depressed) = alpha_beta(returns_depressed, benchmark)
    (alpha_standard, beta_standard) = alpha_beta(returns, benchmark)
    (alpha_raised, beta_raised) = alpha_beta(returns_raised, benchmark)
    # Alpha is annualized, so compare after converting back to a daily rate.
    assert_almost_equal((((alpha_standard + 1) ** (1 / 252)) - ((alpha_depressed + 1) ** (1 / 252))), translation, DECIMAL_PLACES)
    assert_almost_equal((((alpha_raised + 1) ** (1 / 252)) - ((alpha_standard + 1) ** (1 / 252))), translation, DECIMAL_PLACES)
    assert_almost_equal(beta_standard, beta_depressed, DECIMAL_PLACES)
    assert_almost_equal(beta_standard, beta_raised, DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(0.1, 0.9)])
def test_alpha_beta_correlation(self, corr_less, corr_more):
    """Higher return/benchmark correlation lowers alpha and raises beta."""
    mean_returns = 0.01
    mean_bench = 0.001
    std_returns = 0.01
    std_bench = 0.001
    index = pd.date_range('2000-1-30', periods=1000, freq='D')
    # Weakly correlated pair.
    means_less = [mean_returns, mean_bench]
    covs_less = [[(std_returns ** 2), ((std_returns * std_bench) * corr_less)], [((std_returns * std_bench) * corr_less), (std_bench ** 2)]]
    (ret_less, bench_less) = rand.multivariate_normal(means_less, covs_less, 1000).T
    returns_less = pd.Series(ret_less, index=index)
    benchmark_less = pd.Series(bench_less, index=index)
    # Strongly correlated pair.
    means_more = [mean_returns, mean_bench]
    covs_more = [[(std_returns ** 2), ((std_returns * std_bench) * corr_more)], [((std_returns * std_bench) * corr_more), (std_bench ** 2)]]
    (ret_more, bench_more) = rand.multivariate_normal(means_more, covs_more, 1000).T
    returns_more = pd.Series(ret_more, index=index)
    benchmark_more = pd.Series(bench_more, index=index)
    alpha_beta = self.empyrical(return_types=np.ndarray).alpha_beta
    (alpha_less, beta_less) = alpha_beta(returns_less, benchmark_less)
    (alpha_more, beta_more) = alpha_beta(returns_more, benchmark_more)
    assert (alpha_less > alpha_more)
    assert (beta_less < beta_more)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(sparse_noise, sparse_noise)])
def test_alpha_beta_with_nan_inputs(self, returns, benchmark):
    """alpha_beta tolerates NaNs in its inputs and still yields finite values."""
    (alpha, beta) = self.empyrical(return_types=np.ndarray).alpha_beta(returns, benchmark)
    self.assertFalse(np.isnan(alpha))
    self.assertFalse(np.isnan(beta))
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, simple_benchmark, np.nan), (one_return, one_return, np.nan), (mixed_returns, flat_line_1, np.nan), (noise, noise, 1.0), ((2 * noise), noise, 2.0), (noise, inv_noise, (- 1.0)), ((2 * noise), inv_noise, (- 2.0)), ((sparse_noise * flat_line_1_tz), sparse_flat_line_1_tz, np.nan), ((simple_benchmark + rand.normal(0, 0.001, len(simple_benchmark))), pd.DataFrame({'returns': simple_benchmark}), 1.0, 2)])
def test_beta(self, returns, benchmark, expected, decimal_places=DECIMAL_PLACES):
    """beta: matches expected and cross-checks against a linregress slope."""
    observed = self.empyrical.beta(returns, benchmark)
    assert_almost_equal(observed, expected, decimal_places)
    if (len(returns) == len(benchmark)):
        # DataFrame benchmark: cross-check against its single column.
        if isinstance(benchmark, pd.DataFrame):
            benchmark = benchmark['returns']
        returns_arr = returns.values
        benchmark_arr = benchmark.values
        mask = ((~ np.isnan(returns_arr)) & (~ np.isnan(benchmark_arr)))
        (slope, intercept, _, _, _) = stats.linregress(benchmark_arr[mask], returns_arr[mask])
        assert_almost_equal(observed, slope)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, simple_benchmark), (one_return, one_return), (mixed_returns, simple_benchmark[1:]), (mixed_returns, negative_returns[1:]), (mixed_returns, mixed_returns), (mixed_returns, (- mixed_returns))])
def test_alpha_beta_equality(self, returns, benchmark):
    """alpha_beta agrees with the standalone alpha() and beta() functions."""
    (alpha, beta) = self.empyrical(pandas_only=(len(returns) != len(benchmark)), return_types=np.ndarray).alpha_beta(returns, benchmark)
    assert_almost_equal(alpha, self.empyrical.alpha(returns, benchmark), DECIMAL_PLACES)
    assert_almost_equal(beta, self.empyrical.beta(returns, benchmark), DECIMAL_PLACES)
    if (len(returns) == len(benchmark)):
        returns_arr = returns.values
        benchmark_arr = benchmark.values
        mask = ((~ np.isnan(returns_arr)) & (~ np.isnan(benchmark_arr)))
        # NOTE(review): argument order here (returns, benchmark) is flipped
        # relative to test_alpha/test_beta — confirm against upstream intent.
        (slope, intercept, _, _, _) = stats.linregress(returns_arr[mask], benchmark_arr[mask])
        assert_almost_equal(alpha, intercept)
        assert_almost_equal(beta, slope)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, np.nan), (one_return, np.nan), (mixed_returns, 0.), (flat_line_1_tz, 1.0)])
def test_stability_of_timeseries(self, returns, expected):
    """stability_of_timeseries: 1.0 for a deterministic line, NaN if too short."""
    assert_almost_equal(self.empyrical.stability_of_timeseries(returns), expected, DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, np.nan), (one_return, 1.0), (mixed_returns, 0.), (pd.Series(rand.randn(100000)), 1.0)])
def test_tail_ratio(self, returns, expected):
    """tail_ratio: ~1.0 for symmetric noise; compared at 1 decimal place."""
    assert_almost_equal(self.empyrical.tail_ratio(returns), expected, 1)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, empyrical.DAILY, np.nan), (one_return, empyrical.DAILY, 11.), (mixed_returns, empyrical.DAILY, 1.), (flat_line_1_tz, empyrical.DAILY, 11.), (pd.Series((np.array([3.0, 3.0, 3.0]) / 100), index=pd.date_range('2000-1-30', periods=3, freq='A')), 'yearly', 0.03)])
def test_cagr(self, returns, period, expected):
    """cagr: compound annual growth rate for daily and yearly periods."""
    assert_almost_equal(self.empyrical.cagr(returns, period=period), expected, DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(noise, 0.01), (noise_uniform, 0.01)])
def test_cagr_translation(self, returns, constant):
    """Shifting returns by a constant moves CAGR strictly monotonically."""
    cagr_depressed = self.empyrical.cagr((returns - constant))
    cagr_unchanged = self.empyrical.cagr(returns)
    cagr_raised = self.empyrical.cagr((returns + constant))
    self.assertTrue((cagr_depressed < cagr_unchanged))
    self.assertTrue((cagr_unchanged < cagr_raised))
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(sparse_noise,)])
def test_cagr_with_nan_inputs(self, returns):
    """cagr tolerates NaNs in the input series and still yields a finite value."""
    self.assertFalse(np.isnan(self.empyrical.cagr(returns)))
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(pos_line, noise), (pos_line, noise_uniform), (flat_line_1_tz, noise)])
def test_cagr_noisy(self, returns, add_noise):
    """Small zero-mean noise barely perturbs CAGR (1 significant digit)."""
    cagr = self.empyrical.cagr(returns)
    noisy_cagr_1 = self.empyrical.cagr((returns + add_noise))
    noisy_cagr_2 = self.empyrical.cagr((returns - add_noise))
    np.testing.assert_approx_equal(cagr, noisy_cagr_1, 1)
    np.testing.assert_approx_equal(cagr, noisy_cagr_2, 1)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(one_return, one_return, np.nan), (positive_returns, simple_benchmark, 0.0), (mixed_returns, simple_benchmark, 0.09), (negative_returns, simple_benchmark, (- 0.03))])
def test_beta_fragility_heuristic(self, returns, factor_returns, expected):
    """beta_fragility_heuristic: exact values per fixture/benchmark pair."""
    assert_almost_equal(self.empyrical.beta_fragility_heuristic(returns, factor_returns), expected, DECIMAL_PLACES)
# Expected 5-element GPD risk vectors shared by the cases below.
mixed_returns_expected_gpd_risk_result = [0.1, 0., 1.e-06, 0., 0.]
negative_returns_expected_gpd_risk_result = [0.05, 0., 9.e-07, 0., 0.]
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(one_return, [0, 0, 0, 0, 0]), (empty_returns, [0, 0, 0, 0, 0]), (simple_benchmark, [0, 0, 0, 0, 0]), (positive_returns, [0, 0, 0, 0, 0]), (negative_returns, negative_returns_expected_gpd_risk_result), (mixed_returns, mixed_returns_expected_gpd_risk_result), (flat_line_1, [0, 0, 0, 0]), (weekly_returns, mixed_returns_expected_gpd_risk_result), (monthly_returns, mixed_returns_expected_gpd_risk_result)])
def test_gpd_risk_estimates(self, returns, expected):
    """gpd_risk_estimates_aligned: compare each component of the result vector."""
    result = self.empyrical.gpd_risk_estimates_aligned(returns)
    for (result_item, expected_item) in zip(result, expected):
        assert_almost_equal(result_item, expected_item, DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, 6, []), (negative_returns, 6, [(- 0.2282), (- 0.2745), (- 0.2899), (- 0.2747)])])
def test_roll_max_drawdown(self, returns, window, expected):
    """roll_max_drawdown: rolling values and tail-aligned index."""
    test = self.empyrical.roll_max_drawdown(returns, window=window)
    assert_almost_equal(np.asarray(test), np.asarray(expected), 4)
    self.assert_indexes_match(test, returns[(- len(expected)):])
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, 6, []), (negative_returns, 6, [(- 18.), (- 26.), (- 26.), (- 25.)]), (mixed_returns, 6, [7., 8., 8., (- 3.1374751)])])
def test_roll_sharpe_ratio(self, returns, window, expected):
    """roll_sharpe_ratio: rolling values and tail-aligned index."""
    test = self.empyrical.roll_sharpe_ratio(returns, window=window)
    assert_almost_equal(np.asarray(test), np.asarray(expected), DECIMAL_PLACES)
    self.assert_indexes_match(test, returns[(- len(expected)):])
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, empty_returns, np.nan), (one_return, one_return, 1.0), (mixed_returns, mixed_returns, 1.0), (all_negative_returns, mixed_returns, (- 0.))])
def test_capture_ratio(self, returns, factor_returns, expected):
    """capture: overall capture ratio against the factor returns."""
    assert_almost_equal(self.empyrical.capture(returns, factor_returns), expected, DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, empty_returns, np.nan), (one_return, one_return, np.nan), (mixed_returns, mixed_returns, 1.0), (all_negative_returns, mixed_returns, 0.), (positive_returns, mixed_returns, (- 11.))])
def test_down_capture(self, returns, factor_returns, expected):
    """down_capture: capture ratio over the factor's down periods only."""
    assert_almost_equal(self.empyrical.down_capture(returns, factor_returns), expected, DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, simple_benchmark, 1, ([(np.nan, np.nan)] * len(simple_benchmark))), (one_return, one_return, 1, [(np.nan, np.nan)]), (mixed_returns, negative_returns, 6, [((- 0.), (- 0.7826087)), ((- 0.9828927), (- 0.)), ((- 0.), (- 0.)), ((- 0.), (- 0.))]), (mixed_returns, mixed_returns, 6, [(0.0, 1.0), (0.0, 1.0), (0.0, 1.0), (0.0, 1.0)]), (mixed_returns, (- mixed_returns), 6, [(0.0, (- 1.0)), (0.0, (- 1.0)), (0.0, (- 1.0)), (0.0, (- 1.0))])])
def test_roll_alpha_beta(self, returns, benchmark, window, expected):
    """roll_alpha_beta: rolling (alpha, beta) pairs; handles both return types."""
    test = self.empyrical(return_types=(np.ndarray, pd.DataFrame)).roll_alpha_beta(returns, benchmark, window)
    if isinstance(test, pd.DataFrame):
        # DataFrame result carries the tail of the benchmark index.
        self.assert_indexes_match(test, benchmark[(- len(expected)):])
        test = test.values
    alpha_test = [t[0] for t in test]
    beta_test = [t[1] for t in test]
    alpha_expected = [t[0] for t in expected]
    beta_expected = [t[1] for t in expected]
    assert_almost_equal(np.asarray(alpha_test), np.asarray(alpha_expected), DECIMAL_PLACES)
    assert_almost_equal(np.asarray(beta_test), np.asarray(beta_expected), DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, empty_returns, 1, []), (one_return, one_return, 1, np.nan), (mixed_returns, mixed_returns, 6, [1.0, 1.0, 1.0, 1.0]), (positive_returns, mixed_returns, 6, [(- 0.), (- 0.), (- 0.), (- 0.)]), (all_negative_returns, mixed_returns, 6, [(- 6.e-05), (- 0.), (- 0.), (- 0.)])])
def test_roll_up_down_capture(self, returns, factor_returns, window, expected):
    """roll_up_down_capture: rolling up/down capture ratio values."""
    test = self.empyrical.roll_up_down_capture(returns, factor_returns, window=window)
    assert_almost_equal(np.asarray(test), np.asarray(expected), DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, empty_returns, 1, []), (one_return, one_return, 1, [np.nan]), (mixed_returns, mixed_returns, 6, [1.0, 1.0, 1.0, 1.0]), (positive_returns, mixed_returns, 6, [(- 11.2743862), (- 11.2743862), (- 11.2743862), (- 11.)]), (all_negative_returns, mixed_returns, 6, [0., 0., 0., 0.])])
def test_roll_down_capture(self, returns, factor_returns, window, expected):
    """roll_down_capture: rolling values and tail-aligned index."""
    test = self.empyrical.roll_down_capture(returns, factor_returns, window=window)
    assert_almost_equal(np.asarray(test), np.asarray(expected), DECIMAL_PLACES)
    self.assert_indexes_match(test, returns[(- len(expected)):])
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, empty_returns, 1, []), (one_return, one_return, 1, [1.0]), (mixed_returns, mixed_returns, 6, [1.0, 1.0, 1.0, 1.0]), (positive_returns, mixed_returns, 6, [0., 0., 0., 0.0777048]), (all_negative_returns, mixed_returns, 6, [(- 5.e-05), (- 0.), (- 0.), (- 0.)])])
def test_roll_up_capture(self, returns, factor_returns, window, expected):
    """roll_up_capture: rolling values and tail-aligned index."""
    test = self.empyrical.roll_up_capture(returns, factor_returns, window=window)
    assert_almost_equal(np.asarray(test), np.asarray(expected), DECIMAL_PLACES)
    self.assert_indexes_match(test, returns[(- len(expected)):])
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, simple_benchmark, (np.nan, np.nan)), (one_return, one_return, (np.nan, np.nan)), (mixed_returns[1:], negative_returns[1:], ((- 0.), (- 0.))), (mixed_returns, mixed_returns, (0.0, 1.0)), (mixed_returns, (- mixed_returns), (0.0, (- 1.0)))])
def test_down_alpha_beta(self, returns, benchmark, expected):
    """down_alpha_beta: (alpha, beta) over the benchmark's down periods."""
    (down_alpha, down_beta) = self.empyrical(pandas_only=(len(returns) != len(benchmark)), return_types=np.ndarray).down_alpha_beta(returns, benchmark)
    assert_almost_equal(down_alpha, expected[0], DECIMAL_PLACES)
    assert_almost_equal(down_beta, expected[1], DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, simple_benchmark, (np.nan, np.nan)), (one_return, one_return, (np.nan, np.nan)), (mixed_returns[1:], positive_returns[1:], (0., 0.)), (mixed_returns, mixed_returns, (0.0, 1.0)), (mixed_returns, (- mixed_returns), (0.0, (- 1.0)))])
def test_up_alpha_beta(self, returns, benchmark, expected):
    """up_alpha_beta: (alpha, beta) over the benchmark's up periods."""
    (up_alpha, up_beta) = self.empyrical(pandas_only=(len(returns) != len(benchmark)), return_types=np.ndarray).up_alpha_beta(returns, benchmark)
    assert_almost_equal(up_alpha, expected[0], DECIMAL_PLACES)
    assert_almost_equal(up_beta, expected[1], DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, empty_returns, np.nan), (one_return, one_return, np.nan), (mixed_returns, mixed_returns, 1.0), (positive_returns, mixed_returns, (- 0.)), (all_negative_returns, mixed_returns, (- 0.))])
def test_up_down_capture(self, returns, factor_returns, expected):
    """up_down_capture: ratio of up-capture to down-capture."""
    assert_almost_equal(self.empyrical.up_down_capture(returns, factor_returns), expected, DECIMAL_PLACES)
# Stripped @parameterized.expand cases (decorator lost in extraction) — restore upstream.
([(empty_returns, empty_returns, np.nan), (one_return, one_return, 1.0), (mixed_returns, mixed_returns, 1.0), (positive_returns, mixed_returns, 0.), (all_negative_returns, mixed_returns, (- 0.))])
def test_up_capture(self, returns, factor_returns, expected):
    """up_capture: capture ratio over the factor's up periods only."""
    assert_almost_equal(self.empyrical.up_capture(returns, factor_returns), expected, DECIMAL_PLACES)
def test_value_at_risk(self):
    """value_at_risk: exact percentile cutoffs and agreement with np.percentile."""
    value_at_risk = self.empyrical.value_at_risk
    # Two-point series: VaR interpolates linearly between the points.
    returns = [1.0, 2.0]
    for cutoff, expected in ((0.0, 1.0), (0.3, 1.3), (1.0, 2.0)):
        assert_almost_equal(value_at_risk(returns, cutoff=cutoff), expected)
    # Six-point series with a large gap at the bottom.
    returns = [1, 81, 82, 83, 84, 85]
    for cutoff, expected in ((0.1, 41), (0.2, 81), (0.3, 81.5)):
        assert_almost_equal(value_at_risk(returns, cutoff=cutoff), expected)
    # Random sample: VaR must equal the matching numpy percentile everywhere.
    returns = rand.normal(0, 0.02, 21)
    for cutoff in (0, 0.0499, 0.05, 0.2, 0.999, 1):
        assert_almost_equal(value_at_risk(returns, cutoff), np.percentile(returns, (cutoff * 100)))
def test_conditional_value_at_risk(self):
    """conditional_value_at_risk: mean of returns at or below the VaR cutoff."""
    value_at_risk = self.empyrical.value_at_risk
    conditional_value_at_risk = self.empyrical.conditional_value_at_risk
    # A single observation is its own CVaR at any cutoff.
    returns = rand.normal(0, 0.02, 1)
    only_value = returns[0]
    for cutoff in (0, 1):
        assert_almost_equal(conditional_value_at_risk(returns, cutoff=cutoff), only_value)
    # Random sample: CVaR equals the mean of the sub-VaR tail.
    returns = rand.normal(0, 0.02, 21)
    for cutoff in (0, 0.0499, 0.05, 0.2, 0.999, 1):
        threshold = value_at_risk(returns, cutoff)
        assert_almost_equal(conditional_value_at_risk(returns, cutoff), np.mean(returns[(returns <= threshold)]))
def empyrical(self):
    """Return the proxy used to invoke empyrical functions in this test class.

    NOTE(review): this reads like an @property accessor whose decorator was
    lost in extraction (callers use `self.empyrical.foo(...)`) — confirm
    against the upstream source.
    """
    return ReturnTypeEmpyricalProxy(self, (pd.Series, float))
class Fragment(Molecule):
    """A molecular fragment built around one or more torsion bonds of a parent
    molecule (pydantic-style model; `type` is the discriminator literal)."""
    type: Literal['Fragment'] = 'Fragment'
    # Map indices (atom pairs) in the parent molecule that anchor this fragment.
    bond_indices: List[Tuple[(int, int)]] = Field(default_factory=list, description='The map indices of the atoms in the parent molecule that are involved in the bond. The fragment was built around these atoms. Note that one fragment might have more than one torsion bond for performance reasons.')
def copy_parameters(from_model: nn.Module, our_model: nn.Module) -> nn.Module:
    """Port the per-layer scale parameters from a source (VAN-style) checkpoint
    into our model's naming scheme and load the merged state dict.

    The source uses keys like ``block{stage+1}.{block}.layer_scale_{1,2}``;
    ours uses ``van.encoder.stages.{stage}.layers.{block}.*_scaling.weight``.
    The stage/depth layout is read from ``our_model.config``.

    Returns ``our_model`` with the copied weights loaded.
    """
    source = from_model.state_dict()
    target = our_model.state_dict()
    cfg = our_model.config
    # One (source_key, target_key) pair per scale parameter, in stage/block order.
    key_pairs = [
        (f'block{stage + 1}.{blk}.layer_scale_{suffix}',
         f'van.encoder.stages.{stage}.layers.{blk}.{scaling}.weight')
        for stage in range(len(cfg.hidden_sizes))
        for blk in range(cfg.depths[stage])
        for suffix, scaling in ((1, 'attention_scaling'), (2, 'mlp_scaling'))
    ]
    for source_key, target_key in key_pairs:
        # pop() doubles as a check that every expected source key exists.
        target[target_key] = source.pop(source_key)
    our_model.load_state_dict(target)
    return our_model
class GptEncoder(nn.Module):
    """Stack of TransformerLayer blocks with a causal (lower-triangular) mask.

    The `seg` argument is accepted for interface compatibility but unused.
    """

    def __init__(self, args):
        super(GptEncoder, self).__init__()
        self.layers_num = args.layers_num
        self.transformer = nn.ModuleList([TransformerLayer(args) for _ in range(self.layers_num)])

    def forward(self, emb, seg):
        """Run the embeddings through every layer under a causal mask."""
        batch_size, seq_length, _ = emb.size()
        # Causal mask: 0 on and below the diagonal, -10000 above it, so
        # attention to future positions is suppressed after softmax.
        causal = torch.tril(torch.ones(seq_length, seq_length, device=emb.device))
        mask = ((1.0 - causal) * (- 10000)).repeat(batch_size, 1, 1, 1)
        hidden = emb
        for layer in self.transformer:
            hidden = layer(hidden, mask)
        return hidden
def test_majorana_operator_commutes_with():
    """Exercise MajoranaOperator.commutes_with on single terms, sums of terms,
    terms with repeated indices, and the TypeError path for non-operators."""
    a = MajoranaOperator((0, 1, 5))
    b = MajoranaOperator((1, 2, 7))
    c = MajoranaOperator((2, 3, 4))
    d = MajoranaOperator((0, 3, 6))
    assert a.commutes_with(b)
    assert (not a.commutes_with(c))
    assert a.commutes_with(d)
    assert b.commutes_with(c)
    assert (not b.commutes_with(d))
    assert c.commutes_with(d)
    # Sums of operators commute term-wise.
    assert (a + c).commutes_with((b + d))
    # Repeated indices in the defining tuple are also accepted.
    e = MajoranaOperator((0, 1, 1, 1, 4, 5))
    f = MajoranaOperator((0, 1, 1, 4))
    assert e.commutes_with(f)
    # A non-operator argument must raise TypeError.
    with pytest.raises(TypeError):
        _ = e.commutes_with(0)
def scrape():
    """Interactively scrape followers of one or more Instagram accounts.

    Side effects: prompts on stdin for a follower count and usernames,
    launches a mobile-emulated headful Chrome via Selenium, logs in with
    stored or prompted credentials, then delegates to scrape_followers().
    """
    credentials = load_credentials()
    if (credentials is None):
        (username, password) = prompt_credentials()
    else:
        (username, password) = credentials
    # Raises ValueError if the user types a non-integer count.
    user_input = int(input('[Required] - How many followers do you want to scrape (100-2000 recommended): '))
    usernames = input('Enter the Instagram usernames you want to scrape (separated by commas): ').split(',')
    options = webdriver.ChromeOptions()
    options.add_argument('--no-sandbox')
    options.add_argument('--log-level=3')
    # Mobile user agent so Instagram serves the lighter mobile layout.
    mobile_emulation = {'userAgent': 'Mozilla/5.0 (Linux; Android 4.2.1; en-us; Nexus 5 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/90.0.1025.166 Mobile Safari/535.19'}
    options.add_experimental_option('mobileEmulation', mobile_emulation)
    # NOTE(review): executable_path is removed in Selenium 4 — confirm the
    # project pins Selenium 3.x, otherwise pass a Service object instead.
    bot = webdriver.Chrome(executable_path=CM().install(), options=options)
    login(bot, username, password)
    for user in usernames:
        user = user.strip()
        scrape_followers(bot, user, user_input)
    bot.quit()
def main():
    """Run the daily WPS/Docer check-in flow for every configured account,
    accumulate a report in the module-level `sio` buffer, and push it via
    `sendNotify`.

    NOTE(review): several URL string literals below were stripped down to a
    bare opening quote (e.g. ``checinRecord_url = '``) — presumably sanitized
    during extraction. The function is not runnable until the original
    endpoint URLs are restored.
    """
    # Each item is an account record; code below reads item['name'] and item['sid'].
    sid = data['wps_checkin']
    for item in sid:
        sio.write('---{}---\n\n'.format(item['name']))
        # Docer web-page check-in; a return of 1 appears to mean success.
        b1 = docer_webpage_clockin(item['sid'])
        if (b1 == 1):
            checinRecord_url = '
            r = s.get(checinRecord_url, headers={'sid': item['sid']})
            resp = json.loads(r.text)
            sio.write(': {}\n\n'.format(resp['data']['max_days']))
            checkinEarlytimes_url = '
            r1 = s.get(checkinEarlytimes_url, headers={'sid': item['sid']})
            resp1 = json.loads(r1.text)
            sio.write(': {}\n\n'.format(resp1['data']))
            max_days = resp['data']['max_days']
            # If early-check-in credits are available and there is a check-in
            # history, spend them to extend the streak.
            if ((resp1['data'] > 0) and (len(resp['data']['records']) > 0)):
                max_days = docer_webpage_earlyclockin(item['sid'], resp1['data'], resp['data']['records'], max_days)
            if (len(resp['data']['records']) > 0):
                docer_webpage_giftReceive(item['sid'], max_days)
        # WPS mini-program check-in; same success convention as above.
        b2 = wps_miniprogram_clockin(item['sid'])
        if (b2 == 1):
            member_url = '
            r = s.get(member_url, headers={'sid': item['sid']})
            # The member page is HTML/JS, so the counter is pulled via regex.
            total_add_day = re.search('"total_add_day":(\\d+)', r.text).group(1)
            sio.write(': {}\n\n'.format(total_add_day))
        sio.write('\n\n ------\n\n')
        summary_url = '
        r = s.post(summary_url, headers={'sid': item['sid']})
        resp = json.loads(r.text)
        sio.write(':{}\n\n"":{}\n\n'.format(resp['data']['integral'], resp['data']['wealth']))
        userinfo_url = '
        r = s.get(userinfo_url, headers={'sid': item['sid']})
        resp = json.loads(r.text)
        # Report every currently-enabled VIP membership and its expiry.
        if (len(resp['data']['vip']['enabled']) > 0):
            sio.write(':\n\n')
            for i in range(len(resp['data']['vip']['enabled'])):
                sio.write('"":{}, "":{}\n\n'.format(resp['data']['vip']['enabled'][i]['name'], datetime.datetime.fromtimestamp(resp['data']['vip']['enabled'][i]['expire_time']).strftime('%Y--%m--%d %H:%M:%S')))
    sio.write('\n\n wps\n\n')
    # NOTE(review): this loop reuses `resp` left over from the LAST iteration
    # of the loop above for every account, and `invite_sid` is not defined in
    # this function — both look like bugs; confirm against the original script.
    for item in sid:
        sio.write('---{}---\n\n'.format(item['name']))
        if (type(resp['data']['userid']) == int):
            wps_miniprogram_invite(invite_sid, resp['data']['userid'])
        else:
            sio.write(': ID, sid\n\n')
    # Flush the accumulated report to the notification channel and stdout.
    desp = sio.getvalue()
    sendNotify.send(title='Wps', msg=desp)
    print(desp)
    return desp
class GosuTemplateLexer(Lexer):
    """Pygments lexer for Gosu templates (``*.gst``).

    Delegates all tokenization to :class:`GosuLexer`, but starts it in the
    ``templateText`` state so the input is treated as template text with
    embedded Gosu code.

    NOTE(review): the ``url`` attribute's string literal was truncated to a
    bare opening quote during extraction; restore the project URL.
    """
    name = 'Gosu Template'
    aliases = ['gst']
    filenames = ['*.gst']
    mimetypes = ['text/x-gosu-template']
    url = '
    version_added = '1.5'
    def get_tokens_unprocessed(self, text):
        # Run the full Gosu lexer with the state stack seeded to template text.
        lexer = GosuLexer()
        stack = ['templateText']
        (yield from lexer.get_tokens_unprocessed(text, stack))
def matplotlib_plt(scatters, title, ylabel, output_file, limits=None, show=False, figsize=None):
    """Plot rate-distortion curves for a set of codecs.

    Args:
        scatters: iterable of dicts with 'name', 'xs' (bit-rates) and 'ys'.
        title: optional figure title (skipped when falsy).
        ylabel: label for the y axis (x axis is always bit-rate in bpp).
        output_file: path to save the figure to (skipped when falsy).
        limits: optional (xmin, xmax, ymin, ymax) passed to ax.axis().
        show: when True, display the figure interactively.
        figsize: optional (width, height); defaults to (9, 6).

    Bug fix: the line style used to be chosen once and never reset, so after
    the first hybrid-codec curve every later curve was drawn dashed. The
    style is now decided independently for each curve.
    """
    hybrid_matches = ['HM', 'VTM', 'JPEG', 'JPEG2000', 'WebP', 'BPG', 'AV1']
    if figsize is None:
        figsize = (9, 6)
    fig, ax = plt.subplots(figsize=figsize)
    for sc in scatters:
        # Dashed lines mark classical/hybrid codecs; solid for everything else.
        linestyle = '--' if any(x in sc['name'] for x in hybrid_matches) else '-'
        ax.plot(sc['xs'], sc['ys'], marker='.', linestyle=linestyle, linewidth=0.7, label=sc['name'])
    ax.set_xlabel('Bit-rate [bpp]')
    ax.set_ylabel(ylabel)
    ax.grid()
    if limits is not None:
        ax.axis(limits)
    ax.legend(loc='lower right')
    if title:
        ax.title.set_text(title)
    if show:
        plt.show()
    if output_file:
        fig.savefig(output_file, dpi=300)
def test_dataid():
    """DataID construction, required fields, immutability and round-trips."""
    from satpy.dataset.dataid import DataID, ModifierTuple, ValueList, WavelengthRange

    # A calibration key without an 'enum' entry is promoted to a ValueList type.
    did = make_dataid()
    assert issubclass(did._id_keys['calibration']['type'], ValueList)
    assert 'enum' not in did._id_keys['calibration']

    # Passing None drops the key entirely, and 'None' never leaks into repr.
    did = make_dataid(name='cheese_shops', resolution=None)
    assert 'resolution' not in did
    assert 'None' not in repr(did)

    with pytest.raises(ValueError, match='Required field name missing.'):
        make_dataid(name=None, resolution=1000)

    # Modifiers default to an empty ModifierTuple.
    assert did['modifiers'] == ModifierTuple()

    # from_dict yields an equal but distinct instance.
    did2 = did.from_dict(dict(name='cheese_shops', resolution=None))
    assert did2 is not did
    assert did2 == did

    # DataIDs are immutable mappings.
    with pytest.raises(TypeError):
        did['resolution'] = 1000

    with pytest.raises(ValueError, match='Required field name missing.'):
        make_dataid(resolution=1000)

    assert did.to_dict() == dict(name='cheese_shops', modifiers=tuple())

    did = make_dataid(name='VIS008', resolution=111)
    assert repr(did) == "DataID(name='VIS008', resolution=111, modifiers=())"

    # IDs built from different key sets must not compare equal.
    default_id_keys_config = {'name': None, 'wavelength': {'type': WavelengthRange}, 'resolution': None, 'calibration': {'enum': ['reflectance', 'brightness_temperature', 'radiance', 'counts']}, 'modifiers': {'default': ModifierTuple(), 'type': ModifierTuple}}
    assert DataID(default_id_keys_config, wavelength=10) != DataID(default_id_keys_config, name='VIS006')
def _validate_header(header: str, line_num: int, content: str) -> MessageIterator:
    """Dispatch the *content* of a PEP *header* field to its validator.

    Headers with no registered validator yield no messages, matching the
    fall-through behavior of the original if/elif chain.
    """
    validators = {
        'Title': _validate_title,
        'Author': _validate_author,
        'Sponsor': _validate_sponsor,
        'BDFL-Delegate': _validate_delegate,
        'PEP-Delegate': _validate_delegate,
        'Discussions-To': _validate_discussions_to,
        'Status': _validate_status,
        'Type': _validate_type,
        'Topic': _validate_topic,
        'Content-Type': _validate_content_type,
        'Requires': _validate_pep_references,
        'Replaces': _validate_pep_references,
        'Superseded-By': _validate_pep_references,
        'Created': _validate_created,
        'Python-Version': _validate_python_version,
        'Post-History': _validate_post_history,
        'Resolution': _validate_resolution,
    }
    validator = validators.get(header)
    if validator is not None:
        yield from validator(line_num, content)
# NOTE(review): the decorator line was truncated to ".parametrize(...)" in
# the source; restored to the standard pytest parametrize marker.
@pytest.mark.parametrize(
    'readme, content_type',
    [
        ('README.rst', 'text/x-rst'),
        ('README.md', 'text/markdown'),
        ('README', 'text/plain'),
        (Path('README.rst'), 'text/x-rst'),
        (Path('README.md'), 'text/markdown'),
        (Path('README'), 'text/plain'),
    ],
)
def test_utils_helpers_readme_content_type(readme: (str | Path), content_type: str) -> None:
    """readme_content_type maps a README filename (str or Path) to its MIME type."""
    assert readme_content_type(readme) == content_type
def run_fio(testfile, duration, iotype, iodepth, blocksize, jobs):
    """Run one fio job and return its aggregate throughput.

    Args:
        testfile: path fio reads/writes.
        duration: runtime in seconds (rounded to an integer for fio).
        iotype: fio --readwrite mode (e.g. 'randread', 'write').
        iodepth: async I/O queue depth.
        blocksize: I/O block size (fio syntax, e.g. '4k').
        jobs: number of parallel fio jobs.

    Returns:
        Sum of read + write bandwidth (bytes/s) across all fio jobs.

    Fixes: the NamedTemporaryFile for fio's JSON output was never closed
    (leaked fd + on-disk file); it is now managed with a context manager.
    The unnecessary `global args` declaration (args is only read) is gone.
    """
    eta = 'never' if args.quiet else 'always'
    # Scratch file for fio's JSON report; removed automatically on exit,
    # even if fio fails.
    with tempfile.NamedTemporaryFile() as outfile:
        cmd = (
            f'fio --direct=1 --ioengine=libaio --name=coef --filename={testfile} '
            f'--runtime={round(duration)} --readwrite={iotype} --iodepth={iodepth} '
            f'--blocksize={blocksize} --eta={eta} --output-format json '
            f'--output={outfile.name} --time_based --numjobs={jobs}'
        )
        if args.verbose:
            dbg(f'Running {cmd}')
        subprocess.check_call(cmd, shell=True)
        with open(outfile.name, 'r') as f:
            d = json.load(f)
    return sum((j['read']['bw_bytes'] + j['write']['bw_bytes']) for j in d['jobs'])
def test_port_single(do_test):
    """A component declaring a single InPort(Bits32) exposes that port plus
    the implicit clk/reset ports in its RTLIR port list."""
    class A(Component):
        def construct(s):
            s.in_ = InPort(Bits32)
    a = A()
    # Expected entries: (path, name, rt.Port(direction, data_type), <trailing
    # field — presumably an array dimension/index; confirm against the RTLIR
    # test harness>). clk/reset are 1-bit ports added implicitly.
    a._ref_ports = [(['clk'], 'clk', rt.Port('input', rdt.Vector(1)), 0), (['in_'], 'in_', rt.Port('input', rdt.Vector(32)), 0), (['reset'], 'reset', rt.Port('input', rdt.Vector(1)), 0)]
    # The yosys backend expects the same port list for this component.
    a._ref_ports_yosys = a._ref_ports
    do_test(a)
class Effect4416(BaseEffect):
    """Passive ship effect: boosts the 'explosionDelay' attribute of loaded
    charges that require the Torpedoes skill, scaled by the hull's
    'shipBonusCF' attribute per level of the Caldari Frigate skill."""
    # 'passive' effects are applied whenever the module/ship is fitted.
    type = 'passive'
    # NOTE(review): no `self` parameter — these effect handlers appear to be
    # invoked as plain functions looked up on the class (pyfa convention);
    # confirm before adding a decorator.
    def handler(fit, ship, context, projectionRange, **kwargs):
        fit.modules.filteredChargeBoost((lambda mod: mod.charge.requiresSkill('Torpedoes')), 'explosionDelay', ship.getModifiedItemAttr('shipBonusCF'), skill='Caldari Frigate', **kwargs)
class Trainer():
    """Training/evaluation driver supporting standard and adversarial
    training via an injected attack object."""
    def __init__(self, args, logger, attack):
        # args: config namespace; code below reads learning_rate, max_epoch,
        #       n_eval_step, n_store_image_step, n_checkpoint_step,
        #       log_folder and model_folder.
        # attack: exposes perturb(data, label, reduction, random_flag) —
        #       exact semantics of the last two arguments live in the attack
        #       implementation; confirm there.
        self.args = args
        self.logger = logger
        self.attack = attack
    def standard_train(self, model, tr_loader, va_loader=None):
        # Train on clean data only.
        self.train(model, tr_loader, va_loader, False)
    def adversarial_train(self, model, tr_loader, va_loader=None):
        # Train on adversarially perturbed data.
        self.train(model, tr_loader, va_loader, True)
    def train(self, model, tr_loader, va_loader=None, adv_train=False):
        """Main optimization loop: Adam over cross-entropy, with periodic
        evaluation, sample-image dumps and checkpointing keyed off a global
        iteration counter."""
        args = self.args
        logger = self.logger
        opt = torch.optim.Adam(model.parameters(), args.learning_rate)
        _iter = 0
        begin_time = time()
        for epoch in range(1, (args.max_epoch + 1)):
            for (data, label) in tr_loader:
                (data, label) = (tensor2cuda(data), tensor2cuda(label))
                if adv_train:
                    # Train on perturbed inputs (random start enabled).
                    adv_data = self.attack.perturb(data, label, 'mean', True)
                    output = model(adv_data, _eval=False)
                else:
                    output = model(data, _eval=False)
                loss = F.cross_entropy(output, label)
                opt.zero_grad()
                loss.backward()
                opt.step()
                if ((_iter % args.n_eval_step) == 0):
                    # Log both clean ("standard") and adversarial accuracy on
                    # the current batch. Whichever input type was NOT used for
                    # the training step is computed here under no_grad.
                    if adv_train:
                        with torch.no_grad():
                            stand_output = model(data, _eval=True)
                        pred = torch.max(stand_output, dim=1)[1]
                        std_acc = (evaluate(pred.cpu().numpy(), label.cpu().numpy()) * 100)
                        pred = torch.max(output, dim=1)[1]
                        adv_acc = (evaluate(pred.cpu().numpy(), label.cpu().numpy()) * 100)
                    else:
                        adv_data = self.attack.perturb(data, label, 'mean', False)
                        with torch.no_grad():
                            adv_output = model(adv_data, _eval=True)
                        pred = torch.max(adv_output, dim=1)[1]
                        adv_acc = (evaluate(pred.cpu().numpy(), label.cpu().numpy()) * 100)
                        pred = torch.max(output, dim=1)[1]
                        std_acc = (evaluate(pred.cpu().numpy(), label.cpu().numpy()) * 100)
                    logger.info(('epoch: %d, iter: %d, spent %.2f s, tr_loss: %.3f' % (epoch, _iter, (time() - begin_time), loss.item())))
                    logger.info(('standard acc: %.3f %%, robustness acc: %.3f %%' % (std_acc, adv_acc)))
                    if (va_loader is not None):
                        (va_acc, va_adv_acc) = self.test(model, va_loader, True)
                        (va_acc, va_adv_acc) = ((va_acc * 100.0), (va_adv_acc * 100.0))
                        logger.info(((('\n' + ('=' * 30)) + ' evaluation ') + ('=' * 30)))
                        logger.info(('test acc: %.3f %%, test adv acc: %.3f %%' % (va_acc, va_adv_acc)))
                        logger.info((((('=' * 28) + ' end of evaluation ') + ('=' * 28)) + '\n'))
                    begin_time = time()
                if ((_iter % args.n_store_image_step) == 0):
                    # NOTE(review): when adv_train is False, adv_data is only
                    # (re)assigned on eval iterations — if n_store_image_step
                    # fires off that cadence, a stale adv_data from an earlier
                    # batch is saved alongside the current data. Works at
                    # _iter == 0 since both branches trigger there.
                    tv.utils.save_image(torch.cat([data.cpu(), adv_data.cpu()], dim=0), os.path.join(args.log_folder, ('images_%d.jpg' % _iter)), nrow=16)
                if ((_iter % args.n_checkpoint_step) == 0):
                    file_name = os.path.join(args.model_folder, ('checkpoint_%d.pth' % _iter))
                    save_model(model, file_name)
                _iter += 1
    def test(self, model, loader, adv_test=False):
        """Evaluate clean accuracy and, optionally, adversarial accuracy.

        Returns (clean_acc, adv_acc) as fractions in [0, 1]. When adv_test
        is False, adv_acc is the sentinel value -1.0 (total_adv_acc is set
        to -num before the final division).
        """
        total_acc = 0.0
        num = 0
        total_adv_acc = 0.0
        with torch.no_grad():
            for (data, label) in loader:
                (data, label) = (tensor2cuda(data), tensor2cuda(label))
                output = model(data, _eval=True)
                pred = torch.max(output, dim=1)[1]
                te_acc = evaluate(pred.cpu().numpy(), label.cpu().numpy(), 'sum')
                total_acc += te_acc
                num += output.shape[0]
                if adv_test:
                    # Perturb against the model's own predictions (pred), not
                    # the ground-truth labels — avoids the label-leaking setup.
                    adv_data = self.attack.perturb(data, pred, 'mean', False)
                    adv_output = model(adv_data, _eval=True)
                    adv_pred = torch.max(adv_output, dim=1)[1]
                    adv_acc = evaluate(adv_pred.cpu().numpy(), label.cpu().numpy(), 'sum')
                    total_adv_acc += adv_acc
                else:
                    total_adv_acc = (- num)
        return ((total_acc / num), (total_adv_acc / num))
class ChineseNumberUnit(ChineseChar):
    """A Chinese magnitude/unit character whose numeric value is 10**power.

    Besides the simplified/traditional forms inherited from ChineseChar, it
    carries big_s/big_t — presumably the "capital" (financial) variants used
    on banknotes; confirm against the character tables.
    """
    def __init__(self, power, simplified, traditional, big_s, big_t):
        super(ChineseNumberUnit, self).__init__(simplified, traditional)
        self.power = power  # unit value is 10 ** power
        self.big_s = big_s
        self.big_t = big_t

    def __str__(self):
        return '10^{}'.format(self.power)

    # Fix: this factory takes `cls` as its first parameter but had no
    # @classmethod decorator (other decorators in this file were visibly
    # stripped), so calling it on the class would mis-bind the arguments.
    @classmethod
    def create(cls, index, value, numbering_type=NUMBERING_TYPES[1], small_unit=False):
        """Build a unit from its position in a character table.

        Args:
            index: position of `value` within its table.
            value: (simplified, traditional) character pair.
            numbering_type: one of NUMBERING_TYPES — controls how the power
                grows with index ('low': +8, 'mid': *4, 'high': 2**n).
            small_unit: True for the small units (十/百/千), whose power is
                simply index + 1.

        Raises:
            ValueError: if numbering_type is not one of NUMBERING_TYPES.
        """
        if small_unit:
            # Small units use the traditional form for both capital variants.
            return cls(power=(index + 1), simplified=value[0], traditional=value[1], big_s=value[1], big_t=value[1])
        elif (numbering_type == NUMBERING_TYPES[0]):
            return cls(power=(index + 8), simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1])
        elif (numbering_type == NUMBERING_TYPES[1]):
            return cls(power=((index + 2) * 4), simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1])
        elif (numbering_type == NUMBERING_TYPES[2]):
            return cls(power=pow(2, (index + 3)), simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1])
        else:
            raise ValueError('Counting type should be in {0} ({1} provided).'.format(NUMBERING_TYPES, numbering_type))
def get_gdbserver_type():
    """Detect and cache which remote gdb stub we are attached to.

    Probes for QEMU first, then kgdb, storing the answer in the module-level
    `gdbserver_type` so the (gdb-command-based) probes run at most once per
    debug session. Returns the cached value, which stays None when neither
    probe matches.
    """
    def exit_handler(event):
        # Invalidate the cache when the inferior exits so the next session
        # re-probes; the handler unregisters itself.
        global gdbserver_type
        gdbserver_type = None
        gdb.events.exited.disconnect(exit_handler)
    def probe_qemu():
        # QEMU's gdb stub answers 'monitor info version'; other stubs raise.
        try:
            return (gdb.execute('monitor info version', to_string=True) != '')
        except gdb.error:
            return False
    def probe_kgdb():
        # kgdb exposes per-CPU shadow threads named 'shadowCPU<n>'.
        try:
            thread_info = gdb.execute('info thread 2', to_string=True)
            return ('shadowCPU0' in thread_info)
        except gdb.error:
            return False
    global gdbserver_type
    if (gdbserver_type is None):
        if probe_qemu():
            gdbserver_type = GDBSERVER_QEMU
        elif probe_kgdb():
            gdbserver_type = GDBSERVER_KGDB
        # Only hook the exit event once a type was detected; older gdb builds
        # without the events API are tolerated.
        if ((gdbserver_type is not None) and hasattr(gdb, 'events')):
            gdb.events.exited.connect(exit_handler)
    return gdbserver_type
def is_recursive_pair(s: Type, t: Type) -> bool:
    """Whether the pair (s, t) involves a recursive type alias in a form
    that needs special recursive-type handling (checked symmetrically)."""
    def _pairs_with(alias: Type, other: Type) -> bool:
        # A recursive alias pairs with an Instance/Union on the other side,
        # with another recursive alias, or when the alias itself is a tuple
        # underneath.
        if isinstance(get_proper_type(other), (Instance, UnionType)):
            return True
        if isinstance(other, TypeAliasType) and other.is_recursive:
            return True
        return isinstance(get_proper_type(alias), TupleType)

    if isinstance(s, TypeAliasType) and s.is_recursive:
        return _pairs_with(s, t)
    if isinstance(t, TypeAliasType) and t.is_recursive:
        return _pairs_with(t, s)
    return False
class Test_DATETIME(TestCaseDATETIME2):
    """Round-trip tests for the SQL Server DATETIME type.

    DATETIME's documented range is 1753-01-01 .. 9999-12-31 23:59:59.997 with
    ~3.33 ms precision; values must come back as plain datetime.datetime, not
    the driver's datetime2 subtype.
    """
    table_name = 'test_datetime'
    ddl_create = f'CREATE TABLE {table_name} (test DATETIME)'
    # Boundary values of the DATETIME range.
    min_date = datetime.datetime(1753, 1, 1, 0, 0, 0, 0)
    max_date = datetime.datetime(9999, 12, 31, 23, 59, 59, 997000)
    def test_min_select(self):
        # Minimum value via a server-side CAST.
        self.conn.execute_query("SELECT CAST ('1753-1-1 0:0:0' as DATETIME)")
        res = tuple(self.conn)[0][0]
        assert isinstance(res, datetime.datetime)
        assert (not isinstance(res, datetime2))
        assert (res == self.min_date)
    def test_min_insert(self):
        # Minimum value round-tripped through an INSERT parameter.
        res = self.insert_and_select('test', self.min_date)
        assert isinstance(res, datetime.datetime)
        assert (not isinstance(res, datetime2))
        assert (res == self.min_date)
    def test_max_select(self):
        # Maximum value via a server-side CAST.
        self.conn.execute_query("SELECT CAST ('9999-12-31 23:59:59.997' as DATETIME)")
        res = tuple(self.conn)[0][0]
        assert isinstance(res, datetime.datetime)
        assert (not isinstance(res, datetime2))
        assert (res == self.max_date)
    def test_max_insert(self):
        # Maximum value round-tripped through an INSERT parameter.
        res = self.insert_and_select('test', self.max_date)
        assert isinstance(res, datetime.datetime)
        assert (not isinstance(res, datetime2))
        assert (res == self.max_date)
    def test_datetime(self):
        # Microsecond values chosen to be exactly representable at DATETIME's
        # 1/300 s tick (0, 3, and 7 ms), so equality holds after round-trip.
        for mks in (0, 3000, 7000):
            testval = datetime.datetime(2013, 1, 2, 3, 4, 5, mks)
            self.conn.execute_non_query(f'DELETE FROM {self.table_name}')
            res = self.insert_and_select('test', testval)
            assert isinstance(res, datetime.datetime)
            assert (not isinstance(res, datetime2))
            assert (res == testval)
    def test_datetime_params_as_dict(self):
        # Same round-trip, but binding parameters by name instead of position.
        testval = datetime.datetime(2013, 1, 2, 3, 4, 5, 3000)
        res = self.insert_and_select('test', testval, params_as_dict=True)
        assert isinstance(res, datetime.datetime)
        assert (not isinstance(res, datetime2))
        assert (res == testval)
def create_earley_parser(lexer_conf: LexerConf, parser_conf: ParserConf, options) -> earley.Parser:
    """Build an Earley parser variant selected by the lexer type.

    Args:
        lexer_conf: lexer configuration; lexer_type picks the dynamic,
            dynamic_complete or basic Earley front-end.
        parser_conf: grammar/parser configuration.
        options: parse options; ambiguity, tree_class, debug and
            ordered_sets are read here.

    Returns:
        A configured earley.Parser.
    """
    resolve_ambiguity = (options.ambiguity == 'resolve')
    # Consistency fix: `options` is dereferenced unconditionally on the line
    # above, so the old `options.debug if options else False` guard was dead
    # code — options can never be None here.
    debug = options.debug
    # In 'forest' mode the raw SPPF is returned, so no tree class is needed.
    tree_class = (options.tree_class or Tree) if (options.ambiguity != 'forest') else None

    extra = {}
    if (lexer_conf.lexer_type == 'dynamic'):
        f = create_earley_parser__dynamic
    elif (lexer_conf.lexer_type == 'dynamic_complete'):
        # complete_lex makes the dynamic lexer try every possible token split.
        extra['complete_lex'] = True
        f = create_earley_parser__dynamic
    else:
        f = create_earley_parser__basic
    return f(lexer_conf, parser_conf, resolve_ambiguity=resolve_ambiguity, debug=debug, tree_class=tree_class, ordered_sets=options.ordered_sets, **extra)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.