| id | content |
|---|---|
1674603
|
import sys
from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC
if sys.maxint == 2147483647:
SHIFT = 31
else:
SHIFT = 63
# XXX review the <Call> descrs to replace some EF=5 with EF=4 (elidable)
class TestString(BaseTestPyPyC):
def test_lookup_default_encoding(self):
def main(n):
import string
i = 0
letters = string.letters
uletters = unicode(string.letters)
while i < n:
i += letters[i % len(letters)] == uletters[i % len(letters)]
return i
log = self.run(main, [300], import_site=True)
assert log.result == 300
loop, = log.loops_by_filename(self.filepath)
assert loop.match("""
i14 = int_lt(i6, i9)
guard_true(i14, descr=...)
guard_not_invalidated(descr=...)
i16 = int_eq(i6, %d)
guard_false(i16, descr=...)
i15 = int_mod(i6, i10)
i17 = int_rshift(i15, %d)
i18 = int_and(i10, i17)
i19 = int_add(i15, i18)
i21 = int_lt(i19, 0)
guard_false(i21, descr=...)
i22 = int_ge(i19, i10)
guard_false(i22, descr=...)
i23 = strgetitem(p11, i19)
i24 = int_ge(i19, i12)
guard_false(i24, descr=...)
i25 = unicodegetitem(p13, i19)
p27 = newstr(1)
strsetitem(p27, 0, i23)
p30 = call(ConstClass(ll_str2unicode__rpy_stringPtr), p27, descr=...)
guard_no_exception(descr=...)
i32 = call(ConstClass(_ll_2_str_eq_checknull_char__rpy_unicodePtr_UniChar), p30, i25, descr=...)
guard_true(i32, descr=...)
i34 = int_add(i6, 1)
--TICK--
jump(..., descr=...)
""" % (-sys.maxint-1, SHIFT))
def test_long(self):
def main(n):
import string
i = 1
while i < n:
i += int(long(string.digits[i % len(string.digits)], 16))
return i
log = self.run(main, [1100], import_site=True)
assert log.result == main(1100)
loop, = log.loops_by_filename(self.filepath)
assert loop.match("""
i11 = int_lt(i6, i7)
guard_true(i11, descr=...)
guard_not_invalidated(descr=...)
i13 = int_eq(i6, %d) # value provided below
guard_false(i13, descr=...)
i15 = int_mod(i6, 10)
i17 = int_rshift(i15, %d) # value provided below
i18 = int_and(10, i17)
i19 = int_add(i15, i18)
i21 = int_lt(i19, 0)
guard_false(i21, descr=...)
i22 = int_ge(i19, 10)
guard_false(i22, descr=...)
i23 = strgetitem(p10, i19)
p25 = newstr(1)
strsetitem(p25, 0, i23)
p93 = call(ConstClass(fromstr), p25, 16, descr=<Callr . ri EF=4>)
guard_no_exception(descr=...)
i95 = getfield_gc_pure(p93, descr=<FieldS rpython.rlib.rbigint.rbigint.inst_size .*>)
i96 = int_gt(i95, #)
guard_false(i96, descr=...)
i94 = call(ConstClass(rbigint._toint_helper), p93, descr=<Calli . r EF=4>)
guard_no_exception(descr=...)
i95 = int_add_ovf(i6, i94)
guard_no_overflow(descr=...)
--TICK--
jump(..., descr=...)
""" % (-sys.maxint-1, SHIFT))
def test_str_mod(self):
def main(n):
s = 0
while n > 0:
s += len('%d %d' % (n, n))
n -= 1
return s
log = self.run(main, [1000])
assert log.result == main(1000)
loop, = log.loops_by_filename(self.filepath)
assert loop.match("""
i79 = int_gt(i74, 0)
guard_true(i79, descr=...)
guard_not_invalidated(descr=...)
p80 = call(ConstClass(ll_int2dec__Signed), i74, descr=<Callr . i EF=3>)
guard_no_exception(descr=...)
i85 = strlen(p80)
p86 = new(descr=<SizeDescr .+>)
p88 = newstr(23)
{{{
setfield_gc(p86, 0, descr=<FieldS stringbuilder.current_pos .+>)
setfield_gc(p86, p88, descr=<FieldP stringbuilder.current_buf .+>)
setfield_gc(p86, 23, descr=<FieldS stringbuilder.current_end .+>)
setfield_gc(p86, 23, descr=<FieldS stringbuilder.total_size .+>)
}}}
call(ConstClass(ll_append_res0__stringbuilderPtr_rpy_stringPtr), p86, p80, descr=<Callv 0 rr EF=5>)
guard_no_exception(descr=...)
i89 = getfield_gc(p86, descr=<FieldS stringbuilder.current_pos .+>)
i90 = getfield_gc(p86, descr=<FieldS stringbuilder.current_end .+>)
i91 = int_eq(i89, i90)
cond_call(i91, ConstClass(ll_grow_by__stringbuilderPtr_Signed), p86, 1, descr=<Callv 0 ri EF=5>)
guard_no_exception(descr=...)
i92 = getfield_gc(p86, descr=<FieldS stringbuilder.current_pos .+>)
i93 = int_add(i92, 1)
p94 = getfield_gc(p86, descr=<FieldP stringbuilder.current_buf .+>)
strsetitem(p94, i92, 32)
setfield_gc(p86, i93, descr=<FieldS stringbuilder.current_pos .+>)
call(ConstClass(ll_append_res0__stringbuilderPtr_rpy_stringPtr), p86, p80, descr=<Callv 0 rr EF=5>)
guard_no_exception(descr=...)
p95 = call(..., descr=<Callr . r EF=5>) # ll_build
guard_no_exception(descr=...)
i96 = strlen(p95)
i97 = int_add_ovf(i71, i96)
guard_no_overflow(descr=...)
i98 = int_sub(i74, 1)
--TICK--
jump(..., descr=...)
""")
def test_getattr_promote(self):
def main(n):
class A(object):
def meth_a(self):
return 1
def meth_b(self):
return 2
a = A()
l = ['a', 'b']
s = 0
for i in range(n):
name = 'meth_' + l[i & 1]
meth = getattr(a, name) # ID: getattr
s += meth()
return s
log = self.run(main, [1000])
assert log.result == main(1000)
loops = log.loops_by_filename(self.filepath)
assert len(loops) == 1
for loop in loops:
assert loop.match_by_id('getattr','''
guard_not_invalidated?
i32 = strlen(p31)
i34 = int_add(5, i32)
p35 = newstr(i34)
strsetitem(p35, 0, 109)
strsetitem(p35, 1, 101)
strsetitem(p35, 2, 116)
strsetitem(p35, 3, 104)
strsetitem(p35, 4, 95)
copystrcontent(p31, p35, 0, 5, i32)
i49 = call(ConstClass(_ll_2_str_eq_nonnull__rpy_stringPtr_rpy_stringPtr), p35, ConstPtr(ptr48), descr=<Calli [48] rr EF=0 OS=28>)
guard_value(i49, 1, descr=...)
''')
def test_remove_duplicate_method_calls(self):
def main(n):
lst = []
for i in range(n):
s = 'Hello %d' % i
t = s.lower() # ID: callone
u = s.lower() # ID: calltwo
lst.append(t)
lst.append(u)
return len(','.join(lst))
log = self.run(main, [1000])
assert log.result == main(1000)
loops = log.loops_by_filename(self.filepath)
loop, = loops
assert loop.match_by_id('callone', '''
p114 = call(ConstClass(ll_lower__rpy_stringPtr), p113, descr=<Callr . r EF=3>)
guard_no_exception(descr=...)
''')
assert loop.match_by_id('calltwo', '') # nothing
def test_move_method_call_out_of_loop(self):
def main(n):
lst = []
s = 'Hello %d' % n
for i in range(n):
t = s.lower() # ID: callone
lst.append(t)
return len(','.join(lst))
log = self.run(main, [1000])
assert log.result == main(1000)
loops = log.loops_by_filename(self.filepath)
loop, = loops
assert loop.match_by_id('callone', '') # nothing
def test_lookup_codec(self):
log = self.run("""
import codecs
def main(n):
for i in xrange(n):
codecs.lookup('utf8')
return i
""", [1000])
loop, = log.loops_by_filename(self.filepath)
assert loop.match("""
i45 = int_lt(i43, i26)
guard_true(i45, descr=...)
i46 = int_add(i43, 1)
setfield_gc(p15, i46, descr=<FieldS pypy.module.__builtin__.functional.W_XRangeIterator.inst_current 8>)
guard_not_invalidated(descr=...)
--TICK--
jump(..., descr=...)
""")
def test_decode_ascii(self):
log = self.run("""
def main(n):
for i in xrange(n):
unicode('abc')
return i
""", [1000])
loop, = log.loops_by_filename(self.filepath)
assert loop.match("""
i49 = int_lt(i47, i24)
guard_true(i49, descr=...)
i50 = int_add(i47, 1)
setfield_gc(p15, i50, descr=<FieldS pypy.module.__builtin__.functional.W_XRangeIterator.inst_current 8>)
guard_not_invalidated(descr=...)
p52 = call(ConstClass(str_decode_ascii__raise_unicode_exception_decode), ConstPtr(ptr38), 3, 1, descr=<Callr . rii EF=5>)
guard_no_exception(descr=...)
p53 = getfield_gc_pure(p52, descr=<FieldP tuple2.item0 .>)
guard_nonnull(p53, descr=...)
--TICK--
jump(..., descr=...)
""")
|
1674635
|
import numpy as np
import argparse
from base_module import Posenet, Camnet, discriminator, Encoder
from mmdgan_mh_enc import Pose_mmdgan_enc
import os
import random
import tensorflow as tf
import scipy.io as sio
import logging, logging.config
import sys
from eval_functions import err_3dpe
import ops
parse = argparse.ArgumentParser()
parse.add_argument("--batchsize", help= "the batch size used in training", default=128, type = int)
parse.add_argument("--epochs", help="number of epochs during training", default=50, type = int)
parse.add_argument("--latent_dim", help="dimension of latent space", default=1024, type = int)
parse.add_argument("--latent_dim_pose", help="dimension for pose in the latent space of discriminator", default=128, type=int)
parse.add_argument("--latent_dim_kcs", help="dimension for kcs in the latent space of discriminator", default=1024, type=int)
parse.add_argument("--d_output_dim", help="dimension for output of discriminator", default=8, type=int)
parse.add_argument("--lr", help="learning rate", default=1e-4, type=float)
parse.add_argument("--architecture", help="which architeture to use[mmdgan, mmdgan_enc]", default='mmdgan_enc', type=str)
parse.add_argument("--beta1", help="beta1 for adamoptimizor", default=0.5, type=float)
parse.add_argument("--diter", help="the number of discriminator updates oer generator updates", default=1, type=int)
parse.add_argument("--kernel", help="kernel type used in mmd[dot, mix_rbf, mix_rq]", default='mix_rq', type=str)
parse.add_argument("--repro_weight", help="weight of reprojection loss", default=10.0, type=float)
parse.add_argument("--cam_weight", help="weight of camera loss", default=10.0, type=float)
parse.add_argument("--gp_weight", help="weight of dot kernel in mix kernel", default=0.1, type=float)
parse.add_argument("--reg_weight", help="weight for regularizer", default=7.5, type=float)
parse.add_argument("--dot_weight", help="weight of dot kernel in mix kernel", default=10.0, type=float)
parse.add_argument("--lr_decay", help="learning rate decay rate", default=0.94, type=float)
parse.add_argument("--enc_weight", help="weight of encoder", default=10.0, type=float)
parse.add_argument("--sampling", help="set to true if generate samples", default=True, type=bool)
parse.add_argument("--checkpoint", help="which model to load", default=0, type=int)
# 931070 for gt data
# 971070 for shft
parse.add_argument("--num_samples", help="number of hypotheses", default=10, type=int)
parse.add_argument("--datatype", help="datatype used for training [GT, SHFT, GTMJ]", default='GT', type=str)
parse.add_argument("--load_path", help="specify the path to load model", default='./models', type=str)
args = parse.parse_args()
actions = ['Directions', 'Discussion', 'Eating', 'Greeting', 'Phoning', 'Photo', 'Posing', 'Purchases', 'Sitting',
'SittingDown', 'Smoking', 'Waiting', 'WalkDog', 'WalkTogether', 'Walking']
pose3d_dim = 16 * 3
pose2d_dim = 16 * 2
cam_dim = 6
lr = args.lr
model_name = '{}_regweight{}_encweight{}_2D{}'.format(args.architecture, args.reg_weight, args.enc_weight, args.datatype)
log_dir = 'logs_eval'
if not os.path.exists(log_dir):
os.makedirs(log_dir)
logging.config.fileConfig('./logging.conf')
logger = logging.getLogger()
fileHandler = logging.FileHandler("{0}/log.txt".format(log_dir))
logger.addHandler(fileHandler)
logger.info("Logs will be written to %s" % log_dir)
def log_arguments():
logger.info('Command: %s', ' '.join(sys.argv))
s = '\n'.join([' {}: {}'.format(arg, getattr(args, arg)) for arg in vars(args)])
s = 'Arguments:\n' + s
logger.info(s)
log_arguments()
posenet = Posenet(args.latent_dim, pose3d_dim)
camnet = Camnet(args.latent_dim, cam_dim)
disc = discriminator(args.latent_dim_pose, args.latent_dim_kcs, args.d_output_dim)
encoder = Encoder(args.latent_dim, args.latent_dim)
mmd_posenet = Pose_mmdgan_enc(posenet, camnet, disc, encoder, args.latent_dim, args.batchsize, log_dir, args.epochs, pose2d_dim, pose3d_dim,
args.kernel, args.repro_weight, args.cam_weight, args.gp_weight, args.reg_weight, args.dot_weight, args.enc_weight)
mmd_posenet.build_model()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
batchsize = args.batchsize
load_dir = os.path.join(args.load_path, model_name)
ckpt = tf.train.get_checkpoint_state(load_dir, latest_filename="checkpoint")
if args.checkpoint > 0:
ckpt_name = os.path.join(load_dir, "checkpoint-{}".format(args.checkpoint))
else:
ckpt_name = ckpt.model_checkpoint_path
mmd_posenet.saver.restore(sess, ckpt_name)
print('Loading model {}'.format(os.path.basename(ckpt_name)))
path = 'new_data/test/2d{}_3dTEM'.format(args.datatype)
path_cam = 'new_data/test/2d{}_3dCAM'.format(args.datatype)
logger.info('{0:>15} {1:>30} {2:>30}'.format('Action', 'Protocol1', 'Protocol2'))
val_best_all = []
valcam_best_all = []
val_zc_all = []
valcam_zc_all = []
for action in actions:
data_2d_3d_test = sio.loadmat('{}/{}_2d{}_3d_test.mat'.format(path, action, args.datatype))
data_cam = sio.loadmat('{}/{}_2d{}_3d_test.mat'.format(path_cam, action, args.datatype))
poses2d_eval = data_2d_3d_test['poses_2d'][::64, :]
poses3d_eval = data_2d_3d_test['poses_3d'][::64, :] / 1000
poses_3d_cam = data_cam['poses_3d'][::64, :] / 1000
poses_zc = []
posescam_zc = []
# generate results under zero code setting
for eval in range(poses2d_eval.shape[0] // batchsize):
noise_zc = np.zeros([batchsize, args.latent_dim])
poses, cam = mmd_posenet.inference(sess, poses2d_eval[eval * batchsize: (eval + 1) * batchsize],
poses3d_eval[eval * batchsize: (eval + 1) * batchsize], noise_zc,
lr)
poses_reshape = np.reshape(poses, [poses.shape[0], 3, 16])
k = np.reshape(cam, [cam.shape[0], 2, 3])
R = ops.compute_R(k) # recover rotation matrix from camera matrix
poses_cam = np.matmul(R, poses_reshape) # transfer pose from the template frame to the camera frame
poses_cam_reshape = np.reshape(poses_cam, [poses_cam.shape[0], -1])
posescam_zc.append(poses_cam_reshape)
poses_zc.append(poses)
poses_zc = np.vstack(poses_zc)
posescam_zc = np.vstack(posescam_zc)
# compute the error under zero code setting
val_zc = 0.0
valcam_zc = 0.0
for p in range(poses_zc.shape[0]):
err_zc = 1000 * err_3dpe(poses3d_eval[p:p + 1, :], poses_zc[p:p + 1, :], True)
errcam_zc = 1000 * err_3dpe(poses_3d_cam[p:p + 1, :], 1.1 * posescam_zc[p:p + 1, :], False)
# scale the output according to the ratio between poses in camera frame and poses in template frame in the training set
val_zc = val_zc + err_zc
valcam_zc = valcam_zc + errcam_zc
val_zc_all.append(err_zc)
valcam_zc_all.append(errcam_zc)
val_zc = val_zc / poses_zc.shape[0]
valcam_zc = valcam_zc/posescam_zc.shape[0]
# generate results for multiple hypotheses
poses_samples_all = []
posescam_samples_all = []
R_all = []
poses_repro_all = []
for eval in range(poses2d_eval.shape[0] // batchsize):
poses_samples_batch = []
posescam_samples_batch = []
poses_repro_batch = []
for i in range(args.num_samples):
z_test = np.random.normal(0, 1, (batchsize, args.latent_dim))
posespred, campred = mmd_posenet.inference(sess, poses2d_eval[eval * batchsize: (eval + 1) * batchsize],
poses3d_eval[eval * batchsize: (eval + 1) * batchsize], z_test,
lr)
posespred_reshape = np.reshape(posespred, [posespred.shape[0], 3, 16])
poses_samples_batch.append(posespred)
k = np.reshape(campred, [campred.shape[0], 2, 3])
R = ops.compute_R(k)
posespred_cam = np.matmul(R, posespred_reshape)
posespred_cam_reshape = np.reshape(posespred_cam, [posespred_cam.shape[0], -1])
posescam_samples_batch.append(posespred_cam_reshape)
poses_repro = np.reshape(np.matmul(k, posespred_reshape), [posespred.shape[0], -1])
poses_repro_batch.append(poses_repro)
poses_samples_batch = np.stack(poses_samples_batch, axis=1)
poses_samples_all.append(poses_samples_batch)
posescam_samples_batch = np.stack(posescam_samples_batch,axis=1)
posescam_samples_all.append(posescam_samples_batch)
poses_repro_batch = np.stack(poses_repro_batch, axis=1)
poses_repro_all.append(poses_repro_batch)
R_all.append(R)
poses_samples_all = np.concatenate(poses_samples_all, axis=0)
posescam_samples_all = np.concatenate(posescam_samples_all, axis=0)
poses_repro_all = np.concatenate(poses_repro_all, axis=0)
R_all = np.concatenate(R_all, axis=0)
# compute error for bh setting
err = np.zeros([poses_samples_all.shape[0], poses_samples_all.shape[1]])
err_cam = np.zeros([poses_samples_all.shape[0], poses_samples_all.shape[1]])
for p in range(err.shape[0]):
for s in range(args.num_samples):
err[p, s] = 1000 * err_3dpe(poses3d_eval[p:p + 1, :], poses_samples_all[p:p + 1, s, :], True)
err_cam[p, s] = 1000 * err_3dpe(poses_3d_cam[p:p + 1, :], 1.1 * posescam_samples_all[p:p + 1, s, :],
False) # scale the output according to the ratio between poses in camera
# frame and poses in template frame in the training set
val_best = np.mean(np.min(err, axis=1))
valcam_best = np.mean(np.min(err_cam, axis=1))
val_best_all.append(np.min(err, axis=1))
valcam_best_all.append(np.min(err_cam, axis=1))
logger.info('{0:<15} {1:>15.2f} {2:>15.2f} {3:>15.2f} {4:>15.2f}'.format(action, valcam_zc, valcam_best, val_zc, val_best ))
valcam_zc_all = np.array(valcam_zc_all)
val_zc_all = np.array(val_zc_all)
valcam_best_all = np.concatenate(valcam_best_all)
val_best_all = np.concatenate(val_best_all)
logger.info('{0:<15} {1:>15.2f} {2:>15.2f} {3:>15.2f} {4:>15.2f}'.format('Average', np.mean(valcam_zc_all), np.mean(valcam_best_all),
np.mean(val_zc_all), np.mean(val_best_all)))
# the result for each column represents: protocol 1 (zc, bh), protocol 2 (zc, bh)
|
1674639
|
arr = input().split()
for i in range(len(arr)):
if(arr[i]=='1'):
print(i)
break
|
1674652
|
def f(one, two, three, four, five, six, seven, eight, nine, ten, eleven, twelve):
pass
def g(): return 5 ** 2
|
1674668
|
from regex import regex
from credsweeper.credentials import LineData
from credsweeper.filters import Filter
class ValueTokenCheck(Filter):
"""Check if first substring of token is shorter than 5.
Split the candidate value into substrings on the separators " ;`{})(<>[]" and check whether the first substring is shorter than 5 characters.
Examples:
"my password"
"12);password"
"""
SPLIT_PATTERN = " |;|\\)|\\(|{|}|<|>|\\[|\\]|`"
def run(self, line_data: LineData) -> bool:
"""Run filter checks on received credential candidate data 'line_data'.
Args:
line_data: credential candidate data
Return:
True, if need to filter candidate and False if left
"""
if line_data.value is None:
return True
tokens = regex.split(self.SPLIT_PATTERN, line_data.value, maxsplit=1)
# If tokens has length 1, the split pattern is not present in the value and .split() returned the original value unchanged
if len(tokens) < 2:
return False
token = tokens[0]
if len(token) < 5:
return True
return False
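# A minimal demonstration (not part of the original filter) of how SPLIT_PATTERN
# behaves on a few example values; run() applies the same "first token shorter
# than 5" rule, and only when at least one separator is actually present.
if __name__ == "__main__":
    for value in ("my password", "12);password", "longfirsttoken rest"):
        tokens = regex.split(ValueTokenCheck.SPLIT_PATTERN, value, maxsplit=1)
        filtered = len(tokens) >= 2 and len(tokens[0]) < 5
        print(f"{value!r} -> first token {tokens[0]!r}, filtered: {filtered}")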
|
1674669
|
from __future__ import print_function, division
from gym import wrappers
from gym.wrappers import Monitor
import gym
import obstacle_env
def run(episodes=1):
env = gym.make('obstacle-v0')
env = Monitor(env, 'out', force=True)
for _ in range(episodes):
env.reset()
env.unwrapped.set_monitor(env) # to capture in-between frames
done = False
while not done:
action = env.unwrapped.dynamics.desired_action
observation, reward, done, info = env.step(action)
env.render()
env.close()
if __name__ == '__main__':
run()
|
1674673
|
import codecs
import os
import unicodedata
from common.dictionary import cedict_definition, add_dict_entry
hsk_word_level = {} # {"A" : 1, "ABC" : 1 }
hsk_char_level = {} # {"A" : 1, "B" : 1 , "C" : 1}
hsk_words = {} # {1 : set("A", "ABC"), ...}
hsk_chars = {}
hsk_words_2010 = {} # {1 : set("A", "ABC"), ...}
hsk_chars_2010 = {}
def get_hsk_level(hanzi):
if hanzi in hsk_word_level:
return hsk_word_level[hanzi]
elif hanzi in hsk_char_level:
return hsk_char_level[hanzi]
return 0
def get_hsk_word_level(hanzi):
if hanzi in hsk_word_level:
return hsk_word_level[hanzi]
return 0
def get_hsk_char_level(hanzi):
if hanzi in hsk_char_level:
return hsk_char_level[hanzi]
return 0
def get_hsk_word_level_negative_notfound(hanzi):
if hanzi in hsk_word_level:
return hsk_word_level[hanzi]
elif hanzi in cedict_definition:
return 0
return -1
def get_hsk_char_level_negative_notfound(hanzi):
if hanzi in hsk_char_level:
return hsk_char_level[hanzi]
elif hanzi in cedict_definition:
return 0
return -1
def do_hsk_parsing(directory):
parse_hsk_file(os.path.join(directory, "HSK Official With Definitions 2012 L1.txt"), 1)
parse_hsk_file(os.path.join(directory, "HSK Official With Definitions 2012 L2.txt"), 2)
parse_hsk_file(os.path.join(directory, "HSK Official With Definitions 2012 L3.txt"), 3)
parse_hsk_file(os.path.join(directory, "HSK Official With Definitions 2012 L4.txt"), 4)
parse_hsk_file(os.path.join(directory, "HSK Official With Definitions 2012 L5.txt"), 5)
parse_hsk_file(os.path.join(directory, "HSK Official With Definitions 2012 L6.txt"), 6)
build_hsk_extralists(hsk_words, hsk_chars)
parse_hsk_2010_file(os.path.join(directory, "New_HSK_2010.csv"))
build_hsk_extralists(hsk_words_2010, hsk_chars_2010)
# parse newer 2012 HSK format
def parse_hsk_file(in_file_name, hsklevel):
hsk_words[hsklevel] = set()
infile = codecs.open(in_file_name, 'r', "utf-8")
for line in infile:
splitted = line.strip().split("\t")
if len(splitted) >= 5:  # the definition is read from splitted[4], so the line needs at least 5 tab-separated fields
word = unicodedata.normalize("NFKC", splitted[0].strip()).replace('\ufeff', "")
if word != "":
hsk_words[hsklevel].add(word)
if word in hsk_word_level:
hsk_word_level[word] = min(hsk_word_level[word], hsklevel)
else:
hsk_word_level[word] = hsklevel
for somehanzi in word:
if somehanzi in hsk_char_level:
hsk_char_level[somehanzi] = min(hsk_char_level[somehanzi], hsklevel)
else:
hsk_char_level[somehanzi] = hsklevel
trad = splitted[1].strip()
pinyin = splitted[2].strip()
definition = splitted[4].strip()
add_dict_entry(word, trad, pinyin, definition)
infile.close()
def build_hsk_extralists(words, chars):
# build a list of characters from the words lists
for i in range(1, 7):
chars[i] = set()
for word in words[i]:
for char in word:
chars[i].add(char)
chars[2] = chars[2] - chars[1]
chars[3] = chars[3] - chars[2] - chars[1]
chars[4] = chars[4] - chars[3] - chars[2] - chars[1]
chars[5] = chars[5] - chars[4] - chars[3] - chars[2] - chars[1]
chars[6] = chars[6] - chars[5] - chars[4] - chars[3] - chars[2] - chars[1]
# build lists of character/word ranges; e.g. words[13] is the
# union of the words for HSK levels 1, 2, and 3.
for i in range(1, 6):
for j in range(i + 1, 7):
words[i * 10 + j] = words[i]
chars[i * 10 + j] = chars[i]
for k in range(i + 1, j + 1):
words[i * 10 + j] = words[i * 10 + j].union(words[k])
chars[i * 10 + j] = chars[i * 10 + j].union(chars[k])
def parse_hsk_2010_file(in_file_name):
infile = codecs.open(in_file_name, 'r', "utf-8")
hsk_words_2010[1] = set()
hsk_words_2010[2] = set()
hsk_words_2010[3] = set()
hsk_words_2010[4] = set()
hsk_words_2010[5] = set()
hsk_words_2010[6] = set()
for line in infile:
splitted = line.split(",")
if len(splitted) > 1:
hsklevel = int(splitted[0].strip().replace('\ufeff', ""))
word = unicodedata.normalize("NFKC", splitted[1].strip()).replace('\ufeff', "")
if word != "":
hsk_words_2010[hsklevel].add(word)
infile.close()
|
1674699
|
import time
import asyncio
import random
from argparse import ArgumentParser
from lbry.wallet.network import ClientSession
class AgentSmith(ClientSession):
async def do_nefarious_things(self):
await self.send_request('blockchain.claimtrie.search', {
'no_totals': True,
'offset': random.choice(range(0, 300, 20)),
'limit': 20,
'any_tags': (
random.choice([[
random.choice(['gaming', 'games', 'game']) +
random.choice(['entertainment', 'playthrough', 'funny']) +
random.choice(['xbox', 'xbox one', 'xbox news'])
], [
random.choice(['aliens', 'alien', 'ufo', 'ufos']) +
random.choice(['news', 'sighting', 'sightings'])
], [
random.choice(['art', 'automotive']),
random.choice(['blockchain', 'economics', 'food']),
random.choice(['funny', 'learnings', 'nature']),
random.choice(['news', 'science', 'technology'])
]
])
),
'not_tags': random.choice([[], [
'porn', 'mature', 'xxx', 'nsfw'
]]),
'order_by': random.choice([
['release_time'],
['trending_global', 'trending_mixed'],
['effective_amount']
])
})
class AgentSmithProgram:
def __init__(self, host, port):
self.host, self.port = host, port
self.agent_smiths = []
async def make_one_more_of_them(self):
smith = AgentSmith(network=None, server=(self.host, self.port))
await smith.create_connection()
self.agent_smiths.append(smith)
async def coordinate_nefarious_activity(self):
start = time.perf_counter()
await asyncio.gather(
*(s.do_nefarious_things() for s in self.agent_smiths),
return_exceptions=True
)
return time.perf_counter() - start
def __len__(self):
return len(self.agent_smiths)
async def delete_one_smith(self):
if self.agent_smiths:
await self.agent_smiths.pop().close()
async def delete_program(self):
await asyncio.gather(*(
s.close() for s in self.agent_smiths
))
async def main(host, port):
smiths = AgentSmithProgram(host, port)
await smiths.make_one_more_of_them()
activity = asyncio.create_task(smiths.coordinate_nefarious_activity())
ease_off = 0
for i in range(1000):
await asyncio.sleep(1)
if activity.done() and activity.result() < .9:
print('more, more, more...')
await asyncio.gather(*(
asyncio.create_task(smiths.make_one_more_of_them()) for _ in range(20)
))
else:
print('!!!!!!!!!!!!!!')
print('IS NEO LOSING?')
print('!!!!!!!!!!!!!!')
await asyncio.gather(*(
asyncio.create_task(smiths.delete_one_smith()) for _ in range(21)
))
print(f'coordinate all {len(smiths)} smiths to action')
activity = asyncio.create_task(smiths.coordinate_nefarious_activity())
print('finishing up any remaining actions')
await activity
print('neo has won, deleting agents...')
await smiths.delete_program()
print('done.')
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('--host', dest='host', default='localhost', type=str)
parser.add_argument('--port', dest='port', default=50001, type=int)
args = parser.parse_args()
asyncio.run(main(args.host, args.port))
|
1674721
|
import boto3
import json
from boto3.dynamodb.conditions import Key, Attr
dynamodb = boto3.resource('dynamodb','ap-southeast-1')
users_table=dynamodb.Table('codebreaker-users')
problems_table = dynamodb.Table('codebreaker-problems')
def getUsersTable():
resp = users_table.scan(ProjectionExpression='username, problemScores')
val = resp['Items']
while 'LastEvaluatedKey' in resp:
resp = users_table.scan(ExclusiveStartKey=resp['LastEvaluatedKey'],ProjectionExpression='username, problemScores')
users = resp['Items']
val.extend(users)  # lists have no update(); extend the accumulated items instead
p = getAllProblemsLimited()
for t in val:
bad = []
for x in t['problemScores']:
try:
r = p[x]
except KeyError:
bad.append(x)
for i in bad:
t['problemScores'].pop(i)
return val
def getAllProblemsLimited():
res = problems_table.scan(
ProjectionExpression = 'problemName, analysisVisible, noACs'
)['Items']
x = {}
for i in res:
x[i['problemName']] = {
'analysisVisible':i['analysisVisible'],
'noAC': i['noACs']
}
return x
def getUserInfoFromUsername(username):
scan_kwargs = {
'FilterExpression':Key('username').eq(username)
}
done = False
start_key = None
while not done:
if start_key:
scan_kwargs['ExclusiveStartKey']= start_key
response = users_table.scan(**scan_kwargs)
res = response.get('Items',[])
if len(res) > 0:
return res[0]
start_key = response.get('LastEvaluatedKey',None)
done = start_key is None
placeHolder = {
'email' : '',
'school':'',
'role':'',
'username':'',
'problemScores':{},
}
return placeHolder
|
1674735
|
import fply.dylib
import fply.rpc
if fply.rpc.available():
FPLY = fply.rpc.FPLY
elif fply.dylib.available():
FPLY = fply.dylib.FPLY
else:
import sys
sys.stderr.write("!! Cannot find binary fairplay module, fallback to dummy\n")
sys.stderr.write("!! Most clients will refuse to connect\n")
from fply.dummy import FPLY
|
1674745
|
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from ..builder import NECKS
@NECKS.register_module()
class ChannelMapper(BaseModule):
r"""Channel Mapper to reduce/increase channels of backbone features.
This is used to reduce/increase channels of backbone features.
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale).
kernel_size (int, optional): kernel_size for reducing channels (used
at each scale). Default: 3.
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None.
norm_cfg (dict, optional): Config dict for normalization layer.
Default: None.
act_cfg (dict, optional): Config dict for activation layer in
ConvModule. Default: dict(type='ReLU').
num_outs (int, optional): Number of output feature maps. There
would be extra_convs when num_outs larger than the length
of in_channels.
init_cfg (dict or list[dict], optional): Initialization config dict.
Example:
>>> import torch
>>> in_channels = [2, 3, 5, 7]
>>> scales = [340, 170, 84, 43]
>>> inputs = [torch.rand(1, c, s, s)
... for c, s in zip(in_channels, scales)]
>>> self = ChannelMapper(in_channels, 11, 3).eval()
>>> outputs = self.forward(inputs)
>>> for i in range(len(outputs)):
... print(f'outputs[{i}].shape = {outputs[i].shape}')
outputs[0].shape = torch.Size([1, 11, 340, 340])
outputs[1].shape = torch.Size([1, 11, 170, 170])
outputs[2].shape = torch.Size([1, 11, 84, 84])
outputs[3].shape = torch.Size([1, 11, 43, 43])
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=3,
conv_cfg=None,
norm_cfg=None,
act_cfg=dict(type='ReLU'),
num_outs=None,
init_cfg=dict(
type='Xavier', layer='Conv2d', distribution='uniform')):
super(ChannelMapper, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.extra_convs = None
if num_outs is None:
num_outs = len(in_channels)
self.convs = nn.ModuleList()
for in_channel in in_channels:
self.convs.append(
ConvModule(
in_channel,
out_channels,
kernel_size,
padding=(kernel_size - 1) // 2,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
if num_outs > len(in_channels):
self.extra_convs = nn.ModuleList()
for i in range(len(in_channels), num_outs):
if i == len(in_channels):
in_channel = in_channels[-1]
else:
in_channel = out_channels
self.extra_convs.append(
ConvModule(
in_channel,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == len(self.convs)
outs = [self.convs[i](inputs[i]) for i in range(len(inputs))]
if self.extra_convs:
for i in range(len(self.extra_convs)):
if i == 0:
outs.append(self.extra_convs[0](inputs[-1]))
else:
outs.append(self.extra_convs[i](outs[-1]))
return tuple(outs)
|
1674757
|
import argparse
import contextlib
import pathlib
import sys
from datetime import datetime
from typing import Mapping, TextIO, Tuple, Iterable, Optional
import semver
import toml
from poetry2conda import __version__
def convert(
file: TextIO, include_dev: bool = False, extras: Optional[Iterable[str]] = None
) -> str:
""" Convert a pyproject.toml file to a conda environment YAML
This is the main function of poetry2conda, where all parsing, converting,
etc. gets done.
Parameters
----------
file
A file-like object containing a pyproject.toml file.
include_dev
Whether to include the dev dependencies in the resulting environment.
extras
The name of extras to include in the output. Can be None or empty
for no extras.
Returns
-------
The contents of an environment.yaml file as a string.
"""
if extras is None:
extras = []
poetry2conda_config, poetry_config = parse_pyproject_toml(file)
env_name = poetry2conda_config["name"]
poetry_dependencies = poetry_config.get("dependencies", {})
if include_dev:
poetry_dependencies.update(poetry_config.get("dev-dependencies", {}))
poetry_extras = poetry_config.get("extras", {})
# We mark the items listed in the selected extras as non-optional
for extra in extras:
for item in poetry_extras[extra]:
dep = poetry_dependencies[item]
if isinstance(dep, dict):
dep["optional"] = False
conda_constraints = poetry2conda_config.get("dependencies", {})
dependencies, pip_dependencies = collect_dependencies(
poetry_dependencies, conda_constraints
)
conda_yaml = to_yaml_string(env_name, dependencies, pip_dependencies)
return conda_yaml
def convert_version(spec_str: str) -> str:
""" Convert a poetry version spec to a conda-compatible version spec.
Poetry accepts tilde and caret version specs, but conda does not support
them. This function uses the `poetry-semver` package to parse it and
transform it to regular version spec ranges.
Parameters
----------
spec_str
A poetry version specification string.
Returns
-------
The same version specification without tilde or caret.
"""
spec = semver.parse_constraint(spec_str)
if isinstance(spec, semver.Version):
converted = f"=={str(spec)}"
elif isinstance(spec, semver.VersionRange):
converted = str(spec)
elif isinstance(spec, semver.VersionUnion):
raise ValueError("Complex version constraints are not supported at the moment.")
return converted
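# Hedged illustration (not part of poetry2conda itself): poetry-semver expands
# caret/tilde constraints into plain ranges, so the conversions look roughly like
#   convert_version("1.2.3")  -> "==1.2.3"
#   convert_version("^1.2")   -> ">=1.2,<2.0.0"   (exact rendering depends on poetry-semver)
#   convert_version("~1.2.3") -> ">=1.2.3,<1.3.0" (exact rendering depends on poetry-semver)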
def parse_pyproject_toml(file: TextIO) -> Tuple[Mapping, Mapping]:
""" Parse a pyproject.toml file
This function assumes that the pyproject.toml contains a poetry and
poetry2conda config sections.
Parameters
----------
file
A file-like object containing a pyproject.toml file.
Returns
-------
A tuple with the poetry2conda and poetry config.
Raises
------
RuntimeError
When an expected configuration section is missing.
"""
pyproject_toml = toml.loads(file.read())
poetry_config = pyproject_toml.get("tool", {}).get("poetry", {})
if not poetry_config:
raise RuntimeError(f"tool.poetry section was not found on {file.name}")
poetry2conda_config = pyproject_toml.get("tool", {}).get("poetry2conda", {})
if not poetry2conda_config:
raise RuntimeError(f"tool.poetry2conda section was not found on {file.name}")
if "name" not in poetry2conda_config or not isinstance(
poetry2conda_config["name"], str
):
raise RuntimeError(f"tool.poetry2conda.name entry was not found on {file.name}")
return poetry2conda_config, poetry_config
def collect_dependencies(
poetry_dependencies: Mapping, conda_constraints: Mapping
) -> Tuple[Mapping, Mapping]:
""" Organize and apply conda constraints to dependencies
Parameters
----------
poetry_dependencies
A dictionary with dependencies as declared with poetry.
conda_constraints
A dictionary with conda constraints as declared with poetry2conda.
Returns
-------
A tuple with the modified dependencies and the dependencies that must be
installed with pip.
"""
dependencies = {}
pip_dependencies = {}
# 1. Do a first pass to change pip to conda packages
for name, conda_dict in conda_constraints.items():
if name in poetry_dependencies and "git" in poetry_dependencies[name]:
poetry_dependencies[name] = conda_dict["version"]
# 2. Now do the conversion
for name, constraint in poetry_dependencies.items():
if isinstance(constraint, str):
dependencies[name] = convert_version(constraint)
elif isinstance(constraint, dict):
if constraint.get("optional", False):
continue
if "git" in constraint:
git = constraint["git"]
tag = constraint["tag"]
pip_dependencies[f"git+{git}@{tag}#egg={name}"] = None
elif "version" in constraint:
dependencies[name] = convert_version(constraint["version"])
else:
raise ValueError(
f"This converter only supports normal dependencies and "
f"git dependencies. No path, url, python restricted, "
f"environment markers or multiple constraints. In your "
f'case, check the "{name}" dependency. Sorry.'
)
else:
raise ValueError(
f"This converter only supports normal dependencies and "
f"git dependencies. No multiple constraints. In your "
f'case, check the "{name}" dependency. Sorry.'
)
if name in conda_constraints:
conda_dict = conda_constraints[name]
if "name" in conda_dict:
new_name = conda_dict["name"]
dependencies[new_name] = dependencies.pop(name)
name = new_name
# do channel last, because it may move from dependencies to pip_dependencies
if "channel" in conda_dict:
channel = conda_dict["channel"]
if channel == "pip":
pip_dependencies[name] = dependencies.pop(name)
else:
new_name = f"{channel}::{name}"
dependencies[new_name] = dependencies.pop(name)
if pip_dependencies:
dependencies["pip"] = None
return dependencies, pip_dependencies
def to_yaml_string(
env_name: str, dependencies: Mapping, pip_dependencies: Mapping
) -> str:
""" Converts dependencies to a string in YAML format.
Note that there is no third party library to manage the YAML format. This is
to avoid an additional package dependency (like pyyaml, which is already
one of the packages that behaves badly in conda+pip mixed environments).
But also because our YAML is very simple
Parameters
----------
env_name
Name for the conda environment.
dependencies
Regular conda dependencies.
pip_dependencies
Pure pip dependencies.
Returns
-------
A string with an environment.yaml definition usable by conda.
"""
deps_str = []
for name, version in dependencies.items():
version = version or ""
deps_str.append(f" - {name}{version}")
if pip_dependencies:
deps_str.append(f" - pip:")
for name, version in pip_dependencies.items():
version = version or ""
deps_str.append(f" - {name}{version}")
deps_str = "\n".join(deps_str)
date_str = datetime.now().strftime("%c")
conda_yaml = f"""
###############################################################################
# NOTE: This file has been auto-generated by poetry2conda
# poetry2conda version = {__version__}
# date: {date_str}
###############################################################################
# If you want to change the contents of this file, you should probably change
# the pyproject.toml file and then use poetry2conda again to update this file.
# Alternatively, stop using (ana)conda.
###############################################################################
name: {env_name}
dependencies:
{deps_str}
""".lstrip()
return conda_yaml
def write_file(filename: str, contents: str) -> None:
context = contextlib.ExitStack()
if filename == '-':
f = sys.stdout
else:
environment_yaml = pathlib.Path(filename)
if not environment_yaml.exists():
environment_yaml.parent.mkdir(parents=True, exist_ok=True)
f = context.enter_context(environment_yaml.open('w'))
with context:
f.write(contents)
def main():
parser = argparse.ArgumentParser(
description="Convert a poetry-based pyproject.toml "
"to a conda environment.yaml"
)
parser.add_argument(
"pyproject",
metavar="TOML",
type=argparse.FileType("r"),
help="pyproject.toml input file.",
)
parser.add_argument(
"environment",
metavar="YAML",
type=str,
help="environment.yaml output file.",
)
parser.add_argument(
"--dev", action="store_true", help="include dev dependencies",
)
parser.add_argument(
"--extras", "-E", action="append", help="Add extra requirements",
)
parser.add_argument(
"--version", action="version", version=f"%(prog)s (version {__version__})"
)
args = parser.parse_args()
converted_obj = convert(args.pyproject, include_dev=args.dev, extras=args.extras)
write_file(args.environment, converted_obj)
if __name__ == "__main__":
main()
|
1674764
|
import random
import numpy as np
import torch
def set_random_seed(seed):
if seed < 0:
return
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# torch.cuda.manual_seed_all(seed)
def worker_init_fn(worker_id):
"""The function is designed for pytorch multi-process dataloader.
Note that we use the pytorch random generator to generate a base_seed.
Please try to be consistent.
References:
https://pytorch.org/docs/stable/notes/faq.html#dataloader-workers-random-seed
"""
base_seed = torch.IntTensor(1).random_().item()
# print(worker_id, base_seed)
np.random.seed(base_seed + worker_id)
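if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): wire worker_init_fn
    # into a torch DataLoader so every worker process derives its numpy seed from
    # the base seed drawn by the parent process through the torch RNG.
    from torch.utils.data import DataLoader, TensorDataset

    set_random_seed(0)
    dataset = TensorDataset(torch.arange(8, dtype=torch.float32))
    loader = DataLoader(dataset, batch_size=2, num_workers=2,
                        worker_init_fn=worker_init_fn)
    for (batch,) in loader:
        print(batch)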
|
1674774
|
import FWCore.ParameterSet.Config as cms
#for dnn classifier
from Configuration.ProcessModifiers.trackdnn_cff import trackdnn
from RecoTracker.IterativeTracking.dnnQualityCuts import qualityCutDictionary
# This step runs over all clusters
# run only if there are high pT jets
jetsForCoreTracking = cms.EDFilter('CandPtrSelector', src = cms.InputTag('ak4CaloJetsForTrk'), cut = cms.string('pt > 100 && abs(eta) < 2.5'), filter = cms.bool(False))
jetsForCoreTrackingBarrel = jetsForCoreTracking.clone( cut = 'pt > 100 && abs(eta) < 2.5' )
jetsForCoreTrackingEndcap = jetsForCoreTracking.clone( cut = 'pt > 100 && abs(eta) > 1.4 && abs(eta) < 2.5' )
# care only at tracks from main PV
firstStepGoodPrimaryVertices = cms.EDFilter('PrimaryVertexObjectFilter',
filterParams = cms.PSet(
minNdof = cms.double(25.0),
maxZ = cms.double(15.0),
maxRho = cms.double(2.0)
),
src=cms.InputTag('firstStepPrimaryVertices')
)
import RecoTracker.TkSeedingLayers.seedingLayersEDProducer_cfi as _mod
# SEEDING LAYERS
jetCoreRegionalStepSeedLayers = _mod.seedingLayersEDProducer.clone(
layerList = ['BPix1+BPix2', 'BPix1+BPix3', 'BPix2+BPix3',
'BPix1+FPix1_pos', 'BPix1+FPix1_neg',
'BPix2+FPix1_pos', 'BPix2+FPix1_neg',
'FPix1_pos+FPix2_pos', 'FPix1_neg+FPix2_neg',
#'BPix2+TIB1','BPix2+TIB2',
'BPix3+TIB1','BPix3+TIB2'],
TIB = dict(
matchedRecHits = cms.InputTag('siStripMatchedRecHits','matchedRecHit'),
TTRHBuilder = cms.string('WithTrackAngle'),
clusterChargeCut = cms.PSet(refToPSet_ = cms.string('SiStripClusterChargeCutNone'))
),
BPix = dict(
useErrorsFromParam = cms.bool(True),
hitErrorRPhi = cms.double(0.0027),
hitErrorRZ = cms.double(0.006),
TTRHBuilder = cms.string('WithTrackAngle'),
HitProducer = cms.string('siPixelRecHits'),
#skipClusters = cms.InputTag('jetCoreRegionalStepClusters')
),
FPix = dict(
useErrorsFromParam = cms.bool(True),
hitErrorRPhi = cms.double(0.0051),
hitErrorRZ = cms.double(0.0036),
TTRHBuilder = cms.string('WithTrackAngle'),
HitProducer = cms.string('siPixelRecHits'),
#skipClusters = cms.InputTag('jetCoreRegionalStepClusters')
)
)
from Configuration.Eras.Modifier_trackingPhase1_cff import trackingPhase1
_layerListForPhase1 = [
'BPix1+BPix2', 'BPix1+BPix3', 'BPix1+BPix4',
'BPix2+BPix3', 'BPix2+BPix4',
'BPix3+BPix4',
'BPix1+FPix1_pos', 'BPix1+FPix1_neg',
'BPix2+FPix1_pos', 'BPix2+FPix1_neg',
'FPix1_pos+FPix2_pos', 'FPix1_neg+FPix2_neg',
'FPix1_pos+FPix3_pos', 'FPix1_neg+FPix3_neg',
'FPix2_pos+FPix3_pos', 'FPix2_neg+FPix3_neg',
#'BPix3+TIB1','BPix3+TIB2'
'BPix4+TIB1','BPix4+TIB2'
]
trackingPhase1.toModify(jetCoreRegionalStepSeedLayers, layerList = _layerListForPhase1)
# TrackingRegion
from RecoTauTag.HLTProducers.tauRegionalPixelSeedTrackingRegions_cfi import tauRegionalPixelSeedTrackingRegions as _tauRegionalPixelSeedTrackingRegions
jetCoreRegionalStepTrackingRegions = _tauRegionalPixelSeedTrackingRegions.clone(
RegionPSet=dict(
ptMin = 10,
deltaPhiRegion = 0.20,
deltaEtaRegion = 0.20,
JetSrc = 'jetsForCoreTracking',
vertexSrc = 'firstStepGoodPrimaryVertices',
howToUseMeasurementTracker = 'Never')
)
jetCoreRegionalStepEndcapTrackingRegions = jetCoreRegionalStepTrackingRegions.clone(
RegionPSet=dict(
JetSrc = 'jetsForCoreTrackingEndcap')
)
# Seeding
from RecoTracker.TkHitPairs.hitPairEDProducer_cfi import hitPairEDProducer as _hitPairEDProducer
jetCoreRegionalStepHitDoublets = _hitPairEDProducer.clone(
seedingLayers = 'jetCoreRegionalStepSeedLayers',
trackingRegions = 'jetCoreRegionalStepTrackingRegions',
produceSeedingHitSets = True,
maxElementTotal = 12000000,
)
jetCoreRegionalStepEndcapHitDoublets = jetCoreRegionalStepHitDoublets.clone(
trackingRegions = 'jetCoreRegionalStepEndcapTrackingRegions',
)
from RecoTracker.TkSeedGenerator.seedCreatorFromRegionConsecutiveHitsEDProducer_cff import seedCreatorFromRegionConsecutiveHitsEDProducer as _seedCreatorFromRegionConsecutiveHitsEDProducer
jetCoreRegionalStepSeeds = _seedCreatorFromRegionConsecutiveHitsEDProducer.clone(
seedingHitSets = 'jetCoreRegionalStepHitDoublets',
forceKinematicWithRegionDirection = True
)
import RecoTracker.TkSeedGenerator.deepCoreSeedGenerator_cfi
jetCoreRegionalStepSeedsBarrel = RecoTracker.TkSeedGenerator.deepCoreSeedGenerator_cfi.deepCoreSeedGenerator.clone(#to run MCtruthSeedGenerator clone here from Validation.RecoTrack
vertices = "firstStepPrimaryVertices",
cores = "jetsForCoreTrackingBarrel"
)
jetCoreRegionalStepSeedsEndcap = jetCoreRegionalStepSeeds.clone(
seedingHitSets = 'jetCoreRegionalStepEndcapHitDoublets',
)
# QUALITY CUTS DURING TRACK BUILDING
import TrackingTools.TrajectoryFiltering.TrajectoryFilter_cff
jetCoreRegionalStepTrajectoryFilter = TrackingTools.TrajectoryFiltering.TrajectoryFilter_cff.CkfBaseTrajectoryFilter_block.clone(
minimumNumberOfHits = 4,
seedPairPenalty = 0,
minPt = 0.1
)
jetCoreRegionalStepBarrelTrajectoryFilter = jetCoreRegionalStepTrajectoryFilter.clone(
minimumNumberOfHits = 2,
maxConsecLostHits = 2,
maxLostHitsFraction = 1.1,
seedPairPenalty = 0,
minPt = 0.9 ## should it be slightly decrease ?
)
jetCoreRegionalStepEndcapTrajectoryFilter = jetCoreRegionalStepTrajectoryFilter.clone()
from Configuration.Eras.Modifier_pp_on_XeXe_2017_cff import pp_on_XeXe_2017
from Configuration.ProcessModifiers.pp_on_AA_cff import pp_on_AA
(pp_on_XeXe_2017 | pp_on_AA).toModify(jetCoreRegionalStepTrajectoryFilter, minPt=5.0)
import TrackingTools.KalmanUpdators.Chi2MeasurementEstimator_cfi
jetCoreRegionalStepChi2Est = TrackingTools.KalmanUpdators.Chi2MeasurementEstimator_cfi.Chi2MeasurementEstimator.clone(
ComponentName = 'jetCoreRegionalStepChi2Est',
nSigma = 3.0,
MaxChi2 = 30.0
)
# TRACK BUILDING
import RecoTracker.CkfPattern.GroupedCkfTrajectoryBuilder_cfi
#need to also load the refToPSet_ used by GroupedCkfTrajectoryBuilder
CkfBaseTrajectoryFilter_block = RecoTracker.CkfPattern.GroupedCkfTrajectoryBuilder_cfi.CkfBaseTrajectoryFilter_block
jetCoreRegionalStepTrajectoryBuilder = RecoTracker.CkfPattern.GroupedCkfTrajectoryBuilder_cfi.GroupedCkfTrajectoryBuilder.clone(
MeasurementTrackerName = '',
trajectoryFilter = cms.PSet(refToPSet_ = cms.string('jetCoreRegionalStepTrajectoryFilter')),
#clustersToSkip = cms.InputTag('jetCoreRegionalStepClusters'),
maxCand = 50,
estimator = 'jetCoreRegionalStepChi2Est',
maxDPhiForLooperReconstruction = cms.double(2.0),
maxPtForLooperReconstruction = cms.double(0.7)
)
jetCoreRegionalStepBarrelTrajectoryBuilder = RecoTracker.CkfPattern.GroupedCkfTrajectoryBuilder_cfi.GroupedCkfTrajectoryBuilder.clone(
MeasurementTrackerName = '',
trajectoryFilter = cms.PSet(refToPSet_ = cms.string('jetCoreRegionalStepBarrelTrajectoryFilter')),
#clustersToSkip = cms.InputTag('jetCoreRegionalStepClusters'),
maxCand = 50,
estimator = 'jetCoreRegionalStepChi2Est',
keepOriginalIfRebuildFails = True,
lockHits = False,
requireSeedHitsInRebuild = False
)
jetCoreRegionalStepEndcapTrajectoryBuilder = jetCoreRegionalStepTrajectoryBuilder.clone(
trajectoryFilter = cms.PSet(refToPSet_ = cms.string('jetCoreRegionalStepEndcapTrajectoryFilter')),
#clustersToSkip = cms.InputTag('jetCoreRegionalStepClusters'),
)
#customized cleaner for DeepCore
from TrackingTools.TrajectoryCleaning.TrajectoryCleanerBySharedHits_cfi import trajectoryCleanerBySharedHits
jetCoreRegionalStepDeepCoreTrajectoryCleaner = trajectoryCleanerBySharedHits.clone(
ComponentName = 'jetCoreRegionalStepDeepCoreTrajectoryCleaner',
fractionShared = 0.45
)
############## to run MCtruthSeedGenerator ####################
#import RecoTracker.TkSeedGenerator.deepCoreSeedGenerator_cfi
#import Validation.RecoTrack.JetCoreMCtruthSeedGenerator_cfi
#seedingDeepCore.toReplaceWith(jetCoreRegionalStepSeedsBarrel,
# RecoTracker.TkSeedGenerator.deepCoreSeedGenerator_cfi.deepCoreSeedGenerator.clone(#to run MCtruthSeedGenerator clone here from Validation.RecoTrack
# vertices="firstStepPrimaryVertices"
# )
#)
# MAKING OF TRACK CANDIDATES
import RecoTracker.CkfPattern.CkfTrackCandidates_cfi
jetCoreRegionalStepTrackCandidates = RecoTracker.CkfPattern.CkfTrackCandidates_cfi.ckfTrackCandidates.clone(
src = 'jetCoreRegionalStepSeeds',
maxSeedsBeforeCleaning = 10000,
TrajectoryBuilderPSet = cms.PSet( refToPSet_ = cms.string('jetCoreRegionalStepTrajectoryBuilder')),
NavigationSchool = 'SimpleNavigationSchool',
### these two parameters are relevant only for the CachingSeedCleanerBySharedInput
#numHitsForSeedCleaner = cms.int32(50),
#onlyPixelHitsForSeedCleaner = cms.bool(True),
)
jetCoreRegionalStepBarrelTrackCandidates = jetCoreRegionalStepTrackCandidates.clone(
src = 'jetCoreRegionalStepSeedsBarrel',
TrajectoryBuilderPSet = cms.PSet( refToPSet_ = cms.string('jetCoreRegionalStepBarrelTrajectoryBuilder')),
### these two parameters are relevant only for the CachingSeedCleanerBySharedInput
#numHitsForSeedCleaner = cms.int32(50),
#onlyPixelHitsForSeedCleaner = cms.bool(True),
TrajectoryCleaner = 'jetCoreRegionalStepDeepCoreTrajectoryCleaner',
doSeedingRegionRebuilding = True,
)
jetCoreRegionalStepEndcapTrackCandidates = jetCoreRegionalStepTrackCandidates.clone(
src = 'jetCoreRegionalStepSeedsEndcap',
TrajectoryBuilderPSet = cms.PSet( refToPSet_ = cms.string('jetCoreRegionalStepEndcapTrajectoryBuilder')),
### these two parameters are relevant only for the CachingSeedCleanerBySharedInput
#numHitsForSeedCleaner = cms.int32(50),
#onlyPixelHitsForSeedCleaner = cms.bool(True),
)
# TRACK FITTING
import RecoTracker.TrackProducer.TrackProducer_cfi
jetCoreRegionalStepTracks = RecoTracker.TrackProducer.TrackProducer_cfi.TrackProducer.clone(
AlgorithmName = 'jetCoreRegionalStep',
src = 'jetCoreRegionalStepTrackCandidates',
Fitter = 'FlexibleKFFittingSmoother'
)
jetCoreRegionalStepBarrelTracks = jetCoreRegionalStepTracks.clone(
src = 'jetCoreRegionalStepBarrelTrackCandidates',
)
jetCoreRegionalStepEndcapTracks = jetCoreRegionalStepTracks.clone(
src = 'jetCoreRegionalStepEndcapTrackCandidates',
)
from Configuration.Eras.Modifier_fastSim_cff import fastSim
import RecoTracker.FinalTrackSelectors.trackListMerger_cfi
_fastSim_jetCoreRegionalStepTracks = RecoTracker.FinalTrackSelectors.trackListMerger_cfi.trackListMerger.clone(
TrackProducers = [],
hasSelector = [],
selectedTrackQuals = [],
copyExtras = True
)
fastSim.toReplaceWith(jetCoreRegionalStepTracks,_fastSim_jetCoreRegionalStepTracks)
# Final selection
from RecoTracker.FinalTrackSelectors.TrackCutClassifier_cff import *
jetCoreRegionalStep = TrackCutClassifier.clone(
src = 'jetCoreRegionalStepTracks',
mva = dict(
minPixelHits = [1,1,1],
maxChi2 = [9999.,9999.,9999.],
maxChi2n = [1.6,1.0,0.7],
minLayers = [3,5,5],
min3DLayers = [1,2,3],
maxLostLayers = [4,3,2],
maxDz = [0.5,0.35,0.2],
maxDr = [0.3,0.2,0.1]
),
vertices = 'firstStepGoodPrimaryVertices'
)
jetCoreRegionalStepBarrel = jetCoreRegionalStep.clone(
src = 'jetCoreRegionalStepBarrelTracks',
mva = dict(
# minPixelHits = [1,1,1], # they could be easily increased to at least 2 or 3 !
min3DLayers = [1,2,2],
),
)
from RecoTracker.FinalTrackSelectors.TrackMVAClassifierPrompt_cfi import *
trackingPhase1.toReplaceWith(jetCoreRegionalStep, TrackMVAClassifierPrompt.clone(
mva = dict(GBRForestLabel = 'MVASelectorJetCoreRegionalStep_Phase1'),
src = 'jetCoreRegionalStepTracks',
qualityCuts = [-0.2,0.0,0.4]
))
trackingPhase1.toReplaceWith(jetCoreRegionalStepBarrel, jetCoreRegionalStep.clone(
src = 'jetCoreRegionalStepBarrelTracks',
))
from RecoTracker.FinalTrackSelectors.TrackTfClassifier_cfi import *
from RecoTracker.FinalTrackSelectors.trackSelectionTf_cfi import *
trackdnn.toReplaceWith(jetCoreRegionalStep, TrackTfClassifier.clone(
src = 'jetCoreRegionalStepTracks',
qualityCuts = qualityCutDictionary["JetCoreRegionalStep"],
))
trackdnn.toReplaceWith(jetCoreRegionalStepBarrel, TrackTfClassifier.clone(
src = 'jetCoreRegionalStepBarrelTracks',
qualityCuts = qualityCutDictionary["JetCoreRegionalStep"],
))
fastSim.toModify(jetCoreRegionalStep,vertices = 'firstStepPrimaryVerticesBeforeMixing')
jetCoreRegionalStepEndcap = jetCoreRegionalStep.clone(
src = 'jetCoreRegionalStepEndcapTracks',
)
# Final sequence
JetCoreRegionalStepTask = cms.Task(jetsForCoreTracking,
firstStepGoodPrimaryVertices,
#jetCoreRegionalStepClusters,
jetCoreRegionalStepSeedLayers,
jetCoreRegionalStepTrackingRegions,
jetCoreRegionalStepHitDoublets,
jetCoreRegionalStepSeeds,
jetCoreRegionalStepTrackCandidates,
jetCoreRegionalStepTracks,
#jetCoreRegionalStepClassifier1,jetCoreRegionalStepClassifier2,
jetCoreRegionalStep)
JetCoreRegionalStep = cms.Sequence(JetCoreRegionalStepTask)
JetCoreRegionalStepBarrelTask = cms.Task(jetsForCoreTrackingBarrel,
firstStepGoodPrimaryVertices,
#jetCoreRegionalStepClusters,
jetCoreRegionalStepSeedLayers,
jetCoreRegionalStepSeedsBarrel,
jetCoreRegionalStepBarrelTrackCandidates,
jetCoreRegionalStepBarrelTracks,
jetCoreRegionalStepBarrel)
JetCoreRegionalStepEndcapTask = cms.Task(jetsForCoreTrackingEndcap,
firstStepGoodPrimaryVertices,
#jetCoreRegionalStepClusters,
jetCoreRegionalStepSeedLayers,
jetCoreRegionalStepEndcapTrackingRegions,
jetCoreRegionalStepEndcapHitDoublets,
jetCoreRegionalStepSeedsEndcap,
jetCoreRegionalStepEndcapTrackCandidates,
jetCoreRegionalStepEndcapTracks,
jetCoreRegionalStepEndcap)
from Configuration.ProcessModifiers.seedingDeepCore_cff import seedingDeepCore
from RecoTracker.FinalTrackSelectors.TrackCollectionMerger_cfi import *
seedingDeepCore.toReplaceWith(jetCoreRegionalStepTracks, TrackCollectionMerger.clone(
trackProducers = ["jetCoreRegionalStepBarrelTracks",
"jetCoreRegionalStepEndcapTracks",],
inputClassifiers = ["jetCoreRegionalStepBarrel",
"jetCoreRegionalStepEndcap",],
foundHitBonus = 100.0,
lostHitPenalty = 1.0
))
seedingDeepCore.toReplaceWith(jetCoreRegionalStep, jetCoreRegionalStepTracks.clone()) #(*)
seedingDeepCore.toReplaceWith(JetCoreRegionalStepTask, cms.Task(
JetCoreRegionalStepBarrelTask,
JetCoreRegionalStepEndcapTask,
cms.Task(jetCoreRegionalStepTracks,jetCoreRegionalStep)
))
fastSim.toReplaceWith(JetCoreRegionalStepTask,
cms.Task(jetCoreRegionalStepTracks,
jetCoreRegionalStep))
|
1674792
|
import ply.lex as lex
import types
import re
# Token class, has all the necessary data for error reporting
# and reconstruction of the program
class PQLexerToken:
def __init__(self, type, value, line_no, column):
self.type = type
self.value = value
self.line_no = line_no
self.column = column
def __getitem__(self, index):
return (self.type, self.value, self.line_no, self.column)[index]
def __setitem__(self, index, value):
if index == 0:
self.type = value
elif index == 1:
self.value = value
elif index == 2:
self.line_no = value
elif index == 3:
self.column = value
def __delitem__(self, index):
if index == 0:
self.type = None
elif index == 1:
self.value = None
elif index == 2:
self.line_no = None
elif index == 3:
self.column = None
def __repr__(self):
return "(%s,%s,%d,%d)" % (self.type, self.value, self.line_no, self.column)
# Monkey-patch method, we add it to the lexer object to
# be able to pushback a token back into the lexer
def pushback_token(self,token):
self.pushback_queue.append(token)
# Monkey-patch method, we replace the lexer's token method with this method
# which returns pushed-back tokens, tracks opening and closing parens and
# constructs tokens with all the info we need
def get_token(self):
output_token = None
if not self.pushback_queue:
output_token = self.old_token()
else:
output_token = self.pushback_queue.pop()
if not output_token:
return None
if output_token.type in ['(', '[', '{']:
self.opened += 1
if output_token.type in [')', ']', '}']:
self.opened -= 1
output_token.value = PQLexerToken(output_token.type,
output_token.value,
self.lineno,
self.lexpos - self.newlinepos)
return output_token
# List of keywords
keywords = [
'DEF', 'RETURN', 'RAISE', 'FROM', 'IMPORT', 'AS',
'GLOBAL', 'NONLOCAL', 'ASSERT', 'IF', 'ELIF',
'ELSE', 'WHILE', 'FOR', 'LET', 'IN', 'TRY', 'FINALLY',
'WITH', 'EXCEPT', 'LAMBDA', 'OR', 'AND', 'NOT', 'IS',
'NONE', 'TRUE', 'FALSE', 'CLASS', 'YIELD', 'DEL',
'PASS', 'CONTINUE', 'BREAK', 'SELECT', 'WHERE',
'GROUP', 'BY', 'ORDER', 'WINDOW', 'PREVIOUS', 'FOLLOWING',
'START', 'END', 'WHEN', 'AT', 'ONLY', 'TUMBLING', 'SLIDING',
'ASC', 'DESC', 'COUNT', 'MATCH', 'EXACT', 'FILTER' ]
key_map = { k.lower():k for k in keywords if not k in ['NONE','TRUE','FALSE']}
key_map.update( {'None':'NONE', 'True':'TRUE', 'False':'FALSE'} )
# The lexer class
class Lexer:
tokens = keywords + [
# Literals, etc.
'NEWLINE',
'INDENT',
'DEDENT',
'STRING_LITERAL',
'LONG_STRING_LITERAL',
'NAME',
'FLOAT_NUMBER',
'DECIMAL_INTEGER',
'OCT_INTEGER',
'HEX_INTEGER',
'BIN_INTEGER',
'IMAG_NUMBER',
# Multi-char operators
'ELLIPSIS', 'POWER', 'LEFT_SHIFT', 'RIGHT_SHIFT', 'IDIV', 'EQUALS', 'GT_EQ',
'LT_EQ', 'NOT_EQ_1', 'NOT_EQ_2', 'ARROW', 'ADD_ASSIGN', 'SUB_ASSIGN', 'MULT_ASSIGN',
'AT_ASSIGN', 'DIV_ASSIGN', 'MOD_ASSIGN', 'AND_ASSIGN', 'OR_ASSIGN', 'XOR_ASSIGN',
'LEFT_SHIFT_ASSIGN', 'RIGHT_SHIFT_ASSIGN', 'POWER_ASSIGN', 'IDIV_ASSIGN',
'CHILD_AXIS', 'DESCENDENT_AXIS'
]
# Rules for literals, etc
# The rule for the newline is tricky, this is where
# all the Python indentation/dedentation happens
def t_NEWLINE(self,t):
r'\n|\r'
t.lexer.lineno += len(t.value)
t.lexer.newlinepos = t.lexer.lexpos
pos = t.lexer.lexpos
data = t.lexer.lexdata
# Consume all the whitespace until we hit something non-white or a newline
while True:
if pos >= len(data):
return t
if data[pos] in ['\n','\r'] or not re.match(r'\s', data[pos]):
break
pos += 1
# If this is a line with just whitespace, or we're inside parentheses,
# don't return a token
if data[pos] in ['\n', '\r', '#'] or t.lexer.opened > 0:
return None
ws = data[t.lexer.lexpos:pos]
# Check if we went back to an older indentation level, then
# create some DEDENT tokens
try:
idx = t.lexer.indent_stack.index(ws)
ndedents = len(t.lexer.indent_stack)-idx-1
for i in range(ndedents):
t.lexer.indent_stack.pop()
dedent_tok = lex.LexToken()
dedent_tok.type = 'DEDENT'
dedent_tok.value = ''
dedent_tok.lineno = t.lexer.lineno
dedent_tok.lexpos = pos
t.lexer.pushback_token(dedent_tok)
# Otherwise, check if we have added an indentation level and create
# an INDENT token, or just return a newline
except:
last_ident = t.lexer.indent_stack[-1] if t.lexer.indent_stack else ""
if ws.startswith(last_ident):
indent_tok = lex.LexToken()
indent_tok.type = 'INDENT'
indent_tok.value = ws
indent_tok.lineno = t.lexer.lineno
indent_tok.lexpos = pos
t.lexer.pushback_token(indent_tok)
t.lexer.indent_stack.append(ws)
            # Current indent doesn't extend the previous indent: indentation error!
else:
raise Exception("Bad ident at line %d" % t.lexer.lineno )
return t
    # Triple-quoted strings are pretty straightforward
def t_LONG_STRING_LITERAL(self,t):
r'([bB][rR]?|[uU]?[rR]?)("""|\'\'\')'
pos = t.lexer.lexpos
data = t.lexer.lexdata
start_sym = data[t.lexer.lexpos-1]
content_len = 0
while True:
if pos >= len(data):
raise Exception("Unterminated string at line %d" % t.lexer.lineno)
if data[pos] == start_sym:
if content_len >= 2:
if data[pos-1] == data[pos-2] == start_sym:
break
pos += 1
content_len += 1
pos += 1
t.lexer.lexpos = pos
t.value = data[t.lexpos:pos]
return t
# Some hairy business with backslash handling in single quote strings
def t_STRING_LITERAL(self,t):
r'([bB][rR]?|[uU]?[rR]?)("|\')'
pos = t.lexer.lexpos
data = t.lexer.lexdata
start_sym = data[t.lexer.lexpos-1]
prev_slash = False
while True:
if pos >= len(data) or data[pos] == '\n':
raise Exception("Unterminated string at line %d" % t.lexer.lineno)
if data[pos] == start_sym and not prev_slash:
break
if data[pos] == '\\':
prev_slash = not prev_slash
else:
prev_slash = False
pos += 1
pos += 1
t.lexer.lexpos = pos
t.value = data[t.lexpos:pos]
return t
# Keywords go here, as well as identifiers
def t_NAME(self,t):
r'[^\W0-9]\w*'
if t.value in key_map:
t.type = key_map[t.value]
return t
t_DECIMAL_INTEGER = r'([1-9][0-9]*|0+)'
t_OCT_INTEGER = r'0[oO][0-7]+'
t_HEX_INTEGER = r'0[xX][0-9a-fA-F]+'
t_BIN_INTEGER = r'0[bB][01]+'
t_IMAG_NUMBER = r'[0-9]*\.[0-9]+[eE][+-]?[0-9]+[jJ]|[0-9]+\.[eE][+-]?[0-9]+[jJ]|[0-9]+[eE][+-]?[0-9]+[jJ]|[0-9]*\.[0-9]+[jJ]|[0-9]+\.[jJ]|[0-9]+[jJ]'
t_FLOAT_NUMBER = r'[0-9]*\.[0-9]+[eE][+-]?[0-9]+|[0-9]+\.[eE][+-]?[0-9]+|[0-9]+[eE][+-]?[0-9]+|[0-9]*\.[0-9]+|[0-9]+\.'
t_ELLIPSIS = r'\.\.\.'
t_POWER_ASSIGN = r'\*\*='
t_POWER = r'\*\*'
t_EQUALS = r'=='
t_LEFT_SHIFT_ASSIGN = r'<<='
t_LEFT_SHIFT = r'<<'
t_RIGHT_SHIFT_ASSIGN = r'>>='
t_RIGHT_SHIFT = r'>>'
t_ADD_ASSIGN = r'\+='
t_ARROW = r'->'
t_SUB_ASSIGN = r'-='
t_IDIV = r'//'
t_LT_EQ = r'<='
t_NOT_EQ_1 = r'<>'
t_NOT_EQ_2 = r'!='
t_GT_EQ = r'>='
t_MULT_ASSIGN = r'\*='
t_DIV_ASSIGN = r'/='
t_AT_ASSIGN = r'@='
t_MOD_ASSIGN = r'%='
t_AND_ASSIGN = r'&='
t_OR_ASSIGN = r'\|='
t_XOR_ASSIGN = r'\^='
t_IDIV_ASSIGN = r'//='
t_CHILD_AXIS = r'\./'
t_DESCENDENT_AXIS = r'\.//'
literals = '.(),:;=[]|^&+-*/%~{}<>@'
def t_comment(self,t):
r'\#'
pos = t.lexer.lexpos
data = t.lexer.lexdata
while True:
if pos >= len(data) or data[pos] == '\n':
break
pos += 1
t.lexer.lexpos = pos
# When we hit EOF, we check whether we have "open" INDENTs left.
# If that's the case, generate a DEDENT for every current INDENT
def t_eof(self, t):
if t.lexer.indent_stack != [""]:
t.lexer.indent_stack.pop()
dedent_tok = lex.LexToken()
dedent_tok.type = 'DEDENT'
dedent_tok.value = ''
dedent_tok.lineno = t.lexer.lineno
dedent_tok.lexpos = t.lexer.lexpos
return dedent_tok
return None
t_ignore = ' \t'
def t_error(self,t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
def build(self,**kwargs):
self.lexer = lex.lex(module=self, reflags=re.UNICODE, **kwargs)
self.lexer.opened = 0
self.lexer.newlinepos = 0
self.lexer.indent_stack = [""]
self.lexer.pushback_queue = []
# WARNING: Monkey-patching :)
self.lexer.pushback_token = types.MethodType(pushback_token,self.lexer)
self.lexer.old_token = self.lexer.token
self.lexer.token = types.MethodType(get_token,self.lexer)
def test(self,str):
self.lexer.input(str)
while True:
tok = self.lexer.token()
if not tok:
break
print (tok)
if __name__=='__main__':
import sys
source_file = open(sys.argv[1])
l = Lexer()
l.build()
str = "".join(source_file.readlines()) + "\n"
print("Parsing:", repr(str))
t = l.test(str)
print(t)
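# Illustrative sketch of the expected token stream: feeding a snippet such as
# "def f():\n    return 1\n" through Lexer().build()/input()/token() should
# yield roughly DEF, NAME, '(', ')', ':', NEWLINE, INDENT, RETURN,
# DECIMAL_INTEGER, NEWLINE, DEDENT -- the INDENT coming from the pushback
# queue filled by t_NEWLINE and the trailing DEDENT from t_eof.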
|
1674794
|
import os
import platform
import re
import shutil
import subprocess
import sys
from distutils.command.build import build # type: ignore
from distutils.command.install import install
from distutils.version import LooseVersion
from setuptools import Command, Extension, setup, setuptools
from setuptools.command.build_ext import build_ext
BUILD_HOOKS = []
INSTALL_HOOKS = []
def add_install_hook(hook):
INSTALL_HOOKS.append(hook)
def add_build_hook(hook):
BUILD_HOOKS.append(hook)
class HookCommand(Command):
def __init__(self, dist):
self.dist = dist
Command.__init__(self, dist)
def initialize_options(self, *args):
self.install_dir = None
self.build_dir = None
def finalize_options(self):
self.set_undefined_options("build", ("build_scripts", "build_dir"))
self.set_undefined_options(
"install",
("install_platlib", "install_dir"),
)
def run(self):
for _ in self.hooks:
_(install_dir=self.install_dir, build_dir=self.build_dir)
class build_hook(HookCommand):
hooks = BUILD_HOOKS
class install_hook(HookCommand):
hooks = INSTALL_HOOKS
############################################
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=""):
Extension.__init__(self, name, sources=["./"])
self.sourcedir = os.path.abspath(sourcedir)
def copy_vdf_client(build_dir, install_dir):
shutil.copy("src/vdf_client", install_dir)
shutil.copy("src/prover_test", install_dir)
shutil.copy("src/1weso_test", install_dir)
shutil.copy("src/2weso_test", install_dir)
def copy_vdf_bench(build_dir, install_dir):
shutil.copy("src/vdf_bench", install_dir)
def invoke_make(**kwargs):
subprocess.check_output("make -C src -f Makefile.vdf-client", shell=True)
BUILD_VDF_CLIENT = os.getenv("BUILD_VDF_CLIENT", "Y") == "Y"
BUILD_VDF_BENCH = os.getenv("BUILD_VDF_BENCH", "N") == "Y"
if BUILD_VDF_CLIENT or BUILD_VDF_BENCH:
add_build_hook(invoke_make)
if BUILD_VDF_CLIENT:
add_install_hook(copy_vdf_client)
if BUILD_VDF_BENCH:
add_install_hook(copy_vdf_bench)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(["cmake", "--version"])
except OSError:
raise RuntimeError(
"CMake must be installed to build"
+ " the following extensions: "
+ ", ".join(e.name for e in self.extensions)
)
if platform.system() == "Windows":
cmake_version = LooseVersion(
re.search(r"version\s*([\d.]+)", out.decode()).group(1)
)
if cmake_version < "3.1.0":
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = [
"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" + str(extdir),
"-DPYTHON_EXECUTABLE=" + sys.executable,
]
cfg = "Debug" if self.debug else "Release"
build_args = ["--config", cfg]
if platform.system() == "Windows":
cmake_args += [
"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}".format(cfg.upper(), extdir)
]
if sys.maxsize > 2 ** 32:
cmake_args += ["-A", "x64"]
build_args += ["--", "/m"]
else:
cmake_args += ["-DCMAKE_BUILD_TYPE=" + cfg]
build_args += ["--", "-j", "6"]
env = os.environ.copy()
env["CXXFLAGS"] = '{} -DVERSION_INFO=\\"{}\\"'.format(
env.get("CXXFLAGS", ""), self.distribution.get_version()
)
subprocess.check_call(["cmake", ext.sourcedir] + cmake_args, env=env)
subprocess.check_call(["cmake", "--build", "."] + build_args)
class get_pybind_include(object):
"""Helper class to determine the pybind11 include path
The purpose of this class is to postpone importing pybind11
until it is actually installed, so that the ``get_include()``
method can be invoked."""
def __init__(self, user=False):
self.user = user
def __str__(self):
import pybind11
return pybind11.get_include(self.user)
ext_modules = [
Extension(
"chiavdf",
sorted(
[
"src/python_bindings/fastvdf.cpp",
"src/refcode/lzcnt.c",
]
),
include_dirs=[
# Path to pybind11 headers
get_pybind_include(),
get_pybind_include(user=True),
"mpir_gc_x64",
],
library_dirs=["mpir_gc_x64"],
libraries=["mpir"],
language="c++",
),
]
# As of Python 3.6, CCompiler has a `has_flag` method.
# cf http://bugs.python.org/issue26689
def has_flag(compiler, flagname):
"""Return a boolean indicating whether a flag name is supported on
the specified compiler.
"""
import tempfile
with tempfile.NamedTemporaryFile("w", suffix=".cpp") as f:
f.write("int main (int argc, char **argv) { return 0; }")
try:
compiler.compile([f.name], extra_postargs=[flagname])
except setuptools.distutils.errors.CompileError:
return False
return True
def cpp_flag(compiler):
"""Return the -std=c++[11/14/17] compiler flag.
    The newest available version is preferred over c++11.
"""
flags = ["-std=c++17", "-std=c++14", "-std=c++11"]
for flag in flags:
if has_flag(compiler, flag):
return flag
raise RuntimeError("Unsupported compiler -- at least C++11 support " "is needed!")
class BuildExt(build_ext):
"""A custom build extension for adding compiler-specific options."""
c_opts = {
"msvc": ["/EHsc", "/std:c++17"],
"unix": [""],
}
l_opts = {
"msvc": [],
"unix": [""],
}
if sys.platform == "darwin":
darwin_opts = ["-stdlib=libc++", "-mmacosx-version-min=10.14"]
c_opts["unix"] += darwin_opts
l_opts["unix"] += darwin_opts # type: ignore
def build_extensions(self):
ct = self.compiler.compiler_type
opts = self.c_opts.get(ct, [])
link_opts = self.l_opts.get(ct, [])
if ct == "unix":
opts.append('-DVERSION_INFO="%s"' % self.distribution.get_version())
opts.append(cpp_flag(self.compiler))
if has_flag(self.compiler, "-fvisibility=hidden"):
opts.append("-fvisibility=hidden")
elif ct == "msvc":
opts.append('/DVERSION_INFO=\\"%s\\"' % self.distribution.get_version())
for ext in self.extensions:
ext.extra_compile_args = opts
ext.extra_link_args = link_opts
build_ext.build_extensions(self)
if platform.system() == "Windows":
setup(
name="chiavdf",
author="<NAME>",
author_email="<EMAIL>",
description="Chia vdf verification (wraps C++)",
license="Apache License",
python_requires=">=3.7",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
build_requires=["pybind11"],
url="https://github.com/Chia-Network/chiavdf",
ext_modules=ext_modules,
cmdclass={"build_ext": BuildExt},
zip_safe=False,
use_scm_version={"fallback_version": "unknown-no-.git-directory"},
)
else:
build.sub_commands.append(("build_hook", lambda x: True)) # type: ignore
install.sub_commands.append(("install_hook", lambda x: True))
setup(
name="chiavdf",
author="<NAME>",
author_email="<EMAIL>",
description="Chia vdf verification (wraps C++)",
license="Apache License",
python_requires=">=3.7",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url="https://github.com/Chia-Network/chiavdf",
setup_requires=["pybind11>=2.5.0"],
ext_modules=[CMakeExtension("chiavdf", "src")],
cmdclass=dict(
build_ext=CMakeBuild, install_hook=install_hook, build_hook=build_hook
),
zip_safe=False,
use_scm_version={"fallback_version": "unknown-no-.git-directory"},
)
|
1674817
|
import nltk
import os
print('All packages imported successfully')
assert os.path.isdir('/ml/nltk_data')
assert os.path.isdir('/ml/distilbert-base-uncased_tokenizer')
def verify_stat(filename):
res = os.stat(filename)
assert res.st_uid == 4000
assert res.st_gid == 4000
assert oct(res.st_mode)[-3:] == '775'
verify_stat('/ml/distilbert-base-uncased_tokenizer')
verify_stat('/ml/nltk_data')
verify_stat('/ml/distilbert-base-uncased.onnx')
verify_stat('/ml/glove_100_top_20k.p')
print('All files verified')
|
1674820
|
import tensorflow as tf
from onnx_tf.handlers.backend_handler import BackendHandler
from onnx_tf.handlers.handler import onnx_op
from onnx_tf.handlers.handler import tf_func
@onnx_op("BatchNormalization")
@tf_func(tf.nn.batch_normalization)
class BatchNormalization(BackendHandler):
@classmethod
def get_attrs_processor_param(cls):
return {
"default": {
"epsilon": 1e-5
},
"rename": {
"epsilon": "variance_epsilon"
}
}
@classmethod
def _common(cls, node, **kwargs):
tensor_dict = kwargs["tensor_dict"]
x = tensor_dict[node.inputs[0]]
x_shape = x.get_shape().as_list()
x_rank = len(x_shape)
params_shape_broadcast = list([1, x_shape[1]] +
[1 for _ in range(2, x_rank)])
# process unknown channel shape
if params_shape_broadcast[1] is None:
params_shape_broadcast[1] = tf.shape(x)[1]
params_shape_broadcast = tf.stack(params_shape_broadcast)
total_num_dim = len(x.get_shape())
scale = tf.reshape(tensor_dict[node.inputs[1]], params_shape_broadcast)
bias = tf.reshape(tensor_dict[node.inputs[2]], params_shape_broadcast)
running_mean = tf.reshape(tensor_dict[node.inputs[3]],
params_shape_broadcast)
running_variance = tf.reshape(tensor_dict[node.inputs[4]],
params_shape_broadcast)
# from version 7, force to use test mode
if cls.SINCE_VERSION >= 7 or node.attrs.get("is_test", 0):
inputs = [x, running_mean, running_variance, bias, scale]
return [cls.make_tensor_from_onnx_node(node, inputs=inputs)]
spatial = node.attrs.get("spatial", 1) == 1
momentum = node.attrs.get("momentum", 0.9)
axis = [0] if spatial else [0] + list(range(2, total_num_dim))
mean, variance = tf.nn.moments(x, axis)
running_mean = running_mean * momentum + mean * (1 - momentum)
running_variance = running_variance * momentum + variance * (1 - momentum)
# TODO: need to conform to the documentation here
inputs = [x, running_mean, running_variance, bias, scale]
return [cls.make_tensor_from_onnx_node(node, inputs=inputs)]
@classmethod
def version_1(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_6(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_7(cls, node, **kwargs):
return cls._common(node, **kwargs)
@classmethod
def version_9(cls, node, **kwargs):
return cls._common(node, **kwargs)
|
1674824
|
import datetime as dt
import numba as nb
import numpy as np
from randomgen import Xoroshiro128
x = Xoroshiro128()
f = x.ctypes.next_uint32
s = x.ctypes.state
@nb.jit(nopython=True)
def bounded_uint(lb: int, ub: int, state: int) -> int:
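    # Bitmask-with-rejection sampling: widen the range width into an all-ones
    # bitmask, draw masked 32-bit words from the raw generator, and reject
    # draws larger than the width so the result is uniform on [lb, ub].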
mask = delta = ub - lb
mask |= mask >> 1
mask |= mask >> 2
mask |= mask >> 4
mask |= mask >> 8
mask |= mask >> 16
val = f(state) & mask
while val > delta:
val = f(state) & mask
return lb + val
print(bounded_uint(323, 2394691, s.value))
@nb.jit(nopython=True)
def bounded_uints(lb: int, ub: int, n: int, state: int) -> None:
out = np.empty(n, dtype=np.uint32)
for i in range(n):
out[i] = bounded_uint(lb, ub, state)
bounded_uints(323, 2394691, 10000000, s.value)
g = x.cffi.next_double
cffi_state = x.cffi.state
state_addr = x.cffi.state_address
def normals(n: int, state: int) -> np.ndarray:
out = np.empty(n)
for i in range((n + 1) // 2):
x1 = 2.0 * g(state) - 1.0
x2 = 2.0 * g(state) - 1.0
r2 = x1 * x1 + x2 * x2
while r2 >= 1.0 or r2 == 0.0:
x1 = 2.0 * g(state) - 1.0
x2 = 2.0 * g(state) - 1.0
r2 = x1 * x1 + x2 * x2
f = np.sqrt(-2.0 * np.log(r2) / r2)
out[2 * i] = f * x1
if 2 * i + 1 < n:
out[2 * i + 1] = f * x2
return out
print(normals(10, cffi_state).var())
# Warm up
normalsj = nb.jit(normals, nopython=True)
normalsj(1, state_addr)
start = dt.datetime.now()
normalsj(1000000, state_addr)
ms = 1000 * (dt.datetime.now() - start).total_seconds()
print(
"1,000,000 Polar-transform (numba/Xoroshiro128) randoms in "
"{ms:0.1f}ms".format(ms=ms)
)
start = dt.datetime.now()
np.random.standard_normal(1000000)
ms = 1000 * (dt.datetime.now() - start).total_seconds()
print("1,000,000 Polar-transform (NumPy) randoms in {ms:0.1f}ms".format(ms=ms))
|
1674835
|
from unittest.mock import MagicMock
from mamba import describe, it, context
from crowd_anki.export.anki_exporter_wrapper import AnkiJsonExporterWrapper
DUMMY_EXPORT_DIRECTORY = "/tmp"
TEST_DECK_ID = 1
with describe(AnkiJsonExporterWrapper) as self:
with context("user is trying to export dynamic deck"):
with it("should warn and exit without initiating export"):
exporter_mock = MagicMock()
notifier_mock = MagicMock()
collection_mock = MagicMock()
collection_mock.decks.get.return_value = {'dyn': True}
subject = AnkiJsonExporterWrapper(collection_mock, TEST_DECK_ID, exporter_mock, notifier_mock)
subject.exportInto(DUMMY_EXPORT_DIRECTORY)
notifier_mock.warning.assert_called_once()
exporter_mock.export_to_directory.assert_not_called()
|
1674886
|
from douyin.downloaders.base import Downloader
from douyin.downloaders.video import VideoDownloader
from douyin.downloaders.music import MusicDownloader
|
1674890
|
import re
import os
import yaml
from .nested_dict import nested_dict
__all__ = [
'CodeTemplate', 'IDENT_REGEX', 'YamlLoader', 'nested_dict',
'split_name_params', 'write',
]
from tools.codegen.code_template import CodeTemplate
# You should use these lines, rather than doing it manually.
# Especially if you see this error!
#
# File "/usr/local/lib/python2.7/dist-packages/yaml/__init__.py", line 69, in load
# loader = Loader(stream)
# TypeError: 'module' object is not callable
try:
# use faster C loader if available
from yaml import CLoader as YamlLoader
except ImportError:
from yaml import Loader as YamlLoader
GENERATED_COMMENT = CodeTemplate(
"@" + "generated from ${filename}")
# Matches "foo" in "foo, bar" but not "foobar". Used to search for the
# occurrence of a parameter in the derivative formula
IDENT_REGEX = r'(^|\W){}($|\W)'
# TODO: Use a real parser here; this will get bamboozled
# by signatures that contain things like std::array<bool, 2> (note the space)
def split_name_params(prototype):
name, overload_name, params = re.match(r'(\w+)(\.\w+)?\((.*)\)', prototype).groups()
return name, params.split(', ')
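# Illustrative example: split_name_params('add.out(Tensor self, Tensor other)')
# returns ('add', ['Tensor self', 'Tensor other']); the overload suffix is
# matched but discarded.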
# When tracing, we record inplace operations as out-of-place operations,
# because we don't have a story for side effects in the IR yet.
#
# Doing this un-inplacing is a little delicate however; __and__ is NOT inplace!
# TODO: Do something more robust
def uninplace_api_name(api_name):
if api_name.endswith('_') and not api_name.endswith('__'):
api_name = api_name[:-1]
if api_name.endswith('_out'):
api_name = api_name[:-4]
return api_name
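# Illustrative examples: uninplace_api_name('add_') -> 'add' and
# uninplace_api_name('add_out') -> 'add', while '__and__' is left unchanged.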
def write(dirname, name, template, env):
env['generated_comment'] = GENERATED_COMMENT.substitute(filename=template.filename)
path = os.path.join(dirname, name)
# See Note [Unchanging results for ninja]
try:
with open(path, 'r') as f:
old_val = f.read()
except IOError:
old_val = None
new_val = template.substitute(env)
if old_val != new_val:
with open(path, 'w') as f:
print("Writing {}".format(path))
f.write(new_val)
else:
print("Skipped writing {}".format(path))
def is_tensor_method(declaration):
return 'Tensor' in declaration['method_of']
def is_out_variant(decl):
return decl['name'].endswith('_out')
def op_name_without_overload(decl):
name = decl['name'] if not is_out_variant(decl) else decl['name'][:-4]
return 'aten::{}'.format(name)
def load_op_list_and_strip_overload(op_list, op_list_path):
if op_list is None and op_list_path is None:
return None
if op_list is None:
op_list = []
if op_list_path is not None:
with open(op_list_path, 'r') as f:
op_list += yaml.load(f, Loader=YamlLoader)
# strip out the overload part
return {opname.split('.', 1)[0] for opname in op_list}
|
1674891
|
from datetime import datetime
from pathlib import Path
import shutil
from nornir import InitNornir
# from nornir_netmiko.tasks import netmiko_send_command
# from nornir.core.filter import F
from nornir_scrapli.tasks import send_command as scrapli_send_command
COMMANDS = [
"show version",
"show ip int br",
"show ip arp",
"show platform resources",
]
OUTPUT_DIR = Path("output/cli")
def gather_commands(task, commands):
dt = datetime.now()
dt_str = dt.strftime("%Y-%m-%dT%H:%M:%S")
file_path = OUTPUT_DIR / f"{task.host.name}_{dt_str}.txt"
with open(file_path, "w") as f:
for command in commands:
# gather commands using netmiko
# output = task.run(netmiko_send_command, command_string=command)
# gather commands using scrapli w/ libssh2
output = task.run(scrapli_send_command, command=command)
f.write(f"===== {command} ======\n{output.result}\n\n")
def main():
with InitNornir(config_file="nr-config-local.yaml") as nr:
# lisbon = nr.filter(F(groups__contains="Lisbon"))
nr.run(gather_commands, commands=COMMANDS)
if __name__ == "__main__":
if OUTPUT_DIR.is_dir():
shutil.rmtree(OUTPUT_DIR)
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
main()
|
1674922
|
import sys
import unittest
import importlib
import asn1tools
sys.path.append('tests/files')
sys.path.append('tests/files/3gpp')
sys.path.append('tests/files/cen')
sys.path.append('tests/files/etsi')
sys.path.append('tests/files/ieee')
sys.path.append('tests/files/ietf')
sys.path.append('tests/files/oma')
class Asn1ToolsParseTest(unittest.TestCase):
maxDiff = None
def parse_and_verify(self, module, path='.'):
asn_path = 'tests/files/' + path + '/' + module + '.asn'
actual = asn1tools.parse_files(asn_path)
# from pprint import pformat
#
# py_path = 'tests/files/' + path + '/' + module + '.py'
#
# with open(py_path, 'w') as fout:
# fout.write('EXPECTED = ' + pformat(actual))
module = importlib.import_module(module)
self.assertEqual(actual, module.EXPECTED)
def test_parse_foo(self):
self.parse_and_verify('foo')
def test_parse_bar(self):
self.parse_and_verify('bar')
def test_parse_all_types(self):
self.parse_and_verify('all_types')
def test_parse_extensibility_implied(self):
self.parse_and_verify('extensibility_implied')
def test_parse_all_types_automatic_tags(self):
self.parse_and_verify('all_types_automatic_tags')
def test_parse_module_tags_explicit(self):
self.parse_and_verify('module_tags_explicit')
def test_parse_module_tags_implicit(self):
self.parse_and_verify('module_tags_implicit')
def test_parse_module_tags_automatic(self):
self.parse_and_verify('module_tags_automatic')
def test_parse_information_object(self):
self.parse_and_verify('information_object')
def test_parse_x683(self):
self.parse_and_verify('x683')
def test_parse_x680(self):
self.parse_and_verify('x680')
def test_parse_x691_a1(self):
self.parse_and_verify('x691_a1')
def test_parse_x691_a2(self):
self.parse_and_verify('x691_a2')
def test_parse_x691_a3(self):
self.parse_and_verify('x691_a3')
def test_parse_x691_a4(self):
self.parse_and_verify('x691_a4')
def test_parse_zforce(self):
self.parse_and_verify('zforce')
def test_parse_rrc_8_6_0(self):
self.parse_and_verify('rrc_8_6_0', '3gpp')
def test_parse_rrc_14_4_0(self):
self.parse_and_verify('rrc_14_4_0', '3gpp')
def test_parse_s1ap_14_4_0(self):
self.parse_and_verify('s1ap_14_4_0', '3gpp')
def test_parse_lpp_14_3_0(self):
self.parse_and_verify('lpp_14_3_0', '3gpp')
def test_parse_rfc1155(self):
self.parse_and_verify('rfc1155', 'ietf')
def test_parse_rfc1157(self):
self.parse_and_verify('rfc1157', 'ietf')
def test_parse_rfc2986(self):
self.parse_and_verify('rfc2986', 'ietf')
def test_parse_rfc3161(self):
self.parse_and_verify('rfc3161', 'ietf')
def test_parse_rfc3279(self):
self.parse_and_verify('rfc3279', 'ietf')
def test_parse_rfc3281(self):
self.parse_and_verify('rfc3281', 'ietf')
def test_parse_rfc3447(self):
self.parse_and_verify('rfc3447', 'ietf')
def test_parse_rfc3852(self):
self.parse_and_verify('rfc3852', 'ietf')
def test_parse_rfc4210(self):
self.parse_and_verify('rfc4210', 'ietf')
def test_parse_rfc4211(self):
self.parse_and_verify('rfc4211', 'ietf')
def test_parse_rfc4511(self):
self.parse_and_verify('rfc4511', 'ietf')
def test_parse_rfc5084(self):
self.parse_and_verify('rfc5084', 'ietf')
def test_parse_rfc5280(self):
self.parse_and_verify('rfc5280', 'ietf')
def test_parse_rfc5280_modified(self):
self.parse_and_verify('rfc5280_modified', 'ietf')
def test_parse_etsi_cam_pdu_descriptions_1_3_2(self):
self.parse_and_verify('cam_pdu_descriptions_1_3_2', 'etsi')
def test_parse_etsi_its_container_1_2_1(self):
self.parse_and_verify('its_container_1_2_1', 'etsi')
def test_parse_etsi_mapem_2_1_1(self):
self.parse_and_verify('mapem_2_1_1', 'etsi')
def test_parse_cen_dsrc(self):
self.parse_and_verify('dsrc', 'cen')
def test_parse_ieee_1609_2(self):
self.parse_and_verify('ieee1609_2', 'ieee')
def test_parse_oma_ulp(self):
self.parse_and_verify('ulp', 'oma')
def test_parse_enumerated(self):
self.parse_and_verify('enumerated')
def test_parse_comments(self):
self.parse_and_verify('comments')
def test_parse_constraints_extensions(self):
self.parse_and_verify('constraints_extensions')
def test_parse_time_types(self):
self.parse_and_verify('time_types')
def test_parse_named_numbers(self):
self.parse_and_verify('named_numbers')
def test_parse_import_imported(self):
self.parse_and_verify('import_imported')
def test_parse_parameterization(self):
self.parse_and_verify('parameterization')
def test_parse_imports_global_module_reference(self):
actual = asn1tools.parse_string('A DEFINITIONS ::= BEGIN '
'IMPORTS '
'a FROM B '
'c, d FROM E global-module-reference '
'f, g FROM H {iso(1)}; '
'END')
expected = {
'A': {
'extensibility-implied': False,
'imports': {
'B': ['a'],
'E': ['c', 'd'],
'H': ['f', 'g']
},
'object-classes': {},
'object-sets': {},
'types': {},
'values': {}
}
}
self.assertEqual(actual, expected)
def test_parse_imports_single_value_reference(self):
"""Test that a value reference, in this test 'c', is not parsed as an
        assigned identifier, but as an imported value from 'D'.
"""
actual = asn1tools.parse_string('A DEFINITIONS ::= BEGIN '
'IMPORTS '
'A FROM B '
'c FROM D; '
'END')
expected = {
'A': {
'extensibility-implied': False,
'imports': {
'B': ['A'],
'D': ['c']
},
'object-classes': {},
'object-sets': {},
'types': {},
'values': {}
}
}
self.assertEqual(actual, expected)
def test_parse_empty_imports(self):
actual = asn1tools.parse_string('A DEFINITIONS ::= BEGIN '
'IMPORTS ; '
'END')
expected = {
'A': {
'extensibility-implied': False,
'imports': {},
'object-classes': {},
'object-sets': {},
'types': {},
'values': {}
}
}
self.assertEqual(actual, expected)
def test_parse_keyword_in_type_name(self):
actual = asn1tools.parse_string('A DEFINITIONS ::= BEGIN '
'ENDa ::= INTEGER '
'END')
expected = {
'A': {
'extensibility-implied': False,
'imports': {},
'object-classes': {},
'object-sets': {},
'types': {'ENDa': {'type': 'INTEGER'}},
'values': {}
}
}
self.assertEqual(actual, expected)
def test_parse_error_empty_string(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.parse_string('')
self.assertEqual(str(cm.exception),
"Invalid ASN.1 syntax at line 1, column 1: '>!<': "
"Expected modulereference.")
def test_parse_error_begin_missing(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.parse_string('A DEFINITIONS ::= END')
self.assertEqual(str(cm.exception),
"Invalid ASN.1 syntax at line 1, column 19: "
"'A DEFINITIONS ::= >!<END': Expected BEGIN.")
def test_parse_error_end_missing(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.parse_string('A DEFINITIONS ::= BEGIN')
self.assertEqual(str(cm.exception),
"Invalid ASN.1 syntax at line 1, column 24: "
"'A DEFINITIONS ::= BEGIN>!<': Expected END.")
def test_parse_error_type_assignment_missing_assignment(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.parse_string('A DEFINITIONS ::= BEGIN A END')
self.assertEqual(str(cm.exception),
"Invalid ASN.1 syntax at line 1, column 27: "
"'A DEFINITIONS ::= BEGIN A >!<END': "
"Expected ::=.")
def test_parse_error_value_assignment_missing_assignment(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.parse_string('A DEFINITIONS ::= BEGIN a INTEGER END')
self.assertEqual(str(cm.exception),
"Invalid ASN.1 syntax at line 1, column 35: "
"'A DEFINITIONS ::= BEGIN a INTEGER >!<END': "
"Expected ::=.")
def test_parse_error_sequence_missing_type(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.parse_string('A DEFINITIONS ::= BEGIN'
' A ::= SEQUENCE { a } '
'END')
self.assertEqual(
str(cm.exception),
"Invalid ASN.1 syntax at line 1, column 45: 'A DEFINITIONS ::= BEGIN "
" A ::= SEQUENCE { a >!<} END': Expected Type.")
def test_parse_error_sequence_missing_member_name(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.parse_string('A DEFINITIONS ::= BEGIN'
' A ::= SEQUENCE { A } '
'END')
self.assertEqual(
str(cm.exception),
"Invalid ASN.1 syntax at line 1, column 43: 'A DEFINITIONS ::= "
"BEGIN A ::= SEQUENCE { >!<A } END': Expected \"}\".")
def test_parse_error_definitive_identifier(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.parse_string('A {} DEFINITIONS ::= BEGIN '
'END')
self.assertEqual(
str(cm.exception),
"Invalid ASN.1 syntax at line 1, column 4: 'A {>!<} DEFINITIONS "
"::= BEGIN END': Expected {{identifier Suppress:(\"(\") - "
"definitiveNumberForm - Suppress:(\")\")} | identifier | "
"definitiveNumberForm}.")
def test_parse_error_missing_union_member_beginning(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.parse_string('A DEFINITIONS ::= BEGIN '
'B ::= INTEGER (| SIZE (1))'
'END')
self.assertEqual(
str(cm.exception),
"Invalid ASN.1 syntax at line 1, column 40: 'A DEFINITIONS ::= BEGIN "
"B ::= INTEGER (>!<| SIZE (1))END': Expected one or more constraints.")
def test_parse_error_missing_union_member_middle(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.parse_string('A DEFINITIONS ::= BEGIN '
'B ::= INTEGER (SIZE (1) | | (0))'
'END')
self.assertEqual(
str(cm.exception),
"Invalid ASN.1 syntax at line 1, column 49: \'A DEFINITIONS "
"::= BEGIN B ::= INTEGER (SIZE (1) >!<| | (0))END\': Expected \")\".")
def test_parse_error_missing_union_member_end(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.parse_string('A DEFINITIONS ::= BEGIN '
'B ::= INTEGER (SIZE (1) |)'
'END')
self.assertEqual(
str(cm.exception),
"Invalid ASN.1 syntax at line 1, column 49: \'A DEFINITIONS "
"::= BEGIN B ::= INTEGER (SIZE (1) >!<|)END\': Expected \")\".")
def test_parse_error_size_constraint_missing_parentheses(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.parse_string('A DEFINITIONS ::= BEGIN '
'B ::= INTEGER (SIZE 1)'
'END')
self.assertEqual(
str(cm.exception),
"Invalid ASN.1 syntax at line 1, column 45: \'A DEFINITIONS ::= "
"BEGIN B ::= INTEGER (SIZE >!<1)END\': Expected \"(\".")
def test_parse_error_size_constraint_missing_size(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.parse_string('A DEFINITIONS ::= BEGIN '
'B ::= INTEGER (SIZE ())'
'END')
self.assertEqual(
str(cm.exception),
"Invalid ASN.1 syntax at line 1, column 46: 'A DEFINITIONS ::= "
"BEGIN B ::= INTEGER (SIZE (>!<))END': Expected one or more "
"constraints.")
def test_parse_error_tag_class_number_missing(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.parse_string('A DEFINITIONS ::= BEGIN '
'B ::= [] INTEGER '
'END')
self.assertEqual(
str(cm.exception),
"Invalid ASN.1 syntax at line 1, column 32: 'A DEFINITIONS "
"::= BEGIN B ::= [>!<] INTEGER END': Expected ClassNumber.")
def test_parse_error_missing_type(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.parse_string('A DEFINITIONS ::= BEGIN '
'B ::= '
'END')
self.assertEqual(
str(cm.exception),
"Invalid ASN.1 syntax at line 1, column 31: 'A DEFINITIONS ::= BEGIN "
"B ::= >!<END': Expected Type.")
def test_parse_error_end_missing_with_comments(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.parse_string('A DEFINITIONS -- g -- \n'
'-- hhhh\n'
'::= BEGIN ')
self.assertEqual(str(cm.exception),
"Invalid ASN.1 syntax at line 3, column 11: "
"'::= BEGIN >!<': Expected END.")
def test_parse_error_late_extension_additions(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.parse_string('A DEFINITIONS ::= BEGIN '
'Foo ::= SEQUENCE { '
'a BOOLEAN, '
'..., '
'..., '
'[[ '
'c BOOLEAN '
']] '
'} '
'END')
self.assertEqual(
str(cm.exception),
"Invalid ASN.1 syntax at line 1, column 63: \'A DEFINITIONS ::= "
"BEGIN Foo ::= SEQUENCE { a BOOLEAN, ..., ...>!<, [[ c BOOLEAN ]] "
"} END\': Expected \"}\".")
def test_parse_error_too_many_extension_markers(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.parse_string('A DEFINITIONS ::= BEGIN '
'Foo ::= SEQUENCE { '
'a BOOLEAN, '
'..., '
'[[ '
'b BOOLEAN '
']], '
'[[ '
'c BOOLEAN '
']], '
'..., '
'd BOOLEAN, '
'... '
'} '
'END')
self.assertEqual(
str(cm.exception),
"Invalid ASN.1 syntax at line 1, column 108: \'A DEFINITIONS ::= "
"BEGIN Foo ::= SEQUENCE { a BOOLEAN, ..., [[ b BOOLEAN ]], [[ c "
"BOOLEAN ]], ..., d BOOLEAN>!<, ... } END\': Expected \"}\".")
def test_parse_error_missing_single_line_comment_end(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.parse_string('A DEFINITIONS ::= \n'
'BEGIN -- END')
self.assertEqual(
str(cm.exception),
"Invalid ASN.1 syntax at line 2, column 7: 'BEGIN >!<-- END': "
"Missing newline or -- for single line comment.")
def test_parse_error_missing_multi_line_comment_end(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.parse_string('A DEFINITIONS ::= \n'
'BEGIN /* END')
self.assertEqual(
str(cm.exception),
"Invalid ASN.1 syntax at line 2, column 7: 'BEGIN >!</* END': "
"Missing */ for multi line comment.")
def test_parse_error_multi_line_comment_overlapping(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.parse_string('A DEFINITIONS ::= \n'
'BEGIN /*/ END')
self.assertEqual(
str(cm.exception),
"Invalid ASN.1 syntax at line 2, column 7: 'BEGIN >!</*/ END': "
"Missing */ for multi line comment.")
def test_parse_x680_duplicated_enum_number_a_c_0(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.compile_string('A DEFINITIONS ::= BEGIN '
'E ::= ENUMERATED { a, b, ..., c(0) } '
'END')
self.assertEqual(str(cm.exception),
"Duplicated ENUMERATED number 0 at line 1.")
def test_parse_x680_duplicated_enum_number_c_d_2(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.compile_string('A DEFINITIONS ::= BEGIN '
'E ::= ENUMERATED { a, b, ..., c, d(2) } '
'END')
self.assertEqual(str(cm.exception),
"Duplicated ENUMERATED number 2 at line 1.")
def test_parse_x680_duplicated_enum_number_a_b_0(self):
with self.assertRaises(asn1tools.ParseError) as cm:
asn1tools.compile_string('A DEFINITIONS ::= BEGIN '
'E ::= ENUMERATED { a(0), b(0) } '
'END')
self.assertEqual(str(cm.exception),
"Duplicated ENUMERATED number 0 at line 1.")
def test_parse_encoding(self):
asn1tools.parse_files('tests/files/foo.asn', encoding='ascii')
if __name__ == '__main__':
unittest.main()
|
1674923
|
import pandas as pd
import io
import os
from functools import reduce
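# Note: `context` and `ref` are assumed to be injected by the hosting runtime
# (e.g. a dbt/fal Python-model environment); they are not imported here.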
model_name = context.current_model.name
output = f"Model name: {model_name}"
output = output + f"\nStatus: {context.current_model.status}"
df: pd.DataFrame = ref(model_name)
buf = io.StringIO()
df.info(buf=buf, memory_usage=False)
info = buf.getvalue()
output = output + f"\nModel dataframe information:\n{info}"
temp_dir = os.environ["temp_dir"]
print(temp_dir)
write_dir = open(reduce(os.path.join, [temp_dir, model_name + ".after.txt"]), "w")
write_dir.write(output)
write_dir.close()
|
1674926
|
from nose.tools import assert_equal
from ...ut.bunch import Bunch
from ..base import Mod as Base
from ..mixins import IdentityGuessNameMixin, make_field_guess_name_mixin
def check_field_guess_name_mixin(query, desire):
class Mod(make_field_guess_name_mixin('uri', 'query'), IdentityGuessNameMixin):
pass
mod = Mod()
assert_equal(mod.guess_name(query), desire)
def test_field_guess_name_mixin():
for query, desire in [
(Bunch(text='foo'), 'foo'),
(Bunch(text='{"method": "user_uri", "uri": "foo"}'), "foo"),
(Bunch(text='{"method": "query", "query": "bar"}'), "bar"),
]:
yield check_field_guess_name_mixin, query, desire
def test_identity_guess_name_mixin():
class Mod(IdentityGuessNameMixin):
pass
mod = Mod()
assert_equal(mod.guess_name(Bunch(text='foo')), 'foo')
|
1674951
|
import pytest
import logging
import asyncio
from aiochatbase import Chatbase
from aiochatbase import types
from . import FakeChatbaseServer, CLICK_RESPONSE_DICT, BULK_RESPONSE_DICT, EVENT_RESPONSE_DICT
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('TrueModerTest')
pytestmark = pytest.mark.asyncio
CHATBASE_TOKEN = '<KEY>'
CHATBOT_PLATFORM = 'TestPlatform'
USER_ID = '123456'
MESSAGE_TEXT = 'test message text'
INTENT = 'Another message'
CB_MESSAGE_ID = '12345'
@pytest.yield_fixture
def cb(event_loop: asyncio.AbstractEventLoop):
_chatbase = Chatbase(CHATBASE_TOKEN, CHATBOT_PLATFORM, loop=event_loop)
yield _chatbase
event_loop.run_until_complete(_chatbase.close())
async def test_cb_init_without_loop(event_loop):
chatbase = Chatbase(CHATBASE_TOKEN, CHATBOT_PLATFORM)
await chatbase.close()
async def test_prepare_message(cb, event_loop):
msg = await cb.prepare_message(user_id=USER_ID, intent=INTENT, message=MESSAGE_TEXT)
assert isinstance(msg, types.Message)
assert msg.user_id == USER_ID
assert msg.intent == INTENT
assert msg.message == MESSAGE_TEXT
async def test_register_message(cb, event_loop):
""" Registering message in basic mode """
async with FakeChatbaseServer(message_dict={'message_id': CB_MESSAGE_ID, 'status': 200}, loop=event_loop):
result = await cb.register_message(user_id=USER_ID, intent=INTENT)
assert result == CB_MESSAGE_ID
async def test_register_message_without_task(cb, event_loop):
""" Registering message in basic mode strongly without task """
async with FakeChatbaseServer(message_dict={'message_id': CB_MESSAGE_ID, 'status': 200}, loop=event_loop):
result = await cb.register_message(user_id=USER_ID, intent=INTENT, task=False)
assert result == CB_MESSAGE_ID
async def test_register_message_with_task(cb, event_loop):
""" Registering message in basic mode with task """
async with FakeChatbaseServer(message_dict={'message_id': CB_MESSAGE_ID, 'status': 200}, loop=event_loop):
result = await cb.register_message(user_id=USER_ID, intent=INTENT, task=True)
assert isinstance(result, asyncio.Task)
done, pending = await asyncio.wait([result], return_when=asyncio.ALL_COMPLETED)
assert done.pop().result() == CB_MESSAGE_ID
async def test_register_messages(cb, event_loop):
msg_1 = await cb.prepare_message('1', 'test bulk', message=MESSAGE_TEXT)
msg_2 = await cb.prepare_message('2', 'test bulk', not_handled=True)
msg_3 = await cb.prepare_message('3', 'test bulk', version='Test', session_id='12345')
messages_list = [msg_1, msg_2, msg_3]
async with FakeChatbaseServer(message_dict=BULK_RESPONSE_DICT, loop=event_loop):
result = await cb.register_messages(messages_list)
assert result == [5917431215, 5917431216, 5917431217]
async def test_register_messages_without_task(cb, event_loop):
msg_1 = await cb.prepare_message('1', 'test bulk')
msg_2 = await cb.prepare_message('2', 'test bulk')
msg_3 = await cb.prepare_message('3', 'test bulk')
messages_list = [msg_1, msg_2, msg_3]
async with FakeChatbaseServer(message_dict=BULK_RESPONSE_DICT, loop=event_loop):
result = await cb.register_messages(messages_list, task=False)
assert result == [5917431215, 5917431216, 5917431217]
async def test_register_messages_with_task(cb, event_loop):
msg_1 = await cb.prepare_message('1', 'test bulk')
msg_2 = await cb.prepare_message('2', 'test bulk')
msg_3 = await cb.prepare_message('3', 'test bulk')
messages_list = [msg_1, msg_2, msg_3]
async with FakeChatbaseServer(message_dict=BULK_RESPONSE_DICT, loop=event_loop):
result = await cb.register_messages(messages_list, task=True)
assert isinstance(result, asyncio.Task)
done, pending = await asyncio.wait([result], return_when=asyncio.ALL_COMPLETED)
assert done.pop().result() == [5917431215, 5917431216, 5917431217]
async def test_register_click(cb, event_loop):
async with FakeChatbaseServer(message_dict=CLICK_RESPONSE_DICT, loop=event_loop):
result = await cb.register_click(url='google.com')
assert result is True
async def test_register_click_without_task(cb, event_loop):
async with FakeChatbaseServer(message_dict=CLICK_RESPONSE_DICT, loop=event_loop):
result = await cb.register_click(url='google.com', task=False)
assert result is True
async def test_register_click_with_task(cb, event_loop):
async with FakeChatbaseServer(message_dict=CLICK_RESPONSE_DICT, loop=event_loop):
result = await cb.register_click(url='google.com', task=True)
assert isinstance(result, asyncio.Task)
done, pending = await asyncio.wait([result], return_when=asyncio.ALL_COMPLETED)
assert done.pop().result() is True
async def test_register_event(cb, event_loop):
any_dict = {
'property 1 (int)': 1,
'property 2 (str)': 'two',
'property 3 (float)': 3.0,
'property 4 (bool)': True,
}
async with FakeChatbaseServer(message_dict=EVENT_RESPONSE_DICT, loop=event_loop):
result = await cb.register_event('123456', 'test event', properties=any_dict, version='TestVersion')
assert result is True
async def test_register_event_without_task(cb, event_loop):
any_dict = {
'property 1 (int)': 1,
'property 2 (str)': 'two',
'property 3 (float)': 3.0,
'property 4 (bool)': True,
}
async with FakeChatbaseServer(message_dict=EVENT_RESPONSE_DICT, loop=event_loop):
result = await cb.register_event('123456', 'test event', properties=any_dict, task=False)
assert result is True
async def test_register_event_with_task(cb, event_loop):
any_dict = {
'property 1 (int)': 1,
'property 2 (str)': 'two',
'property 3 (float)': 3.0,
'property 4 (bool)': True,
}
async with FakeChatbaseServer(message_dict=EVENT_RESPONSE_DICT, loop=event_loop):
result = await cb.register_event('123456', 'test event', properties=any_dict, task=True)
assert isinstance(result, asyncio.Task)
done, pending = await asyncio.wait([result], return_when=asyncio.ALL_COMPLETED)
assert done.pop().result() is True
|
1675032
|
import os
from dotenv import load_dotenv
from mev.azure.run import get_auth_ws, run_train
load_dotenv()
ENVIRONMENT_VARIABLES = dict(
TENANT_ID=os.getenv("TENANT_ID"),
)
if __name__ == "__main__":
# Params
dataset_name_train = "featurize_all"
compute_target_name_1 = "mev-compute"
compute_target_name_2 = "mev-compute2"
source_dir_train = "./mev/azure/src_train"
script_name_train = "train.py"
output_name = 'train_all_regression_transformed'
kind = 'regression'
time_limit = int(3 * 3600)
# Auth to Azure ML
ws = get_auth_ws(ENVIRONMENT_VARIABLES["TENANT_ID"])
print("Running 'train' step...")
run_train(
dataset_name=dataset_name_train,
compute_target_names=[compute_target_name_1, compute_target_name_2],
source_dir=source_dir_train,
script_name=script_name_train,
ws=ws,
environment_variables=ENVIRONMENT_VARIABLES,
time_limit=time_limit,
output_name=output_name,
kind=kind
)
print("Train step finished.")
print("Done.")
|
1675040
|
from ..remote import RemoteModel
class DeviceViewerSecurityAclsCkpGridRemote(RemoteModel):
"""
| ``id:`` none
| ``attribute type:`` string
| ``DeviceID:`` none
| ``attribute type:`` string
| ``DeviceFilterSetID:`` none
| ``attribute type:`` string
| ``AccessList:`` none
| ``attribute type:`` string
| ``Direction:`` none
| ``attribute type:`` string
| ``policyName:`` none
| ``attribute type:`` string
| ``Firewall:`` none
| ``attribute type:`` string
| ``Status:`` none
| ``attribute type:`` string
"""
properties = ("id",
"DeviceID",
"DeviceFilterSetID",
"AccessList",
"Direction",
"policyName",
"Firewall",
"Status",
)
|
1675048
|
import os
TAG_MATCH_ALL = "*"
CUST_ATTRIB_DEPENDSON_NAME = "Depends On"
CONF_FILE_PATH_XDG = os.getenv("XDG_CONFIG_HOME", os.environ["HOME"] + "/.config") + "/taiga-stats/taiga-stats.conf"
CONF_FILE_PATH = "~/.taiga-stats.conf"
CONF_FILE_NAME_FMT = "taiga-stats.conf.template"
CFD_DATA_FILE_FMT = "cfd_{:s}.dat"
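# Illustrative example: CFD_DATA_FILE_FMT.format("my-tag") produces
# "cfd_my-tag.dat"; CONF_FILE_PATH_XDG falls back to ~/.config when
# XDG_CONFIG_HOME is unset.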
|
1675053
|
import base64
import concurrent.futures
import json
import logging
import os
import sys
import re
from urllib.parse import urlparse, urljoin
from bs4 import BeautifulSoup
from furl import furl
from globals import CRAWL_URL_RETRIES, PAGE_LOAD_TIMEOUT_SEC, NUM_CRAWLING_THREADS, URL_SLASH_REPLACEMENT_STR, \
VARIANCE_NUM_ITERATIONS, VARIANCE_INTERATIONS_INTERVAL_SEC
from retry import retry
from selenium.common.exceptions import TimeoutException
from seleniumwire import webdriver
from urllib3.exceptions import MaxRetryError
import time
from image_analysis import raw_screenshot_analysis, process_screenshot_iterations_consistency, save_and_mask_image
BASELINE_DIR = "baseline"
UPDATED_DIR = "updated"
def get_chrome_driver(driver_path):
"""
Instantiates a Chrome webdriver object
Args:
driver_path: path to the Chrome driver
Returns:
webdriver: returns a webdriver object
"""
options = webdriver.ChromeOptions()
options.page_load_strategy = 'normal'
options.add_argument("--disable-logging")
options.add_argument("--disable-login-animations")
options.add_argument("--disable-notifications")
options.add_argument("--disable-default-apps")
options.add_argument("--disable-extensions")
options.add_experimental_option("excludeSwitches", ["ignore-certificate-errors"])
options.add_argument("--disable-dev-shm-usage")
options.add_argument("--disable-browser-side-navigation")
options.add_argument("--headless")
options.add_argument("--hide-scrollbars")
options.add_argument('--log-level 3')
options.add_argument("--incognito")
options.add_argument("--no-zygote")
options.add_argument('--disable-gpu')
options.add_argument('--no-sandbox')
options.add_argument('--disable-infobars')
driver = webdriver.Chrome(driver_path, options=options)
driver.set_page_load_timeout(PAGE_LOAD_TIMEOUT_SEC)
return driver
def chrome_take_full_screenshot(driver, viewport_width):
"""
Takes full page screenshots at various width
Args:
driver: webdriver object
viewport_width: viewport width
Returns:
list: list of image raw data for the taken screenshots
"""
def send(cmd, params):
resource = "/session/{}/chromium/send_command_and_get_result".format(driver.session_id)
url = driver.command_executor._url + resource
body = json.dumps({'cmd': cmd, 'params': params})
response = driver.command_executor._request('POST', url, body)
return response.get('value')
def evaluate(script):
response = send('Runtime.evaluate', {'returnByValue': True, 'expression': script})
return response['result']['value']
send('Emulation.clearDeviceMetricsOverride', {})
metrics = evaluate(
"({" +
"width: " + str(viewport_width) + "," +
"height: 0," +
"deviceScaleFactor: window.devicePixelRatio || 1," +
"mobile: typeof window.orientation !== 'undefined'" +
"})")
send('Emulation.setDeviceMetricsOverride', metrics)
metrics = evaluate(
"({" +
"width: " + str(viewport_width) + "," +
"height: Math.max(innerHeight, document.body.scrollHeight, document.body.offsetHeight, document.documentElement.offsetHeight, document.documentElement.clientHeight, document.documentElement.scrollHeight)|0," +
"deviceScaleFactor: window.devicePixelRatio || 1," +
"mobile: typeof window.orientation !== 'undefined'" +
"})")
send('Emulation.setDeviceMetricsOverride', metrics)
screenshot = send('Page.captureScreenshot', {'format': 'png', 'fromSurface': True})
return base64.b64decode(screenshot['data'])
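# Example usage sketch (hypothetical driver path and URL, for illustration
# only):
#
#     driver = get_chrome_driver("/usr/local/bin/chromedriver")
#     driver.get("https://example.com")
#     png_bytes = chrome_take_full_screenshot(driver, viewport_width=1280)
#     with open("example_1280.png", "wb") as fh:
#         fh.write(png_bytes)
#     driver.quit()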
@retry(TimeoutException, tries=CRAWL_URL_RETRIES, delay=1)
def get_with_retry(driver, url):
driver.get(url)
class CrawlingContext:
def __init__(self, driver_path, crawl_max=1, max_depth=1, allowed_domains=None, auth_username=None,
auth_password=<PASSWORD>, crawled=None, depth=0, screenshot_tasks=[], executor=None, screenshot_res=None,
base_path=None, iterations=1, is_baseline=False):
"""Crawling context object
Args:
            driver_path: path to the Chrome driver
crawl_max: maximum number of links that should be returned
max_depth: maximum depth for crawling (crawling is breadth first)
allowed_domains: list of allowed domains. If empty all found URLs will be returned
auth_username: username to be used for basic authentication
auth_password: password to be used for basic authentication
crawled: list of already crawled URLs
depth: depth of the URL that will be scraped
iterations: num iterations on page
is_baseline: whether this is the baseline for the diff comparison
"""
self.driver_path = driver_path
self.crawl_max = crawl_max
self.max_depth = max_depth
self.allowed_domains = allowed_domains
self.auth_username = auth_username
self.auth_password = <PASSWORD>
self.crawled = crawled
self.depth = depth
self.screenshot_tasks = screenshot_tasks
self.executor = executor
self.screenshot_res = screenshot_res
self.base_path = base_path
self.iterations = iterations
self.is_baseline = is_baseline
def get_urls(parent_url, depth, context):
"""Recursively crawls the website passed via parent_url and returns the list of found URLs.
Args:
parent_url: URL that will be scraped
depth: depth of the URL that will be scraped
context: Crawling context object
Returns:
list: list of scraped URLs
"""
if depth > context.max_depth:
return
logging.info("Crawling URL " + parent_url)
url_list = [item["url"] for item in context.crawled]
driver = get_chrome_driver(context.driver_path)
try:
if context.auth_username and context.auth_password:
parsed_url = furl(parent_url)
parsed_url.username = context.auth_username
parsed_url.password = context.auth_password
get_with_retry(driver, str(parsed_url))
else:
get_with_retry(driver, parent_url)
save_screenshots_and_logs(parent_url, context.screenshot_res, context.base_path, driver,
context.allowed_domains, context)
html = driver.page_source.encode('utf-8')
soup = BeautifulSoup(html, features='html.parser')
urls = soup.findAll('a')
for a in set(urls):
url = a.get('href')
# determine absolute URL for relative links
if url and not urlparse(url).netloc:
url = urljoin(parent_url, url)
            # skip URLs whose href is missing or that execute javascript
if not is_valid_url(url, context.allowed_domains):
logging.debug("Skipping invalid URL " + str(url))
continue
            # cleanup URL
            url = url.rstrip('/')
if len(url_list) < context.crawl_max and url not in url_list:
logging.info("Identified link '{}' on URL '{}'".format(url, parent_url))
url_list.append(url)
context.crawled.append({"url": url, "visited": False, "depth": depth + 1})
for u in context.crawled:
if u['visited'] is False:
u['visited'] = True
future_to_url = context.executor.submit(get_urls, u["url"], u["depth"], context)
context.screenshot_tasks.append(future_to_url)
except MaxRetryError:
logger.error("Max retries {} reached while crawling URL {}".format(CRAWL_URL_RETRIES, parent_url))
except:
e = sys.exc_info()[0]
logger.exception("Error while crawling URL " + parent_url + " with error " + str(e))
finally:
logging.info("Finished crawling URL " + parent_url)
driver.quit()
def is_valid_url(url, allowed_domains):
    # skip invalid links, javascript links and anchor links
if not url or any(substr in url for substr in ("javascript", "#")):
logging.debug("{} skipped. URL not valid".format(url))
return False
    elif url.endswith((".js", ".mp4", ".mov", ".avi", ".opus", ".mp4v", ".3gpp", ".3gp2")):
logging.debug("{} skipped. Media type unsupported.".format(url))
return False
parsed_url = urlparse(url)
# skip URLs that are not in the list of allowed domains
if allowed_domains and parsed_url.netloc not in allowed_domains:
logging.debug("{} skipped. Not in list of allowed domains".format(url))
return False
return True
def get_job_url_path(base_path, url, domains):
url_dir = url.replace("\\", URL_SLASH_REPLACEMENT_STR) \
.replace("/", URL_SLASH_REPLACEMENT_STR) \
.replace("www.", "") \
.replace("http:", "") \
.replace("https:", "") \
.replace(URL_SLASH_REPLACEMENT_STR + URL_SLASH_REPLACEMENT_STR, URL_SLASH_REPLACEMENT_STR)
for domain in domains:
url_dir = url_dir.replace(domain, "")
if url_dir.startswith(URL_SLASH_REPLACEMENT_STR):
url_dir = url_dir[1:]
if url_dir.endswith(URL_SLASH_REPLACEMENT_STR):
url_dir = url_dir[:-1]
if not url_dir:
url_dir = URL_SLASH_REPLACEMENT_STR
return os.path.join(base_path, url_dir)
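# Illustrative example (assuming URL_SLASH_REPLACEMENT_STR is "_"; its actual
# value lives in globals and is not shown here):
# get_job_url_path("jobs/1/baseline", "http://example.com/about", ["example.com"])
# would yield roughly "jobs/1/baseline/_about".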
def save_screenshots_and_logs(url, screenshot_res, base_path, driver, domains, context):
"""Gets full page screenshots, console logs and network logs for a specific URL
Args:
url: URL to process
        screenshot_res: list specifying the widths for the screenshots that should be captured
base_path: path on disk where the screenshots should be saved
driver: Selenium driver
domains: crawled domains
context: crawl context
Returns:
None
"""
parsed_url = urlparse(url)
base_url = "" if parsed_url.netloc is None else parsed_url.netloc
logging.debug('Getting screenshots and logs for {}'.format(url))
path = get_job_url_path(base_path, url, domains)
if not os.path.exists(path):
os.makedirs(path)
console_logs = driver.get_log('browser')
for console_log in console_logs:
console_log.pop('timestamp', None)
with open(os.path.join(path, "console_logs"), 'w') as f:
f.write(json.dumps(console_logs))
network_logs = []
for request in driver.requests:
if request.response and request.path is not None:
trimmed_url_path = re.sub("=(\d{13}$|\d{10}$|\d{13}&|\d{10}&)", '', request.path.replace(base_url, ''))
network_logs.append({'path': trimmed_url_path, 'status_code': request.response.status_code})
with open(os.path.join(path, "network_logs"), 'w') as f:
f.write(json.dumps(network_logs))
for size in screenshot_res:
# initial warmup, not sure why this works
get_with_retry(driver, url)
time.sleep(VARIANCE_INTERATIONS_INTERVAL_SEC)
chrome_take_full_screenshot(driver, size)
screenshots = []
for i in range(context.iterations):
get_with_retry(driver, url)
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(VARIANCE_INTERATIONS_INTERVAL_SEC)
screenshots_data = chrome_take_full_screenshot(driver, size)
screenshots.append(screenshots_data)
file_path = os.path.join(path, "raw_{}_00.png".format(size))
if context.is_baseline:
error = process_screenshot_iterations_consistency(screenshots, file_path)
else:
error = save_and_mask_image(screenshots[0], file_path, file_path.replace(UPDATED_DIR, BASELINE_DIR))
if error is not None:
logging.error(error)
def crawl(url, max_depth, max_urls, screenshot_res, base_path, chromedriver_path, auth_username=None,
auth_password=<PASSWORD>, workers=NUM_CRAWLING_THREADS, is_baseline=False):
"""Crawls an URL, saves screenshots, network and console logs for the scraped URLs
Args:
url: URL to process
max_depth: maximum depth for crawling (crawling is breadth first)
max_urls: maximum number of links that should be returned
        screenshot_res: list specifying the widths for the screenshots that should be captured
base_path: path on disk where the screenshots should be saved
chromedriver_path: path to the Chrome driver
auth_username: username to be used for basic authentication
auth_password: password to be used for basic authentication
workers: number of worker threads
is_baseline: whether this is the baseline for the diff comparison
Returns:
list: list of encountered errors
"""
if not os.path.exists(base_path):
os.makedirs(base_path)
allowed_domains = [urlparse(url).netloc]
crawl_list = [{"url": url, "visited": True, "depth": 0}]
screenshot_tasks = []
task_errors = []
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
iterations = 1
if is_baseline:
iterations = VARIANCE_NUM_ITERATIONS
context = CrawlingContext(chromedriver_path, max_urls, max_depth, allowed_domains,
auth_username, auth_password, crawled=crawl_list,
screenshot_tasks=screenshot_tasks, executor=executor,
screenshot_res=screenshot_res, base_path=base_path, iterations=iterations,
is_baseline=is_baseline)
future_to_url = executor.submit(get_urls, url, 0, context)
screenshot_tasks.append(future_to_url)
while screenshot_tasks:
fs = screenshot_tasks[:]
for f in fs:
screenshot_tasks.remove(f)
try:
concurrent.futures.wait(fs)
except Exception as exc:
error_message = 'generated an exception: %s' % exc
logging.error(error_message)
task_errors.append(error_message)
url_list = [entry["url"] for entry in crawl_list if entry['visited'] is True]
logging.info("Visited urls" + str(url_list))
with open(os.path.join(base_path, "scraped_urls"), 'w', encoding='UTF-8') as f:
f.write("\n".join(url_list))
return task_errors
def verify_chrome_driver(driver_path):
try:
options = webdriver.ChromeOptions()
options.add_argument('--no-sandbox')
options.add_argument("--headless")
options.add_argument("--disable-dev-shm-usage")
options.add_argument("--hide-scrollbars")
options.page_load_strategy = 'normal'
driver = webdriver.Chrome(driver_path, options=options)
if driver is not None:
return None
else:
return "Invalid chrome driver loaded"
except Exception as e:
return "Error loading chrome driver" + repr(e)
def crawl_job(job_id, baseline_url, updated_url, max_depth, max_urls, workers, screenshot_res,
auth_baseline_username=None, auth_baseline_password=<PASSWORD>, auth_updated_username=None,
auth_updated_password=<PASSWORD>, chromedriver_path=None):
"""
Runs crawl jobs for the baseline and the updated URLs
Args:
        job_id: job identifier; output is written under jobs/<job_id>
baseline_url: baseline URL
updated_url: updated URL
max_depth: maximum depth for crawling (crawling is breadth first)
max_urls: maximum number of links that should be returned
workers: number of worker threads
        screenshot_res: list specifying the widths for the screenshots that should be captured
chromedriver_path: path to the Chrome driver
auth_baseline_username: username to be used for basic authentication for the baseline URL
auth_baseline_password: password to be used for basic authentication for the baseline URL
auth_updated_username: username to be used for basic authentication for the updated URL
auth_updated_password: password to be used for basic authentication for the updated URL
Returns:
list: list of encountered errors
"""
# disable most logging for selenium
selenium_logger = logging.getLogger('selenium.webdriver.remote.remote_connection')
selenium_logger.setLevel(logging.ERROR)
max_depth = int(max_depth)
max_urls = int(max_urls)
if not chromedriver_path:
chromedriver_path = os.environ['CHROMEDRIVER_PATH']
chrome_driver_error = verify_chrome_driver(chromedriver_path)
if chrome_driver_error is not None:
return chrome_driver_error
base_path = os.path.join("jobs", job_id)
baseline_path = os.path.join(base_path, BASELINE_DIR)
updated_path = os.path.join(base_path, UPDATED_DIR)
baseline_errors = crawl(baseline_url, max_depth, max_urls, screenshot_res, baseline_path, chromedriver_path,
auth_baseline_username, auth_baseline_password, workers, is_baseline=True)
updated_errors = crawl(updated_url, max_depth, max_urls, screenshot_res, updated_path, chromedriver_path,
auth_updated_username, auth_updated_password, workers, is_baseline=False)
return baseline_errors + updated_errors
if __name__ == "__main__":
FORMAT = '%(asctime)-15s [%(levelname)s] %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger('crawler')
logger.setLevel(logging.DEBUG)
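    # Hedged usage sketch: the values below are illustrative only and are not
    # part of the original entry point. crawl_job() runs the baseline and
    # updated crawls and returns the accumulated error messages.
    # errors = crawl_job(
    #     job_id="example-job",
    #     baseline_url="https://example.com",
    #     updated_url="https://staging.example.com",
    #     max_depth=2,
    #     max_urls=50,
    #     workers=4,
    #     screenshot_res=[1280, 375],
    # )
    # if errors:
    #     logger.error("Crawl finished with errors: %s", errors)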
|
1675069
|
from django.conf.urls import url, include
from niji import urls as niji_urls
urlpatterns = [
url(r'testurl', include(niji_urls, namespace="niji")),
]
|
1675081
|
def callee_a():
pass
def callee_b():
callee_c()
def callee_c():
pass
def caller():
callee_a()
callee_b()
callee_a()
while True:
caller()
|
1675104
|
import numpy as np
import matplotlib.pyplot as plt
Sky = [128,128,128]
Building = [128,0,0]
Pole = [192,192,128]
Road = [128,64,128]
Pavement = [60,40,222]
Tree = [128,128,0]
SignSymbol = [192,128,128]
Fence = [64,64,128]
Car = [64,0,128]
Pedestrian = [64,64,0]
Bicyclist = [0,128,192]
Unlabelled = [0,0,0]
DSET_MEAN = [0.41189489566336, 0.4251328133025, 0.4326707089857]
DSET_STD = [0.27413549931506, 0.28506257482912, 0.28284674400252]
label_colours = np.array([Sky, Building, Pole, Road, Pavement,
Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled])
def view_annotated(tensor, plot=True):
temp = tensor.numpy()
r = temp.copy()
g = temp.copy()
b = temp.copy()
for l in range(0,11):
r[temp==l]=label_colours[l,0]
g[temp==l]=label_colours[l,1]
b[temp==l]=label_colours[l,2]
rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
rgb[:,:,0] = (r/255.0)#[:,:,0]
rgb[:,:,1] = (g/255.0)#[:,:,1]
rgb[:,:,2] = (b/255.0)#[:,:,2]
if plot:
plt.imshow(rgb)
plt.show()
else:
return rgb
def decode_image(tensor):
inp = tensor.numpy().transpose((1, 2, 0))
mean = np.array(DSET_MEAN)
std = np.array(DSET_STD)
inp = std * inp + mean
return inp
def view_image(tensor):
inp = decode_image(tensor)
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
plt.show()
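# Hedged usage sketch (these helpers expect PyTorch tensors via .numpy(), but
# torch itself is not imported in this module; the shapes below are illustrative):
# import torch
# fake_labels = torch.randint(0, 12, (360, 480))   # per-pixel class indices
# rgb = view_annotated(fake_labels, plot=False)    # (360, 480, 3) colour-mapped array
# fake_image = torch.rand(3, 360, 480)             # normalized CHW image
# view_image(fake_image)                           # de-normalizes with DSET_MEAN/STD and plots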
|
1675105
|
from UE4Parse.Assets.Exports.ExportRegistry import register_export
from UE4Parse.BinaryReader import BinaryStream
from UE4Parse.Assets.Exports.UObjects import UObject
from UE4Parse.Assets.Objects.FName import FName
from UE4Parse.Assets.Objects.FStringTable import FStringTable
@register_export
class UStringTable(UObject):
StringTable: FStringTable
    StringTableId: FName
def __init__(self, reader: BinaryStream):
super().__init__(reader)
def deserialize(self, validpos):
super().deserialize(validpos)
reader = self.reader
self.StringTable = FStringTable(reader)
self.StringTableId = reader.readFName()
def GetValue(self) -> dict:
return {
"StringTable": self.StringTable.GetValue(),
"StringTableId": self.StringTableId.GetValue()
}
# self.Dict["StringTable"] = self.StringTable
# self.Dict["StringTableId"] = self.StringTableId
|
1675136
|
import sys
from pathlib import Path
from docutils.core import publish_cmdline
from invoke import task
from rellu import initialize_labels, ReleaseNotesGenerator, Version
from rellu.tasks import clean
from robot.libdoc import libdoc
assert Path.cwd() == Path(__file__).parent
REPOSITORY = 'robotframework/SSHLibrary'
VERSION_PATH = Path('src/SSHLibrary/version.py')
VERSION_PATTERN = "VERSION = '(.*)'"
RELEASE_NOTES_PATH = Path('docs/SSHLibrary-{version}.rst')
RELEASE_NOTES_TITLE = 'SSHLibrary {version}'
RELEASE_NOTES_INTRO = '''
SSHLibrary_ is a `Robot Framework`_ test library for SSH and SFTP.
SSHLibrary {version} is a new release with
**UPDATE** enhancements and bug fixes.
All issues targeted for SSHLibrary {version.milestone} can be found from
the `issue tracker`_.
**REMOVE the previous note about all issues in the tracker with final
releases or otherwise if release notes contain all issues.**
**ADD more intro stuff if needed...**
**REMOVE ``--pre`` from the next command with final releases.**
If you have pip_ installed, just run
::
pip install --pre --upgrade robotframework-sshlibrary
to install the latest release or use
::
pip install robotframework-sshlibrary=={version}
to install exactly this version. Alternatively you can download the source
distribution from PyPI_ and install it manually.
SSHLibrary {version} was released on {date}.
.. _Robot Framework: http://robotframework.org
.. _SSHLibrary: https://github.com/robotframework/SSHLibrary
.. _pip: http://pip-installer.org
.. _PyPI: https://pypi.python.org/pypi/robotframework-sshlibrary
.. _issue tracker: https://github.com/robotframework/SSHLibrary/issues?q=milestone%3A{version.milestone}
'''
@task
def kw_docs(ctx):
"""Generates the library keyword documentation
Documentation is generated by using the Libdoc tool.
"""
libdoc(str(Path('src/SSHLibrary')),
str(Path('docs/SSHLibrary.html')))
@task
def project_docs(ctx):
"""Generate project documentation.
These docs are visible at http://robotframework.org/SSHLibrary/.
"""
args = ['--stylesheet=style.css,extra.css',
'--link-stylesheet',
'README.rst',
'docs/index.html']
publish_cmdline(writer_name='html5', argv=args)
print(Path(args[-1]).absolute())
@task
def set_version(ctx, version):
"""Set project version in `src/SSHLibrary/version.py`` file.
Args:
version: Project version to set or ``dev`` to set development version.
Following PEP-440 compatible version numbers are supported:
- Final version like 3.0 or 3.1.2.
- Alpha, beta or release candidate with ``a``, ``b`` or ``rc`` postfix,
respectively, and an incremented number like 3.0a1 or 3.0.1rc1.
- Development version with ``.dev`` postfix and an incremented number like
3.0.dev1 or 3.1a1.dev2.
When the given version is ``dev``, the existing version number is updated
to the next suitable development version. For example, 3.0 -> 3.0.1.dev1,
3.1.1 -> 3.1.2.dev1, 3.2a1 -> 3.2a2.dev1, 3.2.dev1 -> 3.2.dev2.
"""
version = Version(version, VERSION_PATH, VERSION_PATTERN)
version.write()
print(version)
@task
def print_version(ctx):
"""Print the current project version."""
print(Version(path=VERSION_PATH, pattern=VERSION_PATTERN))
@task
def release_notes(ctx, version=None, username=None, password=None, write=False):
"""Generates release notes based on issues in the issue tracker.
Args:
version: Generate release notes for this version. If not given,
            generate them for the current version.
username: GitHub username.
        password: GitHub password.
write: When set to True, write release notes to a file overwriting
possible existing file. Otherwise just print them to the
terminal.
Username and password can also be specified using ``GITHUB_USERNAME`` and
``GITHUB_PASSWORD`` environment variable, respectively. If they aren't
specified at all, communication with GitHub is anonymous and typically
pretty slow.
"""
version = Version(version, VERSION_PATH, VERSION_PATTERN)
file = RELEASE_NOTES_PATH if write else sys.stdout
generator = ReleaseNotesGenerator(REPOSITORY, RELEASE_NOTES_TITLE,
RELEASE_NOTES_INTRO)
generator.generate(version, username, password, file)
@task
def init_labels(ctx, username=None, password=None):
"""Initialize project by setting labels in the issue tracker.
Args:
username: GitHub username.
        password: GitHub password.
Username and password can also be specified using ``GITHUB_USERNAME`` and
``GITHUB_PASSWORD`` environment variable, respectively.
    Should only be executed once when taking the ``rellu`` tooling into use or
    when the labels it uses have changed.
"""
initialize_labels(REPOSITORY, username, password)
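# Hedged usage sketch: these are Invoke tasks, typically run from the project
# root (Invoke exposes underscored task names with dashes by default); the
# version number below is illustrative only:
#   invoke print-version
#   invoke set-version 3.8.0
#   invoke release-notes --version 3.8.0 --write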
|
1675148
|
import functools
import math
from math import sqrt
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from torch import einsum
from models.diffusion.unet_diffusion import AttentionBlock
from models.gpt_voice.lucidrains_dvae import DiscreteVAE
from models.stylegan.stylegan2_rosinality import EqualLinear
from models.vqvae.vqvae import Quantize
from trainer.networks import register_model
from utils.util import opt_get
def default(val, d):
return val if val is not None else d
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
class ModulatedConv1d(nn.Module):
def __init__(
self,
in_channel,
out_channel,
kernel_size,
style_dim,
demodulate=True,
initial_weight_factor=1,
):
super().__init__()
self.eps = 1e-8
self.kernel_size = kernel_size
self.in_channel = in_channel
self.out_channel = out_channel
fan_in = in_channel * kernel_size ** 2
self.scale = initial_weight_factor / math.sqrt(fan_in)
self.padding = kernel_size // 2
self.weight = nn.Parameter(
torch.randn(1, out_channel, in_channel, kernel_size)
)
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def forward(self, input, style):
batch, in_channel, d = input.shape
style = self.modulation(style).view(batch, 1, in_channel, 1)
weight = self.scale * self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3]) + 1e-8)
weight = weight * demod.view(batch, self.out_channel, 1, 1)
weight = weight.view(
batch * self.out_channel, in_channel, self.kernel_size
)
input = input.view(1, batch * in_channel, d)
out = F.conv1d(input, weight, padding=self.padding, groups=batch)
_, _, d = out.shape
out = out.view(batch, self.out_channel, d)
return out
class ChannelAttentionModule(nn.Module):
def __init__(self, channels_in, channels_out, attention_dim, layers, num_heads=1):
super().__init__()
self.channels_in = channels_in
self.channels_out = channels_out
# This is the bypass. It performs the same computation, without attention. It is responsible for stabilizing
# training early on by being more optimizable.
self.bypass = nn.Conv1d(channels_in, channels_out, kernel_size=1)
self.positional_embeddings = nn.Embedding(channels_out, attention_dim)
self.first_layer = ModulatedConv1d(1, attention_dim, kernel_size=1, style_dim=channels_in, initial_weight_factor=.1)
self.layers = nn.Sequential(*[AttentionBlock(attention_dim, num_heads=num_heads) for _ in range(layers)])
self.post_attn_layer = nn.Conv1d(attention_dim, 1, kernel_size=1)
def forward(self, inp):
bypass = self.bypass(inp)
emb = self.positional_embeddings(torch.arange(0, self.channels_out, device=inp.device)).permute(1,0).unsqueeze(0)
b, c, w = bypass.shape
# Reshape bypass so channels become structure and structure becomes part of the batch.
x = bypass.permute(0,2,1).reshape(b*w, c).unsqueeze(1)
# Reshape the input as well so it can be fed into the stylizer.
style = inp.permute(0,2,1).reshape(b*w, self.channels_in)
x = self.first_layer(x, style)
x = emb + x
x = self.layers(x)
        x = x - emb  # Subtract emb to further stabilize early training, where the attention layers do nothing.
out = self.post_attn_layer(x).squeeze(1)
out = out.view(b,w,self.channels_out).permute(0,2,1)
return bypass + out
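# Shape sketch for ChannelAttentionModule.forward (illustrative note, not from
# the original source): inp (b, channels_in, w) -> bypass (b, channels_out, w).
# The bypass is reshaped to (b*w, 1, channels_out) so the attention stack runs
# over the channel axis of every position independently, then the result is
# folded back to (b, channels_out, w) and added onto the bypass.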
class ResBlock(nn.Module):
def __init__(self, chan, conv, activation):
super().__init__()
self.net = nn.Sequential(
conv(chan, chan, 3, padding = 1),
activation(),
conv(chan, chan, 3, padding = 1),
activation(),
conv(chan, chan, 1)
)
def forward(self, x):
return self.net(x) + x
class UpsampledConv(nn.Module):
def __init__(self, conv, *args, **kwargs):
super().__init__()
assert 'stride' in kwargs.keys()
self.stride = kwargs['stride']
del kwargs['stride']
self.conv = conv(*args, **kwargs)
def forward(self, x):
up = nn.functional.interpolate(x, scale_factor=self.stride, mode='nearest')
return self.conv(up)
class ChannelAttentionDVAE(nn.Module):
def __init__(
self,
positional_dims=2,
num_tokens = 512,
codebook_dim = 512,
num_layers = 3,
num_resnet_blocks = 0,
hidden_dim = 64,
channel_attention_dim = 64,
channels = 3,
stride = 2,
kernel_size = 4,
use_transposed_convs = True,
encoder_norm = False,
activation = 'relu',
smooth_l1_loss = False,
straight_through = False,
normalization = None, # ((0.5,) * 3, (0.5,) * 3),
record_codes = False,
):
super().__init__()
assert num_layers >= 1, 'number of layers must be greater than or equal to 1'
has_resblocks = num_resnet_blocks > 0
self.num_tokens = num_tokens
self.num_layers = num_layers
self.straight_through = straight_through
self.codebook = Quantize(codebook_dim, num_tokens)
self.positional_dims = positional_dims
assert positional_dims > 0 and positional_dims < 3 # This VAE only supports 1d and 2d inputs for now.
if positional_dims == 2:
conv = nn.Conv2d
conv_transpose = nn.ConvTranspose2d
else:
conv = nn.Conv1d
conv_transpose = nn.ConvTranspose1d
if not use_transposed_convs:
conv_transpose = functools.partial(UpsampledConv, conv)
if activation == 'relu':
act = nn.ReLU
elif activation == 'silu':
act = nn.SiLU
else:
            raise NotImplementedError(f"Unsupported activation: {activation}")
enc_chans = [hidden_dim * 2 ** i for i in range(num_layers)]
dec_chans = list(reversed(enc_chans))
enc_chans = [channels, *enc_chans]
dec_init_chan = codebook_dim if not has_resblocks else dec_chans[0]
dec_chans = [dec_init_chan, *dec_chans]
enc_chans_io, dec_chans_io = map(lambda t: list(zip(t[:-1], t[1:])), (enc_chans, dec_chans))
enc_layers = []
dec_layers = []
pad = (kernel_size - 1) // 2
for (enc_in, enc_out), (dec_in, dec_out) in zip(enc_chans_io, dec_chans_io):
enc_layers.append(nn.Sequential(conv(enc_in, enc_out, kernel_size, stride = stride, padding = pad), act()))
if encoder_norm:
enc_layers.append(nn.GroupNorm(8, enc_out))
dec_layers.append(nn.Sequential(conv_transpose(dec_in, dec_out, kernel_size, stride = stride, padding = pad), act()))
for _ in range(num_resnet_blocks):
dec_layers.insert(0, ResBlock(dec_chans[1], conv, act))
enc_layers.append(ResBlock(enc_chans[-1], conv, act))
if num_resnet_blocks > 0:
dec_layers.insert(0, conv(codebook_dim, dec_chans[1], 1))
enc_layers.append(conv(enc_chans[-1], codebook_dim, 1))
dec_layers.append(ChannelAttentionModule(dec_chans[-1], channels, channel_attention_dim, layers=3, num_heads=1))
self.encoder = nn.Sequential(*enc_layers)
self.decoder = nn.Sequential(*dec_layers)
self.loss_fn = F.smooth_l1_loss if smooth_l1_loss else F.mse_loss
# take care of normalization within class
self.normalization = normalization
self.record_codes = record_codes
if record_codes:
self.codes = torch.zeros((1228800,), dtype=torch.long)
self.code_ind = 0
self.internal_step = 0
def norm(self, images):
        if self.normalization is None:
return images
means, stds = map(lambda t: torch.as_tensor(t).to(images), self.normalization)
arrange = 'c -> () c () ()' if self.positional_dims == 2 else 'c -> () c ()'
means, stds = map(lambda t: rearrange(t, arrange), (means, stds))
images = images.clone()
images.sub_(means).div_(stds)
return images
def get_debug_values(self, step, __):
dbg = {}
if self.record_codes:
# Report annealing schedule
dbg.update({'histogram_codes': self.codes})
return dbg
@torch.no_grad()
@eval_decorator
def get_codebook_indices(self, images):
img = self.norm(images)
logits = self.encoder(img).permute((0,2,3,1) if len(img.shape) == 4 else (0,2,1))
sampled, commitment_loss, codes = self.codebook(logits)
return codes
def decode(
self,
img_seq
):
image_embeds = self.codebook.embed_code(img_seq)
b, n, d = image_embeds.shape
kwargs = {}
if self.positional_dims == 1:
arrange = 'b n d -> b d n'
else:
h = w = int(sqrt(n))
arrange = 'b (h w) d -> b d h w'
kwargs = {'h': h, 'w': w}
image_embeds = rearrange(image_embeds, arrange, **kwargs)
images = self.decoder(image_embeds)
return images
def infer(self, img):
img = self.norm(img)
logits = self.encoder(img).permute((0,2,3,1) if len(img.shape) == 4 else (0,2,1))
sampled, commitment_loss, codes = self.codebook(logits)
return self.decode(codes)
# Note: This module is not meant to be run in forward() except while training. It has special logic which performs
# evaluation using quantized values when it detects that it is being run in eval() mode, which will be substantially
# more lossy (but useful for determining network performance).
def forward(
self,
img
):
img = self.norm(img)
logits = self.encoder(img).permute((0,2,3,1) if len(img.shape) == 4 else (0,2,1))
sampled, commitment_loss, codes = self.codebook(logits)
sampled = sampled.permute((0,3,1,2) if len(img.shape) == 4 else (0,2,1))
if self.training:
out = sampled
for d in self.decoder:
out = d(out)
else:
# This is non-differentiable, but gives a better idea of how the network is actually performing.
out = self.decode(codes)
# reconstruction loss
recon_loss = self.loss_fn(img, out, reduction='none')
# This is so we can debug the distribution of codes being learned.
if self.record_codes and self.internal_step % 50 == 0:
codes = codes.flatten()
l = codes.shape[0]
i = self.code_ind if (self.codes.shape[0] - self.code_ind) > l else self.codes.shape[0] - l
self.codes[i:i+l] = codes.cpu()
self.code_ind = self.code_ind + l
if self.code_ind >= self.codes.shape[0]:
self.code_ind = 0
self.internal_step += 1
return recon_loss, commitment_loss, out
def convert_from_dvae(dvae_state_dict_file):
params = {
'channels': 80,
'positional_dims': 1,
'num_tokens': 8192,
'codebook_dim': 2048,
'hidden_dim': 512,
'stride': 2,
'num_resnet_blocks': 3,
'num_layers': 2,
'record_codes': True,
}
dvae = DiscreteVAE(**params)
dvae.load_state_dict(torch.load(dvae_state_dict_file), strict=True)
cdvae = ChannelAttentionDVAE(channel_attention_dim=256, **params)
mk, uk = cdvae.load_state_dict(dvae.state_dict(), strict=False)
for k in mk:
assert 'decoder.6' in k
for k in uk:
assert 'decoder.6' in k
cdvae.decoder[-1].bypass.load_state_dict(dvae.decoder[-1].state_dict())
torch.save(cdvae.state_dict(), 'converted_cdvae.pth')
@register_model
def register_dvae_channel_attention(opt_net, opt):
return ChannelAttentionDVAE(**opt_get(opt_net, ['kwargs'], {}))
if __name__ == '__main__':
convert_from_dvae('D:\\dlas\\experiments\\train_dvae_clips\\models\\20000_generator.pth')
'''
v = ChannelAttentionDVAE(channels=80, normalization=None, positional_dims=1, num_tokens=4096, codebook_dim=4096,
hidden_dim=256, stride=2, num_resnet_blocks=2, kernel_size=3, num_layers=2, use_transposed_convs=False)
o=v(torch.randn(1,80,256))
print(v.get_debug_values(0, 0))
print(o[-1].shape)
'''
|
1675214
|
from jinja2 import Environment, PackageLoader
env = Environment(loader=PackageLoader('metapipe', 'templates'))
|
1675251
|
from math import floor, inf
from dndme.commands import (
Command,
convert_to_int_or_dice_expr,
convert_to_oxford_comma_string,
)
class Rest(Command):
keywords = ["rest"]
help_text = """{keyword}
{divider}
Summary: Make characters in the current combat group take a rest.
    Characters may take a long rest, in which case:
* all are fully healed
* game time is advanced 8 hours
Characters may take a short rest, in which case:
* each has an opportunity to heal some hit points
* game time is advanced 1 hour
Usage: {keyword} [long|short]
Example:
{keyword} long
{keyword} short
"""
LONG_REST_DURATION = (8, 0)
SHORT_REST_DURATION = (1, 0)
def get_suggestions(self, words):
if len(words) == 2:
return ["long", "short"]
    def do_command(self, *args):
        if not args:
            print("What kind of rest?")
            return
        if args[0] == "long":
self.long_rest()
elif args[0] == "short":
self.short_rest()
else:
print("Sorry; only 'long' and 'short' rests are supported")
def long_rest(self):
combat = self.game.combat
party = list(sorted(combat.characters.items()))
names = []
for name, character in party:
names.append(name)
character.cur_hp = character.max_hp
days = self.game.clock.adjust_time(*self.LONG_REST_DURATION)
self.game.calendar.adjust_date(days)
self.game.changed = True
self._do_confirmation(names, "long")
def short_rest(self):
combat = self.game.combat
party = list(sorted(combat.characters.items()))
names = []
for name, character in party:
names.append(name)
amount = self.safe_input(
f"Heal {name} by",
default=0,
converter=convert_to_int_or_dice_expr,
)
if amount:
character.cur_hp += amount
print(
f"{name} healed by {amount}. "
f"Now: {character.cur_hp}/{character.max_hp}"
)
days = self.game.clock.adjust_time(*self.SHORT_REST_DURATION)
self.game.calendar.adjust_date(days)
self.game.changed = True
self._do_confirmation(names, "short")
def _do_confirmation(self, names, rest_type):
verb = "take" if len(names) > 1 else "takes"
names = convert_to_oxford_comma_string(names)
print(f"Okay; {names} {verb} a {rest_type} rest.")
|
1675264
|
from examples.contract.cerberus import * # noqa: F401, F403
from examples.contract.common.coroutines import * # noqa: F401, F403
|
1675305
|
from copy import deepcopy
from unittest.mock import patch
import pytest
from click.testing import CliRunner
from great_expectations import DataContext
from great_expectations.cli.v012 import cli
from great_expectations.cli.v012.datasource import _collect_snowflake_credentials
from great_expectations.data_context.util import file_relative_path
from great_expectations.exceptions import DatasourceKeyPairAuthBadPassphraseError
def test_snowflake_user_password_credentials_exit(empty_data_context):
"""Test an empty project and after adding a single datasource."""
project_root_dir = empty_data_context.root_directory
context = DataContext(project_root_dir)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["datasource", "new", "-d", project_root_dir],
catch_exceptions=False,
input="2\n4\nmy_snowflake_db\n1\nuser\nABCD.us-east-1\ndefault_db\ndefault_schema\nxsmall\npublic\npassword\nn\n",
)
stdout = result.stdout.strip()
assert "ok, exiting now" in stdout.lower()
@patch("click.prompt")
def test_snowflake_user_password_credentials(mock_prompt):
mock_prompt.side_effect = [
"1",
"user",
"ABCD.us-east-1",
"default_db",
"default_schema",
"xsmall",
"public",
"password",
]
credentials = _collect_snowflake_credentials(None)
assert credentials == {
"drivername": "snowflake",
"database": "default_db",
"host": "ABCD.us-east-1",
"password": "password",
"query": {"role": "public", "schema": "default_schema", "warehouse": "xsmall"},
"username": "user",
}
@patch("click.prompt")
def test_snowflake_sso_credentials(mock_prompt):
mock_prompt.side_effect = [
"2",
"user",
"ABCD.us-east-1",
"default_db",
"default_schema",
"xsmall",
"public",
"externalbrowser",
]
credentials = _collect_snowflake_credentials(None)
assert credentials == {
"drivername": "snowflake",
"database": "default_db",
"host": "ABCD.us-east-1",
"connect_args": {
"authenticator": "externalbrowser",
},
"query": {"role": "public", "schema": "default_schema", "warehouse": "xsmall"},
"username": "user",
}
@patch("click.prompt")
def test_snowflake_key_pair_credentials(mock_prompt, basic_sqlalchemy_datasource):
database_key_path_pass = file_relative_path(
__file__, "../../test_fixtures/database_key_test.p8"
)
mock_prompt.side_effect = [
"3",
"user",
"ABCD.us-east-1",
"default_db",
"default_schema",
"xsmall",
"public",
database_key_path_pass,
"test123",
]
credentials = _collect_snowflake_credentials(None)
assert credentials == {
"drivername": "snowflake",
"database": "default_db",
"host": "ABCD.us-east-1",
"private_key_path": database_key_path_pass,
"private_key_passphrase": "<PASSWORD>",
"query": {"role": "public", "schema": "default_schema", "warehouse": "xsmall"},
"username": "user",
}
# making sure with the correct params the key is read correctly
basic_sqlalchemy_datasource._get_sqlalchemy_key_pair_auth_url(
"snowflake", deepcopy(credentials)
)
# check that with a bad pass phrase an informative message is returned to the user
credentials["private_key_passphrase"] = "<PASSWORD>"
with pytest.raises(DatasourceKeyPairAuthBadPassphraseError) as e:
basic_sqlalchemy_datasource._get_sqlalchemy_key_pair_auth_url(
"snowflake", deepcopy(credentials)
)
assert "passphrase incorrect" in e.value.message
# check that with no pass the key is read correctly
database_key_path_no_pass = file_relative_path(
__file__, "../../test_fixtures/database_key_test_no_pass.p8"
)
credentials["private_key_path"] = database_key_path_no_pass
credentials["private_key_passphrase"] = ""
(
sqlalchemy_uri,
create_engine_kwargs,
) = basic_sqlalchemy_datasource._get_sqlalchemy_key_pair_auth_url(
"snowflake", deepcopy(credentials)
)
assert (
str(sqlalchemy_uri)
== "snowflake://user@ABCD.us-east-1/default_db?role=public&schema=default_schema&warehouse=xsmall"
)
assert create_engine_kwargs.get("connect_args", {}).get(
"private_key", ""
) # check that the private_key is not empty
|
1675321
|
from __future__ import absolute_import, division, print_function
import inspect
import itertools
import os
import warnings
from collections import defaultdict
from contextlib import contextmanager
from numbers import Number
from typing import Optional, Tuple
import numpy as np
import pandas as pd
import scanpy as sc
import scipy as sp
import tensorflow as tf
from anndata._core.aligned_mapping import AxisArrays
from bigarray import MmapArrayWriter
from scipy.sparse import issparse
from scipy.stats import pearsonr, spearmanr
from six import string_types
from sklearn.cluster import MiniBatchKMeans, SpectralClustering
from sklearn.decomposition import IncrementalPCA
from sklearn.exceptions import ConvergenceWarning
from sklearn.feature_selection import (mutual_info_classif,
mutual_info_regression)
from sklearn.mixture import GaussianMixture
from odin import visual as vs
from odin.search import diagonal_linear_assignment
from odin.stats import (describe, is_discrete, sparsity_percentage,
train_valid_test_split)
from odin.utils import (MPI, IndexedList, as_tuple, batching, cache_memory,
catch_warnings_ignore, cpu_count, is_primitive)
from odin.utils.crypto import md5_checksum
from sisua.data._single_cell_base import BATCH_SIZE, _OMICbase
from sisua.data.const import MARKER_ADT_GENE, MARKER_ADTS, MARKER_GENES, OMIC
from sisua.data.utils import (apply_artificial_corruption, get_library_size,
is_binary_dtype, is_categorical_dtype,
standardize_protein_name)
from sisua.label_threshold import ProbabilisticEmbedding
# ===========================================================================
# Helper
# ===========================================================================
def _threshold(x, nmin=2, nmax=5):
if x.ndim == 1:
x = x[:, np.newaxis]
models = []
aic = []
for n_components in range(int(nmin), int(nmax)):
gmm = GaussianMixture(n_components=n_components, random_state=1)
gmm.fit(x)
models.append(gmm)
aic.append(gmm.aic(x))
# select the best model
  gmm = models[np.argmin(aic)]  # lower AIC is better
y = gmm.predict(x)
idx = np.argmax(gmm.means_.ravel())
  return (y == idx).astype(bool)
# ===========================================================================
# Main
# ===========================================================================
class _OMICanalyzer(_OMICbase):
def get_x_probs(self, omic=None):
r""" Return the probability embedding of an OMIC """
return self.probabilistic_embedding(omic=omic)[1]
def get_x_bins(self, omic=None):
r""" Return the binary embedding of an OMIC """
return self.probabilistic_embedding(omic=omic)[2]
# ******************** transformation ******************** #
def corrupt(self,
omic=None,
dropout_rate=0.2,
retain_rate=0.2,
distribution='binomial',
inplace=True,
seed=1):
r"""
    omic : `sisua.data.OMIC`, which OMIC type will be corrupted
    dropout_rate : scalar (0.0 - 1.0), (default=0.2)
      how many entries (in percent) will be selected for corruption.
    retain_rate : scalar (0.0 - 1.0), (default=0.2)
      how much percent of counts retained their original values.
    distribution : {'binomial', 'uniform'} (default='binomial')
    inplace : `bool` (default=True). Perform computation inplace or return
      new `SingleCellOMIC` with the corrupted data.
    seed : `int` (default=1). Seed for the random state.
"""
if omic is None:
omic = self.current_omic
om = self if inplace else self.copy()
om._record('corrupt', locals())
if not (0. < retain_rate < 1. or 0. < dropout_rate < 1.):
return om
for o in omic:
apply_artificial_corruption(om.numpy(o),
dropout=dropout_rate,
retain_rate=retain_rate,
distribution=distribution,
copy=False,
seed=seed)
om._calculate_statistics(o)
return om
def filter_highly_variable_genes(self,
min_disp: float = 1.0,
max_disp: float = np.inf,
min_mean: float = 0.01,
max_mean: float = 8,
n_top_genes: int = 1000,
n_bins: int = 20,
flavor: str = 'seurat',
inplace: bool = True):
r""" Annotate highly variable genes [Satija15]_ [Zheng17]_.
https://www.rdocumentation.org/packages/Seurat/versions/2.3.4/topics/FindVariableGenes
`Expects logarithmized data`.
Depending on `flavor`, this reproduces the R-implementations of Seurat
[Satija15]_ and Cell Ranger [Zheng17]_.
The normalized dispersion is obtained by scaling with the mean and standard
deviation of the dispersions for genes falling into a given bin for mean
expression of genes. This means that for each bin of mean expression, highly
variable genes are selected.
Arguments:
      min_disp : `float`, optional (default=1.0)
        If `n_top_genes` is not `None`, this and all other cutoffs for
        the means and the normalized dispersions are ignored.
      max_disp : `float`, optional (default=`np.inf`)
        If `n_top_genes` is not `None`, this and all other cutoffs for
        the means and the normalized dispersions are ignored.
      min_mean : `float`, optional (default=0.01)
        If `n_top_genes` is not `None`, this and all other cutoffs for
        the means and the normalized dispersions are ignored.
      max_mean : `float`, optional (default=8)
        If `n_top_genes` is not `None`, this and all other cutoffs for
        the means and the normalized dispersions are ignored.
      n_top_genes : {`float`, `int`, `None`}, optional (default=1000)
        Number of highly-variable genes to keep. If the value is in (0, 1],
        it is interpreted as a fraction of genes.
n_bins : `int`, optional (default: 20)
Number of bins for binning the mean gene expression. Normalization is
done with respect to each bin. If just a single gene falls into a bin,
the normalized dispersion is artificially set to 1.
flavor : `{'seurat', 'cell_ranger'}`, optional (default='seurat')
Choose the flavor for computing normalized dispersion. In their default
workflows, Seurat passes the cutoffs whereas Cell Ranger passes
`n_top_genes`.
inplace : `bool` (default=True)
if False, copy the `SingleCellOMIC` and apply the vargene filter.
Returns:
New `SingleCellOMIC` with filtered features if `applying_filter=True`
else assign `SingleCellOMIC.highly_variable_features` with following
attributes.
highly_variable : bool
boolean indicator of highly-variable genes
**means**
means per gene
**dispersions**
dispersions per gene
**dispersions_norm**
normalized dispersions per gene
Notes:
Proxy to `scanpy.pp.highly_variable_genes`. It is recommended to do
`log1p` normalization before if `flavor='seurat'`.
"""
flavor = str(flavor).lower()
if n_top_genes is not None:
if 0. < n_top_genes < 1.:
n_top_genes = int(n_top_genes * self.n_vars)
# prepare the data
# this function will take the exponential of X all the time,
    # so non-logarithmized data might lead to overflow
omics = self if inplace else self.copy()
omics._record('filter_highly_variable_genes', locals())
sc.pp.highly_variable_genes(omics,
min_disp=min_disp,
max_disp=max_disp,
min_mean=min_mean,
max_mean=max_mean,
n_top_genes=n_top_genes,
n_bins=int(n_bins),
flavor=flavor,
subset=True,
inplace=False)
omics._name += '_vargene'
omics._n_vars = omics._X.shape[1]
# recalculate library info
omics._calculate_statistics()
return omics
def filter_genes(self,
min_counts=None,
max_counts=None,
min_cells=None,
max_cells=None,
inplace=True):
r""" Filter features (columns) based on number of rows or counts.
Keep columns that have at least ``[min_counts, max_counts]``
or are expressed in at least ``[min_row_counts, max_row_counts]``
Arguments:
min_counts : {int, None} (default=None)
Minimum number of counts required for a gene to pass filtering.
max_counts : {int, None} (default=None)
Maximum number of counts required for a gene to pass filtering.
min_cells : {int, None} (default=None)
Minimum number of cells expressed required for a feature to pass filtering.
max_cells : {int, None} (default=None)
Maximum number of cells expressed required for a feature to pass filtering.
inplace : `bool` (default=True)
if False, return new `SingleCellOMIC` with the filtered
genes applied
Returns:
if `applying_filter=False` annotates the `SingleCellOMIC`, otherwise,
return new `SingleCellOMIC` with the new subset of genes
gene_subset : `numpy.ndarray`
Boolean index mask that does filtering. `True` means that the
gene is kept. `False` means the gene is removed.
number_per_gene : `numpy.ndarray`
Depending on what was thresholded (`counts` or `cells`), the array
stores `n_counts` or `n_cells` per gene.
Note:
Proxy method to Scanpy preprocessing
"""
omics = self if inplace else self.copy()
omics._record('filter_genes', locals())
sc.pp.filter_genes(omics,
min_counts=min_counts,
max_counts=max_counts,
min_cells=min_cells,
max_cells=max_cells,
inplace=True)
omics._name += '_filtergene'
omics._n_vars = omics._X.shape[1]
# recalculate library info
omics._calculate_statistics()
return omics
def filter_cells(self,
min_counts=None,
max_counts=None,
min_genes=None,
max_genes=None,
inplace=True):
r""" Filter examples (rows) based on number of features or counts.
Keep rows that have at least ``[min_counts, max_counts]``
or are expressed in at least ``[min_col_counts, max_col_counts]``
Arguments:
min_counts : {int, None} (default=None)
Minimum number of counts required for a cell to pass filtering.
max_counts : {int, None} (default=None)
Maximum number of counts required for a cell to pass filtering.
min_genes : {int, None} (default=None)
Minimum number of genes expressed required for a cell to pass filtering.
max_genes : {int, None} (default=None)
Maximum number of genes expressed required for a cell to pass filtering.
inplace : `bool` (default=True)
if False, return new `SingleCellOMIC` with the filtered
cells applied
Returns:
if `applying_filter=False` annotates the `SingleCellOMIC`, otherwise,
return new `SingleCellOMIC` with the new subset of cells
cells_subset : numpy.ndarray
Boolean index mask that does filtering. ``True`` means that the
cell is kept. ``False`` means the cell is removed.
      number_per_cell : numpy.ndarray
        Depending on what was thresholded (``counts`` or ``genes``), the array stores
        ``n_counts`` or ``n_genes`` per cell.
Note:
Proxy method to Scanpy preprocessing
"""
# scanpy messed up here, the obs was not updated with the new indices
cells_subset, number_per_cell = sc.pp.filter_cells(self,
min_counts=min_counts,
max_counts=max_counts,
min_genes=min_genes,
max_genes=max_genes,
inplace=False)
omics = self if inplace else self.copy()
omics._record('filter_cells', locals())
omics.apply_indices(cells_subset, observation=True)
omics._name += '_filtercell'
# recalculate library info
omics._calculate_statistics()
return omics
def probabilistic_embedding(self,
omic=None,
n_components_per_class=2,
positive_component=1,
log_norm=True,
clip_quartile=0.,
remove_zeros=True,
ci_threshold=-0.68,
seed=1,
pbe: Optional[ProbabilisticEmbedding] = None):
r""" Fit a GMM on each feature column to get the probability or binary
representation of the features
Return:
`ProbabilisticEmbedding` model
np.ndarray : probabilities X
np.ndarray : binary X
Arguments:
pbe : {`sisua.ProbabilisticEmbedding`, `None`}, optional pretrained
instance of `ProbabilisticEmbedding`
"""
if omic is None:
omic = self.current_omic
self._record('probabilistic_embedding', locals())
# We turn-off default log_norm here since the data can be normalized
# separately in advance.
omic = OMIC.parse(omic)
X = self.numpy(omic)
if X.shape[1] >= 100:
warnings.warn("%d GMM will be trained!" % self.shape[1])
name = omic.name
pbe_name = '%s_pbe' % name
prob_name = '%s_prob' % name
bin_name = '%s_bin' % name
label_name = self.get_labels_name(name)
if is_binary_dtype(X):
X_prob = X
X_bin = X
self.uns[pbe_name] = None
else:
if pbe is None:
if pbe_name not in self.uns:
pbe = ProbabilisticEmbedding(
n_components_per_class=n_components_per_class,
positive_component=positive_component,
log_norm=log_norm,
clip_quartile=clip_quartile,
remove_zeros=remove_zeros,
ci_threshold=ci_threshold,
random_state=seed)
with catch_warnings_ignore(ConvergenceWarning):
pbe.fit(X)
self.uns[pbe_name] = pbe
else:
pbe = self.uns[pbe_name]
else:
assert isinstance(pbe, ProbabilisticEmbedding), \
'pbe, if given, must be instance of sisua.ProbabilisticEmbedding'
# make prediction
X_prob = np.clip(pbe.predict_proba(X), 0. + 1e-8, 1. - 1e-8)
X_bin = pbe.predict(X)
# store the data
if prob_name not in self.obsm:
self.obsm[prob_name] = X_prob
if label_name not in self.obs and name + '_var' in self.uns:
omic_id = self.get_var(name).index
labels = [omic_id[i] for i in np.argmax(self.obsm[prob_name], axis=1)]
self.obs[label_name] = pd.Categorical(labels)
if bin_name not in self.obsm:
self.obsm[bin_name] = X_bin
return pbe, self.obsm[prob_name], self.obsm[bin_name]
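  # Usage sketch (illustrative; `sco` stands for an instance of this class):
  #   pbe, X_prob, X_bin = sco.probabilistic_embedding(OMIC.proteomic)
  # fits one GMM per protein column, then caches the probability and binary
  # matrices in `.obsm['proteomic_prob']` and `.obsm['proteomic_bin']`.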
def dimension_reduce(self,
omic=None,
n_components=100,
algo='pca',
random_state=1):
r""" Perform dimension reduction on given OMIC data. """
if omic is None:
omic = self.current_omic
self._record('dimension_reduce', locals())
algo = str(algo).lower().strip()
    assert algo in ('pca', 'tsne', 'umap'), \
      f"Only support algorithm: 'pca', 'tsne', 'umap'; but given: '{algo}'"
omic = OMIC.parse(omic)
name = f"{omic.name}_{algo}"
## already transformed
if name in self.obsm:
return self.obsm[name] if n_components is None else \
self.obsm[name][:, :int(n_components)]
X = self.numpy(omic)
n_components = min(n_components, X.shape[1])
### train new PCA model
if algo == 'pca':
X_ = np.empty(shape=(X.shape[0], n_components), dtype=X.dtype)
model = IncrementalPCA(n_components=n_components)
# fitting
for start, end in batching(BATCH_SIZE, n=X.shape[0]):
chunk = X[start:end]
chunk = chunk.toarray() if issparse(chunk) else chunk
model.partial_fit(chunk)
# transforming
for start, end in batching(BATCH_SIZE, n=X.shape[0]):
chunk = X[start:end]
chunk = chunk.toarray() if issparse(chunk) else chunk
X_[start:end] = model.transform(chunk)
### TSNE
elif algo == 'tsne':
from odin.ml import fast_tsne
X_ = fast_tsne(X, n_components=n_components, return_model=False)
model = None
## UMAP
elif algo == 'umap':
try:
import cuml
method = 'rapids'
except ImportError:
method = 'umap'
connectivities, distances, nn = self.neighbors(omic,
method='umap',
random_state=random_state)
self.uns['neighbors'] = nn
self.obsp['connectivities'] = connectivities
self.obsp['distances'] = distances
with catch_warnings_ignore(UserWarning):
sc.tl.umap(self, method=method, random_state=random_state, copy=False)
X_ = self.obsm['X_umap']
model = self.uns['umap']
del self.obsm['X_umap']
del self.uns['umap']
del self.uns['neighbors']
del self.obsp['connectivities']
del self.obsp['distances']
## store and return the result
self.obsm[name] = X_
# the model could be None, in case of t-SNE
self.uns[name] = model
return self.obsm[name] if n_components is None else \
self.obsm[name][:, :int(n_components)]
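  # Usage sketch (illustrative; `sco` stands for an instance of this class):
  #   X_pca = sco.dimension_reduce(OMIC.transcriptomic, n_components=50, algo='pca')
  #   X_umap = sco.dimension_reduce(OMIC.transcriptomic, n_components=2, algo='umap')
  # results are cached in `.obsm['<omic>_<algo>']`, so repeated calls only slice
  # the stored coordinates.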
def expm1(self, omic=None, inplace=True):
if omic is None:
omic = self.current_omic
om = self if inplace else self.copy()
om._record('expm1', locals())
_expm1 = lambda x: (np.expm1(x.data, out=x.data)
if issparse(x) else np.expm1(x, out=x))
X = om.numpy(omic)
for s, e in batching(n=self.n_obs, batch_size=BATCH_SIZE):
X[s:e] = _expm1(X[s:e])
om._calculate_statistics(omic)
return om
def normalize(self,
omic=None,
total=False,
log1p=False,
scale=False,
target_sum=None,
exclude_highly_expressed=False,
max_fraction=0.05,
max_value=None,
inplace=True):
r""" If ``exclude_highly_expressed=True``, very highly expressed genes are
excluded from the computation of the normalization factor (size factor)
for each cell. This is meaningful as these can strongly influence
the resulting normalized values for all other genes [1]_.
Arguments:
total : bool (default=False). Normalize counts per cell.
log1p : bool (default=False). Logarithmize the data matrix.
scale : bool (default=False). Scale data to unit variance and zero mean.
target_sum : {float, None} (default=None)
If None, after normalization, each observation (cell) has a
total count equal to the median of total counts for
observations (cells) before normalization.
exclude_highly_expressed : bool (default=False)
Exclude (very) highly expressed genes for the computation of the
normalization factor (size factor) for each cell. A gene is considered
highly expressed, if it has more than ``max_fraction`` of the total counts
in at least one cell. The not-excluded genes will sum up to
``target_sum``.
max_fraction : bool (default=0.05)
If ``exclude_highly_expressed=True``, consider cells as highly expressed
that have more counts than ``max_fraction`` of the original total counts
in at least one cell.
max_value : `float` or `None`, optional (default=`None`)
Clip (truncate) to this value after scaling. If `None`, do not clip.
inplace : `bool` (default=True)
if False, return new `SingleCellOMIC` with the filtered
cells applied
References:
Weinreb et al. (2016), SPRING: a kinetic interface for visualizing
high dimensional single-cell expression data, bioRxiv.
Note:
Proxy to `scanpy.pp.normalize_total`, `scanpy.pp.log1p` and
`scanpy.pp.scale`
"""
if omic is None:
omic = self.current_omic
om = self if inplace else self.copy()
om._record('normalize', locals())
if omic != OMIC.transcriptomic:
org_X = om._X
om._X = om.numpy(omic)
if total:
sc.pp.normalize_total(om,
target_sum=target_sum,
exclude_highly_expressed=exclude_highly_expressed,
max_fraction=max_fraction,
inplace=True)
# since the total counts is normalized, store the old library size
om._name += '_total'
if log1p:
sc.pp.log1p(om, chunked=True, chunk_size=BATCH_SIZE, copy=False)
om._name += '_log1p'
del om.uns['log1p']
# scaling may result negative total counts
if scale:
sc.pp.scale(om, zero_center=True, max_value=max_value, copy=False)
om._name += '_scale'
if omic != OMIC.transcriptomic:
om.obsm[omic.name] = om.X
om._X = org_X
om._calculate_statistics(omic)
return om
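  # Usage sketch (illustrative; `sco` stands for an instance of this class):
  # a common preprocessing chain is total-count normalization, log1p, then
  # unit-variance scaling with clipping:
  #   sco.normalize(total=True, log1p=True, scale=True, max_value=10)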
# ******************** metrics ******************** #
def neighbors(self,
omic=None,
n_neighbors=12,
n_pcs=100,
knn=True,
method='umap',
metric='euclidean',
random_state=1):
r"""\
Compute a neighborhood graph of observations [McInnes18]_.
The neighbor search efficiency of this heavily relies on UMAP [McInnes18]_,
which also provides a method for estimating connectivities of data points -
the connectivity of the manifold (`method=='umap'`). If `method=='gauss'`,
connectivities are computed according to [Coifman05]_, in the adaption of
[Haghverdi16]_.
Arguments:
n_neighbors : `int` (default=12)
The size of local neighborhood (in terms of number of neighboring data
points) used for manifold approximation. Larger values result in more
global views of the manifold, while smaller values result in more local
data being preserved. In general values should be in the range 2 to 100.
If `knn` is `True`, number of nearest neighbors to be searched. If `knn`
is `False`, a Gaussian kernel width is set to the distance of the
`n_neighbors` neighbor.
      n_pcs : {`int`, `None`} (default=100)
Use this many PCs. If n_pcs==0 use .X if use_rep is None.
if n_pcs==None, use obsm['X_pca'].
use_rep : {`None`, ‘X’} or any key for .obsm, optional (default=None)
Use the indicated representation. If None, the representation is
chosen automatically: for .n_vars < 50, .X is used, otherwise
‘X_pca’ is used. If ‘X_pca’ is not present, it’s computed with
default parameters.
knn : `bool` (default=True)
If `True`, use a hard threshold to restrict the number of neighbors to
`n_neighbors`, that is, consider a knn graph. Otherwise, use a Gaussian
Kernel to assign low weights to neighbors more distant than the
`n_neighbors` nearest neighbor.
method : {{'umap', 'gauss', `rapids`}} (default: `'umap'`)
Use 'umap' [McInnes18]_ or 'gauss' (Gauss kernel following [Coifman05]_
with adaptive width [Haghverdi16]_) for computing connectivities.
Use 'rapids' for the RAPIDS implementation of UMAP (experimental, GPU
only).
metric : {`str`, `callable`} (default='euclidean')
A known metric’s name or a callable that returns a distance.
Returns:
returns neighbors object with the following:
**[OMIC]_connectivities** : sparse matrix (dtype `float32`)
Weighted adjacency matrix of the neighborhood graph of data
points. Weights should be interpreted as connectivities.
**[OMIC]_distances** : sparse matrix (dtype `float32`)
Instead of decaying weights, this stores distances for each pair of
neighbors.
**[OMIC]_neighbors** : dictionary
configuration and params of fitted k-NN.
"""
if omic is None:
omic = self.current_omic
self._record('neighbors', locals())
omic = OMIC.parse(omic)
name = f"{omic.name}_neighbors"
if name not in self.uns:
omic_name = omic.name
if self.get_dim(omic) > 100:
self.dimension_reduce(omic, algo='pca', random_state=random_state)
omic_name = omic.name + '_pca'
with catch_warnings_ignore(Warning):
obj = sc.pp.neighbors(self,
n_neighbors=n_neighbors,
knn=knn,
method=method,
metric=metric,
n_pcs=int(n_pcs),
use_rep=omic_name,
random_state=random_state,
copy=True)
self.uns[name] = obj.uns['neighbors']
self.obsp[f"{omic.name}_connectivities"] = obj.obsp['connectivities']
self.obsp[f"{omic.name}_distances"] = obj.obsp['distances']
del obj
return (self.obsp[f"{omic.name}_connectivities"],
self.obsp[f"{omic.name}_distances"], self.uns[name])
def clustering(self,
omic=None,
n_clusters=None,
n_init='auto',
algo='kmeans',
matching_labels=True,
return_key=False,
random_state=1):
r""" Perform clustering for given OMIC type, the cluster labels will be
assigned to `obs` with key "{omic}_{algo}{n_clusters}"
Arguments:
algo : {'kmeans', 'knn', 'pca', 'tsne', 'umap'}.
Clustering algorithm, in case algo in ('pca', 'tsne', 'umap'),
perform dimension reduction before clustering.
matching_labels : a Boolean. Matching OMIC var_names to appropriate
clusters, only when `n_clusters` is string or OMIC type.
return_key : a Boolean. If True, return the name of the labels
stored in `.obs` instead of the labels array.
"""
if omic is None:
omic = self.current_omic
self._record('clustering', locals())
## clustering algorithm
algo = str(algo).strip().lower()
## input data
omic = OMIC.parse(omic)
cluster_omic = None
if n_clusters is None:
cluster_omic = omic
n_clusters = self.get_dim(omic)
elif isinstance(n_clusters, Number):
n_clusters = int(n_clusters)
else:
cluster_omic = OMIC.parse(n_clusters)
n_clusters = self.get_dim(cluster_omic)
n_clusters = int(n_clusters)
n_init = int(n_init) if isinstance(n_init, Number) else \
int(n_clusters) * 3
## check if output already extracted
output_name = f"{omic.name}_{algo}{n_clusters}"
if output_name in self.obs:
return output_name if return_key else self.obs[output_name]
## warning
if n_clusters > 50:
warnings.warn(
f"Found omic type:{cluster_omic} with {n_clusters} clusters")
## fit KMeans
if algo in ('pca', 'tsne', 'umap', 'kmeans'):
if algo in ('pca', 'tsne', 'umap'):
X = self.dimension_reduce(omic=omic, n_components=100, algo=algo)
else:
X = self.numpy(omic)
model = MiniBatchKMeans(n_clusters=int(n_clusters),
max_iter=1000,
n_init=int(n_init),
compute_labels=False,
batch_size=BATCH_SIZE,
random_state=random_state)
# better suffering the batch
for s, e in batching(BATCH_SIZE, self.n_obs, seed=random_state):
x = X[s:e]
model.partial_fit(x)
# make prediction
labels = []
for s, e in batching(BATCH_SIZE, self.n_obs):
x = X[s:e]
labels.append(model.predict(x))
labels = np.concatenate(labels, axis=0)
## fit KNN
elif algo == 'knn':
connectivities, distances, nn = self.neighbors(omic)
n_neighbors = min(nn['params']['n_neighbors'],
np.min(np.sum(connectivities > 0, axis=1)))
model = SpectralClustering(n_clusters=n_clusters,
random_state=random_state,
n_init=n_init,
affinity='precomputed_nearest_neighbors',
n_neighbors=n_neighbors)
labels = model.fit_predict(connectivities)
else:
raise NotImplementedError(algo)
## correlation matrix
if cluster_omic is not None and matching_labels:
_, X, _ = self.probabilistic_embedding(cluster_omic)
# omic-cluster correlation matrix
corr = np.empty(shape=(X.shape[1], n_clusters), dtype=np.float32)
for i, x in enumerate(X.T):
for lab in range(n_clusters):
mask = labels == lab
corr[i, lab] = np.sum(x[mask])
ids = diagonal_linear_assignment(corr)
varnames = self.get_var_names(cluster_omic)
      labels_to_omic = {lab: name for lab, name in zip(ids, varnames)}
labels = np.array([labels_to_omic[i] for i in labels])
## saving data and model
self.obs[output_name] = pd.Categorical(labels)
# self.uns[output_name] = model
return output_name if return_key else labels
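  # Usage sketch (illustrative; `sco` stands for an instance of this class):
  #   labels = sco.clustering(OMIC.transcriptomic, n_clusters=OMIC.proteomic,
  #                           algo='kmeans', matching_labels=True)
  # clusters the transcriptomic data into one group per proteomic marker and
  # renames each cluster after its best-matching marker.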
def louvain(self,
omic=None,
resolution=None,
restrict_to=None,
adjacency=None,
flavor='vtraag',
directed=True,
use_weights=False,
partition_type=None,
partition_kwargs={},
random_state=1):
r"""Cluster cells into subgroups [Blondel08]_ [Levine15]_ [Traag17]_.
Cluster cells using the Louvain algorithm [Blondel08]_ in the implementation
of [Traag17]_. The Louvain algorithm has been proposed for single-cell
analysis by [Levine15]_.
This requires having ran :func:`~scanpy.pp.neighbors` or
`~scanpy.external.pp.bbknn` first,
or explicitly passing a ``adjacency`` matrix.
Arguments:
resolution
For the default flavor (``'vtraag'``), you can provide a resolution
(higher resolution means finding more and smaller clusters),
which defaults to 1.0. See “Time as a resolution parameter” in [Lambiotte09]_.
restrict_to
Restrict the clustering to the categories within the key for sample
annotation, tuple needs to contain ``(obs_key, list_of_categories)``.
key_added
Key under which to add the cluster labels. (default: ``'louvain'``)
adjacency
Sparse adjacency matrix of the graph, defaults to
``adata.uns['neighbors']['connectivities']``.
flavor : {``'vtraag'``, ``'igraph'``}
Choose between to packages for computing the clustering.
``'vtraag'`` is much more powerful, and the default.
directed
Interpret the ``adjacency`` matrix as directed graph?
use_weights
Use weights from knn graph.
partition_type
Type of partition to use.
Only a valid argument if ``flavor`` is ``'vtraag'``.
partition_kwargs
Key word arguments to pass to partitioning,
if ``vtraag`` method is being used.
random_state : Change the initialization of the optimization.
Return:
array `[n_samples]` : louvain community indices
array `[n_samples]` : decoded louvain community labels
"""
if omic is None:
omic = self.current_omic
self._record('louvain', locals())
try:
import louvain
except ImportError:
raise ImportError("pip install louvain>=0.6 python-igraph")
omic = OMIC.parse(omic)
output_name = omic.name + '_louvain'
if output_name not in self.obs:
with catch_warnings_ignore(Warning):
connectivities, distances, nn = self.neighbors(omic)
self.uns["neighbors"] = nn
self.obsp["connectivities"] = connectivities
self.obsp["distances"] = distances
sc.tl.louvain(self,
resolution=resolution,
random_state=random_state,
restrict_to=restrict_to,
key_added=output_name,
adjacency=adjacency,
flavor=flavor,
directed=directed,
use_weights=use_weights,
partition_type=partition_type,
partition_kwargs=partition_kwargs,
copy=False)
del self.uns['neighbors']
del self.obsp["connectivities"]
del self.obsp["distances"]
model = self.uns['louvain']
del self.uns['louvain']
self.uns[output_name] = model
y = self.obs[output_name].to_numpy().astype(np.float32)
### decode louvain community into labels
output_labels = f"{output_name}_labels"
if output_labels not in self.obs:
var_names = self.get_var_names(omic)
# mapping community_index -> confident value for each variables
confidence = defaultdict(float)
for i, x in zip(y, self.get_x_probs(omic=omic)):
confidence[int(i)] += x
# thresholding the variables
labels = {}
for community, x in confidence.items():
labels[community] = '_'.join(var_names[_threshold(x, 2, 5)])
# store in obs
self.obs[output_labels] = np.array([labels[i] for i in y])
### return
y_labels = self.obs[output_labels].to_numpy()
return y, y_labels
# ******************** Genes metrics and ranking ******************** #
def top_vars(self, n_vars=100, return_indices=False):
r""" The genes that are highly variated, high dispersion, less dropout
(i.e. smallest counts of zero-values), and appeared in most cells
will be returned.
Arguments:
return_indices : a Boolean. If True, return the index of top genes,
otherwise, return the genes' ID.
"""
self.calculate_quality_metrics()
fnorm = lambda x: (x - np.min(x)) / (np.max(x) - np.min(x))
# prepare data
n_cells = fnorm(self.var['n_cells'].values)
zeros = fnorm(self.var['pct_dropout'].values)
dispersion = fnorm(self.var['dispersions'].values)
# higher is better TODO: check again what is the best strategy here
rating = n_cells + (1. - zeros) + dispersion
ids = np.argsort(rating)[::-1]
# indexing the genes
genes = np.arange(self.n_vars, dtype=np.int64) \
if return_indices else self.gene_id.values
genes = genes[ids][:n_vars]
return genes
def rank_vars_groups(self,
n_vars=100,
group_by=OMIC.proteomic,
clustering='kmeans',
method='logreg',
corr_method='benjamini-hochberg',
max_iter=1000,
reference='rest'):
r""" Rank genes for characterizing groups.
Arguments:
method : {'t-test_overestim_var', 't-test', 'wilcoxon', 'logreg'}
- 't-test_overestim_var' overestimates variance of each group,
- 't-test' uses t-test,
- 'wilcoxon' uses Wilcoxon rank-sum,
- 'logreg' uses logistic regression.
corr_method : p-value correction method.
Used only for `'t-test'`, `'t-test_overestim_var'`, and `'wilcoxon'`.
max_iter : an Integer.
        Only used for `method='logreg'`
Return:
the key to ranked groups in `.uns`
"""
self._record('rank_vars_groups', locals())
# group by is categorical variables in `obs`
if str(group_by) in self.obs:
pass
else: # search in obsm, then clustering it
group_by = OMIC.parse(group_by)
if clustering is not None:
group_by = self.clustering(group_by,
n_clusters=group_by,
algo=clustering,
return_key=True)
else:
self.probabilistic_embedding(group_by)
group_by = self.get_labels_name(group_by)
## check already ranked
    key = f'{self.current_omic.name}_{group_by}_rank'
if key not in self.uns:
kw = {}
if method == 'logreg':
kw['max_iter'] = int(max_iter)
kw['random_state'] = 1
kw['solver'] = 'saga'
sc.tl.rank_genes_groups(self,
groupby=group_by,
n_genes=int(n_vars),
use_raw=True,
method=method,
corr_method=corr_method,
reference=reference,
copy=False,
key_added=key,
**kw)
return key
def calculate_quality_metrics(self,
n_bins=20,
flavor='cell_ranger',
percent_top=None,
log1p=False):
r"""\
Calculate quality control metrics for both the observations and variable.
Highly variable genes (i.e. variables) also calculated.
Arguments:
n_bins
Number of bins for binning the mean gene expression. Normalization is
done with respect to each bin. If just a single gene falls into a bin,
the normalized dispersion is artificially set to 1. You'll be informed
about this if you set `settings.verbosity = 4`.
flavor
Choose the flavor for computing normalized dispersion. In their default
workflows, Seurat passes the cutoffs whereas Cell Ranger passes
`n_top_genes`.
percent_top : a list of Integer. Which proportions of top genes to cover.
If empty or None don’t calculate. Values are considered 1-indexed,
percent_top=[50] finds cumulative proportion to the 50th most
expressed gene.
log1p : a Boolean. If True, perform log1p before calculating the quality
metrics, then, expm1 after the calculation.
Observation level metrics include:
"n_[omic.name]". Number of genes with positive counts in a cell.
"total_[omic.name]". Total number of counts for a cell.
"pct_counts_in_top_50_[omic.name]". Cumulative percentage of counts for 50 most
expressed genes in a cell.
Variable level metrics include:
"total". Sum of counts for a gene.
"mean". Mean expression over all cells.
"n_cells". Number of cells this expression is measured in.
"pct_dropout". Percentage of cells this feature does not
appear in.
"highly_variable" : boolean indicator of highly-variable genes
"dispersions" : dispersions per gene
"dispersions_norm" : normalized dispersions per gene
"""
self._record('calculate_quality_metrics', locals())
cell_qc, gene_qc = sc.pp.calculate_qc_metrics(
self,
percent_top=as_tuple(percent_top, t=int)
if percent_top is not None else None,
inplace=False)
name = self._current_omic_name
# var quality
self.var['n_cells'] = gene_qc['n_cells_by_counts']
self.var['mean'] = gene_qc['mean_counts']
self.var['total'] = gene_qc['total_counts']
self.var['pct_dropout'] = gene_qc['pct_dropout_by_counts']
## cell quality
self.obs['n_%s' % name] = cell_qc['n_genes_by_counts']
self.obs['total_%s' % name] = cell_qc['total_counts']
if percent_top is not None:
for i in as_tuple(percent_top, t=int):
self.obs['pct_counts_in_top_%d_%s' % (i, name)] = \
cell_qc['pct_counts_in_top_%d_genes' % i]
## Expects logarithmized data.
if log1p:
sc.pp.log1p(self)
## highly_variable, means, dispersions, dispersions_norm
results = sc.pp.highly_variable_genes(self,
n_bins=min(int(n_bins),
self.X.shape[1]),
flavor=flavor,
subset=False,
inplace=False)
    self.var['highly_variable'] = [i[0] for i in results]
    self.var['dispersions'] = [i[2] for i in results]
    self.var['dispersions_norm'] = [i[3] for i in results]
## de-log
if log1p:
X = self.X
for s, e in batching(BATCH_SIZE, n=self.n_obs):
x = X[s:e]
if sp.sparse.issparse(x):
np.expm1(x.data, out=x.data)
else:
np.expm1(x, out=x)
return self
# ******************** other metrics ******************** #
@cache_memory
def get_marker_pairs(self,
omic1=OMIC.proteomic,
omic2=None,
var_names1=MARKER_ADTS,
var_names2=None,
threshold=None,
n=10,
most_correlated=False,
remove_duplicated=True):
r""" Return the most differentiated (or correlated) pairs within a
single OMIC (in case `omic2=None`) or between 2 different OMICs.
Arguments:
    threshold : a Scalar.
      The minimum correlation value for a pair to be selected (in case
      `most_correlated=True`), otherwise, the maximum correlation value.
      If None, the threshold is disabled.
    n : an Integer.
      Maximum number of pairs to be selected. If None, there is no limit.
    most_correlated : a Boolean (default: False)
      if True, return the most correlated pairs, otherwise, the least
      correlated pairs.
    remove_duplicated : a Boolean (default: True)
      if True, remove pairs with a duplicated name for the first OMIC (in case
      `omic1 != omic2`), or remove any pair with a duplicated name in either
      OMIC (in case `omic1 == omic2`).
    Return:
      list of tuples (var1, var2), ordered with the most correlated pair first
      (or the least correlated first, when `most_correlated=False`).
"""
is_same_omic = False
if omic2 is None:
omic2 = omic1
is_same_omic = True
ids1 = self.get_var_indices(omic1)
ids2 = self.get_var_indices(omic2)
# check var_names
if var_names1 is None:
var_names1 = self.get_var_names(omic1)
var_ids1 = set(ids1[i] for i in var_names1 if i in ids1)
if len(var_ids1) == 0:
raise ValueError(
f"No matching variables found from given var_names={var_names1}")
# for the second OMIC
if var_names2 is None:
var_names2 = self.get_var_names(omic2)
var_ids2 = set(ids2[i] for i in var_names2 if i in ids2)
if len(var_ids2) == 0:
raise ValueError(
f"No matching variables found from given var_names={var_names2}")
# filtering
var_names1 = self.get_var_names(omic1)
var_names2 = self.get_var_names(omic2)
scores = defaultdict(float)
for i1, i2, p, s in self.get_correlation(omic1=omic1, omic2=omic2):
if i1 not in var_ids1 or i2 not in var_ids2:
continue
name1 = var_names1[i1]
name2 = var_names2[i2]
key = (name1, name2)
if is_same_omic:
if name1 == name2:
continue
key = tuple(sorted(key))
x = p + s
if np.isnan(x):
x = 1.0
scores[key] += x
scores = sorted(scores.items(), key=lambda x: x[-1])
# most correlated
if most_correlated:
scores = scores[::-1]
# prepare filtering
threshold = (-np.inf if most_correlated else np.inf) \
if threshold is None else float(threshold)
n = np.inf if n is None else int(n)
fn = lambda x: (x / 2 > threshold) if most_correlated else \
(x / 2 < threshold)
# filtering
pairs = []
seen = {}
while True:
if len(scores) == 0 or len(pairs) >= n:
break
      key, val = scores.pop(0)
      # apply the correlation threshold; scores are sorted, so we can stop early
      if not fn(val):
        break
if remove_duplicated:
if is_same_omic:
if any(k in seen for k in key):
continue
seen[key[0]] = 1
seen[key[1]] = 1
else:
if key[0] in seen:
continue
seen[key[0]] = 1
pairs.append(key)
return pairs
@cache_memory
def get_importance_matrix(self,
omic=OMIC.transcriptomic,
target_omic=OMIC.proteomic,
random_state=1):
r""" Using Tree Classifier to estimate the importance of each
`omic` for each `target_omic`.
"""
from odin.bay.vi.metrics import representative_importance_matrix
from odin.bay.vi.utils import discretizing
    random_state = int(random_state)
omic1 = self.current_omic if omic is None else OMIC.parse(omic)
omic2 = self.current_omic if target_omic is None else OMIC.parse(
target_omic)
    assert omic1 != omic2, "Importance matrix requires 2 different OMIC types"
uns_key = f"importance_{omic.name}_{target_omic.name}"
if uns_key in self.uns:
return self.uns[uns_key]
# prepare data
X = self.numpy(omic1)
y = self.numpy(omic2)
if not is_discrete(y):
y = discretizing(y, n_bins=10, strategy='quantile')
    # split the data 75:25 for train and test
rand = np.random.RandomState(random_state)
ids = rand.permutation(X.shape[0])
train = ids[:int(0.75 * X.shape[0])]
test = ids[int(0.75 * X.shape[0]):]
X_train, X_test = X[train], X[test]
y_train, y_test = y[train], y[test]
# calculate the importance matrix
matrix, train_acc, test_acc = representative_importance_matrix(
repr_train=X_train,
factor_train=y_train,
repr_test=X_test,
factor_test=y_test,
random_state=rand.randint(1e8))
self.uns[uns_key] = matrix
return matrix
@cache_memory
def get_mutual_information(self,
omic=OMIC.transcriptomic,
target_omic=OMIC.proteomic,
n_neighbors=3,
random_state=1):
r""" Estimate mutual information using k-NN
Return
a Matrix of shape `(n_features_omic, n_features_target_omic)`
estimation of mutual information between each feature in `omic` to
eacho feature in `target_omic`
"""
n_neighbors = int(n_neighbors)
    random_state = int(random_state)
omic1 = self.current_omic if omic is None else OMIC.parse(omic)
omic2 = self.current_omic if target_omic is None else OMIC.parse(
target_omic)
    assert omic1 != omic2, "Mutual information only for 2 different OMIC types"
    uns_key = f"mutualinfo_{omic1.name}_{omic2.name}"
if uns_key in self.uns:
return self.uns[uns_key]
### prepare the data
x1 = self.numpy(omic1)
x2 = self.numpy(omic2)
n_om1 = x1.shape[1]
n_om2 = x2.shape[1]
discrete_features = np.array([is_discrete(i) for i in x1.T])
def _mi(i2):
y = x2[:, i2]
if is_discrete(y):
fn = mutual_info_classif
else:
fn = mutual_info_regression
return i2, fn(X=x1,
y=y,
discrete_features=discrete_features,
n_neighbors=n_neighbors,
random_state=random_state)
mi_mat = np.empty(shape=(n_om1, n_om2), dtype=np.float64)
for i2, mi in MPI(list(range(n_om2)),
func=_mi,
ncpu=max(1,
cpu_count() - 1),
batch=1):
mi_mat[:, i2] = mi
self.uns[uns_key] = mi_mat
return mi_mat
@cache_memory
def get_correlation(self, omic1=OMIC.transcriptomic, omic2=OMIC.proteomic):
r""" Calculate the correlation scores between two omic types
(could be different or the same OMIC).
    Return:
      list of tuples, each containing 4 scalars:
      (omic1-idx, omic2-idx, pearson, spearman),
      sorted from high to low average correlation
"""
omic1 = self.current_omic if omic1 is None else OMIC.parse(omic1)
omic2 = self.current_omic if omic2 is None else OMIC.parse(omic2)
uns_key = f"correlation_{omic1.name}_{omic2.name}"
if uns_key in self.uns:
return self.uns[uns_key]
### prepare the data
x1 = self.numpy(omic1)
x2 = self.numpy(omic2)
n_om1 = x1.shape[1]
n_om2 = x2.shape[1]
def _corr(ids):
results = []
if not isinstance(ids[0], tuple):
ids = [ids]
for i1, i2 in ids:
y1 = x1[:, i1]
y2 = x2[:, i2]
with catch_warnings_ignore(RuntimeWarning):
p = pearsonr(y1, y2)[0]
s = spearmanr(y1, y2, nan_policy='omit').correlation
        # collect the scores for this pair (NaNs are kept and handled downstream)
results.append((i1, i2, p, s))
yield results
### multiprocessing
jobs = list(itertools.product(range(n_om1), range(n_om2)))
ncpu = max(1, cpu_count() - 1)
results = []
for res in MPI(jobs, func=_corr, ncpu=ncpu, batch=len(jobs) // ncpu):
results += res
### sorted by decreasing order
all_correlations = sorted(
results,
key=lambda scores: (scores[-2] + scores[-1]) / 2,
)[::-1]
self.uns[uns_key] = all_correlations
return all_correlations
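# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). The dataset class
# defined above is not named in this excerpt, so `sco` below is a hypothetical
# instance of it; the calls only illustrate how the analysis methods documented
# above are meant to be chained.
#   sco.calculate_quality_metrics(n_bins=20, flavor='cell_ranger')
#   key = sco.rank_vars_groups(group_by=OMIC.proteomic, method='t-test')
#   pairs = sco.get_marker_pairs(omic1=OMIC.proteomic, n=10, most_correlated=True)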
|
1675370
|
from typing import Dict, Any
class Loader:
cache = dict(resource={}, client={}) # type: Dict[str, Any]
client_kwargs = dict(default={}) # type: Dict[str, Dict]
def __init__(self, factory):
self.factory = factory
def __getattr__(self, attr):
if attr == "__name__":
return "Loader"
if attr == "__bases__":
return (object, )
if attr == "__all__":
return list(self.cache[self.factory])
if attr == "__file__":
return __file__
if attr == "__path__":
return []
if attr == "__loader__":
return self
if attr not in self.cache[self.factory]:
if self.factory == "client" and attr in self.cache["resource"]:
self.cache["client"][attr] = self.cache["resource"][attr].meta.client
else:
import boto3
factory = getattr(boto3, self.factory)
self.cache[self.factory][attr] = factory(attr.replace("_", "-"),
**self.client_kwargs.get(attr, self.client_kwargs["default"]))
return self.cache[self.factory][attr]
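# ---------------------------------------------------------------------------
# Hedged usage sketch (an assumption, not shown in the original module): the
# lazy-import pattern above is typically exposed through module-level Loader
# instances like the two below. Creating them is cheap because boto3 is only
# imported inside __getattr__, on first attribute access.
resource = Loader("resource")
client = Loader("client")
# e.g. (requires boto3 and AWS credentials; the service/table names below are
# illustrative only):
#   client.s3.list_buckets()
#   resource.dynamodb.Table("my-table")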
|
1675389
|
from django.contrib import admin
from django.urls import include, path
from rest_framework.routers import SimpleRouter
from .views import TestResourceViewSet, RelatedResource1ViewSet, RelatedResource2ViewSet
router = SimpleRouter()
router.register(r'test-resources', TestResourceViewSet)
router.register(r'related-resources-1', RelatedResource1ViewSet)
router.register(r'related-resources-2', RelatedResource2ViewSet)
urlpatterns = [
path('admin/', admin.site.urls),
path('', include(router.urls)),
]
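# Hedged note (assumption: default DRF SimpleRouter conventions): the router
# registrations above generate, for example, `/test-resources/` for list and
# create, and `/test-resources/<pk>/` for retrieve, update and destroy, with
# the analogous routes for the two related-resource viewsets.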
|
1675410
|
import os
import re
import shutil
import pyzfscmds.cmd
import zedenv.plugins.configuration as plugin_config
from zedenv.lib.logger import ZELogger
class FreeBSDLoader(plugin_config.Plugin):
systems_allowed = ["freebsd"]
bootloader = "freebsdloader"
allowed_properties: tuple = ()
def __init__(self, zedenv_data: dict):
super().__init__(zedenv_data)
self.zpool_cache = "boot/zfs/zpool.cache"
self.zfs_be_path = "etc/rc.d/zfsbe"
self.loader_config = {
"system": "boot/loader.conf",
"local": "boot/loader.conf.local",
}
self.zfs_be = False
def post_activate(self):
canmount_setting = "canmount=noauto" if self.zfs_be else "canmount=on"
ds = f"{self.be_root}/{self.boot_environment}"
try:
pyzfscmds.cmd.zfs_set(f"{self.be_root}/{self.boot_environment}", canmount_setting)
except RuntimeError as e:
ZELogger.log({
"level": "EXCEPTION",
"message": f"Failed to set {canmount_setting} for {ds}\n{e}\n"
}, exit_on_error=True)
def _loader_replace(self, configs: list):
be_dataset = f"{self.be_root}/{self.boot_environment}"
target = re.compile(r'^vfs.root.mountfrom=.*$')
for c in configs:
with open(c, "r") as loader_conf:
conf_list = loader_conf.readlines()
line_nums = [l for l, val in enumerate(conf_list) if target.search(val)]
for lnum in line_nums:
conf_list[lnum] = f"vfs.root.mountfrom={be_dataset}\n"
if not self.noop:
if os.path.isfile(c):
ZELogger.verbose_log({
"level": "INFO",
"message": (f"File {c} already exists, backed up to "
f"'{c}.bak' and replaced.\n")
}, self.verbose)
if os.path.isfile(f"{c}.bak"):
try:
os.remove(f"{c}.bak")
except PermissionError:
ZELogger.log({
"level": "EXCEPTION",
"message": (f"Require Privileges to remove "
f"'{c}.bak'\n")
}, exit_on_error=True)
try:
shutil.move(c, f"{c}.bak")
except PermissionError:
ZELogger.log({
"level": "EXCEPTION",
"message": (f"Require Privileges to write to "
f"'{c}.bak'\n")
}, exit_on_error=True)
with open(c, "w") as loader_conf:
loader_conf.writelines(conf_list)
def mid_activate(self, be_mountpoint: str):
# System bootloader config must exist
system_loader_config = os.path.join(be_mountpoint, self.loader_config['system'])
if not os.path.isfile(system_loader_config):
raise RuntimeWarning("System bootloader config does not exist.\n")
loader_configs = [system_loader_config]
        self.zfs_be = os.path.isfile(os.path.join(be_mountpoint, self.zfs_be_path))
temp_zpool_cache_path = os.path.join(be_mountpoint, self.zpool_cache)
system_zpool_cache_path = os.path.join("/", self.zpool_cache)
if os.path.isfile(system_zpool_cache_path):
try:
shutil.copy(system_zpool_cache_path, temp_zpool_cache_path)
except PermissionError as e:
raise RuntimeError(
f"Require Privileges to write to '{temp_zpool_cache_path}'\n{e}")
except IOError as e:
raise RuntimeWarning(f"IO Error writing to '{temp_zpool_cache_path}'\n{e}")
else:
try:
os.remove(temp_zpool_cache_path)
except PermissionError as e:
raise RuntimeError(
f"Require Privileges to write to '{temp_zpool_cache_path}'\n{e}")
except IOError as e:
raise RuntimeWarning(f"IO Error writing to '{temp_zpool_cache_path}'\n{e}")
local_loader_config = os.path.join(be_mountpoint, self.loader_config['local'])
if os.path.isfile(local_loader_config):
loader_configs.append(local_loader_config)
self._loader_replace(loader_configs)
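# ---------------------------------------------------------------------------
# Hedged standalone illustration (not part of the plugin): _loader_replace()
# rewrites any existing `vfs.root.mountfrom=` line so it points at the new
# boot environment dataset, e.g. (pool/dataset names are illustrative only):
#   import re
#   line = "vfs.root.mountfrom=zroot/ROOT/default\n"
#   re.sub(r"^vfs\.root\.mountfrom=.*$", "vfs.root.mountfrom=zroot/ROOT/new-be", line)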
|
1675440
|
import os
import sys
import wx
import wx.aui
import wx.propgrid as wxpg
from pubsub import pub
import panda3d.core as pm
import p3d
from direct.showbase.PythonUtil import getBase as get_base
from wxExtra import utils as wxUtils, ActionItem
from wxExtra.logpanel import LogPanel
from wxExtra import AuiManagerConfig, CustomAuiToolBar, CustomMenu
from pandaEditor import commands as cmds
from pandaEditor.constants import MODEL_EXTENSIONS
from pandaEditor.ui.viewport import Viewport
from pandaEditor.ui.resourcesPanel import ResourcesPanel
from pandaEditor.ui.sceneGraphPanel import SceneGraphPanel
from pandaEditor.ui.propertiesPanel import PropertiesPanel
from pandaEditor.ui.preferenceseditor import PreferencesEditor
from pandaEditor.ui.createdialog import CreateDialog
FRAME_TITLE = 'Panda Editor 0.1'
TBAR_ICON_SIZE = (24, 24)
WILDCARD_SCENE = '.xml|*.xml'
WILDCARD_P3D = '.p3d|*.p3d'
ID_FILE_NEW = wx.NewId()
ID_FILE_OPEN = wx.NewId()
ID_FILE_SAVE = wx.NewId()
ID_FILE_SAVE_AS = wx.NewId()
ID_FILE_IMPORT = wx.NewId()
ID_FILE_PROJ = wx.NewId()
ID_PROJ_NEW = wx.NewId()
ID_PROJ_SET = wx.NewId()
ID_PROJ_BUILD = wx.NewId()
ID_EDIT_UNDO = wx.NewId()
ID_EDIT_REDO = wx.NewId()
ID_EDIT_GROUP = wx.NewId()
ID_EDIT_UNGROUP = wx.NewId()
ID_EDIT_PARENT = wx.NewId()
ID_EDIT_UNPARENT = wx.NewId()
ID_EDIT_DUPLICATE = wx.NewId()
ID_EDIT_WRITE_BAM_FILE = wx.NewId()
ID_EDIT_EXPORT_OBJ = wx.NewId()
ID_MODIFY_PHYSICS = wx.NewId()
ID_XFORM_SEL = wx.NewId()
ID_XFORM_POS = wx.NewId()
ID_XFORM_ROT = wx.NewId()
ID_XFORM_SCL = wx.NewId()
ID_XFORM_WORLD = wx.NewId()
ID_VIEW_GRID = wx.NewId()
ID_VIEW_TOP = wx.NewId()
ID_VIEW_BOTTOM = wx.NewId()
ID_VIEW_FRONT = wx.NewId()
ID_VIEW_BACK = wx.NewId()
ID_VIEW_RIGHT = wx.NewId()
ID_VIEW_LEFT = wx.NewId()
ID_CREATE_PREFAB = wx.NewId()
ID_LAYOUT_GAME = wx.NewId()
ID_LAYOUT_EDITOR = wx.NewId()
ID_LAYOUT_BOTH = wx.NewId()
ID_WIND_PANEL = wx.NewId()
ID_WIND_FILE_TOOLBAR = wx.NewId()
ID_WIND_EDIT_TOOLBAR = wx.NewId()
ID_WIND_MODIFY_TOOLBAR = wx.NewId()
ID_WIND_XFORM_TOOLBAR = wx.NewId()
ID_WIND_LAYOUT_TOOLBAR = wx.NewId()
ID_WIND_VIEWPORT = wx.NewId()
ID_WIND_SCENE_GRAPH = wx.NewId()
ID_WIND_LIGHT_LINKER = wx.NewId()
ID_WIND_PROPERTIES = wx.NewId()
ID_WIND_RESOURCES = wx.NewId()
ID_WIND_LOG = wx.NewId()
ID_WIND_PREFERENCES = wx.NewId()
ID_PLAY = wx.NewId()
ID_PAUSE = wx.NewId()
class MainFrame(wx.Frame):
"""Panda Editor user interface."""
def __init__(self, base, *args, **kwargs):
super().__init__(*args, **kwargs)
self.base = base
self.preMaxPos = None
self.preMaxSize = None
# Bind frame events
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.Bind(wx.EVT_KEY_UP, p3d.wxPanda.OnKeyUp)
self.Bind(wx.EVT_KEY_DOWN, p3d.wxPanda.OnKeyDown)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_MOVE, self.OnMove)
# Bind publisher events
pub.subscribe(self.OnUpdate, 'Update')
# Build application preferences
self.cfg = wx.Config('pandaEditor')
# Build toolbars
self.BuildFileActions()
self.BuildEditActions()
self.BuildModifyActions()
self.BuildXformActions()
self.BuildLayoutActions()
# Build viewport. Don't initialise just yet as ShowBase has not yet
# been created.
self.pnlViewport = Viewport(self.base, self)
# Build editor panels
self.pnlSceneGraph = SceneGraphPanel(self)
self.pnlProps = PropertiesPanel(self)
self.pnlRsrcs = ResourcesPanel(self)
self.pnlLog = LogPanel(self)
# Build aui manager to hold all the widgets
self.BuildAuiManager()
# Build menus and menu bar
self.mb = wx.MenuBar()
self.BuildViewMenu()
self.BuildCreateMenu()
self.BuildWindowMenu()
self.BuildMenuBar()
# Populate the panels menu bar with items representing each floating
# panel.
self.RebuildPanelMenu()
# Update the view menu based on the perspective saved in preferences
self.OnUpdateWindowMenu(None)
def _GetSavePath(self):
# Get default paths from current project directory, or the scene's
# current location on disk
defaultDir = ''
defaultFile = ''
if self.base.doc.file_path is not None:
defaultDir, defaultFile = os.path.split(self.base.doc.file_path)
elif self.base.project.path is not None:
defaultDir = self.base.project.GetScenesDirectory()
# Open file browser
filePath = wxUtils.file_save_dialog('Save Scene As', WILDCARD_SCENE, defaultDir=defaultDir, defaultFile=defaultFile)
if filePath and os.path.exists(filePath):
# Warn user if the chosen file path already exists
msg = ''.join(['The file "', filePath, '" already exists.\nDo you want to replace it?'])
if wxUtils.YesNoDialog(msg, 'Replace File?', wx.ICON_WARNING) == wx.ID_NO:
return False
return filePath
def _CheckForSave(self):
"""
If there is already a file loaded and it is dirty, query the user to
save the file. Return False for cancel, True otherwise.
"""
if self.base.doc.dirty:
# Show dialog, record result
msg = ''.join(['The document "', self.base.doc.title, '" was modified after last save.\nSave changes before continuing?'])
result = wxUtils.YesNoCancelDialog(msg, 'Save Changes?', wx.ICON_WARNING)
if result == wx.ID_YES:
self.OnFileSave(None)
elif result == wx.ID_CANCEL:
return False
# Document not dirty, return True
return True
def OnClose(self, evt):
"""Save frame and aui preferences, hide the window and quit."""
# Check if ok to continue, stop the closing process if the user
# cancelled
if not self._CheckForSave():
evt.Veto()
return
# Save prefs, hide window and quit
self.auiCfg.Save()
if self.preMaxPos is not None:
self.auiCfg.SavePosition(*self.preMaxPos)
if self.preMaxSize is not None:
self.auiCfg.SaveSize(*self.preMaxSize)
if self.base.project.path is not None:
self.cfg.Write('projDirPath', self.base.project.path)
self.Show(False)
#self.base.Quit()
#self.onDestroy(event)
try:
base
except NameError:
sys.exit()
base.userExit()
def OnFileNew(self, evt):
"""Show project settings panel and create new scene."""
# Check if ok to continue, return if the user cancelled
if not self._CheckForSave():
return
# Create new document
self.base.CreateScene()
self.base.doc.on_refresh()
def OnFileOpen(self, evt, filePath=None):
"""Create a new document and load the scene."""
# Check if ok to continue, return if the user cancelled
if not self._CheckForSave():
return
# Create new document from file path and load it
if filePath is None:
# Get the start directory. This will be the current working
# directory if the project is not set.
scnsDirPath = self.base.project.GetScenesDirectory()
if scnsDirPath is None:
scnsDirPath = os.getcwd()
filePath = wxUtils.file_open_dialog('Open Scene', WILDCARD_SCENE,
defaultDir=scnsDirPath)
# Create new document
if filePath:
self.base.CreateScene(filePath)
self.base.doc.load()
def OnFileSave(self, evt, saveAs=False):
"""Save the document."""
# Set a file path for the document if one does not exist, or for save
# as
if self.base.doc.file_path is None or saveAs:
# Query a new save path
filePath = self._GetSavePath()
if filePath:
self.base.doc.file_path = filePath
else:
return
# Save the file
self.base.doc.save()
def OnFileSaveAs(self, evt):
"""
Call save using the saveAs flag in order to bring up a new dialog box
so the user may set an alternate save path.
"""
self.OnFileSave(evt, True)
def OnFileImport(self, evt):
"""Import assets to project."""
formats = '; '.join([f'*{extn}' for extn in MODEL_EXTENSIONS])
wild_card = f'Model ({formats})|{formats}'
file_paths = wxUtils.file_open_dialog(
'Import Models',
wild_card,
wx.FD_MULTIPLE
)
for file_path in file_paths:
self.base.project.ImportAsset(file_path)
def OnFileNewProject(self, evt):
"""Build project directory and set project."""
dirPath = wxUtils.director_dialog('Set New Project Directory')
if dirPath:
self.base.project.New(dirPath)
self.SetProjectPath(dirPath)
self.base.doc.on_refresh()
def OnFileSetProject(self, evt):
"""
Set the active project directory path and rebuild the resources panel.
"""
dirPath = wxUtils.director_dialog('Set Project Directory')
if dirPath:
self.SetProjectPath(dirPath)
self.base.doc.on_refresh()
def OnFileBuildProject(self, evt):
"""Build the current project to a p3d file."""
filePath = wxUtils.file_save_dialog('Build Project', WILDCARD_P3D)
if not filePath:
return
if filePath and os.path.exists(filePath):
# Warn user if the chosen file path already exists
msg = ''.join(['The file "', filePath, '" already exists.\nDo you want to replace it?'])
if wxUtils.YesNoDialog(msg, 'Replace File?', wx.ICON_WARNING) == wx.ID_NO:
return
self.base.project.Build(filePath)
def OnEngagePhysics(self, evt):
comp = get_base().node_manager.wrap(get_base().scene.physics_world)
if get_base().scene.physics_task not in get_base().taskMgr.getAllTasks():
comp.enable_physics()
else:
comp.disable_physics()
def OnViewGrid(self, evt):
"""
Show or hide the grid based on the checked value of the menu item.
"""
if evt.IsChecked():
self.base.grid.show()
else:
self.base.grid.hide()
def OnViewCamera(self, evt, yaw_pitch):
"""
        Orbit the camera to a preset view by manipulating delta values.
        See p3d.camera.Orbit for more.
"""
delta = pm.Vec2(
-get_base().edCamera.getH() + yaw_pitch[0],
-get_base().edCamera.getP() + yaw_pitch[1]
)
get_base().edCamera.Orbit(delta)
def on_create(self, evt, type_):
comp_cls = get_base().node_manager.wrappers[type_]
values = comp_cls.get_default_values()
# TODO: Rename from get_foo ;)
# Also assert that required args are serviced by either the dialog or
# default_values.
foo = comp_cls.get_foo()
if foo:
dialog = CreateDialog(
f'Create {comp_cls.__name__}',
{
key: values[key]
for key in foo
},
wx.GetApp().GetTopWindow(),
title='Create',
)
dialog.CenterOnParent()
if dialog.ShowModal() != wx.ID_OK:
return
values.update(dialog.GetValues())
self.base.add_component(type_, **values)
def OnCreateActor(self, evt):
"""
Turn the selection into actors. This is still a massive hack - we need
a more concise way of storing this information.
"""
comps = []
for wrpr in self.base.selection.wrprs:
attr = wrpr.find_property('modelPath')
if attr is None:
continue
wrprCls = base.node_manager.nodeWrappers['Actor']
aWrpr = wrprCls.create(modelPath=attr.Get())
aWrpr.data.setTransform(wrpr.data.getTransform())
aWrpr.set_default_values()
aWrpr.parent = wrpr.default_parent
cmds.replace(wrpr.data, aWrpr.data)
def on_create_prefab(self, evt):
"""
Create a new prefab for the selected object in the prefab directory.
"""
np = self.base.selection.node_paths[0]
np_name = np.get_name()
dir_path = self.base.project.prefabs_directory
asset_name = self.base.project.get_unique_asset_name(f'{np_name}.xml', dir_path)
asset_path = os.path.join(dir_path, asset_name)
get_base().scene_parser.save(np, asset_path)
def OnCreateCgShader(self, evt):
"""
"""
self.base.project.CreateCgShader()
def OnCreateGlslShader(self, evt):
"""
"""
self.base.project.CreateGlslShader()
def OnShowHidePane(self, evt):
"""
Show or hide the pane based on the menu item that was (un)checked.
"""
pane = self.paneDefs[evt.GetId()][0]
self._mgr.GetPane(pane).Show(evt.IsChecked())
# Make sure to call or else we won't see any changes.
self._mgr.Update()
self.base.doc.on_refresh()
def OnXformSetActiveGizmo(self, evt):
if evt.GetId() == ID_XFORM_WORLD:
self.base.SetGizmoLocal(not evt.IsChecked())
return
arg = None
if evt.GetId() == ID_XFORM_POS:
arg = 'pos'
elif evt.GetId() == ID_XFORM_ROT:
arg = 'rot'
elif evt.GetId() == ID_XFORM_SCL:
arg = 'scl'
self.base.SetActiveGizmo(arg)
def OnLayout(self, evt):
if evt.GetId() == ID_LAYOUT_GAME:
get_base().LayoutGameView()
elif evt.GetId() == ID_LAYOUT_EDITOR:
get_base().LayoutEditorView()
elif evt.GetId() == ID_LAYOUT_BOTH:
get_base().LayoutBothView()
def OnUpdateWindowMenu(self, evt):
"""
Set the checks in the window menu to match the visibility of the
panes.
"""
        def UpdateWindowMenu():
            # Uncheck all menus first...
            for id in self.paneDefs:
                if self.mPnl.FindItemById(id):
                    self.mPnl.Check(id, False)
            # ...then check those menus representing panels which are still
            # shown after the event
            for id in self.paneDefs:
                pane = self.paneDefs[id][0]
                if self.mPnl.FindItemById(id) and self._mgr.GetPane(pane).IsShown():
                    self.mPnl.Check(id, True)
# Call after or IsShown() won't return a useful value
wx.CallAfter(UpdateWindowMenu)
def OnUpdate(self, comps=None):
"""
Change the appearance and states of buttons on the form based on the
state of the loaded document.
NOTE: Don't use freeze / thaw as this will cause the 3D viewport to
flicker.
"""
self.OnUpdateFile(comps)
self.OnUpdateEdit(comps)
self.OnUpdateModify(comps)
self.on_update_create(comps)
self.OnUpdateView(comps)
self.OnUpdateProject(comps)
self.OnUpdateXform(comps)
# Set the frame's title to include the document's file path, include
# dirty 'star'
title = ''.join([FRAME_TITLE, ' - ', self.base.doc.title])
if self.base.doc.dirty:
title += ' *'
self.SetTitle(title)
self.base.plugin_manager.on_update(comps)
def OnUpdateFile(self, msg):
"""
Update the file menu. Disable all menu and toolbar items then turn
those back on depending on the document's state.
"""
self.mFile.EnableAllTools(False)
self.tbFile.EnableAllTools(False)
self.mFile.Enable(ID_FILE_NEW, True)
self.mFile.Enable(ID_FILE_OPEN, True)
self.mFile.Enable(ID_FILE_SAVE_AS, True)
self.mFile.Enable(ID_FILE_PROJ, True)
self.tbFile.EnableTool(ID_FILE_NEW, True)
self.tbFile.EnableTool(ID_FILE_OPEN, True)
self.tbFile.EnableTool(ID_FILE_SAVE_AS, True)
if self.base.doc.dirty:
self.mFile.Enable(ID_FILE_SAVE, True)
self.tbFile.EnableTool(ID_FILE_SAVE, True)
if self.base.project.path is not None:
self.mFile.Enable(ID_FILE_IMPORT, True)
self.tbFile.Refresh()
def OnUpdateEdit(self, msg):
"""
        Update the edit menu. Disable undo or redo if their queues are empty
        and make sure to refresh the toolbar.
"""
val = len(self.base.action_manager.undos) > 0
self.mEdit.Enable(ID_EDIT_UNDO, val)
self.tbEdit.EnableTool(ID_EDIT_UNDO, val)
val = len(self.base.action_manager.redos) > 0
self.mEdit.Enable(ID_EDIT_REDO, val)
self.tbEdit.EnableTool(ID_EDIT_REDO, val)
comps_selected = len(get_base().selection.comps) > 0
self.mEdit.Enable(ID_EDIT_GROUP, comps_selected)
self.mEdit.Enable(ID_EDIT_UNGROUP, comps_selected)
self.mEdit.Enable(ID_EDIT_DUPLICATE, comps_selected)
self.mEdit.Enable(ID_EDIT_WRITE_BAM_FILE, comps_selected)
self.mEdit.Enable(ID_EDIT_EXPORT_OBJ, comps_selected)
self.tbEdit.Refresh()
def OnUpdateModify(self, msg):
self.tbModify.EnableTool(ID_MODIFY_PHYSICS, False)
if get_base().scene.physics_world is not None:
self.tbModify.EnableTool(ID_MODIFY_PHYSICS, True)
if get_base().scene.physics_task not in taskMgr.getAllTasks():
self.tbModify.ToggleTool(ID_MODIFY_PHYSICS, False)
else:
self.tbModify.ToggleTool(ID_MODIFY_PHYSICS, True)
self.tbModify.Refresh()
def OnUpdateView(self, msg):
"""
Update the view menu. Ensure the grid menu item's checked state
matches the visibility of the grid.
"""
self.mView.Check(ID_VIEW_GRID, False)
if not self.base.grid.isHidden():
self.mView.Check(ID_VIEW_GRID, True)
def OnUpdateProject(self, msg):
self.mProj.EnableAllTools(False)
self.mProj.Enable(ID_PROJ_NEW, True)
self.mProj.Enable(ID_PROJ_SET, True)
if self.base.project.path is not None:
self.mProj.EnableAllTools(True)
def on_update_create(self, msg):
self.mCreate.Enable(ID_CREATE_PREFAB, len(get_base().selection.comps) == 1)
def OnUpdateXform(self, msg):
gizmo = self.base.gizmoMgr.GetActiveGizmo()
if gizmo is None:
self.tbXform.ToggleTool(ID_XFORM_SEL, True)
elif gizmo.getName() == 'pos':
self.tbXform.ToggleTool(ID_XFORM_POS, True)
elif gizmo.getName() == 'rot':
self.tbXform.ToggleTool(ID_XFORM_ROT, True)
elif gizmo.getName() == 'scl':
self.tbXform.ToggleTool(ID_XFORM_SCL, True)
val = not self.base.gizmoMgr.GetGizmoLocal('pos')
self.tbXform.ToggleTool(ID_XFORM_WORLD, val)
self.tbXform.Refresh()
def OnShowPreferences(self, evt):
self.prefs = PreferencesEditor()
self.prefs.Show(self)
def OnMove(self, evt):
"""
Keep the window's position on hand before it gets maximized as this is
the number we need to save to preferences.
"""
if not self.IsMaximized():
self.preMaxPos = self.GetPosition()
def OnSize(self, evt):
"""
Keep the window's size on hand before it gets maximized as this is the
number we need to save to preferences.
"""
if not self.IsMaximized():
self.preMaxSize = self.GetSize()
def SetProjectPath(self, dirPath):
"""
Set the project path and rebuild the resources panel.
"""
self.base.project.Set(dirPath)
self.pnlRsrcs.Build(self.base.project.path)
def BuildFileActions(self):
"""Add tools, set long help strings and bind toolbar events."""
commonActns = [
ActionItem('New', os.path.join('data', 'images', 'document.png'), self.OnFileNew, ID_FILE_NEW),
ActionItem('Open', os.path.join('data', 'images', 'folder-horizontal-open.png'), self.OnFileOpen, ID_FILE_OPEN),
ActionItem('Save', os.path.join('data', 'images', 'disk-black.png'), self.OnFileSave, ID_FILE_SAVE),
ActionItem('Save As', os.path.join('data', 'images', 'disk-black-pencil.png'), self.OnFileSaveAs, ID_FILE_SAVE_AS),
]
# Create file menu
self.mFile = CustomMenu()
self.mFile.AppendActionItems(commonActns, self)
self.mFile.AppendSeparator()
self.mFile.AppendActionItem(ActionItem('Import...', '', self.OnFileImport, ID_FILE_IMPORT), self)
# Create project actions as a submenu
self.mProj = CustomMenu()
actns = [
ActionItem('New...', '', self.OnFileNewProject, ID_PROJ_NEW),
ActionItem('Set...', '', self.OnFileSetProject, ID_PROJ_SET),
ActionItem('Build...', '', self.OnFileBuildProject, ID_PROJ_BUILD)
]
self.mProj.AppendActionItems(actns, self)
self.mFile.Append(ID_FILE_PROJ, '&Project', self.mProj)
# Create file toolbar
self.tbFile = CustomAuiToolBar(self, -1)
self.tbFile.SetToolBitmapSize(TBAR_ICON_SIZE)
self.tbFile.AppendActionItems(commonActns)
self.tbFile.Realize()
def BuildEditActions(self):
"""Add tools, set long help strings and bind toolbar events."""
common_actns = [
ActionItem(
'Undo',
os.path.join('data', 'images', 'arrow-curve-flip.png'),
lambda evt: get_base().action_manager.undo(),
ID_EDIT_UNDO
),
ActionItem(
'Redo',
os.path.join('data', 'images', 'arrow-curve.png'),
lambda evt: get_base().action_manager.redo(),
ID_EDIT_REDO
)
]
# Create edit menu
self.mEdit = CustomMenu()
self.mEdit.AppendActionItems(common_actns, self)
self.mEdit.AppendSeparator()
duplicate = ActionItem(
'Duplicate',
'',
lambda evt: cmds.duplicate(get_base().selection.get()),
ID_EDIT_DUPLICATE
)
self.mEdit.AppendActionItem(duplicate, self)
self.mEdit.AppendSeparator()
self.mEdit.AppendActionItems([
ActionItem(
'Group',
'',
lambda evt: cmds.group(get_base().selection.comps),
ID_EDIT_GROUP,
),
ActionItem(
'Ungroup',
'',
lambda evt: cmds.ungroup(get_base().selection.comps),
ID_EDIT_UNGROUP,
)
], self)
write_bam = ActionItem(
'Write Bam File',
'',
self.base.write_bam_file,
ID_EDIT_WRITE_BAM_FILE
)
self.mEdit.AppendActionItem(write_bam, self)
export_obj = ActionItem(
'Export Obj File',
'',
self.base.export_obj,
ID_EDIT_EXPORT_OBJ
)
self.mEdit.AppendActionItem(export_obj, self)
# Create edit toolbar
self.tbEdit = CustomAuiToolBar(self, -1)
self.tbEdit.SetToolBitmapSize(TBAR_ICON_SIZE)
self.tbEdit.AppendActionItems(common_actns)
self.tbEdit.Realize()
def BuildModifyActions(self):
"""Add tools, set long help strings and bind toolbar events."""
actns = [
ActionItem('Engage Physics', os.path.join('data', 'images', 'point.png'), self.OnEngagePhysics, ID_MODIFY_PHYSICS, kind=wx.ITEM_CHECK)
]
# Create edit menu
self.mModify = CustomMenu()
self.mModify.AppendActionItems(actns, self)
# Create edit toolbar
self.tbModify = CustomAuiToolBar(self, -1)
self.tbModify.SetToolBitmapSize(TBAR_ICON_SIZE)
self.tbModify.AppendActionItems(actns)
self.tbModify.Realize()
def BuildXformActions(self):
"""Add tools, set long help strings and bind toolbar events."""
fn = self.OnXformSetActiveGizmo
actns = [
ActionItem('Select', os.path.join('data', 'images', 'select.png'), fn, ID_XFORM_SEL, kind=wx.ITEM_RADIO),
ActionItem('Move', os.path.join('data', 'images', 'move.png'), fn, ID_XFORM_POS, kind=wx.ITEM_RADIO),
ActionItem('Rotate', os.path.join('data', 'images', 'rotate.png'), fn, ID_XFORM_ROT, kind=wx.ITEM_RADIO),
ActionItem('Scale', os.path.join('data', 'images', 'scale.png'), fn, ID_XFORM_SCL, kind=wx.ITEM_RADIO),
ActionItem('World Transform', os.path.join('data', 'images', 'globe.png'), fn, ID_XFORM_WORLD, kind=wx.ITEM_CHECK)
]
# Create xform toolbar
self.tbXform = CustomAuiToolBar(self, -1)
self.tbXform.SetToolBitmapSize(TBAR_ICON_SIZE)
self.tbXform.AddSpacer(0) # Need to insert a null object here or the radio buttons don't seem to work (win7 at least).
self.tbXform.AppendActionItems(actns)
self.tbXform.Realize()
def BuildLayoutActions(self):
"""Add tools, set long help strings and bind toolbar events."""
actns = [
ActionItem('Editor', os.path.join('data', 'images', 'application-sidebar-list.png'), self.OnLayout, ID_LAYOUT_EDITOR, kind=wx.ITEM_RADIO),
ActionItem('Game', os.path.join('data', 'images', 'layout-game.png'), self.OnLayout, ID_LAYOUT_GAME, kind=wx.ITEM_RADIO),
ActionItem('Both', os.path.join('data', 'images', 'layout-both.png'), self.OnLayout, ID_LAYOUT_BOTH, kind=wx.ITEM_RADIO)
]
# Create layout toolbar
self.tbLayout = CustomAuiToolBar(self, -1)
self.tbLayout.SetToolBitmapSize(TBAR_ICON_SIZE)
self.tbLayout.AddSpacer(0) # Need to insert a null object here or the radio buttons don't seem to work (win7 at least).
self.tbLayout.AppendActionItems(actns)
self.tbLayout.ToggleTool(ID_LAYOUT_EDITOR, True)
self.tbLayout.Realize()
def BuildViewMenu(self):
"""Build the view menu."""
viewActns = [
ActionItem('Grid', '', self.OnViewGrid, ID_VIEW_GRID, kind=wx.ITEM_CHECK)
]
camActns = [
ActionItem('Top', '', self.OnViewCamera, ID_VIEW_TOP, args=(0, -90)),
ActionItem('Bottom', '', self.OnViewCamera, ID_VIEW_BOTTOM, args=(0, 90)),
ActionItem('Left', '', self.OnViewCamera, ID_VIEW_LEFT, args=(-90, 0)),
ActionItem('Right', '', self.OnViewCamera, ID_VIEW_RIGHT, args=(90, 0)),
ActionItem('Front', '', self.OnViewCamera, ID_VIEW_FRONT, args=(0, 0)),
ActionItem('Back', '', self.OnViewCamera, ID_VIEW_BACK, args=(-180, 0))
]
self.mCameras = CustomMenu()
self.mCameras.AppendActionItems(camActns, self)
# Append to view menu
self.mView = CustomMenu()
self.mView.AppendActionItems(viewActns, self)
self.mView.AppendSeparator()
self.mView.AppendSubMenu(self.mCameras, '&Camera')
def BuildCreateMenu(self):
"""Build the create menu."""
lightActns = [
ActionItem('Ambient', '', self.on_create, args='AmbientLight'),
ActionItem('Point', '', self.on_create, args='PointLight'),
ActionItem('Directional', '', self.on_create, args='DirectionalLight'),
ActionItem('Spot', '', self.on_create, args='Spotlight')
]
mLights = CustomMenu()
mLights.AppendActionItems(lightActns, self)
collActns = [
ActionItem('Node', '', self.on_create, args='CollisionNode'),
ActionItem('Sphere', '', self.on_create, args='CollisionSphere'),
ActionItem('Inverse Sphere', '', self.on_create, args='CollisionInvSphere'),
ActionItem('Box', '', self.on_create, args='CollisionBox'),
ActionItem('Capsule', '', self.on_create, args='CollisionCapsule'),
ActionItem('Ray', '', self.on_create, args='CollisionRay'),
]
mColl = CustomMenu()
mColl.AppendActionItems(collActns, self)
bltActions = [
ActionItem('World', '', self.on_create, args='BulletWorld'),
ActionItem('Debug Node', '', self.on_create, args='BulletDebugNode'),
ActionItem('Rigid Body Node', '', self.on_create, args='BulletRigidBodyNode'),
ActionItem('Plane Shape', '', self.on_create, args='BulletPlaneShape'),
ActionItem('Sphere Shape', '', self.on_create, args='BulletSphereShape'),
ActionItem('Box Shape', '', self.on_create, args='BulletBoxShape'),
ActionItem('Capsule Shape', '', self.on_create, args='BulletCapsuleShape'),
]
mBlt = CustomMenu()
mBlt.AppendActionItems(bltActions, self)
self.mCreate = CustomMenu()
self.mCreate.AppendActionItem(ActionItem('Panda Node', '', self.on_create, args='PandaNode'), self)
# self.mCreate.AppendActionItem(ActionItem('Actor', '', self.OnCreateActor), self)
self.mCreate.AppendActionItem(ActionItem('Fog', '', self.on_create, args='Fog'), self)
self.mCreate.AppendSubMenu(mColl, '&Collision')
self.mCreate.AppendSubMenu(mLights, '&Lights')
self.mCreate.AppendSubMenu(mBlt, '&Bullet')
# self.mCreate.AppendSeparator()
# self.mCreate.AppendActionItem(ActionItem('Texture', '', self.on_create, args='Texture'), self)
self.mCreate.AppendSeparator()
self.mCreate.AppendActionItem(ActionItem('Prefab', '', self.on_create_prefab, ID_CREATE_PREFAB), self)
def BuildWindowMenu(self):
"""Build show / hide controls for panes."""
self.mPnl = CustomMenu()
self.mWind = CustomMenu()
self.mWind.Append(ID_WIND_PANEL, '&Panel', self.mPnl)
self.mWind.AppendActionItem(ActionItem('Preferences', '', self.OnShowPreferences, ID_WIND_PREFERENCES), self)
def RebuildPanelMenu(self):
self.Freeze()
self.mPnl.Clear()
for id, paneDef in self.paneDefs.items():
if paneDef[1]:
self.mPnl.AppendCheckItem(id, paneDef[2].caption)
self.Bind(wx.EVT_MENU, self.OnShowHidePane, id=id)
self.OnUpdateWindowMenu(None)
self.Thaw()
def BuildMenuBar(self):
"""Build the menu bar and attach all menus to it."""
self.mb.Append(self.mFile, '&File')
self.mb.Append(self.mEdit, '&Edit')
self.mb.Append(self.mView, '&View')
self.mb.Append(self.mCreate, '&Create')
self.mb.Append(self.mWind, '&Window')
self.SetMenuBar(self.mb)
def BuildAuiManager(self):
"""
Define the behaviour for each aui manager panel, then add them to the
manager.
"""
# Define aui manager panes
# Each tuple is defined as: widget, show in window menu, aui panel
# info
self.paneDefs = {
ID_WIND_FILE_TOOLBAR:(self.tbFile, True,
wx.aui.AuiPaneInfo()
.Name('tbFile')
.Caption('File Toolbar')
.ToolbarPane()
.Top()),
ID_WIND_EDIT_TOOLBAR:(self.tbEdit, True,
wx.aui.AuiPaneInfo()
.Name('tbEdit')
.Caption('Edit Toolbar')
.ToolbarPane()
.Top()),
ID_WIND_MODIFY_TOOLBAR:(self.tbModify, True,
wx.aui.AuiPaneInfo()
.Name('tbModify')
.Caption('Modify Toolbar')
.ToolbarPane()
.Top()),
ID_WIND_XFORM_TOOLBAR:(self.tbXform, True,
wx.aui.AuiPaneInfo()
.Name('tbXform')
.Caption('Transform Toolbar')
.ToolbarPane()
.Top()),
ID_WIND_LAYOUT_TOOLBAR:(self.tbLayout, True,
wx.aui.AuiPaneInfo()
.Name('tbLayout')
.Caption('Layout Toolbar')
.ToolbarPane()
.Top()),
ID_WIND_VIEWPORT:(self.pnlViewport, False,
wx.aui.AuiPaneInfo()
.Name('pnlViewport')
.Caption('Viewport')
.CloseButton(False)
.MaximizeButton(True)
.Center()),
ID_WIND_SCENE_GRAPH:(self.pnlSceneGraph, True,
wx.aui.AuiPaneInfo()
.Name('pnlSceneGraph')
.Caption('Scene Graph')
.CloseButton(True)
.MaximizeButton(True)
.MinSize((100, 100))
.Left()
.Position(2)),
# ID_WIND_LIGHT_LINKER:(self.pnlLightLinker, True,
# wx.aui.AuiPaneInfo()
# .Name('pnlLightLinker')
# .Caption('Light Linker')
# .CloseButton(True)
# .MaximizeButton(True)
# .MinSize((100, 100))
# .Right()
# .Position(2)),
ID_WIND_RESOURCES:(self.pnlRsrcs, True,
wx.aui.AuiPaneInfo()
.Name('pnlRsrcs')
.Caption('Resources')
.CloseButton(True)
.MaximizeButton(True)
.MinSize((100, 100))
.Right()
.Position(2)),
ID_WIND_PROPERTIES:(self.pnlProps, True,
wx.aui.AuiPaneInfo()
.Name('pnlProps')
.Caption('Properties')
.CloseButton(True)
.MaximizeButton(True)
.MinSize((100, 100))
.Right()),
ID_WIND_LOG:(self.pnlLog, True,
wx.aui.AuiPaneInfo()
.Name('pnlLog')
.Caption('Log')
.CloseButton(True)
.MaximizeButton(True)
.MinSize((100, 100))
.Bottom()
.Position(1))
}
# Build aui manager and add each pane
self._mgr = wx.aui.AuiManager(self)
for paneDef in self.paneDefs.values():
self._mgr.AddPane(paneDef[0], paneDef[2])
# Bind aui manager events
self._mgr.Bind(wx.aui.EVT_AUI_PANE_CLOSE, self.OnUpdateWindowMenu)
# Create config and load preferences for all panels
self.auiCfg = AuiManagerConfig(self._mgr, 'pandaEditorWindow')
self.auiCfg.Load()
self._mgr.Update()
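# ---------------------------------------------------------------------------
# Hedged usage sketch (assumption): MainFrame takes the editor's ShowBase-like
# wrapper as its first argument, so a minimal launch from the application
# entry point might look like the commented lines below (names illustrative).
#   app = wx.App()
#   frame = MainFrame(editor_base, None, title=FRAME_TITLE)
#   frame.Show()
#   app.MainLoop()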
|
1675443
|
from setuptools import setup, find_packages
packages = find_packages(exclude=("tests", "demos", "doc", "resources",))
setup(name="netomaton",
version="1.1.1",
description="Netomaton, A Python library for working with Network Automata.",
long_description="Netomaton is a Python framework for exploring discrete dynamical network systems, "
"also known as Network Automata. It is a software abstraction meant to aid in the "
"implementation of models of collective computation",
license="Apache License 2.0",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
url='http://github.com/lantunes/netomaton',
author="<NAME>",
author_email="<EMAIL>",
packages=packages,
keywords=["network automata", "cellular automata", "complexity", "complex systems", "computation", "non-linear dynamics"],
python_requires='>3.5.2',
install_requires=["numpy >= 1.15.4", "matplotlib >= 3.0.2", "networkx >= 2.2", "scipy >= 1.3.1", "msgpack >= 1.0.2"])
|
1675453
|
import sys
import xbmcgui
from context import get_dbid, get_mediatype
from lib import cleaner
from lib.filemanager import FileManager
from lib.libs import quickjson, mediainfo as info
from lib.libs.pykodi import localize as L
def remove_art():
# TODO: seasons and episodes and whatever like "add missing artwork" does
listitem = sys.listitem
mediatype = get_mediatype(listitem)
dbid = get_dbid(listitem)
    if not (dbid and mediatype):
return
if not xbmcgui.Dialog().yesno("Artwork Beef: " + L(32427), L(750)):
return
remove_localfiles = xbmcgui.Dialog().yesno("Artwork Beef", L(32062))
mediaitem = info.MediaItem(quickjson.get_item_details(dbid, mediatype))
mediaitem.selectedart = cleaner.remove_specific_arttype(mediaitem, '* all')
if remove_localfiles:
FileManager().remove_deselected_files(mediaitem, True)
info.update_art_in_library(mediatype, dbid, mediaitem.selectedart)
info.remove_local_from_texturecache(mediaitem.art.values(), True)
xbmcgui.Dialog().notification("Artwork Beef", L(32027).format(len(mediaitem.selectedart)))
if __name__ == '__main__':
remove_art()
|
1675490
|
import os
image_files = []
os.chdir("dataset")
for filename in os.listdir(os.getcwd()):
if filename.endswith(".jpg"):
image_files.append("dataset/" + filename)
os.chdir("..")
with open("train.txt", "w") as outfile:
for image in image_files:
outfile.write(image)
outfile.write("\n")
outfile.close()
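# ---------------------------------------------------------------------------
# Hedged alternative sketch (assumption: the script is run from the directory
# containing `dataset/`): the same listing can be produced without chdir by
# using pathlib, e.g.:
#   from pathlib import Path
#   with open("train.txt", "w") as f:
#       f.writelines(f"dataset/{p.name}\n" for p in sorted(Path("dataset").glob("*.jpg")))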
|
1675522
|
from pymongo import ReturnDocument
from past.builtins import basestring
from collections import OrderedDict
from plynx.db.node import Node
from plynx.constants import NodeRunningStatus, Collections, NodeStatus
from plynx.utils.common import to_object_id, parse_search_string
from plynx.utils.db_connector import get_db_connector
_PROPERTIES_TO_GET_FROM_SUBS = ['node_running_status', 'logs', 'outputs', 'cache_url']
class NodeCollectionManager(object):
"""NodeCollectionManager contains all the operations to work with Nodes in the database."""
def __init__(self, collection):
super(NodeCollectionManager, self).__init__()
self.collection = collection
def get_db_objects(
self,
status='',
node_kinds=None,
search='',
per_page=20,
offset=0,
user_id=None,
):
"""Get subset of the Objects.
Args:
            status (str, list, None): Node Status(es) to filter by
            node_kinds (str, list, None): Node kind(s) to filter by
            search (str, None): Search pattern
            per_page (int): Number of Nodes per page
            offset (int): Offset
            user_id (str, ObjectId, None): User ID used to compute the `_readonly` flag
Return:
(list of dict) List of Nodes in dict format
"""
if status and isinstance(status, basestring):
status = [status]
if node_kinds and isinstance(node_kinds, basestring):
node_kinds = [node_kinds]
aggregate_list = []
search_parameters, search_string = parse_search_string(search)
# Match
and_query = {}
if node_kinds:
and_query['kind'] = {'$in': node_kinds}
if status:
and_query['node_status'] = {'$in': status}
if search_string:
and_query['$text'] = {'$search': search_string}
if 'original_node_id' in search_parameters:
and_query['original_node_id'] = to_object_id(search_parameters['original_node_id'])
if len(and_query):
aggregate_list.append({"$match": and_query})
# Join with users
aggregate_list.append({
'$lookup': {
'from': 'users',
'localField': 'author',
'foreignField': '_id',
'as': '_user'
}
})
# rm password hash
aggregate_list.append({
"$project": {
"_user.password_hash": 0,
}
})
# Match username
and_query = {}
if 'author' in search_parameters:
and_query['_user.username'] = search_parameters['author']
if len(and_query):
aggregate_list.append({"$match": and_query})
# sort
sort_dict = OrderedDict()
if 'sort' in search_parameters:
# TODO more sort options
if search_parameters['sort'] == 'starred':
sort_dict['starred'] = -1
sort_dict['insertion_date'] = -1
aggregate_list.append({
"$sort": sort_dict
}
)
aggregate_list.append({
"$addFields": {
'_readonly': {'$ne': ["$author", to_object_id(user_id)]},
}
})
# counts and pagination
aggregate_list.append({
'$facet': {
"metadata": [{"$count": "total"}],
"list": [{"$skip": int(offset)}, {"$limit": int(per_page)}],
}
})
return next(get_db_connector()[self.collection].aggregate(aggregate_list), None)
def get_db_objects_by_ids(self, ids, collection=None):
"""Find all the Objects with a given IDs.
Args:
ids (list of ObjectID): Object Ids
"""
db_objects = get_db_connector()[collection or self.collection].find({
'_id': {
'$in': list(ids)
}
})
return list(db_objects)
def _update_sub_nodes_fields(self, sub_nodes_dicts, reference_node_id, target_props, reference_collection=None):
if not sub_nodes_dicts:
return
reference_collection = reference_collection or self.collection
id_to_updated_node_dict = {}
upd_node_ids = set(map(lambda node_dict: node_dict[reference_node_id], sub_nodes_dicts))
for upd_node_dict in self.get_db_objects_by_ids(upd_node_ids, collection=reference_collection):
id_to_updated_node_dict[upd_node_dict['_id']] = upd_node_dict
for sub_node_dict in sub_nodes_dicts:
if sub_node_dict[reference_node_id] not in id_to_updated_node_dict:
continue
for prop in target_props:
sub_node_dict[prop] = id_to_updated_node_dict[sub_node_dict[reference_node_id]][prop]
def get_db_node(self, node_id, user_id=None):
"""Get dict representation of a Node.
Args:
node_id (ObjectId, str): Object ID
user_id (str, ObjectId, None): User ID
Return:
(dict) dict representation of the Object
"""
res = self.get_db_object(node_id, user_id)
if not res:
return res
sub_nodes_dicts = None
for parameter in res['parameters']:
if parameter['name'] == '_nodes':
sub_nodes_dicts = parameter['value']['value']
break
# TODO join collections using database capabilities
if self.collection == Collections.RUNS:
self._update_sub_nodes_fields(sub_nodes_dicts, '_id', _PROPERTIES_TO_GET_FROM_SUBS)
self._update_sub_nodes_fields(sub_nodes_dicts, 'original_node_id', ['node_status'], reference_collection=Collections.TEMPLATES)
return res
def get_db_object(self, object_id, user_id=None):
"""Get dict representation of an Object.
Args:
object_id (ObjectId, str): Object ID
user_id (str, ObjectId, None): User ID
Return:
(dict) dict representation of the Object
"""
res = get_db_connector()[self.collection].find_one({'_id': to_object_id(object_id)})
if not res:
return res
res['_readonly'] = (user_id != to_object_id(res['author']))
return res
@staticmethod
def _transplant_node(node, new_node):
if new_node._id == node.original_node_id:
return node
new_node.apply_properties(node)
new_node.original_node_id = new_node._id
new_node.parent_node_id = new_node.successor_node_id = None
new_node._id = node._id
return new_node
def upgrade_sub_nodes(self, main_node):
"""Upgrade deprecated Nodes.
The function does not change the original graph in the database.
Return:
(int): Number of upgraded Nodes
"""
assert self.collection == Collections.TEMPLATES
sub_nodes = main_node.get_parameter_by_name('_nodes').value.value
node_ids = set(
[node.original_node_id for node in sub_nodes]
)
db_nodes = self.get_db_objects_by_ids(node_ids)
new_node_db_mapping = {}
for db_node in db_nodes:
original_node_id = db_node['_id']
new_db_node = db_node
if original_node_id not in new_node_db_mapping:
while new_db_node['node_status'] != NodeStatus.READY and 'successor_node_id' in new_db_node and new_db_node['successor_node_id']:
n = self.get_db_node(new_db_node['successor_node_id'])
if n:
new_db_node = n
else:
break
new_node_db_mapping[original_node_id] = new_db_node
new_nodes = [
NodeCollectionManager._transplant_node(
node,
Node.from_dict(new_node_db_mapping[to_object_id(node.original_node_id)])
) for node in sub_nodes]
upgraded_nodes_count = sum(
1 for node, new_node in zip(sub_nodes, new_nodes) if node.original_node_id != new_node.original_node_id
)
main_node.get_parameter_by_name('_nodes').value.value = new_nodes
return upgraded_nodes_count
def pick_node(self, kinds):
node = get_db_connector()[self.collection].find_one_and_update(
{
'$and': [
{
'kind': {
'$in': kinds,
}
},
{
'node_running_status': {
'$in': [
NodeRunningStatus.READY,
NodeRunningStatus.IN_QUEUE,
]
}
},
],
},
{
'$set': {
'node_running_status': NodeRunningStatus.RUNNING
}
},
return_document=ReturnDocument.AFTER
)
return node
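# ---------------------------------------------------------------------------
# Hedged usage sketch (assumption: a MongoDB connection is configured for
# plynx.utils.db_connector; the search string and page size are illustrative):
#   manager = NodeCollectionManager(collection=Collections.TEMPLATES)
#   page = manager.get_db_objects(search="author:alice", per_page=10)
#   node_dict = manager.get_db_node(page["list"][0]["_id"]) if page else None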
|
1675524
|
from pathlib import Path
from termtosvg import anim
from termtosvg import asciicast
from termtosvg import config
from termtosvg import term
import tempfile
TEMPLATE = 'solarized_light'
def render(cast, svg_file):
with tempfile.TemporaryDirectory() as td:
cast_file = Path(td) / 'file.cast'
cast.write(cast_file)
_render_file(cast_file, svg_file)
def _render_file(cast_file, svg_file):
asciicast_records = asciicast.read_records(str(cast_file))
geometry, frames = term.timed_frames(asciicast_records)
template = config.default_templates()[TEMPLATE]
anim.render_animation(frames, geometry, str(svg_file), template)
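# Hedged usage sketch (assumption: `cast` is an object exposing .write(path),
# as expected by render() above; the output path is illustrative):
#   render(cast, Path("demo.svg"))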
|
1675643
|
import sys
from rpython.rlib.rarithmetic import intmask, r_uint, LONG_BIT
from rpython.rlib.objectmodel import we_are_translated
from rpython.rlib import rmmap
from rpython.rlib.debug import debug_start, debug_print, debug_stop
from rpython.rlib.debug import have_debug_prints
from rpython.rtyper.lltypesystem import lltype, rffi
class AsmMemoryManager(object):
LARGE_ALLOC_SIZE = 1024 * 1024 # 1MB
MIN_FRAGMENT = 64
NUM_INDICES = 32 # good for all sizes between 64 bytes and ~490 KB
_allocated = None
def __init__(self, large_alloc_size = LARGE_ALLOC_SIZE,
min_fragment = MIN_FRAGMENT,
num_indices = NUM_INDICES):
self.total_memory_allocated = r_uint(0)
self.total_mallocs = r_uint(0)
self.large_alloc_size = large_alloc_size
self.min_fragment = min_fragment
self.num_indices = num_indices
self.free_blocks = {} # map {start: stop}
self.free_blocks_end = {} # map {stop: start}
self.blocks_by_size = [[] for i in range(self.num_indices)]
def get_stats(self):
"""Returns stats for rlib.jit.jit_hooks.stats_asmmemmgr_*()."""
return (self.total_memory_allocated, self.total_mallocs)
def malloc(self, minsize, maxsize):
"""Allocate executable memory, between minsize and maxsize bytes,
and return a pair (start, stop). Does not perform any rounding
of minsize and maxsize.
"""
result = self._allocate_block(minsize)
(start, stop) = result
smaller_stop = start + maxsize
if smaller_stop + self.min_fragment <= stop:
self._add_free_block(smaller_stop, stop)
stop = smaller_stop
result = (start, stop)
self.total_mallocs += r_uint(stop - start)
return result # pair (start, stop)
def free(self, start, stop):
"""Free a block (start, stop) returned by a previous malloc()."""
        self.total_mallocs -= r_uint(stop - start)
        self._add_free_block(start, stop)
def open_malloc(self, minsize):
"""Allocate at least minsize bytes. Returns (start, stop)."""
result = self._allocate_block(minsize)
(start, stop) = result
self.total_mallocs += r_uint(stop - start)
return result
def open_free(self, middle, stop):
"""Used for freeing the end of an open-allocated block of memory."""
if stop - middle >= self.min_fragment:
self.total_mallocs -= r_uint(stop - middle)
self._add_free_block(middle, stop)
return True
else:
return False # too small to record
def _allocate_large_block(self, minsize):
# Compute 'size' from 'minsize': it must be rounded up to
# 'large_alloc_size'. Additionally, we use the following line
# to limit how many mmap() requests the OS will see in total:
minsize = max(minsize, intmask(self.total_memory_allocated >> 4))
size = minsize + self.large_alloc_size - 1
size = (size // self.large_alloc_size) * self.large_alloc_size
data = rmmap.alloc(size)
if not we_are_translated():
if self._allocated is None:
self._allocated = []
self._allocated.append((data, size))
if sys.maxint > 2147483647:
# Hack to make sure that mcs are not within 32-bits of one
# another for testing purposes
rmmap.hint.pos += 0x80000000 - size
self.total_memory_allocated += r_uint(size)
data = rffi.cast(lltype.Signed, data)
return self._add_free_block(data, data + size)
def _get_index(self, length):
i = 0
while length > self.min_fragment:
length = (length * 3) >> 2
i += 1
if i == self.num_indices - 1:
break
return i
def _add_free_block(self, start, stop):
# Merge with the block on the left
if start in self.free_blocks_end:
left_start = self.free_blocks_end[start]
self._del_free_block(left_start, start)
start = left_start
# Merge with the block on the right
if stop in self.free_blocks:
right_stop = self.free_blocks[stop]
self._del_free_block(stop, right_stop)
stop = right_stop
# Add it to the dicts
self.free_blocks[start] = stop
self.free_blocks_end[stop] = start
i = self._get_index(stop - start)
self.blocks_by_size[i].append(start)
return start
def _del_free_block(self, start, stop):
del self.free_blocks[start]
del self.free_blocks_end[stop]
i = self._get_index(stop - start)
self.blocks_by_size[i].remove(start)
def _allocate_block(self, length):
# First look in the group of index i0 if there is a block that is
# big enough. Following an idea found in the Linux malloc.c, we
# prefer the oldest entries rather than the newest one, to let
# them have enough time to coalesce into bigger blocks. It makes
# a big difference on the purely random test (30% of total usage).
i0 = self._get_index(length)
bbs = self.blocks_by_size[i0]
for j in range(len(bbs)):
start = bbs[j]
stop = self.free_blocks[start]
if start + length <= stop:
del bbs[j]
break # found a block big enough
else:
# Then look in the larger groups
i = i0 + 1
while i < self.num_indices:
if len(self.blocks_by_size[i]) > 0:
# any block found in a larger group is big enough
start = self.blocks_by_size[i].pop(0)
stop = self.free_blocks[start]
break
i += 1
else:
# Exhausted the memory. Allocate the resulting block.
start = self._allocate_large_block(length)
stop = self.free_blocks[start]
i = self._get_index(stop - start)
assert self.blocks_by_size[i][-1] == start
self.blocks_by_size[i].pop()
#
del self.free_blocks[start]
del self.free_blocks_end[stop]
return (start, stop)
def _delete(self):
"NOT_RPYTHON"
if self._allocated:
for data, size in self._allocated:
rmmap.free(data, size)
self._allocated = None
class MachineDataBlockWrapper(object):
def __init__(self, asmmemmgr, allblocks):
self.asmmemmgr = asmmemmgr
self.allblocks = allblocks
self.rawstart = 0
self.rawposition = 0
self.rawstop = 0
def done(self):
if self.rawstart != 0:
if self.asmmemmgr.open_free(self.rawposition, self.rawstop):
self.rawstop = self.rawposition
self.allblocks.append((self.rawstart, self.rawstop))
self.rawstart = 0
self.rawposition = 0
self.rawstop = 0
def _allocate_next_block(self, minsize):
self.done()
self.rawstart, self.rawstop = self.asmmemmgr.open_malloc(minsize)
self.rawposition = self.rawstart
def malloc_aligned(self, size, alignment):
p = self.rawposition
p = (p + alignment - 1) & (-alignment)
if p + size > self.rawstop:
self._allocate_next_block(size + alignment - 1)
p = self.rawposition
p = (p + alignment - 1) & (-alignment)
assert p + size <= self.rawstop
self.rawposition = p + size
return p
class BlockBuilderMixin(object):
_mixin_ = True
# A base class to generate assembler. It is equivalent to just a list
# of chars, but it is potentially more efficient for that usage.
# It works by allocating the assembler SUBBLOCK_SIZE bytes at a time.
# Ideally, this number should be a power of two that fits the GC's most
# compact allocation scheme (which is so far 35 * WORD for minimark.py).
WORD = LONG_BIT // 8
SUBBLOCK_SIZE = 32 * WORD
SUBBLOCK_PTR = lltype.Ptr(lltype.GcForwardReference())
SUBBLOCK = lltype.GcStruct('SUBBLOCK',
('prev', SUBBLOCK_PTR),
('data', lltype.FixedSizeArray(lltype.Char, SUBBLOCK_SIZE)))
SUBBLOCK_PTR.TO.become(SUBBLOCK)
ALIGN_MATERIALIZE = 16
gcroot_markers = None
def __init__(self, translated=None):
if translated is None:
translated = we_are_translated()
if translated:
self.init_block_builder()
else:
self._become_a_plain_block_builder()
self.rawstart = 0
def init_block_builder(self):
self._cursubblock = lltype.nullptr(self.SUBBLOCK)
self._baserelpos = -self.SUBBLOCK_SIZE
self._make_new_subblock()
def _make_new_subblock(self):
nextsubblock = lltype.malloc(self.SUBBLOCK)
nextsubblock.prev = self._cursubblock
self._cursubblock = nextsubblock
self._cursubindex = 0
self._baserelpos += self.SUBBLOCK_SIZE
_make_new_subblock._dont_inline_ = True
def writechar(self, char):
index = self._cursubindex
if index == self.SUBBLOCK_SIZE:
self._make_new_subblock()
index = 0
self._cursubblock.data[index] = char
self._cursubindex = index + 1
def absolute_addr(self):
return self.rawstart
def overwrite(self, index, char):
assert 0 <= index < self.get_relative_pos(break_basic_block=False)
block = self._cursubblock
index -= self._baserelpos
while index < 0:
block = block.prev
index += self.SUBBLOCK_SIZE
block.data[index] = char
def overwrite32(self, index, val):
self.overwrite(index, chr(val & 0xff))
self.overwrite(index + 1, chr((val >> 8) & 0xff))
self.overwrite(index + 2, chr((val >> 16) & 0xff))
self.overwrite(index + 3, chr((val >> 24) & 0xff))
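# e.g. overwrite32(i, 0x12345678) stores the bytes 0x78 0x56 0x34 0x12 at
# positions i..i+3, i.e. the 32-bit value is written little-endian.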
def get_relative_pos(self, break_basic_block=True):
# 'break_basic_block' is only used in x86
return self._baserelpos + self._cursubindex
def copy_to_raw_memory(self, addr):
# indirection for _become_a_plain_block_builder() and for subclasses
self._copy_to_raw_memory(addr)
def _copy_to_raw_memory(self, addr):
block = self._cursubblock
blocksize = self._cursubindex
targetindex = self._baserelpos
while targetindex >= 0:
dst = rffi.cast(rffi.CCHARP, addr + targetindex)
for j in range(blocksize):
dst[j] = block.data[j]
block = block.prev
blocksize = self.SUBBLOCK_SIZE
targetindex -= self.SUBBLOCK_SIZE
assert not block
def copy_core_dump(self, addr, offset=0, count=-1):
HEX = '0123456789ABCDEF'
dump = []
src = rffi.cast(rffi.CCHARP, addr)
end = self.get_relative_pos(break_basic_block=False)
if count != -1:
end = offset + count
for p in range(offset, end):
o = ord(src[p])
dump.append(HEX[o >> 4])
dump.append(HEX[o & 15])
return ''.join(dump)
def _dump(self, addr, logname, backend=None):
debug_start(logname)
if have_debug_prints():
#
if backend is not None:
debug_print('BACKEND', backend)
#
from rpython.jit.backend.hlinfo import highleveljitinfo
if highleveljitinfo.sys_executable:
debug_print('SYS_EXECUTABLE', highleveljitinfo.sys_executable)
else:
debug_print('SYS_EXECUTABLE', '??')
#
dump = self.copy_core_dump(addr)
debug_print('CODE_DUMP',
'@%x' % addr,
'+0 ', # backwards compatibility
dump)
#
debug_stop(logname)
def materialize(self, cpu, allblocks, gcrootmap=None):
size = self.get_relative_pos()
align = self.ALIGN_MATERIALIZE
size += align - 1
malloced = cpu.asmmemmgr.malloc(size, size)
allblocks.append(malloced)
rawstart = malloced[0]
rawstart = (rawstart + align - 1) & (-align)
self.rawstart = rawstart
self.copy_to_raw_memory(rawstart)
if self.gcroot_markers is not None:
assert gcrootmap is not None
for pos, mark in self.gcroot_markers:
gcrootmap.register_asm_addr(rawstart + pos, mark)
return rawstart
def _become_a_plain_block_builder(self):
# hack purely for speed of tests
self._data = _data = []
self.writechar = _data.append
self.overwrite = _data.__setitem__
def get_relative_pos(break_basic_block=True):
return len(_data)
self.get_relative_pos = get_relative_pos
def plain_copy_to_raw_memory(addr):
dst = rffi.cast(rffi.CCHARP, addr)
for i, c in enumerate(_data):
dst[i] = c
self._copy_to_raw_memory = plain_copy_to_raw_memory
def insert_gcroot_marker(self, mark):
if self.gcroot_markers is None:
self.gcroot_markers = []
self.gcroot_markers.append(
(self.get_relative_pos(break_basic_block=False), mark))
|
1675655
|
import os
import torch
import torch.utils.data
import torchvision
import numpy as np
from data.apple_dataset import AppleDataset
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
import utility.utils as utils
import utility.transforms as T
######################################################
# Predict with either a Faster-RCNN or Mask-RCNN predictor
# using the MinneApple dataset
######################################################
def get_transform(train):
transforms = []
transforms.append(T.ToTensor())
if train:
transforms.append(T.RandomHorizontalFlip(0.5))
return T.Compose(transforms)
def get_maskrcnn_model_instance(num_classes):
# load an instance segmentation model pre-trained on COCO
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False)
# get number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
# now get the number of input features for the mask classifier
in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
hidden_layer = 256
# and replace the mask predictor with a new one
model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, hidden_layer, num_classes)
return model
def get_frcnn_model_instance(num_classes):
# load an object detection model pre-trained on COCO
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False)
# get number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
return model
def main(args):
num_classes = 2
device = args.device
# Load the model from
print("Loading model")
# Create the correct model type
if args.mrcnn:
model = get_maskrcnn_model_instance(num_classes)
else:
model = get_frcnn_model_instance(num_classes)
# Load model parameters and keep on CPU
checkpoint = torch.load(args.weight_file, map_location=device)
model.load_state_dict(checkpoint['model'], strict=False)
model.eval()
print("Creating data loaders")
dataset_test = AppleDataset(args.data_path, get_transform(train=False))
data_loader_test = torch.utils.data.DataLoader(dataset_test, batch_size=1,
shuffle=False, num_workers=1,
collate_fn=utils.collate_fn)
# Create output directory
base_path = os.path.dirname(args.output_file)
if not os.path.exists(base_path):
os.makedirs(base_path)
# Predict on bboxes on each image
f = open(args.output_file, 'a')
for image, targets in data_loader_test:
image = list(img.to(device) for img in image)
outputs = model(image)
for ii, output in enumerate(outputs):
img_id = targets[ii]['image_id']
img_name = data_loader_test.dataset.get_img_name(img_id)
print("Predicting on image: {}".format(img_name))
boxes = output['boxes'].detach().numpy()
scores = output['scores'].detach().numpy()
im_names = np.repeat(img_name, len(boxes), axis=0)
stacked = np.hstack((im_names.reshape(len(scores), 1), boxes.astype(int), scores.reshape(len(scores), 1)))
# File to write predictions to
np.savetxt(f, stacked, fmt='%s', delimiter=',', newline='\n')
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='PyTorch Detection')
parser.add_argument('--data_path', required=True, help='path to the data to predict on')
parser.add_argument('--output_file', required=True, help='path where to write the prediction outputs')
parser.add_argument('--weight_file', required=True, help='path to the weight file')
parser.add_argument('--device', default='cpu', help='device to use. Either cpu or cuda')
model = parser.add_mutually_exclusive_group(required=True)
model.add_argument('--frcnn', action='store_true', help='use a Faster-RCNN model')
model.add_argument('--mrcnn', action='store_true', help='use a Mask-RCNN model')
args = parser.parse_args()
main(args)
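# Example invocation (illustrative; the script name and all paths below are placeholders):
#   python predict.py --data_path /path/to/MinneApple/test --weight_file checkpoint.pth \
#       --output_file predictions/boxes.csv --device cpu --mrcnn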
|
1675673
|
from django import template
from auxiliary.forms import FeedbackSuggestionForm
register = template.Library()
class FeedbackFormNode(template.Node):
"Addes the feedback suggestion for into context"
def render(self, context):
forms = context.get('suggestion_forms', {})
for_url = context['request'].get_full_path()
forms['feedback'] = FeedbackSuggestionForm(
initial={'url': for_url})
context['suggestion_forms'] = forms
return ''
def do_add_feedback_suggestion_form(parser, token):
return FeedbackFormNode()
register.tag('add_feedback_suggestion_form', do_add_feedback_suggestion_form)
|
1675705
|
import glob
def split_args(args, required=0, optional=0):
"""
splits the args string by comma and removes left and right whitespace.
enforces that the amount of args is >= required and <= required+optional.
if it is not then an error message is printed and None is returned
"""
args = [x.strip() for x in args.split(',')]
length = len(args)
# one empty element does not count!
if(length == 1 and not args[0]):
length = 0
args = []
if(length < required):
print('[ERROR] received ' + str(length) + ' arguments, but required ' + str(required) + ' arguments')
return None
elif(length > required+optional):
print('[ERROR] received ' + str(length) + ' arguments, but accepts no more than ' + str(required+optional) + ' arguments')
return None
else:
return args
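# Illustrative examples (assuming the module is imported and these are called directly):
#   split_args('a, b , c', required=2, optional=1)  -> ['a', 'b', 'c']
#   split_args('', required=1)                      -> prints an error and returns None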
def long_hex(number):
"""
converts number to hex just like the inbuilt hex function but also
pads zeroes such that there are always 8 hexadecimal digits
"""
value_hex = hex(number)
# pad with 0's. use 10 instead of 8 because of 0x prefix
if(len(value_hex) < 10):
value_hex = value_hex[0:2] + '0'*(10-len(value_hex)) + value_hex[2:]
return value_hex
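# Illustrative examples: long_hex(255) -> '0x000000ff', long_hex(0x12345678) -> '0x12345678'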
def autocomplete_getarg(line):
"""
autocomplete passes in a line like: get_memory arg1, arg2
the arg2 is what is being autocompleted on so return that
"""
# find the last argument, or the first one, which is separated from the command by a space
before_arg = line.rfind(',')
if(before_arg == -1):
before_arg = line.find(' ')
assert before_arg >= 0
# assign the arg. it skips the delimiter and any excess whitespace
return line[before_arg+1:].lstrip()
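# Illustrative examples:
#   autocomplete_getarg('get_memory arg1, arg2')  -> 'arg2'
#   autocomplete_getarg('get_memory ar')          -> 'ar'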
def autocomplete_file(line):
"""
helper that autocompletes files
"""
arg = autocomplete_getarg(line)
files = glob.glob(arg + "*")
return files
|
1675711
|
from rpython.rtyper.lltypesystem import rffi, lltype
from pypy.module.cpyext.test.test_api import BaseApiTest
from pypy.module.cpyext.api import Py_ssize_tP, PyObjectP
from pypy.module.cpyext.pyobject import make_ref, from_ref
from pypy.interpreter.error import OperationError
class TestDictObject(BaseApiTest):
def test_dict(self, space, api):
d = api.PyDict_New()
assert space.eq_w(d, space.newdict())
assert space.eq_w(api.PyDict_GetItem(space.wrap({"a": 72}),
space.wrap("a")),
space.wrap(72))
assert api.PyDict_SetItem(d, space.wrap("c"), space.wrap(42)) >= 0
assert space.eq_w(space.getitem(d, space.wrap("c")),
space.wrap(42))
space.setitem(d, space.wrap("name"), space.wrap(3))
assert space.eq_w(api.PyDict_GetItem(d, space.wrap("name")),
space.wrap(3))
space.delitem(d, space.wrap("name"))
assert not api.PyDict_GetItem(d, space.wrap("name"))
assert not api.PyErr_Occurred()
buf = rffi.str2charp("name")
assert not api.PyDict_GetItemString(d, buf)
rffi.free_charp(buf)
assert not api.PyErr_Occurred()
assert api.PyDict_Contains(d, space.wrap("c"))
assert not api.PyDict_Contains(d, space.wrap("z"))
assert api.PyDict_DelItem(d, space.wrap("c")) == 0
assert api.PyDict_DelItem(d, space.wrap("name")) < 0
assert api.PyErr_Occurred() is space.w_KeyError
api.PyErr_Clear()
assert api.PyDict_Size(d) == 0
space.setitem(d, space.wrap("some_key"), space.wrap(3))
buf = rffi.str2charp("some_key")
assert api.PyDict_DelItemString(d, buf) == 0
assert api.PyDict_Size(d) == 0
assert api.PyDict_DelItemString(d, buf) < 0
assert api.PyErr_Occurred() is space.w_KeyError
api.PyErr_Clear()
rffi.free_charp(buf)
d = space.wrap({'a': 'b'})
api.PyDict_Clear(d)
assert api.PyDict_Size(d) == 0
def test_check(self, space, api):
d = api.PyDict_New()
assert api.PyDict_Check(d)
assert api.PyDict_CheckExact(d)
sub = space.appexec([], """():
class D(dict):
pass
return D""")
d = space.call_function(sub)
assert api.PyDict_Check(d)
assert not api.PyDict_CheckExact(d)
i = space.wrap(2)
assert not api.PyDict_Check(i)
assert not api.PyDict_CheckExact(i)
def test_keys(self, space, api):
w_d = space.newdict()
space.setitem(w_d, space.wrap("a"), space.wrap("b"))
assert space.eq_w(api.PyDict_Keys(w_d), space.wrap(["a"]))
assert space.eq_w(api.PyDict_Values(w_d), space.wrap(["b"]))
assert space.eq_w(api.PyDict_Items(w_d), space.wrap([("a", "b")]))
def test_update(self, space, api):
w_d = space.newdict()
space.setitem(w_d, space.wrap("a"), space.wrap("b"))
w_d2 = api.PyDict_Copy(w_d)
assert not space.is_w(w_d2, w_d)
space.setitem(w_d, space.wrap("c"), space.wrap("d"))
space.setitem(w_d2, space.wrap("e"), space.wrap("f"))
api.PyDict_Update(w_d, w_d2)
assert space.unwrap(w_d) == dict(a='b', c='d', e='f')
def test_iter(self, space, api):
w_dict = space.sys.getdict(space)
py_dict = make_ref(space, w_dict)
ppos = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw')
ppos[0] = 0
pkey = lltype.malloc(PyObjectP.TO, 1, flavor='raw')
pvalue = lltype.malloc(PyObjectP.TO, 1, flavor='raw')
try:
w_copy = space.newdict()
while api.PyDict_Next(w_dict, ppos, pkey, pvalue):
w_key = from_ref(space, pkey[0])
w_value = from_ref(space, pvalue[0])
space.setitem(w_copy, w_key, w_value)
finally:
lltype.free(ppos, flavor='raw')
lltype.free(pkey, flavor='raw')
lltype.free(pvalue, flavor='raw')
api.Py_DecRef(py_dict) # release borrowed references
assert space.eq_w(space.len(w_copy), space.len(w_dict))
assert space.eq_w(w_copy, w_dict)
def test_iterkeys(self, space, api):
w_dict = space.sys.getdict(space)
py_dict = make_ref(space, w_dict)
ppos = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw')
pkey = lltype.malloc(PyObjectP.TO, 1, flavor='raw')
pvalue = lltype.malloc(PyObjectP.TO, 1, flavor='raw')
keys_w = []
values_w = []
try:
ppos[0] = 0
while api.PyDict_Next(w_dict, ppos, pkey, None):
w_key = from_ref(space, pkey[0])
keys_w.append(w_key)
ppos[0] = 0
while api.PyDict_Next(w_dict, ppos, None, pvalue):
w_value = from_ref(space, pvalue[0])
values_w.append(w_value)
finally:
lltype.free(ppos, flavor='raw')
lltype.free(pkey, flavor='raw')
lltype.free(pvalue, flavor='raw')
api.Py_DecRef(py_dict) # release borrowed references
assert space.eq_w(space.newlist(keys_w),
space.call_method(w_dict, "keys"))
assert space.eq_w(space.newlist(values_w),
space.call_method(w_dict, "values"))
def test_dictproxy(self, space, api):
w_dict = space.sys.get('modules')
w_proxy = api.PyDictProxy_New(w_dict)
assert space.is_true(space.contains(w_proxy, space.wrap('sys')))
raises(OperationError, space.setitem,
w_proxy, space.wrap('sys'), space.w_None)
raises(OperationError, space.delitem,
w_proxy, space.wrap('sys'))
raises(OperationError, space.call_method, w_proxy, 'clear')
|
1675737
|
import ui
class DayCheckBox(ui.CheckBox):
def __init__(self, caption, day):
ui.CheckBox.__init__(self, caption)
self.day = day
|
1675741
|
import base64
import binascii
import json
import logging
import re
from arq import Retry
from buildpg import MultipleValues, Values
from chevron import ChevronError
from concurrent.futures import TimeoutError
from datetime import datetime, timezone
from foxglove import glove
from httpx import ConnectError
from itertools import chain
from pathlib import Path
from typing import List, Optional
from src.ext import ApiError
from src.render import EmailInfo, MessageDef, render_email
from src.schemas.messages import (
THIS_DIR,
AttachmentModel,
EmailRecipientModel,
EmailSendMethod,
EmailSendModel,
MessageStatus,
)
from src.settings import Settings
main_logger = logging.getLogger('worker.email')
test_logger = logging.getLogger('worker.test')
STYLES_SASS = (THIS_DIR / 'extra' / 'default-styles.scss').read_text()
email_retrying = [5, 10, 60, 600, 1800, 3600, 12 * 3600]
def utcnow():
return datetime.utcnow().replace(tzinfo=timezone.utc)
class SendEmail:
__slots__ = 'ctx', 'settings', 'recipient', 'group_id', 'company_id', 'm', 'tags'
def __init__(self, ctx: dict, group_id: int, company_id: int, recipient: EmailRecipientModel, m: EmailSendModel):
self.ctx = ctx
self.settings: Settings = ctx['settings']
self.group_id = group_id
self.company_id = company_id
self.recipient: EmailRecipientModel = recipient
self.m: EmailSendModel = m
self.tags = list(set(self.recipient.tags + self.m.tags + [str(self.m.uid)]))
async def run(self):
main_logger.info('Sending email to %s via %s', self.recipient.address, self.m.method)
if self.ctx['job_try'] > len(email_retrying):
main_logger.error('%s: tried to send email %d times, all failed', self.group_id, self.ctx['job_try'])
await self._store_email_failed(MessageStatus.send_request_failed, 'upstream error')
return
context = dict(self.m.context, **self.recipient.context)
if 'styles__sass' not in context and re.search(r'\{\{\{ *styles *\}\}\}', self.m.main_template):
context['styles__sass'] = STYLES_SASS
headers = dict(self.m.headers, **self.recipient.headers)
email_info = await self._render_email(context, headers)
if not email_info:
return
attachments = [a async for a in self._generate_base64_pdf(self.recipient.pdf_attachments)]
attachments += [a async for a in self._generate_base64(self.recipient.attachments)]
if self.m.method == EmailSendMethod.email_mandrill:
if self.recipient.address.endswith('@example.com'):
_id = re.sub(r'[^a-zA-Z0-9\-]', '', f'mandrill-{self.recipient.address}')
await self._store_email(_id, utcnow(), email_info)
else:
await self._send_mandrill(email_info, attachments)
elif self.m.method == EmailSendMethod.email_test:
await self._send_test_email(email_info, attachments)
else:
raise NotImplementedError()
async def _send_mandrill(self, email_info: EmailInfo, attachments: List[dict]):
data = {
'async': True,
'message': dict(
html=email_info.html_body,
subject=email_info.subject,
from_email=self.m.from_address.email,
from_name=self.m.from_address.name,
to=[dict(email=self.recipient.address, name=email_info.full_name, type='to')],
headers=email_info.headers,
track_opens=True,
track_clicks=False,
auto_text=True,
view_content_link=False,
signing_domain=self.m.from_address.email[self.m.from_address.email.index('@') + 1 :],
subaccount=self.m.subaccount,
tags=self.tags,
inline_css=True,
important=self.m.important,
attachments=attachments,
),
}
send_ts = utcnow()
job_try = self.ctx['job_try']
defer = email_retrying[job_try - 1]
try:
r = await self.ctx['mandrill'].post('messages/send.json', **data)
except (ConnectError, TimeoutError) as e:
main_logger.info('client connection error group_id=%s job_try=%s defer=%ss', self.group_id, job_try, defer)
raise Retry(defer=defer) from e
except ApiError as e:
if e.status in {502, 504} or (e.status == 500 and '<center>nginx/' in e.body):
main_logger.info(
'temporary mandrill error group_id=%s status=%s job_try=%s defer=%ss',
self.group_id,
e.status,
job_try,
defer,
)
raise Retry(defer=defer) from e
else:
# if the status is not 502 or 504, or 500 from nginx then raise
raise
data = r.json()
assert len(data) == 1, data
data = data[0]
assert data['email'] == self.recipient.address, data
await self._store_email(data['_id'], send_ts, email_info)
async def _send_test_email(self, email_info: EmailInfo, attachments: List[dict]):
data = dict(
from_email=self.m.from_address.email,
from_name=self.m.from_address.name,
group_uuid=str(self.m.uid),
headers=email_info.headers,
to_address=self.recipient.address,
to_name=email_info.full_name,
to_user_link=self.recipient.user_link,
tags=self.tags,
important=self.m.important,
attachments=[
f'{a["name"]}:{base64.b64decode(a["content"]).decode(errors="ignore"):.40}' for a in attachments
],
)
msg_id = re.sub(r'[^a-zA-Z0-9\-]', '', f'{self.m.uid}-{self.recipient.address}')
send_ts = utcnow()
output = (
f'to: {self.recipient.address}\n'
f'msg id: {msg_id}\n'
f'ts: {send_ts}\n'
f'subject: {email_info.subject}\n'
f'data: {json.dumps(data, indent=2)}\n'
f'content:\n'
f'{email_info.html_body}\n'
)
if self.settings.test_output: # pragma: no branch
Path.mkdir(self.settings.test_output, parents=True, exist_ok=True)
save_path = self.settings.test_output / f'{msg_id}.txt'
test_logger.info('sending message: %s (saved to %s)', output, save_path)
save_path.write_text(output)
await self._store_email(msg_id, send_ts, email_info)
async def _render_email(self, context, headers) -> Optional[EmailInfo]:
m = MessageDef(
first_name=self.recipient.first_name,
last_name=self.recipient.last_name,
main_template=self.m.main_template,
mustache_partials=self.m.mustache_partials,
macros=self.m.macros,
subject_template=self.m.subject_template,
context=context,
headers=headers,
)
try:
return render_email(m, self.ctx['email_click_url'])
except ChevronError as e:
await self._store_email_failed(MessageStatus.render_failed, f'Error rendering email: {e}')
async def _generate_base64_pdf(self, pdf_attachments):
kwargs = dict(page_size='A4', zoom='1.25', margin_left='8mm', margin_right='8mm')
for a in pdf_attachments:
if a.html:
try:
pdf_content = await self.ctx['pydf'].generate_pdf(a.html, **kwargs)
except RuntimeError as e:
main_logger.warning('error generating pdf, data: %s', e)
else:
yield dict(type='application/pdf', name=a.name, content=base64.b64encode(pdf_content).decode())
async def _generate_base64(self, attachments: List[AttachmentModel]):
for attachment in attachments:
try:
# Check to see if content can be decoded from base64
base64.b64decode(attachment.content, validate=True)
except binascii.Error:
# Content has not yet been base64 encoded so needs to be encoded
content = base64.b64encode(attachment.content).decode()
else:
# Content has already been base64 encoded so just pass content through
content = attachment.content.decode()
yield dict(name=attachment.name, type=attachment.mime_type, content=content)
async def _store_email(self, external_id, send_ts, email_info: EmailInfo):
data = dict(
external_id=external_id,
group_id=self.group_id,
company_id=self.company_id,
method=self.m.method,
send_ts=send_ts,
status=MessageStatus.send,
to_first_name=self.recipient.first_name,
to_last_name=self.recipient.last_name,
to_user_link=self.recipient.user_link,
to_address=self.recipient.address,
tags=self.tags,
subject=email_info.subject,
body=email_info.html_body,
)
attachments = [
f'{getattr(a, "id", None) or ""}::{a.name}'
for a in chain(self.recipient.pdf_attachments, self.recipient.attachments)
]
if attachments:
data['attachments'] = attachments
message_id = await glove.pg.fetchval_b(
'insert into messages (:values__names) values :values returning id', values=Values(**data)
)
if email_info.shortened_link:
await glove.pg.execute_b(
'insert into links (:values__names) values :values',
values=MultipleValues(
*[Values(message_id=message_id, token=token, url=url) for url, token in email_info.shortened_link]
),
)
async def _store_email_failed(self, status: MessageStatus, error_msg):
await glove.pg.fetchval_b(
'insert into messages (:values__names) values :values returning id',
values=Values(
group_id=self.group_id,
company_id=self.company_id,
method=self.m.method,
status=status,
to_first_name=self.recipient.first_name,
to_last_name=self.recipient.last_name,
to_user_link=self.recipient.user_link,
to_address=self.recipient.address,
tags=self.tags,
body=error_msg,
),
)
async def send_email(ctx, group_id: int, company_id: int, recipient: EmailRecipientModel, m: EmailSendModel):
s = SendEmail(ctx, group_id, company_id, recipient, m)
return await s.run()
|
1675752
|
import glob
import random
import os
import numpy as np
from PIL import Image
import tensorflow as tf
class ImageDataset(object):
def __init__(self, root, img_size=128, load_size=None, mask_size=64, mode='train', crop_mode='random'):
self.img_size = img_size
self.load_size = load_size
self.mask_size = mask_size
self.mode = mode
self.files = sorted(glob.glob('%s/*.jpg' % root))
self.files = self.files[:-4000] if mode == 'train' else self.files[-4000:]
self.crop_mode=crop_mode
def crop_and_resize(self,img):
x,y = img.size
ms = min(img.size)
x_start = (x-ms)//2
y_start = (y-ms)//2
x_stop = x_start + ms
y_stop = y_start + ms
img = img.crop((x_start, y_start, x_stop, y_stop))
img = img.resize((self.img_size, self.img_size), Image.BICUBIC)
return img
def transform(self,img):
return np.array(img, 'float32') / 127.5 - 1
def apply_random_mask(self, img):
"""Randomly masks image"""
y1, x1 = np.random.randint(0, self.img_size-self.mask_size, 2)
y2, x2 = y1 + self.mask_size, x1 + self.mask_size
mask = np.zeros((self.img_size, self.img_size, 1), 'float32')
mask[x1:x2, y1:y2, 0] = 1
masked_part = img.crop((x1, y1, x2, y2)).copy()
masked_img = img.copy()
for i in range(x1,x2):
for j in range(y1,y2):
masked_img.putpixel((i,j), (255,255,255))
return masked_img, masked_part, mask
def apply_center_mask(self, img):
"""Mask center part of image"""
# Get upper-left pixel coordinate
i = (self.img_size - self.mask_size) // 2
mask = np.zeros((self.img_size, self.img_size, 1), 'float32')
mask[i:i+self.mask_size, i:i+self.mask_size,0] = 1
masked_part = img.crop((i, i, i+self.mask_size, i+self.mask_size))
masked_img = img.copy()
for j in range(i,i+self.mask_size):
for k in range(i,i+self.mask_size):
masked_img.putpixel((j,k), (255,255,255))
return masked_img, masked_part, mask
def __getitem__(self, index):
img = Image.open(self.files[index % len(self.files)])
img = self.crop_and_resize(img)
#img = self.transform(img)
if self.mode == 'train':
if self.crop_mode=='random':
# For training data perform random mask
masked_img, aux, mask = self.apply_random_mask(img)
elif self.crop_mode == 'none':
masked_img, aux, mask = self.apply_center_mask(img)
else:
# For test data mask the center of the image
masked_img, aux, mask = self.apply_center_mask(img)
return self.transform(img), self.transform(masked_img), self.transform(aux), mask
def __len__(self):
return len(self.files)
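# Example usage (illustrative; the image directory is a placeholder):
#   dataset = ImageDataset('/path/to/jpg_images', img_size=128, mask_size=64, mode='train')
#   img, masked_img, masked_part, mask = dataset[0]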
|
1675894
|
import uuid
from django import forms
from ajax_upload.models import UploadedFile
class UploadedFileForm(forms.ModelForm):
class Meta:
model = UploadedFile
fields = ('file',)
def clean_file(self):
data = self.cleaned_data['file']
# Change the name of the file to something unguessable
# Construct the new name as <unique-hex>-<original>.<ext>
data.name = u'%s-%s' % (uuid.uuid4().hex, data.name)
return data
|
1675931
|
import cv2
from PIL import Image
# code to score an image with a blurriness factor
def IsBlurryJPEG(jpg, cutoff=3000):
image = cv2.imdecode(jpg, -1)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
variance = cv2.Laplacian(gray, cv2.CV_64F).var()
#print("blurry", variance)
return variance < cutoff
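# Example usage (illustrative; 'photo.jpg' is a placeholder path):
#   import numpy as np
#   jpg_bytes = np.fromfile('photo.jpg', dtype=np.uint8)
#   print(IsBlurryJPEG(jpg_bytes, cutoff=3000))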
|
1675980
|
from Crypto.Cipher import AES
import os
BS = AES.block_size
pad = lambda s: s + (BS - len(s) % BS) * chr(BS - len(s) % BS)
unpad = lambda s: s[0:-ord(s[-1])]
key = os.urandom(16) # the length can be (16, 24, 32)
# key='xxxxx'  # 32 hex characters (0-f), corresponding to 16 bytes
text = 'content==顶你哦,记得回访哦xxxxx'
# def encrypt(data,secret)
cipher = AES.new(key, AES.MODE_ECB)  # ECB mode
encrypted = cipher.encrypt(pad(text)).encode('hex')
print encrypted # will be something like 'f456a6b0e54e35f2711a9fa078a76d16'
decrypted = unpad(cipher.decrypt(encrypted.decode('hex')))
print decrypted
|
1676002
|
import numpy as np
from pylayers.measures.vna.E5072A import *
from pylayers.measures.parker.smparker import *
from pylayers.antprop.channel import *
from pylayers.measures.exploith5 import *
M = Mesh5('mimocal8_4_V1')
#M.plot(cmd='mes',mes='mes1',lg=[2,0,1,0])
#M.plot(cmd='cal',mes='mes1',lg=[2,0,1,0])
#M.plot(cmd='ri',mes='mes1',lg=[2,0,1,0])
#f = h5py.File(M.filename,'r')
#f['mes3']
#H = np.array(f['mes3'])
#plt.imshow(np.abs(H[0,0,0,:,:]))
#plt.show()
#plt.ion()
#M.dmes
#M.mes
#M.read('mes',1)
#M.read('mes',[1,1])
#M.mes.plot()
#cir = M.mes.ift(ffts=1)
#cir.plot()
|
1676072
|
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
from DQM.SiPixelPhase1Common.HistogramManager_cfi import *
DefaultHistoDebug = DefaultHisto.clone(
topFolderName = "PixelPhase1/Debug"
)
SiPixelPhase1GeometryDebugDetId = DefaultHistoDebug.clone(
name = "debug_detid",
title = "Location of DetIds",
xlabel = "DetId",
dimensions = 1,
specs = VPSet(
StandardSpecification2DProfile,
StandardSpecificationPixelmapProfile,
)
)
SiPixelPhase1GeometryDebugLadderBlade = DefaultHistoDebug.clone(
name = "debug_ladderblade",
title = "Location of Ladders/Blades",
xlabel = "offline Ladder/Blade #",
dimensions = 1,
specs = VPSet(
StandardSpecification2DProfile,
StandardSpecificationPixelmapProfile,
)
)
SiPixelPhase1GeometryDebugROC = DefaultHistoDebug.clone(
name = "debug_roc",
title = "Location of ROCs",
xlabel = "ROC#",
dimensions = 1,
specs = VPSet(
# TODO: make this per ROC!
StandardSpecification2DProfile,
StandardSpecificationPixelmapProfile,
Specification()
.groupBy("PXBarrel/PXLayer/PXModuleName/SignedLadderCoord/SignedModuleCoord")
.groupBy("PXBarrel/PXLayer/PXModuleName/SignedLadderCoord", "EXTEND_X")
.groupBy("PXBarrel/PXLayer/PXModuleName/", "EXTEND_Y")
.reduce("MEAN")
.save(),
)
)
SiPixelPhase1GeometryDebugFED = DefaultHistoDebug.clone(
name = "debug_fed",
title = "Location of FEDs",
xlabel = "FED#",
dimensions = 1,
specs = VPSet(
StandardSpecification2DProfile,
StandardSpecificationPixelmapProfile,
)
)
SiPixelPhase1GeometryDebugConf = cms.VPSet(
SiPixelPhase1GeometryDebugDetId,
SiPixelPhase1GeometryDebugLadderBlade,
SiPixelPhase1GeometryDebugROC,
SiPixelPhase1GeometryDebugFED,
)
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
SiPixelPhase1GeometryDebugAnalyzer = DQMEDAnalyzer('SiPixelPhase1GeometryDebug',
histograms = SiPixelPhase1GeometryDebugConf,
geometry = SiPixelPhase1Geometry
)
SiPixelPhase1GeometryDebugHarvester = DQMEDHarvester("SiPixelPhase1Harvester",
histograms = SiPixelPhase1GeometryDebugConf,
geometry = SiPixelPhase1Geometry
)
|
1676085
|
import copy
from PyQt5 import QtCore
from DyCommon.Ui.DyTableWidget import *
class DyStockTradeStrategyPosWidget(DyTableWidget):
""" 策略持仓窗口 """
stockMarketTicksSignal = QtCore.pyqtSignal(type(DyEvent()))
header = ['代码', '名称', '总数量/可用数量(股)', '成本价(元)', '现价(元)', '市值(元)', '盈亏(元)', '盈亏比(%)', '除权除息', '建仓时间']
def __init__(self, eventEngine, strategyCls):
super().__init__(readOnly=True, index=False, floatRound=3)
self._eventEngine = eventEngine
self._strategyCls = strategyCls
self.setColNames(self.header)
self.setAutoForegroundCol('盈亏(元)')
self._curPos = {}
def _updatePos(self, pos):
datetime_ = pos.datetime
try:
datetime_ = pos.datetime.strftime("%Y-%m-%d %H:%M:%S")
except:
pass
self[pos.code] = [pos.code, pos.name,
'%.3f/%.3f'%(pos.totalVolume, pos.availVolume),
pos.cost, pos.price,
pos.totalVolume*pos.price,
pos.totalVolume*(pos.price - pos.cost),
(pos.price - pos.cost)/pos.cost*100 if pos.cost > 0 else 'N/A',
'是' if pos.xrd else '否',
datetime_
]
def update(self, positions):
"""
@positions: OrderedDict or dict, {code: DyStockPos}. Positions are pushed as a full snapshot, not incrementally.
"""
self.clearAllRows()
for _, pos in positions.items():
self._updatePos(pos)
# register/unregister event or not
if not self._curPos and positions:
self._registerEvent()
elif self._curPos and not positions:
self._unregisterEvent()
self._curPos = copy.deepcopy(positions)
def closeEvent(self, event):
if self._curPos:
self._unregisterEvent()
return super().closeEvent(event)
def _stockMarketTicksSignalEmitWrapper(self, event):
self.stockMarketTicksSignal.emit(event)
def _registerEvent(self):
self.stockMarketTicksSignal.connect(self._stockMarketTicksHandler)
self._eventEngine.register(DyEventType.stockMarketTicks, self._stockMarketTicksSignalEmitWrapper)
def _unregisterEvent(self):
self.stockMarketTicksSignal.disconnect(self._stockMarketTicksHandler)
self._eventEngine.unregister(DyEventType.stockMarketTicks, self._stockMarketTicksSignalEmitWrapper)
def _stockMarketTicksHandler(self, event):
ticks = event.data
for code, pos in self._curPos.items():
tick = ticks.get(code)
if tick is None:
continue
pos.price = tick.price
self._updatePos(pos)
|
1676107
|
from graphql.ast import (
Field,
FragmentSpread,
InlineFragment,
Variable,
)
def get_input_value(value, variables, variable_definitions):
if isinstance(value, Variable):
variable_name = value.name
default_value = variable_definitions[variable_name].default_value
value = variables.get(variable_name, default_value)
if isinstance(value, list):
return [get_input_value(item, variables, variable_definitions) for item in value]
return value
def get_selections(selections, fragments, object_type, seen_fragments=None):
_selections = []
if seen_fragments is None:
seen_fragments = set()
for selection in selections:
if isinstance(selection, Field):
_selections.append(selection)
continue
if isinstance(selection, FragmentSpread):
fragment = fragments[selection.name]
elif isinstance(selection, InlineFragment):
fragment = selection
# If the fragment doesn't apply to the current object, don't
# add its selections. This could happen for example if this is
# a union of different object types with different fields for
# each type.
if fragment.type_condition.name != object_type.object_name:
continue
# Skip fragments we've already seen to avoid recursion issues.
if hasattr(fragment, 'name'):
if fragment.name in seen_fragments:
continue
else:
seen_fragments.add(fragment.name)
_selections += get_selections(
selections=fragment.selections,
fragments=fragments,
object_type=object_type,
seen_fragments=seen_fragments,
)
return _selections
|
1676108
|
ROOT = '(Type here)'
DSPRITESPATH = '(Type here)'
import os
import sys
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '../..'))
from utils.writer_op import create_muldir, create_dir
def subdirs5resultdir(result_dir, generate_option=False):
save_dir = result_dir+'save/'
log_dir = result_dir+'log/'
asset_dir = result_dir+'asset/'
if generate_option: create_muldir(save_dir, log_dir, asset_dir)
return save_dir, log_dir, asset_dir
def dir2subdir(dir_path, file_id, generate_option=False):
subdir_path = dir_path+'%s/'%file_id
if generate_option: create_dir(subdir_path)
return subdir_path
def muldir2mulsubdir(dir_pathes, file_id, generate_option=False):
subdir_pathes = list()
for dir_path in dir_pathes: subdir_pathes.append(dir2subdir(dir_path=dir_path, file_id=file_id, generate_option=generate_option))
return subdir_pathes
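# Example usage (illustrative; './results/exp1/' is a placeholder):
#   save_dir, log_dir, asset_dir = subdirs5resultdir('./results/exp1/', generate_option=True)
#   sub_save, sub_log = muldir2mulsubdir([save_dir, log_dir], file_id='run0', generate_option=True)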
|
1676125
|
from cotede_qc.cotede_test import get_qc
import numpy
def test(p, parameters):
'''Run the CoTeDe Anomaly Detection QC.'''
config = 'anomaly_detection'
testname = 'anomaly_detection'
qc = get_qc(p, config, testname)
return qc
|
1676160
|
import os
import pandas as pd
from GridCal.Engine import *
folder = '/home/santi/Descargas/USATestSystem/SyntheticUSA_IV'
branch_df = pd.read_csv(os.path.join(folder, 'branch.csv'))
bus_df = pd.read_csv(os.path.join(folder, 'bus.csv'))
bus2sub_df = pd.read_csv(os.path.join(folder, 'bus2sub.csv'))
dcline_df = pd.read_csv(os.path.join(folder, 'dcline.csv'))
demand_df = pd.read_csv(os.path.join(folder, 'demand.csv'))
gencost_df = pd.read_csv(os.path.join(folder, 'gencost.csv'))
hydro_df = pd.read_csv(os.path.join(folder, 'hydro.csv'))
plant_df = pd.read_csv(os.path.join(folder, 'plant.csv'))
solar_df = pd.read_csv(os.path.join(folder, 'solar.csv'))
sub_df = pd.read_csv(os.path.join(folder, 'sub.csv'))
wind_df = pd.read_csv(os.path.join(folder, 'wind.csv'))
zone_df = pd.read_csv(os.path.join(folder, 'zone.csv'))
bus_df2 = pd.merge(pd.merge(pd.merge(bus_df,
bus2sub_df, on='bus_id'),
sub_df, on='sub_id'),
zone_df, on='zone_id')
grid = MultiCircuit('USA')
bus_dict = dict()
for i, entry in bus_df2.iterrows():
bus = Bus(name=str(entry['bus_id']),
vnom=entry['baseKV'],
vmin=entry['Vmin'],
vmax=entry['Vmax'],
r_fault=0.0,
x_fault=0.0,
xpos=0,
ypos=0,
height=40,
width=80,
active=True,
is_slack=False,
area='Default',
zone=entry['zone_name'],
substation=entry['name'],
country='USA',
longitude=entry['lon'],
latitude=entry['lat'])
if (entry['Bs'] + entry['Gs']) != 0.0:
sh = Shunt(name='Sh' + str(i), G=entry['Gs'], B=entry['Bs'])
bus.add_device(sh)
if (entry['Pd'] + entry['Qd']) != 0.0:
ld = Load(name='Load' + str(i), P=entry['Pd'], Q=entry['Qd'], cost=1200.0)
bus.add_device(ld)
bus_dict[entry['bus_id']] = bus
grid.add_bus(bus)
br_types = {'Line': BranchType.Line,
'Transformer': BranchType.Transformer,
'TransformerWinding': BranchType.Transformer}
for i, entry in branch_df.iterrows():
id = str(entry['branch_id'])
f = bus_dict[entry['from_bus_id']]
t = bus_dict[entry['to_bus_id']]
tpe = br_types[entry['branch_device_type']]
tap = entry['ratio']
if tap == 0.0:
tap = 1.0
branch = Branch(bus_from=f,
bus_to=t,
name=id,
r=entry['r'],
x=entry['x'],
b=entry['b'],
rate=entry['rateA'],
tap=tap,
shift_angle=0,
active=bool(entry['status']),
tolerance=0,
cost=1000.0,
branch_type=tpe)
grid.add_branch(branch)
FileSave(grid, 'USA.gridcal').save()
|
1676206
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
try:
import Queue
except ImportError:
import queue as Queue
import threading
import time
from trident.misc.ipython_utils import is_in_ipython, is_in_colab
if is_in_ipython():
from IPython import display
if not is_in_colab():
import matplotlib
matplotlib.use('TkAgg' if not is_in_ipython() and not is_in_colab() else 'NbAgg' )
else:
import matplotlib
import matplotlib.pyplot as plt
import itertools
from trident.data.image_common import list_pictures
class ImageThread(threading.Thread):
"""Image Thread"""
def __init__(self, queue, out_queue):
threading.Thread.__init__(self)
self.queue = queue
self.out_queue = out_queue
def run(self):
while True:
# Grabs image path from queue
image_path_group = self.queue.get()
# Grab image
image_group = [plt.imread(i) for i in image_path_group]
# Place image in out queue
self.out_queue.put(image_group)
# Signals to queue job is done
self.queue.task_done()
class ImageReader(object):
r"""Base class for all Samplers.
Every Sampler subclass has to provide an __iter__ method, providing a way
to iterate over indices of dataset elements, and a __len__ method that
returns the length of the returned iterators.
"""
def __init__(self,images=None):
self.image_paths =None
if images is not None :
if hasattr(images, '__iter__') and all(isinstance(img, str) for img in images):
self.image_paths=images
else:
raise TypeError('images must be a list of one or more strings.')
self.workers=2
self.itr = 0
self.statistics=[]
self.buffer_size = 5
self._minibatch_size = 32
self.input_qsize = 50
self.min_input_qsize = 10
self.n_minibatches_to_run = float('inf')
self.queue = Queue.Queue()
self.out_queue = Queue.Queue(maxsize=self.buffer_size)
self.prepare_queue()
def prepare_queue(self):
if self.image_paths is not None and len(self.image_paths)>0:
self.grouped_image_paths = zip(*[iter(self.image_paths[:-(len(self.image_paths) % self._minibatch_size)])] * self._minibatch_size)
self.grouped_image_paths = itertools.cycle(self.grouped_image_paths)
self.threadPool=[]
for i in range(self.workers):
t = ImageThread(self.queue, self.out_queue)
t.setDaemon(True)
t.start()
self.threadPool.append(t)
for image_path_group in range(self.input_qsize):
image_path_group = self.grouped_image_paths.__next__()
self.queue.put(image_path_group)
@property
def minibatch_size(self):
return self._minibatch_size
@minibatch_size.setter
def minibatch_size(self, minibatch_size):
if (isinstance(minibatch_size, str)):
self._minibatch_size = int(minibatch_size)
elif (isinstance(minibatch_size, int)):
self._minibatch_size = minibatch_size
self.grouped_image_paths = zip(*[iter(self.image_paths[:-(len(self.image_paths) % self._minibatch_size)])] * self._minibatch_size)
self.grouped_image_paths = itertools.cycle(self.grouped_image_paths)
def get_all_images(self,base_folder):
self.image_paths=list_pictures(base_folder)
self.prepare_queue()
def __iter__(self):
if self.itr<=self.n_minibatches_to_run:
start = time.time()
image_group = self.out_queue.get()
stop = time.time()
self.statistics.append(stop - start)
self.itr += 1
if self.queue.qsize() <= self.min_input_qsize:
for image_path_group in range(self.input_qsize):
image_path_group = self.grouped_image_paths.__next__()
self.queue.put(image_path_group)
yield image_group
def __len__(self):
return len(self.image_paths) -(len(self.image_paths) % self._minibatch_size)
|
1676239
|
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
from PyQt5.QtWidgets import QMainWindow, QApplication, QPushButton, QWidget, QAction, QTabWidget,QVBoxLayout
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot
import sys, os
class plotWindow():
def __init__(self, title="Plot Window", parent=None):
self.app = QApplication(sys.argv)
self.MainWindow = QMainWindow()
self.MainWindow.__init__()
self.MainWindow.setWindowTitle(title)
self.canvases = []
self.figure_handles = []
self.toolbar_handles = []
self.tab_handles = []
self.current_window = -1
self.tabs = QTabWidget()
self.MainWindow.setCentralWidget(self.tabs)
# self.MainWindow.resize(1920, 1080)
self.MainWindow.resize(1200, 980)
self.MainWindow.show()
def addPlot(self, title, figure, threeD=False):
new_tab = QWidget()
layout = QVBoxLayout()
new_tab.setLayout(layout)
figure.subplots_adjust(left=0.05, right=0.99, bottom=0.05, top=0.91, wspace=0.2, hspace=0.2)
new_canvas = FigureCanvas(figure)
new_toolbar = NavigationToolbar(new_canvas, new_tab)
layout.addWidget(new_canvas)
layout.addWidget(new_toolbar)
self.tabs.addTab(new_tab, title)
self.toolbar_handles.append(new_toolbar)
self.canvases.append(new_canvas)
self.figure_handles.append(figure)
if threeD:
figure.axes[0].mouse_init()
self.tab_handles.append(new_tab)
def show(self):
return self.app.exec_()
def saveFig(self, fig, filepath, format='svg', sizeInches=[]):
if fig is None:
return
allaxes = fig.get_axes()
for ax in allaxes:
ax.autoscale() # Reset to default zoom
restoreSize = fig.get_size_inches()
if not sizeInches:
if format == 'png': # Increase size for saved png
sizeInches = [16,11]
# sizeInches = [20,14]
else: # svg or other formats
sizeInches = [11,8]
fig.set_size_inches(sizeInches)
directory = os.path.dirname(filepath)
if not os.path.exists(directory):
os.makedirs(directory)
fig.savefig(os.path.join(filepath + '.' + format), bbox_inches='tight')
fig.set_size_inches(restoreSize)
if __name__ == '__main__':
import numpy as np
pw = plotWindow()
x = np.arange(0, 10, 0.001)
f = plt.figure()
ysin = np.sin(x)
plt.plot(x, ysin, '--')
pw.addPlot("sin", f)
f = plt.figure()
ycos = np.cos(x)
plt.plot(x, ycos, '--')
pw.addPlot("cos", f)
pw.show()
# sys.exit(app.exec_())
|
1676281
|
import math
import time
import torch
import torch.cuda.nvtx as nvtx
import numpy as np
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
from tqdm import tqdm
from utils.initializers import args_initialize, env_initialize, log_initialize, model_initialize
from a2c.helper import callback, format_time, gen_data
from a2c.model import ActorCritic
from a2c.test import test
class data_prefetcher():
def __init__(self, loader):
self.loader = iter(loader)
self.stream = torch.cuda.Stream()
self.preload()
def preload(self):
with torch.cuda.stream(self.stream):
try:
self.next_states, self.next_actions, self.next_action_log_probs, self.next_returns, self.next_advantages = next(self.loader)
except StopIteration:
self.next_states, self.next_actions, self.next_action_log_probs, self.next_returns, self.next_advantages = None, None, None, None, None
return
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
states = self.next_states
actions = self.next_actions
action_log_probs = self.next_action_log_probs
returns = self.next_returns
advantages = self.next_advantages
self.preload()
return states, actions, action_log_probs, returns, advantages
def worker(gpu, ngpus_per_node, args):
env_device, train_device = args_initialize(gpu, ngpus_per_node, args)
train_csv_file, train_csv_writer, eval_csv_file, eval_csv_writer, summary_writer = log_initialize(args, train_device)
train_env, test_env, observation = env_initialize(args, env_device)
model = ActorCritic(args.num_stack, train_env.action_space, normalize=args.normalize, name=args.env_name)
model, optimizer = model_initialize(args, model, train_device)
shape = (args.num_steps + 1, args.num_ales, args.num_stack, *train_env.observation_space.shape[-2:])
states = torch.zeros(shape, device=train_device, dtype=torch.float32)
states[0, :, -1] = observation.to(device=train_device, dtype=torch.float32)
shape = (args.num_steps + 1, args.num_ales)
values = torch.zeros(shape, device=train_device, dtype=torch.float32)
logits = torch.zeros((args.num_steps + 1, args.num_ales, train_env.action_space.n), device=train_device, dtype=torch.float32)
returns = torch.zeros(shape, device=train_device, dtype=torch.float32)
shape = (args.num_steps, args.num_ales)
rewards = torch.zeros(shape, device=train_device, dtype=torch.float32)
masks = torch.zeros(shape, device=train_device, dtype=torch.float32)
actions = torch.zeros(shape, device=train_device, dtype=torch.long)
# These variables are used to compute average rewards for all processes.
episode_rewards = torch.zeros(args.num_ales, device=train_device, dtype=torch.float32)
final_rewards = torch.zeros(args.num_ales, device=train_device, dtype=torch.float32)
episode_lengths = torch.zeros(args.num_ales, device=train_device, dtype=torch.float32)
final_lengths = torch.zeros(args.num_ales, device=train_device, dtype=torch.float32)
if args.use_gae:
gae = torch.zeros(args.num_ales, device=train_device, dtype=torch.float32)
maybe_npy = lambda a: a.numpy() if args.use_openai else a
num_frames_per_iter = args.num_ales * args.num_steps
args.num_minibatches = num_frames_per_iter / args.batch_size
total_steps = math.ceil(args.t_max / (args.world_size * num_frames_per_iter))
decay = 1.0 / total_steps
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.ppo_epoch, gamma=1.0 - decay)
iterator = range(total_steps)
if args.rank == 0:
iterator = tqdm(iterator)
total_time = 0
evaluation_offset = 0
train_stream = torch.cuda.Stream()
torch.cuda.synchronize()
for update in iterator:
T = args.world_size * update * num_frames_per_iter
if (args.rank == 0) and (T >= evaluation_offset):
evaluation_offset += args.evaluation_interval
eval_lengths, eval_rewards = test(args, model, test_env)
lmean, lmedian, lmin, lmax, lstd = gen_data(eval_lengths)
rmean, rmedian, rmin, rmax, rstd = gen_data(eval_rewards)
length_data = '(length) min/max/mean/median: {lmin:4.1f}/{lmax:4.1f}/{lmean:4.1f}/{lmedian:4.1f}'.format(lmin=lmin, lmax=lmax, lmean=lmean, lmedian=lmedian)
reward_data = '(reward) min/max/mean/median: {rmin:4.1f}/{rmax:4.1f}/{rmean:4.1f}/{rmedian:4.1f}'.format(rmin=rmin, rmax=rmax, rmean=rmean, rmedian=rmedian)
print('[training time: {}] {}'.format(format_time(total_time), ' --- '.join([length_data, reward_data])))
if eval_csv_writer and eval_csv_file:
eval_csv_writer.writerow([T, total_time, rmean, rmedian, rmin, rmax, rstd, lmean, lmedian, lmin, lmax, lstd])
eval_csv_file.flush()
if args.plot:
summary_writer.add_scalar('eval/rewards_mean', rmean, T, walltime=total_time)
summary_writer.add_scalar('eval/lengths_mean', lmean, T, walltime=total_time)
start_time = time.time()
with torch.no_grad():
for step in range(args.num_steps):
nvtx.range_push('train:step')
value, logit = model(states[step])
# store values and logits
values[step], logits[step] = value.squeeze(-1), logit.squeeze(-1)
# convert actions to numpy and perform next step
probs = torch.clamp(F.softmax(logit, dim=1), min = 0.00001, max = 0.99999)
probs_action = probs.multinomial(1).to(env_device)
observation, reward, done, info = train_env.step(maybe_npy(probs_action))
if args.use_openai:
# convert back to pytorch tensors
observation = torch.from_numpy(observation)
reward = torch.from_numpy(reward)
done = torch.from_numpy(done.astype(np.uint8))
else:
observation = observation.squeeze(-1).unsqueeze(1)
# move back to training memory
observation = observation.to(device=train_device)
reward = reward.to(device=train_device, dtype=torch.float32)
done = done.to(device=train_device, dtype=torch.bool)
probs_action = probs_action.to(device=train_device, dtype=torch.long)
not_done = 1.0 - done.float()
# update rewards and actions
actions[step].copy_(probs_action.view(-1))
masks[step].copy_(not_done)
rewards[step].copy_(reward.sign())
# update next observations
states[step + 1, :, :-1].copy_(states[step, :, 1:])
states[step + 1] *= not_done.view(-1, *[1] * (observation.dim() - 1))
states[step + 1, :, -1].copy_(observation.view(-1, *states.size()[-2:]))
# update episodic reward counters
episode_rewards += reward
final_rewards[done] = episode_rewards[done]
episode_rewards *= not_done
episode_lengths += not_done
final_lengths[done] = episode_lengths[done]
episode_lengths *= not_done
nvtx.range_pop()
returns[-1] = values[-1] = model(states[-1])[0].data.squeeze(-1)
if args.use_gae:
gae.zero_()
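# Generalized Advantage Estimation, computed backwards in time:
#   delta_t = r_t + gamma * V(s_{t+1}) * mask_t - V(s_t)
#   A_t     = delta_t + gamma * tau * mask_t * A_{t+1}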
for step in reversed(range(args.num_steps)):
delta = rewards[step] + (args.gamma * values[step + 1] * masks[step]) - values[step]
gae = delta + (args.gamma * args.tau * masks[step] * gae)
returns[step] = gae + values[step]
else:
for step in reversed(range(args.num_steps)):
returns[step] = rewards[step] + (args.gamma * returns[step + 1] * masks[step])
log_probs = F.log_softmax(logits[:-1].view(-1, train_env.action_space.n), dim=1)
action_log_probs = log_probs.gather(1, actions.view(-1).unsqueeze(-1))
advantages = returns[:-1].view(-1).unsqueeze(-1) - values[:-1].view(-1).unsqueeze(-1)
advantages = (advantages - advantages.mean()) / (advantages.std() + float(np.finfo(np.float32).eps))
total_value_loss = 0.0
total_policy_loss = 0.0
total_dist_entropy = 0.0
nvtx.range_push('train:loader')
states_view = states[:-1].view(-1, *states.size()[-3:])
actions_view = actions.view(-1)
returns_view = returns[:-1].view(-1)
train_dataset = torch.utils.data.TensorDataset(states_view, actions_view, action_log_probs, returns_view, advantages)
train_sampler = None
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=0, pin_memory=False, sampler=train_sampler)
nvtx.range_pop()
with torch.cuda.stream(train_stream):
for epoch in range(args.ppo_epoch):
nvtx.range_push('train:epoch_step')
if args.distributed:
train_sampler.set_epoch(epoch)
prefetcher = data_prefetcher(train_loader)
local_states, local_actions, local_action_log_probs, local_returns, local_advantages = prefetcher.next()
while local_states is not None:
batch_values, batch_logits = model(local_states)
batch_log_probs = F.log_softmax(batch_logits, dim=1)
batch_action_log_probs = batch_log_probs.gather(1, local_actions.unsqueeze(-1))
batch_probs = F.softmax(batch_logits, dim=1)
batch_dist_entropy = -(batch_log_probs * batch_probs).sum(-1).mean()
ratio = torch.exp(batch_action_log_probs - local_action_log_probs)
surrogate1 = ratio * local_advantages
surrogate2 = torch.clamp(ratio, 1.0 - args.clip_epsilon, 1.0 + args.clip_epsilon) * local_advantages
batch_policy_loss = -torch.min(surrogate1, surrogate2).mean()
batch_value_loss = F.mse_loss(local_returns.unsqueeze(-1), batch_values) / 2.0
loss = batch_value_loss * args.value_loss_coef + batch_policy_loss - batch_dist_entropy * args.entropy_coef
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
total_value_loss += batch_value_loss.item()
total_policy_loss += batch_policy_loss.item()
total_dist_entropy += batch_dist_entropy.item()
local_states, local_actions, local_action_log_probs, local_returns, local_advantages = prefetcher.next()
scheduler.step()
nvtx.range_pop()
torch.cuda.synchronize()
states[0].copy_(states[-1])
if args.rank == 0:
iter_time = time.time() - start_time
total_time += iter_time
value_loss = total_value_loss / (args.ppo_epoch * args.num_minibatches)
policy_loss = total_policy_loss / (args.ppo_epoch * args.num_minibatches)
dist_entropy = total_dist_entropy / (args.ppo_epoch * args.num_minibatches)
if args.plot:
summary_writer.add_scalar('train/rewards_mean', final_rewards.mean().item(), T, walltime=total_time)
summary_writer.add_scalar('train/lengths_mean', final_lengths.mean().item(), T, walltime=total_time)
summary_writer.add_scalar('train/learning_rate', scheduler.get_lr()[0], T, walltime=total_time)
summary_writer.add_scalar('train/value_loss', value_loss, T, walltime=total_time)
summary_writer.add_scalar('train/policy_loss', policy_loss, T, walltime=total_time)
summary_writer.add_scalar('train/entropy', dist_entropy, T, walltime=total_time)
progress_data = callback(args, model, T, iter_time, final_rewards, final_lengths,
value_loss, policy_loss, dist_entropy, train_csv_writer, train_csv_file)
iterator.set_postfix_str(progress_data)
if args.plot and (args.rank == 0):
summary_writer.close()
if args.use_openai:
train_env.close()
if args.use_openai_test_env:
test_env.close()
|
1676284
|
import numpy as np
import networkx as nx
import pickle as cp
import random
import ctypes
import os
import sys
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import time
sys.path.append( '%s/code' % os.path.dirname(os.path.realpath(__file__)) )
from learning_lib import LearningLib
n_valid = 100
w_scaling = 0.01
MAX_VAL = 1000000
MIN_VAL = -1000000
def gen_graph(opt):
max_n = int(opt['max_n'])
min_n = int(opt['min_n'])
g_type = opt['g_type']
max_w = 10
min_w = 1
graph_id = np.random.randint(MAX_VAL)
cur_n = np.random.randint(max_n - min_n + 1) + min_n
if g_type == 'erdos_renyi':
p = float(opt['density'])
e_g = nx.erdos_renyi_graph(n = cur_n, p = p, seed = graph_id)
lcc = max(nx.connected_component_subgraphs(e_g), key=len)
g = nx.convert_node_labels_to_integers(lcc)
elif g_type == 'powerlaw':
p = float(opt['density'])
g = nx.powerlaw_cluster_graph(n = cur_n, m = 4, p = p, seed = graph_id)
elif g_type == 'barabasi_albert':
p = int(opt['density'])
if p == 0:
max_p = 16
min_p = 1
p = np.random.randint(max_p - min_p + 1) + min_p
g = nx.barabasi_albert_graph(n = cur_n, m = p, seed = graph_id)
for edge in nx.edges(g):
pert = np.random.uniform(-0.5,0.5)
weight = np.random.randint(max_w - min_w + 1) + min_w
g[edge[0]][edge[1]]['weight'] = (weight + pert) * w_scaling
return g
def gen_new_graphs(opt):
api.ClearTrainGraphs()
for i in range(1000):
g = gen_graph(opt)
api.InsertGraph(g, is_test=False)
def PrepareValidData(opt):
for i in range(n_valid):
g = gen_graph(opt)
api.InsertGraph(g, is_test=True)
if __name__ == '__main__':
start_time = time.time()
api = LearningLib(sys.argv)
opt = {}
for i in range(1, len(sys.argv), 2):
opt[sys.argv[i][1:]] = sys.argv[i + 1]
seed = int(opt['seed'])
np.random.seed(seed)
print("***********************************************************")
print("[INFO] TRAINING ON RANDOM GRAPHS")
print("[INFO] Graph type: " + opt['g_type'])
print("[INFO] Density parameter: " + opt['density'])
print("[INFO] Number of nodes: [" + opt['min_n'] + " " + opt['max_n'] + "]")
print("***********************************************************")
sys.stdout.flush()
# Build the validation set
PrepareValidData(opt)
# Generate the training set
gen_new_graphs(opt)
for i in range(10):
api.lib.PlayGame(100, ctypes.c_double(1.0))
api.TakeSnapshot()
eps_start = 1.0
eps_end = 0.05
eps_step = 10000.0
lr = float(opt['learning_rate'])
print('[INFO]','iter', 'time', 'lr', 'eps', 'avg-width','avg-bound','avg-reward')
sys.stdout.flush()
best_reward = (0,0,0,0,0,0,MIN_VAL)
if int(opt["plot_training"]) == 1:
fig = plt.figure()
iter_list = []
reward_list = []
for iter in range(int(opt['max_iter'])):
eps = eps_end + max(0., (eps_start - eps_end) * (eps_step - iter) / eps_step)
if iter % 10 == 0:
api.lib.PlayGame(10, ctypes.c_double(eps))
if iter % 100 == 0:
sys.stdout.flush()
width, bound, reward = 0.0, 0.0, 0.0
for idx in range(n_valid):
val, sol = api.GetResult(idx)
width += sol[0]
bound += sol[1]
reward += val
width, bound, reward = (width/n_valid, bound/n_valid, reward/n_valid)
cur_time = round(time.time() - start_time,2)
it_data = (iter, cur_time, lr, eps, width, bound, reward)
print("[DATA]", " ".join(map(str,it_data)))
if reward > best_reward[-1]:
best_reward = it_data
sys.stdout.flush()
model_path = '%s/model_iter_%d.model' % (opt['save_dir'], iter)
api.SaveModel(model_path)
if int(opt["plot_training"]) == 1:
iter_list.append(iter)
reward_list.append(reward)
plt.clf()
plt.plot(iter_list, reward_list)
out_file = '%s/log_training_curve_reward.png' % opt['save_dir']
plt.savefig(out_file, dpi = 300)
if iter % 1000 == 0:
api.TakeSnapshot()
lr = lr * 0.95
if iter and iter % 5000 == 0:
print("[LOG] Refreshing Training set")
gen_new_graphs(opt)
api.lib.Fit(ctypes.c_double(lr))
print("[BEST-REWARD]", " ".join(map(str,best_reward)))
|
1676385
|
from contextlib import suppress
from io import BytesIO
from flash.core.serve.dag.task import get_deps
from flash.core.serve.execution import TaskComposition
with suppress(ImportError):
import graphviz
def _dag_to_graphviz(dag, dependencies, request_data, response_data, *, no_optimization=False):
if not graphviz: # pragma: no cover
raise ImportError("Visualizing graphs requires graphviz")
graph_attr = {"rankdir": "BT"}
g = graphviz.Digraph(graph_attr=graph_attr)
for task_name, task in dag.items():
if task_name not in response_data:
# not an endpoint result.
cluster, *_ = task_name.split(".")
with g.subgraph(name=f"cluster_{cluster}") as c:
c.node(task_name, task_name, shape="rectangle")
c.attr(label=f"Component: {cluster}", color="blue")
else:
# an endpoint result
g.node(task_name, task_name, shape="rectangle")
for parent in dependencies[task_name]:
g.edge(parent, task_name)
if no_optimization:
return g
for request_name, task_key in request_data.items():
cluster, *_ = task_key.split(".")
g.node(request_name, request_name, shape="oval")
with g.subgraph(name=f"cluster_{cluster}") as c:
c.node(task_key, task_key, shape="rectangle")
c.edge(task_key, task_key[: -len(".serial")])
g.edge(request_name, task_key)
for response_name, task_key in response_data.items():
g.node(response_name, response_name, shape="oval")
return g
def visualize(
tc: "TaskComposition",
fhandle: BytesIO = None,
format: str = "png",
*,
no_optimization: bool = False,
):
"""Visualize a graph."""
dsk = tc.pre_optimization_dsk if no_optimization else tc.dsk
dependencies, dependents = get_deps(dsk)
g = _dag_to_graphviz(
dag=dsk,
dependencies=dependencies,
request_data=tc.ep_dsk_input_keys,
response_data=tc.ep_dsk_output_keys,
no_optimization=no_optimization,
)
if fhandle is not None:
data = g.pipe(format=format)
fhandle.seek(0)
fhandle.write(data)
return
return g
|
1676414
|
import setuptools
setuptools.setup(
name='py_etherscan_api',
version='0.9.0',
packages=['examples', 'examples.stats', 'examples.tokens',
'examples.accounts', 'examples.blocks', 'examples.transactions', 'etherscan'],
url='https://github.com/corpetty/py-etherscan-api',
license='MIT',
author='coreypetty',
author_email='<EMAIL>',
description='Python Bindings to Etherscan.io API',
install_requires=[
'requests>=2.20.0',
],
classifiers=[
"Programming Language :: Python :: 3"
]
)
|
1676444
|
from fuzzconfig import FuzzConfig
import nonrouting
import fuzzloops
import nets
import pytrellis
import re
# No evidence this affects any bits.
cfg = FuzzConfig(job="PLC2MKMUX", family="MachXO2", device="LCMXO2-1200HC", ncl="empty.ncl", tiles=["R10C11:PLC"])
def main():
pytrellis.load_database("../../../database")
cfg.setup()
empty_bitfile = cfg.build_design(cfg.ncl, {})
cfg.ncl = "mkmux.ncl"
def per_slice(slicen):
def get_substs(m0mux="M0", m1mux="M1", f_mode="F"):
if m0mux == "OFF":
s_m0mux = "#OFF"
else:
s_m0mux = m0mux
return dict(slice=slicen, m0mux=s_m0mux, m1mux=m1mux)
nonrouting.fuzz_enum_setting(cfg, "SLICE{}.M0MUX".format(slicen), ["M0", "OFF", "0"],
lambda x: get_substs(m0mux=x),
empty_bitfile, False)
nonrouting.fuzz_enum_setting(cfg, "SLICE{}.M1MUX".format(slicen), ["M1", "OFF", "0"],
lambda x: get_substs(m1mux=x),
empty_bitfile, False)
nonrouting.fuzz_enum_setting(cfg, "SLICE{}.F0".format(slicen), ["F", "OFF", "0"],
lambda x: get_substs(f_mode=x),
empty_bitfile, False)
fuzzloops.parallel_foreach(["A", "B", "C", "D"], per_slice)
if __name__ == "__main__":
main()
|
1676453
|
import sys
from typing import Optional
from bowler import Query, LN, Capture, Filename, TOKEN, SYMBOL
from fissix.pytree import Node, Leaf
from lib2to3.fixer_util import Name, KeywordArg, Dot, Comma, Newline, ArgList
def filter_print_string(node, capture, filename) -> bool:
function_name = capture.get("function_name")
from pprint import pprint
pprint(node)
pprint(capture)
return True
def filter_has_no_on_delete(node: LN, capture: Capture, filename: Filename) -> bool:
arguments = capture.get("function_arguments")[0].children
for arg in arguments:
if arg.type == SYMBOL.argument and arg.children[0].type == TOKEN.NAME:
arg_name = arg.children[0].value
if arg_name == "on_delete":
return False # this call already has an on_delete argument.
return True
def add_on_delete_cascade(
node: LN, capture: Capture, filename: Filename
) -> Optional[LN]:
arguments = capture.get("function_arguments")[0]
new_on_delete_node = KeywordArg(Name(" on_delete"), Name("models.CASCADE"))
if isinstance(arguments, Leaf): # Node is a leaf and so we need to replace it with a list of things we want instead.
arguments.replace([arguments.clone(),Comma(),new_on_delete_node])
else:
arguments.append_child(Comma())
arguments.append_child(new_on_delete_node)
return node
(
Query(sys.argv[1])
.select_method("ForeignKey")
.is_call()
.filter(filter_has_no_on_delete)
.modify(add_on_delete_cascade)
.idiff()
),
(
Query(sys.argv[1])
.select_method("OneToOneField")
.is_call()
.filter(filter_has_no_on_delete)
.modify(add_on_delete_cascade)
.idiff()
)
|
1676522
|
import os
from flask import *
from werkzeug.utils import secure_filename
from flask_restful import Api, Resource
from flask_cors import CORS, cross_origin
from fastai.vision.all import *
from PIL import Image
import numpy as np
import base64
from io import BytesIO
import json
import logging
import sys
app = Flask(__name__)
CORS(app)
api = Api(app)
app.logger.addHandler(logging.StreamHandler(sys.stdout))
app.logger.setLevel(logging.DEBUG)
model = load_learner('model/model_v0.pkl')
UPLOAD_FOLDER = 'uploads'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
ALLOWED_EXTENSIONS = (['png', 'jpg', 'jpeg'])
def is_allowed_filename(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@cross_origin()
@app.route('/upload', methods=['POST'])
def upload():
if 'file' not in request.files:
image = json.dumps(request.get_json())
im = Image.open(BytesIO(base64.b64decode(image.split(',')[1])))
im.save('image.png')
image_np = np.array(im)
image_without_alpha = image_np[:, :, :3]
is_clean, _, probs = model.predict(image_without_alpha)
prob = float(list(probs.numpy())[1])
return {"is_clean": is_clean, "predictedVal": prob}
file = request.files['file']
if file.filename == '':
resp = jsonify({'message': 'No file selected for uploading'})
resp.status_code = 400
return resp
if file and is_allowed_filename(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
path = UPLOAD_FOLDER + '/' + filename
        resp = jsonify(predict(path))  # wrap the dict so a status code can be set on the response
        resp.status_code = 201
        return resp
else:
resp = jsonify({'message': 'Allowed file types are png, jpg, jpeg'})
resp.status_code = 400
return resp
@cross_origin()
def predict(img_path):
img = Image.open(img_path)
print(img)
img_np = np.array(img)
is_clean, _ ,probs = model.predict(img_np)
prob = float(list(probs.numpy())[1])
return {"is_clean": is_clean , "predictedVal": prob}
if __name__ == "__main__":
app.run(debug=True)
|
1676574
|
r"""Utilities to compile possibly incomplete Python source code.
This module provides two interfaces, broadly similar to the builtin
function compile(), which take program text, a filename and a 'mode'
and:
- Return code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
Approach:
First, check if the source consists entirely of blank lines and
comments; if so, replace it with 'pass', because the built-in
parser doesn't always do the right thing for these.
Compile three times: as is, with \n, and with \n\n appended. If it
compiles as is, it's complete. If it compiles with one \n appended,
we expect more. If it doesn't compile either way, we compare the
error we get when compiling with \n or \n\n appended. If the errors
are the same, the code is broken. But if the errors are different, we
expect more. Not intuitive; not even guaranteed to hold in future
releases; but this matches the compiler's behavior from Python 1.4
through 2.2, at least.
Caveat:
It is possible (but not likely) that the parser stops parsing with a
successful outcome before reaching the end of the source; in this
case, trailing symbols may be ignored instead of causing an error.
For example, a backslash followed by two newlines may be followed by
arbitrary garbage. This will be fixed once the API for the parser is
better.
The two interfaces are:
compile_command(source, filename, symbol):
Compiles a single command in the manner described above.
CommandCompiler():
Instances of this class have __call__ methods identical in
signature to compile_command; the difference is that if the
instance compiles program text containing a __future__ statement,
the instance 'remembers' and compiles all subsequent program texts
with the statement in force.
The module also provides another class:
Compile():
Instances of this class act like the built-in function compile,
but with 'memory' in the sense described above.
"""
import __future__
import warnings
_features = [getattr(__future__, fname)
for fname in __future__.all_feature_names]
__all__ = ["compile_command", "Compile", "CommandCompiler"]
PyCF_DONT_IMPLY_DEDENT = 0x200 # Matches pythonrun.h
def _maybe_compile(compiler, source, filename, symbol):
# Check for source consisting of only blank lines and comments
for line in source.split("\n"):
line = line.strip()
if line and line[0] != '#':
break # Leave it alone
else:
if symbol != "eval":
source = "pass" # Replace it with a 'pass' statement
err = err1 = err2 = None
code = code1 = code2 = None
try:
code = compiler(source, filename, symbol)
except SyntaxError:
pass
# Catch syntax warnings after the first compile
# to emit warnings (SyntaxWarning, DeprecationWarning) at most once.
with warnings.catch_warnings():
warnings.simplefilter("error")
try:
code1 = compiler(source + "\n", filename, symbol)
except SyntaxError as e:
err1 = e
try:
code2 = compiler(source + "\n\n", filename, symbol)
except SyntaxError as e:
err2 = e
try:
if code:
return code
if not code1 and repr(err1) == repr(err2):
raise err1
finally:
err1 = err2 = None
def _compile(source, filename, symbol):
return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT)
def compile_command(source, filename="<input>", symbol="single"):
r"""Compile a command and determine whether it is incomplete.
Arguments:
source -- the source string; may contain \n characters
filename -- optional filename from which source was read; default
"<input>"
symbol -- optional grammar start symbol; "single" (default), "exec"
or "eval"
Return value / exceptions raised:
- Return a code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
"""
return _maybe_compile(_compile, source, filename, symbol)
class Compile:
"""Instances of this class behave much like the built-in compile
function, but if one is used to compile text containing a future
statement, it "remembers" and compiles all subsequent program texts
with the statement in force."""
def __init__(self):
self.flags = PyCF_DONT_IMPLY_DEDENT
def __call__(self, source, filename, symbol):
codeob = compile(source, filename, symbol, self.flags, True)
for feature in _features:
if codeob.co_flags & feature.compiler_flag:
self.flags |= feature.compiler_flag
return codeob
class CommandCompiler:
"""Instances of this class have __call__ methods identical in
signature to compile_command; the difference is that if the
instance compiles program text containing a __future__ statement,
the instance 'remembers' and compiles all subsequent program texts
with the statement in force."""
def __init__(self,):
self.compiler = Compile()
def __call__(self, source, filename="<input>", symbol="single"):
r"""Compile a command and determine whether it is incomplete.
Arguments:
source -- the source string; may contain \n characters
filename -- optional filename from which source was read;
default "<input>"
symbol -- optional grammar start symbol; "single" (default) or
"eval"
Return value / exceptions raised:
- Return a code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
"""
return _maybe_compile(self.compiler, source, filename, symbol)
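# --- Usage sketch (added for illustration; not part of the original module) ---
# Demonstrates the three documented outcomes of compile_command from the module
# docstring. The exact SyntaxError text is interpreter-dependent, so the broken
# input is only exercised inside a try/except.
if __name__ == "__main__":
    assert compile_command("print(1)") is not None   # complete and valid -> code object
    assert compile_command("if True:") is None       # incomplete -> None, caller should ask for more
    try:
        compile_command("1 +")                        # a genuine syntax error is re-raised
    except SyntaxError:
        pass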
|
1676620
|
import gfapy
class LengthGFA1:
@property
def length(self):
"""
Returns
-------
int
Value of LN tag, if segment has LN tag.
int
Sequence length if no LN and sequence not "*".
None
If sequence is "*".
See Also
--------
try_get_length
"""
if self.LN:
return self.LN
elif not gfapy.is_placeholder(self.sequence):
return len(self.sequence)
else:
return None
def try_get_length(self):
"""
Raises
------
gfapy.NotFoundError
If not an LN tag and the sequence is "*".
See Also
--------
__len__
"""
l = self.length
if l is None:
raise gfapy.NotFoundError("No length information available")
return l
def validate_length(self):
"""
Raises
------
gfapy.InconsistencyError
If sequence length and LN tag are not consistent.
"""
if not gfapy.is_placeholder(self.sequence) and "LN" in self.tagnames:
if self.LN != len(self.sequence):
raise gfapy.InconsistencyError(
"Segment: {}\n".format(str(self))+
"Length in LN tag ({}) ".format(self.LN)+
"is different from length of sequence field ({})"
.format(len(self.sequence)))
def _validate_record_type_specific_info(self):
self.validate_length()
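# --- Usage sketch (added for illustration; not part of the original module) ---
# Assumes this mixin is used by gfapy's GFA1 segment line class, so that a
# segment parsed with gfapy.Line exposes the length property defined above.
if __name__ == "__main__":
    seg = gfapy.Line("S\tseg1\tACGT")             # GFA1 segment with an explicit sequence
    print(seg.length)                              # 4: falls back to len(sequence) when LN is absent
    seg_no_seq = gfapy.Line("S\tseg2\t*\tLN:i:100")
    print(seg_no_seq.length)                       # 100: taken from the LN tag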
|
1676664
|
from .dice import DICE
from .fga import FGA
from .flip import FLIP
from .nea import NEA
from .rand import RAND
from .stack import STACK
from .pgd import PGD
from .prbcd import PRBCD
|
1676690
|
import random
import torch
import numpy as np
from copy import deepcopy
from src.constants import *
from src.adahessian import Adahessian
import matplotlib.pyplot as plt
def convertToOneHot(dat, cpu_old, HOSTS):
alloc = []
for i in dat:
oneHot = [0] * HOSTS; alist = i.tolist()[-HOSTS:]
oneHot[alist.index(max(alist))] = 1; alloc.append(oneHot)
new_dat_oneHot = torch.cat((cpu_old, torch.FloatTensor(alloc)), dim=1)
return new_dat_oneHot
def opt(init, model, bounds, data_type):
HOSTS = int(data_type.split('_')[-1])
optimizer = torch.optim.AdamW([init] , lr=0.8)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)
iteration = 0; equal = 0; z_old = 100; zs = []
while iteration < 200:
cpu_old = deepcopy(init.data[:,0:-HOSTS]); alloc_old = deepcopy(init.data[:,-HOSTS:])
z = model(init)
optimizer.zero_grad(); z.backward(); optimizer.step(); scheduler.step()
init.data = convertToOneHot(init.data, cpu_old, HOSTS)
equal = equal + 1 if torch.all(alloc_old.eq(init.data[:,-HOSTS:])) else 0
if equal > 30: break
iteration += 1; z_old = z.item()
# zs.append(z.item())
# plt.plot(zs); plt.show(); plt.clf()
init.requires_grad = False
return init.data, iteration, model(init)
def so_opt(init, model, bounds, data_type):
HOSTS = int(data_type.split('_')[-1])
optimizer = Adahessian([init] , lr=0.8)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)
iteration = 0; equal = 0; z_old = 100; zs = []
while iteration < 200:
cpu_old = deepcopy(init.data[:,0:-HOSTS]); alloc_old = deepcopy(init.data[:,-HOSTS:])
z = model(init)
optimizer.zero_grad(); z.backward(create_graph=True); optimizer.step(); scheduler.step()
init.data = convertToOneHot(init.data, cpu_old, HOSTS)
equal = equal + 1 if torch.all(alloc_old.eq(init.data[:,-HOSTS:])) else 0
if equal > 30: break
iteration += 1; z_old = z.item()
# zs.append(z.item())
# plt.plot(zs); plt.show(); plt.clf()
init.requires_grad = False
return init.data, iteration, model(init)
|
1676699
|
from tools_fq import *
from tools_sam import *
from tools_bed import *
from tools_zf import *
from tools_fa import *
|
1676715
|
import networkx as nx
import EoN
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
colors = ['#5AB3E6','#FF2000','#009A80','#E69A00', '#CD9AB3', '#0073B3','#F0E442']
def getMs(counts):
r'''used for figure 6.3 to get the values of M1, Mstar, and M2'''
N=len(counts)
M1 = 0
val1 = 0
M2 = 0
val2=0
Mstar = 0
valstar = 1
for index, val in enumerate(counts):
if index<2:
continue
if val < valstar:
Mstar = index
valstar = val
elif index - Mstar > 0.1*N:
break
for index, val in enumerate(counts):
if index>Mstar:
break
elif val>val1:
val1=val
M1 = index
for index, val in enumerate(counts):
if index < Mstar:
continue
elif val > val2:
val2 = val
M2 = index
return M1, Mstar, M2
iterations = 5*10**4
p=0.25
kave = 5.
labels=['a', 'b', 'c', 'd', 'e']
for N, color, label in zip([100, 400, 1600, 6400, 25600], colors, labels):
print(N)
xm = {m:0 for m in range(1,N+1)}
G = nx.fast_gnp_random_graph(N, kave/(N-1.))
for counter in range(iterations):
t, S, I, R = EoN.basic_discrete_SIR_epidemic(G, p)
xm[R[-1]] += 1./iterations
items = sorted(xm.items())
m, freq = zip(*items)
plt.figure(1)
plt.loglog(m, freq, color = color)
plt.figure(2)
plt.plot(m, freq, color=color)
plt.yscale('log')
    freq = np.array(freq)
    m = np.array(m)
plt.figure(3)
plt.plot(m/float(N), N*freq, color = color) #float is required in case python 2.X
M1, Mstar, M2 = getMs(freq)
plt.figure(4)
plt.clf()
plt.axis(xmin = 0,xmax = N, ymax=6./(N), ymin = 0)
plt.plot(m, freq, color= color)
plt.fill_between(range(1,Mstar+2), 0, freq[0:Mstar+1], linewidth=0, color = colors[4])
plt.fill_between(range(Mstar+1,len(freq)+1), 0, freq[Mstar:], linewidth=0, color = colors[5])
inset = plt.axes([0.55,0.5,0.325,0.35])
inset.plot(m, freq, color= color)
inset.fill_between(range(1,Mstar+2), 0, freq[0:Mstar+1], linewidth=0, color = colors[4])
inset.fill_between(range(Mstar+1,len(freq)), 0, freq[Mstar+1:], linewidth=0, color = colors[5])
inset.axis(xmin=0., xmax=20, ymin=0, ymax = 0.3)#, ymin=-counts[0]*iterations/100)
inset.set_xticks([0,5,10,15,20])
plt.xlabel('Number Infected')
plt.ylabel('Probability')
plt.savefig('fig6p3{}.png'.format(label))
plt.figure(1)
plt.ylabel(r'$\log_{10} x(m)$')
plt.xlabel(r'$\log_{10} m$')
plt.savefig('fig6p1a.png')
plt.figure(2)
plt.xlabel('$m$')
plt.ylabel(r'$\log_{10} x(m)$')
plt.axis(xmin = 0, xmax = 100)
plt.savefig('fig6p1b.png')
plt.figure(3)
plt.xlabel('$m/N$')
plt.ylabel('$Nx(m)$')
plt.axis(ymax=10, xmax=1, ymin=0)
plt.savefig('fig6p1c.png')
|
1676728
|
import ocnn
import torch
class OctreeConvRelu(torch.nn.Module):
def __init__(self, depth, channel_in, channel_out, kernel_size=[3], stride=1):
super(OctreeConvRelu, self).__init__()
self.conv = ocnn.OctreeConv(depth,
channel_in,
channel_out,
kernel_size,
stride)
self.relu = torch.nn.ReLU(inplace=True)
def forward(self, data_in, octree):
out = self.conv(data_in, octree)
out = self.relu(out)
return out
class OctreeConvBnRelu(torch.nn.Module):
def __init__(self, depth, channel_in, channel_out, kernel_size=[3], stride=1, bn_eps=0.00001, bn_momentum=0.01):
super(OctreeConvBnRelu, self).__init__()
self.conv = ocnn.OctreeConv(depth,
channel_in,
channel_out,
kernel_size,
stride)
self.bn = torch.nn.BatchNorm2d(channel_out,
bn_eps,
bn_momentum)
self.relu = torch.nn.ReLU(inplace=True)
def forward(self, data_in, octree):
out = self.conv(data_in, octree)
out = self.bn(out)
out = self.relu(out)
return out
class OctreeConvFastRelu(torch.nn.Module):
def __init__(self, depth, channel_in, channel_out, kernel_size=[3], stride=1):
super(OctreeConvFastRelu, self).__init__()
self.conv = ocnn.OctreeConvFast(depth,
channel_in,
channel_out,
kernel_size,
stride)
self.relu = torch.nn.ReLU(inplace=True)
def forward(self, data_in, octree):
out = self.conv(data_in, octree)
out = self.relu(out)
return out
class OctreeConvFastBnRelu(torch.nn.Module):
def __init__(self, depth, channel_in, channel_out, kernel_size=[3], stride=1, bn_eps=0.00001, bn_momentum=0.01):
super(OctreeConvFastBnRelu, self).__init__()
self.conv = ocnn.OctreeConvFast(depth,
channel_in,
channel_out,
kernel_size,
stride)
self.bn = torch.nn.BatchNorm2d(channel_out,
bn_eps,
bn_momentum)
self.relu = torch.nn.ReLU(inplace=True)
def forward(self, data_in, octree):
out = self.conv(data_in, octree)
out = self.bn(out)
out = self.relu(out)
return out
class OctreeConv1x1Relu(torch.nn.Module):
def __init__(self, channel_in, channel_out, use_bias=True):
super(OctreeConv1x1Relu, self).__init__()
self.conv1x1 = ocnn.OctreeConv1x1(channel_in, channel_out, use_bias)
self.relu = torch.nn.ReLU(inplace=True)
def forward(self, data_in):
out = self.conv1x1(data_in)
out = self.relu(out)
return out
class OctreeConv1x1BnRelu(torch.nn.Module):
def __init__(self, channel_in, channel_out, use_bias=True, bn_eps=0.00001, bn_momentum=0.01):
super(OctreeConv1x1BnRelu, self).__init__()
self.conv1x1 = ocnn.OctreeConv1x1(channel_in, channel_out, use_bias)
self.bn = torch.nn.BatchNorm2d(channel_out, bn_eps, bn_momentum)
self.relu = torch.nn.ReLU(inplace=True)
def forward(self, data_in):
out = self.conv1x1(data_in)
out = self.bn(out)
out = self.relu(out)
return out
class LinearRelu(torch.nn.Module):
def __init__(self, channel_in, channel_out, use_bias=True):
super(LinearRelu, self).__init__()
self.fc = torch.nn.Linear(channel_in, channel_out, use_bias)
self.relu = torch.nn.ReLU(inplace=True)
def forward(self, data_in):
out = self.fc(data_in)
out = self.relu(out)
return out
class LinearBnRelu(torch.nn.Module):
def __init__(self, channel_in, channel_out, use_bias=True, bn_eps=0.00001, bn_momentum=0.01):
super(LinearBnRelu, self).__init__()
self.fc = torch.nn.Linear(channel_in, channel_out, use_bias)
self.bn = torch.nn.BatchNorm1d(channel_out, bn_eps, bn_momentum)
self.relu = torch.nn.ReLU(inplace=True)
def forward(self, data_in):
out = self.fc(data_in)
out = self.bn(out)
out = self.relu(out)
return out
|
1676736
|
from yui.utils.format import bold
from yui.utils.format import code
from yui.utils.format import escape
from yui.utils.format import italics
from yui.utils.format import link
from yui.utils.format import link_channel
from yui.utils.format import link_everyone
from yui.utils.format import link_here
from yui.utils.format import link_url
from yui.utils.format import preformatted
from yui.utils.format import quote
from yui.utils.format import strike
def test_escape():
    assert escape('&') == '&amp;'
    assert escape('<') == '&lt;'
    assert escape('>') == '&gt;'
def test_format_helpers():
"""Test slack syntax helpers."""
assert bold('item4') == '*item4*'
assert code('item4') == '`item4`'
assert italics('item4') == '_item4_'
assert preformatted('item4') == '```item4```'
assert strike('item4') == '~item4~'
assert quote('item4') == '>item4'
def test_link(bot):
user = bot.add_user('U1234', 'tester')
channel = bot.add_channel('C1234', 'test')
assert link(channel) == '<#C1234>'
assert link(user) == '<@U1234>'
assert link('C1234') == '<#C1234>'
assert link('U1234') == '<@U1234>'
assert link('W1234') == '<@W1234>'
assert link('S1234') == '<!subteam^S1234>'
assert link('unknown') == '<unknown>'
assert link(1234) == '<1234>'
def test_link_url():
url = 'https://github.com/item4/yui'
assert link_url(url) == f'<{url}>'
assert link_url(url, 'Repo') == f'<{url}|Repo>'
assert link_url(url, 'Repo & Code') == f'<{url}|Repo & Code>'
def test_special_mentions():
assert link_channel() == '<!channel|channel>'
assert link_everyone() == '<!everyone|everyone>'
assert link_here() == '<!here|here>'
|
1676751
|
import torch
def inverseThoughList(layers,x,sign=1):
inverseLogjac = x.new_zeros(x.shape[0])
if sign == -1:
layers = reversed(layers)
inverse = lambda layer, x: layer.forward(x)
elif sign == 1:
inverse = lambda layer, x: layer.inverse(x)
for layer in layers:
x,inverseLogjacTMP = inverse(layer,x)
inverseLogjac = inverseLogjac + inverseLogjacTMP
return x,inverseLogjac
|
1676753
|
from azure.storage.blob.sharedaccesssignature import BlobSharedAccessSignature
from azure.storage.blob import BlobPermissions
from datetime import datetime, timedelta
import requests
def create_key(hash_checksum):
"""
Takes the hash checksum and returns the path on the server
:param hash_checksum:
:type hash_checksum:
:return:
:rtype:
"""
return hash_checksum[0:2] + '/' + hash_checksum[2:4] + '/' + hash_checksum[4:6] + '/' + hash_checksum[6:8] + '/' + hash_checksum
def azure_blob_construct_signed_url(storage_account_name, storage_account_primary_key, container_name, hash_checksum, permission):
"""
Constructs the signed url
:param storage_account_name: The name of the storage account. The leading part before blob.core.windows.net
:type storage_account_name: str
:param storage_account_primary_key: The primary (or secondary) key shown under access keys of the storage account
:type storage_account_primary_key: str
:param container_name: The container name
:type container_name: str
:param hash_checksum: The sha512 checksum of the file
:type hash_checksum: str
:param permission: The permission
:type permission: azure.storage.blob.BlobPermissions
:return: The signed upload url
:rtype:
"""
expiry = datetime.utcnow() + timedelta(hours=1)
blob_shared_access_signature = BlobSharedAccessSignature(storage_account_name, storage_account_primary_key)
blob_name = create_key(hash_checksum)
sas_token = blob_shared_access_signature.generate_blob(container_name, blob_name, expiry=expiry, permission=permission)
return f'https://{storage_account_name}.blob.core.windows.net/{container_name}/{blob_name}?{sas_token}'
def azure_blob_construct_signed_upload_url(storage_account_name, storage_account_primary_key, container_name, hash_checksum):
"""
Constructs the signed upload url
:param storage_account_name: The name of the storage account. The leading part before blob.core.windows.net
:type storage_account_name: str
:param storage_account_primary_key: The primary (or secondary) key shown under access keys of the storage account
:type storage_account_primary_key: str
:param container_name: The container name
:type container_name: str
:param hash_checksum: The sha512 checksum of the file
:type hash_checksum: str
:return: The signed upload url
:rtype:
"""
return azure_blob_construct_signed_url(storage_account_name, storage_account_primary_key, container_name, hash_checksum, BlobPermissions(write=True))
def azure_blob_construct_signed_download_url(storage_account_name, storage_account_primary_key, container_name, hash_checksum):
"""
Constructs the signed download url
:param storage_account_name: The name of the storage account. The leading part before blob.core.windows.net
:type storage_account_name:
:param storage_account_primary_key: The primary (or secondary) key shown under access keys of the storage account
:type storage_account_primary_key:
:param container_name: The container name
:type container_name:
:param hash_checksum: The sha512 checksum of the file
:type hash_checksum:
:return: The signed upload url
:rtype:
"""
return azure_blob_construct_signed_url(storage_account_name, storage_account_primary_key, container_name, hash_checksum, BlobPermissions(read=True))
def azure_blob_construct_signed_delete_url(storage_account_name, storage_account_primary_key, container_name, hash_checksum):
"""
Constructs the signed delete url
:param storage_account_name: The name of the storage account. The leading part before blob.core.windows.net
:type storage_account_name:
:param storage_account_primary_key: The primary (or secondary) key shown under access keys of the storage account
:type storage_account_primary_key:
:param container_name: The container name
:type container_name:
:param hash_checksum: The sha512 checksum of the file
:type hash_checksum:
:return: The signed delete url
:rtype:
"""
return azure_blob_construct_signed_url(storage_account_name, storage_account_primary_key, container_name, hash_checksum, BlobPermissions(delete=True))
def azure_blob_delete(storage_account_name, storage_account_primary_key, container_name, hash_checksum):
"""
    Deletes an azure blob including all snapshots
:param storage_account_name: The name of the storage account. The leading part before blob.core.windows.net
:type storage_account_name:
:param storage_account_primary_key: The primary (or secondary) key shown under access keys of the storage account
:type storage_account_primary_key:
:param container_name: The container name
:type container_name:
:param hash_checksum: The sha512 checksum of the file
:type hash_checksum:
    :return: The response of the delete request
    :rtype: requests.Response
"""
delete_url = azure_blob_construct_signed_delete_url(storage_account_name, storage_account_primary_key, container_name, hash_checksum)
return requests.delete(
delete_url,
headers={
'x-ms-delete-snapshots': 'include',
}
)
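# --- Usage sketch (added for illustration; not part of the original module) ---
# Account name, key and container below are placeholders, not real credentials.
if __name__ == "__main__":
    import hashlib
    checksum = hashlib.sha512(b"example payload").hexdigest()
    upload_url = azure_blob_construct_signed_upload_url(
        "examplestorageaccount", "example-primary-key", "example-container", checksum)
    # The caller can now PUT the file bytes to upload_url within the one-hour expiry,
    # e.g. requests.put(upload_url, data=payload, headers={'x-ms-blob-type': 'BlockBlob'}).
    print(upload_url)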
|
1676797
|
import unittest
from unittest.mock import patch
import model as m
from Transaction import Transaction
def test_current_user():
with patch("model.sqlite3") as mock_sql:
mock_sql.connect().cursor().fetchone.return_value = ["paulina"]
assert m.current_user() == "paulina"
class TestLogIn:
def test_log_in_success(self):
with patch("model.sqlite3") as mock_sql:
mock_sql.connect().cursor().fetchone.return_value = (
"307e1fb4b8594b49b8eb119a4a38cc5020fd9eb18afa9a38b8c75abb4ac8ae6e",
"<KEY>"
"2b534894ffbdca10ce0a5507142c91a4d66f859f6df5771ba04e5fa477f28e0",
)
assert m.log_in("asdf", "asdf")
mock_sql.connect().cursor().fetchone.return_value = ["asdf"]
assert m.current_user() == "asdf"
def test_log_in_failure(self):
with patch("model.sqlite3") as mock_sql:
mock_sql.connect().cursor().fetchone.return_value = ("asdf", "asdf")
assert not m.log_in("asdf", "asdf")
class TestCreate:
def test_create_success(self):
with patch("model.sqlite3"):
assert m.create("asdf", "asdf", 124532523525)
def test_create_fail_no_username(self):
with patch("model.sqlite3"):
assert not m.create("", "asdf", 124532523525)
def test_create_fail_no_password(self):
with patch("model.sqlite3"):
assert not m.create("asdf", "", 124532523525)
def test_create_fail_negative_value(self):
with patch("model.sqlite3"):
assert not m.create("asdf", "asdf", -124532523525)
def test_update_holdings():
with patch("model.sqlite3"):
m.update_holdings()
def test_calculate_transaction_revenue():
assert m.calculate_transaction_revenue(1, 50, 7) == 43
def test_calculate_transaction_cost():
assert m.calculate_transaction_cost(1, 50, 7) == 57
class TestLookupTickerSymbol(unittest.TestCase):
def test_lookup_ticker_symbol_success(self):
"""Make the external API call to test status of API key"""
assert m.lookup_ticker_symbol("Apple") == "AAPL"
def test_lookup_ticker_symbol_fail(self):
assert not m.lookup_ticker_symbol("asdf")
class TestQuoteLastPrice(unittest.TestCase):
def test_quote_last_price_success(self):
assert m.quote_last_price("AAPL")
def test_transaction_class():
t1 = Transaction(
last_price=124.5,
brokerage_fee=10.50,
current_balance=100000,
trade_volume=5.0,
new_balance=m.calculate_transaction_cost(5, 124.5, 10.50),
ticker_symbol="AAPL",
current_number_shares=500.0,
)
t2 = Transaction(
last_price=124.5,
brokerage_fee=10.50,
current_balance=100000,
trade_volume=5.0,
new_balance=m.calculate_transaction_cost(5, 124.5, 10.50),
ticker_symbol="AAPL",
current_number_shares=500.0,
)
assert t1 == t2
def test_display_user_transactions():
with patch("model.sqlite3") as mock_sql:
mock_sql.connect().cursor().fetchall.return_value = [
"AAPL",
1.0,
282.9,
"2020-04-25 10:38 PM",
]
assert m.display_user_transactions() == [
"AAPL",
1.0,
282.9,
"2020-04-25 10:38 PM",
]
def test_log_out():
with patch("model.sqlite3"):
m.log_out()
|
1676799
|
from unittest import TestCase
from ircb.connection import Connection
class ConnectionTests(TestCase):
"""
Test the connection.Connection class
"""
def test_decode(self):
con_obj = Connection()
self.assertEqual(
"line",
con_obj.decode("line".encode(encoding='UTF-8', errors='strict'))
)
self.assertEqual(
"line",
con_obj.decode("line".encode(encoding='latin-1', errors='strict'))
)
|
1676816
|
from pymks.datasets import make_elastic_FE_strain_random
from pymks.datasets import make_elastic_FE_strain_delta
from pymks.datasets import make_elastic_stress_random
import numpy as np
def test_make_elastic_FE_strain_delta():
elastic_modulus = (1., 2.)
poissons_ratio = (0.3, 0.3)
X, y = make_elastic_FE_strain_delta(elastic_modulus=elastic_modulus,
poissons_ratio=poissons_ratio,
size=(5, 5))
def test_make_elastic_FE_strain_random():
elastic_modulus = (1., 2.)
poissons_ratio = (0.3, 0.3)
X, y = make_elastic_FE_strain_random(n_samples=1,
elastic_modulus=elastic_modulus,
poissons_ratio=poissons_ratio,
size=(5, 5))
def test_make_elastic_stress_random():
X, y = make_elastic_stress_random(n_samples=1, elastic_modulus=(1, 1),
poissons_ratio=(1, 1),
grain_size=(3, 3), macro_strain=1.0)
assert np.allclose(y, np.ones(y.shape))
X, y = make_elastic_stress_random(n_samples=1, grain_size=(1, 1),
elastic_modulus=(100, 200),
size=(2, 2), poissons_ratio=(1, 3),
macro_strain=1., seed=4)
X_result = np.array([[[1, 1],
[0, 1]]])
assert float(np.round(y, decimals=5)[0]) == 228.74696
assert np.allclose(X, X_result)
X, y = make_elastic_stress_random(n_samples=1, grain_size=(1, 1, 1),
elastic_modulus=(100, 200),
poissons_ratio=(1, 3), seed=5,
macro_strain=1., size=(2, 2, 2))
X_result = np.array([[[1, 1],
[1, 0]],
[[1, 1],
[0, 0]]])
assert np.allclose(X, X_result)
assert y.astype(int) == 145
|
1676836
|
import sys
def trim(line):
if len(line) <= 55:
return line
else:
line = line[0:40]
if line.rfind(' ') != -1: line = line[0:line.rfind(' ')]
return line + "... <Read More>"
def main():
test_cases = open(sys.argv[1], 'r')
for test in test_cases:
test = test.strip()
print(trim(test))
test_cases.close()
if __name__ == '__main__':
main()
|
1676871
|
import requests
from orionsdk import SwisClient
def main():
npm_server = 'localhost'
username = 'admin'
password = ''
swis = SwisClient(npm_server, username, password)
print("Discover and add interfaces:")
results = swis.invoke('Orion.NPM.Interfaces', 'DiscoverInterfacesOnNode', 1)
# use the results['DiscoveredInterfaces'] for all interfaces
# or get a subset of interfaces using a comprehension like below
eth_only = [
x for x
in results['DiscoveredInterfaces']
if x['Caption'].startswith('eth')]
print(eth_only)
results2 = swis.invoke(
'Orion.NPM.Interfaces',
'AddInterfacesOnNode',
1, # use a valid nodeID!
eth_only,
'AddDefaultPollers')
print(results2)
requests.packages.urllib3.disable_warnings()
if __name__ == '__main__':
main()
|
1676872
|
from django.db import models
from river.models.fields.state import StateField
class Shipping(models.Model):
product = models.CharField(max_length=50, null=True, blank=True)
customer = models.CharField(max_length=50, null=True, blank=True)
shipping_status = StateField()
def __str__(self):
return self.product
|
1676907
|
from collections import defaultdict
from random import choice
pages = []
index = defaultdict(list) # word -> [(page, i), (page, i) ...]
with open('sherlock-holmes.txt') as fp:
# ~300 words per page, ~10 words per sentence
page, lnum, wnum = [], 1, 1
for line in fp:
if not line.strip():
continue
for wnum, word in enumerate(line.lower().split(), wnum):
page.append(word)
index[word].append((len(pages) + 1, wnum))
wnum += 1
lnum += 1
if lnum == 30:
pages.append(page)
page, lnum, wnum = [], 1, 1
if page:
pages.append(page)
def encode(sentence):
dec = []
for word in sentence.lower().split():
dec += choice(index[word])
return dec
def decode(cypher):
cypher = [int(w) for w in cypher.split()]
words = []
for i in range(0, len(cypher), 2):
page = cypher[i] - 1
wnum = cypher[i+1] - 1
words.append(pages[page][wnum])
return ' '.join(words)
#print(' '.join(str(n) for n in encode('london at afternoon')))
print(decode('117 278 243 249 87 213'))
|
1676940
|
from django.conf import settings
from django.db import migrations, models
import geotrek.common.mixins
import mapentity.models
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Report',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date_insert', models.DateTimeField(auto_now_add=True, verbose_name='Insertion date', db_column='date_insert')),
('date_update', models.DateTimeField(auto_now=True, verbose_name='Update date', db_column='date_update')),
('name', models.CharField(max_length=256, verbose_name='Name')),
('email', models.EmailField(max_length=254, verbose_name='Email')),
('comment', models.TextField(default='', verbose_name='Comment', blank=True)),
('geom', django.contrib.gis.db.models.fields.PointField(default=None, srid=settings.SRID, null=True, verbose_name='Location', blank=True)),
('context_object_id', models.PositiveIntegerField(null=True, editable=False, blank=True)),
],
options={
'ordering': ['-date_insert'],
'db_table': 'f_t_signalement',
'verbose_name': 'Report',
'verbose_name_plural': 'Reports',
},
bases=(mapentity.models.MapEntityMixin, geotrek.common.mixins.PicturesMixin, models.Model),
),
migrations.CreateModel(
name='ReportCategory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('category', models.CharField(max_length=128, verbose_name='Category')),
],
options={
'db_table': 'f_b_categorie',
'verbose_name': 'Category',
'verbose_name_plural': 'Categories',
},
),
migrations.CreateModel(
name='ReportStatus',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.CharField(max_length=128, verbose_name='Status')),
],
options={
'db_table': 'f_b_status',
'verbose_name': 'Status',
'verbose_name_plural': 'Status',
},
),
migrations.AddField(
model_name='report',
name='category',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, blank=True, to='feedback.ReportCategory', null=True, verbose_name='Category'),
),
migrations.AddField(
model_name='report',
name='context_content_type',
field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, editable=False, to='contenttypes.ContentType', null=True),
),
migrations.AddField(
model_name='report',
name='status',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, blank=True, to='feedback.ReportStatus', null=True, verbose_name='Status'),
),
]
|
1676945
|
import logging
import re
from dbnd._core.utils.better_subprocess import run_cmd
from dbnd_gcp.apache_beam.apache_beam_ctrl import ApacheBeamJobCtrl
from dbnd_gcp.dataflow.dataflow_config import DataflowConfig
from dbnd_gcp.gs_sync_ctrl import GsSyncCtrl
logger = logging.getLogger(__name__)
_DATAFLOW_ID_REGEXP = re.compile(
r".*console.cloud.google.com/dataflow.*/jobs/([a-z0-9A-Z\-_]+).*"
)
class DataFlowJobCtrl(ApacheBeamJobCtrl):
def __init__(self, task_run):
super(DataFlowJobCtrl, self).__init__(task_run=task_run)
self.dataflow_config = task_run.task.beam_engine # type: DataflowConfig
gcp_conn_id = self.task_env.conn_id
from airflow.contrib.hooks.gcp_dataflow_hook import DataFlowHook
self._gcp_dataflow_hook = DataFlowHook(
gcp_conn_id=gcp_conn_id, delegate_to=self.task_env.delegate_to
)
if self.dataflow_config.temp_location:
# override sync location with temp_location
self.remote_sync_root = self.dataflow_config.temp_location
self.current_dataflow_job_id = None
def _get_base_options(self):
options = super(DataFlowJobCtrl, self)._get_base_options()
dfc = self.dataflow_config
options.update(dfc.options)
options.setdefault("runner", dfc.runner)
options.setdefault("region", dfc.region)
options.setdefault("project", dfc.project)
options.setdefault("tempLocation", dfc.temp_location)
return options
def _process_dataflow_log(self, msg):
msg = msg.strip()
if self.current_dataflow_job_id is None:
matched_job = _DATAFLOW_ID_REGEXP.search(msg)
if matched_job:
self.current_dataflow_job_id = matched_job.group(1)
logger.info("Found dataflow job id '%s'", self.current_dataflow_job_id)
logger.info(msg)
def _run_cmd(self, cmd):
dfc = self.dataflow_config
from airflow.contrib.hooks.gcp_dataflow_hook import _DataflowJob
run_cmd(
cmd,
name="dataflow %s" % self.task_run.job_name,
stdout_handler=self._process_dataflow_log,
)
_DataflowJob(
self._gcp_dataflow_hook.get_conn(),
dfc.project,
self.task_run.job_id,
dfc.region,
dfc.poll_sleep,
self.current_dataflow_job_id,
).wait_for_done()
|
1676958
|
from datetime import timedelta
import numpy as np
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from fedot.core.data.data import InputData
from fedot.core.pipelines.node import PrimaryNode, SecondaryNode
from fedot.core.pipelines.pipeline import Pipeline
from fedot.core.pipelines.tuning.sequential import SequentialTuner
from fedot.core.repository.dataset_types import DataTypesEnum
from fedot.core.repository.tasks import Task, TaskTypesEnum
from fedot.utilities.synth_dataset_generator import regression_dataset
np.random.seed(2020)
def get_regression_dataset(features_options, samples_amount=250,
features_amount=5):
"""
Prepares four numpy arrays with different scale features and target
:param samples_amount: Total amount of samples in the resulted dataset.
:param features_amount: Total amount of features per sample.
:param features_options: The dictionary containing features options in key-value
format:
- informative: the amount of informative features;
- bias: bias term in the underlying linear model;
:return x_data_train: features to train
:return y_data_train: target to train
:return x_data_test: features to test
:return y_data_test: target to test
"""
x_data, y_data = regression_dataset(samples_amount=samples_amount,
features_amount=features_amount,
features_options=features_options,
n_targets=1,
noise=0.0, shuffle=True)
# Changing the scale of the data
for i, coeff in zip(range(0, features_amount),
np.random.randint(1, 100, features_amount)):
# Get column
feature = np.array(x_data[:, i])
# Change scale for this feature
rescaled = feature * coeff
x_data[:, i] = rescaled
# Train and test split
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data,
test_size=0.3)
return x_train, y_train, x_test, y_test
def run_experiment(pipeline, tuner):
samples = [50, 250, 150]
features = [1, 5, 10]
options = [{'informative': 1, 'bias': 0.0},
{'informative': 2, 'bias': 2.0},
{'informative': 1, 'bias': 3.0}]
for samples_amount, features_amount, features_options in zip(samples, features, options):
print('=======================================')
print(f'\nAmount of samples {samples_amount}, '
f'amount of features {features_amount}, '
f'additional options {features_options}')
x_train, y_train, x_test, y_test = get_regression_dataset(features_options,
samples_amount,
features_amount)
# Define regression task
task = Task(TaskTypesEnum.regression)
# Prepare data to train the model
train_input = InputData(idx=np.arange(0, len(x_train)),
features=x_train,
target=y_train,
task=task,
data_type=DataTypesEnum.table)
predict_input = InputData(idx=np.arange(0, len(x_test)),
features=x_test,
target=None,
task=task,
data_type=DataTypesEnum.table)
# Fit it
pipeline.fit_from_scratch(train_input)
# Predict
predicted_values = pipeline.predict(predict_input)
pipeline_prediction = predicted_values.predict
mae_value = mean_absolute_error(y_test, pipeline_prediction)
print(f'Mean absolute error - {mae_value:.4f}\n')
if tuner is not None:
print(f'Start tuning process ...')
pipeline_tuner = tuner(pipeline=pipeline, task=task,
iterations=50, timeout=timedelta(seconds=50))
tuned_pipeline = pipeline_tuner.tune_pipeline(input_data=train_input,
loss_function=mean_absolute_error)
# Predict
predicted_values_tuned = tuned_pipeline.predict(predict_input)
preds_tuned = predicted_values_tuned.predict
mae_value = mean_absolute_error(y_test, preds_tuned)
print(f'Obtained metrics after tuning:')
print(f'MAE - {mae_value:.4f}\n')
# Script for testing whether the pipeline can process different datasets for the regression task
if __name__ == '__main__':
# Prepare pipeline
node_ransac = PrimaryNode('ransac_lin_reg')
node_scaling = SecondaryNode('scaling', nodes_from=[node_ransac])
node_final = SecondaryNode('ridge', nodes_from=[node_scaling])
pipeline = Pipeline(node_final)
run_experiment(pipeline, tuner=SequentialTuner)
|
1676991
|
import tensorflow as tf
from drnn import multi_dRNN_with_dilations
def _contruct_cells(hidden_structs, cell_type):
"""
    This function constructs a list of cells.
"""
# error checking
if cell_type not in ["RNN", "LSTM", "GRU"]:
raise ValueError("The cell type is not currently supported.")
# define cells
cells = []
for hidden_dims in hidden_structs:
if cell_type == "RNN":
cell = tf.contrib.rnn.BasicRNNCell(hidden_dims)
elif cell_type == "LSTM":
cell = tf.contrib.rnn.BasicLSTMCell(hidden_dims)
elif cell_type == "GRU":
cell = tf.contrib.rnn.GRUCell(hidden_dims)
cells.append(cell)
return cells
def _rnn_reformat(x, input_dims, n_steps):
"""
    This function reformats the input to the shape that a standard RNN can take.
Inputs:
x -- a tensor of shape (batch_size, n_steps, input_dims).
Outputs:
    x_reformat -- a list of 'n_steps' tensors, each of shape (batch_size, input_dims).
"""
# permute batch_size and n_steps
x_ = tf.transpose(x, [1, 0, 2])
# reshape to (n_steps*batch_size, input_dims)
x_ = tf.reshape(x_, [-1, input_dims])
# split to get a list of 'n_steps' tensors of shape (batch_size, input_dims)
x_reformat = tf.split(x_, n_steps, 0)
return x_reformat
def drnn_classification(x,
hidden_structs,
dilations,
n_steps,
n_classes,
input_dims=1,
cell_type="RNN"):
"""
    This function constructs a multilayer dilated RNN for classification.
Inputs:
x -- a tensor of shape (batch_size, n_steps, input_dims).
hidden_structs -- a list, each element indicates the hidden node dimension of each layer.
dilations -- a list, each element indicates the dilation of each layer.
n_steps -- the length of the sequence.
n_classes -- the number of classes for the classification.
input_dims -- the input dimension.
cell_type -- the type of the RNN cell, should be in ["RNN", "LSTM", "GRU"].
Outputs:
pred -- the prediction logits at the last timestamp and the last layer of the RNN.
'pred' does not pass any output activation functions.
"""
# error checking
assert (len(hidden_structs) == len(dilations))
# reshape inputs
x_reformat = _rnn_reformat(x, input_dims, n_steps)
# construct a list of cells
cells = _contruct_cells(hidden_structs, cell_type)
# define dRNN structures
layer_outputs = multi_dRNN_with_dilations(cells, x_reformat, dilations)
if dilations[0] == 1:
# dilation starts at 1, no data dependency lost
# define the output layer
weights = tf.Variable(tf.random_normal(shape=[hidden_structs[-1],
n_classes]))
bias = tf.Variable(tf.random_normal(shape=[n_classes]))
# define prediction
pred = tf.add(tf.matmul(layer_outputs[-1], weights), bias)
else:
# dilation starts not at 1, needs to fuse the output
# define output layer
weights = tf.Variable(tf.random_normal(shape=[hidden_structs[
-1] * dilations[0], n_classes]))
bias = tf.Variable(tf.random_normal(shape=[n_classes]))
# concat hidden_outputs
for idx, i in enumerate(range(-dilations[0], 0, 1)):
if idx == 0:
hidden_outputs_ = layer_outputs[i]
else:
hidden_outputs_ = tf.concat(
[hidden_outputs_, layer_outputs[i]],
axis=1)
pred = tf.add(tf.matmul(hidden_outputs_, weights), bias)
return pred
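# --- Usage sketch (added for illustration; not part of the original module) ---
# Assumes TensorFlow 1.x (tf.contrib is used above); shapes and hyperparameters
# are illustrative placeholders.
if __name__ == "__main__":
    n_steps, input_dims, n_classes = 28, 1, 10
    hidden_structs = [20, 20, 20]     # one hidden size per layer
    dilations = [1, 2, 4]             # one dilation per layer, same length as hidden_structs
    x = tf.placeholder(tf.float32, [None, n_steps, input_dims])
    logits = drnn_classification(x, hidden_structs, dilations, n_steps, n_classes,
                                 input_dims=input_dims, cell_type="GRU")
    # 'logits' has shape (batch_size, n_classes); apply a softmax cross-entropy
    # loss on top of it during training.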
|
1677014
|
import logging
import os
import re
import nltk
from nltk.tokenize import word_tokenize
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def _read(path):
with open(path, 'r') as f:
return f.read()
def _doc_id(path):
doc_id, = re.search(r'(\d+)', path).groups()
return doc_id
def _full_paths(directory):
return [os.path.join(directory, path) for path in os.listdir(directory)]
def docs_by_id(directory):
logger.info('Loading documents from {}'.format(directory))
return {_doc_id(path): _read(path) for path in _full_paths(directory)}
def tokens(doc):
return word_tokenize(doc)
def tokens_by_doc_id(directory):
nltk.download('punkt')
return {doc_id: tokens(doc) for doc_id, doc in docs_by_id(directory).items()}
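# --- Usage sketch (added for illustration; not part of the original module) ---
# 'data/docs' is a placeholder directory whose file names contain a numeric
# document id (e.g. doc_001.txt), matching what _doc_id expects.
if __name__ == "__main__":
    by_id = tokens_by_doc_id("data/docs")
    for doc_id, toks in sorted(by_id.items())[:3]:
        print(doc_id, toks[:10])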
|