code stringlengths 281 23.7M |
|---|
def initialize_model_poisson(train_x, train_obj, train_con, train_yvar, state_dict=None, method='variational'):
    """Build a two-output GP (objective + constraint) and its marginal log likelihood.

    method='variational' uses a Poisson-likelihood variational GP for the
    objective and a Gaussian one for the constraint; method='exact' uses
    exact GPs for both.  An optional state_dict warm-starts the model.
    Returns the (mll, model) pair.
    """
    if method == 'variational':
        model_obj = get_var_model(train_x, train_obj, train_yvar, is_poisson=True)
        model_con = get_var_model(train_x, train_con, train_yvar, is_poisson=False)
        # VariationalELBO needs the dataset size; train_x.shape[-2] is the
        # number of training points.
        kwargs = {
            'mll_cls': lambda likelihood, gp: VariationalELBO(
                likelihood, gp, num_data=train_x.shape[-2]
            )
        }
    elif method == 'exact':
        model_obj = get_exact_model(train_x, train_obj, train_yvar)
        model_con = get_exact_model(train_x, train_con, train_yvar)
        kwargs = {}
    model = ModelListGP(model_obj, model_con)
    mll = SumMarginalLogLikelihood(model.likelihood, model, **kwargs)
    if state_dict is not None:
        model.load_state_dict(state_dict)
    return mll, model
class Effect6326(BaseEffect):
    # Passive hull effect: boosts the thermal damage of charges that require
    # the 'Missile Launcher Operation' skill, using the ship's shipBonusCD1
    # attribute scaled by the 'Caldari Destroyer' skill.
    type = 'passive'

    # NOTE: no `self` parameter — the effect framework invokes handlers as
    # plain functions bound to the class.
    def handler(fit, src, context, projectionRange, **kwargs):
        fit.modules.filteredChargeBoost((lambda mod: mod.charge.requiresSkill('Missile Launcher Operation')), 'thermalDamage', src.getModifiedItemAttr('shipBonusCD1'), skill='Caldari Destroyer', **kwargs)
def create_build(repository):
    """Create, persist, and return a repository build backed by a fresh build-worker token."""
    # The build worker authenticates against the registry with a scoped write token.
    access_token = model.token.create_access_token(repository, 'write', 'build-worker')
    repo_ref = 'ci.devtable.com:5000/%s/%s' % (repository.namespace_user.username, repository.name)
    job_config = {
        'repository': repo_ref,
        'docker_tags': ['latest'],
        'build_subdir': '',
        'trigger_metadata': {
            'commit': '3482adc5822c498e8f7db2e361e8d57b3d77ddd9',
            'ref': 'refs/heads/master',
            'default_branch': 'master',
        },
    }
    build = create_repository_build(
        repository, access_token, job_config,
        '68daeebd-a5b9-457f-80a0-4363b882f8ea', 'build_name')
    build.save()
    return build
def make_module_translation_map(names: list[str]) -> dict[str, str]:
    """Map each module name to its shortest suffix that is unique across `names`.

    For every name, candidate_suffixes() yields candidate short forms from
    shortest to longest; the first candidate used by exactly one module wins.
    Raises AssertionError if some name has no unique suffix at all.
    """
    from collections import Counter  # local import keeps the block self-contained

    # How many modules share each candidate suffix.
    num_instances: Counter[str] = Counter(
        suffix for name in names for suffix in candidate_suffixes(name)
    )
    result: dict[str, str] = {}
    for name in names:
        for suffix in candidate_suffixes(name):
            if num_instances[suffix] == 1:
                result[name] = suffix
                break
        else:
            # Every candidate collides with another module's suffix; explicit
            # raise (unlike `assert False`) survives `python -O`.
            raise AssertionError(names)
    return result
def evaluate(dataloader, model, confusion, config, args):
    """Run PINet validation over `dataloader`, accumulating results into `confusion`.

    For each batch: move inputs/targets to the GPU, run the point-detection
    test pass, fit splines to the detected lane points, match them against
    ground truth with a Hausdorff criterion, and update the confusion object.
    Batches whose confusion update raises are logged and skipped.
    Returns the updated `confusion`.  `config` and `args` are accepted for
    interface parity but unused here.
    """
    # Target entries that are tensors and must be moved to the GPU;
    # 'outgoings'/'incomings' stay as plain Python containers.
    cuda_keys = (
        'center_img', 'labels', 'roads', 'control_points', 'con_matrix',
        'endpoints', 'mask', 'bev_mask', 'obj_corners', 'obj_converted',
        'obj_exists', 'left_traffic',
    )
    model.evaluate_mode()
    # NOTE: logging at ERROR level is intentional here so messages always show.
    logging.error('VALIDATION')
    for batch in tqdm(dataloader):
        seq_images, targets, _ = batch
        if seq_images is None:  # fixed: was `== None`
            continue
        seq_images = seq_images.cuda()
        # Built for parity with the training path; not read below — TODO confirm
        # it can be dropped entirely.
        cuda_targets = []
        for b in targets:
            temp_dict = {key: b[key].cuda() for key in cuda_keys}
            temp_dict['outgoings'] = b['outgoings']
            temp_dict['incomings'] = b['incomings']
            cuda_targets.append(temp_dict)
        logging.error('SCENE ' + targets[0]['scene_name'])
        logging.error('SAMPLE ' + targets[0]['sample_token'])
        test_image = seq_images / 255
        # Scale factors from the network's 800x448 working resolution to the
        # configured output size.
        w_ratio = (p.x_size * 1.0) / 800
        h_ratio = (p.y_size * 1.0) / 448
        ori_image = np.uint8(cv2.resize(
            np.squeeze(np.transpose(seq_images.data.cpu().numpy(), (0, 2, 3, 1)), axis=0),
            (800, 448)))
        out_x, out_y, ti = test_ori(model, ori_image, test_image, w_ratio, h_ratio,
                                    draw_type='point', thresh=p.threshold_point)
        calib = targets[0]['calib'].numpy()
        coefs_list, boundaries_list, out_dict = vis_tools.get_spline_for_pinet(
            out_x[0], out_y[0], calib, targets[0])
        hausdorff_static_dist, hausdorff_static_idx, hausdorff_gt = vis_tools.hausdorff_match(
            out_dict, targets[0], pinet=True)
        try:
            confusion.update(out_dict, hausdorff_gt, hausdorff_static_idx, targets[0],
                             static=True, pinet=True)
        except Exception as e:
            # Best-effort: a single bad sample must not abort the whole run.
            logging.error('EXCEPTION IN CONFUSION ')
            logging.error(str(e))
            continue
    return confusion
class SystemVerilogLexer(RegexLexer):
    """
    Pygments lexer for SystemVerilog: Verilog plus the IEEE 1800 keyword,
    type, preprocessor-directive and system-task sets.
    """
    name = 'systemverilog'
    aliases = ['systemverilog', 'sv']
    filenames = ['*.sv', '*.svh']
    mimetypes = ['text/x-systemverilog']
    # NOTE(review): the original line was a truncated string literal
    # ("url = '") — a syntax error.  Restored to the value used by upstream
    # Pygments; confirm against the pygments source.
    url = 'https://en.wikipedia.org/wiki/SystemVerilog'
    version_added = '1.5'

    # Whitespace or comment run, usable inside composed rules.
    _ws = '(?:\\s|//.*?\\n|/[*].*?[*]/)+'

    tokens = {
        'root': [
            # Preprocessor `define enters the 'macro' state.
            ('^(\\s*)(`define)', bygroups(Whitespace, Comment.Preproc), 'macro'),
            ('^(\\s*)(package)(\\s+)', bygroups(Whitespace, Keyword.Namespace, Whitespace)),
            ('^(\\s*)(import)(\\s+)', bygroups(Whitespace, Keyword.Namespace, Whitespace), 'import'),
            ('\\s+', Whitespace),
            ('(\\\\)(\\n)', bygroups(String.Escape, Whitespace)),
            ('/(\\\\\\n)?/(\\n|(.|\\n)*?[^\\\\]\\n)', Comment.Single),
            ('/(\\\\\\n)?[*](.|\\n)*?[*](\\\\\\n)?/', Comment.Multiline),
            ('[{}#]', Punctuation),
            ('L?"', String, 'string'),
            ("L?'(\\\\.|\\\\[0-7]{1,3}|\\\\x[a-fA-F0-9]{1,2}|[^\\\\\\'\\n])'", String.Char),
            # Float literals, then sized/based integer literals (bin/oct/dec/hex).
            ('(\\d+\\.\\d*|\\.\\d+|\\d+)[eE][+-]?\\d+[lL]?', Number.Float),
            ('(\\d+\\.\\d*|\\.\\d+|\\d+[fF])[fF]?', Number.Float),
            ("([1-9][_0-9]*)?\\s*\\'[sS]?[bB]\\s*[xXzZ?01][_xXzZ?01]*", Number.Bin),
            ("([1-9][_0-9]*)?\\s*\\'[sS]?[oO]\\s*[xXzZ?0-7][_xXzZ?0-7]*", Number.Oct),
            ("([1-9][_0-9]*)?\\s*\\'[sS]?[dD]\\s*[xXzZ?0-9][_xXzZ?0-9]*", Number.Integer),
            ("([1-9][_0-9]*)?\\s*\\'[sS]?[hH]\\s*[xXzZ?0-9a-fA-F][_xXzZ?0-9a-fA-F]*", Number.Hex),
            ("\\'[01xXzZ]", Number),
            ('[0-9][_0-9]*', Number.Integer),
            ('[~!%^&*+=|?:<>/-]', Operator),
            (words(('inside', 'dist'), suffix='\\b'), Operator.Word),
            ("[()\\[\\],.;\\'$]", Punctuation),
            ('`[a-zA-Z_]\\w*', Name.Constant),
            # IEEE 1800 keywords.
            (words((
                'accept_on', 'alias', 'always', 'always_comb', 'always_ff',
                'always_latch', 'and', 'assert', 'assign', 'assume', 'automatic',
                'before', 'begin', 'bind', 'bins', 'binsof', 'break', 'buf',
                'bufif0', 'bufif1', 'case', 'casex', 'casez', 'cell', 'checker',
                'clocking', 'cmos', 'config', 'constraint', 'context', 'continue',
                'cover', 'covergroup', 'coverpoint', 'cross', 'deassign',
                'default', 'defparam', 'design', 'disable', 'do', 'edge', 'else',
                'end', 'endcase', 'endchecker', 'endclocking', 'endconfig',
                'endfunction', 'endgenerate', 'endgroup', 'endinterface',
                'endmodule', 'endpackage', 'endprimitive', 'endprogram',
                'endproperty', 'endsequence', 'endspecify', 'endtable', 'endtask',
                'enum', 'eventually', 'expect', 'export', 'extern', 'final',
                'first_match', 'for', 'force', 'foreach', 'forever', 'fork',
                'forkjoin', 'function', 'generate', 'genvar', 'global', 'highz0',
                'highz1', 'if', 'iff', 'ifnone', 'ignore_bins', 'illegal_bins',
                'implies', 'implements', 'import', 'incdir', 'include', 'initial',
                'inout', 'input', 'instance', 'interconnect', 'interface',
                'intersect', 'join', 'join_any', 'join_none', 'large', 'let',
                'liblist', 'library', 'local', 'localparam', 'macromodule',
                'matches', 'medium', 'modport', 'module', 'nand', 'negedge',
                'nettype', 'new', 'nexttime', 'nmos', 'nor', 'noshowcancelled',
                'not', 'notif0', 'notif1', 'null', 'or', 'output', 'package',
                'packed', 'parameter', 'pmos', 'posedge', 'primitive', 'priority',
                'program', 'property', 'protected', 'pull0', 'pull1', 'pulldown',
                'pullup', 'pulsestyle_ondetect', 'pulsestyle_onevent', 'pure',
                'rand', 'randc', 'randcase', 'randsequence', 'rcmos', 'ref',
                'reject_on', 'release', 'repeat', 'restrict', 'return', 'rnmos',
                'rpmos', 'rtran', 'rtranif0', 'rtranif1', 's_always',
                's_eventually', 's_nexttime', 's_until', 's_until_with',
                'scalared', 'sequence', 'showcancelled', 'small', 'soft', 'solve',
                'specify', 'specparam', 'static', 'strong', 'strong0', 'strong1',
                'struct', 'super', 'sync_accept_on', 'sync_reject_on', 'table',
                'tagged', 'task', 'this', 'throughout', 'timeprecision',
                'timeunit', 'tran', 'tranif0', 'tranif1', 'typedef', 'union',
                'unique', 'unique0', 'until', 'until_with', 'untyped', 'use',
                'vectored', 'virtual', 'wait', 'wait_order', 'weak', 'weak0',
                'weak1', 'while', 'wildcard', 'with', 'within', 'xnor', 'xor'),
                suffix='\\b'), Keyword),
            ('(class)(\\s+)([a-zA-Z_]\\w*)', bygroups(Keyword.Declaration, Whitespace, Name.Class)),
            ('(extends)(\\s+)([a-zA-Z_]\\w*)', bygroups(Keyword.Declaration, Whitespace, Name.Class)),
            ('(endclass\\b)(?:(\\s*)(:)(\\s*)([a-zA-Z_]\\w*))?',
             bygroups(Keyword.Declaration, Whitespace, Punctuation, Whitespace, Name.Class)),
            # Data types and net types.
            (words((
                'bit', 'byte', 'chandle', 'const', 'event', 'int', 'integer',
                'logic', 'longint', 'real', 'realtime', 'reg', 'shortint',
                'shortreal', 'signed', 'string', 'time', 'type', 'unsigned',
                'var', 'void', 'supply0', 'supply1', 'tri', 'triand', 'trior',
                'trireg', 'tri0', 'tri1', 'uwire', 'wand', 'wire', 'wor'),
                suffix='\\b'), Keyword.Type),
            # Compiler directives.
            (words((
                '`__FILE__', '`__LINE__', '`begin_keywords', '`celldefine',
                '`default_nettype', '`define', '`else', '`elsif',
                '`end_keywords', '`endcelldefine', '`endif', '`ifdef', '`ifndef',
                '`include', '`line', '`nounconnected_drive', '`pragma',
                '`resetall', '`timescale', '`unconnected_drive', '`undef',
                '`undefineall'), suffix='\\b'), Comment.Preproc),
            # System tasks and functions.
            (words((
                '$exit', '$finish', '$stop', '$realtime', '$stime', '$time',
                '$printtimescale', '$timeformat', '$bitstoreal',
                '$bitstoshortreal', '$cast', '$itor', '$realtobits', '$rtoi',
                '$shortrealtobits', '$signed', '$unsigned', '$bits',
                '$isunbounded', '$typename', '$dimensions', '$high',
                '$increment', '$left', '$low', '$right', '$size',
                '$unpacked_dimensions', '$acos', '$acosh', '$asin', '$asinh',
                '$atan', '$atan2', '$atanh', '$ceil', '$clog2', '$cos', '$cosh',
                '$exp', '$floor', '$hypot', '$ln', '$log10', '$pow', '$sin',
                '$sinh', '$sqrt', '$tan', '$tanh', '$countbits', '$countones',
                '$isunknown', '$onehot', '$onehot0', '$info', '$error', '$fatal',
                '$warning', '$assertcontrol', '$assertfailoff', '$assertfailon',
                '$assertkill', '$assertnonvacuouson', '$assertoff', '$asserton',
                '$assertpassoff', '$assertpasson', '$assertvacuousoff',
                '$changed', '$changed_gclk', '$changing_gclk', '$falling_gclk',
                '$fell', '$fell_gclk', '$future_gclk', '$past', '$past_gclk',
                '$rising_gclk', '$rose', '$rose_gclk', '$sampled', '$stable',
                '$stable_gclk', '$steady_gclk', '$coverage_control',
                '$coverage_get', '$coverage_get_max', '$coverage_merge',
                '$coverage_save', '$get_coverage', '$load_coverage_db',
                '$set_coverage_db_name', '$dist_chi_square', '$dist_erlang',
                '$dist_exponential', '$dist_normal', '$dist_poisson', '$dist_t',
                '$dist_uniform', '$random', '$q_add', '$q_exam', '$q_full',
                '$q_initialize', '$q_remove', '$async$and$array',
                '$async$and$plane', '$async$nand$array', '$async$nand$plane',
                '$async$nor$array', '$async$nor$plane', '$async$or$array',
                '$async$or$plane', '$sync$and$array', '$sync$and$plane',
                '$sync$nand$array', '$sync$nand$plane', '$sync$nor$array',
                '$sync$nor$plane', '$sync$or$array', '$sync$or$plane', '$system',
                '$display', '$displayb', '$displayh', '$displayo', '$monitor',
                '$monitorb', '$monitorh', '$monitoro', '$monitoroff',
                '$monitoron', '$strobe', '$strobeb', '$strobeh', '$strobeo',
                '$write', '$writeb', '$writeh', '$writeo', '$fclose',
                '$fdisplay', '$fdisplayb', '$fdisplayh', '$fdisplayo', '$feof',
                '$ferror', '$fflush', '$fgetc', '$fgets', '$fmonitor',
                '$fmonitorb', '$fmonitorh', '$fmonitoro', '$fopen', '$fread',
                '$fscanf', '$fseek', '$fstrobe', '$fstrobeb', '$fstrobeh',
                '$fstrobeo', '$ftell', '$fwrite', '$fwriteb', '$fwriteh',
                '$fwriteo', '$rewind', '$sformat', '$sformatf', '$sscanf',
                '$swrite', '$swriteb', '$swriteh', '$swriteo', '$ungetc',
                '$readmemb', '$readmemh', '$writememb', '$writememh',
                '$test$plusargs', '$value$plusargs', '$dumpall', '$dumpfile',
                '$dumpflush', '$dumplimit', '$dumpoff', '$dumpon', '$dumpports',
                '$dumpportsall', '$dumpportsflush', '$dumpportslimit',
                '$dumpportsoff', '$dumpportson', '$dumpvars'),
                suffix='\\b'), Name.Builtin),
            ('[a-zA-Z_]\\w*:(?!:)', Name.Label),
            ('\\$?[a-zA-Z_]\\w*', Name),
            ('\\\\(\\S+)', Name),
        ],
        'string': [
            ('"', String, '#pop'),
            ('\\\\([\\\\abfnrtv"\\\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            ('[^\\\\"\\n]+', String),
            ('(\\\\)(\\n)', bygroups(String.Escape, Whitespace)),
            ('\\\\', String),
        ],
        'macro': [
            ('[^/\\n]+', Comment.Preproc),
            ('/[*](.|\\n)*?[*]/', Comment.Multiline),
            ('//.*?$', Comment.Single, '#pop'),
            ('/', Comment.Preproc),
            # Macro continues across a line-continuation backslash.
            ('(?<=\\\\)\\n', Comment.Preproc),
            ('\\n', Whitespace, '#pop'),
        ],
        'import': [
            ('[\\w:]+\\*?', Name.Namespace, '#pop'),
        ],
    }
# NOTE(review): the decorator prefixes were lost in extraction (lines began
# with bare ".parametrize"); restored as @pytest.mark.parametrize — confirm.
@pytest.mark.parametrize('src', [
    lazy_fixture('swath_def_2d_numpy'),
    lazy_fixture('swath_def_2d_dask'),
    lazy_fixture('swath_def_2d_xarray_numpy'),
    lazy_fixture('swath_def_2d_xarray_dask'),
])
@pytest.mark.parametrize('dst', [lazy_fixture('area_def_lcc_conus_1km')])
def test_resampler(src, dst):
    """Resampling preserves shape of the destination grid and precomputes once."""
    rs = FakeResampler(src, dst)
    some_data = np.zeros(src.shape, dtype=np.float64)
    resample_results = rs.resample(some_data)
    rs.precompute.assert_called_once()
    assert resample_results.shape == dst.shape
def rewrite_metadata():
    """Collect GSO object metadata from .pbtxt files and dump it as one .npy map.

    The recursive glob of the dataset tree is cached on disk (gso_glob.npy)
    because it is slow; the final map is keyed by the object directory name.
    """
    gso_dir = 'data/google_object_dataset'
    glob_dump = 'data/gso_glob.npy'
    gso_meta_path = 'cos_eor/scripts/dump/gso_dump.npy'
    gso_metadata = {}
    if os.path.exists(glob_dump):
        paths = list(np.load(glob_dump, allow_pickle=True))
    else:
        paths = glob.glob(gso_dir + '/**', recursive=True)
        np.save(glob_dump, paths)
    paths = [pth for pth in paths if pth.endswith('.pbtxt')]
    gso_meta = []
    for pth in tqdm(paths, desc='Reading pbtexts'):
        # fixed: files were opened without being closed (open(...).readlines()).
        with open(pth) as fh:
            gso_meta.append(read_pbtxt(fh.readlines(), 0))
    for p, m in zip(paths, gso_meta):
        # Key by the object's directory name (parent of the .pbtxt file).
        obj_name = os.path.basename(os.path.split(p)[0])
        gso_metadata[obj_name] = m
    np.save(gso_meta_path, gso_metadata)
def test_simple_function_with_surrounding_statements() -> None:
    """A nested def yields two CFGs: one for the module, one for the function."""
    src = '\n x = 10\n def func(x: int) -> None:\n print(x + 1)\n print(x)\n '
    cfgs = build_cfgs(src)
    assert len(cfgs) == 2
    module_key, func_key = list(cfgs)
    # Module CFG: the def statement appears as a single opaque block.
    module_blocks = [['x = 10', '\ndef func(x: int) -> None:\n print(x + 1)', 'print(x)'], []]
    assert _extract_blocks(cfgs[module_key]) == module_blocks
    # Function CFG: argument block, body, then the implicit exit block.
    func_blocks = [['x: int'], ['print(x + 1)'], []]
    assert _extract_blocks(cfgs[func_key]) == func_blocks
def test_sqliteio_read_updates_progress(tmpfile, view):
    """Reading one stored item emits begin/progress/finished on the worker."""
    worker = MagicMock(canceled=False)
    io = SQLiteIO(tmpfile, view.scene, create_new=True, worker=worker)
    io.create_schema_on_new()
    # One pixmap item plus its (empty) archive blob.
    pixmap_row = ('pixmap', 0, 0, 0, 1, json.dumps({'filename': 'bee.png'}))
    io.ex('INSERT INTO items (type, x, y, z, scale, data) VALUES (?, ?, ?, ?, ?, ?) ', pixmap_row)
    io.ex('INSERT INTO sqlar (item_id, data) VALUES (?, ?)', (1, b''))
    io.connection.commit()
    io.read()
    worker.begin_processing.emit.assert_called_once_with(1)
    worker.progress.emit.assert_called_once_with(0)
    worker.finished.emit.assert_called_once_with(tmpfile, [])
def parse_args():
    """Parse CLI options for generating the BID train/val split."""
    parser = argparse.ArgumentParser(description='Generate training and val set of BID ')
    parser.add_argument('root_path', help='Root dir path of BID')
    parser.add_argument('--nproc', type=int, default=1, help='Number of processes')
    parser.add_argument('--val-ratio', type=float, default=0.0, help='Split ratio for val set')
    return parser.parse_args()
def G_wgan(G, D, opt, training_set, minibatch_size):
    """WGAN generator loss: -D(G(z)).  `opt` is unused but kept for the loss API."""
    latent_shape = [minibatch_size] + G.input_shapes[0][1:]
    latents = tf.random_normal(latent_shape)
    labels = training_set.get_random_labels_tf(minibatch_size)
    fakes = G.get_output_for(latents, labels, is_training=True)
    # Cast discriminator scores to fp32 before forming the loss.
    fake_scores = fp32(D.get_output_for(fakes, labels, is_training=True))
    return -fake_scores
def get_dataloader_sample(dataset='imagenet', batch_size=128, num_workers=8, is_sample=False, k=4096):
    """Build ImageNet train/test dataloaders.

    The train set (ImageFolderSample) can additionally return `k` contrastive
    negatives per sample when `is_sample` is set.  Returns
    (train_loader, test_loader, n_train, n_classes).
    """
    if dataset != 'imagenet':
        raise NotImplementedError('dataset not supported: {}'.format(dataset))
    data_folder = get_data_folder()
    # Standard ImageNet channel statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    test_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])
    train_set = ImageFolderSample(os.path.join(data_folder, 'train'),
                                  transform=train_transform, is_sample=is_sample, k=k)
    test_set = datasets.ImageFolder(os.path.join(data_folder, 'val'),
                                    transform=test_transform)
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True,
                              num_workers=num_workers, pin_memory=True)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False,
                             num_workers=num_workers, pin_memory=True)
    print('num_samples', len(train_set.samples))
    print('num_class', len(train_set.classes))
    return train_loader, test_loader, len(train_set), len(train_set.classes)
def test_latest_ref_counts():
    """latest() holds a reference only to the most recent datum's metadata."""
    source = Stream()
    _ = source.latest()
    first = RefCounter()
    source.emit(1, metadata=[{'ref': first}])
    # The latest node retains the first datum's reference.
    assert first.count == 1
    second = RefCounter()
    source.emit(2, metadata=[{'ref': second}])
    # A newer datum displaces the old reference.
    assert (first.count, second.count) == (0, 1)
def calculate_stats_from_dataset():
    """Compute Inception feature statistics (mean, covariance) over the FFHQ
    dataset and save them for later FID evaluation.

    CLI: --num_sample (features to keep), --batch_size, --size (image
    resolution, also selects the lmdb file), --dataroot (unused below except
    as a default hint — TODO confirm).
    """
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_sample', type=int, default=50000)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--size', type=int, default=512)
    parser.add_argument('--dataroot', type=str, default='datasets/ffhq')
    args = parser.parse_args()
    # Inception-v3 patched for FID-style pooled features.
    inception = load_patched_inception_v3(device)
    # Dataset options consumed by the project's build_dataset factory.
    opt = {}
    opt['name'] = 'FFHQ'
    opt['type'] = 'FFHQDataset'
    opt['dataroot_gt'] = f'datasets/ffhq/ffhq_{args.size}.lmdb'
    opt['io_backend'] = dict(type='lmdb')
    opt['use_hflip'] = False
    opt['mean'] = [0.5, 0.5, 0.5]
    opt['std'] = [0.5, 0.5, 0.5]
    dataset = build_dataset(opt)
    data_loader = DataLoader(dataset=dataset, batch_size=args.batch_size, shuffle=False, num_workers=4, sampler=None, drop_last=False)
    # Number of batches needed to cover num_sample images (last batch may overshoot).
    total_batch = math.ceil((args.num_sample / args.batch_size))

    def data_generator(data_loader, total_batch):
        # Yield ground-truth image batches, stopping after total_batch batches.
        for (idx, data) in enumerate(data_loader):
            if (idx >= total_batch):
                break
            else:
                (yield data['gt'])
    features = extract_inception_features(data_generator(data_loader, total_batch), inception, total_batch, device)
    features = features.numpy()
    total_len = features.shape[0]
    # Trim the overshoot from the final batch.
    features = features[:args.num_sample]
    print(f'Extracted {total_len} features, use the first {features.shape[0]} features to calculate stats.')
    mean = np.mean(features, 0)
    cov = np.cov(features, rowvar=False)
    save_path = f"inception_{opt['name']}_{args.size}.pth"
    # Legacy serialization kept for compatibility with older torch.load users.
    torch.save(dict(name=opt['name'], size=args.size, mean=mean, cov=cov), save_path, _use_new_zipfile_serialization=False)
def time_frequency_stitching(min_switch_ind, final_i_index, time_offset, i_time, i_omega, m_time, m_omega):
    """Stitch an initial time/frequency trace onto a model trace.

    Takes the first `final_i_index` points of (i_time, i_omega) and appends
    the tail of (m_time + time_offset, m_omega) starting at `min_switch_ind`.

    Returns [stitched_time, stitched_omega] as two lists.
    """
    # isinstance instead of `type(x) == T` (accepts well-behaved subclasses).
    assert isinstance(min_switch_ind, int), 'min_switch_ind should be an int.'
    assert isinstance(final_i_index, int), 'final_i_index should be an int.'
    assert isinstance(time_offset, float), 'time_offset should be a float.'
    assert isinstance(i_time, list), 'i_time should be a list.'
    assert isinstance(i_omega, list), 'i_omega should be a list.'
    assert isinstance(m_time, list), 'm_time should be a list.'
    assert isinstance(m_omega, list), 'm_omega should be a list.'
    # Shift the model time axis by the offset in one vectorized step.
    min_offset_m_time = np.asarray(m_time, dtype=float) + time_offset
    i_m_time = list(i_time[:final_i_index]) + list(min_offset_m_time[min_switch_ind:])
    i_m_omega = list(i_omega[:final_i_index]) + list(m_omega[min_switch_ind:])
    return [i_m_time, i_m_omega]
def test_ls_none(data, runner):
    """`overview --ls` on a file without overviews reports None per band."""
    src_path = str(data.join('RGB.byte.tif'))
    result = runner.invoke(cli, ['overview', src_path, '--ls'])
    assert result.exit_code == 0
    expected = "Overview factors:\n Band 1: None (method: 'unknown')\n Band 2: None (method: 'unknown')\n Band 3: None (method: 'unknown')\n"
    assert expected == result.output
class SetMentions(ScrimsButton):
    # Scrims-settings button: prompts the user for the number of mentions
    # required for registration and stores it on the view's record.

    def __init__(self, ctx: Context, letter: str):
        # `ri(letter)` presumably maps the letter to a regional-indicator
        # emoji for the button face — TODO confirm.
        super().__init__(emoji=ri(letter))
        self.ctx = ctx

    async def callback(self, interaction: Interaction):
        (await interaction.response.defer())
        # Ask the question, collect a bounded integer (0..10), then clean up
        # the prompt message and refresh the settings view.
        m = (await self.ctx.simple('How many mentions are required for registration? (Max `10`)'))
        self.view.record.required_mentions = (await inputs.integer_input(self.ctx, delete_after=True, limits=(0, 10)))
        (await self.ctx.safe_delete(m))
        (await self.view.refresh_view())
def get_config():
    """Return the NCSNv2-64 sampling/training configuration (VE SDE, PC sampler)."""
    config = get_default_configs()
    # Training: variance-exploding SDE, discrete (non-continuous) training.
    training = config.training
    training.sde = 'vesde'
    training.continuous = False
    # NOTE: the original assigned step_size=3.3e-06, n_steps_each=5,
    # ckpt_id=210000, final_only=True, noise_removal=False to bare locals that
    # were never read (dead code); removed.  Only sampling.n_steps_each below
    # actually took effect.
    # Sampling: predictor-corrector with annealed Langevin dynamics only.
    sampling = config.sampling
    sampling.method = 'pc'
    sampling.predictor = 'none'
    sampling.corrector = 'ald'
    sampling.n_steps_each = 5
    sampling.snr = 0.128
    # Model: NCSNv2 at 64x64 resolution.
    model = config.model
    model.name = 'ncsnv2_64'
    model.scale_by_sigma = True
    model.num_scales = 500
    model.ema_rate = 0.999
    model.normalization = 'InstanceNorm++'
    model.nonlinearity = 'elu'
    model.nf = 128
    model.interpolation = 'bilinear'
    # Optimizer: plain Adam, no warmup, no gradient clipping (-1 disables).
    optim = config.optim
    optim.weight_decay = 0
    optim.optimizer = 'Adam'
    optim.lr = 0.0001
    optim.beta1 = 0.9
    optim.amsgrad = False
    optim.eps = 1e-08
    optim.warmup = 0
    optim.grad_clip = -1.0
    return config
# NOTE(review): the decorator prefix was lost in extraction (line began with
# bare ".requires_user_validation"); restored as a pytest marker — confirm
# against the project's conftest marker definitions.
@pytest.mark.requires_user_validation
def test_pause_sound(event_loop):
    """Interactive check: Player.pause()/play()/delete() audibly stop and start audio."""
    source = synthesis.WhiteNoise(60.0)
    player = Player()
    player.queue(source)
    player.play()
    event_loop.run_event_loop(1.0)
    player.pause()
    event_loop.ask_question('Did you hear white noise for 1 second and is it now silent?', screenshot=False)
    player.play()
    event_loop.ask_question('Do you hear white noise again?', screenshot=False)
    player.delete()
    event_loop.ask_question('Is it silent again?', screenshot=False)
class TestPSS():
    """Unit tests for PSS padding construction and salt-length validation."""

    def test_calculate_max_pss_salt_length(self):
        # A non-key first argument must be rejected.
        with pytest.raises(TypeError):
            padding.calculate_max_pss_salt_length(object(), hashes.SHA256())

    def test_invalid_salt_length_not_integer(self):
        # Salt length must be an int, not bytes.
        with pytest.raises(TypeError):
            padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=b'not_a_length')

    def test_invalid_salt_length_negative_integer(self):
        with pytest.raises(ValueError):
            padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=-1)

    def test_valid_pss_parameters(self):
        digest = hashes.SHA1()
        length = digest.digest_size
        mgf1 = padding.MGF1(digest)
        pss = padding.PSS(mgf=mgf1, salt_length=length)
        assert pss._mgf == mgf1
        assert pss._salt_length == length

    def test_valid_pss_parameters_maximum(self):
        digest = hashes.SHA1()
        mgf1 = padding.MGF1(digest)
        pss = padding.PSS(mgf=mgf1, salt_length=padding.PSS.MAX_LENGTH)
        assert pss._mgf == mgf1
        assert pss._salt_length == padding.PSS.MAX_LENGTH

    def test_mgf_property(self):
        digest = hashes.SHA1()
        mgf1 = padding.MGF1(digest)
        pss = padding.PSS(mgf=mgf1, salt_length=padding.PSS.MAX_LENGTH)
        # Public property mirrors the private attribute.
        assert pss.mgf == mgf1
        assert pss.mgf == pss._mgf
class TestPostgenerationCalledOnce():
    """_after_postgeneration must run exactly once per built object."""

    # NOTE(review): the decorator was garbled to "(_name='collector')" in
    # extraction; restored as pytest-factoryboy's register, which exposes the
    # factory product as the 'collector' fixture — confirm against upstream.
    @register(_name='collector')
    class CollectorFactory(factory.Factory):
        class Meta():
            model = dict

        foo = factory.PostGeneration(lambda *args, **kwargs: 42)

        # NOTE(review): restored @classmethod — factory_boy declares
        # _after_postgeneration as a classmethod hook; the `cls` first
        # parameter in the original supports this.
        @classmethod
        def _after_postgeneration(cls, obj: dict[str, Any], create: bool, results: dict[str, Any] | None = None) -> None:
            # Record every invocation on the built object itself.
            obj.setdefault('_after_postgeneration_calls', []).append((obj, create, results))

    def test_postgeneration_called_once(self, request):
        foo = request.getfixturevalue('collector')
        calls = foo['_after_postgeneration_calls']
        assert len(calls) == 1
        [[obj, create, results]] = calls
        assert obj is foo
        assert create is True
        assert isinstance(results, dict)
        assert results['foo'] == 42
def upgrade(op, tables, tester):
    """Create the logentry3 table plus its datetime-keyed lookup indexes."""
    op.create_table(
        'logentry3',
        sa.Column('id', sa.BigInteger(), nullable=False),
        sa.Column('kind_id', sa.Integer(), nullable=False),
        sa.Column('account_id', sa.Integer(), nullable=False),
        sa.Column('performer_id', sa.Integer(), nullable=True),
        sa.Column('repository_id', sa.Integer(), nullable=True),
        sa.Column('datetime', sa.DateTime(), nullable=False),
        sa.Column('ip', sa.String(length=255), nullable=True),
        sa.Column('metadata_json', sa.Text(), nullable=False),
        sa.PrimaryKeyConstraint('id', name=op.f('pk_logentry3')),
    )
    # All indexes are non-unique and keyed on datetime for time-ranged queries.
    for index_name, columns in (
        ('logentry3_account_id_datetime', ['account_id', 'datetime']),
        ('logentry3_datetime', ['datetime']),
        ('logentry3_performer_id_datetime', ['performer_id', 'datetime']),
        ('logentry3_repository_id_datetime_kind_id', ['repository_id', 'datetime', 'kind_id']),
    ):
        op.create_index(index_name, 'logentry3', columns, unique=False)
def test():
    """Run the bouncing-sprite demo on an ILI9341 display until Ctrl-C."""
    try:
        # NOTE(review): the baudrate value was lost in extraction
        # ("baudrate="); 40 MHz is the customary setting for these ILI9341
        # demos — TODO confirm against the original script.
        spi = SPI(1, baudrate=40000000, sck=Pin(14), mosi=Pin(13))
        display = Display(spi, dc=Pin(4), cs=Pin(16), rst=Pin(17))
        display.clear()
        logo = BouncingSprite('images/Python41x49.raw', 41, 49, 240, 320, 1, display)
        while True:
            timer = ticks_us()
            logo.update_pos()
            logo.draw()
            # Pace the loop at ~30 FPS (33,333 us per frame).
            timer_dif = 33333 - ticks_diff(ticks_us(), timer)
            if timer_dif > 0:
                sleep_us(timer_dif)
    except KeyboardInterrupt:
        display.cleanup()
def aretry(exception_cls, max_tries=10, sleep=0.05):
    """Async retry decorator for asynq functions.

    Re-invokes fn.asynq up to `max_tries` times when it raises
    `exception_cls`, sleeping `sleep` seconds between attempts and
    re-raising after the final failure.
    """
    assert max_tries > 0, 'max_tries (%d) should be a positive integer' % max_tries

    def decorator(fn):
        # NOTE(review): the decorators were garbled to "()" / "(fn)" in
        # extraction; restored as asynq's async wrapper plus functools.wraps
        # — confirm against the original source.
        @asynq()
        @wraps(fn)
        def wrapper(*args, **kwargs):
            for i in range(max_tries):
                try:
                    ret = (yield fn.asynq(*args, **kwargs))
                    return ret
                except exception_cls:
                    # Out of attempts: propagate the last exception.
                    if (i + 1) == max_tries:
                        raise
                    time.sleep(sleep)
        wrapper.original_fn = fn
        return wrapper
    return decorator
def generator(z, y):
    """DCGAN-style generator: project z to a 4D feature map, then upsample
    through four deconv stages to an s x s image in [-1, 1].  `y` is accepted
    for interface parity but unused."""
    s = FLAGS.output_size
    # Spatial sizes at each upsampling stage.
    s2, s4, s8, s16 = (int(s / d) for d in (2, 4, 8, 16))
    gf_dim = 64
    h = tf.reshape(linear(z, gf_dim * 8 * s16 * s16, 'g_h0_lin'),
                   [-1, s16, s16, gf_dim * 8])
    h = tf.nn.relu(h)
    h = tf.nn.relu(deconv2d(h, [FLAGS.batch_size, s8, s8, gf_dim * 4], name='g_h1'))
    h = tf.nn.relu(deconv2d(h, [FLAGS.batch_size, s4, s4, gf_dim * 2], name='g_h2'))
    h = tf.nn.relu(deconv2d(h, [FLAGS.batch_size, s2, s2, gf_dim * 1], name='g_h3'))
    out = deconv2d(h, [FLAGS.batch_size, s, s, C_DIM], name='g_h4')
    return tf.nn.tanh(out)
def tune_model_weights():
    """Tune 3-model ensemble interpolation weights with Ax Bayesian optimization.

    Parses generation + tuning CLI args, then searches (w1, w2, w3) subject to
    w1+w2+w3 ~= 1, minimizing the objective reported by generate.generate().
    Writes the best parameters and best value as JSON to the paths given on
    the command line, and returns (best_parameters, values).
    """
    parser = generate.get_parser_with_args()
    parser = add_tune_args(parser)
    args = options.parse_args_and_arch(parser)
    # Models are given as a delimiter-joined path list.
    n_models = len(args.path.split(CHECKPOINT_PATHS_DELIMITER))
    print(n_models)
    print(args.weight_lower_bound)
    print(args.weight_upper_bound)
    print(args.output_json_best_parameters)
    print(args.output_json_best_value)
    # NOTE: 'num_trails_ax_opt' (sic) is the established CLI attribute name;
    # renaming it would break callers.
    print(args.num_trails_ax_opt)

    def evaluation_function(parameterization):
        # Evaluate one candidate (w1, w2, w3) by running generation with the
        # weights serialized as "w1,w2,w3"; Ax expects (mean, sem) tuples.
        w1 = parameterization.get('w1')
        w2 = parameterization.get('w2')
        w3 = parameterization.get('w3')
        weight = ((((str(w1) + ',') + str(w2)) + ',') + str(w3))
        args.model_weights = weight
        generate.validate_args(args)
        score = generate.generate(args)
        return {'bleu_score': (score, 0.0)}
    lower_bound = args.weight_lower_bound
    upper_bound = args.weight_upper_bound
    # NOTE: minimize=True on 'bleu_score' — presumably the returned score is a
    # loss-like quantity here; confirm against generate.generate().
    (best_parameters, values, experiment, model) = optimize(parameters=[{'name': 'w1', 'type': 'range', 'bounds': [lower_bound, upper_bound], 'value_type': 'float'}, {'name': 'w2', 'type': 'range', 'bounds': [lower_bound, upper_bound]}, {'name': 'w3', 'type': 'range', 'bounds': [lower_bound, upper_bound]}], experiment_name='tune_model_weights', objective_name='bleu_score', evaluation_function=evaluation_function, minimize=True, parameter_constraints=['w1 + w2 + w3 <= 1', 'w1 + w2 + w3 >= 0.99'], total_trials=args.num_trails_ax_opt)
    json_file = json.dumps(best_parameters)
    with open(args.output_json_best_parameters, 'w') as f:
        f.write(json_file)
        f.close()
    json_file = json.dumps(values)
    with open(args.output_json_best_value, 'w') as f:
        f.write(json_file)
        f.close()
    return (best_parameters, values)
def _addmm_flop_jit(inputs: Tuple[torch.Tensor], outputs: Tuple[Any]) -> Number:
input_shapes = [v.shape for v in inputs[1:3]]
assert (len(input_shapes[0]) == 2), input_shapes[0]
assert (len(input_shapes[1]) == 2), input_shapes[1]
(batch_size, input_dim) = input_shapes[0]
output_dim = input_shapes[1][1]
flops = ((batch_size * input_dim) * output_dim)
return flops |
class NotificationDevice(XlibSelectDevice):
    # Self-pipe wakeup device for the X11 select loop: set() wakes the loop,
    # which then dispatches posted events via select().

    def __init__(self):
        # Self-pipe trick: writing one byte to the write end makes the read
        # end readable, waking any select() watching fileno().
        (self._sync_file_read, self._sync_file_write) = os.pipe()
        self._event = threading.Event()

    def fileno(self):
        # The read end is what the platform select loop watches.
        return self._sync_file_read

    def set(self):
        # Mark pending work first, then wake the select loop with one byte.
        self._event.set()
        os.write(self._sync_file_write, b'1')

    def select(self):
        # Called by the loop when readable: clear the flag, drain the wakeup
        # byte, then dispatch whatever was posted.
        self._event.clear()
        os.read(self._sync_file_read, 1)
        app.platform_event_loop.dispatch_posted_events()

    def poll(self):
        # True while a wakeup is pending (set() called, select() not yet run).
        return self._event.is_set()
# NOTE(review): the decorator prefix was lost in extraction (bare ".linux");
# restored as a pytest marker — confirm against the project's markers.
@pytest.mark.linux
def test_webengine_download_suffix(request, quteproc_new, tmp_path):
    """Re-downloading the same file prompts and keeps a single resulting file."""
    if not request.config.webengine:
        pytest.skip()
    download_dir = tmp_path / 'downloads'
    download_dir.mkdir()
    # Point XDG at a user-dirs config so downloads land in our tmp dir.
    (tmp_path / 'user-dirs.dirs').write_text('XDG_DOWNLOAD_DIR={}'.format(download_dir))
    env = {'XDG_CONFIG_HOME': str(tmp_path)}
    args = ['--temp-basedir'] + _base_args(request.config)
    quteproc_new.start(args, env=env)
    quteproc_new.set_setting('downloads.location.prompt', 'false')
    quteproc_new.set_setting('downloads.location.directory', str(download_dir))
    quteproc_new.open_path('data/downloads/download.bin', wait=False)
    quteproc_new.wait_for(category='downloads', message='Download * finished')
    # Second download of the same name triggers the overwrite prompt.
    quteproc_new.open_path('data/downloads/download.bin', wait=False)
    quteproc_new.wait_for(message='Entering mode KeyMode.yesno *')
    quteproc_new.send_cmd(':prompt-accept yes')
    quteproc_new.wait_for(category='downloads', message='Download * finished')
    files = list(download_dir.iterdir())
    assert len(files) == 1
    assert files[0].name == 'download.bin'
class Params():
    # Hyperparameter bundle for incremental-learning experiments on CUB-200.
    gpu_ewc = '4'          # GPU id used for the EWC run
    gpu_rewc = '3'         # GPU id used for the rEWC run
    data_size = 224        # input image side length
    batch_size = 32
    nb_cl = 50             # classes per incremental group
    nb_groups = 4          # number of incremental groups (4 x 50 = 200 classes)
    nb_val = 0             # validation images held out per class
    epochs = 50
    num_samples = 5
    lr_init = 0.001
    lr_strat = [40, 80]    # epochs at which the learning rate is reduced
    lr_factor = 5.0        # divisor applied at each lr_strat milestone
    wght_decay = 1e-05
    ratio = 100.0
    eval_single = True
    save_path = './checkpoints/'
    train_path = '/data/Datasets/CUB_200_2011/CUB_200_2011/'
class ResNet(object):
def __init__(self, args, mode):
    """Set up placeholders and hyperparameter constants, then build the graph.

    args: experiment configuration (num_classes, resnet_width, poison, ...).
    mode: one of 'train' / 'eval' / 'curv' — selects which ops are built.
    """
    self.relu_leakiness = 0.1
    self.optimizer = 'mom'          # 'mom' (momentum) or 'sgd'
    self.use_bottleneck = False     # plain residual units, not bottlenecks
    # CIFAR-style 32x32 RGB inputs and one-hot labels.
    images = tf.placeholder(name='input/images', dtype=tf.float32, shape=(None, 32, 32, 3))
    labels = tf.placeholder(name='input/labels', dtype=tf.float32, shape=(None, args.num_classes))
    self._images = images
    self.labels = labels
    self.args = args
    self.mode = mode
    # Scalars fed at run time (constants here act as defaults).
    self.lrn_rate = tf.constant(0, tf.float32)
    self.momentum = tf.constant(0.9, tf.float32)
    self.speccoef = tf.constant(0, tf.float32)
    self.projvec_beta = tf.constant(0, dtype=tf.float32)
    self._extra_train_ops = []
    self.build_graph()
def build_graph(self):
    """Create the global step, the model, and the training ops; report timing."""
    self.global_step = tf.train.get_or_create_global_step()
    t0 = time.time()
    self._build_model()
    self._build_train_op()
    elapsed = time.time() - t0
    print('Graph built in ' + str(elapsed) + ' sec')
    # Brief pause so the timing line is visible before training output starts.
    time.sleep(1)
def _stride_arr(self, stride):
return [1, stride, stride, 1]
def _build_model(self):
    """Build the ResNet forward graph: init conv, 3 residual stages, head.

    Also defines the cross-entropy loss (with an optional label-poisoning
    variant in training mode) and a precision metric.
    """
    with tf.variable_scope('init'):
        x = self._images
        # 3x3 conv, 3 -> 16 channels, stride 1.
        x = self._conv('init_conv', x, 3, 3, 16, self._stride_arr(1))
    # Per-stage strides and pre-activation flags.
    strides = [1, 2, 2]
    activate_before_residual = [True, False, False]
    if self.use_bottleneck:
        res_func = self._bottleneck_residual
    else:
        res_func = self._residual
    filters = [16, 16, 32, 64]
    # Widen the network by args.resnet_width (wide-resnet style).
    f = self.args.resnet_width
    filters = [16, (16 * f), (32 * f), (64 * f)]
    with tf.variable_scope('unit_1_0'):
        x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]), activate_before_residual[0])
    for i in six.moves.range(1, self.args.num_resunits):
        with tf.variable_scope(('unit_1_%d' % i)):
            x = res_func(x, filters[1], filters[1], self._stride_arr(1), False)
    with tf.variable_scope('unit_2_0'):
        x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]), activate_before_residual[1])
    for i in six.moves.range(1, self.args.num_resunits):
        with tf.variable_scope(('unit_2_%d' % i)):
            x = res_func(x, filters[2], filters[2], self._stride_arr(1), False)
    with tf.variable_scope('unit_3_0'):
        x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]), activate_before_residual[2])
    for i in six.moves.range(1, self.args.num_resunits):
        with tf.variable_scope(('unit_3_%d' % i)):
            x = res_func(x, filters[3], filters[3], self._stride_arr(1), False)
    with tf.variable_scope('unit_last'):
        # Final BN + leaky ReLU + global average pooling before the classifier.
        x = self._batch_norm('final_bn', x)
        x = self._relu(x, self.relu_leakiness)
        x = self._global_avg_pool(x)
    with tf.variable_scope('logit'):
        logits = tf.layers.dense(x, self.args.num_classes)
        self.predictions = tf.nn.softmax(logits)
    if ((self.mode == 'train') and self.args.poison):
        with tf.variable_scope('xent'):
            # Poisoned-label loss: predictions are affinely remapped by the
            # fed dirtyOne/dirtyNeg masks before the cross-entropy.
            # NOTE(review): the masks are hard-coded to 10 classes — presumably
            # CIFAR-10 only; confirm before using with other num_classes.
            self.dirtyOne = tf.placeholder(name='dirtyOne', dtype=tf.float32, shape=[None, 10])
            self.dirtyNeg = tf.placeholder(name='dirtyNeg', dtype=tf.float32, shape=[None, 10])
            self.dirtyPredictions = (self.dirtyOne + (self.dirtyNeg * self.predictions))
            self.xentPerExample = K.categorical_crossentropy(self.labels, self.dirtyPredictions)
            self.xent = tf.reduce_mean(self.xentPerExample)
    else:
        with tf.variable_scope('xent'):
            self.xentPerExample = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=self.labels)
            self.xent = tf.reduce_mean(self.xentPerExample)
    # Top-1 accuracy over the batch.
    truth = tf.argmax(self.labels, axis=1)
    pred = tf.argmax(self.predictions, axis=1)
    self.precision = tf.reduce_mean(tf.to_float(tf.equal(pred, truth)))
def _build_train_op(self):
    """Build the training op: base cross-entropy + weight decay, optionally
    augmented with a spectral-radius regularization term, applied by SGD or
    momentum.

    In 'eval'/'curv' modes only the spectral analysis graph is built (via
    specreg._spec) and no optimizer is created.
    """
    if (self.mode == 'eval'):
        specreg._spec(self, self.xentPerExample, True, self.args.nohess, self.args.randvec)
        return
    elif (self.mode == 'curv'):
        specreg._spec(self, self.xentPerExample, True, self.args.nohess, self.args.randvec)
        return
    trainable_variables = tf.trainable_variables()
    self.weight_norm = tf.global_norm(trainable_variables)
    # Base objective: cross-entropy plus L2 weight decay (see _decay).
    self.loss_orig = (self.xent + self._decay())
    tstart = time.time()
    grads = tf.gradients(self.loss_orig, trainable_variables)
    print(('Built grads: ' + str((time.time() - tstart))))
    gradsSpecList = []
    self.gradsSpecCorr = []
    self.loss = self.loss_orig
    if ((self.mode == 'train') and (not self.args.poison) and (not self.args.nohess)):
        # Build n_grads_spec independent spectral-radius estimates and
        # average both their values and their gradients.
        n_grads_spec = (self.args.n_grads_spec if self.args.randvec else 1)
        valEagerAccum = 0
        for i in range(n_grads_spec):
            print(('=> Spectral radius graph ' + str(i)))
            specreg._spec(self, self.xentPerExample, False, self.args.nohess, self.args.randvec)
            valEagerAccum = (valEagerAccum + self.valEager)
            if self.args.randvec:
                # Exponential penalty on the spectral value (randvec mode).
                loss_spec = (self.speccoef * tf.exp(((- self.args.specexp) * self.valEager)))
            else:
                loss_spec = (self.speccoef * self.valEager)
            self.loss = (self.loss + (loss_spec / n_grads_spec))
            tstart = time.time()
            gradsSpec = tf.gradients(loss_spec, trainable_variables)
            # Clip the regularizer gradients; grad_norm records the pre-clip norm.
            (gradsSpec, self.grad_norm) = tf.clip_by_global_norm(gradsSpec, clip_norm=self.args.max_grad_norm)
            if (i == 0):
                gradsSpecAccum = gradsSpec
            else:
                gradsSpecAccum = [(a + g) for (a, g) in zip(gradsSpecAccum, gradsSpec)]
            print('Built gradSpec:', str((time.time() - tstart)))
            # Pairwise correlations between this estimate and previous ones.
            self.gradsSpecCorr.extend([utils.list2corr(gradsSpec, g) for g in gradsSpecList])
            gradsSpecList = (gradsSpecList + [gradsSpec])
        self.valEager = (valEagerAccum / n_grads_spec)
        grads = [(g + (a / n_grads_spec)) for (g, a) in zip(grads, gradsSpecAccum)]
    if (self.optimizer == 'sgd'):
        optimizer = tf.train.GradientDescentOptimizer(self.lrn_rate)
    elif (self.optimizer == 'mom'):
        optimizer = tf.train.MomentumOptimizer(self.lrn_rate, self.momentum)
    apply_op = optimizer.apply_gradients(zip(grads, trainable_variables), global_step=self.global_step, name='train_step')
    # Extra ops (e.g. batch-norm moving-average updates) run with each step.
    train_ops = ([apply_op] + self._extra_train_ops)
    self.train_op = tf.group(*train_ops)
def _decay(self):
    """Return the L2 weight-decay term over all 'DW' (conv/dense kernel)
    variables, scaled by args.weight_decay; also stores the raw sum in
    self.wdec."""
    weight_losses = [tf.nn.l2_loss(v) for v in tf.trainable_variables()
                     if v.op.name.find('DW') > 0]
    self.wdec = tf.add_n(weight_losses)
    return tf.multiply(self.args.weight_decay, self.wdec)
def _batch_norm(self, name, x):
    """Batch normalization with manually managed moving statistics.

    In 'train' mode the batch moments are used directly and ops that update
    the moving averages are appended to self._extra_train_ops (grouped into
    the train op elsewhere); in other modes the stored moving statistics
    are used instead.
    """
    with tf.variable_scope(name):
        # Normalize over the last (channel) dimension.
        params_shape = [x.get_shape()[(- 1)]]
        # Learnable shift (beta) and scale (gamma).
        beta = tf.get_variable('beta', params_shape, tf.float32, initializer=tf.constant_initializer(0.0, tf.float32))
        gamma = tf.get_variable('gamma', params_shape, tf.float32, initializer=tf.constant_initializer(1.0, tf.float32))
        if (self.mode == 'train'):
            # Per-batch moments over N, H, W.
            (mean, variance) = tf.nn.moments(x, [0, 1, 2], name='moments')
            moving_mean = tf.get_variable('moving_mean', params_shape, tf.float32, initializer=tf.constant_initializer(0.0, tf.float32), trainable=False)
            moving_variance = tf.get_variable('moving_variance', params_shape, tf.float32, initializer=tf.constant_initializer(1.0, tf.float32), trainable=False)
            # Decay 0.9 exponential moving averages, updated on each train step.
            self._extra_train_ops.append(moving_averages.assign_moving_average(moving_mean, mean, 0.9))
            self._extra_train_ops.append(moving_averages.assign_moving_average(moving_variance, variance, 0.9))
        else:
            # Eval/inference: reuse the stored moving statistics.
            mean = tf.get_variable('moving_mean', params_shape, tf.float32, initializer=tf.constant_initializer(0.0, tf.float32), trainable=False)
            variance = tf.get_variable('moving_variance', params_shape, tf.float32, initializer=tf.constant_initializer(1.0, tf.float32), trainable=False)
        y = tf.nn.batch_normalization(x, mean, variance, beta, gamma, 0.001)
        y.set_shape(x.get_shape())
        return y
def _residual(self, x, in_filter, out_filter, stride, activate_before_residual=False):
    """Pre-activation residual unit with two 3x3 convolutions.

    When the filter count changes, the shortcut is average-pooled and
    zero-padded in the channel dimension instead of projected.
    """
    if activate_before_residual:
        with tf.variable_scope('shared_activation'):
            x = self._batch_norm('init_bn', x)
            x = self._relu(x, self.relu_leakiness)
            shortcut = x
    else:
        with tf.variable_scope('residual_only_activation'):
            shortcut = x
            x = self._batch_norm('init_bn', x)
            x = self._relu(x, self.relu_leakiness)
    with tf.variable_scope('sub1'):
        x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
    with tf.variable_scope('sub2'):
        x = self._batch_norm('bn2', x)
        x = self._relu(x, self.relu_leakiness)
        x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
    with tf.variable_scope('sub_add'):
        if in_filter != out_filter:
            # Downsample spatially, then zero-pad the missing channels.
            pad = (out_filter - in_filter) // 2
            shortcut = tf.nn.avg_pool(shortcut, stride, stride, 'VALID')
            shortcut = tf.pad(shortcut, [[0, 0], [0, 0], [0, 0], [pad, pad]])
        x += shortcut
    tf.logging.debug('image after unit %s', x.get_shape())
    return x
def _bottleneck_residual(self, x, in_filter, out_filter, stride, activate_before_residual=False):
    """Pre-activation bottleneck residual unit (1x1 reduce, 3x3, 1x1 expand).

    Internal width is out_filter/4; when the filter count changes, the
    shortcut is a 1x1 'project' convolution.
    """
    if activate_before_residual:
        # Shared BN+ReLU feeds both the residual branch and the shortcut.
        with tf.variable_scope('common_bn_relu'):
            x = self._batch_norm('init_bn', x)
            x = self._relu(x, self.relu_leakiness)
            orig_x = x
    else:
        # Shortcut taken before the activation.
        with tf.variable_scope('residual_bn_relu'):
            orig_x = x
            x = self._batch_norm('init_bn', x)
            x = self._relu(x, self.relu_leakiness)
    with tf.variable_scope('sub1'):
        x = self._conv('conv1', x, 1, in_filter, (out_filter / 4), stride)
    with tf.variable_scope('sub2'):
        x = self._batch_norm('bn2', x)
        x = self._relu(x, self.relu_leakiness)
        x = self._conv('conv2', x, 3, (out_filter / 4), (out_filter / 4), [1, 1, 1, 1])
    with tf.variable_scope('sub3'):
        x = self._batch_norm('bn3', x)
        x = self._relu(x, self.relu_leakiness)
        x = self._conv('conv3', x, 1, (out_filter / 4), out_filter, [1, 1, 1, 1])
    with tf.variable_scope('sub_add'):
        if (in_filter != out_filter):
            orig_x = self._conv('project', orig_x, 1, in_filter, out_filter, stride)
        x += orig_x
    tf.logging.info('image after unit %s', x.get_shape())
    return x
def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
    """2-D convolution with SAME padding; the kernel ('DW') uses He-style
    random-normal initialization scaled by the output fan."""
    with tf.variable_scope(name):
        fan_out = filter_size * filter_size * out_filters
        init = tf.random_normal_initializer(stddev=np.sqrt(2.0 / fan_out))
        kernel = tf.get_variable(
            'DW', [filter_size, filter_size, in_filters, out_filters],
            tf.float32, initializer=init)
        return tf.nn.conv2d(x, kernel, strides, padding='SAME')
def _relu(self, x, leakiness=0.0):
    """Leaky ReLU: x for x >= 0, leakiness * x otherwise."""
    negative_part = leakiness * x
    return tf.where(tf.less(x, 0.0), negative_part, x, name='leaky_relu')
def _fully_connected(self, x, out_dim):
    """Flatten the batch and apply a single dense layer (x @ W + b)."""
    flat = tf.reshape(x, [self.args.batch_size, (- 1)])
    weights = tf.get_variable('DW', [flat.get_shape()[1], out_dim],
                              initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
    biases = tf.get_variable('biases', [out_dim], initializer=tf.constant_initializer())
    return tf.nn.xw_plus_b(flat, weights, biases)
def _global_avg_pool(self, x):
    """Average over the spatial (H, W) dimensions of an NHWC tensor."""
    assert x.get_shape().ndims == 4
    return tf.reduce_mean(x, [1, 2])
class LinearSubSampler(LayerSubSampler):
    """Sub-sampler specialization for fully-connected (torch.nn.Linear) layers."""

    def verify_layers(self, orig_layer: torch.nn.Module, pruned_layer: torch.nn.Module):
        """Both the original and the pruned layer must be Linear layers."""
        for layer in (orig_layer, pruned_layer):
            assert isinstance(layer, torch.nn.Linear)

    def get_number_of_batches(self, data_loader: Iterator, orig_layer: torch.nn.Module,
                              num_reconstruction_samples: int, samples_per_image: int) -> int:
        """Number of data batches needed so that every output feature of
        *orig_layer* is covered by num_reconstruction_samples samples."""
        assert isinstance(orig_layer, torch.nn.Linear)
        images_needed = int(num_reconstruction_samples / orig_layer.out_features)
        return math.ceil(images_needed / data_loader.batch_size)

    def get_sub_sampled_data(self, orig_layer: torch.nn.Module, input_data: np.ndarray,
                             output_data: np.ndarray, samples_per_image: int) -> Tuple[np.ndarray, np.ndarray]:
        """Linear layers have no spatial extent; pass the data through unchanged."""
        return (input_data, output_data)
('enqueue', args=1)
def _enqueue(app, value):
    """Add songs matching *value* to the play queue.

    *value* may be a library key, a filesystem path (added to the library on
    the fly), or a free-form library query.
    """
    playlist = app.window.playlist
    library = app.library
    if value in library:
        matches = [library[value]]
    elif os.path.isfile(value):
        matches = [library.add_filename(os.path.realpath(value))]
    else:
        matches = library.query(arg2text(value))
    matches.sort()
    playlist.enqueue(matches)
def usage():
    """Print the usage/help text for the METAR parsing script, then exit
    with status 1."""
    script_name = os.path.basename(sys.argv[0])
    print('Usage: ', script_name, '[-s] [<file>]')
    print('Options:\n <file> ... a file containing METAR reports to parse\n -q ....... run "quietly" - just report parsing error.\n -s ....... run silently. (no output)\n -p ....... run with profiling turned on.\n This program reads lines containing coded METAR reports from a file\n and prints human-reable reports. Lines are taken from stdin if no\n file is given. For testing purposes, the script can run silently,\n reporting only when a METAR report can\'t be fully parsed.\n ')
    sys.exit(1)
class BasicBlock(nn.Module):
    """Pre-activation wide-resnet basic block: BN-ReLU-Conv3x3 twice, with
    optional dropout between the convolutions and a 1x1 convolution shortcut
    when the channel count or stride changes."""

    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.LeakyReLU(0.1, inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.LeakyReLU(0.1, inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.droprate = dropRate
        # Identity shortcut when shapes match; 1x1 projection otherwise.
        self.equalInOut = in_planes == out_planes
        if self.equalInOut:
            self.convShortcut = None
        else:
            self.convShortcut = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                                          stride=stride, padding=0, bias=False)

    def forward(self, x):
        if self.equalInOut:
            # Identity shortcut taken from the raw input.
            activated = self.relu1(self.bn1(x))
            shortcut = x
            out = self.conv1(activated)
        else:
            # Shortcut projection is applied to the *activated* input.
            x = self.relu1(self.bn1(x))
            shortcut = self.convShortcut(x)
            out = self.conv1(x)
        out = self.relu2(self.bn2(out))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        return torch.add(shortcut, out)
def transform_super_expr(builder: IRBuilder, o: SuperExpr) -> Value:
    """Lower a ``super(...)`` expression: call builtins.super at runtime and
    then load the requested attribute from the resulting proxy."""
    super_obj = builder.load_module_attr_by_fullname('builtins.super', o.line)
    if o.call.args:
        # Explicit form super(T, obj): forward the evaluated arguments.
        call_args = [builder.accept(arg) for arg in o.call.args]
    else:
        # Zero-argument form: synthesize the (type, self) pair ourselves.
        assert o.info is not None
        type_obj = builder.load_native_type_object(o.info.fullname)
        class_ir = builder.mapper.type_to_ir[o.info]
        arg_iter = iter(builder.builder.args)
        vself: Value = next(arg_iter)
        if builder.fn_info.is_generator:
            # Inside a generator the real self lives in the generator
            # environment; index 6 mirrors its fixed layout — TODO confirm.
            self_target = list(builder.symtables[(- 1)].values())[6]
            vself = builder.read(self_target, builder.fn_info.fitem.line)
        elif not class_ir.is_ext_class:
            # Non-extension class methods carry an extra leading argument.
            vself = next(arg_iter)
        call_args = [type_obj, vself]
    proxy = builder.py_call(super_obj, call_args, o.line)
    return builder.py_get_attr(proxy, o.name, o.line)
class InitWeights_He(object):
    """Callable He (Kaiming) weight initializer for use with ``module.apply``.

    Applies ``nn.init.kaiming_normal_`` to the weights of all 2-D/3-D
    convolution and transposed-convolution modules and zeroes their biases.
    """

    def __init__(self, neg_slope: float = 0.01):
        # Negative slope forwarded to kaiming_normal_ (leaky-ReLU assumption).
        self.neg_slope = neg_slope

    def __call__(self, module):
        if isinstance(module, (nn.Conv3d, nn.Conv2d, nn.ConvTranspose2d, nn.ConvTranspose3d)):
            # nn.init.* functions modify the tensor in place; re-assigning
            # their return value to module.weight/bias (as the original code
            # did) is redundant and risks replacing the registered
            # nn.Parameter with a plain Tensor.
            nn.init.kaiming_normal_(module.weight, a=self.neg_slope)
            if module.bias is not None:
                nn.init.constant_(module.bias, 0)
def main(_):
    """Run BERT pre-training and/or evaluation with a TPUEstimator.

    All configuration comes from absl-style FLAGS; ``_`` is the unused
    positional argument passed in by tf.app.run.
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    if ((not FLAGS.do_train) and (not FLAGS.do_eval)):
        raise ValueError('At least one of `do_train` or `do_eval` must be True.')
    bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
    tf.gfile.MakeDirs(FLAGS.output_dir)
    # Expand the comma-separated glob patterns into concrete input files.
    input_files = []
    for input_pattern in FLAGS.input_file.split(','):
        input_files.extend(tf.gfile.Glob(input_pattern))
    tf.logging.info('*** Input Files ***')
    for input_file in input_files:
        tf.logging.info((' %s' % input_file))
    tpu_cluster_resolver = None
    if (FLAGS.use_tpu and FLAGS.tpu_name):
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
    run_config = tf.contrib.tpu.RunConfig(cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host))
    # One-hot embeddings are only used on TPU; on CPU/GPU gather is faster.
    model_fn = model_fn_builder(bert_config=bert_config, init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate, num_train_steps=FLAGS.num_train_steps, num_warmup_steps=FLAGS.num_warmup_steps, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_tpu)
    # TPUEstimator falls back to CPU/GPU when use_tpu is False.
    estimator = tf.contrib.tpu.TPUEstimator(use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size)
    if FLAGS.do_train:
        tf.logging.info('***** Running training *****')
        tf.logging.info(' Batch size = %d', FLAGS.train_batch_size)
        train_input_fn = input_fn_builder(input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=True)
        estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
    if FLAGS.do_eval:
        tf.logging.info('***** Running evaluation *****')
        tf.logging.info(' Batch size = %d', FLAGS.eval_batch_size)
        eval_input_fn = input_fn_builder(input_files=input_files, max_seq_length=FLAGS.max_seq_length, max_predictions_per_seq=FLAGS.max_predictions_per_seq, is_training=False)
        result = estimator.evaluate(input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)
        # Persist the evaluation metrics next to the checkpoints.
        output_eval_file = os.path.join(FLAGS.output_dir, 'eval_results.txt')
        with tf.gfile.GFile(output_eval_file, 'w') as writer:
            tf.logging.info('***** Eval results *****')
            for key in sorted(result.keys()):
                tf.logging.info(' %s = %s', key, str(result[key]))
                writer.write(('%s = %s\n' % (key, str(result[key]))))
def test_call_graph():
    """The call graph of an Adjoint bloq is the adjoint of the call graph."""
    (call_graph, _) = Adjoint(TestBloqWithCallGraph()).call_graph()
    actual_edges = {f'{caller} -> {callee}' for (caller, callee) in call_graph.edges}
    expected_edges = {'Adjoint(subbloq=TestBloqWithCallGraph()) -> Adjoint(subbloq=TestAtom())', 'Adjoint(subbloq=TestBloqWithCallGraph()) -> Adjoint(subbloq=TestParallelCombo())', 'Adjoint(subbloq=TestBloqWithCallGraph()) -> Adjoint(subbloq=TestSerialCombo())', 'Adjoint(subbloq=TestParallelCombo()) -> Adjoint(subbloq=Join(n=3))', 'Adjoint(subbloq=TestParallelCombo()) -> Adjoint(subbloq=Split(n=3))', 'Adjoint(subbloq=TestParallelCombo()) -> Adjoint(subbloq=TestAtom())', "Adjoint(subbloq=TestSerialCombo()) -> Adjoint(subbloq=TestAtom('atom0'))", "Adjoint(subbloq=TestSerialCombo()) -> Adjoint(subbloq=TestAtom('atom1'))", "Adjoint(subbloq=TestSerialCombo()) -> Adjoint(subbloq=TestAtom('atom2'))"}
    assert actual_edges == expected_edges
class ScanningLoader(TestLoader):
    """Test loader that, for packages, also recursively collects tests from
    all submodules and from an optional ``additional_tests()`` hook."""

    def __init__(self):
        TestLoader.__init__(self)
        # Modules already scanned, to avoid infinite recursion / duplicates.
        self._visited = set()

    def loadTestsFromModule(self, module, pattern=None):
        """Return a suite of all tests in *module*; ``None`` when the module
        has already been visited."""
        if module in self._visited:
            return None
        self._visited.add(module)
        tests = [TestLoader.loadTestsFromModule(self, module)]
        if hasattr(module, 'additional_tests'):
            tests.append(module.additional_tests())
        if hasattr(module, '__path__'):
            # It's a package: recurse into every contained module/subpackage.
            for entry in resource_listdir(module.__name__, ''):
                if entry.endswith('.py') and entry != '__init__.py':
                    submodule = module.__name__ + '.' + entry[:(- 3)]
                elif resource_exists(module.__name__, entry + '/__init__.py'):
                    submodule = module.__name__ + '.' + entry
                else:
                    continue
                tests.append(self.loadTestsFromName(submodule))
        if len(tests) == 1:
            return tests[0]
        return self.suiteClass(tests)
class TestSARComposites(unittest.TestCase):
    """Tests for the SAR RGB composites."""

    def test_sar_ice(self):
        """SARIce combines HH/HV backscatter into the expected RGB values."""
        import dask.array as da
        import numpy as np
        import xarray as xr
        from satpy.composites.sar import SARIce
        shape = (2, 2)
        comp = SARIce('sar_ice', prerequisites=('hh', 'hv'), standard_name='sar-ice')
        hh = xr.DataArray(da.zeros(shape, chunks=25) + 2000, dims=('y', 'x'), attrs={'name': 'hh'})
        hv = xr.DataArray(da.zeros(shape, chunks=25) + 1000, dims=('y', 'x'), attrs={'name': 'hv'})
        res = comp((hh, hv))
        assert isinstance(res, xr.DataArray)
        assert isinstance(res.data, da.Array)
        assert res.attrs['name'] == 'sar_ice'
        assert res.attrs['standard_name'] == 'sar-ice'
        values = res.compute()
        for band, expected in (('R', 31.), ('G', 159869.), ('B', 44.)):
            np.testing.assert_allclose(values.sel(bands=band), expected)

    def test_sar_ice_log(self):
        """SARIceLog combines log-scaled HH/HV into the expected RGB values."""
        import dask.array as da
        import numpy as np
        import xarray as xr
        from satpy.composites.sar import SARIceLog
        shape = (2, 2)
        comp = SARIceLog('sar_ice_log', prerequisites=('hh', 'hv'), standard_name='sar-ice-log')
        hh = xr.DataArray(da.zeros(shape, chunks=25) - 10, dims=('y', 'x'), attrs={'name': 'hh'})
        hv = xr.DataArray(da.zeros(shape, chunks=25) - 20, dims=('y', 'x'), attrs={'name': 'hv'})
        res = comp((hh, hv))
        assert isinstance(res, xr.DataArray)
        assert isinstance(res.data, da.Array)
        assert res.attrs['name'] == 'sar_ice_log'
        assert res.attrs['standard_name'] == 'sar-ice-log'
        values = res.compute()
        for band, expected in (('R', (- 20)), ('G', (- 4.6)), ('B', (- 10))):
            np.testing.assert_allclose(values.sel(bands=band), expected)
def train_model(model, model_test, criterion, optimizer, scheduler, num_epochs=25):
    """Train a multi-view (satellite/street/drone[/google]) geo-localization
    model.

    Relies on module-level state: opt (CLI options), dataloaders,
    dataset_sizes, use_gpu, fp16, start_epoch, version, y_loss, y_err and the
    helpers one_LPN_output, update_average, save_network.
    Returns the trained model.
    """
    since = time.time()
    # Linear learning-rate warm-up factor, ramped toward 1.0 over
    # opt.warm_epoch epochs.
    warm_up = 0.1
    warm_iteration = (round((dataset_sizes['satellite'] / opt.batchsize)) * opt.warm_epoch)
    for epoch in range((num_epochs - start_epoch)):
        epoch = (epoch + start_epoch)
        print('Epoch {}/{}'.format(epoch, (num_epochs - 1)))
        print(('-' * 10))
        for phase in ['train']:
            if (phase == 'train'):
                model.train(True)
            else:
                model.train(False)
            running_loss = 0.0
            running_corrects = 0.0
            running_corrects2 = 0.0
            running_corrects3 = 0.0
            # Iterate the four views in lockstep; data4 (google) is optional.
            for (data, data2, data3, data4) in zip(dataloaders['satellite'], dataloaders['street'], dataloaders['drone'], dataloaders['google']):
                (inputs, labels) = data
                (inputs2, labels2) = data2
                (inputs3, labels3) = data3
                (inputs4, labels4) = data4
                (now_batch_size, c, h, w) = inputs.shape
                # Skip the last partial batch.
                if (now_batch_size < opt.batchsize):
                    continue
                if use_gpu:
                    inputs = Variable(inputs.cuda().detach())
                    inputs2 = Variable(inputs2.cuda().detach())
                    inputs3 = Variable(inputs3.cuda().detach())
                    labels = Variable(labels.cuda().detach())
                    labels2 = Variable(labels2.cuda().detach())
                    labels3 = Variable(labels3.cuda().detach())
                    if opt.extra_Google:
                        inputs4 = Variable(inputs4.cuda().detach())
                        labels4 = Variable(labels4.cuda().detach())
                else:
                    (inputs, labels) = (Variable(inputs), Variable(labels))
                optimizer.zero_grad()
                # Forward pass; the number of outputs depends on opt.views.
                if (phase == 'val'):
                    with torch.no_grad():
                        (outputs, outputs2) = model(inputs, inputs2)
                elif (opt.views == 2):
                    (outputs, outputs2) = model(inputs, inputs2)
                elif (opt.views == 3):
                    if opt.extra_Google:
                        (outputs, outputs2, outputs3, outputs4) = model(inputs, inputs2, inputs3, inputs4)
                    else:
                        (outputs, outputs2, outputs3) = model(inputs, inputs2, inputs3)
                if (not opt.LPN):
                    # Plain classifier heads: sum of per-view cross-entropies.
                    (_, preds) = torch.max(outputs.data, 1)
                    (_, preds2) = torch.max(outputs2.data, 1)
                    if (opt.views == 2):
                        loss = (criterion(outputs, labels) + criterion(outputs2, labels2))
                    elif (opt.views == 3):
                        (_, preds3) = torch.max(outputs3.data, 1)
                        loss = ((criterion(outputs, labels) + criterion(outputs2, labels2)) + criterion(outputs3, labels3))
                        if opt.extra_Google:
                            loss += criterion(outputs4, labels4)
                else:
                    # LPN heads: per-part predictions/losses via one_LPN_output.
                    (preds, loss) = one_LPN_output(outputs, labels, criterion, opt.block)
                    (preds2, loss2) = one_LPN_output(outputs2, labels2, criterion, opt.block)
                    if (opt.views == 2):
                        loss = (loss + loss2)
                    elif (opt.views == 3):
                        (preds3, loss3) = one_LPN_output(outputs3, labels3, criterion, opt.block)
                        loss = ((loss + loss2) + loss3)
                        if opt.extra_Google:
                            (_, loss4) = one_LPN_output(outputs4, labels4, criterion, opt.block)
                            loss = (loss + loss4)
                # Warm-up: scale the loss up gradually during the first epochs.
                if ((epoch < opt.warm_epoch) and (phase == 'train')):
                    warm_up = min(1.0, (warm_up + (0.9 / warm_iteration)))
                    loss *= warm_up
                if (phase == 'train'):
                    if fp16:
                        # apex mixed-precision backward.
                        with amp.scale_loss(loss, optimizer) as scaled_loss:
                            scaled_loss.backward()
                    else:
                        loss.backward()
                    optimizer.step()
                    # Optional weight moving average into model_test.
                    if (opt.moving_avg < 1.0):
                        update_average(model_test, model, opt.moving_avg)
                # loss.item() exists from torch 0.4 on; older versions index .data.
                if ((int(version[0]) > 0) or (int(version[2]) > 3)):
                    running_loss += (loss.item() * now_batch_size)
                else:
                    running_loss += (loss.data[0] * now_batch_size)
                running_corrects += float(torch.sum((preds == labels.data)))
                running_corrects2 += float(torch.sum((preds2 == labels2.data)))
                if (opt.views == 3):
                    running_corrects3 += float(torch.sum((preds3 == labels3.data)))
            # Per-epoch statistics (normalized by the satellite split size).
            epoch_loss = (running_loss / dataset_sizes['satellite'])
            epoch_acc = (running_corrects / dataset_sizes['satellite'])
            epoch_acc2 = (running_corrects2 / dataset_sizes['satellite'])
            if (opt.views == 2):
                print('{} Loss: {:.4f} Satellite_Acc: {:.4f} Street_Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc, epoch_acc2))
            elif (opt.views == 3):
                epoch_acc3 = (running_corrects3 / dataset_sizes['satellite'])
                print('{} Loss: {:.4f} Satellite_Acc: {:.4f} Street_Acc: {:.4f} Drone_Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc, epoch_acc2, epoch_acc3))
            y_loss[phase].append(epoch_loss)
            y_err[phase].append((1.0 - epoch_acc))
            if (phase == 'train'):
                scheduler.step()
            last_model_wts = model.state_dict()
            # Checkpoint every 20 epochs.
            if ((epoch % 20) == 19):
                save_network(model, opt.name, epoch)
        time_elapsed = (time.time() - since)
        print('Training complete in {:.0f}m {:.0f}s'.format((time_elapsed // 60), (time_elapsed % 60)))
        print()
    time_elapsed = (time.time() - since)
    print('Training complete in {:.0f}m {:.0f}s'.format((time_elapsed // 60), (time_elapsed % 60)))
    return model
(frozen=True)
class PackedSequencePlus():
    """A torch ``PackedSequence`` together with the bookkeeping needed to map
    between the original batch order and the length-sorted order packing
    requires.

    NOTE(review): the attrs class decorator (likely ``@attr.s(frozen=True)``,
    given that ``__attrs_post_init__`` writes through ``__dict__``) and the
    ``@classmethod`` decorators on ``from_lists``/``from_gather``/``cat_seqs``
    appear to have been stripped during extraction — confirm against the
    upstream source.
    """

    ps = attr.ib()  # torch.nn.utils.rnn.PackedSequence holding the data
    lengths = attr.ib()  # per-sequence lengths, in sorted (descending) order
    sort_to_orig = attr.ib(converter=np.array)  # sorted index -> original index
    orig_to_sort = attr.ib(converter=np.array)  # original index -> sorted index

    def descending(self, attribute, value):
        # attrs-style validator: packing requires non-increasing lengths.
        for (x, y) in zip(value, value[1:]):
            if (not (x >= y)):
                raise ValueError(f'Lengths are not descending: {value}')

    def __attrs_post_init__(self):
        # cum_batch_sizes[t] = flat offset into ps.data of timestep t's first
        # element; written through __dict__ because the instance is frozen.
        self.__dict__['cum_batch_sizes'] = np.cumsum(([0] + self.ps.batch_sizes[:(- 1)].tolist())).astype(np.int_)

    def apply(self, fn):
        """Return a copy whose packed data tensor is ``fn(self.ps.data)``."""
        return attr.evolve(self, ps=torch.nn.utils.rnn.PackedSequence(fn(self.ps.data), self.ps.batch_sizes))

    def with_new_ps(self, ps):
        """Return a copy with *ps* as the replacement PackedSequence."""
        return attr.evolve(self, ps=ps)

    def pad(self, batch_first, others_to_unsort=(), padding_value=0.0):
        """Unpack to a padded tensor restored to the original batch order.

        Returns (padded, lengths, *others), where each tensor in
        *others_to_unsort* is re-indexed the same way as the padded output.
        """
        (padded, seq_lengths) = torch.nn.utils.rnn.pad_packed_sequence(self.ps, batch_first=batch_first, padding_value=padding_value)
        results = (padded[self.sort_to_orig], [seq_lengths[i] for i in self.sort_to_orig])
        return (results + tuple((t[self.sort_to_orig] for t in others_to_unsort)))

    def cuda(self):
        """Move the packed data to the GPU (no-op if already there)."""
        if self.ps.data.is_cuda:
            return self
        return self.apply((lambda d: d.cuda()))

    def raw_index(self, orig_batch_idx, seq_idx):
        """Flat index (or indices) into ``ps.data`` for the given original
        batch item at the given timestep(s)."""
        result = (np.take(self.cum_batch_sizes, seq_idx) + np.take(self.sort_to_orig, orig_batch_idx))
        if (self.ps.data is not None):
            assert np.all((result < len(self.ps.data)))
        return result

    def select(self, orig_batch_idx, seq_idx=None):
        """Data for one batch item: the whole sequence, or one timestep."""
        if (seq_idx is None):
            return self.ps.data[self.raw_index(orig_batch_idx, range(self.lengths[self.sort_to_orig[orig_batch_idx]]))]
        return self.ps.data[self.raw_index(orig_batch_idx, seq_idx)]

    def select_subseq(self, orig_batch_indices):
        """New PackedSequencePlus restricted to the given batch items."""
        lengths = [self.lengths[self.sort_to_orig[i]] for i in orig_batch_indices]
        return self.from_gather(lengths=lengths, map_index=self.raw_index, gather_from_indices=(lambda indices: self.ps.data[torch.LongTensor(indices)]))

    def orig_index(self, raw_idx):
        """Invert raw_index: flat data index -> (original batch idx, timestep)."""
        seq_idx = (np.searchsorted(self.cum_batch_sizes, raw_idx, side='right') - 1)
        batch_idx = (raw_idx - self.cum_batch_sizes[seq_idx])
        orig_batch_idx = self.sort_to_orig[batch_idx]
        return (orig_batch_idx, seq_idx)

    def orig_batch_indices(self):
        """Original batch index of every element of ps.data, in data order."""
        result = []
        for bs in self.ps.batch_sizes:
            result.extend(self.orig_to_sort[:bs])
        return np.array(result)

    def orig_lengths(self):
        """Yield the sequence lengths in original (unsorted) batch order."""
        for sort_idx in self.sort_to_orig:
            (yield self.lengths[sort_idx])

    def expand(self, k):
        """Repeat every batch item *k* times along the batch dimension."""
        v = self.ps.data
        ps_data = v.unsqueeze(1).repeat(1, k, *([1] * (v.dim() - 1))).view((- 1), *v.shape[1:])
        batch_sizes = (np.array(self.ps.batch_sizes) * k).tolist()
        lengths = np.repeat(self.lengths, k).tolist()
        sort_to_orig = [exp_i for i in self.sort_to_orig for exp_i in range((i * k), ((i * k) + k))]
        orig_to_sort = [exp_i for i in self.orig_to_sort for exp_i in range((i * k), ((i * k) + k))]
        return PackedSequencePlus(_make_packed_sequence(ps_data, batch_sizes), lengths, sort_to_orig, orig_to_sort)

    def from_lists(cls, lists, item_shape, device, item_to_tensor):
        """Build from variable-length lists, embedding each item via
        ``item_to_tensor(item, batch_idx)``."""
        result_list = []
        (sorted_lists, sort_to_orig, orig_to_sort) = sort_lists_by_length(lists)
        lengths = [len(lst) for lst in sorted_lists]
        batch_bounds = batch_bounds_for_packing(lengths)
        idx = 0
        # Emit items timestep-major, matching PackedSequence layout.
        for (i, bound) in enumerate(batch_bounds):
            for (batch_idx, lst) in enumerate(sorted_lists[:bound]):
                embed = item_to_tensor(lst[i], batch_idx)
                result_list.append(embed)
                idx += 1
        result = torch.stack(result_list, 0)
        return cls(_make_packed_sequence(result, batch_bounds), lengths, sort_to_orig, orig_to_sort)

    def from_gather(cls, lengths, map_index, gather_from_indices):
        """Build by gathering elements through ``map_index(batch_idx, seq_idx)``
        and fetching them all at once with ``gather_from_indices``."""
        (sorted_lengths, sort_to_orig, orig_to_sort) = argsort(lengths, reverse=True)
        batch_bounds = batch_bounds_for_packing(sorted_lengths)
        indices = []
        for (seq_idx, bound) in enumerate(batch_bounds):
            for batch_idx in orig_to_sort[:bound]:
                assert (seq_idx < lengths[batch_idx])
                indices.append(map_index(batch_idx, seq_idx))
        result = gather_from_indices(indices)
        return cls(_make_packed_sequence(result, batch_bounds), sorted_lengths, sort_to_orig, orig_to_sort)

    def cat_seqs(cls, items):
        """Concatenate several PackedSequencePlus along time, per batch item."""
        batch_size = len(items[0].lengths)
        assert all(((len(item.lengths) == batch_size) for item in items[1:]))
        # np.int was deprecated in NumPy 1.20 and removed in 1.24; use the
        # concrete np.int64 instead (same semantics on all common platforms).
        unsorted_concat_lengths = np.zeros(batch_size, dtype=np.int64)
        for item in items:
            unsorted_concat_lengths += list(item.orig_lengths())
        concat_data = torch.cat([item.ps.data for item in items], dim=0)
        # Offset of each item's data within the concatenated tensor.
        concat_data_base_indices = np.cumsum(([0] + [item.ps.data.shape[0] for item in items]))
        item_map_per_batch_item = []
        for batch_idx in range(batch_size):
            item_map_per_batch_item.append([(item_idx, item, i) for (item_idx, item) in enumerate(items) for i in range(item.lengths[item.sort_to_orig[batch_idx]])])

        def map_index(batch_idx, seq_idx):
            # Map a position in the concatenated sequence back to the owning
            # item and its flat data index.
            (item_idx, item, seq_idx_within_item) = item_map_per_batch_item[batch_idx][seq_idx]
            return (concat_data_base_indices[item_idx] + item.raw_index(batch_idx, seq_idx_within_item))
        return cls.from_gather(lengths=unsorted_concat_lengths, map_index=map_index, gather_from_indices=(lambda indices: concat_data[torch.LongTensor(indices)]))
.django_db
def test_converts_one_user(user_factory):
    """A converted endpoint mirrors the user's fields and starts with empty
    submission/schedule/cancellation lists."""
    user = user_factory()
    endpoint = convert_user_to_endpoint(user)
    assert endpoint.id == str(user.id)
    assert endpoint.name == user.name
    assert endpoint.full_name == user.full_name
    assert endpoint.is_staff == user.is_staff
    for collection in (endpoint.has_sent_submission_to,
                       endpoint.has_item_in_schedule,
                       endpoint.has_cancelled_talks):
        assert collection == []
def test__getting_started__example_variable_scaling():
    """Smoke test: the variable-scaling getting-started example builds an OCP."""
    from bioptim.examples.getting_started import example_variable_scaling as ocp_module
    model_path = os.path.dirname(ocp_module.__file__) + '/models/pendulum.bioMod'
    ocp_module.prepare_ocp(
        biorbd_model_path=model_path,
        final_time=1 / 10,
        n_shooting=30,
        phase_dynamics=PhaseDynamics.SHARED_DURING_THE_PHASE,
        expand_dynamics=False,
    )
def allbadtonan(function):
    """Wrap a nan-aware reduction (e.g. ``np.nansum``) so that slices that
    contain only NaNs yield NaN instead of the reduction's default (0).

    The wrapped callable accepts ``(data, axis=None, keepdims=None)``;
    ``keepdims`` is only forwarded when explicitly supplied, so wrapped
    functions lacking that parameter still work.
    """

    def f(data, axis=None, keepdims=None):
        if keepdims is None:
            result = function(data, axis=axis)
        else:
            result = function(data, axis=axis, keepdims=keepdims)
        # The original gated this fix-up on numpy >= 1.9 via
        # distutils.version.LooseVersion; distutils was removed in Python 3.12
        # (PEP 632) and numpy >= 1.9 (2014) is universal, so only the
        # has-__len__ check remains.  Scalar results (e.g. axis=None without
        # keepdims) are returned unchanged, matching the original behavior.
        if hasattr(result, '__len__'):
            if axis is None:
                # Full reduction (with keepdims): NaN iff everything is NaN.
                if np.all(np.isnan(data)):
                    return np.nan
                else:
                    return result
            # Mark output positions whose entire input slice was NaN.
            if keepdims is None:
                nans = np.all(np.isnan(data), axis=axis)
            else:
                nans = np.all(np.isnan(data), axis=axis, keepdims=keepdims)
            result[nans] = np.nan
        return result
    return f
(('%s.visualize_utils.plt' % __name__))
def test_show_img_boundary(mock_plt):
    """Invalid inputs raise AssertionError; a valid pair plots exactly once."""
    img = np.random.rand(10, 10)
    boundary = [0, 0, 1, 0, 1, 1, 0, 1]
    invalid_cases = (([], boundary), (img, np.array([])))
    for bad_img, bad_boundary in invalid_cases:
        with pytest.raises(AssertionError):
            visualize_utils.show_img_boundary(bad_img, bad_boundary)
    visualize_utils.show_img_boundary(img, boundary)
    mock_plt.imshow.assert_called_once()
    mock_plt.show.assert_called_once()
def generate_NUS_cross_val_split_files(root, split_num=5):
raw_files = sorted(glob.glob(os.path.join(root, 'RAW/*.*')))
raw_processed_files = sorted(glob.glob(os.path.join(root, 'Raw_Processed/*.*')))
jpg_files = sorted(glob.glob(os.path.join(root, 'JPG/*.*')))
paired_files = list(zip(raw_files, raw_processed_files, jpg_files))
random.shuffle(paired_files)
sample_num = len(raw_files)
train_val_ratio = (1 / split_num)
for i in range(split_num):
train_files = (paired_files[:int(((sample_num * train_val_ratio) * i))] + paired_files[int(((sample_num * train_val_ratio) * (i + 1))):])
val_files = paired_files[int(((sample_num * train_val_ratio) * i)):int(((sample_num * train_val_ratio) * (i + 1)))]
with open(os.path.join(root, f'train_nocompress_f{i}.txt'), 'w') as f:
for (raw_path, raw_processed_path, jpg_path) in train_files:
f.write(f'''{raw_path}
''')
with open(os.path.join(root, f'train_jpg_f{i}.txt'), 'w') as f:
for (raw_path, raw_processed_path, jpg_path) in train_files:
f.write(f'''{jpg_path}, {raw_processed_path}
''')
for (device, qual) in itertools.product(['Samsung', 'Olympus', 'Sony'], ['', '_jpg']):
with open(os.path.join(root, f'val_{device}{qual}_f{i}.txt'), 'w') as f:
test_pairs = paired_files[int(((sample_num * train_val_ratio) * i)):int(((sample_num * train_val_ratio) * (i + 1)))]
filter_device_pairs = list(filter((lambda x: (device in x[0])), test_pairs))
for (raw_path, raw_processed_path, jpg_path) in filter_device_pairs:
if (qual == ''):
f.write((raw_path + '\r'))
else:
f.write(f'''{jpg_path}, {raw_processed_path}
''') |
class TestPotential(unittest.TestCase):
def create_test_molecule(self):
    """Build a two-atom (H-D) test molecule with a single absolute-stretching
    degree of freedom along the bond (1, 0).

    Fix: the original definition omitted ``self``, so the existing calls
    ``self.create_test_molecule()`` raised TypeError (unexpected argument).
    """
    # Stretch the (1, 0) atom pair; partial pre-binds the kwargs.
    stretch = partial(Molecule.absolute_stretching, kwargs={'atom_pair': (1, 0)})
    # Masses are H and D in kilograms.
    molecule = Molecule(geometry=[['H', [0.0, 0.0, 0.0]], ['D', [0.0, 0.0, 1.0]]],
                        degrees_of_freedom=[stretch],
                        masses=[1.6735328e-27, 3.444946e-27])
    return molecule
def test_morse(self):
    """Fit a Morse potential to sampled bond-length/energy data and check the
    equilibrium geometry, minimum energy, wave number, point evaluations and
    vibrational energy levels against reference values."""
    # Three repeated sweeps of bond lengths (Angstrom) ...
    xdata = np.array([0.45, 0.75, 1.05, 1.35, 1.65, 1.95, 2.25, 2.55, 2.85, 3.15, 3.45, 3.75, 4.05, 4.35, 4.65, 4.95, 5.25, 0.45, 0.75, 1.05, 1.35, 1.65, 1.95, 2.25, 2.55, 2.85, 3.15, 3.45, 3.75, 4.05, 4.35, 4.65, 4.95, 5.25, 0.45, 0.75, 1.05, 1.35, 1.65, 1.95, 2.25, 2.55, 2.85, 3.15, 3.45, 3.75, 4.05, 4.35, 4.65, 4.95, 5.25])
    # ... and the corresponding energies (J/mol).
    ydata = np.array([(- 2254757.5348101), (- 2746067.), (- 2664406.), (- 2611323.), (- 2502198.), (- 2417457.), (- 2390778.), (- 2379482.), (- 2373850.), (- 2361426.), (- 2369992.6305902), (- 2363833.), (- 2360577.), (- 2356002.), (- 2355574.), (- 2357254.), (- 2351656.), (- 2308055.), (- 2797576.), (- 2715367.), (- 2616523.), (- 2498053.2658529), (- 2424288.), (- 2393385.), (- 2371800.), (- 2353202.), (- 2346873.), (- 2343485.8487826), (- 2342937.), (- 2350276.), (- 2347674.), (- 2346912.), (- 2339886.), (- 2353456.), (- 2359599.), (- 2811321.), (- 2763866.), (- 2613385.), (- 2506804.), (- 2419329.), (- 2393428.), (- 2374166.), (- 2352961.), (- 2344972.), (- 2356294.5588125), (- 2341396.), (- 2337344.), (- 2339793.), (- 2335667.), (- 2327347.), (- 2341367.)])
    # Convert energies to Hartree for the fit.
    ydata_hartree = (ydata / HARTREE_TO_J_PER_MOL)
    xdata_angstrom = xdata
    m = self.create_test_molecule()
    morse = MorsePotential(m)
    xdata = np.array(xdata_angstrom)
    ydata = np.array(ydata_hartree)
    morse.fit(xdata, ydata)
    minimal_energy_distance = morse.get_equilibrium_geometry()
    minimal_energy = morse.eval(minimal_energy_distance)
    wave_number = morse.wave_number()
    result = np.array([minimal_energy_distance, minimal_energy, wave_number])
    # NOTE(review): reference values appear rounded to the comparison
    # precision (decimal=4) — confirm against the fit's expected output.
    benchmark = np.array([0., (- 1.), 3800.])
    np.testing.assert_array_almost_equal(result, benchmark, decimal=4)
    # Spot-check the fitted curve at a few radii.
    radia = np.array([0.5, 1, 1.5, 2])
    hartrees = np.array([(- 0.), (- 1.), (- 0.), (- 0.)])
    np.testing.assert_array_almost_equal(hartrees, morse.eval(radia), decimal=4)
    # Vibrational levels 2..7 of the fitted potential.
    vib_levels = []
    for level in range(2, 8):
        vib_levels.append(morse.vibrational_energy_level(level))
    vib_levels = np.array(vib_levels)
    vib_levels_ref = np.array([0., 0., 0., 0., 0., 0.])
    np.testing.assert_array_almost_equal(vib_levels, vib_levels_ref, decimal=4)
def test_harmonic(self):
    """Fit a HarmonicPotential to a sampled bond-length scan and check the
    equilibrium geometry, minimum energy, wave number, potential evaluation
    and vibrational energy levels against reference values.
    """
    # Three repeated scans of bond lengths 0.45..5.25 Angstrom (17 points each).
    xdata = np.array([0.45, 0.75, 1.05, 1.35, 1.65, 1.95, 2.25, 2.55, 2.85, 3.15, 3.45, 3.75, 4.05, 4.35, 4.65, 4.95, 5.25, 0.45, 0.75, 1.05, 1.35, 1.65, 1.95, 2.25, 2.55, 2.85, 3.15, 3.45, 3.75, 4.05, 4.35, 4.65, 4.95, 5.25, 0.45, 0.75, 1.05, 1.35, 1.65, 1.95, 2.25, 2.55, 2.85, 3.15, 3.45, 3.75, 4.05, 4.35, 4.65, 4.95, 5.25])
    # Corresponding total energies in J/mol (three scans, matching xdata order).
    ydata = np.array([(- 2254757.5348101), (- 2746067.), (- 2664406.), (- 2611323.), (- 2502198.), (- 2417457.), (- 2390778.), (- 2379482.), (- 2373850.), (- 2361426.), (- 2369992.6305902), (- 2363833.), (- 2360577.), (- 2356002.), (- 2355574.), (- 2357254.), (- 2351656.), (- 2308055.), (- 2797576.), (- 2715367.), (- 2616523.), (- 2498053.2658529), (- 2424288.), (- 2393385.), (- 2371800.), (- 2353202.), (- 2346873.), (- 2343485.8487826), (- 2342937.), (- 2350276.), (- 2347674.), (- 2346912.), (- 2339886.), (- 2353456.), (- 2359599.), (- 2811321.), (- 2763866.), (- 2613385.), (- 2506804.), (- 2419329.), (- 2393428.), (- 2374166.), (- 2352961.), (- 2344972.), (- 2356294.5588125), (- 2341396.), (- 2337344.), (- 2339793.), (- 2335667.), (- 2327347.), (- 2341367.)])
    # Convert energies to Hartree for the fit.
    ydata_hartree = (ydata / HARTREE_TO_J_PER_MOL)
    xdata_angstrom = xdata
    m = self.create_test_molecule()
    harmonic = HarmonicPotential(m)
    xdata = np.array(xdata_angstrom)
    ydata = np.array(ydata_hartree)
    # Pre-filter the raw scan into the subset suitable for a harmonic fit
    # (the harmonic form is only valid near the minimum).
    (xdata, ydata) = HarmonicPotential.process_fit_data(xdata, ydata)
    harmonic.fit(xdata, ydata)
    minimal_energy_distance = harmonic.get_equilibrium_geometry()
    minimal_energy = harmonic.eval(minimal_energy_distance)
    wave_number = harmonic.wave_number()
    result = np.array([minimal_energy_distance, minimal_energy, wave_number])
    # NOTE(review): reference values look rounded/placeholder (0., -1., 4670.);
    # confirm they are the intended benchmarks.
    benchmark = np.array([0., (- 1.), 4670.])
    np.testing.assert_array_almost_equal(result, benchmark)
    # Spot-check the fitted potential at a few radii.
    radia = np.array([0.5, 1, 1.5, 2])
    hartrees = np.array([(- 0.), (- 1.), (- 0.), 0.])
    np.testing.assert_array_almost_equal(hartrees, harmonic.eval(radia))
    # Vibrational levels 2..7 should match the reference ladder.
    vib_levels = []
    for level in range(2, 8):
        vib_levels.append(harmonic.vibrational_energy_level(level))
    vib_levels = np.array(vib_levels)
    vib_levels_ref = np.array([0., 0., 0., 0., 0., 0.])
    np.testing.assert_array_almost_equal(vib_levels, vib_levels_ref)
def solar_cell_solver(solar_cell: SolarCell, task: str, user_options: Union[Dict, State, None] = None):
    """Prepare a solar cell and dispatch `task` to its registered solver.

    :param solar_cell: the SolarCell structure to solve.
    :param task: key into ACTIONS_REGISTRY selecting what to compute.
    :param user_options: optional dict/State of options merged over the defaults.
    :raises ValueError: if `task` is not a registered action.
    """
    # Fix: isinstance (rather than an exact type() membership test) also
    # accepts subclasses of dict/State, which merge_dicts handles identically.
    if isinstance(user_options, (State, dict)):
        options = merge_dicts(default_options, user_options)
    else:
        options = merge_dicts(default_options)
    prepare_solar_cell(solar_cell, options)
    # The cell temperature always overrides whatever the options carried.
    options.T = solar_cell.T
    action = ACTIONS_REGISTRY.get(task)
    if action is None:
        raise ValueError(f"ERROR in 'solar_cell_solver' - Valid tasks are {list(ACTIONS_REGISTRY.keys())}.")
    action(solar_cell, options)
class EmbeddingExtractor(object):
    """Extract caption embeddings with several backends (Sentence-BERT,
    bert-as-service, SBERT+HDF5) and persist them to disk."""

    def extract_sentbert(self, caption_file: str, output: str, dev: bool = True, zh: bool = False):
        """Embed captions with Sentence-BERT; a multilingual model is used when zh=True."""
        from sentence_transformers import SentenceTransformer
        lang2model = {'zh': 'distiluse-base-multilingual-cased', 'en': 'bert-base-nli-mean-tokens'}
        lang = ('zh' if zh else 'en')
        model = SentenceTransformer(lang2model[lang])
        self.extract(caption_file, model, output, dev)

    def extract_originbert(self, caption_file: str, output: str, dev: bool = True, ip='localhost'):
        """Embed captions via a bert-serving server at `ip` (the client exposes
        the same .encode interface extract() expects of a model)."""
        from bert_serving.client import BertClient
        client = BertClient(ip)
        self.extract(caption_file, client, output, dev)

    def extract(self, caption_file: str, model, output, dev: bool):
        """Encode every caption in `caption_file` (JSON table with 'key',
        'caption' and — in dev mode — 'caption_index' columns) and pickle the
        resulting dict to `output`.

        dev=True  -> one vector per caption, keyed '<key>_<caption_index>'.
        dev=False -> vectors grouped per key, stacked into (num_captions, dim).
        """
        caption_df = pd.read_json(caption_file, dtype={'key': str})
        embeddings = {}
        # Fix: the two branches previously duplicated the whole iteration loop
        # and used `key not in embeddings.keys()`; merged into a single loop
        # with dict.setdefault for the grouping case.
        with tqdm(total=caption_df.shape[0], ascii=True) as pbar:
            for (idx, row) in caption_df.iterrows():
                key = row['key']
                vector = np.array(model.encode([row['caption']])).reshape((- 1))
                if dev:
                    embeddings[f"{key}_{row['caption_index']}"] = vector
                else:
                    embeddings.setdefault(key, []).append(vector)
                pbar.update()
        if (not dev):
            # Stack each key's caption vectors into one array.
            embeddings = {key: np.stack(vectors) for (key, vectors) in embeddings.items()}
        with open(output, 'wb') as f:
            pickle.dump(embeddings, f)

    def extract_sbert(self, input_json: str, output: str):
        """Embed captions from an 'audios' JSON structure and store them in an
        HDF5 file keyed '<audio_id>_<cap_id>'."""
        from sentence_transformers import SentenceTransformer
        import json
        import torch
        from h5py import File
        device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
        model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
        model = model.to(device)
        model.eval()
        data = json.load(open(input_json))['audios']
        with torch.no_grad(), tqdm(total=len(data), ascii=True) as pbar, File(output, 'w') as store:
            for sample in data:
                audio_id = sample['audio_id']
                for cap in sample['captions']:
                    cap_id = cap['cap_id']
                    store[f'{audio_id}_{cap_id}'] = model.encode(cap['caption'])
                pbar.update()
class TestSpiderDev108(unittest.TestCase):
    # NOTE(review): bare `(ONE_TEST_TIMEOUT)` looks like a decorator stripped
    # to an expression statement by decompilation — presumably it should read
    # `@ONE_TEST_TIMEOUT` on the method below; confirm against the original.
    (ONE_TEST_TIMEOUT)
    def test_spider_dev(self):
        """Ground Spider dev query #108's QDMR into SPARQL and check the SPARQL
        result matches the reference SQL result (column order enforced, row
        order not)."""
        split_name = 'dev'
        i_query = 108
        db_id = get_db_id(split_name, i_query)
        (rdf_graph, schema) = get_graph_and_schema(split_name, db_id)
        sql_query = get_sql_query(split_name, i_query)
        # NOTE(review): `correct_sparql_query` is built but never used below —
        # it appears to be kept only as a human-readable reference.
        correct_sparql_query = textwrap.dedent(' SELECT ?CountryName\n WHERE\n {\n {\n SELECT (max(?count) AS ?max)\n WHERE\n {\n {\n SELECT ?countries (count(?car_makers) AS ?count)\n WHERE\n {\n ?car_makers arc:car_makers:Id ?car_makers.\n ?car_makers arc:car_makers:Country ?Country.\n ?Country arc:car_makers:Country:countries:CountryId ?countries.\n }\n GROUP BY ?countries\n }\n }\n }\n {\n SELECT ?countries_1 (count(?car_makers_2) AS ?count_1)\n WHERE\n {\n ?car_makers_2 arc:car_makers:Id ?car_makers_2.\n ?car_makers_2 arc:car_makers:Country ?Country_2.\n ?Country_2 arc:car_makers:Country:countries:CountryId ?countries_1.\n }\n GROUP BY ?countries_1\n }\n FILTER(?count_1 = ?max).\n ?countries_1 arc:countries:CountryName ?CountryName.\n }')
        qdmr = get_qdmr_from_break(split_name, i_query)
        # Manual grounding of QDMR steps to tables/columns/comparatives.
        grounding = {}
        grounding[GroundingIndex(0, 0, 'car makers')] = GroundingKey.make_table_grounding('car_makers')
        grounding[GroundingIndex(1, 0, 'countries of #REF')] = GroundingKey.make_column_grounding('countries', 'CountryId')
        grounding[GroundingIndex(3, 2, 'is the highest')] = GroundingKey.make_comparative_grounding('max', None)
        grounding[GroundingIndex(4, 0, 'the name of #REF')] = GroundingKey.make_column_grounding('countries', 'CountryName')
        sparql_query = create_sparql_query_from_qdmr(qdmr, schema, rdf_graph, grounding)
        # Execute both the reference SQL and the generated SPARQL, then compare.
        result_correct = QueryResult.execute_query_sql(sql_query, schema)
        result = QueryResult.execute_query_to_rdf(sparql_query, rdf_graph, schema, virtuoso_server=VIRTUOSO_SPARQL_SERVICE)
        (equal, message) = result.is_equal_to(result_correct, require_column_order=True, require_row_order=False, return_message=True)
        self.assertTrue(equal, message)
class CFB8(ModeWithInitializationVector):
    # Cipher Feedback mode with an 8-bit feedback width (CFB8).
    name = 'CFB8'

    def __init__(self, initialization_vector: bytes):
        # Validate the IV is a bytes-like object before storing it.
        utils._check_byteslike('initialization_vector', initialization_vector)
        self._initialization_vector = initialization_vector

    def initialization_vector(self) -> bytes:
        # NOTE(review): this accessor is normally exposed as a @property; the
        # decorator may have been stripped during extraction — confirm callers.
        return self._initialization_vector

    # Shared validator: checks IV and key length against the algorithm.
    validate_for_algorithm = _check_iv_and_key_length
class EpsilonGreedy(BaseExploration):
    """Epsilon-greedy exploration: pick a uniformly random action with
    probability epsilon (and always during the warm-up exploration phase),
    otherwise pick the greedy (argmax-Q) action."""

    def __init__(self, exploration_steps, epsilon):
        super().__init__(exploration_steps, epsilon)
        # Final (post-decay) exploration rate.
        self.epsilon = epsilon['end']

    def select_action(self, q_values, step_count):
        """Return an action index for the given Q-values and step counter."""
        should_explore = np.random.rand() < self.epsilon
        if should_explore or step_count <= self.exploration_steps:
            return np.random.randint(0, len(q_values))
        return np.argmax(q_values)
class PendulumEnv(gym.Env):
    """Classic inverted-pendulum swing-up environment (continuous torque)."""
    metadata = {'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 30}

    def __init__(self):
        # Physical limits and integration step.
        self.max_speed = 8
        self.max_torque = 2.0
        self.dt = 0.05
        self.viewer = None
        # Observation is (cos(theta), sin(theta), thetadot).
        high = np.array([1.0, 1.0, self.max_speed])
        self.action_space = spaces.Box(low=(- self.max_torque), high=self.max_torque, shape=(1,))
        self.observation_space = spaces.Box(low=(- high), high=high)
        self._seed()

    def _seed(self, seed=None):
        # Seed the environment's private RNG.
        (self.np_random, seed) = seeding.np_random(seed)
        return [seed]

    def _step(self, u):
        (th, thdot) = self.state
        # Pendulum constants: gravity, mass, length.
        g = 10.0
        m = 1.0
        l = 1.0
        dt = self.dt
        u = np.clip(u, (- self.max_torque), self.max_torque)[0]
        self.last_u = u
        # Quadratic cost on angle error, angular velocity and torque.
        costs = (((angle_normalize(th) ** 2) + (0.1 * (thdot ** 2))) + (0.001 * (u ** 2)))
        # Euler integration of the pendulum dynamics.
        newthdot = (thdot + ((((((- 3) * g) / (2 * l)) * np.sin((th + np.pi))) + ((3.0 / (m * (l ** 2))) * u)) * dt))
        newth = (th + (newthdot * dt))
        # NOTE: velocity is clipped AFTER the angle update — this matches the
        # reference gym implementation's behavior.
        newthdot = np.clip(newthdot, (- self.max_speed), self.max_speed)
        self.state = np.array([newth, newthdot])
        # Reward is negative cost; episode never terminates on its own.
        return (self._get_obs(), (- costs), False, {})

    def _reset(self):
        # Start from a uniformly random angle and small random velocity.
        high = np.array([np.pi, 1])
        self.state = self.np_random.uniform(low=(- high), high=high)
        self.last_u = None
        return self._get_obs()

    def _get_obs(self):
        (theta, thetadot) = self.state
        return np.array([np.cos(theta), np.sin(theta), thetadot])

    def _render(self, mode='human', close=False):
        if close:
            if (self.viewer is not None):
                self.viewer.close()
                self.viewer = None
            return
        if (self.viewer is None):
            # Lazily build the viewer: rod, axle and torque-direction arrow.
            from gym.envs.classic_control import rendering
            self.viewer = rendering.Viewer(500, 500)
            self.viewer.set_bounds((- 2.2), 2.2, (- 2.2), 2.2)
            rod = rendering.make_capsule(1, 0.2)
            rod.set_color(0.8, 0.3, 0.3)
            self.pole_transform = rendering.Transform()
            rod.add_attr(self.pole_transform)
            self.viewer.add_geom(rod)
            axle = rendering.make_circle(0.05)
            axle.set_color(0, 0, 0)
            self.viewer.add_geom(axle)
            fname = path.join(path.dirname(__file__), 'assets/clockwise.png')
            self.img = rendering.Image(fname, 1.0, 1.0)
            self.imgtrans = rendering.Transform()
            self.img.add_attr(self.imgtrans)
        # The torque arrow is re-added every frame (one-time geometry).
        self.viewer.add_onetime(self.img)
        self.pole_transform.set_rotation((self.state[0] + (np.pi / 2)))
        if self.last_u:
            # Scale/flip the arrow with the last applied torque.
            self.imgtrans.scale = (((- self.last_u) / 2), (np.abs(self.last_u) / 2))
        return self.viewer.render(return_rgb_array=(mode == 'rgb_array'))
class BaseCodec(nn.Module):
    """Abstract interface for image tokenizer/codec modules.

    Subclasses implement tokenization, encoding and decoding; this base class
    only pins the module's train/eval mode to its `trainable` flag.
    """

    def get_tokens(self, x, **kwargs):
        raise NotImplementedError

    def get_number_of_tokens(self):
        raise NotImplementedError

    def encode(self, img):
        raise NotImplementedError

    def decode(self, img_seq):
        raise NotImplementedError

    def forward(self, **kwargs):
        raise NotImplementedError

    def train(self, mode=True):
        """Enter train mode only when the codec is trainable; otherwise force
        eval mode regardless of the requested `mode`."""
        self.training = mode
        effective_mode = bool(self.trainable and mode)
        return super().train(effective_mode)

    def _set_trainable(self):
        """Freeze every parameter and drop to eval mode for frozen codecs."""
        if not self.trainable:
            for _, param in self.named_parameters():
                param.requires_grad = False
            self.eval()
class Server():
    """Async RPC server hosting named services over TCP (optionally TLS)."""

    def __init__(self, port=50055, ssl_context=None):
        # port=0 would let the OS pick one; the bound port is written back in
        # serve_async once the listener exists.
        self.port = port
        self._ssl_context = ssl_context
        # name -> async context manager yielding the service implementation.
        self.services = {}
        self._connection_count = 0
        self._exception_count = 0

    def add_service(self, service=None, context_manager=None, setup_fn=None, teardown_fn=None, name=None):
        """Register a service by exactly one of: a service object, an async
        context manager, or a setup_fn (with optional teardown_fn).

        Raises ValueError unless exactly one source is provided, or if no name
        can be inferred.
        """
        # Booleans add to an int: exactly one of the three must be non-None.
        if ((((service is not None) + (context_manager is not None)) + (setup_fn is not None)) != 1):
            raise ValueError('Precisely one of service, context_manager or setup_fn should be set')
        if (name is None):
            # Infer the registration name from whichever source carries one.
            if hasattr(service, 'name'):
                name = service.name
            elif hasattr(context_manager, 'name'):
                name = context_manager.name
            elif hasattr(setup_fn, 'name'):
                name = setup_fn.name
            else:
                raise ValueError("Could not infer name, please provide 'name' argument to this function call or define 'name' attribute on service, context_manager or setup_fn")
        if (service is not None):
            self.services[name] = _service_wrapper(service=service)
        elif (context_manager is not None):
            self.services[name] = context_manager
        elif (setup_fn is not None):
            self.services[name] = _service_wrapper(setup_fn=setup_fn, teardown_fn=teardown_fn)
        else:
            raise ValueError("Shouldn't have happened")

    async def serve_async(self, *, task_status=TASK_STATUS_IGNORED):
        """Bind the listener, wrap it in TLS if configured, enter every
        service's context, and serve connections until cancelled."""
        async with AsyncExitStack() as stack:
            tcp_server = (await anyio.create_tcp_listener(local_port=self.port, reuse_port=True))
            # Record the actual bound port (relevant when self.port was 0).
            self.port = tcp_server.extra(anyio.abc.SocketAttribute.local_port)
            if self._ssl_context:
                tcp_server = TLSListener(tcp_server, self._ssl_context, standard_compatible=False)
            # Signal readiness (and the port) to the spawning task group.
            task_status.started(self.port)
            services_dict = {}
            for (key, value) in self.services.items():
                # Enter each service context; the exit stack tears them all
                # down in reverse order when serving stops.
                services_dict[key] = (await stack.enter_async_context(value))
            (await tcp_server.serve(ConnectionHandler(services_dict, self)))

    def serve(self, backend=None):
        # Blocking wrapper around serve_async.
        purerpc_run(self.serve_async, backend=backend)
def discriminator(image, y=None, reuse=False, for_G=False):
    """DCGAN discriminator: four leaky-ReLU conv layers (widths 64*1,2,4,8)
    followed by a linear logit head.

    Returns (logits, sigmoid(logits)).
    """
    if reuse:
        tf.get_variable_scope().reuse_variables()
    df_dim = 64
    net = image
    # Conv stack; names d_h0_conv .. d_h3_conv match the original layers.
    for layer_idx, width_mult in enumerate((1, 2, 4, 8)):
        net = lrelu(conv2d(net, df_dim * width_mult, name='d_h%d_conv' % layer_idx))
    logits = linear(tf.reshape(net, [FLAGS.batch_size, -1]), 1, 'd_h4_logits')
    _activation_summary(logits, reuse, for_G)
    probs = tf.nn.sigmoid(logits, name='d_h4_sigmoid')
    _activation_summary(probs, reuse, for_G)
    return (logits, probs)
def tags_and_versions(tags: Iterable[Tag], translator: VersionTranslator) -> list[tuple[Tag, Version]]:
    """Parse each git tag into a Version via `translator` and return the
    (tag, version) pairs sorted newest-version-first.

    Tags that cannot be parsed (NotImplementedError / InvalidVersion) are
    logged and skipped; falsy parse results are silently dropped.
    """
    ts_and_vs: list[tuple[Tag, Version]] = []
    for tag in tags:
        try:
            version = translator.from_tag(tag.name)
        except (NotImplementedError, InvalidVersion) as e:
            # Fix: message previously read "as as Version" (duplicated word).
            log.warning("Couldn't parse tag %s as a Version: %s", tag.name, str(e), exc_info=log.isEnabledFor(logging.DEBUG))
            continue
        if version:
            ts_and_vs.append((tag, version))
    log.info('found %s previous tags', len(ts_and_vs))
    # Newest version first.
    return sorted(ts_and_vs, reverse=True, key=(lambda v: v[1]))
# Fix: the decorator line was mangled to a bare `.parametrize(...)` (a syntax
# error). Restored as @pytest.mark.parametrize — presumably the original form,
# given the pytest-style test below; assumes pytest is imported at module level.
@pytest.mark.parametrize('marker, expected', [('python_version >= "3.6" and python_version < "4.0"', '>=3.6,<4.0'), ('sys_platform == "linux"', '*'), ('python_version >= "3.9" or sys_platform == "linux"', '*'), ('python_version >= "3.9" and sys_platform == "linux"', '>=3.9')])
def test_marker_properly_sets_python_constraint(marker: str, expected: str) -> None:
    """Setting a dependency's marker should derive its python constraint."""
    dependency = Dependency('foo', '^1.2.3')
    dependency.marker = marker
    assert (str(dependency.python_constraint) == expected)
def get_output_data(layer: torch.nn.Module, model: torch.nn.Module, images_in_one_batch: torch.Tensor) -> np.ndarray:
    """Run one forward pass of `model` and capture the output of `layer`.

    A forward hook records the layer's output and raises StopForwardException
    to abort the rest of the forward pass early.

    :param layer: the layer whose output is captured.
    :param model: the model containing `layer`.
    :param images_in_one_batch: one batch of input images.
    :return: the captured layer outputs stacked with np.vstack.
    """
    hook_handles = list()
    orig_layer_out_data = list()

    def _hook_to_collect_output_data(module, _, out_data):
        # Stash the layer output, then cut the forward pass short.
        out_data = utils.to_numpy(out_data)
        orig_layer_out_data.append(out_data)
        raise StopForwardException

    hook_handles.append(register_fwd_hook_for_layer(layer, _hook_to_collect_output_data))
    try:
        forward_pass(model, images_in_one_batch)
        output_data = np.vstack(orig_layer_out_data)
    finally:
        # Fix: previously the hooks leaked if forward_pass raised anything
        # other than the expected control-flow exception.
        for hook_handle in hook_handles:
            hook_handle.remove()
    return output_data
class GenerateThread2Agent(GenerateThread):
    """Background thread that simulates badminton rallies between two agents
    (each either a pickled RL model or a ShuttleNet supervised agent) and
    dumps the generated rally data to `output_filename`."""

    def __init__(self, rally_count: int, model1_path: str, is_model1_shuttleNet: bool, model1_shuttleNet_player: int, model2_path: str, is_model2_shuttleNet: bool, model2_shuttleNet_player: int, output_filename: str, parent=None):
        super().__init__(output_filename, parent)
        self.output_filename = output_filename
        self.model1_path = model1_path
        self.model2_path = model2_path
        self.is_model1_shuttleNet = is_model1_shuttleNet
        self.is_model2_shuttleNet = is_model2_shuttleNet
        # Running game scores for the two agents.
        self.model1_score = 0
        self.model2_score = 0
        self.model1_shuttleNet_player = model1_shuttleNet_player
        self.model2_shuttleNet_player = model2_shuttleNet_player
        self.rally_count = rally_count
        # Number of historical strokes used to seed each rally.
        # NOTE(review): both branches set the same value (2) — presumably the
        # ShuttleNet case was meant to differ; confirm.
        if (self.is_model1_shuttleNet or self.is_model2_shuttleNet):
            print('contain ShuttleNet')
            self.init_row_count = 2
        else:
            self.init_row_count = 2
        # Load real match data and keep only rallies long enough to seed from.
        data = pd.read_csv('StrokeForecasting/data/continous_subjective.csv')
        data = data[['rally_id', 'type', 'rally', 'ball_round', 'landing_x', 'landing_y', 'player_location_x', 'player_location_y', 'opponent_location_x', 'opponent_location_y']]
        grouped = data.groupby(['rally_id'])
        filtered = grouped.filter((lambda x: (len(x) >= self.init_row_count)))
        # NOTE(review): dropna on `data` happens after `filtered` was derived,
        # so it has no effect on `history_data` — confirm intent.
        data.dropna(inplace=True)
        self.history_data = filtered.groupby(['rally_id']).head(self.init_row_count)
        self.group_keys = list(self.history_data.groupby(['rally_id']).groups.keys())
        # NOTE(review): the stroke-type keys were non-ASCII names that were
        # lost in extraction — as written, the duplicate '' keys collapse to a
        # single entry {'': 11}; restore the original key strings.
        self.type_mapping = {'': 1, '': 2, '': 3, '': 4, '': 5, '': 6, '': 7, '': 8, '': 9, '': 10, '': 11}

    def sampleStartState(self):
        """Seed self.states/actions/actions_prob from a random historical rally."""
        self.states = []
        self.actions = []
        self.actions_prob = []
        random_group_index = np.random.choice(len(self.group_keys))
        rows = self.history_data.groupby(['rally_id']).get_group(self.group_keys[random_group_index])
        for (i, (index, row)) in enumerate(rows.iterrows()):
            player_coord = (row['player_location_x'], row['player_location_y'])
            opponent_coord = (row['opponent_location_x'], row['opponent_location_y'])
            landing_coord = (row['landing_x'], row['landing_y'])
            type = self.type_mapping[row['type']]
            if (i == 0):
                # First stroke: no previous landing, use the player's own spot.
                state = (player_coord, opponent_coord, player_coord)
                self.states.append(state)
            else:
                # Subsequent strokes pair the new state with the previous action.
                state = (player_coord, opponent_coord, prev_landing_coord)
                action = (prev_type, prev_landing_coord, ((- row['opponent_location_x']), (- row['opponent_location_y'])))
                self.states.append(state)
                self.actions.append(action)
                self.actions_prob.append(([], [], []))
            prev_landing_coord = landing_coord
            # NOTE(review): prev_opponent_coord is assigned but never read.
            prev_opponent_coord = opponent_coord
            prev_type = type
        # Drop the trailing entries so states/actions stay aligned for play.
        self.states = self.states[:(- 1)]
        self.actions = self.actions[:(- 1)]
        self.actions_prob = self.actions_prob[:(- 1)]

    def outputScore(self):
        # Hook for subclasses/UI; intentionally a no-op here.
        pass

    def isGameEnd(self):
        """Badminton scoring: first to 21 (win by 2), hard cap at 30."""
        if ((self.model1_score < 21) and (self.model2_score < 21)):
            return False
        if ((self.model1_score == 30) or (self.model2_score == 30)):
            return True
        if (abs((self.model1_score - self.model2_score)) < 2):
            return False
        return True

    def run(self):
        """Thread entry point: load both agents, then alternate strokes for
        `rally_count` iterations, dumping each finished rally."""
        from RLEnvironment import Env
        from SuperviseAgent import SuperviseAgent
        import debugpy
        debugpy.debug_this_thread()
        # Instantiate agent 1 (ShuttleNet or pickled model).
        if self.is_model1_shuttleNet:
            self.model1 = SuperviseAgent(self.model1_shuttleNet_player, 1)
        else:
            with open(self.model1_path, 'r+b') as model:
                self.model1 = pickle.load(model)
        # Instantiate agent 2 the same way.
        if self.is_model2_shuttleNet:
            self.model2 = SuperviseAgent(self.model2_shuttleNet_player, 2)
        else:
            with open(self.model2_path, 'r+b') as model:
                self.model2 = pickle.load(model)
        self.env = Env()
        self.sampleStartState()
        turn = 1
        print(self.states)
        print(self.actions)
        launcher = 1
        is_launch = True
        for i in range(self.rally_count):
            self.progressUpdate.emit((i + 1), self.rally_count)
            if (turn == 1):
                # ShuttleNet consumes the whole history; RL models only the
                # latest state plus the serve flag.
                if self.is_model1_shuttleNet:
                    (action, action_prob) = self.model1.action(self.states, self.actions)
                else:
                    (action, action_prob) = self.model1.action(self.states[(- 1)], is_launch)
                if (action[0] == 11):
                    print('cannot reach')
                (state, reward) = self.env.step(action, is_launch)
                if (reward != (- 1)):
                    self.states.append(state)
                    if (action is not None):
                        self.actions.append(action)
                        self.actions_prob.append(action_prob)
                turn = 2
                is_launch = False
                if (reward == (- 1)):
                    # Rally over: dump the rally, reset, credit agent 2.
                    is_launch = True
                    turn_ = launcher
                    for (i, (state, action, action_prob)) in enumerate(zip(self.states, self.actions, self.actions_prob)):
                        if (i == (len(self.states) - 1)):
                            self.dumpData(turn_, state, action, action_prob, (- 1), (i == 0))
                        elif (i == 0):
                            self.dumpData(turn_, state, action, action_prob, 0, True)
                        else:
                            self.dumpData(turn_, state, action, action_prob, 0, False)
                        turn_ = (2 if (turn_ == 1) else 1)
                    self.sampleStartState()
                    self.env.reset(self.states[(- 1)])
                    launcher = 2
                    self.model2_score += 1
                    if self.isGameEnd():
                        self.outputScore()
                        self.model1_score = 0
                        self.model2_score = 0
            elif (turn == 2):
                # Mirror of the turn == 1 branch for agent 2.
                if self.is_model2_shuttleNet:
                    (action, action_prob) = self.model2.action(self.states, self.actions)
                else:
                    (action, action_prob) = self.model2.action(self.states[(- 1)], is_launch)
                (next_state, reward) = self.env.step(action, is_launch)
                if (next_state is not None):
                    self.states.append(next_state)
                    if (action is not None):
                        self.actions.append(action)
                        self.actions_prob.append(action_prob)
                if (action[0] == 11):
                    print('cannot reach')
                turn = 1
                is_launch = False
                if (reward == (- 1)):
                    # Rally over: dump, reset, credit agent 1.
                    is_launch = True
                    turn_ = launcher
                    for (i, (state, action, action_prob)) in enumerate(zip(self.states, self.actions, self.actions_prob)):
                        if (i == (len(self.states) - 1)):
                            self.dumpData(turn_, state, action, action_prob, (- 1), (i == 0))
                        elif (i == 0):
                            self.dumpData(turn_, state, action, action_prob, 0, True)
                        else:
                            self.dumpData(turn_, state, action, action_prob, 0, False)
                        turn_ = (2 if (turn_ == 1) else 1)
                    self.sampleStartState()
                    launcher = 1
                    self.env.reset(self.states[(- 1)])
                    self.model1_score += 1
                    if self.isGameEnd():
                        self.outputScore()
                        self.model1_score = 0
                        self.model2_score = 0
        print(self.states[(- 1)])
        self.save()
        self.finished.emit()
def new_focal_loss(logits, targets, alpha: float, gamma: float, normalizer, label_smoothing: float = 0.01):
    """Sigmoid focal loss (EfficientDet variant) with optional label smoothing.

    Returns the element-wise focal loss scaled by 1/normalizer.
    """
    probs = logits.sigmoid()
    targets = targets.to(logits.dtype)
    neg_targets = 1.0 - targets
    # Probability assigned to the true class of each element.
    p_t = targets * probs + neg_targets * (1.0 - probs)
    # Class-balancing weight and hard-example modulating factor.
    alpha_factor = targets * alpha + neg_targets * (1.0 - alpha)
    modulating_factor = (1.0 - p_t) ** gamma
    if label_smoothing > 0.0:
        # Smoothing is applied AFTER p_t/alpha_factor are computed — only the
        # BCE term sees the smoothed labels, matching the reference code.
        targets = targets * (1.0 - label_smoothing) + 0.5 * label_smoothing
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
    return (1 / normalizer) * alpha_factor * modulating_factor * ce
def try_expanding_sum_type_to_union(typ: Type, target_fullname: str) -> ProperType:
    """Expand a "sum" type into a union of its literal members.

    Unions are expanded member-wise; an Instance whose fullname matches
    `target_fullname` is expanded when it is an enum (one literal per member,
    skipping non-Var symbols and removed enum props) or builtins.bool
    (Literal[True] | Literal[False]). Anything else is returned unchanged.
    """
    typ = get_proper_type(typ)
    if isinstance(typ, UnionType):
        expanded = [
            try_expanding_sum_type_to_union(member, target_fullname)
            for member in typ.relevant_items()
        ]
        return make_simplified_union(expanded, contract_literals=False)
    if isinstance(typ, Instance) and typ.type.fullname == target_fullname:
        if typ.type.is_enum:
            literals = [
                LiteralType(name, typ)
                for name, symbol in typ.type.names.items()
                if isinstance(symbol.node, Var) and name not in ENUM_REMOVED_PROPS
            ]
            return make_simplified_union(literals, contract_literals=False)
        if typ.type.fullname == 'builtins.bool':
            return make_simplified_union([LiteralType(True, typ), LiteralType(False, typ)], contract_literals=False)
    return typ
class ConfigHandler(Generic[Target]):
    """Parse options from a setup.cfg-style section prefix onto a target object.

    NOTE(review): several methods below take `cls` and `parsers` is accessed
    like a property — their @classmethod/@property decorators appear to have
    been stripped during extraction; confirm against the upstream source.
    """
    # Prefix of the config sections this handler consumes (e.g. 'metadata').
    section_prefix: str
    # Maps deprecated/alternate option names onto canonical attribute names.
    aliases: Dict[(str, str)] = {}

    def __init__(self, target_obj: Target, options: AllCommandOptions, ignore_option_errors, ensure_discovered: expand.EnsurePackagesDiscovered):
        self.ignore_option_errors = ignore_option_errors
        self.target_obj = target_obj
        # Only sections whose name starts with section_prefix are kept.
        self.sections = dict(self._section_options(options))
        # Names of options successfully applied, in order.
        self.set_options: List[str] = []
        self.ensure_discovered = ensure_discovered
        # Files pulled in via 'file:' directives, for sdist inclusion.
        self._referenced_files: Set[str] = set()

    def _section_options(cls, options: AllCommandOptions):
        """Yield (section_suffix, value) for sections under cls.section_prefix."""
        for (full_name, value) in options.items():
            (pre, sep, name) = full_name.partition(cls.section_prefix)
            if pre:
                # Non-empty prefix before the match => section not ours.
                continue
            (yield (name.lstrip('.'), value))

    def parsers(self):
        """Mapping of option name -> parsing function; subclasses must provide it."""
        raise NotImplementedError(('%s must provide .parsers property' % self.__class__.__name__))

    def __setitem__(self, option_name, value):
        """Parse `value` and assign it onto the target object.

        Raises KeyError for unknown options; silently keeps an existing
        non-empty value; swallows parse errors when ignore_option_errors.
        """
        target_obj = self.target_obj
        option_name = self.aliases.get(option_name, option_name)
        try:
            current_value = getattr(target_obj, option_name)
        except AttributeError:
            raise KeyError(option_name)
        if current_value:
            # Already set (e.g. from setup.py kwargs): do not override.
            return
        try:
            parsed = self.parsers.get(option_name, (lambda x: x))(value)
        except ((Exception,) * self.ignore_option_errors):
            # Catches Exception only when ignore_option_errors is truthy;
            # otherwise the tuple is empty and nothing is caught.
            return
        # Prefer a dedicated set_<name>() on the target, else plain setattr.
        simple_setter = functools.partial(target_obj.__setattr__, option_name)
        setter = getattr(target_obj, ('set_%s' % option_name), simple_setter)
        setter(parsed)
        self.set_options.append(option_name)

    def _parse_list(cls, value, separator=','):
        """Parse a comma- or newline-separated string into a stripped list."""
        if isinstance(value, list):
            # Passed through from setup.py: already a list.
            return value
        if ('\n' in value):
            value = value.splitlines()
        else:
            value = value.split(separator)
        return [chunk.strip() for chunk in value if chunk.strip()]

    def _parse_dict(cls, value):
        """Parse 'key = value' lines into a dict; raises OptionError on bad lines."""
        separator = '='
        result = {}
        for line in cls._parse_list(value):
            (key, sep, val) = line.partition(separator)
            if (sep != separator):
                raise OptionError(f'Unable to parse option value to dict: {value}')
            result[key.strip()] = val.strip()
        return result

    def _parse_bool(cls, value):
        """'1'/'true'/'yes' (case-insensitive) => True, anything else => False."""
        value = value.lower()
        return (value in ('1', 'true', 'yes'))

    def _exclude_files_parser(cls, key):
        """Build a parser that rejects 'file:' directives for `key`."""
        def parser(value):
            exclude_directive = 'file:'
            if value.startswith(exclude_directive):
                raise ValueError('Only strings are accepted for the {0} field, files are not accepted'.format(key))
            return value
        return parser

    def _parse_file(self, value, root_dir: _Path):
        """Resolve a 'file: a.txt, b.txt' directive into concatenated contents;
        non-directive values pass through unchanged."""
        include_directive = 'file:'
        if (not isinstance(value, str)):
            return value
        if (not value.startswith(include_directive)):
            return value
        spec = value[len(include_directive):]
        filepaths = [path.strip() for path in spec.split(',')]
        # Track the files so they can be added to the sdist manifest.
        self._referenced_files.update(filepaths)
        return expand.read_files(filepaths, root_dir)

    def _parse_attr(self, value, package_dir, root_dir: _Path):
        """Resolve an 'attr: pkg.mod.name' directive to the attribute's value;
        non-directive values pass through unchanged."""
        attr_directive = 'attr:'
        if (not value.startswith(attr_directive)):
            return value
        attr_desc = value.replace(attr_directive, '')
        # Ensure package discovery ran so dotted paths can be resolved.
        package_dir.update(self.ensure_discovered.package_dir)
        return expand.read_attr(attr_desc, package_dir, root_dir)

    def _get_parser_compound(cls, *parse_methods):
        """Compose parse functions left-to-right into a single parser."""
        def parse(value):
            parsed = value
            for method in parse_methods:
                parsed = method(parsed)
            return parsed
        return parse

    def _parse_section_to_dict_with_key(cls, section_options, values_parser):
        """Parse a whole section into {key: values_parser(key, raw_value)}."""
        value = {}
        for (key, (_, val)) in section_options.items():
            value[key] = values_parser(key, val)
        return value

    def _parse_section_to_dict(cls, section_options, values_parser=None):
        """Like _parse_section_to_dict_with_key, but the parser ignores the key."""
        parser = ((lambda _, v: values_parser(v)) if values_parser else (lambda _, v: v))
        return cls._parse_section_to_dict_with_key(section_options, parser)

    def parse_section(self, section_options):
        """Apply every option in a section via __setitem__, ignoring unknowns."""
        for (name, (_, value)) in section_options.items():
            with contextlib.suppress(KeyError):
                self[name] = value

    def parse(self):
        """Dispatch each stored section to its parse_section_<name> method."""
        for (section_name, section_options) in self.sections.items():
            method_postfix = ''
            if section_name:
                method_postfix = ('_%s' % section_name)
            # Dots in section names map to double underscores in method names.
            section_parser_method: Optional[Callable] = getattr(self, ('parse_section%s' % method_postfix).replace('.', '__'), None)
            if (section_parser_method is None):
                raise OptionError(f'Unsupported distribution option section: [{self.section_prefix}.{section_name}]')
            section_parser_method(section_options)

    def _deprecated_config_handler(self, func, msg, **kw):
        """Wrap `func` to emit a deprecation warning before each call."""
        # NOTE(review): the bare `(func)` below looks like a stripped decorator
        # (upstream uses @wraps(func)); confirm against the original source.
        (func)
        def config_handler(*args, **kwargs):
            kw.setdefault('stacklevel', 2)
            _DeprecatedConfig.emit('Deprecated config in `setup.cfg`', msg, **kw)
            return func(*args, **kwargs)
        return config_handler
def get_data(lmdb_path, name, model_path):
    """Read the first record of the 'align' sub-database in the LMDB at
    `lmdb_path` and write it out as an mp4 named after `name`.

    NOTE(review): `model_path` is accepted but unused here — confirm callers.
    """
    # Fix: map_size previously had no value (syntax error). 1 TiB is the
    # customary LMDB upper bound; it only reserves address space, not disk.
    env = lmdb.open(lmdb_path, map_size=1099511627776, max_dbs=64)
    align_db = env.open_db('align'.encode())
    txn = env.begin(write=False)
    # Records are keyed by stringified integer index; fetch record 0.
    align_bin = txn.get(str(0).encode(), db=align_db)
    with open('../data/wav2lip_train/{}.mp4'.format(name), 'wb') as f:
        f.write(align_bin)
def community_detection(embeddings, threshold=0.75, min_community_size=10, init_max_size=1000):
    """Fast community detection over an embedding matrix.

    Finds clusters where every member's cosine similarity to the seed exceeds
    `threshold` and the cluster has at least `min_community_size` members.
    Returns a list of index lists, largest community first, with overlapping
    communities removed greedily.
    """
    cos_scores = cos_sim(embeddings, embeddings)
    # Robustness fix: clamp k so topk never exceeds the number of embeddings
    # (previously crashed when len(embeddings) < min_community_size or
    # < init_max_size).
    (top_k_values, _) = cos_scores.topk(k=min(min_community_size, len(cos_scores)), largest=True)
    init_max_size = min(init_max_size, len(cos_scores))
    extracted_communities = []
    for i in range(len(top_k_values)):
        # Seed a community only if the k-th nearest neighbour clears the threshold.
        if (top_k_values[i][(- 1)] >= threshold):
            new_cluster = []
            (top_val_large, top_idx_large) = cos_scores[i].topk(k=init_max_size, largest=True)
            top_idx_large = top_idx_large.tolist()
            top_val_large = top_val_large.tolist()
            if (top_val_large[(- 1)] < threshold):
                # The init_max_size window already covers the community:
                # take members until similarity drops below the threshold.
                for (idx, val) in zip(top_idx_large, top_val_large):
                    if (val < threshold):
                        break
                    new_cluster.append(idx)
            else:
                # Community may be larger than the window: scan the full row.
                for (idx, val) in enumerate(cos_scores[i].tolist()):
                    if (val >= threshold):
                        new_cluster.append(idx)
            extracted_communities.append(new_cluster)
    # Largest-first greedy selection of non-overlapping communities.
    extracted_communities = sorted(extracted_communities, key=(lambda x: len(x)), reverse=True)
    unique_communities = []
    extracted_ids = set()
    for community in extracted_communities:
        add_cluster = True
        for idx in community:
            if (idx in extracted_ids):
                add_cluster = False
                break
        if add_cluster:
            unique_communities.append(community)
            for idx in community:
                extracted_ids.add(idx)
    return unique_communities
class DataLoader():
    """Wrap a dataloader so that every KeyedJaggedTensor in each sample has
    its global ids remapped to cache-local ids by an IDTransformerGroup
    before being yielded."""

    def __init__(self, id_transformer_group: IDTransformerGroup, dataloader, *, data_info: Dict[int, str] = None, paths: List[str] = None, num_prefetch=0):
        """
        :param id_transformer_group: maps module paths to id transformers.
        :param dataloader: the underlying iterable of samples (tuples/lists).
        :param data_info: optional {sample_index: module_path}; inferred from
            `paths` (in KJT order) when omitted.
        :param paths: module paths used to infer data_info when it is None.
        :param num_prefetch: queue capacity for prefetching.
        """
        self._id_transformer_group = id_transformer_group
        if data_info is not None:
            # Every configured path must have a registered id transformer.
            for _, path in data_info.items():
                if path not in self._id_transformer_group:
                    raise ValueError(f'invalid path `{path}` data_info. No id transformer for this path.')
        # Fix: always define both attributes; previously `_paths` was only
        # assigned when data_info was None, leaving the instance state
        # inconsistent in the other branch.
        self._paths = paths
        self._data_info = data_info
        self._data_queue = queue.Queue(maxsize=num_prefetch)
        self._done_event = threading.Event()
        self._dataloader = dataloader
        self._num_prefetch = num_prefetch

    def _transform_fn(self, data):
        """Replace each KJT in the sample with its cache-local counterpart.

        Returns (transformed_sample_tuple, fetch_handles).
        """
        if self._data_info is None:
            # Infer data_info: the i-th KJT in the sample maps to the i-th path.
            data_info = {}
            path_idx = 0
            for i in range(len(data)):
                if isinstance(data[i], KeyedJaggedTensor):
                    if path_idx >= len(self._paths):
                        raise ValueError('Has more KJT in a data sample than the number of modules, could not infer data_info, please set data_info manually')
                    data_info[i] = self._paths[path_idx]
                    path_idx += 1
        else:
            data_info = self._data_info
        global_kjts = {path: data[idx] for (idx, path) in data_info.items()}
        (cache_kjts, fetch_handles) = self._id_transformer_group.transform(global_kjts)
        data = list(data)
        for (idx, path) in data_info.items():
            data[idx] = cache_kjts[path]
        return (tuple(data), fetch_handles)

    def __iter__(self):
        return DataLoaderIter(self._dataloader, self._transform_fn, num_prefetch=self._num_prefetch)

    def __len__(self):
        return len(self._dataloader)
def _switch_admin(sa: ServerApp, session: MultiplayerSession, membership: MultiplayerMembership):
    """Toggle a member's admin flag, refusing to demote the session's only admin."""
    session_id = session.id
    verify_has_admin(sa, session_id, None, allow_when_no_admins=True)
    # Count current admins in this session.
    admin_count = MultiplayerMembership.select().where(
        (MultiplayerMembership.session == session_id),
        is_boolean(MultiplayerMembership.admin, True),
    ).count()
    if membership.admin and admin_count <= 1:
        raise error.InvalidActionError("can't demote the only admin")
    membership.admin = not membership.admin
    session_common.add_audit_entry(sa, session, f"Made {membership.effective_name} {('' if membership.admin else 'not ')}an admin")
    logger().info(f'{session_common.describe_session(session)}, User {membership.user.id}. Performing admin switch, new status is {membership.admin}.')
    membership.save()
class TridentConv(nn.Module):
    """Shared-weight convolution applied with several dilations ("branches"),
    as used in TridentNet. In training (or test_branch_idx == -1) all branches
    run; otherwise only the configured test branch runs."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, trident_dilations=(1, 2, 3), test_branch_idx=1, bias=False):
        super(TridentConv, self).__init__()
        self.num_branch = len(trident_dilations)
        self.with_bias = bias
        self.test_branch_idx = test_branch_idx
        self.stride = _pair(stride)
        self.kernel_size = _pair(kernel_size)
        # Padding equals dilation per branch, preserving spatial size for 3x3.
        self.paddings = _pair(trident_dilations)
        self.dilations = trident_dilations
        self.in_channels = in_channels
        self.out_channels = out_channels
        # NOTE(review): self.bias is first set to the bool flag, then
        # overwritten below with the Parameter/None — the first assignment is
        # effectively dead; kept to match the original.
        self.bias = bias
        # One weight tensor shared by every dilation branch.
        self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels, *self.kernel_size))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_channels))
        else:
            self.bias = None
        self.init_weights()

    def init_weights(self):
        # Kaiming (fan_in, uniform) initialization of weight (and bias).
        kaiming_init(self, distribution='uniform', mode='fan_in')

    def extra_repr(self):
        """Configuration summary shown by repr(module)."""
        tmpstr = f'in_channels={self.in_channels}'
        tmpstr += f', out_channels={self.out_channels}'
        tmpstr += f', kernel_size={self.kernel_size}'
        tmpstr += f', num_branch={self.num_branch}'
        tmpstr += f', test_branch_idx={self.test_branch_idx}'
        tmpstr += f', stride={self.stride}'
        tmpstr += f', paddings={self.paddings}'
        tmpstr += f', dilations={self.dilations}'
        tmpstr += f', bias={self.bias}'
        return tmpstr

    def forward(self, inputs):
        """`inputs` is a list of tensors, one per branch; returns a list of
        conv outputs (all branches when training, else only the test branch)."""
        if (self.training or (self.test_branch_idx == (- 1))):
            outputs = [F.conv2d(input, self.weight, self.bias, self.stride, padding, dilation) for (input, dilation, padding) in zip(inputs, self.dilations, self.paddings)]
        else:
            assert (len(inputs) == 1)
            outputs = [F.conv2d(inputs[0], self.weight, self.bias, self.stride, self.paddings[self.test_branch_idx], self.dilations[self.test_branch_idx])]
        return outputs
class Migration(migrations.Migration):
    """Replace the hotelroomreservation.user FK with a plain user_id integer in
    Django's state while only altering the FK (no constraint) in the database."""
    dependencies = [('hotels', '0001_initial')]
    # Database side: keep the column but drop the FK constraint.
    # State side: model now exposes an IntegerField `user_id` instead of the FK.
    operations = [migrations.SeparateDatabaseAndState(database_operations=[migrations.AlterField(model_name='hotelroomreservation', name='user', field=models.ForeignKey('users.User', db_constraint=False, db_index=True, null=False, on_delete=models.PROTECT))], state_operations=[migrations.RemoveField(model_name='hotelroomreservation', name='user'), migrations.AddField(model_name='hotelroomreservation', name='user_id', field=models.IntegerField(verbose_name='user'))])]
class FIDInceptionE_1(models.inception.InceptionE):
    """First InceptionE block patched for FID computation: the pooling branch
    uses average pooling with count_include_pad=False, matching the original
    TF-Inception network rather than stock torchvision."""

    def __init__(self, in_channels):
        super(FIDInceptionE_1, self).__init__(in_channels)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        # 3x3 branch splits into two parallel convs that are concatenated.
        branch3x3 = self.branch3x3_1(x)
        branch3x3 = torch.cat([self.branch3x3_2a(branch3x3), self.branch3x3_2b(branch3x3)], 1)

        # Double-3x3 branch, also ending in two concatenated parallel convs.
        branch3x3dbl = self.branch3x3dbl_2(self.branch3x3dbl_1(x))
        branch3x3dbl = torch.cat([self.branch3x3dbl_3a(branch3x3dbl), self.branch3x3dbl_3b(branch3x3dbl)], 1)

        # Patched pooling branch: average pool (FID port) instead of max pool.
        pooled = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, count_include_pad=False)
        branch_pool = self.branch_pool(pooled)

        return torch.cat([branch1x1, branch3x3, branch3x3dbl, branch_pool], 1)
class TradingCentres():
    """A collection of trading centres with joint business-day arithmetic.

    A date is a business day only if it is a weekday and is not a holiday
    in *any* registered centre.
    """
    # One calendar day; the step used when walking through dates.
    onedaydelta = datetime.timedelta(days=1)

    def __init__(self):
        self._centres = {}

    def __len__(self):
        return len(self._centres)

    def add(self, tc):
        """Register a trading centre, keyed by its ``code`` attribute."""
        self._centres[tc.code] = tc

    def _isbizday(self, dte):
        # A holiday in any registered centre makes the date a non-business day.
        for c in self._centres.values():
            if c._isholiday(dte):
                return False
        return True

    def isbizday(self, dte):
        """Return True if *dte* is a weekday and not a holiday in any centre."""
        if (dte.isoweekday() in isoweekend):
            return False
        return self._isbizday(dte)

    def nextbizday(self, dte, nd=1):
        """Return the *nd*-th business day after *dte* (nd=0 snaps forward
        to the nearest business day)."""
        n = 0
        isbz = self.isbizday
        # BUGFIX: previously stepped by a module-level ``oneday`` global;
        # use the class constant so this matches ``prevbizday``.
        while (not isbz(dte)):
            dte += self.onedaydelta
        while (n < nd):
            dte += self.onedaydelta
            if isbz(dte):
                n += 1
        return dte

    def prevbizday(self, dte, nd=1):
        """Return the *nd*-th business day before *dte*; a negative *nd*
        delegates to :meth:`nextbizday`."""
        n = 0
        if (nd < 0):
            return self.nextbizday(dte, (- nd))
        else:
            # Snap backwards to the nearest business day first.
            while (not self.isbizday(dte)):
                dte -= self.onedaydelta
            n = 0
            while (n < nd):
                dte -= self.onedaydelta
                if self.isbizday(dte):
                    n += 1
            return dte
def call_gpt(prompt, model='code-davinci-002', stop=None, temperature=0.0, top_p=1.0, max_tokens=128, majority_at=None):
    """Sample completions from the OpenAI API with batching and retries.

    Requests ``majority_at`` completions in total (default 1), at most five
    per API call, retrying with quadratic backoff (capped at 60s) on
    rate-limit errors. Raises RuntimeError if the retry budget is exhausted.
    """
    target = 1 if majority_at is None else majority_at
    batch_limit = 5
    collected = []
    # Generous retry budget: 20 attempts per expected full batch.
    for attempt in range(20 * (target // batch_limit + 1)):
        try:
            to_request = min(batch_limit, target - len(collected))
            is_chat_model = model.startswith('gpt-4') or model.startswith('gpt-3.5-turbo')
            api = chat_api if is_chat_model else completions_api
            batch = api(model=model, max_tokens=max_tokens, stop=stop, prompt=prompt, temperature=temperature, top_p=top_p, n=to_request, best_of=to_request)
            collected.extend(batch)
            if len(collected) >= target:
                return collected[:target]
        except openai.error.RateLimitError:
            time.sleep(min(attempt ** 2, 60))
    raise RuntimeError('Failed to call GPT API')
class StationSection(TableSection):
    """GSE/IMS bulletin table section holding station records.

    Declarative configuration only: ``keyword`` identifies the section in
    the file, ``table_setup`` maps the per-format header lines to the
    ``stations`` attribute parsed as :class:`Station` rows.
    """
    keyword = b'STATION'
    table_setup = dict(header={None: b'Net Sta Type Latitude Longitude Coord Sys Elev On Date Off Date', 'GSE2.0': b'Sta Type Latitude Longitude Elev On Date Off Date'}, attribute='stations', cls=Station)
    stations = List.T(Station.T())
class SpacedDiffusion(GaussianDiffusion):
    """A diffusion process that skips steps from a base diffusion process.

    ``use_timesteps`` selects which original timesteps are retained; the
    betas are recomputed so that the retained steps reproduce the base
    process's cumulative alpha schedule exactly.
    """
    def __init__(self, use_timesteps, **kwargs):
        self.use_timesteps = set(use_timesteps)
        self.timestep_map = []  # spaced index -> original timestep index
        self.original_num_steps = len(kwargs['betas'])
        base_diffusion = GaussianDiffusion(**kwargs)
        last_alpha_cumprod = 1.0
        new_betas = []
        for (i, alpha_cumprod) in enumerate(base_diffusion.alphas_cumprod):
            if (i in self.use_timesteps):
                # Choose beta so alphas_cumprod at kept steps is preserved.
                new_betas.append((1 - (alpha_cumprod / last_alpha_cumprod)))
                last_alpha_cumprod = alpha_cumprod
                self.timestep_map.append(i)
        kwargs['betas'] = np.array(new_betas)
        super().__init__(**kwargs)
    def p_mean_variance(self, model, *args, **kwargs):
        return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)
    def training_losses(self, model, *args, **kwargs):
        return super().training_losses(self._wrap_model(model), *args, **kwargs)
    def condition_mean(self, cond_fn, *args, **kwargs):
        return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)
    def condition_score(self, cond_fn, *args, **kwargs):
        return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)
    def _wrap_model(self, model):
        # Wrap once: maps spaced timesteps back to the original schedule.
        if isinstance(model, _WrappedModel):
            return model
        return _WrappedModel(model, self.timestep_map, self.original_num_steps)
    def _scale_timesteps(self, t):
        # Scaling is handled inside the wrapped model, so this is identity.
        return t
# NOTE(review): the next line looks like the argument list of a stripped
# decorator (e.g. ``@hydra.main(config_path='config', config_name='voting_cls')``)
# — confirm against the original source; as written it is not valid standalone.
(config_path='config', config_name='voting_cls')
def main(args):
    """Seed every RNG (Python hash, NumPy, torch CPU/CUDA), force
    deterministic cuDNN behaviour, then run the voting classification test."""
    if (args.seed is None):
        args.seed = np.random.randint(1, 10000)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.set_printoptions(10)
    # Reproducibility: disable autotuner, force deterministic kernels.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    voting_test(args)
def plant():
    """Monkey-patch obspy and pyrocko classes with cross-conversion helpers.

    After calling this, obspy Trace/Stream/Catalog/Inventory objects gain
    ``to_pyrocko_*`` (and ``snuffle``/``fiddle``) methods, and pyrocko
    Trace/Pile objects gain the corresponding ``to_obspy_*`` methods.
    """
    import obspy
    obspy.Trace.to_pyrocko_trace = to_pyrocko_trace
    obspy.Trace.snuffle = snuffle
    obspy.Trace.fiddle = fiddle
    obspy.Stream.to_pyrocko_traces = to_pyrocko_traces
    obspy.Stream.snuffle = snuffle
    obspy.Stream.fiddle = fiddle
    obspy.core.event.Catalog.to_pyrocko_events = to_pyrocko_events
    obspy.core.inventory.inventory.Inventory.to_pyrocko_stations = to_pyrocko_stations
    import pyrocko.trace
    import pyrocko.pile
    pyrocko.trace.Trace.to_obspy_trace = to_obspy_trace
    pyrocko.pile.Pile.to_obspy_stream = to_obspy_stream
# NOTE(review): ``_test`` below looks like the remnant of a stripped decorator
# (e.g. ``@keras_test``) — confirm against the original source.
_test
def test_sequential_fit_generator():
    """Exercise Sequential.fit_generator with/without validation data, plus a
    pop()/re-add of a layer before compiling."""
    ((x_train, y_train), (x_test, y_test)) = _get_test_data()
    def data_generator(train):
        # Infinite generator cycling through successive mini-batches of the
        # train or test split.
        if train:
            max_batch_index = (len(x_train) // batch_size)
        else:
            max_batch_index = (len(x_test) // batch_size)
        i = 0
        while 1:
            if train:
                (yield (x_train[(i * batch_size):((i + 1) * batch_size)], y_train[(i * batch_size):((i + 1) * batch_size)]))
            else:
                (yield (x_test[(i * batch_size):((i + 1) * batch_size)], y_test[(i * batch_size):((i + 1) * batch_size)]))
            i += 1
            i = (i % max_batch_index)
    model = Sequential()
    model.add(Dense(num_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(num_class))
    # pop() removes the layer just added; re-adding checks the rebuild path.
    model.pop()
    model.add(Dense(num_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.fit_generator(data_generator(True), 5, epochs)
    model.fit_generator(data_generator(True), 5, epochs, validation_data=(x_test, y_test))
    model.fit_generator(data_generator(True), 5, epochs, validation_data=data_generator(False), validation_steps=3)
    model.fit_generator(data_generator(True), 5, epochs, max_queue_size=2)
    model.evaluate(x_train, y_train)
def derivatives_in_paraboloidal_coordinates():
    """Demonstrate galgebra vector derivatives in paraboloidal coordinates.

    Builds a normalised 3-D geometric algebra over (u, v, phi) and prints
    gradient-based derivatives of generic scalar/vector/bivector fields.
    """
    coords = (u, v, phi) = symbols('u v phi', real=True)
    # Paraboloidal embedding: x = u*v*cos(phi), y = u*v*sin(phi), z = (u^2 - v^2)/2
    (par3d, er, eth, ephi) = Ga.build('e_u e_v e_phi', X=[((u * v) * cos(phi)), ((u * v) * sin(phi)), (((u ** 2) - (v ** 2)) / 2)], coords=coords, norm=True)
    grad = par3d.grad
    f = par3d.mv('f', 'scalar', f=True)
    A = par3d.mv('A', 'vector', f=True)
    B = par3d.mv('B', 'bivector', f=True)
    print('#Derivatives in Paraboloidal Coordinates')
    print('f =', f)
    print('A =', A)
    print('B =', B)
    print('grad*f =', (grad * f))
    print('grad|A =', (grad | A))
    # Curl expressed via the pseudoscalar: -I*(grad^A).
    ((- par3d.i) * (grad ^ A)).Fmt(3, 'grad\\times A = -I*(grad^A)')
    print('grad^B =', (grad ^ B))
    return
def test():
    """Smoke-test the operational-space control interface on OpenCabinetDoor.

    For each level: samples random joint actions, round-trips them through
    the OSC <-> joint-space conversion (asserting reconstruction error below
    1e-6), then drives the hand forward in operational space and renders.
    """
    import gym
    import mani_skill.env
    env_name = 'OpenCabinetDoor-v0'
    env = gym.make(env_name)
    osc_interface = OperationalSpaceControlInterface(env_name)
    env.set_env_mode(obs_mode='state', reward_type='sparse')
    print(env.observation_space)
    print(env.action_space)
    for level_idx in range(0, 5):
        obs = env.reset(level=level_idx)
        print('#### Level {:d}'.format(level_idx))
        for i_step in range(100000):
            print(i_step)
            action = env.action_space.sample()
            qpos = osc_interface.get_robot_qpos_from_obs(obs)
            joint_action = action
            # Round-trip: joint -> (operational, null) -> joint must reconstruct.
            (os_action, null_action) = osc_interface.joint_space_to_operational_space_and_null_space(qpos, joint_action)
            joint_action_rec = osc_interface.operational_space_and_null_space_to_joint_space(qpos, os_action, null_action)
            epsilon = 1e-06
            if (np.max(np.abs((joint_action_rec - action))) > epsilon):
                print('Reconstruct Error!', joint_action_rec, action)
                exit((- 1))
            '\n    # Example 1: Move robot arm in null space\n    null_action = osc_interface.operational_space_and_null_space_to_joint_space(\n        qpos, np.zeros(osc_interface.osc_dim), action[:osc_interface.null_space_dim])\n    action = null_action\n    '
            # Command a small forward velocity on one operational-space dim,
            # keeping the sampled action in the null space.
            hand_forward = np.zeros(osc_interface.osc_dim)
            extra_dim = len(osc_interface.osc_extra_joints)
            dim = 1
            hand_forward[(extra_dim + dim):((extra_dim + dim) + 1)] = 0.1
            forward_action = osc_interface.operational_space_and_null_space_to_joint_space(qpos, hand_forward, action[:osc_interface.null_space_dim])
            action = forward_action
            (obs, reward, done, info) = env.step(action)
            env.render('human')
            if done:
                break
    env.close()
class SparseConv2d(SparseConvolution):
    """2-D sparse convolution: a thin wrapper over SparseConvolution with ndim=2."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None):
        super().__init__(2, in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, indice_key=indice_key)
def _get_value_for_attr(obj, cls, orig_cls, sig_key, sig, meta_hints, attr_getters, **kwargs):
if (obj and (sig_key in obj)):
result = (sig_key, _get_value_from_obj(obj, cls, sig, sig_key, meta_hints, **kwargs))
elif (sig_key in attr_getters):
attr_getter = attr_getters.pop(sig_key)
result = (sig_key, attr_getter())
elif (sig.default != inspect.Parameter.empty):
result = (sig_key, sig.default)
elif (sig.kind in (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD)):
result = (None, None)
elif can_match_with_none(cls):
result = (sig_key, None)
else:
raise UnfulfilledArgumentError('No value found for "{}".'.format(sig_key), sig_key, obj, orig_cls)
return result |
class TestMiTempBtPoller(unittest.TestCase):
TEST_MAC = '11:22:33:44:55:66'
def test_format_bytes(self):
self.assertEqual('AA BB 00', MiTempBtPoller._format_bytes([170, 187, 0]))
def test_read_battery(self):
poller = MiTempBtPoller(self.TEST_MAC, MockBackend)
backend = self._get_backend(poller)
backend.battery_level = 50
self.assertEqual(50, poller.battery_level())
self.assertEqual(50, poller.parameter_value(MI_BATTERY))
self.assertEqual(0, len(backend.written_handles))
def test_read_version(self):
poller = MiTempBtPoller(self.TEST_MAC, MockBackend)
backend = self._get_backend(poller)
backend.set_version('00.00.11')
self.assertEqual('00.00.11', poller.firmware_version())
self.assertEqual(0, len(backend.written_handles))
def test_read_measurements(self):
poller = MiTempBtPoller(self.TEST_MAC, MockBackend)
backend = self._get_backend(poller)
backend.temperature = 56.7
self.assertAlmostEqual(backend.temperature, poller.parameter_value(MI_TEMPERATURE), delta=0.11)
def test_name(self):
poller = MiTempBtPoller(self.TEST_MAC, MockBackend)
backend = self._get_backend(poller)
backend.name = 'my sensor name'
self.assertEqual(backend.name, poller.name())
def test_clear_cache(self):
poller = MiTempBtPoller(self.TEST_MAC, MockBackend)
backend = self._get_backend(poller)
self.assertFalse(poller.cache_available())
backend.temperature = 1.0
self.assertAlmostEqual(1.0, poller.parameter_value(MI_TEMPERATURE), delta=0.01)
self.assertTrue(poller.cache_available())
backend.temperature = 2.0
self.assertAlmostEqual(1.0, poller.parameter_value(MI_TEMPERATURE), delta=0.01)
self.assertTrue(poller.cache_available())
poller.clear_cache()
self.assertFalse(poller.cache_available())
backend.temperature = 3.0
self.assertAlmostEqual(3.0, poller.parameter_value(MI_TEMPERATURE), delta=0.01)
self.assertTrue(poller.cache_available())
def test_no_answer_data(self):
poller = MiTempBtPoller(self.TEST_MAC, MockBackend)
backend = self._get_backend(poller)
backend.handle_0x0010_raw = None
with self.assertRaises(BluetoothBackendException):
poller.parameter_value(MI_TEMPERATURE)
def test_no_answer_name(self):
poller = MiTempBtPoller(self.TEST_MAC, MockBackend)
backend = self._get_backend(poller)
backend.handle_0x03_raw = None
with self.assertRaises(BluetoothBackendException):
poller.name()
def test_no_answer_firmware_version(self):
poller = MiTempBtPoller(self.TEST_MAC, MockBackend)
backend = self._get_backend(poller)
backend.handle_0x0024_raw = None
self.assertTrue((poller.firmware_version() is None))
def test_connect_exception(self):
poller = MiTempBtPoller(self.TEST_MAC, ConnectExceptionBackend, retries=0)
with self.assertRaises(BluetoothBackendException):
poller.firmware_version()
with self.assertRaises(BluetoothBackendException):
poller.name()
with self.assertRaises(BluetoothBackendException):
poller.parameter_value(MI_TEMPERATURE)
with self.assertRaises(BluetoothBackendException):
poller.parameter_value(MI_HUMIDITY)
def test_rw_exception(self):
poller = MiTempBtPoller(self.TEST_MAC, RWExceptionBackend, retries=0)
with self.assertRaises(BluetoothBackendException):
poller.firmware_version()
with self.assertRaises(BluetoothBackendException):
poller.name()
with self.assertRaises(BluetoothBackendException):
poller.parameter_value(MI_TEMPERATURE)
with self.assertRaises(BluetoothBackendException):
poller.parameter_value(MI_HUMIDITY)
def _get_backend(poller):
return poller._bt_interface._backend |
class OtherTests(unittest.TestCase):
    """Tests for Proxy input dispatching."""
    def setUp(self):
        self.proxy = Proxy()
        self.proxy._proxyfd = MockFd()
    def tearDown(self):
        # Undo class-level method mocks installed by individual tests.
        UnmockClassMethods(Proxy)
        UnmockClassMethods(Server)
    def testProcessInputNonProxyPort(self):
        # Input on a non-proxy fd must be delegated to Server._ProcessInput.
        fd = MockFd(fd=111)
        MockClassMethod(Server, '_ProcessInput')
        self.proxy._ProcessInput(fd)
        self.assertEqual(self.proxy.called, [('_ProcessInput', (fd,), {})])
    def testProcessInput(self):
        # Input on the proxy fd is grabbed and handled as a proxy packet.
        MockClassMethod(Proxy, '_GrabPacket')
        MockClassMethod(Proxy, '_HandleProxyPacket')
        self.proxy._ProcessInput(self.proxy._proxyfd)
        self.assertEqual([x[0] for x in self.proxy.called], ['_GrabPacket', '_HandleProxyPacket'])
def generate_packets() -> DNSOutgoing:
    """Build a large mDNS response fixture of HomeKit (_hap._tcp) services.

    One TXT answer plus SRV/TXT/A additionals for each service record, all
    pointing at the same IPv4 address — used to exercise packet generation.
    """
    out = DNSOutgoing((const._FLAGS_QR_RESPONSE | const._FLAGS_AA))
    address = socket.inet_pton(socket.AF_INET, '192.168.208.5')
    additionals = [{'name': 'HASS Bridge ZJWH FF5137._hap._tcp.local.', 'address': address, 'port': 51832, 'text': b'\x13md=HASS Bridge ZJWH\x06pv=1.0\x14id=01:6B:30:FF:51:37\x05c#=12\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=L0m/aQ=='}, {'name': 'HASS Bridge 3K9A C2582A._hap._tcp.local.', 'address': address, 'port': 51834, 'text': b'\x13md=HASS Bridge 3K9A\x06pv=1.0\x14id=E2:AA:5B:C2:58:2A\x05c#=12\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=b2CnzQ=='}, {'name': 'Master Bed TV CEDB27._hap._tcp.local.', 'address': address, 'port': 51830, 'text': b'\x10md=Master Bed TV\x06pv=1.0\x14id=9E:B7:44:CE:DB:27\x05c#=18\x04s#=1\x04ff=0\x05ci=31\x04sf=0\x0bsh=CVj1kw=='}, {'name': 'Living Room TV 921B77._hap._tcp.local.', 'address': address, 'port': 51833, 'text': b'\x11md=Living Room TV\x06pv=1.0\x14id=11:61:E7:92:1B:77\x05c#=17\x04s#=1\x04ff=0\x05ci=31\x04sf=0\x0bsh=qU77SQ=='}, {'name': 'HASS Bridge ZC8X FF413D._hap._tcp.local.', 'address': address, 'port': 51829, 'text': b'\x13md=HASS Bridge ZC8X\x06pv=1.0\x14id=96:14:45:FF:41:3D\x05c#=12\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=b0QZlg=='}, {'name': 'HASS Bridge WLTF 4BE61F._hap._tcp.local.', 'address': address, 'port': 51837, 'text': b'\x13md=HASS Bridge WLTF\x06pv=1.0\x14id=E0:E7:98:4B:E6:1F\x04c#=2\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=ahAISA=='}, {'name': 'FrontdoorCamera 8941D1._hap._tcp.local.', 'address': address, 'port': 54898, 'text': b'\x12md=FrontdoorCamera\x06pv=1.0\x14id=9F:B7:DC:89:41:D1\x04c#=2\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=0+MXmA=='}, {'name': 'HASS Bridge W9DN 5B5CC5._hap._tcp.local.', 'address': address, 'port': 51836, 'text': b'\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A=='}, {'name': 'HASS Bridge Y9OO EFF0A7._hap._tcp.local.', 'address': address, 'port': 51838, 'text': b'\x13md=HASS Bridge Y9OO\x06pv=1.0\x14id=D3:FE:98:EF:F0:A7\x04c#=2\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=u3bdfw=='}, {'name': 'Snooze Room TV 6B89B0._hap._tcp.local.', 'address': 
    address, 'port': 51835, 'text': b'\x11md=Snooze Room TV\x06pv=1.0\x14id=5F:D5:70:6B:89:B0\x05c#=17\x04s#=1\x04ff=0\x05ci=31\x04sf=0\x0bsh=xNTqsg=='}, {'name': 'AlexanderHomeAssistant 74651D._hap._tcp.local.', 'address': address, 'port': 54811, 'text': b'\x19md=AlexanderHomeAssistant\x06pv=1.0\x14id=59:8A:0B:74:65:1D\x05c#=14\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=ccZLPA=='}, {'name': 'HASS Bridge OS95 39C053._hap._tcp.local.', 'address': address, 'port': 51831, 'text': b'\x13md=HASS Bridge OS95\x06pv=1.0\x14id=7E:8C:E6:39:C0:53\x05c#=12\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=Xfe5LQ=='}]
    out.add_answer_at_time(DNSText('HASS Bridge W9DN 5B5CC5._hap._tcp.local.', const._TYPE_TXT, (const._CLASS_IN | const._CLASS_UNIQUE), const._DNS_OTHER_TTL, b'\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A=='), 0)
    # SRV + TXT + A additional records per service.
    for record in additionals:
        out.add_additional_answer(DNSService(record['name'], const._TYPE_SRV, (const._CLASS_IN | const._CLASS_UNIQUE), const._DNS_HOST_TTL, 0, 0, record['port'], record['name']))
        out.add_additional_answer(DNSText(record['name'], const._TYPE_TXT, (const._CLASS_IN | const._CLASS_UNIQUE), const._DNS_OTHER_TTL, record['text']))
        out.add_additional_answer(DNSAddress(record['name'], const._TYPE_A, (const._CLASS_IN | const._CLASS_UNIQUE), const._DNS_HOST_TTL, record['address']))
    return out
def embedding_layers(inputs, model_name, embedding_size=512, dropout_keep_prob=None, is_training=False, weight_decay=4e-05, scope=None):
    """Project backbone features to a joint embedding via a 1x1 convolution.

    Applies optional dropout, then a 1x1 conv with ``embedding_size`` output
    channels (no activation) under the backbone's slim arg scope, and finally
    squeezes the spatial dimensions.

    Args:
        inputs: 4-D feature tensor; assumes spatial dims are 1x1 since they
            are squeezed away — TODO confirm against callers.
        model_name: key into ``nets_factory.arg_scopes_map``.
        embedding_size: output embedding dimensionality.
        dropout_keep_prob: if set, keep probability for dropout before the conv.
        is_training: toggles batch-norm/dropout training behaviour.
        weight_decay: L2 regularisation passed to the arg scope.
        scope: variable scope name of the projection conv.

    Returns:
        2-D tensor of shape [batch, embedding_size].
    """
    with tf.variable_scope('projection'):
        arg_scope = nets_factory.arg_scopes_map[model_name](weight_decay=weight_decay)
        with slim.arg_scope(arg_scope):
            with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):
                net = inputs
                if (dropout_keep_prob is not None):
                    print(('add dropout = %.4f to projection layer' % dropout_keep_prob))
                    net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout')
                joint_embeddings = slim.conv2d(net, embedding_size, [1, 1], activation_fn=None, scope=scope)
                # Drop the 1x1 spatial dims: [B, 1, 1, C] -> [B, C].
                joint_embeddings = tf.squeeze(joint_embeddings, [1, 2])
    return joint_embeddings
def _box2cs(box, image_size):
(x, y, w, h) = box[:4]
aspect_ratio = ((1.0 * image_size[0]) / image_size[1])
center = np.zeros(2, dtype=np.float32)
center[0] = (x + (w * 0.5))
center[1] = (y + (h * 0.5))
if (w > (aspect_ratio * h)):
h = ((w * 1.0) / aspect_ratio)
elif (w < (aspect_ratio * h)):
w = (h * aspect_ratio)
scale = np.array([((w * 1.0) / 200.0), ((h * 1.0) / 200.0)], dtype=np.float32)
scale = (scale * 1.25)
return (center, scale) |
class Model(ModelBase):
    """Original faceswap autoencoder: one shared encoder, one decoder per side.

    Input faces are 64x64x3; the encoder bottleneck width depends on the
    ``lowmem`` config flag.
    """

    def __init__(self, *args, **kwargs):
        # BUGFIX: the debug format string was missing its closing parenthesis
        # ("(args: %s, kwargs: %s" -> "(args: %s, kwargs: %s)").
        logger.debug('Initializing %s: (args: %s, kwargs: %s)', self.__class__.__name__, args, kwargs)
        self.configfile = kwargs.get('configfile', None)
        if ('input_shape' not in kwargs):
            kwargs['input_shape'] = (64, 64, 3)
        if ('encoder_dim' not in kwargs):
            # Smaller bottleneck when the lowmem option is enabled.
            kwargs['encoder_dim'] = (512 if self.config['lowmem'] else 1024)
        super().__init__(*args, **kwargs)
        logger.debug('Initialized %s', self.__class__.__name__)

    def add_networks(self):
        """Register the shared encoder and the two per-side decoders."""
        logger.debug('Adding networks')
        self.add_network('decoder', 'a', self.decoder(), is_output=True)
        self.add_network('decoder', 'b', self.decoder(), is_output=True)
        self.add_network('encoder', None, self.encoder())
        logger.debug('Added networks')

    def build_autoencoders(self, inputs):
        """Build one autoencoder (encoder + side decoder) per side."""
        logger.debug('Initializing model')
        for side in ('a', 'b'):
            logger.debug('Adding Autoencoder. Side: %s', side)
            decoder = self.networks['decoder_{}'.format(side)].network
            output = decoder(self.networks['encoder'].network(inputs[0]))
            autoencoder = KerasModel(inputs, output)
            self.add_predictor(side, autoencoder)
        logger.debug('Initialized model')

    def encoder(self):
        """Shared downscaling encoder ending in an 8x8x512 feature map."""
        input_ = Input(shape=self.input_shape)
        var_x = input_
        var_x = self.blocks.conv(var_x, 128)
        var_x = self.blocks.conv(var_x, 256)
        var_x = self.blocks.conv(var_x, 512)
        if (not self.config.get('lowmem', False)):
            var_x = self.blocks.conv(var_x, 1024)
        var_x = Dense(self.encoder_dim)(Flatten()(var_x))
        var_x = Dense(((4 * 4) * 1024))(var_x)
        var_x = Reshape((4, 4, 1024))(var_x)
        var_x = self.blocks.upscale(var_x, 512)
        return KerasModel(input_, var_x)

    def decoder(self):
        """Upscaling decoder producing the face (and optionally a mask)."""
        input_ = Input(shape=(8, 8, 512))
        var_x = input_
        var_x = self.blocks.upscale(var_x, 256)
        var_x = self.blocks.upscale(var_x, 128)
        var_x = self.blocks.upscale(var_x, 64)
        var_x = self.blocks.conv2d(var_x, 3, kernel_size=5, padding='same', activation='sigmoid', name='face_out')
        outputs = [var_x]
        if self.config.get('mask_type', None):
            # Parallel mask head mirroring the face decoder.
            var_y = input_
            var_y = self.blocks.upscale(var_y, 256)
            var_y = self.blocks.upscale(var_y, 128)
            var_y = self.blocks.upscale(var_y, 64)
            var_y = self.blocks.conv2d(var_y, 1, kernel_size=5, padding='same', activation='sigmoid', name='mask_out')
            outputs.append(var_y)
        return KerasModel(input_, outputs=outputs)
class CAGEAlgorithm():
    """Train/evaluate the CAGE grasp-semantics model over a list of experiments."""
    def __init__(self, number_of_epochs=300):
        self.model = None
        self.number_of_epochs = number_of_epochs
    def run_experiments(self, data, experiments):
        """Run each (description, train_objs, test_objs) experiment.

        Builds per-split feature tuples (task, object_class, state, grasp,
        parts) from ``data``, trains, then records per-object test
        probabilities.

        Returns:
            list of (description, Y_test_lists, Y_prob_lists) tuples.
        """
        results = []
        for experiment in experiments:
            (description, train_objs, test_objs) = experiment
            print('\nRun experiment {}'.format(description))
            for split in ['train', 'test']:
                if (split == 'train'):
                    objs = train_objs
                elif (split == 'test'):
                    objs = test_objs
                labels = []
                features = []
                for obj in objs:
                    for id in obj:
                        # data[id] layout (as used here): [0] label,
                        # [1] (task, object_class, state), [3] grasp
                        # semantic features, [4] parts.
                        label = data[id][0]
                        labels.append(label)
                        extracted_grasp_semantic_features = data[id][3]
                        grasp = extracted_grasp_semantic_features
                        task = data[id][1][0]
                        object_class = data[id][1][1]
                        state = data[id][1][2]
                        parts = data[id][4]
                        features.append((task, object_class, state, grasp, parts))
                if (split == 'train'):
                    X_train = features
                    Y_train = np.array(labels)
                elif (split == 'test'):
                    X_test = features
                    Y_test = np.array(labels)
            # Degenerate experiments with a single class are skipped.
            if (len(np.unique(Y_train)) <= 1):
                print('Skip this task because there is only one class')
                continue
            train_stats = {}
            for label in np.unique(Y_train):
                train_stats[label] = np.sum((Y_train == label))
            test_stats = {}
            for label in np.unique(Y_test):
                test_stats[label] = np.sum((Y_test == label))
            print('train stats:', train_stats)
            print('test stats:', test_stats)
            batcher = Batcher(X_train, Y_train, X_test, Y_test, batch_size=256, do_shuffle=True)
            self.train(batcher)
            Y_probs = self.test(batcher)
            # Reshape flat per-sample outputs back to per-object groups.
            Y_probs = Y_probs.reshape([len(test_objs), (- 1), len(np.unique(Y_train))])
            Y_test = Y_test.reshape([len(test_objs), (- 1)])
            result = (description, Y_test.tolist(), Y_probs.tolist())
            results.append(result)
        return results
    def train(self, batcher):
        """Build a fresh CAGEModel sized from the batcher vocabularies and fit
        it with Adagrad + class-weighted NLL for ``number_of_epochs`` epochs."""
        self.model = CAGEModel(affordance_vocab_size=batcher.get_affordance_vocab_size(), material_vocab_size=batcher.get_material_vocab_size(), task_vocab_size=batcher.get_task_vocab_size(), object_vocab_size=batcher.get_object_vocab_size(), state_vocab_size=batcher.get_state_vocab_size(), affordance_embedding_dim=5, material_embedding_dim=5, task_embedding_dim=5, object_embedding_dim=5, state_embedding_dim=5, part_encoder_dim=5, object_encoder_dim=5, grasp_encoder_dim=5, part_pooling_method='max', label_dim=batcher.get_label_dim())
        optimizer = optim.Adagrad(self.model.parameters(), lr=0.1)
        criterion = torch.nn.NLLLoss(weight=batcher.get_class_weights())
        for epoch in range(self.number_of_epochs):
            total_loss = 0
            start = time.time()
            batcher.reset()
            for (batch_features, batch_labels) in batcher.get_train_batch():
                self.model.train()
                self.model.zero_grad()
                log_probs = self.model(batch_features)
                loss = criterion(log_probs, batch_labels)
                loss.backward()
                optimizer.step()
                total_loss += loss.item()
            print('Epoch', epoch, 'spent', (time.time() - start), 'with total loss:', total_loss)
    def test(self, batcher):
        """Return stacked class probabilities (exp of log-probs) for the test set."""
        all_probs = None
        with torch.no_grad():
            self.model.eval()
            batcher.reset()
            for (batch_features, batch_labels) in batcher.get_test_batch():
                log_probs = self.model(batch_features)
                probs = torch.exp(log_probs).clone().cpu().data.numpy()
                if (all_probs is None):
                    all_probs = probs
                else:
                    all_probs = np.vstack([all_probs, probs])
        return all_probs
class ReplayMemory(object):
    """Fixed-capacity ring buffer of transitions for experience replay."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []
        # Index of the next slot to (over)write.
        self.position = 0

    def push(self, *args):
        """Store a transition, overwriting the oldest entry once full."""
        if len(self.memory) < self.capacity:
            # Grow the buffer until it reaches capacity.
            self.memory.append(None)
        self.memory[self.position] = Transition(*args)
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        """Return ``batch_size`` transitions drawn uniformly without replacement."""
        return random.sample(self.memory, batch_size)

    def output_all(self):
        """Return the raw underlying buffer (insertion order wraps once full)."""
        return self.memory

    def __len__(self):
        return len(self.memory)
def getArgInt(name, args, min, max, main=True):
    """Parse and range-check an integer command-line argument.

    Args:
        name: option name, used in error messages.
        args: an iterator of remaining argv tokens when ``main`` is True,
            otherwise the raw value itself.
        min, max: inclusive bounds (parameter names kept for interface
            compatibility although they shadow the builtins).
        main: whether to pull the value from the ``args`` iterator.

    Returns:
        The parsed integer (``doError`` is expected to abort on failure).
    """
    if main:
        try:
            arg = next(args)
        except StopIteration:
            # BUGFIX: was a bare ``except`` which also hid unrelated errors.
            doError((name + ': no argument supplied'), True)
    else:
        arg = args
    try:
        val = int(arg)
    except (ValueError, TypeError):
        # BUGFIX: narrowed from a bare ``except`` to conversion failures only.
        doError((name + ': non-integer value given'), True)
    if ((val < min) or (val > max)):
        doError((name + (': value should be between %d and %d' % (min, max))), True)
    return val
def check_ddp_consistency(module, ignore_regex=None):
    """Assert every parameter/buffer of *module* is identical across DDP ranks.

    Rank 0 broadcasts its copy of each tensor and every rank compares its own
    tensor against it. Running statistics ("running" in the name) and names
    matching *ignore_regex* are skipped.
    """
    assert isinstance(module, torch.nn.Module)
    for name, tensor in named_params_and_buffers(module):
        if 'running' in name:
            # Batch-norm running stats may legitimately differ across ranks.
            continue
        fullname = type(module).__name__ + '.' + name
        if ignore_regex is not None and re.fullmatch(ignore_regex, fullname):
            continue
        tensor = tensor.detach()
        if tensor.is_floating_point():
            # NaNs never compare equal, so normalise them away first.
            tensor = nan_to_num(tensor)
        reference = tensor.clone()
        torch.distributed.broadcast(tensor=reference, src=0)
        assert (tensor == reference).all(), fullname
# NOTE(review): the two lines below look like the argument lists of stripped
# decorators (e.g. a max-runs marker and a ``run_with(...)``-style
# parametrizer) — confirm against the original test-suite source.
(max_runs=10)
_with(Learner1D, Learner2D, LearnerND, AverageLearner, AverageLearner1D, SequenceLearner, with_all_loss_functions=False)
def test_balancing_learner(learner_type, f, learner_kwargs):
    """Randomly interleave ask/tell on a BalancingLearner, deferring a random
    subset of asked points to a stash, and check every sub-learner still
    accumulates enough points/samples."""
    learners = [learner_type(generate_random_parametrization(f), **learner_kwargs) for i in range(4)]
    learner = BalancingLearner(learners)
    stash = []
    for _i in range(100):
        n = random.randint(1, 10)
        m = random.randint(0, n)
        (xs, _) = learner.ask(n, tell_pending=False)
        # Defer m of the n asked points; tell the rest immediately.
        random.shuffle(xs)
        for _ in range(m):
            stash.append(xs.pop())
        for x in xs:
            learner.tell(x, learner.function(x))
        # Replay m previously stashed points in random order.
        random.shuffle(stash)
        for _ in range(m):
            x = stash.pop()
            learner.tell(x, learner.function(x))
    if (learner_type is AverageLearner1D):
        nsamples = [lrn.nsamples for lrn in learner.learners]
        assert all(((lrn.nsamples > 5) for lrn in learner.learners)), nsamples
    else:
        npoints = [lrn.npoints for lrn in learner.learners]
        assert all(((lrn.npoints > 5) for lrn in learner.learners)), npoints
def enum_attach(ctr_mol, nei_node, amap, singletons):
    """Enumerate candidate attachments of a neighbor cluster onto a center mol
    (junction-tree molecule assembly).

    Args:
        ctr_mol: RDKit mol of the current (center) fragment.
        nei_node: neighbor cluster exposing ``mol`` and ``nid``.
        amap: list of (nei_id, ctr_atom_idx, nei_atom_idx) matches so far.
        singletons: cluster ids that are single atoms (their center atoms are
            excluded from further attachment).

    Returns:
        list: each element is an extended copy of ``amap``.
    """
    (nei_mol, nei_idx) = (nei_node.mol, nei_node.nid)
    att_confs = []
    # Atoms already claimed by singleton neighbors cannot take attachments.
    black_list = [atom_idx for (nei_id, atom_idx, _) in amap if (nei_id in singletons)]
    ctr_atoms = [atom for atom in ctr_mol.GetAtoms() if (atom.GetIdx() not in black_list)]
    ctr_bonds = [bond for bond in ctr_mol.GetBonds()]
    if (nei_mol.GetNumBonds() == 0):
        # Neighbor is a single atom: attach onto any matching, unused atom.
        nei_atom = nei_mol.GetAtomWithIdx(0)
        used_list = [atom_idx for (_, atom_idx, _) in amap]
        for atom in ctr_atoms:
            if (atom_equal(atom, nei_atom) and (atom.GetIdx() not in used_list)):
                new_amap = (amap + [(nei_idx, atom.GetIdx(), 0)])
                att_confs.append(new_amap)
    elif (nei_mol.GetNumBonds() == 1):
        # Neighbor is a single bond: fuse at either endpoint with valence check.
        bond = nei_mol.GetBondWithIdx(0)
        bond_val = int(bond.GetBondTypeAsDouble())
        (b1, b2) = (bond.GetBeginAtom(), bond.GetEndAtom())
        for atom in ctr_atoms:
            # Carbon must have enough free hydrogens to absorb the bond order.
            if ((atom.GetAtomicNum() == 6) and (atom.GetTotalNumHs() < bond_val)):
                continue
            if atom_equal(atom, b1):
                new_amap = (amap + [(nei_idx, atom.GetIdx(), b1.GetIdx())])
                att_confs.append(new_amap)
            elif atom_equal(atom, b2):
                new_amap = (amap + [(nei_idx, atom.GetIdx(), b2.GetIdx())])
                att_confs.append(new_amap)
    else:
        # Neighbor is a ring: merge on a shared atom...
        for a1 in ctr_atoms:
            for a2 in nei_mol.GetAtoms():
                if atom_equal(a1, a2):
                    # Carbon valence cap when merging two substituted atoms.
                    if ((a1.GetAtomicNum() == 6) and ((a1.GetTotalNumHs() + a2.GetTotalNumHs()) < 4)):
                        continue
                    new_amap = (amap + [(nei_idx, a1.GetIdx(), a2.GetIdx())])
                    att_confs.append(new_amap)
        # ...or on a shared bond, in both orientations.
        if (ctr_mol.GetNumBonds() > 1):
            for b1 in ctr_bonds:
                for b2 in nei_mol.GetBonds():
                    if ring_bond_equal(b1, b2):
                        new_amap = (amap + [(nei_idx, b1.GetBeginAtom().GetIdx(), b2.GetBeginAtom().GetIdx()), (nei_idx, b1.GetEndAtom().GetIdx(), b2.GetEndAtom().GetIdx())])
                        att_confs.append(new_amap)
                    if ring_bond_equal(b1, b2, reverse=True):
                        new_amap = (amap + [(nei_idx, b1.GetBeginAtom().GetIdx(), b2.GetEndAtom().GetIdx()), (nei_idx, b1.GetEndAtom().GetIdx(), b2.GetBeginAtom().GetIdx())])
                        att_confs.append(new_amap)
    return att_confs
def state_dict_all_gather_keys(state_dict: Dict[(str, Union[(torch.Tensor, ShardedTensor)])], pg: ProcessGroup) -> List[str]:
    """Gather state-dict key names from every rank in *pg* and return the
    sorted union (so all ranks agree on one deterministic key ordering)."""
    local_names = list(state_dict.keys())
    gathered = [None] * dist.get_world_size(pg)
    dist.all_gather_object(gathered, local_names, pg)
    unique_names = {name for rank_names in gathered for name in rank_names}
    return sorted(unique_names)
def test(net, config, master_bar, mode='test'):
    """Evaluate a TSP GNN on the validation or test split.

    Iterates the chosen dataset, computes the class-weighted loss per batch
    and accumulates running averages of loss and ground-truth tour length.

    Returns:
        (elapsed_seconds, mean_loss, err_edges, err_tour, err_tsp,
        mean_pred_tour_len, mean_gt_tour_len) — the error counters and
        predicted tour length are placeholders (always 0 here).
    """
    net.eval()
    num_nodes = config.num_nodes
    num_neighbors = config.num_neighbors
    batch_size = config.batch_size
    batches_per_epoch = config.batches_per_epoch
    beam_size = config.beam_size
    val_filepath = config.val_filepath
    val_target_filepath = config.val_filepath_solution
    test_filepath = config.test_filepath
    test_target_filepath = config.test_filepath_solution
    if (mode == 'val'):
        dataset = DataReader(num_nodes, num_neighbors, batch_size=batch_size, filepath=val_filepath, target_filepath=val_target_filepath, do_prep=False)
    elif (mode == 'test'):
        dataset = DataReader(num_nodes, num_neighbors, batch_size=batch_size, filepath=test_filepath, target_filepath=test_target_filepath, do_prep=False)
    # Evaluate the full split, not the configured training batch count.
    batches_per_epoch = dataset.max_iter
    dataset = iter(dataset)
    edge_cw = None
    running_loss = 0.0
    running_pred_tour_len = 0.0
    running_gt_tour_len = 0.0
    running_nb_data = 0
    running_nb_batch = 0
    with torch.no_grad():
        start_test = time.time()
        for batch_num in progress_bar(range(batches_per_epoch), parent=master_bar):
            try:
                batch = next(dataset)
            except StopIteration:
                break
            x_nodes_coord = Variable(torch.FloatTensor(batch.nodes_coord).type(dtypeFloat), requires_grad=False)
            x_nodes_timew = (Variable(torch.FloatTensor(batch.nodes_timew).type(dtypeFloat), requires_grad=False) if is_tsptw else None)
            y_tour = Variable(torch.LongTensor(batch.tour_nodes).type(dtypeLong), requires_grad=False)
            if (type(edge_cw) != torch.Tensor):
                # Class weights for the edge labels, computed once from the
                # first batch: a tour uses 2*num_nodes directed edges.
                num_nodes = x_nodes_coord.size(1)
                num_edges = (num_nodes * num_nodes)
                num_edge_classes = 2
                edge_label_bincount = np.array([(num_edges - (2 * num_nodes)), (2 * num_nodes)])
                edge_cw = (num_edges / (num_edge_classes * edge_label_bincount))
            (y_preds, loss, x_edges_values) = net.forward(x_nodes_coord, x_nodes_timew, y_tour, edge_cw)
            loss = loss.mean()
            gt_tour_len = np.mean(batch.tour_len)
            running_nb_data += batch_size
            running_loss += (batch_size * loss.data.item())
            running_gt_tour_len += (batch_size * gt_tour_len)
            running_nb_batch += 1
            result = 'loss:{loss:.4f} gt_tour_len:{gt_tour_len:.3f}'.format(loss=(running_loss / running_nb_data), gt_tour_len=(running_gt_tour_len / running_nb_data))
            master_bar.child.comment = result
    loss = (running_loss / running_nb_data)
    # Placeholders: edge/tour error metrics are not computed in this path.
    err_edges = 0
    err_tour = 0
    err_tsp = 0
    pred_tour_len = (running_pred_tour_len / running_nb_data)
    gt_tour_len = (running_gt_tour_len / running_nb_data)
    return ((time.time() - start_test), loss, err_edges, err_tour, err_tsp, pred_tour_len, gt_tour_len)
def _calculate_parquet_column_size(type_params: PartialParquetParameters, columns: List[str]):
    """Sum the uncompressed size in bytes of *columns* over the row groups
    scheduled for download, scaled by the parquet->pyarrow inflation factor."""
    total_size = 0.0
    for rg in type_params.row_groups_to_download:
        columns_found = 0
        rg_meta = type_params.pq_metadata.row_group(rg)
        for col_idx in range(rg_meta.num_columns):
            col_meta = rg_meta.column(col_idx)
            if col_meta.path_in_schema in columns:
                columns_found += 1
                total_size += col_meta.total_uncompressed_size
    # NOTE(review): only the *last* row group's match count is validated here —
    # presumably all row groups share one schema; confirm upstream.
    assert (columns_found == len(columns)), f'Columns not found in the parquet data as {columns_found} != {len(columns)}'
    return (total_size * PARQUET_TO_PYARROW_INFLATION)
class Effect11945(BaseEffect):
    """Passive hull bonus: Gallente Dreadnought skill boosts the tracking
    speed of fitted capital projectile turrets (shipBonusTitanG1)."""
    type = 'passive'
    def handler(fit, src, context, projectionRange, **kwargs):
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Capital Projectile Turret')), 'trackingSpeed', src.getModifiedItemAttr('shipBonusTitanG1'), skill='Gallente Dreadnought', **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.