code stringlengths 101 5.91M |
|---|
def eval(configs):
    """Render and score every trained network configuration, writing stats.json.

    For each entry in the module-level ``configX`` list, loads all network
    variants (product of ``networkX`` x ``fourierX`` x ``importanceX``), renders
    ``num_cameras`` rotation views, and records timing/SSIM/LPIPS per variant.
    Results are cached: if ``BASE_PATH/stats.json`` already exists it is
    returned untouched.

    NOTE(review): this function shadows the builtin ``eval``; the ``configs``
    parameter is never used — the loop iterates the module-level ``configX``
    instead. Presumably one of the two should be the other — confirm.

    Returns:
        Path to the statistics JSON file.
    """
    print('Evaluate')
    statistics_file = os.path.join(BASE_PATH, 'stats.json')
    if os.path.exists(statistics_file):
        # Cached results from a previous run — skip the expensive evaluation.
        print('Statistics file already exists!')
        return statistics_file
    # Deferred imports: these pull in CUDA/renderer state, only needed here.
    import common.utils as utils
    import pyrenderer
    from volnet.inference import LoadedModel
    from losses.lossbuilder import LossBuilder
    num_cameras = 64
    width = 512
    height = 512
    STEPSIZE = (1 / 512)
    timer = pyrenderer.GPUTimer()
    rendering_mode = LoadedModel.EvaluationMode.TENSORCORES_MIXED
    # Pre-integration is only enabled for the TensorCore mixed-precision mode.
    enable_preintegration = (rendering_mode == LoadedModel.EvaluationMode.TENSORCORES_MIXED)
    output_stats = []
    device = torch.device('cuda')
    ssim_loss = LossBuilder(device).ssim_loss(4)
    lpips_loss = LossBuilder(device).lpips_loss(4, 0.0, 1.0)

    def compute_stats(ln, mode, reference_images, stepsize, filename_template=None, do_ssim=False, do_lpips=False):
        """Render all cameras with network `ln`; return ((time mean/std),
        (ssim mean/std), (lpips mean/std)); NaN pairs when a metric is off.

        NOTE(review): reads `cameras` from the enclosing scope, which is only
        assigned inside the config loop below — must be called after the
        reference pass.
        """
        timingsX = []
        ssimX = []
        lpipsX = []
        for i in range(num_cameras):
            current_image = ln.render_network(cameras[i], width, height, mode, stepsize, timer=timer)
            if (i > 0):
                # Skip the first frame: it includes warm-up/compilation cost.
                timingsX.append(timer.elapsed_milliseconds())
            if (filename_template is not None):
                imageio.imwrite((filename_template % i), LoadedModel.convert_image(current_image))
            if do_ssim:
                ssimX.append(ssim_loss(current_image, reference_images[i]).item())
            if do_lpips:
                lpipsX.append(lpips_loss(current_image, reference_images[i]).item())
        return ((np.mean(timingsX), np.std(timingsX)), ((np.mean(ssimX), np.std(ssimX)) if do_ssim else (np.NaN, np.NaN)), ((np.mean(lpipsX), np.std(lpipsX)) if do_lpips else (np.NaN, np.NaN)))

    def load_and_save(cfg):
        """Load a trained model from its hdf5 file and export the compiled
        .volnet network next to it; returns (model, output_name)."""
        (_, filename, output_name) = get_args_and_hdf5_file(cfg)
        ln = LoadedModel(filename)
        if enable_preintegration:
            ln.enable_preintegration(True)
        ln.save_compiled_network(filename.replace('.hdf5', '.volnet'))
        return (ln, output_name)

    for (cfg_index, config) in enumerate(configX):
        image_folder = os.path.join(BASE_PATH, ('images_' + config[0]))
        local_stats = {'cfg_index': cfg_index, 'cfg': config[1]}
        reference_images = None
        lns = dict()
        base_ln = None
        # Pass 1: load every network variant for this config.
        for (network, fourier, importance) in itertools.product(networkX, fourierX, importanceX):
            filename = (FILENAME_PATTERN % (config[0], network[0], fourier[0], importance[0]))
            (ln, name) = load_and_save((config[1], network[1:], fourier[1], importance[2], filename))
            lns[(network[0], fourier[0], importance[0])] = (ln, name)
            if (base_ln is None):
                base_ln = ln
        # Render ground-truth reference images once per config (any loaded
        # model can produce them, so the first one is used).
        if (reference_images is None):
            image_folder_reference = os.path.join(image_folder, 'reference')
            os.makedirs(image_folder_reference, exist_ok=True)
            print('\n Render reference', cfg_index)
            cameras = base_ln.get_rotation_cameras(num_cameras)
            reference_images = ([None] * num_cameras)
            for i in range(num_cameras):
                reference_images[i] = base_ln.render_reference(cameras[i], width, height)
                imageio.imwrite(os.path.join(image_folder_reference, ('reference%03d.png' % i)), LoadedModel.convert_image(reference_images[i]))
        # Pass 2: evaluate each variant against the references.
        for (network, fourier, importance) in itertools.product(networkX, fourierX, importanceX):
            (ln, name) = lns[(network[0], fourier[0], importance[0])]
            image_folder_screen = os.path.join(image_folder, ('%s' % name))
            os.makedirs(image_folder_screen, exist_ok=True)
            (time, ssim, lpips) = compute_stats(ln, rendering_mode, reference_images, STEPSIZE, os.path.join(image_folder_screen, 'img%03d.png'), True, True)
            local_stats[name] = {'time': time, 'ssim': ssim, 'lpips': lpips}
        output_stats.append(local_stats)
    print('\n Done, save statistics')
    with open(statistics_file, 'w') as f:
        json.dump(output_stats, f)
    return statistics_file
def convert_to_execution_order(sql, schema):
    """Re-serialize a SQL string with its clauses arranged in execution order.

    Parses `sql` into an AST and formats it back using the given `schema`,
    asking the formatter for execution-order clause layout.
    """
    parsed = parse(sql)
    return format(parsed, schema, in_execution_order=True)
class G_D(nn.Module):
    """Wraps a generator G and discriminator D into one module so that a
    single forward pass produces D's scores for fake and real images plus all
    DAG-augmented views (rotations and corner crops).

    NOTE(review): the augmented inputs (rotate/crop) are built BEFORE the CR
    branch extends D_input/D_class, so under CR the augmented batches are one
    chunk shorter than D_class — presumably the D heads handle this; confirm.
    """

    def __init__(self, G, D):
        super(G_D, self).__init__()
        self.G = G
        self.D = D

    def forward(self, z, gy, x=None, dy=None, train_G=False, return_G_z=False, policy=False, CR=False, CR_augment=None):
        """Run G (optionally) and D on fake/real batches and their DAG views.

        z, gy: latent batch and its class labels (z may be None to score only x).
        x, dy: real batch and labels (optional).
        train_G: enables gradients through G.
        return_G_z: also return the generated images (fake-only path).
        CR: consistency regularization — append an (augmented) copy of x.
        CR_augment: augmentation policy string for CR; may start with 'flip,'.
        """
        if (z is not None):
            # Only track gradients through G when training the generator.
            with torch.set_grad_enabled(train_G):
                G_z = self.G(z, self.G.shared(gy))
                # Bridge mixed-precision setups between G and D.
                if (self.G.fp16 and (not self.D.fp16)):
                    G_z = G_z.float()
                if (self.D.fp16 and (not self.G.fp16)):
                    G_z = G_z.half()
        else:
            G_z = None
        # Concatenate fake and real along the batch axis (skipping absent parts).
        D_input = torch.cat([img for img in [G_z, x] if (img is not None)], 0)
        D_class = torch.cat([label for label in [gy, dy] if (label is not None)], 0)
        # DAG self-supervised views: three rotations and four corner crops.
        D_input_rotate90 = DAG_Augment(D_input, policy='rotate90')
        D_input_rotate180 = DAG_Augment(D_input, policy='rotate180')
        D_input_rotate270 = DAG_Augment(D_input, policy='rotate270')
        D_input_croptl = DAG_Augment(D_input, policy='croptl')
        D_input_croptr = DAG_Augment(D_input, policy='croptr')
        D_input_cropbl = DAG_Augment(D_input, policy='cropbl')
        D_input_cropbr = DAG_Augment(D_input, policy='cropbr')
        if CR:
            # Consistency regularization: append an (augmented) copy of the
            # real batch with the same labels.
            if CR_augment:
                # Recover the real part x from the fake+real concatenation.
                x_CR_aug = torch.split(D_input, [G_z.shape[0], x.shape[0]])[1]
                if CR_augment.startswith('flip,'):
                    # Per-sample random horizontal flip before the DAG policy.
                    x_CR_aug = torch.where((torch.randint(0, 2, size=[x_CR_aug.size(0), 1, 1, 1], device=x_CR_aug.device) > 0), x_CR_aug.flip(3), x_CR_aug)
                x_CR_aug = DAG_Augment(x_CR_aug, policy=CR_augment.replace('flip,', ''))
                D_input = torch.cat([D_input, x_CR_aug], 0)
            else:
                D_input = torch.cat([D_input, x], 0)
            D_class = torch.cat([D_class, dy], 0)
        # D returns one output per head (identity + 7 augmentations); each call
        # feeds the matching augmented input and keeps only its head's output.
        (D_out, _, _, _, _, _, _, _) = self.D(D_input, D_class)
        (_, D_out_rotate90, _, _, _, _, _, _) = self.D(D_input_rotate90, D_class)
        (_, _, D_out_rotate180, _, _, _, _, _) = self.D(D_input_rotate180, D_class)
        (_, _, _, D_out_rotate270, _, _, _, _) = self.D(D_input_rotate270, D_class)
        (_, _, _, _, D_out_croptl, _, _, _) = self.D(D_input_croptl, D_class)
        (_, _, _, _, _, D_out_croptr, _, _) = self.D(D_input_croptr, D_class)
        (_, _, _, _, _, _, D_out_cropbl, _) = self.D(D_input_cropbl, D_class)
        (_, _, _, _, _, _, _, D_out_cropbr) = self.D(D_input_cropbr, D_class)
        if (G_z is None):
            # Real-only pass: just the identity-head score.
            return D_out
        elif (x is not None):
            if CR:
                # Split each head's output back into (fake, real, real_aug).
                return (torch.split(D_out, [G_z.shape[0], x.shape[0], x.shape[0]]), torch.split(D_out_rotate90, [G_z.shape[0], x.shape[0], x.shape[0]]), torch.split(D_out_rotate180, [G_z.shape[0], x.shape[0], x.shape[0]]), torch.split(D_out_rotate270, [G_z.shape[0], x.shape[0], x.shape[0]]), torch.split(D_out_croptl, [G_z.shape[0], x.shape[0], x.shape[0]]), torch.split(D_out_croptr, [G_z.shape[0], x.shape[0], x.shape[0]]), torch.split(D_out_cropbl, [G_z.shape[0], x.shape[0], x.shape[0]]), torch.split(D_out_cropbr, [G_z.shape[0], x.shape[0], x.shape[0]]))
            else:
                # Split each head's output back into (fake, real).
                return (torch.split(D_out, [G_z.shape[0], x.shape[0]]), torch.split(D_out_rotate90, [G_z.shape[0], x.shape[0]]), torch.split(D_out_rotate180, [G_z.shape[0], x.shape[0]]), torch.split(D_out_rotate270, [G_z.shape[0], x.shape[0]]), torch.split(D_out_croptl, [G_z.shape[0], x.shape[0]]), torch.split(D_out_croptr, [G_z.shape[0], x.shape[0]]), torch.split(D_out_cropbl, [G_z.shape[0], x.shape[0]]), torch.split(D_out_cropbr, [G_z.shape[0], x.shape[0]]))
        elif return_G_z:
            # NOTE(review): this branch omits the crop-head outputs — confirm
            # whether that is intentional for the fake-only CR-less path.
            return (D_out, D_out_rotate90, D_out_rotate180, D_out_rotate270, G_z)
        else:
            return (D_out, D_out_rotate90, D_out_rotate180, D_out_rotate270, D_out_croptl, D_out_croptr, D_out_cropbl, D_out_cropbr)
def GenerateSM80_SparseTensorOp_16832(manifest, args):
    """Register CUTLASS sparse tensor-op GEMM kernels (16x8x32 MMA) for SM80.

    Requires CUDA >= 11.1; otherwise no kernels are emitted. For each math
    instruction (fp16/fp16 accumulators and bf16) every layout combination
    and tile shape is instantiated via CreateSparseGemmOperator.
    """
    if (not CudaToolkitVersionSatisfies(args.cuda_version, 11, 1)):
        return
    # All four (A, B) layout combinations; C is always row-major.
    layouts = [(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.RowMajor), (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.RowMajor), (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.RowMajor), (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.RowMajor)]
    # 16x8x32 MMA shapes: f16 with f32/f16 accumulation, and bf16 with f32.
    math_instructions = [MathInstruction([16, 8, 32], DataType.f16, DataType.f16, DataType.f32, OpcodeClass.TensorOp, MathOperation.multiply_add), MathInstruction([16, 8, 32], DataType.f16, DataType.f16, DataType.f16, OpcodeClass.TensorOp, MathOperation.multiply_add), MathInstruction([16, 8, 32], DataType.bf16, DataType.bf16, DataType.f32, OpcodeClass.TensorOp, MathOperation.multiply_add)]
    min_cc = 80
    max_cc = 1024
    # Some tiles exceed shared memory on later architectures, so they are
    # capped at compute capability 80.
    max_cc_smem_limited = 80
    alignment_constraints = [8, 4, 2]
    for math_inst in math_instructions:
        tile_descriptions = [TileDescription([64, 128, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([256, 128, 64], 3, [4, 2, 1], math_inst, min_cc, max_cc_smem_limited), TileDescription([128, 256, 64], 3, [2, 4, 1], math_inst, min_cc, max_cc_smem_limited), TileDescription([128, 128, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([256, 64, 64], 3, [4, 1, 1], math_inst, min_cc, max_cc), TileDescription([64, 256, 64], 4, [1, 4, 1], math_inst, min_cc, max_cc), TileDescription([128, 64, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 64, 64], 4, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 128, 128], 3, [2, 4, 1], math_inst, min_cc, max_cc_smem_limited), TileDescription([256, 64, 128], 3, [4, 1, 1], math_inst, min_cc, max_cc_smem_limited), TileDescription([128, 64, 128], 4, [2, 2, 1], math_inst, min_cc, max_cc_smem_limited), TileDescription([64, 128, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 64, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc)]
        # Output element type matches the accumulator type.
        data_type = [math_inst.element_a, math_inst.element_b, math_inst.element_accumulator, math_inst.element_accumulator]
        CreateSparseGemmOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints)
        # Mixed variant: accumulate wide, store in the (narrower) input type.
        if (math_inst.element_a != math_inst.element_accumulator):
            data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, math_inst.element_accumulator]
            CreateSparseGemmOperator(manifest, layouts, tile_descriptions, data_type_mixed, alignment_constraints)
def _padded_batch(example_ds, batch_size, shapes, drop_remainder=False):
padded_shapes = {}
padded_shapes['observation'] = {}
for (k, v) in shapes.items():
if ('observation' in k):
padded_shapes['observation'][k.replace('observation/', '')] = (((- 1),) + v)
else:
padded_shapes[k] = (((- 1),) + v)
padded_shapes['length'] = ()
return example_ds.padded_batch(batch_size, padded_shapes=padded_shapes, drop_remainder=drop_remainder) |
class Adamax(Optimizer):
    """Adamax optimizer (Adam with infinity norm) with BERT-style LR schedules.

    Supports warmup schedules (via _LRSchedule / SCHEDULES), optional L2
    weight decay and per-parameter gradient-norm clipping.

    NOTE(review): several in-place calls use the legacy positional-scalar
    PyTorch signatures (e.g. ``add_(scalar, tensor)``, ``addcdiv_(scalar, a,
    b)``) that were removed in newer PyTorch — this code assumes an older
    torch version.
    """

    def __init__(self, params, lr=required, warmup=(- 1), t_total=(- 1), schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-08, weight_decay=0, max_grad_norm=1.0, **kwargs):
        """Validate hyperparameters and build the schedule object.

        params: iterable of parameters or param groups.
        lr: base learning rate (required).
        warmup/t_total: schedule parameters (ignored when `schedule` is
            already an _LRSchedule instance).
        schedule: schedule name key into SCHEDULES, or an _LRSchedule.
        b1/b2: exponential decay rates; e: epsilon; weight_decay: L2 factor;
        max_grad_norm: per-parameter clip threshold (<= 0 disables).
        """
        if (not (0.0 <= lr)):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (not (0.0 <= e)):
            raise ValueError('Invalid epsilon value: {}'.format(e))
        if (not (0.0 <= b1 < 1.0)):
            raise ValueError('Invalid b1 parameter: {} - should be in [0.0, 1.0['.format(b1))
        if (not (0.0 <= b2 < 1.0)):
            raise ValueError('Invalid b2 parameter: {} - should be in [0.0, 1.0['.format(b2))
        if (not (0.0 <= weight_decay)):
            raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
        if (not isinstance(schedule, _LRSchedule)):
            # Resolve schedule by name and construct it.
            schedule_type = SCHEDULES[schedule]
            schedule = schedule_type(warmup=warmup, t_total=t_total)
        elif ((warmup != (- 1)) or (t_total != (- 1))):
            logger.warning('warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. Please specify custom warmup and t_total in _LRSchedule object.')
        defaults = dict(lr=lr, schedule=schedule, b1=b1, b2=b2, e=e, weight_decay=weight_decay, max_grad_norm=max_grad_norm)
        # Last scheduled LR applied by step(); exposed through show_lr().
        self.rate = None
        super(Adamax, self).__init__(params, defaults)

    def show_lr(self):
        """Return the most recent scheduled learning rate (None before step)."""
        return self.rate

    def get_lr(self):
        """Return the current scheduled LR per parameter, or [0] if any
        parameter has no state yet (i.e. before the first step)."""
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if (len(state) == 0):
                    return [0]
                lr_scheduled = group['lr']
                lr_scheduled *= group['schedule'].get_lr(state['step'])
                lr.append(lr_scheduled)
        return lr

    def step(self, closure=None):
        """Perform one optimization step.

        closure: optional callable that re-evaluates the model and returns
            the loss.
        Returns the closure's loss (or None).
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adamax does not support sparse gradients')
                state = self.state[p]
                if (len(state) == 0):
                    # Lazy state init: step counter, first moment, inf-norm.
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_inf'] = torch.zeros_like(p.data)
                (exp_avg, exp_inf) = (state['exp_avg'], state['exp_inf'])
                (beta1, beta2) = (group['b1'], group['b2'])
                eps = group['e']
                if (group['max_grad_norm'] > 0):
                    # Clip this parameter's gradient in place.
                    clip_grad_norm_(p, group['max_grad_norm'])
                lr_scheduled = group['lr']
                lr_scheduled *= group['schedule'].get_lr(state['step'])
                self.rate = lr_scheduled
                state['step'] += 1
                if (group['weight_decay'] != 0):
                    # L2 regularization added directly to the gradient.
                    grad = grad.add(group['weight_decay'], p.data)
                # First moment: exp_avg = b1*exp_avg + (1-b1)*grad.
                exp_avg.mul_(beta1).add_((1 - beta1), grad)
                # Infinity norm update: exp_inf = max(b2*exp_inf, |grad|+eps),
                # computed element-wise via a stacked max along dim 0.
                norm_buf = torch.cat([exp_inf.mul_(beta2).unsqueeze(0), grad.abs().add_(eps).unsqueeze_(0)], 0)
                torch.max(norm_buf, 0, keepdim=False, out=(exp_inf, exp_inf.new().long()))
                bias_correction = (1 - (beta1 ** state['step']))
                clr = (lr_scheduled / bias_correction)
                # p -= clr * exp_avg / exp_inf.
                p.data.addcdiv_((- clr), exp_avg, exp_inf)
        return loss
def patch_deprecated_methods(env):
    """Bridge an old-style Gym env (underscore methods) to the modern API.

    Aliases ``reset``/``step``/``seed`` to their underscore counterparts and
    wraps ``_render`` so that ``render(mode)`` draws and ``close()`` tears
    down. Emits a one-time deprecation warning (module-level ``warn_once``).
    """
    global warn_once
    if warn_once:
        # Warn only for the first deprecated environment encountered.
        logger.warn(("Environment '%s' has deprecated methods '_step' and '_reset' rather than 'step' and 'reset'. Compatibility code invoked. Set _gym_disable_underscore_compat = True to disable this behavior." % str(type(env))))
        warn_once = False
    env.reset = env._reset
    env.step = env._step
    env.seed = env._seed

    def _render(mode):
        return env._render(mode, close=False)

    def _close():
        env._render('human', close=True)

    env.render = _render
    env.close = _close
def make_y_lmdb_from_yuv(video_path_list, index_frame_list, key_list, lmdb_path, yuv_type='420p', h=None, w=None, batch=7000, compress_level=1, multiprocessing_read=False, map_size=None):
    """Extract Y (luma) planes from YUV videos and store them in a new LMDB.

    Args:
        video_path_list: per-frame list of source .yuv file paths.
        index_frame_list: per-frame frame index inside its video.
        key_list: per-frame LMDB key, also recorded in meta_info.txt.
        lmdb_path: target folder; must end with '.lmdb' and must not exist.
        yuv_type: chroma subsampling of the sources (default '420p').
        h, w: frame size; parsed from the filename ('..._WxH_...') when w is None.
        batch: commit the write transaction every `batch` entries.
        compress_level: compression level forwarded to the read worker.
        multiprocessing_read: must be True (serial path not implemented).
        map_size: LMDB map size in bytes; estimated from the largest frame
            (x10 safety margin) when None.
    """
    assert lmdb_path.endswith('.lmdb'), "lmdb_path must end with '.lmdb'."
    assert (not op.exists(lmdb_path)), f'Folder {lmdb_path} already exists.'
    num_img = len(key_list)
    assert multiprocessing_read, 'Not implemented.'

    def _callback(arg):
        # Collect one worker result; apply_async runs this in the main process.
        (key, img_byte, img_shape) = arg
        (dataset[key], shapes[key]) = (img_byte, img_shape)
        pbar.set_description(f'Reading {key}')
        pbar.update(1)

    dataset = {}
    shapes = {}
    pbar = tqdm(total=num_img, ncols=80)
    pool = Pool()
    for iter_frm in range(num_img):
        pool.apply_async(_read_y_from_yuv_worker, args=(video_path_list[iter_frm], yuv_type, h, w, index_frame_list[iter_frm], key_list[iter_frm], compress_level), callback=_callback)
    pool.close()
    pool.join()
    pbar.close()
    if (map_size is None):
        # Estimate the map size from the largest frame with a 10x margin.
        biggest_index = 0
        biggest_size = 0
        for iter_img in range(num_img):
            vid_path = video_path_list[iter_img]
            # Bug fix: was `w == None`; None must be compared by identity.
            if (w is None):
                (w, h) = map(int, vid_path.split('.')[(- 2)].split('_')[(- 2)].split('x'))
            img_size = (w * h)
            if (img_size > biggest_size):
                biggest_size = img_size
                biggest_index = iter_img
        (_, img_byte, _) = _read_y_from_yuv_worker(video_path_list[biggest_index], yuv_type, h, w, index_frame_list[biggest_index], key_list[biggest_index], compress_level)
        data_size_per_img = img_byte.nbytes
        data_size = (data_size_per_img * num_img)
        map_size = (data_size * 10)
    env = lmdb.open(lmdb_path, map_size=map_size)
    txn = env.begin(write=True)
    txt_file = open(op.join(lmdb_path, 'meta_info.txt'), 'w')
    pbar = tqdm(total=num_img, ncols=80)
    for (idx, key) in enumerate(key_list):
        pbar.set_description(f'Writing {key}')
        pbar.update(1)
        img_byte = dataset[key]
        (h, w, c) = shapes[key]
        key_byte = key.encode('ascii')
        txn.put(key_byte, img_byte)
        txt_file.write(f'{key} ({h},{w},{c}) {compress_level}\n')
        if ((idx % batch) == 0):
            # Periodic commit to bound the transaction size (also fires at 0).
            txn.commit()
            txn = env.begin(write=True)
    pbar.close()
    txn.commit()
    env.close()
    txt_file.close()
def register_Ns3LteRrcSapMeasObjectToAddMod_methods(root_module, cls):
    """Register Python-binding metadata for ns3::LteRrcSap::MeasObjectToAddMod.

    Generated PyBindGen registration code: adds the default and copy
    constructors plus the struct's two public attributes.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::LteRrcSap::MeasObjectToAddMod const &', 'arg0')])
    cls.add_instance_attribute('measObjectEutra', 'ns3::LteRrcSap::MeasObjectEutra', is_const=False)
    cls.add_instance_attribute('measObjectId', 'uint8_t', is_const=False)
    return
class SubmoduleWithBasis(CombinatorialFreeModule):
    """A submodule of a module with basis, specified by a family of elements
    of the ambient module that is triangular w.r.t. a given support order.

    NOTE(review): the incoming code contained bare ``_method`` / ``_attribute``
    lines, which are truncated decorators (they would raise NameError at class
    creation). They are restored below as ``@cached_method`` /
    ``@lazy_attribute``, their SageMath counterparts — confirm both names are
    imported at the top of this module.
    """

    def __classcall_private__(cls, basis, support_order, ambient=None, unitriangular=False, category=None, *args, **opts):
        """Normalize arguments (wrap basis in a Family, infer the ambient
        module and category) so equivalent submodules are identical."""
        basis = Family(basis)
        if (ambient is None):
            ambient = basis.an_element().parent()
        Mod = ModulesWithBasis(ambient.category().base_ring())
        default_category = Mod.Subobjects()
        # Inherit the Filtered axiom from the ambient module when applicable.
        if ((category is None) and ambient.category().is_subcategory(Mod.Filtered())):
            default_category = default_category.Filtered()
        category = default_category.or_subcategory(category, join=True)
        return super().__classcall__(cls, basis, tuple(support_order), ambient, unitriangular, category, *args, **opts)

    def __init__(self, basis, support_order, ambient, unitriangular, category, *args, **opts):
        """Initialize the free module indexed by the basis keys and register
        the lift map as a coercion into the ambient module."""
        ring = ambient.base_ring()
        CombinatorialFreeModule.__init__(self, ring, basis.keys(), *args, category=category.Subobjects(), **opts)
        self._ambient = ambient
        self._basis = basis
        self._unitriangular = unitriangular
        self._support_order = support_order
        self.lift_on_basis = self._basis.__getitem__
        self.lift.register_as_coercion()

    def ambient(self):
        """Return the ambient module of ``self``."""
        return self._ambient

    @cached_method
    def _support_key(self, x):
        """Return the position of support ``x`` in the chosen support order."""
        return self._support_order.index(x)

    @lazy_attribute
    def lift(self):
        """The triangular embedding of ``self`` into the ambient module."""
        return self.module_morphism(self.lift_on_basis, codomain=self.ambient(), triangular='lower', unitriangular=self._unitriangular, key=self._support_key, inverse_on_support='compute')

    @lazy_attribute
    def reduce(self):
        """Projection of the ambient module onto the cokernel of the lift."""
        return self.lift.cokernel_projection()

    @lazy_attribute
    def retract(self):
        """Section of the lift: map ambient elements back into ``self``."""
        return self.lift.section()

    def is_submodule(self, other):
        """Return whether ``self`` is a submodule of ``other``.

        Bug fix: the type guard previously tested ``isinstance(self, ...)``,
        which is always true here, so the error branch was unreachable; it
        must inspect ``other``.
        """
        if (other is self.ambient()):
            return True
        if ((not isinstance(other, SubmoduleWithBasis)) and (self.ambient() is other.ambient())):
            raise ValueError(('other (=%s) should be a submodule of the same ambient space' % other))
        if (self not in ModulesWithBasis.FiniteDimensional):
            raise NotImplementedError('is_submodule for infinite dimensional modules')
        # self is a submodule of other iff every basis element retracts.
        for b in self.basis():
            try:
                other.retract(b.lift())
            except ValueError:
                return False
        return True
class Pool2dBenchmark(op_bench.TorchBenchmarkBase):
    """Microbenchmark harness for 2D pooling modules (op_bench framework)."""

    def init(self, kernel, stride, N, C, H, W, device, op_func):
        """Create a random NCHW input and instantiate the pooling op."""
        self.kernel = kernel
        self.stride = stride
        self.input = torch.rand(N, C, H, W, device=device)
        self.op_func = op_func(self.kernel, stride=self.stride)

    def forward(self):
        """Apply the pooling op to the prepared input."""
        return self.op_func(self.input)
def better_exchook(etype, value, tb, debugshell=False, autodebugshell=True, file=None, with_color=None, with_preamble=True):
    """Drop-in sys.excepthook replacement with colored, detailed output.

    Prints chained causes/contexts recursively, the formatted traceback with
    locals, special source display for SyntaxError, and the final exception
    line; optionally drops into a debug shell.

    etype/value/tb: as passed to sys.excepthook.
    debugshell: open a debug shell after printing.
    autodebugshell: honor the DEBUG environment variable to enable the shell.
    file: output stream (default sys.stderr).
    with_color: force color on/off (None = auto).
    with_preamble: print the 'EXCEPTION' header line.
    """
    if (file is None):
        file = sys.stderr
    color = Color(enable=with_color)
    output = _OutputLinesCollector(color=color)
    # Recursive calls for __cause__/__context__ must not re-trigger the
    # debug shell; everything else is inherited.
    rec_args = dict(autodebugshell=False, file=file, with_color=with_color, with_preamble=with_preamble)
    if getattr(value, '__cause__', None):
        # Print the explicit cause chain first (PEP 3134 'raise ... from').
        better_exchook(type(value.__cause__), value.__cause__, value.__cause__.__traceback__, **rec_args)
        output('')
        output('The above exception was the direct cause of the following exception:')
        output('')
    elif getattr(value, '__context__', None):
        # Implicit chaining: exception raised while handling another.
        better_exchook(type(value.__context__), value.__context__, value.__context__.__traceback__, **rec_args)
        output('')
        output('During handling of the above exception, another exception occurred:')
        output('')

    def format_filename(s):
        # Colorize the path, with the basename in bold.
        base = os.path.basename(s)
        return ((color(('"' + s[:(- len(base))]), color.fg_colors[2]) + color(base, color.fg_colors[2], bold=True)) + color('"', color.fg_colors[2]))

    if with_preamble:
        output(color('EXCEPTION', color.fg_colors[1], bold=True))
    # Collected by format_tb; reused as namespaces for the debug shell below.
    (all_locals, all_globals) = ({}, {})
    if (tb is not None):
        output.lines.extend(format_tb(tb=tb, allLocals=all_locals, allGlobals=all_globals, withTitle=True, with_color=color.enable))
    else:
        output(color('better_exchook: traceback unknown', color.fg_colors[1]))
    if isinstance(value, SyntaxError):
        # SyntaxError carries its own location; show the offending source
        # line with a caret under the error offset.
        filename = value.filename
        file_descr = ''.join([' ', color('File ', color.fg_colors[0], bold=True), format_filename(filename), ', ', color('line ', color.fg_colors[0]), color(('%d' % value.lineno), color.fg_colors[4])])
        with output.fold_text_ctx(file_descr):
            if (not os.path.isfile(filename)):
                # The recorded path may be stale; try to locate the file.
                alt_fn = fallback_findfile(filename)
                if alt_fn:
                    output((color(" -- couldn't find file, trying this instead: ", color.fg_colors[0]) + format_filename(alt_fn)))
                    filename = alt_fn
            source_code = get_source_code(filename, value.lineno)
            if source_code:
                # Normalize tabs and strip the common indent so the caret
                # column aligns with the displayed line.
                source_code = replace_tab_indents(source_code)
                lines = source_code.splitlines(True)
                indent_prefix = get_same_indent_prefix(lines)
                if (indent_prefix is None):
                    indent_prefix = ''
                source_code = ''.join([line[len(indent_prefix):] for line in lines])
                source_code = source_code.rstrip()
                prefix = ' line: '
                output(prefix, color.py_syntax_highlight(source_code), color=color.fg_colors[0])
                output(((' ' * (((len(prefix) + value.offset) - len(indent_prefix)) - 1)) + '^'), color=color.fg_colors[4])
    import types

    def _some_str(value):
        # Robust str() that never raises.
        try:
            return str(value)
        except Exception:
            return ('<unprintable %s object>' % type(value).__name__)

    def _format_final_exc_line(etype, value):
        # Mimic traceback's final "Type: message" line, colored.
        value_str = _some_str(value)
        if ((value is None) or (not value_str)):
            line = color(('%s' % etype), color.fg_colors[1])
        else:
            line = (color(('%s' % etype), color.fg_colors[1]) + (': %s' % (value_str,)))
        return line

    # etype may already be a string / instance (old-style hooks); only use
    # __name__ when it is an actual exception class.
    if (isinstance(etype, BaseException) or (hasattr(types, 'InstanceType') and isinstance(etype, types.InstanceType)) or (etype is None) or (type(etype) is str)):
        output(_format_final_exc_line(etype, value))
    else:
        output(_format_final_exc_line(etype.__name__, value))
    for line in output.lines:
        file.write(line)
    file.flush()
    if autodebugshell:
        try:
            # DEBUG=1 in the environment enables the interactive shell.
            debugshell = (int(os.environ['DEBUG']) != 0)
        except Exception:
            pass
    if debugshell:
        output(' DEBUG SHELL ')
        debug_shell(user_ns=all_locals, user_global_ns=all_globals, traceback=tb)
class Block_ViT_cross(nn.Module):
    """Transformer block applying cross channel-attention between two streams
    S and T, each followed by a residual MLP sub-block (pre-norm style)."""

    def __init__(self, config, vis, channel_num):
        super(Block_ViT_cross, self).__init__()
        # Query norm operates on the channel_num[4] embedding size; the KV
        # norm uses a separate size from the config.
        self.attn_normQ = LayerNorm(channel_num[4], eps=1e-06)
        self.attn_normKV = LayerNorm(config.KV_sizec, eps=1e-06)
        self.channel_attn = Attention_org_cross(config, vis, channel_num)
        self.ffn_norm = LayerNorm(channel_num[4], eps=1e-06)
        self.ffn = Mlp(config, channel_num[4], channel_num[4])

    def forward(self, S, T):
        """Cross-attend S and T, then apply the shared MLP; both sub-blocks
        use residual connections. Returns the updated (S, T) pair."""
        # --- residual sub-block 1: cross channel-attention ---
        orgS = S
        orgT = T
        S = self.attn_normQ(S)
        T = self.attn_normQ(T)
        # NOTE(review): the KV inputs are normalized on top of the already
        # Q-normed tensors, i.e. S/T pass through two LayerNorms before being
        # used as keys/values — confirm this double normalization is intended.
        SKV = self.attn_normKV(S)
        TKV = self.attn_normKV(T)
        (S, T) = self.channel_attn(S, SKV, T, TKV)
        S = (orgS + S)
        T = (orgT + T)
        # --- residual sub-block 2: feed-forward ---
        orgS = S
        orgT = T
        S = self.ffn_norm(S)
        T = self.ffn_norm(T)
        S = self.ffn(S)
        T = self.ffn(T)
        S = (orgS + S)
        T = (orgT + T)
        return (S, T)
def simRMLVel(dofs, smallestTimeStep, flags, currentPosVelAccel, maxAccelJerk, selection, targetVel):
    """Thin cffi wrapper around the CoppeliaSim ``simRMLVel`` call
    (Reflexxes velocity-based trajectory generation).

    Forwards all arguments unchanged (auxiliary data pointer is NULL),
    validates the returned code via _check_return, and returns the RML
    object handle.
    """
    handle = lib.simRMLVel(dofs, smallestTimeStep, flags, currentPosVelAccel, maxAccelJerk, selection, targetVel, ffi.NULL)
    _check_return(handle)
    return handle
def test_count_message_tokens_invalid_model():
    """count_message_tokens must raise NotImplementedError for an unknown
    model name."""
    messages = [Message('user', 'Hello'), Message('assistant', 'Hi there!')]
    with pytest.raises(NotImplementedError):
        count_message_tokens(messages, model='invalid_model')
class TestBundledInputs(TestCase):
    """Tests for torch.utils.bundled_inputs with JPEG-compressed tensors."""

    def test_single_tensors(self):
        """Bundle a JPEG-compressed image into a scripted identity model and
        check the inflated input matches the raw (normalized) pixels."""

        class SingleTensorModel(torch.nn.Module):
            # Identity model: returns its single tensor argument unchanged.
            def forward(self, arg):
                return arg

        im = cv2.imread('caffe2/test/test_img/p1.jpg')
        tensor = torch.from_numpy(im)
        # Store the tensor as a JPEG (quality 90) that is decoded on access.
        inflatable_arg = bundle_jpeg_image(tensor, 90)
        input = [(inflatable_arg,)]
        sm = torch.jit.script(SingleTensorModel())
        torch.utils.bundled_inputs.augment_model_with_bundled_inputs(sm, input)
        # Round-trip through serialization before inflating.
        loaded = save_and_load(sm)
        inflated = loaded.get_all_bundled_inputs()
        decoded_data = inflated[0][0]
        # Build the expected tensor: BGR->RGB, CHW, scaled to [0, 1], batched.
        raw_data = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        raw_data = torch.from_numpy(raw_data).float()
        raw_data = raw_data.permute(2, 0, 1)
        raw_data = torch.div(raw_data, 255).unsqueeze(0)
        self.assertEqual(len(inflated), 1)
        self.assertEqual(len(inflated[0]), 1)
        self.assertEqual(raw_data.shape, decoded_data.shape)
        # Loose tolerance: JPEG is lossy, so only approximate equality holds.
        self.assertTrue(torch.allclose(raw_data, decoded_data, atol=0.1, rtol=0.1))
# Bug fix: the decorator line was truncated to `.parametrize(...)` (a syntax
# error); restored the `@pytest.mark` prefix so the test is parametrized over
# the spark- and pandas-backed dataset fixtures.
@pytest.mark.parametrize('data_dict', [pytest.param('full_spark_dataset', marks=pytest.mark.spark), pytest.param('full_pandas_dataset', marks=pytest.mark.core)])
def test_feature_schema_schema_interaction_features(data_dict, request):
    """The dataset's interaction features must be exactly timestamp + rating."""
    dataset = create_dataset(request.getfixturevalue(data_dict))
    assert (dataset.feature_schema.interaction_features.columns == ['timestamp', 'rating'])
class SentenceRepresentation():
    """Abstract base for sentence-embedding models over a corpus.

    Subclasses must implement _get_sentence_vector; the base class provides
    corpus flattening and pairwise cosine-similarity computation.
    """

    def __init__(self, corpus):
        # corpus: iterable of documents, each an iterable of sentences.
        self.corpus = corpus

    @classmethod
    def get_instance(cls, corpus):
        """Alternate constructor.

        Bug fix: declared with a ``cls`` first parameter but without the
        ``@classmethod`` decorator, so ``SomeSubclass.get_instance(corpus)``
        would fail; the decorator is now applied.
        """
        return cls(corpus)

    def _get_sents_with_representations(self):
        """Flatten the corpus into (sentences, matrix of sentence vectors)."""
        (sents, sents_vec) = ([], [])
        for doc in self.corpus:
            for sent in doc:
                sents.append(sent)
                sents_vec.append(self._get_sentence_vector(sent))
        return (sents, numpy.array(sents_vec))

    def compute_pairwise_similarities(self):
        """Return all-pairs cosine similarities, shifted and scaled to [0, 1]."""
        (_, sents_vec) = self._get_sents_with_representations()
        sims_temp = (1 - metrics.pairwise.pairwise_distances(sents_vec, metric='cosine'))
        # Shift to non-negative, then normalize by the maximum.
        sims_temp += abs(sims_temp.min())
        sims_temp /= sims_temp.max()
        return sims_temp

    def _get_sentence_vector(self, sent):
        """Map a sentence to its vector representation (subclass hook)."""
        raise NotImplementedError('Needs to be implemented by subclass.')
class SingleSubprocVecEnv2(VecEnv):
    """Vectorized env running each sub-env in its own daemon subprocess,
    communicating over pipes; the worker additionally returns a critic
    observation and an opponent-selection value per step/reset."""

    def __init__(self, env_fns, spaces=None):
        """Spawn one worker2_single process per env_fn and query the spaces
        from the first worker."""
        self.waiting = False
        self.closed = False
        nenvs = len(env_fns)
        (self.remotes, self.work_remotes) = zip(*[Pipe() for _ in range(nenvs)])
        # env_fns are wrapped in CloudpickleWrapper so closures survive pickling.
        self.ps = [Process(target=worker2_single, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            # Daemon workers die with the parent process.
            p.daemon = True
            p.start()
        for remote in self.work_remotes:
            # The worker end is owned by the child; close the parent's copy.
            remote.close()
        self.remotes[0].send(('get_spaces', None))
        (observation_space, action_space) = self.remotes[0].recv()
        VecEnv.__init__(self, len(env_fns), observation_space, action_space)

    def step_async(self, actions):
        """Dispatch one action to each worker without blocking."""
        for (remote, action) in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True

    def step_wait(self):
        """Block for all workers; returns stacked (obs, obs_critic,
        select_opponent, rews, dones) arrays plus the infos tuple."""
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        (obs, obs_critic, select_opponent, rews, dones, infos) = zip(*results)
        return (np.stack(obs), np.stack(obs_critic), np.stack(select_opponent), np.stack(rews), np.stack(dones), infos)

    def reset(self):
        """Reset all workers; returns stacked (obs, obs_critic,
        select_opponent)."""
        for remote in self.remotes:
            remote.send(('reset', None))
        results = [remote.recv() for remote in self.remotes]
        (obs, obs_critic, select_opponent) = zip(*results)
        return (np.stack(obs), np.stack(obs_critic), np.stack(select_opponent))

    def reset_task(self):
        """Ask every worker to reset its task and stack the results."""
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])

    def close(self):
        """Drain pending results, tell workers to exit, and join them.
        Idempotent: repeated calls are no-ops."""
        if self.closed:
            return
        if self.waiting:
            # Consume outstanding step results so workers aren't blocked on send.
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
        self.closed = True
# Bug fix: the class decorator was truncated to a bare string expression
# `('tasks...')`, so Project.repository was never patched and the extra mock
# argument each test expects was never injected. Restored the `@patch(`
# prefix — confirm `patch` is imported from unittest.mock at module top.
@patch('tasks.implementations.dataset_check_version.Project.repository')
class TestVersionCheckTask():
    """Tests for VersionCheckTask's validation of version.yml metadata.

    The class-level patch replaces Project.repository for every test; the
    mock is passed as each test method's last positional parameter (named
    `_` where unused).
    """

    def setup(self):
        self.uut = VersionCheckTask()
        # Capture reports instead of writing them anywhere.
        self.uut._report_missing_key = MagicMock()

    def test_missing_revision(self, _):
        meta = {'misuses': ['1']}
        project = create_project('-project-', meta={})
        version = create_version('-id-', meta=meta, project=project)
        self.uut.run(project, version)
        self.uut._report_missing_key.assert_any_call('revision', '-project-/versions/-id-/version.yml')

    def test_synthetic_no_revision(self, repository_mock):
        # Synthetic projects have no VCS, so a missing revision is fine.
        meta = {'misuses': ['1']}
        project = create_project('-project-', meta={})
        version = create_version('-id-', meta=meta, project=project)
        repository_mock.vcstype = 'synthetic'
        self.uut.run(project, version)
        assert (call('revision', '-project-/versions/-id-/version.yml') not in self.uut._report_missing_key.call_args_list)

    def test_missing_misuses(self, _):
        meta = {'revision': '1'}
        project = create_project('-project-', meta={})
        version = create_version('-id-', meta=meta, project=project)
        self.uut.run(project, version)
        self.uut._report_missing_key.assert_any_call('misuses', '-project-/versions/-id-/version.yml')

    def test_empty_misuses(self, _):
        meta = {'misuses': []}
        project = create_project('-project-', meta={})
        version = create_version('-id-', meta=meta, project=project)
        self.uut.run(project, version)
        self.uut._report_missing_key.assert_any_call('misuses', '-project-/versions/-id-/version.yml')

    def test_misuses_none(self, _):
        project = create_project('-project-', meta={})
        version = create_version('-id-', meta={'misuses': None}, project=project)
        self.uut.run(project, version)
        self.uut._report_missing_key.assert_any_call('misuses', '-project-/versions/-id-/version.yml')

    def test_missing_build(self, _):
        meta = {'misuses': ['1']}
        project = create_project('-project-', meta={})
        version = create_version('-id-', meta=meta, project=project)
        self.uut.run(project, version)
        self.uut._report_missing_key.assert_any_call('build', '-project-/versions/-id-/version.yml')

    def test_missing_build_classes(self, _):
        meta = {'build': {}}
        project = create_project('-project-', meta={})
        version = create_version('-id-', meta=meta, project=project)
        self.uut.run(project, version)
        self.uut._report_missing_key.assert_any_call('build.classes', '-project-/versions/-id-/version.yml')

    def test_missing_build_commands(self, _):
        meta = {'build': {}}
        project = create_project('-project-', meta={})
        version = create_version('-id-', meta=meta, project=project)
        self.uut.run(project, version)
        self.uut._report_missing_key.assert_any_call('build.commands', '-project-/versions/-id-/version.yml')

    def test_empty_build_commands(self, _):
        meta = {'build': {'commands': []}}
        project = create_project('-project-', meta={})
        version = create_version('-id-', meta=meta, project=project)
        self.uut.run(project, version)
        self.uut._report_missing_key.assert_any_call('build.commands', '-project-/versions/-id-/version.yml')

    def test_missing_build_src(self, _):
        meta = {'build': {}}
        project = create_project('-project-', meta={})
        version = create_version('-id-', meta=meta, project=project)
        self.uut.run(project, version)
        self.uut._report_missing_key.assert_any_call('build.src', '-project-/versions/-id-/version.yml')

    def test_non_existent_misuse(self, _):
        self.uut._report_unknown_misuse = MagicMock()
        project = create_project('-project-', meta={})
        version = create_version('-id-', meta={'misuses': ['-misuse-']}, project=project)
        version._MISUSES = []
        self.uut.run(project, version)
        self.uut._report_unknown_misuse.assert_any_call(version.id, '-misuse-')

    def test_existent_misuse(self, _):
        self.uut._report_unknown_misuse = MagicMock()
        project = create_project('-project-', meta={})
        version = create_version('-id-', meta={'misuses': ['-misuse-']}, project=project)
        # Create a real misuse file on disk so the lookup succeeds.
        project.path = mkdtemp(prefix='mubench_test-dataset-check_')
        try:
            misuse_yml_path = join(project.path, Project.MISUSES_DIR, '-misuse-', Misuse.MISUSE_FILE)
            create_file(misuse_yml_path)
            self.uut.run(project, version)
            self.uut._report_unknown_misuse.assert_not_called()
        finally:
            remove_tree(project.path)
class BaseDataset(object):
def get_imagedata_info(self, data):
(pids, cams) = ([], [])
for (_, pid, camid) in data:
pids += [pid]
cams += [camid]
pids = set(pids)
cams = set(cams)
num_pids = len(pids)
num_cams = len(cams)
num_imgs = len(data)
return (num_pids, num_imgs, num_cams)
def get_videodata_info(self, data, return_tracklet_info=False):
(pids, cams, tracklet_info) = ([], [], [])
for (img_paths, pid, camid) in data:
pids += [pid]
cams += [camid]
tracklet_info += [len(img_paths)]
pids = set(pids)
cams = set(cams)
num_pids = len(pids)
num_cams = len(cams)
num_tracklets = len(data)
if return_tracklet_info:
return (num_pids, num_tracklets, num_cams, tracklet_info)
return (num_pids, num_tracklets, num_cams) |
def extract_sdae_coil100(slope=0.0, dim=10):
    """Build the stacked denoising autoencoder extractor for COIL-100.

    Layer sizes: 49152 input units (presumably 128x128x3 images — confirm)
    -> 500 -> 500 -> 2000 -> `dim` latent features; `slope` is forwarded to
    extractSDAE (presumably the leaky-ReLU negative slope).
    """
    return extractSDAE(dim=[49152, 500, 500, 2000, dim], slope=slope)
class ExtCNNDMLoader(JsonLoader):
    """Loader for the extractive CNN/DailyMail summarization dataset
    (JSON lines with text/summary/label/publication fields)."""

    def __init__(self, fields=None):
        # Fall back to the standard field set when none (or empty) is given.
        default_fields = {'text': None, 'summary': None, 'label': None, 'publication': None}
        super(ExtCNNDMLoader, self).__init__(fields=(fields or default_fields))

    def load(self, paths: Union[(str, Dict[(str, str)])]=None):
        """Load one dataset per split and wrap them in a DataBundle.

        When `paths` is None the data is downloaded first. If only a 'train'
        split is present it is re-labeled as 'test'.
        """
        if paths is None:
            paths = self.download()
        paths = check_loader_paths(paths)
        if ('train' in paths) and ('test' not in paths):
            paths['test'] = paths.pop('train')
        datasets = {split: self._load(split_path) for (split, split_path) in paths.items()}
        return DataBundle(datasets=datasets)

    def download(self):
        """Return the local path of the 'ext-cnndm' dataset, fetching it if
        necessary."""
        return self._get_dataset_path('ext-cnndm')
def load_dataset(norm_flag=True):
    """Load the bi-temporal 'river' change-detection data from .mat files.

    Flattens both images to (num_pixels, num_bands) and returns them with
    the ground-truth label map.

    Args:
        norm_flag: when True, standardize each band (zero mean, unit variance).

    Returns:
        (X, Y, GT): before/after pixel matrices and the ground-truth labels.
    """
    imgX = sio.loadmat('river/river_before.mat')['river_before']
    imgY = sio.loadmat('river/river_after.mat')['river_after']
    imgX = np.reshape(imgX, newshape=[(- 1), imgX.shape[(- 1)]])
    imgY = np.reshape(imgY, newshape=[(- 1), imgY.shape[(- 1)]])
    GT = sio.loadmat('river/groundtruth.mat')['lakelabel_v1']
    if norm_flag:
        X = preprocessing.StandardScaler().fit_transform(imgX)
        Y = preprocessing.StandardScaler().fit_transform(imgY)
    else:
        # Bug fix: X and Y were previously undefined on this path, so
        # norm_flag=False raised NameError at the return statement.
        X, Y = imgX, imgY
    return (X, Y, GT)
def has_vector_accessnode(inf: vector_inference.VectorInferenceGraph):
    """Check whether the first scalar AccessNode in the SDFG start state was
    inferred as Vector.

    Only the first matching node is examined; returns False when the start
    state contains no scalar AccessNode at all.
    """
    for node, _ in inf.sdfg.start_state.all_nodes_recursive():
        if not isinstance(node, nodes.AccessNode):
            continue
        if not isinstance(node.desc(inf.sdfg), data.Scalar):
            continue
        return inf.get_constraint(node) == vector_inference.InferenceNode.Vector
    return False
def merge_list_of_dicts(L):
    """Flatten a sequence of dicts into one dict; later dicts override
    earlier ones on duplicate keys."""
    return {key: value for mapping in L for key, value in mapping.items()}
def current_stream():
    """Return the currently selected CUDA stream as a ``torch.cuda.Stream``."""
    _lazy_init()  # make sure the CUDA runtime is initialized before touching it
    # Wrap the C-level stream handle in the public Stream object.
    return torch.cuda.Stream(_cdata=torch._C._cuda_getCurrentStream())
def get_site_dirs():
    """Collect every directory Python may treat as a site-packages location.

    Combines PYTHONPATH entries, prefix-derived library paths (platform
    specific), the macOS framework per-user location, sysconfig's
    purelib/platlib, the user site directory, and site.getsitepackages()
    when available. All paths are normalized before returning.
    """
    sitedirs = []
    sitedirs.extend(_pythonpath())
    prefixes = [sys.prefix]
    if (sys.exec_prefix != sys.prefix):
        prefixes.append(sys.exec_prefix)
    for prefix in prefixes:
        if prefix:
            if (sys.platform in ('os2emx', 'riscos')):
                sitedirs.append(os.path.join(prefix, 'Lib', 'site-packages'))
            elif (os.sep == '/'):
                # POSIX layout: lib/pythonX.Y/site-packages plus legacy site-python.
                # NOTE(review): sys.version[:3] yields '3.1' on Python 3.10+ —
                # confirm the supported interpreter range.
                sitedirs.extend([os.path.join(prefix, 'lib', ('python' + sys.version[:3]), 'site-packages'), os.path.join(prefix, 'lib', 'site-python')])
            else:
                # Windows layout: the prefix itself plus lib/site-packages.
                sitedirs.extend([prefix, os.path.join(prefix, 'lib', 'site-packages')])
            if (sys.platform == 'darwin'):
                # Framework builds also search the per-user Library location.
                if ('Python.framework' in prefix):
                    home = os.environ.get('HOME')
                    if home:
                        home_sp = os.path.join(home, 'Library', 'Python', sys.version[:3], 'site-packages')
                        sitedirs.append(home_sp)
    lib_paths = (get_path('purelib'), get_path('platlib'))
    for site_lib in lib_paths:
        if (site_lib not in sitedirs):
            sitedirs.append(site_lib)
    if site.ENABLE_USER_SITE:
        sitedirs.append(site.USER_SITE)
    try:
        sitedirs.extend(site.getsitepackages())
    except AttributeError:
        # Older/virtualenv interpreters may not expose getsitepackages().
        pass
    sitedirs = list(map(normalize_path, sitedirs))
    return sitedirs
class FairseqDataclass():
    """Base class for fairseq config dataclasses.

    Provides introspection helpers over ``__dataclass_fields__`` (defaults,
    types, argparse metadata) used when building CLI parsers, plus
    conversion from an argparse Namespace.
    """

    _name: Optional[str] = None

    def name():
        # NOTE(review): defined without self/@staticmethod, so this is only
        # callable as FairseqDataclass.name() — confirm whether @staticmethod
        # was stripped.
        return None

    def _get_all_attributes(self) -> List[str]:
        """Names of all declared dataclass fields."""
        return [k for k in self.__dataclass_fields__.keys()]

    def _get_meta(self, attribute_name: str, meta: str, default: Optional[Any]=None) -> Any:
        """Look up `meta` in the field's metadata mapping."""
        return self.__dataclass_fields__[attribute_name].metadata.get(meta, default)

    def _get_name(self, attribute_name: str) -> str:
        """Declared name of the field."""
        return self.__dataclass_fields__[attribute_name].name

    def _get_default(self, attribute_name: str) -> Any:
        """Resolve the effective default value for a field.

        Precedence: an instance value that is an OmegaConf interpolation
        ('${...}'), then an interpolation in the declared default, then an
        instance value that differs from the declared default; otherwise the
        declared default (calling default_factory when present).
        """
        if hasattr(self, attribute_name):
            if str(getattr(self, attribute_name)).startswith('${'):
                return str(getattr(self, attribute_name))
            elif str(self.__dataclass_fields__[attribute_name].default).startswith('${'):
                return str(self.__dataclass_fields__[attribute_name].default)
            elif (getattr(self, attribute_name) != self.__dataclass_fields__[attribute_name].default):
                return getattr(self, attribute_name)
        f = self.__dataclass_fields__[attribute_name]
        if (not isinstance(f.default_factory, _MISSING_TYPE)):
            return f.default_factory()
        return f.default

    def _get_type(self, attribute_name: str) -> Any:
        """Declared type annotation of the field."""
        return self.__dataclass_fields__[attribute_name].type

    def _get_help(self, attribute_name: str) -> Any:
        return self._get_meta(attribute_name, 'help')

    def _get_argparse_const(self, attribute_name: str) -> Any:
        return self._get_meta(attribute_name, 'argparse_const')

    def _get_argparse_alias(self, attribute_name: str) -> Any:
        return self._get_meta(attribute_name, 'argparse_alias')

    def _get_choices(self, attribute_name: str) -> Any:
        return self._get_meta(attribute_name, 'choices')

    def from_namespace(cls, args):
        """Build a config of type `cls` from an argparse Namespace.

        NOTE(review): takes `cls` as first parameter — presumably decorated
        with @classmethod upstream; confirm. Fields starting with '_' are
        skipped; missing Namespace attributes keep their defaults.
        """
        if isinstance(args, cls):
            return args
        else:
            config = cls()
            for k in config.__dataclass_fields__.keys():
                if k.startswith('_'):
                    continue
                if hasattr(args, k):
                    setattr(config, k, getattr(args, k))
            return config
def rmsprop(opfunc, x, config, state=None):
    """Perform a single RMSProp step (Torch-style optimization function).

    Args:
        opfunc: callable returning (fx, dfdx) — loss and gradient at x.
        x: parameter tensor, updated in place.
        config: dict with optional keys 'learningRate' (0.01), 'alpha'
            (0.99), 'epsilon' (1e-8), 'weightDecay' (0).
        state: dict persisting the running mean square 'm' and scratch
            buffer 'tmp' between calls; defaults to `config` when None.

    Returns:
        (x, fx): the updated parameters and the loss before the update.

    Raises:
        ValueError: when both config and state are None.
    """
    if config is None and state is None:
        raise ValueError('rmsprop requires a dictionary to retain state between iterations')
    state = state if state is not None else config
    lr = config.get('learningRate', 0.01)
    alpha = config.get('alpha', 0.99)
    epsilon = config.get('epsilon', 1e-08)
    wd = config.get('weightDecay', 0)
    fx, dfdx = opfunc(x)
    if wd != 0:
        # L2 weight decay folded into the gradient.
        # Bug fix: add_(scalar, tensor) / addcmul_(scalar, a, b) /
        # addcdiv_(scalar, a, b) are deprecated overloads removed from
        # modern PyTorch; use the keyword alpha/value forms instead.
        dfdx.add_(x, alpha=wd)
    if 'm' not in state:
        state['m'] = x.new().resize_as_(dfdx).zero_()
        state['tmp'] = x.new().resize_as_(dfdx)
    # m <- alpha * m + (1 - alpha) * dfdx^2 (running mean of squared grads).
    state['m'].mul_(alpha)
    state['m'].addcmul_(dfdx, dfdx, value=1.0 - alpha)
    torch.sqrt(state['m'], out=state['tmp']).add_(epsilon)
    # x <- x - lr * dfdx / (sqrt(m) + eps)
    x.addcdiv_(dfdx, state['tmp'], value=-lr)
    return (x, fx)
class ReconstructionErrorsTest(TestCase):
    """Checks reconstruction_errors() per error type against hand-computed
    expectations on a tiny set of (4, 2, 1) windows."""

    # y: "true" windows, y_hat: reconstructed windows; shape (4, 2, 1).
    y = np.array([[[0.0], [0.1]], [[1.0], [0.5]], [[0.1], [0.1]], [[0.0], [0.5]]])
    y_hat = np.array([[[0.1], [2.0]], [[0.5], [0.0]], [[3.0], [0.1]], [[5.0], [0.5]]])
    STEP_SIZE = 1

    def _run(self, score_window, smoothing_window, smooth, rec_error_type, expected):
        # Shared driver: compute the error sequence and compare with 1%
        # relative tolerance (errors are averaged over overlapping windows).
        (sequences, _) = reconstruction_errors(self.y, self.y_hat, self.STEP_SIZE, score_window, smoothing_window, smooth, rec_error_type)
        assert_allclose(sequences, expected, rtol=0.01)

    def test_no_smooth(self):
        """Raw point-wise errors without smoothing."""
        smooth = False
        score_window = 0
        smoothing_window = 0
        rec_error_type = 'point'
        expected = np.array([0.1, 0.25, 1.4, 2.55, 0.0])
        self._run(score_window, smoothing_window, smooth, rec_error_type, expected)

    def test_smooth(self):
        """Smoothing with a fractional window leaves these values unchanged."""
        smooth = True
        score_window = 0
        smoothing_window = 0.25
        rec_error_type = 'point'
        expected = np.array([0.1, 0.25, 1.4, 2.55, 0.0])
        self._run(score_window, smoothing_window, smooth, rec_error_type, expected)

    def test_area(self):
        """Area-difference error with a score window of 4."""
        smooth = False
        score_window = 4
        smoothing_window = 0
        rec_error_type = 'area'
        expected = np.array([0.175, 1.0, 2.975, 4.075, 3.25])
        self._run(score_window, smoothing_window, smooth, rec_error_type, expected)

    def test_dtw(self):
        """Dynamic-time-warping error with a score window of 2."""
        smooth = False
        score_window = 2
        smoothing_window = 0
        rec_error_type = 'dtw'
        expected = np.array([0.0, 0.27, 1.425, 0.0, 0.0])
        self._run(score_window, smoothing_window, smooth, rec_error_type, expected)
def test_invalid_parameters_in_stacking():
    """An invalid StackingClassifier (empty estimators) must still produce an
    HTML repr containing its escaped string form."""
    invalid_clf = StackingClassifier(estimators=[])
    rendered = estimator_html_repr(invalid_clf)
    assert html.escape(str(invalid_clf)) in rendered
def get_lm_pipeline(model: PreTrainedModel):
    """Return the model's final norm + LM head as an ``nn.Sequential``.

    Architectures not listed in the dispatch table fall back to
    get_lm_head(model).
    """
    builders = {
        'LlamaForCausalLM': lambda m: nn.Sequential(m.model.norm, m.lm_head),
        'RWForCausalLM': lambda m: nn.Sequential(m.transformer.ln_f, m.lm_head),
        'GPTNeoForCausalLM': lambda m: nn.Sequential(m.transformer.ln_f, m.lm_head),
        'GPTNeoXForCausalLM': lambda m: nn.Sequential(m.gpt_neox.final_layer_norm, m.embed_out),
    }
    builder = builders.get(model.__class__.__name__)
    if builder is None:
        return get_lm_head(model)
    return builder(model)
def validate_map_location(map_location=None):
    """Normalize `map_location` to a torch.device (or None).

    Accepts None, a device string, or a torch.device; anything else raises
    ValueError. CUDA locations are additionally validated for availability.
    """
    if isinstance(map_location, str):
        map_location = torch.device(map_location)
    elif map_location is not None and not isinstance(map_location, torch.device):
        raise ValueError(('map_location should be either None, string or torch.device, but got type: ' + str(type(map_location))))
    if str(map_location).startswith('cuda'):
        validate_cuda_device(map_location)
    return map_location
def main():
    """Report, for each layer-1 catlas dominator node, how many sourmash
    hash values fall under it, and — for nodes with two or more hashes —
    the taxonomic rank of their lowest common ancestor."""
    p = argparse.ArgumentParser(description=main.__doc__)
    p.add_argument('catlas_prefix', help='catlas prefix')
    p.add_argument('mh_index_picklefile', help='pickled hashval index')
    p.add_argument('lca_db')
    args = p.parse_args()
    # Load the catlas structure, the k-mer index, the hashval -> cdbg
    # mapping, and the LCA taxonomy database.
    catlas = CAtlas(args.catlas_prefix, load_sizefile=True)
    notify('loaded {} nodes from catlas {}', len(catlas), args.catlas_prefix)
    notify('loaded {} layer 1 catlas nodes', len(catlas.layer1_to_cdbg))
    ki_start = time.time()
    kmer_idx = MPHF_KmerIndex.from_catlas_directory(args.catlas_prefix)
    notify('loaded {} k-mers in index ({:.1f}s)', len(kmer_idx.mphf_to_kmer), (time.time() - ki_start))
    # NOTE: pickle.load on a user-supplied path — fine for a local CLI tool
    # on trusted data, never for untrusted input.
    with open(args.mh_index_picklefile, 'rb') as fp:
        hashval_to_contig_id = pickle.load(fp)
    notify('loaded {} hash value -> cdbg_id mappings from {}', len(hashval_to_contig_id), args.mh_index_picklefile)
    db = LCA_Database.load(args.lca_db)
    # Group hash values by the layer-1 dominator node owning each cdbg node.
    dom_hashvals = defaultdict(set)
    for (hashval, cdbg_id) in hashval_to_contig_id.items():
        dom_id = catlas.cdbg_to_layer1[cdbg_id]
        dom_hashvals[dom_id].add(hashval)
    # Bucket layer-1 dominator nodes by hash count: zero / one / many.
    n_zero = 0
    n_one = 0
    n_many = 0
    dom_many_ids = set()
    for dom_id in catlas:
        if (catlas.levels[dom_id] != 1):
            continue
        hashvals = dom_hashvals.get(dom_id)
        if (not hashvals):
            n_zero += 1
        elif (len(hashvals) == 1):
            n_one += 1
        else:
            n_many += 1
            dom_many_ids.add(dom_id)
    print(f'{n_zero} dom nodes have no sourmash hashes under them.')
    print(f'{n_one} dom nodes have exactly one sourmash hash under them.')
    print(f'{n_many} dom nodes have two or more sourmash hashes under them.')
    # For multi-hash nodes, gather all lineage assignments, build a
    # taxonomy tree, and tally the rank of the lowest common ancestor.
    cnt = Counter()
    for dom_id in dom_many_ids:
        hashvals = dom_hashvals[dom_id]
        assert (len(hashvals) > 1)
        lins = set()
        for hashval in hashvals:
            lineages = db.get_lineage_assignments(hashval)
            lins.update(lineages)
        tree = lca.build_tree(lins)
        (lca_lin, reason) = lca.find_lca(tree)
        cnt[lca_lin[(- 1)].rank] += 1
    print('')
    print('rank of dom node lca count of dom nodes with that rank')
    print(' ')
    for (rank, count) in cnt.most_common():
        print(f'{rank} {count}')
    return 0
def load_img_future_de_snow_kitti(filepath, nFrames, img_id, phase='train'):
    """Load a window of KITTI-snow frames: snowy inputs and clean ground truth.

    Reads nFrames consecutive frames centered on (img_id + nFrames//2): the
    degraded inputs from `filepath` (.jpg) and the matching ground truth
    from the Train_GT/Test_GT directory (.png), all converted to RGB.

    Returns:
        (target, neigbor): lists of ground-truth and input PIL images.
    """
    tt = int(nFrames / 2)
    img_id = img_id + tt  # recenter so the window [img_id-tt, img_id+tt] starts at the original id
    num_dir = filepath.split('/')[3]  # sequence directory name within the dataset layout
    if phase == 'train':
        targetPath = 'Dataset/KITTI_snow/Train_GT/' + num_dir
    else:
        targetPath = 'Dataset/KITTI_snow/Test_GT/' + num_dir
    neigbor = []
    target = []
    seq = [x for x in range(img_id - tt, img_id + 1 + tt)]
    for j in seq:
        neigbor.append(Image.open(filepath + '/' + str(j).zfill(5) + '.jpg').convert('RGB'))
        target.append(Image.open(targetPath + '/' + str(j).zfill(5) + '.png').convert('RGB'))
    # Bug fix: `target` is always a list, so the previous `target is None`
    # check could never fire; treat an empty window as a read failure.
    # (Also removed a large block of dead commented-out motion-loading code.)
    if not target:
        print('read false')
        exit()
    return (target, neigbor)
class ParaphraseGenerator():
    """Sentence-level paraphraser built on the Vamsi/T5_Paraphrase_Paws model.

    Each sentence is paraphrased independently; among 10 sampled candidates
    the one with the largest Levenshtein distance from the input (i.e. the
    most rewritten) is kept.
    """

    def __init__(self, device='cuda'):
        self._device = device
        self._tokenizer = AutoTokenizer.from_pretrained('Vamsi/T5_Paraphrase_Paws')
        self._model = AutoModelForSeq2SeqLM.from_pretrained('Vamsi/T5_Paraphrase_Paws').to(self._device)

    def generate_sent(self, input_sent):
        """Paraphrase one sentence, returning the most-different sampled output."""
        raw_input_text = input_sent
        # T5 task prefix plus an explicit end-of-sequence marker (presumably
        # expected by this checkpoint — confirm against the model card).
        input_text = (('paraphrase: ' + input_sent) + ' </s>')
        model_inputs = self._tokenizer.encode_plus(input_text, max_length=MAX_LENGTH, padding='longest', return_tensors='pt').to(self._device)
        outputs = self._model.generate(**model_inputs, max_length=MAX_LENGTH, do_sample=True, top_k=TOP_K, top_p=TOP_P, early_stopping=True, num_return_sequences=10)
        # Track the candidate farthest (edit distance) from the input.
        (max_dist, output_sent) = ((- 1), '')
        for output in outputs:
            output_text = self._tokenizer.decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            dist = distance.levenshtein(raw_input_text, output_text)
            if (dist > max_dist):
                max_dist = dist
                output_sent = output_text
        return output_sent

    def generate(self, input_text):
        """Paraphrase a multi-sentence text sentence by sentence."""
        return ' '.join([self.generate_sent(input_sent=sent) for sent in sent_tokenize(input_text)])
def num_ifs_loops(graph):
    """Count prim::If and prim::Loop nodes in the body of a TorchScript graph.

    Only the text before the first 'return' is examined, so occurrences in
    the graph's return statement are not counted.
    """
    text = str(graph)
    body = text[:text.find('return')]
    return body.count('prim::Loop') + body.count('prim::If')
def _get_cache_dir(req, wheel_cache):
    """Pick the wheel cache directory for `req`.

    Persistently cacheable requirements use the normal cache path; anything
    else (no cache dir configured, or the requirement should not be cached)
    gets an ephemeral path. `req.link` must be set.
    """
    cacheable = bool(wheel_cache.cache_dir)
    assert req.link
    if cacheable and _should_cache(req):
        return wheel_cache.get_path_for_link(req.link)
    return wheel_cache.get_ephem_path_for_link(req.link)
class DisplayLatexMessagePassing(MessagePassing):
    """Message-passing pass that records each forward/backward message as an
    inline-math LaTeX string instead of only computing it."""

    def __init__(self, model):
        model.init_shapes()
        super().__init__(model, message_keys=['a', 'b'])

    def forward(self, node, message):
        # Render the incoming message, compute the node's outgoing message,
        # render that too, and log "incoming \; outgoing" wrapped in $...$.
        m = format_latex_message(message, 'incoming')
        new_message = node.forward_message(message)
        m += ('\\;' + format_latex_message(new_message, 'outcoming'))
        self.latex['forward'].append((('$' + m) + '$'))
        return new_message

    def backward(self, node, message):
        # Mirror of forward() for the backward sweep.
        m = format_latex_message(message, 'incoming')
        new_message = node.backward_message(message)
        m += ('\\;' + format_latex_message(new_message, 'outcoming'))
        self.latex['backward'].append((('$' + m) + '$'))
        return new_message

    def update(self, variable, message):
        # Variable updates are irrelevant for display; intentionally a no-op.
        pass

    def run(self):
        """Run one forward + backward sweep and return the collected LaTeX
        as {'forward': [...], 'backward': [...]}."""
        self.latex = dict(forward=[], backward=[])
        initializer = ConstantInit(a=0, b=0)
        self.init_message_dag(initializer)
        self.configure_damping(None)
        self.update_dA = False
        self.forward_message()
        self.backward_message()
        return self.latex
class ImagesDataset(Dataset):
    """Paired source/target image dataset.

    Images are matched by sorted filename order. When no source transform
    is provided, the (transformed) target image is reused as the source.
    """

    def __init__(self, source_root, target_root, opts, target_transform=None, source_transform=None):
        self.source_paths = sorted(data_utils.make_dataset(source_root))
        self.target_paths = sorted(data_utils.make_dataset(target_root))
        self.source_transform = source_transform
        self.target_transform = target_transform
        self.opts = opts

    def __len__(self):
        return len(self.source_paths)

    def __getitem__(self, index):
        # Source image: RGB normally, grayscale when the task uses label maps.
        from_im = Image.open(self.source_paths[index])
        if self.opts.label_nc == 0:
            from_im = from_im.convert('RGB')
        else:
            from_im = from_im.convert('L')
        to_im = Image.open(self.target_paths[index]).convert('RGB')
        if self.target_transform:
            to_im = self.target_transform(to_im)
        if self.source_transform:
            from_im = self.source_transform(from_im)
        else:
            # No source transform: the target image doubles as the source.
            from_im = to_im
        return (from_im, to_im)
def collect_results_cpu(result_part, size, tmpdir=None):
    """Gather per-rank partial results onto rank 0 via a shared temp directory.

    Every rank pickles its chunk into tmpdir; rank 0 reloads all chunks,
    re-interleaves them, truncates to `size`, and removes tmpdir. Returns
    the ordered list on rank 0 and None on every other rank.
    """
    (rank, world_size) = get_dist_info()
    if (tmpdir is None):
        # No directory given: rank 0 creates one and broadcasts its name to
        # all ranks through a fixed-size uint8 tensor padded with spaces (32).
        MAX_LEN = 512
        dir_tensor = torch.full((MAX_LEN,), 32, dtype=torch.uint8, device='cuda')
        if (rank == 0):
            os.makedirs('.dist_test', exist_ok=True)
            tmpdir = tempfile.mkdtemp(dir='.dist_test')
            tmpdir = torch.tensor(bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
            dir_tensor[:len(tmpdir)] = tmpdir
        dist.broadcast(dir_tensor, 0)
        # rstrip() drops the space padding added above.
        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
    else:
        os.makedirs(tmpdir, exist_ok=True)
    pickle.dump(result_part, open(osp.join(tmpdir, f'part_{rank}.pkl'), 'wb'))
    dist.barrier()  # all chunks must be on disk before rank 0 reads them
    if (rank != 0):
        return None
    else:
        part_list = []
        for i in range(world_size):
            part_file = osp.join(tmpdir, f'part_{i}.pkl')
            part_list.append(pickle.load(open(part_file, 'rb')))
        # zip(*parts) re-interleaves the chunks — this assumes samples were
        # sharded round-robin across ranks (confirm against the sampler).
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # Drop any padding items beyond the real dataset size.
        ordered_results = ordered_results[:size]
        shutil.rmtree(tmpdir)
        return ordered_results
def load_dataset_example(format, name, dataset_dir):
    """Load an example dataset as a list of GraphDataset graphs.

    Only format 'PyG' with name 'QM7b' is supported; any other combination
    falls through and returns None implicitly.
    """
    root = '{}/{}'.format(dataset_dir, name)
    if format == 'PyG' and name == 'QM7b':
        raw_dataset = QM7b(root)
        return GraphDataset.pyg_to_graphs(raw_dataset)
def test_floor():
    """Behavioral checks for sympy's floor(): special values, integers,
    rationals, floats, imaginary arguments, and symbolic idempotence."""
    x = Symbol('x')
    y = Symbol('y')
    # Special values pass through unchanged.
    assert (floor(nan) == nan)
    assert (floor(oo) == oo)
    assert (floor((- oo)) == (- oo))
    # Integers are fixed points.
    assert (floor(0) == 0)
    assert (floor(1) == 1)
    assert (floor((- 1)) == (- 1))
    # Known constants and rationals round toward -infinity.
    assert (floor(E) == 2)
    assert (floor(pi) == 3)
    assert (floor(Rational(1, 2)) == 0)
    assert (floor((- Rational(1, 2))) == (- 1))
    assert (floor(Rational(7, 3)) == 2)
    assert (floor((- Rational(7, 3))) == (- 3))
    assert (floor(Float(17.0)) == 17)
    assert (floor((- Float(17.0))) == (- 17))
    assert (floor(Float(7.69)) == 7)
    assert (floor((- Float(7.69))) == (- 8))
    # Purely imaginary arguments: floor applies to the integer coefficient.
    assert (floor(I) == I)
    assert (floor((- I)) == (- I))
    assert (floor((2 * I)) == (2 * I))
    assert (floor(((- 2) * I)) == ((- 2) * I))
    # Unevaluated symbolic expressions stay symbolic but stable.
    assert (floor((E + pi)) == floor((E + pi)))
    assert (floor((I + pi)) == floor((I + pi)))
    # floor is idempotent, including over symbols.
    assert (floor(floor(pi)) == 3)
    assert (floor(floor(y)) == floor(y))
    assert (floor(floor(x)) == floor(floor(x)))
    assert (floor(x) == floor(x))
    assert (floor((2 * x)) == floor((2 * x)))
class RegexMatch(NgramMatcher):
    """Candidate matcher that accepts spans matching a regular expression.

    Options: 'rgx' (required pattern), 'ignore_case' (default True),
    'attrib' (default WORDS), 'sep' (default ' ').
    """

    def init(self):
        try:
            self.rgx = self.opts['rgx']
        except KeyError:
            raise Exception('Please supply a regular expression string r as rgx=r.')
        self.ignore_case = self.opts.get('ignore_case', True)
        self.attrib = self.opts.get('attrib', WORDS)
        self.sep = self.opts.get('sep', ' ')
        # Anchor the pattern at the end so the whole span must match.
        if not self.rgx.endswith('$'):
            self.rgx = self.rgx + '$'
        flags = re.I if self.ignore_case else 0
        self.r = re.compile(self.rgx, flags=flags)

    def _f(self, c):
        # Subclasses define the actual matching predicate.
        raise NotImplementedError()
def rounddict(d: Dict[Any, float], x=2):
    """Return a copy of `d` with every value rounded to `x` decimal places."""
    rounded = {}
    for key, val in d.items():
        rounded[key] = round(val, ndigits=x)
    return rounded
def test_array_api_deprecations():
    """Every legacy spmatrix-style method on csr_array must emit a
    DeprecationWarning mentioning the 1.13.0 removal target."""
    X = sp.sparse.csr_array([[1, 2, 3], [4, 0, 6]])
    msg = '1.13.0'
    deprecated_calls = (
        lambda: X.get_shape(),
        lambda: X.set_shape((2, 3)),
        lambda: X.asfptype(),
        lambda: X.getmaxprint(),
        lambda: X.getnnz(),
        lambda: X.getH(),
        lambda: X.getcol(1).todense(),
        lambda: X.getrow(1).todense(),
    )
    for call in deprecated_calls:
        with pytest.deprecated_call(match=msg):
            call()
def as_numpy(obj):
    """Recursively convert tensors/Variables nested in sequences and
    mappings to numpy arrays; anything else is wrapped with np.array.

    Fixes:
    - collections.Sequence/Mapping were removed in Python 3.10; use the
      collections.abc aliases instead.
    - str/bytes are Sequences, so recursing into them never terminated
      (each 1-char string is a sequence of itself); treat them as leaves.
    """
    import collections.abc
    if isinstance(obj, (str, bytes)):
        return np.array(obj)
    if isinstance(obj, collections.abc.Sequence):
        return [as_numpy(v) for v in obj]
    elif isinstance(obj, collections.abc.Mapping):
        return {k: as_numpy(v) for (k, v) in obj.items()}
    elif isinstance(obj, Variable):
        return obj.data.cpu().numpy()
    elif torch.is_tensor(obj):
        return obj.cpu().numpy()
    else:
        return np.array(obj)
def simple_seg(hans):
    """Segment `hans` into pieces via _seg().

    Accepts a single unicode string or a list of them; bytes input is
    rejected. A one-element list is unwrapped; longer lists are segmented
    element-wise and concatenated.
    """
    assert (not isinstance(hans, bytes_type)), 'must be unicode string or [unicode, ...] list'
    if isinstance(hans, text_type):
        return _seg(hans)
    items = list(hans)
    if len(items) == 1:
        return simple_seg(items[0])
    return list(chain.from_iterable(simple_seg(part) for part in items))
def fetch(data_filename):
    """Download a test data file, skipping the whole test module when the
    download cannot complete (offline environment or a missing optional
    download dependency)."""
    try:
        return _fetch(data_filename)
    except (ConnectionError, ModuleNotFoundError):
        pytest.skip(f'Unable to download {data_filename}', allow_module_level=True)
# NOTE(review): '.parametrize(...)' looks like a stripped
# @pytest.mark.parametrize decorator — confirm against the original file.
.parametrize('dumb_samplers', [True, False])
def test_parallel_thompson_sampling_builder_raises_when_update_with_wrong_function(dumb_samplers: bool) -> None:
    """update_acquisition_function must reject a function the builder did
    not itself create (here, a lower-confidence-bound function)."""
    # Build a 5x5 grid on [0,1]^2 with quadratic observations.
    x_range = tf.linspace(0.0, 1.0, 5)
    x_range = tf.cast(x_range, dtype=tf.float64)
    xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing='ij'), axis=(- 1)), ((- 1), 2))
    ys = quadratic(xs)
    dataset = Dataset(xs, ys)
    model_type = (ModelWithDumbSamplers if dumb_samplers else QuadraticMeanAndRBFKernelWithSamplers)
    model = model_type(dataset, noise_variance=tf.constant(1.0, dtype=tf.float64))
    model.kernel = gpflow.kernels.RBF()
    builder = ParallelContinuousThompsonSampling()
    builder.prepare_acquisition_function(model)
    # Passing a foreign acquisition function must raise.
    with pytest.raises(ValueError):
        builder.update_acquisition_function(lower_confidence_bound(model, 0.1), model)
def test_no_feature_flag_raises_error():
    """set_fit_request must raise when metadata routing is disabled."""
    with config_context(enable_metadata_routing=False), pytest.raises(RuntimeError, match='This method is only available'):
        ConsumingClassifier().set_fit_request(sample_weight=True)
class GCL_skip(nn.Module):
    """Graph convolution layer over two DGL graphs in parallel: `g` carries
    the node features h, `f` carries the skip/support features s; the two
    propagated signals are summed (out = activation(conv_g(h) + conv_f(s))).

    Symmetric normalization: features are scaled by ndata['norm'] both
    before and after message passing on each graph.
    """

    def __init__(self, g, f, in_feats, out_feats, activation, dropout, bias=True):
        super(GCL_skip, self).__init__()
        self.g = g
        self.f = f
        # Separate weight matrices for the main (wh) and skip (ws) branches.
        self.wh = nn.Parameter(torch.Tensor(in_feats, out_feats))
        self.ws = nn.Parameter(torch.Tensor(out_feats, out_feats))
        if bias:
            self.bh = nn.Parameter(torch.Tensor(out_feats))
            self.bs = nn.Parameter(torch.Tensor(out_feats))
        else:
            self.bh = None
            self.bs = None
        self.activation = activation
        if dropout:
            self.dropout = nn.Dropout(p=dropout)
        else:
            # Falsy sentinel so forward() can skip dropout entirely.
            self.dropout = 0.0
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(fan_out), 1/sqrt(fan_out)] per branch."""
        stdv1 = (1.0 / math.sqrt(self.wh.size(1)))
        self.wh.data.uniform_((- stdv1), stdv1)
        if (self.bh is not None):
            self.bh.data.uniform_((- stdv1), stdv1)
        stdv2 = (1.0 / math.sqrt(self.ws.size(1)))
        self.ws.data.uniform_((- stdv2), stdv2)
        if (self.bs is not None):
            self.bs.data.uniform_((- stdv2), stdv2)

    def forward(self, h, s):
        if self.dropout:
            h = self.dropout(h)
            s = self.dropout(s)
        # Linear projection per branch.
        h = torch.matmul(h, self.wh)
        s = torch.matmul(s, self.ws)
        # Pre-normalization (first half of symmetric D^-1/2 A D^-1/2).
        h = (h * self.g.ndata['norm'])
        s = (s * self.f.ndata['norm'])
        self.g.ndata['h'] = h
        self.f.ndata['s'] = s
        # Aggregate neighbor features by sum on each graph.
        # NOTE(review): fn.copy_src is the legacy DGL API (renamed copy_u in
        # newer releases) — confirm the pinned DGL version.
        self.g.update_all(fn.copy_src(src='h', out='m'), fn.sum(msg='m', out='h'))
        self.f.update_all(fn.copy_src(src='s', out='m'), fn.sum(msg='m', out='s'))
        h = self.g.ndata.pop('h')
        s = self.f.ndata.pop('s')
        # Post-normalization (second half of the symmetric normalization).
        h = (h * self.g.ndata['norm'])
        s = (s * self.f.ndata['norm'])
        if (self.bh is not None):
            h = (h + self.bh)
            s = (s + self.bs)
        # Skip connection: sum the two branches before the nonlinearity.
        h = (h + s)
        if self.activation:
            h = self.activation(h)
        return h
def _transformList(l):
    """Pack a list of arrays into a 1-D object ndarray, preserving ragged
    shapes.

    Element-wise assignment avoids np.array's attempt to broadcast the
    inputs into one rectangular array.
    """
    # Bug fix: np.object was removed in NumPy 1.24; the builtin `object`
    # is the supported dtype spelling.
    ret = np.empty(len(l), dtype=object)
    for i, arr in enumerate(l):
        ret[i] = arr
    return ret
def inference_context(model):
    """Temporarily put `model` in eval mode, restoring its previous
    training mode afterwards.

    A generator context manager (presumably decorated with
    @contextmanager at its original definition site — confirm upstream).
    """
    training_mode = model.training
    model.eval()
    try:
        yield
    finally:
        # Bug fix: restore the mode even when the body raises; previously an
        # exception left the model stuck in eval mode.
        model.train(training_mode)
# NOTE(review): the tuple below looks like the arguments of a stripped Dash
# decorator (e.g. @app.callback(Output(...), Input(...))) — confirm against
# the original file.
(Output('select-causal-method', 'options'), Input('select-causal-method-parent', 'n_clicks'))
def update_method_dropdown(n_clicks):
    """Populate the causal-method dropdown when its parent element fires."""
    options = []
    ctx = dash.callback_context
    prop_id = ctx.triggered_id
    # Only rebuild the options when this specific component triggered us.
    if (prop_id == 'select-causal-method-parent'):
        methods = sorted(causal_method.get_supported_methods().keys())
        options += [{'label': s, 'value': s} for s in methods]
    return options
class DataConfig():
    """Holds default argument values and applies them before building the
    data loaders."""

    def __init__(self, defaults=None):
        super(DataConfig, self).__init__()
        # Bug fix: the previous mutable default argument (defaults={}) was
        # shared across all instances, so set_defaults() on one instance
        # leaked into every other DataConfig.
        self.defaults = {} if defaults is None else defaults

    def apply(self, args):
        """Fill missing attributes on `args` from the defaults, then build
        and return the data loaders."""
        if (torch.distributed.get_rank() == 0):
            print('configuring data')
        self.apply_defaults(args)
        return make_loaders(args)

    def set_defaults(self, **kwargs):
        """Register (or override) default values."""
        for (k, v) in kwargs.items():
            self.defaults[k] = v

    def apply_defaults(self, args):
        """Set each default on `args` (dashes become underscores) unless the
        attribute already exists."""
        for (k, v) in self.defaults.items():
            k = k.replace('-', '_')
            if (not hasattr(args, k)):
                setattr(args, k, v)
def render_missing_impact(itmdt: Intermediate, cfg: Config) -> Dict[str, Any]:
    """Assemble the missing-value impact report (bar chart, spectrum, heat
    map, dendrogram, stats table) into render data for the layout.

    Each enabled visual contributes a Panel plus its how-to guide entry.
    """
    plot_width = cfg.plot.width if cfg.plot.width is not None else 500
    plot_height = cfg.plot.height if cfg.plot.height is not None else 500
    tabs: List[Panel] = []
    htgs: Dict[str, List[Tuple[str, str]]] = {}
    if cfg.bar.enable:
        fig_barchart = render_bar_chart(itmdt['data_bars'], 'linear', plot_width, plot_height)
        tabs.append(Panel(child=row(fig_barchart), title='Bar Chart'))
        htgs['Bar Chart'] = cfg.bar.missing_how_to_guide(plot_height, plot_width)
    if cfg.spectrum.enable:
        fig_spectrum = render_missing_spectrum(itmdt['data_spectrum'], itmdt['data_total_missing'], plot_width, plot_height)
        tabs.append(Panel(child=row(fig_spectrum), title='Spectrum'))
        htgs['Spectrum'] = cfg.spectrum.how_to_guide(plot_height, plot_width)
    if cfg.heatmap.enable:
        fig_heatmap = render_heatmaps(itmdt['data_heatmap'], plot_width, plot_height)
        tabs.append(Panel(child=row(fig_heatmap), title='Heat Map'))
        htgs['Heat Map'] = cfg.heatmap.missing_how_to_guide(plot_height, plot_width)
    if cfg.dendro.enable and itmdt['ncols'] > 1:
        # A dendrogram needs at least two columns to cluster.
        fig_dendrogram = render_dendrogram(itmdt['data_dendrogram'], plot_width, plot_height)
        tabs.append(Panel(child=row(fig_dendrogram), title='Dendrogram'))
        htgs['Dendrogram'] = cfg.dendro.how_to_guide(plot_height, plot_width)
    # Bug fix: stat_dict was only bound when cfg.stats.enable was True while
    # the return expression referenced it unconditionally, so disabling the
    # stats section broke the function. Bind it unconditionally instead.
    stat_dict = {}
    if cfg.stats.enable:
        stat_dict = {name: itmdt['missing_stat'][name] for name in itmdt['missing_stat']}
    return {'insights': itmdt['insights'], 'tabledata': ({'Missing Statistics': stat_dict} if cfg.stats.enable else {}), 'layout': [panel.child.children[0] for panel in tabs], 'meta': [panel.title for panel in tabs], 'container_width': max([panel.child.children[0].plot_width for panel in tabs]), 'how_to_guide': htgs}
def _bfs_relational(adj, roots, max_nodes_per_hop=None):
    """Breadth-first expansion over a relational adjacency structure.

    Yields the frontier (set of newly reached nodes) at each hop, optionally
    subsampled to at most `max_nodes_per_hop` nodes.
    """
    visited = set()
    current_lvl = set(roots)
    next_lvl = set()
    while current_lvl:
        visited.update(current_lvl)
        next_lvl = _get_neighbors(adj, current_lvl)
        next_lvl -= visited  # never revisit nodes from earlier hops
        if max_nodes_per_hop and max_nodes_per_hop < len(next_lvl):
            # Bug fix: random.sample() no longer accepts sets (deprecated in
            # Python 3.9, removed in 3.11); convert to a list first.
            next_lvl = set(random.sample(list(next_lvl), max_nodes_per_hop))
        yield next_lvl
        current_lvl = set(next_lvl)
class AutoModelForMaskedImageModeling(metaclass=DummyObject):
    """Import-time placeholder used when torch is not installed.

    Instantiation checks for the torch backend immediately; the DummyObject
    metaclass presumably intercepts other access with a helpful error —
    confirm in the metaclass definition.
    """
    # Backends required for the real class to be usable.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def test_mixed_none_2d_local():
    """argsort along axis=1 of a jagged array containing None values:
    per the expected output, non-missing values are ordered first, the
    indices of None entries follow, and a missing sublist stays None."""
    result = ak.argsort([[None, 1, None, 0, None, None, (- 1)], None, [None, 2, None, 2, 0, None, (- 2)]], axis=1)
    assert ak.is_valid(result)
    assert (result.to_list() == [[6, 3, 1, 0, 2, 4, 5], None, [6, 4, 1, 3, 0, 2, 5]])
    # Result type: length-3 option(list(int64)).
    assert (result.type == ak.types.ArrayType(ak.types.OptionType(ak.types.ListType(ak.types.NumpyType('int64'))), 3))
# NOTE(review): the '.parametrize(...)' lines look like stripped
# @pytest.mark.parametrize decorators — confirm against the original file.
.parametrize('whiten', ['arbitrary-variance', 'unit-variance', False])
.parametrize('return_X_mean', [True, False])
.parametrize('return_n_iter', [True, False])
def test_fastica_output_shape(whiten, return_X_mean, return_n_iter):
    """fastica returns 3 base outputs plus one per enabled optional flag;
    with whiten=False, the first returned slot is None."""
    n_features = 3
    n_samples = 10
    rng = np.random.RandomState(0)
    X = rng.random_sample((n_samples, n_features))
    # Booleans count as 0/1 when added.
    expected_len = ((3 + return_X_mean) + return_n_iter)
    out = fastica(X, whiten=whiten, return_n_iter=return_n_iter, return_X_mean=return_X_mean)
    assert (len(out) == expected_len)
    if (not whiten):
        assert (out[0] is None)
class ConvolutionalComponent(tf.keras.Model):
    """Stack of Conv1D(+ReLU) + MaxPool1D layers built from parallel
    channel/kernel/stride lists.

    `channels` must have exactly one more entry than `kernels` and
    `strides` (it includes the input channel count); any length mismatch
    raises RuntimeError.
    """

    def __init__(self, channels, kernels, strides, name='ConvolutionalComponent', **kwargs):
        super().__init__(name=name, **kwargs)
        self.channels = channels
        self.kernels = kernels
        self.strides = strides
        # One conv layer per consecutive channel pair.
        self.num_of_nets = (len(self.channels) - 1)
        if ((len(self.kernels) != len(self.strides)) or (self.num_of_nets != len(self.kernels))):
            raise RuntimeError("channels, kernels and strides don't match\n")
        self.initializer = tf.initializers.GlorotUniform()
        # NOTE(review): mixes tf.keras and a bare `keras` import below —
        # confirm both resolve to the same Keras in this codebase.
        self.cnn_network = tf.keras.Sequential()
        for i in range(self.num_of_nets):
            self.cnn_network.add(keras.layers.Conv1D(kernel_size=self.kernels[i], filters=self.channels[(i + 1)], strides=self.strides[i], use_bias=True, activation='relu', kernel_initializer=self.initializer))
            self.cnn_network.add(keras.layers.MaxPool1D())

    def call(self, inputs, **kwargs):
        """Run the full convolutional stack on `inputs`."""
        return self.cnn_network(inputs)
class ResConvBlock(ConvBlock):
    """ConvBlock with a residual (identity skip) connection around it."""

    def forward(self, x):
        # out = x + F(x), the standard residual formulation.
        return x + self.conv_block(x)
class Conv1dGLU(nn.Module):
    """1-D gated linear unit block with a residual connection.

    A single Conv1d produces 2*out_channels features, split into a linear
    half and a gate half: out = x + dropout(a * sigmoid(b)).
    NOTE(review): padding is fixed at 2, so sequence length is only
    preserved for kernel_size == 5 — confirm intended.
    """

    def __init__(self, in_channels, out_channels, kernel_size, dropout):
        super(Conv1dGLU, self).__init__()
        self.out_channels = out_channels
        self.conv1 = nn.Conv1d(in_channels, 2 * out_channels, kernel_size=kernel_size, padding=2)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        residual = x
        linear_part, gate = torch.split(self.conv1(x), self.out_channels, dim=1)
        gated = linear_part * torch.sigmoid(gate)
        return residual + self.dropout(gated)
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0, method='weak'):
    """Approximate-equality test with selectable relative-tolerance semantics.

    method: 'asymmetric' (relative to b), 'strong' (both), 'weak' (either),
    or 'average' (relative to the mean of a and b). abs_tol is an absolute
    fallback. Exact equality always passes; a single infinity never does.

    Raises ValueError on an unknown method or a negative tolerance.
    """
    if method not in ('asymmetric', 'strong', 'weak', 'average'):
        raise ValueError('method must be one of: "asymmetric", "strong", "weak", "average"')
    if rel_tol < 0.0 or abs_tol < 0.0:
        raise ValueError('error tolerances must be non-negative')
    if a == b:
        return True
    # cmath handles complex inputs as well as floats.
    if cmath.isinf(a) or cmath.isinf(b):
        return False
    diff = abs(b - a)
    if diff <= abs_tol:
        return True
    tol_a = abs(rel_tol * a)
    tol_b = abs(rel_tol * b)
    if method == 'asymmetric':
        return diff <= tol_b
    if method == 'strong':
        return diff <= tol_b and diff <= tol_a
    if method == 'weak':
        return diff <= tol_b or diff <= tol_a
    # Only 'average' remains after the validation above.
    return diff <= abs(rel_tol * (a + b) / 2)
def __get_by_pos(candidates, pos):
    """Return the first mention whose 'type' attribute equals `pos`, or
    None when no candidate matches."""
    return next((mention for mention in candidates if mention.attributes['type'] == pos), None)
def convert_links(links_by_name):
    """Convert the relation name in every link triple, keyed by document name.

    Each link is a (start, end, relation) triple; the relation component is
    mapped through convert_name(), the endpoints are preserved.
    """
    all_converted = {}
    # Bug fix: dict.iteritems() is Python 2 only; use items() on Python 3.
    for name, links in links_by_name.items():
        all_converted[name] = [(l[0], l[1], convert_name(l[2])) for l in links]
    return all_converted
def smtpNotifier(to, subject, body):
    """Send a plain-text notification email to `to` via the local SMTP server.

    Sender and recipient are both `to`. Failures are reported on stdout
    instead of raising (best-effort notification).
    """
    sender = to
    receivers = [to]
    # Bug fix: the previous indented triple-quoted literal embedded the
    # source indentation into every header line, producing a malformed SMTP
    # message. Build the headers with explicit line breaks instead.
    message = (
        f'From: <{to}>\n'
        f'To: {to} <{to}>\n'
        f'Subject: {subject}\n'
        '\n'
        f'{body}\n'
    )
    try:
        smtpObj = smtplib.SMTP('localhost')
        smtpObj.sendmail(sender, receivers, message)
        print(f'Successfully notified to {to}')
    except smtplib.SMTPException:
        print(f'Error: unable to notify to {to}')
def rec_get_const_div_inv(expr: Expression, desc_ctx: LeanDescContext) -> List[Tuple[(str, int, bool)]]:
    """Recursively collect constant-division inverses from an expression tree.

    Returns a list of (lean_description, constant, is_full_expr) triples for
    every sub-expression where get_const_div_inv() finds a constant divisor;
    is_full_expr marks whether the whole operator expression (rather than
    just its right operand) carries the division.
    """
    # Unary wrappers: recurse into the wrapped expression.
    if isinstance(expr, ExprNeg):
        return rec_get_const_div_inv(expr.val, desc_ctx)
    if isinstance(expr, ExprCast):
        return rec_get_const_div_inv(expr.expr, desc_ctx)
    if isinstance(expr, ExprOperator):
        inv = rec_get_const_div_inv(expr.a, desc_ctx)
        const_inv = get_const_div_inv(expr, desc_ctx.simplifier)
        if (const_inv is not None):
            (the_const, is_full_expr) = const_inv
            if is_full_expr:
                # The whole operator expression is the divided term.
                return (inv + [(to_lean_description(expr, context=desc_ctx), the_const, is_full_expr)])
            # Only the right operand carries the division.
            # NOTE(review): this call omits context=desc_ctx unlike the
            # branch above — confirm whether that is intentional.
            return (inv + [(to_lean_description(expr.b), the_const, is_full_expr)])
        return (inv + rec_get_const_div_inv(expr.b, desc_ctx))
    if isinstance(expr, ExprPow):
        # Scan both the base and the exponent.
        return (rec_get_const_div_inv(expr.a, desc_ctx) + rec_get_const_div_inv(expr.b, desc_ctx))
    if isinstance(expr, ExprParentheses):
        return rec_get_const_div_inv(expr.val, desc_ctx)
    if isinstance(expr, ExprFuncCall):
        # Accumulate results from every call argument.
        inv = []
        for arg in expr.rvalue.arguments.args:
            inv += rec_get_const_div_inv(arg.expr, desc_ctx)
        return inv
    if isinstance(expr, ExprDeref):
        return rec_get_const_div_inv(expr.addr, desc_ctx)
    # Leaves (identifiers, literals, ...) contribute nothing.
    return []
class TestImitationLoss(TestCase):
    """Smoke tests for ImitationLoss on random TF1-style tensors."""

    def _fake_tensors(self):
        # Random (BATCH, 2, ACTION_SIZE) stand-ins for the expected action
        # and control-network output tensors.
        return {'output_actions': tf.random_uniform((BATCH, 2, ACTION_SIZE)), 'ctrnet_outputs': tf.random_uniform((BATCH, 2, ACTION_SIZE))}

    def test_float_outputs(self):
        """Both support and query losses evaluate to numpy float32 scalars."""
        il = ImitationLoss()
        with tf.variable_scope('test_float_outputs'):
            outputs = il.consume(self._fake_tensors())
        sess = tf.InteractiveSession()
        (loss_support, loss_query) = sess.run([outputs['loss_support'], outputs['loss_query']])
        self.assertIs(type(loss_support), np.float32)
        self.assertIs(type(loss_query), np.float32)
def embed_model_spatio_temporal_gcnn(n_neuron, timesteps, num_nodes, num_features, graph_conv_filters_shape1, graph_conv_filters_shape2, num_filters, num_classes, n_dropout, protocol):
    """Build a two-stream spatio-temporal attention model: an I3D video
    branch whose Mixed_5c features are modulated by spatial/temporal
    attention computed from a skeleton GCNN branch, plus an embedding
    distance output for cross-modal matching.

    Returns a Model with inputs = GCNN inputs + video input and outputs =
    (action-class softmax, Manhattan embedding distance).
    NOTE(review): `protocol` is accepted but never used here — confirm.
    """
    # Video branch: pretrained I3D backbone (compiled standalone first).
    i3d = i3d_modified(weights='rgb_imagenet_and_kinetics')
    model_branch = i3d.i3d_flattened(num_classes=num_classes)
    optim = SGD(lr=0.01, momentum=0.9)
    model_branch.compile(loss='categorical_crossentropy', optimizer=optim, metrics=['accuracy'])
    print('Build model...')
    model_inputs = []
    # Skeleton branch: graph CNN whose output feeds two attention heads.
    model_gcnn = GCNN_skeleton_t16(num_nodes, num_features, graph_conv_filters_shape1, graph_conv_filters_shape2, num_filters, num_classes, n_neuron, n_dropout, timesteps)
    z1 = Dense(256, activation='tanh', name='z1_layer', trainable=True)(model_gcnn.get_layer('gcnn_out').output)
    z2 = Dense(128, activation='tanh', name='z2_layer', trainable=True)(model_gcnn.get_layer('gcnn_out').output)
    # Spatial attention over the 7x7 feature grid (49 cells), temporal
    # attention over 2 time chunks; both regularized and zero-initialized.
    fc_main_spatial = Dense(49, activity_regularizer=attention_reg, kernel_initializer='zeros', bias_initializer='zeros', activation='sigmoid', trainable=True, name='dense_spatial')(z1)
    fc_main_temporal = Dense(2, activity_regularizer=attention_reg, kernel_initializer='zeros', bias_initializer='zeros', activation='softmax', trainable=True, name='dense_temporal')(z2)
    # Broadcast both attention vectors to the Mixed_5c feature shape.
    atten_mask_spatial = keras.layers.core.Lambda(inflate_dense_spatial, output_shape=(2, 7, 7, 1024))(fc_main_spatial)
    atten_mask_temporal = keras.layers.core.Lambda(inflate_dense_temporal, output_shape=(2, 7, 7, 1024))(fc_main_temporal)
    atten_mask = keras.layers.Multiply()([atten_mask_spatial, atten_mask_temporal])
    # Both branches are fully trainable.
    for l in model_branch.layers:
        l.trainable = True
    for layer in model_gcnn.layers:
        layer.trainable = True
    for i in model_gcnn.input:
        model_inputs.append(i)
    model_inputs.append(model_branch.input)
    # Cross-modal embedding: project video features and spatial attention
    # into a shared 256-d space and compare by Manhattan distance.
    flatten_video = Flatten(name='flatten_video')(model_branch.get_layer('Mixed_5c').output)
    embed_video = Dense(256, activation='sigmoid', trainable=True, name='dense_video')(flatten_video)
    embed_skeleton = Dense(256, activation='sigmoid', trainable=True, name='dense_skeleton')(fc_main_spatial)
    embed_output = Merge(mode=(lambda x: manhattan_distance(x[0], x[1])), output_shape=(lambda inp_shp: (inp_shp[0][0], 1)), name='embed_output')([embed_video, embed_skeleton])
    # Residual attention: attended features are added back to the originals.
    multiplied_features = keras.layers.Multiply()([atten_mask, model_branch.get_layer('Mixed_5c').output])
    added_features = keras.layers.Add()([multiplied_features, model_branch.get_layer('Mixed_5c').output])
    # Classification head on the attended features.
    x = AveragePooling3D((2, 7, 7), strides=(1, 1, 1), padding='valid', name=('global_avg_pool' + 'second'))(added_features)
    x = Dropout(n_dropout)(x)
    x = conv3d_bn(x, num_classes, 1, 1, 1, padding='same', use_bias=True, use_activation_fn=False, use_bn=False, name=('Conv3d_6a_1x1' + 'second'))
    x = Flatten(name=('flatten' + 'second'))(x)
    predictions = Dense(num_classes, activation='softmax', name='action_output')(x)
    model = Model(inputs=model_inputs, outputs=[predictions, embed_output], name='spatial_temporal_attention')
    return model
class AttrDict(dict):
    """Dict whose entries can also be read and written as attributes.

    Attribute writes always create dict keys; attribute reads prefer keys
    and fall back to normal attribute lookup (methods, etc.). A key that
    shadows a dict method name will hide that method.
    """

    __setattr__ = dict.__setitem__

    def __getattribute__(self, name):
        if name in self:
            return self[name]
        return super().__getattribute__(name)
def is_hf_dataset(dataset):
    """Return True iff *dataset* is a HuggingFace `datasets` (Iterable)Dataset."""
    if is_datasets_available():
        # Import lazily so the check is safe when `datasets` is not installed.
        from datasets import Dataset, IterableDataset
        return isinstance(dataset, (Dataset, IterableDataset))
    return False
def test_config_hdf(hdf_file_path, tardis_config_verysimple):
    """Round-trip a Configuration through HDF and compare the stored config."""
    config = Configuration.from_config_dict(tardis_config_verysimple, validate=True, config_dirname='test')
    config.to_hdf(hdf_file_path, overwrite=True)
    stored = pd.read_hdf(hdf_file_path, key='/simulation/config')
    original = config.get_properties()['config']
    assert stored[0] == original[0]
def random_swap(words, n):
    """Return a copy of *words* with `swap_word` applied *n* times in succession."""
    result = words.copy()
    for _ in range(n):
        result = swap_word(result)
    return result
class DummyMortalityOntology():
    """Stub ontology: only the SNOMED root has a child, the synthetic death code."""

    def get_children(self, code: str) -> List[str]:
        """Return child codes of *code* — empty except for the 'SNOMED/' root."""
        return ['DEATH_CHILD'] if code == 'SNOMED/' else []
# NOTE(review): this bare call looks like a decorator that lost its '@' and
# registry prefix during extraction (likely `@registry.register_builder('ok_vqa_instruct')`)
# — confirm against the original source.
_builder('ok_vqa_instruct')
class OKVQAInstructBuilder(COCOVQAInstructBuilder):
    # Builder for the instruction-tuning variant of OK-VQA; only the default
    # config path differs from the parent COCO-VQA instruct builder.
    DATASET_CONFIG_DICT = {'default': 'configs/datasets/okvqa/defaults_instruct.yaml'}
# NOTE(review): the leading `.parametrize(...)` is a decorator whose
# `@pytest.mark` prefix appears to have been lost during extraction —
# restore `@pytest.mark.parametrize` before running.
.parametrize('param,min_feature,value', [(parametrization.GratingParam([1, 2, 5, 6.7], 10), 1.5, [[1, (- 1), 0, 0], [0, 1, (- 1), 0], [0, 0, 1, (- 1)], [(- 1), 0, 0, 0], [0, 0, 0, 1]]), (parametrization.CompositeParam([parametrization.GratingParam([1, 2, 5, 6.7], 10), parametrization.GratingParam([3, 4], 8)]), 1.5, [[1, (- 1), 0, 0, 0, 0], [0, 1, (- 1), 0, 0, 0], [0, 0, 1, (- 1), 0, 0], [(- 1), 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, (- 1)], [0, 0, 0, 0, (- 1), 0], [0, 0, 0, 0, 0, 1]])])
def test_grating_feature_constraint_gradient(param, min_feature, value):
    """Check GratingFeatureConstraint's analytic gradient against the expected matrix."""
    constraint = problem.GratingFeatureConstraint(min_feature)
    np.testing.assert_almost_equal(constraint.calculate_gradient(param), np.array(value))
def spearman(x, y):
    """Spearman rank-correlation coefficient of two equal-length sequences.

    Ranks are 1-based positions in the sorted sequence; tied values all
    receive the rank of their first occurrence (no fractional tie ranks),
    matching the original `sorted(seq).index(val) + 1` semantics.
    Uses the classic formula rho = 1 - 6*sum(d^2) / (n*(n^2 - 1)).
    Raises ZeroDivisionError for length-1 input (as before).
    """
    assert len(x) == len(y) > 0

    def _ranks(seq):
        # Rank = 1-based position of the value's FIRST occurrence in sorted
        # order. Built once in O(n log n) instead of the previous
        # sorted(seq).index(val) per element, which re-sorted and linearly
        # scanned for every value (O(n^2 log n)).
        first_pos = {}
        for pos, val in enumerate(sorted(seq), start=1):
            first_pos.setdefault(val, pos)
        return [first_pos[val] for val in seq]

    n = len(x)
    d = sum((rx - ry) ** 2 for rx, ry in zip(_ranks(x), _ranks(y)))
    return 1.0 - (6.0 * d) / float(n * ((n ** 2) - 1.0))
class FlowDensity(mrl.Module):
    """Density model over a replay-buffer item, fit online with a RealNVP flow.

    Every `optimize_every` steps a batch is drawn from the replay buffer,
    (optionally) standardized, and used to fit the flow for one epoch. The
    fitted log density can then be queried via `evaluate_log_density`.
    """

    def __init__(self, item, optimize_every=2, batch_size=1000, lr=0.001, num_layer_pairs=3, normalize=True):
        super().__init__('{}_flow'.format(item), required_agent_modules=['replay_buffer'], locals=locals())
        self.step = 0
        self.item = item  # name of the buffer field to model
        self.num_layer_pairs = num_layer_pairs
        self.optimize_every = optimize_every
        self.batch_size = batch_size
        self.lazy_load = None  # deferred checkpoint folder (see load())
        self.flow_model = None  # built lazily: input size unknown until first batch
        self.dev = None
        self.lr = lr
        # Standardization statistics; identity transform until the first fit.
        self.sample_mean = 0.0
        self.sample_std = 1.0
        self.normalize = normalize

    def _setup(self):
        assert isinstance(self.replay_buffer, OnlineHERBuffer)

    def _init_from_sample(self, x):
        """Build the RealNVP model once the input dimensionality is known."""
        input_size = x.shape[-1]
        self.input_channel = input_size
        if self.config.get('device'):
            self.dev = self.config.device
        elif self.dev is None:
            self.dev = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.flow_model = RealNVP(input_channel=self.input_channel, lr=self.lr, num_layer_pairs=self.num_layer_pairs, dev=self.dev)

    def evaluate_log_density(self, samples):
        """Log density of `samples` under the fitted flow (standardized space)."""
        # BUG FIX: `self.ready` is a bound method and was always truthy, so
        # the assertion never guarded anything; it must be called.
        assert self.ready(), 'ENSURE READY BEFORE EVALUATING LOG DENSITY'
        return self.flow_model.score_samples((samples - self.sample_mean) / self.sample_std)

    def ready(self):
        """The density can be evaluated once the flow model exists."""
        return self.flow_model is not None

    def _optimize(self, force=False):
        buffer = self.replay_buffer.buffer.BUFF['buffer_' + self.item]
        self.step += 1
        if force or (((self.step % self.optimize_every) == 0) and len(buffer)):
            sample_idxs = np.random.randint(len(buffer), size=self.batch_size)
            samples = buffer.get_batch(sample_idxs)
            if self.normalize:
                # Refresh standardization stats from the current batch; the
                # epsilon keeps the division safe for constant dimensions.
                self.sample_mean = np.mean(samples, axis=0, keepdims=True)
                self.sample_std = np.std(samples, axis=0, keepdims=True) + 0.0001
                samples = (samples - self.sample_mean) / self.sample_std
            if self.flow_model is None:
                self._init_from_sample(samples)
                # A deferred checkpoint can only be restored once the model exists.
                if self.lazy_load is not None:
                    self.load(self.lazy_load)
                    self.lazy_load = None
            samples = self.torch(samples)
            self.flow_model.fit(samples, epochs=1)

    def save(self, save_folder: str):
        """Checkpoint the flow model (if built) under `save_folder`."""
        path = os.path.join(save_folder, self.module_name + '.pt')
        if self.flow_model is not None:
            torch.save({'flow_model': self.flow_model}, path)

    def load(self, save_folder: str):
        """Restore the flow model, deferring until the model is built if needed."""
        path = os.path.join(save_folder, self.module_name + '.pt')
        if (self.flow_model is None) and os.path.exists(path):
            # Architecture not built yet — defer until `_optimize` creates it
            # from the first sample batch.
            self.lazy_load = save_folder
        else:
            # BUG FIX: `save` wraps the model in a dict, so `load` must
            # unwrap the same key instead of assigning the raw dict.
            self.flow_model = torch.load(path)['flow_model']
def get_devices(devices=None):
    """Resolve a list of torch devices.

    Without CUDA, always returns [cpu] (ignoring *devices*). With CUDA:
    *devices* may list device ordinals/strings to use; when empty or None,
    every visible GPU is returned.
    """
    if not torch.cuda.is_available():
        return [torch.device('cpu')]
    if devices:
        return [torch.device(spec) for spec in devices]
    return [torch.device('cuda:{}'.format(idx)) for idx in range(torch.cuda.device_count())]
def block_inception_b(inputs, scope=None, reuse=None):
    """Inception-v4 'Inception-B' (17x17 grid) block.

    Four parallel branches — 1x1, factorized 7x7 (1x7 + 7x1), double
    factorized 7x7, and avg-pool + 1x1 — concatenated along the channel axis.
    """
    # Default stride=1 / SAME padding so every branch preserves the spatial
    # resolution and the outputs can be concatenated.
    with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d], stride=1, padding='SAME'):
        with tf.variable_scope(scope, 'BlockInceptionB', [inputs], reuse=reuse):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
                branch_1 = slim.conv2d(branch_1, 224, [1, 7], scope='Conv2d_0b_1x7')
                branch_1 = slim.conv2d(branch_1, 256, [7, 1], scope='Conv2d_0c_7x1')
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
                branch_2 = slim.conv2d(branch_2, 192, [7, 1], scope='Conv2d_0b_7x1')
                branch_2 = slim.conv2d(branch_2, 224, [1, 7], scope='Conv2d_0c_1x7')
                branch_2 = slim.conv2d(branch_2, 224, [7, 1], scope='Conv2d_0d_7x1')
                branch_2 = slim.conv2d(branch_2, 256, [1, 7], scope='Conv2d_0e_1x7')
            with tf.variable_scope('Branch_3'):
                branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
                branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
            # BUG FIX: TF >= 1.0 expects tf.concat(values, axis); the old
            # (axis, values) argument order raises a TypeError. Channels are
            # axis 3 in NHWC layout.
            return tf.concat([branch_0, branch_1, branch_2, branch_3], axis=3)
class SingleTablePreset:
    """Preset wrapper that configures a single-table synthesizer by name.

    Only presets listed in the module-level PRESETS are accepted; the
    FAST_ML preset maps to a GaussianCopulaSynthesizer tuned for speed.
    All modeling/sampling calls are delegated to the wrapped synthesizer.
    """

    _synthesizer = None
    _default_synthesizer = GaussianCopulaSynthesizer

    def _setup_fast_preset(self, metadata, locales):
        # FAST_ML trades fidelity for speed: fixed normal marginals, no rounding.
        self._synthesizer = GaussianCopulaSynthesizer(metadata=metadata, default_distribution='norm', enforce_rounding=False, locales=locales)

    def __init__(self, metadata, name, locales=None):
        if name not in PRESETS:
            raise ValueError(f"'name' must be one of {PRESETS}.")
        self.name = name
        if name == FAST_ML_PRESET:
            self._setup_fast_preset(metadata, locales)

    def add_constraints(self, constraints):
        """Add constraints to the underlying synthesizer."""
        self._synthesizer.add_constraints(constraints)

    def get_metadata(self):
        """Return the metadata of the underlying synthesizer."""
        return self._synthesizer.get_metadata()

    def get_parameters(self):
        """Return the parameters of the underlying synthesizer."""
        return self._synthesizer.get_parameters()

    def fit(self, data):
        """Fit the underlying synthesizer to *data*."""
        self._synthesizer.fit(data)

    def sample(self, num_rows, max_tries_per_batch=100, batch_size=None, output_file_path=None):
        """Sample *num_rows* synthetic rows from the fitted synthesizer."""
        return self._synthesizer.sample(num_rows, max_tries_per_batch, batch_size, output_file_path)

    def sample_from_conditions(self, conditions, max_tries_per_batch=100, batch_size=None, output_file_path=None):
        """Sample rows satisfying the given conditions."""
        return self._synthesizer.sample_from_conditions(conditions, max_tries_per_batch, batch_size, output_file_path)

    def sample_remaining_columns(self, known_columns, max_tries_per_batch=100, batch_size=None, output_file_path=None):
        """Sample values for the unknown columns given *known_columns*."""
        return self._synthesizer.sample_remaining_columns(known_columns, max_tries_per_batch, batch_size, output_file_path)

    def save(self, filepath):
        """Pickle this preset (and its fitted synthesizer) to *filepath*."""
        with open(filepath, 'wb') as output:
            cloudpickle.dump(self, output)

    @classmethod
    def load(cls, filepath):
        """Load a previously saved preset from *filepath*.

        BUG FIX: `load` takes `cls` but was missing the @classmethod
        decorator, so `SingleTablePreset.load(path)` would have bound
        `path` to `cls` and failed.
        """
        with open(filepath, 'rb') as f:
            model = cloudpickle.load(f)
        return model

    @classmethod
    def list_available_presets(cls, out=sys.stdout):
        """Write the available preset names to *out* (stdout by default).

        BUG FIX: also missing @classmethod (see `load`).
        """
        out.write(f'''Available presets:
{PRESETS}
Supply the desired preset using the `name` parameter.
Have any requests for custom presets? Contact the SDV team to learn more an SDV Premium license.
''')

    def __repr__(self):
        return f'SingleTablePreset(name={self.name})'
def generate_case_from_nntxt_str(nntxt_str, nnp_filename, param_format, dataset_sample_num, batch_size=None):
    """Build a temporary .nnp archive (version + network + parameters) from an nntxt string.

    Yields the temporary .nnp file name. The single trailing `yield` suggests
    this was decorated with `@contextlib.contextmanager` in the original
    source — TODO confirm.
    """
    proto = proto_from_str(nntxt_str)
    # Point every dataset in the proto at a freshly generated CSV/PNG dataset.
    with generate_csv_png(dataset_sample_num, get_input_size(proto)) as dataset_csv_file:
        for ds in proto.dataset:
            ds.batch_size = (batch_size if batch_size else ds.batch_size)
            ds.uri = dataset_csv_file
            ds.cache_dir = os.path.join(os.path.dirname(dataset_csv_file), 'data.cache')
        # Serialize the (possibly modified) network definition back to text.
        nntxt_io = io.StringIO()
        text_format.PrintMessage(proto, nntxt_io)
        nntxt_io.seek(0)
        version = io.StringIO()
        version.write('{}\n'.format(nnp_version()))
        version.seek(0)
        # Snapshot the current parameters in the requested format.
        param = io.BytesIO()
        prepare_parameters(nntxt_str)
        nn.parameter.save_parameters(param, extension=param_format)
        # Assemble the .nnp from its three members (writestr => zip-style archive).
        with create_temp_with_dir(nnp_filename) as temp_nnp_file_name:
            with get_file_handle_save(temp_nnp_file_name, '.nnp') as nnp:
                nnp.writestr('nnp_version.txt', version.read())
                nnp.writestr('network.nntxt', nntxt_io.read())
                nnp.writestr('parameter{}'.format(param_format), param.read())
                (yield temp_nnp_file_name)
def mkdir_list(p_list, use_relative_path=True, log=True):
    """Create the parent directory for each path in *p_list*.

    When use_relative_path is True, paths are resolved against the project
    root (the portion of this file's path before 'utils'). Accepts a single
    path or a list of paths.
    """
    root_path = os.path.abspath(os.path.dirname(__file__)).split('utils')[0]
    if not isinstance(p_list, list):
        p_list = [p_list]
    for path in p_list:
        if use_relative_path:
            path = os.path.join(root_path, path)
        mkdir_p(get_dir_of_file(path), log)
def generate_targets(org_bboxes, p_c, p_e, motion_parameters):
    """Pack ground-truth pieces into a single target list.

    The shape unpacking doubles as a rank check: *motion_parameters* must be
    a 3-D array-like of shape (track_num, ..., ...).
    """
    track_num, _, _ = motion_parameters.shape
    return [org_bboxes, motion_parameters, p_c, p_e]
def parse_args():
    """Parse command-line arguments for the Med VQA entry point.

    Returns an argparse.Namespace with `cfg` (config path), `gpu`
    (device index) and `test` (bool: test vs. train mode).
    """

    def _str2bool(value):
        # BUG FIX: `type=bool` treats every non-empty string — including
        # "False" — as True, so `--test False` enabled test mode. Parse
        # textual booleans explicitly instead.
        if isinstance(value, bool):
            return value
        if value.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        if value.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected, got {!r}.'.format(value))

    parser = argparse.ArgumentParser(description='Med VQA')
    parser.add_argument('--cfg', help='decide which cfg to use', required=False, default='/home/test.yaml', type=str)
    parser.add_argument('--gpu', type=int, default=0, help='use gpu device. default:0')
    parser.add_argument('--test', type=_str2bool, default=False, help='Test or train.')
    args = parser.parse_args()
    return args
def convert_dataset_for_tensorflow(dataset, non_label_column_names, batch_size, dataset_mode='variable_batch', shuffle=True, drop_remainder=True):
    """Convert a HuggingFace dataset into a batched, padded tf.data.Dataset.

    dataset_mode:
      - 'variable_batch': each batch is padded only to its own longest sample.
      - 'constant_batch': every batch is padded to the dataset-wide max shape.
    Raises ValueError for any other mode. If the dataset has a 'label'
    column, elements are (features, label) tuples; otherwise features only.
    """
    def densify_ragged_batch(features, label=None):
        # Pad each ragged feature tensor to the dense shape chosen below.
        features = {feature: ragged_tensor.to_tensor(shape=batch_shape[feature]) for (feature, ragged_tensor) in features.items()}
        if (label is None):
            return features
        else:
            return (features, label)
    # Model inputs = all columns except the explicit non-label columns and 'label'.
    feature_keys = list((set(dataset.features.keys()) - set((non_label_column_names + ['label']))))
    if (dataset_mode == 'variable_batch'):
        # shape None => to_tensor pads per batch to that batch's longest sample.
        batch_shape = {key: None for key in feature_keys}
        data = {key: tf.ragged.constant(dataset[key]) for key in feature_keys}
    elif (dataset_mode == 'constant_batch'):
        data = {key: tf.ragged.constant(dataset[key]) for key in feature_keys}
        # Fixed dense shape: [batch_size] + global bounding dims of the ragged data.
        batch_shape = {key: tf.concat(([batch_size], ragged_tensor.bounding_shape()[1:]), axis=0) for (key, ragged_tensor) in data.items()}
    else:
        raise ValueError('Unknown dataset mode!')
    if ('label' in dataset.features):
        labels = tf.convert_to_tensor(np.array(dataset['label']))
        tf_dataset = tf.data.Dataset.from_tensor_slices((data, labels))
    else:
        tf_dataset = tf.data.Dataset.from_tensor_slices(data)
    if shuffle:
        tf_dataset = tf_dataset.shuffle(buffer_size=len(dataset))
    # NOTE(review): auto-sharding is disabled here — presumably to avoid
    # sharding issues for this in-memory dataset under tf.distribute; confirm.
    options = tf.data.Options()
    options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
    tf_dataset = tf_dataset.with_options(options).batch(batch_size=batch_size, drop_remainder=drop_remainder).map(densify_ragged_batch)
    return tf_dataset
def process_json_file(json_file_path, src_dir, ori_dst_dir, binary_dst_dir, instance_dst_dir):
    """Convert one lane-label JSON file (raw_file/h_samples/lanes schema —
    appears to be the TuSimple format, confirm) into training images.

    For each labeled frame, writes three aligned images under a sequential
    4-digit name: the original RGB frame, a binary lane mask (255 on lanes),
    and an instance mask (distinct gray value per lane). Frames containing a
    lane with no valid points are reported and skipped entirely.
    """
    assert ops.exists(json_file_path), '{:s} not exist'.format(json_file_path)
    # Continue numbering after any images already present in the output folder.
    image_nums = len(os.listdir(ori_dst_dir))
    count_unlabeled = 0
    with open(json_file_path, 'r') as file:
        for (line_index, line) in enumerate(file):
            labeled = True
            info_dict = json.loads(line)
            image_path = ops.join(src_dir, info_dict['raw_file'])
            assert ops.exists(image_path), '{:s} not exist'.format(image_path)
            # h_samples: fixed y coordinates; lanes: per-lane x coordinates
            # aligned with h_samples (-2 marks "no point at this y").
            h_samples = info_dict['h_samples']
            lanes = info_dict['lanes']
            image_name_new = '{:s}.png'.format('{:d}'.format((line_index + image_nums)).zfill(4))
            src_image = cv2.imread(image_path, cv2.IMREAD_COLOR)
            dst_binary_image = np.zeros([src_image.shape[0], src_image.shape[1]], np.uint8)
            dst_instance_image = np.zeros([src_image.shape[0], src_image.shape[1]], np.uint8)
            for (lane_index, lane) in enumerate(lanes):
                assert (len(h_samples) == len(lane))
                lane_x = []
                lane_y = []
                for index in range(len(lane)):
                    if (lane[index] == (- 2)):
                        # -2 means this lane has no point at this y sample.
                        continue
                    else:
                        ptx = lane[index]
                        pty = h_samples[index]
                        lane_x.append(ptx)
                        lane_y.append(pty)
                if (not lane_x):
                    # A lane with zero valid points marks the whole frame unlabeled.
                    labeled = False
                    continue
                lane_pts = np.vstack((lane_x, lane_y)).transpose()
                lane_pts = np.array([lane_pts], np.int64)
                cv2.polylines(dst_binary_image, lane_pts, isClosed=False, color=255, thickness=2)
                # Instance mask: each lane gets its own gray value (20, 70, 120, ...).
                cv2.polylines(dst_instance_image, lane_pts, isClosed=False, color=((lane_index * 50) + 20), thickness=6)
            if (not labeled):
                print('{} image has lane not labeled'.format(image_path))
                count_unlabeled += 1
                continue
            dst_binary_image_path = ops.join(binary_dst_dir, image_name_new)
            dst_instance_image_path = ops.join(instance_dst_dir, image_name_new)
            dst_rgb_image_path = ops.join(ori_dst_dir, image_name_new)
            cv2.imwrite(dst_binary_image_path, dst_binary_image)
            cv2.imwrite(dst_instance_image_path, dst_instance_image)
            cv2.imwrite(dst_rgb_image_path, src_image)
            print('Process {:s} success'.format(image_path))
    print(count_unlabeled, 'has not labeled lane')
def load_categories_from_csv_file(csv_path):
    """Read a two-column CSV of (id, name) rows into category dicts.

    Blank rows are skipped; any row without exactly two fields raises
    ValueError. Returns a list of {'id': int, 'name': str} dicts.
    """
    categories = []
    with tf.gfile.Open(csv_path, 'r') as csvfile:
        for row in csv.reader(csvfile, delimiter=',', quotechar='"'):
            if not row:
                continue  # tolerate empty lines
            if len(row) != 2:
                raise ValueError(('Expected 2 fields per row in csv: %s' % ','.join(row)))
            categories.append({'id': int(row[0]), 'name': row[1]})
    return categories
class IntegerAttribute(AbstractAttribute):
    """Numerical (non-categorical) attribute holding integer data.

    Coerces the raw column to int on construction and models its
    distribution with an equal-width histogram over [min, max].
    """

    def __init__(self, name, data, histogram_size):
        super().__init__(name, data, histogram_size)
        self.is_categorical = False
        self.is_numerical = True
        self.data_type = DataType.INTEGER
        # Force integer dtype on both the full column and its NaN-free view.
        self.data = self.data.astype(int)
        self.data_dropna = self.data_dropna.astype(int)

    def set_domain(self, domain=None):
        """Fix the attribute's [min, max] range and histogram bin edges.

        If *domain* is omitted, the range is inferred from the observed
        (NaN-free) data; bounds are always coerced to int.
        """
        if (domain is not None):
            (self.min, self.max) = domain
        else:
            self.min = self.data_dropna.min()
            self.max = self.data_dropna.max()
        self.min = int(self.min)
        self.max = int(self.max)
        # histogram_size bins => histogram_size + 1 integer edges.
        self.distribution_bins = linspace(self.min, self.max, (self.histogram_size + 1)).astype(int)
        self.domain_size = self.histogram_size

    def infer_distribution(self):
        """Estimate bin probabilities from the observed data histogram."""
        (frequency_counts, _) = histogram(self.data_dropna, bins=self.distribution_bins)
        self.distribution_probabilities = normalize_given_distribution(frequency_counts)

    def generate_values_as_candidate_key(self, n):
        # Integers need no special handling; delegate to the base class.
        return super().generate_values_as_candidate_key(n)

    def sample_values_from_binning_indices(self, binning_indices):
        """Sample concrete values for the given bins and round them to ints."""
        column = super().sample_values_from_binning_indices(binning_indices)
        column = column.round()
        column = column.astype(int)
        return column
# NOTE(review): the two bare names below look like decorators that lost their
# '@' and prefix during extraction (transformers tests use `@require_torch`
# and `@require_sigopt`) — confirm against the original test file.
_torch
_sigopt
class TrainerHyperParameterSigOptIntegrationTest(unittest.TestCase):
    """Integration test: Trainer.hyperparameter_search with the SigOpt backend."""

    def setUp(self):
        args = TrainingArguments('.')
        self.n_epochs = args.num_train_epochs
        self.batch_size = args.train_batch_size

    def test_hyperparameter_search(self):
        class MyTrialShortNamer(TrialShortNamer):
            # Short run names are derived from the two tuned scalars a and b.
            DEFAULTS = {'a': 0, 'b': 0}

        def hp_space(trial):
            # SigOpt-style search space: two integer parameters in [-4, 4].
            return [{'bounds': {'min': (- 4), 'max': 4}, 'name': 'a', 'type': 'int'}, {'bounds': {'min': (- 4), 'max': 4}, 'name': 'b', 'type': 'int'}]

        def model_init(trial):
            # Build a fresh regression model per trial (defaults when trial is None).
            if (trial is not None):
                a = trial.assignments['a']
                b = trial.assignments['b']
            else:
                a = 0
                b = 0
            config = RegressionModelConfig(a=a, b=b, double_output=False)
            return RegressionPreTrainedModel(config)

        def hp_name(trial):
            return MyTrialShortNamer.shortname(trial.assignments)

        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = get_regression_trainer(output_dir=tmp_dir, learning_rate=0.1, logging_steps=1, evaluation_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, num_train_epochs=4, disable_tqdm=True, load_best_model_at_end=True, logging_dir='runs', run_name='test', model_init=model_init)
            trainer.hyperparameter_search(direction='minimize', hp_space=hp_space, hp_name=hp_name, backend='sigopt', n_trials=4)
def diffusion_defaults():
    """Default hyper-parameters for Gaussian diffusion."""
    return {
        'learn_sigma': False,
        'diffusion_steps': 1000,
        'noise_schedule': 'linear',
        'timestep_respacing': '',
        'use_kl': False,
        'predict_xstart': False,
        'rescale_timesteps': False,
        'rescale_learned_sigmas': False,
    }
def parse_uri(uri):
    """Decompose *uri* via the module-level URI regex.

    Returns the capture groups at indices 1, 3, 4, 6 and 8.
    NOTE(review): assuming URI is the RFC 3986 appendix-B reference pattern,
    these correspond to scheme, authority, path, query, fragment — confirm.
    """
    groups = URI.match(uri).groups()
    scheme, authority, path, query, fragment = (groups[i] for i in (1, 3, 4, 6, 8))
    return (scheme, authority, path, query, fragment)
def test_random_single_image():
    """Smoke test: image_plot must accept one random image without showing it."""
    shap_values = np.random.randn(3, 20, 20)
    pixel_values = np.random.randn(3, 20, 20)
    shap.image_plot(shap_values, pixel_values, show=False)
def weakly_connected_component(dfg, node_in_component: Node) -> StateSubgraphView:
    """Return the weakly connected component of `dfg` containing `node_in_component`.

    BUG FIX: the previous implementation collected (nodes forward-reachable)
    plus (nodes backward-reachable) from the start node in two separate
    passes. That misses nodes connected only through mixed-direction paths —
    e.g. `b` in a->c<-b when starting from `a`. A single traversal that
    follows edges in BOTH directions at every visited node yields the true
    weak component.
    """
    seen = set()
    to_search = [node_in_component]
    while to_search:
        node = to_search.pop()
        if (node in seen):
            continue
        seen.add(node)
        # Treat the graph as undirected: expand along both edge directions.
        for neighbor in dfg.successors(node):
            to_search.append(neighbor)
        for neighbor in dfg.predecessors(node):
            to_search.append(neighbor)
    subgraph = StateSubgraphView(dfg, seen)
    return subgraph
# NOTE(review): the bare `_numpy_output()` call looks like a decorator whose
# '@' and prefix were lost during extraction (dace tests use
# `@compare_numpy_output()`) — confirm against the original test file.
_numpy_output()
def test_transpose3(A: dace.float32[(M, N, N, M)]):
    # Permute the axes of a 4-D array: output dim i takes input dim (3, 0, 2, 1)[i].
    return A.transpose(3, 0, 2, 1)
def load_model(load_path, e_common, e_separate_A, e_separate_B, decoder, ae_opt, disc, disc_opt):
    """Restore all model and optimizer states from a single checkpoint file.

    Each module's `load_state_dict` is fed the matching entry of the saved
    checkpoint dict; returns the stored iteration counter ('iters').
    """
    state = torch.load(load_path)
    targets = (
        ('e_common', e_common),
        ('e_separate_A', e_separate_A),
        ('e_separate_B', e_separate_B),
        ('decoder', decoder),
        ('ae_opt', ae_opt),
        ('disc', disc),
        ('disc_opt', disc_opt),
    )
    for key, module in targets:
        module.load_state_dict(state[key])
    return state['iters']
# NOTE(review): the bare `_module` below looks like a registration decorator
# that lost its prefix during extraction (mmcv hooks are registered with
# `@HOOKS.register_module()`) — confirm against the original source.
_module
class IterTimerHook(Hook):
    """Hook that logs per-iteration data-loading time and total step time."""

    def before_epoch(self, runner):
        # Reset the reference timestamp at the start of each epoch.
        self.t = time.time()

    def before_iter(self, runner):
        # Elapsed since the last mark = time spent fetching this iteration's data.
        runner.log_buffer.update({'data_time': (time.time() - self.t)})

    def after_iter(self, runner):
        # Elapsed since the last mark now also includes this iteration's compute.
        runner.log_buffer.update({'time': (time.time() - self.t)})
        self.t = time.time()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.