code stringlengths 101 5.91M |
|---|
def filesys_decode(path):
    """Decode *path* bytes to text using the filesystem encoding, falling
    back to UTF-8.

    Already-decoded text is returned unchanged.  If every candidate
    encoding fails, the function falls through and returns None.
    """
    if isinstance(path, six.text_type):
        return path
    preferred = sys.getfilesystemencoding() or 'utf-8'
    for encoding in (preferred, 'utf-8'):
        try:
            return path.decode(encoding)
        except UnicodeDecodeError:
            pass
def save_as_rust(mlp, dataset, output):
    """Serialise a trained MLP cost model as Rust source implementing
    `crate::frame::mmm::CostModel`.

    Args:
        mlp: trained model exposing `named_parameters()`, `kernels` (sequence
            whose items' first element is the kernel name), and `mean`/`std`
            normalisation tensors.
        dataset: provides `get_mr_nr_values()` and `big_product_behaviour()`.
        output: path of the Rust file to write.
    """
    with open(output, 'w') as f:
        (mrs, nrs) = dataset.get_mr_nr_values()
        params = {}
        for (name, tensor) in mlp.named_parameters():
            # Detach so .numpy() works on tensors that require grad.
            params[name] = tensor.detach().numpy()
        (big_product_mkn_threshold, big_product_kernel_choice) = dataset.big_product_behaviour()
        # Kernel names are rendered as a Rust &[&str]; Python's single quotes
        # in the list repr are swapped for Rust's double quotes.
        f.write(f'''use crate::frame::mmm::CostModel;
pub fn model() -> CostModel<'static> {{
CostModel {{
big_product_mkn_threshold: {big_product_mkn_threshold},
big_product_kernel_choice: "{big_product_kernel_choice}",
kernels: &{str(list(map((lambda k: k[0]), mlp.kernels))).replace("'", '"')},
mrs: &{mrs},
nrs: &{nrs},
feat_norm_mean: &{mlp.mean.flatten().tolist()},
feat_norm_stddev: &{mlp.std.flatten().tolist()},
w1: &{params['linear_1.weight'].flatten().tolist()},
b1: &{params['linear_1.bias'].flatten().tolist()},
w2: &{params['linear_2.weight'].flatten().tolist()},
b2: &{params['linear_2.bias'].flatten().tolist()},
}}
}}
''')
def test_loop_description():
    """Check Circuit.loop_description output against golden files for each
    flux distribution mode ('all', 'inductors', 'junctions')."""
    C = sq.Capacitor(1)
    loop1 = sq.Loop(id_str='loop1')
    JJ1 = sq.Junction(1, loops=[loop1], cap=C, id_str='JJ1')
    JJ2 = sq.Junction(1, loops=[loop1], cap=C, id_str='JJ2')
    L = sq.Inductor(1, loops=[loop1], cap=C, id_str='ind')
    elements = {(0, 1): [JJ1], (0, 2): [JJ2], (1, 2): [L]}
    for flux_dist in ('all', 'inductors', 'junctions'):
        cr = sq.Circuit(elements, flux_dist=flux_dist)
        desc = cr.loop_description(_test=True)
        # FIX: the original opened three files and never closed them (leaked
        # handles); `with` guarantees closure.  The golden files store
        # newlines as the literal two characters '\n', so unescape them.
        with open(DATADIR + '/flux_dist_' + flux_dist + '.txt', 'r') as f:
            desc_data = f.read().replace('\\n', '\n')
        assert desc == desc_data
def dummy_lower5(context, builder, sig, args):
    """Lowering stub: compiles an internal kernel that returns abs(arg.x)."""
    def _impl(operand):
        return abs(operand.x)
    return context.compile_internal(builder, _impl, sig, args)
def run_mlp(_trainMode, _dataType, _oRate, _var, _GPU_ID):
    """Train and evaluate an MLP regressor on a synthetic 1-D regression task.

    Args:
        _trainMode: training-mode flag (accepted but unused in this body —
            presumably consumed by callers/config; confirm).
        _dataType: synthetic dataset type passed to data4reg.
        _oRate: outlier rate for the generated data.
        _var: measurement noise variance.
        _GPU_ID: GPU index handed to the model/session.
    """
    (_n, _oRange, _hdims, _actv, _maxEpoch, _PLOT_EVERY, _SAVE_NET, _SAVE_FIG) = get_common_config()
    (x, y, t) = data4reg(_type=_dataType, _n=_n, _oRange=_oRange, _oRate=_oRate, measVar=_var)
    # Dense evaluation grid on [-3, 3], shaped as a column vector.
    xtest = np.linspace(start=(- 3), stop=3, num=500).reshape(((- 1), 1))
    # Fresh graph + fixed seeds for reproducibility (TF1-style API).
    tf.reset_default_graph()
    tf.set_random_seed(0)
    np.random.seed(0)
    MLP = mlp_reg_class(_name=('MLP_%s_oRate%d_var%.1e' % (_dataType, (_oRate * 100), _var)), _xdim=1, _ydim=1, _hdims=_hdims, _actv=_actv, _bn=None, _l2_reg_coef=1e-05, _GPU_ID=_GPU_ID, _VERBOSE=False)
    sess = gpusession()
    sess.run(tf.global_variables_initializer())
    MLP.train(_sess=sess, _x=x, _y=y, _yref=t, _lr=0.1, _batchSize=256, _maxEpoch=_maxEpoch, _kp=1.0, _LR_SCHEDULE=True, _PRINT_EVERY=50, _PLOT_EVERY=_PLOT_EVERY, _SAVE_TXT=True, _SAVE_BEST_NET=_SAVE_NET, _SAVE_FINAL=_SAVE_NET)
    MLP.test(_sess=sess, _xdata=x, _ydata=y, _yref=t, _xtest=xtest, _titleStr=MLP.name, _PLOT_TRAIN=True, _PLOT_RES=True, _SAVE_FIG=_SAVE_FIG)
    sess.close()
class Model(nn.Module):
    """A single 2-D convolution.  When `input_to_constant` is truthy, the
    conv parameters are overwritten with fixed constants (weights 0.1,
    bias 1), making the output deterministic."""

    def __init__(self, in_channels, out_channels, kernel_size, input_to_constant):
        super().__init__()
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size)
        if input_to_constant:
            # Deterministic parameters for reproducible comparisons.
            self.conv.weight.data.fill_(0.1)
            self.conv.bias.data.fill_(1)

    def forward(self, x):
        return self.conv(x)
def _make_sparse(grad, grad_indices, values):
size = grad.size()
if ((grad_indices.numel() == 0) or (values.numel() == 0)):
return torch.empty_like(grad)
return torch.sparse_coo_tensor(grad_indices, values, size) |
class BasicConv2d(nn.Module):
    """Conv2d (no bias) -> BatchNorm2d -> ReLU, the standard conv building
    block; extra keyword args are forwarded to nn.Conv2d."""

    def __init__(self, input_channels, output_channels, **kwargs):
        super().__init__()
        self.conv = nn.Conv2d(input_channels, output_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(output_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
def create_emb_layer(weights_matrix=None, voc_size=None, embed_dim=None, trainable_embeds=True) -> torch.nn.Embedding:
    """Build an nn.Embedding, optionally initialised from a pretrained matrix.

    Either `weights_matrix` or both `voc_size` and `embed_dim` must be
    supplied; a matrix's shape overrides voc_size/embed_dim.  Pass
    trainable_embeds=False to freeze the weights.
    """
    have_matrix = weights_matrix is not None
    assert have_matrix or (voc_size is not None and embed_dim is not None), 'Please define anything: weights_matrix or voc_size & embed_dim'
    if have_matrix:
        voc_size, embed_dim = weights_matrix.size()
    emb_layer = nn.Embedding(voc_size, embed_dim)
    if have_matrix:
        emb_layer.load_state_dict({'weight': weights_matrix})
    if not trainable_embeds:
        emb_layer.weight.requires_grad = False
    return emb_layer
def split_core_object_name(core_object_name):
    """Split an underscore-encoded core object name into (base_name, dtypes).

    The first underscore-separated token is the base name; each remaining
    token is resolved to a dtype via bb.dtype_from_name.
    """
    base, *dtype_names = core_object_name.split('_')
    return (base, [bb.dtype_from_name(n) for n in dtype_names])
class BaseModel(nn.Module):
    """Common base for models: stores the config object and records CUDA
    availability at construction time."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        # Cached once here; does not track later device changes.
        self.use_cuda = torch.cuda.is_available()
def load_weights(model, yolo_weight_file):
    """Load Darknet/YOLO weights into a Keras-style model.

    File layout: a 4-float32 header, then for each parameterised layer the
    biases followed by the kernel, both flattened.  Layers whose
    get_weights() is empty (no parameters) are skipped.
    """
    data = np.fromfile(yolo_weight_file, np.float32)[4:]
    offset = 0
    for layer in model.layers:
        shapes = [w.shape for w in layer.get_weights()]
        if not shapes:
            continue
        kshape, bshape = shapes
        n_bias = np.prod(bshape)
        bias = data[offset:offset + n_bias].reshape(bshape)
        offset += n_bias
        n_kernel = np.prod(kshape)
        kernel = data[offset:offset + n_kernel].reshape(kshape)
        offset += n_kernel
        layer.set_weights([kernel, bias])
class GenericEnum(GenericAccessibleObject):
    """Accessible wrapper around an enum type: exposes the enum's instance
    type and its member names."""

    def __init__(self, owner: TypeInfo):
        super().__init__(owner)
        self._generated_type = Instance(owner)
        raw_enum = typing.cast(type[enum.Enum], owner.raw_type)
        members = typing.cast(list[enum.Enum], list(raw_enum))
        self._names = [member.name for member in members]

    def generated_type(self) -> ProperType:
        return self._generated_type

    def names(self) -> list[str]:
        return self._names

    def is_enum(self) -> bool:
        return True

    def get_dependencies(self, memo: dict[(InferredSignature, dict[(str, ProperType)])]) -> OrderedSet[ProperType]:
        # An enum accessor introduces no type dependencies.
        return OrderedSet()

    def __eq__(self, other):
        if self is other:
            return True
        return isinstance(other, GenericEnum) and self._owner == other._owner

    def __hash__(self):
        return hash(self._owner)

    def __repr__(self):
        return f'{self.__class__.__name__}({self.owner})'
class ArgPackType(CompoundType):
    """Compile-time type descriptor for a Taichi argument pack.

    Maps member names to their Taichi types and builds the backing
    `_ti_core` argpack struct layout.  Calling the instance constructs an
    ArgPack value; `cast` coerces an existing pack to these member types.
    """

    def __init__(self, **kwargs):
        self.members = {}
        elements = []
        for (k, dtype) in kwargs.items():
            if isinstance(dtype, StructType):
                self.members[k] = dtype
                elements.append([dtype.dtype, k])
            elif isinstance(dtype, ArgPackType):
                # Nested packs are stored behind a pointer-typed struct slot.
                self.members[k] = dtype
                elements.append([_ti_core.DataType(_ti_core.get_type_factory_instance().get_struct_type_for_argpack_ptr(dtype.dtype)), k])
            elif isinstance(dtype, MatrixType):
                # Matrices are flattened into one scalar field per element.
                if (dtype.ndim == 1):
                    elements_ = [(dtype.dtype, f'{k}_{i}') for i in range(dtype.n)]
                else:
                    elements_ = [(dtype.dtype, f'{k}_{i}_{j}') for i in range(dtype.n) for j in range(dtype.m)]
                self.members[k] = dtype
                elements.append([_ti_core.get_type_factory_instance().get_struct_type(elements_), k])
            elif isinstance(dtype, sparse_matrix_builder):
                # The following argument kinds occupy no slot in the layout.
                self.members[k] = dtype
            elif isinstance(dtype, ndarray_type.NdarrayType):
                self.members[k] = dtype
            elif isinstance(dtype, texture_type.RWTextureType):
                self.members[k] = dtype
            elif isinstance(dtype, texture_type.TextureType):
                self.members[k] = dtype
            else:
                dtype = cook_dtype(dtype)
                self.members[k] = dtype
                elements.append([dtype, k])
        if (len(elements) == 0):
            # NOTE(review): pads an otherwise-empty layout with an i32 slot;
            # relies on `k` surviving the loop, so this would raise NameError
            # when called with no kwargs at all — confirm intended.
            elements.append([primitive_types.i32, k])
        self.dtype = _ti_core.get_type_factory_instance().get_argpack_type(elements)

    def __call__(self, *args, **kwargs):
        """Build an ArgPack from positional and/or keyword member values."""
        d = {}
        items = self.members.items()
        for (index, pair) in enumerate(items):
            (name, dtype) = pair
            if (index < len(args)):
                data = args[index]
            else:
                data = kwargs.get(name, None)
            if (isinstance(dtype, CompoundType) and (not isinstance(data, (dict, ArgPack, Struct)))):
                # Wrap raw values in their compound type before packing.
                data = dtype(data)
            d[name] = data
        entries = ArgPack(self.members, self.dtype, d)
        pack = self.cast(entries)
        return pack

    def __instancecheck__(self, instance):
        """True iff *instance* is an ArgPack with exactly these members/types."""
        if (not isinstance(instance, ArgPack)):
            return False
        if (list(self.members.keys()) != list(instance._ArgPack__entries.keys())):
            return False
        for (k, v) in self.members.items():
            if isinstance(v, ArgPackType):
                if (not isinstance(instance._ArgPack__entries[k], v)):
                    return False
            elif (instance._ArgPack__annotations[k] != v):
                return False
        return True

    def cast(self, pack):
        """Coerce each entry of *pack* to this type's member types and
        return a new ArgPack; raises TaichiSyntaxError on a member mismatch."""
        if (self.members.keys() != pack._ArgPack__entries.keys()):
            raise TaichiSyntaxError('Incompatible arguments for custom argument pack members!')
        entries = {}
        for (k, dtype) in self.members.items():
            if isinstance(dtype, MatrixType):
                entries[k] = dtype(pack._ArgPack__entries[k])
            elif isinstance(dtype, CompoundType):
                entries[k] = dtype.cast(pack._ArgPack__entries[k])
            elif isinstance(dtype, ArgPackType):
                entries[k] = dtype.cast(pack._ArgPack__entries[k])
            elif isinstance(dtype, ndarray_type.NdarrayType):
                entries[k] = pack._ArgPack__entries[k]
            elif isinstance(dtype, texture_type.RWTextureType):
                entries[k] = pack._ArgPack__entries[k]
            elif isinstance(dtype, texture_type.TextureType):
                entries[k] = pack._ArgPack__entries[k]
            elif isinstance(dtype, sparse_matrix_builder):
                entries[k] = pack._ArgPack__entries[k]
            elif in_python_scope():
                # Python scope: coerce scalars with plain int()/float().
                v = pack._ArgPack__entries[k]
                entries[k] = (int(v) if (dtype in primitive_types.integer_types) else float(v))
            else:
                # Taichi scope: emit a cast op instead.
                entries[k] = ops.cast(pack._ArgPack__entries[k], dtype)
        pack = ArgPack(self.members, self.dtype, entries)
        return pack

    def from_taichi_object(self, arg_load_dict: dict):
        """Rebuild an intermediate ArgPack from kernel argument-load values."""
        d = {}
        items = self.members.items()
        for (index, pair) in enumerate(items):
            (name, dtype) = pair
            d[name] = arg_load_dict[name]
        pack = _IntermediateArgPack(self.members, self.dtype, d)
        pack._ArgPack__dtype = self.dtype
        return pack

    def __str__(self):
        """Human-readable member listing, e.g. `<ti.ArgPackType a=..., b=...>`."""
        item_str = ', '.join([((str(k) + '=') + str(v)) for (k, v) in self.members.items()])
        return f'<ti.ArgPackType {item_str}>'
def format_stat(stat):
    """Render a logging statistic as a compact string (or list for tensors);
    unrecognised values pass through unchanged."""
    if isinstance(stat, Number):
        return '{:g}'.format(stat)
    if isinstance(stat, AverageMeter):
        return '{:.3f}'.format(stat.avg)
    if isinstance(stat, TimeMeter):
        return '{:g}'.format(round(stat.avg))
    if isinstance(stat, StopwatchMeter):
        return '{:g}'.format(round(stat.sum))
    if torch.is_tensor(stat):
        return stat.tolist()
    return stat
def register_Ns3RrcConnectionReestablishmentRejectHeader_methods(root_module, cls):
    """Register Python bindings (pybindgen-style) for
    ns3::RrcConnectionReestablishmentRejectHeader: copy/default constructors
    plus the header's serialization, accessor, and printing API.

    Auto-generated binding code — registration order is preserved as-is.
    """
    cls.add_constructor([param('ns3::RrcConnectionReestablishmentRejectHeader const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'bIterator')], is_virtual=True)
    cls.add_method('GetMessage', 'ns3::LteRrcSap::RrcConnectionReestablishmentReject', [], is_const=True)
    cls.add_method('PreSerialize', 'void', [], is_const=True, is_virtual=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('SetMessage', 'void', [param('ns3::LteRrcSap::RrcConnectionReestablishmentReject', 'msg')])
    return
class MNISTLeaveOut(MNIST):
    """MNIST variant that excludes a given set of digit classes.

    The first 50k training images form the 'training' split and the
    remaining 10k the 'validation' split; any other split name selects the
    test set.  All samples whose label is in `l_out_class` are removed.
    """
    # Spatial size of each image.
    img_size = (28, 28)

    def __init__(self, root, l_out_class, split='training', transform=None, target_transform=None, download=False):
        super(MNISTLeaveOut, self).__init__(root, transform=transform, target_transform=target_transform, download=download)
        if ((split == 'training') or (split == 'validation')):
            self.train = True
        else:
            self.train = False
        self.split = split
        self.l_out_class = list(l_out_class)
        # Left-out classes must be valid MNIST digits.
        for c in l_out_class:
            assert (c in set(list(range(10))))
        set_out_class = set(l_out_class)
        if download:
            self.download()
        if (not self._check_exists()):
            raise RuntimeError(('Dataset not found.' + ' You can use download=True to download it'))
        if self.train:
            data_file = self.training_file
        else:
            data_file = self.test_file
        (data, targets) = torch.load(os.path.join(self.processed_folder, data_file))
        # Fixed 50k/10k split of the 60k training images.
        if (split == 'training'):
            data = data[:50000]
            targets = targets[:50000]
        elif (split == 'validation'):
            data = data[50000:]
            targets = targets[50000:]
        # Boolean mask of samples belonging to any left-out class.
        out_idx = torch.zeros(len(data), dtype=torch.bool)
        for c in l_out_class:
            out_idx = (out_idx | (targets == c))
        self.data = data[(~ out_idx)]
        self.digits = targets[(~ out_idx)]
        self.targets = self.digits

    # NOTE(review): in torchvision these two are @property accessors — the
    # decorators may have been stripped in this copy; confirm upstream.
    def raw_folder(self):
        return os.path.join(self.root, 'MNIST', 'raw')

    def processed_folder(self):
        return os.path.join(self.root, 'MNIST', 'processed')
def FoF_search(array, threshold):
    """Friends-of-Friends grouping of above-threshold cells in an N-D array.

    Cells with `array > threshold` that touch along any single axis
    (unit steps, no diagonals) are merged into groups via a BFS flood fill.

    Args:
        array: numeric numpy array of any dimensionality.
        threshold: cells strictly greater than this participate.

    Returns:
        (out_map, sizes): `out_map` labels each participating cell with a
        1-based group id (0 = below threshold); `sizes[i]` is the number of
        cells in group i+1.
    """
    def neighbours(coord):
        # Yield all axis-aligned unit-step neighbours of *coord*.
        for axis in range(len(coord)):
            for step in (-1, 1):
                shifted = list(coord)
                shifted[axis] += step
                yield tuple(shifted)

    out_map = np.zeros(array.shape, dtype=int)
    # BUG FIX: the original kept `possibilities` as the same zip iterator it
    # used to build `poss_set`; building the set exhausted the iterator, so
    # the labelling loop below never executed (Python 3 zip is single-pass).
    # Materialise the coordinates once so both uses see them.
    possibilities = list(zip(*np.where(array > threshold)))
    poss_set = set(possibilities)

    def flood(point, current_group, current_size):
        # Label unvisited above-threshold neighbours and enqueue them.
        for candidate in neighbours(point):
            if candidate in poss_set and not out_map[candidate]:
                out_map[candidate] = current_group
                q.put(candidate)
                current_size += 1
        return current_size

    group_ids = count()
    next(group_ids)  # start numbering at 1; 0 marks "ungrouped"
    size_list = []
    q = Queue()
    for p in possibilities:
        if not out_map[p]:
            group = next(group_ids)
            out_map[p] = group
            q.put(p)
            s = 1
            while not q.empty():
                s = flood(q.get(), group, s)
            size_list.append(s)
    return (out_map, np.array(size_list))
def get_getbuffer_call(code, obj_cname, buffer_aux, buffer_type):
    """Generate the C expression that acquires and validates a Python buffer
    for a typed-buffer variable (Cython codegen).

    The returned string interpolates via `% locals()`, so the local variable
    names below must match the format placeholders exactly — do not rename.
    """
    ndim = buffer_type.ndim
    cast = int(buffer_type.cast)
    flags = get_flags(buffer_aux, buffer_type)
    pybuffernd_struct = buffer_aux.buflocal_nd_var.cname
    dtype_typeinfo = get_type_information_cname(code, buffer_type.dtype)
    # Ensure the __Pyx_GetBufferAndValidate helper is emitted into the module.
    code.globalstate.use_utility_code(acquire_utility_code)
    return ('__Pyx_GetBufferAndValidate(&%(pybuffernd_struct)s.rcbuffer->pybuffer, (PyObject*)%(obj_cname)s, &%(dtype_typeinfo)s, %(flags)s, %(ndim)d, %(cast)d, __pyx_stack)' % locals())
def load_loggers(cfg):
    """Create TensorBoard and CSV loggers for the current config/fold.

    Derives the run directory from cfg.General.log_path, the config file's
    parent directory, and its stem; also stores the resolved path on
    cfg.log_path as a side effect.
    """
    log_path = cfg.General.log_path
    Path(log_path).mkdir(exist_ok=True, parents=True)
    log_name = Path(cfg.config).parent
    version_name = Path(cfg.config).name[:-5]  # drop the 5-char extension (e.g. '.yaml')
    fold_version = f'fold{cfg.Data.fold}'
    cfg.log_path = Path(log_path) / log_name / version_name / fold_version
    print(f'---->Log dir: {cfg.log_path}')
    base_dir = log_path + str(log_name)
    tb_logger = pl_loggers.TensorBoardLogger(base_dir, name=version_name, version=fold_version, log_graph=True, default_hp_metric=False)
    csv_logger = pl_loggers.CSVLogger(base_dir, name=version_name, version=fold_version)
    return [tb_logger, csv_logger]
def comp(a, b, op):
    """Evaluate the comparison `a <op> b` for ops '=', '<', '>', '!='.

    Time-valued right-hand sides handle (in)equality via containment; other
    ops fall through to ordinary comparison.  Unknown ops return None.
    """
    if b.isTime():
        if op == '=':
            return b.contains(a)
        if op == '!=':
            return not b.contains(a)
    if op == '=':
        return a == b
    if op == '<':
        return a < b
    if op == '>':
        return a > b
    if op == '!=':
        return a != b
class ElementWiseArrayOperation2D(pm.SingleStateTransformation):
    """Distributes a 2-D element-wise map over processes.

    Inputs are block-cyclically scattered over a Px x Py process grid
    (scalars are broadcast), the map range is shrunk to the local tile, and
    outputs are gathered back.
    """
    # Pattern node matched by this transformation.
    # NOTE(review): `expressions` takes `cls` — it is presumably a
    # @classmethod whose decorator was stripped in this copy; confirm.
    map_entry = pm.PatternNode(nodes.MapEntry)

    def expressions(cls):
        return [sdutil.node_path_graph(cls.map_entry)]

    def can_be_applied(self, graph: dace.SDFGState, expr_index: int, sdfg: dace.SDFG, permissive: bool=False):
        """Match only 2-D maps whose every array access is a single element
        indexed exactly by the two map parameters."""
        map_entry = self.map_entry
        map_exit = graph.exit_node(map_entry)
        params = [dace.symbol(p) for p in map_entry.map.params]
        if (len(params) != 2):
            return False
        # Refuse maps whose range already involves distribution symbols.
        if ('commsize' in map_entry.map.range.free_symbols):
            return False
        if ('Px' in map_entry.map.range.free_symbols):
            return False
        if ('Py' in map_entry.map.range.free_symbols):
            return False
        # Collect per-input access subsets (edges leaving the map entry).
        inputs = dict()
        for (_, _, _, _, m) in graph.out_edges(map_entry):
            if (not m.data):
                continue
            desc = sdfg.arrays[m.data]
            if (desc not in inputs.keys()):
                inputs[desc] = []
            inputs[desc].append(m.subset)
        for (desc, accesses) in inputs.items():
            if isinstance(desc, dace.data.Scalar):
                continue
            elif isinstance(desc, (dace.data.Array, dace.data.View)):
                if (list(desc.shape) == [1]):
                    continue
                if (len(desc.shape) != 2):
                    return False
                for a in accesses:
                    # Single-element access indexed by both map params.
                    if (a.num_elements() != 1):
                        return False
                    indices = a.min_element()
                    unmatched_indices = set(params)
                    for idx in indices:
                        if (idx in unmatched_indices):
                            unmatched_indices.remove(idx)
                    if (len(unmatched_indices) > 0):
                        return False
            else:
                return False
        # Same check for outputs (edges entering the map exit); WCR not supported.
        outputs = dict()
        for (_, _, _, _, m) in graph.in_edges(map_exit):
            if m.wcr:
                return False
            desc = sdfg.arrays[m.data]
            if (desc not in outputs.keys()):
                outputs[desc] = []
            outputs[desc].append(m.subset)
        for (desc, accesses) in outputs.items():
            if isinstance(desc, (dace.data.Array, dace.data.View)):
                if (len(desc.shape) != 2):
                    return False
                for a in accesses:
                    if (a.num_elements() != 1):
                        return False
                    indices = a.min_element()
                    unmatched_indices = set(params)
                    for idx in indices:
                        if (idx in unmatched_indices):
                            unmatched_indices.remove(idx)
                    if (len(unmatched_indices) > 0):
                        return False
            else:
                return False
        return True

    def apply(self, graph: dace.SDFGState, sdfg: dace.SDFG):
        """Rewrite the matched map: scatter array inputs / broadcast scalar
        inputs, shrink the map range to the local tile, gather outputs."""
        map_entry = self.map_entry
        map_exit = graph.exit_node(map_entry)
        # Distribution symbols: total ranks and the 2-D process grid.
        sz = dace.symbol('commsize', dtype=dace.int32, integer=True, positive=True)
        Px = dace.symbol('Px', dtype=dace.int32, integer=True, positive=True)
        Py = dace.symbol('Py', dtype=dace.int32, integer=True, positive=True)
        from dace.data import _prod
        if (len(map_entry.map.params) == 2):
            # Shrink each dimension's range to its per-process share.
            params = map_entry.map.params
            ranges = ([None] * 2)
            (b, e, _) = map_entry.map.range[0]
            ranges[0] = (0, ((((e - b) + 1) / Px) - 1), 1)
            (b, e, _) = map_entry.map.range[1]
            ranges[1] = (0, ((((e - b) + 1) / Py) - 1), 1)
            strides = [1]
        else:
            # Fallback: flatten to a single linearised index split by commsize.
            # NOTE(review): `strides` is computed but never used below; it may
            # be a leftover from a 1-D variant — confirm.
            params = ['__iflat']
            sizes = map_entry.map.range.size_exact()
            total_size = _prod(sizes)
            ranges = [(0, ((total_size / sz) - 1), 1)]
            strides = [_prod(sizes[(i + 1):]) for i in range(len(sizes))]
        # Transient scalar 0 used as the broadcast root rank.
        root_name = sdfg.temp_data_name()
        sdfg.add_scalar(root_name, dace.int32, transient=True)
        root_node = graph.add_access(root_name)
        root_tasklet = graph.add_tasklet('_set_root_', {}, {'__out'}, '__out = 0')
        graph.add_edge(root_tasklet, '__out', root_node, None, dace.Memlet.simple(root_name, '0'))
        from dace.libraries.mpi import Bcast
        from dace.libraries.pblas import BlockCyclicScatter, BlockCyclicGather
        # Gather the access nodes feeding the map; only whole-array reads of
        # scalars/arrays are supported.
        inputs = set()
        for (src, _, _, _, m) in graph.in_edges(map_entry):
            if (not isinstance(src, nodes.AccessNode)):
                raise NotImplementedError
            desc = src.desc(sdfg)
            if (not isinstance(desc, (data.Scalar, data.Array))):
                raise NotImplementedError
            if (list(desc.shape) != m.src_subset.size_exact()):
                # Symbolic shapes may compare unequal structurally; fall back
                # to a string comparison before giving up.
                if (str(list(desc.shape)) != str(m.src_subset.size_exact())):
                    raise NotImplementedError
            inputs.add(src)
        for inp in inputs:
            desc = inp.desc(sdfg)
            if isinstance(desc, data.Scalar):
                # Scalars: broadcast from the root to all ranks.
                local_access = graph.add_access(inp.data)
                bcast_node = Bcast('_Bcast_')
                graph.add_edge(inp, None, bcast_node, '_inbuffer', dace.Memlet.from_array(inp.data, desc))
                graph.add_edge(root_node, None, bcast_node, '_root', dace.Memlet.simple(root_name, '0'))
                graph.add_edge(bcast_node, '_outbuffer', local_access, None, dace.Memlet.from_array(inp.data, desc))
                for e in graph.edges_between(inp, map_entry):
                    graph.add_edge(local_access, None, map_entry, e.dst_conn, dace.Memlet.from_array(inp.data, desc))
                    graph.remove_edge(e)
            elif isinstance(desc, data.Array):
                # Arrays: block-cyclically scatter a (shape/Px, shape/Py) tile.
                (local_name, local_arr) = sdfg.add_temp_transient([(desc.shape[0] // Px), (desc.shape[1] // Py)], dtype=desc.dtype, storage=desc.storage)
                local_access = graph.add_access(local_name)
                (bsizes_name, bsizes_arr) = sdfg.add_temp_transient((2,), dtype=dace.int32)
                bsizes_access = graph.add_access(bsizes_name)
                bsizes_tasklet = nodes.Tasklet('_set_bsizes_', {}, {'__out'}, '__out[0] = {x}; __out[1] = {y}'.format(x=(desc.shape[0] // Px), y=(desc.shape[1] // Py)))
                graph.add_edge(bsizes_tasklet, '__out', bsizes_access, None, dace.Memlet.from_array(bsizes_name, bsizes_arr))
                # ScaLAPACK-style global/local array descriptors.
                (gdesc_name, gdesc_arr) = sdfg.add_temp_transient((9,), dtype=dace.int32)
                gdesc_access = graph.add_access(gdesc_name)
                (ldesc_name, ldesc_arr) = sdfg.add_temp_transient((9,), dtype=dace.int32)
                ldesc_access = graph.add_access(ldesc_name)
                scatter_node = BlockCyclicScatter('_Scatter_')
                graph.add_edge(inp, None, scatter_node, '_inbuffer', dace.Memlet.from_array(inp.data, desc))
                graph.add_edge(bsizes_access, None, scatter_node, '_block_sizes', dace.Memlet.from_array(bsizes_name, bsizes_arr))
                graph.add_edge(scatter_node, '_outbuffer', local_access, None, dace.Memlet.from_array(local_name, local_arr))
                graph.add_edge(scatter_node, '_gdescriptor', gdesc_access, None, dace.Memlet.from_array(gdesc_name, gdesc_arr))
                graph.add_edge(scatter_node, '_ldescriptor', ldesc_access, None, dace.Memlet.from_array(ldesc_name, ldesc_arr))
                for e in graph.edges_between(inp, map_entry):
                    graph.add_edge(local_access, None, map_entry, e.dst_conn, dace.Memlet.from_array(local_name, local_arr))
                    graph.remove_edge(e)
                # Retarget interior memlets from the global to the local array.
                for e in graph.out_edges(map_entry):
                    if (e.data.data == inp.data):
                        e.data.data = local_name
            else:
                raise NotImplementedError
        # Same treatment for outputs, gathering local tiles back.
        outputs = set()
        for (_, _, dst, _, m) in graph.out_edges(map_exit):
            if (not isinstance(dst, nodes.AccessNode)):
                raise NotImplementedError
            desc = dst.desc(sdfg)
            if (not isinstance(desc, data.Array)):
                raise NotImplementedError
            try:
                if (list(desc.shape) != m.dst_subset.size_exact()):
                    if (str(list(desc.shape)) != str(m.dst_subset.size_exact())):
                        raise NotImplementedError
            except AttributeError:
                # Older memlets may lack dst_subset; use the plain subset.
                if (list(desc.shape) != m.subset.size_exact()):
                    if (str(list(desc.shape)) != str(m.subset.size_exact())):
                        raise NotImplementedError
            outputs.add(dst)
        for out in outputs:
            desc = out.desc(sdfg)
            if isinstance(desc, data.Scalar):
                raise NotImplementedError
            elif isinstance(desc, data.Array):
                (local_name, local_arr) = sdfg.add_temp_transient([(desc.shape[0] // Px), (desc.shape[1] // Py)], dtype=desc.dtype, storage=desc.storage)
                local_access = graph.add_access(local_name)
                (bsizes_name, bsizes_arr) = sdfg.add_temp_transient((2,), dtype=dace.int32)
                bsizes_access = graph.add_access(bsizes_name)
                bsizes_tasklet = nodes.Tasklet('_set_bsizes_', {}, {'__out'}, '__out[0] = {x}; __out[1] = {y}'.format(x=(desc.shape[0] // Px), y=(desc.shape[1] // Py)))
                graph.add_edge(bsizes_tasklet, '__out', bsizes_access, None, dace.Memlet.from_array(bsizes_name, bsizes_arr))
                scatter_node = BlockCyclicGather('_Gather_')
                graph.add_edge(local_access, None, scatter_node, '_inbuffer', dace.Memlet.from_array(local_name, local_arr))
                graph.add_edge(bsizes_access, None, scatter_node, '_block_sizes', dace.Memlet.from_array(bsizes_name, bsizes_arr))
                graph.add_edge(scatter_node, '_outbuffer', out, None, dace.Memlet.from_array(out.data, desc))
                for e in graph.edges_between(map_exit, out):
                    graph.add_edge(map_exit, e.src_conn, local_access, None, dace.Memlet.from_array(local_name, local_arr))
                    graph.remove_edge(e)
                for e in graph.in_edges(map_exit):
                    if (e.data.data == out.data):
                        e.data.data = local_name
            else:
                raise NotImplementedError
        # Finally install the shrunk (local) iteration space on the map.
        map_entry.map.params = params
        map_entry.map.range = subsets.Range(ranges)
def assert_dict_keys_equal(dictionary, target_keys):
    """Assert *dictionary* is a dict whose key set equals *target_keys*
    (order-insensitive)."""
    assert isinstance(dictionary, dict)
    assert set(dictionary) == set(target_keys)
# NOTE(review): the decorator below looks truncated — presumably
# `@pytest.mark.parametrize('knn_methods', knn_methods)`; confirm upstream.
.parametrize('knn_methods', knn_methods)
def test_kne_proba(knn_methods):
    """KNORA-E with soft voting reproduces stored predict_proba values."""
    (pool_classifiers, X_dsel, y_dsel, X_test, y_test) = setup_classifiers()
    kne = KNORAE(pool_classifiers, knn_classifier=knn_methods, voting='soft')
    kne.fit(X_dsel, y_dsel)
    probas = kne.predict_proba(X_test)
    # Golden values recorded from a previous known-good run.
    expected = np.load('deslib/tests/expected_values/kne_proba_integration.npy')
    assert np.allclose(probas, expected)
class HalfCheetahEnv(HalfCheetahEnv_):
    """HalfCheetah variant with torso world-position in the observation and a
    body-tracking camera."""

    def _get_obs(self):
        # Observation = qpos minus the root x coordinate, qvel, and the
        # torso's world position, flattened to a float32 vector.
        return np.concatenate([self.sim.data.qpos.flat[1:], self.sim.data.qvel.flat, self.get_body_com('torso').flat]).astype(np.float32).flatten()

    def viewer_setup(self):
        # Attach the viewer to the 'track' camera so it follows the agent.
        camera_id = self.model.camera_name2id('track')
        self.viewer.cam.type = 2  # assumes 2 == fixed/tracking camera mode — confirm against mujoco constants
        self.viewer.cam.fixedcamid = camera_id
        self.viewer.cam.distance = (self.model.stat.extent * 0.35)
        self.viewer._hide_overlay = True

    def render(self, mode='human'):
        """Render either to an RGB pixel array or to the interactive viewer."""
        if (mode == 'rgb_array'):
            (width, height) = (500, 500)
            self._get_viewer(mode=mode).render(width, height)
            data = self._get_viewer(mode).read_pixels(width, height, depth=False)
            return data
        elif (mode == 'human'):
            self._get_viewer(mode=mode).render()
class InMemoryDemoDatabase(DemoDatabase):
    """A DemoDatabase that keeps results in a process-local list (no
    persistence).  A result's perma-id is simply its index in the list."""

    def __init__(self):
        self.data: List[Permadata] = []

    def add_result(self, headers: JsonDict, model_name: str, inputs: JsonDict, outputs: JsonDict) -> Optional[int]:
        # `headers` is accepted for interface compatibility but not stored.
        self.data.append(Permadata(model_name, inputs, outputs))
        return len(self.data) - 1

    def get_result(self, perma_id: int) -> Permadata:
        # EAFP: an out-of-range id yields None rather than raising.
        try:
            return self.data[perma_id]
        except IndexError:
            return None

    def from_environment(cls) -> Optional['InMemoryDemoDatabase']:
        # NOTE(review): reads like a @classmethod alternate constructor —
        # the decorator may have been stripped in this copy; confirm.
        return InMemoryDemoDatabase()
def test_detect_col_types_consistent():
    """Numeric and categorical columns shared by both frames are classified
    consistently."""
    left = pd.DataFrame({'num': rng.random(5), 'cat': list('abcde')})
    right = pd.DataFrame({'num': rng.random(5), 'cat': list('fghil')})
    expected = {'cat': ['cat'], 'num': ['num']}
    assert detect_consistent_col_types(left, right) == expected
# NOTE(review): the decorator below looks truncated — presumably a skip
# marker such as `@unittest.skipIf(...)` or `@pytest.mark.skipif(...)`;
# confirm upstream.
((not have_working_shmget()), 'shmget does not work')
def test_pickle_unpickle_auto_unused():
    """Pickling shared numpy arrays reuses servers, round-trips values, and
    releases the in-use flag once all client references are dropped."""
    old_num_servers = None
    for i in range(10):
        m = numpy.random.randn(((i * 2) + 1), ((i * 3) + 2))
        p = pickle_dumps((m, m, m))
        # Server count must stabilise: repeated pickling reuses servers.
        new_num_servers = len(SharedNumpyArray.ServerInstances)
        if (old_num_servers is not None):
            assert (old_num_servers == new_num_servers)
        old_num_servers = new_num_servers
        (m2, m3, m4) = pickle_loads(p)
        assert numpy.allclose(m, m2)
        assert numpy.allclose(m, m3)
        assert numpy.allclose(m, m4)
        assert (not m4.base.is_server)
        # The in-use flag lives in shared memory: writing through the client
        # view must be visible through a fresh shmid lookup.
        m4.base._get_in_use_flag_ref().value = 42
        assert (m4.base._get_in_use_flag_ref().value == 42)
        assert (find_numpy_shared_by_shmid(m4.base.mem.shmid)._get_in_use_flag_ref().value == 42)
        assert numpy.allclose(m, m4)
        ss = list([find_numpy_shared_by_shmid(_m.base.mem.shmid) for _m in (m2, m3, m4)])
        # Drop every client reference and collect so the arrays are released.
        _m = None
        m2 = m3 = m4 = None
        gc.collect()
        for s in ss:
            assert isinstance(s, SharedNumpyArray)
            assert s.is_server
            assert (not s.is_in_use())
def _handle_PacketIn(event):
    """Learning-switch handler: learn the source port, then either flood
    (unknown destination) or install symmetric flow rules and forward."""
    event_info(event)
    FLOOD_PORT = of.OFPP_FLOOD
    packet = event.parsed
    # Remember which port this source address arrived on (keyed per switch).
    table[(event.connection, packet.src)] = event.port
    out_port = table.get((event.connection, packet.dst))
    if out_port is None:
        # Destination not learned yet: flood the original packet everywhere.
        msg = of.ofp_packet_out()
        msg.data = event.ofp
        msg.actions.append(of.ofp_action_output(port=FLOOD_PORT))
        event.connection.send(msg)
    else:
        # Install the reverse rule (dst -> src, out the learned ingress port)...
        fm = of.ofp_flow_mod()
        fm.match.dl_src = packet.dst
        fm.match.dl_dst = packet.src
        fm.actions.append(of.ofp_action_output(port=event.port))
        event.connection.send(fm)
        # ...and the forward rule (src -> dst, out the known destination port).
        fm = of.ofp_flow_mod()
        fm.match.dl_src = packet.src
        fm.match.dl_dst = packet.dst
        fm.actions.append(of.ofp_action_output(port=out_port))
        event.connection.send(fm)
        log.info(('Sent to switch rules for %s <-> %s' % (packet.src, packet.dst)))
class AttrCheck(object):
    """Bundle of attribute names plus a transform applied to their values.

    Args:
        attrs: attribute names to check; defaults to a fresh empty list.
        func: transform applied to values; defaults to the identity.
    """

    def __init__(self, attrs: list = None, func=(lambda x: x)):
        # BUG FIX: the original used a mutable default (attrs=[]), so every
        # instance created without `attrs` shared — and could mutate — the
        # same list object.  Use None as the sentinel and allocate per call.
        self.attrs = [] if attrs is None else attrs
        self.func = func
def setup_module(module):
    """Pytest module-level setup: fetch the ONNX/NNEF test artifacts if absent.

    NOTE(review): the download URLs appear to have been scrubbed from this
    copy — each urlretrieve call was left with adjacent string literals whose
    first part is just ' '.  Restore the real URLs before running.
    """
    if (not Path('mobilenetv2-7.onnx').exists()):
        urllib.request.urlretrieve(' 'mobilenetv2-7.onnx')
    if (not Path('mobilenet_v2_1.0.onnx.nnef.tgz').exists()):
        urllib.request.urlretrieve(' 'mobilenet_v2_1.0.onnx.nnef.tgz')
class qCommutingPolynomials(qCommutingPolynomials_generic):
    """Polynomial ring whose variables q-commute according to a matrix B:
    monomials are indexed by a free abelian monoid and products pick up a
    power of q determined by B."""

    def __init__(self, q, B, names):
        # Monomials are exponent vectors over the given variable names.
        indices = FreeAbelianMonoid(len(names), names)
        qCommutingPolynomials_generic.__init__(self, q, B, indices, indices.variable_names())

    def _repr_(self):
        """Plain-text description including q, the base ring, and B."""
        names = ', '.join(self.variable_names())
        return '{}-commuting polynomial ring in {} over {} with matrix:\n{}'.format(self._q, names, self.base_ring(), self._B)

    def _latex_(self):
        """LaTeX rendering: base-ring[vars] subscripted by q."""
        names = ', '.join(self.variable_names())
        return '{}[{}]_{{{}}}'.format(latex(self.base_ring()), names, self._q)

    # NOTE(review): the bare `_method` line below looks like a truncated
    # decorator (presumably `@cached_method`); confirm against the original.
    _method
    def one_basis(self):
        """Index of the multiplicative identity (the empty monomial)."""
        return self._indices.one()

    def product_on_basis(self, x, y):
        """Product of basis monomials x*y, scaled by q to the power of the
        B-weighted inversion count between the exponent vectors."""
        if (x == self.one_basis()):
            return self.monomial(y)
        if (y == self.one_basis()):
            return self.monomial(x)
        Lx = x.list()
        Ly = y.list()
        B = self._B
        # Each x-exponent of variable i commutes past the y-exponents of
        # earlier variables j < i, contributing B[j,i]-weighted q powers.
        qpow = sum(((exp * sum(((B[(j, i)] * val) for (j, val) in enumerate(Ly[:i])))) for (i, exp) in enumerate(Lx) if exp))
        return self.term((x * y), (self._q ** qpow))
# NOTE(review): the decorator below looks truncated — presumably
# `@labeling_function()` (snorkel-style); confirm upstream.
_function()
def lf_carry_subject(x):
    """Labeling function: label CARRY when a person is the object and the
    subject is a carryable/rideable thing; otherwise abstain."""
    if (x.object_category == 'person'):
        if (x.subject_category in ['chair', 'bike', 'snowboard', 'motorcycle', 'horse']):
            return CARRY
    return ABSTAIN
def get_dataset_name(config):
    """Map a config file's `dataset_type` to a human-readable dataset name.

    Args:
        config: config file name relative to ./configs/.
    Raises:
        KeyError: if the config's dataset_type is not a known dataset.
    """
    name_map = {
        'CityscapesDataset': 'Cityscapes',
        'CocoDataset': 'COCO',
        'CocoPanopticDataset': 'COCO',
        'DeepFashionDataset': 'Deep Fashion',
        'LVISV05Dataset': 'LVIS v0.5',
        'LVISV1Dataset': 'LVIS v1',
        'VOCDataset': 'Pascal VOC',
        'WIDERFaceDataset': 'WIDER Face',
        'OpenImagesDataset': 'OpenImagesDataset',
        'OpenImagesChallengeDataset': 'OpenImagesChallengeDataset',
    }
    cfg = mmcv.Config.fromfile('./configs/' + config)
    return name_map[cfg.dataset_type]
def inc_dec_constructor(is_prefix, operator):
    """Return a node factory with is_prefix/operator pre-bound; the factory
    forwards `pos` and any keyword args to DecrementIncrementNode."""
    def make_node(pos, **kwds):
        return DecrementIncrementNode(pos, is_prefix=is_prefix, operator=operator, **kwds)
    return make_node
def _distributed_main(i, main, args, kwargs):
    """Per-worker entry point for spawned distributed training.

    Binds worker *i* to its device, resolves its global rank, then runs
    *main* with the updated args.
    """
    args.device_id = i
    if torch.cuda.is_available() and not args.cpu:
        torch.cuda.set_device(args.device_id)
    if args.distributed_rank is None:
        # Infer this worker's global rank from the spawner's start rank.
        args.distributed_rank = kwargs.get('start_rank', 0) + i
    args.distributed_rank = distributed_init(args)
    main(args, **kwargs)
def crop_xml(xml, sub_set_crop_path, instanc_size=511):
    """Crop SiamFC-style exemplar (z) and search (x) patches for every object
    in a VOC/ImageNet-style annotation file and write them as JPEGs.

    Args:
        xml: path to the annotation XML; the matching image is found by
            swapping 'xml'->'JPEG' and 'Annotations'->'Data' in the path.
        sub_set_crop_path: output root; a per-frame directory is created.
        instanc_size: search-region size passed to crop_like_SiamFC.
    """
    xmltree = ET.parse(xml)
    objects = xmltree.findall('object')
    # One output directory per annotated frame, named after the XML stem.
    frame_crop_base_path = join(sub_set_crop_path, xml.split('/')[(- 1)].split('.')[0])
    if (not isdir(frame_crop_base_path)):
        makedirs(frame_crop_base_path)
    img_path = xml.replace('xml', 'JPEG').replace('Annotations', 'Data')
    im = cv2.imread(img_path)
    # Per-channel image mean, used as padding colour for out-of-bounds crops.
    avg_chans = np.mean(im, axis=(0, 1))
    for (id, object_iter) in enumerate(objects):
        bndbox = object_iter.find('bndbox')
        bbox = [int(bndbox.find('xmin').text), int(bndbox.find('ymin').text), int(bndbox.find('xmax').text), int(bndbox.find('ymax').text)]
        (z, x) = crop_like_SiamFC(im, bbox, instanc_size=instanc_size, padding=avg_chans)
        cv2.imwrite(join(frame_crop_base_path, '{:06d}.{:02d}.z.jpg'.format(0, id)), z)
        cv2.imwrite(join(frame_crop_base_path, '{:06d}.{:02d}.x.jpg'.format(0, id)), x)
class omniglot(Dataset):
    """Wrapper around torchvision's Omniglot ('test' background split) that
    returns RGB PIL images with their character-class labels."""

    def __init__(self, root='data/meta-dataset/omniglot', transform=None):
        self.transform = transform
        self.dataset = Omniglot(root, 'test', transform)
        # Flat list of integer labels, one per image, in dataset order.
        self.label = []
        for pair in self.dataset._flat_character_images:
            self.label.append(pair[1])

    def __getitem__(self, index: int):
        """Return (image, character_class) for the given index, applying the
        wrapped dataset's transforms."""
        (image_name, character_class) = self.dataset._flat_character_images[index]
        image_path = os.path.join(self.dataset.target_folder, self.dataset._characters[character_class], image_name)
        # Omniglot images are grayscale on disk; convert to RGB.
        image = Image.open(image_path, mode='r').convert('RGB')
        if self.dataset.transform:
            image = self.dataset.transform(image)
        if self.dataset.target_transform:
            character_class = self.dataset.target_transform(character_class)
        return (image, character_class)

    def __len__(self):
        return len(self.dataset)
_model('wav2vec2', dataclass=Wav2Vec2Config)
class Wav2Vec2Model(BaseFairseqModel):
def __init__(self, cfg: Wav2Vec2Config):
super().__init__()
self.cfg = cfg
feature_enc_layers = eval(cfg.conv_feature_layers)
self.embed = feature_enc_layers[(- 1)][0]
self.feature_extractor = ConvFeatureExtractionModel(conv_layers=feature_enc_layers, dropout=0.0, mode=cfg.extractor_mode, conv_bias=cfg.conv_bias)
self.post_extract_proj = (nn.Linear(self.embed, cfg.encoder_embed_dim) if ((self.embed != cfg.encoder_embed_dim) and (not cfg.quantize_input)) else None)
self.mask_prob = cfg.mask_prob
self.mask_selection = cfg.mask_selection
self.mask_other = cfg.mask_other
self.mask_length = cfg.mask_length
self.no_mask_overlap = cfg.no_mask_overlap
self.mask_min_space = cfg.mask_min_space
self.mask_channel_prob = cfg.mask_channel_prob
self.mask_channel_before = cfg.mask_channel_before
self.mask_channel_selection = cfg.mask_channel_selection
self.mask_channel_other = cfg.mask_channel_other
self.mask_channel_length = cfg.mask_channel_length
self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
self.mask_channel_min_space = cfg.mask_channel_min_space
self.dropout_input = nn.Dropout(cfg.dropout_input)
self.dropout_features = nn.Dropout(cfg.dropout_features)
self.feature_grad_mult = cfg.feature_grad_mult
self.quantizer = None
self.input_quantizer = None
self.n_negatives = cfg.num_negatives
self.cross_sample_negatives = cfg.cross_sample_negatives
self.codebook_negatives = cfg.codebook_negatives
self.negatives_from_everywhere = cfg.negatives_from_everywhere
self.logit_temp = cfg.logit_temp
final_dim = (cfg.final_dim if (cfg.final_dim > 0) else cfg.encoder_embed_dim)
if cfg.quantize_targets:
vq_dim = (cfg.latent_dim if (cfg.latent_dim > 0) else final_dim)
self.quantizer = GumbelVectorQuantizer(dim=self.embed, num_vars=cfg.latent_vars, temp=cfg.latent_temp, groups=cfg.latent_groups, combine_groups=False, vq_dim=vq_dim, time_first=True, weight_proj_depth=cfg.quantizer_depth, weight_proj_factor=cfg.quantizer_factor)
self.project_q = nn.Linear(vq_dim, final_dim)
else:
self.project_q = nn.Linear(self.embed, final_dim)
if cfg.quantize_input:
if (cfg.same_quantizer and (self.quantizer is not None)):
vq_dim = final_dim
self.input_quantizer = self.quantizer
else:
vq_dim = (cfg.latent_dim if (cfg.latent_dim > 0) else cfg.encoder_embed_dim)
self.input_quantizer = GumbelVectorQuantizer(dim=self.embed, num_vars=cfg.latent_vars, temp=cfg.latent_temp, groups=cfg.latent_groups, combine_groups=False, vq_dim=vq_dim, time_first=True, weight_proj_depth=cfg.quantizer_depth, weight_proj_factor=cfg.quantizer_factor)
self.project_inp = nn.Linear(vq_dim, cfg.encoder_embed_dim)
self.mask_emb = nn.Parameter(torch.FloatTensor(cfg.encoder_embed_dim).uniform_())
self.encoder = TransformerEncoder(cfg)
self.layer_norm = LayerNorm(self.embed)
self.target_glu = None
if cfg.target_glu:
self.target_glu = nn.Sequential(nn.Linear(final_dim, (final_dim * 2)), nn.GLU())
self.final_proj = nn.Linear(cfg.encoder_embed_dim, final_dim)
def upgrade_state_dict_named(self, state_dict, name):
    """Upgrade old-format checkpoints; currently a pass-through that returns
    `state_dict` unchanged after the base-class hook has run."""
    super().upgrade_state_dict_named(state_dict, name)
    return state_dict
def build_model(cls, cfg: Wav2Vec2Config, task=None):
    """Build a new model instance from `cfg`; `task` is accepted but unused.

    NOTE(review): first parameter is `cls` — presumably decorated with
    @classmethod in the original source (decorator not visible here); confirm.
    """
    return cls(cfg)
def apply_mask(self, x, padding_mask, mask_indices=None, mask_channel_indices=None):
    """Apply time and/or channel masking (SpecAugment style) to features `x`.

    x: (B, T, C) features. Masked timesteps are replaced by the learned
    `self.mask_emb`; masked channels are zeroed. Returns
    (masked_x, time_mask_indices), where the indices are None when no time
    masking was applied.
    """
    (B, T, C) = x.shape
    # Optionally mask channels *before* time masking; this variant zeroes
    # the selected channels in place.
    if ((self.mask_channel_prob > 0) and self.mask_channel_before):
        mask_channel_indices = compute_mask_indices((B, C), None, self.mask_channel_prob, self.mask_channel_length, self.mask_channel_selection, self.mask_channel_other, no_overlap=self.no_mask_channel_overlap, min_space=self.mask_channel_min_space)
        # Broadcast the (B, C) channel mask across all T timesteps.
        mask_channel_indices = torch.from_numpy(mask_channel_indices).to(x.device).unsqueeze(1).expand((- 1), T, (- 1))
        x[mask_channel_indices] = 0
    if (self.mask_prob > 0):
        if (mask_indices is None):
            # Sample spans of masked timesteps (at least 2 masks per sample).
            mask_indices = compute_mask_indices((B, T), padding_mask, self.mask_prob, self.mask_length, self.mask_selection, self.mask_other, min_masks=2, no_overlap=self.no_mask_overlap, min_space=self.mask_min_space)
            mask_indices = torch.from_numpy(mask_indices).to(x.device)
        # Replace masked timesteps with the learned mask embedding.
        x = index_put(x, mask_indices, self.mask_emb)
    else:
        mask_indices = None
    # Channel masking *after* time masking (the default placement).
    if ((self.mask_channel_prob > 0) and (not self.mask_channel_before)):
        if (mask_channel_indices is None):
            mask_channel_indices = compute_mask_indices((B, C), None, self.mask_channel_prob, self.mask_channel_length, self.mask_channel_selection, self.mask_channel_other, no_overlap=self.no_mask_channel_overlap, min_space=self.mask_channel_min_space)
            mask_channel_indices = torch.from_numpy(mask_channel_indices).to(x.device).unsqueeze(1).expand((- 1), T, (- 1))
        x = index_put(x, mask_channel_indices, 0)
    return (x, mask_indices)
def sample_negatives(self, y, num, padding_count=None):
    """Sample negative examples for the contrastive objective.

    y: (B, T, C) target features; `num` is the number of timesteps to draw
    negatives for. Returns (negs, neg_idxs) with negs shaped
    (n_negatives + cross_sample_negatives, B, num, C).
    NOTE(review): when both negative counts are zero this returns a bare
    tensor `y.new(0)` instead of a tuple — confirm callers handle that.
    """
    if ((self.n_negatives == 0) and (self.cross_sample_negatives == 0)):
        return y.new(0)
    (bsz, tsz, fsz) = y.shape
    y = y.view((- 1), fsz)
    # `cross_high` indexes across the whole flattened batch; `high` stays
    # within one utterance (minus any right-padding).
    cross_high = (tsz * bsz)
    high = (tsz - (padding_count or 0))
    with torch.no_grad():
        assert (high > 1), f'{(bsz, tsz, fsz)}'
        if (self.n_negatives > 0):
            tszs = buffered_arange(num).unsqueeze((- 1)).expand((- 1), self.n_negatives).flatten()
            # Draw in [0, high-1) and shift indices >= own position by one
            # so a timestep never picks itself as a negative.
            neg_idxs = torch.randint(low=0, high=(high - 1), size=(bsz, (self.n_negatives * num)))
            neg_idxs[(neg_idxs >= tszs)] += 1
        if (self.cross_sample_negatives > 0):
            tszs = buffered_arange(num).unsqueeze((- 1)).expand((- 1), self.cross_sample_negatives).flatten()
            cross_neg_idxs = torch.randint(low=0, high=(cross_high - 1), size=(bsz, (self.cross_sample_negatives * num)))
            cross_neg_idxs[(cross_neg_idxs >= tszs)] += 1
    if (self.n_negatives > 0):
        # Offset per-utterance indices into the flattened (B*T, C) view.
        for i in range(1, bsz):
            neg_idxs[i] += (i * high)
    else:
        neg_idxs = cross_neg_idxs
    if ((self.cross_sample_negatives > 0) and (self.n_negatives > 0)):
        neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1)
    negs = y[neg_idxs.view((- 1))]
    negs = negs.view(bsz, num, (self.n_negatives + self.cross_sample_negatives), fsz).permute(2, 0, 1, 3)
    return (negs, neg_idxs)
def compute_preds(self, x, y, negatives):
    """Cosine-similarity logits of predictions `x` against the positive `y`
    (row 0) and the sampled `negatives` (rows 1..K), scaled by the logit
    temperature. Negatives identical to the positive are masked out with
    -inf (or a large negative constant on XLA).
    """
    neg_is_pos = (y == negatives).all((- 1))
    y = y.unsqueeze(0)
    targets = torch.cat([y, negatives], dim=0)
    logits = torch.cosine_similarity(x.float(), targets.float(), dim=(- 1)).type_as(x)
    logits = (logits / self.logit_temp)
    if (is_xla_tensor(logits) or neg_is_pos.any()):
        # Cache the fill value on first use; on XLA a concrete tensor is
        # used instead of float('-inf').
        fillval = (- float((2 ** 30)))
        if (not hasattr(self, '_inftensor')):
            self._inftensor = (torch.tensor(fillval).to(x.device) if is_xla_tensor(logits) else float('-inf'))
        logits[1:] = index_put(logits[1:], neg_is_pos, self._inftensor)
    return logits
def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
def _conv_out_length(input_length, kernel_size, stride):
return torch.floor((((input_length - kernel_size) / stride) + 1))
conv_cfg_list = eval(self.cfg.conv_feature_layers)
for i in range(len(conv_cfg_list)):
input_lengths = _conv_out_length(input_lengths, conv_cfg_list[i][1], conv_cfg_list[i][2])
return input_lengths.to(torch.long)
def forward(self, source, padding_mask=None, mask=True, features_only=False, layer=None, mask_indices=None, mask_channel_indices=None, padding_count=None):
    """Full wav2vec 2.0 forward pass.

    Extracts conv features from raw audio, masks them, encodes with the
    transformer and — unless `features_only` — quantizes targets and samples
    negatives for the contrastive objective. Returns a dict with the
    predictions 'x', 'padding_mask', 'features_pen' and (when a quantizer
    is present) codebook diagnostics.
    """
    # Optionally scale gradients flowing back into the conv extractor.
    if (self.feature_grad_mult > 0):
        features = self.feature_extractor(source)
        if (self.feature_grad_mult != 1.0):
            features = GradMultiply.apply(features, self.feature_grad_mult)
    else:
        with torch.no_grad():
            features = self.feature_extractor(source)
    # L2 penalty on raw conv features (auxiliary regularizer).
    features_pen = features.float().pow(2).mean()
    features = features.transpose(1, 2)
    features = self.layer_norm(features)
    unmasked_features = features.clone()
    if ((padding_mask is not None) and padding_mask.any()):
        # Recompute the padding mask at the conv-output frame rate.
        input_lengths = (1 - padding_mask.long()).sum((- 1))
        output_lengths = self._get_feat_extract_output_lengths(input_lengths)
        padding_mask = torch.zeros(features.shape[:2], dtype=features.dtype, device=features.device)
        # Mark the last valid frame; flip+cumsum+flip then makes everything
        # at or before that frame non-padding.
        padding_mask[(torch.arange(padding_mask.shape[0], device=padding_mask.device), (output_lengths - 1))] = 1
        padding_mask = (1 - padding_mask.flip([(- 1)]).cumsum((- 1)).flip([(- 1)])).bool()
    else:
        padding_mask = None
    if (self.post_extract_proj is not None):
        features = self.post_extract_proj(features)
    features = self.dropout_input(features)
    unmasked_features = self.dropout_features(unmasked_features)
    num_vars = None
    code_ppl = None
    prob_ppl = None
    curr_temp = None
    # Optional quantization of the transformer *inputs*.
    if self.input_quantizer:
        q = self.input_quantizer(features, produce_targets=False)
        features = q['x']
        num_vars = q['num_vars']
        code_ppl = q['code_perplexity']
        prob_ppl = q['prob_perplexity']
        curr_temp = q['temp']
        features = self.project_inp(features)
    if mask:
        (x, mask_indices) = self.apply_mask(features, padding_mask, mask_indices=mask_indices, mask_channel_indices=mask_channel_indices)
        if ((not is_xla_tensor(x)) and (mask_indices is not None)):
            # Targets are the *unmasked* features at the masked positions.
            y = unmasked_features[mask_indices].view(unmasked_features.size(0), (- 1), unmasked_features.size((- 1)))
        else:
            y = unmasked_features
    else:
        x = features
        y = unmasked_features
        mask_indices = None
    (x, layer_results) = self.encoder(x, padding_mask=padding_mask, layer=layer)
    if features_only:
        return {'x': x, 'padding_mask': padding_mask, 'features': unmasked_features, 'layer_results': layer_results}
    if self.quantizer:
        # Quantize targets; negatives come either from the quantized
        # targets or (negatives_from_everywhere) from all unmasked features.
        q = self.quantizer(y, produce_targets=False)
        y = q['x']
        num_vars = q['num_vars']
        code_ppl = q['code_perplexity']
        prob_ppl = q['prob_perplexity']
        curr_temp = q['temp']
        y = self.project_q(y)
        if self.negatives_from_everywhere:
            neg_cands = self.quantizer(unmasked_features, produce_targets=False)['x']
            (negs, _) = self.sample_negatives(neg_cands, y.size(1), padding_count=padding_count)
            negs = self.project_q(negs)
        else:
            (negs, _) = self.sample_negatives(y, y.size(1), padding_count=padding_count)
        if (self.codebook_negatives > 0):
            # Additional negatives drawn directly from the codebook.
            cb_negs = self.quantizer.sample_from_codebook((y.size(0) * y.size(1)), self.codebook_negatives)
            cb_negs = cb_negs.view(self.codebook_negatives, y.size(0), y.size(1), (- 1))
            cb_negs = self.project_q(cb_negs)
            negs = torch.cat([negs, cb_negs], dim=0)
    else:
        y = self.project_q(y)
        if self.negatives_from_everywhere:
            (negs, _) = self.sample_negatives(unmasked_features, y.size(1), padding_count=padding_count)
            negs = self.project_q(negs)
        else:
            (negs, _) = self.sample_negatives(y, y.size(1), padding_count=padding_count)
    if (not is_xla_tensor(x)):
        # Keep only the masked positions of the encoder output.
        x = x[mask_indices].view(x.size(0), (- 1), x.size((- 1)))
    if self.target_glu:
        y = self.target_glu(y)
        negs = self.target_glu(negs)
    x = self.final_proj(x)
    x = self.compute_preds(x, y, negs)
    result = {'x': x, 'padding_mask': padding_mask, 'features_pen': features_pen}
    if (prob_ppl is not None):
        result['prob_perplexity'] = prob_ppl
        result['code_perplexity'] = code_ppl
        result['num_vars'] = num_vars
        result['temp'] = curr_temp
    return result
def quantize(self, x):
    """Run the conv feature extractor and layer norm on raw input `x`, then
    return the quantizer's index output for the resulting features."""
    assert (self.quantizer is not None)
    feats = self.layer_norm(self.feature_extractor(x).transpose(1, 2))
    return self.quantizer.forward_idx(feats)
def extract_features(self, source, padding_mask, mask=False, layer=None):
    """Convenience wrapper around forward() that returns features only."""
    return self.forward(source, padding_mask, mask=mask, features_only=True, layer=layer)
def get_logits(self, net_output):
    """Flatten the prediction tensor for the criterion: move the candidate
    axis last-to-first via transpose(0, 2), then collapse everything except
    the candidate dimension."""
    preds = net_output['x'].transpose(0, 2)
    return preds.reshape(-1, preds.size(-1))
def get_targets(self, sample, net_output, expand_steps=True):
    """Contrastive targets are all zero: the positive candidate is always
    stored at index 0 of the candidate axis."""
    preds = net_output['x']
    num_targets = preds.size(1) * preds.size(2)
    return preds.new_zeros(num_targets, dtype=torch.long)
def get_extra_losses(self, net_output):
    """Collect auxiliary losses: codebook-diversity penalty (when the
    quantizer reported a perplexity) and the feature L2 penalty."""
    penalties = []
    if 'prob_perplexity' in net_output:
        num_vars = net_output['num_vars']
        penalties.append((num_vars - net_output['prob_perplexity']) / num_vars)
    if 'features_pen' in net_output:
        penalties.append(net_output['features_pen'])
    return penalties
def remove_pretraining_modules(self):
    """Drop modules only needed during pre-training (e.g. for fine-tuning)."""
    for attr in ('quantizer', 'project_q', 'target_glu', 'final_proj'):
        setattr(self, attr, None)
# NOTE(review): the leading bare ".parametrize(...)" looks like a
# "@pytest.mark.parametrize" decorator whose "@pytest.mark" prefix was lost
# during extraction — confirm against the original source.
.parametrize('T', [x for x in np.typecodes['All'] if (x not in 'eGUVOMm')])
def test_bandwidth_square_inputs(T):
    """Bandwidth of a square banded matrix: set the main diagonal, the k-th
    super/subdiagonals and the first subdiagonal of an n x n matrix and
    check that bandwidth() reports (k, k)."""
    n = 20
    k = 4
    # Fortran order to exercise the column-major code path as well.
    R = np.zeros([n, n], dtype=T, order='F')
    R[([x for x in range(n)], [x for x in range(n)])] = 1
    R[([x for x in range((n - k))], [x for x in range(k, n)])] = 1
    R[([x for x in range(1, n)], [x for x in range((n - 1))])] = 1
    R[([x for x in range(k, n)], [x for x in range((n - k))])] = 1
    assert (bandwidth(R) == (k, k))
def hyperbolic_triangle(a, b, c, model='UHP', **options):
    """Return the hyperbolic triangle with vertices a, b, c as a 3-gon in
    the given model (upper half-plane by default)."""
    vertices = (a, b, c)
    return hyperbolic_polygon(vertices, model, **options)
# Top-level side effect: invoke the shared test-utility runner when this
# module executes.  NOTE(review): consider guarding with
# `if __name__ == "__main__":` if import-time execution is unintended.
_utils.test()
def test_matrix_arg_insertion_pos():
    """Regression test: calling a kernel with matrix/vector args (vec3)
    mixed with an ndarray-of-vectors argument must compile and run.

    NOTE(review): `_render` was presumably decorated with @ti.kernel in the
    original source (decorator not visible here) — confirm.
    """
    rgba8 = ti.types.vector(4, ti.u8)
    def _render(color_attm: ti.types.ndarray(rgba8, ndim=2), camera_pos: ti.math.vec3, camera_up: ti.math.vec3):
        up = ti.math.normalize(camera_up)
        for (x, y) in color_attm:
            o = camera_pos
    color_attm = ti.Vector.ndarray(4, dtype=ti.u8, shape=(512, 512))
    camera_pos = ti.math.vec3(0, 0, 0)
    camera_up = ti.math.vec3(0, 1, 0)
    _render(color_attm, camera_pos, camera_up)
def camPosToQuaternion(cx, cy, cz):
    """Quaternion (w, x, y, z) orienting a camera at (cx, cy, cz) toward the
    origin: a fixed base rotation q1 composed with a yaw/pitch/roll
    rotation q2 derived from the normalized camera direction.

    NOTE(review): divides by sqrt(cx^2 + cy^2); a camera on the z-axis
    (cx == cy == 0) divides by zero — confirm callers never pass that.
    """
    # Fixed base rotation quaternion (0, 0, sqrt(2)/2, sqrt(2)/2).
    q1a = 0
    q1b = 0
    q1c = (math.sqrt(2) / 2)
    q1d = (math.sqrt(2) / 2)
    # Normalize the camera position onto the unit sphere.
    camDist = math.sqrt((((cx * cx) + (cy * cy)) + (cz * cz)))
    cx = (cx / camDist)
    cy = (cy / camDist)
    cz = (cz / camDist)
    # Project onto the xy-plane to extract the yaw angle.
    t = math.sqrt(((cx * cx) + (cy * cy)))
    tx = (cx / t)
    ty = (cy / t)
    yaw = math.acos(ty)
    if (tx > 0):
        yaw = ((2 * math.pi) - yaw)
    pitch = 0
    # Clamp to [-1, 1] before acos to guard against rounding error.
    tmp = min(max(((tx * cx) + (ty * cy)), (- 1)), 1)
    roll = math.acos(tmp)
    if (cz < 0):
        roll = (- roll)
    (q2a, q2b, q2c, q2d) = quaternionFromYawPitchRoll(yaw, pitch, roll)
    # Hamilton product q1 * q2, expanded component-wise.
    q1 = ((((q1a * q2a) - (q1b * q2b)) - (q1c * q2c)) - (q1d * q2d))
    q2 = ((((q1b * q2a) + (q1a * q2b)) + (q1d * q2c)) - (q1c * q2d))
    q3 = ((((q1c * q2a) - (q1d * q2b)) + (q1a * q2c)) + (q1b * q2d))
    q4 = ((((q1d * q2a) + (q1c * q2b)) - (q1b * q2c)) + (q1a * q2d))
    return (q1, q2, q3, q4)
def drn_d_24(BatchNorm, pretrained=True):
    """Build a DRN-D-24 dilated residual network.

    Args:
        BatchNorm: normalization layer class forwarded to DRN.
        pretrained: when True, load ImageNet weights from the model zoo;
            the final fc layer is dropped so the backbone can be reused
            with a different head.

    Returns:
        The constructed (optionally pre-loaded) DRN model.
    """
    model = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 2, 2], arch='D', BatchNorm=BatchNorm)
    if pretrained:
        # Use a distinct name instead of shadowing the `pretrained` flag.
        state_dict = model_zoo.load_url(model_urls['drn-d-24'])
        # The classifier head is task-specific; drop it (tolerating its
        # absence) before loading the backbone weights.
        state_dict.pop('fc.weight', None)
        state_dict.pop('fc.bias', None)
        model.load_state_dict(state_dict)
    return model
def sentence_ppx(num_symbols, output_logits, targets, masks):
    """Per-sentence masked mean negative log-likelihood.

    output_logits: (batch, time, num_symbols) unnormalized scores;
    targets: gold symbol ids; masks: 1.0 for real tokens, 0.0 for padding.
    Returns a (batch,) tensor of average NLL per sentence.
    """
    batch_size = tf.shape(output_logits)[0]
    flat_masks = tf.reshape(masks, [-1])
    # Log-probability of the gold symbol at every position.
    gold_logprob = tf.reduce_sum(tf.nn.log_softmax(output_logits) * tf.one_hot(targets, num_symbols), axis=2)
    masked_nll = tf.reshape(flat_masks * tf.reshape(-gold_logprob, [-1]), [batch_size, -1])
    total_nll = tf.reduce_sum(masked_nll, axis=1)
    # Normalize by the number of unmasked tokens in each sentence.
    return total_nll / tf.reduce_sum(masks, axis=1)
class LifelongSAGE(SAGE):
    """GraphSAGE variant with an experience-replay memory for lifelong
    (continual) learning: observed samples are kept in fixed-size buffers
    and replayed in shuffled minibatches alongside new data.

    NOTE(review): the bare `_grad()` lines below are undefined names — they
    look like extraction damage (possibly `@torch.no_grad()` decorators in
    the original source) — confirm against upstream.
    """

    def __init__(self, args, feat_len, num_class, k=1):
        super().__init__(feat_len, num_class)
        self.args = args
        # Replay buffers registered so they follow the module (.to/.cuda).
        self.register_buffer('adj', torch.zeros(1, feat_len, feat_len))
        self.register_buffer('inputs', torch.Tensor(0, 1, feat_len))
        self.register_buffer('targets', torch.LongTensor(0))
        self.neighbor = []
        self.sample_viewed = 0
        # Per-sample age counters driving the stratified eviction in sample().
        self.memory_order = torch.LongTensor()
        self.memory_size = self.args.memory_size
        self.criterion = nn.CrossEntropyLoss()
        # NOTE(review): building the optimizer via exec on a format string is
        # fragile; getattr(torch.optim, args.optm) would be safer — flagged,
        # left unchanged here.
        exec(('self.optimizer = torch.optim.%s(self.parameters(), lr=%f)' % (args.optm, args.lr)))

    def observe(self, inputs, targets, neighbor, reply=True):
        """Train on the new batch for args.iteration steps, store it in
        memory, then optionally replay the whole memory once."""
        self.train()
        for i in range(self.args.iteration):
            self.optimizer.zero_grad()
            outputs = self.forward(inputs, neighbor)
            loss = self.criterion(outputs, targets)
            loss.backward()
            self.optimizer.step()
        self.sample(inputs, targets, neighbor)
        if reply:
            L = torch.randperm(self.inputs.size(0))
            minibatches = [L[n:(n + self.args.batch_size)] for n in range(0, len(L), self.args.batch_size)]
            for index in minibatches:
                self.optimizer.zero_grad()
                (inputs, targets, neighbor) = (self.inputs[index], self.targets[index], [self.neighbor[i] for i in index.tolist()])
                outputs = self.forward(inputs, neighbor)
                loss = self.criterion(outputs, targets)
                loss.backward()
                self.optimizer.step()

    # NOTE(review): `_grad()` is undefined — likely mangled source; confirm.
    _grad()
    def uniform_sample(self, inputs, targets, neighbor):
        """Append the batch to memory, then uniformly down-sample when the
        buffer exceeds memory_size."""
        self.inputs = torch.cat((self.inputs, inputs), dim=0)
        self.targets = torch.cat((self.targets, targets), dim=0)
        self.neighbor += neighbor
        if (self.inputs.size(0) > self.args.memory_size):
            idx = torch.randperm(self.inputs.size(0))[:self.args.memory_size]
            (self.inputs, self.targets) = (self.inputs[idx], self.targets[idx])
            self.neighbor = [self.neighbor[i] for i in idx.tolist()]

    # NOTE(review): `_grad()` is undefined — likely mangled source; confirm.
    _grad()
    def sample(self, inputs, targets, neighbor):
        """Stratified eviction: keep a spread of samples over the stream's
        history, budgeting roughly reserve/(i+1) entries per time segment."""
        self.sample_viewed += inputs.size(0)
        # Age every existing entry by the incoming batch size.
        self.memory_order += inputs.size(0)
        self.targets = torch.cat((self.targets, targets), dim=0)
        self.inputs = torch.cat((self.inputs, inputs), dim=0)
        # New samples get descending ages batch_size-1 .. 0.
        self.memory_order = torch.cat((self.memory_order, torch.LongTensor(list(range((inputs.size()[0] - 1), (- 1), (- 1))))), dim=0)
        self.neighbor += neighbor
        node_len = int(self.inputs.size(0))
        ext_memory = (node_len - self.memory_size)
        if (ext_memory > 0):
            mask = torch.zeros(node_len, dtype=bool)
            reserve = self.memory_size
            # Segment the viewed-sample timeline into ext_memory+1 bins.
            seg = np.append(np.arange(0, self.sample_viewed, (self.sample_viewed / ext_memory)), self.sample_viewed)
            for i in range((len(seg) - 2), (- 1), (- 1)):
                left = (self.memory_order.ge(np.ceil(seg[i])) * self.memory_order.lt(np.floor(seg[(i + 1)])))
                leftindex = left.nonzero()
                if (leftindex.size()[0] > (reserve / (i + 1))):
                    # Over-full segment: keep a random subset of its entries.
                    leftindex = leftindex[torch.randperm(leftindex.size()[0])[:int((reserve / (i + 1)))]]
                    mask[leftindex] = True
                else:
                    mask[leftindex] = True
                reserve -= leftindex.size()[0]
            self.inputs = self.inputs[mask]
            self.targets = self.targets[mask]
            self.memory_order = self.memory_order[mask]
            self.neighbor = [self.neighbor[i] for i in mask.nonzero()]
class Job():
    """A unit of work for a worker pool: wraps a callable with its arguments
    and the ApplyResult-like object that receives the outcome."""

    def __init__(self, func, args, kwds, apply_result):
        self._func = func
        self._args = args
        self._kwds = kwds
        self._result = apply_result

    def __call__(self):
        """Run the job, routing the return value — or the in-flight
        exception — into the result object."""
        try:
            result = self._func(*self._args, **self._kwds)
        except:
            # Bare except so even SystemExit/KeyboardInterrupt inside the job
            # is routed to the result; _set_exception takes no arguments, so
            # it presumably reads sys.exc_info() itself — TODO confirm.
            self._result._set_exception()
        else:
            self._result._set_value(result)
def main():
    """Visualize ScanNet200 ground-truth instance segmentation for one scan:
    loads the scene point cloud, colors each point by instance id, and shows
    the raw and instance-colored clouds."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--dataset', type=str, default='~/t7/ScanNet')
    parser.add_argument('-v', '--video', type=str, default='scene0208_00')
    parser.add_argument('--mode', type=str, default='validation')
    args = parser.parse_args()
    # Make the repository root importable to load the ScanNet200 constants.
    git_repo = Path(git.Repo(search_parent_directories=True).working_tree_dir)
    sys.path.append(str(git_repo))
    scannet200 = importlib.import_module('scannet_related_scripts.scannet200_constants')
    SCANNET_COLOR_MAP_200 = scannet200.SCANNET_COLOR_MAP_200
    CLASS_LABELS = scannet200.CLASS_LABELS_200
    VALID_CLASS_IDS = scannet200.VALID_CLASS_IDS_200
    ID_TO_LABEL = {}
    LABEL_TO_ID = {}
    for i in range(len(VALID_CLASS_IDS)):
        LABEL_TO_ID[CLASS_LABELS[i]] = VALID_CLASS_IDS[i]
        ID_TO_LABEL[VALID_CLASS_IDS[i]] = CLASS_LABELS[i]
    dataset = Path(args.dataset).expanduser()
    video_path = ((dataset / 'aligned_scans') / args.video)
    scene_pcd_path = (video_path / f'{args.video}_vh_clean_2.ply')
    scene_pcd = o3d.io.read_point_cloud(str(scene_pcd_path))
    print(f'len(scene_pcd.points) = {len(scene_pcd.points)!r}')
    vis_pcd(scene_pcd)
    # One instance id per point; the // 1000 below implies the encoding
    # instance_id = category_label * 1000 + instance_index — confirm.
    gt_path = ((((git_repo / 'scannet_related_scripts') / 'scannet200_instance_gt') / args.mode) / f'{args.video}.txt')
    instance_ids = np.array(gt_path.read_text().splitlines()).astype(int)
    num_pts = len(instance_ids)
    instance_labels = (instance_ids // 1000)
    unique_instance_ids = np.unique(instance_ids)
    unique_categories = np.unique(instance_labels)
    unique_labels = [ID_TO_LABEL[i] for i in unique_categories if (i in ID_TO_LABEL)]
    print(f'unique_labels = {unique_labels!r}')
    print(f'len(unique_instance_ids) = {len(unique_instance_ids)!r}')
    print(f'len(unique_categories) = {len(unique_categories)!r}')
    # Random color per instance; labels 0/1/3 (and unknown labels) are
    # forced to black — presumably background classes, confirm.
    instance_to_color = dict()
    for instance_id in unique_instance_ids:
        label = (instance_id // 1000)
        if ((label not in SCANNET_COLOR_MAP_200) or (label in [0, 1, 3])):
            label_rgb = np.array([0, 0, 0])
        else:
            label_rgb = np.random.random((3,))
        instance_to_color[instance_id] = label_rgb
    pcd_vis = copy.deepcopy(scene_pcd)
    colors = np.zeros((num_pts, 3))
    for instance_id in unique_instance_ids:
        colors[(instance_ids == instance_id)] = instance_to_color[instance_id]
    pcd_vis.colors = o3d.utility.Vector3dVector(colors)
    vis_pcd(pcd_vis)
    vis_gt_instances(scene_pcd, instance_ids, ID_TO_LABEL)
def build_inference_based_loaders(cfg: CfgNode, model: torch.nn.Module) -> Tuple[(List[InferenceBasedLoader], List[float])]:
    """Create one inference-based loader (plus its sampling ratio) for each
    dataset spec listed in cfg.BOOTSTRAP_DATASETS."""
    loaders = []
    ratios = []
    # A single embedder, placed on the model's device, is shared by all loaders.
    embedder = build_densepose_embedder(cfg).to(device=model.device)
    for spec in cfg.BOOTSTRAP_DATASETS:
        # Start from the default bootstrap config and overlay the spec.
        ds_cfg = get_bootstrap_dataset_config().clone()
        ds_cfg.merge_from_other_cfg(CfgNode(spec))
        loaders.append(build_inference_based_loader(cfg, ds_cfg, model, embedder))
        ratios.append(ds_cfg.RATIO)
    return (loaders, ratios)
class Record():
    """Columnar record node: a named collection of content fields that must
    all share the same length (mirrors an Awkward RecordArray form)."""

    def __init__(self, field_pairs, parameters):
        assert (len(field_pairs) != 0)
        # Keyed by field name; dict preserves the original field order.
        self.field_pairs_ = {pair.name: pair for pair in field_pairs}
        self.first_content_ = field_pairs[0].content
        self.parameters_ = parameters
        # Assign ids depth-first starting from 0.
        self.set_id(Ref(0))

    def field(self, name):
        """Return the content object stored under `name`."""
        return self.field_pairs_[name].content

    def parameters(self):
        return self.parameters_

    def set_id(self, id: Ref(int)):
        """Take the next id for this node and recurse into every field."""
        self.id_ = id.value
        id.value += 1
        for pair in self.field_pairs_.values():
            pair.content.set_id(id)

    def clear(self):
        for pair in self.field_pairs_.values():
            pair.content.clear()

    def length(self):
        return self.first_content_.length()

    def is_valid(self, error: Ref(str)):
        """All fields must agree on length and each must be valid itself."""
        expected = self.first_content_.length()
        for pair in self.field_pairs_.values():
            if (pair.content.length() != expected):
                error.value = f'Record node{self.id_} has field {pair.name} length {pair.content.length()} that differs from the first length {expected}'
                return False
        return all(pair.content.is_valid(error) for pair in self.field_pairs_.values())

    def buffer_nbytes(self, names_nbytes):
        for pair in self.field_pairs_.values():
            pair.content.buffer_nbytes(names_nbytes)

    def to_buffers(self, buffers):
        for pair in self.field_pairs_.values():
            pair.content.to_buffers(buffers)

    def form(self):
        """JSON form string describing this RecordArray node."""
        params = ('' if (self.parameters_ == '') else f', parameters: {self.parameters_}')
        pairs = ', '.join(f'{json.dumps(pair.name)}: {pair.content.form()}' for pair in self.field_pairs_.values())
        return f'{{"class": "RecordArray", "contents": {{{pairs}}}, "form_key": "node{self.id_}"{params}}}'
class ListForm(ListMeta[Form], Form):
    """Form describing an Awkward ListArray: variable-length lists encoded by
    parallel `starts`/`stops` index buffers over a child `content` form.

    NOTE(review): `starts`, `stops`, `content` and `type` read like
    @property accessors and `simplified` takes `cls` like a @classmethod;
    decorators appear stripped during extraction — confirm against upstream.
    """
    _content: Form

    def __init__(self, starts, stops, content, *, parameters=None, form_key=None):
        if (not isinstance(starts, str)):
            raise TypeError("{} 'starts' must be of type str, not {}".format(type(self).__name__, repr(starts)))
        if (not isinstance(stops, str)):
            # BUG FIX: this message previously said 'starts' and reported
            # repr(starts) even though it validates `stops`.
            raise TypeError("{} 'stops' must be of type str, not {}".format(type(self).__name__, repr(stops)))
        if (not isinstance(content, Form)):
            raise TypeError("{} all 'contents' must be Form subclasses, not {}".format(type(self).__name__, repr(content)))
        self._starts = starts
        self._stops = stops
        self._content = content
        self._init(parameters=parameters, form_key=form_key)

    def starts(self):
        """Name of the index type used for the starts buffer."""
        return self._starts

    def stops(self):
        """Name of the index type used for the stops buffer."""
        return self._stops

    def content(self):
        """Child form describing the list elements."""
        return self._content

    def copy(self, starts=UNSET, stops=UNSET, content=UNSET, *, parameters=UNSET, form_key=UNSET):
        """Return a copy with any explicitly passed attributes replaced."""
        return ListForm((self._starts if (starts is UNSET) else starts), (self._stops if (stops is UNSET) else stops), (self._content if (content is UNSET) else content), parameters=(self._parameters if (parameters is UNSET) else parameters), form_key=(self._form_key if (form_key is UNSET) else form_key))

    def simplified(cls, starts, stops, content, *, parameters=None, form_key=None):
        """ListForm needs no simplification; construct directly."""
        return cls(starts, stops, content, parameters=parameters, form_key=form_key)

    def __repr__(self):
        args = [repr(self._starts), repr(self._stops), repr(self._content), *self._repr_args()]
        return '{}({})'.format(type(self).__name__, ', '.join(args))

    def _to_dict_part(self, verbose, toplevel):
        return self._to_dict_extra({'class': 'ListArray', 'starts': self._starts, 'stops': self._stops, 'content': self._content._to_dict_part(verbose, toplevel=False)}, verbose)

    def type(self):
        return ak.types.ListType(self._content.type, parameters=self._parameters)

    def _columns(self, path, output, list_indicator):
        # Strings are leaves: do not descend with a list indicator.
        if ((self.parameter('__array__') not in ('string', 'bytestring')) and (list_indicator is not None)):
            path = (*path, list_indicator)
        self._content._columns(path, output, list_indicator)

    def _prune_columns(self, is_inside_record_or_union: bool) -> (Self | None):
        next_content = self._content._prune_columns(is_inside_record_or_union)
        if (next_content is None):
            return None
        else:
            return self.copy(content=next_content)

    def _select_columns(self, match_specifier: _SpecifierMatcher) -> Self:
        return self.copy(content=self._content._select_columns(match_specifier))

    def _column_types(self):
        if (self.parameter('__array__') in ('string', 'bytestring')):
            return ('string',)
        else:
            return self._content._column_types()

    def __setstate__(self, state):
        if isinstance(state, dict):
            # Current pickle format: plain attribute dict.
            self.__dict__.update(state)
        else:
            # Legacy tuple pickle format; prefix the form key with 'part0-'.
            (has_identities, parameters, form_key, starts, stops, content) = state
            if (form_key is not None):
                form_key = ('part0-' + form_key)
            self.__init__(starts, stops, content, parameters=parameters, form_key=form_key)

    def _expected_from_buffers(self, getkey: Callable[([Form, str], str)], recursive: bool) -> Iterator[tuple[(str, DType)]]:
        (yield (getkey(self, 'starts'), index_to_dtype[self._starts]))
        (yield (getkey(self, 'stops'), index_to_dtype[self._stops]))
        if recursive:
            (yield from self._content._expected_from_buffers(getkey, recursive))

    def _is_equal_to(self, other: Any, all_parameters: bool, form_key: bool) -> bool:
        return (self._is_equal_to_generic(other, all_parameters, form_key) and (self._starts == other._starts) and (self._stops == other._stops) and self._content._is_equal_to(other._content, all_parameters, form_key))
def write_augmented_dataset(input_conllu, output_conllu, augment_function):
    """Read a CoNLL-U file, augment its sentences, and write the result.

    Seeds `random` with a fixed value so augmentation is reproducible.
    """
    random.seed(1234)
    original_sentences = read_sentences_from_conllu(input_conllu)
    augmented_sentences = augment_function(original_sentences)
    write_sentences_to_conllu(output_conllu, augmented_sentences)
# NOTE(review): the stray ".hypothesis_nested", "(strategy)" and
# "(max_examples=10)" lines look like stripped decorators — probably
# "@pytest.mark.hypothesis_nested" (or similar), "@given(strategy)" and
# "@settings(max_examples=10)" on the inner test — confirm against the
# original source ("(max_examples=10)" is not even valid as a statement).
.hypothesis_nested
def test_is_valid_query_strategy():
    """A strategy filtered by is_valid_query must only produce encodable
    query dicts: the lone-surrogate value is filtered out, leaving only
    {'key': '1'}."""
    strategy = st.sampled_from([{'key': '1'}, {'key': '\udcff'}]).filter(is_valid_query)
    (strategy)
    (max_examples=10)
    def test(value):
        assert (value == {'key': '1'})
    test()
class TestSuiteChromosomeComputation(ChromosomeComputation, metaclass=abc.ABCMeta):
    """Abstract mixin that executes every test case of a test-suite chromosome."""

    def _run_test_suite_chromosome(self, individual) -> list[ExecutionResult]:
        """Execute each test-case chromosome (reusing cached results where
        the chromosome is unchanged) and collect the execution results."""
        results: list[ExecutionResult] = []
        for chromosome in individual.test_case_chromosomes:
            needs_run = chromosome.changed or (chromosome.get_last_execution_result() is None)
            if needs_run:
                # Re-execute only when the test changed or was never run.
                chromosome.set_last_execution_result(self._executor.execute(chromosome.test_case))
                chromosome.changed = False
                chromosome.invalidate_cache()
            result = chromosome.get_last_execution_result()
            assert (result is not None)
            results.append(result)
        return results
def hiddens(layer, hidden_sizes, hidden_func=nonlin.relu, hidden_keep_prob=1.0):
    """Apply several parallel hidden layers to `layer` with one fused matmul.

    A weight matrix is created per requested hidden size, concatenated
    column-wise so a single matmul computes all heads, then the result is
    split back into one tensor per hidden size. Returns a list of tensors.
    """
    layer_shape = nn.get_sizes(layer)
    input_size = layer_shape.pop()
    weights = []
    for (i, hidden_size) in enumerate(hidden_sizes):
        weights.append(tf.get_variable(('Weights-%d' % i), shape=[input_size, hidden_size]))
    # Fuse all weight matrices column-wise: one matmul for every head.
    weights = tf.concat(weights, axis=1)
    hidden_size = sum(hidden_sizes)
    biases = tf.get_variable('Biases', shape=[hidden_size], initializer=tf.zeros_initializer)
    if (hidden_keep_prob < 1.0):
        # For >2-D inputs, share the dropout mask over the next-to-last axis.
        if (len(layer_shape) > 1):
            noise_shape = tf.stack((layer_shape[:(- 1)] + [1, input_size]))
        else:
            noise_shape = None
        layer = nn.dropout(layer, hidden_keep_prob, noise_shape=noise_shape)
    # Flatten to 2-D for the matmul, then restore the leading dimensions.
    layer = nn.reshape(layer, [(- 1), input_size])
    layer = (tf.matmul(layer, weights) + biases)
    layer = hidden_func(layer)
    layer = nn.reshape(layer, (layer_shape + [hidden_size]))
    layers = tf.split(layer, hidden_sizes, axis=(- 1))
    return layers
def convert_transfo_xl_checkpoint_to_pytorch(tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file):
    """Convert a TensorFlow Transformer-XL checkpoint and/or its pickled
    dataset+vocabulary into PyTorch dump files.

    Either input path may be empty/falsy, in which case that half of the
    conversion is skipped.
    """
    if transfo_xl_dataset_file:
        # encoding='latin1' — presumably so legacy Python-2 pickles load; confirm.
        with open(transfo_xl_dataset_file, 'rb') as fp:
            corpus = pickle.load(fp, encoding='latin1')
        pytorch_vocab_dump_path = ((pytorch_dump_folder_path + '/') + VOCAB_FILES_NAMES['pretrained_vocab_file'])
        print(f'Save vocabulary to {pytorch_vocab_dump_path}')
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        # Save the corpus separately, without the vocabulary dumped above.
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('vocab', None)
        pytorch_dataset_dump_path = ((pytorch_dump_folder_path + '/') + CORPUS_NAME)
        print(f'Save dataset to {pytorch_dataset_dump_path}')
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.')
        # Empty config path means: use the default configuration.
        if (transfo_xl_config_file == ''):
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f'Building PyTorch model from configuration: {config}')
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f'Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}')
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f'Save configuration file to {os.path.abspath(pytorch_config_dump_path)}')
        with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
            f.write(config.to_json_string())
def compute_lnsr(real, adve, norm_L2=True):
    """Norm of the adversarial perturbation, optionally relative to the
    input norm.

    Both inputs are flattened to (batch, features) first.
    NOTE(review): np.linalg.norm(..., ord=2) on a 2-D array is the
    *spectral* norm (largest singular value), not a per-sample or Frobenius
    norm — confirm this is intended.
    """
    flat_real = real.reshape(real.shape[0], -1)
    flat_adve = adve.reshape(adve.shape[0], -1)
    magnitude = np.linalg.norm(flat_real - flat_adve, ord=2)
    if norm_L2:
        magnitude = magnitude / np.linalg.norm(flat_real, ord=2)
    return magnitude
class ClassicalWeylSubgroup(WeylGroup_gens):
    """Classical (parabolic) subgroup of a Weyl group, acting on the same
    ambient space as its parent.

    NOTE(review): the bare "_method" line below looks like a stripped
    decorator (e.g. @cached_method) — confirm against the original source.
    """
    _method
    def cartan_type(self):
        """Classical Cartan type underlying the domain's Cartan type."""
        return self.domain().cartan_type().classical()

    def simple_reflections(self):
        """Family of simple reflections, lifted from the domain's morphisms."""
        return Family({i: self.from_morphism(self.domain().simple_reflection(i)) for i in self.index_set()})

    def __repr__(self):
        domain = self._domain._name_string(capitalize=False, base_ring=False, type=False)
        return ('Parabolic Subgroup of the Weyl Group of type %s (as a matrix group acting on the %s)' % (self.domain().cartan_type(), domain))

    def weyl_group(self, prefix='hereditary'):
        """The ambient Weyl group; 'hereditary' reuses this group's prefix."""
        if (prefix == 'hereditary'):
            prefix = self._prefix
        return self.domain().weyl_group(prefix)

    def _test_is_finite(self, **options):
        """Sanity check: the subgroup is finite even when the ambient
        (affine) Weyl group is not."""
        tester = self._tester(**options)
        tester.assertTrue((not self.weyl_group(self._prefix).is_finite()))
        tester.assertTrue(self.is_finite())
def train_index(data, quantizer_path, trained_index_path, fine_quant='SQ8', cuda=False):
    """Train a FAISS IVF index on `data` and write it to disk.

    Args:
        data: training vectors, shape (n, d).
        quantizer_path: path to the pre-built coarse quantizer index.
        trained_index_path: output path for the trained index.
        fine_quant: 'SQ8' (scalar quantizer) or 'PQ<m>' (product quantizer
            with m sub-quantizers, 8 bits each).
        cuda: train on GPU 0 when supported; IVFPQ falls back to CPU.

    Raises:
        ValueError: if `fine_quant` is not a recognized spec.
    """
    quantizer = faiss.read_index(quantizer_path)
    if (fine_quant == 'SQ8'):
        trained_index = faiss.IndexIVFScalarQuantizer(quantizer, quantizer.d, quantizer.ntotal, faiss.METRIC_L2)
    elif fine_quant.startswith('PQ'):
        m = int(fine_quant[2:])
        trained_index = faiss.IndexIVFPQ(quantizer, quantizer.d, quantizer.ntotal, m, 8)
    else:
        raise ValueError(fine_quant)
    if (cuda and (not fine_quant.startswith('PQ'))):
        res = faiss.StandardGpuResources()
        gpu_index = faiss.index_cpu_to_gpu(res, 0, trained_index)
        gpu_index.train(data)
        trained_index = faiss.index_gpu_to_cpu(gpu_index)
    else:
        if (cuda and fine_quant.startswith('PQ')):
            print('PQ not supported on GPU; keeping CPU.')
        # BUG FIX: previously the cuda+PQ path printed the message above but
        # never called train(), so an UNTRAINED index was written to disk.
        # Now it actually trains on CPU as the message promises.
        trained_index.train(data)
    faiss.write_index(trained_index, trained_index_path)
class QuiverMutationTypeFactory(SageObject):
def __call__(self, *args):
if (len(args) == 1):
data = args[0]
else:
data = args
if isinstance(data, QuiverMutationType_Irreducible):
return data
elif isinstance(data, QuiverMutationType_Reducible):
return data
if (isinstance(data, tuple) and data):
pass
elif (isinstance(data, list) and data):
data = tuple(data)
else:
_mutation_type_error(data)
if all(((type(data_component) in [list, tuple, QuiverMutationType_Irreducible]) for data_component in data)):
if (len(data) == 1):
return QuiverMutationType(data[0])
else:
data = tuple((QuiverMutationType(comp) for comp in data))
return QuiverMutationType_Reducible(*data)
if (len(data) == 2):
data = (data[0], data[1], None)
elif (len(data) == 3):
pass
else:
_mutation_type_error(data)
if isinstance(data[2], list):
data = (data[0], data[1], tuple(data[2]))
if isinstance(data[1], list):
data = (data[0], tuple(data[1]), data[2])
if (data == ('D', 2, None)):
return QuiverMutationType(('A', 1, None), ('A', 1, None))
elif (data == ('D', 3, None)):
data = ('A', 3, None)
elif (data == ('C', 2, None)):
data = ('B', 2, None)
elif (data == ('E', 9, None)):
data = ('E', 8, 1)
elif ((data[0] == 'A') and (data[2] == 1) and isinstance(data[1], tuple) and (len(data[1]) == 2) and (min(data[1]) == 0)):
if (max(data[1]) == 0):
pass
elif (max(data[1]) == 1):
data = ('A', 1, None)
elif (max(data[1]) == 2):
return QuiverMutationType(('A', 1, None), ('A', 1, None))
elif (max(data[1]) == 3):
data = ('A', 3, None)
else:
data = ('D', max(data[1]), None)
elif ((data[0] == 'GR') and (data[2] is None) and isinstance(data[1], tuple) and (len(data[1]) == 2) and (data[1][1] > data[1][0])):
if ((min(data[1]) > (max(data[1]) / 2)) and (max(data[1]) != (min(data[1]) + 1))):
data = (data[0], ((max(data[1]) - min(data[1])), max(data[1])), data[2])
if ((min(data[1]) == 2) and (max(data[1]) > 3)):
data = ('A', (max(data[1]) - 3), None)
elif (data[1] == (3, 6)):
data = ('D', 4, None)
elif (data[1] == (3, 7)):
data = ('E', 6, None)
elif (data[1] == (3, 8)):
data = ('E', 8, None)
elif (data[1] == (3, 9)):
data = ('E', 8, [1, 1])
elif (data[1] == (4, 8)):
data = ('E', 7, [1, 1])
elif (data == ('TR', 1, None)):
data = ('A', 1, None)
elif (data == ('TR', 2, None)):
data = ('A', 3, None)
elif (data == ('TR', 3, None)):
data = ('D', 6, None)
elif (data == ('TR', 4, None)):
data = ('E', 8, (1, 1))
elif (data == ('A', 1, 1)):
data = ('A', (1, 1), 1)
elif ((data[0] == 'B') and (data[2] == 1)):
if (data[1] == 2):
data = ('CC', 2, 1)
elif (data[1] > 2):
data = ('BD', data[1], 1)
elif ((data[0] == 'B') and (data[2] == (- 1))):
if (data[1] == 2):
data = ('BB', 2, 1)
elif (data[1] > 2):
data = ('CD', data[1], 1)
elif ((data[0] == 'C') and (data[1] > 1) and (data[2] == 1)):
data = ('CC', data[1], 1)
elif ((data[0] == 'C') and (data[1] > 1) and (data[2] == (- 1))):
data = ('BB', data[1], 1)
elif (data == ('A', 2, 2)):
data = ('BC', 1, 1)
elif ((data[0] == 'A') and (data[1] in ZZ) and (data[1] > 1) and ((data[1] % 2) == 0) and (data[2] == 2)):
data = ('BC', (data[1] // 2), 1)
elif ((data[0] == 'A') and (data[1] in ZZ) and (data[1] > 3) and (data[1] % 2) and (data[2] == 2)):
data = ('CD', ((data[1] + 1) // 2), 1)
elif (data == ('A', 3, 2)):
data = ('BB', 2, 1)
elif ((data[0] == 'D') and (data[1] in ZZ) and (data[1] > 2) and (data[2] == 2)):
data = ('BB', (data[1] - 1), 1)
elif (data == ('E', 6, 2)):
data = ('F', 4, (- 1))
elif (data == ('D', 4, 3)):
data = ('G', 2, (- 1))
elif (data == ('F', 4, (2, 1))):
data = ('F', 4, (1, 2))
elif (data == ('G', 2, (3, 1))):
data = ('G', 2, (1, 3))
elif ((data[0] == 'T') and (data[2] is None)):
data = (data[0], tuple(sorted(data[1])), data[2])
(r, p, q) = data[1]
if (r == 1):
data = ('A', ((p + q) - 1), None)
elif (r == p == 2):
data = ('D', (q + 2), None)
elif ((r == 2) and (p == 3)):
if (q in (3, 4, 5)):
data = ('E', (q + 3), None)
elif (q == 6):
data = ('E', 8, 1)
else:
data = ('E', (q + 3), None)
elif ((r == 2) and (p == q == 4)):
data = ('E', 7, 1)
elif (r == p == q == 3):
data = ('E', 6, 1)
elif ((data[0] == 'R2') and (data[2] is None) and all((((data[1][i] in ZZ) and (data[1][i] > 0)) for i in [0, 1]))):
data = (data[0], tuple(sorted(data[1])), data[2])
if (data[1] == (1, 1)):
data = ('A', 2, None)
elif (data[1] == (1, 2)):
data = ('B', 2, None)
elif (data[1] == (1, 3)):
data = ('G', 2, None)
elif (data[1] == (1, 4)):
data = ('BC', 1, 1)
elif (data[1] == (2, 2)):
data = ('A', (1, 1), 1)
(letter, rank, twist) = data
if (not isinstance(letter, str)):
_mutation_type_error(data)
if isinstance(rank, list):
rank = tuple(rank)
if isinstance(twist, list):
twist = tuple(twist)
return QuiverMutationType_Irreducible(letter, rank, twist)
def _repr_(self) -> str:
return 'QuiverMutationType'
def samples(self, finite=None, affine=None, elliptic=None, mutation_finite=None):
    """Return sample quiver mutation types, filtered by the given flags.

    Each keyword is either ``None`` (no filtering on that property) or a
    boolean that the corresponding ``is_*`` predicate must match.
    """
    predicates = (
        ('is_finite', finite),
        ('is_affine', affine),
        ('is_elliptic', elliptic),
        ('is_mutation_finite', mutation_finite),
    )
    result = self._samples()
    for method_name, wanted in predicates:
        if wanted is not None:
            result = [t for t in result if getattr(t, method_name)() == wanted]
    return result
# NOTE(review): `_method` below looks like a mangled decorator (most likely
# Sage's `@cached_method`); as written it is just a bare name expression —
# confirm against the upstream Sage source.
_method
def _samples(self):
    """Return a fixed list of sample quiver mutation types.

    Covers finite, affine, elliptic, mutation-finite and mutation-infinite
    examples, each built via the ``QuiverMutationType`` factory from a
    ``[letter, rank(, twist)]`` specification.
    """
    finite_types = [QuiverMutationType(t) for t in [['A', 1], ['A', 5], ['B', 2], ['B', 5], ['C', 3], ['C', 5], ['D', 2], ['D', 5], ['E', 6], ['E', 7], ['E', 8], ['F', 4], ['G', 2]]]
    affine_types = [QuiverMutationType(t) for t in [['A', [1, 1], 1], ['A', [4, 5], 1], ['D', 4, 1], ['BB', 5, 1]]]
    elliptic_types = [QuiverMutationType(t) for t in [['E', 6, [1, 1]], ['E', 7, [1, 1]]]]
    mutation_finite_types = [QuiverMutationType(t) for t in [['R2', (1, 5)], ['R2', (3, 5)]]]
    mutation_infinite_types = [QuiverMutationType(t) for t in [['E', 10], ['BE', 5], ['GR', (3, 10)], ['T', (3, 3, 4)]]]
    # Concatenation order: finite, affine, elliptic, mutation-(in)finite.
    return ((((finite_types + affine_types) + elliptic_types) + mutation_finite_types) + mutation_infinite_types)
def constant_symbols(sdfg: SDFG) -> Set[str]:
    """Return the symbols of ``sdfg`` that no inter-state edge ever assigns
    to, i.e. the symbols that remain constant throughout the SDFG."""
    assigned = set()
    for edge in sdfg.edges():
        assigned.update(edge.data.assignments.keys())
    return set(sdfg.symbols) - assigned
class TensorWrapper(object):
    """Container whose attributes are tensors, strings, or nested
    tensor-like objects; device/memory operations are broadcast over all
    held tensors (recursively)."""

    def __init__(self, **kwargs):
        self.add_attributes(**kwargs)

    def add_attributes(self, **kwargs):
        """Attach each keyword argument as an attribute.

        ``None`` values are skipped; values that are neither tensor-like nor
        string-like raise ``TypeError``.
        """
        for name, possible_attr in kwargs.items():
            if possible_attr is None:
                continue
            if not (_is_tensor_like(possible_attr) or _is_string_like(possible_attr)):
                raise TypeError(f'Invalid input to `TensorWrapper`: {possible_attr}')
            setattr(self, name, possible_attr)

    def _apply_to_tensors(self, func):
        """Apply ``func`` to every held tensor (recursing into nested
        wrappers) in place and return ``self``."""
        def visit(obj):
            # Plain tensors get the function; wrapper-like objects recurse.
            if isinstance(obj, torch.Tensor):
                return func(obj)
            return obj._apply_to_tensors(func)

        mapper = _create_apply(lambda o: isinstance(o, (torch.Tensor, TensorWrapper)), visit)
        for name, attr in self.__dict__.items():
            if _is_tensor_like(attr):
                setattr(self, name, mapper(attr))
        return self

    def pin_memory(self):
        return self._apply_to_tensors(lambda t: t.pin_memory())

    def to(self, *args, **kwargs):
        return self._apply_to_tensors(lambda t: t.to(*args, **kwargs))

    def cuda(self, *args, **kwargs):
        return self._apply_to_tensors(lambda t: t.cuda(*args, **kwargs))
class GenerationMultimodalAdapter(InContextLearningMultimodalAdapter):
    """Multimodal in-context-learning adapter for generation scenarios."""

    def generate_requests(self, eval_instance: Instance, train_trial_index: int, training_instances: List[Instance]) -> List[RequestState]:
        """Build one generation request (and its RequestState) for
        ``eval_instance``, conditioning on ``training_instances``."""
        spec = self.adapter_spec
        prompt: MultimodalPrompt = self.construct_prompt(
            training_instances, eval_instance, include_output=False, reference_index=None
        )
        request = Request(
            model=spec.model,
            model_deployment=spec.model_deployment,
            multimodal_prompt=prompt.multimedia_object,
            num_completions=spec.num_outputs,
            temperature=spec.temperature,
            max_tokens=spec.max_tokens,
            stop_sequences=spec.stop_sequences,
            random=spec.random,
        )
        state = RequestState(
            instance=eval_instance,
            reference_index=None,
            request_mode=None,
            train_trial_index=train_trial_index,
            output_mapping=None,
            request=request,
            result=None,
            num_train_instances=prompt.num_train_instances,
            prompt_truncated=False,
        )
        return [state]
def stdout_to_string(s):
    """Evaluate the Maxima expression ``s`` inside ECL, capture what it
    prints to standard output, and return that text.

    The trailing/leading characters stripped by ``[1:-1]`` are the quotes of
    the Lisp string returned by ``.python()``.
    """
    form = '(with-output-to-string (*standard-output*)\n (maxima-eval #$%s$))' % s
    captured = ecl_eval(form).python()
    return captured[1:-1]
def _getencoder(mode, encoder_name, args, extra=()):
    """Look up and instantiate an image encoder.

    ``args`` may be ``None`` (no extra arguments), a single value, or a
    tuple. The Python-level ``ENCODERS`` registry is consulted first, then
    the C ``core`` module; ``OSError`` is raised when neither provides the
    requested encoder.
    """
    if args is None:
        args = ()
    elif not isinstance(args, tuple):
        args = (args,)
    # Prefer an encoder registered at the Python level.
    if encoder_name in ENCODERS:
        factory = ENCODERS[encoder_name]
        return factory(mode, *(args + extra))
    # Fall back to the C implementation, e.g. core.jpeg_encoder.
    try:
        factory = getattr(core, encoder_name + '_encoder')
    except AttributeError:
        raise OSError('encoder %s not available' % encoder_name)
    return factory(mode, *(args + extra))
# NOTE(review): the three lines below appear to be mangled decorators —
# most likely two `@pytest.mark.parametrize(...)` marks and a
# `@test_utils.test()` decorator; as written they are syntax errors.
# Restore from the original test file.
.parametrize('x', [0.1, 3])
.parametrize('allclose', [test_utils.allclose, (lambda x, y: (x == test_utils.approx(y)))])
_utils.test()
def test_allclose_rel_reordered2(x, allclose):
    """Check ``allclose`` with the perturbed value as the FIRST argument:
    relative deviations below the tolerance must compare close, deviations
    above it must not."""
    rel = test_utils.get_rel_eps()
    # Deviations clearly above the relative tolerance: rejected.
    assert (not allclose((x + ((x * rel) * 3.0)), x))
    assert (not allclose((x + ((x * rel) * 1.2)), x))
    # Deviations within tolerance (both sides) and exact equality: accepted.
    assert allclose((x + ((x * rel) * 0.9)), x)
    assert allclose((x + ((x * rel) * 0.5)), x)
    assert allclose(x, x)
    assert allclose((x - ((x * rel) * 0.5)), x)
    assert allclose((x - ((x * rel) * 0.9)), x)
    # And rejected again once the negative deviation exceeds the tolerance.
    assert (not allclose((x - ((x * rel) * 1.2)), x))
    assert (not allclose((x - ((x * rel) * 3.0)), x))
class EmpiricalALPComputer():
    """Empirical estimator of Absolute Learning Progress (ALP).

    ALP of a task is the absolute difference between its reward and the
    reward previously obtained on the nearest known task (nearest-neighbour
    query over task parameters).
    """

    def __init__(self, task_size, max_size=None, buffer_size=500):
        # Buffer of (reward, task) pairs supporting nearest-neighbour lookup
        # on the task side.
        self.alp_knn = BufferedDataset(1, task_size, buffer_size=buffer_size, lateness=0, max_size=max_size)

    def compute_alp(self, task, reward):
        """Return the ALP of ``(task, reward)`` and record the pair.

        Returns 0 until the buffer holds more than 5 entries.
        """
        alp = 0
        if len(self.alp_knn) > 5:
            _, neighbour_idx = self.alp_knn.nn_y(task)
            previous_reward = self.alp_knn.get_x(neighbour_idx[0])
            alp = np.abs(reward - previous_reward)
        self.alp_knn.add_xy(reward, task)
        return alp
class CloudpickleWrapper():
    """Wraps an object so it can cross process boundaries: serialized with
    cloudpickle (which handles lambdas/closures) but deserialized with the
    plain ``pickle`` module."""

    def __init__(self, x):
        self.x = x

    def __getstate__(self):
        # Imported lazily so cloudpickle is only required on the sending side.
        import cloudpickle
        payload = cloudpickle.dumps(self.x)
        return payload

    def __setstate__(self, state):
        import pickle
        self.x = pickle.loads(state)
# NOTE(review): the line below looks like a mangled decorator — probably
# `@_converter_regitstry('sPorD')` (the 'regitstry' spelling would then be
# the registry's own name); as written it is a bare call expression.
_converter_regitstry('sPorD')
def sPorD_converter(context: 'BM1688Context', reg: sPorD_reg):
    """Decode a BM1688 sPorD TIU instruction register block into
    ``(results, attributes, operands)`` for the disassembler.

    Operand roles (inferred from the register layout — confirm against the
    BM1688 TIU spec): opd0 = input feature map, opd1 = kernel, opd2 = bias,
    opd3 = per-channel pad/requant data, opd5 = requantization parameters.
    """
    # Output shape comes from the res0 n/c/h/w register fields.
    (n, c, h, w) = (reg[f'res0_{d}'] for d in 'nchw')
    opd0 = dict(address=reg.opd0_addr, dtype=(reg.opt_opd0_prec, reg.opt_opd0_sign), shape=(n, c, reg.opd0_h, reg.opd0_w), layout=Layout.alignEU)
    res0 = dict(address=reg.res0_addr, dtype=(reg.opt_res0_prec, reg.opt_opd0_sign), shape=(n, c, h, w), layout=Layout.alignEU)
    opd1 = dict(address=reg.opd1_addr, dtype=(reg.opt_opd0_prec, reg.opt_opd1_sign), shape=(1, c, reg.opd1_h, reg.opd1_w), layout=Layout.compact, is_const=reg.opt_opd1_const)
    opd2 = dict(address=reg.opd2_addr, dtype=(reg.opt_opd0_prec, reg.opt_opd2_sign), shape=(1, c, 1, 1), layout=Layout.compact, is_const=reg.opt_opd2_const)
    opd3 = dict(address=reg.opd3_addr, dtype=(reg.opt_opd0_prec, reg.opt_opd0_sign), shape=(1, c, 1, 2), layout=Layout.compact, is_const=reg.opt_opd3_const)
    opd5 = dict(address=0, dtype=DType.si32, shape=(1, c, 1, 2), layout=Layout.compact, is_const=reg.opt_opd5_const)
    opds = []
    # Which operands participate depends on the execution-unit sub-type.
    if (reg.tsk_eu_typ in [0, 4, 3, 1]):
        if (reg.tsk_eu_typ == 0):
            opds = [opd0, opd1, opd2, opd3, opd5]
        elif (reg.tsk_eu_typ == 1):
            opds = [opd0, opd1, opd3, opd5]
        else:
            opds = [opd0, opd3, opd5]
    else:
        # Remaining sub-types reinterpret opd3 as 4 x uint16 values per channel.
        opd3['shape'] = (1, c, 1, 4)
        opd3['dtype'] = DType.ui16
        if (reg.tsk_eu_typ in [5, 6]):
            opds = [opd0, opd1, opd2, opd3, opd5]
        else:
            opds = [opd0, opd2, opd3, opd5]
    # Kernel geometry, strides, insertions/pads and rounding for the op.
    attr = dict(kernel=[reg.opd1_h, reg.opd1_w], stride=[reg.res_op_y_str, reg.res_op_x_str], in_zero=[reg.opd0_y_ins0, reg.opd0_x_ins0], ke_zero=[reg.opd1_y_ins0, reg.opd1_x_ins0], opt_kernel_rotate=bool(reg.opt_kernel_rotate), pad_mode=reg.pad_mode, pad=[reg[f'opd0_{x}_pad'] for x in ('up', 'dn', 'lf', 'rt')], round_mode=reg.opd2_n_str, shift=np.uint32([reg.res1_addr]).view(np.int8)[0])
    # opd5 only participates when requantization is enabled AND not constant;
    # constant requant parameters come from the TGCR registers instead.
    if (not bool(reg.opt_rq)):
        opds.remove(opd5)
    elif bool(reg.opt_opd5_const):
        opds.remove(opd5)
        attr['multiplier'] = tgcr.getter(6)
        attr['shift'] = int(np.binary_repr(tgcr.getter(32), width=32)[(- 8):(- 1)], 2)
        attr['yzp'] = int(np.binary_repr(tgcr.getter(33), width=32)[(- 16):(- 1)], 2)
    operands = [get_value(context, **x) for x in opds]
    results = [get_value(context, **res0)]
    return (results, attr, operands)
def set_seed(seed):
    """Seed Python's, NumPy's and PyTorch's RNGs (including all CUDA devices
    when available) so experiments are reproducible."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
def load_syn():
    """Load the pickled small-world graphs and print, for each, its average
    clustering coefficient and average shortest-path length."""
    path = '{}/smallworld.pkl'.format(dirname)
    with open(path, 'rb') as fh:
        graphs = pickle.load(fh)
    for g in graphs:
        print(nx.average_clustering(g), nx.average_shortest_path_length(g))
class Model(object):
    """Wide residual network classifier for 32x32x3 images with 10 classes,
    built eagerly as a TF1 graph at construction time.

    Exposes placeholders ``x_input``/``y_input`` and tensors for logits,
    predictions, accuracy, cross-entropy losses and weight decay.
    """

    def __init__(self, mode):
        # mode selects batch-norm behaviour: 'train' or 'eval'.
        self.mode = mode
        self._build_model()

    def add_internal_summaries(self):
        # Hook for subclasses; intentionally a no-op here.
        pass

    def _stride_arr(self, stride):
        """Map a scalar stride to the 4-D NHWC stride list for conv2d."""
        return [1, stride, stride, 1]

    def _build_model(self):
        """Build the whole graph: input standardization, an initial conv,
        three residual stages of five units each, final BN/ReLU/global
        pooling, logits, and the loss/metric tensors."""
        assert ((self.mode == 'train') or (self.mode == 'eval'))
        with tf.variable_scope('input'):
            self.x_input = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
            self.y_input = tf.placeholder(tf.int64, shape=None)
            # Per-image mean/variance normalization.
            input_standardized = tf.map_fn((lambda img: tf.image.per_image_standardization(img)), self.x_input)
            x = self._conv('init_conv', input_standardized, 3, 3, 16, self._stride_arr(1))
        strides = [1, 2, 2]
        activate_before_residual = [True, False, False]
        res_func = self._residual
        # Widened channel counts per stage.
        filters = [16, 160, 320, 640]
        self.image_size = 32
        self.num_channels = 3
        self.num_labels = 10
        # Stage 1: first unit may change channel count; 4 identity units follow.
        with tf.variable_scope('unit_1_0'):
            x = res_func(x, filters[0], filters[1], self._stride_arr(strides[0]), activate_before_residual[0])
        for i in range(1, 5):
            with tf.variable_scope(('unit_1_%d' % i)):
                x = res_func(x, filters[1], filters[1], self._stride_arr(1), False)
        # Stage 2 (spatial downsampling via stride 2).
        with tf.variable_scope('unit_2_0'):
            x = res_func(x, filters[1], filters[2], self._stride_arr(strides[1]), activate_before_residual[1])
        for i in range(1, 5):
            with tf.variable_scope(('unit_2_%d' % i)):
                x = res_func(x, filters[2], filters[2], self._stride_arr(1), False)
        # Stage 3 (spatial downsampling via stride 2).
        with tf.variable_scope('unit_3_0'):
            x = res_func(x, filters[2], filters[3], self._stride_arr(strides[2]), activate_before_residual[2])
        for i in range(1, 5):
            with tf.variable_scope(('unit_3_%d' % i)):
                x = res_func(x, filters[3], filters[3], self._stride_arr(1), False)
        with tf.variable_scope('unit_last'):
            x = self._batch_norm('final_bn', x)
            x = self._relu(x, 0.1)
            x = self._global_avg_pool(x)
        with tf.variable_scope('logit'):
            self.pre_softmax = self._fully_connected(x, 10)
        self.softmax_pred = tf.nn.softmax(self.pre_softmax, axis=1)
        self.predictions = tf.argmax(self.pre_softmax, 1)
        self.correct_prediction = tf.equal(self.predictions, self.y_input)
        self.num_correct = tf.reduce_sum(tf.cast(self.correct_prediction, tf.int64))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
        with tf.variable_scope('costs'):
            self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.pre_softmax, labels=self.y_input)
            self.xent = tf.reduce_sum(self.y_xent, name='y_xent')
            self.mean_xent = tf.reduce_mean(self.y_xent)
            self.weight_decay_loss = self._decay()

    def _batch_norm(self, name, x):
        """Batch normalization; statistics update only in 'train' mode."""
        with tf.name_scope(name):
            return tf.contrib.layers.batch_norm(inputs=x, decay=0.9, center=True, scale=True, activation_fn=None, updates_collections=None, is_training=(self.mode == 'train'))

    def _residual(self, x, in_filter, out_filter, stride, activate_before_residual=False):
        """Pre-activation residual unit with two 3x3 convolutions."""
        if activate_before_residual:
            # Activation is shared with the skip branch.
            with tf.variable_scope('shared_activation'):
                x = self._batch_norm('init_bn', x)
                x = self._relu(x, 0.1)
                orig_x = x
        else:
            # Skip branch taken before the activation.
            with tf.variable_scope('residual_only_activation'):
                orig_x = x
                x = self._batch_norm('init_bn', x)
                x = self._relu(x, 0.1)
        with tf.variable_scope('sub1'):
            x = self._conv('conv1', x, 3, in_filter, out_filter, stride)
        with tf.variable_scope('sub2'):
            x = self._batch_norm('bn2', x)
            x = self._relu(x, 0.1)
            x = self._conv('conv2', x, 3, out_filter, out_filter, [1, 1, 1, 1])
        with tf.variable_scope('sub_add'):
            # When channels change, downsample the skip path and zero-pad
            # its channel dimension to match.
            if (in_filter != out_filter):
                orig_x = tf.nn.avg_pool(orig_x, stride, stride, 'VALID')
                orig_x = tf.pad(orig_x, [[0, 0], [0, 0], [0, 0], [((out_filter - in_filter) // 2), ((out_filter - in_filter) // 2)]])
            x += orig_x
        tf.logging.debug('image after unit %s', x.get_shape())
        return x

    def _decay(self):
        """Sum of L2 penalties over all conv kernels (variables named 'DW')."""
        costs = []
        for var in tf.trainable_variables():
            if (var.op.name.find('DW') > 0):
                costs.append(tf.nn.l2_loss(var))
        return tf.add_n(costs)

    def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
        """3x3-style convolution with He-style (fan-out) initialization."""
        with tf.variable_scope(name):
            n = ((filter_size * filter_size) * out_filters)
            kernel = tf.get_variable('DW', [filter_size, filter_size, in_filters, out_filters], tf.float32, initializer=tf.random_normal_initializer(stddev=np.sqrt((2.0 / n))))
            return tf.nn.conv2d(x, kernel, strides, padding='SAME')

    def _relu(self, x, leakiness=0.0):
        """Leaky ReLU: x for x >= 0, leakiness * x otherwise."""
        return tf.where(tf.less(x, 0.0), (leakiness * x), x, name='leaky_relu')

    def _fully_connected(self, x, out_dim):
        """Dense layer after flattening all non-batch dimensions."""
        num_non_batch_dimensions = len(x.shape)
        prod_non_batch_dimensions = 1
        for ii in range((num_non_batch_dimensions - 1)):
            prod_non_batch_dimensions *= int(x.shape[(ii + 1)])
        x = tf.reshape(x, [tf.shape(x)[0], (- 1)])
        w = tf.get_variable('DW', [prod_non_batch_dimensions, out_dim], initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
        b = tf.get_variable('biases', [out_dim], initializer=tf.constant_initializer())
        return tf.nn.xw_plus_b(x, w, b)

    def _global_avg_pool(self, x):
        """Average over the spatial (H, W) dimensions of an NHWC tensor."""
        assert (x.get_shape().ndims == 4)
        return tf.reduce_mean(x, [1, 2])
class WedgeOfSimplicialSets_finite(WedgeOfSimplicialSets, PushoutOfSimplicialSets_finite):
    """Finite wedge (one-point union) of pointed simplicial sets, realized
    as the pushout of the factors' base-point inclusions."""

    def __init__(self, factors=None):
        """Construct the wedge of ``factors``.

        With no factors the wedge degenerates to a single point; every
        factor must be pointed, otherwise ``ValueError`` is raised.
        """
        if (not factors):
            PushoutOfSimplicialSets_finite.__init__(self, [Point().identity()])
        else:
            if any(((not space.is_pointed()) for space in factors)):
                raise ValueError('the simplicial sets must be pointed')
            PushoutOfSimplicialSets_finite.__init__(self, [space.base_point_map() for space in factors])
        # All base points are identified; name the common one '*'.
        self.base_point().rename('*')
        self._factors = factors

    def inclusion_map(self, i):
        """Inclusion of the ``i``-th factor into the wedge (the pushout's
        ``i``-th structure map)."""
        return self.structure_map(i)

    def projection_map(self, i):
        """Projection onto the ``i``-th factor: collapse the images of all
        other factors to the base point via a quotient."""
        m = len(self._factors)
        # Nondegenerate simplices of every factor except the i-th.
        simplices = ([self.inclusion_map(j).image().nondegenerate_simplices() for j in range(i)] + [self.inclusion_map(j).image().nondegenerate_simplices() for j in range((i + 1), m)])
        return self.quotient(list(itertools.chain(*simplices))).quotient_map()
def signed_distance_between_cartesian_angles(a0, a1):
    """Return the counter-clockwise angular distance from ``a0`` to ``a1``,
    normalized into [0, 2*pi)."""
    delta = a1 - a0
    return delta + 2 * np.pi if delta < 0 else delta
def test_keras_ensemble_network_raises_on_incorrect_tensor_spec() -> None:
    """The network constructor must reject input/output specs that are not
    ``tf.TensorSpec`` instances."""
    spec = tf.TensorSpec(shape=(1,), dtype=tf.float32)
    # Invalid input spec.
    with pytest.raises(ValueError):
        _DummyKerasEnsembleNetwork([1], spec, tf.keras.losses.MeanSquaredError())
    # Invalid output spec.
    with pytest.raises(ValueError):
        _DummyKerasEnsembleNetwork(spec, [1], tf.keras.losses.MeanSquaredError())
def eval(args):
    """Build the encoder/decoder networks, optionally restore a checkpoint,
    switch everything to evaluation mode and dump the chosen image grid.

    NOTE(review): the name shadows the builtin ``eval``; kept unchanged for
    caller compatibility. Also note ``_iter`` is only bound when
    ``args.load`` is non-empty — presumably callers always pass a checkpoint
    directory; confirm.
    """
    scale = int(args.resize / 64)
    e_common = E_common(args.sep, scale)
    e_separate_A = E_separate_A(args.sep, scale)
    e_separate_B = E_separate_B(args.sep, scale)
    decoder = Decoder(scale)
    if torch.cuda.is_available():
        e_common = e_common.cuda()
        e_separate_A = e_separate_A.cuda()
        e_separate_B = e_separate_B.cuda()
        decoder = decoder.cuda()
    if args.load != '':
        save_file = os.path.join(args.load, 'checkpoint')
        _iter = load_model_for_eval(save_file, e_common, e_separate_A, e_separate_B, decoder)
    e_common = e_common.eval()
    e_separate_A = e_separate_A.eval()
    e_separate_B = e_separate_B.eval()
    decoder = decoder.eval()
    if args.out != '' and not os.path.exists(args.out):
        os.mkdir(args.out)
    save_chosen_imgs(args, e_common, e_separate_A, e_separate_B, decoder, _iter, [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], False)
def bilinear_classifier_nary(inputs1, inputs2, n_classes, keep_prob, add_bias1=True, add_bias2=True):
    """Biaffine n-ary scorer between two (batch, bucket, dim) tensors.

    Applies dropout with a noise shape shared across the bucket (time)
    dimension, appends a constant-1 feature to each input so the bilinear
    form also captures linear terms, then delegates to ``bilinear``.
    """
    input_shape1 = tf.shape(inputs1)
    input_shape2 = tf.shape(inputs2)
    batch_size1 = input_shape1[0]
    batch_size2 = input_shape2[0]
    bucket_size1 = input_shape1[1]
    bucket_size2 = input_shape2[1]
    # Static feature sizes (must be known at graph-build time).
    input_size1 = inputs1.get_shape().as_list()[(- 1)]
    input_size2 = inputs2.get_shape().as_list()[(- 1)]
    # Static shapes after appending the constant-1 feature below.
    input_shape_to_set1 = [tf.Dimension(None), tf.Dimension(None), (input_size1 + 1)]
    input_shape_to_set2 = [tf.Dimension(None), tf.Dimension(None), (input_size2 + 1)]
    if (isinstance(keep_prob, tf.Tensor) or (keep_prob < 1)):
        # Noise shape with 1 on the bucket axis -> the same dropout mask is
        # reused for every timestep of a sequence.
        noise_shape1 = tf.stack([batch_size1, 1, input_size1])
        noise_shape2 = tf.stack([batch_size2, 1, input_size2])
        inputs1 = tf.nn.dropout(inputs1, keep_prob, noise_shape=noise_shape1)
        inputs2 = tf.nn.dropout(inputs2, keep_prob, noise_shape=noise_shape2)
    # Append a constant-1 feature column to both inputs.
    inputs1 = tf.concat(axis=2, values=[inputs1, tf.ones(tf.stack([batch_size1, bucket_size1, 1]))])
    inputs1.set_shape(input_shape_to_set1)
    inputs2 = tf.concat(axis=2, values=[inputs2, tf.ones(tf.stack([batch_size2, bucket_size2, 1]))])
    inputs2.set_shape(input_shape_to_set2)
    bilin = bilinear(inputs1, inputs2, n_classes, add_bias1=add_bias1, add_bias2=add_bias2, initializer=tf.zeros_initializer())
    return bilin
class Tanh(Module):
    """Element-wise hyperbolic tangent layer (legacy nn module that
    delegates both passes to the C backend)."""

    def updateOutput(self, input):
        # Forward pass: result is written into self.output by the backend.
        self._backend.Tanh_updateOutput(self._backend.library_state, input, self.output)
        return self.output

    def updateGradInput(self, input, gradOutput):
        # Backward pass uses the cached forward output rather than the input
        # (presumably because tanh' can be derived from tanh itself — confirm
        # against the backend implementation).
        self._backend.Tanh_updateGradInput(self._backend.library_state, gradOutput, self.gradInput, self.output)
        return self.gradInput
def cli_main(parser, args):
    """Dispatch the parsed CLI sub-command handler ``args.func``.

    Side effects: sets the module-global ``return_value`` to the handler's
    result (or ``False`` on failure). When no sub-command was selected,
    prints help and exits with status -1. On handler failure: in MPI mode
    the whole process is hard-killed so other ranks do not hang; otherwise
    the process exits with status -1.
    """
    global return_value
    return_value = False
    if 'func' not in args:
        parser.print_help(sys.stderr)
        sys.exit(-1)
    if args.mpi:
        from nnabla.utils.communicator_util import create_communicator
        comm = create_communicator()
        try:
            return_value = args.func(args)
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
            # propagate instead of being swallowed here.
            import traceback
            print(traceback.format_exc())
            logger.log(99, 'ABORTED')
            # 9 == SIGKILL: hard kill so a failed rank cannot deadlock MPI.
            os.kill(os.getpid(), 9)
    else:
        try:
            return_value = args.func(args)
        except Exception:
            import traceback
            print(traceback.format_exc())
            return_value = False
            sys.exit(-1)
class SentenceBleuScorer(Scorer):
    """Sentence-level BLEU scorer; the maximum n-gram order ``n`` defaults
    to 4 when not supplied in the argument string."""

    def __init__(self, argument_string):
        Scorer.__init__(self, argument_string)
        # Default the maximum n-gram order.
        self._arguments.setdefault('n', 4)

    def set_reference(self, reference_tokens):
        """Install the reference translation used by subsequent scoring."""
        self._reference = SentenceBleuReference(reference_tokens, self._arguments['n'])
def slerp(z1, z2, t):
    """Spherical linear interpolation between ``z1`` and ``z2`` at fraction
    ``t`` (t=0 -> z1, t=1 -> z2)."""
    cos_angle = tf.reduce_sum(z1 * z2) / (tf.norm(z1) * tf.norm(z2))
    omega = tf.math.acos(cos_angle)
    sin_omega = tf.sin(omega)
    w1 = tf.sin((1 - t) * omega) / sin_omega
    w2 = tf.sin(t * omega) / sin_omega
    return w1 * z1 + w2 * z2
def _is_cur_v_passive(v):
for tok in v.children:
if (tok.dep_ == 'auxpass'):
return True
return False |
# NOTE(review): the parenthesized line below appears to be a mangled Ray
# decorator — probably `@ray.remote(resources={'machine': 1})`; as written
# it is a syntax error. Confirm against the original benchmark script.
(resources={'machine': 1})
class RayBenchmarkWorker():
    """Per-machine Ray actor benchmarking object-store put/get/reduce
    performance across ``world_size`` workers."""

    def __init__(self, notification_address, world_size, world_rank, object_size):
        # object_size is in bytes; arrays below are float32 (4 bytes/elem),
        # hence the `// 4` element counts.
        self.notification_address = notification_address
        self.notification_port = 7777
        self.world_size = world_size
        self.world_rank = world_rank
        self.object_size = object_size

    def barrier(self):
        """Synchronize all workers through the external notification server."""
        time.sleep(1)
        barrier(self.notification_address, self.notification_port, self.world_size)

    def put_object(self):
        """Put a float32 array of ``object_size`` bytes into the object store."""
        return ray.put(np.ones((self.object_size // 4), dtype=np.float32))

    def get_objects(self, object_ids):
        """Return the seconds needed to fetch the objects, excluding the
        initial resolution of the outer refs."""
        object_ids = ray.get(object_ids)
        start = time.time()
        _ = ray.get(object_ids)
        duration = (time.time() - start)
        return duration

    def get_objects_with_creation_time(self, object_ids):
        """Return the seconds needed to fetch the objects, including the
        resolution of the outer refs."""
        start = time.time()
        object_ids = ray.get(object_ids)
        _ = ray.get(object_ids)
        duration = (time.time() - start)
        return duration

    # NOTE(review): likely a mangled `@ray.method(num_returns=2)` decorator.
    (num_returns=2)
    def reduce_objects(self, object_ids):
        """Element-wise sum of the arrays behind ``object_ids``; returns the
        ref of the result and the reduce duration in seconds."""
        object_ids = ray.get(object_ids)
        start = time.time()
        reduce_result = np.zeros((self.object_size // 4), dtype=np.float32)
        for object_id in object_ids:
            array = ray.get(object_id)
            reduce_result += array
        duration = (time.time() - start)
        result_id = ray.put(reduce_result)
        return (result_id, duration)
def single_ellipsis_index(names, fn_name):
    """Return the index of the unique Ellipsis entry in ``names``, or None
    when there is none; raise RuntimeError when more than one is present.

    ``fn_name`` is only used to label the error message.
    """
    found = [idx for idx, name in enumerate(names) if is_ellipsis(name)]
    if len(found) > 1:
        raise RuntimeError("{}: More than one Ellipsis ('...') found in names ({}). This function supports up to one Ellipsis.".format(fn_name, names))
    return found[0] if found else None
def set_location_header(request):
    """Respond with an empty body whose ``Location`` header echoes the
    ``target`` query parameter (used to exercise redirect handling)."""
    response = HttpResponse('')
    response.headers['Location'] = request.args.get('target')
    return response
def check_modules():
    """Detect which supported DPDK kernel modules are loaded.

    Warns on stderr when none are loaded (and a bind action was requested,
    i.e. ``b_flag`` is set), then narrows the global ``dpdk_drivers`` list
    to the loaded modules only.
    """
    global dpdk_drivers
    mods = [{'Name': drv, 'Found': module_is_loaded(drv)} for drv in dpdk_drivers]
    if not any(m['Found'] for m in mods) and b_flag is not None:
        print('Warning: no supported DPDK kernel modules are loaded', file=sys.stderr)
    dpdk_drivers = [m['Name'] for m in mods if m['Found']]
def test():
    """Run ``multi_output_scope`` over random data and verify its mean and
    variance against numpy's within tolerance."""
    W.set(120)
    A = dace.ndarray([W])
    stats = dace.ndarray([2])
    A[:] = np.random.normal(3.0, 5.0, W.get())
    stats[:] = 0.0
    multi_output_scope(A, stats, W=W)
    count = W.get()
    mean = stats[0] / count
    variance = stats[1] / count - mean * mean
    print('Mean: %f, Variance: %f' % (mean, variance))
    diff_mean = abs(mean - np.mean(A))
    print('Difference (mean):', diff_mean)
    diff_var = abs(variance - np.var(A))
    print('Difference (variance):', diff_var)
    assert diff_mean <= 1e-05 and diff_var <= 0.0001
class Newpipe(StableDiffusionPipeline):
    """StableDiffusionPipeline variant that perturbs the prompt embedding
    with Gaussian noise scaled by ``self.noiselam``."""

    def _encode_prompt(self, *args, **kwargs):
        base = super()._encode_prompt(*args, **kwargs)
        noise = torch.randn_like(base)
        return base + self.noiselam * noise
def scrape_all_channels(in_fp, out_fp, aws_access_key_id, aws_secret_access_key):
    """Scrape the subscription list of every channel id listed in ``in_fp``,
    appending tab-separated rows to ``out_fp``.

    Resumes past completed work, detects rate limiting (consecutive 302
    redirect pages), and on repeated failure marks the job FAILED and sends
    a notification email via SES.
    """
    if os.path.exists(out_fp):
        # Resume: skip channel ids already present in the output file.
        already_scraped = set([l.split('\t')[0] for l in open(out_fp)])
        of = open(out_fp, 'a')
        print('ALREADY SCRAPED:', len(already_scraped))
    else:
        already_scraped = set([])
        of = open(out_fp, 'w')
        print('FRESH FILE.')
    chan_id_l = [l.strip() for l in open(in_fp) if (l.strip() not in already_scraped)]
    # Shuffle so reruns don't hammer the same prefix of the list.
    random.shuffle(chan_id_l)
    print('SCRAPING:', len(chan_id_l))
    time_l = []
    mv_error_c = 0
    no_subs_c = 0
    for channel_id in chan_id_l:
        # NOTE(review): this line is corrupted — the URL template string was
        # evidently lost (it should interpolate channel_id via `% vars()`);
        # as written it is a syntax error. Restore from the original script.
        url = (' % vars())
        print(url)
        html = subprocess.Popen(('curl "%(url)s"' % vars()), stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf8', shell=True).communicate()[0]
        if ('<TITLE>302 Moved</TITLE></HEAD><BODY>' in html):
            # Redirect page served — treated as a rate-limiting signal.
            mv_error_c += 1
        else:
            mv_error_c = 0
        time.sleep(2)
        sub_l = scrape_subscriptions(html)
        if (len(sub_l) == 0):
            print('NO SUBSCRIPTIONS.')
            # Emit a placeholder row (three empty fields) so the channel is
            # recorded as processed.
            of.write(('\t'.join(([channel_id] + ['', '', ''])) + '\n'))
            no_subs_c += 1
        else:
            for sub_tpl in sub_l:
                of.write(('\t'.join(([channel_id] + list(sub_tpl))) + '\n'))
            print('NUM SUBS:', len(sub_l))
            no_subs_c = 0
        time_l.append(datetime.datetime.now())
        if (len(time_l) > 5):
            avg_scrape_time_str = ('%.2f' % ((time_l[(- 1)] - time_l[(- 6)]).seconds / 5.0))
            print('LAST 5 AVG SCRAPE TIME:', avg_scrape_time_str)
        if ((mv_error_c >= 5) or (no_subs_c >= 100)):
            # Too many consecutive redirects or empty results: give up,
            # leave a FAILED marker file and notify by email.
            print('JOB FAILED.')
            os.system(('touch %(out_fp)s.FAILED' % vars()))
            scrape_email_util.send_email(aws_access_key_id, aws_secret_access_key, ('SUB SCRAPE FINISHED - FAILED - ' + in_fp), out_fp)
            of.close()
            sys.exit(1)
    of.close()
    os.system(('touch %(out_fp)s.SUCCESS' % vars()))
class TestTempitaUtilityLoader(TestUtilityLoader):
    """Re-run the utility-loader tests for ``TempitaUtilityCode``, where the
    '{{loader}}' template placeholder gets substituted from ``context``."""

    # Expected proto/impl pairs with the tempita placeholder pre-substituted.
    expected_tempita = (TestUtilityLoader.expected[0].replace('{{loader}}', 'Loader'), TestUtilityLoader.expected[1].replace('{{loader}}', 'Loader'))
    required_tempita = (TestUtilityLoader.required[0].replace('{{loader}}', 'Loader'), TestUtilityLoader.required[1].replace('{{loader}}', 'Loader'))
    cls = Code.TempitaUtilityCode

    def test_load_as_string(self):
        # Template substitution must already happen when loading as a string.
        got = strip_2tup(self.cls.load_as_string(self.name, context=self.context))
        self.assertEqual(got, self.expected_tempita)

    def test_load(self):
        # Loading by name resolves requirements and substitutes the template.
        utility = self.cls.load(self.name, context=self.context)
        got = strip_2tup((utility.proto, utility.impl))
        self.assertEqual(got, self.expected_tempita)
        (required,) = utility.requires
        got = strip_2tup((required.proto, required.impl))
        self.assertEqual(got, self.required_tempita)
        # Loading from an explicit file must yield the same result.
        utility = self.cls.load(self.name, from_file=self.filename, context=self.context)
        got = strip_2tup((utility.proto, utility.impl))
        self.assertEqual(got, self.expected_tempita)
class Histogram(object):
    """Streaming histogram over fixed bucket limits, serializable to a
    TensorBoard ``HistogramProto``."""

    def __init__(self, bucket_limits=None):
        """``bucket_limits`` is an ascending sequence of bucket upper
        bounds; defaults to ``default_buckets()`` when omitted."""
        self.bucket_limits = default_buckets() if bucket_limits is None else bucket_limits
        self.clear()

    def clear(self):
        """Reset all running statistics.

        min/max start at opposite sentinel extremes so the first ``add``
        overwrites them.
        """
        limits = self.bucket_limits
        self.min = limits[-1]
        self.max = limits[0]
        self.num = 0
        self.sum = 0.0
        self.sum_squares = 0.0
        # One extra bucket catches values beyond the last limit.
        self.buckets = np.zeros((len(limits) + 1,))

    def add(self, arr):
        """Fold the values of ``arr`` (any array-like) into the statistics."""
        values = np.asarray(arr).flatten()
        self.min = min(self.min, values.min())
        self.max = max(self.max, values.max())
        self.sum += values.sum()
        self.num += len(values)
        self.sum_squares += (values ** 2).sum()
        # Bucket index per value, then accumulate counts in one pass.
        positions = np.searchsorted(self.bucket_limits, values)
        self.buckets += np.bincount(positions, minlength=self.buckets.shape[0])

    def encode_to_proto(self):
        """Serialize to ``HistogramProto``, keeping a bucket entry only when
        it or its successor is non-empty (drops long empty runs)."""
        proto = HistogramProto()
        proto.min = float(self.min)
        proto.max = float(self.max)
        proto.num = float(self.num)
        proto.sum = float(self.sum)
        proto.sum_squares = float(self.sum_squares)
        kept_limits = []
        kept_counts = []
        for i, (limit, count) in enumerate(izip(self.bucket_limits, self.buckets)):
            if count > 0.0 or i >= len(self.bucket_limits) or self.buckets[i + 1] > 0.0:
                kept_limits.append(float(limit))
                kept_counts.append(float(count))
        # The overflow bucket is always appended.
        kept_counts.append(float(self.buckets[-1]))
        proto.bucket_limit.extend(kept_limits)
        proto.bucket.extend(kept_counts)
        return proto
def GetShellCommandOutput(cmd):
    """Run ``cmd`` through the gmock test Subprocess helper (stderr not
    captured) and return its standard output."""
    proc = gmock_test_utils.Subprocess(cmd, capture_stderr=False)
    return proc.output
def calc_ping_slots(dev_addr, ping_nb, beacon_ts=None, gps=True):
    """Compute the Class-B ping-slot times following a beacon.

    The slot offset is derived pseudo-randomly (AES-128 with a zero key over
    the beacon time and device address, per the LoRaWAN ping-slot
    randomization); returns ``ping_nb`` absolute times, in GPS time when
    ``gps`` is true, otherwise converted to Unix time.
    """
    if beacon_ts is None:
        beacon_ts = next_beacon_ts(gps=True)
    slot_len = 0.03  # seconds per ping slot
    ping_period = int(2 ** 12 / ping_nb)
    # Big-endian byte decomposition of the 32-bit beacon time.
    ts_bytes = [(beacon_ts >> shift) & 255 for shift in (24, 16, 8, 0)]
    zero_key = [0] * 16
    rand = aes128_encrypt(zero_key, list(reversed(ts_bytes)) + list(reversed(dev_addr)) + [0] * 8)
    ping_offset = int((rand[0] + rand[1] * 256) % ping_period)
    slot_ids = [ping_offset + n * ping_period for n in range(ping_nb)]
    base_time = beacon_ts if gps else gpstime.gps2unix(beacon_ts)
    return [base_time + sid * slot_len for sid in slot_ids]
def _expand_globals(config):
    """Fold the ``[globals]`` section of ``config`` into every other section
    (without overriding section-local options), then expand intra-section
    variable references in all values."""
    _ensure_cfg_read()
    defaults = config.items('globals') if config.has_section('globals') else tuple()
    for section in config.sections():
        if section == 'globals':
            continue
        for option, value in defaults:
            # A value already present in the section always wins.
            if not config.has_option(section, option):
                config.set(section, option, value)
    config.remove_section('globals')
    # Second pass: substitute variable references within each section.
    for section in config.sections():
        variables = dict(config.items(section))

        def substitute(matchobj):
            name = matchobj.group(1)
            # Unknown names are left untouched.
            return variables.get(name, matchobj.group(0))

        for option, value in config.items(section):
            config.set(section, option, _VAR_REPL.sub(substitute, value))
def init_logs(opt):
    """Create (if needed) the experiment log directory tree and return the
    image-log and weight-log paths.

    Layout: ``./explogs<exp_id>/{train|eval, weights}``; the image directory
    is ``train`` or ``eval`` depending on ``opt.istrain``.
    """
    log_dir = './explogs{}'.format(opt.exp_id)
    img_logs = os.path.join(log_dir, 'train' if opt.istrain else 'eval')
    weight_logs = os.path.join(log_dir, 'weights')
    # makedirs(exist_ok=True) replaces the check-then-mkdir pattern: it is
    # race-free and creates missing parents in one call.
    for directory in (img_logs, weight_logs):
        os.makedirs(directory, exist_ok=True)
    return (img_logs, weight_logs)
def has_dspec(dname, given_dnames):
    """Return True when ``dname`` with every digit character removed occurs
    in ``given_dnames`` (e.g. 'gpu0' matches a given 'gpu')."""
    stripped = ''.join(ch for ch in dname if not ch.isdigit())
    return stripped in given_dnames
def get_trainer_and_epoch_itr(epoch, epoch_size, num_updates, iterations_in_epoch):
    """Build a mock trainer and an EpochBatchIterator over ``epoch_size``
    single-sample batches (unshuffled), for iterator-state tests."""
    token_ids = torch.LongTensor(list(range(epoch_size)))
    block_ds = data.TokenBlockDataset(token_ids, sizes=[len(token_ids)], block_size=1, pad=0, eos=1, include_targets=False)
    trainer = mock_trainer(epoch, num_updates, iterations_in_epoch)
    pair_ds = data.LanguagePairDataset(block_ds, block_ds.sizes, mock_dict(), shuffle=False)
    # One sample per batch, in order.
    batch_sampler = [[i] for i in range(epoch_size)]
    epoch_itr = data.EpochBatchIterator(dataset=pair_ds, collate_fn=pair_ds.collater, batch_sampler=batch_sampler)
    return (trainer, epoch_itr)
class SmartPointerTransformation(typehandlers.TypeTransformation):
    """pybindgen type transformation mapping C++ ``ns3::Ptr<T>`` smart
    pointers to raw ``T *`` type handlers, with wrap/unwrap code generated
    at the call boundary."""

    def __init__(self):
        super(SmartPointerTransformation, self).__init__()
        # Matches 'Ptr<...>' with an optional 'ns3::' / '::ns3::' prefix.
        self.rx = re.compile('(ns3::|::ns3::|)Ptr<([^>]+)>\\s*$')
        # NOTE(review): stderr print looks like leftover debug output.
        print('{0!r}'.format(self), file=sys.stderr)

    def _get_untransformed_type_traits(self, name):
        """Return ``(raw_pointer_type_name, is_const)`` for a Ptr<> type
        name, or ``(None, False)`` when ``name`` is not a smart pointer."""
        m = self.rx.match(name)
        is_const = False
        if (m is None):
            # NOTE(review): debug print — consider removing.
            print('{0!r} did not match'.format(name), file=sys.stderr)
            return (None, False)
        else:
            name1 = m.group(2).strip()
            # Strip a 'const' qualifier on either side of the pointee type.
            if name1.startswith('const '):
                name1 = name1[len('const '):]
                is_const = True
            if name1.endswith(' const'):
                name1 = name1[:(- len(' const'))]
                is_const = True
            new_name = (name1 + ' *')
            if new_name.startswith('::'):
                new_name = new_name[2:]
            return (new_name, is_const)

    def get_untransformed_name(self, name):
        # Convenience accessor dropping the const flag.
        (new_name, dummy_is_const) = self._get_untransformed_type_traits(name)
        return new_name

    def create_type_handler(self, type_handler, *args, **kwargs):
        """Build a Parameter/ReturnValue handler for a Ptr<> type, forcing
        non-owning semantics (the ns3 Ptr manages the refcount)."""
        if issubclass(type_handler, Parameter):
            kwargs['transfer_ownership'] = False
        elif issubclass(type_handler, ReturnValue):
            kwargs['caller_owns_return'] = False
        else:
            raise AssertionError
        # Normalize the declared ctype to the canonical 'ns3::Ptr< T >' form
        # (orig_ctype ends in ' *', hence the [:-2] slices below).
        (orig_ctype, is_const) = self._get_untransformed_type_traits(args[0])
        if is_const:
            correct_ctype = 'ns3::Ptr< {0} const >'.format(orig_ctype[:(- 2)])
        else:
            correct_ctype = 'ns3::Ptr< {0} >'.format(orig_ctype[:(- 2)])
        args = tuple(([correct_ctype] + list(args[1:])))
        handler = type_handler(*args, **kwargs)
        handler.set_transformation(self, orig_ctype)
        return handler

    def untransform(self, type_handler, declarations, code_block, expression):
        """C++ snippet extracting the raw pointer from a Ptr<> expression."""
        return ('const_cast<%s> (ns3::PeekPointer (%s))' % (type_handler.untransformed_ctype, expression))

    def transform(self, type_handler, declarations, code_block, expression):
        """C++ snippet wrapping a raw pointer expression back into a Ptr<>."""
        assert (type_handler.untransformed_ctype[(- 1)] == '*')
        return ('ns3::Ptr< %s > (%s)' % (type_handler.untransformed_ctype[:(- 1)], expression))
class Vocab(object):
    """Vocabulary mapping tokens to indices.

    Built from a ``Counter`` of token frequencies: special tokens (default
    '<unk>', '<pad>') come first (or last when ``specials_first`` is False),
    followed by the remaining tokens in decreasing frequency order, ties
    broken alphabetically. ``itos`` maps index -> token and ``stoi`` is the
    inverse mapping; unknown tokens resolve to the '<unk>' index.
    """

    def __init__(self, counter: Counter, max_size=None, min_freq=1, specials=('<unk>', '<pad>'), specials_first=True):
        """Build the vocabulary.

        counter: token -> frequency counts (kept as ``self.freqs``).
        max_size: cap on the number of non-special tokens (None = no cap).
        min_freq: minimum frequency for a token to be included (>= 1).
        specials: tokens always included regardless of frequency.
        specials_first: whether specials come before or after the rest.
        """
        self.freqs = counter
        counter = counter.copy()
        min_freq = max(min_freq, 1)
        itos = []
        if specials_first:
            itos = list(specials)
            # max_size counts only non-special tokens, so widen it here.
            max_size = None if max_size is None else max_size + len(specials)
        for tok in specials:
            del counter[tok]  # Counter.__delitem__ ignores missing keys
        # Alphabetical sort first so the (stable) frequency sort breaks
        # frequency ties in lexicographic order.
        words_and_frequencies = sorted(counter.items(), key=lambda tup: tup[0])
        words_and_frequencies.sort(key=lambda tup: tup[1], reverse=True)
        for word, freq in words_and_frequencies:
            if freq < min_freq or len(itos) == max_size:
                break
            itos.append(word)
        if not specials_first:
            itos.extend(list(specials))
        # Assigning through the property below also builds ``self.stoi``.
        self.itos = itos

    # NOTE(review): the original file had two consecutive bare ``def itos``
    # methods — the ``@property`` / ``@itos.setter`` decorators were clearly
    # lost (without them ``self.stoi`` is never populated and __getitem__
    # crashes). Restored here.
    @property
    def itos(self):
        """List mapping index -> token."""
        return self._itos

    @itos.setter
    def itos(self, itos: List[str]):
        self._itos = itos
        # Keep the reverse mapping in sync with every assignment.
        self.stoi = {w: i for i, w in enumerate(itos)}

    def __getitem__(self, token):
        """Return the index of ``token``, falling back to '<unk>'."""
        return self.stoi.get(token, self.stoi.get('<unk>'))

    def __len__(self):
        return len(self.itos)

    def lookup_indices(self, tokens):
        """Map an iterable of tokens to their indices."""
        return [self.__getitem__(token) for token in tokens]
class ValueListVar(TemplateVar):
    """Template variable backed by an explicit list of values; supports
    ``len``, indexing and iteration over those values."""

    def __init__(self, values, *args, **kwargs):
        """Store ``values`` and forward remaining arguments to TemplateVar."""
        self.values = values
        # __init__ must return None; don't propagate super()'s return value
        # (the original used `return super().__init__(...)`).
        super().__init__(*args, **kwargs)

    def __len__(self):
        return len(self.values)

    def __getitem__(self, index):
        return self.values[index]

    def __iter__(self):
        yield from iter(self.values)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.