code |
|---|
class ResNet12(nn.Module):
def __init__(self, drop_ratio=0.1, with_drop=False):
super(ResNet12, self).__init__()
self.drop_layers = with_drop
self.inplanes = 3
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout(drop_ratio, inplace=True)  # 'inp' was undefined here; assuming inplace=True
self.layer1 = ResNetBlock(self.inplanes, 64)
self.inplanes = 64
self.layer2 = ResNetBlock(self.inplanes, 128)
self.inplanes = 128
self.layer3 = ResNetBlock(self.inplanes, 256)
self.inplanes = 256
self.layer4 = ResNetBlock(self.inplanes, 512)
self.inplanes = 512
self.layer1_rn = nn.Sequential(nn.Conv2d(1024, 512, kernel_size=3, padding=0), nn.BatchNorm2d(512, momentum=1, affine=True), nn.ReLU(), nn.MaxPool2d(2))
self.fc1_rn = nn.Sequential(nn.Linear(((512 * 2) * 2), 128), nn.BatchNorm1d(128, momentum=1, affine=True), nn.ReLU())
self.fc2_rn = nn.Linear(128, 1)
nn.init.xavier_uniform_(self.fc2_rn.weight)
self.alpha = nn.Parameter(torch.Tensor(1))
nn.init.constant_(self.alpha, 0)
self.beta = nn.Parameter(torch.Tensor(1))
nn.init.constant_(self.beta, 0)
self.relu = nn.ReLU(inplace=True)  # 'inp' was undefined here; assuming inplace=True
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in', nonlinearity='conv2d')
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
self.global_w = nn.Conv2d(in_channels=512, out_channels=64, kernel_size=1, stride=1)
nn.init.xavier_uniform_(self.global_w.weight)
def _make_layer(self, block, planes):
layers = []
layers.append(block(self.inplanes, planes))
self.inplanes = planes
return nn.Sequential(*layers)
def forward(self, x):
x = self.layer1(x)
x = self.dropout(x)
x = self.layer2(x)
x = self.dropout(x)
x = self.layer3(x)
x = self.dropout(x)
if self.drop_layers:
x_f = self.layer4(x, drop=False)
x_f = self.dropout(x_f)
x_d = self.layer4(x, drop=True)
x_d = self.dropout(x_d)
key_list = [x_f, x_d]
return key_list
else:
x_f = self.layer4(x, drop=False)
x_f = self.dropout(x_f)
return [x_f]
def relation_net(self, query, proto):
nb_class = proto.size(0)
q = query.unsqueeze(dim=1)
q = q.repeat(1, nb_class, 1, 1, 1)
p = proto.unsqueeze(dim=0)
p = p.repeat(query.size(0), 1, 1, 1, 1)
cat_pair = torch.cat((q, p), dim=2)
(B, nb_C, C, W, H) = cat_pair.size()
relation = cat_pair.reshape((- 1), C, W, H)
relation = self.layer1_rn(relation)
relation = relation.flatten(start_dim=1)
relation = self.fc1_rn(relation)
relation = self.fc2_rn(relation)
sigma = relation.reshape(B, nb_C)
sigma = torch.sigmoid(sigma)
sigma = ((torch.exp(self.alpha) * sigma) + torch.exp(self.beta))
return sigma |
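# Added illustration (not part of the source): the query/prototype pairing used in
# relation_net above, shown standalone with toy tensors; all sizes here are assumptions.
import torch

query = torch.randn(6, 512, 3, 3)                         # 6 query feature maps
proto = torch.randn(5, 512, 3, 3)                          # 5 class prototypes
q = query.unsqueeze(1).repeat(1, proto.size(0), 1, 1, 1)   # (6, 5, 512, 3, 3)
p = proto.unsqueeze(0).repeat(query.size(0), 1, 1, 1, 1)   # (6, 5, 512, 3, 3)
pairs = torch.cat((q, p), dim=2)                           # (6, 5, 1024, 3, 3)
relation_input = pairs.reshape(-1, 1024, 3, 3)             # one row per (query, class) pair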
def main():
assert_and_infer_cfg(args)
prep_experiment(args, parser)
writer = None
(_, val_loaders, _, _, extra_val_loaders) = datasets.setup_loaders(args)
(criterion, criterion_val) = loss.get_loss(args)
criterion_aux = loss.get_loss_aux(args)
net = network.get_net(args, criterion, criterion_aux, args.cont_proj_head, args.wild_cont_dict_size)
(optim, scheduler) = optimizer.get_optimizer(args, net)
net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net)
net = network.warp_network_in_dataparallel(net, args.local_rank)
epoch = 0
i = 0
if args.snapshot:
(epoch, mean_iu) = optimizer.load_weights(net, optim, scheduler, args.snapshot, args.restore_optimizer)
print('#### iteration', i)
torch.cuda.empty_cache()
for (dataset, val_loader) in val_loaders.items():
validate(val_loader, dataset, net, criterion_val, optim, scheduler, epoch, writer, i, save_pth=False)
for (dataset, val_loader) in extra_val_loaders.items():
print("Extra validating... This won't save pth file")
validate(val_loader, dataset, net, criterion_val, optim, scheduler, epoch, writer, i, save_pth=False) |
def remove_zero_length_instances_from_file(filepath):
new_filepath = (filepath[:filepath.rfind('.tsv')] + '_nozerolens.tsv')
new_f = open(new_filepath, 'w')
first_line = True
counter = 0
with open(filepath, 'r') as f:
for line in f:
if first_line:
new_f.write(line)
temp_line = line
num_sents_field_ind = 0
while (not (temp_line.startswith('num_sentences\t') or temp_line.startswith('num_sentences\n'))):
temp_line = temp_line[(temp_line.index('\t') + 1):]
num_sents_field_ind += 1
temp_line = line
max_num_tokens_field_ind = 0
while (not (temp_line.startswith('max_num_tokens_in_sentence\t') or temp_line.startswith('max_num_tokens_in_sentence\n'))):
temp_line = temp_line[(temp_line.index('\t') + 1):]
max_num_tokens_field_ind += 1
first_line = False
else:
if (line.strip() == ''):
continue
num_sents = int(get_nth_field_in_line(line, num_sents_field_ind))
max_num_tokens = int(get_nth_field_in_line(line, max_num_tokens_field_ind))
if ((num_sents > 0) or (max_num_tokens > 0)):
new_f.write(line)
counter += 1
print((('Wrote ' + str(counter)) + ' instances to file.'))
new_f.close() |
def batch_run(m, dl, device, flatten=False, method='predict', input_type='first', no_grad=True, **kwargs):
method = getattr(m, method)
l_result = []
for batch in dl:
if (input_type == 'first'):
x = batch[0]
if no_grad:
with torch.no_grad():
if flatten:
x = x.view(len(x), (- 1))
pred = method(x.cuda(device), **kwargs).detach().cpu()
else:
if flatten:
x = x.view(len(x), (- 1))
pred = method(x.cuda(device), **kwargs).detach().cpu()
l_result.append(pred)
return torch.cat(l_result) |
class BNInception_gsm(nn.Module):
def __init__(self, model_path='model_zoo/bninception/bn_inception_gsm.yaml', num_classes=101, weight_url=None, num_segments=16):  # the default weight URL was truncated in the source; pass the checkpoint URL explicitly
super(BNInception_gsm, self).__init__()
manifest = yaml.load(open(model_path))
layers = manifest['layers']
self._channel_dict = dict()
self._op_list = list()
hGates_cnt = 0
for l in layers:
(out_var, op, in_var) = parse_expr(l['expr'])
if (op != 'Concat'):
if (op == 'gsm'):
hGates_cnt += 1
(id, out_name, module, out_channel, in_name) = build_gsm(l, self._channel_dict[in_var[0]], conv_bias=True, num_segments=num_segments)
self._channel_dict[out_name] = out_channel
setattr(self, id, module)
self._op_list.append((id, op, out_name, in_name))
else:
(id, out_name, module, out_channel, in_name) = get_basic_layer(l, (3 if (len(self._channel_dict) == 0) else self._channel_dict[in_var[0]]), conv_bias=True)
self._channel_dict[out_name] = out_channel
setattr(self, id, module)
self._op_list.append((id, op, out_name, in_name))
else:
self._op_list.append((id, op, out_var[0], in_var))
channel = sum([self._channel_dict[x] for x in in_var])
self._channel_dict[out_var[0]] = channel
state_dict = torch.utils.model_zoo.load_url(weight_url)
for (k, v) in state_dict.items():
state_dict[k] = torch.squeeze(v, dim=0)
self.load_state_dict(state_dict, strict=False)
def forward(self, input):
data_dict = dict()
data_dict[self._op_list[0][(- 1)]] = input
def get_hook(name):
def hook(m, grad_in, grad_out):
print(name, grad_out[0].data.abs().mean())
return hook
for op in self._op_list:
if ((op[1] != 'Concat') and (op[1] != 'InnerProduct')):
data_dict[op[2]] = getattr(self, op[0])(data_dict[op[(- 1)]])
elif (op[1] == 'InnerProduct'):
x = data_dict[op[(- 1)]]
data_dict[op[2]] = getattr(self, op[0])(x.view(x.size(0), (- 1)))
else:
try:
data_dict[op[2]] = torch.cat(tuple((data_dict[x] for x in op[(- 1)])), 1)
except:
for x in op[(- 1)]:
print(x, data_dict[x].size())
raise
return data_dict[self._op_list[(- 1)][2]] |
def test_ast_resolver_chain():
import taichi as ti
ti.init()
node = ast.parse('ti.lang.ops.atomic_add', mode='eval').body
assert ASTResolver.resolve_to(node, ti.atomic_add, locals()) |
def get_rank():
if is_xla():
return xm.get_ordinal()
if (not dist.is_available()):
return 0
if (not dist.is_nccl_available()):
return 0
if (not dist.is_initialized()):
return 0
return dist.get_rank() |
@MODELS.register_module()  # assumed model-registry decorator; its name was garbled in the source
class PyGPointNextEncoder(nn.Module):
def __init__(self, block, blocks, in_channels=6, width=32, strides=[4, 4, 4, 4], nsample=[16, 16, 16, 16], radius=0.1, radius_scaling=2, nsample_scaling=1, aggr_args={'feature_type': 'dp_fj', 'reduction': 'max'}, group_args={'NAME': 'ballquery'}, norm_args={'norm': 'bn'}, act_args={'act': 'relu'}, conv_args=None, mid_res=False, use_res=True, expansion=1, sa_layers=2, num_posconvs=2, **kwargs):
super().__init__()
if kwargs:
logging.warning(f'kwargs: {kwargs} are not used in {__class__.__name__}')
if isinstance(block, str):
block = eval(block)
self.blocks = blocks
self.strides = strides
self.c = in_channels
self.in_channels = in_channels
self.mid_res = mid_res
self.use_res = use_res
self.aggr_args = aggr_args
self.norm_args = norm_args
self.act_args = act_args
self.conv_args = conv_args
self.expansion = expansion
self.sa_layers = sa_layers
self.num_posconvs = num_posconvs
self.radii = self._to_full_list(radius, radius_scaling)
self.nsample = self._to_full_list(nsample, nsample_scaling)
logging.info(f'''radius: {self.radii},
nsample: {self.nsample}''')
channels = []
for stride in strides:
if (stride != 1):
width *= 2
channels.append(width)
encoder = []
for i in range(len(blocks)):
group_args.radius = self.radii[i]
group_args.nsample = self.nsample[i]
encoder.append(self._make_enc(block, channels[i], blocks[i], stride=strides[i], group_args=group_args, is_head=((i == 0) and (strides[i] == 1))))
self.encoder = nn.Sequential(*encoder)
self.out_channels = channels[(- 1)]
def _to_full_list(self, param, param_scaling=1):
param_list = []
if isinstance(param, List):
for (i, value) in enumerate(param):
value = ([value] if (not isinstance(value, List)) else value)
if (len(value) != self.blocks[i]):
value += ([value[(- 1)]] * (self.blocks[i] - len(value)))
param_list.append(value)
else:
for (i, stride) in enumerate(self.strides):
if (stride == 1):
param_list.append(([param] * self.blocks[i]))
else:
param_list.append(([param] + ([(param * param_scaling)] * (self.blocks[i] - 1))))
param *= param_scaling
return param_list
def _make_enc(self, block, channels, blocks, stride, group_args, is_head=False):
layers = []
radii = group_args.radius
nsample = group_args.nsample
group_args.radius = radii[0]
group_args.nsample = nsample[0]
layers.append(SetAbstraction(self.in_channels, channels, (self.sa_layers if (not is_head) else 1), stride, group_args=group_args, norm_args=self.norm_args, act_args=self.act_args, conv_args=self.conv_args, is_head=is_head))
self.in_channels = channels
for i in range(1, blocks):
group_args.radius = radii[i]
group_args.nsample = nsample[i]
layers.append(block(self.in_channels, aggr_args=self.aggr_args, norm_args=self.norm_args, act_args=self.act_args, group_args=group_args, conv_args=self.conv_args, mid_res=self.mid_res, expansion=self.expansion, use_res=self.use_res, num_posconvs=self.num_posconvs))
return nn.Sequential(*layers)
def forward_cls_feat(self, p0, f0, batch0):
for i in range(0, len(self.encoder)):
(p0, f0, batch0) = self.encoder[i]([p0, f0, batch0])
return f0.squeeze((- 1))
def forward_seg_feat(self, p0, f0=None, batch0=None):
if hasattr(p0, 'keys'):
(p0, f0, batch0) = (p0['pos'], p0['x'], p0['batch'])
if (f0 is None):
f0 = p0.clone().transpose(1, 2).contiguous()
(p, f, batch) = ([p0], [f0], [batch0])
for i in range(0, len(self.encoder)):
(_p, _f, _batch) = self.encoder[i]([p[(- 1)], f[(- 1)], batch[(- 1)]])
p.append(_p)
f.append(_f)
batch.append(_batch)
return (p, f, batch)
def forward(self, p0, f0, batch0):
return self.forward_seg_feat(p0, f0, batch0) |
def strip_span(span, tokens):
start = 0
while (start < len(span)):
token = tokens[span[start]]
if (not re.search('^(in|of|at|-|,)$', token)):
break
start += 1
end = (len(span) - 1)
while (end > start):
token = tokens[span[end]]
if (not re.search('^(in|of|at|-|,)$', token)):
break
end -= 1
return span[start:(end + 1)] |
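# Added worked example (illustrative only): strip_span drops leading/trailing stop
# tokens ("in", "of", "at", "-", ",") from a span of token indices.
import re

tokens = ['in', 'the', 'city', 'of', 'London', ',']
print(strip_span(list(range(len(tokens))), tokens))   # -> [1, 2, 3, 4]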
def get_train_data(labels, tr_num, val_num, seed):
np.random.seed(seed)
labels_vec = labels.argmax(1)
labels_num = (labels_vec.max() + 1)
idx_train = []
idx_val = []
for label_idx in range(labels_num):
pos0 = np.argwhere((labels_vec == label_idx)).flatten()
pos0 = np.random.permutation(pos0)
idx_train.append(pos0[0:tr_num])
idx_val.append(pos0[tr_num:(val_num + tr_num)])
idx_train = np.array(idx_train).flatten()
idx_val = np.array(idx_val).flatten()
idx_test = np.setdiff1d(range(labels.shape[0]), np.union1d(idx_train, idx_val))
idx_train = torch.LongTensor(np.random.permutation(idx_train))
idx_val = torch.LongTensor(np.random.permutation(idx_val))
idx_test = torch.LongTensor(np.random.permutation(idx_test))
return (idx_train, idx_val, idx_test) |
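# Added usage sketch (synthetic data; sizes are arbitrary assumptions): split 100
# one-hot-labelled samples into 5 train / 10 val per class, remainder test.
import numpy as np

labels = np.eye(4)[np.arange(100) % 4]   # (100, 4) one-hot labels, 25 per class
idx_train, idx_val, idx_test = get_train_data(labels, tr_num=5, val_num=10, seed=0)
print(len(idx_train), len(idx_val), len(idx_test))   # 20 40 40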
class CHomP():
def __repr__(self):
return 'CHomP interface'
def __call__(self, program, complex, subcomplex=None, **kwds):
from sage.misc.temporary_file import tmp_filename
from sage.topology.cubical_complex import CubicalComplex, cubical_complexes
from sage.topology.simplicial_complex import SimplicialComplex, Simplex
from sage.homology.chain_complex import HomologyGroup
from subprocess import Popen, PIPE
from sage.rings.integer_ring import ZZ
from sage.rings.rational_field import QQ
from sage.modules.free_module import VectorSpace
from sage.modules.free_module_element import free_module_element as vector
from sage.combinat.free_module import CombinatorialFreeModule
deprecation(33777, 'the CHomP interface is deprecated')
if (not have_chomp(program)):
raise OSError(('Program %s not found' % program))
verbose = kwds.get('verbose', False)
generators = kwds.get('generators', False)
extra_opts = kwds.get('extra_opts', '')
base_ring = kwds.get('base_ring', ZZ)
if extra_opts:
extra_opts = extra_opts.split()
else:
extra_opts = []
cubical = False
simplicial = False
chain = False
if isinstance(complex, CubicalComplex):
cubical = True
edge = cubical_complexes.Cube(1)
original_complex = complex
complex = edge.product(complex)
if verbose:
print('Cubical complex')
elif isinstance(complex, SimplicialComplex):
simplicial = True
if verbose:
print('Simplicial complex')
else:
chain = True
base_ring = kwds.get('base_ring', complex.base_ring())
if verbose:
print(('Chain complex over %s' % base_ring))
if (base_ring == QQ):
raise ValueError("CHomP doesn't compute over the rationals, only over Z or F_p.")
if base_ring.is_prime_field():
p = base_ring.characteristic()
extra_opts.append(('-p%s' % p))
mod_p = True
else:
mod_p = False
try:
data = complex._chomp_repr_()
except AttributeError:
raise AttributeError('Complex cannot be converted to use with CHomP.')
datafile = tmp_filename()
with open(datafile, 'w') as f:
f.write(data)
if (subcomplex is None):
if cubical:
subcomplex = CubicalComplex([complex.n_cells(0)[0]])
elif simplicial:
m = re.search('\\(([^,]*),', data)
v = int(m.group(1))
subcomplex = SimplicialComplex([[v]])
elif cubical:
subcomplex = edge.product(subcomplex)
if generators:
genfile = tmp_filename()
extra_opts.append(('-g%s' % genfile))
if (subcomplex is not None):
try:
sub = subcomplex._chomp_repr_()
except AttributeError:
raise AttributeError('Subcomplex cannot be converted to use with CHomP.')
subfile = tmp_filename()
with open(subfile, 'w') as f:
f.write(sub)
else:
subfile = ''
if verbose:
print('Popen called with arguments', end='')
print(([program, datafile, subfile] + extra_opts))
print('')
print('CHomP output:')
print('')
cmd = [program, datafile]
if subfile:
cmd.append(subfile)
if extra_opts:
cmd.extend(extra_opts)
output = Popen(cmd, stdout=PIPE).communicate()[0]
if verbose:
print(output)
print('End of CHomP output')
print('')
if generators:
with open(genfile, 'r') as f:
gens = f.read()
if verbose:
print('Generators:')
print(gens)
if (output.find('ERROR') != (- 1)):
raise RuntimeError('error inside CHomP')
if (output.find('trivial') != (- 1)):
if mod_p:
return {0: VectorSpace(base_ring, 0)}
else:
return {0: HomologyGroup(0, ZZ)}
d = {}
h = re.compile('^H_([0-9]*) = (.*)$', re.M)
tors = re.compile('Z_([0-9]*)')
for m in h.finditer(output):
if verbose:
print(m.groups())
dim = int(m.group(1))
hom_str = m.group(2)
if (hom_str.find('0') == 0):
if mod_p:
hom = VectorSpace(base_ring, 0)
else:
hom = HomologyGroup(0, ZZ)
else:
rk = 0
if (hom_str.find('^') != (- 1)):
rk_srch = re.search('\\^([0-9]*)\\s?', hom_str)
rk = int(rk_srch.group(1))
rk += len(re.findall('(Z$)|(Z\\s)', hom_str))
if mod_p:
rk = (rk if (rk != 0) else 1)
if verbose:
print(('dimension = %s, rank of homology = %s' % (dim, rk)))
hom = VectorSpace(base_ring, rk)
else:
n = rk
invts = []
for t in tors.finditer(hom_str):
n += 1
invts.append(int(t.group(1)))
for i in range(rk):
invts.append(0)
if verbose:
print(('dimension = %s, number of factors = %s, invariants = %s' % (dim, n, invts)))
hom = HomologyGroup(n, ZZ, invts)
if generators:
if cubical:
g = process_generators_cubical(gens, dim)
if verbose:
print(('raw generators: %s' % g))
if g:
module = CombinatorialFreeModule(base_ring, original_complex.n_cells(dim), prefix='', bracket=True)
basis = module.basis()
output = []
for x in g:
v = module(0)
for term in x:
v += (term[0] * basis[term[1]])
output.append(v)
g = output
elif simplicial:
g = process_generators_simplicial(gens, dim, complex)
if verbose:
print(('raw generators: %s' % gens))
if g:
module = CombinatorialFreeModule(base_ring, complex.n_cells(dim), prefix='', bracket=False)
basis = module.basis()
output = []
for x in g:
v = module(0)
for term in x:
if complex._is_numeric():
v += (term[0] * basis[term[1]])
else:
translate = complex._translation_from_numeric()
simplex = Simplex([translate[a] for a in term[1]])
v += (term[0] * basis[simplex])
output.append(v)
g = output
elif chain:
g = process_generators_chain(gens, dim, base_ring)
if verbose:
print(('raw generators: %s' % gens))
if g:
if (not mod_p):
g = [_[1] for _ in sorted(zip(invts, g), key=(lambda x: x[0]))]
d[dim] = (hom, g)
else:
d[dim] = hom
else:
d[dim] = hom
if chain:
new_d = {}
diff = complex.differential()
if (len(diff) == 0):
return {}
bottom = min(diff)
top = max(diff)
for dim in d:
if (complex._degree_of_differential == (- 1)):
new_dim = (bottom + dim)
else:
new_dim = (top - dim)
if isinstance(d[dim], tuple):
group = d[dim][0]
gens = d[dim][1]
new_gens = []
dimension = complex.differential(new_dim).ncols()
for v in gens:
v_dict = v.dict()
if ((dimension - 1) not in v.dict()):
v_dict[(dimension - 1)] = 0
new_gens.append(vector(base_ring, v_dict))
else:
new_gens.append(v)
new_d[new_dim] = (group, new_gens)
else:
new_d[new_dim] = d[dim]
d = new_d
return d
def help(self, program):
deprecation(33777, 'the CHomP interface is deprecated')
from subprocess import Popen, PIPE
print(Popen([program, '-h'], stdout=PIPE).communicate()[0]) |
def p_adic_LLL_bound_one_prime(prime, B0, M, M_logp, m0, c3, prec=106):
if any(((g.valuation(prime) != 0) for g in (M + [m0]))):
raise ValueError('There is an element with non zero valuation')
K = prime.ring()
w = K.number_of_roots_of_unity()
p = prime.smallest_integer()
f = prime.residue_class_degree()
e = prime.absolute_ramification_index()
R = RealField(prec)
c5 = (c3 / ((f * e) * R(p).log()))
theta = K.gen()
if (len(M) == 0):
if (m0 != 1):
return (max(4, w, R(max((((R(p).log() * f) * (m0 - 1).valuation(prime)) / c3), 0)).floor()), False)
else:
return (0, False)
m0_logp = log_p(m0, prime, prec)
m0_logp = embedding_to_Kp(m0_logp, prime, prec)
n = len(M_logp)
Theta = [(theta ** i) for i in range(K.absolute_degree())]
ordp_Disc = K.disc(Theta).valuation(p)
c8 = min((min((a.valuation(p) for a in g)) for g in M_logp))
lam = (p ** c8)
low_bound = ((1 / c5).round() + 1)
for a in m0_logp:
if ((a != 0) and (c8 > a.valuation(p))):
B1 = ((c8 + (ordp_Disc / 2)) / c5)
if (B1 > low_bound):
return (max(4, w, RR(B1).floor()), False)
else:
return (max(4, w, low_bound), False)
c8 = min(([a.valuation(p) for a in m0_logp] + [c8]))
B = [(g / lam) for g in M_logp]
b0 = (m0_logp / lam)
c9 = (c8 + (ordp_Disc / 2))
m = (e * f)
u = 1
while True:
if (prec <= (u + c8)):
return (0, True)
A11 = identity_matrix(ZZ, n)
A12 = zero_matrix(ZZ, n, m)
A21 = zero_matrix(ZZ, n, m)
A22 = ((p ** u) * identity_matrix(ZZ, m))
for (i, b) in enumerate(B):
A21[i] = vector([mod(b[j], (p ** u)) for j in range(m)])
A = block_matrix([[A11, A12], [A21.transpose(), A22]])
y = zero_vector(ZZ, (n + m))
for i in range(m):
y[(i + n)] = (- mod(b0[i], (p ** u)))
c10squared = minimal_vector(A.transpose(), y)
if (c10squared > (n * (B0 ** 2))):
B2 = ((u + c9) / c5)
if (B2 > low_bound):
return (max(4, w, R(B2).floor()), False)
else:
return (max(4, w, low_bound), False)
else:
u += 1 |
def from_json_tester(algo: LearnableBase[(ImplBase, LearnableConfig)], observation_shape: Shape, action_size: int) -> None:
algo.create_impl(observation_shape, action_size)
adapter_factory = FileAdapterFactory('test_data')
logger = D3RLPyLogger(adapter_factory, experiment_name='test')
save_config(algo, logger)
adapter = logger.adapter
assert isinstance(adapter, FileAdapter)
json_path = os.path.join(adapter.logdir, 'params.json')
new_algo = algo.__class__.from_json(json_path)
_check_reconst_algo(algo, new_algo) |
class AutoModelForSequenceClassification():
def __init__(self):
raise EnvironmentError('AutoModelForSequenceClassification is designed to be instantiated using the `AutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)` or `AutoModelForSequenceClassification.from_config(config)` methods.')
# decorators below restored; assumed from the transformers AutoModel pattern (they were garbled in the source)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, use_model_types=False)
def from_config(cls, config):
if (type(config) in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()):
return MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING[type(config)](config)
raise ValueError('Unrecognized configuration class {} for this kind of AutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()))))
# decorators below restored; assumed from the transformers AutoModel pattern (they were garbled in the source)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING)
@add_start_docstrings('Instantiate one of the model classes of the library---with a sequence classification head---from a pretrained model.', AUTO_MODEL_PRETRAINED_DOCSTRING)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
config = kwargs.pop('config', None)
if (not isinstance(config, PretrainedConfig)):
(config, kwargs) = AutoConfig.from_pretrained(pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs)
if (type(config) in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()):
return MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING[type(config)].from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError('Unrecognized configuration class {} for this kind of AutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys())))) |
def drop_mask(shape, keep_prob):
if isinstance(shape, (tuple, list)):
shape = tf.stack(shape)
ones = tf.ones(shape)
return dropout(ones, keep_prob) |
def test_case122():
url = (brokerIp + '/ngsi-ld/v1/subscriptions/')
headers = {'Content-Type': 'application/json', 'Accept': 'application/ld+json', 'Link': '<{{link}}>; rel="http://www.w3.org/ns/json-ld#context"; type="application/ld+json"'}
r = requests.post(url, data=json.dumps(ld_data.subdata121), headers=headers)
print(r.content)
print(r.status_code)
assert (r.status_code == 201) |
def test_count_ops():
(x, y) = symbols('x, y')
assert (count_ops((x + y)) == 1)
assert (count_ops(((x + y), (x * y))) == 2)
assert (count_ops([[(x ** y)], [((x + y) - 1)]]) == 3)
assert (count_ops((x + y), (x * y)) == 2) |
def get_robust_regression(device: torch.device) -> GetterReturnType:
N = 10
K = 10
X = torch.rand(N, (K + 1), device=device)
Y = torch.rand(N, 1, device=device)
nu_alpha = torch.randn(1, 1, device=device)
nu_beta = torch.rand(1, 1, device=device)
nu = dist.Gamma(nu_alpha, nu_beta)
sigma_rate = torch.rand(N, 1, device=device)
sigma = dist.Exponential(sigma_rate)
beta_mean = torch.rand((K + 1), 1, device=device)
beta_sigma = torch.rand((K + 1), 1, device=device)
beta = dist.Normal(beta_mean, beta_sigma)
nu_value = nu.sample()
nu_value.requires_grad_(True)
sigma_value = sigma.sample()
sigma_unconstrained_value = sigma_value.log()
sigma_unconstrained_value.requires_grad_(True)
beta_value = beta.sample()
beta_value.requires_grad_(True)
def forward(nu_value: Tensor, sigma_unconstrained_value: Tensor, beta_value: Tensor) -> Tensor:
sigma_constrained_value = sigma_unconstrained_value.exp()
mu = X.mm(beta_value)
nu_score = (dist.StudentT(nu_value, mu, sigma_constrained_value).log_prob(Y).sum() + nu.log_prob(nu_value))
sigma_score = ((dist.StudentT(nu_value, mu, sigma_constrained_value).log_prob(Y).sum() + sigma.log_prob(sigma_constrained_value)) + sigma_unconstrained_value)
beta_score = (dist.StudentT(nu_value, mu, sigma_constrained_value).log_prob(Y).sum() + beta.log_prob(beta_value))
return ((nu_score.sum() + sigma_score.sum()) + beta_score.sum())
return (forward, (nu_value.to(device), sigma_unconstrained_value.to(device), beta_value.to(device))) |
class RiemannianStructure(Singleton):
chart = RealDiffChart
name = 'Riemannian'
scalar_field_algebra = DiffScalarFieldAlgebra
homset = DifferentiableManifoldHomset
def subcategory(self, cat):
return cat |
class InactiveLeaf():
def new_nominal_attribute_observer():
return None
def new_numeric_attribute_observer():
return None
def update_attribute_observers(self, X, y, weight, tree):
pass |
def __getattr__(name):
if (name in {'HalvingGridSearchCV', 'HalvingRandomSearchCV'}):
raise ImportError(f'''{name} is experimental and the API might change without any deprecation cycle. To use it, you need to explicitly import enable_halving_search_cv:
from sklearn.experimental import enable_halving_search_cv''')
raise AttributeError(f'module {__name__} has no attribute {name}') |
def get_memory_usage(assignments):
ret = 0
for cur in assignments:
ret += _get_max_size(cur)
return ret |
class TestFactor(unittest.TestCase):
def setUp(self):
if skip:
raise unittest.SkipTest('PyTorch not installed')
attrs = ['a', 'b', 'c']
shape = [2, 3, 4]
domain = Domain(attrs, shape)
values = torch.rand(*shape)
self.factor = Factor(domain, values)
def test_expand(self):
domain = Domain(['a', 'b', 'c', 'd'], [2, 3, 4, 5])
res = self.factor.expand(domain)
self.assertEqual(res.domain, domain)
self.assertEqual(res.values.shape, domain.shape)
res = (res.sum(['d']) * 0.2)
self.assertTrue(torch.allclose(res.values, self.factor.values))
def test_transpose(self):
attrs = ['b', 'c', 'a']
tr = self.factor.transpose(attrs)
ans = Domain(attrs, [3, 4, 2])
self.assertEqual(tr.domain, ans)
def test_project(self):
res = self.factor.project(['c', 'a'], agg='sum')
ans = Domain(['c', 'a'], [4, 2])
self.assertEqual(res.domain, ans)
self.assertEqual(res.values.shape, (4, 2))
res = self.factor.project(['c', 'a'], agg='logsumexp')
self.assertEqual(res.domain, ans)
self.assertEqual(res.values.shape, (4, 2))
def test_sum(self):
res = self.factor.sum(['a', 'b'])
self.assertEqual(res.domain, Domain(['c'], [4]))
self.assertTrue(torch.allclose(res.values, self.factor.values.sum(dim=(0, 1))))
def test_logsumexp(self):
res = self.factor.logsumexp(['a', 'c'])
values = self.factor.values
ans = torch.log(torch.sum(torch.exp(values), dim=(0, 2)))
self.assertEqual(res.domain, Domain(['b'], [3]))
self.assertTrue(torch.allclose(res.values, ans))
def test_binary(self):
dom = Domain(['b', 'd', 'e'], [3, 5, 6])
vals = torch.rand(3, 5, 6)
factor = Factor(dom, vals)
res = (self.factor * factor)
ans = Domain(['a', 'b', 'c', 'd', 'e'], [2, 3, 4, 5, 6])
self.assertEqual(res.domain, ans)
res = (self.factor + factor)
self.assertEqual(res.domain, ans)
res = (self.factor * 2.0)
self.assertEqual(res.domain, self.factor.domain)
res = (self.factor + 2.0)
self.assertEqual(res.domain, self.factor.domain)
res = (self.factor - 2.0)
self.assertEqual(res.domain, self.factor.domain)
res = self.factor.exp().log()
self.assertEqual(res.domain, self.factor.domain)
self.assertTrue(np.allclose(res.datavector(), self.factor.datavector())) |
def arc_length(arc):
angle_a = cartesian_angle(arc.circle.center, arc.a)
angle_b = cartesian_angle(arc.circle.center, arc.b)
angle = signed_distance_between_cartesian_angles(angle_a, angle_b)
return (angle * arc.circle.radius) |
@retry(wait_incrementing_start=(5 * 1000), wait_incrementing_increment=(5 * 1000), stop_max_attempt_number=5)  # '@retry' assumed; the decorator name was garbled in the source
def get_slurm_job_state(job_id: int) -> str:
try:
scontrol_output = subprocess.check_output(f'scontrol show job {job_id}', stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
raise Exception(f'{str(e)} output: {e.output}')
search_result = re.search('JobState=(\\w+)', scontrol_output.decode())
if (not search_result):
raise Exception(f'Could not extract JobState from scontrol: {scontrol_output.decode()}')
return search_result.group(1) |
def register_Ns3QuicClient_methods(root_module, cls):
cls.add_constructor([param('ns3::QuicClient const &', 'arg0')])
cls.add_constructor([])
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_method('SetRemote', 'void', [param('ns3::Address', 'ip'), param('uint16_t', 'port')])
cls.add_method('SetRemote', 'void', [param('ns3::Address', 'addr')])
cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
cls.add_method('StartApplication', 'void', [], visibility='private', is_virtual=True)
cls.add_method('StopApplication', 'void', [], visibility='private', is_virtual=True)
return |
def color_crossings(n, c):
if (n in seen):
return
seen.add(n)
if (n in color_dict):
color_dict[n] = c
return
for a in getattr(n, 'args', []):
color_crossings(a, c)
for a in getattr(n, 'fields', []):
color_crossings(a, c)
for nam in ('body', 'tuple_value'):
b = getattr(n, nam, None)
if (b is not None):
color_crossings(b, c) |
@pytest.mark.parametrize('dtype, storage_format', [(ti.f32, 'col_major'), (ti.f32, 'row_major'), (ti.f64, 'col_major'), (ti.f64, 'row_major')])
@test_utils.test(arch=ti.cpu)
def test_build_sparse_matrix_frome_ndarray(dtype, storage_format):
n = 8
triplets = ti.Vector.ndarray(n=3, dtype=ti.f32, shape=n)
A = ti.linalg.SparseMatrix(n=10, m=10, dtype=ti.f32, storage_format=storage_format)
@ti.kernel  # assumed; the kernel decorator was missing in the source
def fill(triplets: ti.types.ndarray()):
for i in range(n):
triplet = ti.Vector([i, i, i], dt=ti.f32)
triplets[i] = triplet
fill(triplets)
A.build_from_ndarray(triplets)
for i in range(n):
assert (A[(i, i)] == i) |
def create_latex_accuracy_singletable(df, outname, title):
df = df.copy()
df['redshift'] = df['model_name_noseed'].apply((lambda x: re.search('(?<=R\\_)[A-Za-z]+', x).group()))
metric = 'accuracy'
list_keys = ['-2', '0', '+2', 'all']
for k in list_keys:
df[k] = (((('$' + df[f'{k}_{metric}_mean'].round(2).map(str)) + ' \\pm ') + df[f'{k}_{metric}_std'].round(2).map(str)) + '$')
to_write = df.to_latex(index=False, columns=(['redshift'] + list_keys), escape=False)
title_to_use = (('\n\\multicolumn{5}{c}{' + title) + '} \\\\\n')
to_write = to_write.replace('\n', title_to_use, 1)
to_write = to_write.replace('toprule', 'hline').replace('midrule', 'hline').replace('bottomrule', 'hline').replace('{lllll}', '{l cccc }')
return to_write |
def top_k(source: Tensor, *, axis: Union[(Dim, Sequence[Dim])], k: Optional[Union[(int, Tensor)]]=None, k_dim: Optional[Dim]=None, sorted: bool=True) -> Tuple[(Tensor, Union[(Tensor, Sequence[Tensor])], Dim)]:
if (k is None):
assert k_dim, 'top_k: either provide `k` or `k_dim`'
k = (k_dim.dimension or k_dim.dyn_size_ext)
assert (k is not None), f'top_k: k_dim {k_dim} undefined and no k provided'
return source._raw_backend.top_k(source, axis=axis, k=k, k_dim=k_dim, sorted=sorted) |
def adjust_gamma(image, gamma=1.0):
invGamma = (1.0 / gamma)
table = np.array([(((i / 255.0) ** invGamma) * 255) for i in np.arange(0, 256)]).astype('uint8')
return cv2.LUT(image, table) |
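# Added usage sketch (illustrative only; the input image is a random stand-in for cv2.imread):
import cv2
import numpy as np

img = (np.random.rand(64, 64, 3) * 255).astype('uint8')
brighter = adjust_gamma(img, gamma=1.5)   # gamma > 1 brightens, gamma < 1 darkens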
def test_optimal_control_with_custom_preconditioner(geometry, config_ocp):
mesh = geometry.mesh
dx = geometry.dx
v_elem = VectorElement('CG', mesh.ufl_cell(), 2)
p_elem = FiniteElement('CG', mesh.ufl_cell(), 1)
V = FunctionSpace(mesh, (v_elem * p_elem))
W = VectorFunctionSpace(mesh, 'CG', 1)
up = Function(V)
(u, p) = split(up)
vq = Function(V)
(v, q) = split(vq)
c = Function(W)
F = ((((inner(grad(u), grad(v)) * dx) - ((p * div(v)) * dx)) - ((q * div(u)) * dx)) - (dot(c, v) * dx))
def pressure_point(x, on_boundary):
return (near(x[0], 0) and near(x[1], 0))
bcs = cashocs.create_dirichlet_bcs(V.sub(0), Constant((0, 0)), geometry.boundaries, [1, 2, 3])
lid_velocity = Expression(('4*x[0]*(1-x[0])', '0.0'), degree=2)
bcs += cashocs.create_dirichlet_bcs(V.sub(0), lid_velocity, geometry.boundaries, 4)
bcs += [DirichletBC(V.sub(1), Constant(0), pressure_point, method='pointwise')]
u_d = Expression(('sqrt(pow(x[0], 2) + pow(x[1], 2))*cos(2*pi*x[1])', '-sqrt(pow(x[0], 2) + pow(x[1], 2))*sin(2*pi*x[0])'), degree=2)
J = cashocs.IntegralFunctional(((Constant(0.5) * dot((u - u_d), (u - u_d))) * dx))
(u_, p_) = TrialFunctions(V)
(v_, q_) = TestFunctions(V)
pc_form = ((inner(grad(u_), grad(v_)) * dx) + ((p_ * q_) * dx))
ksp_options = {'ksp_type': 'minres', 'ksp_max_it': 90, 'ksp_rtol': 1e-10, 'ksp_atol': 1e-30, 'pc_type': 'fieldsplit', 'pc_fieldsplit_type': 'additive', 'fieldsplit_0_ksp_type': 'preonly', 'fieldsplit_0_pc_type': 'hypre', 'fieldsplit_0_pc_hypre_type': 'boomeramg', 'fieldsplit_1_ksp_type': 'preonly', 'fieldsplit_1_pc_type': 'jacobi'}
ocp = cashocs.OptimalControlProblem(F, bcs, J, up, c, vq, config=config_ocp, preconditioner_forms=pc_form, ksp_options=ksp_options)
ocp.solve(rtol=0.01, max_iter=38) |
class E2E_TrainingRestorer(object):
def __init__(self, opts, model, optimizer):
if exists(f'{opts.output_dir}/log/args.json'):
restore_opts = json.load(open(f'{opts.output_dir}/log/args.json', 'r'))
with open(join(opts.output_dir, 'log', 'restore_args.json'), 'w') as writer:
json.dump(vars(opts), writer, indent=4)
self.save_path = f'{opts.output_dir}/restore.pt'
self.backup_path = f'{opts.output_dir}/restore_backup.pt'
self.model = model
self.optimizer = optimizer
self.save_steps = int((opts.save_steps_ratio * opts.num_train_steps))
self.amp = opts.fp16
self.max_save_load_trial = 10
if (exists(self.save_path) or exists(self.backup_path)):
LOGGER.info('found previous checkpoint. try to resume...')
restore_trial = 0
while (restore_trial < self.max_save_load_trial):
LOGGER.info(f'TrainingRestorer restore trial NO. {restore_trial}')
try:
self.restore(opts)
break
except Exception as e:
restore_trial += 1
else:
self.global_step = 0
def step(self):
self.global_step += 1
if ((self.global_step % self.save_steps) == 0):
save_trial = 0
while (save_trial < self.max_save_load_trial):
LOGGER.info(f'TrainingRestorer save trial NO. {save_trial}')
try:
self.save()
break
except Exception as e:
save_trial += 1
def save(self):
checkpoint = {'global_step': self.global_step, 'model_state_dict': _to_cpu(self.model.state_dict()), 'optim_state_dict': _to_cpu(self.optimizer.state_dict())}
if self.amp:
checkpoint['amp_state_dict'] = amp.state_dict()
if exists(self.save_path):
os.rename(self.save_path, self.backup_path)
torch.save(checkpoint, self.save_path)
def restore(self, opts):
try:
checkpoint = torch.load(self.save_path)
except Exception:
checkpoint = torch.load(self.backup_path)
self.global_step = checkpoint['global_step']
self.model.load_state_dict(_to_cuda(checkpoint['model_state_dict']))
self.optimizer.load_state_dict(_to_cuda(checkpoint['optim_state_dict']))
if self.amp:
amp.load_state_dict(checkpoint['amp_state_dict'])
LOGGER.info(f'resume training from step {self.global_step}') |
# decorators below assumed (OpenFL TaskInterface-style, with 'TI' as the interface object); their prefixes were garbled in the source
@TI.add_kwargs(**{'device': 'hpu'})
@TI.register_fl_task(model='unet_model', data_loader='val_loader', device='device')
def validate(unet_model, val_loader, device):
print(f'''
TASK VALIDATE GOT DEVICE {device}
''')
unet_model.eval()
unet_model.to(device)
val_loader = tqdm.tqdm(val_loader, desc='validate')
val_score = 0
total_samples = 0
with torch.no_grad():
for (data, target) in val_loader:
samples = target.shape[0]
total_samples += samples
(data, target) = (torch.tensor(data).to(device), torch.tensor(target).to(device, dtype=torch.int64))
output = unet_model(data)
htcore.mark_step()
val = soft_dice_coef(output, target)
val_score += val.sum().cpu().numpy()
return {'dice_coef': (val_score / total_samples)} |
class CompositionalAttention(CompositionalAttentionBase):
def __init__(self, state_size: int, n_heads: int, n_rules: int, qk_dim: int, dot: bool=False, dropout: float=0.1, input_size: Optional[torch.Tensor]=None):
super().__init__(state_size, n_heads, n_rules, qk_dim, dot, dropout)
self.query_net = torch.nn.Linear((state_size if (input_size is None) else input_size), (n_heads * self.projection_size), bias=False)
self.key_net = torch.nn.Linear(state_size, (n_heads * self.projection_size), bias=False)
self.value_net = torch.nn.Linear(state_size, (self.projection_size * self.n_rules), bias=False)
self.reset_parameters()
def forward(self, curr_state: torch.Tensor, attend_to: torch.Tensor, mask: Optional[AttentionMask], need_weights: bool=False):
(bsz, n_read, _) = curr_state.shape
(_, n_write, _) = attend_to.shape
q = self.query_net(curr_state).reshape(bsz, n_read, self.n_heads, self.projection_size)
q = (q.permute(0, 2, 1, 3) * self.scale)
k = self.key_net(attend_to).reshape(bsz, n_write, self.n_heads, self.projection_size)
k = k.permute(0, 2, 3, 1)
v = self.value_net(attend_to).reshape(bsz, n_write, self.n_rules, self.projection_size)
v = v.permute(0, 2, 1, 3).unsqueeze(1)
(data, scores) = self.merged_attention(curr_state, mask, bsz, n_read, q, k, v)
if need_weights:
return (data, scores.mean(1))
else:
return data
def reset_parameters(self):
super().reset_parameters()
torch.nn.init.xavier_uniform_(self.query_net.weight)
torch.nn.init.xavier_uniform_(self.key_net.weight)
torch.nn.init.xavier_uniform_(self.value_net.weight) |
class CamRender(Render):
def __init__(self, width=1600, height=1200, name='Cam Renderer', program_files=['simple.fs', 'simple.vs'], color_size=1, ms_rate=1):
Render.__init__(self, width, height, name, program_files, color_size, ms_rate=ms_rate)
self.camera = None
glutDisplayFunc(self.display)
glutKeyboardFunc(self.keyboard)
def set_camera(self, camera):
self.camera = camera
(self.projection_matrix, self.model_view_matrix) = camera.get_gl_matrix()
def keyboard(self, key, x, y):
eps = 1
if (key == b'w'):
self.camera.center += (eps * self.camera.direction)
elif (key == b's'):
self.camera.center -= (eps * self.camera.direction)
if (key == b'a'):
self.camera.center -= (eps * self.camera.right)
elif (key == b'd'):
self.camera.center += (eps * self.camera.right)
if (key == b' '):
self.camera.center += (eps * self.camera.up)
elif (key == b'x'):
self.camera.center -= (eps * self.camera.up)
elif (key == b'i'):
self.camera.near += (0.1 * eps)
self.camera.far += (0.1 * eps)
elif (key == b'o'):
self.camera.near -= (0.1 * eps)
self.camera.far -= (0.1 * eps)
(self.projection_matrix, self.model_view_matrix) = self.camera.get_gl_matrix()
def show(self):
glutMainLoop() |
def _repo_path(repo, version):
if (not version):
return _dev_repo_path(repo)
return ('%%s' % (_main_repo_path(repo), version)) |
def getLabelIdxMapping(path):
import csv
raw = dict()
toNYU40 = dict()
toEigen = dict()
toRIO27 = dict()
toRIO7 = dict()
with open(path, newline='') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in spamreader:
if (not row[0].isnumeric()):
continue
raw[int(row[0])] = int(row[0])
toNYU40[int(row[0])] = int(row[2])
toEigen[int(row[0])] = int(row[4])
toRIO27[int(row[0])] = int(row[6])
toRIO7[int(row[0])] = int(row[8])
return (raw, toNYU40, toEigen, toRIO27, toRIO7) |
def test_Data_copy_compatible_to_dims_match_priority():
feat_dim = Dim(2, name='feature')
in_dim = feat_dim.copy(match_priority=1)
assert ((in_dim == feat_dim) and (in_dim.match_priority > feat_dim.match_priority) and (in_dim is not feat_dim))
raw_np = numpy.arange(0, (2 * 2), dtype=numpy.float32).reshape((2, 2))
raw = torch.tensor(raw_np)
x = Tensor('x', [in_dim, feat_dim], 'float32', raw_tensor=raw)
x_ = x.copy_compatible_to_dims([in_dim, feat_dim])
assert ((len(x_.dims) == 2) and (x_.dims[0] is in_dim) and (x_.dims[1] is feat_dim))
x_np = x_.raw_tensor.detach().numpy()
numpy.testing.assert_equal(x_np, raw_np)
x_ = x.copy_compatible_to_dims([feat_dim, in_dim])
assert ((len(x_.dims) == 2) and (x_.dims[0] is feat_dim) and (x_.dims[1] is in_dim))
x_np = x_.raw_tensor.detach().numpy()
numpy.testing.assert_equal(x_np, raw_np.transpose([1, 0]))
x_ = x_.copy_compatible_to_dims([feat_dim, in_dim])
assert ((len(x_.dims) == 2) and (x_.dims[0] is feat_dim) and (x_.dims[1] is in_dim))
x_np = x_.raw_tensor.detach().numpy()
numpy.testing.assert_equal(x_np, raw_np.transpose([1, 0]))
x_ = x_.copy_compatible_to_dims([in_dim, feat_dim])
assert ((len(x_.dims) == 2) and (x_.dims[0] is in_dim) and (x_.dims[1] is feat_dim))
x_np = x_.raw_tensor.detach().numpy()
numpy.testing.assert_equal(x_np, raw_np) |
def _color_wrap(*colors):
def wrapped(inp):
return ''.join((list(colors) + [inp, colorama.Style.RESET_ALL]))
return wrapped |
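# Added usage example (assumes colorama is installed, as _color_wrap already relies on it):
import colorama

red_bold = _color_wrap(colorama.Fore.RED, colorama.Style.BRIGHT)
print(red_bold('error: something went wrong'))   # printed bright red, then style is reset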
class PolicyWithPacking(Policy):
def __init__(self, solver='ECOS'):
Policy.__init__(self, solver)
def scale_factors_array(self, scale_factors, job_ids, m, n):
scale_factors_array = np.zeros((m, n))
for i in range(m):
scale_factor = None
for single_job_id in job_ids[i].singletons():
if ((scale_factor is not None) and (scale_factor != scale_factors[single_job_id])):
scale_factor = 0
else:
scale_factor = scale_factors[single_job_id]
for j in range(n):
scale_factors_array[(i, j)] = scale_factor
return scale_factors_array
def flatten(self, d, cluster_spec, priority_weights=None):
job_ids = sorted(list(d.keys()))
if (len(job_ids) == 0):
return (None, None)
worker_types = sorted(list(d[job_ids[0]].keys()))
self._num_workers = [cluster_spec[worker_type] for worker_type in worker_types]
relevant_combinations = {}
single_job_ids = set()
sorted_single_job_ids = []
for (i, job_id) in enumerate(job_ids):
if (not job_id.is_pair()):
single_job_ids.add(job_id)
sorted_single_job_ids.append(job_id)
if (job_id not in relevant_combinations):
relevant_combinations[job_id] = []
relevant_combinations[job_id].append(i)
else:
for single_job_id in job_id.singletons():
if (single_job_id not in relevant_combinations):
relevant_combinations[single_job_id] = []
relevant_combinations[single_job_id].append(i)
if (len(worker_types) == 0):
return (None, None)
shape = (len(single_job_ids), len(job_ids), len(worker_types))
all_m = np.zeros(shape, dtype=np.float32)
for (i, single_job_id) in enumerate(sorted_single_job_ids):
for j in relevant_combinations[single_job_id]:
job_id = job_ids[j]
for (k, worker_type) in enumerate(worker_types):
if (job_id in single_job_ids):
if (job_id == single_job_id):
all_m[i][j][k] = d[job_id][worker_type]
elif single_job_id.overlaps_with(job_id):
index = job_id.as_tuple().index(single_job_id[0])
throughputs = d[job_id][worker_type]
all_m[i][j][k] = d[job_id][worker_type][index]
if (priority_weights is not None):
all_m[i] /= priority_weights[single_job_id]
return (all_m, (job_ids, sorted_single_job_ids, worker_types, relevant_combinations))
def unflatten(self, m, index):
(job_id_combinations, single_job_ids, worker_types, _) = index
d = {}
for i in range(len(job_id_combinations)):
d[job_id_combinations[i]] = {}
for j in range(len(worker_types)):
d[job_id_combinations[i]][worker_types[j]] = m[i][j]
return d
def get_base_constraints(self, x, single_job_ids, scale_factors_array, relevant_combinations):
constraints = [(x >= 0), (cp.sum(cp.multiply(scale_factors_array, x), axis=0) <= np.array(self._num_workers))]
idx = []
for single_job_id in single_job_ids:
indexes = relevant_combinations[single_job_id]
idx += indexes
index_var = x[idx]
index_var = cp.reshape(index_var, (len(single_job_ids), int((np.prod(index_var.shape) / len(single_job_ids)))), order='C')
constraints.append((cp.sum(index_var, axis=1) <= 1))
return constraints
def convert_job_type_allocation(self, allocation, job_id_to_job_type_key):
job_ids = sorted(allocation.keys())
worker_types = sorted(allocation[job_ids[0]].keys())
job_type_keys = sorted(set([job_id_to_job_type_key[job_id] for job_id in job_ids]))
job_type_allocation = {}
for worker_type in worker_types:
job_type_allocation[worker_type] = {}
for job_type_key in job_type_keys:
job_type_allocation[worker_type][job_type_key] = {}
job_type_allocation_ = job_type_allocation[worker_type][job_type_key]
for other_job_type_key in ([None] + job_type_keys):
job_type_allocation_[other_job_type_key] = 0.0
for worker_type in worker_types:
for job_id in allocation:
job_type_key = job_id_to_job_type_key[job_id]
for other_job_type_key in allocation[job_id][worker_type]:
job_type_allocation[worker_type][job_type_key][other_job_type_key] += allocation[job_id][worker_type][other_job_type_key]
converted_allocation = {}
for (i, job_id) in enumerate(job_ids):
converted_allocation[job_id] = {}
job_type_key = job_id_to_job_type_key[job_id]
for worker_type in worker_types:
converted_allocation[job_id][worker_type] = allocation[job_id][worker_type][None]
for other_job_id in job_ids[(i + 1):]:
other_job_type_key = job_id_to_job_type_key[other_job_id]
merged_job_id = job_id_pair.JobIdPair(job_id[0], other_job_id[0])
converted_allocation[merged_job_id] = {}
for worker_type in worker_types:
current_job_type_allocation = job_type_allocation[worker_type][job_type_key][other_job_type_key]
if (current_job_type_allocation > 0.0):
if (job_type_key == other_job_type_key):
current_job_type_allocation -= allocation[job_id][worker_type][job_type_key]
converted_allocation[merged_job_id][worker_type] = ((allocation[job_id][worker_type][other_job_type_key] * allocation[other_job_id][worker_type][job_type_key]) / current_job_type_allocation)
else:
converted_allocation[merged_job_id][worker_type] = 0.0
return converted_allocation |
def create_plot_window(vis, xlabel, ylabel, title, win, env, trace_name):
if (not isinstance(trace_name, list)):
trace_name = [trace_name]
vis.line(X=np.array([1]), Y=np.array([np.nan]), win=win, env=env, name=trace_name[0], opts=dict(xlabel=xlabel, ylabel=ylabel, title=title))
for name in trace_name[1:]:
vis.line(X=np.array([1]), Y=np.array([np.nan]), win=win, env=env, name=name) |
def conv_relu_layer(input_layer, filter_shape, stride):
filter = create_variables(name='conv_relu', shape=filter_shape)
conv_layer = tf.nn.conv2d(input_layer, filter, strides=[1, stride, stride, 1], padding='SAME')
output = tf.nn.relu(conv_layer)
return output |
def _df(model, data, labels, attack_args):
max_iter = attack_args.get('max_iter', 100)
eps = attack_args.get('eps', 0.01)
nb_grads = attack_args.get('nb_grads', 10)
attacker = DeepFool(classifier=model, max_iter=max_iter, epsilon=eps, nb_grads=nb_grads)
return attacker.generate(data, labels) |
class Agent(object):
def __init__(self, action_shape):
self.action_shape = action_shape
def act(self, obs):
arm = np.random.normal(0.0, 0.1, size=((self.action_shape[0] - 1),))
gripper = [1.0]
return np.concatenate([arm, gripper], axis=(- 1)) |
def read_msg() -> Optional[Dict]:
msg = json.loads(sys.stdin.readline().strip())
if ('terminate' in (msg.get('type'), msg.get('event'))):
return None
if (msg.get('event') not in ('download', 'upload')):
logger.critical('Received unexpected message')
sys.exit(1)
return msg |
class TestMultipleInputsMultipleOutputsKerasMCTQExporter(TestKerasMCTQExport):
def get_input_shape(self):
return [(30, 30, 3), (28, 28, 3)]
def get_tpc(self):
tp = generate_test_tp_model({'weights_n_bits': 2})
return generate_keras_tpc(name='test_conv2d_2bit_fq_weight', tp_model=tp)
def get_model(self):
inputs1 = Input(shape=self.get_input_shape()[0])
inputs2 = Input(shape=self.get_input_shape()[1])
x = Conv2D(3, 3)(inputs1)
y = Conv2D(3, 3, padding='same')(inputs2)
x = Add()([x, y])
model = keras.Model(inputs=[inputs1, inputs2], outputs=[x, y])
return model |
class RewardFunction(object):
def __init__(self, rew_map=None, default=0):
if (rew_map is None):
rew_map = {REWARD: 1.0, REWARD2: 2.0, REWARD3: 4.0, REWARD4: 8.0, LAVA: (- 100.0)}
self.default = default
self.rew_map = rew_map
def __call__(self, gridspec, s, a, ns):
val = gridspec[gridspec.idx_to_xy(s)]
if (val in self.rew_map):
return self.rew_map[val]
return self.default |
def test_reallocations(capture, msg):
pytest.gc_collect()
with capture:
create_and_destroy(1)
assert (msg(capture) == '\n noisy new\n noisy placement new\n NoisyAlloc(int 1)\n ---\n ~NoisyAlloc()\n noisy delete\n ')
with capture:
create_and_destroy(1.5)
assert (msg(capture) == strip_comments('\n noisy new # allocation required to attempt first overload\n noisy delete # have to dealloc before considering factory init overload\n noisy new # pointer factory calling "new", part 1: allocation\n NoisyAlloc(double 1.5) # ... part two, invoking constructor\n ---\n ~NoisyAlloc() # Destructor\n noisy delete # operator delete\n '))
with capture:
create_and_destroy(2, 3)
assert (msg(capture) == strip_comments('\n noisy new # pointer factory calling "new", allocation\n NoisyAlloc(int 2) # constructor\n ---\n ~NoisyAlloc() # Destructor\n noisy delete # operator delete\n '))
with capture:
create_and_destroy(2.5, 3)
assert (msg(capture) == strip_comments('\n NoisyAlloc(double 2.5) # construction (local func variable: operator_new not called)\n noisy new # return-by-value "new" part 1: allocation\n ~NoisyAlloc() # moved-away local func variable destruction\n ---\n ~NoisyAlloc() # Destructor\n noisy delete # operator delete\n '))
with capture:
create_and_destroy(3.5, 4.5)
assert (msg(capture) == strip_comments('\n noisy new # preallocation needed before invoking placement-new overload\n noisy placement new # Placement new\n NoisyAlloc(double 3.5) # construction\n ---\n ~NoisyAlloc() # Destructor\n noisy delete # operator delete\n '))
with capture:
create_and_destroy(4, 0.5)
assert (msg(capture) == strip_comments('\n noisy new # preallocation needed before invoking placement-new overload\n noisy delete # deallocation of preallocated storage\n noisy new # Factory pointer allocation\n NoisyAlloc(int 4) # factory pointer construction\n ---\n ~NoisyAlloc() # Destructor\n noisy delete # operator delete\n '))
with capture:
create_and_destroy(5, 'hi')
assert (msg(capture) == strip_comments('\n noisy new # preallocation needed before invoking first placement new\n noisy delete # delete before considering new-style constructor\n noisy new # preallocation for second placement new\n noisy placement new # Placement new in the second placement new overload\n NoisyAlloc(int 5) # construction\n ---\n ~NoisyAlloc() # Destructor\n noisy delete # operator delete\n ')) |
class Chunker():
def __init__(self, path: str, start_offset: int, end_offset: int):
self.path = path
self.start_offset = start_offset
self.end_offset = end_offset
def __enter__(self) -> ChunkLineIterator:
self.fd = open(self.path, 'r', encoding='utf-8')
return ChunkLineIterator(self.fd, self.start_offset, self.end_offset)
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self.fd.close() |
def get_df(L_to_minmax, L_to_num_stages, L_to_best_objective):
def list_keys(x):
return list(x.keys())
assert (list_keys(L_to_num_stages) == list_keys(L_to_best_objective) == list_keys(L_to_minmax))
records = [dict(L=L, stages=stages, objective=objective) for (L, stages, objective) in zip(L_to_num_stages.keys(), L_to_num_stages.values(), L_to_best_objective.values())]
df = pd.DataFrame.from_records(records)
df['objective'] /= 10000.0
return df |
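# Added usage sketch (toy numbers; all three dicts must share the same keys):
L_to_minmax = {8: (1, 2), 16: (1, 4)}
L_to_num_stages = {8: 2, 16: 4}
L_to_best_objective = {8: 12345.0, 16: 23456.0}
df = get_df(L_to_minmax, L_to_num_stages, L_to_best_objective)   # 'objective' is scaled by 1e-4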
class Adagrad(Optimizer):
def __init__(self, params, lr=0.01, lr_decay=0, weight_decay=0, initial_accumulator_value=0, eps=1e-10):
if (not (0.0 <= lr)):
raise ValueError('Invalid learning rate: {}'.format(lr))
if (not (0.0 <= lr_decay)):
raise ValueError('Invalid lr_decay value: {}'.format(lr_decay))
if (not (0.0 <= weight_decay)):
raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
if (not (0.0 <= initial_accumulator_value)):
raise ValueError('Invalid initial_accumulator_value value: {}'.format(initial_accumulator_value))
if (not (0.0 <= eps)):
raise ValueError('Invalid epsilon value: {}'.format(eps))
defaults = dict(lr=lr, lr_decay=lr_decay, eps=eps, weight_decay=weight_decay, initial_accumulator_value=initial_accumulator_value)
super(Adagrad, self).__init__(params, defaults)
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = 0
state['sum'] = torch.full_like(p, initial_accumulator_value, memory_format=torch.preserve_format)
def share_memory(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['sum'].share_memory_()
@torch.no_grad()
def step(self, closure=None):
loss = None
if (closure is not None):
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
state_sums = []
state_steps = []
for p in group['params']:
if (p.grad is not None):
params_with_grad.append(p)
grads.append(p.grad)
state = self.state[p]
state_sums.append(state['sum'])
state['step'] += 1
state_steps.append(state['step'])
F.adagrad(params_with_grad, grads, state_sums, state_steps, lr=group['lr'], weight_decay=group['weight_decay'], lr_decay=group['lr_decay'], eps=group['eps'])
return loss |
class Config(object, metaclass=ModelConfigMeta):
filename = 'config.json'
_default_transform = Identity()
transform: TransformBase = None
dim: Optional[int] = None
def __init__(self, transform: TransformBase=None, **kwargs):
super().__init__()
if (transform is None):
self.transform = copy.deepcopy(self._default_transform)
elif isinstance(transform, dict):
self.transform = TransformFactory.create(**transform)
else:
self.transform = transform
self.dim = None
def to_dict(self, _skipped_keys=None):
config_dict = {}
skipped_keys = (set() if (_skipped_keys is None) else _skipped_keys)
for (key, value) in self.__dict__.items():
k_strip = key.lstrip('_')
key = (k_strip if hasattr(self, k_strip) else key)
if hasattr(value, 'to_dict'):
value = value.to_dict()
elif isinstance(value, Enum):
value = value.name
if (key not in skipped_keys):
config_dict[key] = copy.deepcopy(value)
return config_dict
def from_dict(cls, config_dict: Dict[(str, Any)], return_unused_kwargs=False, dim=None, **kwargs):
config_dict = copy.copy(config_dict)
dim = config_dict.pop('dim', dim)
config_dict = dict(**config_dict, **kwargs)
config = cls(**config_dict)
if (dim is not None):
config.dim = dim
kwargs = config.get_unused_kwargs(**config_dict)
if ((len(kwargs) > 0) and (not return_unused_kwargs)):
logger.warning(f'Unused kwargs: {kwargs}', stack_info=True)
elif return_unused_kwargs:
return (config, kwargs)
return config
def __reduce__(self):
return (self.__class__.from_dict, (self.to_dict(),))
def __copy__(self):
return self.from_dict(self.to_dict())
def __deepcopy__(self, memodict={}):
return self.__copy__()
def get_unused_kwargs(self, **kwargs):
return {k: v for (k, v) in kwargs.items() if (k not in self.to_dict())} |
def test():
array = ak.Array([[3.14]])
first_slice = ak.Array([True, None])[:1]
second_slice = 0
with pytest.raises(ValueError):
array[(first_slice, second_slice)] |
def _ssim(X, Y, data_range, win, size_average=True, K=(0.01, 0.03)):
(K1, K2) = K
compensation = 1.0
C1 = ((K1 * data_range) ** 2)
C2 = ((K2 * data_range) ** 2)
win = win.to(X.device, dtype=X.dtype)
mu1 = gaussian_filter(X, win)
mu2 = gaussian_filter(Y, win)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = (mu1 * mu2)
sigma1_sq = (compensation * (gaussian_filter((X * X), win) - mu1_sq))
sigma2_sq = (compensation * (gaussian_filter((Y * Y), win) - mu2_sq))
sigma12 = (compensation * (gaussian_filter((X * Y), win) - mu1_mu2))
cs_map = (((2 * sigma12) + C2) / ((sigma1_sq + sigma2_sq) + C2))
ssim_map = ((((2 * mu1_mu2) + C1) / ((mu1_sq + mu2_sq) + C1)) * cs_map)
ssim_per_channel = torch.flatten(ssim_map, 2).mean((- 1))
cs = torch.flatten(cs_map, 2).mean((- 1))
return (ssim_per_channel, cs) |
def test_Or():
assert (Or() == false)
assert (Or(True) == true)
assert (Or(False) == false)
assert (Or(True, True) == true)
assert (Or(True, False) == true)
assert (Or(False, False) == false)
assert (Or(True, False, False) == true) |
def _replace_none(dictionary: Dict[(str, Any)]) -> Dict[(str, Any)]:
for key in dictionary.keys():
if (dictionary[key] == 'None'):
dictionary[key] = None
elif isinstance(dictionary[key], pyhocon.config_tree.ConfigTree):
dictionary[key] = _replace_none(dictionary[key])
return dictionary |
def _savePngJson(hInPklResultsFile, hOutJsonPackedFile):
from PIL import Image
import StringIO
dataFPickle = pickle.load(hInPklResultsFile)
statusStr = ''
dataLen = len(dataFPickle)
for (i, x) in enumerate(dataFPickle):
x['uv_shape'] = x['uv'].shape
x['uv_data'] = _encodePngData(x['uv'])
del x['uv']
sys.stdout.write(('\x08' * len(statusStr)))
statusStr = _statusStr(i, dataLen)
sys.stdout.write(statusStr)
sys.stdout.write('\n')
json.dump(dataFPickle, hOutJsonPackedFile, ensure_ascii=False, sort_keys=True, indent=4) |
class MultilingualDatasetManager(object):
def __init__(self, args, lang_pairs, langs, dicts, sampling_method):
super().__init__()
self.args = args
self.seed = args.seed
self.lang_pairs = lang_pairs
self.langs = langs
self.dicts = dicts
self.lang_dict = self.create_lang_dictionary(self.langs)
self.sampling_method = sampling_method
self.sampling_scheduler = None
self._has_sharded_data = False
self._num_shards_dict = {}
self._training_data_sizes = defaultdict((lambda : {}))
@classmethod
def setup_data_manager(cls, args, lang_pairs, langs, dicts, sampling_method):
return MultilingualDatasetManager(args, lang_pairs, langs, dicts, sampling_method)
@staticmethod
def add_args(parser):
parser.add_argument('data', help='colon separated path to data directories list, will be iterated upon during epochs in round-robin manner', action=FileContentsAction)
parser.add_argument('--langs', default=None, type=csv_str_list, help='a comma separated list of languages which can appear in lang-pairs; note that the ordering determines language token IDs')
parser.add_argument('--lang-dict', default=None, type=str, help='an external file which contains a list of languages which can appear in lang-pairs; note that the ordering determines language token IDs; --langs and --lang-dict are two exclusive options')
parser.add_argument('--lang-tok-style', default=LangTokStyle.multilingual.value, type=str, choices=[LangTokStyle.multilingual.value, LangTokStyle.mbart.value], help='language token styles')
parser.add_argument('--load-alignments', action='store_true', help='load the binarized alignments')
parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL', help='pad the source on the left')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL', help='pad the target on the left')
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence')
parser.add_argument('--upsample-primary', default=1, type=int, help='amount to upsample primary dataset')
parser.add_argument('--truncate-source', action='store_true', default=False, help='truncate source to max-source-positions')
parser.add_argument('--encoder-langtok', default=None, type=str, choices=[EncoderLangtok.src.value, EncoderLangtok.tgt.value], metavar='SRCTGT', help='prepend to the beginning of source sentence the source or target language token. (src/tgt)')
parser.add_argument('--decoder-langtok', action='store_true', help='prepend to the beginning of target sentence the target language token')
parser.add_argument('--lang-tok-replacing-bos-eos', action='store_true', default=False)
parser.add_argument('--enable-lang-ids', default=False, action='store_true', help='whether to include language IDs in samples')
parser.add_argument('--enable-reservsed-directions-shared-datasets', default=False, action='store_true', help='whether to allow datasets to be used in reversed directions')
parser.add_argument('--extra-data', help='a dictionary mapping data name to its path, e.g. {"mined": path_to_mined_data, "denoised": path_to_denoised_data}', type=(lambda uf: eval_str_dict(uf, type=str)), default=None)
parser.add_argument('--extra-lang-pairs', help='a dictionary of data name to the language pairs they serve, e.g. {"mined": comma-separated-lang-pairs, "denoised": comma-separated-lang-pairs}', type=(lambda uf: eval_str_dict(uf, type=str)), default=None)
parser.add_argument('--fixed-dictionary', help='Fixed dictionary to use with model path', default=None, type=str)
parser.add_argument('--langtoks-specs', help='a comma separated list of data types for which a set of language tokens should be specialized, e.g. "main,dae,mined". A set of language tokens will be added to the vocab to distinguish languages in different training data types. If not specified, default language tokens per language will be added', default=LangTokSpec.main.value, type=csv_str_list)
parser.add_argument('--langtoks', help='a dictionary of how to add language tokens, e.g. {"mined": (None, "tgt"), "mono_dae": ("src.dae", "tgt"), "main": ("src", "tgt")}, or {"mined": ("src.mined", "tgt")}', default=None, type=(lambda uf: eval_str_dict(uf, type=str)))
parser.add_argument('--sampling-weights-from-file', help='a file containing a python dictionary of how to sample data sets, e.g. { "main:en_XX-es_XX": 0.2, "mined:en_XX-pt_XX": 0.5, "mono_dae:es_XX-es_XX": 0.3, "main:en_xx-fr_XX": 0.8 }', default=None, type=str)
parser.add_argument('--sampling-weights', help='a dictionary of how to sample data sets, e.g. { "main:en_XX-es_XX": 0.2, "mined:en_XX-pt_XX": 0.5, "mono_dae:es_XX-es_XX": 0.3, "main:en_xx-fr_XX": 0.8 }', default=None, type=(lambda uf: eval_str_dict(uf, type=str)))
parser.add_argument('--virtual-epoch-size', default=None, type=int, help='virtual epoch size to speed up data loading')
parser.add_argument('--virtual-data-size', default=None, type=int, help='virtual data size of the whole joint dataset to speedup data loading and have specific dynamic sampling strategy interval')
@classmethod
def load_langs(cls, args, **kwargs):
if (args.lang_dict and args.langs):
raise ValueError('--langs and --lang-dict can not both be specified')
if ((args.lang_dict is None) and (args.langs is None)):
logger.warning('External language dictionary is not provided; use lang-pairs to infer the set of supported languages. The language ordering is not stable which might cause misalignment in pretraining and finetuning.')
langs = list({x for lang_pair in args.lang_pairs for x in lang_pair.split('-')})
langs = sorted(langs)
logger.info(f'inferred language list: {langs}')
elif args.lang_dict:
with open(PathManager.get_local_path(args.lang_dict), 'r', encoding='utf-8') as f:
langs = [lang.strip() for lang in f.readlines() if lang.strip()]
logger.info(f'loaded language list from {args.lang_dict} as they are ordered in file')
elif args.langs:
langs = args.langs
logger.info(f'parsed the language list as they are ordered in the option: {langs}')
return langs
def has_sharded_data(self, split):
return (self._has_sharded_data and (split == getattr(self.args, 'train_subset', None)))
def _shared_collater(self):
return ((not (self.args.extra_data and ('mono_dae' in self.args.extra_data))) and (not self.args.lang_tok_replacing_bos_eos))
def estimate_global_pass_epoch(self, epoch):
if ((self.args.virtual_epoch_size is None) or (self.args.virtual_data_size is None)):
return None
virtual_epochs_per_shard = math.ceil((self.args.virtual_data_size / self.args.virtual_epoch_size))
shard_epoch = (((epoch - 1) // virtual_epochs_per_shard) + 1)
return shard_epoch
@classmethod
def prepare(cls, load_dictionary, args, **kargs):
args.left_pad_source = utils.eval_bool(args.left_pad_source)
args.left_pad_target = utils.eval_bool(args.left_pad_target)
if (not hasattr(args, 'shuffle_instance')):
args.shuffle_instance = False
if (args.langtoks is None):
args.langtoks = {}
if ('main' not in args.langtoks):
src_langtok_spec = (args.encoder_langtok if args.encoder_langtok else None)
tgt_langtok_spec = ('tgt' if args.decoder_langtok else None)
args.langtoks['main'] = (src_langtok_spec, tgt_langtok_spec)
def check_langs(langs, pairs):
messages = []
for (src, tgt) in pairs:
if ((src not in langs) or (tgt not in langs)):
messages.append(f'language pair {src}-{tgt} contains languages that are not in the language dictionary')
if (len(messages) > 0):
raise ValueError((' '.join(messages) + f'; langs: {langs}'))
if (args.lang_pairs is None):
raise ValueError('--lang-pairs is required. List all the language pairs in the training objective.')
if isinstance(args.lang_pairs, str):
args.lang_pairs = args.lang_pairs.split(',')
if ((args.source_lang is not None) or (args.target_lang is not None)):
training = False
else:
training = True
language_list = cls.load_langs(args, **kargs)
check_langs(language_list, ([p.split('-') for p in args.lang_pairs] if training else [(args.source_lang, args.target_lang)]))
if training:
extra_lang_pairs = (list({p for (_, v) in args.extra_lang_pairs.items() for p in v.split(',')}) if args.extra_lang_pairs else [])
langs_to_load_dicts = sorted({x for p in (args.lang_pairs + extra_lang_pairs) for x in p.split('-')})
else:
langs_to_load_dicts = sorted([args.source_lang, args.target_lang])
dicts = OrderedDict()
paths = utils.split_paths(args.data)
assert (len(paths) > 0)
for lang in langs_to_load_dicts:
if (args.fixed_dictionary is not None):
dicts[lang] = load_dictionary(args.fixed_dictionary)
else:
dicts[lang] = load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(lang)))
augment_dictionary(dictionary=dicts[lang], language_list=language_list, lang_tok_style=args.lang_tok_style, langtoks_specs=args.langtoks_specs, extra_data=args.extra_data)
if (len(dicts) > 0):
assert (dicts[lang].pad() == dicts[langs_to_load_dicts[0]].pad())
assert (dicts[lang].eos() == dicts[langs_to_load_dicts[0]].eos())
assert (dicts[lang].unk() == dicts[langs_to_load_dicts[0]].unk())
logger.info('[{}] dictionary: {} types'.format(lang, len(dicts[lang])))
return (language_list, dicts, training)
@classmethod
def create_lang_dictionary(cls, langs):
unk = '<unk>'
lang_dict = Dictionary(pad=unk, eos=unk, unk=unk, bos=unk)
for lang in langs:
lang_dict.add_symbol(lang)
return lang_dict
@classmethod
def get_langtok_index(cls, lang_tok, dic):
idx = dic.index(lang_tok)
assert (idx != dic.unk_index), 'cannot find language token {} in the dictionary'.format(lang_tok)
return idx
def get_encoder_langtok(self, src_lang, tgt_lang, spec=None):
if (spec is None):
return None
if (spec and spec.startswith('src')):
if (src_lang is None):
return None
langtok = get_lang_tok(lang=src_lang, lang_tok_style=self.args.lang_tok_style, spec=spec)
else:
if (tgt_lang is None):
return None
langtok = get_lang_tok(lang=tgt_lang, lang_tok_style=self.args.lang_tok_style, spec=spec)
return self.get_langtok_index(langtok, self.dicts[(src_lang if src_lang else tgt_lang)])
def get_decoder_langtok(self, tgt_lang, spec=None):
if (spec is None):
return None
langtok = get_lang_tok(lang=tgt_lang, lang_tok_style=self.args.lang_tok_style, spec=spec)
return self.get_langtok_index(langtok, self.dicts[tgt_lang])
@classmethod
def load_data(cls, path, vdict, impl):
dataset = data_utils.load_indexed_dataset(path, vdict, impl)
return dataset
@classmethod
def split_exists(cls, split, src, tgt, lang, data_path, dataset_impl):
filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
def load_lang_dataset(self, data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, max_source_positions, prepend_bos=False, load_alignments=False, truncate_source=False):
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = (split + (str(k) if (k > 0) else ''))
if self.split_exists(split_k, src, tgt, src, data_path, dataset_impl):
prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, src, tgt))
elif self.split_exists(split_k, tgt, src, src, data_path, dataset_impl):
prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, tgt, src))
elif (k > 0):
break
else:
logger.error(f'Dataset not found: {data_path}, {split_k}, {src}, {tgt}')
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
src_dataset = self.load_data((prefix + src), src_dict, dataset_impl)
if truncate_source:
src_dataset = AppendTokenDataset(TruncateDataset(StripTokenDataset(src_dataset, src_dict.eos()), (max_source_positions - 1)), src_dict.eos())
src_datasets.append(src_dataset)
tgt_datasets.append(self.load_data((prefix + tgt), tgt_dict, dataset_impl))
logger.info('{} {} {}-{} {} examples'.format(data_path, split_k, src, tgt, len(src_datasets[(- 1)])))
if (not combine):
break
assert (len(src_datasets) == len(tgt_datasets))
if (len(src_datasets) == 1):
(src_dataset, tgt_dataset) = (src_datasets[0], tgt_datasets[0])
else:
sample_ratios = ([1] * len(src_datasets))
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
if prepend_bos:
assert (hasattr(src_dict, 'bos_index') and hasattr(tgt_dict, 'bos_index'))
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, '{}.align.{}-{}'.format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(align_path, None, dataset_impl)
return (src_dataset, tgt_dataset, align_dataset)
def load_langpair_dataset(self, data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, left_pad_source, left_pad_target, max_source_positions, max_target_positions, prepend_bos=False, load_alignments=False, truncate_source=False, src_dataset_transform_func=(lambda dataset: dataset), tgt_dataset_transform_func=(lambda dataset: dataset), src_lang_id=None, tgt_lang_id=None, langpairs_sharing_datasets=None):
norm_direction = '-'.join(sorted([src, tgt]))
if (langpairs_sharing_datasets is not None):
src_dataset = langpairs_sharing_datasets.get((data_path, split, norm_direction, src), 'NotInCache')
tgt_dataset = langpairs_sharing_datasets.get((data_path, split, norm_direction, tgt), 'NotInCache')
align_dataset = langpairs_sharing_datasets.get((data_path, split, norm_direction, src, tgt), 'NotInCache')
if ((langpairs_sharing_datasets is None) or (src_dataset == 'NotInCache') or (tgt_dataset == 'NotInCache') or (align_dataset == 'NotInCache') or (split != getattr(self.args, 'train_subset', None))):
(src_dataset, tgt_dataset, align_dataset) = self.load_lang_dataset(data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, max_source_positions=max_source_positions, prepend_bos=prepend_bos, load_alignments=load_alignments, truncate_source=truncate_source)
src_dataset = src_dataset_transform_func(src_dataset)
tgt_dataset = tgt_dataset_transform_func(tgt_dataset)
if (langpairs_sharing_datasets is not None):
langpairs_sharing_datasets[(data_path, split, norm_direction, src)] = src_dataset
langpairs_sharing_datasets[(data_path, split, norm_direction, tgt)] = tgt_dataset
langpairs_sharing_datasets[(data_path, split, norm_direction, src, tgt)] = align_dataset
if (align_dataset is None):
langpairs_sharing_datasets[(data_path, split, norm_direction, tgt, src)] = align_dataset
else:
logger.info(f'Reusing source and target datasets of [{split}] {tgt}-{src} for reversed direction: [{split}] {src}-{tgt}: src length={len(src_dataset)}; tgt length={len(tgt_dataset)}')
return LanguagePairDataset(src_dataset, src_dataset.sizes, src_dict, tgt_dataset, (tgt_dataset.sizes if (tgt_dataset is not None) else None), tgt_dict, left_pad_source=left_pad_source, left_pad_target=left_pad_target, align_dataset=align_dataset, src_lang_id=src_lang_id, tgt_lang_id=tgt_lang_id)
def src_dataset_tranform_func(self, src_lang, tgt_lang, dataset, spec=None):
if self.args.lang_tok_replacing_bos_eos:
return dataset
if (spec is None):
return dataset
tok = self.get_encoder_langtok(src_lang, tgt_lang, spec)
if tok:
return PrependTokenDataset(dataset, tok)
return dataset
def tgt_dataset_tranform_func(self, source_lang, target_lang, dataset, spec=None):
if (dataset is None):
return None
if self.args.lang_tok_replacing_bos_eos:
return dataset
if (not spec):
return dataset
tok = self.get_decoder_langtok(target_lang, spec)
if tok:
return PrependTokenDataset(dataset, tok)
return dataset
def alter_dataset_langtok(self, lang_pair_dataset, src_eos=None, src_lang=None, tgt_eos=None, tgt_lang=None, src_langtok_spec=None, tgt_langtok_spec=None):
if ((src_langtok_spec is None) and (tgt_langtok_spec is None)):
return lang_pair_dataset
new_src_eos = None
if ((src_langtok_spec is not None) and (src_eos is not None) and ((src_lang is not None) or (tgt_lang is not None))):
new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang, src_langtok_spec)
else:
src_eos = None
new_tgt_bos = None
if (tgt_langtok_spec and (tgt_eos is not None) and (tgt_lang is not None)):
new_tgt_bos = self.get_decoder_langtok(tgt_lang, tgt_langtok_spec)
else:
tgt_eos = None
return TransformEosLangPairDataset(lang_pair_dataset, src_eos=src_eos, new_src_eos=new_src_eos, tgt_bos=tgt_eos, new_tgt_bos=new_tgt_bos)
def load_a_dataset(self, split, data_path, src, src_dict, tgt, tgt_dict, combine, prepend_bos=False, langpairs_sharing_datasets=None, data_category=None, **extra_kwargs):
dataset_impl = self.args.dataset_impl
upsample_primary = self.args.upsample_primary
left_pad_source = self.args.left_pad_source
left_pad_target = self.args.left_pad_target
max_source_positions = self.args.max_source_positions
max_target_positions = self.args.max_target_positions
load_alignments = self.args.load_alignments
truncate_source = self.args.truncate_source
src_dataset_transform_func = self.src_dataset_tranform_func
tgt_dataset_transform_func = self.tgt_dataset_tranform_func
enable_lang_ids = self.args.enable_lang_ids
lang_dictionary = self.lang_dict
(src_langtok_spec, tgt_langtok_spec) = extra_kwargs['langtok_spec']
src_langtok = self.get_encoder_langtok(src, tgt, src_langtok_spec)
tgt_langtok = self.get_decoder_langtok(tgt, tgt_langtok_spec)
logger.info(f'{data_category}:{src}-{tgt} src_langtok: {src_langtok}; tgt_langtok: {tgt_langtok}')
langpair_ds = self.load_langpair_dataset(data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, left_pad_source, left_pad_target, max_source_positions, max_target_positions, prepend_bos, load_alignments, truncate_source, src_dataset_transform_func=(lambda dataset: src_dataset_transform_func(src, tgt, dataset, src_langtok_spec)), tgt_dataset_transform_func=(lambda dataset: tgt_dataset_transform_func(src, tgt, dataset, tgt_langtok_spec)), src_lang_id=(_lang_id(lang_dictionary, src) if (enable_lang_ids and (lang_dictionary is not None)) else None), tgt_lang_id=(_lang_id(lang_dictionary, tgt) if (enable_lang_ids and (lang_dictionary is not None)) else None), langpairs_sharing_datasets=langpairs_sharing_datasets)
if self.args.lang_tok_replacing_bos_eos:
ds = self.alter_dataset_langtok(langpair_ds, src_eos=self.dicts[(src if src else tgt)].eos(), src_lang=src, tgt_eos=self.dicts[tgt].eos(), tgt_lang=tgt, src_langtok_spec=src_langtok_spec, tgt_langtok_spec=tgt_langtok_spec)
else:
ds = langpair_ds
return ds
def load_split_langpair_datasets(self, split, data_param_list):
datasets = []
langpairs_sharing_datasets = ({} if self.args.enable_reservsed_directions_shared_datasets else None)
for param in data_param_list:
ds = self.load_a_dataset(split=split, langpairs_sharing_datasets=langpairs_sharing_datasets, **param)
datasets.append(ds)
return datasets
def get_data_paths_and_lang_pairs(self, split):
datapaths = {'main': self.args.data}
lang_pairs = {'main': self.lang_pairs}
if (split == getattr(self.args, 'train_subset', None)):
if self.args.extra_data:
extra_datapaths = self.args.extra_data
datapaths.update(extra_datapaths)
if self.args.extra_lang_pairs:
extra_lang_pairs = {k: v.split(',') for (k, v) in self.args.extra_lang_pairs.items()}
lang_pairs.update(extra_lang_pairs)
return (datapaths, lang_pairs)
@classmethod
def get_dataset_key(cls, data_category, src, tgt):
return f'{data_category}:{src}-{tgt}'
@classmethod
def _get_shard_num_dict(cls, split, paths):
shards = defaultdict(int)
for path in paths:
files = PathManager.ls(path)
directions = set()
for f in files:
if (f.startswith(split) and f.endswith('.idx')):
direction = f.split('.')[(- 3)]
directions.add(direction)
for direction in directions:
shards[direction] += 1
return shards
def get_split_num_data_shards(self, split):
if (split in self._num_shards_dict):
return self._num_shards_dict[split]
num_shards_dict = {}
(data_paths, lang_pairs) = self.get_data_paths_and_lang_pairs(split)
for (data_category, paths) in data_paths.items():
if (data_category not in lang_pairs):
continue
paths = utils.split_paths(paths)
shards_dict = self._get_shard_num_dict(split, paths)
lang_dirs = [lang_pair.split('-') for lang_pair in lang_pairs[data_category]]
lang_dirs = [(x if (len(x) > 1) else (x[0], x[0])) for x in lang_dirs]
for (src, tgt) in lang_dirs:
key = self.get_dataset_key(data_category, src, tgt)
if ('mono_' in data_category):
assert ((src is None) or (src == tgt)), f'error: src={src}, tgt={tgt} for data_category={data_category}'
num_shards_dict[key] = shards_dict[tgt]
elif (f'{src}-{tgt}' in shards_dict):
num_shards_dict[key] = shards_dict[f'{src}-{tgt}']
elif (f'{tgt}-{src}' in shards_dict):
num_shards_dict[key] = shards_dict[f'{tgt}-{src}']
self._num_shards_dict[split] = num_shards_dict
logger.info(f'[{split}] num of shards: {num_shards_dict}')
return num_shards_dict
@classmethod
def get_shard_id(cls, num_shards, epoch, shard_epoch=None):
shard = (epoch if (shard_epoch is None) else shard_epoch)
shard = ((shard - 1) % num_shards)
return shard
def get_split_data_path(self, paths, epoch, shard_epoch, num_shards):
path = paths[self.get_shard_id(num_shards, epoch, shard_epoch)]
return path
def get_split_data_param_list(self, split, epoch, shard_epoch=None):
param_list = []
(data_paths, lang_pairs) = self.get_data_paths_and_lang_pairs(split)
logger.info(f'langtoks settings: {self.args.langtoks}')
split_num_shards_dict = self.get_split_num_data_shards(split)
for (data_category, paths) in data_paths.items():
if (data_category not in lang_pairs):
continue
paths = utils.split_paths(paths)
assert (len(paths) > 0)
if (len(paths) > 1):
self._has_sharded_data = True
if (split != getattr(self.args, 'train_subset', None)):
paths = paths[:1]
if (data_category in self.args.langtoks):
lang_tok_spec = self.args.langtoks[data_category]
else:
lang_tok_spec = (None, None)
lang_dirs = [lang_pair.split('-') for lang_pair in lang_pairs[data_category]]
lang_dirs = [(x if (len(x) > 1) else (x[0], x[0])) for x in lang_dirs]
for (src, tgt) in lang_dirs:
assert ((src is not None) or (data_category == 'mono_dae')), f'error: src={src}, tgt={tgt} for data_category={data_category}'
key = self.get_dataset_key(data_category, src, tgt)
data_path = self.get_split_data_path(paths, epoch, shard_epoch, split_num_shards_dict[key])
param_list.append({'key': key, 'data_path': data_path, 'split': split, 'src': src, 'src_dict': (self.dicts[src] if (src and (data_category != 'mono_dae')) else None), 'tgt': tgt, 'tgt_dict': self.dicts[tgt], 'data_category': data_category, 'langtok_spec': lang_tok_spec})
return param_list
def get_train_dataset_sizes(self, data_param_list, datasets, epoch, shard_epoch=None):
num_shards = [self.get_split_num_data_shards(param['split'])[param['key']] for param in data_param_list]
data_sizes = []
for ((key, d), num_shard) in zip(datasets, num_shards):
my_data_sizes = self._training_data_sizes[key]
shard_ind = self.get_shard_id(num_shard, epoch, shard_epoch)
if (shard_ind not in my_data_sizes):
my_data_sizes[shard_ind] = len(d)
known_size = max(my_data_sizes.values())
data_sizes.append((key, sum((my_data_sizes.get(i, known_size) for i in range(num_shard)))))
logger.info(f'estimated total data sizes of all shards used in sampling ratios: {data_sizes}. Note that if a shard has not been loaded yet, the max known data size is used as an approximation')
return [s for (_, s) in data_sizes]
def get_train_sampling_ratios(self, data_param_list, datasets, epoch=1, shard_epoch=None):
data_sizes = self.get_train_dataset_sizes(data_param_list, datasets, epoch, shard_epoch)
sampling_func = self.sampling_method.sampling_method_selector()
sample_ratios = (sampling_func(data_sizes) if (sampling_func is not None) else None)
return sample_ratios
def get_sampling_ratios(self, data_param_list, datasets, epoch, shard_epoch=None):
if self.args.sampling_weights_from_file:
weights = load_sampling_weights(self.args.sampling_weights_from_file)
sample_ratios = [weights[k] for (k, _) in datasets]
logger.info(f'| ignoring --sampling-weights when loading sampling weights from file {self.args.sampling_weights_from_file}')
elif self.args.sampling_weights:
sample_ratios = [self.args.sampling_weights[k] for (k, _) in datasets]
else:
sample_ratios = self.get_train_sampling_ratios(data_param_list, datasets, epoch, shard_epoch)
if (sample_ratios is not None):
logger.info('| Upsample ratios: {}'.format(list(zip(map((lambda x: x['key']), data_param_list), sample_ratios))))
assert (len(sample_ratios) == len(datasets))
return sample_ratios
def load_split_datasets(self, split, training, epoch=1, combine=False, shard_epoch=None, **kwargs):
data_param_list = self.get_split_data_param_list(split, epoch, shard_epoch=shard_epoch)
langpairs_sharing_datasets = ({} if self.args.enable_reservsed_directions_shared_datasets else None)
datasets = [(param['key'], self.load_a_dataset(combine=combine, langpairs_sharing_datasets=langpairs_sharing_datasets, **param)) for param in data_param_list]
return (datasets, data_param_list)
def load_into_concat_dataset(self, split, datasets, data_param_list):
if self.args.lang_tok_replacing_bos_eos:
return SampledMultiDataset(OrderedDict(datasets), sampling_ratios=None, eval_key=None, collate_format=CollateFormat.single, virtual_size=None, split=split)
return ConcatDataset([d for (_, d) in datasets])
def load_sampled_multi_epoch_dataset(self, split, training, epoch=0, combine=False, shard_epoch=None, **kwargs):
(datasets, data_param_list) = self.load_split_datasets(split, training, epoch, combine, shard_epoch=shard_epoch, **kwargs)
if (training and (split == getattr(self.args, 'train_subset', None))):
sample_ratios = self.get_sampling_ratios(data_param_list, datasets, epoch)
return SampledMultiEpochDataset(OrderedDict(datasets), epoch=epoch, shard_epoch=shard_epoch, sampling_ratios=sample_ratios, eval_key=None, collate_format=CollateFormat.single, virtual_size=self.args.virtual_data_size, split=split, virtual_epoch_size=self.args.virtual_epoch_size, shared_collater=self._shared_collater())
else:
return self.load_into_concat_dataset(split, datasets, data_param_list)
def load_sampled_multi_dataset(self, split, training, epoch=0, combine=False, shard_epoch=None, **kwargs):
(datasets, data_param_list) = self.load_split_datasets(split, training, epoch, combine, shard_epoch=shard_epoch, **kwargs)
if (training and (split == getattr(self.args, 'train_subset', None))):
sample_ratios = self.get_sampling_ratios(data_param_list, datasets, epoch)
return SampledMultiDataset(OrderedDict(datasets), epoch=epoch, sampling_ratios=sample_ratios, eval_key=None, collate_format=CollateFormat.single, virtual_size=self.args.virtual_data_size, split=split, shared_collater=self._shared_collater())
else:
return self.load_into_concat_dataset(split, datasets, data_param_list)
def load_dataset(self, split, training, epoch=0, combine=False, shard_epoch=None, **kwargs):
if (self.args.virtual_epoch_size is None):
return self.load_sampled_multi_dataset(split, training, epoch, combine, shard_epoch, **kwargs)
else:
return self.load_sampled_multi_epoch_dataset(split, training, epoch, combine, shard_epoch, **kwargs) |
def box_sphere_intersections(z, d, lb, ub, trust_radius, entire_line=False, extra_info=False):
(ta_b, tb_b, intersect_b) = box_intersections(z, d, lb, ub, entire_line)
(ta_s, tb_s, intersect_s) = sphere_intersections(z, d, trust_radius, entire_line)
ta = np.maximum(ta_b, ta_s)
tb = np.minimum(tb_b, tb_s)
if (intersect_b and intersect_s and (ta <= tb)):
intersect = True
else:
intersect = False
if extra_info:
sphere_info = {'ta': ta_s, 'tb': tb_s, 'intersect': intersect_s}
box_info = {'ta': ta_b, 'tb': tb_b, 'intersect': intersect_b}
return (ta, tb, intersect, sphere_info, box_info)
else:
return (ta, tb, intersect) |
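# Hedged usage sketch for box_sphere_intersections above. It intersects the line z + t*d with the
# box [lb, ub] and the ball ||x|| <= trust_radius and returns the admissible interval [ta, tb].
# It relies on the companion helpers box_intersections and sphere_intersections from the same
# module (scipy's trust-region QP subproblem code); the values below are illustrative only.
import numpy as np
z = np.zeros(2)
d = np.array([1.0, 0.0])
lb = np.array([-1.0, -1.0])
ub = np.array([0.5, 1.0])
ta, tb, intersect = box_sphere_intersections(z, d, lb, ub, trust_radius=2.0)
# Moving along +x from the origin, the box caps the step before the ball does, so one would
# expect roughly ta == 0.0, tb == 0.5 and intersect == True. |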
def train_epoch(model, dataloader, criterion, optimizer, epoch_i):
model.train()
criterion.train()
time_meters = defaultdict(AverageMeter)
loss_meters = defaultdict(AverageMeter)
tictoc = time.time()
for (idx, batch) in tqdm(enumerate(dataloader), desc='Training Iteration', total=len(dataloader)):
time_meters['dataloading_time'].update((time.time() - tictoc))
tictoc = time.time()
device = ('cuda' if (torch.cuda.is_available() and args.use_gpu) else 'cpu')
(model_inputs, targets) = prepare_batch_inputs(batch[1], device, non_blocking=args.pin_memory)
time_meters['prepare_inputs_time'].update((time.time() - tictoc))
tictoc = time.time()
outputs = model(**model_inputs, att_visualize=args.att_visualize, corr_visualize=args.corr_visualize, epoch_i=epoch_i, idx=idx)
loss_dict = criterion(outputs, targets)
weight_dict = criterion.weight_dict
losses = sum(((loss_dict[k] * weight_dict[k]) for k in loss_dict.keys() if (k in weight_dict)))
time_meters['model_forward_time'].update((time.time() - tictoc))
tictoc = time.time()
optimizer.zero_grad()
losses.backward()
optimizer.step()
time_meters['model_backward_time'].update((time.time() - tictoc))
loss_dict['loss_overall'] = float(losses)
for (k, v) in loss_dict.items():
loss_meters[k].update(((float(v) * weight_dict[k]) if (k in weight_dict) else float(v)))
tictoc = time.time()
if (args.debug and (idx == 3)):
break
return (time_meters, loss_meters) |
def wheel_version(wheel_data):
version_text = wheel_data['Wheel-Version']
if (version_text is None):
raise UnsupportedWheel('WHEEL is missing Wheel-Version')
version = version_text.strip()
try:
return tuple(map(int, version.split('.')))
except ValueError:
raise UnsupportedWheel('invalid Wheel-Version: {!r}'.format(version)) |
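# Hedged usage sketch for wheel_version above: it parses the Wheel-Version header from a wheel's
# WHEEL metadata into an integer tuple. The plain dicts below are illustrative; in pip the
# metadata normally comes from the parsed WHEEL file, and UnsupportedWheel is pip's own exception.
assert wheel_version({'Wheel-Version': '1.0'}) == (1, 0)
assert wheel_version({'Wheel-Version': ' 1.9 '}) == (1, 9) |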
class DenseFlatIndexer(DenseIndexer):
def __init__(self, buffer_size: int=50000):
super(DenseFlatIndexer, self).__init__(buffer_size=buffer_size)
def init_index(self, vector_sz: int):
self.index = faiss.IndexFlatIP(vector_sz)
def index_data(self, data: List[Tuple[(object, np.array)]]):
n = len(data)
for i in range(0, n, self.buffer_size):
db_ids = [t[0] for t in data[i:(i + self.buffer_size)]]
vectors = [np.reshape(t[1], (1, (- 1))) for t in data[i:(i + self.buffer_size)]]
vectors = np.concatenate(vectors, axis=0)
total_data = self._update_id_mapping(db_ids)
self.index.add(vectors)
logger.info('data indexed %d', total_data)
indexed_cnt = len(self.index_id_to_db_id)
logger.info('Total data indexed %d', indexed_cnt)
def search_knn(self, query_vectors: np.array, top_docs: int) -> List[Tuple[(List[object], List[float])]]:
(scores, indexes) = self.index.search(query_vectors, top_docs)
db_ids = [[self.index_id_to_db_id[i] for i in query_top_idxs] for query_top_idxs in indexes]
result = [(db_ids[i], scores[i]) for i in range(len(db_ids))]
return result
def get_index_name(self):
return 'flat_index' |
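# Hedged usage sketch for DenseFlatIndexer above, assuming faiss is installed and that the DPR
# base class DenseIndexer provides buffer_size, _update_id_mapping and index_id_to_db_id as in
# the original repository. Vector size, ids and shapes are illustrative only.
import numpy as np
indexer = DenseFlatIndexer(buffer_size=2)
indexer.init_index(vector_sz=4)
data = [(f'doc{i}', np.random.rand(4).astype(np.float32)) for i in range(5)]
indexer.index_data(data)
queries = np.random.rand(2, 4).astype(np.float32)
hits = indexer.search_knn(queries, top_docs=3)  # one (doc_ids, scores) pair per query |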
def main():
(examples, label_list) = get_data(task=args.task, train_num_per_class=args.train_num_per_class, dev_num_per_class=args.dev_num_per_class, imbalance_rate=args.imbalance_rate, data_seed=args.data_seed)
if (args.task in ['sst-2', 'sst-5']):
classifier = Classifier(label_list=label_list, ren=True, norm_fn='linear', device=device)
classifier.get_optimizer(learning_rate=args.learning_rate)
else:
classifier = ImageClassifier(pretrained=args.resnet_pretrained, ren=True)
classifier.get_optimizer(learning_rate=args.image_lr, momentum=args.image_momentum, weight_decay=args.image_weight_decay)
for split in ['train', 'dev', 'test']:
classifier.load_data(set_type=split, examples=examples[split], batch_size=args.batch_size, shuffle=(split != 'test'))
print(('=' * 60), '\n', 'Pre-training', '\n', ('=' * 60), sep='')
for epoch in range(args.pretrain_epochs):
classifier.pretrain_epoch()
dev_acc = classifier.evaluate('dev')
print('Pre-train Epoch {}, Dev Acc: {:.4f}'.format(epoch, (100.0 * dev_acc)))
print(('=' * 60), '\n', 'Training', '\n', ('=' * 60), sep='')
(best_dev_acc, final_test_acc) = ((- 1.0), (- 1.0))
for epoch in range(args.epochs):
classifier.train_epoch()
dev_acc = classifier.evaluate('dev')
if (epoch >= args.min_epochs):
do_test = (dev_acc > best_dev_acc)
best_dev_acc = max(best_dev_acc, dev_acc)
else:
do_test = False
print('Epoch {}, Dev Acc: {:.4f}, Best Ever: {:.4f}'.format(epoch, (100.0 * dev_acc), (100.0 * best_dev_acc)))
if do_test:
final_test_acc = classifier.evaluate('test')
print('Test Acc: {:.4f}'.format((100.0 * final_test_acc)))
print('Final Dev Acc: {:.4f}, Final Test Acc: {:.4f}'.format((100.0 * best_dev_acc), (100.0 * final_test_acc))) |
def is_available() -> bool:
if (not hasattr(torch._C, '_cuda_getDeviceCount')):
return False
return (torch._C._cuda_getDeviceCount() > 0) |
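# Hedged usage sketch: is_available above reports whether libtorch sees at least one CUDA device,
# guarding against builds where _cuda_getDeviceCount is absent. A typical guarded device pick
# might look like this (illustrative only).
import torch
device = torch.device('cuda' if is_available() else 'cpu')
x = torch.zeros(3, device=device) |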
@torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)
def ml_soft_nms(dets, scores, labels, sigma=0.5, overlap_thresh=0.3, score_thresh=0.001, method='linear', topk=0):
assert (method in SOFT_NMS_METHODS), 'Unknown soft_nms method: {}'.format(method)
return _C.ml_soft_nms(dets, scores, labels, sigma, overlap_thresh, score_thresh, SOFT_NMS_METHODS[method], topk) |
class TimeBinBSM(BSM):
def __init__(self, name, timeline, phase_error=0, detectors=None):
super().__init__(name, timeline, phase_error, detectors)
self.encoding = 'time_bin'
self.encoding_type = time_bin
self.last_res = [(- 1), (- 1)]
assert (len(self.detectors) == 2)
def get(self, photon, **kwargs):
super().get(photon)
if (len(self.photons) != 2):
return
if (self.get_generator().random() < self.phase_error):
self.photons[1].apply_phase_error()
self.photons[0].combine_state(self.photons[1])
res = Photon.measure_multiple(self.bell_basis, self.photons, self.get_generator())
if ((res == 0) or (res == 1)):
return
early_time = self.timeline.now()
late_time = (early_time + self.encoding_type['bin_separation'])
if (res == 2):
detector_num = self.get_generator().choice([0, 1])
process = Process(self.detectors[detector_num], 'get', [])
event = Event(int(round(early_time)), process)
self.timeline.schedule(event)
process = Process(self.detectors[detector_num], 'get', [])
event = Event(int(round(late_time)), process)
self.timeline.schedule(event)
elif (res == 3):
detector_num = self.get_generator().choice([0, 1])
process = Process(self.detectors[detector_num], 'get', [])
event = Event(int(round(early_time)), process)
self.timeline.schedule(event)
process = Process(self.detectors[(1 - detector_num)], 'get', [])
event = Event(int(round(late_time)), process)
self.timeline.schedule(event)
else:
raise Exception('Invalid result from photon.measure_multiple')
def trigger(self, detector: Detector, info: Dict[(str, Any)]):
detector_num = self.detectors.index(detector)
time = info['time']
if (round(((time - self.last_res[0]) / self.encoding_type['bin_separation'])) == 1):
if (detector_num == self.last_res[1]):
info = {'entity': 'BSM', 'info_type': 'BSM_res', 'res': 0, 'time': time}
self.notify(info)
else:
info = {'entity': 'BSM', 'info_type': 'BSM_res', 'res': 1, 'time': time}
self.notify(info)
self.last_res = [time, detector_num] |
def _wrapper_count_operators(model: nn.Module, inputs: list, mode: str, **kwargs) -> typing.DefaultDict[(str, float)]:
supported_ops = {k: (lambda *args, **kwargs: {}) for k in _IGNORED_OPS}
supported_ops.update(kwargs.pop('supported_ops', {}))
kwargs['supported_ops'] = supported_ops
assert (len(inputs) == 1), 'Please use batch size=1'
tensor_input = inputs[0]['image']
inputs = [{'image': tensor_input}]
old_train = model.training
if isinstance(model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel)):
model = model.module
wrapper = TracingAdapter(model, inputs)
wrapper.eval()
if (mode == FLOPS_MODE):
ret = flop_count(wrapper, (tensor_input,), **kwargs)
elif (mode == ACTIVATIONS_MODE):
ret = activation_count(wrapper, (tensor_input,), **kwargs)
else:
raise NotImplementedError('Count for mode {} is not supported yet.'.format(mode))
if isinstance(ret, tuple):
ret = ret[0]
model.train(old_train)
return ret |
def tokenize(corpus, remove_list=remove_items):
for text in corpus:
doc = nlp.tokenizer(text)
tokens = [str(token.lemma_).lower() for token in doc if (token.is_alpha and (token.text.lower() not in remove_list) and (len(token.text) > 1))]
(yield tokens) |
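# Hedged usage sketch for tokenize above. It relies on a module-level spaCy pipeline `nlp` and
# stop-list `remove_items` as in the original script; the model name and stop-list here are
# illustrative. Note that the generator runs only nlp.tokenizer, so downstream pipeline
# components such as the lemmatizer are not applied and token.lemma_ may be unset for many tokens.
import spacy
nlp = spacy.load('en_core_web_sm')
remove_items = {'the', 'and', 'of'}
corpus = ['The cats are sleeping and dreaming.', 'Another short document of text.']
for tokens in tokenize(corpus, remove_list=remove_items):
    print(tokens) |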
class Mask(Transform):
def __init__(self, mask_key: str, mask_value: int=0, masking_value: float=0.0, loop_axis=None, entries=(defs.KEY_IMAGES, defs.KEY_LABELS)) -> None:
super().__init__()
self.mask_key = mask_key
self.mask_value = mask_value
self.masking_value = masking_value
self.loop_axis = loop_axis
self.entries = entries
def __call__(self, sample: dict) -> dict:
np_mask = check_and_return(sample[self.mask_key], np.ndarray)
for entry in self.entries:
if (entry not in sample):
if raise_error_if_entry_not_extracted:
raise ValueError(ENTRY_NOT_EXTRACTED_ERR_MSG.format(entry))
continue
np_entry = check_and_return(sample[entry], np.ndarray)
if (np_mask.shape == np_entry.shape):
np_entry[(np_mask == self.mask_value)] = self.masking_value
else:
mask_for_np_entry = np.repeat(np.expand_dims(np_mask, self.loop_axis), np_entry.shape[self.loop_axis], axis=self.loop_axis)
np_entry[(mask_for_np_entry == self.mask_value)] = self.masking_value
sample[entry] = np_entry
return sample |
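# Hedged usage sketch for the Mask transform above, assuming the pymia helpers used by the class
# (check_and_return, defs) are importable as in the original package. Keys, shapes and values are
# illustrative; entries is passed explicitly so the example does not depend on defs constants.
import numpy as np
sample = {'images': np.array([[1.0, 2.0], [3.0, 4.0]]), 'brain_mask': np.array([[0, 1], [1, 0]])}
t = Mask(mask_key='brain_mask', mask_value=0, masking_value=-1.0, entries=('images',))
out = t(sample)
# voxels where brain_mask == 0 are replaced by -1.0, giving [[-1., 2.], [3., -1.]] |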
@pytest.mark.parametrize('cls_name', ['Pickleable', 'PickleableNew'])
def test_roundtrip(cls_name):
cls = getattr(m, cls_name)
p = cls('test_value')
p.setExtra1(15)
p.setExtra2(48)
data = pickle.dumps(p, 2)
p2 = pickle.loads(data)
assert (p2.value() == p.value())
assert (p2.extra1() == p.extra1())
assert (p2.extra2() == p.extra2()) |
def transform_pt_cluewsc(example, label_normalize_dict=None, is_test=False):
if is_test:
example['label_length'] = 2
text = example['text']
span1_text = example['target']['span1_text']
span2_text = example['target']['span2_text']
example['sentence1'] = (((text + span2_text) + '') + span1_text)
return example
else:
origin_label = example['label']
example['text_label'] = label_normalize_dict[origin_label]
text = example['text']
span1_text = example['target']['span1_text']
span2_text = example['target']['span2_text']
example['sentence1'] = (((text + span2_text) + '') + span1_text)
return example |
def register_Ns3ServiceFlow_methods(root_module, cls):
cls.add_constructor([param('ns3::Tlv', 'tlv')])
cls.add_constructor([])
cls.add_constructor([param('ns3::ServiceFlow::Direction', 'direction')])
cls.add_constructor([param('ns3::ServiceFlow const &', 'sf')])
cls.add_constructor([param('uint32_t', 'sfid'), param('ns3::ServiceFlow::Direction', 'direction'), param('ns3::Ptr< ns3::WimaxConnection >', 'connection')])
cls.add_method('CheckClassifierMatch', 'bool', [param('ns3::Ipv4Address', 'srcAddress'), param('ns3::Ipv4Address', 'dstAddress'), param('uint16_t', 'srcPort'), param('uint16_t', 'dstPort'), param('uint8_t', 'proto')], is_const=True)
cls.add_method('CleanUpQueue', 'void', [])
cls.add_method('CopyParametersFrom', 'void', [param('ns3::ServiceFlow', 'sf')])
cls.add_method('GetArqBlockLifeTime', 'uint16_t', [], is_const=True)
cls.add_method('GetArqBlockSize', 'uint16_t', [], is_const=True)
cls.add_method('GetArqDeliverInOrder', 'uint8_t', [], is_const=True)
cls.add_method('GetArqEnable', 'uint8_t', [], is_const=True)
cls.add_method('GetArqPurgeTimeout', 'uint16_t', [], is_const=True)
cls.add_method('GetArqRetryTimeoutRx', 'uint16_t', [], is_const=True)
cls.add_method('GetArqRetryTimeoutTx', 'uint16_t', [], is_const=True)
cls.add_method('GetArqSyncLoss', 'uint16_t', [], is_const=True)
cls.add_method('GetArqWindowSize', 'uint16_t', [], is_const=True)
cls.add_method('GetCid', 'uint16_t', [], is_const=True)
cls.add_method('GetConnection', 'ns3::Ptr< ns3::WimaxConnection >', [], is_const=True)
cls.add_method('GetConvergenceSublayerParam', 'ns3::CsParameters', [], is_const=True)
cls.add_method('GetCsSpecification', 'ns3::ServiceFlow::CsSpecification', [], is_const=True)
cls.add_method('GetDirection', 'ns3::ServiceFlow::Direction', [], is_const=True)
cls.add_method('GetFixedversusVariableSduIndicator', 'uint8_t', [], is_const=True)
cls.add_method('GetIsEnabled', 'bool', [], is_const=True)
cls.add_method('GetIsMulticast', 'bool', [], is_const=True)
cls.add_method('GetMaxSustainedTrafficRate', 'uint32_t', [], is_const=True)
cls.add_method('GetMaxTrafficBurst', 'uint32_t', [], is_const=True)
cls.add_method('GetMaximumLatency', 'uint32_t', [], is_const=True)
cls.add_method('GetMinReservedTrafficRate', 'uint32_t', [], is_const=True)
cls.add_method('GetMinTolerableTrafficRate', 'uint32_t', [], is_const=True)
cls.add_method('GetModulation', 'ns3::WimaxPhy::ModulationType', [], is_const=True)
cls.add_method('GetQosParamSetType', 'uint8_t', [], is_const=True)
cls.add_method('GetQueue', 'ns3::Ptr< ns3::WimaxMacQueue >', [], is_const=True)
cls.add_method('GetRecord', 'ns3::ServiceFlowRecord *', [], is_const=True)
cls.add_method('GetRequestTransmissionPolicy', 'uint32_t', [], is_const=True)
cls.add_method('GetSchedulingType', 'ns3::ServiceFlow::SchedulingType', [], is_const=True)
cls.add_method('GetSchedulingTypeStr', 'char *', [], is_const=True)
cls.add_method('GetSduSize', 'uint8_t', [], is_const=True)
cls.add_method('GetServiceClassName', 'std::string', [], is_const=True)
cls.add_method('GetServiceSchedulingType', 'ns3::ServiceFlow::SchedulingType', [], is_const=True)
cls.add_method('GetSfid', 'uint32_t', [], is_const=True)
cls.add_method('GetTargetSAID', 'uint16_t', [], is_const=True)
cls.add_method('GetToleratedJitter', 'uint32_t', [], is_const=True)
cls.add_method('GetTrafficPriority', 'uint8_t', [], is_const=True)
cls.add_method('GetType', 'ns3::ServiceFlow::Type', [], is_const=True)
cls.add_method('GetUnsolicitedGrantInterval', 'uint16_t', [], is_const=True)
cls.add_method('GetUnsolicitedPollingInterval', 'uint16_t', [], is_const=True)
cls.add_method('HasPackets', 'bool', [], is_const=True)
cls.add_method('HasPackets', 'bool', [param('ns3::MacHeaderType::HeaderType', 'packetType')], is_const=True)
cls.add_method('InitValues', 'void', [])
cls.add_method('PrintQoSParameters', 'void', [], is_const=True)
cls.add_method('SetArqBlockLifeTime', 'void', [param('uint16_t', 'lifeTime')])
cls.add_method('SetArqBlockSize', 'void', [param('uint16_t', 'size')])
cls.add_method('SetArqDeliverInOrder', 'void', [param('uint8_t', 'inOrder')])
cls.add_method('SetArqEnable', 'void', [param('uint8_t', 'arqEnable')])
cls.add_method('SetArqPurgeTimeout', 'void', [param('uint16_t', 'timeout')])
cls.add_method('SetArqRetryTimeoutRx', 'void', [param('uint16_t', 'timeout')])
cls.add_method('SetArqRetryTimeoutTx', 'void', [param('uint16_t', 'timeout')])
cls.add_method('SetArqSyncLoss', 'void', [param('uint16_t', 'syncLoss')])
cls.add_method('SetArqWindowSize', 'void', [param('uint16_t', 'arqWindowSize')])
cls.add_method('SetConnection', 'void', [param('ns3::Ptr< ns3::WimaxConnection >', 'connection')])
cls.add_method('SetConvergenceSublayerParam', 'void', [param('ns3::CsParameters', 'csparam')])
cls.add_method('SetCsSpecification', 'void', [param('ns3::ServiceFlow::CsSpecification', 'spec')])
cls.add_method('SetDirection', 'void', [param('ns3::ServiceFlow::Direction', 'direction')])
cls.add_method('SetFixedversusVariableSduIndicator', 'void', [param('uint8_t', 'sduIndicator')])
cls.add_method('SetIsEnabled', 'void', [param('bool', 'isEnabled')])
cls.add_method('SetIsMulticast', 'void', [param('bool', 'isMulticast')])
cls.add_method('SetMaxSustainedTrafficRate', 'void', [param('uint32_t', 'maxSustainedRate')])
cls.add_method('SetMaxTrafficBurst', 'void', [param('uint32_t', 'maxTrafficBurst')])
cls.add_method('SetMaximumLatency', 'void', [param('uint32_t', 'MaximumLatency')])
cls.add_method('SetMinReservedTrafficRate', 'void', [param('uint32_t', 'minResvRate')])
cls.add_method('SetMinTolerableTrafficRate', 'void', [param('uint32_t', 'minJitter')])
cls.add_method('SetModulation', 'void', [param('ns3::WimaxPhy::ModulationType', 'modulationType')])
cls.add_method('SetQosParamSetType', 'void', [param('uint8_t', 'type')])
cls.add_method('SetRecord', 'void', [param('ns3::ServiceFlowRecord *', 'record')])
cls.add_method('SetRequestTransmissionPolicy', 'void', [param('uint32_t', 'policy')])
cls.add_method('SetSduSize', 'void', [param('uint8_t', 'sduSize')])
cls.add_method('SetServiceClassName', 'void', [param('std::string', 'name')])
cls.add_method('SetServiceSchedulingType', 'void', [param('ns3::ServiceFlow::SchedulingType', 'schedType')])
cls.add_method('SetSfid', 'void', [param('uint32_t', 'sfid')])
cls.add_method('SetTargetSAID', 'void', [param('uint16_t', 'targetSaid')])
cls.add_method('SetToleratedJitter', 'void', [param('uint32_t', 'jitter')])
cls.add_method('SetTrafficPriority', 'void', [param('uint8_t', 'priority')])
cls.add_method('SetType', 'void', [param('ns3::ServiceFlow::Type', 'type')])
cls.add_method('SetUnsolicitedGrantInterval', 'void', [param('uint16_t', 'unsolicitedGrantInterval')])
cls.add_method('SetUnsolicitedPollingInterval', 'void', [param('uint16_t', 'unsolicitedPollingInterval')])
cls.add_method('ToTlv', 'ns3::Tlv', [], is_const=True)
return |
@pytest.mark.pure
def test_cast_float_to_int(sdfg_name):
sdfg = dace.SDFG(sdfg_name)
sdfg.add_array('X', [2, 4], dace.float32)
sdfg.add_array('__return', [2, 4], dace.int32)
state = sdfg.add_state()
access_X = state.add_access('X')
access_result = state.add_access('__return')
op_node = donnx.ONNXCast('Cast')
op_node.to = converters.typeclass_to_onnx_tensor_type_int(dace.int32)
state.add_node(op_node)
state.add_edge(access_X, None, op_node, 'input', sdfg.make_array_memlet('X'))
state.add_edge(op_node, 'output', access_result, None, sdfg.make_array_memlet('__return'))
X = np.random.normal(scale=10, size=(2, 4)).astype(np.float32)
sdfg.expand_library_nodes()
assert any((isinstance(n, dace.nodes.MapEntry) for (n, _) in sdfg.all_nodes_recursive()))
result = sdfg(X=X)
assert_allclose(X.astype(np.int32), result) |
def sort_params(model, hook):
hooks = []
if ('GP' in model.lat_dist.name):
h1 = model.lat_dist.nu.register_hook(hook)
h2 = model.lat_dist._scale.register_hook(hook)
hooks.append(h1)
hooks.append(h2)
else:
for prm in model.lat_dist.parameters():
h = prm.register_hook(hook)
hooks.append(h)
params0 = list(itertools.chain.from_iterable([model.lat_dist.gmu_parameters(), model.svgp.g0_parameters()]))
params1 = list(itertools.chain.from_iterable([model.lat_dist.concentration_parameters(), model.lprior.parameters(), model.svgp.g1_parameters()]))
params = [{'params': params0}, {'params': params1}]
return (params, hooks) |
class Visualization(object):
def __init__(self, seq_info, update_ms):
self.view_ls = list(seq_info.keys())
key0 = self.view_ls[0]
image_shape = seq_info[key0]['image_size'][::(- 1)]
aspect_ratio = (float(image_shape[1]) / image_shape[0])
image_shape = (1024, int((aspect_ratio * 1024)))
self.viewer = ImageViewer(update_ms, image_shape, 'visual')
self.viewer.thickness = 2
self.frame_idx = seq_info[key0]['min_frame_idx']
self.last_idx = seq_info[key0]['max_frame_idx']
self.len = len(self.view_ls)
self.view_id = 0
def run(self, frame_matching, frame_callback, frame_display):
self.viewer.run((lambda : self._update_fun(frame_matching, frame_callback, frame_display)))
def _update_fun(self, frame_matching, frame_callback, frame_display):
if (self.frame_idx > self.last_idx):
return False
if (self.view_id == 0):
frame_matching(self.frame_idx)
frame_callback(self.frame_idx)
frame_display(self, self.frame_idx, self.view_ls[self.view_id])
if (self.view_id < self.len):
self.view_id += 1
if (self.view_id == self.len):
self.view_id = 0
self.frame_idx += 1
return True
def set_image(self, image, view, frame_id):
self.viewer.image = image
self.viewer.view = view
self.viewer.frame_id = frame_id
def draw_groundtruth(self, track_ids, boxes):
self.viewer.thickness = 2
for (track_id, box) in zip(track_ids, boxes):
self.viewer.color = create_unique_color_uchar(track_id)
self.viewer.rectangle(*box.astype(int), label=str(track_id))
def draw_detections(self, detections):
self.viewer.thickness = 2
self.viewer.color = (0, 0, 255)
for (i, detection) in enumerate(detections):
self.viewer.rectangle(*detection.tlwh)
def draw_trackers(self, tracks):
self.viewer.thickness = 2
for track in tracks:
if ((not track.is_confirmed()) or (track.time_since_update > 0)):
continue
self.viewer.color = create_unique_color_uchar(track.track_id)
self.viewer.rectangle(*track.to_tlwh().astype(int), label=str(track.track_id)) |
@dataclasses.dataclass()
class DiscreteBCQConfig(LearnableConfig):
learning_rate: float = 6.25e-05
optim_factory: OptimizerFactory = make_optimizer_field()
encoder_factory: EncoderFactory = make_encoder_field()
q_func_factory: QFunctionFactory = make_q_func_field()
batch_size: int = 32
gamma: float = 0.99
n_critics: int = 1
action_flexibility: float = 0.3
beta: float = 0.5
target_update_interval: int = 8000
share_encoder: bool = True
def create(self, device: DeviceArg=False) -> 'DiscreteBCQ':
return DiscreteBCQ(self, device)
@staticmethod
def get_type() -> str:
return 'discrete_bcq' |
def mapfission_sdfg():
sdfg = dace.SDFG('mapfission')
sdfg.add_array('A', [4], dace.float64)
sdfg.add_array('B', [2], dace.float64)
sdfg.add_scalar('scal', dace.float64, transient=True)
sdfg.add_scalar('s1', dace.float64, transient=True)
sdfg.add_transient('s2', [2], dace.float64)
sdfg.add_transient('s3out', [1], dace.float64)
state = sdfg.add_state()
rnode = state.add_read('A')
(ome, omx) = state.add_map('outer', dict(i='0:2'))
t1 = state.add_tasklet('one', {'a'}, {'b'}, 'b = a[0] + a[1]')
(ime2, imx2) = state.add_map('inner', dict(j='0:2'))
t2 = state.add_tasklet('two', {'a'}, {'b'}, 'b = a * 2')
s24node = state.add_access('s2')
s34node = state.add_access('s3out')
(ime3, imx3) = state.add_map('inner', dict(j='0:2'))
t3 = state.add_tasklet('three', {'a'}, {'b'}, 'b = a[0] * 3')
scalar = state.add_tasklet('scalar', {}, {'out'}, 'out = 5.0')
t4 = state.add_tasklet('four', {'ione', 'itwo', 'ithree', 'sc'}, {'out'}, 'out = ione + itwo[0] * itwo[1] + ithree + sc')
wnode = state.add_write('B')
state.add_nedge(ome, scalar, dace.Memlet())
state.add_memlet_path(rnode, ome, t1, memlet=dace.Memlet.simple('A', '2*i:2*i+2'), dst_conn='a')
state.add_memlet_path(rnode, ome, ime2, t2, memlet=dace.Memlet.simple('A', '2*i+j'), dst_conn='a')
state.add_memlet_path(t2, imx2, s24node, memlet=dace.Memlet.simple('s2', 'j'), src_conn='b')
state.add_memlet_path(rnode, ome, ime3, t3, memlet=dace.Memlet.simple('A', '2*i:2*i+2'), dst_conn='a')
state.add_memlet_path(t3, imx3, s34node, memlet=dace.Memlet.simple('s3out', '0'), src_conn='b')
state.add_edge(t1, 'b', t4, 'ione', dace.Memlet.simple('s1', '0'))
state.add_edge(s24node, None, t4, 'itwo', dace.Memlet.simple('s2', '0:2'))
state.add_edge(s34node, None, t4, 'ithree', dace.Memlet.simple('s3out', '0'))
state.add_edge(scalar, 'out', t4, 'sc', dace.Memlet.simple('scal', '0'))
state.add_memlet_path(t4, omx, wnode, memlet=dace.Memlet.simple('B', 'i'), src_conn='out')
sdfg.validate()
return sdfg |
class TestPyTorchHelper(unittest.TestCase):
def setUp(self):
self.helper = PyTorchHelper()
def test_increment_average(self):
model = {'layer1': np.array([1, 2, 3])}
model_next = {'layer1': np.array([4, 5, 6])}
a = 10
W = 20
result = self.helper.increment_average(model, model_next, a, W)
np.testing.assert_array_equal(result['layer1'], np.array([2.5, 3.5, 4.5]))
model = {'layer1': [1, 2, 3]}
model_next = {'layer1': [4, 5, 6]}
a = 10
W = 20
with self.assertRaises(TypeError):
result = self.helper.increment_average(model, model_next, a, W)
def test_save_load(self):
model = {'layer1': np.array([1, 2, 3])}
self.helper.save(model, 'test_model')
self.assertTrue(os.path.exists('test_model.npz'))
result = self.helper.load('test_model.npz')
np.testing.assert_array_equal(result['layer1'], np.array([1, 2, 3]))
os.remove('test_model.npz') |
('random_crop')
def random_crop(cfg, **kwargs):
size = (kwargs['input_size'] if (kwargs['input_size'] is not None) else cfg.INPUT_SIZE)
return transforms.RandomCrop(size, padding=cfg.TRANSFORMS.PROCESS_DETAIL.RANDOM_CROP.PADDING) |
def filter_invalid_unicode_from_table(table):
if (not hasattr(table, 'table_id')):
table.table_id = 0
for (row_index, row) in table.iterrows():
for (col_index, cell) in enumerate(row):
(cell, is_invalid) = filter_invalid_unicode(cell)
if is_invalid:
logging.warning('Scrub an invalid table body table_id: %s, row_index: %d, col_index: %d', table.table_id, row_index, col_index)
for (col_index, column) in enumerate(table.columns):
(column, is_invalid) = filter_invalid_unicode(column)
if is_invalid:
logging.warning('Scrub an invalid table header table_id: %s, col_index: %d', table.table_id, col_index) |
class SlateCascadeDoublyRobust(BaseSlateOffPolicyEstimator):
len_list: int
n_unique_action: int
estimator_name: str = 'cascade-dr'
def __post_init__(self):
check_scalar(self.n_unique_action, 'n_unique_action', int, min_val=1)
def _estimate_round_rewards(self, action: np.ndarray, reward: np.ndarray, position: np.ndarray, behavior_policy_pscore: np.ndarray, evaluation_policy_pscore: np.ndarray, q_hat: np.ndarray, evaluation_policy_action_dist: np.ndarray, **kwargs) -> np.ndarray:
q_hat_3d = q_hat.reshape(((- 1), self.len_list, self.n_unique_action))
q_hat_for_observed_action = []
for i in range(self.n_rounds_):
for pos_ in range(self.len_list):
q_hat_for_observed_action.append(q_hat_3d[(i, pos_, action[((i * self.len_list) + pos_)])])
q_hat_for_observed_action = np.array(q_hat_for_observed_action)
expected_q_hat_under_eval_policy = (evaluation_policy_action_dist * q_hat).reshape(((- 1), self.len_list, self.n_unique_action)).sum(axis=2).flatten()
iw = (evaluation_policy_pscore / behavior_policy_pscore)
iw_prev = np.roll(iw, 1)
iw_prev[np.array([(i * self.len_list) for i in range(self.n_rounds_)])] = 1
estimated_rewards = ((iw * (reward - q_hat_for_observed_action)) + (iw_prev * expected_q_hat_under_eval_policy))
return estimated_rewards
def estimate_policy_value(self, slate_id: np.ndarray, action: np.ndarray, reward: np.ndarray, position: np.ndarray, pscore_cascade: np.ndarray, evaluation_policy_pscore_cascade: np.ndarray, q_hat: np.ndarray, evaluation_policy_action_dist: np.ndarray, **kwargs) -> float:
check_cascade_dr_inputs(n_unique_action=self.n_unique_action, slate_id=slate_id, action=action, reward=reward, position=position, pscore_cascade=pscore_cascade, evaluation_policy_pscore_cascade=evaluation_policy_pscore_cascade, q_hat=q_hat, evaluation_policy_action_dist=evaluation_policy_action_dist)
self.n_rounds_ = np.unique(slate_id).shape[0]
return (self._estimate_round_rewards(action=action, reward=reward, position=position, behavior_policy_pscore=pscore_cascade, evaluation_policy_pscore=evaluation_policy_pscore_cascade, q_hat=q_hat, evaluation_policy_action_dist=evaluation_policy_action_dist).sum() / self.n_rounds_)
def estimate_interval(self, slate_id: np.ndarray, action: np.ndarray, reward: np.ndarray, position: np.ndarray, pscore_cascade: np.ndarray, evaluation_policy_pscore_cascade: np.ndarray, q_hat: np.ndarray, evaluation_policy_action_dist: np.ndarray, alpha: float=0.05, n_bootstrap_samples: int=10000, random_state: Optional[int]=None, **kwargs) -> Dict[(str, float)]:
check_cascade_dr_inputs(n_unique_action=self.n_unique_action, slate_id=slate_id, action=action, reward=reward, position=position, pscore_cascade=pscore_cascade, evaluation_policy_pscore_cascade=evaluation_policy_pscore_cascade, q_hat=q_hat, evaluation_policy_action_dist=evaluation_policy_action_dist)
self.n_rounds_ = np.unique(slate_id).shape[0]
estimated_rewards = self._estimate_round_rewards(action=action, reward=reward, position=position, behavior_policy_pscore=pscore_cascade, evaluation_policy_pscore=evaluation_policy_pscore_cascade, q_hat=q_hat, evaluation_policy_action_dist=evaluation_policy_action_dist)
return self._estimate_slate_confidence_interval_by_bootstrap(slate_id=slate_id, estimated_rewards=estimated_rewards, alpha=alpha, n_bootstrap_samples=n_bootstrap_samples, random_state=random_state)
def _estimate_slate_confidence_interval_by_bootstrap(self, slate_id: np.ndarray, estimated_rewards: np.ndarray, alpha: float=0.05, n_bootstrap_samples: int=10000, random_state: Optional[int]=None) -> Dict[(str, float)]:
unique_slate = np.unique(slate_id)
estimated_round_rewards = list()
for slate in unique_slate:
estimated_round_rewards.append(estimated_rewards[(slate_id == slate)].sum())
estimated_round_rewards = np.array(estimated_round_rewards)
return estimate_confidence_interval_by_bootstrap(samples=estimated_round_rewards, alpha=alpha, n_bootstrap_samples=n_bootstrap_samples, random_state=random_state) |
class HubModel():
def __init__(self, local_dir: str, metadata: Optional[Union[(HubMetadata, str)]]=None, model_card: Optional[Union[(HubModelCardHelper, ModelCard, str)]]=None):
self._local_dir = local_dir
self._model_path = f'{self._local_dir}/model.pt'
self._adata_path = f'{self._local_dir}/adata.h5ad'
self._large_training_adata_path = f'{self._local_dir}/large_training_adata.h5ad'
self._model = None
self._adata = None
self._large_training_adata = None
metadata_path = f'{self._local_dir}/{_SCVI_HUB.METADATA_FILE_NAME}'
if isinstance(metadata, HubMetadata):
self._metadata = metadata
elif (isinstance(metadata, str) or os.path.isfile(metadata_path)):
path = (metadata if isinstance(metadata, str) else metadata_path)
content = Path(path).read_text()
content_dict = json.loads(content)
self._metadata = HubMetadata(**content_dict)
else:
raise ValueError('No metadata found')
model_card_path = f'{self._local_dir}/{_SCVI_HUB.MODEL_CARD_FILE_NAME}'
if isinstance(model_card, HubModelCardHelper):
self._model_card = model_card.model_card
elif isinstance(model_card, ModelCard):
self._model_card = model_card
elif (isinstance(model_card, str) or os.path.isfile(model_card_path)):
path = (model_card if isinstance(model_card, str) else model_card_path)
content = Path(path).read_text()
self._model_card = ModelCard(content)
else:
raise ValueError('No model card found')
def push_to_huggingface_hub(self, repo_name: str, repo_token: str, repo_create: bool):
if (os.path.isfile(self._adata_path) and (os.path.getsize(self._adata_path) >= _SCVI_HUB.MAX_HF_UPLOAD_SIZE)):
raise ValueError('Dataset is too large to upload to the Model Hub. Please refer to scvi-tools tutorials for how to handle this case.')
if os.path.isfile(repo_token):
repo_token = Path(repo_token).read_text()
if repo_create:
create_repo(repo_name, token=repo_token)
api = HfApi()
self.model_card.push_to_hub(repo_name, token=repo_token)
api.upload_file(path_or_fileobj=self._model_path, path_in_repo=self._model_path.split('/')[(- 1)], repo_id=repo_name, token=repo_token)
if os.path.isfile(self._adata_path):
api.upload_file(path_or_fileobj=self._adata_path, path_in_repo=self._adata_path.split('/')[(- 1)], repo_id=repo_name, token=repo_token)
api.upload_file(path_or_fileobj=json.dumps(asdict(self.metadata), indent=4).encode(), path_in_repo=_SCVI_HUB.METADATA_FILE_NAME, repo_id=repo_name, token=repo_token)
@classmethod
def pull_from_huggingface_hub(cls, repo_name: str, cache_dir: Optional[str]=None, revision: Optional[str]=None, **kwargs):
if (revision is None):
warnings.warn('No revision was passed, so the default (latest) revision will be used.', UserWarning, stacklevel=settings.warnings_stacklevel)
snapshot_folder = snapshot_download(repo_id=repo_name, allow_patterns=['model.pt', 'adata.h5ad', _SCVI_HUB.METADATA_FILE_NAME], cache_dir=cache_dir, revision=revision, **kwargs)
model_card = ModelCard.load(repo_name)
return cls(snapshot_folder, model_card=model_card)
def __repr__(self):
def eval_obj(obj):
return ('No' if (obj is None) else 'Yes')
print(f'''HubModel with:
local_dir: {self._local_dir}
model loaded? {eval_obj(self._model)}
adata loaded? {eval_obj(self._adata)}
large_training_adata loaded? {eval_obj(self._large_training_adata)}
metadata:
{self.metadata}
model_card:''')
rich.print(Markdown(self.model_card.content.replace('\n', '\n\n')))
return ''
@property
def local_dir(self) -> str:
return self._local_dir
@property
def metadata(self) -> HubMetadata:
return self._metadata
@property
def model_card(self) -> ModelCard:
return self._model_card
@property
def model(self) -> BaseModelClass:
if (self._model is None):
self.load_model()
return self._model
@property
def adata(self) -> Optional[AnnData]:
if (self._adata is None):
self.read_adata()
return self._adata
@property
def large_training_adata(self) -> Optional[AnnData]:
if (self._large_training_adata is None):
self.read_large_training_adata()
return self._large_training_adata
def load_model(self, adata: Optional[AnnData]=None, accelerator: Optional[Union[(str, Accelerator)]]='auto', device: Optional[Union[(str, int)]]='auto'):
logger.info('Loading model...')
model_cls_name = self.metadata.model_cls_name
python_module = importlib.import_module(self.metadata.model_parent_module)
model_cls = getattr(python_module, model_cls_name)
if ((adata is not None) or os.path.isfile(self._adata_path)):
self._model = model_cls.load(os.path.dirname(self._model_path), adata=adata, accelerator=accelerator, device=device)
elif (self.large_training_adata is None):
raise ValueError('Could not find any dataset to load the model with. Either provide a dataset on disk or a url to download the data in the model card, or pass an `adata` to this method. See scvi-tools tutorials for more details.')
else:
self._model = model_cls.load(os.path.dirname(self._model_path), adata=self.large_training_adata, accelerator=accelerator, device=device)
def read_adata(self):
if os.path.isfile(self._adata_path):
logger.info('Reading adata...')
self._adata = anndata.read_h5ad(self._adata_path)
else:
logger.info('No data found on disk. Skipping...')
def read_large_training_adata(self):
training_data_url = self.metadata.training_data_url
if (training_data_url is not None):
logger.info(f'''Downloading large training dataset from this url:
{training_data_url}...''')
dn = Path(self._large_training_adata_path).parent.as_posix()
fn = Path(self._large_training_adata_path).name
url_parts = training_data_url.split('/')
url_last_part = (url_parts[(- 2)] if (url_parts[(- 1)] == '') else url_parts[(- 1)])
if url_last_part.endswith('.cxg'):
_ = cellxgene(training_data_url, fn, dn, return_path=True)
else:
_download(training_data_url, dn, fn)
logger.info('Reading large training data...')
self._large_training_adata = anndata.read_h5ad(self._large_training_adata_path)
else:
logger.info('No training_data_url found in the model card. Skipping...') |
class GraphBuilderTest(test_util.TensorFlowTestCase):
def setUp(self):
initial_task_context = os.path.join(FLAGS.test_srcdir, 'syntaxnet/testdata/context.pbtxt')
self._task_context = os.path.join(FLAGS.test_tmpdir, 'context.pbtxt')
with open(initial_task_context, 'r') as fin:
with open(self._task_context, 'w') as fout:
fout.write(fin.read().replace('SRCDIR', FLAGS.test_srcdir).replace('OUTPATH', FLAGS.test_tmpdir))
with self.test_session() as sess:
gen_parser_ops.lexicon_builder(task_context=self._task_context, corpus_name='training-corpus').run()
(self._num_features, self._num_feature_ids, _, self._num_actions) = sess.run(gen_parser_ops.feature_size(task_context=self._task_context, arg_prefix='brain_parser'))
def MakeBuilder(self, use_averaging=True, **kw_args):
return graph_builder.GreedyParser(self._num_actions, self._num_features, self._num_feature_ids, embedding_sizes=[8, 8, 8], hidden_layer_sizes=[32, 32], seed=42, gate_gradients=True, use_averaging=use_averaging, **kw_args)
def FindNode(self, name):
for node in tf.get_default_graph().as_graph_def().node:
if (node.name == name):
return node
return None
def NodeFound(self, name):
return (self.FindNode(name) is not None)
def testScope(self):
graph = tf.Graph()
with graph.as_default():
parser = self.MakeBuilder()
parser.AddTraining(self._task_context, batch_size=10, corpus_name='training-corpus')
parser.AddEvaluation(self._task_context, batch_size=2, corpus_name='tuning-corpus')
parser.AddSaver()
self.assertEqual(parser.training['logits'].name, 'training/logits:0')
self.assertTrue(self.NodeFound('training/logits'))
self.assertTrue(self.NodeFound('training/feature_0'))
self.assertTrue(self.NodeFound('training/feature_1'))
self.assertTrue(self.NodeFound('training/feature_2'))
self.assertFalse(self.NodeFound('training/feature_3'))
self.assertEqual(parser.evaluation['logits'].name, 'evaluation/logits:0')
self.assertTrue(self.NodeFound('evaluation/logits'))
self.assertTrue(self.NodeFound('save/restore_all'))
self.assertTrue(self.NodeFound('embedding_matrix_0'))
self.assertTrue(self.NodeFound('embedding_matrix_1'))
self.assertTrue(self.NodeFound('embedding_matrix_2'))
self.assertFalse(self.NodeFound('embedding_matrix_3'))
def testNestedScope(self):
graph = tf.Graph()
with graph.as_default():
with graph.name_scope('top'):
parser = self.MakeBuilder()
parser.AddTraining(self._task_context, batch_size=10, corpus_name='training-corpus')
parser.AddSaver()
self.assertTrue(self.NodeFound('top/training/logits'))
self.assertTrue(self.NodeFound('top/training/feature_0'))
self.assertFalse(self.NodeFound('top/save/restore_all'))
self.assertTrue(self.NodeFound('save/restore_all'))
def testUseCustomGraphs(self):
batch_size = 10
custom_train_graph = tf.Graph()
with custom_train_graph.as_default():
train_parser = self.MakeBuilder()
train_parser.AddTraining(self._task_context, batch_size, corpus_name='training-corpus')
custom_eval_graph = tf.Graph()
with custom_eval_graph.as_default():
eval_parser = self.MakeBuilder()
eval_parser.AddEvaluation(self._task_context, batch_size, corpus_name='tuning-corpus')
with self.test_session(graph=custom_train_graph) as sess:
self.assertTrue(self.NodeFound('training/logits'))
sess.run(train_parser.inits.values())
sess.run(['training/logits:0'])
with self.test_session(graph=custom_eval_graph) as sess:
self.assertFalse(self.NodeFound('training/logits'))
self.assertTrue(self.NodeFound('evaluation/logits'))
sess.run(eval_parser.inits.values())
sess.run(['evaluation/logits:0'])
def testTrainingAndEvalAreIndependent(self):
batch_size = 10
graph = tf.Graph()
with graph.as_default():
parser = self.MakeBuilder(use_averaging=False)
parser.AddTraining(self._task_context, batch_size, corpus_name='training-corpus')
parser.AddEvaluation(self._task_context, batch_size, corpus_name='tuning-corpus')
with self.test_session(graph=graph) as sess:
sess.run(parser.inits.values())
(eval_logits,) = sess.run([parser.evaluation['logits']])
(training_logits,) = sess.run([parser.training['logits']])
self.assertNear(abs((eval_logits - training_logits).sum()), 0, 1e-06)
for _ in range(5):
eval_logits = parser.evaluation['logits'].eval()
for _ in range(5):
(training_logits, _) = sess.run([parser.training['logits'], parser.training['train_op']])
self.assertGreater(abs((eval_logits - training_logits).sum()), 0, 0.001)
def testReproducibility(self):
batch_size = 10
def ComputeACost(graph):
with graph.as_default():
parser = self.MakeBuilder(use_averaging=False)
parser.AddTraining(self._task_context, batch_size, corpus_name='training-corpus')
parser.AddEvaluation(self._task_context, batch_size, corpus_name='tuning-corpus')
with self.test_session(graph=graph) as sess:
sess.run(parser.inits.values())
for _ in range(5):
(cost, _) = sess.run([parser.training['cost'], parser.training['train_op']])
return cost
cost1 = ComputeACost(tf.Graph())
cost2 = ComputeACost(tf.Graph())
self.assertNear(cost1, cost2, 1e-08)
def testAddTrainingAndEvalOrderIndependent(self):
batch_size = 10
graph1 = tf.Graph()
with graph1.as_default():
parser = self.MakeBuilder(use_averaging=False)
parser.AddTraining(self._task_context, batch_size, corpus_name='training-corpus')
parser.AddEvaluation(self._task_context, batch_size, corpus_name='tuning-corpus')
with self.test_session(graph=graph1) as sess:
sess.run(parser.inits.values())
metrics1 = None
for _ in range(50):
(cost1, _) = sess.run([parser.training['cost'], parser.training['train_op']])
em1 = parser.evaluation['eval_metrics'].eval()
metrics1 = ((metrics1 + em1) if (metrics1 is not None) else em1)
graph2 = tf.Graph()
with graph2.as_default():
parser = self.MakeBuilder(use_averaging=False)
parser.AddEvaluation(self._task_context, batch_size, corpus_name='tuning-corpus')
parser.AddTraining(self._task_context, batch_size, corpus_name='training-corpus')
with self.test_session(graph=graph2) as sess:
sess.run(parser.inits.values())
metrics2 = None
for _ in range(50):
(cost2, _) = sess.run([parser.training['cost'], parser.training['train_op']])
em2 = parser.evaluation['eval_metrics'].eval()
metrics2 = ((metrics2 + em2) if (metrics2 is not None) else em2)
self.assertNear(cost1, cost2, 1e-08)
self.assertEqual(abs((metrics1 - metrics2)).sum(), 0)
def testEvalMetrics(self):
batch_size = 10
graph = tf.Graph()
with graph.as_default():
parser = self.MakeBuilder()
parser.AddEvaluation(self._task_context, batch_size, corpus_name='tuning-corpus')
with self.test_session(graph=graph) as sess:
sess.run(parser.inits.values())
tokens = 0
correct_heads = 0
for _ in range(100):
eval_metrics = sess.run(parser.evaluation['eval_metrics'])
tokens += eval_metrics[0]
correct_heads += eval_metrics[1]
self.assertGreater(tokens, 0)
self.assertGreaterEqual(tokens, correct_heads)
self.assertGreaterEqual(correct_heads, 0)
def MakeSparseFeatures(self, ids, weights):
f = sparse_pb2.SparseFeatures()
for (i, w) in zip(ids, weights):
f.id.append(i)
f.weight.append(w)
return f.SerializeToString()
def testEmbeddingOp(self):
graph = tf.Graph()
with self.test_session(graph=graph):
params = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], tf.float32)
var = variables.Variable([self.MakeSparseFeatures([1, 2], [1.0, 1.0]), self.MakeSparseFeatures([], [])])
var.initializer.run()
embeddings = graph_builder.EmbeddingLookupFeatures(params, var, True).eval()
self.assertAllClose([[8.0, 10.0], [0.0, 0.0]], embeddings)
var = variables.Variable([self.MakeSparseFeatures([], []), self.MakeSparseFeatures([0, 2], [0.5, 2.0])])
var.initializer.run()
embeddings = graph_builder.EmbeddingLookupFeatures(params, var, True).eval()
self.assertAllClose([[0.0, 0.0], [10.5, 13.0]], embeddings)
def testOnlyTrainSomeParameters(self):
batch_size = 10
graph = tf.Graph()
with graph.as_default():
parser = self.MakeBuilder(use_averaging=False, only_train='softmax_bias')
parser.AddTraining(self._task_context, batch_size, corpus_name='training-corpus')
with self.test_session(graph=graph) as sess:
sess.run(parser.inits.values())
(bias0, weight0) = sess.run([parser.params['softmax_bias'], parser.params['softmax_weight']])
for _ in range(5):
(bias, weight, _) = sess.run([parser.params['softmax_bias'], parser.params['softmax_weight'], parser.training['train_op']])
self.assertAllEqual(weight, weight0)
self.assertGreater(abs((bias - bias0)).sum(), 0, 1e-05) |
def _pick_key_for_choices(letters_to_try: T.List[str], sub: T.Optional[int], is_unused: T.Callable[([str, T.Optional[int]], bool)], next_unused_sub: T.Callable[([str], int)]) -> GeneratedKey:
assert letters_to_try, 'letters_to_try should not be empty'
for letter in letters_to_try:
if is_unused(letter, sub):
return GeneratedKey(letter, sub)
if is_unused(letter.upper(), sub):
return GeneratedKey(letter.upper(), sub)
if (sub is not None):
if is_unused(letter, None):
return GeneratedKey(letter, None)
if is_unused(letter.upper(), None):
return GeneratedKey(letter.upper(), None)
letter = letters_to_try[0]
return GeneratedKey(letter, next_unused_sub(letter)) |
def data_loading(dataset):
current_path = op.dirname(op.abspath(__file__))
if (dataset == 'telco'):
data_telco_path = op.join(current_path, 'telco_churn.csv')
data = pd.read_csv(data_telco_path)
else:
raise ValueError('Dataset not found. Check the docstring for available values')
return data |
@pytest.mark.parametrize('length,max_seq_length,eos_token_id', [(5, None, None), (2, 6, (- 1)), (0, 6, (- 1))])
def test_len(tokenized_line: TokenizedLine, length: int):
assert (len(tokenized_line) == length)
assert (len(tokenized_line.tokens) == length) |
class Swish(nn.Module):
def __init__(self):
super(Swish, self).__init__()
def forward(self, inputs: Tensor) -> Tensor:
return (inputs * inputs.sigmoid()) |
def get_raw2scannetv2_label_map():
lines = [line.rstrip() for line in open('scannetv2-labels.combined.tsv')]
lines_0 = lines[0].split('\t')
print(lines_0)
print(len(lines))
lines = lines[1:]
raw2scannet = {}
for i in range(len(lines)):
label_classes_set = set(g_label_names)
elements = lines[i].split('\t')
raw_name = elements[1]
if (elements[1] != elements[2]):
print('{}: {} {}'.format(i, elements[1], elements[2]))
nyu40_name = elements[7]
if (nyu40_name not in label_classes_set):
raw2scannet[raw_name] = 'unannotated'
else:
raw2scannet[raw_name] = nyu40_name
return raw2scannet |
class Classifier():
def __init__(self):
arch = 'resnet50'
model_file = ('%s_places365.pth.tar' % arch)
if (not os.access(model_file, os.W_OK)):
weight_url = ('http://places2.csail.mit.edu/models_places365/' + model_file)
os.system(('wget ' + weight_url))
model = models.__dict__[arch](num_classes=365)
checkpoint = torch.load(model_file, map_location=(lambda storage, loc: storage))
state_dict = {str.replace(k, 'module.', ''): v for (k, v) in checkpoint['state_dict'].items()}
model.load_state_dict(state_dict)
model.cuda()
model.eval()
self.model = model
self.mean = [0.485, 0.456, 0.406]
self.std = [0.229, 0.224, 0.225]
self.trn = trn.Normalize(self.mean, self.std)
file_name = 'categories_places365.txt'
if (not os.access(file_name, os.W_OK)):
synset_url = 'https://raw.githubusercontent.com/csailvision/places365/master/categories_places365.txt'
os.system(('wget ' + synset_url))
classes = list()
with open(file_name) as class_file:
for line in class_file:
class_name = line.strip().split(' ')[0][3:]
classes.append(''.join(class_name.split('/')))
self.classes = classes
def get_name(self, id):
return self.classes[id]
def transform(self, x):
x = (F.interpolate(x, size=(224, 224)) / 255.0)
x = torch.stack([self.trn(xi) for xi in x]).cuda()
return x
def get_predictions_and_confidence(self, x):
x = self.transform(x)
logit = self.model.forward(x)
(values, ind) = logit.max(dim=1)
return (ind, values)
def get_predictions(self, x):
x = self.transform(x)
logit = self.model.forward(x)
return logit.argmax(dim=1) |
def simple_condition2(fib: dace.int32, F: dace.int32, i: dace.int32, N: dace.int32):
return ((fib < F) and (i < N)) |
class ToPILImage(object):
def __init__(self, mode=None):
self.mode = mode
def __call__(self, pic):
return F.to_pil_image(pic, self.mode)
def __repr__(self):
format_string = (self.__class__.__name__ + '(')
if (self.mode is not None):
format_string += 'mode={0}'.format(self.mode)
format_string += ')'
return format_string |
@dataclass(frozen=True)
class RunGroup(Field):
metric_groups: List[str] = field(default_factory=list)
subgroups: List[str] = field(default_factory=list)
sub_splits: Optional[List[str]] = None
subgroup_display_mode: str = BY_METRIC
subgroup_metric_groups_hidden: List[str] = field(default_factory=list)
environment: Dict[(str, str)] = field(default_factory=dict)
category: str = 'Scenarios'
visibility: str = ALL_GROUPS
taxonomy: Optional[TaxonomyInfo] = None
todo: bool = False
adapter_keys_shown: List[str] = field(default_factory=(lambda : ['model_deployment', 'model'])) |
class ModelParallelTransformerEncoderLayer(TransformerEncoderLayer):
def build_fc1(self, input_dim, output_dim):
return ColumnParallelLinear(input_dim, output_dim, gather_output=False)
def build_fc2(self, input_dim, output_dim):
return RowParallelLinear(input_dim, output_dim, input_is_parallel=True)
def build_self_attention(self, embed_dim, args, **unused_kwargs):
return ModelParallelMultiheadAttention(embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=True) |
class DistributedCutoutTuner():
def __init__(self, tuner: ct.CutoutTuner) -> None:
self._tuner = tuner
def optimize(self, measurements: int=30, **kwargs) -> Dict:
cutouts = OrderedDict()
existing_files = set()
for (cutout, cutout_hash) in self._tuner.cutouts():
cutouts[cutout_hash] = cutout
file_name = self._tuner.file_name(cutout_hash)
result = self._tuner.try_load(file_name)
if (result is not None):
existing_files.add(cutout_hash)
new_cutouts = []
for hash in cutouts:
if (hash not in existing_files):
new_cutouts.append(hash)
rank = optim_utils.get_world_rank()
num_ranks = optim_utils.get_world_size()
chunk_size = (len(new_cutouts) // max(num_ranks, 1))
chunks = list(optim_utils.partition(new_cutouts, chunk_size))
if (rank >= len(chunks)):
return
self._tuner.rank = rank
self._tuner.num_ranks = num_ranks
chunk = chunks[rank]
for hash in chunk:
cutout = cutouts[hash]
results = self._tuner.search(cutout=cutout, measurements=measurements, **kwargs)
file_name = self._tuner.file_name(hash)
with open(file_name, 'w') as fp:
json.dump(results, fp) |
def register_Ns3ThreeGppHttpServerTxBuffer_methods(root_module, cls):
cls.add_constructor([param('ns3::ThreeGppHttpServerTxBuffer const &', 'arg0')])
cls.add_constructor([])
cls.add_method('AddSocket', 'void', [param('ns3::Ptr< ns3::Socket >', 'socket')])
cls.add_method('CloseAllSockets', 'void', [])
cls.add_method('CloseSocket', 'void', [param('ns3::Ptr< ns3::Socket >', 'socket')])
cls.add_method('DepleteBufferSize', 'void', [param('ns3::Ptr< ns3::Socket >', 'socket'), param('uint32_t', 'amount')])
cls.add_method('GetBufferContentType', 'ns3::ThreeGppHttpHeader::ContentType_t', [param('ns3::Ptr< ns3::Socket >', 'socket')], is_const=True)
cls.add_method('GetBufferSize', 'uint32_t', [param('ns3::Ptr< ns3::Socket >', 'socket')], is_const=True)
cls.add_method('GetClientTs', 'ns3::Time', [param('ns3::Ptr< ns3::Socket >', 'socket')], is_const=True)
cls.add_method('HasTxedPartOfObject', 'bool', [param('ns3::Ptr< ns3::Socket >', 'socket')], is_const=True)
cls.add_method('IsBufferEmpty', 'bool', [param('ns3::Ptr< ns3::Socket >', 'socket')], is_const=True)
cls.add_method('IsSocketAvailable', 'bool', [param('ns3::Ptr< ns3::Socket >', 'socket')], is_const=True)
cls.add_method('PrepareClose', 'void', [param('ns3::Ptr< ns3::Socket >', 'socket')])
cls.add_method('RecordNextServe', 'void', [param('ns3::Ptr< ns3::Socket >', 'socket'), param('ns3::EventId const &', 'eventId'), param('ns3::Time const &', 'clientTs')])
cls.add_method('RemoveSocket', 'void', [param('ns3::Ptr< ns3::Socket >', 'socket')])
cls.add_method('WriteNewObject', 'void', [param('ns3::Ptr< ns3::Socket >', 'socket'), param('ns3::ThreeGppHttpHeader::ContentType_t', 'contentType'), param('uint32_t', 'objectSize')])
return |
def filter(input_file, output_file, minK):
filtered = 0
total = 0
for line in input_file:
total += 1
fields = line.rstrip().split('\t')
assert (len(fields) == 3)
if (int(fields[2]) >= minK):
output_file.write('\t'.join(fields))
output_file.write('\n')
else:
filtered += 1
print(('Filtered %d of %d total, %.2f' % (filtered, total, (filtered / total)))) |
def test_panloss():
panloss = losses.PANLoss()
mask = [[1, 0, 1], [1, 1, 1], [0, 0, 1]]
target = [[1, 0, 1, 0, 0], [1, 1, 1, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]
masks = [np.array(mask)]
bitmasks = BitmapMasks(masks, 3, 3)
target_sz = (6, 5)
results = panloss.bitmasks2tensor([bitmasks], target_sz)
assert (len(results) == 1)
assert (torch.sum(torch.abs((results[0].float() - torch.Tensor(target)))).item() == 0) |
def test_deskl():
(pool_classifiers, X_dsel, y_dsel, X_test, y_test) = setup_classifiers()
deskl = DESKL(pool_classifiers, DFP=True)
deskl.fit(X_dsel, y_dsel)
assert np.isclose(deskl.score(X_test, y_test), 0.) |
@pytest.fixture()
@pytest.mark.usefixtures('spark')
def log(spark):
return spark.createDataFrame(log_data, schema=['user_id', 'item_id', 'timestamp', 'relevance']) |