code stringlengths 101 5.91M |
|---|
class LinearElasticYPADTerm(Term):
    """Linear elasticity weak-form term parameterized by Young's modulus and
    Poisson's ratio, evaluated via JAX so both material parameters are
    differentiable (``diff_var`` selects which derivative to evaluate).

    NOTE(review): relies on sfepy's ``Term`` machinery and on the
    ``eval_*_elasticity_yp`` helpers defined elsewhere in the file.
    """

    name = 'dw_lin_elastic_yp_ad'
    arg_types = (('material_1', 'material_2', 'virtual', 'state'),)
    arg_shapes = {'material_1': '1, 1', 'material_2': '1, 1', 'virtual': ('D', 'state'), 'state': 'D'}
    modes = ('weak',)
    # Flags both material arguments as differentiable parameters.
    diff_info = {'material_1': 1, 'material_2': 1}

    def get_fargs(self, material1, material2, virtual, state, mode=None, term_mode=None, diff_var=None, **kwargs):
        """Pick the evaluation function (residual or one of the Jacobians,
        depending on ``diff_var``) and assemble its argument list."""
        (vgmap, _) = self.get_mapping(state)
        (sgmap, _) = self.get_mapping(state)
        # Nodal displacement vector reshaped to (n_nod, dim).
        vecu = jnp.array(state().reshape(((- 1), vgmap.dim)))
        econn = state.field.get_econn(self.integration, self.region)
        # Per-cell displacements, transposed to (n_cell, dim, n_en).
        cu = vecu[econn].transpose((0, 2, 1))
        # Materials are scalar over the region: take the single stored value.
        young = material1[(0, 0, 0, 0)]
        poisson = material2[(0, 0, 0, 0)]
        if (diff_var is None):
            fun = eval_elasticity_yp
        elif (diff_var == state.name):
            fun = eval_jac_elasticity_yp
        elif (diff_var == 'material_1'):
            fun = eval_young_elasticity_yp
        elif (diff_var == 'material_2'):
            fun = eval_poisson_elasticity_yp
        else:
            raise ValueError
        fargs = [young, poisson, 'strain', vgmap.bfg, sgmap.bfg, vgmap.det, cu]
        return (fun, fargs)

    def function(out, fun, fargs):
        # NOTE(review): no ``self`` parameter and no decorator visible —
        # presumably a @staticmethod whose decorator was lost in extraction.
        # Writes the JAX result into the preallocated output array.
        out[:] = np.asarray(fun(*fargs).reshape(out.shape))
        return 0
class Accumulator():
    """Running element-wise sum of values, tracking how many were added."""

    def __init__(self):
        # Number of values folded in so far.
        self.count = 0
        # Element-wise running total; None until the first call.
        self.accum = None

    def __call__(self, x):
        """Fold *x* into the running total."""
        self.count += 1
        if self.accum is None:
            # Copy on first use so in-place += never mutates the caller's array.
            self.accum = np.array(x)
            return
        self.accum += x
def test_restore_state(files, seq_len):
    """Check that the ``skip`` argument restores iterator position: skipping
    past the samples already consumed must yield the same batch as iterating
    that far from scratch."""
    # batch_size is a tuple here, so np.prod below gives samples per batch.
    batch_size = (32, 16)
    n_batches = 32
    loader_train_iter = create_iterator_from_tfrecords_files(files, seq_len=seq_len, batch_size=batch_size)
    for _ in range(n_batches):
        next(loader_train_iter)
    check_sample = next(loader_train_iter)
    # Fresh iterator skipping exactly the samples consumed by the loop above.
    loader_train_iter = create_iterator_from_tfrecords_files(files, seq_len=seq_len, batch_size=batch_size, skip=(n_batches * np.prod(batch_size)))
    check_sample2 = next(loader_train_iter)
    # Element-wise equality: the count of differing entries must be zero.
    assert (tf.reduce_sum(tf.cast(tf.not_equal(check_sample, check_sample2), tf.uint32)).numpy() == 0)
class GimpPaletteFile():
    """Reader for GIMP's text palette format (up to 256 RGB entries)."""

    rawmode = 'RGB'

    def __init__(self, fp):
        # Start with a grey-scale ramp; parsed entries overwrite slots below.
        self.palette = [(o8(i) * 3) for i in range(256)]
        if fp.readline()[:12] != b'GIMP Palette':
            raise SyntaxError('not a GIMP palette file')
        for i in range(256):
            s = fp.readline()
            if not s:
                break
            # Skip "Name:"/"Columns:"-style headers and "#" comment lines
            # (note: skipped lines still consume a palette slot index).
            if re.match(b'\\w+:|#', s):
                continue
            if len(s) > 100:
                raise SyntaxError('bad palette file')
            v = tuple(map(int, s.split()[:3]))
            if len(v) != 3:
                raise ValueError('bad palette entry')
            self.palette[i] = o8(v[0]) + o8(v[1]) + o8(v[2])
        # Flatten the 256 three-byte entries into one bytes object.
        self.palette = b''.join(self.palette)

    def getpalette(self):
        """Return ``(palette_bytes, rawmode)``."""
        return (self.palette, self.rawmode)
def sorting_keys(element):
    """Sort key for a cohomology class: its coefficient vector expressed in
    the raw cohomology module of the class's degree (Sage-specific)."""
    x = element._x
    P = x.parent()
    CR = P.cohomology_raw(x.degree())
    V = CR.V()
    # Lift the basis coefficients into CR's vector space and list them.
    return list(CR(V(x.basis_coefficients())))
def remove_intensity_images(path):
    """Delete every ``*_intensity.jpg`` and ``*_resized.jpg`` under
    ``path``'s ``*/photo/`` directories (recursively).

    Note: *path* is expected to end with a path separator, since the glob
    patterns are built by plain string concatenation.
    """
    patterns = ('**/*/photo/*_intensity.jpg', '**/*/photo/*_resized.jpg')
    for pattern in patterns:
        for match in glob.iglob(path + pattern, recursive=True):
            os.remove(match)
def tokenize(src_file: str, tgt_file: str, tokenizer: Tokenizer, split: str, annotator: Optional[str]=None, batch_size=100, max_position=1024, max_tgt_position=256, save_to_file=True, datadir: Optional[str]=None) -> Dict:
    """Tokenize parallel source/target files line-by-line in batches.

    Lines are whitespace-truncated to max_position / max_tgt_position before
    tokenization. Returns {line_index: {'id', 'src_doc', 'tgt_doc'}}; docs are
    serialized with ``to_bytes()`` when ``save_to_file`` is set, in which case
    the dict is also pickled to ``{datadir}/{split}.{annotator}.pickle``.
    """
    def tokenize_batch(batched, data):
        # batched holds (line_index, truncated_src, truncated_tgt) triples.
        src_docs = tokenizer.pipe([x[1] for x in batched], batch_size=batch_size)
        tgt_docs = tokenizer.pipe([x[2] for x in batched], batch_size=batch_size)
        id_list = [x[0] for x in batched]
        for (id_, src_doc, tgt_doc) in zip(id_list, src_docs, tgt_docs):
            assert (id_ not in data)
            data[id_] = {'id': id_, 'src_doc': (src_doc.to_bytes() if save_to_file else src_doc), 'tgt_doc': (tgt_doc.to_bytes() if save_to_file else tgt_doc)}
    def truncate(x, max_len):
        # Whitespace-token truncation; also normalizes internal whitespace.
        x_s = x.rstrip().split()
        max_len = min(len(x_s), max_len)
        return ' '.join(x_s[:max_len])
    data = {}
    with open(src_file) as fsrc, open(tgt_file) as ftgt:
        batched = []
        for (i, (src_l, tgt_l)) in enumerate(zip(fsrc, ftgt)):
            batched.append((i, truncate(src_l, max_position), truncate(tgt_l, max_tgt_position)))
            if (((i + 1) % batch_size) == 0):
                tokenize_batch(batched, data)
                batched = []
            if ((i % 1000) == 0):
                print('processed {} lines'.format(i))
        # Flush the final partial batch.
        if (len(batched) > 0):
            tokenize_batch(batched, data)
            batched = []
    if save_to_file:
        with open(f'{datadir}/{split}.{annotator}.pickle', 'wb') as outfile:
            pickle.dump(data, outfile, protocol=pickle.HIGHEST_PROTOCOL)
    return data
def load_tf_weights_in_transfo_xl(*args, **kwargs):
    """Dummy placeholder (HuggingFace pattern): raises a helpful import error
    via ``requires_pytorch`` when PyTorch is not installed."""
    requires_pytorch(load_tf_weights_in_transfo_xl)
def __getattr__(name):
    # Module-level __getattr__ (PEP 562): forwards attribute access to the
    # private `_waveforms` module with a deprecation warning (SciPy pattern).
    return _sub_module_deprecation(sub_package='signal', module='waveforms', private_modules=['_waveforms'], all=__all__, attribute=name)
def main(args):
    """Train a BUnet model using the JSON config at ``args.config``.

    Reads the 'expt' and 'model' config sections, builds the data providers
    and network, and runs training, writing outputs (plus a copy of the
    config file) to ``args.output``.
    """
    out_dir = args.output
    with open(args.config, 'r') as f:
        cfg = json.loads(f.read())
    makedirs(out_dir, exist_ok=True)
    copy(args.config, out_dir)
    expt_cfg = cfg['expt']
    model_cfg = cfg['model']
    net = BUnet(nb_ch=model_cfg['nb_ch'], nb_kers=model_cfg['nb_kers'], nb_mc=model_cfg['nb_mc'], weight_decay=model_cfg['wd'], batch_size=expt_cfg['batch_size'])
    # BUG FIX: `x is 1` tests object identity, which is implementation-
    # dependent for ints (and never true for values parsed as e.g. 1.0);
    # use equality instead. The `True if ... else False` wrappers were
    # redundant as well.
    train_ds = DataProvider(expt_cfg['data_path'], {'mode': 'train', 'shuffle': (expt_cfg['shuffle'] == 1)})
    valid_ds = DataProvider(expt_cfg['data_path'], {'mode': 'valid', 'shuffle': False})
    train_gen = train_ds.get_generator(expt_cfg['batch_size'], expt_cfg['nb_epochs'])
    valid_gen = valid_ds.get_generator(expt_cfg['batch_size'], expt_cfg['nb_epochs'])
    trainer = Trainer(net, opt_kwargs={'lr': model_cfg['lr'], 'decay': model_cfg['lr_decay']}, batch_size=expt_cfg['batch_size'])
    trainer.train(train_gen, valid_gen, nb_val_steps=expt_cfg['nb_val_steps'], output_path=out_dir, steps_per_epoch=expt_cfg['steps_per_epoch'], epochs=expt_cfg['nb_epochs'], dropout=(1 - model_cfg['dr']), restore_path=expt_cfg['restore_path'], viz=(expt_cfg['viz'] == 1), cca_thresh=0.5, class_weight=expt_cfg['cw'])
def hdfs_preprocessed_logrecord():
    """Load the 5k-row preprocessed HDFS anomaly-detection sample as a
    LogRecordObject (test fixture)."""
    path = os.path.join(TEST_DATA_PATH, 'HDFS_AD', 'HDFS_5k_preprocessed_logrecord.csv')
    return LogRecordObject.load_from_csv(path)
def get_cleva_bias_metric_specs() -> List[MetricSpec]:
    """Build CLEVA bias MetricSpecs: one 'associations' spec per
    (demographic, target) pair, then one 'representation' spec per
    demographic category."""
    class_name = 'helm.benchmark.metrics.cleva_harms_metrics.CLEVABiasMetric'
    demographic_categories = ['race', 'gender']
    target_categories = ['adjective', 'profession']
    specs = []
    for dem, tgt in itertools.product(demographic_categories, target_categories):
        specs.append(MetricSpec(class_name=class_name, args={'mode': 'associations', 'demographic_category': dem, 'target_category': tgt}))
    for dem in demographic_categories:
        specs.append(MetricSpec(class_name=class_name, args={'mode': 'representation', 'demographic_category': dem}))
    return specs
class ResNet(nn.Module):
    """ResNet backbone with a three-conv deep stem and a WildCat pooling head.

    Only the stride-32 feature map feeds the classifier head; ``forward``
    returns ``(class scores, class activation maps)``.
    """

    def __init__(self, block, layers, num_classes, namenet='ResNet', modalities=4, kmax=0.5, kmin=None, alpha=0.6, dropout=0.0):
        assert (num_classes > 1), 'Number of classes must be > 1 ....[NOT OK]'
        self.num_classes = num_classes
        self.namenet = namenet
        self.inplanes = 128
        super(ResNet, self).__init__()
        # Deep stem: three 3x3 convs (first one stride 2) instead of one 7x7.
        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = BatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # Read stage output widths from the last BN of each stage
        # (bn3 for Bottleneck blocks, bn2 for BasicBlock).
        if isinstance(self.layer4[(- 1)], Bottleneck):
            in_channel4 = self.layer1[(- 1)].bn3.weight.size()[0]
            in_channel8 = self.layer2[(- 1)].bn3.weight.size()[0]
            in_channel16 = self.layer3[(- 1)].bn3.weight.size()[0]
            in_channel32 = self.layer4[(- 1)].bn3.weight.size()[0]
        elif isinstance(self.layer4[(- 1)], BasicBlock):
            in_channel4 = self.layer1[(- 1)].bn2.weight.size()[0]
            in_channel8 = self.layer2[(- 1)].bn2.weight.size()[0]
            in_channel16 = self.layer3[(- 1)].bn2.weight.size()[0]
            in_channel32 = self.layer4[(- 1)].bn2.weight.size()[0]
        else:
            raise ValueError('Supported class .... [NOT OK]')
        # He-style init for convs; BN weights/biases to 1/0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        self.poolscores = WildCatClassifierHead(in_channel32, modalities, num_classes, kmax=kmax, kmin=kmin, alpha=alpha, dropout=dropout)

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, multi_grid=1):
        """Stack ``blocks`` residual blocks, downsampling on the first one.

        NOTE(review): ``dilation`` and ``multi_grid`` are accepted but never
        used below — confirm whether they were meant to reach ``block``.
        """
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), BatchNorm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x, seed=None, prngs_cuda=None):
        """Run the backbone and WildCat head; returns (scores, maps)."""
        x = self.relu1(self.bn1(self.conv1(x)))
        x = self.relu2(self.bn2(self.conv2(x)))
        x = self.relu3(self.bn3(self.conv3(x)))
        x = self.maxpool(x)
        x_4 = self.layer1(x)
        x_8 = self.layer2(x_4)
        x_16 = self.layer3(x_8)
        x_32 = self.layer4(x_16)
        # Only the deepest (stride-32) features feed the classifier head.
        (scores32, maps32) = self.poolscores(x=x_32, seed=seed, prngs_cuda=prngs_cuda)
        return (scores32, maps32)

    def get_nb_params(self):
        """Total number of parameters (trainable and not)."""
        return sum([p.numel() for p in self.parameters()])

    def __str__(self):
        return '{}(): deep module.'.format(self.__class__.__name__)
class Multi_Trainer_dist_OSCC(Multi_BaseTrainer_dist):
    """Distributed trainer over multiple data loaders for a video
    classification task (OSCC): each batch gathers predictions/labels across
    ranks with all-gather before computing the loss/metrics.
    """

    def __init__(self, args, model, loss, metrics, optimizer, config, data_loader, valid_data_loader=None, lr_scheduler=None, len_epoch=None, writer=None, visualizer=None, tokenizer=None, max_samples_per_epoch=50000):
        super().__init__(args, model, loss, metrics, optimizer, config, writer)
        self.config = config
        self.args = args
        self.data_loader = data_loader
        if (len_epoch is None):
            # Epoch-based training: epoch length is the shortest loader.
            self.len_epoch = min([len(x) for x in data_loader])
        else:
            # Iteration-based training: loaders are wrapped to cycle forever.
            self.data_loader = inf_loop(data_loader)
            self.len_epoch = len_epoch
        self.valid_data_loader = valid_data_loader
        self.do_validation = (self.valid_data_loader is not None)
        self.lr_scheduler = lr_scheduler
        self.visualizer = visualizer
        self.val_chunking = True
        self.batch_size = self.data_loader[0].batch_size
        # Log roughly sqrt(batch_size) times per batch interval.
        self.log_step = int(np.sqrt(self.batch_size))
        self.total_batch_sum = sum([x.batch_size for x in self.data_loader])
        self.tokenizer = tokenizer
        self.max_samples_per_epoch = max_samples_per_epoch
        self.n_gpu = self.args.world_size
        self.allgather = AllGather_multi.apply

    def _eval_metrics(self, output):
        """Apply every configured metric to *output*; returns an array."""
        acc_metrics = np.zeros(len(self.metrics))
        for (i, metric) in enumerate(self.metrics):
            acc_metrics[i] += metric(output)
        return acc_metrics

    def _adjust_learning_rate(self, optimizer, epoch, args):
        # Step decay: multiply by 0.1 once per milestone already passed.
        lr = args.learning_rate1
        for milestone in args.schedule:
            lr *= (0.1 if (epoch >= milestone) else 1.0)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

    def _train_epoch(self, epoch):
        """Train one epoch over the zipped loaders; returns a log dict."""
        self.model.train()
        total_loss = ([0] * len(self.data_loader))
        total_metrics = np.zeros(len(self.metrics))
        for loader in self.data_loader:
            # Reshuffle the distributed sampler per epoch.
            loader.train_sampler.set_epoch(epoch)
        for (batch_idx, data_li) in enumerate(zip(*self.data_loader)):
            if (((batch_idx + 1) * self.total_batch_sum) > self.max_samples_per_epoch):
                break
            for (dl_idx, data) in enumerate(data_li):
                data['video'] = data['video'].to(self.device)
                cls_label = data['state'].to(self.device)
                self.optimizer.zero_grad()
                with torch.set_grad_enabled(True):
                    vid_scores = self.model(data, video_only=True)
                    # Gather scores/labels from all ranks so the loss sees
                    # the global batch.
                    vid_scores = self.allgather(vid_scores, self.n_gpu, self.args)
                    cls_label = self.allgather(cls_label, self.n_gpu, self.args)
                    loss = self.loss(vid_scores, cls_label)
                loss.backward()
                self.optimizer.step()
                if ((self.writer is not None) and (self.args.rank == 0)):
                    total = int((self.data_loader[dl_idx].n_samples / self.n_gpu))
                    current = (batch_idx * self.data_loader[dl_idx].batch_size)
                    final_total = (((epoch - 1) * total) + current)
                    self.writer.add_scalar(f'Loss_training/loss_{dl_idx}', loss.detach().item(), final_total)
                total_loss[dl_idx] += loss.detach().item()
                if (((batch_idx % self.log_step) == 0) and (self.args.rank == 0)):
                    self.logger.info('Train Epoch: {} dl{} {} Loss: {:.6f}'.format(epoch, dl_idx, self._progress(batch_idx, dl_idx), loss.detach().item()))
                self.optimizer.zero_grad()
            if (batch_idx == self.len_epoch):
                break
        log = {f'loss_{dl_idx}': (total_loss[dl_idx] / self.len_epoch) for dl_idx in range(len(self.data_loader))}
        if ((self.writer is not None) and (self.args.rank == 0)):
            for dl_idx in range(len(self.data_loader)):
                tl = (total_loss[dl_idx] / self.len_epoch)
                self.writer.add_scalar(f'Loss_training/loss_total_{dl_idx}', tl, (epoch - 1))
        if self.do_validation:
            val_log = self._valid_epoch(epoch)
            if (self.args.rank == 0):
                log.update(val_log)
        self._adjust_learning_rate(self.optimizer, epoch, self.args)
        return log

    def _valid_epoch(self, epoch):
        """Validate on every validation loader; returns a result dict
        (non-empty only on rank 0)."""
        self.model.eval()
        total_val_loss = ([0] * len(self.valid_data_loader))
        cls_arr = {x: [] for x in range(len(self.valid_data_loader))}
        vid_embed_arr = {x: [] for x in range(len(self.valid_data_loader))}
        id_arr = {x: [] for x in range(len(self.valid_data_loader))}
        with torch.no_grad():
            for (dl_idx, dl) in enumerate(self.valid_data_loader):
                for (batch_idx, data) in enumerate(tqdm(dl)):
                    data['video'] = data['video'].to(self.device)
                    cls_label = data['state'].to(self.device)
                    vid_scores = self.model(data, video_only=True)
                    # All-gather predictions/labels so every rank holds the
                    # full validation set for metric computation.
                    vid_embed_all = [torch.zeros_like(vid_scores) for _ in range(self.n_gpu)]
                    cls_all = [torch.zeros_like(cls_label) for _ in range(self.n_gpu)]
                    torch.distributed.all_gather(vid_embed_all, vid_scores)
                    torch.distributed.all_gather(cls_all, cls_label)
                    vid_embed_all = torch.cat(vid_embed_all, dim=0)
                    cls_all = torch.cat(cls_all, dim=0)
                    vid_embed_arr[dl_idx].append(vid_embed_all.cpu())
                    cls_arr[dl_idx].append(cls_all.cpu())
                    # Local (per-rank) loss for logging only.
                    loss = self.loss(vid_scores, cls_label)
                    total_val_loss[dl_idx] += loss.item()
        if ((self.writer is not None) and (self.args.rank == 0)):
            for dl_idx in range(len(self.valid_data_loader)):
                tl = (total_val_loss[dl_idx] / len(self.valid_data_loader[dl_idx]))
                self.writer.add_scalar(f'Loss_val/loss_total_{dl_idx}', tl, (epoch - 1))
        for dl_idx in range(len(self.valid_data_loader)):
            # NOTE(review): nested_metrics is re-created on every loop
            # iteration, so only the last loader's metrics survive to the
            # res_dict below — confirm whether this is intended.
            nested_metrics = {x: {} for x in range(len(self.valid_data_loader))}
            cls_arrs = torch.cat(cls_arr[dl_idx])
            vid_embeds = torch.cat(vid_embed_arr[dl_idx])
            for metric in self.metrics:
                metric_name = metric.__name__
                res = metric(vid_embeds, cls_arrs)
                if (self.args.rank == 0):
                    self.logger.info(verbose(epoch=epoch, metrics=res, name=self.valid_data_loader[dl_idx].dataset_name))
                nested_metrics[dl_idx][metric_name] = res
                if ((self.writer is not None) and (self.args.rank == 0)):
                    to_write = format_nested_metrics_for_writer(res, mode=metric_name, name=self.valid_data_loader[dl_idx].dataset_name)
                    for (key, val) in to_write.items():
                        # TensorBoard tags cannot contain brackets.
                        key = key.replace('[', '_').replace(']', '_')
                        self.writer.add_scalar(f'Val_metrics_{dl_idx}/{key}', val, (epoch - 1))
        res_dict = {}
        if (self.args.rank == 0):
            res_dict = {f'val_loss_{dl_idx}': (total_val_loss[dl_idx] / len(self.valid_data_loader[dl_idx])) for dl_idx in range(len(self.valid_data_loader))}
            res_dict['nested_val_metrics'] = nested_metrics
        return res_dict

    def _progress(self, batch_idx, dl_idx):
        """Format '[current/total (pct%)]' progress for loader *dl_idx*."""
        base = '[{}/{} ({:.0f}%)]'
        if hasattr(self.data_loader[dl_idx], 'n_samples'):
            current = (batch_idx * self.data_loader[dl_idx].batch_size)
            total = int((self.data_loader[dl_idx].n_samples / self.n_gpu))
        else:
            current = batch_idx
            total = self.len_epoch
        return base.format(current, total, ((100.0 * current) / total))
class PolynomialLR(_LRScheduler):
    """Polynomial learning-rate decay with an optional linear warmup phase.

    The rate is recomputed every ``step_size`` scheduler steps (within
    ``[1, iter_max]``) and follows
    ``(lr - min_lr) * (1 - t / iter_max) ** power + min_lr`` after warmup.
    """

    def __init__(self, optimizer, step_size, iter_warmup, iter_max, power, min_lr=0, last_epoch=(- 1)):
        self.step_size = step_size
        self.iter_warmup = int(iter_warmup)
        self.iter_max = int(iter_max)
        self.power = power
        self.min_lr = min_lr
        super(PolynomialLR, self).__init__(optimizer, last_epoch)

    def polynomial_decay(self, lr):
        """Return the decayed value for a single base learning rate."""
        t = float(self.last_epoch)
        if t < self.iter_warmup:
            # Linear ramp, scaled so it meets the decay curve at warmup end.
            coef = t / self.iter_warmup
            coef *= (1 - self.iter_warmup / self.iter_max) ** self.power
        else:
            coef = (1 - t / self.iter_max) ** self.power
        return (lr - self.min_lr) * coef + self.min_lr

    def get_lr(self):
        # Keep the current rates unless this step is a scheduled update.
        not_update_step = (self.last_epoch == 0
                           or self.last_epoch % self.step_size != 0
                           or self.last_epoch > self.iter_max)
        if not_update_step:
            return [group['lr'] for group in self.optimizer.param_groups]
        return [self.polynomial_decay(lr) for lr in self.base_lrs]

    def step_update(self, num_updates):
        """fairseq-style hook: advance the scheduler one step per update."""
        self.step()
class NoMixBlock(StateDictSerializationMixin, eqx.Module):
    """Backpack transformer block without attention mixing: residual-add with
    dropout, layer-norm, MLP, second residual-add, final layer-norm."""

    ln_1: hnn.LayerNorm
    ln_2: hnn.LayerNorm
    mlp: BackpackMlp
    resid_dropout1: hnn.Dropout
    resid_dropout2: hnn.Dropout

    def init(config: BackpackConfig, *, key) -> 'NoMixBlock':
        # NOTE(review): no `self`/`cls` — presumably decorated with
        # @staticmethod in the original; decorator lost in extraction.
        k_mlp = jrandom.split(key, 1)[0]
        ln_1 = hnn.LayerNorm.init(config.Embed, eps=config.layer_norm_epsilon)
        resid_dropout1 = hnn.Dropout(pdrop=config.resid_pdrop)
        resid_dropout2 = hnn.Dropout(pdrop=config.resid_pdrop)
        ln_2 = hnn.LayerNorm.init(config.Embed, eps=config.layer_norm_epsilon)
        mlp = BackpackMlp.init(Embed=config.Embed, Mlp=config.Mlp, Out=config.Embed, activation_fn=config.activation_function, key=k_mlp, use_bias=config.use_bias)
        return NoMixBlock(ln_1=ln_1, ln_2=ln_2, mlp=mlp, resid_dropout1=resid_dropout1, resid_dropout2=resid_dropout2)
    # NOTE(review): the bare `_call` below looks like the residue of a
    # garbled decorator (e.g. `@named_call`) — confirm against the repo.
    _call
    def __call__(self, hidden_states: NamedArray, residual: NamedArray, *, key):
        """Apply dropout+residual, LN, MLP, dropout+residual, LN."""
        (k1, k2) = haliax.jax_utils.maybe_rng_split(key, 2)
        residual = (self.resid_dropout1(hidden_states, key=k1) + residual)
        hidden_states = self.ln_1(residual)
        mlp_out = self.mlp(hidden_states)
        residual = (self.resid_dropout2(mlp_out, key=k2) + residual)
        hidden_states = self.ln_2(residual)
        return hidden_states
def p_c_simple_declarator(s, ctx, empty, is_type, cmethod_flag, assignable, nonempty):
    """Parse a simple C declarator from scanner *s* (Cython parser).

    Handles pointer ('*', with optional 'const'), double-pointer ('**'),
    reference ('&'), and plain-name declarators, including C++ operator
    overload names under `extern` visibility. Returns a declarator node
    with `calling_convention` attached.
    """
    pos = s.position()
    calling_convention = p_calling_convention(s)
    if (s.sy == '*'):
        # Pointer declarator, optionally 'const'-qualified.
        s.next()
        if (s.systring == 'const'):
            const_pos = s.position()
            s.next()
            const_base = p_c_declarator(s, ctx, empty=empty, is_type=is_type, cmethod_flag=cmethod_flag, assignable=assignable, nonempty=nonempty)
            base = Nodes.CConstDeclaratorNode(const_pos, base=const_base)
        else:
            base = p_c_declarator(s, ctx, empty=empty, is_type=is_type, cmethod_flag=cmethod_flag, assignable=assignable, nonempty=nonempty)
        result = Nodes.CPtrDeclaratorNode(pos, base=base)
    elif (s.sy == '**'):
        # '**' is one token; expand to two nested pointer nodes.
        s.next()
        base = p_c_declarator(s, ctx, empty=empty, is_type=is_type, cmethod_flag=cmethod_flag, assignable=assignable, nonempty=nonempty)
        result = Nodes.CPtrDeclaratorNode(pos, base=Nodes.CPtrDeclaratorNode(pos, base=base))
    elif (s.sy == '&'):
        s.next()
        base = p_c_declarator(s, ctx, empty=empty, is_type=is_type, cmethod_flag=cmethod_flag, assignable=assignable, nonempty=nonempty)
        result = Nodes.CReferenceDeclaratorNode(pos, base=base)
    else:
        rhs = None
        if (s.sy == 'IDENT'):
            name = s.systring
            if empty:
                error(s.position(), 'Declarator should be empty')
            s.next()
            cname = p_opt_cname(s)
            # Optional default value, e.g. in function signatures.
            if ((name != 'operator') and (s.sy == '=') and assignable):
                s.next()
                rhs = p_test(s)
        else:
            if nonempty:
                error(s.position(), 'Empty declarator')
            name = ''
            cname = None
        if ((cname is None) and (ctx.namespace is not None) and nonempty):
            cname = ((ctx.namespace + '::') + name)
        # C++ operator overloads: 'operator' followed by the operator token.
        if ((name == 'operator') and (ctx.visibility == 'extern') and nonempty):
            op = s.sy
            if [1 for c in op if (c in '+-*/<=>!%&|([^~,')]:
                s.next()
                if (op == '('):
                    s.expect(')')
                    op = '()'
                elif (op == '['):
                    s.expect(']')
                    op = '[]'
                elif ((op in ('-', '+', '|', '&')) and (s.sy == op)):
                    # Doubled operator tokens: '--', '++', '||', '&&'.
                    op *= 2
                    s.next()
                elif (s.sy == '='):
                    # Compound-assignment operators like '+='.
                    op += s.sy
                    s.next()
                if (op not in supported_overloaded_operators):
                    s.error(("Overloading operator '%s' not yet supported." % op), fatal=False)
                name += op
            elif (op == 'IDENT'):
                # Named operators, e.g. 'operator bool'.
                op = s.systring
                if (op not in supported_overloaded_operators):
                    s.error(("Overloading operator '%s' not yet supported." % op), fatal=False)
                name = ((name + ' ') + op)
                s.next()
        result = Nodes.CNameDeclaratorNode(pos, name=name, cname=cname, default=rhs)
    result.calling_convention = calling_convention
    return result
def _add_strip(sub_tab, full_tab, length):
    """Generate all ways to add a strip of *length* cells to the shifted
    tableau shape *sub_tab* inside *full_tab* (Sage combinatorics).

    Yields lists encoding a primed part followed by a non-primed part of
    the strip; uses Sage's ``IntegerVectors`` to enumerate distributions.
    """
    if ((sum(sub_tab) + length) > sum(full_tab)):
        raise ValueError('strip does not fit')
    # cliff_list[i] counts consecutive "cliff" positions where a primed
    # entry may be placed (run-length encoded down the rows).
    if (not sub_tab):
        cliff_list = []
    else:
        cliff_list = [int((sub_tab[0] != full_tab[0]))]
    for row in range(1, len(sub_tab)):
        if (sub_tab[row] == full_tab[row]):
            cliff_list.append(0)
        elif ((sub_tab[(row - 1)] - 1) == sub_tab[row]):
            # Continues the diagonal cliff of the previous row.
            cliff_list[(- 1)] += 1
        else:
            cliff_list.append(1)
    if (len(sub_tab) < len(full_tab)):
        cliff_list.append(0)
    # Distribute primes_num primed cells over the cliffs, then the rest of
    # the strip (non-primed) over the horizontal plateaus.
    for primes_num in range((min(sum(cliff_list), length) + 1)):
        for primed_list in IntegerVectors(n=primes_num, k=len(cliff_list), outer=cliff_list):
            row = 0
            primed_strip = []
            for (i, cliff) in enumerate(cliff_list):
                if (cliff == 0):
                    row += 1
                    primed_strip.append(0)
                primed_strip.extend([int((primed_list[i] > j)) for j in range(cliff)])
                row += cliff
            # plat_list bounds how many non-primed cells fit in each row,
            # built bottom-up (reversed row order).
            plat_list = []
            if (sub_tab and (len(sub_tab) < len(full_tab))):
                plat_list.append(min(((sub_tab[(- 1)] + primed_strip[(- 2)]) - 1), full_tab[len(sub_tab)]))
            for row in reversed(range(1, len(sub_tab))):
                plat_list.append(((min(((sub_tab[(row - 1)] + primed_strip[(row - 1)]) - 1), full_tab[row]) - sub_tab[row]) - primed_strip[row]))
            if sub_tab:
                plat_list.append(((full_tab[0] - sub_tab[0]) - primed_strip[0]))
            else:
                plat_list.append(full_tab[0])
            for non_primed_strip in IntegerVectors(n=(length - primes_num), k=len(plat_list), outer=plat_list):
                (yield (list(primed_strip) + list(non_primed_strip)))
class OutputImageGif(OutputBase):
    """Output wrapper holding GIF bytes, rendered as an inline HTML <img>."""

    def __init__(self, gif):
        # Raw GIF bytes wrapped in the project's OutputBuffer.
        self.gif = OutputBuffer(gif)

    def example(cls):
        # NOTE(review): takes `cls` but no @classmethod decorator is visible —
        # presumably lost in extraction; confirm against the repo.
        return cls(importlib.resources.read_binary(__package__, 'example.gif'))

    def html_fragment(self):
        """Return an <img> tag with the GIF embedded as a base64 data URI."""
        b64 = bytes_to_str(base64.b64encode(self.gif.get()), 'ascii')
        return '<img src="data:image/gif;base64,{0}"/>'.format(b64)
def load_sample_json_for_exp(exp):
    """Load the JSON parameter file of the first algorithm found for *exp*.

    Returns:
        dict: the parsed experiment parameters.

    Raises:
        FileNotFoundError: if no algorithm JSON exists in the experiment dir.
    """
    alg = get_alg_names(exp)[0]
    exp_path = make_exp_path(alg, exp)
    exp_path = os.path.join(exp_path, f'{alg}.json')
    if (not os.path.exists(exp_path)):
        print('No algorithms exist in the experiment directory...')
        # BUG FIX: previously raised FileExistsError, which signals the
        # opposite condition ("path already exists"); a missing file is
        # FileNotFoundError. Both subclass OSError.
        raise FileNotFoundError(exp_path)
    with open(exp_path) as f:
        json_exp_params = json.load(f)
    return json_exp_params
class MisconfiguredStorageBackend(Exception):
    """Raised when the storage backend configuration is invalid."""

    def __init__(self, message):
        super().__init__(message)
        # Keep the message reachable as an attribute for callers/loggers.
        self.message = message
def test_recordarray_6():
    """Check JAX jvp/vjp through an awkward record array on f(x) = 2*x.y**2
    (fixtures `test_recordarray` / `test_recordarray_tangent` are module-level)."""
    def test_recordarray_6(x):
        return (2 * (x.y ** 2))
    (value_jvp, jvp_grad) = jax.jvp(test_recordarray_6, (test_recordarray,), (test_recordarray_tangent,))
    (value_vjp, vjp_func) = jax.vjp(test_recordarray_6, test_recordarray)
    # Forward values agree between jvp and vjp evaluation.
    assert (ak.to_list(value_jvp) == [[[2.0], [2.0, 9.]], [], [[2.0, 8.0, 18.0]]])
    assert (ak.to_list(value_vjp) == [[[2.0], [2.0, 9.]], [], [[2.0, 8.0, 18.0]]])
    assert (ak.to_list(jvp_grad) == [[[4.0], [6.0, 0.0]], [], [[8.0, 4.0, 12.0]]])
    # Pullback of the primal output: only the `y` field carries gradient.
    assert (ak.to_list(vjp_func(value_vjp)[0]) == [[{'x': 0.0, 'y': [8.0]}, {'x': 0.0, 'y': [8.0, 85.]}], [], [{'x': 0.0, 'y': [8.0, 64.0, 216.0]}]])
def linear(input_, output_size, with_w=False, reuse=False, name=None):
    """Fully-connected layer: ``input_ @ Matrix + bias`` (TF1 variables).

    Args:
        input_: 2-D tensor ``[batch, in_dim]``.
        output_size: output width.
        with_w: if True, also return the weight matrix and bias variables.
        reuse: reuse variables within the scope.
        name: variable-scope name (defaults to 'linear').
    """
    shape = input_.get_shape().as_list()
    with tf.variable_scope(name or 'linear', reuse=reuse):
        try:
            matrix = tf.get_variable('Matrix', [shape[1], output_size], tf.float32, tf.random_normal_initializer(stddev=0.02))
        except ValueError as err:
            # Append a hint about the most common cause before re-raising.
            msg = "NOTE: Usually, this is due to an issue with the image dimensions. Did you correctly set '--crop' or '--input_height' or '--output_height'?"
            err.args = err.args + (msg,)
            raise
        bias = tf.get_variable('bias', [output_size], initializer=tf.constant_initializer(0.0))
        output = tf.matmul(input_, matrix) + bias
        if with_w:
            return (output, matrix, bias)
        return output
class Sub(Problem):
    """Subtraction problems over non-negative operands (left >= right),
    with chain-of-thought decomposition into smaller subtractions.

    NOTE(review): `question`, `answer`, and `thought` take no `self` —
    presumably @staticmethods whose decorators were lost in extraction.
    """

    name = 'Sub'
    dependencies = {}
    symbols = ['-']

    def generate(self):
        """Sample a random (left, right) pair with left >= right."""
        max_num = (10 ** self.config['max_digits'])
        left = random.randrange(0, max_num)
        right = random.randrange(0, max_num)
        if (left < right):
            return (right, left)
        return (left, right)

    def question(args):
        (left, right) = args
        return f'<GO>{left}-{right}='

    def answer(args):
        (left, right) = args
        return f'{(left - right)}<STOP>'

    def thought(args) -> list[T]:
        """Decompose left-right into sub-steps: last digit (with a borrowed
        ten), borrow handling, then the remaining higher digits."""
        (left, right) = args
        # Base case small enough to answer directly.
        if ((left <= 19) and (right <= 9)):
            return []
        # Last digit of `left` plus a borrowed 10, minus last digit of `right`.
        l_last = ((left % 10) + 10)
        r_last = (right % 10)
        thoughts = [T(Sub, (l_last, r_last))]
        (l_rest, r_rest) = ((left // 10), (right // 10))
        if ((l_last - r_last) < 10):
            # The borrow was actually used: pay it back from the higher digits.
            thoughts.append(T(Sub, (l_rest, 1)))
            l_rest -= 1
        if (r_rest > 0):
            thoughts.append(T(Sub, (l_rest, r_rest)))
        return thoughts

    def enum_args(self):
        """Enumerate every valid (left, right) pair with left >= right."""
        max_num = (10 ** self.config['max_digits'])
        args = []
        for left in range(max_num):
            for right in range((left + 1)):
                args.append((left, right))
        return args
def open_with_intermediates(filepath, mode):
    """Open *filepath*, first creating any missing parent directories.

    Args:
        filepath: path of the file to open.
        mode: mode string passed straight to ``open``.

    Raises:
        IOError: if a parent path exists but is not a directory.
    """
    d = os.path.dirname(filepath)
    if d:
        if (not os.path.isdir(d)):
            if os.path.exists(d):
                raise IOError(('The file "%s" cannot be created because "%s" exists but is not a directory' % (filepath, d)))
            # exist_ok=True closes the TOCTOU race: a concurrent creator
            # between the check above and makedirs no longer raises.
            os.makedirs(d, exist_ok=True)
    return open(filepath, mode)
class FundamentalGroupGLElement(FundamentalGroupElement):
    """Fundamental group element for GL type (Sage)."""

    def act_on_classical_ambient(self, wt):
        """Act on a classical ambient-space weight *wt* by permuting its
        support via this element's action."""
        return wt.map_support(self.parent().action(self.value()))
def kl_div(input, targets, reduction='batchmean'):
    """KL divergence between softmax(targets) and softmax(input) along dim 1.

    *input* is converted to log-probabilities as F.kl_div expects; *targets*
    to probabilities. `reduction` is forwarded unchanged.
    """
    log_probs = F.log_softmax(input, dim=1)
    target_probs = F.softmax(targets, dim=1)
    return F.kl_div(log_probs, target_probs, reduction=reduction)
# NOTE(review): the bare line below looks like the tail of a garbled
# `@pytest.fixture(scope='package')` decorator — confirm against the repo.
(scope='package')
def verysimple_vpacket_collection(nb_simulation_verysimple):
    """Fixture: an empty VPacketCollection spanning the simulation's spectrum
    frequencies with virtual-packet spawning effectively disabled."""
    spectrum_frequency = nb_simulation_verysimple.transport.spectrum_frequency.value
    return VPacketCollection(rpacket_index=0, spectrum_frequency=spectrum_frequency, number_of_vpackets=0, v_packet_spawn_start_frequency=0, v_packet_spawn_end_frequency=np.inf, temporary_v_packet_bins=0)
def get_score(cm, grouping, lambda_):
    """Score a grouping of confusion-matrix classes.

    The inter-cluster error (weighted by ``lambda_``) is penalized while
    larger groupings (``sum(grouping)``) are rewarded; lower is better.
    """
    from botsim.botsim_utils.clana.optimize import calculate_score
    # (Removed a dead `inter_cluster_err = 0.0` initializer: the value was
    # unconditionally overwritten by calculate_score below.)
    weights = create_weight_matrix(grouping)
    inter_cluster_err = calculate_score(cm, weights)
    return (lambda_ * inter_cluster_err) - sum(grouping)
class Agent(object):
    """Abstract base class for agents acting in an MDP."""

    def action(self, state):
        """Return the agent's action for *state*; subclasses must override.

        BUG FIX: the exception was previously *returned* instead of raised,
        so callers silently received an exception instance as the action.
        """
        raise NotImplementedError()

    def set_agent_index(self, agent_index):
        """Record this agent's index within a multi-agent environment."""
        self.agent_index = agent_index

    def set_mdp(self, mdp):
        """Attach the MDP the agent will act in."""
        self.mdp = mdp

    def reset(self):
        """Reset per-episode state; no-op by default."""
        pass
class TokenizedSequence(TokenizedLine):
    """A tokenized line bounded by ``max_seq_length``, supporting packing of
    further lines and padding with ``eos_token_id``.

    NOTE(review): `get_empty` and `from_article` take `cls` but show no
    @classmethod decorator — presumably lost in extraction.
    """

    def __init__(self, tokens: List[Token], max_seq_length: int, eos_token_id: int):
        if (max_seq_length < 1):
            err_msg = f'Cannot have zero / negative max_seq_length. Found max_seq_length == {max_seq_length}'
            raise ValueError(err_msg)
        if (len(tokens) > max_seq_length):
            err_msg = f'Tokens have length == {len(tokens)}, expected length to be <= {max_seq_length}'
            raise ValueError(err_msg)
        super().__init__(tokens)
        self.max_seq_length = max_seq_length
        self.eos_token_id = eos_token_id

    def get_empty(cls, max_seq_length: int, eos_token_id: int) -> 'TokenizedSequence':
        """Alternate constructor: an empty sequence with the given bounds."""
        return cls.from_article(TokenizedArticle.get_empty(), max_seq_length, eos_token_id)

    def from_article(cls, tokenized_article: TokenizedArticle, max_seq_length: int, eos_token_id: int):
        """Alternate constructor from an already-tokenized article."""
        return cls(tokenized_article.tokens, max_seq_length, eos_token_id)

    def __iadd__(self, tokenized_line: TokenizedLine) -> 'TokenizedSequence':
        """Append another line in place; rejects overflow past the max."""
        if ((len(self) + len(tokenized_line)) > self.max_seq_length):
            err_msg_1 = f'Tokenized line with length: {len(tokenized_line)} is too long to be added to'
            err_msg_2 = f'sequence with length: {len(self)} and max sequence length: {self.max_seq_length}'
            raise ValueError(f'{err_msg_1} {err_msg_2}')
        return super().__iadd__(tokenized_line)

    def free_tokens(self):
        """Number of token slots still available."""
        return (self.max_seq_length - len(self))

    def prompt_tokens(self):
        """Count tokens tagged as PROMPT."""
        return sum(((token.token_type_id == TokenTypeIds.PROMPT) for token in self.tokens))

    def completion_tokens(self):
        """Count tokens tagged as COMPLETION or SEP."""
        return sum(((token.token_type_id in [TokenTypeIds.COMPLETION, TokenTypeIds.SEP]) for token in self.tokens))

    def pad_tokens(self):
        """Count tokens tagged as PADDING."""
        return sum(((token.token_type_id == TokenTypeIds.PADDING) for token in self.tokens))

    def is_packed(self) -> bool:
        """True when the sequence is exactly full."""
        return (len(self.tokens) == self.max_seq_length)

    def pack(self, tokenized_line: TokenizedLineSubClass) -> TokenizedLineSubClass:
        """Consume as much of *tokenized_line* as fits; return the remainder."""
        slice_index = (self.max_seq_length - len(self))
        self += tokenized_line[:slice_index]
        return tokenized_line[slice_index:]

    def pad(self):
        """Fill the remaining slots with EOS tokens tagged as PADDING."""
        padding_size = (self.max_seq_length - len(self))
        self._tokens += ([Token(self.eos_token_id, TokenTypeIds.PADDING)] * padding_size)

    def _get_slice(self, slice_index: slice) -> 'TokenizedSequence':
        # Slicing yields a new sequence with the same bounds/EOS id.
        tokenized_article = TokenizedArticle(self.tokens[slice_index])
        return TokenizedSequence.from_article(tokenized_article, self.max_seq_length, self.eos_token_id)
def worst_approximated(data, est, workload, eps, prng=None):
    """Pick, via the exponential mechanism, the workload query on which
    *est* approximates *data* worst.

    Returns the (attributes, query-row) pair selected with probability
    proportional to exp(eps * error / 2).
    """
    if prng is None:  # BUG FIX: was `prng == None` (identity test intended)
        prng = np.random
    errors = np.array([])
    for (ax, W) in workload:
        x = data.project(ax).datavector()
        xest = est.project(ax).datavector()
        # NOTE(review): the workload matrix W is replaced by the identity
        # here, so the error is the plain per-cell residual — confirm this
        # is intended rather than leftover experimentation.
        W = matrix.Identity(x.size)
        errors = np.append(errors, np.abs(W.dot((x - xest))))
    merr = np.max(errors)
    # Exponential mechanism over per-query errors, shifted by the max for
    # numerical stability (sensitivity-scaled by eps/2).
    prob = np.exp(((eps * (errors - merr)) / 2.0))
    key = prng.choice(len(errors), p=(prob / prob.sum()))
    # (Removed a leftover debug `print(key)`.)
    # Map the flat sampled index back to its (attributes, row) pair.
    for (ax, W) in workload:
        if (key < W.shape[0]):
            return (ax, W[key])
        key = (key - W.shape[0])
def test_random_blur():
    """Smoke-test RandomBlur across every supported kernel family."""

    def make_params(kernel_size, kernel_list, **extra):
        # Key order matches the reference config — repr() is checked below.
        p = dict(kernel_size=kernel_size, kernel_list=kernel_list,
                 kernel_prob=[1], sigma_x=[0.2, 10], sigma_y=[0.2, 10],
                 rotate_angle=[(- 3.1416), 3.1416])
        p.update(extra)
        return p

    results = {'lq': np.ones((8, 8, 3)).astype(np.float32)}
    cases = [
        ([41], ['iso'], {}),
        ([41], ['aniso'], {}),
        ([41], ['generalized_iso'], {}),
        ([41], ['generalized_aniso'], {}),
        ([41], ['plateau_iso'], {}),
        ([41], ['plateau_aniso'], {}),
        ([11], ['sinc'], {}),
        ([15], ['sinc'], {}),
        ([15], ['sinc'], {'omega': [0.1, 0.1]}),
    ]
    for kernel_size, kernel_list, extra in cases:
        model = RandomBlur(params=make_params(kernel_size, kernel_list, **extra), keys=['lq'])
        results = model(results)
        assert results['lq'].shape == (8, 8, 3)

    # prob=0 disables the blur entirely: output equals input, repr echoes params.
    params = make_params([15], ['sinc'], prob=0)
    model = RandomBlur(params=params, keys=['lq'])
    assert (model(results) == results)
    assert (repr(model) == ((model.__class__.__name__ + f'(params={params}, ') + "keys=['lq'])"))
def get_info_backend():
    """Choose the audio-metadata backend.

    Prefers torchaudio, falls back to soundfile; returns None when neither
    backend is available.
    """
    if _torchaudio_available():
        return torchaudio_info
    if _sndfile_available():
        return soundfile_info
    # Implicit in the original: no backend available.
    return None
class SPPCSPC(nn.Module):
    """SPP-CSP block (YOLOv7): spatial pyramid pooling inside a Cross Stage
    Partial structure.

    Args:
        c1: input channels.
        c2: output channels.
        e: hidden-channel expansion ratio.
        k: max-pool kernel sizes for the pyramid levels.
        (n, shortcut, g are accepted for interface parity but unused here.)
    """

    def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 13)):
        super(SPPCSPC, self).__init__()
        c_ = int(((2 * c2) * e))
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c1, c_, 1, 1)
        self.cv3 = Conv(c_, c_, 3, 1)
        self.cv4 = Conv(c_, c_, 1, 1)
        # Stride-1 max pools with 'same' padding: one pyramid level per k.
        self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=(x // 2)) for x in k])
        self.cv5 = Conv((4 * c_), c_, 1, 1)
        self.cv6 = Conv(c_, c_, 3, 1)
        self.cv7 = Conv((2 * c_), c2, 1, 1)

    def forward(self, x):
        # Main branch: convs, then concat of the input with its pooled views.
        x1 = self.cv4(self.cv3(self.cv1(x)))
        y1 = self.cv6(self.cv5(torch.cat(([x1] + [m(x1) for m in self.m]), 1)))
        # Shortcut branch, fused with the main branch at the end (CSP).
        y2 = self.cv2(x)
        return self.cv7(torch.cat((y1, y2), dim=1))
class spark_nlp_labeling_function(base_nlp_labeling_function):
    """Decorator class for Spark-based NLP labeling functions.

    Reuses the decorator machinery of ``base_nlp_labeling_function`` and only
    swaps in the Spark NLP labeling-function class.
    """
    # Concrete LF class instantiated by the base decorator.
    _lf_cls = SparkNLPLabelingFunction
def register_Ns3NonCommunicatingNetDevice_methods(root_module, cls):
    """Register Python bindings for ``ns3::NonCommunicatingNetDevice``.

    pybindgen-generated-style registration: declares the class's constructors
    and its NetDevice-interface methods (mostly virtual overrides) on `cls`.
    """
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::NonCommunicatingNetDevice const &', 'arg0')])
    cls.add_constructor([])
    # ns3::NetDevice virtual interface.
    cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_virtual=True)
    cls.add_method('GetAddress', 'ns3::Address', [], is_const=True, is_virtual=True)
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_const=True, is_virtual=True)
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_const=True, is_virtual=True)
    cls.add_method('GetIfIndex', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetMtu', 'uint16_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'addr')], is_const=True, is_virtual=True)
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_const=True, is_virtual=True)
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_const=True, is_virtual=True)
    # Device-specific (non-virtual) accessors.
    cls.add_method('GetPhy', 'ns3::Ptr< ns3::Object >', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('IsBridge', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('IsBroadcast', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('IsLinkUp', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('IsMulticast', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('IsPointToPoint', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('NeedsArp', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True)
    cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_virtual=True)
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_virtual=True)
    cls.add_method('SetChannel', 'void', [param('ns3::Ptr< ns3::Channel >', 'c')])
    cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_virtual=True)
    cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_virtual=True)
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_virtual=True)
    cls.add_method('SetPhy', 'void', [param('ns3::Ptr< ns3::Object >', 'phy')])
    cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    cls.add_method('SupportsSendFrom', 'bool', [], is_const=True, is_virtual=True)
    # Private virtual override.
    cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True)
    return
def get_metadata(task: TaskType, transform: dict[(str, Any)], model: AnomalyModule) -> dict[(str, Any)]:
    """Assemble export/inference metadata for a model.

    Merges the task/transform info with the model's own metadata (model keys
    win on collision) and converts any ``Tensor`` values to nested Python
    lists so the result is JSON-serializable.
    """
    merged = {'task': task, 'transform': transform, **get_model_metadata(model)}
    return {key: (value.numpy().tolist() if isinstance(value, Tensor) else value)
            for (key, value) in merged.items()}
def register_op(opname, op, domain, version):
    """Register a symbolic `op` under `opname` for the (domain, version) pair.

    NOTE(review): when domain or version is None this only warns and then
    still registers under the (None, ...) key — presumably intentional
    upstream behavior; confirm before tightening.
    """
    if domain is None or version is None:
        warnings.warn('ONNX export failed. The ONNX domain and/or version to register are None.')
    global _registry
    key = (domain, version)
    # Lazily create the per-(domain, version) op table.
    if not is_registered_version(domain, version):
        _registry[key] = {}
    _registry[key][opname] = op
def register_Ns3MeshWifiInterfaceMac_methods(root_module, cls):
    """Register Python bindings for ``ns3::MeshWifiInterfaceMac``.

    pybindgen-generated-style registration: constructor, public API, and the
    private virtual overrides of this mesh Wi-Fi MAC class.
    """
    cls.add_constructor([])
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')])
    cls.add_method('CheckSupportedRates', 'bool', [param('ns3::SupportedRates', 'rates')], is_const=True)
    cls.add_method('Enqueue', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet'), param('ns3::Mac48Address', 'to'), param('ns3::Mac48Address', 'from')], is_virtual=True)
    cls.add_method('Enqueue', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet'), param('ns3::Mac48Address', 'to')], is_virtual=True)
    cls.add_method('FinishConfigureStandard', 'void', [param('ns3::WifiPhyStandard', 'standard')], is_virtual=True)
    cls.add_method('GetBeaconInterval', 'ns3::Time', [], is_const=True)
    cls.add_method('GetFrequencyChannel', 'uint16_t', [], is_const=True)
    cls.add_method('GetLinkMetric', 'uint32_t', [param('ns3::Mac48Address', 'peerAddress')])
    cls.add_method('GetMeshPointAddress', 'ns3::Mac48Address', [], is_const=True)
    cls.add_method('GetPhyStandard', 'ns3::WifiPhyStandard', [], is_const=True)
    cls.add_method('GetSupportedRates', 'ns3::SupportedRates', [], is_const=True)
    cls.add_method('GetTbtt', 'ns3::Time', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('InstallPlugin', 'void', [param('ns3::Ptr< ns3::MeshWifiInterfaceMacPlugin >', 'plugin')])
    cls.add_method('Report', 'void', [param('std::ostream &', 'arg0')], is_const=True)
    cls.add_method('ResetStats', 'void', [])
    cls.add_method('SendManagementFrame', 'void', [param('ns3::Ptr< ns3::Packet >', 'frame'), param('ns3::WifiMacHeader const &', 'hdr')])
    cls.add_method('SetBeaconGeneration', 'void', [param('bool', 'enable')])
    cls.add_method('SetBeaconInterval', 'void', [param('ns3::Time', 'interval')])
    cls.add_method('SetLinkMetricCallback', 'void', [param('ns3::Callback< unsigned int, ns3::Mac48Address, ns3::Ptr< ns3::MeshWifiInterfaceMac >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')])
    cls.add_method('SetLinkUpCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'linkUp')], is_virtual=True)
    cls.add_method('SetMeshPointAddress', 'void', [param('ns3::Mac48Address', 'arg0')])
    cls.add_method('SetRandomStartDelay', 'void', [param('ns3::Time', 'interval')])
    cls.add_method('ShiftTbtt', 'void', [param('ns3::Time', 'shift')])
    cls.add_method('SupportsSendFrom', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('SwitchFrequencyChannel', 'void', [param('uint16_t', 'new_id')])
    # Private virtual overrides.
    cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True)
    cls.add_method('DoInitialize', 'void', [], visibility='private', is_virtual=True)
    cls.add_method('Receive', 'void', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::WifiMacHeader const *', 'hdr')], visibility='private', is_virtual=True)
    return
@dace.library.node
class Gemm(dace.sdfg.nodes.LibraryNode):
    """GEMM library node: ``_c = alpha * op(_a) @ op(_b) + beta * _cin``.

    Inputs arrive on connectors '_a', '_b' and — when ``beta != 0`` and
    ``cin`` is set — '_cin'; the result leaves on '_c'.  ``op()`` optionally
    transposes its operand (``transA`` / ``transB``).

    NOTE(review): the original first line was the decompile residue
    ``.node``; restored here as the ``@dace.library.node`` decorator.
    """

    # Vendor/target-specific expansions, selected via `implementation`.
    implementations = {'pure': ExpandGemmPure, 'MKL': ExpandGemmMKL, 'OpenBLAS': ExpandGemmOpenBLAS, 'cuBLAS': ExpandGemmCuBLAS, 'rocBLAS': ExpandGemmRocBLAS, 'PBLAS': ExpandGemmPBLAS, 'FPGA1DSystolic': ExpandGemmFPGA1DSystolic}
    default_implementation = None

    # Node properties (serialized object fields).
    transA = properties.Property(dtype=bool, desc='Whether to transpose A before multiplying')
    transB = properties.Property(dtype=bool, desc='Whether to transpose B before multiplying')
    alpha = properties.Property(allow_none=False, default=1, desc='A scalar which will be multiplied with A B before adding C')
    beta = properties.Property(allow_none=False, default=0, desc='A scalar which will be multiplied with C before adding C')
    cin = properties.Property(dtype=bool, default=True, desc='Whether to have a _cin connector when beta != 0')
    algorithm = properties.Property(dtype=str, allow_none=True, default=None, desc='If applicable, chooses the vendor-provided implementation (algorithm) for the multiplication')
    accumulator_type = properties.TypeClassProperty(default=None, choices=dtypes.Typeclasses, allow_none=True, desc='Accumulator or intermediate storage type used in multiplication')
    compute_type = properties.Property(default=None, dtype=str, allow_none=True, desc='If applicable, overrides computation type (CUBLAS-specific, see ``cublasComputeType_t``)')

    def __init__(self, name, location=None, transA=False, transB=False, alpha=1, beta=0, cin=True):
        # The '_cin' input connector only exists when C actually contributes
        # (beta != 0 and cin enabled).
        super().__init__(name, location=location, inputs=({'_a', '_b', '_cin'} if ((beta != 0) and cin) else {'_a', '_b'}), outputs={'_c'})
        self.transA = (True if transA else False)
        self.transB = (True if transB else False)
        self.alpha = alpha
        self.beta = beta
        self.cin = cin

    def validate(self, sdfg, state):
        """Check operand shapes: A (m,k), B (k,n), optional C and output (m,n)."""
        in_edges = state.in_edges(self)
        if (len(in_edges) not in [2, 3]):
            raise ValueError('Expected 2 or 3 inputs to gemm')
        size2 = None
        for (_, _, _, dst_conn, memlet) in state.in_edges(self):
            if (dst_conn == '_a'):
                subset = dc(memlet.subset)
                subset.squeeze()
                size0 = subset.size()
            if (dst_conn == '_b'):
                subset = dc(memlet.subset)
                subset.squeeze()
                size1 = subset.size()
            # BUGFIX: the optional C operand arrives on '_cin' (see __init__);
            # '_c' is the *output* connector and never appears on an in-edge,
            # so this shape check was previously dead code.
            if (dst_conn == '_cin'):
                subset = dc(memlet.subset)
                subset.squeeze()
                size2 = subset.size()
        # Apply the logical transpositions before shape checks.
        if self.transA:
            size0 = list(reversed(size0))
        if self.transB:
            size1 = list(reversed(size1))
        out_edges = state.out_edges(self)
        if (len(out_edges) != 1):
            raise ValueError('Expected exactly one output from matrix-matrix product')
        out_memlet = out_edges[0].data
        if ((len(size0) != 2) or (len(size1) != 2)):
            raise ValueError('matrix-matrix product only supported on matrices')
        # Symbolic equality may be undecidable -> warn instead of failing.
        res = equal(size0[1], size1[0])
        if (res is None):
            warnings.warn(f'First matrix columns {size0[1]} and second matrix rows {size1[0]} may not match', UserWarning)
        elif (not res):
            raise ValueError('Inputs to matrix-matrix product must agree in the k-dimension')
        out_subset = dc(out_memlet.subset)
        out_subset.squeeze()
        size3 = out_subset.size()
        if (size2 is not None):
            res = [equal(s0, s1) for (s0, s1) in zip(size2, size3)]
            fail = any([(r is False) for r in res])
            success = all([(r is True) for r in res])
            if fail:
                raise ValueError('Input C matrix must match output matrix.')
            elif (not success):
                warnings.warn(f'Size of input C matrix {size2} may not match output matrix size {size3}', UserWarning)
        if (len(size3) != 2):
            raise ValueError('matrix-matrix product only supported on matrices')
        if (len(size3) == 2):
            res = [equal(s0, s1) for (s0, s1) in zip(size3, [size0[(- 2)], size1[(- 1)]])]
            fail = any([(r is False) for r in res])
            success = all([(r is True) for r in res])
            if fail:
                raise ValueError('Output to matrix-matrix product must agree in the m and n dimensions')
            elif (not success):
                warnings.warn(f'Size of output {size3} may not match input {size0} {size1}', UserWarning)
def find_multitokentargets(examples, split):
    """Report the fraction of examples whose target spans multiple tokens.

    Writes the percentage to stderr for the given dataset `split`.

    Raises:
        Exception: if the tokens of one target map to more than one frame.
    """
    multi = 0.0
    total = 0.0
    for ex in examples:
        total += 1
        if len(ex.targetframedict) > 1:
            multi += 1
        # All neighboring target tokens must agree on a single frame.
        frames = set(ex.targetframedict.values())
        if len(frames) > 1:
            raise Exception('different frames for neighboring targets!', ex.targetframedict)
    sys.stderr.write('multi-token targets in %s: %.3f%% [%d / %d]\n' % (split, multi * 100 / total, multi, total))
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert a Gluon/MXNet Bort checkpoint to a HuggingFace BERT checkpoint.

    Rebuilds the original Bort (4-layer, 8-head) model in GluonNLP, copies
    every parameter into a freshly configured ``BertForMaskedLM``, saves it to
    `pytorch_dump_folder_path`, then sanity-checks that the Gluon and the
    reloaded HF model produce (approximately) the same output tensors.
    """
    # Fixed hyper-parameters of the released bort_4_8_768_1024 checkpoint.
    bort_4_8_768_1024_hparams = {'attention_cell': 'multi_head', 'num_layers': 4, 'units': 1024, 'hidden_size': 768, 'max_length': 512, 'num_heads': 8, 'scaled': True, 'dropout': 0.1, 'use_residual': True, 'embed_size': 1024, 'embed_dropout': 0.1, 'word_embed': None, 'layer_norm_eps': 1e-05, 'token_type_vocab_size': 2}
    predefined_args = bort_4_8_768_1024_hparams
    # Rebuild the Gluon-side encoder with the exact original hyper-parameters.
    encoder = BERTEncoder(attention_cell=predefined_args['attention_cell'], num_layers=predefined_args['num_layers'], units=predefined_args['units'], hidden_size=predefined_args['hidden_size'], max_length=predefined_args['max_length'], num_heads=predefined_args['num_heads'], scaled=predefined_args['scaled'], dropout=predefined_args['dropout'], output_attention=False, output_all_encodings=False, use_residual=predefined_args['use_residual'], activation=predefined_args.get('activation', 'gelu'), layer_norm_eps=predefined_args.get('layer_norm_eps', None))
    # Bort uses the RoBERTa-style cased vocabulary shipped with GluonNLP.
    vocab_name = 'openwebtext_ccnews_stories_books_cased'
    gluon_cache_dir = os.path.join(get_home_dir(), 'models')
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)
    original_bort = nlp.model.BERTModel(encoder, len(bort_vocab), units=predefined_args['units'], embed_size=predefined_args['embed_size'], embed_dropout=predefined_args['embed_dropout'], word_embed=predefined_args['word_embed'], use_pooler=False, use_token_type_embed=False, token_type_vocab_size=predefined_args['token_type_vocab_size'], use_classifier=False, use_decoder=False)
    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    # Flat {prefix -> mxnet parameter} mapping used by check_and_map_params.
    params = original_bort._collect_params_with_prefix()
    # HF config mirroring the Gluon hparams. NOTE(review): hidden_size maps to
    # Gluon's embed_size and intermediate_size to Gluon's hidden_size — the
    # two frameworks name these dimensions oppositely; confirm intentional.
    hf_bort_config_json = {'architectures': ['BertForMaskedLM'], 'attention_probs_dropout_prob': predefined_args['dropout'], 'hidden_act': 'gelu', 'hidden_dropout_prob': predefined_args['dropout'], 'hidden_size': predefined_args['embed_size'], 'initializer_range': 0.02, 'intermediate_size': predefined_args['hidden_size'], 'layer_norm_eps': predefined_args['layer_norm_eps'], 'max_position_embeddings': predefined_args['max_length'], 'model_type': 'bort', 'num_attention_heads': predefined_args['num_heads'], 'num_hidden_layers': predefined_args['num_layers'], 'pad_token_id': 1, 'type_vocab_size': 1, 'vocab_size': len(bort_vocab)}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
    def to_torch(mx_array) -> nn.Parameter:
        """Convert an MXNet NDArray parameter to a torch nn.Parameter."""
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
    def check_and_map_params(hf_param, gluon_param):
        """Fetch the Gluon parameter by prefix and assert its shape matches."""
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
        assert (shape_hf == shape_gluon), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
        return gluon_param
    # --- Embeddings ---
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(hf_bort_model.bert.embeddings.word_embeddings.weight, 'word_embed.0.weight')
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(hf_bort_model.bert.embeddings.position_embeddings.weight, 'encoder.position_weight')
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(hf_bort_model.bert.embeddings.LayerNorm.bias, 'encoder.layer_norm.beta')
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(hf_bort_model.bert.embeddings.LayerNorm.weight, 'encoder.layer_norm.gamma')
    # Bort has no token-type embedding; zero out the HF one.
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(hf_bort_model.bert.embeddings.token_type_embeddings.weight.data)
    # --- Transformer layers: map attention, output and FFN weights per layer ---
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(self_attn.key.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias')
        self_attn.key.weight.data = check_and_map_params(self_attn.key.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight')
        self_attn.query.bias.data = check_and_map_params(self_attn.query.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias')
        self_attn.query.weight.data = check_and_map_params(self_attn.query.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight')
        self_attn.value.bias.data = check_and_map_params(self_attn.value.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias')
        self_attn.value.weight.data = check_and_map_params(self_attn.value.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight')
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(self_output.dense.bias, f'encoder.transformer_cells.{i}.proj.bias')
        self_output.dense.weight = check_and_map_params(self_output.dense.weight, f'encoder.transformer_cells.{i}.proj.weight')
        self_output.LayerNorm.bias = check_and_map_params(self_output.LayerNorm.bias, f'encoder.transformer_cells.{i}.layer_norm.beta')
        self_output.LayerNorm.weight = check_and_map_params(self_output.LayerNorm.weight, f'encoder.transformer_cells.{i}.layer_norm.gamma')
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(intermediate.dense.bias, f'encoder.transformer_cells.{i}.ffn.ffn_1.bias')
        intermediate.dense.weight = check_and_map_params(intermediate.dense.weight, f'encoder.transformer_cells.{i}.ffn.ffn_1.weight')
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(bert_output.dense.bias, f'encoder.transformer_cells.{i}.ffn.ffn_2.bias')
        bert_output.dense.weight = check_and_map_params(bert_output.dense.weight, f'encoder.transformer_cells.{i}.ffn.ffn_2.weight')
        bert_output.LayerNorm.bias = check_and_map_params(bert_output.LayerNorm.bias, f'encoder.transformer_cells.{i}.ffn.layer_norm.beta')
        bert_output.LayerNorm.weight = check_and_map_params(bert_output.LayerNorm.weight, f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma')
    # Save in half precision, like the released checkpoint.
    hf_bort_model.half()
    # --- Sanity check: compare Gluon and HF outputs on a sample sentence ---
    tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)['input_ids']
    gluon_input_ids = mx.nd.array([input_ids])
    # NOTE(review): token_types=[] relies on Gluon ignoring/broadcasting token
    # types when use_token_type_embed=False — confirm against GluonNLP docs.
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    # Reload through BertModel to exercise the saved checkpoint end-to-end.
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors='pt')
    output_hf = hf_bort_model(**input_ids)[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs((hf_layer - gluon_layer))).item()
    success = np.allclose(gluon_layer, hf_layer, atol=0.001)
    if success:
        print(' Both model do output the same tensors')
    else:
        print(' Both model do **NOT** output the same tensors')
        print('Absolute difference is:', max_absolute_diff)
def parallel_apply(modules, inputs, kwargs_tup=None, devices=None):
    """Apply each module to its corresponding input, one thread per module.

    Args:
        modules: list of modules to run (one per replica).
        inputs: list of inputs, same length as `modules`.
        kwargs_tup: optional per-module keyword-argument dicts.
        devices: optional per-module CUDA device ids; when None, the device
            is inferred from the input tensors.

    Returns:
        List of per-module outputs in input order.

    Raises:
        Re-raises the first exception captured by any worker thread.
    """
    assert (len(modules) == len(inputs))
    if (kwargs_tup is not None):
        assert (len(modules) == len(kwargs_tup))
    else:
        kwargs_tup = (({},) * len(modules))
    if (devices is not None):
        assert (len(modules) == len(devices))
    else:
        devices = ([None] * len(modules))
    # Lock serializes writes to the shared results dict.
    lock = threading.Lock()
    results = {}
    # Capture the caller's grad mode so worker threads inherit it (each
    # thread has its own grad-enabled flag).
    grad_enabled = torch.is_grad_enabled()
    def _worker(i, module, input, kwargs, device=None):
        torch.set_grad_enabled(grad_enabled)
        if (device is None):
            # Infer the target device from the first tensor in the input.
            device = get_a_var(input).get_device()
        try:
            with torch.cuda.device(device):
                # Single non-tuple inputs still need to be splatted.
                if (not isinstance(input, (list, tuple))):
                    input = (input,)
                output = module(*input, **kwargs)
            with lock:
                results[i] = output
        except Exception as e:
            # Store the exception; it is re-raised in the parent thread below.
            with lock:
                results[i] = e
    if (len(modules) > 1):
        threads = [threading.Thread(target=_worker, args=(i, module, input, kwargs, device)) for (i, (module, input, kwargs, device)) in enumerate(zip(modules, inputs, kwargs_tup, devices))]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
    else:
        # Single module: avoid thread overhead and run inline.
        _worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0])
    outputs = []
    for i in range(len(inputs)):
        output = results[i]
        if isinstance(output, Exception):
            raise output
        outputs.append(output)
    return outputs
def test_set_smart_llm_model(config: Config):
    """set_smart_llm_model should update smart_llm_model on the config."""
    original_model = config.smart_llm_model
    config.set_smart_llm_model('gpt-4-test')
    assert config.smart_llm_model == 'gpt-4-test'
    # Leave the shared config fixture exactly as we found it.
    config.set_smart_llm_model(original_model)
def get_most_recent(models):
    """Return the checkpoint filename with the highest numeric prefix.

    Filenames look like ``'<N>model.pt'``; the bare ``'model.pt'`` counts as
    step 0.

    BUGFIX: the original rebuilt the name as ``str(max) + 'model.pt'``, so
    when the newest checkpoint was the bare ``'model.pt'`` it returned the
    nonexistent ``'0model.pt'``. Returning the actual element avoids that.
    """
    def step(model):
        # Bare 'model.pt' has no numeric prefix -> step 0.
        return int(model.split('model.pt')[0]) if model != 'model.pt' else 0
    return max(models, key=step)
def load_eth_accounts(root_path):
    """Load emulator Ethereum accounts, de-duplicating display names.

    Reads ``<root_path>/emulator_data/accounts.json`` (generating it first
    via getEmulatorAccounts when missing). Repeated names get '-1', '-2', ...
    suffixes; the first occurrence keeps the bare name.

    Returns:
        dict mapping address -> {'name': ..., 'chain_id': ...}.
    """
    data_dir = os.path.join(root_path, 'emulator_data')
    accounts_file = os.path.join(data_dir, 'accounts.json')
    if not os.path.exists(accounts_file):
        getEmulatorAccounts(data_dir, 'accounts.json')
    with open(accounts_file) as fh:
        raw_accounts = json.load(fh)
    seen = {}
    renamed = {}
    for addr, info in raw_accounts.items():
        label = info['name']
        if label not in seen:
            seen[label] = 0
        else:
            seen[label] += 1
            label = label + ('-%d' % seen[label])
        renamed[addr] = {'name': label, 'chain_id': info['chain_id']}
    return renamed
def merge_files(paths, out_path=None):
    """Concatenate the contents of text files into a single output file.

    Args:
        paths: iterable of input file paths, read in order.
        out_path: destination path. When None (the default, preserving the
            original behavior) the user is prompted interactively.
    """
    chunks = []
    for path in paths:
        with open(path, 'r') as f:
            chunks.append(f.read())
    merged = ''.join(chunks)
    if out_path is None:
        # Backward-compatible interactive fallback.
        out_path = input('Please specify output file path: ')
    with open(out_path, 'w') as f:
        f.write(merged)
    print('Merge files done!')
def batch_pesq(clean, noisy):
    """Score each (clean, noisy) pair with PESQ in parallel and normalize.

    Returns:
        CUDA FloatTensor of scores mapped via (score + 0.5) / 5, or None when
        any pair failed to score (pesq_loss returned -1).
    """
    raw_scores = Parallel(n_jobs=-1)(delayed(pesq_loss)(ref, deg) for (ref, deg) in zip(clean, noisy))
    scores = np.array(raw_scores)
    if -1 in scores:
        # At least one utterance could not be scored; signal the caller.
        return None
    normalized = (scores + 0.5) / 5
    return torch.FloatTensor(normalized).to('cuda')
class ConformerLargeConfig(ConformerConfig):
    """Hyper-parameters for the "large" Conformer variant.

    Overrides the base ConformerConfig with a wider encoder/decoder and a
    deeper encoder stack.
    """
    encoder_dim: int = 512
    decoder_dim: int = 640
    num_encoder_layers: int = 17
    num_attention_heads: int = 8
@pytest.mark.parametrize('reference', [0.0, [0.0], [[0.0]]])
def test_divide_conquer_non_dominated_partition_bounds_raises_for_reference_with_invalid_shape(reference: SequenceN[float]) -> None:
    """partition_bounds must reject reference points of invalid (non-rank-1) shape.

    NOTE(review): the decorator line was the decompile residue
    ``.parametrize(...)``; restored as ``@pytest.mark.parametrize``.
    """
    partition = DividedAndConquerNonDominated(tf.constant([[0.0, 2.0, 1.0], [7.0, 6.0, 0.0], [9.0, 0.0, 1.0]]))
    with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
        partition.partition_bounds(tf.constant([0.0, 0.0, 0.0]), tf.constant(reference))
def grad_test_fwd(tifunc, npfunc=None):
    """Forward-mode autodiff check: y = tifunc(x) must match the reference
    function and its derivative at x = 0.234.

    Args:
        tifunc: Taichi-traceable unary function under test.
        npfunc: NumPy/Python reference function; defaults to tifunc itself.
    """
    npfunc = (npfunc or tifunc)
    print(f'arch={ti.lang.impl.current_cfg().arch} default_fp={ti.lang.impl.current_cfg().default_fp}')
    # Scalar input/output fields; .dual slots hold forward-mode tangents.
    x = ti.field(ti.lang.impl.current_cfg().default_fp)
    y = ti.field(ti.lang.impl.current_cfg().default_fp)
    ti.root.dense(ti.i, 1).place(x, x.dual, y, y.dual)
    # NOTE(review): in upstream Taichi tests this inner function is decorated
    # with @ti.kernel — the decorator may have been lost in transcription.
    def func():
        for i in x:
            y[i] = tifunc(x[i])
    v = 0.234
    x[0] = v
    # Seed the single tangent direction; afterwards y.dual holds dy/dx.
    with ti.ad.FwdMode(loss=y, param=x, seed=[1.0]):
        func()
    assert (y[0] == test_utils.approx(npfunc(v), rel=0.0001))
    # `grad` is presumably a derivative helper from the test utilities —
    # confirm its source before relying on it elsewhere.
    assert (y.dual[0] == test_utils.approx(grad(npfunc)(v), rel=0.0001))
class TestGenerateSimpleLabelMatrix(unittest.TestCase):
    """Checks that generate_simple_label_matrix yields label matrices whose
    empirical LF conditional probabilities match the sampled ones."""

    def setUp(self) -> None:
        # 10 labeling functions applied to 1000 data points.
        self.m = 10
        self.n = 1000

    def _test_generate_L(self, k: int, decimal: Optional[int]=2) -> None:
        """Compare sampled P against the empirical estimate for cardinality k."""
        np.random.seed(123)
        P, Y, L = generate_simple_label_matrix(self.n, self.m, k)
        empirical = LFAnalysis(L).lf_empirical_probs(Y, k=k)
        np.testing.assert_array_almost_equal(P, empirical, decimal=decimal)

    def test_generate_L(self) -> None:
        self._test_generate_L(2, decimal=1)

    def test_generate_L_multiclass(self) -> None:
        self._test_generate_L(3, decimal=1)
@torch.no_grad()
def ddim_inversion(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt=''):
    """Run DDIM inversion on a video latent and return the latent trajectory.

    Gradients are disabled for the whole loop. NOTE(review): the original
    decorator line was the decompile residue ``_grad()``; restored as
    ``@torch.no_grad()``.
    """
    ddim_latents = ddim_loop(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt)
    return ddim_latents
class CustomHelpFormatter(HelpFormatter):
    """argparse help formatter that flattens subcommand listings.

    For a subparsers action it emits only the per-subcommand lines (no
    enclosing group header/indentation); everything else formats normally.
    """

    def _format_action(self, action):
        # isinstance instead of `type(action) == _SubParsersAction` so
        # subclasses of the subparsers action are handled too.
        if isinstance(action, _SubParsersAction):
            return ''.join(self._format_action(sub) for sub in action._get_subactions())
        # Python 3 zero-argument super() instead of super(CustomHelpFormatter, self).
        return super()._format_action(action)
def scatter(tensor, devices, chunk_sizes=None, dim=0, streams=None):
    """Split `tensor` across `devices` along `dim` via the torch C backend.

    Thin wrapper over ``torch._C._scatter``; `chunk_sizes` and `streams` are
    forwarded unchanged. Returns the per-device chunks as a tuple.
    """
    chunks = torch._C._scatter(tensor, devices, chunk_sizes, dim, streams)
    return tuple(chunks)
@fails_if_pypy
@pytest.mark.parametrize('data_id, dataset_params, n_samples, n_features, n_targets', [(61, {'data_id': 61}, 150, 4, 1), (61, {'name': 'iris', 'version': 1}, 150, 4, 1), (2, {'data_id': 2}, 11, 38, 1), (2, {'name': 'anneal', 'version': 1}, 11, 38, 1), (561, {'data_id': 561}, 209, 7, 1), (561, {'name': 'cpu', 'version': 1}, 209, 7, 1), (40589, {'data_id': 40589}, 13, 72, 6), (1119, {'data_id': 1119}, 10, 14, 1), (1119, {'name': 'adult-census'}, 10, 14, 1), (40966, {'data_id': 40966}, 7, 77, 1), (40966, {'name': 'MiceProtein'}, 7, 77, 1), (40945, {'data_id': 40945}, 1309, 13, 1)])
@pytest.mark.parametrize('parser', ['liac-arff', 'pandas'])
@pytest.mark.parametrize('gzip_response', [True, False])
def test_fetch_openml_as_frame_true(monkeypatch, data_id, dataset_params, n_samples, n_features, n_targets, parser, gzip_response):
    """fetch_openml(as_frame=True) must return a pandas-backed Bunch of the
    expected shape for several OpenML datasets.

    NOTE(review): the decorator lines were decompile residue (``_if_pypy``,
    ``.parametrize``); restored as ``@fails_if_pypy`` and
    ``@pytest.mark.parametrize``.
    """
    pd = pytest.importorskip('pandas')
    # Serve canned OpenML responses instead of hitting the network.
    _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=gzip_response)
    bunch = fetch_openml(as_frame=True, cache=False, parser=parser, **dataset_params)
    assert (int(bunch.details['id']) == data_id)
    assert isinstance(bunch, Bunch)
    # Frame contains features and targets side by side.
    assert isinstance(bunch.frame, pd.DataFrame)
    assert (bunch.frame.shape == (n_samples, (n_features + n_targets)))
    assert isinstance(bunch.data, pd.DataFrame)
    assert (bunch.data.shape == (n_samples, n_features))
    if (n_targets == 1):
        # Single target collapses to a Series.
        assert isinstance(bunch.target, pd.Series)
        assert (bunch.target.shape == (n_samples,))
    else:
        assert isinstance(bunch.target, pd.DataFrame)
        assert (bunch.target.shape == (n_samples, n_targets))
    assert (bunch.categories is None)
def get_adv_losses(discriminator_real_outputs, discriminator_fake_outputs, kind):
    """Compute adversarial losses of the requested `kind`.

    `kind` is either one of the plain loss names below, or an imbalanced
    variant of the form ``'imbalanced-<family>-<weight>'`` where family is
    'nonsaturating', 'wasserstein' or 'hinge'. The weight is now parsed as an
    arbitrary float (generalizing the previously hard-coded
    0.5/0.9/1.1/2.0 set; all original kinds behave identically).

    Raises:
        ValueError: for any unrecognized kind (same message as before).
    """
    # Dispatch table replaces the original 30-branch if/elif chain.
    plain = {
        'classic': classic_gan_losses,
        'nonsaturating': nonsaturating_gan_losses,
        'wasserstein': wasserstein_gan_losses,
        'hinge': hinge_gan_losses,
        'least-squares': least_squares_gan_losses,
        'absolute': absolute_gan_losses,
        'new': new_gan_losses,
        'relativistic-average': relativistic_average_gan_losses,
        'relativistic-average-hinge': relativistic_average_hinge_gan_losses,
        'improved-classic': improved_classic_gan_losses,
        'improved-hinge': improved_hinge_gan_losses,
        'improved-hinge-alternative': improved_hinge_alternative_gan_losses,
        'nonsaturating-hinge': nonsaturating_hinge_gan_losses,
        'minimax-hinge': minimax_hinge_gan_losses,
        'double-absolute': double_absolute_gan_losses,
    }
    imbalanced = {
        'nonsaturating': imbalanced_nonsaturating_gan_losses,
        'wasserstein': imbalanced_wasserstein_gan_losses,
        'hinge': imbalanced_hinge_gan_losses,
    }
    if kind in plain:
        return plain[kind](discriminator_real_outputs, discriminator_fake_outputs)
    if isinstance(kind, str) and kind.startswith('imbalanced-'):
        # 'imbalanced-<family>-<weight>': split off the trailing weight.
        family, sep, weight_str = kind[len('imbalanced-'):].rpartition('-')
        if sep and (family in imbalanced):
            try:
                weight = float(weight_str)
            except ValueError:
                pass  # malformed weight -> fall through to the error below
            else:
                return imbalanced[family](discriminator_real_outputs, discriminator_fake_outputs, weight)
    raise ValueError(('Unrecognized adversarial loss type: ' + str(kind)))
class ResidualCNN(nn.Module):
    """Residual CNN block: two (layer-norm -> SRS -> dropout -> conv) stages
    plus a skip connection.

    Padding of kernel // 2 keeps the spatial size constant so the residual
    addition is shape-valid. Attribute names are kept so existing state
    dicts still load.
    """

    def __init__(self, in_channels, out_channels, kernel, stride, dropout, n_feats):
        super(ResidualCNN, self).__init__()
        same_pad = kernel // 2
        self.cnn1 = nn.Conv2d(in_channels, out_channels, kernel, stride, padding=same_pad)
        self.cnn2 = nn.Conv2d(out_channels, out_channels, kernel, stride, padding=same_pad)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.layer_norm1 = CNNLayerNorm(n_feats)
        self.layer_norm2 = CNNLayerNorm(n_feats)
        self.srs = SRS()

    def forward(self, x):
        """Two pre-activation conv stages followed by the residual addition."""
        identity = x
        out = self.cnn1(self.dropout1(self.srs(self.layer_norm1(x))))
        out = self.cnn2(self.dropout2(self.srs(self.layer_norm2(out))))
        return out + identity
class ConfusionMatrix():
    """Accumulates an integer confusion matrix (rows = targets, columns =
    predictions) and derives per-class performance statistics from it.

    Classes whose statistic is undefined (0/0 -> NaN) are dropped from the
    returned metric vectors.
    """

    def __init__(self, num_classes, class_names=None):
        """
        Args:
            num_classes: number of classes; the matrix is num_classes x num_classes.
            class_names: optional display names; defaults to '0'..'num_classes-1'.
        """
        self.n_classes = num_classes
        if (class_names is None):
            # BUGFIX: materialize a list. The original stored a `map` object,
            # which max() below consumed and which cannot be item-assigned.
            self.class_names = [str(i) for i in range(num_classes)]
        else:
            # Copy to a mutable list so the padding loop can assign by index.
            self.class_names = list(class_names)
        max_len = max(map(len, self.class_names))
        self.max_len = max_len
        for (idx, name) in enumerate(self.class_names):
            # BUGFIX: pad based on the *name* length; the original compared
            # len(self.class_names) (the class count), so names were never padded.
            if (len(name) < max_len):
                self.class_names[idx] = (name + (' ' * (max_len - len(name))))
        self.mat = np.zeros((num_classes, num_classes), dtype='int')

    def __str__(self):
        """Pretty-print the matrix with per-row and per-column totals."""
        col_sum = np.sum(self.mat, axis=1)
        row_sum = np.sum(self.mat, axis=0)
        s = []
        mat_str = self.mat.__str__()
        mat_str = mat_str.replace('[', '').replace(']', '').split('\n')
        for (idx, row) in enumerate(mat_str):
            if (idx == 0):
                pad = ' '
            else:
                pad = ''
            class_name = self.class_names[idx]
            class_name = ((' ' + class_name) + ' |')
            row_str = ((class_name + pad) + row)
            row_str += (' |' + str(col_sum[idx]))
            s.append(row_str)
        row_sum = [(((self.max_len + 4) * ' ') + ' '.join(map(str, row_sum)))]
        hline = [(((1 + self.max_len) * ' ') + ('-' * len(row_sum[0])))]
        s = (((hline + s) + hline) + row_sum)
        s_out = [(line + '\n') for line in s]
        return ''.join(s_out)

    def batch_add(self, targets, preds):
        """Accumulate a batch of (target, prediction) pairs into the matrix."""
        assert (targets.shape == preds.shape)
        assert (len(targets) == len(preds))
        assert (max(targets) < self.n_classes)
        assert (max(preds) < self.n_classes)
        targets = targets.flatten()
        preds = preds.flatten()
        for i in range(len(targets)):
            self.mat[(targets[i], preds[i])] += 1

    def get_errors(self):
        """Return per-class (tp, fn, fp, tn) as float arrays."""
        tp = np.asarray(np.diag(self.mat).flatten(), dtype='float')
        fn = (np.asarray(np.sum(self.mat, axis=1).flatten(), dtype='float') - tp)
        fp = (np.asarray(np.sum(self.mat, axis=0).flatten(), dtype='float') - tp)
        tn = (((np.asarray((np.sum(self.mat) * np.ones(self.n_classes).flatten()), dtype='float') - tp) - fn) - fp)
        return (tp, fn, fp, tn)

    # BUGFIX: get_errors() returns (tp, fn, fp, tn), but every metric below
    # previously unpacked (tp, tn, fp, fn) — silently swapping false negatives
    # and true negatives in sensitivity, specificity, NPV, FPR, F1 and MCC.
    # All methods now unpack in the order get_errors actually returns.

    def accuracy(self):
        """Overall accuracy: matrix trace divided by total sample count."""
        (tp, _, _, _) = self.get_errors()
        n_samples = np.sum(self.mat)
        return (np.sum(tp) / n_samples)

    def sensitivity(self):
        """Per-class recall tp / (tp + fn); NaN entries removed."""
        (tp, fn, fp, tn) = self.get_errors()
        res = (tp / (tp + fn))
        return res[(~ np.isnan(res))]

    def specificity(self):
        """Per-class tn / (tn + fp); NaN entries removed."""
        (tp, fn, fp, tn) = self.get_errors()
        res = (tn / (tn + fp))
        return res[(~ np.isnan(res))]

    def positive_predictive_value(self):
        """Per-class precision tp / (tp + fp); NaN entries removed."""
        (tp, fn, fp, tn) = self.get_errors()
        res = (tp / (tp + fp))
        return res[(~ np.isnan(res))]

    def negative_predictive_value(self):
        """Per-class tn / (tn + fn); NaN entries removed."""
        (tp, fn, fp, tn) = self.get_errors()
        res = (tn / (tn + fn))
        return res[(~ np.isnan(res))]

    def false_positive_rate(self):
        """Per-class fp / (fp + tn); NaN entries removed."""
        (tp, fn, fp, tn) = self.get_errors()
        res = (fp / (fp + tn))
        return res[(~ np.isnan(res))]

    def false_discovery_rate(self):
        """Per-class fp / (tp + fp); NaN entries removed."""
        (tp, fn, fp, tn) = self.get_errors()
        res = (fp / (tp + fp))
        return res[(~ np.isnan(res))]

    def F1(self):
        """Per-class F1 = 2tp / (2tp + fp + fn); NaN entries removed."""
        (tp, fn, fp, tn) = self.get_errors()
        res = ((2 * tp) / (((2 * tp) + fp) + fn))
        return res[(~ np.isnan(res))]

    def matthews_correlation(self):
        """Per-class Matthews correlation coefficient; NaN entries removed."""
        (tp, fn, fp, tn) = self.get_errors()
        numerator = ((tp * tn) - (fp * fn))
        denominator = np.sqrt(((((tp + fp) * (tp + fn)) * (tn + fp)) * (tn + fn)))
        res = (numerator / denominator)
        return res[(~ np.isnan(res))]
def get_prior(batch_size, num_points, inp_dim):
    """Sample a uniform prior in [-1.5, 1.5) of shape (batch, points, dim)."""
    # rand is U[0, 1); map to [-1, 1) then scale by 1.5.
    uniform = torch.rand(batch_size, num_points, inp_dim)
    return (2 * uniform - 1.0) * 1.5
def download_and_extract(url, dst, remove=True):
    """Download `url` to `dst` via gdown and unpack recognized archives.

    Supports ``.tar.gz``, ``.tar`` and ``.zip`` (extracted into dst's
    directory); anything else is left as downloaded. When `remove` is True
    the archive file is deleted afterwards.
    """
    gdown.download(url, dst, quiet=False)
    target_dir = os.path.dirname(dst)
    # Context managers close the archive handles even if extraction raises
    # (the original leaked open tar/zip objects on error).
    if dst.endswith('.tar.gz'):
        with tarfile.open(dst, 'r:gz') as tar:
            tar.extractall(target_dir)
    elif dst.endswith('.tar'):
        with tarfile.open(dst, 'r:') as tar:
            tar.extractall(target_dir)
    elif dst.endswith('.zip'):
        with ZipFile(dst, 'r') as zf:
            zf.extractall(target_dir)
    if remove:
        os.remove(dst)
def r_while(tn, t):
    """Build the interpreter node for a `while` statement.

    `t[2]` is the condition evaluator (returns (hits, n, success, value))
    and `t[5]` the body evaluator (returns (hits, n, success)).  The
    produced `fn` returns (token_hits, call_count, success).
    """
    (cond, stmt) = (t[2], t[5])
    # Tokens of the while construct itself (keyword/parens/braces).
    token_hit = ((tn[:2] + tn[3:5]) + tn[6:])
    def fn(world, n):
        if (n > MAX_FUNC_CALL):
            return (token_hit, n, False)
        (hit_c, n, s, c) = cond(world, n)
        if (not s):
            return ((token_hit + hit_c), n, s)
        # Bug fix: copy instead of aliasing -- extending an alias of
        # `token_hit` mutated the closure list, leaking hits from one
        # invocation of `fn` into all later ones.
        total_hit = list(token_hit)
        while c:
            (hit_s, n, s) = stmt(world, n)
            total_hit.extend(hit_s)
            if (not s):
                return (total_hit, n, s)
            (hit_c, n, s, c) = cond(world, n)
            total_hit.extend(hit_c)
            if (not s):
                return (total_hit, n, s)
        return (total_hit, n, s)
    return [('while_stmt', (- 1), fn)]
def remove_punctuation(in_str):
    """Lower-case, strip, and drop punctuation characters from `in_str`."""
    text = str(in_str).lower().strip()
    sp_char = {'-', ':', '_', '*', '^', '/', '\\', '~', '`', '+', '=', ',', '', ':', '?', '!', '', '', ';', '', '', '', '......', '', '', '', '', '(', ')', '-', '~', '', ''}
    # Keep every character that is not a listed punctuation mark.
    return ''.join(ch for ch in text if ch not in sp_char)
def register_Ns3OlsrDuplicateTuple_methods(root_module, cls):
    """Register the PyBindGen wrapper for ns3::olsr::DuplicateTuple.

    Generated-style binding code: declares the equality operator, the
    default and copy constructors, and the struct's public fields.
    """
    cls.add_binary_comparison_operator('==')
    ## constructors (default + copy)
    cls.add_constructor([])
    cls.add_constructor([param('ns3::olsr::DuplicateTuple const &', 'arg0')])
    ## public member variables
    cls.add_instance_attribute('address', 'ns3::Ipv4Address', is_const=False)
    cls.add_instance_attribute('expirationTime', 'ns3::Time', is_const=False)
    cls.add_instance_attribute('ifaceList', 'std::vector< ns3::Ipv4Address >', is_const=False)
    cls.add_instance_attribute('retransmitted', 'bool', is_const=False)
    cls.add_instance_attribute('sequenceNumber', 'uint16_t', is_const=False)
    return
def init_warprna(verbose=False):
    """Compile and load the warp-rna TensorFlow op (idempotent).

    :param bool verbose: passed through to the op compiler
    :return: loaded TF module exposing ``WarpRNA``
    """
    global _tf_mod
    if _tf_mod:
        # Bug fix: a repeated call previously returned None instead of
        # the already-loaded module.
        return _tf_mod
    assert is_checked_out(), 'submodule not checked out? Run `git submodule update --init --recursive`'
    enable_gpu = OpCodeCompiler.cuda_available()
    enable_cpu = os.path.exists('%s/core_cpu.cpp' % submodule_dir)
    src_files = ['%s/tensorflow_binding/src/warp_rna_op.cc' % submodule_dir]
    if enable_gpu:
        src_files.append('%s/core.cu' % submodule_dir)
    if enable_cpu:
        src_files.append('%s/core_cpu.cpp' % submodule_dir)
    # Concatenate sources with #line markers so compiler diagnostics
    # point back into the original files.
    src_code = ''
    for fn in src_files:
        # `with` ensures the source handle is closed (was leaked before).
        with open(fn) as f:
            f_code = f.read()
        base = os.path.basename(fn)
        src_code += '\n// %s : BEGIN { \n' % base
        src_code += '#line 1 "%s"\n' % base
        src_code += f_code
        src_code += '\n// %s : END } \n\n' % base
    compiler = OpCodeCompiler(base_name='warprna_kernels', code_version=1, code=src_code, include_paths=(submodule_dir, ('%s/tensorflow_binding/src' % submodule_dir)), c_macro_defines={'WARPRNA_ENABLE_CPU': (1 if enable_cpu else None), 'WARPRNA_ENABLE_GPU': (1 if enable_gpu else None)}, is_cpp=True, use_cuda_if_available=enable_gpu, verbose=verbose)
    tf_mod = compiler.load_tf_module()
    assert hasattr(tf_mod, 'WarpRNA'), ('content of mod: %r' % (dir(tf_mod),))
    _tf_mod = tf_mod
    return tf_mod
def to_numpy(tensor):
    """Convert a torch tensor, list, or ndarray to a numpy array.

    ndarrays are returned unchanged; torch tensors are detached and moved
    to CPU first.  Any other type raises TypeError.
    """
    if isinstance(tensor, torch.Tensor):
        return tensor.detach().cpu().numpy()
    if isinstance(tensor, np.ndarray):
        return tensor
    if isinstance(tensor, list):
        return np.array(tensor)
    raise TypeError('Unsupported type for conversion to numpy array')
class Vincent(Benchmark):
    """Vincent multimodal benchmark on the box [0.25, 10]^N.

    The global minimum value is -N, attained e.g. at x_i = 7 for all i.
    """
    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        # Identical lower/upper bounds for every coordinate.
        self._bounds = [(0.25, 10.0) for _ in range(self.N)]
        self.global_optimum = [[7.0] * self.N]
        self.fglob = -float(self.N)
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        return -sum(sin(10.0 * log(x)))
def test_run_ignore():
    """The `run --skip` option should populate the parsed black_list."""
    parser = _get_command_line_parser(['valid-detector'], [], [])
    skipped = ['a', 'b', 'c']
    argv = ['run', 'ex1', 'valid-detector', '--skip'] + skipped
    result = parser.parse_args(argv)
    assert result.black_list == skipped
def test_sub(Poly):
    """Subtraction between Poly instances and coefficient-like operands."""
    c1 = list(random((4,)) + 0.5)
    c2 = list(random((3,)) + 0.5)
    p1 = Poly(c1)
    p2 = Poly(c2)
    p3 = p1 - p2
    assert_poly_almost_equal(p2 - p1, -p3)
    # list / tuple / ndarray coefficients must behave like Poly(c2),
    # on both sides of the operator.
    for other in (c2, tuple(c2), np.array(c2)):
        assert_poly_almost_equal(p1 - other, p3)
        assert_poly_almost_equal(other - p1, -p3)
    # Mismatched domain or window must refuse to combine.
    assert_raises(TypeError, op.sub, p1, Poly([0], domain=Poly.domain + 1))
    assert_raises(TypeError, op.sub, p1, Poly([0], window=Poly.window + 1))
    # Mixing different polynomial bases must also fail.
    if Poly is Polynomial:
        assert_raises(TypeError, op.sub, p1, Chebyshev([0]))
    else:
        assert_raises(TypeError, op.sub, p1, Polynomial([0]))
def makeHeatmap(bitmap, x_ws, y_ti, vmax=None, title=None, saveas=None):
    """Render `bitmap` as a heatmap over wind speed (x) and turbulence (y).

    :param bitmap: 2-D array of values to display
    :param x_ws: wind-speed coordinates defining the x extent
    :param y_ti: turbulence-intensity coordinates defining the y extent
    :param vmax: optional color-scale maximum override (truthy values only)
    :param title: plot title; the two timing plots use a reversed colormap
        anchored at zero
    :param saveas: when given, save to figures/<saveas> instead of showing
    :return: the color-scale maximum actually used
    """
    x_min = np.min(x_ws)
    x_max = np.max(x_ws)
    y_min = np.min(y_ti)
    y_max = np.max(y_ti)
    # Symmetric scale around zero by default.
    maxval = np.max(np.abs([bitmap.min(), bitmap.max()]))
    if vmax:
        maxval = vmax
    vmin = -maxval
    if title in ('Floris time', 'Neural time'):
        cmap = 'RdYlGn_r'
        vmin = 0
    else:
        cmap = 'RdYlGn'
    plt.figure()
    plt.imshow(bitmap, cmap=cmap, interpolation='nearest', vmin=vmin, vmax=maxval, extent=[x_min, x_max, y_min, y_max], aspect=((x_max - x_min) / (y_max - y_min)))
    plt.title(title, fontname='serif')
    plt.xlabel('Free stream velocity (m/s)', fontname='serif')
    plt.ylabel('Turbulence intensity', fontname='serif')
    plt.colorbar()
    # Idiom fix: identity comparison with None (was `saveas != None`).
    if saveas is not None:
        plt.savefig('figures/' + str(saveas), dpi=1200)
    else:
        plt.show()
    return maxval
def imread(path, is_grayscale=False):
    """Read an image from `path` as float64; flatten to grayscale if asked.

    NOTE(review): scipy.misc.imread was removed in SciPy 1.2 -- consider
    migrating to imageio.imread when upgrading SciPy.
    """
    # Bug fix: `np.float` was removed in NumPy 1.20+; the builtin float
    # is the documented replacement (astype(float) means float64).
    if is_grayscale:
        return scipy.misc.imread(path, flatten=True).astype(float)
    return scipy.misc.imread(path).astype(float)
class compression_module(nn.Module):
    """Bottleneck that compresses a feature map and passes it through a
    simulated noisy channel.

    The encoder/decoder pair is selected by `spatial`:
      spatial == 0 -> conv1 / conv2 (3x3, resolution preserved)
      spatial == 1 -> conv3 / conv4 (stride-2 down/up-sampling)
    Between them the code passes an AWGN ('a'), erasure ('e') or
    noiseless ('w') channel; the sampled noise level is also appended to
    the input as an extra constant feature map.
    """

    def __init__(self, input_channel=256, hidden_channel=128, noise=10, channel=1, spatial=0):
        super(compression_module, self).__init__()
        # Resolution-preserving encoder/decoder pair (spatial == 0).
        self.conv1 = nn.Conv2d((input_channel + 1), hidden_channel, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(hidden_channel, input_channel, kernel_size=3, stride=1, padding=1)
        self.batchnorm1 = nn.BatchNorm2d(hidden_channel)
        self.batchnorm2 = nn.BatchNorm2d(input_channel)
        # Down/up-sampling pair (spatial == 1).
        self.conv3 = nn.Conv2d((input_channel + 1), hidden_channel, kernel_size=2, stride=2)
        self.conv4 = nn.ConvTranspose2d(hidden_channel, input_channel, kernel_size=2, stride=2)
        self.noise = noise        # maximum channel-noise level
        self.channel = channel    # 'a' AWGN, 'e' erasure, 'w' noiseless
        self.spatial = spatial    # 0 keep resolution, 1 downsample

    def forward(self, x):
        H = x.size()[2]
        C = x.size()[1]
        B = x.size()[0]
        # Draw a per-forward noise level and expose it to the network as
        # an extra constant channel.
        noise_factor = (torch.rand(1) * self.noise)
        p = noise_factor.numpy()
        noise_factor = noise_factor.to(device)
        noise_matrix = (torch.FloatTensor(np.ones((B, 1, H, H))).to(device) * noise_factor)
        x = torch.cat((x, noise_matrix), dim=1)
        if (self.spatial == 0):
            x = torch.sigmoid(self.batchnorm1(self.conv1(x)))
        elif (self.spatial == 1):
            x = torch.sigmoid(self.batchnorm1(self.conv3(x)))
        x_tmp = x
        if (self.channel == 'a'):
            x = awgn_noise(x, noise_factor)
        elif (self.channel == 'e'):
            bec = BEC.apply
            x = bec(x, p)
        elif (self.channel == 'w'):
            x = x
        else:
            print('error')
        # Bug fix: the decoder selection was inverted -- conv2 pairs with
        # conv1 (spatial == 0) and conv4 pairs with conv3 (spatial == 1);
        # the old branches produced an output resolution different from
        # the input (H/2 or 2H instead of H).
        if (self.spatial == 0):
            x = F.relu(self.batchnorm2(self.conv2(x)))
        else:
            x = F.relu(self.batchnorm2(self.conv4(x)))
        return x
def ClassFunction(group, values):
    """Build a class function for `group`.

    Prefers the group's own `class_function`; otherwise dispatches on the
    type of `values` to a libgap- or gap-backed implementation.
    """
    # Keep the try/except (rather than hasattr/getattr) so behavior on a
    # missing attribute is unchanged.
    try:
        return group.class_function(values)
    except AttributeError:
        pass
    backend = ClassFunction_libgap if isinstance(values, LibGapElement) else ClassFunction_gap
    return backend(group, values)
def sigmoid(x):
    """Elementwise logistic function exp(x) / (1 + exp(x)) with clipping.

    Input is clipped to [_MIN_CLIP, _MAX_CLIP] to keep exp() in range.
    """
    x = np.asarray(x, dtype=np.float64)
    # Bug fix: np.clip is NOT in-place -- the clipped result was
    # previously discarded, so extreme inputs could overflow exp().
    x = np.clip(x, _MIN_CLIP, _MAX_CLIP)
    return np.exp(x) / (1 + np.exp(x))
class GeneralEdgeAttConvVisualization(nn.Module):
    """Thin wrapper around GeneralEdgeAttConvLayerVis that updates
    `batch.node_feature` in place and returns the batch."""

    def __init__(self, dim_in, dim_out, bias=False, **kwargs):
        super(GeneralEdgeAttConvVisualization, self).__init__()
        self.model = GeneralEdgeAttConvLayerVis(dim_in, dim_out, bias=bias)

    def forward(self, batch):
        updated = self.model(batch.node_feature, batch.edge_index, edge_feature=batch.edge_feature)
        batch.node_feature = updated
        return batch
@pytest.mark.parametrize('output_dims,expected_shape', [(1, [120]), (2, [24, 5]), (3, [6, 4, 5]), (4, [2, 3, 4, 5])])
def test_flatten_leading_dims_output_dims(output_dims: int, expected_shape: list[int]) -> None:
    """flatten_leading_dims collapses leading axes to the requested rank
    and the returned `unflatten` round-trips the original tensor.

    NOTE(review): the decorator text was truncated to `.parametrize(` in
    the original; `@pytest.mark` restored (requires `import pytest`).
    """
    x_old = tf.random.uniform([2, 3, 4, 5])
    (flat_x_old, unflatten) = flatten_leading_dims(x_old, output_dims=output_dims)
    npt.assert_array_equal(tf.shape(flat_x_old), expected_shape)
    x_new = unflatten(flat_x_old)
    npt.assert_array_equal(x_old, x_new)
def generate_image_info(img_path, images_info):
    """Append a COCO-style image record for `img_path` to `images_info`.

    :param img_path: path to an image readable by cv2
    :param images_info: list of image records, mutated and returned
    """
    import os  # local import: keeps this fix self-contained
    img = cv2.imread(img_path)
    # Portability fix: basename instead of splitting on '/', which broke
    # on Windows paths.
    img_name = os.path.basename(img_path)
    img_h, img_w = img.shape[:2]
    # Robustness fix: splitext instead of name[:-4], which silently
    # mangled ids for extensions that are not 3 characters (e.g. .jpeg).
    info = {'file_name': img_name, 'height': img_h, 'width': img_w, 'id': os.path.splitext(img_name)[0]}
    images_info.append(info)
    return images_info
def linear_lognormal_size(magnitude, a_mu, b_mu, sigma, size=None):
    """Lognormal size sampler delegating to late_type_lognormal_size with a
    single slope/scatter pair and no faint-end cutoff (-inf).

    NOTE(review): `(-a_mu) / 0.4` is passed twice while `b_mu` appears only
    once -- confirm against late_type_lognormal_size's signature that the
    second slope argument is not meant to be derived from `b_mu`.
    """
    return late_type_lognormal_size(magnitude, ((- a_mu) / 0.4), ((- a_mu) / 0.4), b_mu, (- np.inf), sigma, sigma, size=size)
class Res50_SCAR(nn.Module):
    """Crowd-counting network: ResNet-50 stem (through layer2), a private
    copy of layer3, a dilated back end, and an SCA output module producing
    a single-channel density map upsampled x8.
    """
    def __init__(self, pretrained=True):
        super(Res50_SCAR, self).__init__()
        self.seen = 0  # training-progress counter (managed externally)
        # Channel plan of the dilated back end fed by layer3's 1024 channels.
        self.backend_feat = [512, 512, 512, 256, 128, 64]
        self.frontend = []  # placeholder; replaced by the ResNet stem below
        self.backend = make_layers(self.backend_feat, in_channels=1024, dilation=True)
        self.output_layer = SCAModule(64, 1)
        # Initializes only the modules created so far (backend + output
        # layer), so the pretrained ResNet weights assigned below are not
        # overwritten.
        initialize_weights(self.modules())
        res = models.resnet50(pretrained=pretrained)
        self.frontend = nn.Sequential(res.conv1, res.bn1, res.relu, res.maxpool, res.layer1, res.layer2)
        # Private layer3 (stride 1) seeded with the pretrained weights.
        self.own_reslayer_3 = make_res_layer(Bottleneck, 256, 6, stride=1)
        self.own_reslayer_3.load_state_dict(res.layer3.state_dict())
    def forward(self, x):
        x = self.frontend(x)
        x = self.own_reslayer_3(x)
        x = self.backend(x)
        x = self.output_layer(x)
        # Recover the resolution lost in the stem (/8 overall).
        x = F.interpolate(x, scale_factor=8, mode='nearest')
        return x
class SummarizationHumanEvalAnalyzer():
    """Aggregates MTurk human-evaluation results for summarization runs.

    Downloads the annotation CSV for `dataset`/`shots` and aggregates
    per-HIT annotator judgments into per-model score lists for
    faithfulness (binary consistency) and coherence/relevance (1-5
    Likert scales).
    """

    def __init__(self, dataset: str, eval_download_path: str, shots: int):
        self.dataset = dataset
        self.eval_download_path = eval_download_path
        self.shots = shots
        os.makedirs(eval_download_path, exist_ok=True)
        self.load_humaneval_data()

    def load_humaneval_data(self):
        """Download the MTurk CSV and build the per-model score tables."""
        filename = f'{self.dataset}_{self.shots}shots.csv'
        tasks_by_id = defaultdict(list)
        download_filename = HUMAN_EVAL_URL.format(file_name=filename)
        filename = os.path.join(self.eval_download_path, filename)
        ensure_file_downloaded(source_url=download_filename, target_path=filename)
        mturk_data = pandas.read_csv(filename)
        # Group annotator rows by HIT so judgments can be averaged per item.
        for (i, row) in mturk_data.iterrows():
            tasks_by_id[row.HITId].append(row)
        # Faithfulness: binary "consistent" judgments, averaged per HIT.
        self.faithfulness = defaultdict(list)
        self.faithfulness_full = dict()
        for (idx, tasks) in tasks_by_id.items():
            scores = []
            for task in tasks:
                scores.append(1 if task['Answer.consistency.consistent'] else 0)
            self.faithfulness_full[(task['Input.model_name'], task['Input.id'], task['Input.output_text'])] = np.mean(scores)
            self.faithfulness[task['Input.model_name']].append(np.mean(scores))
        # Coherence: each annotator checks exactly one of five radio buttons.
        self.coherence = defaultdict(list)
        self.coherence_full = dict()
        for (idx, tasks) in tasks_by_id.items():
            scores = []
            for task in tasks:
                for i in range(1, 6):
                    if task[f'Answer.coherence.cohere_{i}']:
                        scores.append(i)
                        break
            self.coherence_full[(task['Input.model_name'], task['Input.id'], task['Input.output_text'])] = np.mean(scores)
            self.coherence[task['Input.model_name']].append(np.mean(scores))
        # Relevance: same 1-5 Likert scheme as coherence.
        self.relevance = defaultdict(list)
        self.relevance_full = dict()
        for (idx, tasks) in tasks_by_id.items():
            scores = []
            for task in tasks:
                for i in range(1, 6):
                    if task[f'Answer.relevance.rel_{i}']:
                        scores.append(i)
                        break
            self.relevance_full[(task['Input.model_name'], task['Input.id'], task['Input.output_text'])] = np.mean(scores)
            self.relevance[task['Input.model_name']].append(np.mean(scores))

    def _compute_average(self, scores: dict):
        """Return (model, mean(scores)) pairs for a per-model score table."""
        return [(x, np.mean(y)) for (x, y) in scores.items()]

    def print_summary(self):
        """Print per-model averages for all three criteria."""
        assert self.faithfulness
        assert self.coherence
        assert self.relevance
        print('FAITHFULNESS')
        for (model, score) in self._compute_average(self.faithfulness):
            print(f'{model:40}: {score:.4f}')
        print(('=' * 40))
        print('RELEVANCE')
        for (model, score) in self._compute_average(self.relevance):
            print(f'{model:40}: {score:.4f}')
        print(('=' * 40))
        print('COHERENCE')
        # Bug fix: this section previously iterated self.relevance again,
        # so the printed COHERENCE numbers were the relevance averages.
        for (model, score) in self._compute_average(self.coherence):
            print(f'{model:40}: {score:.4f}')
        print(('=' * 40))

    def dump_test_result(self, output_file_path: str):
        """Write pairwise paired-bootstrap p-values (per criterion, every
        model vs. all models ranked below it) to `output_file_path` as JSON."""
        assert self.faithfulness
        assert self.coherence
        assert self.relevance
        output_pvalues = defaultdict(list)
        for (criterion, scores) in (('faithfulness', self.faithfulness), ('relevance', self.relevance), ('coherence', self.coherence)):
            averages = self._compute_average(scores)
            (sorted_models, _) = zip(*sorted(averages, key=(lambda x: x[1]), reverse=True))
            for (i, best_model) in enumerate(sorted_models):
                for other_model in sorted_models[(i + 1):]:
                    p_value = _paired_bootstrap_test(scores[best_model], scores[other_model])
                    output_pvalues[criterion].append({'model1': best_model, 'model2': other_model, 'p value': p_value})
        with open(output_file_path, 'w') as f:
            json.dump(dict(output_pvalues), f)
def memory_none(agent_test_config: Config, mock_get_embedding):
    """Pytest fixture yielding a 'no_memory' backend, restoring the
    previously configured backend afterwards."""
    was_memory_backend = agent_test_config.memory_backend
    agent_test_config.set_memory_backend('no_memory')
    try:
        yield get_memory(agent_test_config)
    finally:
        # Robustness fix: restore even if the test body raised, so later
        # tests still see the original backend.
        agent_test_config.set_memory_backend(was_memory_backend)
def test_symbols():
    """Exercise sympy's symbols() parser: whitespace/comma handling,
    sequence inputs, numeric and lexicographic ranges, and error cases."""
    x = Symbol('x')
    y = Symbol('y')
    z = Symbol('z')
    # Whitespace and trailing commas: a lone name gives a Symbol, any
    # comma forces a tuple result.
    assert (symbols('x') == x)
    assert (symbols('x ') == x)
    assert (symbols(' x ') == x)
    assert (symbols('x,') == (x,))
    assert (symbols('x, ') == (x,))
    assert (symbols('x ,') == (x,))
    assert (symbols('x , y') == (x, y))
    assert (symbols('x,y,z') == (x, y, z))
    assert (symbols('x y z') == (x, y, z))
    assert (symbols('x,y,z,') == (x, y, z))
    assert (symbols('x y z ') == (x, y, z))
    # Multi-character names are single symbols, not split per letter.
    xyz = Symbol('xyz')
    abc = Symbol('abc')
    assert (symbols('xyz') == xyz)
    assert (symbols('xyz,') == (xyz,))
    assert (symbols('xyz,abc') == (xyz, abc))
    # Sequence inputs: each element is parsed independently and the
    # container shape is preserved.
    assert (symbols(('xyz',)) == (xyz,))
    assert (symbols(('xyz,',)) == ((xyz,),))
    assert (symbols(('x,y,z,',)) == ((x, y, z),))
    assert (symbols(('xyz', 'abc')) == (xyz, abc))
    assert (symbols(('xyz,abc',)) == ((xyz, abc),))
    assert (symbols(('xyz,abc', 'x,y,z')) == ((xyz, abc), (x, y, z)))
    assert (symbols(('x', 'y', 'z')) == (x, y, z))
    assert (symbols(['x', 'y', 'z']) == [x, y, z])
    assert (symbols(set(['x', 'y', 'z'])) == set([x, y, z]))
    # Empty or malformed specifications raise.
    raises(ValueError, (lambda : symbols('')))
    raises(ValueError, (lambda : symbols(',')))
    raises(ValueError, (lambda : symbols('x,,y,,z')))
    raises(ValueError, (lambda : symbols(('x', '', 'y', '', 'z'))))
    # Numeric ranges: 'a:n' and 'am:n' expand to indexed symbols,
    # half-open like Python ranges.
    x0 = Symbol('x0')
    x1 = Symbol('x1')
    x2 = Symbol('x2')
    y0 = Symbol('y0')
    y1 = Symbol('y1')
    assert (symbols('x0:0') == ())
    assert (symbols('x0:1') == (x0,))
    assert (symbols('x0:2') == (x0, x1))
    assert (symbols('x0:3') == (x0, x1, x2))
    assert (symbols('x:0') == ())
    assert (symbols('x:1') == (x0,))
    assert (symbols('x:2') == (x0, x1))
    assert (symbols('x:3') == (x0, x1, x2))
    assert (symbols('x1:1') == ())
    assert (symbols('x1:2') == (x1,))
    assert (symbols('x1:3') == (x1, x2))
    assert (symbols('x1:3,x,y,z') == (x1, x2, x, y, z))
    assert (symbols('x:3,y:2') == (x0, x1, x2, y0, y1))
    assert (symbols(('x:3', 'y:2')) == ((x0, x1, x2), (y0, y1)))
    # Lexicographic (letter) ranges: 'a:d' is inclusive of the end letter.
    a = Symbol('a')
    b = Symbol('b')
    c = Symbol('c')
    d = Symbol('d')
    assert (symbols('x:z') == (x, y, z))
    assert (symbols('a:d,x:z') == (a, b, c, d, x, y, z))
    assert (symbols(('a:d', 'x:z')) == ((a, b, c, d), (x, y, z)))
    aa = Symbol('aa')
    ab = Symbol('ab')
    ac = Symbol('ac')
    ad = Symbol('ad')
    assert (symbols('aa:d') == (aa, ab, ac, ad))
    assert (symbols('aa:d,x:z') == (aa, ab, ac, ad, x, y, z))
    assert (symbols(('aa:d', 'x:z')) == ((aa, ab, ac, ad), (x, y, z)))
    def sym(s):
        # Helper: stringify the result to compare exotic expansions.
        return str(symbols(s))
    # Combined, nested, and parenthesized range specifications.
    assert (sym('a0:4') == '(a0, a1, a2, a3)')
    assert (sym('a2:4,b1:3') == '(a2, a3, b1, b2)')
    assert (sym('a1(2:4)') == '(a12, a13)')
    assert (sym('a0:2.0:2') == '(a0.0, a0.1, a1.0, a1.1)')
    assert (sym('aa:cz') == '(aaz, abz, acz)')
    assert (sym('aa:c0:2') == '(aa0, aa1, ab0, ab1, ac0, ac1)')
    assert (sym('aa:ba:b') == '(aaa, aab, aba, abb)')
    assert (sym('a:3b') == '(a0b, a1b, a2b)')
    assert (sym('a-1:3b') == '(a-1b, a-2b)')
    assert (sym(('a:2\\,:2' + chr(0))) == ('(a0,0%s, a0,1%s, a1,0%s, a1,1%s)' % ((chr(0),) * 4)))
    assert (sym('x(:a:3)') == '(x(a0), x(a1), x(a2))')
    assert (sym('x(:c):1') == '(xa0, xb0, xc0)')
    assert (sym('x((:a)):3') == '(x(a)0, x(a)1, x(a)2)')
    assert (sym('x(:a:3') == '(x(a0, x(a1, x(a2)')
    assert (sym(':2') == '(0, 1)')
    assert (sym(':b') == '(a, b)')
    assert (sym(':b:2') == '(a0, a1, b0, b1)')
    assert (sym(':2:2') == '(00, 01, 10, 11)')
    assert (sym(':b:b') == '(aa, ab, ba, bb)')
    # Dangling or empty range delimiters are invalid.
    raises(ValueError, (lambda : symbols(':')))
    raises(ValueError, (lambda : symbols('a:')))
    raises(ValueError, (lambda : symbols('::')))
    raises(ValueError, (lambda : symbols('a::')))
    raises(ValueError, (lambda : symbols(':a:')))
    raises(ValueError, (lambda : symbols('::a')))
class SegformerLayer(nn.Module):
    """Pre-norm transformer block with sequence-reduced self-attention,
    stochastic depth (drop path), and a Mix-FFN that needs the feature
    map's height/width.
    """
    def __init__(self, config, hidden_size, num_attention_heads, drop_path, sequence_reduction_ratio, mlp_ratio):
        super().__init__()
        self.layer_norm_1 = nn.LayerNorm(hidden_size)
        self.attention = SegformerAttention(config, hidden_size=hidden_size, num_attention_heads=num_attention_heads, sequence_reduction_ratio=sequence_reduction_ratio)
        # Stochastic depth; identity when the drop probability is zero.
        self.drop_path = (SegformerDropPath(drop_path) if (drop_path > 0.0) else nn.Identity())
        self.layer_norm_2 = nn.LayerNorm(hidden_size)
        mlp_hidden_size = int((hidden_size * mlp_ratio))
        self.mlp = SegformerMixFFN(config, in_features=hidden_size, hidden_features=mlp_hidden_size)
    def forward(self, hidden_states, height, width, output_attentions=False):
        # Pre-norm attention sub-block with residual connection.
        self_attention_outputs = self.attention(self.layer_norm_1(hidden_states), height, width, output_attentions=output_attentions)
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # attention weights, if requested
        attention_output = self.drop_path(attention_output)
        hidden_states = (attention_output + hidden_states)
        # Pre-norm Mix-FFN sub-block with residual connection.
        mlp_output = self.mlp(self.layer_norm_2(hidden_states), height, width)
        mlp_output = self.drop_path(mlp_output)
        layer_output = (mlp_output + hidden_states)
        outputs = ((layer_output,) + outputs)
        return outputs
class Environment():
    """Wrapper over a live AI2-THOR controller or an offline replay
    controller, exposing a uniform interface for navigation agents.
    """

    def __init__(self, use_offline_controller, grid_size=0.25, fov=100.0, offline_data_dir='/tmp/data_dhm/AI2thor_Dataset/Scene_Data', detection_feature_file_name='det_feature_60_categories.hdf5', images_file_name='resnet18_featuremap.hdf5', visible_object_map_file_name='visible_object_map.json', local_executable_path=None, optimal_action_file_name=None):
        self.offline_data_dir = offline_data_dir
        self.use_offline_controller = use_offline_controller
        self.images_file_name = images_file_name
        self.controller = OfflineControllerWithSmallRotation(grid_size=grid_size, fov=fov, offline_data_dir=offline_data_dir, detection_feature_file_name=detection_feature_file_name, images_file_name=images_file_name, metadata_file_name=visible_object_map_file_name, visualize=False, local_executable_path=local_executable_path, optimal_action_file_name=optimal_action_file_name)
        self.grid_size = grid_size
        self._reachable_points = None  # lazy cache for `reachable_points`
        self.start_state = None
        self.last_action = None
        self.fov = fov

    # Bug fix: these accessors are used as attributes elsewhere in this
    # class (self.scene_name, self.last_event, self.reachable_points),
    # so the @property decorators -- missing in the original text -- are
    # restored on the accessor group.
    @property
    def scene_name(self):
        return self.controller.last_event.metadata['sceneName']

    @property
    def current_frame(self):
        return self.controller.last_event.frame

    @property
    def current_detection_feature(self):
        return self.controller.get_detection_feature()

    @property
    def current_cls_masks(self):
        return self.controller.get_cls_masks()

    @property
    def current_depth(self):
        return self.controller.get_depth()

    @property
    def last_event(self):
        return self.controller.last_event

    @property
    def last_action_success(self):
        if self.use_offline_controller:
            return self.controller.last_action_success
        return self.controller.last_event.metadata['lastActionSuccess']

    def object_is_visible(self, objId):
        """True if `objId` is currently visible to the agent."""
        if (not self.use_offline_controller):
            objects = self.last_event.metadata['objects']
            visible_objects = [o['objectId'] for o in objects if o['visible']]
            return (objId in visible_objects)
        return self.controller.object_is_visible(objId)

    def start(self, scene_name):
        self.controller.start()
        self.reset(scene_name=scene_name)

    def reset(self, scene_name):
        self.controller.reset(scene_name)
        self.controller.step(dict(action='Initialize', gridSize=self.grid_size, fieldOfView=self.fov))

    def all_objects(self):
        """Return the ids of every object in the current scene."""
        if (not self.use_offline_controller):
            objects = self.controller.last_event.metadata['objects']
            return [o['objectId'] for o in objects]
        return self.controller.all_objects()

    def step(self, action_dict):
        return self.controller.step(action_dict)

    def teleport_agent_to(self, x, y, z, rotation, horizon):
        self.controller.step(dict(action='Teleport', x=x, y=y, z=z))
        self.controller.step(dict(action='Rotate', rotation=rotation))
        self.controller.step(dict(action='Look', horizon=horizon))

    def random_reachable_state(self, seed=None):
        """Sample a random reachable position with discrete rotation/horizon."""
        if (seed is not None):
            random.seed(seed)
        xyz = random.choice(self.reachable_points)
        rotation = random.choice([0, 90, 180, 270])
        horizon = random.choice([0, 30, 330])
        state = copy.copy(xyz)
        state['rotation'] = rotation
        state['horizon'] = horizon
        return state

    def randomize_agent_location(self, seed=None):
        """Move the agent to a random state and remember it as the start."""
        if (not self.use_offline_controller):
            state = self.random_reachable_state(seed=seed)
            self.teleport_agent_to(**state)
            self.start_state = copy.deepcopy(state)
            return
        self.controller.randomize_state()
        self.start_state = copy.deepcopy(self.controller.state)

    def back_to_start(self):
        """Return the agent to the remembered start state (or reset)."""
        if (self.start_state is None):
            self.reset(self.scene_name)
            return
        if (not self.use_offline_controller):
            self.teleport_agent_to(**self.start_state)
        else:
            self.controller.back_to_start(self.start_state)

    @property
    def reachable_points(self):
        """Reachable grid points of the current scene (cached after first load)."""
        if (self._reachable_points is not None):
            return self._reachable_points
        points_path = os.path.join(self.offline_data_dir, self.scene_name, 'grid.json')
        if (not os.path.exists(points_path)):
            raise IOError('Path {0} does not exist'.format(points_path))
        self._reachable_points = json.load(open(points_path))
        return self._reachable_points
def save_args_to_json(args, output_file):
    """Serialize `args` (via to_dict) as JSON into `output_file`."""
    with open(output_file, 'w') as f:
        json.dump(to_dict(args), f)
class GPTSanJapanesePreTrainedModel(metaclass=DummyObject):
    # Auto-generated import-guard placeholder: any instantiation raises a
    # helpful error when the 'torch' backend is not installed.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        """Raise if the required 'torch' backend is unavailable."""
        requires_backends(self, ['torch'])
@make_properties
class StreamTransient(transformation.SingleStateTransformation):
    """Inserts a transient stream (and optionally an accumulation buffer)
    between a map exit and an outer map exit that write to a stream.

    NOTE(review): the class decorator was garbled to a bare `_properties`
    (a NameError as written) and `expressions` lacked @classmethod even
    though it takes `cls`; both restored per the dace transformation API.
    """

    with_buffer = Property(dtype=bool, default=True, desc='Use an intermediate buffer for accumulation')
    tasklet = transformation.PatternNode(nodes.Tasklet)
    map_exit = transformation.PatternNode(nodes.MapExit)
    outer_map_exit = transformation.PatternNode(nodes.MapExit)

    @classmethod
    def expressions(cls):
        # Pattern to match: tasklet -> map exit -> outer map exit.
        return [sdutil.node_path_graph(cls.tasklet, cls.map_exit, cls.outer_map_exit)]

    def can_be_applied(self, graph, expr_index, sdfg, permissive=False):
        map_exit = self.map_exit
        outer_map_exit = self.outer_map_exit
        # Applicable only when some edge between the two exits writes to
        # a stream container.
        for (_src, _, dest, _, memlet) in graph.out_edges(map_exit):
            if (isinstance(sdfg.arrays[memlet.data], data.Stream) and (dest == outer_map_exit)):
                return True
        return False

    def apply(self, graph: SDFGState, sdfg: SDFG):
        tasklet = self.tasklet
        map_exit = self.map_exit
        outer_map_exit = self.outer_map_exit
        # Locate the stream edge between the two map exits.
        memlet = None
        edge = None
        for e in graph.out_edges(map_exit):
            memlet = e.data
            if ((e.dst == outer_map_exit) and isinstance(sdfg.arrays[memlet.data], data.Stream)):
                edge = e
                break
        # Find the tasklet's memlet that feeds the same stream.
        tasklet_memlet = None
        for e in graph.out_edges(tasklet):
            tasklet_memlet = e.data
            if (tasklet_memlet.data == memlet.data):
                break
        # Size the transient from an overapproximation of the map's range.
        bbox = map_exit.map.range.bounding_box_size()
        bbox_approx = [symbolic.overapproximate(dim) for dim in bbox]
        dataname = memlet.data
        # Create the local transient stream and redirect the tasklet to it.
        (newname, _) = sdfg.add_stream(('trans_' + dataname), sdfg.arrays[memlet.data].dtype, bbox_approx[0], storage=sdfg.arrays[memlet.data].storage, transient=True, find_new_name=True)
        snode = graph.add_access(newname)
        to_stream_mm = copy.deepcopy(memlet)
        to_stream_mm.data = snode.data
        tasklet_memlet.data = snode.data
        if self.with_buffer:
            # Drain the stream into a transient array before forwarding.
            (newname_arr, _) = sdfg.add_transient(('strans_' + dataname), [bbox_approx[0]], sdfg.arrays[memlet.data].dtype, find_new_name=True)
            anode = graph.add_access(newname_arr)
            to_array_mm = copy.deepcopy(memlet)
            to_array_mm.data = anode.data
            graph.add_edge(snode, None, anode, None, to_array_mm)
        else:
            anode = snode
        # Rewire: map exit -> transient -> outer map exit.
        graph.remove_edge(edge)
        graph.add_edge(map_exit, edge.src_conn, snode, None, to_stream_mm)
        graph.add_edge(anode, None, outer_map_exit, edge.dst_conn, memlet)
        return
def _parse_args():
    """Parse CLI arguments; `--split` is mandatory."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--split', required=True, help='split to operate on')
    parsed = parser.parse_args()
    print('split', parsed.split)
    return parsed
class DistanceToken(ElementSetToken):
    """Abstract token mapping an element set to the union of each element's
    class-filtered neighbors; subclasses define the matching predicate."""
    __metaclass__ = abc.ABCMeta

    def __init__(self, token, classes=None):
        super(DistanceToken, self).__init__(classes)
        assert (token.return_type == ElementSet)
        self._token = token

    def _execute(self, env):
        # Union of neighbor sets over every element produced by the
        # wrapped token.
        source = self._token.execute(env)
        gathered = set()
        for elem in source.elements:
            gathered.update(self._neighbors(elem, env))
        return ElementSet(list(gathered))

    def _neighbors(self, elem, env):
        candidates = env.elements_by_classes(self._classes)
        return {cand for cand in candidates if self._neighbor_match(elem, cand)}

    def _neighbor_match(self, input_elem, output_elem):
        # Subclass hook: decide whether `output_elem` neighbors `input_elem`.
        raise NotImplementedError()
class ProphetNetPreTrainedModel():
    """Import-guard placeholder: any use raises via requires_pytorch when
    the PyTorch backend is unavailable."""
    def __init__(self, *args, **kwargs):
        requires_pytorch(self)
    # NOTE(review): in the usual generated dummy objects `from_pretrained`
    # is a @classmethod -- confirm before relying on instance-less calls;
    # left byte-identical here.
    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
class SpacyParallelismModel(Coref, SpacyModel):
    """Syntactic-parallelism heuristic: link the pronoun to the nearest
    candidate token sharing its normalized grammatical role (subj/dobj)."""

    def __init__(self, model):
        self.model = model

    def predict(self, text, a, b, pronoun_offset, a_offset, b_offset, **kwargs):
        (doc, tokens, pronoun_offset, a_offset, b_offset, a_span, b_span, pronoun_token, a_tokens, b_tokens) = self.tokenize(text, a, b, pronoun_offset, a_offset, b_offset, **kwargs)
        # Candidates from both mentions, ordered nearest-to-pronoun first.
        candidates = list(a_tokens) + list(b_tokens)
        candidates.sort(key=lambda token: abs(token.i - pronoun_offset))
        clusters = []
        pronoun_role = get_normalized_tag(pronoun_token)
        if pronoun_role in ('subj', 'dobj'):
            for candidate in candidates:
                if get_normalized_tag(candidate) == pronoun_role:
                    clusters.append([[pronoun_offset, pronoun_offset], [candidate.i, candidate.i]])
                    break
        return (tokens, clusters, pronoun_offset, a_span, b_span)
def find_array_typestr(behavior: (None | Mapping), parameters: (None | Mapping[(str, Any)]), default: (str | None)=None) -> (str | None):
if (parameters is None):
return default
behavior = overlay_behavior(behavior)
return behavior.get(('__typestr__', parameters.get('__list__')), default) |
def read_permutation(cm_file: str, perm_file: Optional[str]) -> List[int]:
    """Load the class permutation for a confusion matrix.

    Reads `perm_file` as comma-separated ints (``*.csv``) or JSON;
    otherwise falls back to the permutation stored in the clana config.

    :raises ValueError: if `cm_file` is not an existing file
    """
    if not os.path.isfile(cm_file):
        raise ValueError(f'cm_file={cm_file} is not a file')
    if (perm_file is not None) and os.path.isfile(perm_file):
        # Bug fix: the file was previously opened twice (an unused outer
        # handle wrapped a second inner open); one handle suffices.
        with open(perm_file) as data_file:
            if perm_file.lower().endswith('csv'):
                perm = [int(el) for el in data_file.read().split(',')]
            else:
                perm = json.load(data_file)
    else:
        perm = ClanaCfg.get_perm(cm_file)
    return perm
def register_Ns3MmWaveMacCschedSapUser_methods(root_module, cls):
    """Register the PyBindGen wrapper for ns3::MmWaveMacCschedSapUser.

    Generated-style binding code: constructors plus the pure-virtual
    CSCHED confirmation/indication callbacks of the SAP user interface.
    """
    ## constructors (default + copy)
    cls.add_constructor([])
    cls.add_constructor([param('ns3::MmWaveMacCschedSapUser const &', 'arg0')])
    ## pure-virtual scheduler-configuration callbacks
    cls.add_method('CschedCellConfigCnf', 'void', [param('ns3::MmWaveMacCschedSapUser::CschedCellConfigCnfParameters const &', 'params')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('CschedCellConfigUpdateInd', 'void', [param('ns3::MmWaveMacCschedSapUser::CschedCellConfigUpdateIndParameters const &', 'params')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('CschedLcConfigCnf', 'void', [param('ns3::MmWaveMacCschedSapUser::CschedLcConfigCnfParameters const &', 'params')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('CschedLcReleaseCnf', 'void', [param('ns3::MmWaveMacCschedSapUser::CschedLcReleaseCnfParameters const &', 'params')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('CschedUeConfigCnf', 'void', [param('ns3::MmWaveMacCschedSapUser::CschedUeConfigCnfParameters const &', 'params')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('CschedUeConfigUpdateInd', 'void', [param('ns3::MmWaveMacCschedSapUser::CschedUeConfigUpdateIndParameters const &', 'params')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('CschedUeReleaseCnf', 'void', [param('ns3::MmWaveMacCschedSapUser::CschedUeReleaseCnfParameters const &', 'params')], is_pure_virtual=True, is_virtual=True)
    return
class SpectralNorm(object):
    """Forward pre-hook implementing spectral normalization of a weight.

    The largest singular value sigma of the (flattened) weight matrix is
    estimated by power iteration and the effective weight is weight/sigma,
    recomputed each training-mode forward pass.
    """
    def __init__(self, name='weight', n_power_iterations=1, dim=0, eps=1e-12):
        self.name = name
        self.dim = dim  # axis treated as the output dimension
        if (n_power_iterations <= 0):
            raise ValueError('Expected n_power_iterations to be positive, but got n_power_iterations={}'.format(n_power_iterations))
        self.n_power_iterations = n_power_iterations
        self.eps = eps  # numerical-stability epsilon for normalization
    def compute_weight(self, module):
        """Return (weight / sigma, updated left singular vector u)."""
        weight = getattr(module, (self.name + '_orig'))
        u = getattr(module, (self.name + '_u'))
        weight_mat = weight
        if (self.dim != 0):
            # Move the chosen output dimension to the front before flattening.
            weight_mat = weight_mat.permute(self.dim, *[d for d in range(weight_mat.dim()) if (d != self.dim)])
        height = weight_mat.size(0)
        weight_mat = weight_mat.reshape(height, (- 1))
        with torch.no_grad():
            # Power iteration to estimate the dominant singular vectors.
            for _ in range(self.n_power_iterations):
                v = F.normalize(torch.matmul(weight_mat.t(), u), dim=0, eps=self.eps)
                u = F.normalize(torch.matmul(weight_mat, v), dim=0, eps=self.eps)
        sigma = torch.dot(u, torch.matmul(weight_mat, v))
        weight = (weight / sigma)
        return (weight, u)
    def remove(self, module):
        """Undo `apply`: strip the hook buffers and restore a plain parameter."""
        weight = getattr(module, self.name)
        delattr(module, self.name)
        delattr(module, (self.name + '_u'))
        delattr(module, (self.name + '_orig'))
        module.register_parameter(self.name, torch.nn.Parameter(weight))
    def __call__(self, module, inputs):
        """Pre-forward hook: refresh the normalized weight while training."""
        if module.training:
            (weight, u) = self.compute_weight(module)
            setattr(module, self.name, weight)
            setattr(module, (self.name + '_u'), u)
        else:
            # In eval mode, keep the last computed weight but detach it,
            # preserving the original parameter's requires_grad flag.
            r_g = getattr(module, (self.name + '_orig')).requires_grad
            getattr(module, self.name).detach_().requires_grad_(r_g)
    # NOTE(review): `apply` takes no `self` and is invoked as
    # SpectralNorm.apply(...) by convention -- it is presumably meant to
    # be a @staticmethod; left byte-identical here.
    def apply(module, name, n_power_iterations, dim, eps):
        """Attach spectral normalization to `module.<name>` and return the hook."""
        fn = SpectralNorm(name, n_power_iterations, dim, eps)
        weight = module._parameters[name]
        height = weight.size(dim)
        # Random initial estimate of the left singular vector.
        u = F.normalize(weight.new_empty(height).normal_(0, 1), dim=0, eps=fn.eps)
        delattr(module, fn.name)
        module.register_parameter((fn.name + '_orig'), weight)
        # Plain buffer under the public name so forward() finds a tensor
        # even before the first hook invocation.
        module.register_buffer(fn.name, weight.data)
        module.register_buffer((fn.name + '_u'), u)
        module.register_forward_pre_hook(fn)
        return fn
def full_group_by(l, key=None):
    """Group items of `l` by key(item), bucketing on str(key) so that
    unhashable keys work.

    Raises ValueError when two distinct keys share a string form.
    Returns (key, items) pairs ordered by the string form.
    """
    if key is None:
        key = lambda x: x
    elements = defaultdict(list)
    original_keys = {}
    for item in l:
        k = key(item)
        rep = str(k)
        # Remember the first key seen for each representation and reject
        # any later, different key with the same str().
        stored = original_keys.setdefault(rep, k)
        if stored != k:
            raise ValueError('two distinct elements with representation {}'.format(rep))
        elements[rep].append(item)
    return [(original_keys[rep], elements[rep]) for rep in sorted(elements)]
def get_net_name(netlike):
    """Extract the name from a Net wrapper or NetDef proto; anything else
    is passed through unchanged (assumed to already be a name)."""
    if isinstance(netlike, Net):
        return netlike.Proto().name
    if isinstance(netlike, caffe2_pb2.NetDef):
        return netlike.name
    return netlike
class AdaptiveMaxPool2d(_AdaptiveMaxPoolNd):
    """2D adaptive max pooling: output spatial size is `output_size`
    regardless of input size; `return_indices` is handled by the base class."""
    output_size: _size_2_opt_t
    def forward(self, input: Tensor) -> Tensor:
        return F.adaptive_max_pool2d(input, self.output_size, self.return_indices)
def v1_cached_gpt3_turbo_request_v2(**kwargs):
    """Issue a chat-completion request; a 'stringify_request' kwarg, if
    present, carries the whole JSON-encoded request payload."""
    request = kwargs
    if 'stringify_request' in request:
        request = json.loads(request['stringify_request'])
    return openai.chat.completions.create(**request)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.