code stringlengths 101 5.91M |
|---|
class docXRefSectType(GeneratedsSuper):
    """Generated binding class for a Doxygen XML ``docXRefSect`` element.

    Holds an ``id`` attribute, a list of ``xreftitle`` strings and one
    ``xrefdescription`` child.  Follows the generateDS.py layout:
    factory/getters/setters, ``export*`` (XML serialization),
    ``exportLiteral*`` (Python-literal serialization) and ``build*``
    (DOM parsing) methods.
    """
    # Hook points so users can substitute their own subclass via factory().
    subclass = None
    superclass = None
    def __init__(self, id=None, xreftitle=None, xrefdescription=None):
        self.id = id
        # Fresh list per instance so no mutable default is shared.
        if (xreftitle is None):
            self.xreftitle = []
        else:
            self.xreftitle = xreftitle
        self.xrefdescription = xrefdescription
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one has been installed.
        if docXRefSectType.subclass:
            return docXRefSectType.subclass(*args_, **kwargs_)
        else:
            return docXRefSectType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_xreftitle(self):
        return self.xreftitle
    def set_xreftitle(self, xreftitle):
        self.xreftitle = xreftitle
    def add_xreftitle(self, value):
        self.xreftitle.append(value)
    def insert_xreftitle(self, index, value):
        # NOTE(review): replaces the item at `index` rather than inserting —
        # generateDS convention, but the name is misleading.
        self.xreftitle[index] = value
    def get_xrefdescription(self):
        return self.xrefdescription
    def set_xrefdescription(self, xrefdescription):
        self.xrefdescription = xrefdescription
    def get_id(self):
        return self.id
    def set_id(self, id):
        self.id = id
    def export(self, outfile, level, namespace_='', name_='docXRefSectType', namespacedef_=''):
        # Serialize this element as XML; collapses to a self-closing tag
        # when hasContent_() is False.
        showIndent(outfile, level)
        outfile.write(('<%s%s %s' % (namespace_, name_, namespacedef_)))
        self.exportAttributes(outfile, level, namespace_, name_='docXRefSectType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, (level + 1), namespace_, name_)
            showIndent(outfile, level)
            outfile.write(('</%s%s>\n' % (namespace_, name_)))
        else:
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='docXRefSectType'):
        # Only the optional `id` attribute exists on this element.
        if (self.id is not None):
            outfile.write((' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'),)))
    def exportChildren(self, outfile, level, namespace_='', name_='docXRefSectType'):
        # Each xreftitle becomes its own XML-quoted child element.
        for xreftitle_ in self.xreftitle:
            showIndent(outfile, level)
            outfile.write(('<%sxreftitle>%s</%sxreftitle>\n' % (namespace_, self.format_string(quote_xml(xreftitle_).encode(ExternalEncoding), input_name='xreftitle'), namespace_)))
        if self.xrefdescription:
            self.xrefdescription.export(outfile, level, namespace_, name_='xrefdescription')
    def hasContent_(self):
        # NOTE(review): xreftitle defaults to [] which is never None, so the
        # first clause is always True — quirk of the generated code.
        if ((self.xreftitle is not None) or (self.xrefdescription is not None)):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='docXRefSectType'):
        # Emit a Python-literal representation (generateDS "literal" mode).
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if (self.id is not None):
            showIndent(outfile, level)
            outfile.write(('id = %s,\n' % (self.id,)))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('xreftitle=[\n')
        level += 1
        for xreftitle in self.xreftitle:
            showIndent(outfile, level)
            outfile.write(('%s,\n' % quote_python(xreftitle).encode(ExternalEncoding)))
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        if self.xrefdescription:
            showIndent(outfile, level)
            outfile.write('xrefdescription=model_.descriptionType(\n')
            self.xrefdescription.exportLiteral(outfile, level, name_='xrefdescription')
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node_):
        # Populate this object from a DOM element node.
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            # Strip any namespace prefix from the tag name.
            nodeName_ = child_.nodeName.split(':')[(- 1)]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('id'):
            self.id = attrs.get('id').value
    def buildChildren(self, child_, nodeName_):
        if ((child_.nodeType == Node.ELEMENT_NODE) and (nodeName_ == 'xreftitle')):
            # Concatenate all text fragments under the element.
            xreftitle_ = ''
            for text__content_ in child_.childNodes:
                xreftitle_ += text__content_.nodeValue
            self.xreftitle.append(xreftitle_)
        elif ((child_.nodeType == Node.ELEMENT_NODE) and (nodeName_ == 'xrefdescription')):
            obj_ = descriptionType.factory()
            obj_.build(child_)
            self.set_xrefdescription(obj_)
def preactresnet18(num_classes=10, dropout=False, stride=1):
    """Build a PreActResNet-18 (four stages of two pre-activation blocks).

    NOTE(review): `dropout` is accepted but never forwarded to the model —
    confirm whether that is intentional.
    """
    stage_blocks = [2, 2, 2, 2]
    return PreActResNet(PreActBlock, stage_blocks, 64, num_classes, stride=stride)
def adjacent_tmp_file(path, **kwargs):
    """Yield a named temporary file created next to `path` (same directory).

    The body is a generator — presumably wrapped by
    ``@contextlib.contextmanager`` (decorator not visible in this chunk;
    confirm).  The file is created with ``delete=False`` so the caller can
    keep or rename it; before the surrounding ``with`` closes the file, the
    data is flushed and fsynced so the bytes are durable on disk.
    """
    with NamedTemporaryFile(delete=False, dir=os.path.dirname(path), prefix=os.path.basename(path), suffix='.tmp', **kwargs) as f:
        # cast() is purely for type-checkers; `result` is still `f` at runtime.
        result = cast('NamedTemporaryFileResult', f)
        try:
            (yield result)
        finally:
            # Ensure bytes reach the disk before callers rename/replace.
            result.file.flush()
            os.fsync(result.file.fileno())
def eval(config):
    """Evaluate a MatchingNetwork on the test split described by `config`.

    Averages episode loss/accuracy over ``config['data.episodes']`` episodes
    and prints the results.  NOTE(review): shadows the builtin ``eval``;
    renaming would change the public interface, so it is kept.
    """
    # Fixed seeds for reproducible episode sampling.
    np.random.seed(2019)
    tf.random.set_seed(2019)
    # Pick the compute device from the config.
    if config['data.cuda']:
        cuda_num = config['data.gpu']
        device_name = f'GPU:{cuda_num}'
    else:
        device_name = 'CPU:0'
    data_dir = config['data.dataset_path']
    ret = load(data_dir, config, ['test'])
    test_loader = ret['test']
    way = config['data.test_way']  # number of classes per episode
    lstm_dim = config['model.lstm_size']
    # Input image dims parsed from a "w,h,c" string.
    (w, h, c) = list(map(int, config['model.x_dim'].split(',')))
    test_loss = tf.metrics.Mean(name='test_loss')
    test_acc = tf.metrics.Mean(name='test_accuracy')
    model = MatchingNetwork(way, w, h, c, lstm_size=lstm_dim)
    model.load(config['model.save_dir'])
    def calc_loss(x_support, y_support, x_query, y_query):
        # Forward pass returns episode loss and accuracy.
        (loss, acc) = model(x_support, y_support, x_query, y_query)
        return (loss, acc)
    with tf.device(device_name):
        # Accumulate running means over all evaluation episodes.
        for i_episode in tqdm(range(config['data.episodes'])):
            (x_support, y_support, x_query, y_query) = test_loader.get_next_episode()
            (loss, acc) = calc_loss(x_support, y_support, x_query, y_query)
            test_loss(loss)
            test_acc(acc)
    print('Loss: ', test_loss.result().numpy())
    print('Accuracy: ', test_acc.result().numpy())
class LinearClassifier(nn.Module):
    """1x1-convolution classifier head on top of a feature extractor.

    Produces a single-channel score map from the backbone's latent
    representation; most accessors simply delegate to the backbone.
    """

    def __init__(self, features):
        super(LinearClassifier, self).__init__()
        self.features = features
        # One output channel scored per spatial location of the latent map.
        self.classifier = nn.Conv2d(features.latent_dim, 1, 1)

    def width(self):
        return self.features.width

    def latent_dim(self):
        return self.features.latent_dim

    def fill(self, stride=1):
        # Delegate fill/stride control to the backbone.
        return self.features.fill(stride=stride)

    def unfill(self):
        self.features.unfill()

    def forward(self, x):
        latent = self.features(x)
        return self.classifier(latent)
def conv(model, blob_in, blob_out, dim_in, dim_out, kernel, weight_init=None, bias_init=None, WeightInitializer=None, BiasInitializer=None, group=1, transform_inputs=None, **kwargs):
    """2-D convolution wrapper: delegates to _ConvBase with is_nd=False."""
    return _ConvBase(
        model,
        False,  # is_nd: this is the plain (non-N-d) conv entry point
        blob_in,
        blob_out,
        dim_in,
        dim_out,
        kernel,
        weight_init,
        bias_init,
        WeightInitializer,
        BiasInitializer,
        group,
        transform_inputs,
        **kwargs,
    )
class DriverNmslibIndexBuilder(IndexBuilder):
    """IndexBuilder that constructs an NMSLIB index on the Spark driver."""

    def produce_inferer(self, filter_seen_items: bool) -> IndexInferer:
        """Return the inferer variant matching the seen-items filtering flag."""
        inferer_cls = NmslibFilterIndexInferer if filter_seen_items else NmslibIndexInferer
        return inferer_cls(self.index_params, self.index_store)

    def build_index(self, vectors: SparkDataFrame, features_col: str, ids_col: Optional[str]=None):
        """Collect the vectors to the driver, then build and persist the index."""
        local_vectors = spark_to_pandas(vectors, self.allow_collect_to_master)
        NmslibIndexBuilderMixin.build_and_save_index(local_vectors, self.index_params, self.index_store)
class UtteranceItem():
    """Convenience view over one utterance within an interaction.

    Wraps ``(interaction, utterance_index)`` and exposes the fields of the
    selected utterance together with its conversational history.
    """

    def __init__(self, interaction, index):
        self.interaction = interaction
        self.utterance_index = index

    def _current(self):
        # The utterance this item points at.
        return self.interaction.utterances[self.utterance_index]

    def __str__(self):
        return str(self._current())

    def histories(self, maximum):
        """Input sequences of preceding utterances, truncated to the most recent `maximum`."""
        if maximum <= 0:
            return []
        prior = [utt.input_seq_to_use for utt in self.interaction.utterances[:self.utterance_index]]
        if len(prior) > maximum:
            prior = prior[-maximum:]
        return prior

    def input_sequence(self):
        return self._current().input_seq_to_use

    def previous_query(self):
        """Anonymized gold query of the previous utterance ([] for the first one)."""
        idx = self.utterance_index
        return [] if idx == 0 else self.interaction.utterances[idx - 1].anonymized_gold_query

    def anonymized_gold_query(self):
        return self._current().anonymized_gold_query

    def snippets(self):
        return self._current().available_snippets

    def original_gold_query(self):
        return self._current().original_gold_query

    def contained_entities(self):
        return self._current().contained_entities

    def original_gold_queries(self):
        return [pair[0] for pair in self._current().all_gold_queries]

    def gold_tables(self):
        return [pair[1] for pair in self._current().all_gold_queries]

    def gold_query(self):
        # Gold query terminated with the EOS token.
        return self._current().gold_query_to_use + [vocab.EOS_TOK]

    def gold_edit_sequence(self):
        return self._current().gold_edit_sequence

    def gold_table(self):
        return self._current().gold_sql_results

    def all_snippets(self):
        return self.interaction.snippets

    def within_limits(self, max_input_length=float('inf'), max_output_length=float('inf')):
        return self._current().length_valid(max_input_length, max_output_length)

    def _strip_eos(self, sequence):
        # Drop a trailing EOS token, if present.
        return sequence[:-1] if sequence[-1] == vocab.EOS_TOK else sequence

    def expand_snippets(self, sequence):
        """Expand snippet tokens, then repair parenthesis balance."""
        expanded = self.interaction.expand_snippets(self._strip_eos(sequence))
        return sql_util.fix_parentheses(expanded)

    def flatten_sequence(self, sequence):
        """Expand snippet tokens, then de-anonymize back to concrete SQL tokens."""
        expanded = self.interaction.expand_snippets(self._strip_eos(sequence))
        return self.interaction.deanonymize(expanded, 'sql')
class Function_arctanh(GinacFunction):
    """Inverse hyperbolic tangent, registered as the symbolic function ``arctanh``."""

    def __init__(self):
        # `conversions` maps to the equivalent function name in each backend
        # system (Maxima, SymPy, FriCAS, Giac, Mathematica).
        GinacFunction.__init__(self, 'arctanh', latex_name='\\operatorname{artanh}', conversions=dict(maxima='atanh', sympy='atanh', fricas='atanh', giac='atanh', mathematica='ArcTanh'))
def __parse_free_rusage(args):
    """Parse a ``free`` log at <prefix>/free.log(.xz) into {timestamp: mem dict}.

    The log is expected to interleave timestamp lines (containing 'UTC'),
    one column-header line (containing 'total'), and 'Mem:' rows; each Mem
    row is attached to the most recent timestamp.  On success the mapping is
    written to <prefix>/free_rusage.json.xz and True is returned; otherwise
    a warning is logged and False is returned.
    """
    free_filepath = f'{args.prefix}/free.log'
    if (not os.path.exists(free_filepath)):
        # Fall back to the xz-compressed variant.
        free_filepath += '.xz'
        if (not os.path.exists(free_filepath)):
            logging.warning(f'Unable to find memory usage data at {free_filepath}')
            return False
    rusage = {}
    last_ts = None
    mem_header = None  # column names captured from the 'total ...' header line
    with open_readable_file(free_filepath) as inf:
        for line in inf:
            if ('UTC' in line):
                # Timestamp line: first token is the epoch timestamp.
                parts = line.strip().split()
                if (len(parts) >= 1):
                    ts = float(parts[0])
                    last_ts = ts
            elif (('total' in line) and (mem_header is None)):
                mem_header = [p.strip() for p in line.strip().split()]
            elif ('Mem:' in line):
                parts = [p.strip() for p in line.strip().split()]
                mem_counts = [int(p) for p in parts[1:]]
                # NOTE(review): assumes a header line precedes the first Mem:
                # row; otherwise mem_header is None and this raises.
                memd = {f'mem_{mem_header[i]}': mem_counts[i] for i in range(len(mem_counts))}
                # setdefault keeps only the first Mem: row seen per timestamp.
                rusage.setdefault(last_ts, memd)
    if (len(rusage) > 0):
        outpath = f'{args.prefix}/free_rusage.json.xz'
        dump_json_data(rusage, outpath, compress=True)
        return True
    else:
        logging.warning(f'Unable to parse memory usage data from {free_filepath}.')
        return False
def get_mangle_prefix(name: str) -> str:
    """Return the part of `name` before the first '.' when it is mangled, else `name` unchanged."""
    if is_mangled(name):
        return name.partition('.')[0]
    return name
class TGCR():
    """Minimal model of a T-register file (T5, T6, T32, T33, T127), zero-initialised."""

    def __init__(self):
        # Register map keyed by name, e.g. "T5".
        self.regs = dict(T5=0, T6=0, T32=0, T33=0, T127=0)

    def setter(self, index, value):
        """Store `value` under register T<index> (creates the entry if new)."""
        self.regs[f'T{index}'] = value

    def getter(self, index):
        """Read register T<index>, coerced to int."""
        return int(self.regs[f'T{index}'])
def main():
    """Train/evaluate an MLP on SIGN-precomputed OGBN-Products features."""
    parser = argparse.ArgumentParser(description='OGBN-Products (SIGN)')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--log_steps', type=int, default=1)
    parser.add_argument('--num_layers', type=int, default=3)
    parser.add_argument('--hidden_channels', type=int, default=256)
    parser.add_argument('--dropout', type=float, default=0.5)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--epochs', type=int, default=200)
    parser.add_argument('--runs', type=int, default=10)
    args = parser.parse_args()
    print(args)
    device = (f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu')
    device = torch.device(device)
    dataset = PygNodePropPredDataset(name='ogbn-products')
    split_idx = dataset.get_idx_split()
    # SIGN transform precomputes x1..xK: node features propagated 1..K hops.
    data = SIGN(args.num_layers)(dataset[0])
    xs = ([data.x] + [data[f'x{i}'] for i in range(1, (args.num_layers + 1))])
    # Move each hop's feature matrix for each split onto the device.
    xs_train = [x[split_idx['train']].to(device) for x in xs]
    xs_valid = [x[split_idx['valid']].to(device) for x in xs]
    xs_test = [x[split_idx['test']].to(device) for x in xs]
    y_train_true = data.y[split_idx['train']].to(device)
    y_valid_true = data.y[split_idx['valid']].to(device)
    y_test_true = data.y[split_idx['test']].to(device)
    model = MLP(data.x.size((- 1)), args.hidden_channels, dataset.num_classes, args.num_layers, args.dropout).to(device)
    evaluator = Evaluator(name='ogbn-products')
    logger = Logger(args.runs, args)
    for run in range(args.runs):
        # Fresh parameters and optimizer for every independent run.
        model.reset_parameters()
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
        for epoch in range(1, (1 + args.epochs)):
            loss = train(model, xs_train, y_train_true, optimizer)
            train_acc = test(model, xs_train, y_train_true, evaluator)
            valid_acc = test(model, xs_valid, y_valid_true, evaluator)
            test_acc = test(model, xs_test, y_test_true, evaluator)
            result = (train_acc, valid_acc, test_acc)
            logger.add_result(run, result)
            if ((epoch % args.log_steps) == 0):
                (train_acc, valid_acc, test_acc) = result
                print(f'Run: {(run + 1):02d}, Epoch: {epoch:02d}, Loss: {loss:.4f}, Train: {(100 * train_acc):.2f}%, Valid: {(100 * valid_acc):.2f}%, Test: {(100 * test_acc):.2f}%')
        logger.print_statistics(run)
    logger.print_statistics()
class BackboneMixin():
    """Mixin adding feature-pyramid accessors to backbone models.

    Fix(review): the original (apparently decompiled) source contained bare
    ``_features.setter`` / ``_indices.setter`` statements and no ``@property``
    decorators — a NameError at class-creation time.  The property/setter
    decorators are restored here; ``channels`` already indexed
    ``self.out_feature_channels`` without calling it, confirming these were
    properties.
    """

    @property
    def out_feature_channels(self):
        # Channel count of every stage, keyed by stage name.
        return {stage: self.num_features[i] for (i, stage) in enumerate(self.stage_names)}

    @property
    def channels(self):
        # Channel counts of the selected output features only.
        return [self.out_feature_channels[name] for name in self.out_features]

    def forward_with_filtered_kwargs(self, *args, **kwargs):
        # Drop any kwargs the concrete forward() does not accept.
        signature = dict(inspect.signature(self.forward).parameters)
        filtered_kwargs = {k: v for (k, v) in kwargs.items() if (k in signature)}
        return self(*args, **filtered_kwargs)

    def forward(self, pixel_values, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None):
        raise NotImplementedError('This method should be implemented by the derived class.')

    @property
    def out_features(self):
        return self._out_features

    @out_features.setter
    def out_features(self, out_features: List[str]):
        """Set out_features, keeping _out_indices aligned."""
        (self._out_features, self._out_indices) = get_aligned_output_features_output_indices(out_features=out_features, out_indices=None, stage_names=self.stage_names)

    @property
    def out_indices(self):
        return self._out_indices

    @out_indices.setter
    def out_indices(self, out_indices: Union[(Tuple[int], List[int])]):
        """Set out_indices, keeping _out_features aligned."""
        (self._out_features, self._out_indices) = get_aligned_output_features_output_indices(out_features=None, out_indices=out_indices, stage_names=self.stage_names)
def _griffin_lim(S, hparams):
    """Griffin-Lim phase reconstruction: recover a waveform from magnitudes.

    Iteratively re-estimates phases from the current signal while keeping
    the target magnitude spectrogram fixed.

    Fix: ``np.complex`` (deprecated in NumPy 1.20, removed in 1.24) is
    replaced by the builtin ``complex``; the resulting dtype is unchanged.
    """
    # Random initial phases in [0, 2*pi).
    angles = np.exp((2j * np.pi) * np.random.rand(*S.shape))
    S_complex = np.abs(S).astype(complex)
    y = _istft(S_complex * angles, hparams)
    for _ in range(hparams.griffin_lim_iters):
        # Re-estimate phase from the current signal, keep the target magnitude.
        angles = np.exp(1j * np.angle(_stft(y, hparams)))
        y = _istft(S_complex * angles, hparams)
    return y
def iou_t_tf(gtrs, pred, threshold=0.5):
    """Per-sample IoU between thresholded 32x32x32 voxel grids (TF tensors).

    Assumes each input is a batch of 32*32*32 occupancy values — TODO confirm
    against callers.  NOTE(review): samples whose union is empty divide by
    zero (NaN/Inf in the result).
    """
    # Binarize at `threshold` and flatten each grid to (batch, 32768) booleans.
    gtrs = tf.cast(tf.reshape((gtrs > threshold), [gtrs.get_shape()[0], ((32 * 32) * 32)]), tf.bool)
    pred = tf.cast(tf.reshape((pred > threshold), [pred.get_shape()[0], ((32 * 32) * 32)]), tf.bool)
    # |A or B| and |A and B| per sample.
    union = tf.cast(tf.reduce_sum(tf.cast(tf.logical_or(gtrs, pred), tf.int64), axis=1), tf.float32)
    inter = tf.cast(tf.reduce_sum(tf.cast(tf.logical_and(gtrs, pred), tf.int64), axis=1), tf.float32)
    return (inter / union)
class MobileNetV3(nn.Module):
    """MobileNetV3-Large feature extractor: stem conv plus inverted-residual stack.

    Each cfgs row is [kernel, expansion factor t, output channels c,
    use-SE flag, use-h-swish flag, stride].
    """

    def __init__(self, width_mult=1.0):
        super(MobileNetV3, self).__init__()
        cfgs = [[3, 1, 16, 0, 0, 1], [3, 4, 24, 0, 0, 2], [3, 3, 24, 0, 0, 1], [5, 3, 40, 1, 0, 2], [5, 3, 40, 1, 0, 1], [5, 3, 40, 1, 0, 1], [3, 6, 80, 0, 1, 2], [3, 2.5, 80, 0, 1, 1], [3, 2.3, 80, 0, 1, 1], [3, 2.3, 80, 0, 1, 1], [3, 6, 112, 1, 1, 1], [3, 6, 112, 1, 1, 1], [5, 6, 160, 1, 1, 2], [5, 6, 160, 1, 1, 1], [5, 6, 160, 1, 1, 1]]
        # Stem: 3x3 stride-2 conv; all widths rounded to multiples of 8.
        in_ch = _make_divisible(16 * width_mult, 8)
        stages = [conv_3x3_bn(3, in_ch, 2)]
        for (k, t, c, use_se, use_hs, s) in cfgs:
            out_ch = _make_divisible(c * width_mult, 8)
            hidden_ch = _make_divisible(in_ch * t, 8)
            stages.append(InvertedResidual(in_ch, hidden_ch, out_ch, k, s, use_se, use_hs))
            in_ch = out_ch
        self.features = nn.Sequential(*stages)
def main(args, store=None):
    """End-to-end entry point: build loaders and model, then evaluate or train.

    Returns the evaluation result when ``args.eval_only`` is set, otherwise
    the trained model.
    """
    data_path = os.path.expandvars(args.data)
    dataset = DATASETS[args.dataset](data_path)
    (train_loader, val_loader) = dataset.make_loaders(args.workers, args.batch_size, data_aug=bool(args.data_aug))
    # Wrap loaders so host->device copies overlap with compute.
    train_loader = helpers.DataPrefetcher(train_loader)
    val_loader = helpers.DataPrefetcher(val_loader)
    loaders = (train_loader, val_loader)
    (model, checkpoint) = make_and_restore_model(arch=args.arch, dataset=dataset, resume_path=args.resume)
    # Unwrap a DataParallel-style wrapper so downstream code sees the bare module.
    if ('module' in dir(model)):
        model = model.module
    print(args)
    if args.eval_only:
        return eval_model(args, model, val_loader, store=store)
    model = train_model(args, model, loaders, store=store)
    return model
class PretrainConfig():
    """Structured pre-training config (Hydra-style).

    NOTE(review): the fields use ``dataclasses.field`` / ``MISSING`` defaults,
    which strongly suggests an ``@dataclass`` decorator was lost in
    extraction — confirm and restore it upstream.
    """
    defaults: List[Any] = field(default_factory=(lambda : DEFAULTS))
    # Hydra run directory derived from the model identifier and dataset name.
    hydra: Dict[(str, Any)] = field(default_factory=(lambda : {'run': {'dir': './runs/train/${model.identifier}+dataset-${dataset.name}'}}))
    run_id: Optional[str] = None
    seed: int = 21
    resume: bool = True  # resume training when a checkpoint exists
    resume_epoch: Optional[int] = None
    checkpoint_path: Optional[str] = None
    wandb_resume_id: Optional[str] = None
    # Required sub-configs: MISSING means they must be supplied at compose time.
    model: ModelConfig = MISSING
    dataset: DatasetConfig = MISSING
    accelerator: AcceleratorConfig = MISSING
    tracking: TrackingConfig = MISSING
def set_global_backend(backend):
    """Resolve `backend` (name or object) and install it as the global uarray backend."""
    resolved = _backend_from_arg(backend)
    ua.set_global_backend(resolved)
class BloomForTokenClassification(metaclass=DummyObject):
    """Import-time placeholder used when the required backend is unavailable."""
    # Backends the real class depends on.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Fails fast with an informative error when torch is missing.
        requires_backends(self, ['torch'])
def filter_state_dict(state_dict, remove_name='fc'):
    """Return a copy of `state_dict` without entries whose key contains `remove_name` as a substring."""
    return {key: state_dict[key] for key in state_dict if remove_name not in key}
def create_header_embedding(data_dir, header_vocab, origin_embed, is_bert=False):
    """Overwrite rows of `origin_embed` with precomputed 312-d header embeddings.

    Loads a pickled table from `data_dir` (BERT or non-BERT variant) and, for
    every id in `header_vocab`, copies the stored vector into `origin_embed`
    (mutated in place and also returned).  Assumes header_vocab maps
    header_id -> key into the pickled table — TODO confirm against callers.
    """
    with open(os.path.join(data_dir, ('header_embedding_312_bert.pkl' if is_bert else 'header_embedding_312.pkl')), 'rb') as f:
        # NOTE(review): pickle.load — only load trusted files.
        header_embed = pickle.load(f)
    for header_id in header_vocab:
        origin_embed[header_id] = header_embed[header_vocab[header_id]]
    return origin_embed
def load_pretrained_weights(model, pretrained_weights, checkpoint_key, model_name, patch_size):
    """Load a (possibly wrapped) checkpoint into `model`, non-strictly.

    Strips the common wrapper prefixes ('module.', 'backbone.', 'encoder.')
    left by DataParallel / self-supervised training wrappers.  When the
    checkpoint file does not exist, the model keeps its random weights.
    """
    if not os.path.isfile(pretrained_weights):
        print('There is no reference weights available for this model => We use random weights.')
        return
    state_dict = torch.load(pretrained_weights, map_location='cpu')
    if (checkpoint_key is not None) and (checkpoint_key in state_dict):
        print(f'Take key {checkpoint_key} in provided checkpoint dict')
        state_dict = state_dict[checkpoint_key]
    # Remove wrapper prefixes one pass at a time (replaces anywhere in the
    # key, matching the original behaviour).
    for prefix in ('module.', 'backbone.', 'encoder.'):
        state_dict = {key.replace(prefix, ''): value for (key, value) in state_dict.items()}
    msg = model.load_state_dict(state_dict, strict=False)
    print('Pretrained weights found at {} and loaded with msg: {}'.format(pretrained_weights, msg))
class HRNet(nn.Module):
    """HRNet classification backbone.

    Stem (two stride-2 3x3 convs, 1/4 resolution) -> one Bottleneck stage ->
    three multi-branch stages connected by transitions (branches at 1/4 ..
    1/32 resolution) -> a head that fuses the branches into a 2048-d vector
    for the final linear classifier.
    """

    def __init__(self, aligned=False, use_se=False, use_global=False, avg_down=False, base_width=32, norm='bn', stage_with_conv=('normal', 'normal', 'normal', 'normal'), num_classes=1000):
        super(HRNet, self).__init__()
        # Block variants: Aligned* when `aligned` is set, else the standard ones.
        block_1 = (AlignedBottleneck if aligned else Bottleneck)
        block_2 = (AlignedBasicBlock if aligned else BasicBlock)
        self.use_se = use_se
        self.avg_down = avg_down
        self.base_width = base_width
        self.norm = norm
        self.head_dim = (32, 64, 128, 256)  # per-branch widths used by the head
        self.inplanes = 64
        # Stem: two stride-2 convs bring the input to 1/4 resolution.
        self.conv1 = nn.Conv2d(3, 64, 3, 2, 1, bias=False)
        self.bn1 = make_norm(64, norm=self.norm)
        self.conv2 = nn.Conv2d(64, 64, 3, 2, 1, bias=False)
        self.bn2 = make_norm(64, norm=self.norm)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block_1, 64, 4, 1, conv=stage_with_conv[0])
        self.transition1 = self._make_transition(index=1, stride=2)
        self.stage2 = nn.Sequential(StageModule(block_2, base_width, norm, stage_with_conv[1], use_se, False, stage=2, output_branches=2))
        self.transition2 = self._make_transition(index=2, stride=2)
        self.stage3 = nn.Sequential(StageModule(block_2, base_width, norm, stage_with_conv[2], use_se, use_global, stage=3, output_branches=3), StageModule(block_2, base_width, norm, stage_with_conv[2], use_se, use_global, stage=3, output_branches=3), StageModule(block_2, base_width, norm, stage_with_conv[2], use_se, use_global, stage=3, output_branches=3), StageModule(block_2, base_width, norm, stage_with_conv[2], use_se, use_global, stage=3, output_branches=3))
        self.transition3 = self._make_transition(index=3, stride=2)
        self.stage4 = nn.Sequential(StageModule(block_2, base_width, norm, stage_with_conv[3], use_se, use_global, stage=4, output_branches=4), StageModule(block_2, base_width, norm, stage_with_conv[3], use_se, use_global, stage=4, output_branches=4), StageModule(block_2, base_width, norm, stage_with_conv[3], use_se, use_global, stage=4, output_branches=4))
        # Branch widths double at every resolution step.
        pre_stage_channels = [base_width, (base_width * 2), (base_width * 4), (base_width * 8)]
        (self.incre_modules, self.downsamp_modules, self.final_layer) = self._make_head(block_1, pre_stage_channels, outplanes=2048, conv=stage_with_conv[3])
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(2048, num_classes)
        self._init_weights()

    def stage_out_dim(self):
        # Channel widths after the stem and each of the four branches.
        return [64, self.base_width, (self.base_width * 2), (self.base_width * 4), (self.base_width * 8)]

    def stage_out_spatial(self):
        # Spatial scale of each stage output relative to the input.
        return [(1 / 2.0), (1 / 4.0), (1 / 8.0), (1 / 16.0), (1 / 32.0)]

    def _init_weights(self):
        """Kaiming conv init, unit norms, near-zero linear; then special cases."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if (m.bias is not None):
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.0001)
                nn.init.constant_(m.bias, 0)
        # Deformable-conv offset branches start at zero (identity sampling).
        for m in self.modules():
            if isinstance(m, ops.DeformConvPack):
                nn.init.constant_(m.conv_offset.weight, 0)
                nn.init.constant_(m.conv_offset.bias, 0)
            if isinstance(m, ops.ModulatedDeformConvPack):
                nn.init.constant_(m.conv_offset_mask.weight, 0)
                nn.init.constant_(m.conv_offset_mask.bias, 0)
        # Zero-init the last norm layer of every residual block.
        for m in self.modules():
            if isinstance(m, BasicBlock):
                nn.init.constant_(m.bn2.weight, 0)
            elif isinstance(m, Bottleneck):
                nn.init.constant_(m.bn3.weight, 0)
            elif isinstance(m, AlignedBottleneck):
                nn.init.constant_(m.bn.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, conv='normal'):
        """Stack `blocks` residual blocks; first one may downsample its shortcut."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            if self.avg_down:
                # Average-pool then 1x1 conv for the shortcut.
                downsample = nn.Sequential(nn.AvgPool2d(kernel_size=stride, stride=stride), nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=1, bias=False), make_norm((planes * block.expansion), norm=self.norm))
            else:
                downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), make_norm((planes * block.expansion), norm=self.norm))
        layers = []
        layers.append(block(self.inplanes, planes, stride, dilation, self.norm, conv, self.use_se, True, downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, 1, dilation, self.norm, conv, self.use_se, True))
        return nn.Sequential(*layers)

    def _make_transition(self, index=1, stride=1):
        """Build the inter-stage transition: keep existing branches, add one lower-resolution branch."""
        transition = nn.ModuleList()
        if (index == 1):
            # First transition also remaps the stem width to base_width.
            transition.append(nn.Sequential(nn.Conv2d(self.inplanes, self.base_width, kernel_size=3, stride=1, padding=1, bias=False), make_norm(self.base_width, norm=self.norm), nn.ReLU(inplace=True)))
        else:
            # Identity passthrough for the already-existing branches.
            transition.extend([nn.Sequential() for _ in range(index)])
        transition.append(nn.Sequential(nn.Sequential(nn.Conv2d((self.inplanes if (index == 1) else (self.base_width * (2 ** (index - 1)))), (self.base_width * (2 ** index)), kernel_size=3, stride=stride, padding=1, bias=False), make_norm((self.base_width * (2 ** index)), norm=self.norm), nn.ReLU(inplace=True))))
        return transition

    def _make_head(self, block, pre_stage_channels, outplanes=2048, conv='normal'):
        """Per-branch widening blocks, top-down downsample chain, and the final 1x1 expansion."""
        incre_modules = []
        for (i, channels) in enumerate(pre_stage_channels):
            self.inplanes = channels
            incre_module = self._make_layer(block, self.head_dim[i], 1, stride=1, dilation=1, conv=conv)
            incre_modules.append(incre_module)
        incre_modules = nn.ModuleList(incre_modules)
        downsamp_modules = []
        for i in range((len(pre_stage_channels) - 1)):
            # Each step halves resolution and moves to the next head width.
            in_channels = (self.head_dim[i] * block.expansion)
            out_channels = (self.head_dim[(i + 1)] * block.expansion)
            downsamp_module = nn.Sequential(nn.Conv2d(in_channels, out_channels, 3, 2, 1), make_norm(out_channels, norm=self.norm), nn.ReLU(inplace=True))
            downsamp_modules.append(downsamp_module)
        downsamp_modules = nn.ModuleList(downsamp_modules)
        final_layer = nn.Sequential(nn.Conv2d((self.head_dim[3] * block.expansion), outplanes, 1, 1, 0), make_norm(outplanes, norm=self.norm), nn.ReLU(inplace=True))
        return (incre_modules, downsamp_modules, final_layer)

    def forward(self, x):
        # Stem.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.layer1(x)
        # Fan out into parallel branches, widening at every transition.
        x = [trans(x) for trans in self.transition1]
        x = self.stage2(x)
        x = [self.transition2[0](x[0]), self.transition2[1](x[1]), self.transition2[2](x[(- 1)])]
        x = self.stage3(x)
        x = [self.transition3[0](x[0]), self.transition3[1](x[1]), self.transition3[2](x[2]), self.transition3[3](x[(- 1)])]
        x = self.stage4(x)
        # Head: fuse branches top-down, then classify.
        y = self.incre_modules[0](x[0])
        for i in range(len(self.downsamp_modules)):
            y = (self.incre_modules[(i + 1)](x[(i + 1)]) + self.downsamp_modules[i](y))
        y = self.final_layer(y)
        y = self.avgpool(y)
        y = y.view(y.size(0), (- 1))
        y = self.classifier(y)
        return y
class StepVisitor(StepVisitor):
    # NOTE(review): deliberately (?) shadows the previously-imported
    # StepVisitor base class with a refinement of the same name — confirm
    # this redefinition is intended.
    def __init__(self, nodes, flow):
        # `nodes`: name -> DAGnode registry to fill; `flow`: object owning the step functions.
        super().__init__(nodes, flow)

    def visit_FunctionDef(self, node):
        """Register AST FunctionDefs whose flow attribute carries the `is_step` marker."""
        func = getattr(self.flow, node.name)
        # Only functions tagged by the step decorator become DAG nodes.
        if hasattr(func, 'is_step'):
            self.nodes[node.name] = DAGnode(node, func.decorators, func.__doc__)
def mse_array(array_x, array_y, size):
    """Per-row sum of squared differences scaled by 1/size.

    NOTE(review): `rescale_x`/`rescale_y` are plain aliases (no rescaling
    happens); `tf.squared_difference` and `tf.to_float` are TF1 APIs
    (tf.math.squared_difference / tf.cast in TF2).
    """
    rescale_x = array_x
    rescale_y = array_y
    # Sum of squared differences along axis 1 — one value per row.
    se = tf.reduce_sum(tf.squared_difference(rescale_x, rescale_y), 1)
    inv_size = tf.to_float((1 / size))
    return tf.scalar_mul(inv_size, se)
def get_link(id, entity='paper'):
    """Display clickable API and webpage links for `id` in a notebook.

    Fix(review): the original URL string literals were truncated to a bare
    quote in this copy (a syntax error).  Placeholder templates with the same
    two positional `{}` slots (entity, id) restore syntactic validity —
    the real URLs must be restored from the upstream source.
    """
    api = 'https://api.example.org/{}/{}'  # TODO: restore original API URL
    webpage = 'https://example.org/{}/{}'  # TODO: restore original webpage URL
    for base in [api, webpage]:
        link = base.format(entity, id)
        txt = f'<a href="{link}">{link}</a>'
        ipd.display(ipd.HTML(txt))
def custom_tokenizers(test_case):
    """Decorator: run `test_case` only when custom-tokenizer tests are enabled globally."""
    if _run_custom_tokenizers:
        return test_case
    return unittest.skip('test of custom tokenizers')(test_case)
def calculate_val(thresholds_val, distances, labels, far_target=0.001, num_folds=10):
    """Estimate TAR at a fixed FAR via K-fold cross-validation.

    For each fold, sweeps the candidate thresholds on the training split,
    interpolates the threshold that achieves `far_target`, and measures
    (TAR, FAR) on the held-out split at that threshold.  Returns per-fold
    arrays (tar, far).
    """
    num_pairs = min(len(labels), len(distances))
    num_thresholds = len(thresholds_val)
    k_fold = KFold(n_splits=num_folds, shuffle=False)
    tar = np.zeros(num_folds)
    far = np.zeros(num_folds)
    indices = np.arange(num_pairs)
    for fold_index, (train_set, test_set) in enumerate(k_fold.split(indices)):
        # FAR on the training split at every candidate threshold.
        far_train = np.zeros(num_thresholds)
        for threshold_index, candidate in enumerate(thresholds_val):
            _, far_train[threshold_index] = calculate_val_far(threshold=candidate, dist=distances[train_set], actual_issame=labels[train_set])
        if np.max(far_train) >= far_target:
            # Interpolate the threshold that hits the target FAR exactly.
            to_threshold = interpolate.interp1d(far_train, thresholds_val, kind='slinear')
            chosen = to_threshold(far_target)
        else:
            # Target FAR unreachable on this fold.
            chosen = 0.0
        tar[fold_index], far[fold_index] = calculate_val_far(threshold=chosen, dist=distances[test_set], actual_issame=labels[test_set])
    return (tar, far)
class SLeNet300(nn.Module):
    """LeNet-300-100 style MLP built from stochastic SLinear layers.

    Three SLinear stages (300 -> 100 -> output), the first two followed by
    BatchNorm1d and ReLU; `Q_l` supplies the quantization levels.
    """

    def __init__(self, input_dim, output_dim, Q_l):
        super(SLeNet300, self).__init__()
        self.Q_l = Q_l
        self.qlevels = Q_l.size(0)  # number of quantization levels
        self.input_dim = input_dim
        self.output_dim = output_dim
        # Hidden stage 1: 300 units.
        self.w1 = sl.SLinear(input_dim, 300, Q_l)
        self.bn1 = nn.BatchNorm1d(300, affine=True)
        self.relu1 = nn.ReLU(inplace=True)
        # Hidden stage 2: 100 units.
        self.w2 = sl.SLinear(300, 100, Q_l)
        self.bn2 = nn.BatchNorm1d(100, affine=True)
        self.relu2 = nn.ReLU(inplace=True)
        # Output projection.
        self.w3 = sl.SLinear(100, output_dim, Q_l)

    def forward(self, x):
        """Flatten the input and run it through the three stages."""
        out = x.view((- 1), self.input_dim)
        out = self.relu1(self.bn1(self.w1(out)))
        out = self.relu2(self.bn2(self.w2(out)))
        return self.w3(out)
class TriFingerAction(object):
    """Action-space helper for a 3-finger robot.

    Supports three modes — 'joint_positions', 'joint_torques' and
    'end_effector_positions' — each defining 9-dim (3 fingers x 3 values)
    lower/upper bounds, with optional [-1, 1] normalization.
    """

    def __init__(self, action_mode='joint_positions', normalize_actions=True):
        self.normalize_actions = normalize_actions
        self.max_motor_torque = 0.36
        self.low = None
        self.high = None
        num_fingers = 3
        self.action_mode = action_mode
        # Per-finger joint limits, repeated for the three fingers.
        self.joint_positions_lower_bounds = np.array([-1.57, -1.2, -3.0] * 3)
        self.joint_positions_upper_bounds = np.array([1.0, 1.57, 3.0] * 3)
        self.joint_positions_raised = np.array([-1.56, -0.08, -2.7] * 3)
        if action_mode == 'joint_positions':
            bounds = (self.joint_positions_lower_bounds, self.joint_positions_upper_bounds)
        elif action_mode == 'joint_torques':
            bounds = (np.array([-self.max_motor_torque] * 3 * num_fingers),
                      np.array([self.max_motor_torque] * 3 * num_fingers))
        elif action_mode == 'end_effector_positions':
            bounds = (np.array([-0.5, -0.5, 0] * 3), np.array([0.5, 0.5, 0.5] * 3))
        else:
            raise ValueError('No valid action_mode specified: {}'.format(action_mode))
        self.set_action_space(*bounds)

    def set_action_space(self, lower_bounds, upper_bounds):
        """Install new bounds; both arrays must have the same length."""
        assert (len(lower_bounds) == len(upper_bounds))
        self.low = lower_bounds
        self.high = upper_bounds

    def get_action_space(self):
        """Gym Box: the unit cube when normalizing, otherwise the raw bounds."""
        if self.normalize_actions:
            return spaces.Box(low=(- np.ones(len(self.low))), high=np.ones(len(self.high)), dtype=np.float64)
        return spaces.Box(low=self.low, high=self.high, dtype=np.float64)

    def is_normalized(self):
        return self.normalize_actions

    def satisfy_constraints(self, action):
        """True when `action` lies strictly inside the active bounds."""
        if self.normalize_actions:
            lo, hi = (- 1.0), 1.0
        else:
            lo, hi = self.low, self.high
        return (action > lo).all() and (action < hi).all()

    def clip_action(self, action):
        """Clip to the active bounds."""
        if self.normalize_actions:
            return clip(action, (- 1.0), 1.0)
        return clip(action, self.low, self.high)

    def normalize_action(self, action):
        """Map a raw action in [low, high] to [-1, 1]."""
        span = self.high - self.low
        return 2.0 * (action - self.low) / span - 1.0

    def denormalize_action(self, action):
        """Map a normalized action in [-1, 1] back to [low, high]."""
        return self.low + (action + 1.0) / 2.0 * (self.high - self.low)
class InputExample(object):
    """Single pre-training example (masked-LM targets plus next-sentence label)."""

    def __init__(self, tokens, segment_ids, masked_lm_positions, masked_lm_labels, is_random_next):
        self.tokens = tokens
        self.segment_ids = segment_ids
        self.masked_lm_positions = masked_lm_positions
        self.masked_lm_labels = masked_lm_labels
        self.is_random_next = is_random_next

    def __repr__(self):
        # One "name:value" line per attribute, in definition order.
        lines = [f'{name}:{value!s}' for (name, value) in self.__dict__.items()]
        return '\n'.join(lines)
def get_train_loaders(dataset_train, args, batch_size=None, drop_last=True):
    """Build the training DataLoader using the project's sampler factory.

    Falls back to ``args.batch_size`` when `batch_size` is falsy (matching
    the original ``or`` semantics).
    """
    effective_bs = batch_size or args.batch_size
    train_sampler = samplers.get_train_sampler(dataset_train, args)
    return torch.utils.data.DataLoader(
        dataset_train,
        sampler=train_sampler,
        batch_size=effective_bs,
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=drop_last,
    )
def register_Ns3SimpleRefCount__Ns3FdReader_Ns3Empty_Ns3DefaultDeleter__lt__ns3FdReader__gt___methods(root_module, cls):
    """Generated binding registration for ns3::SimpleRefCount<FdReader, ...>.

    Adds the default constructor and the copy constructor to the wrapped class.
    """
    cls.add_constructor([])
    # Copy constructor taking a const reference to the same template instantiation.
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter< ns3::FdReader > > const &', 'o')])
    return
def noisy_hartmann_6(x: TensorType) -> TensorType:
    """Hartmann-6 objective with additive unit-Gaussian observation noise (one value per row of x)."""
    noise = tf.random.normal([len(x), 1], 0, 1, tf.float64)
    return hartmann_6(x) + noise
def on_key_press(k, modifiers):
    """Keyboard handler: action[0][2] is set to 1 when SPACE is pressed, 0 for any other key."""
    action[0][2] = 1 if (k == key.SPACE) else 0
class TestDeriavtives(TestCase):
    # NOTE(review): class name misspells "Derivatives"; kept as-is because the
    # public name may be referenced by test discovery/config.
    def setUp(self):
        """Random humanoid model plus random state (q, v, tau) and external forces."""
        self.model = pin.buildSampleModelHumanoidRandom()
        self.data = self.model.createData()
        qmax = np.full((self.model.nq, 1), np.pi)
        self.q = pin.randomConfiguration(self.model, (- qmax), qmax)
        self.v = np.random.rand(self.model.nv)
        self.tau = np.random.rand(self.model.nv)
        # One random external force per joint.
        self.fext = []
        for _ in range(self.model.njoints):
            self.fext.append(pin.Force.Random())
    def test_aba_derivatives(self):
        """computeABADerivatives must agree with plain ABA, with and without fext."""
        res = pin.computeABADerivatives(self.model, self.data, self.q, self.v, self.tau)
        self.assertTrue((len(res) == 3))  # three partial-derivative matrices expected
        data2 = self.model.createData()
        pin.aba(self.model, data2, self.q, self.v, self.tau)
        self.assertApprox(self.data.ddq, data2.ddq)
        # Same check with external forces supplied.
        res = pin.computeABADerivatives(self.model, self.data, self.q, self.v, self.tau, self.fext)
        self.assertTrue((len(res) == 3))
        pin.aba(self.model, data2, self.q, self.v, self.tau, self.fext)
        self.assertApprox(self.data.ddq, data2.ddq)
class RingDerivationWithoutTwist_zero(RingDerivationWithoutTwist):
    """The zero derivation of a ring (untwisted case)."""

    def __init__(self, parent, arg=None):
        # A singleton list wrapping a derivation is unwrapped first.
        if isinstance(arg, list) and len(arg) == 1 and isinstance(arg[0], RingDerivation):
            arg = arg[0]
        # Only a falsy argument or an existing zero derivation is accepted.
        if arg and not (isinstance(arg, RingDerivation) and arg.is_zero()):
            raise ValueError('unable to create the derivation')
        RingDerivation.__init__(self, parent)

    def _repr_(self):
        return '0'

    def _latex_(self):
        return '0'

    def __hash__(self):
        return hash(tuple(self.list()))

    def _add_(self, other):
        # 0 + d = d
        return other

    def _sub_(self, other):
        # 0 - d = -d
        return -other

    def _neg_(self):
        return self

    def _lmul_(self, factor):
        return self

    def _rmul_(self, left):
        return self

    def _call_(self, x):
        # The zero derivation maps everything to zero in the codomain.
        return self.parent().codomain().zero()

    def _bracket_(self, other):
        return self

    def is_zero(self):
        return True

    def list(self):
        return []
from dataclasses import dataclass
from typing import List


# Bug fix: the decorator line had been mangled to a bare `(frozen=True)`
# expression, leaving the class with inert annotations and no generated
# __init__/__eq__; restored as a frozen dataclass.
@dataclass(frozen=True)
class ContaminationPoint():
    """Immutable record describing a contamination point.

    Attributes:
        models: affected model identifiers.
        groups: affected group identifiers.
        level: contamination severity label.
        description: human-readable description.
    """
    models: List[str]
    groups: List[str]
    level: str
    description: str
class ConcatTable(nn.Module):
    """Feeds one input through every stored sub-module and collects the outputs."""

    def __init__(self, module_list=None):
        super(ConcatTable, self).__init__()
        self.modules_list = nn.ModuleList(module_list)

    def forward(self, x: Variable):
        # One output per branch, all branches receiving the identical input.
        return [branch(x) for branch in self.modules_list]

    def add(self, module):
        """Append another branch module."""
        self.modules_list.append(module)
class M2M100Tokenizer(metaclass=DummyObject):
    """Import-time placeholder that fails fast when sentencepiece is absent."""
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        # Raises a helpful error if the backend is not installed.
        requires_backends(self, ['sentencepiece'])
class Mlp3Layer128Unit(Mlp3LayerTemplate):
    """Three-layer MLP template variant using 128 hidden units."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Override the template's hidden-layer width.
        self.n_hidden = 128
class MCLogTabWidget(QtWidgets.QTabWidget):
    """QTabWidget wired to use the custom MCLogTabBar."""

    def __init__(self, parent=None):
        super(MCLogTabWidget, self).__init__(parent)
        # Replace the default tab bar with the project-specific one.
        self.setTabBar(MCLogTabBar(self))
def get_block_stats(G, node_labels, n_blocks=None):
    """Compute per-block statistics of a labeled graph.

    Returns a tuple (block_ns, block_ms, block_Ms):
      - block_ns[r]: number of distinct nodes of block r incident to at
        least one edge (isolated nodes are never counted).
      - block_ms[r, s]: edge count between blocks r and s (symmetric off
        the diagonal; self-block edges counted once).
      - block_Ms[r, s]: maximum possible edge count between r and s, as
        reported by get_max_edges.
    """
    if n_blocks is None:
        n_blocks = max(node_labels) + 1
    counted = set()
    block_ns = np.zeros(n_blocks, dtype=int)
    block_ms = np.zeros((n_blocks, n_blocks), dtype=np.int64)
    for u, v in G.edges():
        block_u = node_labels[u]
        block_v = node_labels[v]
        block_ms[block_u, block_v] += 1
        if block_u != block_v:
            # Keep the off-diagonal counts symmetric.
            block_ms[block_v, block_u] += 1
        if u not in counted:
            block_ns[block_u] += 1
            counted.add(u)
        if v not in counted:
            block_ns[block_v] += 1
            counted.add(v)
    block_Ms = np.zeros((n_blocks, n_blocks), dtype=np.int64)
    for r in range(n_blocks):
        for s in range(r + 1):
            max_edges = get_max_edges(r, s, block_ns)
            block_Ms[r, s] = max_edges
            if r != s:
                block_Ms[s, r] = max_edges
    return (block_ns, block_ms, block_Ms)
class CloneExample(object):
    """A pair of code snippets with a clone label and their source URLs."""

    def __init__(self, code1, code2, label, url1, url2):
        self.label = label
        self.url1 = url1
        self.url2 = url2
        # The first snippet is the "source", the second the "target".
        self.source = code1
        self.target = code2
def save_log(logs, columns, filename):
    """Write `logs` (rows) as a CSV file under the given column names."""
    frame = pd.DataFrame(logs)
    frame.columns = columns
    frame.to_csv(filename)
class FeatureSet(object):
    """Abstract base class for feature extractors.

    Subclasses must override :meth:`extract`.
    """

    def extract(self, data):
        """Extract features from `data`.

        Raises:
            NotImplementedError: always, on the base class.
        """
        # Bug fix: the exception was previously *returned* rather than
        # raised, so callers silently received an exception instance.
        raise NotImplementedError('Method needs to be overwritten by subclass')
def clip_long_spans(spans, maxspanlen):
    """Split every span longer than `maxspanlen` into chunks of at most that length.

    `spans` is a list of inclusive (start, end) pairs. Over-long spans are
    replaced in place by their chunks and the list is re-sorted.

    Bug fix: the original returned `spans` only on the nothing-to-clip
    path and implicitly returned None after mutating; the list is now
    returned on every path.

    Returns:
        The (possibly mutated) `spans` list.
    """
    too_long = [s for s in spans if (s[1] - s[0] + 1) > maxspanlen]
    if not too_long:
        return spans
    for span in too_long:
        spanlen = span[1] - span[0] + 1
        n_full_chunks = spanlen // maxspanlen
        pieces = []
        begin = span[0]
        for _ in range(n_full_chunks):
            pieces.append((begin, begin + maxspanlen - 1))
            begin += maxspanlen
        if spanlen % maxspanlen != 0:
            # Trailing partial chunk.
            pieces.append((span[0] + n_full_chunks * maxspanlen, span[1]))
        spans.remove(span)
        spans.extend(pieces)
    spans.sort()
    return spans
def circle_thin(N=5000):
    """Sample N noisy points on the unit circle.

    Angles are drawn from a standard normal (so coverage is uneven) and
    isotropic Gaussian noise of scale 0.05 is added to each point.
    """
    angles = np.random.randn(N)
    points = np.array([[np.sin(a), np.cos(a)] for a in angles])
    return points + 0.05 * np.random.randn(N, 2)
class Vocabulary(object):
    """Base vocabulary holding special-token ids.

    Concrete subclasses are expected to fill the ids in and implement
    label_to_string.
    """

    def __init__(self, *args, **kwargs):
        # Special-token ids; all unset on the base class.
        self.sos_id = None
        self.eos_id = None
        self.pad_id = None
        self.blank_id = None

    def label_to_string(self, labels):
        """Convert label ids into a string; must be overridden."""
        raise NotImplementedError
class LRScheduler(TrainHook):
    """Train hook that steps an LR scheduler and logs the representative LR.

    The "best" param group is chosen once at construction: if every group
    holds a single parameter, the group with the most common learning rate
    wins (first occurrence, preserving Counter/most_common tie order);
    otherwise the first group with the largest parameter count wins.
    """

    def __init__(self, optimizer, scheduler):
        self.optimizer = optimizer
        self.scheduler = scheduler
        group_sizes = [len(g['params']) for g in optimizer.param_groups]
        largest_group = max(group_sizes)
        if largest_group == 1:
            lr_count = Counter(g['lr'] for g in optimizer.param_groups)
            most_common_lr = lr_count.most_common()[0][0]
            self._best_param_group_id = next(
                i for i, g in enumerate(optimizer.param_groups)
                if g['lr'] == most_common_lr
            )
        else:
            self._best_param_group_id = next(
                i for i, g in enumerate(optimizer.param_groups)
                if len(g['params']) == largest_group
            )

    def after_step(self, storage, **kwargs):
        """Record the current LR and advance the scheduler by one step."""
        lr = self.optimizer.param_groups[self._best_param_group_id]['lr']
        storage.put_scalar('lr', lr, smoothing_hint=False)
        self.scheduler.step()
def test_predict_proba_with_ds_hard(create_pool_classifiers):
    """predict_proba_with_ds with hard voting should average the selected votes."""
    expected = np.array([0.666, 0.333])
    DFP_mask = np.ones((1, 6))
    predictions = np.array([[0, 1, 0, 0, 1, 0]])
    probabilities = np.array([[[0.5, 0.5], [1, 0], [0.33, 0.67], [0.5, 0.5], [1, 0], [0.33, 0.67]]])
    # Double the fixture pool so six classifiers are available.
    pool = create_pool_classifiers + create_pool_classifiers
    desmi_test = DESMI(pool, DFP=True, voting='hard')
    desmi_test.n_classes_ = 2
    # Force a fixed competence estimate and a fixed selection.
    desmi_test.estimate_competence = MagicMock(return_value=np.ones(6))
    desmi_test.select = MagicMock(return_value=np.array([[0, 1, 5]]))
    proba = desmi_test.predict_proba_with_ds(predictions, probabilities, DFP_mask=DFP_mask)
    assert np.isclose(proba, expected, atol=0.01).all()
def _create_dummy_line_json_file(ann_file):
ann_info1 = {'filename': 'sample1.jpg', 'text': 'hello'}
ann_info2 = {'filename': 'sample2.jpg', 'text': 'world'}
with open(ann_file, 'w') as fw:
for ann_info in [ann_info1, ann_info2]:
fw.write((json.dumps(ann_info) + '\n')) |
class CompileTimeScope(object):
    """A chained name -> value mapping for compile-time lookups."""

    def __init__(self, outer=None):
        self.entries = {}
        self.outer = outer

    def declare(self, name, value):
        """Bind `name` in this scope only."""
        self.entries[name] = value

    def update(self, other):
        """Merge a mapping of bindings into this scope."""
        self.entries.update(other)

    def lookup_here(self, name):
        """Look up `name` in this scope only; raises KeyError when absent."""
        return self.entries[name]

    def __contains__(self, name):
        # Membership only checks the local scope, not outer ones.
        return name in self.entries

    def lookup(self, name):
        """Look up `name` here, then through enclosing scopes.

        Raises KeyError when the name is bound nowhere in the chain.
        """
        try:
            return self.lookup_here(name)
        except KeyError:
            if self.outer:
                return self.outer.lookup(name)
            raise
def _get_graph(explainer):
    """Return the TensorFlow graph associated with the explainer.

    In graph mode the explainer's session graph is used; in eager mode
    the Keras backend's global graph is returned instead.
    """
    _import_tf()
    if tf.executing_eagerly():
        from tensorflow.python.keras import backend
        return backend.get_graph()
    return explainer.session.graph
def register_Ns3TypeId_methods(root_module, cls):
    """Register operators, constructors and methods for the ns3::TypeId binding.

    Generated-style pybindgen registration: each cls.add_* call declares one
    C++ member for the Python wrapper. Signatures mirror the ns-3 TypeId API.
    """
    # Comparison and stream operators.
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<')
    # Constructors: by name, default, and copy.
    cls.add_constructor([param('char const *', 'name')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    # Attribute / trace-source registration methods.
    cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
    cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    # Introspection getters.
    cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True)
    cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True)
    cls.add_method('GetGroupName', 'std::string', [], is_const=True)
    cls.add_method('GetHash', 'ns3::TypeId::hash_t', [], is_const=True)
    cls.add_method('GetName', 'std::string', [], is_const=True)
    cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True)
    cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True)
    cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True)
    cls.add_method('GetSize', 'std::size_t', [], is_const=True)
    cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True)
    cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True)
    cls.add_method('GetUid', 'uint16_t', [], is_const=True)
    cls.add_method('HasConstructor', 'bool', [], is_const=True)
    cls.add_method('HasParent', 'bool', [], is_const=True)
    cls.add_method('HideFromDocumentation', 'ns3::TypeId', [])
    cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True)
    # Lookup helpers (by name / by hash).
    cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], is_const=True)
    cls.add_method('LookupByHash', 'ns3::TypeId', [param('uint32_t', 'hash')], is_static=True)
    cls.add_method('LookupByHashFailSafe', 'bool', [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')], is_static=True)
    cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True)
    cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True)
    cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')], is_const=True)
    cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True)
    # Setters.
    cls.add_method('SetAttributeInitialValue', 'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')])
    cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')])
    cls.add_method('SetParent', 'ns3::TypeId', [], template_parameters=[u'ns3::QueueBase'])
    cls.add_method('SetParent', 'ns3::TypeId', [], template_parameters=[u'ns3::Object'])
    cls.add_method('SetSize', 'ns3::TypeId', [param('std::size_t', 'size')])
    cls.add_method('SetUid', 'void', [param('uint16_t', 'uid')])
    return
# Bug fix: this line had been mangled to a bare `()` expression. Given the
# `tmp_path` fixture parameter, this is almost certainly a stripped
# `@pytest.fixture()` decorator — NOTE(review): confirm against the
# original file.
@pytest.fixture()
def common_kwargs(tmp_path):
    """Common execution kwargs targeting a notebook inside tmp_path."""
    outputnb = tmp_path.joinpath('output.ipynb')
    return {'output_path': str(outputnb), 'kernel_name': f'python{sys.version_info.major}', 'progress_bar': False}
def _remove_bracketed(text: Any, brackets: Union[(str, Set[str])], inclusive: bool=True) -> Any:
    """Strip bracketed segments from `text`.

    NaN-like values pass through unchanged. `brackets` is either one key
    into REGEX_BRACKETS or a set of such keys. When `inclusive` is False
    the bracket characters themselves are kept (via backreferences).
    """
    if pd.isna(text):
        return text
    result = str(text)
    replacement = '' if inclusive else '\\g<1>\\g<2>'
    bracket_keys = brackets if isinstance(brackets, set) else [brackets]
    for bracket in bracket_keys:
        result = re.sub(REGEX_BRACKETS[bracket], replacement, result)
    return result
def layout():
    """Build the Dash layout for the monthly "people quoted" trends page.

    Loads the quote time series from the database, maps raw names through
    the alias table, and returns a one-element list holding the page Div
    (intro markdown, a multi-select name dropdown, and the line chart).

    NOTE(review): the markdown below contains link syntax whose URLs appear
    to have been stripped ("[...]( " with no target) — restore from the
    original source if available.
    """
    df = convert_date_to_pandas(get_time_series_from_db())
    # Replace raw names with their configured display aliases.
    df = df.replace({'name': get_aliases()})
    names = df.name.unique()
    children_list = [html.Div([html.H2('Monthly trends: People quoted'), dcc.Markdown("\n In this section, we visualize historical trends related to the top women/men quoted, on a monthly basis.\n The sample chart below shows how we observed a steep decline in the\n number of times former U.S. President Donald Trump was quoted per month in Canadian media,\n following his defeat to Joe Biden in the November 2020 elections. The sharp rise in\n the number of quotes for both men in January 2021 is likely due to the extensive media\n commentary following [the storming of the U.S. Capitol by rioters]( [the ensuing presidential impeachment trial]( and [Donald Trump's ban from several social media platforms]( To compare similar trends for other notable individuals that are regularly quoted in the news,\n begin by typing in a name into the menu below (autocomplete \n will detect similar names). Selections can be removed by clicking the 'x' button\n on a given name.\n "), html.Div(dcc.Dropdown(id='multi-dropdown', options=[{'label': name, 'value': name} for name in names], value=['Donald Trump', 'Joe Biden'], multi=True), style={'padding': 5}, className='custom-multi-dropdown'), html.Div(dcc.Loading(id='loading-progress', children=[html.Div(dcc.Graph(id='line-chart'), className='chart')])), html.H5('Disclaimer'), dcc.Markdown('\n To allow for faster response times, we only count and show monthly trends of quote counts for \n people who appeared in the top 50 most frequently quoted women/men in any given month. As a result,\n only prominent, **public-facing** individuals are likely to feature in the drop-down selection menu.\n ')])]
    return children_list
class CalibratorBase():
    """Shared plumbing for TensorRT INT8 calibrators.

    Streams batches from an image generator into one device allocation and
    implements the calibration-cache file protocol expected by TensorRT.
    """

    def __init__(self, image_generator, cache_file_path):
        self._logger = trt.Logger(trt.Logger.INFO)
        self._logger.min_severity = trt.Logger.Severity.VERBOSE
        self._image_generator = image_generator
        self._cache_file_path = cache_file_path
        # One device allocation sized for a full float32 input batch.
        input_spec = image_generator.get_input_spec()
        allocation_size = int(np.dtype(np.float32).itemsize * np.prod(input_spec))
        self._allocation = cuda.mem_alloc(allocation_size)
        self._batch_iterator = image_generator.get_batches()
        self._num_images_seen = 0
        self._logger.log(trt.Logger.INFO, 'Using {} calibrator for INT8 calibration'.format(self.__class__.__name__))

    def get_batch_size(self):
        """Batch size used during calibration."""
        return self._image_generator._batch_size

    def get_batch(self, *args, **kwargs):
        """Copy the next batch to the device; None tells TensorRT we are done."""
        try:
            batch = next(self._batch_iterator)
        except StopIteration:
            return None
        cuda.memcpy_htod(self._allocation, np.ascontiguousarray(batch))
        self._num_images_seen += self._image_generator._batch_size
        self._logger.log(trt.Logger.INFO, 'On image {}/{}'.format(self._num_images_seen, self._image_generator.num_images))
        return [int(self._allocation)]

    def read_calibration_cache(self):
        """Return cached calibration bytes if the cache file exists (else None)."""
        if os.path.exists(self._cache_file_path):
            self._logger.log(trt.Logger.INFO, 'Using existing calibration cache from file: {}'.format(self._cache_file_path))
            with open(self._cache_file_path, 'rb') as f:
                return f.read()

    def write_calibration_cache(self, cache):
        """Persist calibration bytes to the cache file."""
        self._logger.log(trt.Logger.INFO, 'Writing calibration cache file: {}'.format(self._cache_file_path))
        with open(self._cache_file_path, 'wb') as f:
            f.write(cache)
def packed_sequence_gather(seqs, target_device):
    """Move all tensors in `seqs` onto `target_device` and sum them.

    NOTE(review): despite the "gather" name, this accumulates with `+=`
    rather than concatenating — verify against callers.
    """
    total = seqs[0].cuda(target_device)
    for seq in seqs[1:]:
        total += seq.cuda(target_device)
    return total
def test_knorae_subspaces():
    """KNORA-E over bagged logistic-regression subspaces with a fixed seed."""
    rng = np.random.RandomState(123456)
    X_dsel, X_test, X_train, y_dsel, y_test, y_train = load_dataset(None, rng)
    pool = BaggingClassifier(LogisticRegression(), max_features=0.5, random_state=rng).fit(X_train, y_train)
    ensemble = KNORAE(pool)
    ensemble.fit(X_dsel, y_dsel)
    # NOTE(review): asserts the score is ~0.0, which looks odd for a
    # sanity test — confirm this is the intended pinned value.
    assert np.isclose(ensemble.score(X_test, y_test), 0.)
class test_segmentation(VOCSegmentation):
    """VOC segmentation split carrying an extra horizontal-flip flag."""

    def __init__(self, base_dir=Path.db_root_dir('pascal'), split='train', transform=None, flip=True):
        super(test_segmentation, self).__init__(base_dir=base_dir, split=split, transform=transform)
        # Stored flag; flipping itself is presumably applied elsewhere.
        self._flip_flag = flip

    def __getitem__(self, index):
        image, target = self._make_img_gt_point_pair(index)
        sample = {'image': image, 'label': target}
        if self.transform is None:
            return sample
        return self.transform(sample)
def load_parameters(yml: str) -> DictConfig:
    """Load an OmegaConf YAML file and return its 'best_params' section."""
    return OmegaConf.load(yml)['best_params']
# Bug fix: the line above the def had been mangled to a bare
# `_utils.test()` call, which builds the decorator and discards it, so the
# test was never registered. Restored as a decorator — NOTE(review):
# confirm against the original file.
@_utils.test()
def test_atomic_add_with_if_simplify():
    """Atomic adds inside an if/else must survive Taichi's if-simplify pass."""
    x = ti.field(ti.i32)
    step = 42
    ti.root.dense(ti.i, n).place(x)
    # NOTE(review): float boundary (n / 2) compared against int i —
    # presumably intended; true division matches the original.
    boundary = n / 2

    # NOTE(review): this inner function was likely decorated with
    # @ti.kernel in the original; no residue is visible, so it is left as-is.
    def func():
        for i in range(n):
            if i > boundary:
                s = i
                j = ti.atomic_add(s, s)  # j gets the pre-add value of s
                k = j + s
                x[i] = k
            else:
                ti.atomic_add(x[i], i)
                x[i] += step

    func()
    for i in range(n):
        expect = (i * 3) if i > boundary else (i + step)
        assert x[i] == expect
def draw_black_img(img_height: int, img_width: int, blank_path: str) -> None:
    """Create an all-black 3-channel image of the given size and save it."""
    black_image = np.zeros((img_height, img_width, 3), np.uint8)
    cv2.imwrite(blank_path, black_image)
class AnyConverter(BaseConverter):
    """URL converter matching any one of a fixed set of literal strings."""

    def __init__(self, map, *items):
        BaseConverter.__init__(self, map)
        # Alternation of the escaped literals, e.g. '(?:a|b|c)'.
        alternatives = '|'.join(re.escape(item) for item in items)
        self.regex = '(?:%s)' % alternatives
def main():
    """Build the binary-label 5-core Electronics CSV and its temporal split."""
    all_data_file = os.path.join(DATA_DIR, 'reviews_Electronics01_5.csv')
    # Convert the raw 5-core JSON reviews into a 0/1-label CSV.
    format_5core(in_json=os.path.join(RAW_DATA, 'reviews_Electronics_5.json'), out_csv=all_data_file, label01=True)
    dataset_name = '5Electronics01-1-5'
    # Time-based leave-out split; presumably leave_n/warm_n control how many
    # interactions are held out vs. required — see leave_out_by_time_csv.
    leave_out_by_time_csv(all_data_file, dataset_name, leave_n=1, warm_n=5)
def get_drives(dir):
    """Split a path into its folder components, outermost first.

    Splitting stops at the first empty or '.' component, so absolute-path
    roots are not included. NOTE(review): despite the name, this returns
    path components, not drive letters — verify against callers.
    """
    components = []
    remaining = dir
    while True:
        remaining, component = os.path.split(remaining)
        if component in ('', '.'):
            break
        components.append(component)
    components.reverse()
    return components
class Classifier():
    """BERT sequence classifier trained with learned per-example weights.

    Wraps a BertForSequenceClassification model with its tokenizer,
    optimizer, datasets and example-weighting machinery. Two modes:
    `ren=True` recomputes weights per batch from scratch; otherwise a
    persistent, decayed weight per training example is maintained.
    """
    def __init__(self, label_list, ren, norm_fn, device):
        self._label_list = label_list
        self._ren = ren
        self._device = device
        self._tokenizer = BertTokenizer.from_pretrained(BERT_MODEL, do_lower_case=True)
        self._model = BertForSequenceClassification.from_pretrained(BERT_MODEL, num_labels=len(label_list)).to(device)
        self._optimizer = None
        self._dataset = {}
        self._data_loader = {}
        # Persistent per-example weights; only used when ren is False.
        self._weights = None
        self._w_decay = None
        # Weight-normalization function (applied over a batch of weights).
        if (norm_fn == 'linear'):
            self._norm_fn = _linear_normalize
        elif (norm_fn == 'softmax'):
            self._norm_fn = _softmax_normalize
        if ren:
            # Per-batch reweighting only supports linear normalization.
            assert (norm_fn == 'linear')
    def init_weights(self, n_examples, w_init, w_decay):
        """Initialize the persistent per-example weights (non-ren mode only)."""
        if self._ren:
            raise ValueError("no global weighting initialization when 'ren'=True")
        self._weights = torch.tensor(([w_init] * n_examples), requires_grad=True).to(device=self._device)
        self._w_decay = w_decay
    def load_data(self, set_type, examples, batch_size, shuffle):
        """Store `examples` under `set_type` and build its DataLoader."""
        self._dataset[set_type] = examples
        self._data_loader[set_type] = _make_data_loader(examples=examples, label_list=self._label_list, tokenizer=self._tokenizer, batch_size=batch_size, shuffle=shuffle)
    def get_optimizer(self, learning_rate):
        """Create the optimizer for the wrapped model."""
        self._optimizer = _get_optimizer(self._model, learning_rate=learning_rate)
    def pretrain_epoch(self):
        """One unweighted training epoch over the 'train' loader."""
        self._model.train()
        for (step, batch) in enumerate(tqdm(self._data_loader['train'], desc='Pre-training')):
            batch = tuple((t.to(self._device) for t in batch))
            (input_ids, input_mask, segment_ids, label_ids, _) = batch
            self._optimizer.zero_grad()
            loss = self._model(input_ids, segment_ids, input_mask, label_ids)
            loss.backward()
            self._optimizer.step()
    def train_epoch(self):
        """One weighted training epoch: per-example weights scale the loss."""
        self._model.train()
        for (step, batch) in enumerate(tqdm(self._data_loader['train'], desc='Training')):
            batch = tuple((t.to(self._device) for t in batch))
            (input_ids, input_mask, segment_ids, label_ids, _) = batch
            # Last batch element holds the example ids; its length is the batch size.
            batch_size = batch[(- 1)].shape[0]
            # Compute weights in sub-batches of 8 (the weight computation is expensive).
            weights = []
            for i in range(0, batch_size, 8):
                lil_batch = tuple((t[i:(i + 8)] for t in batch))
                weights.append(self._get_weights(lil_batch))
            weights = self._norm_fn(torch.cat(weights, dim=0))
            self._optimizer.zero_grad()
            # Unreduced loss so each example can be scaled by its weight.
            criterion = nn.CrossEntropyLoss(reduction='none')
            logits = self._model(input_ids, segment_ids, input_mask)
            loss = criterion(logits, label_ids)
            loss = torch.sum((loss * weights.data))
            loss.backward()
            self._optimizer.step()
    def _get_weights(self, batch):
        """Estimate per-example weights via a one-step lookahead on dev loss.

        For each example, simulates a weighted optimizer update through
        MagicModule and differentiates the dev loss w.r.t. the weights.
        """
        (input_ids, input_mask, segment_ids, label_ids, ids) = batch
        batch_size = label_ids.shape[0]
        # Until Adam state exists, no lookahead is possible: uniform weights.
        optimizer_initialized = ('exp_avg' in self._optimizer.state[next(self._model.parameters())])
        if (not optimizer_initialized):
            return torch.ones(batch_size).to(self._device)
        if self._ren:
            # Fresh zero weights each batch (only their gradient matters).
            weights = torch.zeros(batch_size, requires_grad=True).to(self._device)
        else:
            weights = self._weights[ids]
        magic_model = MagicModule(self._model)
        criterion = nn.CrossEntropyLoss()
        for i in range(batch_size):
            # Per-example gradient of the training loss.
            self._model.zero_grad()
            logits = self._model(input_ids[i:(i + 1)], segment_ids[i:(i + 1)], input_mask[i:(i + 1)])
            loss = criterion(logits, label_ids[i:(i + 1)])
            grads = torch.autograd.grad(loss, [param for (name, param) in self._model.named_parameters()])
            grads = {param: grads[j] for (j, (name, param)) in enumerate(self._model.named_parameters())}
            # Simulated Adam update, scaled by this example's weight.
            deltas = _adam_delta(self._optimizer, self._model, grads)
            deltas = {name: (weights[i] * delta.data) for (name, delta) in deltas.items()}
            magic_model.update_params(deltas)
        # Gradient of the (size-normalized) dev loss w.r.t. the weights.
        weights_grad_list = []
        for (step, val_batch) in enumerate(self._data_loader['dev']):
            val_batch = (t.to(self._device) for t in val_batch)
            (val_input_ids, val_input_mask, val_segment_ids, val_label_ids, _) = val_batch
            val_batch_size = val_label_ids.shape[0]
            val_loss = magic_model(val_input_ids, val_segment_ids, val_input_mask, val_label_ids)
            val_loss = ((val_loss * float(val_batch_size)) / float(len(self._dataset['dev'])))
            weights_grad = torch.autograd.grad(val_loss, weights, retain_graph=True)[0]
            weights_grad_list.append(weights_grad)
        weights_grad = sum(weights_grad_list)
        if self._ren:
            # Higher weight where increasing it would decrease dev loss.
            return (- weights_grad)
        else:
            # Decayed gradient step on the persistent weights, clamped at EPSILON.
            self._weights[ids] = ((weights.data / self._w_decay) - weights_grad)
            self._weights[ids] = torch.max(self._weights[ids], torch.ones_like(self._weights[ids]).fill_(EPSILON))
            return self._weights[ids].data
    def evaluate(self, set_type):
        """Return classification accuracy over the given dataset split."""
        self._model.eval()
        (preds_all, labels_all) = ([], [])
        data_loader = self._data_loader[set_type]
        for batch in tqdm(data_loader, desc='Evaluating {} set'.format(set_type)):
            batch = tuple((t.to(self._device) for t in batch))
            (input_ids, input_mask, segment_ids, label_ids) = batch[:4]
            with torch.no_grad():
                logits = self._model(input_ids, segment_ids, input_mask)
            preds = torch.argmax(logits, dim=1)
            preds_all.append(preds)
            labels_all.append(label_ids)
        preds_all = torch.cat(preds_all, dim=0)
        labels_all = torch.cat(labels_all, dim=0)
        return (torch.sum((preds_all == labels_all)).item() / labels_all.shape[0])
def _sort_helper(g, input, dim, decending=True, out=None):
    """ONNX symbolic for sort, implemented as TopK over the full dim size.

    (The `decending` spelling is kept for call compatibility.)
    """
    if out is not None:
        _unimplemented('Sort', 'Out parameter is not supported')
    shape_ = g.op('Shape', input)
    dim_size_ = g.op('Gather', shape_, g.op('Constant', value_t=torch.tensor([dim], dtype=torch.int64)))
    if _export_onnx_opset_version > 10:
        # Opset 11+ TopK exposes a `largest` attribute, so both orders work.
        return g.op('TopK', input, dim_size_, axis_i=dim, largest_i=decending, outputs=2)
    # Opset <= 10 TopK is always descending; ascending cannot be expressed.
    if not decending:
        _unimplemented('Sort', 'Ascending is not supported')
    return g.op('TopK', input, dim_size_, axis_i=dim, outputs=2)
def tonal_dist(chroma1, chroma2, tonal_matrix=None):
    """Euclidean distance between two chroma vectors in tonal-centroid space.

    Each chroma vector is L1-normalized, projected through `tonal_matrix`,
    and the norm of the difference is returned. A default matrix (with a
    RuntimeWarning) is used when none is supplied.
    """
    if tonal_matrix is None:
        tonal_matrix = get_tonal_matrix()
        warnings.warn('`tonal matrix` not specified. Use default tonal matrix', RuntimeWarning)
    centroid1 = np.matmul(tonal_matrix, chroma1 / np.sum(chroma1))
    centroid2 = np.matmul(tonal_matrix, chroma2 / np.sum(chroma2))
    return np.linalg.norm(centroid1 - centroid2)
def get_count(data, tag, level):
    """Sum counts in `data` for statements whose family carries `tag` at `level`.

    Args:
        data: mapping of statement string -> count.
        tag: family tag to match (validated against the level's tag list).
        level: hierarchy level, 1-3; fam[level - 1] holds that level's tag.

    Returns:
        Total count over all keys matching the regex (fam[3]) of any
        matching family.

    The original's three per-level branches were identical except for the
    tag column and validation list; they are collapsed into one loop.
    """
    assert (level in [1, 2, 3])
    tag_list_fns = {1: get_list_tag_level_1, 2: get_list_tag_level_2, 3: get_list_tag_level_3}
    assert (tag in tag_list_fns[level]())
    count = 0
    for fam in llvm_IR_stmt_families:
        # fam[level - 1] is the tag at this level; fam[3] is the key regex.
        if fam[level - 1] == tag:
            for (key, value) in data.items():
                if re.match(fam[3], key):
                    count += value
    return count
class BlockPairDataset(FairseqDataset):
    """Pairs of token blocks for BERT-style next-sentence-prediction training.

    Splits the underlying dataset into blocks (per document, or fixed-size
    when break_mode is None/'none'), then forms (block_a, block_b,
    next_sent_label) pairs where block_b is either the true continuation
    or a randomly sampled block.
    """
    def __init__(self, dataset, dictionary, sizes, block_size, break_mode='doc', short_seq_prob=0.1, doc_break_size=1):
        super().__init__()
        self.dataset = dataset
        self.pad = dictionary.pad()
        self.eos = dictionary.eos()
        self.cls = dictionary.cls()
        self.mask = dictionary.mask()
        self.sep = dictionary.sep()
        self.break_mode = break_mode
        self.dictionary = dictionary
        self.short_seq_prob = short_seq_prob
        # Per-document lists of sentence ids (doc mode only).
        self.block_indices = []
        assert (len(dataset) == len(sizes))
        if (break_mode == 'doc'):
            # Group sentence ids into documents, split on doc_break_size lines.
            cur_doc = []
            for (sent_id, sz) in enumerate(sizes):
                assert ((doc_break_size == 0) or (sz != 0)), 'when doc_break_size is non-zero, we expect documents to beseparated by a blank line with a single eos.'
                if (sz == doc_break_size):
                    if (len(cur_doc) == 0):
                        continue
                    self.block_indices.append(cur_doc)
                    cur_doc = []
                else:
                    cur_doc.append(sent_id)
            # Reserve 3 tokens for [CLS]/[SEP]/[SEP].
            max_num_tokens = (block_size - 3)
            self.sent_pairs = []
            self.sizes = []
            for (doc_id, doc) in enumerate(self.block_indices):
                self._generate_sentence_pair(doc, doc_id, max_num_tokens, sizes)
        elif ((break_mode is None) or (break_mode == 'none')):
            # Fixed-size blocks: each "sentence" is sent_length tokens.
            sent_length = ((block_size - 3) // 2)
            total_len = sum(dataset.sizes)
            length = math.ceil((total_len / sent_length))
            def block_at(i):
                start = (i * sent_length)
                end = min((start + sent_length), total_len)
                return (start, end)
            sent_indices = np.array([block_at(i) for i in range(length)])
            sent_sizes = np.array([(e - s) for (s, e) in sent_indices])
            dataset_index = self._sent_to_dataset_index(sent_sizes)
            self._pair_sentences(dataset_index)
        else:
            raise ValueError(('Invalid break_mode: ' + break_mode))
    def _pair_sentences(self, dataset_index):
        """Pair each block with its successor or a random block (50/50)."""
        for (sent_id, sent) in enumerate(dataset_index):
            # 1 = true next sentence; forced to 0 for the last block.
            next_sent_label = (1 if ((np.random.rand() > 0.5) and (sent_id != (len(dataset_index) - 1))) else 0)
            if next_sent_label:
                next_sent = dataset_index[(sent_id + 1)]
            else:
                next_sent = dataset_index[self._skip_sampling(len(dataset_index), [sent_id, (sent_id + 1)])]
            self.sent_pairs.append((sent, next_sent, next_sent_label))
            # +3 accounts for the special tokens; sent[3] is the block length.
            self.sizes.append(((3 + sent[3]) + next_sent[3]))
    def _sent_to_dataset_index(self, sent_sizes):
        """Map each fixed-size block to (start_ds_idx, start_offset, end_ds_idx, length).

        NOTE(review): this consumes `sent_sizes` both as the blocks to place
        and as the per-dataset-item sizes (ds_remaining); upstream fairseq
        uses self.dataset.sizes for the latter — verify before reuse.
        """
        dataset_index = []
        (ds_idx, ds_remaining) = ((- 1), 0)
        for to_consume in sent_sizes:
            sent_size = to_consume
            if (ds_remaining == 0):
                ds_idx += 1
                ds_remaining = sent_sizes[ds_idx]
            start_ds_idx = ds_idx
            start_offset = (sent_sizes[ds_idx] - ds_remaining)
            while (to_consume > ds_remaining):
                to_consume -= ds_remaining
                ds_idx += 1
                ds_remaining = sent_sizes[ds_idx]
            ds_remaining -= to_consume
            dataset_index.append((start_ds_idx, start_offset, ds_idx, sent_size))
        assert (ds_remaining == 0)
        assert (ds_idx == (len(self.dataset) - 1))
        return dataset_index
    def _generate_sentence_pair(self, doc, doc_id, max_num_tokens, sizes):
        """Create sentence pairs from one document (BERT NSP-style chunking)."""
        current_chunk = []
        current_length = 0
        curr = 0
        # Occasionally target a shorter sequence to improve robustness.
        target_seq_length = max_num_tokens
        if (np.random.random() < self.short_seq_prob):
            target_seq_length = np.random.randint(2, max_num_tokens)
        while (curr < len(doc)):
            sent_id = doc[curr]
            current_chunk.append(sent_id)
            current_length = sum(sizes[current_chunk])
            if ((curr == (len(doc) - 1)) or (current_length >= target_seq_length)):
                # Segment A is a random-length prefix of the chunk.
                a_end = 1
                if (len(current_chunk) > 2):
                    a_end = np.random.randint(1, (len(current_chunk) - 1))
                sent_a = current_chunk[:a_end]
                len_a = sum(sizes[sent_a])
                next_sent_label = (1 if ((np.random.rand() > 0.5) and (len(current_chunk) != 1)) else 0)
                if (not next_sent_label):
                    # Random segment B from another document; unused sentences
                    # of the chunk are pushed back for the next pair.
                    target_b_length = (target_seq_length - len_a)
                    rand_doc_id = self._skip_sampling(len(self.block_indices), [doc_id])
                    random_doc = self.block_indices[rand_doc_id]
                    random_start = np.random.randint(0, len(random_doc))
                    sent_b = []
                    len_b = 0
                    for j in range(random_start, len(random_doc)):
                        sent_b.append(random_doc[j])
                        len_b = sum(sizes[sent_b])
                        if (len_b >= target_b_length):
                            break
                    num_unused_segments = (len(current_chunk) - a_end)
                    curr -= num_unused_segments
                else:
                    # True continuation: segment B is the rest of the chunk.
                    sent_b = current_chunk[a_end:]
                    len_b = sum(sizes[sent_b])
                (sent_a, sent_b) = self._truncate_sentences(sent_a, sent_b, max_num_tokens)
                self.sent_pairs.append((sent_a, sent_b, next_sent_label))
                self.sizes.append(((3 + sent_a[3]) + sent_b[3]))
                current_chunk = []
            curr += 1
    def _skip_sampling(self, total, skip_ids):
        """Sample a uniform index in [0, total) excluding the ids in skip_ids.

        NOTE(review): the shift `rand_id + len(skip_ids)` assumes skip_ids
        are contiguous (as used by the callers here).
        """
        rand_id = np.random.randint((total - len(skip_ids)))
        return (rand_id if (rand_id < min(skip_ids)) else (rand_id + len(skip_ids)))
    def _truncate_sentences(self, sent_a, sent_b, max_num_tokens):
        """Trim tokens randomly from the front/back of the longer segment until the pair fits."""
        (len_a, len_b) = (sum(self.dataset.sizes[sent_a]), sum(self.dataset.sizes[sent_b]))
        front_cut_a = front_cut_b = end_cut_a = end_cut_b = 0
        while True:
            total_length = (((((len_a + len_b) - front_cut_a) - front_cut_b) - end_cut_a) - end_cut_b)
            if (total_length <= max_num_tokens):
                break
            if (((len_a - front_cut_a) - end_cut_a) > ((len_b - front_cut_b) - end_cut_b)):
                if (np.random.rand() < 0.5):
                    front_cut_a += 1
                else:
                    end_cut_a += 1
            elif (np.random.rand() < 0.5):
                front_cut_b += 1
            else:
                end_cut_b += 1
        truncated_sent_a = self._cut_sentence(sent_a, front_cut_a, end_cut_a)
        truncated_sent_b = self._cut_sentence(sent_b, front_cut_b, end_cut_b)
        return (truncated_sent_a, truncated_sent_b)
    def _cut_sentence(self, sent, front_cut, end_cut):
        """Apply front/end token cuts, returning (start_ds_idx, offset, end_ds_idx, length)."""
        (start_ds_idx, end_ds_idx, offset) = (sent[0], sent[(- 1)], 0)
        target_len = ((sum(self.dataset.sizes[sent]) - front_cut) - end_cut)
        # Advance the start index past fully-cut sentences, then offset in.
        while (front_cut > 0):
            if (self.dataset.sizes[start_ds_idx] > front_cut):
                offset += front_cut
                break
            else:
                front_cut -= self.dataset.sizes[start_ds_idx]
                start_ds_idx += 1
        # Retreat the end index past fully-cut sentences.
        while (end_cut > 0):
            if (self.dataset.sizes[end_ds_idx] > end_cut):
                break
            else:
                end_cut -= self.dataset.sizes[end_ds_idx]
                end_ds_idx -= 1
        return (start_ds_idx, offset, end_ds_idx, target_len)
    def _fetch_block(self, start_ds_idx, offset, end_ds_idx, length):
        """Concatenate the covered dataset items and slice out the block tokens."""
        buffer = torch.cat([self.dataset[idx] for idx in range(start_ds_idx, (end_ds_idx + 1))])
        (s, e) = (offset, (offset + length))
        return buffer[s:e]
    def __getitem__(self, index):
        (block1, block2, next_sent_label) = self.sent_pairs[index]
        block1 = self._fetch_block(*block1)
        block2 = self._fetch_block(*block2)
        return (block1, block2, next_sent_label)
    def __len__(self):
        return len(self.sizes)
    def supports_prefetch(self):
        return getattr(self.dataset, 'supports_prefetch', False)
    def prefetch(self, indices):
        # Prefetch every dataset item covered by either block of each pair.
        prefetch_idx = set()
        for index in indices:
            for (block1, block2, _) in [self.sent_pairs[index]]:
                for ds_idx in range(block1[0], (block1[2] + 1)):
                    prefetch_idx.add(ds_idx)
                for ds_idx in range(block2[0], (block2[2] + 1)):
                    prefetch_idx.add(ds_idx)
        self.dataset.prefetch(prefetch_idx)
def execute(chunk: np.ndarray):
    """Invert an image chunk: 255 - x for uint8, 1 - x for float32 in [0, 1].

    Raises TypeError for any other dtype (or float32 data outside [0, 1]).
    Returns the inverted chunk wrapped in a one-element list.
    """
    if np.issubdtype(chunk.dtype, np.uint8):
        inverted = 255 - chunk
    elif np.issubdtype(chunk.dtype, np.float32) and chunk.max() <= 1 and chunk.min() >= 0:
        inverted = 1.0 - chunk
    else:
        raise TypeError('unsupported chunk data type.')
    return [inverted]
def _remove_urls(text: Any) -> Any:
    """Strip URLs (REGEX_URL matches) from `text`; NaN-like values pass through."""
    if pd.isna(text):
        return text
    return re.sub(REGEX_URL, '', str(text))
def objective(points, sleep=True):
    """Evaluate ScaledBranin at each point, optionally sleeping per evaluation.

    Args:
        points: array of shape (*, 2); anything else raises ValueError.
        sleep: when True, sleep 3 * sum(point) seconds to simulate cost.

    Returns:
        List of (point, observation) pairs.
    """
    if points.shape[1] != 2:
        raise ValueError(f'Incorrect input shape, expected (*, 2), got {points.shape}')
    results = []
    for point in points:
        value = ScaledBranin.objective(point).numpy()
        if sleep:
            # Simulated evaluation cost proportional to the coordinate sum.
            time.sleep(3 * np.sum(point))
        results.append((point, value))
    return results
class calculateDistExp:
    """Map RSSI-derived distances against true AP distances along a trial.

    Reads waypoint odometry and per-waypoint WiFi RSSI CSVs, then builds
    ``self.distMap``: one list of ``distanceMap`` entries per waypoint.
    """

    def __init__(self, waypts='assets/data/Trials/Trial1/odom.csv', wifi='assets/data/Trials/Trial1/wifi.csv'):
        self.wFile = waypts
        self.dim = 2
        self.TX = wifi
        self.maxZ = 2.4
        self.TXName = None
        self.numPts = None
        self.numAPs = None
        self.name2MAC = None
        self.MAC2Name = None
        self.name2Pos = None
        self.distMap = []
        self.wayPts = None
        self.defineAPS()
        self.parseWaypts()
        self.readDistances()

    def distance(self, x, y):
        """Euclidean distance between points *x* and *y* (2-D or 3-D)."""
        if (len(x) == 3) and (len(y) == 3):
            return math.sqrt(((x[1] - y[1]) ** 2) + ((x[0] - y[0]) ** 2) + ((x[2] - y[2]) ** 2))
        return math.sqrt(((x[1] - y[1]) ** 2) + ((x[0] - y[0]) ** 2))

    def rssi2Dist(self, rssi):
        """Convert an RSSI reading to an approximate distance.

        Log-distance path-loss inversion; different constants apply for
        weak (>60 dB attenuation) vs. strong readings.
        """
        if abs(rssi) > 60:
            exp = (abs(rssi) - 32.44) / 20
        else:
            exp = (abs(rssi) - 12.55) / 20
        return (10 ** exp) / 60

    def defineAPS(self):
        """Populate AP names, MAC mappings, and AP positions (hard-coded)."""
        self.TXName = ['106', '10M', '111', '114-H', '115', '116', '117', '120', '121', '124', '125', '125-H', '129', '130', '131', '134-E', '134-W', '135', '137', '138-E', '138-W', '139', '203', '203-H', '204', '208', '209', '210', '211', '212', '213', '214', '216', '219', '220', '221', '222', '229', '232', '233', '235', '236', '242', '244-E', '244-W']
        self.numAPs = len(self.TXName)
        self.name2MAC = {'106': '2c:33:11:89:6d:c2', '10M': '2c:33:11:68:49:92', '111': '2c:33:11:89:68:82', '114-H': '2c:33:11:3c:53:12', '115': '2c:33:11:9e:06:32', '116': '2c:33:11:3c:53:62', '117': '2c:33:11:5b:a1:92', '120': 'e8:65:49:c2:d4:62', '121': '2c:33:11:5b:a5:e2', '124': '2c:33:11:89:42:f2', '125': '2c:33:11:3c:55:12', '125-H': '2c:33:11:89:6a:52', '129': '38:90:a5:19:87:e2', '130': '2c:33:11:68:48:82', '131': '2c:33:11:9e:0a:32', '134-E': '2c:33:11:9e:07:f2', '134-W': '2c:33:11:9e:0d:82', '135': '2c:33:11:5b:a6:e2', '137': '2c:33:11:9e:05:42', '138-E': '2c:33:11:9e:0a:92', '138-W': '2c:33:11:5b:a5:32', '139': '2c:33:11:89:48:22', '203': '2c:33:11:3c:52:a2', '203-H': '2c:33:11:5b:95:52', '204': '2c:33:11:89:69:72', '208': '2c:33:11:49:49:c2', '209': '2c:33:11:89:63:72', '210': '2c:33:11:89:6e:12', '211': '2c:33:11:89:61:62', '212': '2c:33:11:5b:9e:82', '213': '2c:33:11:5b:9d:82', '214': '2c:33:11:68:2e:32', '216': '2c:33:11:68:43:52', '219': '2c:33:11:9e:03:12', '220': '2c:33:11:5b:a4:52', '221': '2c:33:11:21:b6:22', '222': '2c:33:11:9e:02:d2', '229': '2c:33:11:5b:a3:72', '232': '2c:33:11:68:2e:62', '233': '2c:33:11:68:43:b2', '235': '2c:33:11:89:63:a2', '236': '2c:33:11:3c:40:a2', '242': '2c:33:11:89:54:12', '244-E': '2c:33:11:89:69:22', '244-W': '2c:33:11:5b:a4:42'}
        self.MAC2Name = {'2c:33:11:89:6d:c2': '106', '2c:33:11:68:49:92': '10M', '2c:33:11:89:68:82': '111', '2c:33:11:3c:53:12': '114-H', '2c:33:11:9e:06:32': '115', '2c:33:11:3c:53:62': '116', '2c:33:11:5b:a1:92': '117', 'e8:65:49:c2:d4:62': '120', '2c:33:11:5b:a5:e2': '121', '2c:33:11:89:42:f2': '124', '2c:33:11:3c:55:12': '125', '2c:33:11:89:6a:52': '125-H', '38:90:a5:19:87:e2': '129', '2c:33:11:68:48:82': '130', '2c:33:11:9e:0a:32': '131', '2c:33:11:9e:07:f2': '134-E', '2c:33:11:9e:0d:82': '134-W', '2c:33:11:5b:a6:e2': '135', '2c:33:11:9e:05:42': '137', '2c:33:11:9e:0a:92': '138-E', '2c:33:11:5b:a5:32': '138-W', '2c:33:11:89:48:22': '139', '2c:33:11:3c:52:a2': '203', '2c:33:11:5b:95:52': '203-H', '2c:33:11:89:69:72': '204', '2c:33:11:49:49:c2': '208', '2c:33:11:89:63:72': '209', '2c:33:11:89:6e:12': '210', '2c:33:11:89:61:62': '211', '2c:33:11:5b:9e:82': '212', '2c:33:11:5b:9d:82': '213', '2c:33:11:68:2e:32': '214', '2c:33:11:68:43:52': '216', '2c:33:11:9e:03:12': '219', '2c:33:11:5b:a4:52': '220', '2c:33:11:21:b6:22': '221', '2c:33:11:9e:02:d2': '222', '2c:33:11:5b:a3:72': '229', '2c:33:11:68:2e:62': '232', '2c:33:11:68:43:b2': '233', '2c:33:11:89:63:a2': '235', '2c:33:11:3c:40:a2': '236', '2c:33:11:89:54:12': '242', '2c:33:11:89:69:22': '244-E', '2c:33:11:5b:a4:42': '244-W'}
        self.name2Pos = {'106': [1.2, 3.2, (- 0.6)], '10M': [0.8, 4.2, (- 0.6)], '111': [8, (- 4), (- 0.6)], '114-H': [8, 4, (- 0.6)], '115': [17.8, (- 1.2), (- 0.6)], '116': [17.8, 1.8, (- 0.6)], '117': [24, (- 1.8), (- 0.6)], '120': [27, 1.8, (- 0.6)], '121': [30, (- 1.2), (- 0.6)], '124': [34, 1, (- 0.6)], '125': [38, (- 2.8), (- 0.6)], '125-H': [40, 0, (- 0.6)], '129': [44, (- 2.8), (- 0.6)], '130': [43, 2, (- 0.6)], '131': [50, (- 0.8), (- 0.6)], '134-E': [58, 4, (- 0.6)], '134-W': [53, 2.4, (- 0.6)], '135': [56, (- 1), (- 0.6)], '137': [62, (- 0.8), (- 0.6)], '138-E': [72.4, 3.2, (- 0.6)], '138-W': [64, 1.8, (- 0.6)], '139': [68, (- 1.5), (- 0.6)], '203': [3.8, (- 1.5), 2.4], '203-H': [0, 0, 2.4], '204': [0.8, 4.7, 2.4], '208': [4.2, 3.2, 2.4], '209': [8, (- 4), 2.4], '210': [7.6, 4.7, 2.4], '211': [15, (- 1.5), 2.4], '212': [11, 1, 2.4], '213': [18, (- 1), 2.4], '214': [14.4, 3.2, 2.4], '216': [17.8, 4.7, 2.4], '219': [28, (- 1.5), 2.4], '220': [24.6, 4, 2.4], '221': [34, (- 1.5), 2.4], '222': [28, 1.8, 2.4], '229': [40.4, (- 1.5), 2.4], '232': [44.4, 2.8, 2.4], '233': [48.4, (- 2.5), 2.4], '235': [54.4, (- 2.5), 2.4], '236': [50.4, 5.2, 2.4], '242': [62.4, 2.4, 2.4], '244-E': [72.4, 4.8, 2.4], '244-W': [66.4, (- 2.5), 2.4]}

    def parseWaypts(self):
        """Read waypoint x/y rows from the odometry CSV; z is fixed at 1."""
        waypts = []
        # 'with' closes the file deterministically (the original leaked it).
        with open(self.wFile, 'r') as wpt_file:
            for row in csv.reader(wpt_file):
                waypts.append([float(row[0]), float(row[1]), 1])
        self.numPts = len(waypts)
        self.wayPts = waypts

    def readDistances(self):
        """Build distMap: per-waypoint RSSI vs. Euclidean AP distances."""
        # 'with' closes the file deterministically (the original leaked it).
        with open(self.TX, 'r') as wifi_file:
            measures = [[float(x) for x in row] for row in csv.reader(wifi_file)]
        for i in range(self.numPts):
            pt = self.wayPts[i]
            APMap = []
            for j in range(self.numAPs):
                name = self.TXName[j]
                tx = self.name2Pos[name]
                rssiVal = measures[i][j]
                # '-H' APs get label 0, all others 1 — presumably marking
                # hallway units; verify against downstream consumers.
                label = 0 if name in ('203-H', '125-H') else 1
                euclid = self.distance(pt, tx)
                if rssiVal == 0:
                    # Missing reading: substitute a draw from N(20, 3).
                    rssiDist = normrnd(20, 3)
                else:
                    rssiDist = self.rssi2Dist(rssiVal)
                APMap.append(distanceMap(rssiDist, euclid, label, name))
            self.distMap.append(APMap)
        print('All distances mapped on grid!')
def _evalWrapper(eval_id: int, fn: Callable, *args, **kwargs) -> Tuple[(int, float, Any, Any)]:
    """Run ``fn(*args, **kwargs)``, timing it and capturing any exception.

    Returns ``(eval_id, elapsed_seconds, result, exception)``; on failure
    ``result`` is None and ``exception`` holds the raised object.
    (Removed the unused ``fitness``/``features`` locals of the original.)
    """
    start_time = timer()
    exc = None
    res = None
    try:
        res = fn(*args, **kwargs)
    except Exception as e:
        # Keep going: the caller inspects the returned exception object.
        print(f'Exception during evaluation: {e}')
        traceback.print_exc()
        exc = e
    elapsed = (timer() - start_time)
    return (eval_id, elapsed, res, exc)
def propagate_memlets_scope(sdfg, state, scopes, propagate_entry=True, propagate_exit=True):
    """Propagate memlets for the given scope(s) and all of their ancestors.

    *scopes* may be a single ScopeTree or an iterable of them; propagation
    walks outward so a parent scope is handled after all of its children.
    """
    from dace.sdfg.scope import ScopeTree
    current = [scopes] if isinstance(scopes, ScopeTree) else scopes
    while len(current) > 0:
        parents = set()
        for scope in current:
            if scope.entry is None:
                # Top-level scope: nothing to propagate, stop ascending here.
                continue
            if propagate_entry:
                _propagate_node(state, scope.entry)
            if propagate_exit:
                _propagate_node(state, scope.exit)
            parents.add(scope.parent)
        current = parents
def kde_viz_figure(hist: List[Tuple[(np.ndarray, np.ndarray)]], kde: np.ndarray, col: str, plot_width: int, plot_height: int, cfg: Config) -> Figure:
    """Render histogram bars with an overlaid KDE curve for column *col*.

    *hist* holds per-group ``(density, bin_edges)`` pairs and *kde* the
    matching fitted KDE callables. Returns the assembled Bokeh figure.
    """
    fig = Figure(plot_width=plot_width, plot_height=plot_height, title=col, toolbar_location=None, y_axis_type=cfg.kde.yscale)
    for (i, ((dens, bins), kde_fn)) in enumerate(zip(hist, kde)):
        intvls = _format_bin_intervals(bins)
        df = pd.DataFrame({'intvl': intvls, 'left': bins[:(- 1)], 'right': bins[1:], 'dens': dens})
        # Log-scaled axes cannot start at 0; use half the minimum density.
        bottom = (0 if ((cfg.kde.yscale == 'linear') or df.empty) else (df['dens'].min() / 2))
        # NOTE(review): the original rebound the *hist* parameter here; the
        # loop only worked because zip() had already captured the list.
        # Renamed to avoid the shadowing.
        quad = fig.quad(source=df, left='left', right='right', bottom=bottom, top='dens', alpha=0.5, fill_color=CATEGORY10[i], line_color=CATEGORY10[i])
        hover_hist = HoverTool(renderers=[quad], tooltips=[('Bin', ''), ('Density', '')], mode='vline')
        pts_rng = np.linspace(df.loc[(0, 'left')], df.loc[((len(df) - 1), 'right')], 1000)
        pdf = kde_fn(pts_rng)
        line = fig.line(x=pts_rng, y=pdf, line_color=CATEGORY10[i], line_width=2, alpha=0.5)
        hover_dist = HoverTool(renderers=[line], tooltips=[('x', ''), ('y', '')])
        fig.add_tools(hover_hist)
        fig.add_tools(hover_dist)
    tweak_figure(fig, 'kde')
    fig.yaxis.axis_label = 'Density'
    fig.xaxis.axis_label = col
    # Axis limits come from the last group's dataframe/pdf, as before.
    _format_axis(fig, df.iloc[0]['left'], df.iloc[(- 1)]['right'], 'x')
    if (cfg.kde.yscale == 'linear'):
        _format_axis(fig, 0, max(df['dens'].max(), pdf.max()), 'y')
    return fig
@pytest.mark.parametrize('function_name', get_all_functions_names())
def test_function_docstring(function_name, request):
    """Each public function's docstring must pass numpydoc validation.

    NOTE(review): the decorator line was truncated to ``.parametrize(...)``
    in this copy; restored to ``@pytest.mark.parametrize`` (pytest is
    already used in the body).
    """
    if (function_name in FUNCTION_DOCSTRING_IGNORE_LIST):
        # Known offenders are expected to fail until their docstrings are fixed.
        request.applymarker(pytest.mark.xfail(run=False, reason='TODO pass numpydoc validation'))
    res = numpydoc_validation.validate(function_name)
    res['errors'] = list(filter_errors(res['errors'], method='function'))
    if res['errors']:
        msg = repr_errors(res, method=f'Tested function: {function_name}')
        raise ValueError(msg)
def main():
    """Command-line entry point: upscale images with a pre-trained A-ESRGAN model."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, default='.\\inputs', help='Input image or folder')
    parser.add_argument('--model_path', type=str, default='experiments/pretrained_models/A_ESRGAN_Single.pth', help='Path to the pre-trained model')
    parser.add_argument('--output', type=str, default='results', help='Output folder')
    parser.add_argument('--suffix', type=str, default='out', help='Suffix of the restored image')
    parser.add_argument('--tile', type=int, default=0, help='Tile size, 0 for no tile during testing')
    parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding')
    parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border')
    parser.add_argument('--half', action='store_true', help='Use half precision during inference')
    parser.add_argument('--block', type=int, default=23, help='num_block in RRDB')
    parser.add_argument('--ext', type=str, default='auto', help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
    args = parser.parse_args()
    # The released model is a fixed x4 upscaler.
    netscale = 4
    outscale = 4
    model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=args.block, num_grow_ch=32, scale=netscale)
    upsampler = RealESRGANer(scale=netscale, model_path=args.model_path, model=model, tile=args.tile, tile_pad=args.tile_pad, pre_pad=args.pre_pad, half=args.half)
    os.makedirs(args.output, exist_ok=True)
    if os.path.isfile(args.input):
        paths = [args.input]
    else:
        paths = sorted(glob.glob(os.path.join(args.input, '*')))
    for (idx, path) in enumerate(paths):
        (imgname, extension) = os.path.splitext(os.path.basename(path))
        print('Testing', idx, imgname)
        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        if img is None:
            # cv2.imread returns None for unreadable/non-image files;
            # skip them instead of crashing on img.shape below.
            print('Skipping (not a readable image):', path)
            continue
        if ((len(img.shape) == 3) and (img.shape[2] == 4)):
            img_mode = 'RGBA'
        else:
            img_mode = None
        try:
            (output, _) = upsampler.enhance(img, outscale=outscale)
        except Exception as error:
            print('Error', error)
            print('If you encounter CUDA out of memory, try to set --tile with a smaller number.')
        else:
            if (args.ext == 'auto'):
                extension = extension[1:]
            else:
                extension = args.ext
            if (img_mode == 'RGBA'):
                # The alpha channel requires a lossless format.
                extension = 'png'
            save_path = os.path.join(args.output, f'{imgname}_{args.suffix}.{extension}')
            cv2.imwrite(save_path, output)
class OptionFillable(Fillable):
    """Fillable that layers null (missing-value) support over *content*.

    ``offsets[i]`` holds the index into ``content`` for element ``i``,
    or -1 when element ``i`` is null.
    """

    def __init__(self, offsets, content):
        assert isinstance(offsets, list)
        assert isinstance(content, Fillable)
        self.offsets = offsets
        self.content = content

    @classmethod
    def fromnulls(cls, nullcount, content):
        """Construct with *nullcount* leading nulls over *content*.

        NOTE(review): the parameter was named ``cls`` but ``@classmethod``
        was missing, forcing callers to pass the class explicitly;
        decorator restored to match the evident intent.
        """
        return cls(([(- 1)] * nullcount), content)

    @classmethod
    def fromvalids(cls, content):
        """Construct with every element of *content* valid (no nulls)."""
        return cls(list(range(len(content))), content)

    def snapshot(self):
        """Freeze the current state into an OptionArray."""
        return OptionArray(list(self.offsets), self.content.snapshot())

    def __len__(self):
        return len(self.offsets)

    def active(self):
        # This node itself is never 'active'; activity lives in content.
        return self.content.active()

    def null(self):
        """Append a null (or forward while content is mid-structure)."""
        if (not self.content.active()):
            self.offsets.append((- 1))
        else:
            self.content.null()
        return self

    def real(self, x):
        """Append the real number *x* (or forward while content is active)."""
        if (not self.content.active()):
            # Content's length before the append is x's index.
            length = len(self.content)
            self._maybeupdate(self.content.real(x))
            self.offsets.append(length)
        else:
            self.content.real(x)
        return self

    def beginlist(self):
        """Open a list; the offset is recorded by the matching endlist."""
        if (not self.content.active()):
            self._maybeupdate(self.content.beginlist())
        else:
            self.content.beginlist()
        return self

    def endlist(self):
        if (not self.content.active()):
            raise ValueError("'endlist' without corresponding 'beginlist'")
        else:
            length = len(self.content)
            self.content.endlist()
            # Only record an offset when content actually grew (i.e. this
            # endlist completed an element).
            if (length != len(self.content)):
                self.offsets.append(length)
        return self

    def begintuple(self, numfields):
        """Open a tuple of *numfields* fields."""
        if (not self.content.active()):
            self._maybeupdate(self.content.begintuple(numfields))
        else:
            self.content.begintuple(numfields)
        return self

    def index(self, i):
        """Select tuple field *i* before filling it."""
        if (not self.content.active()):
            raise ValueError("'index' without corresponding 'begintuple'")
        else:
            self.content.index(i)
        return self

    def endtuple(self):
        if (not self.content.active()):
            raise ValueError("'endtuple' without corresponding 'begintuple'")
        else:
            length = len(self.content)
            self.content.endtuple()
            if (length != len(self.content)):
                self.offsets.append(length)
        return self

    def _maybeupdate(self, fillable):
        # Content operations may return a replacement fillable; adopt it.
        assert (fillable is not None)
        if (fillable is not self.content):
            self.content = fillable
def _get_native_lib_filename():
    """Compile (once) and return the path of the native 'pytopickle' library.

    The result is memoized in the module-level ``_native_lib_filename``.
    """
    global _native_lib_filename
    if _native_lib_filename:
        return _native_lib_filename
    # Close the C++ source file promptly instead of leaking the handle.
    with open(_native_cpp_filename) as src:
        code = src.read()
    native = NativeCodeCompiler(base_name='pytopickle', code_version=1, code=code, is_cpp=True, c_macro_defines={'LIB': 1})
    _native_lib_filename = native.get_lib_filename()
    return _native_lib_filename
def xydata_from_point_list(points):
    """Split *points* into parallel x/y coordinate lists.

    Accepts real numbers (y = 0), complex numbers, symbolic Expressions
    (numerically evaluated first), and (x, y) pairs.
    """
    import numbers
    xdata = []
    ydata = []
    for point in points:
        if isinstance(point, Expression):
            point = point.n()
        if isinstance(point, numbers.Real):
            xdata.append(float(point))
            ydata.append(0.0)
        elif isinstance(point, numbers.Complex):
            # NOTE: real()/imag() are methods here (Sage-style numbers).
            xdata.append(float(point.real()))
            ydata.append(float(point.imag()))
        else:
            try:
                (x, y) = point
            except TypeError:
                raise TypeError('invalid input for 2D point')
            xdata.append(float(x))
            ydata.append(float(y))
    return (xdata, ydata)
def test_get_data_for_tensorkey_from_db(collaborator_mock, tensor_key):
    """A cached tensor should be returned straight from the tensor db."""
    collaborator_mock.tensor_db.get_tensor_from_cache = mock.Mock(return_value='some_data')
    result = collaborator_mock.get_data_for_tensorkey(tensor_key)
    assert result == 'some_data'
class DCProblemTestsCC_storeJ(unittest.TestCase):
    """Derivative and adjoint sanity tests for the cell-centered DC
    simulation with ``storeJ=True`` (sensitivities cached to disk)."""

    def setUp(self):
        # Wenner array geometry: 5 electrodes at 2.5 m spacing.
        aSpacing = 2.5
        nElecs = 5
        surveySize = ((nElecs * aSpacing) - aSpacing)
        cs = ((surveySize / nElecs) / 4)
        # Tensor mesh padded outward with a 1.3 expansion factor on each side.
        mesh = discretize.TensorMesh([[(cs, 10, (- 1.3)), (cs, (surveySize / cs)), (cs, 10, 1.3)], [(cs, 3, (- 1.3)), (cs, 3, 1.3)]], 'CN')
        source_list = dc.utils.WennerSrcList(nElecs, aSpacing, in2D=True)
        survey = dc.survey.Survey(source_list)
        # storeJ=True makes the simulation cache its sensitivity matrix.
        simulation = dc.simulation.Simulation3DCellCentered(mesh=mesh, survey=survey, rhoMap=maps.IdentityMap(mesh), storeJ=True)
        # Synthetic model: uniform resistivity of 1 on all cells.
        mSynth = np.ones(mesh.nC)
        dobs = simulation.make_synthetic_data(mSynth, add_noise=True)
        dmis = data_misfit.L2DataMisfit(simulation=simulation, data=dobs)
        reg = regularization.WeightedLeastSquares(mesh)
        opt = optimization.InexactGaussNewton(maxIterLS=20, maxIter=10, tolF=1e-06, tolX=1e-06, tolG=1e-06, maxIterCG=6)
        invProb = inverse_problem.BaseInvProblem(dmis, reg, opt, beta=10000.0)
        inv = inversion.BaseInversion(invProb)
        self.inv = inv
        self.reg = reg
        self.p = simulation
        self.mesh = mesh
        self.m0 = mSynth
        self.survey = survey
        self.dmis = dmis
        self.dobs = dobs

    def test_misfit(self):
        # Finite-difference check of dpred's derivative against Jvec.
        passed = tests.check_derivative((lambda m: [self.p.dpred(m), (lambda mx: self.p.Jvec(self.m0, mx))]), self.m0, plotIt=False, num=3)
        self.assertTrue(passed)

    def test_adjoint(self):
        # Adjoint identity: w.(J v) must equal v.(J^T w) up to round-off.
        v = np.random.rand(self.mesh.nC)
        w = np.random.rand(mkvc(self.dobs).shape[0])
        wtJv = w.dot(self.p.Jvec(self.m0, v))
        vtJtw = v.dot(self.p.Jtvec(self.m0, w))
        passed = (np.abs((wtJv - vtJtw)) < 1e-10)
        print('Adjoint Test', np.abs((wtJv - vtJtw)), passed)
        self.assertTrue(passed)

    def test_dataObj(self):
        # Finite-difference check of the data-misfit gradient.
        passed = tests.check_derivative((lambda m: [self.dmis(m), self.dmis.deriv(m)]), self.m0, plotIt=False, num=6)
        self.assertTrue(passed)

    def tearDown(self):
        # storeJ=True writes sensitivities to disk; remove the cache if present.
        try:
            shutil.rmtree(self.p.sensitivity_path)
        except FileNotFoundError:
            pass
def set_global_config(config):
    """Install *config* as the process-wide configuration.

    Also mirrors it into the TF default graph via the helper so both
    lookup paths agree.
    """
    global _global_config
    _get_or_set_config_via_tf_default_graph(config)
    _global_config = config
class GANloss(_Loss):
    """MSE-against-constant-targets (LSGAN-style) loss, averaged over a
    list of multi-scale discriminator outputs.

    Each element of ``pred`` is a sequence whose first item is a
    prediction map; targets are all-ones (real) or all-zeros (fake).
    """

    def __init__(self):
        super(GANloss, self).__init__()
        # Create the criterion once instead of on every forward pass.
        self.mse = nn.MSELoss()

    def forward(self, pred, label_type):
        """Return the mean MSE of every scale against its constant target."""
        loss = 0
        for scale_pred in pred:
            out = scale_pred[0]
            # ones_like/zeros_like keep the target on the prediction's
            # device/dtype (the original hard-coded .cuda() + Variable,
            # which broke CPU runs and used a deprecated wrapper).
            target = torch.ones_like(out) if label_type else torch.zeros_like(out)
            loss += self.mse(out, target)
        return (loss / len(pred))
class TanhConcatAttention(Attention):
    """Additive (concat) attention scored with a tanh nonlinearity.

    Queries and keys are each projected to scalars by a learned weight
    vector; every query/key pair is scored as tanh(q_proj + k_proj).
    """

    def __init__(self, query_size, key_size, dropout=0):
        super(TanhConcatAttention, self).__init__(dropout)
        self.query_weights = nn.Parameter(torch.Tensor(query_size, 1))
        self.key_weights = nn.Parameter(torch.Tensor(key_size, 1))
        init.xavier_uniform_(self.query_weights)
        init.xavier_uniform_(self.key_weights)

    def _score(self, query, key):
        batch_size = query.size(0)
        num_queries = query.size(1)
        time_step = key.size(1)
        # Project each query/key to a scalar, then broadcast both to the
        # full (batch, num_queries, time_step) score grid.
        q_proj = query.matmul(self.query_weights)
        q_proj = q_proj.expand(batch_size, num_queries, time_step)
        k_proj = key.matmul(self.key_weights).transpose(1, 2)
        k_proj = k_proj.expand(batch_size, num_queries, time_step)
        return torch.tanh(q_proj + k_proj)
class LocationAwareAttention(nn.Module):
    """Location-aware attention: conditions the alignment on the previous
    attention distribution via a 1-D convolution over the history.
    """

    def __init__(self, decoder_dim: int = 1024, attn_dim: int = 1024, smoothing: bool = False) -> None:
        super(LocationAwareAttention, self).__init__()
        self.decoder_dim = decoder_dim
        self.attn_dim = attn_dim
        self.location_conv = nn.Conv1d(in_channels=1, out_channels=attn_dim, kernel_size=3, padding=1)
        self.query_proj = Linear(decoder_dim, attn_dim, bias=False)
        self.value_proj = Linear(decoder_dim, attn_dim, bias=False)
        self.bias = nn.Parameter(torch.rand(attn_dim).uniform_((- 0.1), 0.1))
        self.fc = Linear(attn_dim, 1, bias=True)
        self.smoothing = smoothing

    def forward(self, query: Tensor, value: Tensor, last_alignment_energy: Tensor) -> Tuple[(Tensor, Tensor)]:
        """Return (context, alignment) for *query* over *value*.

        *last_alignment_energy* is the previous step's alignment (or None
        on the first step).
        """
        # Fixed the original's misspelled local ("alignmment_energy") and
        # dropped its unused hidden_dim local.
        (batch_size, seq_length) = (query.size(0), value.size(1))
        if (last_alignment_energy is None):
            # First decoder step: start from an all-zero history.
            last_alignment_energy = value.new_zeros(batch_size, seq_length)
        last_alignment_energy = self.location_conv(last_alignment_energy.unsqueeze(dim=1))
        last_alignment_energy = last_alignment_energy.transpose(1, 2)
        alignment_energy = self.fc(torch.tanh((((self.query_proj(query) + self.value_proj(value)) + last_alignment_energy) + self.bias))).squeeze(dim=(- 1))
        if self.smoothing:
            # Sigmoid + renormalize instead of softmax.
            alignment_energy = torch.sigmoid(alignment_energy)
            alignment_energy = torch.div(alignment_energy, alignment_energy.sum(dim=(- 1)).unsqueeze(dim=(- 1)))
        else:
            alignment_energy = F.softmax(alignment_energy, dim=(- 1))
        context = torch.bmm(alignment_energy.unsqueeze(dim=1), value)
        return (context, alignment_energy)
@pytest.fixture(scope='module')
def os_custom_keys_norm():
    """Structured, normalized TriFinger observation space with custom keys.

    NOTE(review): the decorator line was truncated to ``(scope='module')``
    in this copy; restored as ``@pytest.fixture`` — confirm against the
    original test module.
    """
    return TriFingerObservations(observation_mode='structured', normalize_observations=True, observation_keys=['end_effector_positions', 'action_joint_positions'])
def NN(inputs, weights, sigma, output_activation=None):
    """Evaluate a small dense network symbolically (UFL-style).

    *weights* is a list of layer dicts with 'weight' and optional 'bias';
    *sigma* is the hidden activation. The final layer is linear unless
    *output_activation* is given; a length-1 result is unwrapped.
    """
    r = as_vector(inputs)
    num_layers = len(weights)
    for (layer_idx, layer) in enumerate(weights):
        z = layer['weight'] * r
        if 'bias' in layer:
            z = z + layer['bias']
        is_last = (layer_idx + 1) >= num_layers
        r = z if is_last else apply_activation(z, func=sigma)
    if output_activation is not None:
        r = apply_activation(r, func=output_activation)
    return r[0] if r.ufl_shape[0] == 1 else r
@_ARCH_REGISTRY.register()
class ProposalNetwork(nn.Module):
    """Meta-architecture that runs only a backbone plus proposal generator.

    NOTE(review): the decorator line was mangled (missing '@') in this
    copy; restored using the registry name visible here — confirm against
    the original source.
    """

    def __init__(self, cfg):
        super().__init__()
        self.device = torch.device(cfg.MODEL.DEVICE)
        self.backbone = build_backbone(cfg)
        self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape())
        # Per-channel normalization applied to every input image.
        pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view((- 1), 1, 1)
        pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view((- 1), 1, 1)
        self.normalizer = (lambda x: ((x - pixel_mean) / pixel_std))
        self.to(self.device)

    def forward(self, batched_inputs):
        """Return proposal losses (training) or post-processed proposals."""
        images = [x['image'].to(self.device) for x in batched_inputs]
        images = [self.normalizer(x) for x in images]
        images = ImageList.from_tensors(images, self.backbone.size_divisibility)
        features = self.backbone(images.tensor)
        if ('instances' in batched_inputs[0]):
            gt_instances = [x['instances'].to(self.device) for x in batched_inputs]
        elif ('targets' in batched_inputs[0]):
            # Backward compatibility with the old input key.
            log_first_n(logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10)
            gt_instances = [x['targets'].to(self.device) for x in batched_inputs]
        else:
            gt_instances = None
        (proposals, proposal_losses) = self.proposal_generator(images, features, gt_instances)
        if self.training:
            return proposal_losses
        # Inference: rescale proposals back to each input's original size.
        processed_results = []
        for (results_per_image, input_per_image, image_size) in zip(proposals, batched_inputs, images.image_sizes):
            height = input_per_image.get('height', image_size[0])
            width = input_per_image.get('width', image_size[1])
            r = detector_postprocess(results_per_image, height, width)
            processed_results.append({'proposals': r})
        return processed_results
def GaussianClippingSimulation(Alpha, sigma, bitWidth):
    """Simulate quantization MSE of clipped Gaussian samples per threshold.

    Draws N(0, sigma) samples once, then for each clipping threshold in
    *Alpha* clips, quantizes with a mid-tread quantizer of *bitWidth*
    bits, and records the mean squared error vs. the unclipped samples.
    """
    highPrecision = np.random.normal(0, sigma, size=100000)
    results = []
    for alpha in Alpha:
        step = (2 * alpha) / (2 ** bitWidth)
        clipped = np.clip(highPrecision, -alpha, alpha)
        quantized = uniform_midtread_quantizer(clipped, step)
        results.append(((quantized - highPrecision) ** 2).mean())
    return results
def calc_mean_std(feat, eps=1e-05):
    """Per-sample, per-channel mean and std of a 4-D feature map.

    *eps* is added to the variance for numerical stability. Returns
    ``(mean, std)``, each shaped (N, C, 1, 1) for easy broadcasting.
    """
    size = feat.size()
    assert len(size) == 4
    N, C = size[0], size[1]
    flat = feat.view(N, C, -1)
    var = flat.var(dim=2) + eps
    std = var.sqrt().view(N, C, 1, 1)
    mean = flat.mean(dim=2).view(N, C, 1, 1)
    return (mean, std)
@cached_function
def parse_dependencies(source_filename):
    """Scan a Cython source file for its compile-time dependencies.

    Returns ``(cimports, includes, externs, distutils_info)``: cimported
    modules, textual includes, extern declarations, and distutils options
    parsed from the source.

    NOTE(review): the decorator line was truncated to ``_function`` in
    this copy; restored to ``@cached_function`` per upstream Cython.
    """
    with Utils.open_source_file(source_filename, error_handling='ignore') as fh:
        source = fh.read()
    distutils_info = DistutilsInfo(source)
    # Strip string literals first so the regexes cannot match inside them;
    # line continuations and tabs are normalized away as well.
    (source, literals) = strip_string_literals(source)
    source = source.replace('\\\n', ' ').replace('\t', ' ')
    cimports = []
    includes = []
    externs = []
    for m in dependency_regex.finditer(source):
        (cimport_from, cimport_list, extern, include) = m.groups()
        if cimport_from:
            cimports.append(cimport_from)
            # 'from x cimport a, b' — record the imported names too.
            m_after_from = dependency_after_from_regex.search(source, pos=m.end())
            if m_after_from:
                (multiline, one_line) = m_after_from.groups()
                subimports = (multiline or one_line)
                cimports.extend(('{0}.{1}'.format(cimport_from, s.strip()) for s in subimports.split(',')))
        elif cimport_list:
            cimports.extend((x.strip() for x in cimport_list.split(',')))
        elif extern:
            externs.append(literals[extern])
        else:
            includes.append(literals[include])
    return (cimports, includes, externs, distutils_info)
def can_access(schedule: ScheduleType, storage: StorageType):
    """Whether code running under *schedule* may access *storage*.

    Registers are accessible everywhere; otherwise accessibility follows
    the device class (GPU / CPU / FPGA). Sequential is not well defined
    and raises; other schedule types implicitly return None, as before.
    """
    if storage == StorageType.Register:
        return True
    gpu_schedules = (ScheduleType.GPU_Device, ScheduleType.GPU_Persistent, ScheduleType.GPU_ThreadBlock, ScheduleType.GPU_ThreadBlock_Dynamic, ScheduleType.GPU_Default)
    cpu_schedules = (ScheduleType.Default, ScheduleType.CPU_Multicore, ScheduleType.CPU_Persistent)
    if schedule in gpu_schedules:
        return storage in (StorageType.GPU_Global, StorageType.GPU_Shared, StorageType.CPU_Pinned)
    if schedule in cpu_schedules:
        return storage in (StorageType.Default, StorageType.CPU_Heap, StorageType.CPU_Pinned, StorageType.CPU_ThreadLocal)
    if schedule == ScheduleType.FPGA_Device:
        return storage in (StorageType.FPGA_Local, StorageType.FPGA_Global, StorageType.FPGA_Registers, StorageType.FPGA_ShiftRegister, StorageType.CPU_Pinned)
    if schedule == ScheduleType.Sequential:
        raise ValueError('Not well defined')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.