code stringlengths 101 5.91M |
|---|
class Generator(abc.ABC):
    """Abstract base class for cube state generators.

    Subclasses implement `generate_cube` to produce an initial (typically
    scrambled) cube; calling the generator wraps that cube into a full
    environment `State` with a zeroed step counter.
    """

    def __init__(self, cube_size: int):
        """Validate and store the cube size.

        Args:
            cube_size: edge length of the cube; must be at least 2.

        Raises:
            ValueError: if `cube_size` is smaller than 2.
        """
        if cube_size < 2:
            raise ValueError(f'Cannot meaningfully construct a cube smaller than 2x2x2, but received cube_size={cube_size}')
        self.cube_size = cube_size

    # BUG FIX: this method had no body in the checked-in text (invalid
    # Python). Given the abc.ABC base and the cls hierarchy it is restored
    # as the abstract hook subclasses must implement.
    @abc.abstractmethod
    def generate_cube(self, key: chex.PRNGKey) -> Cube:
        """Produce a cube from the given PRNG key."""

    def __call__(self, key: chex.PRNGKey) -> State:
        """Build an initial State.

        Splits `key` so the scramble uses an independent stream, generates
        the cube, and starts the step count at 0.
        """
        key, scramble_key = jax.random.split(key)
        cube = self.generate_cube(key=scramble_key)
        step_count = jnp.array(0, jnp.int32)
        return State(cube=cube, step_count=step_count, key=key)
def wide_resnet50_2(in_channels=3, pretrained=False, progress=True, **kwargs):
    """Wide ResNet-50-2: ResNet-50 depth with doubled bottleneck group width."""
    # Double the standard 64-channel group width.
    kwargs['width_per_group'] = 64 * 2
    stage_blocks = [3, 4, 6, 3]
    return _resnet(in_channels, 'wide_resnet50_2', Bottleneck, stage_blocks, pretrained, progress, **kwargs)
class ResNet18_torch(nn.Module):
    """torchvision ResNet-18 adapted for small (32x32) inputs.

    The 7x7/stride-2 stem conv is replaced by a 3x3/stride-1 conv, the stem
    max-pool is removed, and the final layer is a 10-way classifier whose
    output is log-softmax probabilities.
    """

    def __init__(self, pretrained=False, device=None):
        super().__init__()
        self.resnet = models.resnet18(pretrained=pretrained)
        # Replace the head with a 10-class linear layer.
        in_features = self.resnet.fc.in_features
        self.resnet.fc = nn.Linear(in_features, 10)
        # Small-image stem: keep spatial resolution in the first layers.
        self.resnet.conv1 = torch.nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.resnet.maxpool = torch.nn.Identity()

    def forward(self, x):
        """Return per-class log-probabilities for a batch of images."""
        logits = self.resnet(x)
        return F.log_softmax(logits, dim=1)
def main():
    """Run BasicVSR inference over a folder of frames or a video file.

    If `--input_path` is not a directory, the video is first decoded with
    ffmpeg into a temporary frame folder, which is deleted after inference.
    Frames are processed in chunks of at most `--interval` to bound memory.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', type=str, default='experiments/pretrained_models/BasicVSR_REDS4.pth')
    parser.add_argument('--input_path', type=str, default='datasets/REDS4/sharp_bicubic/000', help='input test image folder')
    parser.add_argument('--save_path', type=str, default='results/BasicVSR', help='save image path')
    parser.add_argument('--interval', type=int, default=15, help='interval size')
    args = parser.parse_args()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Set up the model and load the pretrained weights.
    model = BasicVSR(num_feat=64, num_block=30)
    model.load_state_dict(torch.load(args.model_path)['params'], strict=True)
    model.eval()
    model = model.to(device)

    os.makedirs(args.save_path, exist_ok=True)

    # Extract frames to a temp dir when the input is a video file.
    input_path = args.input_path
    use_ffmpeg = False
    if not os.path.isdir(input_path):
        use_ffmpeg = True
        video_name = os.path.splitext(os.path.split(args.input_path)[-1])[0]
        input_path = os.path.join('./BasicVSR_tmp', video_name)
        os.makedirs(input_path, exist_ok=True)
        # BUG FIX: the original command contained a stray space
        # ("{input_path} /frame%08d.png"), so ffmpeg wrote frames to the
        # filesystem root instead of the temp folder. Build the pattern with
        # os.path.join instead.
        # NOTE(review): os.system with an interpolated path is shell-injection
        # prone; prefer subprocess.run([...]) if paths can be untrusted.
        frame_pattern = os.path.join(input_path, 'frame%08d.png')
        os.system(f'ffmpeg -i {args.input_path} -qscale:v 1 -qmin 1 -qmax 1 -vsync 0 {frame_pattern}')

    imgs_list = sorted(glob.glob(os.path.join(input_path, '*')))
    num_imgs = len(imgs_list)
    if num_imgs <= args.interval:
        # Small sequence: process everything in one pass.
        imgs, imgnames = read_img_seq(imgs_list, return_imgname=True)
        imgs = imgs.unsqueeze(0).to(device)
        inference(imgs, imgnames, model, args.save_path)
    else:
        # Long sequence: process `interval`-sized chunks to bound memory.
        for idx in range(0, num_imgs, args.interval):
            interval = min(args.interval, num_imgs - idx)
            imgs, imgnames = read_img_seq(imgs_list[idx:idx + interval], return_imgname=True)
            imgs = imgs.unsqueeze(0).to(device)
            inference(imgs, imgnames, model, args.save_path)

    # Clean up frames extracted by ffmpeg.
    if use_ffmpeg:
        shutil.rmtree(input_path)
def nnsmith_and(left, right):
    """Logical AND that handles both concrete Python bools and z3 expressions.

    When both operands are plain bools the result is computed eagerly;
    otherwise a symbolic z3.And expression is built.
    """
    both_concrete = isinstance(left, bool) and isinstance(right, bool)
    if both_concrete:
        return left and right
    return z3.And(left, right)
def supervised_finetuning(encoder, episode, device='cpu', proto_init=True, freeze_backbone=False, finetune_batch_norm=False, inner_lr=0.001, total_epoch=15, n_way=5):
    """Fine-tune a linear classifier (and optionally the encoder) on one few-shot episode.

    Args:
        encoder: feature extractor; called as encoder(images) -> (N, D) features.
        episode: dict with 'train'/'test' entries; episode['train'][0][0] holds the
            support images and episode['test'][0][0] the query images, assumed to be
            ordered class-by-class in equal-sized blocks -- TODO confirm loader order.
        device: torch device for all tensors/modules.
        proto_init: initialize the classifier from class prototypes of support features.
        freeze_backbone: if True, only the classifier is trained.
        finetune_batch_norm: if False, BatchNorm2d layers stay in eval mode.
        inner_lr: Adam learning rate for both optimizers.
        total_epoch: number of fine-tuning epochs over the support set.
        n_way: number of classes in the episode.

    Returns:
        (loss, accuracy) on the query set; `loss` is a tensor, `accuracy` a float.
    """
    # Support and query batches.
    x_support = episode['train'][0][0]
    x_support = x_support.to(device)
    x_support_var = Variable(x_support)
    x_query = episode['test'][0][0]
    x_query = x_query.to(device)
    x_query_var = Variable(x_query)
    n_support = (x_support.shape[0] // n_way)
    n_query = (x_query.shape[0] // n_way)
    batch_size = n_way
    support_size = (n_way * n_support)
    # Support labels: n_support copies of each class id, in class order.
    y_a_i = Variable(torch.from_numpy(np.repeat(range(n_way), n_support))).to(device)
    x_b_i = x_query_var
    x_a_i = x_support_var
    # One eval-mode forward pass to get support features for prototype init.
    encoder.eval()
    z_a_i = encoder(x_a_i.to(device))
    encoder.train()
    input_dim = z_a_i.shape[1]
    classifier = Classifier(input_dim, n_way=n_way)
    classifier.to(device)
    classifier.train()
    loss_fn = nn.CrossEntropyLoss().to(device)
    if proto_init:
        classifier.init_params_from_prototypes(z_a_i, n_way, n_support)
    classifier_opt = torch.optim.Adam(classifier.parameters(), lr=inner_lr)
    if (freeze_backbone is False):
        # Separate optimizer for the encoder when it is being fine-tuned.
        delta_opt = torch.optim.Adam(filter((lambda p: p.requires_grad), encoder.parameters()), lr=inner_lr)
    if (freeze_backbone is False):
        encoder.train()
    else:
        encoder.eval()
    classifier.train()
    if (not finetune_batch_norm):
        # Keep BatchNorm running statistics frozen during fine-tuning.
        for module in encoder.modules():
            if isinstance(module, torch.nn.modules.BatchNorm2d):
                module.eval()
    for epoch in tqdm(range(total_epoch), total=total_epoch, leave=False):
        # Fresh shuffle of the support set each epoch.
        rand_id = np.random.permutation(support_size)
        for j in range(0, support_size, batch_size):
            classifier_opt.zero_grad()
            if (freeze_backbone is False):
                delta_opt.zero_grad()
            # Mini-batch of support examples in shuffled order.
            selected_id = torch.from_numpy(rand_id[j:min((j + batch_size), support_size)]).to(device)
            z_batch = x_a_i[selected_id]
            y_batch = y_a_i[selected_id]
            output = encoder(z_batch)
            output = classifier(output)
            loss = loss_fn(output, y_batch)
            loss.backward()
            classifier_opt.step()
            if (freeze_backbone is False):
                delta_opt.step()
    # Evaluate on the query set in eval mode.
    classifier.eval()
    encoder.eval()
    output = encoder(x_b_i.to(device))
    scores = classifier(output)
    y_query = torch.tensor(np.repeat(range(n_way), n_query)).to(device)
    loss = F.cross_entropy(scores, y_query, reduction='mean')
    (_, predictions) = torch.max(scores, dim=1)
    accuracy = torch.mean(predictions.eq(y_query).float())
    return (loss, accuracy.item())
def load_transformer_encoder(bert_model, layer_index, checkpoint_path):
    """Copy one BERT encoder layer's weights from a TF checkpoint into `bert_model`.

    Loads the Q/K/V projections (fused into one qkv matrix/bias), the attention
    output projection, both LayerNorms, and the feed-forward block for layer
    `layer_index`.
    """
    prefix = f'bert/encoder/layer_{layer_index}'

    def load(suffix):
        # Read a single variable from the TF checkpoint by its full name.
        return tf.train.load_variable(checkpoint_path, f'{prefix}/{suffix}')

    layer = bert_model.transformer_layers[layer_index]
    # Fuse the separate query/key/value checkpoint tensors into the single
    # qkv projection used by this model.
    qkv_kernel = np.concatenate([load('attention/self/query/kernel'), load('attention/self/key/kernel'), load('attention/self/value/kernel')], axis=1)
    qkv_bias = np.concatenate([load('attention/self/query/bias'), load('attention/self/key/bias'), load('attention/self/value/bias')], axis=0)
    layer.multihead_attention.qkv.set_weights([qkv_kernel, qkv_bias])
    layer.multihead_attention.output_projection.set_weights([load('attention/output/dense/kernel'), load('attention/output/dense/bias')])
    layer.mha_layer_normalization.set_weights([load('attention/output/LayerNorm/gamma'), load('attention/output/LayerNorm/beta')])
    layer.intermediate_layer.set_weights([load('intermediate/dense/kernel'), load('intermediate/dense/bias'), load('output/dense/kernel'), load('output/dense/bias')])
    layer.intermediate_layer_normalization.set_weights([load('output/LayerNorm/gamma'), load('output/LayerNorm/beta')])
def hm(inputs, inputs_norm, indexes, features, features_norm, momentum=0.5):
    """Invoke the HM autograd function, wrapping `momentum` as a 1-element
    tensor placed on the same device as `inputs`."""
    momentum_tensor = torch.Tensor([momentum]).to(inputs.device)
    return HM.apply(inputs, inputs_norm, indexes, features, features_norm, momentum_tensor)
class CacheDataset():
    """Disk cache for a dataset: serializes every item into a single binary
    file of fixed-size ctypes records and reads items back by byte offset.

    NOTE(review): this class mutates the module-level `ImageHeader` ctypes
    Structure by assigning `_fields_`, so only one cache layout can exist per
    process at a time.
    """

    def __init__(self, filename, dataset):
        # Path of the binary cache file.
        self.filename = filename
        self.dataset = dataset
        # Whether records also carry the source filename (driven by dataset).
        self.save_filename = dataset.return_filename
        (self.original_shape, self.__size) = self.__set_ImageHeader_and_get_item_size()
        if (not self.__use_existing_cache()):
            self.__create_cache()

    def __set_ImageHeader_and_get_item_size(self):
        """Define ImageHeader._fields_ from the first dataset item.

        Returns:
            (original image shape, record size in bytes).
        """
        img_aux = self.dataset.getitem(0)
        img_n_floats = img_aux[0].view((- 1)).shape[0]
        if (type(img_aux[1]) == int):
            # Scalar integer label.
            cat_size = c_int16
            cat_shape = 1
        else:
            # Vector (float) label.
            cat_size = (c_float * img_aux[1].shape[0])
            cat_shape = img_aux[1].shape
        if self.save_filename:
            # 80 bytes are reserved for the utf-8 encoded filename.
            ImageHeader._fields_ = [('img', (c_float * img_n_floats)), ('cat', cat_size), ('fn', (c_char * 80))]
        else:
            ImageHeader._fields_ = [('img', (c_float * img_n_floats)), ('cat', cat_size)]
        return (img_aux[0].shape, ctypes_sizeof(ImageHeader))

    def __use_existing_cache(self):
        """Ask the user (on stderr) whether an existing cache file may be reused."""
        ans = 'N'
        if os.path.exists(self.filename):
            print('The file "{}" already exists.'.format(self.filename), file=stderr)
            print('Do you want to use it?', file=stderr)
            print('(only answer YES if all files listed in "txt" file are cached in {})'.format(self.filename), file=stderr)
            ans = input('[y/N]? ')
        return (True if ans.upper().startswith('Y') else False)

    def __create_cache(self):
        """Serialize every dataset item into the cache file as one record each."""
        print('Creating cache (it can take some time)... ', file=stderr)
        with open(self.filename, 'wb') as fd:
            if self.save_filename:
                for (img, cat, fn) in tqdm(self.dataset):
                    hdr = self.__fill_structure(img, cat)
                    self.__fill_fn(hdr, fn)
                    fd.write(hdr)
            else:
                for (img, cat) in tqdm(self.dataset):
                    hdr = self.__fill_structure(img, cat)
                    fd.write(hdr)

    def __fill_structure(self, img, cat):
        """Build an ImageHeader record from an (image tensor, label) pair."""
        hdr = ImageHeader()
        # _fields_[0][1] is the ctypes float-array type declared for 'img'.
        hdr.img = hdr._fields_[0][1](*img.view((- 1)).numpy().tolist())
        if (type(cat) == int):
            hdr.cat = hdr._fields_[1][1](cat)
        else:
            hdr.cat = hdr._fields_[1][1](*cat)
        return hdr

    def __fill_fn(self, hdr, fn):
        # Stores the filename; ctypes raises if it exceeds the 80-byte field.
        hdr.fn = bytes(fn.encode('utf-8'))

    def get_cached_item(self, index):
        """Read the record at `index` and rebuild (image, label[, filename])."""
        with open(self.filename, 'rb') as fd:
            # Records are fixed-size, so the offset is size * index.
            fd.seek((self.__size * index))
            item = ImageHeader()
            io.BytesIO(fd.read(self.__size)).readinto(item)
            if (type(item.cat) != int):
                cat = np.array(item.cat, order='C', dtype=np.float32)
            else:
                cat = item.cat
            if self.save_filename:
                return (torch.tensor(item.img).reshape(self.original_shape), cat, item.fn.decode('utf-8'))
            else:
                return (torch.tensor(item.img).reshape(self.original_shape), cat)
@register_model('model_parallel_transformer_lm')
class ModelParallelTransformerLanguageModel(TransformerLanguageModel):
    """Transformer language model whose decoder uses Megatron model parallelism.

    NOTE(review): the checked-in text had a bare `_model(...)` call and
    methods taking `cls` with no decorators — restored here as the fairseq
    `@register_model` decorator and `@classmethod` markers; confirm the
    decorator name against this file's imports.
    """

    @classmethod
    def build_model(cls, args, task):
        """fairseq hook: construct the model from parsed `args` and `task`.

        Raises:
            ImportError: if the megatron submodule is not installed.
            NotImplementedError: for character/adaptive input embeddings,
                which model parallelism does not support.
        """
        if not has_megatron_submodule:
            raise ImportError('\n\nPlease install the megatron submodule:\n\n git submodule update --init fairseq/model_parallel/megatron')
        base_lm_architecture(args)
        if args.decoder_layers_to_keep:
            args.decoder_layers = len(args.decoder_layers_to_keep.split(','))
        if getattr(args, 'max_target_positions', None) is None:
            args.max_target_positions = getattr(args, 'tokens_per_sample', DEFAULT_MAX_TARGET_POSITIONS)
        if args.character_embeddings:
            raise NotImplementedError('Character embeddings is not supported for model parallel')
        elif args.adaptive_input:
            raise NotImplementedError('Adaptive input is not supported for model parallel')
        else:
            embed_tokens = cls.build_embedding(args, task.source_dictionary, args.decoder_input_dim)
        decoder = ModelParallelTransformerDecoder(args, task.target_dictionary, embed_tokens, no_encoder_attn=True)
        return cls(decoder)

    @classmethod
    def build_embedding(cls, args, dictionary, embed_dim, path=None):
        """Create a vocab-parallel embedding table for the decoder."""
        def _vocab_init(tensor, **kwargs):
            # Standard fairseq init: N(0, embed_dim^-0.5), then zero row 1
            # (presumably the pad index -- TODO confirm against dictionary.pad()).
            nn.init.normal_(tensor, mean=0, std=(embed_dim ** (- 0.5)))
            nn.init.constant_(tensor[1], 0)
        embed_tokens = VocabParallelEmbedding(len(dictionary), embed_dim, dictionary.pad(), init_method=_vocab_init)
        return embed_tokens
def squeezeresnet_v1_1(**kwargs):
    """SqueezeNet v1.1 with residual (bypass) connections."""
    base_config = dict(version='1.1', residual=True, model_name='squeezeresnet_v1_1')
    return get_squeezenet(**base_config, **kwargs)
def test_osipkovmerritt_hernquist_dens_massprofile():
    """Samples from the Osipkov-Merritt Hernquist DF should reproduce the
    potential's cumulative mass profile for several anisotropy radii."""
    pot = potential.HernquistPotential(amp=2.3, a=1.3)
    tol = 5 * 0.001
    for anisotropy_radius in (0.3, 2.3, 5.7):
        dfh = osipkovmerrittHernquistdf(pot=pot, ra=anisotropy_radius)
        numpy.random.seed(10)  # deterministic sampling
        samp = dfh.sample(n=100000)
        # Normalize the enclosed mass by the mass within the largest sample radius.
        normalized_mass = lambda r: pot.mass(r) / pot.mass(numpy.amax(samp.r()))
        check_spherical_massprofile(samp, normalized_mass, tol, skip=1000)
    return None
def load_and_cache_examples(args, task, tokenizer, evaluate=False):
    """Build a TensorDataset of features for `task`, caching them on disk.

    In distributed training only the first process (local_rank 0 or -1)
    converts examples to features; the others wait at a barrier and then load
    the cached file.

    Args:
        args: parsed arguments (data_dir, local_rank, suffixes, model info, ...).
        task: task/processor key.
        tokenizer: tokenizer used by convert_examples_to_features.
        evaluate: load the test split instead of the train split.

    Returns:
        TensorDataset of (input_ids, attention_mask, token_type_ids, labels).
    """
    # Non-primary processes wait here so only one process writes the cache.
    if ((args.local_rank not in [(- 1), 0]) and (not evaluate)):
        torch.distributed.barrier()
    processor = processors[task](task=task, train_suffix=args.train_suffix, test_suffix=args.test_suffix)
    output_mode = output_modes[task]
    # Optional suffixes select alternate train/test data sub-directories.
    train_dir = (f'{task}_{args.train_suffix}' if ((args.train_suffix is not None) and (args.train_suffix != '')) else task)
    test_dir = (f'{task}_{args.test_suffix}' if ((args.test_suffix is not None) and (args.test_suffix != '')) else task)
    cached_features_file = os.path.join(args.data_dir, (train_dir if ((not evaluate) and (train_dir is not None)) else test_dir), 'cached_{}_{}_{}'.format(('test' if evaluate else 'train'), list(filter(None, args.model_name_or_path.split('/'))).pop(), str(args.max_seq_length)))
    if (os.path.exists(cached_features_file) and (not args.overwrite_cache)):
        logger.info('Loading features from cached file %s', cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info('Creating features from dataset file at %s', args.data_dir)
        label_list = processor.get_labels()
        examples = (processor.get_test_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir))
        features = convert_examples_to_features(examples, tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=output_mode)
        if (args.local_rank in [(- 1), 0]):
            logger.info('Saving features into cached file %s', cached_features_file)
            torch.save(features, cached_features_file)
    # Primary process releases the waiting processes once the cache exists.
    if ((args.local_rank == 0) and (not evaluate)):
        torch.distributed.barrier()
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    if (args.model_type in ['bert']):
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    else:
        # Models without token-type embeddings get a dummy constant column.
        all_token_type_ids = torch.tensor([1 for f in features], dtype=torch.long)
    if (output_mode == 'classification'):
        all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
    else:
        raise ValueError('No other `output_mode` for XNLI.')
    dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
    return dataset
def parse_training_args(args=None, ignore_unknown=False):
    """Parse command-line arguments for a training run.

    Combines the generic training argument group with the custom-MLP group,
    then runs the training-args validation pass.
    """
    populate_funcs = [training_args, custom_mlp_args]
    check_funcs = [process_training_args]
    return parse_various_args(args, populate_funcs, check_funcs, ignore_unknown)
def _create_inception_v3(variant, pretrained=False, **kwargs):
    """Instantiate an Inception-V3 variant, optionally with auxiliary logits.

    Strict pretrained loading is used only when the checkpoint's aux-head
    presence matches the requested architecture.
    """
    cfg = default_cfgs[variant]
    if kwargs.pop('aux_logits', False):
        # The aux-logit head is incompatible with features_only extraction.
        assert not kwargs.pop('features_only', False)
        model_cls, load_strict = InceptionV3Aux, cfg['has_aux']
    else:
        model_cls, load_strict = InceptionV3, not cfg['has_aux']
    return build_model_with_cfg(model_cls, variant, pretrained, default_cfg=cfg, pretrained_strict=load_strict, **kwargs)
def compare_mtcnn(pt_mdl, tf_fun, sess, ind, test_data):
    """Compare one MTCNN stage's outputs between TF and PyTorch implementations.

    Args:
        pt_mdl: PyTorch module.
        tf_fun: callable returning the list of TF models given a session.
        sess: TF session passed to `tf_fun`.
        ind: index of the TF model to compare against.
        test_data: torch tensor of test inputs; permuted (0, 3, 2, 1) before
            the PT pass -- presumably NHWC-style layout, TODO confirm.

    Prints the first 10 flattened values of every output from both models and
    the L2 distance between corresponding TF/PT outputs.
    """
    tf_mdls = tf_fun(sess)
    tf_mdl = tf_mdls[ind]
    print('\nPassing test data through TF model\n')
    tf_output = tf_mdl(test_data.numpy())
    tf_output = [torch.tensor(out) for out in tf_output]
    print('\n'.join([str(o.view((- 1))[:10]) for o in tf_output]))
    print('\nPassing test data through PT model\n')
    with torch.no_grad():
        pt_output = pt_mdl(test_data.permute(0, 3, 2, 1))
    pt_output = [torch.tensor(out) for out in pt_output]
    # Permute 4-D outputs back so both frameworks' outputs are comparable.
    for i in range(len(pt_output)):
        if (len(pt_output[i].shape) == 4):
            pt_output[i] = pt_output[i].permute(0, 3, 2, 1).contiguous()
    print('\n'.join([str(o.view((- 1))[:10]) for o in pt_output]))
    # Element-wise L2 distance per output tensor.
    distance = [(tf_o - pt_o).norm() for (tf_o, pt_o) in zip(tf_output, pt_output)]
    print(f'''
Distance {distance}
''')
@register_model
def resmlp_12_distilled_224(pretrained=False, **kwargs):
    """ResMLP-12 (distilled weights), 224x224 input.

    NOTE(review): the bare `_model` line preceding this def in the checked-in
    text was a mangled `@register_model` decorator (timm convention);
    restored here — confirm against this file's imports.
    """
    model_args = dict(patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=4, block_layer=ResBlock, norm_layer=Affine, **kwargs)
    model = _create_mixer('resmlp_12_distilled_224', pretrained=pretrained, **model_args)
    return model
def sample_generator(dataset, tokenizer):
    """Yield (features, labels) pairs from `dataset` in a random order.

    Every value in an example dict is converted to a tf tensor (int64 hint);
    the yielded target duplicates the example's 'labels' entry. `tokenizer`
    is unused but kept for interface compatibility.
    """
    shuffled_indices = np.random.permutation(len(dataset))
    for raw_index in shuffled_indices:
        raw_example = dataset[int(raw_index)]
        tensors = {}
        for key, arr in raw_example.items():
            tensors[key] = tf.convert_to_tensor(arr, dtype_hint=tf.int64)
        yield tensors, tensors['labels']
class NestingState(object):
    """Tracks nesting of C++ blocks (namespaces, classes, braces, inline asm,
    preprocessor conditionals) while a file is scanned line by line."""

    def __init__(self):
        # Stack of _BlockInfo-derived entries, innermost block last.
        self.stack = []
        # Top of `stack` as of the previous Update() call.
        self.previous_stack_top = []
        # Stack of _PreprocessorInfo for open #if regions.
        self.pp_stack = []

    def SeenOpenBrace(self):
        """True if the innermost block's opening brace has been seen."""
        return ((not self.stack) or self.stack[(- 1)].seen_open_brace)

    def InNamespaceBody(self):
        """True if the innermost block is a namespace body."""
        return (self.stack and isinstance(self.stack[(- 1)], _NamespaceInfo))

    def InExternC(self):
        """True if the innermost block is an extern "C" block."""
        return (self.stack and isinstance(self.stack[(- 1)], _ExternCInfo))

    def InClassDeclaration(self):
        """True if the innermost block is a class/struct declaration."""
        return (self.stack and isinstance(self.stack[(- 1)], _ClassInfo))

    def InAsmBlock(self):
        """True if the innermost block is inline assembly."""
        return (self.stack and (self.stack[(- 1)].inline_asm != _NO_ASM))

    def InTemplateArgumentList(self, clean_lines, linenum, pos):
        """Heuristically decide whether position (linenum, pos) is inside a
        template argument list rather than a declaration body."""
        while (linenum < clean_lines.NumLines()):
            line = clean_lines.elided[linenum]
            # Find the next token that can disambiguate the context.
            match = Match('^[^{};=\\[\\]\\.<>]*(.)', line[pos:])
            if (not match):
                linenum += 1
                pos = 0
                continue
            token = match.group(1)
            pos += len(match.group(0))
            # These tokens only appear outside a template argument list.
            if (token in ('{', '}', ';')):
                return False
            # These tokens only appear inside one.
            if (token in ('>', '=', '[', ']', '.')):
                return True
            if (token != '<'):
                pos += 1
                if (pos >= len(line)):
                    linenum += 1
                    pos = 0
                continue
            # Skip over the nested <...> expression starting at this '<'.
            (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, (pos - 1))
            if (end_pos < 0):
                # Unmatched '<': give up.
                return False
            linenum = end_line
            pos = end_pos
        return False

    def UpdatePreprocessor(self, line):
        """Track #if/#else/#endif so block state can be restored across
        preprocessor branches (only the #if branch's state is kept)."""
        if Match('^\\s*#\\s*(if|ifdef|ifndef)\\b', line):
            # Remember the nesting stack at the start of the conditional.
            self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
        elif Match('^\\s*#\\s*(else|elif)\\b', line):
            if self.pp_stack:
                if (not self.pp_stack[(- 1)].seen_else):
                    # First #else: save the state built in the #if branch.
                    self.pp_stack[(- 1)].seen_else = True
                    self.pp_stack[(- 1)].stack_before_else = copy.deepcopy(self.stack)
                # Restore the state from before the conditional.
                self.stack = copy.deepcopy(self.pp_stack[(- 1)].stack_before_if)
            else:
                # #else without a matching #if: ignore.
                pass
        elif Match('^\\s*#\\s*endif\\b', line):
            if self.pp_stack:
                if self.pp_stack[(- 1)].seen_else:
                    # Keep the state saved at the first #else.
                    self.stack = self.pp_stack[(- 1)].stack_before_else
                self.pp_stack.pop()
            else:
                # #endif without a matching #if: ignore.
                pass

    def Update(self, filename, clean_lines, linenum, error):
        """Consume one (elided) source line and update the nesting stacks."""
        line = clean_lines.elided[linenum]
        # Remember the previous top-of-stack for callers that compare states.
        if self.stack:
            self.previous_stack_top = self.stack[(- 1)]
        else:
            self.previous_stack_top = None
        self.UpdatePreprocessor(line)
        if self.stack:
            inner_block = self.stack[(- 1)]
            # Track parenthesis depth and inline-asm entry/exit.
            depth_change = (line.count('(') - line.count(')'))
            inner_block.open_parentheses += depth_change
            if (inner_block.inline_asm in (_NO_ASM, _END_ASM)):
                if ((depth_change != 0) and (inner_block.open_parentheses == 1) and _MATCH_ASM.match(line)):
                    inner_block.inline_asm = _INSIDE_ASM
                else:
                    inner_block.inline_asm = _NO_ASM
            elif ((inner_block.inline_asm == _INSIDE_ASM) and (inner_block.open_parentheses == 0)):
                inner_block.inline_asm = _END_ASM
        # Consume any namespace declarations at the start of the line.
        while True:
            namespace_decl_match = Match('^\\s*namespace\\b\\s*([:\\w]+)?(.*)$', line)
            if (not namespace_decl_match):
                break
            new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
            self.stack.append(new_namespace)
            line = namespace_decl_match.group(2)
            if (line.find('{') != (- 1)):
                new_namespace.seen_open_brace = True
                line = line[(line.find('{') + 1):]
        # Detect a class/struct declaration (possibly templated).
        class_decl_match = Match('^(\\s*(?:template\\s*<[\\w\\s<>,:]*>\\s*)?(class|struct)\\s+(?:[A-Z_]+\\s+)*(\\w+(?:::\\w+)*))(.*)$', line)
        if (class_decl_match and ((not self.stack) or (self.stack[(- 1)].open_parentheses == 0))):
            end_declaration = len(class_decl_match.group(1))
            # Ignore "class Foo" appearing inside a template argument list.
            if (not self.InTemplateArgumentList(clean_lines, linenum, end_declaration)):
                self.stack.append(_ClassInfo(class_decl_match.group(3), class_decl_match.group(2), clean_lines, linenum))
                line = class_decl_match.group(4)
        # If the innermost block hasn't opened yet, run its begin checks.
        if (not self.SeenOpenBrace()):
            self.stack[(- 1)].CheckBegin(filename, clean_lines, linenum, error)
        # Check access specifier indentation inside class declarations.
        if (self.stack and isinstance(self.stack[(- 1)], _ClassInfo)):
            classinfo = self.stack[(- 1)]
            access_match = Match('^(.*)\\b(public|private|protected|signals)(\\s+(?:slots\\s*)?)?:(?:[^:]|$)', line)
            if access_match:
                classinfo.access = access_match.group(2)
                indent = access_match.group(1)
                if ((len(indent) != (classinfo.class_indent + 1)) and Match('^\\s*$', indent)):
                    if classinfo.is_struct:
                        parent = ('struct ' + classinfo.name)
                    else:
                        parent = ('class ' + classinfo.name)
                    slots = ''
                    if access_match.group(3):
                        slots = access_match.group(3)
                    error(filename, linenum, 'whitespace/indent', 3, ('%s%s: should be indented +1 space inside %s' % (access_match.group(2), slots, parent)))
        # Consume braces/semicolons, pushing and popping blocks as we go.
        while True:
            matched = Match('^[^{;)}]*([{;)}])(.*)$', line)
            if (not matched):
                break
            token = matched.group(1)
            if (token == '{'):
                # Opening brace: either completes the pending block or
                # starts a new extern "C"/generic block.
                if (not self.SeenOpenBrace()):
                    self.stack[(- 1)].seen_open_brace = True
                elif Match('^extern\\s*"[^"]*"\\s*\\{', line):
                    self.stack.append(_ExternCInfo())
                else:
                    self.stack.append(_BlockInfo(True))
                    if _MATCH_ASM.match(line):
                        self.stack[(- 1)].inline_asm = _BLOCK_ASM
            elif ((token == ';') or (token == ')')):
                # A pending block that never opened (e.g. forward decl).
                if (not self.SeenOpenBrace()):
                    self.stack.pop()
            elif self.stack:
                # Closing brace: run end-of-block checks and pop.
                self.stack[(- 1)].CheckEnd(filename, clean_lines, linenum, error)
                self.stack.pop()
            line = matched.group(2)

    def InnermostClass(self):
        """Return the innermost _ClassInfo on the stack, or None."""
        for i in range(len(self.stack), 0, (- 1)):
            classinfo = self.stack[(i - 1)]
            if isinstance(classinfo, _ClassInfo):
                return classinfo
        return None

    def CheckCompletedBlocks(self, filename, error):
        """At EOF, report any class/namespace whose declaration never closed."""
        for obj in self.stack:
            if isinstance(obj, _ClassInfo):
                error(filename, obj.starting_linenum, 'build/class', 5, ('Failed to find complete declaration of class %s' % obj.name))
            elif isinstance(obj, _NamespaceInfo):
                error(filename, obj.starting_linenum, 'build/namespaces', 5, ('Failed to find complete declaration of namespace %s' % obj.name))
class WarmupExpLR(WarmupLR):
    """Warmup schedule followed by stepwise exponential decay.

    After the warmup phase the learning-rate ratio decays by a factor of
    `gamma` once every `interval` steps.
    """

    def __init__(self, optimizer, gamma, interval=1, warmup_iter=500, warmup_ratio=0.0005, warmup='exp', last_epoch=(- 1)) -> None:
        # Decay factor and how many post-warmup steps between decays.
        self.gamma = gamma
        self.interval = interval
        super().__init__(optimizer, warmup_iter, warmup_ratio, warmup, last_epoch)

    def get_main_ratio(self):
        """Return gamma ** floor(steps_since_warmup / interval)."""
        steps_after_warmup = self.last_epoch - self.warmup_iter
        decay_count = steps_after_warmup // self.interval
        return self.gamma ** decay_count
def q_to_mtx_tf(q):
    """Convert a quaternion to a 4x4 homogeneous transform (TensorFlow).

    The rotation formula indexes q[0..2] as the vector part and q[3] as the
    scalar part -- presumably (x, y, z, w) order; TODO confirm with callers.

    Returns:
        4x4 float32 tensor: the (transposed) 3x3 rotation padded with a zero
        translation column and a [0, 0, 0, 1] bottom row.
    """
    # Rows of the standard quaternion-to-rotation-matrix formula.
    r0 = tf.stack([((1.0 - (2.0 * (q[1] ** 2))) - (2.0 * (q[2] ** 2))), (((2.0 * q[0]) * q[1]) - ((2.0 * q[2]) * q[3])), (((2.0 * q[0]) * q[2]) + ((2.0 * q[1]) * q[3]))])
    r1 = tf.stack([(((2.0 * q[0]) * q[1]) + ((2.0 * q[2]) * q[3])), ((1.0 - (2.0 * (q[0] ** 2))) - (2.0 * (q[2] ** 2))), (((2.0 * q[1]) * q[2]) - ((2.0 * q[0]) * q[3]))])
    r2 = tf.stack([(((2.0 * q[0]) * q[2]) - ((2.0 * q[1]) * q[3])), (((2.0 * q[1]) * q[2]) + ((2.0 * q[0]) * q[3])), ((1.0 - (2.0 * (q[0] ** 2))) - (2.0 * (q[1] ** 2)))])
    # NOTE(review): the [1, 0] perm transposes the stacked rows (i.e. the
    # result is the transpose/inverse rotation) -- confirm this is intended.
    rr = tf.transpose(tf.stack([r0, r1, r2]), [1, 0])
    # Append a zero translation column and the homogeneous bottom row.
    rr = tf.concat([rr, tf.convert_to_tensor([[0], [0], [0]], tf.float32)], axis=1)
    rr = tf.concat([rr, tf.convert_to_tensor([[0, 0, 0, 1]], tf.float32)], axis=0)
    return rr
def convert_examples_to_features(examples, tokenizer, max_seq1_length=256, max_seq2_length=128, verbose=True):
    """Tokenize claim/evidence examples into InputFeatures.

    For each example this builds (a) one claim+all-evidences encoding capped
    at `max_seq1_length`, and (b) one claim+candidate encoding per evidential
    candidate capped at `max_seq2_length`.

    Args:
        examples: iterable of objects exposing .guid, .label, .nli_labels,
            .claim, .evidences and .evidential.
        tokenizer: HuggingFace-style tokenizer (encode, sep_token_id).
        max_seq1_length: length cap for the claim+evidence pair.
        max_seq2_length: length cap for each claim+candidate pair.
        verbose: show a progress bar and log the first five examples.

    Returns:
        list of InputFeatures.
    """
    features = []
    # FIX: renamed from `iter`, which shadowed the builtin.
    example_iter = tqdm(examples, desc='Converting Examples') if verbose else examples
    for ex_index, example in enumerate(example_iter):
        encoded_outputs = {'guid': example.guid, 'label': example.label, 'nli_labels': example.nli_labels}
        token_ids_a, token_ids_b = [], []
        # Side A: the claim.
        token_ids = tokenizer.encode(example.claim, add_special_tokens=False)
        token_ids_a.extend(token_ids)
        # Side B: all evidences joined by [SEP].
        for i, evidence in enumerate(example.evidences):
            token_ids = tokenizer.encode(evidence, add_special_tokens=False)
            token_ids_b.extend(token_ids + [tokenizer.sep_token_id])
        # Drop the trailing separator, then truncate so the claim, evidence
        # and (up to 4) special tokens fit within max_seq1_length.
        token_ids_b = token_ids_b[:-1]
        token_ids_b = token_ids_b[:max_seq1_length - len(token_ids_a) - 4]
        input_ids, attention_mask, token_type_ids = _create_input_ids_from_token_ids(token_ids_b, token_ids_a, tokenizer, max_seq1_length)
        encoded_outputs['c_input_ids'] = input_ids
        encoded_outputs['c_attention_mask'] = attention_mask
        encoded_outputs['c_token_type_ids'] = token_type_ids
        # One claim+candidate encoding per evidential candidate.
        encoded_outputs['q_input_ids_list'] = []
        encoded_outputs['q_attention_mask_list'] = []
        encoded_outputs['q_token_type_ids_list'] = []
        for candidate in example.evidential:
            token_ids_a = tokenizer.encode(example.claim, add_special_tokens=False)
            token_ids_b = tokenizer.encode(candidate, add_special_tokens=False)
            input_ids, attention_mask, token_type_ids = _create_input_ids_from_token_ids(token_ids_b, token_ids_a, tokenizer, max_seq2_length)
            encoded_outputs['q_input_ids_list'].append(input_ids)
            encoded_outputs['q_attention_mask_list'].append(attention_mask)
            encoded_outputs['q_token_type_ids_list'].append(token_type_ids)
        features.append(InputFeatures(**encoded_outputs))
        # Log the first few converted examples for manual inspection.
        if ex_index < 5 and verbose:
            logger.info('*** Example ***')
            logger.info('guid: {}'.format(example.guid))
            logger.info('c_input_ids: {}'.format(encoded_outputs['c_input_ids']))
            for input_ids in encoded_outputs['q_input_ids_list']:
                logger.info('q_input_ids: {}'.format(input_ids))
            logger.info('label: {}'.format(example.label))
            logger.info('nli_labels: {}'.format(example.nli_labels))
    return features
def get_sinc_impulse(sample_rate, duration):
    """Return a float32 sinc impulse of `duration` seconds at `sample_rate` Hz.

    The impulse has amplitude 0.5 and its sinc argument scales time by
    sample_rate / 2, centered at t = 0.
    """
    amplitude = 2 * 0.25
    band = (2 * sample_rate) / 4
    # Time axis symmetric around zero, one sample every 1/sample_rate s.
    time_axis = np.arange((-duration) / 2, duration / 2, 1 / sample_rate)
    impulse = amplitude * np.sinc(band * time_axis)
    return impulse.astype(np.float32)
class Framework():
    """Container-orchestration co-simulation environment.

    Tracks a fixed set of hosts and a bounded list of containers, applies
    scheduler placement/migration decisions each interval, sleeps out the
    remainder of the interval in real time, and records allocation timings.
    """

    def __init__(self, Scheduler, Recovery, ContainerLimit, IntervalTime, hostinit, database, env, logger):
        # Number of hosts is fixed by the initial host list.
        self.hostlimit = len(hostinit)
        self.scheduler = Scheduler
        self.scheduler.setEnvironment(self)
        self.recovery = Recovery
        self.recovery.setEnvironment(self)
        # Maximum number of simultaneously tracked containers.
        self.containerlimit = ContainerLimit
        self.hostlist = []
        # Fixed-size slot list; None marks a free slot.
        self.containerlist = []
        # Wall-clock seconds per simulation interval.
        self.intervaltime = IntervalTime
        self.interval = 0
        self.db = database
        # Destroyed containers are moved here for later lookup.
        self.inactiveContainers = []
        self.logger = logger
        self.stats = None
        self.environment = env
        self.controller = RequestHandler(self.db, self)
        self.addHostlistInit(hostinit)
        self.globalStartTime = time()
        # Seconds spent applying each interval's allocation decision.
        self.intervalAllocTimings = []

    def addHostInit(self, IP, IPS, RAM, Disk, Bw, Powermodel):
        """Create one Node and append it to the host list."""
        assert (len(self.hostlist) < self.hostlimit)
        host = Node(len(self.hostlist), IP, IPS, RAM, Disk, Bw, Powermodel, self)
        self.hostlist.append(host)

    def addHostlistInit(self, hostList):
        """Create all hosts from the initial host spec list."""
        assert (len(hostList) == self.hostlimit)
        for (IP, IPS, RAM, Disk, Bw, Powermodel) in hostList:
            self.addHostInit(IP, IPS, RAM, Disk, Bw, Powermodel)

    def addContainerInit(self, CreationID, CreationInterval, SLA, Application):
        """Create an unplaced Task (HostID=-1) and append it; returns the Task."""
        container = Task(len(self.containerlist), CreationID, CreationInterval, SLA, Application, self, HostID=(- 1))
        self.containerlist.append(container)
        return container

    def addContainerListInit(self, containerInfoList):
        """Deploy as many initial containers as capacity allows; pad the slot
        list with None up to the container limit; return deployed ids."""
        deployed = containerInfoList[:min(len(containerInfoList), (self.containerlimit - self.getNumActiveContainers()))]
        deployedContainers = []
        for (CreationID, CreationInterval, SLA, Application) in deployed:
            dep = self.addContainerInit(CreationID, CreationInterval, SLA, Application)
            deployedContainers.append(dep)
        self.containerlist += ([None] * (self.containerlimit - len(self.containerlist)))
        return [container.id for container in deployedContainers]

    def addContainer(self, CreationID, CreationInterval, SLA, Application):
        """Place a new Task in the first free/inactive slot; returns the Task."""
        for (i, c) in enumerate(self.containerlist):
            if ((c == None) or (not c.active)):
                container = Task(i, CreationID, CreationInterval, SLA, Application, self, HostID=(- 1))
                self.containerlist[i] = container
                return container

    def addContainerList(self, containerInfoList):
        """Deploy new containers up to remaining capacity; return deployed ids."""
        deployed = containerInfoList[:min(len(containerInfoList), (self.containerlimit - self.getNumActiveContainers()))]
        deployedContainers = []
        for (CreationID, CreationInterval, SLA, Application) in deployed:
            dep = self.addContainer(CreationID, CreationInterval, SLA, Application)
            deployedContainers.append(dep)
        return [container.id for container in deployedContainers]

    def getContainersOfHost(self, hostID):
        """Return ids of containers currently placed on `hostID`."""
        containers = []
        for container in self.containerlist:
            if (container and (container.hostid == hostID)):
                containers.append(container.id)
        return containers

    def getContainerByID(self, containerID):
        """Return the container occupying slot `containerID` (may be None)."""
        return self.containerlist[containerID]

    def getContainerByCID(self, creationID):
        """Find a container (active or inactive) by its creation id."""
        for c in (self.containerlist + self.inactiveContainers):
            if (c and (c.creationID == creationID)):
                return c

    def getHostByID(self, hostID):
        """Return the host with the given id."""
        return self.hostlist[hostID]

    def getCreationIDs(self, migrations, containerIDs):
        """Map migration decisions whose container id is in `containerIDs`
        to the corresponding creation ids."""
        creationIDs = []
        for decision in migrations:
            if (decision[0] in containerIDs):
                creationIDs.append(self.containerlist[decision[0]].creationID)
        return creationIDs

    def getPlacementPossible(self, containerID, hostID):
        """True if the host has enough IPS, RAM and disk for the container.

        Only the size components of RAM/disk are checked; read/write
        bandwidth components are ignored.
        """
        container = self.containerlist[containerID]
        host = self.hostlist[hostID]
        ipsreq = container.getBaseIPS()
        (ramsizereq, _, _) = container.getRAM()
        (disksizereq, _, _) = container.getDisk()
        ipsavailable = host.getIPSAvailable()
        (ramsizeav, ramreadav, ramwriteav) = host.getRAMAvailable()
        (disksizeav, diskreadav, diskwriteav) = host.getDiskAvailable()
        return ((ipsreq <= ipsavailable) and (ramsizereq <= ramsizeav) and (disksizereq <= disksizeav))

    def addContainersInit(self, containerInfoListInit):
        """Advance to the first interval and deploy the initial containers."""
        self.interval += 1
        deployed = self.addContainerListInit(containerInfoListInit)
        return deployed

    def allocateInit(self, decision):
        """Apply the first placement decision; infeasible placements free the slot.

        Returns the (cid, hid) pairs that were actually placed.
        """
        start = time()
        migrations = []
        for (cid, hid) in decision:
            container = self.getContainerByID(cid)
            # Initial allocation: container must be unplaced, target valid.
            assert ((container.getHostID() == (- 1)) and (hid != (- 1)))
            if self.getPlacementPossible(cid, hid):
                migrations.append((cid, hid))
                container.allocateAndExecute(hid)
            else:
                self.containerlist[cid] = None
        self.intervalAllocTimings.append((time() - start))
        self.logger.debug(('First allocation: ' + str(decision)))
        self.logger.debug(((('Interval allocation time for interval ' + str(self.interval)) + ' is ') + str(self.intervalAllocTimings[(- 1)])))
        print(((('Interval allocation time for interval ' + str(self.interval)) + ' is ') + str(self.intervalAllocTimings[(- 1)])))
        # NOTE(review): unlike simulationStep, the sleep here is not clamped
        # with max(0, ...); a slow allocation would pass a negative duration.
        self.visualSleep((self.intervaltime - self.intervalAllocTimings[(- 1)]))
        for host in self.hostlist:
            host.updateUtilizationMetrics()
        return migrations

    def destroyCompletedContainers(self):
        """Destroy inactive containers, free their slots, and return them."""
        destroyed = []
        for (i, container) in enumerate(self.containerlist):
            if (container and (not container.active)):
                container.destroy()
                self.containerlist[i] = None
                self.inactiveContainers.append(container)
                destroyed.append(container)
        return destroyed

    def getNumActiveContainers(self):
        """Count currently active containers."""
        num = 0
        for container in self.containerlist:
            if (container and container.active):
                num += 1
        return num

    def getSelectableContainers(self):
        """Return ids of active, placed containers (candidates for migration)."""
        selectable = []
        # NOTE(review): `selected` and the db query result are never used.
        selected = []
        containers = self.db.select('SELECT * FROM CreatedContainers;')
        for container in self.containerlist:
            if (container and container.active and (container.getHostID() != (- 1))):
                selectable.append(container.id)
        print(selectable)
        return selectable

    def addContainers(self, newContainerList):
        """Advance one interval: destroy finished containers, deploy new ones."""
        self.interval += 1
        destroyed = self.destroyCompletedContainers()
        deployed = self.addContainerList(newContainerList)
        return (deployed, destroyed)

    def getActiveContainerList(self):
        """Per-slot host id of each active container, -1 for inactive/empty slots."""
        return [(c.getHostID() if (c and c.active) else (- 1)) for c in self.containerlist]

    def getContainersInHosts(self):
        """Number of containers placed on each host, by host id."""
        return [len(self.getContainersOfHost(host)) for host in range(self.hostlimit)]

    def parallelizedFunc(self, i):
        """Execute one (cid, hid) decision: restore if already placed, else execute."""
        (cid, hid) = i
        container = self.getContainerByID(cid)
        if (self.containerlist[cid].hostid != (- 1)):
            container.allocateAndrestore(hid)
        else:
            container.allocateAndExecute(hid)
        return container

    def visualSleep(self, t):
        """Sleep `t` seconds while printing a one-line countdown."""
        total = (((str(int((t // 60))) + ' min, ') + str((t % 60))) + ' sec')
        for i in range(int(t)):
            print(((((('\r>> Interval timer ' + str((i // 60))) + ' min, ') + str((i % 60))) + ' sec of ') + total), end=' ')
            sleep(1)
        # Sleep the fractional remainder.
        sleep((t % 1))
        print()

    def simulationStep(self, decision):
        """Apply one interval's migration decision in parallel.

        Feasible moves to a different host are executed via parallelizedFunc;
        containers that remain unplaced afterwards have their slots freed.
        Returns the (cid, hid) pairs that were migrated.
        """
        start = time()
        migrations = []
        containerIDsAllocated = []
        print(decision)
        for (cid, hid) in decision:
            container = self.getContainerByID(cid)
            currentHostID = self.getContainerByID(cid).getHostID()
            currentHost = self.getHostByID(currentHostID)
            targetHost = self.getHostByID(hid)
            # Only migrate if the target differs and has capacity.
            if ((hid != self.containerlist[cid].hostid) and self.getPlacementPossible(cid, hid)):
                containerIDsAllocated.append(cid)
                migrations.append((cid, hid))
        Parallel(n_jobs=num_cores, backend='threading')((delayed(self.parallelizedFunc)(i) for i in migrations))
        # Free slots of containers that ended up unplaced.
        for (cid, hid) in decision:
            if (self.containerlist[cid].hostid == (- 1)):
                self.containerlist[cid] = None
        self.intervalAllocTimings.append((time() - start))
        self.logger.debug(('Decision: ' + str(decision)))
        self.logger.debug(((('Interval allocation time for interval ' + str(self.interval)) + ' is ') + str(self.intervalAllocTimings[(- 1)])))
        print(((('Interval allocation time for interval ' + str(self.interval)) + ' is ') + str(self.intervalAllocTimings[(- 1)])))
        # Sleep out the remainder of the interval (clamped at zero).
        self.visualSleep(max(0, (self.intervaltime - self.intervalAllocTimings[(- 1)])))
        for host in self.hostlist:
            host.updateUtilizationMetrics()
        return migrations
class CacheFlowWorker():
    """Model worker that serves generation requests through a CacheFlow `Server`.

    The worker registers itself with a controller, reports its load via a
    background heart-beat thread, and streams generated text for incoming
    prompts through :meth:`generate_stream`.
    """

    def __init__(self, controller_addr, worker_addr, worker_id, no_register, model_path, model_name, block_size, seed, swap_space, max_num_batched_tokens, distributed_init_method, all_stage_devices):
        self.controller_addr = controller_addr
        self.worker_addr = worker_addr
        self.worker_id = worker_id
        # Strip a trailing slash so the fallback model name below is never empty.
        if model_path.endswith('/'):
            model_path = model_path[:(- 1)]
        self.model_name = (model_name or model_path.split('/')[(- 1)])
        logger.info(f'Loading the model {self.model_name} on worker {worker_id} ...')
        self.block_size = block_size
        self.tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
        self.seq_group_counter = Counter()
        self.seq_counter = Counter()
        # Maximum context window (prompt + generated tokens) of the model.
        self.context_len = 2048
        remote_server_class = Server
        self.server = remote_server_class(model=self.model_name, model_path=model_path, pipeline_parallel_size=1, tensor_parallel_size=1, block_size=block_size, dtype=torch.float16, seed=seed, swap_space=swap_space, max_num_batched_tokens=max_num_batched_tokens, num_nodes=1, num_devices_per_node=4, distributed_init_method=distributed_init_method, all_stage_devices=all_stage_devices, gpu_memory=get_gpu_memory(), cpu_memory=get_cpu_memory())
        # group_id -> latest SequenceGroup state / wake-up event for streams.
        self.running_seq_groups: Dict[(int, SequenceGroup)] = {}
        self.sequence_group_events: Dict[(int, asyncio.Event)] = {}
        self.is_server_running = False
        if (not no_register):
            self.register_to_controller()
            self.heart_beat_thread = threading.Thread(target=heart_beat_worker, args=(self,))
            self.heart_beat_thread.start()

    def register_to_controller(self):
        """Announce this worker (and its current status) to the controller."""
        logger.info('Register to controller')
        url = (self.controller_addr + '/register_worker')
        data = {'worker_name': self.worker_addr, 'check_heart_beat': True, 'worker_status': self.get_status()}
        r = requests.post(url, json=data)
        assert (r.status_code == 200)

    def send_heart_beat(self):
        """Report liveness and queue length; re-register if the controller forgot us."""
        logger.info(f'Send heart beat. Models: {[self.model_name]}. Semaphore: {pretty_print_semaphore(model_semaphore)}. global_counter: {global_counter}')
        url = (self.controller_addr + '/receive_heart_beat')
        while True:
            try:
                ret = requests.post(url, json={'worker_name': self.worker_addr, 'queue_length': self.get_queue_length()}, timeout=5)
                exist = ret.json()['exist']
                break
            except requests.exceptions.RequestException as e:
                # Network hiccup: log and retry after a pause.
                logger.error(f'heart beat error: {e}')
                time.sleep(5)
        if (not exist):
            self.register_to_controller()

    def get_queue_length(self):
        """Approximate number of requests waiting on the concurrency semaphore."""
        if ((model_semaphore is None) or (model_semaphore._value is None) or (model_semaphore._waiters is None)):
            return 0
        else:
            return ((args.limit_model_concurrency - model_semaphore._value) + len(model_semaphore._waiters))

    def get_status(self):
        """Status dict in the shape the controller expects."""
        return {'model_names': [self.model_name], 'speed': 1, 'queue_length': self.get_queue_length()}

    async def server_step(self):
        """Run one scheduler step and wake every stream whose group advanced."""
        self.is_server_running = True
        updated_seq_groups = self.server.step()
        self.is_server_running = False
        for seq_group in updated_seq_groups:
            group_id = seq_group.group_id
            self.running_seq_groups[group_id] = seq_group
            self.sequence_group_events[group_id].set()

    async def generate_stream(self, params):
        """Yield NUL-terminated JSON chunks containing the text generated so far.

        ``params`` must contain 'prompt' and may contain 'temperature',
        'max_new_tokens', 'stop' plus any SamplingParams fields.
        """
        tokenizer = self.tokenizer
        context = params['prompt']
        temperature = float(params.get('temperature', 1.0))
        max_new_tokens = min(int(params.get('max_new_tokens', 256)), 1024)
        stop_str = params.get('stop', None)
        input_ids = tokenizer(context).input_ids
        # Truncate the prompt so prompt + generation (+ safety margin) fits the window.
        max_src_len = ((self.context_len - max_new_tokens) - 8)
        input_ids = input_ids[(- max_src_len):]
        sampling_params = SamplingParams.from_dict(params)
        sampling_params.stop_token_ids.add(tokenizer.eos_token_id)
        sampling_params.n = 1
        sampling_params.max_num_steps = max_new_tokens
        sampling_params.temperature = temperature
        if (stop_str is not None):
            sampling_params.stop_str = stop_str
        seqs: List[Sequence] = []
        for _ in range(sampling_params.n):
            seq_id = next(self.seq_counter)
            seq = Sequence(seq_id, input_ids, block_size=self.block_size)
            seqs.append(seq)
        arrival_time = time.time()
        group_id = next(self.seq_group_counter)
        seq_group = SequenceGroup(group_id, seqs, arrival_time)
        group_event = asyncio.Event()
        self.running_seq_groups[group_id] = seq_group
        self.sequence_group_events[group_id] = group_event
        self.server.add_sequence_groups([(seq_group, sampling_params)])
        while True:
            if (not self.is_server_running):
                (await self.server_step())
            try:
                (await asyncio.wait_for(group_event.wait(), timeout=TIMEOUT_TO_PREVENT_DEADLOCK))
            except asyncio.TimeoutError:
                # Fixed: this was a bare `except:`, which also swallowed
                # asyncio.CancelledError and made the stream uncancellable.
                # Timing out is expected; just poll the group state below.
                pass
            group_event.clear()
            seq_group = self.running_seq_groups[group_id]
            all_outputs = []
            for seq in seq_group.seqs:
                token_ids = seq.get_token_ids()
                output = self.tokenizer.decode(token_ids, skip_special_tokens=True)
                if (stop_str is not None):
                    # Trim a trailing stop string from what we show the client.
                    if output.endswith(stop_str):
                        output = output[:(- len(stop_str))]
                all_outputs.append(output)
            assert (len(seq_group.seqs) == 1)
            ret = {'text': all_outputs[0], 'error_code': 0}
            (yield (json.dumps(ret) + '\x00').encode('utf-8'))
            if seq_group.is_finished():
                del self.running_seq_groups[group_id]
                del self.sequence_group_events[group_id]
                break
def main():
    """Command-line entry point: convert a YOLO .cfg into a Caffe .prototxt."""
    parser = argparse.ArgumentParser(description='Convert YOLO cfg to Caffe prototxt')
    parser.add_argument('cfg', type=str, help='YOLO cfg')
    parser.add_argument('prototxt', type=str, help='Caffe prototxt')
    # Fixed: the help text was missing its closing parenthesis.
    parser.add_argument('--approx', help='flag whether to approximate leaky relu or not (for TensorRT implementation)', action='store_true')
    args = parser.parse_args()
    convert(args.cfg, args.prototxt, args.approx)
def train(train_loader, model, criterion, optimizer, epoch, args, log, tf_writer):
    """Train `model` for one epoch, logging metrics to stdout, `log` and TensorBoard.

    Tracks batch/data timing, loss and top-1/top-5 accuracy over the epoch,
    prints every `args.print_freq` batches, and writes epoch averages to
    `tf_writer` at the end.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    # NOTE(review): the metric names here are empty strings — they were
    # presumably 'Prec@1'/'Prec@5' (or 'Acc@1'/'Acc@5') before being stripped; confirm.
    top1 = AverageMeter('', ':6.2f')
    top5 = AverageMeter('', ':6.2f')
    model.train()
    end = time.time()
    for (i, (inputs, target)) in enumerate(train_loader):
        # Time spent waiting on the data loader for this batch.
        data_time.update((time.time() - end))
        inputs = inputs.cuda()
        target = target.cuda()
        output = model(inputs)
        loss = criterion(output, target)
        (acc1, acc5) = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        top1.update(acc1[0], inputs.size(0))
        top5.update(acc5[0], inputs.size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Full batch time: data loading + forward + backward + step.
        batch_time.update((time.time() - end))
        end = time.time()
        if ((i % args.print_freq) == 0):
            # NOTE(review): the '\ ' sequences in this format string look like
            # mangled metric labels (e.g. '\tPrec@1'); the displayed lr is the
            # last param group's lr * 0.1 — presumably because that group runs
            # at 10x the base lr; confirm against the training setup.
            output = 'Epoch: [{0}][{1}/{2}], lr: {lr:.5f}\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tData {data_time.val:.3f} ({data_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\ {top1.val:.3f} ({top1.avg:.3f})\ {top5.val:.3f} ({top5.avg:.3f})'.format(epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, top1=top1, top5=top5, lr=(optimizer.param_groups[(- 1)]['lr'] * 0.1))
            print(output)
            log.write((output + '\n'))
            log.flush()
    tf_writer.add_scalar('loss/train', losses.avg, epoch)
    tf_writer.add_scalar('acc/train_top1', top1.avg, epoch)
    tf_writer.add_scalar('acc/train_top5', top5.avg, epoch)
    tf_writer.add_scalar('lr', optimizer.param_groups[(- 1)]['lr'], epoch)
def pyramidnet110_a270_svhn(num_classes=10, **kwargs):
    """PyramidNet-110 (alpha=270, basic blocks) configured for SVHN.

    Any extra keyword arguments are forwarded to ``get_pyramidnet_cifar``.
    """
    return get_pyramidnet_cifar(
        num_classes=num_classes,
        blocks=110,
        alpha=270,
        bottleneck=False,
        model_name='pyramidnet110_a270_svhn',
        **kwargs,
    )
def run():
    """Set up data iterators, symbol and metrics, then launch GOCD training.

    All `param_*` names are module-level configuration values defined
    elsewhere in this file.
    """
    logging_GOCD.init_logging(log_file_path=param_log_file_path, log_file_mode=param_log_mode)
    logging.info('Preparing before training.')
    # Imports below are deferred until after the path tweak so the local
    # packages resolve.
    sys.path.append('..')
    from symbol_farm import symbol_64_512_16L_3scales_v1_small as net
    (net_symbol, data_names, label_names) = net.get_net_symbol()
    net_initializer = mxnet.initializer.Xavier()
    logging.info('Get net symbol successfully.')
    from data_provider_farm.pickle_provider import PickleProvider
    from data_iterator_farm.multithread_dataiter_for_cross_entropy_v1_small import Multithread_DataIter_for_CrossEntropy as DataIter
    # Training data pipeline with the full augmentation configuration.
    train_data_provider = PickleProvider(param_trainset_pickle_file_path)
    train_dataiter = DataIter(mxnet_module=mxnet, num_threads=param_num_thread_train_dataiter, data_provider=train_data_provider, batch_size=param_train_batch_size, enable_horizon_flip=param_enable_horizon_flip, enable_vertical_flip=param_enable_vertical_flip, enable_random_brightness=param_enable_random_brightness, brightness_params=param_brightness_factors, enable_random_saturation=param_enable_random_saturation, saturation_params=param_saturation_factors, enable_random_contrast=param_enable_random_contrast, contrast_params=param_contrast_factors, enable_blur=param_enable_blur, blur_params=param_blur_factors, blur_kernel_size_list=param_blur_kernel_size_list, neg_image_ratio=param_neg_image_ratio, num_image_channels=param_num_image_channel, net_input_height=param_net_input_height, net_input_width=param_net_input_width, num_output_scales=param_num_output_scales, receptive_field_list=param_receptive_field_list, receptive_field_stride=param_receptive_field_stride, feature_map_size_list=param_feature_map_size_list, receptive_field_center_start=param_receptive_field_center_start, bbox_small_list=param_bbox_small_list, bbox_large_list=param_bbox_large_list, bbox_small_gray_list=param_bbox_small_gray_list, bbox_large_gray_list=param_bbox_large_gray_list, num_output_channels=param_num_output_channels, neg_image_resize_factor_interval=param_neg_image_resize_factor_interval)
    # Validation pipeline is optional: built only when all its params are set.
    val_dataiter = None
    if ((param_valset_pickle_file_path != '') and (param_val_batch_size != 0) and (param_num_val_loops != 0) and (param_num_thread_val_dataiter != 0)):
        val_data_provider = PickleProvider(param_valset_pickle_file_path)
        val_dataiter = DataIter(mxnet_module=mxnet, num_threads=param_num_thread_val_dataiter, data_provider=val_data_provider, batch_size=param_val_batch_size, enable_horizon_flip=param_enable_horizon_flip, enable_vertical_flip=param_enable_vertical_flip, enable_random_brightness=param_enable_random_brightness, brightness_params=param_brightness_factors, enable_random_saturation=param_enable_random_saturation, saturation_params=param_saturation_factors, enable_random_contrast=param_enable_random_contrast, contrast_params=param_contrast_factors, enable_blur=param_enable_blur, blur_params=param_blur_factors, blur_kernel_size_list=param_blur_kernel_size_list, neg_image_ratio=param_neg_image_ratio, num_image_channels=param_num_image_channel, net_input_height=param_net_input_height, net_input_width=param_net_input_width, num_output_scales=param_num_output_scales, receptive_field_list=param_receptive_field_list, receptive_field_stride=param_receptive_field_stride, feature_map_size_list=param_feature_map_size_list, receptive_field_center_start=param_receptive_field_center_start, bbox_small_list=param_bbox_small_list, bbox_large_list=param_bbox_large_list, bbox_small_gray_list=param_bbox_small_gray_list, bbox_large_gray_list=param_bbox_large_gray_list, num_output_channels=param_num_output_channels, neg_image_resize_factor_interval=param_neg_image_resize_factor_interval)
    from metric_farm.metric_default import Metric
    train_metric = Metric(param_num_output_scales)
    val_metric = None
    if (val_dataiter is not None):
        val_metric = Metric(param_num_output_scales)
    # Hand everything to the training driver; one context per configured GPU.
    train_GOCD.start_train(param_dict=param_dict, mxnet_module=mxnet, context=[mxnet.gpu(i) for i in param_GPU_idx_list], train_dataiter=train_dataiter, train_metric=train_metric, train_metric_update_frequency=param_train_metric_update_frequency, num_train_loops=param_num_train_loops, val_dataiter=val_dataiter, val_metric=val_metric, num_val_loops=param_num_val_loops, validation_interval=param_validation_interval, optimizer_name=param_optimizer_name, optimizer_params=param_optimizer_params, net_symbol=net_symbol, net_initializer=net_initializer, net_data_names=data_names, net_label_names=label_names, pretrained_model_param_path=param_pretrained_model_param_path, display_interval=param_display_interval, save_prefix=param_save_prefix, model_save_interval=param_model_save_interval, start_index=param_start_index)
def yolo_config():
    """Build a frozen, eval-mode YOLOV3Head for testing.

    Anchor sizes follow the standard YOLOv3 configuration, largest stride
    first.
    """
    anchors = [
        [(116, 90), (156, 198), (373, 326)],
        [(30, 61), (62, 45), (59, 119)],
        [(10, 13), (16, 30), (33, 23)],
    ]
    head_cfg = dict(
        anchor_generator=dict(type='YOLOAnchorGenerator', base_sizes=anchors, strides=[32, 16, 8]),
        bbox_coder=dict(type='YOLOBBoxCoder'),
    )
    test_cfg = mmcv.Config(dict(
        deploy_nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        conf_thr=0.005,
        nms=dict(type='nms', iou_threshold=0.45),
        max_per_img=100,
    ))
    head = YOLOV3Head(num_classes=4, in_channels=[1, 1, 1], out_channels=[16, 8, 4], test_cfg=test_cfg, **head_cfg)
    # Freeze parameters and switch to inference mode.
    head.requires_grad_(False)
    head.eval()
    return head
def read_annotations():
    """Load the annotation CSV and keep only usable rows.

    A row is kept when its label and reasoning are marked valid, it has a
    non-empty evidence field, and its label is not 'invalid prompt'.
    """
    df = pd.read_csv(anno_csv_path)

    def _keep(row):
        # Evidence is stringified first so non-string cells are handled too.
        return (bool(row[VALID_LABEL])
                and bool(row[VALID_REASONING])
                and len(str(row[EVIDENCE_COL_NAME])) > 0
                and row[LABEL] != 'invalid prompt')

    return df[df.apply(_keep, axis=1)]
class RNN_ENCODER(nn.Module):
    """Recurrent text encoder producing per-word and sentence embeddings.

    Captions are embedded, run through an (optionally bidirectional)
    LSTM/GRU, and returned as word features of shape (batch, nhidden *
    num_directions, seq_len) plus a sentence embedding taken from the final
    hidden state.
    """

    def __init__(self, ntoken, ninput=300, drop_prob=0.5, nhidden=128, nlayers=1, bidirectional=True):
        super(RNN_ENCODER, self).__init__()
        self.n_steps = 25       # maximum caption length — TODO confirm callers rely on this
        self.rnn_type = 'LSTM'  # hard-coded; 'GRU' branch below is kept for completeness
        self.ntoken = ntoken
        self.ninput = ninput
        self.drop_prob = drop_prob
        self.nlayers = nlayers
        self.bidirectional = bidirectional
        if bidirectional:
            self.num_directions = 2
        else:
            self.num_directions = 1
        # Halve the per-direction width so total feature size stays `nhidden`.
        self.nhidden = (nhidden // self.num_directions)
        self.define_module()
        self.init_weights()

    def define_module(self):
        """Create the embedding, dropout and recurrent layers."""
        self.encoder = nn.Embedding(self.ntoken, self.ninput)
        self.drop = nn.Dropout(self.drop_prob)
        # NOTE: nn.LSTM/GRU's `dropout` only applies between stacked layers,
        # so it is a no-op (with a warning) when nlayers == 1.
        if (self.rnn_type == 'LSTM'):
            self.rnn = nn.LSTM(self.ninput, self.nhidden, self.nlayers, batch_first=True, dropout=self.drop_prob, bidirectional=self.bidirectional)
        elif (self.rnn_type == 'GRU'):
            self.rnn = nn.GRU(self.ninput, self.nhidden, self.nlayers, batch_first=True, dropout=self.drop_prob, bidirectional=self.bidirectional)
        else:
            raise NotImplementedError

    def init_weights(self):
        """Uniformly initialize the embedding table in [-0.1, 0.1]."""
        initrange = 0.1
        self.encoder.weight.data.uniform_((- initrange), initrange)

    def init_hidden(self, bsz):
        """Return zeroed initial hidden state(s) for a batch of size `bsz`.

        LSTM needs an (h0, c0) pair; GRU needs a single tensor. `Variable`
        is the legacy pre-0.4 torch API, kept as-is.
        """
        weight = next(self.parameters()).data
        if (self.rnn_type == 'LSTM'):
            return (Variable(weight.new((self.nlayers * self.num_directions), bsz, self.nhidden).zero_()), Variable(weight.new((self.nlayers * self.num_directions), bsz, self.nhidden).zero_()))
        else:
            return Variable(weight.new((self.nlayers * self.num_directions), bsz, self.nhidden).zero_())

    def forward(self, captions, cap_lens, hidden, mask=None):
        """Encode `captions` (padded token ids) with true lengths `cap_lens`.

        Returns (words_emb, sent_emb): per-word features transposed to
        (batch, features, seq_len) and the flattened final hidden state.
        `mask` is accepted but unused here.
        """
        emb = self.drop(self.encoder(captions))
        cap_lens = cap_lens.data.tolist()
        # Pack so the RNN skips padpositions; enforce_sorted=False allows
        # unsorted batches.
        emb = pack_padded_sequence(emb, cap_lens, batch_first=True, enforce_sorted=False)
        (output, hidden) = self.rnn(emb, hidden)
        output = pad_packed_sequence(output, batch_first=True)[0]
        words_emb = output.transpose(1, 2)
        if (self.rnn_type == 'LSTM'):
            sent_emb = hidden[0].transpose(0, 1).contiguous()
        else:
            sent_emb = hidden.transpose(0, 1).contiguous()
        sent_emb = sent_emb.view((- 1), (self.nhidden * self.num_directions))
        return (words_emb, sent_emb)
def download_full_dataset(dataset: str, path_out: Union[(str, os.PathLike, None)]=None):
    """Download one of the EBA datasets ('co2', 'elec', 'raw') and decompress it.

    When `path_out` is None the gzip file goes to the configured DATA_PATH;
    otherwise it must be a user-supplied path ending in '.csv.gz'. The
    decompressed CSV is written next to it (same name minus '.gz').

    Raises ValueError for an unknown dataset or a badly named output path.
    """
    if (dataset not in ['co2', 'elec', 'raw']):
        raise ValueError(f'Unsupported argument {dataset}')
    fname = f'EBA_{dataset}.csv.gz'
    if (path_out is None):
        path_out = (gridemissions.config['DATA_PATH'] / fname)
    else:
        # Only user-supplied paths need validating; the default is well-formed.
        path_out = pathlib.Path(path_out)
        if (not path_out.name.endswith('.csv.gz')):
            raise ValueError(f'path_out should end in .csv.gz but got {path_out}')
    # .stem strips only the final '.gz', leaving the plain .csv path.
    path_out_csv = (path_out.parent / path_out.stem)
    print(f'Downloading to {path_out}...')
    request.urlretrieve((gridemissions.config['S3_URL'] + fname), path_out)
    print(f'Decompressing to {path_out_csv}...')
    with gzip.open(path_out, 'rb') as f_in:
        with open(path_out_csv, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
def get_fresh_case_data_from_ts_rl():
    """Fetch the RL/TS/CS case CSV from Google Sheets and sum its 'current' column.

    Tries twice; on success returns the sum as an int. Raises RuntimeError if
    both attempts fail.
    """
    log.info('fetch RL/TS/CS data from gsheets')
    for attempt in (1, 2):
        try:
            resp = requests.get(os.environ['RL_TS_CSV_URL'], timeout=(1.0, 5.0))
            resp.raise_for_status()
        except Exception as err:
            log.info('attempt %s: failed getting data: %s', attempt, err)
            # Fixed: previously fell through and parsed an unbound/stale
            # `resp` after a failed request; retry instead.
            continue
        df = pd.read_csv(io.StringIO(resp.text))
        return int(df['current'].sum())
    raise RuntimeError('failed to fetch RL/TS/CS data after 2 attempts')
def main():
    """Entry point for the transformers-cli tool: dispatch to a subcommand."""
    parser = ArgumentParser('Transformers CLI tool', usage='transformers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='transformers-cli command helpers')
    # Each command class wires its own subparser onto the shared parser.
    for command in (
        ConvertCommand,
        DownloadCommand,
        EnvironmentCommand,
        RunCommand,
        ServeCommand,
        UserCommands,
        AddNewModelCommand,
        LfsCommands,
    ):
        command.register_subcommand(commands_parser)
    args = parser.parse_args()
    if not hasattr(args, 'func'):
        # No subcommand given: show help and exit with failure.
        parser.print_help()
        exit(1)
    service = args.func(args)
    service.run()
class TFOPTPreTrainedModel(metaclass=DummyObject):
    # Auto-generated placeholder: stands in for the real TF model class when
    # TensorFlow is not installed; instantiating it raises via
    # requires_backends with an install hint.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def kahypar_subgraph_find_membership(inputs, output, size_dict, weight_nodes='const', weight_edges='log', fix_output_nodes=False, parts=2, imbalance=0.01, compress=0, seed=None, profile=None, mode='direct', objective='cut', quiet=True):
    """Partition a tensor network's hypergraph into `parts` blocks with KaHyPar.

    Returns a list with one block id per node (same order as `inputs`).
    Degenerate cases (more parts than nodes) are resolved without calling
    the partitioner at all.
    """
    import kahypar as kahypar
    if (seed is None):
        seed = random.randint(0, ((2 ** 31) - 1))
    nv = len(inputs)
    # More parts than nodes: trivially give every node its own block.
    if (parts >= nv):
        return list(range(nv))
    hg = get_hypergraph(inputs, output, size_dict, accel=False)
    if fix_output_nodes:
        onodes = tuple(hg.output_nodes())
        if (parts >= ((nv - len(onodes)) + 1)):
            # Enough parts to isolate every non-output node; all outputs share block 0.
            groups = itertools.count(1)
            return [(0 if (i in onodes) else next(groups)) for i in range(nv)]
    # Hyperedges touching a single node can never be cut: drop them.
    for (e, nodes) in tuple(hg.edges.items()):
        if (len(nodes) == 1):
            hg.remove_edge(e)
    if compress:
        hg.compress(compress)
    winfo = to_sparse(hg, weight_nodes=weight_nodes, weight_edges=weight_edges)
    hypergraph_kwargs = {'num_nodes': hg.get_num_nodes(), 'num_edges': hg.get_num_edges(), 'index_vector': winfo['hyperedge_indices'], 'edge_vector': winfo['hyperedges'], 'k': parts}
    # KaHyPar wants either both weight vectors or matching placeholders,
    # chosen by which weight kinds are actually present.
    (edge_weights, node_weights) = {(False, False): (None, None), (False, True): ([], winfo['node_weights']), (True, False): (winfo['edge_weights'], []), (True, True): (winfo['edge_weights'], winfo['node_weights'])}[(winfo['has_edge_weights'], winfo['has_node_weights'])]
    if (edge_weights or node_weights):
        hypergraph_kwargs['edge_weights'] = edge_weights
        hypergraph_kwargs['node_weights'] = node_weights
    hypergraph = kahypar.Hypergraph(**hypergraph_kwargs)
    if fix_output_nodes:
        for i in onodes:
            hypergraph.fixNodeToBlock(i, 0)
        # NOTE(review): fixed vertices appear to require recursive bisection
        # mode — confirm against the KaHyPar documentation.
        mode = 'recursive'
    if (profile is None):
        profile_mode = {'direct': 'k', 'recursive': 'r'}[mode]
        profile = f'{objective}_{profile_mode}KaHyPar_sea20.ini'
    context = kahypar.Context()
    context.loadINIconfiguration(join(get_kahypar_profile_dir(), profile))
    context.setK(parts)
    context.setSeed(seed)
    context.suppressOutput(quiet)
    # KaHyPar's epsilon is per-part, so scale the requested imbalance.
    context.setEpsilon((imbalance * parts))
    kahypar.partition(hypergraph, context)
    return [hypergraph.blockID(i) for i in hypergraph.nodes()]
class PseGru(nn.Module):
    """Pixel-Set Encoder + GRU temporal encoder + MLP decoder classifier."""

    def __init__(self, input_dim=10, mlp1=[10, 32, 64], pooling='mean_std', mlp2=[128, 128], with_extra=True, extra_size=4, hidden_dim=128, mlp4=[128, 64, 32], num_classes=20, max_temporal_shift=100, max_position=365):
        super(PseGru, self).__init__()
        if with_extra:
            # Widen the first mlp2 layer to absorb the extra features.
            # Fixed: this previously hard-coded `+= 4`, silently breaking any
            # extra_size != 4; the default (4) keeps behavior unchanged.
            # deepcopy avoids mutating the caller's (or the shared default) list.
            mlp2 = deepcopy(mlp2)
            mlp2[0] += extra_size
        self.spatial_encoder = PixelSetEncoder(input_dim, mlp1=mlp1, pooling=pooling, mlp2=mlp2, with_extra=with_extra, extra_size=extra_size)
        self.temporal_encoder = GRU(in_channels=mlp2[(- 1)], hidden_dim=hidden_dim, max_position=max_position, max_temporal_shift=max_temporal_shift)
        self.decoder = get_decoder(mlp4, num_classes)

    def forward(self, pixels, mask, positions, extra, return_feats=False):
        """Classify a pixel-set time series.

        Returns logits, or (logits, temporal features) when `return_feats`
        is True.
        """
        spatial_feats = self.spatial_encoder(pixels, mask, extra)
        temporal_feats = self.temporal_encoder(spatial_feats, positions)
        logits = self.decoder(temporal_feats)
        if return_feats:
            return (logits, temporal_feats)
        else:
            return logits

    def param_ratio(self):
        """Print per-component trainable-parameter counts; return the total."""
        total = get_ntrainparams(self)
        s = get_ntrainparams(self.spatial_encoder)
        t = get_ntrainparams(self.temporal_encoder)
        c = get_ntrainparams(self.decoder)
        print('TOTAL TRAINABLE PARAMETERS : {}'.format(total))
        print('RATIOS: Spatial {:5.1f}% , Temporal {:5.1f}% , Classifier {:5.1f}%'.format(((s / total) * 100), ((t / total) * 100), ((c / total) * 100)))
        return total
class UnilmConfig(PretrainedConfig):
    """Configuration for UniLM models.

    The first argument doubles as either an integer vocabulary size or the
    path to a JSON config file whose fields are copied onto the instance.
    """
    pretrained_config_archive_map = UNILM_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self, vocab_size=28996, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=6, initializer_range=0.02, layer_norm_eps=1e-12, **kwargs):
        super(UnilmConfig, self).__init__(**kwargs)
        if isinstance(vocab_size, str):
            # Treat the argument as a path: every stored field becomes an attribute.
            with open(vocab_size, 'r', encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size, int):
            # Plain keyword configuration.
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps
        else:
            raise ValueError('First argument must be either a vocabulary size (int) or the path to a pretrained model config file (str)')
def get_available_segmentation_models():
    """List public, lowercase-named callables in ``models.segmentation``.

    These naming rules match torchvision's convention for model builder
    functions (as opposed to classes and private helpers).
    """
    names = []
    for name, obj in models.segmentation.__dict__.items():
        if not callable(obj):
            continue
        first = name[0]
        if first.lower() == first and first != '_':
            names.append(name)
    return names
def get_up_block(up_block_type: str, num_layers: int, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, add_upsample: bool, resnet_eps: float, resnet_act_fn: str, resolution_idx: Optional[int]=None, transformer_layers_per_block: int=1, num_attention_heads: Optional[int]=None, resnet_groups: Optional[int]=None, cross_attention_dim: Optional[int]=None, dual_cross_attention: bool=False, use_linear_projection: bool=False, only_cross_attention: bool=False, upcast_attention: bool=False, resnet_time_scale_shift: str='default', attention_type: str='default', resnet_skip_time_act: bool=False, resnet_out_scale_factor: float=1.0, cross_attention_norm: Optional[str]=None, attention_head_dim: Optional[int]=None, upsample_type: Optional[str]=None, dropout: float=0.0) -> nn.Module:
    """Instantiate the UNet up-block named by `up_block_type`.

    Only the keyword arguments relevant to the chosen block type are
    forwarded to its constructor. Raises ValueError for an unknown type or
    when a cross-attention block is requested without `cross_attention_dim`.
    """
    if (attention_head_dim is None):
        # Fixed: logger.warn is a deprecated alias — use logger.warning.
        logger.warning(f'It is recommended to provide `attention_head_dim` when calling `get_up_block`. Defaulting `attention_head_dim` to {num_attention_heads}.')
        attention_head_dim = num_attention_heads
    # Accept both 'UNetResFooBlock2D' and 'FooBlock2D' spellings.
    up_block_type = (up_block_type[7:] if up_block_type.startswith('UNetRes') else up_block_type)
    if (up_block_type == 'UpBlock2D'):
        return UpBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift)
    elif (up_block_type == 'ResnetUpsampleBlock2D'):
        return ResnetUpsampleBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, output_scale_factor=resnet_out_scale_factor)
    elif (up_block_type == 'CrossAttnUpBlock2D'):
        if (cross_attention_dim is None):
            raise ValueError('cross_attention_dim must be specified for CrossAttnUpBlock2D')
        return CrossAttnUpBlock2D(num_layers=num_layers, transformer_layers_per_block=transformer_layers_per_block, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, attention_type=attention_type)
    elif (up_block_type == 'SimpleCrossAttnUpBlock2D'):
        if (cross_attention_dim is None):
            raise ValueError('cross_attention_dim must be specified for SimpleCrossAttnUpBlock2D')
        return SimpleCrossAttnUpBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, output_scale_factor=resnet_out_scale_factor, only_cross_attention=only_cross_attention, cross_attention_norm=cross_attention_norm)
    elif (up_block_type == 'AttnUpBlock2D'):
        # AttnUpBlock2D expresses "no upsampling" through upsample_type=None.
        if (add_upsample is False):
            upsample_type = None
        else:
            upsample_type = (upsample_type or 'conv')
        return AttnUpBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift, upsample_type=upsample_type)
    elif (up_block_type == 'SkipUpBlock2D'):
        return SkipUpBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_time_scale_shift=resnet_time_scale_shift)
    elif (up_block_type == 'AttnSkipUpBlock2D'):
        return AttnSkipUpBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift)
    elif (up_block_type == 'UpDecoderBlock2D'):
        return UpDecoderBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, temb_channels=temb_channels)
    elif (up_block_type == 'AttnUpDecoderBlock2D'):
        return AttnUpDecoderBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, attention_head_dim=attention_head_dim, resnet_time_scale_shift=resnet_time_scale_shift, temb_channels=temb_channels)
    elif (up_block_type == 'KUpBlock2D'):
        return KUpBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn)
    elif (up_block_type == 'KCrossAttnUpBlock2D'):
        return KCrossAttnUpBlock2D(num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, resolution_idx=resolution_idx, dropout=dropout, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, cross_attention_dim=cross_attention_dim, attention_head_dim=attention_head_dim)
    raise ValueError(f'{up_block_type} does not exist.')
('mnli')
class MNLIModel(Model):
    """NLI model that scores entailment with box embeddings.

    Premise and hypothesis are encoded, projected into boxes, and the
    overlap of the two boxes (relative to the premise box) is used as the
    entailment probability.
    """

    def __init__(self, vocab: Vocabulary, text_field_embedder: TextFieldEmbedder, encoder: Union[(Seq2VecEncoder, Seq2SeqEncoder)], box_factory: BoxFactory, intersection: _Intersection, volume: _Volume, premise_feedforward: FeedForward, hypothesis_feedforward: FeedForward, dropout: Optional[float]=None, box_regularizer: Optional[BoxRegularizer]=None, num_labels: int=None, label_namespace: str='labels', namespace: str='tokens', regularizer: Optional[RegularizerApplicator]=None, initializer: Optional[InitializerApplicator]=None, **kwargs: Any) -> None:
        super().__init__(vocab, regularizer=regularizer)
        self._text_field_embedder = text_field_embedder
        self._encoder = encoder
        self._box_factory = box_factory
        self._box_intersection = intersection
        self._box_volume = volume
        # Separate projection heads so premise/hypothesis boxes are parameterized independently.
        self._premise_feedforward = premise_feedforward
        self._hypothesis_feedforward = hypothesis_feedforward
        if dropout:
            self._dropout: Optional[torch.nn.Dropout] = torch.nn.Dropout(dropout)
        else:
            self._dropout = None
        if box_regularizer:
            self._box_regularizer: Optional[BoxRegularizer] = box_regularizer
        else:
            self._box_regularizer = None
        self._label_namespace = label_namespace
        self._namespace = namespace
        if num_labels:
            self._num_labels = num_labels
        else:
            self._num_labels = vocab.get_vocab_size(namespace=self._label_namespace)
        # NLLLoss expects log-probabilities; see the stacked log-probs in forward().
        self._loss = torch.nn.NLLLoss()
        self._accuracy = BooleanAccuracy()
        if (initializer is not None):
            initializer(self)

    def forward(self, premise: TextFieldTensors, hypothesis: TextFieldTensors, label: torch.IntTensor=None, **kwargs) -> Dict[(str, torch.Tensor)]:
        """Score the premise/hypothesis pair.

        Returns a dict with 'y_prob' (and 'loss' plus intermediate
        activations when `label` is given).
        """
        premise_embedded_text = self._text_field_embedder(premise)
        hypothesis_embedded_text = self._text_field_embedder(hypothesis)
        premise_mask = get_text_field_mask(premise)
        hypothesis_mask = get_text_field_mask(hypothesis)
        premise_embedded_text = self._encoder(premise_embedded_text, mask=premise_mask)
        hypothesis_embedded_text = self._encoder(hypothesis_embedded_text, mask=hypothesis_mask)
        # Kept for inspection/debugging; returned alongside predictions below.
        activations = {'premise_embedded_text': premise_embedded_text, 'hypothesis_embedded_text': hypothesis_embedded_text}
        if self._dropout:
            premise_embedded_text = self._dropout(premise_embedded_text)
            hypothesis_embedded_text = self._dropout(hypothesis_embedded_text)
        premise_embeddings = self._premise_feedforward(premise_embedded_text)
        hypothesis_embeddings = self._hypothesis_feedforward(hypothesis_embedded_text)
        activations['premise_embeddings'] = premise_embeddings
        activations['hypothesis_embeddings'] = hypothesis_embeddings
        premise_box = self._box_factory(premise_embeddings)
        hypothesis_box = self._box_factory(hypothesis_embeddings)
        # Presumably log-volumes: y_prob = log V(p ∩ h) - log V(p) = log P(h | p)
        # — confirm against the _Volume implementation.
        y_prob = (self._box_volume(self._box_intersection(premise_box, hypothesis_box)) - self._box_volume(premise_box))
        output_dict = {'y_prob': y_prob}
        if (label is not None):
            # Stack [log p, log(1-p)] so NLLLoss sees a 2-class log-distribution.
            loss = self._loss(torch.stack((y_prob, log1mexp(y_prob)), dim=(- 1)), label.long().view((- 1)))
            if self._box_regularizer:
                loss += self._box_regularizer(self._box_intersection(premise_box, hypothesis_box))
            output_dict['loss'] = loss
            # Predicted class: round p to {0,1}, then flip because label 0
            # appears to mean entailment here — TODO confirm label semantics.
            y_pred = (1 - torch.round(torch.exp(y_prob.detach())))
            self._accuracy(y_pred, label)
        output_dict.update(activations)
        return output_dict

    def get_metrics(self, reset: bool=False) -> Dict[(str, float)]:
        """Return the accumulated boolean accuracy."""
        metrics = {'accuracy': self._accuracy.get_metric(reset)}
        return metrics
def create_oracles(dataname, path_read, path_wt_distributed):
    """Create oracle files for every '*.doc.json' document in `path_read`.

    Each document is handled by `process_one_example` across a process pool
    sized to the machine's CPU count, writing results under
    `path_wt_distributed`.
    """
    stems = [fname.split('.')[0] for fname in os.listdir(path_read) if fname.endswith('.doc.json')]
    n = len(stems)
    workers = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(processes=workers)
    # One argument tuple per document; only the stem varies between tasks.
    task_args = zip(([path_read] * n), ([path_wt_distributed] * n), stems, ([30] * n), ([dataname] * n))
    pool.starmap(process_one_example, task_args)
    pool.close()
    pool.join()
    print('finish creating oracles, and write down them to distributed folders.')
.parametrize('device', ['cpu', 'cuda'])
def test_gaussian_encoding_no_unfreeze(device):
    """The sampled `b` buffer of GaussianEncoding must stay frozen even after
    setting `requires_grad` on the layer object."""
    check_cuda(device)
    b = rff.functional.sample_b(1.0, (256, 2)).to(device)
    layer = rff.layers.GaussianEncoding(b=b).to(device)
    # NOTE(review): this sets a plain attribute on the Module rather than
    # calling Module.requires_grad_() — presumably deliberate, to show that
    # doing so does not unfreeze the buffer; confirm intent.
    layer.requires_grad = True
    assert (layer.b.requires_grad != True)
def train_printer(data, targets, epoch, counter, iter_counter, loss_hist, test_loss_hist, test_data, test_targets):
    """Print the current train/test loss and the batch accuracies."""
    lines = (
        f'Epoch {epoch}, Iteration {iter_counter}',
        f'Train Set Loss: {loss_hist[counter]:.2f}',
        f'Test Set Loss: {test_loss_hist[counter]:.2f}',
    )
    for line in lines:
        print(line)
    print_batch_accuracy(data, targets, train=True)
    print_batch_accuracy(test_data, test_targets, train=False)
    print('\n')
class Defects4J():
    """Interface to a Defects4J installation plus pre-checked-out bug repos.

    Bug metadata is read from each project's ``active-bugs.csv`` and then
    restricted to the curated lists under ``data/considered-bugs``.  The
    compile/test/checkout helpers shell out to the ``defects4j`` executable
    with ``JAVA_HOME`` pointed at a Java 8 installation.

    NOTE: ``@property`` / ``@staticmethod`` decorators are restored here —
    without them ``self.d4j_executable.exists()`` and
    ``Defects4J.split_bug_id(...)`` (both used below) cannot work.
    """

    def __init__(self, d4j_home: Path, d4j_checkout_root: Path, java8_home: Path) -> None:
        self.d4j_home = d4j_home
        self.d4j_checkout_root = d4j_checkout_root
        self.java8_home = java8_home
        assert d4j_home.exists()
        assert self.d4j_executable.exists()
        assert self.java8_home.exists()
        assert d4j_checkout_root.exists()
        self.metadata = self._get_metadata()
        all_bugs = self._all_bugs()
        # Only keep bugs listed in the curated data/considered-bugs JSON files.
        considered_bugs = set[str]()
        for dir in Path('data/considered-bugs').iterdir():
            considered_bugs.update(json.loads(dir.read_text()))
        # The two curated lists are expected to contain exactly 138 + 135 ids.
        assert len(considered_bugs) == (138 + 135), len(considered_bugs)
        self.all_bugs = {id: bug for id, bug in all_bugs.items() if id in considered_bugs}
        # Successively narrower bug families used by the experiments.
        self.single_hunk_bugs = {
            id: bug
            for id, bug in self.all_bugs.items()
            if len(bug.buggy_files) == 1 and len(bug.buggy_files[0].changes) == 1
        }
        self.single_line_bugs = {
            id: bug
            for id, bug in self.single_hunk_bugs.items()
            if len(bug.buggy_files[0].changes[0].added_lines) <= 1
            and len(bug.buggy_files[0].changes[0].removed_lines) <= 1
        }
        self.d4j1_multi_hunk_bugs = {id: bug for id, bug in self.all_bugs.items() if id not in self.single_hunk_bugs and is_d4j1(id)}
        self.d4j1_single_hunk_bugs = {id: bug for id, bug in self.all_bugs.items() if id in self.single_hunk_bugs and is_d4j1(id)}
        self.d4j2_single_line_bugs = {id: bug for id, bug in self.all_bugs.items() if id in self.single_line_bugs and not is_d4j1(id)}
        self.d4j2_single_hunk_bugs = {id: bug for id, bug in self.all_bugs.items() if id in self.single_hunk_bugs and not is_d4j1(id)}

    @staticmethod
    def split_bug_id(bug_id: str) -> tuple[str, str]:
        """'Chart-1' -> ('Chart', '1')."""
        proj, id_str = bug_id.split('-')
        return (proj, id_str)

    @staticmethod
    def group_by_project(data_dict: dict[str, T]) -> list[tuple[str, dict[str, T]]]:
        """Group a {bug_id: data} mapping into per-project sub-dicts."""
        def key_fn(item: tuple[str, Any]) -> str:
            bug_id, _ = item
            return Defects4J.split_bug_id(bug_id)[0]
        data_items = list(data_dict.items())
        # groupby only merges adjacent runs, so sort by the same key first.
        data_items.sort(key=key_fn)
        results: list[tuple[str, dict[str, T]]] = []
        for project, group in groupby(data_items, key_fn):
            results.append((project, {bug_id: data for bug_id, data in group}))
        return results

    @staticmethod
    def form_bug_id(proj: str, id_str: str) -> str:
        """Inverse of :meth:`split_bug_id`."""
        return proj + '-' + id_str

    @property
    def d4j_executable(self) -> Path:
        """Path to the ``defects4j`` CLI inside the installation."""
        return self.d4j_home / 'framework' / 'bin' / 'defects4j'

    def compile(self, bug_id: str) -> tuple[bool, str, str]:
        """Run ``defects4j compile`` in the bug's checkout; return (ok, stdout, stderr)."""
        bug = self.all_bugs[bug_id]
        env = dict(os.environ, JAVA_HOME=str(self.java8_home))
        result = subprocess.run([str(self.d4j_executable), 'compile'], env=env, cwd=bug.proj_path, text=True, capture_output=True)
        success = result.returncode == 0
        # d4j prints FAIL on stderr exactly when the exit code is non-zero.
        assert ('FAIL' not in result.stderr) if success else ('FAIL' in result.stderr)
        return (success, result.stdout, result.stderr)

    def test(self, bug_id: str, timeout: int) -> tuple[bool, str, str]:
        """Run the relevant tests first (cheap), then the entire suite."""
        success, stdout, stderr = self.test_with_option(bug_id, timeout, entire_test_suite=False)
        if not success:
            return (success, stdout, stderr)
        return self.test_with_option(bug_id, timeout, entire_test_suite=True)

    def test_with_option(self, bug_id: str, timeout: int, entire_test_suite: bool) -> tuple[bool, str, str]:
        """Run ``defects4j test`` (``-r`` restricts to the relevant tests)."""
        bug = self.all_bugs[bug_id]
        env = dict(os.environ, JAVA_HOME=str(self.java8_home))
        result = subprocess.run(
            [str(self.d4j_executable), 'test'] + ([] if entire_test_suite else ['-r']),
            env=env, cwd=bug.proj_path, timeout=timeout, text=True, capture_output=True)
        failing_tests = Path(bug.proj_path) / 'failing_tests'
        if not failing_tests.exists():
            # No failing_tests file at all means nothing failed.
            return (True, result.stdout, result.stderr)
        with open(failing_tests) as f:
            failing_test_0 = 'Failing tests: 0'
            success = f.read().strip() == '' or result.stdout.startswith(failing_test_0)
        return (success, result.stdout, result.stderr)

    def checkout(self, bug_id: str, buggy: bool = True, dirty: bool = False):
        """Re-checkout the buggy/fixed revision and scrub the working tree."""
        assert not dirty
        bug_proj_path = self.all_bugs[bug_id].proj_path
        proj, id_str = self.split_bug_id(bug_id)
        repo = git.Repo(bug_proj_path)
        repo.git.execute(['git', 'checkout', 'HEAD', '-f', '.'])
        subprocess.run([str(self.d4j_executable), 'checkout', '-p', proj, f"-v{id_str}{'b' if buggy else 'f'}", '-w', bug_proj_path])
        repo.git.execute(['git', 'checkout', 'HEAD', '-f', '.'])
        if not dirty:
            repo.git.execute(['git', 'clean', '-xfd'])
        repo.close()

    def get_patch(self, bug_id: str) -> str:
        """Read the developer patch that Defects4J ships for this bug."""
        proj, bug_id = self.split_bug_id(bug_id)
        patch_file = self.d4j_home / 'framework' / 'projects' / proj / 'patches' / f'{bug_id}.src.patch'
        try:
            return patch_file.read_text()
        except UnicodeDecodeError:
            # A few patches are not UTF-8; retry with latin-1.
            # (Narrowed from a bare `except:` which also hid missing files.)
            return patch_file.read_text('latin-1')

    def buggy_files(self, bug: dict) -> list[BuggyFile]:
        """Parse the bug's patch and return a descriptor per modified file."""
        patch_file = self.d4j_home / 'framework' / 'projects' / bug['proj'] / 'patches' / f"{bug['bug_id']}.src.patch"
        patch_set = PatchSet.from_filename(patch_file, errors='ignore')
        patch_files: Iterator[PatchedFile] = filter(lambda f: f.is_modified_file, patch_set)
        # Loop variable renamed: the original shadowed the `patch_file` Path above.
        return [BuggyFile.from_patch_file(True, pf, f"{bug['proj']}-{bug['bug_id']}") for pf in patch_files]

    @staticmethod
    def bug_id(bug: dict) -> str:
        """'<proj>-<bug_id>' identifier for a metadata row."""
        return f"{bug['proj']}-{bug['bug_id']}"

    def _all_bugs(self) -> BenchmarkMetadata:
        return {self.bug_id(bug): Bug(buggy_files=self.buggy_files(bug), proj_path=bug['path']) for bug in self.metadata}

    def _get_checkout_meta(self, proj: str, bug: Dict[str, str]) -> Dict:
        path = self.d4j_checkout_root / f"{proj}-{bug['bug.id']}"
        bug_id = bug['bug.id']
        return {
            'proj': proj,
            'bug_id': bug_id,
            'buggy_commit': bug['revision.id.buggy'],
            'url': bug['report.url'],
            'fixed_commit': bug['revision.id.fixed'],
            'path': str(path.absolute()),
            # The 'f' suffix checks out the FIXED revision.
            'cmd': [str(self.d4j_executable), 'checkout', '-p', proj, '-v', f'{bug_id}f', '-w', str(path.absolute())],
        }

    def _get_all_checkout_meta(self, bugs: Metadata) -> list[Dict[str, str]]:
        return [self._get_checkout_meta(proj, bug) for proj, proj_bugs in bugs.items() for bug in proj_bugs]

    def _get_metadata(self) -> list[Dict[str, str]]:
        return self._get_all_checkout_meta(self._get_all_bugs())

    def _get_all_bugs(self) -> Metadata:
        """Read every project's active-bugs.csv into {project: [row dicts]}."""
        def impl():
            proj_dir = self.d4j_home / 'framework' / 'projects'
            for path_i in proj_dir.iterdir():
                if not path_i.is_dir():
                    continue
                for path_j in path_i.iterdir():
                    if path_j.name == 'active-bugs.csv':
                        with open(path_j) as f:
                            dataset = csv.reader(f)
                            keys = next(dataset)
                            kv_list = (zip(keys, values) for values in dataset)
                            bugs = [{k: v for k, v in kv} for kv in kv_list]
                        yield (path_i.name, bugs)
        return {proj: bugs for proj, bugs in impl()}
.skipif((digit_version(torch.__version__) < digit_version('1.6.0')), reason='torch.jit.is_tracing is not available before 1.6.0')
def test_is_jit_tracing():
    """is_jit_tracing() is False eagerly and True under torch.jit.trace."""

    def branch_on_tracing(x):
        # Eager mode takes the tolist() branch; tracing keeps the tensor.
        return x if is_jit_tracing() else x.tolist()

    sample = torch.rand(3)
    assert isinstance(branch_on_tracing(sample), list)
    traced = torch.jit.trace(branch_on_tracing, (torch.rand(1),))
    assert isinstance(traced(sample), torch.Tensor)
def build_onnx_model_with_zero_weight():
    """Build a small ONNX model (opset 13) whose initializers are all zero.

    Graph: A -> MatMul(G) -> C, then C feeds MatMul(B) -> I and
    MatMul(E) -> K; D = I + E and the graph output H = D + F.
    Useful for exercising zero-weight pruning/folding passes.
    """
    A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [1, 5, 5])
    C = helper.make_tensor_value_info('C', TensorProto.FLOAT, [1, 5, 2])
    H = helper.make_tensor_value_info('H', TensorProto.FLOAT, [1, 5, 2])
    g_value = np.zeros(25).astype(np.float32)
    G_init = helper.make_tensor('G', TensorProto.FLOAT, [5, 5], g_value.reshape(25).tolist())
    matmul_node = onnx.helper.make_node('MatMul', ['A', 'G'], ['C'], name='Matmul')
    b_value = np.zeros(10).astype(np.float32)
    B_init = helper.make_tensor('B', TensorProto.FLOAT, [5, 2], b_value.reshape(10).tolist())
    matmul_node2 = onnx.helper.make_node('MatMul', ['C', 'B'], ['I'], name='Matmul2')
    e_value = np.zeros(10).astype(np.float32)
    E_init = helper.make_tensor('E', TensorProto.FLOAT, [5, 2], e_value.reshape(10).tolist())
    matmul_node3 = onnx.helper.make_node('MatMul', ['C', 'E'], ['K'], name='Matmul3')
    add = onnx.helper.make_node('Add', ['I', 'E'], ['D'], name='add')
    f_value = np.zeros(10).astype(np.float32)
    F_init = helper.make_tensor('F', TensorProto.FLOAT, [5, 2], f_value.reshape(10).tolist())
    add2 = onnx.helper.make_node('Add', ['D', 'F'], ['H'], name='add2')
    graph = helper.make_graph([matmul_node, matmul_node2, matmul_node3, add, add2], 'test_graph_1', [A], [H], [B_init, E_init, F_init, G_init])
    # Fixed: the model was previously built twice, discarding the first result.
    model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 13)])
    return model
def load_model(model_id):
    """Resolve the model class for ``model_id`` and load it for inference.

    Falls back to the 'auto' entry of MODEL_CLASSES when no key matches.
    Returns (model, model_class).
    """
    lowered = model_id.lower()
    model_type = next((key for key in MODEL_CLASSES.keys() if key in lowered), 'auto')
    model_class = MODEL_CLASSES[model_type]
    print('Load model via', model_class)
    model = model_class[0].from_pretrained(model_id, low_cpu_mem_usage=True, torch_dtype=amp_dtype)
    print('Model dtype:', model.config.torch_dtype)
    # Eval mode, target device, channels-last layout for inference speed.
    model = model.eval().to(device).to(memory_format=torch.channels_last)
    return (model, model_class)
def parse_args():
    """Parse the command-line arguments for mmseg model testing/evaluation.

    Returns:
        argparse.Namespace with validated options; deprecated ``--options``
        is migrated into ``--cfg-options``.
    """
    parser = argparse.ArgumentParser(description='mmseg test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--work-dir', help='if specified, the evaluation metric results will be dumpedinto the directory as json')
    parser.add_argument('--aug-test', action='store_true', help='Use Flip and Multi scale aug')
    parser.add_argument('--out', help='output result file in pickle format')
    parser.add_argument('--format-only', action='store_true', help='Format the output results without perform evaluation. It isuseful when you want to format the result to a specific format and submit it to the test server')
    parser.add_argument('--eval', type=str, nargs='+', help='evaluation metrics, which depends on the dataset, e.g., "mIoU" for generic datasets, and "cityscapes" for Cityscapes')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument('--show-dir', help='directory where painted images will be saved')
    parser.add_argument('--gpu-collect', action='store_true', help='whether to use gpu to collect results.')
    parser.add_argument('--gpu-id', type=int, default=0, help='id of gpu to use (only applicable to non-distributed testing)')
    parser.add_argument('--tmpdir', help='tmp directory used for collecting results from multiple workers, available when gpu_collect is not specified')
    parser.add_argument('--options', nargs='+', action=DictAction, help='--options is deprecated in favor of --cfg_options\' and it will not be supported in version v0.22.0. Override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    parser.add_argument('--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    parser.add_argument('--eval-options', nargs='+', action=DictAction, help='custom options for evaluation')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
    parser.add_argument('--opacity', type=float, default=0.5, help='Opacity of painted segmentation map. In (0, 1] range.')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Distributed launchers expect LOCAL_RANK in the environment.
    if ('LOCAL_RANK' not in os.environ):
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    # --options and --cfg-options are mutually exclusive; the former is deprecated.
    if (args.options and args.cfg_options):
        raise ValueError('--options and --cfg-options cannot be both specified, --options is deprecated in favor of --cfg-options. --options will not be supported in version v0.22.0.')
    if args.options:
        warnings.warn('--options is deprecated in favor of --cfg-options. --options will not be supported in version v0.22.0.')
        args.cfg_options = args.options
    return args
_measure
class Coverage(Measure):
    """Measure of how much of the scene the agent has visited.

    Visits are tracked on two grids: a coarse grid whose cell size comes
    from config (GRID_DELTA) and a fixed fine-grained 0.5-unit "mini" grid.
    The metric reports the number of distinct cells reached on each grid,
    the visit count of the current cell, and the step counter.
    """

    cls_uuid: str = 'coverage'

    def __init__(self, sim, config, **kwargs: Any):
        self._sim = sim
        self._config = config
        # Per-episode state; (re)populated in reset_metric().
        self._visited = None       # coarse-grid cell -> visit count
        self._mini_visited = None  # fine-grid cell -> visit count
        self._step = None
        self._reached_count = None  # distinct coarse cells seen this episode
        self._mini_reached = None   # distinct fine cells seen this episode
        self._mini_delta = 0.5      # fine grid cell size (scene units, presumably metres)
        self._grid_delta = config.GRID_DELTA
        super().__init__()

    def _get_uuid(self, *args: Any, **kwargs: Any):
        return self.cls_uuid

    def _to_grid(self, delta, sim_x, sim_y, sim_z=0):
        # Quantise a continuous position into integer grid coordinates.
        grid_x = int((sim_x / delta))
        grid_y = int((sim_y / delta))
        grid_z = int((sim_z / delta))
        return (grid_x, grid_y, grid_z)

    def reset_metric(self, episode, task, observations, *args: Any, **kwargs: Any):
        """Clear all per-episode state and count the start position as a visit."""
        self._visited = {}
        self._mini_visited = {}
        self._reached_count = 0
        self._mini_reached = 0
        self._step = 0
        current_visit = self._visit(task, observations)
        self._metric = {'reached': self._reached_count, 'mini_reached': self._mini_reached, 'visit_count': current_visit, 'step': self._step}

    def _visit(self, task, observations):
        """Record the agent's current cell on both grids; return the coarse cell's visit count."""
        self._step += 1
        if self._config.EGOCENTRIC:
            # Use the episodic GPS reading instead of the simulator state.
            global_loc = observations[EpisodicGPSSensor.cls_uuid]
        else:
            global_loc = self._sim.get_agent_state().position.tolist()
        mini_loc = self._to_grid(self._mini_delta, *global_loc)
        if (mini_loc in self._mini_visited):
            self._mini_visited[mini_loc] += 1
        else:
            # First time in this fine cell.
            self._mini_visited[mini_loc] = 1
            self._mini_reached += 1
        grid_loc = self._to_grid(self._grid_delta, *global_loc)
        if (grid_loc in self._visited):
            self._visited[grid_loc] += 1
            return self._visited[grid_loc]
        # First time in this coarse cell.
        self._visited[grid_loc] = 1
        self._reached_count += 1
        return self._visited[grid_loc]

    def update_metric(self, *args: Any, episode, action, task: EmbodiedTask, observations, **kwargs: Any):
        current_visit = self._visit(task, observations)
        self._metric = {'reached': self._reached_count, 'mini_reached': self._mini_reached, 'visit_count': current_visit, 'step': self._step}
def load_checkpoint(filename, model=None, logger=None):
    """Load a checkpoint dict from ``filename``.

    When ``model`` is given its weights are restored from the checkpoint's
    'state_dict' entry.  Returns the full checkpoint dict.
    """
    if logger:
        logger.info(('load checkpoint from ' + filename))
    checkpoint = torch.load(filename)
    if model:
        model.load_state_dict(checkpoint['state_dict'])
    return checkpoint
class MSEMeter(meter.Meter):
    """Accumulate (root) mean squared error over successive batches."""

    def __init__(self, root=False):
        super(MSEMeter, self).__init__()
        self.reset()
        # When True, value() reports RMSE instead of MSE.
        self.root = root

    def reset(self):
        # Element count and running squared-error sum.
        self.n = 0
        self.sesum = 0.0

    def add(self, output, target):
        """Accumulate squared error; numpy inputs are converted to tensors."""
        if not (torch.is_tensor(output) or torch.is_tensor(target)):
            output = torch.from_numpy(output)
            target = torch.from_numpy(target)
        self.n += output.numel()
        self.sesum += torch.sum((output - target) ** 2)

    def value(self):
        """Return the current MSE (or RMSE when ``root`` is set)."""
        mse = self.sesum / max(1, self.n)
        return math.sqrt(mse) if self.root else mse
def _test_handler(file_format, test_obj, str_checker, mode='r+'):
    """Round-trip ``test_obj`` through one mmcv file handler.

    Exercises five paths: dump-to-string, dump/load via a filename, dump to
    a remote (mocked Petrel) path, dump/load via an open file object, and
    format inference from the file extension.

    Args:
        file_format: handler name, e.g. 'json', 'yaml' or 'pickle'.
        test_obj: object to serialise; must compare equal after a round trip.
        str_checker: callback that validates the dumped string.
        mode: file mode; a 'b' in it selects the binary Petrel `put` API.
    """
    # 1) Dump to an in-memory string and let the caller validate it.
    dump_str = mmcv.dump(test_obj, file_format=file_format)
    str_checker(dump_str)
    # 2) Dump to an explicit temp filename, then load it back.
    tmp_filename = osp.join(tempfile.gettempdir(), 'mmcv_test_dump')
    mmcv.dump(test_obj, tmp_filename, file_format=file_format)
    assert osp.isfile(tmp_filename)
    load_obj = mmcv.load(tmp_filename, file_format=file_format)
    assert (load_obj == test_obj)
    os.remove(tmp_filename)
    # 3) Dump to an s3:// path with the Petrel writer mocked out; binary
    #    formats go through `put`, text formats through `put_text`.
    method = ('put' if ('b' in mode) else 'put_text')
    with patch.object(PetrelBackend, method, return_value=None) as mock_method:
        filename = 's3://path/of/your/file'
        mmcv.dump(test_obj, filename, file_format=file_format)
        mock_method.assert_called()
    # 4) Dump into an already-open file object, then load from one.
    with tempfile.NamedTemporaryFile(mode, delete=False) as f:
        tmp_filename = f.name
        mmcv.dump(test_obj, f, file_format=file_format)
    assert osp.isfile(tmp_filename)
    with open(tmp_filename, mode) as f:
        load_obj = mmcv.load(f, file_format=file_format)
    assert (load_obj == test_obj)
    os.remove(tmp_filename)
    # 5) Let mmcv infer the handler from the '.<format>' file extension.
    tmp_filename = osp.join(tempfile.gettempdir(), ('mmcv_test_dump.' + file_format))
    mmcv.dump(test_obj, tmp_filename)
    assert osp.isfile(tmp_filename)
    load_obj = mmcv.load(tmp_filename)
    assert (load_obj == test_obj)
    os.remove(tmp_filename)
def preprocess(sources: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> Dict:
    """Tokenize conversations and build loss-masked training targets.

    Dispatches to the version-specific preprocessors for 'v1' and 'mpt';
    otherwise prepends the system header to each conversation, tokenizes,
    and masks out non-learnable spans in the label tensors.
    """
    version = conversation_lib.default_conversation.version
    if version == 'v1':
        return preprocess_v1(sources, tokenizer)
    if version == 'mpt':
        return preprocess_mpt(sources, tokenizer)
    # System prompt followed by a newline, shared by every conversation.
    header = f'{conversation_lib.default_conversation.system}\n'
    conversations = [_add_speaker_and_signal(header, source) for source in sources]
    tokenized = _tokenize_fn(conversations, tokenizer)
    input_ids = tokenized['input_ids']
    targets = copy.deepcopy(input_ids)
    for target, source in zip(targets, sources):
        tokenized_lens = _tokenize_fn([header] + [s['value'] for s in source], tokenizer)['input_ids_lens']
        speakers = [sentence['from'] for sentence in source]
        _mask_targets(target, tokenized_lens, speakers)
    return dict(input_ids=input_ids, labels=targets)
class MagicWords(object):
    """Lookup table for MediaWiki "magic words" and behaviour switches.

    Only '!' has a universal expansion ('|'); every other name resolves to
    None until a value is assigned via item assignment.
    """

    names = ['!', 'currentmonth', 'currentmonth1', 'currentmonthname', 'currentmonthnamegen', 'currentmonthabbrev', 'currentday', 'currentday2', 'currentdayname', 'currentyear', 'currenttime', 'currenthour', 'localmonth', 'localmonth1', 'localmonthname', 'localmonthnamegen', 'localmonthabbrev', 'localday', 'localday2', 'localdayname', 'localyear', 'localtime', 'localhour', 'numberofarticles', 'numberoffiles', 'numberofedits', 'articlepath', 'pageid', 'sitename', 'server', 'servername', 'scriptpath', 'stylepath', 'pagename', 'pagenamee', 'fullpagename', 'fullpagenamee', 'namespace', 'namespacee', 'namespacenumber', 'currentweek', 'currentdow', 'localweek', 'localdow', 'revisionid', 'revisionday', 'revisionday2', 'revisionmonth', 'revisionmonth1', 'revisionyear', 'revisiontimestamp', 'revisionuser', 'revisionsize', 'subpagename', 'subpagenamee', 'talkspace', 'talkspacee', 'subjectspace', 'subjectspacee', 'talkpagename', 'talkpagenamee', 'subjectpagename', 'subjectpagenamee', 'numberofusers', 'numberofactiveusers', 'numberofpages', 'currentversion', 'rootpagename', 'rootpagenamee', 'basepagename', 'basepagenamee', 'currenttimestamp', 'localtimestamp', 'directionmark', 'contentlanguage', 'numberofadmins', 'cascadingsources']

    # Behaviour switches recognised verbatim in page text.
    switches = ('__NOTOC__', '__FORCETOC__', '__TOC__', '__TOC__', '__NEWSECTIONLINK__', '__NONEWSECTIONLINK__', '__NOGALLERY__', '__HIDDENCAT__', '__NOCONTENTCONVERT__', '__NOCC__', '__NOTITLECONVERT__', '__NOTC__', '__START__', '__END__', '__INDEX__', '__NOINDEX__', '__STATICREDIRECT__', '__DISAMBIG__')

    def __init__(self):
        self.values = {'!': '|'}

    def __getitem__(self, name):
        # Missing names resolve to None rather than raising.
        return self.values.get(name)

    def __setitem__(self, name, value):
        self.values[name] = value
def gen_k_centers(k, dim):
    """Sample k cluster centers spaced ~2*i*delta apart along a random direction.

    The first center is a standard-normal draw; each subsequent center i is
    placed at distance 2*i*delta (plus a tiny eps) from the first, along a
    freshly sampled random unit direction.  Returns (centers, delta).
    """
    delta = abs(np.random.normal(0.0, 5.0))
    eps = 0.001
    centers = []
    for i in range(k):
        # Drawn every iteration to keep the RNG stream identical for all i.
        candidate = np.random.multivariate_normal(np.zeros(dim), np.identity(dim))
        if not centers:
            centers.append(candidate)
            continue
        anchor = centers[0]
        offset = np.random.multivariate_normal(anchor, np.identity(anchor.size)) - anchor
        direction = offset / np.linalg.norm(offset)
        centers.append(anchor + 2.0 * i * delta * direction + eps)
    return (centers, delta)
class ptb_fs_goru_config(object):
    """Hyper-parameters for a fast-slow GORU model on the PTB dataset.

    NOTE(review): the per-field comments below are inferred from the
    attribute names -- confirm against the training code that consumes them.
    """

    cell = 'fs-goru'  # RNN cell variant
    init_scale = 0.01  # weight initialisation scale
    learning_rate = 0.002
    max_grad_norm = 1.0  # gradient clipping threshold
    num_layers = 2
    num_steps = 150  # BPTT unroll length
    cell_size = 700
    hyper_size = 200
    embed_size = 128
    max_epoch = 200
    max_max_epoch = max_epoch  # identical -> effectively no extra decay phase
    keep_prob = 0.65  # dropout keep probability
    zoneout_h = 0.9
    zoneout_c = 0.5
    lr_decay = 0.1
    batch_size = 128
    vocab_size = 50  # small vocab; presumably character-level PTB -- confirm
    fast_layers = 2
    T_norm = 1.0
    use_zoneout = True
    use_layer_norm = True
    dataset = 'ptb'
class UAVVideo(Video):
    """A UAV benchmark video sequence; all behaviour is inherited from ``Video``."""

    def __init__(self, name, root, video_dir, init_rect, img_names, gt_rect, attr, load_img=False):
        # Delegate straight to the base class; this subclass only tags the dataset type.
        super().__init__(name, root, video_dir, init_rect, img_names, gt_rect, attr, load_img)
def _to_cpu(state):
if isinstance(state, torch.Tensor):
ret = state.cpu()
if ('Float' in state.type()):
ret = ret.half()
return ret
elif isinstance(state, list):
new_state = [_to_cpu(t) for t in state]
elif isinstance(state, tuple):
new_state = tuple((_to_cpu(t) for t in state))
elif isinstance(state, dict):
new_state = {n: _to_cpu(t) for (n, t) in state.items()}
else:
return state
return new_state |
def convert_bdd(root_dir, ann_dir):
    """Remap BDD annotation label ids in place and save over the originals.

    Swaps label ids among {16, 18, 19}, then shifts every id up by one so
    that the 'ignore' value (255) wraps around to 0.
    """
    count = 0  # NOTE(review): never incremented or read -- dead variable?
    for img_loc in tqdm(os.listdir((root_dir + ann_dir))):
        img = imread(((root_dir + ann_dir) + img_loc))
        # Skip degenerate (non-image) reads.
        if (img.ndim <= 1):
            continue
        # 255 ('ignore') -> -1; assumes uint8 pixels so this wraps to 255 and
        # the final += 1 below wraps it to 0 -- TODO confirm dtype is uint8.
        loc = (img == 255)
        img[loc] = (- 1)
        loc = (img == 16)
        img[loc] = 19
        loc = (img == 18)
        img[loc] = 16
        # NOTE(review): pixels originally 16 (just set to 19 above) also match
        # this `img == 19` test, so ids 16 and 19 both end up as 18 -- confirm
        # this merging is intended and not a botched three-way swap.
        loc = (img == 19)
        img[loc] = 18
        img += 1
        # NOTE(review): scipy.misc.toimage was removed in SciPy >= 1.2; this
        # requires an old scipy (or porting to PIL/imageio).
        scipy.misc.toimage(img, cmin=0, cmax=255).save(((root_dir + ann_dir) + img_loc))
def get_self_bleu2_arithmetic(utterances):
    """Self-BLEU-2 with arithmetic averaging (uniform unigram/bigram weights)."""
    return get_self_bleu(utterances, averaging_mode='arithmetic', weights=(0.5, 0.5))
def test_cast_as_tensor_torch_bool_2d():
    """2-D bool tensors of various shapes must survive the cast helper."""
    cases = (
        torch.tensor([[True, False, True], [True, True, False]]),
        torch.tensor([[True, True, True]]),
        torch.tensor([[False]]),
    )
    for case in cases:
        _test_cast(case, torch.bool, 2)
class StripTokenDataset(BaseWrapperDataset):
    """Dataset wrapper that trims a sentinel token id off both ends of each item."""

    def __init__(self, dataset, id_to_strip):
        super().__init__(dataset)
        # Token id removed from the head and tail of every item.
        self.id_to_strip = id_to_strip

    def __getitem__(self, index):
        item = self.dataset[index]
        sentinel = self.id_to_strip
        # Drop trailing sentinels first, then leading ones.
        while len(item) > 0 and item[-1] == sentinel:
            item = item[:-1]
        while len(item) > 0 and item[0] == sentinel:
            item = item[1:]
        return item
def run_watch():
    """Launch train_q.py in display/testing mode on the given network file.

    sys.argv[1] supplies the --nn-file; any further CLI args are forwarded.
    Blocks until the child process exits.
    """
    base = ['python', 'train_q.py', '--steps-per-epoch', '0', '--test-length', '100000', '--nn-file', sys.argv[1], '--display-screen', '--max-history', '10', '--testing']
    proc = subprocess.Popen(base + sys.argv[2:])
    proc.wait()
class FocusLiteNNMinMax(nn.Module):
    """Tiny focus-quality CNN: one conv, global min/max pooling, 1x1 head."""

    def __init__(self, num_channel=1):
        super(FocusLiteNNMinMax, self).__init__()
        self.num_channel = num_channel
        self.conv = nn.Conv2d(3, self.num_channel, 7, stride=5, padding=1)
        # Head consumes the concatenated [min, max] feature pair per channel.
        self.fc = nn.Conv2d(self.num_channel * 2, 1, 1, stride=1, padding=0)
        # He-style (fan-out) normal initialisation for every conv layer.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))

    def forward(self, x):
        batch_size = x.size(0)
        x = self.conv(x)
        spatial = x.shape[2:4]
        # Global minimum via max-pooling the negated map; global maximum directly.
        lowest = -F.max_pool2d(-x, spatial)
        highest = F.max_pool2d(x, spatial)
        x = self.fc(torch.cat((lowest, highest), 1))
        return x.view(batch_size, -1)
def get_rd_data_dict(pkl_path, train_path, n_aug, alpha):
    """Return a {sentence: [random-deletion augmentations]} mapping.

    Builds and caches the mapping at ``pkl_path`` on first use; later calls
    just load the pickle.
    """
    if not pkl_path.exists():
        print(f'creating {pkl_path}')
        sentences, _ = common.get_sentences_and_labels_from_txt(train_path)
        augmented = {
            sentence: [get_rd_sentence(sentence, alpha) for _ in range(n_aug)]
            for sentence in tqdm(sentences)
        }
        common.save_pickle(pkl_path, augmented)
    return common.load_pickle(pkl_path)
class RandomHorizontalFlip(object):
    """Flip an image horizontally with probability ``p``."""

    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, img):
        # One uniform draw per call; flip only when it lands below p.
        return F.hflip(img) if random.random() < self.p else img

    def __repr__(self):
        return '{}(p={})'.format(self.__class__.__name__, self.p)
_REGISTRY.register()
class CIFAR100C(CIFAR10C):
    """CIFAR-100 / CIFAR-100-C domain pair; loading logic is inherited from CIFAR10C."""

    # Resolved relative to the dataset root configured in ``cfg``.
    dataset_dir = ''
    # Clean source domain and its corrupted counterpart.
    domains = ['cifar100', 'cifar100_c']

    def __init__(self, cfg):
        super().__init__(cfg)
def parse_args():
    """CLI options for converting a benchmark-filter txt file into a slurm script."""
    parser = argparse.ArgumentParser(description='Convert benchmark model json to script')
    parser.add_argument('txt_path', type=str, help='txt path output by benchmark_filter')
    parser.add_argument('--partition', type=str, default='openmmlab', help='slurm partition name')
    parser.add_argument('--max-keep-ckpts', type=int, default=1, help='The maximum checkpoints to keep')
    parser.add_argument('--run', action='store_true', help='run script directly')
    parser.add_argument('--out', type=str, help='path to save model benchmark script')
    return parser.parse_args()
def game_loop(args):
    """Run the pygame map-visualisation main loop until the user interrupts.

    Sets up the display, HUD, input controller and world, then ticks and
    renders them at up to 60 FPS.  The world is always destroyed on exit.
    """
    # Bound before the try block so the finally clause cannot hit a
    # NameError (which would mask the real exception) if setup fails early.
    world = None
    try:
        pygame.init()
        display = pygame.display.set_mode((args.width, args.height), pygame.HWSURFACE | pygame.DOUBLEBUF)
        pygame.display.set_caption(args.description)
        # Show a placeholder message while the (slow) map render happens.
        font = pygame.font.Font(pygame.font.get_default_font(), 20)
        text_surface = font.render('Rendering map...', True, COLOR_WHITE)
        display.blit(text_surface, text_surface.get_rect(center=(args.width / 2, args.height / 2)))
        pygame.display.flip()
        input_control = InputControl(TITLE_INPUT)
        hud = HUD(TITLE_HUD, args.width, args.height)
        world = World(TITLE_WORLD, args, timeout=2.0)
        # Wire the three modules together before entering the loop.
        input_control.start(hud, world)
        hud.start()
        world.start(hud, input_control)
        clock = pygame.time.Clock()
        while True:
            clock.tick_busy_loop(60)  # cap the loop at 60 FPS
            world.tick(clock)
            hud.tick(clock)
            input_control.tick(clock)
            display.fill(COLOR_ALUMINIUM_4)
            world.render(display)
            hud.render(display)
            input_control.render(display)
            pygame.display.flip()
    except KeyboardInterrupt:
        print('\nCancelled by user. Bye!')
    finally:
        if world is not None:
            world.destroy()
def get_abs_min_max(var, ctx):
    """Format the absolute min/max of a tensor alongside a context label."""
    magnitudes = var.abs()
    return f'{magnitudes.min():8.2e} {magnitudes.max():8.2e} {ctx}'
def run_deeplab(args):
    """Normalise CLI defaults on ``args`` and dispatch to the requested mode.

    Fills in derived defaults (cuda flag, gpu id list, sync-BN, batch sizes,
    checkpoint name), seeds torch, then routes to inference, evaluation or
    training.
    """
    args.cuda = (not args.no_cuda) and torch.cuda.is_available()
    if args.cuda:
        try:
            args.gpu_ids = [int(s) for s in args.gpu_ids.split(',')]
        except ValueError:
            raise ValueError('Argument --gpu_ids must be a comma-separated list of integers only')
    if args.sync_bn is None:
        # Synchronised batch-norm only pays off with multiple GPUs.
        args.sync_bn = bool(args.cuda and len(args.gpu_ids) > 1)
    if args.epochs is None:
        raise ValueError('epochs must be specified')
    if args.batch_size is None:
        args.batch_size = 4 * len(args.gpu_ids)
    if args.test_batch_size is None:
        args.test_batch_size = args.batch_size
    if args.checkname is None:
        args.checkname = 'deeplab-' + str(args.backbone)
    torch.manual_seed(args.seed)
    if args.inference:
        handle_inference(args)
    elif args.evaluate:
        handle_evaluate(args)
    else:
        handle_training(args)
class Transformer(nn.Module):
    """Token transformer: embedding followed by alternating attention/FF residual blocks."""

    def __init__(self, num_tokens, dim, depth, heads, dim_head, attn_dropout, ff_dropout):
        super().__init__()
        self.embeds = nn.Embedding(num_tokens, dim)
        self.layers = nn.ModuleList([])
        for _ in range(depth):
            attention = Residual(PreNorm(dim, Attention(dim, heads=heads, dim_head=dim_head, dropout=attn_dropout)))
            feedforward = Residual(PreNorm(dim, FeedForward(dim, dropout=ff_dropout)))
            self.layers.append(nn.ModuleList([attention, feedforward]))

    def forward(self, x):
        x = self.embeds(x)
        # Each depth level applies attention then the feed-forward block.
        for attention, feedforward in self.layers:
            x = feedforward(attention(x))
        return x
def deconv(in_planes, out_planes):
    """2x spatial upsampling block: 4x4 stride-2 transposed conv + LeakyReLU(0.1)."""
    upsample = nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4, stride=2, padding=1, bias=True)
    return nn.Sequential(upsample, nn.LeakyReLU(0.1, inplace=True))
def test_lazy_class_scope_resolution():
    """Class-attribute and instance-attribute writes must resolve to the right symbols.

    Reading ``foo.shared`` after ``Foo.shared`` was rebound on a stale ``y``
    must be flagged; writing ``foo.shared`` on the instance must NOT refresh
    the separate class member ``Foo.shared``.
    """
    run_cell('\n class Foo:\n shared = 99\n def __init__(self, x):\n self.x = x\n ')
    run_cell('foo = Foo(10)')
    run_cell('y = 11')
    # Foo.shared now depends on y; the subsequent rebind of y makes it stale.
    run_cell('Foo.shared = y + 42')
    run_cell('y = 12')
    run_cell('logging.info(foo.shared)')
    assert_detected('`foo.shared` should point to same Symbol as `Foo.shared` and thus also has stale dep')
    # Writing through the instance creates an instance attribute; the class
    # member Foo.shared stays stale.
    run_cell('foo.shared = 89')
    run_cell('logging.info(Foo.shared)')
    assert_detected('Even though we refreshed `foo.shared`, this has no bearing on the original class member `Foo.shared`')
def test_pretrained_resnet3d_backbone():
    """Inflate pretrained 2-D ResNet-50 weights into the 3-D backbone and smoke-test a forward pass."""
    # Prefer the modern hub loader; fall back for very old torch versions.
    try:
        from torch.hub import load_state_dict_from_url
    except ImportError:
        from torch.utils.model_zoo import load_url as load_state_dict_from_url
    # NOTE(review): the checkpoint URL has been lost in this copy of the file --
    # the string literal below is truncated and unclosed (syntax error).
    # Restore the original URL from version control before running.
    state_dict_2d = load_state_dict_from_url(' progress=True)
    data = torch.randn(1, 3, 1, 224, 224)
    model = ResNet3DBackbone(layer_blocks=(3, 4, 6, 3), block_layer=ResNet3DBottleneck, zero_init_residual=True, state_dict_2d=state_dict_2d)
    print(model)
    outputs = model(data)
    print(outputs.shape)
    assert (outputs.shape == (1, 2048, 1, 7, 7))
class Normalize(object):
    """Channel-normalise an image, optionally converting RGB [0,1] to BGR [0,255] first."""

    def __init__(self, mean, std, to_bgr255=True):
        self.mean = mean
        self.std = std
        # Caffe-style weights expect BGR channel order in the 0-255 range.
        self.to_bgr255 = to_bgr255

    def __call__(self, image, target=None, rois=None):
        if self.to_bgr255:
            image = image[[2, 1, 0]] * 255
        normalized = F.normalize(image, mean=self.mean, std=self.std)
        # target/rois are passed through untouched.
        return (normalized, target, rois)
def parse_fasta(fasta_string: str) -> Tuple[Sequence[str], Sequence[str]]:
    """Parse FASTA text into parallel lists of sequences and descriptions.

    Each '>' line starts a new record whose remainder is the description;
    subsequent non-empty lines are concatenated into that record's sequence.
    """
    sequences = []
    descriptions = []
    current = -1  # index of the record being accumulated
    for raw_line in fasta_string.splitlines():
        stripped = raw_line.strip()
        if stripped.startswith('>'):
            current += 1
            descriptions.append(stripped[1:])
            sequences.append('')
        elif stripped:
            sequences[current] += stripped
    return (sequences, descriptions)
class DeviceManager():
    """Enumerate benchmark targets: adb Androids, SSH ARM-Linux boxes, and the host.

    NOTE: ``@classmethod`` decorators are restored here -- ``list_devices``
    calls ``cls.list_adb_device()`` / ``cls.list_ssh_device(...)``, which
    requires these to be class methods to work when invoked on the class.
    """

    @classmethod
    def list_adb_device(cls):
        """Return de-duplicated metadata dicts for every adb-visible device."""
        devices = []
        adb_list = sh_commands.adb_devices()
        for adb in adb_list:
            prop = sh_commands.adb_getprop_by_serialno(adb)
            android = {
                YAMLKeyword.device_name: prop['ro.product.model'].replace(' ', ''),
                YAMLKeyword.target_abis: prop['ro.product.cpu.abilist'].split(','),
                YAMLKeyword.target_socs: prop['ro.board.platform'],
                YAMLKeyword.system: SystemType.android,
                YAMLKeyword.address: adb,
                YAMLKeyword.username: '',
            }
            if android not in devices:
                devices.append(android)
        return devices

    @classmethod
    def list_ssh_device(cls, yml):
        """Load ARM-Linux device entries from a YAML config file."""
        with open(yml) as f:
            # safe_load: device configs are plain data, and PyYAML >= 6 no
            # longer accepts yaml.load() without an explicit Loader.
            devices = yaml.safe_load(f.read())
        devices = devices['devices']
        device_list = []
        for name, dev in six.iteritems(devices):
            dev[YAMLKeyword.device_name] = dev[YAMLKeyword.models].replace(' ', '')
            dev[YAMLKeyword.system] = SystemType.arm_linux
            device_list.append(dev)
        return device_list

    @classmethod
    def list_devices(cls, yml):
        """Aggregate adb + ssh devices; the host machine is always appended."""
        devices_list = []
        devices_list.extend(cls.list_adb_device())
        if not yml:
            # No config given: fall back to ./devices.yml if present.
            if os.path.exists('devices.yml'):
                devices_list.extend(cls.list_ssh_device('devices.yml'))
        elif os.path.exists(yml):
            devices_list.extend(cls.list_ssh_device(yml))
        else:
            MaceLogger.error(ModuleName.RUN, 'no ARM linux device config file found')
        host = {YAMLKeyword.device_name: SystemType.host, YAMLKeyword.target_abis: [ABIType.host], YAMLKeyword.target_socs: '', YAMLKeyword.system: SystemType.host, YAMLKeyword.address: SystemType.host}
        devices_list.append(host)
        return devices_list
def setup_router(api_list, chatbot=None, enable_llm=True, use_deepspeed=False, world_size=1, host='0.0.0.0', port=80):
    """Attach the router for each requested API service to the main router.

    Exits the process when an unknown service name is requested.  Returns
    the configured main router.
    """
    for api_name in api_list:
        lower_api_name = api_name.lower()
        # Unknown services are fatal: bail out immediately.
        if lower_api_name not in api_router_mapping:
            logger.error(f'NeuralChat has not supported such service yet: {api_name}')
            sys.exit(-1)
        api_router = api_router_mapping[lower_api_name]
        if enable_llm:
            api_router.set_chatbot(chatbot, use_deepspeed, world_size, host, port)
        if lower_api_name == 'plugin_image2image':
            api_router.worker.start()
            logger.info('create main worker done...')
        _router.include_router(api_router)
    return _router
def get_args():
    """Parse CLI options for the position-dependent subword lexicon script."""
    parser = argparse.ArgumentParser(description='This script creates a\n position-dependent subword lexicon from a position-independent subword lexicon\n by adding suffixes ("_B", "_I", "_E", "_S") to the related phones.\n It assumes that the input lexicon does not contain disambiguation symbols.')
    parser.add_argument('--separator', type=str, default='', help='Separator\n indicates the position of a subword in a word. \n Subword ends with separator can only appear at the beginning or middle of a word. \n Subword without separator can only appear at the end of a word or is a word itself.\n E.g. "international -> al";\n "nation -> nation"\n The separator should match the separator used in the input lexicon.')
    parser.add_argument('lexiconp', type=str, help="Filename of subword position-independent \n lexicon with pronunciation probabilities, with lines of the form 'subword prob p1 p2 ...'")
    return parser.parse_args()
def get_emd_average(model_id, pre_sampled=True, **kwargs):
    """Mean EMD value for a model, computing and caching results on first use."""
    import os
    manager = get_emd_manager(model_id, pre_sampled, **kwargs)
    if os.path.isfile(manager.path):
        with manager.get_saving_dataset('r') as ds:
            values = np.array(tuple(ds.values()))
    else:
        try:
            manager.save_all()
        except Exception:
            # A partially written file would poison later runs; drop it
            # before propagating the failure.
            os.remove(manager.path)
            raise
        with manager.get_saving_dataset('r') as ds:
            values = np.array(tuple(ds.values()))
    return np.mean(values)
_registry('AdamW', 'tensorflow')
class TensorFlowAdamW(object):
    """Registry adapter exposing tf-addons AdamW with a normalised parameter dict."""

    def __init__(self, param_dict):
        """Store the constructor parameters after a type check."""
        assert isinstance(param_dict, dict), 'This optimizer constructor parameter must be a dict'
        self._param_dict = param_dict

    def _mapping(self):
        """Translate user parameters to tfa keyword names; unknown keys are dropped."""
        _param_map = {'learning_rate': 'learning_rate', 'weight_decay': 'weight_decay', 'beta_1': 'beta_1', 'beta_2': 'beta_2', 'epsilon': 'epsilon', 'amsgrad': 'amsgrad'}
        return {_param_map[key]: value for key, value in self._param_dict.items() if key in _param_map}

    def __call__(self, **kwargs):
        """Return the optimizer class and its keyword arguments.

        BUG FIX: the original forwarded ``**kwargs`` into ``_mapping()``,
        which takes no arguments and therefore raised TypeError whenever
        any keyword was supplied.
        """
        return (tfa.optimizers.AdamW, self._mapping())
class GPRGNN(torch.nn.Module):
    """Two-layer MLP followed by an (A)PPNP / GPR propagation step."""

    def __init__(self, dataset, args):
        super(GPRGNN, self).__init__()
        self.lin1 = Linear(dataset.num_features, args.hidden)
        self.lin2 = Linear(args.hidden, dataset.num_classes)
        # Choose the propagation operator from the configuration.
        if args.ppnp == 'PPNP':
            self.prop1 = APPNP(args.K, args.alpha)
        elif args.ppnp == 'GPR_prop':
            self.prop1 = GPR_prop(args.K, args.alpha, args.Init, args.Gamma)
        self.Init = args.Init
        self.dprate = args.dprate
        self.dropout = args.dropout

    def reset_parameters(self):
        # Only the propagation layer is reset; lin1/lin2 keep their weights
        # (this matches the original behaviour).
        self.prop1.reset_parameters()

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        # Feature transform: dropout -> lin1 -> relu -> dropout -> lin2.
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.lin2(x)
        # Extra dropout before propagation only when dprate is non-zero.
        if self.dprate != 0.0:
            x = F.dropout(x, p=self.dprate, training=self.training)
        x = self.prop1(x, edge_index)
        return F.log_softmax(x, dim=1)
class SetAbstraction(nn.Module):
    """PointNet++-style set abstraction: optionally downsample the cloud,
    group neighbours around each kept point, run shared MLPs on the grouped
    features and max-pool over each neighbourhood.

    NOTE(review): ``group_args`` defaults to a plain dict but is later
    accessed with attribute syntax (``group_args.nsample``) -- callers
    presumably pass an EasyDict-like object; confirm before changing.
    """

    def __init__(self, in_channels, out_channels, layers=1, stride=1, group_args={'NAME': 'ballquery', 'radius': 0.1, 'nsample': 16}, norm_args={'norm': 'bn1d'}, act_args={'act': 'relu'}, conv_args=None, sampler='fps', feature_type='dp_fj', use_res=False, is_head=False, **kwargs):
        super().__init__()
        self.stride = stride
        self.is_head = is_head
        # stride == 1 on a non-head layer means "aggregate over all points".
        self.all_aggr = ((not is_head) and (stride == 1))
        # Residual path only when a per-point output is kept.
        self.use_res = (use_res and (not self.all_aggr) and (not self.is_head))
        self.feature_type = feature_type
        # Downsampling layers use a narrower mid channel.
        mid_channel = ((out_channels // 2) if (stride > 1) else out_channels)
        channels = (([in_channels] + ([mid_channel] * (layers - 1))) + [out_channels])
        # First conv's input width depends on how grouped features are built.
        channels[0] = (in_channels if is_head else CHANNEL_MAP[feature_type](channels[0]))
        if self.use_res:
            # 1x1 conv to match channel widths on the skip path when needed.
            self.skipconv = (create_convblock1d(in_channels, channels[(- 1)], norm_args=None, act_args=None) if (in_channels != channels[(- 1)]) else nn.Identity())
        self.act = create_act(act_args)
        # Head layers work on per-point features (1d); others on groups (2d).
        create_conv = (create_convblock1d if is_head else create_convblock2d)
        convs = []
        for i in range((len(channels) - 1)):
            # The last conv drops its activation when a residual (or head) follows.
            convs.append(create_conv(channels[i], channels[(i + 1)], norm_args=(norm_args if (not is_head) else None), act_args=(None if ((i == (len(channels) - 2)) and (self.use_res or is_head)) else act_args), **conv_args))
        self.convs = nn.Sequential(*convs)
        if (not is_head):
            if self.all_aggr:
                # Global aggregation: group every point (no radius/nsample cap).
                group_args.nsample = None
                group_args.radius = None
            self.grouper = create_grouper(group_args)
            # Max-pool over the neighbour (last) dimension.
            self.pool = (lambda x: torch.max(x, dim=(- 1), keepdim=False)[0])
            if (sampler.lower() == 'fps'):
                self.sample_fn = furthest_point_sample
            elif (sampler.lower() == 'random'):
                self.sample_fn = random_sample

    def forward(self, pf):
        # pf is a (positions, features) pair.
        (p, f) = pf
        if self.is_head:
            f = self.convs(f)
        else:
            if (not self.all_aggr):
                # Sample a subset of points and gather their xyz coordinates.
                idx = self.sample_fn(p, (p.shape[1] // self.stride)).long()
                new_p = torch.gather(p, 1, idx.unsqueeze((- 1)).expand((- 1), (- 1), 3))
            else:
                new_p = p
            " DEBUG neighbor numbers. \n query_xyz, support_xyz = new_p, p\n radius = self.grouper.radius\n dist = torch.cdist(query_xyz.cpu(), support_xyz.cpu())\n points = len(dist[dist < radius]) / (dist.shape[0] * dist.shape[1])\n logging.info(f'query size: {query_xyz.shape}, support size: {support_xyz.shape}, radius: {radius}, num_neighbors: {points}')\n DEBUG end "
            if (self.use_res or ('df' in self.feature_type)):
                # Features of the sampled points (skip connection / 'df' input).
                # NOTE(review): ``idx`` only exists when not self.all_aggr; an
                # all-aggr layer with a 'df' feature_type would raise
                # NameError here -- confirm that combination is unreachable.
                fi = torch.gather(f, (- 1), idx.unsqueeze(1).expand((- 1), f.shape[1], (- 1)))
                if self.use_res:
                    identity = self.skipconv(fi)
            else:
                fi = None
            (dp, fj) = self.grouper(new_p, p, f)
            fj = get_aggregation_feautres(new_p, dp, fi, fj, feature_type=self.feature_type)
            # Shared MLP over groups, then pool each neighbourhood to one vector.
            f = self.pool(self.convs(fj))
            if self.use_res:
                f = self.act((f + identity))
            p = new_p
        return (p, f)
# Register the class below with the module registry (presumably a decorator
# flattened into a bare call in this export -- TODO confirm).
_module()
class LLavaConvProcessV1(BaseConvProcessFunc):
    """Rewrites image placeholder tokens in a raw conversation into
    per-patch image tokens, optionally wrapped in start/end markers."""

    def __call__(self, raw_conv: List[Dict[(str, Any)]], preprocessor: Dict[(str, Any)], conv_template: Conversation) -> List[Dict[(str, Any)]]:
        cfg = preprocessor['conv']
        image_token_len = cfg['image_token_len']
        sep_image_conv_front = cfg.get('sep_image_conv_front', False)
        use_im_start_end = cfg.get('use_im_start_end', False)
        if sep_image_conv_front:
            # Move the image token to the very front of the first message.
            stripped = raw_conv[0]['value'].replace(DEFAULT_IMAGE_TOKEN, '').strip()
            raw_conv[0]['value'] = (DEFAULT_IMAGE_TOKEN + conv_template.sep + conv_template.roles[0] + ': ' + stripped)
        # The replacement is identical for every sentence, so build it once.
        replace_token = DEFAULT_IMAGE_PATCH_TOKEN * image_token_len
        if use_im_start_end:
            replace_token = DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN
        for sentence in raw_conv:
            sentence['value'] = sentence['value'].replace(DEFAULT_IMAGE_TOKEN, replace_token)
        return raw_conv
def registerSceneProperties():
    """Attach the zpy sim/export string properties to Blender's Scene type."""
    scene = bpy.types.Scene
    string_prop = bpy.props.StringProperty
    scene.zpy_sim_name = string_prop(name='Sim Name', description='Name of the scene, must match data portal.', default='default')
    scene.zpy_sim_version = string_prop(name='Sim Version', description='Version of the scene, must match data portal.', default='0')
    scene.zpy_export_dir = string_prop(name='Export Directory Path', description='Path to directory for exporting packaged zumo scenes.', default=str(zpy.files.default_temp_path()), subtype='DIR_PATH')
    scene.zpy_export_path = string_prop(name='Export Path', description='Export path for this zumo scene.', default=str(zpy.files.default_temp_path()), subtype='DIR_PATH')
def test_robot_warehouse_utils__calculate_num_observation_features() -> None:
    """The observation-feature count must match for sensor ranges 1 and 2."""
    expected_by_range = {1: 66, 2: 178}
    for sensor_range, expected in expected_by_range.items():
        assert calculate_num_observation_features(sensor_range) == expected
def eval_base_model_mean_rank(pred_fn, target_events):
    """Print the mean rank (restricted to top-5 hits) of the true event type.

    Args:
        pred_fn: URI/path of the prediction file, read via
            ``file_uri_reader_processor``.
        target_events: Iterable of events whose first element is the string
            "(seq_idx, original_idx)" locating the event in the predictions.

    Returns:
        None -- the mean rank is printed, not returned.
    """
    from ast import literal_eval
    pred_data = file_uri_reader_processor(pred_fn)
    pred_type_score = []
    label_type = []
    for event in target_events:
        # Security fix: literal_eval safely parses the "(seq_idx, idx)" tuple
        # coming from data; the original eval() would execute arbitrary code.
        (seq_idx, original_idx) = literal_eval(event[0])
        pred_event = search_pred_data(pred_data, seq_idx, original_idx)
        pred_type_score.append(pred_event['pred_type_score'])
        label_type.append(pred_event['label_type'])
    type_pr_topk = 5
    type_ranks = rank(label_type, pred_type_score)
    # Mean rank over events whose true type ranked within the top k.
    type_mask = (type_ranks <= type_pr_topk)
    type_mean_ranks = np.mean(type_ranks[type_mask])
    print(type_mean_ranks)
    return
class PlatformType(object):
    """String constants naming the platforms a job can run on."""

    # Local execution, no cluster.
    LOCAL = 'local'
    # Ray cluster backend.
    RAY = 'ray'
    # Kubernetes backends: the standard and the Python-native launcher.
    KUBERNETES = 'k8s'
    PY_KUBERNETES = 'pyk8s'
class Adam(OptimMethod, ZooKerasCreator):
    """Adam optimizer wrapper that builds its JVM-side counterpart."""

    def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0, schedule=None, weight_decay=0.0, bigdl_type='float'):
        # Fall back to the Default() learning-rate schedule when none is given.
        lr_schedule = schedule or Default()
        self.value = callZooFunc(bigdl_type, ZooKerasCreator.jvm_class_constructor(self), lr, beta_1, beta_2, epsilon, decay, weight_decay, lr_schedule)
        self.bigdl_type = bigdl_type
def run_save_and_load(rank, world_size, pipe_config=None, amp_config=None, loss_func=None):
    """Save then reload a pipeline-parallel checkpoint and run two steps.

    Args:
        rank: Process rank.
        world_size: Total number of processes.
        pipe_config: Optional pipeline configuration dict. A fresh dict is
            created when omitted -- the original ``pipe_config=dict()``
            default was mutable and mutated below, so the 'use_c10d' flag
            leaked across calls.
        amp_config: Optional AMP config merged into the compiler configs.
        loss_func: Optional loss function; when given, an optimizer is also
            created and included in the checkpoint.
    """
    from atorch.auto.opt_lib.pipeline_parallel_optimization import PipelineParallelOptimization
    # Bugfix: avoid the shared mutable default argument.
    if pipe_config is None:
        pipe_config = {}
    pipe_config['use_c10d'] = True
    init_pipe_distributed(rank, world_size)
    model_context = create_model_context(loss_func=loss_func)
    deepcopy_checkpoint_name(model_context.model, model_context.model)
    if torch.cuda.is_available():
        model_context.model.cuda()
    best_config = generate_pipe_configs(model_context, pipe_config)
    if amp_config is not None:
        best_config['compiler_configs']['amp_config'] = amp_config
    PipelineParallelOptimization.apply_wrapper(model_context, 'pipe', best_config)
    model_context.update_dataloader()
    if loss_func is not None:
        model_context.update_optim()
    optimizer = model_context.optim if (loss_func is not None) else None
    atorch_save_pipe_checkpoint(model_context.model, optimizer=optimizer)
    # Move to meta device so loading repopulates real weights on the target.
    model_context.model.to('meta')
    device = f'cuda:{rank}' if torch.cuda.is_available() else 'cpu'
    atorch_load_pipe_checkpoint(model_context.model, optim=optimizer, device=device)
    run_two_steps_return_the_last(rank, model_context)
def _main():
    """Scan a target file for lines matching ``pred`` ("multiple goals") and
    report repeated lines plus a repetition histogram.

    Relies on module-level ``_parse_main``, ``pred`` and ``plt`` defined
    elsewhere in this file or its imports.
    """
    opts = _parse_main()
    count = 0
    goal2count = dict()
    with open(opts.target_file, 'r') as f:
        for l in f:
            # NOTE(review): the limit is tested before incrementing, so up to
            # opts.limit + 1 matches are actually processed -- confirm intent.
            if (count > opts.limit):
                print('[check_multiple_goals] LIMIT HIT')
                break
            if pred(l):
                count += 1
                print('[check_multiple_goals] FOUND MULTIPLE GOALS: ', l)
                if (l in goal2count):
                    goal2count[l] = (goal2count[l] + 1)
                else:
                    goal2count[l] = 1
    # Lines seen more than once among the matches.
    repeated_goals = [k for (k, v) in goal2count.items() if (v > 1)]
    print('REPEATED GOALS', repeated_goals)
    # NOTE(review): str.count('') returns len(x) + 1, so this condition is
    # true for every non-empty line -- a real separator string was probably
    # intended here; confirm against the original intent.
    print('REPEATED GOALS WITH MULTIPLE GOALS: ', len([x for x in repeated_goals if (x.count('') > 1)]))
    print('NUM REPEATED GOALS: ', len(repeated_goals))
    # Each repeated line contributes (occurrences - 1) duplicate instances.
    print('DUPLICATE CONTRIBUTION: ', sum(((goal2count[k] - 1) for k in repeated_goals)))
    print('REPETITION DISTRIBUTION')
    plt.xlim([0, 7])
    plt.hist([goal2count[k] for k in repeated_goals])
    plt.show()
    count_repeats = len(repeated_goals)
    print(f'[check_multiple_goals] FOUND {count_repeats} REPEATED GOALS')
    print(f'[check_multiple_goals] FOUND {count} INSTANCES OF MULTIPLE GOALS')
def simxGetStringParameter(clientID, paramIdentifier, operationMode):
    """Fetch a string parameter from the remote API server.

    Returns a ``(return_code, value)`` tuple; ``value`` holds the decoded
    string when the call succeeded (return_code == 0), otherwise ''.
    """
    paramValue = ct.POINTER(ct.c_char)()
    ret = c_GetStringParameter(clientID, paramIdentifier, ct.byref(paramValue), operationMode)
    buf = bytearray()
    # The Python-2/3 distinction is loop-invariant, so decide it once.
    is_py3 = (sys.version_info[0] == 3)
    if (ret == 0):
        # Copy bytes out of the C buffer up to the NUL terminator.
        pos = 0
        while (paramValue[pos] != b'\x00'):
            if is_py3:
                buf.append(int.from_bytes(paramValue[pos], 'big'))
            else:
                buf.append(paramValue[pos])
            pos = (pos + 1)
    result = str(buf, 'utf-8') if is_py3 else str(buf)
    return (ret, result)
class SEBottleneck(Bottleneck):
    """Bottleneck block with a squeeze-and-excitation layer after conv3."""

    def __init__(self, in_channels, out_channels, se_ratio=16, **kwargs):
        super().__init__(in_channels, out_channels, **kwargs)
        self.se_layer = SELayer(out_channels, ratio=se_ratio)

    def forward(self, x):
        def _inner_forward(x):
            shortcut = x
            # Three conv/norm stages; ReLU after the first two only.
            out = self.relu(self.norm1(self.conv1(x)))
            out = self.relu(self.norm2(self.conv2(out)))
            out = self.norm3(self.conv3(out))
            # Channel-wise recalibration before the residual addition.
            out = self.se_layer(out)
            if self.downsample is not None:
                shortcut = self.downsample(x)
            out += shortcut
            return out

        # Gradient checkpointing trades compute for memory while training.
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        return self.relu(out)
class ParamGroup():
    """Mixin that mirrors the attributes of a subclass as an argparse group.

    Attributes whose names start with '_' additionally get a single-letter
    short flag; bool attributes become ``store_true`` flags. ``extract``
    copies the parsed values declared on this group into a GroupParams.
    """

    def __init__(self, parser: ArgumentParser, name: str, fill_none=False):
        group = parser.add_argument_group(name)
        for key, value in vars(self).items():
            # A leading underscore requests a '-x' shorthand next to '--xyz'.
            use_shorthand = key.startswith('_')
            if use_shorthand:
                key = key[1:]
            value_type = type(value)
            if fill_none:
                value = None
            flags = [('--' + key)]
            if use_shorthand:
                flags.append(('-' + key[0:1]))
            if value_type == bool:
                group.add_argument(*flags, default=value, action='store_true')
            else:
                group.add_argument(*flags, default=value, type=value_type)

    def extract(self, args):
        """Return a GroupParams holding only the args declared on this group."""
        group = GroupParams()
        for arg_name, arg_value in vars(args).items():
            if (arg_name in vars(self)) or (('_' + arg_name) in vars(self)):
                setattr(group, arg_name, arg_value)
        return group
def learn_and_test(solver_file):
    """Train with the given solver on CPU and return the mean test accuracy.

    Depends on module-level ``caffe`` and ``Xt`` (the test inputs).
    """
    caffe.set_mode_cpu()
    solver = caffe.get_solver(solver_file)
    solver.solve()
    test_net = solver.test_nets[0]
    num_batches = int(len(Xt) / test_net.blobs['data'].num)
    total = 0
    for _ in range(num_batches):
        # Each forward pass consumes one batch and refreshes the accuracy blob.
        test_net.forward()
        total += test_net.blobs['accuracy'].data
    return total / num_batches
def targets_rate(targets, num_classes, num_steps=False, first_spike_time=0, correct_rate=1, incorrect_rate=0, on_target=1, off_target=0, firing_pattern='regular', interpolate=False, epsilon=1e-07):
    """Convert class-index targets into rate-coded spike targets.

    The correct class fires at ``correct_rate`` and every other class at
    ``incorrect_rate`` following ``firing_pattern``; values are scaled
    between ``off_target`` and ``on_target``. With the default rates (1/0)
    this reduces to a (possibly time-delayed) one-hot target.

    Raises:
        Exception: on out-of-range rates, missing ``num_steps``,
            incorrect_rate > correct_rate, or an unknown firing pattern.
    """
    # --- argument validation -------------------------------------------
    if ((not (0 <= correct_rate <= 1)) or (not (0 <= incorrect_rate <= 1))):
        raise Exception(f'``correct_rate``{correct_rate} and ``incorrect_rate``{incorrect_rate} must be between 0 and 1.')
    if ((not num_steps) and ((correct_rate != 1) or (incorrect_rate != 0))):
        raise Exception('``num_steps`` must be passed if correct_rate is not 1 or incorrect_rate is not 0.')
    if (incorrect_rate > correct_rate):
        raise Exception('``correct_rate`` must be greater than ``incorrect_rate``.')
    if (firing_pattern.lower() not in ['regular', 'uniform', 'poisson']):
        raise Exception("``firing_pattern`` must be either 'regular', 'uniform' or 'poisson'.")
    device = targets.device
    # --- fast path: always-on correct class, always-off others ---------
    if ((correct_rate == 1) and (incorrect_rate == 0)):
        if (first_spike_time == 0):
            if (on_target > off_target):
                # Scale the one-hot, then clamp the zeros up to off_target.
                return torch.clamp((to_one_hot(targets, num_classes) * on_target), off_target)
            else:
                # off_target >= on_target: a min-clamp cannot produce the two
                # levels, so combine one-hot and its inverse explicitly.
                return ((to_one_hot(targets, num_classes) * on_target) + ((~ to_one_hot(targets, num_classes).bool()) * off_target))
        if (first_spike_time > 0):
            # Tile the one-hot over time and silence the first steps.
            spike_targets = torch.clamp((to_one_hot(targets, num_classes) * on_target), off_target)
            spike_targets = spike_targets.repeat(tuple(([num_steps] + torch.ones(len(spike_targets.size()), dtype=int).tolist())))
            spike_targets[0:first_spike_time] = off_target
            return spike_targets
    else:
        # --- general path: sample spike trains at both rates -----------
        one_hot_targets = to_one_hot(targets, num_classes)
        one_hot_inverse = to_one_hot_inverse(one_hot_targets)
        # Tile over time: leading dimension becomes num_steps.
        one_hot_targets = one_hot_targets.repeat(tuple(([num_steps] + torch.ones(len(one_hot_targets.size()), dtype=int).tolist())))
        one_hot_inverse = one_hot_inverse.repeat(tuple(([num_steps] + torch.ones(len(one_hot_inverse.size()), dtype=int).tolist())))
        (correct_spike_targets, correct_spike_times) = target_rate_code(num_steps=num_steps, first_spike_time=first_spike_time, rate=correct_rate, firing_pattern=firing_pattern)
        correct_spikes_one_hot = (one_hot_targets * correct_spike_targets.to(device).unsqueeze((- 1)).unsqueeze((- 1)))
        (incorrect_spike_targets, incorrect_spike_times) = target_rate_code(num_steps=num_steps, first_spike_time=first_spike_time, rate=incorrect_rate, firing_pattern=firing_pattern)
        # NOTE(review): unlike the correct-rate line above, the unsqueezes
        # here are applied AFTER the multiplication -- possibly unintended;
        # confirm the broadcasting is equivalent for the shapes in use.
        incorrect_spikes_one_hot = (one_hot_inverse * incorrect_spike_targets).to(device).unsqueeze((- 1)).unsqueeze((- 1))
        if (not interpolate):
            return torch.clamp(((incorrect_spikes_one_hot.to(device) + correct_spikes_one_hot.to(device)) * on_target), off_target)
        else:
            # Interpolate target values between spike times for smooth targets.
            correct_spike_targets = (one_hot_targets * rate_interpolate(correct_spike_times, num_steps=num_steps, on_target=on_target, off_target=off_target, epsilon=epsilon).to(device).unsqueeze((- 1)).unsqueeze((- 1)))
            incorrect_spike_targets = (one_hot_inverse * rate_interpolate(incorrect_spike_times, num_steps=num_steps, on_target=on_target, off_target=off_target, epsilon=epsilon).to(device).unsqueeze((- 1)).unsqueeze((- 1)))
            return (correct_spike_targets + incorrect_spike_targets)
class MultiEdgeGraphFormatter(BaseGraphFormatter):
    """Formats a code-graph JSON item into a multi-edge DGL-graph datapoint."""

    def __init__(self, config, name='MultiEdgeGraphFormatter'):
        self.name = name
        self.disable_tqdm = config.disable_tqdm
        self.config = config
        # Token-level ('t3') tokenizer for C code, created empty here.
        self.t3_parser = CodeTokenizer(data=[], lang='C', tlevel='t3')
        # NOTE(review): calls BaseFormatter.__init__ directly instead of
        # super().__init__ (BaseGraphFormatter) -- presumably intentional to
        # skip the intermediate initializer; confirm before changing.
        BaseFormatter.__init__(self, config, name)

    def format(self, item_json, vocab_dicts):
        """Build a datapoint (function text, DGL graph, size, target) from item_json."""
        # Only the token and node vocabularies are used here; target/word
        # vocabularies are unpacked for interface parity with siblings.
        (token_vd, node_vd, target_vd, word_vd) = vocab_dicts
        datapoint = self.datapoint_class()
        dgl_graph = self._convert_to_multi_edge_dglgraph(item_json['jsgraph'], token_vd, node_vd)
        datapoint.function = item_json['function']
        datapoint.function_graph = dgl_graph
        datapoint.graph_size = item_json['graph_size']
        datapoint.tgt = item_json['target']
        return datapoint
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.