class GlobalStateWrapper(Wrapper):
def modify_timestep(self, timestep: TimeStep) -> TimeStep[ObservationGlobalState]:
global_state = jnp.concatenate(timestep.observation.agents_view, axis=0)
global_state = jnp.tile(global_state, (self._env.num_agents, 1))
observation = ObservationGlobalState(global_state=global_state, agents_view=timestep.observation.agents_view, action_mask=timestep.observation.action_mask, step_count=timestep.observation.step_count)
return timestep.replace(observation=observation)
def reset(self, key: chex.PRNGKey) -> Tuple[(State, TimeStep)]:
(state, timestep) = self._env.reset(key)
return (state, self.modify_timestep(timestep))
def step(self, state: State, action: chex.Array) -> Tuple[(State, TimeStep)]:
(state, timestep) = self._env.step(state, action)
return (state, self.modify_timestep(timestep))
def observation_spec(self) -> specs.Spec[ObservationGlobalState]:
obs_spec = self._env.observation_spec()
num_obs_features = obs_spec.agents_view.shape[(- 1)]
global_state = specs.Array((self._env.num_agents, (self._env.num_agents * num_obs_features)), jnp.int32, 'global_state')
return specs.Spec(ObservationGlobalState, 'ObservationSpec', agents_view=obs_spec.agents_view, action_mask=obs_spec.action_mask, global_state=global_state, step_count=obs_spec.step_count) |
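# Added note (illustrative description, not from the original source): modify_timestep builds
# the centralised global state by flattening every agent's local view into one vector and
# tiling an identical copy per agent, turning an agents_view of shape
# (num_agents, num_obs_features) into a global_state of shape
# (num_agents, num_agents * num_obs_features), matching observation_spec above.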
def main():
parser = argparse.ArgumentParser(description='DeeplabV3+ And Evaluation')
parser.add_argument('--backbone', type=str, default='resnet', choices=['resnet', 'xception', 'drn', 'mobilenet', 'drn_c42'], help='backbone name (default: resnet)')
    parser.add_argument('--out-stride', type=int, default=16, help='network output stride (default: 16)')
parser.add_argument('--dataset', type=str, default='urban3d', choices=['urban3d', 'spaceNet', 'crowdAI', 'combined'], help='dataset name (default: urban3d)')
parser.add_argument('--data-root', type=str, default='/data/', help='datasets root path')
parser.add_argument('--workers', type=int, default=4, metavar='N', help='dataloader threads')
parser.add_argument('--sync-bn', type=bool, default=None, help='whether to use sync bn (default: auto)')
parser.add_argument('--freeze-bn', type=bool, default=False, help='whether to freeze bn parameters (default: False)')
    parser.add_argument('--loss-type', type=str, default='ce_dice', choices=['ce', 'ce_dice', 'wce_dice'], help='loss func type (default: ce_dice)')
parser.add_argument('--fbeta', type=float, default=1, help='beta for FBeta-Measure')
parser.add_argument('--loss-weights', type=float, nargs='+', default=[1.0, 1.0], help='loss weighting')
parser.add_argument('--num-classes', type=int, default=2, help='number of classes to predict (2 for binary mask)')
parser.add_argument('--dropout', type=float, nargs='+', default=[0.1, 0.5], help='dropout values')
parser.add_argument('--epochs', type=int, default=None, metavar='N', help='number of epochs to train (default: auto)')
parser.add_argument('--start_epoch', type=int, default=0, metavar='N', help='start epochs (default:0)')
parser.add_argument('--batch-size', type=int, default=None, metavar='N', help='input batch size for training (default: auto)')
parser.add_argument('--test-batch-size', type=int, default=None, metavar='N', help='input batch size for testing (default: auto)')
    parser.add_argument('--lr', type=float, default=0.0001, metavar='LR', help='learning rate (default: 1e-4)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.0005, metavar='M', help='w-decay (default: 5e-4)')
parser.add_argument('--nesterov', action='store_true', default=False, help='whether use nesterov (default: False)')
parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--gpu-ids', type=str, default='0', help='use which gpu to train, must be a comma-separated list of integers only (default=0)')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('--checkname', type=str, default=None, help='set the checkpoint name')
parser.add_argument('--no-val', action='store_true', default=False, help='skip validation during training')
parser.add_argument('--use-wandb', action='store_true', default=False)
parser.add_argument('--resume', type=str, default=None, help='experiment to load')
parser.add_argument('--evaluate', action='store_true', default=False)
parser.add_argument('--best-miou', action='store_true', default=False)
parser.add_argument('--inference', action='store_true', default=False)
parser.add_argument('--input-filename', type=str, default=None, help='path to an input file to run inference on')
parser.add_argument('--output-filename', type=str, default=None, help='path to where predicted segmentation mask will be written')
parser.add_argument('--window-size', type=int, default=None, help='the size of grid blocks to sample from the input, use if encountering OOM issues')
parser.add_argument('--stride', type=int, default=None, help='the stride at which to sample grid blocks, recommended value is equal to `window_size`')
parser.add_argument('--incl-bounds', action='store_true', default=False, help='includes boundaries of masks in loss function')
parser.add_argument('--bounds-kernel-size', type=int, default=3, help='kernel size for calculating boundary')
args = parser.parse_args()
run_deeplab(args) |
def get_diagnoses_extractors(data_dir, extractor_map):
extractors = []
table = 'diagnoses_icd'
id_extractor = MultiExtractor(names=['subject_id', 'hadm_id'], sep='_')
outpath = os.path.join(data_dir, (table + '.tsv'))
time_ext = ConstantExtractor(None)
type_ext = FmtExtractor(names=[], fmt='diagnoses_icd')
value_ext = MultiExtractor(names=['seq_num', 'icd9_code'])
test_ext = true_test_extractor
extractor = ExtractorInfo(table, outpath, id_extractor, time_ext, type_ext, value_ext, test_ext)
extractors.append(extractor)
extractor_map[table] = extractors
return table |
class CheckpointLoader():
_schemes = {}
    @classmethod
    def _register_scheme(cls, prefixes, loader, force=False):
if isinstance(prefixes, str):
prefixes = [prefixes]
else:
assert isinstance(prefixes, (list, tuple))
for prefix in prefixes:
if ((prefix not in cls._schemes) or force):
cls._schemes[prefix] = loader
else:
raise KeyError(f'{prefix} is already registered as a loader backend, add "force=True" if you want to override it')
cls._schemes = OrderedDict(sorted(cls._schemes.items(), key=(lambda t: t[0]), reverse=True))
    @classmethod
    def register_scheme(cls, prefixes, loader=None, force=False):
if (loader is not None):
cls._register_scheme(prefixes, loader, force=force)
return
def _register(loader_cls):
cls._register_scheme(prefixes, loader_cls, force=force)
return loader_cls
return _register
    @classmethod
    def _get_checkpoint_loader(cls, path):
for p in cls._schemes:
if (re.match(p, path) is not None):
return cls._schemes[p]
    @classmethod
    def load_checkpoint(cls, filename, map_location=None, logger=None):
checkpoint_loader = cls._get_checkpoint_loader(filename)
class_name = checkpoint_loader.__name__
mmcv.print_log(f'load checkpoint from {class_name[10:]} path: {filename}', logger)
return checkpoint_loader(filename, map_location) |
class RGB2Gray(gym.ObservationWrapper):
def __init__(self, env):
super().__init__(env)
obs_shape = env.observation_space.shape
self.observation_space = gym.spaces.Box(low=0, high=255, shape=(*obs_shape[:2], 1), dtype=np.uint8)
def observation(self, observation):
observation = np.dot(observation, [[0.299], [0.587], [0.114]])
return observation.astype(np.uint8) |
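# Hedged usage sketch for RGB2Gray above (the env id is illustrative only; any Gym env with
# HxWx3 uint8 RGB observations works). The [0.299, 0.587, 0.114] weights are the ITU-R
# BT.601 luma coefficients.
#
#   env = RGB2Gray(gym.make("PongNoFrameskip-v4"))
#   obs = env.reset()
#   obs.shape   # (210, 160, 1), single luminance channel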
_module()
class RepeatDataset(object):
def __init__(self, dataset, times):
self.dataset = dataset
self.times = times
self.CLASSES = dataset.CLASSES
self.PALETTE = dataset.PALETTE
self._ori_len = len(self.dataset)
def __getitem__(self, idx):
return self.dataset[(idx % self._ori_len)]
def __len__(self):
return (self.times * self._ori_len) |
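# Hedged usage sketch (assumes `base_dataset` is any dataset exposing CLASSES and PALETTE,
# as mmseg-style datasets do; the name is illustrative):
#
#   repeated = RepeatDataset(base_dataset, times=8)
#   len(repeated)   # 8 * len(base_dataset), so one "epoch" covers the data 8 times
#   repeated[idx]   # same item as base_dataset[idx % len(base_dataset)]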
def _init_weight_goog(m, n='', fix_group_fanout=True):
if isinstance(m, CondConv2d):
fan_out = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
if fix_group_fanout:
fan_out //= m.groups
init_weight_fn = get_condconv_initializer((lambda w: w.data.normal_(0, math.sqrt((2.0 / fan_out)))), m.num_experts, m.weight_shape)
init_weight_fn(m.weight)
if (m.bias is not None):
m.bias.data.zero_()
elif isinstance(m, nn.Conv2d):
fan_out = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
if fix_group_fanout:
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt((2.0 / fan_out)))
if (m.bias is not None):
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
fan_out = m.weight.size(0)
fan_in = 0
if ('routing_fn' in n):
fan_in = m.weight.size(1)
init_range = (1.0 / math.sqrt((fan_in + fan_out)))
m.weight.data.uniform_((- init_range), init_range)
m.bias.data.zero_() |
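# Added note (describes the code above, not the upstream repo's documentation): Conv2d and
# CondConv2d weights are drawn from N(0, sqrt(2 / fan_out)), i.e. the fan-out variant of He
# initialisation, with fan_out optionally divided by the group count; BatchNorm2d is reset to
# identity (weight=1, bias=0), and Linear layers get a uniform init whose range also accounts
# for fan_in when the layer name contains 'routing_fn'.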
_cache(None)
def can_hash_optimize(cls):
if issubclass(cls, (str, tuple, list)):
return True
return False |
class TwoCropsTransform():
def __init__(self, base_transform):
self.base_transform = base_transform
def __call__(self, x):
q = self.base_transform(x)
k = self.base_transform(x)
return [q, k] |
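# Hedged usage sketch (torchvision transforms are an assumption; any callable works as
# base_transform):
#
#   from torchvision import transforms
#   aug = transforms.Compose([transforms.RandomResizedCrop(224),
#                             transforms.RandomHorizontalFlip(),
#                             transforms.ToTensor()])
#   two_crops = TwoCropsTransform(aug)
#   q, k = two_crops(pil_image)   # two independently augmented views of the same image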
def get_calibration_loader(image_root, pos_root, neg_root, batchsize, testsize, shuffle=True, num_workers=12, pin_memory=True):
dataset = calibration_dataset(image_root, pos_root, neg_root, testsize)
data_loader = data.DataLoader(dataset=dataset, batch_size=batchsize, shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory)
return data_loader |
def create_oracles_ilp(dataname, path_read, path_wt_distributed):
files = [i.split('.')[0] for i in os.listdir(path_read) if i.endswith('.doc.json')]
total_num = len(files)
cnt = multiprocessing.cpu_count()
pool = multiprocessing.Pool(processes=cnt)
pool.starmap(process_one_example, zip(([path_read] * total_num), ([path_wt_distributed] * total_num), files, ([40] * total_num), ([dataname] * total_num)))
pool.close()
pool.join()
    print('Finished creating oracles and wrote them to the distributed folders.') |
class Inferencer(object):
def __init__(self, config, args):
self.config = config
print(config)
self.args = args
print(self.args)
self.build_model()
self.load_model()
with open(self.args.attr, 'rb') as f:
self.attr = pickle.load(f)
def load_model(self):
print(f'Load model from {self.args.model}')
self.model.load_state_dict(torch.load(f'{self.args.model}'))
return
def build_model(self):
self.model = cc(AE(self.config))
print(self.model)
self.model.eval()
return
def utt_make_frames(self, x):
frame_size = self.config['data_loader']['frame_size']
remains = (x.size(0) % frame_size)
if (remains != 0):
x = F.pad(x, (0, remains))
out = x.view(1, (x.size(0) // frame_size), (frame_size * x.size(1))).transpose(1, 2)
return out
def inference_one_utterance(self, x, x_cond):
x = self.utt_make_frames(x)
x_cond = self.utt_make_frames(x_cond)
dec = self.model.inference(x, x_cond)
dec = dec.transpose(1, 2).squeeze(0)
dec = dec.detach().cpu().numpy()
dec = self.denormalize(dec)
wav_data = melspectrogram2wav(dec)
return (wav_data, dec)
def denormalize(self, x):
(m, s) = (self.attr['mean'], self.attr['std'])
ret = ((x * s) + m)
return ret
def normalize(self, x):
(m, s) = (self.attr['mean'], self.attr['std'])
ret = ((x - m) / s)
return ret
def write_wav_to_file(self, wav_data, output_path):
write(output_path, rate=self.args.sample_rate, data=wav_data)
return
def inference_from_path(self):
(src_mel, _) = get_spectrograms(self.args.source)
(tar_mel, _) = get_spectrograms(self.args.target)
src_mel = torch.from_numpy(self.normalize(src_mel)).cuda()
tar_mel = torch.from_numpy(self.normalize(tar_mel)).cuda()
(conv_wav, conv_mel) = self.inference_one_utterance(src_mel, tar_mel)
self.write_wav_to_file(conv_wav, self.args.output)
return |
class ImageAttributeDataset(data.Dataset):
def __init__(self, filename, transform):
self.lines = [line.rstrip().split() for line in open(filename, 'r')]
self.transform = transform
self.length = len(self.lines)
def __getitem__(self, index):
line = self.lines[index]
image = Image.open(line[0])
conditions = [int(condition) for condition in line[1:]]
return (self.transform(image), torch.Tensor(conditions))
def __len__(self):
return self.length |
_registry(pattern_type='QuantizedGraphDtypeRefactor')
class QuantizedGraphDtypeCheck(Pattern):
def __call__(self, model):
def _quantized_dtype(model):
dtype = 'fp32'
for node in model.nodes:
if (node.op_type == 'Quantize'):
dtype = node.attr['output_dtype']
break
return dtype
def _get_dst_ops(node, model):
ret = []
output_name = node.output_tensors[0].name
for node in model.nodes:
for input_tensor in node.input_tensors:
if (output_name == input_tensor.name):
ret.append(node)
return ret
def _insert_quant_node(pre_node, model, graph_dtype):
output_tensor = copy.deepcopy(pre_node.output_tensors[0])
output_tensor.dtype = graph_dtype
pre_node.output_tensors[0] = copy.deepcopy(pre_node.output_tensors[0])
pre_node.output_tensors[0].name = (pre_node.output_tensors[0].name + '_before_quant')
pre_node.output_tensors[0].dest_op = [(pre_node.name + '_quant')]
input_tensor = copy.deepcopy(pre_node.output_tensors[0])
quantize_op = util.construct_node(node_name=(pre_node.name + '_quant'), op_type='Quantize', input_tensors=[input_tensor], output_tensors=[output_tensor], attr=OrderedDict({'output_dtype': graph_dtype}))
insert_idx = model.get_node_id(pre_node.name)
model.insert_nodes((insert_idx + 1), [quantize_op])
def _check_dst_op(start_node, model, checker, graph_dtype):
dst_ops = _get_dst_ops(start_node, model)
if (len(dst_ops) == 0):
return
for op in dst_ops:
if (EXECUTOR_TYPE.get(op.op_type, op.op_type) in checker[graph_dtype]):
if (op.attr.get('output_dtype', None) != graph_dtype):
op.attr['output_dtype'] = graph_dtype
_check_dst_op(op, model, checker, graph_dtype)
else:
continue
def _scan_nodes_graph_dtype(model, checker, graph_dtype):
for node in model.nodes:
if (node.attr and (node.attr.get('output_dtype', 'fp32') == graph_dtype) and (node.name != 'input_data')):
dst_ops = _get_dst_ops(node, model)
for op in dst_ops:
if ((EXECUTOR_TYPE.get(op.op_type, op.op_type) in checker[graph_dtype]) and (op.attr.get('output_dtype', 'fp32') != graph_dtype)):
op.attr['output_dtype'] = graph_dtype
def _remove_redundant_quant_node(model, checker, graph_dtype):
remove_node_names = []
for node in model.nodes:
if (node.op_type == 'Quantize'):
pre_node = None
try:
pre_node = model.get_node_by_name(node.input_tensors[0].source_op[0])
except:
pre_node = None
if (pre_node and pre_node.attr and (pre_node.attr.get('output_dtype', None) == graph_dtype)):
dst_ops = _get_dst_ops(pre_node, model)
if ((len(dst_ops) == 1) and (dst_ops[0].op_type == 'Quantize')):
pre_node.output_tensors = copy.deepcopy(node.output_tensors)
remove_node_names.append(node.name)
elif (len(dst_ops) >= 2):
dst_ops_type = [EXECUTOR_TYPE.get(o.op_type, o.op_type) for o in dst_ops]
valid = True
for ot in dst_ops_type:
if ((ot in checker[graph_dtype]) or (ot in ['Quantize', 'Output'])):
continue
else:
valid = False
break
if valid:
quant_dst_ops = _get_dst_ops(node, model)
qot_name = node.output_tensors[0].name
for qdo in quant_dst_ops:
r_tensor_idx = model.get_tensor_idx(qdo.name, qot_name, from_output=False)
if (r_tensor_idx != (- 1)):
pre_node.output_tensors[0].dest_op.append(qdo.name)
qdo.input_tensors[r_tensor_idx] = copy.deepcopy(pre_node.output_tensors[0])
remove_node_names.append(node.name)
model.remove_nodes(remove_node_names)
def _check_append_sum_nodes(model, checker, graph_dtype):
name_list = []
for node in model.nodes:
if ((EXECUTOR_TYPE.get(node.op_type, node.op_type) in ['InnerProduct', 'Convolution']) and node.attr and (node.attr.get('append_op') == 'sum') and (graph_dtype == 'bf16')):
post_n = model.get_node_by_name(node.input_tensors[(- 1)].source_op[0])
if (post_n.attr and (post_n.attr.get('output_dtype', 'fp32') != 'bf16')):
if (post_n.name not in name_list):
name_list.append(post_n.name)
if name_list:
for n in name_list:
_insert_quant_node(model.get_node_by_name(n), model, graph_dtype)
_scan_nodes_graph_dtype(model, checker, graph_dtype)
def _revert_logits_output_dtype(model, graph_dtype):
for t in model.nodes[(- 1)].input_tensors:
pre_node = model.get_node_by_name(t.source_op[0])
if (pre_node and (EXECUTOR_TYPE.get(pre_node.op_type, pre_node.op_type) in ['InnerProduct', 'Softmax', 'LogSoftmax', 'Convolution'])):
if (pre_node.attr.get('output_dtype', 'fp32') == graph_dtype):
pre_node.attr['output_dtype'] = 'fp32'
if (util.get_autocast_info()['cast_type'] != 'bf16'):
return model
graph_dtype = _quantized_dtype(model)
bf16_op = ['InnerProduct', 'Slice', 'Matmul', 'Reshape', 'BinaryOp', 'BinaryAdd', 'Reorder', 'Concat', 'Softmax', 'LayerNorm', 'LogSoftmax', 'Convolution', 'Gather', 'GroupNorm', 'Sigmoid', 'Gelu', 'MultiHeadAttention', 'Resampling', 'StridedSlice']
s8_op = ['InnerProduct', 'Reshape', 'Shape', 'BinaryOp']
checker = {'bf16': bf16_op, 's8': s8_op}
non_quantized_patterns = [[(0, 'Range'), (1, ['Div', 'BinaryOp']), (2, 'Pow'), (3, ['Div', 'BinaryOp']), (4, 'Reshape'), (7, ['Matmul', 'BatchMatmul'])], [(), (5, 'Range'), (6, 'Reshape'), (7, ['Matmul', 'BatchMatMul'])]]
match_ret = util.search_pattern(non_quantized_patterns, model)
for ret in match_ret:
mat_node = model.get_node_by_name(ret[(- 2)])
dst_ops = _get_dst_ops(mat_node, model)
dst_ops_type = [n.op_type for n in dst_ops]
if ((dst_ops_type == ['Sin', 'Cos']) or (dst_ops_type == ['CosSin', 'CosSin'])):
for n in dst_ops:
_insert_quant_node(n, model, graph_dtype)
if (graph_dtype in ['bf16']):
for node in model.nodes:
if ((EXECUTOR_TYPE.get(node.op_type, node.op_type) in ['Quantize']) and (node.attr['output_dtype'] == 'bf16')):
dst_ops = _get_dst_ops(node, model)
for op in dst_ops:
if (op.attr and (op.attr.get('output_dtype', 'fp32') != graph_dtype) and (EXECUTOR_TYPE.get(op.op_type, op.op_type) in checker[graph_dtype])):
op.attr['output_dtype'] = graph_dtype
_check_dst_op(op, model, checker, graph_dtype)
_scan_nodes_graph_dtype(model, checker, graph_dtype)
_check_append_sum_nodes(model, checker, graph_dtype)
_remove_redundant_quant_node(model, checker, graph_dtype)
_revert_logits_output_dtype(model, graph_dtype)
for t in model.nodes[0].output_tensors:
if (t.location and (len(t.location) == 2)):
break
elif ((match_ret or (model._framework_modeling_config.get('architecture', 'None') == 'decoder_only')) and (t.dtype == 'fp32')):
t.dtype = 'bf16'
return model |
class _CustomDataParallel(nn.Module):
def __init__(self, model, device_ids):
super(_CustomDataParallel, self).__init__()
self.model = nn.DataParallel(model, device_ids=device_ids)
self.model.to(device)
num_devices = (torch.cuda.device_count() if (device_ids is None) else len(device_ids))
print(f'{type(model)} using {num_devices} GPUs!')
def forward(self, *input, **kwargs):
return self.model(*input, **kwargs)
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self.model.module, name) |
def hrnet32(in_channels, num_classes):
model = HighResolutionNet(in_channels=in_channels, num_classes=num_classes, extra=extra_32)
init_weights(model, 'kaiming')
return model |
def get_rnn_cell(rnn_type, num_layers, hidden_dim, keep_prob, scope):
with tf.variable_scope(scope):
lst = []
for _ in range(num_layers):
if (rnn_type == 'gru'):
cell = tf.contrib.rnn.GRUCell(num_units=hidden_dim)
else:
cell = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_dim)
cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)
lst.append(cell)
if (num_layers > 1):
res = tf.contrib.rnn.MultiRNNCell(lst)
else:
res = lst[0]
return res |
_only_test
def test_correct_scopes_used_2():
run_cell('\n def foo():\n x, y = [[42], [43]]\n x = x[:1]\n y = y[:1]\n return x, y\n\n def bar(x, y, z):\n return x + y + z\n ')
run_cell("\n v = [int(x) for x in '12345']\n for i in range(5):\n x, y = foo()\n v = bar(x, y, v)\n ")
slice_size = num_stmts_in_slice(2)
assert (slice_size == 4), ('got %d' % slice_size)
run_cell("\n v = [int(x) for x in '12345']\n for i in range(5):\n x, y = foo()\n v = bar(x, y, v)\n ")
slice_size = num_stmts_in_slice(3)
assert (slice_size == 4), ('got %d' % slice_size)
run_cell("\n v = [int(x) for x in '12345']\n for i in range(5):\n x, y = foo()\n v = bar(x, y, v)\n ")
slice_size = num_stmts_in_slice(4)
assert (slice_size == 4), ('got %d' % slice_size) |
class Conv(nn.Module):
def __init__(self, filters0, filters1, kernel_size, bn=False):
super().__init__()
self.conv = nn.Conv2d(filters0, filters1, kernel_size, stride=1, padding=(kernel_size // 2), bias=False)
self.bn = None
if bn:
self.bn = nn.BatchNorm2d(filters1)
def forward(self, x):
h = self.conv(x)
if (self.bn is not None):
h = self.bn(h)
return h |
class AlignBinary(torch.nn.Module):
def __init__(self, config, vocab, max_len_token):
super(AlignBinary, self).__init__()
self.config = config
self.vocab = vocab
self.need_flatten = True
self.embedding = nn.Embedding((vocab.size + 1), (vocab.size + 1), padding_idx=0)
self.embedding.weight.data = torch.eye((vocab.size + 1))
self.embedding.weight.requires_grad = False
self.max_len_token = max_len_token
self.CNN = CNN(config.increasing, config.num_layers, config.filter_counts, max_len_token)
self.ones = Variable(torch.ones(config.train_batch_size, 1))
self.loss = BCEWithLogitsLoss()
self.has_hidden = False
def compute_loss(self, qry_tk, pos_tk, neg_tk):
(qry_lkup, pos_lkup, neg_lkup) = get_qry_pos_neg_tok_lookup(self.vocab, qry_tk, pos_tk, neg_tk)
(qry_emb, qry_mask) = self.embed(qry_lkup)
(pos_emb, pos_mask) = self.embed(pos_lkup)
(neg_emb, neg_mask) = self.embed(neg_lkup)
scores = (self.score_pair(qry_emb, pos_emb, qry_mask, pos_mask) - self.score_pair(qry_emb, neg_emb, qry_mask, neg_mask))
loss = self.loss(scores, self.ones)
return loss
def score_pair(self, qry_emb, cnd_emb, qry_msk, cnd_msk):
qry_cnd_sim = torch.bmm(qry_emb, torch.transpose(cnd_emb, 2, 1))
qry_mask = qry_msk.unsqueeze(dim=2)
cnd_mask = cnd_msk.unsqueeze(dim=1)
qry_cnd_mask = torch.bmm(qry_mask, cnd_mask)
qry_cnd_sim = torch.mul(qry_cnd_sim, qry_cnd_mask)
return self.CNN(qry_cnd_sim)
def embed(self, mnt_lkp):
mnt_lkp = torch.from_numpy(mnt_lkp).cuda()
mnt_mask = Variable(torch.cuda.ByteTensor((mnt_lkp > 0)).float())
mnt_emb = self.embedding(Variable(mnt_lkp))
return (mnt_emb, mnt_mask)
def score_dev_test_batch(self, qry_tk, cnd_tk):
(qry_lkup, cnd_lkup) = get_qry_cnd_tok_lookup(self.vocab, qry_tk, cnd_tk)
(qry_emb, qry_mask) = self.embed(qry_lkup)
(cnd_emb, cnd_mask) = self.embed(cnd_lkup)
return self.score_pair(qry_emb, cnd_emb, qry_mask, cnd_mask) |
class Sharpen(nn.Module):
def __init__(self, tempeature=0.5):
super(Sharpen, self).__init__()
self.T = tempeature
def forward(self, probabilities):
tempered = torch.pow(probabilities, (1 / self.T))
tempered = (tempered / tempered.sum(dim=(- 1), keepdim=True))
return tempered |
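# Minimal runnable check of Sharpen above (added for illustration): raising probabilities to
# the power 1/T with T < 1 peaks the distribution, and renormalisation keeps rows summing to 1.
import torch

_probs = torch.tensor([[0.5, 0.3, 0.2]])
_sharpened = Sharpen(0.5)(_probs)
assert torch.allclose(_sharpened.sum(dim=-1), torch.ones(1))
assert _sharpened[0, 0] > _probs[0, 0]   # the largest entry grows, the others shrink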
def general_detokenize(string):
string = string.replace(" n't", "n't")
string = string.replace(' )', ')')
string = string.replace('( ', '(')
string = string.replace('" ', '"')
string = string.replace(' "', '"')
string = re.sub(" (['.,])", '\\1', string)
return string |
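# Worked example (added): the replacements above undo PTB/Moses-style tokenisation spacing
# around contractions, parentheses, quotes and punctuation.
#
#   general_detokenize("do n't stop ( now ) , please .")
#   # -> "don't stop (now), please."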
def test_basic():
run_cell('x = object()')
assert (lookup_symbol_by_name('x').get_ref_count() == 1)
run_cell('y = x')
assert (lookup_symbol_by_name('x').get_ref_count() == 2)
assert (lookup_symbol_by_name('y').get_ref_count() == 2)
run_cell('del x')
assert (lookup_symbol_by_name('y').get_ref_count() == 1)
run_cell('y = None')
assert (lookup_symbol_by_name('y').get_ref_count() == (- 1)) |
def check_dens_directint(dfi, pot, tol, dens, rmin=None, rmax=None, bins=31):
rs = numpy.linspace(rmin, rmax, bins)
intdens = numpy.array([dfi.vmomentdensity(r, 0, 0) for r in rs])
expdens = numpy.array([dens(r) for r in rs])
assert numpy.all((numpy.fabs(((intdens / expdens) - 1.0)) < tol)), 'Density from direct integration is not equal to the expected value'
return None |
def temp_path_generator():
sys_temp = tempfile.gettempdir()
path = os.path.join(sys_temp, 'autokaggle')
return path |
def gen_learner_deep(data: ImageDataBunch, gen_loss, arch=models.resnet34, nf_factor: float=1.5) -> Learner:
return unet_learner_deep(data, arch, wd=0.001, blur=True, norm_type=NormType.Spectral, self_attention=True, y_range=((- 3.0), 3.0), loss_func=gen_loss, nf_factor=nf_factor) |
def test_icdar_dataset():
tmp_dir = tempfile.TemporaryDirectory()
fake_json_file = osp.join(tmp_dir.name, 'fake_data.json')
_create_dummy_icdar_json(fake_json_file)
dataset = IcdarDataset(ann_file=fake_json_file, pipeline=[])
assert (dataset.CLASSES == 'text')
assert (dataset.img_ids == [0, 1])
assert (dataset.select_first_k == (- 1))
ann = dataset.get_ann_info(0)
assert np.allclose(ann['bboxes'], [[50.0, 60.0, 70.0, 80.0], [100.0, 120.0, 130.0, 150.0]])
assert np.allclose(ann['labels'], [0, 0])
assert np.allclose(ann['bboxes_ignore'], [[150.0, 160.0, 190.0, 200.0], [250.0, 260.0, 350.0, 360.0]])
assert np.allclose(ann['masks'], [[[50, 60, 70, 60, 70, 80, 50, 80]], [[100, 120, 130, 120, 120, 150, 100, 150]]])
assert np.allclose(ann['masks_ignore'], [[[150, 160, 190, 160, 190, 200, 150, 200]], [[250, 260, 350, 260, 350, 360, 250, 360]]])
assert (dataset.cat_ids == [0])
tmp_dir.cleanup()
metrics = ['hmean-iou', 'hmean-ic13']
results = [{'boundary_result': [[50, 60, 70, 60, 70, 80, 50, 80, 1], [100, 120, 130, 120, 120, 150, 100, 150, 1]]}, {'boundary_result': []}]
output = dataset.evaluate(results, metrics)
assert (output['hmean-iou:hmean'] == 1)
assert (output['hmean-ic13:hmean'] == 1) |
def weights_init(m):
classname = m.__class__.__name__
if (classname.find('Conv') != (- 1)):
m.weight.data.normal_(0.0, 0.01)
m.bias.data.normal_(0.0, 0.01)
elif (classname.find('BatchNorm') != (- 1)):
m.weight.data.normal_(1.0, 0.01)
m.bias.data.fill_(0)
elif (classname.find('Linear') != (- 1)):
m.weight.data.normal_(0.0, 0.01)
m.bias.data.normal_(0.0, 0.01) |
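# Hedged usage sketch (assumes every Conv/Linear module in the network has a bias term;
# bias=False layers would make the m.bias accesses above fail):
#
#   net = build_model()        # illustrative placeholder for any nn.Module
#   net.apply(weights_init)    # nn.Module.apply visits every submodule recursively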
def model2list(model):
if isinstance(model, nn.DataParallel):
model = list(model.module)
elif isinstance(model, nn.Sequential):
model = list(model)
return model |
def get_loss(pred, label, end_points, label_b, lam):
loss_a = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
loss_b = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label_b)
loss_a_lam = (tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label) * (1 - lam))
loss_b_lam = (tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label_b) * lam)
loss_sum = tf.add(loss_a_lam, loss_b_lam)
classify_loss = tf.reduce_mean(loss_sum)
tf.summary.scalar('classify loss', classify_loss)
tf.add_to_collection('losses', classify_loss)
return classify_loss |
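# Added note (describes the code above): the two lam-weighted terms implement a mixup-style
# objective, L = (1 - lam) * CE(pred, label) + lam * CE(pred, label_b), averaged over the
# batch; loss_a and loss_b are computed but never used.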
_model
def tf_efficientnet_b0(pretrained=False, **kwargs):
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs)
return model |
def bytes_list_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value)) |
def get_module_key(file_name):
module_key = None
if (file_name[(- 3):] == '.cl'):
module_key = file_name[:(- 3)]
elif (file_name[(- 2):] == '.h'):
module_key = file_name
return module_key |
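# Examples (added for clarity):
#   get_module_key('conv_kernels.cl')   # -> 'conv_kernels'   (.cl extension stripped)
#   get_module_key('common_defs.h')     # -> 'common_defs.h'  (header names kept whole)
#   get_module_key('readme.txt')        # -> None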
def resnet50(pretrained=False):
model = ResNet(Bottleneck, [3, 4, 6, 3])
if pretrained:
model.load_state_dict(load_url(model_urls['resnet50']), strict=False)
return model |
def test(args):
seed_everything(seed=args.seed, workers=True)
datamodule = datamodules[args.dataset](data_root=args.data_root, train_batch_size=args.train_batch_size, test_batch_size=args.val_batch_size, num_workers=args.num_workers, scale_lower_bound=args.rrc_scale_lb, jitter_prob=args.jitter_prob, greyscale_prob=args.greyscale_prob, solarize_prob=args.solarize_prob)
if (not args.disable_pgn):
pgn_settings = {'prompt_mode': args.prompt_mode, 'pgn_resolution': args.pgn_resolution, 'nr_output_vectors': args.pgn_length, 'vector_dim': visual_embedding_dims[args.architecture], 'mixture_size': args.pgn_mixture_size, 'pretrained_pgn': args.pretrained_pgn, 'model_type': args.pgn_model_type, 'proj_type': args.pgn_proj_type, 'pgn_act_fn': args.pgn_act_fn, 'nr_groups': args.nr_groups, 'blocks_per_group': args.blocks_per_group, 'initial_channels': args.initial_channels, 'init_max_pool': args.init_max_pool}
else:
pgn_settings = None
pgn_clip = PGNCLIP(clip_architecture=args.architecture, pgn_settings=pgn_settings, optimizer=args.optimizer, init_lr=args.init_lr, lr_scheduler=args.lr_scheduler, warmup_epochs=args.warmup_epochs, epochs=args.epochs, pgn_path=args.pgn_model_path)
trainer = pl.Trainer(accelerator='gpu', devices=(- 1), precision=args.precision, strategy=args.strategy)
trainer.test(model=pgn_clip, datamodule=datamodule) |
def test_parse_config_with_invalid_reload_param(mocker):
flag_values = flags.FlagValues()
mocker.patch('sys.argv', ['vmcnet', '--reload.logdir_typo=log_dir_path'])
with pytest.raises(AttributeError):
parse_flags(flag_values) |
def test_pitching_stats_bref_bad_year() -> None:
with pytest.raises(ValueError):
league_pitching_stats.pitching_stats_bref('NOT A YEAR') |
def hat(v):
hat_v = torch.zeros([*v.shape[:(- 1)], 3, 3])
(hat_v[(..., 0, 1)], hat_v[(..., 0, 2)], hat_v[(..., 1, 2)]) = ((- v[(..., 2)]), v[(..., 1)], (- v[(..., 0)]))
return (hat_v + (- hat_v.transpose((- 1), (- 2)))) |
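# Added note: hat() maps the last-axis 3-vector v = (v1, v2, v3) to the skew-symmetric matrix
#   [[ 0, -v3,  v2],
#    [ v3,  0, -v1],
#    [-v2,  v1,  0]],
# so hat(v) @ u equals the cross product v x u for 3-vectors u, v (batched over leading dims).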
def visualize_algorithm_as_video(env, policy):
env.spec.id = 1
env = wrappers.Monitor(env, './gym-results', force=True)
obs = env.reset()
for i in range(1000):
(action, _) = policy.get_action(obs)
(obs, reward, done, info) = env.step(action)
if done:
break
print(('done at step %i' % i))
env.close() |
def dirichlet_kullback_leibler(alpha_1: ArrayLike, alpha_2: ArrayLike) -> Array:
return ((jnp.log((gamma(alpha_1.sum()) / gamma(alpha_2.sum()))) + jnp.sum(jnp.log((gamma(alpha_2) / gamma(alpha_1))))) + jnp.sum(((alpha_1 - alpha_2) * (digamma(alpha_1) - digamma(alpha_1.sum()))))) |
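# Added note (describes the one-liner above): this is the closed-form KL divergence between
# two Dirichlet distributions,
#   KL(Dir(a1) || Dir(a2)) = log G(sum a1) - log G(sum a2)
#                            + sum_i [log G(a2_i) - log G(a1_i)]
#                            + sum_i (a1_i - a2_i) * (psi(a1_i) - psi(sum a1)),
# written with gamma ratios inside the logs; a quick sanity check is that it returns 0 when
# alpha_1 == alpha_2.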
class SimpleDataset():
def __init__(self, transform, original_transform, target_transform=identity, n_support=1, n_query=1, no_aug_support=False, no_aug_query=False, img_size=(224, 224)):
self.transform = transform
self.target_transform = target_transform
self.img_size = img_size
self.original_transform = original_transform
self.n_support = n_support
self.n_query = n_query
self.no_aug_support = no_aug_support
self.no_aug_query = no_aug_query
self.dataset = ImageFolder(miniImageNet_path)
def __getitem__(self, i):
(data, label) = self.dataset[i]
view_list = []
for _ in range(self.n_support):
if (not self.no_aug_support):
view_list.append(self.transform(data).unsqueeze(0))
else:
assert (self.n_support == 1)
view_list.append(self.original_transform(data).unsqueeze(0))
for _ in range(self.n_query):
if (not self.no_aug_query):
view_list.append(self.transform(data).unsqueeze(0))
else:
assert (self.n_query == 1)
view_list.append(self.original_transform(data).unsqueeze(0))
return torch.cat(view_list)
def __len__(self):
return len(self.dataset) |
def main():
train_set_path = os.path.join(os.path.dirname(__file__), '..', 'data', 'train', 'bostonbombings', 'aug_rnr_train_set_combined.csv')
heldout_set_path = os.path.join(os.path.dirname(__file__), '..', 'data', 'train', 'bostonbombings', 'aug_rnr_heldout_set_combined.csv')
evaluation_data_path = os.path.join(os.path.dirname(__file__), '..', 'data', 'test', 'bostonbombings.csv')
n_gpu = (- 1)
train_batch_size = 128
model_file_prefix = 'bostonbombings'
model_training(train_set_path, heldout_set_path, evaluation_data_path, n_gpu, train_batch_size, model_file_prefix) |
def write_scores(nbest_and_scores, path):
with open(path, 'w', encoding='utf-8') as f:
for key in nbest_and_scores.keys():
for (idx, (_, score)) in enumerate(nbest_and_scores[key], 1):
current_key = '-'.join([key, str(idx)])
f.write(('%s %.4f\n' % (current_key, score)))
print(('Write to %s' % path)) |
def _rename(cfg: CN, old: str, new: str) -> None:
old_keys = old.split('.')
new_keys = new.split('.')
def _set(key_seq: List[str], val: str) -> None:
cur = cfg
for k in key_seq[:(- 1)]:
if (k not in cur):
cur[k] = CN()
cur = cur[k]
cur[key_seq[(- 1)]] = val
def _get(key_seq: List[str]) -> CN:
cur = cfg
for k in key_seq:
cur = cur[k]
return cur
def _del(key_seq: List[str]) -> None:
cur = cfg
for k in key_seq[:(- 1)]:
cur = cur[k]
del cur[key_seq[(- 1)]]
if ((len(cur) == 0) and (len(key_seq) > 1)):
_del(key_seq[:(- 1)])
_set(new_keys, _get(old_keys))
_del(old_keys) |
def ModelConvMiniImagenet(out_features, hidden_size=64, **kwargs):
return MetaConvModel(3, out_features, hidden_size=hidden_size, feature_size=((5 * 5) * hidden_size), **kwargs) |
def make_evolution_onesite(hamiltonian: SiteOperator, graph: LatticeGraph, tau: Union[(float, complex)], group: int=0, result_cutoff: float=1e-15) -> List[SiteOperator]:
(D, V) = np.linalg.eigh(hamiltonian.elements)
evo = np.einsum('il, l, jl -> ij', V, np.exp(((- tau) * D)), V)
return [SiteOperator(hamiltonian.site, evo, group=group)] |
def get_ffd_dataset(cat_ids, n=3, edge_length_threshold=None, n_samples=None):
from dids.core import BiKeyDataset
kwargs = dict(n=n, edge_length_threshold=edge_length_threshold, n_samples=n_samples)
if isinstance(cat_ids, str):
cat_ids = [cat_ids]
datasets = {c: _get_ffd_dataset(c, **kwargs) for c in cat_ids}
return BiKeyDataset(datasets) |
def set_window_iconify_callback(window, cbfun):
window_addr = ctypes.cast(ctypes.pointer(window), ctypes.POINTER(ctypes.c_long)).contents.value
if (window_addr in _window_iconify_callback_repository):
previous_callback = _window_iconify_callback_repository[window_addr]
else:
previous_callback = None
if (cbfun is None):
cbfun = 0
c_cbfun = _GLFWwindowiconifyfun(cbfun)
_window_iconify_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetWindowIconifyCallback(window, cbfun)
if ((previous_callback is not None) and (previous_callback[0] != 0)):
return previous_callback[0] |
def main():
args = parse_args()
coco_path = args.coco_path
nproc = args.nproc
out_dir = (args.out_dir or coco_path)
out_img_dir = osp.join(out_dir, 'images')
out_mask_dir = osp.join(out_dir, 'annotations')
mmcv.mkdir_or_exist(osp.join(out_mask_dir, 'train2017'))
mmcv.mkdir_or_exist(osp.join(out_mask_dir, 'val2017'))
if (out_dir != coco_path):
shutil.copytree(osp.join(coco_path, 'images'), out_img_dir)
train_list = glob(osp.join(coco_path, 'annotations', 'train2017', '*.png'))
train_list = [file for file in train_list if ('_labelTrainIds' not in file)]
test_list = glob(osp.join(coco_path, 'annotations', 'val2017', '*.png'))
test_list = [file for file in test_list if ('_labelTrainIds' not in file)]
assert ((len(train_list) + len(test_list)) == COCO_LEN), 'Wrong length of list {} & {}'.format(len(train_list), len(test_list))
if (args.nproc > 1):
mmcv.track_parallel_progress(partial(convert_to_trainID, out_mask_dir=out_mask_dir, is_train=True), train_list, nproc=nproc)
mmcv.track_parallel_progress(partial(convert_to_trainID, out_mask_dir=out_mask_dir, is_train=False), test_list, nproc=nproc)
else:
mmcv.track_progress(partial(convert_to_trainID, out_mask_dir=out_mask_dir, is_train=True), train_list)
mmcv.track_progress(partial(convert_to_trainID, out_mask_dir=out_mask_dir, is_train=False), test_list)
print('Done!') |
def blackify(code):
has_indent = (len(get_indent(code)) > 0)
if has_indent:
        code = f"class Bla:\n{code}"
result = black.format_str(code, mode=black.FileMode([black.TargetVersion.PY35], line_length=119))
(result, _) = style_docstrings_in_code(result)
return (result[len('class Bla:\n'):] if has_indent else result) |
def create_basic_stream_logger():
logger = logging.getLogger('')
logger.setLevel(logging.INFO)
logger.handlers = []
ch = logging.StreamHandler()
formatter = logging.Formatter('%(levelname)s - %(name)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger |
class Mandl2019hind(dataset.Dataset):
name = 'mandl2019hind'
url = '
hash = 'd419780fe825f9946e3a03da4cf3fdf41699b188932d3662caad'
files = [{'name': 'mandl2019hind.csv', 'language': 'en', 'type': 'training', 'platform': 'twitter and facebook'}]
license = ' '
    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
tmp_file_path = helpers.unzip_file(tmp_file_path)
file1 = helpers.clean_csv(os.path.join(tmp_file_path, 'hindi_dataset/hindi_dataset.tsv'), sep='\t')
file2 = helpers.clean_csv(os.path.join(tmp_file_path, 'hindi_dataset/hasoc2019_hi_test_gold_2919.tsv'), sep='\t')
tmp_file_path = helpers.merge_csvs({file1: [], file2: []})
helpers.copy_file(tmp_file_path, os.path.join(dataset_folder, 'mandl2019hind.csv'))
    @classmethod
    def unify_row(cls, row):
labels = []
if (row['task_1'] == 'NOT'):
labels.append('normal')
elif (row['task_2'] == 'HATE'):
labels.append('hate')
elif (row['task_2'] == 'OFFN'):
labels.append('offensive')
elif (row['task_2'] == 'PRFN'):
labels.append('profane')
if (row['task_3'] == 'TIN'):
labels.append('targeted')
elif (row['task_3'] == 'UNT'):
labels.append('untargeted')
row['labels'] = labels
row = row.drop(['text_id', 'task_1', 'task_2', 'task_3'])
return row |
def bn_vgg13(**kwargs):
return get_vgg(blocks=13, bias=False, use_bn=True, model_name='bn_vgg13', **kwargs) |
class Metric(object):
def __init__(self):
self.reset()
def reset(self):
pass
def add(self, es, ta, ma=None):
pass
def get(self):
return {}
def items(self):
return self.get().items()
def __str__(self):
return ', '.join([('%s=%.5f' % (key, value)) for (key, value) in self.get().items()]) |
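# Hedged sketch of a concrete subclass (illustrative only; not taken from the original repo):
import numpy as np

class MeanAbsoluteError(Metric):
    """Accumulates |estimate - target| over numpy arrays, optionally restricted by a mask."""
    def reset(self):
        self.total, self.count = 0.0, 0
    def add(self, es, ta, ma=None):
        diff = np.abs(es - ta)
        if ma is not None:
            diff = diff[ma]
        self.total += float(diff.sum())
        self.count += diff.size
    def get(self):
        return {'mae': self.total / max(self.count, 1)}
# str(MeanAbsoluteError()) then prints e.g. "mae=0.00000" via Metric.__str__ above.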
class SoftProjection(nn.Module):
def __init__(self, group_size, initial_temperature=1.0, is_temperature_trainable=True, min_sigma=0.0001):
super().__init__()
self._group_size = group_size
self._temperature = torch.nn.Parameter(torch.tensor(initial_temperature, requires_grad=is_temperature_trainable, dtype=torch.float32))
self._min_sigma = torch.tensor(min_sigma, dtype=torch.float32)
def forward(self, point_cloud, query_cloud, point_features=None, action='project'):
point_cloud = point_cloud.contiguous()
query_cloud = query_cloud.contiguous()
if (action == 'project'):
return self.project(point_cloud, query_cloud)
elif (action == 'propagate'):
return self.propagate(point_cloud, point_features, query_cloud)
elif (action == 'project_and_propagate'):
return self.project_and_propagate(point_cloud, point_features, query_cloud)
else:
raise ValueError("action should be one of the following: 'project', 'propagate', 'project_and_propagate'")
def _group_points(self, point_cloud, query_cloud, point_features=None):
group_size = self._group_size
(dist, idx) = knn_point(group_size, point_cloud, query_cloud)
idx = idx.permute(0, 2, 1).type(torch.int32)
grouped_points = group_point(point_cloud, idx)
grouped_features = (None if (point_features is None) else group_point(point_features, idx))
return (grouped_points, grouped_features)
def _get_distances(self, grouped_points, query_cloud):
deltas = (grouped_points - query_cloud.unsqueeze((- 1)).expand_as(grouped_points))
dist = (torch.sum((deltas ** 2), dim=_axis_to_dim(3), keepdim=True) / self.sigma())
return dist
def sigma(self):
device = self._temperature.device
return torch.max((self._temperature ** 2), self._min_sigma.to(device))
def project_and_propagate(self, point_cloud, point_features, query_cloud):
(grouped_points, grouped_features) = self._group_points(point_cloud, query_cloud, point_features)
dist = self._get_distances(grouped_points, query_cloud)
weights = torch.softmax((- dist), dim=_axis_to_dim(2))
projected_points = torch.sum((grouped_points * weights), dim=_axis_to_dim(2))
propagated_features = torch.sum((grouped_features * weights), dim=_axis_to_dim(2))
return (projected_points, propagated_features)
def propagate(self, point_cloud, point_features, query_cloud):
(grouped_points, grouped_features) = self._group_points(point_cloud, query_cloud, point_features)
dist = self._get_distances(grouped_points, query_cloud)
weights = torch.softmax((- dist), dim=_axis_to_dim(2))
propagated_features = torch.sum((grouped_features * weights), dim=_axis_to_dim(2))
return propagated_features
def project(self, point_cloud, query_cloud, hard=False):
(grouped_points, _) = self._group_points(point_cloud, query_cloud)
dist = self._get_distances(grouped_points, query_cloud)
weights = torch.softmax((- dist), dim=_axis_to_dim(2))
if hard:
raise NotImplementedError
weights = weights.repeat(1, 3, 1, 1)
projected_points = torch.sum((grouped_points * weights), dim=_axis_to_dim(2))
return projected_points |
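# Added note (hedged description of the module above): for every query point, project() takes
# its group_size nearest neighbours in point_cloud, turns the squared distances into softmax
# weights with temperature sigma = max(temperature**2, min_sigma), and returns the weighted
# average of those neighbours -- a differentiable "soft" projection of the query cloud onto
# the input cloud. propagate() applies the same weights to per-point features instead of
# coordinates, and project_and_propagate() does both in one pass.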
class ResultsDestination():
def __init__(self, path):
self._path = path
if (not os.path.exists(self._path)):
os.makedirs(self._path)
def result_filepath(self, fileID):
return os.path.join(self._path, (fileID + '.png')) |
class SelectiveKernelAttn(nn.Module):
def __init__(self, channels, num_paths=2, attn_channels=32, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d):
super(SelectiveKernelAttn, self).__init__()
self.num_paths = num_paths
self.pool = nn.AdaptiveAvgPool2d(1)
self.fc_reduce = nn.Conv2d(channels, attn_channels, kernel_size=1, bias=False)
self.bn = norm_layer(attn_channels)
self.act = act_layer(inplace=True)
self.fc_select = nn.Conv2d(attn_channels, (channels * num_paths), kernel_size=1, bias=False)
def forward(self, x):
assert (x.shape[1] == self.num_paths)
x = torch.sum(x, dim=1)
x = self.pool(x)
x = self.fc_reduce(x)
x = self.bn(x)
x = self.act(x)
x = self.fc_select(x)
(B, C, H, W) = x.shape
x = x.view(B, self.num_paths, (C // self.num_paths), H, W)
x = torch.softmax(x, dim=1)
return x |
def softmax(scores, axis=None):
if _use_optimized_softmax:
return tf.nn.softmax(scores, axis)
else:
scores = i_cast(scores)
rval = tf.nn.softmax(scores, axis)
return r_cast(rval) |
def get_trainer(cfg: NamespaceMap, is_representor: bool, dm: pl.LightningDataModule=None) -> pl.Trainer:
kwargs = dict(**cfg.trainer)
trainer = pl.Trainer(plugins=[SLURMEnvironment(auto_requeue=False)], logger=get_logger(cfg), callbacks=get_callbacks(cfg, is_representor, dm=dm), **kwargs)
return trainer |
def plot_examples():
DATASET = 'mghdb'
RECORDS = ['mgh019', 'mgh023', 'mgh027']
for rec in RECORDS:
hr = load_data(DATASET, rec, freq=0.1)
pylab.figure()
pylab.plot(hr)
pylab.title(rec) |
def unet_plusplus(in_channels, num_classes):
model = NestedUNet(num_classes=num_classes, input_channels=in_channels)
init_weights(model, 'kaiming')
return model |
def view_results(result_path: str):
eval_results = []
for filename in glob(os.path.join(result_path, '*.json')):
with open(filename, 'rb') as f:
questions = orjson.loads(f.read())
eval_results.extend([{'model': Path(filename).stem, 'task_type': q['task_type'], 'task_name': os.path.relpath(q['task_name'], q['task_type']), 'accuracy': q['is_correct'], 'unmatched': (not q['is_matched'])} for q in questions])
df = pd.DataFrame.from_records(eval_results)
df_overall = df.pivot_table(index=['model'], columns=['task_type'], values=['accuracy', 'unmatched'], aggfunc='mean')
print(df_overall.to_string(float_format=(lambda x: f'{(x * 100):.1f}'), na_rep='-'))
for task_type in df['task_type'].unique():
df_task = df[(df['task_type'] == task_type)].pivot_table(index=['task_name'], columns=['model'], values=['accuracy', 'unmatched'], aggfunc='mean')
        print(f'\n### {task_type}\n')
print(df_task.to_string(float_format=(lambda x: f'{(x * 100):.1f}'), na_rep='-')) |
def drn_c_42(pretrained=False, **kwargs):
model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 1, 1], arch='C', **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['drn-c-42']))
return model |
class ResBlock(nn.Module):
def __init__(self, cfg, dim_in, dim_out, dim_inner, stride, dilation=1, use_temp_conv=0, temp_stride=1, need_shortcut=False):
super(ResBlock, self).__init__()
self.btnk = Bottleneck(cfg, dim_in, dim_out, dim_inner=dim_inner, stride=stride, dilation=dilation, use_temp_conv=use_temp_conv, temp_stride=temp_stride)
if (not need_shortcut):
self.shortcut = None
else:
self.shortcut = Conv3dBN(cfg, dim_in, dim_out, (1, 1, 1), stride=(temp_stride, stride, stride), padding=0)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
tr = self.btnk(x)
if (self.shortcut is None):
sc = x
else:
sc = self.shortcut(x)
return self.relu((tr + sc))
def c2_weight_mapping(self):
weight_map = {}
for (name, m_child) in self.named_children():
if m_child.state_dict():
child_map = m_child.c2_weight_mapping()
for (key, val) in child_map.items():
new_key = ((name + '.') + key)
if isinstance(m_child, Conv3dBN):
prefix = 'branch1_'
else:
prefix = ''
weight_map[new_key] = (prefix + val)
return weight_map |
class PSMAggregator(nn.Module):
def __init__(self, max_disp, in_planes=64, batch_norm=True):
super(PSMAggregator, self).__init__()
self.max_disp = max_disp
self.in_planes = in_planes
self.batch_norm = batch_norm
self.dres0 = nn.Sequential(conv3d_bn_relu(batch_norm, self.in_planes, 32, 3, 1, 1, bias=False), conv3d_bn_relu(batch_norm, 32, 32, 3, 1, 1, bias=False))
self.dres1 = nn.Sequential(conv3d_bn_relu(batch_norm, 32, 32, 3, 1, 1, bias=False), conv3d_bn(batch_norm, 32, 32, 3, 1, 1, bias=False))
self.dres2 = Hourglass(in_planes=32, batch_norm=batch_norm)
self.dres3 = Hourglass(in_planes=32, batch_norm=batch_norm)
self.dres4 = Hourglass(in_planes=32, batch_norm=batch_norm)
self.classif1 = nn.Sequential(conv3d_bn_relu(batch_norm, 32, 32, 3, 1, 1, bias=False), nn.Conv3d(32, 1, kernel_size=3, stride=1, padding=1, bias=False))
self.classif2 = nn.Sequential(conv3d_bn_relu(batch_norm, 32, 32, 3, 1, 1, bias=False), nn.Conv3d(32, 1, kernel_size=3, stride=1, padding=1, bias=False))
self.classif3 = nn.Sequential(conv3d_bn_relu(batch_norm, 32, 32, 3, 1, 1, bias=False), nn.Conv3d(32, 1, kernel_size=3, stride=1, padding=1, bias=False))
def forward(self, raw_cost):
(B, C, D, H, W) = raw_cost.shape
cost0 = self.dres0(raw_cost)
cost0 = (self.dres1(cost0) + cost0)
(out1, pre1, post1) = self.dres2(cost0, None, None)
out1 = (out1 + cost0)
(out2, pre2, post2) = self.dres3(out1, pre1, post1)
out2 = (out2 + cost0)
(out3, pre3, post3) = self.dres4(out2, pre2, post2)
out3 = (out3 + cost0)
cost1 = self.classif1(out1)
cost2 = (self.classif2(out2) + cost1)
cost3 = (self.classif3(out3) + cost2)
(full_h, full_w) = ((H * 4), (W * 4))
align_corners = True
cost1 = F.interpolate(cost1, [self.max_disp, full_h, full_w], mode='trilinear', align_corners=align_corners)
cost2 = F.interpolate(cost2, [self.max_disp, full_h, full_w], mode='trilinear', align_corners=align_corners)
cost3 = F.interpolate(cost3, [self.max_disp, full_h, full_w], mode='trilinear', align_corners=align_corners)
cost1 = torch.squeeze(cost1, 1)
cost2 = torch.squeeze(cost2, 1)
cost3 = torch.squeeze(cost3, 1)
return [cost3, cost2, cost1] |
def I_gradient(numpy_image, baseline_image, model, attr_objective, fold, interp='linear'):
interpolated = interpolation(numpy_image, baseline_image, fold, mode=interp).astype(np.float32)
grad_list = np.zeros_like(interpolated, dtype=np.float32)
result_list = []
for i in range(fold):
img_tensor = torch.from_numpy(interpolated[i])
img_tensor.requires_grad_(True)
result = model(_add_batch_one(img_tensor))
target = attr_objective(result)
target.backward()
grad = img_tensor.grad.numpy()
grad_list[i] = grad
result_list.append(result)
results_numpy = np.asarray(result_list)
return (grad_list, results_numpy, interpolated) |
class labelledDataParsing():
def __init__(self, vocab_dict, file_list, super_category_list, CBT_ontology_file):
self.file_list = file_list
self.labelled_posts = {}
with open(CBT_ontology_file, 'r') as f:
self.CBT_ontology = json.load(f)
self.super_category = super_category_list
        print('loading the labelled data ... ')
for (i, file) in enumerate(self.file_list):
super_category = self.super_category[i]
with open(file) as f:
complete_post = ''
for line in f:
if (complete_post == ''):
complete_post = line.strip()
else:
complete_post = ((complete_post + ' ') + line.strip())
tokens = complete_post.split('||||')
if (len(tokens) != 4):
continue
(ID, problem, negative_take, sub_category) = (tokens[0], sub_UNK(normalize(tokens[1]), vocab_dict), sub_UNK(normalize(tokens[2]), vocab_dict), tokens[3])
if (ID not in self.labelled_posts):
self.labelled_posts[ID] = {}
self.labelled_posts[ID]['label'] = {'emotions': set(), 'thinking_errors': set(), 'situations': set()}
self.labelled_posts[ID]['problem'] = problem
self.labelled_posts[ID]['negative_take'] = negative_take
if (sub_category != 'None'):
self.labelled_posts[ID]['label'][super_category].add(polished_labels[sub_category])
complete_post = ''
print('Total size of labelled post:', len(self.labelled_posts))
print('print the data information into data_info.txt ...')
with open('Data/data_info.txt', 'w') as f:
for (super_category, values) in self.CBT_ontology.items():
f.write(('=== Super category: %s, %d sub categories totally ===\n' % (super_category, len(values))))
for sub_category in values:
count = self.frequency_count(super_category, sub_category)
f.write(('%s %d %2.2f\n' % (sub_category, count, ((count / len(self.labelled_posts)) * 100))))
f.write('\n')
for (ID, post) in self.labelled_posts.items():
post['id'] = ID
post['label']['emotions'] = list(post['label']['emotions'])
post['label']['situations'] = list(post['label']['situations'])
post['label']['thinking_errors'] = list(post['label']['thinking_errors'])
print('parsing labelled posts into NAUM_labelled_data.json ... ')
NAUM_labelled_data = self.generate_training_data_for_SkipThought()
with open('Data/NAUM_labelled_data.json', 'w') as f:
json.dump(NAUM_labelled_data, f, indent=2)
def frequency_count(self, super_category, sub_category):
count = 0
for (ID, post) in self.labelled_posts.items():
if (sub_category in post['label'][super_category]):
count += 1
return count
def generate_training_data_for_SkipThought(self):
max_length_doc = 0
(problem_max_length, negative_take_max_length) = (0, 0)
testing_data = []
for (ID, post) in self.labelled_posts.items():
problem_max_length = max(problem_max_length, len(post['problem'].split()))
negative_take_max_length = max(negative_take_max_length, len(post['negative_take'].split()))
one_labelled_data = copy.deepcopy(post)
split_sentences = []
for sent in re.split('[?!.]', post['problem']):
sent_words = sent.split()
sent_len = len(sent_words)
sent = ' '.join(sent_words)
if (sent_len == 1):
if (not re.match('^\\s*(<NUM>|<UNK>|\\d|\\w|,)\\s*$', sent)):
split_sentences.append(sent)
elif ((sent_len <= 50) and (sent_len > 0)):
split_sentences.append(sent)
elif (sent_len > 50):
cutsize = random.choice(list(range(20, 40)))
for i in range((sent_len // cutsize)):
split_sentences.append(' '.join(sent_words[(i * cutsize):((i + 1) * cutsize)]))
for sent in re.split('[?!.]', post['negative_take']):
sent_words = sent.split()
sent_len = len(sent_words)
sent = ' '.join(sent_words)
if (sent_len == 1):
if (not re.match('^\\s*(<NUM>|<UNK>|\\d|\\w|,)\\s*$', sent)):
split_sentences.append(sent)
elif ((sent_len <= 50) and (sent_len > 0)):
split_sentences.append(sent)
elif (sent_len > 50):
cutsize = random.choice(list(range(20, 40)))
for i in range((sent_len // cutsize)):
split_sentences.append(' '.join(sent_words[(i * cutsize):((i + 1) * cutsize)]))
one_labelled_data['split_sentences'] = copy.deepcopy(split_sentences)
testing_data.append(one_labelled_data)
max_length_doc = max(max_length_doc, len(split_sentences))
print((' problem contains max %d words' % problem_max_length))
print((' negative take contains max %d words' % negative_take_max_length))
        print(' After splitting for SkipThought, max number of sentences in one doc:', max_length_doc)
return testing_data |
def _configure_libraries():
disable_cv2 = int(os.environ.get('DETECTRON2_DISABLE_CV2', False))
if disable_cv2:
sys.modules['cv2'] = None
else:
os.environ['OPENCV_OPENCL_RUNTIME'] = 'disabled'
try:
import cv2
if (int(cv2.__version__.split('.')[0]) >= 3):
cv2.ocl.setUseOpenCL(False)
except ImportError:
pass |
def test_tracing_disable_with_nested_calls():
run_cell('y = 0')
run_cell('\n def f():\n return y\n ')
run_cell('\n def g(flag):\n if flag:\n return f()\n else:\n return 2\n ')
run_cell('\n g(False)\n x = g(True) + 1\n ')
run_cell('y = 42')
run_cell('logging.info(x)')
assert_detected('`x` has dep on stale `y`') |
def coverate_run_status_mp(g_code, library, cov_executor, device='cpu') -> (ExecutionStatus, bool):
CURRENT_TIME = time.time()
tmp_filename = '/tmp/tmp{}.py'.format(CURRENT_TIME)
write_code = wrap_code_with_device(g_code, library, device)
with open(tmp_filename, 'w') as f:
f.write(write_code)
(status, new_coverage) = cov_executor.run_test(tmp_filename)
if (status == 'ok'):
return (ExecutionStatus.SUCCESS, new_coverage)
elif ('timeout' in status):
return (ExecutionStatus.TIMEOUT, new_coverage)
elif ('exception' in status):
return (ExecutionStatus.EXCEPTION, new_coverage)
elif ('crash' in status):
return (ExecutionStatus.CRASH, new_coverage)
elif ('Error' in status):
return (ExecutionStatus.CRASH, new_coverage)
else:
return (ExecutionStatus.EXCEPTION, new_coverage) |
class CityscapesDataset(Pix2pixDataset):
    @staticmethod
    def modify_commandline_options(parser, is_train):
parser = Pix2pixDataset.modify_commandline_options(parser, is_train)
parser.set_defaults(preprocess_mode='fixed')
parser.set_defaults(load_size=512)
parser.set_defaults(crop_size=512)
parser.set_defaults(display_winsize=512)
parser.set_defaults(label_nc=19)
parser.set_defaults(aspect_ratio=2.0)
(opt, _) = parser.parse_known_args()
if hasattr(opt, 'num_upsampling_layers'):
parser.set_defaults(num_upsampling_layers='more')
return parser
def get_paths(self, opt, adda_mode='normal'):
if (adda_mode == 'source'):
root = opt.dataroot_source
elif (adda_mode == 'target'):
root = opt.dataroot_target
else:
root = opt.dataroot
phase = ('val' if (opt.phase == 'test') else 'train')
if opt.eval_spade:
label_dir = os.path.join(root, 'gtFinePred', phase)
else:
label_dir = os.path.join(root, 'gtFine', phase)
label_paths_all = make_dataset(label_dir, recursive=True)
label_paths = [p for p in label_paths_all if p.endswith('_labelIds.png')]
image_dir = os.path.join(root, 'leftImg8bit', phase)
image_paths = make_dataset(image_dir, recursive=True)
if (not opt.no_instance):
instance_paths = [p for p in label_paths_all if p.endswith('_instanceIds.png')]
else:
instance_paths = []
return (label_paths, image_paths, instance_paths)
def paths_match(self, path1, path2):
name1 = os.path.basename(path1)
name2 = os.path.basename(path2)
return ('_'.join(name1.split('_')[:3]) == '_'.join(name2.split('_')[:3])) |
def _build_cifar100(data_path, augmentations=True, normalize=True):
trainset = torchvision.datasets.CIFAR100(root=data_path, train=True, download=True, transform=transforms.ToTensor())
validset = torchvision.datasets.CIFAR100(root=data_path, train=False, download=True, transform=transforms.ToTensor())
if (cifar100_mean is None):
(data_mean, data_std) = _get_meanstd(trainset)
else:
(data_mean, data_std) = (cifar100_mean, cifar100_std)
transform = transforms.Compose([transforms.ToTensor(), (transforms.Normalize(data_mean, data_std) if normalize else transforms.Lambda((lambda x: x)))])
if augmentations:
transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transform])
trainset.transform = transform_train
else:
trainset.transform = transform
validset.transform = transform
return (trainset, validset) |
def test_predict_proba_one_edge():
m = Categorical([[(1.0 / 3), (1.0 / 3), (1.0 / 3)]])
f = Categorical([[0.23, 0.17, 0.6]])
X = torch.tensor([[0], [0], [1], [2]])
mask = torch.tensor([[False], [True], [True], [True]])
X_masked = torch.masked.MaskedTensor(X, mask=mask)
model = FactorGraph([f], [m], [(m, f)])
y_hat = model.predict_proba(X_masked)
assert_array_almost_equal(y_hat[0], [[0.23, 0.17, 0.6], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) |
def main(args):
def start_camera_server(serial, port):
CameraServer(serial, port=port).run()
for (serial, port) in [(CAMERA_SERIALS[0], 6000), (CAMERA_SERIALS[1], 6001)]:
Process(target=start_camera_server, args=(serial, port), daemon=True).start()
time.sleep(1.5)
def start_marker_detector_server():
MarkerDetectorServer(hostname='0.0.0.0').run()
Process(target=start_marker_detector_server, daemon=True).start()
robot_idx = (args.robot_num - 1)
def start_controller_server():
ControllerServer(robot_idx, debug=args.debug).run()
Process(target=start_controller_server, daemon=True).start()
if args.shortest_path:
TeleopShortestPath(robot_idx, debug=args.debug).run()
else:
Teleop(robot_idx).run() |
def get_option(cf, section, option, default=None):
if cf.has_option(section, option):
return cf.get(section, option)
else:
return default |
class AutoencoderKLTemporalDecoder(metaclass=DummyObject):
_backends = ['torch']
def __init__(self, *args, **kwargs):
requires_backends(self, ['torch'])
    @classmethod
    def from_config(cls, *args, **kwargs):
requires_backends(cls, ['torch'])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ['torch']) |
class MyPath(object):
    @staticmethod
    def db_root_dir(database=''):
db_names = {'cifar-10', 'stl-10', 'cifar-20', 'imagenet', 'imagenet_50', 'imagenet_100', 'imagenet_200'}
assert (database in db_names)
if (database == 'cifar-10'):
return '/data/dzy/'
elif (database == 'cifar-20'):
return '/data/dzy/'
elif (database == 'stl-10'):
return '/data/dzy/'
elif (database in ['imagenet', 'imagenet_50', 'imagenet_100', 'imagenet_200']):
return '/data/dzy/Imagenet/'
else:
raise NotImplementedError |
class GPTSanJapaneseTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['input_ids', 'attention_mask', 'token_type_ids']
def __init__(self, vocab_file, emoji_file, unk_token='<|nottoken|>', pad_token='<|separator|>', bos_token='<|startoftext|>', eos_token='<|endoftext|>', sep_token='<|segmenter|>', do_clean_text=False, **kwargs):
super().__init__(unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, do_clean_text=do_clean_text, **kwargs)
if (not os.path.isfile(vocab_file)):
raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = GPTSanJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
if (not os.path.isfile(emoji_file)):
raise ValueError(f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google pretrained model use `tokenizer = GPTSanJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
self.do_clean_text = do_clean_text
(self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji) = load_vocab_and_emoji(vocab_file, emoji_file)
self.subword_tokenizer = SubWordJapaneseTokenizer(vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)
@property
def vocab_size(self):
return len(self.raw_vocab)
def get_vocab(self):
return dict(self.raw_vocab, **self.added_tokens_encoder)
def _tokenize(self, text):
return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)
def _convert_token_to_id(self, token):
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
return self.subword_tokenizer.convert_id_to_token(index)
def convert_tokens_to_string(self, tokens):
words = []
byte_tokens = []
for word in tokens:
if ((word[:6] == '<|byte') and (word[(- 2):] == '|>')):
byte_tokens.append(int(word[6:(- 2)]))
else:
if (len(byte_tokens) > 0):
words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
byte_tokens = []
if ((word[:7] == '<|emoji') and (word[(- 2):] == '|>')):
words.append(self.emoji['emoji_inv'][word])
elif (word == '<SP>'):
words.append(' ')
elif (word == '<BR>'):
words.append('\n')
elif (word == '<TAB>'):
words.append('\t')
elif (word == '<BLOCK>'):
words.append('')
elif (word == '<KIGOU>'):
words.append('')
elif (word == '<U2000U2BFF>'):
words.append('')
elif (word == '<|bagoftoken|>'):
if (len(words) > 0):
words.append(words[(- 1)])
words.append(words[(- 1)])
words.append(words[(- 1)])
elif (word.startswith('<|') and word.endswith('|>')):
words.append('')
else:
words.append(word)
if (len(byte_tokens) > 0):
words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
text = ''.join(words)
return text
def _build_conversation_input_ids(self, conversation: 'Conversation') -> List[int]:
input_ids = []
for (is_user, text) in conversation.iter_texts():
input_ids.extend((self.encode(text, add_special_tokens=False) + [self.eos_token_id]))
if (len(input_ids) > self.model_max_length):
input_ids = input_ids[(- self.model_max_length):]
return input_ids
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
index = 0
if os.path.isdir(save_directory):
vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
emoji_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file']))
else:
vocab_file = ((((filename_prefix + '-') if filename_prefix else '') + save_directory) + VOCAB_FILES_NAMES['vocab_file'])
emoji_file = ((((filename_prefix + '-') if filename_prefix else '') + save_directory) + VOCAB_FILES_NAMES['emoji_file'])
with open(vocab_file, 'w', encoding='utf-8') as writer:
for (token_index, token) in self.ids_to_tokens.items():
if (index != token_index):
logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')
index = token_index
writer.write((','.join(token) + '\n'))
index += 1
with open(emoji_file, 'w', encoding='utf-8') as writer:
json.dump(self.emoji, writer)
return (vocab_file, emoji_file)
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
prefix_len = 0
if (self.sep_token in self.vocab):
segid = self.vocab[self.sep_token]
if (segid in token_ids_0):
prefix_len = token_ids_0.index(segid)
if (token_ids_1 is None):
total_len = len(token_ids_0)
else:
total_len = len((token_ids_0 + token_ids_1))
return ((prefix_len * [1]) + ((total_len - prefix_len) * [0]))
def prepare_for_tokenization(self, text, prefix_text=None, add_sep_token=None, **kwargs):
if (add_sep_token is None):
add_sep_token = (self.sep_token not in text)
prepared = (self.bos_token if (self.bos_token in self.vocab) else '')
prepared += (prefix_text if (prefix_text is not None) else '')
if add_sep_token:
prepared += (self.sep_token if (self.sep_token in self.vocab) else '')
prepared += text
return (prepared, kwargs)
def _batch_encode_plus(self, batch_text_or_text_pairs: Union[(List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair])], add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:
if ((type(batch_text_or_text_pairs[0]) is tuple) or (type(batch_text_or_text_pairs[0]) is list)):
batch_prefix_texts = []
for (pref, txt) in batch_text_or_text_pairs:
batch_prefix_texts.append(((pref + self.sep_token) + txt))
batch_text_or_text_pairs = batch_prefix_texts
return super()._batch_encode_plus(batch_text_or_text_pairs, add_special_tokens, padding_strategy, truncation_strategy, max_length, stride, is_split_into_words, pad_to_multiple_of, return_tensors, return_token_type_ids, return_attention_mask, return_overflowing_tokens, return_special_tokens_mask, return_offsets_mapping, return_length, verbose) |
def list_pred_files(pred_dir):
ret = []
for file_name in sorted(os.listdir(pred_dir)):
if PRED_FILE_RE.match(file_name):
ret.append(file_name.split('.json')[0])
return ret |
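An illustrative call, assuming `PRED_FILE_RE` is a module-level regex over prediction-file names; the pattern below is a hypothetical stand-in.
import os
import re
import tempfile
PRED_FILE_RE = re.compile(r'.*\.json$')  # hypothetical stand-in for the real pattern
with tempfile.TemporaryDirectory() as pred_dir:
    for name in ['run1.json', 'run2.json', 'notes.txt']:
        open(os.path.join(pred_dir, name), 'w').close()
    print(list_pred_files(pred_dir))  # ['run1', 'run2'] |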
def one_hot(tensor, num_classes):
'Workaround for F.one_hot on PyTorch versions older than 1.9.'
if (tuple(int(v) for v in torch.__version__.split('+')[0].split('.')[:2]) >= (1, 9)):
return F.one_hot(tensor, num_classes=num_classes)
else:
assert (num_classes > 0), 'num_classes must be a positive integer'
ret = torch.zeros((tensor.shape + (num_classes,)), device=tensor.device, dtype=tensor.dtype)
ret.scatter_((- 1), tensor.unsqueeze((- 1)), 1)
return ret |
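A quick sanity check for the `one_hot` helper; both the new and fallback paths produce the same encoding.
import torch
labels = torch.tensor([0, 2, 1])
encoded = one_hot(labels, num_classes=3)
print(encoded)
# tensor([[1, 0, 0],
#         [0, 0, 1],
#         [0, 1, 0]])
print(encoded.shape)  # torch.Size([3, 3]) |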
('AGENT_6')
class AGENT_6(BaseAgent):
type = PolicyType.MLP
features_extractor_class = None
features_extractor_kwargs = None
net_arch = [128, 64, 64, dict(pi=[64, 64], vf=[64, 64])]
activation_fn = nn.ReLU |
def is_sorted(arr):
return (np.all((np.sort(arr) == arr)) or np.all((np.sort(arr)[::(- 1)] == arr))) |
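A brief usage sketch; the helper treats both ascending and descending arrays as sorted.
import numpy as np
print(is_sorted(np.array([1, 2, 3, 4])))  # True (ascending)
print(is_sorted(np.array([4, 3, 2, 1])))  # True (descending)
print(is_sorted(np.array([1, 3, 2])))  # False |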
def remove_number_of_samples_from_shape(shape: str) -> str:
shape_as_list = shape.split(',')
if (len(shape_as_list) > 3):
shape_as_list.pop(0)
return ','.join(shape_as_list) |
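An illustrative call: a four-element shape string loses its leading (sample-count) dimension, while shorter shapes pass through unchanged.
print(remove_number_of_samples_from_shape('32,3,224,224'))  # '3,224,224'
print(remove_number_of_samples_from_shape('3,224,224'))  # '3,224,224' (unchanged) |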
class EncryptedDataFrameReader():
def __init__(self, bigdl_type, df_reader):
self.bigdl_type = bigdl_type
self.df_reader = df_reader
def schema(self, value):
self.df_reader = callBigDlFunc(self.bigdl_type, 'schema', self.df_reader, value)
return self
def option(self, key, value):
self.df_reader = callBigDlFunc(self.bigdl_type, 'option', self.df_reader, key, value)
return self
def csv(self, path):
return callBigDlFunc(self.bigdl_type, 'csv', self.df_reader, path)
def parquet(self, path):
return callBigDlFunc(self.bigdl_type, 'parquet', self.df_reader, path)
def json(self, path):
return callBigDlFunc(self.bigdl_type, 'json', self.df_reader, path) |
class Agent():
def __init__(self, device_name, model_name, num_observations, num_envs, num_threads, data_path):
assert (num_envs == 1)
if (model_name is None):
model_name = DEFAULT_MODEL
self.device = torch.device(device_name)
self.net = fc.FC_DQN(num_observations, NUM_ACTIONS)
self.net.train(False)
self.net.load_state_dict(torch.load(model_name, map_location=self.device))
self.net.to(self.device)
def pre_step(self, observation):
state_v = torch.FloatTensor([observation]).to(self.device)
q_vals_v = self.net(state_v)
(_, act_v) = torch.max(q_vals_v, dim=1)
action = int(act_v.item())
return action
def post_step(self, new_observation, action, reward, mean_reward, is_done, mean_success):
return 0
def stop(self):
pass |
class RSU5(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU5, self).__init__()
self.rebnconvin = REBNCONV(in_ch, out_ch, dirate=1)
self.rebnconv1 = REBNCONV(out_ch, mid_ch, dirate=1)
self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)
self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=2)
self.rebnconv4d = REBNCONV((mid_ch * 2), mid_ch, dirate=1)
self.rebnconv3d = REBNCONV((mid_ch * 2), mid_ch, dirate=1)
self.rebnconv2d = REBNCONV((mid_ch * 2), mid_ch, dirate=1)
self.rebnconv1d = REBNCONV((mid_ch * 2), out_ch, dirate=1)
def forward(self, x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx5 = self.rebnconv5(hx4)
hx4d = self.rebnconv4d(torch.cat((hx5, hx4), 1))
hx4dup = _upsample_like(hx4d, hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1))
hx3dup = _upsample_like(hx3d, hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))
hx2dup = _upsample_like(hx2d, hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))
return (hx1d + hxin) |
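A shape sanity check for the RSU5 block, assuming `REBNCONV` and `_upsample_like` from the same module are importable; the input size is illustrative.
import torch
block = RSU5(in_ch=3, mid_ch=12, out_ch=3)
x = torch.randn(1, 3, 64, 64)
with torch.no_grad():
    y = block(x)
print(y.shape)  # torch.Size([1, 3, 64, 64]) -- output keeps the input's spatial size |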
class EffiDwsConvUnit(nn.Module):
def __init__(self, in_channels, out_channels, stride, bn_eps, activation, tf_mode):
super(EffiDwsConvUnit, self).__init__()
self.tf_mode = tf_mode
self.residual = ((in_channels == out_channels) and (stride == 1))
self.dw_conv = dwconv3x3_block(in_channels=in_channels, out_channels=in_channels, padding=(0 if tf_mode else 1), bn_eps=bn_eps, activation=activation)
self.se = SEBlock(channels=in_channels, reduction=4, activation=activation)
self.pw_conv = conv1x1_block(in_channels=in_channels, out_channels=out_channels, bn_eps=bn_eps, activation=None)
def forward(self, x):
if self.residual:
identity = x
if self.tf_mode:
x = F.pad(x, pad=calc_tf_padding(x, kernel_size=3))
x = self.dw_conv(x)
x = self.se(x)
x = self.pw_conv(x)
if self.residual:
x = (x + identity)
return x |
def read_one_file(dir, fp):
with open(os.path.join(dir, fp), 'r') as fd:
lines = fd.read()
line_dict = json.loads(lines)
doc_parse = extract_parse(line_dict)
for sent_parse in doc_parse:
sent_tree = read_single_parse_tree(sent_parse)
print(sent_parse)
tree_len = len(sent_tree.text)
del_bag = find_deletable_span_rule_based_updated(sent_tree, root_len=tree_len, parent=None, grand_parent=None)
print(' '.join(sent_tree.text))
print(del_bag)
print(('-' * 50)) |
def get_opencl_binary_output_path(library_name, target_abi, device):
target_soc = device.target_socs
device_name = device.device_name
return ('%s/%s/%s/%s/%s_%s.%s.%s.bin' % (BUILD_OUTPUT_DIR, library_name, OUTPUT_OPENCL_BINARY_DIR_NAME, target_abi, library_name, OUTPUT_OPENCL_BINARY_FILE_NAME, device_name, target_soc)) |
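A hedged example of the path this helper produces; the constants and device fields below are hypothetical stand-ins for the module-level values it expects.
from types import SimpleNamespace
BUILD_OUTPUT_DIR = 'build'  # hypothetical stand-in
OUTPUT_OPENCL_BINARY_DIR_NAME = 'opencl'  # hypothetical stand-in
OUTPUT_OPENCL_BINARY_FILE_NAME = 'compiled_opencl_kernel'  # hypothetical stand-in
device = SimpleNamespace(target_socs='sdm845', device_name='phone')
print(get_opencl_binary_output_path('mobilenet', 'armeabi-v7a', device))
# build/mobilenet/opencl/armeabi-v7a/mobilenet_compiled_opencl_kernel.phone.sdm845.bin |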
def code2lcrs(code, language='python'):
ts = TS(code, language)
lcrs_tokens = ts.gen_lcrs_representation()
return lcrs_tokens |
class TensorNetwork():
def __init__(self):
self.tensors = []
self.bonds = []
self.total_memory = 0.0
self.max_memory = 0.0
self.cpu_cost = 0.0
def __str__(self):
s = ''
for (i, t) in enumerate(self.tensors):
s += 'tensor {0} : {1}\n'.format(i, t)
for (i, b) in enumerate(self.bonds):
s += 'bond {0} : {1}, {2} {3}\n'.format(i, BOND_NAMES[i], b, BOND_DIMS[i])
s += 'memory : {0}\n'.format(self.total_memory)
s += 'cpu : {0}\n'.format(self.cpu_cost)
return s
def clone(self):
tn = TensorNetwork()
tn.total_memory = self.total_memory
tn.max_memory = self.max_memory
tn.cpu_cost = self.cpu_cost
tn.bonds = [Bond(b.t0, b.t1) for b in self.bonds]
tn.tensors = [Tensor(t.name, t.bonds) for t in self.tensors]
return tn
def output_log(self, prefix=''):
if (not (prefix == '')):
prefix += ' '
for (i, t) in enumerate(self.tensors):
logging.info((prefix + 'tensor{0} : {1} {2}'.format(i, TENSOR_NAMES[i], t.bonds)))
for (i, b) in enumerate(self.bonds):
logging.info((prefix + 'bond{0} : {1} {2} {3}'.format(i, BOND_NAMES[i], b, BOND_DIMS[i])))
def add_tensor(self, t_name, b_names):
t_index = len(self.tensors)
b_indexs = []
for b in b_names:
if (b not in BOND_NAMES):
self.bonds.append(Bond())
BOND_NAMES.append(b)
BOND_DIMS.append(config.DEFAULT_BOND_DIM)
i = BOND_NAMES.index(b)
self.bonds[i].connect(t_index)
b_indexs.append(i)
TENSOR_NAMES.append(t_name)
self.tensors.append(Tensor(t_index, b_indexs))
def find_bonds(self, tensor_a, tensor_b):
bonds_a = self.tensors[tensor_a].bonds
bonds_b = self.tensors[tensor_b].bonds
contract = [b for b in bonds_a if (b in bonds_b)]
replaced_a = [b for b in bonds_a if (b not in bonds_b)]
replaced_b = [b for b in bonds_b if (b not in bonds_a)]
return (contract, replaced_a, replaced_b)
def contract(self, t0, t1, bc, br0, br1):
tn = self.clone()
t_new = tn.tensors[t0]
t_new.name = ((self.tensors[t0].name + self.tensors[t1].name) + [(- 1)])
for b in bc:
t_new.bonds.remove(b)
for b in br1:
t_new.bonds.append(b)
tn.tensors[t1] = Tensor()
bonds = tn.bonds
for b in bc:
bonds[b].t0 = bonds[b].t1 = (- 1)
old_idx = t1
new_idx = t0
for b in br1:
if (bonds[b].t0 == old_idx):
bonds[b].t0 = new_idx
elif (bonds[b].t1 == old_idx):
bonds[b].t1 = new_idx
return tn |
def test_digits_euclidean_two_stage():
model = SumRedundancySelection(100, 'euclidean', optimizer='two-stage')
model.fit(X_digits)
assert_array_equal(model.ranking, digits_euclidean_ranking)
assert_array_almost_equal(model.gains, digits_euclidean_gains, 4)
assert_array_almost_equal(model.subset, X_digits[model.ranking]) |
class TestSweep(unittest.TestCase):
def test_job(self):
train_args = {'foo': 'bar'}
sweep_output_dir = f'/tmp/{str(uuid.uuid4())}'
job = sweep.Job(train_args, sweep_output_dir)
self.assertTrue(job.output_dir.startswith(sweep_output_dir))
self.assertEqual(job.state, sweep.Job.NOT_LAUNCHED)
self.assertEqual(job.command_str, f'python -m domainbed.scripts.train --foo bar --output_dir {job.output_dir}')
def test_job_launch(self):
train_args = {'foo': 'bar'}
sweep_output_dir = f'/tmp/{str(uuid.uuid4())}'
job = sweep.Job(train_args, sweep_output_dir)
launcher_fn_called = False
def launcher_fn(commands):
nonlocal launcher_fn_called
launcher_fn_called = True
self.assertEqual(len(commands), 1)
self.assertEqual(commands[0], job.command_str)
sweep.Job.launch([job], launcher_fn)
self.assertTrue(launcher_fn_called)
job = sweep.Job(train_args, sweep_output_dir)
self.assertEqual(job.state, sweep.Job.INCOMPLETE)
def test_job_delete(self):
train_args = {'foo': 'bar'}
sweep_output_dir = f'/tmp/{str(uuid.uuid4())}'
job = sweep.Job(train_args, sweep_output_dir)
sweep.Job.launch([job], (lambda commands: None))
sweep.Job.delete([job])
job = sweep.Job(train_args, sweep_output_dir)
self.assertEqual(job.state, sweep.Job.NOT_LAUNCHED)
def test_make_args_list(self):
args_list = sweep.make_args_list(n_trials=2, dataset_names=['Debug28'], algorithms=['ERM'], n_hparams_from=0, n_hparams=3, steps=123, data_dir='/tmp/data', task='domain_generalization', holdout_fraction=0.2, single_test_envs=False, hparams=None)
assert (len(args_list) == ((2 * 3) * (3 + 3)))
@unittest.skipIf(('DATA_DIR' not in os.environ), 'needs DATA_DIR environment variable')
def test_end_to_end(self):
output_dir = os.path.join('/tmp', str(uuid.uuid4()))
result = subprocess.run(f"python -m domainbed.scripts.sweep launch --data_dir={os.environ['DATA_DIR']} --output_dir={output_dir} --algorithms ERM --datasets Debug28 --n_hparams 1 --n_trials 1 --command_launcher dummy --skip_confirmation", shell=True, capture_output=True)
stdout_lines = result.stdout.decode('utf8').split('\n')
dummy_launcher_lines = [l for l in stdout_lines if l.startswith('Dummy launcher:')]
self.assertEqual(len(dummy_launcher_lines), 6)
result = subprocess.run(f"python -m domainbed.scripts.sweep launch --data_dir={os.environ['DATA_DIR']} --output_dir={output_dir} --algorithms ERM --datasets Debug28 --n_hparams 1 --n_trials 1 --command_launcher dummy --skip_confirmation", shell=True, capture_output=True)
stdout_lines = result.stdout.decode('utf8').split('\n')
dummy_launcher_lines = [l for l in stdout_lines if l.startswith('Dummy launcher:')]
self.assertEqual(len(dummy_launcher_lines), 0)
subprocess.run(f"python -m domainbed.scripts.sweep delete_incomplete --data_dir={os.environ['DATA_DIR']} --output_dir={output_dir} --algorithms ERM --datasets Debug28 --n_hparams 1 --n_trials 1 --command_launcher dummy --skip_confirmation", shell=True, capture_output=True)
result = subprocess.run(f"python -m domainbed.scripts.sweep launch --data_dir={os.environ['DATA_DIR']} --output_dir={output_dir} --algorithms ERM --datasets Debug28 --n_hparams 1 --n_trials 1 --command_launcher dummy --skip_confirmation", shell=True, capture_output=True)
stdout_lines = result.stdout.decode('utf8').split('\n')
dummy_launcher_lines = [l for l in stdout_lines if l.startswith('Dummy launcher:')]
self.assertEqual(len(dummy_launcher_lines), 6) |
class LegacySwap(TransformationPass):
def __init__(self, coupling_map, initial_layout=None, trials=20, seed=None):
super().__init__()
self.coupling_map = coupling_map
self.initial_layout = initial_layout
self.trials = trials
self.seed = seed
def run(self, dag):
if (dag.width() > self.coupling_map.size()):
raise TranspilerError('Not enough qubits in CouplingGraph')
layerlist = list(dag.layers())
if ((self.initial_layout is None) and self.property_set['layout']):
self.initial_layout = self.property_set['layout']
if (self.initial_layout is not None):
virtual_qubits = self.initial_layout.get_virtual_bits()
self.initial_layout = {(v.register.name, v.index): ('q', self.initial_layout[v]) for v in virtual_qubits}
device_register = QuantumRegister(self.coupling_map.size(), 'q')
initial_layout = {dag.qregs[k[0]][k[1]]: device_register[v[1]] for (k, v) in self.initial_layout.items()}
circ_qubits = dag.qubits()
coup_qubits = [(QuantumRegister(self.coupling_map.size(), 'q'), wire) for wire in self.coupling_map.physical_qubits]
qubit_subset = []
for (k, v) in initial_layout.items():
qubit_subset.append(v)
if (k not in circ_qubits):
raise TranspilerError(('initial_layout qubit %s[%d] not in input DAGCircuit' % (k[0].name, k[1])))
if (v not in coup_qubits):
raise TranspilerError(('initial_layout qubit %s[%d] not in input CouplingGraph' % (v[0].name, v[1])))
else:
qubit_subset = [QuantumRegister(self.coupling_map.size(), 'q')[wire] for wire in self.coupling_map.physical_qubits]
qubit_subset = qubit_subset[0:dag.width()]
initial_layout = {a: b for (a, b) in zip(dag.qubits(), qubit_subset)}
layout = initial_layout.copy()
dagcircuit_output = DAGCircuit()
dagcircuit_output.name = dag.name
dagcircuit_output.add_qreg(QuantumRegister(self.coupling_map.size(), 'q'))
for creg in dag.cregs.values():
dagcircuit_output.add_creg(creg)
identity_wire_map = {}
q = QuantumRegister(self.coupling_map.size(), 'q')
for j in range(self.coupling_map.size()):
identity_wire_map[q[j]] = q[j]
for creg in dag.cregs.values():
for j in range(creg.size):
identity_wire_map[creg[j]] = creg[j]
first_layer = True
for (i, layer) in enumerate(layerlist):
(success_flag, best_circ, best_d, best_layout, trivial_flag) = self.layer_permutation(layer['partition'], layout, qubit_subset)
if (not success_flag):
serial_layerlist = list(layer['graph'].serial_layers())
for (j, serial_layer) in enumerate(serial_layerlist):
(success_flag, best_circ, best_d, best_layout, trivial_flag) = self.layer_permutation(serial_layer['partition'], layout, qubit_subset)
if (not success_flag):
raise TranspilerError(('swap_mapper failed: ' + ('layer %d, sublayer %d' % (i, j))))
if (trivial_flag and first_layer):
continue
layout = best_layout
dagcircuit_output.compose_back(self.swap_mapper_layer_update(j, first_layer, best_layout, best_d, best_circ, serial_layerlist), identity_wire_map)
if first_layer:
initial_layout = layout
first_layer = False
else:
layout = best_layout
dagcircuit_output.compose_back(self.swap_mapper_layer_update(i, first_layer, best_layout, best_d, best_circ, layerlist), identity_wire_map)
if first_layer:
initial_layout = layout
first_layer = False
if first_layer:
layout = initial_layout
for (i, layer) in enumerate(layerlist):
dagcircuit_output.compose_back(layer['graph'], layout)
return dagcircuit_output
def layer_permutation(self, layer_partition, layout, qubit_subset):
if (self.seed is None):
self.seed = np.random.randint(0, np.iinfo(np.int32).max)
rng = np.random.RandomState(self.seed)
rev_layout = {b: a for (a, b) in layout.items()}
gates = []
for layer in layer_partition:
if (len(layer) > 2):
raise TranspilerError('Layer contains >2 qubit gates')
if (len(layer) == 2):
gates.append(tuple(layer))
dist = sum([self.coupling_map.distance(layout[g[0]].index, layout[g[1]].index) for g in gates])
if (dist == len(gates)):
circ = DAGCircuit()
circ.add_qreg(QuantumRegister(self.coupling_map.size(), 'q'))
return (True, circ, 0, layout, bool(gates))
n = self.coupling_map.size()
best_d = sys.maxsize
best_circ = None
best_layout = None
QR = QuantumRegister(self.coupling_map.size(), 'q')
for _ in range(self.trials):
trial_layout = layout.copy()
rev_trial_layout = rev_layout.copy()
trial_circ = DAGCircuit()
trial_circ.add_qreg(QR)
xi = {}
for i in self.coupling_map.physical_qubits:
xi[(QR, i)] = {}
for i in self.coupling_map.physical_qubits:
i = (QR, i)
for j in self.coupling_map.physical_qubits:
j = (QR, j)
scale = (1 + rng.normal(0, (1 / n)))
xi[i][j] = (scale * (self.coupling_map.distance(i[1], j[1]) ** 2))
xi[j][i] = xi[i][j]
d = 1
circ = DAGCircuit()
circ.add_qreg(QR)
identity_wire_map = {QR[j]: QR[j] for j in range(n)}
while (d < ((2 * n) + 1)):
qubit_set = set(qubit_subset)
while qubit_set:
min_cost = sum([xi[trial_layout[g[0]]][trial_layout[g[1]]] for g in gates])
progress_made = False
for e in self.coupling_map.get_edges():
e = [QR[edge] for edge in e]
if ((e[0] in qubit_set) and (e[1] in qubit_set)):
new_layout = trial_layout.copy()
new_layout[rev_trial_layout[e[0]]] = e[1]
new_layout[rev_trial_layout[e[1]]] = e[0]
rev_new_layout = rev_trial_layout.copy()
rev_new_layout[e[0]] = rev_trial_layout[e[1]]
rev_new_layout[e[1]] = rev_trial_layout[e[0]]
new_cost = sum([xi[new_layout[g[0]]][new_layout[g[1]]] for g in gates])
if (new_cost < min_cost):
progress_made = True
min_cost = new_cost
opt_layout = new_layout
rev_opt_layout = rev_new_layout
opt_edge = e
if progress_made:
qubit_set.remove(opt_edge[0])
qubit_set.remove(opt_edge[1])
trial_layout = opt_layout
rev_trial_layout = rev_opt_layout
circ.apply_operation_back(SwapGate(), [opt_edge[0], opt_edge[1]], [])
else:
break
dist = sum([self.coupling_map.distance(trial_layout[g[0]].index, trial_layout[g[1]].index) for g in gates])
if (dist == len(gates)):
trial_circ.compose_back(circ, identity_wire_map)
break
d += 1
dist = sum([self.coupling_map.distance(trial_layout[g[0]].index, trial_layout[g[1]].index) for g in gates])
if (dist == len(gates)):
if (d < best_d):
best_circ = trial_circ
best_layout = trial_layout
best_d = min(best_d, d)
if (best_circ is None):
return (False, None, None, None, False)
return (True, best_circ, best_d, best_layout, False)
def swap_mapper_layer_update(self, i, first_layer, best_layout, best_d, best_circ, layer_list):
layout = best_layout
dagcircuit_output = DAGCircuit()
QR = QuantumRegister(self.coupling_map.size(), 'q')
dagcircuit_output.add_qreg(QR)
identity_wire_map = {QR[j]: QR[j] for j in range(self.coupling_map.size())}
if first_layer:
for j in range((i + 1)):
dagcircuit_output.compose_back(layer_list[j]['graph'], layout)
else:
if (best_d > 0):
dagcircuit_output.compose_back(best_circ, identity_wire_map)
dagcircuit_output.compose_back(layer_list[i]['graph'], layout)
return dagcircuit_output |
def argsparser():
parser = argparse.ArgumentParser('Tensorflow Implementation of GAIL')
parser.add_argument('--env_id', help='environment ID', default='Hopper-v2')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--expert_path', type=str, default='data/deterministic.trpo.Hopper.0.00.npz')
parser.add_argument('--checkpoint_dir', help='the directory to save model', default='checkpoint')
parser.add_argument('--log_dir', help='the directory to save log file', default='log')
parser.add_argument('--load_model_path', help='if provided, load the model', type=str, default=None)
parser.add_argument('--task', type=str, choices=['train', 'evaluate', 'sample'], default='train')
boolean_flag(parser, 'stochastic_policy', default=False, help='use stochastic/deterministic policy to evaluate')
boolean_flag(parser, 'save_sample', default=False, help='save the trajectories or not')
parser.add_argument('--traj_limitation', type=int, default=(- 1))
parser.add_argument('--g_step', help='number of steps to train policy in each epoch', type=int, default=3)
parser.add_argument('--d_step', help='number of steps to train discriminator in each epoch', type=int, default=1)
parser.add_argument('--policy_hidden_size', type=int, default=100)
parser.add_argument('--adversary_hidden_size', type=int, default=100)
parser.add_argument('--algo', type=str, choices=['trpo', 'ppo'], default='trpo')
parser.add_argument('--max_kl', type=float, default=0.01)
parser.add_argument('--policy_entcoeff', help='entropy coefficient of policy', type=float, default=0)
parser.add_argument('--adversary_entcoeff', help='entropy coefficient of discriminator', type=float, default=0.001)
parser.add_argument('--save_per_iter', help='save model every xx iterations', type=int, default=100)
parser.add_argument('--num_timesteps', help='number of timesteps per episode', type=int, default=5000000)
boolean_flag(parser, 'pretrained', default=False, help='Use BC to pretrain')
parser.add_argument('--BC_max_iter', help='Max iteration for training BC', type=int, default=10000)
return parser.parse_args() |
def weight_initialization(weight, init, activation):
if (init is None):
return
if (init == 'kaiming'):
assert (not (activation is None))
if hasattr(activation, 'negative_slope'):
kaiming_normal(weight, a=activation.negative_slope)
else:
kaiming_normal(weight, a=0)
elif (init == 'xavier'):
xavier_normal(weight)
return |
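A minimal sketch of applying `weight_initialization` to a fresh layer, assuming `kaiming_normal` and `xavier_normal` come from `torch.nn.init` as in the surrounding module (they are deprecated aliases of `kaiming_normal_`/`xavier_normal_` on recent PyTorch).
import torch.nn as nn
from torch.nn.init import kaiming_normal, xavier_normal  # names the helper is assumed to rely on
layer = nn.Linear(256, 128)
act = nn.LeakyReLU(negative_slope=0.2)
weight_initialization(layer.weight, 'kaiming', act)  # uses act.negative_slope
weight_initialization(nn.Linear(256, 128).weight, 'xavier', None)  # activation unused for xavier |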
@given(events=subsets(_ALL_EVENTS_WITH_HANDLERS))  # assumed hypothesis-style decorator
@_events_with_registered_handlers_to_subset
def test_recorded_events_two_stmts(events):
assert (_RECORDED_EVENTS == [])
run_cell('x = [1, 2, 3]')
run_cell('logging.info(x)')
throw_and_print_diff_if_recorded_not_equal_to(filter_events_to_subset([TraceEvent.init_module, TraceEvent.before_stmt, TraceEvent.before_assign_rhs, TraceEvent.before_list_literal, *([TraceEvent.list_elt] * 3), TraceEvent.after_list_literal, TraceEvent.after_assign_rhs, TraceEvent.after_stmt, TraceEvent.after_module_stmt, TraceEvent.init_module, TraceEvent.before_stmt, TraceEvent.before_load_complex_symbol, TraceEvent.load_name, TraceEvent.before_attribute_load, TraceEvent.after_attribute_load, TraceEvent.before_call, TraceEvent.load_name, TraceEvent.after_argument, TraceEvent.after_call, TraceEvent.after_load_complex_symbol, TraceEvent.after_stmt, TraceEvent.after_module_stmt], events)) |
def init_node(n):
node = {}
node['node'] = n
node['inputs'] = []
node['outputs'] = []
return node |
class Time_usage_training():
def __init__(self):
self.start_time = 0
self.end_time = 0
def init(self, algorithm=None):
self.start_time = 0
self.end_time = 0
self.start_time = time.time()
def start(self, algorithm=None):
self.start_time = 0
self.end_time = 0
self.start_time = time.time()
def stop(self, algorithm=None):
self.end_time = time.time()
def result(self):
return ('Training time:', (self.end_time - self.start_time))
def reset(self):
pass |
def interp_sheet(G, num_per_sheet, num_midpoints, num_classes, parallel, samples_root, experiment_name, folder_number, sheet_number=0, fix_z=False, fix_y=False, device='cuda'):
if fix_z:
zs = torch.randn(num_per_sheet, 1, G.dim_z, device=device)
zs = zs.repeat(1, (num_midpoints + 2), 1).view((- 1), G.dim_z)
else:
zs = interp(torch.randn(num_per_sheet, 1, G.dim_z, device=device), torch.randn(num_per_sheet, 1, G.dim_z, device=device), num_midpoints).view((- 1), G.dim_z)
if fix_y:
ys = sample_1hot(num_per_sheet, num_classes)
ys = G.shared(ys).view(num_per_sheet, 1, (- 1))
ys = ys.repeat(1, (num_midpoints + 2), 1).view((num_per_sheet * (num_midpoints + 2)), (- 1))
else:
ys = interp(G.shared(sample_1hot(num_per_sheet, num_classes)).view(num_per_sheet, 1, (- 1)), G.shared(sample_1hot(num_per_sheet, num_classes)).view(num_per_sheet, 1, (- 1)), num_midpoints).view((num_per_sheet * (num_midpoints + 2)), (- 1))
if G.fp16:
zs = zs.half()
with torch.no_grad():
if parallel:
out_ims = nn.parallel.data_parallel(G, (zs, ys)).data.cpu()
else:
out_ims = G(zs, ys).data.cpu()
interp_style = (('' + ('Z' if (not fix_z) else '')) + ('Y' if (not fix_y) else ''))
image_filename = ('%s/%s/%d/interp%s%d.jpg' % (samples_root, experiment_name, folder_number, interp_style, sheet_number))
torchvision.utils.save_image(out_ims, image_filename, nrow=(num_midpoints + 2), normalize=True) |