code stringlengths 101 5.91M |
|---|
def embed(params, data, policy, states, k=100):
    """Build an embedding matrix for *states* using the configured strategy.

    Args:
        params: config dict; only params['embedding'] is read here.
        data: unused by the 'a_s' strategy (kept for interface compatibility).
        policy: object exposing forward(x, eval=False) returning array-like rows.
        states: iterable of inputs fed one at a time to policy.forward.
        k: unused here (kept for interface compatibility).

    Returns:
        np.ndarray: the per-state policy outputs concatenated along axis 0.

    Raises:
        ValueError: for an unknown params['embedding'] value (the original
            fell off the branch and raised NameError on the unbound local).
    """
    if params['embedding'] == 'a_s':
        return np.concatenate([policy.forward(x, eval=False) for x in states], axis=0)
    raise ValueError('Unknown embedding strategy: {!r}'.format(params['embedding']))
class Conv2dIndepNormal(_DeepIndepNormal):
    """Convolutional diagonal-Normal model: one shared backbone feeding two
    structurally identical heads for the mean and the log-std."""

    def __init__(self, backbone: nn.Module, hidden_channels: List[int], out_channels: int=1, logstd_ref: float=(- 5.0), **kwargs):
        # Both heads are built from the same hidden-channel specification.
        mean_head = _create_head(hidden_channels, out_channels, **kwargs)
        logstd_head = _create_head(hidden_channels, out_channels, **kwargs)
        super().__init__(backbone=backbone, mean_head=mean_head, logstd_head=logstd_head, logstd_ref=logstd_ref)
@_task('dummy_masked_lm')
class DummyMaskedLMTask(FairseqTask):
    """Synthetic masked-LM task that serves a single fixed dummy batch,
    useful for benchmarking without real data.

    Fixes vs. the dumped source: the registration call and the
    staticmethod/classmethod/property decorators (required by the
    FairseqTask API) had been stripped to bare calls/methods.
    """

    @staticmethod
    def add_args(parser):
        """Register task-specific command-line arguments."""
        parser.add_argument('--dict-size', default=50000, type=int)
        parser.add_argument('--dataset-size', default=100000, type=int)
        parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments per sample for BERT dataset')

    def __init__(self, args, dictionary):
        super().__init__(args)
        self.dictionary = dictionary
        self.seed = args.seed
        self.mask_idx = dictionary.add_symbol('<mask>')
        # Dictionary size must be a multiple of 8 (efficient fp16 embeddings).
        assert len(dictionary) % 8 == 0
        mask_idx = 0
        pad_idx = 1
        # Deterministic token ids, masked at every 7th position from index 2.
        seq = torch.arange(args.tokens_per_sample) + pad_idx + 1
        mask = torch.arange(2, args.tokens_per_sample, 7)
        src = seq.clone()
        src[mask] = mask_idx
        tgt = torch.full_like(seq, pad_idx)
        tgt[mask] = seq[mask]
        self.dummy_src = src
        self.dummy_tgt = tgt

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Build the dummy dictionary and construct the task (fairseq entry point)."""
        dictionary = Dictionary()
        for i in range(args.dict_size):
            dictionary.add_symbol('word{}'.format(i))
        print('| dictionary: {} types'.format(len(dictionary)))
        return cls(args, dictionary)

    def load_dataset(self, split, epoch=0, combine=False, **kwargs):
        """Populate self.datasets[split] with the fixed batch repeated."""
        bsz = self.args.max_sentences
        self.datasets[split] = DummyDataset({'id': 1, 'net_input': {'src_tokens': torch.stack([self.dummy_src for _ in range(bsz)]), 'src_lengths': torch.full((bsz,), self.args.tokens_per_sample)}, 'target': torch.stack([self.dummy_tgt for _ in range(bsz)]), 'nsentences': bsz, 'ntokens': (bsz * self.args.tokens_per_sample)}, num_items=self.args.dataset_size, item_size=self.args.tokens_per_sample)

    @property
    def source_dictionary(self):
        return self.dictionary

    @property
    def target_dictionary(self):
        return self.dictionary
def binary_surprise(x: Union[float, ArrayLike], expected_mean: Union[float, ArrayLike]) -> ArrayLike:
    """Surprise (negative log-likelihood) of binary outcome(s) *x* under a
    Bernoulli model with mean *expected_mean*.

    Where x is nonzero the surprise is -log(p); where it is zero, -log(1 - p).
    Broadcasts elementwise over array inputs.
    """
    surprise_if_one = -jnp.log(expected_mean)
    surprise_if_zero = -jnp.log(jnp.array(1.0) - expected_mean)
    return jnp.where(x, surprise_if_one, surprise_if_zero)
def test_sudoku_animation(sudoku_env: Sudoku, mocker: pytest_mock.MockerFixture) -> None:
    """animate() should produce a matplotlib Animation even when the state
    sequence is only a mock."""
    mock_states = mocker.MagicMock()
    result = sudoku_env.animate(mock_states)
    assert isinstance(result, matplotlib.animation.Animation)
def rearrange(csv_file_path, mode=''):
    """Load a CSV, move its first column to index 2 (header and all rows),
    sort the rows by timestamp, and write the result under ../sorted/<mode>.

    Args:
        csv_file_path: path of the CSV, resolved against the current directory.
        mode: subdirectory name under ../sorted to write into.

    Uses os.path.join instead of manual `+ os.sep +` concatenation.
    """
    title, data = load_csv(os.path.join(os.getcwd(), csv_file_path))
    # Relocate the leading column to position 2, header and rows alike.
    title.insert(2, title.pop(0))
    for row in data:
        row.insert(2, row.pop(0))
    data = sorted(data, key=functools.cmp_to_key(sort_by_time_stamp))
    # Output file name is the input's base name up to the first dot.
    csv_file_name = os.path.basename(csv_file_path).split('.')[0]
    target_save_path = os.path.join('..', 'sorted', mode)
    write_to_csv(title, data, target_save_path, csv_file_name)
class TestGatesOnWireSlice(QiskitTestCase):
    """Verify that gate/measure/barrier/initialize broadcasting accepts
    slices, plain lists, numpy ints and 1-D numpy arrays as wire
    specifiers, and rejects out-of-range or 2-D specifiers."""

    def test_wire_slice(self):
        """h on slice(0, 2) targets the first two qubits."""
        qreg = QuantumRegister(4)
        circuit = QuantumCircuit(qreg)
        circuit.h(slice(0, 2))
        expected = QuantumCircuit(qreg)
        expected.h(qreg[0:2])
        self.assertEqual(circuit, expected)

    def test_wire_list(self):
        """h on an int list is equivalent to a register slice."""
        qreg = QuantumRegister(4)
        circuit = QuantumCircuit(qreg)
        circuit.h([0, 1])
        expected = QuantumCircuit(qreg)
        expected.h(qreg[0:2])
        self.assertEqual(circuit, expected)

    def test_wire_np_int(self):
        """A numpy integer index behaves like a builtin int."""
        numpy_int = numpy.dtype('int').type(2)
        qreg = QuantumRegister(4)
        circuit = QuantumCircuit(qreg)
        circuit.h(numpy_int)
        expected = QuantumCircuit(qreg)
        expected.h(qreg[2])
        self.assertEqual(circuit, expected)

    def test_wire_np_1d_array(self):
        """A 1-D numpy array broadcasts the gate over its elements."""
        numpy_arr = numpy.array([0, 1])
        qreg = QuantumRegister(4)
        circuit = QuantumCircuit(qreg)
        circuit.h(numpy_arr)
        expected = QuantumCircuit(qreg)
        expected.h(qreg[0])
        expected.h(qreg[1])
        self.assertEqual(circuit, expected)

    def test_circuit_multi_qregs_h(self):
        """A slice spans register boundaries: indices 0-2 cover qreg0 and qreg1[0]."""
        qreg0 = QuantumRegister(2)
        qreg1 = QuantumRegister(2)
        circuit = QuantumCircuit(qreg0, qreg1)
        circuit.h(slice(0, 3))
        expected = QuantumCircuit(qreg0, qreg1)
        expected.h(qreg0[0])
        expected.h(qreg0[1])
        expected.h(qreg1[0])
        self.assertEqual(circuit, expected)

    def test_circuit_multi_qreg_cregs_measure(self):
        """measure pairs a qubit slice with a strided clbit slice across registers."""
        qreg0 = QuantumRegister(2)
        creg0 = ClassicalRegister(2)
        qreg1 = QuantumRegister(2)
        creg1 = ClassicalRegister(2)
        circuit = QuantumCircuit(qreg0, qreg1, creg0, creg1)
        circuit.measure(slice(1, 3), slice(0, 4, 2))
        expected = QuantumCircuit(qreg0, qreg1, creg0, creg1)
        expected.measure(qreg0[1], creg0[0])
        expected.measure(qreg1[0], creg1[0])
        self.assertEqual(circuit, expected)

    def test_circuit_barrier(self):
        """barrier on a slice covers the selected qubits from both registers."""
        qreg01 = QuantumRegister(2)
        qreg23 = QuantumRegister(2)
        circuit = QuantumCircuit(qreg01, qreg23)
        circuit.barrier(slice(0, 3))
        expected = QuantumCircuit(qreg01, qreg23)
        expected.barrier([qreg01[0], qreg01[1], qreg23[0]])
        self.assertEqual(circuit, expected)

    def test_circuit_initialize(self):
        """initialize accepts a qubit slice spanning two registers."""
        init_vector = [0.5, 0.5, 0.5, 0.5]
        qreg01 = QuantumRegister(2)
        qreg23 = QuantumRegister(2)
        circuit = QuantumCircuit(qreg01, qreg23)
        circuit.initialize(init_vector, slice(1, 3))
        expected = QuantumCircuit(qreg01, qreg23)
        expected.initialize(init_vector, [qreg01[1], qreg23[0]])
        self.assertEqual(circuit, expected)

    def test_circuit_conditional(self):
        """c_if applies to every instruction produced by a sliced gate call."""
        qreg0 = QuantumRegister(2)
        qreg1 = QuantumRegister(2)
        creg = ClassicalRegister(2)
        circuit = QuantumCircuit(qreg0, qreg1, creg)
        circuit.h(slice(1, 3)).c_if(creg, 3)
        expected = QuantumCircuit(qreg0, qreg1, creg)
        expected.h(qreg0[1]).c_if(creg, 3)
        expected.h(qreg1[0]).c_if(creg, 3)
        self.assertEqual(circuit, expected)

    def test_circuit_qwire_out_of_range(self):
        """A qubit slice past the register bounds raises QiskitError."""
        qreg = QuantumRegister(2)
        circuit = QuantumCircuit(qreg)
        self.assertRaises(QiskitError, circuit.h, slice(9, 99))

    def test_circuit_cwire_out_of_range(self):
        """A clbit slice past the register bounds raises QiskitError."""
        qreg = QuantumRegister(2)
        creg = ClassicalRegister(2)
        circuit = QuantumCircuit(qreg, creg)
        self.assertRaises(QiskitError, circuit.measure, 1, slice(9, 99))

    def test_wire_np_2d_array(self):
        """A 2-D numpy array is not a valid wire specifier."""
        numpy_arr = numpy.array([[0, 1], [2, 3]])
        qreg = QuantumRegister(4)
        circuit = QuantumCircuit(qreg)
        self.assertRaises(QiskitError, circuit.h, numpy_arr)
class Accumulator(object):
    # Compensated floating-point accumulator: _s holds the running sum and
    # _t a running correction term.  Relies on Math.sum, which presumably
    # implements an error-free two-sum returning (sum, roundoff) --
    # TODO(review): confirm against Math.sum's definition.
    # NOTE: statement order in Add() is numerically significant.

    def Set(self, y):
        # Copy state from another Accumulator, or restart from a plain number.
        if (type(self) == type(y)):
            (self._s, self._t) = (y._s, y._t)
        else:
            (self._s, self._t) = (float(y), 0.0)

    def __init__(self, y=0.0):
        self.Set(y)

    def Add(self, y):
        # Two-sum y with the stored correction, then with the running sum,
        # folding the rounding error back into the correction term.
        (y, u) = Math.sum(y, self._t)
        (self._s, self._t) = Math.sum(y, self._s)
        if (self._s == 0):
            # Sum cancelled exactly: promote the leftover error to the sum.
            self._s = u
        else:
            self._t += u

    def Sum(self, y=0.0):
        # Current total, optionally with y added via a throwaway copy so
        # this accumulator is not mutated.
        if (y == 0.0):
            return self._s
        else:
            b = Accumulator(self)
            b.Add(y)
            return b._s

    def Negate(self):
        # Flip the sign of both the sum and its correction term.
        self._s *= (- 1)
        self._t *= (- 1)
def main(cfg):
    """Train a MEVA generator end to end according to *cfg*.

    Seeds all RNGs (when cfg.SEED_VALUE >= 0), sets up logging and
    TensorBoard, builds data loaders, loss, model and optimizer, optionally
    restores a pretrained checkpoint, then runs the Trainer fit loop.
    """
    if (cfg.SEED_VALUE >= 0):
        # Seed python, torch and numpy (and hashing) for reproducibility.
        print(f'Seed value for the experiment {cfg.SEED_VALUE}')
        os.environ['PYTHONHASHSEED'] = str(cfg.SEED_VALUE)
        random.seed(cfg.SEED_VALUE)
        torch.manual_seed(cfg.SEED_VALUE)
        np.random.seed(cfg.SEED_VALUE)
    logger = create_logger(cfg.LOGDIR, phase='train')
    logger.info(f'GPU name -> {torch.cuda.get_device_name()}')
    logger.info(f"GPU feat -> {torch.cuda.get_device_properties('cuda')}")
    logger.info(pprint.pformat(cfg))
    # cuDNN behavior flags come straight from the config.
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
    writer = SummaryWriter(log_dir=cfg.LOGDIR)
    writer.add_text('config', pprint.pformat(cfg), 0)
    data_loaders = get_data_loaders(cfg)
    loss = VIBELoss(e_loss_weight=cfg.LOSS.KP_2D_W, e_3d_loss_weight=cfg.LOSS.KP_3D_W, e_pose_loss_weight=cfg.LOSS.POSE_W, e_shape_loss_weight=cfg.LOSS.SHAPE_W)
    generator = MEVA(n_layers=cfg.MODEL.TGRU.NUM_LAYERS, batch_size=cfg.TRAIN.BATCH_SIZE, seqlen=cfg.DATASET.SEQLEN, hidden_size=cfg.MODEL.TGRU.HIDDEN_SIZE, add_linear=cfg.MODEL.TGRU.ADD_LINEAR, bidirectional=cfg.MODEL.TGRU.BIDIRECTIONAL, use_residual=cfg.MODEL.TGRU.RESIDUAL, cfg=cfg.VAE_CFG).to(cfg.DEVICE)
    # Optionally warm-start the generator from a pretrained checkpoint.
    if ((cfg.TRAIN.PRETRAINED != '') and os.path.isfile(cfg.TRAIN.PRETRAINED)):
        checkpoint = torch.load(cfg.TRAIN.PRETRAINED)
        best_performance = checkpoint['performance']
        generator.load_state_dict(checkpoint['gen_state_dict'])
        print(f'==> Loaded pretrained model from {cfg.TRAIN.PRETRAINED}...')
        print(f'Performance on 3DPW test set {best_performance}')
    else:
        print(f'{cfg.TRAIN.PRETRAINED} is not a pretrained model!!!!')
    gen_optimizer = get_optimizer(model=generator, optim_type=cfg.TRAIN.GEN_OPTIM, lr=cfg.TRAIN.GEN_LR, weight_decay=cfg.TRAIN.GEN_WD, momentum=cfg.TRAIN.GEN_MOMENTUM)
    lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(gen_optimizer, mode='min', factor=0.1, patience=cfg.TRAIN.LR_PATIENCE, verbose=True)
    Trainer(data_loaders=data_loaders, generator=generator, criterion=loss, gen_optimizer=gen_optimizer, start_epoch=cfg.TRAIN.START_EPOCH, end_epoch=cfg.TRAIN.END_EPOCH, device=cfg.DEVICE, writer=writer, debug=cfg.DEBUG, logdir=cfg.LOGDIR, lr_scheduler=lr_scheduler, resume=cfg.TRAIN.RESUME, num_iters_per_epoch=cfg.TRAIN.NUM_ITERS_PER_EPOCH, debug_freq=cfg.DEBUG_FREQ).fit()
def check_spherical_symmetry(samp, l, m, tol):
    """Assert that the sampled points are consistent with spherical symmetry.

    Computes the Monte-Carlo average of the real spherical-harmonic term
    P_l^m(cos(theta)) * cos(m*phi) over the sample and compares it with the
    isotropic expectation (1 for l == m == 0, otherwise 0).

    Raises:
        AssertionError: if the average deviates from expectation by >= tol.
    """
    thetas = numpy.arctan2(samp.R(), samp.z())
    phis = samp.phi()
    harmonic = special.lpmv(m, l, numpy.cos(thetas)) * numpy.cos(m * phis)
    expected = (l == 0) * (m == 0)
    deviation = numpy.fabs(numpy.sum(harmonic) / samp.size - expected)
    assert deviation < tol, f'Sample does not appear to be spherically symmetric, fails spherical harmonics test for (l,m) = ({l},{m})'
    return None
def df_to_loader(df: pd.DataFrame, batch_size: int=128, line_graph: bool=True, pin_memory: bool=False, shuffle: bool=True, **kwargs: Any) -> DataLoader:
    """Wrap a dataframe of structures in a DataLoader.

    Graphs are constructed using the module-level `config` (neighbor
    strategy, canonization, atom features) and batched with the dataset's
    own collate function, which differs for line graphs.
    """
    graphs = load_graphs(df, neighbor_strategy=config.neighbor_strategy, use_canonize=config.use_canonize)
    dataset = StructureDataset(df.reset_index(drop=True), graphs, target=target_col, line_graph=line_graph, atom_features=config.atom_features, id_tag=id_col, **kwargs)
    # Line-graph datasets need their dedicated collate function.
    collate_name = 'collate_line_graph' if line_graph else 'collate_graph'
    collate_fn = getattr(dataset, collate_name)
    return DataLoader(dataset, batch_size=batch_size, collate_fn=collate_fn, shuffle=shuffle, pin_memory=pin_memory)
def dice_coef_metric(pred, label):
    """Dice coefficient between a predicted mask and a binary label mask.

    Predictions are binarized at threshold 0.5.  Fix vs. the original: the
    thresholding is done on a derived array, so the caller's `pred` is no
    longer clobbered in place by a pure metric function.

    Args:
        pred: numpy array or torch tensor of prediction scores.
        label: binary mask of the same shape.

    Returns:
        Dice score in [0, 1]; 1.0 when both masks are empty (by convention).
    """
    # `(x >= 0.5) * 1.0` works for both numpy arrays and torch tensors.
    pred_bin = (pred >= 0.5) * 1.0
    intersection = 2.0 * (pred_bin * label).sum()
    union = pred_bin.sum() + label.sum()
    if pred_bin.sum() == 0 and label.sum() == 0:
        return 1.0
    return intersection / union
class ConfigGenerator():
    """Base class that, on construction, scans its own attributes for
    callables whose __name__ starts with TRIGGER_REG_NAME_PREFIX and runs
    each one in a daemon thread.  Subclasses override genConfig.
    """

    def __init__(self):
        # Keep the started thread handles.  Fix vs. the original: p_list was
        # created but never populated, so thread references were dropped and
        # could not be joined or inspected later.
        self.p_list = []
        for attr_name in dir(self):
            member = getattr(self, attr_name)
            if hasattr(member, '__name__'):
                if member.__name__.startswith(TRIGGER_REG_NAME_PREFIX):
                    worker = Thread(target=member, args=(), daemon=True)
                    worker.start()
                    self.p_list.append(worker)

    def genConfig(self, *args, **kwargs):
        """Override in subclasses; intentionally a no-op here."""
        pass
def dangling_context(is_dangling: bool=True) -> Generator[(None, None, None)]:
    # Scoped override of the "dangling" flag: for the duration of the
    # generator body the context var holds True if either the caller asked
    # for it or it was already set, and the prior value is restored on exit.
    # NOTE(review): the set/yield/reset pattern strongly suggests this was
    # written for @contextlib.contextmanager -- confirm the decorator was
    # not lost in extraction.
    token = dangling_ctx_var.set((is_dangling or dangling_ctx_var.get()))
    try:
        (yield)
    finally:
        # Always restore, even if the body raises.
        dangling_ctx_var.reset(token)
def _build_regularizer(regularizer):
regularizer_oneof = regularizer.WhichOneof('regularizer_oneof')
if (regularizer_oneof == 'l1_regularizer'):
return slim.l1_regularizer(scale=float(regularizer.l1_regularizer.weight))
if (regularizer_oneof == 'l2_regularizer'):
return slim.l2_regularizer(scale=float(regularizer.l2_regularizer.weight))
if (regularizer_oneof is None):
return None
raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof)) |
class FairseqEncoderModel(BaseFairseqModel):
    """Encoder-only fairseq model: a thin wrapper around one FairseqEncoder."""

    def __init__(self, encoder):
        super().__init__()
        self.encoder = encoder
        check_type(self.encoder, FairseqEncoder)

    def forward(self, src_tokens, src_lengths, **kwargs):
        """Delegate straight to the wrapped encoder."""
        return self.encoder(src_tokens, src_lengths, **kwargs)

    def get_normalized_probs(self, net_output, log_probs, sample=None):
        """Normalize encoder logits to (log-)probabilities over the last dim.

        Only tensor-valued encoder_out is supported.
        """
        encoder_out = net_output['encoder_out']
        if not torch.is_tensor(encoder_out):
            raise NotImplementedError
        logits = encoder_out.float()
        normalize = F.log_softmax if log_probs else F.softmax
        return normalize(logits, dim=-1)

    def max_positions(self):
        """Maximum input length supported by the wrapped encoder."""
        return self.encoder.max_positions()
def mctsLoop(env, policies, seed, save, animate, **kwargs):
    # Run MCTS on a freshly reset world until the root reaches a terminal
    # state.  kwargs['iter'] controls exploration iterations per step.
    # NOTE(review): uses `xrange`, so this block is Python 2 code.
    if (seed is not None):
        world_id = int(seed)
    else:
        world_id = np.random.randint(10000)
    np.random.seed(world_id)
    env.reset()
    world = env._world
    current_root = Node(world=world)
    done = current_root.terminal
    # Build a descriptive output directory name from the policy configuration.
    if (policies._rollout is None):
        rollout = 'norollout'
    else:
        rollout = 'rollout'
    if policies._dfs:
        dfs = '_dfs'
    else:
        dfs = ''
    if (policies._sample is not None):
        sample = policies._sample.getName()
    else:
        sample = 'none'
    dirname = ('world%d_%s_%s%s' % (world_id, sample, rollout, dfs))
    if (save or animate):
        window = world._getScreen()
        os.mkdir(dirname)
    while (not done):
        # Expand the search tree, then extract the current best path.
        for i in xrange(kwargs['iter']):
            policies.explore(current_root)
        path = policies.extract(current_root)
        # NOTE(review): busy-wait until the root state's clock reaches 1.0 --
        # presumably something else advances state.t; confirm, otherwise this
        # loop never terminates.
        while (current_root.state.t < 1.0):
            pass
        done = current_root.terminal
        if animate:
            pass
        if done:
            break
_subclass('projected_sgd')
# NOTE(review): the call above discards its return value; it looks like a
# stripped @_subclass('projected_sgd') class decorator -- confirm.
class ProjSGD(Inference):
    # Inference strategy that fits low-dimensional subspace coordinates of a
    # model via SGD on the projected parameterization.

    def __init__(self, model, loader, criterion, epochs=10, **kwargs):
        # kwargs are forwarded verbatim to torch.optim.SGD inside fit().
        super(ProjSGD, self).__init__()
        self.kwargs = kwargs
        self.optimizer = None
        self.epochs = epochs
        (self.mean, self.var, self.subspace) = (None, None, None)
        self.optimizer = None  # redundant: already set above
        self.proj_params = None
        (self.loader, self.criterion) = (loader, criterion)
        self.model = model

    def fit(self, mean, variance, subspace, use_cuda=True, **kwargs):
        # Optimize the subspace coordinates for self.epochs epochs and
        # return the per-epoch loss list.
        # NOTE(review): `variance` is never used, and when self.proj_params
        # is already set the optimizer/proj_model are NOT rebuilt (the clone
        # is trained with the previous optimizer state) -- confirm intended.
        if (use_cuda and torch.cuda.is_available()):
            self.mean = mean.cuda()
            self.subspace = subspace.cuda()
        else:
            self.mean = mean
            self.subspace = subspace
        if (self.proj_params is None):
            # Fresh coordinates (one per subspace direction), learned by SGD.
            proj_params = torch.zeros(self.subspace.size(0), 1, dtype=self.subspace.dtype, device=self.subspace.device, requires_grad=True)
            print(proj_params.device)
            self.proj_model = ProjectedModel(model=self.model, mean=self.mean.unsqueeze(1), projection=self.subspace, proj_params=proj_params)
            self.optimizer = torch.optim.SGD([proj_params], **self.kwargs)
        else:
            proj_params = self.proj_params.clone()
        loss_vec = []
        for _ in range(self.epochs):
            loss = train_epoch(loader=self.loader, optimizer=self.optimizer, model=self.proj_model, criterion=self.criterion, **kwargs)
            loss_vec.append(loss)
        self.proj_params = proj_params
        return loss_vec

    def sample(self, *args, **kwargs):
        # MAP sample: mean + P^T z, returned as a 1 x D row vector.
        print(self.mean.size(), self.subspace.size(), self.proj_params.size())
        map_sample = (self.mean + self.subspace.t().matmul(self.proj_params.squeeze(1)))
        return map_sample.view(1, (- 1))
def MakeDir(dir):
    """Create directory `dir`, failing loudly if it already exists.

    Raises:
        Exception: if the directory already exists (chained to the OSError).
        OSError: re-raised unchanged for any other mkdir failure.
    """
    try:
        os.mkdir(dir)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            # Unrelated failure (permissions, missing parent, ...): bare
            # `raise` preserves the original traceback.
            raise
        raise Exception('Directory {0} already exists'.format(dir)) from exc
class FineTuneTrainer(SemiTrainer):
    """Trainer variant for the fine-tuning phase."""

    # Hooks are disabled during fine-tuning.
    activate_hooks = False

    def train_epocher(self) -> Type[EpocherBase]:
        """Return the epocher class (not an instance) used for fine-tuning."""
        return FineTuneEpocher
class Attribute_Embedding(nn.Module):
    """Project an attribute vector into the model dimension, batch-normalizing
    the raw attribute values first."""

    def __init__(self, d_model, attribute_vocab_size):
        super().__init__()
        self.embed = nn.Linear(attribute_vocab_size, d_model)
        self.norm = nn.BatchNorm1d(attribute_vocab_size, momentum=0.01)

    def forward(self, attribute):
        # Normalize over the batch dimension, then project to d_model.
        normalized = self.norm(attribute)
        return self.embed(normalized)
class ConfigTester(unittest.TestCase):
    """Tests for diffusers' ConfigMixin: registration of constructor args
    into .config, save/load round-trips, cross-scheduler loading, config
    overrides, and _use_default_values bookkeeping.

    NOTE: the from_pretrained tests fetch checkpoints from the HF hub and
    therefore require network access.
    """

    def test_load_not_from_mixin(self):
        """load_config on a plain path (no mixin subclass) must raise."""
        with self.assertRaises(ValueError):
            ConfigMixin.load_config('dummy_path')

    def test_register_to_config(self):
        """Positional args, keyword overrides and defaults all land in .config."""
        obj = SampleObject()
        config = obj.config
        assert (config['a'] == 2)
        assert (config['b'] == 5)
        assert (config['c'] == (2, 5))
        assert (config['d'] == 'for diffusion')
        assert (config['e'] == [1, 3])
        # Ignored private kwargs (e.g. _name_or_path) must not change config.
        obj = SampleObject(_name_or_path='lalala')
        config = obj.config
        assert (config['a'] == 2)
        assert (config['b'] == 5)
        assert (config['c'] == (2, 5))
        assert (config['d'] == 'for diffusion')
        assert (config['e'] == [1, 3])
        # Keyword override is recorded.
        obj = SampleObject(c=6)
        config = obj.config
        assert (config['a'] == 2)
        assert (config['b'] == 5)
        assert (config['c'] == 6)
        assert (config['d'] == 'for diffusion')
        assert (config['e'] == [1, 3])
        # Positional override is recorded too.
        obj = SampleObject(1, c=6)
        config = obj.config
        assert (config['a'] == 1)
        assert (config['b'] == 5)
        assert (config['c'] == 6)
        assert (config['d'] == 'for diffusion')
        assert (config['e'] == [1, 3])

    def test_save_load(self):
        """save_config/from_config round-trip preserves the config, modulo
        JSON turning the tuple value into a list."""
        obj = SampleObject()
        config = obj.config
        assert (config['a'] == 2)
        assert (config['b'] == 5)
        assert (config['c'] == (2, 5))
        assert (config['d'] == 'for diffusion')
        assert (config['e'] == [1, 3])
        with tempfile.TemporaryDirectory() as tmpdirname:
            obj.save_config(tmpdirname)
            new_obj = SampleObject.from_config(SampleObject.load_config(tmpdirname))
            new_config = new_obj.config
        config = dict(config)
        new_config = dict(new_config)
        # JSON serialization converts tuples to lists.
        assert (config.pop('c') == (2, 5))
        assert (new_config.pop('c') == [2, 5])
        config.pop('_use_default_values')
        assert (config == new_config)

    def test_load_ddim_from_pndm(self):
        """Loading a DDIM scheduler from a PNDM checkpoint emits no warnings."""
        logger = logging.get_logger('diffusers.configuration_utils')
        logger.setLevel(30)
        with CaptureLogger(logger) as cap_logger:
            ddim = DDIMScheduler.from_pretrained('hf-internal-testing/tiny-stable-diffusion-torch', subfolder='scheduler')
        assert (ddim.__class__ == DDIMScheduler)
        assert (cap_logger.out == '')

    def test_load_euler_from_pndm(self):
        """Loading an Euler scheduler from a PNDM checkpoint emits no warnings."""
        logger = logging.get_logger('diffusers.configuration_utils')
        logger.setLevel(30)
        with CaptureLogger(logger) as cap_logger:
            euler = EulerDiscreteScheduler.from_pretrained('hf-internal-testing/tiny-stable-diffusion-torch', subfolder='scheduler')
        assert (euler.__class__ == EulerDiscreteScheduler)
        assert (cap_logger.out == '')

    def test_load_euler_ancestral_from_pndm(self):
        """Loading an Euler-ancestral scheduler from PNDM emits no warnings."""
        logger = logging.get_logger('diffusers.configuration_utils')
        logger.setLevel(30)
        with CaptureLogger(logger) as cap_logger:
            euler = EulerAncestralDiscreteScheduler.from_pretrained('hf-internal-testing/tiny-stable-diffusion-torch', subfolder='scheduler')
        assert (euler.__class__ == EulerAncestralDiscreteScheduler)
        assert (cap_logger.out == '')

    def test_load_pndm(self):
        """Loading PNDM from its own checkpoint emits no warnings."""
        logger = logging.get_logger('diffusers.configuration_utils')
        logger.setLevel(30)
        with CaptureLogger(logger) as cap_logger:
            pndm = PNDMScheduler.from_pretrained('hf-internal-testing/tiny-stable-diffusion-torch', subfolder='scheduler')
        assert (pndm.__class__ == PNDMScheduler)
        assert (cap_logger.out == '')

    def test_overwrite_config_on_load(self):
        """Keyword overrides passed to from_pretrained take precedence over
        the stored config, silently."""
        logger = logging.get_logger('diffusers.configuration_utils')
        logger.setLevel(30)
        with CaptureLogger(logger) as cap_logger:
            ddpm = DDPMScheduler.from_pretrained('hf-internal-testing/tiny-stable-diffusion-torch', subfolder='scheduler', prediction_type='sample', beta_end=8)
        with CaptureLogger(logger) as cap_logger_2:
            ddpm_2 = DDPMScheduler.from_pretrained('google/ddpm-celebahq-256', beta_start=88)
        assert (ddpm.__class__ == DDPMScheduler)
        assert (ddpm.config.prediction_type == 'sample')
        assert (ddpm.config.beta_end == 8)
        assert (ddpm_2.config.beta_start == 88)
        assert (cap_logger.out == '')
        assert (cap_logger_2.out == '')

    def test_load_dpmsolver(self):
        """Loading DPMSolver multistep from a PNDM checkpoint emits no warnings."""
        logger = logging.get_logger('diffusers.configuration_utils')
        logger.setLevel(30)
        with CaptureLogger(logger) as cap_logger:
            dpm = DPMSolverMultistepScheduler.from_pretrained('hf-internal-testing/tiny-stable-diffusion-torch', subfolder='scheduler')
        assert (dpm.__class__ == DPMSolverMultistepScheduler)
        assert (cap_logger.out == '')

    def test_use_default_values(self):
        """_use_default_values tracks which config entries were defaulted and
        controls whether from_config replaces them with new class defaults."""
        config = SampleObject()
        config_dict = {k: v for (k, v) in config.config.items() if (not k.startswith('_'))}
        # A freshly defaulted object marks every public key as defaulted.
        assert (set(config_dict.keys()) == set(config.config._use_default_values))
        with tempfile.TemporaryDirectory() as tmpdirname:
            config.save_config(tmpdirname)
            config = SampleObject2.from_config(tmpdirname)
            assert ('f' in config._use_default_values)
            assert (config.f == [1, 3])
        # Defaulted values are replaced by the target class's own defaults.
        new_config = SampleObject4.from_config(config.config)
        assert (new_config.f == [5, 4])
        # Once a key is no longer marked defaulted, its value is kept as-is.
        config.config._use_default_values.pop()
        new_config_2 = SampleObject4.from_config(config.config)
        assert (new_config_2.f == [1, 3])
        assert (new_config_2.e == [1, 3])
def resnet110_cifar100(num_classes=100, **kwargs):
    """ResNet-110 (plain, non-bottleneck blocks) configured for CIFAR-100."""
    arch = dict(blocks=110, bottleneck=False, model_name='resnet110_cifar100')
    return get_resnet_cifar(num_classes=num_classes, **arch, **kwargs)
class TestDARNTop(RWSTopLayerTest, unittest.TestCase):
    """Run the shared RWS top-layer test suite against a DARNTop layer."""

    def setUp(self):
        """Build a small DARNTop (8 visibles) and finalize its setup."""
        self.n_samples = 10
        self.layer = DARNTop(n_X=8)
        self.layer.setup()
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
    """Flag DISALLOW_* macros that appear outside a class's private: section.

    Args:
        filename: name of the file being linted.
        clean_lines: CleansedLines instance (comments/strings elided).
        linenum: index of the line to inspect.
        nesting_state: block-nesting state; top of stack is the innermost scope.
        error: callback(filename, linenum, category, confidence, message).

    Fix vs. the original: the dead `else: pass` branch is removed.
    """
    line = clean_lines.elided[linenum]
    matched = Match('\\s*(DISALLOW_COPY_AND_ASSIGN|DISALLOW_EVIL_CONSTRUCTORS|DISALLOW_IMPLICIT_CONSTRUCTORS)', line)
    if not matched:
        return
    # Only meaningful when the innermost scope is a class; anything else is
    # silently ignored.
    if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
        if nesting_state.stack[-1].access != 'private':
            error(filename, linenum, 'readability/constructors', 3, ('%s must be in the private: section' % matched.group(1)))
def cal_torsion_energy(m):
    """Accumulate a UFF-style torsion energy over an RDKit molecule's
    non-ring torsions.

    Only torsions whose two central atoms are SP3-SP3 or SP2-SP3 hybridized
    contribute; all other hybridization pairs are skipped.
    """
    energy = 0
    (torsion_list, torsion_list_ring) = CalculateTorsionLists(m)
    angles = CalculateTorsionAngles(m, torsion_list, torsion_list_ring)
    for (idx, t) in enumerate(torsion_list):
        (indice, _) = t
        # Use the first atom quadruple of the torsion and its first measured
        # angle (in degrees).
        (indice, angle) = (indice[0], angles[idx][0][0])
        v = rdForceFieldHelpers.GetUFFTorsionParams(m, indice[0], indice[1], indice[2], indice[3])
        hs = [str(m.GetAtomWithIdx(i).GetHybridization()) for i in indice]
        # Periodicity n and phase pi_zero depend on the central-bond
        # hybridizations: 3/pi for SP3-SP3, 6/0 for SP2-SP3.
        if (set([hs[1], hs[2]]) == set(['SP3', 'SP3'])):
            (n, pi_zero) = (3, math.pi)
        elif (set([hs[1], hs[2]]) == set(['SP2', 'SP3'])):
            (n, pi_zero) = (6, 0.0)
        else:
            continue
        # E = V/2 * (1 - cos(n*phi0) * cos(n*phi)); angle converted deg->rad.
        energy += ((0.5 * v) * (1 - (math.cos((n * pi_zero)) * math.cos((((n * angle) / 180) * math.pi)))))
    return energy
def dataset_registry(dataset_type, framework, dataset_format=''):
    """Class-decorator factory that registers a dataset class under one or
    more frameworks in the global registry_datasets table.

    Args:
        dataset_type: base name of the dataset.
        framework: comma-separated framework names.
        dataset_format: optional suffix appended to the registered name.

    Raises:
        AssertionError: for an unsupported framework name.
        ValueError: when the composed name is already registered.
    """
    supported = ['tensorflow', 'tensorflow_itex', 'mxnet', 'pytorch', 'pytorch_ipex', 'pytorch_fx', 'onnxrt_qlinearops', 'onnxrt_integerops', 'onnxrt_qdq', 'onnxruntime']

    def decorator_dataset(cls):
        dataset_name = dataset_type + dataset_format
        for fwk_name in (name.strip() for name in framework.split(',')):
            assert fwk_name in supported, 'The framework support tensorflow mxnet pytorch onnxrt'
            if dataset_name in registry_datasets[fwk_name].keys():
                raise ValueError('Cannot have two datasets with the same name')
            registry_datasets[fwk_name][dataset_name] = cls
        return cls
    return decorator_dataset
class Transformer(base_converter.ConverterInterface):
    """Graph transformer: applies a configurable sequence of rewrite rules
    (folding, quantization, layout changes, ...) to a MACE model proto."""

    def __init__(self, option, model, converter_info):
        """Build the TransformerRule -> handler dispatch table and initialize
        the graph indexes (rebuilt per rule by construct_ops_and_consumers).
        """
        # Dispatch table mapping each TransformerRule to its implementation.
        self._registered_transformers = {TransformerRule.TRANSFORM_FAKE_QUANTIZE: self.transform_fake_quantize, TransformerRule.REMOVE_USELESS_OP: self.remove_useless_op, TransformerRule.FOLD_DIV_BN: self.fold_div_bn, TransformerRule.TRANSPOSE_CONST_OP_INPUT: self.transpose_const_op_input, TransformerRule.TRANSFORM_GLOBAL_POOLING: self.transform_global_pooling, TransformerRule.TRANSFORM_LSTMCELL_ZEROSTATE: self.transform_lstmcell_zerostate, TransformerRule.TRANSFORM_BASIC_LSTMCELL: self.transform_basic_lstmcell, TransformerRule.FOLD_RESHAPE: self.fold_reshape, TransformerRule.TRANSFORM_MATMUL_TO_FC: self.transform_matmul_to_fc, TransformerRule.UPDATE_FC_OUTPUT_SHAPE: self.update_fc_output_shape, TransformerRule.FOLD_BATCHNORM: self.fold_batchnorm, TransformerRule.FOLD_BIASADD: self.fold_biasadd, TransformerRule.FOLD_CONV_AND_BN: self.fold_conv_and_bn, TransformerRule.FOLD_DECONV_AND_BN: self.fold_deconv_and_bn, TransformerRule.FOLD_DEPTHWISE_CONV_AND_BN: self.fold_depthwise_conv_and_bn, TransformerRule.TRANSFORM_ADD_TO_BIASADD: self.transform_add_to_biasadd, TransformerRule.TRANSFORM_BIASADD_TO_ADD: self.transform_biasadd_to_add, TransformerRule.REARRANGE_BATCH_TO_SPACE: self.rearrange_batch_to_space, TransformerRule.FLATTEN_ATROUS_CONV: self.flatten_atrous_conv, TransformerRule.FOLD_ACTIVATION: self.fold_activation, TransformerRule.FOLD_SQRDIFF_MEAN: self.fold_squared_diff_mean, TransformerRule.FOLD_INSTANCE_NORM: self.fold_instance_norm, TransformerRule.FOLD_MOMENTS: self.fold_moments, TransformerRule.FOLD_EMBEDDING_LOOKUP: self.fold_embedding_lookup, TransformerRule.TRANSPOSE_FILTERS: self.transpose_filters, TransformerRule.TRANSPOSE_MATMUL_WEIGHT: self.transpose_matmul_weight, TransformerRule.FOLD_FC_RESHAPE: self.fold_fc_reshape, TransformerRule.ADD_IN_OUT_TENSOR_INFO: self.add_in_out_tensor_info, TransformerRule.ADD_WINOGRAD_ARG: self.add_winograd_arg, TransformerRule.TRANSFORM_GLOBAL_CONV_TO_FC: self.transform_global_conv_to_fc, TransformerRule.RESHAPE_FC_WEIGHT:
        self.reshape_fc_weight, TransformerRule.QUANTIZE_NODES: self.quantize_nodes, TransformerRule.ADD_QUANTIZE_TENSOR_RANGE: self.add_quantize_tensor_range, TransformerRule.QUANTIZE_WEIGHTS: self.quantize_weights, TransformerRule.UPDATE_FLOAT_OP_DATA_TYPE: self.update_float_op_data_type, TransformerRule.ADD_OPENCL_INFORMATIONS: self.add_opencl_informations, TransformerRule.SORT_BY_EXECUTION: self.sort_by_execution, TransformerRule.UPDATE_DATA_FORMAT: self.update_data_format, TransformerRule.TRANSPOSE_RESHAPE_AND_FLATTEN: self.transform_reshape_and_flatten, TransformerRule.TRANSPOSE_SHAPE_TENSOR_TO_PARAM: self.transform_shape_tensor_to_param, TransformerRule.TRANSPOSE_DATA_FORMAT: self.transpose_data_format, TransformerRule.CHECK_QUANTIZE_INFO: self.check_quantize_info, TransformerRule.TRANSFORM_CHANNEL_SHUFFLE: self.transform_channel_shuffle, TransformerRule.QUANTIZE_SPECIFIC_OPS_ONLY: self.quantize_specific_ops_only, TransformerRule.FP16_MATMUL_WEIGHT: self.fp16_matmul_weight, TransformerRule.FP16_GATHER_WEIGHT: self.fp16_gather_weight, TransformerRule.QUANTIZE_LARGE_WEIGHTS: self.quantize_large_weights, TransformerRule.TRANSFORM_SINGLE_BN_TO_DEPTHWISE_CONV: self.transform_single_bn_to_depthwise_conv, TransformerRule.TRANSFORM_MUL_MAX_TO_PRELU: self.transform_mul_max_to_prelu, TransformerRule.TRANSFORM_EXPAND_DIMS_TO_RESHAPE: self.transform_expand_dims_to_reshape, TransformerRule.QUANTIZE_FOLD_RELU: self.quantize_fold_relu, TransformerRule.TRANSFORM_KERAS_QUANTIZE_INFO: self.transform_keras_quantize_info, TransformerRule.ADD_GENERRAL_INFO: self.add_general_info, TransformerRule.REMOVE_UNUSED_TENSOR: self.remove_unused_tensor, TransformerRule.TRANSFORM_SLICE_TO_STRIDED_SLICE: self.transform_slice_to_strided_slice, TransformerRule.ADD_TRANSPOSE_FOR_HTP: self.add_transpose_for_htp}
        self._option = option
        self._model = model
        self._wino_arg = self._option.winograd
        # Graph indexes, rebuilt per rule by construct_ops_and_consumers().
        self._ops = {}
        self._consts = {}
        self._consumers = {}
        self._producer = {}
        self._quantize_activation_info = {}
        self._quantized_tensor = set()
        # Maps between user-visible node names and internal renamed ones.
        self.input_name_map = {}
        self.output_name_map = {}
        self._has_none_df = False
        self.initialize_name_map()
        self._converter_info = converter_info
def run(self):
for key in self._option.transformer_option:
transformer = self._registered_transformers[key]
while True:
self.construct_ops_and_consumers(key)
changed = transformer()
if (not changed):
break
return (self._model, self._quantize_activation_info)
    def initialize_name_map(self):
        """Populate input_name_map/output_name_map with prefixed internal
        names, fix up Keras ':index' name postfixes, and note whether any
        input has no data format."""
        for input_node in self._option.input_nodes.values():
            if (self._option.platform == Platform.KERAS):
                # Keras tensor names carry a ':<index>' postfix; rewrite ops
                # that reference the postfix-less name to the full name.
                input_name_parts = input_node.name.split(':')
                if (len(input_name_parts) == 2):
                    input_name_without_postfix = input_name_parts[0]
                    for op in self._model.op:
                        for (i, name) in enumerate(op.input):
                            if (name == input_name_without_postfix):
                                op.input[i] = input_node.name
            new_input_name = ((MaceKeyword.mace_input_node_name + '_') + input_node.name)
            self.input_name_map[input_node.name] = new_input_name
            if (input_node.data_type == mace_pb2.DT_INT32):
                # int32 inputs keep their original (unprefixed) name.
                self.input_name_map[input_node.name] = input_node.name
            if (input_node.data_format == DataFormat.NONE):
                self._has_none_df = True
        output_nodes = self._option.check_nodes.values()
        for output_node in output_nodes:
            new_output_name = ((MaceKeyword.mace_output_node_name + '_') + output_node.name)
            self.output_name_map[output_node.name] = new_output_name
def filter_format(self):
filter_format_value = ConverterUtil.get_arg(self._model, MaceKeyword.mace_filter_format_str).i
filter_format = None
if (filter_format_value == DataFormat.HWIO.value):
filter_format = DataFormat.HWIO
elif (filter_format_value == DataFormat.OIHW.value):
filter_format = DataFormat.OIHW
elif (filter_format_value == DataFormat.HWOI.value):
filter_format = DataFormat.HWOI
else:
mace_check(False, ('filter format %d not supported' % filter_format_value))
return filter_format
    def set_filter_format(self, filter_format):
        """Record filter_format (a DataFormat enum) in the model's
        filter-format argument."""
        arg = ConverterUtil.get_arg(self._model, MaceKeyword.mace_filter_format_str)
        arg.i = filter_format.value
    def construct_ops_and_consumers(self, key):
        """Rebuild the _ops/_consts/_consumers/_producer indexes from the
        current model, and (except when sorting by execution) synthesize an
        'Input' op as producer for any input node that no op produces."""
        self._ops.clear()
        self._consumers.clear()
        self._producer.clear()
        for op in self._model.op:
            self._ops[op.name] = op
        for tensor in self._model.tensors:
            self._consts[tensor.name] = tensor
        for op in self._ops.values():
            for input_tensor in op.input:
                if (input_tensor not in self._consumers):
                    self._consumers[input_tensor] = []
                self._consumers[input_tensor].append(op)
            for output_tensor in op.output:
                self._producer[output_tensor] = op
        if (key != TransformerRule.SORT_BY_EXECUTION):
            for input_node in self._option.input_nodes.values():
                input_node_existed = False
                for op in self._model.op:
                    if (input_node.name in op.output):
                        input_node_existed = True
                        break
                if (not input_node_existed):
                    # Synthesize a placeholder 'Input' op for this node.
                    op = mace_pb2.OperatorDef()
                    op.name = self.normalize_op_name(input_node.name)
                    op.type = 'Input'
                    data_type_arg = op.arg.add()
                    data_type_arg.name = MaceKeyword.mace_op_data_type_str
                    data_type_arg.i = input_node.data_type
                    op.output.extend([input_node.name])
                    output_shape = op.output_shape.add()
                    output_shape.dims.extend(input_node.shape)
                    if (input_node.data_format != DataFormat.NONE):
                        if (input_node.data_format == DataFormat.NCHW):
                            # Stored shape is NHWC-ordered; permute to NCHW.
                            self.transpose_shape(output_shape.dims, [0, 3, 1, 2])
                        ConverterUtil.add_data_format_arg(op, DataFormat.AUTO)
                    else:
                        ConverterUtil.add_data_format_arg(op, DataFormat.NONE)
                    self._producer[op.output[0]] = op
def replace(obj_list, source, target):
for i in six.moves.range(len(obj_list)):
if (obj_list[i] == source):
obj_list[i] = target
def transpose_shape(shape, order):
transposed_shape = []
for i in six.moves.range(len(order)):
transposed_shape.append(shape[order[i]])
shape[:] = transposed_shape[:]
def normalize_op_name(name):
return name.replace(':', '_')
def get_tensor_shape(self, tensor):
if (tensor in self._consts):
return list(self._consts[tensor].dims)
elif (tensor in self._producer):
producer = self._producer[tensor]
for i in six.moves.range(len(producer.output)):
if (producer.output[i] == tensor):
return list(producer.output_shape[i].dims)
else:
return None
    def get_tensor_data_type(self, tensor):
        """Data type of *tensor*: const tensors report their own dtype;
        produced tensors use the producer's output_type (preferred) or its
        'T' argument.  Returns None when the type cannot be determined."""
        if (tensor in self._consts):
            return self._consts[tensor].data_type
        elif (tensor in self._producer):
            producer = self._producer[tensor]
            for i in six.moves.range(len(producer.output)):
                if (producer.output[i] == tensor):
                    if (i < len(producer.output_type)):
                        return producer.output_type[i]
                    elif (ConverterUtil.get_arg(producer, 'T') is not None):
                        return ConverterUtil.get_arg(producer, 'T').i
                    else:
                        print('No data type filled: ', producer)
                        return None
        else:
            return None
def get_tensor_data_format(self, tensor):
if (tensor in self._producer):
producer = self._producer[tensor]
return ConverterUtil.data_format(producer)
else:
return DataFormat.NONE
    def consumer_count(self, tensor_name):
        """Number of ops that consume tensor_name (0 when unconsumed)."""
        return len(self._consumers.get(tensor_name, []))
def is_op_output_node(self, op):
output_node_tensor_names = [out for out in self._option.output_nodes]
for output in op.output:
if (output in output_node_tensor_names):
return True
return False
    def safe_remove_node(self, op, replace_op, remove_input_tensor=False):
        """Remove *op* from the graph while keeping consumers wired up.

        With replace_op None, op must be a simple pass-through (one input and
        one output, or a Reshape with a const shape input); its consumers are
        rewired to op's input.  Otherwise replace_op takes over op's outputs,
        preserving names that are declared model outputs.  Optionally also
        drops op's constant input tensors from the model.
        """
        if (replace_op is None):
            # Only safe for 1-in/1-out ops (or Reshape with const/absent dims).
            reshape_const_dim = ((op.type == MaceOp.Reshape.name) and ((len(op.input) == 1) or (op.input[1] in self._consts)))
            mace_check((((len(op.output) == 1) and (len(op.input) == 1)) or reshape_const_dim), ('cannot remove op that w/o replace op specified and input/output length > 1\n' + str(op)))
            for consumer_op in self._consumers.get(op.output[0], []):
                self.replace(consumer_op.input, op.output[0], op.input[0])
            mace_check((op.output[0] not in self._option.output_nodes), 'cannot remove op that is output node')
        else:
            mace_check((len(op.output) == len(replace_op.output)), 'cannot remove op since len(op.output) != len(replace_op.output)')
            for i in six.moves.range(len(op.output)):
                if (op.output[i] in self._option.output_nodes):
                    # Keep the public output name: point replace_op at it and
                    # rewire consumers of replace_op's old name accordingly.
                    for consumer in self._consumers.get(replace_op.output[i], []):
                        self.replace(consumer.input, replace_op.output[i], op.output[i])
                    replace_op.output[i] = op.output[i]
                else:
                    for consumer_op in self._consumers.get(op.output[i], []):
                        self.replace(consumer_op.input, op.output[i], replace_op.output[i])
        if remove_input_tensor:
            # Also drop op's constant inputs from the model's tensor list.
            for input_name in op.input:
                if (input_name in self._consts):
                    const_tensor = self._consts[input_name]
                    self._model.tensors.remove(const_tensor)
        self._model.op.remove(op)
def add_in_out_tensor_info(self):
    """Populate the model proto's input_info/output_info from the options.

    Inputs come from option.input_nodes, outputs from option.check_nodes;
    a node's alias falls back to its name when unset.  Always returns False.
    """
    net = self._model

    def fill(info, node):
        # Field copy shared by input and output descriptors.
        info.name = node.name
        info.alias = node.alias if node.alias is not None else node.name
        info.data_format = node.data_format.value
        info.dims.extend(node.shape)
        info.data_type = node.data_type

    for input_node in self._option.input_nodes.values():
        fill(net.input_info.add(), input_node)
    for output_node in self._option.check_nodes.values():
        fill(net.output_info.add(), output_node)
    return False
def remove_useless_op(self):
    """Remove no-op graph nodes.

    Handles: Identity ops, Reshapes whose output shape equals the input
    shape, element-wise multiplies by scalar 1.0, and a Reshape feeding
    directly into another Reshape (the first one is dropped).  Ops that
    are model outputs are preserved on CPU.  Returns True when a node was
    removed so the driver re-runs the pass.
    """
    net = self._model
    for op in net.op:
        # Never remove an op whose output is a model output (CPU only).
        if (self.is_op_output_node(op) and (self._option.device == DeviceType.CPU.value)):
            continue
        if (op.type == 'Identity'):
            print(('Remove useless op: %s(%s)' % (op.name, op.type)))
            self.safe_remove_node(op, self._producer.get(op.input[0], None))
            return True
        elif ((op.type == 'Reshape') and (len(op.output_shape) == 1) and (op.output_shape[0].dims == self.get_tensor_shape(op.input[0]))):
            # Reshape to the identical shape is a no-op.
            mace_check((len(op.output_shape[0].dims) != 0), ('Output shape is null in op: %s(%s)' % (op.name, op.type)))
            print(('Remove useless reshape: %s(%s)' % (op.name, op.type)))
            self.safe_remove_node(op, self._producer.get(op.input[0], None))
            return True
        elif ((op.type == 'Eltwise') and (ConverterUtil.get_arg(op, MaceKeyword.mace_element_type_str).i == EltwiseType.PROD.value)):
            # Multiplying by the scalar 1.0 is a no-op.
            scala = ConverterUtil.get_arg(op, MaceKeyword.mace_scalar_input_str)
            if ((scala is not None) and (scala.f == 1.0)):
                print(('Remove useless eltwise mul: %s(%s)' % (op.name, op.type)))
                self.safe_remove_node(op, self._producer.get(op.input[0], None))
                return True
        elif ((op.type == 'Reshape') and (len(op.output_shape) == 1) and (self._producer.get(op.input[0], None) is not None) and (self._producer.get(op.input[0], None).type == 'Reshape')):
            # Back-to-back Reshapes: the upstream one is redundant when its
            # shape input is constant and it has no other consumer.
            print(('Remove useless Reshape: %s(%s)' % (op.name, op.type)))
            producer_op = self._producer.get(op.input[0], None)
            if (((len(producer_op.input) == 1) or (producer_op.input[1] in self._consts)) and (self._consumers.get(producer_op.output[0], None) is not None) and (len(self._consumers.get(producer_op.output[0], None)) == 1)):
                self.safe_remove_node(producer_op, None, remove_input_tensor=True)
                return True
    return False
def transform_global_pooling(self):
    """Give pooling ops marked 'global' an explicit kernel covering the whole input."""
    for op in self._model.op:
        if op.type != MaceOp.Pooling.name:
            continue
        if ConverterUtil.get_arg(op, MaceKeyword.mace_global_pooling_str) is None:
            continue
        print('Transform global pooling: %s(%s)' % (op.name, op.type))
        input_shape = self._producer[op.input[0]].output_shape[0].dims
        # The kernel spans the full spatial extent of the producer's output.
        if ConverterUtil.data_format(op) == DataFormat.NHWC:
            spatial = input_shape[1:3]
        else:
            spatial = input_shape[2:4]
        ConverterUtil.get_arg(op, MaceKeyword.mace_kernel_str).ints[:] = spatial[:]
    return False
def fold_batchnorm(self):
    """Fuse Eltwise(PROD by 1-D const) + Eltwise(SUM)/BiasAdd into BatchNorm.

    A scale-multiply followed by a const add is the unfolded form some
    frameworks emit for batch norm; the pair is collapsed into one
    BatchNorm op taking (input, scale, offset).  Returns True on a fold.
    """
    net = self._model
    for op in net.op:
        # Match x * scale: scale is a 1-D const matching the last output
        # dim, the op has a single consumer and is not a model output.
        if ((not self._has_none_df) and ((op.type == MaceOp.Eltwise.name) and (ConverterUtil.get_arg(op, MaceKeyword.mace_element_type_str).i == EltwiseType.PROD.value)) and (len(op.input) == 2) and (op.input[1] in self._consts) and (op.output_shape[0].dims[(- 1):] == self._consts[op.input[1]].dims) and (self.consumer_count(op.output[0]) == 1) and (not self.is_op_output_node(op))):
            consumer_op = self._consumers[op.output[0]][0]
            # The consumer must be x + offset (Eltwise SUM or BiasAdd)
            # with a 1-D constant offset.
            if ((((consumer_op.type == MaceOp.Eltwise.name) and (ConverterUtil.get_arg(consumer_op, MaceKeyword.mace_element_type_str).i == EltwiseType.SUM.value)) or (consumer_op.type == MaceOp.BiasAdd.name)) and (len(consumer_op.input) == 2) and (consumer_op.input[1] in self._consts) and (len(self._consts[consumer_op.input[1]].dims) == 1)):
                print(('Fold batchnorm: %s(%s)' % (op.name, op.type)))
                consumer_op.type = MaceOp.BatchNorm.name
                consumer_op.input[:] = [op.input[0], op.input[1], consumer_op.input[1]]
                net.op.remove(op)
                return True
    return False
def fold_squared_diff_mean(self):
    """Fuse Eltwise(SQR_DIFF) + Reduce(MEAN over axes [1, 2], keep_dims) into SqrDiffMean.

    Returns True when the pair was fused so the driver re-runs the pass.
    """
    net = self._model
    for op in net.op:
        if ((op.type == MaceOp.Eltwise.name) and (len(op.input) == 2)):
            elt_type = ConverterUtil.get_arg(op, MaceKeyword.mace_element_type_str).i
            if ((elt_type == EltwiseType.SQR_DIFF.value) and (self.consumer_count(op.output[0]) == 1)):
                consumer_op = self._consumers[op.output[0]][0]
                if (consumer_op.type == MaceOp.Reduce.name):
                    axis = ConverterUtil.get_arg(consumer_op, MaceKeyword.mace_axis_str).ints
                    keep_dims = ConverterUtil.get_arg(consumer_op, MaceKeyword.mace_keepdims_str).i
                    reduce_type = ConverterUtil.get_arg(consumer_op, MaceKeyword.mace_reduce_type_str).i
                    # Only a mean over the spatial axes (1, 2) with kept
                    # dims matches the SqrDiffMean kernel.
                    if ((reduce_type == ReduceType.MEAN.value) and (len(consumer_op.input) == 1) and (axis[0] == 1) and (axis[1] == 2) and (keep_dims > 0)):
                        print(('Fold SquaredDiff Reduce: %s' % op.name))
                        op.type = MaceOp.SqrDiffMean.name
                        # The fused op takes over the reduce's output and shape.
                        op.output[0] = consumer_op.output[0]
                        del op.output_shape[0].dims[:]
                        op.output_shape[0].dims.extend(consumer_op.output_shape[0].dims)
                        self.replace_quantize_info(op, consumer_op)
                        self.safe_remove_node(consumer_op, op)
                        return True
    return False
def fold_moments(self):
    """Fuse a mean Reduce with its single SqrDiffMean consumer into one Moments op (HTP only).

    Matches a Reduce(MEAN over axes [1, 2], keep_dims) whose outputs feed
    exactly one SqrDiffMean consumer, then absorbs that consumer's outputs,
    shapes and quantize info into the Reduce op and retypes it as Moments.
    Returns True when the graph changed so the driver re-runs the pass.

    Fix: removed a stray debug ``print(consumer_op.type)`` that dumped the
    raw type of every consumer unconditionally while counting.
    """
    if (self._option.device != DeviceType.HTP.value):
        return False
    net = self._model
    for op in net.op:
        if (op.type == MaceOp.Reduce.name):
            axis = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str).ints
            keep_dims = ConverterUtil.get_arg(op, MaceKeyword.mace_keepdims_str).i
            reduce_type = ConverterUtil.get_arg(op, MaceKeyword.mace_reduce_type_str).i
            if ((reduce_type == ReduceType.MEAN.value) and (len(op.input) == 1) and (len(axis) >= 2) and (axis[0] == 1) and (axis[1] == 2) and (keep_dims > 0)):
                outputs = op.output
                # Count SqrDiffMean consumers first: fusion is only valid
                # when there is exactly one.
                sqr_diff_mean_count = 0
                for output in outputs:
                    consumer_ops = self._consumers[output]
                    for consumer_op in consumer_ops:
                        if (consumer_op.type == MaceOp.SqrDiffMean.name):
                            sqr_diff_mean_count += 1
                if (sqr_diff_mean_count == 1):
                    for output in outputs:
                        consumer_ops = self._consumers[output]
                        for consumer_op in consumer_ops:
                            if (consumer_op.type == MaceOp.SqrDiffMean.name):
                                print(('Fold Moments: %s' % op.name))
                                op.type = MaceOp.Moments.name
                                op.output.extend(consumer_op.output)
                                op.output_shape.extend(consumer_op.output_shape)
                                if (len(consumer_op.quantize_info) > 0):
                                    op.quantize_info.extend(consumer_op.quantize_info)
                                net.op.remove(consumer_op)
                                return True
    return False
def fold_embedding_lookup(self):
    """Fold Gather(const table) followed by a scalar Eltwise PROD into the table.

    The scalar multiplier is baked into the constant embedding weights and
    the multiply op is removed.

    Fix: unlike every sibling transform, the original never returned after
    mutating the graph (implicitly returning None), so the driver would not
    restart with rebuilt producer/consumer maps; now returns True on a fold
    and False otherwise, consistent with the other passes.
    """
    net = self._model
    for op in net.op:
        if ((op.type == MaceOp.Gather.name) and (self.consumer_count(op.output[0]) == 1)):
            consumer_op = self._consumers[op.output[0]][0]
            if ((consumer_op.type == MaceOp.Eltwise.name) and (ConverterUtil.get_arg(consumer_op, MaceKeyword.mace_element_type_str).i == EltwiseType.PROD.value) and (len(consumer_op.input) == 1) and (op.input[0] in self._consts) and (self.consumer_count(op.input[0]) == 1)):
                print(('Fold Gather and Mul: %s' % op.name))
                gather_weights = self._consts[op.input[0]]
                mul_weight = ConverterUtil.get_arg(consumer_op, MaceKeyword.mace_scalar_input_str).f
                # Bake the scalar multiplier into the embedding table.
                gather_weights.float_data[:] = [(float_data * mul_weight) for float_data in gather_weights.float_data]
                self.safe_remove_node(consumer_op, None, remove_input_tensor=True)
                return True
    return False
def transform_lstmcell_zerostate(self):
    """Replace TensorFlow BasicLSTMCellZeroState Fill+Concat subgraphs with a const tensor.

    The Fill's 2-D shape comes from a Concat of two scalar consts; a
    constant tensor of that shape, filled with the Fill value, replaces
    the pair, and matching consumer inputs are rewired to it.  Returns
    True when a replacement happened (implicitly None otherwise).
    """
    net = self._model
    zero_state_pattern = re.compile('^.*BasicLSTMCellZeroState_?[0-9]*/[a-zA-Z]+_?[0-9]*')
    for op in net.op:
        if ((op.type == MaceOp.Fill.name) and zero_state_pattern.match(op.name)):
            print('Transform lstm zerostate')
            concat_op = self._producer[op.input[0]]
            consumer_op = self._consumers[op.output[0]][0]
            # Both concat operands must be consts holding the two dims.
            if ((concat_op.input[0] not in self._consts) or (concat_op.input[1] not in self._consts)):
                continue
            dims = [self._consts[concat_op.input[0]].int32_data[0], self._consts[concat_op.input[1]].int32_data[0]]
            tensor_def = net.tensors.add()
            tensor_def.name = op.output[0].replace('/zeros', '/init_const')
            tensor_def.dims.extend(dims)
            tensor_def.data_type = self._consts[op.input[1]].data_type
            # Broadcast the scalar fill value over the dims[0]*dims[1] tensor.
            tensor_def.float_data.extend(([self._consts[op.input[1]].float_data[0]] * (dims[0] * dims[1])))
            for i in range(len(consumer_op.input)):
                # NOTE(review): [:-2] presumably strips a ':0' port suffix
                # before matching -- confirm against the producer naming.
                if zero_state_pattern.match(consumer_op.input[i][:(- 2)]):
                    consumer_op.input[i] = tensor_def.name
            # Drop the now-unused fill value and shape consts, then both ops.
            net.tensors.remove(self._consts[op.input[1]])
            net.tensors.remove(self._consts[concat_op.input[0]])
            net.tensors.remove(self._consts[concat_op.input[1]])
            net.op.remove(concat_op)
            net.op.remove(op)
            return True
def transform_basic_lstmcell(self):
    """Fold TensorFlow's unrolled basic_lstm_cell subgraph into one LSTMCell op (GPU only).

    Starting from the cell's concat op, walks the fixed consumer chain
    (concat -> matmul -> biasadd -> split -> four gate branches) collecting
    the weight/bias/state inputs and the cell/hidden outputs into a single
    MaceOp.LSTMCell, then deletes every op of the original subgraph.
    mace_check asserts verify the expected graph shape.  Returns True when
    a cell was folded.
    """
    if (self._option.device != DeviceType.GPU.value):
        return False
    net = self._model
    basic_lstm_concat_pattern = re.compile('^.*basic_lstm_cell_?[0-9]*/concat_?[0-9]*')
    for op in net.op:
        if ((op.type == MaceOp.Concat.name) and basic_lstm_concat_pattern.match(op.name)):
            print('Transform basic lstmcell')
            ops_to_delete = []
            ops_to_delete.extend([op])
            op_def = net.op.add()
            op_def.name = op.name.replace('/concat', '/folded_lstmcell')
            op_def.type = MaceOp.LSTMCell.name
            # NOTE(review): drops the concat's last arg (presumably the
            # concat axis) -- verify against the op definition.
            op_def.arg.extend(op.arg[:(- 1)])
            op_def.input.extend([op_input for op_input in op.input])
            # concat -> matmul: carries the cell weights.
            matmul_op = self._consumers[op.output[0]][0]
            ops_to_delete.extend([matmul_op])
            op_def.input.extend([matmul_op.input[1]])
            # matmul -> biasadd: carries the bias.
            biasadd_op = self._consumers[matmul_op.output[0]][0]
            ops_to_delete.extend([biasadd_op])
            op_def.input.extend([biasadd_op.input[1]])
            # biasadd -> split into the four gate branches.
            split_op = self._consumers[biasadd_op.output[0]][0]
            ops_to_delete.extend([split_op])
            input_gate_op = self._consumers[split_op.output[0]][0]
            ops_to_delete.extend([input_gate_op])
            new_input_tanh_op = self._consumers[split_op.output[1]][0]
            ops_to_delete.extend([new_input_tanh_op])
            forget_add_op = self._consumers[split_op.output[2]][0]
            ops_to_delete.extend([forget_add_op])
            output_gate_op = self._consumers[split_op.output[3]][0]
            ops_to_delete.extend([output_gate_op])
            mace_check((len(forget_add_op.input) == 1), 'Wrong LSTM format in forget gate inputs')
            # Carry the forget-gate bias scalar over to the fused op.
            for arg in forget_add_op.arg:
                if (arg.name == MaceKeyword.mace_scalar_input_str):
                    op_def.arg.extend([arg])
            remember_mul_op = self._consumers[input_gate_op.output[0]][0]
            ops_to_delete.extend([remember_mul_op])
            mace_check((remember_mul_op.name == self._consumers[new_input_tanh_op.output[0]][0].name), 'Wrong LSTM format in input sig & input tanh mul')
            forget_gate_op = self._consumers[forget_add_op.output[0]][0]
            ops_to_delete.extend([forget_gate_op])
            forget_mul_op = self._consumers[forget_gate_op.output[0]][0]
            ops_to_delete.extend([forget_mul_op])
            # The previous cell state feeds the forget multiply.
            op_def.input.extend([forget_mul_op.input[0]])
            remember_forget_add_op = self._consumers[remember_mul_op.output[0]][0]
            ops_to_delete.extend([remember_forget_add_op])
            mace_check((remember_forget_add_op.name == self._consumers[forget_mul_op.output[0]][0].name), 'Wrong LSTM format in add forget gate & remember mul')
            # First fused output: the new cell state.
            op_def.output.extend([remember_forget_add_op.output[0]])
            op_def.output_shape.extend(remember_forget_add_op.output_shape)
            for consumer in self._consumers[remember_forget_add_op.output[0]]:
                if ((consumer.type == MaceOp.Activation.name) and (consumer.name.find('basic_lstm_cell') > 0)):
                    cell_tanh_op = consumer
            ops_to_delete.extend([cell_tanh_op])
            final_mul_op = self._consumers[cell_tanh_op.output[0]][0]
            ops_to_delete.extend([final_mul_op])
            mace_check((final_mul_op.name == self._consumers[output_gate_op.output[0]][0].name), 'Wrong LSTM format in final mul')
            # Second fused output: the new hidden state.
            op_def.output.extend([final_mul_op.output[0]])
            op_def.output_shape.extend(final_mul_op.output_shape)
            for op_to_del in ops_to_delete:
                net.op.remove(op_to_del)
            return True
    return False
def fold_conv_and_bn(self):
    """Fold a BatchNorm that directly follows a Conv2D into the conv itself.

    The conv filter (and bias, if present) is scaled by the BN scale, the
    BN offset becomes (or is added into) the conv bias, and the BatchNorm
    op is removed.  Returns True when a fold happened.
    """
    net = self._model
    for op in net.op:
        if ((op.type == MaceOp.Conv2D.name) and (self.consumer_count(op.output[0]) == 1)):
            consumer_op = self._consumers[op.output[0]][0]
            input_len = len(op.input)
            # Fold only when any third input (bias) is a constant and the
            # filter has no other consumer.
            if ((consumer_op.type == MaceOp.BatchNorm.name) and ((input_len == 2) or ((input_len == 3) and (op.input[(- 1)] in self._consts))) and (len(self._consumers[op.input[1]]) == 1)):
                print(('Fold conv and bn: %s(%s)' % (op.name, op.type)))
                filter = self._consts[op.input[1]]
                scale = self._consts[consumer_op.input[1]]
                offset = self._consts[consumer_op.input[2]]
                idx = 0
                filter_format = self.filter_format()
                # Multiply every filter value by the scale of its output
                # channel; iteration order matches the flat storage layout.
                if (filter_format == DataFormat.HWIO):
                    for hwi in six.moves.range(((filter.dims[0] * filter.dims[1]) * filter.dims[2])):
                        for o in six.moves.range(filter.dims[3]):
                            filter.float_data[idx] *= scale.float_data[o]
                            idx += 1
                elif (filter_format == DataFormat.OIHW):
                    for o in six.moves.range(filter.dims[0]):
                        for hwi in six.moves.range(((filter.dims[1] * filter.dims[2]) * filter.dims[3])):
                            filter.float_data[idx] *= scale.float_data[o]
                            idx += 1
                else:
                    mace_check(False, ('filter format %s not supported' % filter_format))
                if (len(op.input) == 3):
                    # Existing conv bias: scale it and add the BN offset.
                    conv_bias = self._consts[op.input[2]]
                    for c in six.moves.range(conv_bias.dims[0]):
                        conv_bias.float_data[c] *= scale.float_data[c]
                        conv_bias.float_data[c] += offset.float_data[c]
                    net.tensors.remove(offset)
                else:
                    # No conv bias: reuse the BN offset tensor as the bias.
                    op.input.extend([consumer_op.input[2]])
                del consumer_op.input[:]
                net.tensors.remove(scale)
                self.replace_quantize_info(op, consumer_op)
                self.safe_remove_node(consumer_op, op)
                return True
    return False
def fold_deconv_and_bn(self):
    """Fold a BatchNorm following a (Depthwise)Deconv2D into the deconv.

    Same scheme as fold_conv_and_bn, but the expected input count depends
    on the source framework (Caffe: 2/3 inputs, TensorFlow: 3/4 -- TF
    deconvs carry an extra output-shape input), so the bias index varies.
    Returns True when a fold happened.
    """
    net = self._model
    for op in net.op:
        if ((op.type in [MaceOp.Deconv2D.name, MaceOp.DepthwiseDeconv2d]) and (self.consumer_count(op.output[0]) == 1)):
            consumer_op = self._consumers[op.output[0]][0]
            framework = ConverterUtil.get_arg(op, MaceKeyword.mace_framework_type_str).i
            input_len = len(op.input)
            if ((consumer_op.type == MaceOp.BatchNorm.name) and (((framework == FrameworkType.CAFFE.value) and ((input_len == 2) or ((input_len == 3) and (op.input[(- 1)] in self._consts)))) or ((framework == FrameworkType.TENSORFLOW.value) and ((input_len == 3) or ((input_len == 4) and (op.input[(- 1)] in self._consts))))) and (len(self._consumers[op.input[1]]) == 1)):
                print(('Fold deconv and bn: %s(%s)' % (op.name, op.type)))
                filter = self._consts[op.input[1]]
                scale = self._consts[consumer_op.input[1]]
                offset = self._consts[consumer_op.input[2]]
                idx = 0
                filter_format = self.filter_format()
                # Scale each filter value by its output channel's BN scale.
                # Note: for deconv filters the output-channel axis differs
                # from conv (HWIO: dims[2], OIHW: dims[1]).
                if (filter_format == DataFormat.HWIO):
                    for hw in six.moves.range((filter.dims[0] * filter.dims[1])):
                        for o in six.moves.range(filter.dims[2]):
                            for i in six.moves.range(filter.dims[3]):
                                filter.float_data[idx] *= scale.float_data[o]
                                idx += 1
                elif (filter_format == DataFormat.OIHW):
                    for i in six.moves.range(filter.dims[0]):
                        for o in six.moves.range(filter.dims[1]):
                            for hw in six.moves.range((filter.dims[2] * filter.dims[3])):
                                filter.float_data[idx] *= scale.float_data[o]
                                idx += 1
                else:
                    mace_check(False, ('filter format %s not supported' % filter_format))
                # Locate the bias input (if any) per framework convention.
                bias_dim = (- 1)
                if ((framework == FrameworkType.CAFFE.value) and (len(op.input) == 3)):
                    bias_dim = 2
                if ((framework == FrameworkType.TENSORFLOW.value) and (len(op.input) == 4)):
                    bias_dim = 3
                if (bias_dim != (- 1)):
                    # Existing bias: scale it and add the BN offset.
                    conv_bias = self._consts[op.input[bias_dim]]
                    for c in six.moves.range(conv_bias.dims[0]):
                        conv_bias.float_data[c] *= scale.float_data[c]
                        conv_bias.float_data[c] += offset.float_data[c]
                    net.tensors.remove(offset)
                else:
                    # No bias yet: reuse the BN offset tensor as the bias.
                    op.input.extend([consumer_op.input[2]])
                del consumer_op.input[:]
                net.tensors.remove(scale)
                self.replace_quantize_info(op, consumer_op)
                self.safe_remove_node(consumer_op, op)
                return True
    return False
def fold_depthwise_conv_and_bn(self):
    """Fold a BatchNorm following a DepthwiseConv2d into the conv.

    Same scheme as fold_conv_and_bn, but the BN scale index is computed
    from the (multiplier, channel) pair since depthwise output channels
    are in_channels * multiplier.  Returns True when a fold happened.
    """
    net = self._model
    for op in net.op:
        if ((op.type == MaceOp.DepthwiseConv2d.name) and (self.consumer_count(op.output[0]) == 1)):
            consumer_op = self._consumers[op.output[0]][0]
            input_len = len(op.input)
            if ((consumer_op.type == MaceOp.BatchNorm.name) and ((input_len == 2) or ((input_len == 3) and (op.input[(- 1)] in self._consts))) and (len(self._consumers[op.input[1]]) == 1)):
                print(('Fold depthwise conv and bn: %s(%s)' % (op.name, op.type)))
                filter = self._consts[op.input[1]]
                scale = self._consts[consumer_op.input[1]]
                offset = self._consts[consumer_op.input[2]]
                idx = 0
                filter_format = self.filter_format()
                # Depthwise: output channel = in_channel * multiplier, so
                # the scale index combines both loop indices.
                if (filter_format == DataFormat.HWIO):
                    for hw in six.moves.range((filter.dims[0] * filter.dims[1])):
                        for i in six.moves.range(filter.dims[2]):
                            for o in six.moves.range(filter.dims[3]):
                                filter.float_data[idx] *= scale.float_data[((i * filter.dims[3]) + o)]
                                idx += 1
                elif (filter_format == DataFormat.OIHW):
                    for o in six.moves.range(filter.dims[0]):
                        for i in six.moves.range(filter.dims[1]):
                            for hw in six.moves.range((filter.dims[2] * filter.dims[3])):
                                filter.float_data[idx] *= scale.float_data[((i * filter.dims[0]) + o)]
                                idx += 1
                else:
                    mace_check(False, ('filter format %s not supported' % filter_format))
                if (len(op.input) == 3):
                    # Existing bias: scale it and add the BN offset.
                    conv_bias = self._consts[op.input[2]]
                    for c in six.moves.range(conv_bias.dims[0]):
                        conv_bias.float_data[c] *= scale.float_data[c]
                        conv_bias.float_data[c] += offset.float_data[c]
                    net.tensors.remove(offset)
                else:
                    # No bias yet: reuse the BN offset tensor as the bias.
                    op.input.extend([consumer_op.input[2]])
                del consumer_op.input[:]
                net.tensors.remove(scale)
                self.replace_quantize_info(op, consumer_op)
                self.safe_remove_node(consumer_op, op)
                return True
    return False
@staticmethod
def sort_feature_map_shape(shape, data_format):
    """Return (batch, height, width, channels) for *shape* in *data_format*.

    NHWC shapes are read as (N, H, W, C); any other format is treated as
    NCHW.  Declared @staticmethod because call sites invoke it as
    ``self.sort_feature_map_shape(input_shape, df)`` passing the shape
    explicitly -- without the decorator the instance would bind to
    *shape* and the call would break.
    """
    batch = shape[0]
    if (data_format == DataFormat.NHWC):
        height = shape[1]
        width = shape[2]
        channels = shape[3]
    else:
        height = shape[2]
        width = shape[3]
        channels = shape[1]
    return (batch, height, width, channels)
@staticmethod
def sort_filter_shape(filter_shape, filter_format):
    """Return (filter_height, filter_width, in_channels, out_channels) for *filter_shape*.

    Supports HWIO, OIHW and HWOI layouts; anything else fails via
    mace_check.  Declared @staticmethod because call sites invoke it as
    ``self.sort_filter_shape(filter_shape, fmt)`` with both arguments
    explicit -- without the decorator the instance would bind to
    *filter_shape* and the call would break.
    """
    if (filter_format == DataFormat.HWIO):
        filter_height = filter_shape[0]
        filter_width = filter_shape[1]
        in_channels = filter_shape[2]
        out_channels = filter_shape[3]
    elif (filter_format == DataFormat.OIHW):
        filter_height = filter_shape[2]
        filter_width = filter_shape[3]
        in_channels = filter_shape[1]
        out_channels = filter_shape[0]
    elif (filter_format == DataFormat.HWOI):
        filter_height = filter_shape[0]
        filter_width = filter_shape[1]
        in_channels = filter_shape[3]
        out_channels = filter_shape[2]
    else:
        mace_check(False, ('filter format %s not supported' % filter_format))
    return (filter_height, filter_width, in_channels, out_channels)
def transform_add_to_biasadd(self):
    """Rewrite an element-wise SUM with a 1-D const second operand as BiasAdd.

    Skipped on HTP.  Returns True when an op was retyped.
    """
    if self._option.device == DeviceType.HTP.value:
        return False
    if self._has_none_df:
        return False
    for op in self._model.op:
        if op.type != 'Eltwise':
            continue
        elt_type = ConverterUtil.get_arg(op, MaceKeyword.mace_element_type_str).i
        if elt_type != EltwiseType.SUM.value or len(op.input) != 2:
            continue
        bias = self._consts.get(op.input[1])
        if bias is None or len(bias.dims) != 1:
            continue
        print('Transform add to biasadd: %s(%s)' % (op.name, op.type))
        op.type = MaceOp.BiasAdd.name
        return True
    return False
def replace_quantize_info(self, op, replace_op):
    """Copy replace_op's quantize_info onto op and refresh the activation map.

    No-op when replace_op carries no quantize info.
    """
    if not replace_op.quantize_info:
        return
    del op.quantize_info[:]
    op.quantize_info.extend(replace_op.quantize_info)
    # Keep the per-output-tensor activation table in sync with the new info.
    for i, info in enumerate(op.quantize_info):
        self._quantize_activation_info[op.output[i]] = info
def fold_biasadd(self):
    """Absorb a following BiasAdd (or, on HTP, an Eltwise SUM) into conv/FC/deconv ops.

    The producing op must not yet carry a bias input (the expected input
    count depends on op type and, for deconv, on the source framework)
    and must have exactly one consumer.  The fused op takes over the
    consumer's name and output.  Returns True when a bias was folded.
    """
    net = self._model
    for op in net.op:
        framework = ConverterUtil.get_arg(op, MaceKeyword.mace_framework_type_str).i
        if (((((op.type == MaceOp.Conv2D.name) or (op.type == MaceOp.DepthwiseConv2d.name) or (op.type == MaceOp.FullyConnected.name)) and (len(op.input) == 2)) or ((op.type == MaceOp.Deconv2D.name) and (((framework == FrameworkType.CAFFE.value) and (len(op.input) == 2)) or ((framework == FrameworkType.TENSORFLOW.value) and (len(op.input) == 3))))) and (len(self._consumers.get(op.output[0], [])) == 1)):
            consumer_op = self._consumers[op.output[0]][0]
            is_add = False
            # HTP has no BiasAdd op; a plain element-wise SUM counts instead.
            if ((consumer_op.type == MaceOp.Eltwise.name) and (self._option.device == DeviceType.HTP.value)):
                is_add = (ConverterUtil.get_arg(consumer_op, MaceKeyword.mace_element_type_str).i == EltwiseType.SUM.value)
            if ((consumer_op.type == MaceOp.BiasAdd.name) or is_add):
                print(('Fold biasadd: %s(%s)' % (op.name, op.type)))
                op.name = consumer_op.name
                op.output[0] = consumer_op.output[0]
                op.input.append(consumer_op.input[1])
                self.replace_quantize_info(op, consumer_op)
                self.safe_remove_node(consumer_op, op)
                return True
    return False
def flatten_atrous_conv(self):
    """Collapse SpaceToBatchND + (Depthwise)Conv2D + BatchToSpaceND into a dilated conv.

    Only applied on GPU/APU/HTA/HTP (returns None on other devices).  The
    batch block sizes become the conv's dilations, padding/stride args are
    filled in, the conv takes over the BatchToSpaceND's output and name,
    and both space/batch ops are removed.  Returns True when flattened.
    """
    if ((self._option.device != DeviceType.GPU.value) and (self._option.device != DeviceType.APU.value) and (self._option.device != DeviceType.HTA.value) and (self._option.device != DeviceType.HTP.value)):
        return
    net = self._model
    for op in net.op:
        if ((op.type == MaceOp.SpaceToBatchND.name) and (len(self._consumers.get(op.output[0], [])) == 1)):
            conv_op = self._consumers.get(op.output[0])[0]
            if (((conv_op.type == MaceOp.Conv2D.name) or (conv_op.type == MaceOp.DepthwiseConv2d.name)) and (len(self._consumers.get(conv_op.output[0], [])) == 1)):
                b2s_op = self._consumers.get(conv_op.output[0])[0]
                if (b2s_op.type == MaceOp.BatchToSpaceND.name):
                    six.print_('Flatten atrous convolution')
                    padding_arg_values = ConverterUtil.get_arg(op, MaceKeyword.mace_paddings_str).ints
                    blocks_arg_values = ConverterUtil.get_arg(b2s_op, MaceKeyword.mace_space_batch_block_shape_str).ints
                    dilation_arg = ConverterUtil.get_arg(conv_op, MaceKeyword.mace_dilations_str)
                    if (dilation_arg is None):
                        dilation_arg = conv_op.arg.add()
                        dilation_arg.name = MaceKeyword.mace_dilations_str
                    # The space/batch block sizes are exactly the dilations.
                    dilation_arg.ints[:] = blocks_arg_values
                    padding_arg = ConverterUtil.get_arg(conv_op, MaceKeyword.mace_padding_str)
                    if (padding_arg is None):
                        padding_arg = conv_op.arg.add()
                        padding_arg.name = MaceKeyword.mace_padding_str
                    # Non-zero padding on the SpaceToBatchND implies SAME.
                    if ((len(padding_arg_values) > 0) and (padding_arg_values[0] > 0)):
                        padding_arg.i = PaddingMode.SAME.value
                    strides_arg = ConverterUtil.get_arg(conv_op, MaceKeyword.mace_strides_str)
                    if (strides_arg is None):
                        strides_arg = conv_op.arg.add()
                        strides_arg.name = MaceKeyword.mace_strides_str
                    strides_arg.ints[:] = [1, 1]
                    # The conv takes over the BatchToSpaceND's shape/output/name.
                    conv_op.output_shape[0].dims[:] = b2s_op.output_shape[0].dims[:]
                    conv_op.output[0] = b2s_op.output[0]
                    conv_op.name = b2s_op.name
                    self.safe_remove_node(op, None)
                    self.replace_quantize_info(b2s_op, conv_op)
                    self.safe_remove_node(b2s_op, conv_op)
                    return True
    return False
def fold_activation(self):
    """Fold a following Activation op into conv/FC/norm producers.

    Skipped entirely on HTP (returns None).  On APU only RELU/RELUX are
    foldable; elsewhere everything except PRELU is.  When quantizing,
    non-RELU-family activations are left alone.  The fused op takes over
    the activation's name, output and relevant args.  Returns True on a
    fold.
    """
    if (self._option.device == DeviceType.HTP.value):
        return
    net = self._model
    for op in net.op:
        if (((op.type == MaceOp.Conv2D.name) or (op.type == MaceOp.Deconv2D.name) or (op.type == MaceOp.DepthwiseConv2d.name) or (op.type == MaceOp.FullyConnected.name) or (op.type == MaceOp.BatchNorm.name) or (op.type == MaceOp.InstanceNorm.name)) and (len(self._consumers.get(op.output[0], [])) == 1)):
            consumer_op = self._consumers[op.output[0]][0]
            fold_consumer = False
            if (consumer_op.type == MaceOp.Activation.name):
                act_type_arg = ConverterUtil.get_arg(consumer_op, MaceKeyword.mace_activation_type_str)
                act_type = act_type_arg.s.decode()
                if (self._option.device == DeviceType.APU.value):
                    fold_consumer = (act_type in [ActivationType.RELU.name, ActivationType.RELUX.name])
                else:
                    fold_consumer = (act_type != ActivationType.PRELU.name)
                # Quantized models only fold RELU-family activations.
                if ((self._option.quantize_stat or self._option.quantize) and (act_type not in [ActivationType.RELU.name, ActivationType.RELUX.name])):
                    continue
            if fold_consumer:
                print(('Fold activation: %s(%s)' % (op.name, op.type)))
                op.name = consumer_op.name
                op.output[0] = consumer_op.output[0]
                # Copy the activation's parameter args onto the fused op.
                for arg in consumer_op.arg:
                    if ((arg.name == MaceKeyword.mace_activation_type_str) or (arg.name == MaceKeyword.mace_activation_max_limit_str) or (arg.name == MaceKeyword.mace_activation_coefficient_str) or (arg.name == MaceKeyword.mace_hardsigmoid_alpha_str) or (arg.name == MaceKeyword.mace_hardsigmoid_beta_str)):
                        op.arg.extend([arg])
                self.replace_quantize_info(op, consumer_op)
                self.safe_remove_node(consumer_op, op)
                return True
    return False
def transform_global_conv_to_fc(self):
    """Retype a Conv2D whose kernel covers the whole input (zero padding) as FullyConnected.

    Always returns False: ops are only retyped in place, so the pass never
    needs re-running.
    """
    net = self._model
    for op in net.op:
        if ((op.type == MaceOp.Conv2D.name) and (len(op.input) >= 2) and (op.input[1] in self._consts) and (op.input[0] in self._producer)):
            producer = self._producer[op.input[0]]
            input_shape = producer.output_shape[0].dims
            (batch, height, width, channels) = self.sort_feature_map_shape(input_shape, ConverterUtil.data_format(producer))
            filter = self._consts[op.input[1]]
            filter_shape = filter.dims
            (filter_height, filter_width, in_channels, out_channels) = self.sort_filter_shape(filter_shape, self.filter_format())
            # The conv acts as an FC only with zero padding: explicit VALID
            # mode, or all-zero padding values when no mode arg is set.
            zero_padding = True
            padding_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_padding_str)
            if (padding_arg is not None):
                if (padding_arg.i != PaddingMode.VALID.value):
                    zero_padding = False
            else:
                padding_value_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_padding_values_str)
                if (padding_value_arg is not None):
                    if (not all(((v == 0) for v in padding_value_arg.ints))):
                        zero_padding = False
            # Kernel must span the full spatial extent and the filter must
            # not be shared with another op.
            if ((height == filter_height) and (width == filter_width) and zero_padding and (len(self._consumers[op.input[1]]) == 1)):
                print(('transform global conv to fc %s(%s)' % (op.name, op.type)))
                op.type = MaceOp.FullyConnected.name
    return False
def reshape_fc_weight(self):
    """Expand 2-D FullyConnected weights to 4-D to match the input shape.

    Skipped on APU/HTP (returns None there); always returns False
    otherwise.  The weight keeps its output dim and adopts the input's
    trailing dims; for 2-D inputs unit spatial dims are padded in per the
    filter format.
    """
    if (self._option.device in [DeviceType.APU.value, DeviceType.HTP.value]):
        return
    net = self._model
    filter_format = self.filter_format()
    for op in net.op:
        if (op.type == MaceOp.FullyConnected.name):
            weight = self._consts[op.input[1]]
            if (len(weight.dims) == 2):
                print('Reshape fully connected weight shape')
                input_op = self._producer[op.input[0]]
                input_shape = list(input_op.output_shape[0].dims)
                # Keep the output dim, adopt the input's trailing dims.
                weight.dims[:] = ([weight.dims[0]] + input_shape[1:])
                if (len(input_shape) == 2):
                    # 2-D input: pad unit spatial dims per filter format.
                    if (filter_format == DataFormat.HWIO):
                        weight.dims[:] = ([1, 1] + weight.dims[:])
                    elif (filter_format == DataFormat.OIHW):
                        weight.dims[:] = (weight.dims[:] + [1, 1])
                    else:
                        mace_check(False, ('FC does not support filter format %s' % filter_format.name))
    return False
def add_winograd_arg(self):
    """Attach the configured winograd argument to every Conv2D op.

    Does nothing when no winograd setting is configured.  Always returns
    False.
    """
    wino = self._wino_arg
    if wino == 0:
        return False
    for op in self._model.op:
        if op.type != MaceOp.Conv2D.name:
            continue
        arg = op.arg.add()
        arg.name = MaceKeyword.mace_wino_arg_str
        arg.i = wino
    return False
def transpose_matmul_weight(self):
    """On CPU, pre-transpose constant 2-D MatMul right-hand weights.

    Sets/flips the transpose_b arg and physically transposes each const
    tensor once (shared weights are tracked in *transposed_weights* so a
    tensor is never transposed twice even when several MatMuls share it).
    Returns False when not on CPU; otherwise falls through returning None.
    """
    if (self._option.device != DeviceType.CPU.value):
        return False
    net = self._model
    transposed_weights = []
    for op in net.op:
        if (op.type == MaceOp.MatMul.name):
            rhs = op.input[1]
            if ((rhs in self._consts) and (len(self._consts[rhs].dims) == 2)):
                arg = ConverterUtil.get_arg(op, MaceKeyword.mace_transpose_b_str)
                if (arg is None):
                    arg = op.arg.add()
                    arg.name = MaceKeyword.mace_transpose_b_str
                    arg.i = 0
                # Only transpose when the op was not already marked.
                if (arg.i == 0):
                    arg.i = 1
                    if (rhs not in transposed_weights):
                        filter = self._consts[rhs]
                        filter_data = np.array(filter.float_data).reshape(filter.dims)
                        filter_data = filter_data.transpose(1, 0)
                        filter.float_data[:] = filter_data.flat
                        filter.dims[:] = filter_data.shape
                        transposed_weights.append(rhs)
                        six.print_('Transpose matmul weight to shape:', filter.dims)
def transpose_filters(self):
    """Transpose constant filters into the layout the target device expects.

    Three mutually exclusive branches:
      - quantized CPU / APU (non-int8): conv-family filters -> OHWI;
      - HEXAGON / HTA / HTP: filters -> HWIO/HWIM-style layouts;
      - otherwise (float CPU/GPU): HWIO filters -> OIHW/MIHW, then deconv
        filters get their O and I axes swapped.
    Already-processed tensor names are tracked in *transposed_filter* /
    *transposed_deconv_filter* so shared filters are transposed only once.
    Always returns False.
    """
    net = self._model
    filter_format = self.filter_format()
    transposed_filter = set()
    transposed_deconv_filter = set()
    if (((self._option.quantize and (self._option.device == DeviceType.CPU.value)) or (self._option.device == DeviceType.APU.value)) and (not (self._option.quantize_schema == MaceKeyword.mace_int8))):
        print('Transpose filters to OHWI')
        if (filter_format == DataFormat.HWIO):
            transpose_order = [3, 0, 1, 2]
        elif (filter_format == DataFormat.OIHW):
            transpose_order = [0, 2, 3, 1]
        else:
            mace_check(False, ('Quantize model does not support conv filter format: %s' % filter_format.name))
        for op in net.op:
            if (((op.type == MaceOp.Conv2D.name) or (op.type == MaceOp.Deconv2D.name) or ((op.type == MaceOp.DepthwiseConv2d.name) and (self._option.device == DeviceType.APU.value)) or ((op.type == MaceOp.FullyConnected.name) and (len(self._consts[op.input[1]].dims) == 4))) and (op.input[1] not in transposed_filter)):
                # Conv-family filters -> OHWI.
                filter = self._consts[op.input[1]]
                filter_data = np.array(filter.float_data).reshape(filter.dims)
                filter_data = filter_data.transpose(transpose_order)
                filter.float_data[:] = filter_data.flat
                filter.dims[:] = filter_data.shape
                transposed_filter.add(op.input[1])
            elif ((op.type == MaceOp.DepthwiseConv2d.name) and (filter_format == DataFormat.OIHW) and (self._option.device == DeviceType.CPU.value) and (op.input[1] not in transposed_filter)):
                # Depthwise on quantized CPU keeps its own layout (2,3,1,0).
                filter = self._consts[op.input[1]]
                filter_data = np.array(filter.float_data).reshape(filter.dims)
                filter_data = filter_data.transpose(2, 3, 1, 0)
                filter.float_data[:] = filter_data.flat
                filter.dims[:] = filter_data.shape
                transposed_filter.add(op.input[1])
        for op in net.op:
            # Deconv filters get a second, deconv-specific reordering.
            if ((op.type == MaceOp.Deconv2D.name) and (op.input[1] not in transposed_deconv_filter)):
                filter = self._consts[op.input[1]]
                filter_data = np.array(filter.float_data).reshape(filter.dims)
                filter_data = filter_data.transpose(3, 1, 2, 0)
                filter.float_data[:] = filter_data.flat
                filter.dims[:] = filter_data.shape
                transposed_deconv_filter.add(op.input[1])
        self.set_filter_format(DataFormat.OHWI)
    elif ((self._option.device == DeviceType.HEXAGON.value) or (self._option.device == DeviceType.HTA.value) or (self._option.device == DeviceType.HTP.value)):
        print('Transpose filters to HWIO/HWIM')
        for op in net.op:
            has_data_format = (ConverterUtil.data_format(op) == DataFormat.AUTO)
            if (has_data_format and ((op.type == MaceOp.Eltwise.name) or (op.type == MaceOp.Concat.name))):
                # Constant operands of data-formatted eltwise/concat ops
                # must follow the activation layout.
                for i in range(len(op.input)):
                    if ((op.input[i] in self._consts) and (op.input[i] not in transposed_filter)):
                        filter = self._consts[op.input[i]]
                        filter_data = np.array(filter.float_data).reshape(filter.dims)
                        # Broadcast 1-D consts are expanded to 4-D first.
                        if ((len(filter_data.shape) == 1) and (len(op.output_shape[0].dims) == 4)):
                            filter_data = np.array(filter.float_data).reshape([1, 1, 1, filter.dims[0]])
                        # NOTE(review): only OIHW sources are supported here;
                        # other formats deliberately fail below.
                        if (filter_format == DataFormat.OIHW):
                            filter_data = filter_data.transpose(0, 2, 3, 1)
                        else:
                            print(op.type, op.name, filter_format)
                            mace_check(False, 'Unsupported filter format.')
                        filter.float_data[:] = filter_data.flat
                        filter.dims[:] = filter_data.shape
                        transposed_filter.add(op.input[i])
            if ((filter_format == DataFormat.OIHW) and ((op.type == MaceOp.Conv2D.name) or ((op.type == MaceOp.DepthwiseConv2d.name) and ((self._option.device == DeviceType.HEXAGON.value) or (self._option.device == DeviceType.HTA.value))) or ((op.type == MaceOp.FullyConnected.name) and (len(self._consts[op.input[1]].dims) == 4))) and (op.input[1] in self._consts) and (op.input[1] not in transposed_filter)):
                # OIHW -> HWIO.
                filter = self._consts[op.input[1]]
                filter_data = np.array(filter.float_data).reshape(filter.dims)
                filter_data = filter_data.transpose(2, 3, 1, 0)
                filter.float_data[:] = filter_data.flat
                filter.dims[:] = filter_data.shape
                transposed_filter.add(op.input[1])
            if (((op.type == MaceOp.Deconv2D.name) or (op.type == MaceOp.DepthwiseDeconv2d.name)) and (op.input[1] in self._consts) and (op.input[1] not in transposed_deconv_filter)):
                # Deconv filters use their own per-format permutations
                # (HTP differs from HEXAGON/HTA for OIHW sources).
                filter = self._consts[op.input[1]]
                filter_data = np.array(filter.float_data).reshape(filter.dims)
                if (filter_format == DataFormat.HWIO):
                    filter_data = filter_data.transpose(2, 0, 1, 3)
                elif (filter_format == DataFormat.OIHW):
                    if (self._option.device == DeviceType.HTP.value):
                        filter_data = filter_data.transpose(2, 3, 0, 1)
                    else:
                        filter_data = filter_data.transpose(1, 2, 3, 0)
                else:
                    mace_check(False, 'Unsupported filter format.')
                filter.float_data[:] = filter_data.flat
                filter.dims[:] = filter_data.shape
                transposed_deconv_filter.add(op.input[1])
            if ((op.type == MaceOp.DepthwiseConv2d.name) and (self._option.device == DeviceType.HTP.value)):
                # HTP depthwise wants HWIM: HWIO only folds I*O into one
                # axis (pure reshape); OIHW is physically transposed.
                filter = self._consts[op.input[1]]
                dims = filter.dims[:]
                if (filter_format == DataFormat.HWIO):
                    filter.dims[:] = [dims[0], dims[1], 1, (dims[2] * dims[3])]
                elif (filter_format == DataFormat.OIHW):
                    filter_data = np.array(filter.float_data).reshape(dims)
                    filter_data = filter_data.transpose(2, 3, 0, 1)
                    filter.float_data[:] = filter_data.flat
                    filter.dims[:] = filter_data.shape
                else:
                    mace_check(False, 'Unsupported filter format.')
                transposed_filter.add(op.input[1])
    else:
        if (filter_format == DataFormat.HWIO):
            for op in net.op:
                if (((op.type == MaceOp.Conv2D.name) or (op.type == MaceOp.Deconv2D.name) or (op.type == MaceOp.DepthwiseConv2d.name)) and (op.input[1] in self._consts) and (op.input[1] not in transposed_filter)):
                    print('Transpose Conv2D/Deconv2D filters to OIHW/MIHW')
                    filter = self._consts[op.input[1]]
                    filter_data = np.array(filter.float_data).reshape(filter.dims)
                    filter_data = filter_data.transpose(3, 2, 0, 1)
                    filter.float_data[:] = filter_data.flat
                    filter.dims[:] = filter_data.shape
                    transposed_filter.add(op.input[1])
                if ((op.type == MaceOp.MatMul.name) and (ConverterUtil.get_arg(op, MaceKeyword.mace_winograd_filter_transformed) is not None) and (op.input[1] not in transposed_filter)):
                    # Winograd-transformed filters live in op.input[0].
                    print('Transpose Winograd filters to OIHW/MIHW')
                    filter = self._consts[op.input[0]]
                    filter_data = np.array(filter.float_data).reshape(filter.dims)
                    filter_data = filter_data.transpose(3, 2, 0, 1)
                    filter.float_data[:] = filter_data.flat
                    filter.dims[:] = filter_data.shape
                    transposed_filter.add(op.input[0])
                if ((op.type == MaceOp.FullyConnected.name) and (op.input[1] not in transposed_filter)):
                    weight = self._consts[op.input[1]]
                    # Only 4-D FC weights carry a spatial layout to fix.
                    if (len(weight.dims) == 4):
                        print('Transpose FullyConnected filters to OIHW/MIHW')
                        weight_data = np.array(weight.float_data).reshape(weight.dims)
                        weight_data = weight_data.transpose(3, 2, 0, 1)
                        weight.float_data[:] = weight_data.flat
                        weight.dims[:] = weight_data.shape
                        transposed_filter.add(op.input[1])
            self.set_filter_format(DataFormat.OIHW)
        # Deconv weights store channels reversed; swap the O and I axes.
        for op in net.op:
            if ((op.type in [MaceOp.Deconv2D.name, MaceOp.DepthwiseDeconv2d]) and (op.input[1] in self._consts) and (op.input[1] not in transposed_deconv_filter)):
                filter = self._consts[op.input[1]]
                filter_data = np.array(filter.float_data).reshape(filter.dims)
                filter_data = filter_data.transpose(1, 0, 2, 3)
                filter.float_data[:] = filter_data.flat
                filter.dims[:] = filter_data.shape
                transposed_deconv_filter.add(op.input[1])
    return False
def fold_reshape(self):
    """Fold Reshape -> Softmax (-> Reshape/Shape) chains.

    When a Softmax with a rank-2 output consumes a Reshape whose own input
    is rank-4, the Softmax is rewired to operate on the original 4-D tensor
    and the surrounding Reshape ops are removed.

    Returns:
        True if a fold was performed (callers re-run the pass), else False.
    """
    net = self._model
    for op in net.op:
        if (op.type == MaceOp.Softmax.name):
            should_fold = False
            # Candidate pattern: producer is a Reshape and this Softmax
            # emits a rank-2 tensor.
            if ((op.input[0] in self._producer) and (self._producer[op.input[0]].type == MaceOp.Reshape.name) and (len(op.output_shape[0].dims) == 2)):
                producer = self._producer[op.input[0]]
                reshape_input_rank = len(self.get_tensor_shape(producer.input[0]))
                # Only fold when the Reshape flattened a rank-4 tensor.
                if (reshape_input_rank == 4):
                    should_fold = True
            if should_fold:
                print(('Fold reshape and softmax: %s(%s)' % (op.name, op.type)))
                producer = self._producer[op.input[0]]
                # Softmax now outputs the original (pre-reshape) 4-D shape.
                op.output_shape[0].dims[:] = self.get_tensor_shape(producer.input[0])
                if (op.output[0] in self._consumers):
                    consumer = self._consumers[op.output[0]][0]
                    # Drop the trailing Reshape that restored the 4-D
                    # layout, together with its Shape producer if present.
                    if (len(consumer.input) > 1):
                        if ((consumer.input[1] in self._producer) and (self._producer[consumer.input[1]].type == 'Shape')):
                            self.safe_remove_node(self._producer[consumer.input[1]], None, remove_input_tensor=True)
                    self.safe_remove_node(consumer, op, remove_input_tensor=True)
                # Remove the leading Reshape, splicing Softmax onto its input.
                self.safe_remove_node(producer, self._producer.get(producer.input[0], None), remove_input_tensor=True)
                return True
    return False
def is_after_fc(self, op):
    """Walk producers upward from `op`, skipping Activation/BiasAdd ops.

    Returns True if the walk reaches a FullyConnected op, False if it
    reaches any other op type or runs out of known producers.
    """
    passthrough = (MaceOp.Activation.name, MaceOp.BiasAdd.name)
    cur = op
    while cur.input[0] in self._producer:
        upstream = self._producer[cur.input[0]]
        if upstream.type == MaceOp.FullyConnected.name:
            return True
        if upstream.type not in passthrough:
            return False
        # Activation/BiasAdd are transparent; keep walking upward.
        cur = upstream
    return False
def transform_matmul_to_fc(self):
    """Convert eligible MatMul (and Reshape+MatMul) patterns to FullyConnected.

    Three cases:
      * APU/HTP devices: a non-transposed 2-D MatMul with a const weight
        becomes an FC (its weight transposed to OI order); on HTP a
        preceding Reshape is also folded away.
      * HTP + Reshape(4D->2D) feeding only MatMuls: remove the Reshape and
        convert every consumer MatMul to FC, reshaping the weights to match
        the 4-D input.
      * Generic TF/PyTorch/ONNX: a 2-D MatMul that sits after an FC chain
        becomes an FC with its weight dims padded to rank 4.

    Returns:
        True when a conversion was performed (pass is re-run), else False.
    """
    net = self._model
    filter_format = self.filter_format()
    for op in net.op:
        is_htp = (self._option.device == DeviceType.HTP.value)
        if ((self._option.device == DeviceType.APU.value) or is_htp):
            if (op.type == MaceOp.MatMul.name):
                transpose_a_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_transpose_a_str)
                transpose_b_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_transpose_b_str)
                transpose_a = ((transpose_a_arg is not None) and (transpose_a_arg.i == 1))
                transpose_b = ((transpose_b_arg is not None) and (transpose_b_arg.i == 1))
                # Only plain (non-transposed) 2-D x 2-D MatMuls with a
                # const RHS qualify.
                if ((transpose_a is False) and (transpose_b is False) and (op.input[1] in self._consts) and (len(self.get_tensor_shape(op.input[0])) == 2) and (len(self.get_tensor_shape(op.input[1])) == 2)):
                    if (op.input[0] in self._producer):
                        product_op = self._producer[op.input[0]]
                        # On HTP a leading Reshape is folded away and every
                        # MatMul it fed is converted in one go.
                        if (is_htp and (product_op.type == MaceOp.Reshape.name)):
                            consumers = self._consumers[product_op.output[0]]
                            print('convert reshape and matmul to fc')
                            self.safe_remove_node(product_op, None, remove_input_tensor=True)
                            for matmul_op in consumers:
                                matmul_op.type = MaceOp.FullyConnected.name
                                # FC expects the weight transposed to OI.
                                filter = self._consts[matmul_op.input[1]]
                                filter_data = np.array(filter.float_data).reshape(filter.dims)
                                filter_data = filter_data.transpose(1, 0)
                                filter.float_data[:] = filter_data.flat
                                filter.dims[:] = filter_data.shape
                                six.print_('Transpose matmul weight to shape:', filter.dims)
                            return True
                    op.type = MaceOp.FullyConnected.name
                    filter = self._consts[op.input[1]]
                    filter_data = np.array(filter.float_data).reshape(filter.dims)
                    filter_data = filter_data.transpose(1, 0)
                    filter.float_data[:] = filter_data.flat
                    filter.dims[:] = filter_data.shape
                    six.print_('Transpose matmul weight to shape:', filter.dims)
                    return True
            # APU/HTP ops are fully handled above; skip the generic paths.
            continue
        framework = ConverterUtil.framework_type(net)
        is_torch = (framework == FrameworkType.PYTORCH.value)
        is_tf = (framework == FrameworkType.TENSORFLOW.value)
        is_onnx = (framework == FrameworkType.ONNX.value)
        # HTP: Reshape(4D -> [N, C*H*W]) whose consumers are all MatMuls.
        if (is_htp and (op.type == MaceOp.Reshape.name) and (len(op.input) == 2) and (op.input[1] in self._consts) and (len(op.output_shape[0].dims) == 2) and (is_tf or is_torch or is_onnx) and (op.input[0] in self._producer) and (op.output[0] in self._consumers)):
            input_op = self._producer[op.input[0]]
            input_shape = input_op.output_shape[0].dims
            if ((len(input_shape) == 4) and (np.prod(input_shape[1:]) == op.output_shape[0].dims[1])):
                is_fc = True
                consumers = self._consumers[op.output[0]]
                # Every consumer must be a MatMul with a matching 2-D
                # const weight (axis convention differs by framework).
                for matmul_op in consumers:
                    if (matmul_op.type != MaceOp.MatMul.name):
                        is_fc = False
                    else:
                        weight = self._consts[matmul_op.input[1]]
                        od = op.output_shape[0].dims
                        wd = weight.dims
                        if (len(wd) != 2):
                            is_fc = False
                        if ((is_tf and (wd[0] != od[1])) or ((is_torch or is_onnx) and (wd[1] != od[1]))):
                            is_fc = False
                if is_fc:
                    print('convert reshape and matmul to fc')
                    self.safe_remove_node(op, None, remove_input_tensor=True)
                    for matmul_op in consumers:
                        weight = self._consts[matmul_op.input[1]]
                        matmul_op.type = MaceOp.FullyConnected.name
                        weight_data = np.array(weight.float_data).reshape(weight.dims)
                        if is_tf:
                            # TF weight is [in, out]: expand in = H*W*C.
                            weight.dims[:] = (input_shape[1:] + [weight_data.shape[1]])
                        if (is_torch or is_onnx):
                            in_data_format = ConverterUtil.data_format(input_op)
                            if (in_data_format == DataFormat.NCHW):
                                size = (input_shape[2] * input_shape[3])
                                # Message fixed: fires when dims are NOT divisible.
                                mace_check(((weight.dims[1] % size) == 0), 'Reshape dims of input must be divisible by dims of output')
                                weight.dims[1] = (weight.dims[1] // size)
                                weight.dims.extend(input_shape[2:])
                            else:
                                size = (input_shape[1] * input_shape[2])
                                # Message fixed: fires when dims are NOT divisible.
                                mace_check(((weight.dims[1] % size) == 0), 'Reshape dims of input must be divisible by dims of output')
                                weight.dims[1] = (weight.dims[1] // size)
                                # NOTE(review): extends with input_shape[1:2]
                                # (one dim) while the NCHW branch extends two
                                # dims — possibly should be input_shape[1:3];
                                # confirm before changing.
                                weight.dims.extend(input_shape[1:2])
                    return True
        # Generic: a 2-D MatMul directly after an FC chain becomes an FC.
        if ((op.type == MaceOp.MatMul.name) and (is_tf or is_torch or is_onnx) and (op.input[1] in self._consts)):
            producer = self._producer[op.input[0]]
            weight = self._consts[op.input[1]]
            if ((len(weight.dims) == 2) and self.is_after_fc(op) and (len(producer.output_shape[0].dims) == 2) and ((is_tf and (weight.dims[0] == producer.output_shape[0].dims[1])) or (is_torch and (weight.dims[1] == producer.output_shape[0].dims[1])) or (is_onnx and (weight.dims[1] == producer.output_shape[0].dims[1])))):
                six.print_('convert matmul to fc')
                op.type = MaceOp.FullyConnected.name
                weight_data = np.array(weight.float_data).reshape(weight.dims)
                # FC weights are rank-4; pad with leading/trailing 1x1.
                if is_tf:
                    weight.dims[:] = ([1, 1] + list(weight_data.shape))
                if (is_torch or is_onnx):
                    weight.dims.extend([1, 1])
                return True
    return False
def update_fc_output_shape(self):
    """Pad 2-D FullyConnected output shapes to 4-D.

    Applies only to PyTorch/ONNX models targeting CPU or GPU:
    NCHW outputs become [N, C, 1, 1]; otherwise [N, 1, 1, C].
    Always returns False.
    """
    framework = ConverterUtil.framework_type(self._model)
    from_torch_or_onnx = framework in (FrameworkType.PYTORCH.value, FrameworkType.ONNX.value)
    on_cpu_or_gpu = self._option.device in (DeviceType.GPU.value, DeviceType.CPU.value)
    if not (from_torch_or_onnx and on_cpu_or_gpu):
        return False
    for op in self._model.op:
        if op.type != MaceOp.FullyConnected.name:
            continue
        dims = op.output_shape[0].dims
        if len(dims) != 2:
            continue
        if ConverterUtil.data_format(op) == DataFormat.NCHW:
            # [N, C] -> [N, C, 1, 1]
            dims.extend([1, 1])
        else:
            # [N, C] -> [N, 1, 1, C]
            channels = dims[1]
            del dims[1:]
            dims.extend([1, 1, channels])
    return False
def update_float_op_data_type(self):
    """Stamp the configured float data type onto the net and its ops.

    Ops without a data-type arg get one; ops already marked DT_FLOAT are
    retargeted. Quantized models are left untouched (early return).
    """
    print('update op with float data type')
    target = self._option.data_type
    self._model.data_type = target
    if self._option.quantize:
        # Quantized ops keep their own (integer) data types.
        return
    for op in self._model.op:
        arg = ConverterUtil.get_arg(op, MaceKeyword.mace_op_data_type_str)
        if not arg:
            arg = op.arg.add()
            arg.name = MaceKeyword.mace_op_data_type_str
            arg.i = target
        elif arg.i == mace_pb2.DT_FLOAT and arg.i != target:
            arg.i = target
    return False
def sort_dfs(self, op, visited, sorted_nodes):
    """Post-order DFS over producer edges.

    Appends every unvisited ancestor of `op` to `sorted_nodes` before
    `op` itself; `visited` tracks op names already emitted.
    """
    if op.name in visited:
        return
    visited.add(op.name)
    for tensor_name in op.input:
        parent = self._producer.get(tensor_name)
        # Graph inputs/consts have no producer entry; skip them.
        if parent is not None and parent.name not in visited:
            self.sort_dfs(parent, visited, sorted_nodes)
    sorted_nodes.append(op)
def sort_by_execution(self):
    """Topologically reorder net.op via DFS from the output nodes.

    Rebuilds net.op in execution order, then prints the final op list;
    printed indices skip Quantize/Dequantize boundary ops.
    Always returns False.
    """
    print('Sort by execution')
    net = self._model
    seen = set()
    ordered = []
    targets = list(self._option.check_nodes.keys())
    if not self._quantize_activation_info:
        targets.extend(self._option.output_nodes)
    for name in targets:
        mace_check(name in self._producer, 'output_tensor %s not existed in model' % name)
        self.sort_dfs(self._producer[name], seen, ordered)
    # Replace the op list in place with the sorted order.
    del net.op[:]
    net.op.extend(ordered)
    print('Final ops:')
    idx = 0
    for op in net.op:
        if op.type in [MaceOp.Quantize.name, MaceOp.Dequantize.name]:
            label = ''
        else:
            label = str(idx)
            idx += 1
        print('%s (%s, index:%s): %s' % (op.name, op.type, label, [out_shape.dims for out_shape in op.output_shape]))
    return False
def is_transposable_data_format_ops(self, op):
    """Decide whether `op`'s data format may follow the runtime layout.

    Starts from membership in MaceTransposableDataFormatOps and then
    applies per-type restrictions for Reshape, Squeeze, Transpose and
    FullyConnected. Returns the final boolean.

    NOTE(review): op.type (a string) is compared against MaceOp members
    without `.name` here — presumably MaceOp is a str-valued enum so the
    comparison holds; confirm against the MaceOp definition.
    """
    transposable = (op.type in MaceTransposableDataFormatOps)
    framework = ConverterUtil.framework_type(self._model)
    is_torch = (framework == FrameworkType.PYTORCH.value)
    is_onnx = (framework == FrameworkType.ONNX.value)
    if (op.type == MaceOp.Reshape):
        # Reshape is transposable only when it keeps rank 4 and preserves
        # batch and channel counts (always transposable on Hexagon/HTP).
        input_op = self._producer[op.input[0]]
        if ((len(input_op.output_shape) == 0) or (len(op.output_shape) == 0)):
            transposable = False
        else:
            input_dims = input_op.output_shape[0].dims
            output_dims = op.output_shape[0].dims
            if ((len(input_op.output_shape) != 1) or (len(input_dims) != 4) or (len(output_dims) != 4)):
                transposable = False
            elif (is_torch or is_onnx):
                transposable = True
            else:
                (in_b, in_h, in_w, in_c) = self.sort_feature_map_shape(input_dims, ConverterUtil.data_format(input_op))
                (ou_b, ou_h, ou_w, ou_c) = self.sort_feature_map_shape(output_dims, ConverterUtil.data_format(op))
                transposable = ((in_b == ou_b) and (in_c == ou_c))
        if ((self._option.device == DeviceType.HEXAGON.value) or (self._option.device == DeviceType.HTP.value)):
            transposable = True
    elif (op.type == MaceOp.Squeeze):
        # Squeeze is transposable only for 4-D -> 2-D where the squeezed
        # axes are exactly the spatial dims of the source data format.
        input_op = self._producer[op.input[0]]
        if ((len(input_op.output_shape) == 0) or (len(op.output_shape) == 0)):
            transposable = False
        else:
            input_dims = input_op.output_shape[0].dims
            output_dims = op.output_shape[0].dims
            src_df = ConverterUtil.data_format(self._model)
            arg = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str)
            if ((len(input_dims) == 4) and (len(output_dims) == 2) and (((src_df == DataFormat.NCHW) and (arg.ints == [2, 3])) or ((src_df == DataFormat.NHWC) and (arg.ints == [1, 2])))):
                transposable = True
            else:
                transposable = False
    elif (op.type == MaceOp.Transpose):
        # A Transpose feeding a Reshape must keep its explicit layout.
        if (op.output[0] in self._consumers):
            consumer = self._consumers[op.output[0]][0]
            if (consumer.type == MaceOp.Reshape):
                transposable = False
    elif ((op.type == MaceOp.FullyConnected) and (self._option.device == DeviceType.HTP.value)):
        transposable = False
    if ((op.type in MaceTransposableDataFormatOps) and (not transposable)):
        print(('%s(%s) is not a transposable op in this model.' % (op.name, op.type)))
    return transposable
def update_data_format(self):
    """Annotate every op with a data-format argument.

    Fixed-format ops are marked DataFormat.AUTO. Transposable ops inherit
    a concrete format from any non-AUTO producer; if all producers are
    AUTO the op is marked AUTO itself and flagged as having a data
    format. Always returns False.
    """
    print('update data format')
    for op in self._model.op:
        df_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_data_format_str)
        if not df_arg:
            df_arg = op.arg.add()
            df_arg.name = MaceKeyword.mace_data_format_str
        if op.type in MaceFixedDataFormatOps:
            df_arg.i = DataFormat.AUTO.value
        elif self.is_transposable_data_format_ops(op):
            inherited = DataFormat.AUTO.value
            for tensor in op.input:
                # Const inputs carry no data format.
                if tensor in self._consts:
                    continue
                mace_check(tensor in self._producer, 'Input tensor %s not in producer' % tensor)
                parent = self._producer[tensor]
                parent_df = ConverterUtil.get_arg(parent, MaceKeyword.mace_data_format_str)
                if parent_df.i != DataFormat.AUTO.value:
                    inherited = parent_df.i
            if inherited == DataFormat.AUTO.value:
                df_arg.i = inherited
                # Flag that this op follows the runtime-chosen format.
                flag = op.arg.add()
                flag.name = MaceKeyword.mace_has_data_format_str
                flag.i = 1
    return False
def transpose_data_format(self):
    """Rewrite layout-sensitive op arguments from NCHW to NHWC.

    For models whose source data format is NCHW, ops flagged with
    DataFormat.AUTO get their padding/axis/offset/shape arguments and
    4-D output shapes permuted into NHWC order. Always returns False.
    """
    print('Transpose arguments based on data format')
    net = self._model
    src_data_format = ConverterUtil.data_format(net)
    for op in net.op:
        # Only ops marked AUTO follow the runtime-chosen data format.
        has_data_format = (ConverterUtil.data_format(op) == DataFormat.AUTO)
        if (op.type == MaceOp.Pad.name):
            for arg in op.arg:
                if (arg.name == MaceKeyword.mace_paddings_str):
                    mace_check((len(arg.ints) == 8), 'pad dim rank should be 8.')
                    if ((src_data_format == DataFormat.NCHW) and has_data_format):
                        print(('Transpose pad args: %s(%s)' % (op.name, op.type)))
                        # Paddings are (before, after) pairs per dim; move
                        # the channel pair behind the spatial pairs.
                        self.transpose_shape(arg.ints, [0, 1, 4, 5, 6, 7, 2, 3])
        elif ((op.type == MaceOp.Concat.name) or (op.type == MaceOp.Split.name)):
            for arg in op.arg:
                if (arg.name == MaceKeyword.mace_axis_str):
                    if ((src_data_format == DataFormat.NCHW) and has_data_format and (len(op.output_shape[0].dims) == 4)):
                        print(('Transpose concat/split args: %s(%s)' % (op.name, op.type)))
                        if (arg.i < 0):
                            arg.i += 4
                        # NCHW axis -> NHWC axis: C(1)->3, H(2)->1, W(3)->2.
                        if (arg.i == 1):
                            arg.i = 3
                        elif (arg.i == 2):
                            arg.i = 1
                        elif (arg.i == 3):
                            arg.i = 2
                    if (op.input[0] in self._producer):
                        producer = self._producer[op.input[0]]
                        input_shape = producer.output_shape[0].dims
                        # A 2-D FullyConnected output gets padded to 4-D
                        # later, so axis 1 becomes the channel axis 3.
                        if ((producer.type == MaceOp.FullyConnected.name) and (len(input_shape) == 2)):
                            axis_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str)
                            if (axis_arg.i == 1):
                                axis_arg.i = 3
        elif (op.type == MaceOp.Squeeze.name):
            for arg in op.arg:
                if (arg.name == MaceKeyword.mace_axis_str):
                    # NCHW spatial squeeze [2, 3] becomes NHWC [1, 2].
                    if ((src_data_format == DataFormat.NCHW) and has_data_format and (len(self._producer[op.input[0]].output_shape[0].dims) == 4) and (len(op.output_shape[0].dims) == 2) and (arg.ints == [2, 3])):
                        print(('Transpose squeeze args: %s(%s)' % (op.name, op.type)))
                        arg.ints[:] = [1, 2]
        elif (op.type == MaceOp.Reduce.name):
            for arg in op.arg:
                if (arg.name == MaceKeyword.mace_axis_str):
                    if ((src_data_format == DataFormat.NCHW) and has_data_format):
                        print(('Transpose reduce args: %s(%s)' % (op.name, op.type)))
                        reduce_axises = list(arg.ints)
                        new_axises = []
                        for i in range(len(reduce_axises)):
                            idx = reduce_axises[i]
                            # NCHW -> NHWC: H/W shift down one, C(1) -> 3,
                            # and -1 (last NCHW axis, W) maps to 2.
                            if ((idx == 2) or (idx == 3)):
                                new_axises.append((idx - 1))
                            elif (idx == 1):
                                new_axises.append(3)
                            elif (idx == (- 1)):
                                new_axises.append(2)
                            else:
                                new_axises.append(idx)
                        new_axises.sort()
                        arg.ints[:] = []
                        arg.ints.extend(new_axises)
        elif (op.type == MaceOp.Crop.name):
            offset_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_offset_str)
            mace_check((offset_arg and (src_data_format == DataFormat.NCHW) and has_data_format and (len(op.output_shape[0].dims) == 4)), 'MACE only support crop with NCHW format')
            print(('Transpose crop args: %s(%s)' % (op.name, op.type)))
            self.transpose_shape(offset_arg.ints, [0, 2, 3, 1])
        elif (op.type == MaceOp.Reshape.name):
            for arg in op.arg:
                if ((arg.name == MaceKeyword.mace_dim_str) and (len(arg.ints) == 4) and (src_data_format == DataFormat.NCHW) and has_data_format):
                    self.transpose_shape(arg.ints, [0, 2, 3, 1])
        elif (op.type == MaceOp.Transpose.name):
            for arg in op.arg:
                if ((arg.name == MaceKeyword.mace_dims_str) and (len(arg.ints) == 4) and (src_data_format == DataFormat.NCHW) and has_data_format):
                    # Re-express the permutation in NHWC coordinates:
                    # compose NHWC->NCHW, the original perm, NCHW->NHWC.
                    dst_shape = [0, 3, 1, 2]
                    self.transpose_shape(dst_shape, arg.ints)
                    self.transpose_shape(dst_shape, [0, 2, 3, 1])
                    arg.ints[:] = dst_shape
        # Finally permute all 4-D output shapes of AUTO-format ops.
        if ((src_data_format == DataFormat.NCHW) and has_data_format):
            print(('Transpose output shapes: %s(%s)' % (op.name, op.type)))
            for output_shape in op.output_shape:
                if (len(output_shape.dims) == 4):
                    self.transpose_shape(output_shape.dims, [0, 2, 3, 1])
    return False
def quantize_nodes(self):
    """Insert Quantize/Dequantize ops at the model boundary.

    Rewires quantized op inputs/outputs through input_name_map /
    output_name_map, stamps each op's data-type arg according to the
    active quantize schema, adds a Quantize op per (non-int32) input
    node and a Dequantize op per check node, and finally sets the
    model-level quantize flag. Always returns False.
    """
    if (not self._option.quantize):
        return False
    print('Add mace quantize and dequantize nodes')
    for op in self._model.op:
        # Rewire inputs that refer to model inputs onto the quantized
        # input tensors (int32 inputs stay unquantized).
        for i in range(len(op.input)):
            if (op.input[i] in self._option.input_nodes):
                input_node = self._option.input_nodes[op.input[i]]
                if (input_node.data_type == mace_pb2.DT_INT32):
                    continue
            if (op.input[i] in self.input_name_map):
                op.input[i] = self.input_name_map[op.input[i]]
        # Rename outputs that feed model outputs and propagate the new
        # names (and their quantize info) to all consumers.
        for i in range(len(op.output)):
            if (op.output[i] in self.output_name_map):
                op.name = ((MaceKeyword.mace_output_node_name + '_') + op.name)
                new_output_name = self.output_name_map[op.output[i]]
                self._quantize_activation_info[new_output_name] = self._quantize_activation_info[op.output[i]]
                if (op.output[i] in self._consumers):
                    for consumer_op in self._consumers[op.output[i]]:
                        self.replace(consumer_op.input, op.output[i], new_output_name)
                op.output[i] = new_output_name
        # Promote float ops to the schema's quantized type; already
        # quantized types are only legal on Quantize/Dequantize ops.
        data_type_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_op_data_type_str)
        mace_check(data_type_arg, ('Data type does not exist for %s(%s)' % (op.name, op.type)))
        if (data_type_arg.i == mace_pb2.DT_FLOAT):
            if (self._option.quantize_schema == MaceKeyword.mace_apu_16bit_per_tensor):
                data_type_arg.i = mace_pb2.DT_INT16
            elif (self._option.quantize_schema == MaceKeyword.mace_htp_u16a_s8w):
                data_type_arg.i = mace_pb2.DT_UINT16
            elif (self._option.quantize_schema == MaceKeyword.mace_int8):
                data_type_arg.i = mace_pb2.DT_INT8
            else:
                data_type_arg.i = mace_pb2.DT_UINT8
        elif (data_type_arg.i == mace_pb2.DT_UINT8):
            mace_check(((op.type == MaceOp.Quantize.name) or (op.type == MaceOp.Dequantize.name)), ('Only Quantization ops support uint8, but got %s(%s)' % (op.name, op.type)))
        elif ((data_type_arg.i == mace_pb2.DT_INT16) and (self._option.quantize_schema == MaceKeyword.mace_apu_16bit_per_tensor)):
            mace_check(((op.type == MaceOp.Quantize.name) or (op.type == MaceOp.Dequantize.name)), ('Only Quantization ops support int16, but got %s(%s)' % (op.name, op.type)))
        elif ((data_type_arg.i == mace_pb2.DT_UINT16) and (self._option.quantize_schema == MaceKeyword.mace_htp_u16a_s8w)):
            mace_check(((op.type == MaceOp.Quantize.name) or (op.type == MaceOp.Dequantize.name)), ('Only Quantization ops support int16, but got %s(%s)' % (op.name, op.type)))
        elif ((data_type_arg.i == mace_pb2.DT_INT8) and (self._option.quantize_schema == MaceKeyword.mace_int8)):
            mace_check(((op.type == MaceOp.Quantize.name) or (op.type == MaceOp.Dequantize.name)), ('Only Quantization ops support int8, but got %s(%s)' % (op.name, op.type)))
        else:
            mace_check((op.type == MaceOp.Quantize.name), ('Quantization only support float ops, but get %s(%s, %s)' % (op.name, op.type, mace_pb2.DataType.Name(data_type_arg.i))))
    # Add a Quantize op in front of every float input node.
    for (i, input_node) in enumerate(self._option.input_nodes.values()):
        if (input_node.data_type == mace_pb2.DT_INT32):
            continue
        new_input_name = self.input_name_map[input_node.name]
        op_def = self._model.op.add()
        op_def.name = self.normalize_op_name(new_input_name)
        op_def.type = MaceOp.Quantize.name
        op_def.input.extend([input_node.name])
        op_def.output.extend([new_input_name])
        output_shape = op_def.output_shape.add()
        output_shape.dims.extend(input_node.shape)
        quantize_info = self._quantize_activation_info[new_input_name]
        self.copy_quantize_info(op_def, quantize_info)
        self._model.input_info[i].scale = quantize_info.scale
        self._model.input_info[i].zero_point = quantize_info.zero_point
        if (self._option.quantize_schema == MaceKeyword.mace_apu_16bit_per_tensor):
            ConverterUtil.add_data_type_arg(op_def, mace_pb2.DT_INT16)
        elif (self._option.quantize_schema == MaceKeyword.mace_htp_u16a_s8w):
            ConverterUtil.add_data_type_arg(op_def, mace_pb2.DT_UINT16)
        elif (self._option.quantize_schema == MaceKeyword.mace_int8):
            ConverterUtil.add_data_type_arg(op_def, mace_pb2.DT_INT8)
        else:
            ConverterUtil.add_data_type_arg(op_def, mace_pb2.DT_UINT8)
        ConverterUtil.add_data_format_arg(op_def, input_node.data_format)
        # Input ranges are re-measured at runtime on every inference.
        find_range_every_time_arg = op_def.arg.add()
        find_range_every_time_arg.name = MaceKeyword.mace_find_range_every_time
        find_range_every_time_arg.i = 1
    # Add a Dequantize op behind every check (output) node.
    output_nodes = self._option.check_nodes.values()
    for (i, output_node) in enumerate(output_nodes):
        op_def = self._model.op.add()
        op_def.name = self.normalize_op_name(output_node.name)
        op_def.type = MaceOp.Dequantize.name
        op_def.input.extend([self.output_name_map[output_node.name]])
        op_def.output.extend([output_node.name])
        output_shape = op_def.output_shape.add()
        producer_op = self._producer[output_node.name]
        output_shape.dims.extend(producer_op.output_shape[0].dims)
        op_def.output_type.extend([mace_pb2.DT_FLOAT])
        quantize_info = producer_op.quantize_info[0]
        self._model.output_info[i].scale = quantize_info.scale
        self._model.output_info[i].zero_point = quantize_info.zero_point
        if (self._option.quantize_schema == MaceKeyword.mace_apu_16bit_per_tensor):
            ConverterUtil.add_data_type_arg(op_def, mace_pb2.DT_INT16)
        elif (self._option.quantize_schema == MaceKeyword.mace_htp_u16a_s8w):
            ConverterUtil.add_data_type_arg(op_def, mace_pb2.DT_UINT16)
        elif (self._option.quantize_schema == MaceKeyword.mace_int8):
            ConverterUtil.add_data_type_arg(op_def, mace_pb2.DT_INT8)
        else:
            ConverterUtil.add_data_type_arg(op_def, mace_pb2.DT_UINT8)
        ConverterUtil.add_data_format_arg(op_def, output_node.data_format)
    # Mark the whole model as quantized.
    quantize_flag_arg = self._model.arg.add()
    quantize_flag_arg.name = MaceKeyword.mace_quantize_flag_arg_str
    quantize_flag_arg.i = 1
    return False
def quantize_tensor(self, tensor):
    """Quantize a single const tensor in place (float tensors only).

    Bias tensors (the bias input of a Conv/Deconv/FC/MatMul consumer)
    are quantized to int32 with scale = input_scale * filter_scale;
    other tensors become int16/int8/uint8 depending on the quantize
    schema. The float payload is replaced by int32_data and the tensor's
    scale/zero_point/min/max are recorded. Always returns False.
    """
    if (tensor.data_type == mace_pb2.DT_FLOAT):
        ops = self._consumers.get(tensor.name, None)
        check_conv = False
        check_deconv = False
        # Detect whether this tensor is the bias input of its sole
        # consumer (the bias index differs for Caffe deconvs).
        if ((ops is not None) and (len(ops) == 1)):
            if (len(ops[0].input) >= 3):
                check_conv = ((ops[0].type in [MaceOp.Conv2D.name, MaceOp.DepthwiseConv2d.name, MaceOp.FullyConnected.name, MaceOp.MatMul.name]) and (ops[0].input[2] == tensor.name))
            if (ops[0].type in [MaceOp.Deconv2D.name, MaceOp.DepthwiseDeconv2d]):
                from_caffe = (ConverterUtil.get_arg(ops[0], MaceKeyword.mace_framework_type_str).i == FrameworkType.CAFFE.value)
                if (from_caffe and (len(ops[0].input) >= 3)):
                    check_deconv = (ops[0].input[2] == tensor.name)
                elif (len(ops[0].input) >= 4):
                    check_deconv = (ops[0].input[3] == tensor.name)
        if (check_conv or check_deconv):
            # Bias: quantize to int32 with the conv's combined scale so
            # the accumulator can add it without rescaling.
            conv_op = ops[0]
            scale_input = self._quantize_activation_info[conv_op.input[0]].scale
            # The filter must be quantized first; its scale is needed.
            if (conv_op.input[1] not in self._quantized_tensor):
                self.quantize_tensor(self._consts[conv_op.input[1]])
            scale_filter = self._consts[conv_op.input[1]].scale
            scale = (scale_input * scale_filter)
            quantized_tensor = quantize_util.quantize_with_scale_and_zero(tensor.float_data, scale, 0)
            if ((self._option.device == DeviceType.HEXAGON.value) or (self._option.device == DeviceType.HTA.value)):
                # Hexagon/HTA expect the full int32 representable range.
                quantized_tensor.minval = (scale * (- (2 ** 31)))
                quantized_tensor.maxval = (scale * ((2 ** 31) - 1))
            tensor.data_type = mace_pb2.DT_INT32
        elif (self._option.quantize_schema == MaceKeyword.mace_apu_16bit_per_tensor):
            quantized_tensor = quantize_util.quantize_int16(tensor.float_data)
            tensor.data_type = mace_pb2.DT_INT16
        elif (self._option.quantize_schema == MaceKeyword.mace_int8):
            quantized_tensor = quantize_util.quantize_int8(tensor.float_data)
            tensor.data_type = mace_pb2.DT_INT8
        else:
            # Default schema: uint8. QAT'd ONNX tensors reuse their
            # recorded ranges instead of the measured min/max.
            non_zero = (self._option.device == DeviceType.CPU.value)
            has_qat = False
            if (InfoKey.has_qat in self._converter_info):
                if (tensor.name in self._converter_info[InfoKey.has_qat]):
                    has_qat = True
            if (has_qat and (self._option.platform.name == 'ONNX')):
                mace_check((tensor.name in self._converter_info[InfoKey.qat_type]), 'ONNX model tensor {} has QAT info, but QAT type info is missing.'.format(tensor.name))
                tensor_qat_type = self._converter_info[InfoKey.qat_type][tensor.name]
                mace_check(((tensor_qat_type == QatType.SYMMETRIC.value) or (tensor_qat_type == QatType.ASYMMETRIC.value)), 'QAT type can only be SYMMETRIC or ASYMMETRIC, but {} is got.'.format(tensor_qat_type))
                symmetric = (tensor_qat_type == QatType.SYMMETRIC.value)
                if symmetric:
                    # Symmetric QAT: range is +-127.5 * scale around zero.
                    maxval = (127.5 * tensor.scale)
                    minval = (- maxval)
                else:
                    (minval, maxval) = quantize_util.scale_zero_to_min_max(tensor.scale, tensor.zero_point)
                func = quantize_util.quantize_with_min_and_max
                quantized_tensor = func(tensor.float_data, self._option.device, non_zero, minval, maxval)
            else:
                func = quantize_util.quantize
                quantized_tensor = func(tensor.float_data, self._option.device, non_zero)
            tensor.data_type = mace_pb2.DT_UINT8
        # Swap the float payload for the quantized one and record ranges.
        del tensor.float_data[:]
        tensor.int32_data.extend(quantized_tensor.data)
        tensor.scale = quantized_tensor.scale
        tensor.zero_point = quantized_tensor.zero
        tensor.minval = quantized_tensor.minval
        tensor.maxval = quantized_tensor.maxval
        tensor.quantized = True
        self._quantized_tensor.update([tensor.name])
    return False
def quantize_weights(self):
    """Quantize every const tensor in the model in place.

    Delegates the per-tensor work to quantize_tensor. Always returns
    False.
    """
    print('Quantize weights')
    for weight_tensor in self._model.tensors:
        self.quantize_tensor(weight_tensor)
    return False
def quantize_large_tensor(self, tensor):
    """Quantize a float tensor to uint8 in place.

    Only applies when the tensor's sole consumer is a Conv2D,
    FullyConnected or MatMul op; otherwise the tensor is left untouched.
    """
    if tensor.data_type != mace_pb2.DT_FLOAT:
        return
    consumers = self._consumers.get(tensor.name, None)
    if consumers is None or len(consumers) != 1:
        return
    heavy_op_types = [MaceOp.Conv2D.name, MaceOp.FullyConnected.name, MaceOp.MatMul.name]
    if consumers[0].type not in heavy_op_types:
        return
    quantized = quantize_util.quantize(tensor.float_data, self._option.device, False)
    # Replace the float payload with the uint8 data and record ranges.
    tensor.data_type = mace_pb2.DT_UINT8
    del tensor.float_data[:]
    tensor.int32_data.extend(quantized.data)
    tensor.scale = quantized.scale
    tensor.zero_point = quantized.zero
    tensor.minval = quantized.minval
    tensor.maxval = quantized.maxval
    tensor.quantized = True
    self._quantized_tensor.update([tensor.name])
def quantize_large_weights(self):
    """Apply quantize_large_tensor to every tensor in the model.

    Always returns False.
    """
    print('Quantize large weights')
    for candidate in self._model.tensors:
        self.quantize_large_tensor(candidate)
    return False
def add_quantize_info(self, op, minval, maxval):
    """Attach quantize info for [minval, maxval] to `op`.

    Computes scale/zero-point (and possibly adjusted range) according to
    the active quantize schema, appends the info to op.quantize_info and
    returns it.
    """
    schema = self._option.quantize_schema
    if schema == MaceKeyword.mace_apu_16bit_per_tensor:
        # Symmetric int16: widen to +-max(|min|, |max|), zero point 0.
        maxval = max(abs(minval), abs(maxval))
        minval = -maxval
        scale = maxval / (2 ** 15)
        zero = 0
    elif schema == MaceKeyword.mace_htp_u16a_s8w:
        scale, zero, minval, maxval = quantize_util.adjust_range_uint16(minval, maxval, self._option.device, non_zero=False)
    elif schema == MaceKeyword.mace_int8:
        scale, zero, minval, maxval = quantize_util.adjust_range_int8(minval, maxval)
    else:
        scale, zero, minval, maxval = quantize_util.adjust_range(minval, maxval, self._option.device, non_zero=False)
    info = op.quantize_info.add()
    info.minval = minval
    info.maxval = maxval
    info.scale = scale
    info.zero_point = zero
    return info
def copy_quantize_info(self, op, info):
    """Append a copy of `info`'s range/scale fields to op.quantize_info."""
    clone = op.quantize_info.add()
    for field in ('minval', 'maxval', 'scale', 'zero_point'):
        setattr(clone, field, getattr(info, field))
def transform_fake_quantize(self):
    """Fold TF FakeQuant* ops into quantize info on their producers.

    For each FakeQuantWithMinMaxVars/Args op (when quantizing and the
    input is not a const), the op's min/max args are recorded as quantize
    info for both its input and output tensors; the op itself is then
    neutered into an Identity. Always returns False.
    """
    print('Transform fake quantize')
    fake_quant_types = ('FakeQuantWithMinMaxVars', 'FakeQuantWithMinMaxArgs')
    for op in self._model.op:
        if op.type not in fake_quant_types:
            continue
        if self._option.quantize and op.input[0] not in self._consts:
            upstream = self._producer[op.input[0]]
            low = ConverterUtil.get_arg(op, 'min').f
            high = ConverterUtil.get_arg(op, 'max').f
            info = self.add_quantize_info(upstream, low, high)
            # The fake-quant input and output share the same range.
            self._quantize_activation_info[op.input[0]] = info
            self._quantize_activation_info[op.output[0]] = info
            print(op.input[0], op.output[0])
        op.type = MaceOp.Identity.name
    return False
def rearrange_batch_to_space(self):
    """Move BatchToSpaceND behind trailing BiasAdd/Activation ops.

    For quantized dilated-convolution graphs of the form
    Conv -> BatchToSpaceND -> BiasAdd [-> Activation] (or -> Activation),
    the bias/activation ops are rewired to consume the Conv output
    directly and the BatchToSpaceND is moved to the end of the chain so
    bias and activation can fuse with the convolution.

    Returns:
        True if a rearrangement was performed (pass is re-run), else False.
    """
    if (not self._option.quantize):
        return False
    for conv_op in self._model.op:
        if ((conv_op.type in [MaceOp.Conv2D.name, MaceOp.DepthwiseConv2d.name]) and (self.consumer_count(conv_op.output[0]) == 1)):
            b2s_op = self._consumers[conv_op.output[0]][0]
            if ((b2s_op.type == MaceOp.BatchToSpaceND.name) and (self.consumer_count(b2s_op.output[0]) == 1)):
                biasadd_or_act_op = self._consumers[b2s_op.output[0]][0]
                if (biasadd_or_act_op.type == MaceOp.BiasAdd.name):
                    biasadd_op = biasadd_or_act_op
                    if ((self.consumer_count(biasadd_op.output[0]) == 1) and (self._consumers[biasadd_op.output[0]][0].type == MaceOp.Activation.name)):
                        # Conv -> B2S -> BiasAdd -> Act  becomes
                        # Conv -> BiasAdd -> Act -> B2S.
                        act_op = self._consumers[biasadd_op.output[0]][0]
                        biasadd_op.input[0] = conv_op.output[0]
                        b2s_op.input[0] = act_op.output[0]
                        # The activation now runs on the pre-B2S shape.
                        act_op.output_shape[0].dims[:] = conv_op.output_shape[0].dims[:]
                        for op in self._consumers[act_op.output[0]]:
                            self.replace(op.input, act_op.output[0], b2s_op.output[0])
                    else:
                        # Conv -> B2S -> BiasAdd  becomes
                        # Conv -> BiasAdd -> B2S.
                        biasadd_op.input[0] = conv_op.output[0]
                        b2s_op.input[0] = biasadd_op.output[0]
                        for op in self._consumers[biasadd_op.output[0]]:
                            self.replace(op.input, biasadd_op.output[0], b2s_op.output[0])
                    print(('Rearrange batch to space: %s(%s)' % (b2s_op.name, b2s_op.type)))
                    return True
                elif (biasadd_or_act_op.type == MaceOp.Activation.name):
                    # Conv -> B2S -> Act  becomes  Conv -> Act -> B2S.
                    act_op = biasadd_or_act_op
                    act_op.input[0] = conv_op.output[0]
                    b2s_op.input[0] = act_op.output[0]
                    for op in self._consumers[act_op.output[0]]:
                        self.replace(op.input, act_op.output[0], b2s_op.output[0])
                    print(('Rearrange batch to space: %s(%s)' % (b2s_op.name, b2s_op.type)))
                    return True
    return False
def add_quantize_tensor_range(self):
    """Attach quantize ranges to activation tensors.

    Three steps:
      1. If a quantize range file is configured, parse its per-tensor
         "<name>@@<min>,<max>" lines and attach the resulting
         scale/zero-point info to ops that do not yet have it.
      2. Add default quantize info for model inputs from their declared
         ranges.
      3. Derive quantize info for range-preserving or fixed-range ops
         (Pooling/Reshape/..., Concat, Tanh/Sigmoid/ReluX, Softmax,
         const-free binary Eltwise, Split).
    Always returns False.
    """
    range_file = self._option.quantize_range_file
    quantize_schema = self._option.quantize_schema
    if range_file:
        print('Add quantize tensor range')
        post_quantize_info = {}
        with open(range_file) as f:
            for line in f:
                # Each line: "<tensor_name>@@<min>,<max>".
                # Fixed: split('') raises ValueError (empty separator);
                # the range file uses '@@' as its field separator.
                (tensor_name, minmax) = line.split('@@')[:2]
                (min_val, max_val) = [float(i) for i in minmax.strip().split(',')]
                if (quantize_schema == MaceKeyword.mace_apu_16bit_per_tensor):
                    # Symmetric int16: widen to +-max(|min|, |max|).
                    max_val = max(abs(min_val), abs(max_val))
                    min_val = (- max_val)
                    scale = (max_val / (2 ** 15))
                    zero = 0
                elif (quantize_schema == MaceKeyword.mace_int8):
                    (scale, zero, min_val, max_val) = quantize_util.adjust_range_int8(min_val, max_val)
                elif (quantize_schema == MaceKeyword.mace_htp_u16a_s8w):
                    device = self._option.device
                    (scale, zero, min_val, max_val) = quantize_util.adjust_range_uint16(min_val, max_val, device, non_zero=False)
                else:
                    (scale, zero, min_val, max_val) = quantize_util.adjust_range(min_val, max_val, self._option.device, non_zero=False)
                activation_info = mace_pb2.QuantizeActivationInfo()
                activation_info.minval = min_val
                activation_info.maxval = max_val
                activation_info.scale = scale
                activation_info.zero_point = zero
                if (tensor_name not in self._quantize_activation_info):
                    post_quantize_info[tensor_name] = activation_info
        # Every op output must now be covered by range info.
        for op in self._model.op:
            if (op.name.find(MaceKeyword.mace_output_node_name) >= 0):
                continue
            for output in op.output:
                if (output not in self._quantize_activation_info):
                    mace_check((output in post_quantize_info), ('%s does not have quantize activation info' % op))
                    op.quantize_info.extend([post_quantize_info[output]])
                    self._quantize_activation_info[output] = post_quantize_info[output]
    if (not self._option.quantize):
        return False
    print('Add default quantize info for input')
    for (i, input_node) in enumerate(self._option.input_nodes.values()):
        if (input_node.data_type == mace_pb2.DT_INT32):
            continue
        new_input_name = self.input_name_map[input_node.name]
        if (input_node.name not in self._quantize_activation_info):
            # No measured range: derive one from the declared input range.
            print(('Input range %s: %s' % (input_node.name, str(input_node.range))))
            if (quantize_schema == MaceKeyword.mace_apu_16bit_per_tensor):
                maxval = max(abs(input_node.range[0]), abs(input_node.range[1]))
                minval = (- maxval)
                scale = (maxval / (2 ** 15))
                zero = 0
            elif (quantize_schema == MaceKeyword.mace_htp_u16a_s8w):
                (scale, zero, minval, maxval) = quantize_util.adjust_range_uint16(input_node.range[0], input_node.range[1], self._option.device, non_zero=False)
            elif (quantize_schema == MaceKeyword.mace_int8):
                (scale, zero, minval, maxval) = quantize_util.adjust_range_int8(input_node.range[0], input_node.range[1])
            else:
                (scale, zero, minval, maxval) = quantize_util.adjust_range(input_node.range[0], input_node.range[1], self._option.device, non_zero=False)
            quantize_info = mace_pb2.QuantizeActivationInfo()
            quantize_info.minval = minval
            quantize_info.maxval = maxval
            quantize_info.scale = scale
            quantize_info.zero_point = zero
            self._quantize_activation_info[new_input_name] = quantize_info
            input_op = self._producer[input_node.name]
            input_op.quantize_info.extend([quantize_info])
        else:
            self._quantize_activation_info[new_input_name] = self._quantize_activation_info[input_node.name]
    print('Add default quantize info for ops like Pooling, Softmax')
    for op in self._model.op:
        if (op.type in [MaceOp.ExpandDims.name, MaceOp.Pad.name, MaceOp.Pooling.name, MaceOp.Reduce.name, MaceOp.Reshape.name, MaceOp.ResizeBilinear.name, MaceOp.Squeeze.name, MaceOp.StridedSlice.name, MaceOp.BatchToSpaceND.name, MaceOp.SpaceToBatchND.name, MaceOp.SpaceToDepth.name, MaceOp.DepthToSpace.name, MaceOp.Transpose.name]):
            # Range-preserving ops inherit their producer's range.
            del op.quantize_info[:]
            producer_op = self._producer[op.input[0]]
            if (producer_op.output[0] in self._option.input_nodes):
                new_input_name = self.input_name_map[producer_op.output[0]]
                self.copy_quantize_info(op, self._quantize_activation_info[new_input_name])
            else:
                self.copy_quantize_info(op, producer_op.quantize_info[0])
            self._quantize_activation_info[op.output[0]] = op.quantize_info[0]
        elif ((op.type == MaceOp.Concat.name) and ((not op.quantize_info) or self._option.change_concat_ranges)):
            # Concat range is the union of all input ranges.
            if op.quantize_info:
                maxval = op.quantize_info[0].maxval
                minval = op.quantize_info[0].minval
                del op.quantize_info[:]
            else:
                maxval = float('-inf')
                minval = float('inf')
            for i in range(len(op.input)):
                minval = min(minval, self._producer[op.input[i]].quantize_info[0].minval)
                maxval = max(maxval, self._producer[op.input[i]].quantize_info[0].maxval)
            quantize_info = self.add_quantize_info(op, minval, maxval)
            self._quantize_activation_info[op.output[0]] = quantize_info
            if self._option.change_concat_ranges:
                # Push the unified range back onto every producer.
                for i in range(len(op.input)):
                    producer_op = self._producer[op.input[i]]
                    del producer_op.quantize_info[:]
                    self.copy_quantize_info(producer_op, quantize_info)
                    self._quantize_activation_info[producer_op.output[0]] = producer_op.quantize_info[0]
        elif (op.type == MaceOp.Activation.name):
            # Tanh/Sigmoid have fixed output ranges; ReluX is [0, max].
            act_type = ConverterUtil.get_arg(op, MaceKeyword.mace_activation_type_str).s.decode()
            if (act_type not in [ActivationType.TANH.name, ActivationType.SIGMOID.name, ActivationType.RELUX.name]):
                continue
            del op.quantize_info[:]
            if (act_type == ActivationType.TANH.name):
                quantize_info = self.add_quantize_info(op, (- 1.0), 1.0)
            elif (act_type == ActivationType.SIGMOID.name):
                quantize_info = self.add_quantize_info(op, 0.0, 1.0)
            elif (act_type == ActivationType.RELUX.name):
                for arg in op.arg:
                    if (arg.name == MaceKeyword.mace_activation_max_limit_str):
                        maxval = arg.f
                minval = 0.0
                quantize_info = self.add_quantize_info(op, minval, maxval)
            self._quantize_activation_info[op.output[0]] = quantize_info
        elif (op.type == MaceOp.Softmax.name):
            del op.quantize_info[:]
            if (self._option.device == DeviceType.APU.value):
                mace_check((quantize_schema != MaceKeyword.mace_htp_u16a_s8w), 'mace_htp_u16a_s8w is not a valid quantize_schema for APU')
                if (quantize_schema == MaceKeyword.mace_apu_16bit_per_tensor):
                    quantize_info = self.add_quantize_info(op, 0.0, 1.0)
                else:
                    # APU uint8 softmax uses 255/256 so scale is exactly 1/256.
                    quantize_info = self.add_quantize_info(op, 0.0, (255.0 / 256.0))
            else:
                quantize_info = self.add_quantize_info(op, 0.0, 1.0)
            self._quantize_activation_info[op.output[0]] = quantize_info
        elif ((op.type == MaceOp.Eltwise.name) and (not op.quantize_info) and (len(op.input) == 2) and (op.input[0] not in self._consts) and (op.input[1] not in self._consts)):
            # Derive the output range from the two input ranges.
            producer_op0 = self._producer[op.input[0]]
            producer_op1 = self._producer[op.input[1]]
            if (ConverterUtil.get_arg(op, MaceKeyword.mace_element_type_str).i == EltwiseType.SUM.value):
                minval = (producer_op0.quantize_info[0].minval + producer_op1.quantize_info[0].minval)
                maxval = (producer_op0.quantize_info[0].maxval + producer_op1.quantize_info[0].maxval)
            elif (ConverterUtil.get_arg(op, MaceKeyword.mace_element_type_str).i == EltwiseType.SUB.value):
                minval = (producer_op0.quantize_info[0].minval - producer_op1.quantize_info[0].maxval)
                maxval = (producer_op0.quantize_info[0].maxval - producer_op1.quantize_info[0].minval)
            elif (ConverterUtil.get_arg(op, MaceKeyword.mace_element_type_str).i == EltwiseType.PROD.value):
                # Product range: extrema of all four min/max combinations.
                mul_a = (producer_op0.quantize_info[0].minval * producer_op1.quantize_info[0].minval)
                mul_b = (producer_op0.quantize_info[0].minval * producer_op1.quantize_info[0].maxval)
                mul_c = (producer_op0.quantize_info[0].maxval * producer_op1.quantize_info[0].minval)
                mul_d = (producer_op0.quantize_info[0].maxval * producer_op1.quantize_info[0].maxval)
                minval = min(mul_a, mul_b, mul_c, mul_d)
                maxval = max(mul_a, mul_b, mul_c, mul_d)
            else:
                print(op)
                # Message updated: PROD is handled above as well.
                mace_check(False, 'Quantized Elementwise only support: SUM, SUB and PROD without ranges now.')
            quantize_info = self.add_quantize_info(op, minval, maxval)
            self._quantize_activation_info[op.output[0]] = quantize_info
        elif (op.type == MaceOp.Split.name):
            # Every Split output inherits the producer's range.
            del op.quantize_info[:]
            producer_op = self._producer[op.input[0]]
            for i in op.output:
                self.copy_quantize_info(op, producer_op.quantize_info[0])
    return False
def check_quantize_info(self):
    """Verify every quantized op carries one quantize_info per output.

    Prints the recorded [minval, maxval] range of each output so the
    calibration result can be inspected.  Input/output boundary nodes
    and Quantize/Dequantize ops are exempt.  Returns False when the
    option is not quantized; otherwise returns None after checking.
    """
    if not self._option.quantize:
        return False
    print('Check quantize info')
    exempt_types = (MaceOp.Quantize.name, MaceOp.Dequantize.name)
    for op in self._model.op:
        is_boundary = (op.name.find(MaceKeyword.mace_input_node_name) != -1
                       or op.name.find(MaceKeyword.mace_output_node_name) != -1)
        if is_boundary or op.type in exempt_types:
            continue
        mace_check(len(op.output) == len(op.quantize_info),
                   'missing quantize info: %s' % op)
        for idx, info in enumerate(op.quantize_info):
            print('Op output %s range: [%f, %f]'
                  % (op.output[idx], info.minval, info.maxval))
def fp16_gather_weight(self):
    """Convert Gather (embedding lookup) weights to fp16.

    Each Gather op is renamed and re-wired to produce an fp16 output,
    and a Cast op (named after the original Gather) is appended to
    dehalve the result back to fp32 so downstream consumers keep seeing
    the original tensor names and dtype.

    Raises:
        KeyError: if a Gather's first input is not a const tensor.
    """
    for op in self._model.op:
        if op.type != MaceOp.Gather.name:
            continue
        if op.input[0] not in self._consts:
            raise KeyError('Not in const tensor: ' + str(op.input[0]))
        const_tensor = self._consts[op.input[0]]
        if const_tensor.data_type == mace_pb2.DT_FLOAT16:
            # Fixed typo in log message: 'alreay' -> 'already'.
            print(str(const_tensor.name) + ' is already float16')
            continue
        print('FP16 Embedding Lookup Weights: %s' % const_tensor.name)
        op_outputs = [x for x in op.output]
        new_gather_name = op.name + '_fp16'
        new_gather_output_name = new_gather_name + ':0'
        dehalve_name = op.name
        # Halve the weights and redirect the Gather to a temporary output.
        const_tensor.data_type = mace_pb2.DT_FLOAT16
        op.name = new_gather_name
        op.output[:] = [new_gather_output_name]
        data_type_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_op_data_type_str)
        if data_type_arg is None:
            data_type_arg = op.arg.add()
            data_type_arg.name = MaceKeyword.mace_op_data_type_str
        data_type_arg.i = mace_pb2.DT_FLOAT16
        # Cast the fp16 gather result back to fp32 under the original
        # op name and outputs.
        dehalve_op = self._model.op.add()
        dehalve_op.name = dehalve_name
        dehalve_op.type = MaceOp.Cast.name
        dehalve_op.input.extend([new_gather_output_name])
        dehalve_op.output.extend(op_outputs)
        dehalve_op.output_shape.extend(op.output_shape)
        dehalve_op.output_type.extend([mace_pb2.DT_FLOAT])
        data_type_arg = dehalve_op.arg.add()
        data_type_arg.name = MaceKeyword.mace_op_data_type_str
        data_type_arg.i = mace_pb2.DT_FLOAT16
def fp16_matmul_weight(self):
    """Convert MatMul weights to fp16 for CPU gemv-style matmuls.

    Applies only when exactly one MatMul input is const, the batch is 1
    and the result is a vector (width or height == 1) — i.e. the
    activation x weights (or weights x activation) case.
    """
    if (self._option.device != DeviceType.CPU.value):
        return
    print('Convert matmul weights to fp16 for specific matmul: activation + weights')
    for op in self._model.op:
        if (op.type != MaceOp.MatMul.name):
            continue
        # Exactly one of the two inputs must be a const weight tensor.
        if ((op.input[0] not in self._consts) and (op.input[1] not in self._consts)):
            continue
        if ((op.input[0] in self._consts) and (op.input[1] in self._consts)):
            continue
        transpose_a_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_transpose_a_str)
        transpose_b_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_transpose_b_str)
        transpose_a = ((transpose_a_arg is not None) and (transpose_a_arg.i == 1))
        transpose_b = ((transpose_b_arg is not None) and (transpose_b_arg.i == 1))
        left_tensor = op.input[0]
        right_tensor = op.input[1]
        left_shape = self.get_tensor_shape(left_tensor)
        right_shape = self.get_tensor_shape(right_tensor)
        # Logical output dims after honoring the transpose flags.
        height = (left_shape[(- 1)] if transpose_a else left_shape[(- 2)])
        width = (right_shape[(- 2)] if transpose_b else right_shape[(- 1)])
        batch = reduce((lambda x, y: (x * y)), left_shape[:(- 2)], 1)
        if (batch != 1):
            continue
        if (left_tensor in self._consts):
            # weights x activation: output must be a column vector.
            if ((width != 1) or transpose_a):
                continue
            const_tensor = self._consts[left_tensor]
        else:
            # activation x weights: output must be a row vector and the
            # weights pre-transposed.
            if ((height != 1) or (not transpose_b)):
                continue
            const_tensor = self._consts[right_tensor]
        print(('Convert Matmul Weights to fp16: %s' % op.name))
        const_tensor.data_type = mace_pb2.DT_FLOAT16
        data_type_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_op_data_type_str)
        if (data_type_arg is None):
            data_type_arg = op.arg.add()
            data_type_arg.name = MaceKeyword.mace_op_data_type_str
        data_type_arg.i = mace_pb2.DT_FLOAT16
        op.output_type.extend([mace_pb2.DT_FLOAT])
def add_opencl_informations(self):
    """Record the selected OpenCL memory type on the model as a global arg."""
    print('Add OpenCL informations')
    mem_type_arg = self._model.arg.add()
    mem_type_arg.name = MaceKeyword.mace_opencl_mem_type
    use_image = (self._option.cl_mem_type == 'image')
    mem_type_arg.i = (MemoryType.GPU_IMAGE.value if use_image
                      else MemoryType.GPU_BUFFER.value)
def transform_reshape_and_flatten(self):
    """Give single-input Reshape/Flatten ops an explicit shape input.

    Caffe/PyTorch Reshape carries its target shape as a `dim` arg and
    Flatten as an `axis` arg; both are rewritten to the two-input form
    (data + const shape tensor) used downstream.
    """
    net = self._model
    for op in net.op:
        if (op.type != MaceOp.Reshape.name):
            continue
        dim_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_dim_str)
        shape_tensor = None
        # Only single-input Reshape ops need a synthesized shape tensor.
        if (len(op.input) == 1):
            print('Transform Caffe or PyTorch Reshape')
            dims = []
            axis_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str)
            if dim_arg:
                # Reshape: target shape comes straight from the `dim` arg.
                dims = dim_arg.ints
                shape_tensor = net.tensors.add()
                shape_tensor.name = (op.name + '_shape')
                shape_tensor.dims.append(len(op.output_shape[0].dims))
                shape_tensor.data_type = mace_pb2.DT_INT32
            elif (axis_arg is not None):
                # Flatten: keep dims before `axis` (0 presumably means
                # "copy input dim", as in Caffe Reshape -- confirm), and
                # collapse everything from `axis` on into -1.
                axis = axis_arg.i
                for i in range(0, axis):
                    dims.append(0)
                dims.append((- 1))
                for i in range((axis + 1), len(op.output_shape[0].dims)):
                    dims.append(0)
                shape_tensor = net.tensors.add()
                shape_tensor.name = (op.name + '_shape')
                shape_tensor.dims.append(len(dims))
                shape_tensor.data_type = mace_pb2.DT_INT32
            else:
                mace_check(False, 'Only support reshape and flatten')
            shape_tensor.int32_data.extend(dims)
            op.input.append(shape_tensor.name)
def transform_shape_tensor_to_param(self):
    """Copy const shape/size input tensors into op args.

    For ops whose shape is supplied as a const input tensor, materialize
    that tensor's int32 values as the corresponding arg so consumers
    that expect a parameter (rather than an input) can read it.
    """
    # op type -> (index of the shape input, name of the target arg)
    shape_input_specs = {
        MaceOp.ResizeNearestNeighbor.name: (1, MaceKeyword.mace_resize_size_str),
        MaceOp.Deconv2D.name: (2, MaceKeyword.mace_dim_str),
        MaceOp.Reshape.name: (1, MaceKeyword.mace_dim_str),
    }
    for op in self._model.op:
        spec = shape_input_specs.get(op.type)
        if spec is None:
            continue
        input_idx, arg_name = spec
        if len(op.input) <= input_idx:
            continue
        if ConverterUtil.get_arg(op, arg_name) is not None:
            continue
        if op.input[input_idx] not in self._consts:
            continue
        shape_tensor = self._consts[op.input[input_idx]]
        new_arg = op.arg.add()
        new_arg.name = arg_name
        new_arg.ints.extend(shape_tensor.int32_data)
def fold_fc_reshape(self):
    """Remove a Reshape that re-expands FullyConnected output to [N, 1, 1, C].

    When a FullyConnected output feeds a Reshape whose const target
    shape is exactly [batch, 1, 1, channels] of that output, the Reshape
    (and its const shape tensor) is deleted.  Skipped on APU/HTP.

    Returns:
        True if a Reshape was folded (one per call), False otherwise.
    """
    if (self._option.device in [DeviceType.APU.value, DeviceType.HTP.value]):
        return False
    net = self._model
    for op in net.op:
        if ((op.type == MaceOp.FullyConnected.name) and (op.output[0] in self._consumers)):
            consumers = self._consumers[op.output[0]]
            op_output_shape = op.output_shape[0].dims[:]
            for consumer in consumers:
                # Match only a Reshape whose const shape is the FC output
                # padded with two singleton spatial dims.
                if ((consumer.type == MaceOp.Reshape.name) and (consumer.input[1] in self._consts) and (self._consts[consumer.input[1]].int32_data[:] == [op_output_shape[0], 1, 1, op_output_shape[1]])):
                    net.tensors.remove(self._consts[consumer.input[1]])
                    del consumer.input[1]
                    self.safe_remove_node(consumer, None)
                    return True
    return False
def transform_channel_shuffle(self):
    """Fuse a 5-D Transpose (+ surrounding Reshapes) into ChannelShuffle.

    TensorFlow emits perm [0, 1, 2, 4, 3] and ONNX perm [0, 2, 1, 3, 4];
    the group count is read from the transposed shape at the
    framework-specific axis.

    Returns:
        True if one pattern was fused (one per call), False otherwise.
    """
    net = self._model
    for op in net.op:
        if ((op.type == MaceOp.Transpose.name) and (len(op.output_shape[0].dims) == 5)):
            perm = ConverterUtil.get_arg(op, MaceKeyword.mace_dims_str).ints
            framework = ConverterUtil.framework_type(net)
            if ((framework == FrameworkType.TENSORFLOW.value) and ([0, 1, 2, 4, 3] == list(perm))):
                group_dim = 4
            elif ((framework == FrameworkType.ONNX.value) and ([0, 2, 1, 3, 4] == list(perm))):
                group_dim = 2
            else:
                continue
            # The Transpose must feed exactly one 4-D Reshape, which is
            # absorbed into the fused op.
            reshape_op = self._consumers.get(op.output[0], None)
            if (reshape_op and (len(reshape_op) == 1) and (reshape_op[0].type == MaceOp.Reshape.name) and (len(reshape_op[0].output_shape[0].dims) == 4)):
                print('Transform channel shuffle')
                output_shape = reshape_op[0].output_shape[0].dims
                self.safe_remove_node(reshape_op[0], op, remove_input_tensor=True)
            else:
                continue
            # Rebuild the Transpose in place as a ChannelShuffle op.
            op.type = MaceOp.ChannelShuffle.name
            del op.arg[:]
            group_arg = op.arg.add()
            group_arg.name = MaceKeyword.mace_group_str
            group_arg.i = op.output_shape[0].dims[group_dim]
            op.output_shape[0].dims[:] = output_shape
            # Absorb the producing Reshape (or turn a Stack into Concat).
            producer_op = self._producer.get(op.input[0], None)
            if producer_op:
                if (producer_op.type == MaceOp.Reshape.name):
                    self.safe_remove_node(producer_op, None)
                elif (producer_op.type == MaceOp.Stack.name):
                    print('Change channel shuffle stack to concat')
                    producer_op.type = MaceOp.Concat.name
                    producer_op.output_shape[0].dims[:] = output_shape
            return True
    return False
def quantize_specific_ops_only(self):
    """Quantize only MatMul/Gather ops that mix const and float inputs.

    For one eligible op per call: const inputs are quantized to uint8 in
    place, non-const float inputs get a runtime Quantize op inserted,
    the op's output is renamed with a '_quant' suffix and a Dequantize
    op restores the original output name/dtype.  The model-level
    quantize flag is set afterwards.

    Returns:
        True if an op was quantized (one per call), False otherwise.
    """
    # Target op types and the dtype their quantized output uses.
    to_quantize_ops_output_type = {MaceOp.MatMul.name: mace_pb2.DT_INT32, MaceOp.Gather.name: mace_pb2.DT_UINT8}
    for op in self._model.op:
        if ((op.type not in to_quantize_ops_output_type) or (len(op.output) > 1) or (ConverterUtil.get_arg(op, MaceKeyword.mace_op_data_type_str).i != mace_pb2.DT_FLOAT)):
            continue
        quantized_inputs_names = []
        should_quantize = False
        has_const = False
        # Require at least one const input ...
        for (idx, input_tensor) in enumerate(op.input):
            if (input_tensor in self._consts):
                has_const = True
                break
        if (not has_const):
            continue
        # ... and at least one float input still to be quantized.
        for (idx, input_tensor) in enumerate(op.input):
            if (self.get_tensor_data_type(input_tensor) == mace_pb2.DT_FLOAT):
                should_quantize = True
                break
        if (not should_quantize):
            continue
        else:
            print(('Quantize op %s (%s)' % (op.name, op.type)))
        # CPU MatMul weights are quantized with a non-zero zero point.
        non_zero = ((self._option.device == DeviceType.CPU.value) and (op.type == MaceOp.MatMul.name))
        for (idx, input_tensor) in enumerate(op.input):
            quantized_inputs_names.append(input_tensor)
            if (self.get_tensor_data_type(input_tensor) != mace_pb2.DT_FLOAT):
                continue
            if (input_tensor in self._consts):
                # Quantize const (weight) data to uint8 in place.
                const_tensor = self._consts[input_tensor]
                quantized_tensor = quantize_util.quantize(const_tensor.float_data, self._option.device, non_zero)
                del const_tensor.float_data[:]
                const_tensor.int32_data.extend(quantized_tensor.data)
                const_tensor.data_type = mace_pb2.DT_UINT8
                const_tensor.scale = quantized_tensor.scale
                const_tensor.zero_point = quantized_tensor.zero
                const_tensor.minval = quantized_tensor.minval
                const_tensor.maxval = quantized_tensor.maxval
                const_tensor.quantized = True
            else:
                # Insert a runtime Quantize op for activation inputs.
                input_shape = self.get_tensor_shape(input_tensor)
                quantize_op = self._model.op.add()
                quantize_op.name = (self.normalize_op_name(input_tensor) + '_quant')
                quantize_op.type = MaceOp.Quantize.name
                quantize_op.input.extend([input_tensor])
                quantize_output_name = (quantize_op.name + '_0')
                quantize_op.output.extend([quantize_output_name])
                output_shape = quantize_op.output_shape.add()
                output_shape.dims.extend(input_shape)
                quantize_op.output_type.extend([mace_pb2.DT_UINT8])
                data_type_arg = quantize_op.arg.add()
                data_type_arg.name = MaceKeyword.mace_op_data_type_str
                data_type_arg.i = mace_pb2.DT_UINT8
                ConverterUtil.add_data_format_arg(quantize_op, self.get_tensor_data_format(input_tensor))
                data_type_arg = quantize_op.arg.add()
                data_type_arg.name = MaceKeyword.mace_non_zero
                data_type_arg.i = 0
                # No calibration data: recompute the range on every run.
                find_range_arg = quantize_op.arg.add()
                find_range_arg.name = MaceKeyword.mace_find_range_every_time
                find_range_arg.i = 1
                quantized_inputs_names[(- 1)] = quantize_output_name
        del op.input[:]
        op.input.extend(quantized_inputs_names)
        # Rename the output, then dequantize back under the original name.
        original_output_name = op.output[0]
        op.output[0] = (original_output_name + '_quant')
        op.output_type.extend([to_quantize_ops_output_type[op.type]])
        data_type_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_op_data_type_str)
        if (data_type_arg is None):
            data_type_arg = op.arg.add()
            data_type_arg.name = MaceKeyword.mace_op_data_type_str
        data_type_arg.i = mace_pb2.DT_UINT8
        dequantize_op = self._model.op.add()
        dequantize_op.name = (op.name + '_dequant')
        dequantize_op.type = MaceOp.Dequantize.name
        dequantize_op.input.extend([op.output[0]])
        dequantize_op.output.extend([original_output_name])
        dequantize_op.output_shape.extend(op.output_shape)
        dequantize_op.output_type.extend([mace_pb2.DT_FLOAT])
        data_type_arg = dequantize_op.arg.add()
        data_type_arg.name = MaceKeyword.mace_op_data_type_str
        data_type_arg.i = to_quantize_ops_output_type[op.type]
        ConverterUtil.add_data_format_arg(dequantize_op, self.get_tensor_data_format(original_output_name))
        # Mark the model as (partially) quantized.
        quantize_flag_arg = ConverterUtil.get_arg(self._model, MaceKeyword.mace_quantize_flag_arg_str)
        if (quantize_flag_arg is None):
            quantize_flag_arg = self._model.arg.add()
            quantize_flag_arg.name = MaceKeyword.mace_quantize_flag_arg_str
        quantize_flag_arg.i = 1
        return True
    return False
def transform_single_bn_to_depthwise_conv(self):
    """Rewrite a standalone BatchNorm as a 1x1 DepthwiseConv2d.

    Only 3-input BatchNorm ops that are NOT fed by a convolution-like
    producer are converted.  The scale tensor is reshaped from [C] to
    [1, 1, 1, C].  Returns True if one op was transformed (one per
    call), False otherwise.
    """
    fused_producer_types = [
        MaceOp.Conv2D.name,
        MaceOp.Deconv2D.name,
        MaceOp.DepthwiseDeconv2d.name,
        MaceOp.DepthwiseConv2d.name,
        MaceOp.BatchToSpaceND.name,
    ]
    for op in self._model.op:
        if op.type != MaceOp.BatchNorm.name or len(op.input) != 3:
            continue
        if self._producer[op.input[0]].type in fused_producer_types:
            continue
        # A 1x1, stride-1, VALID depthwise conv computes exactly the
        # per-channel scale + bias that BatchNorm applies.
        op.type = MaceOp.DepthwiseConv2d.name
        padding_arg = op.arg.add()
        padding_arg.name = MaceKeyword.mace_padding_str
        padding_arg.i = PaddingMode.VALID.value
        for hw_arg_name in (MaceKeyword.mace_strides_str,
                            MaceKeyword.mace_dilations_str):
            hw_arg = op.arg.add()
            hw_arg.name = hw_arg_name
            hw_arg.ints.extend([1, 1])
        for tensor in self._model.tensors:
            if tensor.name == op.input[1]:
                tensor.dims[:] = [1, 1, 1, tensor.dims[0]]
                break
        return True
    return False
def transform_mul_max_to_prelu(self):
    """Fuse (x * alpha) followed by max(...) into a PRELU activation (APU).

    Matches an Eltwise PROD with a scalar multiplier whose output feeds
    an Eltwise MAX that also consumes the original input, and rewrites
    the pair as one Activation op with a 1-element alpha tensor.

    Returns:
        True if one pattern was fused (one per call), False otherwise.
    """
    if (self._option.device != DeviceType.APU.value):
        return False
    net = self._model
    for op in net.op:
        if ((op.type != MaceOp.Eltwise.name) or (ConverterUtil.get_arg(op, MaceKeyword.mace_element_type_str).i != EltwiseType.PROD.value) or (op.output[0] not in self._consumers)):
            continue
        if (len(op.input) != 1):
            continue
        consumer_op = self._consumers[op.output[0]][0]
        if ((consumer_op.type != MaceOp.Eltwise.name) or (ConverterUtil.get_arg(consumer_op, MaceKeyword.mace_element_type_str).i != EltwiseType.MAX.value)):
            continue
        # The MAX must also see the un-scaled input (the identity arm).
        if (op.input[0] not in consumer_op.input):
            continue
        float_value_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_scalar_input_str)
        mace_check((float_value_arg is not None), (((op.name + ': ') + MaceKeyword.mace_scalar_input_str) + ' value float should not be None'))
        scalar = float_value_arg.f
        if (scalar < 0):
            continue
        # The PRELU slope is clamped to at most 1.
        if (scalar > 1):
            scalar = 1
        print(('Change mul and max to prelu: %s(%s)' % (op.name, op.type)))
        # Take over the MAX op's name/output, attach the alpha tensor and
        # turn the PROD into the Activation op.
        op.name = consumer_op.name
        op.output[0] = consumer_op.output[0]
        alpha_tensor = net.tensors.add()
        alpha_tensor.name = (op.name + '_alpha')
        alpha_tensor.dims.append(1)
        alpha_tensor.data_type = mace_pb2.DT_FLOAT
        alpha_tensor.float_data.extend([scalar])
        op.input.extend([alpha_tensor.name])
        ConverterUtil.del_arg(op, MaceKeyword.mace_scalar_input_str)
        ConverterUtil.del_arg(op, MaceKeyword.mace_scalar_input_index_str)
        op.type = MaceOp.Activation.name
        type_arg = op.arg.add()
        type_arg.name = MaceKeyword.mace_activation_type_str
        type_arg.s = six.b(ActivationType.PRELU.name)
        self.replace_quantize_info(op, consumer_op)
        self.safe_remove_node(consumer_op, op)
        return True
    return False
def transform_expand_dims_to_reshape(self):
    """On APU, lower an ExpandDims op to Reshape.

    Returns True if an op was rewritten (one per call), False otherwise.
    """
    if self._option.device != DeviceType.APU.value:
        return False
    for candidate in self._model.op:
        if candidate.type != MaceOp.ExpandDims.name:
            continue
        candidate.type = MaceOp.Reshape.name
        return True
    return False
def quantize_fold_relu(self):
    """Fold RELU/RELUX activations into their producer (int8 schema only).

    Under the int8 quantize schema the activation's clipping is captured
    by the quantize_info, so the Activation op can be removed after
    copying its quantize_info onto the producer.

    Returns:
        True if one activation was folded (one per call), False otherwise.
    """
    if self._option.quantize_schema != MaceKeyword.mace_int8:
        # Fix: was a bare `return` (None); return False for consistency
        # with the other transformer passes (both are falsy to callers).
        return False
    net = self._model
    for op in net.op:
        if op.type != MaceOp.Activation.name:
            continue
        act_type_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_activation_type_str)
        act_type = act_type_arg.s.decode()
        if act_type in ['RELU', 'RELUX']:
            producer = self._producer[op.input[0]]
            self.replace_quantize_info(producer, op)
            self.safe_remove_node(op, producer)
            return True
    return False
def transform_keras_quantize_info(self):
    """Register each op output's quantize_info as activation info (Keras).

    Returns True if any output was newly registered, False otherwise.
    """
    mace_check(self._option.platform == Platform.KERAS, 'For KERAS models')
    changed = False
    for op in self._model.op:
        for idx, q_info in enumerate(op.quantize_info):
            output_name = op.output[idx]
            if output_name in self._quantize_activation_info:
                continue
            self._quantize_activation_info[output_name] = q_info
            changed = True
    return changed
def fold_div_bn(self):
    """Fold `x / s` followed by a BatchNorm with the same scale into BiasAdd.

    When the BatchNorm's scale tensor equals the preceding Eltwise DIV's
    const divisor, the division and the scale cancel out, so the DIV is
    removed and the BatchNorm degenerates to a BiasAdd.  Always returns
    False (single pass over the graph).
    """
    net = self._model
    for op in net.op:
        if (op.type == MaceOp.BatchNorm.name):
            scale = self._consts[op.input[1]]
            producer_op = self._producer[op.input[0]]
            if (producer_op.type != MaceOp.Eltwise.name):
                continue
            eltwise_type = ConverterUtil.get_arg(producer_op, MaceKeyword.mace_element_type_str)
            if (eltwise_type.i != EltwiseType.DIV.value):
                continue
            if (producer_op.input[1] not in self._consts):
                continue
            divisor = self._consts[producer_op.input[1]]
            if ((divisor.data_type != mace_pb2.DT_FLOAT) or (scale.data_type != mace_pb2.DT_FLOAT)):
                continue
            scale_dims = scale.dims
            divisor_dims = divisor.dims
            df_op = ConverterUtil.data_format(op)
            df_producer = ConverterUtil.data_format(producer_op)
            df_nchw = DataFormat.NCHW
            # The divisor must be effectively 1-D along the NCHW channel
            # axis and match the scale's length.
            dim_match = ((df_op == df_nchw) and (df_producer == df_nchw) and (len(scale_dims) == 1) and (len(divisor_dims) == 4) and (divisor_dims[1] == np.prod(np.array(divisor_dims))) and (divisor_dims[1] == scale_dims[0]))
            if (not dim_match):
                continue
            # The fold is only valid when scale and divisor agree
            # numerically (they then cancel).
            if (not np.allclose(np.array(scale.float_data).reshape((- 1)), np.array(divisor.float_data).reshape((- 1)))):
                continue
            if (producer_op.input[0] not in self._producer):
                continue
            producer_producer = self._producer[producer_op.input[0]]
            self.safe_remove_node(producer_op, producer_producer, remove_input_tensor=True)
            # Drop the (cancelled) scale input; keep only the bias.
            del op.input[1]
            self._model.tensors.remove(scale)
            op.type = MaceOp.BiasAdd.name
    return False
def add_general_info(self):
    """Stamp the model with its runtime type, name and inference order.

    Always returns False (the model is only annotated, never re-walked).
    """
    model = self._model
    device_arg = model.arg.add()
    device_arg.name = MaceKeyword.mace_runtime_type_str
    device_arg.i = self._option.device
    model.name = self._option.name
    model.infer_order = self._option.order
    return False
def tensor_is_used(self, tensor):
    """Return True if *tensor* is a model output or feeds any op input."""
    if any(tensor.name == out.name for out in self._model.output_info):
        return True
    return any(tensor.name in op.input for op in self._model.op)
def remove_unused_tensor(self):
    """Delete model tensors that no output or op input references.

    Returns True if at least one tensor was removed, False otherwise.
    """
    stale = [t for t in self._model.tensors if not self.tensor_is_used(t)]
    for t in stale:
        self._model.tensors.remove(t)
    return bool(stale)
def get_rhs_op_scale_true(self, rsqrt_op, second_mean_consumer_op):
    """Match the affine (scale/offset) rhs branch of a decomposed instance norm.

    Expects rsqrt -> (* const scale) feeding the mean multiply, then
    (const offset) - (mean * rsqrt * scale), i.e. the right-hand side of
    the final add in the affine decomposition.

    Returns a dict whose 'is_in' is False on mismatch; on a match it
    also carries 'rsqrt_mul_scale_op', 'scale_tensor_name',
    'offset_tensor_name' and 'rhs_of_final_add'.
    """
    rhs_dict = dict()
    rhs_dict['is_in'] = False
    rsqrt_consumers = self._consumers.get(rsqrt_op.output[0], [])
    if (len(rsqrt_consumers) != 1):
        return rhs_dict
    rsqrt_mul_scale_op = rsqrt_consumers[0]
    if (rsqrt_mul_scale_op.type != MaceOp.Eltwise.name):
        return rhs_dict
    elt_type = ConverterUtil.get_arg(rsqrt_mul_scale_op, MaceKeyword.mace_element_type_str).i
    scale_tensor_name = rsqrt_mul_scale_op.input[1]
    # Must be a binary PROD with a const scale feeding the mean multiply.
    if (not ((len(rsqrt_mul_scale_op.input) == 2) and (len(rsqrt_mul_scale_op.output) == 1) and (scale_tensor_name in self._consts) and (elt_type == EltwiseType.PROD.value) and (rsqrt_mul_scale_op.output[0] in second_mean_consumer_op.input))):
        return rhs_dict
    right_mul_consumers = self._consumers.get(second_mean_consumer_op.output[0], [])
    if (len(right_mul_consumers) != 1):
        return rhs_dict
    offset_minus_rsqrt_mean_op = right_mul_consumers[0]
    if (offset_minus_rsqrt_mean_op.type != MaceOp.Eltwise.name):
        return rhs_dict
    elt_type = ConverterUtil.get_arg(offset_minus_rsqrt_mean_op, MaceKeyword.mace_element_type_str).i
    # Must be a binary SUB whose first input is the const offset.
    if (not ((len(offset_minus_rsqrt_mean_op.input) == 2) and (len(offset_minus_rsqrt_mean_op.output) == 1) and (offset_minus_rsqrt_mean_op.input[0] in self._consts) and (elt_type == EltwiseType.SUB.value))):
        return rhs_dict
    offset_tensor_name = offset_minus_rsqrt_mean_op.input[0]
    rhs_of_final_add = offset_minus_rsqrt_mean_op
    rhs_dict['is_in'] = True
    rhs_dict['rsqrt_mul_scale_op'] = rsqrt_mul_scale_op
    rhs_dict['scale_tensor_name'] = scale_tensor_name
    rhs_dict['offset_tensor_name'] = offset_tensor_name
    rhs_dict['rhs_of_final_add'] = rhs_of_final_add
    return rhs_dict
def get_rhs_op_scale_false(self, rsqrt_op, second_mean_consumer_op):
    """Match the plain (non-affine) rhs branch of a decomposed instance norm.

    Expects the negated mean to be multiplied by the rsqrt output,
    forming the right-hand side of the final add.

    Returns a dict whose 'is_in' is False on mismatch; on a match it
    also carries 'rhs_of_final_add'.
    """
    rhs_dict = dict()
    rhs_dict['is_in'] = False
    neg_consumers = self._consumers.get(second_mean_consumer_op.output[0], [])
    if (len(neg_consumers) != 1):
        return rhs_dict
    rsqrt_mul_neg_mean_op = neg_consumers[0]
    if (rsqrt_mul_neg_mean_op.type != MaceOp.Eltwise.name):
        return rhs_dict
    elt_type = ConverterUtil.get_arg(rsqrt_mul_neg_mean_op, MaceKeyword.mace_element_type_str).i
    # Must be a binary PROD that reads the rsqrt output.
    if (not ((len(rsqrt_mul_neg_mean_op.input) == 2) and (len(rsqrt_mul_neg_mean_op.output) == 1) and (elt_type == EltwiseType.PROD.value) and (rsqrt_op.output[0] in rsqrt_mul_neg_mean_op.input))):
        return rhs_dict
    rhs_of_final_add = rsqrt_mul_neg_mean_op
    rhs_dict['is_in'] = True
    rhs_dict['rhs_of_final_add'] = rhs_of_final_add
    return rhs_dict
def get_lhs_and_final_add(self, scale_offset, rhs_dict, op, rsqrt_op):
    """Match the left-hand multiply and the final add of an instance norm.

    The lhs is the Eltwise PROD that multiplies the original input by
    the (scaled) rsqrt; the final add sums lhs with the rhs matched by
    get_rhs_op_scale_true/false.

    Returns a dict whose 'is_in' is False on mismatch; on a match it
    also carries 'lhs_of_final_add' and 'final_add_op'.
    """
    lhs_dict = dict()
    lhs_dict['is_in'] = False
    rhs_of_final_add = rhs_dict['rhs_of_final_add']
    if scale_offset:
        rsqrt_mul_scale_op = rhs_dict['rsqrt_mul_scale_op']
        consumers_after_branch = self._consumers.get(rsqrt_mul_scale_op.output[0], [])
    else:
        consumers_after_branch = self._consumers.get(rsqrt_op.output[0], [])
    # The branch output must feed exactly two consumers (lhs + rhs paths).
    if (len(consumers_after_branch) != 2):
        return lhs_dict
    lhs_of_final_add = None
    for consume_op in consumers_after_branch:
        # The lhs multiply is the consumer that also reads the original
        # (pre-mean) input.
        if (consume_op.input[0] == op.input[0]):
            lhs_of_final_add = consume_op
            break
    if (lhs_of_final_add is None):
        return lhs_dict
    if (not ((lhs_of_final_add is not None) and (lhs_of_final_add.type == MaceOp.Eltwise.name))):
        return lhs_dict
    elt_type = ConverterUtil.get_arg(lhs_of_final_add, MaceKeyword.mace_element_type_str).i
    lhs_consumers = self._consumers.get(lhs_of_final_add.output[0], [])
    # lhs must be a binary PROD whose single consumer also reads the rhs.
    if (not ((len(lhs_of_final_add.input) == 2) and (len(lhs_of_final_add.output) == 1) and (len(lhs_consumers) == 1) and (rhs_of_final_add.output[0] in lhs_consumers[0].input) and (elt_type == EltwiseType.PROD.value))):
        return lhs_dict
    final_add_op = lhs_consumers[0]
    if (not ((final_add_op.type == MaceOp.Eltwise.name) and (len(final_add_op.input) == 2) and (len(final_add_op.output) == 1))):
        return lhs_dict
    elt_type = ConverterUtil.get_arg(final_add_op, MaceKeyword.mace_element_type_str).i
    if (elt_type != EltwiseType.SUM.value):
        return lhs_dict
    lhs_dict['is_in'] = True
    lhs_dict['lhs_of_final_add'] = lhs_of_final_add
    lhs_dict['final_add_op'] = final_add_op
    return lhs_dict
def do_fold_instance_norm(self, op, lhs_dict, rhs_dict, scale_offset, unused_ops, unused_args, epsilon):
    """Rewrite the matched Reduce(MEAN) op in place as InstanceNorm.

    Removes the helper ops of the decomposed pattern, wires the
    scale/offset tensors in when present (affine), copies the final
    add's output name and quantize info onto the new op, and records
    epsilon as an arg.
    """
    net = self._model
    # Adopt the final add's output shape.
    del op.output_shape[0].dims[:]
    op.output_shape[0].dims.extend(lhs_dict['final_add_op'].output_shape[0].dims)
    for unused_op in unused_ops:
        net.op.remove(unused_op)
    affine_arg = op.arg.add()
    affine_arg.name = MaceKeyword.mace_affine_str
    if scale_offset:
        affine_arg.i = 1
        op.input.extend([rhs_dict['scale_tensor_name'], rhs_dict['offset_tensor_name']])
        net.op.remove(rhs_dict['rsqrt_mul_scale_op'])
    else:
        affine_arg.i = 0
        net.op.remove(rhs_dict['rhs_of_final_add'])
    net.op.remove(lhs_dict['lhs_of_final_add'])
    self.replace_quantize_info(op, lhs_dict['final_add_op'])
    op.output[0] = lhs_dict['final_add_op'].output[0]
    self.safe_remove_node(lhs_dict['final_add_op'], op)
    op.type = MaceOp.InstanceNorm.name
    # The Reduce-specific args no longer apply to InstanceNorm.
    for arg in unused_args:
        op.arg.remove(arg)
    epsilon_arg = op.arg.add()
    epsilon_arg.name = MaceKeyword.mace_epsilon_str
    epsilon_arg.f = epsilon
def fold_instance_norm(self):
    """Fuse the decomposed instance-norm subgraph into one InstanceNorm op.

    Matches the decomposition rooted at a keepdims Reduce(MEAN) over
    axes (1, 2) of a 4-D tensor: mean -> SqrDiffMean -> (+ epsilon) ->
    pow(-0.5) (rsqrt), with either an affine (scale/offset) or plain
    branch, and the final multiply/add recombination.  One pattern is
    folded per call.

    Returns:
        True if a pattern was folded, False otherwise.
    """
    net = self._model
    for op in net.op:
        is_reduce = ((op.type == MaceOp.Reduce.name) and (len(op.input) == 1) and (len(op.output) == 1))
        if (not is_reduce):
            continue
        reduce_type_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_reduce_type_str)
        reduce_type = reduce_type_arg.i
        axis_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_axis_str)
        axis = axis_arg.ints
        keepdims_arg = ConverterUtil.get_arg(op, MaceKeyword.mace_keepdims_str)
        keepdims = keepdims_arg.i
        # Must be a keepdims MEAN over axes (1, 2) of a 4-D output.
        if (not ((reduce_type == ReduceType.MEAN.value) and (len(axis) == 2) and (axis[0] == 1) and (axis[1] == 2) and (keepdims == 1) and (len(op.output_shape[0].dims) == 4))):
            continue
        mean_consumers = self._consumers.get(op.output[0], [])
        if (len(mean_consumers) != 2):
            continue
        # One consumer must be SqrDiffMean (the variance path).
        sqr_diff_mean_idx = (- 1)
        sqr_diff_mean_op = None
        for idx in range(2):
            if (mean_consumers[idx].type == MaceOp.SqrDiffMean.name):
                sqr_diff_mean_idx = idx
                sqr_diff_mean_op = mean_consumers[idx]
                break
        if (sqr_diff_mean_idx == (- 1)):
            continue
        # The other mean consumer decides affine (PROD) vs plain (NEG).
        second_mean_consumer_op = mean_consumers[(1 - sqr_diff_mean_idx)]
        if (second_mean_consumer_op.type != MaceOp.Eltwise.name):
            continue
        elt_type = ConverterUtil.get_arg(second_mean_consumer_op, MaceKeyword.mace_element_type_str).i
        scale_offset = False
        if (elt_type == EltwiseType.PROD.value):
            scale_offset = True
        elif (elt_type == EltwiseType.NEG.value):
            scale_offset = False
        else:
            continue
        sqr_diff_mean_consumers = self._consumers.get(sqr_diff_mean_op.output[0], [])
        if (len(sqr_diff_mean_consumers) != 1):
            continue
        # Variance + epsilon is an Eltwise SUM with scalar input index 1.
        var_plus_epsilon_op = sqr_diff_mean_consumers[0]
        if (var_plus_epsilon_op.type != MaceOp.Eltwise.name):
            continue
        elt_type = ConverterUtil.get_arg(var_plus_epsilon_op, MaceKeyword.mace_element_type_str).i
        scalar_input_index = ConverterUtil.get_arg(var_plus_epsilon_op, MaceKeyword.mace_scalar_input_index_str).i
        if (not ((len(var_plus_epsilon_op.input) == 1) and (len(var_plus_epsilon_op.output) == 1) and (elt_type == EltwiseType.SUM.value) and (scalar_input_index == 1))):
            continue
        epsilon = ConverterUtil.get_arg(var_plus_epsilon_op, MaceKeyword.mace_scalar_input_str).f
        var_plus_epsilon_consumers = self._consumers.get(var_plus_epsilon_op.output[0], [])
        if (len(var_plus_epsilon_consumers) != 1):
            continue
        # rsqrt is expressed as pow(x, -0.5).
        rsqrt_op = var_plus_epsilon_consumers[0]
        if (rsqrt_op.type != MaceOp.Eltwise.name):
            continue
        elt_type = ConverterUtil.get_arg(rsqrt_op, MaceKeyword.mace_element_type_str).i
        power = ConverterUtil.get_arg(rsqrt_op, MaceKeyword.mace_scalar_input_str).f
        if (not ((len(rsqrt_op.input) == 1) and (len(rsqrt_op.output) == 1) and (elt_type == EltwiseType.POW.value) and (power == (- 0.5)))):
            continue
        if scale_offset:
            rhs_dict = self.get_rhs_op_scale_true(rsqrt_op, second_mean_consumer_op)
        else:
            rhs_dict = self.get_rhs_op_scale_false(rsqrt_op, second_mean_consumer_op)
        if (not rhs_dict['is_in']):
            continue
        lhs_dict = self.get_lhs_and_final_add(scale_offset, rhs_dict, op, rsqrt_op)
        if (not lhs_dict['is_in']):
            continue
        unused_ops = [sqr_diff_mean_op, var_plus_epsilon_op, rsqrt_op, second_mean_consumer_op]
        unused_args = [reduce_type_arg, axis_arg, keepdims_arg]
        self.do_fold_instance_norm(op, lhs_dict, rhs_dict, scale_offset, unused_ops, unused_args, epsilon)
        return True
    return False
def do_single_transpose(self, input_name, already_dealt):
    """Transpose a 4-D const tensor from NCHW to NHWC in place.

    Non-4-D tensors are left untouched.  Transposed tensor names are
    recorded in *already_dealt* so they are only converted once.
    """
    const = self._consts[input_name]
    dims = list(const.dims)
    if len(dims) != 4:
        return
    nchw = np.array(const.float_data).reshape(dims)
    nhwc = nchw.transpose((0, 2, 3, 1))
    const.dims[:] = nhwc.shape
    const.float_data[:] = nhwc.flat
    already_dealt.add(input_name)
def transpose_const_op_input(self):
    """Transpose 4-D const op inputs from NCHW to NHWC where needed.

    Applies to float (non-quantized) CPU/GPU conversion of models from
    NCHW frameworks (ONNX / PyTorch / MegEngine).  For Eltwise/Concat
    every input is considered; for other ops only the first.  Each const
    tensor is transposed at most once (tracked in already_dealt).
    Always returns False.
    """
    net = self._model
    framework = ConverterUtil.framework_type(net)
    nchw_framework = framework in (FrameworkType.ONNX.value,
                                   FrameworkType.PYTORCH.value,
                                   FrameworkType.MEGENGINE.value)
    float_cpu_gpu = ((not self._option.quantize)
                     and self._option.device in (DeviceType.GPU.value,
                                                 DeviceType.CPU.value))
    # The eligibility test is loop-invariant; hoisted out of the op loop
    # (originally re-evaluated for every op).
    if not (nchw_framework and float_cpu_gpu):
        return False
    already_dealt = set()
    multi_input_types = set([MaceOp.Eltwise.name, MaceOp.Concat.name])
    for op in net.op:
        num_input = len(op.input) if op.type in multi_input_types else 1
        for idx in range(num_input):
            input_name = op.input[idx]
            if (input_name in self._consts
                    and input_name not in self._option.input_nodes
                    and input_name not in already_dealt):
                self.do_single_transpose(input_name, already_dealt)
    return False
def transform_biasadd_to_add(self):
    """On HTP, rewrite a BiasAdd with a 1-D const bias as an Eltwise SUM.

    Returns True when one op was rewritten (one per call), else False.
    """
    if self._option.device != DeviceType.HTP.value:
        return False
    for op in self._model.op:
        eligible = (op.type == MaceOp.BiasAdd.name
                    and len(op.input) == 2
                    and op.input[1] in self._consts
                    and len(self._consts[op.input[1]].dims) == 1)
        if not eligible:
            continue
        # Printed before op.type is rewritten, so the message shows the
        # original BiasAdd type.
        print('Transform biasadd to add: %s(%s)' % (op.name, op.type))
        op.type = MaceOp.Eltwise.name
        sum_arg = op.arg.add()
        sum_arg.name = MaceKeyword.mace_element_type_str
        sum_arg.i = EltwiseType.SUM.value
        return True
    return False
def transform_slice_to_strided_slice(self):
    """Rewrite a 5-input ONNX Slice as StridedSlice (HTP only).

    The per-axis starts/ends/steps const tensors (inputs 1, 2 and 4)
    are padded out to the full input rank using the axes tensor
    (input 3), negative ends are wrapped, and input 3 is rebuilt as an
    interleaved [start, end, step] table of shape [rank, 3].

    Returns:
        True if a Slice was rewritten (one per call), False otherwise.
    """
    if (self._option.device != DeviceType.HTP.value):
        return False
    net = self._model
    framework = ConverterUtil.framework_type(net)
    for op in net.op:
        if ((op.type == MaceOp.Slice.name) and (framework == FrameworkType.ONNX.value) and (len(op.input) == 5)):
            op.type = MaceOp.StridedSlice.name
            tensor_shape = self.get_tensor_shape(op.input[0])
            input3 = self._consts[op.input[3]]
            axes_data = input3.int32_data
            # Re-dimension starts/ends/steps tensors to the full rank.
            for tensor in self._model.tensors:
                if (tensor.name in [op.input[1], op.input[2], op.input[4]]):
                    tensor.dims[:] = [len(tensor_shape)]
            for tensor in self._model.tensors:
                if (tensor.name == op.input[1]):
                    # starts: unsliced axes begin at 0.
                    for i in range(len(tensor_shape)):
                        if (i not in axes_data):
                            tensor.int32_data.insert(i, 0)
                if (tensor.name == op.input[2]):
                    # ends: wrap negative ends; unsliced axes end at the
                    # full dim size.
                    for i in range(len(tensor_shape)):
                        if (i in axes_data):
                            if (tensor.int32_data[i] < 0):
                                tensor.int32_data[i] += (tensor_shape[i] + 1)
                        else:
                            tensor.int32_data.insert(i, tensor_shape[i])
                if (tensor.name == op.input[4]):
                    # steps: unsliced axes use stride 1.
                    for i in range(len(tensor_shape)):
                        if (i not in axes_data):
                            tensor.int32_data.insert(i, 1)
            # Rebuild input3 as the interleaved [start, end, step] table.
            del input3.int32_data[0]
            for tensor in self._model.tensors:
                if (tensor.name == op.input[1]):
                    for i in range(len(tensor_shape)):
                        input3.int32_data.insert(i, tensor.int32_data[i])
                if (tensor.name == op.input[2]):
                    for i in range(len(tensor_shape)):
                        input3.int32_data.insert(((2 * i) + 1), tensor.int32_data[i])
                if (tensor.name == op.input[4]):
                    for i in range(len(tensor_shape)):
                        input3.int32_data.insert(((3 * i) + 2), tensor.int32_data[i])
            # NOTE(review): the reshape result is discarded -- this line
            # only raises if the data does not form a [rank, 3] table.
            np.array(input3.int32_data).reshape([len(tensor_shape), 3])
            input3.dims[:] = [len(tensor_shape), 3]
            return True
    return False
def add_transpose_op(self, node, transpose_dims):
    """Append a Transpose op consuming *node* with the given permutation.

    The new op and its output tensor are both named '<node>_transpose';
    the output shape is the producer's first output shape permuted by
    transpose_dims.  Data format is set to NONE and the framework type
    to ONNX.
    """
    producer_dims = self._producer[node].output_shape[0].dims
    transpose_op = self._model.op.add()
    transpose_op.name = node + '_transpose'
    transpose_op.type = MaceOp.Transpose.name
    transpose_op.input.append(node)
    transpose_op.output.append(transpose_op.input[0] + '_transpose')
    out_shape = transpose_op.output_shape.add()
    out_shape.dims.extend([producer_dims[axis] for axis in transpose_dims])
    dtype_arg = transpose_op.arg.add()
    dtype_arg.name = 'T'
    dtype_arg.i = self._option.data_type
    fw_arg = transpose_op.arg.add()
    fw_arg.name = MaceKeyword.mace_framework_type_str
    fw_arg.i = FrameworkType.ONNX.value
    ConverterUtil.add_data_format_arg(transpose_op, DataFormat.NONE)
    perm_arg = transpose_op.arg.add()
    perm_arg.name = MaceKeyword.mace_dims_str
    perm_arg.ints.extend(transpose_dims)
def add_transpose_for_htp(self):
    """Insert explicit Transpose ops to reconcile data formats on HTP.

    For ONNX-derived graphs, inserts an NCHW<->NHWC Transpose in front
    of ops whose producer runs in a different data format, then updates
    the consumer's data-format arg.  One Transpose is inserted per call.

    Returns:
        True if a Transpose was inserted, False otherwise.
    """
    if self._option.device != DeviceType.HTP.value:
        return False
    net = self._model
    framework = ConverterUtil.framework_type(net)
    for op in net.op:
        # NOTE(review): assumes every op carries a data-format arg and
        # at least one input -- confirm for degenerate graphs.
        data_format = ConverterUtil.get_arg(op, MaceKeyword.mace_data_format_str)
        if op.input[0] in self._producer:
            producer_op = self._producer[op.input[0]]
            producer_data_format = ConverterUtil.get_arg(producer_op, MaceKeyword.mace_data_format_str)
            if (op.type == MaceOp.Conv2D.name
                    and framework == FrameworkType.ONNX.value
                    and data_format.i == DataFormat.AUTO.value
                    and producer_data_format.i == DataFormat.NCHW.value):
                # NCHW producer feeding an AUTO conv: transpose to NHWC.
                self.add_transpose_op(op.input[0], [0, 2, 3, 1])
                op.input[0] = op.input[0] + '_transpose'
                data_format = ConverterUtil.get_arg(op, MaceKeyword.mace_data_format_str)
                data_format.i = DataFormat.NHWC.value
                return True
            elif (op.type == MaceOp.MatMul.name
                    and framework == FrameworkType.ONNX.value
                    and data_format.i == DataFormat.NCHW.value
                    and producer_data_format.i == DataFormat.AUTO.value):
                self.add_transpose_op(op.input[0], [0, 3, 1, 2])
                op.input[0] = op.input[0] + '_transpose'
                data_format = ConverterUtil.get_arg(op, MaceKeyword.mace_data_format_str)
                data_format.i = DataFormat.NONE.value
                return True
            elif (op.type == MaceOp.Transpose.name
                    and framework == FrameworkType.ONNX.value
                    and data_format.i == DataFormat.NCHW.value
                    and producer_data_format.i == DataFormat.AUTO.value):
                if op.output[0] in self._consumers:
                    consumer = self._consumers[op.output[0]][0]
                    # Fix: compare against the op-type *name* string as
                    # everywhere else in this file.  The original
                    # `consumer.type == MaceOp.Reshape` compared a string
                    # with an enum member and was always False, making
                    # this branch dead.
                    if consumer.type == MaceOp.Reshape.name:
                        self.add_transpose_op(op.input[0], [0, 3, 1, 2])
                        op.input[0] = op.input[0] + '_transpose'
                        data_format = ConverterUtil.get_arg(op, MaceKeyword.mace_data_format_str)
                        data_format.i = DataFormat.NONE.value
                        return True
            elif (op.type in [MaceOp.Eltwise.name, MaceOp.Concat.name]
                    and framework == FrameworkType.ONNX.value
                    and data_format.i == DataFormat.NCHW.value
                    and len(op.input) == 2
                    and op.input[1] in self._producer):
                input_1 = self._producer[op.input[1]]
                input_1_data_format = ConverterUtil.get_arg(input_1, MaceKeyword.mace_data_format_str)
                if producer_data_format.i == DataFormat.AUTO.value:
                    self.add_transpose_op(op.input[0], [0, 3, 1, 2])
                    op.input[0] = op.input[0] + '_transpose'
                    data_format = ConverterUtil.get_arg(op, MaceKeyword.mace_data_format_str)
                    data_format.i = DataFormat.NONE.value
                    return True
                elif input_1_data_format.i == DataFormat.AUTO.value:
                    print(op.input[1])
                    self.add_transpose_op(op.input[1], [0, 3, 1, 2])
                    op.input[1] = op.input[1] + '_transpose'
                    data_format = ConverterUtil.get_arg(op, MaceKeyword.mace_data_format_str)
                    data_format.i = DataFormat.NONE.value
                    return True
    return False
def describe_status(status):
    """Return a human-readable label for an HTTP *status* code.

    NOTE(review): the original signature line was truncated to a bare
    ``def`` in this file; the name and parameter here are reconstructed
    from the body -- confirm against callers.

    451 is special-cased, and codes rejected by ``try_status`` fall back
    to a generic label.
    """
    if status == 451:
        return 'Unavailable_for_Legal_Reasons (451)'
    if not try_status(status):
        return ('Other Unexpected Status (%s)' % status)
    # str(HTTPStatus(n)) -> 'HTTPStatus.NAME'; keep only the name part.
    return ('%s (%s)' % (str(HTTPStatus(status)).split('.')[1].title(), status))
def main(args, init_distributed=False):
    """Single-process training entry point.

    Builds the task, model, criterion and trainer from ``args``, restores
    the latest checkpoint if one exists, then alternates train / validate /
    save-checkpoint until a stop criterion is met (lr floor, max epoch, or
    max update count).

    Args:
        args: parsed fairseq-style argument namespace.
        init_distributed: when True, initialise the distributed backend and
            store the resulting rank on ``args.distributed_rank``.
    """
    utils.import_user_module(args)
    # A batch-size cap is mandatory, either token- or sentence-based.
    assert ((args.max_tokens is not None) or (args.max_sentences is not None)), 'Must specify batch size either with --max-tokens or --max-sentences'
    if (torch.cuda.is_available() and (not args.cpu)):
        torch.cuda.set_device(args.device_id)
    # Seed before any model construction for reproducibility.
    torch.manual_seed(args.seed)
    if init_distributed:
        args.distributed_rank = distributed_utils.distributed_init(args)
    print(args)
    task = tasks.setup_task(args)
    # Pre-load the validation splits; combine=True merges sharded datasets.
    for valid_sub_split in args.valid_subset.split(','):
        task.load_dataset(valid_sub_split, combine=True, epoch=0)
    model = task.build_model(args)
    criterion = task.build_criterion(args)
    print(model)
    print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
    print('| num. model params: {} (num. trained: {})'.format(sum((p.numel() for p in model.parameters())), sum((p.numel() for p in model.parameters() if p.requires_grad))))
    trainer = Trainer(args, task, model, criterion)
    print('| training on {} GPUs'.format(args.distributed_world_size))
    print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(args.max_tokens, args.max_sentences))
    # Restores model/optimizer state and yields the epoch iterator to resume from.
    (extra_state, epoch_itr) = checkpoint_utils.load_checkpoint(args, trainer)
    max_epoch = (args.max_epoch or math.inf)
    max_update = (args.max_update or math.inf)
    lr = trainer.get_lr()
    train_meter = StopwatchMeter()
    train_meter.start()
    valid_losses = [None]
    valid_subsets = args.valid_subset.split(',')
    while ((lr > args.min_lr) and (epoch_itr.epoch < max_epoch) and (trainer.get_num_updates() < max_update)):
        train(args, trainer, task, epoch_itr)
        if ((not args.disable_validation) and ((epoch_itr.epoch % args.validate_interval) == 0)):
            valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
        else:
            valid_losses = [None]
        # The scheduler may consume the validation loss (e.g. reduce-on-plateau).
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
        if ((epoch_itr.epoch % args.save_interval) == 0):
            checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
        # Sharded data path ("path1:path2:..."): rebuild the train iterator
        # so the next epoch reads the next shard.
        if (':' in getattr(args, 'data', '')):
            epoch_itr = trainer.get_train_iterator(epoch_itr.epoch)
    train_meter.stop()
    print('| done training in {:.1f} seconds'.format(train_meter.sum))
def get_paths(agent_name: str, args) -> dict:
    """Assemble the filesystem paths used for training *agent_name*.

    Creates the model directory for a fresh agent, verifies that a
    checkpoint zip exists when resuming (``args.load``), and creates the
    evaluation-log / tensorboard directories on demand.

    Args:
        agent_name: name of the DRL agent (directory under ``agents/``).
        args: parsed CLI namespace providing ``load``, ``eval_log``, ``tb``.

    Returns:
        dict mapping purpose keys ('model', 'tb', 'eval', 'robot_setting',
        'hyperparams', 'robot_as', 'curriculum') to absolute paths; 'tb' and
        'eval' are None when the corresponding feature is disabled.

    Raises:
        FileNotFoundError: when resuming and no checkpoint zip is present.
    """
    # Renamed from `dir` to avoid shadowing the builtin.
    base_dir = rospkg.RosPack().get_path('arena_local_planner_drl')
    PATHS = {'model': os.path.join(base_dir, 'agents', agent_name), 'tb': os.path.join(base_dir, 'training_logs', 'tensorboard', agent_name), 'eval': os.path.join(base_dir, 'training_logs', 'train_eval_log', agent_name), 'robot_setting': os.path.join(rospkg.RosPack().get_path('simulator_setup'), 'robot', ('myrobot' + '.model.yaml')), 'hyperparams': os.path.join(base_dir, 'configs', 'hyperparameters'), 'robot_as': os.path.join(base_dir, 'configs', 'default_settings.yaml'), 'curriculum': os.path.join(base_dir, 'configs', 'training_curriculum_map1small.yaml')}
    if (args.load is None):
        os.makedirs(PATHS['model'])
    # Bug fix: the checkpoint check previously used the module-level
    # AGENT_NAME global instead of the `agent_name` parameter that the
    # paths above are built from; also balanced the quotes in the message.
    elif ((not os.path.isfile(os.path.join(PATHS['model'], (agent_name + '.zip')))) and (not os.path.isfile(os.path.join(PATHS['model'], 'best_model.zip')))):
        raise FileNotFoundError(("Couldn't find model named '%s.zip' or 'best_model.zip' in '%s'" % (agent_name, PATHS['model'])))
    if args.eval_log:
        if (not os.path.exists(PATHS['eval'])):
            os.makedirs(PATHS['eval'])
    else:
        PATHS['eval'] = None
    if args.tb:
        if (not os.path.exists(PATHS['tb'])):
            os.makedirs(PATHS['tb'])
    else:
        PATHS['tb'] = None
    return PATHS
class CFooterNode(Node):
    """Emits the closing C code of a generated CNN source file: the function
    epilogue plus an optional ``CNN_TEST`` main() harness that reads
    ``img.bin``, runs ``cnn{id}`` and writes the scores to a result file.

    Singleton-style: the most recently constructed instance is reachable
    through ``CFooterNode.instance()``.
    """
    # Class-level singleton slot; set by __init__.
    __instance: CFooterNode = None
    # NOTE(review): no @staticmethod decorator is visible — presumably
    # stripped during extraction; confirm against the original source.
    def instance() -> CFooterNode:
        return CFooterNode.__instance
    # Template for the epilogue + test harness; filled via str.format with
    # the dimension/id/filename fields collected in __init__. The doubled
    # braces ({{ }}) are literal braces in the emitted C code.
    snippet = '\n    return;\n}}\n\n#ifdef CNN_TEST\n#include <stdio.h>\n#ifdef TIMING\n#include <ctime>\n#endif\n\nint main()\n{{\n    int i, j, k, width, height, max_colour;\n    unsigned char byte;\n    float x[{x_dim}][{y_dim}][{z_dim}];\n    float scores[{in_dim}];\n    FILE *f = fopen("img.bin", "rb");\n    fread((float*)x, sizeof(float), {x_dim} * {y_dim} * {z_dim}, f);\n    fclose(f);\n    {weights_init}\n\n    cnn{id}(x, scores);\n    FILE *w = fopen("{exe_return_filename}", "w");\n    for (int i = 0; i < {in_dim}; i++)\n        fprintf(w, "%f ", scores[i]);\n    fclose(w);\n}}\n#endif\n    '
    end_c = '\n'
    def __init__(self, exe_return_filename, weights_method, prev_node):
        """Collect the dimensions/id from the matching CHeaderNode and record
        how weights are initialised in the emitted harness.

        Args:
            exe_return_filename: file the test harness writes scores into.
            weights_method: 'stdio' (call init_weights()) or 'direct'
                (weights compiled in, no init call).
            prev_node: preceding codegen node; supplies in_dim/in_var.
        """
        super().__init__(prev_node)
        CFooterNode.__instance = self
        dim = CHeaderNode.instance().in_dim
        id = CHeaderNode.instance().id
        self.in_dim = prev_node.out_dim
        self.in_var = prev_node.out_var
        self.x_dim = dim[0]
        self.y_dim = dim[1]
        # 2-D inputs get a depth of 1 so the template always has 3 dims.
        if (len(dim) > 2):
            self.z_dim = dim[2]
        else:
            self.z_dim = 1
        self.version = '5'
        if (id is None):
            self.id = ''
        else:
            self.id = id
        self.exe_return_filename = exe_return_filename
        if (weights_method == 'stdio'):
            self.weights_init = 'init_weights();'
        elif (weights_method == 'direct'):
            self.weights_init = ''
        else:
            raise Exception('Unimplemented')
    def write_c(self):
        """Close the current indentation level before emitting the footer."""
        Writer.cur_depth -= 1
        super().write_c()
def main(params):
    """Build the model and post-processor described by *params*, then run prediction."""
    network = build_model(params['model'])
    postprocessor = build_post_process(params['post_process'])
    predictor = Predictor(network, postprocessor, params)
    predictor.predict()
def iobes2bio(iobes_labels):
    """Convert IOBES tag strings to the BIO scheme.

    'S-*' (single) becomes 'B-*' and 'E-*' (end) becomes 'I-*'; every other
    tag ('B-*', 'I-*', 'O', ...) passes through unchanged.
    """
    prefix_swap = {'S': 'B', 'E': 'I'}
    return [(prefix_swap.get(tag[0], tag[0]) + tag[1:]) for tag in iobes_labels]
_sentencepiece  # NOTE(review): bare name — presumably a stripped `@require_sentencepiece` decorator; confirm upstream
_tokenizers  # NOTE(review): presumably a stripped `@require_tokenizers` decorator
class T5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the slow (sentencepiece) and fast (rust) T5 tokenizers."""
    tokenizer_class = T5Tokenizer
    rust_tokenizer_class = T5TokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        """Persist a sample-vocab tokenizer into tmpdir for the mixin helpers."""
        super().setUp()
        tokenizer = T5Tokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        """Round-trip tokenize -> ids -> tokens on the sample vocab."""
        tokenizer = T5Tokenizer(SAMPLE_VOCAB)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['This', 'is', 'a', 't', 'est'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
        tokens = tokenizer.tokenize('I was born in 92000, and this is false.')
        self.assertListEqual(tokens, [(SPIECE_UNDERLINE + 'I'), (SPIECE_UNDERLINE + 'was'), (SPIECE_UNDERLINE + 'b'), 'or', 'n', (SPIECE_UNDERLINE + 'in'), (SPIECE_UNDERLINE + ''), '9', '2', '0', '0', '0', ',', (SPIECE_UNDERLINE + 'and'), (SPIECE_UNDERLINE + 'this'), (SPIECE_UNDERLINE + 'is'), (SPIECE_UNDERLINE + 'f'), 'al', 's', 'e', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # Out-of-vocab ids (0) come back as '<unk>'.
        self.assertListEqual(back_tokens, [(SPIECE_UNDERLINE + 'I'), (SPIECE_UNDERLINE + 'was'), (SPIECE_UNDERLINE + 'b'), 'or', 'n', (SPIECE_UNDERLINE + 'in'), (SPIECE_UNDERLINE + ''), '<unk>', '2', '0', '0', '0', ',', (SPIECE_UNDERLINE + 'and'), (SPIECE_UNDERLINE + 'this'), (SPIECE_UNDERLINE + 'is'), (SPIECE_UNDERLINE + 'f'), 'al', 's', '<unk>', '.'])
    _property  # NOTE(review): presumably a stripped `@cached_property` decorator
    def t5_base_tokenizer(self):
        return T5Tokenizer.from_pretrained('t5-base')
    _property  # NOTE(review): presumably a stripped `@cached_property` decorator
    def t5_base_tokenizer_fast(self):
        return T5TokenizerFast.from_pretrained('t5-base')
    def get_tokenizer(self, **kwargs) -> T5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, pad_token=None, **kwargs)
    def get_rust_tokenizer(self, **kwargs) -> T5TokenizerFast:
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, pad_token=None, **kwargs)
    def test_rust_and_python_full_tokenizers(self):
        """Slow and fast tokenizers must agree on tokens and ids."""
        if (not self.test_rust_tokenizer):
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is false.'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_eos_treatment(self):
        """Writing '</s>' in the text must yield the same ids as omitting it."""
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'])
        batch_without_eos_added = tokenizer(['hi', 'I went to the gym', ''])
        self.assertListEqual(batch_with_eos_added['input_ids'], batch_without_eos_added['input_ids'])
    def test_prepare_seq2seq_batch(self):
        """prepare_seq2seq_batch produces the expected src ids and shapes."""
        tokenizer = self.t5_base_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        tgt_text = ['Summary of the text.', 'Another summary.']
        expected_src_tokens = [71, 307, 8986, 21, 4505, 1635, 1707, 5, tokenizer.eos_token_id]
        batch = tokenizer.prepare_seq2seq_batch(src_text, tgt_texts=tgt_text, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)
    def test_empty_target_text(self):
        """Without tgt_texts, no decoder inputs/masks are emitted."""
        tokenizer = self.t5_base_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer.prepare_seq2seq_batch(src_text, return_tensors=FRAMEWORK)
        self.assertIn('input_ids', batch)
        self.assertIn('attention_mask', batch)
        self.assertNotIn('decoder_input_ids', batch)
        self.assertNotIn('decoder_attention_mask', batch)
    def test_max_target_length(self):
        """max_target_length (or max_length) pads labels to the given width."""
        tokenizer = self.t5_base_tokenizer
        src_text = ['A short paragraph for summarization.', 'Another short paragraph for summarization.']
        tgt_text = ['Summary of the text.', 'Another summary.']
        batch = tokenizer.prepare_seq2seq_batch(src_text, tgt_texts=tgt_text, max_target_length=32, padding='max_length', return_tensors=FRAMEWORK)
        self.assertEqual(32, batch['labels'].shape[1])
        batch = tokenizer.prepare_seq2seq_batch(src_text, tgt_texts=tgt_text, max_length=32, padding='max_length', return_tensors=FRAMEWORK)
        self.assertEqual(32, batch['labels'].shape[1])
    def test_outputs_not_longer_than_maxlen(self):
        """Very long inputs are truncated to the model max length (512)."""
        tokenizer = self.t5_base_tokenizer
        batch = tokenizer.prepare_seq2seq_batch([('I am a small frog' * 1000), 'I am a small frog'], return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))
    def test_eos_in_input(self):
        """Explicit '</s>' in src/tgt text encodes to exactly one EOS id (1)."""
        tokenizer = self.t5_base_tokenizer
        src_text = ['A long paragraph for summarization. </s>']
        tgt_text = ['Summary of the text. </s>']
        expected_src_tokens = [71, 307, 8986, 21, 4505, 1635, 1707, 5, 1]
        expected_tgt_tokens = [20698, 13, 8, 1499, 5, 1]
        batch = tokenizer.prepare_seq2seq_batch(src_text, tgt_texts=tgt_text, return_tensors=FRAMEWORK)
        src_ids = list(batch.input_ids.numpy()[0])
        tgt_ids = list(batch.labels.numpy()[0])
        self.assertEqual(expected_src_tokens, src_ids)
        self.assertEqual(expected_tgt_tokens, tgt_ids)
    def test_token_type_ids(self):
        """Slow and fast tokenizers agree on token_type_ids for text pairs."""
        src_text_1 = ['A first paragraph for summarization.']
        src_text_2 = ['A second paragraph for summarization.']
        fast_token_type_ids = self.t5_base_tokenizer_fast(src_text_1, src_text_2, add_special_tokens=True, return_token_type_ids=True).token_type_ids
        slow_token_type_ids = self.t5_base_tokenizer(src_text_1, src_text_2, add_special_tokens=True, return_token_type_ids=True).token_type_ids
        self.assertEqual(slow_token_type_ids, fast_token_type_ids)
        self.assertEqual(len(slow_token_type_ids[0]), 18)
    def test_fast_and_slow_same_result(self):
        """Encoding and decoding of special tokens match between slow and fast."""
        src_text = '<pad> Today is <unk> nice day </s>'
        tgt_ids = [0, 1960, 19, 2, 1245, 239, 1]
        tgt_text = '<pad> Today is<unk> nice day</s>'
        fast_ids = self.t5_base_tokenizer_fast(src_text, add_special_tokens=False).input_ids
        slow_ids = self.t5_base_tokenizer(src_text, add_special_tokens=False).input_ids
        self.assertEqual(tgt_ids, fast_ids)
        self.assertEqual(tgt_ids, slow_ids)
        fast_text = self.t5_base_tokenizer_fast.decode(fast_ids)
        slow_text = self.t5_base_tokenizer.decode(fast_ids)
        self.assertEqual(tgt_text, fast_text)
        self.assertEqual(tgt_text, slow_text)
def get_globalso_net(worker, enc_net, ref_net, init_net_path=None):
    """Wrap the encoder/refiner pair into a GlobalSONet over the worker's verified set."""
    composed_net = get_net(enc_net, ref_net, init_net_path=init_net_path)
    dataset = _verify_and_get_test_set(worker)
    return GlobalSONet(composed_net, dataset)
class RetinaNetE2ETest(unittest.TestCase):
    """End-to-end smoke test for the RetinaNet model-zoo configuration."""

    def setUp(self):
        self.model = get_model_zoo('COCO-Detection/retinanet_R_50_FPN_1x.yaml')

    def test_empty_data(self):
        """Forward + backward with images carrying zero ground-truth instances."""
        instances = [get_empty_instance(200, 250), get_empty_instance(200, 249)]
        # Inference pass needs no targets.
        self.model.eval()
        self.model([create_model_input(torch.rand(3, 200, 250)), create_model_input(torch.rand(3, 200, 249))])
        # Training pass: losses over empty targets must still backprop.
        self.model.train()
        with EventStorage():
            batch = [create_model_input(torch.rand(3, 200, 250), instances[0]), create_model_input(torch.rand(3, 200, 249), instances[1])]
            losses = self.model(batch)
            sum(losses.values()).backward()
            del losses
def to_absolute_coordinates(boxlist, height, width, check_range=True, scope=None):
    """Scale normalized [0, 1] box coordinates up to absolute pixel units.

    Args:
        boxlist: BoxList holding normalized coordinates.
        height: image height (cast to float32).
        width: image width (cast to float32).
        check_range: when True, assert no coordinate exceeds 1.01, i.e. the
            input really is normalized.
        scope: optional TF name scope.

    Returns:
        BoxList with coordinates multiplied by (height, width).
    """
    with tf.name_scope(scope, 'ToAbsoluteCoordinates'):
        height = tf.cast(height, tf.float32)
        width = tf.cast(width, tf.float32)
        if check_range:
            max_coord = tf.reduce_max(boxlist.get())
            range_assert = tf.Assert(tf.greater_equal(1.01, max_coord), ['maximum box coordinate value is larger than 1.01: ', max_coord])
            # Thread the assert into the graph by tying it to `width`.
            with tf.control_dependencies([range_assert]):
                width = tf.identity(width)
        return scale(boxlist, height, width)
def read_file_list(filename):
    """Parse a TUM-style trajectory/association text file into a dict.

    Each non-comment line has the form ``timestamp d1 d2 ...`` where fields
    may be separated by spaces, commas or tabs; lines starting with '#' are
    ignored, as are lines with no data after the timestamp.

    Args:
        filename: path of the text file to read.

    Returns:
        dict mapping float(timestamp) -> list of the remaining string fields.
    """
    # `with` guarantees the handle is closed (the original leaked it).
    with open(filename) as fh:
        data = fh.read()
    lines = data.replace(',', ' ').replace('\t', ' ').split('\n')
    # Split fields, strip whitespace, drop empties and comment lines.
    rows = [[v.strip() for v in line.split(' ') if (v.strip() != '')] for line in lines if ((len(line) > 0) and (line[0] != '#'))]
    # Keep rows with data beyond the timestamp; renamed to avoid shadowing builtin `list`.
    entries = [(float(row[0]), row[1:]) for row in rows if (len(row) > 1)]
    return dict(entries)
class TestGuidedAnchorHead(TestCase):
    """Tests for GuidedAnchorHead loss computation and feat-based prediction."""
    def test_guided_anchor_head_loss(self):
        """Losses behave correctly with zero and with one ground-truth box."""
        s = 256
        img_metas = [{'img_shape': (s, s), 'pad_shape': (s, s), 'scale_factor': (1, 1)}]
        guided_anchor_head = GuidedAnchorHead(**guided_anchor_head_config)
        # One random feature map per anchor-generator stride; note this is a
        # generator, consumed entirely by the single forward call below.
        feats = (torch.rand(1, 4, (s // stride[1]), (s // stride[0])) for stride in guided_anchor_head.square_anchor_generator.strides)
        outs = guided_anchor_head(feats)
        # Case 1: empty ground truth — cls/loc losses stay positive (background
        # is still classified), box/shape losses must be exactly zero.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        empty_gt_losses = guided_anchor_head.loss_by_feat(*outs, [gt_instances], img_metas)
        empty_cls_loss = sum(empty_gt_losses['loss_cls']).item()
        empty_box_loss = sum(empty_gt_losses['loss_bbox']).item()
        empty_shape_loss = sum(empty_gt_losses['loss_shape']).item()
        empty_loc_loss = sum(empty_gt_losses['loss_loc']).item()
        self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')
        self.assertGreater(empty_loc_loss, 0, 'location loss should be non-zero')
        self.assertEqual(empty_box_loss, 0, 'there should be no box loss when there are no true boxes')
        self.assertEqual(empty_shape_loss, 0, 'there should be no shape loss when there are no true boxes')
        # Case 2: a single ground-truth box — all four losses must be positive.
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        one_gt_losses = guided_anchor_head.loss_by_feat(*outs, [gt_instances], img_metas)
        onegt_cls_loss = sum(one_gt_losses['loss_cls']).item()
        onegt_box_loss = sum(one_gt_losses['loss_bbox']).item()
        onegt_shape_loss = sum(one_gt_losses['loss_shape']).item()
        onegt_loc_loss = sum(one_gt_losses['loss_loc']).item()
        self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss, 0, 'box loss should be non-zero')
        self.assertGreater(onegt_shape_loss, 0, 'shape loss should be non-zero')
        self.assertGreater(onegt_loc_loss, 0, 'location loss should be non-zero')
    def test_guided_anchor_head_predict_by_feat(self):
        """predict_by_feat runs without error on random multi-level features."""
        s = 256
        img_metas = [{'img_shape': (s, s), 'pad_shape': (s, s), 'scale_factor': (1, 1)}]
        guided_anchor_head = GuidedAnchorHead(**guided_anchor_head_config)
        feats = (torch.rand(1, 4, (s // stride[1]), (s // stride[0])) for stride in guided_anchor_head.square_anchor_generator.strides)
        outs = guided_anchor_head(feats)
        guided_anchor_head.predict_by_feat(*outs, batch_img_metas=img_metas, rescale=True)
def look_for_implied_ibids(splitted_citations):
    """Resolve implicit 'ibid.' journal references across split citations.

    After a citation that names a journal, any following citation without a
    JOURNAL element whose MISC text begins with a volume/page numeration is
    taken to refer to that same journal: a synthetic JOURNAL element (marked
    ``is_ibid``) is appended and the numeration text is consumed from the
    MISC element.

    Args:
        splitted_citations: list of citations, each a list of element dicts
            with at least a 'type' key.

    Returns:
        The same list, mutated in place with any synthesized JOURNAL elements.
    """
    def look_for_journal(els):
        # True when any element of the citation is a JOURNAL reference.
        for el in els:
            if (el['type'] == 'JOURNAL'):
                return True
        return False
    current_journal = None
    for citation in splitted_citations:
        # Only citations that follow a journal and lack one themselves can be ibids.
        if (current_journal and (not look_for_journal(citation))):
            for el in citation:
                if (el['type'] == 'MISC'):
                    numeration = find_numeration(el['misc_txt'])
                    if numeration:
                        # Fall back to the series encoded in the previous journal's volume.
                        if (not numeration['series']):
                            numeration['series'] = extract_series_from_volume(current_journal['volume'])
                        if numeration['series']:
                            volume = (numeration['series'] + numeration['volume'])
                        else:
                            volume = numeration['volume']
                        ibid_el = {'type': 'JOURNAL', 'misc_txt': '', 'title': current_journal['title'], 'volume': volume, 'year': numeration['year'], 'page': (numeration['page'] or numeration['jinst_page']), 'page_end': numeration['page_end'], 'is_ibid': True, 'extra_ibids': []}
                        citation.append(ibid_el)
                        # Strip the consumed numeration prefix from the MISC text.
                        el['misc_txt'] = el['misc_txt'][numeration['len']:]
        # Remember the last journal seen so the next citation can inherit it.
        current_journal = None
        for el in citation:
            if (el['type'] == 'JOURNAL'):
                current_journal = el
    return splitted_citations
class MidasCore(nn.Module):
    """Wrapper around a MiDaS relative-depth model that can also expose
    intermediate decoder activations via forward hooks.

    The wrapped model's trainability and feature-fetching are toggleable;
    inputs are normalized/resized by a PrepForMidas preprocessing module.
    """
    def __init__(self, midas, trainable=False, fetch_features=True, layer_names=('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1'), freeze_bn=False, keep_aspect_ratio=True, img_size=384, **kwargs):
        """Wrap *midas*; optionally freeze weights/BN and attach feature hooks.

        Args:
            midas: the MiDaS model to wrap.
            trainable: whether the core's parameters receive gradients.
            fetch_features: whether to hook and return intermediate features.
            layer_names: which hooked activations forward() returns, in order.
            freeze_bn: put all BatchNorm2d modules in eval mode.
            keep_aspect_ratio / img_size / kwargs: preprocessing options.
        """
        super().__init__()
        self.core = midas
        self.output_channels = None
        self.core_out = {}  # filled by the forward hooks, keyed by layer name
        self.trainable = trainable
        self.fetch_features = fetch_features
        self.handles = []  # hook handles, so they can be removed later
        self.layer_names = layer_names
        self.set_trainable(trainable)
        self.set_fetch_features(fetch_features)
        self.prep = PrepForMidas(keep_aspect_ratio=keep_aspect_ratio, img_size=img_size, do_resize=kwargs.get('do_resize', True))
        if freeze_bn:
            self.freeze_bn()
    def set_trainable(self, trainable):
        """Freeze or unfreeze all parameters; returns self for chaining."""
        self.trainable = trainable
        if trainable:
            self.unfreeze()
        else:
            self.freeze()
        return self
    def set_fetch_features(self, fetch_features):
        """Attach or remove the feature hooks; returns self for chaining."""
        self.fetch_features = fetch_features
        if fetch_features:
            if (len(self.handles) == 0):
                self.attach_hooks(self.core)
        else:
            self.remove_hooks()
        return self
    def freeze(self):
        """Disable gradients for every parameter."""
        for p in self.parameters():
            p.requires_grad = False
        self.trainable = False
        return self
    def unfreeze(self):
        """Enable gradients for every parameter."""
        for p in self.parameters():
            p.requires_grad = True
        self.trainable = True
        return self
    def freeze_bn(self):
        """Put every BatchNorm2d into eval mode (running stats frozen)."""
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()
        return self
    def forward(self, x, denorm=False, return_rel_depth=False):
        """Run MiDaS on *x* and return hooked features (and optionally depth).

        Preprocessing always runs under no_grad; the core runs with grads
        only when the wrapper is trainable.
        """
        with torch.no_grad():
            if denorm:
                x = denormalize(x)
            x = self.prep(x)
        with torch.set_grad_enabled(self.trainable):
            rel_depth = self.core(x)
            if (not self.fetch_features):
                return rel_depth
        # Collect hook outputs in the order requested by layer_names.
        out = [self.core_out[k] for k in self.layer_names]
        if return_rel_depth:
            return (rel_depth, out)
        return out
    def get_rel_pos_params(self):
        """Yield encoder parameters that are relative-position embeddings."""
        for (name, p) in self.core.pretrained.named_parameters():
            if ('relative_position' in name):
                (yield p)
    def get_enc_params_except_rel_pos(self):
        """Yield encoder parameters that are NOT relative-position embeddings."""
        for (name, p) in self.core.pretrained.named_parameters():
            if ('relative_position' not in name):
                (yield p)
    def freeze_encoder(self, freeze_rel_pos=False):
        """Freeze the encoder; keep rel-pos params trainable unless told otherwise."""
        if freeze_rel_pos:
            for p in self.core.pretrained.parameters():
                p.requires_grad = False
        else:
            for p in self.get_enc_params_except_rel_pos():
                p.requires_grad = False
        return self
    def attach_hooks(self, midas):
        """Register forward hooks that copy named activations into core_out."""
        if (len(self.handles) > 0):
            self.remove_hooks()
        if ('out_conv' in self.layer_names):
            # Hook the 4th child of the output conv block specifically.
            self.handles.append(list(midas.scratch.output_conv.children())[3].register_forward_hook(get_activation('out_conv', self.core_out)))
        if ('r4' in self.layer_names):
            self.handles.append(midas.scratch.refinenet4.register_forward_hook(get_activation('r4', self.core_out)))
        if ('r3' in self.layer_names):
            self.handles.append(midas.scratch.refinenet3.register_forward_hook(get_activation('r3', self.core_out)))
        if ('r2' in self.layer_names):
            self.handles.append(midas.scratch.refinenet2.register_forward_hook(get_activation('r2', self.core_out)))
        if ('r1' in self.layer_names):
            self.handles.append(midas.scratch.refinenet1.register_forward_hook(get_activation('r1', self.core_out)))
        if ('l4_rn' in self.layer_names):
            self.handles.append(midas.scratch.layer4_rn.register_forward_hook(get_activation('l4_rn', self.core_out)))
        return self
    def remove_hooks(self):
        """Detach all registered forward hooks."""
        for h in self.handles:
            h.remove()
        return self
    def __del__(self):
        # Best-effort cleanup so hooks don't outlive the wrapper.
        self.remove_hooks()
    def set_output_channels(self, model_type):
        self.output_channels = MIDAS_SETTINGS[model_type]
    # NOTE(review): build/build_from_config/parse_img_size take no `self`
    # but carry no @staticmethod decorator — presumably stripped during
    # extraction; they are called as MidasCore.build(...) style. Confirm.
    def build(midas_model_type='DPT_BEiT_L_384', train_midas=False, use_pretrained_midas=True, fetch_features=False, freeze_bn=True, force_keep_ar=False, force_reload=False, **kwargs):
        """Download the requested MiDaS variant via torch.hub and wrap it."""
        if (midas_model_type not in MIDAS_SETTINGS):
            raise ValueError(f'Invalid model type: {midas_model_type}. Must be one of {list(MIDAS_SETTINGS.keys())}')
        if ('img_size' in kwargs):
            kwargs = MidasCore.parse_img_size(kwargs)
        img_size = kwargs.pop('img_size', [384, 384])
        print('img_size', img_size)
        midas = torch.hub.load('intel-isl/MiDaS', midas_model_type, pretrained=use_pretrained_midas, force_reload=force_reload)
        kwargs.update({'keep_aspect_ratio': force_keep_ar})
        midas_core = MidasCore(midas, trainable=train_midas, fetch_features=fetch_features, freeze_bn=freeze_bn, img_size=img_size, **kwargs)
        midas_core.set_output_channels(midas_model_type)
        return midas_core
    def build_from_config(config):
        """Build from a config dict by unpacking it into build()."""
        return MidasCore.build(**config)
    def parse_img_size(config):
        """Normalize config['img_size'] to a two-element [H, W] list.

        Accepts 'H,W' strings, a single int (square), or an [H, W] list.
        """
        assert ('img_size' in config)
        if isinstance(config['img_size'], str):
            assert (',' in config['img_size']), 'img_size should be a string with comma separated img_size=H,W'
            config['img_size'] = list(map(int, config['img_size'].split(',')))
            assert (len(config['img_size']) == 2), 'img_size should be a string with comma separated img_size=H,W'
        elif isinstance(config['img_size'], int):
            config['img_size'] = [config['img_size'], config['img_size']]
        else:
            assert (isinstance(config['img_size'], list) and (len(config['img_size']) == 2)), 'img_size should be a list of H,W'
        return config
def gaussian_orthogonal_random_matrix(nb_rows, nb_columns, scaling=0, device=None):
    """Build a (nb_rows, nb_columns) Gaussian orthogonal random matrix
    (Performer / FAVOR+ random features).

    Stacks orthogonal (nb_columns x nb_columns) blocks, truncating the last
    block when nb_rows is not a multiple of nb_columns, then rescales rows.

    Args:
        nb_rows: number of random feature rows.
        nb_columns: dimensionality of each feature row.
        scaling: 0 draws per-row norms from a Gaussian; 1 uses the fixed
            value sqrt(nb_columns) for every row.
        device: torch device for the generated tensors.

    Returns:
        Tensor of shape (nb_rows, nb_columns).

    Raises:
        ValueError: for any scaling other than 0 or 1.
    """
    nb_full_blocks = int((nb_rows / nb_columns))
    block_list = []
    for _ in range(nb_full_blocks):
        q = orthogonal_matrix_chunk(nb_columns, device=device)
        block_list.append(q)
    # Partial last block for the remaining rows, if any.
    remaining_rows = (nb_rows - (nb_full_blocks * nb_columns))
    if (remaining_rows > 0):
        q = orthogonal_matrix_chunk(nb_columns, device=device)
        block_list.append(q[:remaining_rows])
    final_matrix = torch.cat(block_list)
    if (scaling == 0):
        multiplier = torch.randn((nb_rows, nb_columns), device=device).norm(dim=1)
    elif (scaling == 1):
        multiplier = (math.sqrt(float(nb_columns)) * torch.ones((nb_rows,), device=device))
    else:
        raise ValueError(f'Invalid scaling {scaling}')
    # Bug fix: the matrix-multiplication operator was missing in the original
    # return statement (`torch.diag(multiplier) final_matrix` — a SyntaxError).
    return (torch.diag(multiplier) @ final_matrix)
class Objective():
    """Loss objectives for semantic-correspondence training.

    NOTE(review): every method takes ``cls`` as its first parameter yet no
    ``@classmethod`` decorators are visible — presumably stripped during
    extraction. The class acts as a namespace configured once via
    ``initialize``; confirm against the original source.
    """
    def initialize(cls, target_rate, alpha):
        """Set the shared state used by the loss methods below."""
        cls.softmax = torch.nn.Softmax(dim=1)
        cls.target_rate = target_rate
        cls.alpha = alpha
        cls.eps = 1e-30  # numerical floor to keep log() finite
    def weighted_cross_entropy(cls, correlation_matrix, easy_match, hard_match, batch):
        """Per-image keypoint cross-entropy, with 'easy' matches down-weighted
        by their squared distance relative to the PCK threshold; returns the
        batch mean."""
        loss_buf = correlation_matrix.new_zeros(correlation_matrix.size(0))
        correlation_matrix = Norm.unit_gaussian_normalize(correlation_matrix)
        for (idx, (ct, thres, npt)) in enumerate(zip(correlation_matrix, batch['pckthres'], batch['n_pts'])):
            # Hard matches contribute at full weight.
            if (len(hard_match['src'][idx]) > 0):
                cross_ent = cls.cross_entropy(ct, hard_match['src'][idx], hard_match['trg'][idx])
                loss_buf[idx] += cross_ent.sum()
            # Easy matches are weighted by (dist / (pck_thres * alpha))^2.
            if (len(easy_match['src'][idx]) > 0):
                cross_ent = cls.cross_entropy(ct, easy_match['src'][idx], easy_match['trg'][idx])
                smooth_weight = (easy_match['dist'][idx] / (thres * cls.alpha)).pow(2)
                loss_buf[idx] += (smooth_weight * cross_ent).sum()
            # Normalize by the number of keypoints in this image.
            loss_buf[idx] /= npt
        return torch.mean(loss_buf)
    def cross_entropy(cls, correlation_matrix, src_match, trg_match):
        """Negative log-probability of each target index under the softmax
        over the selected source rows."""
        pdf = cls.softmax(correlation_matrix.index_select(0, src_match))
        prob = pdf[(range(len(trg_match)), trg_match)]
        cross_ent = (- torch.log((prob + cls.eps)))
        return cross_ent
    def information_entropy(cls, correlation_matrix, rescale_factor=4):
        """Mean bidirectional entropy of the (downsampled) correlation maps."""
        bsz = correlation_matrix.size(0)
        correlation_matrix = Correlation.mutual_nn_filter(correlation_matrix)
        # Correlation is (B, H*W, H*W); recover the square spatial side.
        side = int(math.sqrt(correlation_matrix.size(1)))
        new_side = (side // rescale_factor)
        trg2src_dist = correlation_matrix.view(bsz, (- 1), side, side)
        src2trg_dist = correlation_matrix.view(bsz, side, side, (- 1)).permute(0, 3, 1, 2)
        # Downsample both directions before measuring entropy.
        trg2src_dist = F.interpolate(trg2src_dist, [new_side, new_side], mode='bilinear', align_corners=True)
        src2trg_dist = F.interpolate(src2trg_dist, [new_side, new_side], mode='bilinear', align_corners=True)
        src_pdf = Norm.l1normalize(trg2src_dist.view(bsz, (- 1), (new_side * new_side)))
        trg_pdf = Norm.l1normalize(src2trg_dist.view(bsz, (- 1), (new_side * new_side)))
        # Replace exact zeros so log2() stays finite.
        src_pdf[(src_pdf == 0.0)] = cls.eps
        trg_pdf[(trg_pdf == 0.0)] = cls.eps
        src_ent = (- (src_pdf * torch.log2(src_pdf)).sum(dim=2)).view(bsz, (- 1))
        trg_ent = (- (trg_pdf * torch.log2(trg_pdf)).sum(dim=2)).view(bsz, (- 1))
        score_net = ((src_ent + trg_ent).mean(dim=1) / 2)
        return score_net.mean()
    def layer_selection_loss(cls, layer_sel):
        """Squared deviation of the mean layer-selection rate from the target rate."""
        return (layer_sel.mean(dim=0) - cls.target_rate).pow(2).sum()
class MLP(nn.Module):
    """Token-embedding model followed by a stack of FeedForward blocks, an
    RMSNorm and a final dim->dim linear projection (no attention)."""

    def __init__(self, params: ModelArgs):
        super().__init__()
        self.params = params
        self.vocab_size = params.vocab_size
        self.n_layers = params.n_layers
        self.tok_embeddings = VocabParallelEmbedding(params.vocab_size, params.dim)
        # One FeedForward block per layer, each with a 4x hidden expansion.
        self.layers = torch.nn.ModuleList(
            [FeedForward(dim=params.dim, hidden_dim=(4 * params.dim), multiple_of=params.multiple_of)
             for _ in range(params.n_layers)]
        )
        self.norm = RMSNorm(params.dim, eps=params.norm_eps)
        self.output = torch.nn.Linear(params.dim, params.dim, bias=False)

    def forward(self, tokens: torch.Tensor):
        """Embed *tokens*, apply every FeedForward block, normalize, project."""
        (_bsz, seqlen) = tokens.shape
        hidden = self.tok_embeddings(tokens)
        for block in self.layers:
            hidden = block(hidden)
        return self.output(self.norm(hidden))
def get_data_from_batch(batch, w2i, act2i):
    """Vectorize one dialog batch into padded tensors for training.

    Each batch item is a tuple-like of (utterances, labels, context, bow,
    prev_acts, act_filter). All sequences are padded to the longest dialog
    in the batch.

    Args:
        batch: list of dialog samples (indexable as d[0]..d[5]).
        w2i: word -> index vocabulary mapping.
        act2i: dialog-act -> index mapping (must contain g.SILENT).

    Returns:
        (uttr_var, labels_var, context, bow, prev_var, act_filter) — word
        tensor, padded act-label tensor, padded context/bow/filter lists and
        one-hot previous-act tensor.
    """
    uttrs_list = [d[0] for d in batch]
    # Pad to the longest dialog (turns) and the longest utterance (words).
    dialog_maxlen = max([len(uttrs) for uttrs in uttrs_list])
    uttr_maxlen = max([len(u) for uttrs in uttrs_list for u in uttrs])
    uttr_var = make_word_vector(uttrs_list, w2i, dialog_maxlen, uttr_maxlen)
    batch_labels = [d[1] for d in batch]
    labels_var = []
    for labels in batch_labels:
        vec_labels = [act2i[l] for l in labels]
        # Pad missing turns with the SILENT act.
        pad_len = (dialog_maxlen - len(labels))
        for _ in range(pad_len):
            vec_labels.append(act2i[g.SILENT])
        labels_var.append(torch.LongTensor(vec_labels))
    labels_var = to_var(torch.stack(labels_var, 0))
    batch_prev_acts = [d[4] for d in batch]
    prev_var = []
    for prev_acts in batch_prev_acts:
        # One-hot encode each previous act over the full act vocabulary.
        vec_prev_acts = []
        for act in prev_acts:
            tmp = ([0] * len(act2i))
            tmp[act2i[act]] = 1
            vec_prev_acts.append(tmp)
        # Pad missing turns with all-zero vectors.
        pad_len = (dialog_maxlen - len(prev_acts))
        for _ in range(pad_len):
            vec_prev_acts.append(([0] * len(act2i)))
        prev_var.append(torch.FloatTensor(vec_prev_acts))
    prev_var = to_var(torch.stack(prev_var, 0))
    # Deep copies so padding doesn't mutate the caller's batch.
    context = copy.deepcopy([d[2] for d in batch])
    context = padding(context, 1, dialog_maxlen, len(context[0][0]))
    bow = copy.deepcopy([d[3] for d in batch])
    bow = padding(bow, 0, dialog_maxlen, len(bow[0][0]))
    act_filter = copy.deepcopy([d[5] for d in batch])
    act_filter = padding(act_filter, 0, dialog_maxlen, len(act_filter[0][0]))
    return (uttr_var, labels_var, context, bow, prev_var, act_filter)
class MJVOPTION(Structure):
    """ctypes mirror of MuJoCo's ``mjvOption`` visualization-options struct.

    The field layout must match the compiled MuJoCo library byte-for-byte.
    NOTE(review): geomgroup/sitegroup of 5 and 18 flag bytes correspond to a
    specific (older) MuJoCo release — confirm against the bundled header.
    """
    _fields_ = [('label', c_int), ('frame', c_int), ('geomgroup', (c_ubyte * 5)), ('sitegroup', (c_ubyte * 5)), ('flags', (c_ubyte * 18))]
class Broadcast2D(Lambda):
    """Lambda layer that tiles a (batch, channels) tensor into a
    (batch, channels, size, size) spatial map by broadcasting each value."""

    def __init__(self, size):
        def _broadcast(x):
            # Insert two trailing singleton axes, then tile them out to size x size.
            expanded = tf.expand_dims(tf.expand_dims(x, 2), 3)
            return tf.tile(expanded, [1, 1, size, size])
        Lambda.__init__(self, _broadcast)
def find_unused_parameters(model: nn.Module, inputs: Any) -> List[str]:
    """Return the names of parameters that received no gradient from one
    forward/backward pass of ``model`` on ``inputs``.

    A tuple input is unpacked as positional arguments; a dict of losses is
    summed before backprop. All gradients are cleared before and after the
    probe, so the model's state is left grad-free. The model must be in
    training mode.
    """
    assert model.training
    # Clear stale gradients so "grad is None" is unambiguous afterwards.
    for (_, param) in model.named_parameters():
        param.grad = None
    call_args = inputs if isinstance(inputs, tuple) else (inputs,)
    losses = model(*call_args)
    if isinstance(losses, dict):
        losses = sum(losses.values())
    losses.backward()
    unused: List[str] = []
    for (name, param) in model.named_parameters():
        if (param.grad is None):
            unused.append(name)
        param.grad = None
    return unused
def sample_rule_priority(preds):
    """Sample a random (rules, facts, query) triple over the predicates *preds*.

    Draws up to 4*len(preds) distinct rules (deduplicated by a canonical
    sorted-body + head key), a random subset of facts, and one query
    predicate. Uses the module-level `random` state.
    """
    pred_count = len(preds)
    n_rules = random.randint(0, (4 * pred_count))
    n_facts = random.randint(0, pred_count)
    seen = set()
    rules = []
    for _ in range(0, n_rules):
        # Re-sample until the candidate's canonical form is unseen.
        while True:
            candidate = sample_one_rule(preds)
            key = ((' '.join(sorted(candidate[0])) + ' ') + candidate[1])
            if (key not in seen):
                seen.add(key)
                break
        rules.append(candidate)
    facts = random.sample(preds, n_facts)
    query = random.sample(preds, 1)[0]
    return (rules, facts, query)
class LeNet(MetaModule):
    """LeNet-5-style network built from meta-learnable layers.

    Conv feature extractor (1 -> 6 -> 16 -> 120 channels, each conv followed
    by ReLU; 2x2 max-pooling after the first two) and a small fully-connected
    head producing *n_out* scores.
    """

    def __init__(self, n_out):
        super(LeNet, self).__init__()
        self.main = nn.Sequential(
            MetaConv2d(1, 6, kernel_size=5),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            MetaConv2d(6, 16, kernel_size=5),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            MetaConv2d(16, 120, kernel_size=5),
            nn.ReLU(inplace=True),
        )
        self.fc_layers = nn.Sequential(
            MetaLinear(120, 84),
            nn.ReLU(inplace=True),
            MetaLinear(84, n_out),
        )

    def forward(self, x):
        """Return squeezed scores for the input batch *x*."""
        features = self.main(x)
        flat = features.view((- 1), 120)
        return self.fc_layers(flat).squeeze()
def load_remove_save(input_file: str, output_file: str, for_which_classes: list, minimum_valid_object_size: dict=None):
    """Keep only the largest connected component per class of a segmentation.

    Reads *input_file*, removes all smaller components for every class in
    *for_which_classes* (optionally respecting per-class minimum object
    sizes), and writes the cleaned segmentation to *output_file* with the
    source image's geometry.

    Returns:
        (largest_removed, kept_size) bookkeeping values from the removal step.
    """
    source_image = sitk.ReadImage(input_file)
    seg_array = sitk.GetArrayFromImage(source_image)
    # Physical volume of a single voxel, derived from the image spacing.
    voxel_volume = float(np.prod(source_image.GetSpacing(), dtype=np.float64))
    (cleaned, largest_removed, kept_size) = remove_all_but_the_largest_connected_component(seg_array, for_which_classes, voxel_volume, minimum_valid_object_size)
    result_image = sitk.GetImageFromArray(cleaned)
    result_image = copy_geometry(result_image, source_image)
    sitk.WriteImage(result_image, output_file)
    return (largest_removed, kept_size)
class TPUDistributedDataParallel(nn.Module):
    """Minimal DDP wrapper for TPUs: delegates forward to the wrapped module
    and sum-reduces gradients across the XLA process group."""

    def __init__(self, module, process_group):
        super().__init__()
        self.module = module
        self.process_group = process_group
        self.world_size = utils.get_world_size(self.process_group)

    def forward(self, *inputs, **kwargs):
        """Pass straight through to the wrapped module."""
        return self.module(*inputs, **kwargs)

    def all_reduce_grads(self):
        """All-reduce every trainable gradient, averaging by world size."""
        gradients = []
        for param in self.parameters():
            if (not param.requires_grad):
                continue
            # Parameters untouched this step still participate with zeros.
            if (param.grad is None):
                param.grad = torch.zeros_like(param)
            if param.grad.requires_grad:
                raise RuntimeError("TPUDistributedDataParallel only works with gradients that don't require grad")
            gradients.append(param.grad)
        import torch_xla.core.xla_model as xm
        xm.all_reduce('sum', gradients, scale=(1.0 / self.world_size), groups=self.process_group[1])
class SplinterTokenizer(PreTrainedTokenizer):
    """Construct a Splinter tokenizer (BERT-style WordPiece).

    Identical to the BERT tokenizer except for an extra ``[QUESTION]``
    special token, which ``build_inputs_with_special_tokens`` appends
    (together with a literal '.') to the question segment of QA pairs.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', question_token='[QUESTION]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        if (not os.path.isfile(vocab_file)):
            raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.vocab = load_vocab(vocab_file)
        # Inverse vocabulary for id -> token conversion.
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
        self.question_token = question_token

    @property
    def question_token_id(self):
        """Id of the ``[QUESTION]`` token.

        Must be a property: ``build_inputs_with_special_tokens`` and
        ``create_token_type_ids_from_sequences`` read it as an attribute.
        """
        return self.convert_tokens_to_ids(self.question_token)

    @property
    def do_lower_case(self):
        # NOTE(review): assumes do_basic_tokenize=True, otherwise
        # ``basic_tokenizer`` is never created -- confirm upstream contract.
        return self.basic_tokenizer.do_lower_case

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.vocab)

    def get_vocab(self):
        """Return the full token -> id mapping (base vocab + added tokens)."""
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Split *text* into WordPiece tokens (basic tokenization first, if enabled)."""
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
                if (token in self.basic_tokenizer.never_split):
                    # Protected tokens bypass WordPiece splitting.
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, falling back to the unk id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id to a token (str), falling back to the unk token."""
        return self.ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join tokens into text, undoing the WordPiece '##' continuation marks."""
        out_string = ' '.join(tokens).replace(' ##', '').strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Add special tokens around one sequence or a QA pair.

        Single sequence: ``[CLS] X [SEP]``. For pairs, the question side
        gets a ``[QUESTION] .`` suffix; which side depends on ``padding_side``.
        """
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        question_suffix = ([self.question_token_id] + [self.convert_tokens_to_ids('.')])
        if (self.padding_side == 'right'):
            # Question first: [CLS] question [QUESTION] . [SEP] context [SEP]
            return (((((cls + token_ids_0) + question_suffix) + sep) + token_ids_1) + sep)
        else:
            # Question last: [CLS] context [SEP] question [QUESTION] . [SEP]
            return (((((cls + token_ids_0) + sep) + token_ids_1) + question_suffix) + sep)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens.

        NOTE(review): for pairs this mask does not account for the two-token
        question suffix added by ``build_inputs_with_special_tokens`` --
        confirm against the upstream implementation.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is not None):
            return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
        return (([1] + ([0] * len(token_ids_0))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Return token-type ids: 0 for the first segment (incl. its specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        question_suffix = ([self.question_token_id] + [self.convert_tokens_to_ids('.')])
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        if (self.padding_side == 'right'):
            # Question (with suffix) is segment 0; context is segment 1.
            return ((len((((cls + token_ids_0) + question_suffix) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))
        else:
            # Context is segment 0; question (with suffix) is segment 1.
            return ((len(((cls + token_ids_0) + sep)) * [0]) + (len(((token_ids_1 + question_suffix) + sep)) * [1]))

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write the vocabulary, one token per line in id order.

        Returns a 1-tuple with the written file path. Warns when stored
        indices are not consecutive (corrupted vocab).
        """
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        else:
            vocab_file = (((filename_prefix + '-') if filename_prefix else '') + save_directory)
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for (token, token_index) in sorted(self.vocab.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write((token + '\n'))
                index += 1
        return (vocab_file,)
def setup_python(interval=1):
    """Watch GPUs 2-3 and launch the module-level ``cmd`` on a free one.

    Loops forever: when the watched GPU reports <= 10000 MiB in use, the
    command is launched; otherwise a one-line status is written to stdout.
    After sweeping both indices, fresh readings are fetched.

    NOTE(review): depends on module-level ``cmd`` and ``get_info`` --
    confirm they are defined before calling.
    """
    readings = get_info()
    idx = 2  # first GPU index to watch
    while True:
        if readings[idx] <= 10000:
            if idx in (2, 3):
                print('\n' + cmd)
                os.system(cmd)
                readings = get_info()
        else:
            status = 'gpu memory:%d MiB' % readings[idx]
            sys.stdout.write('\r%s %s' % (idx, status))
            sys.stdout.flush()
        time.sleep(interval)
        idx += 1
        # After visiting both indices, restart the sweep with new readings.
        if idx != 2 and (idx - 2) % 2 == 0:
            idx = 2
            readings = get_info()
class SharedQueue(LocalSocketComm):
    """A ``queue.Queue`` shared between local processes over a socket.

    The creating process owns the real queue and serves requests; every
    other process proxies ``put``/``get``/``qsize``/``empty`` through
    pickled request/response messages.
    """

    def __init__(self, name='', create=False, maxsize=1):
        super().__init__(name, create)
        # Only the server side materializes the queue; clients keep None.
        self._queue = queue.Queue(maxsize) if self._create else None

    def _sync(self):
        """Server loop: handle one pickled request per accepted connection."""
        while True:
            connection, _ = self._server.accept()
            try:
                msg: SocketRequest = pickle.loads(_socket_recv(connection))
                response = SocketResponse()
                if msg.method == 'put':
                    self.put(**msg.args)
                elif msg.method == 'get':
                    response = QueueGetResponse()
                    response.obj = self.get(**msg.args)
                elif msg.method == 'qsize':
                    response = QueueSizeResponse()
                    response.size = self.qsize()
                elif msg.method == 'empty':
                    response = QueueEmptyResponse()
                    response.empty = self.empty()
                response.status = SUCCESS_CODE
            except Exception:
                # Report failure to the client instead of crashing the loop.
                response = SocketResponse()
                response.status = ERROR_CODE
            _socket_send(connection, pickle.dumps(response))

    def put(self, obj, block=True, timeout=None):
        """Enqueue *obj* directly (server) or via an RPC round-trip (client)."""
        if self._server:
            self._queue.put(obj, block=block, timeout=timeout)
        else:
            request = SocketRequest(method='put', args={'obj': obj, 'block': block, 'timeout': timeout})
            self._request(request)

    def get(self, block=True, timeout=None):
        """Dequeue one object; returns None when the remote call failed."""
        if self._server:
            return self._queue.get(block=block, timeout=timeout)
        request = SocketRequest(method='get', args={'block': block, 'timeout': timeout})
        response: QueueGetResponse = self._request(request)
        return response.obj if response.status == SUCCESS_CODE else None

    def qsize(self):
        """Approximate queue size; -1 when the remote call failed."""
        if self._server:
            return self._queue.qsize()
        response: QueueSizeResponse = self._request(SocketRequest(method='qsize', args={}))
        return response.size if response.status == SUCCESS_CODE else -1

    def empty(self):
        """Whether the queue is empty; False when the remote call failed."""
        if self._server:
            return self._queue.empty()
        response: QueueEmptyResponse = self._request(SocketRequest(method='empty', args={}))
        return response.empty if response.status == SUCCESS_CODE else False
class ResidualDenseBlock_5C(nn.Module):
    """Residual dense block with five 3x3 convolutions.

    Each conv sees the channel-wise concatenation of the block input and
    all previous feature maps; the final output is scaled by 0.2 and added
    back to the input (residual scaling).
    """

    def __init__(self, nf=64, gc=32, bias=True):
        super(ResidualDenseBlock_5C, self).__init__()
        # Dense connectivity: conv k consumes nf + (k-1)*gc channels.
        self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias)
        self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias)
        self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1, bias=bias)
        self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1, bias=bias)
        self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias=bias)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        initialize_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1)

    def forward(self, x):
        # Accumulate dense features; each conv reads everything so far.
        feats = [x]
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
            feats.append(self.lrelu(conv(torch.cat(feats, 1))))
        out = self.conv5(torch.cat(feats, 1))
        # Residual scaling keeps the block near identity at initialization.
        return out * 0.2 + x
# NOTE(review): the decorator line was corrupted in this file (bare
# ``(unsafe_hash=True)`` is a SyntaxError); restored as a dataclass
# decorator based on the ``dataclasses.field`` declarations below.
@dataclasses.dataclass(unsafe_hash=True)
class FeedEntry():
    """A single entry of a syndication feed.

    Equality and hashing consider only ``long_url`` (the sole field with
    ``compare=True``), so entries are deduplicated by link.
    """

    title: str = dataclasses.field(compare=False)
    long_url: str = dataclasses.field(compare=True)
    summary: str = dataclasses.field(compare=False)
    categories: List[str] = dataclasses.field(compare=False, repr=True)
    data: Dict[(str, Any)] = dataclasses.field(compare=False, repr=False)
    feed_reader: Any = dataclasses.field(compare=False, repr=False)

    def __post_init__(self):
        # Mutable per-entry state filled in later by the pipeline.
        self.short_url: Optional[str] = None
        self.matching_title_search_pattern: Optional[Pattern] = None

    def _matching_pattern(self, patterns: Dict[(str, List[Pattern])]) -> Optional[Tuple[(str, Pattern)]]:
        """Return the first (search_key, pattern) matching the title, url, or a category."""
        for (search_key, val) in {'title': self.title, 'url': self.long_url}.items():
            for pattern in patterns[search_key]:
                if pattern.search(val):
                    log.log(5, '%s matches %s pattern %s.', self, search_key, repr(pattern.pattern))
                    return (search_key, pattern)
        for pattern in patterns['category']:
            for category in self.categories:
                if pattern.search(category):
                    log.log(5, '%s having category %s matches category pattern %s.', self, repr(category), repr(pattern.pattern))
                    return ('category', pattern)
        return None

    def blacklisted_pattern(self) -> Optional[Tuple[(str, Pattern)]]:
        """Blacklist pattern matching this entry, if any."""
        return self._matching_pattern(self.feed_reader.blacklist)

    def whitelisted_pattern(self) -> Optional[Tuple[(str, Pattern)]]:
        """Whitelist pattern matching this entry, if any."""
        return self._matching_pattern(self.feed_reader.whitelist)

    def message(self, channel: Optional[str]=None) -> str:
        """Format the entry as an IRC PRIVMSG body for *channel*.

        Styles the title (highlighting the whitelist match span when
        ``whitelist.explain`` is configured), optionally appends the
        summary, and truncates the caption so the final PRIVMSG fits
        within ``config.QUOTE_LEN_MAX`` bytes.
        """
        feed_config = self.feed_reader.config
        native_channel = self.feed_reader.channel
        channel = (channel or native_channel)
        explain = (feed_config.get('whitelist') or {}).get('explain')
        msg_config = (feed_config.get('message') or {})
        include_summary = (msg_config.get('summary') and self.summary)
        style_config = (feed_config.get('style') or {})

        def _style_title(text: str, **kwargs: Any) -> str:
            # IRC styling only when a style is configured; unicode fallback otherwise.
            return style(text, styler=('irc' if style_config else 'unicode'), **kwargs)
        format_map = dict(identity=config.runtime.identity, channel=channel, styled_native_channel=style(native_channel, styler='irc', fg='silver'), styled_feed=style(self.feed_reader.name, styler='irc', **style_config.get('name', {})), url=(self.short_url or self.long_url))
        format_map['caption'] = ''
        if (msg_config.get('title', True) and (title := self.title)):
            if (explain and (pattern := self.matching_title_search_pattern) and (match := pattern.search(title))):
                # Emphasize the matched span within the title.
                (span0, span1) = match.span()
                (title_pre, title_mid, title_post) = (title[:span0], title[span0:span1], title[span1:])
                if include_summary:
                    title_pre = _style_title(title_pre, bold=True)
                    title_mid = _style_title(title_mid, bold=True, italics=True)
                    title_post = _style_title(title_post, bold=True)
                    title = ((title_pre + title_mid) + title_post)
                else:
                    title = ((title_pre + _style_title(title_mid, italics=True)) + title_post)
            elif include_summary:
                title = _style_title(title, bold=True)
            format_map['caption'] += title
        if include_summary:
            if format_map['caption']:
                format_map['caption'] += ': '
            format_map['caption'] += self.summary
        msg_format = ('' if (channel == native_channel) else '<{styled_native_channel}> ')
        msg_format += '[{styled_feed}]'
        if format_map['caption']:
            msg_format += ' {caption} '
        msg_format += ' {url}'
        privmsg_format = f':{{identity}} PRIVMSG {{channel}} :{msg_format}'
        # Caption budget: whatever bytes remain after the fixed parts.
        base_bytes_use = len(privmsg_format.format_map({**format_map, 'caption': ''}).encode())
        caption_bytes_width = max(0, (config.QUOTE_LEN_MAX - base_bytes_use))
        format_map['caption'] = shorten_to_bytes_width(format_map['caption'], caption_bytes_width)
        msg = msg_format.format_map(format_map)
        return msg

    def topic(self, topic: str) -> str:
        """Update the '<key>: <url>' parts of an IRC *topic* for configured keys matching the title."""
        if (not (topic_config := self.feed_reader.config.get('topic'))):
            return topic
        # Parse 'k1: v1 | k2: v2 | bare' into an ordered mapping.
        topic_parts = {k: v for (k, _, v) in (p.partition(': ') for p in topic.split(' | '))}
        for (key, pattern) in topic_config.items():
            if re.search(pattern, self.title):
                topic_parts[key] = (self.short_url or self.feed_reader.url_shortener.shorten_urls([self.long_url])[self.long_url])
        topic = ' | '.join(((f'{k}: {v}' if v else k) for (k, v) in topic_parts.items()))
        return topic
# NOTE(review): stripped decorator residue -- the bare name ``_module`` would
# raise NameError at import time. It was most likely a dataset registration
# decorator (e.g. ``@DATASETS.register_module``); confirm and restore.
# _module
class ConcatDataset(_ConcatDataset):
    """Concatenation of datasets sharing a class list.

    Propagates ``CLASSES`` from the first dataset and, when present,
    concatenates the per-sample ``flag`` arrays (used for grouped sampling).
    """

    def __init__(self, datasets):
        super(ConcatDataset, self).__init__(datasets)
        self.CLASSES = datasets[0].CLASSES
        if hasattr(datasets[0], 'flag'):
            # Assumes every dataset defines ``flag`` when the first one does.
            self.flag = np.concatenate([ds.flag for ds in datasets])
def get_all_parameters(cls, parsed_args):
    """Collect constructor parameters for *cls* from parsed CLI arguments.

    Values are read from ``parsed_args`` attributes named ``<prefix><arg>``;
    missing values fall back to the ``__init__`` defaults. Only non-None
    values are included in the result.

    Raises:
        ValueError: if *cls* has no registered prefix.
    """
    prefix = _get_prefix(cls)
    if not prefix:
        raise ValueError('Cannot retrieve parameters without prefix')
    info = _get_info(cls)
    if inspect.ismethod(cls.__init__):
        # getfullargspec replaces getargspec, which was removed in Python 3.11.
        spec = inspect.getfullargspec(cls.__init__)
        if spec.defaults is None:
            arg_defaults = {}
        else:
            # Defaults align with the *last* len(defaults) positional args.
            arg_defaults = dict(zip(spec.args[::-1], spec.defaults[::-1]))
    else:
        arg_defaults = {}
    all_params = {}
    for (arg_name, arg_info) in info.items():
        prefixed_name = prefix + arg_name
        arg_value = getattr(parsed_args, prefixed_name, None)
        if arg_value is None and arg_name in arg_defaults:
            arg_value = arg_defaults[arg_name]
        if arg_value is not None:
            all_params[arg_name] = arg_value
    return all_params
class ResNet(nn.Module):
    """Configurable ResNet-family backbone with pooled linear classifier.

    Builds: stem (single 7x7 conv, or a 3-conv 'deep'/'deep tiered' stem)
    -> BN/act -> max-pool (optionally anti-aliased via ``aa_layer``) ->
    four residual stages produced by ``make_blocks`` -> global pool -> fc.

    NOTE(review): the order of module creation below fixes the state_dict
    layout and the iteration order of the init loops; statements are
    deliberately left in their original order.
    """
    def __init__(self, block, layers, num_classes=1000, in_chans=3, cardinality=1, base_width=64, stem_width=64, stem_type='', output_stride=32, block_reduce_first=1, down_kernel_size=1, avg_down=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_rate=0.0, drop_path_rate=0.0, drop_block_rate=0.0, global_pool='avg', zero_init_last_bn=True, block_args=None):
        # Extra kwargs forwarded verbatim to every residual block.
        block_args = (block_args or dict())
        assert (output_stride in (8, 16, 32))
        self.num_classes = num_classes
        self.drop_rate = drop_rate
        super(ResNet, self).__init__()
        # --- Stem: 'deep' variants replace the 7x7 conv with three 3x3 convs.
        deep_stem = ('deep' in stem_type)
        inplanes = ((stem_width * 2) if deep_stem else 64)
        if deep_stem:
            stem_chs = (stem_width, stem_width)
            if ('tiered' in stem_type):
                # 'tiered' stem narrows the first conv to 3/4 of stem_width.
                stem_chs = ((3 * (stem_width // 4)), stem_width)
            self.conv1 = nn.Sequential(*[nn.Conv2d(in_chans, stem_chs[0], 3, stride=2, padding=1, bias=False), norm_layer(stem_chs[0]), act_layer(inplace=True), nn.Conv2d(stem_chs[0], stem_chs[1], 3, stride=1, padding=1, bias=False), norm_layer(stem_chs[1]), act_layer(inplace=True), nn.Conv2d(stem_chs[1], inplanes, 3, stride=1, padding=1, bias=False)])
        else:
            self.conv1 = nn.Conv2d(in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(inplanes)
        self.act1 = act_layer(inplace=True)
        self.feature_info = [dict(num_chs=inplanes, reduction=2, module='act1')]
        # With aa_layer given, the max-pool keeps stride 1 and the aa_layer
        # performs the stride-2 downsample instead.
        if (aa_layer is not None):
            self.maxpool = nn.Sequential(*[nn.MaxPool2d(kernel_size=3, stride=1, padding=1), aa_layer(channels=inplanes, stride=2)])
        else:
            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # --- Residual stages, registered as layer1..layer4 via add_module.
        channels = [64, 128, 256, 512]
        (stage_modules, stage_feature_info) = make_blocks(block, channels, layers, inplanes, cardinality=cardinality, base_width=base_width, output_stride=output_stride, reduce_first=block_reduce_first, avg_down=avg_down, down_kernel_size=down_kernel_size, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, drop_block_rate=drop_block_rate, drop_path_rate=drop_path_rate, **block_args)
        for stage in stage_modules:
            self.add_module(*stage)
        self.feature_info.extend(stage_feature_info)
        # --- Head.
        self.num_features = (512 * block.expansion)
        (self.global_pool, self.fc) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
        # Kaiming init for convs; unit gamma / zero beta for batch norms.
        for (n, m) in self.named_modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1.0)
                nn.init.constant_(m.bias, 0.0)
        # Optionally delegate zeroing of each block's last BN to the blocks'
        # own zero_init_last_bn hooks.
        if zero_init_last_bn:
            for m in self.modules():
                if hasattr(m, 'zero_init_last_bn'):
                    m.zero_init_last_bn()

    def get_classifier(self):
        """Return the classification head module."""
        return self.fc

    def reset_classifier(self, num_classes, global_pool='avg'):
        """Replace the pooled classifier head for a new number of classes."""
        self.num_classes = num_classes
        (self.global_pool, self.fc) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)

    def forward_features(self, x):
        """Run stem + residual stages; returns the unpooled feature map."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.act1(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x

    def forward(self, x):
        """Features -> global pool -> (optional dropout) -> logits."""
        x = self.forward_features(x)
        x = self.global_pool(x)
        if self.drop_rate:
            x = F.dropout(x, p=float(self.drop_rate), training=self.training)
        x = self.fc(x)
        return x
def matplotlib_imshow(img, one_channel=False):
    """Display a CHW tensor with matplotlib.

    Undoes a [-1, 1] normalization via ``img / 2 + 0.5`` before plotting.
    With ``one_channel``, channels are averaged and shown as grayscale.
    """
    if one_channel:
        img = img.mean(dim=0)
    # De-normalize, then move to numpy for matplotlib.
    npimg = (img / 2 + 0.5).numpy()
    if one_channel:
        plt.imshow(npimg, cmap='Greys')
    else:
        # matplotlib expects HWC channel ordering.
        plt.imshow(np.transpose(npimg, (1, 2, 0)))
class SENet(nn.Module):
    """Squeeze-and-Excitation network body with avg-pool + linear head.

    ``layer0`` is either a deep stem of three 3x3 convs (``input_3x3=True``)
    or a single 7x7 conv, followed by four stages of SE blocks built by
    ``_make_layer``.
    """
    def __init__(self, block, layers, groups, reduction, dropout_p=0.2, inplanes=128, input_3x3=True, downsample_kernel_size=3, downsample_padding=1, num_classes=1000):
        """Build the network from ``block`` with ``layers`` blocks per stage.

        ``groups`` (grouped-conv cardinality) and ``reduction`` (SE squeeze
        ratio) are forwarded to every block.
        """
        super(SENet, self).__init__()
        self.inplanes = inplanes
        if input_3x3:
            # Deep stem: conv1 downsamples (stride 2); conv3 widens to `inplanes`.
            layer0_modules = [('conv1', nn.Conv2d(3, 64, 3, stride=2, padding=1, bias=False)), ('bn1', nn.BatchNorm2d(64)), ('relu1', nn.ReLU(inplace=True)), ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)), ('bn2', nn.BatchNorm2d(64)), ('relu2', nn.ReLU(inplace=True)), ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1, bias=False)), ('bn3', nn.BatchNorm2d(inplanes)), ('relu3', nn.ReLU(inplace=True))]
        else:
            # Simple stem: one 7x7 stride-2 conv.
            layer0_modules = [('conv1', nn.Conv2d(3, inplanes, kernel_size=7, stride=2, padding=3, bias=False)), ('bn1', nn.BatchNorm2d(inplanes)), ('relu1', nn.ReLU(inplace=True))]
        # ceil_mode=True rounds odd spatial sizes up during pooling.
        layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2, ceil_mode=True)))
        self.layer0 = nn.Sequential(OrderedDict(layer0_modules))
        # Stage 1 always uses a 1x1 / no-padding downsample; stages 2-4 use
        # the configurable kernel size and padding.
        self.layer1 = self._make_layer(block, planes=64, blocks=layers[0], groups=groups, reduction=reduction, downsample_kernel_size=1, downsample_padding=0)
        self.layer2 = self._make_layer(block, planes=128, blocks=layers[1], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding)
        self.layer3 = self._make_layer(block, planes=256, blocks=layers[2], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding)
        self.layer4 = self._make_layer(block, planes=512, blocks=layers[3], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding)
        # assumes a 7x7 final feature map (224x224 input) -- TODO confirm
        self.avg_pool = nn.AvgPool2d(7, stride=1)
        self.dropout = (nn.Dropout(dropout_p) if (dropout_p is not None) else None)
        self.last_linear = nn.Linear((512 * block.expansion), num_classes)

    def _make_layer(self, block, planes, blocks, groups, reduction, stride=1, downsample_kernel_size=1, downsample_padding=0):
        """Stack ``blocks`` SE blocks; the first may downsample the shortcut."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            # Projection shortcut to match spatial size / channel count.
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=downsample_kernel_size, stride=stride, padding=downsample_padding, bias=False), nn.BatchNorm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, groups, reduction, stride, downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups, reduction))
        return nn.Sequential(*layers)

    def features(self, x):
        """Stem + four SE stages; returns the unpooled feature map."""
        x = self.layer0(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x

    def logits(self, x):
        """Average-pool, optional dropout, flatten, then the linear head."""
        x = self.avg_pool(x)
        if (self.dropout is not None):
            x = self.dropout(x)
        x = x.view(x.size(0), (- 1))
        x = self.last_linear(x)
        return x

    def forward(self, x, x_):
        """Full forward pass.

        NOTE(review): ``x_`` is accepted but unused -- presumably kept for
        interface compatibility with a sibling model; confirm before removing.
        """
        x = self.features(x)
        x = self.logits(x)
        return x
def build_fake_ut():
    """Write the embedded distributed-tuning unittest source to ``fake_ut.py``.

    NOTE(review): the embedded source below appears garbled by extraction
    (stray line breaks inside the literal and a stripped skip decorator
    rendered as a bare parenthesized condition); it is left byte-identical
    here -- restore from the original repository before relying on it.
    """
    fake_ut = '\nimport shutil\nimport unittest\nimport time\nimport os\nimport sys\n\nimport numpy as np\n\nfrom neural_compressor.utils import logger\nfrom neural_compressor.quantization import fit\nfrom neural_compressor.config import PostTrainingQuantConfig\nfrom neural_compressor.data import Datasets, DATALOADERS\nimport torchvision\n\nimport importlib\nif importlib.util.find_spec("mpi4py") is None:\n    CONDITION = True\nelse:\n    from mpi4py import MPI\n    CONDITION = False\n\ndef save_acc_perf_to_local(acc_lst, perf_lst, acc_perf_data_file_path):\n    import json\n    data = {\'acc_lst\': acc_lst, \'perf_lst\': perf_lst}\n    with open(acc_perf_data_file_path, \'w\') as fp:\n        json.dump(data, fp)\n    logger.info(f"Save data to {acc_perf_data_file_path}")\n\ndef next_acc_and_perf(acc_perf_data_file_path):\n    import json\n    acc, perf = None, None\n    with open(acc_perf_data_file_path, \'r\') as fp:\n        data = json.load(fp)\n        acc = data[\'acc_lst\'][0]\n        perf = data[\'perf_lst\'][0]\n        new_acc_lst = data[\'acc_lst\'][1:]\n        new_perf_lst = data[\'perf_lst\'][1:]\n    save_acc_perf_to_local(new_acc_lst, new_perf_lst, acc_perf_data_file_path)\n    return acc, perf\n\(CONDITION , "missing the mpi4py package")\nclass TestDistributedTuning(unittest.TestCase):\n    \n    def setUpClass(self):\n        self.comm = MPI.COMM_WORLD\n        self.size = self.comm.Get_size()\n        self.rank = self.comm.Get_rank()\n\n    \n    def tearDownClass(self):\n        if self.rank == 0:\n            if os.path.exists(\'test_pt_stage_1_met.json\'):\n                os.remove(\'test_pt_stage_1_met.json\')\n            if os.path.exists(\'test_pt_stage_3_fp32_met.json\'):\n                os.remove(\'test_pt_stage_3_fp32_met.json\')\n            if os.path.exists(\'test_pt_stage_stage_4_fp32_met.json\'):\n                os.remove(\'test_pt_stage_stage_4_fp32_met.json\')\n            if os.path.exists(\'test_pt_stage_not_met.json\'):\n                os.remove(\'test_pt_stage_not_met.json\')\n            if os.path.exists(\'test_pt_num_of_nodes_more_than_len_of_tune_cfg_lst_met.json\'):\n                os.remove(\'test_pt_num_of_nodes_more_than_len_of_tune_cfg_lst_met.json\')\n\n    def 
test_mpi4py_installation(self):\n        logger.info(f"Test rank {self.rank} of {self.size} processes")\n        self.assertGreater(self.size, 0)\n        self.assertGreaterEqual(self.size, 0)\n\n    def test_pt_stage_1_met(self):\n        logger.info("*** Test: distributed tuning testing test_pt_stage_1_met start.")\n\n        num_processes = 3\n        logger.info(f"*** Test: distributed tuning testing test_pt_stage_1_met start. NP: {num_processes} (load acc and perf from local).")\n\n        # model\n        resnet18 = torchvision.models.resnet18()\n\n        # fake evaluation function\n        num_baseline = num_processes # TODO, replace num_baseline with 1 when evaluating baseline only once.\n        acc_lst = [2.0] * num_baseline + [1.0, 2.1, 2.2, 2.3, 2.0] #the tuning result (2.1)\n        perf_lst = [2.0] * num_baseline + [2.5, 2.0, 1.5, 1.1, 5.0]\n\n        # make sure this path can be accessed by all nodes\n        acc_perf_data_file_path = \'test_pt_stage_1_met.json\'\n        save_acc_perf_to_local(acc_lst, perf_lst, acc_perf_data_file_path)\n\n        def _fake_eval(model):\n            acc, perf = next_acc_and_perf(acc_perf_data_file_path)\n            logger.info(f"Current evaluate result: acc {acc}, perf: {perf}.")\n            time.sleep(perf)\n            return acc\n\n        # dataset and dataloader\n        dataset = Datasets("pytorch")["dummy"](((100, 3, 3, 1)))\n        dataloader = DATALOADERS["pytorch"](dataset)\n\n        # tuning and accuracy criterion\n        conf = PostTrainingQuantConfig(quant_level=1)\n        # fit\n        q_model = fit(model=resnet18,\n                      conf=conf,\n                      calib_dataloader= dataloader,\n                      eval_dataloader=dataloader,\n                      eval_func=_fake_eval)\n        if self.rank == 0:\n            self.assertIsNotNone(q_model)\n\n    def test_pt_stage_3_fp32_met(self):\n        logger.info("*** Test: distributed tuning testing test_pt_stage_3_fp32_met start.")\n\n        num_processes = 3\n        logger.info(f"*** Test: distributed tuning testing test_pt_stage_1_met start. 
NP: {num_processes} (load acc and perf from local).")\n\n        # model\n        resnet18 = torchvision.models.resnet18()\n\n        # fake evaluation function\n        num_baseline = num_processes # TODO, replace num_baseline with 1 when evaluating baseline only once.\n        acc_lst = [2.0] * num_baseline + [1.0] * 16 + [2.0, 1.0, 1.0]\n        perf_lst = [2.0] * num_baseline + [1.0] * 16 + [1.0, 1.0, 1.0]\n\n        # make sure this path can be accessed by all nodes\n        acc_perf_data_file_path = \'test_pt_stage_3_fp32_met.json\'\n        save_acc_perf_to_local(acc_lst, perf_lst, acc_perf_data_file_path)\n\n        def _fake_eval(model):\n            acc, perf = next_acc_and_perf(acc_perf_data_file_path)\n            logger.info(f"Current evaluate result: acc {acc}, perf: {perf}.")\n            time.sleep(perf)\n            return acc\n\n        # dataset and dataloader\n        dataset = Datasets("pytorch")["dummy"](((100, 3, 3, 1)))\n        dataloader = DATALOADERS["pytorch"](dataset)\n\n        # tuning and accuracy criterion\n        conf = PostTrainingQuantConfig(quant_level=1)\n        # fit\n        q_model = fit(model=resnet18,\n                      conf=conf,\n                      calib_dataloader= dataloader,\n                      eval_dataloader=dataloader,\n                      eval_func=_fake_eval)\n        if self.rank == 0:\n            self.assertIsNotNone(q_model)\n\n    def test_pt_stage_4_fp32_met(self):\n        logger.info("*** Test: distributed tuning testing test_pt_stage_3_met start.")\n\n        num_processes = 3\n        logger.info(f"*** Test: distributed tuning testing test_pt_stage_1_met start. 
NP: {num_processes} (load acc and perf from local).")\n\n        # model\n        resnet18 = torchvision.models.resnet18()\n\n        # fake evaluation function\n        num_baseline = num_processes # TODO, replace num_baseline with 1 when evaluating baseline only once.\n        acc_lst = [2.0] * num_baseline + [1.0] * 37 + [2.0, 1.0, 1.0]\n        perf_lst = [2.0] * num_baseline + [1.0] * 37 + [1.0, 1.0, 1.0]\n\n        # make sure this path can be accessed by all nodes\n        acc_perf_data_file_path = \'test_pt_stage_stage_4_fp32_met.json\'\n        save_acc_perf_to_local(acc_lst, perf_lst, acc_perf_data_file_path)\n\n        def _fake_eval(model):\n            acc, perf = next_acc_and_perf(acc_perf_data_file_path)\n            logger.info(f"Current evaluate result: acc {acc}, perf: {perf}.")\n            time.sleep(perf)\n            return acc\n\n        # dataset and dataloader\n        dataset = Datasets("pytorch")["dummy"](((100, 3, 3, 1)))\n        dataloader = DATALOADERS["pytorch"](dataset)\n\n        # tuning and accuracy criterion\n        conf = PostTrainingQuantConfig(quant_level=1)\n        # fit\n        q_model = fit(model=resnet18,\n                      conf=conf,\n                      calib_dataloader= dataloader,\n                      eval_dataloader=dataloader,\n                      eval_func=_fake_eval)\n        if self.rank == 0:\n            self.assertIsNotNone(q_model)\n\n    def test_pt_stage_not_met(self):\n        logger.info("*** Test: distributed tuning testing test_pt_stage_not_met start.")\n        num_processes = 3\n        logger.info(f"*** Test: distributed tuning testing test_pt_stage_1_met start. 
NP: {num_processes} (load acc and perf from local).")\n\n        # model\n        resnet18 = torchvision.models.resnet18()\n\n        # fake evaluation function\n        num_baseline = num_processes # TODO, replace num_baseline with 1 when evaluating baseline only once.\n        acc_lst = [2.0] * num_baseline + [1.0] * 60\n        perf_lst = [2.0] * num_baseline + [1.0] * 60\n\n        # make sure this path can be accessed by all nodes\n        acc_perf_data_file_path = \'test_pt_stage_not_met.json\'\n        save_acc_perf_to_local(acc_lst, perf_lst, acc_perf_data_file_path)\n\n        def _fake_eval(model):\n            acc, perf = next_acc_and_perf(acc_perf_data_file_path)\n            logger.info(f"Current evaluate result: acc {acc}, perf: {perf}.")\n            time.sleep(perf)\n            return acc\n\n        # dataset and dataloader\n        dataset = Datasets("pytorch")["dummy"](((100, 3, 3, 1)))\n        dataloader = DATALOADERS["pytorch"](dataset)\n\n        # tuning and accuracy criterion\n        conf = PostTrainingQuantConfig(quant_level=1)\n        # fit\n        q_model = fit(model=resnet18,\n                      conf=conf,\n                      calib_dataloader= dataloader,\n                      eval_dataloader=dataloader,\n                      eval_func=_fake_eval)\n        if self.rank == 0:\n            self.assertIsNone(q_model) # None of the tuning configs met the requirements!\n\n    def test_pt_num_of_nodes_more_than_len_of_tune_cfg_lst_met(self):\n        logger.info("*** Test: distributed tuning testing test_pt_num_of_nodes_more_than_len_of_tune_cfg_lst_met start.")\n\n        num_processes = 18\n        logger.info(f"*** Test: distributed tuning testing test_pt_stage_1_met start. 
NP: {num_processes} (load acc and perf from local).")\n\n        # model\n        resnet18 = torchvision.models.resnet18()\n\n        # fake evaluation function\n        num_baseline = num_processes # TODO, replace num_baseline with 1 when evaluating baseline only once.\n        acc_lst = [2.0] * num_baseline + [1.0] * 37 + [2.0, 1.0, 1.0] * 6\n        perf_lst = [2.0] * num_baseline + [1.0] * 37 + [1.0, 1.0, 1.0] * 6\n\n        # make sure this path can be accessed by all nodes\n        acc_perf_data_file_path = \'test_pt_num_of_nodes_more_than_len_of_tune_cfg_lst_met.json\'\n        save_acc_perf_to_local(acc_lst, perf_lst, acc_perf_data_file_path)\n\n        def _fake_eval(model):\n            acc, perf = next_acc_and_perf(acc_perf_data_file_path)\n            logger.info(f"Current evaluate result: acc {acc}, perf: {perf}.")\n            time.sleep(perf)\n            return acc\n\n        # dataset and dataloader\n        dataset = Datasets("pytorch")["dummy"](((100, 3, 3, 1)))\n        dataloader = DATALOADERS["pytorch"](dataset)\n\n        # tuning and accuracy criterion\n        conf = PostTrainingQuantConfig(quant_level=1)\n        # fit\n        q_model = fit(model=resnet18,\n                      conf=conf,\n                      calib_dataloader= dataloader,\n                      eval_dataloader=dataloader,\n                      eval_func=_fake_eval)\n        if self.rank == 0:\n            self.assertIsNotNone(q_model)\n\n    def test_pt_met_wait_before_no_met(self):\n        pass\n\n    def test_pt_met_wait_before_met(self):\n        pass\n\nif __name__ == "__main__":\n    unittest.main()\n    '
    with open('fake_ut.py', 'w', encoding='utf-8') as f:
        f.write(fake_ut)
def get_open_cases(date):
    """Count cases open at *date*: started on/before it and not yet ended.

    NOTE(review): reads the module-level ``dt_first_last_timestamps`` table
    (expects 'start_time' and 'end_time' columns) -- confirm it is defined.
    """
    started = dt_first_last_timestamps['start_time'] <= date
    not_ended = dt_first_last_timestamps['end_time'] > date
    return sum(started & not_ended)
def z_rotation(vector, theta):
    """Rotate *vector* by *theta* radians about the z-axis.

    Applies the standard 3x3 rotation matrix R_z(theta) to the vector.
    """
    c, s = np.cos(theta), np.sin(theta)
    rotation = np.array([[c, -s, 0.0],
                         [s, c, 0.0],
                         [0.0, 0.0, 1.0]])
    return np.dot(rotation, vector)
def listdir(*parts):
    """Return sorted entries of the joined path, skipping .DS_Store artifacts."""
    path = os.path.join(*parts)
    return sorted(entry for entry in os.listdir(path) if '.DS_Store' not in entry)
class DensePoseConfidenceBasedSampler(DensePoseBaseSampler):
    """Point sampler guided by a per-point confidence channel.

    Candidate points are ranked by the confidence stored in the values
    tensor; ``count`` points are drawn at random from a top slice whose
    size is controlled by ``search_count_multiplier`` or
    ``search_proportion`` (mutually exclusive).
    """

    def __init__(self, confidence_channel: str, count_per_class: int=8, search_count_multiplier: Optional[float]=None, search_proportion: Optional[float]=None):
        super().__init__(count_per_class)
        self.confidence_channel = confidence_channel
        self.search_count_multiplier = search_count_multiplier
        self.search_proportion = search_proportion
        assert (search_count_multiplier is None) or (search_proportion is None), f'Cannot specify both search_count_multiplier (={search_count_multiplier})and search_proportion (={search_proportion})'

    def _produce_index_sample(self, values: torch.Tensor, count: int):
        """Pick ``count`` point indices from ``values`` (confidence in row 2).

        Returns a plain list when every point is taken, otherwise a tensor
        of indices sampled from the confidence-sorted candidate pool.
        """
        total = values.shape[1]
        if total == count:
            return list(range(total))
        # Rank candidate points by confidence (ascending sort order).
        _, ranked_indices = torch.sort(values[2])
        if self.search_count_multiplier is not None:
            pool_size = min(int(count * self.search_count_multiplier), total)
        elif self.search_proportion is not None:
            pool_size = min(max(int(total * self.search_proportion), count), total)
        else:
            pool_size = min(count, total)
        picks = random.sample(range(pool_size), count)
        return ranked_indices[:pool_size][picks]

    def _produce_labels_and_results(self, instance) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Convert predictions to (labels, uv+confidence) tensors on CPU."""
        converter = ToChartResultConverterWithConfidences
        chart = converter.convert(instance.pred_densepose, instance.pred_boxes)
        labels = chart.labels.cpu()
        uv = chart.uv.cpu()
        # Append the configured confidence channel as an extra row.
        confidence = getattr(chart, self.confidence_channel)[None].cpu()
        return (labels, torch.cat((uv, confidence)))
class IvarCorrection(Correction):
    """Correction rescaling inverse variance by an eta(log-lambda) function.

    Reads ``ETA`` versus ``LOGLAM`` (or legacy ``LAMBDA``) from the
    ``VAR_FUNC`` extension of ``config['filename']`` and applies it as
    ``forest.ivar /= eta(log_lambda)``.
    """

    def __init__(self, config):
        """Load the eta(log-lambda) interpolation from the configured file.

        Raises:
            CorrectionError: if 'filename' is missing, the file cannot be
                opened, or required fields are absent from HDU 'VAR_FUNC'.
        """
        self.logger = logging.getLogger(__name__)
        filename = config.get('filename')
        if filename is None:
            # NOTE(review): message names 'SdssIvarCorrection' -- presumably
            # the config section this class is registered under; confirm.
            raise CorrectionError("Missing argument 'filename' required by SdssIvarCorrection")
        try:
            hdu = fitsio.read(filename, ext='VAR_FUNC')
            if 'LOGLAM' in hdu.dtype.names:
                log_lambda = hdu['LOGLAM']
            elif 'LAMBDA' in hdu.dtype.names:
                self.logger.warning("DeprecationWarning: Reading correction using 'LAMBDA'. Newer versions of picca always save 'LOGLAM' and so this option will be removed in the future.")
                log_lambda = np.log10(hdu['LAMBDA'])
            else:
                # The f-strings below previously contained a scrubbed
                # '(unknown)' placeholder where the filename belongs.
                raise CorrectionError(f"Error loading IvarCorrection. In extension 'VAR_FUNC' in file {filename} one of the fields 'LOGLAM' or 'LAMBDA' should be present. I did not find them.")
            eta = hdu['ETA']
        except OSError as error:
            # Fixed: previously said 'CalibrationCorrection' (copy-paste).
            raise CorrectionError(f'Error loading IvarCorrection. Failed to find or open file {filename}') from error
        except ValueError as error:
            raise CorrectionError(f"Error loading IvarCorrection. File {filename} does not have fields 'loglam' and/or 'eta' in HDU 'VAR_FUNC'") from error
        # Nearest-neighbour interpolation, extrapolating beyond the grid.
        self.correct_ivar = interp1d(log_lambda, eta, fill_value='extrapolate', kind='nearest')

    def apply_correction(self, forest):
        """Divide the forest's inverse variance by eta(log_lambda) in place."""
        correction = self.correct_ivar(forest.log_lambda)
        forest.ivar /= correction
_task('semisupervised_translation')
class SemisupervisedTranslationTask(MultilingualTranslationTask):
def add_args(parser):
MultilingualTranslationTask.add_args(parser)
parser.add_argument('--lambda-parallel-config', default='1.0', type=str, metavar='CONFIG', help='cross-entropy reconstruction coefficient (parallel data). use fixed weight during training if set to floating point number. use piecewise linear function over number of updates to schedule the weight with the format: w0:step0,w1:step1,...')
parser.add_argument('--lambda-denoising-config', default='0.0', type=str, metavar='CONFIG', help='Cross-entropy reconstruction coefficient (denoising autoencoding)use fixed weight during training if set to floating point number. use piecewise linear function over number of updates to schedule the weight with the format: w0:step0,w1:step1,...')
parser.add_argument('--lambda-otf-bt-config', default='0.0', type=str, metavar='CONFIG', help='cross-entropy reconstruction coefficient (on-the-fly back-translation parallel data)use fixed weight during training if set to floating point number. use piecewise linear function over number of updates to schedule the weight with the format: w0:step0,w1:step1,...')
parser.add_argument('--bt-max-len-a', default=1.1, type=float, metavar='N', help='generate back-translated sequences of maximum length ax + b, where x is the source length')
parser.add_argument('--bt-max-len-b', default=10.0, type=float, metavar='N', help='generate back-translated sequences of maximum length ax + b, where x is the source length')
parser.add_argument('--bt-beam-size', default=1, type=int, metavar='N', help='beam size used in beam search of online back-translation')
parser.add_argument('--max-word-shuffle-distance', default=3.0, type=float, metavar='N', help='maximum word shuffle distance for denoising autoencoding data generation')
parser.add_argument('--word-dropout-prob', default=0.1, type=float, metavar='N', help='word dropout probability for denoising autoencoding data generation')
parser.add_argument('--word-blanking-prob', default=0.2, type=float, metavar='N', help='word blanking probability for denoising autoencoding data generation')
def __init__(self, args, dicts, training):
    """Set up the semi-supervised translation task.

    Parses the three lambda schedules (parallel data, on-the-fly
    back-translation, denoising autoencoding) and, when denoising is
    enabled, registers the extra tgt-tgt model pairs it needs.
    """
    super().__init__(args, dicts, training)
    self.lambda_parallel, self.lambda_parallel_steps = parse_lambda_config(args.lambda_parallel_config)
    self.lambda_otf_bt, self.lambda_otf_bt_steps = parse_lambda_config(args.lambda_otf_bt_config)
    self.lambda_denoising, self.lambda_denoising_steps = parse_lambda_config(args.lambda_denoising_config)
    denoising_active = (self.lambda_denoising > 0.0) or (self.lambda_denoising_steps is not None)
    if denoising_active:
        # Denoising autoencoding trains tgt->tgt models: one per distinct target language.
        target_langs = {lang_pair.split('-')[1] for lang_pair in args.lang_pairs}
        self.model_lang_pairs = self.model_lang_pairs + [('%s-%s' % (tgt, tgt)) for tgt in target_langs]
    self.backtranslate_datasets = {}
    self.backtranslators = {}
def setup_task(cls, args, **kwargs):
    """Load dictionaries via the multilingual task and construct this task.

    NOTE(review): first parameter is ``cls`` but no @classmethod decorator is
    visible in this chunk — confirm the decorator exists upstream.
    """
    (dicts, training) = MultilingualTranslationTask.prepare(args, **kwargs)
    return cls(args, dicts, training)
def load_dataset(self, split, epoch=0, **kwargs):
    """Load parallel, back-translation and denoising datasets for `split`.

    Parallel data is loaded for non-train splits unconditionally, and for
    train splits only when the parallel lambda is (or may become) non-zero.
    Back-translation and denoising datasets are built only for train splits
    when their lambdas are active. Everything is combined into a
    RoundRobinZipDatasets stored in ``self.datasets[split]``.

    Raises:
        FileNotFoundError: when required data files are missing.
    """
    # Multiple data directories are cycled over epochs (one shard per epoch).
    paths = self.args.data.split(os.pathsep)
    assert (len(paths) > 0)
    data_path = paths[(epoch % len(paths))]
    def split_exists(split, src, tgt, lang):
        # Monolingual files follow the "<split>.<lang>-None.<lang>" naming scheme.
        if (src is not None):
            filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))
        else:
            filename = os.path.join(data_path, '{}.{}-None.{}'.format(split, src, tgt))
        return indexed_dataset.dataset_exists(filename, impl=self.args.dataset_impl)
    def load_indexed_dataset(path, dictionary):
        return data_utils.load_indexed_dataset(path, dictionary, self.args.dataset_impl)
    # ---- parallel (bitext) data ----
    (src_datasets, tgt_datasets) = ({}, {})
    if ((self.lambda_parallel > 0.0) or (self.lambda_parallel_steps is not None) or (not split.startswith('train'))):
        for lang_pair in self.lang_pairs:
            (src, tgt) = lang_pair.split('-')
            # The files on disk may be named under either direction of the pair.
            if split_exists(split, src, tgt, src):
                prefix = os.path.join(data_path, '{}.{}-{}.'.format(split, src, tgt))
            elif split_exists(split, tgt, src, src):
                prefix = os.path.join(data_path, '{}.{}-{}.'.format(split, tgt, src))
            else:
                continue
            src_datasets[lang_pair] = load_indexed_dataset((prefix + src), self.dicts[src])
            tgt_datasets[lang_pair] = load_indexed_dataset((prefix + tgt), self.dicts[tgt])
            logger.info('parallel-{} {} {} examples'.format(data_path, split, len(src_datasets[lang_pair])))
    # NOTE(review): this raises even when parallel data was intentionally
    # skipped (lambda_parallel == 0 on a train split) — confirm BT-only
    # training is expected to provide parallel files anyway.
    if (len(src_datasets) == 0):
        raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
    # ---- on-the-fly back-translation data (monolingual target side) ----
    backtranslate_datasets = {}
    if (((self.lambda_otf_bt > 0.0) or (self.lambda_otf_bt_steps is not None)) and split.startswith('train')):
        for lang_pair in self.lang_pairs:
            (src, tgt) = lang_pair.split('-')
            if (not split_exists(split, tgt, None, tgt)):
                raise FileNotFoundError('Dataset not found: backtranslation {} ({})'.format(split, data_path))
            filename = os.path.join(data_path, '{}.{}-None.{}'.format(split, tgt, tgt))
            dataset = load_indexed_dataset(filename, self.dicts[tgt])
            # Target-only view: the source side is produced on the fly by the BT model.
            lang_pair_dataset_tgt = LanguagePairDataset(dataset, dataset.sizes, self.dicts[tgt], left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target)
            # Full src/tgt view used only to build the output collater below.
            lang_pair_dataset = LanguagePairDataset(dataset, dataset.sizes, src_dict=self.dicts[src], tgt=dataset, tgt_sizes=dataset.sizes, tgt_dict=self.dicts[tgt], left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target)
            backtranslate_datasets[lang_pair] = BacktranslationDataset(tgt_dataset=self.alter_dataset_langtok(lang_pair_dataset_tgt, src_eos=self.dicts[tgt].eos(), src_lang=tgt, tgt_lang=src), backtranslation_fn=self.backtranslators[lang_pair], src_dict=self.dicts[src], tgt_dict=self.dicts[tgt], output_collater=self.alter_dataset_langtok(lang_pair_dataset=lang_pair_dataset, src_eos=self.dicts[src].eos(), src_lang=src, tgt_eos=self.dicts[tgt].eos(), tgt_lang=tgt).collater)
            logger.info('backtranslate-{}: {} {} {} examples'.format(tgt, data_path, split, len(backtranslate_datasets[lang_pair])))
            self.backtranslate_datasets[lang_pair] = backtranslate_datasets[lang_pair]
    # ---- denoising autoencoding data (noised target reconstructs itself) ----
    noising_datasets = {}
    if (((self.lambda_denoising > 0.0) or (self.lambda_denoising_steps is not None)) and split.startswith('train')):
        for lang_pair in self.lang_pairs:
            (_, tgt) = lang_pair.split('-')
            if (not split_exists(split, tgt, None, tgt)):
                continue
            filename = os.path.join(data_path, '{}.{}-None.{}'.format(split, tgt, tgt))
            # Two independent handles over the same file: one gets noised, one is the target.
            tgt_dataset1 = load_indexed_dataset(filename, self.dicts[tgt])
            tgt_dataset2 = load_indexed_dataset(filename, self.dicts[tgt])
            noising_dataset = NoisingDataset(tgt_dataset1, self.dicts[tgt], seed=1, max_word_shuffle_distance=self.args.max_word_shuffle_distance, word_dropout_prob=self.args.word_dropout_prob, word_blanking_prob=self.args.word_blanking_prob)
            noising_datasets[lang_pair] = self.alter_dataset_langtok(LanguagePairDataset(noising_dataset, tgt_dataset1.sizes, self.dicts[tgt], tgt_dataset2, tgt_dataset2.sizes, self.dicts[tgt], left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target), src_eos=self.dicts[tgt].eos(), src_lang=tgt, tgt_eos=self.dicts[tgt].eos(), tgt_lang=tgt)
            logger.info('denoising-{}: {} {} {} examples'.format(tgt, data_path, split, len(noising_datasets[lang_pair])))
    def language_pair_dataset(lang_pair):
        # Wrap parallel data with language-token alteration for multilingual decoding.
        (src, tgt) = lang_pair.split('-')
        (src_dataset, tgt_dataset) = (src_datasets[lang_pair], tgt_datasets[lang_pair])
        return self.alter_dataset_langtok(LanguagePairDataset(src_dataset, src_dataset.sizes, self.dicts[src], tgt_dataset, tgt_dataset.sizes, self.dicts[tgt], left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, max_source_positions=self.args.max_source_positions, max_target_positions=self.args.max_target_positions), self.dicts[src].eos(), src, self.dicts[tgt].eos(), tgt)
    # Round-robin over all sub-datasets; eval uses only the main direction.
    self.datasets[split] = RoundRobinZipDatasets(OrderedDict((([(lang_pair, language_pair_dataset(lang_pair)) for lang_pair in src_datasets.keys()] + [(_get_bt_dataset_key(lang_pair), dataset) for (lang_pair, dataset) in backtranslate_datasets.items()]) + [(_get_denoising_dataset_key(lang_pair), dataset) for (lang_pair, dataset) in noising_datasets.items()])), eval_key=(None if self.training else ('%s-%s' % (self.args.source_lang, self.args.target_lang))))
def build_model(self, args):
    """Build the multilingual model and, when back-translation is enabled,
    one SequenceGenerator (and backtranslation fn) per language pair.

    Raises:
        ValueError: if the built model is not a FairseqMultiModel.
    """
    from fairseq import models
    model = models.build_model(args, self)
    if (not isinstance(model, FairseqMultiModel)):
        raise ValueError('SemisupervisedTranslationTask requires a FairseqMultiModel architecture')
    self.sequence_generators = {}
    if (((self.lambda_otf_bt > 0.0) or (self.lambda_otf_bt_steps is not None)) and self.training):
        for lang_pair in self.lang_pairs:
            (src, tgt) = lang_pair.split('-')
            # Back-translation runs the reverse-direction model (tgt -> src).
            key = '{}-{}'.format(tgt, src)
            self.sequence_generators[key] = SequenceGenerator(tgt_dict=self.dicts[src], beam_size=args.bt_beam_size, max_len_a=args.bt_max_len_a, max_len_b=args.bt_max_len_b)
            decoder_lang_tok_idx = self.get_decoder_langtok(src)
            # Default arguments bind the current loop values (avoids the
            # late-binding closure pitfall).
            def backtranslate_fn(sample, model=model.models[key], bos_token=decoder_lang_tok_idx, sequence_generator=self.sequence_generators[key]):
                return sequence_generator.generate([model], sample, bos_token=bos_token)
            self.backtranslators[lang_pair] = backtranslate_fn
    return model
def train_step(self, sample, model, criterion, optimizer, ignore_grad=False):
    """Run forward/backward over parallel, back-translation and denoising
    sub-batches, each weighted by its current lambda.

    Args:
        sample: dict keyed by language pair / dataset key.
        ignore_grad: when True, multiply loss by 0 (dummy batch, e.g. OOM
            recovery) but still run backward to keep gradients in sync.

    Returns:
        (aggregated loss, aggregated sample size, per-key logging outputs).
    """
    model.train()
    (agg_loss, agg_sample_size, agg_logging_output) = (0.0, 0.0, {})
    def forward_backward(model, samples, logging_output_key, weight):
        # Accumulates into the enclosing aggregates.
        nonlocal agg_loss, agg_sample_size, agg_logging_output
        if ((samples is None) or (len(samples) == 0)):
            return
        (loss, sample_size, logging_output) = criterion(model, samples)
        if ignore_grad:
            loss *= 0
        else:
            # Scale by the schedule weight before backprop.
            loss *= weight
        optimizer.backward(loss)
        agg_loss += loss.detach().item()
        agg_sample_size += sample_size
        agg_logging_output[logging_output_key] = logging_output
    # Supervised translation on parallel data.
    if (self.lambda_parallel > 0.0):
        for lang_pair in self.lang_pairs:
            forward_backward(model.models[lang_pair], sample[lang_pair], lang_pair, self.lambda_parallel)
    # On-the-fly back-translation.
    if (self.lambda_otf_bt > 0.0):
        for lang_pair in self.lang_pairs:
            sample_key = _get_bt_dataset_key(lang_pair)
            forward_backward(model.models[lang_pair], sample[sample_key], sample_key, self.lambda_otf_bt)
    # Denoising autoencoding via the tgt->tgt model.
    if (self.lambda_denoising > 0.0):
        for lang_pair in self.lang_pairs:
            (_, tgt) = lang_pair.split('-')
            sample_key = _get_denoising_dataset_key(lang_pair)
            forward_backward(model.models['{0}-{0}'.format(tgt)], sample[sample_key], sample_key, self.lambda_denoising)
    return (agg_loss, agg_sample_size, agg_logging_output)
def update_step(self, num_updates):
    """Advance the lambda weights along their piecewise-linear schedules."""
    def interpolate(schedule, step):
        # Walk the (step, value) breakpoints; linearly interpolate inside
        # the segment that contains `step`.
        for idx in range(len(schedule) - 1):
            x_a, y_a = schedule[idx]
            x_b, y_b = schedule[idx + 1]
            if x_a <= step < x_b:
                return y_a + ((step - x_a) * float(y_b - y_a)) / float(x_b - x_a)
        # Past the final breakpoint: hold the last value.
        assert step >= schedule[-1][0]
        return schedule[-1][1]
    if self.lambda_parallel_steps is not None:
        self.lambda_parallel = interpolate(self.lambda_parallel_steps, num_updates)
    if self.lambda_denoising_steps is not None:
        self.lambda_denoising = interpolate(self.lambda_denoising_steps, num_updates)
    if self.lambda_otf_bt_steps is not None:
        self.lambda_otf_bt = interpolate(self.lambda_otf_bt_steps, num_updates)
def aggregate_logging_outputs(self, logging_outputs, criterion):
    """Aggregate logging outputs, restricted to keys this task produced."""
    observed_keys = set()
    for logging_output in logging_outputs:
        observed_keys.update(logging_output)
    # Keys this task can legitimately emit: parallel pairs plus their
    # back-translation and denoising variants.
    known_keys = set(self.lang_pairs)
    known_keys.update(_get_bt_dataset_key(pair) for pair in self.lang_pairs)
    known_keys.update(_get_denoising_dataset_key(pair) for pair in self.lang_pairs)
    return super().aggregate_logging_outputs(logging_outputs, criterion, observed_keys & known_keys)
def generate_and_tokenize_prompt(tokenizer, data_point):
    """Render `data_point` into a full prompt string and tokenize it."""
    return tokenize(tokenizer, generate_prompt(data_point))
def train(model, data_loader, optimizer, tokenizer, epoch, warmup_steps, device, scheduler, config):
    """Train `model` for one epoch over paired-image batches.

    Returns:
        dict mapping meter name to its formatted global average.
    """
    model.train()
    metric_logger = utils.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
    metric_logger.add_meter('loss', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
    header = 'Train Epoch: [{}]'.format(epoch)
    print_freq = 50
    step_size = 100
    warmup_iterations = warmup_steps * step_size
    for batch_idx, (image0, image1, text, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        # The two image views are stacked along the batch dimension.
        images = torch.cat([image0, image1], dim=0).to(device)
        targets = targets.to(device)
        text_inputs = tokenizer(text, padding='longest', return_tensors='pt').to(device)
        # During the warm-up epoch, ramp alpha linearly from 0 to its target.
        if epoch <= 0 and config['warm_up']:
            alpha = config['alpha'] * min(1, batch_idx / len(data_loader))
        else:
            alpha = config['alpha']
        loss = model(images, text_inputs, targets=targets, train=True, alpha=alpha)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        metric_logger.update(lr=optimizer.param_groups[0]['lr'])
        metric_logger.update(loss=loss.item())
        # Step the LR scheduler every `step_size` iterations while warming up.
        if epoch == 0 and batch_idx % step_size == 0 and batch_idx <= warmup_iterations:
            scheduler.step(batch_idx // step_size)
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger.global_avg())
    return {name: '{:.4f}'.format(meter.global_avg) for name, meter in metric_logger.meters.items()}
class QGRLModel(QGModel):
    """Question-generation model trained with a mixture of maximum-likelihood
    and reward-weighted (RL) losses."""

    def __init__(self, config, word_mat=None, elmo_word_mat=None, label_mat=None, pos_mat=None, ner_mat=None, trainable=True):
        QGModel.__init__(self, config, word_mat=word_mat, elmo_word_mat=elmo_word_mat, label_mat=label_mat, pos_mat=pos_mat, ner_mat=ner_mat, trainable=trainable)
        # Per-example reward for sampled questions; defaults to all ones.
        self.reward = tf.placeholder_with_default(tf.ones([self.N]), (self.N,), name='reward')
        # Questions sampled from the policy, length QL + 2 (presumably
        # start/end tokens — TODO confirm).
        self.sampled_que = tf.placeholder_with_default(tf.zeros([self.N, (self.QL + 2)], dtype=tf.int32), (self.N, (self.QL + 2)), name='sampled_question')
        # Mixing ratio between the RL and ML loss terms.
        self.lamda = tf.placeholder_with_default(config.mixing_ratio, (), name='mixing_ratio')

    def build_graph(self):
        """Build the encoder/decoder graph and the mixed ML+RL loss."""
        para_emb = self.input_embedding(self.para, self.labels, self.pos_tags, self.ner_tags, self.elmo_para_input)
        enc_outputs, enc_h_end, enc_c_end = self.input_encoder(para_emb, self.para_len)
        self.enc = self.gated_self_attention(enc_outputs)
        self.init_h = enc_h_end
        self.init_c = enc_c_end
        # Maximum-likelihood loss against the ground-truth question.
        ml_outputs, ml_oups, ml_attn = self.decode(self.que)
        self.loss_ml = tf.reduce_mean(self._compute_loss(ml_outputs, ml_oups, ml_attn))
        # RL loss: reward-weighted loss on sampled questions (reuses decoder weights).
        rl_outputs, rl_oups, rl_attn = self.decode(self.sampled_que, reuse=True)
        self.loss_rl = tf.reduce_mean(self._compute_loss(rl_outputs, rl_oups, rl_attn) * self.reward)
        # Convex combination controlled by the mixing-ratio placeholder.
        self.loss = ((1 - self.lamda) * self.loss_ml) + (self.lamda * self.loss_rl)
        self.symbols, self.probs = self.search(1)
        self.symbols_rl, self.probs_rl = self.sample()

    def add_train_op(self):
        """Create the Adam training op with global-norm gradient clipping."""
        self.opt = tf.train.AdamOptimizer(learning_rate=self.config.rl_learning_rate, beta1=0.8, beta2=0.999, epsilon=1e-07)
        grads_and_vars = self.opt.compute_gradients(self.loss, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N)
        gradients, variables = zip(*grads_and_vars)
        capped_grads, _ = tf.clip_by_global_norm(gradients, self.config.grad_clip)
        self.train_op = self.opt.apply_gradients(zip(capped_grads, variables), global_step=self.global_step)
def test_handle_market_order_bid_3():
    """A 70-lot BID market order fully consumes both resting asks at 100."""
    book, agent, limit_orders = setup_book_with_orders(asks=[(100, [30, 40])])
    market_order = MarketOrder(agent_id=2, time_placed=TIME, symbol=SYMBOL, quantity=70, side=Side.BID)
    book.handle_market_order(market_order)
    assert book.get_l3_ask_data() == []
    assert len(agent.messages) == 4
    # One (recipient, fill) pair per side of each of the two matches:
    # (recipient, agent_id, side, fill_price, quantity)
    expected_fills = [
        (1, 1, Side.ASK, 100, 30),
        (2, 2, Side.BID, 100, 30),
        (1, 1, Side.ASK, 100, 40),
        (2, 2, Side.BID, 100, 40),
    ]
    for message, (recipient, agent_id, side, price, qty) in zip(agent.messages, expected_fills):
        assert message[0] == recipient
        filled = message[1].order
        assert filled.agent_id == agent_id
        assert filled.side == side
        assert filled.fill_price == price
        assert filled.quantity == qty
def quantize_sym_model(sym_model, ctx, qconfig):
    """Quantize an MXNet symbolic model.

    Args:
        sym_model: tuple of (symbol, arg params, aux params).
        ctx: device/context to quantize for.
        qconfig: extra keyword arguments for the quantizer (mutated: the
            'quantize_granularity' key is dropped on MXNet < 1.7.0).

    Returns:
        ((quantized symbol, args, auxs), calibration tensors).
    """
    assert isinstance(sym_model, tuple) and isinstance(sym_model[0], mx.symbol.Symbol)
    symnet, args, auxs = sym_model
    if not check_mx_version('1.7.0'):
        # 'quantize_granularity' is only understood by MXNet >= 1.7.0.
        qconfig.pop('quantize_granularity', None)
    quant_kwargs = {'sym': symnet, 'offline_params': list(args.keys())}
    quant_kwargs.update(qconfig)
    # MXNet 2.x renamed the context keyword from 'ctx' to 'device'.
    device_key = 'device' if check_mx_version('2.0.0') else 'ctx'
    quant_kwargs[device_key] = ctx
    qsymnet, calib_tensors = mx.contrib.quantization._quantize_symbol(**quant_kwargs)
    return ((qsymnet, args, auxs), calib_tensors)
class TestDraw(unittest.TestCase):
    """Smoke test: every bundled prototxt must parse and render."""

    def test_draw_net(self):
        for proto_path in getFilenames():
            with open(proto_path) as proto_file:
                proto_text = proto_file.read()
            net_param = caffe_pb2.NetParameter()
            text_format.Merge(proto_text, net_param)
            # 'LR' = left-to-right graph layout.
            caffe.draw.draw_net(net_param, 'LR')
def get_intrinsics_path(mode: str) -> Path:
    """Return the intrinsics directory for the given dataset split `mode`."""
    return PATHS['mannequin_lmdb'] / mode / 'intrinsics'
def download_pcl(data_path, mode):
    """Download and unpack the PCL dataset archive into `data_path`.

    Args:
        data_path: destination directory.
        mode: 'gdrive' to download a zip from Google Drive, 'at' to fetch
            by content hash via `download_at`.

    Raises:
        ValueError: for an unknown `mode` (previously this fell through and
            failed later with a confusing move/rename error).

    After download, the extracted 'pcl' folder is renamed to 'PCL' and its
    'MI.h5' file to 'PCL.h5'.
    """
    if mode == 'gdrive':
        path = os.path.join(data_path, 'pcl')
        os.makedirs(name=path, exist_ok=True)
        # FIXME: the archive URL was lost — the original source contained an
        # unterminated string literal here (a syntax error). Restore the
        # Google Drive link before using this mode.
        archive_url = ''
        download_gdrive(archive_url, path, 'pcl.zip')
    elif mode == 'at':
        at_hash = 'e8b0af9c3f8c3c63a8212546f67a25a3'
        download_at(at_hash, data_path, 'pcl.zip')
    else:
        raise ValueError('unknown download mode: {!r}'.format(mode))
    shutil.move(os.path.join(data_path, 'pcl'), os.path.join(data_path, 'PCL'))
    os.rename(os.path.join(data_path, 'PCL', 'MI.h5'), os.path.join(data_path, 'PCL', 'PCL.h5'))
class TicTacTeo(SymbolicEnvironment):
    """Tic-tac-toe as a symbolic RL environment.

    The board is a width x width numpy array: 0 = empty, 1 = the agent's
    mark, -1 = the opponent's mark. The opponent plays a random move after
    each agent move.

    FIX: the board size and the win condition were hard-coded to 3 even
    though `width` is a constructor parameter; both are now derived from
    `width` (behavior is unchanged for the default width=3).
    """
    all_variations = ''

    def __init__(self, width=3, know_valid_pos=True):
        actions = [PLACE]
        self.language = LanguageFrame(actions, extensional=[ZERO, MINE, EMPTY, OPPONENT, SUCC], constants=[str(i) for i in range(width)])
        background = []
        # Successor relation over board indices, plus the zero constant.
        background.extend([Atom(SUCC, [str(i), str((i + 1))]) for i in range((width - 1))])
        background.append(Atom(ZERO, ['0']))
        self.max_step = 50
        # Board sized from `width` (was hard-coded to 3x3).
        initial_state = np.zeros([width, width])
        super(TicTacTeo, self).__init__(background, initial_state, actions)
        self.width = width
        self.all_positions = [(i, j) for i in range(width) for j in range(width)]
        self.know_valid_pos = know_valid_pos
        self.action_n = len(self.all_positions)
        self.state_dim = (width ** 2)

    def next_step(self, action):
        """Apply the agent's PLACE action, then let the opponent move.

        Note: the reward is computed from the state *before* this move is
        applied (original semantics preserved).
        """
        def tuple2int(t):
            return (int(t[0]), int(t[1]))
        self.step += 1
        (reward, finished) = self.get_reward()
        if finished:
            return (reward, finished)
        valids = self.get_valid()
        # Illegal moves are silently ignored (no mark placed).
        if (tuple2int(action.terms) in valids):
            self._state[tuple2int(action.terms)] = 1
        self.random_move(self.know_valid_pos)
        return (reward, finished)

    def get_valid(self):
        """Return all empty board positions."""
        return [(x, y) for (x, y) in self.all_positions if (self._state[(x, y)] == 0)]

    def all_actions(self):
        """Return one PLACE atom per board position."""
        return [Atom(PLACE, [str(position[0]), str(position[1])]) for position in self.all_positions]

    def state2vector(self, state):
        """Flatten the board into a 1-D feature vector."""
        return state.flatten()

    def state2atoms(self, state):
        """Convert the board into EMPTY/OPPONENT/MINE ground atoms."""
        atoms = set()
        def tuple2strings(t):
            return (str(t[0]), str(t[1]))
        for position in self.all_positions:
            if (state[position] == 0):
                atoms.add(Atom(EMPTY, tuple2strings(position)))
            elif (state[position] == (- 1)):
                atoms.add(Atom(OPPONENT, tuple2strings(position)))
            elif (state[position] == 1):
                atoms.add(Atom(MINE, tuple2strings(position)))
        return atoms

    def state(self):
        """Return a deep copy of the current board."""
        return copy.deepcopy(self._state)

    def random_move(self, know_valid):
        """Opponent move: random valid cell, or (if not know_valid) a random
        cell that is only applied when it happens to be empty."""
        valid_position = self.get_valid()
        if (not valid_position):
            return
        if know_valid:
            position = choice(valid_position)
            self._state[position] = (- 1)
        else:
            position = choice(self.all_positions)
            if (position in valid_position):
                self._state[position] = (- 1)

    def get_reward(self):
        """Return (reward, done): +1 win, -1 loss, 0 draw or ongoing.

        A line (row, column or full diagonal) is complete when its sum
        reaches +/- self.width (was hard-coded to +/- 3). Offset diagonals
        have fewer than `width` cells and so can never reach the threshold.
        """
        w = self.width
        col_sums = np.sum(self._state, axis=0)
        row_sums = np.sum(self._state, axis=1)
        if (np.any((col_sums == w)) or np.any((row_sums == w))):
            return (1, True)
        for i in range((- w), w):
            if ((np.trace(self._state, i) == w) or (np.trace(np.flip(self._state, 0), i) == w)):
                return (1, True)
        if (np.any((col_sums == (- w))) or np.any((row_sums == (- w)))):
            return ((- 1), True)
        for i in range((- w), w):
            if ((np.trace(self._state, i) == (- w)) or (np.trace(np.flip(self._state, 0), i) == (- w))):
                return ((- 1), True)
        if (not self.get_valid()):
            return (0, True)
        return (0, False)
_tf
class TFTransfoXLModelTest(TFModelTesterMixin, unittest.TestCase):
    """Common-API tests for the TensorFlow Transfo-XL model family."""

    all_model_classes = (TFTransfoXLModel, TFTransfoXLLMHeadModel) if is_tf_available() else ()
    all_generative_model_classes = () if is_tf_available() else ()
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*inputs)

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            # Input embeddings must be a Keras layer; Transfo-XL exposes no
            # biased output layer, so both bias accessors return None.
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            assert model.get_output_layer_with_bias() is None
            assert model.get_prefix_bias_name() is None

    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            self.assertIsNotNone(TFTransfoXLModel.from_pretrained(model_name))
def placeholder_inputs(batch_size, num_point):
    """Create TF placeholders for a point-cloud batch.

    Returns:
        (pointclouds, labels): float32 (B, N, 6) points — 6 channels,
        presumably xyz plus color/normal, TODO confirm — and int32 (B, N)
        per-point labels.
    """
    point_shape = (batch_size, num_point, 6)
    label_shape = (batch_size, num_point)
    pointclouds_pl = tf.placeholder(tf.float32, shape=point_shape)
    labels_pl = tf.placeholder(tf.int32, shape=label_shape)
    return (pointclouds_pl, labels_pl)
def gather_grad(params):
    """Average the gradients of `params` across all distributed workers in place."""
    world_size = get_world_size()
    if world_size == 1:
        # Single process: nothing to synchronize.
        return
    for param in (p for p in params if p.grad is not None):
        dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
        param.grad.data.div_(world_size)
def get_pattern(config, modules, framework='pytorch'):
    """Resolve the sparsity-pattern class named in `config.pattern` and
    instantiate it.

    The last '_'-separated token of the pattern name selects the family:
    a token containing 'x' -> NxM, ':' -> N:M, 'mha' -> MHA.

    Raises:
        AssertionError: for an unsupported framework or pattern name.
            (FIX: previously used `assert False`, which is silently stripped
            under `python -O`; validation now raises explicitly while keeping
            the same exception type for callers.)
    """
    if framework not in FRAMEWORK.keys():
        raise AssertionError(f'does not support {framework}, currently only support {FRAMEWORK.keys()}')
    name = config.pattern
    name = name.split('_')[-1]
    pattern = FRAMEWORK[framework]
    if 'x' in name:
        pattern += 'NxM'
    elif ':' in name:
        pattern += 'N:M'
    elif 'mha' in name:
        pattern += 'MHA'
    else:
        raise AssertionError(f'currently only support {PATTERNS.keys()}')
    if pattern not in PATTERNS.keys():
        raise AssertionError(f'currently only support {PATTERNS.keys()}')
    return PATTERNS[pattern](config, modules)
def test_dbnet_draw_border_map():
    """Smoke test: drawing the border map for a polygon that extends outside
    the canvas (negative coordinates) must not crash."""
    target_generator = textdet_targets.DBNetTargets()
    polygon = np.array([[20, 21], [-14, 20], [-11, 30], [-22, 26]])
    height, width = 40, 40
    threshold_map = np.zeros((height, width), dtype=np.float32)
    threshold_mask = np.zeros((height, width), dtype=np.uint8)
    target_generator.draw_border_map(polygon, threshold_map, threshold_mask)
def write_sequences(gt, output_folder):
    """Write one '<sequence>.txt' file per sequence in `gt` into `output_folder`."""
    os.makedirs(output_folder, exist_ok=True)
    for seq_name, seq_frames in gt.items():
        out_path = os.path.join(output_folder, seq_name + '.txt')
        write_sequence(seq_frames, out_path)
class TripletEvaluator(SentenceEvaluator):
    """Evaluate a sentence-embedding model on (anchor, positive, negative)
    triplets: accuracy is the fraction of triplets where the anchor is closer
    to the positive than to the negative, under cosine, Manhattan and
    Euclidean distance. Results are appended to a CSV file.

    NOTE: 'manhatten' is a historical misspelling kept in CSV headers and
    log text for backward compatibility.
    """
    def __init__(self, dataloader: DataLoader, main_distance_function: SimilarityFunction=None, name: str=''):
        """
        Args:
            dataloader: yields triplet batches; its collate_fn is replaced
                by the model's smart_batching_collate at call time.
            main_distance_function: which accuracy to return from __call__;
                None returns the best of the three.
            name: label used in log messages and the CSV filename.
        """
        self.dataloader = dataloader
        self.main_distance_function = main_distance_function
        self.device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
        self.name = name
        if name:
            name = ('_' + name)
        self.csv_file: str = (('triplet_evaluation' + name) + '_results.csv')
        self.csv_headers = ['epoch', 'steps', 'accuracy_cosinus', 'accuracy_manhatten', 'accuracy_euclidean']
    def __call__(self, model, output_path: str=None, epoch: int=(- 1), steps: int=(- 1)) -> float:
        """Run the evaluation; returns the accuracy selected by
        `main_distance_function` (or the max of all three).

        NOTE(review): raises ZeroDivisionError if the dataloader is empty —
        confirm callers guarantee at least one triplet.
        """
        model.eval()
        # Build a human-readable suffix describing when this eval runs.
        if (epoch != (- 1)):
            if (steps == (- 1)):
                out_txt = f' after epoch {epoch}:'
            else:
                out_txt = f' in epoch {epoch} after {steps} steps:'
        else:
            out_txt = ':'
        logging.info(((('Evaluation the model on ' + self.name) + ' dataset') + out_txt))
        num_triplets = 0
        (num_correct_cos_triplets, num_correct_manhatten_triplets, num_correct_euclidean_triplets) = (0, 0, 0)
        self.dataloader.collate_fn = model.smart_batching_collate
        for (step, batch) in enumerate(tqdm(self.dataloader, desc='Evaluating')):
            (features, label_ids) = batch_to_device(batch, self.device)
            with torch.no_grad():
                # emb1/emb2/emb3 = anchor / positive / negative embeddings.
                (emb1, emb2, emb3) = [model(sent_features)['sentence_embedding'].to('cpu').numpy() for sent_features in features]
            pos_cos_distance = paired_cosine_distances(emb1, emb2)
            neg_cos_distances = paired_cosine_distances(emb1, emb3)
            pos_manhatten_distance = paired_manhattan_distances(emb1, emb2)
            neg_manhatten_distances = paired_manhattan_distances(emb1, emb3)
            pos_euclidean_distance = paired_euclidean_distances(emb1, emb2)
            neg_euclidean_distances = paired_euclidean_distances(emb1, emb3)
            # A triplet is correct when the positive is strictly closer.
            for idx in range(len(pos_cos_distance)):
                num_triplets += 1
                if (pos_cos_distance[idx] < neg_cos_distances[idx]):
                    num_correct_cos_triplets += 1
                if (pos_manhatten_distance[idx] < neg_manhatten_distances[idx]):
                    num_correct_manhatten_triplets += 1
                if (pos_euclidean_distance[idx] < neg_euclidean_distances[idx]):
                    num_correct_euclidean_triplets += 1
        accuracy_cos = (num_correct_cos_triplets / num_triplets)
        accuracy_manhatten = (num_correct_manhatten_triplets / num_triplets)
        accuracy_euclidean = (num_correct_euclidean_triplets / num_triplets)
        logging.info('Accuracy Cosine Distance:\t{:.4f}'.format(accuracy_cos))
        logging.info('Accuracy Manhatten Distance:\t{:.4f}'.format(accuracy_manhatten))
        logging.info('Accuracy Euclidean Distance:\t{:.4f}\n'.format(accuracy_euclidean))
        if (output_path is not None):
            # Write headers on first use, then append one row per evaluation.
            csv_path = os.path.join(output_path, self.csv_file)
            if (not os.path.isfile(csv_path)):
                with open(csv_path, mode='w', encoding='utf-8') as f:
                    writer = csv.writer(f)
                    writer.writerow(self.csv_headers)
                    writer.writerow([epoch, steps, accuracy_cos, accuracy_manhatten, accuracy_euclidean])
            else:
                with open(csv_path, mode='a', encoding='utf-8') as f:
                    writer = csv.writer(f)
                    writer.writerow([epoch, steps, accuracy_cos, accuracy_manhatten, accuracy_euclidean])
        if (self.main_distance_function == SimilarityFunction.COSINE):
            return accuracy_cos
        if (self.main_distance_function == SimilarityFunction.MANHATTAN):
            return accuracy_manhatten
        if (self.main_distance_function == SimilarityFunction.EUCLIDEAN):
            return accuracy_euclidean
        return max(accuracy_cos, accuracy_manhatten, accuracy_euclidean)
_module()
class SCFlowDecoder(BaseModule):
    """Iterative SCFlow decoder: jointly refines optical flow, an occlusion
    mask and the object pose over `iters` GRU update steps.

    Fixes over the previous revision:
    * `isinstance(tuple, list)` (always False) replaced with a real check on
      `feat_channels`, so list/tuple inputs are no longer wrapped in a
      nested list;
    * integer `//` division in `_downsample` — `Tensor.view` rejects float
      sizes;
    * the mutable default `corr_lookup_cfg` dict is copied before mutation,
      so the radius no longer leaks between instances;
    * `pose_head_cfg` annotation was `dict()` (a dict instance), now `dict`.
    """

    _h_channels = {'Basic': 128, 'Small': 96}
    _cxt_channels = {'Basic': 128, 'Small': 64}

    def __init__(self, net_type: str, num_levels: int, radius: int, iters: int, detach_flow: bool, detach_mask: bool, detach_pose: bool, mask_flow: bool, mask_corr: bool, pose_head_cfg: dict, depth_transform: str='exp', detach_depth_for_xy: bool=False, corr_lookup_cfg: dict=dict(align_corners=True), gru_type: str='SeqConv', feat_channels: Union[(int, Sequence[int])]=256, conv_cfg: Optional[dict]=None, norm_cfg: Optional[dict]=None, act_cfg: Optional[dict]=None) -> None:
        super().__init__()
        assert (net_type in ['Basic', 'Small'])
        assert (type(feat_channels) in (int, tuple, list))
        self.corr_block = CorrelationPyramid(num_levels=num_levels)
        # BUG FIX: was `isinstance(tuple, list)`, which is always False.
        feat_channels = feat_channels if isinstance(feat_channels, (tuple, list)) else [feat_channels]
        self.net_type = net_type
        self.num_levels = num_levels
        self.radius = radius
        self.detach_flow = detach_flow
        self.detach_mask = detach_mask
        self.detach_pose = detach_pose
        self.detach_depth_for_xy = detach_depth_for_xy
        self.mask_flow = mask_flow
        self.mask_corr = mask_corr
        self.depth_transform = depth_transform
        self.h_channels = self._h_channels.get(net_type)
        self.cxt_channels = self._cxt_channels.get(net_type)
        self.iters = iters
        # Copy before mutating: the default argument dict is shared across calls.
        corr_lookup_cfg = dict(corr_lookup_cfg)
        corr_lookup_cfg['radius'] = radius
        self.corr_lookup = CorrLookup(**corr_lookup_cfg)
        self.encoder = MotionEncoder(num_levels=num_levels, radius=radius, net_type=net_type, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        self.gru_type = gru_type
        self.gru = self.make_gru_block()
        self.pose_pred = build_head(pose_head_cfg)
        self.flow_pred = XHead(self.h_channels, feat_channels, 2, x='flow')
        self.mask_pred = XHead(self.h_channels, feat_channels, 1, x='mask')
        self.delta_flow_encoder = nn.Sequential(*self.make_delta_flow_encoder(2, channels=[128, 64], kernels=[7, 3], paddings=[3, 1], conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg))
        self.mask_encoder = nn.Sequential(*self.make_delta_flow_encoder(1, channels=[64, 32], kernels=[3, 3], paddings=[1, 1], conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg))

    def make_delta_flow_encoder(self, in_channel, channels, kernels, paddings, conv_cfg, norm_cfg, act_cfg):
        """Build a list of ConvModules chaining `in_channel` through `channels`."""
        encoder = []
        for (ch, k, p) in zip(channels, kernels, paddings):
            encoder.append(ConvModule(in_channels=in_channel, out_channels=ch, kernel_size=k, padding=p, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg))
            in_channel = ch
        return encoder

    def make_gru_block(self):
        """Build the ConvGRU update cell (motion features + flow + context)."""
        return ConvGRU(self.h_channels, ((self.encoder.out_channels[0] + 2) + self.cxt_channels), net_type=self.gru_type)

    def _downsample(self, flow: torch.Tensor, mask: torch.Tensor):
        """Downsample `flow` by the pyramid scale via a learned convex
        combination (mask holds pre-softmax weights per output pixel)."""
        scale = (2 ** (self.num_levels - 1))
        (N, _, H, W) = flow.shape
        # BUG FIX: use integer division — Tensor.view rejects float sizes.
        mask = mask.view(N, 1, (scale * scale), (H // scale), (W // scale))
        mask = torch.softmax(mask, dim=2)
        # NOTE(review): padding=1 looks suspicious for an exact H//scale
        # patch grid — confirm against the intended unfold geometry.
        downflow = F.unfold((flow / scale), [scale, scale], padding=1, stride=scale)
        downflow = downflow.view(N, 2, (scale * scale), (H // scale), (W // scale))
        downflow = torch.sum((mask * downflow), dim=2)
        return downflow

    def forward(self, feat_render: torch.Tensor, feat_real: torch.Tensor, h_feat: torch.Tensor, cxt_feat: torch.Tensor, ref_rotation: torch.Tensor, ref_translation: torch.Tensor, depth: torch.Tensor, internel_k: torch.Tensor, label: torch.Tensor, init_flow: torch.Tensor, invalid_flow_num: float) -> Sequence[torch.Tensor]:
        """Run `iters` refinement steps.

        Returns per-iteration lists: (flow_from_pose, flow_from_pred,
        rotation_preds, translation_preds, mask_preds, delta_rotation_preds,
        delta_translation_preds).
        """
        corr_pyramid = self.corr_block(feat_render, feat_real)
        update_rotation = ref_rotation
        update_translation = ref_translation
        (rotation_preds, translation_preds) = ([], [])
        (delta_rotation_preds, delta_translation_preds) = ([], [])
        (flow_from_pose, flow_from_pred) = ([], [])
        mask_preds = []
        scale = (2 ** (self.num_levels - 1))
        (N, H, W) = depth.size()
        flow = init_flow
        # Precompute 2D-3D correspondences from the reference pose per sample.
        (points_2d_list, points_3d_list) = ([], [])
        for i in range(N):
            (points_2d, points_3d) = cal_3d_2d_corr(depth[i], internel_k[i], ref_rotation[i], ref_translation[i])
            points_2d_list.append(points_2d)
            points_3d_list.append(points_3d)
        init_mask = torch.ones((N, 1, H, W), dtype=init_flow.dtype, device=init_flow.device)
        init_mask = F.interpolate(init_mask, scale_factor=((1 / scale), (1 / scale)), mode='bilinear', align_corners=True)
        mask = init_mask
        for i in range(self.iters):
            # Optionally cut gradients through the previous iteration.
            if self.detach_flow:
                flow = flow.detach()
            if self.detach_mask:
                mask = mask.detach()
            # Work at the coarsest pyramid resolution.
            flow = ((1 / scale) * F.interpolate(flow, scale_factor=((1 / scale), (1 / scale)), mode='bilinear', align_corners=True))
            corr = self.corr_lookup(corr_pyramid, flow)
            if self.mask_corr:
                corr = (corr * mask)
            if self.mask_flow:
                motion_feat = self.encoder(corr, (flow * mask))
            else:
                motion_feat = self.encoder(corr, flow)
            x = torch.cat([cxt_feat, motion_feat], dim=1)
            h_feat = self.gru(h_feat, x)
            delta_flow_pred = self.flow_pred(h_feat)
            mask = torch.sigmoid(self.mask_pred(h_feat))
            delta_flow_feat = self.delta_flow_encoder(delta_flow_pred)
            mask_feat = self.mask_encoder(mask)
            (delta_rotation, delta_translation) = self.pose_pred(torch.cat([h_feat, delta_flow_feat, mask_feat], axis=1), label)
            # Upsample predicted flow/mask back to input resolution.
            flow_pred = (flow + delta_flow_pred)
            flow_pred = (scale * F.interpolate(flow_pred, scale_factor=(scale, scale), mode='bilinear', align_corners=True))
            upsample_mask_pred = F.interpolate(mask, scale_factor=(scale, scale), mode='bilinear', align_corners=True)
            (update_rotation, update_translation) = get_pose_from_delta_pose(delta_rotation, delta_translation, (update_rotation.detach() if self.detach_pose else update_rotation), (update_translation.detach() if self.detach_pose else update_translation), depth_transform=self.depth_transform, detach_depth_for_xy=self.detach_depth_for_xy)
            # Re-render the flow induced by the updated pose for the next iteration.
            flow = get_flow_from_delta_pose_and_points(update_rotation, update_translation, internel_k, points_2d_list, points_3d_list, H, W, invalid_num=invalid_flow_num)
            rotation_preds.append(update_rotation)
            translation_preds.append(update_translation)
            delta_rotation_preds.append(delta_rotation)
            delta_translation_preds.append(delta_translation)
            flow_from_pose.append(flow)
            flow_from_pred.append(flow_pred)
            mask_preds.append(upsample_mask_pred)
        return (flow_from_pose, flow_from_pred, rotation_preds, translation_preds, mask_preds, delta_rotation_preds, delta_translation_preds)
class _Unbuffered():
def __init__(self, stream: TextIO) -> None:
self.stream = stream
def write(self, data: Any) -> None:
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr: str) -> Any:
return getattr(self.stream, attr) |
def run_all_reduce_sparse(rank, size, backend='gloo'):
    """Worker entry point: all-reduce a sparse tensor across `size` ranks
    and verify the element-wise sum."""
    dist.init_process_group(backend, rank=rank, world_size=size)
    # Each rank contributes a different sparse tensor.
    if rank == 0:
        local = torch.tensor([[0.0, 0.0, 0.0], [0.0, 1.1, 1.2]]).to_sparse(2)
    else:
        local = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 2.0]]).to_sparse(2)
    result = all_reduce_sparse(local)
    # Expected sum: 1.1 at (1,1) and 1.2 + 2.0 = 3.2 at (1,2).
    expected_indices = torch.tensor([[1, 1], [1, 2]])
    expected_values = torch.tensor([1.1, 3.2])
    expected_size = torch.tensor([2, 3])
    assert torch.equal(result.indices(), expected_indices)
    assert torch.equal(result.values(), expected_values)
    assert torch.equal(torch.tensor(result.size()), expected_size)
class VerticalFlip(object):
    """torchvision-style callable wrapping albumentations' VerticalFlip."""

    def __init__(self, p=0.5):
        # Flip probability; kept on the instance for __repr__.
        self.p = p
        self.t = A.VerticalFlip(p=self.p)

    def __call__(self, image):
        transformed = self.t(image=image)
        return transformed['image']

    def __repr__(self):
        return self.__class__.__name__ + '(p={0})'.format(self.p)
class FunnelTokenizerFast(BertTokenizerFast):
    """Fast tokenizer for Funnel Transformer.

    Identical to BertTokenizerFast except that the classification token gets
    its own token-type id (`cls_token_type_id` = 2) instead of segment id 0.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    # Token-type id assigned to the [CLS]/<cls> position.
    cls_token_type_id: int = 2

    def __init__(self, vocab_file, do_lower_case=True, unk_token='<unk>', sep_token='<sep>', pad_token='<pad>', cls_token='<cls>', mask_token='<mask>', bos_token='<s>', eos_token='</s>', clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix='##', **kwargs):
        super().__init__(vocab_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, clean_text=clean_text, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, wordpieces_prefix=wordpieces_prefix, **kwargs)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Build token-type ids: `cls_token_type_id` for the cls token, 0 for
        the first sequence and its separator, 1 for the optional second
        sequence and its separator."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        type_ids = [self.cls_token_type_id] * len(cls)
        type_ids += [0] * len(token_ids_0 + sep)
        if token_ids_1 is not None:
            type_ids += [1] * len(token_ids_1 + sep)
        return type_ids

    def _convert_encoding(self, encoding, **kwargs):
        """Post-process the parent's encoding: wherever the input id equals the
        cls token id, overwrite the token-type id with `cls_token_type_id`."""
        encoding_dict = super()._convert_encoding(encoding, **kwargs)
        if 'token_type_ids' in encoding_dict:
            remapped = []
            for input_ids, type_ids in zip(encoding_dict['input_ids'], encoding_dict['token_type_ids']):
                row = [self.cls_token_type_id if tok == self.cls_token_id else t for tok, t in zip(input_ids, type_ids)]
                remapped.append(row)
            encoding_dict['token_type_ids'] = remapped
        return encoding_dict
class DataLoader(object):
    """Load a JSON-lines dialogue-evaluation file into scored response entries.

    Each input line holds a dict with 'context', 'positive_responses', and
    negative-response lists; `mode` selects which negatives ('random' or
    'adversarial') fill out the eight model responses.
    """

    def __init__(self, fname, mode):
        self.fname = fname
        self.mode = mode

    def preprocess(self, line, speaker_tag='<first_speaker>'):
        """Lower-case an utterance and prefix it with a speaker tag."""
        return f'{speaker_tag} {line.lower()}'

    def load_data(self):
        """Parse every line of the file into a dict with keys 'c', 'r_gt', 'r_models'."""
        dataset = []
        fin = codecs.open(self.fname, 'r', encoding='utf-8')
        for raw in fin.readlines():
            record = json.loads(raw.strip())
            context = record['context']
            # Alternate speaker tags over the context turns, then wrap in </s> markers.
            context_text = ''
            for idx, turn in enumerate(context):
                speaker_tag = '<first_speaker>' if idx % 2 == 0 else '<second_speaker>'
                context_text += self.preprocess(turn, speaker_tag) + ' '
            context_text = '</s> ' + context_text.strip() + ' </s>'
            gold = record['positive_responses'][0]
            # NOTE(review): `idx` deliberately leaks out of the loop above —
            # the responder tag continues the alternation from the last turn.
            speaker_tag = '<first_speaker>' if (idx + 1) % 2 == 0 else '<second_speaker>'
            model_names = ['model_1', 'model_2', 'model_3', 'model_4', 'model_5', 'model_6', 'model_7', 'model_8']
            # First four (positive) responses score 5, the four negatives score 1.
            scores = [5] * 4 + [1] * 4
            responses = record['positive_responses'][1:5]
            if self.mode == 'random':
                responses += record['random_negative_responses'][:4]
            elif self.mode == 'adversarial':
                responses += record['adversarial_negative_responses'][:4]
            responses = ['</s> ' + self.preprocess(r, speaker_tag) + ' </s>' for r in responses]
            entry = {'c': context_text, 'r_gt': gold, 'r_models': {}}
            for name, resp, score in zip(model_names, responses, scores):
                entry['r_models'][name] = [resp, score, len(resp)]
            dataset.append(entry)
        fin.close()
        return dataset
def specificity(y_true, y_pred):
    """Per-class specificity (true-negative rate).

    For each distinct label in `y_true`, computes the fraction of samples that
    are NOT of that class and were also predicted as NOT that class.
    Returns a list ordered by `np.unique(y_true)`.
    """
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    per_class = []
    for label in np.unique(y_true):
        neg_mask = y_true != label
        negatives = np.sum(neg_mask.astype(int))
        true_negatives = np.sum((y_pred[neg_mask] != label).astype(int))
        per_class.append(true_negatives / negatives)
    return per_class
def create_image_or_video_tensor(size: Sequence[int]) -> torch.Tensor:
    """Return a uint8 tensor of shape `size` with values uniform in [0, 256)."""
    tensor = torch.randint(low=0, high=256, size=size, dtype=torch.uint8)
    return tensor
def conv_layer(x, input_channel, output_channel, k_size=3, relu=True, stride=1, bn=True, name='conv_layer'):
    """Convolution + bias, optionally followed by batch norm and ReLU.

    Builds a k_size x k_size conv from `input_channel` to `output_channel`
    channels inside a TF name scope; `bn` and `relu` toggle the post-ops.
    """
    with tf.name_scope(name):
        weights = weight_variable([k_size, k_size, input_channel, output_channel], 'weight')
        biases = bias_variable([output_channel], 'bias')
        out = conv2d(x, weights, s=stride) + biases
        if bn:
            out = batchnorm(out)
        if relu:
            out = tf.nn.relu(out)
        return out
def data_files(data_dir, subset):
    """Return the list of TFRecord shard paths for a dataset subset.

    Args:
        data_dir: directory containing shards named like 'train-*'.
        subset: one of 'train', 'validation', 'test'.

    Exits the process with status -1 (via SystemExit) when the subset name is
    invalid or no matching files exist, preserving the original CLI behavior.
    """
    # Local import keeps this fix self-contained within the block.
    import sys
    if subset not in ('train', 'validation', 'test'):
        print('Invalid subset!')
        # Fixed: use sys.exit instead of the site-injected builtin exit(),
        # which is not guaranteed outside interactive sessions.
        sys.exit(-1)
    tf_record_pattern = os.path.join(data_dir, '%s-*' % subset)
    # Renamed local (was `data_files`) so it no longer shadows this function.
    files = tf.gfile.Glob(tf_record_pattern)
    print(files)
    if not files:
        # Fixed: format args were (subset, data_dir) while the wording said
        # "data dir %s at %s" — message now matches its arguments.
        print('No files found for subset %s at %s' % (subset, data_dir))
        sys.exit(-1)
    return files
def train(train_loader, model, model_base, landscape_model, criterion, optimizer, epoch, args):
    """Run one training epoch; every 500 batches, probe the loss landscape.

    The landscape probe checkpoints `model` to 'temporary.pth.tar', optionally
    reloads `args.resume_temporary` into `model_base`/`landscape_model`, then
    sweeps perturbations of the base features along the gradient direction and
    prints the resulting loss and gradient drift.

    Args:
        train_loader: iterable of (images, target) batches.
        model: the network being trained.
        model_base: feature extractor used only for the landscape probe.
        landscape_model: head evaluated on (perturbed) base features.
        criterion: loss function.
        optimizer: optimizer stepping `model`.
        epoch: current epoch index (logging / checkpoint metadata).
        args: namespace with at least gpu, arch, print_freq, resume_temporary.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('', ':6.2f')
    top5 = AverageMeter('', ':6.2f')
    progress = ProgressMeter(len(train_loader), [batch_time, data_time, losses, top1, top5], prefix='Epoch: [{}]'.format(epoch))
    model.train()
    # Batch counter driving the every-500-batches checkpoint/landscape probe.
    # NOTE(review): tracks the same count as the enumerate index `i`.
    num = 0
    end = time.time()
    for (i, (images, target)) in enumerate(train_loader):
        # Time spent waiting on the data loader.
        data_time.update((time.time() - end))
        if (args.gpu is not None):
            images = images.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
        # Forward pass and accuracy/loss bookkeeping.
        output = model(images)
        loss = criterion(output, target)
        (acc1, acc5) = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))
        # Snapshot the model so the probe below can reload a fresh checkpoint.
        if ((num % 500) == 0):
            torch.save({'epoch': (epoch + 1), 'arch': args.arch, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, 'temporary.pth.tar')
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update((time.time() - end))
        end = time.time()
        if ((i % args.print_freq) == 0):
            progress.display(i)
        # Landscape probe, executed on the same cadence as the snapshot above.
        if ((num % 500) == 0):
            print('Iter {0} : loss {1}'.format(num, loss))
            if args.resume_temporary:
                if os.path.isfile(args.resume_temporary):
                    # Load the snapshot into both probe models.
                    utils.load_state_ckpt(args.resume_temporary, model_base)
                    utils.load_state_ckpt(args.resume_temporary, landscape_model)
                else:
                    print("=> no temporary checkpoint found at '{}'".format(args.resume_temporary))
            # Step size sweeping the perturbation scale from 0.1 to 75 in 50 steps.
            theta = ((75 - 0.1) / 49)
            # Base features of the current batch; detached so only the probe
            # head receives gradients through `y`.
            y = model_base(images).detach_()
            y.requires_grad = True
            landscape_model.zero_grad()
            landscape_out = landscape_model(y)
            loss_landscape = criterion(landscape_out, target)
            loss_landscape.backward()
            # Gradient direction along which the landscape is probed.
            dy = y.grad
            print('Base : loss_landscape {0}'.format(loss_landscape))
            # NOTE(review): this inner loop reuses `i`, clobbering the outer
            # batch index — the `i % args.print_freq` display check above will
            # misbehave after the first probe. Rename the inner index.
            for i in range(50):
                # Current perturbation magnitude along dy.
                deta = (0.1 + (theta * i))
                input_y = (y + (deta * dy)).detach_()
                input_y.requires_grad = True
                landscape_model.zero_grad()
                landscape_out = landscape_model(input_y)
                loss_landscape = criterion(landscape_out, target)
                loss_landscape.backward()
                # Gradient drift relative to the unperturbed gradient.
                norm = (input_y.grad.cpu() - dy.cpu())
                norm = norm.view((- 1))
                dy_norm = np.linalg.norm(norm)
                print('loss_landscape {0} dy_norm {1}'.format(loss_landscape, dy_norm))
        num += 1
class ValueNode(Node):
    """Search-tree node that accumulates a running value total and visit count."""

    def __init__(self, state, parents=None, children=None):
        # Fixed: the original used mutable defaults (`parents=set(),
        # children=set()`), so every instance created without explicit
        # arguments shared the SAME two set objects. Use None sentinels and
        # create fresh sets per instance instead.
        super().__init__(state, set() if parents is None else parents, set() if children is None else children)
        self.state = state
        self.value = 0.0   # cumulative backed-up value
        self.visits = 0    # number of backups received

    def backward(self, value):
        """Record one backup: bump the visit count and accumulate `value`."""
        self.visits += 1
        self.value += value
class GroupsSimpleStationarySingleItem(GroupsSimpleStationary):
    # Variant of GroupsSimpleStationary that removes the distractor item from
    # the world after every reset, leaving only the single remaining item.
    def _reset(self):
        """Reset via the parent, then drop `self.distractor_item` from the world."""
        super()._reset()
        # `world` and `distractor_item` are presumably created by the parent's
        # reset — TODO confirm where they are initialized.
        self.world.remove_object(self.distractor_item)
class ImageNet(datasets.ImageFolder):
    """ILSVRC2012 dataset returning dicts with image, target and metadata.

    The parent is constructed with transform=None on purpose: a fixed
    Resize(256) is applied first, then the user-supplied transform, inside
    __getitem__.
    """

    def __init__(self, root=MyPath.db_root_dir('imagenet'), split='train', transform=None):
        super(ImageNet, self).__init__(root=os.path.join(root, 'ILSVRC2012_img_%s' % split), transform=None)
        self.transform = transform
        self.split = split
        self.resize = tf.Resize(256)

    def __len__(self):
        return len(self.imgs)

    def __getitem__(self, index):
        """Return {'image', 'target', 'meta': {'im_size', 'index'}} for `index`."""
        path, target = self.imgs[index]
        with open(path, 'rb') as fh:
            img = Image.open(fh).convert('RGB')
        # Original PIL size (before resizing), kept as metadata.
        im_size = img.size
        img = self.resize(img)
        if self.transform is not None:
            img = self.transform(img)
        return {'image': img, 'target': target, 'meta': {'im_size': im_size, 'index': index}}

    def get_image(self, index):
        """Return just the resized RGB PIL image for `index` (no transform)."""
        path, _ = self.imgs[index]
        with open(path, 'rb') as fh:
            img = Image.open(fh).convert('RGB')
        return self.resize(img)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.