code stringlengths 281 23.7M |
|---|
def get_squeezenext(version, width_scale, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
    """Create a SqueezeNext model of the requested version and width.

    Parameters
    ----------
    version : str
        Architecture version: '23' (SqNxt-23) or '23v5' (SqNxt-23v5).
    width_scale : float
        Multiplier applied to all channel counts (1 means no scaling).
    model_name : str or None
        Model name used to locate pretrained weights; required if `pretrained`.
    pretrained : bool
        Whether to download and load pretrained weights.
    root : str
        Local directory of the pretrained-weight store.
    **kwargs
        Forwarded to the SqueezeNext constructor.

    Returns
    -------
    SqueezeNext
        The constructed (and optionally pretrained) network.

    Raises
    ------
    ValueError
        If `version` is unsupported, or `pretrained` is requested without a
        usable `model_name`.
    """
    init_block_channels = 64
    final_block_channels = 128
    channels_per_layers = [32, 64, 128, 256]
    if version == '23':
        layers = [6, 6, 8, 1]
    elif version == '23v5':
        layers = [2, 4, 14, 1]
    else:
        # Fixed message: this builder creates SqueezeNext, not SqueezeNet.
        raise ValueError('Unsupported SqueezeNext version {}'.format(version))
    channels = [[ci] * li for ci, li in zip(channels_per_layers, layers)]
    if width_scale != 1:
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
        init_block_channels = int(init_block_channels * width_scale)
        final_block_channels = int(final_block_channels * width_scale)
    net = SqueezeNext(channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, **kwargs)
    if pretrained:
        if not model_name:
            # `model_name is None` is subsumed by the falsiness check.
            raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
# NOTE(review): the bare `(repr=False)` below is the remnant of a stripped
# class decorator (likely `@dataclass(repr=False)` or attrs' frozen variant);
# without it the annotated fields are not instance fields. Confirm upstream.
(repr=False)
class _Test():
    """A single JSON Schema test-suite case (one schema/instance/valid triple)."""
    version: Version  # draft/dialect the case targets
    subject: str  # test file subject (e.g. "type")
    case_description: str  # description of the enclosing case group
    description: str  # description of this individual test
    data: Any  # the instance to validate
    schema: (Mapping[(str, Any)] | bool)  # schema under test (boolean schemas allowed)
    valid: bool  # expected validation outcome
    _remotes: referencing.jsonschema.SchemaRegistry  # registry for remote $ref resolution
    comment: (str | None) = None
    def __repr__(self):
        return f'<Test {self.fully_qualified_name}>'
    # NOTE(review): referenced without parentheses in __repr__ above, so this
    # was presumably decorated @property upstream -- TODO confirm.
    def fully_qualified_name(self):
        # Human-readable hierarchical name: "draft > subject > case > test".
        return ' > '.join([self.version.name, self.subject, self.case_description, self.description])
    def to_unittest_method(self, skip=(lambda test: None), **kwargs):
        """Build a unittest-style test method asserting this case's expected outcome.

        `skip` may return a reason string to mark the method as skipped; the
        JSON_SCHEMA_DEBUG / JSON_SCHEMA_EXPECTED_FAILURES environment
        variables override skipping for debugging runs.
        """
        if self.valid:
            def fn(this):
                self.validate(**kwargs)
        else:
            def fn(this):
                with this.assertRaises(jsonschema.ValidationError):
                    self.validate(**kwargs)
        # Derive a valid Python identifier from the descriptive names.
        fn.__name__ = '_'.join(['test', _DELIMITERS.sub('_', self.subject), _DELIMITERS.sub('_', self.case_description), _DELIMITERS.sub('_', self.description)])
        reason = skip(self)
        if ((reason is None) or (os.environ.get('JSON_SCHEMA_DEBUG', '0') != '0')):
            return fn
        elif (os.environ.get('JSON_SCHEMA_EXPECTED_FAILURES', '0') != '0'):
            return unittest.expectedFailure(fn)
        else:
            return unittest.skip(reason)(fn)
    def validate(self, Validator, **kwargs):
        """Validate `self.data` against `self.schema` with the given Validator class."""
        Validator.check_schema(self.schema)
        validator = Validator(schema=self.schema, registry=self._remotes, **kwargs)
        if (os.environ.get('JSON_SCHEMA_DEBUG', '0') != '0'):
            breakpoint()  # deliberate, env-gated debugging hook
        validator.validate(instance=self.data)
    def validate_ignoring_errors(self, Validator):
        """Run validation but swallow expected validation failures."""
        with suppress(jsonschema.ValidationError):
            self.validate(Validator=Validator)
def convert_embed(func: Callable[([str], str)], embed: Embed) -> Embed:
    """Return a copy of `embed` with `func` applied to every text part.

    The transformation covers the title, description, footer text, and each
    field's name and value; all other embed attributes pass through unchanged.
    """
    as_dict = embed.to_dict()
    as_dict['title'] = func(as_dict.get('title', ''))
    as_dict['description'] = func(as_dict.get('description', ''))
    if 'footer' in as_dict:
        footer = as_dict['footer']
        footer['text'] = func(footer.get('text', ''))
    for field in as_dict.get('fields', ()):
        field['name'] = func(field.get('name', ''))
        field['value'] = func(field.get('value', ''))
    return Embed.from_dict(as_dict)
def collate_fn_tagger(batch):
    """Collate tagger samples into per-key lists.

    Each element of `batch` is a dict with either four keys
    ('token', 'tagger', 'ins', 'mod') or a single 'token' key; the key count
    of the first sample decides which layout is assumed for the whole batch.

    Returns
    -------
    A tuple of four lists (tokens, tagger, ins, mod) in the 4-key case, or a
    single list of tokens in the 1-key case.

    Raises
    ------
    ValueError
        If the batch is empty or the first sample has neither 1 nor 4 keys.
        (Previously a bare ``Exception``; ValueError is still caught by any
        caller handling ``Exception``, and an empty batch no longer dies with
        an opaque IndexError.)
    """
    if not batch:
        raise ValueError('Error Batch Input, Please Check.')
    dim = len(batch[0])
    if dim == 4:
        tokens = [item['token'] for item in batch]
        tagger = [item['tagger'] for item in batch]
        ins = [item['ins'] for item in batch]
        mod = [item['mod'] for item in batch]
        return (tokens, tagger, ins, mod)
    if dim == 1:
        return [item['token'] for item in batch]
    raise ValueError('Error Batch Input, Please Check.')
def _download_compacted_table(hb_index: int, rcf: RoundCompletionInfo, read_kwargs_provider: Optional[ReadKwargsProvider]=None, deltacat_storage=unimplemented_deltacat_storage, deltacat_storage_kwargs: Optional[dict]=None) -> Optional[pa.Table]:
    """Download and concatenate all manifest entries for one hash-bucket index.

    Returns None when the round-completion info records no entry range for
    `hb_index`; otherwise a single pyarrow Table concatenating every entry in
    the recorded [start, end) range.
    """
    if deltacat_storage_kwargs is None:
        # Bug fix: the default None was previously splatted directly
        # (**None), which raises TypeError on every call using the default.
        deltacat_storage_kwargs = {}
    hb_index_to_indices = rcf.hb_index_to_entry_range
    if str(hb_index) not in hb_index_to_indices:
        return None
    indices = hb_index_to_indices[str(hb_index)]
    # Internal invariant of the round-completion file format.
    assert indices is not None and len(indices) == 2, 'indices should not be none and contains exactly two elements'
    tables = []
    for offset in range(indices[1] - indices[0]):
        table = deltacat_storage.download_delta_manifest_entry(rcf.compacted_delta_locator, entry_index=(indices[0] + offset), file_reader_kwargs_provider=read_kwargs_provider, **deltacat_storage_kwargs)
        tables.append(table)
    return pa.concat_tables(tables)
# NOTE(review): the original source had a bare `_module()` call here -- almost
# certainly the remains of a stripped registration decorator (e.g. mmcv's
# `@HOOKS.register_module()`). Restore it when re-integrating upstream.
class CustomizedTextLoggerHook(TextLoggerHook):
    """Text logger hook that additionally reports a `layer_0_lr` entry."""

    def _log_info(self, log_dict, runner):
        """Format one log record and emit it through `runner.logger`."""
        # Periodically print the experiment name so runs are distinguishable.
        if runner.meta is not None and 'exp_name' in runner.meta:
            if self.every_n_iters(runner, self.interval_exp_name) or (self.by_epoch and self.end_of_epoch(runner)):
                exp_info = f"Exp name: {runner.meta['exp_name']}"
                runner.logger.info(exp_info)
        if log_dict['mode'] == 'train':
            # Build a display string per LR entry; a dict value means
            # per-group learning rates keyed by group name.
            lr_str = {}
            for lr_type in ['lr', 'layer_0_lr']:
                if isinstance(log_dict[lr_type], dict):
                    # Bug fix: previously appended to / joined the outer dict
                    # (`lr_str.append(...)`, `' '.join(lr_str)`) instead of a
                    # per-type list, which raised AttributeError at runtime.
                    parts = []
                    for k, val in log_dict[lr_type].items():
                        parts.append(f'{lr_type}_{k}: {val:.3e}')
                    lr_str[lr_type] = ' '.join(parts)
                else:
                    lr_str[lr_type] = f'{lr_type}: {log_dict[lr_type]:.3e}'
            if self.by_epoch:
                log_str = f"Epoch [{log_dict['epoch']}][{log_dict['iter']}/{len(runner.data_loader)}] "
            else:
                log_str = f"Iter [{log_dict['iter']}/{runner.max_iters}] "
            log_str += f"{lr_str['lr']}, {lr_str['layer_0_lr']}, "
            if 'time' in log_dict.keys():
                # Running-average iteration time drives the ETA estimate.
                self.time_sec_tot += (log_dict['time'] * self.interval)
                time_sec_avg = self.time_sec_tot / ((runner.iter - self.start_iter) + 1)
                eta_sec = time_sec_avg * ((runner.max_iters - runner.iter) - 1)
                eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
                log_str += f'eta: {eta_str}, '
                log_str += f"time: {log_dict['time']:.3f}, data_time: {log_dict['data_time']:.3f}, "
                if torch.cuda.is_available():
                    log_str += f"memory: {log_dict['memory']}, "
        elif self.by_epoch:
            log_str = f"Epoch({log_dict['mode']}) [{log_dict['epoch']}][{log_dict['iter']}] "
        else:
            log_str = f"Iter({log_dict['mode']}) [{log_dict['iter']}] "
        # Append every remaining metric not already rendered above.
        log_items = []
        for name, val in log_dict.items():
            if name in ['mode', 'Epoch', 'iter', 'lr', 'layer_0_lr', 'time', 'data_time', 'memory', 'epoch']:
                continue
            if isinstance(val, float):
                val = f'{val:.4f}'
            log_items.append(f'{name}: {val}')
        log_str += ', '.join(log_items)
        runner.logger.info(log_str)

    def log(self, runner):
        """Collect the current record (mode/epoch/iter/LRs/metrics) and emit it."""
        if 'eval_iter_num' in runner.log_buffer.output:
            # The eval hook already recorded the dataset position.
            cur_iter = runner.log_buffer.output.pop('eval_iter_num')
        else:
            cur_iter = self.get_iter(runner, inner_iter=True)
        log_dict = OrderedDict(mode=self.get_mode(runner), epoch=self.get_epoch(runner), iter=cur_iter)
        cur_lr = runner.current_lr()
        if isinstance(cur_lr, list):
            # Report the extremes: the minimum is treated as the layer-0 LR.
            log_dict['layer_0_lr'] = min(cur_lr)
            log_dict['lr'] = max(cur_lr)
        else:
            assert isinstance(cur_lr, dict)
            log_dict['lr'], log_dict['layer_0_lr'] = {}, {}
            for k, lr_ in cur_lr.items():
                assert isinstance(lr_, list)
                log_dict['layer_0_lr'].update({k: min(lr_)})
                log_dict['lr'].update({k: max(lr_)})
        if 'time' in runner.log_buffer.output:
            # Record GPU memory only when timing info exists (training iters).
            if torch.cuda.is_available():
                log_dict['memory'] = self._get_max_memory(runner)
        log_dict = dict(log_dict, **runner.log_buffer.output)
        self._log_info(log_dict, runner)
        self._dump_log(log_dict, runner)
        return log_dict
class Effect11767(BaseEffect):
    """Passive bonus: HAC skill-driven tracking-speed boost for medium hybrid turrets."""

    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        bonus = src.getModifiedItemAttr('eliteBonusHeavyGunship1')
        uses_medium_hybrids = lambda mod: mod.item.requiresSkill('Medium Hybrid Turret')
        fit.modules.filteredItemBoost(uses_medium_hybrids, 'trackingSpeed', bonus, skill='Heavy Assault Cruisers', **kwargs)
def setUpModule():
    """Build the shared UHF/FCI test fixtures used by this module's tests.

    Populates module-level globals: the molecule and SCF objects,
    spin-restricted (suffix "r") and spin-unrestricted (suffix "i")
    one-/two-electron MO integrals, and random CI vectors of the matching
    dimensions.
    """
    global mol, m, h1er, h1ei, h1es, g2er, g2ei, g2es, ci0, ci1, ci2, ci3
    global norb, nelecr, neleci
    mol = gto.Mole()
    mol.verbose = 0
    mol.output = None
    # Six hydrogen atoms; charge +1 with spin 1 gives an odd-electron system.
    mol.atom = [['H', (1.0, (- 1.0), 0.0)], ['H', (0.0, (- 1.0), (- 1.0))], ['H', (0.0, (- 0.5), (- 0.0))], ['H', (0.0, (- 0.0), (- 1.0))], ['H', (1.0, (- 0.5), 0.0)], ['H', (0.0, 1.0, 1.0)]]
    mol.basis = {'H': 'sto-3g'}
    mol.charge = 1
    mol.spin = 1
    mol.build()
    m = scf.UHF(mol)
    m.conv_tol_grad = 1e-08
    ehf = m.scf()
    (mo_a, mo_b) = m.mo_coeff
    norb = mo_a.shape[1]
    # "r" case: equal alpha/beta electron counts.
    nelecr = (((mol.nelectron + 1) // 2), ((mol.nelectron + 1) // 2))
    # MO-basis core Hamiltonian and ERIs built from the alpha orbitals.
    h1er = reduce(numpy.dot, (mo_a.T, m.get_hcore(), mo_a))
    g2er = ao2mo.incore.general(m._eri, ((mo_a,) * 4), compact=False)
    h1es = (h1er, h1er)
    g2es = (g2er, g2er, g2er)
    na = fci.cistring.num_strings(norb, nelecr[0])
    nb = fci.cistring.num_strings(norb, nelecr[1])
    # Fixed seed so the random CI vectors are reproducible across runs.
    numpy.random.seed(15)
    ci0 = numpy.random.random((na, nb))
    ci1 = numpy.random.random((na, nb))
    # "i" case: one fewer beta electron (spin-polarised counts).
    neleci = (((mol.nelectron + 1) // 2), ((mol.nelectron - 1) // 2))
    na = fci.cistring.num_strings(norb, neleci[0])
    nb = fci.cistring.num_strings(norb, neleci[1])
    h1ei = (reduce(numpy.dot, (mo_a.T, m.get_hcore(), mo_a)), reduce(numpy.dot, (mo_b.T, m.get_hcore(), mo_b)))
    g2ei = (ao2mo.incore.general(m._eri, ((mo_a,) * 4), compact=False), ao2mo.incore.general(m._eri, (mo_a, mo_a, mo_b, mo_b), compact=False), ao2mo.incore.general(m._eri, ((mo_b,) * 4), compact=False))
    numpy.random.seed(15)
    ci2 = numpy.random.random((na, nb))
    ci3 = numpy.random.random((na, nb))
def convert_lossless_jpeg(input_filepath, output_filepath=None):
    """Read a (lossless-JPEG) image file and re-save it as TIFF.

    When `output_filepath` is omitted, the result is written next to the
    input with the same stem and a `.tif` suffix.
    """
    source = pathlib.Path(input_filepath)
    if output_filepath is None:
        output_filepath = source.parent / f'{source.stem}.tif'
    image = imread(source)
    imageio.imwrite(str(output_filepath), image, format='.tif')
def InceptionV3(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000):
    """Instantiate the Inception v3 architecture (Keras functional API).

    Parameters
    ----------
    include_top : bool
        Whether to include the final global-average-pool + Dense classifier.
    weights : 'imagenet' or None
        Load ImageNet weights or use random initialization.
    input_tensor : optional Keras tensor to use as the model input.
    input_shape : optional shape tuple; validated/defaulted to 299x299 below.
    pooling : None, 'avg' or 'max'
        Feature-pooling mode applied when `include_top` is False.
    classes : int
        Number of output classes (must be 1000 with ImageNet weights + top).

    Returns
    -------
    A Keras `Model`.

    Raises
    ------
    ValueError
        For an invalid `weights` value or an inconsistent weights/classes combo.
    """
    if (weights not in {'imagenet', None}):
        raise ValueError('The `weights` argument should be either `None` (random initialization) or `imagenet` (pre-training on ImageNet).')
    if ((weights == 'imagenet') and include_top and (classes != 1000)):
        raise ValueError('If using `weights` as imagenet with `include_top` as true, `classes` should be 1000')
    # Validate/complete the requested input shape for this architecture.
    input_shape = _obtain_input_shape(input_shape, default_size=299, min_size=139, data_format=K.image_data_format(), require_flatten=False, weights=weights)
    if (input_tensor is None):
        img_input = Input(shape=input_shape)
    elif (not K.is_keras_tensor(input_tensor)):
        img_input = Input(tensor=input_tensor, shape=input_shape)
    else:
        img_input = input_tensor
    # Channel axis depends on the backend's image data format.
    if (K.image_data_format() == 'channels_first'):
        channel_axis = 1
    else:
        channel_axis = 3
    # Stem: strided convs + max-pools reduce spatial resolution early.
    x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding='valid')
    x = conv2d_bn(x, 32, 3, 3, padding='valid')
    x = conv2d_bn(x, 64, 3, 3)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = conv2d_bn(x, 80, 1, 1, padding='valid')
    x = conv2d_bn(x, 192, 3, 3, padding='valid')
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)
    # mixed0: four parallel branches (1x1, 5x5, double 3x3, pooled 1x1).
    branch1x1 = conv2d_bn(x, 64, 1, 1)
    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed0')
    # mixed1: same block with a 64-channel pooled branch.
    branch1x1 = conv2d_bn(x, 64, 1, 1)
    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed1')
    # mixed2: identical layout to mixed1.
    branch1x1 = conv2d_bn(x, 64, 1, 1)
    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
    x = layers.concatenate([branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed2')
    # mixed3: grid-size reduction (strided convs + max-pool).
    branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')
    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3, strides=(2, 2), padding='valid')
    branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = layers.concatenate([branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed3')
    # mixed4: 7x7 convs factorized into 1x7 and 7x1 pairs (128 mid channels).
    branch1x1 = conv2d_bn(x, 192, 1, 1)
    branch7x7 = conv2d_bn(x, 128, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
    branch7x7dbl = conv2d_bn(x, 128, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=channel_axis, name='mixed4')
    # mixed5 and mixed6: same factorized-7x7 block with 160 mid channels.
    for i in range(2):
        branch1x1 = conv2d_bn(x, 192, 1, 1)
        branch7x7 = conv2d_bn(x, 160, 1, 1)
        branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
        branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
        branch7x7dbl = conv2d_bn(x, 160, 1, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
        branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=channel_axis, name=('mixed' + str((5 + i))))
    # mixed7: same block with 192 mid channels throughout.
    branch1x1 = conv2d_bn(x, 192, 1, 1)
    branch7x7 = conv2d_bn(x, 192, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
    branch7x7dbl = conv2d_bn(x, 192, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate([branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=channel_axis, name='mixed7')
    # mixed8: second grid-size reduction.
    branch3x3 = conv2d_bn(x, 192, 1, 1)
    branch3x3 = conv2d_bn(branch3x3, 320, 3, 3, strides=(2, 2), padding='valid')
    branch7x7x3 = conv2d_bn(x, 192, 1, 1)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 3, 3, strides=(2, 2), padding='valid')
    branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = layers.concatenate([branch3x3, branch7x7x3, branch_pool], axis=channel_axis, name='mixed8')
    # mixed9 and mixed10: expanded-filter-bank blocks with split 1x3/3x1 branches.
    for i in range(2):
        branch1x1 = conv2d_bn(x, 320, 1, 1)
        branch3x3 = conv2d_bn(x, 384, 1, 1)
        branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
        branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
        branch3x3 = layers.concatenate([branch3x3_1, branch3x3_2], axis=channel_axis, name=('mixed9_' + str(i)))
        branch3x3dbl = conv2d_bn(x, 448, 1, 1)
        branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
        branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
        branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
        branch3x3dbl = layers.concatenate([branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis)
        branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = layers.concatenate([branch1x1, branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name=('mixed' + str((9 + i))))
    # Classification head or optional feature pooling.
    if include_top:
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Dense(classes, activation='softmax', name='predictions')(x)
    elif (pooling == 'avg'):
        x = GlobalAveragePooling2D()(x)
    elif (pooling == 'max'):
        x = GlobalMaxPooling2D()(x)
    # Ensure the model tracks the original input tensor's source if given.
    if (input_tensor is not None):
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    model = Model(inputs, x, name='inception_v3')
    # Optionally fetch and load the pretrained ImageNet weights.
    if (weights == 'imagenet'):
        if (K.image_data_format() == 'channels_first'):
            if (K.backend() == 'tensorflow'):
                warnings.warn('You are using the TensorFlow backend, yet you are using the Theano image data format convention (`image_data_format="channels_first"`). For best performance, set `image_data_format="channels_last"` in your Keras config at ~/.keras/keras.json.')
        if include_top:
            weights_path = get_file('inception_v3_weights_tf_dim_ordering_tf_kernels.h5', WEIGHTS_PATH, cache_subdir='models', md5_hash='9a0d58056eeedaa3f26cb7ebd46da564')
        else:
            weights_path = get_file('inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5', WEIGHTS_PATH_NO_TOP, cache_subdir='models', md5_hash='bcbd6486424b2319ff4ef7d526e38f63')
        model.load_weights(weights_path)
    return model
def preprocess(image, label, size, mean_pixel):
    """Resize image/label to size x size, mean-subtract, and add batch dims.

    The image is bilinearly resized (channels untouched), mean-subtracted and
    returned as (1, C, H, W); the label is nearest-neighbour resized and
    returned as (1, 1, H, W).
    """
    img_zoom = (size / float(image.shape[0]), size / float(image.shape[1]), 1.0)
    image = nd.zoom(image.astype('float32'), img_zoom, order=1)
    lab_zoom = (size / float(label.shape[0]), size / float(label.shape[1]))
    label = nd.zoom(label, lab_zoom, order=0)
    image = (image - mean_pixel).transpose([2, 0, 1])
    image = image[np.newaxis, ...]
    label = np.reshape(label, newshape=(1, 1, size, size))
    return (image, label)
@pytest.fixture(scope='function', autouse=True)
def _skip_sensitive(request, sensitive_url):
    """Automatically skip destructive tests when the target URL is sensitive.

    Restored the `@pytest.fixture(...)` decorator: the extracted source had
    only its argument list `(scope='function', autouse=True)` on a bare line,
    which is a syntax error. A test counts as destructive unless it carries
    the 'nondestructive' marker.
    """
    destructive = 'nondestructive' not in request.node.keywords
    if sensitive_url and destructive:
        pytest.skip("This test is destructive and the target URL is considered a sensitive environment. If this test is not destructive, add the 'nondestructive' marker to it. Sensitive URL: {0}".format(sensitive_url))
class CodeStream(CodeStreamAPI):
    """Iterator/random-access view over EVM bytecode with PUSH-data validity caching."""
    __slots__ = ['_length_cache', '_raw_code_bytes', 'invalid_positions', 'valid_positions']
    logger = logging.getLogger('eth.vm.CodeStream')
    def __init__(self, code_bytes: bytes) -> None:
        validate_is_bytes(code_bytes, title='CodeStream bytes')
        self.program_counter = 0
        self._raw_code_bytes = code_bytes
        self._length_cache = len(code_bytes)
        # Memoized results of is_valid_opcode() lookups.
        self.invalid_positions: Set[int] = set()
        self.valid_positions: Set[int] = set()
    def read(self, size: int) -> bytes:
        """Return the next `size` bytes and advance the program counter past them."""
        old_program_counter = self.program_counter
        target_program_counter = (old_program_counter + size)
        self.program_counter = target_program_counter
        return self._raw_code_bytes[old_program_counter:target_program_counter]
    # NOTE(review): returns counter minus one (PC of the byte just consumed);
    # presumably decorated @property upstream -- TODO confirm.
    def pc(self) -> int:
        return (self.program_counter - 1)
    def __len__(self) -> int:
        return self._length_cache
    def __getitem__(self, i: int) -> int:
        return self._raw_code_bytes[i]
    def __iter__(self) -> Iterator[int]:
        # Re-read the counter after each yield so a seek() performed by the
        # consumer between yields is honored.
        pc = self.program_counter
        while (pc < self._length_cache):
            opcode = self._raw_code_bytes[pc]
            self.program_counter = (pc + 1)
            (yield opcode)
            pc = self.program_counter
        # Implicit STOP once the end of the code is reached.
        (yield STOP)
    def peek(self) -> int:
        """Return the next opcode without advancing (STOP past end of code)."""
        pc = self.program_counter
        if (pc < self._length_cache):
            return self._raw_code_bytes[pc]
        else:
            return STOP
    # NOTE(review): yields `self` and restores the counter in `finally`, i.e.
    # written as a context manager -- a stripped @contextmanager decorator is
    # likely. Confirm against upstream before use.
    def seek(self, program_counter: int) -> Iterator['CodeStream']:
        """Temporarily move the program counter, restoring it on exit."""
        anchor_pc = self.program_counter
        self.program_counter = program_counter
        try:
            (yield self)
        finally:
            self.program_counter = anchor_pc
    def _potentially_disqualifying_opcode_positions(self, position: int) -> Iterator[int]:
        """Yield earlier positions whose PUSH immediate data could cover `position`.

        Only the preceding 32 bytes can matter, since PUSH32 carries the
        widest immediate; a PUSHn located `bytes_back` bytes earlier covers
        this position iff n >= bytes_back.
        """
        deepest_lookback = min(32, position)
        for bytes_back in range(deepest_lookback, 0, (- 1)):
            earlier_position = (position - bytes_back)
            opcode = self._raw_code_bytes[earlier_position]
            if ((PUSH1 + (bytes_back - 1)) <= opcode <= PUSH32):
                (yield earlier_position)
    def is_valid_opcode(self, position: int) -> bool:
        """True iff `position` is a real opcode (not inside PUSH immediate data)."""
        if (position >= self._length_cache):
            return False
        elif (position in self.invalid_positions):
            return False
        elif (position in self.valid_positions):
            return True
        else:
            # A position is invalid iff some earlier *valid* PUSH covers it;
            # the recursion terminates because candidates are strictly earlier.
            for disqualifier in self._potentially_disqualifying_opcode_positions(position):
                if self.is_valid_opcode(disqualifier):
                    self.invalid_positions.add(position)
                    return False
            self.valid_positions.add(position)
            return True
class EuclideanCodebook(nn.Module):
    """Codebook with Euclidean distance lookup and EMA updates.

    Maintains `codebook_size` embedding vectors of dimension `dim`,
    optionally initialized by k-means on the first training batch, with
    exponential-moving-average cluster statistics and re-seeding (expiry) of
    under-used codes.
    """

    def __init__(self, dim: int, codebook_size: int, kmeans_init: int=False, kmeans_iters: int=10, decay: float=0.99, epsilon: float=1e-05, threshold_ema_dead_code: int=2):
        super().__init__()
        self.decay = decay
        # Zero-init when k-means will fill the codebook on the first forward;
        # uniform random init otherwise.
        init_fn: tp.Union[tp.Callable[..., torch.Tensor], tp.Any] = uniform_init if not kmeans_init else torch.zeros
        embed = init_fn(codebook_size, dim)
        self.codebook_size = codebook_size
        self.kmeans_iters = kmeans_iters
        self.epsilon = epsilon
        self.threshold_ema_dead_code = threshold_ema_dead_code
        self.register_buffer('inited', torch.Tensor([not kmeans_init]))
        self.register_buffer('cluster_size', torch.zeros(codebook_size))
        self.register_buffer('embed', embed)
        self.register_buffer('embed_avg', embed.clone())

    # NOTE(review): a stray `.ignore` line stood here in the extracted source,
    # almost certainly a stripped `@torch.jit.ignore` decorator -- restore it
    # before scripting this module.
    def init_embed_(self, data):
        """One-time k-means initialization of the codebook from `data`."""
        if self.inited:
            return
        if dist.is_available() and dist.is_initialized():
            # Gather samples from all workers so every rank fits the same codebook.
            data = SyncFunction.apply(data)
        embed, cluster_size = kmeans(data, self.codebook_size, self.kmeans_iters)
        self.embed.data.copy_(embed)
        self.embed_avg.data.copy_(embed.clone())
        self.cluster_size.data.copy_(cluster_size)
        self.inited.data.copy_(torch.Tensor([True]))
        broadcast_tensors(self.buffers())

    def replace_(self, samples, mask):
        """Replace masked codebook entries with vectors sampled from `samples`."""
        modified_codebook = torch.where(mask[..., None], sample_vectors(samples, self.codebook_size), self.embed)
        self.embed.data.copy_(modified_codebook)

    def expire_codes_(self, batch_samples):
        """Re-seed codes whose EMA usage fell below the dead-code threshold."""
        if self.threshold_ema_dead_code == 0:
            return
        expired_codes = self.cluster_size < self.threshold_ema_dead_code
        if not torch.any(expired_codes):
            return
        if is_distributed():
            batch_samples = SyncFunction.apply(batch_samples)
        batch_samples = rearrange(batch_samples, '... d -> (...) d')
        self.replace_(batch_samples, mask=expired_codes)
        broadcast_tensors(self.buffers())

    def preprocess(self, x):
        # Flatten all leading dims so x is (N, d).
        x = rearrange(x, '... d -> (...) d')
        return x

    def quantize(self, x):
        """Return the index of the nearest codebook vector for each row of `x`."""
        embed = self.embed.t()
        # Negative squared Euclidean distance, -(|x|^2 - 2 x @ e + |e|^2).
        # Bug fix: the extracted source had dropped the `@` matmul operator.
        dist = -(x.pow(2).sum(1, keepdim=True) - (2 * x) @ embed + embed.pow(2).sum(0, keepdim=True))
        embed_ind = dist.max(dim=-1).indices
        return embed_ind

    def postprocess_emb(self, embed_ind, shape):
        # Restore the original leading dims (all but the feature dim).
        return embed_ind.view(*shape[:-1])

    def dequantize(self, embed_ind):
        quantize = F.embedding(embed_ind, self.embed)
        return quantize

    def encode(self, x):
        """Quantize `x` to codebook indices, preserving its leading shape."""
        shape = x.shape
        x = self.preprocess(x)
        embed_ind = self.quantize(x)
        embed_ind = self.postprocess_emb(embed_ind, shape)
        return embed_ind

    def decode(self, embed_ind):
        """Look up the codebook vectors for the given indices."""
        quantize = self.dequantize(embed_ind)
        return quantize

    def forward(self, x):
        """Quantize `x`; during training also update the codebook via EMA."""
        shape, dtype = x.shape, x.dtype
        x = self.preprocess(x)
        self.init_embed_(x)
        embed_ind = self.quantize(x)
        embed_onehot = F.one_hot(embed_ind, self.codebook_size).type(dtype)
        embed_ind = self.postprocess_emb(embed_ind, shape)
        quantize = self.dequantize(embed_ind)
        if self.training:
            embed_onehot_sum = embed_onehot.sum(0)
            # Bug fix: restored the `@` matmul dropped in extraction.
            embed_sum = x.t() @ embed_onehot
            if is_distributed():
                dist.all_reduce(embed_onehot_sum)
                dist.all_reduce(embed_sum)
            self.cluster_size.data.mul_(self.decay).add_(embed_onehot_sum, alpha=1 - self.decay)
            self.embed_avg.data.mul_(self.decay).add_(embed_sum.t(), alpha=1 - self.decay)
            # Laplace smoothing of cluster sizes avoids division by zero.
            n = self.cluster_size.sum()
            cluster_size = (self.cluster_size + self.epsilon) / (n + self.codebook_size * self.epsilon) * n
            embed_normalized = self.embed_avg / cluster_size.unsqueeze(1)
            self.embed.data.copy_(embed_normalized)
            self.expire_codes_(x)
        return quantize, embed_ind
class LogitsList():
    """A score plus per-example logits, serializable to a simple text format.

    Format: the score on the first line, then one space-separated row of
    logits per example.
    """

    def __init__(self, score: float, logits: List[List[float]]):
        self.score = score
        self.logits = logits

    def __repr__(self):
        return 'LogitsList(score={}, logits[:2]={})'.format(self.score, self.logits[:2])

    def save(self, path: str) -> None:
        """Write the score (first line) and one logits row per subsequent line."""
        with open(path, 'w') as fh:
            fh.write(str(self.score) + '\n')
            for example_logits in self.logits:
                fh.write(' '.join(str(logit) for logit in example_logits) + '\n')

    @staticmethod
    def load(path: str, with_score: bool=True) -> 'LogitsList':
        """Load a LogitsList from `path`.

        With `with_score=False` the first line is parsed as logits and the
        score stays -1. Restored the @staticmethod decorator: the original
        signature takes no `self`/`cls`, so instance access would otherwise
        misbind the first argument.
        """
        score = -1
        logits = []
        with open(path, 'r') as fh:
            for line_idx, line in enumerate(fh.readlines()):
                line = line.rstrip('\n')
                if line_idx == 0 and with_score:
                    score = float(line)
                else:
                    logits.append([float(x) for x in line.split()])
        return LogitsList(score=score, logits=logits)
def cli() -> ExitCode:
    """Entry point for the pipx command-line interface.

    Returns an ExitCode: PipxError and Ctrl-C map to exit code 1, while
    unexpected exceptions are logged at debug level and re-raised. The
    terminal cursor is hidden for the duration of the run and always restored
    in the finally block.
    """
    try:
        hide_cursor()
        parser = get_command_parser()
        argcomplete.autocomplete(parser)
        parsed_pipx_args = parser.parse_args()
        setup(parsed_pipx_args)
        check_args(parsed_pipx_args)
        if (not parsed_pipx_args.command):
            # No subcommand given: show usage and fail.
            parser.print_help()
            return ExitCode(1)
        return run_pipx_command(parsed_pipx_args)
    except PipxError as e:
        # Expected, user-facing errors: print the message; traceback only at debug level.
        print(str(e), file=sys.stderr)
        logger.debug(f'PipxError: {e}', exc_info=True)
        return ExitCode(1)
    except KeyboardInterrupt:
        return ExitCode(1)
    except Exception:
        # Unexpected failures: log for diagnosis, then propagate.
        logger.debug('Uncaught Exception:', exc_info=True)
        raise
    finally:
        logger.debug('pipx finished.')
        show_cursor()
class Timer(object):
    """Wall-clock timer that ignores the first 10 warm-up measurements.

    Usage: call `tic()` before the timed section and `toc()` after it.
    During warm-up, `toc` returns the raw elapsed time without updating the
    running statistics; afterwards it accumulates totals and returns either
    the running average or the last measurement.
    """

    def __init__(self):
        self.total_time = 0.0
        self.calls = 0
        self.start_time = 0.0
        self.diff = 0.0
        self.average_time = 0.0
        self.warm_up = 0

    def tic(self):
        # Record the start of the timed section.
        self.start_time = time.time()

    def toc(self, average=True):
        """Stop timing; return the average (default) or the last elapsed time."""
        self.diff = time.time() - self.start_time
        if self.warm_up < 10:
            # Still warming up: report but do not accumulate.
            self.warm_up += 1
            return self.diff
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        return self.average_time if average else self.diff
class TestSmtLibParserGriggio(TestCase):
    """Parse the Griggio SMT-LIB sample files and check the command sequences."""

    def test_griggio(self):
        for file_id in range(1, 7):
            script = self.parse(file_id)
            expected = TESTS[file_id]
            for i, cmd in enumerate(script):
                self.assertEqual(cmd.name, expected[i], ('Test %d: %s != %s ' % (file_id, cmd.name, expected[i])))
            # Round-trip: serialization must not raise.
            out = StringIO()
            script.serialize(out)
            self.assertTrue(True)

    def parse(self, file_id):
        """Parse the numbered SMT-LIB file and return its (non-None) script."""
        reset_env()
        parser = SmtLibParser()
        script = parser.get_script_fname(SMTLIB_FILE_PATTERN % file_id)
        self.assertIsNotNone(script)
        return script
class BoxCoderTest(tf.test.TestCase):
    """Exercise box_coder.batch_decode round-tripping through a mock coder."""

    def test_batch_decode(self):
        anchor_corners = tf.constant([[0, 0.1, 0.2, 0.3], [0.2, 0.4, 0.4, 0.6]], tf.float32)
        anchors = box_list.BoxList(anchor_corners)
        coder = MockBoxCoder()
        expected_boxes = [[[0.0, 0.1, 0.5, 0.6], [0.5, 0.6, 0.7, 0.8]], [[0.1, 0.2, 0.3, 0.4], [0.7, 0.8, 0.9, 1.0]]]
        # Encode each expected box set so decoding should reproduce it exactly.
        encoded_boxes = tf.stack([coder.encode(box_list.BoxList(tf.constant(boxes)), anchors) for boxes in expected_boxes])
        decoded_boxes = box_coder.batch_decode(encoded_boxes, coder, anchors)
        with self.test_session() as sess:
            self.assertAllClose(expected_boxes, sess.run(decoded_boxes))
def parse_args():
    """Build and parse the CLI arguments for converting MMPose models to ONNX."""
    parser = argparse.ArgumentParser(description='Convert MMPose models to ONNX')
    # Positional arguments.
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    # Optional flags and settings.
    parser.add_argument('--show', action='store_true', help='show onnx graph')
    parser.add_argument('--output-file', default='tmp.onnx', type=str)
    parser.add_argument('--opset-version', default=11, type=int)
    parser.add_argument('--verify', action='store_true', help='verify the onnx model output against pytorch output')
    parser.add_argument('--shape', nargs='+', type=int, default=[1, 3, 256, 192], help='input size')
    return parser.parse_args()
def mod_arith(q_format: str, a_format: str) -> QuizEntry:
    """Generate a modular-arithmetic quiz entry with a random answer in [0, 9]."""
    quotient = random.randint(30, 40)
    m = random.randint(10, 20)
    b = random.randint(200, 350)
    ans = random.randint(0, 9)
    # Constructed so that (a + b) % m == ans, since ans < 10 <= m.
    a = quotient * m + ans - b
    question = q_format.format(a, b, m)
    answer = a_format.format(ans)
    return QuizEntry(question, [answer], DYNAMICALLY_GEN_VARIATION_TOLERANCE)
class InviteQuerySet(models.QuerySet):
    """QuerySet helpers for filtering invites by site and per-user visibility."""

    def filter_current_site(self):
        # Limit to invites whose project belongs to the configured site.
        return self.filter(project__site=settings.SITE_ID)

    def filter_user(self, user):
        """Return the invites `user` is allowed to see."""
        if not user.is_authenticated:
            return self.none()
        if user.has_perm('projects.view_invite'):
            return self.all()
        if is_site_manager(user):
            return self.filter_current_site()
        # Regular users only see invites for projects they own.
        from .models import Project
        owned_projects = Project.objects.filter(memberships__user=user, memberships__role='owner')
        return self.filter(project__in=owned_projects)
class Effect6253(BaseEffect):
    """Passive bonus: Amarr Battleship skill-driven range boost for energy neutralizers."""

    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        bonus = src.getModifiedItemAttr('shipBonusAB')
        is_energy_neut = lambda mod: mod.item.group.name == 'Energy Neutralizer'
        fit.modules.filteredItemBoost(is_energy_neut, 'maxRange', bonus, skill='Amarr Battleship', **kwargs)
def prompt_user_for_preset_file(window: QtWidgets.QWidget, new_file: bool, name: (str | None)=None) -> (None | Path):
    """Ask the user to pick a Randovania preset file (None if cancelled)."""
    from randovania.layout.versioned_preset import VersionedPreset
    file_filter = f'Randovania Preset, *.{VersionedPreset.file_extension()};;All Files (*.*)'
    return _prompt_user_for_file(window, caption='Select a Randovania Preset file.', filter=file_filter, dir=name, new_file=new_file)
class KPartition(Sequence, _CutBase):
    """An immutable sequence of parts forming a K-way partition.

    `mechanism` and `purview` are accessed attribute-style inside this class
    (e.g. `self.mechanism + self.purview` in `indices`), so they are restored
    here as properties -- the extraction evidently stripped the decorators.
    `indices` is restored as a property and `from_json` as a classmethod on
    the same evidence (no-arg usage / `cls` first parameter).
    """

    __slots__ = ['parts', 'node_labels', '_mechanism', '_purview']

    def __init__(self, *parts, node_labels=None):
        self.parts = parts
        self.node_labels = node_labels
        # Caches for the lazily computed mechanism/purview properties.
        self._mechanism = None
        self._purview = None

    def __len__(self):
        return len(self.parts)

    def __bool__(self):
        return len(self) > 0

    def __getitem__(self, index):
        return self.parts[index]

    def __eq__(self, other):
        if not isinstance(other, KPartition):
            return NotImplemented
        return self.parts == other.parts

    def __hash__(self):
        return hash(self.parts)

    def __str__(self):
        return fmt.fmt_partition(self)

    def __repr__(self):
        return fmt.make_repr(self, ['parts', 'node_labels'])

    @property
    def mechanism(self):
        """Tuple of all mechanism elements across parts (cached)."""
        if self._mechanism is None:
            self._mechanism = tuple(chain.from_iterable(part.mechanism for part in self))
        return self._mechanism

    @property
    def purview(self):
        """Sorted tuple of all purview elements across parts (cached)."""
        if self._purview is None:
            self._purview = tuple(sorted(chain.from_iterable(part.purview for part in self)))
        return self._purview

    @property
    def indices(self):
        """All node indices appearing in either the mechanism or the purview."""
        return tuple(sorted(set(self.mechanism + self.purview)))

    def normalize(self):
        """Return this partition with its parts in canonical (sorted) order."""
        return type(self)(*sorted(self), node_labels=self.node_labels)

    def num_connections_cut(self):
        """Count mechanism-to-purview connections severed by this partition."""
        n = 0
        purview_lengths = [len(part.purview) for part in self.parts]
        for i, part in enumerate(self.parts):
            # Each mechanism element loses links to every *other* part's purview.
            n += len(part.mechanism) * (sum(purview_lengths[:i]) + sum(purview_lengths[(i + 1):]))
        return n

    def cut_matrix(self, n):
        """Return an n x n 0/1 matrix marking the cut connections."""
        cm = np.zeros((n, n))
        for part in self.parts:
            outside_part = tuple(set(self.purview) - set(part.purview))
            cm[np.ix_(part.mechanism, outside_part)] = 1
        return cm

    def to_json(self):
        return {'parts': list(self)}

    @classmethod
    def from_json(cls, dct):
        # Restored @classmethod: the signature takes `cls` as its first argument.
        return cls(*dct['parts'])
class FileReader(FileHandler):
    """File handler specialized for reading; writes are silently ignored."""

    def __repr__(self) -> str:
        details = f'path: {self.file_path}, open: {self.open}'
        return f'<{self.__class__.__name__} [{details}]>'

    def _open(self) -> BinaryIO:
        # Always binary mode; callers receive raw bytes.
        return open(self.file_path, 'rb')

    def read(self) -> bytes:
        """Return the remaining contents of the underlying file object."""
        return self.file.read()

    def write(self, data: bytes) -> None:
        """No-op: this handler is read-only by design."""
        pass
class KnownValues(unittest.TestCase):
    """Regression tests pinning IP-ADC(n) results for the shared `myadc` object.

    NOTE(review): the reference values below look truncated (e.g. `15.`,
    `(- 0.)`, `1.`) -- presumably placeholders for full-precision numbers
    lost in extraction; confirm against the upstream test file.
    """
    def test_ip_adc2(self):
        # Restrict to 2 core-valence-separated orbitals, IP-ADC(2).
        myadc.ncvs = 2
        myadc.method = 'adc(2)'
        myadc.method_type = 'ip'
        (e, t_amp1, t_amp2) = myadc.kernel_gs()
        self.assertAlmostEqual(e, (- 0.), 6)
        # Two ionization roots: energies and spectroscopic factors.
        (e, v, p, x) = myadc.kernel(nroots=2)
        self.assertAlmostEqual(e[0], 15., 6)
        self.assertAlmostEqual(e[1], 15., 6)
        self.assertAlmostEqual(p[0], 1., 6)
        self.assertAlmostEqual(p[1], 1., 6)
    def test_ip_adc2x(self):
        # Same system with the extended ADC(2)-x method, three roots.
        myadc.ncvs = 2
        myadc.method = 'adc(2)-x'
        myadc.method_type = 'ip'
        (e, t_amp1, t_amp2) = myadc.kernel_gs()
        self.assertAlmostEqual(e, (- 0.), 6)
        (e, v, p, x) = myadc.kernel(nroots=3)
        self.assertAlmostEqual(e[0], 15., 6)
        self.assertAlmostEqual(e[1], 15., 6)
        self.assertAlmostEqual(e[2], 15., 6)
        self.assertAlmostEqual(p[0], 1., 6)
        self.assertAlmostEqual(p[1], 1., 6)
        self.assertAlmostEqual(p[2], 3.044151e-07, 6)
    def test_ip_adc3(self):
        # Third-order IP-ADC(3), three roots.
        myadc.ncvs = 2
        myadc.method = 'adc(3)'
        myadc.method_type = 'ip'
        (e, t_amp1, t_amp2) = myadc.kernel_gs()
        self.assertAlmostEqual(e, (- 0.), 6)
        (e, v, p, x) = myadc.kernel(nroots=3)
        self.assertAlmostEqual(e[0], 15., 6)
        self.assertAlmostEqual(e[1], 15., 6)
        self.assertAlmostEqual(e[2], 15., 6)
        self.assertAlmostEqual(p[0], 1., 6)
        self.assertAlmostEqual(p[1], 1., 6)
        self.assertAlmostEqual(p[2], 3.0441505e-07, 6)
def make_loader_getter(*, shape: InputShape, name_layout: InputNameLayout, debug_trail: DebugTrail, strict_coercion: bool=True, debug_ctx: DebugCtx) -> Callable[([], Loader)]:
    """Return a zero-argument factory producing a Gauge loader for the given shape/layout."""
    def getter():
        recipe = [
            ValueProvider(InputShapeRequest, shape),
            ValueProvider(InputNameLayoutRequest, name_layout),
            bound(int, ValueProvider(LoaderRequest, int_loader)),
            ModelLoaderProvider(),
            debug_ctx.accum,
        ]
        configured = TestRetort(recipe=recipe).replace(debug_trail=debug_trail, strict_coercion=strict_coercion)
        return configured.get_loader(Gauge)
    return getter
class Examples(SegmentationBase):
    """sflckr example split with fixed CSV/image/segmentation paths."""

    def __init__(self, size=None, random_crop=False, interpolation='bicubic'):
        # Dataset locations are hard-wired; only the crop/scaling options vary.
        locations = dict(
            data_csv='data/sflckr_examples.txt',
            data_root='data/sflckr_images',
            segmentation_root='data/sflckr_segmentations',
        )
        super().__init__(size=size, random_crop=random_crop, interpolation=interpolation, **locations)
def haop_bf(filename: str, minunity: float):
    """Brute-force level-wise mining of high average-utility patterns.

    Reads a sequence database from ``filename``, then repeatedly generates
    candidate patterns of increasing length, counts their occurrences in
    every sequence, and keeps those whose utility reaches ``minunity``.

    Returns ``(f_level, freArr, elapsed_time, compnum)`` where ``freArr[k]``
    holds the accepted patterns of length ``k``, ``elapsed_time`` is in
    milliseconds and ``compnum`` counts candidates examined.
    """
    sdb = readfile(filename=filename)
    # Per-symbol utility table; presumably nucleotide weights — TODO confirm.
    mymap = {'a': 2, 'g': 3, 'c': 2, 't': 3}
    # Minimum support threshold derived from the utility threshold.
    upminsup = ceil((minunity / 3))
    begintime = time.time()
    # Length-1 frequent and candidate seeds.
    (freArr, canArr) = min_freItem(sdb, mymap, upminsup, minunity)
    f_level = 1
    candidate = gen_candidate(f_level, canArr)
    compnum = 0
    while (len(candidate) != 0):
        # Grow the per-level result containers on demand.
        if (len(freArr) <= f_level):
            freArr.append([])
        if (len(canArr) <= f_level):
            canArr.append([])
        for p in candidate:
            num = 0
            occnum = 0
            compnum += 1
            ptn_len = len(p)
            # Raw utility of the pattern: sum of its symbols' utilities.
            hupval = 0
            for s in range(ptn_len):
                hupval += mymap[p[s]]
            link_pan = Creat_ptn(p)
            for db in sdb:
                db: seqdb
                if (len(db.S) > 0):
                    s_length = len(db.S)
                    if (ptn_len > s_length):
                        # NOTE(review): this discards the count accumulated
                        # from earlier sequences whenever a later sequence is
                        # shorter than the pattern; a plain skip (leaving
                        # `num` untouched) may have been intended — confirm.
                        num = 0
                    else:
                        num += no_que(db.S, link_pan, ptn_len)
            link_pan.clear()
            occnum = num
            if (occnum >= upminsup):
                # Occurrence-weighted average utility of the pattern.
                hupval = ((occnum * hupval) / len(p))
                if (hupval >= minunity):
                    freArr[f_level].append(p)
                canArr[f_level].append(p)
        f_level += 1
        candidate.clear()
        candidate = gen_candidate(f_level, canArr)
    elapsed_time = ((time.time() - begintime) * 1000)
    return (f_level, freArr, elapsed_time, compnum)
def _ray_get_actor_cpus():
    """Return the number of CPUs assigned to the current Ray actor.

    Falls back to 1 when no CPU resource assignment is visible.
    """
    # Ray < 2.0 exposes per-worker resource ids via the ray.worker module.
    if (Version(ray.__version__) < Version('2.0.0')):
        resource_ids = ray.worker.get_resource_ids()
        if ('CPU' in resource_ids):
            # Each entry's second element is summed — presumably a
            # (resource_id, fraction) pair; confirm against the Ray version.
            return sum((cpu[1] for cpu in resource_ids['CPU']))
    else:
        # Ray >= 2.0: assigned resources come from the runtime context.
        resource_ids = ray.get_runtime_context().get_assigned_resources()
        # Keys may be plain 'CPU' or prefixed variants (e.g. placement groups);
        # the first matching key wins.
        for key in resource_ids.keys():
            if key.startswith('CPU'):
                return resource_ids[key]
    # Default when no CPU resources are reported.
    return 1
def test_coefs(ir_url, vis_url):
    """Cross-check calibration coefficients from the reader against CALIB_COEFS."""
    reader = GOESCoefficientReader(ir_url=ir_url, vis_url=vis_url)
    for platform, platform_coefs in CALIB_COEFS.items():
        for channel, coefs in platform_coefs.items():
            coefs_expected = reader.get_coefs(platform=platform, channel=channel)
            for cname in coefs_expected.keys():
                matches = np.allclose(coefs[cname], coefs_expected[cname])
                if not matches:
                    raise ValueError('Coefficient {} for {} channel {} does not match the reference'.format(cname, platform, channel))
    logger.info('Coefficients OK')
    return True
def wait_for_payment_balance(raiden: 'RaidenService', token_network_registry_address: TokenNetworkRegistryAddress, token_address: TokenAddress, partner_address: Address, target_address: Address, target_balance: TokenAmount, retry_timeout: float) -> None:
    """Block until the channel's payment balance for ``target_address`` reaches ``target_balance``."""
    waiter = ChannelStateWaiter(
        raiden,
        retry_timeout,
        token_network_registry_address,
        token_address,
        partner_address,
    )
    waiter.wait(ChannelHasPaymentBalance(target_address, target_balance))
class SourceAddCommand(Command):
    """Implementation of ``poetry source add``.

    Adds (or updates) a named package source in the project's pyproject.toml,
    handling the PyPI special case, the deprecated --default/--secondary
    flags, and duplicate-default validation.
    """
    name = 'source add'
    description = 'Add source configuration for project.'
    # Positional arguments: source name, plus an optional URL (forbidden for PyPI).
    arguments = [argument('name', 'Source repository name.'), argument('url', 'Source repository URL. Required, except for PyPI, for which it is not allowed.', optional=True)]
    # --default/--secondary are deprecated aliases for --priority.
    options = [option('default', 'd', 'Set this source as the default (disable PyPI). A default source will also be the fallback source if you add other sources. (<warning>Deprecated</warning>, use --priority)'), option('secondary', 's', 'Set this source as secondary. (<warning>Deprecated</warning>, use --priority)'), option('priority', 'p', f"Set the priority of this source. One of: {', '.join((p.name.lower() for p in Priority))}. Defaults to {Priority.PRIMARY.name.lower()}.", flag=False)]

    def handle(self) -> int:
        """Validate inputs, merge the source into the project config and save.

        Returns 0 on success, 1 on any validation error.
        """
        from poetry.factory import Factory
        from poetry.utils.source import source_to_table
        name: str = self.argument('name')
        lower_name = name.lower()
        url: str = self.argument('url')
        is_default: bool = self.option('default', False)
        is_secondary: bool = self.option('secondary', False)
        priority_str: (str | None) = self.option('priority', None)
        if (lower_name == 'pypi'):
            # PyPI's URL is implicit: normalise the name and forbid a custom URL.
            name = 'PyPI'
            if url:
                self.line_error('<error>The URL of PyPI is fixed and cannot be set.</error>')
                return 1
        elif (not url):
            # Every non-PyPI source requires an explicit URL.
            self.line_error('<error>A custom source cannot be added without a URL.</error>')
            return 1
        if (is_default and is_secondary):
            self.line_error('<error>Cannot configure a source as both <c1>default</c1> and <c1>secondary</c1>.</error>')
            return 1
        if (is_default or is_secondary):
            # Mixing the deprecated flags with --priority is ambiguous; reject.
            if (priority_str is not None):
                self.line_error('<error>Priority was passed through both --priority and a deprecated flag (--default or --secondary). Please only provide one of these.</error>')
                return 1
            else:
                self.line_error('<warning>Warning: Priority was set through a deprecated flag (--default or --secondary). Consider using --priority next time.</warning>')
        # Resolve the effective priority (deprecated flags win by construction).
        if is_default:
            priority = Priority.DEFAULT
        elif is_secondary:
            priority = Priority.SECONDARY
        elif (priority_str is None):
            priority = Priority.PRIMARY
        else:
            priority = Priority[priority_str.upper()]
        if (priority is Priority.SECONDARY):
            allowed_prios = (p for p in Priority if (p is not Priority.SECONDARY))
            self.line_error(f"<warning>Warning: Priority 'secondary' is deprecated. Consider changing the priority to one of the non-deprecated values: {', '.join((repr(p.name.lower()) for p in allowed_prios))}.</warning>")
        sources = AoT([])
        new_source = Source(name=name, url=url, priority=priority)
        is_new_source = True
        # Rebuild the source table, replacing an existing same-named entry.
        for source in self.poetry.get_sources():
            if ((source.priority is Priority.DEFAULT) and (priority is Priority.DEFAULT)):
                # At most one default source is allowed.
                self.line_error(f'<error>Source with name <c1>{source.name}</c1> is already set to default. Only one default source can be configured at a time.</error>')
                return 1
            if (source.name.lower() == lower_name):
                source = new_source
                is_new_source = False
            sources.append(source_to_table(source))
        if is_new_source:
            self.line(f'Adding source with name <c1>{name}</c1>.')
            sources.append(source_to_table(new_source))
        else:
            self.line(f'Source with name <c1>{name}</c1> already exists. Updating.')
        # Dry-run validation: the resulting pool must be constructible.
        try:
            pool = Factory.create_pool(self.poetry.config, sources, NullIO())
            pool.repository(name)
        except ValueError as e:
            self.line_error(f'<error>Failed to validate addition of <c1>{name}</c1>: {e}</error>')
            return 1
        self.poetry.pyproject.poetry_config['source'] = sources
        self.poetry.pyproject.save()
        return 0
class SequencerWidget(QtWidgets.QWidget):
    """Widget for building, saving, loading and queueing measurement sequences.

    Shows a tree of (parameter, expression) rows; nesting levels combine into
    a sweep. The parent is expected to be a ManagedWindow-like object
    exposing ``queue(procedure=...)``, ``displays``, ``procedure_class`` and
    ``make_procedure``.
    """

    def __init__(self, inputs=None, sequence_file=None, parent=None):
        super().__init__(parent)
        self._parent = parent
        # Fail fast if the parent's queue() cannot receive procedures.
        self._check_queue_signature()
        if (inputs is not None):
            self._inputs = inputs
        else:
            # Default to the parameters the parent window displays.
            self._inputs = self._parent.displays
        self._get_properties()
        self._setup_ui()
        self._layout()
        self.data = SequenceHandler(list(self.names_inv.keys()))
        self.tree.setModel(SequencerTreeModel(data=self.data))
        if (sequence_file is not None):
            self.load_sequence(filename=sequence_file)

    def _check_queue_signature(self):
        """Raise AttributeError unless parent.queue accepts a `procedure` kwarg."""
        call_signature = signature(self._parent.queue)
        if ('procedure' not in call_signature.parameters):
            raise AttributeError("The queue method of of the ManagedWindow does not accept the 'procedure'keyword argument. Accepting this keyword argument is required when usingthe 'SequencerWidget'.")

    def _get_properties(self):
        """Build name <-> key maps for the procedure parameters used as inputs."""
        parameter_objects = self._parent.procedure_class().parameter_objects()
        self.names = {key: parameter.name for (key, parameter) in parameter_objects.items() if (key in self._inputs)}
        self.names_inv = {name: key for (key, name) in self.names.items()}
        self.names_choices = list(sorted(self.names_inv.keys()))

    def _setup_ui(self):
        """Create the tree view, its column delegates, and all action buttons."""
        self.tree = SequencerTreeView(self)
        self.tree.setHeaderHidden(False)
        # Column 1: parameter picker; column 2: free-form expression editor.
        self.tree.setItemDelegateForColumn(1, ComboBoxDelegate(self, self.names_choices))
        self.tree.setItemDelegateForColumn(2, LineEditDelegate(self))
        self.load_seq_button = QtWidgets.QPushButton('Load sequence')
        self.load_seq_button.clicked.connect(self.load_sequence)
        self.load_seq_button.setToolTip('Load a sequence from a file.')
        self.save_seq_button = QtWidgets.QPushButton('Save sequence')
        self.save_seq_button.clicked.connect(self.save_sequence)
        self.save_seq_button.setToolTip('Save a sequence to a file.')
        self.queue_button = QtWidgets.QPushButton('Queue sequence')
        self.queue_button.clicked.connect(self.queue_sequence)
        self.add_root_item_btn = QtWidgets.QPushButton('Add root item')
        self.add_root_item_btn.clicked.connect(partial(self._add_tree_item, level=0))
        self.add_tree_item_btn = QtWidgets.QPushButton('Add item')
        self.add_tree_item_btn.clicked.connect(self._add_tree_item)
        self.remove_tree_item_btn = QtWidgets.QPushButton('Remove item')
        self.remove_tree_item_btn.clicked.connect(self._remove_selected_tree_item)

    def _layout(self):
        """Arrange buttons and tree in a vertical layout."""
        btn_box = QtWidgets.QHBoxLayout()
        btn_box.addWidget(self.load_seq_button)
        btn_box.addWidget(self.save_seq_button)
        btn_box_2 = QtWidgets.QHBoxLayout()
        btn_box_2.addWidget(self.add_root_item_btn)
        btn_box_2.addWidget(self.add_tree_item_btn)
        btn_box_2.addWidget(self.remove_tree_item_btn)
        btn_box_3 = QtWidgets.QHBoxLayout()
        btn_box_3.addWidget(self.queue_button)
        vbox = QtWidgets.QVBoxLayout(self)
        vbox.setSpacing(6)
        vbox.addLayout(btn_box)
        vbox.addWidget(self.tree)
        vbox.addLayout(btn_box_2)
        vbox.addLayout(btn_box_3)
        self.setLayout(vbox)

    def _add_tree_item(self, *, level=None, parameter=None):
        """Add a tree row under the current selection (or at root when level=0).

        :param level: when 0, force a root-level item regardless of selection.
        :param parameter: parameter name to preselect; defaults to the first choice.
        """
        selected = self.tree.selectionModel().selection().indexes()
        if ((len(selected) >= 1) and (level != 0)):
            parent = selected[0]
        else:
            parent = None
        if (parameter is None):
            parameter = self.names_choices[0]
        model = self.tree.model()
        node_index = model.add_node(parameter=parameter, parent=parent)
        # Keep the parameter combo box editor open for the new row.
        self.tree.openPersistentEditor(model.index(node_index.row(), 1, parent))
        self.tree.expandAll()
        self.tree.selectRow(node_index)

    def _remove_selected_tree_item(self):
        """Remove the currently selected row, reselecting a neighbour if any."""
        selected = self.tree.selectionModel().selection().indexes()
        if (len(selected) == 0):
            return
        node_index = self.tree.model().remove_node(selected[0])
        if node_index.isValid():
            self.tree.selectRow(node_index)

    def get_sequence(self):
        """Evaluate the tree into a list of parameter combinations."""
        return self.data.parameters_sequence(self.names_inv)

    def queue_sequence(self):
        """Queue one procedure per entry in the evaluated sequence.

        The queue button is disabled while queueing to prevent re-entry.
        """
        self.queue_button.setEnabled(False)
        try:
            sequence = self.get_sequence()
        except SequenceEvaluationError:
            log.error('Evaluation of one of the sequence strings went wrong, no sequence queued.')
        else:
            log.info(('Queuing %d measurements based on the entered sequences.' % len(sequence)))
            for entry in sequence:
                QtWidgets.QApplication.processEvents()
                # Inner (deeper) levels take precedence, hence the reversal.
                parameters = dict(ChainMap(*entry[::(- 1)]))
                procedure = self._parent.make_procedure()
                procedure.set_parameters(parameters)
                self._parent.queue(procedure=procedure)
        finally:
            self.queue_button.setEnabled(True)

    def save_sequence(self):
        """Prompt for a file name and write the current tree to it."""
        dialog = SequenceDialog(save=True)
        if dialog.exec():
            filename = dialog.selectedFiles()[0]
            with open(filename, 'w') as file_object:
                self.tree.save(file_object)
            log.info(('Saved sequence file %s' % filename))

    def load_sequence(self, *, filename=None):
        """Load a sequence file into the tree, prompting when no filename is given."""
        append_flag = False
        if ((filename is None) or (filename == '')):
            dialog = SequenceDialog()
            if dialog.exec():
                # The dialog offers appending to the current tree.
                append_flag = (dialog.append_checkbox.checkState() == QtCore.Qt.CheckState.Checked)
                filenames = dialog.selectedFiles()
                filename = filenames[0]
            else:
                return
        with open(filename, 'r') as file_object:
            self.tree.model().load(file_object, append=append_flag)
        self.tree.expandAll()
class ArchiveUtilTestCase(support.TempdirManager):
    """Tests for distutils' ``archive_util``: tarball/zipfile creation helpers.

    BUG FIX: the decorators below had lost their prefixes in this copy of the
    source (lines read ``.usefixtures(...)``, ``.skipif(...)``, ``_unix_id``,
    ``_uid_0``), which is a syntax error. They are restored here as
    ``@pytest.mark.*`` and the module-level ``@require_unix_id`` /
    ``@require_uid_0`` markers — verify against the upstream file.
    """

    @pytest.mark.usefixtures('needs_zlib')
    def test_make_tarball(self, name='archive'):
        # Create a tree and roll it into .tar.gz and plain .tar archives.
        tmpdir = self._create_files()
        self._make_tarball(tmpdir, name, '.tar.gz')
        self._make_tarball(tmpdir, name, '.tar', compress=None)

    @pytest.mark.usefixtures('needs_zlib')
    def test_make_tarball_gzip(self):
        tmpdir = self._create_files()
        self._make_tarball(tmpdir, 'archive', '.tar.gz', compress='gzip')

    def test_make_tarball_bzip2(self):
        pytest.importorskip('bz2')
        tmpdir = self._create_files()
        self._make_tarball(tmpdir, 'archive', '.tar.bz2', compress='bzip2')

    def test_make_tarball_xz(self):
        pytest.importorskip('lzma')
        tmpdir = self._create_files()
        self._make_tarball(tmpdir, 'archive', '.tar.xz', compress='xz')

    @pytest.mark.skipif("not can_fs_encode('archiv')")
    def test_make_tarball_latin1(self):
        # NOTE(review): non-ASCII characters appear to have been stripped from
        # these archive names ('archiv' and '' below) — confirm upstream.
        self.test_make_tarball('archiv')

    @pytest.mark.skipif("not can_fs_encode('')")
    def test_make_tarball_extended(self):
        self.test_make_tarball('')

    def _make_tarball(self, tmpdir, target_name, suffix, **kwargs):
        """Archive tmpdir/dist into a second temp dir and verify its contents."""
        tmpdir2 = self.mkdtemp()
        if same_drive(tmpdir, tmpdir2):
            pytest.skip('source and target should be on same drive')
        base_name = os.path.join(tmpdir2, target_name)
        # Work with a relative path to the archive root.
        with path.Path(tmpdir):
            make_tarball(splitdrive(base_name)[1], 'dist', **kwargs)
        tarball = (base_name + suffix)
        assert os.path.exists(tarball)
        assert (self._tarinfo(tarball) == self._created_files)

    def _tarinfo(self, path):
        """Return the sorted member names of the tar archive at *path*."""
        tar = tarfile.open(path)
        try:
            names = tar.getnames()
            names.sort()
            return names
        finally:
            tar.close()

    # Expected archive contents; zip keeps trailing slashes on directories.
    _zip_created_files = ['dist/', 'dist/file1', 'dist/file2', 'dist/sub/', 'dist/sub/file3', 'dist/sub2/']
    _created_files = [p.rstrip('/') for p in _zip_created_files]

    def _create_files(self):
        """Create the dist/ tree used by all archive tests; return its parent dir."""
        tmpdir = self.mkdtemp()
        dist = os.path.join(tmpdir, 'dist')
        os.mkdir(dist)
        self.write_file([dist, 'file1'], 'xxx')
        self.write_file([dist, 'file2'], 'xxx')
        os.mkdir(os.path.join(dist, 'sub'))
        self.write_file([dist, 'sub', 'file3'], 'xxx')
        os.mkdir(os.path.join(dist, 'sub2'))
        return tmpdir

    @pytest.mark.usefixtures('needs_zlib')
    @pytest.mark.skipif("not (find_executable('tar') and find_executable('gzip'))")
    def test_tarfile_vs_tar(self):
        """make_tarball output must match what the system tar + gzip produce."""
        tmpdir = self._create_files()
        tmpdir2 = self.mkdtemp()
        base_name = os.path.join(tmpdir2, 'archive')
        old_dir = os.getcwd()
        os.chdir(tmpdir)
        try:
            make_tarball(base_name, 'dist')
        finally:
            os.chdir(old_dir)
        tarball = (base_name + '.tar.gz')
        assert os.path.exists(tarball)
        # Build the same archive with the external tools for comparison.
        tarball2 = os.path.join(tmpdir, 'archive2.tar.gz')
        tar_cmd = ['tar', '-cf', 'archive2.tar', 'dist']
        gzip_cmd = ['gzip', '-f', '-9', 'archive2.tar']
        old_dir = os.getcwd()
        os.chdir(tmpdir)
        try:
            spawn(tar_cmd)
            spawn(gzip_cmd)
        finally:
            os.chdir(old_dir)
        assert os.path.exists(tarball2)
        assert (self._tarinfo(tarball) == self._created_files)
        assert (self._tarinfo(tarball2) == self._created_files)
        # Uncompressed variant.
        base_name = os.path.join(tmpdir2, 'archive')
        old_dir = os.getcwd()
        os.chdir(tmpdir)
        try:
            make_tarball(base_name, 'dist', compress=None)
        finally:
            os.chdir(old_dir)
        tarball = (base_name + '.tar')
        assert os.path.exists(tarball)
        # Dry run: the previously created archive must still exist untouched.
        base_name = os.path.join(tmpdir2, 'archive')
        old_dir = os.getcwd()
        os.chdir(tmpdir)
        try:
            make_tarball(base_name, 'dist', compress=None, dry_run=True)
        finally:
            os.chdir(old_dir)
        tarball = (base_name + '.tar')
        assert os.path.exists(tarball)

    @pytest.mark.skipif("not find_executable('compress')")
    def test_compress_deprecated(self):
        """'compress' must emit exactly one deprecation warning but still work."""
        tmpdir = self._create_files()
        base_name = os.path.join(self.mkdtemp(), 'archive')
        old_dir = os.getcwd()
        os.chdir(tmpdir)
        try:
            with check_warnings() as w:
                warnings.simplefilter('always')
                make_tarball(base_name, 'dist', compress='compress')
        finally:
            os.chdir(old_dir)
        tarball = (base_name + '.tar.Z')
        assert os.path.exists(tarball)
        assert (len(w.warnings) == 1)
        os.remove(tarball)
        # Dry run: warns, but must not create the archive.
        old_dir = os.getcwd()
        os.chdir(tmpdir)
        try:
            with check_warnings() as w:
                warnings.simplefilter('always')
                make_tarball(base_name, 'dist', compress='compress', dry_run=True)
        finally:
            os.chdir(old_dir)
        assert (not os.path.exists(tarball))
        assert (len(w.warnings) == 1)

    @pytest.mark.usefixtures('needs_zlib')
    def test_make_zipfile(self):
        zipfile = pytest.importorskip('zipfile')
        tmpdir = self._create_files()
        base_name = os.path.join(self.mkdtemp(), 'archive')
        with path.Path(tmpdir):
            make_zipfile(base_name, 'dist')
        tarball = (base_name + '.zip')
        assert os.path.exists(tarball)
        with zipfile.ZipFile(tarball) as zf:
            assert (sorted(zf.namelist()) == self._zip_created_files)

    def test_make_zipfile_no_zlib(self):
        """Without zlib, make_zipfile must fall back to ZIP_STORED."""
        zipfile = pytest.importorskip('zipfile')
        patch(self, archive_util.zipfile, 'zlib', None)
        called = []
        zipfile_class = zipfile.ZipFile

        def fake_zipfile(*a, **kw):
            # Record calls that use the uncompressed fallback.
            if (kw.get('compression', None) == zipfile.ZIP_STORED):
                called.append((a, kw))
            return zipfile_class(*a, **kw)

        patch(self, archive_util.zipfile, 'ZipFile', fake_zipfile)
        tmpdir = self._create_files()
        base_name = os.path.join(self.mkdtemp(), 'archive')
        with path.Path(tmpdir):
            make_zipfile(base_name, 'dist')
        tarball = (base_name + '.zip')
        assert (called == [((tarball, 'w'), {'compression': zipfile.ZIP_STORED})])
        assert os.path.exists(tarball)
        with zipfile.ZipFile(tarball) as zf:
            assert (sorted(zf.namelist()) == self._zip_created_files)

    def test_check_archive_formats(self):
        assert (check_archive_formats(['gztar', 'xxx', 'zip']) == 'xxx')
        assert (check_archive_formats(['gztar', 'bztar', 'xztar', 'ztar', 'tar', 'zip']) is None)

    def test_make_archive(self):
        tmpdir = self.mkdtemp()
        base_name = os.path.join(tmpdir, 'archive')
        with pytest.raises(ValueError):
            make_archive(base_name, 'xxx')

    def test_make_archive_cwd(self):
        """make_archive must restore the CWD even when the archiver raises."""
        current_dir = os.getcwd()

        def _breaks(*args, **kw):
            raise RuntimeError()

        ARCHIVE_FORMATS['xxx'] = (_breaks, [], 'xxx file')
        try:
            try:
                make_archive('xxx', 'xxx', root_dir=self.mkdtemp())
            except Exception:
                pass
            assert (os.getcwd() == current_dir)
        finally:
            ARCHIVE_FORMATS.pop('xxx')

    def test_make_archive_tar(self):
        base_dir = self._create_files()
        base_name = os.path.join(self.mkdtemp(), 'archive')
        res = make_archive(base_name, 'tar', base_dir, 'dist')
        assert os.path.exists(res)
        assert (os.path.basename(res) == 'archive.tar')
        assert (self._tarinfo(res) == self._created_files)

    @pytest.mark.usefixtures('needs_zlib')
    def test_make_archive_gztar(self):
        base_dir = self._create_files()
        base_name = os.path.join(self.mkdtemp(), 'archive')
        res = make_archive(base_name, 'gztar', base_dir, 'dist')
        assert os.path.exists(res)
        assert (os.path.basename(res) == 'archive.tar.gz')
        assert (self._tarinfo(res) == self._created_files)

    def test_make_archive_bztar(self):
        pytest.importorskip('bz2')
        base_dir = self._create_files()
        base_name = os.path.join(self.mkdtemp(), 'archive')
        res = make_archive(base_name, 'bztar', base_dir, 'dist')
        assert os.path.exists(res)
        assert (os.path.basename(res) == 'archive.tar.bz2')
        assert (self._tarinfo(res) == self._created_files)

    def test_make_archive_xztar(self):
        pytest.importorskip('lzma')
        base_dir = self._create_files()
        base_name = os.path.join(self.mkdtemp(), 'archive')
        res = make_archive(base_name, 'xztar', base_dir, 'dist')
        assert os.path.exists(res)
        assert (os.path.basename(res) == 'archive.tar.xz')
        assert (self._tarinfo(res) == self._created_files)

    def test_make_archive_owner_group(self):
        """owner/group arguments must be accepted (even bogus ones for zip/tar)."""
        if UID_0_SUPPORT:
            group = grp.getgrgid(0)[0]
            owner = pwd.getpwuid(0)[0]
        else:
            group = owner = 'root'
        base_dir = self._create_files()
        root_dir = self.mkdtemp()
        base_name = os.path.join(self.mkdtemp(), 'archive')
        res = make_archive(base_name, 'zip', root_dir, base_dir, owner=owner, group=group)
        assert os.path.exists(res)
        res = make_archive(base_name, 'zip', root_dir, base_dir)
        assert os.path.exists(res)
        res = make_archive(base_name, 'tar', root_dir, base_dir, owner=owner, group=group)
        assert os.path.exists(res)
        res = make_archive(base_name, 'tar', root_dir, base_dir, owner='kjhkjhkjg', group='oihohoh')
        assert os.path.exists(res)

    @pytest.mark.usefixtures('needs_zlib')
    @require_unix_id
    @require_uid_0
    def test_tarfile_root_owner(self):
        """Running as root, archive members must be owned by uid/gid 0."""
        tmpdir = self._create_files()
        base_name = os.path.join(self.mkdtemp(), 'archive')
        old_dir = os.getcwd()
        os.chdir(tmpdir)
        group = grp.getgrgid(0)[0]
        owner = pwd.getpwuid(0)[0]
        try:
            archive_name = make_tarball(base_name, 'dist', compress=None, owner=owner, group=group)
        finally:
            os.chdir(old_dir)
        assert os.path.exists(archive_name)
        archive = tarfile.open(archive_name)
        try:
            for member in archive.getmembers():
                assert (member.uid == 0)
                assert (member.gid == 0)
        finally:
            archive.close()
class TestNutsCheckTrace():
    """NUTS sampling smoke tests: log output, bad initial points, sampler stats.

    BUG FIX: ``test_bad_init_parallel`` had a bare ``.skipif(...)`` line — a
    decorator that lost its ``@pytest.mark`` prefix (syntax error); restored.
    """

    def test_multiple_samplers(self, caplog):
        # Sampling a Beta/Binomial model must not log boolean-index warnings.
        with pm.Model():
            prob = pm.Beta('prob', alpha=5.0, beta=3.0)
            pm.Binomial('outcome', n=1, p=prob)
            caplog.clear()
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore', '.*number of samples.*', UserWarning)
                pm.sample(3, tune=2, discard_tuned_samples=False, n_init=None, chains=1)
            messages = [msg.msg for msg in caplog.records]
            assert all((('boolean index did not' not in msg) for msg in messages))

    def test_bad_init_nonparallel(self):
        # An impossible initial value must fail with a SamplingError.
        with pm.Model():
            pm.HalfNormal('a', sigma=1, initval=(- 1), transform=None)
            with pytest.raises(SamplingError) as error:
                pm.sample(chains=1, random_seed=1)
            error.match('Initial evaluation')

    @pytest.mark.skipif((sys.version_info < (3, 6)), reason='requires python3.6 or higher')
    def test_bad_init_parallel(self):
        # Same as above but across parallel chains.
        with pm.Model():
            pm.HalfNormal('a', sigma=1, initval=(- 1), transform=None)
            with pytest.raises(SamplingError) as error:
                pm.sample(cores=2, random_seed=1)
            error.match('Initial evaluation')

    def test_emits_energy_warnings(self, caplog):
        # A numerically pathological model should emit 'Energy change' messages.
        with pm.Model():
            a = pm.Normal('a', size=2, initval=floatX(np.zeros(2)))
            a = pt.switch((a > 0), np.inf, a)
            b = pt.slinalg.solve(floatX(np.eye(2)), a, check_finite=False)
            pm.Normal('c', mu=b, size=2, initval=floatX(np.r_[(0.0, 0.0)]))
            caplog.clear()
            with caplog.at_level(logging.DEBUG, logger='pymc'):
                idata = pm.sample(20, tune=5, chains=2, random_seed=526)
            assert any((('Energy change' in w.msg) for w in caplog.records))

    def test_sampler_stats(self):
        """The trace must expose the full NUTS stat set plus matching logp values."""
        with pm.Model() as model:
            pm.Normal('x', mu=0, sigma=1)
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore', '.*number of samples.*', UserWarning)
                trace = pm.sample(draws=10, tune=1, chains=1, return_inferencedata=False)
        expected_stat_names = {'depth', 'diverging', 'energy', 'energy_error', 'model_logp', 'max_energy_error', 'mean_tree_accept', 'step_size', 'step_size_bar', 'tree_size', 'tune', 'perf_counter_diff', 'perf_counter_start', 'process_time_diff', 'reached_max_treedepth', 'index_in_trajectory', 'largest_eigval', 'smallest_eigval', 'warning'}
        assert (trace.stat_names == expected_stat_names)
        for varname in trace.stat_names:
            if (varname == 'warning'):
                # Warning objects have no uniform shape.
                continue
            assert (trace.get_sampler_stats(varname).shape == (10,))
        # Recorded model_logp must equal a fresh logp evaluation per draw.
        model_logp_fn = model.compile_logp()
        model_logp_ = np.array([model_logp_fn(trace.point(i, chain=c)) for c in trace.chains for i in range(len(trace))])
        assert (trace.model_logp == model_logp_).all()
class OtherModelNodeStorageParameter(Parameter):
    """Parameter exposing the current storage volume of a node in another model.

    ``other_model`` and ``node`` are names resolved to live objects at
    ``setup()`` time via the parent model collection.
    """

    def __init__(self, model, other_model, node, **kwargs):
        super().__init__(model, **kwargs)
        self.other_model = other_model  # name of the sibling model in the parent
        self.node = node  # name of the storage node within that model
        self._other_model = None
        self._other_model_node = None

    def setup(self):
        super().setup()
        # Resolve names to objects once, at setup time.
        self._other_model = self.model.parent.models[self.other_model]
        self._other_model_node = self._other_model.nodes[self.node]

    def value(self, ts, scenario_index):
        # Current volume of the foreign node for this scenario.
        return self._other_model_node.volume[scenario_index.global_id]

    @classmethod
    def load(cls, model, data):
        # BUG FIX: this factory takes `cls` but was missing the @classmethod
        # decorator, so calling it on the class would mis-bind arguments.
        return cls(model, **data)
def walk_resources(package_or_requirement, resource_name, recurse=True, base=''):
    """Yield relative paths of files under a package resource directory.

    Dotfiles and compiled/backup artifacts (.pyc, .pyo, ~) are skipped.
    Subdirectories are descended into when ``recurse`` is true.
    """
    base = base.rstrip('/') + '/'
    resource_base = (resource_name.rstrip('/') + '/' + base.strip('/')).rstrip('/')
    for entry in pymagic.resource_listdir(package_or_requirement, resource_base):
        if entry.startswith('.') or entry.endswith(('.pyc', '.pyo', '~')):
            continue
        if pymagic.resource_isdir(package_or_requirement, resource_base + '/' + entry):
            if recurse:
                yield from walk_resources(package_or_requirement, resource_name, recurse, base=base + entry)
        else:
            yield base + entry
class ResourceObserver():
    """Bundle of optional callbacks fired on resource lifecycle events.

    Any callback left as ``None`` turns the corresponding notification into
    a no-op, so callers can subscribe to only the events they care about.
    """

    def __init__(self, changed=None, moved=None, created=None, removed=None, validate=None):
        self.changed = changed
        self.moved = moved
        self.created = created
        self.removed = removed
        self._validate = validate

    def resource_changed(self, resource):
        """Notify that *resource* was modified."""
        if self.changed is None:
            return
        self.changed(resource)

    def resource_moved(self, resource, new_resource):
        """Notify that *resource* was moved to *new_resource*."""
        if self.moved is None:
            return
        self.moved(resource, new_resource)

    def resource_created(self, resource):
        """Notify that *resource* was created."""
        if self.created is None:
            return
        self.created(resource)

    def resource_removed(self, resource):
        """Notify that *resource* was removed."""
        if self.removed is None:
            return
        self.removed(resource)

    def validate(self, resource):
        """Run the optional validation hook on *resource*."""
        if self._validate is None:
            return
        self._validate(resource)
class StringStrategy(object):
    """Abstract storage strategy for string objects.

    Concrete strategies implement the storage-specific accessors; the shared
    comparison helpers below are written purely against those accessors.

    FIX: removed the dead ``i += 1`` statements that followed the ``for``
    loops in ``cmp`` and ``cmp_case_insensitive`` — ``range`` already drives
    the index (and, if the statements sat at function level, they would raise
    NameError whenever the compared length was zero).
    """
    __metaclass__ = SingletonMeta

    def make_mutable(self, w_str):
        raise NotImplementedError('abstract base class')

    def as_str_ascii(self, w_str):
        raise ValueError("can't convert")

    def as_str_utf8(self, w_str):
        raise NotImplementedError('abstract base class')

    def as_unicode(self, w_str):
        raise NotImplementedError('abstract base class')

    def as_charlist_ascii(self, w_str):
        raise ValueError("can't convert")

    def as_charlist_utf8(self, w_str):
        raise NotImplementedError('abstract base class')

    def as_unicharlist(self, w_str):
        raise NotImplementedError('abstract base class')

    def length(self, w_str):
        raise NotImplementedError('abstract base class')

    def getitem(self, w_str, index):
        raise NotImplementedError('abstract base class')

    def getslice(self, w_str, start, stop):
        raise NotImplementedError('abstract base class')

    def eq(self, w_str, w_other):
        """Element-wise equality; short-circuits on length mismatch."""
        length = self.length(w_str)
        if (length != w_other.length()):
            return False
        otherstrategy = w_other.get_strategy()
        for i in range(length):
            if (self.getitem(w_str, i) != otherstrategy.getitem(w_other, i)):
                return False
        return True

    def cmp(self, w_str, w_other):
        """Lexicographic three-way compare (negative / zero / positive)."""
        len1 = self.length(w_str)
        len2 = w_other.length()
        if (len1 < len2):
            cmplen = len1
        else:
            cmplen = len2
        otherstrategy = w_other.get_strategy()
        for i in range(cmplen):
            diff = (ord(self.getitem(w_str, i)) - ord(otherstrategy.getitem(w_other, i)))
            if diff:
                return diff
        # Common prefix equal: shorter string sorts first.
        return (len1 - len2)

    def cmp_case_insensitive(self, w_str, w_other):
        """Like ``cmp`` but folds both sides with unicodedb.tolower first."""
        len1 = self.length(w_str)
        len2 = w_other.length()
        if (len1 < len2):
            cmplen = len1
        else:
            cmplen = len2
        otherstrategy = w_other.get_strategy()
        for i in range(cmplen):
            ch1 = unicodedb.tolower(ord(self.getitem(w_str, i)))
            ch2 = unicodedb.tolower(ord(otherstrategy.getitem(w_other, i)))
            diff = (ch1 - ch2)
            if diff:
                return diff
        return (len1 - len2)

    def hash(self, w_str):
        # All strategies hash via the unicode expansion for consistency.
        return compute_hash(w_str.as_unicode())

    def upper(self, w_str):
        raise NotImplementedError('abstract base class')

    def lower(self, w_str):
        raise NotImplementedError('abstract base class')

    def setitem(self, w_str, index, unichar):
        raise NotImplementedError('abstract base class')

    def setslice(self, w_str, index, w_from, fromstart, fromend):
        raise NotImplementedError('abstract base class')
def _flatten_pkcs1_examples(vectors):
    """Expand (params, key, [examples]) vectors into one tuple per example.

    Note: ``'examples'`` is popped from each params mapping, so the input
    vectors are mutated.
    """
    flattened = []
    for vector in vectors:
        params = vector[0]
        for example in params.pop('examples'):
            flattened.append((params, vector[1], example))
    return flattened
def format_(rows, limit=15, sort='size', order='descending'):
    """Render object-summary rows as a table string, sorted and truncated.

    ``sort`` picks the column ('type', '#', 'size'); ``order`` is
    'ascending' or 'descending'; at most ``limit`` rows are shown.
    """
    localrows = [list(row) for row in rows]
    sortby = ['type', '#', 'size']
    if sort not in sortby:
        raise ValueError('invalid sort, should be one of' + str(sortby))
    orders = ['ascending', 'descending']
    if order not in orders:
        raise ValueError('invalid order, should be one of' + str(orders))
    col = sortby.index(sort)
    # The type column sorts by its textual representation; the numeric
    # columns sort by value.
    if col == 0:
        keyfunc = lambda row: _repr(row[0])
    else:
        keyfunc = lambda row: row[col]
    localrows.sort(key=keyfunc, reverse=(order == 'descending'))
    localrows = localrows[0:limit]
    # Pretty-print the size column.
    for row in localrows:
        row[2] = stringutils.pp(row[2])
    localrows.insert(0, ['types', '# objects', 'total size'])
    return _format_table(localrows)
@pytest.mark.parametrize(('start', 'end', 'expected'), [(0, 0, 'a = "hello"\n'), (1, 1, 'b = [\n    "a",\n    "very",\n    "very",\n    "very",\n    "very",\n    "very",\n    "very",\n    "very",\n    "very",\n    "long",\n    "line",\n]\n'), (2, 2, 'c = 42\n'), (0, 2, 'a = "hello"\nb = [\n    "a",\n    "very",\n    "very",\n    "very",\n    "very",\n    "very",\n    "very",\n    "very",\n    "very",\n    "long",\n    "line",\n]\nc = 42\n')])
def test_pylsp_format_range(config, unformatted_document, start, end, expected):
    """Formatting a line range yields one edit covering [start, end+1) with the expected text.

    BUG FIX: the parametrize decorator had lost its ``@pytest.mark`` prefix
    (syntax error as written); also renamed the local that shadowed the
    builtin ``range`` (the ``range=`` keyword passed to the plugin is kept).
    """
    fmt_range = {'start': {'line': start, 'character': 0}, 'end': {'line': end, 'character': 0}}
    result = pylsp_format_range(config, unformatted_document, range=fmt_range)
    assert (result == [{'range': {'start': {'line': start, 'character': 0}, 'end': {'line': (end + 1), 'character': 0}}, 'newText': expected}])
def package_directory_arg(arg: str) -> pathlib.Path:
    """argparse type callable: resolve *arg* to a readable package directory.

    Raises argparse.ArgumentTypeError when the directory cannot be accessed.
    """
    pkg_dir = pathlib.Path(arg).expanduser().resolve()
    try:
        # Probe listability without materialising the whole listing.
        entries = pkg_dir.iterdir()
        next(entries, None)
    except OSError as exc:
        message = f'Error: while trying to access package directory ({pkg_dir}): {exc}'
        raise argparse.ArgumentTypeError(message)
    return pkg_dir
class StatsView():
    """Registry base class for fit-statistics panel views.

    Subclasses define a class-level ``name`` and call ``register()`` once;
    registered classes are later looked up via ``getView``.

    BUG FIX: ``register`` and ``getView`` take ``cls`` but were missing the
    ``@classmethod`` decorator, so calling them on a class raised TypeError.
    """
    # Shared registry mapping view name -> view class.
    views = {}

    def __init__(self):
        pass

    @classmethod
    def register(cls):
        StatsView.views[cls.name] = cls

    @classmethod
    def getView(cls, name):
        return cls.views[name]

    def populatePanel(self, panel):
        """Abstract: fill the given UI panel with this view's widgets."""
        raise NotImplementedError()

    def getHeaderText(self, fit):
        """Abstract: return the header text for *fit*."""
        raise NotImplementedError()

    def refreshPanel(self, fit):
        """Abstract: refresh displayed values for *fit*."""
        raise NotImplementedError()
def ql_syscall_clock_time(ql: Qiling, id, new, old, *args, **kw):
    """Emulate a clock get/set syscall; only clock id 0 is supported.

    A non-zero ``new`` pointer (set time) is rejected with -1; a non-zero
    ``old`` pointer receives the current host time in nanoseconds.
    """
    if id not in clock_types:
        raise NotImplementedError(f'Unknown clock id {id} not implemented')
    if id != 0:
        raise NotImplementedError(f'Clock type {clock_types[id]} not implemented')
    if new != 0:
        # Guest asked to *set* the clock: unsupported, log and fail.
        clock_new = ql.unpack64(ql.mem.read(new, 8))
        ql.log.warn(f'syscall_clock_time(id = {clock_types[id]}, new = {clock_new}) set time not supported')
        return -1
    if old != 0:
        # Read back the guest's old value for logging, then write host time.
        clock_old = ql.unpack64(ql.mem.read(old, 8))
        ql.log.debug(f'syscall_clock_time(id = {clock_types[id]}, old = {clock_old})')
        ql.mem.write_ptr(old, time_ns(), 8)
    return 0
class PretrainedVocab(BaseVocab):
    """Vocabulary backed by a torchtext pretrained embedding (e.g. GloVe)."""

    def __init__(self, embedding_name, *args, **kwargs):
        """Build the vocab: 4 reserved special tokens, then all embedding words.

        The special tokens inherited from ``BaseVocab`` (PAD/SOS/EOS/UNK)
        occupy indices 0-3; pretrained words start at index 4 and copy their
        vectors from the embedding.

        :param embedding_name: key into ``vocab.pretrained_aliases``.
        :raises RuntimeError: if ``embedding_name`` is not a known alias.
        """
        self.type = 'pretrained'
        if (embedding_name not in vocab.pretrained_aliases):
            from pythia.common.registry import registry
            writer = registry.get('writer')
            # NOTE(review): `error` is a (message, 'error') tuple and 'error'
            # is also passed to write() — looks accidental; confirm intent.
            error = (('Unknown embedding type: %s' % embedding_name), 'error')
            if (writer is not None):
                writer.write(error, 'error')
            raise RuntimeError(error)
        # Downloaded vectors are cached under the pythia root directory.
        vector_cache = os.path.join(get_pythia_root(), '.vector_cache')
        embedding = vocab.pretrained_aliases[embedding_name](cache=vector_cache)
        self.UNK_INDEX = 3
        # Unknown words map to UNK_INDEX by default.
        self.stoi = defaultdict((lambda : self.UNK_INDEX))
        self.itos = {}
        self.itos[self.PAD_INDEX] = self.PAD_TOKEN
        self.itos[self.SOS_INDEX] = self.SOS_TOKEN
        self.itos[self.EOS_INDEX] = self.EOS_TOKEN
        self.itos[self.UNK_INDEX] = self.UNK_TOKEN
        self.stoi[self.SOS_TOKEN] = self.SOS_INDEX
        self.stoi[self.EOS_TOKEN] = self.EOS_INDEX
        self.stoi[self.PAD_TOKEN] = self.PAD_INDEX
        self.stoi[self.UNK_TOKEN] = self.UNK_INDEX
        # Vector table: 4 special slots + one row per pretrained word.
        self.vectors = torch.FloatTensor((len(self.itos.keys()) + len(embedding.itos)), len(embedding.vectors[0]))
        # Special tokens get constant vectors 0.0, 0.1, 0.2, 0.3 respectively.
        for i in range(4):
            self.vectors[i] = ((torch.ones_like(self.vectors[i]) * 0.1) * i)
        index = 4
        # Append every embedding word, copying its pretrained vector.
        for word in embedding.stoi:
            self.itos[index] = word
            self.stoi[word] = index
            actual_index = embedding.stoi[word]
            self.vectors[index] = embedding.vectors[actual_index]
            index += 1
def normal_order_integrals(n_qubits, n_occupied, array_to_normal_order, array_mapping, h1_old, h2_old, h1_new, h2_new):
a_enum = []
adag_enum = []
for ind in range(n_qubits):
if (ind in n_occupied):
a_enum.append((- (ind + 1)))
adag_enum.append((ind + 1))
else:
a_enum.append((ind + 1))
adag_enum.append((- (ind + 1)))
array_to_sort = []
for (ind, _) in enumerate(array_to_normal_order):
if (array_mapping[ind] == 'adag'):
array_to_sort.append(adag_enum[array_to_normal_order[ind]])
elif (array_mapping[ind] == 'a'):
array_to_sort.append(a_enum[array_to_normal_order[ind]])
sign = ((- 1.0) ** sort(array_to_sort)[1])
array_sorted = sort(array_to_sort)[0]
ind_ini_term = array_to_normal_order
mapping_no_term = []
ind_no_term = []
sign_no_term = sign
for ind in array_sorted:
if (ind in a_enum):
mapping_no_term.append('a')
ind_no_term.append(a_enum.index(ind))
elif (ind in adag_enum):
mapping_no_term.append('adag')
ind_no_term.append(adag_enum.index(ind))
i_i = 0
j_j = 1
k_k = 2
l_l = 3
id_term = 0.0
if (len(array_to_normal_order) == 2):
if (ind_no_term[0] == ind_no_term[1]):
if (mapping_no_term == ['adag', 'a']):
temp_sign_h1 = float((1 * sign_no_term))
ind_old_h1 = [ind_ini_term[i_i], ind_ini_term[j_j]]
ind_new_h1 = [ind_no_term[i_i], ind_no_term[j_j]]
h1_new[ind_new_h1[0]][ind_new_h1[1]] += float((temp_sign_h1 * h1_old[ind_old_h1[0]][ind_old_h1[1]]))
elif (mapping_no_term == ['a', 'adag']):
temp_sign_h1 = float(((- 1) * sign_no_term))
ind_old_h1 = [ind_ini_term[i_i], ind_ini_term[j_j]]
ind_new_h1 = [ind_no_term[j_j], ind_no_term[i_i]]
h1_new[ind_new_h1[0]][ind_new_h1[1]] += float((temp_sign_h1 * h1_old[ind_old_h1[0]][ind_old_h1[1]]))
id_term += float((sign_no_term * h1_old[ind_old_h1[0]][ind_old_h1[1]]))
elif (mapping_no_term == ['adag', 'a']):
temp_sign_h1 = float((1 * sign_no_term))
ind_old_h1 = [ind_ini_term[i_i], ind_ini_term[j_j]]
ind_new_h1 = [ind_no_term[i_i], ind_no_term[j_j]]
h1_new[ind_new_h1[0]][ind_new_h1[1]] += float((temp_sign_h1 * h1_old[ind_old_h1[0]][ind_old_h1[1]]))
elif (mapping_no_term == ['a', 'adag']):
temp_sign_h1 = float(((- 1) * sign_no_term))
ind_old_h1 = [ind_ini_term[i_i], ind_ini_term[j_j]]
ind_new_h1 = [ind_no_term[j_j], ind_no_term[i_i]]
h1_new[ind_new_h1[0]][ind_new_h1[1]] += float((temp_sign_h1 * h1_old[ind_old_h1[0]][ind_old_h1[1]]))
elif (len(array_to_normal_order) == 4):
if (len(set(ind_no_term)) == 4):
if (mapping_no_term == ['adag', 'adag', 'a', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[j_j], ind_no_term[k_k], ind_no_term[l_l]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'adag', 'a']):
temp_sign_h2 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[k_k], ind_no_term[j_j], ind_no_term[l_l]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'a', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[l_l], ind_no_term[j_j], ind_no_term[k_k]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'adag', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[j_j], ind_no_term[k_k], ind_no_term[i_i], ind_no_term[l_l]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'a', 'adag']):
temp_sign_h2 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[j_j], ind_no_term[l_l], ind_no_term[i_i], ind_no_term[k_k]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'a', 'adag', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[k_k], ind_no_term[l_l], ind_no_term[i_i], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
else:
print('ERROR 1')
elif (len(set(ind_no_term)) == 3):
if (ind_no_term[0] == ind_no_term[1]):
if (mapping_no_term == ['adag', 'adag', 'a', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[i_i], ind_no_term[k_k], ind_no_term[l_l]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'adag', 'a']):
temp_sign_h2 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[k_k], ind_no_term[i_i], ind_no_term[l_l]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'a', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[l_l], ind_no_term[i_i], ind_no_term[k_k]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'adag', 'a']):
temp_sign_h2 = (1 * sign_no_term)
temp_sign_h1 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[k_k], ind_no_term[i_i], ind_no_term[l_l]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[k_k], ind_no_term[l_l]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'a', 'adag']):
temp_sign_h2 = ((- 1) * sign_no_term)
temp_sign_h1 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[l_l], ind_no_term[i_i], ind_no_term[k_k]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[l_l], ind_no_term[k_k]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'a', 'adag', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[k_k], ind_no_term[l_l], ind_no_term[i_i], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
else:
print('ERROR 2')
elif (ind_no_term[0] == ind_no_term[2]):
if (mapping_no_term == ['adag', 'adag', 'a', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[j_j], ind_no_term[i_i], ind_no_term[l_l]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'adag', 'a']):
temp_sign_h2 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[i_i], ind_no_term[j_j], ind_no_term[l_l]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'a', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[l_l], ind_no_term[j_j], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'adag', 'a']):
temp_sign_h2 = (1 * sign_no_term)
temp_sign_h1 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[j_j], ind_no_term[i_i], ind_no_term[i_i], ind_no_term[l_l]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[j_j], ind_no_term[l_l]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'a', 'adag']):
temp_sign_h2 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[j_j], ind_no_term[l_l], ind_no_term[i_i], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'a', 'adag', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
temp_sign_h1 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[l_l], ind_no_term[i_i], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[l_l], ind_no_term[j_j]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
else:
print('ERROR 3')
elif (ind_no_term[0] == ind_no_term[3]):
if (mapping_no_term == ['adag', 'adag', 'a', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[j_j], ind_no_term[k_k], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'adag', 'a']):
temp_sign_h2 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[k_k], ind_no_term[j_j], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'a', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[i_i], ind_no_term[j_j], ind_no_term[k_k]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'adag', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[j_j], ind_no_term[k_k], ind_no_term[i_i], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'a', 'adag']):
temp_sign_h2 = ((- 1) * sign_no_term)
temp_sign_h1 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[j_j], ind_no_term[i_i], ind_no_term[i_i], ind_no_term[k_k]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[j_j], ind_no_term[k_k]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'a', 'adag', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
temp_sign_h1 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[k_k], ind_no_term[i_i], ind_no_term[i_i], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[k_k], ind_no_term[j_j]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
else:
print('ERROR 4')
elif (ind_no_term[1] == ind_no_term[2]):
if (mapping_no_term == ['adag', 'adag', 'a', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[j_j], ind_no_term[j_j], ind_no_term[l_l]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'adag', 'a']):
temp_sign_h2 = ((- 1) * sign_no_term)
temp_sign_h1 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[j_j], ind_no_term[j_j], ind_no_term[l_l]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[i_i], ind_no_term[l_l]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'a', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[l_l], ind_no_term[j_j], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'adag', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[j_j], ind_no_term[j_j], ind_no_term[i_i], ind_no_term[l_l]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'a', 'adag']):
temp_sign_h2 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[j_j], ind_no_term[l_l], ind_no_term[i_i], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'a', 'adag', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
temp_sign_h1 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[j_j], ind_no_term[l_l], ind_no_term[i_i], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[l_l], ind_no_term[i_i]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
else:
print('ERROR 5')
elif (ind_no_term[1] == ind_no_term[3]):
if (mapping_no_term == ['adag', 'adag', 'a', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[j_j], ind_no_term[k_k], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'adag', 'a']):
temp_sign_h2 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[k_k], ind_no_term[j_j], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'a', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
temp_sign_h1 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[j_j], ind_no_term[j_j], ind_no_term[k_k]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[i_i], ind_no_term[k_k]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'adag', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[j_j], ind_no_term[k_k], ind_no_term[i_i], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'a', 'adag']):
temp_sign_h2 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[j_j], ind_no_term[j_j], ind_no_term[i_i], ind_no_term[k_k]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'a', 'adag', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
temp_sign_h1 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[k_k], ind_no_term[j_j], ind_no_term[i_i], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[k_k], ind_no_term[i_i]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
else:
print('ERROR 6')
elif (ind_no_term[2] == ind_no_term[3]):
if (mapping_no_term == ['adag', 'adag', 'a', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[j_j], ind_no_term[k_k], ind_no_term[k_k]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'adag', 'a']):
temp_sign_h2 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[k_k], ind_no_term[j_j], ind_no_term[k_k]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'a', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
temp_sign_h1 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[k_k], ind_no_term[j_j], ind_no_term[k_k]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[i_i], ind_no_term[j_j]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'adag', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[j_j], ind_no_term[k_k], ind_no_term[i_i], ind_no_term[k_k]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'a', 'adag']):
temp_sign_h2 = ((- 1) * sign_no_term)
temp_sign_h1 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[j_j], ind_no_term[k_k], ind_no_term[i_i], ind_no_term[k_k]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[j_j], ind_no_term[i_i]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'a', 'adag', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[k_k], ind_no_term[k_k], ind_no_term[i_i], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
else:
print('ERROR 7')
else:
print('ERROR 8')
elif (len(set(ind_no_term)) == 2):
if ((ind_no_term[0] == ind_no_term[1]) and (ind_no_term[2] == ind_no_term[3])):
if (mapping_no_term == ['adag', 'adag', 'a', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[i_i], ind_no_term[k_k], ind_no_term[k_k]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'adag', 'a']):
temp_sign_h2 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[k_k], ind_no_term[i_i], ind_no_term[k_k]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'a', 'adag']):
temp_sign_h2 = ((- 1) * sign_no_term)
temp_sign_h1 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[k_k], ind_no_term[k_k], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[i_i], ind_no_term[i_i]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'adag', 'a']):
temp_sign_h2 = (1 * sign_no_term)
temp_sign_h1 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[k_k], ind_no_term[i_i], ind_no_term[k_k]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[k_k], ind_no_term[k_k]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'a', 'adag']):
temp_sign_h2 = ((- 1) * sign_no_term)
temp_sign_h1_1 = ((- 1) * sign_no_term)
temp_sign_h1_2 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[k_k], ind_no_term[i_i], ind_no_term[k_k]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
coordinates_for_old_h1_term_1 = [ind_no_term[i_i], ind_no_term[i_i]]
ind_old_h1_2 = [ind_no_term[k_k], ind_no_term[k_k]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[coordinates_for_old_h1_term_1[0]][coordinates_for_old_h1_term_1[1]] += ((0.5 * temp_sign_h1_1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1_2[0]][ind_old_h1_2[1]] += ((0.5 * temp_sign_h1_2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
id_term += ((0.5 * sign_no_term) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'a', 'adag', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[k_k], ind_no_term[k_k], ind_no_term[i_i], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
else:
print('ERROR')
elif ((ind_no_term[0] == ind_no_term[2]) and (ind_no_term[1] == ind_no_term[3])):
if (mapping_no_term == ['adag', 'adag', 'a', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[j_j], ind_no_term[i_i], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'adag', 'a']):
temp_sign_h2 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[i_i], ind_no_term[j_j], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'a', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
temp_sign_h1 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[j_j], ind_no_term[j_j], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[i_i], ind_no_term[i_i]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'adag', 'a']):
temp_sign_h2 = (1 * sign_no_term)
temp_sign_h1 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[j_j], ind_no_term[i_i], ind_no_term[i_i], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[j_j], ind_no_term[j_j]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'a', 'adag']):
temp_sign_h2 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[j_j], ind_no_term[j_j], ind_no_term[i_i], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'a', 'adag', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
temp_sign_h1_1 = (1 * sign_no_term)
temp_sign_h1_2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[j_j], ind_no_term[i_i], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
coordinates_for_old_h1_term_1 = [ind_no_term[i_i], ind_no_term[i_i]]
ind_old_h1_2 = [ind_no_term[j_j], ind_no_term[j_j]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[coordinates_for_old_h1_term_1[0]][coordinates_for_old_h1_term_1[1]] += ((0.5 * temp_sign_h1_1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1_2[0]][ind_old_h1_2[1]] += ((0.5 * temp_sign_h1_2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
id_term += (((- 0.5) * sign_no_term) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
else:
print('ERROR')
elif ((ind_no_term[0] == ind_no_term[3]) and (ind_no_term[1] == ind_no_term[2])):
if (mapping_no_term == ['adag', 'adag', 'a', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[j_j], ind_no_term[j_j], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'adag', 'a']):
temp_sign_h2 = ((- 1) * sign_no_term)
temp_sign_h1 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[j_j], ind_no_term[j_j], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[i_i], ind_no_term[i_i]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'a', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[i_i], ind_no_term[j_j], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'adag', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[j_j], ind_no_term[j_j], ind_no_term[i_i], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'a', 'adag']):
temp_sign_h2 = ((- 1) * sign_no_term)
temp_sign_h1 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[j_j], ind_no_term[i_i], ind_no_term[i_i], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[j_j], ind_no_term[j_j]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'a', 'adag', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
temp_sign_h1_1 = ((- 1) * sign_no_term)
temp_sign_h1_2 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[j_j], ind_no_term[i_i], ind_no_term[i_i], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
coordinates_for_old_h1_term_1 = [ind_no_term[i_i], ind_no_term[i_i]]
ind_old_h1_2 = [ind_no_term[j_j], ind_no_term[j_j]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[coordinates_for_old_h1_term_1[0]][coordinates_for_old_h1_term_1[1]] += ((0.5 * temp_sign_h1_1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1_2[0]][ind_old_h1_2[1]] += ((0.5 * temp_sign_h1_2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
id_term += ((0.5 * sign_no_term) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
else:
print('ERROR')
elif ((ind_no_term[0] == ind_no_term[1]) and (ind_no_term[0] == ind_no_term[2])):
if (mapping_no_term == ['adag', 'adag', 'a', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[i_i], ind_no_term[i_i], ind_no_term[l_l]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'adag', 'a']):
temp_sign_h2 = ((- 1) * sign_no_term)
temp_sign_h1_1 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[i_i], ind_no_term[i_i], ind_no_term[l_l]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
coordinates_for_old_h1_term_1 = [ind_no_term[i_i], ind_no_term[l_l]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[coordinates_for_old_h1_term_1[0]][coordinates_for_old_h1_term_1[1]] += ((0.5 * temp_sign_h1_1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'a', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[l_l], ind_no_term[i_i], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'adag', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[i_i], ind_no_term[i_i], ind_no_term[l_l]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'a', 'adag']):
temp_sign_h2 = ((- 1) * sign_no_term)
temp_sign_h1_1 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[l_l], ind_no_term[i_i], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
coordinates_for_old_h1_term_1 = [ind_no_term[l_l], ind_no_term[i_i]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[coordinates_for_old_h1_term_1[0]][coordinates_for_old_h1_term_1[1]] += ((0.5 * temp_sign_h1_1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'a', 'adag', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[l_l], ind_no_term[i_i], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
else:
print('ERROR')
elif ((ind_no_term[0] == ind_no_term[1]) and (ind_no_term[0] == ind_no_term[3])):
if (mapping_no_term == ['adag', 'adag', 'a', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[i_i], ind_no_term[i_i], ind_no_term[k_k]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'adag', 'a']):
temp_sign_h2 = ((- 1) * sign_no_term)
temp_sign_h1 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[i_i], ind_no_term[i_i], ind_no_term[k_k]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[i_i], ind_no_term[k_k]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'a', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[k_k], ind_no_term[i_i], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'adag', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[i_i], ind_no_term[i_i], ind_no_term[k_k]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'a', 'adag']):
temp_sign_h2 = ((- 1) * sign_no_term)
temp_sign_h1 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[k_k], ind_no_term[i_i], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[k_k], ind_no_term[i_i]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'a', 'adag', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[k_k], ind_no_term[i_i], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
else:
print('ERROR')
elif ((ind_no_term[0] == ind_no_term[2]) and (ind_no_term[0] == ind_no_term[3])):
if (mapping_no_term == ['adag', 'adag', 'a', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[j_j], ind_no_term[i_i], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'adag', 'a']):
temp_sign_h2 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[i_i], ind_no_term[j_j], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'a', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
temp_sign_h1 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[i_i], ind_no_term[j_j], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[i_i], ind_no_term[j_j]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'adag', 'a']):
temp_sign_h2 = ((- 1) * sign_no_term)
temp_sign_h1 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[j_j], ind_no_term[i_i], ind_no_term[i_i], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[j_j], ind_no_term[i_i]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'a', 'adag']):
temp_sign_h2 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[j_j], ind_no_term[i_i], ind_no_term[i_i], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'a', 'adag', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[i_i], ind_no_term[i_i], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
else:
print('ERROR')
elif ((ind_no_term[1] == ind_no_term[2]) and (ind_no_term[1] == ind_no_term[3])):
if (mapping_no_term == ['adag', 'adag', 'a', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[j_j], ind_no_term[j_j], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'adag', 'a']):
temp_sign_h2 = ((- 1) * sign_no_term)
temp_sign_h1 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[j_j], ind_no_term[j_j], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[i_i], ind_no_term[j_j]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['adag', 'a', 'a', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[j_j], ind_no_term[j_j], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'adag', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[j_j], ind_no_term[j_j], ind_no_term[i_i], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'adag', 'a', 'adag']):
temp_sign_h2 = ((- 1) * sign_no_term)
temp_sign_h1 = ((- 1) * sign_no_term)
ind_new_h2 = [ind_no_term[j_j], ind_no_term[j_j], ind_no_term[i_i], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
ind_old_h1 = [ind_no_term[j_j], ind_no_term[i_i]]
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
h1_new[ind_old_h1[0]][ind_old_h1[1]] += ((0.5 * temp_sign_h1) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'a', 'adag', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[j_j], ind_no_term[j_j], ind_no_term[i_i], ind_no_term[j_j]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
else:
print('ERROR')
else:
print('ERROR')
if (len(set(ind_no_term)) == 1):
if (mapping_no_term == ['adag', 'adag', 'a', 'a']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[i_i], ind_no_term[i_i], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
elif (mapping_no_term == ['a', 'a', 'adag', 'adag']):
temp_sign_h2 = (1 * sign_no_term)
ind_new_h2 = [ind_no_term[i_i], ind_no_term[i_i], ind_no_term[i_i], ind_no_term[i_i]]
ind_old_h2 = [ind_ini_term[0], ind_ini_term[1], ind_ini_term[2], ind_ini_term[3]]
ind_old_h2 = last_two_indices_swap(ind_old_h2)
h2_new[ind_new_h2[0]][ind_new_h2[1]][ind_new_h2[2]][ind_new_h2[3]] += ((0.5 * temp_sign_h2) * h2_old[ind_old_h2[0]][ind_old_h2[1]][ind_old_h2[2]][ind_old_h2[3]])
else:
print('ERROR')
return (h1_new, h2_new, id_term) |
def parse_args():
    """Parse command-line arguments for the annotation-generation script.

    Returns:
        argparse.Namespace with:
            root_path (str): root dir containing images and annotations.
            n_proc (int): number of worker processes (defaults to 1).
    """
    parser = ArgumentParser(description='Generate training and validation set of OpenVINO annotations for Open Images by cropping box image.')
    parser.add_argument('root_path', help='Root dir containing images and annotations')
    # BUG FIX: argparse ignores `default=` on a required positional argument;
    # making it optional with nargs='?' lets the default of 1 actually apply.
    parser.add_argument('n_proc', nargs='?', default=1, type=int, help='Number of processes to run')
    args = parser.parse_args()
    return args
def parse_args():
    """Build and run the CLI parser for converting inputs to tf.Record format.

    Returns:
        argparse.Namespace holding the parsed conversion options.
    """
    cli = argparse.ArgumentParser(
        description='convert inputs to tf.Record format',
        usage='input_converter.py [<args>] [-h | --help]',
    )
    # Required paths: a source/target input pair and a matching vocabulary pair.
    cli.add_argument('--input', required=True, type=str, nargs=2, help='Path of input file')
    cli.add_argument('--output_name', required=True, type=str, help='Output name')
    cli.add_argument('--output_dir', required=True, type=str, help='Output directory')
    cli.add_argument('--vocab', nargs=2, required=True, type=str, help='Path of vocabulary')
    # Sharding / shuffling behavior.
    cli.add_argument('--num_shards', default=100, type=int, help='Number of output shards')
    cli.add_argument('--shuffle', action='store_true', help='Shuffle inputs')
    # Special-token symbols used during conversion.
    cli.add_argument('--unk', default='<unk>', type=str, help='Unknown word symbol')
    cli.add_argument('--eos', default='<eos>', type=str, help='End of sentence symbol')
    return cli.parse_args()
class _RPN(nn.Module):
def __init__(self, din):
super(_RPN, self).__init__()
self.din = din
self.anchor_scales = cfg.ANCHOR_SCALES
self.anchor_ratios = cfg.ANCHOR_RATIOS
self.feat_stride = cfg.FEAT_STRIDE[0]
self.RPN_Conv = nn.Conv2d(self.din, 512, 3, 1, 1, bias=True)
self.nc_score_out = ((len(self.anchor_scales) * len(self.anchor_ratios)) * 2)
self.RPN_cls_score = nn.Conv2d(512, self.nc_score_out, 1, 1, 0)
self.nc_bbox_out = ((len(self.anchor_scales) * len(self.anchor_ratios)) * 4)
self.RPN_bbox_pred = nn.Conv2d(512, self.nc_bbox_out, 1, 1, 0)
self.RPN_proposal = _ProposalLayer(self.feat_stride, self.anchor_scales, self.anchor_ratios)
self.RPN_anchor_target = _AnchorTargetLayer(self.feat_stride, self.anchor_scales, self.anchor_ratios)
self.rpn_loss_cls = 0
self.rpn_loss_box = 0
def reshape(x, d):
input_shape = x.size()
x = x.view(input_shape[0], int(d), int((float((input_shape[1] * input_shape[2])) / float(d))), input_shape[3])
return x
def forward(self, base_feat, im_info, gt_boxes, num_boxes):
batch_size = base_feat.size(0)
rpn_conv1 = F.relu(self.RPN_Conv(base_feat), inplace=True)
rpn_cls_score = self.RPN_cls_score(rpn_conv1)
rpn_cls_score_reshape = self.reshape(rpn_cls_score, 2)
rpn_cls_prob_reshape = F.softmax(rpn_cls_score_reshape, dim=1)
rpn_cls_prob = self.reshape(rpn_cls_prob_reshape, self.nc_score_out)
rpn_bbox_pred = self.RPN_bbox_pred(rpn_conv1)
cfg_key = ('TRAIN' if self.training else 'TEST')
(rois, output_cls_score) = self.RPN_proposal((rpn_cls_prob.data, rpn_bbox_pred.data, im_info, cfg_key))
self.rpn_loss_cls = 0
self.rpn_loss_box = 0
if self.training:
assert (gt_boxes is not None)
rpn_data = self.RPN_anchor_target((rpn_cls_score.data, gt_boxes, im_info, num_boxes))
rpn_cls_score = rpn_cls_score_reshape.permute(0, 2, 3, 1).contiguous().view(batch_size, (- 1), 2)
rpn_label = rpn_data[0].view(batch_size, (- 1))
rpn_keep = Variable(rpn_label.view((- 1)).ne((- 1)).nonzero().view((- 1)))
rpn_cls_score = torch.index_select(rpn_cls_score.view((- 1), 2), 0, rpn_keep)
rpn_label = torch.index_select(rpn_label.view((- 1)), 0, rpn_keep.data)
rpn_label = Variable(rpn_label.long())
self.rpn_loss_cls = F.cross_entropy(rpn_cls_score, rpn_label)
fg_cnt = torch.sum(rpn_label.data.ne(0))
(rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights) = rpn_data[1:]
rpn_bbox_inside_weights = Variable(rpn_bbox_inside_weights)
rpn_bbox_outside_weights = Variable(rpn_bbox_outside_weights)
rpn_bbox_targets = Variable(rpn_bbox_targets)
self.rpn_loss_box = _smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights, sigma=3, dim=[1, 2, 3])
return (rois, output_cls_score, self.rpn_loss_cls, self.rpn_loss_box) |
def pre_load_checkpoint(checkpoint_dir):
    """Look up the most recent TensorFlow checkpoint in ``checkpoint_dir``.

    Returns:
        (epoch_step, checkpoint_path) when a checkpoint exists; the epoch
        step is parsed from the filename's ``name-<step>`` suffix.
        (0, None) when no checkpoint is found.
    """
    state = tf.train.get_checkpoint_state(checkpoint_dir)
    if not (state and state.model_checkpoint_path):
        return (0, None)
    path = state.model_checkpoint_path
    print(' [*] Reading checkpoint from {}'.format(path))
    # Filenames follow the "<name>-<step>" convention used by tf.train.Saver.
    step = int(os.path.basename(path).split('-')[1])
    return (step, path)
class TaskState(object):
    """Mutable state flags for a simulated task (Richards-benchmark style).

    Tracks three booleans — packet pending, waiting, holding — and exposes
    chainable mutators (each returns ``self``) plus boolean queries.
    """

    def __init__(self):
        # A freshly created task starts with a packet pending.
        self.packet_pending, self.task_waiting, self.task_holding = True, False, False

    def packetPending(self):
        """Enter the packet-pending state (not waiting, not holding)."""
        self.packet_pending, self.task_waiting, self.task_holding = True, False, False
        return self

    def waiting(self):
        """Enter the waiting state with no packet pending."""
        self.packet_pending, self.task_waiting, self.task_holding = False, True, False
        return self

    def running(self):
        """Enter the running state (all flags cleared)."""
        self.packet_pending, self.task_waiting, self.task_holding = False, False, False
        return self

    def waitingWithPacket(self):
        """Enter the waiting state while a packet is pending."""
        self.packet_pending, self.task_waiting, self.task_holding = True, True, False
        return self

    def isPacketPending(self):
        """Return True if a packet is pending."""
        return self.packet_pending

    def isTaskWaiting(self):
        """Return True if the task is waiting."""
        return self.task_waiting

    def isTaskHolding(self):
        """Return True if the task is holding."""
        return self.task_holding

    def isTaskHoldingOrWaiting(self):
        """Return True if holding, or waiting without a packet pending."""
        if self.task_holding:
            return True
        return self.task_waiting and not self.packet_pending

    def isWaitingWithPacket(self):
        """Return True if waiting with a packet pending and not holding."""
        return not self.task_holding and self.task_waiting and self.packet_pending
# NOTE(review): the bare "(Petition)" below looks like the argument list of a
# stripped decorator — presumably "@admin.register(Petition)"; confirm against
# the original source / version control.
(Petition)
class PetitionAdmin(admin.ModelAdmin):
    # Django admin configuration for Petition objects: custom change form,
    # title search, and signature counters in the list view.
    change_form_template = 'petition/petition_change_form.html'
    form = PetitionAdminForm
    search_fields = ('title',)
    list_display = ('title', 'non_confirmed_signature_number', 'confirmed_signature_number')
    # NOTE(review): the 'fieldsets' literal below is corrupted — several
    # 'newsletter_subscribe_...' field names are truncated mid-string
    # ("'newsletter_subscribe_ '" repeated), which is not valid Python.
    # Restore the full field names from version control before use.
    fieldsets = ((gettext_lazy('To whom is this petition?'), {'fields': ('org', 'user')}), (gettext_lazy('Content of the petition'), {'fields': ('title', 'text', 'side_text', 'footer_text', 'footer_links', 'sign_form_footer', 'target', 'paper_signatures_enabled', 'paper_signatures')}), (gettext_lazy('Background color'), {'fields': ('linear_gradient_direction', 'gradient_from', 'gradient_to', 'bgcolor')}), (gettext_lazy('Setup of the newsletter associated to the petition'), {'fields': ('has_newsletter', 'newsletter_text', 'newsletter_subscribe_method', 'newsletter_subscribe_ 'newsletter_subscribe_ 'newsletter_subscribe_ 'newsletter_subscribe_mail_subject', 'newsletter_subscribe_mail_from', 'newsletter_subscribe_mail_to', 'newsletter_subscribe_mail_smtp_host', 'newsletter_subscribe_mail_smtp_port', 'newsletter_subscribe_mail_smtp_user', 'newsletter_subscribe_mail_smtp_password', 'newsletter_subscribe_mail_smtp_tls', 'newsletter_subscribe_mail_smtp_starttls')}), (gettext_lazy('Confirmation email setup'), {'fields': ('confirmation_email_reply',)}), (gettext_lazy('Petition meta-data for social networks'), {'fields': ('twitter_description', 'twitter_image', 'org_twitter_handle')}), (gettext_lazy('Publish the petition'), {'fields': ('published',)}))

    def non_confirmed_signature_number(self, petition):
        """Number of signatures on ``petition`` that are not yet confirmed."""
        return petition.get_signature_number(confirmed=False)
    non_confirmed_signature_number.short_description = gettext_lazy('Unconfirmed signatures')

    def confirmed_signature_number(self, petition):
        """Number of confirmed signatures on ``petition``."""
        return petition.get_signature_number(confirmed=True)
    confirmed_signature_number.short_description = gettext_lazy('Confirmed signatures')
def application_status(cerberus_url, start_time, end_time):
    """Query Cerberus for route failures during the [start_time, end_time] window.

    Args:
        cerberus_url: base URL where Cerberus publishes its True/False signal.
        start_time, end_time: window boundaries in seconds (epoch).

    Returns:
        (status, failed_routes): status is False if any 'route' component
        failed during the window; failed_routes is the set of failing names.

    Exits the process (code 1) when the URL is missing or scraping fails.
    """
    if not cerberus_url:
        logging.error('url where Cerberus publishes True/False signal is not provided.')
        sys.exit(1)
    # Cerberus' /history endpoint takes the lookback window in minutes.
    duration = (end_time - start_time) / 60
    url = '{}/history?loopback={}'.format(cerberus_url, duration)
    logging.info('Scraping the metrics for the test duration from cerberus url: %s', url)
    status = True
    failed_routes = []
    try:
        payload = json.loads(requests.get(url, timeout=60).content)
        for failure in payload['history']['failures']:
            if failure['component'] != 'route':
                continue
            failed_routes.append(failure['name'])
            status = False
    except Exception as err:
        logging.error('Failed to scrape metrics from cerberus API at %s: %s', url, err)
        sys.exit(1)
    return (status, set(failed_routes))
# NOTE(review): the bare "()" below looks like the call parentheses of a
# stripped decorator — presumably "@pytest.fixture()"; confirm against the
# original source / version control.
()
def hsd_file_jp01(tmp_path):
    """Write a fake Himawari AHI HSD segment file (JP01 area) and return its path.

    Builds a single structured-array record that mirrors the on-disk HSD
    block layout (basic info, data info, projection, navigation, calibration,
    segment, correction, timing, error and spare blocks plus the image
    payload), fills in the header fields, and dumps it to ``tmp_path``.
    """
    from satpy.readers.ahi_hsd import _BASIC_INFO_TYPE, _CAL_INFO_TYPE, _DATA_INFO_TYPE, _ERROR_INFO_TYPE, _ERROR_LINE_INFO_TYPE, _INTER_CALIBRATION_INFO_TYPE, _NAV_INFO_TYPE, _NAVIGATION_CORRECTION_INFO_TYPE, _NAVIGATION_CORRECTION_SUBINFO_TYPE, _OBSERVATION_LINE_TIME_INFO_TYPE, _OBSERVATION_TIME_INFO_TYPE, _PROJ_INFO_TYPE, _SEGMENT_INFO_TYPE, _SPARE_TYPE, _VISCAL_INFO_TYPE
    # Image dimensions of the JP01 sector.
    nrows = 11000
    ncols = 11000
    filename = (tmp_path / 'somedata.DAT')
    # This fixture carries no error-line, navigation-correction records;
    # six observation-time records.
    error_lines = 0
    number_nav_corrections = 0
    number_observation_times = 6
    # One structured record mirroring the HSD block layout on disk.
    dat_type = np.dtype([('block1', _BASIC_INFO_TYPE), ('block2', _DATA_INFO_TYPE), ('block3', _PROJ_INFO_TYPE), ('block4', _NAV_INFO_TYPE), ('block5', _CAL_INFO_TYPE), ('calibration', _VISCAL_INFO_TYPE), ('block6', _INTER_CALIBRATION_INFO_TYPE), ('block7', _SEGMENT_INFO_TYPE), ('block8', _NAVIGATION_CORRECTION_INFO_TYPE), ('navigation_corrections', _NAVIGATION_CORRECTION_SUBINFO_TYPE, (number_nav_corrections,)), ('block9', _OBSERVATION_TIME_INFO_TYPE), ('observation_time_information', _OBSERVATION_LINE_TIME_INFO_TYPE, (number_observation_times,)), ('block10', _ERROR_INFO_TYPE), ('error_info', _ERROR_LINE_INFO_TYPE, (error_lines,)), ('block11', _SPARE_TYPE), ('image', '<u2', (nrows, ncols))])
    dat = np.zeros(1, dat_type)
    dat['block1']['blocklength'] = _BASIC_INFO_TYPE.itemsize
    dat['block1']['observation_area'] = 'JP01'
    dat['block1']['satellite'] = b'Himawari-8'
    dat['block2']['blocklength'] = _DATA_INFO_TYPE.itemsize
    dat['block2']['number_of_lines'] = nrows
    dat['block2']['number_of_columns'] = ncols
    dat['block3']['blocklength'] = _PROJ_INFO_TYPE.itemsize
    dat['block3']['sub_lon'] = 140.7
    # NOTE(review): the two assignments below are truncated — the right-hand
    # sides (the CFAC/LFAC projection scaling factors) are missing, which is
    # not valid Python.  Restore the values from version control.
    dat['block3']['CFAC'] =
    dat['block3']['LFAC'] =
    dat['block3']['COFF'] = 5500.5
    dat['block3']['LOFF'] = 5500.5
    dat['block3']['distance_from_earth_center'] = 42164.0
    dat['block3']['earth_equatorial_radius'] = 6378.137
    dat['block3']['earth_polar_radius'] = 6356.7523
    dat['block4']['blocklength'] = _NAV_INFO_TYPE.itemsize
    # Block lengths account for any trailing variable-length sub-records.
    dat['block5']['blocklength'] = (_CAL_INFO_TYPE.itemsize + _VISCAL_INFO_TYPE.itemsize)
    dat['block6']['blocklength'] = _INTER_CALIBRATION_INFO_TYPE.itemsize
    dat['block7']['blocklength'] = _SEGMENT_INFO_TYPE.itemsize
    dat['block8']['blocklength'] = (_NAVIGATION_CORRECTION_INFO_TYPE.itemsize + (number_nav_corrections * _NAVIGATION_CORRECTION_SUBINFO_TYPE.itemsize))
    dat['block9']['blocklength'] = (_OBSERVATION_TIME_INFO_TYPE.itemsize + (number_observation_times * _OBSERVATION_LINE_TIME_INFO_TYPE.itemsize))
    dat['block10']['blocklength'] = (_ERROR_INFO_TYPE.itemsize + (error_lines * _ERROR_LINE_INFO_TYPE.itemsize))
    dat['block11']['blocklength'] = _SPARE_TYPE.itemsize
    dat.tofile(filename)
    return filename
def get_parser_with_args():
    """Assemble the argument parser for the top-K probability collection tool.

    Sets up the base pytorch_translate parser (verbosity + dataset options)
    and extends the generation group with the options specific to collecting
    top-K output probabilities from a teacher model.
    """
    parser = options.get_parser('Collect Top-K Probs', default_task='pytorch_translate')
    pytorch_translate_options.add_verbosity_args(parser)
    pytorch_translate_options.add_dataset_args(parser, gen=True)
    group = options.add_generation_args(parser)
    # Binary eval inputs (override the text-file options; must be used as a pair).
    group.add_argument(
        '--source-binary-file',
        default='',
        help='Path for the binary file containing source eval examples. (Overrides --source-text-file. Must be used in conjunction with --target-binary-file).',
    )
    group.add_argument(
        '--target-binary-file',
        default='',
        help='Path for the binary file containing target eval examples. (Overrides --target-text-file. Must be used in conjunction with --source-binary-file).',
    )
    # Collection parameters.
    group.add_argument(
        '--k-probs-to-collect',
        type=int,
        default=8,
        help='Number of probabilities to collect for each output step.',
    )
    group.add_argument(
        '--top-k-probs-binary-file',
        type=str,
        default='',
        help='File into which to save top-K probabilities for each token.',
    )
    group.add_argument(
        '--teacher-max-tokens',
        type=int,
        default=1000,
        help='Maximum number of words in minibatch for teacher to score.',
    )
    return parser
class DataTrainingArguments():
    """Arguments describing the translation data used for training/eval.

    Fields are intended to be populated from the command line (the
    ``field(...)``/``metadata`` pattern is the HfArgumentParser convention);
    ``__post_init__`` validates the chosen combination of inputs.

    NOTE(review): the use of ``field(...)`` implies this class is meant to be
    a ``@dataclass`` — the decorator appears stripped in this extract;
    confirm against the original file.
    """
    # Translation language pair (required when source/target cannot be
    # inferred from a dataset).
    source_lang: str = field(default=None, metadata={'help': 'Source language id for translation.'})
    target_lang: str = field(default=None, metadata={'help': 'Target language id for translation.'})
    # Either a datasets-hub dataset ...
    dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    # ... or local jsonlines files.
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a jsonlines).'})
    validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the metrics (sacrebleu) on a jsonlines file.'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'An optional input test data file to evaluate the metrics (sacrebleu) on a jsonlines file.'})
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
    # Tokenization length limits.
    max_source_length: Optional[int] = field(default=1024, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    max_target_length: Optional[int] = field(default=128, metadata={'help': 'The maximum total sequence length for target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    val_max_target_length: Optional[int] = field(default=None, metadata={'help': 'The maximum total sequence length for validation target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`.This argument is also used to override the ``max_length`` param of ``model.generate``, which is used during ``evaluate`` and ``predict``.'})
    pad_to_max_length: bool = field(default=False, metadata={'help': 'Whether to pad all samples to model maximum sentence length. If False, will pad the samples dynamically when batching to the maximum length in the batch. More efficient on GPU but very bad for TPU.'})
    # Debug/speed truncation knobs for each split.
    max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
    max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
    max_predict_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of prediction examples to this value if set.'})
    num_beams: Optional[int] = field(default=None, metadata={'help': 'Number of beams to use for evaluation. This argument will be passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'})
    ignore_pad_token_for_loss: bool = field(default=True, metadata={'help': 'Whether to ignore the tokens corresponding to padded labels in the loss computation or not.'})
    source_prefix: Optional[str] = field(default=None, metadata={'help': 'A prefix to add before every source text (useful for T5 models).'})
    forced_bos_token: Optional[str] = field(default=None, metadata={'help': 'The token to force as the first generated token after the :obj:`decoder_start_token_id`.Useful for multilingual models like :doc:`mBART <../model_doc/mbart>` where the first generated token needs to be the target language token.(Usually it is the target language token)'})
    def __post_init__(self):
        """Validate the input configuration and fill derived defaults.

        Raises:
            ValueError: if neither a dataset nor data files are given, or if
                the language pair is incomplete.
        """
        if ((self.dataset_name is None) and (self.train_file is None) and (self.validation_file is None)):
            raise ValueError('Need either a dataset name or a training/validation file.')
        elif ((self.source_lang is None) or (self.target_lang is None)):
            raise ValueError('Need to specify the source language and the target language.')
        # Local files must look like jsonlines; only the suffix is checked.
        valid_extensions = ['json', 'jsonl']
        if (self.train_file is not None):
            extension = self.train_file.split('.')[(- 1)]
            assert (extension in valid_extensions), '`train_file` should be a jsonlines file.'
        if (self.validation_file is not None):
            extension = self.validation_file.split('.')[(- 1)]
            assert (extension in valid_extensions), '`validation_file` should be a jsonlines file.'
        # Validation generation length falls back to the training target length.
        if (self.val_max_target_length is None):
            self.val_max_target_length = self.max_target_length
def get_mock_github():
    """Build a Mock standing in for the PyGithub client in tests.

    The mock recognizes a fixed fixture universe: commit ``aaaaaaa``, branch
    ``master``, user ``knownuser``, org ``someorg``, repos ``somerepo`` /
    ``anotherrepo``, and Dockerfiles at the repo root and under
    ``somesubdir/``.  Any lookup outside that universe raises
    ``GithubException``, mirroring the real client's not-found behavior.

    NOTE(review): several ``html_url`` assignments below look truncated
    (a bare quote or a ``%`` format with no format string) — presumably URL
    literals lost in transcription; verify against the original fixture.
    """
    def get_commit_mock(commit_sha):
        # Only commit 'aaaaaaa' exists; everything else is a 404-equivalent.
        if (commit_sha == 'aaaaaaa'):
            commit_mock = Mock()
            commit_mock.sha = commit_sha
            commit_mock.html_url = '
            commit_mock.last_modified = 'now'
            commit_mock.commit = Mock()
            commit_mock.commit.message = 'some cool message'
            commit_mock.committer = Mock()
            commit_mock.committer.login = 'someuser'
            commit_mock.committer.avatar_url = 'avatarurl'
            commit_mock.committer.html_url = 'htmlurl'
            commit_mock.author = Mock()
            commit_mock.author.login = 'someuser'
            commit_mock.author.avatar_url = 'avatarurl'
            commit_mock.author.html_url = 'htmlurl'
            return commit_mock
        raise GithubException(None, None)
    def get_branch_mock(branch_name):
        # Only 'master' exists, pointing at the fixture commit.
        if (branch_name == 'master'):
            branch_mock = Mock()
            branch_mock.commit = Mock()
            branch_mock.commit.sha = 'aaaaaaa'
            return branch_mock
        raise GithubException(None, None)
    def get_repo_mock(namespace, name):
        # Repo fixture: 'somerepo' is private; 'anotherrepo' has no push date;
        # admin permission only when owned by 'knownuser'.
        repo_mock = Mock()
        repo_mock.owner = Mock()
        repo_mock.owner.login = namespace
        repo_mock.full_name = ('%s/%s' % (namespace, name))
        repo_mock.name = name
        repo_mock.description = ('some %s repo' % name)
        if (name != 'anotherrepo'):
            repo_mock.pushed_at = datetime.utcfromtimestamp(0)
        else:
            repo_mock.pushed_at = None
        repo_mock.html_url = (' % (namespace, name))
        repo_mock.private = (name == 'somerepo')
        repo_mock.permissions = Mock()
        repo_mock.permissions.admin = (namespace == 'knownuser')
        return repo_mock
    def get_user_repos_mock(type='all', sort='created'):
        return [get_repo_mock('knownuser', 'somerepo')]
    def get_org_repos_mock(type='all'):
        return [get_repo_mock('someorg', 'somerepo'), get_repo_mock('someorg', 'anotherrepo')]
    def get_orgs_mock():
        return [get_org_mock('someorg')]
    def get_user_mock(username='knownuser'):
        # Only 'knownuser' resolves; carries a plan with 1 private repo slot.
        if (username == 'knownuser'):
            user_mock = Mock()
            user_mock.name = username
            user_mock.plan = Mock()
            user_mock.plan.private_repos = 1
            user_mock.login = username
            user_mock.html_url = (' % username)
            user_mock.avatar_url = 'avatarurl'
            user_mock.get_repos = Mock(side_effect=get_user_repos_mock)
            user_mock.get_orgs = Mock(side_effect=get_orgs_mock)
            return user_mock
        raise GithubException(None, None)
    def get_org_mock(namespace):
        # Only 'someorg' resolves; carries a plan with 2 private repo slots.
        if (namespace == 'someorg'):
            org_mock = Mock()
            org_mock.get_repos = Mock(side_effect=get_org_repos_mock)
            org_mock.login = namespace
            org_mock.html_url = (' % namespace)
            org_mock.avatar_url = 'avatarurl'
            org_mock.name = namespace
            org_mock.plan = Mock()
            org_mock.plan.private_repos = 2
            return org_mock
        raise GithubException(None, None)
    def get_tags_mock():
        # Two tags, both pointing at the fixture commit.
        sometag = Mock()
        sometag.name = 'sometag'
        sometag.commit = get_commit_mock('aaaaaaa')
        someothertag = Mock()
        someothertag.name = 'someothertag'
        someothertag.commit = get_commit_mock('aaaaaaa')
        return [sometag, someothertag]
    def get_branches_mock():
        # Two branches, both pointing at the fixture commit.
        master = Mock()
        master.name = 'master'
        master.commit = get_commit_mock('aaaaaaa')
        otherbranch = Mock()
        otherbranch.name = 'otherbranch'
        otherbranch.commit = get_commit_mock('aaaaaaa')
        return [master, otherbranch]
    def get_contents_mock(filepath):
        # Two known file paths; anything else raises like a missing file.
        if (filepath == 'Dockerfile'):
            m = Mock()
            m.content = 'hello world'
            return m
        if (filepath == 'somesubdir/Dockerfile'):
            m = Mock()
            m.content = 'hi universe'
            return m
        raise GithubException(None, None)
    def get_git_tree_mock(commit_sha, recursive=False):
        # Tree for the fixture commit: two blobs plus one non-blob entry
        # (callers are expected to filter on type == 'blob').
        first_file = Mock()
        first_file.type = 'blob'
        first_file.path = 'Dockerfile'
        second_file = Mock()
        second_file.type = 'other'
        second_file.path = '/some/Dockerfile'
        third_file = Mock()
        third_file.type = 'blob'
        third_file.path = 'somesubdir/Dockerfile'
        t = Mock()
        if (commit_sha == 'aaaaaaa'):
            t.tree = [first_file, second_file, third_file]
        else:
            t.tree = []
        return t
    # Wire the pieces together into repo- and client-level mocks.
    repo_mock = Mock()
    repo_mock.default_branch = 'master'
    repo_mock.ssh_url = 'ssh_url'
    repo_mock.get_branch = Mock(side_effect=get_branch_mock)
    repo_mock.get_tags = Mock(side_effect=get_tags_mock)
    repo_mock.get_branches = Mock(side_effect=get_branches_mock)
    repo_mock.get_commit = Mock(side_effect=get_commit_mock)
    repo_mock.get_contents = Mock(side_effect=get_contents_mock)
    repo_mock.get_git_tree = Mock(side_effect=get_git_tree_mock)
    gh_mock = Mock()
    gh_mock.get_repo = Mock(return_value=repo_mock)
    gh_mock.get_user = Mock(side_effect=get_user_mock)
    gh_mock.get_organization = Mock(side_effect=get_org_mock)
    return gh_mock
def override_training_args(args: Namespace) -> Tuple[(List[str], List[str])]:
    """Convert a legacy argparse Namespace into hydra-style override strings.

    Walks every known params group, then appends the task/model/criterion
    module overrides.

    Returns:
        A ``(overrides, deletes)`` tuple of override strings and keys to drop.
    """
    param_groups = (
        ('params.common', CommonParams),
        ('params.dataset', DatasetParams),
        ('params.distributed_training', DistributedTrainingParams),
        ('params.optimization', OptimizationParams),
        ('params.checkpoint', CheckpointParams),
        ('params.bmuf', FairseqBMUFConfig),
    )
    overrides = [
        entry
        for group_name, group_cls in param_groups
        for entry in _override_attr(group_name, group_cls, args)
    ]
    module_overrides, module_deletes = override_module_args(args)
    overrides.extend(module_overrides)
    return (overrides, module_deletes)
def box_voting(selected_boxes, pool_boxes, iou_thresh=0.5):
    """Refine boxes by score-weighted voting over IoU matches.

    Each box in ``selected_boxes`` is replaced with the score-weighted mean
    of all ``pool_boxes`` whose IoU with it exceeds ``iou_thresh``; its score
    becomes the (unweighted) mean score of those matches.

    Args:
        selected_boxes: BoxList of boxes to refine.
        pool_boxes: BoxList carrying a 'scores' field to vote with.
        iou_thresh: IoU match threshold in [0, 1].

    Returns:
        BoxList with voted coordinates, copied extra fields, and a 'scores'
        field.

    Raises:
        ValueError: for an out-of-range threshold, non-BoxList inputs, or a
            missing 'scores' field.
    """
    if not (0.0 <= iou_thresh <= 1.0):
        raise ValueError('iou_thresh must be between 0 and 1')
    if not isinstance(selected_boxes, box_list.BoxList):
        raise ValueError('selected_boxes must be a BoxList')
    if not isinstance(pool_boxes, box_list.BoxList):
        raise ValueError('pool_boxes must be a BoxList')
    if not pool_boxes.has_field('scores'):
        raise ValueError("pool_boxes must have a 'scores' field")
    # 0/1 match mask: [num_selected, num_pool].
    match_mask = tf.to_float(tf.greater(iou(selected_boxes, pool_boxes), iou_thresh))
    matches_per_box = tf.reduce_sum(match_mask, 1)
    # Runtime guards: every selected box needs at least one vote, and scores
    # must be usable as weights.
    match_assert = tf.Assert(tf.reduce_all(tf.greater(matches_per_box, 0)), ['Each box in selected_boxes must match with at least one box in pool_boxes.'])
    pool_scores = tf.expand_dims(pool_boxes.get_field('scores'), 1)
    scores_assert = tf.Assert(tf.reduce_all(tf.greater_equal(pool_scores, 0)), ['Scores must be non negative.'])
    with tf.control_dependencies([scores_assert, match_assert]):
        score_sums = tf.matmul(match_mask, pool_scores)
        mean_scores = tf.reshape(score_sums, [-1]) / matches_per_box
        # Weighted average of matched box coordinates.
        voted_locations = tf.matmul(match_mask, pool_boxes.get() * pool_scores) / score_sums
    voted_boxes = box_list.BoxList(voted_locations)
    _copy_extra_fields(voted_boxes, selected_boxes)
    voted_boxes.add_field('scores', mean_scores)
    return voted_boxes
class _TestFunctionalBase(unittest.TestCase):
    """Shared fixtures and assertions for functional-op tests.

    Subclasses provide the concrete dataframes (``df1``,
    ``width_bucket_df``) via :meth:`setUpTestCaseData`.

    NOTE(review): ``setUpClass``/``setUpTestCaseData`` take ``cls`` and are
    conventionally ``@classmethod`` — decorators appear stripped in this
    extract; confirm against the original file.
    """
    def setUpClass(cls):
        # Base frames: two list columns (int64/int32 variants of the same
        # data) and the width_bucket inputs, then delegate to the subclass.
        cls.base_df1 = ta.dataframe({'int64_list': [[11, 12, 13], [21, 22, 23, 24, 25, 26], [31, 32]], 'int32_list': [[11, 12, 13], [21, 22, 23, 24, 25, 26], [31, 32]]}, dtype=dt.Struct([dt.Field('int64_list', dt.List(dt.int64)), dt.Field('int32_list', dt.List(dt.int32))]))
        cls.base_width_bucket_df = ta.dataframe({'x': [3.14, 2, (- 1)], 'bound1': [0, 0, 0], 'bound2': [4, 4, 3.2], 'bucketCount': [3, 3, 4]}, dtype=dt.Struct([dt.Field('x', dt.float64), dt.Field('bound1', dt.float64), dt.Field('bound2', dt.float64), dt.Field('bucketCount', dt.int64)]))
        cls.setUpTestCaseData()
    def setUpTestCaseData(cls):
        """Abstract hook: subclasses must materialize the test dataframes."""
        raise unittest.SkipTest('abstract base test')
    def test_slice(self):
        # slice(start=2, length=3) applied element-wise to each list row.
        self.assertEqual(list(functional.slice(type(self).df1['int64_list'], 2, 3)), [[12, 13], [22, 23, 24], [32]])
    def test_intersect_constant_aray(self):
        # Intersection with a constant array; int32 column must keep its
        # item dtype through the operation.
        self.assertEqual(list(functional.array_intersect(type(self).df1['int64_list'], [12, 22, 23, 32])), [[12], [22, 23], [32]])
        int32_list_intersect = functional.array_intersect(type(self).df1['int32_list'], [np.int32(12), np.int32(22), np.int32(23), np.int32(32)])
        self.assertTrue(dt.is_list(int32_list_intersect.dtype))
        self.assertTrue(dt.is_int32(int32_list_intersect.dtype.item_dtype))
        self.assertEqual(list(int32_list_intersect), [[12], [22, 23], [32]])
    def test_width_bucket(self):
        # Bucket indices for x within [bound1, bound2) split bucketCount ways.
        df = self.width_bucket_df
        self.assertEqual(list(functional.width_bucket(df['x'], df['bound1'], df['bound2'], df['bucketCount'])), [3, 2, 0])
class TestFunc(torch.autograd.Function):
    """Autograd bridge for a Warp kernel.

    ``forward`` launches ``test_kernel`` on ``device`` (both resolved from
    the enclosing scope); ``backward`` replays the kernel in adjoint mode to
    produce input gradients.

    NOTE(review): ``forward``/``backward`` are conventionally
    ``@staticmethod`` on autograd Functions — decorators appear stripped in
    this extract; confirm against the original file.
    """
    def forward(ctx, x):
        y = torch.empty_like(x)
        # Stash the tensors on ctx so backward can reuse them (and so they
        # stay alive while Warp holds views of their storage).
        ctx.x = x
        ctx.y = y
        wp.launch(kernel=test_kernel, dim=len(x), inputs=[wp.torch.from_torch(x)], outputs=[wp.torch.from_torch(y)], device=device)
        return y
    def backward(ctx, adj_y):
        # Warp writes adjoints in place, so both buffers must be contiguous.
        adj_x = torch.zeros_like(ctx.x).contiguous()
        adj_y = adj_y.contiguous()
        # Adjoint (reverse-mode) replay of the same kernel launch.
        wp.launch(kernel=test_kernel, dim=len(ctx.x), inputs=[wp.torch.from_torch(ctx.x)], outputs=[None], adj_inputs=[wp.torch.from_torch(adj_x)], adj_outputs=[wp.torch.from_torch(adj_y)], device=device, adjoint=True)
        return adj_x
class MethodSignature(PipelineSignature):
    """Signature for Methods.

    Beyond the base pipeline checks, every output must be a semantic QIIME
    type and every input/parameter/output must carry a view (function
    annotation).
    """
    builtin_args = ()

    def _assert_valid_outputs(self, outputs):
        """Reject any output whose QIIME type is not semantic."""
        super()._assert_valid_outputs(outputs)
        for output_name, spec in outputs.items():
            if is_semantic_type(spec.qiime_type):
                continue
            raise TypeError('Output %r must be a semantic QIIME type, not %r' % (output_name, spec.qiime_type))

    def _assert_valid_views(self, inputs, parameters, outputs):
        """Require a view type on every declared name."""
        all_specs = itertools.chain(inputs.items(), parameters.items(), outputs.items())
        for name, spec in all_specs:
            if not spec.has_view_type():
                raise TypeError('Method is missing a function annotation for parameter: %r' % name)
_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """Image classification pipeline.

    Maps an image (or list of images) to the model's top-K label/score
    pairs, supporting both the PyTorch and TensorFlow backends.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, 'vision')
        # Restrict to models registered for image classification.
        mapping = TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        self.check_model_type(mapping)

    def _sanitize_parameters(self, top_k=None):
        """Route ``top_k`` to postprocess; pre/forward take no parameters."""
        postprocess_params = {} if top_k is None else {'top_k': top_k}
        return ({}, {}, postprocess_params)

    def __call__(self, images: Union[(str, List[str], 'Image.Image', List['Image.Image'])], **kwargs):
        """Classify the given image(s); see ``Pipeline.__call__`` for kwargs."""
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        """Load the image and turn it into framework tensors."""
        loaded = load_image(image)
        return self.feature_extractor(images=loaded, return_tensors=self.framework)

    def _forward(self, model_inputs):
        """Run the bare model forward pass."""
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, top_k=5):
        """Convert logits into a top-K list of ``{'score', 'label'}`` dicts."""
        # Never request more classes than the model defines.
        top_k = min(top_k, self.model.config.num_labels)
        if self.framework == 'pt':
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == 'tf':
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f'Unsupported framework: {self.framework}')
        return [
            {'score': score, 'label': self.model.config.id2label[_id]}
            for score, _id in zip(scores.tolist(), ids.tolist())
        ]
class FeatureDataset(torch.utils.data.Dataset):
    """Dataset yielding fixed-length per-video feature tensors.

    Each item is ``(features, original_length, video_id)`` where the feature
    axis 0 is padded or sampled to ``padding_size`` and the last two axes are
    swapped.  Unknown video ids yield an empty tensor sentinel.
    """

    def __init__(self, vid2features, videos, padding_size=100, random_sampling=False):
        super(FeatureDataset, self).__init__()
        self.vid2features = vid2features
        self.padding_size = padding_size
        self.random_sampling = random_sampling
        self.videos = videos
        # View over known ids, used for membership tests in __getitem__.
        self.keys = self.vid2features.keys()

    def __len__(self):
        return len(self.videos)

    def __getitem__(self, index):
        vid = self.videos[index]
        if vid not in self.keys:
            # Unknown id: empty tensor, zero length, sentinel name.
            return (torch.Tensor([]), 0, 'None')
        feat = self.vid2features[vid][:]
        padded = resize_axis(feat, axis=0, new_size=self.padding_size, fill_value=0, random_sampling=self.random_sampling)
        return (padded.transpose(-1, -2), len(feat), vid)
class TestOnlineExactClassifier(unittest.TestCase):
    """Accuracy checks for OnlineExactClassifier on the Banana dataset."""
    def test_batch_classification(self):
        """Fit on the full training split; expect >= 0.89 test accuracy."""
        datasets = Banana()
        (train_dataset, test_dataset) = (datasets.train_dataset, datasets.test_dataset)
        (train_x, train_y) = train_dataset[:]
        (test_x, test_y) = test_dataset[:]
        input_dim = train_x.size((- 1))
        # Identity stem: the GP sees the raw 2-D inputs.
        stem = Identity(input_dim)
        alpha_eps = 0.01
        lr = 0.1
        classifier = OnlineExactClassifier(stem, train_x, train_y, alpha_eps, lr)
        if torch.cuda.is_available():
            classifier = classifier.cuda()
        classifier.fit(train_x, train_y, 100)
        test_pred = classifier.predict(test_x)
        test_acc = test_pred.eq(test_y).float().mean()
        self.assertGreaterEqual(test_acc, 0.89)
    def test_online_classification(self):
        """Seed with a few points, then stream updates one example at a time.

        Checks both the cumulative (predict-then-update) accuracy on the
        stream and the final accuracy on the held-out test split.
        """
        num_init = 5
        datasets = Banana()
        (train_dataset, test_dataset) = (datasets.train_dataset, datasets.test_dataset)
        (train_x, train_y) = train_dataset[:]
        # First num_init examples initialize the model; the rest are streamed.
        (init_x, train_x) = (train_x[:num_init], train_x[num_init:])
        (init_y, train_y) = (train_y[:num_init], train_y[num_init:])
        (test_x, test_y) = test_dataset[:]
        input_dim = train_x.size((- 1))
        stem = Identity(input_dim)
        alpha_eps = 0.01
        lr = 0.001
        classifier = OnlineExactClassifier(stem, init_x, init_y, alpha_eps, lr)
        if torch.cuda.is_available():
            classifier = classifier.cuda()
        correct = 0
        # Prequential evaluation: predict before updating on each example.
        for (t, (x, y)) in enumerate(zip(train_x, train_y)):
            pred_y = classifier.predict(x)
            classifier.update(x, y, update_stem=True, update_gp=True)
            if (pred_y == y):
                correct += 1
        cum_acc = (correct / train_x.size(0))
        self.assertGreaterEqual(cum_acc, 0.8)
        test_pred = classifier.predict(test_x)
        test_acc = test_pred.eq(test_y).float().mean()
        self.assertGreaterEqual(test_acc, 0.89)
class _AttentionDownConv(nn.Module):
def __init__(self, features=16):
super(_AttentionDownConv, self).__init__()
self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1)
self.downsample = nn.Conv2d(features, features, kernel_size=3, stride=2, padding=1)
def forward(self, input):
return F.relu(self.downsample(F.relu(self.conv2(F.relu(self.conv1(input)))))) |
class DAVClient():
    """Low-level WebDAV/CalDAV HTTP client wrapping a ``requests.Session``.

    Handles URL normalization, proxy configuration, lazy authentication
    negotiation (basic/digest/bearer) and the raw DAV verbs (PROPFIND,
    PROPPATCH, REPORT, MKCOL, MKCALENDAR, PUT, POST, DELETE, OPTIONS).
    """
    proxy: Optional[str] = None
    url: URL = None
    huge_tree: bool = False
    def __init__(self, url: str, proxy: Optional[str]=None, username: Optional[str]=None, password: Optional[str]=None, auth: Optional[AuthBase]=None, timeout: Optional[int]=None, ssl_verify_cert: Union[(bool, str)]=True, ssl_cert: Union[(str, typing.Tuple[(str, str)], None)]=None, headers: typing.Dict[(str, str)]={}, huge_tree: bool=False) -> None:
        """Set up the session.  Credentials may come from the arguments or
        be embedded in ``url`` (the latter wins)."""
        self.session = requests.Session()
        log.debug(('url: ' + str(url)))
        self.url = URL.objectify(url)
        self.huge_tree = huge_tree
        if (proxy is not None):
            _proxy = proxy
            # Default to the target URL's scheme when none is given.
            if ('://' not in proxy):
                _proxy = ((self.url.scheme + '://') + proxy)
            # 'host' or 'scheme://host' without a port gets the common 8080.
            p = _proxy.split(':')
            if (len(p) == 2):
                _proxy += ':8080'
            log.debug(('init - proxy: %s' % _proxy))
            self.proxy = _proxy
        # BUGFIX: copy the caller's mapping instead of binding it.  Binding
        # the (mutable) default dict and then calling update() on it would
        # leak these default headers into every later DAVClient instance —
        # and mutate any dict a caller passed in.
        self.headers = dict(headers)
        self.headers.update({'User-Agent': 'Mozilla/5.0', 'Content-Type': 'text/xml', 'Accept': 'text/xml, text/calendar'})
        # Credentials embedded in the URL override the keyword arguments.
        if (self.url.username is not None):
            username = unquote(self.url.username)
            # NOTE(review): assumes a password always accompanies a username
            # in the URL; unquote(None) would raise — confirm upstream.
            password = unquote(self.url.password)
        self.username = username
        self.password = password
        # Stored as bytes so basic/digest/bearer flows can decode on demand.
        if isinstance(self.password, str):
            self.password = self.password.encode('utf-8')
        self.auth = auth
        self.timeout = timeout
        self.ssl_verify_cert = ssl_verify_cert
        self.ssl_cert = ssl_cert
        # Strip credentials from the URL we keep around.
        self.url = self.url.unauth()
        log.debug(('self.url: ' + str(url)))
        self._principal = None
    def __enter__(self) -> Self:
        return self
    def __exit__(self, exc_type: Optional[typing.Type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[TracebackType]) -> None:
        self.close()
    def close(self) -> None:
        """Close the underlying HTTP session."""
        self.session.close()
    def principal(self, *largs, **kwargs):
        """Return (and cache) the Principal for this client."""
        if (not self._principal):
            self._principal = Principal(*largs, client=self, **kwargs)
        return self._principal
    def calendar(self, **kwargs):
        """Return a Calendar object bound to this client (no server call)."""
        return Calendar(client=self, **kwargs)
    def check_dav_support(self) -> Optional[str]:
        """Return the server's DAV capability header, or None.

        Probes the principal URL first; best-effort fallback to the base URL
        (hence the deliberately broad except).
        """
        try:
            response = self.options(self.principal().url)
        except:
            response = self.options(str(self.url))
        return response.headers.get('DAV', None)
    def check_cdav_support(self) -> bool:
        """True if the server advertises calendar-access (RFC 4791)."""
        support_list = self.check_dav_support()
        return ((support_list is not None) and ('calendar-access' in support_list))
    def check_scheduling_support(self) -> bool:
        """True if the server advertises calendar-auto-schedule (RFC 6638)."""
        support_list = self.check_dav_support()
        return ((support_list is not None) and ('calendar-auto-schedule' in support_list))
    def propfind(self, url: Optional[str]=None, props: str='', depth: int=0) -> DAVResponse:
        """Issue a PROPFIND; defaults to the client's base URL."""
        return self.request((url or str(self.url)), 'PROPFIND', props, {'Depth': str(depth)})
    def proppatch(self, url: str, body: str, dummy: None=None) -> DAVResponse:
        """Issue a PROPPATCH."""
        return self.request(url, 'PROPPATCH', body)
    def report(self, url: str, query: str='', depth: int=0) -> DAVResponse:
        """Issue a REPORT with the given query body."""
        return self.request(url, 'REPORT', query, {'Depth': str(depth), 'Content-Type': 'application/xml; charset="utf-8"'})
    def mkcol(self, url: str, body: str, dummy: None=None) -> DAVResponse:
        """Issue a MKCOL (create collection)."""
        return self.request(url, 'MKCOL', body)
    def mkcalendar(self, url: str, body: str='', dummy: None=None) -> DAVResponse:
        """Issue a MKCALENDAR (create calendar collection)."""
        return self.request(url, 'MKCALENDAR', body)
    def put(self, url: str, body: str, headers: Mapping[(str, str)]={}) -> DAVResponse:
        """Issue a PUT with the given body."""
        return self.request(url, 'PUT', body, headers)
    def post(self, url: str, body: str, headers: Mapping[(str, str)]={}) -> DAVResponse:
        """Issue a POST with the given body."""
        return self.request(url, 'POST', body, headers)
    def delete(self, url: str) -> DAVResponse:
        """Issue a DELETE."""
        return self.request(url, 'DELETE')
    def options(self, url: str) -> DAVResponse:
        """Issue an OPTIONS."""
        return self.request(url, 'OPTIONS')
    def extract_auth_types(self, header):
        """Parse a WWW-Authenticate header into lowercase scheme names."""
        auth_types = header.lower().split(',')
        auth_types = map((lambda auth_type: auth_type.strip()), auth_types)
        auth_types = map((lambda auth_type: auth_type.split(' ')[0]), auth_types)
        return list(filter((lambda auth_type: auth_type), auth_types))
    def request(self, url: str, method: str='GET', body: str='', headers: Mapping[(str, str)]={}) -> DAVResponse:
        """Send a request, negotiating authentication on a 401 if needed.

        Retries once after installing a basic/digest/bearer auth handler
        chosen from the server's WWW-Authenticate header.

        Raises:
            error.AuthorizationError: on a final 401/403.
            NotImplementedError: if the server offers no supported scheme.
        """
        combined_headers = self.headers.copy()
        combined_headers.update(headers)
        # An empty body must not carry a Content-Type.
        if (((body is None) or (body == '')) and ('Content-Type' in combined_headers)):
            del combined_headers['Content-Type']
        url_obj = URL.objectify(url)
        proxies = None
        if (self.proxy is not None):
            proxies = {url_obj.scheme: self.proxy}
            log.debug(('using proxy - %s' % proxies))
        log.debug('sending request - method={0}, url={1}, headers={2}\nbody:\n{3}'.format(method, str(url_obj), combined_headers, to_normal_str(body)))
        try:
            r = self.session.request(method, str(url_obj), data=to_wire(body), headers=combined_headers, proxies=proxies, auth=self.auth, timeout=self.timeout, verify=self.ssl_verify_cert, cert=self.ssl_cert)
            log.debug(('server responded with %i %s' % (r.status_code, r.reason)))
            response = DAVResponse(r, self)
        except:
            # Some servers break the connection on an unauthenticated
            # request; probe with a plain GET to learn the auth scheme.
            # Only meaningful when we still have credentials to offer.
            if (self.auth or (not self.password)):
                raise
            r = self.session.request(method='GET', url=str(url_obj), headers=combined_headers, proxies=proxies, timeout=self.timeout, verify=self.ssl_verify_cert, cert=self.ssl_cert)
            if (not (r.status_code == 401)):
                raise
        # First 401 with credentials at hand: pick an auth handler and retry.
        if ((r.status_code == 401) and ('WWW-Authenticate' in r.headers) and (not self.auth) and self.username):
            auth_types = self.extract_auth_types(r.headers['WWW-Authenticate'])
            if (self.password and self.username and ('digest' in auth_types)):
                self.auth = requests.auth.HTTPDigestAuth(self.username, self.password)
            elif (self.password and self.username and ('basic' in auth_types)):
                self.auth = requests.auth.HTTPBasicAuth(self.username, self.password)
            elif (self.password and ('bearer' in auth_types)):
                self.auth = HTTPBearerAuth(self.password)
            else:
                raise NotImplementedError('The server does not provide any of the currently supported authentication methods: basic, digest, bearer')
            return self.request(url, method, body, headers)
        # 401 despite an auth handler built from a bytes password: some
        # servers need str credentials — retry once with decoded values.
        elif ((r.status_code == 401) and ('WWW-Authenticate' in r.headers) and self.auth and self.password and isinstance(self.password, bytes)):
            auth_types = self.extract_auth_types(r.headers['WWW-Authenticate'])
            if (self.password and self.username and ('digest' in auth_types)):
                self.auth = requests.auth.HTTPDigestAuth(self.username, self.password.decode())
            elif (self.password and self.username and ('basic' in auth_types)):
                self.auth = requests.auth.HTTPBasicAuth(self.username, self.password.decode())
            elif (self.password and ('bearer' in auth_types)):
                self.auth = HTTPBearerAuth(self.password.decode())
            # Credentials are now baked into the handler; drop the raw copy.
            self.username = None
            self.password = None
            return self.request(str(url_obj), method, body, headers)
        if ((response.status == requests.codes.forbidden) or (response.status == requests.codes.unauthorized)):
            try:
                reason = response.reason
            except AttributeError:
                reason = 'None given'
            raise error.AuthorizationError(url=str(url_obj), reason=reason)
        if error.debug_dump_communication:
            # Append a human-readable request/response transcript to a
            # temp file for debugging.
            import datetime
            from tempfile import NamedTemporaryFile
            with NamedTemporaryFile(prefix='caldavcomm', delete=False) as commlog:
                commlog.write(((b'=' * 80) + b'\n'))
                commlog.write(f'{datetime.datetime.now():%FT%H:%M:%S}'.encode('utf-8'))
                commlog.write(b'\n====>\n')
                commlog.write(f'''{method} {url}
'''.encode('utf-8'))
                commlog.write(b'\n'.join([to_wire(f'{x}: {headers[x]}') for x in headers]))
                commlog.write(b'\n\n')
                commlog.write(to_wire(body))
                commlog.write(b'<====\n')
                commlog.write(f'{response.status} {response.reason}'.encode('utf-8'))
                commlog.write(b'\n'.join([to_wire(f'{x}: {response.headers[x]}') for x in response.headers]))
                commlog.write(b'\n\n')
                ct = response.headers.get('Content-Type', '')
                if (response.tree is not None):
                    commlog.write(to_wire(etree.tostring(response.tree, pretty_print=True)))
                else:
                    commlog.write(to_wire(response._raw))
                commlog.write(b'\n')
        return response
def _validate_sample_rates(input_filepath_list: List[Path], combine_type: CombineType):
    """Raise ``IOError`` unless every input file shares one sample rate.

    Args:
        input_filepath_list: audio files to be combined.
        combine_type: combine operation, used in the error message.
    """
    rates = [file_info.sample_rate(path) for path in input_filepath_list]
    if core.all_equal(rates):
        return
    raise IOError(
        'Input files do not have the same sample rate. The {} combine type requires that all files have the same sample rate'.format(combine_type)
    )
def test_nonsquare_deterministic_2_state_by_node2state_by_state():
    """Conversion of the nonsquare deterministic-2 state-by-node TPM."""
    expected = np.array([
        [1, 0, 0, 0],
        [0, 0, 1, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
        [1, 0, 0, 0],
        [0, 0, 1, 0],
        [0, 0, 0, 1],
        [0, 1, 0, 0],
    ])
    actual = convert.state_by_node2state_by_state(nonsquare_deterministic_2)
    assert np.array_equal(actual, expected)
class logger():
    """Progress logger: a single tqdm bar spanning ``n_lvls`` levels of
    ``n_steps`` steps each, with a per-level description."""
    def __init__(self, n_steps, n_lvls):
        self.n_steps = n_steps  # steps per level
        self.n_lvls = n_lvls  # total number of levels
        self.lvl = (- 1)  # current level; -1 until new_lvl() is first called
        self.lvl_step = 0  # step counter within the current level
        self.steps = 0  # global step counter across all levels
        self.pbar = tqdm(total=(self.n_lvls * self.n_steps), desc='Starting')
    def step(self):
        """Advance one step: progress bar, global and per-level counters."""
        self.pbar.update(1)
        self.steps += 1
        self.lvl_step += 1
    def new_lvl(self):
        """Enter the next level and reset the per-level step counter."""
        self.lvl += 1
        self.lvl_step = 0
    def print(self):
        """Refresh the bar description with current level/step progress."""
        self.pbar.set_description(f'Lvl {self.lvl}/{(self.n_lvls - 1)}, step {self.lvl_step}/{self.n_steps}')
def parse_type_string(expr_string: str, expr_fallback_name: str, line: int, column: int) -> ProperType:
    """Parse a string literal that may denote a type.

    Returns the parsed UnboundType (annotated with the original string) or
    UnionType; anything else — including parse failures — falls back to a
    RawExpressionType carrying ``expr_fallback_name``.
    """
    try:
        _, parsed = parse_type_comment(expr_string.strip(), line=line, column=column, errors=None)
        if isinstance(parsed, UnboundType):
            # Only annotate a node that has not been annotated already.
            if parsed.original_str_expr is None:
                parsed.original_str_expr = expr_string
                parsed.original_str_fallback = expr_fallback_name
                return parsed
        elif isinstance(parsed, UnionType):
            return parsed
        return RawExpressionType(expr_string, expr_fallback_name, line, column)
    except (SyntaxError, ValueError):
        # Not a parsable type expression: treat it as a raw literal.
        return RawExpressionType(expr_string, expr_fallback_name, line, column)
class SerialTransport(asyncio.Transport):
    """asyncio Transport over a pyserial port.

    On POSIX it registers reader/writer callbacks with the event loop; on
    Windows (or when ``force_poll`` is set) it falls back to a polling task,
    since fileno-based readers are unavailable there.
    """
    # Force the polling task even on platforms that support add_reader.
    force_poll: bool = False
    def __init__(self, loop, protocol, *args, **kwargs) -> None:
        """Open the port (extra args go to serial.serial_for_url) in
        non-blocking mode; call setup() to start I/O."""
        super().__init__()
        self.async_loop = loop
        self._protocol: asyncio.BaseProtocol = protocol
        self.sync_serial = serial.serial_for_url(*args, **kwargs)
        self._write_buffer: list[bytes] = []
        self.poll_task: (asyncio.Task | None) = None
        self._poll_wait_time = 0.0005
        # Zero timeouts make reads/writes non-blocking.
        self.sync_serial.timeout = 0
        self.sync_serial.write_timeout = 0
        self.future: (asyncio.Task | None) = None
    def setup(self):
        """Start I/O: polling task on Windows/force_poll, loop reader otherwise."""
        if ((os.name == 'nt') or self.force_poll):
            self.poll_task = asyncio.create_task(self._polling_task())
            self.poll_task.set_name('transport_serial poll')
        else:
            self.async_loop.add_reader(self.sync_serial.fileno(), self._read_ready)
        self.async_loop.call_soon(self._protocol.connection_made, self)
    def close(self, exc: (Exception | None)=None) -> None:
        """Tear down the transport; idempotent.  Notifies the protocol only
        when closing due to an exception."""
        if (not self.sync_serial):
            return
        with contextlib.suppress(Exception):
            self.sync_serial.flush()
        self.flush()
        if self.poll_task:
            # Cancel the poller and keep a handle so cancellation completes.
            self.poll_task.cancel()
            self.future = asyncio.ensure_future(self.poll_task)
            self.poll_task = None
        else:
            self.async_loop.remove_reader(self.sync_serial.fileno())
        self.sync_serial.close()
        self.sync_serial = None
        if exc:
            with contextlib.suppress(Exception):
                self._protocol.connection_lost(exc)
    def write(self, data) -> None:
        """Queue data; in reader/writer mode also arm the writer callback."""
        self._write_buffer.append(data)
        if (not self.poll_task):
            self.async_loop.add_writer(self.sync_serial.fileno(), self._write_ready)
    def flush(self) -> None:
        """Drop any unsent data and disarm the writer callback."""
        if (not self.poll_task):
            self.async_loop.remove_writer(self.sync_serial.fileno())
        self._write_buffer.clear()
    def loop(self):
        """Return the event loop this transport runs on."""
        return self.async_loop
    def get_protocol(self) -> asyncio.BaseProtocol:
        return self._protocol
    def set_protocol(self, protocol: asyncio.BaseProtocol) -> None:
        self._protocol = protocol
    def get_write_buffer_limits(self) -> tuple[(int, int)]:
        return (1, 1024)
    def can_write_eof(self):
        # Serial links have no EOF concept.
        return False
    def write_eof(self):
        """No-op: EOF is unsupported on serial links (see can_write_eof)."""
    def set_write_buffer_limits(self, high=None, low=None):
        """No-op: buffer limits are fixed (see get_write_buffer_limits)."""
    def get_write_buffer_size(self):
        # Number of queued chunks, not bytes.
        return len(self._write_buffer)
    def is_reading(self) -> bool:
        return True
    def pause_reading(self):
        """No-op: flow control is not supported by this transport."""
    def resume_reading(self):
        """No-op: flow control is not supported by this transport."""
    def is_closing(self):
        return False
    def abort(self) -> None:
        """Close immediately; queued data is discarded by close()."""
        self.close()
    def _read_ready(self):
        """Reader callback: forward available bytes to the protocol."""
        try:
            if (data := self.sync_serial.read(1024)):
                self._protocol.data_received(data)
        except serial.SerialException as exc:
            self.close(exc=exc)
    def _write_ready(self):
        """Writer callback: send buffered data, keeping any short-write rest."""
        data = b''.join(self._write_buffer)
        try:
            if ((nlen := self.sync_serial.write(data)) < len(data)):
                # Partial write: requeue the remainder and stay armed.
                self._write_buffer = [data[nlen:]]
                if (not self.poll_task):
                    self.async_loop.add_writer(self.sync_serial.fileno(), self._write_ready)
                return
            self.flush()
        except (BlockingIOError, InterruptedError):
            # Transient condition — try again on the next readiness event.
            return
        except serial.SerialException as exc:
            self.close(exc=exc)
    async def _polling_task(self):
        """Polling fallback: alternate draining writes and reading input."""
        try:
            while True:
                (await asyncio.sleep(self._poll_wait_time))
                while self._write_buffer:
                    self._write_ready()
                if self.sync_serial.in_waiting:
                    self._read_ready()
        except serial.SerialException as exc:
            self.close(exc=exc)
        except asyncio.CancelledError:
            pass
class TestRequireRuntimeDependencies():
    """Resolution of the `require-runtime-dependencies` build option:
    default, target-level, global, type validation, and precedence."""
    def test_default(self, isolation):
        """With no configuration the option defaults to False."""
        builder = MockBuilder(str(isolation))
        # NOTE(review): the doubled chained `is` comparison below looks like
        # a transcription artifact; a single `... is False` check is
        # presumably intended — confirm against the original test.
        assert (builder.config.require_runtime_dependencies is builder.config.require_runtime_dependencies is False)
    def test_target(self, isolation):
        """A target-level setting is honored."""
        config = {'tool': {'hatch': {'build': {'targets': {'foo': {'require-runtime-dependencies': True}}}}}}
        builder = MockBuilder(str(isolation), config=config)
        builder.PLUGIN_NAME = 'foo'
        assert (builder.config.require_runtime_dependencies is True)
    def test_target_not_boolean(self, isolation):
        """A non-boolean target-level value raises TypeError."""
        config = {'tool': {'hatch': {'build': {'targets': {'foo': {'require-runtime-dependencies': 9000}}}}}}
        builder = MockBuilder(str(isolation), config=config)
        builder.PLUGIN_NAME = 'foo'
        with pytest.raises(TypeError, match='Field `tool.hatch.build.targets.foo.require-runtime-dependencies` must be a boolean'):
            _ = builder.config.require_runtime_dependencies
    def test_global(self, isolation):
        """A global build setting is honored."""
        config = {'tool': {'hatch': {'build': {'require-runtime-dependencies': True}}}}
        builder = MockBuilder(str(isolation), config=config)
        builder.PLUGIN_NAME = 'foo'
        assert (builder.config.require_runtime_dependencies is True)
    def test_global_not_boolean(self, isolation):
        """A non-boolean global value raises TypeError."""
        config = {'tool': {'hatch': {'build': {'require-runtime-dependencies': 9000}}}}
        builder = MockBuilder(str(isolation), config=config)
        builder.PLUGIN_NAME = 'foo'
        with pytest.raises(TypeError, match='Field `tool.hatch.build.require-runtime-dependencies` must be a boolean'):
            _ = builder.config.require_runtime_dependencies
    def test_target_overrides_global(self, isolation):
        """Target-level configuration takes precedence over global."""
        config = {'tool': {'hatch': {'build': {'require-runtime-dependencies': True, 'targets': {'foo': {'require-runtime-dependencies': False}}}}}}
        builder = MockBuilder(str(isolation), config=config)
        builder.PLUGIN_NAME = 'foo'
        assert (builder.config.require_runtime_dependencies is False)
def string_escape(text: str) -> str:
    """Backslash-escape quotes, backslashes, newlines, and other
    troublesome characters in *text*.

    Uses a single-pass character translation; since every escaped
    character is a single code point, this is equivalent to applying the
    replacements one after another (backslash first).
    """
    table = str.maketrans({
        '\\': '\\\\',
        "'": "\\'",
        '"': '\\"',
        '\n': '\\n',
        '\r': '\\r',
        '\x00': '\\x00',
        '\ufeff': '\\ufeff',
        '\u2028': '\\u2028',
        '\u2029': '\\u2029',
    })
    return text.translate(table)
def decode_network_values(ptype, plen, buf):
    """Decode the VALUES part of a collectd binary-protocol packet.

    Args:
        ptype: part type id (unused here; kept so all part decoders share
            the same signature).
        plen: total length of the part in bytes, including its header.
        buf: raw packet buffer, starting at this part's header.

    Returns:
        A list of (dstype, value) tuples, one per encoded value.

    Raises:
        ValueError: if an unknown data-source type byte is encountered.
    """
    nvalues = short.unpack_from(buf, header.size)[0]
    # The per-value type bytes follow the 2-byte count; the 8-byte values
    # follow the type bytes.
    off = (header.size + short.size) + nvalues
    valskip = double.size
    # One type byte plus one 8-byte value per entry, together with the part
    # header and the count, must fill the part exactly.
    assert ((((valskip + 1) * nvalues) + short.size) + header.size) == plen
    assert double.size == number.size
    # NOTE(review): iterating buf with ord() implies a 2.x-style byte string;
    # under Python 3 bytes iteration already yields ints — confirm.
    result = []
    for dstype in [ord(x) for x in buf[(header.size + short.size):off]]:
        # COUNTER, DERIVE and ABSOLUTE are all encoded as 64-bit integers;
        # the three previously duplicated branches are merged here.
        if dstype in (DS_TYPE_COUNTER, DS_TYPE_DERIVE, DS_TYPE_ABSOLUTE):
            result.append((dstype, number.unpack_from(buf, off)[0]))
        elif dstype == DS_TYPE_GAUGE:
            # Gauges are encoded as native doubles.
            result.append((dstype, double.unpack_from(buf, off)[0]))
        else:
            raise ValueError('DS type %i unsupported' % dstype)
        off += valskip
    return result
def test_pformat(fake_manager):
    """pformat() renders the object's class name followed by a
    pretty-printed dict of its attributes."""
    obj = helpers.FakeObject(fake_manager, {'attr1': ('foo' * 10), 'ham': ('eggs' * 15)})
    expected = (
        "<class 'tests.unit.helpers.FakeObject'> => \n"
        "{'attr1': 'foofoofoofoofoofoofoofoofoofoo',\n"
        " 'ham': 'eggseggseggseggseggseggseggseggseggseggseggseggseggseggseggs'}"
    )
    assert obj.pformat() == expected
def test_lookup_notification_page_valid(initialized_db, set_secscan_config):
    """A known notification id yields a SUCCESS page whose data comes
    straight from the security-scanner API response."""
    notification_id = '5e4b387e-88d3-4364-86fd-063447a6fad2'
    manifest_digest = 'sha256:35cf703de2d9eaad8752d6fe1b8f02b5d2149f1d8357c9cc7fb7d0a'
    api_payload = {
        'notifications': [
            {
                'id': notification_id,
                'manifest': manifest_digest,
                'reason': 'added',
                'vulnerability': {},
            }
        ],
        'page': {},
    }
    secscan = V4SecurityScanner(application, instance_keys, storage)
    secscan._secscan_api = mock.Mock()
    secscan._secscan_api.retrieve_notification_page.return_value = api_payload
    result = secscan.lookup_notification_page(notification_id)
    assert result.status == PaginatedNotificationStatus.SUCCESS
    # An empty 'page' object means there is no next page to fetch.
    assert result.next_page_index is None
    assert result.data[0]['manifest'] == manifest_digest
class PreferencesButton(Gtk.HBox):
    """A gear-icon menu button offering search-preference toggles."""

    def __init__(self, search_bar_box):
        super().__init__()
        prefs_menu = Gtk.Menu()
        # Persistent toggle: cap the number of search results.
        limit_item = ConfigCheckMenuItem(_('_Limit Results'), 'browsers', 'search_limit', True)
        limit_item.connect('toggled', search_bar_box.toggle_limit_widgets)
        prefs_menu.append(limit_item)
        # Persistent toggle: allow several queries at once.
        multi_item = ConfigCheckMenuItem(_('_Allow multiple queries'), 'browsers', 'multiple_queries', True)
        multi_item.connect('toggled', search_bar_box.toggle_multi)
        prefs_menu.append(multi_item)
        prefs_menu.show_all()
        menu_button = MenuButton(SymbolicIconImage(Icons.EMBLEM_SYSTEM, Gtk.IconSize.MENU), arrow=True)
        menu_button.set_menu(prefs_menu)
        self.pack_start(menu_button, True, True, 0)
class TestCheckpointUtils(unittest.TestCase):
    """Tests for checkpoint_utils: loading checkpoint ensembles and pruning
    layer-dropped state dicts."""

    def setUp(self):
        # Silence all log output while a test runs.
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        # Restore normal logging.
        logging.disable(logging.NOTSET)

    def _train_transformer(self, seed, extra_args=None):
        # Train a tiny 3+3-layer transformer on dummy data and yield the path
        # to its last checkpoint; the temp directory is removed on exit.
        # NOTE(review): this generator is consumed via `with ... as model`
        # below, which implies a @contextlib.contextmanager decorator that is
        # not visible in this view — confirm against the original file.
        if (extra_args is None):
            extra_args = []
        with tempfile.TemporaryDirectory(f'_train_transformer_seed{seed}') as data_dir:
            create_dummy_data(data_dir)
            preprocess_translation_data(data_dir)
            train_translation_model(data_dir, 'transformer_iwslt_de_en', (['--encoder-layers', '3', '--decoder-layers', '3', '--encoder-embed-dim', '8', '--decoder-embed-dim', '8', '--seed', str(seed)] + extra_args))
            (yield os.path.join(data_dir, 'checkpoint_last.pt'))

    def test_load_model_ensemble_and_task(self):
        """Two checkpoints load as a 2-model ensemble keeping their own seeds."""
        with contextlib.redirect_stdout(StringIO()):
            with self._train_transformer(seed=123) as model1:
                with self._train_transformer(seed=456) as model2:
                    (ensemble, cfg, task) = checkpoint_utils.load_model_ensemble_and_task(filenames=[model1, model2])
                    self.assertEqual(len(ensemble), 2)
                    self.assertEqual(ensemble[0].args.seed, 123)
                    self.assertEqual(ensemble[1].args.seed, 456)
                    # The task is constructed from the first checkpoint's args.
                    self.assertEqual(task.args.seed, 123)

    def test_prune_state_dict(self):
        """layers_to_keep overrides prune a layer-dropped model at load time."""
        with contextlib.redirect_stdout(StringIO()):
            extra_args = ['--encoder-layerdrop', '0.01', '--decoder-layerdrop', '0.01']
            with self._train_transformer(seed=1, extra_args=extra_args) as model:
                (ensemble, cfg, task) = checkpoint_utils.load_model_ensemble_and_task(filenames=[model], arg_overrides={'encoder_layers_to_keep': '0,2', 'decoder_layers_to_keep': '1'})
                self.assertEqual(len(ensemble), 1)
                # 3 encoder layers pruned to {0, 2}; 3 decoder layers to {1}.
                self.assertEqual(len(ensemble[0].encoder.layers), 2)
                self.assertEqual(len(ensemble[0].decoder.layers), 1)
def sha_conv3x3_block(in_channels, out_channels, stride=1, padding=1, dilation=1, groups=1, bias=False, activation=(lambda : nn.ReLU(inplace=True)), activate=True, shared_conv=None):
    """Build a 3x3 shared-convolution block.

    Thin convenience wrapper around ShaConvBlock that pins kernel_size to 3
    and forwards every other option unchanged.
    """
    options = dict(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias,
        activation=activation,
        activate=activate,
        shared_conv=shared_conv,
    )
    return ShaConvBlock(**options)
def convert_examples_to_features(examples, seq_length, tokenizer):
    """Tokenize each example and convert it to padded BERT-style features.

    Each example yields tokens `[CLS] a... [SEP]` (plus `b... [SEP]` when a
    second text is present), segment ids (0 for the first segment, 1 for the
    second), token ids, and a mask — all padded with zeros to seq_length.
    """
    features = []
    for example in examples:
        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = tokenizer.tokenize(example.text_b) if example.text_b else None
        if tokens_b:
            # Leave room for [CLS], [SEP] and [SEP].
            _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)
        elif len(tokens_a) > (seq_length - 2):
            # Leave room for [CLS] and [SEP].
            tokens_a = tokens_a[0:(seq_length - 2)]
        tokens = ['[CLS]'] + tokens_a + ['[SEP]']
        input_type_ids = [0] * len(tokens)
        if tokens_b:
            tokens = tokens + tokens_b + ['[SEP]']
            input_type_ids = input_type_ids + [1] * (len(tokens_b) + 1)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        input_mask = [1] * len(input_ids)
        # Zero-pad ids, mask and segment ids up to the full sequence length.
        pad_len = seq_length - len(input_ids)
        input_ids.extend([0] * pad_len)
        input_mask.extend([0] * pad_len)
        input_type_ids.extend([0] * pad_len)
        assert len(input_ids) == seq_length
        assert len(input_mask) == seq_length
        assert len(input_type_ids) == seq_length
        features.append(InputFeatures(unique_id=example.unique_id, tokens=tokens, input_ids=input_ids, input_mask=input_mask, input_type_ids=input_type_ids))
    return features
# NOTE(review): `_task(...)` looks like the remnant of a task-registration
# decorator (e.g. @register_task('...')) whose `@` was lost in this view —
# confirm against the original file.
_task('pytorch_translate_translation_from_pretrained_xlm')
class PytorchTranslateTranslationFromPretrainedXLMTask(PytorchTranslateTask):
    """Translation task whose source/target dictionaries are masked-LM
    dictionaries, for initializing from a pretrained XLM model."""

    # NOTE(review): takes only `parser`; presumably @staticmethod in the
    # original — decorators appear stripped in this view.
    def add_args(parser):
        # Inherit the base task's CLI arguments, then add our own flag.
        PytorchTranslateTask.add_args(parser)
        parser.add_argument('--save-only', action='store_true', help='skip eval and only do save')

    # NOTE(review): presumably @classmethod in the original.
    def load_dictionary(cls, filename):
        # Use the masked-LM dictionary format instead of the default one.
        return MaskedLMDictionary.load(filename)

    # NOTE(review): presumably @classmethod in the original.
    def build_dictionary(cls, filenames, workers=1, threshold=(- 1), nwords=(- 1), padding_factor=8):
        # Accumulate token counts from every input file, then finalize
        # (prune by threshold/nwords, pad the vocab size to a multiple).
        d = MaskedLMDictionary()
        for filename in filenames:
            MaskedLMDictionary.add_file_to_dictionary(filename, d, tokenizer.tokenize_line, workers)
        d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
        return d

    # NOTE(review): presumably @classmethod in the original.
    def setup_task(cls, args, **kwargs):
        args.left_pad_source = options.eval_bool(args.left_pad_source)
        source_dict = MaskedLMDictionary.load(args.source_vocab_file)
        target_dict = MaskedLMDictionary.load(args.target_vocab_file)
        source_lang = (args.source_lang or 'src')
        target_lang = (args.target_lang or 'tgt')
        print(f'| [{source_lang}] dictionary: {len(source_dict)} types')
        print(f'| [{target_lang}] dictionary: {len(target_dict)} types')
        # A char-level source dictionary is loaded when a vocab file is given
        # or when the chosen architecture requires char-level source input.
        use_char_source = ((args.char_source_vocab_file != '') or (getattr(args, 'arch', '') in constants.ARCHS_FOR_CHAR_SOURCE))
        if use_char_source:
            char_source_dict = MaskedLMDictionary.load(args.char_source_vocab_file)
            args.char_source_dict_size = len(char_source_dict)
        else:
            char_source_dict = None
        return cls(args, source_dict, target_dict, char_source_dict)
class TestQuantsimConfig():
    """Tests that quantsim JSON config files are parsed correctly and applied
    to the quantizers of a QuantizationSimModel."""

    def test_parse_config_file_defaults(self):
        """The defaults section drives quantizer enablement, symmetry flags,
        and per-channel quantization for every wrapped module."""
        model = SingleResidual()
        model.eval()
        quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True', 'is_symmetric': 'False'}, 'params': {'is_quantized': 'False', 'is_symmetric': 'True'}, 'per_channel_quantization': 'True'}, 'params': {}, 'op_type': {}, 'supergroups': [], 'model_input': {}, 'model_output': {}}
        with open('./data/quantsim_config.json', 'w') as f:
            json.dump(quantsim_config, f)
        sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json', dummy_input=torch.rand(1, 3, 32, 32), in_place=True)
        for (name, module) in sim.model.named_modules():
            if isinstance(module, QcQuantizeWrapper):
                # NOTE(review): relu3 / conv1 / conv2 are special-cased here,
                # presumably due to SingleResidual's graph structure — confirm
                # against the model definition.
                if (name == 'relu3'):
                    assert module.input_quantizers[0].enabled
                else:
                    assert (not module.input_quantizers[0].enabled)
                if (name in ['conv1', 'conv2']):
                    assert (not module.output_quantizers[0].enabled)
                else:
                    assert module.output_quantizers[0].enabled
                # Activations configured asymmetric, non-strict, signed.
                assert (not module.input_quantizers[0].use_symmetric_encodings)
                assert (not module.input_quantizers[0].use_strict_symmetric)
                assert (not module.input_quantizers[0].use_unsigned_symmetric)
                assert (not module.output_quantizers[0].use_symmetric_encodings)
                assert (not module.output_quantizers[0].use_strict_symmetric)
                assert (not module.output_quantizers[0].use_unsigned_symmetric)
                for (_, param_quantizer) in module.param_quantizers.items():
                    # Params disabled but symmetric per the config.
                    assert (not param_quantizer.enabled)
                    assert param_quantizer.use_symmetric_encodings
                    assert (not param_quantizer.use_strict_symmetric)
                    assert (not param_quantizer.use_unsigned_symmetric)
                    # Per-channel quantization => more than one C++ op.
                    assert (len(param_quantizer._cppOp) > 1)
        if os.path.exists('./data/quantsim_config.json'):
            os.remove('./data/quantsim_config.json')
def test_parse_config_file_params(self):
    """The params section overrides defaults for the named parameter
    ('weight' here: quantized and asymmetric, unlike other params)."""
    model = SingleResidual()
    model.eval()
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True', 'is_symmetric': 'False'}, 'params': {'is_quantized': 'False', 'is_symmetric': 'True'}}, 'params': {'weight': {'is_quantized': 'True', 'is_symmetric': 'False'}}, 'op_type': {}, 'supergroups': [], 'model_input': {}, 'model_output': {}}
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json', dummy_input=torch.rand(1, 3, 32, 32))
    for (_, module) in sim.model.named_modules():
        if isinstance(module, QcQuantizeWrapper):
            for (param_name, param_quantizer) in module.param_quantizers.items():
                if (param_name == 'weight'):
                    # NOTE(review): batchnorm weights stay disabled —
                    # presumably excluded by the config logic; confirm.
                    if (module in (sim.model.bn1, sim.model.bn2)):
                        assert (not param_quantizer.enabled)
                    else:
                        assert param_quantizer.enabled
                        assert (not param_quantizer.use_symmetric_encodings)
                else:
                    # Non-weight params keep the defaults.
                    assert (not param_quantizer.enabled)
                    assert param_quantizer.use_symmetric_encodings
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_parse_config_file_default_supported_kernels(self):
    """supported_kernels under defaults are parsed into bitwidth/dtype
    descriptors with string dtypes mapped to QuantizationDataType."""
    model = SingleResidual()
    model.eval()
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True', 'is_symmetric': 'False'}, 'params': {'is_quantized': 'False', 'is_symmetric': 'True'}, 'supported_kernels': [{'activation': {'bitwidth': 16, 'dtype': 'int'}, 'param': {'bitwidth': 8, 'dtype': 'int'}}, {'activation': {'bitwidth': 16, 'dtype': 'float'}, 'param': {'bitwidth': 16, 'dtype': 'float'}}]}, 'params': {'weight': {'is_quantized': 'True', 'is_symmetric': 'False'}}, 'op_type': {}, 'supergroups': [], 'model_input': {}, 'model_output': {}}
    expected_supported_kernels = [{'activation': {'bitwidth': 16, 'dtype': QuantizationDataType.int}, 'param': {'bitwidth': 8, 'dtype': QuantizationDataType.int}}, {'activation': {'bitwidth': 16, 'dtype': QuantizationDataType.float}, 'param': {'bitwidth': 16, 'dtype': QuantizationDataType.float}}]
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json', dummy_input=torch.rand(1, 3, 32, 32))
    supported_kernels_in_defaults = sim.get_supported_kernels()['defaults']
    assert (len(supported_kernels_in_defaults) == 2)
    assert (supported_kernels_in_defaults == expected_supported_kernels)
def test_parse_config_file_op_type(self):
    """op_type settings (here for 'Conv') override defaults: input
    quantization on and bias params quantized asymmetric."""
    model = SingleResidual()
    model.eval()
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True', 'is_symmetric': 'False'}, 'params': {'is_quantized': 'False', 'is_symmetric': 'True'}}, 'params': {}, 'op_type': {'Conv': {'is_input_quantized': 'True', 'is_symmetric': 'False', 'params': {'bias': {'is_quantized': 'True', 'is_symmetric': 'False'}}}}, 'supergroups': [], 'model_input': {}, 'model_output': {}}
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json', dummy_input=torch.rand(1, 3, 32, 32))
    for (name, module) in sim.model.named_modules():
        if isinstance(module, QcQuantizeWrapper):
            if isinstance(module._module_to_wrap, torch.nn.Conv2d):
                # Conv ops get their inputs quantized per the op_type entry.
                assert module.input_quantizers[0].enabled
                # NOTE(review): conv1/conv2 outputs stay unquantized —
                # presumably due to graph position; confirm against the model.
                if (name in ['conv1', 'conv2']):
                    assert (not module.output_quantizers[0].enabled)
                else:
                    assert module.output_quantizers[0].enabled
            else:
                if (name == 'relu3'):
                    assert module.input_quantizers[0].enabled
                else:
                    assert (not module.input_quantizers[0].enabled)
                assert module.output_quantizers[0].enabled
            assert (not module.input_quantizers[0].use_symmetric_encodings)
            assert (not module.output_quantizers[0].use_symmetric_encodings)
            for (param_name, param_quantizer) in module.param_quantizers.items():
                # Only Conv bias params are enabled, per the op_type entry.
                if (isinstance(module._module_to_wrap, torch.nn.Conv2d) and (param_name == 'bias')):
                    assert param_quantizer.enabled
                    assert (not param_quantizer.use_symmetric_encodings)
                else:
                    assert (not param_quantizer.enabled)
                    assert param_quantizer.use_symmetric_encodings
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def _test_parse_config_file_op_type_per_channel_helper(self, per_channel_fields):
    """Build a QuantizationSimModel for MultiInput with per-channel settings
    taken from per_channel_fields, which must provide 'defaults', 'Conv'
    and 'Gemm' keys (each a 'True'/'False' string)."""
    for k in ['defaults', 'Conv', 'Gemm']:
        assert (k in per_channel_fields.keys())
    model = MultiInput()
    model.eval()
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True', 'is_symmetric': 'False'}, 'params': {'is_quantized': 'True', 'is_symmetric': 'True'}, 'per_channel_quantization': per_channel_fields['defaults']}, 'params': {'bias': {'is_quantized': 'False'}}, 'op_type': {'Conv': {'per_channel_quantization': per_channel_fields['Conv']}, 'Gemm': {'per_channel_quantization': per_channel_fields['Gemm']}}, 'supergroups': [], 'model_input': {}, 'model_output': {}}
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json', dummy_input=(torch.rand(1, 3, 32, 32), torch.rand(1, 3, 20, 20)))
    return sim
def test_parse_config_file_op_type_per_channel(self):
    """Per-channel quantization can be set globally and overridden per op
    type; the chosen quantizer class and exported encoding lengths reflect
    each combination."""
    # All per-channel: every weight gets a per-channel quantizer.
    per_channel_fields = {'defaults': 'True', 'Conv': 'True', 'Gemm': 'True'}
    sim = self._test_parse_config_file_op_type_per_channel_helper(per_channel_fields)
    for (name, module) in sim.model.named_modules():
        if isinstance(module, QcQuantizeWrapper):
            if ('weight' in module.param_quantizers):
                assert isinstance(module.param_quantizers['weight'], StaticGridPerChannelQuantizer)
    # Conv opted out: Conv weights fall back to per-tensor.
    per_channel_fields = {'defaults': 'True', 'Conv': 'False', 'Gemm': 'True'}
    sim = self._test_parse_config_file_op_type_per_channel_helper(per_channel_fields)
    for (name, module) in sim.model.named_modules():
        if isinstance(module, QcQuantizeWrapper):
            if ('weight' in module.param_quantizers):
                if isinstance(module._module_to_wrap, torch.nn.Conv2d):
                    assert isinstance(module.param_quantizers['weight'], StaticGridPerTensorQuantizer)
                else:
                    assert isinstance(module.param_quantizers['weight'], StaticGridPerChannelQuantizer)
    # Conv and Gemm both opted out: only other op types stay per-channel.
    per_channel_fields = {'defaults': 'True', 'Conv': 'False', 'Gemm': 'False'}
    sim = self._test_parse_config_file_op_type_per_channel_helper(per_channel_fields)
    for (name, module) in sim.model.named_modules():
        if isinstance(module, QcQuantizeWrapper):
            if ('weight' in module.param_quantizers):
                if (isinstance(module._module_to_wrap, torch.nn.Conv2d) or isinstance(module._module_to_wrap, torch.nn.Linear)):
                    assert isinstance(module.param_quantizers['weight'], StaticGridPerTensorQuantizer)
                else:
                    assert isinstance(module.param_quantizers['weight'], StaticGridPerChannelQuantizer)
    # Per-channel off by default, on for Conv only.
    per_channel_fields = {'defaults': 'False', 'Conv': 'True', 'Gemm': 'False'}
    sim = self._test_parse_config_file_op_type_per_channel_helper(per_channel_fields)
    for (name, module) in sim.model.named_modules():
        if isinstance(module, QcQuantizeWrapper):
            if ('weight' in module.param_quantizers):
                if isinstance(module._module_to_wrap, torch.nn.Conv2d):
                    assert isinstance(module.param_quantizers['weight'], StaticGridPerChannelQuantizer)
                else:
                    assert isinstance(module.param_quantizers['weight'], StaticGridPerTensorQuantizer)
    random_input = (torch.rand(1, 3, 32, 32), torch.rand(1, 3, 20, 20))
    def forward_pass(model, args):
        # Single no-grad forward pass used to compute encodings.
        model.eval()
        with torch.no_grad():
            model(*random_input)
    sim.compute_encodings(forward_pass, None)
    sim.export('./data/', 'test_parse_config_file_op_type_per_channel', dummy_input=(torch.rand(1, 3, 32, 32), torch.rand(1, 3, 20, 20)))
    with open('./data/test_parse_config_file_op_type_per_channel.encodings', 'r') as encodings_file:
        encodings = json.load(encodings_file)
    # Per-tensor params export one encoding; per-channel Convs export one
    # encoding per output channel.
    assert (len(encodings['param_encodings']['bn1.weight']) == 1)
    assert (len(encodings['param_encodings']['fc.weight']) == 1)
    assert (len(encodings['param_encodings']['conv1.weight']) == 16)
    assert (len(encodings['param_encodings']['conv2.weight']) == 8)
    assert (len(encodings['param_encodings']['conv3.weight']) == 8)
def test_hw_version(self):
    """hw_version is read from the defaults section when present and falls
    back to 'default' otherwise."""
    model = SingleResidual()
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True', 'is_symmetric': 'False'}, 'params': {'is_quantized': 'False', 'is_symmetric': 'True'}, 'hw_version': 'V01'}, 'params': {}, 'op_type': {}, 'supergroups': [], 'model_input': {}, 'model_output': {}}
    config_file = './data/quantsim_config.json'
    with open(config_file, 'w') as f:
        json.dump(quantsim_config, f)
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file=config_file, dummy_input=torch.rand(1, 3, 32, 32))
    version = sim.configure_quantization_ops(config_file, 8, 8, QuantizationDataType.int)._get_hw_version()
    assert (version == 'V01')
    # Same config without hw_version: the getter reports 'default'.
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True', 'is_symmetric': 'False'}, 'params': {'is_quantized': 'False', 'is_symmetric': 'True'}}, 'params': {}, 'op_type': {}, 'supergroups': [], 'model_input': {}, 'model_output': {}}
    with open(config_file, 'w') as f:
        json.dump(quantsim_config, f)
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file=config_file, dummy_input=torch.rand(1, 3, 32, 32))
    version = sim.configure_quantization_ops(config_file, 8, 8, QuantizationDataType.int)._get_hw_version()
    assert (version == 'default')
def test_op_instance_config_1(self):
    """Per-op-type supported_kernels and per-channel settings override the
    defaults for Conv modules across several model variants."""
    for model in [SingleResidual(), SingleResidualWithAvgPool(), SingleResidualWithModuleAdd()]:
        quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True', 'is_symmetric': 'False'}, 'params': {'is_quantized': 'False', 'is_symmetric': 'True'}, 'hw_version': 'V01', 'supported_kernels': [{'activation': {'bitwidth': 16, 'dtype': 'float'}, 'param': {'bitwidth': 16, 'dtype': 'float'}}], 'per_channel_quantization': 'True'}, 'params': {}, 'op_type': {'Conv': {'supported_kernels': [{'activation': {'bitwidth': 16, 'dtype': 'int'}, 'param': {'bitwidth': 8, 'dtype': 'int'}}], 'per_channel_quantization': 'False'}}, 'supergroups': [], 'model_input': {}, 'model_output': {}}
        with open('./data/quantsim_config.json', 'w') as f:
            json.dump(quantsim_config, f)
        sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json', dummy_input=torch.rand(1, 3, 32, 32))
        for (_, module) in sim.model.named_modules():
            if isinstance(module, QcQuantizeWrapper):
                assert (len(module.supported_kernels) == 1)
                if (module._module_to_wrap._get_name() == 'Conv2d'):
                    # Conv entry: per-tensor and the int16/int8 kernel.
                    assert isinstance(module.param_quantizers['weight'], StaticGridPerTensorQuantizer)
                    assert (module.supported_kernels == [((16, QuantizationDataType.int), (8, QuantizationDataType.int))])
                else:
                    # Everything else keeps the defaults: per-channel and
                    # the float16 kernel.
                    if module.param_quantizers:
                        assert isinstance(module.param_quantizers['weight'], StaticGridPerChannelQuantizer)
                    if module.supported_kernels:
                        assert (module.supported_kernels == [((16, QuantizationDataType.float), (16, QuantizationDataType.float))])
        del sim
def test_parse_config_file_op_type_supported_kernels(self):
    """Op-type-level supported_kernels override the defaults for that op
    type: the 'Conv' entry declares an int16-activation/int8-param kernel
    and get_supported_kernels() must report exactly that for 'Conv'."""
    model = SingleResidual()
    model.eval()
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True', 'is_symmetric': 'False'}, 'params': {'is_quantized': 'False', 'is_symmetric': 'True'}, 'supported_kernels': [{'activation': {'bitwidth': 16, 'dtype': 'int'}, 'param': {'bitwidth': 16, 'dtype': 'int'}}]}, 'params': {'weight': {'is_quantized': 'True', 'is_symmetric': 'False'}}, 'op_type': {'Conv': {'supported_kernels': [{'activation': {'bitwidth': 16, 'dtype': 'int'}, 'param': {'bitwidth': 8, 'dtype': 'int'}}]}}, 'supergroups': [], 'model_input': {}, 'model_output': {}}
    expected_supported_kernels = [{'activation': {'bitwidth': 16, 'dtype': QuantizationDataType.int}, 'param': {'bitwidth': 8, 'dtype': QuantizationDataType.int}}]
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json', dummy_input=torch.rand(1, 3, 32, 32))
    # Fix: this local was previously named `supported_kernels_in_defaults`
    # even though it reads the 'Conv' entry; renamed to match what it holds.
    supported_kernels_for_conv = sim.get_supported_kernels()['Conv']
    assert (len(supported_kernels_for_conv) == 1)
    assert (supported_kernels_for_conv == expected_supported_kernels)
def test_parse_config_file_supported_kernels_1(self):
    """Each wrapper exposes its resolved supported_kernels: the Conv
    op-type kernel for Conv2d modules, the defaults kernel elsewhere."""
    model = SingleResidual()
    model.eval()
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True', 'is_symmetric': 'False'}, 'params': {'is_quantized': 'False', 'is_symmetric': 'True'}, 'supported_kernels': [{'activation': {'bitwidth': 16, 'dtype': 'int'}, 'param': {'bitwidth': 16, 'dtype': 'int'}}]}, 'params': {'weight': {'is_quantized': 'True', 'is_symmetric': 'True'}}, 'op_type': {'Conv': {'supported_kernels': [{'activation': {'bitwidth': 16, 'dtype': 'int'}, 'param': {'bitwidth': 8, 'dtype': 'int'}}]}}, 'supergroups': [], 'model_input': {}, 'model_output': {}}
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json', dummy_input=torch.rand(1, 3, 32, 32), default_param_bw=16, default_output_bw=16, default_data_type=QuantizationDataType.int)
    for (_, module) in sim.model.named_modules():
        if isinstance(module, QcQuantizeWrapper):
            if (module._module_to_wrap._get_name() == 'Conv2d'):
                assert (module.supported_kernels == [((16, QuantizationDataType.int), (8, QuantizationDataType.int))])
            else:
                assert (module.supported_kernels == [((16, QuantizationDataType.int), (16, QuantizationDataType.int))])
def test_parse_config_file_supported_kernels_2(self):
    """With assert_on_error, building a sim whose default bitwidths do not
    match any supported kernel raises a RuntimeError."""
    model = SingleResidual()
    model.eval()
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True', 'is_symmetric': 'False'}, 'params': {'is_quantized': 'False', 'is_symmetric': 'True'}, 'supported_kernels': [{'activation': {'bitwidth': 16, 'dtype': 'int'}, 'param': {'bitwidth': 16, 'dtype': 'int'}}]}, 'params': {'weight': {'is_quantized': 'True', 'is_symmetric': 'True'}}, 'op_type': {}, 'supergroups': [], 'model_input': {}, 'model_output': {}}
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    # Module-level switch: escalate unsupported-kernel handling to an error.
    aimet_torch.quantsim.SUPPORTED_KERNELS_ACTION = SupportedKernelsAction.assert_on_error
    with pytest.raises(RuntimeError):
        # default_output_bw=8 does not match the int16/int16 kernel above.
        QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json', dummy_input=torch.rand(1, 3, 32, 32), default_param_bw=16, default_output_bw=8, default_data_type=QuantizationDataType.int)
    # Restore the softer behavior for subsequent tests.
    aimet_torch.quantsim.SUPPORTED_KERNELS_ACTION = SupportedKernelsAction.warn_on_error
def test_parse_config_file_supergroups(self):
    """Supergroup op lists fuse quantization: every op in a matched group
    except the last has its output quantizer disabled.

    Consistency fix: the assertions previously mixed `model.` and
    `sim.model.` references — equivalent here only because the sim is built
    with in_place=True. All assertions now go through `sim.model`.
    """
    model = QuantSimTinyModel()
    model.eval()
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True', 'is_symmetric': 'False'}, 'params': {'is_quantized': 'False', 'is_symmetric': 'False'}}, 'params': {}, 'op_type': {}, 'supergroups': [{'op_list': ['Conv', 'Relu']}, {'op_list': ['Relu', 'MaxPool']}, {'op_list': ['Conv', 'Relu', 'AveragePool']}, {'op_list': ['Conv', 'Clip']}], 'model_input': {}, 'model_output': {}}
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json', in_place=True, dummy_input=torch.rand(1, 3, 32, 32))
    # No model_input entry, so no wrapper quantizes its input.
    for (_, module) in sim.model.named_modules():
        if isinstance(module, QcQuantizeWrapper):
            assert (not module.input_quantizers[0].enabled)
    # Ops inside a supergroup keep their output quantizer disabled; only the
    # last op of each group (and ungrouped ops) quantize their output.
    assert (not sim.model.conv1.output_quantizers[0].enabled)
    assert (not sim.model.bn1.output_quantizers[0].enabled)
    assert (not sim.model.relu1.output_quantizers[0].enabled)
    assert sim.model.maxpool.output_quantizers[0].enabled
    assert (not sim.model.conv2.output_quantizers[0].enabled)
    assert (not sim.model.bn2.output_quantizers[0].enabled)
    assert sim.model.relu2.output_quantizers[0].enabled
    assert (not sim.model.conv3.output_quantizers[0].enabled)
    assert (not sim.model.relu3.output_quantizers[0].enabled)
    assert sim.model.avgpool.output_quantizers[0].enabled
    assert sim.model.conv4.output_quantizers[0].enabled
    assert sim.model.fc.output_quantizers[0].enabled
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_parse_config_file_elementwise_ops(self):
    """An op_type entry for 'Add' enables output quantization on the ops
    that feed the elementwise add, without enabling any input quantizers."""
    model = SingleResidual()
    model.eval()
    quantsim_config = {'defaults': {'ops': {}, 'params': {}}, 'params': {}, 'op_type': {'Add': {'is_input_quantized': 'True'}}, 'supergroups': [], 'model_input': {}, 'model_output': {}}
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json', dummy_input=torch.rand(1, 3, 32, 32))
    for (name, module) in sim.model.named_modules():
        if isinstance(module, QcQuantizeWrapper):
            # NOTE(review): conv3 and ada are presumably the producers of the
            # Add's inputs in SingleResidual — confirm against the model.
            if (name in ['conv3', 'ada']):
                assert module.output_quantizers[0].enabled
            else:
                assert (not module.output_quantizers[0].enabled)
            assert (not module.input_quantizers[0].enabled)
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_parse_config_file_model_inputs(self):
    """model_input.is_input_quantized enables input quantizers only on the
    modules that consume actual model inputs (constants excluded)."""
    class MultiInputWithConstant(torch.nn.Module):
        # Two-input model whose second add also takes a constant tensor.
        def __init__(self, num_classes=3):
            super(MultiInputWithConstant, self).__init__()
            self.conv1 = torch.nn.Conv2d(3, 16, kernel_size=2, stride=2, padding=3, bias=False)
            self.conv2 = torch.nn.Conv2d(16, 8, kernel_size=3, stride=2, padding=2)
            self.conv3 = torch.nn.Conv2d(3, 8, kernel_size=3, stride=2, padding=2)
            self.add1 = Add()
            self.add2 = Add()

        def forward(self, *inputs):
            x1 = self.conv1(inputs[0])
            x1 = self.conv2(x1)
            x2 = self.conv3(inputs[1])
            x = self.add1(x1, x2)
            # Second operand is a constant, not a model input.
            x = self.add2(x, torch.tensor(2.0))
            return x
    model = MultiInputWithConstant()
    model.eval()
    quantsim_config = {'defaults': {'ops': {}, 'params': {}}, 'params': {}, 'op_type': {}, 'supergroups': [], 'model_input': {'is_input_quantized': 'True'}, 'model_output': {}}
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json', dummy_input=(torch.rand(1, 3, 32, 32), torch.rand(1, 3, 20, 20)), in_place=True)
    for (name, module) in sim.model.named_modules():
        if isinstance(module, QcQuantizeWrapper):
            # conv1 and conv3 consume the two model inputs.
            if (name in ('conv1', 'conv3')):
                assert module.input_quantizers[0].enabled
            elif (name == 'add2'):
                # First operand comes from add1 (not a model input); the
                # constant operand gets an input quantizer.
                assert (not module.input_quantizers[0].enabled)
                assert module.input_quantizers[1].enabled
            else:
                assert (not module.input_quantizers[0].enabled)
            assert (not module.output_quantizers[0].enabled)
            assert (not module.input_quantizers[0].use_symmetric_encodings)
            assert (not module.output_quantizers[0].use_symmetric_encodings)
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_parse_config_file_model_outputs(self):
    """model_output.is_output_quantized enables the output quantizer only
    on the final module ('fc' in SingleResidual)."""
    model = SingleResidual()
    model.eval()
    quantsim_config = {'defaults': {'ops': {}, 'params': {}}, 'params': {}, 'op_type': {}, 'supergroups': [], 'model_input': {}, 'model_output': {'is_output_quantized': 'True'}}
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json', dummy_input=torch.rand(1, 3, 32, 32))
    for (name, module) in sim.model.named_modules():
        if isinstance(module, QcQuantizeWrapper):
            if (name == 'fc'):
                assert module.output_quantizers[0].enabled
            else:
                assert (not module.output_quantizers[0].enabled)
            assert (not module.input_quantizers[0].enabled)
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_supergroups_with_functional_add(self):
    """An ['Add', 'Relu'] supergroup also matches a functional (non-module)
    add, disabling the input quantizer of the following relu."""
    model = SingleResidual()
    model.eval()
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True'}, 'params': {}}, 'params': {}, 'op_type': {}, 'supergroups': [{'op_list': ['Add', 'Relu']}], 'model_input': {}, 'model_output': {}}
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json', in_place=True, dummy_input=torch.rand(1, 3, 32, 32))
    # relu3 follows the functional add, so its input quantizer stays off.
    assert (not sim.model.relu3.input_quantizers[0].enabled)
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_supergroups_with_module_add(self):
    """An ['Add', 'Relu'] supergroup matches a module-based add: the add's
    output quantizer is disabled and the following relu quantizes instead."""
    model = SingleResidualWithModuleAdd()
    model.eval()
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True'}, 'params': {}}, 'params': {}, 'op_type': {}, 'supergroups': [{'op_list': ['Add', 'Relu']}], 'model_input': {}, 'model_output': {}}
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json', in_place=True, dummy_input=torch.rand(1, 3, 32, 32))
    for (_, module) in sim.model.named_modules():
        if isinstance(module, QcQuantizeWrapper):
            # NOTE(review): conv1/conv2 are also expected unquantized —
            # presumably due to graph position; confirm against the model.
            if (module in [model.add, model.conv1, model.conv2]):
                assert (not module.output_quantizers[0].enabled)
            else:
                assert module.output_quantizers[0].enabled
            assert (not module.input_quantizers[0].enabled)
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_parse_config_file_symmetric_modes(self):
    """strict_symmetric / unsigned_symmetric defaults propagate to every
    input, output, and param quantizer."""
    model = SingleResidual()
    model.eval()
    quantsim_config = {'defaults': {'ops': {}, 'params': {'is_symmetric': 'True'}, 'per_channel_quantization': 'True', 'strict_symmetric': 'True', 'unsigned_symmetric': 'False'}, 'params': {}, 'op_type': {}, 'supergroups': [], 'model_input': {}, 'model_output': {}}
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json', dummy_input=torch.rand(1, 3, 32, 32))
    for (name, module) in sim.model.named_modules():
        if isinstance(module, QcQuantizeWrapper):
            for q in module.input_quantizers:
                assert q.use_strict_symmetric
                assert (not q.use_unsigned_symmetric)
            for q in module.output_quantizers:
                assert q.use_strict_symmetric
                assert (not q.use_unsigned_symmetric)
            for q in module.param_quantizers.values():
                assert q.use_strict_symmetric
                assert (not q.use_unsigned_symmetric)
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_get_all_ops_in_neighborhood(self):
    """Walking 'output'-side neighbors of conv3 finds conv3 itself and the
    Add op it feeds."""
    model = SingleResidual()
    model.eval()
    input_shapes = (1, 3, 32, 32)
    random_inputs = utils.create_rand_tensors_given_shapes(input_shapes, utils.get_device(model))
    conn_graph = ConnectedGraph(model, random_inputs)
    starting_op = conn_graph.get_op_from_module_name('SingleResidual.conv3')
    add_op = [op for op in conn_graph.get_all_ops().values() if (op.type == 'Add')][0]
    neighborhood = get_all_ops_in_neighborhood(starting_op, 'output')
    assert (len(neighborhood) == 2)
    assert (starting_op in neighborhood)
    assert (add_op in neighborhood)
# NOTE(review): `.cuda` looks like the remnant of a decorator (e.g.
# @pytest.mark.cuda) whose prefix was lost in this view — confirm against
# the original file.
.cuda
def test_parse_config_file_defaults_gpu(self):
    """GPU variant of test_parse_config_file_defaults: same default-section
    behavior with the model and dummy input on CUDA (and per-tensor params,
    i.e. a single C++ op per param quantizer)."""
    model = SingleResidual()
    model.eval()
    model.cuda()
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True', 'is_symmetric': 'False'}, 'params': {'is_quantized': 'False', 'is_symmetric': 'True'}}, 'params': {}, 'op_type': {}, 'supergroups': [], 'model_input': {}, 'model_output': {}}
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json', dummy_input=torch.rand(1, 3, 32, 32).cuda(), in_place=True)
    for (name, module) in sim.model.named_modules():
        if isinstance(module, QcQuantizeWrapper):
            if (name == 'relu3'):
                assert module.input_quantizers[0].enabled
            else:
                assert (not module.input_quantizers[0].enabled)
            if (name in ['conv1', 'conv2']):
                assert (not module.output_quantizers[0].enabled)
            else:
                assert module.output_quantizers[0].enabled
            assert (not module.input_quantizers[0].use_symmetric_encodings)
            assert (not module.output_quantizers[0].use_symmetric_encodings)
            for (_, param_quantizer) in module.param_quantizers.items():
                assert (not param_quantizer.enabled)
                assert param_quantizer.use_symmetric_encodings
                # No per_channel_quantization in this config => per-tensor,
                # a single C++ op per param quantizer.
                assert (len(param_quantizer._cppOp) == 1)
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_gelu_layernorm_quantsim_config(self):
    """op_type entries for LayerNorm and GELU must enable their input quantizers
    and (for LayerNorm) quantize the bias parameter, using per-tensor grids."""
    import json
    import aimet_common.libpymo as libpymo
    from aimet_common.defs import QuantScheme
    from aimet_torch.quantsim import QuantizationSimModel
    class ModelWithGeluLayerNorm(torch.nn.Module):
        # Minimal Linear -> LayerNorm -> GELU pipeline to exercise the two op types.
        def __init__(self):
            super(ModelWithGeluLayerNorm, self).__init__()
            self.linear1 = torch.nn.Linear(4, 4)
            self.ln1 = torch.nn.LayerNorm(4)
            self.gelu1 = torch.nn.GELU()
        def forward(self, x):
            x = self.linear1(x)
            x = self.ln1(x)
            x = self.gelu1(x)
            return x
    # op_type overrides: quantize inputs of LayerNorm/GELU and LayerNorm's bias.
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True', 'is_symmetric': 'False'}, 'params': {'is_quantized': 'False', 'is_symmetric': 'True'}}, 'params': {}, 'op_type': {'LayerNorm': {'is_input_quantized': 'True', 'params': {'bias': {'is_quantized': 'True'}}}, 'GELU': {'is_input_quantized': 'True'}}, 'supergroups': [], 'model_input': {}, 'model_output': {}}
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    model = ModelWithGeluLayerNorm()
    model.eval()
    random_input = torch.rand(1, 4, 4)
    def forward_pass(model, args):
        # NOTE(review): `*random_input` unpacks along dim 0, passing one (4, 4)
        # tensor — looks intentional for the batch of 1, but confirm.
        model.eval()
        with torch.no_grad():
            model(*random_input)
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, config_file='./data/quantsim_config.json', dummy_input=random_input)
    sim.compute_encodings(forward_pass, None)
    from aimet_torch.qc_quantize_op import StaticGridPerTensorQuantizer
    # LayerNorm: per-tensor quantizers on weight, bias and input; input has an encoding.
    assert isinstance(sim.model.ln1.param_quantizers['weight'], StaticGridPerTensorQuantizer)
    assert isinstance(sim.model.ln1.param_quantizers['bias'], StaticGridPerTensorQuantizer)
    assert isinstance(sim.model.ln1.input_quantizers[0], StaticGridPerTensorQuantizer)
    assert sim.model.ln1.input_quantizers[0].encoding
    in_quantizer = sim.model.ln1.input_quantizers[0]
    assert in_quantizer.enabled
    assert (in_quantizer.round_mode == libpymo.RoundingMode.ROUND_NEAREST)
    assert (in_quantizer.quant_scheme == QuantScheme.post_training_tf)
    assert (in_quantizer.bitwidth == 8)
    # GELU: input quantizer enabled with default rounding/scheme/bitwidth.
    assert isinstance(sim.model.gelu1.input_quantizers[0], StaticGridPerTensorQuantizer)
    assert sim.model.gelu1.input_quantizers[0].encoding
    in_quantizer = sim.model.gelu1.input_quantizers[0]
    assert in_quantizer.enabled
    assert (in_quantizer.round_mode == libpymo.RoundingMode.ROUND_NEAREST)
    assert (in_quantizer.quant_scheme == QuantScheme.post_training_tf)
    assert (in_quantizer.bitwidth == 8)
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_default_quantsim_config_not_in_default_config_file_enforce_false(self):
    """With dtype/bitwidth enforcement OFF, the requested int8 defaults must be
    kept even though the config's supported_kernels only list fp16."""
    qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = False
    model = SingleResidual()
    model.eval()
    # supported_kernels advertise fp16 only — ignored because enforcement is off.
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True'}, 'params': {'is_quantized': 'True'}, 'supported_kernels': [{'activation': {'bitwidth': 16, 'dtype': 'float'}, 'param': {'bitwidth': 16, 'dtype': 'float'}}]}, 'params': {'bias': {'is_quantized': 'False'}}, 'op_type': {'Conv': {'is_input_quantized': 'True', 'is_output_quantized': 'True', 'params': {'weight': {'is_quantized': 'True'}, 'bias': {'is_quantized': 'False'}}}}, 'supergroups': [], 'model_input': {'is_input_quantized': 'True'}, 'model_output': {}}
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    INPUT_SHAPE = (1, 3, 32, 32)
    def forward_fn(model, _):
        torch.manual_seed(10)
        model.eval()
        with torch.no_grad():
            _ = model(torch.randn(INPUT_SHAPE))
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json', dummy_input=torch.rand(1, 3, 32, 32), in_place=True, default_param_bw=8, default_output_bw=8, default_data_type=QuantizationDataType.int)
    sim.compute_encodings(forward_fn, forward_pass_callback_args=None)
    # All quantizers stay at the int8 defaults requested on the QuantizationSimModel.
    assert (sim.model.conv1.param_quantizers['weight'].enabled == True)
    assert (sim.model.conv1.param_quantizers['weight'].bitwidth == 8)
    assert (sim.model.conv1.param_quantizers['weight'].data_type == QuantizationDataType.int)
    assert (sim.model.conv1.output_quantizers[0].bitwidth == 8)
    assert (sim.model.conv1.output_quantizers[0].data_type == QuantizationDataType.int)
    assert sim.model.fc.param_quantizers['weight'].enabled
    # Bias quantization disabled via 'params'/'bias' config; bitwidth still reported.
    assert (sim.model.fc.param_quantizers['bias'].enabled == False)
    assert (sim.model.fc.param_quantizers['weight'].bitwidth == 8)
    assert (sim.model.fc.param_quantizers['weight'].data_type == QuantizationDataType.int)
    assert (sim.model.fc.param_quantizers['bias'].bitwidth == 8)
    assert (sim.model.fc.param_quantizers['bias'].data_type == QuantizationDataType.int)
    assert (sim.model.fc.output_quantizers[0].bitwidth == 8)
    assert (sim.model.fc.output_quantizers[0].data_type == QuantizationDataType.int)
    assert (sim.model.relu1.output_quantizers[0].bitwidth == 8)
    assert (sim.model.relu1.output_quantizers[0].data_type == QuantizationDataType.int)
    # Restore the module-level flag for subsequent tests.
    qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = False
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_default_quantsim_config_in_default_config_file_enforce_true(self):
    """With enforcement ON, quantizers must be promoted to the first supported
    kernel (fp16) even though int8 defaults were requested."""
    qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = True
    model = SingleResidual()
    model.eval()
    # Defaults list fp16 first, int8 second; Conv additionally overrides kernels.
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True'}, 'params': {'is_quantized': 'True'}, 'supported_kernels': [{'activation': {'bitwidth': 16, 'dtype': 'float'}, 'param': {'bitwidth': 16, 'dtype': 'float'}}, {'activation': {'bitwidth': 8, 'dtype': 'int'}, 'param': {'bitwidth': 8, 'dtype': 'int'}}]}, 'params': {'bias': {'is_quantized': 'False'}}, 'op_type': {'Conv': {'supported_kernels': [{'activation': {'bitwidth': 16, 'dtype': 'float'}, 'param': {'bitwidth': 16, 'dtype': 'float'}}, {'activation': {'bitwidth': 8, 'dtype': 'int'}, 'param': {'bitwidth': 16, 'dtype': 'int'}}], 'is_input_quantized': 'True', 'is_output_quantized': 'True', 'params': {'weight': {'is_quantized': 'True'}, 'bias': {'is_quantized': 'False'}}}}, 'supergroups': [], 'model_input': {'is_input_quantized': 'True'}, 'model_output': {}}
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    INPUT_SHAPE = (1, 3, 32, 32)
    def forward_fn(model, _):
        torch.manual_seed(10)
        model.eval()
        with torch.no_grad():
            _ = model(torch.randn(INPUT_SHAPE))
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json', dummy_input=torch.rand(1, 3, 32, 32), in_place=True, default_data_type=QuantizationDataType.int, default_output_bw=8, default_param_bw=8)
    sim.compute_encodings(forward_fn, forward_pass_callback_args=None)
    # Everything was upgraded to the 16-bit float kernel by the enforcement pass.
    assert (sim.model.conv1.param_quantizers['weight'].enabled == True)
    assert (sim.model.conv1.param_quantizers['weight'].bitwidth == 16)
    assert (sim.model.conv1.param_quantizers['weight'].data_type == QuantizationDataType.float)
    assert (sim.model.conv1.output_quantizers[0].bitwidth == 16)
    assert (sim.model.conv1.output_quantizers[0].data_type == QuantizationDataType.float)
    assert sim.model.fc.param_quantizers['weight'].enabled
    assert (sim.model.fc.param_quantizers['bias'].enabled == False)
    assert (sim.model.fc.param_quantizers['weight'].bitwidth == 16)
    assert (sim.model.fc.param_quantizers['weight'].data_type == QuantizationDataType.float)
    assert (sim.model.fc.param_quantizers['bias'].bitwidth == 16)
    assert (sim.model.fc.param_quantizers['bias'].data_type == QuantizationDataType.float)
    assert (sim.model.fc.output_quantizers[0].bitwidth == 16)
    assert (sim.model.fc.output_quantizers[0].data_type == QuantizationDataType.float)
    assert (sim.model.relu1.output_quantizers[0].bitwidth == 16)
    assert (sim.model.relu1.output_quantizers[0].data_type == QuantizationDataType.float)
    # Restore the module-level flag for subsequent tests.
    qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = False
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_default_quantsim_config_not_in_default_config_file_enforce_true(self):
    """Enforcement ON with no matching int8 kernel: all quantizers fall back to
    the first supported kernel, fp16."""
    qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = True
    model = SingleResidual()
    model.eval()
    # supported_kernels offer fp16 and int16, but NOT the requested int8.
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True'}, 'params': {'is_quantized': 'True'}, 'supported_kernels': [{'activation': {'bitwidth': 16, 'dtype': 'float'}, 'param': {'bitwidth': 16, 'dtype': 'float'}}, {'activation': {'bitwidth': 16, 'dtype': 'int'}, 'param': {'bitwidth': 16, 'dtype': 'int'}}]}, 'params': {'bias': {'is_quantized': 'False'}}, 'op_type': {'Conv': {'is_input_quantized': 'True', 'is_output_quantized': 'True', 'params': {'weight': {'is_quantized': 'True'}, 'bias': {'is_quantized': 'False'}}}}, 'supergroups': [], 'model_input': {'is_input_quantized': 'True'}, 'model_output': {}}
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    INPUT_SHAPE = (1, 3, 32, 32)
    def forward_fn(model, _):
        torch.manual_seed(10)
        model.eval()
        with torch.no_grad():
            _ = model(torch.randn(INPUT_SHAPE))
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json', dummy_input=torch.rand(1, 3, 32, 32), in_place=True, default_data_type=QuantizationDataType.int, default_output_bw=8, default_param_bw=8)
    sim.compute_encodings(forward_fn, forward_pass_callback_args=None)
    # Fallback kernel is 16-bit float everywhere.
    assert (sim.model.conv1.param_quantizers['weight'].enabled == True)
    assert (sim.model.conv1.param_quantizers['weight'].bitwidth == 16)
    assert (sim.model.conv1.param_quantizers['weight'].data_type == QuantizationDataType.float)
    assert (sim.model.conv1.output_quantizers[0].bitwidth == 16)
    assert (sim.model.conv1.output_quantizers[0].data_type == QuantizationDataType.float)
    assert sim.model.fc.param_quantizers['weight'].enabled
    assert (sim.model.fc.param_quantizers['bias'].enabled == False)
    assert (sim.model.fc.param_quantizers['weight'].bitwidth == 16)
    assert (sim.model.fc.param_quantizers['weight'].data_type == QuantizationDataType.float)
    assert (sim.model.fc.param_quantizers['bias'].bitwidth == 16)
    assert (sim.model.fc.param_quantizers['bias'].data_type == QuantizationDataType.float)
    assert (sim.model.fc.output_quantizers[0].bitwidth == 16)
    assert (sim.model.fc.output_quantizers[0].data_type == QuantizationDataType.float)
    assert (sim.model.relu1.output_quantizers[0].bitwidth == 16)
    assert (sim.model.relu1.output_quantizers[0].data_type == QuantizationDataType.float)
    # Restore the module-level flag for subsequent tests.
    qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = False
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_check_correctness_of_dtype_bw_rules_valid_case(self):
    """check_correctness_of_dtype_bw_rules must accept a config whose default
    supported_kernels include the requested int8 activation / int8 param combo."""
    model = SingleResidual()
    model.eval()
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True'}, 'params': {'is_quantized': 'True'}, 'supported_kernels': [{'activation': {'bitwidth': 16, 'dtype': 'int'}, 'param': {'bitwidth': 16, 'dtype': 'int'}}, {'activation': {'bitwidth': 8, 'dtype': 'int'}, 'param': {'bitwidth': 16, 'dtype': 'int'}}]}, 'params': {'bias': {'is_quantized': 'False'}}, 'op_type': {'Conv': {'supported_kernels': [{'activation': {'bitwidth': 16, 'dtype': 'float'}, 'param': {'bitwidth': 16, 'dtype': 'float'}}], 'is_input_quantized': 'True', 'is_output_quantized': 'True', 'params': {'weight': {'is_quantized': 'True'}, 'bias': {'is_quantized': 'False'}}}}, 'supergroups': [], 'model_input': {'is_input_quantized': 'True'}, 'model_output': {}}
    config_file = './data/quantsim_config.json'
    with open(config_file, 'w') as f:
        json.dump(quantsim_config, f)
    INPUT_SHAPE = (1, 3, 32, 32)
    # NOTE(review): forward_fn is defined but never called in this test.
    def forward_fn(model, _):
        torch.manual_seed(10)
        model.eval()
        with torch.no_grad():
            _ = model(torch.randn(INPUT_SHAPE))
    from aimet_torch.quantsim_config.quantsim_config import QuantSimConfigurator
    dummy_input = torch.randn(INPUT_SHAPE)
    connected_graph = ConnectedGraph(model, dummy_input)
    # NOTE(review): this local deliberately-or-not shadows the module-level
    # `qsim_config` that sibling tests use for ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG.
    qsim_config = QuantSimConfigurator(model, connected_graph, config_file, quantsim_output_bw=8, quantsim_param_bw=8, quantsim_data_type=QuantizationDataType.int)
    qsim_dtype_bw = QuantDtypeBwInfo(act_dtype=QuantizationDataType.int, act_bw=8, param_dtype=QuantizationDataType.int, param_bw=8)
    assert qsim_config.check_correctness_of_dtype_bw_rules(qsim_dtype_bw)
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_target_rule_enforced_apply_default_and_op_level_overrides_valid_case(self):
    """Enforcement ON: non-Conv ops get the default int8 kernel (int4 requested is
    unsupported), while Conv's own supported_kernels force fp16 for its params."""
    model = SingleResidual()
    model.eval()
    # Defaults support int8/int4; the Conv op_type override supports fp16 only.
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True'}, 'params': {'is_quantized': 'True'}, 'supported_kernels': [{'activation': {'bitwidth': 8, 'dtype': 'int'}, 'param': {'bitwidth': 8, 'dtype': 'int'}}, {'activation': {'bitwidth': 4, 'dtype': 'int'}, 'param': {'bitwidth': 4, 'dtype': 'int'}}]}, 'params': {'bias': {'is_quantized': 'False'}}, 'op_type': {'Conv': {'is_input_quantized': 'True', 'is_output_quantized': 'True', 'params': {'weight': {'is_quantized': 'True'}, 'bias': {'is_quantized': 'False'}}, 'supported_kernels': [{'activation': {'bitwidth': 16, 'dtype': 'float'}, 'param': {'bitwidth': 16, 'dtype': 'float'}}]}}, 'supergroups': [], 'model_input': {'is_input_quantized': 'True'}, 'model_output': {}}
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    INPUT_SHAPE = (1, 3, 32, 32)
    def forward_fn(model, _):
        torch.manual_seed(10)
        model.eval()
        with torch.no_grad():
            _ = model(torch.randn(INPUT_SHAPE))
    qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = True
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file='./data/quantsim_config.json', dummy_input=torch.rand(1, 3, 32, 32), in_place=True, default_data_type=QuantizationDataType.int, default_output_bw=4, default_param_bw=4)
    sim.compute_encodings(forward_fn, forward_pass_callback_args=None)
    # Non-Conv layers: default kernel int8.
    assert sim.model.fc.param_quantizers['weight'].enabled
    assert (sim.model.fc.param_quantizers['bias'].enabled == False)
    assert (sim.model.fc.param_quantizers['weight'].bitwidth == 8)
    assert (sim.model.fc.param_quantizers['weight'].data_type == QuantizationDataType.int)
    assert (sim.model.fc.param_quantizers['bias'].bitwidth == 8)
    assert (sim.model.fc.param_quantizers['bias'].data_type == QuantizationDataType.int)
    assert (sim.model.fc.output_quantizers[0].bitwidth == 8)
    assert (sim.model.fc.output_quantizers[0].data_type == QuantizationDataType.int)
    assert (sim.model.relu1.output_quantizers[0].bitwidth == 8)
    assert (sim.model.relu1.output_quantizers[0].data_type == QuantizationDataType.int)
    # Conv layers: params promoted to fp16 per the op-level override; output stays int8.
    assert (sim.model.conv1.param_quantizers['weight'].enabled == True)
    assert (sim.model.conv1.param_quantizers['weight'].bitwidth == 16)
    assert (sim.model.conv1.param_quantizers['weight'].data_type == QuantizationDataType.float)
    assert (sim.model.conv1.output_quantizers[0].bitwidth == 8)
    assert (sim.model.conv1.output_quantizers[0].data_type == QuantizationDataType.int)
    # Restore the module-level flag for subsequent tests.
    qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = False
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_target_rule_enforced_apply_op_level_overrides_fp16(self):
    """Op-level supported_kernels restricted to fp16 must override the int8
    defaults for LayerNorm (params + output) and GELU (output) when enforced."""
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True'}, 'params': {'is_quantized': 'True'}}, 'params': {}, 'op_type': {'LayerNorm': {'supported_kernels': [{'activation': {'bitwidth': 16, 'dtype': 'float'}, 'param': {'bitwidth': 16, 'dtype': 'float'}}]}, 'GELU': {'is_output_quantized': 'True', 'supported_kernels': [{'activation': {'bitwidth': 16, 'dtype': 'float'}, 'param': {'bitwidth': 16, 'dtype': 'float'}}]}}, 'supergroups': [], 'model_input': {}, 'model_output': {}}
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    torch.manual_seed(10)
    model = ModelWithBertCustomLayerNormGelu()
    model.eval()
    random_input = torch.rand(1, 4, 4)
    # NOTE(review): forward_pass is defined but never invoked (no compute_encodings here).
    def forward_pass(model, args):
        model.eval()
        with torch.no_grad():
            model(*random_input)
    qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = True
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=random_input, default_data_type=QuantizationDataType.int, default_output_bw=8, default_param_bw=8, config_file='./data/quantsim_config.json')
    # Both overridden op types end up on the fp16 kernel.
    assert (sim.model.customln1.param_quantizers['weight'].data_type == QuantizationDataType.float)
    assert (sim.model.customln1.param_quantizers['weight'].bitwidth == 16)
    assert (sim.model.customln1.output_quantizers[0].data_type == QuantizationDataType.float)
    assert (sim.model.customln1.output_quantizers[0].bitwidth == 16)
    assert (sim.model.gelu1.output_quantizers[0].data_type == QuantizationDataType.float)
    assert (sim.model.gelu1.output_quantizers[0].bitwidth == 16)
    # Restore the module-level flag for subsequent tests.
    qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = False
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_QuantDtypeBwInfo_data_class_1(self):
    """Equality is field-wise: identical fields compare equal, a differing act bitwidth does not."""
    base = QuantDtypeBwInfo(QuantizationDataType.int, 8, QuantizationDataType.float, 16)
    different_act_bw = QuantDtypeBwInfo(QuantizationDataType.int, 16, QuantizationDataType.float, 16)
    same_fields = QuantDtypeBwInfo(QuantizationDataType.int, 8, QuantizationDataType.float, 16)
    assert base != different_act_bw
    assert base == same_fields
def test_fp16_back_to_back_overrides(self):
    """With enforcement ON, fp16 overrides for PRelu/Add must flow through chains
    of fp16 ops, while boundaries with int8 ops (relu1) stay int8."""
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True'}, 'params': {'is_quantized': 'True'}}, 'params': {}, 'op_type': {'PRelu': {'supported_kernels': [{'activation': {'bitwidth': 16, 'dtype': 'float'}, 'param': {'bitwidth': 16, 'dtype': 'float'}}]}, 'Add': {'is_output_quantized': 'True', 'supported_kernels': [{'activation': {'bitwidth': 16, 'dtype': 'float'}, 'param': {'bitwidth': 16, 'dtype': 'float'}}]}}, 'supergroups': [], 'model_input': {'is_input_quantized': 'True'}, 'model_output': {}}
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    class ModelForFP16Override(torch.nn.Module):
        # prelu1 -> prelu2 -> relu1 -> add1(x2) -> prelu3 -> add2(prelu4(x3))
        def __init__(self):
            super(ModelForFP16Override, self).__init__()
            self.prelu1 = torch.nn.PReLU()
            self.prelu2 = torch.nn.PReLU()
            self.relu1 = torch.nn.ReLU()
            self.add1 = Add()
            self.prelu3 = torch.nn.PReLU()
            self.prelu4 = torch.nn.PReLU()
            self.add2 = Add()
        def forward(self, x1, x2, x3):
            x1 = self.prelu1(x1)
            x1 = self.prelu2(x1)
            x1 = self.relu1(x1)
            x1 = self.add1(x1, x2)
            x1 = self.prelu3(x1)
            x3 = self.prelu4(x3)
            x1 = self.add2(x1, x3)
            return x1
    model = ModelForFP16Override()
    model.eval()
    random_input = (torch.rand(1, 2), torch.rand(1, 2), torch.rand(1, 2))
    qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = True
    sim = QuantizationSimModel(model, quant_scheme=QuantScheme.post_training_tf, dummy_input=random_input, default_data_type=QuantizationDataType.int, default_output_bw=8, default_param_bw=8, config_file='./data/quantsim_config.json')
    for (name, module) in sim.quant_wrappers():
        if (name == 'prelu2'):
            # Input comes from fp16 prelu1, output feeds int8 relu1.
            assert (module.input_quantizers[0].bitwidth == 16)
            assert (module.input_quantizers[0].data_type == QuantizationDataType.float)
            assert (module.output_quantizers[0].bitwidth == 8)
            assert (module.output_quantizers[0].data_type == QuantizationDataType.int)
        elif (name == 'relu1'):
            # ReLU has no fp16 override: fully int8.
            assert (module.input_quantizers[0].bitwidth == 8)
            assert (module.input_quantizers[0].data_type == QuantizationDataType.int)
            assert (module.output_quantizers[0].bitwidth == 8)
            assert (module.output_quantizers[0].data_type == QuantizationDataType.int)
        elif (name == 'add1'):
            # Inputs arrive from int8 producers; output feeds fp16 prelu3.
            assert (module.input_quantizers[0].bitwidth == 8)
            assert (module.input_quantizers[0].data_type == QuantizationDataType.int)
            assert (module.input_quantizers[1].bitwidth == 8)
            assert (module.input_quantizers[1].data_type == QuantizationDataType.int)
            assert (module.output_quantizers[0].bitwidth == 16)
            assert (module.output_quantizers[0].data_type == QuantizationDataType.float)
        else:
            # All remaining wrappers are in fp16-only chains.
            for input_q in module.input_quantizers:
                assert (input_q.bitwidth == 16)
                assert (input_q.data_type == QuantizationDataType.float)
            for output_q in module.output_quantizers:
                assert (output_q.bitwidth == 16)
                assert (output_q.data_type == QuantizationDataType.float)
            for param_q in module.param_quantizers.values():
                assert (param_q.bitwidth == 16)
                assert (param_q.data_type == QuantizationDataType.float)
    # Restore the module-level flag for subsequent tests.
    qsim_config.ENFORCE_TARGET_DTYPE_BITWIDTH_CONFIG = False
    if os.path.exists('./data/quantsim_config.json'):
        os.remove('./data/quantsim_config.json')
def test_encoding_constraints(self):
    """An op_type 'encoding_constraints' entry must pin Softmax's output encoding
    to the configured [min, max] instead of the observed activation range."""
    quantsim_config = {'defaults': {'ops': {'is_output_quantized': 'True', 'is_symmetric': 'False'}, 'params': {'is_quantized': 'False', 'is_symmetric': 'True'}}, 'params': {}, 'op_type': {'Softmax': {'encoding_constraints': {'min': (- 5.0), 'max': 5.0}}}, 'supergroups': [], 'model_input': {}, 'model_output': {}}
    with open('./data/quantsim_config.json', 'w') as f:
        json.dump(quantsim_config, f)
    class SoftmaxModel(torch.nn.Module):
        # Single-op model so the constraint applies to the network output.
        def __init__(self):
            super(SoftmaxModel, self).__init__()
            self.softmax = torch.nn.Softmax()
        def forward(self, inp):
            x = self.softmax(inp)
            return x
    model = SoftmaxModel()
    dummy_input = torch.tensor([0.5, 0.5])
    # Run once without the config (observed range) and once with it (pinned range).
    for config_file in [None, './data/quantsim_config.json']:
        qsim = QuantizationSimModel(model, dummy_input, quant_scheme=QuantScheme.post_training_tf_enhanced, config_file=config_file)
        qsim.compute_encodings((lambda m, _: m(dummy_input)), None)
        if (not config_file):
            # No constraint: scheme unchanged, encoding tracks the softmax output (~[0, 0.5]).
            assert (qsim.model.softmax.output_quantizers[0].quant_scheme == QuantScheme.post_training_tf_enhanced)
            assert (qsim.model.softmax.output_quantizers[0].encoding.min == 0.0)
            assert np.allclose(qsim.model.softmax.output_quantizers[0].encoding.max, 0.5, atol=0.01)
        else:
            # Constraint applied: scheme switches to tf and encoding is pinned to [-5, 5].
            assert (qsim.model.softmax.output_quantizers[0].quant_scheme == QuantScheme.post_training_tf)
            assert np.allclose(qsim.model.softmax.output_quantizers[0].encoding.min, (- 5.0), atol=0.1)
            assert np.allclose(qsim.model.softmax.output_quantizers[0].encoding.max, 5.0, atol=0.1)
def generateDebianChangelog(package, logFile, version, maintainer):
    """Convert a simple release log into Debian changelog format.

    The log file consists of release headers of the form
    ``<package>-<X.Y.Z[.W]> <YYYY-MM-DD>`` each followed by free-form change
    lines belonging to that release.

    :param package: project name; used to match headers and build the
        ``python-<package>`` source package name
    :param logFile: path of the release log to parse
    :param version: version the latest (first) release header must match
    :param maintainer: maintainer string for the changelog trailer lines
    :returns: the full Debian changelog as a single string
    :raises Exception: if no release is found or the newest release in the log
        does not equal ``version``

    BUG FIX: the final release in the file was previously never flushed into
    ``releases`` (entries were only appended when the *next* header was seen),
    so the oldest entry was silently dropped and a single-release log crashed
    with IndexError.
    """
    releases = []
    current_version = None
    current_log = None
    current_date = None
    # Header: "<package>-<version> <date>", e.g. "foo-1.2.3 2020-01-02".
    header_re = re.compile(package + '-(\\d+\\.\\d+\\.\\d+(\\.\\d+)?)\\s*(\\d+-\\d+-\\d+)\\s*$')
    with open(logFile) as file_:
        for line in file_:
            match = header_re.match(line)
            if match is None:
                # Change line: attach to the release currently being read.
                if current_log is not None:
                    current_log.append(line)
            else:
                # New header: flush the previous release, start collecting the next.
                if current_log is not None:
                    releases.append((current_version, current_log, current_date))
                (current_version, current_date) = (match.group(1), match.group(3))
                current_log = []
    # Flush the last release in the file (previously missing — see docstring).
    if current_log is not None:
        releases.append((current_version, current_log, current_date))
    if not releases:
        raise Exception(('No release headers for %s found in %s' % (package, logFile)))
    if releases[0][0] != version:
        raise Exception(('Latest release in changelog (%s) does not match current release (%s)\n' % (releases[0][0], version)))
    output = []
    for (release, changes, date) in releases:
        date = time.strptime(date, '%Y-%m-%d')
        header = ('python-%s (%s-1) UNRELEASED; urgency=low\n' % (package, release))
        trailer = (' -- %s %s -0%d00\n' % (maintainer, time.strftime('%a, %d %b %Y %H:%M:%S', date), (time.timezone / 3600)))
        changeset = (([header, '\n'] + changes) + [trailer, '\n'])
        # Collapse runs of blank lines (and drop leading blanks) in each changeset.
        clean = ''
        lastBlank = True
        for line in changeset:
            if (line.strip() == ''):
                if lastBlank:
                    continue
                else:
                    clean += line
                    lastBlank = True
            else:
                clean += line
                lastBlank = False
        output.append(clean)
        output.append('')
    return ('\n'.join(output) + '\n')
def test_uninstall_man_page(pipx_temp_env):
    """Uninstalling a package must also remove the man page it installed."""
    expected_page = constants.LOCAL_MAN_DIR / 'man6' / 'pycowsay.6'
    # Install succeeds (falsy exit status) and ships the man page.
    assert (not run_pipx_cli(['install', 'pycowsay']))
    assert expected_page.exists()
    # Uninstall succeeds and the page is gone (neither file nor symlink remains).
    assert (not run_pipx_cli(['uninstall', 'pycowsay']))
    assert (not file_or_symlink(expected_page))
class File(ClangObject):
    """Represents a particular source file that is part of a translation unit.

    FIX: the decorators had been stripped from this class. ``from_name`` and
    ``from_cursor_result`` take no ``self`` and are clearly staticmethods, and
    ``__repr__``/``__bytes__`` use ``self.name`` as a value, which only works
    when ``name`` is a property — restored to match the libclang bindings.
    """

    @staticmethod
    def from_name(translation_unit, file_name):
        """Retrieve a file handle within the given translation unit."""
        return File(conf.lib.clang_getFile(translation_unit, file_name))

    @property
    def name(self):
        """Return the complete file and path name of the file."""
        return conf.lib.clang_getCString(conf.lib.clang_getFileName(self))

    @property
    def time(self):
        """Return the last modification time of the file."""
        return conf.lib.clang_getFileTime(self)

    def __bytes__(self):
        # NOTE(review): returns the str from `name`, so bytes(file) would raise
        # TypeError — confirm whether this should encode or be __str__.
        return self.name

    def __repr__(self):
        return ('<File: %s>' % self.name)

    @staticmethod
    def from_cursor_result(res, fn, args):
        assert isinstance(res, File)
        # Keep a reference to the originating TranslationUnit so the underlying
        # libclang data outlives this File object.
        res._tu = args[0]._tu
        return res
def test_singleaxis_aoi_gh1221():
    """A tracker pinned near 0 deg rotation at 90 deg axis tilt must report the
    same AOI as an equivalent fixed vertical array (regression for GH #1221)."""
    site = pvlib.location.Location(40.1134, (- 88.3695))
    times = pd.date_range(start='02-Jun-1998 00:00:00', end='02-Jun-1998 23:55:00', freq='5T', tz='Etc/GMT+6')
    solpos = site.get_solarposition(times)
    tracker = pvlib.tracking.singleaxis(solpos['apparent_zenith'], solpos['azimuth'], axis_tilt=90, axis_azimuth=180, max_angle=0.001, backtrack=False)
    fixed = pvlib.irradiance.aoi(90, 180, solpos['apparent_zenith'], solpos['azimuth'])
    # Mirror the tracker's NaNs (sun below horizon) onto the fixed result so
    # the two series can be compared element-wise.
    fixed[np.isnan(tracker['aoi'])] = np.nan
    assert np.allclose(tracker['aoi'], fixed, equal_nan=True)
class ScheduleItemFactory(DjangoModelFactory):
    """Factory for ScheduleItem objects with optional rooms / additional speakers.

    FIX: decorators had been stripped from this class — the bare `_generation`
    lines were `@factory.post_generation` hooks (a NameError as written), and
    `_create(cls, ...)` requires `@classmethod` per the factory_boy API.
    """
    conference = factory.SubFactory(ConferenceFactory)
    submission = factory.SubFactory(SubmissionFactory)
    language = factory.SubFactory(LanguageFactory)
    title = factory.Faker('text', max_nb_chars=100)
    slug = factory.Faker('slug')
    description = factory.Faker('text')
    type = factory.fuzzy.FuzzyChoice(['submission', 'custom'])
    image = factory.django.ImageField()

    @classmethod
    def _create(cls, model_class, *args, **kwargs):
        # Custom items have no backing submission, so drop the sub-factory value.
        _type = kwargs.get('type', None)
        if (_type == ScheduleItem.TYPES.custom):
            kwargs.pop('submission', None)
        return super()._create(model_class, *args, **kwargs)

    @factory.post_generation
    def rooms(self, create, extracted, **kwargs):
        # Attach any rooms passed in as `rooms=[...]` after the item is saved.
        if (not create):
            return
        if extracted:
            for room in extracted:
                self.rooms.add(room)

    @factory.post_generation
    def additional_speakers(self, create, extracted=0, **kwargs):
        # `additional_speakers=<n>` generates n extra speaker links.
        if (not create):
            return
        size = (extracted or 0)
        self.additional_speakers.set(ScheduleItemAdditionalSpeakerFactory.simple_generate_batch(create, size, **kwargs))

    class Meta():
        model = ScheduleItem
class HRL_agent():
def __init__(self, args, agent_id, char_index, graph_helper, deterministic=False, action_space=['open', 'pickplace'], seed=123):
    """Hierarchical RL agent combining a learned high-level policy with MCTS-style rollouts.

    :param args: experiment namespace (reads evaluation, max_num_objects, hidden_size,
        init_epsilon, base_net, num_steps_mcts)
    :param agent_id: id of the character this agent controls
    :param char_index: index of the character in the environment
    :param graph_helper: helper exposing object dictionaries and graph encoding
    :param deterministic: if True, act greedily instead of sampling
    :param action_space: which high-level action families to enable
        (NOTE(review): mutable default list — not mutated here, but fragile)
    :param seed: RNG seed forwarded to the actor-critic
    """
    self.args = args
    self.mode = ('train' if (not args.evaluation) else 'test')
    self.agent_type = 'RL_MCTS'
    self.max_num_objects = args.max_num_objects
    self.actions = []
    self.seed = seed
    # objects1: candidate objects to grab; objects2: candidate target containers/surfaces
    (self.objects1, self.objects2) = ([], [])
    grabbed_obj = graph_helper.object_dict_types['objects_grab']
    container_obj = graph_helper.object_dict_types['objects_inside']
    surface_obj = graph_helper.object_dict_types['objects_surface']
    for act in action_space:
        if (act == 'open'):
            self.actions.append('open')
            self.objects1.append('None')
            self.objects2 += container_obj
        if (act == 'pickplace'):
            self.actions.append('pickplace')
            self.objects2 += (container_obj + surface_obj)
            self.objects1 += grabbed_obj
    self.objects1 = list(set(self.objects1))
    self.objects2 = list(set(self.objects2))
    # NOTE(review): hard-coded override discards the objects2 list built above —
    # looks like a deliberate restriction to four placement targets; confirm.
    self.objects2 = ['coffeetable', 'kitchentable', 'dishwasher', 'fridge']
    self.obj2_dict = {}
    # NOTE(review): duplicate assignment of obj2_dict (appears twice in the original).
    self.obj2_dict = {}
    self.num_actions = len(self.actions)
    self.num_object_classes = graph_helper.num_classes
    self.num_states = graph_helper.num_states
    self.char_index = char_index
    # Internal simulator used to execute/plan in belief space (POMDP mode).
    self.sim_env = VhGraphEnv()
    self.sim_env.pomdp = True
    self.belief = None
    self.last_action = None
    self.action_count = 0
    base_kwargs = {'hidden_size': args.hidden_size, 'max_nodes': self.max_num_objects, 'num_classes': self.num_object_classes, 'num_states': self.num_states}
    self.graph_helper = graph_helper
    self.agent_id = agent_id
    # NOTE(review): char_index assigned a second time (same value as above).
    self.char_index = char_index
    self.epsilon = args.init_epsilon
    self.deterministic = deterministic
    self.hidden_size = args.hidden_size
    # Policy outputs a pair: index into objects1 (what to grab) and objects2 (where to put/open).
    self.action_space = spaces.Tuple((spaces.Discrete(len(self.objects1)), spaces.Discrete(len(self.objects2))))
    self.actor_critic = actor_critic_hl_mcts.ActorCritic(self.action_space, base_name=args.base_net, base_kwargs=base_kwargs, seed=seed)
    self.actor_critic.base.main.main.bad_transformer = False
    self.id2node = None
    self.hidden_state = self.init_hidden_state()
    if torch.cuda.is_available():
        self.actor_critic.cuda()
    self.previous_belief_graph = None
def filtering_graph(self, graph):
new_edges = []
edge_dict = {}
for edge in graph['edges']:
key = (edge['from_id'], edge['to_id'])
if (key not in edge_dict):
edge_dict[key] = [edge['relation_type']]
new_edges.append(edge)
elif (edge['relation_type'] not in edge_dict[key]):
edge_dict[key] += [edge['relation_type']]
new_edges.append(edge)
graph['edges'] = new_edges
return graph
def sample_belief(self, obs_graph):
new_graph = self.belief.update_graph_from_gt_graph(obs_graph)
self.previous_belief_graph = self.filtering_graph(new_graph)
return new_graph
def init_hidden_state(self):
h_state = torch.zeros(1, self.hidden_size)
c_state = torch.zeros(1, self.hidden_size)
return (h_state, c_state)
def reset(self, observed_graph, gt_graph, task_goal={}, seed=0):
    """Reset per-episode state: rebuild the belief from the ground-truth graph,
    re-seed the internal sim env from the sampled belief, and zero the RNN state.

    NOTE(review): `task_goal={}` is a mutable default — not mutated here, but fragile.
    """
    self.action_count = 0
    self.belief = belief.Belief(gt_graph, agent_id=self.agent_id, seed=seed)
    self.belief.sample_from_belief()
    # Sample a concrete graph consistent with the first observation.
    graph_belief = self.sample_belief(observed_graph)
    self.sim_env.reset(graph_belief, task_goal)
    self.sim_env.to_pomdp()
    # Node lookup is built from the ground-truth graph, not the belief.
    self.id2node = {node['id']: node for node in gt_graph['nodes']}
    self.hidden_state = self.init_hidden_state()
def evaluate(self, rollout):
pass
def get_action(self, observation, goal_spec, action_space_ids=None, action_indices=None, full_graph=None):
    """Select the next high-level action and translate it into an instruction string.

    Updates the belief with the (full or partial) observation, encodes the graph
    plus the goal predicates, queries the actor-critic on the first step of an
    option, and re-uses the cached action while the option is still running.

    :returns: (action_str or None, info_model dict with probs/value/action metadata)
    """
    # Prefer the full graph for the belief update when the caller provides one.
    if (full_graph is not None):
        observation_belief = self.sample_belief(full_graph)
    else:
        observation_belief = self.sample_belief(observation)
    self.sim_env.reset(self.previous_belief_graph, {0: goal_spec, 1: goal_spec})
    rnn_hxs = self.hidden_state
    masks = torch.ones(rnn_hxs[0].shape).type(rnn_hxs[0].type())
    if torch.cuda.is_available():
        rnn_hxs = (rnn_hxs[0].cuda(), rnn_hxs[1].cuda())
        masks = masks.cuda()
    (inputs, info) = self.graph_helper.build_graph(observation, include_edges=(self.args.base_net == 'GNN'), action_space_ids=action_space_ids, character_id=self.agent_id)
    # build_graph packs these at the tail of its info tuple.
    visible_objects = info[(- 1)]
    action_space_ids = info[(- 2)]
    # Goal encoding: up to 6 (object class, location class) prediction slots.
    target_obj_class = ([self.graph_helper.object_dict.get_id('no_obj')] * 6)
    target_loc_class = ([self.graph_helper.object_dict.get_id('no_obj')] * 6)
    mask_goal_pred = ([0.0] * 6)
    pre_id = 0
    (obj_pred_names, loc_pred_names) = ([], [])
    # NOTE(review): the loop variable `info` shadows build_graph's info tuple
    # (already consumed above), and `obj_class_id` from the last iteration is
    # reused after the loop — NameError if goal_spec yields no predicate.
    for (predicate, info) in goal_spec.items():
        (count, required, reward) = info
        if ((count == 0) or (not required) or ('sit' in predicate)):
            continue
        # Predicates look like "<verb>_<object>_<location id>".
        elements = predicate.split('_')
        obj_class_id = int(self.graph_helper.object_dict.get_id(elements[1]))
        loc_class_id = int(self.graph_helper.object_dict.get_id(self.id2node[int(elements[2])]['class_name']))
        obj_pred_names.append(elements[1])
        loc_pred_names.append(self.id2node[int(elements[2])]['class_name'])
        for _ in range(count):
            # try/except guards slot overflow past the 6 available entries.
            try:
                target_obj_class[pre_id] = obj_class_id
                target_loc_class[pre_id] = loc_class_id
                mask_goal_pred[pre_id] = 1.0
                pre_id += 1
            except:
                pdb.set_trace()
    inputs.update({'affordance_matrix': self.graph_helper.obj1_affordance, 'target_obj_class': target_obj_class, 'target_loc_class': target_loc_class, 'mask_goal_pred': mask_goal_pred, 'gt_goal': obj_class_id})
    # Convert everything to batched tensors (batch dim of 1); doubles -> floats.
    inputs_tensor = {}
    for (input_name, inp) in inputs.items():
        if (type(inp) is int):
            inp_tensor = torch.tensor(inp).unsqueeze(0)
        else:
            inp_tensor = torch.tensor(inp.copy()).unsqueeze(0)
        if (inp_tensor.type() == 'torch.DoubleTensor'):
            inp_tensor = inp_tensor.float()
        inputs_tensor[input_name] = inp_tensor
    if (self.action_count == 0):
        # Start of an option: query the policy for a fresh high-level action.
        (value, action, action_probs, rnn_state, out_dict) = self.actor_critic.act(inputs_tensor, rnn_hxs, masks, deterministic=self.deterministic, epsilon=self.epsilon, action_indices=action_indices)
        self.hidden_state = rnn_state
        info_model = {}
        info_model['probs'] = action_probs
        info_model['value'] = value
        info_model['actions'] = action
        info_model['state_inputs'] = copy.deepcopy(inputs_tensor)
        info_model['num_objects'] = inputs['mask_object'].sum((- 1))
        info_model['num_objects_action'] = inputs['mask_action_node'].sum((- 1))
        info_model['visible_ids'] = [node[1] for node in visible_objects]
        info_model['action_space_ids'] = action_space_ids
        next_action = info_model['actions']
        self.last_action = info_model['actions']
    else:
        # Mid-option: keep executing the previously chosen high-level action.
        next_action = self.last_action
        info_model = {}
        info_model['action_space_ids'] = action_space_ids
        info_model['visible_ids'] = [node[1] for node in visible_objects]
        info_model['mcts_action'] = True
        info_model['actions'] = next_action
        info_model['obs'] = observation['nodes']
    # Ground the abstract action into a concrete instruction + plan.
    (action_str, action_tried, plan, predicate) = self.get_action_instr(next_action, visible_objects, observation_belief)
    if ('put' in predicate):
        p_spl = predicate.split('_')
        obj_pred = p_spl[1]
        container_pred = p_spl[2]
        # Reject off-goal placements (during training, for containers too).
        if ((obj_pred not in obj_pred_names) or ((container_pred not in loc_pred_names) and (self.mode == 'train'))):
            info_model['bad_predicate'] = True
            action_str = None
            action_tried += ' (offgoal)'
    if (action_str is not None):
        self.action_count += 1
        # Option terminates when the plan is done or the MCTS step budget is hit.
        if ((len(plan) == 1) or (self.action_count >= self.args.num_steps_mcts)):
            self.action_count = 0
    else:
        self.action_count = 0
    info_model['action_tried'] = action_tried
    info_model['predicate'] = predicate
    return (action_str, info_model)
def get_action_instr(self, action, visible_objects, current_graph):
    """Turn a sampled (object1, object2) action index pair into an executable action.

    Args:
        action: pair of index tensors; ``action[0]`` selects an entry of
            ``self.objects1`` (the object to place, or ``'None'`` for an
            open action) and ``action[1]`` selects an entry of
            ``self.objects2`` (the container / surface).
        visible_objects: unused here; kept for interface compatibility.
        current_graph: environment graph dict with ``'nodes'`` and ``'edges'``.

    Returns:
        ``(action_str, action_tried, plan, predicate)`` where ``action_str``
        is ``None`` when no executable plan could be found.
    """
    if self.objects1[action[0].item()] == 'None':
        # "open" action: only the container is specified.
        container_name = self.objects2[action[1].item()]
        predicate = 'open_{}'.format(container_name)
        if container_name not in self.graph_helper.object_dict_types['objects_inside']:
            return (None, predicate, [], predicate)
        # NOTE(review): this compares node['states'] to the string 'CLOSED';
        # if states is a list of strings this never matches — confirm the
        # graph format upstream.
        target_id = [node['id'] for node in current_graph['nodes']
                     if node['class_name'] == container_name and node['states'] == 'CLOSED']
        if len(target_id) == 0:
            return (None, predicate, [], predicate)
        target_goal = 'open_{}'.format(target_id[0])
        (actions, _) = open_heuristic(self.agent_id, 0, current_graph, self.sim_env, target_goal)
    else:
        # "put" action: move object1 onto / into object2.
        obj_name = self.objects1[action[0].item()]
        container_name = self.objects2[action[1].item()]
        predicate = 'put_{}_{}'.format(obj_name, container_name)
        container_id = [node['id'] for node in current_graph['nodes']
                        if node['class_name'] == container_name]
        if len(container_id) == 0:
            return (None, predicate, [], predicate)
        # Candidate objects are instances of obj_name not already ON/INSIDE
        # the target container.
        obj_rel_container = [edge['from_id'] for edge in current_graph['edges']
                             if edge['to_id'] == container_id[0]
                             and edge['relation_type'] in ['ON', 'INSIDE']]
        object_id = [node['id'] for node in current_graph['nodes']
                     if node['class_name'] == obj_name
                     and node['id'] not in obj_rel_container]
        if len(object_id) == 0:
            return (None, predicate, [], predicate)
        # Pick the candidate with the cheapest plan.
        min_cost = 0
        actions = None
        for obj_id in range(len(object_id)):
            target_goal = 'put_{}_{}'.format(object_id[obj_id], container_id[0])
            if container_name in self.graph_helper.object_dict_types['objects_surface']:
                (actions_curr, cost) = put_heuristic(self.agent_id, self.char_index, current_graph, self.sim_env, target_goal)
            else:
                (actions_curr, cost) = putIn_heuristic(self.agent_id, self.char_index, current_graph, self.sim_env, target_goal)
            if (cost is None) or (len(cost) == 0):
                continue
            curr_cost_plan = sum(cost)
            # BUGFIX: selection used to key on `obj_id == 0`, so if the first
            # candidate had no valid plan (the `continue` above), `min_cost`
            # stayed 0 and no later positive-cost plan could ever win. Key on
            # "no plan chosen yet" instead.
            if (actions is None) or (curr_cost_plan < min_cost):
                min_cost = curr_cost_plan
                actions = actions_curr
        if actions is None:
            return (None, predicate, actions, predicate)
    action_name = actions[0][0]
    # 'put'-style plan steps carry the object in slot 2; walk/open in slot 1.
    if 'put' in action_name:
        obj_id_action = 2
    else:
        obj_id_action = 1
    (o1, o1_id) = actions[0][obj_id_action]
    action_name = action_name.replace('walk', 'walktowards')
    exec_action = utils_rl_agent.can_perform_action(action_name, o1, o1_id, self.agent_id, current_graph, graph_helper=self.graph_helper, teleport=self.args.teleport)
    action_try = '{} [{}] ({})'.format(action_name, o1, o1_id)
    # BUGFIX: the original final return built 'put_{}_{}'.format(obj_name,
    # container_name) unconditionally, raising NameError whenever the open
    # branch succeeded (those names are only bound in the else branch). Both
    # branches now set `predicate` and return it here.
    return (exec_action, action_try, actions, predicate)
class QFI(QFIBase):
    """Quantum Fisher Information converter.

    Prepares the state-function expectation and delegates the actual QFI
    computation to ``self.qfi_method``.
    """

    def convert(self, operator: CircuitStateFn, params: Optional[Union[(ParameterExpression, ParameterVector, List[ParameterExpression])]]=None) -> ListOp:
        """Return a ListOp representing the QFI of ``operator`` w.r.t. ``params``."""
        # Expectation without Pauli grouping, reduced before factoring the
        # coefficients out of the composed operator.
        expectation = PauliExpectation(group_paulis=False).convert(operator)
        prepared = self._factor_coeffs_out_of_composed_op(expectation.reduce())
        return self.qfi_method.convert(prepared, params)
def test_remote_usage_prog(pytester: pytest.Pytester, request) -> None:
    """An argparse usage error raised inside a worker is reported to the user."""
    parser = request.config._parser
    if not hasattr(parser, 'prog'):
        pytest.skip('prog not available in config parser')
    conftest_src = '\n import pytest\n\n config_parser = None\n\n \n def get_config_parser():\n return config_parser\n\n def pytest_configure(config):\n global config_parser\n config_parser = config._parser\n '
    test_src = '\n import sys\n\n def test(get_config_parser, request):\n get_config_parser._getparser().error("my_usage_error")\n '
    pytester.makeconftest(conftest_src)
    pytester.makepyfile(test_src)
    outcome = pytester.runpytest_subprocess('-n1')
    assert outcome.ret == 1
    outcome.stdout.fnmatch_lines(['*usage: *', '*error: my_usage_error'])
class resnet_v1_101_fpn_dcn_rcnn_oneshot_v3(Symbol):
def __init__(self):
    """Create the Variables for conv parameters shared across FPN levels."""
    # One weight/bias Variable pair per shared RPN / offset conv layer.
    self.shared_param_list = ['offset_p2', 'offset_p3', 'offset_p4', 'offset_p5', 'rpn_conv', 'rpn_cls_score', 'rpn_bbox_pred']
    self.shared_param_dict = {}
    for name in self.shared_param_list:
        for suffix in ('_weight', '_bias'):
            self.shared_param_dict[name + suffix] = mx.sym.Variable(name + suffix)
    self.constants_dict = {}
def get_resnet_backbone(self, data, with_dilated=False, with_dconv=False, with_dpyramid=False, eps=1e-05):
conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=eps)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu')
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max')
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(*[scale2a_branch1, scale2a_branch2c], name='res2a')
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(*[res2a_relu, scale2b_branch2c], name='res2b')
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(*[res2b_relu, scale2c_branch2c], name='res2c')
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu')
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch2b = bn3a_branch2b
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3a_branch2c = bn3a_branch2c
res3a = mx.symbol.broadcast_add(*[scale3a_branch1, scale3a_branch2c], name='res3a')
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b1_branch2a = bn3b1_branch2a
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a, act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b1_branch2b = bn3b1_branch2b
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b, act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b1_branch2c = bn3b1_branch2c
res3b1 = mx.symbol.broadcast_add(*[res3a_relu, scale3b1_branch2c], name='res3b1')
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b2_branch2a = bn3b2_branch2a
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a, act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b, act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(*[res3b1_relu, scale3b2_branch2c], name='res3b2')
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a, act_type='relu')
if with_dpyramid:
res3b3_branch2b_offset = mx.symbol.Convolution(name='res3b3_branch2b_offset', data=res3b3_branch2a_relu, num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res3b3_branch2b = mx.contrib.symbol.DeformableConvolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, offset=res3b3_branch2b_offset, num_filter=128, pad=(1, 1), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), no_bias=True)
else:
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b, act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(*[res3b2_relu, scale3b3_branch2c], name='res3b3')
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu')
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch1 = bn4a_branch1
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4a_branch2c = bn4a_branch2c
res4a = mx.symbol.broadcast_add(*[scale4a_branch1, scale4a_branch2c], name='res4a')
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu')
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a, act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b, act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(*[res4a_relu, scale4b1_branch2c], name='res4b1')
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a, act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b, act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(*[res4b1_relu, scale4b2_branch2c], name='res4b2')
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a, act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b, act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(*[res4b2_relu, scale4b3_branch2c], name='res4b3')
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a, act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b, act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(*[res4b3_relu, scale4b4_branch2c], name='res4b4')
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a, act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b, act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(*[res4b4_relu, scale4b5_branch2c], name='res4b5')
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a, act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b, act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(*[res4b5_relu, scale4b6_branch2c], name='res4b6')
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a, act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b, act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(*[res4b6_relu, scale4b7_branch2c], name='res4b7')
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a, act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b, act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(*[res4b7_relu, scale4b8_branch2c], name='res4b8')
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a, act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b, act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(*[res4b8_relu, scale4b9_branch2c], name='res4b9')
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a, act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b, act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(*[res4b9_relu, scale4b10_branch2c], name='res4b10')
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a, act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b, act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(*[res4b10_relu, scale4b11_branch2c], name='res4b11')
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a, act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b, act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(*[res4b11_relu, scale4b12_branch2c], name='res4b12')
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b13_branch2a = bn4b13_branch2a
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a, act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b13_branch2b = bn4b13_branch2b
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b, act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b13_branch2c = bn4b13_branch2c
res4b13 = mx.symbol.broadcast_add(*[res4b12_relu, scale4b13_branch2c], name='res4b13')
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b14_branch2a = bn4b14_branch2a
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a, act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b14_branch2b = bn4b14_branch2b
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b, act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b14_branch2c = bn4b14_branch2c
res4b14 = mx.symbol.broadcast_add(*[res4b13_relu, scale4b14_branch2c], name='res4b14')
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b15_branch2a = bn4b15_branch2a
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a, act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b15_branch2b = bn4b15_branch2b
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b, act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b15_branch2c = bn4b15_branch2c
res4b15 = mx.symbol.broadcast_add(*[res4b14_relu, scale4b15_branch2c], name='res4b15')
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b16_branch2a = bn4b16_branch2a
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a, act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b16_branch2b = bn4b16_branch2b
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b, act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b16_branch2c = bn4b16_branch2c
res4b16 = mx.symbol.broadcast_add(*[res4b15_relu, scale4b16_branch2c], name='res4b16')
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b17_branch2a = bn4b17_branch2a
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a, act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b17_branch2b = bn4b17_branch2b
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b, act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b17_branch2c = bn4b17_branch2c
res4b17 = mx.symbol.broadcast_add(*[res4b16_relu, scale4b17_branch2c], name='res4b17')
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b18_branch2a = bn4b18_branch2a
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a, act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b18_branch2b = bn4b18_branch2b
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b, act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b18_branch2c = bn4b18_branch2c
res4b18 = mx.symbol.broadcast_add(*[res4b17_relu, scale4b18_branch2c], name='res4b18')
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b19_branch2a = bn4b19_branch2a
res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a, act_type='relu')
res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b19_branch2b = bn4b19_branch2b
res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b, act_type='relu')
res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b19_branch2c = bn4b19_branch2c
res4b19 = mx.symbol.broadcast_add(*[res4b18_relu, scale4b19_branch2c], name='res4b19')
res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu')
res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b20_branch2a = bn4b20_branch2a
res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a, act_type='relu')
res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b20_branch2b = bn4b20_branch2b
res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b, act_type='relu')
res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b20_branch2c = bn4b20_branch2c
res4b20 = mx.symbol.broadcast_add(*[res4b19_relu, scale4b20_branch2c], name='res4b20')
res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu')
res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b21_branch2a = bn4b21_branch2a
res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a, act_type='relu')
res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b21_branch2b = bn4b21_branch2b
res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b, act_type='relu')
res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b21_branch2c = bn4b21_branch2c
res4b21 = mx.symbol.broadcast_add(*[res4b20_relu, scale4b21_branch2c], name='res4b21')
res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu')
res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b22_branch2a = bn4b22_branch2a
res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a, act_type='relu')
if with_dpyramid:
res4b22_branch2b_offset = mx.symbol.Convolution(name='res4b22_branch2b_offset', data=res4b22_branch2a_relu, num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res4b22_branch2b = mx.contrib.symbol.DeformableConvolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, offset=res4b22_branch2b_offset, num_filter=256, pad=(1, 1), kernel=(3, 3), num_deformable_group=4, stride=(1, 1), no_bias=True)
else:
res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b22_branch2b = bn4b22_branch2b
res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b, act_type='relu')
res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale4b22_branch2c = bn4b22_branch2c
res4b22 = mx.symbol.broadcast_add(*[res4b21_relu, scale4b22_branch2c], name='res4b22')
res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu')
if with_dilated:
res5_stride = (1, 1)
res5_dilate = (2, 2)
else:
res5_stride = (2, 2)
res5_dilate = (1, 1)
res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2a = bn5a_branch2a
res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu')
if with_dconv:
res5a_branch2b_offset = mx.symbol.Convolution(name='res5a_branch2b_offset', data=res5a_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5a_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5a_branch2b', data=res5a_branch2a_relu, offset=res5a_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=res5_dilate, no_bias=True)
else:
res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2b = bn5a_branch2b
res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu')
res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2c = bn5a_branch2c
res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch1 = bn5a_branch1
res5a = mx.symbol.broadcast_add(*[scale5a_branch1, scale5a_branch2c], name='res5a')
res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu')
res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2a = bn5b_branch2a
res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu')
if with_dconv:
res5b_branch2b_offset = mx.symbol.Convolution(name='res5b_branch2b_offset', data=res5b_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5b_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5b_branch2b', data=res5b_branch2a_relu, offset=res5b_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2b = bn5b_branch2b
res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu')
res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2c = bn5b_branch2c
res5b = mx.symbol.broadcast_add(*[res5a_relu, scale5b_branch2c], name='res5b')
res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu')
res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2a = bn5c_branch2a
res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu')
if with_dconv:
res5c_branch2b_offset = mx.symbol.Convolution(name='res5c_branch2b_offset', data=res5c_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5c_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5c_branch2b', data=res5c_branch2a_relu, offset=res5c_branch2b_offset, num_filter=512, pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=res5_dilate, kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2b = bn5c_branch2b
res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu')
res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2c = bn5c_branch2c
res5c = mx.symbol.broadcast_add(*[res5b_relu, scale5c_branch2c], name='res5c')
res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu')
return (res2c_relu, res3b3_relu, res4b22_relu, res5c_relu)
def get_fpn_feature(self, c2, c3, c4, c5, feature_dim=256):
    """Assemble an FPN pyramid (P2..P6) from backbone stages C2..C5.

    Each stage is first projected to ``feature_dim`` channels with a 1x1
    lateral convolution; coarser levels are upsampled (nearest neighbour)
    and merged top-down by element-wise summation, and every merged map
    is smoothed with a 3x3 convolution.  P6 is obtained directly from C5
    with a stride-2 3x3 convolution.

    Returns:
        tuple: (fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6) symbols.
    """
    conv = mx.symbol.Convolution
    # Lateral 1x1 projections onto a common channel count.
    lateral5 = conv(data=c5, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p5_1x1')
    lateral4 = conv(data=c4, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p4_1x1')
    lateral3 = conv(data=c3, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p3_1x1')
    lateral2 = conv(data=c2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p2_1x1')
    # Top-down pathway: upsample the coarser map and add the lateral one.
    up5 = mx.symbol.UpSampling(lateral5, scale=2, sample_type='nearest', name='fpn_p5_upsample')
    merged4 = mx.sym.ElementWiseSum(up5, lateral4, name='fpn_p4_sum')
    up4 = mx.symbol.UpSampling(merged4, scale=2, sample_type='nearest', name='fpn_p4_upsample')
    merged3 = mx.sym.ElementWiseSum(up4, lateral3, name='fpn_p3_sum')
    up3 = mx.symbol.UpSampling(merged3, scale=2, sample_type='nearest', name='fpn_p3_upsample')
    merged2 = mx.sym.ElementWiseSum(up3, lateral2, name='fpn_p2_sum')
    # Output heads: 3x3 convs smooth the merged maps; P6 downsamples C5.
    fpn_p6 = mx.sym.Convolution(data=c5, kernel=(3, 3), pad=(1, 1), stride=(2, 2), num_filter=feature_dim, name='fpn_p6')
    fpn_p5 = conv(data=lateral5, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p5')
    fpn_p4 = conv(data=merged4, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p4')
    fpn_p3 = conv(data=merged3, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p3')
    fpn_p2 = conv(data=merged2, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p2')
    return (fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6)
def get_rpn_subnet(self, data, num_anchors, suffix):
    """Per-level RPN head whose weights are shared across pyramid levels.

    The conv/cls/bbox parameters come from ``self.shared_param_dict`` so
    every FPN level (``suffix`` distinguishes the symbol names) reuses
    the same filters.

    Returns:
        tuple: (cls_score reshaped to (N, 2, -1), cls_prob reshaped back
        to (N, 2*A, -1, W), bbox_pred reshaped to (N, 4*A, -1),
        raw bbox_pred).
    """
    shared = self.shared_param_dict
    conv = mx.sym.Convolution(data=data, kernel=(3, 3), pad=(1, 1), num_filter=512, name='rpn_conv_' + suffix, weight=shared['rpn_conv_weight'], bias=shared['rpn_conv_bias'])
    relu = mx.sym.Activation(data=conv, act_type='relu', name='rpn_relu_' + suffix)
    cls_score = mx.sym.Convolution(data=relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name='rpn_cls_score_' + suffix, weight=shared['rpn_cls_score_weight'], bias=shared['rpn_cls_score_bias'])
    bbox_pred = mx.sym.Convolution(data=relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name='rpn_bbox_pred_' + suffix, weight=shared['rpn_bbox_pred_weight'], bias=shared['rpn_bbox_pred_bias'])
    # Fold anchors into the spatial axis so softmax runs over the 2 classes.
    cls_score_t1 = mx.sym.Reshape(data=cls_score, shape=(0, 2, -1, 0), name='rpn_cls_score_t1_' + suffix)
    cls_score_t2 = mx.sym.Reshape(data=cls_score_t1, shape=(0, 2, -1), name='rpn_cls_score_t2_' + suffix)
    cls_prob = mx.sym.SoftmaxActivation(data=cls_score_t1, mode='channel', name='rpn_cls_prob_' + suffix)
    # Restore the (2*A) channel layout expected by the proposal op.
    cls_prob_t = mx.sym.Reshape(data=cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_t_' + suffix)
    bbox_pred_t = mx.sym.Reshape(data=bbox_pred, shape=(0, 0, -1), name='rpn_bbox_pred_t_' + suffix)
    return (cls_score_t2, cls_prob_t, bbox_pred_t, bbox_pred)
def get_deformable_roipooling(self, name, data, rois, output_dim, spatial_scale, param_name, group_size=1, pooled_size=7, sample_per_part=4, part_size=7):
    """Deformable PSROI pooling with learned per-part (x, y) offsets.

    Two-pass scheme: a first, offset-free DeformablePSROIPooling produces
    pooled features from which a fully-connected layer (parameters shared
    via ``param_name``) predicts one 2D offset per part; a second pooling
    pass then samples with those offsets applied (scaled by trans_std).
    """
    shared = self.shared_param_dict
    # Pass 1: plain pooling (no_trans=True) to derive offset features.
    pooled = mx.contrib.sym.DeformablePSROIPooling(name='offset_' + name + '_t', data=data, rois=rois, group_size=group_size, pooled_size=pooled_size, sample_per_part=sample_per_part, no_trans=True, part_size=part_size, output_dim=output_dim, spatial_scale=spatial_scale)
    # Predict flattened offsets; low lr_mult keeps them small early in training.
    flat_offsets = mx.sym.FullyConnected(name='offset_' + name, data=pooled, num_hidden=part_size * part_size * 2, lr_mult=0.01, weight=shared['offset_' + param_name + '_weight'], bias=shared['offset_' + param_name + '_bias'])
    trans = mx.sym.Reshape(data=flat_offsets, shape=(-1, 2, part_size, part_size), name='offset_reshape_' + name)
    # Pass 2: pooling deformed by the predicted offsets.
    return mx.contrib.sym.DeformablePSROIPooling(name='deformable_roi_pool_' + name, data=data, rois=rois, trans=trans, group_size=group_size, pooled_size=pooled_size, sample_per_part=sample_per_part, no_trans=False, part_size=part_size, output_dim=output_dim, spatial_scale=spatial_scale, trans_std=0.1)
def get_constant_symbol(self, const_val):
    """Return a gradient-blocked constant symbol for ``const_val``.

    Symbols are cached in ``self.constants_dict`` so repeated requests
    for the same value reuse one variable (and one symbol name).
    """
    cached = self.constants_dict.get(const_val)
    if cached is not None:
        return cached
    name = 'const_eq_{0}'.format(const_val)
    # BlockGrad keeps the constant out of the backward pass.
    const = mx.sym.BlockGrad(mx.sym.Variable(name, shape=(1,), init=MyConstant(value=[const_val])))
    self.constants_dict[const_val] = const
    return const
def cos_sim_2_dist_generic(self, cos_sim, x=None, y=None, x_is_norm=True, y_is_norm=True):
    """Turn inner products into squared Euclidean distances.

    Uses ||x - y||^2 = ||x||^2 - 2*x.y + ||y||^2, where ``cos_sim`` is
    assumed to already hold the x.y inner products.  When an operand is
    declared L2-normalized its squared norm is the constant 1; otherwise
    the operand itself must be supplied so the norm can be computed.
    """
    if not x_is_norm:
        assert (x is not None), 'if x is not L2 normalized then x must be provided'
        # Column-wise squared norms, transposed to broadcast over rows.
        x_norm = mx.sym.transpose(mx.sym.sum_axis(mx.sym.square(x), axis=0, keepdims=True), axes=(1, 0))
    else:
        x_norm = self.get_constant_symbol(1)
    if not y_is_norm:
        assert (y is not None), 'if y is not L2 normalized then y must be provided'
        y_norm = mx.sym.sum_axis(mx.sym.square(y), axis=0, keepdims=True)
    else:
        y_norm = self.get_constant_symbol(1)
    two = self.get_constant_symbol(2)
    return mx.sym.broadcast_add(mx.sym.broadcast_sub(x_norm, mx.sym.broadcast_mul(two, cos_sim)), y_norm)
def cos_sim_2_dist(self, cos_sim, cfg=None, embd=None, reps=None):
    """Convert embedding/representative inner products to squared distances.

    Uses the identity ||e - r||^2 = ||e||^2 - 2*e.r + ||r||^2, where
    ``cos_sim`` is assumed to already hold the e.r inner products.  When
    the config declares the embeddings / representatives L2-normalized,
    the corresponding squared norm is the constant 1; otherwise it is
    computed from the supplied symbol.

    Args:
        cos_sim: symbol of inner products between embeddings and reps.
        cfg: experiment config exposing ``network.EMBED_L2_NORM`` and
            ``network.REP_L2_NORM``.  Required despite the default.
        embd: embeddings symbol; required if EMBED_L2_NORM is False.
        reps: representatives symbol; required if REP_L2_NORM is False.

    Returns:
        Symbol of squared distances, broadcast over the inputs.

    Raises:
        ValueError: if ``cfg`` is not supplied.
    """
    # cfg defaults to None only for signature compatibility; dereferencing
    # None below would raise a confusing AttributeError, so fail early with
    # an explicit message instead.
    if cfg is None:
        raise ValueError('cfg must be provided to cos_sim_2_dist')
    if cfg.network.EMBED_L2_NORM:
        embd_norm = self.get_constant_symbol(1)
    else:
        assert (embd is not None), 'if embedding is not L2 normalized then embd must be provided'
        embd_norm = mx.sym.sum_axis(mx.sym.square(embd), axis=1, keepdims=True)
        # Reshape the per-embedding norms for broadcasting against cos_sim.
        embd_norm = mx.sym.reshape(embd_norm, shape=(0, 1, 1))
    if cfg.network.REP_L2_NORM:
        reps_norm = self.get_constant_symbol(1)
    else:
        assert (reps is not None), 'if representatives are not L2 normalized then reps must be provided'
        reps_norm = mx.sym.sum_axis(mx.sym.square(reps), axis=0, keepdims=True)
    dist = mx.sym.broadcast_add(mx.sym.broadcast_sub(embd_norm, mx.sym.broadcast_mul(self.get_constant_symbol(2), cos_sim)), reps_norm)
    return dist
def get_symbol(self, cfg, is_train=True):
    """Build the full detection symbol graph (train or test).

    Pipeline visible in this method: ResNet backbone (with deformable convs)
    -> FPN features p2..p6 -> per-level RPN subnets -> pyramid proposals ->
    deformable FPN ROI pooling -> two fc layers -> an embedding that is
    compared (dot product) against learned per-class "representatives";
    the distances are turned into class scores. Training additionally wires
    RPN losses, bbox regression loss, optional OHEM, and optional
    embedding/representative losses.

    NOTE(review): ``dict(a.items() + b.items())`` and ``cPickle`` below are
    Python-2-only idioms; this file appears to target Python 2.
    """
    num_classes = cfg.dataset.NUM_CLASSES
    # Class-agnostic regression predicts only fg/bg boxes (2 "classes").
    num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
    data = mx.sym.Variable(name='data')
    im_info = mx.sym.Variable(name='im_info')
    # Backbone + FPN feature pyramid.
    (res2, res3, res4, res5) = self.get_resnet_backbone(data, with_dpyramid=True, with_dconv=True)
    (fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6) = self.get_fpn_feature(res2, res3, res4, res5)
    # One RPN subnet per pyramid level.
    (rpn_cls_score_p2, rpn_prob_p2, rpn_bbox_loss_p2, rpn_bbox_pred_p2) = self.get_rpn_subnet(fpn_p2, cfg.network.NUM_ANCHORS, 'p2')
    (rpn_cls_score_p3, rpn_prob_p3, rpn_bbox_loss_p3, rpn_bbox_pred_p3) = self.get_rpn_subnet(fpn_p3, cfg.network.NUM_ANCHORS, 'p3')
    (rpn_cls_score_p4, rpn_prob_p4, rpn_bbox_loss_p4, rpn_bbox_pred_p4) = self.get_rpn_subnet(fpn_p4, cfg.network.NUM_ANCHORS, 'p4')
    (rpn_cls_score_p5, rpn_prob_p5, rpn_bbox_loss_p5, rpn_bbox_pred_p5) = self.get_rpn_subnet(fpn_p5, cfg.network.NUM_ANCHORS, 'p5')
    (rpn_cls_score_p6, rpn_prob_p6, rpn_bbox_loss_p6, rpn_bbox_pred_p6) = self.get_rpn_subnet(fpn_p6, cfg.network.NUM_ANCHORS, 'p6')
    rpn_cls_prob_dict = {'rpn_cls_prob_stride64': rpn_prob_p6, 'rpn_cls_prob_stride32': rpn_prob_p5, 'rpn_cls_prob_stride16': rpn_prob_p4, 'rpn_cls_prob_stride8': rpn_prob_p3, 'rpn_cls_prob_stride4': rpn_prob_p2}
    rpn_bbox_pred_dict = {'rpn_bbox_pred_stride64': rpn_bbox_pred_p6, 'rpn_bbox_pred_stride32': rpn_bbox_pred_p5, 'rpn_bbox_pred_stride16': rpn_bbox_pred_p4, 'rpn_bbox_pred_stride8': rpn_bbox_pred_p3, 'rpn_bbox_pred_stride4': rpn_bbox_pred_p2}
    # Python 2 only: list concatenation of dict item lists.
    arg_dict = dict((rpn_cls_prob_dict.items() + rpn_bbox_pred_dict.items()))
    if is_train:
        # RPN targets are only needed when the base net is being trained.
        if (not cfg.network.base_net_lock):
            rpn_label = mx.sym.Variable(name='label')
            rpn_bbox_target = mx.sym.Variable(name='bbox_target')
            rpn_bbox_weight = mx.sym.Variable(name='bbox_weight')
        gt_boxes = mx.sym.Variable(name='gt_boxes')
        if (not cfg.network.base_net_lock):
            # Concatenate per-level RPN outputs and attach RPN losses.
            rpn_cls_score = mx.sym.Concat(rpn_cls_score_p2, rpn_cls_score_p3, rpn_cls_score_p4, rpn_cls_score_p5, rpn_cls_score_p6, dim=2)
            rpn_bbox_loss = mx.sym.Concat(rpn_bbox_loss_p2, rpn_bbox_loss_p3, rpn_bbox_loss_p4, rpn_bbox_loss_p5, rpn_bbox_loss_p6, dim=2)
            rpn_cls_output = mx.sym.SoftmaxOutput(data=rpn_cls_score, label=rpn_label, multi_output=True, normalization='valid', use_ignore=True, ignore_label=(- 1), name='rpn_cls_prob')
            rpn_bbox_loss = (rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_l1', scalar=3.0, data=(rpn_bbox_loss - rpn_bbox_target)))
            rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss, grad_scale=(1.0 / cfg.TRAIN.RPN_BATCH_SIZE))
        # Custom op merges per-level proposals into one ROI set (train NMS params).
        aux_dict = {'op_type': 'pyramid_proposal', 'name': 'rois', 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE), 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS), 'rpn_pre_nms_top_n': cfg.TRAIN.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TRAIN.RPN_POST_NMS_TOP_N, 'threshold': cfg.TRAIN.RPN_NMS_THRESH, 'rpn_min_size': cfg.TRAIN.RPN_MIN_SIZE}
        rois = mx.sym.Custom(**dict((arg_dict.items() + aux_dict.items())))
        gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=((- 1), 5), name='gt_boxes_reshape')
        # Sample ROIs and produce classification/regression targets.
        (rois, label, bbox_target, bbox_weight) = mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_reg_classes, batch_images=cfg.TRAIN.BATCH_IMAGES, batch_rois=cfg.TRAIN.BATCH_ROIS, cfg=cPickle.dumps(cfg), fg_fraction=cfg.TRAIN.FG_FRACTION)
    else:
        # Test-time proposals use the TEST NMS configuration.
        aux_dict = {'op_type': 'pyramid_proposal', 'name': 'rois', 'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE), 'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS), 'rpn_pre_nms_top_n': cfg.TEST.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TEST.RPN_POST_NMS_TOP_N, 'threshold': cfg.TEST.RPN_NMS_THRESH, 'rpn_min_size': cfg.TEST.RPN_MIN_SIZE}
        rois = mx.sym.Custom(**dict((arg_dict.items() + aux_dict.items())))
    # Shared offset params for deformable ROI pooling, one per FPN level;
    # small lr_mult keeps the offset branches learning slowly.
    offset_p2_weight = mx.sym.Variable(name='offset_p2_weight', dtype=np.float32, lr_mult=0.01)
    offset_p3_weight = mx.sym.Variable(name='offset_p3_weight', dtype=np.float32, lr_mult=0.01)
    offset_p4_weight = mx.sym.Variable(name='offset_p4_weight', dtype=np.float32, lr_mult=0.01)
    offset_p5_weight = mx.sym.Variable(name='offset_p5_weight', dtype=np.float32, lr_mult=0.01)
    offset_p2_bias = mx.sym.Variable(name='offset_p2_bias', dtype=np.float32, lr_mult=0.01)
    offset_p3_bias = mx.sym.Variable(name='offset_p3_bias', dtype=np.float32, lr_mult=0.01)
    offset_p4_bias = mx.sym.Variable(name='offset_p4_bias', dtype=np.float32, lr_mult=0.01)
    offset_p5_bias = mx.sym.Variable(name='offset_p5_bias', dtype=np.float32, lr_mult=0.01)
    roi_pool = mx.symbol.Custom(data_p2=fpn_p2, data_p3=fpn_p3, data_p4=fpn_p4, data_p5=fpn_p5, offset_weight_p2=offset_p2_weight, offset_bias_p2=offset_p2_bias, offset_weight_p3=offset_p3_weight, offset_bias_p3=offset_p3_bias, offset_weight_p4=offset_p4_weight, offset_bias_p4=offset_p4_bias, offset_weight_p5=offset_p5_weight, offset_bias_p5=offset_p5_bias, rois=rois, op_type='fpn_roi_pooling', name='fpn_roi_pooling', with_deformable=True)
    # Two shared fc layers on the pooled features.
    fc_new_1 = mx.symbol.FullyConnected(name='fc_new_1', data=roi_pool, num_hidden=1024)
    fc_new_1_relu = mx.sym.Activation(data=fc_new_1, act_type='relu', name='fc_new_1_relu')
    fc_new_2 = mx.symbol.FullyConnected(name='fc_new_2', data=fc_new_1_relu, num_hidden=1024)
    fc_new_2_relu = mx.sym.Activation(data=fc_new_2, act_type='relu', name='fc_new_2_relu')
    if (is_train and cfg.network.base_net_lock):
        # Freeze everything up to (and including) the shared fc layers.
        fc_new_2_relu = mx.sym.BlockGrad(fc_new_2_relu)
        rois = mx.sym.BlockGrad(rois)
        label = mx.sym.BlockGrad(label)
        bbox_target = mx.sym.BlockGrad(bbox_target)
        bbox_weight = mx.sym.BlockGrad(bbox_weight)
    # Learned class representatives, materialized via fc layers fed a
    # constant 1 (i.e. the fc weights ARE the representatives).
    lr_mult = cfg.TRAIN.REPS_LR_MULT
    if cfg.network.SEPARABLE_REPS:
        # Per-class base vector + shared per-rep offsets, combined by broadcast.
        base = mx.sym.FullyConnected(data=self.get_constant_symbol(1), name='fc_representatives_base', num_hidden=(cfg.network.EMBEDDING_DIM * (num_classes - 1)), no_bias=True, lr_mult=lr_mult)
        offset = mx.sym.FullyConnected(data=self.get_constant_symbol(1), name='fc_representatives_offset', num_hidden=(cfg.network.EMBEDDING_DIM * cfg.network.REPS_PER_CLASS), no_bias=True, lr_mult=lr_mult)
        base = mx.sym.reshape(base, shape=(cfg.network.EMBEDDING_DIM, 1, (num_classes - 1)))
        offset = mx.sym.reshape(offset, shape=(cfg.network.EMBEDDING_DIM, cfg.network.REPS_PER_CLASS, 1))
        representatives = mx.sym.broadcast_add(base, offset, name='fc_representatives')
    else:
        representatives = mx.sym.FullyConnected(data=self.get_constant_symbol(1), name='fc_representatives', num_hidden=((cfg.network.EMBEDDING_DIM * cfg.network.REPS_PER_CLASS) * (num_classes - 1)), no_bias=True, lr_mult=lr_mult)
        representatives = mx.sym.reshape(representatives, shape=(cfg.network.EMBEDDING_DIM, cfg.network.REPS_PER_CLASS, (num_classes - 1)))
    if cfg.network.REP_L2_NORM:
        # L2-normalize each representative along the embedding dimension.
        representatives = mx.sym.transpose(mx.sym.L2Normalization(mx.sym.transpose(representatives, axes=(1, 0, 2)), mode='channel'), axes=(1, 0, 2))
    extra_outputs = [mx.sym.BlockGrad(representatives)]
    # Per-ROI embedding, optionally L2-normalized.
    batch_embed = mx.symbol.FullyConnected(name='batch_embed', data=fc_new_2_relu, num_hidden=cfg.network.EMBEDDING_DIM)
    if cfg.network.EMBED_L2_NORM:
        batch_embed = mx.sym.L2Normalization(data=batch_embed, name='batch_embed_nrm', mode='instance')
    # Similarity of each ROI embedding to every representative, then distances.
    cos_sim = mx.sym.dot(batch_embed, representatives, transpose_b=False)
    all_cls_rep_dist = self.cos_sim_2_dist(cos_sim, cfg, embd=batch_embed, reps=representatives)
    if (is_train and cfg.network.EMBED_LOSS_ENABLED):
        # Margin loss between distance to true class and closest other class.
        if cfg.network.SMOOTH_MIN:
            # Soft-min via log-sum-exp with SMOOTH_CONST temperature.
            sum_exp_dist = mx.sym.sum_axis(mx.sym.exp(mx.sym.broadcast_mul(all_cls_rep_dist, self.get_constant_symbol(cfg.network.SMOOTH_CONST))), axis=1, keepdims=True)
            all_cls_min_dist = mx.sym.broadcast_div(mx.sym.log(sum_exp_dist), self.get_constant_symbol(cfg.network.SMOOTH_CONST))
        else:
            all_cls_min_dist = mx.sym.min_axis(all_cls_rep_dist, axis=1, keepdims=True)
        all_cls_min_dist = mx.sym.reshape(all_cls_min_dist, shape=(0, (num_classes - 1)))
        # One-hot masks over foreground classes (background column sliced off);
        # the 1000 off-mask pushes the true class out of the "false" min.
        mod_true_class = mx.sym.slice_axis(mx.sym.one_hot(label, depth=num_classes, on_value=1, off_value=0), axis=1, begin=1, end=None)
        mod_false_class = mx.sym.slice_axis(mx.sym.one_hot(label, depth=num_classes, on_value=1000, off_value=0), axis=1, begin=1, end=None)
        min_dist_true = mx.sym.sum_axis(mx.sym.broadcast_mul(all_cls_min_dist, mod_true_class), axis=1)
        min_dist_false = mx.sym.min_axis(mx.sym.broadcast_add(all_cls_min_dist, mod_false_class), axis=1)
        embed_loss_val = mx.sym.broadcast_sub(min_dist_true, min_dist_false)
        embed_loss_val = mx.sym.broadcast_add(embed_loss_val, self.get_constant_symbol(cfg.network.EMBED_LOSS_MARGIN))
        embed_loss_val = mx.sym.relu(embed_loss_val)
        embed_loss_val = mx.sym.reshape(embed_loss_val, shape=(0, 1))
    if (is_train and cfg.network.REPS_CLS_LOSS):
        # Representative-to-representative margin loss: same-class reps should
        # be closer to each other than to reps of other classes.
        mask_block_ones = mx.sym.ones(shape=(cfg.network.REPS_PER_CLASS, cfg.network.REPS_PER_CLASS))
        mask_block_zeros = mx.sym.zeros(shape=(cfg.network.REPS_PER_CLASS, cfg.network.REPS_PER_CLASS))
        mask = None
        # Build a block-diagonal same-class mask by concatenation.
        for iC1 in range((num_classes - 1)):
            mask_row = None
            for iC2 in range((num_classes - 1)):
                if (iC1 == iC2):
                    cblock = mask_block_ones
                else:
                    cblock = mask_block_zeros
                if (mask_row is None):
                    mask_row = cblock
                else:
                    mask_row = mx.sym.concat(mask_row, cblock, dim=1)
            if (mask is None):
                mask = mask_row
            else:
                mask = mx.sym.concat(mask, mask_row, dim=0)
        # Large additive penalties exclude entries from the respective mins.
        mask_NC = mx.sym.broadcast_mul(self.get_constant_symbol(1000), mask)
        mask_C = mx.sym.broadcast_sub(self.get_constant_symbol(1000), mask_NC)
        mask_C = mx.sym.BlockGrad(mask_C)
        mask_NC = mx.sym.BlockGrad(mask_NC)
        # Flatten reps to (EMBEDDING_DIM, classes*reps) and take pairwise distances.
        R = mx.sym.reshape(mx.sym.transpose(representatives, axes=(0, 2, 1)), shape=(0, (- 1)))
        R2R_cos_sim = mx.sym.dot(R, R, transpose_a=True)
        R2R = self.cos_sim_2_dist_generic(R2R_cos_sim, x=R, y=R, x_is_norm=cfg.network.REP_L2_NORM, y_is_norm=cfg.network.REP_L2_NORM)
        C2C = mx.sym.broadcast_add(R2R, mask_C)
        C2NC = mx.sym.broadcast_add(R2R, mask_NC)
        # topk(k=2) then slice skips the zero self-distance on the diagonal.
        min_dist_C = mx.sym.topk(C2C, axis=1, k=2, ret_typ='value', is_ascend=True)
        min_dist_C = mx.sym.slice_axis(min_dist_C, axis=1, begin=1, end=2)
        min_dist_NC = mx.sym.min_axis(C2NC, axis=1, keepdims=True)
        reps_cls_loss_val = mx.sym.broadcast_sub(min_dist_C, min_dist_NC)
        reps_cls_loss_val = mx.sym.broadcast_add(reps_cls_loss_val, self.get_constant_symbol(cfg.network.EMBED_LOSS_MARGIN))
        reps_cls_loss_val = mx.sym.relu(reps_cls_loss_val)
    # Distances -> Gaussian-kernel probabilities -> per-class scores; the
    # background score is the complement of the best foreground score.
    probs = mx.sym.exp(mx.sym.broadcast_mul(all_cls_rep_dist, self.get_constant_symbol(((- 0.5) / float((cfg.network.SIGMA ** 2))))))
    comb_cls_scores = mx.sym.max_axis(probs, axis=1, keepdims=False)
    comb_cls_scores = mx.sym.broadcast_add(comb_cls_scores, self.get_constant_symbol(1e-07))
    bg_scores = mx.sym.broadcast_sub(self.get_constant_symbol((1 + 1e-07)), mx.sym.max_axis(comb_cls_scores, axis=1, keepdims=True))
    cls_score = mx.sym.concat(bg_scores, comb_cls_scores, dim=1, name='bg_concat')
    cls_score = mx.sym.reshape(cls_score, shape=(0, (- 1)))
    if cfg.network.SOFTMAX_ENABLED:
        # Temperature scaling before the softmax head.
        cls_score = mx.sym.broadcast_mul(self.get_constant_symbol(cfg.network.SOFTMAX_MUL), cls_score)
    if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
        # Auxiliary plain linear classifier on the shared features.
        cls_score_lin = mx.symbol.FullyConnected(name='cls_score_lin', data=fc_new_2_relu, num_hidden=num_classes)
    bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=fc_new_2_relu, num_hidden=(num_reg_classes * 4))
    if is_train:
        if cfg.TRAIN.ENABLE_OHEM:
            rm_last = int(cfg.TRAIN.RM_LAST)
            (labels_ohem, bbox_weights_ohem) = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes, num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM, cls_score=cls_score, bbox_pred=bbox_pred, labels=label, bbox_targets=bbox_target, bbox_weights=bbox_weight, rm_last=rm_last)
            if cfg.network.SOFTMAX_ENABLED:
                cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=(- 1))
            else:
                # Hand-rolled cross-entropy on the (already probability-like)
                # scores; OHEM-ignored ROIs (label -1) contribute zero loss.
                zz = mx.sym.zeros_like(label)
                cls_prob = mx.sym.BlockGrad(cls_score)
                invalid = mx.sym.broadcast_equal(labels_ohem, self.get_constant_symbol((- 1)))
                minoh_labels = mx.sym.one_hot(mx.sym.broadcast_add(mx.sym.cast(invalid, dtype='float32'), labels_ohem), depth=num_classes, on_value=(- 1), off_value=0)
                ce_loss = mx.sym.where(invalid, x=zz, y=mx.sym.sum(mx.sym.broadcast_mul(minoh_labels, mx.sym.log(mx.sym.broadcast_add(cls_score, self.get_constant_symbol(1e-07)))), axis=1))
                ce_loss = mx.sym.MakeLoss(ce_loss, normalization='valid')
            bbox_loss_ = (bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target)))
            bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS_OHEM))
            rcnn_label = labels_ohem
            if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
                cls_prob_lin = mx.sym.SoftmaxOutput(name='cls_prob_lin', data=cls_score_lin, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=(- 1))
            if cfg.network.EMBED_LOSS_ENABLED:
                # Gate the embedding loss by the OHEM-selected ROIs.
                embed_loss_ = (mx.sym.slice_axis(bbox_weights_ohem, axis=1, begin=(- 1), end=None) * embed_loss_val)
                embed_loss = mx.sym.MakeLoss(name='embed_loss', data=embed_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS_OHEM))
        else:
            cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')
            bbox_loss_ = (bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target)))
            bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS))
            rcnn_label = label
            if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
                cls_prob_lin = mx.sym.SoftmaxOutput(name='cls_prob_lin', data=cls_score_lin, label=label, normalization='valid')
            if cfg.network.EMBED_LOSS_ENABLED:
                embed_loss_ = (mx.sym.slice_axis(bbox_weight, axis=1, begin=0, end=1) * embed_loss_val)
                embed_loss = mx.sym.MakeLoss(name='embed_loss', data=embed_loss_, grad_scale=(1.0 / cfg.TRAIN.BATCH_ROIS))
        if cfg.network.EMBED_LOSS_ENABLED:
            extra_outputs += [embed_loss]
        if cfg.network.REPS_CLS_LOSS:
            extra_outputs += [mx.sym.MakeLoss(name='reps_cls_loss', data=reps_cls_loss_val, grad_scale=(1.0 / (cfg.network.REPS_PER_CLASS * (num_classes - 1))))]
        if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
            extra_outputs += [cls_prob_lin]
        if (not cfg.network.SOFTMAX_ENABLED):
            # NOTE(review): ce_loss is only defined when ENABLE_OHEM is on;
            # with OHEM off and SOFTMAX off this would raise NameError —
            # presumably those flags are never combined that way. Confirm.
            extra_outputs += [ce_loss]
        extra_outputs += [mx.sym.BlockGrad(rois), mx.sym.identity(mx.sym.BlockGrad(batch_embed), name='psp_final_embed')]
        # Reshape per-image for the training outputs.
        rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1)), name='label_reshape')
        cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1), num_classes), name='cls_prob_reshape')
        bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, (- 1), (4 * num_reg_classes)), name='bbox_loss_reshape')
        if cfg.network.base_net_lock:
            # RPN losses are omitted when the base net is frozen.
            group = mx.sym.Group(([cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)] + extra_outputs))
        else:
            group = mx.sym.Group(([rpn_cls_output, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)] + extra_outputs))
    else:
        # Inference outputs: rois, class probabilities, box deltas, embeddings
        # and the raw (un-tempered) class scores.
        cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
        cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, (- 1), num_classes), name='cls_prob_reshape')
        bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, (- 1), (4 * num_reg_classes)), name='bbox_pred_reshape')
        cls_score_orig = cls_score
        if cfg.network.SOFTMAX_ENABLED:
            # Undo the temperature scaling applied above.
            cls_score_orig = mx.sym.broadcast_div(cls_score_orig, self.get_constant_symbol(cfg.network.SOFTMAX_MUL))
        cls_score_orig = mx.sym.Reshape(data=cls_score_orig, shape=(cfg.TEST.BATCH_IMAGES, (- 1), num_classes))
        group = mx.sym.Group([rois, cls_prob, bbox_pred, mx.sym.identity(batch_embed, name='psp_final_embed'), mx.sym.identity(cls_score_orig, name='cls_score')])
    self.sym = group
    return group
def init_weight_rcnn(self, cfg, arg_params, aux_params):
    """Initialize the R-CNN head parameters in-place in `arg_params`.

    Weights are drawn from N(0, sigma) with shapes taken from
    ``self.arg_shape_dict``; biases are zero-initialized. The call order of
    the random draws matches the original implementation exactly.
    """
    def _gauss(param, sigma):
        # Gaussian init with the shape inferred for this parameter.
        arg_params[param] = mx.random.normal(0, sigma, shape=self.arg_shape_dict[param])

    def _zeros(param):
        arg_params[param] = mx.nd.zeros(shape=self.arg_shape_dict[param])

    for layer in ('fc_new_1', 'fc_new_2', 'bbox_pred', 'batch_embed'):
        _gauss(layer + '_weight', 0.01)
        _zeros(layer + '_bias')

    name = 'fc_representatives'
    if cfg.network.SEPARABLE_REPS:
        _gauss(name + '_base_weight', 0.1)
        _gauss(name + '_offset_weight', 0.1)
    elif cfg.network.SEPARABLE_REPS_INIT:
        # Initialize the joint representative matrix as class-center + offset.
        C = mx.random.normal(0, 0.1, shape=(cfg.network.EMBEDDING_DIM, 1, (cfg.dataset.NUM_CLASSES - 1)))
        R = mx.random.normal(0, 0.05, shape=(cfg.network.EMBEDDING_DIM, cfg.network.REPS_PER_CLASS, 1))
        arg_params[name + '_weight'] = mx.nd.reshape((C + R), shape=((- 1), 1))
    else:
        _gauss(name + '_weight', 0.1)

    if cfg.network.ADDITIONAL_LINEAR_CLS_LOSS:
        _gauss('cls_score_lin_weight', 0.01)
        _zeros('cls_score_lin_bias')
def init_deformable_convnet(self, cfg, arg_params, aux_params):
    """Zero-initialize the deformable-convolution offset branches.

    Zeroed offsets make each deformable conv start out as a regular conv.
    """
    offset_layers = (
        'res5a_branch2b',
        'res5b_branch2b',
        'res5c_branch2b',
        'res3b3_branch2b',
        'res4b22_branch2b',
    )
    for layer in offset_layers:
        for suffix in ('_offset_weight', '_offset_bias'):
            key = layer + suffix
            arg_params[key] = mx.nd.zeros(shape=self.arg_shape_dict[key])
def init_weight_fpn(self, cfg, arg_params, aux_params):
    """Initialize all FPN lateral/output conv parameters in `arg_params`.

    Each conv gets a N(0, 0.01) weight and a zero bias; the order of the
    random draws matches the original implementation (p6..p2 output convs,
    then p5..p2 1x1 lateral convs).
    """
    def _init_conv(prefix):
        arg_params[prefix + '_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[prefix + '_weight'])
        arg_params[prefix + '_bias'] = mx.nd.zeros(shape=self.arg_shape_dict[prefix + '_bias'])

    for level in ('p6', 'p5', 'p4', 'p3', 'p2'):
        _init_conv('fpn_' + level)
    for level in ('p5', 'p4', 'p3', 'p2'):
        _init_conv('fpn_' + level + '_1x1')
def init_weight(self, cfg, arg_params, aux_params):
    """Fill in freshly-initialized parameters, merging with pretrained ones.

    Builds a complete set of fresh initializations, then merges them into
    `arg_params`/`aux_params`. When
    ``cfg.network.pretrained_weights_are_priority`` is set, an existing
    pretrained entry is kept (for arg params, only if its shape matches);
    otherwise fresh values always overwrite.
    """
    fresh_args = {}
    fresh_aux = {}
    # Shared RPN/offset parameters: offsets start at zero, the rest Gaussian.
    for name in self.shared_param_list:
        w_key = name + '_weight'
        b_key = name + '_bias'
        if 'offset' in name:
            fresh_args[w_key] = mx.nd.zeros(shape=self.arg_shape_dict[w_key])
        else:
            fresh_args[w_key] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[w_key])
        fresh_args[b_key] = mx.nd.zeros(shape=self.arg_shape_dict[b_key])
    self.init_deformable_convnet(cfg, fresh_args, fresh_aux)
    self.init_weight_rcnn(cfg, fresh_args, fresh_aux)
    self.init_weight_fpn(cfg, fresh_args, fresh_aux)

    prefer_pretrained = cfg.network.pretrained_weights_are_priority
    for key in fresh_args:
        keep_pretrained = (prefer_pretrained
                           and (key in arg_params)
                           and (arg_params[key].shape == fresh_args[key].shape))
        if not keep_pretrained:
            arg_params[key] = fresh_args[key]
    for key in fresh_aux:
        if not (prefer_pretrained and (key in aux_params)):
            aux_params[key] = fresh_aux[key]
class SimpleNet(nn.Module):
    """Backbone -> optional MLP head -> optional linear classifier.

    If ``num_classes <= 0`` no classifier is created and :meth:`forward`
    returns the feature vector directly.
    """

    def __init__(self, cfg, model_cfg, num_classes, **kwargs):
        super().__init__()
        self.backbone = build_backbone(
            model_cfg.BACKBONE.NAME,
            verbose=cfg.VERBOSE,
            pretrained=model_cfg.BACKBONE.PRETRAINED,
            **kwargs,
        )
        feature_dim = self.backbone.out_features

        # Optional MLP head on top of the backbone features.
        self.head = None
        if model_cfg.HEAD.NAME and model_cfg.HEAD.HIDDEN_LAYERS:
            self.head = build_head(
                model_cfg.HEAD.NAME,
                verbose=cfg.VERBOSE,
                in_features=feature_dim,
                hidden_layers=model_cfg.HEAD.HIDDEN_LAYERS,
                activation=model_cfg.HEAD.ACTIVATION,
                bn=model_cfg.HEAD.BN,
                dropout=model_cfg.HEAD.DROPOUT,
                **kwargs,
            )
            feature_dim = self.head.out_features

        self.classifier = None
        if num_classes > 0:
            self.classifier = nn.Linear(feature_dim, num_classes)

        self._fdim = feature_dim

    def fdim(self):
        # NOTE(review): likely intended as a @property upstream (decorator
        # possibly lost in extraction) — kept as a plain method to preserve
        # the visible interface; confirm against callers.
        return self._fdim

    def forward(self, x, return_feature=False):
        features = self.backbone(x)
        if self.head is not None:
            features = self.head(features)
        if self.classifier is None:
            # No classifier configured: the features are the output.
            return features
        logits = self.classifier(features)
        return (logits, features) if return_feature else logits
class TestConsecutiveDuplicates(TestCase):
    """Tests for drop_consecutive_duplicates over a date-indexed QFSeries."""

    def setUp(self):
        # Values 1, 2, 2, 3, 3, 3: first occurrences sit at positions 0, 1, 3
        # and last occurrences at positions 0, 2, 5.
        values = [1, 2, 2, 3, 3, 3]
        index = pd.date_range(start='2018-05-13', periods=len(values))
        self.test_series = QFSeries(data=values, index=index)

    def test_drop_consecutive_duplicates_keep_first(self):
        expected = self.test_series.iloc[[0, 1, 3]]
        result = drop_consecutive_duplicates(self.test_series, Method.KEEP_FIRST)
        assert_series_equal(expected, result)

    def test_drop_consecutive_duplicates_keep_last(self):
        expected = self.test_series.iloc[[0, 2, 5]]
        result = drop_consecutive_duplicates(self.test_series, Method.KEEP_LAST)
        assert_series_equal(expected, result)

    def test_correct_behaviour_when_there_are_no_duplicates(self):
        # A series without consecutive duplicates must come back unchanged
        # regardless of the chosen strategy.
        unique_series = self.test_series.iloc[[0, 1, 3]]
        for method in (Method.KEEP_FIRST, Method.KEEP_LAST):
            assert_series_equal(
                unique_series, drop_consecutive_duplicates(unique_series, method))
class Album(Collection, HasKey):
    """A collection of songs that form one album, keyed by the song's album key.

    ``peoplesort`` and ``genre`` are cached per instance; :meth:`finalize`
    drops the cached values so they are recomputed after the song set changes.
    """

    # NOTE(review): the original file had two bare ``_property`` lines here,
    # which raise NameError at class creation and look like a mangled caching
    # decorator. Restored as ``functools.cached_property`` because finalize()
    # pops 'peoplesort'/'genre' from ``__dict__`` — exactly how a
    # cached_property cache is invalidated. Imported locally to keep this
    # class self-contained.
    from functools import cached_property

    @cached_property
    def peoplesort(self):
        """Human sort key for the first line of ~peoplesort (cached)."""
        return util.human_sort_key(self.get('~peoplesort').split('\n')[0])

    @cached_property
    def genre(self):
        """Human sort key for the first line of genre (cached)."""
        return util.human_sort_key(self.get('genre').split('\n')[0])

    del cached_property  # keep the helper off the public class namespace

    def date(self):
        """Return the album's date tag."""
        return self.get('date')

    def title(self):
        """Return the album title tag."""
        return self.get('album')

    def __init__(self, song):
        super().__init__()
        self.songs = set()
        # Sort key and album key are taken from the song used to construct
        # this album.
        self.sort = util.human_sort_key(song('albumsort'))
        self.key = song.album_key

    def str_key(self):
        """Return the album key as a string."""
        return str(self.key)

    def finalize(self):
        """Refresh derived state after songs were added or removed."""
        super().finalize()
        # Invalidate the cached_property values so the next access recomputes.
        self.__dict__.pop('peoplesort', None)
        self.__dict__.pop('genre', None)

    def __repr__(self):
        return ('Album(%s)' % repr(self.key))
class Migration(migrations.Migration):
    """Add an ``all_day`` boolean flag to both event rule models."""

    dependencies = [
        ('events', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='occurringrule',
            name='all_day',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='recurringrule',
            name='all_day',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
    ]
# NOTE(review): this bare call looks like a mangled decorator — presumably
# ``@functional_datapipe('threadpool_map')`` upstream; as written it would
# raise NameError at import time. Confirm against the original source.
_datapipe('threadpool_map')
class ThreadPoolMapperIterDataPipe(IterDataPipe[T_co]):
    """Applies ``fn`` to items of ``source_datapipe`` using a thread pool.

    Up to ``scheduled_tasks`` items are submitted to the pool ahead of
    consumption; results are yielded strictly in input order (FIFO), so
    concurrency never reorders the stream.
    """
    # Source pipe and mapping callable (assigned in __init__).
    datapipe: IterDataPipe
    fn: Callable
    def __init__(self, source_datapipe: IterDataPipe, fn: Callable, input_col=None, output_col=None, scheduled_tasks: int=128, max_workers: Optional[int]=None, **threadpool_kwargs) -> None:
        """Validate and store the mapping configuration.

        ``input_col``/``output_col`` select which field(s) of each item are
        passed to ``fn`` and where the result is written; both None means
        ``fn`` maps the whole item. ``max_workers`` and ``threadpool_kwargs``
        are forwarded to ``ThreadPoolExecutor``.
        """
        super().__init__()
        self.datapipe = source_datapipe
        # Reject functions (e.g. lambdas in some contexts) that cannot be pickled.
        _check_unpickable_fn(fn)
        self.fn = fn
        if (scheduled_tasks <= 0):
            raise ValueError("'scheduled_tasks' is required to be a positive integer.")
        self.scheduled_tasks = scheduled_tasks
        if ((max_workers is not None) and (max_workers <= 0)):
            raise ValueError("'max_workers' is required to be a positive integer.")
        self.max_workers = max_workers
        self.threadpool_kwargs = threadpool_kwargs
        self.input_col = input_col
        if ((input_col is None) and (output_col is not None)):
            raise ValueError('`output_col` must be None when `input_col` is None.')
        if isinstance(output_col, (list, tuple)):
            if (len(output_col) > 1):
                raise ValueError('`output_col` must be a single-element list or tuple')
            # Unwrap the single-element sequence to a scalar column key.
            output_col = output_col[0]
        self.output_col = output_col
        validate_input_col(fn, input_col)
    def _apply_fn(self, data):
        """Apply ``fn`` to one item, honoring input_col/output_col selection."""
        if ((self.input_col is None) and (self.output_col is None)):
            # Simple case: map the whole item.
            return self.fn(data)
        if (self.input_col is None):
            res = self.fn(data)
        elif isinstance(self.input_col, (list, tuple)):
            # Multiple input columns are unpacked as positional arguments.
            args = tuple((data[col] for col in self.input_col))
            res = self.fn(*args)
        else:
            res = self.fn(data[self.input_col])
        # Tuples are immutable: convert to list for in-place edits, restore after.
        if isinstance(data, tuple):
            t_flag = True
            data = list(data)
        else:
            t_flag = False
        if (self.output_col is None):
            if isinstance(self.input_col, (list, tuple)):
                # Result replaces the first input column; the rest are removed
                # (deleted in reverse so earlier indices stay valid).
                data[self.input_col[0]] = res
                for idx in sorted(self.input_col[1:], reverse=True):
                    del data[idx]
            else:
                data[self.input_col] = res
        elif (self.output_col == (- 1)):
            # Sentinel -1 means "append as a new column".
            data.append(res)
        else:
            data[self.output_col] = res
        return (tuple(data) if t_flag else data)
    def __iter__(self) -> Iterator[T_co]:
        """Yield mapped items in input order while keeping the pool busy."""
        with futures.ThreadPoolExecutor(max_workers=self.max_workers, **self.threadpool_kwargs) as executor:
            futures_deque: deque = deque()
            has_next = True
            itr = iter(self.datapipe)
            # Prime the pipeline with up to `scheduled_tasks` submissions.
            for _ in range(self.scheduled_tasks):
                try:
                    futures_deque.append(executor.submit(self._apply_fn, next(itr)))
                except StopIteration:
                    has_next = False
                    break
            # Steady state: submit one new task per result consumed, so at
            # most `scheduled_tasks` futures are ever outstanding.
            while (len(futures_deque) > 0):
                if has_next:
                    try:
                        futures_deque.append(executor.submit(self._apply_fn, next(itr)))
                    except StopIteration:
                        has_next = False
                # FIFO pop preserves the source order; .result() re-raises
                # any exception from the worker thread.
                (yield futures_deque.popleft().result())
    def __len__(self) -> int:
        """Length of the source pipe, if it defines one."""
        if isinstance(self.datapipe, Sized):
            return len(self.datapipe)
        raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.