code stringlengths 281 23.7M |
|---|
def xtype_from_derivation(derivation: str) -> str:
    """Infer the wallet script type from a bip32 derivation path string.

    The first (hardened) index encodes the purpose; unknown paths fall back
    to 'standard'.
    """
    nodes = convert_bip32_strpath_to_intpath(derivation)
    if nodes:
        purpose = nodes[0]
        simple_purposes = {
            84 + BIP32_PRIME: 'p2wpkh',
            49 + BIP32_PRIME: 'p2wpkh-p2sh',
            44 + BIP32_PRIME: 'standard',
            45 + BIP32_PRIME: 'standard',
        }
        if purpose in simple_purposes:
            return simple_purposes[purpose]
        # BIP-48 style paths carry the script type in the fourth hardened index.
        if purpose == 48 + BIP32_PRIME and len(nodes) >= 4:
            script_type = PURPOSE48_SCRIPT_TYPES_INV.get(nodes[3] - BIP32_PRIME)
            if script_type is not None:
                return script_type
    return 'standard'
class SEResNeXtUnit(nn.Module):
    """SE-ResNeXt residual unit: a ResNeXt bottleneck followed by a
    squeeze-and-excitation block, added to an (optionally projected) shortcut."""

    def __init__(self, in_channels, out_channels, stride, cardinality, bottleneck_width):
        super(SEResNeXtUnit, self).__init__()
        # A 1x1 projection is needed whenever the shortcut cannot be a plain identity.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        self.body = ResNeXtBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            cardinality=cardinality,
            bottleneck_width=bottleneck_width)
        self.se = SEBlock(channels=out_channels)
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                activation=None)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        shortcut = self.identity_conv(x) if self.resize_identity else x
        out = self.body(x)
        out = self.se(out)
        return self.activ(out + shortcut)
class UpBlock2D(nn.Module):
    """UNet decoder ("up") block: `num_layers` ResNet blocks, each optionally
    followed by a pixel-wise Transformer2DModel cross-attention, plus an
    optional upsampler.

    NOTE(review): despite the plain name this variant adds per-pixel
    cross-attention against `pixelwise_hidden_states` -- confirm against the
    vanilla diffusers UpBlock2D before swapping implementations.
    """

    def __init__(self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, output_scale_factor=1.0, add_upsample=True, transformer_layers_per_block: int=1, num_attention_heads=1, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, use_pixelwise_attention=True):
        super().__init__()
        resnets = []
        pixel_attentions = []
        for i in range(num_layers):
            # The last resnet receives the skip connection from the matching
            # encoder level (`in_channels`); earlier ones chain on `out_channels`.
            res_skip_channels = (in_channels if (i == (num_layers - 1)) else out_channels)
            resnet_in_channels = (prev_output_channel if (i == 0) else out_channels)
            resnets.append(ResnetBlock2D(in_channels=(resnet_in_channels + res_skip_channels), out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm))
            # One attention module (or None placeholder) per resnet, so the
            # zip() in forward() stays aligned when attention is disabled.
            pixel_attentions.append((Transformer2DModel(num_attention_heads, (out_channels // num_attention_heads), in_channels=out_channels, num_layers=transformer_layers_per_block, cross_attention_dim=out_channels, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention) if use_pixelwise_attention else None))
        self.resnets = nn.ModuleList(resnets)
        self.pixel_attentions = nn.ModuleList(pixel_attentions)
        if add_upsample:
            self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
        else:
            self.upsamplers = None
        self.gradient_checkpointing = False

    def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, pixelwise_hidden_states=None, cross_attention_kwargs=None):
        """Run the resnet (+ optional pixel attention) stack, consuming one
        encoder skip state per layer from the END of `res_hidden_states_tuple`,
        then upsample if configured."""
        for (resnet, pix_attn) in zip(self.resnets, self.pixel_attentions):
            # Pop the most recent skip connection and concatenate on channels.
            res_hidden_states = res_hidden_states_tuple[(- 1)]
            res_hidden_states_tuple = res_hidden_states_tuple[:(- 1)]
            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
            if (pixelwise_hidden_states is not None):
                # Likewise pop one pixel-wise conditioning map per layer and
                # flatten it to (batch, h*w, channels) for attention.
                pixelwise_hidden_state = pixelwise_hidden_states[(- 1)]
                pixelwise_hidden_states = pixelwise_hidden_states[:(- 1)]
                pixelwise_hidden_state = rearrange(pixelwise_hidden_state, 'b c h w -> b (h w) c')
            if (self.training and self.gradient_checkpointing):
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)
                    return custom_forward
                # use_reentrant=False only exists on torch >= 1.11.
                if is_torch_version('>=', '1.11.0'):
                    hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, use_reentrant=False)
                else:
                    hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
            else:
                hidden_states = resnet(hidden_states, temb)
            if (pixelwise_hidden_states is not None):
                hidden_states = pix_attn(hidden_states, encoder_hidden_states=pixelwise_hidden_state, cross_attention_kwargs=cross_attention_kwargs, attention_mask=None, encoder_attention_mask=None, return_dict=False)[0]
        if (self.upsamplers is not None):
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states, upsample_size)
        return hidden_states
# NOTE(review): decorator restored -- the source line had been mangled to a
# bare argument tuple; the DictConfig signature and OmegaConf usage indicate
# a hydra entry point.
@hydra.main(config_path='config', config_name='train_tracking_default')
def main(cfg: DictConfig) -> None:
    """Hydra entry point: train the garment-tracking pipeline.

    Builds the datamodule and model from the config, wires up wandb logging,
    dumps the resolved config to config.yaml, and runs pytorch-lightning
    training with checkpointing on val_loss.
    """
    print(os.getcwd())
    # exist_ok avoids crashing when hydra re-runs in an existing output dir
    # (the original bare os.mkdir raised FileExistsError).
    os.makedirs('checkpoints', exist_ok=True)
    datamodule = SparseUnet3DTrackingDataModule2(**cfg.datamodule)
    batch_size = datamodule.kwargs['batch_size']
    pipeline_model = GarmentTrackingPipeline(batch_size=batch_size, **cfg.garment_tracking_model)
    # Tag the run with the dataset category (zarr file stem).
    category = pathlib.Path(cfg.datamodule.zarr_path).stem
    cfg.logger.tags.append(category)
    logger = pl.loggers.WandbLogger(project=os.path.basename(__file__), **cfg.logger)
    wandb_run = logger.experiment
    wandb_meta = {'run_name': wandb_run.name, 'run_id': wandb_run.id}
    all_config = {'config': OmegaConf.to_container(cfg, resolve=True), 'output_dir': os.getcwd(), 'wandb': wandb_meta}
    # Persist the fully-resolved config next to the checkpoints; use a context
    # manager so the handle is closed (the original leaked it).
    with open('config.yaml', 'w') as config_file:
        yaml.dump(all_config, config_file, default_flow_style=False)
    logger.log_hyperparams(all_config)
    checkpoint_callback = pl.callbacks.ModelCheckpoint(dirpath='checkpoints', filename='{epoch}-{val_loss:.4f}', monitor='val_loss', save_last=True, save_top_k=5, mode='min', save_weights_only=False, every_n_epochs=1, save_on_train_epoch_end=True)
    trainer = pl.Trainer(callbacks=[checkpoint_callback], checkpoint_callback=True, logger=logger, check_val_every_n_epoch=1, **cfg.trainer)
    trainer.fit(model=pipeline_model, datamodule=datamodule)
def save_config_file_for_per_channel_quantization():
    """Write ./quantsim_config.json enabling per-channel quantization.

    Defaults quantize op outputs (asymmetric) and params (symmetric), leave
    biases unquantized, and fuse the listed op pairs as supergroups.
    """
    fused_pairs = (
        ['Conv', 'Relu'],
        ['Conv', 'Clip'],
        ['Add', 'Relu'],
        ['Gemm', 'Relu'],
        ['Conv', 'BatchNormalization'],
        ['Gemm', 'BatchNormalization'],
        ['ConvTranspose', 'BatchNormalization'],
    )
    config = {
        'defaults': {
            'ops': {'is_output_quantized': 'True', 'is_symmetric': 'False'},
            'params': {'is_quantized': 'True', 'is_symmetric': 'True'},
            'per_channel_quantization': 'True',
        },
        'params': {'bias': {'is_quantized': 'False'}},
        'op_type': {},
        'supergroups': [{'op_list': pair} for pair in fused_pairs],
        'model_input': {},
        'model_output': {},
    }
    with open('./quantsim_config.json', 'w') as out_file:
        json.dump(config, out_file)
def require_files(*name_patterns: str) -> None:
    """Ensure every glob pattern matches at least one file in the CWD.

    Returns None when all patterns match; otherwise raises
    RequiredFilesNotFoundError rooted at the caller's directory.
    """
    cwd = Path.cwd()
    if all(any(cwd.glob(pattern)) for pattern in name_patterns):
        return
    # Locate the caller so the error can report where the check was made from.
    frame = inspect.currentframe()
    if frame is None:
        raise Exception('workflow: require_files() called from unknown frame')
    caller_frame = frame.f_back
    if caller_frame is None:
        raise Exception('workflow: require_files() called from unknown caller')
    caller_file = inspect.getframeinfo(caller_frame).filename
    caller_directory = Path(caller_file).parent
    raise RequiredFilesNotFoundError(name_patterns=name_patterns, root=caller_directory)
def _induce_cliques(adjtable, clique_to_members, fill_value=1):
adj_across_clique = adjtable.merge(clique_to_members['input_index'], left_on='focal', right_index=True).explode('input_index').rename(columns={'input_index': 'subclique_focal'}).merge(clique_to_members['input_index'], left_on='neighbor', right_index=True).explode('input_index').rename(columns={'input_index': 'subclique_neighbor'}).reset_index().drop(['focal', 'neighbor', 'index'], axis=1).rename(columns={'subclique_focal': 'focal', 'subclique_neighbor': 'neighbor'})[['focal', 'neighbor', 'weight']]
is_multimember_clique = (clique_to_members['input_index'].str.len() > 1)
adj_within_clique = clique_to_members[is_multimember_clique]['input_index'].apply((lambda x: list(permutations(x, 2)))).explode().apply(pd.Series).rename(columns={0: 'focal', 1: 'neighbor'}).assign(weight=fill_value)
new_adj = pd.concat((adj_across_clique, adj_within_clique), ignore_index=True, axis=0).reset_index(drop=True)
return new_adj |
def call_func(t):
    """Execute the task described by dict *t* and return {'id', 'result'}.

    *t* either carries a callable under 'func', or a 'module'/'method' pair
    resolved via importlib; 'args'/'kwargs' are forwarded to the call.
    """
    import numpy as N
    import random
    # Re-seed numpy per task (worker processes may share a seed otherwise).
    # BUG FIX: the original `random.randint(0, )` was missing its upper bound
    # and raised TypeError at runtime; numpy seeds must fit in 32 bits.
    N.random.seed(random.randint(0, 2**32 - 1))
    if ('func' in t):
        # A direct callable excludes the module/method form.
        assert ('module' not in t)
        assert ('method' not in t)
        func = t['func']
    else:
        modu = importlib.import_module(t['module'])
        func = getattr(modu, t['method'])
    r = func(*t['args'], **t['kwargs'])
    return {'id': t['id'], 'result': r}
class Configure(object):
    """Load .ini-style configuration files into nested `Args` objects.

    NOTE(review): the methods take no `self`/`cls` and are invoked as
    `Configure.get_file_cfg(...)` below, i.e. they behave as implicit static
    methods -- consider decorating them with @staticmethod.
    """

    def get_file_cfg(file):
        """Parse *file* with configparser: one `Args` attribute per section,
        one value per item (converted via String.to_basic)."""
        cfgargs = Args()
        parser = configparser.ConfigParser()
        parser.read(file)
        for section in parser.sections():
            setattr(cfgargs, section, Args())
            for item in parser.items(section):
                # item is a (key, raw_string) pair.
                setattr(getattr(cfgargs, section), item[0], String.to_basic(item[1]))
        return cfgargs

    def refresh_args_by_file_cfg(file, prev_args):
        """Re-read *file*, then backfill values missing there from *prev_args*
        (presumably an iterable of (dotted_name, value) pairs -- TODO confirm)."""
        args = Configure.get_file_cfg(file)
        # NOTE(review): `args.dir is not Args` compares an instance with the
        # class object and is therefore always True, so these defaults are
        # applied unconditionally -- probably meant a hasattr()/None check.
        if (args.dir is not Args):
            args.dir = Args()
            args.dir.model = DEFAULT_MODEL_DIR
            args.dir.dataset = DEFAULT_DATASET_DIR
            args.dir.configure = DEFAULT_CONFIGURE_DIR
        for (arg_name, arg) in prev_args:
            if (arg is None):
                continue
            if (arg_name != 'cfg'):
                # Walk (creating as needed) the dotted attribute path, then
                # fill the leaf only if it is currently unset.
                names = arg_name.split('.')
                cur = args
                for name in names[:(- 1)]:
                    if (getattr(cur, name) is None):
                        setattr(cur, name, Args())
                    cur = getattr(cur, name)
                if (getattr(cur, names[(- 1)]) is None):
                    setattr(cur, names[(- 1)], arg)
        return args

    def Get(cfg):
        """Load configuration file *cfg* from DEFAULT_CONFIGURE_DIR and apply
        the same directory defaults (same always-true check as above)."""
        args = Configure.get_file_cfg(os.path.join(DEFAULT_CONFIGURE_DIR, cfg))
        if (args.dir is not Args):
            args.dir = Args()
            args.dir.model = DEFAULT_MODEL_DIR
            args.dir.dataset = DEFAULT_DATASET_DIR
            args.dir.configure = DEFAULT_CONFIGURE_DIR
        return args
def _get_via_file_cache(cls, app_data, path, exe, env):
    """Return interpreter info for *exe*, using the on-disk cache in *app_data*.

    A cached entry is used only if its recorded path and mtime still match the
    interpreter file and its recorded system executable still exists; otherwise
    the entry is dropped and the interpreter is re-queried in a subprocess.
    """
    path_text = str(path)
    try:
        path_modified = path.stat().st_mtime
    except OSError:
        # Unstattable path: sentinel mtime guarantees a cache mismatch below.
        path_modified = (- 1)
    if (app_data is None):
        app_data = AppDataDisabled()
    (py_info, py_info_store) = (None, app_data.py_info(path))
    # Lock so concurrent processes don't race on the same cache entry.
    with py_info_store.locked():
        if py_info_store.exists():
            data = py_info_store.read()
            (of_path, of_st_mtime, of_content) = (data['path'], data['st_mtime'], data['content'])
            if ((of_path == path_text) and (of_st_mtime == path_modified)):
                py_info = cls._from_dict(of_content.copy())
                sys_exe = py_info.system_executable
                if ((sys_exe is not None) and (not os.path.exists(sys_exe))):
                    # The recorded base interpreter vanished: entry is stale.
                    py_info_store.remove()
                    py_info = None
            else:
                # Path or mtime changed since caching: invalidate.
                py_info_store.remove()
        if (py_info is None):
            # Cache miss: interrogate the interpreter and store the result
            # (only on success; a failure object is returned to the caller).
            (failure, py_info) = _run_subprocess(cls, exe, app_data, env)
            if (failure is None):
                data = {'st_mtime': path_modified, 'path': path_text, 'content': py_info._to_dict()}
                py_info_store.write(data)
            else:
                py_info = failure
    return py_info
class Pool(base.Pool):
    """aiomysql-backed connection pool.

    Construction is two-phase: ``await Pool(...)`` triggers ``_init()`` via
    ``__await__`` to create the underlying aiomysql pool.
    """

    def __init__(self, url, loop, init=None, bakery=None, prebake=True, **kwargs):
        self._url = url          # parsed DB URL (host/port/username/database/password)
        self._loop = loop
        self._kwargs = kwargs    # extra keyword args forwarded to aiomysql.create_pool
        self._pool = None        # created lazily in _init()
        self._conn_init = init   # optional async per-connection initializer
        self._bakery = bakery
        self._prebake = prebake

    async def _init(self):
        """Create the aiomysql pool from the stored URL and kwargs."""
        args = self._kwargs.copy()
        args.update(loop=self._loop, host=self._url.host, port=self._url.port, user=self._url.username, db=self._url.database, password=self._url.password)
        # Leave autocommit at the driver default unless the caller overrode it.
        args.setdefault('autocommit', None)
        self._pool = (await aiomysql.create_pool(**args))
        return self

    def __await__(self):
        return self._init().__await__()

    def raw_pool(self):
        """Expose the underlying aiomysql pool."""
        return self._pool

    async def acquire(self, *, timeout=None):
        """Acquire a connection, optionally bounded by *timeout* seconds.

        Runs the per-connection initializer if configured; on initializer
        failure the connection is released before the error propagates.
        """
        if (timeout is None):
            conn = (await self._pool.acquire())
        else:
            conn = (await asyncio.wait_for(self._pool.acquire(), timeout=timeout))
        if (self._conn_init is not None):
            try:
                (await self._conn_init(conn))
            except:
                # Bare except is deliberate: always hand the connection back,
                # then re-raise whatever the initializer threw.
                (await self.release(conn))
                raise
        return conn

    async def release(self, conn):
        """Return *conn* to the pool."""
        (await self._pool.release(conn))

    async def close(self):
        """Close the pool and wait for all connections to terminate."""
        self._pool.close()
        (await self._pool.wait_closed())

    def repr(self, color):
        """Human-readable pool stats; colored via click when available."""
        if (color and (not click)):
            warnings.warn('Install click to get colorful repr.', ImportWarning)
        if (color and click):
            return '<{classname} max={max} min={min} cur={cur} use={use}>'.format(classname=click.style(((self._pool.__class__.__module__ + '.') + self._pool.__class__.__name__), fg='green'), max=click.style(repr(self._pool.maxsize), fg='cyan'), min=click.style(repr(self._pool._minsize), fg='cyan'), cur=click.style(repr(self._pool.size), fg='cyan'), use=click.style(repr(len(self._pool._used)), fg='cyan'))
        else:
            return '<{classname} max={max} min={min} cur={cur} use={use}>'.format(classname=((self._pool.__class__.__module__ + '.') + self._pool.__class__.__name__), max=self._pool.maxsize, min=self._pool._minsize, cur=self._pool.size, use=len(self._pool._used))
class TestExcitationPreserving(QiskitNatureTestCase):
    """End-to-end check that VQE with the ExcitationPreserving ansatz reaches
    the stored reference ground-state energy."""

    def setUp(self):
        super().setUp()
        # Fix seeds so the VQE optimization is deterministic.
        self.seed = 50
        algorithm_globals.random_seed = self.seed
        # NOTE(review): this value looks truncated in the source; confirm the
        # intended reference energy against the upstream test suite.
        self.reference_energy = -1.0

    # NOTE(review): decorator restored -- the source had been mangled into a
    # bare `_test` name and a loose argument tuple on the following line.
    @unittest.skipIf(not _optionals.HAS_PYSCF, 'pyscf not available.')
    def test_excitation_preserving(self):
        """Solve the PySCF driver's default problem with VQE + ExcitationPreserving."""
        driver = PySCFDriver()
        mapper = ParityMapper()
        problem = driver.run()
        _ = problem.second_q_ops()
        num_particles = problem.num_particles
        num_spatial_orbitals = problem.num_spatial_orbitals
        optimizer = SLSQP(maxiter=100)
        initial_state = HartreeFock(num_spatial_orbitals, num_particles, mapper)
        # Two spin orbitals per spatial orbital.
        num_qubits = 2 * num_spatial_orbitals
        wavefunction = ExcitationPreserving(int(num_qubits))
        # Prepend the Hartree-Fock reference state to the variational circuit.
        wavefunction.compose(initial_state, front=True, inplace=True)
        solver = VQE(ansatz=wavefunction, optimizer=optimizer, estimator=Estimator())
        gsc = GroundStateEigensolver(mapper, solver)
        result = gsc.solve(problem)
        self.assertAlmostEqual(result.total_energies[0], self.reference_energy, places=4)
class QlLoaderPE_UEFI(QlLoader):
    """Qiling loader for UEFI PE modules.

    Maps DXE and SMM modules into emulated memory, installs the protocols
    they expect (loaded-image, SMM access, etc.) and drives their execution
    one entry point at a time via an end-of-execution trap address.
    """

    def __init__(self, ql: Qiling):
        super().__init__(ql)
        self.ql = ql
        self.modules = []       # queue of (path, image_base, entry_point, context) awaiting execution
        self.events = {}
        self.notify_list = []
        # Annotations only -- these are assigned later in run().
        self.dxe_context: DxeContext
        self.smm_context: SmmContext
        self.context: UefiContext

    # Instance members persisted by save()/restore().
    # NOTE(review): 'tpl' is only assigned in run(), so calling save() before
    # run() would raise AttributeError.
    __save_members = ('modules', 'events', 'notify_list', 'tpl')

    def save(self) -> Mapping[(str, Any)]:
        """Snapshot loader state (listed members plus the OS heap)."""
        saved_state = super(QlLoaderPE_UEFI, self).save()
        for member in QlLoaderPE_UEFI.__save_members:
            saved_state[member] = getattr(self, member)
        saved_state['heap'] = self.ql.os.heap.save()
        return saved_state

    def restore(self, saved_state: Mapping[(str, Any)]):
        """Restore a snapshot produced by save()."""
        super(QlLoaderPE_UEFI, self).restore(saved_state)
        for member in QlLoaderPE_UEFI.__save_members:
            setattr(self, member, saved_state[member])
        self.ql.os.heap.restore(saved_state['heap'])

    def install_loaded_image_protocol(self, image_base: int, image_size: int):
        """Install an EFI_LOADED_IMAGE_PROTOCOL instance for a mapped module,
        keyed by its image base (which doubles as the image handle)."""
        fields = {'gST': self.gST, 'image_base': image_base, 'image_size': image_size}
        descriptor = EfiLoadedImageProtocol.make_descriptor(fields)
        self.context.install_protocol(descriptor, image_base)
        self.context.loaded_image_protocol_modules.append(image_base)

    def map_and_load(self, path: str, context: UefiContext, exec_now: bool=False):
        """Map the PE at *path* into memory under *context*.

        Relocates if the preferred base is taken, installs the loaded-image
        protocol, and either executes the module immediately (exec_now) or
        queues it for execute_next_module().
        """
        ql = self.ql
        pe = PE(path, fast_load=True)
        # Prefer the image's own base address; fall back to the context's
        # next free slot (ImageBase may be 0).
        image_base = (pe.OPTIONAL_HEADER.ImageBase or context.next_image_base)
        image_size = ql.mem.align_up(pe.OPTIONAL_HEADER.SizeOfImage)
        assert ((image_base % ql.mem.pagesize) == 0), 'image base is expected to be page-aligned'
        if (image_base != pe.OPTIONAL_HEADER.ImageBase):
            pe.relocate_image(image_base)
        pe.parse_data_directories()
        data = bytes(pe.get_memory_mapped_image())
        ql.mem.map(image_base, image_size, info='[module]')
        ql.mem.write(image_base, data)
        ql.log.info(f'Module {path} loaded to {image_base:#x}')
        entry_point = (image_base + pe.OPTIONAL_HEADER.AddressOfEntryPoint)
        ql.log.info(f'Module entry point at {entry_point:#x}')
        # The first module loaded defines the session's overall entry point.
        if (self.entry_point == 0):
            self.entry_point = entry_point
        self.install_loaded_image_protocol(image_base, image_size)
        self.images.append(Image(image_base, (image_base + image_size), path))
        context.next_image_base = (image_base + image_size)
        module_info = (path, image_base, entry_point, context)
        if exec_now:
            # Run immediately with no end-of-execution trap.
            self.execute_module(*module_info, eoe_trap=None)
        else:
            self.modules.append(module_info)

    def call_function(self, addr: int, args: Sequence[int], ret: Optional[int]):
        """Call a native function at *addr* with integer args; *ret* is the
        address execution returns to (or None)."""
        types = ((PARAM_INTN,) * len(args))
        targs = tuple(zip(types, args))
        self.ql.os.fcall.call_native(addr, targs, ret)

    def unload_modules(self, context: UefiContext) -> bool:
        """Invoke the Unload entry of the first module that registered one.

        Returns True if an unload call was dispatched (the caller should wait
        for it to finish before proceeding), False when nothing was unloaded.
        """
        for handle in context.loaded_image_protocol_modules:
            struct_addr = context.protocols[handle][self.loaded_image_protocol_guid]
            loaded_image_protocol = EfiLoadedImageProtocol.EFI_LOADED_IMAGE_PROTOCOL.loadFrom(self.ql, struct_addr)
            unload_ptr = loaded_image_protocol.Unload.value
            # A NULL Unload pointer means the module has no unload handler.
            if (unload_ptr != 0):
                self.ql.log.info(f'Unloading module {handle:#x}, calling {unload_ptr:#x}')
                self.ql.os.fcall.call_native(unload_ptr, ((POINTER, handle),), context.end_of_execution_ptr)
                context.loaded_image_protocol_modules.remove(handle)
                # At most one unload per invocation; the exit trap re-enters.
                return True
        return False

    def execute_module(self, path: str, image_base: int, entry_point: int, context: UefiContext, eoe_trap: Optional[int]):
        """Arrange execution of a module entry point: EntryPoint(ImageHandle, SystemTable),
        returning to *eoe_trap* when done.

        NOTE(review): call_native is issued before running_module/entry_point
        are updated -- presumably it only sets up the call frame and emulation
        starts later; confirm against qiling's fcall semantics.
        """
        ImageHandle = image_base
        SystemTable = self.gST
        # Switch the OS over to this module's heap and stack.
        self.ql.os.heap = context.heap
        self.ql.arch.regs.rsp = context.top_of_stack
        self.ql.arch.regs.rbp = context.top_of_stack
        self.ql.os.fcall.call_native(entry_point, ((POINTER, ImageHandle), (POINTER, SystemTable)), eoe_trap)
        self.ql.os.running_module = path
        self.ql.os.entry_point = entry_point
        self.ql.log.info(f'Running from {entry_point:#010x} of {path}')

    def execute_next_module(self):
        """Pop and execute the next queued module, unless the OS callback
        vetoes it or the queue is empty."""
        if (not self.modules):
            return
        (path, image_base, entry_point, context) = self.modules.pop(0)
        if self.ql.os.notify_before_module_execution(path):
            return
        self.execute_module(path, image_base, entry_point, context, context.end_of_execution_ptr)

    def __init_dxe_environment(self, ql: Qiling) -> DxeContext:
        """Build the DXE context: heap, stack, system table (gST), config
        tables and the baseline SMM access/base protocols."""
        profile = ql.os.profile['DXE']
        context = DxeContext(ql)
        heap_base = int(profile['heap_address'], 0)
        heap_size = int(profile['heap_size'], 0)
        context.init_heap(heap_base, heap_size)
        ql.log.info(f'DXE heap at {heap_base:#010x}')
        stack_base = int(profile['stack_address'], 0)
        stack_size = int(profile['stack_size'], 0)
        context.init_stack(stack_base, stack_size)
        ql.log.info(f'DXE stack at {context.top_of_stack:#010x}')
        context.next_image_base = int(profile['image_address'], 0)
        # Carve out the system table and configuration-table data areas.
        gST = context.heap.alloc((4 * 1024))
        conf_data = context.heap.alloc((64 * 1024))
        context.conf_table_data_ptr = conf_data
        context.conf_table_data_next_ptr = conf_data
        # The gST address doubles as the end-of-execution trap target.
        context.end_of_execution_ptr = gST
        st.initialize(ql, context, gST)
        protocols = (EfiSmmAccess2Protocol, EfiSmmBase2Protocol)
        for p in protocols:
            context.install_protocol(p.descriptor, 1)
        return context

    def __init_smm_environment(self, ql: Qiling) -> SmmContext:
        """Build the SMM context: SMRAM bounds, heap, stack, SMM system table
        (gSmst) and the baseline SMM CPU/dispatch protocols."""
        profile = ql.os.profile['SMM']
        context = SmmContext(ql)
        context.smram_base = int(profile['smram_base'], 0)
        context.smram_size = int(profile['smram_size'], 0)
        heap_base = int(profile['heap_address'], 0)
        heap_size = int(profile['heap_size'], 0)
        context.init_heap(heap_base, heap_size)
        ql.log.info(f'SMM heap at {heap_base:#010x}')
        stack_base = int(profile['stack_address'], 0)
        stack_size = int(profile['stack_size'], 0)
        context.init_stack(stack_base, stack_size)
        ql.log.info(f'SMM stack at {context.top_of_stack:#010x}')
        context.next_image_base = int(profile['image_address'], 0)
        gSmst = context.heap.alloc((4 * 1024))
        conf_data = context.heap.alloc((64 * 1024))
        context.conf_table_data_ptr = conf_data
        context.conf_table_data_next_ptr = conf_data
        context.end_of_execution_ptr = gSmst
        smst.initialize(ql, context, gSmst)
        protocols = (EfiSmmCpuProtocol, EfiSmmSwDispatch2Protocol)
        for p in protocols:
            context.install_protocol(p.descriptor, 1)
        return context

    def run(self):
        """Loader entry: validate the arch, set up DXE/SMM environments, load
        every module given on ql.argv (SMM modules, identified by 'Smm' in
        their path, go to the SMM context) and kick off the first one."""
        ql = self.ql
        if (ql.arch.type not in (QL_ARCH.X86, QL_ARCH.X8664)):
            raise QlErrorArch('Unsupported architecture')
        if (ql.arch.type != QL_ARCH.X8664):
            raise QlErrorArch('Only 64-bit modules are supported at the moment')
        self.loaded_image_protocol_guid = ql.os.profile['LOADED_IMAGE_PROTOCOL']['Guid']
        # Initial task priority level -- TODO confirm which TPL constant 4 maps to.
        self.tpl = 4
        self.dxe_context = self.__init_dxe_environment(ql)
        self.smm_context = self.__init_smm_environment(ql)
        self.entry_point = 0
        self.load_address = 0
        try:
            for dependency in ql.argv:
                # Heuristic: module paths containing 'Smm' load into SMRAM context.
                is_smm_module = ('Smm' in dependency)
                if is_smm_module:
                    self.context = self.smm_context
                else:
                    self.context = self.dxe_context
                self.map_and_load(dependency, self.context)
            ql.log.info(f'Done loading modules')
        except QlMemoryMappedError:
            ql.log.critical('Could not map dependency')
        # Both contexts' end-of-execution addresses trap into the scheduler.
        self.set_exit_hook(self.dxe_context.end_of_execution_ptr)
        self.set_exit_hook(self.smm_context.end_of_execution_ptr)
        self.execute_next_module()

    def set_exit_hook(self, address: int):
        """Hook *address* so that a returning module triggers the scheduler:
        deliver pending protocol notifications, run the next queued module,
        unload finished ones, and finally stop the OS."""
        def __module_exit_trap(ql: Qiling):
            if ql.os.notify_after_module_execution(len(self.modules)):
                return
            if utils.execute_protocol_notifications(ql):
                return
            if self.modules:
                self.execute_next_module()
            else:
                # Give every module a chance to run its Unload handler
                # before shutting the emulation down.
                if (self.unload_modules(self.smm_context) or self.unload_modules(self.dxe_context)):
                    return
                ql.log.info(f'No more modules to run')
                ql.os.stop()
        self.ql.hook_address(__module_exit_trap, address)
def test_attrs(fake_manager):
    """FakeObject exposes server attrs, rejects unknown ones, and records
    local assignments separately from the original attrs."""
    obj = helpers.FakeObject(fake_manager, {'foo': 'bar'})
    # Server-supplied attribute is readable.
    assert obj.foo == 'bar'
    # Unknown attributes raise instead of returning a default.
    with pytest.raises(AttributeError):
        getattr(obj, 'bar')
    # Local assignment is readable and tracked as an update, without
    # touching the originally-supplied attrs.
    obj.bar = 'baz'
    assert obj.bar == 'baz'
    assert obj._attrs == {'foo': 'bar'}
    assert obj._updated_attrs == {'bar': 'baz'}
def set_literal_values(builder: IRBuilder, items: Sequence[Expression]) -> (list[object] | None):
    """Try to evaluate every expression in *items* to a literal value.

    Handles constant-foldable expressions, the named singletons None/True/False,
    and (recursively) tuple expressions. Returns the list of values, or None
    if any item could not be resolved.
    """
    # Named singletons that constant folding does not cover.
    named_constants = {'builtins.None': None, 'builtins.True': True, 'builtins.False': False}
    values: list[object] = []
    for item in items:
        folded = constant_fold_expr(builder, item)
        if folded is not None:
            values.append(folded)
        elif isinstance(item, RefExpr):
            if item.fullname in named_constants:
                values.append(named_constants[item.fullname])
        elif isinstance(item, TupleExpr):
            nested = set_literal_values(builder, item.items)
            if nested is not None:
                values.append(tuple(nested))
    # Any unresolved item leaves the lists with different lengths.
    return values if len(values) == len(items) else None
def create_COCO_img_mask(data):
    """Build a palette PNG instance mask for one COCO image and copy the image.

    *data* is (img_id, dst_img_dir, dst_mask_dir). Relies on module globals:
    `coco` (COCO API handle), `args.src` (source image root) and
    `mask_palette` -- confirm these are set before multiprocessing fan-out.
    """
    (img_id, dst_img_dir, dst_mask_dir) = data
    img_info = coco.loadImgs(img_id)[0]
    h = img_info['height']
    w = img_info['width']
    mask_all = np.zeros((h, w), np.uint8)
    anno_ids = coco.getAnnIds(imgIds=img_info['id'])
    anno_list = coco.loadAnns(anno_ids)
    # 1-based running instance id for pixels written into the mask.
    obj_cnt = 1
    for (idx, anno) in enumerate(anno_list):
        # Skip small objects (area threshold in pixels).
        if (anno['area'] < 500):
            continue
        mask = coco.annToMask(anno)
        # Label this object's pixels with its instance id.
        mask_all[(mask > 0)] = (mask[(mask > 0)] * obj_cnt)
        obj_cnt += 1
    # Only write a mask when at least one object survived the area filter.
    if (obj_cnt > 1):
        mask_all = Image.fromarray(mask_all)
        mask_all.putpalette(mask_palette)
        # Swap the image extension for .png.
        img_name = (img_info['file_name'][:(- 4)] + '.png')
        dst_path = os.path.join(dst_mask_dir, img_name)
        mask_all.save(dst_path)
    # Reconstruct the on-disk source location from the last two URL
    # components (e.g. <split>/<file>) and copy the raw image alongside.
    tmp = img_info['coco_url'].split('/')[(- 2):]
    img_path_src = os.path.join(args.src, tmp[0], tmp[1])
    img_path_dst = os.path.join(dst_img_dir, img_info['file_name'])
    copyfile(img_path_src, img_path_dst)
def build_from_path(in_dir, out_dir):
    """Process every utterance listed in <in_dir>/metadata.csv.

    Each metadata line has the form "id|...|text"; the corresponding wav is
    expected at <in_dir>/wavs/<id>.wav. Returns the list of results from
    _process_utterance, in file order.
    """
    texts = []
    metadata_path = os.path.join(in_dir, 'metadata.csv')
    with open(metadata_path, encoding='utf-8') as f:
        # Iterate the file lazily instead of readlines(), and let enumerate
        # track the 1-based line index instead of a manual counter.
        for index, line in enumerate(f, start=1):
            if (index % 100) == 0:
                print('{:d} Done'.format(index))
            parts = line.strip().split('|')
            wav_path = os.path.join(in_dir, 'wavs', ('%s.wav' % parts[0]))
            text = parts[2]
            texts.append(_process_utterance(out_dir, index, wav_path, text))
    return texts
def test_get_current_tag_with_single_existing_tag(initialized_db):
    """With exactly one tagged manifest, get_current_tag resolves it by name."""
    repo = model.repository.create_repository('devtable', 'newrepo', None)
    manifest, _ = create_manifest_for_testing(repo, '1')
    expected = manifest.tag_set.get()
    found = get_current_tag(repo.id, expected.name)
    assert found.id == expected.id
# NOTE(review): decorator restored -- the source line had been mangled to a
# bare `.parametrize(...)` call.
@pytest.mark.parametrize('token_lifetime, time_since', [('1m', '2m'), ('2m', '1m'), ('1h', '1m')])
def test_validation_code(token_lifetime, time_since, initialized_db):
    """A reset code validates iff the configured lifetime covers its age."""
    user = create_user_noverify('foobar', '', email_required=False)
    # Back-date the confirmation so it is `time_since` old.
    created = (datetime.now() - convert_to_timedelta(time_since))
    (verification_code, unhashed) = Credential.generate()
    confirmation = EmailConfirmation.create(user=user, pw_reset=True, created=created, verification_code=verification_code)
    encoded = encode_public_private_token(confirmation.code, unhashed)
    with patch('data.model.config.app_config', {'USER_RECOVERY_TOKEN_LIFETIME': token_lifetime}):
        result = validate_reset_code(encoded)
    expect_success = (convert_to_timedelta(token_lifetime) >= convert_to_timedelta(time_since))
    assert (expect_success == (result is not None))
# NOTE(review): decorator restored -- the source line had been mangled to a
# bare `_fixtures(...)` call; reahl tests bind fixtures with @with_fixtures.
# Confirm the decorator name against the project's other tests.
@with_fixtures(WebFixture, InputGroupFixture)
def test_input_group(web_fixture, input_group_fixture):
    """An InputGroup renders one div with the input and optional before/after spans."""
    fixture = input_group_fixture
    tester = WidgetTester(fixture.input_group)
    [outer_div] = tester.xpath('//div')
    assert (outer_div.attrib['class'] == 'has-validation input-group')
    if fixture.expects_before_html:
        rendered_html = tester.get_html_for('//div/input/preceding-sibling::span')
        assert (rendered_html == fixture.expects_before_html)
    else:
        assert (not tester.is_element_present('//div/input/preceding-sibling::span'))
    children = outer_div.getchildren()
    # The input is the first child unless a "before" span precedes it.
    the_input = (children[1] if fixture.expects_before_html else children[0])
    assert (the_input.tag == 'input')
    assert (the_input.name == 'test-an_attribute')
    if fixture.expects_after_html:
        rendered_html = tester.get_html_for('//div/input/following-sibling::span')
        assert (rendered_html == fixture.expects_after_html)
    else:
        assert (not tester.is_element_present('//div/input/following-sibling::span'))
# NOTE(review): decorator restored -- the source line had been mangled to a
# bare `_REGISTRY.register()` call; detectron2-style backbone builders
# register under BACKBONE_REGISTRY. Confirm the registry name in this project.
@BACKBONE_REGISTRY.register()
def build_p37_fcos_dla_bifpn_backbone(cfg, input_shape: ShapeSpec):
    """Build a DLA-34 + BiFPN backbone for FCOS.

    `input_shape` is unused here but kept for the registry's builder signature.
    """
    bottom_up = dla34(cfg)
    in_features = cfg.MODEL.FPN.IN_FEATURES
    out_channels = cfg.MODEL.BIFPN.OUT_CHANNELS
    num_repeats = cfg.MODEL.BIFPN.NUM_BIFPN
    assert (cfg.MODEL.BIFPN.NUM_LEVELS == 5)
    # Two extra pyramid levels stacked on top of the backbone features.
    top_levels = 2
    backbone = BiFPN(bottom_up=bottom_up, in_features=in_features, out_channels=out_channels, num_top_levels=top_levels, num_repeats=num_repeats, norm=cfg.MODEL.BIFPN.NORM)
    return backbone
def main(argv):
    """Compiler-wrapper entry point: optionally log the invocation, run the
    real compiler, optionally log the elapsed time.

    Returns the subprocess exit status. Uses module globals WRAPPER_LOG,
    LOG_OPTIONS, make_flags and compiler_argv.
    """
    global LOG
    # PY3 FIX: the original used the Python 2 `file(...)` builtin and
    # `print >> LOG` statements, both of which fail under Python 3.
    LOG = open(WRAPPER_LOG, 'a+')
    if LOG_OPTIONS['argv']:
        print(' '.join(argv), file=LOG)
    (flags, argv) = make_flags(argv)
    new_argv = compiler_argv(flags, argv)
    start_time = time.time()
    ret = subprocess.call(new_argv)
    end_time = time.time()
    if LOG_OPTIONS['time']:
        print('Time elapsed: {:.3f} seconds'.format(end_time - start_time), file=LOG)
    LOG.close()
    return ret
# NOTE(review): decorator restored -- the source line had been mangled to a
# bare string; the `mock_path` parameter confirms a @patch of filewrite.Path.
@patch('pypyr.steps.filewrite.Path')
def test_filewrite_pass_with_non_string_substitutions(mock_path):
    """filewrite formats non-string substitutions (int payload, falsy
    binary/append flags) and writes text to the resolved path."""
    context = Context({'k1': 'v1', 'p': '/arb/path', 'intkey': 123, 'is_bin': False, 'is_append': 0, 'fileWrite': {'path': '{p}', 'payload': '{intkey}', 'binary': '{is_bin}', 'append': '{is_append}'}})
    with io.StringIO() as out_text:
        with patch('pypyr.steps.filewrite.open', mock_open()) as mock_output:
            # Capture whatever the step writes into an in-memory buffer.
            mock_output.return_value.write.side_effect = out_text.write
            filewrite.run_step(context)
        payload = out_text.getvalue()
    mock_path.assert_called_once_with('/arb/path')
    mocked_path = mock_path.return_value
    # The step must create parent directories before writing.
    mocked_path.parent.mkdir.assert_called_once_with(parents=True, exist_ok=True)
    # binary=False / append=0 resolve to text-mode 'w'.
    mock_output.assert_called_once_with(mocked_path, 'w', encoding=None)
    assert (payload == '123')
def rename_key(orig_key):
    """Translate an original YOSO checkpoint key into the HF naming scheme.

    Rules are order-sensitive: each substitution is applied to the result of
    the previous ones (mirroring the original if-chain), with the
    `transformer_<n>` layer rewrite handled between the two rule tables.
    Keys that do not end up under `cls` are prefixed with 'yoso.'.
    """
    # (needle, old, new): when `needle` occurs, replace `old` with `new`.
    pre_rules = (
        ('model', 'model.', ''),
        ('norm1', 'norm1', 'attention.output.LayerNorm'),
        ('norm2', 'norm2', 'output.LayerNorm'),
        ('norm', 'norm', 'LayerNorm'),
    )
    post_rules = (
        ('mha.attn', 'mha.attn', 'attention.self'),
        ('mha', 'mha', 'attention'),
        ('W_q', 'W_q', 'self.query'),
        ('W_k', 'W_k', 'self.key'),
        ('W_v', 'W_v', 'self.value'),
        ('ff1', 'ff1', 'intermediate.dense'),
        ('ff2', 'ff2', 'output.dense'),
        ('ff', 'ff', 'output.dense'),
        ('mlm_class', 'mlm.mlm_class', 'cls.predictions.decoder'),
        ('mlm', 'mlm', 'cls.predictions.transform'),
    )
    key = orig_key
    for needle, old, new in pre_rules:
        if needle in key:
            key = key.replace(old, new)
    if 'transformer' in key:
        # transformer_<n>.* -> encoder.layer.<n>.*
        layer_num = key.split('.')[0].split('_')[-1]
        key = key.replace(f'transformer_{layer_num}', f'encoder.layer.{layer_num}')
    for needle, old, new in post_rules:
        if needle in key:
            key = key.replace(old, new)
    if 'cls' not in key:
        key = 'yoso.' + key
    return key
# NOTE(review): decorator restored -- the source line had been mangled to a
# bare `.parametrize(...)` call.
@pytest.mark.parametrize('numeric_type_funcs', _calcparams_correct_Python_type_numeric_type_cases())
def test_calcparams_desoto_returns_correct_Python_type(numeric_type_funcs, cec_module_params):
    """calcparams_desoto preserves the numeric type of its array-like inputs."""
    # Wrap irradiance/temperature in the parametrized numeric type.
    numeric_args = dict(effective_irradiance=numeric_type_funcs[0](800.0), temp_cell=numeric_type_funcs[1](25))
    out = pvsystem.calcparams_desoto(**numeric_args, alpha_sc=cec_module_params['alpha_sc'], a_ref=cec_module_params['a_ref'], I_L_ref=cec_module_params['I_L_ref'], I_o_ref=cec_module_params['I_o_ref'], R_sh_ref=cec_module_params['R_sh_ref'], R_s=cec_module_params['R_s'], EgRef=1.121, dEgdT=(- 0.0002677))
    assert all((_calcparams_correct_Python_type_check(a, numeric_args.values()) for a in out))
def load_args(filename):
    """Load a JSON args file, normalising the 'problem' field.

    'op_<dist>' style names are split into problem='op' plus a
    'data_distribution'; a missing 'data_distribution' defaults to None.
    """
    with open(filename, 'r') as fh:
        args = json.load(fh)
    args.setdefault('data_distribution', None)
    problem_name, *dist_parts = args['problem'].split('_')
    if problem_name == 'op':
        args['problem'] = problem_name
        args['data_distribution'] = dist_parts[0]
    return args
class TargetProfileNameValidator(BaseValidator):
    """wx validator: a Target Profile name must be non-empty and unique."""

    def __init__(self):
        BaseValidator.__init__(self)

    def Clone(self):
        # wx requires validators to be copyable.
        return TargetProfileNameValidator()

    def Validate(self, win):
        """Return True when the text control holds a usable name; otherwise
        log, show an error dialog, refocus the control and return False."""
        entityEditor = win.Parent.parent
        textCtrl = self.GetWindow()
        name = textCtrl.GetValue().strip()
        try:
            if not name:
                raise ValueError(_t('You must supply a name for your Target Profile!'))
            if name in [x.rawName for x in entityEditor.choices]:
                raise ValueError(_t('Target Profile name already in use, please choose another.'))
        except ValueError as e:
            pyfalog.error(e)
            wx.MessageBox('{}'.format(e), _t('Error'))
            textCtrl.SetFocus()
            return False
        return True
def batch_sample_anchors(node_vec, ratio, node_mask=None, device=None):
    """Randomly sample a `ratio` fraction of each graph's valid nodes as anchors.

    Returns (anchor_vec, anchor_mask, per-graph index tensors, max anchor count).
    """
    sampled_indices = []
    anchor_counts = []
    for graph_idx in range(node_vec.size(0)):
        # node_mask marks which node slots of this graph are real.
        valid_nodes = int(node_mask[graph_idx].sum().item())
        n_anchors = int(ratio * valid_nodes)
        # Random subset of the valid node positions.
        picked = torch.randperm(valid_nodes)[:n_anchors]
        sampled_indices.append(picked)
        anchor_counts.append(len(picked))
    max_num_anchors = max(anchor_counts, default=0)
    anchor_vec = batch_select_from_tensor(node_vec, sampled_indices, max_num_anchors, device)
    anchor_mask = create_mask(anchor_counts, max_num_anchors, device)
    return (anchor_vec, anchor_mask, sampled_indices, max_num_anchors)
# NOTE(review): decorator restored -- the source line had been mangled to a
# bare `_cache()`; caching keeps repeated setup_logger calls with identical
# arguments from stacking duplicate handlers.
@functools.lru_cache()
def setup_logger(output=None, distributed_rank=0, *, color=True, name='log', abbrev_name=None):
    """Configure and return the logger *name*.

    Rank 0 gets a (optionally colorized) stdout handler; every rank gets a
    file handler when *output* is given (non-zero ranks log to a per-rank
    file suffix). The logger does not propagate to the root logger.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    logger.propagate = False
    if (abbrev_name is None):
        abbrev_name = name
    plain_formatter = logging.Formatter('[%(asctime)s] %(name)s %(levelname)s: %(message)s', datefmt='%m/%d %H:%M:%S')
    # Console output only on the main process.
    if (distributed_rank == 0):
        ch = logging.StreamHandler(stream=sys.stdout)
        ch.setLevel(logging.DEBUG)
        if color:
            formatter = _ColorfulFormatter((colored('[%(asctime)s %(name)s]: ', 'green') + '%(message)s'), datefmt='%m/%d %H:%M:%S', root_name=name, abbrev_name=str(abbrev_name))
        else:
            formatter = plain_formatter
        ch.setFormatter(formatter)
        logger.addHandler(ch)
    if (output is not None):
        if (output.endswith('.txt') or output.endswith('.log')):
            filename = output
        else:
            filename = os.path.join(output, 'log.txt')
        # Non-zero ranks write to their own suffixed file.
        if (distributed_rank > 0):
            filename = (filename + f'.rank{distributed_rank}')
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        fh = logging.StreamHandler(_cached_log_stream(filename))
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(plain_formatter)
        logger.addHandler(fh)
    return logger
def make_valid_identifier(string: str) -> str:
    """Normalise *string* into a lowercase identifier.

    Dashes and spaces become underscores; any other non-alphanumeric,
    non-underscore characters are stripped. Raises InvalidIdentifier when
    the cleaned-up result still fails is_valid_identifier().
    """
    candidate = str(string).strip().replace('-', '_').replace(' ', '_')
    candidate = re.sub('[^_a-zA-Z0-9]', '', candidate).lower()
    if not is_valid_identifier(candidate):
        raise InvalidIdentifier('String cannot be converted to a valid identifier.')
    return candidate
class SetInitialGoal():
    """Builds and holds the initial goal specification for a household task."""

    def __init__(self, obj_position, class_name_size, init_pool_tasks, task_name, same_room=True, goal_template=None, rand=None):
        """Store the task parameters and immediately build the goal via set_goal().

        rand: optional random.Random for reproducible sampling; a fresh
        generator is created when not supplied.
        """
        self.task_name = task_name
        self.init_pool_tasks = init_pool_tasks
        self.obj_position = obj_position
        self.class_name_size = class_name_size
        # Counter for synthetic object ids, starting at 1000.
        self.object_id_count = 1000
        self.surface_size = {}
        self.surface_used_size = {}
        # Upper bound related to object placement -- TODO confirm exact meaning.
        self.max_num_place = 50
        self.goal_template = goal_template
        self.min_num_other_object = 0
        self.max_num_other_object = 0
        self.add_goal_obj_success = True
        if (rand is not None):
            self.rand = rand
        else:
            self.rand = random.Random()
        # NOTE(review): set_goal() runs before self.same_room is assigned;
        # fine only if set_goal() never reads same_room -- confirm.
        self.set_goal()
        self.same_room = same_room
def set_goal(self):
if (self.task_name in ['setup_table', 'clean_table', 'put_dishwasher', 'unload_dishwasher', 'put_fridge', 'read_book', 'prepare_food', 'watch_tv']):
self.init_pool = self.init_pool_tasks[self.task_name]
elif (self.task_name == 'setup_table_prepare_food'):
self.init_pool = copy.deepcopy(self.init_pool_tasks['setup_table'])
self.init_pool.update(self.init_pool_tasks['prepare_food'])
elif (self.task_name == 'setup_table_read_book'):
self.init_pool = copy.deepcopy(self.init_pool_tasks['setup_table'])
self.init_pool.update(self.init_pool_tasks['read_book'])
elif (self.task_name == 'setup_table_watch_tv'):
self.init_pool = copy.deepcopy(self.init_pool_tasks['setup_table'])
self.init_pool.update(self.init_pool_tasks['watch_tv'])
elif (self.task_name == 'setup_table_put_fridge'):
self.init_pool = copy.deepcopy(self.init_pool_tasks['setup_table'])
self.init_pool.update(self.init_pool_tasks['put_fridge'])
elif (self.task_name == 'setup_table_put_dishwasher'):
self.init_pool = copy.deepcopy(self.init_pool_tasks['setup_table'])
self.init_pool.update(self.init_pool_tasks['put_dishwasher'])
elif (self.task_name == 'prepare_food_put_dishwasher'):
self.init_pool = copy.deepcopy(self.init_pool_tasks['prepare_food'])
self.init_pool.update(self.init_pool_tasks['put_dishwasher'])
elif (self.task_name == 'put_fridge_put_dishwasher'):
self.init_pool = copy.deepcopy(self.init_pool_tasks['put_fridge'])
self.init_pool.update(self.init_pool_tasks['put_dishwasher'])
elif (self.task_name == 'put_dishwasher_read_book'):
self.init_pool = copy.deepcopy(self.init_pool_tasks['put_dishwasher'])
self.init_pool.update(self.init_pool_tasks['read_book'])
deb = "\n while 1:\n self.goal = {}\n for k,v in self.init_pool.items():\n self.goal[k] = random.randint(v['min_num'], v['max_num'])\n\n # break\n\n count = 0\n for k,v in self.goal.items():\n count+=v\n "
if (self.goal_template is not None):
self.goal = {}
for (predicate, count) in self.goal_template.items():
elements = predicate.split('_')
for e in elements:
if (e in self.init_pool):
self.goal[e] = count
print(self.goal_template)
print(self.goal)
else:
while 1:
self.goal = {}
for (k, v) in self.init_pool.items():
self.goal[k] = self.rand.randint(v['min_num'], v['max_num'])
count = 0
for (k, v) in self.goal.items():
count += v
if (((self.task_name == 'read_book') and (2 <= count <= 4)) or ((self.task_name == 'watch_tv') and (2 <= count <= 4))):
break
if (((2 <= count <= 6) and (self.task_name not in ['clean_table', 'unload_dishwasher'])) or (3 <= count <= 6)):
break
def get_obj_room(self, obj_id):
room_ids = [node['id'] for node in graph['nodes'] if (node['category'] == 'Rooms')]
room_info = [edge['to_id'] for edge in graph['edges'] if ((edge['to_id'] in room_ids) and (edge['relation_type'] == 'INSIDE') and (edge['from_id'] == obj_id))]
assert (len(room_info) == 1)
objs_in_room = [edge['from_id'] for edge in graph['edges'] if ((edge['to_id'] == room_info[0]) and (edge['relation_type'] == 'INSIDE'))]
return objs_in_room
def check_graph(self, graph, apartment, original_graph):
current_objects = {node['id']: node['class_name'] for node in graph['nodes']}
current_object_ids = list(current_objects.keys())
OBJ_LIST = ['plate', 'waterglass', 'wineglass', 'cutleryfork', 'cupcake', 'juice', 'pancake', 'poundcake', 'wine', 'pudding', 'apple', 'coffeepot', 'cutleryknife']
nodes_to_check = current_object_ids
id2node = {node['id']: node for node in graph['nodes']}
connected_edges = {id: [] for id in nodes_to_check}
for edge in graph['edges']:
if ((edge['from_id'] in nodes_to_check) and (edge['relation_type'] != 'CLOSE') and (id2node[edge['to_id']]['category'] != 'Rooms')):
connected_edges[edge['from_id']].append(edge)
ori_id2node = {node['id']: node for node in original_graph['nodes']}
ori_connected_edges = {id: [] for id in nodes_to_check}
for edge in original_graph['edges']:
if ((edge['from_id'] in nodes_to_check) and (edge['relation_type'] != 'CLOSE') and (ori_id2node[edge['to_id']]['category'] != 'Rooms')):
ori_connected_edges[edge['from_id']].append(edge)
print('num nodes:')
print(len(connected_edges), len(ori_connected_edges))
for (node_id, edges) in connected_edges.items():
if (len(edges) < 1):
if (node_id in ori_connected_edges):
pass
else:
print('add new object error')
print(node_id, id2node[node_id]['class_name'])
return False
return True
def check_goal_achievable(self, graph, comm, env_goal, apartment):
graph_copy = copy.deepcopy(graph)
if (('setup_table' in self.task_name) or ('put_dishwasher' in self.task_name) or ('put_fridge' in self.task_name) or ('prepare_food' in self.task_name)):
curr_task_name = list(env_goal.keys())[0]
for goal in env_goal[curr_task_name]:
subgoal_name = list(goal.keys())[0]
num_obj = list(goal.values())[0]
obj = subgoal_name.split('_')[1]
target_id = int(subgoal_name.split('_')[3])
if self.same_room:
objs_in_room = self.get_obj_room(target_id)
else:
objs_in_room = None
obj_ids = [node['id'] for node in graph_copy['nodes'] if (obj == node['class_name'])]
if (len(obj_ids) < num_obj):
print(subgoal_name, num_obj, obj_ids)
return 0
graph_copy = self.remove_obj(graph_copy, obj_ids)
(self.object_id_count, graph, success_add_obj) = self.add_obj(graph_copy, obj, num_obj, self.object_id_count, objs_in_room=objs_in_room, only_position=target_id)
if (not success_add_obj):
return False
comm.reset(apartment)
(success, message) = comm.expand_scene(graph_copy)
id2node = {node['id']: node for node in graph_copy['nodes']}
if (not success):
if ('unaligned_ids' in message):
for id in message['unaligned_ids']:
print(id2node[id])
elif ('unplaced' in message):
for string in message['unplaced']:
elements = string.split('.')
obj_id = int(elements[1])
print([edge for edge in graph_copy['edges'] if (edge['from_id'] == obj_id)])
else:
success = 1
message = self.task_name
return success
def convert_size(self, envsize):
size = (envsize[0] * envsize[2])
return size
def check_placeable(self, graph, surface_id, obj_name):
obj_size = self.convert_size(self.class_name_size[obj_name])
surface_node = [node for node in graph['nodes'] if (node['id'] == surface_id)]
if (surface_id not in self.surface_size):
surface_node = [node for node in graph['nodes'] if (node['id'] == surface_id)]
assert len(surface_node)
self.surface_size[surface_id] = self.convert_size(self.class_name_size[surface_node[0]['class_name']])
if (surface_id not in self.surface_used_size):
objs_on_surface = [edge['from_id'] for edge in graph['edges'] if (edge['to_id'] == surface_id)]
objs_on_surface_node = [node for node in graph['nodes'] if (node['id'] in objs_on_surface)]
objs_on_surface_size = [self.convert_size(self.class_name_size[node['class_name']]) for node in objs_on_surface_node]
self.surface_used_size[surface_id] = np.sum(objs_on_surface_size)
if ((self.surface_size[surface_id] / 2) > (self.surface_used_size[surface_id] + obj_size)):
self.surface_used_size[surface_id] += obj_size
return 1
else:
return 0
def remove_obj(self, graph, obj_ids):
graph['nodes'] = [node for node in graph['nodes'] if (node['id'] not in obj_ids)]
graph['edges'] = [edge for edge in graph['edges'] if ((edge['from_id'] not in obj_ids) and (edge['to_id'] not in obj_ids))]
return graph
def add_obj(self, graph, obj_name, num_obj, object_id, objs_in_room=None, only_position=None, except_position=None, goal_obj=False):
if isinstance(except_position, int):
except_position = [except_position]
if isinstance(only_position, int):
only_position = [only_position]
edges = []
nodes = []
ids_class = {}
for node in graph['nodes']:
class_name = node['class_name']
if (class_name not in ids_class):
ids_class[class_name] = []
ids_class[class_name].append(node['id'])
if (obj_name == 'cutleryknife'):
pdb.set_trace()
candidates = [(obj_rel_name[0], obj_rel_name[1]) for obj_rel_name in self.obj_position[obj_name] if (obj_rel_name[1] in ids_class.keys())]
id2node = {node['id']: node for node in graph['nodes']}
success_add = 0
for i in range(num_obj):
num_place = 0
while 1:
if (num_place > self.max_num_place):
break
if (only_position != None):
num_place2 = 0
while 1:
if (num_place2 > self.max_num_place):
break
target_id = self.rand.choice(only_position)
if (self.same_room and goal_obj):
if (target_id in objs_in_room):
break
else:
num_place2 += 1
else:
break
target_id_name = [node['class_name'] for node in graph['nodes'] if (node['id'] == target_id)]
if (('livingroom' in target_id_name) and (obj_name == 'plate')):
pdb.set_trace()
target_pool = [k for (k, v) in ids_class.items() if (target_id in v)]
target_position_pool = [tem[0] for tem in self.obj_position[obj_name] if (tem[1] in target_pool)]
if ((len(target_pool) == 0) or (len(target_position_pool) == 0) or (num_place2 > self.max_num_place)):
num_place += 1
continue
else:
relation = self.rand.choice(target_position_pool)
else:
num_place2 = 0
while 1:
if (num_place2 > self.max_num_place):
break
(relation, target_classname) = self.rand.choice(candidates)
target_id = self.rand.choice(ids_class[target_classname])
target_id_name = [node['class_name'] for node in graph['nodes'] if (node['id'] == target_id)]
if (('livingroom' in target_id_name) and (obj_name == 'plate')):
pdb.set_trace()
if (self.same_room and goal_obj):
if (target_id in objs_in_room):
break
else:
num_place2 += 1
else:
break
if (((except_position != None) and (target_id in except_position)) or (num_place2 > self.max_num_place)):
num_place += 1
continue
placeable = self.check_placeable(graph, target_id, obj_name)
if placeable:
new_node = {'id': object_id, 'class_name': obj_name, 'properties': ['GRABBABLE'], 'states': [], 'category': 'added_object'}
nodes.append(new_node)
edges.append({'from_id': object_id, 'relation_type': relation, 'to_id': target_id})
object_id += 1
success_add += 1
break
else:
num_place += 1
graph['nodes'] += nodes
graph['edges'] += edges
if goal_obj:
if (success_add != num_obj):
return (None, None, False)
return (object_id, graph, True)
def setup_other_objs(self, graph, object_id, objs_in_room=None, except_position=None):
new_object_pool = [tem for tem in self.obj_position.keys() if (tem not in list(self.goal.keys()))]
self.num_other_obj = self.rand.choice(list(range(self.min_num_other_object, (self.max_num_other_object + 1))))
for i in range(self.num_other_obj):
obj_name = self.rand.choice(new_object_pool)
obj_in_graph = [node for node in graph['nodes'] if (node['class_name'] == obj_name)]
(object_id, graph) = self.add_obj(graph, obj_name, 1, object_id, objs_in_room=objs_in_room, only_position=None, except_position=except_position)
return (object_id, graph)
def set_tv_off(self, graph, tv_id):
node = [n for n in graph['nodes'] if (n['id'] == tv_id)]
assert (len(node) == 1)
node[0]['states'] = ['OFF']
return graph |
class TestSolve():
    """Tests for qutip's data-layer linear solvers against a NumPy reference.

    Bug fix: the parametrize decorators were mangled into bare
    `.parametrize(...)` lines (a syntax error); restored as
    `@pytest.mark.parametrize`.
    """

    def op_numpy(self, A, b):
        """Dense reference solution."""
        return np.linalg.solve(A, b)

    def _gen_op(self, N, dtype):
        # Random unitary: well conditioned, so every solver should succeed.
        return qutip.rand_unitary(N, dtype=dtype).data

    def _gen_ket(self, N, dtype):
        return qutip.rand_ket(N, dtype=dtype).data

    @pytest.mark.parametrize(['method', 'opt'], [('spsolve', {}), ('splu', {'csc': True}), ('gmres', {'atol': 1e-08}), ('lsqr', {}), ('solve', {}), ('lstsq', {}), pytest.param('mkl_spsolve', {}, marks=skip_no_mkl)], ids=['spsolve', 'splu', 'gmres', 'lsqr', 'solve', 'lstsq', 'mkl_spsolve'])
    @pytest.mark.parametrize('dtype', [CSR, Dia])
    def test_mathematically_correct_sparse(self, method, opt, dtype):
        """Sparse solves must match the dense NumPy reference."""
        if ((dtype is Dia) and (method == 'mkl_spsolve')):
            pytest.skip('mkl is not supported for dia matrix')
        A = self._gen_op(10, dtype)
        b = self._gen_ket(10, Dense)
        expected = self.op_numpy(A.to_array(), b.to_array())
        test = _data.solve_csr_dense(A, b, method, opt)
        test1 = _data.solve(A, b, method, opt)
        assert (test.shape == expected.shape)
        np.testing.assert_allclose(test.to_array(), expected, atol=1e-07, rtol=1e-07)
        np.testing.assert_allclose(test1.to_array(), expected, atol=1e-07, rtol=1e-07)

    @pytest.mark.parametrize(['method', 'opt'], [('solve', {}), ('lstsq', {})])
    def test_mathematically_correct_Dense(self, method, opt):
        """Dense solves must match the dense NumPy reference."""
        A = self._gen_op(10, Dense)
        b = self._gen_ket(10, Dense)
        expected = self.op_numpy(A.to_array(), b.to_array())
        test = _data.solve_dense(A, b, method, opt)
        test1 = _data.solve(A, b, method, opt)
        assert (test.shape == expected.shape)
        np.testing.assert_allclose(test.to_array(), expected, atol=1e-07, rtol=1e-07)
        np.testing.assert_allclose(test1.to_array(), expected, atol=1e-07, rtol=1e-07)

    def test_singular(self):
        """A singular operator must raise a 'singular'-mentioning ValueError."""
        A = qutip.num(2).data
        b = qutip.basis(2, 1).data
        with pytest.raises(ValueError) as err:
            test1 = _data.solve(A, b)
        assert ('singular' in str(err.value).lower())

    def test_incorrect_shape_non_square(self):
        """Non-square operators are rejected."""
        A = qutip.Qobj(np.random.rand(5, 10)).data
        b = qutip.Qobj(np.random.rand(10, 1)).data
        with pytest.raises(ValueError):
            test1 = _data.solve(A, b)

    def test_incorrect_shape_mismatch(self):
        """Mismatched operator/vector shapes are rejected."""
        A = qutip.Qobj(np.random.rand(10, 10)).data
        b = qutip.Qobj(np.random.rand(9, 1)).data
        with pytest.raises(ValueError):
            test1 = _data.solve(A, b)
class RegWalk(Task):
    """Task that walks a registry hive via the `registryquery` command."""

    # Hive name -> single-letter code understood by registryquery.
    _HIVE_CODES = {
        'HKEY_LOCAL_MACHINE': 'L',
        'HKEY_USERS': 'U',
        'HKEY_CURRENT_USER': 'C',
        'HKEY_CURRENT_CONFIG': 'G',
        'HKEY_CLASSES_ROOT': 'R',
    }

    def __init__(self, file):
        Task.__init__(self, file, 'RegistryWalk')

    def CreateCommandLine(self):
        """Build the registryquery invocation from RootKey/Subkey/Depth."""
        self.RootKey = self.RootKey.strip('"')
        key = self._HIVE_CODES.get(self.RootKey, '')
        if self.RootKey not in self._HIVE_CODES:
            # Unknown hive: report it, but still emit a command (empty code),
            # matching the original fall-through behavior.
            dsz.ui.Echo(('Unknown key: %s' % self.RootKey), dsz.ERROR)
        depth = ''
        if ('Depth' in self.__dict__):
            if (int(self.Depth) == 1):
                depth = '-recursive'
        return [('registryquery -hive %s -key %s %s' % (key, self.Subkey, depth))]
class up_conv(nn.Module):
    """Upsample-by-2 followed by a 3x3 conv, batch norm and ReLU.

    Doubles the spatial resolution and maps `ch_in` channels to `ch_out`.
    """

    def __init__(self, ch_in, ch_out):
        super(up_conv, self).__init__()
        layers = [
            nn.Upsample(scale_factor=2),
            nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(ch_out),
            nn.ReLU(inplace=True),
        ]
        self.up = nn.Sequential(*layers)

    def forward(self, x):
        """Apply upsample -> conv -> BN -> ReLU to a (N, C, H, W) tensor."""
        return self.up(x)
class DuplicatesTreeModel(Gtk.TreeStore):
    """Tree model of duplicate songs: one parent row per duplicate group and
    one child row per song, plus tracking of the currently sourced row.

    NOTE(review): `__iter` is only assigned in go_to/find_row/remove; the
    get_current* methods assume go_to ran first - confirm callers guarantee it.
    """
    # Identity formatter for tags displayed verbatim.
    def i(x):
        return x
    # (tag, formatter) pairs defining the visible columns, in column order.
    TAG_MAP = [('artist', i), ('title', i), ('album', i), ('~#length', (lambda s: util.format_time_display(int(s)))), ('~#filesize', (lambda s: util.format_size(int(s)))), ('~#bitrate', i), ('~filename', i)]
    # tag -> formatter lookup derived from TAG_MAP.
    tag_functions = {}
    for (t, f) in TAG_MAP:
        tag_functions[t] = f
    def group_value(cls, group, tag):
        """Format the group-level (parent row) value for `tag`.

        NOTE(review): takes `cls` but carries no @classmethod decorator;
        callers invoke it on instances, so attribute lookup still resolves -
        likely a lost decorator, confirm against upstream.
        """
        try:
            vals = []
            for comment in group[tag]:
                vals.append(comment.get_markup())
            group_val = '\n'.join(vals)
        except KeyError:
            # Tag missing from the group entirely.
            return ''
        else:
            try:
                group_val = cls.tag_functions[tag](group_val)
            except (ValueError, TypeError):
                # Formatter could not handle the joined value; show it raw.
                pass
        return group_val.replace('\n', ', ')
    def find_row(self, song):
        """Locate `song` among the child rows; mark it sourced and return the row."""
        for parent in self:
            for row in parent.iterchildren():
                if (row[0] == song):
                    self.__iter = row.iter
                    self.sourced = True
                    return row
        return None
    def add_to_existing_group(self, key, song):
        """Append `song` under the parent whose key matches, if any."""
        for parent in self:
            if (key == parent[0]):
                print_d('Found group', self)
                return self.append(parent.iter, self.__make_row(song))
        return None
    def __make_row(cls, song):
        """Build a model row: the song object plus its formatted column values.

        NOTE(review): same lost-@classmethod pattern as group_value.
        """
        return ([song] + [util.escape(str(f(song.comma(tag)))) for (tag, f) in cls.TAG_MAP])
    def add_group(self, key, songs):
        """Create a new parent row for `key` with one child row per song."""
        group = AudioFileGroup(songs, real_keys_only=False)
        parent = self.append(None, ([key] + [self.group_value(group, tag) for (tag, f) in self.TAG_MAP]))
        for s in songs:
            self.append(parent, self.__make_row(s))
    def go_to(self, song, explicit=False):
        """Point the model's current iter at `song` (or a raw Gtk.TreeIter)."""
        self.__iter = None
        if isinstance(song, Gtk.TreeIter):
            self.__iter = song
            self.sourced = True
        elif (not self.find_row(song)):
            print_d('Failed to find song')
        return self.__iter
    def remove(self, itr):
        """Remove a row, clearing the current iter if it points at that row."""
        if (self.__iter and (self[itr].path == self[self.__iter].path)):
            self.__iter = None
        super().remove(itr)
    def get(self):
        """Return the group-key objects of all top-level rows."""
        return [row[0] for row in self]
    def get_current(self):
        """Return the currently sourced song, or None."""
        if (self.__iter is None):
            return None
        elif self.is_empty():
            return None
        else:
            return self[self.__iter][0]
    def get_current_path(self):
        """Return the tree path of the currently sourced row, or None."""
        if (self.__iter is None):
            return None
        elif self.is_empty():
            return None
        else:
            return self[self.__iter].path
    def get_current_iter(self):
        """Return the raw Gtk iter of the currently sourced row, or None."""
        if (self.__iter is None):
            return None
        elif self.is_empty():
            return None
        else:
            return self.__iter
    def is_empty(self):
        """True when the model has no rows at all."""
        return (not len(self))
    def __init__(self):
        # Column 0 holds the song (or group key) object; the remaining seven
        # string columns mirror TAG_MAP's formatted values.
        super().__init__(object, str, str, str, str, str, str, str)
class TestFastLayerNorm(unittest.TestCase):
    """Unit tests for the fused fast-layer-norm kernels."""

    def assertAll(self, l):
        """Assert every element of `l` is truthy; print the list on failure."""
        if not all(l):
            print(l)
        for item in l:
            self.assertTrue(item)

    def test_all_configs(self):
        """Exercise every supported hidden size with fp32/fp16 weight-input combos."""
        hidden_sizes = [768, 1024, 1536, 2048, 2304, 3072, 3840, 4096, 5120, 6144, 8192, 10240, 12288, 12800, 15360, 16384, 18432, 20480, 24576, 25600, 30720, 32768, 40960, 49152, 65536]
        dtype_pairs = ((fp32, fp32), (fp16, fp16), (fp32, fp16))
        for h in hidden_sizes:
            with self.subTest(f'hidden_size={h}'):
                for (wtype, itype) in dtype_pairs:
                    self.assertAll(test_(256, 2, h, wtype, itype))

    def test_run_benchmark(self):
        """Run the timing benchmark over a grid of shapes (no accuracy checks)."""
        configs = ((512, 32, 768, 1000), (512, 32, 1024, 1000), (512, 8, 4096, 1000), (512, 8, 5120, 1000), (512, 8, 6144, 1000), (256, 2, 20480, 500), (256, 2, 25600, 500), (256, 2, 40960, 250), (256, 2, 65536, 250))
        for (S, B, hidden_size, runs) in configs:
            with self.subTest(f'(S, B, hidden_size)=({S}, {B}, {hidden_size})'):
                benchmark_(S, B, hidden_size, fp16, fp16, runs)

    def test_compat_with_autocast(self):
        """LayerNorm must emit the autocast dtype and keep fp32 weight grads."""
        if torch.cuda.is_bf16_supported():
            autocast_dtypes = (torch.half, torch.bfloat16)
        else:
            autocast_dtypes = (torch.half,)
        input_shape = (512, 32, 768)
        layer_norm = LayerNorm(input_shape[(- 1)]).cuda()
        inp = torch.randn(input_shape).cuda()
        for dtype in autocast_dtypes:
            layer_norm.zero_grad(set_to_none=True)
            with self.subTest(f'autocast_dtype={dtype}'):
                with torch.cuda.amp.autocast(enabled=True, dtype=dtype):
                    out = layer_norm(inp)
                self.assertEqual(dtype, out.dtype)
                grad = torch.randn_like(out)
                out.backward(grad)
                self.assertEqual(torch.float32, layer_norm.weight.grad.dtype)
def initialize_config(root, cli_opts, scaling_factor, pathcache, statusbar, session):
    """Create the module-level Config singleton; no-op if it already exists.

    The parameters are forwarded unchanged to the Config constructor.
    """
    global _CONFIG
    if (_CONFIG is not None):
        # Already initialized - keep the existing singleton.
        return
    logger.debug('Initializing config: (root: %s, cli_opts: %s, tk_vars: %s, pathcache: %s, statusbar: %s, session: %s)', root, cli_opts, scaling_factor, pathcache, statusbar, session)
    _CONFIG = Config(root, cli_opts, scaling_factor, pathcache, statusbar, session)
def rb_cnotdihedral_execution(rb_opts: dict, shots: int):
    """Execute CNOT-dihedral randomized-benchmarking circuits on a noisy simulator.

    Generates the X- and Z-basis RB sequences from `rb_opts`, runs both sets
    under a depolarizing noise model, and returns
    (x_results, xdata, z_results).
    """
    backend = qiskit.Aer.get_backend('qasm_simulator')
    basis_gates = ['u1', 'u2', 'u3', 'cx', 'id']
    (rb_cnotdihedral_z_circs, xdata, rb_cnotdihedral_x_circs) = rb.randomized_benchmarking_seq(**rb_opts)
    noise_model = create_depolarizing_noise_model()

    def _run_all(circuits):
        # Execute each circuit individually with identical backend settings.
        return [qiskit.execute(circ, backend=backend, basis_gates=basis_gates, shots=shots, noise_model=noise_model, seed_simulator=SEED).result() for circ in circuits]

    # X circuits first, then Z circuits (same order as the original loops).
    cnotdihedral_x_results = _run_all(rb_cnotdihedral_x_circs)
    cnotdihedral_z_results = _run_all(rb_cnotdihedral_z_circs)
    return (cnotdihedral_x_results, xdata, cnotdihedral_z_results)
class L2Norm(Func):
    """Scaled Euclidean norm: f(x) = mult * ||x||_2."""
    def __init__(self, mult=1.0):
        # mult: scale factor applied to the norm.
        self.mult = mult
    def _eval(self, x):
        """Evaluate mult * ||x||_2."""
        return (self.mult * euclid_norm(x))
    def _prox(self, x, step):
        """Proximal operator of step * f at x (delegates to L2_prox)."""
        return L2_prox(x=x, mult=(self.mult * step))
    def is_smooth(self):
        # The L2 norm is not differentiable at the origin.
        return False
    def is_proximable(self):
        # A closed-form prox exists (see _prox).
        return True
class Style():
    """Container for the inline CSS styles and CSS classes of an HTML element."""

    def __init__(self, style: Dict[(str, str)]=None, css_class: str=None):
        # style: property -> value mapping; css_class: space-separated names.
        self.style = (style if (style is not None) else dict())
        self.css_class = (css_class.split() if (css_class is not None) else [])
        self.logger = qf_logger.getChild(self.__class__.__name__)

    def add_css_class(self, css_classes: Sequence[str]):
        """Append the given class names (duplicates are not filtered)."""
        self.css_class.extend(css_classes)

    def remove_css_class(self, css_classes: Sequence[str]):
        """Remove each given class name; log a warning for ones not present."""
        for class_name in css_classes:
            try:
                self.css_class.remove(class_name)
            except ValueError:
                self.logger.warning('The css class {} can not be removed, as it does not exist'.format(class_name))

    def add_styles(self, styles_dict: Dict[(str, str)]):
        """Merge the given styles in; all spaces are stripped from keys and values."""
        styles_dict = {key.replace(' ', ''): value.replace(' ', '') for (key, value) in styles_dict.items()}
        self.style.update(styles_dict)

    def remove_styles(self, styles: Union[(Dict[(str, str)], Sequence[str])]):
        """Remove style properties, given either a dict (its keys are used)
        or a sequence of property names; log a warning for missing ones."""
        properties = (styles.keys() if (type(styles) is dict) else styles)
        for property_name in properties:
            try:
                del self.style[property_name]
            except KeyError:
                # Bug fix: corrected the typo 'proptyety' -> 'property' in the message.
                self.logger.warning('The css style for property {} can not be removed, as it does not exist'.format(property_name))

    def styles(self):
        """Render the inline style string, e.g. 'color:red;'."""
        def merge_styles(styles_dict: Dict[(str, str)]) -> str:
            return ''.join([('%s:%s;' % (key, value)) for (key, value) in styles_dict.items()])
        styles = merge_styles(self.style)
        # An empty style set is rendered as a literal pair of quotes.
        styles = ('""' if (len(styles) == 0) else styles)
        return styles

    def classes(self):
        """Render the class attribute value (space-separated class names)."""
        def merge_classes(css_classes_list: Sequence[str]) -> str:
            return ' '.join(css_classes_list)
        css_classes = merge_classes(self.css_class)
        return css_classes
def argparser():
    """Parse the Ape-X command-line options.

    Returns the argparse namespace, augmented with `cuda` (requested AND
    available) and the resolved torch `device`.
    """
    parser = argparse.ArgumentParser(description='Ape-X')
    parser.add_argument('--seed', type=int, default=1122, help='Random seed')
    # Multi-step return / environment options.
    parser.add_argument('--n_steps', type=int, default=3, help='Number of steps in multi-step learning')
    parser.add_argument('--gamma', type=float, default=0.99, help='Discount factor for multi-step learning')
    parser.add_argument('--env', type=str, default='SeaquestNoFrameskip-v4', help='Atari environment to use')
    parser.add_argument('--episode_life', type=int, default=1, help='Whether env has episode life(1) or not(0)')
    parser.add_argument('--clip_rewards', type=int, default=1, help='Whether env clip rewards(1) or not(0)')
    parser.add_argument('--frame_stack', type=int, default=1, help='Whether env stacks frame(1) or not(0)')
    parser.add_argument('--scale', type=int, default=0, help='Whether env scales(1) or not(0)')
    # Actor <-> replay communication.
    parser.add_argument('--send_interval', type=int, default=50, help='Number of samples batch to be transferred to replay will contain')
    parser.add_argument('--update_interval', type=int, default=400, help='Interval of fetching parameters from learner')
    parser.add_argument('--max_episode_length', type=int, default=50000, help='Maximum length of episode')
    parser.add_argument('--max_outstanding', type=int, default=3, help='Maximum number of outstanding batch push requests')
    # Per-actor epsilon schedule and prioritized-replay exponents.
    parser.add_argument('--eps_base', type=float, default=0.4)
    parser.add_argument('--eps_alpha', type=float, default=7.0)
    parser.add_argument('--alpha', type=float, default=0.6, help='Priority exponent')
    parser.add_argument('--beta', type=float, default=0.4, help='Importance sampling exponent')
    parser.add_argument('--replay_buffer_size', type=int, default=2000000, help='Size of prioritized replay buffer')
    parser.add_argument('--threshold_size', type=int, default=50000, help='Threshold for starting to transfer batches to learner')
    parser.add_argument('--batch_size', type=int, default=512, help='Size of samples prefetched batches will contain')
    # Worker-process counts for the replay server.
    parser.add_argument('--n_recv_batch_worker', type=int, default=4)
    parser.add_argument('--n_recv_prios_worker', type=int, default=4)
    parser.add_argument('--n_send_batch_worker', type=int, default=8)
    # Learner options.
    parser.add_argument('--lr', type=float, default=6.25e-05)
    parser.add_argument('--queue_size', type=int, default=16, help='Size of local queue. If this value is set to 16, local queue can contain up to 16 prefetched batches')
    parser.add_argument('--prios_queue_size', type=int, default=16)
    parser.add_argument('--max_norm', type=float, default=40.0, help='Maximum gradient norm to clip')
    parser.add_argument('--cuda', action='store_true', default=False, help='Enables CUDA training')
    parser.add_argument('--target_update_interval', type=int, default=2500, help='Interval of updating target network')
    parser.add_argument('--publish_param_interval', type=int, default=25, help='Interval of publishing parameter to actors')
    parser.add_argument('--save_interval', type=int, default=5000, help='Interval of saving model parameters')
    parser.add_argument('--bps_interval', type=int, default=100, help='Interval of logging BPS')
    parser.add_argument('--n_recv_batch_process', type=int, default=4, help='Number of processes to receive batch from replay')
    parser.add_argument('--render', action='store_true', default=False)
    args = parser.parse_args()
    # Only use CUDA when both requested and actually available.
    args.cuda = (args.cuda and torch.cuda.is_available())
    args.device = torch.device(('cuda' if args.cuda else 'cpu'))
    return args
def create_terminal_writer(config: Config, file: Optional[TextIO]=None) -> TerminalWriter:
    """Build a TerminalWriter honoring the ``color`` and ``code_highlight`` options.

    Each option is tri-state: 'yes' forces the feature on, 'no' forces it
    off, and any other value leaves the writer's auto-detected default alone.
    """
    tw = TerminalWriter(file=file)
    option_map = (('hasmarkup', config.option.color), ('code_highlight', config.option.code_highlight))
    for (attr, choice) in option_map:
        if (choice == 'yes'):
            setattr(tw, attr, True)
        elif (choice == 'no'):
            setattr(tw, attr, False)
    return tw
def data_for_url(url: QUrl) -> Tuple[(str, bytes)]:
    """Dispatch a qute:// URL to its registered handler and return (mimetype, data).

    Raises:
        Redirect: when the URL first needs normalizing or rewriting.
        NotFoundError: when no handler exists for the host.
        SchemeOSError: when the handler fails with an OSError.
    """
    # Normalize path segments / trailing slash; redirect to the normal form
    # if that changes anything so each resource has one canonical URL.
    norm_url = url.adjusted((QUrl.UrlFormattingOption.NormalizePathSegments | QUrl.UrlFormattingOption.StripTrailingSlash))
    if (norm_url != url):
        raise Redirect(norm_url)
    path = url.path()
    host = url.host()
    query = url.query()
    log.misc.debug('url: {}, path: {}, host {}'.format(url.toDisplayString(), path, host))
    if ((not path) or (not host)):
        # Host or path is missing: rebuild a canonical qute://<host>/ URL,
        # using the path as the host when the host is empty, and redirect.
        new_url = QUrl()
        new_url.setScheme('qute')
        if host:
            new_url.setHost(host)
        else:
            new_url.setHost(path)
        new_url.setPath('/')
        if query:
            new_url.setQuery(query)
        if new_url.host():
            raise Redirect(new_url)
    try:
        handler = _HANDLERS[host]
    except KeyError:
        raise NotFoundError('No handler found for {}'.format(url.toDisplayString()))
    try:
        (mimetype, data) = handler(url)
    except OSError as e:
        # Wrap filesystem-level failures in a scheme-level error for callers.
        raise SchemeOSError(e)
    assert (mimetype is not None), url
    if ((mimetype == 'text/html') and isinstance(data, str)):
        # Handlers may return str for HTML; encode with xmlcharrefreplace so
        # unencodable characters become entities instead of raising.
        data = data.encode('utf-8', errors='xmlcharrefreplace')
    assert isinstance(data, bytes)
    return (mimetype, data)
def spectral_normed_weight(W, u=None, num_iters=1, update_collection=None, with_sigma=False):
    """Spectrally normalize W: divide by an estimate of its largest singular value.

    sigma is estimated with `num_iters` rounds of power iteration on W
    reshaped to 2-D (last axis kept as columns).

    Args:
        W: weight tensor (any rank); flattened to (-1, last_dim) internally.
        u: persistent (1, last_dim) power-iteration vector; created if None.
        update_collection: TF collection to place u's update op in; if None,
            u is updated on every use of W_bar via a control dependency.
        with_sigma: also return the sigma estimate.

    Returns:
        W / sigma reshaped back to W's shape (plus sigma if `with_sigma`).
    """
    W_shape = W.shape.as_list()
    # Flatten all leading dims so the power iteration works on a matrix.
    W_reshaped = tf.reshape(W, [(- 1), W_shape[(- 1)]])
    if (u is None):
        u = tf.get_variable('u', [1, W_shape[(- 1)]], initializer=tf.truncated_normal_initializer(), trainable=False)
    def power_iteration(i, u_i, v_i):
        # One round: v <- normalize(u W^T), u <- normalize(v W).
        v_ip1 = _l2normalize(tf.matmul(u_i, tf.transpose(W_reshaped)))
        u_ip1 = _l2normalize(tf.matmul(v_ip1, W_reshaped))
        return ((i + 1), u_ip1, v_ip1)
    (_, u_final, v_final) = tf.while_loop(cond=(lambda i, _1, _2: (i < num_iters)), body=power_iteration, loop_vars=(tf.constant(0, dtype=tf.int32), u, tf.zeros(dtype=tf.float32, shape=[1, W_reshaped.shape.as_list()[0]])))
    if (update_collection is None):
        warnings.warn('Setting update_collection to None will make u being updated every W execution. This maybe undesirable. Please consider using a update collection instead.')
        # sigma ~= v W u^T: the dominant singular value estimate.
        sigma = tf.matmul(tf.matmul(v_final, W_reshaped), tf.transpose(u_final))[(0, 0)]
        W_bar = (W_reshaped / sigma)
        # Tie u's update to every evaluation of W_bar.
        with tf.control_dependencies([u.assign(u_final)]):
            W_bar = tf.reshape(W_bar, W_shape)
    else:
        sigma = tf.matmul(tf.matmul(v_final, W_reshaped), tf.transpose(u_final))[(0, 0)]
        W_bar = (W_reshaped / sigma)
        W_bar = tf.reshape(W_bar, W_shape)
        # Caller is responsible for running the ops in `update_collection`
        # (skipped entirely for the NO_OPS sentinel).
        if (update_collection != NO_OPS):
            tf.add_to_collection(update_collection, u.assign(u_final))
    if with_sigma:
        return (W_bar, sigma)
    else:
        return W_bar
def view_route(f):
    """Decorator that normalizes view return values into Flask JSON responses.

    Handled return types:
      * int/float  -> wrapped in a ResMsg payload, then jsonified
      * tuple      -> (body, status[, headers]) with the body jsonified
      * dict       -> jsonified as-is
      * bytes      -> decoded as UTF-8, then jsonified
      * anything else -> jsonified directly
    """
    from functools import wraps

    # Bug fix: without functools.wraps every wrapped view's __name__ was
    # 'decorator', which breaks Flask endpoint registration when more than
    # one view uses this decorator.
    @wraps(f)
    def decorator(*args, **kwargs):
        rv = f(*args, **kwargs)
        if isinstance(rv, (int, float)):
            res = ResMsg()
            res.update(data=rv)
            return jsonify(res.data)
        elif isinstance(rv, tuple):
            # (body, status, headers) or (body, status).
            if (len(rv) >= 3):
                return (jsonify(rv[0]), rv[1], rv[2])
            else:
                return (jsonify(rv[0]), rv[1])
        elif isinstance(rv, dict):
            return jsonify(rv)
        elif isinstance(rv, bytes):
            rv = rv.decode('utf-8')
            return jsonify(rv)
        else:
            return jsonify(rv)
    return decorator
class ValidEpoch(Epoch):
    """Validation epoch: eval mode plus a no-grad forward/loss pass."""

    def __init__(self, model, loss, metrics, device='cpu', verbose=True):
        super().__init__(model=model, loss=loss, metrics=metrics, stage_name='valid', device=device, verbose=verbose)

    def on_epoch_start(self):
        # Freeze dropout / batch-norm statistics during validation.
        self.model.eval()

    def batch_update(self, x, y):
        """Forward `x` and compute the loss against `y` without gradients."""
        with torch.no_grad():
            preds = self.model.forward(x)
            loss_value = self.loss(preds, y)
        return (loss_value, preds)
@pytest.mark.parametrize('line', ['text/plain', 'text/markdown', 'text/csv', 'text/rtf', 'text/javascript', 'text/html', 'text/xml'])
def test_validate_content_type_invalid(line: str):
    """Every non-RST Content-Type value must yield exactly one warning.

    Bug fix: the decorator was mangled into a bare `.parametrize(...)` line
    (a syntax error); restored as @pytest.mark.parametrize.
    """
    warnings = [warning for (_, warning) in check_peps._validate_content_type(1, line)]
    assert (warnings == ["Content-Type must be 'text/x-rst'"]), warnings
def test_drop_event(tmpdir, qtbot):
    """Dropping a mesh file onto the plotter window should load without errors."""
    # Save a small mesh to disk so there is a real file URL to drop.
    output_dir = str(tmpdir.mkdir('tmpdir'))
    filename = str(os.path.join(output_dir, 'tmp.vtk'))
    mesh = pyvista.Cone()
    mesh.save(filename)
    assert os.path.isfile(filename)
    plotter = BackgroundPlotter(update_app_icon=False)
    # Wait until the window is actually exposed before synthesizing the event.
    with qtbot.wait_exposed(plotter.app_window, timeout=10000):
        plotter.app_window.show()
    # Build a synthetic drag-and-drop event carrying the file URL.
    point = QPointF(0, 0)
    data = QMimeData()
    data.setUrls([QUrl(filename)])
    event = QDropEvent(point, Qt.DropAction.IgnoreAction, data, Qt.MouseButton.NoButton, Qt.KeyboardModifier.NoModifier)
    plotter.dropEvent(event)
    plotter.close()
def main():
    """CLI entry point: read a COLMAP model, print stats, optionally convert it.

    Loads the model in `input_format` from `input_model`, prints the number
    of cameras/images/3D points, and writes it back out in `--output_format`
    when `--output_model` is given.
    """
    parser = argparse.ArgumentParser(description='Read and write COLMAP binary and text models')
    parser.add_argument('input_model', help='path to input model folder')
    parser.add_argument('input_format', choices=['.bin', '.txt'], help='input model format')
    parser.add_argument('--output_model', metavar='PATH', help='path to output model folder')
    parser.add_argument('--output_format', choices=['.bin', '.txt'], help='outut model format', default='.txt')
    args = parser.parse_args()
    (cameras, images, points3D) = read_model(path=args.input_model, ext=args.input_format)
    print('num_cameras:', len(cameras))
    print('num_images:', len(images))
    print('num_points3D:', len(points3D))
    # Conversion is optional; skipped when no output path was requested.
    if (args.output_model is not None):
        write_model(cameras, images, points3D, path=args.output_model, ext=args.output_format)
def binary_search(level, cand, low, high):
    """Return the index of the FIRST row in freArr[level-1][low..high] whose
    (level-1)-length prefix equals `cand`; -1 when no row matches.

    NOTE(review): relies on the module-level `freArr`, assumed to hold rows
    sorted by their prefixes -- confirm against the caller.
    """
    if (low > high):
        return (- 1)
    while (low <= high):
        mid = int(((low + high) / 2))
        if (cand == freArr[(level - 1)][mid][0:(level - 1)]):
            # Found a match at mid; now binary-search [low, mid] for the
            # first row carrying the same prefix.
            s_low = low
            s_high = mid
            if (cand == freArr[(level - 1)][low][0:(level - 1)]):
                # `low` itself already matches, so it is the first occurrence.
                start = low
            else:
                # Classic lower-bound search: shrink until s_low == s_high.
                while (s_low < s_high):
                    start = int(((s_low + s_high) / 2))
                    if (cand == freArr[(level - 1)][start][0:(level - 1)]):
                        s_high = start
                    else:
                        s_low = (start + 1)
                start = s_low
            return start
        elif (cand < freArr[(level - 1)][mid][0:(level - 1)]):
            high = (mid - 1)
        else:
            low = (mid + 1)
    return (- 1)
from functools import total_ordering


# NOTE(review): the decorator above this class was mangled to a bare
# `_ordering` expression (a NameError at import time); restored as
# functools.total_ordering, which matches the __lt__/__eq__ pair defined
# below — confirm against the original source.
@total_ordering
class Parse(entity):
    """One candidate metrical parse: an ordered list of MeterPositions
    ('s'/'w') covering the slots (syllables) of a line, with running
    violation scores for each of the meter's constraints."""

    # Sort-key mapping for position types (weak before strong).
    str2int = {'w': '1', 's': '2'}

    def __init__(self, meter, totalSlots):
        self.positions = []
        self.meter = meter
        self.constraints = meter.constraints
        # Per-constraint running totals; '*' marks the parse as unmetrical.
        self.constraintScores = {}
        for constraint in self.constraints:
            self.constraintScores[constraint] = 0
        self.constraintNames = [c.name for c in self.constraints]
        self.numSlots = 0
        self.totalSlots = totalSlots
        # Harmonic-bounding / comparison bookkeeping.
        self.isBounded = False
        self.boundedBy = None
        self.unmetrical = False
        self.comparisonNums = set()
        self.comparisonParses = []
        self.parseNum = 0
        self.totalScore = None
        self.pauseComparisons = False

    def __copy__(self):
        """Copy positions and scores; the meter object is shared."""
        other = Parse(self.meter, self.totalSlots)
        other.numSlots = self.numSlots
        for pos in self.positions:
            other.positions.append(copy(pos))
        other.comparisonNums = copy(self.comparisonNums)
        for (k, v) in list(self.constraintScores.items()):
            other.constraintScores[k] = copy(v)
        return other

    def slots(self, by_word=False):
        """Return all slots in order; when by_word, group them per word."""
        slots = []
        last_word_i = None
        for pos in self.positions:
            for slot in pos.slots:
                if (not by_word):
                    slots.append(slot)
                else:
                    # Start a new group whenever the word index changes.
                    if ((last_word_i is None) or (last_word_i != slot.i_word)):
                        slots.append([])
                    slots[(- 1)].append(slot)
                    last_word_i = slot.i_word
        return slots

    def str_meter(self, word_sep=''):
        """Return the parse as one meter value ('s'/'w') per slot, optionally
        separated at word boundaries by `word_sep`."""
        str_meter = ''
        wordTokNow = None
        for pos in self.positions:
            for slot in pos.slots:
                if (word_sep and wordTokNow and (slot.wordtoken != wordTokNow)):
                    str_meter += word_sep
                wordTokNow = slot.wordtoken
                str_meter += pos.meterVal
        return str_meter

    def extend(self, slot):
        """Extend this parse in place by one slot and return all resulting parses.

        The slot either joins the current final position (when the meter allows
        a multi-slot position and the slot is not split) in a copied branch, or
        opens a new position of the opposite type in this parse.  Constraint
        scores are updated incrementally for the position that just closed,
        and for the final position of every branch once the parse is complete.
        """
        from MeterPosition import MeterPosition
        self.totalScore = None  # invalidate the cached score
        self.numSlots += 1
        extendedParses = [self]
        sPos = MeterPosition(self.meter, 's')
        sPos.append(slot)
        wPos = MeterPosition(self.meter, 'w')
        wPos.append(slot)
        if (len(self.positions) == 0):
            # First slot: branch into an s-initial and a w-initial parse.
            wParse = copy(self)
            self.positions.append(sPos)
            wParse.positions.append(wPos)
            extendedParses.append(wParse)
        else:
            lastPos = self.positions[(- 1)]
            if (lastPos.meterVal == 's'):
                if ((len(lastPos.slots) < self.meter.maxS()) and (not slot.issplit)):
                    sParse = copy(self)
                    sParse.positions[(- 1)].append(slot)
                    extendedParses.append(sParse)
                self.positions.append(wPos)
            else:
                if ((len(lastPos.slots) < self.meter.maxW()) and (not slot.issplit)):
                    wParse = copy(self)
                    wParse.positions[(- 1)].append(slot)
                    extendedParses.append(wParse)
                self.positions.append(sPos)
            # Score the position that was just closed (second to last).
            pos_i = (len(self.positions) - 2)
            for constraint in self.constraints:
                vScore = constraint.violationScore(self.positions[(- 2)], pos_i=pos_i, slot_i=(self.numSlots - 1), num_slots=self.totalSlots, all_positions=self.positions, parse=self)
                if (vScore == '*'):
                    self.constraintScores[constraint] = '*'
                else:
                    self.constraintScores[constraint] += vScore
        if (self.numSlots == self.totalSlots):
            # Parse complete: score the final position of every branch.
            for parse in extendedParses:
                for constraint in self.constraints:
                    vScore = constraint.violationScore(parse.positions[(- 1)], pos_i=(len(parse.positions) - 1), slot_i=(self.numSlots - 1), num_slots=self.totalSlots, all_positions=parse.positions, parse=parse)
                    if (vScore == '*'):
                        parse.constraintScores[constraint] = '*'
                    else:
                        parse.constraintScores[constraint] += vScore
        return extendedParses

    def getErrorCount(self):
        """Alias for score()."""
        return self.score()

    def getErrorCountN(self):
        """Return the score normalized by the number of positions."""
        return (self.getErrorCount() / len(self.positions))

    def formatConstraints(self, normalize=True, getKeys=False):
        """Return constraint scores as a list, binarized when normalize=True,
        in sorted-key order; optionally also return the keys."""
        vals = []
        keys = []
        for (k, v) in sorted(self.constraintScores.items()):
            if normalize:
                vals.append(1 if bool(v) else 0)
            else:
                vals.append(v)
            if getKeys:
                keys.append(k)
        if getKeys:
            return (vals, keys)
        else:
            return vals

    def totalCount(self):
        """Total number of (position, constraint) violations across the parse."""
        # BUGFIX: constraintCounts is a method; the original read
        # `self.constraintCounts.values()`, i.e. .values() on a bound method.
        return sum(self.constraintCounts().values())

    def constraintCounts(self):
        """Return {constraint: number of positions violating it}."""
        cc = {}
        for constraint in self.constraints:
            cn = 0
            for pos in self.positions:
                if pos.constraintScores[constraint]:
                    cn += 1
            cc[constraint] = cn
        return cc

    def num_sylls(self):
        """Total number of slots (syllables) across all positions."""
        return sum((len(pos.slots) for pos in self.positions))

    def score(self):
        """Sum the constraint scores ('*' if any constraint is categorical);
        caches the result in totalScore."""
        score = 0
        for (constraint, value) in list(self.constraintScores.items()):
            if (value == '*'):
                self.totalScore = '*'
                return self.totalScore
            score += value
        self.totalScore = score
        # Render whole-number floats as ints.
        return (int(self.totalScore) if (int(self.totalScore) == self.totalScore) else self.totalScore)

    def __lt__(self, other):
        return (self.score() < other.score())

    def __eq__(self, other):
        return (self.score() == other.score())

    def posString(self, viols=False):
        """Return the positions joined by '|', marking violators with '*'."""
        output = []
        for pos in self.positions:
            x = str(pos)
            if (viols and pos.has_viol):
                x += '*'
            output.append(x)
        return '|'.join(output)

    def posString2(self, viols=False):
        """Return the parse as tokens (UPPER for 's', lower for 'w'), with ' '
        between words and '.' between slots within a word."""
        last_word = None
        output = ''
        for pos in self.positions:
            for slot in pos.slots:
                slotstr = (slot.token.upper() if (pos.meterVal == 's') else slot.token.lower())
                if (last_word != slot.wordtoken):
                    output += (' ' + slotstr)
                    last_word = slot.wordtoken
                else:
                    output += ('.' + slotstr)
        return output.strip()

    def str_stress(self):
        """Return per-position stress profiles: P=primary, S=secondary, U=unstressed."""
        output = []
        for pos in self.positions:
            slotx = []
            for slot in pos.slots:
                if (not slot.feats['prom.stress']):
                    slotx.append('U')
                elif (slot.feats['prom.stress'] == 1):
                    slotx.append('P')
                else:
                    slotx.append('S')
            output += [''.join(slotx)]
        # BUGFIX: string.join() does not exist in Python 3; use str.join.
        return '|'.join(output)

    def words(self):
        """Return the word objects in order, one per word token."""
        last_word = None
        words = []
        for slot in self.slots():
            slot_word = slot.word
            slot_wordtoken = slot.wordtoken
            if (last_word != slot_wordtoken):
                words += [slot_word]
                last_word = slot_wordtoken
        return words

    def wordtokens(self):
        """Return the word tokens in order, deduplicating consecutive repeats."""
        last_word = None
        words = []
        for slot in self.slots():
            slot_word = slot.wordtoken
            if (last_word != slot_word):
                words += [slot_word]
                last_word = slot_word
        return words

    def set_wordtokens_to_best_word_options(self):
        """Pin each word token to the word option this parse chose for it."""
        for (wordtok, wordobj) in zip(self.wordtokens(), self.words()):
            wordtok.set_as_best_word_option(wordobj)

    def __repr__(self):
        return self.posString()

    def __repr2__(self):
        return str(self.getErrorCount())

    def str_ot(self):
        """Return a tab-separated OT-tableau row of constraint scores."""
        ot = []
        for c in self.constraints:
            v = self.constraintScores[c]
            ot += [(str(v) if (int(v) != float(v)) else str(int(v)))]
        return '\t'.join(ot)

    def __report__(self, proms=False):
        """Return a human-readable, line-per-position report of the parse."""
        o = ''
        i = 0
        for pos in self.positions:
            unitlist = ''
            for unit in pos.slots:
                unitlist += (self.u2s(unit.token) + ' ')
            unitlist = unitlist[:(- 1)]
            unitlist = makeminlength(unitlist, 10)
            if proms:
                # Collect [+/-prom.*] feature flags per slot.
                feats = ''
                for unit in pos.slots:
                    for (k, v) in list(unit.feats.items()):
                        if (not ('prom.' in k)):
                            continue
                        if v:
                            feats += (('[+' + str(k)) + '] ')
                        else:
                            feats += (('[-' + str(k)) + '] ')
                    feats += '\t'
                feats = feats.strip()
            viols = ''
            for (k, v) in list(pos.constraintScores.items()):
                if v:
                    viols += str(k)
            viols = viols.strip()
            if proms:
                viols = makeminlength(viols, 60)
            # Strong positions upper-case, weak positions lower-case.
            if (pos.meterVal == 's'):
                unitlist = unitlist.upper()
            else:
                unitlist = unitlist.lower()
            i += 1
            o += ((((((str(i) + '\t') + pos.meterVal2) + '\t') + unitlist) + '\t') + viols)
            if proms:
                o += (feats + '\n')
            else:
                o += '\n'
        return o[:(- 1)]

    def isIambic(self):
        """Return True if the parse starts w-s; None if fewer than two positions."""
        if (len(self.positions) < 2):
            return None
        else:
            return ((self.positions[0].meterVal == 'w') and (self.positions[1].meterVal == 's'))

    def canCompare(self, parse):
        """Two parses are comparable when this one is complete, or when their
        final positions agree in meter value and slot count."""
        return ((self.numSlots == self.totalSlots) or ((self.positions[(- 1)].meterVal == parse.positions[(- 1)].meterVal) and (len(self.positions[(- 1)].slots) == len(parse.positions[(- 1)].slots))))

    def violations(self, boolean=False):
        """Return the score dict, or (constraint, violated?) pairs if boolean."""
        if (not boolean):
            return self.constraintScores
        else:
            return [(k, (v > 0)) for (k, v) in list(self.constraintScores.items())]

    def violated(self):
        """Return the constraints with a nonzero violation score."""
        viold = []
        for (c, viol) in list(self.constraintScores.items()):
            if viol:
                viold += [c]
        return viold

    def constraintScorez(self):
        """Recompute {constraint: total score} from the positions themselves."""
        toreturn = {}
        for c in self.constraints:
            toreturn[c] = 0
            for pos in self.positions:
                toreturn[c] += pos.constraintScores[c]
        return toreturn

    def boundingRelation(self, parse):
        """Return this parse's Bounding relation to `parse`: equal, unequal,
        bounded (harmonically worse on every differing constraint) or bounds
        (harmonically better)."""
        containsGreaterViolation = False
        containsLesserViolation = False
        for constraint in self.constraints:
            mark = self.constraintScores[constraint]
            if (mark > parse.constraintScores[constraint]):
                containsGreaterViolation = True
            if (mark < parse.constraintScores[constraint]):
                containsLesserViolation = True
        if containsGreaterViolation:
            if containsLesserViolation:
                return Bounding.unequal
            else:
                return Bounding.bounded
        elif containsLesserViolation:
            return Bounding.bounds
        else:
            return Bounding.equal
class BasicBlock(nn.Module):
    """Standard two-convolution residual block (ResNet v1, no bottleneck)."""

    expansion = 1  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # First conv applies the (possibly strided) spatial reduction.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        # Optional projection on the identity path when shape changes.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut is the input itself, or its projection.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
def test_upload(requests_mock):
    """upload() should POST both attachments and return the photo and sound records."""
    requests_mock.post(
        f'{API_V1}/observation_photos',
        json=SAMPLE_DATA['post_observation_photos'],
        status_code=200,
    )
    requests_mock.post(
        f'{API_V1}/observation_sounds',
        json=SAMPLE_DATA['post_observation_sounds'],
        status_code=200,
    )

    response = upload(1234, BytesIO(), BytesIO(), access_token='token')

    photo_record = response[0]
    assert photo_record['id'] == 1234
    assert photo_record['created_at'] == '2020-09-24T21:06:16.964-05:00'
    assert photo_record['photo']['native_username'] == 'username'

    sound_record = response[1]
    assert sound_record['id'] == 233946
    assert sound_record['created_at'] == '2021-05-30T17:36:40.286-05:00'
    assert sound_record['sound']['file_content_type'] == 'audio/mpeg'
def test_scenarios(testdir):
    """A Fixture with two @scenario methods runs each dependent test once per scenario.

    NOTE(review): the embedded source had its decorators mangled by extraction
    (`_fixtures(Scenarios)` and missing `@scenario` markers); reconstructed from
    the imports it declares (`scenario`, `with_fixtures`).
    """
    p = testdir.makepyfile('''
        from reahl.tofu import Fixture, scenario
        from reahl.tofu.pytestsupport import with_fixtures

        class Scenarios(Fixture):
            @scenario
            def one(self):
                self.n = 1

            @scenario
            def two(self):
                self.n = 2

        Scenarios.runs = []

        @with_fixtures(Scenarios)
        def test_something(scenario_fixture):
            Scenarios.runs.append(scenario_fixture)

        def test_all_scenarios_run_with_correct_setups():
            fixture_1, fixture_2 = Scenarios.runs

            assert fixture_1.n == 1
            assert fixture_2.n == 2
        ''')
    result = testdir.runpytest(p)
    # test_something runs twice (once per scenario) + the final check = 3 passes.
    result.reprec.assertoutcome(passed=3)
# NOTE(review): the line below is a bare parenthesized string — a no-op
# expression that looks like a decorator mangled during extraction
# (presumably a translation-rule registration named
# 'iM_product_vect_jvp_translation'); confirm against the original source.
('iM_product_vect_jvp_translation')
def _iM_product_vect_jvp_translation(c, q, vect, q_tan, vect_tan):
    """XLA translation rule for the JVP of the inverse-mass-matrix/vector product.

    Validates operand shapes via check_dim_imputs, then emits a custom call to
    the f32 or f64 C wrapper depending on the reported input dtype.  The output
    layout reuses the layout of the third operand spec (the vector).
    """
    (type_in, size_xla, dims_spec) = check_dim_imputs((q, vect, q_tan, vect_tan), c)
    # Pick the native wrapper matching the input precision.
    op_name = (b'iM_prod_vect_jvp_wrapper_f32' if (type_in == np.float32) else b'iM_prod_vect_jvp_wrapper_f64')
    return xops.CustomCallWithLayout(c, op_name, operands=(size_xla, q, vect, q_tan, vect_tan), operand_shapes_with_layout=dims_spec, shape_with_layout=dims_spec[2])
class TestWeightedAverageControlCurve():
    """Tests for WeightedAverageProfileParameter: the daily profile should be
    the max-volume-weighted average of the storages' profiles
    (volumes 16.0 and 20.0 -> weights 16/36 and 20/36)."""

    @staticmethod
    def _make_param(model, curve0, curve1):
        # Build the parameter over the first two storages and set the model up.
        storages = [model.nodes['Storage 0'], model.nodes['Storage 1']]
        param = WeightedAverageProfileParameter(model, storages, [curve0, curve1])
        model.setup()
        return param

    def test_constant(self, three_storage_model):
        """Constants 0.25 and 0.7 average to 0.5 with weights 16/36 and 20/36."""
        m = three_storage_model
        m.nodes['Storage 0'].max_volume = 16.0
        param = self._make_param(m, ConstantParameter(m, 0.25), ConstantParameter(m, 0.7))
        assert_array_almost_equal(param.get_daily_values(), np.full(366, 0.5))

    def test_monthly_and_constant(self, three_storage_model):
        """A flat monthly profile behaves like the equivalent constant."""
        m = three_storage_model
        m.nodes['Storage 0'].max_volume = 16.0
        monthly = MonthlyProfileParameter(m, [0.25] * 12)
        param = self._make_param(m, monthly, ConstantParameter(m, 0.7))
        assert_array_almost_equal(param.get_daily_values(), np.full(366, 0.5))

    def test_monthly_and_daily(self, three_storage_model):
        """Mixing monthly and daily profile types still averages correctly."""
        m = three_storage_model
        m.nodes['Storage 0'].max_volume = 16.0
        monthly = MonthlyProfileParameter(m, [0.25] * 12)
        daily = DailyProfileParameter(m, np.full(366, 0.7))
        param = self._make_param(m, monthly, daily)
        assert_array_almost_equal(param.get_daily_values(), np.full(366, 0.5))

    def test_varying_curve(self, three_storage_model):
        """A mid-year step in one profile produces the matching step in the average."""
        m = three_storage_model
        m.nodes['Storage 0'].max_volume = 16.0
        monthly = MonthlyProfileParameter(m, ([0.25] * 6) + ([0.8125] * 6))
        param = self._make_param(m, monthly, ConstantParameter(m, 0.7))
        # First 182 days (Jan-Jun) average 0.5; remaining 184 days average 0.75.
        expected = np.append(np.full(182, 0.5), np.full(184, 0.75))
        assert_array_almost_equal(param.get_daily_values(), expected)

    def test_interpolated_curve(self, three_storage_model):
        """Interpolated monthly profiles are averaged day by day; checked at the
        edge of the interpolation window."""
        m = three_storage_model
        m.nodes['Storage 0'].max_volume = 16.0
        profile_vals = [0.25, 0.25, 0.25, 0.5, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25]
        monthly = MonthlyProfileParameter(m, profile_vals, interp_day='first')
        param = self._make_param(m, monthly, ConstantParameter(m, 0.7))
        interp_vals = np.interp(np.arange(62), [0, 31, 62], [0.25, 0.5, 0.25])
        interp_curve = ((interp_vals * 16.0) + (np.full(62, 0.7) * 20.0)) / 36
        expected = np.append(np.append(np.full(60, 0.5), interp_curve), np.full(302, 0.5))
        assert_array_almost_equal(param.get_daily_values()[58:62], expected[58:62])
def _update_config_from_file(config, cfg_file):
    """Merge a YAML file into `config`, first recursively applying any parent
    files listed under its 'BASE' key so this file's settings take precedence."""
    config.defrost()
    with open(cfg_file, 'r') as f:
        yaml_cfg = yaml.load(f, Loader=yaml.FullLoader)

    base_dir = os.path.dirname(cfg_file)
    for base_cfg in yaml_cfg.setdefault('BASE', ['']):
        if base_cfg:
            # Parent configs are resolved relative to this file's directory.
            _update_config_from_file(config, os.path.join(base_dir, base_cfg))

    print('=> merge config from {}'.format(cfg_file))
    config.merge_from_file(cfg_file)
    config.freeze()
def _query_attribute(program_id: int, index: int):
    """Return (name, gl_type, size) for the active attribute at `index` of the program.

    Raises:
        ShaderException: if the underlying GL query fails.
    """
    attr_size = GLint()
    attr_type = GLenum()
    max_name_length = 192
    name_buffer = create_string_buffer(max_name_length)
    try:
        glGetActiveAttrib(program_id, index, max_name_length, None, attr_size, attr_type, name_buffer)
    except GLException as exc:
        raise ShaderException from exc
    return (name_buffer.value.decode(), attr_type.value, attr_size.value)
class SelecSLSBlock(nn.Module):
    """SelecSLS block: a chain of 3x3/1x1 convs whose intermediate features are
    concatenated for the 1x1 fuse conv, plus a stage-level skip tensor."""

    def __init__(self, in_chs, skip_chs, mid_chs, out_chs, is_first, stride, dilation=1):
        super(SelecSLSBlock, self).__init__()
        self.stride = stride
        self.is_first = is_first
        assert stride in [1, 2]
        # conv1 applies the (possibly strided/dilated) spatial reduction.
        self.conv1 = conv_bn(in_chs, mid_chs, 3, stride, dilation=dilation)
        self.conv2 = conv_bn(mid_chs, mid_chs, 1)
        self.conv3 = conv_bn(mid_chs, mid_chs // 2, 3)
        self.conv4 = conv_bn(mid_chs // 2, mid_chs, 1)
        self.conv5 = conv_bn(mid_chs, mid_chs // 2, 3)
        # Fuses d1+d2+d3 (2*mid_chs channels total) plus the skip when present.
        self.conv6 = conv_bn(2 * mid_chs + (0 if is_first else skip_chs), out_chs, 1)

    def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]:
        if not isinstance(x, list):
            x = [x]
        assert len(x) in [1, 2]
        feat1 = self.conv1(x[0])
        feat2 = self.conv3(self.conv2(feat1))
        feat3 = self.conv5(self.conv4(feat2))
        if self.is_first:
            # First block of a stage: the output doubles as the stage skip tensor.
            fused = self.conv6(torch.cat([feat1, feat2, feat3], 1))
            return [fused, fused]
        # Later blocks carry the stage skip tensor through unchanged.
        return [self.conv6(torch.cat([feat1, feat2, feat3, x[1]], 1)), x[1]]
# BUGFIX: the decorator was mangled to a bare `.parametrize(...)` line (a
# syntax error); restored as @pytest.mark.parametrize.
@pytest.mark.parametrize(
    ('name', 'expected'),
    [
        ('foo', 'foo'),
        ('Foo', 'foo'),
        ('fOo', 'foo'),
        ('foo.bar', 'foo-bar'),
        ('Foo.Bar', 'foo-bar'),
        ('Foo.....Bar', 'foo-bar'),
        ('foo_bar', 'foo-bar'),
        ('foo___bar', 'foo-bar'),
        ('foo-bar', 'foo-bar'),
        ('foo----bar', 'foo-bar'),
    ],
)
def test_is_normalized_name(name, expected):
    """The normalized form passes is_normalized_name(); a differing raw form fails it."""
    assert is_normalized_name(expected)
    if name != expected:
        assert not is_normalized_name(name)
class RenameFiles(Gtk.VBox):
    """Editing pane that renames the selected songs' files from a user pattern.

    Shows an old-name/new-name preview list, applies optional filename filters
    (spaces to underscores, strip non-ASCII, ...), and on save renames each
    file, optionally moving album art along and removing emptied directories.
    """

    title = _('Rename Files')
    FILTERS = [SpacesToUnderscores, ReplaceColons, StripWindowsIncompat, StripDiacriticals, StripNonASCII, Lowercase]
    handler = RenameFilesPluginHandler()
    IMAGE_EXTENSIONS = ['jpg', 'jpeg', 'png', 'bmp']

    # BUGFIX: restored @classmethod (decorator lost in extraction) — the
    # method takes `cls` and registers the class-level handler.
    @classmethod
    def init_plugins(cls):
        PluginManager.instance.register_handler(cls.handler)

    def __init__(self, parent, library):
        super().__init__(spacing=6)
        self.__skip_interactive = False
        self.set_border_width(12)
        # Pattern entry + preview button row.
        hbox = Gtk.HBox(spacing=6)
        cbes_defaults = NBP_EXAMPLES.split('\n')
        self.combo = ComboBoxEntrySave(NBP, cbes_defaults, title=_('Path Patterns'), edit_title=_('Edit saved patterns...'))
        self.combo.show_all()
        hbox.pack_start(self.combo, True, True, 0)
        self.preview = qltk.Button(_('_Preview'), Icons.VIEW_REFRESH)
        self.preview.show()
        hbox.pack_start(self.preview, False, True, 0)
        self.pack_start(hbox, False, True, 0)
        self.combo.get_child().connect('changed', self._changed)
        # Preview list: one Entry per song, old name vs. proposed new name.
        model = ObjectStore()
        self.view = Gtk.TreeView(model=model)
        self.view.show()
        sw = Gtk.ScrolledWindow()
        sw.set_shadow_type(Gtk.ShadowType.IN)
        sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        sw.add(self.view)
        self.pack_start(sw, True, True, 0)
        self.pack_start(Gtk.VBox(), False, True, 0)
        # Options row: filename filters frame + album-art frame.
        rename_options = Gtk.HBox()
        filter_box = FilterPluginBox(self.handler, self.FILTERS)
        filter_box.connect('preview', self.__filter_preview)
        filter_box.connect('changed', self.__filter_changed)
        self.filter_box = filter_box
        frame_filename_options = Frame(_('File names'), filter_box)
        frame_filename_options.show_all()
        rename_options.pack_start(frame_filename_options, False, True, 0)
        albumart_box = Gtk.VBox()
        moveart_box = Gtk.VBox()
        self.moveart = ConfigCheckButton(_('_Move album art'), 'rename', 'move_art', populate=True)
        self.moveart.set_tooltip_text(_("See '[albumart] search_filenames' config entry for which images will be moved"))
        self.moveart.show()
        moveart_box.pack_start(self.moveart, False, True, 0)
        self.moveart_overwrite = ConfigCheckButton(_('_Overwrite album art at target'), 'rename', 'move_art_overwrite', populate=True)
        self.moveart_overwrite.show()
        moveart_box.pack_start(self.moveart_overwrite, False, True, 0)
        albumart_box.pack_start(moveart_box, False, True, 0)
        removeemptydirs_box = Gtk.VBox()
        self.removeemptydirs = ConfigCheckButton(_('_Remove empty directories'), 'rename', 'remove_empty_dirs', populate=True)
        self.removeemptydirs.show()
        removeemptydirs_box.pack_start(self.removeemptydirs, False, True, 0)
        albumart_box.pack_start(removeemptydirs_box, False, True, 0)
        frame_albumart_options = Frame(_('Album art'), albumart_box)
        frame_albumart_options.show_all()
        rename_options.pack_start(frame_albumart_options, False, True, 0)
        self.pack_start(rename_options, False, True, 0)
        self.save = Button(_('_Save'), Icons.DOCUMENT_SAVE)
        self.save.show()
        bbox = Gtk.HButtonBox()
        bbox.set_layout(Gtk.ButtonBoxStyle.END)
        bbox.pack_start(self.save, True, True, 0)
        self.pack_start(bbox, False, True, 0)
        # Column 1: current file name (read-only).
        render = Gtk.CellRendererText()
        column = TreeViewColumn(title=_('File'))
        column.pack_start(render, True)

        def cell_data_file(column, cell, model, iter_, data):
            entry = model.get_value(iter_)
            cell.set_property('text', entry.name)
        column.set_cell_data_func(render, cell_data_file)
        column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        self.view.append_column(column)
        # Column 2: proposed new name (editable by the user).
        render = Gtk.CellRendererText()
        render.set_property('editable', True)
        column = TreeViewColumn(title=_('New Name'))
        column.pack_start(render, True)

        def cell_data_new_name(column, cell, model, iter_, data):
            entry = model.get_value(iter_)
            cell.set_property('text', (entry.new_name or ''))
        column.set_cell_data_func(render, cell_data_new_name)
        column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        self.view.append_column(column)
        connect_obj(self.preview, 'clicked', self._preview, None)
        connect_obj(parent, 'changed', self.__class__._preview, self)
        connect_obj(self.save, 'clicked', self._rename, library)
        render.connect('edited', self.__row_edited)
        for child in self.get_children():
            child.show()

    def __filter_preview(self, *args):
        Gtk.Button.clicked(self.preview)

    def __filter_changed(self, *args):
        self._changed(self.combo.get_child())

    def _changed(self, entry):
        # Pattern changed: invalidate save, allow preview only for non-empty text.
        self.save.set_sensitive(False)
        self.preview.set_sensitive(bool(entry.get_text()))

    def __row_edited(self, renderer, path, new):
        """Handle a manual edit of a proposed new name in the preview list."""
        path = Gtk.TreePath.new_from_string(path)
        model = self.view.get_model()
        entry = model[path][0]
        if (entry.new_name != new):
            entry.new_name = new
            self.preview.set_sensitive(True)
            self.save.set_sensitive(True)
            model.path_changed(path)

    def _rename(self, library):
        """Perform the renames shown in the preview, with interactive error handling."""
        model = self.view.get_model()
        win = WritingWindow(self, len(model))
        win.show()
        was_changed = set()
        skip_all = self.__skip_interactive
        self.view.freeze_child_notify()
        should_move_art = config.getboolean('rename', 'move_art')
        moveart_sets = {}
        remove_empty_dirs = config.getboolean('rename', 'remove_empty_dirs')
        for entry in model.values():
            if (entry.new_name is None):
                continue
            song = entry.song
            old_name = entry.name
            old_pathfile = song['~filename']
            new_name = entry.new_name
            new_pathfile = ''
            # Absolute patterns are used as-is; relative ones resolve next to the old file.
            if (os.path.abspath(new_name) != os.path.abspath(os.path.join(os.getcwd(), new_name))):
                new_pathfile = new_name
            else:
                new_pathfile = os.path.join(os.path.dirname(old_pathfile), new_name)
            try:
                library.rename(song, text2fsn(new_name), changed=was_changed)
            except Exception:
                util.print_exc()
                if skip_all:
                    continue
                msg = qltk.Message(Gtk.MessageType.ERROR, win, _('Unable to rename file'), (_('Renaming %(old-name)s to %(new-name)s failed. Possibly the target file already exists, or you do not have permission to make the new file or remove the old one.') % {'old-name': util.bold(old_name), 'new-name': util.bold(new_name)}), buttons=Gtk.ButtonsType.NONE)
                msg.add_button(_('Ignore _All Errors'), RESPONSE_SKIP_ALL)
                msg.add_icon_button(_('_Stop'), Icons.PROCESS_STOP, Gtk.ResponseType.CANCEL)
                msg.add_button(_('_Continue'), Gtk.ResponseType.OK)
                msg.set_default_response(Gtk.ResponseType.OK)
                resp = msg.run()
                skip_all |= (resp == RESPONSE_SKIP_ALL)
                # Holding shift silences further errors as well.
                mods = Gdk.Display.get_default().get_pointer()[3]
                skip_all |= (mods & Gdk.ModifierType.SHIFT_MASK)
                library.reload(song, changed=was_changed)
                if ((resp != Gtk.ResponseType.OK) and (resp != RESPONSE_SKIP_ALL)):
                    break
            if should_move_art:
                self._moveart(moveart_sets, old_pathfile, new_pathfile, song)
            if remove_empty_dirs:
                path_old = os.path.dirname(old_pathfile)
                if (not os.listdir(path_old)):
                    try:
                        os.rmdir(path_old)
                        print_d(('Removed empty directory: %r' % path_old), self)
                    except Exception:
                        util.print_exc()
            if win.step():
                break
        self.view.thaw_child_notify()
        win.destroy()
        library.changed(was_changed)
        self.save.set_sensitive(False)

    def _moveart(self, art_sets, pathfile_old, pathfile_new, song):
        """Move album-art images from the old song directory to the new one.

        `art_sets` caches the images found per source directory so the glob
        scan runs only once per directory across a batch rename.
        """
        path_old = os.path.dirname(os.path.realpath(pathfile_old))
        path_new = os.path.dirname(os.path.realpath(pathfile_new))
        if (os.path.realpath(path_old) == os.path.realpath(path_new)):
            return
        if ((path_old in art_sets.keys()) and (not art_sets[path_old])):
            return
        images = []
        if (path_old in art_sets.keys()):
            images = art_sets[path_old]
        else:
            def glob_escape(s):
                # Escape glob metacharacters in the directory path.
                for c in '[*?':
                    s = s.replace(c, (('[' + c) + ']'))
                return s
            art_sets[path_old] = images
            path_old_escaped = glob_escape(path_old)
            for suffix in self.IMAGE_EXTENSIONS:
                images.extend(glob.glob(os.path.join(path_old_escaped, ('*.' + suffix))))
        if images:
            # Only move images matching the configured album-art filename patterns.
            filenames = config.getstringlist('albumart', 'search_filenames')
            moves = []
            for fn in filenames:
                fn = os.path.join(path_old, fn)
                if ('<' in fn):
                    fnres = ArbitraryExtensionFileFromPattern(fn).format(song)
                    if ((fnres in images) and (fnres not in moves)):
                        moves.append(fnres)
                elif ('*' in fn):
                    moves.extend((f for f in glob.glob(fn) if ((f in images) and (f not in moves))))
                elif ((fn in images) and (fn not in moves)):
                    moves.append(fn)
            if (len(moves) > 0):
                overwrite = config.getboolean('rename', 'move_art_overwrite')
                for fnmove in moves:
                    try:
                        fnmoveto = os.path.join(path_new, os.path.split(fnmove)[1])
                        fnmoveto_orig = ''
                        if os.path.exists(fnmoveto):
                            # Preserve any existing target as "<name>.orig[.N]".
                            fnmoveto_orig = (fnmoveto + '.orig')
                            if (not os.path.exists(fnmoveto_orig)):
                                os.rename(fnmoveto, fnmoveto_orig)
                            else:
                                suffix = 1
                                while os.path.exists(((fnmoveto_orig + '.') + str(suffix))):
                                    suffix += 1
                                fnmoveto_orig = ((fnmoveto_orig + '.') + str(suffix))
                                os.rename(fnmoveto, fnmoveto_orig)
                        print_d(f'Renaming image {fnmove!r} to {fnmoveto!r}', self)
                        shutil.move(fnmove, fnmoveto)
                        if (overwrite and fnmoveto_orig):
                            os.remove(fnmoveto_orig)
                        images.remove(fnmove)
                    except Exception as e:
                        print_e(f"Couldn't move file ({e})")
                        util.print_exc()

    def _preview(self, songs):
        """Recompute the new-name column for `songs` (or the current rows if None)."""
        model = self.view.get_model()
        if (songs is None):
            songs = [e.song for e in model.values()]
        pattern_text = self.combo.get_child().get_text()
        try:
            pattern = FileFromPattern(pattern_text)
        except ValueError:
            qltk.ErrorMessage(self, _('Path is not absolute'), (_('The pattern\n\t%s\ncontains / but does not start from root. To avoid misnamed folders, root your pattern by starting it with / or ~/.') % util.bold(pattern_text))).run()
            return
        else:
            if pattern:
                self.combo.prepend_text(pattern_text)
                self.combo.write(NBP)
        orignames = [song['~filename'] for song in songs]
        newnames = [fsn2text(pattern.format(song)) for song in songs]
        # Apply each active filename filter in order.
        for f in self.filter_box.filters:
            if f.active:
                newnames = f.filter_list(orignames, newnames)
        model.clear()
        for (song, newname) in zip(songs, newnames, strict=False):
            entry = Entry(song)
            entry.new_name = newname
            model.append(row=[entry])
        self.preview.set_sensitive(False)
        self.save.set_sensitive(bool(pattern_text))
        # Disable the whole pane unless every selected song is a local file.
        for song in songs:
            if (not song.is_file):
                self.set_sensitive(False)
                break
        else:
            self.set_sensitive(True)

    # BUGFIX: restored the @property / @test_mode.setter pair (the setter's
    # decorator was mangled to a bare `_mode.setter` and the getter had lost
    # @property).  test_mode skips the interactive error dialogs in _rename.
    @property
    def test_mode(self):
        return self.__skip_interactive

    @test_mode.setter
    def test_mode(self, value):
        self.__skip_interactive = value
def resolve_logger_callbacks(loggers, defined_loggers) -> List[Callback]:
    """Normalize `loggers` into a list of LoggerCallback instances.

    Accepts None (defaults only), or a list mixing string aliases, callback
    instances, LoggerCallback subclasses, and (deprecated) legacy Logger
    classes.  JSON and CSV logger callbacks are always included.
    """
    init_loggers = {JsonLoggerCallback(), CSVLoggerCallback()}
    if loggers is None:
        return list(init_loggers)
    if not isinstance(loggers, list):
        raise TypeError('`loggers` must be a list of str or tune logger callbacks.')

    # Recognised string aliases and their callback factories.
    factories = {
        'tensorboard': TBXLoggerCallback,
        'csv': CSVLoggerCallback,
        'mlflow': MLflowLoggerCallback,
        'json': JsonLoggerCallback,
    }
    for log in loggers:
        if isinstance(log, str):
            factory = factories.get(log)
            if factory is None:
                raise ValueError(f'{log} is not one of the defined loggers: {defined_loggers}')
            init_loggers.add(factory())
        elif isinstance(log, LoggerCallback):
            init_loggers.add(log)
        elif inspect.isclass(log) and issubclass(log, LoggerCallback):
            init_loggers.add(log())
        elif inspect.isclass(log) and issubclass(log, Logger):
            # Legacy Logger classes get wrapped; warn callers to migrate.
            warnings.warn('Passing `Logger`s is deprecated - please use `LoggerCallback`s instead.', DeprecationWarning)
            init_loggers.add(LegacyLoggerCallback(log))
        else:
            raise TypeError('`loggers` must be a list of str or tune logger callbacks.')
    return list(init_loggers)
def test_edit_units(data, runner):
    """`rio edit-info --units` should persist the per-band unit in the dataset."""
    inputfile = str(data.join('RGB.byte.tif'))
    args = ['edit-info', inputfile, '--bidx', '1', '--units', 'DN']
    result = runner.invoke(main_group, args, catch_exceptions=False)
    assert result.exit_code == 0
    # Re-open the file and confirm the unit was written for band 1.
    with rasterio.open(inputfile) as src:
        assert src.units[0] == 'DN'
def convert_examples_to_features(examples, seq_length, tokenizer):
    """Convert InputExamples into InputFeatures padded/truncated to seq_length.

    Layout: [CLS] tokens_a [SEP] (tokens_b [SEP] when a second text is given);
    segment ids are 0 for the first segment and 1 for the second; ids, mask
    and segment ids are zero-padded to exactly seq_length.
    """
    features = []
    for ex_index, example in enumerate(examples):
        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = tokenizer.tokenize(example.text_b) if example.text_b else None

        if tokens_b:
            # Reserve three slots for [CLS], [SEP], [SEP]; truncates in place.
            _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)
        elif len(tokens_a) > seq_length - 2:
            # Reserve two slots for [CLS] and [SEP].
            tokens_a = tokens_a[0:seq_length - 2]

        tokens = ['[CLS]'] + tokens_a + ['[SEP]']
        input_type_ids = [0] * len(tokens)
        if tokens_b:
            tokens += tokens_b + ['[SEP]']
            input_type_ids += [1] * (len(tokens_b) + 1)

        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        input_mask = [1] * len(input_ids)

        # Zero-pad everything up to the sequence length.
        pad_len = seq_length - len(input_ids)
        input_ids += [0] * pad_len
        input_mask += [0] * pad_len
        input_type_ids += [0] * pad_len

        assert len(input_ids) == seq_length
        assert len(input_mask) == seq_length
        assert len(input_type_ids) == seq_length

        # Log the first few examples for debugging.
        if ex_index < 5:
            tf.logging.info('*** Example ***')
            tf.logging.info(('unique_id: %s' % example.unique_id))
            tf.logging.info(('tokens: %s' % ' '.join([str(x) for x in tokens])))
            tf.logging.info(('input_ids: %s' % ' '.join([str(x) for x in input_ids])))
            tf.logging.info(('input_mask: %s' % ' '.join([str(x) for x in input_mask])))
            tf.logging.info(('input_type_ids: %s' % ' '.join([str(x) for x in input_type_ids])))

        features.append(InputFeatures(unique_id=example.unique_id, tokens=tokens, input_ids=input_ids, input_mask=input_mask, input_type_ids=input_type_ids))
    return features
# BUGFIX: the decorator was mangled to a bare `_to_zarr_if(...)` call;
# restored as @cache_to_zarr_if, matching the keyword arguments it carries —
# confirm against the original module.
@cache_to_zarr_if('cache_sensor_angles', sanitize_args_func=_sanitize_observer_look_args)
def _get_sensor_angles_from_sat_pos(sat_lon, sat_lat, sat_alt, start_time, area_def, chunks):
    """Compute per-pixel sensor angles for the area from the satellite position.

    Returns a (azimuth-like, elevation-like) pair of dask arrays; map_blocks
    stacks the two angle planes along a new leading axis of size 2.
    """
    (lons, lats) = _get_valid_lonlats(area_def, chunks)
    res = da.map_blocks(_get_sensor_angles_ndarray, lons, lats, start_time, sat_lon, sat_lat, sat_alt, dtype=lons.dtype, meta=np.array((), dtype=lons.dtype), new_axis=[0], chunks=((2,) + lons.chunks))
    return (res[0], res[1])
class SplitTransformDataset(Dataset):
    """Dataset pairing images ('*_im*') with ground-truth masks ('*_gt*') under `root`.

    When `perturb` is True, the input segmentation is synthesised on the fly
    by degrading the ground truth to a random target IoU in [0.7, 1.0] and the
    image transform adds colour jitter; otherwise a stored '*_seg*' mask is
    loaded.  Samples are cropped around the mask's (scaled) bounding box
    before tensor conversion/normalisation.
    """

    def __init__(self, root, in_memory=False, need_name=False, perturb=True, img_suffix='_im.jpg'):
        self.root = root
        self.need_name = need_name
        self.in_memory = in_memory
        self.perturb = perturb
        self.img_suffix = img_suffix
        imgs = os.listdir(self.root)
        self.im_list = [im for im in imgs if ('_im' in im)]
        self.gt_list = [im for im in imgs if ('_gt' in im)]
        print(('%d ground truths found' % len(self.gt_list)))
        if perturb:
            # Colour jitter / random grayscale augmentation only when perturbing.
            self.im_transform = transforms.Compose([transforms.ColorJitter(0.2, 0.2, 0.2, 0.2), transforms.RandomGrayscale(), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
        else:
            self.im_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
        self.gt_transform = transforms.Compose([transforms.ToTensor()])
        self.seg_transform = transforms.Compose([transforms.ToTensor(), seg_normalization])
        # Map each GT filename to the stem of its source image
        # (drop the last 8 chars, then cut at the final '_').
        self.gt_to_im = []
        for im in self.gt_list:
            end_idx = im[:(- 8)].rfind('_')
            self.gt_to_im.append(im[:end_idx])
        if self.in_memory:
            self.images = {}
            for im in progressbar.progressbar(self.im_list):
                self.images[im.replace(self.img_suffix, '')] = Image.open(self.join_path(im)).convert('RGB')
            print('Images loaded to memory.')
            self.gts = []
            for im in progressbar.progressbar(self.gt_list):
                self.gts.append(Image.open(self.join_path(im)).convert('L'))
            print('Ground truths loaded to memory')
            if (not self.perturb):
                self.segs = []
                for im in progressbar.progressbar(self.gt_list):
                    self.segs.append(Image.open(self.join_path(im.replace('_gt', '_seg'))).convert('L'))
                print('Input segmentations loaded to memory')

    def join_path(self, im):
        """Return the absolute path of a file under the dataset root."""
        return os.path.join(self.root, im)

    def __getitem__(self, idx):
        """Return (im, seg, gt[, name]) tensors for the idx-th ground truth."""
        if self.in_memory:
            gt = self.gts[idx]
            im = self.images[self.gt_to_im[idx]]
            if (not self.perturb):
                seg = self.segs[idx]
        else:
            gt = Image.open(self.join_path(self.gt_list[idx])).convert('L')
            im = Image.open(self.join_path((self.gt_to_im[idx] + self.img_suffix))).convert('RGB')
            if (not self.perturb):
                seg = Image.open(self.join_path(self.gt_list[idx].replace('_gt', '_seg'))).convert('L')
        # Derive the crop window from the mask's bounding box (GT when
        # perturbing, stored seg otherwise); fall back to the full image.
        if self.perturb:
            (im_width, im_height) = gt.size
            try:
                bb_pos = get_bb_position(np.array(gt))
                bb_pos = mod_bb(*bb_pos, im_height, im_width, 0.1, 0.1)
                (rmin, rmax, cmin, cmax) = scale_bb_by(*bb_pos, im_height, im_width, 0.25, 0.25)
            # BUGFIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.
            except Exception:
                print('Failed to get bounding box')
                rmin = cmin = 0
                rmax = im_height
                cmax = im_width
        else:
            (im_width, im_height) = seg.size
            try:
                bb_pos = get_bb_position(np.array(seg))
                (rmin, rmax, cmin, cmax) = scale_bb_by(*bb_pos, im_height, im_width, 0.25, 0.25)
            # BUGFIX: was a bare `except:` (see above).
            except Exception:
                print('Failed to get bounding box')
                rmin = cmin = 0
                rmax = im_height
                cmax = im_width
        if (((rmax - rmin) == 0) or ((cmax - cmin) == 0)):
            print('No GT, no cropping is done.')
            crop_lambda = (lambda x: x)
        else:
            crop_lambda = (lambda x: transforms.functional.crop(x, rmin, cmin, (rmax - rmin), (cmax - cmin)))
        im = crop_lambda(im)
        gt = crop_lambda(gt)
        if self.perturb:
            # Synthesise an input segmentation at a random target IoU in [0.7, 1.0].
            iou_max = 1.0
            iou_min = 0.7
            iou_target = ((np.random.rand() * (iou_max - iou_min)) + iou_min)
            seg = boundary_modification.modify_boundary(((np.array(gt) > 0.5).astype('uint8') * 255), iou_target=iou_target)
            seg = Image.fromarray(seg)
        else:
            seg = crop_lambda(seg)
        im = self.im_transform(im)
        gt = self.gt_transform(gt)
        seg = self.seg_transform(seg)
        if self.need_name:
            return (im, seg, gt, os.path.basename(self.gt_list[idx][:(- 7)]))
        else:
            return (im, seg, gt)

    def __len__(self):
        return len(self.gt_list)
def check_win(board: dict[(int, str)]) -> bool:
    """Return True if any row, column, or diagonal of the 3x3 board holds
    three equal marks (cells keyed 1-9, left-to-right, top-to-bottom).

    NOTE(review): cells are compared for plain equality, so three identical
    "empty" markers in a line would also count — callers must keep unmarked
    cells distinct if that matters; confirm against board initialisation.
    """
    winning_lines = (
        (1, 2, 3), (4, 5, 6), (7, 8, 9),   # rows
        (1, 4, 7), (2, 5, 8), (3, 6, 9),   # columns
        (1, 5, 9), (3, 5, 7),              # diagonals
    )
    return any(board[a] == board[b] == board[c] for (a, b, c) in winning_lines)
class FeatureFlagsConfiguration(BaseModel):
    """Configuration model holding an optional feature-flags mapping.

    The `features` payload is validated against the feature-flag schema before
    model construction; invalid payloads surface as validation errors.
    """

    features: Optional[dict[(str, Any)]]

    # BUGFIX: the decorator was mangled to a bare `_validator(...)` call;
    # restored as pydantic v2's field_validator (`mode='before'` is the v2
    # signature) — confirm against the original imports.
    @field_validator('features', mode='before')
    @classmethod
    def validate_features(cls, value):
        validator = SchemaValidator(value)
        try:
            validator.validate()
        except Exception as exc:
            # Re-raise as ValueError so pydantic reports a validation error.
            raise ValueError(str(exc)) from exc
        return value
class MaxxVit(nn.Module):
    """CoAtNet/MaxViT-style hybrid conv+transformer backbone.

    ``forward`` returns the list of per-stage feature maps (the last one
    normalized) rather than logits.

    NOTE(review): no classifier head is built in ``__init__``, yet
    ``get_classifier``/``forward_head`` reference ``self.head`` — they only
    work after ``reset_classifier`` has been called. Confirm intended usage.

    Fixed: the four ``.ignore`` residues (a syntax error) are restored to the
    original ``@torch.jit.ignore`` decorators.
    """

    def __init__(self, cfg: MaxxVitCfg, img_size: Union[int, Tuple[int, int]] = 224, in_chans: int = 3, num_classes: int = 1000, global_pool: str = 'avg', drop_rate: float = 0.0, drop_path_rate: float = 0.0):
        super().__init__()
        img_size = to_2tuple(img_size)
        # Window sizes depend on the input resolution.
        transformer_cfg = cfg_window_size(cfg.transformer_cfg, img_size)
        self.num_classes = num_classes
        self.global_pool = global_pool
        self.num_features = cfg.embed_dim[-1]
        self.embed_dim = cfg.embed_dim
        self.drop_rate = drop_rate
        self.grad_checkpointing = False
        self.stem = Stem(in_chs=in_chans, out_chs=cfg.stem_width, act_layer=cfg.conv_cfg.act_layer, norm_layer=cfg.conv_cfg.norm_layer, norm_eps=cfg.conv_cfg.norm_eps)
        stride = self.stem.stride
        feat_size = tuple([i // s for i, s in zip(img_size, to_2tuple(stride))])
        num_stages = len(cfg.embed_dim)
        assert len(cfg.depths) == num_stages
        # Per-block stochastic-depth rates, split into one list per stage.
        dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)]
        in_chs = self.stem.out_chs
        stages = []
        for i in range(num_stages):
            stage_stride = 2  # every stage halves the spatial resolution
            out_chs = cfg.embed_dim[i]
            feat_size = tuple([(r - 1) // stage_stride + 1 for r in feat_size])
            stages += [MaxxVitStage(in_chs, out_chs, depth=cfg.depths[i], block_types=cfg.block_type[i], conv_cfg=cfg.conv_cfg, transformer_cfg=transformer_cfg, feat_size=feat_size, drop_path=dpr[i])]
            stride *= stage_stride
            in_chs = out_chs
        self.stages = nn.Sequential(*stages)
        final_norm_layer = get_norm_layer(cfg.transformer_cfg.norm_layer)
        self.norm = final_norm_layer(self.num_features, eps=cfg.transformer_cfg.norm_eps)
        assert cfg.weight_init in ('', 'normal', 'trunc_normal', 'xavier_normal', 'vit_eff')
        if cfg.weight_init:
            named_apply(partial(self._init_weights, scheme=cfg.weight_init), self)

    def _init_weights(self, module, name, scheme=''):
        # Delegate to each module's own init; some do not accept a scheme arg.
        if hasattr(module, 'init_weights'):
            try:
                module.init_weights(scheme=scheme)
            except TypeError:
                module.init_weights()

    @torch.jit.ignore
    def no_weight_decay(self):
        """Parameter names excluded from weight decay (relative position tables)."""
        return {k for (k, _) in self.named_parameters() if any(((n in k) for n in ['relative_position_bias_table', 'rel_pos.mlp']))}

    @torch.jit.ignore
    def group_matcher(self, coarse=False):
        """Regex grouping of parameters for layer-wise LR decay."""
        matcher = dict(stem='^stem', blocks=[('^stages\\.(\\d+)', None), ('^norm', (99999,))])
        return matcher

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        """Toggle gradient checkpointing on every stage."""
        for s in self.stages:
            s.grad_checkpointing = enable

    @torch.jit.ignore
    def get_classifier(self):
        # NOTE(review): valid only after reset_classifier() created self.head.
        return self.head.fc

    def reset_classifier(self, num_classes, global_pool=None):
        """(Re)build the classification head for `num_classes` outputs."""
        self.num_classes = num_classes
        if global_pool is None:
            global_pool = self.head.global_pool.pool_type
        self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)

    def forward_features(self, x):
        """Return per-stage features; the final stage output is normalized."""
        x = self.stem(x)
        features = []
        for i in range(len(self.stages)):
            x = self.stages[i](x)
            if i == (len(self.stages) - 1):
                features.append(self.norm(x))
            else:
                features.append(x)
        return features

    def forward_head(self, x, pre_logits: bool = False):
        return self.head(x, pre_logits=pre_logits)

    def forward(self, x):
        x = self.forward_features(x)
        return x
# Fixed: the stripped '.parametrize' residue is restored to the
# @pytest.mark.parametrize decorator so the cases actually run.
@pytest.mark.parametrize('mean, scale, size', [
    (np.array(10, dtype=config.floatX), np.array(1, dtype=config.floatX), None),
    (np.array(10, dtype=config.floatX), np.array(1, dtype=config.floatX), []),
    (np.array(10, dtype=config.floatX), np.array(1, dtype=config.floatX), [2, 3]),
    (np.full((1, 2), 10, dtype=config.floatX), np.array(1, dtype=config.floatX), None),
])
def test_wald_samples(mean, scale, size):
    """Wald (inverse Gaussian) draws match the reference sampler for scalar,
    empty, explicit, and broadcast ``size`` arguments."""
    compare_sample_values(wald, mean, scale, size=size)
class Character(Object):
    """Character record parsed from a raw event payload."""

    def from_dict(self):
        """Populate attributes from the raw payload stored on ``self._data``."""
        super().from_dict()
        payload = self._data
        # Plain scalar fields: attribute name -> payload key.
        for attr, key in (
            ('name', 'name'),
            ('team_id', 'teamId'),
            ('health', 'health'),
            ('ranking', 'ranking'),
            ('account_id', 'accountId'),
            ('is_in_blue_zone', 'isInBlueZone'),
            ('is_in_red_zone', 'isInRedZone'),
        ):
            setattr(self, attr, payload.get(key))
        # Structured fields with defaults.
        self.location = Location(payload.get('location', {}))
        self.zone = payload.get('zone', [])
def setup(opt):
    """Instantiate the captioning model named by ``opt.caption_model`` and,
    if ``opt.start_from`` is set, restore its weights from that directory.

    Args:
        opt: options namespace; uses ``caption_model``, ``start_from`` and
            ``name_append``.

    Returns:
        The constructed (and possibly checkpoint-loaded) model.

    Raises:
        Exception: if ``opt.caption_model`` is not a known model name.
    """
    # Registry replaces the original 33-branch if/elif chain.
    model_registry = {
        'fc': FCModel,
        'language_model': LMModel,
        'newfc': NewFCModel,
        'show_tell': ShowTellModel,
        'att2in': Att2inModel,
        'att2in2': Att2in2Model,
        'att2all2': Att2all2Model,
        'adaatt': AdaAttModel,
        'adaattmo': AdaAttMOModel,
        'topdown': TopDownModel,
        'stackatt': StackAttModel,
        'denseatt': DenseAttModel,
        'transformer': TransformerModel,
        'aoa': AoAModel,
        'aoa0': AoAModel0,
        'aoa1': AoAModel1,
        'aoa3': AoAModel3,
        'aoa3_old': AoAModel3_old,
        'aoa3d1': AoAModel3_d1,
        'aoa3d2': AoAModel3_d2,
        'aoa3d3': AoAModel3_d3,
        'aoa3_no_c': AoAModel3_no_c,
        'aoa3_no_p': AoAModel3_no_p,
        'aoa3l1': AoAModel3_l1,
        'aoa3l2': AoAModel3_l2,
        'aoa3l3': AoAModel3_l3,
        'aoa3d1w2': AoAModel3_d1_w2,
        'aoa3d1w4': AoAModel3_d1_w4,
        'aoa3d1_24h': AoAModel3_d1_24heads,
        'aoa4': AoAModel4,
        'aoarelative': AoAModel_relative,
        'aoab': AoAModel_b,
        'aoab_old': AoAModel_b_old,
    }
    model_cls = model_registry.get(opt.caption_model)
    if model_cls is None:
        raise Exception('Caption model not supported: {}'.format(opt.caption_model))
    model = model_cls(opt)
    # Optionally resume from a checkpoint directory.
    if vars(opt).get('start_from', None) is not None:
        name_append = opt.name_append
        # Normalize the suffix so files read 'model_<suffix>.pth' etc.
        if len(name_append) > 0 and name_append[0] != '-':
            name_append = '_' + name_append
        assert os.path.isdir(opt.start_from), (' %s must be a a path' % opt.start_from)
        infos_path = os.path.join(opt.start_from, 'infos' + name_append + '.pkl')
        print(infos_path)
        assert os.path.isfile(infos_path), 'infos.pkl file does not exist in path {}'.format(opt.start_from)
        model_name = 'model' + name_append + '.pth'
        print('Loading model {}......'.format(model_name))
        model.load_state_dict(torch.load(os.path.join(opt.start_from, model_name)))
    return model
class Reader():
    """Loads a job-shop scheduling instance listed in ``instances.json``.

    The instance file format: an optional ``#`` comment lines, one header
    line with ``<num_jobs> <num_machines>``, then one line per job holding
    alternating ``machine_id task_length`` pairs.

    Fixes vs. the original: the dead ``if True:``/unreachable ``else``
    wrapper is removed (its error message now guards the actual file read),
    the metadata file is closed via ``with``, and the no-op chain of
    ``line.replace(' ', ' ')`` calls is replaced by ``str.split()`` which
    collapses any run of whitespace.
    """

    def __init__(self, instance_name):
        # Look up instance metadata (path, optimum/bounds) in instances.json.
        with open(os.path.join(os.path.dirname(__file__), '../../instances.json'), 'r') as meta_file:
            data = json.load(meta_file)
        matches = [inst for inst in data if inst['name'] == instance_name]
        if len(matches) == 0:
            print('There is no instance named %s' % instance_name)
            quit()
        instance = matches[0]
        path = os.path.abspath(os.path.join(os.path.dirname(__file__), ('%s' % '../../') + instance['path']))
        optimum = instance['optimum']
        if not optimum:
            if not instance['bounds']:
                print('Successfully loaded instance "%s" - best lower bound: nan' % instance_name)
            else:
                bound = instance['bounds']['lower']
                print('Successfully loaded instance "%s" - best lower bound: %s' % (instance_name, bound))
        else:
            print('Successfully loaded instance "%s" - known optimum: %s' % (instance_name, optimum))
        jobs = []
        machines = []
        job_counter = 0
        try:
            with open(path) as file:
                for line in file.readlines():
                    if line[0] == '#':
                        continue  # comment line
                    # split() collapses any whitespace and drops the newline.
                    elements = line.split()
                    if not elements:
                        continue  # blank line
                    if len(elements) == 2:
                        # Header: <num_jobs> <num_machines>.
                        num_machines = int(elements[1])
                        machines = [Machine(id=i) for i in range(num_machines)]
                    else:
                        job_counter += 1
                        job = Job(job_counter)
                        jobs.append(job)
                        if (len(elements) % 2) != 0:
                            print('Job specification has an error. Each line that describes a job needs to have an even number of numbers')
                            raise SystemExit(0)
                        task_counter = 0
                        while len(elements) > 0:
                            task_counter += 1
                            name = 'j_%i_t_%i' % (job_counter, task_counter)
                            task_machine_id = int(elements.pop(0))
                            task_machine = machines[task_machine_id]
                            task_length = int(elements.pop(0))
                            task = Task(name, task_machine, task_length)
                            job.append_task(task)
            # Derive a display name from the file name (either separator).
            instance_name = path.split('/')[(- 1)].split('\\')[(- 1)]
            self.instance = Instance(instance_name, jobs, machines)
        except OSError:
            print('Could not read the problem specification. Check if the path is correct and the problem specification is in the expected format.')
            raise SystemExit(0)

    def get_instance(self):
        """Return the parsed :class:`Instance`."""
        return self.instance
def env_settings():
    """Load the user's local environment settings module.

    Returns:
        An ``EnvironmentSettings`` instance from ``ltr.admin.local``.

    Raises:
        RuntimeError: if the module is missing or broken; a default
            ``local.py`` template is created first so the user can fill it in.
    """
    env_module_name = 'ltr.admin.local'
    try:
        env_module = importlib.import_module(env_module_name)
        return env_module.EnvironmentSettings()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any import/attribute failure still lands here.
        env_file = os.path.join(os.path.dirname(__file__), 'local.py')
        create_default_local_file()
        raise RuntimeError('YOU HAVE NOT SETUP YOUR local.py!!!\n Go to "{}" and set all the paths you need. Then try to run again.'.format(env_file))
class HflixIn(SimpleDecrypter):
    """Hflix.in link decrypter plugin.

    Follows the page's HTTP meta-refresh redirect and queues the target URL
    as a new package; otherwise marks the file offline.
    """
    __name__ = 'HflixIn'
    __type__ = 'decrypter'
    __version__ = '0.12'
    __status__ = 'testing'
    # NOTE(review): the URL regex literal on the next line was truncated in
    # this copy (unterminated string) — restore it from the upstream plugin.
    __pattern__ = '
    __description__ = 'Hflix.in decrypter plugin'
    __license__ = 'GPLv3'
    __authors__ = [('GammaC0de', 'nitzo2001[AT]yahoo[DOT]com')]

    def decrypt(self, pyfile):
        # Fetch only the response headers and look for a refresh redirect
        # of the form "<seconds>;url=<target>".
        headers = self.load(pyfile.url, just_header=True)
        if (('refresh' in headers) and headers['refresh']):
            m = re.search('\\d+;url=(.+)', headers['refresh'])
            # NOTE(review): the string literal inside this condition was also
            # truncated — presumably it excluded redirects back to the same
            # site or an error page; restore from upstream before shipping.
            if (m and (' not in m.group(1))):
                self.packages.append((pyfile.package().name, [m.group(1)], pyfile.package().name))
            else:
                self.offline()
def train_image_diffusion(cfg):
    """Train a single-level diffusion model on crops of one training image."""
    total_steps = 50000
    image = imread(f'./images/{cfg.image_name}')
    # Crop slightly smaller than the full image so sampled crops vary.
    crop_size = int(min(image[0].shape[-2:]) * 0.95)
    dataset = CropSet(image=image, crop_size=crop_size, use_flip=False)
    loader = DataLoader(dataset, batch_size=1, num_workers=4, shuffle=True)
    backbone = NextNet(in_channels=3, filters_per_layer=cfg.network_filters, depth=cfg.network_depth)
    diffusion = Diffusion(backbone, training_target='x0', timesteps=cfg.diffusion_timesteps, auto_sample=True, sample_size=image[0].shape[-2:])
    callbacks = [
        pl.callbacks.ModelSummary(max_depth=-1),
        pl.callbacks.ModelCheckpoint(filename='single-level-{step}', save_last=True, save_top_k=3, monitor='train_loss', mode='min'),
    ]
    logger = pl.loggers.TensorBoardLogger('lightning_logs/', name=cfg.image_name, version=cfg.run_name)
    trainer = pl.Trainer(max_steps=total_steps, gpus=1, auto_select_gpus=True, logger=logger, log_every_n_steps=10, callbacks=callbacks)
    trainer.fit(diffusion, loader)
def test_varyings_remove2():
    """Unused varyings (foo, spam) are commented out and dropped from the struct."""
    shader_in = '\n fn vs_main() -> Varyings {\n var varyings : Varyings;\n varyings.foo = f32(something1);\n varyings.bar = vec2<f32>(something2);\n varyings.spam = vec3<f32>(something3);\n return varyings;\n }\n\n fn fs_main(varyings : Varyings) {\n use(varyings.bar);\n }\n '
    expected = '\n struct Varyings {\n (0) bar : vec2<f32>,\n };\n\n fn vs_main() -> Varyings {\n var varyings : Varyings;\n // unused: varyings.foo = f32(something1);\n varyings.bar = vec2<f32>(something2);\n // unused: varyings.spam = vec3<f32>(something3);\n return varyings;\n }\n\n fn fs_main(varyings : Varyings) {\n use(varyings.bar);\n }\n '
    resolved = resolve_varyings(shader_in)
    assert resolved.strip() == expected.strip()
class R2RBatch(object):
    """Batched navigation environment for R2R (and optionally REVERIE) episodes.

    Wraps an ``EnvBatch`` of simulators, shuffles/serves instruction data in
    minibatches, produces per-step observations with candidate actions, and
    computes evaluation metrics (SR/SPL/nDTW/etc.) against ground-truth paths.
    """

    def __init__(self, feat_db, instr_data, connectivity_dir, batch_size=64, angle_feat_size=4, seed=0, name=None, sel_data_idxs=None, is_reverie=False, anno_dir=None):
        """Build simulators, optionally shard the data, and precompute graphs.

        ``sel_data_idxs`` is an optional ``(shard_index, num_shards)`` pair used
        to split the data across parallel jobs.
        """
        self.env = EnvBatch(connectivity_dir, feat_db=feat_db, batch_size=batch_size)
        self.is_reverie = is_reverie
        self.data = instr_data
        self.scans = set([x['scan'] for x in self.data])
        # Ground-truth trajectories are collected from the FULL data set,
        # before any sharding below.
        self.gt_trajs = self._get_gt_trajs(self.data)
        if (sel_data_idxs is not None):
            # Shard the instruction data for multi-job evaluation.
            (t_split, n_splits) = sel_data_idxs
            ndata_per_split = (len(self.data) // n_splits)
            start_idx = (ndata_per_split * t_split)
            if (t_split == (n_splits - 1)):
                end_idx = None
            else:
                end_idx = (start_idx + ndata_per_split)
            self.data = self.data[start_idx:end_idx]
        self.connectivity_dir = connectivity_dir
        self.angle_feat_size = angle_feat_size
        self.name = name
        self.seed = seed
        random.seed(self.seed)
        random.shuffle(self.data)
        self.ix = 0  # cursor into self.data for minibatching
        self.batch_size = batch_size
        self._load_nav_graphs()
        # Extra simulator used for candidate enumeration and goal lookups.
        self.sim = new_simulator(self.connectivity_dir)
        self.angle_feature = get_all_point_angle_feature(self.sim, self.angle_feat_size)
        # Cache of per-(scan, viewpoint) candidate metadata.
        self.buffered_state_dict = {}
        if is_reverie:
            # Map "<scan>_<objid>" -> viewpoints from which the object is visible.
            self.obj2viewpoint = {}
            bbox_data = json.load(open(os.path.join(anno_dir, 'BBoxes.json')))
            for (scanvp, value) in bbox_data.items():
                (scan, vp) = scanvp.split('_')
                for (objid, objinfo) in value.items():
                    if objinfo['visible_pos']:
                        self.obj2viewpoint.setdefault(((scan + '_') + objid), [])
                        self.obj2viewpoint[((scan + '_') + objid)].append(vp)
        print(('%s loaded with %d instructions, using splits: %s' % (self.__class__.__name__, len(self.data), self.name)))

    def _get_gt_trajs(self, data):
        """Map instr_id -> ground-truth (scan, path[, objId]) tuples."""
        if self.is_reverie:
            # NOTE(review): uses self.data instead of the `data` argument here —
            # equivalent at the current call site, but inconsistent; confirm.
            return {x['instr_id']: (x['scan'], x['path'], x['objId']) for x in self.data}
        else:
            return {x['instr_id']: (x['scan'], x['path']) for x in data}

    def size(self):
        """Number of instruction items served by this environment."""
        return len(self.data)

    def _load_nav_graphs(self):
        """Load per-scan connectivity graphs and all-pairs shortest paths/distances."""
        print(('Loading navigation graphs for %d scans' % len(self.scans)))
        self.graphs = load_nav_graphs(self.connectivity_dir, self.scans)
        self.shortest_paths = {}
        for (scan, G) in self.graphs.items():
            self.shortest_paths[scan] = dict(nx.all_pairs_dijkstra_path(G))
        self.shortest_distances = {}
        for (scan, G) in self.graphs.items():
            self.shortest_distances[scan] = dict(nx.all_pairs_dijkstra_path_length(G))

    def _next_minibatch(self, batch_size=None, **kwargs):
        """Advance the data cursor, wrapping around (with a reshuffle) at the end."""
        if (batch_size is None):
            batch_size = self.batch_size
        batch = self.data[self.ix:(self.ix + batch_size)]
        if (len(batch) < batch_size):
            # Wrapped past the end: reshuffle and top up from the start.
            random.shuffle(self.data)
            self.ix = (batch_size - len(batch))
            batch += self.data[:self.ix]
        else:
            self.ix += batch_size
        self.batch = batch

    def reset_epoch(self, shuffle=False):
        """Rewind the data cursor (optionally reshuffling) for a fresh epoch."""
        if shuffle:
            random.shuffle(self.data)
        self.ix = 0

    def _shortest_path_action(self, state, goalViewpointId):
        """Next viewpoint on the shortest path from the current state to the goal."""
        if (state.location.viewpointId == goalViewpointId):
            return goalViewpointId
        path = self.shortest_paths[state.scanId][state.location.viewpointId][goalViewpointId]
        nextViewpointId = path[1]
        return nextViewpointId

    def make_candidate(self, feature, scanId, viewpointId, viewId):
        """Enumerate navigable candidates at a viewpoint with visual+angle features.

        Results are cached per (scan, viewpoint); on cache hits only the
        heading-dependent parts are recomputed for the current ``viewId``.
        """
        def _loc_distance(loc):
            # Angular distance used to pick the best view of each neighbor.
            return np.sqrt(((loc.rel_heading ** 2) + (loc.rel_elevation ** 2)))
        base_heading = ((viewId % 12) * math.radians(30))
        adj_dict = {}
        long_id = ('%s_%s' % (scanId, viewpointId))
        if (long_id not in self.buffered_state_dict):
            # Sweep all 36 discrete views (12 headings x 3 elevations).
            for ix in range(36):
                if (ix == 0):
                    self.sim.newEpisode([scanId], [viewpointId], [0], [math.radians((- 30))])
                elif ((ix % 12) == 0):
                    self.sim.makeAction([0], [1.0], [1.0])
                else:
                    self.sim.makeAction([0], [1.0], [0])
                state = self.sim.getState()[0]
                assert (state.viewIndex == ix)
                heading = (state.heading - base_heading)
                elevation = state.elevation
                visual_feat = feature[ix]
                for (j, loc) in enumerate(state.navigableLocations[1:]):
                    distance = _loc_distance(loc)
                    loc_heading = (heading + loc.rel_heading)
                    loc_elevation = (elevation + loc.rel_elevation)
                    angle_feat = angle_feature(loc_heading, loc_elevation, self.angle_feat_size)
                    # Keep the view in which the neighbor is most centered.
                    if ((loc.viewpointId not in adj_dict) or (distance < adj_dict[loc.viewpointId]['distance'])):
                        adj_dict[loc.viewpointId] = {'heading': loc_heading, 'elevation': loc_elevation, 'normalized_heading': (state.heading + loc.rel_heading), 'scanId': scanId, 'viewpointId': loc.viewpointId, 'pointId': ix, 'distance': distance, 'idx': (j + 1), 'feature': np.concatenate((visual_feat, angle_feat), (- 1)), 'position': np.array([loc.x, loc.y, loc.z], dtype=np.float32)}
            candidate = list(adj_dict.values())
            # Cache only the heading-independent fields.
            self.buffered_state_dict[long_id] = [{key: c[key] for key in ['normalized_heading', 'elevation', 'scanId', 'viewpointId', 'pointId', 'idx', 'position']} for c in candidate]
            return candidate
        else:
            candidate = self.buffered_state_dict[long_id]
            candidate_new = []
            for c in candidate:
                c_new = c.copy()
                ix = c_new['pointId']
                normalized_heading = c_new['normalized_heading']
                visual_feat = feature[ix]
                # Re-derive the heading relative to the agent's current view.
                loc_heading = (normalized_heading - base_heading)
                c_new['heading'] = loc_heading
                angle_feat = angle_feature(c_new['heading'], c_new['elevation'], self.angle_feat_size)
                c_new['feature'] = np.concatenate((visual_feat, angle_feat), (- 1))
                c_new.pop('normalized_heading')
                candidate_new.append(c_new)
            return candidate_new

    def _teacher_path_action(self, state, path, t=None, shortest_teacher=False):
        """Teacher action: next viewpoint along the ground-truth (or shortest) path."""
        if shortest_teacher:
            return self._shortest_path_action(state, path[(- 1)])
        teacher_vp = None
        if (t is not None):
            # Time-indexed teacher: follow the GT path step by step.
            teacher_vp = (path[(t + 1)] if (t < (len(path) - 1)) else state.location.viewpointId)
        elif (state.location.viewpointId in path):
            cur_idx = path.index(state.location.viewpointId)
            if (cur_idx == (len(path) - 1)):
                teacher_vp = state.location.viewpointId
            else:
                teacher_vp = path[(cur_idx + 1)]
        return teacher_vp

    def _get_obs(self, t=None, shortest_teacher=False):
        """Build the observation dict for every episode in the current batch."""
        obs = []
        for (i, (feature, state)) in enumerate(self.env.getStates()):
            item = self.batch[i]
            base_view_id = state.viewIndex
            if (feature is None):
                feature = np.zeros((36, 2048))
            candidate = self.make_candidate(feature, state.scanId, state.location.viewpointId, state.viewIndex)
            feature = np.concatenate((feature, self.angle_feature[base_view_id]), (- 1))
            abs_position = np.array([state.location.x, state.location.y, state.location.z])
            # Query the simulator for the goal viewpoint's world position.
            goal_vp = item['path'][(- 1)]
            self.sim.newEpisode([state.scanId], [goal_vp], [0], [0])
            goal_state = self.sim.getState()[0]
            goal_pos = np.array([goal_state.location.x, goal_state.location.y])
            obs.append({'instr_id': item['instr_id'], 'scan': state.scanId, 'viewpoint': state.location.viewpointId, 'viewIndex': state.viewIndex, 'heading': state.heading, 'elevation': state.elevation, 'feature': feature, 'candidate': candidate, 'navigableLocations': state.navigableLocations, 'instruction': item['instruction'], 'teacher': self._teacher_path_action(state, item['path'], t=t, shortest_teacher=shortest_teacher), 'gt_path': item['path'], 'path_id': item['path_id'], 'position': abs_position, 'goal_position': goal_pos})
            if ('instr_encoding' in item):
                obs[(- 1)]['instr_encoding'] = item['instr_encoding']
            # Geodesic distances to the goal, for the agent and each candidate.
            goal = item['path'][(- 1)]
            obs[(- 1)]['distance'] = self.shortest_distances[state.scanId][state.location.viewpointId][goal]
            for cand in obs[(- 1)]['candidate']:
                cand['distance'] = self.shortest_distances[state.scanId][cand['viewpointId']][goal]
        return obs

    def reset(self, **kwargs):
        """Start a new minibatch of episodes at their path start points."""
        self._next_minibatch(**kwargs)
        scanIds = [item['scan'] for item in self.batch]
        viewpointIds = [item['path'][0] for item in self.batch]
        headings = [item['heading'] for item in self.batch]
        self.env.newEpisodes(scanIds, viewpointIds, headings)
        return self._get_obs(t=0)

    def step(self, actions, t=None):
        """Apply one action per episode and return the new observations."""
        self.env.makeActions(actions)
        return self._get_obs(t=t)

    def _get_nearest(self, shortest_distances, goal_id, path):
        """Viewpoint on `path` geodesically closest to `goal_id` (oracle stop)."""
        near_id = path[0]
        near_d = shortest_distances[near_id][goal_id]
        for item in path:
            d = shortest_distances[item][goal_id]
            if (d < near_d):
                near_id = item
                near_d = d
        return near_id

    def _eval_item(self, scan, path, gt_path, gt_objid=None):
        """Score one trajectory: nav error, SR, oracle SR, SPL, nDTW, SDTW, CLS."""
        scores = {}
        shortest_distances = self.shortest_distances[scan]
        assert (gt_path[0] == path[0]), 'Result trajectories should include the start position'
        nearest_position = self._get_nearest(shortest_distances, gt_path[(- 1)], path)
        scores['nav_error'] = shortest_distances[path[(- 1)]][gt_path[(- 1)]]
        scores['oracle_error'] = shortest_distances[nearest_position][gt_path[(- 1)]]
        scores['trajectory_steps'] = (len(path) - 1)
        scores['trajectory_lengths'] = np.sum([shortest_distances[a][b] for (a, b) in zip(path[:(- 1)], path[1:])])
        gt_lengths = np.sum([shortest_distances[a][b] for (a, b) in zip(gt_path[:(- 1)], gt_path[1:])])
        if self.is_reverie:
            # REVERIE success: stop at any viewpoint from which the object is visible.
            goal_viewpoints = set(self.obj2viewpoint[('%s_%s' % (scan, str(gt_objid)))])
            scores['success'] = float((path[(- 1)] in goal_viewpoints))
            scores['oracle_success'] = float(any(((x in goal_viewpoints) for x in path)))
            scores['spl'] = ((scores['success'] * gt_lengths) / max(scores['trajectory_lengths'], gt_lengths, 0.01))
        else:
            # R2R success: final position within ERROR_MARGIN of the goal.
            scores['success'] = float((scores['nav_error'] < ERROR_MARGIN))
            scores['spl'] = ((scores['success'] * gt_lengths) / max(scores['trajectory_lengths'], gt_lengths, 0.01))
            scores['oracle_success'] = float((scores['oracle_error'] < ERROR_MARGIN))
        scores.update(cal_dtw(shortest_distances, path, gt_path, scores['success'], ERROR_MARGIN))
        scores['CLS'] = cal_cls(shortest_distances, path, gt_path, ERROR_MARGIN)
        return scores

    def eval_metrics(self, preds):
        """Aggregate per-trajectory scores over all predictions.

        Returns (averaged metrics, per-item metrics, per-item success records).
        """
        print(('eval %d predictions' % len(preds)))
        sr_dicts = []
        metrics = defaultdict(list)
        for item in preds:
            instr_id = item['instr_id']
            traj = [x[0] for x in item['trajectory']]
            if self.is_reverie:
                (scan, gt_traj, gt_objid) = self.gt_trajs[instr_id]
                traj_scores = self._eval_item(scan, traj, gt_traj, gt_objid=gt_objid)
            else:
                (scan, gt_traj) = self.gt_trajs[instr_id]
                traj_scores = self._eval_item(scan, traj, gt_traj)
            for (k, v) in traj_scores.items():
                metrics[k].append(v)
            metrics['instr_id'].append(instr_id)
            sr_dicts.append({'instr_id': instr_id, 'sr': traj_scores['success']})
        avg_metrics = {'steps': np.mean(metrics['trajectory_steps']), 'lengths': np.mean(metrics['trajectory_lengths']), 'nav_error': np.mean(metrics['nav_error']), 'oracle_error': np.mean(metrics['oracle_error']), 'sr': (np.mean(metrics['success']) * 100), 'oracle_sr': (np.mean(metrics['oracle_success']) * 100), 'spl': (np.mean(metrics['spl']) * 100), 'nDTW': (np.mean(metrics['nDTW']) * 100), 'SDTW': (np.mean(metrics['SDTW']) * 100), 'CLS': (np.mean(metrics['CLS']) * 100)}
        return (avg_metrics, metrics, sr_dicts)
def short_platform(r=None, p=None):
    """Return platform string *p* with the kernel release *r* shortened.

    The patch level is dropped from the version ("5.15.0" -> "5.15") and any
    leading numeric components are stripped from the second dash-separated
    field. If the release doesn't look like "<maj.min.patch>-<rest>", *p* is
    returned unchanged. Defaults come from the running system.
    """
    if r is None:
        r = platform.release()
    if p is None:
        p = platform.platform()
    parts = r.split('-')
    if len(parts) < 2:
        return p
    version_fields = parts[0].split('.')
    if len(version_fields) <= 2:
        return p
    # Keep only major.minor of the kernel version.
    parts[0] = '.'.join(version_fields[:2])
    # Drop leading all-digit components from the next field (e.g. build number).
    suffix = parts[1].split('.')
    keep = 0
    while keep < len(suffix) and suffix[keep].isdigit():
        keep += 1
    parts[1] = '.'.join(suffix[keep:])
    shortened = '-'.join(parts)
    return p.replace(r, shortened)
def get_openssl_cnf_path(opts):
    """Return the path of a generated OpenSSL config file, creating it once.

    The template from ``p.openssl_cnf_path()`` is copied to a temp file with
    the common name and client/server alternative names substituted in. The
    generated path is memoized in the module-level ``generated_cnf_file``.
    """
    global generated_cnf_file
    try:
        # Reuse the previously generated file if it still exists.
        if path.exists(generated_cnf_file):
            return generated_cnf_file
    except TypeError:
        # generated_cnf_file is still None on the first call.
        pass
    cn = opts.common_name
    # Alt names default to the common name when not given.
    client_alt_name = (opts.client_alt_name or opts.common_name)
    server_alt_name = (opts.server_alt_name or opts.common_name)
    cnf_path = p.openssl_cnf_path()
    tmp_cnf_path = None
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as outfile:
        with open(cnf_path, 'r') as infile:
            in_cnf = infile.read()
        # NOTE(review): these placeholder literals look garbled. Replacing '_'
        # substitutes EVERY underscore (destroying the '_ALT_' markers), and
        # the second '_ALT_' replace can never match after the first one has
        # consumed all occurrences. The template presumably used three distinct
        # placeholders (CN / client alt / server alt) — restore them from the
        # openssl.cnf template before relying on this.
        out_cnf0 = in_cnf.replace('_', cn)
        out_cnf1 = out_cnf0.replace('_ALT_', client_alt_name)
        out_cnf2 = out_cnf1.replace('_ALT_', server_alt_name)
        outfile.write(out_cnf2)
        tmp_cnf_path = outfile.name
    # Memoize so subsequent calls reuse the same file.
    generated_cnf_file = tmp_cnf_path
    return tmp_cnf_path
def _do_check_version(current_version: Union[Version, LegacyVersion], raiden: 'RaidenService') -> bool:
    """Compare `current_version` against the latest GitHub release.

    Emits a security notification if the release notes contain a security
    notice, and an outdated-version notification when a newer release exists.

    Returns:
        True when up to date; False when outdated or the GitHub API call
        did not return a release tag.
    """
    content = requests.get(LATEST).json()
    if 'tag_name' not in content:
        # Most likely the GitHub API rate limit was hit.
        click.secho('Error while contacting github for latest version. API rate limit exceeded?', fg='red')
        return False
    latest_release = parse_version(content['tag_name'])
    # Surface any security notice embedded in the release notes.
    security_message = re.search(SECURITY_EXPRESSION, content['body'])
    if security_message:
        notification = Notification(id=NotificationIDs.VERSION_SECURITY_WARNING.value, summary='Security Warning', body=security_message.group(0), urgency='high')
        raiden.add_notification(notification, click_opts={'fg': 'red'})
    if current_version < latest_release:
        # Fixed: the original message ran its sentences together
        # ("...is {latest_release}It's time...").
        msg = (
            f"You're running version {current_version}. "
            f"The latest version is {latest_release}. "
            f"It's time to update! Releases: {RELEASE_PAGE}"
        )
        notification = Notification(id=NotificationIDs.VERSION_OUTDATED.value, summary='Your version is outdated', body=msg, urgency='normal')
        raiden.add_notification(notification, click_opts={'fg': 'red'})
        return False
    return True
def test_set_defaults_pass_no_substitutions():
    """set_defaults only adds missing keys; existing values are untouched."""
    context = Context({'key1': 'value1', 'key2': 'value2', 'key3': 'value3'})
    context.set_defaults({'key2': 'value4', 'key4': 'value5'})
    expected = {
        'key1': 'value1',
        'key2': 'value2',  # existing key wins over the default
        'key3': 'value3',
        'key4': 'value5',  # new key is added
    }
    for key, value in expected.items():
        assert context[key] == value
class AppEngineServer(ServerAdapter):
    """Adapter running the WSGI handler under Google App Engine's webapp util."""
    quiet = True

    def run(self, handler):
        from google.appengine.ext.webapp import util
        # Expose a `main` on __main__ if the script doesn't define one, so
        # App Engine's cached-main convention keeps working.
        main_module = sys.modules.get('__main__')
        if main_module and not hasattr(main_module, 'main'):
            main_module.main = lambda: util.run_wsgi_app(handler)
        util.run_wsgi_app(handler)
def suggest_mlp_params(trial):
    """Sample an MLP hyper-parameter configuration from an Optuna-style trial."""
    return {
        'lr': trial.suggest_loguniform('lr', 5e-05, 0.005),
        # dropout / weight_decay are optional: the helper may return None.
        'dropout': _suggest_optional(trial, 'uniform', 'dropout', 0.0, 0.5),
        'weight_decay': _suggest_optional(trial, 'loguniform', 'weight_decay', 1e-06, 0.01),
        'd_layers': _suggest_mlp_layers(trial, [1, 8, 6, 10]),
    }
def load_bin_vec(fname, vocab):
    """Load word2vec binary-format vectors for the words present in *vocab*.

    Fixes vs. the original Python-2-only version: ``xrange`` -> ``range``,
    byte-safe word terminator comparison (``b' '``/``b'\\n'``), an EOF guard
    so a truncated file cannot loop forever, and deprecated ``np.fromstring``
    replaced by ``np.frombuffer(...).copy()`` (the copy keeps the returned
    arrays writable, matching the old behavior).

    Args:
        fname: path to a word2vec ``.bin`` file.
        vocab: container of words (str) to keep.

    Returns:
        dict mapping word -> float32 numpy vector.
    """
    word_vecs = {}
    with open(fname, 'rb') as f:
        header = f.readline()
        vocab_size, layer1_size = map(int, header.split())
        binary_len = np.dtype('float32').itemsize * layer1_size
        for _ in range(vocab_size):
            chars = []
            while True:
                ch = f.read(1)
                if not ch or ch == b' ':
                    # EOF or the space that terminates the word.
                    break
                if ch != b'\n':
                    chars.append(ch)
            word = b''.join(chars).decode('utf-8', errors='replace')
            if word in vocab:
                word_vecs[word] = np.frombuffer(f.read(binary_len), dtype='float32').copy()
            else:
                # Skip the vector for out-of-vocabulary words.
                f.read(binary_len)
    return word_vecs
class OptaxStatePartitionRules():
    """Derives logical partition axes for optax optimizer state trees.

    Fixed: ``_is_optax_state`` and ``derive_optax_logical_axes`` both take
    ``cls`` and are invoked without an instance (including from the lambdas in
    ``_RULES``), so the stripped ``@classmethod`` decorators are restored.
    """

    # Handler per optax state type. Each receives (state, params_axes) and
    # returns the same state type whose fields hold axis annotations —
    # params_axes for parameter-shaped fields, None for unpartitioned
    # scalars such as counts and RNG keys.
    _RULES = {
        amos.ScaleByAmosState: amos_helper.state_partition_rule,
        optax.AddNoiseState: (lambda state, params_axes: optax.AddNoiseState(count=None, rng_key=None)),
        optax.DifferentiallyPrivateAggregateState: (lambda state, params_axes: optax.DifferentiallyPrivateAggregateState(rng_key=None)),
        optax.EmaState: (lambda state, params_axes: optax.EmaState(count=None, ema=params_axes)),
        optax.EmptyState: (lambda state, params_axes: optax.EmptyState()),
        optax.TraceState: (lambda state, params_axes: optax.TraceState(trace=params_axes)),
        optax.ScaleByAdamState: (lambda state, params_axes: optax.ScaleByAdamState(count=None, mu=params_axes, nu=params_axes)),
        optax.ScaleByBeliefState: (lambda state, params_axes: optax.ScaleByBeliefState(count=None, mu=params_axes, nu=params_axes)),
        optax.ScaleByRssState: (lambda state, params_axes: optax.ScaleByRssState(sum_of_squares=params_axes)),
        optax.ScaleByRmsState: (lambda state, params_axes: optax.ScaleByRmsState(nu=params_axes)),
        optax.ScaleByRStdDevState: (lambda state, params_axes: optax.ScaleByRStdDevState(mu=params_axes, nu=params_axes)),
        optax.ScaleBySM3State: (lambda state, params_axes: optax.ScaleBySM3State(mu=params_axes, nu=params_axes)),
        optax.ScaleByTrustRatioState: (lambda state, params_axes: optax.ScaleByTrustRatioState()),
        optax.ScaleByScheduleState: (lambda state, params_axes: optax.ScaleByScheduleState(count=None)),
        optax.ZeroNansState: (lambda state, params_axes: optax.ZeroNansState(found_nan=None)),
        # Wrapper states recurse into their inner optimizer state.
        optax.MaskedState: (lambda state, params_axes: optax.MaskedState(inner_state=OptaxStatePartitionRules.derive_optax_logical_axes(state.inner_state, params_axes))),
        optax.InjectHyperparamsState: (lambda state, params_axes: optax.InjectHyperparamsState(count=None, hyperparams=jax.tree_map((lambda x: None), state.hyperparams), inner_state=OptaxStatePartitionRules.derive_optax_logical_axes(state.inner_state, params_axes))),
        optax.MultiStepsState: (lambda state, params_axes: optax.MultiStepsState(mini_step=None, gradient_step=None, inner_opt_state=OptaxStatePartitionRules.derive_optax_logical_axes(state.inner_opt_state, params_axes), acc_grads=params_axes)),
        optax.ApplyIfFiniteState: (lambda state, params_axes: optax.ApplyIfFiniteState(notfinite_count=None, last_finite=None, total_notfinite=None, inner_state=OptaxStatePartitionRules.derive_optax_logical_axes(state.inner_state, params_axes))),
        optax.MaybeUpdateState: (lambda state, params_axes: optax.MaybeUpdateState(inner_state=OptaxStatePartitionRules.derive_optax_logical_axes(state.inner_state, params_axes), step=None)),
        optax.MultiTransformState: (lambda state, params_axes: optax.MultiTransformState(inner_states=OptaxStatePartitionRules.derive_optax_logical_axes(state.inner_states, params_axes))),
    }

    @classmethod
    def _is_optax_state(cls, x):
        """True for optax-style state namedtuples (class name ends in 'State')."""
        is_named_tuple = (isinstance(x, tuple) and hasattr(x, '_asdict') and hasattr(x, '_fields'))
        result = (is_named_tuple and type(x).__name__.endswith('State'))
        return result

    @classmethod
    def derive_optax_logical_axes(cls, optax_state, params_axes):
        """Map every leaf of `optax_state` to its partition-axis annotation.

        Raises:
            ValueError: when an optax state type has no registered rule.
        """
        (flattened_state, tree_def) = jax.tree_util.tree_flatten(optax_state, is_leaf=cls._is_optax_state)

        def derive_fn(x):
            if type(x) not in cls._RULES:
                if cls._is_optax_state(x):
                    raise ValueError(f'Encountered unregistered optax state type {type(x).__name__}')
                return None
            return cls._RULES[type(x)](x, params_axes)

        flattened_axes = [derive_fn(x) for x in flattened_state]
        derived_axes = jax.tree_util.tree_unflatten(tree_def, flattened_axes)
        return derived_axes
def get_trainer(args, return_trainer_only=True):
    """Build a Lightning Trainer (and checkpoint callback) from CLI args.

    ``args.monitor`` is expected to be "<mode> <metric>", e.g. "min val_loss".
    """
    ckpt_path = os.path.abspath(args.downstream_model_dir)
    os.makedirs(ckpt_path, exist_ok=True)
    monitor_parts = args.monitor.split()
    monitor_mode, monitor_metric = monitor_parts[0], monitor_parts[1]
    checkpoint_callback = ModelCheckpoint(
        dirpath=ckpt_path,
        save_top_k=args.save_top_k,
        monitor=monitor_metric,
        mode=monitor_mode,
        filename='{epoch}-{val_loss:.2f}',
    )
    cuda_available = torch.cuda.is_available()
    trainer = Trainer(
        max_epochs=args.epochs,
        fast_dev_run=args.test_mode,
        num_sanity_val_steps=None if args.test_mode else 0,
        callbacks=[checkpoint_callback],
        default_root_dir=ckpt_path,
        deterministic=cuda_available and args.seed is not None,
        gpus=torch.cuda.device_count() if cuda_available else None,
        precision=16 if args.fp16 else 32,
        tpu_cores=args.tpu_cores if args.tpu_cores else None,
    )
    if return_trainer_only:
        return trainer
    return (checkpoint_callback, trainer)
def test_bn_reestimation():
    """BN re-estimation works for an FP32 BatchNorm model and its quantized sim."""
    tf.keras.backend.clear_session()
    np.random.seed(0)
    samples = np.random.randn(1024, 32, 32, 3).astype(np.float32)
    dataset = tf.data.Dataset.from_tensor_slices(samples).batch(batch_size=4)
    dummy_inputs = next(iter(dataset))
    # Single fused-BN model with randomized statistics.
    inputs = tf.keras.Input(shape=(32, 32, 3))
    bn_out = tf.keras.layers.BatchNormalization(fused=True, beta_initializer='random_uniform', gamma_initializer='random_uniform', moving_mean_initializer='random_uniform', moving_variance_initializer='ones')(inputs)
    fp32_model = tf.keras.Model(inputs=inputs, outputs=bn_out)
    _reestimate_and_compare_results(fp32_model, dataset)
    # Repeat on the quantization simulation of the same model.
    sim = QuantizationSimModel(fp32_model)
    sim.compute_encodings(lambda m, _: m.predict(dummy_inputs + 1), None)
    sim.model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), loss=tf.keras.losses.MeanSquaredError())
    _reestimate_and_compare_results(sim.model, dataset)
class TestHarness(Component):
    """Test harness wiring a message source through a delay-pipe DUT to a sink."""

    def construct(s, dut_class, src_msgs, sink_msgs, latency, src_lat, sink_lat):
        s.src = TestSrcCL(None, src_msgs, 0, src_lat)
        s.dut = dut_class(latency)
        s.sink = TestSinkCL(None, sink_msgs, 0, sink_lat)
        connect(s.src.send, s.dut.enq)
        if dut_class is DelayPipeDeqCL:
            # Deq-style DUT exposes deq() instead of a send port, so adapt it
            # to the sink with an update block.
            # Fixed: the stripped '_once' residue is restored to the
            # @update_once decorator so the block is actually registered.
            @update_once
            def up_adapt():
                if s.dut.deq.rdy() and s.sink.recv.rdy():
                    s.sink.recv(s.dut.deq())
        elif dut_class is DelayPipeSendCL:
            connect(s.dut.send, s.sink.recv)

    def done(s):
        """Simulation is finished when both source and sink are exhausted."""
        return s.src.done() and s.sink.done()

    def line_trace(s):
        """One-line trace: src >>> dut >>> sink."""
        return s.src.line_trace() + ' >>> ' + s.dut.line_trace() + ' >>> ' + s.sink.line_trace()
class EpisodicDataset():
    """Iterable yielding few-shot episodes (`args.n_ways` classes x k shots).

    Assumes the underlying data is grouped by class in equal-sized,
    contiguous runs of ``length // num_classes`` samples each.
    """

    def __init__(self, data, num_classes, transforms=[], episode_size=args.batch_size, device=args.dataset_device, use_hd=False):
        if torch.is_tensor(data):
            self.length = data.shape[0]
            self.data = data.to(device)
        else:
            self.data = data
            self.length = len(self.data)
        self.episode_size = episode_size
        self.transforms = transforms
        self.num_classes = num_classes
        self.n_batches = args.episodes_per_epoch
        self.use_hd = use_hd
        self.device = device

    def __iter__(self):
        """Generate `n_batches` episodes of (inputs, targets)."""
        per_class = self.length // self.num_classes
        shots = self.episode_size // args.n_ways
        for _ in range(self.n_batches):
            # Draw n_ways distinct classes, then `shots` samples per class.
            chosen_classes = np.random.permutation(np.arange(self.num_classes))[:args.n_ways]
            indices = []
            for way in range(args.n_ways):
                picks = np.random.permutation(np.arange(per_class))[:shots]
                indices += list(picks + (chosen_classes[way] * per_class))
            targets = torch.repeat_interleave(torch.arange(args.n_ways), shots).to(self.device)
            if torch.is_tensor(self.data):
                yield self.transforms(self.data[indices]), targets
            elif self.use_hd:
                # Data holds image paths: load from disk on demand.
                yield torch.stack([self.transforms(transforms.ToTensor()(np.array(Image.open(self.data[x]).convert('RGB'))).to(self.device)) for x in indices]), targets
            else:
                yield torch.stack([self.transforms(self.data[x].to(self.device)) for x in indices]), targets

    def __len__(self):
        """One epoch contains `episodes_per_epoch` episodes."""
        return self.n_batches
def pair_within_simultaneously_binned(binned_majoranas: list) -> tuple:
    """Yield pairings of modes that have been grouped into bins.

    NOTE(review): despite the ``-> tuple`` annotation this is a generator
    (it yields pairings); the annotation looks stale — confirm and fix
    alongside the module's typing imports.

    :param binned_majoranas: list of bins, each a collection of modes
    """
    # Stage 1: pairings that can be formed simultaneously across all bins.
    iterators = [pair_within_simultaneously(bn) for bn in binned_majoranas]
    for pairing in _parallel_iter(iterators, flatten=True):
        (yield pairing)
    num_bins = len(binned_majoranas)
    if ((max([len(bn) for bn in binned_majoranas]) > 1) and (num_bins > 1)):
        # Stage 2: pairings fully inside each individual bin.
        iterators = [pair_within(bn) for bn in binned_majoranas]
        for pairing in _asynchronous_iter(iterators, flatten=True):
            (yield pairing)
    # Stage 3: pairings between bins whose indices differ by ``bin_gap``;
    # the ``bin_index < bin_index ^ bin_gap`` guard visits each unordered
    # bin pair at most once per gap.
    # NOTE(review): gaps >= num_bins // 2 are never visited — confirm intended.
    for bin_gap in range(1, (num_bins // 2)):
        iterators = []
        for bin_index in range(num_bins):
            if (bin_index < (bin_index ^ bin_gap)):
                iterators.append(pair_between(binned_majoranas[bin_index], binned_majoranas[(bin_index ^ bin_gap)]))
        for pairing in _asynchronous_iter(iterators, flatten=True):
            (yield pairing)
class _MockBase():
public_proxy = ('example',)
def __init__(self, name, fields=()):
self.test_data = {}
self.name = name
self.fields = fields
def track_call(func):
def wrapped(self, *args, **kwargs):
self.test_data[func.__name__] = True
return func(self, *args, **kwargs)
return wrapped
_call
def __eq__(self, other):
return (id(self) == id(other))
_call
def __hash__(self):
return hash(id(self))
_call
def get_field_names(self):
return self.fields
_call
def get_name(self):
return self.name
_call
def get_kind(self):
return 'tester'
_call
def validate_union(self, other):
pass
_call
def validate_intersection(self, other):
pass
_call
def is_element(self, value):
return self.name.startswith(value)
_call
def collapse_intersection(self, other):
return super().collapse_intersection(other)
_call
def is_symbol_subtype(self, other):
return (self.name == other.name)
_call
def is_symbol_supertype(self, other):
return (self.name == other.name)
_call
def update_ast(self, ast):
ast['extra_junk'] = self.name
def validate_field(self, name, field):
self.test_data['validate_field'] = name
if (field.name == 'InvalidMember'):
raise TypeError('InvalidMember cannot be used')
_call
def validate_predicate(self, predicate):
pass
_call
def example(self):
return ... |
def dump_pages(asinlist, filelist, mf, dirpath, fil, is_verbose):
    """Append one book-pages row to the CSV file ``mf``, skipping duplicates.

    :param asinlist: ASINs already present in the CSV (row[0] match skips)
    :param filelist: filenames already present in the CSV (row[6] match skips)
    :param mf: path of the CSV file to append to
    :param dirpath: directory passed through to ``get_pages``
    :param fil: filename passed through to ``get_pages``
    :param is_verbose: verbosity flag passed through to ``get_pages``
    """
    row = get_pages(dirpath, fil, is_verbose)
    if (row is None):
        return
    if (row[0] in asinlist):  # ASIN already recorded
        return
    if (row[6] in filelist):  # file already recorded
        return
    # Fix: csv.writer on Python 3 requires a text-mode file opened with
    # newline='' — the original binary 'ab' mode raises TypeError when the
    # writer emits str rows.
    with open(mf, 'a', newline='') as o:
        print('* Updating book pages CSV file...')
        csvwrite = csv.writer(o, delimiter=';', quotechar='"', quoting=csv.QUOTE_ALL)
        csvwrite.writerow(row)
def main(args):
    """Label ranked passages with answer-overlap judgments and write metrics.

    :param args: parsed CLI namespace with ``qas``, ``collection``,
        ``ranking``, ``output`` and ``output_metrics`` paths.
    """
    qas = load_qas_(args.qas)
    collection = load_collection_(args.collection, retain_titles=True)
    rankings = load_ranking(args.ranking)
    # Fix: use the pool as a context manager so worker processes are
    # reclaimed deterministically (the original pool was never terminated).
    with Pool(30) as parallel_pool:
        print_message('#> Tokenize the answers in the Q&As in parallel...')
        qas = list(parallel_pool.map(tokenize_all_answers, qas))
        qid2answers = {qid: tok_answers for (qid, _, tok_answers) in qas}
        assert (len(qas) == len(qid2answers)), (len(qas), len(qid2answers))
        print_message('#> Lookup passages from PIDs...')
        expanded_rankings = [(qid, pid, rank, collection[pid], qid2answers[qid]) for (qid, pid, rank, *_) in rankings]
        print_message('#> Assign labels in parallel...')
        labeled_rankings = list(parallel_pool.map(assign_label_to_passage, enumerate(expanded_rankings)))
    print_message('#> Dumping output to', args.output, '...')
    qid2rankings = groupby_first_item(labeled_rankings)
    (num_judged_queries, num_ranked_queries) = check_sizes(qid2answers, qid2rankings)
    (success, counts) = compute_and_write_labels(args.output, qid2answers, qid2rankings)
    with open(args.output_metrics, 'w') as f:
        d = {'num_ranked_queries': num_ranked_queries, 'num_judged_queries': num_judged_queries}
        # Tag metric keys with a warning suffix when some ranked queries
        # have no judgments, so the mismatch is visible in the output file.
        extra = ('__WARNING' if (num_judged_queries != num_ranked_queries) else '')
        d[f'success{extra}'] = {k: (v / num_judged_queries) for (k, v) in success.items()}
        d[f'counts{extra}'] = {k: (v / num_judged_queries) for (k, v) in counts.items()}
        d['arguments'] = get_metadata(args)
        f.write((format_metadata(d) + '\n'))
    print('\n\n')
    print(args.output)
    print(args.output_metrics)
    print('#> Done\n')
def test_charclass_fsm_2() -> None:
    """FSM built from the two-character class [bc] accepts exactly 'b' or 'c'."""
    cls = Charclass('bc')
    fsm = from_charclass(cls)
    assert fsm.alphabet == {cls, ~cls}
    # State 0 is initial, state 1 is accepting, state 2 is the dead state.
    expected_map = {
        0: {cls: 1, ~cls: 2},
        1: {cls: 2, ~cls: 2},
        2: {cls: 2, ~cls: 2},
    }
    assert fsm.map == expected_map
    for accepted in ('b', 'c'):
        assert fsm.accepts(accepted)
    for rejected in ('', 'a', 'd', 'bc'):
        assert not fsm.accepts(rejected)
class Logger(object):
    """Minimal training logger: plain-text log plus optional TensorBoard (TF1) summaries.

    ``LOG`` is a module-level flag; when False, only the text log is written
    and all summary methods that guard on LOG become no-ops.
    """
    def __init__(self, log_dir):
        if LOG:
            # FileWriter creates log_dir if needed.
            self.writer = tf.summary.FileWriter(log_dir)
            self.f = open((log_dir + '/log.txt'), 'w')
        else:
            # NOTE(review): mkdir raises if log_dir already exists — confirm
            # callers always pass a fresh directory in the non-LOG path.
            os.mkdir(log_dir)
            self.f = open((log_dir + '/log.txt'), 'w')
    def write(self, txt):
        """Append raw text to the plain-text log file."""
        self.f.write(txt)
    def close(self):
        """Close the plain-text log file (the summary writer is left open)."""
        self.f.close()
    def scalar_summary(self, tag, value, step):
        """Log a single scalar to TensorBoard (no-op when LOG is False)."""
        if LOG:
            summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
            self.writer.add_summary(summary, step)
    def image_summary(self, tag, images, step):
        """Log a sequence of images as PNG-encoded TensorBoard summaries."""
        img_summaries = []
        for (i, img) in enumerate(images):
            # NOTE(review): StringIO() cannot raise here, so the except branch
            # is dead — this looks like a mangled py2/py3 import fallback; the
            # binary PNG write below actually needs BytesIO on Python 3.
            try:
                s = StringIO()
            except:
                s = BytesIO()
            scipy.misc.toimage(img).save(s, format='png')
            img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(), height=img.shape[0], width=img.shape[1])
            # Each image is tagged "tag/index".
            img_summaries.append(tf.Summary.Value(tag=('%s/%d' % (tag, i)), image=img_sum))
        summary = tf.Summary(value=img_summaries)
        self.writer.add_summary(summary, step)
    def histo_summary(self, tag, values, step, bins=1000):
        """Log a histogram of ``values`` as a TensorBoard HistogramProto."""
        (counts, bin_edges) = np.histogram(values, bins=bins)
        hist = tf.HistogramProto()
        hist.min = float(np.min(values))
        hist.max = float(np.max(values))
        hist.num = int(np.prod(values.shape))
        hist.sum = float(np.sum(values))
        hist.sum_squares = float(np.sum((values ** 2)))
        # HistogramProto stores only right bin edges; drop the leftmost edge.
        bin_edges = bin_edges[1:]
        for edge in bin_edges:
            hist.bucket_limit.append(edge)
        for c in counts:
            hist.bucket.append(c)
        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
        self.writer.add_summary(summary, step)
        self.writer.flush()
class QuantSimConfigurator(AimetCommonQuantSimConfigurator):
    def __init__(self, connected_graph: ConnectedGraph, quant_scheme: Union[(QuantScheme, str)], rounding_mode: str, default_output_bw: int, default_param_bw: int, default_data_type: QuantizationDataType=QuantizationDataType.int, config_file: str=None):
        """Parse the quantsim configuration and initialize per-layer quantizers.

        :param connected_graph: Connected graph of the model being quantized
        :param quant_scheme: Quantization scheme to use
        :param rounding_mode: Rounding mode for quantizers
        :param default_output_bw: Default bitwidth for activation quantizers
        :param default_param_bw: Default bitwidth for parameter quantizers
        :param default_data_type: Default quantization data type (int/float)
        :param config_file: Optional path to a quantsim configuration JSON file
        """
        super(QuantSimConfigurator, self).__init__(config_file, default_data_type, default_output_bw, default_param_bw)
        self._connected_graph = connected_graph
        # Precompute, per layer, which tensor quantizers its settings affect.
        self._layer_to_affected_quantizer_info_dict = self._create_layer_to_affected_quantizer_info_dict()
        self._layer_to_config_dict = TreeLikeDictionary()
        self._layer_to_quantizers_dict = TreeLikeDictionary()
        # Apply the configuration sections, then materialize the quantizers.
        self._set_quantsim_configs()
        self.per_channel_quantization_flag = self._parse_per_channel_quantization().get('defaults')
        self._initialize_quantizers_by_layer(quant_scheme, rounding_mode, default_output_bw, default_param_bw, default_data_type)
def _create_layer_to_affected_quantizer_info_dict(self) -> Dict[(layers.Layer, LayerAffectedQuantizerTupleType)]:
layer_to_tensor_quantizers_dict = {}
for op in self._connected_graph.ordered_ops:
affected_quantizers_when_input_enabled = _get_affected_tensor_quantizers_by_true_setting(op, 'input')
affected_quantizers_when_output_enabled = _get_affected_tensor_quantizers_by_true_setting(op, 'output')
affected_quantizers_when_input_disabled = _get_affected_tensor_quantizers_by_false_setting(op, 'input')
affected_quantizers_when_output_disabled = _get_affected_tensor_quantizers_by_false_setting(op, 'output')
layer_to_tensor_quantizers_dict[op.get_module()] = (affected_quantizers_when_input_enabled, affected_quantizers_when_output_enabled, affected_quantizers_when_input_disabled, affected_quantizers_when_output_disabled)
return layer_to_tensor_quantizers_dict
    def _set_default_configs(self, default_configs: DefaultsType):
        """Seed every layer's config entry with the file's default settings.

        :param default_configs: the "defaults" section of the quantsim config
        """
        # These keys may legitimately be absent from the defaults section.
        optional_configs = [ConfigDictKeys.STRICT_SYMMETRIC, ConfigDictKeys.UNSIGNED_SYMMETRIC, ConfigDictKeys.PER_CHANNEL_QUANTIZATION]
        for op in self._connected_graph.ordered_ops:
            layer = op.get_module()
            self._layer_to_config_dict[layer] = TreeLikeDictionary()
            # Op-level defaults also record which tensor quantizers each
            # setting affects, for later conflict checking.
            for (config_key, config_val) in default_configs[ConfigDictKeys.OPS].items():
                self._layer_to_config_dict[layer][config_key][SETTING] = config_val
                self._layer_to_config_dict[layer][config_key][AFFECTED_QUANTIZERS] = self._get_affected_quantizers_by_config(layer, config_key, config_val)
            # Parameter-level defaults live one level deeper, under PARAMS.
            for (config_key, config_val) in default_configs[ConfigDictKeys.PARAMS].items():
                self._layer_to_config_dict[layer][ConfigDictKeys.PARAMS][config_key][SETTING] = config_val
            for config_key in optional_configs:
                if (config_key in default_configs):
                    self._layer_to_config_dict[layer][config_key][SETTING] = default_configs[config_key]
def _get_affected_quantizers_by_config(self, layer: layers.Layer, setting_name: str, quantizer_setting: bool) -> List[Tuple[(layers.Layer, str)]]:
(input_true_list, output_true_list, input_false_list, output_false_list) = self._layer_to_affected_quantizer_info_dict[layer]
if ((setting_name == ConfigDictKeys.IS_INPUT_QUANTIZED) and quantizer_setting):
return input_true_list
if ((setting_name == ConfigDictKeys.IS_OUTPUT_QUANTIZED) and quantizer_setting):
return output_true_list
if ((setting_name == ConfigDictKeys.IS_INPUT_QUANTIZED) and (not quantizer_setting)):
return input_false_list
if ((setting_name == ConfigDictKeys.IS_OUTPUT_QUANTIZED) and (not quantizer_setting)):
return output_false_list
if (setting_name == ConfigDictKeys.IS_SYMMETRIC):
return (input_false_list + output_false_list)
_logger.error('Encountered unrecognized case for setting name %s, setting value %s', setting_name, quantizer_setting)
raise ValueError
def _set_param_configs(self, param_configs: ParamType):
for op in self._connected_graph.ordered_ops:
layer = op.get_module()
self._update_layer_param_config(layer, param_configs)
    def _set_op_type_configs(self, op_configs: OpTypeType):
        """Apply op-type specific settings (overriding defaults) to each layer.

        :param op_configs: mapping from op type name to its config dictionary
        """
        for op in self._connected_graph.ordered_ops:
            layer = op.get_module()
            if (op.type in op_configs):
                for (config_key, config_val) in op_configs[op.type].items():
                    if (config_key == ConfigDictKeys.PARAMS):
                        # Param settings are nested and handled separately.
                        self._update_layer_param_config(layer, config_val)
                    else:
                        self._layer_to_config_dict[layer][config_key][SETTING] = config_val
                        # Per-channel quantization has no affected-quantizer
                        # bookkeeping, so skip that step for it.
                        if (config_key == ConfigDictKeys.PER_CHANNEL_QUANTIZATION):
                            continue
                        self._layer_to_config_dict[layer][config_key][AFFECTED_QUANTIZERS] = self._get_affected_quantizers_by_config(layer, config_key, config_val)
def _update_layer_param_config(self, layer: layers.Layer, param_configs: ParamType):
for (param_type, param_config_dict) in param_configs.items():
for (config_key, config_val) in param_config_dict.items():
self._layer_to_config_dict[layer][ConfigDictKeys.PARAMS][param_type][config_key][SETTING] = config_val
    def _set_supergroup_configs(self, supergroups_configs: List[SupergroupType]):
        """Apply supergroup (fused op-sequence) settings from the config file.

        Conv/Linear + BatchNorm pairs eligible for scale folding are excluded
        from supergroup pattern matching and instead receive fused-quantizer
        settings directly via ``fuse_config``.

        :param supergroups_configs: list of supergroup entries from the config
        """
        def find_scale_foldable_bns(cg):
            # Collect (conv-like op, batchnorm) pairs that can be scale-folded.
            conv_bn_pairs = []
            def handler(_, op_list):
                (conv, bn) = op_list
                conv_bn_pairs.append((conv, bn))
            patterns_with_callbacks = []
            conv_types = ['Conv', 'ConvTranspose']
            linear_types = ['Gemm', 'MatMul']
            for op_type in (conv_types + linear_types):
                patterns_with_callbacks.append(PatternType(pattern=[op_type, 'BatchNormalization'], action=handler))
            graph_searcher = GraphSearcher(cg, patterns_with_callbacks)
            graph_searcher.find_all_patterns_in_graph_apply_actions()
            return conv_bn_pairs
        conv_bn_pairs = find_scale_foldable_bns(self._connected_graph)
        foldable_bns = [bn for (_, bn) in conv_bn_pairs]
        patterns_with_callbacks = []
        for supergroup_config in supergroups_configs:
            callback = SupergroupConfigCallback(self._layer_to_config_dict)
            op_list = supergroup_config[ConfigDictKeys.OP_LIST]
            patterns_with_callbacks.append(PatternType(pattern=op_list, action=callback))
        if patterns_with_callbacks:
            graph_searcher = GraphSearcher(self._connected_graph, patterns_with_callbacks)
            # Foldable BNs are ignored so supergroup patterns match across them.
            graph_searcher.find_all_patterns_in_graph_apply_actions(ignore=foldable_bns)
        def fuse_config(conv: Op, bn: Op):
            # Treat conv+bn as one fused op: the BN's input and params are not
            # quantized, the fused output inherits the conv's output setting,
            # and the conv's own output quantizer is disabled.
            conv_layer = conv.get_module()
            bn_layer = bn.get_module()
            if (conv_layer not in self._layer_to_config_dict):
                return
            if (bn_layer not in self._layer_to_config_dict):
                return
            self._layer_to_config_dict[bn_layer][ConfigDictKeys.IS_INPUT_QUANTIZED][SETTING] = False
            self._layer_to_config_dict[bn_layer][ConfigDictKeys.PARAMS][ConfigDictKeys.IS_QUANTIZED][SETTING] = False
            self._layer_to_config_dict[bn_layer][ConfigDictKeys.IS_OUTPUT_QUANTIZED][SETTING] = self._layer_to_config_dict[conv_layer][ConfigDictKeys.IS_OUTPUT_QUANTIZED][SETTING]
            self._layer_to_config_dict[conv_layer][ConfigDictKeys.IS_OUTPUT_QUANTIZED][SETTING] = False
        for (conv, bn) in conv_bn_pairs:
            fuse_config(conv, bn)
def _set_model_input_configs(self, model_input_configs: ConfigType):
input_ops = get_all_input_ops(self._connected_graph)
for op in input_ops:
layer = op.get_module()
for (config_key, config_val) in model_input_configs.items():
self._layer_to_config_dict[layer][config_key][SETTING] = config_val
self._layer_to_config_dict[layer][config_key][AFFECTED_QUANTIZERS] = self._get_affected_quantizers_by_config(layer, config_key, config_val)
def _set_model_output_configs(self, model_output_configs: ConfigType):
output_ops = get_all_output_ops(self._connected_graph)
for op in output_ops:
layer = op.get_module()
for (config_key, config_val) in model_output_configs.items():
self._layer_to_config_dict[layer][config_key][SETTING] = config_val
self._layer_to_config_dict[layer][config_key][AFFECTED_QUANTIZERS] = self._get_affected_quantizers_by_config(layer, config_key, config_val)
    def _initialize_quantizers_by_layer(self, quant_scheme: Union[(QuantScheme, str)], rounding_mode: str, default_output_bw: int, default_param_bw: int, default_data_type: QuantizationDataType=QuantizationDataType.int):
        """Create input/output/param quantizers for each layer from its resolved config.

        :param quant_scheme: Quantization scheme to use
        :param rounding_mode: Rounding mode for quantizers
        :param default_output_bw: Default bitwidth for activation quantizers
        :param default_param_bw: Default bitwidth for parameter quantizers
        :param default_data_type: Default quantization data type (int/float)
        """
        for (layer, config_dict) in self._layer_to_config_dict.items():
            # Resolved op-level settings (absent keys default to False).
            use_unsigned_symmetric = config_dict[ConfigDictKeys.UNSIGNED_SYMMETRIC].get(SETTING, False)
            use_strict_symmetric = config_dict[ConfigDictKeys.STRICT_SYMMETRIC].get(SETTING, False)
            ops_is_symmetric = config_dict[ConfigDictKeys.IS_SYMMETRIC].get(SETTING, False)
            input_quantizer_enabled = config_dict[ConfigDictKeys.IS_INPUT_QUANTIZED].get(SETTING, False)
            output_quantizer_enabled = config_dict[ConfigDictKeys.IS_OUTPUT_QUANTIZED].get(SETTING, False)
            activation_quant_settings = QuantizerSettings(default_output_bw, default_data_type, rounding_mode, quant_scheme, ops_is_symmetric, use_unsigned_symmetric, use_strict_symmetric)
            # Raise early if this layer's settings conflict with any quantizer
            # shared with a neighboring layer.
            self._check_existence_of_conflict_case(layer, ConfigDictKeys.IS_INPUT_QUANTIZED, input_quantizer_enabled)
            self._check_existence_of_conflict_case(layer, ConfigDictKeys.IS_OUTPUT_QUANTIZED, output_quantizer_enabled)
            self._check_existence_of_conflict_case(layer, ConfigDictKeys.IS_SYMMETRIC, ops_is_symmetric)
            self._layer_to_quantizers_dict[layer][INPUT_QUANTIZERS] = _initialize_input_quantizers(layer, activation_quant_settings, input_quantizer_enabled)
            self._layer_to_quantizers_dict[layer][OUTPUT_QUANTIZERS] = _initialize_output_quantizers(layer, activation_quant_settings, output_quantizer_enabled)
            # Parameter quantizers use their own (possibly per-channel) settings.
            param_config_dict = config_dict[ConfigDictKeys.PARAMS]
            param_is_symmetric = param_config_dict[ConfigDictKeys.IS_SYMMETRIC].get(SETTING, False)
            param_quantizer_enabled = param_config_dict[ConfigDictKeys.IS_QUANTIZED].get(SETTING, False)
            param_quant_settings = QuantizerSettings(default_param_bw, default_data_type, rounding_mode, quant_scheme, param_is_symmetric, use_unsigned_symmetric, use_strict_symmetric, enabled=param_quantizer_enabled)
            # Layer-specific per-channel flag falls back to the global default.
            per_channel_quantization_flag = config_dict[ConfigDictKeys.PER_CHANNEL_QUANTIZATION].get(SETTING, self.per_channel_quantization_flag)
            self._layer_to_quantizers_dict[layer][PARAM_QUANTIZERS] = _initialize_param_quantizers(layer, param_config_dict, param_quant_settings, per_channel_quantization_flag)
    def _check_existence_of_conflict_case(self, layer: layers.Layer, config_key: str, current_setting: bool):
        """Verify that ``layer``'s setting agrees with every affected neighbor.

        :param layer: layer whose setting is being applied
        :param config_key: which setting is being checked
        :param current_setting: the value being applied for ``config_key``
        :raises RuntimeError: if a shared tensor quantizer would receive
            conflicting settings from two layers
        """
        for (affected_layer, direction) in self._layer_to_config_dict[layer][config_key][AFFECTED_QUANTIZERS]:
            if (config_key in [ConfigDictKeys.IS_INPUT_QUANTIZED, ConfigDictKeys.IS_OUTPUT_QUANTIZED]):
                # The affected quantizer may sit on the other layer's opposite
                # side, so check the key matching its direction.
                config_key_to_check = f'is_{direction}_quantized'
            elif (config_key == ConfigDictKeys.IS_SYMMETRIC):
                config_key_to_check = config_key
            else:
                raise ValueError('Unsupported case of config key')
            quantizer_setting = self._layer_to_config_dict[affected_layer][config_key_to_check].get(SETTING, False)
            if (current_setting != quantizer_setting):
                _logger.error('Conflicting tensor quantizer settings for %s, expected: %s, actual: %s', config_key, current_setting, quantizer_setting)
                raise RuntimeError
    def get_quantizers_dict(self, layer: layers.Layer) -> TreeLikeDictionary:
        """Return the input/output/param quantizer dict for ``layer``
        (None if the layer is unknown, assuming dict-like ``.get`` semantics)."""
        return self._layer_to_quantizers_dict.get(layer)
def _override_default_act_bw_dtype(self, data_type: QuantizationDataType, bitwidth: int):
def _override_default_param_bw_dtype(self, data_type: QuantizationDataType, bitwidth: int):
def _override_param_bw_dtype(self, quantizer_data, data_type: QuantizationDataType, bitwidth: int):
def _override_act_bw_dtype(self, quantizer_data, data_type: QuantizationDataType, bitwidth: int):
def _generate_and_apply_op_instance_specific_config(self): |
class TestDataset(object):
def check_keys_contain(result_keys, target_keys):
return set(target_keys).issubset(set(result_keys))
def setup_class(cls):
cls.data_prefix = osp.join(osp.dirname(osp.dirname(__file__)), 'data')
cls.frame_ann_file = osp.join(cls.data_prefix, 'frame_test_list.txt')
cls.frame_ann_file_with_offset = osp.join(cls.data_prefix, 'frame_test_list_with_offset.txt')
cls.frame_ann_file_multi_label = osp.join(cls.data_prefix, 'frame_test_list_multi_label.txt')
cls.video_ann_file = osp.join(cls.data_prefix, 'video_test_list.txt')
cls.action_ann_file = osp.join(cls.data_prefix, 'action_test_anno.json')
cls.proposal_ann_file = osp.join(cls.data_prefix, 'proposal_test_list.txt')
cls.proposal_norm_ann_file = osp.join(cls.data_prefix, 'proposal_normalized_list.txt')
cls.frame_pipeline = [dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), dict(type='RawFrameDecode', io_backend='disk')]
cls.video_pipeline = [dict(type='OpenCVInit'), dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), dict(type='OpenCVDecode')]
cls.action_pipeline = []
cls.proposal_pipeline = [dict(type='SampleProposalFrames', clip_len=1, body_segments=5, aug_segments=(2, 2), aug_ratio=0.5), dict(type='FrameSelector', io_backend='disk')]
cls.proposal_test_pipeline = [dict(type='SampleProposalFrames', clip_len=1, body_segments=5, aug_segments=(2, 2), aug_ratio=0.5, mode='test'), dict(type='FrameSelector', io_backend='disk')]
cls.proposal_train_cfg = ConfigDict(dict(ssn=dict(assigner=dict(positive_iou_threshold=0.7, background_iou_threshold=0.01, incomplete_iou_threshold=0.5, background_coverage_threshold=0.02, incomplete_overlap_threshold=0.01), sampler=dict(num_per_video=8, positive_ratio=1, background_ratio=1, incomplete_ratio=6, add_gt_as_proposals=True), loss_weight=dict(comp_loss_weight=0.1, reg_loss_weight=0.1), debug=False)))
cls.proposal_test_cfg = ConfigDict(dict(ssn=dict(sampler=dict(test_interval=6, batch_size=16), evaluater=dict(top_k=2000, nms=0.2, softmax_before_filter=True, cls_top_k=2))))
cls.proposal_test_cfg_topall = ConfigDict(dict(ssn=dict(sampler=dict(test_interval=6, batch_size=16), evaluater=dict(top_k=(- 1), nms=0.2, softmax_before_filter=True, cls_top_k=2))))
def test_rawframe_dataset(self):
rawframe_dataset = RawframeDataset(self.frame_ann_file, self.frame_pipeline, self.data_prefix)
rawframe_infos = rawframe_dataset.video_infos
frame_dir = osp.join(self.data_prefix, 'test_imgs')
assert (rawframe_infos == ([dict(frame_dir=frame_dir, total_frames=5, label=127)] * 2))
assert (rawframe_dataset.start_index == 1)
def test_rawframe_dataset_with_offset(self):
rawframe_dataset = RawframeDataset(self.frame_ann_file_with_offset, self.frame_pipeline, self.data_prefix, with_offset=True)
rawframe_infos = rawframe_dataset.video_infos
frame_dir = osp.join(self.data_prefix, 'test_imgs')
assert (rawframe_infos == ([dict(frame_dir=frame_dir, offset=2, total_frames=5, label=127)] * 2))
assert (rawframe_dataset.start_index == 1)
def test_rawframe_dataset_multi_label(self):
rawframe_dataset = RawframeDataset(self.frame_ann_file_multi_label, self.frame_pipeline, self.data_prefix, multi_class=True, num_classes=100)
rawframe_infos = rawframe_dataset.video_infos
frame_dir = osp.join(self.data_prefix, 'test_imgs')
label0 = torch.zeros(100)
label0[[1]] = 1.0
label1 = torch.zeros(100)
label1[[3, 5]] = 1.0
labels = [label0, label1]
for (info, label) in zip(rawframe_infos, labels):
assert (info['frame_dir'] == frame_dir)
assert (info['total_frames'] == 5)
assert torch.all((info['label'] == label))
assert (rawframe_dataset.start_index == 1)
def test_dataset_realpath(self):
dataset = RawframeDataset(self.frame_ann_file, self.frame_pipeline, '.')
assert (dataset.data_prefix == osp.realpath('.'))
dataset = RawframeDataset(self.frame_ann_file, self.frame_pipeline, 's3://good')
assert (dataset.data_prefix == 's3://good')
def test_video_dataset(self):
video_dataset = VideoDataset(self.video_ann_file, self.video_pipeline, data_prefix=self.data_prefix)
video_infos = video_dataset.video_infos
video_filename = osp.join(self.data_prefix, 'test.mp4')
assert (video_infos == ([dict(filename=video_filename, label=0)] * 2))
assert (video_dataset.start_index == 0)
def test_rawframe_pipeline(self):
target_keys = ['frame_dir', 'total_frames', 'label', 'filename_tmpl', 'start_index', 'modality']
rawframe_dataset = RawframeDataset(self.frame_ann_file, self.frame_pipeline, self.data_prefix, test_mode=False)
result = rawframe_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
rawframe_dataset = RawframeDataset(self.frame_ann_file, self.frame_pipeline, self.data_prefix, multi_class=True, num_classes=400, test_mode=False)
result = rawframe_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
rawframe_dataset = RawframeDataset(self.frame_ann_file_with_offset, self.frame_pipeline, self.data_prefix, with_offset=True, num_classes=400, test_mode=False)
result = rawframe_dataset[0]
assert self.check_keys_contain(result.keys(), (target_keys + ['offset']))
rawframe_dataset = RawframeDataset(self.frame_ann_file, self.frame_pipeline, self.data_prefix, test_mode=True)
result = rawframe_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
rawframe_dataset = RawframeDataset(self.frame_ann_file, self.frame_pipeline, self.data_prefix, multi_class=True, num_classes=400, test_mode=True)
result = rawframe_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
rawframe_dataset = RawframeDataset(self.frame_ann_file_with_offset, self.frame_pipeline, self.data_prefix, with_offset=True, num_classes=400, test_mode=True)
result = rawframe_dataset[0]
assert self.check_keys_contain(result.keys(), (target_keys + ['offset']))
def test_video_pipeline(self):
target_keys = ['filename', 'label', 'start_index', 'modality']
video_dataset = VideoDataset(self.video_ann_file, self.video_pipeline, data_prefix=self.data_prefix, test_mode=False)
result = video_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
video_dataset = VideoDataset(self.video_ann_file, self.video_pipeline, data_prefix=self.data_prefix, test_mode=True)
result = video_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
def test_action_pipeline(self):
target_keys = ['video_name', 'data_prefix']
action_dataset = ActivityNetDataset(self.action_ann_file, self.action_pipeline, self.data_prefix, test_mode=False)
result = action_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
action_dataset = ActivityNetDataset(self.action_ann_file, self.action_pipeline, self.data_prefix, test_mode=True)
result = action_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
def test_proposal_pipeline(self):
target_keys = ['frame_dir', 'video_id', 'total_frames', 'gts', 'proposals', 'filename_tmpl', 'modality', 'out_proposals', 'reg_targets', 'proposal_scale_factor', 'proposal_labels', 'proposal_type', 'start_index']
proposal_dataset = SSNDataset(self.proposal_ann_file, self.proposal_pipeline, self.proposal_train_cfg, self.proposal_test_cfg, data_prefix=self.data_prefix)
result = proposal_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
proposal_dataset = SSNDataset(self.proposal_ann_file, self.proposal_pipeline, self.proposal_train_cfg, self.proposal_test_cfg, data_prefix=self.data_prefix, video_centric=False)
result = proposal_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
target_keys = ['frame_dir', 'video_id', 'total_frames', 'gts', 'proposals', 'filename_tmpl', 'modality', 'relative_proposal_list', 'scale_factor_list', 'proposal_tick_list', 'reg_norm_consts', 'start_index']
proposal_dataset = SSNDataset(self.proposal_ann_file, self.proposal_test_pipeline, self.proposal_train_cfg, self.proposal_test_cfg, data_prefix=self.data_prefix, test_mode=True)
result = proposal_dataset[0]
assert self.check_keys_contain(result.keys(), target_keys)
def test_rawframe_evaluate(self):
rawframe_dataset = RawframeDataset(self.frame_ann_file, self.frame_pipeline, self.data_prefix)
with pytest.raises(TypeError):
rawframe_dataset.evaluate('0.5')
with pytest.raises(AssertionError):
rawframe_dataset.evaluate(([0] * 5))
with pytest.raises(TypeError):
rawframe_dataset.evaluate(([0] * len(rawframe_dataset)), topk=1.0)
with pytest.raises(KeyError):
rawframe_dataset.evaluate(([0] * len(rawframe_dataset)), metrics='iou')
results = ([np.array([0.1, 0.5, 0.4])] * 2)
eval_result = rawframe_dataset.evaluate(results, metrics=['top_k_accuracy', 'mean_class_accuracy'])
assert (set(eval_result.keys()) == set(['top1_acc', 'top5_acc', 'mean_class_accuracy']))
def test_video_evaluate(self):
video_dataset = VideoDataset(self.video_ann_file, self.video_pipeline, data_prefix=self.data_prefix)
with pytest.raises(TypeError):
video_dataset.evaluate('0.5')
with pytest.raises(AssertionError):
video_dataset.evaluate(([0] * 5))
with pytest.raises(TypeError):
video_dataset.evaluate(([0] * len(video_dataset)), topk=1.0)
with pytest.raises(KeyError):
video_dataset.evaluate(([0] * len(video_dataset)), metrics='iou')
results = ([np.array([0.1, 0.5, 0.4])] * 2)
eval_result = video_dataset.evaluate(results, metrics=['top_k_accuracy', 'mean_class_accuracy'])
assert (set(eval_result.keys()) == set(['top1_acc', 'top5_acc', 'mean_class_accuracy']))
def test_base_dataset(self):
video_dataset = VideoDataset(self.video_ann_file, self.video_pipeline, data_prefix=self.data_prefix, start_index=3)
assert (len(video_dataset) == 2)
assert (video_dataset.start_index == 3)
def test_repeat_dataset(self):
rawframe_dataset = RawframeDataset(self.frame_ann_file, self.frame_pipeline, self.data_prefix)
repeat_dataset = RepeatDataset(rawframe_dataset, 5)
assert (len(repeat_dataset) == 10)
result_a = repeat_dataset[0]
result_b = repeat_dataset[2]
assert (set(result_a.keys()) == set(result_b.keys()))
for key in result_a:
if isinstance(result_a[key], np.ndarray):
assert np.equal(result_a[key], result_b[key]).all()
elif isinstance(result_a[key], list):
assert all((np.array_equal(a, b) for (a, b) in zip(result_a[key], result_b[key])))
else:
assert (result_a[key] == result_b[key])
def test_activitynet_dataset(self):
activitynet_dataset = ActivityNetDataset(self.action_ann_file, self.action_pipeline, self.data_prefix)
activitynet_infos = activitynet_dataset.video_infos
assert (activitynet_infos == [dict(video_name='v_test1', duration_second=1, duration_frame=30, annotations=[dict(segment=[0.3, 0.6], label='Rock climbing')], feature_frame=30, fps=30.0, rfps=30), dict(video_name='v_test2', duration_second=2, duration_frame=48, annotations=[dict(segment=[1.0, 2.0], label='Drinking beer')], feature_frame=48, fps=24.0, rfps=24.0)])
def test_activitynet_proposals2json(self):
activitynet_dataset = ActivityNetDataset(self.action_ann_file, self.action_pipeline, self.data_prefix)
results = [dict(video_name='v_test1', proposal_list=[dict(segment=[0.1, 0.9], score=0.1)]), dict(video_name='v_test2', proposal_list=[dict(segment=[10.1, 20.9], score=0.9)])]
result_dict = activitynet_dataset.proposals2json(results)
assert (result_dict == dict(test1=[{'segment': [0.1, 0.9], 'score': 0.1}], test2=[{'segment': [10.1, 20.9], 'score': 0.9}]))
result_dict = activitynet_dataset.proposals2json(results, True)
assert (result_dict == dict(test1=[{'segment': [0.1, 0.9], 'score': 0.1}], test2=[{'segment': [10.1, 20.9], 'score': 0.9}]))
def test_activitynet_evaluate(self):
activitynet_dataset = ActivityNetDataset(self.action_ann_file, self.action_pipeline, self.data_prefix)
with pytest.raises(TypeError):
activitynet_dataset.evaluate('0.5')
with pytest.raises(AssertionError):
activitynet_dataset.evaluate(([0] * 5))
with pytest.raises(KeyError):
activitynet_dataset.evaluate(([0] * len(activitynet_dataset)), metrics='iou')
results = [dict(video_name='v_test1', proposal_list=[dict(segment=[0.1, 0.9], score=0.1)]), dict(video_name='v_test2', proposal_list=[dict(segment=[10.1, 20.9], score=0.9)])]
eval_result = activitynet_dataset.evaluate(results, metrics=[''])
assert (set(eval_result) == set(['auc', '', '', '', '']))
def test_activitynet_dump_results(self):
activitynet_dataset = ActivityNetDataset(self.action_ann_file, self.action_pipeline, self.data_prefix)
results = [dict(video_name='v_test1', proposal_list=[dict(segment=[0.1, 0.9], score=0.1)]), dict(video_name='v_test2', proposal_list=[dict(segment=[10.1, 20.9], score=0.9)])]
dump_results = {'version': 'VERSION 1.3', 'results': {'test1': [{'segment': [0.1, 0.9], 'score': 0.1}], 'test2': [{'segment': [10.1, 20.9], 'score': 0.9}]}, 'external_data': {}}
tmp_filename = osp.join(tempfile.gettempdir(), 'result.json')
activitynet_dataset.dump_results(results, tmp_filename, 'json')
assert osp.isfile(tmp_filename)
with open(tmp_filename, 'r+') as f:
load_obj = mmcv.load(f, file_format='json')
assert (load_obj == dump_results)
os.remove(tmp_filename)
results = [('test_video', np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]))]
with tempfile.TemporaryDirectory() as tmpdir:
activitynet_dataset.dump_results(results, tmpdir, 'csv')
load_obj = np.loadtxt(osp.join(tmpdir, 'test_video.csv'), dtype=np.float32, delimiter=',', skiprows=1)
assert_array_equal(load_obj, np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]], dtype=np.float32))
def test_ssn_dataset(self):
ssn_dataset = SSNDataset(self.proposal_ann_file, self.proposal_pipeline, self.proposal_train_cfg, self.proposal_test_cfg, data_prefix=self.data_prefix)
ssn_infos = ssn_dataset.video_infos
assert (ssn_infos[0]['video_id'] == 'test_imgs')
assert (ssn_infos[0]['total_frames'] == 5)
ssn_dataset = SSNDataset(self.proposal_ann_file, self.proposal_pipeline, self.proposal_train_cfg, self.proposal_test_cfg, data_prefix=self.data_prefix, verbose=True)
ssn_infos = ssn_dataset.video_infos
assert (ssn_infos[0]['video_id'] == 'test_imgs')
assert (ssn_infos[0]['total_frames'] == 5)
with pytest.raises(Exception):
ssn_dataset = SSNDataset(self.proposal_norm_ann_file, self.proposal_pipeline, self.proposal_train_cfg, self.proposal_test_cfg, data_prefix=self.data_prefix)
ssn_infos = ssn_dataset.video_infos
ssn_dataset = SSNDataset(self.proposal_ann_file, self.proposal_pipeline, self.proposal_train_cfg, self.proposal_test_cfg, data_prefix=self.data_prefix, reg_normalize_constants=[[[(- 0.0603), 0.0325], [0.0752, 0.1596]]])
ssn_infos = ssn_dataset.video_infos
assert (ssn_infos[0]['video_id'] == 'test_imgs')
assert (ssn_infos[0]['total_frames'] == 5)
with pytest.raises(TypeError):
ssn_dataset = SSNDataset(self.proposal_ann_file, self.proposal_pipeline, self.proposal_train_cfg, self.proposal_test_cfg, data_prefix=self.data_prefix, aug_ratio=('error', 'error'))
ssn_infos = ssn_dataset.video_infos
def test_ssn_evaluate(self):
    """SSNDataset.evaluate: input validation and mAP result keys."""
    ssn_dataset = SSNDataset(
        self.proposal_ann_file, self.proposal_pipeline,
        self.proposal_train_cfg, self.proposal_test_cfg,
        data_prefix=self.data_prefix)
    ssn_dataset_topall = SSNDataset(
        self.proposal_ann_file, self.proposal_pipeline,
        self.proposal_train_cfg, self.proposal_test_cfg_topall,
        data_prefix=self.data_prefix)

    # results must be a list ...
    with pytest.raises(TypeError):
        ssn_dataset.evaluate('0.5')
    # ... whose length matches the dataset.
    with pytest.raises(AssertionError):
        ssn_dataset.evaluate([0] * 5)
    # Unknown metric names are rejected.
    with pytest.raises(KeyError):
        ssn_dataset.evaluate([0] * len(ssn_dataset), metrics='iou')

    def _fake_results():
        # One video: (relative proposals, activity scores,
        # completeness scores, per-class bbox regression preds).
        return [[np.random.randn(16, 2),
                 np.random.randn(16, 21),
                 np.random.randn(16, 20),
                 np.random.randn(16, 20, 2)]]

    # IoU thresholds 0.10 .. 0.90. The original literal listed '.50'
    # twice; the set still has 9 elements, the duplicate was redundant.
    expected_keys = {'.10', '.20', '.30', '.40', '.50',
                     '.60', '.70', '.80', '.90'}

    eval_result = ssn_dataset.evaluate(_fake_results(), metrics=['mAP'])
    assert set(eval_result) == expected_keys

    eval_result = ssn_dataset_topall.evaluate(
        _fake_results(), metrics=['mAP'])
    assert set(eval_result) == expected_keys
class TestWasserstein1D(MetricClassTester):
    """Unit tests for the ``Wasserstein1D`` metric, cross-checked against
    ``sp_wasserstein`` (scipy's 1-D Wasserstein distance)."""

    def _get_scipy_equivalent(
        self,
        x: torch.Tensor,
        y: torch.Tensor,
        x_weights: Optional[torch.Tensor] = None,
        y_weights: Optional[torch.Tensor] = None,
        device: str = 'cpu',
    ) -> torch.Tensor:
        """Return scipy's Wasserstein distance as a 1-element float tensor."""
        x_np = x.numpy().flatten()
        y_np = y.numpy().flatten()
        # Fix: bind the numpy weight arrays unconditionally. They were
        # previously assigned only inside ``is not None`` branches, so
        # calling this helper without weights raised NameError below.
        x_weights_np = (
            x_weights.numpy().flatten() if x_weights is not None else None
        )
        y_weights_np = (
            y_weights.numpy().flatten() if y_weights is not None else None
        )
        scipy_result = [sp_wasserstein(x_np, y_np, x_weights_np, y_weights_np)]
        return torch.tensor(scipy_result, device=device).to(torch.float)

    def _check_against_scipy(
        self,
        x: torch.Tensor,
        y: torch.Tensor,
        x_weights: Optional[torch.Tensor] = None,
        y_weights: Optional[torch.Tensor] = None,
        device: str = 'cpu',
    ) -> None:
        """Run the class-implementation harness against scipy's result."""
        # Fix: guard the Optional weights before moving them to CPU; the
        # old code called ``x_weights.to('cpu')`` unconditionally and
        # crashed with AttributeError when weights were omitted.
        x_w = x_weights.to('cpu') if x_weights is not None else None
        y_w = y_weights.to('cpu') if y_weights is not None else None
        compute_result = self._get_scipy_equivalent(
            x.to('cpu'), y.to('cpu'), x_w, y_w, device
        )
        self.run_class_implementation_tests(
            metric=Wasserstein1D(device=device),
            state_names={
                'dist_1_samples',
                'dist_2_samples',
                'dist_1_weights',
                'dist_2_weights',
            },
            update_kwargs={
                'new_samples_dist_1': x,
                'new_samples_dist_2': y,
                'new_weights_dist_1': x_weights,
                'new_weights_dist_2': y_weights,
            },
            compute_result=compute_result,
            num_total_updates=NUM_TOTAL_UPDATES,
            num_processes=NUM_PROCESSES,
        )

    def test_wasserstein1d_valid_input(self) -> None:
        """Known-answer checks for weighted and unweighted inputs."""

        def _assert_metric(metric: Wasserstein1D, expected: torch.Tensor) -> None:
            torch.testing.assert_close(
                metric.compute(), expected,
                equal_nan=True, atol=0.0001, rtol=0.001,
            )

        # Unweighted, equal-length samples.
        metric = Wasserstein1D()
        x = torch.tensor([5, -5, -7, 9, -3])
        y = torch.tensor([9, -7, 5, -4, -2])
        metric.update(x, y)
        _assert_metric(metric, torch.tensor([0.0]))

        # Weighted samples.
        metric = Wasserstein1D()
        x = torch.tensor([-13, -9, -19, 11, -18, -20, 8, 2, -8, -18])
        y = torch.tensor([9, 6, -5, -11, 9, -4, -13, -19, -14, 4])
        x_weights = torch.tensor([3, 3, 1, 2, 2, 3, 2, 2, 2, 3])
        y_weights = torch.tensor([2, 2, 1, 1, 2, 2, 1, 1, 1, 1])
        metric.update(x, y, x_weights, y_weights)
        _assert_metric(metric, torch.tensor([8.0]))

        # Unweighted samples of different lengths.
        metric = Wasserstein1D()
        x = torch.tensor([5, -5, -7, 9, -3])
        y = torch.tensor([9, -7, 5, -4, -2, 4, -1])
        metric.update(x, y)
        _assert_metric(metric, torch.tensor([1.0]))

        # A distribution compared with itself has zero distance.
        metric = Wasserstein1D()
        x = torch.tensor([-13, -9, -19, 11, -18, -20, 8, 2, -8, -18])
        x_weights = torch.tensor([3, 3, 1, 2, 2, 3, 2, 2, 2, 3])
        metric.update(x, x, x_weights, x_weights)
        _assert_metric(metric, torch.tensor([0.0]))

    def test_wasserstein1d_random_data_getter(self) -> None:
        """Random inputs must agree with the scipy reference."""
        for _ in range(10):
            x, y, x_weights, y_weights = get_rand_data_wasserstein1d(
                num_updates=NUM_TOTAL_UPDATES, batch_size=BATCH_SIZE
            )
            self._check_against_scipy(x, y, x_weights, y_weights)

    def test_wasserstein1d_invalid_input(self) -> None:
        """Invalid shapes, empty inputs, and bad weights raise ValueError."""
        metric = Wasserstein1D()
        with self.assertRaisesRegex(
            ValueError, 'Distribution has to be one dimensional.'
        ):
            metric.update(torch.rand(4, 2), torch.rand(7))
        with self.assertRaisesRegex(
            ValueError, 'Distribution has to be one dimensional.'
        ):
            metric.update(torch.rand(4), torch.rand(7, 3))
        with self.assertRaisesRegex(
            ValueError, 'Distribution cannot be empty.'
        ):
            metric.update(torch.rand(4), torch.tensor([]))
        with self.assertRaisesRegex(
            ValueError, 'Distribution cannot be empty.'
        ):
            metric.update(torch.tensor([]), torch.rand(5))
        with self.assertRaisesRegex(
            ValueError, 'Weight tensor sum must be positive-finite.'
        ):
            metric.update(
                torch.rand(4), torch.rand(4),
                torch.tensor([torch.inf]), torch.rand(4),
            )
        with self.assertRaisesRegex(
            ValueError, 'Weight tensor sum must be positive-finite.'
        ):
            metric.update(
                torch.rand(4), torch.rand(4),
                torch.rand(4), torch.tensor([torch.inf]),
            )
        with self.assertRaisesRegex(
            ValueError,
            'Distribution values and weight tensors must be of the same shape, got shapes torch.Size\\(\\[4\\]\\) and torch.Size\\(\\[7\\]\\).',
        ):
            metric.update(
                torch.rand(4), torch.rand(4), torch.rand(7), torch.rand(4)
            )
        with self.assertRaisesRegex(
            ValueError,
            'Distribution values and weight tensors must be of the same shape, got shapes torch.Size\\(\\[6\\]\\) and torch.Size\\(\\[10\\]\\).',
        ):
            metric.update(
                torch.rand(6), torch.rand(6), torch.rand(6), torch.rand(10)
            )
        with self.assertRaisesRegex(
            ValueError, 'All weights must be non-negative.'
        ):
            metric.update(
                torch.rand(4), torch.rand(4),
                torch.tensor([1, -1, 2, 3]), torch.rand(4),
            )
        with self.assertRaisesRegex(
            ValueError, 'All weights must be non-negative.'
        ):
            metric.update(
                torch.rand(4), torch.rand(4),
                torch.rand(4), torch.tensor([1, -1, 2, 3]),
            )
        with self.assertRaisesRegex(
            ValueError, 'All weights must be non-negative.'
        ):
            metric.update(
                torch.rand(4), torch.rand(4),
                torch.tensor([-1.0, -2.0, 0.0, 1.0]), torch.rand(4),
            )
        with self.assertRaisesRegex(
            ValueError, 'All weights must be non-negative.'
        ):
            metric.update(
                torch.rand(4), torch.rand(4),
                torch.rand(4), torch.tensor([-1.5, -1.0, 0.5, 0.75]),
            )
class BatchedFusedEmbeddingBag(BaseBatchedEmbeddingBag[torch.Tensor], FusedOptimizerModule):
    """Pooled embedding-bag lookup executed by a single fused FBGEMM
    ``SplitTableBatchedEmbeddingBagsCodegen`` kernel, with the optimizer
    step fused into the backward pass.

    Because the optimizer is fused, ``named_parameters`` marks each
    parameter with ``_in_backward_optimizers`` and ``named_buffers``
    yields nothing.
    """

    def __init__(self, config: GroupedEmbeddingConfig, pg: Optional[dist.ProcessGroup]=None, device: Optional[torch.device]=None) -> None:
        """Build the fused kernel covering every table in ``config``.

        Args:
            config: grouped table configuration (tables, data type, fused params).
            pg: process group handed to the fused optimizer.
            device: target device; selects CUDA/MTIA/CPU kernel placement.
        """
        super().__init__(config, pg, device)
        managed: List[EmbeddingLocation] = []        # per-table weight placement
        compute_devices: List[ComputeDevice] = []    # per-table compute device
        for table in config.embedding_tables:
            # Fused TBE kernels require the embedding dim to be 4-aligned.
            assert ((table.local_cols % 4) == 0), f'table {table.name} has local_cols={table.local_cols} not divisible by 4. '
            if ((device is not None) and (device.type == 'cuda')):
                compute_devices.append(ComputeDevice.CUDA)
                managed.append(compute_kernel_to_embedding_location(table.compute_kernel))
            elif ((device is not None) and (device.type == 'mtia')):
                compute_devices.append(ComputeDevice.MTIA)
                # MTIA path keeps weights in host memory.
                managed.append(EmbeddingLocation.HOST)
            else:
                compute_devices.append(ComputeDevice.CPU)
                managed.append(EmbeddingLocation.HOST)
        weights_precision = data_type_to_sparse_type(config.data_type)
        # NOTE: may alias config.fused_params — the cache-precision default
        # written below is then visible in the caller's config as well.
        fused_params = (config.fused_params or {})
        if ('cache_precision' not in fused_params):
            fused_params['cache_precision'] = weights_precision
        self._emb_module: SplitTableBatchedEmbeddingBagsCodegen = SplitTableBatchedEmbeddingBagsCodegen(embedding_specs=list(zip(self._local_rows, self._local_cols, managed, compute_devices)), feature_table_map=self._feature_table_map, pooling_mode=self._pooling, weights_precision=weights_precision, device=device, **fused_params)
        self._optim: EmbeddingFusedOptimizer = EmbeddingFusedOptimizer(config, self._emb_module, pg)
        # Table-name -> parameter slice view into the fused weight storage.
        self._param_per_table: Dict[(str, TableBatchedEmbeddingSlice)] = dict(_gen_named_parameters_by_table_fused(emb_module=self._emb_module, table_name_to_count=self.table_name_to_count.copy(), config=self._config, pg=pg))
        self.init_parameters()

    # NOTE(review): upstream versions of this class declare emb_module and
    # fused_optimizer as @property accessors — confirm the decorators were
    # not lost when this file was assembled.
    def emb_module(self) -> SplitTableBatchedEmbeddingBagsCodegen:
        """Return the underlying fused FBGEMM kernel module."""
        return self._emb_module

    def fused_optimizer(self) -> FusedOptimizer:
        """Return the optimizer fused into this module's backward pass."""
        return self._optim

    def named_buffers(self, prefix: str='', recurse: bool=True, remove_duplicate: bool=True) -> Iterator[Tuple[(str, torch.Tensor)]]:
        """Yield nothing: fused weights are not exposed as buffers."""
        (yield from ())

    def named_parameters(self, prefix: str='', recurse: bool=True, remove_duplicate: bool=True) -> Iterator[Tuple[(str, nn.Parameter)]]:
        """Yield each split embedding weight wrapped as an ``nn.Parameter``.

        Each parameter is tagged with ``_in_backward_optimizers`` so the
        training loop skips an external optimizer step for it.
        """
        for (name, tensor) in self.named_split_embedding_weights(prefix, recurse, remove_duplicate):
            param = nn.Parameter(tensor)
            param._in_backward_optimizers = [EmptyFusedOptimizer()]
            (yield (name, param))

    def flush(self) -> None:
        """Flush pending cached/deferred writes in the fused kernel."""
        self._emb_module.flush()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.