code stringlengths 281 23.7M |
|---|
class TextualInversionCLIPTextModel(CLIPTextModel):
    """CLIP text model for textual inversion: the stock token embedding is
    replaced with a SplitEmbedding so a single new concept vector can be
    trained while the pre-trained vocabulary stays frozen."""

    def __init__(self, config: CLIPTextConfig):
        super().__init__(config)
        # Mirror the original embedding's dimensions in the replacement table.
        # SplitEmbedding presumably routes the extra concept token separately
        # from the frozen vocabulary rows — TODO confirm against its definition.
        (vocab_size, embed_dim) = self.text_model.embeddings.token_embedding.weight.size()
        self.text_model.embeddings.token_embedding = SplitEmbedding(vocab_size, embed_dim)

    def patch_emb(self, init_embedding: torch.Tensor):
        """Install `init_embedding` as the trainable concept token (stored with
        a leading batch axis of 1) and freeze the base vocabulary weights."""
        self.text_model.embeddings.token_embedding.concept_token = Parameter(init_embedding.unsqueeze(0))
        self.text_model.embeddings.token_embedding.weight.requires_grad = False
def get_variants(args):
    """Build a VariantGenerator from COMMON_PARAMS overridden by the
    environment-specific ENV_PARAMS[args.env].

    Every key is registered as a list of variant values; scalars are wrapped
    in a one-element list.
    """
    env_params = ENV_PARAMS[args.env]
    # Copy before updating: the original mutated the shared COMMON_PARAMS
    # dict in place, so a second call for a different env would inherit the
    # previous env's overrides.
    params = dict(COMMON_PARAMS)
    params.update(env_params)
    vg = VariantGenerator()
    for key, val in params.items():
        vg.add(key, val if isinstance(val, list) else [val])
    return vg
class Migration(migrations.Migration):
    """Add a file upload field to Value and relax the option / value_type
    field definitions (nullable FK with SET_NULL, 'file' value type choice)."""

    dependencies = [('projects', '0044_meta')]
    operations = [migrations.AddField(model_name='value', name='file', field=models.FileField(blank=True, help_text='The file stored for this value.', null=True, upload_to=rdmo.projects.models.value.get_file_upload_to, verbose_name='File')), migrations.AlterField(model_name='value', name='option', field=models.ForeignKey(blank=True, help_text='The option stored for this value.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='values', to='options.Option', verbose_name='Option')), migrations.AlterField(model_name='value', name='value_type', field=models.CharField(choices=[('text', 'Text'), ('url', 'URL'), ('integer', 'Integer'), ('float', 'Float'), ('boolean', 'Boolean'), ('datetime', 'Datetime'), ('option', 'Option'), ('file', 'File')], default='text', help_text='Type of this value.', max_length=8, verbose_name='Value type'))]
def _load_event_fixtures(fixture_dir):
    """Yield (event, expected) pairs from every YAML fixture file in
    `fixture_dir`.

    Each fixture mapping must contain exactly the keys 'event' and 'result';
    'result' dicts are structured into Download objects, other result values
    are passed through unchanged.
    """
    for filename in os.listdir(fixture_dir):
        with open(os.path.join(fixture_dir, filename), 'r') as fp:
            # safe_load avoids arbitrary object construction; the original
            # called yaml.load() with no Loader (deprecated/unsafe) and also
            # rebound the name of the list it was iterating ('fixtures').
            # NOTE(review): if fixtures rely on custom YAML tags, switch to an
            # explicit Loader instead — confirm against the fixture files.
            file_fixtures = yaml.safe_load(fp.read())
        for fixture in file_fixtures:
            event = fixture.pop('event')
            result = fixture.pop('result')
            expected = (_cattr.structure(result, Download) if isinstance(result, dict) else result)
            # Every key must have been consumed; anything left is a typo.
            assert fixture == {}
            yield (event, expected)
def test_dataset_transform_override():
    """override_dataset_transform must temporarily replace the transform of
    every dataset nested inside a ConcatDataset and restore the originals
    once the context exits."""
    # Three datasets with distinct transforms on pixel (0, 0): x2, x3, +10.
    data1 = MemoryDataset({'x': [pic(1), pic(2), pic(3)], 'y': ['a', 'b', 'c']}, transform=Lambda((lambda x: (np.array(x)[(0, 0)] * 2))))
    data2 = MemoryDataset({'x': [pic(4), pic(5), pic(6)], 'y': ['d', 'e', 'f']}, transform=Lambda((lambda x: (np.array(x)[(0, 0)] * 3))))
    data3 = MemoryDataset({'x': [pic(7), pic(8), pic(9)], 'y': ['g', 'h', 'i']}, transform=Lambda((lambda x: (np.array(x)[(0, 0)] + 10))))
    # Nested concat to verify the override recurses into inner ConcatDatasets.
    ds = ConcatDataset([data1, ConcatDataset([data2, data3])])
    (x1, y1) = zip(*[ds[i] for i in range(len(ds))])
    with override_dataset_transform(ds, Lambda((lambda x: np.array(x)[(0, 0)]))) as ds_overriden:
        # Inside the context the identity transform is active everywhere.
        (x2, y2) = zip(*[ds_overriden[i] for i in range(len(ds_overriden))])
    # After the context the original per-dataset transforms are back.
    (x3, y3) = zip(*[ds[i] for i in range(len(ds))])
    assert np.array_equal(x1, [2, 4, 6, 12, 15, 18, 17, 18, 19])
    assert np.array_equal(x2, [1, 2, 3, 4, 5, 6, 7, 8, 9])
    assert np.array_equal(x3, x1)
class CheckpointFunction(torch.autograd.Function):
    """Activation checkpointing: forward runs under no_grad and saves only
    the tensor inputs plus the RNG state; backward restores the RNG state,
    re-runs the forward under enable_grad to recompute activations, then
    back-propagates through them.

    Fix: `forward`/`backward` are now @staticmethod, as required for
    torch.autograd.Function subclasses invoked via `.apply()`.
    """

    @staticmethod
    def forward(ctx, run_function, parent_ctx_dict, kwarg_keys, *args):
        if torch.is_grad_enabled():
            # Warn early if no input requires grad (checkpoint would be useless).
            checkpoint.check_backward_validity(args)
        ctx.run_function = run_function
        ctx.kwarg_keys = kwarg_keys
        # Save the RNG state so recomputation in backward is bit-identical
        # (e.g. for dropout).
        ctx.fwd_rng_state = utils.get_rng_state()
        (tensor_inputs, packed_non_tensor_inputs) = split_non_tensors(args)
        ctx.save_for_backward(*tensor_inputs)
        ctx.packed_non_tensor_inputs = packed_non_tensor_inputs
        with torch.no_grad():
            (unpacked_args, unpacked_kwargs) = unpack_kwargs(kwarg_keys, args)
            outputs = run_function(*unpacked_args, **unpacked_kwargs)
        if isinstance(outputs, torch.Tensor):
            return outputs
        else:
            # autograd.Function can only return tensors; stash the non-tensor
            # outputs on the caller-provided dict for reassembly outside.
            (outputs, packed_non_tensor_outputs) = split_non_tensors(outputs)
            parent_ctx_dict['packed_non_tensor_outputs'] = packed_non_tensor_outputs
            return outputs

    @staticmethod
    def backward(ctx, *args):
        if (not torch.autograd._is_checkpoint_valid()):
            raise RuntimeError('Checkpointing is not compatible with .grad(), please use .backward() if possible')
        tensor_inputs = ctx.saved_tensors
        tensor_inputs = checkpoint.detach_variable(tensor_inputs)
        inputs = unpack_non_tensors(tensor_inputs, ctx.packed_non_tensor_inputs)
        # Swap in the forward-time RNG state for recomputation, then restore.
        bwd_rng_state = utils.get_rng_state()
        utils.set_rng_state(ctx.fwd_rng_state)
        with torch.enable_grad():
            (unpacked_args, unpacked_kwargs) = unpack_kwargs(ctx.kwarg_keys, inputs)
            outputs = ctx.run_function(*unpacked_args, **unpacked_kwargs)
            (tensor_outputs, _) = split_non_tensors(outputs)
        utils.set_rng_state(bwd_rng_state)
        # Backprop only through outputs that require grad, pairing each with
        # its incoming gradient (positionally aligned with `args`).
        outputs_with_grad = []
        args_with_grad = []
        for i in range(len(tensor_outputs)):
            if tensor_outputs[i].requires_grad:
                outputs_with_grad.append(tensor_outputs[i])
                args_with_grad.append(args[i])
        if (len(outputs_with_grad) == 0):
            raise RuntimeError('None of the outputs have requires_grad=True, this checkpoint() is not necessary')
        torch.autograd.backward(outputs_with_grad, args_with_grad)
        # First three Nones line up with run_function, parent_ctx_dict, kwarg_keys.
        grads = tuple(((inp.grad if isinstance(inp, torch.Tensor) else None) for inp in inputs))
        return ((None, None, None) + grads)
def detect_slides_recursively(ctr_entities):
    """Walk the entity tree and configure ESlide nodes that wrap exactly one
    EToGather holding exactly one non-empty EGroup; containers are recursed
    into, anything else is ignored."""
    for entity in ctr_entities:
        if isinstance(entity, (EBlock, EToGather, EToSatisfy)):
            detect_slides_recursively(entity.entities)
            continue
        if not (isinstance(entity, ESlide) and len(entity.entities) == 1):
            continue
        child = entity.entities[0]
        if not (isinstance(child, EToGather) and len(child.entities) == 1):
            continue
        grandchild = child.entities[0]
        if isinstance(grandchild, EGroup) and len(grandchild.entities) > 0:
            slide_params = _identify_slide(grandchild)
            if slide_params:
                (entity.scope, entity.offset, entity.circular) = slide_params
            else:
                print('Warning: The slide is not really a slide but a group')
def get_hessianloader(dataset, hessian_batch_size):
    """Return a DataLoader over the training split of `dataset` for Hessian
    estimation.

    Args:
        dataset: one of 'cifar10', 'cifar100' or 'imagenet'.
        hessian_batch_size: batch size of the returned loader.

    Raises:
        ValueError: for any other dataset name.
    """
    if (dataset == 'cifar10'):
        # Standard CIFAR-10 per-channel mean/std normalization; no shuffling
        # so Hessian batches are reproducible.
        hessian_loader = torch.utils.data.DataLoader(datasets.CIFAR10('../data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])), batch_size=hessian_batch_size, shuffle=False)
    elif (dataset == 'cifar100'):
        hessian_loader = torch.utils.data.DataLoader(datasets.CIFAR100('../data/', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))])), batch_size=hessian_batch_size, shuffle=False)
    elif (dataset == 'imagenet'):
        # NOTE(review): hard-coded cluster path and shuffle=True (unlike the
        # CIFAR branches) — confirm this asymmetry is intentional.
        (transform_train, transform_test) = get_transforms(dataset)
        trainset = torchvision.datasets.ImageFolder('/rscratch/data/imagenet12/train', transform=transform_train)
        hessian_loader = torch.utils.data.DataLoader(trainset, batch_size=hessian_batch_size, shuffle=True, num_workers=32)
    else:
        raise ValueError('No valid dataset is given.')
    return hessian_loader
def average_gradients(tower_grads):
    """Average per-tower gradients for synchronous multi-GPU training.

    Args:
        tower_grads: list with one entry per tower, each a list of
            (gradient, variable) pairs; all towers share the same variable
            order.

    Returns:
        One list of (averaged_and_clipped_gradient, variable) pairs.
    """
    average_grads = []
    # zip(*...) regroups the pairs so each iteration sees one variable's
    # gradients across all towers.
    for grad_and_vars in zip(*tower_grads):
        grads = []
        for (g, _) in grad_and_vars:
            # Add a leading tower axis so the gradients can be concatenated.
            expanded_g = tf.expand_dims(g, 0)
            grads.append(expanded_g)
        grad = tf.concat(grads, 0)
        grad = tf.reduce_mean(grad, 0)
        # Clip after averaging, to +/- FLAGS.grad_clip.
        grad = tf.clip_by_value(grad, (- FLAGS.grad_clip), FLAGS.grad_clip)
        # Variables are shared across towers; the first tower's handle suffices.
        v = grad_and_vars[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)
    return average_grads
class DisplayQueryUseCase():
    """Read-side use case: fetch rooms filtered by status through the room
    repository, inside a short-lived database session."""

    def __init__(self, room_repo: RoomRDBRepository, db_session: Callable[([], ContextManager[Session])]):
        # db_session is a zero-argument factory yielding a context-managed session.
        self.room_repo = room_repo
        self.db_session = db_session

    def get_rooms(self, room_status: RoomStatus) -> List[Room]:
        """Return all rooms whose status equals `room_status`."""
        with self.db_session() as session:
            matching = self.room_repo.get_rooms_by_status(session=session, room_status=room_status)
            return list(matching)
def load_partition_data_mnist_by_device_id(batch_size, device_id, train_path='MNIST_mobile', test_path='MNIST_mobile'):
    """Load the MNIST partition belonging to one device; data is expected
    under <base>/<device_id>/train and <base>/<device_id>/test."""
    train_path = '{}/{}/train'.format(train_path, device_id)
    test_path = '{}/{}/test'.format(test_path, device_id)
    return load_partition_data_mnist(batch_size, train_path, test_path)
def _format_diff_text_and_options(diff, **kwargs):
    """Render a prototype diff as display lines plus menu options.

    Walks the nested `diff` mapping; leaves are (old, new, instruction)
    tuples. KEEP leaves become plain text; other instructions become a
    numbered line plus a menu option that lets the user keep the old value
    instead. Returns (texts, options).
    """
    valid_instructions = ('KEEP', 'REMOVE', 'ADD', 'UPDATE')

    def _visualize(obj, rootname, get_name=False):
        # Format one leaf value for display; attrs/tags get their own layout.
        if utils.is_iter(obj):
            if get_name:
                return (obj[0] if obj[0] else '<unset>')
            if (rootname == 'attrs'):
                return '{} |W=|n {} |W(category:|n {}|W, locks:|n {}|W)|n'.format(*obj)
            elif (rootname == 'tags'):
                return '{} |W(category:|n {}|W)|n'.format(obj[0], obj[1])
        return '{}'.format(obj)

    def _parse_diffpart(diffpart, optnum, *args):
        # Recurse into dicts; `args` accumulates the key path from the root.
        typ = type(diffpart)
        texts = []
        options = []
        if ((typ == tuple) and (len(diffpart) == 3) and (diffpart[2] in valid_instructions)):
            rootname = args[0]
            (old, new, instruction) = diffpart
            if (instruction == 'KEEP'):
                texts.append('   |gKEEP|W:|n {old}'.format(old=_visualize(old, rootname)))
            else:
                vold = _visualize(old, rootname)
                vnew = _visualize(new, rootname)
                # Break the old->new arrow onto a new line for long values.
                vsep = ('' if (len(vold) < 78) else '\n')
                vinst = ('|rREMOVE|n' if (instruction == 'REMOVE') else '|y{}|n'.format(instruction))
                texts.append(' |c[{num}] {inst}|W:|n {old} |W->|n{sep} {new}'.format(inst=vinst, num=optnum, old=vold, sep=vsep, new=vnew))
                # Offer a numbered option to keep the old value at this path.
                options.append({'key': str(optnum), 'desc': '|gKEEP|n ({}) {}'.format(rootname, _visualize(old, args[(- 1)], get_name=True)), 'goto': (_keep_diff, dict((('path', args), ('diff', diff)), **kwargs))})
                optnum += 1
        else:
            # Branch node: recurse in sorted key order for stable numbering.
            for key in sorted(list(diffpart.keys())):
                subdiffpart = diffpart[key]
                (text, option, optnum) = _parse_diffpart(subdiffpart, optnum, *(args + (key,)))
                texts.extend(text)
                options.extend(option)
        return (texts, options, optnum)

    texts = []
    options = []
    optnum = 1
    for root_key in sorted(diff):
        diffpart = diff[root_key]
        (text, option, optnum) = _parse_diffpart(diffpart, optnum, root_key)
        heading = '- |w{}:|n '.format(root_key)
        if (root_key in ('attrs', 'tags', 'permissions')):
            # Multi-entry sections get the heading on its own line.
            texts.append(heading)
        elif text:
            text = ([(heading + text[0])] + text[1:])
        else:
            text = [heading]
        texts.extend(text)
        options.extend(option)
    return (texts, options)
class SeparableConv2d(nn.Module):
    """Depthwise-separable convolution: depthwise 2D conv -> BatchNorm ->
    1x1 pointwise conv.

    Args:
        in_channels / out_channels: channel counts.
        kernel_size, stride, dilation, bias: depthwise conv parameters.
        BatchNorm: normalization class applied between the two convs.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, bias=False, BatchNorm=nn.BatchNorm2d):
        super(SeparableConv2d, self).__init__()
        # Padding preserves spatial size at stride 1, also for dilated kernels.
        if (dilation > (kernel_size // 2)):
            padding = dilation
        else:
            padding = (kernel_size // 2)
        self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, stride, padding=padding, dilation=dilation, groups=in_channels, bias=bias)
        # Fix: honor the injected BatchNorm class — the original accepted the
        # parameter but always instantiated nn.BatchNorm2d.
        self.bn = BatchNorm(in_channels)
        self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, bias=bias)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn(x)
        x = self.pointwise(x)
        return x
def auth_from_yaml(file_path, username=None, password=None):
    """Build an auth dict from a YAML config file.

    If username/password are not both supplied, they are read from the
    'auth_file' referenced by the config (line 1: username, line 2: password).

    Raises:
        ValueError: when credentials are missing and no valid auth_file exists.
    """
    # Context managers close both handles; the original leaked two open files.
    with open(file_path) as config_fp:
        auth_configs = yaml.load(config_fp, Loader=yaml.FullLoader)
    if ((username is None) or (password is None)):
        auth_file = auth_configs.get('auth_file', False)
        if (auth_file and os.path.isfile(auth_file)):
            with open(auth_file, 'r') as cred_fp:
                (username, password) = cred_fp.read().strip().split('\n')
        else:
            raise ValueError(('Need either username and password args or valid auth_file, cannot find: %s' % auth_file))
    auth = build_auth(auth_configs, username, password)
    # Config values take precedence over what build_auth produced.
    auth.update(auth_configs)
    return auth
def test_it_should_remove_installed_packages_if_required() -> None:
    """With synchronize=True, installed packages absent from the resolution
    (c, e) must be removed, b upgraded, d installed, and already-satisfied a
    reported as a skipped install."""
    transaction = Transaction([Package('a', '1.0.0'), Package('b', '2.0.0'), Package('c', '3.0.0')], [(Package('a', '1.0.0'), 1), (Package('b', '2.1.0'), 2), (Package('d', '4.0.0'), 0)], installed_packages=[Package('a', '1.0.0'), Package('b', '2.0.0'), Package('c', '3.0.0'), Package('e', '5.0.0')])
    check_operations(transaction.calculate_operations(synchronize=True), [{'job': 'remove', 'package': Package('c', '3.0.0')}, {'job': 'remove', 'package': Package('e', '5.0.0')}, {'job': 'update', 'from': Package('b', '2.0.0'), 'to': Package('b', '2.1.0')}, {'job': 'install', 'package': Package('a', '1.0.0'), 'skipped': True}, {'job': 'install', 'package': Package('d', '4.0.0')}])
def get_tf_weights_as_numpy(path) -> Dict:
    """Load every variable of a TF checkpoint into a {name: ndarray} dict,
    skipping bookkeeping variables such as 'global_step'."""
    ignore_name = ['global_step']
    tf_weights = {}
    variables = tf.train.list_variables(path)
    for name, shape in tqdm(variables, desc='converting tf checkpoint to dict'):
        if any((pat in name) for pat in ignore_name):
            continue
        tf_weights[name] = tf.train.load_variable(path, name)
    return tf_weights
class AdaptivePadding(nn.Module):
    """Pad an NCHW input so a conv/pool with the configured kernel, stride
    and dilation covers it completely ('same'-style coverage).

    padding='same'   pads symmetrically around the input;
    padding='corner' pads only on the bottom/right edges.
    """

    def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'):
        super(AdaptivePadding, self).__init__()
        assert padding in ('same', 'corner')
        self.padding = padding
        self.kernel_size = to_2tuple(kernel_size)
        self.stride = to_2tuple(stride)
        self.dilation = to_2tuple(dilation)

    def get_pad_shape(self, input_shape):
        """Return (pad_h, pad_w) required for an (H, W) input."""
        in_h, in_w = input_shape
        kernel_h, kernel_w = self.kernel_size
        stride_h, stride_w = self.stride
        out_h = math.ceil(in_h / stride_h)
        out_w = math.ceil(in_w / stride_w)
        # Standard "same" padding formula, with dilation widening the
        # effective kernel size.
        pad_h = max((out_h - 1) * stride_h + (kernel_h - 1) * self.dilation[0] + 1 - in_h, 0)
        pad_w = max((out_w - 1) * stride_w + (kernel_w - 1) * self.dilation[1] + 1 - in_w, 0)
        return (pad_h, pad_w)

    def forward(self, x):
        pad_h, pad_w = self.get_pad_shape(x.size()[-2:])
        if pad_h > 0 or pad_w > 0:
            if self.padding == 'corner':
                x = F.pad(x, [0, pad_w, 0, pad_h])
            elif self.padding == 'same':
                x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2])
        return x
def test_loader_no_get_pipeline_definition():
    """A loader module lacking get_pipeline_definition must raise
    AttributeError and log an error naming the missing function."""
    loader_cache.clear()
    import sys
    current_module = sys.modules[__name__]
    # Use this very test module as the (invalid) pipeline loader.
    pipeline = Pipeline('arb pipe', context_args='arb context input', loader=__name__)
    with patch_logger('pypyr.cache.loadercache', logging.ERROR) as mock_logger_error:
        with pytest.raises(AttributeError) as err:
            pipeline.run(Context())
    assert (str(err.value) == f"module '{__name__}' has no attribute 'get_pipeline_definition'")
    mock_logger_error.assert_called_once_with(f"The pipeline loader {current_module} doesn't have a get_pipeline_definition(pipeline_name, parent) function.")
def main(args):
    """Compute FID activation statistics (mean, covariance) for a video
    dataset with a pretrained ResNeXt-101 Kinetics model and save them to
    ./stats/<dataset>.npz."""
    device = 'cuda'
    print('Loading ResNext101 model...')
    # 16-frame clips; checkpoint is expected in the working directory.
    model = nn.DataParallel(resnet101(sample_duration=16).cuda())
    model.load_state_dict(torch.load('resnext-101-kinetics.pth')['state_dict'])
    print('Loading video paths...')
    if (args.dataset == 'uva'):
        files = glob.glob((args.data_path + '/*.mp4'))
        data_type = 'video'
    else:
        # Only the 'uva' dataset is wired up so far.
        raise NotImplementedError
    (mu, sigma) = fid.calculate_activation_statistics(files, data_type, model, args.batch_size, args.size, args.length, args.dims, device)
    np.savez_compressed((('./stats/' + args.dataset) + '.npz'), mu=mu, sigma=sigma)
    print('finished')
def make_vgg_layer(inplanes, planes, num_blocks, dilation=1, with_bn=False, ceil_mode=False):
    """Build one VGG stage: `num_blocks` units of conv3x3 [+ BatchNorm] +
    ReLU, followed by a 2x2 max-pool. Returns a plain list of modules."""
    layers = []
    in_ch = inplanes
    for _ in range(num_blocks):
        layers.append(conv3x3(in_ch, planes, dilation))
        if with_bn:
            layers.append(nn.BatchNorm2d(planes))
        layers.append(nn.ReLU(inplace=True))
        # After the first conv the channel count is `planes`.
        in_ch = planes
    layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode))
    return layers
class TestNodeFinder():
    """Exercise the NodeFinder graph feature: node counts per Op and index
    maintenance across a graph replacement."""

    def test_straightforward(self):
        # Minimal Type/Op stand-ins sufficient to build a FunctionGraph.
        class MyType(Type):
            def __init__(self, name):
                self.name = name
            def filter(self, *args, **kwargs):
                raise NotImplementedError()
            def __str__(self):
                return self.name
            def __repr__(self):
                return self.name
            def __eq__(self, other):
                # All MyType instances compare equal regardless of name.
                return isinstance(other, MyType)
        class MyOp(Op):
            __props__ = ('nin', 'name')
            def __init__(self, nin, name):
                self.nin = nin
                self.name = name
            def make_node(self, *inputs):
                def as_variable(x):
                    assert isinstance(x, Variable)
                    return x
                assert (len(inputs) == self.nin)
                inputs = list(map(as_variable, inputs))
                for input in inputs:
                    if (not isinstance(input.type, MyType)):
                        raise Exception('Error 1')
                outputs = [MyType((self.name + '_R'))()]
                return Apply(self, inputs, outputs)
            def __str__(self):
                return self.name
            def perform(self, *args, **kwargs):
                raise NotImplementedError()
        sigmoid = MyOp(1, 'Sigmoid')
        add = MyOp(2, 'Add')
        dot = MyOp(2, 'Dot')
        def MyVariable(name):
            return Variable(MyType(name), None, None)
        def inputs():
            x = MyVariable('x')
            y = MyVariable('y')
            z = MyVariable('z')
            return (x, y, z)
        (x, y, z) = inputs()
        # Graph: add(add(sigmoid(x), sigmoid(sigmoid(z))), dot(add(x, y), dot(y, z)))
        e0 = dot(y, z)
        e = add(add(sigmoid(x), sigmoid(sigmoid(z))), dot(add(x, y), e0))
        g = FunctionGraph([x, y, z], [e], clone=False)
        g.attach_feature(NodeFinder())
        assert hasattr(g, 'get_nodes')
        # Initial per-op node counts.
        for (type, num) in ((add, 3), (sigmoid, 3), (dot, 2)):
            if (len(list(g.get_nodes(type))) != num):
                raise Exception(('Expected: %i times %s' % (num, type)))
        # Replace the dot(y, z) sub-expression with add(y, z); the finder's
        # index must drop the old apply node and pick up the new one.
        new_e0 = add(y, z)
        assert (e0.owner in g.get_nodes(dot))
        assert (new_e0.owner not in g.get_nodes(add))
        g.replace(e0, new_e0)
        assert (e0.owner not in g.get_nodes(dot))
        assert (new_e0.owner in g.get_nodes(add))
        for (type, num) in ((add, 4), (sigmoid, 3), (dot, 1)):
            if (len(list(g.get_nodes(type))) != num):
                raise Exception(('Expected: %i times %s' % (num, type)))
def gen_efficientnet_lite_kwargs(channel_multiplier=1.0, depth_multiplier=1.0, drop_rate=0.2):
    """Assemble the constructor kwargs for an EfficientNet-Lite backbone
    (fixed stem, ReLU6 activation, width/depth scaled by the multipliers)."""
    # Stage definitions in the standard block-args string encoding.
    arch_def = [['ds_r1_k3_s1_e1_c16'], ['ir_r2_k3_s2_e6_c24'], ['ir_r2_k5_s2_e6_c40'], ['ir_r3_k3_s2_e6_c80'], ['ir_r3_k5_s1_e6_c112'], ['ir_r4_k5_s2_e6_c192'], ['ir_r1_k3_s1_e6_c320']]
    return {
        'block_args': decode_arch_def(arch_def, depth_multiplier, fix_first_last=True),
        'num_features': 1280,
        'stem_size': 32,
        'fix_stem': True,
        'round_chs_fn': partial(round_channels, multiplier=channel_multiplier),
        'act_layer': nn.ReLU6,
        'drop_rate': drop_rate,
        'drop_path_rate': 0.2,
    }
class AlbertTokenizer(PreTrainedTokenizer):
    """SentencePiece-based ALBERT tokenizer.

    Fix: `vocab_size` is now a @property — `get_vocab` iterates
    `range(self.vocab_size)`, which raised TypeError while it was a plain
    method (a bound method is not an int).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token='[CLS]', eos_token='[SEP]', unk_token='<unk>', sep_token='[SEP]', pad_token='<pad>', cls_token='[CLS]', mask_token='[MASK]', sp_model_kwargs: Optional[Dict[(str, Any)]]=None, **kwargs) -> None:
        # The mask token behaves like a normal word preceded by a space.
        mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False) if isinstance(mask_token, str) else mask_token)
        self.sp_model_kwargs = ({} if (sp_model_kwargs is None) else sp_model_kwargs)
        super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        """Size of the SentencePiece vocabulary (without added tokens)."""
        return len(self.sp_model)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; reload it on setstate.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # Backward compatibility with pickles created before sp_model_kwargs.
        if (not hasattr(self, 'sp_model_kwargs')):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize raw text: whitespace, quotes, accents and casing per the
        tokenizer's configuration."""
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``', '"').replace("''", '"')
        if (not self.keep_accents):
            # Strip combining marks after NFKD decomposition.
            outputs = unicodedata.normalize('NFKD', outputs)
            outputs = ''.join([c for c in outputs if (not unicodedata.combining(c))])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize with SentencePiece, re-splitting pieces like '9,' so the
        digit and the trailing comma become separate pieces."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if ((len(piece) > 1) and (piece[(- 1)] == str(',')) and piece[(- 2)].isdigit()):
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:(- 1)].replace(SPIECE_UNDERLINE, ''))
                # Avoid duplicating the word-boundary marker on the first piece.
                if ((piece[0] != SPIECE_UNDERLINE) and (cur_pieces[0][0] == SPIECE_UNDERLINE)):
                    if (len(cur_pieces[0]) == 1):
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[(- 1)])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the SentencePiece vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an id to a token (str) using the SentencePiece vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        return self.sp_model.decode(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """ALBERT format: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return ((cls + token_ids_0) + sep)
        return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is not None):
            return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
        return (([1] + ([0] * len(token_ids_0))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Segment ids: 0 for `[CLS] A [SEP]`, 1 for `B [SEP]` when present."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Save the SentencePiece model file to `save_directory`; copies the
        original file when it exists, otherwise serializes the loaded model."""
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        if ((os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)) and os.path.isfile(self.vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        elif (not os.path.isfile(self.vocab_file)):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
class ObjectDetectionEvaluation():
    """Accumulates ground truth and detections per image and computes
    per-class precision/recall, average precision, mAP and CorLoc.

    Fix: the length-mismatch ValueError in add_single_detected_image_info
    applied '%' to a single value with three placeholders (TypeError at
    raise time) and passed the other lengths as extra exception args.
    """

    def __init__(self, num_gt_classes, matching_iou_threshold=0.5, nms_iou_threshold=1.0, nms_max_output_boxes=10000, recall_lower_bound=0.0, recall_upper_bound=1.0, use_weighted_mean_ap=False, label_id_offset=0, group_of_weight=0.0, per_image_eval_class=PerImageEvaluation):
        if (num_gt_classes < 1):
            raise ValueError('Need at least 1 groundtruth class for evaluation.')
        self.per_image_eval = per_image_eval_class(num_gt_classes=num_gt_classes, matching_iou_threshold=matching_iou_threshold, nms_iou_threshold=nms_iou_threshold, nms_max_output_boxes=nms_max_output_boxes, group_of_weight=group_of_weight)
        self.recall_lower_bound = recall_lower_bound
        self.recall_upper_bound = recall_upper_bound
        self.group_of_weight = group_of_weight
        self.num_class = num_gt_classes
        self.use_weighted_mean_ap = use_weighted_mean_ap
        self.label_id_offset = label_id_offset
        # Per-image ground-truth state, keyed by image key.
        self.gt_boxes = {}
        self.gt_class_labels = {}
        self.gt_masks = {}
        self.gt_is_difficult_list = {}
        self.gt_is_group_of_list = {}
        self.num_gt_instances_per_class = np.zeros(self.num_class, dtype=float)
        self.num_gt_imgs_per_class = np.zeros(self.num_class, dtype=int)
        self._initialize_detections()

    def _initialize_detections(self):
        """Reset all detection-side accumulators."""
        self.detection_keys = set()
        self.scores_per_class = [[] for _ in range(self.num_class)]
        self.tp_fp_labels_per_class = [[] for _ in range(self.num_class)]
        self.num_images_correctly_detected_per_class = np.zeros(self.num_class)
        self.average_precision_per_class = np.empty(self.num_class, dtype=float)
        self.average_precision_per_class.fill(np.nan)
        self.precisions_per_class = ([np.nan] * self.num_class)
        self.recalls_per_class = ([np.nan] * self.num_class)
        self.sum_tp_class = ([np.nan] * self.num_class)
        self.corloc_per_class = np.ones(self.num_class, dtype=float)

    def clear_detections(self):
        self._initialize_detections()

    def add_single_ground_truth_image_info(self, image_key, gt_boxes, gt_class_labels, gt_is_difficult_list=None, gt_is_group_of_list=None, gt_masks=None):
        """Register ground truth for one image; duplicate keys are ignored
        with a warning."""
        if (image_key in self.gt_boxes):
            logging.warning('image %s has already been added to the ground truth database.', image_key)
            return
        self.gt_boxes[image_key] = gt_boxes
        self.gt_class_labels[image_key] = gt_class_labels
        self.gt_masks[image_key] = gt_masks
        if (gt_is_difficult_list is None):
            num_boxes = gt_boxes.shape[0]
            gt_is_difficult_list = np.zeros(num_boxes, dtype=bool)
        gt_is_difficult_list = gt_is_difficult_list.astype(dtype=bool)
        self.gt_is_difficult_list[image_key] = gt_is_difficult_list
        if (gt_is_group_of_list is None):
            num_boxes = gt_boxes.shape[0]
            gt_is_group_of_list = np.zeros(num_boxes, dtype=bool)
        if (gt_masks is None):
            num_boxes = gt_boxes.shape[0]
            mask_presence_indicator = np.zeros(num_boxes, dtype=bool)
        else:
            # True where a provided mask is empty (all zeros); such boxes are
            # treated like 'difficult' below.
            mask_presence_indicator = (np.sum(gt_masks, axis=(1, 2)) == 0).astype(dtype=bool)
        gt_is_group_of_list = gt_is_group_of_list.astype(dtype=bool)
        self.gt_is_group_of_list[image_key] = gt_is_group_of_list
        masked_gt_is_difficult_list = (gt_is_difficult_list | mask_presence_indicator)
        for class_index in range(self.num_class):
            # Count normal instances fully and group-of instances with
            # group_of_weight; difficult/empty-mask boxes are excluded.
            num_gt_instances = np.sum((gt_class_labels[((~ masked_gt_is_difficult_list) & (~ gt_is_group_of_list))] == class_index))
            num_groupof_gt_instances = (self.group_of_weight * np.sum((gt_class_labels[(gt_is_group_of_list & (~ masked_gt_is_difficult_list))] == class_index)))
            self.num_gt_instances_per_class[class_index] += (num_gt_instances + num_groupof_gt_instances)
            if np.any((gt_class_labels == class_index)):
                self.num_gt_imgs_per_class[class_index] += 1

    def add_single_detected_image_info(self, image_key, detected_boxes, detected_scores, detected_class_labels, detected_masks=None):
        """Register detections for one image and accumulate per-class scores
        and TP/FP labels; duplicate keys are ignored with a warning."""
        if ((len(detected_boxes) != len(detected_scores)) or (len(detected_boxes) != len(detected_class_labels))):
            # Fix: format all three lengths into the message (the original
            # applied % to one value and passed the rest as extra args).
            raise ValueError('detected_boxes, detected_scores and detected_class_labels should all have same lengths. Got[%d, %d, %d]' % (len(detected_boxes), len(detected_scores), len(detected_class_labels)))
        if (image_key in self.detection_keys):
            logging.warning('image %s has already been added to the detection result database', image_key)
            return
        self.detection_keys.add(image_key)
        if (image_key in self.gt_boxes):
            gt_boxes = self.gt_boxes[image_key]
            gt_class_labels = self.gt_class_labels[image_key]
            # pop() releases the (potentially large) mask array once consumed.
            gt_masks = self.gt_masks.pop(image_key)
            gt_is_difficult_list = self.gt_is_difficult_list[image_key]
            gt_is_group_of_list = self.gt_is_group_of_list[image_key]
        else:
            # No ground truth for this image: evaluate against empty arrays.
            gt_boxes = np.empty(shape=[0, 4], dtype=float)
            gt_class_labels = np.array([], dtype=int)
            if (detected_masks is None):
                gt_masks = None
            else:
                gt_masks = np.empty(shape=[0, 1, 1], dtype=float)
            gt_is_difficult_list = np.array([], dtype=bool)
            gt_is_group_of_list = np.array([], dtype=bool)
        (scores, tp_fp_labels, is_class_correctly_detected_in_image) = self.per_image_eval.compute_object_detection_metrics(detected_boxes=detected_boxes, detected_scores=detected_scores, detected_class_labels=detected_class_labels, gt_boxes=gt_boxes, gt_class_labels=gt_class_labels, gt_is_difficult_list=gt_is_difficult_list, gt_is_group_of_list=gt_is_group_of_list, detected_masks=detected_masks, gt_masks=gt_masks)
        for i in range(self.num_class):
            if (scores[i].shape[0] > 0):
                self.scores_per_class[i].append(scores[i])
                self.tp_fp_labels_per_class[i].append(tp_fp_labels[i])
        self.num_images_correctly_detected_per_class += is_class_correctly_detected_in_image

    def evaluate(self):
        """Compute the final metrics.

        Returns:
            dict with per_class_ap, mean_ap, per_class_precision,
            per_class_recall, per_class_corlocs and mean_corloc.
        """
        if (self.num_gt_instances_per_class == 0).any():
            logging.warning('The following classes have no ground truth examples: %s', (np.squeeze(np.argwhere((self.num_gt_instances_per_class == 0))) + self.label_id_offset))
        if self.use_weighted_mean_ap:
            all_scores = np.array([], dtype=float)
            all_tp_fp_labels = np.array([], dtype=bool)
        for class_index in range(self.num_class):
            if (self.num_gt_instances_per_class[class_index] == 0):
                continue
            if (not self.scores_per_class[class_index]):
                scores = np.array([], dtype=float)
                tp_fp_labels = np.array([], dtype=float)
            else:
                scores = np.concatenate(self.scores_per_class[class_index])
                tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
            if self.use_weighted_mean_ap:
                all_scores = np.append(all_scores, scores)
                all_tp_fp_labels = np.append(all_tp_fp_labels, tp_fp_labels)
            (precision, recall) = compute_precision_recall(scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
            # Restrict the PR curve to the configured recall band.
            recall_within_bound_indices = [index for (index, value) in enumerate(recall) if ((value >= self.recall_lower_bound) and (value <= self.recall_upper_bound))]
            recall_within_bound = recall[recall_within_bound_indices]
            precision_within_bound = precision[recall_within_bound_indices]
            self.precisions_per_class[class_index] = precision_within_bound
            self.recalls_per_class[class_index] = recall_within_bound
            self.sum_tp_class[class_index] = tp_fp_labels.sum()
            average_precision = compute_average_precision(precision_within_bound, recall_within_bound)
            self.average_precision_per_class[class_index] = average_precision
            logging.debug('average_precision: %f', average_precision)
        self.corloc_per_class = compute_cor_loc(self.num_gt_imgs_per_class, self.num_images_correctly_detected_per_class)
        if self.use_weighted_mean_ap:
            # Weighted mAP: a single PR curve pooled over all classes.
            num_gt_instances = np.sum(self.num_gt_instances_per_class)
            (precision, recall) = compute_precision_recall(all_scores, all_tp_fp_labels, num_gt_instances)
            recall_within_bound_indices = [index for (index, value) in enumerate(recall) if ((value >= self.recall_lower_bound) and (value <= self.recall_upper_bound))]
            recall_within_bound = recall[recall_within_bound_indices]
            precision_within_bound = precision[recall_within_bound_indices]
            mean_ap = compute_average_precision(precision_within_bound, recall_within_bound)
        else:
            mean_ap = np.nanmean(self.average_precision_per_class)
        mean_corloc = np.nanmean(self.corloc_per_class)
        return dict(per_class_ap=self.average_precision_per_class, mean_ap=mean_ap, per_class_precision=self.precisions_per_class, per_class_recall=self.recalls_per_class, per_class_corlocs=self.corloc_per_class, mean_corloc=mean_corloc)
def write_ppm(im, filename):
    """Write a grayscale image to a binary PPM (P6) file.

    `im` is a column-major 2D sequence (indexed im[x][y]) of intensities in
    [0, 1]; each value is replicated into the R, G and B channels.
    """
    maxval = 255
    w = len(im)
    h = len(im[0])
    # Write in binary mode: P6 pixel data is raw bytes. The original wrote
    # through a text-mode latin-1 handle with '%c' formatting, which raises
    # TypeError for float pixel values and depended on newline translation
    # being disabled.
    with open(filename, 'wb') as fp:
        fp.write(b'P6\n')
        fp.write(('%i %i\n%i\n' % (w, h, maxval)).encode('ascii'))
        for j in range(h):
            for i in range(w):
                c = int(im[i][j] * 255)
                fp.write(bytes((c, c, c)))
def test(args):
    """Evaluate a point-cloud completion model: run the generator on each
    incomplete cloud, compute Chamfer distance against the complete cloud,
    and report the average overall and per category."""
    # Static TF1 placeholders: partial input, ground truth, reconstruction.
    inputs = tf.placeholder(tf.float32, (1, 2048, 3))
    gt = tf.placeholder(tf.float32, (1, args.num_gt_points, 3))
    reconstruction = tf.placeholder(tf.float32, (1, (args.step_ratio * 1024), 3))
    is_training_pl = tf.placeholder(tf.bool, shape=(), name='is_training')
    mean_feature = tf.placeholder(tf.float32, (1, 1024), 'mean_features')
    model_module = importlib.import_module(('.%s' % args.model_type), 'models')
    with tf.variable_scope('generator', reuse=tf.AUTO_REUSE):
        features_partial = model_module.create_encoder(inputs)
        (_, fine) = model_module.create_decoder(features_partial, inputs, args.step_ratio, num_extract=512, mean_feature=mean_feature)
    (dist1_fine, dist2_fine) = tf_util.chamfer_distance(reconstruction, gt)
    # CD_P: mean of sqrt distances; CD_T: mean of squared distances.
    if (args.loss_type == 'CD_P'):
        total_loss = ((tf.reduce_mean(tf.sqrt(dist1_fine)) + tf.reduce_mean(tf.sqrt(dist2_fine))) / 2)
    elif (args.loss_type == 'CD_T'):
        total_loss = (tf.reduce_mean(dist1_fine) + tf.reduce_mean(dist2_fine))
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    sess = tf.Session(config=config)
    saver = tf.train.Saver()
    data_all = h5py.File(args.data_dir, 'r')
    partial_all = data_all['incomplete_pcds'][()]
    complete_all = data_all['complete_pcds'][()]
    model_list = data_all['labels'][()].astype(int)
    saver.restore(sess, os.path.join(args.checkpoint))
    # Pre-computed per-class mean features used as decoder conditioning.
    file_mean_feature = h5py.File(args.mean_features, 'r')
    mean_feature_data = file_mean_feature['mean_features'][()]
    file_mean_feature.close()
    total_time = 0
    cd_per_cat = {}
    total_cd = 0
    for (i, model_id) in enumerate(model_list):
        partial = partial_all[i]
        complete = complete_all[i]
        start = time.time()
        # NOTE(review): label == model_id here (same list, same index).
        label = model_list[i]
        mean_feature_input = mean_feature_data[label].reshape(1, 1024)
        completion = sess.run(fine, feed_dict={inputs: [partial], is_training_pl: False, mean_feature: mean_feature_input})
        cd = sess.run(total_loss, feed_dict={reconstruction: completion, gt: [complete], is_training_pl: False})
        total_time += (time.time() - start)
        total_cd += cd
        # Map the numeric label to its ShapeNet synset id for reporting.
        category = objects[label]
        key_list = list(snc_synth_id_to_category.keys())
        val_list = list(snc_synth_id_to_category.values())
        synset_id = key_list[val_list.index(category)]
        if (not cd_per_cat.get(synset_id)):
            cd_per_cat[synset_id] = []
        cd_per_cat[synset_id].append(cd)
    print(('Average Chamfer distance: %f' % (total_cd / len(model_list))))
    print('Chamfer distance per category')
    for synset_id in sorted(cd_per_cat.keys()):
        print(synset_id, ('%f' % np.mean(cd_per_cat[synset_id])))
    sess.close()
    data_all.close()
class Delta(Distribution):
    """Degenerate (deterministic) distribution: all probability mass on one point.

    Most density-related quantities are undefined or trivial for a point
    mass, so the hooks below return None/0 or raise NotImplementedError.
    """

    # NOTE(review): in some Distribution APIs `dim` and `dist_info_keys`
    # are @property accessors — confirm against the base class.
    def dim(self):
        return 0

    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        # KL divergence is not meaningfully defined for point masses.
        return None

    def kl(self, old_dist_info, new_dist_info):
        return None

    def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        raise NotImplementedError

    def log_likelihood_sym(self, x_var, dist_info_vars):
        raise NotImplementedError

    def likelihood_sym(self, x_var, dist_info_vars):
        # Defined in terms of log_likelihood_sym, which subclasses may provide.
        return TT.exp(self.log_likelihood_sym(x_var, dist_info_vars))

    def log_likelihood(self, xs, dist_info):
        return None

    def dist_info_keys(self):
        return None

    def entropy(self, dist_info):
        # A point mass has zero entropy.  (The original defined `entropy`
        # twice; the earlier NotImplementedError version was dead code —
        # the later definition always won — and has been removed.)
        return 0
class InputsAndButtonsDemo(ttk.Frame):
    """Card-styled frame demonstrating common ttk input and button widgets."""

    def __init__(self, parent):
        super().__init__(parent, style='Card.TFrame', padding=15)
        # One stretchable column so every widget fills the card's width.
        self.columnconfigure(0, weight=1)
        self.add_widgets()

    def add_widgets(self):
        """Create and grid the demo widgets, one per row."""
        self.entry = ttk.Entry(self)
        self.entry.insert(0, 'Type here')
        self.entry.grid(row=0, column=0, padx=5, pady=(0, 10), sticky='ew')

        self.spinbox = ttk.Spinbox(self, from_=0, to=100, increment=0.01)
        self.spinbox.insert(0, '3.14')
        self.spinbox.grid(row=1, column=0, padx=5, pady=10, sticky='ew')

        choices = ['Lorem', 'Ipsum', 'Dolor']
        self.combobox = ttk.Combobox(self, values=choices)
        self.combobox.current(0)
        self.combobox.grid(row=2, column=0, padx=5, pady=10, sticky='ew')

        # Same values, but the user cannot type free text.
        self.readonly_combo = ttk.Combobox(self, state='readonly', values=choices)
        self.readonly_combo.current(1)
        self.readonly_combo.grid(row=3, column=0, padx=5, pady=10, sticky='ew')

        self.menu = tkinter.Menu(self)
        for item_number in range(1, 5):
            self.menu.add_command(label=f'Menu item {item_number}')
        self.menubutton = ttk.Menubutton(self, text='Dropdown', menu=self.menu)
        self.menubutton.grid(row=4, column=0, padx=5, pady=10, sticky='nsew')

        self.separator = ttk.Separator(self)
        self.separator.grid(row=5, column=0, pady=10, sticky='ew')

        self.button = ttk.Button(self, text='Click me!')
        self.button.grid(row=6, column=0, padx=5, pady=10, sticky='ew')

        self.accentbutton = ttk.Button(self, text=' I love it!', style='Accent.TButton')
        self.accentbutton.grid(row=7, column=0, padx=5, pady=10, sticky='ew')

        self.togglebutton = ttk.Checkbutton(self, text='Toggle me!', style='Toggle.TButton')
        self.togglebutton.grid(row=8, column=0, padx=5, pady=10, sticky='nsew')
class BaseMultiLocation(MacroElement):
    """Base element holding a validated multi-line location list.

    Optionally attaches a Popup and/or Tooltip child; plain values are
    wrapped in the corresponding class via ``str()``.
    """

    def __init__(self, locations: TypeMultiLine, popup: Union[(Popup, str, None)]=None, tooltip: Union[(Tooltip, str, None)]=None):
        super().__init__()
        self.locations = validate_multi_locations(locations)
        if popup is not None:
            popup_child = popup if isinstance(popup, Popup) else Popup(str(popup))
            self.add_child(popup_child)
        if tooltip is not None:
            tooltip_child = tooltip if isinstance(tooltip, Tooltip) else Tooltip(str(tooltip))
            self.add_child(tooltip_child)

    def _get_self_bounds(self) -> List[List[Optional[float]]]:
        """Bounding box of this element's own locations."""
        return get_bounds(self.locations)
def _enter_pdb(node: Node, excinfo: ExceptionInfo[BaseException], rep: BaseReport) -> BaseReport:
    """Dump captured output and the traceback, then drop into PDB post-mortem."""
    tw = node.config.pluginmanager.getplugin('terminalreporter')._tw
    tw.line()
    showcapture = node.config.option.showcapture
    captured = (('stdout', rep.capstdout), ('stderr', rep.capstderr), ('log', rep.caplog))
    for section_name, text in captured:
        # Honour --show-capture: only dump sections the user asked for,
        # and skip empty ones.
        if showcapture not in (section_name, 'all') or not text:
            continue
        tw.sep('>', 'captured ' + section_name)
        # Strip a single trailing newline so the next separator hugs the text.
        if text.endswith('\n'):
            text = text[:-1]
        tw.line(text)
    tw.sep('>', 'traceback')
    rep.toterminal(tw)
    tw.sep('>', 'entering PDB')
    tb = _postmortem_traceback(excinfo)
    # Mark the report so the reporting machinery knows PDB was shown.
    rep._pdbshown = True
    post_mortem(tb)
    return rep
def convert_all_sentencepiece_models(model_list=None, repo_path=None, dest_dir=Path('marian_converted')):
    """Convert every SentencePiece-based Marian checkpoint to HF format.

    Downloads missing checkpoints into ``marian_ckpt`` and writes the
    converted models to ``dest_dir`` as ``opus-mt-<pair>``; returns the
    list of destination paths.
    """
    ckpt_root = Path('marian_ckpt')
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    converted_paths = []
    if model_list is None:
        model_list = make_registry(repo_path=repo_path)
    for k, prepro, download, test_set_url in tqdm(model_list):
        # Only SentencePiece-tokenized models are convertible here.
        if 'SentencePiece' not in prepro:
            continue
        ckpt_path = ckpt_root / k
        if not os.path.exists(ckpt_path):
            download_and_unzip(download, ckpt_path)
        pair_name = convert_opus_name_to_hf_name(k)
        target = dest_dir / f'opus-mt-{pair_name}'
        convert(ckpt_path, target)
        converted_paths.append(target)
    return converted_paths
def save_image(img, img_path, antialias=True, auto_open=False):
    """Save a PIL image to *img_path*, optionally downscaled and auto-opened.

    Args:
        img: PIL image (anything exposing getbbox/thumbnail/save).
        img_path: destination file path.
        antialias: when True, shrink to half the bounding-box size in place.
        auto_open: when True, open the saved file (os.startfile is
            Windows-only).

    Raises:
        ValueError: if the image has an empty bounding box (getbbox()
            returned None) — previously this crashed with a cryptic
            TypeError.
    """
    bbox = img.getbbox()  # computed once (was called twice)
    if bbox is None:
        raise ValueError('cannot save an image with an empty bounding box')
    img_size = (bbox[2], bbox[3])
    if antialias:
        target = (int(img_size[0] * 0.5), int(img_size[1] * 0.5))
        # thumbnail() resizes in place, preserving aspect ratio.
        img.thumbnail(target, Image.LANCZOS)
    img.save(img_path)
    if auto_open:
        os.startfile(img_path)  # NOTE: Windows-only API
class QuackCounter(Quackable):
    """Decorator duck that counts every quack made through any wrapped duck."""

    duck: Quackable
    # Class-level one-element list: a shared mutable counter across ALL
    # QuackCounter instances (deliberate for this decorator pattern).
    numberOfQuacks: List[int] = [0]

    def __init__(self, duck: Quackable):
        self.duck = duck

    def quack(self) -> None:
        """Delegate to the wrapped duck, then bump the global counter."""
        self.duck.quack()
        self.numberOfQuacks[0] += 1

    @staticmethod
    def getQuacks() -> int:
        """Return the total number of quacks counted so far.

        The method takes no ``self``; the @staticmethod decorator (missing
        in the original, where instance calls would raise TypeError) has
        been restored.
        """
        return QuackCounter.numberOfQuacks[0]

    def __str__(self) -> str:
        return str(self.duck)

    def __repr__(self) -> str:
        return repr(self.duck)
class User():
    """Simple user record with salted SHA-256 password hashing.

    The stored ``password`` is the 64-char hex digest followed by the
    32-char salt (96 chars total, matching the validator's length check).
    """

    def __init__(self, username, password=None, admin=False):
        self.username = username
        # Always define the attribute so isPasswordValid() works even when
        # no password was supplied (previously: AttributeError).
        self.password = None
        if password is not None:
            self.encodeAndSetPassword(password)
        self.admin = admin

    def encodeAndSetPassword(self, pw):
        """Hash *pw* with a fresh 32-char random salt and store digest+salt."""
        h = hashlib.new('sha256')
        # Python 3 fix: string.letters does not exist; ascii_letters does.
        salt = ''.join(random.choice(string.ascii_letters) for _ in range(32))
        # Python 3 fix: hashlib requires bytes, not str.
        h.update(pw.encode('utf-8'))
        h.update(salt.encode('utf-8'))
        self.password = '%s%s' % (h.hexdigest(), salt)

    def isPasswordValid(self, pw):
        """Return True when *pw* hashes (with the stored salt) to the stored digest."""
        if self.password is None:
            return False
        salt = self.password[-32:]
        h = hashlib.new('sha256')
        h.update(pw.encode('utf-8'))
        h.update(salt.encode('utf-8'))
        return self.password == ('%s%s' % (h.hexdigest(), salt))

    # NOTE(review): the bare tuple below looks like a stripped decorator —
    # probably SQLAlchemy's @validates('ID', 'username', 'password', 'admin').
    # Confirm against the original project before relying on validation.
    ('ID', 'username', 'password', 'admin')
    def validator(self, key, val):
        """Validate *val* for attribute *key*; return it or raise ValueError."""
        checks = {
            'ID': lambda v: isinstance(v, int),
            'username': lambda v: isinstance(v, str),
            # digest (64) + salt (32) == 96 characters
            'password': lambda v: isinstance(v, str) and len(v) == 96,
            'admin': lambda v: isinstance(v, bool),
        }
        if not checks[key](val):
            raise ValueError((str(val) + ' is not a valid value for ') + key)
        return val
class Podcasts(Browser):
    """Browser that manages podcast/audio feeds.

    Feeds live in the class-wide ``__feeds`` ListStore and are persisted
    to the ``FEEDS`` pickle file; a background thread re-parses stale
    feeds periodically.

    The @classmethod/@staticmethod decorators below were missing in the
    original (apparently stripped); the ``cls`` signatures and calls like
    ``Podcasts.write()`` show they are required — without them
    ``Podcasts.init(library)`` would bind ``library`` to ``cls``.
    """

    __feeds = Gtk.ListStore(object)
    headers = 'title artist performer ~people album date website language copyright organization license contact'.split()
    name = _('Podcasts')
    accelerated_name = _('_Podcasts')
    keys = ['AudioFeeds', 'Podcasts']
    priority = 20
    uses_main_library = False

    def pack(self, songpane):
        """Pack the browser and song pane into a persistent paned container."""
        container = qltk.ConfigRHPaned('browsers', 'audiofeeds_pos', 0.4)
        self.show()
        container.pack1(self, True, False)
        container.pack2(songpane, True, False)
        return container

    def unpack(self, container, songpane):
        container.remove(songpane)
        container.remove(self)

    @staticmethod
    def cell_data(col, render, model, iter, data):
        """Cell renderer callback: feed name, bold when the feed changed."""
        if model[iter][0].changed:
            render.markup = util.bold(model[iter][0].name)
        else:
            render.markup = util.escape(model[iter][0].name)
        render.set_property('markup', render.markup)

    @classmethod
    def changed(cls, feeds):
        """Flag the given feeds as changed and persist the feed list."""
        for row in cls.__feeds:
            if (row[0] in feeds):
                row[0].changed = True
                # Reassigning the row value forces the view to redraw it.
                row[0] = row[0]
        Podcasts.write()

    @classmethod
    def write(cls):
        """Pickle all feeds to the FEEDS file."""
        feeds = [row[0] for row in cls.__feeds]
        with open(FEEDS, 'wb') as f:
            pickle_dump(feeds, f, 2)

    @classmethod
    def init(cls, library):
        """Load persisted feeds (with a py2-pickle fallback), deduplicated
        by URI, then schedule the staleness check."""
        uris = set()
        feeds = []
        try:
            with open(FEEDS, 'rb') as fileobj:
                feeds = pickle_load(fileobj)
        except (OSError, PickleError):
            # Fall back to recovering feeds pickled under Python 2.
            try:
                with open(FEEDS, 'rb') as fileobj:
                    feeds = hacky_py2_unpickle_recover(fileobj)
            except Exception:
                pass
        for feed in feeds:
            if (feed.uri in uris):
                continue
            cls.__feeds.append(row=[feed])
            uris.add(feed.uri)
        GLib.idle_add(cls.__do_check)

    @classmethod
    def reload(cls, library):
        cls.__feeds = Gtk.ListStore(object)
        cls.init(library)

    @classmethod
    def __do_check(cls):
        # Run the (network-bound) check off the main loop.
        thread = threading.Thread(target=cls.__check, args=(), daemon=True)
        thread.start()

    @classmethod
    def __check(cls):
        """Re-parse feeds older than two hours, then reschedule in an hour."""
        for row in cls.__feeds:
            feed = row[0]
            if (feed.get_age() < ((2 * 60) * 60)):
                continue
            elif feed.parse():
                feed.changed = True
                row[0] = feed
        cls.write()
        GLib.timeout_add(((60 * 60) * 1000), cls.__do_check)

    def __init__(self, library):
        super().__init__(spacing=6)
        self.set_orientation(Gtk.Orientation.VERTICAL)
        self._view = view = AllTreeView()
        self.__render = render = Gtk.CellRendererText()
        render.set_property('ellipsize', Pango.EllipsizeMode.END)
        col = Gtk.TreeViewColumn('Audio Feeds', render)
        col.set_cell_data_func(render, Podcasts.cell_data)
        view.append_column(col)
        view.set_model(self.__feeds)
        view.set_rules_hint(True)
        view.set_headers_visible(False)
        swin = ScrolledWindow()
        swin.set_shadow_type(Gtk.ShadowType.IN)
        swin.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
        swin.add(view)
        self.pack_start(swin, True, True, 0)
        new = Button(_('_Add Feed...'), Icons.LIST_ADD, Gtk.IconSize.MENU)
        new.connect('clicked', self.__new_feed)
        view.get_selection().connect('changed', self.__changed)
        view.get_selection().set_mode(Gtk.SelectionMode.MULTIPLE)
        view.connect('popup-menu', self._popup_menu)
        # Accept URI drops (plain URI lists and Mozilla-style URLs).
        targets = [('text/uri-list', 0, DND_URI_LIST), ('text/x-moz-url', 0, DND_MOZ_URL)]
        targets = [Gtk.TargetEntry.new(*t) for t in targets]
        view.drag_dest_set(Gtk.DestDefaults.ALL, targets, Gdk.DragAction.COPY)
        view.connect('drag-data-received', self.__drag_data_received)
        view.connect('drag-motion', self.__drag_motion)
        view.connect('drag-leave', self.__drag_leave)
        connect_obj(self, 'destroy', self.__save, view)
        self.pack_start(Align(new, left=3, bottom=3), False, True, 0)
        for child in self.get_children():
            child.show_all()

    def menu(self, songs, library, items):
        return SongsMenu(library, songs, download=True, items=items)

    def __drag_motion(self, view, ctx, x, y, time):
        targets = [t.name() for t in ctx.list_targets()]
        # Only highlight for external drags, not internal song moves.
        if ('text/x-quodlibet-songs' not in targets):
            view.get_parent().drag_highlight()
            return True
        return False

    def __drag_leave(self, view, ctx, time):
        view.get_parent().drag_unhighlight()

    def __drag_data_received(self, view, ctx, x, y, sel, tid, etime):
        view.emit_stop_by_name('drag-data-received')
        targets = [('text/uri-list', 0, DND_URI_LIST), ('text/x-moz-url', 0, DND_MOZ_URL)]
        targets = [Gtk.TargetEntry.new(*t) for t in targets]
        view.drag_dest_set(Gtk.DestDefaults.ALL, targets, Gdk.DragAction.COPY)
        if (tid == DND_URI_LIST):
            uri = sel.get_uris()[0]
        elif (tid == DND_MOZ_URL):
            uri = sel.data.decode('utf16', 'replace').split('\n')[0]
        else:
            ctx.finish(False, False, etime)
            return
        ctx.finish(True, False, etime)
        feed = Feed(uri.encode('ascii', 'replace'))
        feed.changed = feed.parse()
        if feed:
            self.__feeds.append(row=[feed])
            Podcasts.write()
        else:
            self.feed_error(feed).run()

    def _popup_menu(self, view: Gtk.Widget) -> (Gtk.Menu | None):
        """Build and show the per-feed context menu for the selected rows."""
        (model, paths) = self._view.get_selection().get_selected_rows()
        menu = Gtk.Menu()
        refresh = MenuItem(_('_Refresh'), Icons.VIEW_REFRESH, tooltip=_('Search source for new episodes'))
        rebuild = MenuItem(_('_Rebuild'), Icons.EDIT_FIND_REPLACE, tooltip=_('Remove all existing episodes then reload from source'))
        delete = MenuItem(_('_Delete'), Icons.EDIT_DELETE, tooltip=_('Remove this podcast and its episodes'))
        connect_obj(refresh, 'activate', self.__refresh, [model[p][0] for p in paths])
        connect_obj(rebuild, 'activate', self.__rebuild, [model[p][0] for p in paths])
        connect_obj(delete, 'activate', self.__remove_paths, model, paths)
        menu.append(refresh)
        menu.append(rebuild)
        menu.append(delete)
        menu.show_all()
        menu.connect('selection-done', (lambda m: m.destroy()))
        if self._view.popup_menu(menu, 0, Gtk.get_current_event_time()):
            return menu
        return None

    def __save(self, view):
        Podcasts.write()

    def __refresh(self, feeds):
        changed = list(filter(Feed.parse, feeds))
        Podcasts.changed(changed)

    def __rebuild(self, feeds):
        # Drop all episodes first, then re-parse each feed from scratch.
        for feed in feeds:
            feed.clear()
        changed = list(filter(Feed.parse, feeds))
        Podcasts.changed(changed)

    def __remove_paths(self, model, paths):
        for path in paths:
            model.remove(model.get_iter(path))

    def activate(self):
        self.__changed(self._view.get_selection())

    def __changed(self, selection):
        """Selection handler: show the selected feeds' songs and remember
        the selection in the config."""
        (model, paths) = selection.get_selected_rows()
        if (model and paths):
            songs = []
            for path in paths:
                model[path][0].changed = False
                songs.extend(model[path][0])
            self.songs_selected(songs, True)
            config.set('browsers', 'audiofeeds', '\t'.join([model[path][0].name for path in paths]))

    def __new_feed(self, activator):
        feed = AddFeedDialog(self).run()
        if (feed is not None):
            feed.changed = feed.parse()
            if feed:
                self.__feeds.append(row=[feed])
                Podcasts.write()
            else:
                self.feed_error(feed).run()

    def feed_error(self, feed: Feed) -> ErrorMessage:
        return ErrorMessage(self, _('Unable to add feed'), (_('%s could not be added. The server may be down, or the location may not be a podcast / audio feed.') % util.bold(util.escape(feed.uri))), escape_desc=False)

    def restore(self):
        try:
            names = config.get('browsers', 'audiofeeds').split('\t')
        except Exception:
            pass
        else:
            self._view.select_by_func((lambda r: (r[0].name in names)))
class UpResBlock(nn.Module):
    """Residual block: two channel-preserving 3x3 convs with a skip connection."""

    def __init__(self, in_channel):
        super().__init__()
        # conv -> LeakyReLU(0.2, inplace) -> conv; spatial size unchanged
        # (kernel 3, stride 1, padding 1).
        self.body = nn.Sequential(
            nn.Conv2d(in_channel, in_channel, 3, 1, 1),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(in_channel, in_channel, 3, 1, 1),
        )

    def forward(self, x):
        """Return ``x + body(x)`` (identity shortcut)."""
        residual = self.body(x)
        return x + residual
class test_pgpass(unittest.TestCase):
    """Check pgpass parsing and password lookup against both sample files."""

    def runTest(self):
        parsed_simple = client_pgpass.parse(StringIO(passfile_sample))
        parsed_difficult = client_pgpass.parse(StringIO(difficult_passfile_sample))
        cases = (
            (parsed_simple, passfile_sample_map, passfile_sample),
            (parsed_difficult, difficult_passfile_sample_map, difficult_passfile_sample),
        )
        for parsed, expected_map, raw_text in cases:
            for key, expected_pw in expected_map.items():
                found_pw = client_pgpass.lookup_password(parsed, key)
                self.assertEqual(found_pw, expected_pw, ('password lookup incongruity, expecting %r got %r with %r in \n%s' % (expected_pw, found_pw, key, raw_text)))
class KnownValues(unittest.TestCase):
    """Regression tests for UCISD (unrestricted CISD) in pyscf.

    NOTE(review): several expected values look truncated/redacted
    (e.g. ``(- 0.)``, ``(- 838.)``, ``466.``, ``(- 76.)``) — restore the
    real reference numbers from the original source before running.
    """
    def test_from_fcivec(self):
        """Round-trip CISD vector <-> FCI vector conversions."""
        myci = scf.UHF(gto.M()).apply(ci.CISD)
        (nocca, noccb) = nelec = (3, 2)
        (nvira, nvirb) = (5, 6)
        myci.nocc = nocc = (nocca, noccb)
        nmo = 8
        myci.nmo = (nmo, nmo)
        numpy.random.seed(12)
        civec = numpy.random.random(myci.vector_size())
        ci0 = ucisd.to_fcivec(civec, nmo, nelec)
        self.assertAlmostEqual(abs((civec - ucisd.from_fcivec(ci0, nmo, nelec))).max(), 0, 9)
        # Second round trip with amplitudes assembled by hand.
        nocc = 3
        nvir = 5
        nmo = (nocc + nvir)
        c1a = numpy.random.random((nocc, nvir))
        c1b = numpy.random.random((nocc, nvir))
        c2aa = numpy.random.random((nocc, nocc, nvir, nvir))
        c2bb = numpy.random.random((nocc, nocc, nvir, nvir))
        c2ab = numpy.random.random((nocc, nocc, nvir, nvir))
        c1 = (c1a, c1b)
        c2 = (c2aa, c2ab, c2bb)
        cisdvec = ucisd.amplitudes_to_cisdvec(1.0, c1, c2)
        fcivec = ucisd.to_fcivec(cisdvec, nmo, (nocc * 2))
        cisdvec1 = ucisd.from_fcivec(fcivec, nmo, (nocc * 2))
        self.assertAlmostEqual(abs((cisdvec - cisdvec1)).max(), 0, 12)
        ci1 = ucisd.to_fcivec(cisdvec1, nmo, (nocc, nocc))
        self.assertAlmostEqual(abs((fcivec - ci1)).max(), 0, 12)
    def test_h4(self):
        """H4 cation: UCISD energies and density matrices vs full CI."""
        mol = gto.Mole()
        mol.verbose = 7
        mol.output = '/dev/null'
        mol.atom = [['H', (1.0, (- 1.0), 0.0)], ['H', (0.0, (- 1.0), (- 1.0))], ['H', (1.0, (- 0.5), 0.0)], ['H', (0.0, (- 1.0), 1.0)]]
        mol.charge = 2
        mol.spin = 2
        mol.basis = '3-21g'
        mol.build()
        mf = scf.RHF(mol).run(conv_tol=1e-14)
        myci = ci.CISD(mf)
        myci.kernel()
        self.assertAlmostEqual(myci.e_tot, (- 0.), 8)
        mf = scf.UHF(mol).run(conv_tol=1e-14)
        myci = ci.CISD(mf)
        ecisd = myci.kernel()[0]
        self.assertAlmostEqual(ecisd, (- 0.), 8)
        self.assertAlmostEqual(myci.e_tot, (- 0.), 8)
        eris = myci.ao2mo()
        ecisd = myci.kernel(eris=eris)[0]
        # Build the MO-basis integrals and compare against exact (FCI) results.
        eri_aa = ao2mo.kernel(mf._eri, mf.mo_coeff[0])
        eri_bb = ao2mo.kernel(mf._eri, mf.mo_coeff[1])
        eri_ab = ao2mo.kernel(mf._eri, [mf.mo_coeff[0], mf.mo_coeff[0], mf.mo_coeff[1], mf.mo_coeff[1]])
        h1a = reduce(numpy.dot, (mf.mo_coeff[0].T, mf.get_hcore(), mf.mo_coeff[0]))
        h1b = reduce(numpy.dot, (mf.mo_coeff[1].T, mf.get_hcore(), mf.mo_coeff[1]))
        (efci, fcivec) = fci.direct_uhf.kernel((h1a, h1b), (eri_aa, eri_ab, eri_bb), h1a.shape[0], mol.nelec)
        self.assertAlmostEqual((mf.e_tot + ecisd), (efci + mol.energy_nuc()), 9)
        (dm1ref, dm2ref) = fci.direct_uhf.make_rdm12s(fcivec, h1a.shape[0], mol.nelec)
        rdm1 = myci.make_rdm1(myci.ci, myci.get_nmo(), myci.get_nocc())
        rdm2 = myci.make_rdm2(myci.ci, myci.get_nmo(), myci.get_nocc())
        self.assertAlmostEqual(abs((dm1ref[0] - rdm1[0])).max(), 0, 4)
        self.assertAlmostEqual(abs((dm1ref[1] - rdm1[1])).max(), 0, 4)
        self.assertAlmostEqual(abs((dm2ref[0] - rdm2[0])).max(), 0, 4)
        self.assertAlmostEqual(abs((dm2ref[1] - rdm2[1])).max(), 0, 4)
        self.assertAlmostEqual(abs((dm2ref[2] - rdm2[2])).max(), 0, 4)
    def test_h4_a(self):
        """H4 anion: diagonal/contract consistency against fci.direct_uhf."""
        mol = gto.Mole()
        mol.verbose = 0
        mol.atom = [['H', (1.0, (- 1.0), 0.0)], ['H', (0.0, (- 1.0), (- 1.0))], ['H', (1.0, (- 0.5), 0.0)], ['H', (0.0, (- 1.0), 1.0)]]
        mol.charge = (- 2)
        mol.spin = 2
        mol.basis = '3-21g'
        mol.build()
        mf = scf.UHF(mol).run(conv_tol=1e-14)
        ehf0 = (mf.e_tot - mol.energy_nuc())
        myci = ci.CISD(mf)
        numpy.random.seed(10)
        nao = mol.nao_nr()
        mo = numpy.random.random((2, nao, nao))
        eris = myci.ao2mo(mo)
        self.assertAlmostEqual(lib.fp(myci.make_diagonal(eris)), (- 838.), 6)
        numpy.random.seed(12)
        (nocca, noccb) = mol.nelec
        nmo = mf.mo_occ[0].size
        nvira = (nmo - nocca)
        nvirb = (nmo - noccb)
        c1a = (0.1 * numpy.random.random((nocca, nvira)))
        c1b = (0.1 * numpy.random.random((noccb, nvirb)))
        c2aa = (0.1 * numpy.random.random((nocca, nocca, nvira, nvira)))
        c2bb = (0.1 * numpy.random.random((noccb, noccb, nvirb, nvirb)))
        c2ab = (0.1 * numpy.random.random((nocca, noccb, nvira, nvirb)))
        cisdvec = myci.amplitudes_to_cisdvec(1.0, (c1a, c1b), (c2aa, c2ab, c2bb))
        hcisd0 = myci.contract(myci.amplitudes_to_cisdvec(1.0, (c1a, c1b), (c2aa, c2ab, c2bb)), eris)
        self.assertAlmostEqual(lib.fp(hcisd0), 466., 6)
        eris = myci.ao2mo(mf.mo_coeff)
        hcisd0 = myci.contract(cisdvec, eris)
        eri_aa = ao2mo.kernel(mf._eri, mf.mo_coeff[0])
        eri_bb = ao2mo.kernel(mf._eri, mf.mo_coeff[1])
        eri_ab = ao2mo.kernel(mf._eri, [mf.mo_coeff[0], mf.mo_coeff[0], mf.mo_coeff[1], mf.mo_coeff[1]])
        h1a = reduce(numpy.dot, (mf.mo_coeff[0].T, mf.get_hcore(), mf.mo_coeff[0]))
        h1b = reduce(numpy.dot, (mf.mo_coeff[1].T, mf.get_hcore(), mf.mo_coeff[1]))
        h2e = fci.direct_uhf.absorb_h1e((h1a, h1b), (eri_aa, eri_ab, eri_bb), h1a.shape[0], mol.nelec, 0.5)
        nmo = mf.mo_coeff[0].shape[1]
        fcivec = myci.to_fcivec(cisdvec, nmo, mol.nelec)
        hci1 = fci.direct_uhf.contract_2e(h2e, fcivec, h1a.shape[0], mol.nelec)
        hci1 -= (ehf0 * fcivec)
        hcisd1 = myci.from_fcivec(hci1, nmo, mol.nelec)
        self.assertAlmostEqual(abs((hcisd1 - hcisd0)).max(), 0, 8)
        ecisd = myci.kernel(eris=eris)[0]
        efci = fci.direct_uhf.kernel((h1a, h1b), (eri_aa, eri_ab, eri_bb), h1a.shape[0], mol.nelec)[0]
        self.assertAlmostEqual(ecisd, (- 0.), 8)
        self.assertTrue((((myci.e_tot - mol.energy_nuc()) - efci) < 0.002))
    def test_rdm_h4(self):
        """Triplet water: UCISD 1-/2-RDMs vs FCI, plus energy from RDMs."""
        mol = gto.Mole()
        mol.verbose = 7
        mol.output = '/dev/null'
        mol.atom = [['O', (0.0, 0.0, 0.0)], ['H', (0.0, (- 0.757), 0.587)], ['H', (0.0, 0.757, 0.587)]]
        mol.spin = 2
        mol.basis = 'sto-3g'
        mol.build()
        mf = scf.UHF(mol).run(conv_tol=1e-14)
        myci = ucisd.UCISD(mf)
        (ecisd, civec) = myci.kernel()
        self.assertAlmostEqual(ecisd, (- 0.), 8)
        nmoa = nmob = nmo = mf.mo_coeff[1].shape[1]
        nocc = (6, 4)
        ci0 = myci.to_fcivec(civec, nmo, nocc)
        (ref1, ref2) = fci.direct_uhf.make_rdm12s(ci0, nmo, nocc)
        rdm1 = myci.make_rdm1(civec)
        rdm2 = myci.make_rdm2(civec)
        self.assertAlmostEqual(abs((rdm1[0] - ref1[0])).max(), 0, 6)
        self.assertAlmostEqual(abs((rdm1[1] - ref1[1])).max(), 0, 6)
        self.assertAlmostEqual(abs((rdm2[0] - ref2[0])).max(), 0, 6)
        self.assertAlmostEqual(abs((rdm2[1] - ref2[1])).max(), 0, 6)
        self.assertAlmostEqual(abs((rdm2[2] - ref2[2])).max(), 0, 6)
        # Partial traces of the 2-RDM must reproduce the 1-RDM.
        dm1a = (numpy.einsum('ijkk->ji', rdm2[0]) / (mol.nelectron - 1))
        dm1a += (numpy.einsum('ijkk->ji', rdm2[1]) / (mol.nelectron - 1))
        self.assertAlmostEqual(abs((rdm1[0] - dm1a)).max(), 0, 9)
        dm1b = (numpy.einsum('kkij->ji', rdm2[2]) / (mol.nelectron - 1))
        dm1b += (numpy.einsum('kkij->ji', rdm2[1]) / (mol.nelectron - 1))
        self.assertAlmostEqual(abs((rdm1[1] - dm1b)).max(), 0, 9)
        eri_aa = ao2mo.kernel(mf._eri, mf.mo_coeff[0], compact=False).reshape(([nmoa] * 4))
        eri_bb = ao2mo.kernel(mf._eri, mf.mo_coeff[1], compact=False).reshape(([nmob] * 4))
        eri_ab = ao2mo.kernel(mf._eri, [mf.mo_coeff[0], mf.mo_coeff[0], mf.mo_coeff[1], mf.mo_coeff[1]], compact=False)
        eri_ab = eri_ab.reshape(nmoa, nmoa, nmob, nmob)
        h1a = reduce(numpy.dot, (mf.mo_coeff[0].T, mf.get_hcore(), mf.mo_coeff[0]))
        h1b = reduce(numpy.dot, (mf.mo_coeff[1].T, mf.get_hcore(), mf.mo_coeff[1]))
        # Total energy recomputed from the RDMs must match e_tot.
        e2 = ((((numpy.einsum('ij,ji', h1a, rdm1[0]) + numpy.einsum('ij,ji', h1b, rdm1[1])) + (numpy.einsum('ijkl,ijkl', eri_aa, rdm2[0]) * 0.5)) + numpy.einsum('ijkl,ijkl', eri_ab, rdm2[1])) + (numpy.einsum('ijkl,ijkl', eri_bb, rdm2[2]) * 0.5))
        e2 += mol.energy_nuc()
        self.assertAlmostEqual(myci.e_tot, e2, 9)
    def test_rdm12(self):
        """Closed-shell water: energy/trace consistency of UCISD RDMs."""
        mol = gto.Mole()
        mol.verbose = 0
        mol.atom = [['O', (0.0, 0.0, 0.0)], ['H', (0.0, (- 0.757), 0.587)], ['H', (0.0, 0.757, 0.587)]]
        mol.basis = {'H': 'sto-3g', 'O': 'sto-3g'}
        mol.build()
        mf = scf.UHF(mol).run(conv_tol=1e-12)
        myci = mf.CISD()
        eris = myci.ao2mo()
        (ecisd, civec) = myci.kernel(eris=eris)
        self.assertAlmostEqual(ecisd, (- 0.), 8)
        nmoa = mf.mo_energy[0].size
        nmob = mf.mo_energy[1].size
        rdm1 = myci.make_rdm1(civec)
        rdm2 = myci.make_rdm2(civec)
        eri_aa = ao2mo.kernel(mf._eri, mf.mo_coeff[0], compact=False).reshape(([nmoa] * 4))
        eri_bb = ao2mo.kernel(mf._eri, mf.mo_coeff[1], compact=False).reshape(([nmob] * 4))
        eri_ab = ao2mo.kernel(mf._eri, [mf.mo_coeff[0], mf.mo_coeff[0], mf.mo_coeff[1], mf.mo_coeff[1]], compact=False)
        eri_ab = eri_ab.reshape(nmoa, nmoa, nmob, nmob)
        h1a = reduce(numpy.dot, (mf.mo_coeff[0].T, mf.get_hcore(), mf.mo_coeff[0]))
        h1b = reduce(numpy.dot, (mf.mo_coeff[1].T, mf.get_hcore(), mf.mo_coeff[1]))
        e2 = ((((numpy.einsum('ij,ji', h1a, rdm1[0]) + numpy.einsum('ij,ji', h1b, rdm1[1])) + (numpy.einsum('ijkl,ijkl', eri_aa, rdm2[0]) * 0.5)) + numpy.einsum('ijkl,ijkl', eri_ab, rdm2[1])) + (numpy.einsum('ijkl,ijkl', eri_bb, rdm2[2]) * 0.5))
        self.assertAlmostEqual(((ecisd + mf.e_tot) - mol.energy_nuc()), e2, 8)
        from_dm2 = ((numpy.einsum('ijkk->ji', rdm2[0]) + numpy.einsum('ijkk->ji', rdm2[1])) / (mol.nelectron - 1))
        self.assertAlmostEqual(abs((rdm1[0] - from_dm2)).max(), 0, 8)
        from_dm2 = ((numpy.einsum('ijkk->ji', rdm2[2]) + numpy.einsum('kkij->ji', rdm2[1])) / (mol.nelectron - 1))
        self.assertAlmostEqual(abs((rdm1[1] - from_dm2)).sum(), 0, 8)
    def test_ao_direct(self):
        """AO-direct path with frozen orbitals and a tiny memory cap."""
        mol = gto.Mole()
        mol.verbose = 0
        mol.atom = [['O', (0.0, 0.0, 0.0)], ['H', (0.0, (- 0.757), 0.587)], ['H', (0.0, 0.757, 0.587)]]
        mol.spin = 2
        mol.basis = 'ccpvdz'
        mol.build()
        mf = scf.UHF(mol).run(conv_tol=1e-14)
        myci = ci.CISD(mf)
        myci.max_memory = 0.1
        myci.frozen = [[1, 2], [1, 2]]
        myci.direct = True
        (ecisd, civec) = myci.kernel()
        self.assertAlmostEqual(ecisd, (- 0.), 8)
    def test_trans_rdm_with_frozen(self):
        """Transition 1-RDMs with various frozen-orbital selections vs FCI."""
        mol = gto.M(atom='\n O 0. 0. .0\n H 0. -0.757 0.587\n H 0. 0.757 0.587', basis='sto3g')
        mf = scf.UHF(mol).run()
        def check_frozen(frozen):
            # Compare CISD transition density against the FCI reference
            # for the given frozen-orbital lists.
            myci = ci.UCISD(mf)
            myci.frozen = frozen
            myci.nroots = 2
            myci.kernel()
            nocc = myci.nocc
            nmo = myci.nmo
            norb = mf.mo_coeff[0].shape[1]
            nfroz = len(frozen[0])
            cibra = ((myci.ci[0] + myci.ci[1]) * numpy.sqrt(0.5))
            fcibra = ucisd.to_fcivec(cibra, norb, mol.nelec, myci.frozen)
            fciket = ucisd.to_fcivec(myci.ci[1], norb, mol.nelec, myci.frozen)
            fcidm1 = fci.direct_spin1.trans_rdm1s(fcibra, fciket, norb, mol.nelec)
            cidm1 = myci.trans_rdm1(cibra, myci.ci[1], nmo, nocc)
            self.assertAlmostEqual(abs((fcidm1[0] - cidm1[0])).max(), 0, 12)
            self.assertAlmostEqual(abs((fcidm1[1] - cidm1[1])).max(), 0, 12)
        check_frozen([[5], [6]])
        check_frozen([[3], [5]])
        check_frozen([[1, 3], [2, 5]])
        check_frozen([[2, 5], [5]])
    def test_overlap(self):
        """CISD-vector overlap with a nontrivial MO overlap matrix vs FCI."""
        nmo = 8
        nocc = (nocca, noccb) = (4, 3)
        numpy.random.seed(2)
        (nvira, nvirb) = ((nmo - nocca), (nmo - noccb))
        cibra = ucisd.amplitudes_to_cisdvec(numpy.random.rand(1)[0], (numpy.random.rand(nocca, nvira), numpy.random.rand(noccb, nvirb)), (numpy.random.rand(nocca, nocca, nvira, nvira), numpy.random.rand(nocca, noccb, nvira, nvirb), numpy.random.rand(noccb, noccb, nvirb, nvirb)))
        ciket = ucisd.amplitudes_to_cisdvec(numpy.random.rand(1)[0], (numpy.random.rand(nocca, nvira), numpy.random.rand(noccb, nvirb)), (numpy.random.rand(nocca, nocca, nvira, nvira), numpy.random.rand(nocca, noccb, nvira, nvirb), numpy.random.rand(noccb, noccb, nvirb, nvirb)))
        fcibra = ucisd.to_fcivec(cibra, nmo, nocc)
        fciket = ucisd.to_fcivec(ciket, nmo, nocc)
        s_mo = numpy.random.random((nmo, nmo))
        s0 = fci.addons.overlap(fcibra, fciket, nmo, nocc, s_mo)
        s1 = ucisd.overlap(cibra, ciket, nmo, nocc, (s_mo, s_mo))
        self.assertAlmostEqual(s1, s0, 9)
    def test_cisdvec_to_amplitudes_overwritten(self):
        """Amplitude views must be copies: mutating them must not touch the vector."""
        mol = gto.M()
        myci = scf.UHF(mol).apply(ci.UCISD)
        nelec = (3, 3)
        nocc = nelec
        nmo = (5, 5)
        myci.nocc = nocc
        myci.nmo = nmo
        vec = numpy.zeros(myci.vector_size())
        vec_orig = vec.copy()
        (c0, t1, t2) = myci.cisdvec_to_amplitudes(vec)
        (t1a, t1b) = t1
        (t2aa, t2ab, t2bb) = t2
        t1a[:] = 1
        t1b[:] = 1
        t2aa[:] = 1
        t2ab[:] = 1
        t2bb[:] = 1
        self.assertAlmostEqual(abs((vec - vec_orig)).max(), 0, 15)
    def test_with_df_s0(self):
        """Density-fitted singlet water converted RHF->UHF, UCISD energy."""
        mol = gto.Mole()
        mol.atom = [[8, (0.0, 0.0, 0.0)], [1, (0.0, (- 0.757), 0.587)], [1, (0.0, 0.757, 0.587)]]
        mol.basis = '631g'
        mol.build()
        rhf = scf.RHF(mol).density_fit(auxbasis='weigend')
        rhf.conv_tol_grad = 1e-08
        rhf.kernel()
        mf = scf.addons.convert_to_uhf(rhf)
        myci = ci.UCISD(mf)
        myci.kernel()
        self.assertAlmostEqual(myci.e_tot, (- 76.), 8)
    def test_with_df_s2(self):
        """Density-fitted triplet water, UCISD energy."""
        mol = gto.Mole()
        mol.atom = [[8, (0.0, 0.0, 0.0)], [1, (0.0, (- 0.757), 0.587)], [1, (0.0, 0.757, 0.587)]]
        mol.basis = '631g'
        mol.spin = 2
        mol.build()
        mf = scf.UHF(mol).density_fit(auxbasis='weigend')
        mf.conv_tol_grad = 1e-08
        mf.kernel()
        myci = ci.UCISD(mf)
        myci.kernel()
        self.assertAlmostEqual(myci.e_tot, (- 75.), 8)
def test_ellipsoid__semi_minor_not_computed():
    """A geos CRS with explicit +a/+b keeps the given semi-minor axis
    rather than deriving it."""
    crs = CRS('+proj=geos +lon_0=-89.5 +a=6378137.0 +b=6356752.31 h=12345')
    ellipsoid = crs.datum.ellipsoid
    assert ellipsoid.semi_minor_metre == 6356752.31
    assert ellipsoid.semi_major_metre == 6378137.0
    assert not ellipsoid.is_semi_minor_computed
# NOTE(review): the two bare keyword-argument tuples in the original were a
# SyntaxError — clearly stripped hypothesis decorators; restored below.
@given(number=strategies.integers(min_value=1), base=strategies.integers(min_value=2))
@example(number=125, base=5)
def test_ceil_log_hypothesis(number, base):
    """ceil_log(number, base) must be the smallest exponent whose power
    reaches number: base**exp >= number and base**(exp-1) < number."""
    exponent = utils.ceil_log(number, base)
    assert (base ** exponent) >= number
    if exponent > 1:
        assert (base ** (exponent - 1)) < number
def load_clip_to_cpu(cfg):
    """Download (if needed) and build the CLIP backbone named in *cfg*, on CPU."""
    backbone = cfg.MODEL.BACKBONE.NAME
    model_path = clip._download(clip._MODELS[backbone])
    try:
        # TorchScript archives load directly; .eval() disables dropout etc.
        model = torch.jit.load(model_path, map_location='cpu').eval()
        state_dict = None
    except RuntimeError:
        # Not a TorchScript archive: fall back to a plain state dict.
        state_dict = torch.load(model_path, map_location='cpu')
    # Build from the state dict when we have one, otherwise from the
    # JIT model's parameters.
    return clip.build_model(state_dict or model.state_dict())
class TestEnsureParentDirFunc(unittest.TestCase):
    """Tests for misc.ensure_parent_dir."""

    def setUp(self):
        # Fresh scratch directory for every test.
        self._temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self._temp_dir.cleanup()

    def test(self):
        """ensure_parent_dir must create all missing parent directories."""
        parent = os.path.join(self._temp_dir.name, 'sub', 'dir')
        target = os.path.join(parent, 'file.txt')
        self.assertEqual(False, os.path.isdir(parent))
        misc.ensure_parent_dir(target)
        self.assertEqual(True, os.path.isdir(parent))
def _migrate_v19(preset: dict) -> dict:
if (preset['game'] == 'cave_story'):
itemconfig = preset['configuration']['major_items_configuration']['items_state']
ammoconfig = preset['configuration']['ammo_configuration']['items_state']
if (itemconfig.get('Base Missiles') is not None):
return preset
itemconfig['Base Missiles'] = {'num_included_in_starting_items': 1, 'included_ammo': [5]}
itemconfig['Missile Launcher'].pop('included_ammo', None)
itemconfig['Super Missile Launcher'].pop('included_ammo', None)
itemconfig['Progressive Missile Launcher'].pop('included_ammo', None)
itemconfig['Small Life Capsule'] = itemconfig.pop('3HP Life Capsule')
itemconfig['Medium Life Capsule'] = itemconfig.pop('4HP Life Capsule')
itemconfig['Large Life Capsule'] = itemconfig.pop('5HP Life Capsule')
ammoconfig['Large Missile Expansion'] = ammoconfig.pop('Missile Expansion (24)')
preset['configuration']['major_items_configuration']['items_state'] = itemconfig
preset['configuration']['ammo_configuration']['items_state'] = ammoconfig
return preset |
class TestElasticSearchCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('ElasticSearchCollector', {})
self.collector = ElasticSearchCollector(config, None)
def test_import(self):
self.assertTrue(ElasticSearchCollector)
def test_new__instances_default(self):
config = get_collector_config('ElasticSearchCollector', {})
self.collector = ElasticSearchCollector(config, None)
self.assertEqual(self.collector.instances, {'': ('127.0.0.1', 9200)})
def test_new__instances_single(self):
config = get_collector_config('ElasticSearchCollector', {'instances': 'bla'})
self.collector = ElasticSearchCollector(config, None)
self.assertEqual(self.collector.instances, {'default': ('bla', 9200)})
def test_new__instances_multi(self):
config = get_collector_config('ElasticSearchCollector', {'instances': ['something', '', ':1234']})
self.collector = ElasticSearchCollector(config, None)
self.assertEqual(self.collector.instances, {'default': ('something', 9200), 'foo': ('1234', 9200), 'bar': ('bla', 1234)})
(Collector, 'publish')
def test_should_work_with_real_data_and_basic_auth(self, publish_mock):
self.collector.config['user'] = 'user'
self.collector.config['password'] = 'password'
self.test_should_work_with_real_data()
    # NOTE(review): the bare tuple below is almost certainly a stripped
    # @patch.object(Collector, 'publish') decorator — confirm and restore.
    (Collector, 'publish')
    def test_should_work_with_real_data(self, publish_mock):
        """Collect from canned fixtures and check the published metrics."""
        # Fixtures are consumed in request order: node stats, cluster
        # stats, then per-index stats.
        returns = [self.getFixture('stats'), self.getFixture('cluster_stats'), self.getFixture('indices_stats')]
        urlopen_mock = patch('urllib2.urlopen', Mock(side_effect=(lambda *args: returns.pop(0))))
        self.collector.config['cluster'] = True
        urlopen_mock.start()
        self.collector.collect()
        urlopen_mock.stop()
        self.assertEqual(urlopen_mock.new.call_count, 3)
        # NOTE(review): this dict literal is corrupted/redacted (missing keys
        # and values, e.g. "' 1," and empty entries) — restore the expected
        # numbers from the original fixture before running.
        metrics = {' 1, 'indices.docs.count': , 'indices.docs.deleted': 2692068, 'indices.datastore.size': , 'indices._all.docs.count': 4, 'indices._all.docs.deleted': 0, 'indices._all.datastore.size': 2674, 'indices.test.docs.count': 4, 'indices.test.docs.deleted': 0, 'indices.test.datastore.size': 2674, 'process.cpu.percent': 58, 'process.mem.resident': , 'process.mem.share': , 'process.mem.virtual': , 'disk.reads.count': 55996, 'disk.reads.size': , 'disk.writes.count': 5808198, 'disk.writes.size': , 'thread_pool.generic.threads': 1, 'network.tcp.active_opens': 2299, 'jvm.mem.pools.CMS_Old_Gen.used': , 'cluster_health.nodes.total': 3, 'cluster_health.nodes.data': 3, 'cluster_health.shards.active_primary': 5, 'cluster_health.shards.active': 10, 'cluster_health.shards.relocating': 0, 'cluster_health.shards.unassigned': 0, 'cluster_health.shards.initializing': 0, 'cluster_health.status': 2}
        self.setDocExample(collector=self.collector.__class__.__name__, metrics=metrics, defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)
    # NOTE(review): the bare tuple below is almost certainly a stripped
    # @patch.object(Collector, 'publish') decorator — confirm and restore.
    (Collector, 'publish')
    def test_should_work_with_real_data_v2(self, publish_mock):
        """Same as test_should_work_with_real_data but with the v2
        cluster-stats fixture (adds pending_tasks / active_percent etc.)."""
        returns = [self.getFixture('stats'), self.getFixture('cluster_stats_v2'), self.getFixture('indices_stats')]
        urlopen_mock = patch('urllib2.urlopen', Mock(side_effect=(lambda *args: returns.pop(0))))
        self.collector.config['cluster'] = True
        urlopen_mock.start()
        self.collector.collect()
        urlopen_mock.stop()
        self.assertEqual(urlopen_mock.new.call_count, 3)
        # NOTE(review): corrupted/redacted dict literal — restore the expected
        # numbers from the original fixture before running.
        metrics = {' 1, 'indices.docs.count': , 'indices.docs.deleted': 2692068, 'indices.datastore.size': , 'indices._all.docs.count': 4, 'indices._all.docs.deleted': 0, 'indices._all.datastore.size': 2674, 'indices.test.docs.count': 4, 'indices.test.docs.deleted': 0, 'indices.test.datastore.size': 2674, 'process.cpu.percent': 58, 'process.mem.resident': , 'process.mem.share': , 'process.mem.virtual': , 'disk.reads.count': 55996, 'disk.reads.size': , 'disk.writes.count': 5808198, 'disk.writes.size': , 'thread_pool.generic.threads': 1, 'network.tcp.active_opens': 2299, 'jvm.mem.pools.CMS_Old_Gen.used': , 'cluster_health.nodes.pending_tasks': 266, 'cluster_health.nodes.data': 4, 'cluster_health.nodes.total': 8, 'cluster_health.shards.active_primary': 10, 'cluster_health.shards.active': 30, 'cluster_health.shards.active_percent': 100, 'cluster_health.shards.delayed_unassigned': 0, 'cluster_health.shards.relocating': 0, 'cluster_health.shards.unassigned': 0, 'cluster_health.shards.initializing': 0, 'cluster_health.status': 2}
        self.assertPublishedMany(publish_mock, metrics)
# NOTE(review): the line below looks truncated -- upstream diamond tests use
# '@patch.object(Collector, 'publish')' here; restore before relying on this test.
(Collector, 'publish')
def test_should_work_with_real_data_logstash_mode(self, publish_mock):
    """With logstash_mode on, per-day indices are grouped under one prefix."""
    returns = [self.getFixture('stats'), self.getFixture('logstash_indices_stats')]
    urlopen_mock = patch('urllib2.urlopen', Mock(side_effect=(lambda *args: returns.pop(0))))
    self.collector.config['logstash_mode'] = True
    urlopen_mock.start()
    self.collector.collect()
    urlopen_mock.stop()
    self.assertEqual(urlopen_mock.new.call_count, 2)
    # NOTE(review): this dict literal is corrupted (several keys have missing
    # values); it cannot parse as-is and must be restored from the original source.
    metrics = {'indices.docs.count': , 'indices.docs.deleted': 2692068, 'indices.datastore.size': , 'indices._all.docs.count': , 'indices._all.docs.deleted': 0, 'indices._all.datastore.size': , 'indices._all.get.exists_time_in_millis': 0, 'indices._all.get.exists_total': 0, 'indices._all.get.missing_time_in_millis': 0, 'indices._all.get.missing_total': 0, 'indices._all.get.time_in_millis': 0, 'indices._all.get.total': 0, 'indices._all.indexing.delete_time_in_millis': 0, 'indices._all.indexing.delete_total': 0, 'indices._all.indexing.index_time_in_millis': , 'indices._all.indexing.index_total': , 'indices._all.search.fetch_time_in_millis': 6962, 'indices._all.search.fetch_total': 4084, 'indices._all.search.query_time_in_millis': 41211, 'indices._all.search.query_total': 4266, 'indices._all.store.throttle_time_in_millis': 0, 'indices.logstash-adm-syslog.indexes_in_group': 3, 'indices.logstash-adm-syslog.datastore.size': , 'indices.logstash-adm-syslog.docs.count': , 'indices.logstash-adm-syslog.docs.deleted': 0, 'indices.logstash-adm-syslog.get.exists_time_in_millis': 0, 'indices.logstash-adm-syslog.get.exists_total': 0, 'indices.logstash-adm-syslog.get.missing_time_in_millis': 0, 'indices.logstash-adm-syslog.get.missing_total': 0, 'indices.logstash-adm-syslog.get.time_in_millis': 0, 'indices.logstash-adm-syslog.get.total': 0, 'indices.logstash-adm-syslog.indexing.delete_time_in_millis': 0, 'indices.logstash-adm-syslog.indexing.delete_total': 0, 'indices.logstash-adm-syslog.indexing.index_time_in_millis': , 'indices.logstash-adm-syslog.indexing.index_total': , 'indices.logstash-adm-syslog.search.fetch_time_in_millis': 6962, 'indices.logstash-adm-syslog.search.fetch_total': 4084, 'indices.logstash-adm-syslog.search.query_time_in_millis': 41211, 'indices.logstash-adm-syslog.search.query_total': 4266, 'indices.logstash-adm-syslog.store.throttle_time_in_millis': 0}
    self.setDocExample(collector=self.collector.__class__.__name__, metrics=metrics, defaultpath=self.collector.config['path'])
    self.assertPublishedMany(publish_mock, metrics)
# NOTE(review): the line below looks truncated -- upstream diamond tests use
# '@patch.object(Collector, 'publish')' here; restore before relying on this test.
(Collector, 'publish')
def test_should_work_with_real_data_logstash_hourlymode(self, publish_mock):
    """Same grouping behaviour as logstash mode, but with hourly index fixtures."""
    returns = [self.getFixture('stats'), self.getFixture('logstash_hourly_indices_stats')]
    urlopen_mock = patch('urllib2.urlopen', Mock(side_effect=(lambda *args: returns.pop(0))))
    self.collector.config['logstash_mode'] = True
    urlopen_mock.start()
    self.collector.collect()
    urlopen_mock.stop()
    self.assertEqual(urlopen_mock.new.call_count, 2)
    # NOTE(review): this dict literal is corrupted (several keys have missing
    # values); it cannot parse as-is and must be restored from the original source.
    metrics = {'indices.docs.count': , 'indices.docs.deleted': 2692068, 'indices.datastore.size': , 'indices._all.docs.count': , 'indices._all.docs.deleted': 0, 'indices._all.datastore.size': , 'indices._all.get.exists_time_in_millis': 0, 'indices._all.get.exists_total': 0, 'indices._all.get.missing_time_in_millis': 0, 'indices._all.get.missing_total': 0, 'indices._all.get.time_in_millis': 0, 'indices._all.get.total': 0, 'indices._all.indexing.delete_time_in_millis': 0, 'indices._all.indexing.delete_total': 0, 'indices._all.indexing.index_time_in_millis': , 'indices._all.indexing.index_total': , 'indices._all.search.fetch_time_in_millis': 6962, 'indices._all.search.fetch_total': 4084, 'indices._all.search.query_time_in_millis': 41211, 'indices._all.search.query_total': 4266, 'indices._all.store.throttle_time_in_millis': 0, 'indices.logstash-adm-syslog.indexes_in_group': 3, 'indices.logstash-adm-syslog.datastore.size': , 'indices.logstash-adm-syslog.docs.count': , 'indices.logstash-adm-syslog.docs.deleted': 0, 'indices.logstash-adm-syslog.get.exists_time_in_millis': 0, 'indices.logstash-adm-syslog.get.exists_total': 0, 'indices.logstash-adm-syslog.get.missing_time_in_millis': 0, 'indices.logstash-adm-syslog.get.missing_total': 0, 'indices.logstash-adm-syslog.get.time_in_millis': 0, 'indices.logstash-adm-syslog.get.total': 0, 'indices.logstash-adm-syslog.indexing.delete_time_in_millis': 0, 'indices.logstash-adm-syslog.indexing.delete_total': 0, 'indices.logstash-adm-syslog.indexing.index_time_in_millis': , 'indices.logstash-adm-syslog.indexing.index_total': , 'indices.logstash-adm-syslog.search.fetch_time_in_millis': 6962, 'indices.logstash-adm-syslog.search.fetch_total': 4084, 'indices.logstash-adm-syslog.search.query_time_in_millis': 41211, 'indices.logstash-adm-syslog.search.query_total': 4266, 'indices.logstash-adm-syslog.store.throttle_time_in_millis': 0}
    self.setDocExample(collector=self.collector.__class__.__name__, metrics=metrics, defaultpath=self.collector.config['path'])
    self.assertPublishedMany(publish_mock, metrics)
# Fix: the decorator had been truncated to the bare tuple expression
# "(Collector, 'publish')", so the test ran without the publish mock.
@patch.object(Collector, 'publish')
def test_should_work_with_real_0_90_data(self, publish_mock):
    """Collect against ElasticSearch 0.90-era fixtures; check cache/fielddata metrics."""
    # Fixtures are served in request order: node stats, then index stats.
    returns = [self.getFixture('stats0.90'), self.getFixture('indices_stats')]
    urlopen_mock = patch('urllib2.urlopen', Mock(side_effect=(lambda *args: returns.pop(0))))
    urlopen_mock.start()
    self.collector.collect()
    urlopen_mock.stop()
    # Node stats + index stats -> two HTTP fetches.
    self.assertEqual(urlopen_mock.new.call_count, 2)
    metrics = {'cache.filter.size': 1700, 'cache.filter.evictions': 9, 'cache.id.size': 98, 'fielddata.size': 1448, 'fielddata.evictions': 12}
    self.setDocExample(collector=self.collector.__class__.__name__, metrics=metrics, defaultpath=self.collector.config['path'])
    self.assertPublishedMany(publish_mock, metrics)
# Fix: the decorator had been truncated to the bare tuple expression
# "(Collector, 'publish')", so the test ran without the publish mock.
@patch.object(Collector, 'publish')
def test_should_fail_gracefully(self, publish_mock):
    """A blank stats response must not raise and must publish nothing."""
    urlopen_mock = patch('urllib2.urlopen', Mock(return_value=self.getFixture('stats_blank')))
    urlopen_mock.start()
    self.collector.collect()
    urlopen_mock.stop()
    self.assertPublishedMany(publish_mock, {})
# NOTE(review): the line below looks truncated -- upstream diamond tests use
# '@patch.object(Collector, 'publish')' here; restore before relying on this test.
(Collector, 'publish')
def test_multi_instances_with_real_data(self, publish_mock):
    """Two configured instances should each be queried and published with their alias prefix."""
    config = get_collector_config('ElasticSearchCollector', {'instances': ['.10.10.201:9200', '.10.10.202:9200']})
    self.collector = ElasticSearchCollector(config, None)
    self.assertEqual(len(self.collector.instances), 2)
    # Two fixtures per instance: node stats, then index stats.
    returns = [self.getFixture('stats'), self.getFixture('indices_stats'), self.getFixture('stats2'), self.getFixture('indices_stats2')]
    urlopen_mock = patch('urllib2.urlopen', Mock(side_effect=(lambda *args: returns.pop(0))))
    urlopen_mock.start()
    self.collector.collect()
    urlopen_mock.stop()
    self.assertEqual(urlopen_mock.new.call_count, 4)
    # NOTE(review): this dict literal is corrupted (keys with missing values and
    # truncated key names); restore from the original test source before use.
    metrics = {'esprodata01. 1, 'esprodata02. 2, 'esprodata01.indices.docs.count': , 'esprodata02.indices.docs.count': , 'esprodata01.thread_pool.generic.threads': 1, 'esprodata02.thread_pool.generic.threads': 2, 'esprodata01.jvm.mem.pools.Par_Survivor_Space.max': 8716288, 'esprodata02.jvm.mem.pools.Par_Survivor_Space.max': 8710000, 'esprodata01.indices._all.docs.count': 4, 'esprodata02.indices._all.docs.count': 8}
    self.assertPublishedMany(publish_mock, metrics)
# NOTE(review): the line below looks truncated -- upstream diamond tests use
# '@patch.object(Collector, 'publish')' here; restore before relying on this test.
(Collector, 'publish')
def test_should_work_with_real_1_7_data(self, publish_mock):
    """Collect against 1.7-era fixtures; verify segment-level metrics."""
    returns = [self.getFixture('stats1.7'), self.getFixture('indices_stats')]
    urlopen_mock = patch('urllib2.urlopen', Mock(side_effect=(lambda *args: returns.pop(0))))
    urlopen_mock.start()
    self.collector.collect()
    urlopen_mock.stop()
    self.assertEqual(urlopen_mock.new.call_count, 2)
    # NOTE(review): this dict literal is corrupted ('segments.index_writer.mem.max_size'
    # has no value); restore from the original test source before use.
    metrics = {'segments.count': 7, 'segments.mem.size': 75726, 'segments.index_writer.mem.size': 0, 'segments.index_writer.mem.max_size': , 'segments.version_map.mem.size': 0, 'segments.fixed_bit_set.mem.size': 0}
    self.setDocExample(collector=self.collector.__class__.__name__, metrics=metrics, defaultpath=self.collector.config['path'])
    self.assertPublishedMany(publish_mock, metrics)
def queue_tool() -> None:
    """Command-line helper for a MessageQueue: create it, drain it to stdout,
    or fill it from stdin, depending on the selected mode flag."""
    import argparse
    import sys
    parser = argparse.ArgumentParser()
    parser.add_argument('--max-messages', type=int, default=10, help='if creating the queue, what to set the maximum queue length to')
    parser.add_argument('--max-message-size', type=int, default=8096, help='if creating the queue, what to set the maximum message size to')
    parser.add_argument('queue_name', help='the name of the queue to consume')
    mode_group = parser.add_mutually_exclusive_group(required=True)
    mode_group.add_argument('--create', action='store_const', dest='mode', const='create', help="create the named queue if it doesn't exist and exit")
    mode_group.add_argument('--read', action='store_const', dest='mode', const='read', help='read, log, and discard messages from the named queue')
    mode_group.add_argument('--write', action='store_const', dest='mode', const='write', help='read messages from stdin and write them to the named queue')
    options = parser.parse_args()
    # Instantiating the queue is what creates it, so --create needs no extra work.
    queue = MessageQueue(options.queue_name, options.max_messages, options.max_message_size)
    if options.mode == 'read':
        # Drain forever, echoing each message to stdout.
        while True:
            print(queue.get().decode())
    elif options.mode == 'write':
        for line in sys.stdin:
            queue.put(line.rstrip('\n').encode())
class ACE:
    """Base class for Access Control Entries.

    Provides generic (de)serialization entry points; concrete ACE types are
    looked up in the module-level ``acetype2ace`` registry by the header's
    AceType and implement their own ``from_buffer``/``to_buffer``.
    """

    def __init__(self):
        pass

    @staticmethod
    def from_bytes(data, sd_object_type=None):
        """Parse one ACE from raw bytes."""
        return ACE.from_buffer(io.BytesIO(data), sd_object_type)

    @staticmethod
    def from_buffer(buff, sd_object_type=None):
        """Peek the ACE header and dispatch to the concrete ACE class.

        Raises:
            Exception: when the AceType has no registered implementation.
        """
        hdr = ACEHeader.pre_parse(buff)
        obj = acetype2ace.get(hdr.AceType)
        if (not obj):
            raise Exception(('ACE type %s not implemented!' % hdr.AceType))
        # Hand the concrete parser exactly AceSize bytes so it cannot read
        # past this ACE into the next one in the ACL.
        return obj.from_buffer(io.BytesIO(buff.read(hdr.AceSize)), sd_object_type)

    def to_buffer(self, buff):
        # Implemented by subclasses.
        pass

    def to_bytes(self):
        """Serialize this ACE via the subclass's to_buffer implementation."""
        buff = io.BytesIO()
        self.to_buffer(buff)
        buff.seek(0)
        return buff.read()

    def to_ssdl(self, sd_object_type=None):
        # Implemented by subclasses.
        pass

    @staticmethod
    def add_padding(x):
        """Pad *x* with NUL bytes so that (4-byte header + body) is 4-byte aligned.

        Bug fix: the previous code appended ``len(x) % 4`` bytes, which only
        aligns correctly when the remainder is 0 or 2; the number of padding
        bytes required is ``4 - len(x) % 4`` (the fixed 4-byte header does not
        change the remainder).
        """
        if (len(x) % 4) != 0:
            x += b'\x00' * (4 - (len(x) % 4))
        return x

    @staticmethod
    def from_ssdl(x):
        # Implemented by subclasses.
        pass
class Tracklet_3D(object):
    """In-memory index of a tracklet label file.

    ``self.data`` maps frame number -> {object id -> Object_3D}.
    """

    def __init__(self, label_file):
        (lines, num_lines) = load_txt_file(label_file)
        self.data = dict()
        for line in lines:
            self.load_line(line)

    def load_line(self, line):
        """Parse one space-separated label line and register its object."""
        fields = line.split(' ')
        frame = int(fields[0])
        obj_id = int(fields[1])
        obj_type = fields[2]
        values = [float(v) for v in fields[3:]]
        obj = Object_3D(obj_type=obj_type, trunc=values[0], occ=values[1], alpha=values[2], xmin=values[3], ymin=values[4], xmax=values[5], ymax=values[6], h=values[7], w=values[8], l=values[9], x=values[10], y=values[11], z=values[12], ry=values[13], s=values[14], id=obj_id)
        frame_objects = self.data.setdefault(frame, dict())
        # Duplicate IDs within one frame indicate a malformed label file.
        assert (obj_id not in frame_objects), ('error! object ID %d already in the frame %d' % (obj_id, frame))
        frame_objects[obj_id] = obj
class RemoteTempFileTests(ProvyTestCase):
    """Tests for Role's remote temp file/dir helpers.

    ``remote_temp_dir`` and ``ensure_dir`` are patched to always answer
    '/tmp', so no real remote host is involved.

    NOTE(review): most method names lack the usual ``test_`` prefix -- confirm
    the test runner in use actually discovers them.
    """

    def any_context(self):
        # Minimal context dict accepted by Role.
        return {'used_roles': {}}

    def setUp(self):
        super(RemoteTempFileTests, self).setUp()
        self.instance = Role(None, self.any_context())
        self.patcher = patch('provy.core.roles.Role.remote_temp_dir', Mock(return_value='/tmp'))
        self.patcher.start()
        self.ensure_dir_patcher = patch('provy.core.roles.Role.ensure_dir', Mock(return_value='/tmp'))
        self.ensure_dir_patcher.start()

    def tearDown(self):
        self.patcher.stop()
        self.ensure_dir_patcher.stop()

    def file_created_in_tempdir(self):
        file = self.instance.create_remote_temp_file('foo')
        self.assertTrue(file.startswith('/tmp'))

    def directory_created_with_proper_name(self):
        dir = self.instance.create_remote_temp_dir('foobar')
        self.assertEqual('/tmp/foobar', dir)
        # Directories are scheduled for removal by default.
        self.assertEqual(list(self.instance._paths_to_remove)[0], '/tmp/foobar')

    def directory_created_without_cleanup(self):
        dir = self.instance.create_remote_temp_dir('foobar', cleanup=False)
        self.assertEqual('/tmp/foobar', dir)
        self.assertEqual(self.instance._paths_to_remove, set())

    def file_created_with_proper_suffix(self):
        file = self.instance.create_remote_temp_file(suffix='sql')
        self.assertEqual(file[(- 3):], 'sql')

    def files_will_be_deleted_on_cleanup_if_requested(self):
        file = self.instance.create_remote_temp_file(cleanup=True)
        self.assertIn(file, self.instance._paths_to_remove)

    def directories_will_be_deleted_on_cleanup_if_requested(self):
        directory = self.instance.create_remote_temp_dir(cleanup=True)
        self.assertIn(directory, self.instance._paths_to_remove)

    def files_will_not_be_deleted_on_cleanup_if_requested(self):
        file = self.instance.create_remote_temp_file(cleanup=False)
        self.assertNotIn(file, self.instance._paths_to_remove)

    def directories_will_not_be_deleted_on_cleanup_if_requested(self):
        # Fix: this test previously called create_remote_temp_file, so the
        # cleanup=False path of create_remote_temp_dir was never exercised.
        dirs = self.instance.create_remote_temp_dir(cleanup=False)
        self.assertNotIn(dirs, self.instance._paths_to_remove)

    def check_if_random_files_have_different_names(self):
        dirs = set()
        for _ in range(100):
            dirs.add(self.instance.create_remote_temp_file())
        self.assertEqual(len(dirs), 100)

    def check_if_random_directories_have_different_names(self):
        dirs = set()
        for _ in range(100):
            dirs.add(self.instance.create_remote_temp_dir())
        self.assertEqual(len(dirs), 100)

    def check_if_directories_have_proper_mode(self):
        mode = '666'
        with self.mock_role_method('change_path_mode') as change_path_mode:
            dir = self.instance.create_remote_temp_dir(chmod=mode)
            change_path_mode.assert_called_once_with(dir, mode)

    def check_if_directories_have_proper_owner(self):
        owner = 'user'
        self.instance.create_remote_temp_dir(owner=owner)
        self.instance.ensure_dir.assert_called_once_with(ANY, owner, ANY)
def test_requirement_source_disable_pip_editable_without_egg_fragment(req_file):
    """An editable URL requirement without an egg fragment cannot yield a
    version when pip resolution is disabled, so it must be skipped."""
    source = _init_requirement([(req_file(), '-e file:flask.py')], disable_pip=True, no_deps=True)
    collected = list(source.collect())
    expected = SkippedDependency(name='-e file:flask.py', skip_reason='could not deduce package version from URL requirement')
    assert expected in collected
class BatchMiner():
    """Semihard triplet miner.

    For every sample in the batch (the anchor), picks a random positive and a
    random "semihard" negative: one that is farther than the positive but
    still within the margin. Falls back to any negative when no semihard
    candidate exists.
    """

    def __init__(self, opt):
        self.par = opt
        self.name = 'semihard'
        # Margin is read from the option namespace, e.g. opt.loss_triplet_margin.
        self.margin = vars(opt)[(('loss_' + opt.loss) + '_margin')]

    def __call__(self, batch, labels, return_distances=False):
        """Mine [anchor, positive, negative] index triplets for *batch*.

        Args:
            batch: (bs, dim) embedding tensor.
            labels: per-sample class labels (tensor or array-like).
            return_distances: also return the pairwise distance matrix.
        """
        if isinstance(labels, torch.Tensor):
            # Fix: move to CPU before converting -- .numpy() raises on CUDA
            # tensors (the distances path below already does .cpu()).
            labels = labels.detach().cpu().numpy()
        bs = batch.size(0)
        distances = self.pdist(batch.detach()).detach().cpu().numpy()
        (positives, negatives) = ([], [])
        anchors = []
        for i in range(bs):
            (l, d) = (labels[i], distances[i])
            neg = (labels != l)
            pos = (labels == l)
            anchors.append(i)
            # An anchor may not serve as its own positive.
            pos[i] = 0
            p = np.random.choice(np.where(pos)[0])
            positives.append(p)
            # Semihard: farther than the chosen positive, but within the margin.
            neg_mask = np.logical_and(neg, (d > d[p]))
            neg_mask = np.logical_and(neg_mask, (d < (self.margin + d[p])))
            if (neg_mask.sum() > 0):
                negatives.append(np.random.choice(np.where(neg_mask)[0]))
            else:
                # No semihard candidate: fall back to any negative.
                negatives.append(np.random.choice(np.where(neg)[0]))
        sampled_triplets = [[a, p, n] for (a, p, n) in zip(anchors, positives, negatives)]
        if return_distances:
            return (sampled_triplets, distances)
        else:
            return sampled_triplets

    def pdist(self, A):
        """Pairwise Euclidean distance matrix of the rows of A."""
        prod = torch.mm(A, A.t())
        norm = prod.diag().unsqueeze(1).expand_as(prod)
        # Clamp before sqrt to avoid NaNs from tiny negative values caused by
        # floating-point round-off (the second clamp in the original code was
        # redundant and has been removed).
        res = ((norm + norm.t()) - (2 * prod)).clamp(min=0)
        return res.sqrt()
def scheduler_init(app):
    """Start the app's scheduler exactly once across server processes.

    An exclusive lock on ``scheduler.lock`` ensures that when the app runs
    under a multi-process server, only the process that wins the lock starts
    the scheduler; the others fail to acquire it and skip startup. The lock is
    held until process exit and released via ``atexit``.
    """
    if (platform.system() != 'Windows'):
        # POSIX: advisory whole-file lock via flock(2); LOCK_NB makes losers fail fast.
        fcntl = __import__('fcntl')
        f = open('scheduler.lock', 'wb')
        try:
            fcntl.flock(f, (fcntl.LOCK_EX | fcntl.LOCK_NB))
            scheduler.init_app(app)
            scheduler.start()
            app.logger.debug('Scheduler Started,')
        except:
            # NOTE(review): bare except swallows *any* failure, not just
            # "lock already held" -- genuine scheduler startup errors are hidden too.
            pass
        def unlock():
            # NOTE(review): registered even when the lock was never acquired;
            # unlocking an unlocked file is harmless on POSIX.
            fcntl.flock(f, fcntl.LOCK_UN)
            f.close()
        atexit.register(unlock)
    else:
        # Windows: msvcrt region lock on the first byte of the file.
        msvcrt = __import__('msvcrt')
        f = open('scheduler.lock', 'wb')
        try:
            msvcrt.locking(f.fileno(), msvcrt.LK_NBLCK, 1)
            scheduler.init_app(app)
            scheduler.start()
            app.logger.debug('Scheduler Started,')
        except:
            # NOTE(review): same bare-except concern as the POSIX branch above.
            pass
        def _unlock_file():
            try:
                # Must unlock the same byte range that was locked.
                f.seek(0)
                msvcrt.locking(f.fileno(), msvcrt.LK_UNLCK, 1)
            except:
                pass
        atexit.register(_unlock_file)
def RaisesOp(context, exceptionClass, indent, kws, arglist, node):
    """Build a ``with <context>(Exc[, match=regex]): ...`` node from a
    unittest-style assertRaises call (lib2to3/fissix fixer helper).

    Args:
        context: name of the replacement context manager (e.g. 'pytest.raises').
        exceptionClass: node holding the exception class argument.
        indent: indentation string of the original statement.
        kws: mapping of keyword-argument names to nodes from the original call.
        arglist: the original argument-list node.
        node: the original call node (unused here).

    Returns:
        A new ``with_stmt`` node replacing the assertRaises call.
    """
    exceptionClass.prefix = ''
    args = [exceptionClass]
    if ('expected_regex' in kws):
        # assertRaisesRegex: carry the regex over as the `match=` keyword.
        expected_regex = kws.get('expected_regex').clone()
        expected_regex.prefix = ''
        args.append(String(', '))
        args.append(KeywordArg(Name('match'), expected_regex))
    with_item = Call(Name(context), args)
    with_item.prefix = ' '
    args = []
    # Skip the first four children (exception argument and surrounding
    # punctuation); the remainder are the positional args for the callable.
    arglist = [a.clone() for a in arglist.children[4:]]
    if arglist:
        arglist[0].prefix = ''
    func = None
    # Accept both historical spellings of the callable keyword.
    if ('callableObj' in kws):
        func = kws['callableObj']
    elif ('callable_obj' in kws):
        func = kws['callable_obj']
    elif kws['args']:
        # NOTE(review): assumes kws always contains an 'args' entry when no
        # callable keyword was given -- confirm the calling fixer guarantees this.
        func = kws['args'][0]
    else:
        func = None
    if (func is None):
        # Context-manager form (`with self.assertRaises(Exc):`) -- no call body.
        return Node(syms.with_stmt, [with_item])
    if (func.type == syms.lambdef):
        # Inline the lambda's body instead of emitting a call to the lambda.
        suite = func.children[(- 1)].clone()
    else:
        suite = Call(func, arglist)
    suite.prefix = (indent + (4 * ' '))
    return Node(syms.with_stmt, [Name('with'), with_item, Name(':'), Newline(), suite])
def encode_images(device, G, encoder, dlatent_avg, images, truncation_psi, num_steps):
    """Project *images* into G's dlatent space, seeded from the encoder output.

    The encoder predicts an offset around the average dlatent; the projector
    then refines it for ``num_steps`` optimization steps.

    NOTE: ``truncation_psi`` is accepted but not used in this function.
    """
    lpips_model = stylegan2.external_models.lpips.LPIPS_VGG16(pixel_min=(- 1), pixel_max=1)
    proj = stylegan2.project.Projector(G=G, dlatent_avg_samples=10000, dlatent_avg_label=None, dlatent_device=device, dlatent_batch_size=1024, lpips_model=lpips_model, lpips_size=256)
    # Encoder works on 256x256 inputs; its output is an offset from the mean dlatent.
    resized = F.interpolate(images, (256, 256), mode='bicubic')
    dlatent_param = encoder(resized)
    dlatent_param = dlatent_param + dlatent_avg.repeat(dlatent_param.shape[0], 1, 1)
    batch_size = 1
    proj.start(target=images, dlatent_param=dlatent_param, num_steps=num_steps, initial_learning_rate=0.1, initial_noise_factor=0.05, lr_rampdown_length=0.25, lr_rampup_length=0.05, noise_ramp_length=0.75, regularize_noise_weight=100000.0, verbose=True, verbose_prefix='Projecting image(s) {}/{}'.format((0 * batch_size) + len(images), len(images)))
    for _ in range(num_steps):
        proj.step()
    return proj.get_dlatent()
def autoencode_eval(gts, res, eval_lang='en'):
    """Score reconstructions with BLEU-1..4 and ROUGE-L.

    Args:
        gts: list/tuple of reference sentences.
        res: list/tuple of generated sentences, aligned with ``gts``.
        eval_lang: 'zh' re-tokenizes both sides for Chinese; any other value
            uses the strings as-is.

    Returns:
        dict mapping metric name ('Bleu_1'..'Bleu_4', 'ROUGE_L') to its score.
    """
    assert isinstance(gts, (list, tuple))
    assert isinstance(res, (list, tuple))
    assert (len(gts) == len(res))
    if (eval_lang == 'zh'):
        # For Chinese, strip the space-joined tokens from `res` and re-tokenize
        # both sides with the same tokenizer.
        gts = {i: [tokenize_zh_sentence(item)] for (i, item) in enumerate(gts)}
        res = {i: [tokenize_zh_sentence(''.join(item.split(' ')))] for (i, item) in enumerate(res)}
    else:
        gts = {i: [item] for (i, item) in enumerate(gts)}
        res = {i: [item] for (i, item) in enumerate(res)}
    print('setting up scorers...')
    scorers = [(Bleu(4), ['Bleu_1', 'Bleu_2', 'Bleu_3', 'Bleu_4']), (Rouge(), 'ROUGE_L')]
    out = {}
    for (scorer, method) in scorers:
        print(('computing %s score...' % scorer.method()))
        (score, scores) = scorer.compute_score(gts, res)
        # Fix idiom: isinstance instead of type(...) == list.
        if isinstance(method, list):
            # Bleu returns one score per n-gram order.
            for (sc, m) in zip(score, method):
                out[m] = sc
                print(('%s: %0.3f' % (m, sc)))
        else:
            out[method] = score
            print(('%s: %0.3f' % (method, score)))
    return out
def run_gat_surrogate(args, device, data, model_filename):
    """Train a GAT-based surrogate model to mimic a target model's responses.

    The surrogate is fit with an RMSE loss against ``target_response`` (the
    target model's outputs), while a separate linear classifier is trained on
    the *detached* surrogate embeddings with cross-entropy.

    NOTE(review): ``model_filename`` is accepted but never used in this body.

    Returns:
        (model_surrogate, clf, detached_classifier)
    """
    (in_feats, n_classes, train_g, val_g, test_g, target_response) = data
    train_nid = train_g.nodes()
    val_nid = val_g.nodes()
    test_nid = test_g.nodes()
    n_output_dim = target_response.shape[1]
    print('output dim is: ', n_output_dim)
    # Per-layer neighbor-sampling fan-outs, parsed from e.g. "10,25".
    sampler = dgl.dataloading.MultiLayerNeighborSampler([int(fanout) for fanout in args.fan_out.split(',')])
    dataloader = dgl.dataloading.NodeDataLoader(train_g, train_nid, sampler, batch_size=args.batch_size, shuffle=True, drop_last=False, num_workers=args.num_workers)
    model_surrogate = GATEMB(in_feats, args.num_hidden, n_output_dim, n_classes, args.num_layers, args.head, args.num_workers, F.relu, args.dropout)
    model_surrogate = model_surrogate.to(device)
    loss_fcn = nn.MSELoss()
    loss_fcn = loss_fcn.to(device)
    loss_clf = nn.CrossEntropyLoss()
    loss_clf = loss_clf.to(device)
    optimizer = optim.Adam(model_surrogate.parameters(), lr=args.lr)
    clf = Classification(n_output_dim, n_classes)
    clf = clf.to(device)
    optimizer_classification = optim.SGD(clf.parameters(), lr=0.01)
    avg = 0
    iter_tput = []
    best_val_score = 0.0  # NOTE(review): never updated or read below; appears unused
    for epoch in range(args.num_epochs):
        tic = time.time()
        tic_step = time.time()
        for (step, (input_nodes, seeds, blocks)) in enumerate(dataloader):
            blocks = [block.int().to(device) for block in blocks]
            batch_inputs = blocks[0].srcdata['features']
            batch_labels = blocks[(- 1)].dstdata['labels']
            batch_output_nid = blocks[(- 1)].dstdata['_ID']
            embs = model_surrogate(blocks, batch_inputs)
            # RMSE between surrogate embeddings and the target model's response.
            loss = torch.sqrt(loss_fcn(embs, target_response[batch_output_nid]))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # The classifier trains on detached embeddings so its gradients
            # do not flow back into the surrogate.
            optimizer_classification.zero_grad()
            logists = clf(embs.detach())
            loss_sup = loss_clf(logists, batch_labels)
            loss_sup.backward()
            optimizer_classification.step()
            iter_tput.append((len(seeds) / (time.time() - tic_step)))
            if ((step % args.log_every) == 0):
                acc = compute_acc(logists, batch_labels)
                # NOTE(review): mixes the `th` and `torch` aliases; both must
                # be imported at module level.
                gpu_mem_alloc = ((th.cuda.max_memory_allocated() / 1000000) if th.cuda.is_available() else 0)
                print('Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MB'.format(epoch, step, loss.item(), acc, np.mean(iter_tput[3:]), gpu_mem_alloc))
            tic_step = time.time()
        toc = time.time()
        print('Epoch Time(s): {:.4f}'.format((toc - tic)))
        if (epoch >= 5):
            # Exclude the first 5 warm-up epochs from the average-time figure.
            avg += (toc - tic)
        if (((epoch % args.eval_every) == 0) and (epoch != 0)):
            (eval_acc, eval_preds, eval_embs) = evaluate_gat_surrogate(model_surrogate, clf, val_g, val_g.ndata['features'], val_g.ndata['labels'], val_nid, args.batch_size, args.head, device)
            print('Eval Acc {:.4f}'.format(eval_acc))
            (test_acc, test_preds, test_embs) = evaluate_gat_surrogate(model_surrogate, clf, test_g, test_g.ndata['features'], test_g.ndata['labels'], test_nid, args.batch_size, args.head, device)
            print('Test Acc: {:.4f}'.format(test_acc))
    print('Avg epoch time: {}'.format((avg / (epoch - 4))))
    # Final pass over the training graph to collect embeddings for the
    # stand-alone classifier.
    (eval_acc, eval_preds, eval_embs) = evaluate_gat_surrogate(model_surrogate, clf, train_g, train_g.ndata['features'], train_g.ndata['labels'], train_nid, args.batch_size, args.head, device)
    detached_classifier = train_detached_classifier(train_g, eval_embs)
    return (model_surrogate, clf, detached_classifier)
def get_vtm_decoder_path(build_dir):
    """Return the path of the VTM decoder binary inside *build_dir*.

    Raises:
        RuntimeError: on platforms with no known decoder binary name.
    """
    binary_by_system = {'Darwin': 'DecoderApp', 'Linux': 'DecoderAppStatic'}
    system = platform.system()
    try:
        binary = binary_by_system[system]
    except KeyError as err:
        raise RuntimeError(f'Unsupported platform "{system}"') from err
    return os.path.join(build_dir, binary)
class SimpleUser(msgspec.Struct, omit_defaults=True):
    """A "simple user" record as returned by a REST API.

    ``omit_defaults=True`` keeps fields that are still at their default value
    out of the encoded output.
    """
    login: str
    id: int
    node_id: str
    avatar_url: str
    gravatar_id: Optional[str]
    url: str
    html_url: str
    followers_url: str
    following_url: str
    gists_url: str
    starred_url: str
    subscriptions_url: str
    organizations_url: str
    repos_url: str
    events_url: str
    received_events_url: str
    type: str
    site_admin: bool
    # Optional fields: only present in some responses, so they default to None.
    name: Optional[str] = None
    email: Optional[str] = None
    starred_at: Optional[datetime] = None
def load_diverse_ensemble_for_inference(filenames: List[str], task: Optional[tasks.FairseqTask]=None):
    """Load an ensemble of possibly heterogeneous model checkpoints for inference.

    Each checkpoint may carry its own model config, so every model is built
    from its own (new-style ``cfg`` or legacy ``args``) configuration.

    Args:
        filenames: checkpoint paths; every file must exist.
        task: optional pre-built task. When None, the task is set up from the
            first checkpoint's task config (with ``mode`` forced to 'eval'
            when that attribute exists).

    Returns:
        (list of loaded models, list of per-model configs, task).

    Raises:
        IOError: when a checkpoint file is missing.
    """
    checkpoints_data = []
    for filename in filenames:
        if (not PathManager.exists(filename)):
            raise IOError('Model file not found: {}'.format(filename))
        # NOTE: torch.load unpickles arbitrary objects -- only load trusted
        # checkpoints. Tensors are remapped onto CPU regardless of where they
        # were saved.
        with PathManager.open(filename, 'rb') as f:
            checkpoints_data.append(torch.load(f, map_location=(lambda s, l: torch.serialization.default_restore_location(s, 'cpu'))))
    def get_cfg(cp, key):
        # New-style checkpoints nest configs under 'cfg'; legacy ones keep a
        # single flat 'args' namespace.
        if ('cfg' in cp):
            return cp['cfg'][key]
        else:
            return cp['args']
    ensemble = []
    if (task is None):
        cfg = get_cfg(checkpoints_data[0], 'task')
        if hasattr(cfg, 'mode'):
            cfg.mode = 'eval'
        task = tasks.setup_task(cfg)
    for checkpoint_data in checkpoints_data:
        cfg = get_cfg(checkpoint_data, 'model')
        model = task.build_model(cfg)
        model.load_state_dict(checkpoint_data['model'])
        ensemble.append(model)
    args_list = [get_cfg(s, 'model') for s in checkpoints_data]
    return (ensemble, args_list, task)
class Server(QDialog):
    """Fortune-server dialog: listens on a TCP port and sends one random
    fortune string to every client that connects."""

    FORTUNES = ("You've been leading a dog's life. Stay off the furniture.", "You've got to think about tomorrow.", 'You will be surprised by a loud noise.', 'You will feel hungry again in another hour.', 'You might have mail.', 'You cannot kill time without injuring eternity.', 'Computers are not intelligent. They only think they are.')

    def __init__(self, parent=None):
        super(Server, self).__init__(parent)
        self.tcpServer = None
        self.networkSession = None
        self.statusLabel = QLabel()
        quitButton = QPushButton('Quit')
        quitButton.setAutoDefault(False)
        manager = QNetworkConfigurationManager()
        if (manager.capabilities() & QNetworkConfigurationManager.NetworkSessionRequired):
            # Bearer management required: reuse the saved network configuration
            # if it is still discovered, otherwise fall back to the default one.
            settings = QSettings(QSettings.UserScope, 'QtProject')
            settings.beginGroup('QtNetwork')
            id = settings.value('DefaultNetworkConfiguration', '')
            settings.endGroup()
            config = manager.configurationFromIdentifier(id)
            if ((config.state() & QNetworkConfiguration.Discovered) == 0):
                config = manager.defaultConfiguration()
            self.networkSession = QNetworkSession(config, self)
            self.networkSession.opened.connect(self.sessionOpened)
            self.statusLabel.setText('Opening network session.')
            self.networkSession.open()
        else:
            self.sessionOpened()
        quitButton.clicked.connect(self.close)
        # NOTE(review): when a network session is required, sessionOpened()
        # runs asynchronously and self.tcpServer may still be None here --
        # confirm this connect does not need to move into sessionOpened().
        self.tcpServer.newConnection.connect(self.sendFortune)
        buttonLayout = QHBoxLayout()
        buttonLayout.addStretch(1)
        buttonLayout.addWidget(quitButton)
        buttonLayout.addStretch(1)
        mainLayout = QVBoxLayout()
        mainLayout.addWidget(self.statusLabel)
        mainLayout.addLayout(buttonLayout)
        self.setLayout(mainLayout)
        self.setWindowTitle('Fortune Server')

    def sessionOpened(self):
        """Persist the chosen network configuration and start listening."""
        if (self.networkSession is not None):
            config = self.networkSession.configuration()
            if (config.type() == QNetworkConfiguration.UserChoice):
                id = self.networkSession.sessionProperty('UserChoiceConfiguration')
            else:
                id = config.identifier()
            settings = QSettings(QSettings.UserScope, 'QtProject')
            settings.beginGroup('QtNetwork')
            settings.setValue('DefaultNetworkConfiguration', id)
            settings.endGroup()
        self.tcpServer = QTcpServer(self)
        if (not self.tcpServer.listen()):
            QMessageBox.critical(self, 'Fortune Server', ('Unable to start the server: %s.' % self.tcpServer.errorString()))
            self.close()
            return
        # Prefer a non-loopback IPv4 address for display; fall back to loopback.
        for ipAddress in QNetworkInterface.allAddresses():
            if ((ipAddress != QHostAddress.LocalHost) and (ipAddress.toIPv4Address() != 0)):
                break
        else:
            ipAddress = QHostAddress(QHostAddress.LocalHost)
        ipAddress = ipAddress.toString()
        self.statusLabel.setText(('The server is running on\n\nIP: %s\nport %d\n\nRun the Fortune Client example now.' % (ipAddress, self.tcpServer.serverPort())))

    def sendFortune(self):
        """Write one random fortune as a length-prefixed QDataStream block to
        the newly connected client, then disconnect it."""
        fortune = self.FORTUNES[random.randint(0, (len(self.FORTUNES) - 1))]
        block = QByteArray()
        out = QDataStream(block, QIODevice.WriteOnly)
        out.setVersion(QDataStream.Qt_4_0)
        # Reserve a 16-bit length prefix, write the payload, then backpatch
        # the real size (excluding the prefix itself).
        out.writeUInt16(0)
        out.writeQString(fortune)
        out.device().seek(0)
        out.writeUInt16((block.size() - 2))
        clientConnection = self.tcpServer.nextPendingConnection()
        clientConnection.disconnected.connect(clientConnection.deleteLater)
        clientConnection.write(block)
        clientConnection.disconnectFromHost()
def getTurbulenceVariables(solverSettings):
    """Return the transported turbulence field names for the selected model.

    Fix: previously an unknown model name left ``var_list`` unassigned, so the
    final ``return`` raised UnboundLocalError; it now returns an empty list
    after printing the diagnostic.
    """
    turbulenceModelName = solverSettings['turbulenceModel']
    viscosity_var = getTurbulentViscosityVariable(solverSettings)
    # NOTE(review): 'invisid' looks like a typo for 'inviscid', but it is
    # matched against settings produced elsewhere, so it is left unchanged.
    if (turbulenceModelName in ['laminar', 'invisid', 'DNS']):
        var_list = []
    elif (turbulenceModelName in kEpsilon_models):
        var_list = ['k', 'epsilon', viscosity_var]
    elif (turbulenceModelName in kOmege_models):
        var_list = ['k', 'omega', viscosity_var]
    elif (turbulenceModelName in spalartAllmaras_models):
        var_list = [viscosity_var, 'nuTilda']
    else:
        print('Error: Turbulence model {} is not supported yet'.format(turbulenceModelName))
        var_list = []
    return var_list
class SignalConnection(gui.HBox):
    """Editor row that wires one widget event to a listener's method.

    Shows the event name, a dropdown of candidate listener instances, and a
    dropdown of that listener's methods; selecting a method connects it to
    the widget's event via the ``.do`` mechanism.
    """

    def __init__(self, widget, listenersList, eventConnectionFuncName, eventConnectionFunc, **kwargs):
        super(SignalConnection, self).__init__(**kwargs)
        self.style.update({'overflow': 'visible', 'height': '24px', 'outline': '1px solid lightgray'})
        self.label = gui.Label(eventConnectionFuncName, width='32%')
        self.label.style.update({'float': 'left', 'font-size': '10px', 'overflow': 'hidden', 'outline': '1px solid lightgray'})
        self.label_do = gui.Label('.do ->', style={'white-space': 'nowrap'})
        self.dropdownListeners = gui.DropDown(width='32%', height='100%')
        self.dropdownListeners.onchange.do(self.on_listener_selection)
        self.dropdownListeners.attributes['title'] = 'The listener who will receive the event'
        self.dropdownMethods = gui.DropDown(width='32%', height='100%')
        self.dropdownMethods.onchange.do(self.on_connection)
        self.dropdownMethods.attributes['title'] = "The listener's method who will receive the event. A custom method is selected by default. You can select another method, but you should check the method parameters."
        self.eventConnectionFunc = eventConnectionFunc
        self.eventConnectionFuncName = eventConnectionFuncName
        self.refWidget = widget
        self.listenersList = listenersList
        # 'None' entry allows explicitly disconnecting the event.
        self.dropdownListeners.append(gui.DropDownItem('None'))
        for w in listenersList:
            ddi = gui.DropDownItem(w.variable_name)
            ddi.listenerInstance = w
            self.dropdownListeners.append(ddi)
        if (not (self.eventConnectionFunc.callback is None)):
            # The event is already connected: pre-select the current listener
            # and method in both dropdowns.
            try:
                connectedListenerName = ''
                connectedListenerFunction = None
                print(str(type(eventConnectionFunc.callback)))
                if issubclass(type(eventConnectionFunc.callback), gui.ClassEventConnector):
                    # The callback is itself an event connector; follow its
                    # bound method to find the actual listener.
                    connectedListenerName = eventConnectionFunc.callback.event_method_bound.__self__.variable_name
                    connectedListenerFunction = eventConnectionFunc.callback.event_method_bound
                else:
                    connectedListenerName = eventConnectionFunc.callback.__self__.variable_name
                    connectedListenerFunction = eventConnectionFunc.callback
                self.dropdownListeners.select_by_value(connectedListenerName)
                # Populate the methods dropdown for the selected listener.
                self.on_listener_selection(self.dropdownListeners, connectedListenerName)
                print(('connected function name:' + connectedListenerFunction.__name__))
                self.dropdownMethods.select_by_value(connectedListenerFunction.__name__)
            except Exception:
                # If the stored connection cannot be resolved, drop it.
                print(traceback.format_exc())
                print(dir(eventConnectionFunc.callback))
                self.disconnect()
        self.append([self.label, self.label_do, self.dropdownListeners, self.dropdownMethods])

    def on_listener_selection(self, widget, dropDownValue):
        """Rebuild the methods dropdown for the newly selected listener."""
        self.dropdownMethods.empty()
        if (self.dropdownListeners.get_value() == 'None'):
            self.disconnect()
        else:
            listener = self.dropdownListeners._selected_item.listenerInstance
            l = []
            func_members = inspect.getmembers(listener)
            for (name, value) in func_members:
                # Offer plain methods (minus framework entry points) plus any
                # already-wrapped event connectors.
                if (((name not in ['__init__', 'main', 'idle', 'construct_ui']) and (type(value) == types.MethodType)) or issubclass(type(value), gui.ClassEventConnector)):
                    ddi = gui.DropDownItem(name)
                    ddi.listenerInstance = listener
                    ddi.listenerFunction = value
                    l.append(ddi)
            ddi = gui.DropDownItem('None')
            self.dropdownMethods.append(ddi)
            if listener.attr_editor_newclass:
                # For editable listener classes, synthesize a default handler
                # method named after the event and the source widget.
                custom_listener_name = ((self.eventConnectionFuncName + '_') + self.refWidget.variable_name)
                setattr(listener, custom_listener_name, types.MethodType(copy_func(fakeListenerFunc), listener))
                getattr(listener, custom_listener_name).__func__.__name__ = custom_listener_name
                ddi = gui.DropDownItem(custom_listener_name)
                ddi.listenerInstance = listener
                ddi.listenerFunction = getattr(listener, custom_listener_name)
                # Highlight the auto-generated handler in the dropdown.
                ddi.style['color'] = 'green'
                ddi.style['font-weight'] = 'bolder'
                ddi.attributes['title'] = 'automatically generated method'
                self.dropdownMethods.append(ddi)
            self.dropdownMethods.append(l)

    def disconnect(self):
        """Remove any handler currently attached to the widget's event."""
        getattr(self.refWidget, self.eventConnectionFuncName).do(None)

    def on_connection(self, widget, dropDownValue):
        """Connect the selected listener method to the widget's event."""
        if (self.dropdownMethods.get_value() == 'None'):
            self.disconnect()
            return
        listener = self.dropdownMethods._selected_item.listenerInstance
        kwargs = {}
        if hasattr(getattr(self.refWidget, self.eventConnectionFuncName).event_method_bound, '_js_code'):
            # Keep propagating events that editors rely on for drag/resize.
            kwargs['js_stop_propagation'] = (self.eventConnectionFuncName not in ('onmousedown', 'onmousemove', 'onmouseleave', 'onkeydown'))
        getattr(self.refWidget, self.eventConnectionFuncName).do(self.dropdownMethods._selected_item.listenerFunction, **kwargs)
def check_workers_alive_and_busy(export_pool: Pool, worker_list: List, results_list: List, allowed_num_queued: int=0):
    """Return True when the pool's job queue is saturated, False when more
    work may be submitted.

    Raises:
        RuntimeError: if any background worker has died.
    """
    if not all(worker.is_alive() for worker in worker_list):
        raise RuntimeError('Some background workers are no longer alive')
    # Jobs that have not completed yet either occupy a worker or wait queued.
    pending = sum(1 for result in results_list if not result.ready())
    return pending >= len(export_pool._pool) + allowed_num_queued
class OverlayProvider(StaticProvider):
    """Serves Overlay instances for requested overlay classes, optionally
    merging them with overlays provided further down the chain."""

    def __init__(self, overlays: Iterable[Overlay], chain: Optional[Chain]):
        self._chain = chain
        self._overlays = ClassMap(*overlays)

    # Fix: this decorator was present as a bare expression statement (the '@'
    # had been lost), so the method was never registered as a provision action.
    @_provision_action
    def _provide_overlay(self, mediator: Mediator, request: OverlayRequest):
        try:
            overlay = self._overlays[request.overlay_cls]
        except KeyError:
            # No overlay registered for this class -> let other providers try.
            raise CannotProvide
        if (self._chain is None):
            return overlay
        try:
            next_overlay = mediator.provide_from_next()
        except CannotProvide:
            # Nothing further down the chain; serve our overlay unmerged.
            return overlay
        # Chain.FIRST: our overlay's fields take precedence; otherwise the
        # downstream overlay wins.
        if (self._chain == Chain.FIRST):
            return next_overlay.merge(overlay)
        return overlay.merge(next_overlay)
def compute_target(answers_dset, ans2label, name, cache_root):
    """Build soft-score answer targets for every entry and cache them.

    Returns the list of target dicts and writes them (pickled) to
    <cache_root>/<name>_target.pkl.
    """
    target = []
    for entry in answers_dset:
        # Tally how often each free-form answer string occurs for this question.
        answer_count = {}
        for ans in entry['answers']:
            text = ans['answer']
            answer_count[text] = answer_count.get(text, 0) + 1
        labels = []
        scores = []
        for text in answer_count:
            if text in ans2label:
                labels.append(ans2label[text])
                scores.append(get_score(answer_count[text]))
        label_counts = {ans2label[text]: count for (text, count) in answer_count.items() if text in ans2label}
        target.append({'question_id': entry['question_id'], 'question_type': entry['question_type'], 'image_id': entry['image_id'], 'label_counts': label_counts, 'labels': labels, 'scores': scores})
    print(cache_root)
    utils.create_dir(cache_root)
    cache_file = os.path.join(cache_root, name + '_target.pkl')
    print(cache_file)
    with open(cache_file, 'wb') as f:
        cPickle.dump(target, f)
    return target
def panfpn_config(min_level, max_level, weight_method=None):
    """Build a PAN-FPN node-graph config: a top-down pass followed by a bottom-up pass.

    Each node records its feature reduction (2**level), the ids of its input
    nodes, and the fusion weight method (default 'fastattn').
    """
    cfg = OmegaConf.create()
    method = weight_method or 'fastattn'
    num_levels = max_level - min_level + 1
    # Seed each level with the id of its backbone input (0..num_levels-1).
    node_ids = {min_level + i: [i] for i in range(num_levels)}
    next_id = itertools.count(num_levels)

    def last_id(level):
        return node_ids[level][-1]

    cfg.nodes = []
    # Top-down pass: from the coarsest level to the finest.
    for level in range(max_level, min_level - 1, -1):
        if level == max_level:
            inputs = [last_id(level)]
        else:
            inputs = [last_id(level), last_id(level + 1)]
        cfg.nodes.append({'reduction': 1 << level, 'inputs_offsets': inputs, 'weight_method': method})
        node_ids[level].append(next(next_id))
    # Bottom-up pass: from the finest level back to the coarsest.
    for level in range(min_level, max_level + 1):
        if level == min_level:
            inputs = [last_id(level)]
        else:
            inputs = [last_id(level), last_id(level - 1)]
        cfg.nodes.append({'reduction': 1 << level, 'inputs_offsets': inputs, 'weight_method': method})
        node_ids[level].append(next(next_id))
    return cfg
def parse(string, symb=None):
    """Extract and parse an ECP block from a basis-set text dump.

    When `symb` is given, scan for the line whose first token is that
    (standardized) symbol and take the section from there until the next
    line starting with a different element symbol; otherwise parse the whole
    string. Comment tails (after '#') and 'ECP'/'END' marker lines are
    stripped before handing off to _parse_ecp.

    Raises BasisNotFoundError when `symb` is requested but not present.
    """
    if (symb is not None):
        symb = _std_symbol(symb)
        raw_data = string.splitlines()
        for (i, dat) in enumerate(raw_data):
            dat0 = dat.split(None, 1)
            if (dat0 and (dat0[0] == symb)):
                break
        else:
            # FIX: the old check `if (i + 1) == len(raw_data): raise` wrongly
            # raised when the symbol sat on the very last line, and crashed
            # with NameError on empty input; for/else only raises when the
            # scan truly found nothing.
            raise BasisNotFoundError(('ECP not found for %s' % symb))
        seg = []
        for dat in raw_data[i:]:
            dat = dat.strip()
            if dat:
                # A line starting with a different element symbol ends the section.
                if (dat[0].isalpha() and (dat.split(None, 1)[0].upper() != symb.upper())):
                    break
                seg.append(dat)
    else:
        seg = string.splitlines()
    ecptxt = []
    for dat in seg:
        # Drop comments and the ECP/END framing lines.
        dat = dat.split('#')[0].strip()
        dat_upper = dat.upper()
        if (dat and (not dat_upper.startswith('END')) and (not dat_upper.startswith('ECP'))):
            ecptxt.append(dat)
    return _parse_ecp(ecptxt)
def main():
    """Train a DQN agent on Pendulum-v0, tracking moving-average reward and Q.

    Stops early — saving parameters and the training records — once the
    running reward exceeds -200; plotting happens after the episode loop.
    """
    env = gym.make('Pendulum-v0')
    env.seed(args.seed)
    agent = Agent()
    training_records = []
    # Exponential moving averages of episode score and TD-target Q.
    (running_reward, running_q) = ((- 1000), 0)
    for i_ep in range(100):
        score = 0
        state = env.reset()
        for t in range(200):
            (action, action_index) = agent.select_action(state)
            (state_, reward, done, _) = env.step(action)
            score += reward
            if args.render:
                env.render()
            # Reward is shifted/scaled by (r + 8) / 8 before storage —
            # presumably to normalize Pendulum's roughly [-16, 0] rewards.
            agent.store_transition(Transition(state, action_index, ((reward + 8) / 8), state_))
            state = state_
            if agent.memory.isfull:
                q = agent.update()
                running_q = ((0.99 * running_q) + (0.01 * q))
        running_reward = ((running_reward * 0.9) + (score * 0.1))
        training_records.append(TrainingRecord(i_ep, running_reward))
        if ((i_ep % args.log_interval) == 0):
            print('Ep {}\tAverage score: {:.2f}\tAverage Q: {:.2f}'.format(i_ep, running_reward, running_q))
        if (running_reward > (- 200)):
            # Solved: persist parameters and records, then stop training.
            print('Solved! Running reward is now {}!'.format(running_reward))
            env.close()
            agent.save_param()
            with open('log/dqn_training_records.pkl', 'wb') as f:
                pickle.dump(training_records, f)
            break
    env.close()
    plt.plot([r.ep for r in training_records], [r.reward for r in training_records])
    plt.title('DQN')
    plt.xlabel('Episode')
    plt.ylabel('Moving averaged episode reward')
    plt.savefig('img/dqn.png')
    plt.show()
class first_conv(nn.Conv2d):
    """Conv2d whose weights are fake-quantized to signed 8 bits on the forward pass.

    Gradients flow to the full-precision weights via a straight-through
    estimator, so training updates the underlying float parameters.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False):
        super(first_conv, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)
        self.layer_type = 'FConv2d'

    def forward(self, x):
        # Symmetric fake quantization: scale by the max weight, snap to the
        # 127-step integer grid, then scale back.
        w_max = self.weight.data.max()
        quantized = self.weight.div(w_max).mul(127).round().div(127).mul(w_max)
        # Straight-through estimator: the forward value is `quantized`, but the
        # gradient of this expression w.r.t. self.weight is the identity.
        quantized = (quantized - self.weight).detach() + self.weight
        return F.conv2d(x, quantized, self.bias, self.stride, self.padding, self.dilation, self.groups)
def build_transform(is_train, args):
    """Build the train or eval image transform pipeline (DeiT-style recipe).

    Training uses timm's create_transform (color jitter, auto-augment, random
    erasing); evaluation resizes (warping at >=384, center-crop otherwise),
    converts to tensor and normalizes. Inputs of size <= 32 skip resizing.
    """
    resize_im = args.input_size > 32
    if args.imagenet_default_mean_and_std:
        mean, std = IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
    else:
        mean, std = IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD

    if is_train:
        transform = create_transform(input_size=args.input_size, is_training=True, color_jitter=args.color_jitter, auto_augment=args.aa, interpolation=args.train_interpolation, re_prob=args.reprob, re_mode=args.remode, re_count=args.recount, mean=mean, std=std)
        if not resize_im:
            # Small inputs (e.g. CIFAR): replace the random-resized-crop with
            # a padded random crop at the native size.
            transform.transforms[0] = transforms.RandomCrop(args.input_size, padding=4)
        return transform

    eval_ops = []
    if resize_im:
        if args.input_size >= 384:
            eval_ops.append(transforms.Resize((args.input_size, args.input_size), interpolation=transforms.InterpolationMode.BICUBIC))
            print(f'Warping {args.input_size} size input images...')
        else:
            if args.crop_pct is None:
                args.crop_pct = 224 / 256
            size = int(args.input_size / args.crop_pct)
            eval_ops.append(transforms.Resize(size, interpolation=transforms.InterpolationMode.BICUBIC))
            eval_ops.append(transforms.CenterCrop(args.input_size))
    eval_ops.append(transforms.ToTensor())
    eval_ops.append(transforms.Normalize(mean, std))
    return transforms.Compose(eval_ops)
def test_filerewriter_files_in_to_out_no_in_found_no_out():
    """files_in_to_out with a glob matching nothing: log a notice, call no formatter."""
    rewriter = ArbRewriter('formatter')
    with patch_logger('pypyr.utils.filesystem', logging.INFO) as mock_logger_info:
        rewriter.files_in_to_out('./arb/*')
    # Exactly one INFO record: the "found no files" notice.
    assert (mock_logger_info.mock_calls == [call('./arb/* found no files')])
    # And the rewriter never attempted an in->out conversion.
    assert (not rewriter.in_out_calls)
def eval_where(pred, label):
    """Score a predicted WHERE clause against the gold one.

    `pred['where']`/`label['where']` alternate condition units with
    conjunction tokens, so `[::2]` selects only the condition units. Each
    predicted unit scores a full match (`cnt`) when the whole unit appears in
    the gold conditions, and an agg-insensitive match (`cnt_wo_agg`) when just
    its value part (`unit[2]`) appears; matched gold entries are consumed so
    duplicates are not double-counted.

    Returns (label_total, pred_total, cnt, cnt_wo_agg).
    """
    # FIX: the identity comprehensions `[unit for unit in ...[::2]]` were
    # no-ops — slicing already yields fresh lists, safe to mutate below.
    pred_conds = pred['where'][::2]
    label_conds = label['where'][::2]
    label_wo_agg = [unit[2] for unit in label_conds]
    pred_total = len(pred_conds)
    label_total = len(label_conds)
    cnt = 0
    cnt_wo_agg = 0
    for unit in pred_conds:
        if unit in label_conds:
            cnt += 1
            label_conds.remove(unit)
        if unit[2] in label_wo_agg:
            cnt_wo_agg += 1
            label_wo_agg.remove(unit[2])
    return (label_total, pred_total, cnt, cnt_wo_agg)
def add_args_to_env(builder: IRBuilder, local: bool=True, base: ((FuncInfo | ImplicitClass) | None)=None, reassign: bool=True) -> None:
    """Register the current function's arguments in the IR environment.

    With local=True every argument gets a local register, followed by the
    bitmap argument registers (added in reverse index order). Otherwise,
    arguments that are free variables (or any argument of a generator) are
    stored on the environment class `base` instead.
    """
    fn_info = builder.fn_info
    args = fn_info.fitem.arguments
    nb = num_bitmap_args(builder, args)
    if local:
        for arg in args:
            rtype = builder.type_to_rtype(arg.variable.type)
            builder.add_local_reg(arg.variable, rtype, is_arg=True)
        # Bitmap registers come after the regular args, highest index first.
        for i in reversed(range(nb)):
            builder.add_local_reg(Var(bitmap_name(i)), bitmap_rprimitive, is_arg=True)
    else:
        for arg in args:
            # Only captured args — or all args of a generator, which must
            # survive across yields — are copied into the environment class.
            if (is_free_variable(builder, arg.variable) or fn_info.is_generator):
                rtype = builder.type_to_rtype(arg.variable.type)
                assert (base is not None), 'base cannot be None for adding nonlocal args'
                builder.add_var_to_env_class(arg.variable, rtype, base, reassign=reassign)
def ddpg_heatmap():
    """Plot value and action heatmaps over the Pendulum state space from saved DDPG nets.

    Loads actor/critic parameters from param/, evaluates them on a
    (theta, theta_dot) grid, and saves the figure to img/ddpg_heatmap.png.
    """
    from ddpg import ActorNet, CriticNet

    def _label_pendulum_axes(ax, x_pxl, y_pxl):
        # FIX(refactor): this tick/label boilerplate was duplicated verbatim
        # for both subplots.
        ax.set_xlabel('$\\theta$')
        ax.set_xticks(np.linspace(0, x_pxl, 5))
        ax.set_xticklabels(['$-\\pi$', '$-\\pi/2$', '$0$', '$\\pi/2$', '$\\pi$'])
        ax.set_ylabel('$\\dot{\\theta}$')
        ax.set_yticks(np.linspace(0, y_pxl, 5))
        ax.set_yticklabels(['-8', '-4', '0', '4', '8'])

    (x_pxl, y_pxl) = (300, 400)
    # State grid: rows sweep theta_dot in [-8, 8], columns sweep theta in [-pi, pi];
    # each state is (cos(theta), sin(theta), theta_dot).
    state = torch.Tensor([[np.cos(theta), np.sin(theta), thetadot] for thetadot in np.linspace((- 8), 8, y_pxl) for theta in np.linspace((- np.pi), np.pi, x_pxl)])
    anet = ActorNet()
    anet.load_state_dict(torch.load('param/ddpg_anet_params.pkl'))
    action_map = anet(state).view(y_pxl, x_pxl).detach().numpy()
    cnet = CriticNet()
    cnet.load_state_dict(torch.load('param/ddpg_cnet_params.pkl'))
    value_map = cnet(state, anet(state)).view(y_pxl, x_pxl).detach().numpy()
    fig = plt.figure()
    fig.suptitle('DDPG')
    ax = fig.add_subplot(121)
    im = ax.imshow(value_map, cmap=plt.cm.spring, interpolation='bicubic')
    plt.colorbar(im, shrink=0.5)
    ax.set_title('Value Map')
    _label_pendulum_axes(ax, x_pxl, y_pxl)
    ax = fig.add_subplot(122)
    im = ax.imshow(action_map, cmap=plt.cm.winter, interpolation='bicubic')
    plt.colorbar(im, shrink=0.5)
    ax.set_title('Action Map')
    _label_pendulum_axes(ax, x_pxl, y_pxl)
    plt.tight_layout()
    plt.savefig('img/ddpg_heatmap.png')
    plt.show()
class InputFeatures(object):
    """Container for a single tokenized example's model inputs."""

    def __init__(self, input_ids, attention_mask, token_type_ids, label, input_len):
        self.input_ids = input_ids            # token ids fed to the model
        self.attention_mask = attention_mask  # 1 for real tokens, 0 for padding
        self.token_type_ids = token_type_ids  # segment ids
        self.input_len = input_len            # length before padding
        self.label = label                    # target label

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Return a deep copy of the instance attributes as a plain dict."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Return a pretty-printed, key-sorted JSON rendering with a trailing newline."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + '\n'
# NOTE(review): the bare call below looks like a registration decorator whose
# '@' (and possibly a name prefix) was lost in extraction — confirm against
# the original source.
_model('my_model')
class MyModel(ClassyModel):
    """Minimal model: adaptive-average-pool to 20x20, flatten, linear to 2 logits, sigmoid."""

    def __init__(self):
        super().__init__()
        self.model = nn.Sequential(nn.AdaptiveAvgPool2d((20, 20)), nn.Flatten(1), nn.Linear(((3 * 20) * 20), 2), nn.Sigmoid())

    def forward(self, x):
        x = self.model(x)
        return x

    @classmethod
    def from_config(cls, config):
        # FIX: `cls` as the first parameter shows this was meant to be a
        # classmethod; without the decorator the config-construction hook
        # received an instance as `cls` (or failed when called on the class).
        return cls()
def wait_single_channel_deposit(app_deposit: 'RaidenService', app_partner: 'RaidenService', registry_address: TokenNetworkRegistryAddress, token_address: TokenAddress, total_deposit: TokenAmount, retry_timeout: float) -> None:
    """Block until both nodes observe app_deposit's `total_deposit` in their shared channel.

    Both waits track the same deposit (target_address=app_deposit.address):
    first from the depositor's own view, then from the partner's view.
    """
    # Depositor sees its own deposit confirmed.
    wait_for_participant_deposit(raiden=app_deposit, token_network_registry_address=registry_address, token_address=token_address, partner_address=app_partner.address, target_address=app_deposit.address, target_balance=total_deposit, retry_timeout=retry_timeout)
    # Partner sees the depositor's deposit confirmed.
    wait_for_participant_deposit(raiden=app_partner, token_network_registry_address=registry_address, token_address=token_address, partner_address=app_deposit.address, target_address=app_deposit.address, target_balance=total_deposit, retry_timeout=retry_timeout)
def run_ruff(settings: PluginSettings, document_path: str, document_source: str, subcommand: Subcommand = Subcommand.CHECK, fix: bool = False, extra_arguments: Optional[List[str]] = None) -> str:
    """Run ruff on `document_source` and return its stdout.

    Prefers the configured executable; falls back to `<python> -m ruff` when
    no executable is configured or spawning it fails. The document is passed
    on stdin; a non-zero exit is logged but stdout is returned regardless.
    """
    executable = settings.executable
    arguments = subcommand.build_args(document_path, settings, fix, extra_arguments)
    p = None
    if executable is not None:
        log.debug(f"Calling {executable} with args: {arguments} on '{document_path}'")
        try:
            # FIX: the subcommand ('check'/'format') was missing from the
            # custom-executable invocation, while the interpreter fallback
            # below includes it — ruff was invoked without a subcommand.
            cmd = [executable, str(subcommand)]
            cmd.extend(arguments)
            p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        except Exception:
            log.error(f"Can't execute ruff with given executable '{executable}'.")
    if p is None:
        # Fallback: run the ruff module with the interpreter hosting the server.
        log.debug(f"Calling ruff via '{sys.executable} -m ruff' with args: {arguments} on '{document_path}'")
        cmd = [sys.executable, '-m', 'ruff', str(subcommand)]
        cmd.extend(arguments)
        p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    (stdout, stderr) = p.communicate(document_source.encode())
    if p.returncode != 0:
        log.error(f'Error running ruff: {stderr.decode()}')
    return stdout.decode()
# NOTE(review): the bare tuple below looks like the argument list of a stripped
# skip decorator (e.g. `@skipIf(pgv is None, ...)`) whose '@' and name were
# lost in extraction — confirm against the original source.
((pgv is None), 'NestedGraph diagram test requires graphviz')
class TestDiagramsNested(TestDiagrams):
    """Graph-drawing tests for hierarchical (nested/parallel) state machines."""

    machine_cls = HierarchicalGraphMachine

    def setUp(self):
        super(TestDiagramsNested, self).setUp()
        # C has nested children (C_1 with its own a/b/c), exercising nested clusters.
        self.states = ['A', 'B', {'name': 'C', 'children': [{'name': '1', 'children': ['a', 'b', 'c']}, '2', '3']}, 'D']
        self.transitions = [{'trigger': 'walk', 'source': 'A', 'dest': 'B'}, {'trigger': 'run', 'source': 'B', 'dest': 'C'}, {'trigger': 'sprint', 'source': 'C', 'dest': 'D', 'conditions': 'is_fast'}, {'trigger': 'sprint', 'source': 'C', 'dest': 'B'}, {'trigger': 'reset', 'source': '*', 'dest': 'A'}]

    def test_diagram(self):
        """Full graph: expected edge count, all nested states as nodes, drawable to PNG."""
        m = self.machine_cls(states=self.states, transitions=self.transitions, initial='A', auto_transitions=False, title='A test', show_conditions=True, use_pygraphviz=self.use_pygraphviz)
        graph = m.get_graph()
        self.assertIsNotNone(graph)
        self.assertTrue(('digraph' in str(graph)))
        (_, nodes, edges) = self.parse_dot(graph)
        self.assertEqual(len(edges), 8)
        self.assertEqual(set(m.get_nested_state_names()), nodes)
        m.walk()
        m.run()
        # Drawing twice checks that rendering is repeatable to the same file.
        target = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
        m.get_graph().draw(target.name, prog='dot')
        self.assertTrue((os.path.getsize(target.name) > 0))
        m.get_graph().draw(target.name, prog='dot')
        self.assertTrue((os.path.getsize(target.name) > 0))
        target.close()
        os.unlink(target.name)

    def test_roi(self):
        """Region-of-interest graphs shrink to the states/edges around the current state."""
        class Model():
            def is_fast(self, *args, **kwargs):
                return True
        model = Model()
        m = self.machine_cls(model, states=self.states, transitions=self.transitions, initial='A', title='A test', use_pygraphviz=self.use_pygraphviz, show_conditions=True)
        model.walk()
        model.run()
        g1 = model.get_graph(show_roi=True)
        (_, nodes, edges) = self.parse_dot(g1)
        self.assertEqual(len(edges), 4)
        self.assertEqual(len(nodes), 4)
        model.sprint()
        g2 = model.get_graph(show_roi=True)
        (dot, nodes, edges) = self.parse_dot(g2)
        self.assertEqual(len(edges), 2)
        self.assertEqual(len(nodes), 3)

    def test_roi_parallel(self):
        """ROI graphs with a parallel initial state."""
        class Model():
            def is_fast(*args, **kwargs):
                return True
        self.states[0] = {'name': 'A', 'parallel': ['1', '2']}
        model = Model()
        m = self.machine_cls(model, states=self.states, transitions=self.transitions, initial='A', title='A test', use_pygraphviz=self.use_pygraphviz, show_conditions=True)
        g1 = model.get_graph(show_roi=True)
        (_, nodes, edges) = self.parse_dot(g1)
        self.assertEqual(len(edges), 2)
        print(nodes)
        self.assertEqual(len(nodes), 4)
        model.walk()
        model.run()
        model.sprint()
        g2 = model.get_graph(show_roi=True)
        (dot, nodes, edges) = self.parse_dot(g2)
        self.assertEqual(len(edges), 2)
        self.assertEqual(len(nodes), 3)

    def test_roi_parallel_deeper(self):
        """ROI with deeply nested parallel substates still renders the full active set."""
        states = ['A', 'B', 'C', 'D', {'name': 'P', 'parallel': ['1', {'name': '2', 'parallel': [{'name': 'a'}, {'name': 'b', 'parallel': [{'name': 'x', 'parallel': ['1', '2']}, 'y']}]}]}]
        transitions = [['go', 'A', 'P'], ['reset', '*', 'A']]
        m = self.machine_cls(states=states, transitions=transitions, initial='A', title='A test', use_pygraphviz=self.use_pygraphviz, show_conditions=True)
        m.go()
        (_, nodes, edges) = self.parse_dot(m.get_graph(show_roi=True))
        self.assertEqual(len(edges), 2)
        self.assertEqual(len(nodes), 10)

    def test_internal(self):
        """Internal transitions (dest=None) are rendered with an [internal] edge label."""
        states = ['A', 'B']
        transitions = [['go', 'A', 'B'], dict(trigger='fail', source='A', dest=None, conditions=['failed']), dict(trigger='fail', source='A', dest='B', unless=['failed'])]
        m = self.machine_cls(states=states, transitions=transitions, initial='A', show_conditions=True, use_pygraphviz=self.use_pygraphviz)
        (_, nodes, edges) = self.parse_dot(m.get_graph())
        print(nodes)
        self.assertEqual(len(nodes), 2)
        self.assertEqual(len([e for e in edges if ('[internal]' in e)]), 1)

    def test_internal_wildcards(self):
        """Wildcard internal transitions label each edge with exactly one [internal]."""
        # Regex: the string contains '[internal]' exactly once.
        internal_only_once = '^(?:(?!\\[internal\\]).)*\\[internal\\](?!.*\\[internal\\]).*$'
        states = ['initial', 'ready', 'running']
        transitions = [['booted', 'initial', 'ready'], {'trigger': 'polled', 'source': 'ready', 'dest': 'running', 'conditions': 'door_closed'}, ['done', 'running', 'ready'], ['polled', '*', None]]
        m = self.machine_cls(states=states, transitions=transitions, show_conditions=True, use_pygraphviz=self.use_pygraphviz, initial='initial')
        (_, nodes, edges) = self.parse_dot(m.get_graph())
        self.assertEqual(len(nodes), 3)
        self.assertEqual(len([e for e in edges if re.match(internal_only_once, e)]), 3)

    def test_nested_notebook(self):
        """A feature-rich nested machine (timeouts, tags, reflexive/self transitions) renders."""
        states = [{'name': 'caffeinated', 'on_enter': 'do_x', 'children': ['dithering', 'running'], 'transitions': [['walk', 'dithering', 'running'], ['drink', 'dithering', '=']]}, {'name': 'standing', 'on_enter': ['do_x', 'do_y'], 'on_exit': 'do_z'}, {'name': 'walking', 'tags': ['accepted', 'pending'], 'timeout': 5, 'on_timeout': 'do_z'}]
        transitions = [['walk', 'standing', 'walking'], ['go', 'standing', 'walking'], ['stop', 'walking', 'standing'], {'trigger': 'drink', 'source': '*', 'dest': 'caffeinated{0}dithering'.format(self.machine_cls.state_cls.separator), 'conditions': 'is_hot', 'unless': 'is_too_hot'}, ['relax', 'caffeinated', 'standing'], ['sip', 'standing', 'caffeinated']]
        # NOTE(review): the bare call below looks like a stripped
        # `@add_state_features(...)`-style decorator — confirm against the
        # original source.
        _state_features(Timeout, Tags)
        class CustomStateMachine(self.machine_cls):
            def is_hot(self):
                return True
            def is_too_hot(self):
                return False
            def do_x(self):
                pass
            def do_z(self):
                pass
        extra_args = dict(auto_transitions=False, initial='standing', title='Mood Matrix', show_conditions=True, show_state_attributes=True, use_pygraphviz=self.use_pygraphviz)
        machine = CustomStateMachine(states=states, transitions=transitions, **extra_args)
        g1 = machine.get_graph()
        if self.use_pygraphviz:
            dot_string = g1.string()
        else:
            dot_string = g1.source
        # The wildcard 'drink' transition should produce one edge per source state.
        count = re.findall('-> "?caffeinated{0}dithering"?'.format(machine.state_cls.separator), dot_string)
        self.assertEqual(4, len(count))
        self.assertTrue(True)
        machine.drink()
        machine.drink()
        g1 = machine.get_graph()
        self.assertIsNotNone(g1)
class KLLoss_t3(nn.Module):
    """KL-divergence distillation loss with temperature T=3.

    Computes T^2 * KL(softmax(label/T) || softmax(pred/T)), averaged over the
    batch. `label` is the teacher's logits; gradients flow only through `pred`.
    """

    def __init__(self):
        super(KLLoss_t3, self).__init__()

    def forward(self, pred, label):
        """Return the scalar distillation loss for student logits `pred` vs teacher logits `label`."""
        T = 3
        predict = F.log_softmax((pred / T), dim=1)
        target_data = F.softmax((label / T), dim=1)
        # Small epsilon keeps target.log() finite.
        target_data = (target_data + (10 ** (- 7)))
        # FIX: `Variable(target_data.data.cuda(), requires_grad=False)` forced
        # the target onto the GPU (crashing on CPU-only machines) and used the
        # deprecated Variable API; detach() gives the same stop-gradient
        # semantics on whatever device the inputs already live on.
        target = target_data.detach()
        # T^2 scaling keeps gradient magnitudes comparable across temperatures.
        loss = ((T * T) * ((target * (target.log() - predict)).sum(1).sum() / target.size()[0]))
        return loss
# NOTE(review): the parenthesized keyword list below looks like a stripped
# hypothesis `@given(...)` decorator (the '@given' prefix was lost in
# extraction); as written it is not valid Python — confirm against the
# original source.
(prefer_attrib=..., dict_factory=one_of(just(dict), just(OrderedDict)), detailed_validation=...)
def test_col_overrides(prefer_attrib: bool, dict_factory: Callable, detailed_validation: bool):
    """A converter copy with empty unstruct_collection_overrides drops the
    parent's list->tuple override, while registered hooks are inherited."""
    c = Converter(prefer_attrib_converters=prefer_attrib, detailed_validation=detailed_validation, dict_factory=dict_factory, unstruct_collection_overrides={list: tuple})
    assert (c.unstructure([1, 2, 3]) == (1, 2, 3))
    c.register_unstructure_hook(Simple, (lambda s: s.a))
    copy = c.copy(unstruct_collection_overrides={})
    assert (c is not copy)
    # Parent keeps its override; the copy falls back to plain lists.
    assert (c.unstructure([1, 2, 3]) == (1, 2, 3))
    assert (copy.unstructure([1, 2, 3]) == [1, 2, 3])
    # The Simple hook registered on the parent is shared with the copy.
    assert (c.unstructure(Simple(1)) == 1)
    assert (copy.unstructure(Simple(1)) == 1)
class LogitGetter(torch.nn.Module):
    """Compute classification logits from a metric-learning classifier's weights.

    Grabs the classifier's weight attribute (explicit `layer_name`, or the
    first of 'fc'/'proxies'/'W' that exists) and applies `distance` between
    embeddings and those weights. `transpose` may be True, False, or None for
    automatic transposition when the leading weight dim matches the embedding dim.
    """

    possible_layer_names = ['fc', 'proxies', 'W']

    def __init__(self, classifier, layer_name=None, transpose=None, distance=None, copy_weights=True):
        super().__init__()
        self.copy_weights = copy_weights
        if layer_name is not None:
            self.set_weights(getattr(classifier, layer_name))
        else:
            # No explicit name: probe the usual attribute names in order.
            for candidate in self.possible_layer_names:
                found = getattr(classifier, candidate, None)
                if found is not None:
                    self.set_weights(found)
                    break
        self.distance = distance if distance is not None else classifier.distance
        self.transpose = transpose

    def forward(self, embeddings):
        w = self.weights
        if self.transpose is True:
            w = w.t()
        elif self.transpose is None and w.size(0) == embeddings.size(1):
            # Auto mode: transpose when the leading dim lines up with the
            # embedding dimension.
            w = w.t()
        return self.distance(embeddings, w)

    def set_weights(self, layer):
        # Deep-copy by default so later classifier updates don't leak in.
        self.weights = copy.deepcopy(layer) if self.copy_weights else layer
class Problem(qpsolvers.Problem):
    """qpsolvers.Problem tagged with a human-readable name (the benchmark file stem)."""

    name: str

    def __init__(self, P: Union[(np.ndarray, spa.csc_matrix)], q: np.ndarray, G: Optional[Union[(np.ndarray, spa.csc_matrix)]], h: Optional[np.ndarray], A: Optional[Union[(np.ndarray, spa.csc_matrix)]], b: Optional[np.ndarray], lb: Optional[np.ndarray], ub: Optional[np.ndarray], name: str):
        super().__init__(P, q, G, h, A, b, lb, ub)
        self.name = name

    def to_dense(self):
        """Return a copy whose P/G/A matrices are dense float arrays.

        NOTE(review): assumes P (and G/A when present) are scipy sparse —
        calling this on an already-dense problem would fail; confirm usage.
        """
        return Problem(self.P.toarray().astype(float), self.q, (self.G.toarray().astype(float) if (self.G is not None) else None), self.h, (self.A.toarray().astype(float) if (self.A is not None) else None), self.b, self.lb, self.ub, name=self.name)

    def to_sparse(self):
        """Return a copy whose dense P/G/A matrices are converted to CSC sparse."""
        (P, G, A) = (self.P, self.G, self.A)
        return Problem((spa.csc_matrix(P) if isinstance(P, np.ndarray) else P), self.q, (spa.csc_matrix(G) if isinstance(G, np.ndarray) else G), self.h, (spa.csc_matrix(A) if isinstance(A, np.ndarray) else A), self.b, self.lb, self.ub, name=self.name)

    @staticmethod
    def load(file: str):
        """Load a Problem from `file`; the name is the file's basename sans extension.

        FIX: `load` takes no self/cls parameter, so the @staticmethod
        decorator was missing (it appears to have been stripped), making
        `instance.load(path)` misbind the instance as the file argument.
        """
        name = os.path.splitext(os.path.basename(file))[0]
        loaded = qpsolvers.Problem.load(file)
        return Problem(loaded.P, loaded.q, loaded.G, loaded.h, loaded.A, loaded.b, loaded.lb, loaded.ub, name)
def main(args):
    """Check QDMR examples from the Break dataset and optionally save corrections.

    With args.break_idx set, verbosely checks that single example; otherwise
    checks every example, writing corrected QDMRs back into the dataset and
    logging failures, then saves the dataset to args.output_path when given.
    """
    print(args)
    split_name = ('dev' if args.dev else 'train')
    # Restrict to the spider subset unless the full Break dataset is requested.
    filter_subset = ('spider' if (not args.full_break) else '')
    dataset_break = DatasetBreak(args.qdmr_path, split_name, filter_subset=filter_subset)
    if (args.break_idx is not None):
        qdmr_name = dataset_break.names[args.break_idx]
        qdmr = dataset_break.qdmrs[qdmr_name]
        question = dataset_break.questions[qdmr_name]
        print()
        print(qdmr_name)
        check_example(qdmr, qdmr_name, question, verbose=True)
    else:
        for (qdmr_name, qdmr) in dataset_break:
            break_idx = DatasetBreak.get_index_from_name(qdmr_name)
            dataset_keyword = DatasetBreak.get_dataset_keyword_from_name(qdmr_name)
            question = dataset_break.get_question_by_subset_indx(break_idx, dataset_keyword)
            try:
                qdmr_corrected = check_example(qdmr, qdmr_name, question, verbose=False)
                dataset_break.qdmrs[qdmr_name] = qdmr_corrected
            except Exception as e:
                # Best-effort pass: report the failure and continue.
                print(f'{qdmr_name}: ERROR: {e}')
        if args.output_path:
            dataset_break.save_break_to_csv_file(args.output_path)
def modified_precision(candidate, references, n):
    """BLEU modified n-gram precision: (clipped matches, total candidate n-grams).

    Each candidate n-gram count is clipped to the maximum count of that n-gram
    in any single reference. Returns (0, 0) when the candidate has no n-grams.
    """
    def ngram_counts(tokens):
        total = len(tokens) + 1 - n
        return Counter(tuple(tokens[i:i + n]) for i in range(total))

    cand_counts = ngram_counts(candidate)
    if not cand_counts:
        return (0, 0)
    # Per n-gram, the best count achieved by any single reference.
    max_ref_counts = {}
    for reference in references:
        ref_counts = ngram_counts(reference)
        for gram in cand_counts:
            max_ref_counts[gram] = max(max_ref_counts.get(gram, 0), ref_counts.get(gram, 0))
    clipped = sum(min(count, max_ref_counts[gram]) for (gram, count) in cand_counts.items())
    return (float(clipped), float(sum(cand_counts.values())))
# NOTE(review): the line below looks like a stripped `@pytest.mark.parametrize`
# decorator (the '@pytest.mark' prefix was lost in extraction); as written it
# is not valid Python — confirm against the original source.
.parametrize('bounded', [False, True])
def test_mle_jacobian(bounded):
    """find_MAP with BFGS should recover the true mean of the simple normal model."""
    truth = 10.0
    rtol = 0.0001
    (start, model, _) = models.simple_normal(bounded_prior=bounded)
    with model:
        map_estimate = find_MAP(method='BFGS', model=model)
    assert_allclose(map_estimate['mu_i'], truth, rtol=rtol)
def _get_win_folder_with_ctypes(csidl_name):
    """Resolve a Windows special-folder path (APPDATA etc.) via SHGetFolderPathW.

    Falls back to the 8.3 short path when the resolved path contains
    characters outside Latin-1, since such paths can trip legacy consumers.
    """
    import ctypes
    csidl_const = {'CSIDL_APPDATA': 26, 'CSIDL_COMMON_APPDATA': 35, 'CSIDL_LOCAL_APPDATA': 28}[csidl_name]
    buf = ctypes.create_unicode_buffer(1024)
    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
    if any(ord(ch) > 255 for ch in buf):
        short_buf = ctypes.create_unicode_buffer(1024)
        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, short_buf, 1024):
            buf = short_buf
    return buf.value
def do_checkpoint(prefix, means, stds):
    """Return an epoch-end callback that saves a checkpoint with denormalized bbox heads."""
    def _callback(iter_no, sym, arg, aux):
        # Bake the bbox target normalization (stds/means) into test-time
        # copies of the regression parameters so deployment needs no extra step.
        arg['bbox_pred_weight_test'] = (arg['bbox_pred_weight'].T * mx.nd.array(stds)).T
        arg['bbox_pred_bias_test'] = (arg['bbox_pred_bias'] * mx.nd.array(stds)) + mx.nd.array(means)
        mx.model.save_checkpoint(prefix, iter_no + 1, sym, arg, aux)
        # Drop the temporaries so the in-memory training params stay untouched.
        del arg['bbox_pred_weight_test']
        del arg['bbox_pred_bias_test']
    return _callback
class FlaxRobertaModelTester(unittest.TestCase):
    """Helper that builds a small RobertaConfig plus random inputs for Flax Roberta tests.

    Instantiated by the actual test case (passed in as `parent`); note it
    stores the hyperparameters without calling unittest.TestCase.__init__.
    """

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) with random tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
        return (config, input_ids, token_type_ids, attention_mask)

    def prepare_config_and_inputs_for_common(self):
        """Repack the inputs into the dict shape shared across model tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, attention_mask) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return (config, inputs_dict)

    def prepare_config_and_inputs_for_decoder(self):
        """Decoder variant: flips is_decoder and adds encoder hidden states and mask."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, attention_mask) = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask)
class MockPsutil(ModuleType):
    """Stand-in for the `psutil` module exposing canned network I/O counters."""

    up = 0
    down = 0

    @classmethod
    def net_io_counters(cls, pernic=False, _nowrap=True):
        """Mimic psutil.net_io_counters: fixed byte counts, per-NIC dict when pernic=True.

        FIX: the first parameter is `cls`, so this was meant to be a
        classmethod; the decorator was missing, which broke calls made on the
        class itself (MockPsutil.net_io_counters()).
        """
        class IOCounters():
            def __init__(self):
                self.bytes_sent = 100
                self.bytes_recv = 1034
        if pernic:
            return {'wlp58s0': IOCounters(), 'lo': IOCounters()}
        return IOCounters()
class KeepKey_KeyStore(Hardware_KeyStore):
    """Keystore backed by a KeepKey hardware wallet."""

    hw_type = 'keepkey'
    device = 'KeepKey'
    plugin: 'KeepKeyPlugin'

    def get_client(self, force_pair=True):
        return self.plugin.get_client(self, force_pair)

    def decrypt_message(self, sequence, message, password):
        # Message encryption/decryption is not supported on this device.
        raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
    # NOTE(review): the bare name below looks like a decorator whose '@' (and
    # possibly a name prefix) was lost in extraction — confirm against the
    # original source.
    _in_hwd_thread
    def sign_message(self, sequence, message, password):
        """Sign `message` with the key at `sequence` under the keystore's derivation prefix."""
        client = self.get_client()
        address_path = (self.get_derivation_prefix() + ('/%d/%d' % sequence))
        address_n = client.expand_path(address_path)
        msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
        return msg_sig.signature
    # NOTE(review): same stripped-decorator pattern as above.
    _in_hwd_thread
    def sign_transaction(self, tx, password):
        """Have the device sign `tx`, supplying previous transactions for legacy inputs."""
        if tx.is_complete():
            return
        # Map prevout txid -> previous transaction; legacy (non-segwit)
        # inputs require the full previous tx, so its absence is an error.
        prev_tx = {}
        for txin in tx.inputs():
            tx_hash = txin.prevout.txid.hex()
            if ((txin.utxo is None) and (not txin.is_segwit())):
                raise UserFacingException(_('Missing previous tx for legacy input.'))
            prev_tx[tx_hash] = txin.utxo
        self.plugin.sign_transaction(self, tx, prev_tx)
class TestForbiddenPythonSyntaxCheckerAllowedsyntax(pylint.testutils.CheckerTestCase):
    """With no forbidden-syntax configuration, break/continue/comprehension/for/while
    nodes should all pass the checker without messages."""

    CHECKER_CLASS = ForbiddenPythonSyntaxChecker
    CONFIG = {}

    def set_up(self) -> None:
        self.setup_method()

    def _assert_syntax_allowed(self, src: str, node_type) -> None:
        """Parse src, visit the first node of node_type, and expect no messages."""
        mod = astroid.parse(src)
        (node, *_) = mod.nodes_of_class(node_type)
        with self.assertNoMessages():
            self.checker.visit_default(node)

    def test_allow_break_in_code(self) -> None:
        self._assert_syntax_allowed('\n    for i in range(0, 10):\n        break\n    ', nodes.Break)

    def test_allow_continue_in_code(self) -> None:
        self._assert_syntax_allowed('\n    for i in range(0, 10):\n        continue\n    ', nodes.Continue)

    def test_allow_comprehension_in_code(self) -> None:
        self._assert_syntax_allowed('\n    comp = [i ** 2 for i in range(1, 11)]\n    ', nodes.Comprehension)

    def test_allow_for_in_code(self) -> None:
        self._assert_syntax_allowed('\n    for i in range(0, 10):\n        print(i)\n    ', nodes.For)

    def test_allow_while_in_code(self) -> None:
        self._assert_syntax_allowed('\n    count = 10\n    while count > -1:\n        count -= 1\n    ', nodes.While)
class TestDraw(unittest.TestCase):
    """Interactive shape-drawing tests: simulate canvas clicks and count created artists."""

    def setUp(self):
        pass

    def click_ax_center(self, m, dx=0, dy=0, release=True, button=1):
        """Fire a synthetic mouse press (and optional release) offset from the axes center."""
        ax = m.ax
        cv = m.f.canvas
        (x, y) = (((ax.bbox.x0 + ax.bbox.x1) / 2), ((ax.bbox.y0 + ax.bbox.y1) / 2))
        button_press_event(cv, (x + dx), (y + dy), button, False)
        if release:
            button_release_event(cv, (x + dx), (y + dy), button, False)

    def test_basic_drawing_capabilities(self):
        """Draw rectangle/circle/polygon on the default drawer and on a new
        layer-bound drawer, then undo every shape on each.

        NOTE(review): clicks with button=2 appear to finalize the current
        shape (each drawing sequence ends with one) — confirm against the
        drawing API.
        """
        m = Maps()
        m.add_feature.preset.coastline()
        m.f.canvas.draw()
        m.draw.rectangle(fc='none', ec='r')
        self.click_ax_center(m)
        self.click_ax_center(m, dx=20, dy=20, button=2)
        self.assertTrue((len(m.draw._artists) == 1))
        m.draw.circle(fc='b', ec='g', alpha=0.5)
        self.click_ax_center(m, dx=50)
        self.click_ax_center(m, dx=20, dy=20, button=2)
        self.assertTrue((len(m.draw._artists) == 2))
        m.draw.polygon(fc='g', ec='b', lw=2)
        # Polygons take many vertices before the finalizing click.
        for (i, j) in np.random.randint(0, 100, (20, 2)):
            self.click_ax_center(m, dx=i, dy=j)
        self.click_ax_center(m, dx=20, dy=20, button=2)
        self.assertTrue((len(m.draw._artists) == 3))
        # Repeat the same three shapes on a drawer bound to a separate layer.
        m.new_layer('shapes')
        d = m.draw.new_drawer(layer='shapes')
        d.rectangle(fc='none', ec='r')
        self.click_ax_center(m)
        self.click_ax_center(m, dx=20, dy=20, button=2)
        self.assertTrue((len(d._artists) == 1))
        d.circle(fc='b', ec='g', alpha=0.5)
        self.click_ax_center(m, dx=50)
        self.click_ax_center(m, dx=20, dy=20, button=2)
        self.assertTrue((len(d._artists) == 2))
        d.polygon(fc='g', ec='b', lw=2)
        for (i, j) in np.random.randint(0, 100, (20, 2)):
            self.click_ax_center(m, dx=i, dy=j)
        self.click_ax_center(m, dx=20, dy=20, button=2)
        self.assertTrue((len(d._artists) == 3))
        m.show_layer('shapes')
        # Undo each shape on both drawers; counts drop to zero independently.
        m.draw.remove_last_shape()
        self.assertTrue((len(m.draw._artists) == 2))
        m.draw.remove_last_shape()
        self.assertTrue((len(m.draw._artists) == 1))
        m.draw.remove_last_shape()
        self.assertTrue((len(m.draw._artists) == 0))
        d.remove_last_shape()
        self.assertTrue((len(d._artists) == 2))
        d.remove_last_shape()
        self.assertTrue((len(d._artists) == 1))
        d.remove_last_shape()
        self.assertTrue((len(d._artists) == 0))
def test_hswish():
    """HSwish forwards x * relu6(x + 3) / 6 and honors the inplace flag."""
    inplace_act = HSwish(inplace=True)
    assert inplace_act.act.inplace
    act = HSwish()
    assert not act.act.inplace
    x = torch.randn(1, 3, 64, 64)
    expected = (x * relu6(x + 3)) / 6
    out = act(x)
    assert out.shape == expected.shape
    assert torch.equal(out, expected)
class ResidualVectorQuantization(nn.Module):
    """Residual vector quantization: each layer quantizes the residual left by
    the previous layers, and the decoded output is the sum of all layers."""

    def __init__(self, *, num_quantizers, **kwargs):
        super().__init__()
        self.layers = nn.ModuleList([VectorQuantization(**kwargs) for _ in range(num_quantizers)])

    def forward(self, x, n_q: tp.Optional[int] = None):
        """Quantize x through the first n_q layers (all by default).

        Returns (quantized_sum, stacked indices, stacked per-layer losses).
        """
        n_q = n_q or len(self.layers)
        quantized_out = 0.0
        residual = x
        losses = []
        codes = []
        for layer in self.layers[:n_q]:
            (quantized, indices, loss) = layer(residual)
            residual = residual - quantized
            quantized_out = quantized_out + quantized
            codes.append(indices)
            losses.append(loss)
        return (quantized_out, torch.stack(codes), torch.stack(losses))

    def encode(self, x: torch.Tensor, n_q: tp.Optional[int] = None) -> torch.Tensor:
        """Return the stacked codebook indices produced by the first n_q layers."""
        n_q = n_q or len(self.layers)
        residual = x
        codes = []
        for layer in self.layers[:n_q]:
            indices = layer.encode(residual)
            # Subtract this layer's reconstruction so the next layer sees the residual.
            residual = residual - layer.decode(indices)
            codes.append(indices)
        return torch.stack(codes)

    def decode(self, q_indices: torch.Tensor) -> torch.Tensor:
        """Sum the per-layer decodings of the stacked indices `q_indices`."""
        quantized_out = torch.tensor(0.0, device=q_indices.device)
        for (i, indices) in enumerate(q_indices):
            quantized_out = quantized_out + self.layers[i].decode(indices)
        return quantized_out
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 (strided) -> 1x1 expand,
    with a skip connection that is optionally downsampled to match shapes.
    """

    # Channel multiplier applied by the final 1x1 convolution.
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # 1x1 conv squeezes channels down to `planes`.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # 3x3 conv carries the spatial stride.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # 1x1 conv expands channels by `expansion`.
        self.conv3 = nn.Conv2d(planes, planes * self.expansion,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Apply the three conv/BN stages, add the skip path, return ReLU."""
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += identity
        return self.relu(out)
def test_text_format_validation_error_message_simple():
    """The formatted message prefixes the JSON path in yellow, prepending the
    filename only when one is supplied."""
    schema = {'properties': {'foo': {'anyOf': [{'type': 'string'}, {'properties': {'bar': {'type': 'array'}}}]}}}
    err = next(Draft7Validator(schema).iter_errors({'foo': {'bar': 1}}))
    reporter = TextReporter(verbosity=1)
    # With a filename, the prefix is "<file>::<json path>".
    with_file = reporter._format_validation_error_message(err, filename='foo.json')
    assert (with_file == "\x1b[33mfoo.json::$.foo\x1b[0m: {'bar': 1} is not valid under any of the given schemas")
    # Without one, only the json path is shown.
    without_file = reporter._format_validation_error_message(err)
    assert (without_file == "\x1b[33m$.foo\x1b[0m: {'bar': 1} is not valid under any of the given schemas")
class RNNAgent(nn.Module):
    """Recurrent Q-network: obs -> ReLU(fc1) -> GRUCell -> fc2 -> Q-values."""

    def __init__(self, input_shape, args):
        super(RNNAgent, self).__init__()
        self.args = args
        self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim)
        self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
        self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions)

    def init_hidden(self):
        # Zero initial hidden state on fc1's device/dtype.
        return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_()

    def forward(self, inputs, hidden_state):
        """Return ``(q_values, next_hidden)`` for a batch of observations."""
        embedded = F.relu(self.fc1(inputs))
        # Flatten any leading dims so the GRU cell sees (batch, hidden).
        flat_hidden = hidden_state.reshape(-1, self.args.rnn_hidden_dim)
        next_hidden = self.rnn(embedded, flat_hidden)
        return (self.fc2(next_hidden), next_hidden)
def test_read_setup_py_simple(tmp_path):
    """Both the raw-text and directory-level helpers read python_requires."""
    source = dedent('\n from setuptools import setup\n\n setup(\n name = "hello",\n other = 23,\n example = ["item", "other"],\n python_requires = "1.23",\n )\n ')
    # Materialize a minimal setup.py inside the temp project directory.
    (tmp_path / 'setup.py').write_text(source)
    assert (setup_py_python_requires(tmp_path.joinpath('setup.py').read_text()) == '1.23')
    assert (get_requires_python_str(tmp_path) == '1.23')
class ToolProxy(ToolBase):
    """LTI 2.0 tool proxy: fetches the Tool Consumer profile and builds a
    signed tool-proxy registration request against the consumer's service."""

    def load_tc_profile(self):
        """Fetch the Tool Consumer profile and cache it on ``self.tc_profile``.

        BUG FIX: ``tool_consumer_profile_url`` is a method; the original code
        passed the bound method object itself to ``requests.get`` instead of
        calling it to obtain the URL string.
        """
        response = requests.get(self.tool_consumer_profile_url())
        self.tc_profile = json.loads(response.text)

    def tool_consumer_profile_url(self):
        """Profile URL announced by the consumer in the launch parameters."""
        return self.launch_params['tc_profile_url']

    def find_registration_url(self):
        """Return the first offered endpoint accepting a POSTed tool proxy,
        or None when the profile offers no such service."""
        for service in self.tc_profile['service_offered']:
            if (('application/vnd.ims.lti.v2.toolproxy+json' in service['format'])
                    and ('POST' in service['action'])):
                return service['endpoint']
        return None

    def register_proxy(self, tool_profile):
        """Build and OAuth1-sign a registration request for ``tool_profile``.

        Returns the signed prepared request; sending it is the caller's job.
        """
        register_url = self.find_registration_url()
        prepared = Request(
            'POST',
            register_url,
            data=json.dumps(tool_profile, indent=4),
            headers={'Content-Type': 'application/vnd.ims.lti.v2.toolproxy+json'},
        ).prepare()
        # Sign with the registration credentials; force_include_body makes the
        # OAuth signature cover the JSON payload, as the spec requires.
        sign = OAuth1(self.launch_params['reg_key'],
                      self.launch_params['reg_password'],
                      signature_type=SIGNATURE_TYPE_AUTH_HEADER,
                      force_include_body=True)
        return sign(prepared)
.end_to_end()
.parametrize('node_def', ["(PathNode(path=Path('file1.txt')), PathNode(path=Path('file2.txt')))", "(Path('file1.txt'), Path('file2.txt'))"])
def test_return_with_tuple_and_task_decorator(runner, tmp_path, node_def):
    # End-to-end: a task returning a 2-tuple of strings should have each
    # element written to the matching node of the parametrized ``produces``
    # tuple (a PathNode pair or a plain Path pair).
    #
    # NOTE(review): the generated module below contains a bare
    # ``(produces={node_def})`` line before the def; a leading ``@task``
    # appears to have been lost -- confirm against the original test file.
    source = f'''
    from pathlib import Path
    from typing_extensions import Annotated
    from pytask import task, PathNode
    (produces={node_def})
    def task_example():
        return "Hello,", "World!"
    '''
    # Write the task module into the temp directory and run pytask over it.
    tmp_path.joinpath('task_module.py').write_text(textwrap.dedent(source))
    result = runner.invoke(cli, [tmp_path.as_posix()])
    assert (result.exit_code == ExitCode.OK)
    # Each element of the returned tuple landed in its own file.
    assert (tmp_path.joinpath('file1.txt').read_text() == 'Hello,')
    assert (tmp_path.joinpath('file2.txt').read_text() == 'World!')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.