code stringlengths 101 5.91M |
|---|
class GPT2Converter(Converter):
    """Convert a slow GPT-2 tokenizer into a fast `tokenizers` Tokenizer."""

    def converted(self) -> Tokenizer:
        source = self.original_tokenizer
        bpe_model = BPE(
            vocab=source.encoder,
            merges=list(source.bpe_ranks.keys()),
            dropout=None,
            continuing_subword_prefix='',
            end_of_word_suffix='',
            fuse_unk=False,
        )
        fast_tokenizer = Tokenizer(bpe_model)
        fast_tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=source.add_prefix_space)
        fast_tokenizer.decoder = decoders.ByteLevel()
        if source.add_bos_token:
            # Prepend BOS to every sequence (and between pairs) via a template.
            bos = source.bos_token
            fast_tokenizer.post_processor = processors.TemplateProcessing(
                single=f'{bos}:0 $A:0',
                pair=f'{bos}:0 $A:0 $B:1',
                special_tokens=[(bos, source.bos_token_id)],
            )
        else:
            fast_tokenizer.post_processor = processors.ByteLevel(trim_offsets=False)
        return fast_tokenizer
def add_download_entity_parser(subparsers, formatter_class):
    """Register the `download-entity` sub-command and return its parser."""
    parser = subparsers.add_parser(
        'download-entity',
        formatter_class=formatter_class,
        help='Download resources for a builtin gazetteer entity')
    parser.add_argument(
        'entity_name', type=str,
        help='Name of the builtin entity to download, e.g. snips/musicArtist')
    parser.add_argument(
        'language', type=str,
        help='Language of the builtin entity')
    parser.add_argument(
        'extra_pip_args', nargs='*', type=str,
        help='Additional arguments to be passed to `pip install` when installing the builtin entity package')
    # Dispatch to the download handler when this sub-command is selected.
    parser.set_defaults(func=_download_builtin_entity)
    return parser
def specialization_metric():
    """Average specialization score over 100 random rule distributions.

    Relies on enclosing-scope names (`args`, `model`, `device`, `rules`,
    `specialization_score`). Evaluation runs without gradient tracking.
    """
    total = 0.0
    with torch.no_grad():
        for eval_seed in range(100):
            accumulated = np.zeros(args.gt_rules)
            rng = np.random.RandomState(eval_seed)
            # Random categorical distribution over the ground-truth rules.
            p = rng.dirichlet(alpha=np.ones(args.gt_rules))
            for _ in range(10):
                data, label, op = rules(1000, args.seq_len, args.gt_rules, 2,
                                        args.search_version, args.data_seed, False, prob=p)
                data = torch.Tensor(data).to(device)
                label = torch.Tensor(label).to(device)
                op = torch.Tensor(op).to(device)
                out, score = model(data, op)
                accumulated += score.view(-1, args.gt_rules).mean(dim=0).detach().cpu().numpy()
            total += specialization_score(p, accumulated / 10.0)
    return total / 100.0
def bbh_keys_from_simulation_keys(simulation_keys):
    """Return the simulation keys whose second-to-last ':'-field is 'BBH'."""
    bbh_keys = []
    for key in simulation_keys:
        fields = key.split(':')
        if fields[-2] == 'BBH':
            bbh_keys.append(key)
    return bbh_keys
class GenericCallableAccessibleObject(GenericAccessibleObject, metaclass=abc.ABCMeta):
    """Abstract accessible object wrapping a callable and its inferred signature.

    Bug fix: the accessor methods below are used as *attributes* elsewhere in
    this class (`self.inferred_signature.original_parameters` in
    `get_num_parameters`, and again in `get_dependencies`), so they must be
    properties; the `@property` decorators are restored here.
    """

    def __init__(self, owner: (TypeInfo | None), callable_: TypesOfCallables, inferred_signature: InferredSignature, raised_exceptions: set[str]=frozenset()) -> None:
        # `frozenset()` is immutable, so it is a safe default value.
        super().__init__(owner)
        self._callable = callable_
        self._inferred_signature = inferred_signature
        self._raised_exceptions = raised_exceptions

    @property
    def generated_type(self) -> ProperType:
        """The type produced when invoking the wrapped callable."""
        return self._inferred_signature.return_type

    @property
    def inferred_signature(self) -> InferredSignature:
        """The inferred signature of the wrapped callable."""
        return self._inferred_signature

    @property
    def raised_exceptions(self) -> set[str]:
        """Names of exceptions the callable is known to raise."""
        return self._raised_exceptions

    @property
    def callable(self) -> TypesOfCallables:
        """The wrapped callable itself."""
        return self._callable

    def get_num_parameters(self) -> int:
        """Number of original (non-inferred) parameters of the callable."""
        return len(self.inferred_signature.original_parameters)

    def get_dependencies(self, memo: dict[InferredSignature, dict[str, ProperType]]) -> OrderedSet[ProperType]:
        """Parameter types this callable depends on, as an ordered set."""
        return OrderedSet(self.inferred_signature.get_parameter_types(memo).values())
class TestResource(unittest.TestCase):
    """Validate Resource constructor argument checking."""

    def setUp(self):
        self.proc_region = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.PROC)
        self.dram_region = NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.DRAM)
        self.src_data_region = NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0, 0), type=NodeRegion.DRAM)
        self.dst_data_region = NodeRegion(dim=PhyDim2(2, 1), origin=PhyDim2(0, 1), type=NodeRegion.DRAM)

    def _make_resource(self, **overrides):
        """Build a Resource from valid defaults with selected kwargs overridden."""
        kwargs = dict(
            proc_region=self.proc_region,
            dram_region=self.dram_region,
            src_data_region=self.src_data_region,
            dst_data_region=self.dst_data_region,
            dim_array=PhyDim2(16, 16),
            size_gbuf=131072,
            size_regf=512,
            array_bus_width=8,
            dram_bandwidth=128,
            no_time_mux=False,
        )
        kwargs.update(overrides)
        return Resource(**kwargs)

    def test_valid_args(self):
        resource = self._make_resource()
        self.assertTupleEqual(resource.proc_region.dim, (2, 2), 'proc_region')
        self.assertTupleEqual(resource.dram_region.dim, (2, 2), 'dram_region')
        self.assertTupleEqual(resource.dim_array, (16, 16), 'dim_array')
        self.assertEqual(resource.size_gbuf, 131072, 'size_gbuf')
        self.assertEqual(resource.size_regf, 512, 'size_regf')
        self.assertEqual(resource.array_bus_width, 8, 'array_bus_width')
        self.assertEqual(resource.dram_bandwidth, 128, 'dram_bandwidth')
        self.assertFalse(resource.no_time_mux, 'no_time_mux')

    def test_invalid_proc_region(self):
        with self.assertRaisesRegex(TypeError, 'Resource: .*proc_region.*'):
            _ = self._make_resource(proc_region=PhyDim2(2, 2))

    def test_invalid_proc_region_dram(self):
        # A proc_region whose type is DRAM must be rejected.
        with self.assertRaisesRegex(ValueError, 'Resource: .*proc_.*type.*'):
            _ = self._make_resource(proc_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.DRAM))

    def test_invalid_dram_region(self):
        with self.assertRaisesRegex(TypeError, 'Resource: .*dram_region.*'):
            _ = self._make_resource(dram_region=PhyDim2(2, 2))

    def test_invalid_dram_region_proc(self):
        # A dram_region whose type is PROC must be rejected.
        with self.assertRaisesRegex(ValueError, 'Resource: .*dram_.*type.*'):
            _ = self._make_resource(dram_region=NodeRegion(dim=PhyDim2(2, 2), origin=PhyDim2(0, 0), type=NodeRegion.PROC))

    def test_invalid_data_region(self):
        with self.assertRaisesRegex(TypeError, 'Resource: .*src_data_.*'):
            _ = self._make_resource(src_data_region=PhyDim2(2, 1))
        with self.assertRaisesRegex(TypeError, 'Resource: .*dst_data_.*'):
            _ = self._make_resource(dst_data_region=PhyDim2(2, 1))

    def test_invalid_dim_array(self):
        with self.assertRaisesRegex(TypeError, 'Resource: .*dim_array.*'):
            _ = self._make_resource(dim_array=(16, 16))

    def test_invalid_size_gbuf(self):
        with self.assertRaisesRegex(TypeError, 'Resource: .*size_gbuf.*'):
            _ = self._make_resource(size_gbuf=(131072,))

    def test_invalid_size_regf(self):
        with self.assertRaisesRegex(TypeError, 'Resource: .*size_regf.*'):
            _ = self._make_resource(size_regf=(512,))

    def test_invalid_array_bus_width(self):
        with self.assertRaisesRegex(TypeError, 'Resource: .*array_bus_width.*'):
            _ = self._make_resource(array_bus_width=1.2)
        with self.assertRaisesRegex(ValueError, 'Resource: .*array_bus_width.*'):
            _ = self._make_resource(array_bus_width=-2)
        with self.assertRaisesRegex(ValueError, 'Resource: .*array_bus_width.*'):
            _ = self._make_resource(array_bus_width=0)

    def test_invalid_dram_bandwidth(self):
        with self.assertRaisesRegex(TypeError, 'Resource: .*dram_bandwidth.*'):
            _ = self._make_resource(dram_bandwidth=None)
        with self.assertRaisesRegex(ValueError, 'Resource: .*dram_bandwidth.*'):
            _ = self._make_resource(dram_bandwidth=-3)
        with self.assertRaisesRegex(ValueError, 'Resource: .*dram_bandwidth.*'):
            _ = self._make_resource(dram_bandwidth=0)

    def test_invalid_no_time_mux(self):
        with self.assertRaisesRegex(TypeError, 'Resource: .*no_time_mux.*'):
            _ = self._make_resource(no_time_mux=None)
def main():
    """CLI entry point: parse command-line options and generate the web page."""
    options = parse_args()
    generate_webpage(
        options.prefix,
        options.regex,
        options.column,
        options.perpage,
        verbose=True,
    )
def svm_predict(y, x, m, options=''):
    """Predict labels of instances `x` using model `m`.

    Parameters:
        y: ground-truth labels, used only for the reported metrics.
        x: sequence of feature vectors (one per instance).
        m: svm model (as returned by svm_train / svm_load_model).
        options: '-b 1' to request probability estimates (model permitting),
                 '-q' for quiet mode.

    Returns:
        (pred_labels, (ACC, MSE, SCC), pred_values): predicted labels, the
        evaluation metrics against `y`, and per-instance decision values or
        probability estimates.

    Raises:
        ValueError: on an unknown option, or when '-b 1' is given for a model
        without probability information.
    """
    def info(s):
        print(s)

    predict_probability = 0
    argv = options.split()
    i = 0
    while i < len(argv):
        if argv[i] == '-b':
            i += 1
            predict_probability = int(argv[i])
        elif argv[i] == '-q':
            # Quiet mode: swap in the no-op printer.
            info = print_null
        else:
            raise ValueError('Wrong options')
        i += 1

    svm_type = m.get_svm_type()
    is_prob_model = m.is_probability_model()
    nr_class = m.get_nr_class()
    pred_labels = []
    pred_values = []

    if predict_probability:
        if not is_prob_model:
            raise ValueError('Model does not support probability estimates')
        if svm_type in [NU_SVR, EPSILON_SVR]:
            info('Prob. model for test data: target value = predicted value + z,\nz: Laplace distribution e^(-|z|/sigma)/(2sigma),sigma=%g' % m.get_svr_probability())
            # Regression: no per-class probabilities are produced.
            nr_class = 0
        prob_estimates = (c_double * nr_class)()
        for xi in x:
            xi, idx = gen_svm_nodearray(xi, isKernel=(m.param.kernel_type == PRECOMPUTED))
            label = libsvm.svm_predict_probability(m, xi, prob_estimates)
            values = prob_estimates[:nr_class]
            pred_labels += [label]
            pred_values += [values]
    else:
        if is_prob_model:
            info('Model supports probability estimates, but disabled in prediction.')
        # One-class and the two regression types yield a single decision
        # value; classification yields one value per class pair.
        # Bug fix: this set previously listed NU_SVC (a classifier) instead
        # of NU_SVR, so nu-SVR models got the wrong decision-buffer size.
        if svm_type in (ONE_CLASS, EPSILON_SVR, NU_SVR):
            nr_classifier = 1
        else:
            nr_classifier = nr_class * (nr_class - 1) // 2
        dec_values = (c_double * nr_classifier)()
        for xi in x:
            xi, idx = gen_svm_nodearray(xi, isKernel=(m.param.kernel_type == PRECOMPUTED))
            label = libsvm.svm_predict_values(m, xi, dec_values)
            if nr_class == 1:
                values = [1]
            else:
                values = dec_values[:nr_classifier]
            pred_labels += [label]
            pred_values += [values]

    ACC, MSE, SCC = evaluations(y, pred_labels)
    l = len(y)
    if svm_type in [EPSILON_SVR, NU_SVR]:
        info('Mean squared error = %g (regression)' % MSE)
        info('Squared correlation coefficient = %g (regression)' % SCC)
    else:
        info('Accuracy = %g%% (%d/%d) (classification)' % (ACC, int(l * ACC / 100), l))
    return (pred_labels, (ACC, MSE, SCC), pred_values)
def test_compute_quantiles_value_error():
    """compute_quantiles must reject an input vector with extra dimensions."""
    bad_vector = np.random.rand(1000, 1, 1)
    quantile_levels = [0.1, 0.2, 0.3]
    with pytest.raises(ValueError, match='.*In case of the vector .*'):
        compute_quantiles(bad_vector, quantile_levels)
class MergeFeatures(nn.Module):
    """Fuse feature maps from three scales and project to the output channels.

    x1 and x2 are upsampled (4x and 2x, bicubic) to the resolution of x4,
    concatenated, merged, and projected; the output is clamped to [-1, 1].
    """

    def __init__(self, mid_channels, out_channels):
        super().__init__()
        self.conv1to4 = _conv1x1_layer(mid_channels, mid_channels)
        self.conv2to4 = _conv1x1_layer(mid_channels, mid_channels)
        self.conv_merge = _conv3x3_layer(mid_channels * 3, mid_channels)
        self.conv_last1 = _conv3x3_layer(mid_channels, mid_channels // 2)
        self.conv_last2 = _conv1x1_layer(mid_channels // 2, out_channels)

    def forward(self, x1, x2, x4):
        # Bring the coarser maps up to x4's spatial resolution.
        up1 = F.interpolate(x1, scale_factor=4, mode='bicubic', align_corners=False)
        up1 = F.relu(self.conv1to4(up1))
        up2 = F.interpolate(x2, scale_factor=2, mode='bicubic', align_corners=False)
        up2 = F.relu(self.conv2to4(up2))
        merged = F.relu(self.conv_merge(torch.cat((x4, up1, up2), dim=1)))
        out = self.conv_last2(self.conv_last1(merged))
        return torch.clamp(out, -1, 1)
class OFAMobileNetV3(MobileNetV3):
    """Once-for-all MobileNetV3 supernet with elastic kernel size, expansion
    ratio, depth and width multiplier.

    The elastic dimensions are given as scalars or candidate lists; a
    sub-network is selected at runtime via `set_active_subnet` /
    `sample_active_subnet` and can be materialized with `get_active_subnet`.
    Bug fix vs. the dumped source: `name` and `build_from_config` take no
    `self`/`cls`, so their stripped `@staticmethod` decorators are restored.
    """

    def __init__(self, n_classes=1000, bn_param=(0.1, 1e-05), dropout_rate=0.1,
                 base_stage_width=None, width_mult_list=1.0, ks_list=3,
                 expand_ratio_list=6, depth_list=4):
        # Normalize scalar hyper-parameters to sorted candidate lists.
        self.width_mult_list = int2list(width_mult_list, 1)
        self.ks_list = int2list(ks_list, 1)
        self.expand_ratio_list = int2list(expand_ratio_list, 1)
        self.depth_list = int2list(depth_list, 1)
        self.base_stage_width = base_stage_width
        self.width_mult_list.sort()
        self.ks_list.sort()
        self.expand_ratio_list.sort()
        self.depth_list.sort()
        # NOTE(review): the `base_stage_width` argument is stored above but
        # then shadowed by the hard-coded MobileNetV3 stage widths below.
        base_stage_width = [16, 24, 40, 80, 112, 160, 960, 1280]
        # One width per width-mult candidate for the last two (head) stages.
        final_expand_width = [make_divisible(base_stage_width[-2] * max(self.width_mult_list), 8)
                              for _ in self.width_mult_list]
        last_channel = [make_divisible(base_stage_width[-1] * max(self.width_mult_list), 8)
                        for _ in self.width_mult_list]
        stride_stages = [1, 2, 2, 2, 1, 2]
        act_stages = ['relu', 'relu', 'relu', 'h_swish', 'h_swish', 'h_swish']
        se_stages = [False, False, True, False, True, True]
        if depth_list is None:
            # Fixed MobileNetV3-style per-stage block counts.
            n_block_list = [1, 2, 3, 4, 2, 3]
            self.depth_list = [4, 4]
            print('Use MobileNetV3 Depth Setting')
        else:
            n_block_list = [1] + [max(self.depth_list)] * 5
        width_list = []
        for base_width in base_stage_width[:-2]:
            width = [make_divisible(base_width * width_mult, 8) for width_mult in self.width_mult_list]
            width_list.append(width)
        input_channel = width_list[0]
        if len(set(input_channel)) == 1:
            # All width multipliers agree on the stem width: static layers suffice.
            first_conv = ConvLayer(3, max(input_channel), kernel_size=3, stride=2, act_func='h_swish')
            first_block_conv = MBInvertedConvLayer(
                in_channels=max(input_channel), out_channels=max(input_channel), kernel_size=3,
                stride=stride_stages[0], expand_ratio=1, act_func=act_stages[0], use_se=se_stages[0])
        else:
            first_conv = DynamicConvLayer(
                in_channel_list=int2list(3, len(input_channel)), out_channel_list=input_channel,
                kernel_size=3, stride=2, act_func='h_swish')
            first_block_conv = DynamicMBConvLayer(
                in_channel_list=input_channel, out_channel_list=input_channel, kernel_size_list=3,
                expand_ratio_list=1, stride=stride_stages[0], act_func=act_stages[0], use_se=se_stages[0])
        first_block = MobileInvertedResidualBlock(first_block_conv, IdentityLayer(input_channel, input_channel))
        # block_group_info[s] lists the block indices belonging to stage s.
        self.block_group_info = []
        blocks = [first_block]
        _block_index = 1
        feature_dim = input_channel
        for width, n_block, s, act_func, use_se in zip(
                width_list[1:], n_block_list[1:], stride_stages[1:], act_stages[1:], se_stages[1:]):
            self.block_group_info.append([_block_index + i for i in range(n_block)])
            _block_index += n_block
            output_channel = width
            for i in range(n_block):
                # Only the first block of a stage may downsample.
                if i == 0:
                    stride = s
                else:
                    stride = 1
                mobile_inverted_conv = DynamicMBConvLayer(
                    in_channel_list=feature_dim, out_channel_list=output_channel,
                    kernel_size_list=ks_list, expand_ratio_list=expand_ratio_list,
                    stride=stride, act_func=act_func, use_se=use_se)
                if stride == 1 and feature_dim == output_channel:
                    shortcut = IdentityLayer(feature_dim, feature_dim)
                else:
                    shortcut = None
                blocks.append(MobileInvertedResidualBlock(mobile_inverted_conv, shortcut))
                feature_dim = output_channel
        if len(final_expand_width) == 1:
            final_expand_layer = ConvLayer(max(feature_dim), max(final_expand_width),
                                           kernel_size=1, act_func='h_swish')
            feature_mix_layer = ConvLayer(max(final_expand_width), max(last_channel),
                                          kernel_size=1, bias=False, use_bn=False, act_func='h_swish')
        else:
            final_expand_layer = DynamicConvLayer(
                in_channel_list=feature_dim, out_channel_list=final_expand_width,
                kernel_size=1, act_func='h_swish')
            feature_mix_layer = DynamicConvLayer(
                in_channel_list=final_expand_width, out_channel_list=last_channel,
                kernel_size=1, use_bn=False, act_func='h_swish')
        if len(set(last_channel)) == 1:
            classifier = LinearLayer(max(last_channel), n_classes, dropout_rate=dropout_rate)
        else:
            classifier = DynamicLinearLayer(
                in_features_list=last_channel, out_features=n_classes, bias=True, dropout_rate=dropout_rate)
        super(OFAMobileNetV3, self).__init__(first_conv, blocks, final_expand_layer,
                                             feature_mix_layer, classifier)
        self.set_bn_param(momentum=bn_param[0], eps=bn_param[1])
        # Active depth per stage; starts at maximum (full supernet).
        self.runtime_depth = [len(block_idx) for block_idx in self.block_group_info]

    # ---- MyNetwork required methods ----

    @staticmethod
    def name():
        return 'OFAMobileNetV3'

    def forward(self, x):
        """Run the currently active sub-network."""
        x = self.first_conv(x)
        x = self.blocks[0](x)
        for stage_id, block_idx in enumerate(self.block_group_info):
            depth = self.runtime_depth[stage_id]
            active_idx = block_idx[:depth]
            for idx in active_idx:
                x = self.blocks[idx](x)
        x = self.final_expand_layer(x)
        # Global average pooling over the spatial dimensions.
        x = x.mean(3, keepdim=True).mean(2, keepdim=True)
        x = self.feature_mix_layer(x)
        x = torch.squeeze(x)
        x = self.classifier(x)
        return x

    def module_str(self):
        """Human-readable description of the active sub-network."""
        _str = self.first_conv.module_str + '\n'
        _str += self.blocks[0].module_str + '\n'
        for stage_id, block_idx in enumerate(self.block_group_info):
            depth = self.runtime_depth[stage_id]
            active_idx = block_idx[:depth]
            for idx in active_idx:
                _str += self.blocks[idx].module_str + '\n'
        _str += self.final_expand_layer.module_str + '\n'
        _str += self.feature_mix_layer.module_str + '\n'
        _str += self.classifier.module_str + '\n'
        return _str

    def config(self):
        """Serializable configuration of the full supernet."""
        return {
            'name': OFAMobileNetV3.__name__,
            'bn': self.get_bn_param(),
            'first_conv': self.first_conv.config,
            'blocks': [block.config for block in self.blocks],
            'final_expand_layer': self.final_expand_layer.config,
            'feature_mix_layer': self.feature_mix_layer.config,
            'classifier': self.classifier.config,
        }

    @staticmethod
    def build_from_config(config):
        raise ValueError('do not support this function')

    def load_weights_from_net(self, src_model_dict):
        """Load weights from a (static or dynamic) MobileNetV3 state dict,
        translating between the static and dynamic key namespaces."""
        model_dict = self.state_dict()
        for key in src_model_dict:
            if key in model_dict:
                new_key = key
            elif '.bn.bn.' in key:
                new_key = key.replace('.bn.bn.', '.bn.')
            elif '.conv.conv.weight' in key:
                new_key = key.replace('.conv.conv.weight', '.conv.weight')
            elif '.linear.linear.' in key:
                new_key = key.replace('.linear.linear.', '.linear.')
            elif '.linear.' in key:
                new_key = key.replace('.linear.', '.linear.linear.')
            elif 'bn.' in key:
                new_key = key.replace('bn.', 'bn.bn.')
            elif 'conv.weight' in key:
                new_key = key.replace('conv.weight', 'conv.conv.weight')
            else:
                raise ValueError(key)
            assert new_key in model_dict, '%s' % new_key
            model_dict[new_key] = src_model_dict[key]
        self.load_state_dict(model_dict)

    # ---- set, sample and get active sub-networks ----

    def set_active_subnet(self, wid=None, ks=None, e=None, d=None):
        """Select the active kernel sizes, expand ratios and depths."""
        # NOTE(review): width_mult_id is computed but never used here,
        # mirroring the original implementation.
        width_mult_id = int2list(wid, 4 + len(self.block_group_info))
        ks = int2list(ks, len(self.blocks) - 1)
        expand_ratio = int2list(e, len(self.blocks) - 1)
        depth = int2list(d, len(self.block_group_info))
        for block, k, e in zip(self.blocks[1:], ks, expand_ratio):
            if k is not None:
                block.mobile_inverted_conv.active_kernel_size = k
            if e is not None:
                block.mobile_inverted_conv.active_expand_ratio = e
        for i, d in enumerate(depth):
            if d is not None:
                self.runtime_depth[i] = min(len(self.block_group_info[i]), d)

    def set_constraint(self, include_list, constraint_type='depth'):
        """Restrict the sampling candidates for one elastic dimension."""
        if constraint_type == 'depth':
            self.__dict__['_depth_include_list'] = include_list.copy()
        elif constraint_type == 'expand_ratio':
            self.__dict__['_expand_include_list'] = include_list.copy()
        elif constraint_type == 'kernel_size':
            self.__dict__['_ks_include_list'] = include_list.copy()
        elif constraint_type == 'width_mult':
            self.__dict__['_widthMult_include_list'] = include_list.copy()
        else:
            raise NotImplementedError

    def clear_constraint(self):
        """Remove all sampling constraints."""
        self.__dict__['_depth_include_list'] = None
        self.__dict__['_expand_include_list'] = None
        self.__dict__['_ks_include_list'] = None
        self.__dict__['_widthMult_include_list'] = None

    def sample_active_subnet(self):
        """Uniformly sample a sub-network (subject to set constraints),
        activate it, and return its setting."""
        ks_candidates = (self.ks_list if self.__dict__.get('_ks_include_list', None) is None
                         else self.__dict__['_ks_include_list'])
        expand_candidates = (self.expand_ratio_list if self.__dict__.get('_expand_include_list', None) is None
                             else self.__dict__['_expand_include_list'])
        depth_candidates = (self.depth_list if self.__dict__.get('_depth_include_list', None) is None
                            else self.__dict__['_depth_include_list'])
        width_mult_setting = None
        ks_setting = []
        if not isinstance(ks_candidates[0], list):
            ks_candidates = [ks_candidates for _ in range(len(self.blocks) - 1)]
        for k_set in ks_candidates:
            k = random.choice(k_set)
            ks_setting.append(k)
        expand_setting = []
        if not isinstance(expand_candidates[0], list):
            expand_candidates = [expand_candidates for _ in range(len(self.blocks) - 1)]
        for e_set in expand_candidates:
            e = random.choice(e_set)
            expand_setting.append(e)
        depth_setting = []
        if not isinstance(depth_candidates[0], list):
            depth_candidates = [depth_candidates for _ in range(len(self.block_group_info))]
        for d_set in depth_candidates:
            d = random.choice(d_set)
            depth_setting.append(d)
        self.set_active_subnet(width_mult_setting, ks_setting, expand_setting, depth_setting)
        return {'wid': width_mult_setting, 'ks': ks_setting, 'e': expand_setting, 'd': depth_setting}

    def get_active_subnet(self, preserve_weight=True):
        """Extract the active sub-network as a standalone MobileNetV3."""
        first_conv = copy.deepcopy(self.first_conv)
        blocks = [copy.deepcopy(self.blocks[0])]
        final_expand_layer = copy.deepcopy(self.final_expand_layer)
        feature_mix_layer = copy.deepcopy(self.feature_mix_layer)
        classifier = copy.deepcopy(self.classifier)
        input_channel = blocks[0].mobile_inverted_conv.out_channels
        for stage_id, block_idx in enumerate(self.block_group_info):
            depth = self.runtime_depth[stage_id]
            active_idx = block_idx[:depth]
            stage_blocks = []
            for idx in active_idx:
                stage_blocks.append(MobileInvertedResidualBlock(
                    self.blocks[idx].mobile_inverted_conv.get_active_subnet(input_channel, preserve_weight),
                    copy.deepcopy(self.blocks[idx].shortcut)))
                input_channel = stage_blocks[-1].mobile_inverted_conv.out_channels
            blocks += stage_blocks
        _subnet = MobileNetV3(first_conv, blocks, final_expand_layer, feature_mix_layer, classifier)
        _subnet.set_bn_param(**self.get_bn_param())
        return _subnet

    def get_active_net_config(self):
        """Return the configuration dict of the active sub-network."""
        first_conv_config = self.first_conv.config
        first_block_config = self.blocks[0].config
        if isinstance(self.first_conv, DynamicConvLayer):
            first_conv_config = self.first_conv.get_active_subnet_config(3)
            first_block_config = {
                'name': MobileInvertedResidualBlock.__name__,
                'mobile_inverted_conv': self.blocks[0].mobile_inverted_conv.get_active_subnet_config(
                    first_conv_config['out_channels']),
                'shortcut': (self.blocks[0].shortcut.config if self.blocks[0].shortcut is not None else None),
            }
        final_expand_config = self.final_expand_layer.config
        feature_mix_layer_config = self.feature_mix_layer.config
        if isinstance(self.final_expand_layer, DynamicConvLayer):
            final_expand_config = self.final_expand_layer.get_active_subnet_config(
                self.blocks[-1].mobile_inverted_conv.active_out_channel)
            feature_mix_layer_config = self.feature_mix_layer.get_active_subnet_config(
                final_expand_config['out_channels'])
        classifier_config = self.classifier.config
        if isinstance(self.classifier, DynamicLinearLayer):
            classifier_config = self.classifier.get_active_subnet_config(
                self.feature_mix_layer.active_out_channel)
        block_config_list = [first_block_config]
        input_channel = first_block_config['mobile_inverted_conv']['out_channels']
        for stage_id, block_idx in enumerate(self.block_group_info):
            depth = self.runtime_depth[stage_id]
            active_idx = block_idx[:depth]
            stage_blocks = []
            for idx in active_idx:
                middle_channel = make_divisible(
                    round(input_channel * self.blocks[idx].mobile_inverted_conv.active_expand_ratio), 8)
                stage_blocks.append({
                    'name': MobileInvertedResidualBlock.__name__,
                    'mobile_inverted_conv': {
                        'name': MBInvertedConvLayer.__name__,
                        'in_channels': input_channel,
                        'out_channels': self.blocks[idx].mobile_inverted_conv.active_out_channel,
                        'kernel_size': self.blocks[idx].mobile_inverted_conv.active_kernel_size,
                        'stride': self.blocks[idx].mobile_inverted_conv.stride,
                        'expand_ratio': self.blocks[idx].mobile_inverted_conv.active_expand_ratio,
                        'mid_channels': middle_channel,
                        'act_func': self.blocks[idx].mobile_inverted_conv.act_func,
                        'use_se': self.blocks[idx].mobile_inverted_conv.use_se,
                    },
                    'shortcut': (self.blocks[idx].shortcut.config
                                 if self.blocks[idx].shortcut is not None else None),
                })
                input_channel = self.blocks[idx].mobile_inverted_conv.active_out_channel
            block_config_list += stage_blocks
        return {
            'name': MobileNetV3.__name__,
            'bn': self.get_bn_param(),
            'first_conv': first_conv_config,
            'blocks': block_config_list,
            'final_expand_layer': final_expand_config,
            'feature_mix_layer': feature_mix_layer_config,
            'classifier': classifier_config,
        }

    # ---- width related methods ----

    def re_organize_middle_weights(self, expand_ratio_stage=0):
        """Reorder middle-channel weights by importance in every dynamic block."""
        for block in self.blocks[1:]:
            block.mobile_inverted_conv.re_organize_middle_weights(expand_ratio_stage)
class ReplaceValueRunExpander(RunExpander):
    """Expand a run spec into one spec per candidate value of an adapter field."""

    def __init__(self, value):
        self.name = type(self).name
        # A known alias expands to its list of values; anything else is used verbatim.
        values_dict = type(self).values_dict
        self.values = values_dict[value] if value in values_dict else [value]

    def expand(self, run_spec: RunSpec) -> List[RunSpec]:
        def sanitize(value):
            return str(value).replace('/', '_')

        expanded = []
        for value in self.values:
            # Use ',' when the name already carries a ':'-separated suffix.
            separator = ',' if ':' in run_spec.name else ':'
            new_name = f"{run_spec.name}{separator}{self.name}={sanitize(value)}"
            new_adapter_spec = replace(run_spec.adapter_spec, **{self.name: value})
            expanded.append(replace(run_spec, name=new_name, adapter_spec=new_adapter_spec))
        return expanded
def read_data_files_from_folder(foldername):
    """Read every non-hidden '.ll' file in `foldername`.

    Returns:
        (data, file_names): `data` is a list of per-file line lists and
        `file_names` the corresponding file names, in `os.listdir` order.
    """
    data = []
    file_names = []
    file_count = 0
    print('Reading data from all files in folder ', foldername)
    listing = os.listdir(foldername + '/')
    for file in listing:
        if file[0] != '.' and file[-3:] == '.ll':
            # Context manager closes the handle even if the read fails
            # (the original leaked the handle on error).
            with open(os.path.join(foldername, file), 'r') as f:
                data.append(f.read().splitlines())
            file_names.append(file)
            file_count += 1
    # The original subtracted a `to_subtract` variable that was always 0.
    print('Number of files read from', foldername, ': ', file_count)
    print('Total number of files read for dataset', foldername, ': ', file_count)
    return (data, file_names)
class TestDBFileReader(TestCase):
    """Round-trip tests for CachedReader / DBFileReader over an on-disk DB.

    Fixes vs. the dumped source: the helper methods below were defined
    without `self` yet invoked as `self._helper(...)` (a TypeError at
    runtime), so they are restored as @staticmethods; and the two bare
    `(condition, reason)` tuples before the test methods were stripped
    `@unittest.skipIf` decorators, restored here.
    """

    # NOTE(review): imported at class-body scope so this block is
    # self-contained; confirm the original file's top-level imports.
    from unittest import skipIf as _skip_if

    def setUp(self):
        self.temp_paths = []

    def tearDown(self):
        # Remove every temp file/dir created during the test.
        for path in self.temp_paths:
            self._delete_path(path)

    @staticmethod
    def _delete_path(path):
        if os.path.isfile(path):
            os.remove(path)
        elif os.path.isdir(path):
            shutil.rmtree(path)

    def _make_temp_path(self):
        # NamedTemporaryFile is closed (and removed) on exit; only its name
        # is kept, leaving the path free for the DB to create.
        with tempfile.NamedTemporaryFile() as f:
            temp_path = f.name
        self.temp_paths.append(temp_path)
        return temp_path

    @staticmethod
    def _build_source_reader(ws, size):
        src_ds = make_source_dataset(ws, size)
        return src_ds.reader()

    @staticmethod
    def _read_all_data(ws, reader, session):
        dst_ds = make_destination_dataset(ws, reader.schema().clone_schema())
        with TaskGroup() as tg:
            pipe(reader, dst_ds.writer(), num_runtime_threads=8)
        session.run(tg)
        return ws.blobs[str(dst_ds.content().label())].fetch()

    @_skip_if('LevelDB' not in core.C.registered_dbs(), 'Need LevelDB')
    def test_cached_reader(self):
        ws = workspace.C.Workspace()
        session = LocalSession(ws)
        db_path = self._make_temp_path()
        # First pass: build the cache from a 100-record source.
        cached_reader1 = CachedReader(self._build_source_reader(ws, 100), db_path, loop_over=False)
        build_cache_step = cached_reader1.build_cache_step()
        session.run(build_cache_step)
        data = self._read_all_data(ws, cached_reader1, session)
        self.assertEqual(sorted(data), list(range(100)))
        # Second pass: the existing cache wins over the larger source.
        cached_reader2 = CachedReader(self._build_source_reader(ws, 200), db_path)
        build_cache_step = cached_reader2.build_cache_step()
        session.run(build_cache_step)
        data = self._read_all_data(ws, cached_reader2, session)
        self.assertEqual(sorted(data), list(range(100)))
        # After deleting the cache, the new source is read in full.
        self._delete_path(db_path)
        cached_reader3 = CachedReader(self._build_source_reader(ws, 300), db_path)
        build_cache_step = cached_reader3.build_cache_step()
        session.run(build_cache_step)
        data = self._read_all_data(ws, cached_reader3, session)
        self.assertEqual(sorted(data), list(range(300)))
        self._delete_path(db_path)

    @_skip_if('LevelDB' not in core.C.registered_dbs(), 'Need LevelDB')
    def test_db_file_reader(self):
        ws = workspace.C.Workspace()
        session = LocalSession(ws)
        db_path = self._make_temp_path()
        # Populate the DB file via a CachedReader, then read it back directly.
        cached_reader = CachedReader(self._build_source_reader(ws, 100), db_path=db_path, db_type='LevelDB')
        build_cache_step = cached_reader.build_cache_step()
        session.run(build_cache_step)
        db_file_reader = DBFileReader(db_path=db_path, db_type='LevelDB')
        data = self._read_all_data(ws, db_file_reader, session)
        self.assertEqual(sorted(data), list(range(100)))
        self._delete_path(db_path)
def convert_clip_backbone(flax_params, torch_config):
    """Map a flattened Flax CLIP backbone onto a PyTorch CLIP state dict.

    Returns (torch_clip_params, torch_model, attn_params): the filled torch
    state dict, the instantiated torch model, and the converted tensors whose
    names did not match any state-dict entry (left for separate handling).
    """
    torch_model = CLIP(**torch_config)
    torch_model.eval()
    torch_clip_params = torch_model.state_dict()
    flax_clip_params = flatten_nested_dict(flax_params['backbone']['clip'])
    new_torch_params = {}
    # The replacement order below is significant: prefix stripping must happen
    # before the generic '.kernel' -> '.weight' rewrites.
    for (flax_key, v) in flax_clip_params.items():
        # Flax uses '/'-separated paths; torch modules use '.'-separated names.
        torch_key = flax_key.replace('/', '.')
        torch_key = torch_key.replace('text.token_embedding.embedding', 'token_embedding.kernel')
        if (torch_key.startswith('text.transformer') or torch_key.startswith('text.text_projection') or torch_key.startswith('text.ln_final') or torch_key.startswith('text.positional_embedding')):
            # Drop the leading 'text.' prefix (5 characters) for text-tower keys.
            torch_key = torch_key[5:]
        torch_key = torch_key.replace('text_projection.kernel', 'text_projection')
        torch_key = torch_key.replace('visual.proj.kernel', 'visual.proj')
        torch_key = torch_key.replace('.scale', '.weight')
        torch_key = torch_key.replace('.kernel', '.weight')
        if (('conv' in torch_key) or ('downsample.0.weight' in torch_key)):
            # Reorder conv kernel axes for torch (presumably HWIO -> OIHW — confirm).
            v = v.transpose(3, 2, 0, 1)
        elif (('weight' in torch_key) and (v.ndim == 2) and ('embedding' not in torch_key)):
            # Dense kernels are stored transposed relative to torch Linear weights.
            v = v.T
        new_torch_params[torch_key] = v
    # Attention layers need their own conversion pass.
    attn_params = _convert_attn_layers(new_torch_params)
    new_torch_params.update(attn_params)
    attn_params = {}
    for (name, param) in new_torch_params.items():
        if (name in torch_clip_params.keys()):
            new_param = torch.from_numpy(new_torch_params[name])
            torch_clip_params[name].copy_(new_param)
        else:
            # Names absent from the torch state dict are returned separately.
            attn_params[name] = param
    return (torch_clip_params, torch_model, attn_params)
class Material(schema_utils.Model):
    """Schema model describing a material definition.

    Fields (presumably: a named material, an optional data file, and a
    complex refractive index — confirm against the optplan schema docs).
    """
    # Material name.
    mat_name = types.StringType()
    # Path/identifier of a material data file.
    mat_file = types.StringType()
    # Complex-valued index, wrapped in the optplan ComplexNumber model.
    index = types.PolyModelType(optplan.ComplexNumber)
class TCN_GCN_unit(nn.Module):
    """One Shift-GCN block: spatial graph conv, temporal conv, and a residual.

    The residual is the identity when shapes match, a 1x1 temporal conv when
    they do not, and zero when `residual` is disabled.
    """

    def __init__(self, in_channels, out_channels, A, stride=1, residual=True):
        super(TCN_GCN_unit, self).__init__()
        self.gcn1 = Shift_gcn(in_channels, out_channels, A)
        self.tcn1 = Shift_tcn(out_channels, out_channels, stride=stride)
        self.relu = nn.ReLU()
        if not residual:
            self.residual = lambda x: 0
        elif in_channels == out_channels and stride == 1:
            self.residual = lambda x: x
        else:
            self.residual = tcn(in_channels, out_channels, kernel_size=1, stride=stride)

    def forward(self, x):
        out = self.tcn1(self.gcn1(x))
        out = out + self.residual(x)
        return self.relu(out)
def read_file(path):
    """Return the full text content of the file at `path`."""
    # `with` closes the file on exit; the original also called f.close()
    # inside the block, which was redundant.
    with open(path, 'r') as f:
        return f.read()
class PseudolineArrangement():
    """Arrangement of pseudolines numbered ``0 .. n-1``.

    ``seq`` describes the arrangement in one of three encodings:

    * ``'transpositions'``: the list of crossings ``(i, j)`` ordered from
      left to right;
    * ``'permutations'``: for each line, the ordered list of the lines it
      crosses;
    * ``'Felsner'``: one 0/1 sequence per line (rows of a Felsner matrix).

    With ``encoding='auto'`` the encoding is guessed from the shape of
    ``seq``; this is ambiguous for at most 3 lines, in which case the
    encoding must be given explicitly.
    """

    def __init__(self, seq, encoding='auto'):
        if (encoding == 'transpositions') or ((encoding == 'auto') and (len(seq[0]) == 2) and (len(seq) > 3)):
            self._n = max(map(max, seq)) + 1
            # n pairwise-crossing pseudolines cross exactly binomial(n, 2)
            # times; integer division (//) keeps the comparison int vs int.
            if (self._n * (self._n - 1)) // 2 != len(seq):
                raise ValueError('A line is numbered ' + str(self._n - 1) + ' but the number'
                                 + ' of transpositions is different from binomial('
                                 + str(self._n - 1) + ',2). Are the lines numbered from 0 to n-1?'
                                 + ' Are they really non-parallel? Please check the documentation.')
            self._permutations = [[] for i in range(self._n)]
            for (i, j) in seq:
                self._permutations[i].append(j)
                self._permutations[j].append(i)
        elif (encoding == 'permutations') or ((encoding == 'auto') and (len(seq[0]) == (len(seq) - 1)) and (max(seq[0]) > 1)):
            self._n = len(seq)
            self._permutations = [list(_) for _ in seq]
            if max(map(max, seq)) != (self._n - 1):
                raise ValueError('Are the lines really numbered from 0 to n-1?')
        elif (encoding == 'Felsner') or ((encoding == 'auto') and (len(seq[0]) == (len(seq) - 1))):
            seq = deepcopy(seq)
            self._n = len(seq)
            ordering = list(range(self._n))
            self._permutations = [[] for i in range(self._n)]
            # Sweep left to right, resolving one crossing (a "0,1" pattern
            # in adjacent rows) at a time until all binomial(n, 2) crossings
            # have been consumed.
            crossings = (self._n * (self._n - 1)) // 2
            i = 0
            while crossings > 0:
                if (seq[i] != []) and ((seq[i][0] == 0) and (seq[i + 1][0] == 1)):
                    crossings -= 1
                    self._permutations[ordering[i]].append(ordering[i + 1])
                    self._permutations[ordering[i + 1]].append(ordering[i])
                    (ordering[i], ordering[i + 1]) = (ordering[i + 1], ordering[i])
                    (seq[i], seq[i + 1]) = (seq[i + 1], seq[i])
                    seq[i].pop(0)
                    seq[i + 1].pop(0)
                    # Step back if the previous row may now expose a crossing.
                    # (The original tested ``seq[i-1] is not []`` -- an identity
                    # comparison that is always True; ``!=`` performs the
                    # intended emptiness test.)
                    if (i > 0) and (seq[i - 1] != []):
                        i -= 1
                    else:
                        i += 1
                else:
                    i += 1
        else:
            if encoding != 'auto':
                raise ValueError("The value of encoding must be one of 'transpositions', 'permutations', 'Felsner' or 'auto'.")
            raise ValueError('The encoding you used could not be guessed. Your input string is probably badly formatted, or you have at most 3 lines and we cannot distinguish the encoding. Please specify the encoding you used.')

    def transpositions(self):
        """Return the list of crossings ``(i, j)``, ordered left to right."""
        t = []
        perm = deepcopy(self._permutations)
        crossings = (self._n * (self._n - 1)) // 2
        while crossings > 0:
            i = 0
            while perm[i] == []:
                i += 1
            k = 0
            # Follow the chain of "next crossing" pointers until two lines
            # agree that they cross each other next; a chain longer than n
            # means a cyclic dependency, i.e. inconsistent input.
            # (Spaces between the concatenated message pieces were missing
            # in the original -- "a"+"pseudoline", "lines"+"such".)
            while i != perm[perm[i][0]][0]:
                i = perm[i][0]
                k += 1
                if k > self._n:
                    raise ValueError('It looks like the data does not correspond to a'
                                     ' pseudoline arrangement. We have found k>2 lines'
                                     ' such that the ith line meets the (i+1)th before'
                                     ' the (i-1)th (this creates a cyclic dependency)'
                                     ' which is totally impossible.')
            t.append((i, perm[i][0]))
            perm[perm[i][0]].pop(0)
            perm[i].pop(0)
            crossings -= 1
        if max(map(len, perm)) != 0:
            raise ValueError('There has been an error while computing the transpositions.')
        return t

    def permutations(self):
        """Return, for each line, the ordered list of lines it crosses."""
        return deepcopy(self._permutations)

    def felsner_matrix(self):
        """Return the Felsner matrix (one 0/1 row per line) of the arrangement."""
        m = [[] for i in range(self._n)]
        for (i, j) in self.transpositions():
            if i < j:
                (i, j) = (j, i)
            m[j].append(0)
            m[i].append(1)
        return m

    def show(self, **args):
        """Plot the arrangement as a wiring diagram (requires Sage plotting)."""
        x = 1
        from sage.plot.line import line
        from sage.plot.text import text
        lines = [[(0, (self._n - 1) - i)] for i in range(self._n)]
        for (i, j) in self.transpositions():
            iy = lines[i][-1][1]
            jy = lines[j][-1][1]
            lines[i].append((x, iy))
            lines[j].append((x, jy))
            if abs(iy - jy) != 1:
                raise ValueError('There has been a problem while plotting the figure. It '
                                 'seems that the lines are not correctly ordered. Please '
                                 'check the pseudolines modules documentation, there is a '
                                 'warning about that. ')
            lines[i].append((x + 2, jy))
            lines[j].append((x + 2, iy))
            x += 2
        L = line([(1, 1)])
        for (i, l) in enumerate(lines):
            l.append((x + 2, l[-1][1]))
            L += line(l)
            L += text(str(i), (0, l[0][1] + 0.3), horizontal_alignment='right')
            L += text(str(i), (x + 2, l[-1][1] + 0.3), horizontal_alignment='left')
        return L.show(axes=False, **args)

    def __repr__(self):
        return 'Arrangement of pseudolines of size ' + str(self._n)

    def __eq__(self, other):
        return (self._n == other._n) and (self._permutations == other._permutations)

    def __ne__(self, other):
        return not (self == other)
def _mmd2(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
    """Squared Maximum Mean Discrepancy (MMD^2) from kernel matrices.

    ``K_XX`` and ``K_YY`` are the within-sample kernel matrices, ``K_XY``
    the cross-sample one. With ``biased`` the V-statistic is returned;
    otherwise the unbiased U-statistic, where ``const_diagonal`` (if not
    False) short-circuits the trace computation for kernels with a
    constant diagonal.
    """
    m = tf.cast(tf.shape(K_XX)[0], tf.float32)
    n = tf.cast(tf.shape(K_YY)[0], tf.float32)
    if biased:
        term_xx = tf.reduce_sum(K_XX, keep_dims=True) / (m * m)
        term_yy = tf.reduce_sum(K_YY, keep_dims=True) / (n * n)
        term_xy = (2 * tf.reduce_sum(K_XY, keep_dims=True)) / (m * n)
        return (term_xx + term_yy) - term_xy
    # Unbiased estimator: exclude the diagonal from the within-sample sums.
    if const_diagonal is not False:
        trace_X = m * const_diagonal
        trace_Y = n * const_diagonal
    else:
        trace_X = tf.trace(K_XX)
        trace_Y = tf.trace(K_YY)
    term_xx = (tf.reduce_sum(K_XX) - trace_X) / (m * (m - 1))
    term_yy = (tf.reduce_sum(K_YY) - trace_Y) / (n * (n - 1))
    term_xy = (2 * tf.reduce_sum(K_XY)) / (m * n)
    return (term_xx + term_yy) - term_xy
def imshowc_bbox(imgs, bboxs, nx=None, color='r', mode='pascal', path=None):
    """Show (or save) a grid of images with bounding boxes drawn on top.

    imgs: (N, C, H, W) torch.Tensor or (N, H, W, C) np.ndarray with C in
        {3, 4}; pixel values are displayed in [0, 1].
    bboxs: per-image array/tensor of boxes, each of shape (K, 4).
    nx: number of grid columns (default min(10, N)).
    mode: 'coco' boxes are (x, y, w, h); 'pascal' boxes are (x1, y1, x2, y2).
    path: if given, the figure is saved there instead of shown.
    """
    assert len(imgs) == len(bboxs)
    assert isinstance(imgs, (np.ndarray, torch.Tensor))
    assert imgs.ndim == 4
    # Fail fast on an unknown mode: the original left x/y/w/h undefined (or
    # silently reused the previous box's values) when mode matched neither
    # branch.
    if mode not in ('coco', 'pascal'):
        raise ValueError("mode must be 'coco' or 'pascal', got {!r}".format(mode))
    if isinstance(imgs, torch.Tensor):
        assert imgs.shape[1] in [3, 4]
        imgs = imgs.cpu().detach().numpy()
        imgs = imgs.transpose(0, 2, 3, 1)  # NCHW -> NHWC for matplotlib
    elif isinstance(imgs, np.ndarray):
        assert imgs.shape[3] in [3, 4]
    # Normalize all box containers to numpy arrays.
    bboxs2 = []
    for bbox in bboxs:
        assert isinstance(bbox, (np.ndarray, torch.Tensor))
        assert bbox.ndim == 2
        if isinstance(bbox, torch.Tensor):
            bboxs2.append(bbox.cpu().detach().numpy())
        else:
            bboxs2.append(bbox)
    bboxs = bboxs2
    if nx is None:
        nx = min(10, len(imgs))
    ny = ((len(imgs) - 1) // nx) + 1
    for idx, (img, bbox) in enumerate(zip(imgs, bboxs)):
        plt.subplot(ny, nx, idx + 1)
        fig = plt.imshow(img, vmin=0, vmax=1)
        ax = plt.gca()
        for bbox2 in bbox:
            if mode == 'coco':
                (x, y, w, h) = (bbox2[0], bbox2[1], bbox2[2], bbox2[3])
            else:  # 'pascal': convert corner format to (x, y, w, h)
                (x, y, w, h) = (bbox2[0], bbox2[1], bbox2[2] - bbox2[0], bbox2[3] - bbox2[1])
            rect = patches.Rectangle((x, y), w, h, edgecolor=color, facecolor='none')
            ax.add_patch(rect)
        fig.axes.get_xaxis().set_visible(False)
        fig.axes.get_yaxis().set_visible(False)
    plt.tight_layout()
    plt.subplots_adjust(0, 0, 1, 1, 0, 0)
    if path is None:
        plt.show()
    else:
        mkdir(parent(path))
        plt.savefig(path)
        plt.close()
class ConfidenceInterval(BaseTest):
    """Confidence-interval computation on a parameter of interest (POI).

    The interval at confidence level ``1 - alpha`` is obtained by scanning
    the p-values over the null POI values and interpolating the crossings
    of the ``p-value = alpha`` line on each side of the best-fit value.
    """

    def __init__(self, calculator: BaseCalculator, poinull: POIarray, qtilde: bool=False):
        # qtilde: use the qtilde test statistic, which constrains the POI to
        # be non-negative (the lower bound is then clipped at 0 in interval()).
        super().__init__(calculator, poinull)
        self._qtilde = qtilde

    def qtilde(self) -> bool:
        # NOTE(review): reads like it was a @property upstream -- it is passed
        # un-called as ``qtilde=self.qtilde`` in pvalues(); confirm against
        # the original source.
        return self._qtilde

    def pvalues(self) -> np.ndarray:
        """Two-sided p-values for each scanned null POI value."""
        poialt = None
        return self.calculator.pvalue(poinull=self.poinull, poialt=poialt, qtilde=self.qtilde, onesided=False)[0]

    def interval(self, alpha: float=0.32, printlevel: int=1) -> dict[(str, float)]:
        """Return the interval on the POI at confidence level ``1 - alpha``.

        Returns a dict with keys ``observed`` (best-fit value), ``lower``
        and ``upper``. Raises POIRangeError when the scanned POI range is
        too narrow for the interpolation to find the required crossings.
        """
        bands = {}
        poinull = self.poinull
        observed = self.calculator.bestfit.params[poinull.parameter]['value']
        bands['observed'] = observed
        # If every scanned p-value exceeds alpha, no crossing exists in range.
        if (min(self.pvalues()) > alpha):
            msg = f'The minimum of the scanned p-values is {min(self.pvalues())} which is larger than the'
            msg += f' confidence level alpha = {alpha}. Try to increase the range of POI values.'
            raise POIRangeError(msg)
        # Spline-interpolate (p-value - alpha) over the POI scan and find roots.
        tck = interpolate.splrep(poinull.values, (self.pvalues() - alpha), s=0)
        roots = np.array(interpolate.sproot(tck))
        # Shared suffix for the "missing bound" error messages built below.
        msg = f' bound on the POI `{poinull.name}` cannot not be interpolated.'
        if (roots.size > 2):
            msg_warn = 'Multiple roots have been founds.'
            if isinstance(self.calculator, FrequentistCalculator):
                msg_warn += " Try to increase the number of toys, 'ntoysnull', to reduce fluctuations."
            warnings.warn(msg_warn)
        # Split the crossings on each side of the observed (best-fit) value.
        lower_roots = roots[(roots < observed)]
        upper_roots = roots[(roots > observed)]
        if (upper_roots.size == 0):
            msg = (('Upper' + msg) + ' Try to increase the maximum POI value.')
            raise POIRangeError(msg)
        else:
            bands['upper'] = max(upper_roots)
        if (lower_roots.size == 0):
            if self.qtilde:
                # With qtilde the POI is bounded below by zero.
                bands['lower'] = 0.0
            else:
                msg = (('Low' + msg) + ' Try to decrease the minimum POI value.')
                raise POIRangeError(msg)
        else:
            bands['lower'] = min(lower_roots)
        # Clip a negative lower bound when qtilde constrains the POI at zero.
        if (self.qtilde and (bands['lower'] < 0.0)):
            bands['lower'] = 0.0
        if (printlevel > 0):
            msg = f'''
Confidence interval on {poinull.name}:
'''
            msg += f" {bands['lower']} < {poinull.name} < {bands['upper']} at {((1 - alpha) * 100):.1f}% C.L."
            print(msg)
        return bands
class WarmupMultiStepLR(_WarmupLRScheduler):
    """Multi-step LR schedule with a warmup phase.

    The learning rate is multiplied by ``gamma`` each time a milestone is
    passed; during the first ``warmup_iters`` iterations the warmup factor
    from the base scheduler is applied instead.
    """

    def __init__(self, optimizer, milestones, gamma=0.1, last_epoch=(- 1), cfg=None):
        if not (list(milestones) == sorted(milestones)):
            # The original passed ``milestones`` as a stray second ValueError
            # argument instead of formatting it into the message.
            raise ValueError('Milestones should be a list of increasing integers. Got {}'.format(milestones))
        self.milestones = milestones
        self.gamma = gamma
        super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)

    def get_lr(self, iter):
        """Return the per-parameter-group learning rates for iteration ``iter``."""
        warmup_factor = self.get_warmup_factor_at_iter(iter)
        if iter <= self.warmup_iters:
            return [(warmup_factor * base_lr) for base_lr in self.base_lrs]
        # NOTE(review): the decay step indexes milestones with
        # ``self.last_epoch`` rather than ``iter`` -- confirm this mismatch
        # is intentional upstream before changing it.
        return [(base_lr * (self.gamma ** bisect_right(self.milestones, self.last_epoch))) for base_lr in self.base_lrs]
def test_case19():
    """Create an entity, then expect 404 when deleting a missing attribute."""
    create_url = (brokerIp + '/ngsi-ld/v1/entities/')
    headers = {'Content-Type': 'application/json', 'Accept': 'application/ld+json', 'Link': '<{{link}}>; rel=" type="application/ld+json"'}
    response = requests.post(create_url, data=json.dumps(ld_data.subdata18), headers=headers)
    print(response.content)
    print(response.status_code)
    # Attribute brandName1 does not exist on the entity: deletion must 404.
    delete_url = (brokerIp + '/ngsi-ld/v1/entities/urn:ngsi-ld:Vehicle:A501/attrs/brandName1')
    response = requests.delete(delete_url)
    print(response.content)
    print(response.status_code)
    assert (response.status_code == 404)
('pretrained-simple')
class PretrainedTransformerIndexerSimple(PretrainedTransformerIndexer):
    """Token indexer that works from pre-computed per-token wordpiece ids.

    ``add_token_info`` first attaches each token's wordpiece ids; then
    ``tokens_to_indices`` assembles full-sequence wordpiece ids, type ids,
    token/wordpiece offsets and masks, supporting one- and two-sentence
    inputs.
    """

    def tokens_to_indices(self, tokens: List[Token], vocabulary: Vocabulary) -> Dict[(str, List[int])]:
        """Build the index dict (wordpiece ids, offsets, type ids, masks) for an instance."""
        tokens = get_tokens(tokens)
        token_wordpiece_ids = [[token.info[self._index_name]['wordpiece-ids'] for token in token_list] for token_list in tokens]
        # Two token lists means a sentence pair; otherwise a single sentence.
        if (len(tokens) == 2):
            (wordpiece_ids, type_ids, offsets_doc, offsets_query) = self.intra_word_tokenize_sentence_pair(token_wordpiece_ids[0], token_wordpiece_ids[1])
        else:
            (wordpiece_ids, type_ids, offsets_doc) = self.intra_word_tokenize_sentence(token_wordpiece_ids[0])
        if (len(offsets_doc) == 0):
            (doc_starting_offsets, doc_ending_offsets) = ([], [])
        else:
            (doc_starting_offsets, doc_ending_offsets) = list(zip(*offsets_doc))
        # Over-long sequences: squeeze position ids into [0, 512).
        # NOTE(review): true division yields non-integer position ids here --
        # confirm the downstream position embedding handles floats.
        if (len(wordpiece_ids) > 512):
            postions_ids = [((i * 512) / len(wordpiece_ids)) for i in range(len(wordpiece_ids))]
        else:
            postions_ids = list(range(len(wordpiece_ids)))
        token_mask = ([1] * len(tokens[0]))
        wordpiece_mask = ([1] * len(wordpiece_ids))
        # Map each wordpiece back to its token index; -1 marks wordpieces
        # (e.g. special tokens) that belong to no document token.
        wordpiece_to_tokens = ([(- 1)] * len(wordpiece_ids))
        for (i, (start, end)) in enumerate(zip(doc_starting_offsets, doc_ending_offsets)):
            for j in range(start, end):
                wordpiece_to_tokens[j] = i
        return {'wordpiece-ids': wordpiece_ids, 'document-starting-offsets': list(doc_starting_offsets), 'document-ending-offsets': list(doc_ending_offsets), 'type-ids': type_ids, 'position-ids': postions_ids, 'wordpiece-mask': wordpiece_mask, 'mask': token_mask, 'wordpiece-to-token': wordpiece_to_tokens}

    def add_token_info(self, tokens: List[Token], index_name: str):
        """Attach each token's wordpiece ids under ``token.info[index_name]``."""
        self._index_name = index_name
        for token in tokens:
            wordpieces = self._tokenizer.tokenize(token.text)
            if (len(wordpieces) == 0):
                # Token produced no wordpieces: fall back to the UNK id.
                token.info[index_name] = {'wordpiece-ids': [self._tokenizer.unk_token_id]}
                continue
            token.info[index_name] = {'wordpiece-ids': [bpe_id for bpe_id in self._tokenizer.encode(wordpieces, add_special_tokens=False)]}

    def as_padded_tensor_dict(self, tokens: IndexedTokenList, padding_lengths: Dict[(str, int)]) -> Dict[(str, torch.Tensor)]:
        """Pad every indexed field to its target length and convert to LongTensor.

        Mask and type-id fields pad with 0; everything else pads with the
        tokenizer's pad token id.
        """
        return {key: torch.LongTensor(pad_sequence_to_length(val, padding_lengths[key], default_value=(lambda : (0 if (('mask' in key) or ('type-ids' in key)) else self._tokenizer.pad_token_id)))) for (key, val) in tokens.items()}

    def intra_word_tokenize_in_id(self, tokens: List[List[int]], starting_offset: int=0) -> Tuple[(List[int], List[Tuple[(int, int)]], int)]:
        """Flatten per-token wordpiece ids, recording each token's (start, end) span.

        Returns the flat wordpiece list, the per-token offsets, and the
        cumulative offset after the last token.
        """
        wordpieces: List[int] = []
        offsets = []
        cumulative = starting_offset
        for token in tokens:
            subword_wordpieces = token
            wordpieces.extend(subword_wordpieces)
            start_offset = cumulative
            cumulative += len(subword_wordpieces)
            end_offset = cumulative
            offsets.append((start_offset, end_offset))
        return (wordpieces, offsets, cumulative)

    def intra_word_tokenize_sentence_pair(self, tokens_a: List[List[int]], tokens_b: List[List[int]]) -> Tuple[(List[int], List[int], List[Tuple[(int, int)]], List[Tuple[(int, int)]])]:
        """Assemble a two-sentence input with special tokens, type ids and offsets."""
        (wordpieces_a, offsets_a, cumulative) = self.intra_word_tokenize_in_id(tokens_a, self._allennlp_tokenizer.num_added_start_tokens)
        (wordpieces_b, offsets_b, cumulative) = self.intra_word_tokenize_in_id(tokens_b, (cumulative + self._allennlp_tokenizer.num_added_middle_tokens))
        text_ids = self._tokenizer.build_inputs_with_special_tokens(wordpieces_a, wordpieces_b)
        type_ids = self._tokenizer.create_token_type_ids_from_sequences(wordpieces_a, wordpieces_b)
        # Offsets must line up with the id sequence after special tokens.
        assert ((cumulative + self._allennlp_tokenizer.num_added_end_tokens) == len(text_ids))
        return (text_ids, type_ids, offsets_a, offsets_b)

    def intra_word_tokenize_sentence(self, tokens_a: List[List[int]]) -> Tuple[(List[int], List[int], List[Tuple[(int, int)]])]:
        """Assemble a single-sentence input with special tokens, type ids and offsets."""
        (wordpieces_a, offsets_a, cumulative) = self.intra_word_tokenize_in_id(tokens_a, self._allennlp_tokenizer.num_added_start_tokens)
        text_ids = self._tokenizer.build_inputs_with_special_tokens(wordpieces_a)
        type_ids = self._tokenizer.create_token_type_ids_from_sequences(wordpieces_a)
        assert ((cumulative + self._allennlp_tokenizer.num_added_end_tokens) == len(text_ids))
        return (text_ids, type_ids, offsets_a)
class _ROIPool(Function):
    """autograd Function wrapping the C extension's ROI max-pooling kernels."""

    def forward(ctx, input, roi, output_size, spatial_scale):
        # Stash everything backward() needs to rebuild the input gradient.
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = spatial_scale
        ctx.input_shape = input.size()
        (output, argmax) = _C.roi_pool_forward(input, roi, spatial_scale, output_size[0], output_size[1])
        ctx.save_for_backward(input, roi, argmax)
        return output
    # NOTE(review): the bare ``_differentiable`` below looks like a stripped
    # decorator (e.g. ``@once_differentiable``) -- confirm against the
    # original source.
    _differentiable
    def backward(ctx, grad_output):
        """Route grad_output back through the C extension; only ``input`` gets a gradient."""
        (input, rois, argmax) = ctx.saved_tensors
        output_size = ctx.output_size
        spatial_scale = ctx.spatial_scale
        (bs, ch, h, w) = ctx.input_shape
        grad_input = _C.roi_pool_backward(grad_output, input, rois, argmax, spatial_scale, output_size[0], output_size[1], bs, ch, h, w)
        # The remaining forward args (roi, output_size, spatial_scale) are
        # non-differentiable, hence None.
        return (grad_input, None, None, None)
class GitRequirement(Requirement):
    """Requirement that git (version 2.0 or newer) is available on the host."""

    def __init__(self):
        super().__init__('git 2.0+')

    def check(self):
        """Probe for git by invoking ``git --version`` through the shell helper."""
        Shell.exec('git --version')
def store_cluster_nodes(gold_clusters, gold_cluster_lookup, gold_nuggets, sys_nuggets, g2s_mapping):
    """Build graph-node ids for gold and system event clusters.

    Each gold nugget is mapped to its aligned system nugget via
    ``g2s_mapping`` (gold index -> (system index, score)); one node id
    ``te<k>`` is assigned per gold cluster (shared by gold and system
    sides), and every unmapped system nugget is placed in an extra cluster
    on the system side.

    Returns:
        (cluster_nodes_in_gold, cluster_nodes_in_sys,
         cluster_id_to_gold_node, cluster_id_to_sys_node, rewritten_lookup)
    """
    cluster_nodes_in_gold = []
    cluster_id_to_gold_node = {}
    cluster_nodes_in_sys = []
    cluster_id_to_sys_node = {}
    rewritten_lookup = {}
    gold_id_2_system_id = {}
    mapped_system_nuggets = set()
    for (gold_index, (sys_index, _)) in enumerate(g2s_mapping):
        gold_nugget_id = gold_nuggets[gold_index]
        sys_nugget_id = sys_nuggets[sys_index]
        gold_id_2_system_id[gold_nugget_id] = sys_nugget_id
    # Rewrite the gold cluster lookup in terms of system nugget ids.
    # (``dict.iteritems`` is Python-2-only; ``items`` works on both 2 and 3.)
    for (gold_nugget_id, cluster_id) in gold_cluster_lookup.items():
        sys_nugget_id = gold_id_2_system_id[gold_nugget_id]
        rewritten_lookup[sys_nugget_id] = cluster_id
        mapped_system_nuggets.add(sys_nugget_id)
    tid = 0
    max_cluster_id = 0
    for (cluster_id, cluster) in gold_clusters.items():
        node_id = ('te%d' % tid)
        cluster_nodes_in_gold.append(node_id)
        cluster_nodes_in_sys.append(node_id)
        cluster_id_to_gold_node[cluster_id] = node_id
        cluster_id_to_sys_node[cluster_id] = node_id
        tid += 1
        if cluster_id > max_cluster_id:
            max_cluster_id = cluster_id
    # System nuggets with no gold counterpart go into an extra cluster.
    # NOTE(review): additional_cluster_id is never incremented, so all
    # unmapped nuggets share one cluster id and the node mapping keeps only
    # the last node -- confirm whether a per-nugget increment was intended.
    additional_cluster_id = (max_cluster_id + 1)
    for nugget in sys_nuggets:
        if nugget not in mapped_system_nuggets:
            node_id = ('te%d' % tid)
            cluster_nodes_in_sys.append(node_id)
            cluster_id_to_sys_node[additional_cluster_id] = node_id
            rewritten_lookup[nugget] = additional_cluster_id
            tid += 1
    return (cluster_nodes_in_gold, cluster_nodes_in_sys, cluster_id_to_gold_node, cluster_id_to_sys_node, rewritten_lookup)
def save_options_file(opt):
    """Persist ``opt`` as ``<output_path>/options.yaml``.

    If an options file already exists and differs from the current options,
    show a diff and interactively ask whether to override (exiting on 'n').
    """
    opt_fname = '{}/options.yaml'.format(opt.output_path)
    if os.path.isfile(opt_fname):
        with open(opt_fname) as file:
            opt_old = yaml.safe_load(file)
        # NOTE(review): ``opt`` (an options object) is compared against the
        # dict loaded from YAML -- confirm ``opt`` defines a compatible
        # __eq__, otherwise this branch always triggers.
        if (opt != opt_old):
            # Dump the current options to a temp file purely to diff them.
            opt_new_fname = '{}/options_temp.yaml'.format(opt.output_path)
            with open(opt_new_fname, 'w') as file:
                yaml.safe_dump(util.to_dict(opt), file, default_flow_style=False, indent=4)
            print('existing options file found (different from current one)...')
            os.system('diff {} {}'.format(opt_fname, opt_new_fname))
            os.system('rm {}'.format(opt_new_fname))
            # Prompt until a valid answer is given.
            override = None
            while (override not in ['y', 'n']):
                override = input('override? (y/n) ')
            if (override == 'n'):
                print('safe exiting...')
                exit()
        else:
            print('existing options file found (identical)')
    else:
        print('(creating new options file...)')
    # Write (or overwrite) the options file.
    with open(opt_fname, 'w') as file:
        yaml.safe_dump(util.to_dict(opt), file, default_flow_style=False, indent=4)
class SpacepyTestingTests(unittest.TestCase):
    """Tests for the spacepy_testing assertWarns/assertDoesntWarn helpers."""

    def testassertWarnsSimple(self):
        # Exactly one matching warning: the context manager passes silently.
        with spacepy_testing.assertWarns(self, 'always', 'f.*', SPTestWarning):
            warnings.warn('foo', SPTestWarning)

    def testassertWarnsfilter(self):
        # Non-matching warnings must pass through to the outer catcher.
        with warnings.catch_warnings(record=True) as cm:
            with spacepy_testing.assertWarns(self, 'always', 'f.*', SPTestWarning):
                warnings.warn('foo', SPTestWarning)
                warnings.warn('foo')
                warnings.warn('bar', SPTestWarning)
        # The matched warning is consumed; the other two reach the catcher.
        self.assertEqual(2, len(cm))
        for w in cm:
            self.assertIn(w.category, (UserWarning, SPTestWarning))
            if (w.category is UserWarning):
                self.assertEqual('foo', str(w.message))
            else:
                self.assertEqual('bar', str(w.message))

    def testassertWarnsTwice(self):
        # Two matching warnings violate the exactly-one expectation.
        with self.assertRaises(AssertionError):
            with spacepy_testing.assertWarns(self, 'always', 'f.*', SPTestWarning):
                warnings.warn('foo', SPTestWarning)
                warnings.warn('foo2', SPTestWarning)

    def testassertDoesntWarnSimple(self):
        # No warning issued: assertDoesntWarn passes.
        with spacepy_testing.assertDoesntWarn(self, 'always', 'f.*', SPTestWarning):
            pass

    def testassertDoesntWarnIssued(self):
        # A matching warning must fail assertDoesntWarn.
        with self.assertRaises(AssertionError):
            with spacepy_testing.assertDoesntWarn(self, 'always', 'f.*', SPTestWarning):
                warnings.warn('foo', SPTestWarning)

    def testassertDoesntWarnfilter(self):
        # Non-matching warnings pass through and do not trip the assertion.
        with warnings.catch_warnings(record=True) as cm:
            with spacepy_testing.assertDoesntWarn(self, 'always', 'f.*', SPTestWarning):
                warnings.warn('foo')
                warnings.warn('bar', SPTestWarning)
        self.assertEqual(2, len(cm))
        for w in cm:
            self.assertIn(w.category, (UserWarning, SPTestWarning))
            if (w.category is UserWarning):
                self.assertEqual('foo', str(w.message))
            else:
                self.assertEqual('bar', str(w.message))
_NETWORK.register_module()
class FFN(BaseModule):
    """Feed-forward network (FFN) block as used in transformer layers.

    ``num_fcs - 1`` hidden Linear + activation + dropout stages expand to
    ``feedforward_channels``; a final Linear projects back to
    ``embed_dims``, optionally followed by a residual ("identity") add.
    """
    # NOTE(review): the bare ``_api_warning(...)`` lines below look like
    # stripped decorators for deprecated-argument renaming -- confirm
    # against the original source.
    _api_warning({'dropout': 'ffn_drop', 'add_residual': 'add_identity'}, cls_name='FFN')
    def __init__(self, embed_dims=256, feedforward_channels=1024, num_fcs=2, act_cfg=dict(type='ReLU', inplace=True), ffn_drop=0.0, dropout_layer=None, add_identity=True, init_cfg=None, **kwargs):
        super(FFN, self).__init__(init_cfg)
        assert (num_fcs >= 2), f'num_fcs should be no less than 2. got {num_fcs}.'
        self.embed_dims = embed_dims
        self.feedforward_channels = feedforward_channels
        self.num_fcs = num_fcs
        self.act_cfg = act_cfg
        self.activate = build_activation_layer(act_cfg)
        layers = []
        in_channels = embed_dims
        # Hidden stages: Linear -> activation -> dropout.
        for _ in range((num_fcs - 1)):
            layers.append(Sequential(Linear(in_channels, feedforward_channels), self.activate, nn.Dropout(ffn_drop)))
            in_channels = feedforward_channels
        # Output projection back to the embedding dimension.
        layers.append(Linear(feedforward_channels, embed_dims))
        layers.append(nn.Dropout(ffn_drop))
        self.layers = Sequential(*layers)
        # Optional structured dropout on the output before the residual add.
        self.dropout_layer = (build_dropout(dropout_layer) if dropout_layer else torch.nn.Identity())
        self.add_identity = add_identity
    _api_warning({'residual': 'identity'}, cls_name='FFN')
    def forward(self, x, identity=None):
        """Forward pass; ``identity`` defaults to the input for the residual add."""
        out = self.layers(x)
        if (not self.add_identity):
            return self.dropout_layer(out)
        if (identity is None):
            identity = x
        return (identity + self.dropout_layer(out))
class BasicBlock(nn.Module):
    """Residual block with squeeze-and-excitation channel gating and PDELU
    activations (ResNet BasicBlock variant)."""

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.pdelu = PDELU()
        # Projection shortcut only when the shape changes.
        self.shortcut = nn.Sequential()
        if (stride != 1) or (in_planes != planes):
            self.shortcut = nn.Sequential(nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes))
        # Squeeze-and-excitation bottleneck (reduction factor 16).
        self.fc1 = nn.Conv2d(planes, (planes // 16), kernel_size=1)
        self.fc2 = nn.Conv2d((planes // 16), planes, kernel_size=1)

    def forward(self, x):
        out = self.pdelu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Squeeze: global average pool per channel.
        # NOTE(review): pooling with kernel out.size(2) assumes square
        # feature maps -- confirm inputs are square.
        w = F.avg_pool2d(out, out.size(2))
        w = self.pdelu(self.fc1(w))
        # ``F.sigmoid`` is deprecated; the tensor method is equivalent.
        w = self.fc2(w).sigmoid()
        out = (out * w)
        out += self.shortcut(x)
        out = self.pdelu(out)
        return out
def create_segmenter(model_cfg):
    """Assemble a Segmenter model (ViT encoder + mask decoder) from a config dict."""
    cfg = model_cfg.copy()  # avoid mutating the caller's dict
    decoder_cfg = cfg.pop('decoder')
    n_cls = cfg['n_cls']
    # The decoder needs the class count from the top-level config.
    decoder_cfg['n_cls'] = n_cls
    encoder = create_vit(cfg)
    decoder = create_decoder(encoder, decoder_cfg)
    return Segmenter(encoder, decoder, n_cls=n_cls)
class ResnetEncoder(nn.Module):
    """ResNet backbone that exposes the five multi-scale feature maps."""

    def __init__(self, num_layers, pretrained):
        super(ResnetEncoder, self).__init__()
        self.num_ch_enc = np.array([64, 64, 128, 256, 512])
        resnets = {18: models.resnet18, 34: models.resnet34, 50: models.resnet50, 101: models.resnet101, 152: models.resnet152}
        if num_layers not in resnets:
            raise ValueError('{} is not a valid number of resnet layers'.format(num_layers))
        self.encoder = resnets[num_layers](pretrained)
        # Bottleneck variants (50+) produce 4x the channels after stage 1.
        if num_layers > 34:
            self.num_ch_enc[1:] *= 4

    def forward(self, input_image):
        self.features = []
        # Normalize with the mean/std constants baked into this encoder.
        x = (input_image - 0.45) / 0.225
        x = self.encoder.bn1(self.encoder.conv1(x))
        self.features.append(self.encoder.relu(x))
        self.features.append(self.encoder.layer1(self.encoder.maxpool(self.features[-1])))
        # Each later stage consumes the previous stage's output.
        for stage in (self.encoder.layer2, self.encoder.layer3, self.encoder.layer4):
            self.features.append(stage(self.features[-1]))
        return self.features
class TimestepSubProblem(SubProblem):
    """SubProblem whose "timestepped" inputs are replicated across a fixed
    number of timesteps."""

    # Number of timesteps each timestepped input field is replicated over.
    timesteps: int

    def __init__(self, timesteps: int, name: str=None) -> None:
        self.timesteps = timesteps
        super().__init__(name=name)

    def build_inputs(self) -> None:
        """Construct symbolic instances for every field of ``self.Inputs``.

        Fields whose metadata marks them ``timestepped`` get one symbolic
        value per timestep; fields with a ``length`` annotation are built
        from a sequence prototype; all remaining fields get a single
        symbolic instance.
        """
        constructed_fields = {}
        # Resolve (possibly string) annotations to real types once, up front.
        type_hints_map = T.get_type_hints(self.Inputs)
        for field in dataclasses.fields(self.Inputs):
            field_type = type_hints_map[field.name]
            if field.metadata.get('timestepped', False):
                # One symbolic element per timestep, using the sequence's
                # element type (first type argument).
                field_type = T.get_args(field_type)[0]
                constructed_fields[field.name] = [ops.StorageOps.symbolic(field_type, f'{self.name}.{field.name}[{i}]') for i in range(self.timesteps)]
            elif field.metadata.get('length', False):
                # Fixed-length sequence: build a prototype instance first.
                sequence_instance = typing_util.get_sequence_from_dataclass_sequence_field(field, field_type)
                constructed_fields[field.name] = ops.StorageOps.symbolic(sequence_instance, f'{self.name}.{field.name}')
            else:
                try:
                    constructed_fields[field.name] = ops.StorageOps.symbolic(field_type, f'{self.name}.{field.name}')
                except NotImplementedError as ex:
                    raise TypeError(f'Could not create instance of type {field_type} for field {self.name}.{field.name}; if this is a sequence, please either annotate with timestepped=True, or override build_inputs') from ex
        self.inputs = self.Inputs(**constructed_fields)
class ActorPerturbation(nn.Module, BasePolicy):
    """Actor that produces a latent action, decodes it, then adds a small
    learned perturbation bounded by ``phi``."""

    def __init__(self, state_dim, action_dim, latent_action_dim, max_action, max_latent_action=2, phi=0.05):
        super(ActorPerturbation, self).__init__()
        self.hidden_size = (400, 300, 400, 300)
        # Latent-action trunk: state -> latent action.
        self.l1 = nn.Linear(state_dim, self.hidden_size[0])
        self.l2 = nn.Linear(self.hidden_size[0], self.hidden_size[1])
        self.l3 = nn.Linear(self.hidden_size[1], latent_action_dim)
        # Perturbation trunk: (state, decoded action) -> correction.
        self.l4 = nn.Linear((state_dim + action_dim), self.hidden_size[2])
        self.l5 = nn.Linear(self.hidden_size[2], self.hidden_size[3])
        self.l6 = nn.Linear(self.hidden_size[3], action_dim)
        self.max_latent_action = max_latent_action
        self.max_action = max_action
        self.phi = phi
        self.vae = None  # assigned externally before policy_infer is used

    def forward(self, state, decoder):
        h = F.relu(self.l1(state))
        h = F.relu(self.l2(h))
        latent_action = self.max_latent_action * torch.tanh(self.l3(h))
        mid_action = decoder(state, z=latent_action)
        h = F.relu(self.l4(torch.cat([state, mid_action], 1)))
        h = F.relu(self.l5(h))
        perturbation = self.phi * torch.tanh(self.l6(h))
        final_action = (perturbation + mid_action).clamp(-self.max_action, self.max_action)
        return (latent_action, mid_action, final_action)

    def policy_infer(self, obs):
        # Only the final (perturbed, clamped) action is used at inference.
        return self(obs, self.vae.decode)[-1]
class SwatPLC3(PLC):
    """SWaT stage-3 PLC: repeatedly reads LIT301 and forwards it to PLC3."""

    def pre_loop(self, sleep=0.1):
        print('DEBUG: swat-s1 plc3 enters pre_loop')
        time.sleep(sleep)

    def main_loop(self):
        print('DEBUG: swat-s1 plc3 enters main_loop.')
        # The original counted 0..PLC_SAMPLES inclusive: PLC_SAMPLES + 1 samples.
        for _ in range(PLC_SAMPLES + 1):
            lit301 = float(self.get(LIT301_3))
            print(('DEBUG PLC3 - get lit301: %f' % lit301))
            self.send(LIT301_3, lit301, PLC3_ADDR)
            time.sleep(PLC_PERIOD_SEC)
        print('DEBUG swat plc3 shutdown')
def rnd_int_uniform(low, high):
    """Draw an integer uniformly at random from the inclusive range [low, high]."""
    # Degenerate range short-circuits without consuming randomness;
    # high + 1 makes the upper bound inclusive.
    return low if low == high else np.random.choice(range(low, high + 1))
class UnetSkipConnectionBlock_F32(nn.Module):
    """U-Net skip-connection block built from stride-1 3x3 convolutions
    (no spatial down/upsampling).

    Builds a down path, the nested ``submodule`` and an up path; unless
    this is the outermost block, the input is concatenated to the output
    along the channel dimension (the U-Net skip connection).
    """

    def __init__(self, outer_nc, inner_nc, input_nc=None, submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(UnetSkipConnectionBlock_F32, self).__init__()
        self.outermost = outermost
        # InstanceNorm has no affine shift, so convs need their own bias;
        # norm_layer may arrive wrapped in functools.partial.
        if (type(norm_layer) == functools.partial):
            use_bias = (norm_layer.func == nn.InstanceNorm2d)
        else:
            use_bias = (norm_layer == nn.InstanceNorm2d)
        if (input_nc is None):
            input_nc = outer_nc
        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=3, stride=1, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc)
        if outermost:
            # Outermost: no norm on either end; the submodule's skip concat
            # doubles the channels feeding the upconv.
            upconv = nn.ConvTranspose2d((inner_nc * 2), outer_nc, kernel_size=3, stride=1, padding=1)
            down = [downconv]
            up = [uprelu, upconv]
            model = ((down + [submodule]) + up)
        elif innermost:
            # Innermost: no nested submodule, single-width upconv.
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=3, stride=1, padding=1, bias=use_bias)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = (down + up)
        else:
            # Intermediate: norm on both paths, optional dropout at the end.
            upconv = nn.ConvTranspose2d((inner_nc * 2), outer_nc, kernel_size=3, stride=1, padding=1, bias=use_bias)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]
            if use_dropout:
                model = (((down + [submodule]) + up) + [nn.Dropout(0.5)])
            else:
                model = ((down + [submodule]) + up)
        self.model = nn.Sequential(*model)

    def forward(self, x):
        if self.outermost:
            return self.model(x)
        else:
            # Skip connection: concatenate input with the block's output.
            return torch.cat([x, self.model(x)], 1)
class TestLookupIntentParser(FixtureTest):
def setUp(self):
super(TestLookupIntentParser, self).setUp()
slots_dataset_stream = io.StringIO('\n---\ntype: intent\nname: dummy_intent_1\nslots:\n - name: dummy_slot_name\n entity: dummy_entity_1\n - name: dummy_slot_name2\n entity: dummy_entity_2\n - name: startTime\n entity: snips/datetime\nutterances:\n - >\n This is a [dummy_slot_name](dummy_1) query with another\n [dummy_slot_name2](dummy_2) [startTime](at 10p.m.) or\n [startTime](tomorrow)\n - "This is a [dummy_slot_name](dummy_1) "\n - "[startTime](tomorrow evening) there is a [dummy_slot_name](dummy_1)"\n\n---\ntype: entity\nname: dummy_entity_1\nautomatically_extensible: no\nvalues:\n- [dummy_a, dummy 2a, dummy a, 2 dummy a]\n- [dummy_b, dummy b, dummy_bb, dummy_b]\n- dummy d\n\n---\ntype: entity\nname: dummy_entity_2\nautomatically_extensible: no\nvalues:\n- [dummy_c, 3p.m., dummy_cc, dummy c]')
self.slots_dataset = Dataset.from_yaml_files('en', [slots_dataset_stream]).json
def test_should_parse_intent(self):
dataset_stream = io.StringIO('\n---\ntype: intent\nname: intent1\nutterances:\n - foo bar baz\n\n---\ntype: intent\nname: intent2\nutterances:\n - foo bar ban')
dataset = Dataset.from_yaml_files('en', [dataset_stream]).json
parser = LookupIntentParser().fit(dataset)
text = 'foo bar ban'
parsing = parser.parse(text)
probability = 1.0
expected_intent = intent_classification_result(intent_name='intent2', probability=probability)
self.assertEqual(expected_intent, parsing[RES_INTENT])
def test_should_parse_intent_with_filter(self):
dataset_stream = io.StringIO('\n---\ntype: intent\nname: intent1\nutterances:\n - foo bar baz\n\n---\ntype: intent\nname: intent2\nutterances:\n - foo bar ban')
dataset = Dataset.from_yaml_files('en', [dataset_stream]).json
parser = LookupIntentParser().fit(dataset)
text = 'foo bar ban'
parsing = parser.parse(text, intents=['intent1'])
self.assertEqual(empty_result(text, 1.0), parsing)
def test_should_parse_top_intents(self):
dataset_stream = io.StringIO('\n---\ntype: intent\nname: intent1\nutterances:\n - meeting [time:snips/datetime](today)\n\n---\ntype: intent\nname: intent2\nutterances:\n - meeting tomorrow\n\n---\ntype: intent\nname: intent3\nutterances:\n - "[event_type](call) [time:snips/datetime](at 9pm)"\n\n---\ntype: entity\nname: event_type\nvalues:\n - meeting\n - feedback session')
dataset = Dataset.from_yaml_files('en', [dataset_stream]).json
parser = LookupIntentParser().fit(dataset)
text = 'meeting tomorrow'
results = parser.parse(text, top_n=3)
time_slot = {'entity': 'snips/datetime', 'range': {'end': 16, 'start': 8}, 'slotName': 'time', 'value': 'tomorrow'}
event_slot = {'entity': 'event_type', 'range': {'end': 7, 'start': 0}, 'slotName': 'event_type', 'value': 'meeting'}
weight_intent_1 = (1.0 / 2.0)
weight_intent_2 = 1.0
weight_intent_3 = (1.0 / 3.0)
total_weight = ((weight_intent_1 + weight_intent_2) + weight_intent_3)
proba_intent2 = (weight_intent_2 / total_weight)
proba_intent1 = (weight_intent_1 / total_weight)
proba_intent3 = (weight_intent_3 / total_weight)
expected_results = [extraction_result(intent_classification_result(intent_name='intent2', probability=proba_intent2), slots=[]), extraction_result(intent_classification_result(intent_name='intent1', probability=proba_intent1), slots=[time_slot]), extraction_result(intent_classification_result(intent_name='intent3', probability=proba_intent3), slots=[event_slot, time_slot])]
self.assertEqual(expected_results, results)
('snips_nlu.intent_parser.lookup_intent_parser.get_stop_words')
def test_should_parse_intent_with_stop_words(self, mock_get_stop_words):
mock_get_stop_words.return_value = {'a', 'hey'}
dataset = self.slots_dataset
config = LookupIntentParserConfig(ignore_stop_words=True)
parser = LookupIntentParser(config).fit(dataset)
text = 'Hey this is dummy_a query with another dummy_c at 10p.m. or at 12p.m.'
parsing = parser.parse(text)
probability = 1.0
expected_intent = intent_classification_result(intent_name='dummy_intent_1', probability=probability)
self.assertEqual(expected_intent, parsing[RES_INTENT])
def test_should_parse_intent_with_duplicated_slot_names(self):
slots_dataset_stream = io.StringIO('\n---\ntype: intent\nname: math_operation\nslots:\n - name: number\n entity: snips/number\nutterances:\n - what is [number](one) plus [number](one)')
dataset = Dataset.from_yaml_files('en', [slots_dataset_stream]).json
parser = LookupIntentParser().fit(dataset)
text = 'what is one plus one'
parsing = parser.parse(text)
probability = 1.0
expected_intent = intent_classification_result(intent_name='math_operation', probability=probability)
expected_slots = [{'entity': 'snips/number', 'range': {'end': 11, 'start': 8}, 'slotName': 'number', 'value': 'one'}, {'entity': 'snips/number', 'range': {'end': 20, 'start': 17}, 'slotName': 'number', 'value': 'one'}]
self.assertDictEqual(expected_intent, parsing[RES_INTENT])
self.assertListEqual(expected_slots, parsing[RES_SLOTS])
def test_should_parse_intent_with_ambivalent_words(self):
slots_dataset_stream = io.StringIO('\n---\ntype: intent\nname: give_flower\nutterances:\n - give a rose to [name](emily)\n - give a daisy to [name](tom)\n - give a tulip to [name](daisy)\n ')
dataset = Dataset.from_yaml_files('en', [slots_dataset_stream]).json
parser = LookupIntentParser().fit(dataset)
text = 'give a daisy to emily'
parsing = parser.parse(text)
expected_intent = intent_classification_result(intent_name='give_flower', probability=1.0)
expected_slots = [{'entity': 'name', 'range': {'end': 21, 'start': 16}, 'slotName': 'name', 'value': 'emily'}]
self.assertDictEqual(expected_intent, parsing[RES_INTENT])
self.assertListEqual(expected_slots, parsing[RES_SLOTS])
def test_should_ignore_completely_ambiguous_utterances(self):
    """An utterance shared verbatim by two intents must produce an empty result."""
    yaml_stream = io.StringIO('\n---\ntype: intent\nname: dummy_intent_1\nutterances:\n - Hello world\n\n---\ntype: intent\nname: dummy_intent_2\nutterances:\n - Hello world')
    dataset = Dataset.from_yaml_files('en', [yaml_stream]).json
    parser = LookupIntentParser().fit(dataset)
    ambiguous_text = 'Hello world'
    self.assertEqual(empty_result(ambiguous_text, 1.0), parser.parse(ambiguous_text))
def test_should_ignore_very_ambiguous_utterances(self):
    """When intents collide through entity values, parsing must return an empty result."""
    # 'call' is an event_type value (intent_1) and literal text (intent_2), and
    # 'tomorrow' matches snips/datetime, so 'call tomorrow' fits both intents
    yaml_stream = io.StringIO('\n---\ntype: intent\nname: intent_1\nutterances:\n - "[event_type](meeting) tomorrow"\n\n---\ntype: intent\nname: intent_2\nutterances:\n - call [time:snips/datetime](today)\n\n---\ntype: entity\nname: event_type\nvalues:\n - call\n - diner')
    dataset = Dataset.from_yaml_files('en', [yaml_stream]).json
    parser = LookupIntentParser().fit(dataset)
    ambiguous_text = 'call tomorrow'
    self.assertEqual(empty_result(ambiguous_text, 1.0), parser.parse(ambiguous_text))
def test_should_parse_slightly_ambiguous_utterances(self):
    """Mild ambiguity should still resolve, with a probability below 1."""
    yaml_stream = io.StringIO('\n---\ntype: intent\nname: intent_1\nutterances:\n - call tomorrow\n\n---\ntype: intent\nname: intent_2\nutterances:\n - call [time:snips/datetime](today)')
    dataset = Dataset.from_yaml_files('en', [yaml_stream]).json
    parser = LookupIntentParser().fit(dataset)
    input_text = 'call tomorrow'
    result = parser.parse(input_text)
    # Exact match beats the entity-based match with probability 2/3
    winning_intent = intent_classification_result(intent_name='intent_1', probability=(2.0 / 3.0))
    self.assertEqual(parsing_result(input_text, winning_intent, []), result)
def test_should_not_parse_when_not_fitted(self):
    """Parsing with a parser that was never fitted must raise NotTrained."""
    unfitted_parser = LookupIntentParser()
    self.assertFalse(unfitted_parser.fitted)
    with self.assertRaises(NotTrained):
        unfitted_parser.parse('foobar')
def test_should_parse_intent_after_deserialization(self):
    """Intent parsing must survive a persist/load round trip."""
    dataset = self.slots_dataset
    shared = self.get_shared_data(dataset)
    # fit() returns the parser, so the calls can be chained
    LookupIntentParser(**shared).fit(dataset).persist(self.tmp_file_path)
    reloaded = LookupIntentParser.from_path(self.tmp_file_path, **shared)
    parsing = reloaded.parse('this is a dummy_a query with another dummy_c at 10p.m. or at 12p.m.')
    expected_intent = intent_classification_result(intent_name='dummy_intent_1', probability=1.0)
    self.assertEqual(expected_intent, parsing[RES_INTENT])
def test_should_parse_slots(self):
    """Custom and builtin (snips/datetime) slots are extracted with exact character ranges.

    The fixtures cover punctuation noise, leading/trailing whitespace and
    multiple datetime matches within a single utterance.
    """
    dataset = self.slots_dataset
    parser = LookupIntentParser().fit(dataset)
    # Pairs of (input text, expected unresolved slots)
    texts = [('this is a dummy a query with another dummy_c at 10p.m. or at 12p.m.', [unresolved_slot(match_range=(10, 17), value='dummy a', entity='dummy_entity_1', slot_name='dummy_slot_name'), unresolved_slot(match_range=(37, 44), value='dummy_c', entity='dummy_entity_2', slot_name='dummy_slot_name2'), unresolved_slot(match_range=(45, 54), value='at 10p.m.', entity='snips/datetime', slot_name='startTime'), unresolved_slot(match_range=(58, 67), value='at 12p.m.', entity='snips/datetime', slot_name='startTime')]), ('this, is,, a, dummy a query with another dummy_c at 10pm or at 12p.m.', [unresolved_slot(match_range=(14, 21), value='dummy a', entity='dummy_entity_1', slot_name='dummy_slot_name'), unresolved_slot(match_range=(41, 48), value='dummy_c', entity='dummy_entity_2', slot_name='dummy_slot_name2'), unresolved_slot(match_range=(49, 56), value='at 10pm', entity='snips/datetime', slot_name='startTime'), unresolved_slot(match_range=(60, 69), value='at 12p.m.', entity='snips/datetime', slot_name='startTime')]), ('this is a dummy b', [unresolved_slot(match_range=(10, 17), value='dummy b', entity='dummy_entity_1', slot_name='dummy_slot_name')]), (' this is a dummy b ', [unresolved_slot(match_range=(11, 18), value='dummy b', entity='dummy_entity_1', slot_name='dummy_slot_name')]), (' at 8am there is a dummy a', [unresolved_slot(match_range=(1, 7), value='at 8am', entity='snips/datetime', slot_name='startTime'), unresolved_slot(match_range=(21, 29), value='dummy a', entity='dummy_entity_1', slot_name='dummy_slot_name')])]
    for (text, expected_slots) in texts:
        parsing = parser.parse(text)
        self.assertListEqual(expected_slots, parsing[RES_SLOTS])
def test_should_parse_stop_words_slots(self):
    """Stop words are ignored for matching but may still appear inside slot values."""
    yaml_stream = io.StringIO('\n---\ntype: intent\nname: search\nutterances:\n - search\n - search [search_object](this)\n - search [search_object](a cat)\n\n---\ntype: entity\nname: search_object\nvalues:\n - [this thing, that]\n ')
    # Use a private copy of the resources with custom stop words for this test
    resources = deepcopy(self.get_resources('en'))
    resources[STOP_WORDS] = {'a', 'this', 'that'}
    dataset = Dataset.from_yaml_files('en', [yaml_stream]).json
    parser_config = LookupIntentParserConfig(ignore_stop_words=True)
    parser = LookupIntentParser(config=parser_config, resources=resources)
    parser.fit(dataset)
    expected_intent = intent_classification_result(intent_name='search', probability=1.0)
    # Both stop-word values should still be extracted as search_object slots
    for word in ('this', 'that'):
        result = parser.parse('search ' + word)
        expected_slots = [unresolved_slot(match_range=(7, 11), value=word, entity='search_object', slot_name='search_object')]
        self.assertEqual(expected_intent, result[RES_INTENT])
        self.assertListEqual(expected_slots, result[RES_SLOTS])
def test_should_get_intents(self):
    """get_intents ranks all intents by normalized match weight, with the None intent last."""
    yaml_stream = io.StringIO('\n---\ntype: intent\nname: greeting1\nutterances:\n - Hello John\n\n---\ntype: intent\nname: greeting2\nutterances:\n - Hello [name](John)\n\n---\ntype: intent\nname: greeting3\nutterances:\n - "[greeting](Hello) [name](John)"\n ')
    dataset = Dataset.from_yaml_files('en', [yaml_stream]).json
    parser = LookupIntentParser().fit(dataset)
    # Per-intent weights 1, 1/2 and 1/3 are normalized by their sum
    total_weight = ((1.0 + (1.0 / 2.0)) + (1.0 / 3.0))
    expected_intents = [
        {RES_INTENT_NAME: 'greeting1', RES_PROBA: (1.0 / total_weight)},
        {RES_INTENT_NAME: 'greeting2', RES_PROBA: ((1.0 / 2.0) / total_weight)},
        {RES_INTENT_NAME: 'greeting3', RES_PROBA: ((1.0 / 3.0) / total_weight)},
        {RES_INTENT_NAME: None, RES_PROBA: 0.0},
    ]
    self.assertListEqual(expected_intents, parser.get_intents('Hello John'))
def test_should_get_slots(self):
    """get_slots extracts slots only for the intent that is explicitly requested."""
    yaml_stream = io.StringIO('\n---\ntype: intent\nname: greeting1\nutterances:\n - Hello [name1](John)\n\n---\ntype: intent\nname: greeting2\nutterances:\n - Hello [name2](Thomas)\n\n---\ntype: intent\nname: goodbye\nutterances:\n - Goodbye [name](Eric)')
    dataset = Dataset.from_yaml_files('en', [yaml_stream]).json
    parser = LookupIntentParser().fit(dataset)
    slots_greeting1 = parser.get_slots('Hello John', 'greeting1')
    slots_greeting2 = parser.get_slots('Hello Thomas', 'greeting2')
    # Text belonging to another intent yields no slots for the requested one
    slots_goodbye = parser.get_slots('Goodbye Eric', 'greeting1')
    self.assertEqual(0, len(slots_goodbye))
    self.assertEqual(1, len(slots_greeting1))
    self.assertEqual('John', slots_greeting1[0][RES_VALUE])
    self.assertEqual('name1', slots_greeting1[0][RES_ENTITY])
    self.assertEqual(1, len(slots_greeting2))
    self.assertEqual('Thomas', slots_greeting2[0][RES_VALUE])
    self.assertEqual('name2', slots_greeting2[0][RES_ENTITY])
def test_should_get_no_slots_with_none_intent(self):
    """get_slots with a None intent returns an empty list."""
    yaml_stream = io.StringIO('\n---\ntype: intent\nname: greeting\nutterances:\n - Hello [name](John)')
    dataset = Dataset.from_yaml_files('en', [yaml_stream]).json
    parser = LookupIntentParser().fit(dataset)
    self.assertListEqual([], parser.get_slots('Hello John', None))
def test_get_slots_should_raise_with_unknown_intent(self):
    """Requesting slots for an intent absent from the dataset raises IntentNotFoundError."""
    yaml_stream = io.StringIO('\n---\ntype: intent\nname: greeting1\nutterances:\n - Hello [name1](John)\n\n---\ntype: intent\nname: goodbye\nutterances:\n - Goodbye [name](Eric)')
    dataset = Dataset.from_yaml_files('en', [yaml_stream]).json
    fitted_parser = LookupIntentParser().fit(dataset)
    with self.assertRaises(IntentNotFoundError):
        fitted_parser.get_slots('Hello John', 'greeting3')
def test_should_parse_slots_after_deserialization(self):
    """Slot extraction must survive a persist/load round trip.

    Re-runs a subset of the slot-parsing fixtures against a parser that was
    persisted to disk and loaded back.
    """
    dataset = self.slots_dataset
    shared = self.get_shared_data(dataset)
    parser = LookupIntentParser(**shared).fit(dataset)
    parser.persist(self.tmp_file_path)
    deserialized_parser = LookupIntentParser.from_path(self.tmp_file_path, **shared)
    # Pairs of (input text, expected unresolved slots)
    texts = [('this is a dummy a query with another dummy_c at 10p.m. or at 12p.m.', [unresolved_slot(match_range=(10, 17), value='dummy a', entity='dummy_entity_1', slot_name='dummy_slot_name'), unresolved_slot(match_range=(37, 44), value='dummy_c', entity='dummy_entity_2', slot_name='dummy_slot_name2'), unresolved_slot(match_range=(45, 54), value='at 10p.m.', entity='snips/datetime', slot_name='startTime'), unresolved_slot(match_range=(58, 67), value='at 12p.m.', entity='snips/datetime', slot_name='startTime')]), ('this, is,, a, dummy a query with another dummy_c at 10pm or at 12p.m.', [unresolved_slot(match_range=(14, 21), value='dummy a', entity='dummy_entity_1', slot_name='dummy_slot_name'), unresolved_slot(match_range=(41, 48), value='dummy_c', entity='dummy_entity_2', slot_name='dummy_slot_name2'), unresolved_slot(match_range=(49, 56), value='at 10pm', entity='snips/datetime', slot_name='startTime'), unresolved_slot(match_range=(60, 69), value='at 12p.m.', entity='snips/datetime', slot_name='startTime')]), ('this is a dummy b', [unresolved_slot(match_range=(10, 17), value='dummy b', entity='dummy_entity_1', slot_name='dummy_slot_name')]), (' this is a dummy b ', [unresolved_slot(match_range=(11, 18), value='dummy b', entity='dummy_entity_1', slot_name='dummy_slot_name')])]
    for (text, expected_slots) in texts:
        parsing = deserialized_parser.parse(text)
        self.assertListEqual(expected_slots, parsing[RES_SLOTS])
def test_should_be_serializable_into_bytearray(self):
    """A parser serialized to bytes and restored must still parse correctly."""
    yaml_stream = io.StringIO('\n---\ntype: intent\nname: MakeTea\nutterances:\n- make me [number_of_cups:snips/number](one) cup of tea\n- i want [number_of_cups] cups of tea please\n- can you prepare [number_of_cups] cup of tea ?\n\n---\ntype: intent\nname: MakeCoffee\nutterances:\n- make me [number_of_cups:snips/number](two) cups of coffee\n- brew [number_of_cups] cups of coffee\n- can you prepare [number_of_cups] cup of coffee')
    dataset = Dataset.from_yaml_files('en', [yaml_stream]).json
    shared = self.get_shared_data(dataset)
    serialized = LookupIntentParser(**shared).fit(dataset).to_byte_array()
    restored_parser = LookupIntentParser.from_byte_array(serialized, **shared)
    result = restored_parser.parse('make me two cups of coffee')
    self.assertEqual('MakeCoffee', result[RES_INTENT][RES_INTENT_NAME])
def test_should_parse_naughty_strings(self):
    """Parsing hostile or unusual strings must never raise an exception."""
    yaml_stream = io.StringIO('\n---\ntype: intent\nname: my_intent\nutterances:\n- this is [slot1:entity1](my first entity)\n- this is [slot2:entity2](second_entity)')
    dataset = Dataset.from_yaml_files('en', [yaml_stream]).json
    corpus_path = ((TEST_PATH / 'resources') / 'naughty_strings.txt')
    with corpus_path.open(encoding='utf8') as f:
        naughty_strings = [line.strip('\n') for line in f.readlines()]
    parser = LookupIntentParser().fit(dataset)
    for naughty in naughty_strings:
        with self.fail_if_exception('Exception raised'):
            parser.parse(naughty)
def test_should_fit_with_naughty_strings_no_tags(self):
    """Fitting on hostile strings without any slot tags must never raise."""
    corpus_path = ((TEST_PATH / 'resources') / 'naughty_strings.txt')
    with corpus_path.open(encoding='utf8') as f:
        corpus = [line.strip('\n') for line in f.readlines()]
    naughty_dataset = {
        'intents': {'naughty_intent': {'utterances': [{DATA: [{TEXT: s}]} for s in corpus]}},
        'entities': dict(),
        'language': 'en',
    }
    with self.fail_if_exception('Exception raised'):
        LookupIntentParser().fit(naughty_dataset)
def test_should_fit_and_parse_with_non_ascii_tags(self):
    """Entity and slot names must be handled correctly in both fit and parse."""
    utterances = [{DATA: [{TEXT: ('string%s' % i), ENTITY: 'non_ascii_entity', SLOT_NAME: 'non_ascii_slot'}]} for i in range(10)]
    entity_config = {'use_synonyms': False, 'automatically_extensible': True, 'matching_strictness': 1.0, 'data': []}
    naughty_dataset = {'intents': {'naughty_intent': {'utterances': utterances}}, 'entities': {'non_ascii_entity': entity_config}, 'language': 'en'}
    with self.fail_if_exception('Exception raised'):
        parser = LookupIntentParser().fit(naughty_dataset)
        parsing = parser.parse('string0')
        expected_slot = {'entity': 'non_ascii_entity', 'range': {'start': 0, 'end': 7}, 'slotName': 'non_ascii_slot', 'value': 'string0'}
        self.assertEqual('naughty_intent', parsing[RES_INTENT][RES_INTENT_NAME])
        self.assertListEqual([expected_slot], parsing[RES_SLOTS])
def test_should_be_serializable_before_fitting(self):
    """Persisting an unfitted parser writes empty/None fields to disk."""
    parser = LookupIntentParser(config=LookupIntentParserConfig(ignore_stop_words=True))
    parser.persist(self.tmp_file_path)
    expected_dict = {'config': {'unit_name': 'lookup_intent_parser', 'ignore_stop_words': True}, 'language_code': None, 'intents_names': [], 'map': None, 'slots_names': [], 'entity_scopes': None, 'stop_words_whitelist': None}
    self.assertJsonContent((self.tmp_file_path / 'metadata.json'), {'unit_name': 'lookup_intent_parser'})
    self.assertJsonContent((self.tmp_file_path / 'intent_parser.json'), expected_dict)
# NOTE(review): the bare string below looks like a stripped decorator --
# presumably @mock.patch('snips_nlu.intent_parser.lookup_intent_parser.get_stop_words'),
# given the mock_get_stop_words parameter; as written it is a no-op statement.
# Confirm against the original source / VCS history.
('snips_nlu.intent_parser.lookup_intent_parser.get_stop_words')
def test_should_be_serializable(self, mock_get_stop_words):
    """Persisting a fitted parser must write the expected metadata and parser JSON."""
    dataset_stream = io.StringIO('\n---\ntype: intent\nname: searchFlight\nslots:\n - name: origin\n entity: city\n - name: destination\n entity: city\nutterances:\n - find me a flight from [origin](Paris) to [destination](New York)\n - I need a flight to [destination](Berlin)\n\n---\ntype: entity\nname: city\nvalues:\n - london\n - [new york, big apple]\n - [paris, city of lights]')
    dataset = Dataset.from_yaml_files('en', [dataset_stream]).json
    # Stop words are mocked so the serialized output is deterministic
    mock_get_stop_words.return_value = {'a', 'me'}
    config = LookupIntentParserConfig(ignore_stop_words=True)
    parser = LookupIntentParser(config=config).fit(dataset)
    parser.persist(self.tmp_file_path)
    # NOTE(review): the 'map' literal below uses the key '-' twice, so the
    # first entry is silently overwritten at parse time -- the keys were
    # presumably distinct hash values originally (compare the hash_str(...)
    # keys used in test_should_be_deserializable); this expected dict cannot
    # be correct as written.
    expected_dict = {'config': {'unit_name': 'lookup_intent_parser', 'ignore_stop_words': True}, 'intents_names': ['searchFlight'], 'language_code': 'en', 'map': {'-': [0, [0, 1]], '-': [0, [1]]}, 'slots_names': ['origin', 'destination'], 'entity_scopes': [{'entity_scope': {'builtin': [], 'custom': ['city']}, 'intent_group': ['searchFlight']}], 'stop_words_whitelist': dict()}
    metadata = {'unit_name': 'lookup_intent_parser'}
    self.assertJsonContent((self.tmp_file_path / 'metadata.json'), metadata)
    self.assertJsonContent((self.tmp_file_path / 'intent_parser.json'), expected_dict)
def test_should_be_deserializable(self):
    """A parser dict written to disk must load into a working parser.

    The 'map' keys are hashes (hash_str) of normalized utterance patterns
    where builtin entity placeholders are wrapped in '%'; values are
    [intent index, slot indices].
    """
    parser_dict = {'config': {'unit_name': 'lookup_intent_parser', 'ignore_stop_words': True}, 'language_code': 'en', 'map': {hash_str('make coffee'): [0, []], hash_str('prepare % snipsnumber % coffees'): [0, [0]], hash_str('% snipsnumber % teas at % snipstemperature %'): [1, [0, 1]]}, 'slots_names': ['nb_cups', 'tea_temperature'], 'intents_names': ['MakeCoffee', 'MakeTea'], 'entity_scopes': [{'entity_scope': {'builtin': ['snips/number'], 'custom': []}, 'intent_group': ['MakeCoffee']}, {'entity_scope': {'builtin': ['snips/number', 'snips/temperature'], 'custom': []}, 'intent_group': ['MakeTea']}], 'stop_words_whitelist': dict()}
    self.tmp_file_path.mkdir()
    metadata = {'unit_name': 'lookup_intent_parser'}
    self.writeJsonContent((self.tmp_file_path / 'intent_parser.json'), parser_dict)
    self.writeJsonContent((self.tmp_file_path / 'metadata.json'), metadata)
    resources = self.get_resources('en')
    builtin_entity_parser = BuiltinEntityParser.build(language='en')
    custom_entity_parser = EntityParserMock()
    parser = LookupIntentParser.from_path(self.tmp_file_path, custom_entity_parser=custom_entity_parser, builtin_entity_parser=builtin_entity_parser, resources=resources)
    # The loaded parser should resolve both intents, including builtin slots
    res_make_coffee = parser.parse('make me a coffee')
    res_make_tea = parser.parse('two teas at 90C please')
    expected_result_coffee = parsing_result(input='make me a coffee', intent=intent_classification_result('MakeCoffee', 1.0), slots=[])
    expected_result_tea = parsing_result(input='two teas at 90C please', intent=intent_classification_result('MakeTea', 1.0), slots=[{'entity': 'snips/number', 'range': {'end': 3, 'start': 0}, 'slotName': 'nb_cups', 'value': 'two'}, {'entity': 'snips/temperature', 'range': {'end': 16, 'start': 12}, 'slotName': 'tea_temperature', 'value': '90C'}])
    self.assertEqual(expected_result_coffee, res_make_coffee)
    self.assertEqual(expected_result_tea, res_make_tea)
def test_should_be_deserializable_before_fitting(self):
    """Loading an unfitted parser from disk yields a default-config parser."""
    parser_dict = {'config': {}, 'language_code': None, 'map': None, 'slots_names': [], 'intents_names': [], 'entity_scopes': None}
    self.tmp_file_path.mkdir()
    self.writeJsonContent((self.tmp_file_path / 'intent_parser.json'), parser_dict)
    self.writeJsonContent((self.tmp_file_path / 'metadata.json'), {'unit_name': 'dict_deterministic_intent_parser'})
    loaded_parser = LookupIntentParser.from_path(self.tmp_file_path)
    reference_parser = LookupIntentParser(config=LookupIntentParserConfig())
    self.assertEqual(loaded_parser.to_dict(), reference_parser.to_dict())
def test_get_entity_scopes(self):
    """_get_entity_scopes groups intents that share the same builtin/custom entity scope."""
    yaml_stream = io.StringIO('\n---\ntype: intent\nname: intent1\nutterances:\n - meeting [schedule_time:snips/datetime](today)\n\n---\ntype: intent\nname: intent2\nutterances:\n - hello world\n\n---\ntype: intent\nname: intent3\nutterances:\n - what will be the weather [weather_time:snips/datetime](tomorrow)\n \n---\ntype: intent\nname: intent4\nutterances:\n - find a flight for [city](Paris) [flight_time:snips/datetime](tomorrow)')
    dataset = Dataset.from_yaml_files('en', [yaml_stream]).json
    entity_scopes = _get_entity_scopes(dataset)
    expected_scopes = [{'entity_scope': {'builtin': ['snips/datetime'], 'custom': []}, 'intent_group': ['intent1', 'intent3']}, {'entity_scope': {'builtin': [], 'custom': []}, 'intent_group': ['intent2']}, {'entity_scope': {'builtin': ['snips/datetime'], 'custom': ['city']}, 'intent_group': ['intent4']}]
    # Group order is not guaranteed, so compare after sorting by intent group
    def by_intent_group(group_scope):
        return ' '.join(group_scope['intent_group'])
    self.assertListEqual(sorted(expected_scopes, key=by_intent_group), sorted(entity_scopes, key=by_intent_group))
def zero_mean_unit_var_norm(input_values: List[np.ndarray]) -> List[np.ndarray]:
    """Normalize each array to zero mean and (approximately) unit variance.

    A small epsilon (1e-5) is added to the variance before the square root
    to avoid division by zero on constant inputs.
    """
    normalized = []
    for values in input_values:
        centered = values - np.mean(values)
        normalized.append(centered / np.sqrt(np.var(values) + 1e-05))
    return normalized
def load_results():
    """Load NREL and SLAC degradation results from S3 and merge them.

    Returns:
        DataFrame indexed by system ID, restricted to systems whose SCSF run
        passed all sanity checks, with degradation in percent and derived
        columns ('difference', 'rd_range') added.
    """
    base = 's3://pvinsight.nrel/output/'
    nrel_data = pd.read_csv(base + 'pvo_results.csv')
    slac_data = pd.read_csv(base + 'scsf-unified-results.csv')
    # A run passes when none of the failure flags are set and the estimated
    # degradation is finite. np.all replaces np.alltrue, which was deprecated
    # and removed in NumPy 2.0.
    no_failures = np.all(np.logical_not(slac_data[['solver-error', 'f1-increase', 'obj-increase']]), axis=1)
    slac_data['all-pass'] = np.logical_and(no_failures, np.isfinite(slac_data['deg']))
    cols = ['ID', 'rd', 'deg', 'rd_low', 'rd_high', 'all-pass', 'fix-ts', 'num-days', 'num-days-used', 'use-frac', 'res-median', 'res-var', 'res-L0norm']
    df = pd.merge(nrel_data, slac_data, how='left', left_on='datastream', right_on='ID')
    df = df[cols]
    df.set_index('ID', inplace=True)
    # '== True' (rather than the bare column) deliberately drops NaN rows
    # introduced by the left merge.
    df = df[(df['all-pass'] == True)]
    # Express degradation as percent per year to match the NREL 'rd' column
    df['deg'] = df['deg'] * 100
    df['difference'] = df['rd'] - df['deg']
    df['rd_range'] = df['rd_high'] - df['rd_low']
    cols = ['rd', 'deg', 'difference', 'rd_range', 'res-median', 'res-var', 'res-L0norm', 'rd_low', 'rd_high', 'all-pass', 'fix-ts', 'num-days', 'num-days-used', 'use-frac']
    return df[cols]
# NOTE(review): '.parametrize(...)' is not valid stand-alone Python -- this
# looks like a stripped '@pytest.mark.parametrize' decorator; restore the
# '@pytest.mark' prefix before running. Confirm against the original source.
.parametrize('constructor_name', ['list', 'tuple', 'array', 'dataframe', 'sparse_csr', 'sparse_csc'])
def test_num_features(constructor_name):
    """_num_features must report 3 features for a 2x3 input in every container type."""
    X = [[1, 2, 3], [4, 5, 6]]
    X = _convert_container(X, constructor_name)
    assert (_num_features(X) == 3)
def test_works_no_rel(spark, cat_log, requested_cats, model):
    """Category predictions must match the expected table when the log has no relevance column."""
    expected = spark.createDataFrame(data=[['red_apples', 1, 1.0], ['healthy_food', 1, 1.0], ['fruits', 1, (2 / 3)], ['fruits', 2, (1 / 3)]], schema='category string, item_idx int, relevance double')
    feature_infos = [
        FeatureInfo(column='user_idx', feature_type=FeatureType.CATEGORICAL, feature_hint=FeatureHint.QUERY_ID),
        FeatureInfo(column='item_idx', feature_type=FeatureType.CATEGORICAL, feature_hint=FeatureHint.ITEM_ID),
        FeatureInfo(column='category', feature_type=FeatureType.CATEGORICAL),
    ]
    dataset = create_dataset(cat_log.drop('relevance'), feature_schema=FeatureSchema(feature_infos))
    model.fit(dataset)
    sparkDataFrameEqual(model.predict(requested_cats, k=3), expected)
def download(name=None, url=None):
    """Fetch a pre-trained model state dict by lookup name or direct URL.

    Args:
        name: key into ``_lookup_table``; when given, it takes precedence
            over ``url``.
        url: direct checkpoint URL, used when ``name`` is None.

    Returns:
        A dict that always contains a 'model' entry; if the checkpoint has
        a 'metadata' entry, it is printed.

    Raises:
        ValueError: if ``name`` is given but not present in the lookup table.
    """
    map_location = gpu_utils.get_device()
    if name is not None:
        try:
            url = _lookup_table[name]
        except KeyError as k:
            # Fail loudly: the original code printed a message and continued,
            # which passed a bad/None url to load_state_dict_from_url and
            # failed later with an obscure error.
            raise ValueError('Pre-trained model {} is not downloadable!'.format(k)) from k
    state = hub.load_state_dict_from_url(url, model_dir=None, map_location=map_location, check_hash=True)
    # Wrap bare state dicts so callers can always index state['model']
    if not isinstance(state, dict):
        state = {'model': state}
    if 'metadata' in state:
        print(state['metadata'])
    return state
def register_Ns3LteRrcSapCellSelectionInfo_methods(root_module, cls):
    """Register Python bindings for ns3::LteRrcSap::CellSelectionInfo.

    Auto-generated pybindgen registration: a default constructor, a copy
    constructor, and the struct's two public int8_t attributes.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::LteRrcSap::CellSelectionInfo const &', 'arg0')])
    # Mutable int8_t fields of the underlying C++ struct
    cls.add_instance_attribute('qQualMin', 'int8_t', is_const=False)
    cls.add_instance_attribute('qRxLevMin', 'int8_t', is_const=False)
    return
class PlanarDiagram(AbstractPartitionDiagram):
    """A partition diagram that is additionally required to be planar."""

    def __classcall_private__(cls, diag):
        """Normalize construction so elements always live in a PlanarDiagrams parent.

        The parent's order is taken as the largest vertex label in *diag*.
        """
        largest_vertex = max((v for block in diag for v in block))
        parent = PlanarDiagrams(largest_vertex)
        return parent(diag)

    def check(self):
        """Run the base-class validation, then enforce planarity."""
        super().check()
        if not self.is_planar():
            raise ValueError('the diagram %s must be planar' % self)
def get_simclr_augmentation(P, image_size):
    """Build the SimCLR color-augmentation pipeline for the configured dataset.

    Args:
        P: experiment config; only ``P.dataset`` is read.
        image_size: target size for the (currently disabled) random crop.

    Returns:
        nn.Sequential of augmentation layers.
    """
    color_jitter = TL.ColorJitterLayer(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1, p=0.8)
    color_gray = TL.RandomColorGrayLayer(p=0.2)
    # NOTE(review): the original code built a TL.RandomResizedCropLayer with
    # scale=(P.resize_factor, 1.0) and then immediately overwrote the variable
    # with nn.Identity(), leaving the crop as dead code. The dead construction
    # is removed here; runtime behavior is unchanged (crop disabled). Confirm
    # whether the crop was meant to be applied conditionally before
    # re-enabling it.
    resize_crop = nn.Identity()
    if P.dataset == 'imagenet':
        # ImageNet pipeline skips the (disabled) crop slot entirely
        transform = nn.Sequential(color_jitter, color_gray)
    else:
        transform = nn.Sequential(color_jitter, color_gray, resize_crop)
    return transform
class MaxPoolingLoss(object):
    """Loss reweighting that emphasizes the hardest (highest-loss) elements.

    Per-element weights are computed by the external ``_mpl`` extension from
    the sorted losses; ``ratio`` controls the fraction of elements involved
    and ``p`` the sharpness of the pooling (exact semantics defined by
    ``_mpl.compute_weights`` -- see that extension).
    """
    def __init__(self, ratio=0.3, p=1.7, reduce=True):
        # NOTE(review): the message says 'range [0, 1]' but the check actually
        # enforces (0, 1] -- ratio == 0 is rejected.
        assert ((ratio > 0) and (ratio <= 1)), 'ratio should be in range [0, 1]'
        assert (p > 1), 'p should be >1'
        self.ratio = ratio  # fraction parameter passed to the weight kernel
        self.p = p  # pooling exponent, must be > 1
        self.reduce = reduce  # if True, __call__ returns the summed weighted loss
    def __call__(self, loss):
        """Reweight *loss* element-wise; return the weighted tensor or its sum.

        Args:
            loss: tensor of per-element losses (any shape, CPU or CUDA).
        """
        is_cuda = loss.is_cuda
        shape = loss.size()
        loss = loss.view((- 1))
        # Sort ascending; the extension consumes values plus their original indices
        (losses, indices) = loss.sort()
        # The extension works on CPU tensors only, hence the transfers
        losses = losses.cpu()
        indices = indices.cpu()
        weights = torch.zeros(losses.size(0))
        # Presumably fills `weights` in place, addressed via `indices` --
        # confirm against the _mpl extension's implementation.
        _mpl.compute_weights(losses.size(0), losses, indices, weights, self.ratio, self.p)
        # Restore the original shape before the element-wise product
        loss = loss.view(shape)
        weights = weights.view(shape)
        if is_cuda:
            weights = weights.cuda()
        loss = (weights * loss)
        if self.reduce:
            loss = loss.sum()
        return loss
# NOTE(review): the two bare names below look like stripped decorators
# (e.g. '@_safe_enum' / '@_enum' applied to the class); as written they are
# no-op expression statements. Confirm against the original source.
_safe_enum
_enum
class Typeclasses(aenum.AutoNumberEnum):
    """Auto-numbered enumeration of the supported scalar dtypes.

    Each member carries the corresponding dtype object (Python ``bool`` plus
    the sized numeric dtypes imported at module level) as its value.
    """
    bool = bool
    bool_ = bool_
    int8 = int8
    int16 = int16
    int32 = int32
    int64 = int64
    uint8 = uint8
    uint16 = uint16
    uint32 = uint32
    uint64 = uint64
    float16 = float16
    float32 = float32
    float64 = float64
    complex64 = complex64
    complex128 = complex128
def common(fun_v, get_exact=None, n_eigs=5, tau=0.0):
    """Build a SfePy problem description for a 2D quantum eigenvalue problem.

    Args:
        fun_v: material function providing the potential term.
        get_exact: optional callable ``(n_eigs, box_size, dim) -> eigenvalues``
            used for error reporting against exact values.
        n_eigs: number of eigenvalues to compute.
        tau: spectral shift ('sigma') passed to the shift-invert eigensolver.

    Returns:
        dict of the local problem-description items (via ``locals()``), as
        consumed by SfePy's declarative problem interface. Do not introduce
        unrelated local variables in this function.
    """
    def report_eigs(pb, evp):
        # Final post-process hook: print the FEM eigenvalues, with relative
        # errors when exact values are available.
        from numpy import nan  # numpy.NaN was deprecated and removed in NumPy 2.0
        bounding_box = pb.domain.mesh.get_bounding_box()
        box_size = (bounding_box[1][0] - bounding_box[0][0])
        output(('box_size: %f' % box_size))
        output('eigenvalues:')
        if (get_exact is not None):
            eeigs = get_exact(n_eigs, box_size, pb.domain.shape.dim)
            output('n exact FEM error')
            for (ie, eig) in enumerate(evp.eigs):
                if (ie < len(eeigs)):
                    exact = eeigs[ie]
                    err = (100 * abs(((exact - eig) / exact)))
                else:
                    # More eigenvalues computed than exact references available
                    exact = nan
                    err = nan
                output(('%d: %.8f %.8f %7.4f%%' % (ie, exact, eig, err)))
        else:
            output('n FEM')
            for (ie, eig) in enumerate(evp.eigs):
                output(('%d: %.8f' % (ie, eig)))
    filename_mesh = (data_dir + '/meshes/quantum/square.mesh')
    options = {'n_eigs': n_eigs, 'eigs_only': False, 'post_process_hook_final': 'report_eigs', 'evps': 'eig'}
    regions = {'Omega': 'all', 'Surface': ('vertices of surface', 'facet')}
    materials = {'m': ({'val': 0.5},), 'mat_v': 'fun_v'}
    functions = {'fun_v': (fun_v,)}
    approx_order = 2
    fields = {'field_Psi': ('real', 'scalar', 'Omega', approx_order)}
    variables = {'Psi': ('unknown field', 'field_Psi', 0), 'v': ('test field', 'field_Psi', 'Psi')}
    # Homogeneous Dirichlet condition: wave function vanishes on the boundary
    ebcs = {'ZeroSurface': ('Surface', {'Psi.0': 0.0})}
    integrals = {'i': (2 * approx_order)}
    equations = {'lhs': 'dw_laplace.i.Omega(m.val, v, Psi)\n + dw_dot.i.Omega(mat_v.V, v, Psi)', 'rhs': 'dw_dot.i.Omega(v, Psi)'}
    solvers = {'eig': ('eig.scipy', {'method': 'eigsh', 'tol': 1e-10, 'maxiter': 150, 'which': 'LM', 'sigma': tau})}
    return locals()
def owner_create_rref_my_script_module(a):
    """Create an owner RRef wrapping a fresh MyScriptModule, typed as MyModuleInterface."""
    module = MyScriptModule(a)
    return rpc.RRef(module, type_hint=MyModuleInterface)
class Slice(Function):
    """Function node that slices another function's output array.

    Each entry of ``slices`` controls one dimension of the input array: an
    int selects a single index (kept as a length-1 dimension), a
    [start, stop] pair selects a range, 'c'/'center' selects the middle
    element, and None keeps the whole axis.
    """
    node_type = 'goos.function.slice'

    def __init__(self, fun: Function, slices: List[Union[(int, List[int], str)]]) -> None:
        super().__init__(fun)
        self._slices = slices

    def eval(self, inputs: List[goos.NumericFlow]) -> goos.NumericFlow:
        """Return a flow containing the sliced view of the single input flow."""
        shape = inputs[0].array.shape
        slices = self._make_slices(shape)
        return goos.NumericFlow(inputs[0].array[slices])

    def grad(self, inputs: List[goos.NumericFlow], grad_val: goos.NumericFlow.Grad) -> goos.NumericFlow.Grad:
        """Scatter the incoming gradient back into a zero array of the input's shape."""
        shape = inputs[0].array.shape
        slices = self._make_slices(shape)
        grad = type(inputs[0]).Grad()
        grad.array_grad = np.zeros_like(inputs[0].array)
        grad.array_grad[slices] = grad_val.array_grad
        return [grad]

    def _make_slices(self, shape) -> tuple:
        """Translate the per-dimension slice spec into a tuple of `slice` objects.

        Raises:
            ValueError: if a spec entry is not an int, list, 'c'/'center', or None.
        """
        slices = []
        for (i, dim) in enumerate(shape):
            spec = self._slices[i]
            if isinstance(spec, int):
                slices += [slice(spec, (spec + 1))]
            # `list`, not typing.List: isinstance with typing aliases is
            # deprecated (and fails outright for subscripted forms).
            elif isinstance(spec, list):
                slices += [slice(spec[0], spec[1])]
            elif ((spec == 'c') or (spec == 'center')):
                slices += [slice((dim // 2), ((dim // 2) + 1))]
            elif (spec is None):
                slices += [slice(0, dim)]
            else:
                raise ValueError(('Invalid slice value, got ' + str(spec)))
        # NOTE: a tuple (not a list) is required for NumPy multi-axis indexing
        return tuple(slices)
def get_processor():
    """Assemble an AlignProcessor from an EfficientNet image processor and a BERT tokenizer."""
    image_processor = EfficientNetImageProcessor(do_center_crop=True, rescale_factor=(1 / 127.5), rescale_offset=True, do_normalize=False, include_top=False, resample=Image.BILINEAR)
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    tokenizer.model_max_length = 64
    return AlignProcessor(image_processor=image_processor, tokenizer=tokenizer)
class YTVISDatasetMapper():
    """Map a YTVIS-style video dataset dict into the model's input format.

    At train time a small window of frames is sampled per video; each frame
    is augmented independently and its annotations are converted into
    Instances with per-object ids kept consistent across the sampled frames.
    When ``is_tgt`` is False, source-dataset category ids are remapped to the
    target dataset's contiguous ids via ``self.src2tgt``.
    """
    def __init__(self, is_train: bool, is_tgt: bool, *, augmentations: List[Union[(T.Augmentation, T.Transform)]], image_format: str, use_instance_mask: bool=False, sampling_frame_num: int=2, sampling_frame_range: int=5, sampling_frame_shuffle: bool=False, num_classes: int=40, src_dataset_name: str='', tgt_dataset_name: str=''):
        """Store configuration and, for cross-dataset training, build the category-id mapping.

        Raises:
            NotImplementedError: if the (src, tgt) dataset pair has no known
                category mapping.
        """
        self.is_train = is_train
        self.is_tgt = is_tgt
        self.augmentations = T.AugmentationList(augmentations)
        self.image_format = image_format
        self.use_instance_mask = use_instance_mask
        self.sampling_frame_num = sampling_frame_num
        self.sampling_frame_range = sampling_frame_range
        self.sampling_frame_shuffle = sampling_frame_shuffle
        self.num_classes = num_classes
        if (not is_tgt):
            # Cross-dataset mode: pick the dataset-id translation table that
            # matches the (source, target) pair, then convert it to
            # contiguous-id space using each dataset's metadata.
            # NOTE(review): src_dataset_name defaults to '' -- presumably a
            # non-empty name is required whenever is_tgt is False; confirm.
            self.src_metadata = MetadataCatalog.get(src_dataset_name)
            self.tgt_metadata = MetadataCatalog.get(tgt_dataset_name)
            if tgt_dataset_name.startswith('ytvis_2019'):
                src2tgt = OVIS_TO_YTVIS_2019
            elif tgt_dataset_name.startswith('ytvis_2021'):
                src2tgt = OVIS_TO_YTVIS_2021
            elif tgt_dataset_name.startswith('ovis'):
                if src_dataset_name.startswith('ytvis_2019'):
                    src2tgt = YTVIS_2019_TO_OVIS
                elif src_dataset_name.startswith('ytvis_2021'):
                    src2tgt = YTVIS_2021_TO_OVIS
                else:
                    raise NotImplementedError
            else:
                raise NotImplementedError
            self.src2tgt = {}
            for (k, v) in src2tgt.items():
                self.src2tgt[self.src_metadata.thing_dataset_id_to_contiguous_id[k]] = self.tgt_metadata.thing_dataset_id_to_contiguous_id[v]
        logger = logging.getLogger(__name__)
        mode = ('training' if is_train else 'inference')
        logger.info(f'[DatasetMapper] Augmentations used in {mode}: {augmentations}')
    # NOTE(review): takes `cls` and builds a kwargs dict in detectron2's
    # `from_config` style -- presumably a stripped '@classmethod' (and/or
    # '@configurable') decorator; confirm against the original source.
    def from_config(cls, cfg, is_train: bool=True, is_tgt: bool=True):
        """Translate a detectron2 config into constructor kwargs for this mapper."""
        augs = build_augmentation(cfg, is_train)
        sampling_frame_num = cfg.INPUT.SAMPLING_FRAME_NUM
        sampling_frame_range = cfg.INPUT.SAMPLING_FRAME_RANGE
        sampling_frame_shuffle = cfg.INPUT.SAMPLING_FRAME_SHUFFLE
        ret = {'is_train': is_train, 'is_tgt': is_tgt, 'augmentations': augs, 'image_format': cfg.INPUT.FORMAT, 'use_instance_mask': cfg.MODEL.MASK_ON, 'sampling_frame_num': sampling_frame_num, 'sampling_frame_range': sampling_frame_range, 'sampling_frame_shuffle': sampling_frame_shuffle, 'num_classes': cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, 'tgt_dataset_name': cfg.DATASETS.TRAIN[(- 1)]}
        return ret
    def __call__(self, dataset_dict):
        """Convert one video dataset dict into per-frame tensors and Instances.

        Args:
            dataset_dict: dict with at least 'length', 'file_names' and
                (at train time) per-frame 'annotations'.

        Returns:
            A new dict with 'image' (list of CHW uint8 tensors), 'instances',
            'file_names', 'frame_idx' and 'video_len' for the sampled frames.
        """
        dataset_dict = copy.deepcopy(dataset_dict)
        video_length = dataset_dict['length']
        if self.is_train:
            # Sample a reference frame, then (sampling_frame_num - 1) more
            # frames from a window of +/- sampling_frame_range around it
            ref_frame = random.randrange(video_length)
            start_idx = max(0, (ref_frame - self.sampling_frame_range))
            end_idx = min(video_length, ((ref_frame + self.sampling_frame_range) + 1))
            selected_idx = np.random.choice(np.array((list(range(start_idx, ref_frame)) + list(range((ref_frame + 1), end_idx)))), (self.sampling_frame_num - 1))
            selected_idx = (selected_idx.tolist() + [ref_frame])
            selected_idx = sorted(selected_idx)
            if self.sampling_frame_shuffle:
                random.shuffle(selected_idx)
        else:
            # Inference uses every frame of the video
            selected_idx = range(video_length)
        video_annos = dataset_dict.pop('annotations', None)
        file_names = dataset_dict.pop('file_names', None)
        if self.is_train:
            # Assign each object id appearing in the sampled frames a dense
            # index so instances align across frames
            _ids = set()
            for frame_idx in selected_idx:
                _ids.update([anno['id'] for anno in video_annos[frame_idx]])
            ids = dict()
            for (i, _id) in enumerate(_ids):
                ids[_id] = i
        dataset_dict['video_len'] = len(video_annos)
        dataset_dict['frame_idx'] = list(selected_idx)
        dataset_dict['image'] = []
        dataset_dict['instances'] = []
        dataset_dict['file_names'] = []
        for frame_idx in selected_idx:
            dataset_dict['file_names'].append(file_names[frame_idx])
            image = utils.read_image(file_names[frame_idx], format=self.image_format)
            utils.check_image_size(dataset_dict, image)
            # Apply the augmentations to this frame (each frame independently)
            aug_input = T.AugInput(image)
            transforms = self.augmentations(aug_input)
            image = aug_input.image
            image_shape = image.shape[:2]
            # HWC -> CHW tensor for the model
            dataset_dict['image'].append(torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))))
            if ((video_annos is None) or (not self.is_train)):
                continue
            # Deep-copy annotations before applying in-place transforms
            _frame_annos = []
            for anno in video_annos[frame_idx]:
                _anno = {}
                for (k, v) in anno.items():
                    _anno[k] = copy.deepcopy(v)
                _frame_annos.append(_anno)
            annos = [utils.transform_instance_annotations(obj, transforms, image_shape) for obj in _frame_annos if (obj.get('iscrowd', 0) == 0)]
            # Place each annotation at its object's dense index; objects not
            # visible in this frame keep a dummy annotation
            sorted_annos = [_get_dummy_anno() for _ in range(len(ids))]
            for _anno in annos:
                idx = ids[_anno['id']]
                sorted_annos[idx] = _anno
            _gt_ids = [_anno['id'] for _anno in sorted_annos]
            instances = utils.annotations_to_instances(sorted_annos, image_shape, mask_format='bitmask')
            if (not self.is_tgt):
                # Remap source category ids to target contiguous ids; unmapped
                # categories become -1
                instances.gt_classes = torch.tensor([(self.src2tgt[c] if (c in self.src2tgt) else (- 1)) for c in instances.gt_classes.tolist()])
            instances.gt_ids = torch.tensor(_gt_ids)
            instances = filter_empty_instances(instances)
            if (not instances.has('gt_masks')):
                # Guarantee a gt_masks field so downstream code need not branch
                instances.gt_masks = BitMasks(torch.empty((0, *image_shape)))
            dataset_dict['instances'].append(instances)
        return dataset_dict
# NOTE(review): the bare name below looks like a stripped decorator (e.g.
# '@ex.named_config' / a sacred-style '_config' marker); as written it is a
# no-op expression statement. Confirm against the original source.
_config
def task_finetune_msvdqa():
    """Experiment config for fine-tuning on MSVD-QA (open-ended video QA).

    The local assignments below are collected by the config framework; they
    are not used directly inside this function.
    """
    exp_name = 'finetune_msvd_qa'
    datasets = ['msvdqa']
    loss_names = _loss_names({'openend_vqa': 1})
    batch_size = 512
    # NOTE(review): named 'msrvttqa_label_size' although this config targets
    # MSVD-QA -- presumably the shared answer-vocabulary size key; confirm.
    msrvttqa_label_size = 1001
    max_epoch = 20
    max_steps = None
    warmup_steps = 0.1
    draw_false_image = 0
    learning_rate = 0.0001
    val_check_interval = 1.0
    lr_mult = 10
def test_pickle_config():
    """Config objects -- including functions and classes defined inside config
    code -- must survive a pickle round trip with behavior intact."""
    import pickle
    import io
    config = Config()
    config.load_file(StringIO(textwrap.dedent(' #!returnn.py\n\n def my_custom_func():\n return 42\n\n class CustomClass:\n x = 43\n\n def __init__(self):\n super().__init__()\n CustomClass.x = 44\n\n def get_value(self):\n return CustomClass.x\n ')))
    f = config.typed_dict['my_custom_func']
    # Instantiating bumps the class attribute from 43 to 44 (see __init__)
    obj = config.typed_dict['CustomClass']()
    sio = io.BytesIO()
    # NOTE(review): uses the private pure-Python pickler (pickle._Pickler)
    # rather than pickle.Pickler -- presumably intentional so Config's custom
    # reduction hooks are exercised; confirm.
    p = pickle._Pickler(sio)
    with global_config_ctx(config):
        p.dump(config)
    config_ = pickle.loads(sio.getvalue())
    f_ = config_.typed_dict['my_custom_func']
    # Unpickled function/class are distinct objects with identical behavior
    assert (f is not f_)
    assert (f() == f_() == 42)
    obj_ = config_.typed_dict['CustomClass']()
    assert (type(obj) is not type(obj_))
    assert (obj.get_value() == obj_.get_value() == 44)
def build_model(args, input_dim, hidden_dims, context_dim, num_blocks, conditional):
    """Assemble a SequentialFlow of CNF blocks, optionally interleaved with moving batch norms.

    With batch norm enabled the layer order is BN, CNF, BN, CNF, ..., BN
    (one leading BN, then one after each CNF block).
    """
    def build_cnf():
        # One continuous normalizing flow: dynamics net wrapped in an ODE function
        diffeq = ODEnet(hidden_dims=hidden_dims, input_shape=(input_dim,), context_dim=context_dim, layer_type=args.layer_type, nonlinearity=args.nonlinearity)
        odefunc = ODEfunc(diffeq=diffeq)
        return CNF(odefunc=odefunc, T=args.time_length, train_T=args.train_T, conditional=conditional, solver=args.solver, use_adjoint=args.use_adjoint, atol=args.atol, rtol=args.rtol)
    chain = [build_cnf() for _ in range(num_blocks)]
    if args.batch_norm:
        bn_layers = [MovingBatchNorm1d(input_dim, bn_lag=args.bn_lag, sync=args.sync_bn) for _ in range(num_blocks)]
        bn_chain = [MovingBatchNorm1d(input_dim, bn_lag=args.bn_lag, sync=args.sync_bn)]
        for (cnf_block, bn_layer) in zip(chain, bn_layers):
            bn_chain.extend((cnf_block, bn_layer))
        chain = bn_chain
    return SequentialFlow(chain)
def CreateDataLoader(opt):
    """Instantiate the project's CustomDatasetDataLoader, print its name,
    and initialize it with the given options before returning it."""
    loader = CustomDatasetDataLoader()
    print(loader.name())
    loader.initialize(opt)
    return loader
class EmptyCondition(Condition):
    """Condition that is met exactly when the watched container is empty."""

    def __init__(self, container: list):
        self._container = container

    def condition_met(self):
        # Second tuple element mirrors the original contract (always False).
        is_empty = not self._container
        return (is_empty, False)
def color_transform(image, rng, *, brightness, contrast, saturation, hue, to_grayscale_prob, color_jitter_prob, apply_prob, shuffle):
    """Randomized color jitter (brightness/contrast/saturation/hue) plus an
    optional grayscale conversion, written so every branch is JAX-traceable.

    Args:
        image: input image — assumed to be float-valued in [0, 1] with the
            channel axis last (the code splits axis -1 into 3) — TODO confirm.
        rng: a JAX PRNG key, split for each individual decision/transform.
        brightness/contrast/saturation/hue: jitter strengths; a transform is
            built only when its strength is > 0.
        to_grayscale_prob, color_jitter_prob, apply_prob: independent
            probabilities gating grayscale, the jitter group, and the whole op.
        shuffle: when True, the four jitter ops run in a random order.
    """
    (apply_rng, transform_rng) = jax.random.split(rng)
    (perm_rng, b_rng, c_rng, s_rng, h_rng, cj_rng, gs_rng) = jax.random.split(transform_rng, 7)
    # Bernoulli gates for: the whole transform, grayscale, and color jitter.
    should_apply = (jax.random.uniform(apply_rng, shape=()) <= apply_prob)
    should_apply_gs = (jax.random.uniform(gs_rng, shape=()) <= to_grayscale_prob)
    should_apply_color = (jax.random.uniform(cj_rng, shape=()) <= color_jitter_prob)
    def _make_cond(fn, idx):
        # Wrap `fn` so it only fires at position `idx` of the (possibly
        # shuffled) order, and only when both gates are on; output is clipped
        # to [0, 1] and gradients are stopped.
        def identity_fn(x, unused_rng, unused_param):
            return x
        def cond_fn(args, i):
            def clip(args):
                return jax.tree_map((lambda arg: jnp.clip(arg, 0.0, 1.0)), args)
            # Legacy jax.lax.cond signature: (pred, true_operand, true_fn,
            # false_operand, false_fn) — NOTE(review): removed in newer JAX;
            # confirm the pinned JAX version.
            out = jax.lax.cond(((should_apply & should_apply_color) & (i == idx)), args, (lambda a: clip(fn(*a))), args, (lambda a: identity_fn(*a)))
            return jax.lax.stop_gradient(out)
        return cond_fn
    random_brightness_cond = _make_cond(_random_brightness, idx=0)
    random_contrast_cond = _make_cond(_random_contrast, idx=1)
    random_saturation_cond = _make_cond(_random_saturation, idx=2)
    random_hue_cond = _make_cond(_random_hue, idx=3)
    def _color_jitter(x):
        # Work on the (R, G, B) channels as a tuple of 2-D planes.
        rgb_tuple = tuple(jax.tree_map(jnp.squeeze, jnp.split(x, 3, axis=(- 1))))
        if shuffle:
            order = jax.random.permutation(perm_rng, jnp.arange(4, dtype=jnp.int32))
        else:
            order = range(4)
        # Every transform is invoked at every position; the per-op cond
        # decides which one actually fires, keeping the trace shape static.
        for idx in order:
            if (brightness > 0):
                rgb_tuple = random_brightness_cond((rgb_tuple, b_rng, brightness), idx)
            if (contrast > 0):
                rgb_tuple = random_contrast_cond((rgb_tuple, c_rng, contrast), idx)
            if (saturation > 0):
                rgb_tuple = random_saturation_cond((rgb_tuple, s_rng, saturation), idx)
            if (hue > 0):
                rgb_tuple = random_hue_cond((rgb_tuple, h_rng, hue), idx)
        return jnp.stack(rgb_tuple, axis=(- 1))
    out_apply = _color_jitter(image)
    # Optional grayscale conversion, gated on the same global apply switch.
    out_apply = jax.lax.cond((should_apply & should_apply_gs), out_apply, _to_grayscale, out_apply, (lambda x: x))
    return jnp.clip(out_apply, 0.0, 1.0)
class Mishra01(Benchmark):
    """Mishra benchmark function 1.

    f(x) = (1 + x_N)^{x_N} with x_N = N - sum(x_1..x_{N-1}); the global
    minimum f = 2.0 is attained at x_i = 1 for all i.
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        # Search domain: each coordinate in [0, 1 + 1e-9].
        upper = (1.0 + 1e-09)
        self._bounds = [(0.0, upper) for _ in range(self.N)]
        self.global_optimum = [[1.0] * self.N]
        self.fglob = 2.0
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        # x_N is determined by the first N-1 coordinates.
        xn = self.N - sum(x[:-1])
        return (1 + xn) ** xn
class RegularPartitionTuples(PartitionTuples):
    """Parent class of `ell`-regular partition tuples: tuples of partitions in
    which no part repeats `ell` or more times in any component."""

    def __init__(self, regular, **kwds):
        # NOTE(review): the error message says "greater than 1" while the
        # check only rejects values < 1 (so regular == 1 is accepted) —
        # confirm the intended bound.
        if ((regular not in ZZ) or (regular < 1)):
            raise ValueError('regular must be an integer greater than 1')
        self._ell = regular
        PartitionTuples.__init__(self, **kwds)

    def __contains__(self, mu):
        """Check membership: every component must use each part fewer than
        `self._ell` times (zero parts are ignored)."""
        if (not PartitionTuples.__contains__(self, mu)):
            return False
        if isinstance(mu, Partition):
            # to_exp() lists multiplicities; pad with [0] so max() is defined
            # for the empty partition.
            return (max((mu.to_exp() + [0])) < self._ell)
        if isinstance(mu, PartitionTuple):
            return all(((max((nu.to_exp() + [0])) < self._ell) for nu in mu))
        if (not mu):
            # The empty tuple/list is vacuously regular.
            return True
        if (mu in _Partitions):
            # A raw list of parts representing a single partition.
            return all(((mu.count(i) < self._ell) for i in set(mu) if (i > 0)))
        # A raw list/tuple of partitions.
        return all(((list(nu).count(i) < self._ell) for nu in mu for i in set(nu) if (i > 0)))

    def _an_element_(self):
        """Return a representative element, defaulting to level 4 / size 3
        when those parameters are unconstrained."""
        if (self._level is None):
            lvl = 4
        else:
            lvl = self._level
        if (self._size is None):
            size = 3
        else:
            size = self._size
        elt = RegularPartitionTuples_level_size(lvl, size, self._ell).an_element()
        return self.element_class(self, list(elt))
class DebertaV2Tokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for DeBERTa-v2, delegating the actual
    tokenization to an :class:`SPMTokenizer` built from `vocab_file`.

    Special-token layout follows BERT conventions:
    single sequence: ``[CLS] X [SEP]``; pair: ``[CLS] A [SEP] B [SEP]``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=False, split_by_punct=False, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', **kwargs):
        super().__init__(do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, split_by_punct=split_by_punct, **kwargs)
        if (not os.path.isfile(vocab_file)):
            raise ValueError("Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained model use `tokenizer = DebertaV2Tokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
        self.do_lower_case = do_lower_case
        self.split_by_punct = split_by_punct
        self._tokenizer = SPMTokenizer(vocab_file, split_by_punct=split_by_punct)

    @property
    def vocab_size(self):
        """Size of the SentencePiece vocabulary (excluding added tokens)."""
        # FIX: must be a property — `_convert_id_to_token` compares
        # `index < self.vocab_size`, which requires attribute-style access.
        return len(self.vocab)

    @property
    def vocab(self):
        """The underlying SentencePiece token -> id mapping."""
        # FIX: must be a property — `get_vocab` calls `self.vocab.copy()`.
        return self._tokenizer.vocab

    def get_vocab(self):
        """Return the full vocabulary including tokens added after loading."""
        vocab = self.vocab.copy()
        vocab.update(self.get_added_vocab())
        return vocab

    def _tokenize(self, text):
        """Tokenize a string, lower-casing first when configured."""
        if self.do_lower_case:
            text = text.lower()
        return self._tokenizer.tokenize(text)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to its id via the SentencePiece model."""
        return self._tokenizer.spm.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an id to its token; out-of-range ids map to the unk token."""
        return (self._tokenizer.spm.IdToPiece(index) if (index < self.vocab_size) else self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join a token sequence back into a single string."""
        return self._tokenizer.decode(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add [CLS]/[SEP] around one sequence or a pair of sequences."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            if (token_ids_1 is not None):
                raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')
            return list(map((lambda x: (1 if (x in [self.sep_token_id, self.cls_token_id]) else 0)), token_ids_0))
        if (token_ids_1 is not None):
            return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
        return (([1] + ([0] * len(token_ids_0))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for `[CLS] A [SEP]`, 1 for `B [SEP]` when present."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so SentencePiece treats the first word
        like any other (word-boundary marker)."""
        add_prefix_space = kwargs.pop('add_prefix_space', False)
        if (is_split_into_words or add_prefix_space):
            text = (' ' + text)
        return (text, kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Delegate saving of the SentencePiece model to the inner tokenizer."""
        return self._tokenizer.save_pretrained(save_directory, filename_prefix=filename_prefix)
def assert_line_wolfe(x, p, s, f, fprime, **kw):
    """Check that step size `s` satisfies the Wolfe conditions for the 1-D
    restriction of `f` along direction `p` starting at `x`."""
    def phi(step):
        # Objective along the search line.
        return f(x + (p * step))

    def derphi(step):
        # Directional derivative along the search line.
        return np.dot(fprime(x + (p * step)), p)

    assert_wolfe(s, phi=phi, derphi=derphi, **kw)
def new_data_aug_generator(args=None):
    """Build the three-stage (crop/flip, color, normalize) augmentation
    pipeline used for training, configured from `args`.

    NOTE: `args` must not actually be None — its attributes are read
    immediately (matches the original behavior).
    """
    img_size = args.input_size
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    if args.src:
        # Simple-random-crop variant: resize + padded random crop.
        primary_tfl = [
            transforms.Resize(img_size, interpolation=3),
            transforms.RandomCrop(img_size, padding=4, padding_mode='reflect'),
            transforms.RandomHorizontalFlip(),
        ]
    else:
        primary_tfl = [
            RandomResizedCropAndInterpolation(img_size, scale=(0.08, 1.0), interpolation='bicubic'),
            transforms.RandomHorizontalFlip(),
        ]
    # One of grayscale / solarization / blur, chosen uniformly.
    secondary_tfl = [transforms.RandomChoice([gray_scale(p=1.0), Solarization(p=1.0), GaussianBlur(p=1.0)])]
    if args.color_jitter is not None and args.color_jitter != 0:
        secondary_tfl.append(transforms.ColorJitter(args.color_jitter, args.color_jitter, args.color_jitter))
    final_tfl = [
        transforms.ToTensor(),
        transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std)),
    ]
    return transforms.Compose(primary_tfl + secondary_tfl + final_tfl)
class LocalParallelRuntime(Runtime):
    """Runtime that executes multiple runs concurrently on the local machine,
    scheduling them against a core budget and an optional memory budget."""

    def __init__(self, cores: int, mem: tp.Optional[int]=None, verbose=False, executor: exectools.Executor=exectools.LocalExecutor()):
        # NOTE(review): the default executor is a mutable default argument —
        # it is created once at import time and shared by every instance that
        # relies on the default. Confirm this sharing is intended.
        super().__init__()
        self.runs_noprereq: tp.List[Run] = []
        self.runs_prereq: tp.List[Run] = []
        self.complete: tp.Set[Run] = set()
        self.cores = cores
        self.mem = mem
        self.verbose = verbose
        self.executor = executor
        # Tasks for runs that have been started but not yet collected.
        self._pending_jobs: tp.Set[asyncio.Task] = set()
        # Set in start(); cancelled by interrupt_handler().
        self._starter_task: asyncio.Task

    def add_run(self, run: Run) -> None:
        """Queue a run, rejecting it if it can never fit the budgets."""
        if (run.experiment.resreq_cores() > self.cores):
            raise RuntimeError('Not enough cores available for run')
        if ((self.mem is not None) and (run.experiment.resreq_mem() > self.mem)):
            raise RuntimeError('Not enough memory available for run')
        if (run.prereq is None):
            self.runs_noprereq.append(run)
        else:
            self.runs_prereq.append(run)

    async def do_run(self, run: Run) -> tp.Optional[Run]:
        """Prepare and execute one run; returns None if cancelled during prep."""
        try:
            runner = ExperimentSimpleRunner(self.executor, run.experiment, run.env, self.verbose)
            (await run.prep_dirs(executor=self.executor))
            (await runner.prepare())
        except asyncio.CancelledError:
            # Cancellation during preparation aborts the run quietly.
            return None
        print('starting run ', run.name())
        run.output = (await runner.run())
        if self.verbose:
            print(f'Writing collected output of run {run.name()} to JSON file ...')
        run.output.dump(run.outpath)
        print('finished run ', run.name())
        return run

    async def wait_completion(self) -> None:
        """Block until at least one pending run finishes, then release its
        cores/memory back to the budget."""
        assert self._pending_jobs
        (done, self._pending_jobs) = (await asyncio.wait(self._pending_jobs, return_when=asyncio.FIRST_COMPLETED))
        for run in done:
            # NOTE(review): do_run may return None when cancelled; the
            # accounting below would then fail — presumably cancellation and
            # wait_completion never overlap. Confirm.
            run = (await run)
            self.complete.add(run)
            self.cores_used -= run.experiment.resreq_cores()
            self.mem_used -= run.experiment.resreq_mem()

    def enough_resources(self, run: Run) -> bool:
        """True when the remaining core and memory budgets can host `run`."""
        exp = run.experiment
        if (self.cores is not None):
            enough_cores = ((self.cores - self.cores_used) >= exp.resreq_cores())
        else:
            enough_cores = True
        if (self.mem is not None):
            enough_mem = ((self.mem - self.mem_used) >= exp.resreq_mem())
        else:
            enough_mem = True
        return (enough_cores and enough_mem)

    def prereq_ready(self, run: Run) -> bool:
        """True when `run` has no prerequisite or it has already completed."""
        if (run.prereq is None):
            return True
        return (run.prereq in self.complete)

    async def do_start(self) -> None:
        """Main scheduling loop: start runs in order (no-prereq first) as
        resources and prerequisites allow, then drain remaining jobs."""
        # Resource accounting lives here, not in __init__ — it is (re)set on
        # every start.
        self.cores_used = 0
        self.mem_used = 0
        runs = (self.runs_noprereq + self.runs_prereq)
        for run in runs:
            while (not self.enough_resources(run)):
                print('waiting for resources')
                (await self.wait_completion())
            while (not self.prereq_ready(run)):
                print('waiting for prereq')
                (await self.wait_completion())
            self.cores_used += run.experiment.resreq_cores()
            self.mem_used += run.experiment.resreq_mem()
            job = asyncio.create_task(self.do_run(run))
            self._pending_jobs.add(job)
        # Wait for everything still in flight.
        (await asyncio.wait(self._pending_jobs))

    async def start(self) -> None:
        """Run the scheduler; on cancellation, cancel and await all jobs."""
        self._starter_task = asyncio.create_task(self.do_start())
        try:
            (await self._starter_task)
        except asyncio.CancelledError:
            for job in self._pending_jobs:
                job.cancel()
            (await asyncio.wait(self._pending_jobs))

    def interrupt_handler(self) -> None:
        """Request a clean shutdown by cancelling the scheduling task."""
        self._starter_task.cancel()
class Encoder(nn.Module):
    """Transformer encoder block: multi-head self-attention followed by a
    position-wise feed-forward network, each sub-layer wrapped with dropout,
    a residual connection and LayerNorm (post-norm)."""

    def __init__(self, d_model: int, q: int, v: int, h: int, attention_size: int = None, dropout: float = 0.3, chunk_mode: str = 'chunk'):
        super().__init__()
        # Pick the attention flavor; anything other than 'chunk'/'window'
        # falls back to plain multi-head attention.
        chunk_mode_modules = {'chunk': MultiHeadAttentionChunk, 'window': MultiHeadAttentionWindow}
        if chunk_mode in chunk_mode_modules.keys():
            MHA = chunk_mode_modules[chunk_mode]
        else:
            MHA = MultiHeadAttention
        self._selfAttention = MHA(d_model, q, v, h, attention_size=attention_size)
        self._feedForward = PositionwiseFeedForward(d_model)
        self._layerNorm1 = nn.LayerNorm(d_model)
        self._layerNorm2 = nn.LayerNorm(d_model)
        # (Renamed from the original `_dopout` typo; nn.Dropout carries no
        # parameters, so the state_dict is unaffected.)
        self._dropout = nn.Dropout(p=dropout)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Self-attention sub-layer with residual + norm.
        attn_out = self._selfAttention(query=x, key=x, value=x)
        x = self._layerNorm1(self._dropout(attn_out) + x)
        # Feed-forward sub-layer with residual + norm.
        ff_out = self._feedForward(x)
        x = self._layerNorm2(self._dropout(ff_out) + x)
        return x

    def attention_map(self) -> torch.Tensor:
        """Expose the attention map recorded by the self-attention module."""
        return self._selfAttention.attention_map
def test_parse_tagged_words(pretrain_file):
    """Parsing one pre-tagged sentence must yield a single tree whose
    preterminals reproduce the given words and tags in order."""
    model = build_model(pretrain_file)
    tagged = [('I', 'PRP'), ('am', 'VBZ'), ('Luffa', 'NNP')]
    result = model.parse_tagged_words([tagged], 10)
    assert len(result) == 1
    preterminals = list(result[0].yield_preterminals())
    for (word, tag), node in zip(tagged, preterminals):
        assert node.children[0].label == word
        assert node.label == tag
def argument_handler():
    """Parse the command-line options for the retraining / PTQ /
    mixed-precision quantization flow and return the resulting namespace."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    # Retraining hyper-parameters.
    add('--retrain_num_epochs', type=int, default=20, help='Number of epochs for the retraining phase.')
    add('--eval_batch_size', type=int, default=32, help='Batch size for evaluation.')
    add('--retrain_batch_size', type=int, default=32, help='Batch size for retraining.')
    add('--retrain_lr', type=float, default=0.001, help='Learning rate to use during retraining.')
    add('--retrain_momentum', type=float, default=0.9, help='SGD momentum to use during retraining.')
    # Post-training-quantization calibration settings.
    add('--representative_dataset_dir', type=str, default='./data', help='Folder path to save the representative dataset.')
    add('--ptq_batch_size', type=int, default=50, help='Batch size for the representative data during PTQ calibration.')
    add('--num_calibration_iterations', type=int, default=10, help='Number of iterations for calibration.')
    # Mixed-precision search settings.
    add('--weights_compression_ratio', type=float, default=0.75, help='Weights compression ratio to use for mixed-precision quantization.')
    add('--mixed_precision_num_of_images', type=int, default=32, help='Number of images to use for mixed-precision configuration search.')
    add('--enable_mixed_precision_gradients_weighting', action='store_true', default=False, help='Whether to use gradients during mixed-precision configuration search or not.')
    add('--seed', type=int, default=0, help='Seed to set for randomness.')
    return parser.parse_args()
def vgg_layers(layer_names):
    """Return a Keras model mapping an image batch to the activations of the
    requested VGG19 layers (ImageNet weights, frozen, no classifier head)."""
    backbone = tf.keras.applications.VGG19(include_top=False, weights='imagenet')
    backbone.trainable = False
    selected_outputs = [backbone.get_layer(name).output for name in layer_names]
    return tf.keras.Model([backbone.input], selected_outputs)
# NOTE(review): the decorator line below appears garbled in this dump
# (presumably garage's `@wrap_experiment`) — confirm against the original.
_experiment
def cem_cartpole(ctxt=None, seed=1):
    """Train the Cross-Entropy Method with a categorical MLP policy on
    CartPole-v1 using garage's LocalTFRunner.

    Args:
        ctxt: snapshot/experiment configuration passed by the experiment wrapper.
        seed (int): random seed for reproducibility.
    """
    set_seed(seed)
    with LocalTFRunner(snapshot_config=ctxt) as runner:
        env = GarageEnv(env_name='CartPole-v1')
        policy = CategoricalMLPPolicy(name='policy', env_spec=env.spec, hidden_sizes=(32, 32))
        baseline = LinearFeatureBaseline(env_spec=env.spec)
        n_samples = 20  # policies sampled per CEM iteration
        algo = CEM(env_spec=env.spec, policy=policy, baseline=baseline, best_frac=0.05, max_path_length=100, n_samples=n_samples)
        runner.setup(algo, env, sampler_cls=OnPolicyVectorizedSampler)
        runner.train(n_epochs=100, batch_size=1000)
def train(epoch, loss_list):
    """Run one training epoch over `train_loader`, appending each batch loss
    to `loss_list` and logging progress every `args.log_interval` batches.

    Args:
        epoch (int): current epoch number (for logging only).
        loss_list (list): mutated in place; one float loss per batch appended.
    """
    model.train()
    for (batch_idx, (image, mask)) in enumerate(train_loader):
        if args.cuda:
            (image, mask) = (image.cuda(), mask.cuda())
        (image, mask) = (Variable(image), Variable(mask))
        optimizer.zero_grad()
        output = model(image)
        loss = criterion(output, mask)
        # FIX: `loss.data[0]` was removed in PyTorch >= 0.5; `.item()` is the
        # supported scalar accessor (available since 0.4).
        loss_list.append(loss.item())
        loss.backward()
        if args.clip:
            # FIX: clipping must happen between backward() and step() —
            # clipping after the optimizer step (as before) has no effect.
            # Also use the in-place `clip_grad_norm_` (the un-suffixed name
            # is deprecated).
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
        optimizer.step()
        if ((batch_idx % args.log_interval) == 0):
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, (batch_idx * len(image)), len(train_loader.dataset), ((100.0 * batch_idx) / len(train_loader)), loss.item()))
class ParamScheduler():
    """Schedules a scalar parameter over time: either a constant value or a
    piecewise-linear interpolation through a sorted list of [time, value]
    knots."""

    def __init__(self, schedule):
        if isinstance(schedule, (int, float)):
            self.type = 'constant'
        elif isinstance(schedule, list):
            self.type = 'piecewise_linear'
            assert isinstance(schedule, list), 'Please specify the schedule as a list of tuples!'
            for entry in schedule:
                assert isinstance(entry, list), 'Each entry in the schedule must be a list with signature [time, param_value].'
            knot_times = [entry[0] for entry in schedule]
            assert knot_times == sorted(knot_times), 'All the times must be sorted in an increasing order!'
        else:
            raise NotImplementedError
        self.schedule = schedule

    def get_param_value(self, timestep):
        """Return the scheduled value at `timestep` (clamped to the first/last
        knot outside the schedule's time range)."""
        assert timestep >= 0
        if self.type == 'constant':
            param_value = self.schedule
        elif self.type == 'piecewise_linear':
            first_time, first_value = self.schedule[0]
            last_time, last_value = self.schedule[-1]
            if timestep <= first_time:
                param_value = first_value
            elif timestep >= last_time:
                param_value = last_value
            else:
                # Find the segment containing the timestep and interpolate.
                for left_knot, right_knot in zip(self.schedule[:-1], self.schedule[1:]):
                    l_t, l_v = left_knot
                    r_t, r_v = right_knot
                    if l_t <= timestep < r_t:
                        slope = float(timestep - l_t) / (r_t - l_t)
                        param_value = _linear_interpolation(l_v, r_v, slope)
        else:
            raise NotImplementedError
        logging.info(f'Setting the param value at t={timestep} to {param_value}.')
        return param_value
def AdvancePosition(node):
    """Move `node` 5 m along +x, then reschedule itself one simulated second
    later; stops permanently once x reaches 210 m."""
    pos = GetPosition(node)
    pos.x += 5.0
    if pos.x < 210.0:
        SetPosition(node, pos)
        ns.core.Simulator.Schedule(ns.core.Seconds(1.0), AdvancePosition, node)
# NOTE(review): the decorator line below appears garbled in this dump
# (presumably an `@optplan`-node registration) — confirm against the original.
_node(optplan.ImportOverlap)
class ImportOverlap():
    """Loads a field overlap from a MATLAB .mat file and resamples it onto the
    simulation's Yee grid, producing a vector field."""

    def __init__(self, params: optplan.ImportOverlap, work: workspace.Workspace=None) -> None:
        # `work` is accepted for interface compatibility but unused here.
        self._params = params

    def __call__(self, simspace: SimulationSpace, wlen: float=None, **kwargs) -> fdfd_tools.VecField:
        matpath = os.path.join(simspace._filepath, self._params.file_name)
        # Expected .mat contents: coordinate rows 'x', 'y', 'z' and field
        # components 'Ex', 'Ey', 'Ez' — presumably; confirm file schema.
        overlap = sio.loadmat(matpath)
        # The background-permittivity grid supplies the target coordinates,
        # cell sizes and Yee shifts.
        reference_grid = simspace(wlen).eps_bg
        overlap_grid = np.zeros(reference_grid.grids.shape, dtype=np.complex_)
        xyz = reference_grid.xyz
        dxyz = reference_grid.dxyz
        shifts = reference_grid.shifts
        overlap_comp = ['Ex', 'Ey', 'Ez']
        overlap_center = self._params.center
        # Translate the imported coordinates so the overlap is centered at
        # the requested center.
        overlap_coords = [(overlap['x'][0] + overlap_center[0]), (overlap['y'][0] + overlap_center[1]), (overlap['z'][0] + overlap_center[2])]
        coord_dims = np.array([overlap_coords[0].size, overlap_coords[1].size, overlap_coords[2].size])
        singleton_dims = np.where((coord_dims == 1))[0]
        if (not (singleton_dims.size == 0)):
            # RegularGridInterpolator cannot interpolate along a singleton
            # axis: pad each such axis with points half a cell on either side
            # and replicate the field data along it.
            for axis in singleton_dims:
                dx = dxyz[axis][0]
                coord = overlap_coords[axis][0]
                overlap_coords[axis] = np.insert(overlap_coords[axis], 0, (coord - (dx / 2)))
                overlap_coords[axis] = np.append(overlap_coords[axis], (coord + (dx / 2)))
                for comp in overlap_comp:
                    overlap[comp] = np.repeat(overlap[comp], overlap_coords[axis].size, axis)
        for i in range(0, 3):
            # Interpolate each field component onto the grid, evaluated at
            # the component's Yee-shifted sample points; points outside the
            # imported region are filled with 0.
            overlap_interp_function = RegularGridInterpolator((overlap_coords[0], overlap_coords[1], overlap_coords[2]), overlap[overlap_comp[i]], bounds_error=False, fill_value=0.0)
            xs = (xyz[0] + (dxyz[0] * shifts[(i, 0)]))
            ys = (xyz[1] + (dxyz[1] * shifts[(i, 1)]))
            zs = (xyz[2] + (dxyz[2] * shifts[(i, 2)]))
            eval_coord_grid = np.meshgrid(xs, ys, zs, indexing='ij')
            eval_coord_points = np.reshape(eval_coord_grid, (3, (- 1)), order='C').T
            interp_overlap = overlap_interp_function(eval_coord_points)
            overlap_grid[i] = np.reshape(interp_overlap, (len(xs), len(ys), len(zs)), order='C')
        return overlap_grid
def changeContagionReciprocity_OLD(G, A, i):
    """Count the out-neighbours u of node i that are active (A[u] == 1) and
    have an arc back to i (i.e. reciprocated links to active neighbours)."""
    reciprocated = 0
    for u in G.outIterator(i):
        if G.isArc(u, i) and A[u] == 1:
            reciprocated += 1
    return reciprocated
class ReductionLayer(object):
    """Reduction over memory slots expressed with batched matrix products.

    Written against the legacy TensorFlow < 1.0 API: `tf.concat(axis, values)`
    argument order and `tf.batch_matmul` — NOTE(review): both changed/removed
    in TF >= 1.0; confirm the pinned TF version.
    """

    def __init__(self, batch_size, mem_size, hidden_size):
        self.hidden_size = hidden_size
        self.mem_size = mem_size
        self.batch_size = batch_size
        (N, M, d) = (batch_size, mem_size, hidden_size)
        # Lower-triangular [M, M] masks: L includes the diagonal, sL is
        # strictly lower-triangular (diagonal excluded).
        self.L = np.tril(np.ones([M, M], dtype='float32'))
        self.sL = np.tril(np.ones([M, M], dtype='float32'), k=(- 1))

    def __call__(self, u_t, a, b, scope=None):
        # u_t, a, b are assumed to be [N, M, 1]-shaped batch tensors — the
        # concat/slice below operates on such a layout; TODO confirm shapes.
        (N, M, d) = (self.batch_size, self.mem_size, self.hidden_size)
        (L, sL) = (self.L, self.sL)
        with tf.name_scope((scope or self.__class__.__name__)):
            # Broadcast the [M, M] masks across the batch: [N, M, M].
            L = tf.tile(tf.expand_dims(L, 0), [N, 1, 1])
            sL = tf.tile(tf.expand_dims(sL, 0), [N, 1, 1])
            # log(b) with an epsilon for stability; the first slot's log-gate
            # is forced to zero (gate of 1).
            logb = tf.log((b + 1e-09))
            logb = tf.concat(1, [tf.zeros([N, 1, 1]), tf.slice(logb, [0, 1, 0], [(- 1), (- 1), (- 1)])])
            # Masked cumulative sums of log-gates turned back into products
            # via exp — effectively cumulative products of b along the slots.
            left = (L * tf.exp(tf.batch_matmul(L, (logb * sL))))
            right = (a * u_t)
            u = tf.batch_matmul(left, right)
            return u
class SimplicialSet_arbitrary(Parent):
    """Base class for simplicial sets that may be finite or infinite.

    Infinite simplicial sets are handled lazily via their finite
    ``n_skeleton``; many methods therefore require an explicit dimension
    bound when ``self.is_finite()`` is False.
    """

    # NOTE(review): the decorator line below appears garbled in this dump
    # (presumably `@cached_method` or similar) — confirm against the original.
    _method
    def faces(self, simplex):
        """Return the list of faces of ``simplex`` in this simplicial set."""
        dim = simplex.dimension()
        if (simplex not in self):
            raise ValueError('this simplex is not in this simplicial set')
        if simplex.is_nondegenerate():
            if self.is_finite():
                return self.face_data()[simplex]
            else:
                # Infinite case: delegate to the finite skeleton.
                return self.n_skeleton(dim).face_data()[simplex]
        # Degenerate simplex: compute faces via the simplicial identities on
        # its degeneracy word applied to the underlying nondegenerate simplex.
        underlying = simplex.nondegenerate()
        faces = []
        for (J, t) in [face_degeneracies(m, simplex.degeneracies()) for m in range((dim + 1))]:
            if (t is None):
                faces.append(underlying.apply_degeneracies(*J))
            else:
                faces.append(self.face(underlying, t).apply_degeneracies(*J))
        return faces

    def face(self, simplex, i):
        """Return the ``i``-th face of ``simplex`` (or None if it has none)."""
        if ((i < 0) or (i > simplex.dimension())):
            raise ValueError('cannot compute face {} of {}-dimensional simplex'.format(i, simplex.dimension()))
        faces = self.faces(simplex)
        if (faces is not None):
            return self.faces(simplex)[i]
        return None

    def __contains__(self, x):
        """Membership: x belongs here iff its nondegenerate part is a cell."""
        try:
            underlying = x.nondegenerate()
            return (underlying in self.n_cells(underlying.dimension()))
        except AttributeError:
            # Not simplex-like at all.
            return False

    def alexander_whitney(self, simplex, dim_left):
        """Alexander-Whitney decomposition of ``simplex`` at ``dim_left``:
        front face of dimension ``dim_left`` tensor back face."""
        dim = simplex.dimension()
        if ((dim_left < 0) or (dim_left > dim)):
            raise ValueError('alexander_whitney is only valid if dim_left is between 0 and the dimension of the simplex')
        # Front face: repeatedly drop the last vertex.
        left = simplex
        for i in range(dim, dim_left, (- 1)):
            left = self.face(left, i)
        # Back face: repeatedly drop the first vertex.
        right = simplex
        for i in range(dim_left):
            right = self.face(right, 0)
        # Degenerate pieces contribute zero.
        if (left.is_degenerate() or right.is_degenerate()):
            c = ZZ.zero()
        else:
            c = ZZ.one()
        return [(c, left, right)]

    def nondegenerate_simplices(self, max_dim=None):
        """List the nondegenerate simplices, optionally up to ``max_dim``."""
        if self.is_finite():
            if (max_dim is None):
                return list(self._simplices)
            return list((sigma for sigma in self._simplices if (sigma.dimension() <= max_dim)))
        if (max_dim is None):
            raise NotImplementedError('this simplicial set may be infinite, so specify max_dim')
        return list((sigma for sigma in self.n_skeleton(max_dim)._simplices))

    def cells(self, subcomplex=None, max_dim=None):
        """Return a dict mapping dimension -> sorted nondegenerate simplices,
        optionally relative to ``subcomplex`` (via the quotient)."""
        if (subcomplex is None):
            if self.is_finite():
                simplices = {}
                for sigma in self.nondegenerate_simplices():
                    if (sigma.dimension() in simplices):
                        simplices[sigma.dimension()].append(sigma)
                    else:
                        simplices[sigma.dimension()] = [sigma]
                if (max_dim is not None):
                    return {d: sorted(simplices[d]) for d in simplices if (d <= max_dim)}
                return {d: sorted(simplices[d]) for d in simplices}
            if (max_dim is None):
                raise NotImplementedError('this simplicial set may be infinite, so specify max_dim')
            return self.n_skeleton(max_dim).cells()
        # Relative cells: cells of the quotient by the subcomplex.
        return self.quotient(subcomplex).cells(max_dim=max_dim)

    def n_cells(self, n, subcomplex=None):
        """Return the list of ``n``-dimensional nondegenerate simplices."""
        cells = self.cells(subcomplex=subcomplex, max_dim=n)
        try:
            return list(cells[n])
        except KeyError:
            # No cells in this dimension.
            return []

    def _an_element_(self):
        """Return a vertex if there is one, else None."""
        vertices = self.n_cells(0)
        if vertices:
            return vertices[0]
        return None

    def all_n_simplices(self, n):
        """All ``n``-simplices, degenerate ones included."""
        non_degen = [_ for _ in self.nondegenerate_simplices(max_dim=n)]
        ans = set([_ for _ in non_degen if (_.dimension() == n)])
        # Add every degeneracy of each lower-dimensional simplex up to dim n.
        for sigma in non_degen:
            d = sigma.dimension()
            ans.update([sigma.apply_degeneracies(*_) for _ in all_degeneracies(d, (n - d))])
        return sorted(ans)

    def _map_from_empty_set(self):
        """The unique map from the empty simplicial set to this one."""
        from sage.topology.simplicial_set_examples import Empty
        return Empty().Hom(self)({})

    def identity(self):
        """The identity morphism of this simplicial set."""
        return self.Hom(self).identity()

    def constant_map(self, codomain=None, point=None):
        """The constant map to ``codomain`` (default: the one-point set)."""
        from sage.topology.simplicial_set_examples import Point
        if (codomain is None):
            codomain = Point()
        return self.Hom(codomain).constant_map(point)

    def is_reduced(self):
        """True if this simplicial set has exactly one vertex."""
        return (len(self.n_cells(0)) == 1)

    def graph(self):
        """The 1-skeleton as a graph (loops and multi-edges allowed)."""
        from sage.graphs.graph import Graph
        G = Graph(loops=True, multiedges=True)
        for e in self.n_cells(1):
            G.add_edge(self.face(e, 0), self.face(e, 1), e)
        for v in self.n_cells(0):
            G.add_vertex(v)
        return G

    def is_connected(self):
        """True if the 1-skeleton is a connected graph."""
        return self.graph().is_connected()

    def subsimplicial_set(self, simplices):
        """Smallest sub-simplicial-set containing the given ``simplices``
        (which may also be given as a SimplicialComplex)."""
        from .simplicial_set_constructions import SubSimplicialSet
        if isinstance(simplices, SimplicialComplex):
            # Translate facets of the simplicial complex into cells of self,
            # matching by string representation.
            new = []
            for f in simplices.facets():
                d = f.dimension()
                found = False
                for x in self.n_cells(d):
                    if (str(x) == str(tuple(sorted(tuple(f), key=str)))):
                        new.append(x)
                        found = True
                        break
                if (not found):
                    raise ValueError('not all simplices are in the original simplicial set')
            simplices = new
        if (not self.is_finite()):
            max_dim = max((sigma.dimension() for sigma in simplices))
            data = self.n_skeleton(max_dim).face_data()
            nondegenerate_simplices = self.nondegenerate_simplices(max_dim)
        else:
            data = self.face_data()
            nondegenerate_simplices = self.nondegenerate_simplices()
        # Close `keep` under taking (nondegenerate parts of) faces.
        vertices = set()
        keep = set(simplices)
        old_keep = set()
        while (keep != old_keep):
            old_keep = copy.copy(keep)
            for x in old_keep:
                underlying = x.nondegenerate()
                if (underlying not in data.keys()):
                    raise ValueError('not all simplices are in the original simplicial set')
                keep.add(underlying)
                if ((underlying in data) and data[underlying]):
                    keep.update([f.nondegenerate() for f in data[underlying]])
                else:
                    # Only vertices have no recorded faces.
                    assert (underlying.dimension() == 0)
                    vertices.add(underlying)
        # Prune everything not reachable from the chosen simplices.
        missing = set(nondegenerate_simplices).difference(keep)
        for x in missing:
            if (x in data):
                del data[x]
        for x in vertices:
            data[x] = None
        return SubSimplicialSet(data, self)

    def chain_complex(self, dimensions=None, base_ring=ZZ, augmented=False, cochain=False, verbose=False, subcomplex=None, check=False):
        """The (co)chain complex; infinite sets require ``dimensions``."""
        kwds = {'base_ring': base_ring, 'augmented': augmented, 'cochain': cochain, 'verbose': verbose, 'subcomplex': subcomplex, 'check': check}
        if (not self.is_finite()):
            if (dimensions is None):
                raise NotImplementedError('this simplicial set may be infinite, so specify dimensions when computing its chain complex')
            else:
                # The (max_dim+1)-skeleton suffices for dimensions <= max_dim.
                max_dim = max(dimensions)
                return SimplicialSet_finite.chain_complex(self.n_skeleton((max_dim + 1)), dimensions=dimensions, **kwds)
        return SimplicialSet_finite.chain_complex(self, dimensions=dimensions, **kwds)

    def homology(self, dim=None, **kwds):
        """Homology groups; infinite sets require an explicit ``dim``."""
        if (not self.is_finite()):
            if (dim is None):
                raise NotImplementedError('this simplicial set may be infinite, so specify dimensions when computing homology')
            elif isinstance(dim, (list, tuple, range)):
                dim = list(dim)
                max_dim = max(dim)
                space = self.n_skeleton((max_dim + 1))
                min_dim = min(dim)
                H = GenericCellComplex.homology(space, **kwds)
                return {n: H[n] for n in H if (min_dim <= n <= max_dim)}
            else:
                max_dim = dim
                space = self.n_skeleton((max_dim + 1))
        else:
            space = self
        return GenericCellComplex.homology(space, dim=dim, **kwds)

    def cohomology(self, dim=None, **kwds):
        """Cohomology groups, computed via homology with cohomology=True."""
        return self.homology(dim=dim, cohomology=True, **kwds)

    def betti(self, dim=None, subcomplex=None):
        """Betti numbers (ranks of rational homology); the extra +1 in degree
        0 compensates for the reduced homology convention used."""
        # NOTE(review): `dict` shadows the builtin; kept as-is here.
        dict = {}
        H = self.homology(dim, base_ring=QQ, subcomplex=subcomplex)
        try:
            for n in H.keys():
                dict[n] = H[n].dimension()
                if (n == 0):
                    dict[n] += 1
            return dict
        except AttributeError:
            # A single group was returned rather than a dict of groups.
            return H.dimension()

    def n_chains(self, n, base_ring=ZZ, cochains=False):
        """The free module of ``n``-chains (or cochains) over ``base_ring``."""
        if self.is_finite():
            return GenericCellComplex.n_chains(self, n=n, base_ring=base_ring, cochains=cochains)
        from sage.homology.chains import Chains, Cochains
        n_cells = tuple(self.n_cells(n))
        if cochains:
            return Cochains(self, n, n_cells, base_ring)
        else:
            return Chains(self, n, n_cells, base_ring)

    def quotient(self, subcomplex, vertex_name='*'):
        """Quotient by a subcomplex, collapsing it to a vertex named
        ``vertex_name``."""
        from .simplicial_set_constructions import SubSimplicialSet
        from .simplicial_set_constructions import QuotientOfSimplicialSet, QuotientOfSimplicialSet_finite
        if (not isinstance(subcomplex, SimplicialSet_finite)):
            # Presumably a list/tuple of simplices: build the subcomplex.
            subcomplex = self.subsimplicial_set(subcomplex)
        elif ((not isinstance(subcomplex, SubSimplicialSet)) and (subcomplex.ambient_space() == self)):
            raise ValueError('the "subcomplex" is not actually a subcomplex')
        if self.is_finite():
            return QuotientOfSimplicialSet_finite(subcomplex.inclusion_map(), vertex_name=vertex_name)
        else:
            return QuotientOfSimplicialSet(subcomplex.inclusion_map(), vertex_name=vertex_name)

    def disjoint_union(self, *others):
        """Disjoint union with the given simplicial sets."""
        from .simplicial_set_constructions import DisjointUnionOfSimplicialSets, DisjointUnionOfSimplicialSets_finite
        if all((space.is_finite() for space in ([self] + list(others)))):
            return DisjointUnionOfSimplicialSets_finite(((self,) + others))
        else:
            return DisjointUnionOfSimplicialSets(((self,) + others))

    def coproduct(self, *others):
        """Categorical coproduct: wedge when all factors are pointed,
        disjoint union when none are; ambiguous otherwise."""
        if (self.is_pointed() and all((X.is_pointed() for X in others))):
            return self.wedge(*others)
        if (self.is_pointed() or any((X.is_pointed() for X in others))):
            raise ValueError('some, but not all, of the simplicial sets are pointed, so the categorical coproduct is not defined: the category is ambiguous')
        return self.disjoint_union(*others)

    def product(self, *others):
        """Cartesian product with the given simplicial sets."""
        from .simplicial_set_constructions import ProductOfSimplicialSets, ProductOfSimplicialSets_finite
        if (self.is_finite() and all((X.is_finite() for X in others))):
            return ProductOfSimplicialSets_finite(((self,) + others))
        else:
            return ProductOfSimplicialSets(((self,) + others))

    # Alias used by the Sage categories framework.
    cartesian_product = product

    def pushout(self, *maps):
        """Pushout of maps that all have this simplicial set as domain."""
        from .simplicial_set_constructions import PushoutOfSimplicialSets, PushoutOfSimplicialSets_finite
        if any(((self != f.domain()) for f in maps)):
            raise ValueError('the domains of the maps must be equal')
        if (not maps):
            return PushoutOfSimplicialSets_finite()
        if all((f.codomain().is_finite() for f in maps)):
            return PushoutOfSimplicialSets_finite(maps)
        else:
            return PushoutOfSimplicialSets(maps)

    def pullback(self, *maps):
        """Pullback of maps that all have this simplicial set as codomain."""
        from .simplicial_set_constructions import PullbackOfSimplicialSets, PullbackOfSimplicialSets_finite
        if any(((self != f.codomain()) for f in maps)):
            raise ValueError('the codomains of the maps must be equal')
        if (not maps):
            return PullbackOfSimplicialSets_finite()
        if (self.is_finite() and all((f.domain().is_finite() for f in maps))):
            return PullbackOfSimplicialSets_finite(maps)
        else:
            return PullbackOfSimplicialSets(maps)

    def wedge(self, *others):
        """Wedge (one-point union) with the given pointed simplicial sets."""
        from .simplicial_set_constructions import WedgeOfSimplicialSets, WedgeOfSimplicialSets_finite
        if all((space.is_finite() for space in ([self] + list(others)))):
            return WedgeOfSimplicialSets_finite(((self,) + others))
        else:
            return WedgeOfSimplicialSets(((self,) + others))

    def cone(self):
        """The cone on this simplicial set (reduced cone when pointed)."""
        from .simplicial_set_constructions import ConeOfSimplicialSet, ConeOfSimplicialSet_finite, ReducedConeOfSimplicialSet, ReducedConeOfSimplicialSet_finite
        if self.is_pointed():
            if self.is_finite():
                return ReducedConeOfSimplicialSet_finite(self)
            else:
                return ReducedConeOfSimplicialSet(self)
        if self.is_finite():
            return ConeOfSimplicialSet_finite(self)
        else:
            return ConeOfSimplicialSet(self)

    def suspension(self, n=1):
        """The ``n``-fold suspension, computed by iterating once at a time."""
        from .simplicial_set_constructions import SuspensionOfSimplicialSet, SuspensionOfSimplicialSet_finite
        if (n < 0):
            raise ValueError('n must be non-negative')
        if (n == 0):
            return self
        if self.is_finite():
            Sigma = SuspensionOfSimplicialSet_finite(self)
        else:
            Sigma = SuspensionOfSimplicialSet(self)
        if (n == 1):
            return Sigma
        return Sigma.suspension((n - 1))

    def join(self, *others):
        """Joins are not implemented for simplicial sets."""
        raise NotImplementedError('joins are not implemented for simplicial sets')

    def reduce(self):
        """Collapse a spanning tree of the 1-skeleton to obtain a weakly
        equivalent reduced (one-vertex) simplicial set."""
        if self.is_reduced():
            return self
        if (not self.is_connected()):
            raise ValueError('this simplicial set is not connected')
        graph = self.graph()
        # Edge labels are the 1-simplices; collapse a spanning tree of them.
        spanning_tree = [e[2] for e in graph.min_spanning_tree()]
        return self.quotient(spanning_tree)

    def _Hom_(self, other, category=None):
        """Homset to ``other``, inferring the most specific category
        (Finite/Pointed) when none is given."""
        from sage.topology.simplicial_set_morphism import SimplicialSetHomset
        if (category is None):
            if (self.is_finite() and other.is_finite()):
                if (self.is_pointed() and other.is_pointed()):
                    category = SimplicialSets().Finite().Pointed()
                else:
                    category = SimplicialSets().Finite()
            elif (self.is_pointed() and other.is_pointed()):
                category = SimplicialSets().Pointed()
            else:
                category = SimplicialSets()
        return SimplicialSetHomset(self, other, category=category)

    def rename_latex(self, s):
        """Set a custom LaTeX name for this simplicial set."""
        self._latex_name = s

    def _latex_(self):
        """LaTeX representation: the custom name if set, else str(self)."""
        if (hasattr(self, '_latex_name') and (self._latex_name is not None)):
            return self._latex_name
        return str(self)

    def _repr_(self):
        """Short description by number of nondegenerate simplices."""
        num = len(self.nondegenerate_simplices())
        if (num == 1):
            return 'Simplicial set with 1 non-degenerate simplex'
        return 'Simplicial set with {} non-degenerate simplices'.format(num)
class RTU2b(RTU):
    """Remote terminal unit 2b: endlessly writes rotating triples of
    ``wadi2_list`` values into holding registers.

    Relies on module-level globals defined elsewhere in this file:
    ``wadi2_list`` (source values), ``HR_0_2a`` (target registers),
    ``RTU2B_ADDR`` (unit address) and ``RTU_PERIOD_SEC`` (send period).
    """
    def pre_loop(self, sleep=0.6):
        # Initial delay before the main loop starts — presumably to stagger
        # start-up relative to other RTUs; confirm against the base class.
        time.sleep(sleep)
    def main_loop(self):
        # ``count`` selects which rotation of the current triple is sent;
        # ``offset`` is the index of the first element of that triple.
        count = 0
        offset = 0
        while True:
            registers = []
            # Wrap around once every triple of wadi2_list has been visited.
            # NOTE(review): comparison is against a float (len/3, true
            # division); with count incremented past 11 below, indices
            # offset+2 may exceed the list for some list lengths — verify.
            if (count >= (len(wadi2_list) / 3)):
                count = 0
                offset = 0
            if (count <= 2):
                # Rotation: a, b, c
                registers.append(wadi2_list[offset])
                registers.append(wadi2_list[(offset + 1)])
                registers.append(wadi2_list[(offset + 2)])
                self.send(HR_0_2a, registers, RTU2B_ADDR, count=3)
            elif (count <= 5):
                # Rotation: c, a, b
                registers.append(wadi2_list[(offset + 2)])
                registers.append(wadi2_list[offset])
                registers.append(wadi2_list[(offset + 1)])
                self.send(HR_0_2a, registers, RTU2B_ADDR, count=3)
            elif (count <= 8):
                # Rotation: b, c, a
                registers.append(wadi2_list[(offset + 1)])
                registers.append(wadi2_list[(offset + 2)])
                registers.append(wadi2_list[offset])
                self.send(HR_0_2a, registers, RTU2B_ADDR, count=3)
            elif (count <= 11):
                # Rotation: c, b, a
                registers.append(wadi2_list[(offset + 2)])
                registers.append(wadi2_list[(offset + 1)])
                registers.append(wadi2_list[offset])
                self.send(HR_0_2a, registers, RTU2B_ADDR, count=3)
            else:
                # Counts beyond 11 send nothing until the wrap above resets.
                pass
            count += 1
            offset += 3
            time.sleep(RTU_PERIOD_SEC)
class SimpleTableImage(object):
    """An image cell for an HTML table.

    Renders as an ``<img>`` tag wrapped in a link that opens the image file
    in a new tab.  ``width``/``height`` are rounded to whole pixels; falsy
    values (``None``, ``0``) leave the dimension unset.
    """

    def __init__(self, image_file, width=None, height=None):
        self.image_file = image_file
        self.width = round(width) if width else width
        self.height = round(height) if height else height

    def __str__(self):
        """Return the ``<a><img></a>`` HTML snippet for this image."""
        href = quote(self.image_file)
        parts = ['<a href="%s" target="_blank">' % href, '<img src="%s"' % href]
        if self.height:
            parts.append(' height="%s"' % self.height)
        if self.width:
            parts.append(' width="%s"' % self.width)
        parts.append('></a>')
        return ''.join(parts)
class ImageFolder(DatasetFolder):
    """A ``DatasetFolder`` specialised to image files (``IMG_EXTENSIONS``).

    Exposes the sample list under the conventional ``imgs`` alias as well.
    """

    def __init__(self, root, target=(-1), transform=None, target_transform=None, loader=default_loader):
        super().__init__(root, loader, IMG_EXTENSIONS, target=target, transform=transform, target_transform=target_transform)
        # torchvision-style alias: ``imgs`` mirrors ``samples``.
        self.imgs = self.samples
class NegotiationModel(NMTModel):
    """NMT model extended with dialogue-context and knowledge-base encoders.

    The decoder attends over the source encoding plus the context embedding
    and, when a KB embedder is supplied, the title and description
    embeddings as well.
    """

    def __init__(self, encoder, decoder, context_embedder, kb_embedder, stateful=False):
        super(NegotiationModel, self).__init__(encoder, decoder, stateful=stateful)
        self.context_embedder = context_embedder
        self.kb_embedder = kb_embedder

    def forward(self, src, tgt, context, title, desc, lengths, dec_state=None, enc_state=None, tgt_lengths=None):
        """Encode all inputs and decode; returns (decoder_outputs, attns, dec_state)."""
        enc_final, enc_memory_bank = self.encoder(src, lengths, enc_state)
        _, context_memory_bank = self.context_embedder(context)
        memory_banks = [enc_memory_bank, context_memory_bank]
        if self.kb_embedder:
            # Title and description share the single KB embedder.
            _, title_memory_bank = self.kb_embedder(title)
            _, desc_memory_bank = self.kb_embedder(desc)
            memory_banks.extend([title_memory_bank, desc_memory_bank])
        enc_state = self.decoder.init_decoder_state(src, enc_memory_bank, enc_final)
        if dec_state is None:
            dec_state = enc_state
        decoder_outputs, dec_state, attns = self.decoder(tgt, memory_banks, dec_state, memory_lengths=lengths, lengths=tgt_lengths)
        return (decoder_outputs, attns, dec_state)
def data_parallel(f, input, params, mode, device_ids, output_device=None):
    """Run ``f(input, params, mode)`` replicated across ``device_ids``.

    With a single device the function is called directly.  Otherwise
    ``params`` are broadcast to every device, ``input`` is scattered, the
    replicas run in parallel and the outputs are gathered onto
    ``output_device`` (defaults to the first device).
    """
    assert isinstance(device_ids, list)
    if output_device is None:
        output_device = device_ids[0]
    if len(device_ids) == 1:
        # Nothing to parallelise.
        return f(input, params, mode)
    flat = Broadcast.apply(device_ids, *params.values())
    n_params = len(params)
    # Rebuild one name->tensor dict per device from the flat broadcast result.
    params_per_device = [
        {name: flat[idx + dev * n_params] for idx, name in enumerate(params.keys())}
        for dev in range(len(device_ids))
    ]
    replicas = [partial(f, params=p, mode=mode) for p in params_per_device]
    scattered_inputs = scatter([input], device_ids)
    outputs = parallel_apply(replicas, scattered_inputs)
    return gather(outputs, output_device)
def insert_search_log(keyword, total, sws):
    """Record a grabCAD search in the ``search_log`` table via the module-level ``agent``."""
    row = {
        'keyword': keyword,
        'etc': f"softwares : {';'.join(sws)}",
        'website': 'grabCAD',
        'total': total,
    }
    return agent.insert('search_log', **row)
def find_subtree(roots: Set[Node], graph_size: int):
    """Collect the nodes sharing a stage with ``roots``, forward then backward.

    First pass: forward closure from ``roots`` along out-edges that stay in
    the same ``stage_id``.  Second pass: starting from everything collected,
    walk in-edges within the same stage, skipping (for non-root nodes) any
    non-input predecessor whose id lags by more than ``graph_size // 2`` —
    a heuristic cap on how far back to reach.

    Fix over the original: the worklist variable no longer shadows the
    builtin ``open``; behavior is unchanged.
    """
    nodes = set()
    # Forward pass over out-edges.
    frontier = copy(roots)
    while len(frontier) > 0:
        n = frontier.pop()
        nodes.add(n)
        for u in n.out_edges:
            if u.stage_id == n.stage_id:
                nodes.add(u)
                frontier.add(u)
    # Backward pass over in-edges, seeded with everything found so far.
    frontier = copy(nodes)
    while len(frontier) > 0:
        n = frontier.pop()
        if n in roots:
            continue
        for u in n.in_edges:
            if u.stage_id == n.stage_id:
                # Heuristic: don't chase predecessors too far back in id order.
                if (u.type != NodeTypes.IN) and ((n.id - u.id) > (graph_size // 2)):
                    continue
                frontier.add(u)
                nodes.add(u)
    return nodes
class normalizer():
    """Running mean/std normaliser with MPI-synchronised statistics.

    Local batches accumulate into ``local_*`` buffers under a lock;
    ``recompute_stats`` averages them across MPI workers, folds them into
    the ``total_*`` accumulators and refreshes ``mean``/``std``.
    """
    def __init__(self, size, eps=0.01, default_clip_range=np.inf):
        # size: dimensionality of the vectors being normalised.
        # eps: lower bound on std, to avoid division blow-ups.
        self.size = size
        self.eps = eps
        self.default_clip_range = default_clip_range
        # Per-process accumulators since the last sync.
        self.local_sum = np.zeros(self.size, np.float32)
        self.local_sumsq = np.zeros(self.size, np.float32)
        self.local_count = np.zeros(1, np.float32)
        # Global (post-sync) accumulators; count starts at one so mean/std
        # are well-defined before the first sync.
        self.total_sum = np.zeros(self.size, np.float32)
        self.total_sumsq = np.zeros(self.size, np.float32)
        self.total_count = np.ones(1, np.float32)
        # Current normalisation statistics.
        self.mean = np.zeros(self.size, np.float32)
        self.std = np.ones(self.size, np.float32)
        # Guards the local accumulators against concurrent update() calls.
        self.lock = threading.Lock()
    def update(self, v):
        # Fold a batch of vectors (any leading shape) into the local stats.
        v = v.reshape((- 1), self.size)
        with self.lock:
            self.local_sum += v.sum(axis=0)
            self.local_sumsq += np.square(v).sum(axis=0)
            self.local_count[0] += v.shape[0]
    def sync(self, local_sum, local_sumsq, local_count):
        # Replace each buffer in place with its mean across all MPI workers.
        local_sum[...] = self._mpi_average(local_sum)
        local_sumsq[...] = self._mpi_average(local_sumsq)
        local_count[...] = self._mpi_average(local_count)
        return (local_sum, local_sumsq, local_count)
    def recompute_stats(self):
        with self.lock:
            # Snapshot and reset the local accumulators under the lock so
            # concurrent update() calls are not lost.
            local_count = self.local_count.copy()
            local_sum = self.local_sum.copy()
            local_sumsq = self.local_sumsq.copy()
            self.local_count[...] = 0
            self.local_sum[...] = 0
            self.local_sumsq[...] = 0
        (sync_sum, sync_sumsq, sync_count) = self.sync(local_sum, local_sumsq, local_count)
        self.total_sum += sync_sum
        self.total_sumsq += sync_sumsq
        self.total_count += sync_count
        self.mean = (self.total_sum / self.total_count)
        # Var = E[x^2] - E[x]^2, floored at eps^2 for numerical stability.
        self.std = np.sqrt(np.maximum(np.square(self.eps), ((self.total_sumsq / self.total_count) - np.square((self.total_sum / self.total_count)))))
    def _mpi_average(self, x):
        # Allreduce-sum then divide by world size == element-wise mean.
        buf = np.zeros_like(x)
        MPI.COMM_WORLD.Allreduce(x, buf, op=MPI.SUM)
        buf /= MPI.COMM_WORLD.Get_size()
        return buf
    def normalize(self, v, clip_range=None):
        # Standardise v and clip to +/- clip_range (default from __init__).
        if (clip_range is None):
            clip_range = self.default_clip_range
        return np.clip(((v - self.mean) / self.std), (- clip_range), clip_range)
class SingleLayerFunctionalConvModel(torch.nn.Module):
    """A minimal model consisting of a single ``FunctionalConv2d`` layer."""

    def __init__(self):
        super().__init__()
        self.conv1 = FunctionalConv2d()

    def forward(self, x):
        """Apply the single convolution to ``x`` and return the result."""
        return self.conv1(x)
def train(config: TrainArgs):
    """Fine-tune a pretrained HF causal-LM with LoRA adapters.

    Loads the base model through ``HFCheckpointConverter``, wraps it with
    LoRA parameters (only those are trainable), trains with the configured
    optimizer/trainer, and periodically saves PEFT and/or merged HF
    checkpoints.
    """
    config.trainer.initialize(config)
    # Convert the HF checkpoint/tokenizer into this framework's format.
    converter = HFCheckpointConverter.from_hf(config.model_name_or_path, trust_remote_code=config.trust_remote_code)
    model_config = converter.default_config
    tokenizer = copy.deepcopy(converter.tokenizer)
    if tokenizer.pad_token_id is None:
        # No pad token defined: fall back to token id 0 for padding.
        tokenizer.pad_token = tokenizer.convert_ids_to_tokens(0)
    tokenizer.model_max_length = config.max_tune_length
    (training_key, lora_key) = jrandom.split(jrandom.PRNGKey(config.trainer.seed), 2)
    train_dataset = mk_dataset(config, tokenizer)
    optimizer = config.optimizer.build(config.trainer.num_train_steps)
    with config.trainer.device_mesh:
        parameter_axis_mapping = config.trainer.parameter_axis_mapping
        logger.info(f'Loading pretrained model from {converter.reference_checkpoint}')
        model: LmHeadModel = converter.load_pretrained(model_config, axis_mapping=parameter_axis_mapping)

        # BUG FIX: the jit wrapper was being called as a bare statement and
        # its result discarded; it must decorate loraize_hf_model so the
        # loraization actually runs jitted with the sharding/donation args.
        @_jit(axis_resources=parameter_axis_mapping, donate_args=True)
        def loraize_hf_model(model):
            return loraize(model, config.lora, key=lora_key)

        model = loraize_hf_model(model)
        # Only the LoRA parameters are trainable.
        lora_param_filter = lora_trainable_params_filter(model)

        def compute_loss(model: LmHeadModel, example: LmExample, key=None):
            return model.compute_loss(example, key=key).scalar()

        trainer = Trainer(config.trainer, optimizer, compute_loss, is_trainable=lora_param_filter)
        trainer.add_default_hooks()
        state = trainer.initial_state(training_key, model=model)
        all_param_count = parameter_count(state.model)
        just_lora_params = parameter_count(trainer.trainable_params_only(state.model))
        wandb.summary['parameter_count'] = all_param_count
        wandb.summary['trainable_parameter_count'] = just_lora_params
        logger.info(f'Total parameter count: {all_param_count}')
        logger.info(f'Trainable parameter count: {just_lora_params}')
        # BUG FIX: the fraction was computed as `... % 0.3` (modulo 0.3),
        # a typo for the `.3%` percentage format specifier.
        logger.info(f'Fraction of parameters that are trainable: {just_lora_params * 1.0 / all_param_count:.3%}')
        loader = trainer.replicated_loader(train_dataset, trainer.TrainBatch)
        loader = non_caching_cycle(loader)
        if state.step != 0:
            # Resuming: fast-forward the loader so the data order matches an
            # uninterrupted run.
            logger.info(f'Resuming training from step {state.step}')
            for i in range(state.step):
                next(loader)
        if config.hf_save_path is not None:
            full_save_path = os.path.join(config.hf_save_path, trainer.run_id)
            trainer.add_hook(save_peft_checkpoint_callback(full_save_path, config.lora, config.model_name_or_path, tokenizer, config.hf_upload), every=config.hf_save_steps)
        if config.merged_hf_save_path is not None:
            full_save_path = os.path.join(config.merged_hf_save_path, trainer.run_id)
            trainer.add_hook(save_merged_hf_checkpoint_callback(full_save_path, converter, config.merged_hf_upload), every=config.hf_save_steps)
        trainer.train(state, loader)
def get_fuzzer_from_type_str(dtype_str: str, elements: List, p=0.5, max_l0=float('inf')) -> BaseFuzzer:
    """Select a fuzzer for a SQL column based on its declared type string.

    The checks are order-dependent: date/timestamp must be tested before the
    generic numeric families, and the numeric families before the character
    ones.  NOTE(review): if ``dtype_str`` matches no known family the
    function implicitly returns ``None`` — callers must tolerate that.
    """
    # Randomly drop some seed elements to diversify fuzzing runs.
    elements = [e for e in elements if (random.random() > ELEMENT_DROPOUT)]
    dtype_str = dtype_str.lower()
    if (dtype_str == 'time'):
        return TimeFuzzer(elements, p=p, max_l0=max_l0)
    if ((dtype_str == '') or (dtype_str == 'blob')):
        # Untyped or binary columns are fuzzed as strings.
        return StringFuzzer(elements, p=p, max_l0=max_l0)
    if (('date' in dtype_str) or ('timestamp' in dtype_str)):
        # Some schemas store dates as integers; use a numeric fuzzer then.
        if ((len(elements) > 0) and isint(elements[0])):
            return NumberFuzzer(elements, p=p, max_l0=max_l0, scale=4)
        return DateFuzzer(elements, p=p, max_l0=max_l0)
    if ('bool' in dtype_str):
        return BoolFuzzer(elements, p=p)
    if ('bit' in dtype_str):
        return BitFuzzer(elements, p=p)
    if ('year' in dtype_str):
        # Years are 4-digit numbers.
        return NumberFuzzer(elements, p=p, scale=4, max_l0=max_l0)
    unsigned = False
    if ('unsigned' in dtype_str):
        unsigned = True
    # Parse the "(scale[,precision])" suffix, e.g. decimal(10,2).
    args = []
    if ('(' in dtype_str):
        args = [int(x) for x in dtype_str[(dtype_str.index('(') + 1):dtype_str.index(')')].split(',')]
    for s in number_dtype_str:
        if (s in dtype_str):
            (scale, precision) = (10, 0)
            if (len(args) != 0):
                scale = args[0]
            if (len(args) > 1):
                # The declared scale counts all digits; keep only the
                # integral digits here and track the fraction separately.
                scale -= args[1]
                precision = args[1]
            is_int = ('int' in dtype_str)
            return NumberFuzzer(elements, p, max_l0=max_l0, scale=scale, unsigned=unsigned, is_int=is_int, precision=precision)
    for s in char_dtype_str:
        if (s in dtype_str):
            # Character types default to length 20 when undeclared.
            length = 20
            if (len(args) != 0):
                length = args[0]
            return StringFuzzer(elements, p, max_l0=max_l0, length=length)
def main(_):
    """Entry point: build an NMT training graph and run it under Parallax.

    Mirrors tensorflow/nmt's training driver but delegates distributed
    execution (session creation, worker/replica layout) to
    ``parallax.parallel_run``.
    """
    default_hparams = nmt.create_hparams(FLAGS)
    out_dir = FLAGS.out_dir
    if (not tf.gfile.Exists(out_dir)):
        tf.gfile.MakeDirs(out_dir)
    hparams = nmt.create_or_load_hparams(out_dir, default_hparams, FLAGS.hparams_path, save_hparams=False)
    log_device_placement = hparams.log_device_placement
    out_dir = hparams.out_dir
    num_train_steps = hparams.num_train_steps
    steps_per_stats = hparams.steps_per_stats
    avg_ckpts = hparams.avg_ckpts
    # Select the model class from the attention configuration.
    if (not hparams.attention):
        model_creator = nmt_model.Model
    elif ((hparams.encoder_type == 'gnmt') or (hparams.attention_architecture in ['gnmt', 'gnmt_v2'])):
        model_creator = gnmt_model.GNMTModel
    elif (hparams.attention_architecture == 'standard'):
        model_creator = attention_model.AttentionModel
    else:
        raise ValueError(('Unknown attention architecture %s' % hparams.attention_architecture))
    train_model = model_helper.create_train_model(model_creator, hparams, scope=None)
    config_proto = utils.get_config_proto(log_device_placement=log_device_placement, num_intra_threads=1, num_inter_threads=36)
    def run(train_sess, num_workers, worker_id, num_replicas_per_worker):
        # Per-worker training loop, invoked below with the Parallax session.
        random_seed = FLAGS.random_seed
        if ((random_seed is not None) and (random_seed > 0)):
            # Offset the seed by worker id so workers see different shuffles.
            utils.print_out(('# Set random seed to %d' % random_seed))
            random.seed((random_seed + worker_id))
            np.random.seed((random_seed + worker_id))
        log_file = os.path.join(out_dir, ('log_%d' % time.time()))
        log_f = tf.gfile.GFile(log_file, mode='a')
        utils.print_out(('# log_file=%s' % log_file), log_f)
        global_step = train_sess.run(train_model.model.global_step)[0]
        last_stats_step = global_step
        (stats, info, start_train_time) = before_train(train_model, train_sess, global_step, hparams, log_f, num_replicas_per_worker)
        epoch_steps = (FLAGS.epoch_size / ((FLAGS.batch_size * num_workers) * num_replicas_per_worker))
        for i in range(FLAGS.max_steps):
            start_time = time.time()
            # Re-initialise the input iterator at each epoch boundary.
            if ((hparams.epoch_step != 0) and ((hparams.epoch_step % epoch_steps) == 0)):
                hparams.epoch_step = 0
                skip_count = train_model.skip_count_placeholder
                feed_dict = {}
                feed_dict[skip_count] = [0 for i in range(num_replicas_per_worker)]
                init = train_model.iterator.initializer
                train_sess.run(init, feed_dict=feed_dict)
            if (worker_id == 0):
                # Chief worker also fetches loss/summary/stat tensors.
                results = train_sess.run([train_model.model.update, train_model.model.train_loss, train_model.model.predict_count, train_model.model.train_summary, train_model.model.global_step, train_model.model.word_count, train_model.model.batch_size, train_model.model.grad_norm, train_model.model.learning_rate])
                step_result = [r[0] for r in results]
            else:
                (global_step, _) = train_sess.run([train_model.model.global_step, train_model.model.update])
            hparams.epoch_step += 1
            if (worker_id == 0):
                # Periodic stats logging, mirroring nmt's own train loop.
                (global_step, info['learning_rate'], step_summary) = train.update_stats(stats, start_time, step_result)
                if ((global_step - last_stats_step) >= steps_per_stats):
                    last_stats_step = global_step
                    is_overflow = train.process_stats(stats, info, global_step, steps_per_stats, log_f)
                    train.print_step_info('  ', global_step, info, train._get_best_results(hparams), log_f)
                    if is_overflow:
                        break
                    stats = train.init_stats()
    (sess, num_workers, worker_id, num_replicas_per_worker) = parallax.parallel_run(train_model.graph, FLAGS.resource_info_file, sync=FLAGS.sync, parallax_config=parallax_config.build_config())
    run(sess, num_workers, worker_id, num_replicas_per_worker)
def update_config(config, args):
    """Overlay command-line ``args`` onto ``config`` and freeze the result.

    Only truthy argument values override the config, so empty/zero/False CLI
    defaults leave the file-based settings untouched.
    """
    _update_config_from_file(config, args.cfg)
    config.defrost()
    if args.opts:
        config.merge_from_list(args.opts)
    # (arg name, node that owns the option, option name): copied when truthy.
    value_options = (
        ('batch_size', config.DATA, 'BATCH_SIZE'),
        ('data_path', config.DATA, 'DATA_PATH'),
        ('cache_mode', config.DATA, 'CACHE_MODE'),
        ('resume', config.MODEL, 'RESUME'),
        ('accumulation_steps', config.TRAIN, 'ACCUMULATION_STEPS'),
        ('amp_opt_level', config, 'AMP_OPT_LEVEL'),
        ('output', config, 'OUTPUT'),
        ('tag', config, 'TAG'),
    )
    for arg_name, node, option in value_options:
        value = getattr(args, arg_name)
        if value:
            setattr(node, option, value)
    # Boolean flags: presence switches the corresponding mode on.
    flag_options = (
        ('zip', config.DATA, 'ZIP_MODE'),
        ('use_checkpoint', config.TRAIN, 'USE_CHECKPOINT'),
        ('eval', config, 'EVAL_MODE'),
        ('throughput', config, 'THROUGHPUT_MODE'),
    )
    for arg_name, node, option in flag_options:
        if getattr(args, arg_name):
            setattr(node, option, True)
    config.LOCAL_RANK = args.local_rank
    # Final output directory: <output>/<model name>/<tag>.
    config.OUTPUT = os.path.join(config.OUTPUT, config.MODEL.NAME, config.TAG)
    config.freeze()
class TestInnerAngleRepresentation(TestCase):
    """Tests for the module-level ``representation`` (inner-angle) function."""
    def test_call_value_should_be_inner_angle(self):
        # Three 3-D points, each wrapped to shape (1, 1, 1, 3); p2 is the
        # vertex of the angle.
        p1s = tf.constant([[[[2, 3, 4]]]], dtype=tf.float32)
        p2s = tf.constant([[[[1, 1, 1]]]], dtype=tf.float32)
        p3s = tf.constant([[[[3, 4, 2]]]], dtype=tf.float32)
        angles = representation(p1s, p2s, p3s)
        # cos(angle) = (p1-p2)·(p3-p2) / (|p1-p2| |p3-p2|) = 11/14 here:
        # v1=(1,2,3), v2=(2,3,1), dot=11, both norms sqrt(14).
        self.assertAlmostEqual(float(angles[0][0][0]), math.acos((11 / 14)))
import pytest


@pytest.mark.parametrize('k_max,p,interest_extractor,add_pos', [(2, 1, 'sa', True), (1, 100, 'dr', False), (3, 50, 'dr', True)])
def test_COMIREC(k_max, p, interest_extractor, add_pos):
    """Smoke-test the ComiRec retrieval model over several configurations.

    BUG FIX: the parametrize decorator had lost its ``@pytest.mark`` prefix,
    leaving a bare (syntactically invalid) ``.parametrize(...)`` expression.
    """
    model_name = 'COMIREC'
    (x, y, user_feature_columns, item_feature_columns) = get_xy_fd(False)
    if (tf.__version__ >= '2.0.0'):
        # The model relies on graph-mode constructs (sampled softmax).
        tf.compat.v1.disable_eager_execution()
    else:
        K.set_learning_phase(True)
    sampler_config = NegativeSampler(sampler='uniform', num_sampled=2, item_name='item')
    model = ComiRec(user_feature_columns, item_feature_columns, k_max=k_max, p=p, interest_extractor=interest_extractor, add_pos=add_pos, sampler_config=sampler_config)
    model.compile('adam', sampledsoftmaxloss)
    check_model(model, model_name, x, y)
def test_single_objective_set_normalized():
    """With normalization enabled, the descent vector equals the max empirical loss."""
    cdv = SingleObjectiveCDV(max_empirical_loss=max_empirical_loss_1)
    cdv.set_normalized(True)
    descent = cdv.get_descent_vector([loss_1])
    assert (descent.data == max_empirical_loss_1.data)
def plot_full(df_training, df_final, rank, name):
    """Generate the full suite of training/metric/ranking plots under ``name``.

    ``name`` doubles as the output directory and as a tag: 'Classification'
    selects the Error ylabel, 'MHA'/'RNN' select architecture-specific plot
    sets.  ``rank`` is a 2-element list of ranking dicts that get merged for
    the combined pie charts.
    """
    if ('Classification' in name):
        ylabel = 'Error'
    else:
        ylabel = 'Loss'
    # Normalise metric column names to the labels used in the paper figures.
    df_final = df_final.rename(columns={'Collapse': 'Collapse-Avg', 'Collapse Worst': 'Collapse-Worst', 'Specialization': 'Adaptation', 'Mutual Information': 'Inverse Mutual Information', 'Hungarian Score': 'Alignment'})
    print(list(df_final))
    df_training_mini = df_training[(df_training['Iteration'] > 0)]
    df_final = df_final[(df_final['Eval Mode'] == 'last')]
    # Hue orders keep model colors consistent across every figure.
    hue_perf = ['GT-Modular', 'Modular-op', 'Modular', 'Monolithic']
    hue_metrics = ['GT-Modular', 'Modular-op', 'Modular', 'Random']
    hue_pie = ['GT-Modular', 'Modular', 'Modular-op', 'Monolithic']
    hue_pie_sub = ['Modular', 'Monolithic']
    # --- Training curves ---
    if ('MHA' in name):
        plot_line(df_training, 'Iteration', 'Perf', 'Rule', 'Search-Version', 'Model', f'{name}/training_rs.pdf', title='Search-Version: {row_name} | Rules: {col_name}', xlabel='Iterations', ylabel=ylabel, hue_order=hue_perf)
    plot_line(df_training, 'Iteration', 'Perf', 'Rule', None, 'Model', f'{name}/training_r.pdf', title='Rules: {col_name}', xlabel='Iterations', ylabel=ylabel, hue_order=hue_perf)
    plot_line(df_training, 'Iteration', 'Perf', None, None, 'Model', f'{name}/training.pdf', title=None, xlabel='Iterations', ylabel=ylabel, hue_order=hue_perf)
    if ('MHA' in name):
        plot_line(df_training_mini, 'Iteration', 'Perf', 'Rule', 'Search-Version', 'Model', f'{name}/training_mrs.pdf', title='Search-Version: {row_name} | Rules: {col_name}', xlabel='Iterations', ylabel=ylabel, hue_order=hue_perf)
    plot_line(df_training_mini, 'Iteration', 'Perf', 'Rule', None, 'Model', f'{name}/training_mr.pdf', title='Rules: {col_name}', xlabel='Iterations', ylabel=ylabel, hue_order=hue_perf)
    plot_line(df_training_mini, 'Iteration', 'Perf', None, None, 'Model', f'{name}/training_m.pdf', title=None, xlabel='Iterations', ylabel=ylabel, hue_order=hue_perf)
    # --- Final performance vs number of rules ---
    plot_line(df_final, 'Rules', 'Perf', None, None, 'Model', f'{name}/perf_r.pdf', title=None, xlabel='Rules', ylabel=ylabel, hue_order=hue_perf, marker='o')
    plot_line(df_final, 'Rules', 'Perf-OoD', None, None, 'Model', f'{name}/perf_ood_r.pdf', title=None, xlabel='Rules', ylabel=ylabel, hue_order=hue_perf, marker='o')
    # --- Per-checkpoint performance vs capacity (log scale) ---
    if ('MHA' in name):
        for i in [3, 5, 10, 20, 30]:
            plot_log_line(df_final, 'Number of Parameters', f'Perf-{i}', 'Rules', 'Search-Version', 'Model', f'{name}/perf_{i}_rs.pdf', title='Search-Version: {row_name} | Rules: {col_name}', xlabel=None, ylabel=ylabel, hue_order=hue_perf)
            plot_log_line(df_final, 'Number of Parameters', f'Perf-OoD-{i}', 'Rules', 'Search-Version', 'Model', f'{name}/perf_{i}_ood_rs.pdf', title='Search-Version: {row_name} | Rules: {col_name}', xlabel=None, ylabel=ylabel, hue_order=hue_perf)
            plot_log_line(df_final, 'Dimension', f'Perf-{i}', 'Rules', 'Search-Version', 'Model', f'{name}/perf_{i}_drs.pdf', title='Search-Version: {row_name} | Rules: {col_name}', xlabel=None, ylabel=ylabel, hue_order=hue_perf)
            plot_log_line(df_final, 'Dimension', f'Perf-OoD-{i}', 'Rules', 'Search-Version', 'Model', f'{name}/perf_{i}_ood_drs.pdf', title='Search-Version: {row_name} | Rules: {col_name}', xlabel=None, ylabel=ylabel, hue_order=hue_perf)
    elif ('RNN' in name):
        for i in [3, 5, 10, 20, 30]:
            plot_log_line(df_final, 'Number of Parameters', f'Perf-{i}', 'Rules', None, 'Model', f'{name}/perf_{i}_r.pdf', title='Rules: {col_name}', xlabel=None, ylabel=ylabel, hue_order=hue_perf)
            plot_log_line(df_final, 'Number of Parameters', f'Perf-OoD-{i}', 'Rules', None, 'Model', f'{name}/perf_{i}_ood_r.pdf', title='Rules: {col_name}', xlabel=None, ylabel=ylabel, hue_order=hue_perf)
            plot_log_line(df_final, 'Dimension', f'Perf-{i}', 'Rules', None, 'Model', f'{name}/perf_{i}_dr.pdf', title='Rules: {col_name}', xlabel=None, ylabel=ylabel, hue_order=hue_perf)
            plot_log_line(df_final, 'Dimension', f'Perf-OoD-{i}', 'Rules', None, 'Model', f'{name}/perf_{i}_ood_dr.pdf', title='Rules: {col_name}', xlabel=None, ylabel=ylabel, hue_order=hue_perf)
    # --- Final performance vs parameters / dimension (log scale) ---
    if ('MHA' in name):
        plot_log_line(df_final, 'Number of Parameters', 'Perf', 'Rules', 'Search-Version', 'Model', f'{name}/perf_rs.pdf', title='Search-Version: {row_name} | Rules: {col_name}', xlabel=None, ylabel=ylabel, hue_order=hue_perf)
        plot_log_line(df_final, 'Number of Parameters', 'Perf-OoD', 'Rules', 'Search-Version', 'Model', f'{name}/perf_ood_rs.pdf', title='Search-Version: {row_name} | Rules: {col_name}', xlabel=None, ylabel=ylabel, hue_order=hue_perf)
        plot_log_line(df_final, 'Dimension', 'Perf', 'Rules', 'Search-Version', 'Model', f'{name}/perf_drs.pdf', title='Search-Version: {row_name} | Rules: {col_name}', xlabel=None, ylabel=ylabel, hue_order=hue_perf)
        plot_log_line(df_final, 'Dimension', 'Perf-OoD', 'Rules', 'Search-Version', 'Model', f'{name}/perf_ood_drs.pdf', title='Search-Version: {row_name} | Rules: {col_name}', xlabel=None, ylabel=ylabel, hue_order=hue_perf)
    plot_log_line(df_final, 'Number of Parameters', 'Perf', 'Rules', None, 'Model', f'{name}/perf_pr.pdf', title='Rules: {col_name}', xlabel=None, ylabel=ylabel, hue_order=hue_perf)
    plot_log_line(df_final, 'Number of Parameters', 'Perf-OoD', 'Rules', None, 'Model', f'{name}/perf_ood_pr.pdf', title='Rules: {col_name}', xlabel=None, ylabel=ylabel, hue_order=hue_perf)
    plot_log_line(df_final, 'Number of Parameters', 'Perf', None, None, 'Model', f'{name}/perf_p.pdf', title=None, xlabel=None, ylabel=ylabel, hue_order=hue_perf)
    plot_log_line(df_final, 'Number of Parameters', 'Perf-OoD', None, None, 'Model', f'{name}/perf_ood_p.pdf', title=None, xlabel=None, ylabel=ylabel, hue_order=hue_perf)
    plot_log_line(df_final, 'Dimension', 'Perf', 'Rules', None, 'Model', f'{name}/perf_dr.pdf', title='Rules: {col_name}', xlabel=None, ylabel=ylabel, hue_order=hue_perf)
    plot_log_line(df_final, 'Dimension', 'Perf-OoD', 'Rules', None, 'Model', f'{name}/perf_ood_dr.pdf', title='Rules: {col_name}', xlabel=None, ylabel=ylabel, hue_order=hue_perf)
    plot_log_line(df_final, 'Dimension', 'Perf', None, None, 'Model', f'{name}/perf_d.pdf', title=None, xlabel=None, ylabel=ylabel, hue_order=hue_perf)
    plot_log_line(df_final, 'Dimension', 'Perf-OoD', None, None, 'Model', f'{name}/perf_ood_d.pdf', title=None, xlabel=None, ylabel=ylabel, hue_order=hue_perf)
    # --- Specialization/collapse metric bar plots ---
    plot_metrics(df_final, 'Rules', 'Collapse-Worst', 'Model', f'{name}/cw_r.pdf', hue_order=hue_metrics)
    plot_metrics(df_final, 'Rules', 'Collapse-Avg', 'Model', f'{name}/ca_r.pdf', hue_order=hue_metrics)
    plot_metrics(df_final, 'Rules', 'Adaptation', 'Model', f'{name}/adap_r.pdf', hue_order=hue_metrics)
    plot_metrics(df_final, 'Rules', 'Inverse Mutual Information', 'Model', f'{name}/imi_r.pdf', hue_order=hue_metrics)
    plot_metrics(df_final, 'Rules', 'Alignment', 'Model', f'{name}/h_r.pdf', hue_order=hue_metrics)
    plot_metrics(df_final, 'Model', 'Collapse-Worst', 'Model', f'{name}/cw.pdf', dodge=False, hue_order=hue_metrics)
    plot_metrics(df_final, 'Model', 'Collapse-Avg', 'Model', f'{name}/ca.pdf', dodge=False, hue_order=hue_metrics)
    plot_metrics(df_final, 'Model', 'Adaptation', 'Model', f'{name}/adap.pdf', dodge=False, hue_order=hue_metrics)
    plot_metrics(df_final, 'Model', 'Inverse Mutual Information', 'Model', f'{name}/imi.pdf', dodge=False, hue_order=hue_metrics)
    plot_metrics(df_final, 'Model', 'Alignment', 'Model', f'{name}/h.pdf', dodge=False, hue_order=hue_metrics)
    # --- Metrics bucketed by model capacity (Small/Medium/Large) ---
    df = df_final[df_final['Encoder Dimension'].isin([32, 128, 512])].copy()
    df['Capacity'] = df['Encoder Dimension']
    df['Capacity'] = df['Capacity'].replace({32: 'Small', 128: 'Medium', 512: 'Large'})
    if ('MHA' in name):
        plot_metrics_full(df, 'Capacity', 'Collapse-Worst', 'Rules', 'Search-Version', 'Model', f'{name}/cw_full_s.pdf', hue_order=hue_metrics)
        plot_metrics_full(df, 'Capacity', 'Collapse-Avg', 'Rules', 'Search-Version', 'Model', f'{name}/ca_full_s.pdf', hue_order=hue_metrics)
        plot_metrics_full(df, 'Capacity', 'Adaptation', 'Rules', 'Search-Version', 'Model', f'{name}/adap_full_s.pdf', hue_order=hue_metrics)
        plot_metrics_full(df, 'Capacity', 'Inverse Mutual Information', 'Rules', 'Search-Version', 'Model', f'{name}/imi_full_s.pdf', hue_order=hue_metrics)
        plot_metrics_full(df, 'Capacity', 'Alignment', 'Rules', 'Search-Version', 'Model', f'{name}/h_full_s.pdf', hue_order=hue_metrics)
    plot_metrics_full(df, 'Capacity', 'Collapse-Worst', 'Rules', None, 'Model', f'{name}/cw_full.pdf', hue_order=hue_metrics)
    plot_metrics_full(df, 'Capacity', 'Collapse-Avg', 'Rules', None, 'Model', f'{name}/ca_full.pdf', hue_order=hue_metrics)
    plot_metrics_full(df, 'Capacity', 'Adaptation', 'Rules', None, 'Model', f'{name}/adap_full.pdf', hue_order=hue_metrics)
    plot_metrics_full(df, 'Capacity', 'Inverse Mutual Information', 'Rules', None, 'Model', f'{name}/imi_full.pdf', hue_order=hue_metrics)
    plot_metrics_full(df, 'Capacity', 'Alignment', 'Rules', None, 'Model', f'{name}/h_full.pdf', hue_order=hue_metrics)
    # --- Ranking pie charts (per search-version for MHA, then merged) ---
    if ('MHA' in name):
        plot_pie(rank[0]['perf_last'], hue_pie, colors, f'{name}/rank_1.pdf')
        plot_pie(rank[0]['perf_ood_last'], hue_pie, colors, f'{name}/rank_ood_1.pdf')
        plot_pie(rank[0]['perf_sub_last'], hue_pie_sub, [colors[1], colors[(- 1)]], f'{name}/rank_sub_1.pdf', circle=False)
        plot_pie(rank[0]['perf_ood_sub_last'], hue_pie_sub, [colors[1], colors[(- 1)]], f'{name}/rank_ood_sub_1.pdf', circle=False)
        plot_pie(rank[1]['perf_last'], hue_pie, colors, f'{name}/rank_2.pdf')
        plot_pie(rank[1]['perf_ood_last'], hue_pie, colors, f'{name}/rank_ood_2.pdf')
        plot_pie(rank[1]['perf_sub_last'], hue_pie_sub, [colors[1], colors[(- 1)]], f'{name}/rank_sub_2.pdf', circle=False)
        plot_pie(rank[1]['perf_ood_sub_last'], hue_pie_sub, [colors[1], colors[(- 1)]], f'{name}/rank_ood_sub_2.pdf', circle=False)
    # Merge the two ranking dicts key-wise for the combined charts.
    r = dict()
    for key in rank[0].keys():
        r[key] = (rank[0][key] + rank[1][key])
    rank = r
    plot_pie(rank['perf_last'], hue_pie, colors, f'{name}/rank.pdf')
    plot_pie(rank['perf_ood_last'], hue_pie, colors, f'{name}/rank_ood.pdf')
    plot_pie(rank['perf_sub_last'], hue_pie_sub, [colors[1], colors[(- 1)]], f'{name}/rank_sub.pdf', circle=False)
    plot_pie(rank['perf_ood_sub_last'], hue_pie_sub, [colors[1], colors[(- 1)]], f'{name}/rank_ood_sub.pdf', circle=False)
    # --- Matched (rules, dimension) slice: capacity scales with rule count ---
    df = df_final[df_final[['Rules', 'Encoder Dimension']].apply(tuple, 1).isin([(2, 32), (4, 64), (8, 128), (16, 256), (32, 512)])].copy()
    plot_line(df, 'Rules', 'Perf', None, None, 'Model', f'{name}/perf_mr.pdf', title=None, xlabel='Rules', ylabel=ylabel, hue_order=hue_perf, marker='o')
    plot_line(df, 'Rules', 'Perf-OoD', None, None, 'Model', f'{name}/perf_ood_mr.pdf', title=None, xlabel='Rules', ylabel=ylabel, hue_order=hue_perf, marker='o')
def get_nvidia_smi():
    """Return the command used to invoke ``nvidia-smi`` on this platform.

    On Windows the binary is typically not on PATH, so the quoted full path
    under Program Files is returned instead of the bare command name.
    """
    if get_platform() == 'win32':
        return '"C:\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi"'
    return 'nvidia-smi'
class KLDivLoss(_Loss):
    """Kullback-Leibler divergence loss — a thin wrapper over ``F.kl_div``.

    ``input`` is expected in log-space; ``target`` is in log-space only when
    ``log_target`` is True.  ``reduction`` follows the usual loss semantics
    ('none' | 'batchmean' | 'sum' | 'mean').
    """
    __constants__ = ['reduction']

    def __init__(self, size_average=None, reduce=None, reduction: str='mean', log_target: bool=False) -> None:
        # size_average/reduce are deprecated reduction knobs handled by _Loss.
        super(KLDivLoss, self).__init__(size_average, reduce, reduction)
        self.log_target = log_target

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        """Compute the KL divergence between ``target`` and ``input``."""
        return F.kl_div(input, target, reduction=self.reduction, log_target=self.log_target)
def download_url(url, model_dir='~/.torch/proxyless_nas', overwrite=False):
    """Fetch ``url`` into ``model_dir`` (created on demand) and return the local path.

    The download is skipped when the file is already cached, unless
    ``overwrite`` is set.  The cached filename is the last path component
    of the URL.
    """
    target_dir = os.path.expanduser(model_dir)
    target_file = os.path.join(target_dir, url.split('/')[-1])
    if overwrite or not os.path.exists(target_file):
        os.makedirs(target_dir, exist_ok=True)
        sys.stderr.write('Downloading: "{}" to {}\n'.format(url, target_file))
        urlretrieve(url, target_file)
    return target_file
def get_nae_v2(**model_cfg):
    """Build an NAE v2 model from a configuration dict.

    Only the ``nae_l2`` architecture with 'omi' sampling is supported; any
    other combination raises ``ValueError`` (after the encoder/decoder nets
    have been constructed, matching the original control flow).
    """
    arch = model_cfg.pop('arch')
    sampling = model_cfg.pop('sampling')
    x_dim, z_dim = model_cfg['x_dim'], model_cfg['z_dim']
    encoder = get_net(in_dim=x_dim, out_dim=z_dim, **model_cfg['encoder'])
    decoder = get_net(in_dim=z_dim, out_dim=x_dim, **model_cfg['decoder'])
    if not (arch == 'nae_l2' and sampling == 'omi'):
        raise ValueError(f'Invalid sampling: {sampling}')
    sampler_z = get_sampler(**model_cfg['sampler_z'])
    sampler_x = get_sampler(**model_cfg['sampler_x'])
    return NAE_L2_OMI(encoder, decoder, sampler_z, sampler_x, **model_cfg['nae'])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.