code stringlengths 101 5.91M |
|---|
class CNNGroupedDataset(Dataset):
    """Dataset over pre-grouped CNN test features stored as ``.npy`` files.

    Loads three pickled numpy arrays saved under ``<file_path>_*`` prefixes:
    per-sample image-feature groups, non-spatial feature groups, and labels.
    """

    def __init__(self, file_path, input_transform=None, target_transform=None):
        def _load(kind):
            # Arrays were saved with object dtype, hence allow_pickle.
            return np.load('%s_%s_grouped_test.npy' % (file_path, kind), allow_pickle=True)

        self.image_features = _load('image_feature')
        self.non_spatial_features = _load('non_spatial_feature')
        self.all_labels = _load('label')
        self.input_transform = input_transform
        self.target_transform = target_transform

    def __len__(self):
        # One sample per label group.
        return self.all_labels.shape[0]

    def __getitem__(self, idx):
        img_group = self.image_features[idx]
        non_spatial = self.non_spatial_features[idx]
        target = self.all_labels[idx].astype(np.single)
        if self.input_transform:
            # Transform each frame in the group, then stack into one tensor.
            # NOTE(review): the float32 cast of the non-spatial features only
            # happens when an input transform is supplied — confirm intended.
            img_group = torch.stack([self.input_transform(frame) for frame in img_group])
            non_spatial = non_spatial.astype(np.single)
        if self.target_transform:
            target = self.target_transform(target)
        return (img_group, non_spatial, target)
class SageMakerTestEnvironment():
    """Shared fixture configuration for SageMaker integration tests.

    ``framework`` ('pytorch' or 'tensorflow') selects the training image,
    the CloudWatch metric regexes and the job-name prefix.
    """
    framework: str
    # NOTE: account id intentionally blank in the committed fixture.
    role = 'arn:aws:iam:::role/sagemaker_execution_role'
    hyperparameters = {'task_name': 'mnli', 'per_device_train_batch_size': 16, 'per_device_eval_batch_size': 16, 'do_train': True, 'do_eval': True, 'do_predict': True, 'output_dir': '/opt/ml/model', 'overwrite_output_dir': True, 'max_steps': 500, 'save_steps': 5500}
    # Distributed runs train twice as long; everything else is inherited.
    distributed_hyperparameters = {**hyperparameters, 'max_steps': 1000}

    def metric_definitions(self) -> list:
        """Return CloudWatch metric definitions for the selected framework.

        Fix: was annotated ``-> str`` although it returns a list of dicts.
        """
        if (self.framework == 'pytorch'):
            return [{'Name': 'train_runtime', 'Regex': 'train_runtime.*=\\D*(.*?)$'}, {'Name': 'eval_accuracy', 'Regex': 'eval_accuracy.*=\\D*(.*?)$'}, {'Name': 'eval_loss', 'Regex': 'eval_loss.*=\\D*(.*?)$'}]
        else:
            # NOTE(review): the 'eval_accuracy' name is paired with a loss
            # regex and vice versa for TF — looks swapped; confirm upstream
            # intent before changing.
            return [{'Name': 'train_runtime', 'Regex': 'train_runtime.*=\\D*(.*?)$'}, {'Name': 'eval_accuracy', 'Regex': 'loss.*=\\D*(.*?)]?$'}, {'Name': 'eval_loss', 'Regex': 'sparse_categorical_accuracy.*=\\D*(.*?)]?$'}]

    def base_job_name(self) -> str:
        """Prefix for SageMaker training job names ('transfromers' typo kept: callers may depend on it)."""
        return f'{self.framework}-transfromers-test'

    def test_path(self) -> str:
        """Local path to the framework-specific test scripts."""
        return f'./tests/sagemaker/scripts/{self.framework}'

    def image_uri(self) -> str:
        """ECR URI of the HuggingFace DLC training image for this framework."""
        if (self.framework == 'pytorch'):
            return '.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04'
        else:
            return '.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04'
class TimeDistributedMaskCriterion(Criterion):
    """Criterion wrapper that applies ``criterion`` across the time dimension,
    ignoring steps equal to ``padding_value``.

    Thin binding over the BigDL JVM layer of the same name; the positional
    args after ``bigdl_type`` are forwarded to the JVM constructor, so their
    order must match the Scala signature.
    """

    def __init__(self, criterion, padding_value=0, bigdl_type='float'):
        # jvalue=None lets the base class instantiate the JVM object.
        super(TimeDistributedMaskCriterion, self).__init__(None, bigdl_type, criterion, padding_value)
class TimeDistributed(torch.nn.Module):
    """Apply ``module`` to every time step of ``(batch, time, ...)`` inputs.

    Each positional input is flattened to ``(batch * time, ...)``, run
    through the wrapped module, and the leading ``(batch, time)`` pair is
    restored on the output (or on every element of a tuple output).
    """

    def __init__(self, module):
        super(TimeDistributed, self).__init__()
        self._module = module

    def forward(self, *inputs, **kwargs):
        flattened = []
        for tensor in inputs:
            shape = tensor.size()
            if len(shape) <= 2:
                raise RuntimeError('No dimension to distribute: ' + str(shape))
            # Merge batch and time into one leading dimension.
            flattened.append(tensor.contiguous().view(-1, *shape[2:]))
        result = self._module(*flattened, **kwargs)

        def _unflatten(out):
            # Split the leading dim back into (batch, time). `shape` is the
            # size of the last positional input — same convention as before.
            return out.contiguous().view(shape[0], shape[1], *out.size()[1:])

        if isinstance(result, torch.Tensor):
            return _unflatten(result)
        if isinstance(result, tuple):
            return tuple(_unflatten(out) for out in result)
        raise ValueError('Not support!')
class Vocabulary(object):
    """Word <-> id vocabulary loaded from a one-token-per-line file.

    The first whitespace-separated token of each line is the word; ids are
    assigned by line order. ``unk_word`` is appended if absent from the file.
    """

    def __init__(self, vocab_file, start_word='<S>', end_word='</S>', unk_word='<UNK>', pad_word='<PAD>'):
        if (not tf.gfile.Exists(vocab_file)):
            # NOTE(review): tf.logging.fatal only logs — it does not raise,
            # so execution still reaches the open() below; confirm intended.
            tf.logging.fatal('Vocab file %s not found.', vocab_file)
        tf.logging.info('Initializing vocabulary from file: %s', vocab_file)
        with tf.gfile.GFile(vocab_file, mode='r') as f:
            reverse_vocab = list(f.readlines())
        # Keep only the token, dropping any count/frequency columns.
        reverse_vocab = [line.split()[0] for line in reverse_vocab]
        assert (start_word in reverse_vocab)
        assert (end_word in reverse_vocab)
        if (unk_word not in reverse_vocab):
            reverse_vocab.append(unk_word)
        # word -> id mapping, inverse of the id-ordered reverse_vocab list.
        vocab = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])
        print((' [TL] Vocabulary from %s : %s %s %s' % (vocab_file, start_word, end_word, unk_word)))
        print((' vocabulary with %d words (includes start_word, end_word, unk_word)' % len(vocab)))
        self.vocab = vocab
        self.reverse_vocab = reverse_vocab
        # Cache the ids of the special tokens. NOTE(review): pad_word is
        # looked up unconditionally — a file without it raises KeyError here.
        self.start_id = vocab[start_word]
        self.end_id = vocab[end_word]
        self.unk_id = vocab[unk_word]
        self.pad_id = vocab[pad_word]
        print((' start_id: %d' % self.start_id))
        print((' end_id: %d' % self.end_id))
        print((' unk_id: %d' % self.unk_id))
        print((' pad_id: %d' % self.pad_id))

    def word_to_id(self, word):
        """Return the id of ``word``, or the <UNK> id for unknown words."""
        if (word in self.vocab):
            return self.vocab[word]
        else:
            return self.unk_id

    def id_to_word(self, word_id):
        """Return the word for ``word_id``; out-of-range ids map to <UNK>."""
        if (word_id >= len(self.reverse_vocab)):
            return self.reverse_vocab[self.unk_id]
        else:
            return self.reverse_vocab[word_id]
class TestQuaternion(unittest.TestCase):
    """Symbolic sanity checks for the sophus Quaternion type."""

    def setUp(self):
        # Two fully symbolic quaternions a = (x, u) and b = (y, v).
        x, u0, u1, u2 = sympy.symbols('x u0 u1 u2', real=True)
        y, v0, v1, v2 = sympy.symbols('y v0 v1 v2', real=True)
        self.a = Quaternion(x, sophus.Vector3(u0, u1, u2))
        self.b = Quaternion(y, sophus.Vector3(v0, v1, v2))

    def test_muliplications(self):
        # a * a^-1 and a^-1 * a must both reduce to the identity quaternion.
        for product in ((self.a * self.a.inv()), (self.a.inv() * self.a)):
            self.assertEqual(product.simplify(), Quaternion.identity())

    def test_derivatives(self):
        # Element-wise sympy derivatives of a*b must match the closed-form
        # Jacobians with respect to a and to b.
        def jacobian(wrt):
            return sympy.Matrix(4, 4, (lambda r, c: sympy.diff((self.a * self.b)[r], wrt[c])))

        self.assertEqual(jacobian(self.a), Quaternion.Da_a_mul_b(self.a, self.b))
        self.assertEqual(jacobian(self.b), Quaternion.Db_a_mul_b(self.a, self.b))
class NN():
    """Four-layer MLP in TensorFlow 1.x: D -> H1 -> H2 -> d -> 10.

    Sigmoid activations with dropout after the first two hidden layers;
    the final layer is linear with 10 outputs (float64 throughout).
    """

    def __init__(self, H1=100, H2=100, d=2, D=64, p=0.3):
        with tf.name_scope('layer_1'):
            self.W1 = self.weight_var('W1', [D, H1])
            self.b1 = self.bias_var('b1', [H1])
        with tf.name_scope('layer_2'):
            self.W2 = self.weight_var('W2', [H1, H2])
            self.b2 = self.bias_var('b2', [H2])
        with tf.name_scope('layer_3'):
            self.W3 = self.weight_var('W3', [H2, d])
            self.b3 = self.bias_var('b3', [d])
        with tf.name_scope('layer_4'):
            self.W4 = self.weight_var('W4', [d, 10])
            self.b4 = self.bias_var('b4', [10])
        self.p = p
        self.d = d

    def weight_var(self, name, shape, W=None, trainable=True):
        """Create a float64 weight variable (orthogonal init unless W given).

        Fix: the original signature omitted ``self`` although the method is
        called as ``self.weight_var(...)``, which bound the instance to
        ``name`` and crashed at construction time.
        """
        if W is not None:
            init = tf.constant(W, dtype=tf.float64)
        else:
            init = tf.orthogonal_initializer()(shape=shape, dtype=tf.float64)
        return tf.get_variable(name, initializer=init, dtype=tf.float64, trainable=trainable)

    def bias_var(self, name, shape, b=None, trainable=True):
        """Create a float64 bias variable (zeros unless b given).

        Fix: added the missing ``self`` parameter (see weight_var).
        """
        if b is not None:
            init = tf.constant(b, dtype=tf.float64)
        else:
            init = tf.constant(0.0, shape=shape, dtype=tf.float64)
        return tf.get_variable(name, initializer=init, dtype=tf.float64, trainable=trainable)

    def predict(self, x):
        """Forward pass; returns (batch, 10) logits.

        NOTE(review): in TF1, tf.nn.dropout's second argument is *keep_prob*;
        with p=0.3 only 30% of units are kept — confirm that is intended.
        """
        l1 = tf.sigmoid((tf.matmul(x, self.W1) + self.b1))
        l1_d = tf.nn.dropout(l1, self.p)
        l2 = tf.sigmoid((tf.matmul(l1_d, self.W2) + self.b2))
        l2_d = tf.nn.dropout(l2, self.p)
        l3 = tf.sigmoid((tf.matmul(l2_d, self.W3) + self.b3))
        l4 = (tf.matmul(l3, self.W4) + self.b4)
        return tf.reshape(l4, [(- 1), 10])

    def initialize(self, sess):
        """Initialize all model variables in the given session."""
        sess.run(tf.variables_initializer(self.get_params()))

    def get_params(self):
        """All parameters in layer order: [W1, b1, ..., W4, b4]."""
        return [self.W1, self.b1, self.W2, self.b2, self.W3, self.b3, self.W4, self.b4]

    def out_dim(self):
        """Dimensionality of the bottleneck (layer-3) output."""
        return self.d

    def save_weights(self, sess):
        """Evaluate every parameter and save it to <name>.npy.

        Fix: the original unpacked get_params()'s 8 values into 7 names
        (ValueError at runtime) and never saved b4.
        """
        (W1, b1, W2, b2, W3, b3, W4, b4) = sess.run(self.get_params())
        np.save('W1.npy', W1)
        np.save('b1.npy', b1)
        np.save('W2.npy', W2)
        np.save('b2.npy', b2)
        np.save('W3.npy', W3)
        np.save('b3.npy', b3)
        np.save('W4.npy', W4)
        np.save('b4.npy', b4)
class AtariCategoricalQNetwork(network.Network):
    """Categorical (C51) Q-network for Atari that rescales uint8 frames.

    Wraps a tf_agents CategoricalQNetwork, converting the observation spec
    (and incoming observations) to float32 and normalising pixel values
    from [0, 255] to [0, 1].
    """

    def __init__(self, input_tensor_spec, action_spec, **kwargs):
        # Stateless network: empty state_spec.
        super(AtariCategoricalQNetwork, self).__init__(input_tensor_spec, state_spec=())
        # Re-declare the spec as float32; the cast happens in call().
        input_tensor_spec = tf.TensorSpec(dtype=tf.float32, shape=input_tensor_spec.shape)
        self._categorical_q_network = categorical_q_network.CategoricalQNetwork(input_tensor_spec, action_spec, **kwargs)

    def num_atoms(self):
        """Number of atoms of the underlying categorical distribution.

        NOTE(review): the wrapped network exposes ``num_atoms`` as a
        property; callers of this wrapper must call it as a method — confirm
        whether this was meant to be a @property.
        """
        return self._categorical_q_network.num_atoms

    def call(self, observation, step_type=None, network_state=()):
        """Normalise the observation to float32 in [0, 1] and delegate."""
        state = tf.cast(observation, tf.float32)
        # Pixel frames arrive in [0, 255].
        state = (state / 255)
        return self._categorical_q_network(state, step_type=step_type, network_state=network_state)
def get_data_creator(backend='torch'):
    """Return a backend-specific data-creator closure.

    The returned callable takes a ``config`` dict (must contain
    'batch_size') and builds a training pipeline from ``get_tsdataset()``.
    Unknown backends yield None (preserved behaviour).
    """
    if backend == 'torch':
        def data_creator(config):
            import torch
            from torch.utils.data import TensorDataset, DataLoader
            tsdata = get_tsdataset()
            x, y = tsdata.roll(lookback=7, horizon=1).to_numpy()
            dataset = TensorDataset(torch.from_numpy(x).float(),
                                    torch.from_numpy(y).float())
            return DataLoader(dataset, batch_size=config['batch_size'], shuffle=True)

        return data_creator
    if backend == 'keras':
        def data_creator(config):
            tsdata = get_tsdataset()
            # roll() mutates tsdata in place; to_tf_dataset consumes it.
            tsdata.roll(lookback=7, horizon=1)
            return tsdata.to_tf_dataset(batch_size=config['batch_size'], shuffle=True)

        return data_creator
class DataProviderLayer(caffe.Layer):
    """Caffe Python layer feeding visual-grounding batches to the network.

    Produces nine tops: question/context word indices (time-major), bottom-up
    image features, spatial features, query labels (+ mask) and bbox
    regression targets/weights. Data is only produced in the TRAIN phase.
    """

    def setup(self, bottom, top):
        """Parse layer params and pre-shape all top blobs."""
        self.bottomup_feat_dim = cfg.BOTTOMUP_FEAT_DIM
        self.query_maxlen = cfg.QUERY_MAXLEN
        # param_str_ holds a JSON dict {'split': ..., 'batchsize': ...}
        # (parsed twice — harmless but redundant).
        self.split = json.loads(self.param_str_)['split']
        self.batchsize = json.loads(self.param_str_)['batchsize']
        self.use_kld = cfg.USE_KLD
        self.top_names = ['qvec', 'cvec', 'img_feat', 'spt_feat', 'query_label', 'query_label_mask', 'query_bbox_targets', 'query_bbox_inside_weights', 'query_bbox_outside_weights']
        # Placeholder shapes; forward() reshapes tops to the actual batch.
        top[0].reshape(self.query_maxlen, self.batchsize)
        top[1].reshape(self.query_maxlen, self.batchsize)
        top[2].reshape(self.batchsize, cfg.RPN_TOPN, self.bottomup_feat_dim)
        top[3].reshape(self.batchsize, cfg.RPN_TOPN, 5)
        if self.use_kld:
            # KLD loss expects a distribution over all proposals.
            top[4].reshape(self.batchsize, cfg.RPN_TOPN)
        else:
            top[4].reshape(self.batchsize)
        top[5].reshape(self.batchsize)
        top[6].reshape((self.batchsize * cfg.RPN_TOPN), 4)
        top[7].reshape((self.batchsize * cfg.RPN_TOPN), 4)
        top[8].reshape((self.batchsize * cfg.RPN_TOPN), 4)
        if (str(self.phase) == 'TRAIN'):
            dp = get_data_provider(data_split=self.split, batchsize=self.batchsize)
            if (cfg.NTHREADS > 1):
                # Lazy import: torch only needed for multi-worker loading.
                import torch
                self.dataloader = torch.utils.data.DataLoader(dp, batch_size=self.batchsize, shuffle=True, num_workers=int(cfg.NTHREADS))
            else:
                self.dataloader = dp
            self.data_iter = iter(self.dataloader)

    def reshape(self, bottom, top):
        """No-op; tops are reshaped per batch in forward()."""
        pass

    def forward(self, bottom, top):
        """Pull the next batch and copy it into the top blobs (TRAIN only)."""
        if (str(self.phase) != 'TRAIN'):
            return
        try:
            # NOTE(review): iterator.next() is Python-2 style; on Python 3
            # this raises AttributeError (swallowed by the bare except, then
            # raised again on the retry). Use next(self.data_iter) on py3.
            next_data = self.data_iter.next()
        except:
            # Epoch exhausted: restart the iterator and retry once.
            self.data_iter = iter(self.dataloader)
            next_data = self.data_iter.next()
        next_data = map(np.array, next_data)
        # Pad every field up to the fixed batch size.
        my_complete_data = functools.partial(complete_data, batchsize=self.batchsize)
        (gt_boxes, qvec, cvec, img_feat, bbox, img_shape, spt_feat, query_label, query_label_mask, query_bbox_targets, query_bbox_inside_weights, query_bbox_outside_weights, valid_data, iid_list) = map(my_complete_data, next_data)
        # Word-index blobs are time-major: (maxlen, batch).
        qvec = np.transpose(qvec, (1, 0))
        top[0].reshape(*qvec.shape)
        top[0].data[...] = qvec
        cvec = np.transpose(cvec, (1, 0))
        top[1].reshape(*cvec.shape)
        top[1].data[...] = cvec
        top[2].reshape(*img_feat.shape)
        top[2].data[...] = img_feat
        top[3].reshape(*spt_feat.shape)
        top[3].data[...] = spt_feat
        top[4].reshape(*query_label.shape)
        top[4].data[...] = query_label
        top[5].reshape(*query_label_mask.shape)
        top[5].data[...] = query_label_mask
        # Bbox targets/weights are flattened to (batch * proposals, 4).
        query_bbox_targets = query_bbox_targets.reshape((- 1), 4)
        top[6].reshape(*query_bbox_targets.shape)
        top[6].data[...] = query_bbox_targets
        query_bbox_inside_weights = query_bbox_inside_weights.reshape((- 1), 4)
        top[7].reshape(*query_bbox_inside_weights.shape)
        top[7].data[...] = query_bbox_inside_weights
        query_bbox_outside_weights = query_bbox_outside_weights.reshape((- 1), 4)
        top[8].reshape(*query_bbox_outside_weights.shape)
        top[8].data[...] = query_bbox_outside_weights

    def backward(self, top, propagate_down, bottom):
        """Data layer: no gradients to propagate."""
        pass
def convert_uri_to_bucket_path(uri, strip_trailing_slash=True):
    """Split an S3-style URI into ``(bucket, key)``.

    The leading slash of the key is always removed; the trailing slash is
    removed only when ``strip_trailing_slash`` is true.
    """
    parsed = urlparse(uri)
    key = parsed.path.lstrip('/')
    if strip_trailing_slash:
        key = key.rstrip('/')
    return (parsed.netloc, key)
def main():
    """Entry point: load config, build data loaders and model, then train or test."""
    config = get_config()
    if (config.is_cuda and (not torch.cuda.is_available())):
        raise Exception('No GPU found')
    # Seed CPU (and GPU when enabled) RNGs for reproducibility.
    torch.manual_seed(config.seed)
    if config.is_cuda:
        torch.cuda.manual_seed(config.seed)
    logging.info('===> Configurations')
    dconfig = vars(config)
    for k in dconfig:
        logging.info(' {}: {}'.format(k, dconfig[k]))
    dataset = load_dataset(config.dataset)
    DatasetClass = dataset
    logging.info('===> Initializing dataloader')
    # Training gets an augmented, shuffled, repeating loader plus a plain
    # validation loader; testing gets a single plain loader.
    if config.is_train:
        train_data_loader = initialize_data_loader(DatasetClass, config, phase=config.train_phase, threads=config.threads, augment_data=True, shuffle=True, repeat=True, batch_size=config.batch_size, limit_numpoints=config.train_limit_numpoints)
        val_data_loader = initialize_data_loader(DatasetClass, config, threads=config.val_threads, phase=config.val_phase, augment_data=False, shuffle=False, repeat=False, batch_size=config.val_batch_size, limit_numpoints=False)
        dataset = train_data_loader.dataset
    else:
        test_data_loader = initialize_data_loader(DatasetClass, config, threads=config.threads, phase=config.test_phase, augment_data=False, shuffle=False, repeat=False, batch_size=config.test_batch_size, limit_numpoints=False)
        dataset = test_data_loader.dataset
    logging.info('===> Building model')
    pipeline_model = load_pipeline(config, dataset)
    logging.info(f'===> Number of trainable parameters: {count_parameters(pipeline_model)}')
    # Full checkpoint restore (optionally lenient about missing keys).
    if (config.weights.lower() != 'none'):
        logging.info(('===> Loading weights: ' + config.weights))
        state = torch.load(config.weights)
        pipeline_model.load_state_dict(state['state_dict'], strict=(not config.lenient_weight_loading))
    # Separate path for partial/pretrained weight initialisation.
    if (config.pretrained_weights.lower() != 'none'):
        logging.info(('===> Loading pretrained weights: ' + config.pretrained_weights))
        state = torch.load(config.pretrained_weights)
        pipeline_model.load_pretrained_weights(state['state_dict'])
    if config.is_train:
        train(pipeline_model, train_data_loader, val_data_loader, config)
    else:
        test(pipeline_model, test_data_loader, config)
class HfArgumentParserTest(unittest.TestCase):
    """Tests for HfArgumentParser's dataclass -> argparse translation."""

    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser) -> None:
        """Assert that two parsers define equivalent actions (order-sensitive)."""
        self.assertEqual(len(a._actions), len(b._actions))
        for (x, y) in zip(a._actions, b._actions):
            # Compare everything except the back-reference to the container.
            xx = {k: v for (k, v) in vars(x).items() if (k != 'container')}
            yy = {k: v for (k, v) in vars(y).items() if (k != 'container')}
            self.assertEqual(xx, yy)

    def test_basic(self):
        """Required int/float/str fields plus an optional bool flag."""
        parser = HfArgumentParser(BasicExample)
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo', type=int, required=True)
        expected.add_argument('--bar', type=float, required=True)
        expected.add_argument('--baz', type=str, required=True)
        expected.add_argument('--flag', type=string_to_bool, default=False, const=True, nargs='?')
        self.argparsersEqual(parser, expected)
        args = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        """Fields with plain defaults become optional arguments."""
        parser = HfArgumentParser(WithDefaultExample)
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo', default=42, type=int)
        expected.add_argument('--baz', default='toto', type=str, help='help message')
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        """Bool fields: --flag / --flag True / --no_flag negation forms."""
        parser = HfArgumentParser(WithDefaultBoolExample)
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo', type=string_to_bool, default=False, const=True, nargs='?')
        expected.add_argument('--baz', type=string_to_bool, default=True, const=True, nargs='?')
        # A True-default bool also gets a generated --no_* negation.
        expected.add_argument('--no_baz', action='store_false', default=False, dest='baz')
        expected.add_argument('--opt', type=string_to_bool, default=None)
        self.argparsersEqual(parser, expected)
        args = parser.parse_args([])
        self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))
        args = parser.parse_args(['--foo', '--no_baz'])
        self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))
        args = parser.parse_args(['--foo', '--baz'])
        self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))
        args = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'])
        self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))
        args = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'])
        self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        """Enum fields become choices= arguments parsed back into the enum."""
        parser = HfArgumentParser(EnumExample)
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo', default='toto', choices=['titi', 'toto'], type=str)
        self.argparsersEqual(parser, expected)
        args = parser.parse_args([])
        self.assertEqual(args.foo, 'toto')
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, BasicEnum.toto)
        args = parser.parse_args(['--foo', 'titi'])
        self.assertEqual(args.foo, 'titi')
        enum_ex = parser.parse_args_into_dataclasses(['--foo', 'titi'])[0]
        self.assertEqual(enum_ex.foo, BasicEnum.titi)

    def test_with_list(self):
        """List fields become nargs='+' arguments with list defaults."""
        parser = HfArgumentParser(ListExample)
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo_int', nargs='+', default=[], type=int)
        expected.add_argument('--bar_int', nargs='+', default=[1, 2, 3], type=int)
        expected.add_argument('--foo_str', nargs='+', default=['Hallo', 'Bonjour', 'Hello'], type=str)
        expected.add_argument('--foo_float', nargs='+', default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)
        args = parser.parse_args([])
        self.assertEqual(args, Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=['Hallo', 'Bonjour', 'Hello'], foo_float=[0.1, 0.2, 0.3]))
        args = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=['a', 'b', 'c'], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        """Optional[...] fields default to None (or [] for optional lists)."""
        parser = HfArgumentParser(OptionalExample)
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo', default=None, type=int)
        expected.add_argument('--bar', default=None, type=float, help='help message')
        expected.add_argument('--baz', default=None, type=str)
        expected.add_argument('--ces', nargs='+', default=[], type=str)
        expected.add_argument('--des', nargs='+', default=[], type=int)
        self.argparsersEqual(parser, expected)
        args = parser.parse_args([])
        self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))
        args = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split())
        self.assertEqual(args, Namespace(foo=12, bar=3.14, baz='42', ces=['a', 'b', 'c'], des=[1, 2, 3]))

    def test_with_required(self):
        """Fields without defaults (incl. lists and enums) are required."""
        parser = HfArgumentParser(RequiredExample)
        expected = argparse.ArgumentParser()
        expected.add_argument('--required_list', nargs='+', type=int, required=True)
        expected.add_argument('--required_str', type=str, required=True)
        expected.add_argument('--required_enum', type=str, choices=['titi', 'toto'], required=True)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        """parse_dict() builds the dataclass straight from a dict."""
        parser = HfArgumentParser(BasicExample)
        args_dict = {'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True}
        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        """Smoke test: TrainingArguments must be parseable at all."""
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
def run(test_opts, model_id, image_name, use_multi_id_G):
    """Apply a StyleCLIP mapper with both the PTI-tuned and original generators.

    Results are written under ``<exp_dir>/<data_dir_name>/<image_name>/``,
    once per generator (PTI vs. e4e keywords).
    """
    out_path_results = os.path.join(test_opts.exp_dir, test_opts.data_dir_name)
    os.makedirs(out_path_results, exist_ok=True)
    out_path_results = os.path.join(out_path_results, test_opts.image_name)
    os.makedirs(out_path_results, exist_ok=True)
    # Checkpoint opts are overridden by the CLI test options.
    ckpt = torch.load(test_opts.checkpoint_path, map_location='cpu')
    opts = ckpt['opts']
    opts.update(vars(test_opts))
    opts = Namespace(**opts)
    net = StyleCLIPMapper(opts, test_opts.run_id)
    net.eval()
    net.to(global_config.device)
    # Multi-id generators are keyed by a shared model type; per-image ones
    # are keyed by the image name.
    generator_type = (paths_config.multi_id_model_type if use_multi_id_G else image_name)
    new_G = load_tuned_G(model_id, generator_type)
    old_G = load_old_G()
    # Run the same edit on the tuned and original generator for comparison.
    run_styleclip(net, new_G, opts, paths_config.pti_results_keyword, out_path_results, test_opts)
    run_styleclip(net, old_G, opts, paths_config.e4e_results_keyword, out_path_results, test_opts)
def select_adaptive_pool2d(x, pool_type='avg', output_size=1):
    """Selectable adaptive 2-D pooling for NCHW tensors.

    Args:
        x: input tensor of shape (N, C, H, W).
        pool_type: one of 'avg', 'avgmax', 'catavgmax', 'max'.
        output_size: target spatial size passed to the pooling op.

    Returns:
        The pooled tensor.

    Raises:
        AssertionError: if ``pool_type`` is not one of the supported names.
    """
    if (pool_type == 'avg'):
        x = F.adaptive_avg_pool2d(x, output_size)
    elif (pool_type == 'avgmax'):
        x = adaptive_avgmax_pool2d(x, output_size)
    elif (pool_type == 'catavgmax'):
        x = adaptive_catavgmax_pool2d(x, output_size)
    elif (pool_type == 'max'):
        x = F.adaptive_max_pool2d(x, output_size)
    else:
        # Fix: `assert False, ...` is stripped under `python -O`; raise the
        # same exception type explicitly so invalid input always fails.
        raise AssertionError('Invalid pool type: %s' % pool_type)
    return x
@pytest.mark.parametrize('shape', ([11], [1, 2, 3], [32, 14]))
def test_output_shapes(shape):
    """RaycastingScene query outputs must mirror the ray/point batch shape.

    Fix: the decorator had been garbled to a bare ``.parametrize(...)``
    (a syntax error); restored ``@pytest.mark.parametrize``.
    """
    # One triangle is enough to exercise every query type.
    vertices = o3d.core.Tensor([[0, 0, 0], [1, 0, 0], [1, 1, 0]], dtype=o3d.core.float32)
    triangles = o3d.core.Tensor([[0, 1, 2]], dtype=o3d.core.uint32)
    scene = o3d.t.geometry.RaycastingScene()
    scene.add_triangles(vertices, triangles)
    rs = np.random.RandomState(123)
    # Rays carry 6 values (origin + direction); query points carry 3.
    rays = o3d.core.Tensor.from_numpy(rs.uniform(size=(shape + [6])).astype(np.float32))
    query_points = o3d.core.Tensor.from_numpy(rs.uniform(size=(shape + [3])).astype(np.float32))
    ans = scene.count_intersections(rays)
    assert (list(ans.shape) == shape)
    ans = scene.compute_distance(query_points)
    assert (list(ans.shape) == shape)
    ans = scene.compute_signed_distance(query_points)
    assert (list(ans.shape) == shape)
    ans = scene.compute_occupancy(query_points)
    assert (list(ans.shape) == shape)
    # Expected trailing dimensions for each dict-returning query field.
    last_dim = {'t_hit': [], 'geometry_ids': [], 'primitive_ids': [], 'primitive_uvs': [2], 'primitive_normals': [3], 'points': [3], 'ray_ids': [], 'ray_splits': []}
    ans = scene.cast_rays(rays)
    for (k, v) in ans.items():
        expected_shape = (shape + last_dim[k])
        assert (list(v.shape) == expected_shape), 'shape mismatch: expected {} but got {} for {}'.format(expected_shape, list(v.shape), k)
    ans = scene.compute_closest_points(query_points)
    for (k, v) in ans.items():
        expected_shape = (shape + last_dim[k])
        assert (list(v.shape) == expected_shape), 'shape mismatch: expected {} but got {} for {}'.format(expected_shape, list(v.shape), k)
    # list_intersections flattens results; sizes depend on total hit count.
    ans = scene.list_intersections(rays)
    nx = np.sum(scene.count_intersections(rays).numpy()).tolist()
    for (k, v) in ans.items():
        if (k == 'ray_splits'):
            alt_shape = [(np.prod(rays.shape[:(- 1)]) + 1)]
        else:
            alt_shape = [nx]
        expected_shape = np.append(alt_shape, last_dim[k]).tolist()
        assert (list(v.shape) == expected_shape), 'shape mismatch: expected {} but got {} for {}'.format(expected_shape, list(v.shape), k)
class CategoricalAccuracyWithLogits(torchsample.metrics.CategoricalAccuracy):
    """Categorical-accuracy metric that accepts raw logits.

    Applies softmax to ``y_pred`` before delegating to the parent metric.
    """

    def __call__(self, y_pred, y_true):
        # NOTE(review): F.softmax is called without an explicit `dim`
        # (deprecated implicit-dim behaviour) — presumably dim=1 for
        # (batch, classes) logits; confirm and pass dim explicitly when
        # upgrading torch.
        return super(CategoricalAccuracyWithLogits, self).__call__(F.softmax(y_pred), y_true)
def lprint(text: str, logs_path: str) -> None:
    """Print ``text`` to stdout and append it, newline-terminated, to the log file."""
    print(text)
    with open(logs_path, 'a') as log_file:
        log_file.write(text + '\n')
class IterLoader():
    """Endless iterator over a DataLoader that restarts it on exhaustion.

    Each restart counts as a new epoch; with ``use_distributed`` the
    sampler's ``set_epoch`` is called so shuffling differs per epoch.
    """

    def __init__(self, dataloader: DataLoader, use_distributed: bool=False):
        self._dataloader = dataloader
        self.iter_loader = iter(self._dataloader)
        self._use_distributed = use_distributed
        self._epoch = 0

    def epoch(self) -> int:
        """Number of times the underlying loader has been exhausted."""
        return self._epoch

    def __iter__(self):
        return self

    def __len__(self):
        return len(self._dataloader)

    def __next__(self):
        try:
            batch = next(self.iter_loader)
        except StopIteration:
            # Loader exhausted: bump the epoch, reseed distributed shuffling,
            # then restart and fetch the first batch of the new epoch.
            self._epoch += 1
            if (hasattr(self._dataloader.sampler, 'set_epoch') and self._use_distributed):
                self._dataloader.sampler.set_epoch(self._epoch)
            # Brief pause before restarting (kept from the original).
            time.sleep(2)
            self.iter_loader = iter(self._dataloader)
            batch = next(self.iter_loader)
        return batch
def _parse_args():
    """Two-stage CLI parsing: an optional YAML config seeds parser defaults.

    First, the lightweight ``config_parser`` extracts only the ``--config``
    option; the YAML it points to (if any) becomes the defaults of the main
    ``parser``, which then parses the remaining argv. Returns the parsed
    namespace plus its YAML dump (for logging/reproducibility).
    """
    args_config, remaining = config_parser.parse_known_args()
    if args_config.config:
        with open(args_config.config, 'r') as f:
            # CLI flags still override these defaults.
            parser.set_defaults(**yaml.safe_load(f))
    args = parser.parse_args(remaining)
    args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
    return (args, args_text)
def ReadWordCounts(word_counts_file_handle):
    """Parse 'word count' lines from an open handle into a dict.

    Extra columns beyond the first two are ignored; a line with fewer than
    two fields raises Exception.
    """
    counts = {}
    for line in word_counts_file_handle.readlines():
        fields = line.strip().split()
        if len(fields) < 2:
            raise Exception('Invalid format of line ' + line + ' in counts file.')
        counts[fields[0]] = int(fields[1])
    return counts
def blockdiag_butterfly_project_einsum(M, nblocks1, nblocks2, b1, b2):
    """Factor M into two block-diagonal butterfly factors via low-rank projection.

    Args:
        M: (m, n) matrix. The rearrange assumes n splits as (k * i) and m as
           (l * j * b2) with k=nblocks1, j=nblocks2 — TODO confirm the exact
           divisibility contract with callers.
        nblocks1, nblocks2: block counts of the two butterfly factors.
        b1: rank of each per-block low-rank projection.
        b2: sub-block size along the row dimension.

    Returns:
        (w1_bfly, w2_bfly): butterfly weight tensors shaped 'k (j b1) i' and
        'j lb2 (k b1)' in einops notation.
    """
    (m, n) = M.shape
    (k, j) = (nblocks1, nblocks2)
    # Regroup M into k*j blocks, batched so each block gets its own
    # low-rank projection.
    M_permuted_batched = rearrange(M, '(l j b2) (k i) -> k j (l b2) i', k=nblocks1, j=nblocks2, b2=b2)
    (U, Vt) = low_rank_project(M_permuted_batched, rank=b1)
    w1_bfly = rearrange(Vt, 'k j b1 i -> k (j b1) i')
    w2_bfly = rearrange(U, 'k j lb2 b1 -> j lb2 (k b1)')
    return (w1_bfly, w2_bfly)
class IBStat(threading.Thread):
    """Daemon thread sampling InfiniBand port counters into throughput series.

    Reads /sys/class/infiniband/*/ports/*/counters/port_{xmit,rcv}_data every
    ``interval`` seconds, accumulates per-device throughput lists, and dumps
    them as JSON under ``dump_path`` every ``flush_count`` samples.

    NOTE(review): this block appears garbled by extraction — several methods
    are called as bare functions (``do_record(self)`` etc.), the
    ``atexit.register`` line sits at class scope where ``self`` is undefined,
    and the trailing ``while True`` loop likely belongs in run(). Confirm
    against the original file before relying on it.
    """

    def __init__(self, interval: int=10, flush_count: int=1000, dump_path: str='', logging: bool=False, from_main: bool=False):
        super().__init__()
        self.interval = interval        # seconds between samples
        self.flush_count = flush_count  # samples per JSON dump
        self.dump_path = dump_path
        self.logging = logging          # print per-sample throughput lines
        self.ib_counters = Path('/sys/class/infiniband/')
        if (not self.ib_counters.exists()):
            # No IB hardware: become a no-op (or exit when run as a script).
            print('no ib device found, skip monitor')
            if from_main:
                exit(0)
            return
        self.ib_counters_file = {}
        self.ib_counters_file['send'] = sorted(list(self.ib_counters.glob('*/ports/*/counters/port_xmit_data')))
        self.ib_counters_file['recv'] = list(self.ib_counters.glob('*/ports/*/counters/port_rcv_data'))
        self.ib_device_count = len(self.ib_counters_file['send'])
        self.ib_device = []
        # Keep counter files open; read_once() rewinds after each read.
        self.ib_counters_fd = {k: [i.open() for i in v] for (k, v) in self.ib_counters_file.items()}
        self.ib_stat: Dict[(str, List[List[float]])] = {}
        self.ib_stat['send'] = [[] for _ in range(self.ib_device_count)]
        self.ib_stat['recv'] = [[] for _ in range(self.ib_device_count)]
        # Device names look like mlx5_0 or mlx5_bond_0.
        ib_device_pattern = re.compile('mlx\\d+(_bond){0,1}_\\d+')
        for ib in self.ib_counters_file['send']:
            match = ib_device_pattern.search(str(ib.absolute()))
            if (not match):
                if (not from_main):
                    print('no ib device found, skip monitor')
                    return
                raise ValueError(f'device {ib} wrong pattern')
            self.ib_device.append(match.group(0))
        if dump_path:
            p = Path(dump_path)
            if (p.exists() and (not p.is_dir())):
                raise ValueError(f'dump_path {dump_path} is not dir')
            if (not p.exists()):
                p.mkdir()
        # NOTE: setDaemon() is deprecated; modern code sets self.daemon = True.
        self.setDaemon(True)
        self.start()

    def read_once(self, fd):
        """Read the counter value from an open fd and rewind it.

        NOTE(review): ``scale = Decimal()`` is Decimal('0'), so the division
        below raises ZeroDivisionError. A unit-conversion constant appears to
        have been lost (the counters are in 4-byte words; presumably the
        scale converts to Gbit) — confirm against the original.
        """
        line = fd.read().strip()
        fd.seek(0)
        scale = Decimal()
        return (Decimal(int(line)) / scale)

    def run(self):
        """Thread body: initialise sampling state (continuation looks garbled)."""
        self.count = 1
        self.has_dump = False
        self.has_record = False
        self.timestamp = []
        self.this_start = time.time()
        # Last raw counter values, used by do_parse() to compute deltas.
        self.last = {k: [Decimal(0) for _ in v] for (k, v) in self.ib_counters_file.items()}

    def dump_json(self):
        """Write the accumulated per-device series to <dump_path>/<count>.json."""
        if (not self.dump_path):
            return
        with open(f'{self.dump_path}/{self.count}.json', 'w') as f:
            data = {}
            for (trans_type, ib_stat) in self.ib_stat.items():
                for (device_name, stat) in zip(self.ib_device, ib_stat):
                    data[f'{device_name}_{trans_type}'] = stat
            data['timestamp'] = self.timestamp
            json.dump(data, f, sort_keys=True, indent=2)

    def do_record(self):
        """Snapshot all raw counter values into ``self.last``."""
        self.has_record = False
        for (name, fds) in self.ib_counters_fd.items():
            for (index, fd) in enumerate(fds):
                self.last[name][index] = self.read_once(fd)
        self.has_record = True

    def do_parse(self, dur):
        """Compute throughput over ``dur`` seconds from counter deltas."""
        logging_buffer = [str(int(time.time()))]
        for (name, fds) in self.ib_counters_fd.items():
            for (index, fd) in enumerate(fds):
                throught_put = round(float(Decimal(((self.read_once(fd) - self.last[name][index]) / dur))), 2)
                logging_buffer.append(f'{self.ib_device[index]}_{name}:{throught_put}Gb/s')
                self.ib_stat[name][index].append(throught_put)
        if self.logging:
            print(' '.join(logging_buffer), flush=True)

    def generate_json(self):
        """Collect ``flush_count`` samples, one every ``interval`` seconds.

        NOTE(review): ``do_record``/``do_parse`` are invoked as bare names;
        as methods they would need ``self.do_record()`` — likely garbled.
        """
        for _ in range(self.flush_count):
            do_record(self)
            self.this_start = time.time()
            time.sleep(self.interval)
            do_parse(self, self.interval)
            self.timestamp.append(int(time.time()))

    def clean_up(self):
        """Build the atexit hook that flushes any unsaved stats."""
        def inner():
            nonlocal self
            if self.has_dump:
                print('atexit, has dump, skip')
                return
            if (not self.has_record):
                print('atexit, no recording yet')
                return
            print('atexit, saving unsaved ib stat')
            # Mark the final partial window distinctly.
            self.count = 'final'
            try:
                dur = Decimal((time.time() - self.this_start))
                do_parse(self, dur)
                self.timestamp.append(int(time.time()))
                dump_json(self)
            except Exception:
                pass
        return inner

    # NOTE(review): executes at class-definition time where ``self`` is
    # undefined (NameError); almost certainly belongs inside run().
    atexit.register(clean_up(self))

    def next_step(self):
        """Reset per-window buffers for the next dump file.

        NOTE(review): the trailing ``while True`` sampling loop appears to
        belong to run(), not here — confirm against the original source.
        """
        self.timestamp = []
        self.count += 1
        self.ib_stat['send'] = [[] for _ in range(self.ib_device_count)]
        self.ib_stat['recv'] = [[] for _ in range(self.ib_device_count)]
        while True:
            self.has_dump = False
            generate_json(self)
            dump_json(self)
            self.has_dump = True
            next_step(self)
class SegmentronConfig(dict):
    """Hierarchical config: a dict whose nested keys are reachable as attributes.

    Dotted keys ('MODEL.BACKBONE') address nested sub-configs, which are
    created on demand; setting ``immutable`` freezes the whole tree.
    """

    def __init__(self, *args, **kwargs):
        super(SegmentronConfig, self).__init__(*args, **kwargs)
        # Stored in __dict__ (not the dict payload) via __setattr__'s special case.
        self.immutable = False

    def __setattr__(self, key, value, create_if_not_exist=True):
        """Set a (possibly dotted) key, creating intermediate sub-configs."""
        if (key in ['immutable']):
            self.__dict__[key] = value
            return
        t = self
        keylist = key.split('.')
        for k in keylist[:(- 1)]:
            t = t.__getattr__(k, create_if_not_exist)
        # Touch the leaf first so a missing key raises when creation is off.
        t.__getattr__(keylist[(- 1)], create_if_not_exist)
        t[keylist[(- 1)]] = value

    def __getattr__(self, key, create_if_not_exist=True):
        """Attribute access over dict keys; missing keys become sub-configs."""
        if (key in ['immutable']):
            if (key not in self.__dict__):
                self.__dict__[key] = False
            return self.__dict__[key]
        if (not (key in self)):
            if (not create_if_not_exist):
                raise KeyError
            self[key] = SegmentronConfig()
        return self[key]

    def __setitem__(self, key, value):
        """Item assignment; string values are literal_eval'd when possible."""
        if self.immutable:
            raise AttributeError('Attempted to set "{}" to "{}", but SegConfig is immutable'.format(key, value))
        if isinstance(value, six.string_types):
            try:
                # Turn "1", "[1,2]", "True" etc. back into Python values;
                # plain strings stay strings.
                value = literal_eval(value)
            except ValueError:
                pass
            except SyntaxError:
                pass
        super(SegmentronConfig, self).__setitem__(key, value)

    def update_from_other_cfg(self, other):
        """Merge another (nested) config; unknown keys raise KeyError."""
        if isinstance(other, dict):
            other = SegmentronConfig(other)
        assert isinstance(other, SegmentronConfig)
        # Breadth-first walk, flattening nested dicts into dotted keys.
        cfg_list = [('', other)]
        while len(cfg_list):
            (prefix, tdic) = cfg_list[0]
            cfg_list = cfg_list[1:]
            for (key, value) in tdic.items():
                key = ('{}.{}'.format(prefix, key) if prefix else key)
                if isinstance(value, dict):
                    cfg_list.append((key, value))
                    continue
                try:
                    self.__setattr__(key, value, create_if_not_exist=False)
                except KeyError:
                    raise KeyError('Non-existent config key: {}'.format(key))

    def remove_irrelevant_cfg(self):
        """Drop MODEL sub-configs belonging to models other than the active one."""
        model_name = self.MODEL.MODEL_NAME
        from ..models.model_zoo import MODEL_REGISTRY
        model_list = MODEL_REGISTRY.get_list()
        model_list_lower = [x.lower() for x in model_list]
        assert (model_name.lower() in model_list_lower), 'Expected model name in {}, but received {}'.format(model_list, model_name)
        pop_keys = []
        for key in self.MODEL.keys():
            if (key.lower() in model_list_lower):
                # pointrend keeps its base model's config alive.
                if ((model_name.lower() == 'pointrend') and (key.lower() == self.MODEL.POINTREND.BASEMODEL.lower())):
                    continue
                if ((key.lower() in model_list_lower) and (key.lower() != model_name.lower())):
                    # These models share configs with others; keep everything.
                    if (model_name.lower() in ['pvt_trans2seg', 'pvt_fpt']):
                        continue
                    pop_keys.append(key)
        for key in pop_keys:
            self.MODEL.pop(key)

    def check_and_freeze(self):
        """Stamp the run time, prune foreign model configs and freeze the tree."""
        self.TIME_STAMP = time.strftime('%Y-%m-%d-%H-%M', time.localtime())
        self.remove_irrelevant_cfg()
        self.immutable = True

    def update_from_list(self, config_list):
        """Apply alternating key/value pairs (e.g. from the command line)."""
        if ((len(config_list) % 2) != 0):
            raise ValueError('Command line options config format error! Please check it: {}'.format(config_list))
        for (key, value) in zip(config_list[0::2], config_list[1::2]):
            try:
                self.__setattr__(key, value, create_if_not_exist=False)
            except KeyError:
                raise KeyError('Non-existent config key: {}'.format(key))

    def update_from_file(self, config_file):
        """Merge settings from a YAML file."""
        with codecs.open(config_file, 'r', 'utf-8') as file:
            loaded_cfg = yaml.load(file, Loader=yaml.FullLoader)
        self.update_from_other_cfg(loaded_cfg)

    def set_immutable(self, immutable):
        """Recursively (un)freeze this config and every nested sub-config."""
        self.immutable = immutable
        for value in self.values():
            if isinstance(value, SegmentronConfig):
                value.set_immutable(immutable)

    def is_immutable(self):
        """Whether this (sub-)config is currently frozen."""
        return self.immutable
def check(ppml_args, arg_name):
    """Fetch ``arg_name`` from ``ppml_args``, reporting a missing argument.

    On a missing key, delegates error reporting to ``invalidInputError``
    (and implicitly returns None, as before).
    """
    try:
        return ppml_args[arg_name]
    except KeyError:
        invalidInputError(False, 'need argument ' + arg_name)
class PedsimInteractiveObstacle():
    """Interactive obstacle description for the pedsim simulator."""

    def __init__(self):
        # Obstacle kind (project enum); defaults to a shelf.
        self.obstacleType = InteractiveObstacleType.SHELF
def reshape(array, newshape):
    """Framework-agnostic reshape for numpy / torch / tensorflow / jax arrays.

    Dispatches on the array's framework and calls that framework's native
    reshape. Raises ValueError for unsupported types.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    if is_torch_tensor(array):
        # torch's Tensor.reshape takes the dims unpacked.
        return array.reshape(*newshape)
    if is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    if is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    raise ValueError(f'Type not supported for reshape: {type(array)}.')
class Embedding(TokenEmbedder):
    """Token embedding layer with optional linear projection, padding-row
    zeroing, and pretrained-weight loading."""

    def __init__(self, num_embeddings: int, embedding_dim: int, projection_dim: int=None, weight: torch.FloatTensor=None, padding_index: int=None, trainable: bool=True, max_norm: float=None, norm_type: float=2.0, scale_grad_by_freq: bool=False, sparse: bool=False) -> None:
        super(Embedding, self).__init__()
        self.num_embeddings = num_embeddings
        self.padding_index = padding_index
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        self.sparse = sparse
        self.trainable = trainable
        self.embedding_dim = embedding_dim
        # The module's output size is the projection size when one is requested.
        self.output_dim = (projection_dim or embedding_dim)
        if (weight is None):
            weight = torch.FloatTensor(num_embeddings, embedding_dim)
            self.weight = torch.nn.Parameter(weight, requires_grad=trainable)
            torch.nn.init.xavier_uniform_(self.weight)
        else:
            if (weight.size() != (num_embeddings, embedding_dim)):
                raise ConfigurationError('A weight matrix was passed with contradictory embedding shapes.')
            self.weight = torch.nn.Parameter(weight, requires_grad=trainable)
        if (self.padding_index is not None):
            # Padding rows contribute zero vectors downstream.
            self.weight.data[self.padding_index].fill_(0)
        if projection_dim:
            self._projection = torch.nn.Linear(embedding_dim, projection_dim)
        else:
            self._projection = None

    def get_output_dim(self) -> int:
        """Return the dimensionality of the vectors this module produces."""
        return self.output_dim

    def forward(self, inputs):
        """Embed an id tensor of any rank >= 2, restoring the original shape."""
        original_inputs = inputs
        if (original_inputs.dim() > 2):
            # Flatten higher-rank id tensors so `embedding` sees a 2-D input.
            inputs = inputs.view((- 1), inputs.size((- 1)))
        embedded = embedding(inputs, self.weight, max_norm=self.max_norm, norm_type=self.norm_type, scale_grad_by_freq=self.scale_grad_by_freq, sparse=self.sparse)
        if (original_inputs.dim() > 2):
            view_args = (list(original_inputs.size()) + [embedded.size((- 1))])
            embedded = embedded.view(*view_args)
        if self._projection:
            projection = self._projection
            # Wrap the projection so it applies over any extra leading dims.
            for _ in range((embedded.dim() - 2)):
                projection = TimeDistributed(projection)
            embedded = projection(embedded)
        return embedded

    def load_pretrain_from_file(self, vocab: Vocabulary, pretrained_file, vocab_namespace, amr=False):
        """Replace the weight matrix with embeddings read from `pretrained_file`."""
        weight = _read_pretrained_embeddings_file(pretrained_file, self.embedding_dim, vocab, vocab_namespace, amr)
        self.weight = torch.nn.Parameter(weight, requires_grad=self.trainable)

    @classmethod
    def from_params(cls, vocab: Vocabulary, params) -> 'Embedding':
        """Build an Embedding from a params mapping.

        BUG FIX: this method takes `cls` but was not decorated, so calling it
        via the class mis-bound the arguments; restored as a @classmethod.
        """
        num_embeddings = params.get('num_embeddings', None)
        vocab_namespace = params.get('vocab_namespace', 'tokens')
        if (num_embeddings is None):
            num_embeddings = vocab.get_vocab_size(vocab_namespace)
        embedding_dim = params.get('embedding_dim')
        pretrained_file = params.get('pretrained_file', None)
        data_type = params.get('data_type', None)
        projection_dim = params.get('projection_dim', None)
        trainable = params.get('trainable', True)
        padding_index = params.get('padding_index', None)
        max_norm = params.get('max_norm', None)
        norm_type = params.get('norm_type', 2.0)
        scale_grad_by_freq = params.get('scale_grad_by_freq', False)
        sparse = params.get('sparse', False)
        if pretrained_file:
            weight = _read_pretrained_embeddings_file(pretrained_file, embedding_dim, vocab, vocab_namespace, (data_type == 'AMR'))
        else:
            weight = None
        return cls(num_embeddings=num_embeddings, embedding_dim=embedding_dim, projection_dim=projection_dim, weight=weight, padding_index=padding_index, trainable=trainable, max_norm=max_norm, norm_type=norm_type, scale_grad_by_freq=scale_grad_by_freq, sparse=sparse)
class InvertedResidual(nn.Module):
    """ShuffleNetV2 inverted-residual unit: two branches concatenated and
    channel-shuffled; stride 1 splits channels, stride > 1 downsamples both."""

    def __init__(self, inp, oup, stride):
        super(InvertedResidual, self).__init__()
        if (not (1 <= stride <= 3)):
            raise ValueError('illegal stride value')
        self.stride = stride
        branch_features = (oup // 2)
        # With stride 1 the input is split in half, so inp must equal 2*branch_features.
        assert ((self.stride != 1) or (inp == (branch_features << 1)))
        if (self.stride > 1):
            self.branch1 = nn.Sequential(self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1), nn.BatchNorm2d(inp), nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False), nn.BatchNorm2d(branch_features), nn.ReLU(inplace=True))
        self.branch2 = nn.Sequential(nn.Conv2d((inp if (self.stride > 1) else branch_features), branch_features, kernel_size=1, stride=1, padding=0, bias=False), nn.BatchNorm2d(branch_features), nn.ReLU(inplace=True), self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1), nn.BatchNorm2d(branch_features), nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False), nn.BatchNorm2d(branch_features), nn.ReLU(inplace=True))

    @staticmethod
    def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):
        """Depthwise conv (groups == in-channels).

        BUG FIX: restored @staticmethod — as a plain method,
        `self.depthwise_conv(inp, inp, ...)` passed `self` as the channel
        count `i` and broke construction.
        """
        return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)

    def forward(self, x):
        if (self.stride == 1):
            (x1, x2) = x.chunk(2, dim=1)
            out = torch.cat((x1, self.branch2(x2)), dim=1)
        else:
            out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
        out = channel_shuffle(out, 2)
        return out
def get_ip(host):
    """Resolve 'localhost'/'127.0.0.1' to this machine's outward-facing IP
    (via a connected UDP socket that sends no packets); other hosts are
    returned unchanged."""
    if (host in ['localhost', '127.0.0.1']):
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            sock.connect(('10.255.255.255', 1))
            host = sock.getsockname()[0]
        except OSError:
            # BUG FIX: socket failures raise OSError, not RuntimeError; keep
            # the original host string as a best-effort fallback.
            pass
        finally:
            sock.close()
    return host
def start_sever():
    # Upload the training scripts to every remote host listed in the
    # module-level `server_list` (presumably a name -> address mapping —
    # TODO confirm, since both the key and `server_list[sever]` are used).
    # NOTE: nothing is executed remotely here; files are only copied via SFTP.
    for (index, sever) in enumerate(server_list):
        ssh = paramiko.SSHClient()
        # Auto-accept unknown host keys (convenient, but insecure for production use).
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(server_list[sever], 22, 'ubuntu', key_filename='/Users/jonah/.ssh/megadata-OR.pem')
        sftp = paramiko.SFTPClient.from_transport(ssh.get_transport())
        # Each host receives its own numbered launch script (train_script1.sh, train_script2.sh, ...).
        sftp.put((('/Users/jonah/Desktop/signum/source code/signSGD-with-Majority-Vote/benchmark_QRNN/run_scripts/example/train_script' + str((index + 1))) + '.sh'), '/home/ubuntu/train_script.sh')
        # The remaining project files are identical on every host.
        sftp.put('/Users/jonah/Desktop/signum/source code/signSGD-with-Majority-Vote/benchmark_QRNN/Signum_SGD.py', '/home/ubuntu/awd-lstm-lm/Signum_SGD.py')
        sftp.put('/Users/jonah/Desktop/signum/source code/signSGD-with-Majority-Vote/benchmark_QRNN/utils.py', '/home/ubuntu/awd-lstm-lm/utils.py')
        sftp.put('/Users/jonah/Desktop/signum/source code/signSGD-with-Majority-Vote/benchmark_QRNN/main_signum.py', '/home/ubuntu/awd-lstm-lm/main_signum.py')
        sftp.put('/Users/jonah/Desktop/signum/source code/signSGD-with-Majority-Vote/benchmark_QRNN/splitcross.py', '/home/ubuntu/awd-lstm-lm/splitcross.py')
        sftp.put('/Users/jonah/Desktop/signum/source code/signSGD-with-Majority-Vote/benchmark_QRNN/model.py', '/home/ubuntu/awd-lstm-lm/model.py')
        sftp.put('/Users/jonah/Desktop/signum/source code/signSGD-with-Majority-Vote/benchmark_QRNN/compressor.py', '/home/ubuntu/awd-lstm-lm/compressor.py')
@_registry(pattern_type='InputData')
class InputData(Pattern):
    """Rewrites a model so its first node is a canonical 'Input' node.

    BUG FIX: `_registry(pattern_type='InputData')` was a bare call whose
    returned decorator was discarded, so the pattern was never registered;
    restored as a class decorator.
    """

    def __call__(self, model):
        """Try each known input-node pattern; if none matches and the model
        starts with ONNXINPUT nodes, fold them into a single Input node."""
        # Each entry describes one recognizable set of graph input names and
        # how to wire the replacement 'Input' node's tensors.
        pattern_mapping_config = {'InputData': [
            {'patterns': {'in': [[(0, 'input_ids'), (1, 'segment_ids'), (2, 'input_mask')]], 'out': [[(0, 'Input')]]}, 'search_mode': 'node_name', 'node_names': {0: 'input_data'}, 'input_tensors': {0: [[], [[], 0]]}, 'output_tensors': {0: [[{0: [0]}, {1: [0]}, {2: [0]}], [[0, 1, 2], 3]]}, 'returns': [0, 1, 2]},
            {'patterns': {'in': [[(0, 'input_ids'), (1, 'token_type_ids'), (2, 'attention_mask')]], 'out': [[(0, 'Input')]]}, 'search_mode': 'node_name', 'node_names': {0: 'input_data'}, 'input_tensors': {0: [[], [[], 0]]}, 'output_tensors': {0: [[{0: [0]}, {1: [0]}, {2: [0]}], [[0, 1, 2], 3]]}, 'returns': [0, 1, 2]},
            {'patterns': {'in': [[(0, 'IteratorGetNext')]], 'out': [[(0, 'Input')]]}, 'search_mode': 'node_name', 'node_names': {0: 'input_data'}, 'input_tensors': {0: [[], [[], 0]]}, 'output_tensors': {0: [[{0: [0]}, {0: [4]}, {0: [1]}], [[0, 1, 2], 3]]}, 'returns': []},
            {'patterns': {'in': [[(0, 'dense_x'), (1, 'offsets'), (2, 'indices')]], 'out': [[(0, 'Input')]]}, 'search_mode': 'node_name', 'node_names': {0: 'input_data'}, 'input_tensors': {0: [[], [[], 0]]}, 'output_tensors': {0: [[{0: [0]}, {1: [0]}, {2: [0]}], [[0, 1, 2], 3]]}, 'returns': [0, 1, 2]},
            {'patterns': {'in': [[(0, 'input_ids'), (1, 'attention_mask')]], 'out': [[(0, 'Input')]]}, 'search_mode': 'node_name', 'node_names': {0: 'input_data'}, 'input_tensors': {0: [[], [[], 0]]}, 'output_tensors': {0: [[{0: [0]}, {1: [0]}], [[0, 1], 2]]}, 'returns': []},
            {'patterns': {'in': [[(0, 'input_ids')]], 'out': [[(0, 'Input')]]}, 'search_mode': 'node_name', 'node_names': {0: 'input_data'}, 'input_tensors': {0: [[], [[], 0]]}, 'output_tensors': {0: [[{0: [0]}], [[], 1]]}, 'returns': []},
            {'patterns': {'in': [[(0, 'sample'), (1, 'timestep'), (2, 'encoder_hidden_states')]], 'out': [[(0, 'Input')]]}, 'search_mode': 'node_name', 'node_names': {0: 'input_data'}, 'input_tensors': {0: [[], [[], 0]]}, 'output_tensors': {0: [[{0: [0]}, {1: [0]}, {2: [0]}], [[0, 1, 2], 3]]}, 'returns': [0, 1, 2]},
            {'patterns': {'in': [[(0, 'latent_sample')]], 'out': [[(0, 'Input')]]}, 'search_mode': 'node_name', 'node_names': {0: 'input_data'}, 'input_tensors': {0: [[], [[], 0]]}, 'output_tensors': {0: [[{0: [0]}], [[], 1]]}, 'returns': []},
            {'patterns': {'in': [[(0, 'input_ids'), (1, 'input_mask')]], 'out': [[(0, 'Input')]]}, 'search_mode': 'node_name', 'node_names': {0: 'input_data'}, 'input_tensors': {0: [[], [[], 0]]}, 'output_tensors': {0: [[{0: [0]}, {1: [0]}], [[0, 1], 2]]}, 'returns': []},
            {'patterns': {'in': [[(0, ['input', 'pixel_values'])]], 'out': [[(0, 'Input')]]}, 'search_mode': 'node_name', 'node_names': {0: 'input_data'}, 'input_tensors': {0: [[], [[], 0]]}, 'output_tensors': {0: [[{0: [0]}], [[0], 1]]}, 'returns': [0]}]}
        for i in range(len(pattern_mapping_config['InputData'])):
            pattern_dict = pattern_mapping_config['InputData'][i]
            (model, new_node_names, ret_old_nodes) = util.pattern_mapping('InputData', pattern_dict, model)
            if (len(new_node_names) != 0):
                model.nodes[0].attr = None
                # Unknown shapes default to a fully dynamic 2-D tensor.
                for j in range(len(model.nodes[0].output_tensors)):
                    if (model.nodes[0].output_tensors[j].shape is None):
                        model.nodes[0].output_tensors[j].shape = [(- 1), (- 1)]
                return model
        if (model.nodes[0].op_type != 'Input'):
            # Fallback: collapse all ONNXINPUT nodes into one synthetic Input node.
            onnx_input_nodes_list = []
            model_input_tensors = []
            for node in model.nodes:
                if (node.op_type == 'ONNXINPUT'):
                    onnx_input_nodes_list.append(node.name)
                    for input_tensor in node.output_tensors:
                        model_input_tensors.append(copy.deepcopy(input_tensor))
            input_data_node = util.construct_node('input_data', 'Input', output_tensors=model_input_tensors)
            model.insert_nodes(0, [input_data_node])
            model.nodes[0].attr = None
            model.remove_nodes(onnx_input_nodes_list)
        return model
def main():
    """Scan the AgeDB image folder and write a meta CSV of ages and
    dataset-relative image paths."""
    args = get_args()
    (ages, img_paths) = ([], [])
    for filename in tqdm(os.listdir(os.path.join(args.data_path, 'AgeDB'))):
        # Filenames follow "<id>_<name>_<age>_<gender>.<ext>".
        (_, _, age, gender) = filename.split('.')[0].split('_')
        ages.append(age)
        # BUG FIX: the path previously appended the literal 'AgeDB/(unknown)'
        # for every image; record the actual filename instead.
        img_paths.append(f'AgeDB/{filename}')
    outputs = dict(age=ages, path=img_paths)
    output_dir = os.path.join(args.data_path, 'meta')
    os.makedirs(output_dir, exist_ok=True)
    output_path = os.path.join(output_dir, 'agedb.csv')
    df = pd.DataFrame(data=outputs)
    df.to_csv(str(output_path), index=False)
class Attention(nn.Module):
    """Multi-head self-attention that records its attention maps and can
    register a backward hook to capture attention gradients."""

    def __init__(self, dim=512, heads=8, dim_head=(512 // 8), dropout=0.1):
        super().__init__()
        inner_dim = dim_head * heads
        # An output projection is needed unless a single head already matches dim.
        needs_projection = not (heads == 1 and dim_head == dim)
        self.heads = heads
        self.scale = dim_head ** (-0.5)
        self.attend = nn.Softmax(dim=-1)
        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias=False)
        if needs_projection:
            self.to_out = nn.Sequential(nn.Linear(inner_dim, dim), nn.Dropout(dropout))
        else:
            self.to_out = nn.Identity()

    def _qkv(self, x):
        """Project x to q, k, v and split heads: (b, n, h*d) -> (b, h, n, d)."""
        chunks = self.to_qkv(x).chunk(3, dim=-1)
        return [rearrange(t, 'b n (h d) -> b h n d', h=self.heads) for t in chunks]

    def forward(self, x, register_hook=False):
        q, k, v = self._qkv(x)
        scores = torch.matmul(q, k.transpose(-1, -2)) * self.scale
        attn = self.attend(scores)
        self.save_attention_map(attn)
        if register_hook:
            # Capture d(loss)/d(attn) during backward for later inspection.
            attn.register_hook(self.save_attn_gradients)
        merged = rearrange(torch.matmul(attn, v), 'b h n d -> b n (h d)')
        return self.to_out(merged)

    def save_attn_gradients(self, attn_gradients):
        self.attn_gradients = attn_gradients

    def get_attn_gradients(self):
        return self.attn_gradients

    def save_attention_map(self, attention_map):
        self.attention_map = attention_map

    def get_attention_map(self):
        return self.attention_map

    def get_self_attention(self, x):
        """Return the attention weights for x without storing them or
        computing the attended output."""
        q, k, _ = self._qkv(x)
        scores = torch.matmul(q, k.transpose(-1, -2)) * self.scale
        return self.attend(scores)
def mkdirs(*paths: Path, exist_ok: bool=True, parents: bool=True, **kwargs) -> None:
    """Create every given directory (parents included, existing dirs OK by
    default); extra kwargs are forwarded to Path.mkdir."""
    for target in paths:
        target.mkdir(parents=parents, exist_ok=exist_ok, **kwargs)
def create_task_manager(dataset_name='test'):
    """Build a TaskManager preloaded with one 1000-sample training dataset
    split into batches of 10."""
    splitter = create_test_dataset_splitter(dataset_name)
    manager = TaskManager(False, SpeedMonitor())
    manager.new_dataset(batch_size=10, dataset_size=1000, dataset_name=dataset_name, dataset_splitter=splitter, task_type=elastic_training_pb2.TRAINING)
    return manager
class UpDS(nn.Module):
    """U-Net up-sampling block using depthwise-separable double convs;
    bilinear upsampling by default, transposed conv otherwise."""

    def __init__(self, in_channels, out_channels, bilinear=True, kernels_per_layer=1):
        super().__init__()
        if not bilinear:
            self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
            self.conv = DoubleConvDS(in_channels, out_channels, kernels_per_layer=kernels_per_layer)
        else:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
            # Bilinear upsampling keeps channel count, so mid channels shrink here.
            self.conv = DoubleConvDS(in_channels, out_channels, in_channels // 2, kernels_per_layer=kernels_per_layer)

    def forward(self, x1, x2=None):
        upsampled = self.up(x1)
        if x2 is None:
            return self.conv(upsampled)
        # Concatenate the skip connection along channels before convolving.
        return self.conv(torch.cat([x2, upsampled], dim=1))
def infer_direction(sen, default):
    """Classify `sen` as reporting an increase or a decrease.

    Expands 'increase'/'decrease' through WordNet synonyms and their
    inflections (via `lexeme`), counts occurrences in `sen`, and returns
    'Significantly increased', 'Significantly decreased', or `default` on a tie.
    """
    all_nw = []
    all_pw = []
    nw = ['decrease']
    pw = ['increase']
    for (neg_seed, pos_seed) in zip(nw, pw):
        neg_words = list(reduce((lambda y, x: np.append(y, x.lemma_names())), wordnet.synsets(neg_seed), []))
        pos_words = list(reduce((lambda y, x: np.append(y, x.lemma_names())), wordnet.synsets(pos_seed), []))
        all_nw.extend(neg_words)
        all_pw.extend(pos_words)
    try:
        all_nw = list(reduce((lambda y, x: np.append(y, lexeme(x))), all_nw, []))
        all_pw = list(reduce((lambda y, x: np.append(y, lexeme(x))), all_pw, []))
    except Exception:
        # BUG FIX: narrowed from a bare `except:` so KeyboardInterrupt /
        # SystemExit still propagate. NOTE(review): the recursive retry can
        # loop forever if `lexeme` fails deterministically — consider a cap.
        print('Error. Continue.')
        return infer_direction(sen, default)
    all_nw = list(set(all_nw))
    all_pw = list(set(all_pw))
    neg = sum((1 for word in all_nw if (word in sen)))
    pos = sum((1 for word in all_pw if (word in sen)))
    if (pos > neg):
        return 'Significantly increased'
    elif (neg > pos):
        return 'Significantly decreased'
    else:
        return default
def ready_mongo_observer(ex, db_name='sacred', url='localhost:27017'):
    """Attach a MongoDB observer to the sacred experiment `ex`."""
    observer = MongoObserver(url=url, db_name=db_name)
    ex.observers.append(observer)
def get_heat_matrix(adj_matrix: np.ndarray, t: float=5.0) -> np.ndarray:
    """Graph-diffusion heat kernel: expm(-t (I - H)) with H the
    symmetrically normalized adjacency (self-loops added).

    BUG FIX: the normalization line was missing its matrix-multiplication
    operators (`(D_tilde A_tilde) D_tilde` is a syntax error).
    """
    num_nodes = adj_matrix.shape[0]
    A_tilde = (adj_matrix + np.eye(num_nodes))
    D_tilde = np.diag((1 / np.sqrt(A_tilde.sum(axis=1))))
    # H = D^{-1/2} (A + I) D^{-1/2}
    H = D_tilde @ A_tilde @ D_tilde
    return expm(((- t) * (np.eye(num_nodes) - H)))
def callParserFunction(functionName, args, extractor):
    """Evaluate a MediaWiki parser function (or #invoke of a Scribunto
    module) and return its expansion; returns '' on any failure."""
    try:
        functionName = functionName.lower()
        if (functionName == '#invoke'):
            (module, fun) = (args[0].strip(), args[1].strip())
            logging.debug('%*s#invoke %s %s %s', extractor.frame.depth, '', module, fun, args[2:])
            if (len(args) == 2):
                # No explicit arguments: inherit parameters from the matching
                # template frame up the expansion stack.
                templateTitle = fullyQualifiedTemplateTitle(module)
                if (not templateTitle):
                    # BUG FIX: logging.warn is a deprecated alias of warning.
                    logging.warning('Template with empty title')
                params = None
                frame = extractor.frame
                while frame:
                    if (frame.title == templateTitle):
                        params = frame.args
                        break
                    frame = frame.prev
            else:
                params = [extractor.transform(p) for p in args[2:]]
                params = extractor.templateParams(params)
            ret = sharp_invoke(module, fun, params)
            logging.debug('%*s<#invoke %s %s %s', extractor.frame.depth, '', module, fun, ret)
            return ret
        if (functionName in parserFunctions):
            return parserFunctions[functionName](extractor, *args)
    except Exception:
        # BUG FIX: narrowed from a bare `except:` so KeyboardInterrupt /
        # SystemExit propagate; any other failure still yields ''.
        return ''
    return ''
class DPR(Retriever):
    """KILT retriever backed by a DPR question encoder and an RPC dense index."""

    def __init__(self, name, cfg):
        super().__init__(name)
        cfg = setup_cfg_gpu(cfg)
        logger.info('CFG (after gpu configuration):')
        logger.info('%s', OmegaConf.to_yaml(cfg))
        saved_state = load_states_from_checkpoint(cfg.model_file)
        set_cfg_params_from_state(saved_state.encoder_params, cfg)
        (tensorizer, encoder, _) = init_biencoder_components(cfg.encoder.encoder_model_type, cfg, inference_only=True)
        encoder = encoder.question_model
        (encoder, _) = setup_for_distributed_mode(encoder, None, cfg.device, cfg.n_gpu, cfg.local_rank, cfg.fp16)
        encoder.eval()
        model_to_load = get_model_obj(encoder)
        logger.info('Loading saved model state ...')
        encoder_prefix = 'question_model.'
        prefix_len = len(encoder_prefix)
        logger.info('Encoder state prefix %s', encoder_prefix)
        # Strip the bi-encoder prefix; drop position_ids, which newer HF
        # checkpoints do not carry.
        question_encoder_state = {key[prefix_len:]: value for (key, value) in saved_state.model_dict.items() if (key.startswith(encoder_prefix) and (key != 'question_model.embeddings.position_ids'))}
        model_to_load.load_state_dict(question_encoder_state, strict=False)
        vector_size = model_to_load.get_out_size()
        logger.info('Encoder vector_size=%d', vector_size)
        self.retriever = DenseRPCRetriever(encoder, cfg.batch_size, tensorizer, cfg.rpc_retriever_cfg_file, vector_size, use_l2_conversion=cfg.use_l2_conversion)
        self.retriever.load_index(cfg.rpc_index_id)
        self.KILT_mapping = None
        if cfg.KILT_mapping:
            self.KILT_mapping = dict(pickle.load(open(cfg.KILT_mapping, 'rb')))
        self.rpc_meta_compressed = cfg.rpc_meta_compressed
        self.cfg = cfg

    @classmethod
    def from_config_file(cls, name, config_file):
        """Alternate constructor from an OmegaConf YAML file.

        BUG FIX: restored @classmethod — the method takes `cls` but was
        undecorated, so class-level calls mis-bound the arguments.
        """
        cfg = OmegaConf.load(config_file)
        return cls(name, cfg)

    @classmethod
    def process_query(cls, x, ent_start_token, ent_end_token):
        """Strip entity markers from the query and ensure it ends with '?'.

        BUG FIX: restored @classmethod (called as DPR.process_query(x, ...)).
        """
        return (x['query'].replace(ent_start_token, '').replace(ent_end_token, '').strip() + ('?' if (not x['query'].endswith('?')) else ''))

    def feed_data(self, queries_data, ent_start_token=utils.ENT_START, ent_end_token=utils.ENT_END, logger=None):
        """Store cleaned questions and their ids for a later `run()`.

        BUG FIX: the default for `ent_end_token` was utils.ENT_START, so the
        end marker was never removed from queries.
        """
        self.questions = [DPR.process_query(x, ent_start_token, ent_end_token) for x in queries_data]
        self.query_ids = [x['id'] for x in queries_data]

    def run(self):
        """Retrieve the top-n_docs documents per fed query.

        Returns a dict mapping query_id -> list of provenance records.
        """
        dup_multiplier = 1
        questions_tensor = self.retriever.generate_question_vectors(self.questions)
        top_ids_and_scores = self.retriever.get_top_docs(questions_tensor.numpy(), (dup_multiplier * self.cfg.n_docs), search_batch=256)
        provenance = {}
        for (record, query_id) in tqdm(zip(top_ids_and_scores, self.query_ids)):
            element = []
            (docs_meta, scores) = record
            cnt = 0
            for (score, meta) in zip(scores, docs_meta):
                if (cnt >= self.cfg.n_docs):
                    break
                (doc_id, text, title) = meta[:3]
                wikipedia_id = (self.KILT_mapping[int(doc_id)] if (self.KILT_mapping and (int(doc_id) in self.KILT_mapping)) else None)
                # Index payloads may be zlib-compressed depending on the server config.
                element.append({'score': str(score), 'text': (str(zlib.decompress(text).decode()) if self.rpc_meta_compressed else text), 'wikipedia_title': (str(zlib.decompress(title).decode()) if self.rpc_meta_compressed else title), 'wikipedia_id': str(wikipedia_id), 'doc_id': str(doc_id)})
                cnt += 1
            assert (query_id not in provenance)
            provenance[query_id] = element
        return provenance
def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path='./', num_experts=8):
    """Convert a T5X/flax Switch-Transformers checkpoint to a saved PyTorch
    model; the config comes from a gin file when given, else from
    `config_file`."""
    print(f'Loading flax weights from : {flax_checkpoint_path}')
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)
    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)
    pt_model = SwitchTransformersForConditionalGeneration(config)
    # Flatten -> rename to HF conventions -> unflatten, then load into torch.
    renamed = rename_keys(flatten_dict(flax_params['target'], sep='/'))
    load_flax_weights_in_pytorch_model(pt_model, unflatten_dict(renamed, sep='/'))
    print(f'Save PyTorch model to {pytorch_dump_path}')
    pt_model.save_pretrained(pytorch_dump_path)
def main(argv):
    """Generate MAML meta-overfitting launch commands and, after user
    confirmation, submit them to the cluster (bsub) or run them locally."""
    from experiments.util import AsyncExecutor, generate_launch_commands
    import experiments.meta_overfitting_v2.maml_overfitting_base
    cmds = []
    for dataset in FLAGS.datasets.split(','):
        if (dataset == 'sin'):
            n_context_samples = [5, 10, 20]
        elif (dataset == 'cauchy'):
            n_context_samples = [20, 40]
        else:
            raise AssertionError('dataset must be either of [sin, cauchy]')
        exp_config = {'exp_name': [('meta-overfitting-v2-maml-%s' % dataset)], 'dataset': [dataset], 'n_threads': [N_THREADS], 'seed': list(range(30, 55)), 'data_seed': [28], 'num_layers': [4], 'layer_size': [32], 'n_iter_fit': [50000], 'lr_inner': [0.1, 0.2, 0.3, 0.5], 'n_train_tasks': [2, 4, 8, 16, 32, 64, 128, 256, 512], 'n_test_tasks': [200], 'n_context_samples': n_context_samples, 'n_test_samples': [100]}
        cmds.extend(generate_launch_commands(experiments.meta_overfitting_v2.maml_overfitting_base, exp_config))
    if FLAGS.cluster:
        submit_cmds = []
        for py_cmd in cmds:
            # Hash the command so each job gets a stable, unique stdout file.
            digest = hashlib.md5(str.encode(py_cmd)).hexdigest()
            bsub_cmd = ('bsub -oo /cluster/project/infk/krause/rojonas/stdout/gp-priors/meta-overfitting/%s.out -W 03:59 -R "rusage[mem=1048]" -n %i ' % (digest, N_THREADS))
            submit_cmds.append(((bsub_cmd + ' ') + py_cmd))
        reply = input(('About to submit %i compute jobs to the cluster. Proceed? [yes/no]\n' % len(submit_cmds)))
        if (reply == 'yes'):
            for cmd in submit_cmds:
                os.system(cmd)
    else:
        reply = input(('About to run %i compute jobs locally on %i workers. Proceed? [yes/no]\n' % (len(cmds), FLAGS.n_workers)))
        if (reply == 'yes'):
            executor = AsyncExecutor(n_jobs=FLAGS.n_workers)
            executor.run(os.system, cmds)
class QueryNERProcessor(object):
    """Loads MRC-style NER examples for a given dataset split."""

    def get_examples(self, data_dir, data_sign):
        # Split files follow the naming scheme "mrc-ner.<split>".
        path = os.path.join(data_dir, ('mrc-ner.' + data_sign))
        return read_mrc_ner_examples(path)
def buildVocab(fName):
    """Build a token -> 1-based index mapping (and its inverse) from a
    whitespace-separated vocabulary file.

    BUG FIX: the file handle was previously opened without being closed;
    a context manager now guarantees it is released.
    """
    with open(fName) as fh:
        tokens = fh.read().split()
    vocab = dict(zip(tokens, (1 + np.arange(len(tokens)))))
    invVocab = {v: k for (k, v) in vocab.items()}
    return (vocab, invVocab)
class ImageTextDataset(VisionDataset):
    """Image-caption dataset read from a JSON-lines file; each image
    contributes up to `captions_per_image` (image, caption) pairs."""

    def __init__(self, root: str, file_path: str, captions_per_image=2, transform: Optional[Callable]=None, target_transform: Optional[Callable]=None, transforms: Optional[Callable]=None):
        super().__init__(root, transforms, transform, target_transform)
        with open(file_path, 'r') as f:
            examples = [json.loads(line) for line in f]
        self.captions = []
        self.image_paths = []
        for example in examples:
            kept = example['captions'][:captions_per_image]
            self.captions += kept
            # Repeat the image path once per caption kept for it.
            self.image_paths += ([example['image_path']] * len(kept))

    def _load_image(self, idx: int):
        return read_image(self.image_paths[idx], mode=ImageReadMode.RGB)

    def _load_target(self, idx):
        return self.captions[idx]

    def __getitem__(self, index: int):
        image = self._load_image(index)
        target = self._load_target(index)
        if (self.transforms is not None):
            (image, target) = self.transforms(image, target)
        return (image, target)

    def __len__(self) -> int:
        return len(self.captions)
class TensorDict(OrderedDict):
    """Ordered dict of tensors that broadcasts torch.Tensor methods over
    all of its values."""

    def concat(self, other):
        """Return a new TensorDict with this dict's items plus `other`'s."""
        return TensorDict(self, **other)

    def copy(self):
        return TensorDict(super(TensorDict, self).copy())

    def __getattr__(self, name):
        """Forward unknown attributes as a torch.Tensor method applied to
        every value; raises AttributeError for non-Tensor attributes."""
        if (not hasattr(torch.Tensor, name)):
            # BUG FIX: message previously read "has not attribute".
            raise AttributeError("'TensorDict' object has no attribute '{}'".format(name))

        def apply_attr(*args, **kwargs):
            # Values lacking the method (non-tensors) pass through unchanged.
            return TensorDict({n: (getattr(e, name)(*args, **kwargs) if hasattr(e, name) else e) for (n, e) in self.items()})
        return apply_attr

    def attribute(self, attr: str, *args):
        """Fetch `attr` (with optional default) from every value."""
        return TensorDict({n: getattr(e, attr, *args) for (n, e) in self.items()})

    def apply(self, fn, *args, **kwargs):
        """Apply `fn` to every value, keeping keys."""
        return TensorDict({n: fn(e, *args, **kwargs) for (n, e) in self.items()})

    @staticmethod
    def _iterable(a):
        # Restored @staticmethod: the helper takes no `self`.
        return isinstance(a, (TensorDict, list))
def get_lvis_instances_meta(dataset_name):
    """Dispatch to the matching metadata loader based on substrings of
    `dataset_name`; raises ValueError for unknown datasets."""
    if 'cocofied' in dataset_name:
        return _get_coco_instances_meta()
    if 'v0.5' in dataset_name:
        return _get_lvis_instances_meta_v0_5()
    if 'v1' in dataset_name:
        return _get_lvis_instances_meta_v1()
    raise ValueError('No built-in metadata for dataset {}'.format(dataset_name))
def _bias_act_cuda(dim=1, act='linear', alpha=None, gain=None, clamp=None):
    """Build (and memoize) a custom-CUDA fused bias + activation autograd op
    specialized for the given (dim, act, alpha, gain, clamp) tuple.

    BUG FIX: `forward`/`backward` of a torch.autograd.Function must be
    @staticmethods; they were plain methods, so `.apply()` would have
    mis-bound `ctx`. Decorators restored on both inner Function classes.
    """
    assert ((clamp is None) or (clamp >= 0))
    spec = activation_funcs[act]
    alpha = float((alpha if (alpha is not None) else spec.def_alpha))
    gain = float((gain if (gain is not None) else spec.def_gain))
    clamp = float((clamp if (clamp is not None) else (- 1)))
    key = (dim, act, alpha, gain, clamp)
    if (key in _bias_act_cuda_cache):
        return _bias_act_cuda_cache[key]

    class BiasActCuda(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, b):
            ctx.memory_format = (torch.channels_last if ((x.ndim > 2) and (x.stride(1) == 1)) else torch.contiguous_format)
            x = x.contiguous(memory_format=ctx.memory_format)
            b = (b.contiguous() if (b is not None) else _null_tensor)
            y = x
            # Skip the CUDA kernel entirely when the op is a no-op.
            if ((act != 'linear') or (gain != 1) or (clamp >= 0) or (b is not _null_tensor)):
                y = _plugin.bias_act(x, b, _null_tensor, _null_tensor, _null_tensor, 0, dim, spec.cuda_idx, alpha, gain, clamp)
            ctx.save_for_backward((x if (('x' in spec.ref) or spec.has_2nd_grad) else _null_tensor), (b if (('x' in spec.ref) or spec.has_2nd_grad) else _null_tensor), (y if ('y' in spec.ref) else _null_tensor))
            return y

        @staticmethod
        def backward(ctx, dy):
            dy = dy.contiguous(memory_format=ctx.memory_format)
            (x, b, y) = ctx.saved_tensors
            dx = None
            db = None
            if (ctx.needs_input_grad[0] or ctx.needs_input_grad[1]):
                dx = dy
                if ((act != 'linear') or (gain != 1) or (clamp >= 0)):
                    dx = BiasActCudaGrad.apply(dy, x, b, y)
            if ctx.needs_input_grad[1]:
                # The bias gradient is dx summed over all non-bias dims.
                db = dx.sum([i for i in range(dx.ndim) if (i != dim)])
            return (dx, db)

    class BiasActCudaGrad(torch.autograd.Function):
        @staticmethod
        def forward(ctx, dy, x, b, y):
            ctx.memory_format = (torch.channels_last if ((dy.ndim > 2) and (dy.stride(1) == 1)) else torch.contiguous_format)
            dx = _plugin.bias_act(dy, b, x, y, _null_tensor, 1, dim, spec.cuda_idx, alpha, gain, clamp)
            ctx.save_for_backward((dy if spec.has_2nd_grad else _null_tensor), x, b, y)
            return dx

        @staticmethod
        def backward(ctx, d_dx):
            d_dx = d_dx.contiguous(memory_format=ctx.memory_format)
            (dy, x, b, y) = ctx.saved_tensors
            d_dy = None
            d_x = None
            d_b = None
            d_y = None
            if ctx.needs_input_grad[0]:
                d_dy = BiasActCudaGrad.apply(d_dx, x, b, y)
            if (spec.has_2nd_grad and (ctx.needs_input_grad[1] or ctx.needs_input_grad[2])):
                d_x = _plugin.bias_act(d_dx, b, x, y, dy, 2, dim, spec.cuda_idx, alpha, gain, clamp)
            if (spec.has_2nd_grad and ctx.needs_input_grad[2]):
                d_b = d_x.sum([i for i in range(d_x.ndim) if (i != dim)])
            return (d_dy, d_x, d_b, d_y)

    _bias_act_cuda_cache[key] = BiasActCuda
    return BiasActCuda
def resnet44(pretrained: bool=False, progress: bool=True, num_classes: int=10, layer_config: dict=None) -> ResNet:
    """Build the ResNet-44 variant converted to the module-level MODE,
    delegating to create_torchvision_biomodel."""
    banner = 'Converting ResNet-44 to {} mode'.format(MODE_STRING)
    print(banner)
    return create_torchvision_biomodel(small_resnet.resnet44, MODE, layer_config, pretrained, progress, num_classes)
def _update_optimizer_with_constant_learning_rate(optimizer, learning_rate):
constant_lr = optimizer.learning_rate.constant_learning_rate
constant_lr.learning_rate = learning_rate |
class PythonParameter(_message.Message):
    # Generated protocol-buffer message class; do not edit by hand.
    # Python-2-style metaclass hook used by older protobuf codegen.
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    # Descriptor object generated for the PythonParameter message type.
    DESCRIPTOR = _PYTHONPARAMETER
class DDIMParallelSchedulerOutput(BaseOutput):
    """Output container for the DDIM parallel scheduler's step."""
    # Sample for the previous timestep (next input to the denoising loop).
    prev_sample: torch.FloatTensor
    # Predicted denoised sample x_0, when the scheduler computes it.
    pred_original_sample: Optional[torch.FloatTensor] = None
class N2V(CARE):
def __init__(self, config, name=None, basedir='.'):
((config is None) or isinstance(config, self._config_class) or _raise(ValueError(("Invalid configuration of type '%s', was expecting type '%s'." % (type(config).__name__, self._config_class.__name__)))))
if ((config is not None) and (not config.is_valid())):
invalid_attr = config.is_valid(True)[1]
raise ValueError(('Invalid configuration attributes: ' + ', '.join(invalid_attr)))
((not ((config is None) and (basedir is None))) or _raise(ValueError('No config provided and cannot be loaded from disk since basedir=None.')))
((name is None) or (isinstance(name, string_types) and (len(name) > 0)) or _raise(ValueError(("No valid name: '%s'" % str(name)))))
((basedir is None) or isinstance(basedir, (string_types, Path)) or _raise(ValueError(("No valid basedir: '%s'" % str(basedir)))))
self.config = config
self.name = (name if (name is not None) else datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S.%f'))
self.basedir = (Path(basedir) if (basedir is not None) else None)
if (config is not None):
self._update_and_check_config()
self._set_logdir()
if (config is None):
self._update_and_check_config()
self._model_prepared = False
self.keras_model = self._build()
if (config is None):
self._find_and_load_weights()
def _build(self):
return self._build_unet(n_dim=self.config.n_dim, residual=self.config.unet_residual, n_depth=self.config.unet_n_depth, kern_size=self.config.unet_kern_size, n_first=self.config.unet_n_first, last_activation=self.config.unet_last_activation, batch_norm=self.config.batch_norm, blurpool=self.config.blurpool, skip_skipone=self.config.skip_skipone)(self.config.unet_input_shape, self.config.single_net_per_channel)
def _build_unet(self, n_dim=2, n_depth=2, kern_size=3, n_first=32, residual=True, last_activation='linear', batch_norm=True, blurpool=False, skip_skipone=False):
def _build_this(input_shape, single_net_per_channel):
if single_net_per_channel:
return build_single_unet_per_channel(input_shape, last_activation, n_depth, n_first, ((kern_size,) * n_dim), pool_size=((2,) * n_dim), residual=residual, prob_out=False, batch_norm=batch_norm, blurpool=blurpool, skip_skipone=skip_skipone)
else:
return build_unet(input_shape, last_activation, n_depth, n_first, ((kern_size,) * n_dim), pool_size=((2,) * n_dim), residual=residual, prob_out=False, batch_norm=batch_norm, blurpool=blurpool, skip_skipone=skip_skipone)
return _build_this
def train(self, X, validation_X, epochs=None, steps_per_epoch=None):
(n_train, n_val) = (len(X), len(validation_X))
frac_val = ((1.0 * n_val) / (n_train + n_val))
frac_warn = 0.05
if (frac_val < frac_warn):
warnings.warn(('small number of validation images (only %.1f%% of all images)' % (100 * frac_val)))
axes = axes_check_and_normalize(('S' + self.config.axes), X.ndim)
ax = axes_dict(axes)
div_by = (2 ** self.config.unet_n_depth)
axes_relevant = ''.join((a for a in 'XYZT' if (a in axes)))
val_num_pix = 1
train_num_pix = 1
val_patch_shape = ()
for a in axes_relevant:
n = X.shape[ax[a]]
val_num_pix *= validation_X.shape[ax[a]]
train_num_pix *= X.shape[ax[a]]
val_patch_shape += tuple([validation_X.shape[ax[a]]])
if ((n % div_by) != 0):
raise ValueError(('training images must be evenly divisible by %d along axes %s (axis %s has incompatible size %d)' % (div_by, axes_relevant, a, n)))
if (epochs is None):
epochs = self.config.train_epochs
if (steps_per_epoch is None):
steps_per_epoch = self.config.train_steps_per_epoch
if (not self._model_prepared):
self.prepare_for_training()
manipulator = eval('pm_{0}({1})'.format(self.config.n2v_manipulator, str(self.config.n2v_neighborhood_radius)))
means = np.array([float(mean) for mean in self.config.means], ndmin=len(X.shape), dtype=np.float32)
stds = np.array([float(std) for std in self.config.stds], ndmin=len(X.shape), dtype=np.float32)
X = self.__normalize__(X, means, stds)
validation_X = self.__normalize__(validation_X, means, stds)
_mask = (np.array(self.config.structN2Vmask) if self.config.structN2Vmask else None)
training_data = N2V_DataWrapper(X, np.concatenate((X, np.zeros(X.shape, dtype=X.dtype)), axis=axes.index('C')), batch_size=self.config.train_batch_size, length=(self.config.train_steps_per_epoch * self.config.train_epochs), perc_pix=self.config.n2v_perc_pix, shape=self.config.n2v_patch_shape, value_manipulation=manipulator, structN2Vmask=_mask)
validation_Y = np.concatenate((validation_X, np.zeros(validation_X.shape, dtype=validation_X.dtype)), axis=axes.index('C'))
n2v_utils.manipulate_val_data(validation_X, validation_Y, perc_pix=self.config.n2v_perc_pix, shape=val_patch_shape, value_manipulation=manipulator)
self.callbacks.append(CARETensorBoardImage(model=self.keras_model, data=(validation_X, validation_X), log_dir=str(((self.logdir / 'logs') / 'images')), n_images=3, prob_out=False))
history = self.keras_model.fit(iter(training_data), validation_data=(validation_X, validation_Y), epochs=epochs, steps_per_epoch=steps_per_epoch, callbacks=self.callbacks, verbose=1)
if (self.basedir is not None):
self.keras_model.save_weights(str((self.logdir / 'weights_last.h5')))
if (self.config.train_checkpoint is not None):
print()
self._find_and_load_weights(self.config.train_checkpoint)
try:
(self.logdir / 'weights_now.h5').unlink()
except FileNotFoundError:
pass
return history
def prepare_for_training(self, optimizer=None, **kwargs):
    """Compile the Keras model and assemble the training callback list.

    Parameters
    ----------
    optimizer : keras Optimizer, optional
        Defaults to Adam with the configured learning rate.
    kwargs : dict
        Forwarded to :meth:`prepare_model`.
    """
    if (optimizer is None):
        from tensorflow.keras.optimizers import Adam
        optimizer = Adam(learning_rate=self.config.train_learning_rate)
    # prepare_model compiles the model and returns the base callback list.
    self.callbacks = self.prepare_model(self.keras_model, optimizer, self.config.train_loss, **kwargs)
    if (self.basedir is not None):
        if (self.config.train_checkpoint is not None):
            from tensorflow.keras.callbacks import ModelCheckpoint
            # Two checkpoints: best weights so far, plus the most recent ones.
            self.callbacks.append(ModelCheckpoint(str((self.logdir / self.config.train_checkpoint)), save_best_only=True, save_weights_only=True))
            self.callbacks.append(ModelCheckpoint(str((self.logdir / 'weights_now.h5')), save_best_only=False, save_weights_only=True))
        if self.config.train_tensorboard:
            from tensorflow.keras.callbacks import TensorBoard
            self.callbacks.append(TensorBoard(log_dir=str((self.logdir / 'logs')), write_graph=False, profile_batch=0))
    if (self.config.train_reduce_lr is not None):
        from tensorflow.keras.callbacks import ReduceLROnPlateau
        rlrop_params = self.config.train_reduce_lr
        # Default to verbose so LR drops are visible in the training log.
        if ('verbose' not in rlrop_params):
            rlrop_params['verbose'] = True
        self.callbacks.append(ReduceLROnPlateau(**rlrop_params))
    self._model_prepared = True
def prepare_model(self, model, optimizer, loss, metrics=('mse', 'mae')):
    """Compile *model* with the given optimizer/loss and return base callbacks.

    Parameters
    ----------
    model : keras Model
    optimizer : keras Optimizer (validated below)
    loss : str
        Either 'mse' or 'mae'.
    metrics : tuple of str
        Names resolved against the same loss factories.

    Returns
    -------
    list
        Base callbacks (currently just TerminateOnNaN).

    Raises
    ------
    ValueError
        If optimizer has the wrong type or a loss/metric name is unknown.
    """
    from tensorflow.keras.optimizers import Optimizer
    (isinstance(optimizer, Optimizer) or _raise(ValueError()))
    # Dispatch the configured names to the loss factories directly instead of
    # building calls with eval(); unknown names now fail with a clear error
    # instead of a NameError / undefined loss_standard.
    loss_factories = {'mse': loss_mse, 'mae': loss_mae}
    if (loss not in loss_factories):
        raise ValueError("Unknown loss '%s' (expected one of: %s)" % (loss, ', '.join(loss_factories)))
    loss_standard = loss_factories[loss]()
    try:
        _metrics = [loss_factories[m]() for m in metrics]
    except KeyError as e:
        raise ValueError('Unknown metric %s' % e)
    callbacks = [TerminateOnNaN()]
    model.compile(optimizer=optimizer, loss=loss_standard, metrics=_metrics)
    return callbacks
def __normalize__(self, data, means, stds):
    """Return *data* standardised with the given per-channel means/stds."""
    centered = data - means
    return centered / stds
def __denormalize__(self, data, means, stds):
    """Invert __normalize__: scale by stds, then shift by means."""
    rescaled = data * stds
    return rescaled + means
def predict(self, img, axes, resizer=PadAndCropResizer(), n_tiles=None, tta=False):
    """Denoise *img* with the trained network.

    Normalises with the configured per-channel means/stds, moves any channel
    axis to the last position for the network, optionally applies test-time
    augmentation, then denormalises and restores the input axis order.

    Parameters
    ----------
    img : np.ndarray
        Input image; cast to float32 if needed.
    axes : str
        Axes string of *img* (may contain 'C').
    resizer : Resizer
        Pads/crops the image to a network-compatible size.
    n_tiles : tuple or None
        Per-axis tiling; reordered alongside the channel move.
    tta : bool
        If True, average predictions over augmented variants.
    """
    means = np.array([float(mean) for mean in self.config.means], ndmin=len(img.shape), dtype=np.float32)
    stds = np.array([float(std) for std in self.config.stds], ndmin=len(img.shape), dtype=np.float32)
    if (img.dtype != np.float32):
        print('The input image is of type {} and will be casted to float32 for prediction.'.format(img.dtype))
        img = img.astype(np.float32)
    new_axes = axes
    new_n_tiles = n_tiles
    if ('C' in axes):
        # Channel axis goes last; the tiling tuple must follow the same move.
        new_axes = (axes.replace('C', '') + 'C')
        if n_tiles:
            new_n_tiles = (tuple([n_tiles[axes.index(c)] for c in axes if (c != 'C')]) + (n_tiles[axes.index('C')],))
        normalized = self.__normalize__(np.moveaxis(img, axes.index('C'), (- 1)), means, stds)
    else:
        # Temporarily add a singleton channel so broadcasting works, then drop it.
        normalized = self.__normalize__(img[(..., np.newaxis)], means, stds)
        normalized = normalized[(..., 0)]
    if tta:
        # Predict each augmented variant, then merge the predictions.
        aug = tta_forward(normalized)
        preds = []
        for img in aug:
            preds.append(self._predict_mean_and_scale(img, axes=new_axes, normalizer=None, resizer=resizer, n_tiles=new_n_tiles)[0])
        pred = tta_backward(preds)
    else:
        pred = self._predict_mean_and_scale(normalized, axes=new_axes, normalizer=None, resizer=resizer, n_tiles=new_n_tiles)[0]
    pred = self.__denormalize__(pred, means, stds)
    if ('C' in axes):
        # Move the channel axis back to its original position.
        pred = np.moveaxis(pred, (- 1), axes.index('C'))
    return pred
def predict_bioimageio(self, img: np.ndarray, axes: str, eps: float=1e-06):
    """Predict with bioimage.io-compatible pre-/post-processing.

    Mirrors the exported model's ``zero_mean_unit_variance`` preprocessing
    (note the ``eps`` added to the std denominator) and linear-scale
    postprocessing, working in float64 throughout.

    Parameters
    ----------
    img : np.ndarray
    axes : str
        Axes string; a lowercase 'b' batch axis is mapped to 'S'.
    eps : float
        Stabiliser added to stds during normalisation.
    """
    means = np.array([float(mean) for mean in self.config.means], ndmin=len(img.shape))
    stds = np.array([float(std) for std in self.config.stds], ndmin=len(img.shape))
    img = img.astype(np.float64)
    if ('b' in axes):
        # bioimage.io spells batch as 'b'; internally it is 'S' (sample).
        axes = axes.replace('b', 'S').upper()
    new_axes = axes
    if ('C' in axes):
        new_axes = (axes.replace('C', '') + 'C')
        normalized = np.moveaxis(img, axes.index('C'), (- 1))
        normalized = ((normalized - means) / (stds + eps))
    else:
        # Singleton channel for broadcasting; dropped right after.
        normalized = img[(..., np.newaxis)]
        normalized = ((normalized - means) / (stds + eps))
        normalized = normalized[(..., 0)]
    pred = self._predict_mean_and_scale(normalized, axes=new_axes, normalizer=None, resizer=None)[0]
    pred = pred.astype(np.float64)
    pred = self.__denormalize__(pred, means, stds)
    if ('C' in axes):
        pred = np.moveaxis(pred, (- 1), axes.index('C'))
    return pred
def _set_logdir(self):
    """Bind ``self.logdir`` to ``basedir/name`` and load or persist config.json.

    If no in-memory config exists, a valid ``config.json`` must already be
    present and is loaded (validated); otherwise the current config is
    written out, warning when the directory already exists.

    Raises
    ------
    FileNotFoundError
        No config in memory and no config.json on disk.
    ValueError
        Loaded config fails validation.
    """
    self.logdir = (self.basedir / self.name)
    config_file = (self.logdir / 'config.json')
    if (self.config is None):
        if config_file.exists():
            config_dict = load_json(str(config_file))
            # Empty array placeholder: the config class only needs the dict fields here.
            self.config = self._config_class(np.array([]), **config_dict)
            if (not self.config.is_valid()):
                # is_valid(True) also returns the list of offending attributes.
                invalid_attr = self.config.is_valid(True)[1]
                raise ValueError(('Invalid attributes in loaded config: ' + ', '.join(invalid_attr)))
        else:
            raise FileNotFoundError(("config file doesn't exist: %s" % str(config_file.resolve())))
    else:
        if self.logdir.exists():
            warnings.warn(('output path for model already exists, files may be overwritten: %s' % str(self.logdir.resolve())))
        self.logdir.mkdir(parents=True, exist_ok=True)
        save_json(vars(self.config), str(config_file))
# NOTE(review): the bare call below looks like a stripped decorator for
# export_TF (likely '@...without_basedir(warn=True)' with the leading '@'
# lost in extraction) -- confirm against the original source.
_without_basedir(warn=True)
def export_TF(self, name: str, description: str, authors: List[str], test_img: np.ndarray, axes: str, patch_shape: Tuple[(int, int)], license: str='BSD-3-Clause', result_path: Union[(Path, str)]=None):
    """Export the trained model in BioImage Model Zoo format.

    Saves the TF model, runs the bioimage.io-style prediction on *test_img*
    to produce reference input/output arrays, and bundles everything into a
    model-zoo archive under *result_path* (defaults to the log directory).
    """
    input_n_dims = len(test_img.shape)
    if ('C' in axes):
        # The channel axis does not count towards spatial dimensionality.
        input_n_dims -= 1
    assert (input_n_dims == self.config.n_dim), 'Input and network dimensions do not match.'
    assert (test_img.shape[axes.index('X')] == test_img.shape[axes.index('Y')]), 'X and Y dimensions are not of same length.'
    if (patch_shape != None):
        self.config.patch_shape = patch_shape
    if (result_path is None):
        result_path = self.logdir
    result_path = Path(result_path).absolute()
    # Reference output used for model-zoo verification.
    test_output = self.predict_bioimageio(test_img, axes)
    model_path = (result_path / 'tf_model.zip')
    config_path = (result_path / 'config.json')
    save_model_tf(model=self.keras_model, config=self.config, config_path=config_path, model_path=model_path)
    # Model-zoo axes are lower case with 'b' for batch; add a batch axis if missing.
    new_axes = axes.replace('S', 'b').lower()
    if ('b' not in new_axes):
        new_axes = ('b' + new_axes)
        axes = ('S' + axes)
        test_img = test_img[(np.newaxis, ...)]
        test_output = test_output[(np.newaxis, ...)]
    input_file = (self.logdir.absolute() / 'test_input.npy')
    np.save(input_file, test_img.astype(np.float64))
    output_file = (self.logdir.absolute() / 'test_output.npy')
    np.save(output_file, test_output.astype(np.float64))
    # Pre/post-processing specs mirror predict_bioimageio's normalisation.
    preprocessing = [{'name': 'zero_mean_unit_variance', 'kwargs': {'mode': 'fixed', 'axes': ('yx' if (len(axes) == 4) else 'zyx'), 'mean': [float(m) for m in self.config.means], 'std': [float(s) for s in self.config.stds]}}]
    postprocessing = [{'name': 'scale_linear', 'kwargs': {'axes': ('yx' if (len(axes) == 4) else 'zyx'), 'gain': [float(s) for s in self.config.stds], 'offset': [float(m) for m in self.config.means]}}]
    authors = [{'name': author} for author in authors]
    algorithm = which_algorithm(self.config)
    cite = get_algorithm_details(algorithm)
    doc = generate_bioimage_md(name, cite, result_path)
    files = [str(config_path.absolute()), str(model_path.absolute())]
    result_archive_path = (result_path / (result_path.stem + Extensions.BIOIMAGE_EXT.value))
    build_modelzoo(result_archive_path, model_path, result_path, input_file, output_file, preprocessing, postprocessing, doc, name, authors, algorithm, tf.__version__, cite, new_axes, files)
    print(('\nModel exported in BioImage ModelZoo format:\n%s' % str(result_archive_path.resolve())))
def _config_class(self):
    # Config class used when re-loading config.json in _set_logdir.
    # NOTE(review): likely decorated with @property in the original source
    # (decorators appear stripped elsewhere in this file) -- confirm upstream.
    return N2VConfig
class shortcut(nn.Module):
    """Residual-style combiner: a Linear+ReLU branch applied to x1 plus a
    purely linear shortcut applied to x2, summed elementwise."""

    def __init__(self, input_dim_1, input_dim_2):
        super().__init__()
        self.block = nn.Sequential(nn.Linear(input_dim_1, input_dim_2), nn.ReLU())
        self.linear_shortcut = nn.Linear(input_dim_2, input_dim_2)

    def forward(self, x1, x2):
        main_branch = self.block(x1)
        skip_branch = self.linear_shortcut(x2)
        return main_branch + skip_branch
def prepare_faiss_index(slice_samples_paths, partitions, sample_fraction=None):
    """Build and train a FAISS index on a sample drawn from the given slices.

    The embedding dimensionality is taken from the sample's last axis.
    """
    sample = load_sample(slice_samples_paths, sample_fraction=sample_fraction)
    embedding_dim = sample.shape[-1]
    index = FaissIndex(embedding_dim, partitions)
    print_message('#> Training with the vectors...')
    index.train(sample)
    print_message('Done training!\n')
    return index
def readme():
    """Return the contents of README.md decoded as UTF-8."""
    with open('README.md', encoding='utf-8') as fh:
        return fh.read()
class Model(nn.Module):
def __init__(self, input_size, num_classes):
super().__init__()
self.conv1 = nn.Conv2d(1, 64, 3, padding=1)
self.bn1 = nn.BatchNorm2d(64)
self.conv2 = nn.Conv2d(64, 128, 3, padding=1)
self.bn2 = nn.BatchNorm2d(128)
self.conv3 = nn.Conv2d(128, 128, 3)
self.bn3 = nn.BatchNorm2d(128)
self.fc1 = nn.Linear(((2 * 2) * 128), 256)
self.fc2 = nn.Linear(256, num_classes)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.bn2(self.conv2(x)))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.bn3(self.conv3(x)))
x = F.max_pool2d(x, 2, 2)
x = x.flatten(1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
x = F.log_softmax(x, dim=1)
return x |
def backward_hook_norm(module, grad_in, grad_out):
    """Backward hook that rescales the first incoming gradient to unit std."""
    first_grad = grad_in[0]
    return (first_grad / torch.std(first_grad),)
class TestAutoXGBRegressor(TestCase):
    """Integration tests for AutoXGBRegressor hyper-parameter search on Orca."""

    def setUp(self) -> None:
        # Fresh Orca/Ray context per test.
        from bigdl.orca import init_orca_context
        init_orca_context(cores=8, init_ray_on_spark=True)

    def tearDown(self) -> None:
        from bigdl.orca import stop_orca_context
        stop_orca_context()

    def test_fit(self):
        """Basic fit: best model's params stay inside the search-space bounds."""
        auto_xgb_reg = AutoXGBRegressor(cpus_per_trial=2, name='auto_xgb_regressor', tree_method='hist')
        (data, validation_data) = get_data()
        auto_xgb_reg.fit(data=data, validation_data=validation_data, search_space=get_xgb_search_space(), n_sampling=4, epochs=1, metric='mae')
        best_model = auto_xgb_reg.get_best_model()
        # Bounds follow from get_xgb_search_space().
        assert (5 <= best_model.n_estimators <= 10)
        assert (2 <= best_model.max_depth <= 5)
        best_config = auto_xgb_reg.get_best_config()
        assert all(((k in best_config.keys()) for k in get_xgb_search_space().keys()))

    def test_metric(self):
        """A metric without a built-in mode must be given metric_mode explicitly."""
        auto_xgb_reg = AutoXGBRegressor(cpus_per_trial=2, name='auto_xgb_regressor', tree_method='hist')
        (data, validation_data) = get_data()
        with pytest.raises(RuntimeError) as exeinfo:
            auto_xgb_reg.fit(data=data, epochs=1, validation_data=validation_data, metric='logloss', search_space=get_xgb_search_space(), n_sampling=4)
        assert ('metric logloss' in str(exeinfo))
        # Succeeds once metric_mode is supplied.
        auto_xgb_reg.fit(data=data, epochs=1, validation_data=validation_data, metric='logloss', metric_mode='min', search_space=get_xgb_search_space(), n_sampling=4)

    def test_metric_func(self):
        """Custom callable metrics also require an explicit metric_mode."""
        auto_xgb_reg = AutoXGBRegressor(cpus_per_trial=2, name='auto_xgb_regressor', tree_method='hist')
        (data, validation_data) = get_data()

        def pyrmsle(y_true, y_pred):
            # Clamp predictions just above -1 so log1p stays defined.
            y_pred[(y_pred < (- 1))] = ((- 1) + 1e-06)
            elements = np.power((np.log1p(y_true) - np.log1p(y_pred)), 2)
            return float(np.sqrt((np.sum(elements) / len(y_true))))

        with pytest.raises(RuntimeError) as exeinfo:
            auto_xgb_reg.fit(data=data, epochs=1, validation_data=validation_data, metric=pyrmsle, search_space=get_xgb_search_space(), n_sampling=4)
        assert ('metric_mode' in str(exeinfo))
        auto_xgb_reg.fit(data=data, epochs=1, validation_data=validation_data, metric=pyrmsle, metric_mode='min', search_space=get_xgb_search_space(), n_sampling=4)

    def test_data_creator(self):
        """Fit from data-creator callables with an extra feature-sampling dimension."""
        (train_data_creator, val_data_creator) = get_data_creators()
        auto_xgb_reg = AutoXGBRegressor(cpus_per_trial=2, name='auto_xgb_regressor', tree_method='hist')
        model_search_space = get_xgb_search_space()
        # Sample 2 of 3 candidate features per trial on top of the model space.
        search_space = {'features': hp.sample_from((lambda spec: np.random.choice(['f1', 'f2', 'f3'], size=2)))}
        search_space.update(model_search_space)
        auto_xgb_reg.fit(data=train_data_creator, epochs=1, validation_data=val_data_creator, metric='logloss', metric_mode='min', search_space=search_space, n_sampling=2)
        best_config = auto_xgb_reg.get_best_config()
        assert all(((k in best_config.keys()) for k in search_space.keys()))
        assert (len(best_config['features']) == 2)

    def test_spark_df(self):
        """Fit directly from Spark DataFrames with explicit feature/label columns."""
        (df, val_df, feature_cols, label_cols) = get_spark_df()
        auto_xgb_reg = AutoXGBRegressor(cpus_per_trial=2, name='auto_xgb_regressor', tree_method='hist')
        search_space = get_xgb_search_space()
        auto_xgb_reg.fit(data=df, epochs=1, validation_data=val_df, metric='logloss', metric_mode='min', search_space=search_space, n_sampling=2, feature_cols=feature_cols, label_cols=label_cols)
        best_model = auto_xgb_reg.get_best_model()
        assert (5 <= best_model.n_estimators <= 10)
        assert (2 <= best_model.max_depth <= 5)
        best_config = auto_xgb_reg.get_best_config()
        assert all(((k in best_config.keys()) for k in search_space.keys()))
class Bar(Progress):
    """Classic '#'-filled progress bar rendered as a single updating line."""
    width = 32
    suffix = '%(index)d/%(max)d'
    bar_prefix = ' |'
    bar_suffix = '| '
    empty_fill = ' '
    fill = '#'

    def update(self):
        # self.progress is a fraction in [0, 1]; split the bar accordingly.
        done = int(self.width * self.progress)
        remaining = self.width - done
        rendered = ''.join([
            self.message % self,
            self.bar_prefix,
            self.fill * done,
            self.empty_fill * remaining,
            self.bar_suffix,
            self.suffix % self,
        ])
        self.writeln(rendered)
def _addmm_flops_compute(input, mat1, mat2, *, beta=1, alpha=1, out=None):
    """Return (flops, macs) for addmm, i.e. input + alpha * (mat1 @ mat2).

    The matmul contributes one MAC per output element per reduction step
    (2 flops each); the elementwise add of *input* contributes one flop per
    element. beta/alpha/out are accepted for signature parity but unused.
    """
    macs = _prod(mat1.shape) * mat2.shape[-1]
    flops = 2 * macs + _prod(input.shape)
    return (flops, macs)
class BottleneckBlock(nn.Module):
    """DenseNet-style bottleneck: BN-ReLU-Conv1x1 then BN-ReLU-Conv3x3, with
    the input concatenated onto the output along the channel axis."""

    def __init__(self, in_planes, out_planes, dropRate=0.0):
        super(BottleneckBlock, self).__init__()
        # 1x1 conv expands to 4x the output width before the 3x3 conv.
        inter_planes = out_planes * 4
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, inter_planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(inter_planes)
        self.conv2 = nn.Conv2d(inter_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.droprate = dropRate

    def _maybe_drop(self, t):
        # Apply dropout only when a positive rate was configured.
        if self.droprate > 0:
            return F.dropout(t, p=self.droprate, inplace=False, training=self.training)
        return t

    def forward(self, x):
        out = self._maybe_drop(self.conv1(self.relu(self.bn1(x))))
        out = self._maybe_drop(self.conv2(self.relu(self.bn2(out))))
        return torch.cat([x, out], 1)
class SpectralNormConv(object):
    """Forward pre-hook that rescales a conv layer's weight so its spectral
    norm -- estimated by power iteration through the convolution itself --
    stays at or below ``coeff``.
    """
    _version = 1

    def __init__(self, coeff, input_dim, name='weight', n_power_iterations=1, eps=1e-12):
        # coeff: target bound on the spectral norm.
        # input_dim: full input shape used to reshape the v vector for conv2d.
        self.coeff = coeff
        self.input_dim = input_dim
        self.name = name
        if (n_power_iterations <= 0):
            raise ValueError('Expected n_power_iterations to be positive, but got n_power_iterations={}'.format(n_power_iterations))
        self.n_power_iterations = n_power_iterations
        self.eps = eps

    def compute_weight(self, module, do_power_iteration):
        """Return the rescaled weight; optionally refresh u/v by power iteration."""
        weight = getattr(module, (self.name + '_orig'))
        u = getattr(module, (self.name + '_u'))
        v = getattr(module, (self.name + '_v'))
        sigma_log = getattr(module, (self.name + '_sigma'))
        stride = module.stride
        padding = module.padding
        if do_power_iteration:
            with torch.no_grad():
                for _ in range(self.n_power_iterations):
                    # One power-iteration step: transpose-conv back, conv forward.
                    v_s = conv_transpose2d(u.view(self.out_shape), weight, stride=stride, padding=padding, output_padding=0)
                    v = normalize(v_s.view((- 1)), dim=0, eps=self.eps, out=v)
                    u_s = conv2d(v.view(self.input_dim), weight, stride=stride, padding=padding, bias=None)
                    u = normalize(u_s.view((- 1)), dim=0, eps=self.eps, out=u)
                if (self.n_power_iterations > 0):
                    # Clone so the in-place-normalised buffers are not reused by autograd.
                    u = u.clone()
                    v = v.clone()
        # sigma ~ u^T (W v): current spectral-norm estimate.
        weight_v = conv2d(v.view(self.input_dim), weight, stride=stride, padding=padding, bias=None)
        weight_v = weight_v.view((- 1))
        sigma = torch.dot(u.view((- 1)), weight_v)
        # Only ever shrink the weight: factor is max(1, sigma / coeff).
        factorReverse = torch.max(torch.ones(1).to(weight.device), (sigma / self.coeff))
        sigma_log.copy_(sigma.detach())
        weight = (weight / (factorReverse + 1e-05))
        return weight

    def remove(self, module):
        """Detach the hook's buffers and re-register a plain weight parameter."""
        with torch.no_grad():
            weight = self.compute_weight(module, do_power_iteration=False)
        delattr(module, self.name)
        delattr(module, (self.name + '_u'))
        delattr(module, (self.name + '_orig'))
        module.register_parameter(self.name, torch.nn.Parameter(weight.detach()))

    def __call__(self, module, inputs):
        # Forward pre-hook: recompute the constrained weight; power-iterate
        # only while the module is in training mode.
        setattr(module, self.name, self.compute_weight(module, do_power_iteration=module.training))

    # NOTE(review): takes no self -- almost certainly decorated with
    # @staticmethod in the original source (decorators appear stripped in
    # this file); confirm before calling via an instance.
    def apply(module, coeff, input_dim, name, n_power_iterations, eps):
        """Attach the spectral-norm buffers and hooks to *module* and return the hook."""
        for (k, hook) in module._forward_pre_hooks.items():
            if (isinstance(hook, SpectralNormConv) and (hook.name == name)):
                raise RuntimeError('Cannot register two spectral_norm hooks on the same parameter {}'.format(name))
        fn = SpectralNormConv(coeff, input_dim, name, n_power_iterations, eps)
        weight = module._parameters[name]
        with torch.no_grad():
            num_input_dim = (((input_dim[0] * input_dim[1]) * input_dim[2]) * input_dim[3])
            v = normalize(torch.randn(num_input_dim), dim=0, eps=fn.eps)
            stride = module.stride
            padding = module.padding
            # One forward conv fixes the output shape used to reshape u later.
            u = conv2d(v.view(input_dim), weight, stride=stride, padding=padding, bias=None)
            fn.out_shape = u.shape
            num_output_dim = (((fn.out_shape[0] * fn.out_shape[1]) * fn.out_shape[2]) * fn.out_shape[3])
            u = normalize(torch.randn(num_output_dim), dim=0, eps=fn.eps)
        delattr(module, fn.name)
        module.register_parameter((fn.name + '_orig'), weight)
        # Plain tensor placeholder; the pre-hook re-sets it each forward.
        setattr(module, fn.name, weight.data)
        module.register_buffer((fn.name + '_u'), u)
        module.register_buffer((fn.name + '_v'), v)
        module.register_buffer((fn.name + '_sigma'), torch.ones(1).to(weight.device))
        module.register_forward_pre_hook(fn)
        module._register_state_dict_hook(SpectralNormConvStateDictHook(fn))
        module._register_load_state_dict_pre_hook(SpectralNormConvLoadStateDictPreHook(fn))
        return fn
def N_gram_detector(ngram_n_ratio):
    """Position-weighted n-gram score.

    Iterates the mapping in insertion order; the first three 'score' keys and
    every 'ratio' key contribute nothing, remaining 'score' keys at 1-based
    position k contribute k*log(k)*value. The total is normalised by the sum
    of positions whose value was non-zero (plus a small epsilon).
    """
    total = 0.0
    active_positions = []
    for position, key in enumerate(ngram_n_ratio, start=1):
        value = ngram_n_ratio[key]
        # Skip the first three 'score' entries and all 'ratio' entries
        # (the 0.0*value keeps any NaN propagation identical).
        if (position <= 3 and 'score' in key) or 'ratio' in key:
            total += 0.0 * value
            continue
        if 'score' in key or 'ratio' in key:
            total += position * np.log(position) * value
            if value != 0:
                active_positions.append(position)
    return total / (sum(active_positions) + 1e-08)
class ML_FISTA(nn.Module):
    """Multi-layer FISTA-unrolled convolutional sparse-coding classifier.

    Three convolutional dictionary layers (W1-W3) are inverted for ``T``
    FISTA iterations with momentum; two further conv layers (W4-W6 stages)
    and a linear head then produce 10-way log-probabilities.
    """

    def __init__(self, T):
        super(ML_FISTA, self).__init__()
        # T: number of unrolled FISTA iterations in forward().
        self.T = T
        # Convolutional dictionaries and their strides.
        self.W1 = nn.Parameter(torch.randn(32, 3, 4, 4), requires_grad=True)
        self.strd1 = 2
        self.W2 = nn.Parameter(torch.randn(64, 32, 4, 4), requires_grad=True)
        self.strd2 = 2
        self.W3 = nn.Parameter(torch.randn(128, 64, 4, 4), requires_grad=True)
        self.strd3 = 2
        self.W4 = nn.Parameter(torch.randn(256, 128, 3, 3), requires_grad=True)
        self.strd4 = 1
        self.W5 = nn.Parameter(torch.randn(512, 256, 3, 3), requires_grad=True)
        self.strd5 = 1
        self.W6 = nn.Parameter(torch.randn(512, 512, 3, 3), requires_grad=True)
        self.strd6 = 1
        # Learnable per-layer step sizes (c*) and biases/thresholds (b*).
        self.c1 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True)
        self.c2 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True)
        self.c3 = nn.Parameter(torch.ones(1, 1, 1, 1), requires_grad=True)
        self.b1 = nn.Parameter(torch.zeros(1, 32, 1, 1), requires_grad=True)
        self.b2 = nn.Parameter(torch.zeros(1, 64, 1, 1), requires_grad=True)
        self.b3 = nn.Parameter(torch.zeros(1, 128, 1, 1), requires_grad=True)
        self.b4 = nn.Parameter(torch.zeros(1, 256, 1, 1), requires_grad=True)
        self.b5 = nn.Parameter(torch.zeros(1, 512, 1, 1), requires_grad=True)
        self.b6 = nn.Parameter(torch.zeros(1, 512, 1, 1), requires_grad=True)
        self.Wclass = nn.Linear(512, 10)
        # Scale the dictionaries by ~1/sqrt(fan-in) after random init.
        self.W1.data = ((0.1 / np.sqrt((3 * 16))) * self.W1.data)
        self.W2.data = ((0.1 / np.sqrt((32 * 16))) * self.W2.data)
        self.W3.data = ((0.1 / np.sqrt((64 * 16))) * self.W3.data)
        self.W4.data = ((1 / np.sqrt((128 * 9))) * self.W4.data)
        self.W5.data = ((1 / np.sqrt((256 * 9))) * self.W5.data)
        self.W6.data = ((1 / np.sqrt((512 * 9))) * self.W6.data)

    def forward(self, x):
        # FISTA momentum coefficients.
        t = 1
        t_prv = t
        # Initial codes via a plain feed-forward encoding pass.
        gamma1 = F.relu(((self.c1 * F.conv2d(x, self.W1, stride=self.strd1, padding=1)) + self.b1))
        gamma2 = F.relu(((self.c2 * F.conv2d(gamma1, self.W2, stride=self.strd2, padding=1)) + self.b2))
        gamma3 = F.relu(((self.c3 * F.conv2d(gamma2, self.W3, stride=self.strd3, padding=1)) + self.b3))
        gamma3_prv = gamma3
        for _ in range(self.T):
            # Standard FISTA momentum schedule.
            t_prv = t
            t = float(((1 + np.sqrt((1 + (4 * (t_prv ** 2))))) / 2))
            # Extrapolation point Z, then reconstruct lower-layer codes from it.
            Z = (gamma3 + (((t_prv - 1) / t) * (gamma3 - gamma3_prv)))
            gamma3_prv = gamma3
            gamma2 = F.conv_transpose2d(Z, self.W3, stride=self.strd3, padding=1)
            gamma1 = F.conv_transpose2d(gamma2, self.W2, stride=self.strd2, padding=1)
            # Layer-wise proximal gradient steps (ReLU acts as the prox).
            gamma1 = F.relu(((gamma1 - (self.c1 * F.conv2d((F.conv_transpose2d(gamma1, self.W1, stride=self.strd1, padding=1) - x), self.W1, stride=self.strd1, padding=1))) + self.b1))
            gamma2 = F.relu(((gamma2 - (self.c2 * F.conv2d((F.conv_transpose2d(gamma2, self.W2, stride=self.strd2, padding=1) - gamma1), self.W2, stride=self.strd2, padding=1))) + self.b2))
            gamma3 = F.relu(((Z - (self.c3 * F.conv2d((F.conv_transpose2d(Z, self.W3, stride=self.strd3, padding=1) - gamma2), self.W3, stride=self.strd3, padding=1))) + self.b3))
        # Classification head: two conv+pool stages, flatten, linear, log-softmax.
        gamma4 = F.relu((F.conv2d(gamma3, self.W4, stride=self.strd4, padding=1) + self.b4))
        gamma5 = F.max_pool2d(F.relu((F.conv2d(gamma4, self.W5, stride=self.strd5, padding=1) + self.b5)), kernel_size=2, stride=2)
        gamma6 = F.max_pool2d(F.relu((F.conv2d(gamma5, self.W6, stride=self.strd6, padding=1) + self.b6)), kernel_size=2, stride=2)
        gammaGoal = gamma6
        gamma = gammaGoal.view(gammaGoal.shape[0], ((gammaGoal.shape[1] * gammaGoal.shape[2]) * gammaGoal.shape[3]))
        out = self.Wclass(gamma)
        out = F.log_softmax(out, dim=1)
        return out
def cli_main():
    """Parse interactive-generation CLI args, seed RNGs, and run generation."""
    parser = options.get_interactive_generation_parser()
    extra_args = (
        ('--prompts', dict(type=str, default=None, required=True)),
        ('--output', dict(type=str, default=None, required=True)),
        ('--debug', dict(action='store_true')),
        ('--samples-per-prompt', dict(type=int, default=1)),
    )
    for flag, kwargs in extra_args:
        parser.add_argument(flag, **kwargs)
    args = options.parse_args_and_arch(parser)
    # Seed both numpy and torch for reproducible sampling.
    np.random.seed(args.seed)
    utils.set_torch_seed(args.seed)
    main(args)
class PrintLogsCallback(keras.callbacks.Callback):
    """Keras callback that prints the metric logs at the end of every epoch."""

    def on_epoch_end(self, epoch, logs=None):
        """Print *logs* (the epoch's metric dict) on its own line.

        Uses None instead of a mutable {} default argument; Keras may also
        invoke the hook with logs=None, which prints as '{}' exactly like the
        old default did.
        """
        print('')
        print(('logs: ' + str(logs if logs is not None else {})))
class TestBatchBySizeFn(TestBatchBySize):
    """Sweeps batch_by_size_fn against the reference batching implementation."""

    def test_compare_with_baseline(self):
        def adapt(indices, num_tokens_vec, max_tokens, max_sentences, bsz_mult):
            # batch_by_size_fn expects a per-index callable, not a vector.
            return batch_by_size_fn(
                indices,
                lambda idx: num_tokens_vec[idx],
                max_tokens,
                max_sentences,
                bsz_mult,
            )
        self._run_compare_with_baseline_sweep(adapt)
def resnet_v2(input_shape, depth, num_classes=10):
    """Build a ResNet v2 (pre-activation bottleneck) Keras model.

    Parameters
    ----------
    input_shape : tuple
        Shape of the input images.
    depth : int
        Must be 9n+2 (e.g. 56, 110).
    num_classes : int
        Size of the softmax output.

    Returns
    -------
    keras Model
    """
    if (((depth - 2) % 9) != 0):
        raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
    num_filters_in = 16
    num_res_blocks = int(((depth - 2) / 9))
    inputs = Input(shape=input_shape)
    # v2 starts with a plain conv; BN-ReLU live inside the residual blocks.
    x = resnet_layer(inputs=inputs, num_filters=num_filters_in, conv_first=True)
    for stage in range(3):
        for res_block in range(num_res_blocks):
            activation = 'relu'
            batch_normalization = True
            strides = 1
            if (stage == 0):
                num_filters_out = (num_filters_in * 4)
                if (res_block == 0):
                    # Very first block: skip the pre-activation.
                    activation = None
                    batch_normalization = False
            else:
                num_filters_out = (num_filters_in * 2)
                if (res_block == 0):
                    # Downsample at the start of stages 1 and 2.
                    strides = 2
            # Bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (BN-ReLU-Conv order).
            y = resnet_layer(inputs=x, num_filters=num_filters_in, kernel_size=1, strides=strides, activation=activation, batch_normalization=batch_normalization, conv_first=False)
            y = resnet_layer(inputs=y, num_filters=num_filters_in, conv_first=False)
            y = resnet_layer(inputs=y, num_filters=num_filters_out, kernel_size=1, conv_first=False)
            if (res_block == 0):
                # Linear 1x1 projection shortcut to match changed dims/stride.
                x = resnet_layer(inputs=x, num_filters=num_filters_out, kernel_size=1, strides=strides, activation=None, batch_normalization=False)
            x = tensorflow.keras.layers.add([x, y])
        num_filters_in = num_filters_out
    # v2 adds a final BN-ReLU before pooling.
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = AveragePooling2D(pool_size=8)(x)
    y = Flatten()(x)
    outputs = Dense(num_classes, activation='softmax', kernel_initializer='he_normal')(y)
    model = Model(inputs=inputs, outputs=outputs)
    return model
def _latex_circuit_drawer(circuit, scale=0.7, filename=None, style=None, plot_barriers=True, reverse_bits=False, justify=None):
    """Render *circuit* to a PIL image by compiling generated LaTeX.

    Pipeline: emit .tex in a temp dir -> pdflatex -> pdftocairo (PNG) ->
    trimmed PIL image (optionally also saved to *filename*).

    Raises OSError if pdflatex/poppler are missing, CalledProcessError if
    LaTeX compilation fails (log dumped to latex_error.log), ImportError if
    pillow is unavailable.
    """
    tmpfilename = 'circuit'
    with tempfile.TemporaryDirectory() as tmpdirname:
        tmppath = os.path.join(tmpdirname, (tmpfilename + '.tex'))
        _generate_latex_source(circuit, filename=tmppath, scale=scale, style=style, plot_barriers=plot_barriers, reverse_bits=reverse_bits, justify=justify)
        image = None
        try:
            subprocess.run(['pdflatex', '-halt-on-error', '-output-directory={}'.format(tmpdirname), '{}'.format((tmpfilename + '.tex'))], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, check=True)
        except OSError as ex:
            if (ex.errno == errno.ENOENT):
                # pdflatex binary not found on PATH.
                logger.warning('WARNING: Unable to compile latex. Is `pdflatex` installed? Skipping latex circuit drawing...')
            raise
        except subprocess.CalledProcessError as ex:
            # Preserve the pdflatex output for debugging.
            with open('latex_error.log', 'wb') as error_file:
                error_file.write(ex.stdout)
            logger.warning('WARNING Unable to compile latex. The output from the pdflatex command can be found in latex_error.log')
            raise
        else:
            if (not HAS_PIL):
                raise ImportError('The latex drawer needs pillow installed. Run "pip install pillow" before using the latex drawer.')
            try:
                base = os.path.join(tmpdirname, tmpfilename)
                # NOTE(review): unlike the pdflatex call, this run() has no
                # check=True, so a pdftocairo failure only surfaces later at
                # Image.open -- confirm whether that is intentional.
                subprocess.run(['pdftocairo', '-singlefile', '-png', '-q', (base + '.pdf'), base])
                image = Image.open((base + '.png'))
                image = utils._trim(image)
                os.remove((base + '.png'))
                if filename:
                    image.save(filename, 'PNG')
            except OSError as ex:
                if (ex.errno == errno.ENOENT):
                    logger.warning('WARNING: Unable to convert pdf to image. Is `poppler` installed? Skipping circuit drawing...')
                raise
    return image
class Timer():
    """Context manager that measures wall-clock duration of the managed block."""

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *args):
        # duration only becomes available once the block exits.
        self.end = time.time()
        self.duration = self.end - self.start
def run_vicuna_bench(mt_bench_path, model_name):
    """Generate vicuna_bench answers and pairwise judgments for *model_name*.

    Skips entirely if the model's answer file already exists. Commands run in
    the llm_judge working directory with mt_bench_path on PYTHONPATH.
    """
    working_dir = os.path.join(mt_bench_path, 'fastchat', 'llm_judge')
    answer_file = os.path.join(working_dir, 'data', 'vicuna_bench', 'model_answer', f'{model_name}.jsonl')
    if os.path.exists(answer_file):
        return
    env = os.environ.copy()
    env['PYTHONPATH'] = f"{env.get('PYTHONPATH', '')}:{mt_bench_path}"
    commands = [
        f'python gen_api_answer.py --model {model_name} --max-tokens {MAX_CONTEXT} --parallel 128 --openai-api-base --bench-name vicuna_bench',
        f'python gen_judgment.py --model-list {model_name} --parallel 8 --mode pairwise-baseline --bench-name vicuna_bench',
    ]
    for command in commands:
        subprocess.run(command, shell=True, cwd=working_dir, env=env)
def get_data(name, data_dir, height, width, batch_size, workers):
    """Create the named re-ID dataset and a test loader over query + gallery.

    Returns (dataset, test_loader); the loader iterates the de-duplicated
    union of query and gallery items without shuffling.
    """
    root = osp.join(data_dir, name)
    dataset = datasets.create(name, root)
    # Standard ImageNet statistics; interpolation=3 is presumably PIL bicubic.
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    transform = T.Compose([T.Resize((height, width), interpolation=3), T.ToTensor(), normalizer])
    evaluation_items = list(set(dataset.query) | set(dataset.gallery))
    test_loader = DataLoader(
        Preprocessor(evaluation_items, root=dataset.images_dir, transform=transform),
        batch_size=batch_size,
        num_workers=workers,
        shuffle=False,
        pin_memory=True,
    )
    return (dataset, test_loader)
def sync_from_root(sess, variables, comm=None):
    """Broadcast TF variable values from MPI rank 0 to all other ranks.

    Rank 0 sends each variable's current value; every other rank receives
    into a float32 buffer and assigns it to its local copy of the variable.

    Parameters
    ----------
    sess : TF session used to read/assign variable values.
    variables : iterable of TF variables to synchronise.
    comm : MPI communicator, defaults to COMM_WORLD.
    """
    if (comm is None):
        comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    for var in variables:
        if (rank == 0):
            comm.Bcast(sess.run(var))
        else:
            # Local import keeps TF optional until a non-root rank needs it.
            import tensorflow as tf
            returned_var = np.empty(var.shape, dtype='float32')
            comm.Bcast(returned_var)
            sess.run(tf.assign(var, returned_var))
class Solution(object):
    """A PCTSP tour: the first ``size`` entries of ``_route`` are visited
    cities; the rest are unvisited (penalised). ``quality`` is total travel
    cost plus penalties, maintained incrementally by the mutation methods.
    """

    def __init__(self, pctsp, size=None):
        self._route = []
        if size:
            self.size = size
        else:
            self.size = len(pctsp.prize)
        # Worst possible quality until compute() runs.
        self.quality = sys.maxsize
        self.pctsp = pctsp
        self.prize = 0
    # NOTE(review): the stray string below was presumably compute()'s
    # docstring before formatting was lost; kept in place unchanged.
    '\n Computes the quality of the solution.\n '

    def compute(self):
        """Recompute prize and quality from scratch over the whole route."""
        self.prize = 0
        self.quality = 0
        for (i, city) in enumerate(self._route):
            if (i < self.size):
                self.prize += self.pctsp.prize[city]
                if (i > 0):
                    previousCity = self._route[(i - 1)]
                    self.quality += self.pctsp.cost[previousCity][city]
                if ((i + 1) == self.size):
                    # Close the tour back to the depot (city 0).
                    self.quality += self.pctsp.cost[city][0]
            else:
                # Unvisited cities incur their penalty.
                self.quality += self.pctsp.penal[city]

    def copy(self):
        """Shallow copy with an independent route list."""
        cp = copy.copy(self)
        cp._route = list(self._route)
        return cp

    def swap(self, i, j):
        """Swap visited position i with position j, updating quality and
        prize incrementally (no full recompute)."""
        city_i = self._route[i]
        city_i_prev = self._route[(i - 1)]
        city_i_next = self._route[((i + 1) % self.size)]
        city_j = self._route[j]
        # Remove i's edges/penalty trade, add j's in its place.
        self.quality = ((((((self.quality - self.pctsp.cost[city_i_prev][city_i]) - self.pctsp.cost[city_i][city_i_next]) + self.pctsp.cost[city_i_prev][city_j]) + self.pctsp.cost[city_j][city_i_next]) - self.pctsp.penal[city_j]) + self.pctsp.penal[city_i])
        self.prize = ((self.prize - self.pctsp.prize[city_i]) + self.pctsp.prize[city_j])
        (self._route[j], self._route[i]) = (self._route[i], self._route[j])

    def is_valid(self):
        """True when the collected prize meets the required minimum."""
        return (self.prize >= self.pctsp.prize_min)

    def add_city(self):
        """Promote the first unvisited city into the tour (before the depot-return)."""
        city_l = self._route[(self.size - 1)]
        city_add = self._route[self.size]
        self.quality = ((((self.quality - self.pctsp.cost[city_l][0]) - self.pctsp.penal[city_add]) + self.pctsp.cost[city_l][city_add]) + self.pctsp.cost[city_add][0])
        self.size += 1
        self.prize += self.pctsp.prize[city_add]

    def remove_city(self, index):
        """Drop the visited city at *index*; it moves to the unvisited tail."""
        city_rem = self._route[index]
        city_rem_prev = self._route[(index - 1)]
        city_rem_next = self._route[((index + 1) % self.size)]
        self.quality = ((((self.quality - self.pctsp.cost[city_rem_prev][city_rem]) - self.pctsp.cost[city_rem][city_rem_next]) + self.pctsp.penal[city_rem]) + self.pctsp.cost[city_rem_prev][city_rem_next])
        self.prize -= self.pctsp.prize[city_rem]
        del self._route[index]
        self._route.append(city_rem)
        self.size -= 1

    def remove_cities(self, quant):
        """Truncate the last *quant* visited cities (they become unvisited)."""
        for i in range((self.size - quant), self.size):
            city_rem = self._route[i]
            city_rem_prev = self._route[(i - 1)]
            self.quality = ((self.quality - self.pctsp.cost[city_rem_prev][city_rem]) + self.pctsp.penal[city_rem])
            self.prize -= self.pctsp.prize[city_rem]
        city_rem = self._route[(self.size - 1)]
        city_l = self._route[((self.size - quant) - 1)]
        # Re-close the tour from the new last city back to the depot.
        self.quality = ((self.quality - self.pctsp.cost[city_rem][0]) + self.pctsp.cost[city_l][0])
        self.size -= quant

    def print_route(self):
        print(self._route)

    # NOTE(review): two defs named 'route' -- almost certainly @property and
    # @route.setter in the original source (decorators lost in extraction);
    # as written the second def shadows the first. Confirm upstream.
    def route(self):
        return self._route

    def route(self, r):
        self._route = r
        self.compute()
def resnext20_16x4d_svhn(num_classes=10, **kwargs):
    """ResNeXt-20 (16x4d) configured for SVHN; extras go to the builder."""
    return get_resnext_cifar(
        num_classes=num_classes,
        blocks=20,
        cardinality=16,
        bottleneck_width=4,
        model_name='resnext20_16x4d_svhn',
        **kwargs,
    )
def pcd_video_from_camera(executor, o3dvis):
    """Stream RGBD frames from a RealSense camera into the visualizer forever.

    Capture of the next frame is overlapped with processing of the current one
    via *executor*. Closing the visualizer window shuts down the executor and
    the camera.

    Parameters
    ----------
    executor : concurrent-futures-style executor used for async captures.
    o3dvis : visualizer exposing set_on_close().
    """
    rscam = io3d.RealSenseSensor()
    rscam.start_capture()
    mdata = rscam.get_metadata()
    print(mdata)
    # exist_ok so a rerun does not crash on leftover capture directories
    # (plain makedirs raises FileExistsError the second time around).
    os.makedirs('capture/color', exist_ok=True)
    os.makedirs('capture/depth', exist_ok=True)
    rgbd_frame_future = executor.submit(rscam.capture_frame)

    def on_window_close():
        nonlocal rscam, executor
        executor.shutdown()
        rscam.stop_capture()
        return True

    o3dvis.set_on_close(on_window_close)
    while True:
        rgbd_frame = rgbd_frame_future.result()
        # Kick off the next capture before processing the current frame.
        rgbd_frame_future = executor.submit(rscam.capture_frame)
        point_cloud_video(executor, rgbd_frame, mdata, rscam.get_timestamp(), o3dvis)
class UnetConfig(AllowExtraBaseModel):
    # Pydantic-style config describing a single Unet; the AllowExtraBaseModel
    # base presumably permits extra fields -- confirm against its definition.
    dim: int
    dim_mults: ListOrTuple(int)
    # Text-conditioning embedding width, derived from the default T5 model.
    text_embed_dim: int = get_encoded_dim(DEFAULT_T5_NAME)
    cond_dim: int = None  # NOTE(review): None default despite the int annotation; relies on lenient validation -- confirm.
    channels: int = 3
    attn_dim_head: int = 32
    attn_heads: int = 16

    def create(self):
        # Instantiate a Unet from this config's field values.
        return Unet(**self.dict())
def to_numpy(tensor):
    """Recursively convert tensors and containers to numpy; scalars pass through.

    KeepTensor wrappers yield their .data, tuples/lists are converted
    element-wise, numpy arrays and plain int/float are returned unchanged,
    torch tensors are moved to CPU and converted.
    """
    if isinstance(tensor, KeepTensor):
        return tensor.data
    if isinstance(tensor, tuple):
        return tuple(map(to_numpy, tensor))
    if isinstance(tensor, list):
        return list(map(to_numpy, tensor))
    if isinstance(tensor, np.ndarray):
        return tensor
    if torch.is_tensor(tensor):
        return tensor.cpu().numpy()
    if isinstance(tensor, (float, int)):
        return tensor
    raise Exception('Not supported type %s' % type(tensor))
class GaussianNoise(torch.nn.Module):
    """Additive Gaussian noise layer: img + N(mean, std^2), sampled per call."""

    def __init__(self, mean=0.0, std=1.0):
        super().__init__()
        self.std = std
        self.mean = mean

    def forward(self, img):
        # Sample first, then move onto the input's device before adding.
        sample = torch.randn(img.size()) * self.std + self.mean
        return img + sample.to(img.device)

    def __repr__(self):
        return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
def pluralize(word, pos=NOUN, custom={}):
    """Return the Dutch plural of *word*.

    Only nouns (pos == NOUN) are inflected; other parts of speech come back
    lower-cased unchanged. Entries in *custom* override everything.
    NOTE: the mutable {} default is never mutated here, so it is safe, but
    callers should not rely on it being shared.
    """
    if (word in custom.keys()):
        return custom[word]
    w = word.lower()
    if (pos == NOUN):
        # Irregular word classes first.
        if (w in plural_irregular_en):
            return (w + 'en')
        if (w in plural_irregular_een):
            return (w + 'en')
        if (w in plural_irregular_eren):
            return (w + 'eren')
        if (w in plural_irregular_deren):
            return (w + 'deren')
        if (w in plural_irregular):
            return plural_irregular[w]
        # Latin -icus -> -ici.
        if w.endswith('icus'):
            return (w[:(- 2)] + 'i')
        if w.endswith(('es', 'as', 'nis', 'ris', 'vis')):
            return (w + 'sen')
        if (w.endswith('s') and (not w.endswith(('us', 'ts', 'mens')))):
            return (w[:(- 1)] + 'zen')
        # f -> v before -en (brief -> brieven).
        if w.endswith('f'):
            return (w[:(- 1)] + 'ven')
        if w.endswith('um'):
            return (w + 's')
        if w.endswith('ie'):
            return (w + 's')
        # NOTE(review): 'ie' below is unreachable (handled just above); only
        # the 'ee' member of this tuple can match.
        if w.endswith(('ee', 'ie')):
            return (w[:(- 1)] + 'en')
        if w.endswith('heid'):
            return (w[:(- 4)] + 'heden')
        # NOTE(review): 'e' appears twice in this tuple -- possibly an
        # accented character lost in transcoding; confirm upstream.
        if w.endswith(('e', 'e', 'el', 'em', 'en', 'er', 'eu', 'ie', 'ue', 'ui', 'eau', 'ah')):
            return (w + 's')
        # Vowel (or consonant-y) endings take an apostrophe-s.
        if (w.endswith(VOWELS) or (w.endswith('y') and (not w.endswith('e')))):
            return (w + "'s")
        if w.endswith('or'):
            return (w + 'en')
        if w.endswith('ij'):
            return (w + 'en')
        # Two trailing consonants: just append -en.
        if ((len(w) > 1) and (not is_vowel(w[(- 1)])) and (not is_vowel(w[(- 2)]))):
            return (w + 'en')
        # Short vowel: double the final consonant before -en.
        if ((len(w) > 2) and (not is_vowel(w[(- 1)])) and (not is_vowel(w[(- 3)]))):
            return ((w + w[(- 1)]) + 'en')
        # Doubled (long) vowel: drop one before -en.
        if ((len(w) > 2) and (not is_vowel(w[(- 1)])) and (w[(- 2)] == w[(- 3)])):
            return ((w[:(- 2)] + w[(- 1)]) + 'en')
        return (w + 'en')
    return w
def ask_yes_no(question: str) -> bool:
    """Prompt the user with *question* until a yes/no answer is given.

    Accepts the same spellings as the old ``distutils.util.strtobool``
    ('y', 'yes', 't', 'true', 'on', '1' -> True; 'n', 'no', 'f', 'false',
    'off', '0' -> False, case-insensitively) and re-prompts on anything
    else.  Parsing is done inline because ``distutils`` was removed in
    Python 3.12 (PEP 632), and this now returns a real ``bool`` instead
    of strtobool's 0/1 int.
    """
    truthy = {'y', 'yes', 't', 'true', 'on', '1'}
    falsy = {'n', 'no', 'f', 'false', 'off', '0'}
    while True:
        print('{0} [y/n]'.format(question))
        answer = input().lower()
        if answer in truthy:
            return True
        if answer in falsy:
            return False
        # Unrecognized input: loop and ask again (mirrors the original
        # behavior of swallowing strtobool's ValueError).
# NOTE(review): `_comparison(...)` is invoked as a bare statement; it looks
# like a truncated `@image_comparison(...)` decorator from matplotlib's
# testing helpers — confirm against the original test module.
_comparison(baseline_images=['plot_centroids'], remove_text=False, extensions=['png'], tol=CVT_IMAGE_TOLERANCE)
def test_plot_centroids(cvt_archive_3d):
    """Image-comparison test: 3-D CVT archive plot with centroids shown."""
    plt.figure(figsize=(8, 6))
    cvt_archive_3d_plot(cvt_archive_3d, plot_centroids=True, cell_alpha=0.1)
def oid_v6_classes():
    """Return the Open Images Dataset v6 class names, in label order.

    Fix: the entry 'Moths and butterflies' had been split across a
    physical line break in the original source, which is invalid inside a
    single-quoted string literal; it is rejoined here.
    """
    return ['Tortoise', 'Container', 'Magpie', 'Sea turtle', 'Football', 'Ambulance', 'Ladder', 'Toothbrush', 'Syringe', 'Sink', 'Toy', 'Organ (Musical Instrument)', 'Cassette deck', 'Apple', 'Human eye', 'Cosmetics', 'Paddle', 'Snowman', 'Beer', 'Chopsticks', 'Human beard', 'Bird', 'Parking meter', 'Traffic light', 'Croissant', 'Cucumber', 'Radish', 'Towel', 'Doll', 'Skull', 'Washing machine', 'Glove', 'Tick', 'Belt', 'Sunglasses', 'Banjo', 'Cart', 'Ball', 'Backpack', 'Bicycle', 'Home appliance', 'Centipede', 'Boat', 'Surfboard', 'Boot', 'Headphones', 'Hot dog', 'Shorts', 'Fast food', 'Bus', 'Boy', 'Screwdriver', 'Bicycle wheel', 'Barge', 'Laptop', 'Miniskirt', 'Drill (Tool)', 'Dress', 'Bear', 'Waffle', 'Pancake', 'Brown bear', 'Woodpecker', 'Blue jay', 'Pretzel', 'Bagel', 'Tower', 'Teapot', 'Person', 'Bow and arrow', 'Swimwear', 'Beehive', 'Brassiere', 'Bee', 'Bat (Animal)', 'Starfish', 'Popcorn', 'Burrito', 'Chainsaw', 'Balloon', 'Wrench', 'Tent', 'Vehicle registration plate', 'Lantern', 'Toaster', 'Flashlight', 'Billboard', 'Tiara', 'Limousine', 'Necklace', 'Carnivore', 'Scissors', 'Stairs', 'Computer keyboard', 'Printer', 'Traffic sign', 'Chair', 'Shirt', 'Poster', 'Cheese', 'Sock', 'Fire hydrant', 'Land vehicle', 'Earrings', 'Tie', 'Watercraft', 'Cabinetry', 'Suitcase', 'Muffin', 'Bidet', 'Snack', 'Snowmobile', 'Clock', 'Medical equipment', 'Cattle', 'Cello', 'Jet ski', 'Camel', 'Coat', 'Suit', 'Desk', 'Cat', 'Bronze sculpture', 'Juice', 'Gondola', 'Beetle', 'Cannon', 'Computer mouse', 'Cookie', 'Office building', 'Fountain', 'Coin', 'Calculator', 'Cocktail', 'Computer monitor', 'Box', 'Stapler', 'Christmas tree', 'Cowboy hat', 'Hiking equipment', 'Studio couch', 'Drum', 'Dessert', 'Wine rack', 'Drink', 'Zucchini', 'Ladle', 'Human mouth', 'Dairy Product', 'Dice', 'Oven', 'Dinosaur', 'Ratchet (Device)', 'Couch', 'Cricket ball', 'Winter melon', 'Spatula', 'Whiteboard', 'Pencil sharpener', 'Door', 'Hat', 'Shower', 'Eraser', 'Fedora', 'Guacamole', 'Dagger', 'Scarf',
            'Dolphin', 'Sombrero', 'Tin can', 'Mug', 'Tap', 'Harbor seal', 'Stretcher', 'Can opener', 'Goggles', 'Human body', 'Roller skates', 'Coffee cup', 'Cutting board', 'Blender', 'Plumbing fixture', 'Stop sign', 'Office supplies', 'Volleyball (Ball)', 'Vase', 'Slow cooker', 'Wardrobe', 'Coffee', 'Whisk', 'Paper towel', 'Personal care', 'Food', 'Sun hat', 'Tree house', 'Flying disc', 'Skirt', 'Gas stove', 'Salt and pepper shakers', 'Mechanical fan', 'Face powder', 'Fax', 'Fruit', 'French fries', 'Nightstand', 'Barrel', 'Kite', 'Tart', 'Treadmill', 'Fox', 'Flag', 'French horn', 'Window blind', 'Human foot', 'Golf cart', 'Jacket', 'Egg (Food)', 'Street light', 'Guitar', 'Pillow', 'Human leg', 'Isopod', 'Grape', 'Human ear', 'Power plugs and sockets', 'Panda', 'Giraffe', 'Woman', 'Door handle', 'Rhinoceros', 'Bathtub', 'Goldfish', 'Houseplant', 'Goat', 'Baseball bat', 'Baseball glove', 'Mixing bowl', 'Marine invertebrates', 'Kitchen utensil', 'Light switch', 'House', 'Horse', 'Stationary bicycle', 'Hammer', 'Ceiling fan', 'Sofa bed', 'Adhesive tape', 'Harp', 'Sandal', 'Bicycle helmet', 'Saucer', 'Harpsichord', 'Human hair', 'Heater', 'Harmonica', 'Hamster', 'Curtain', 'Bed', 'Kettle', 'Fireplace', 'Scale', 'Drinking straw', 'Insect', 'Hair dryer', 'Kitchenware', 'Indoor rower', 'Invertebrate', 'Food processor', 'Bookcase', 'Refrigerator', 'Wood-burning stove', 'Punching bag', 'Common fig', 'Cocktail shaker', 'Jaguar (Animal)', 'Golf ball', 'Fashion accessory', 'Alarm clock', 'Filing cabinet', 'Artichoke', 'Table', 'Tableware', 'Kangaroo', 'Koala', 'Knife', 'Bottle', 'Bottle opener', 'Lynx', 'Lavender (Plant)', 'Lighthouse', 'Dumbbell', 'Human head', 'Bowl', 'Humidifier', 'Porch', 'Lizard', 'Billiard table', 'Mammal', 'Mouse', 'Motorcycle', 'Musical instrument', 'Swim cap', 'Frying pan', 'Snowplow', 'Bathroom cabinet', 'Missile', 'Bust', 'Man', 'Waffle iron', 'Milk', 'Ring binder', 'Plate', 'Mobile phone', 'Baked goods', 'Mushroom', 'Crutch', 'Pitcher (Container)', 'Mirror',
            'Personal flotation device', 'Table tennis racket', 'Pencil case', 'Musical keyboard', 'Scoreboard', 'Briefcase', 'Kitchen knife', 'Nail (Construction)', 'Tennis ball', 'Plastic bag', 'Oboe', 'Chest of drawers', 'Ostrich', 'Piano', 'Girl', 'Plant', 'Potato', 'Hair spray', 'Sports equipment', 'Pasta', 'Penguin', 'Pumpkin', 'Pear', 'Infant bed', 'Polar bear', 'Mixer', 'Cupboard', 'Jacuzzi', 'Pizza', 'Digital clock', 'Pig', 'Reptile', 'Rifle', 'Lipstick', 'Skateboard', 'Raven', 'High heels', 'Red panda', 'Rose', 'Rabbit', 'Sculpture', 'Saxophone', 'Shotgun', 'Seafood', 'Submarine sandwich', 'Snowboard', 'Sword', 'Picture frame', 'Sushi', 'Loveseat', 'Ski', 'Squirrel', 'Tripod', 'Stethoscope', 'Submarine', 'Scorpion', 'Segway', 'Training bench', 'Snake', 'Coffee table', 'Skyscraper', 'Sheep', 'Television', 'Trombone', 'Tea', 'Tank', 'Taco', 'Telephone', 'Torch', 'Tiger', 'Strawberry', 'Trumpet', 'Tree', 'Tomato', 'Train', 'Tool', 'Picnic basket', 'Cooking spray', 'Trousers', 'Bowling equipment', 'Football helmet', 'Truck', 'Measuring cup', 'Coffeemaker', 'Violin', 'Vehicle', 'Handbag', 'Paper cutter', 'Wine', 'Weapon', 'Wheel', 'Worm', 'Wok', 'Whale', 'Zebra', 'Auto part', 'Jug', 'Pizza cutter', 'Cream', 'Monkey', 'Lion', 'Bread', 'Platter', 'Chicken', 'Eagle', 'Helicopter', 'Owl', 'Duck', 'Turtle', 'Hippopotamus', 'Crocodile', 'Toilet', 'Toilet paper', 'Squid', 'Clothing', 'Footwear', 'Lemon', 'Spider', 'Deer', 'Frog', 'Banana', 'Rocket', 'Wine glass', 'Countertop', 'Tablet computer', 'Waste container', 'Swimming pool', 'Dog', 'Book', 'Elephant', 'Shark', 'Candle', 'Leopard', 'Axe', 'Hand dryer', 'Soap dispenser', 'Porcupine', 'Flower', 'Canary', 'Cheetah', 'Palm tree', 'Hamburger', 'Maple', 'Building', 'Fish', 'Lobster', 'Garden Asparagus', 'Furniture', 'Hedgehog', 'Airplane', 'Spoon', 'Otter', 'Bull', 'Oyster', 'Horizontal bar', 'Convenience store', 'Bomb', 'Bench', 'Ice cream', 'Caterpillar', 'Butterfly', 'Parachute', 'Orange', 'Antelope', 'Beaker', 'Moths and butterflies',
            'Window', 'Closet', 'Castle', 'Jellyfish', 'Goose', 'Mule', 'Swan', 'Peach', 'Coconut', 'Seat belt', 'Raccoon', 'Chisel', 'Fork', 'Lamp', 'Camera', 'Squash (Plant)', 'Racket', 'Human face', 'Human arm', 'Vegetable', 'Diaper', 'Unicycle', 'Falcon', 'Chime', 'Snail', 'Shellfish', 'Cabbage', 'Carrot', 'Mango', 'Jeans', 'Flowerpot', 'Pineapple', 'Drawer', 'Stool', 'Envelope', 'Cake', 'Dragonfly', 'Common sunflower', 'Microwave oven', 'Honeycomb', 'Marine mammal', 'Sea lion', 'Ladybug', 'Shelf', 'Watch', 'Candy', 'Salad', 'Parrot', 'Handgun', 'Sparrow', 'Van', 'Grinder', 'Spice rack', 'Light bulb', 'Corded phone', 'Sports uniform', 'Tennis racket', 'Wall clock', 'Serving tray', 'Kitchen & dining room table', 'Dog bed', 'Cake stand', 'Cat furniture', 'Bathroom accessory', 'Facial tissue holder', 'Pressure cooker', 'Kitchen appliance', 'Tire', 'Ruler', 'Luggage and bags', 'Microphone', 'Broccoli', 'Umbrella', 'Pastry', 'Grapefruit', 'Band-aid', 'Animal', 'Bell pepper', 'Turkey', 'Lily', 'Pomegranate', 'Doughnut', 'Glasses', 'Human nose', 'Pen', 'Ant', 'Car', 'Aircraft', 'Human hand', 'Skunk', 'Teddy bear', 'Watermelon', 'Cantaloupe', 'Dishwasher', 'Flute', 'Balance beam', 'Sandwich', 'Shrimp', 'Sewing machine', 'Binoculars', 'Rays and skates', 'Ipod', 'Accordion', 'Willow', 'Crab', 'Crown', 'Seahorse', 'Perfume', 'Alpaca', 'Taxi', 'Canoe', 'Remote control', 'Wheelchair', 'Rugby ball', 'Armadillo', 'Maracas', 'Helmet']
def test_digits_modular_sparse():
    """Mixture selection with the modular optimizer on sparse digits data."""
    sqrt_selector = FeatureBasedSelection(100, 'sqrt')
    log_selector = FeatureBasedSelection(100, 'log')
    mixture = MixtureSelection(100, [sqrt_selector, log_selector], [1.0, 0.3], optimizer='modular', random_state=0)
    mixture.fit(X_digits_sparse)
    # Ranking and gains must match the precomputed references; the chosen
    # subset must equal the ranked rows of the input, densified.
    assert_array_equal(mixture.ranking, digits_modular_ranking)
    assert_array_almost_equal(mixture.gains, digits_modular_gains, 4)
    assert_array_almost_equal(mixture.subset, X_digits_sparse[mixture.ranking].toarray())
def ComputeIO(track):
    """Compute the inner and outer boundary points of a track.

    For each centerline sample the local tangent is estimated by a central
    finite difference of the track parametrization, and the two boundaries
    are obtained by offsetting half of ``track.track_width`` along the
    (unnormalized) normal direction.

    Args:
        track: object exposing ``x_center``/``y_center`` arrays,
            ``track_width``, and the ``xy_to_param``/``param_to_xy``
            parametrization methods.

    Returns:
        Tuple ``(x_inner, y_inner, x_outer, y_outer)`` of arrays with the
        same length as ``track.x_center``.

    Note: the original implementation also read ``track.x_raceline`` and
    ``track.y_raceline`` but never used them; those dead reads were removed.
    """
    x_center = track.x_center
    y_center = track.y_center
    ns = x_center.shape[0]
    x_inner = np.zeros([ns])
    y_inner = np.zeros([ns])
    x_outer = np.zeros([ns])
    y_outer = np.zeros([ns])
    half_width = track.track_width / 2  # hoisted loop invariant
    eps = 0.01  # finite-difference step in the track parameter
    for idx in range(ns):
        theta = track.xy_to_param(x_center[idx], y_center[idx])
        (x_fwd, y_fwd) = track.param_to_xy(theta + eps)
        (x_bwd, y_bwd) = track.param_to_xy(theta - eps)
        (x, y) = track.param_to_xy(theta)
        dx = x_fwd - x_bwd
        dy = y_fwd - y_bwd
        norm = np.sqrt((dy ** 2) + (dx ** 2))
        # Offset along the normal (-dy, dx)/norm: +half_width gives the
        # inner boundary, -half_width the outer (matches the original's
        # width = +/- track_width/2 branches).
        x_inner[idx] = x - (half_width * dy) / norm
        y_inner[idx] = y + (half_width * dx) / norm
        x_outer[idx] = x + (half_width * dy) / norm
        y_outer[idx] = y - (half_width * dx) / norm
    return (x_inner, y_inner, x_outer, y_outer)
class EfficientNet(torch.nn.Module):
    """EfficientNet-style image classifier.

    Built from five sequential stages ``p1``..``p5`` of ``Residual`` blocks
    (``Conv``/``Residual``/``SE``/``init_weight`` are defined elsewhere in
    this file), followed by a 1x1-conv + global-average-pool head ``fc1``
    and a linear classifier ``fc2``.  Per-block stochastic-depth rates ramp
    linearly from 0 to 0.2 across all residual blocks.
    """
    def __init__(self, drop_rate=0, num_class=1000):
        super().__init__()
        # Residual-block count per configuration group; the trailing 0 makes
        # the final construction loop below a no-op.
        num_dep = [2, 4, 4, 6, 9, 15, 0]
        # Channel widths per group; the last entry is the pre-classifier width.
        filters = [24, 48, 64, 128, 160, 256, 256, 1280]
        dp_index = 0
        # Linearly increasing stochastic-depth (drop-path) rate per block.
        dp_rates = [x.item() for x in torch.linspace(0, 0.2, sum(num_dep))]
        self.p1 = []
        self.p2 = []
        self.p3 = []
        self.p4 = []
        self.p5 = []
        # Stage 1: stem conv (stride 2) plus fused residual blocks.
        for i in range(num_dep[0]):
            if (i == 0):
                self.p1.append(Conv(3, filters[0], torch.nn.SiLU(), 3, 2))
                self.p1.append(Residual(filters[0], filters[0], 1, 1, dp_rates[dp_index]))
            else:
                self.p1.append(Residual(filters[0], filters[0], 1, 1, dp_rates[dp_index]))
            dp_index += 1
        # Stage 2: downsample on the first block, expansion ratio 4.
        for i in range(num_dep[1]):
            if (i == 0):
                self.p2.append(Residual(filters[0], filters[1], 2, 4, dp_rates[dp_index]))
            else:
                self.p2.append(Residual(filters[1], filters[1], 1, 4, dp_rates[dp_index]))
            dp_index += 1
        # Stage 3: downsample on the first block, expansion ratio 4.
        for i in range(num_dep[2]):
            if (i == 0):
                self.p3.append(Residual(filters[1], filters[2], 2, 4, dp_rates[dp_index]))
            else:
                self.p3.append(Residual(filters[2], filters[2], 1, 4, dp_rates[dp_index]))
            dp_index += 1
        # Stage 4 (two groups): the final False flag switches the Residual
        # variant (presumably disables the fused conv) — confirm against the
        # Residual definition.
        for i in range(num_dep[3]):
            if (i == 0):
                self.p4.append(Residual(filters[2], filters[3], 2, 4, dp_rates[dp_index], False))
            else:
                self.p4.append(Residual(filters[3], filters[3], 1, 4, dp_rates[dp_index], False))
            dp_index += 1
        for i in range(num_dep[4]):
            if (i == 0):
                self.p4.append(Residual(filters[3], filters[4], 1, 6, dp_rates[dp_index], False))
            else:
                self.p4.append(Residual(filters[4], filters[4], 1, 6, dp_rates[dp_index], False))
            dp_index += 1
        # Stage 5 (two groups); the second loop runs zero times (num_dep[6] == 0).
        for i in range(num_dep[5]):
            if (i == 0):
                self.p5.append(Residual(filters[4], filters[5], 2, 6, dp_rates[dp_index], False))
            else:
                self.p5.append(Residual(filters[5], filters[5], 1, 6, dp_rates[dp_index], False))
            dp_index += 1
        for i in range(num_dep[6]):
            if (i == 0):
                self.p5.append(Residual(filters[5], filters[6], 2, 6, dp_rates[dp_index], False))
            else:
                self.p5.append(Residual(filters[6], filters[6], 1, 6, dp_rates[dp_index], False))
            dp_index += 1
        # Wrap the collected block lists into Sequential stages.
        self.p1 = torch.nn.Sequential(*self.p1)
        self.p2 = torch.nn.Sequential(*self.p2)
        self.p3 = torch.nn.Sequential(*self.p3)
        self.p4 = torch.nn.Sequential(*self.p4)
        self.p5 = torch.nn.Sequential(*self.p5)
        self.fc1 = torch.nn.Sequential(Conv(filters[6], filters[7], torch.nn.SiLU()), torch.nn.AdaptiveAvgPool2d(1), torch.nn.Flatten())
        self.fc2 = torch.nn.Linear(filters[7], num_class)
        self.drop_rate = drop_rate
        init_weight(self)
    def forward(self, x):
        """Run the backbone, pooled head, optional dropout, and classifier."""
        x = self.p1(x)
        x = self.p2(x)
        x = self.p3(x)
        x = self.p4(x)
        x = self.p5(x)
        x = self.fc1(x)
        if (self.drop_rate > 0):
            # Dropout is only applied during training (self.training flag).
            x = dropout(x, self.drop_rate, self.training)
        return self.fc2(x)
    def export(self):
        """Replace SiLU activations with timm's Swish for export; returns self."""
        from timm.models.layers import Swish
        for m in self.modules():
            if ((type(m) is Conv) and hasattr(m, 'relu')):
                if isinstance(m.relu, torch.nn.SiLU):
                    m.relu = Swish()
            if (type(m) is SE):
                if isinstance(m.se[2], torch.nn.SiLU):
                    m.se[2] = Swish()
        return self
def IsBuiltinType(token):
    """Return True if *token* names a built-in type or a type modifier."""
    # 'virtual' and 'inline' are declaration specifiers, never types.
    if token == 'virtual' or token == 'inline':
        return False
    return token in TYPES or token in TYPE_MODIFIERS
def torgb(x, cnum, ksize, stride, rate, name, activation=tf.nn.tanh, padding='SAME'):
    """Project feature map *x* to ``cnum`` channels via a dilated conv2d."""
    return tf.layers.conv2d(x, cnum, ksize, stride, dilation_rate=rate, activation=activation, padding=padding, name=name)
def test_make_sparse_convmodule():
    """Verify make_sparse_convmodule layer order, hyper-params and output shape."""
    from mmdet3d.ops import make_sparse_convmodule
    features = torch.tensor([[6.56126, 0.9648336, -1.7339306, 0.315], [6.8162713, -2.480431, -1.3616394, 0.36], [11.643568, -4.744306, -1.3580885, 0.16], [23.482342, 6.5036807, 0.5806964, 0.35]], dtype=torch.float32)
    coords = torch.tensor([[0, 12, 819, 131], [0, 16, 750, 136], [1, 16, 705, 232], [1, 35, 930, 469]], dtype=torch.int32)
    sp_tensor = spconv.SparseConvTensor(features, coords, [41, 1600, 1408], 2)
    # conv -> norm -> act ordering.
    conv_norm_act = make_sparse_convmodule(4, 16, 3, 'test0', stride=1, padding=0, conv_type='SubMConv3d', norm_cfg=dict(type='BN1d', eps=0.001, momentum=0.01), order=('conv', 'norm', 'act'))
    assert isinstance(conv_norm_act[0], spconv.SubMConv3d)
    assert (conv_norm_act[0].in_channels == 4)
    assert (conv_norm_act[0].out_channels == 16)
    assert isinstance(conv_norm_act[1], torch.nn.BatchNorm1d)
    assert (conv_norm_act[1].eps == 0.001)
    assert (conv_norm_act[1].momentum == 0.01)
    assert isinstance(conv_norm_act[2], torch.nn.ReLU)
    out = conv_norm_act(sp_tensor)
    assert (out.features.shape == torch.Size([4, 16]))
    # norm -> act -> conv ordering with an inverse conv.
    norm_act_conv = make_sparse_convmodule(4, 16, 3, 'test1', stride=1, padding=0, conv_type='SparseInverseConv3d', norm_cfg=dict(type='BN1d', eps=0.001, momentum=0.01), order=('norm', 'act', 'conv'))
    assert isinstance(norm_act_conv[0], torch.nn.BatchNorm1d)
    assert isinstance(norm_act_conv[1], torch.nn.ReLU)
    assert isinstance(norm_act_conv[2], spconv.SparseInverseConv3d)
class EpisodeViewer():
    """Tk/matplotlib GUI for stepping through a recorded attack episode.

    ``episode`` is a sequence of ``(state, action, reward, done)`` tuples;
    ``G`` is a networkx graph whose nodes carry 'color', 'label' and 'pos'
    attributes; node colors are recomputed per timestep from the state.
    """
    def __init__(self, episode, G, sensitive_hosts, width=7, height=7):
        self.episode = episode
        self.G = G
        self.sensitive_hosts = sensitive_hosts
        self.timestep = 0  # index of the next episode step to render
        self._setup_GUI(width, height)
        # Render the first frame, then hand control to the Tk event loop
        # (blocks until the window is closed).
        self._next_graph()
        Tk.mainloop()
    def _setup_GUI(self, width, height):
        """Build the Tk window, matplotlib canvas and back/next buttons."""
        self.root = Tk.Tk()
        self.root.wm_title('Cyber Attack Simulator')
        self.root.wm_protocol('WM_DELETE_WINDOW', self._close)
        self.fig = plt.figure(figsize=(width, height))
        self.axes = self.fig.add_subplot(111)
        self.fig.tight_layout()
        self.fig.subplots_adjust(top=0.8)
        self.canvas = FigureCanvasTkAgg(self.fig, master=self.root)
        self.canvas.draw()
        self.canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
        back = Tk.Button(self.root, text='back', command=self._previous_graph)
        back.pack()
        # NOTE(review): `next` shadows the builtin; local-only, so harmless.
        next = Tk.Button(self.root, text='next', command=self._next_graph)
        next.pack()
    def _close(self):
        """Tear down matplotlib figures and the Tk window."""
        plt.close('all')
        self.root.destroy()
    def _next_graph(self):
        """Advance one timestep and redraw; no-op at the end of the episode."""
        if (self.timestep < len(self.episode)):
            t_state = self.episode[self.timestep][0]
            self.G = self._update_graph(self.G, t_state)
            self._draw_graph(self.G)
            self.timestep += 1
    def _previous_graph(self):
        """Step back one frame by rewinding two and re-advancing one."""
        if (self.timestep > 1):
            self.timestep -= 2
            self._next_graph()
    def _update_graph(self, G, state):
        """Recolor every host node (except the agent) from the given state."""
        for m in list(G.nodes):
            if (m == AGENT):
                continue
            node_color = get_host_representation(state, self.sensitive_hosts, m, COLORS)
            G.nodes[m]['color'] = node_color
        return G
    def _draw_graph(self, G):
        """Render the graph plus a title for the current timestep's action/reward."""
        pos = {}
        colors = []
        labels = {}
        for n in list(G.nodes):
            colors.append(G.nodes[n]['color'])
            labels[n] = G.nodes[n]['label']
            pos[n] = G.nodes[n]['pos']
        self.axes.cla()
        nx.draw_networkx_nodes(G, pos, node_color=colors, node_size=1500, ax=self.axes)
        nx.draw_networkx_labels(G, pos, labels, font_size=12, font_weight='bold')
        nx.draw_networkx_edges(G, pos)
        plt.axis('off')
        (state, action, reward, done) = self.episode[self.timestep]
        if done:
            title = f'''t={self.timestep}
Goal reached
total reward={reward}'''
        else:
            title = f'''t={self.timestep}
{action}
reward={reward}'''
        ax_title = self.axes.set_title(title, fontsize=16, pad=10)
        ax_title.set_y(1.05)
        # Widen the axis limits by half a tick step on each side so nodes
        # near the border are not clipped.
        xticks = self.axes.get_xticks()
        yticks = self.axes.get_yticks()
        xmin = (((3 * xticks[0]) - xticks[1]) / 2.0)
        ymin = (((3 * yticks[0]) - yticks[1]) / 2.0)
        xmax = (((3 * xticks[(- 1)]) - xticks[(- 2)]) / 2.0)
        ymax = (((3 * yticks[(- 1)]) - yticks[(- 2)]) / 2.0)
        self.axes.set_xlim(left=xmin, right=xmax)
        self.axes.set_ylim(bottom=ymin, top=ymax)
        self.canvas.draw()
    # NOTE(review): `legend` takes no `self` — it is presumably intended as a
    # @staticmethod; confirm how callers invoke it.
    def legend(compromised=True):
        """Build matplotlib legend patches for the node color scheme."""
        a = mpatches.Patch(color='black', label='Agent')
        s = mpatches.Patch(color='magenta', label='Sensitive (S)')
        c = mpatches.Patch(color='green', label='Compromised (C)')
        r = mpatches.Patch(color='blue', label='Reachable (R)')
        legend_entries = [a, s, c, r]
        if compromised:
            sc = mpatches.Patch(color='yellow', label='S & C')
            sr = mpatches.Patch(color='orange', label='S & R')
            o = mpatches.Patch(color='red', label='not S, C or R')
            legend_entries.extend([sc, sr, o])
        return legend_entries
def layer_norm(inputs, params, activation_fn=None, reuse=None, variables_collections=None, outputs_collections=None, trainable=True, begin_norm_axis=1, begin_params_axis=(- 1), scope=None):
    """Layer normalization using externally supplied ``(beta, gamma)`` params.

    Variant of the contrib ``layer_norm`` that takes the offset/scale pair
    via *params* instead of creating variables.  Moments are computed over
    axes ``[begin_norm_axis, rank)``; beta/gamma broadcast over axes
    ``[begin_params_axis, rank)``.

    NOTE(review): ``variables_collections`` and ``trainable`` are accepted
    but unused here (no variables are created) — presumably kept for
    signature compatibility with the contrib original; confirm.
    """
    with variable_scope.variable_scope(scope, 'LayerNorm', [inputs], reuse=reuse) as sc:
        inputs = ops.convert_to_tensor(inputs)
        inputs_shape = inputs.shape
        inputs_rank = inputs_shape.ndims
        if (inputs_rank is None):
            raise ValueError(('Inputs %s has undefined rank.' % inputs.name))
        dtype = inputs.dtype.base_dtype
        if (begin_norm_axis < 0):
            # Convert a negative axis into an absolute index.
            begin_norm_axis = (inputs_rank + begin_norm_axis)
        if ((begin_params_axis >= inputs_rank) or (begin_norm_axis >= inputs_rank)):
            raise ValueError(('begin_params_axis (%d) and begin_norm_axis (%d) must be < rank(inputs) (%d)' % (begin_params_axis, begin_norm_axis, inputs_rank)))
        params_shape = inputs_shape[begin_params_axis:]
        if (not params_shape.is_fully_defined()):
            raise ValueError(('Inputs %s: shape(inputs)[%s:] is not fully defined: %s' % (inputs.name, begin_params_axis, inputs_shape)))
        (beta, gamma) = params
        # Mean/variance over the normalization axes, kept for broadcasting.
        norm_axes = list(range(begin_norm_axis, inputs_rank))
        (mean, variance) = nn.moments(inputs, norm_axes, keep_dims=True)
        # fp16 needs a much larger epsilon to avoid dividing by ~zero.
        variance_epsilon = (1e-12 if (dtype != dtypes.float16) else 0.001)
        outputs = nn.batch_normalization(inputs, mean, variance, offset=beta, scale=gamma, variance_epsilon=variance_epsilon)
        outputs.set_shape(inputs_shape)
        if (activation_fn is not None):
            outputs = activation_fn(outputs)
        return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
def run_wraps(model_context):
    """Exercise FSDP auto-wrapping three ways and check wrapped-module counts.

    1. ``atorch_wrap_cls`` with class objects,
    2. ``get_skip_match_module_child_wrap_policy`` with class objects,
    3. the same policy with a class-name string ('LayerNorm').

    The previously triplicated accelerate-and-count code is factored into
    two local helpers; assertions and counts are unchanged.
    """
    gpt2block_cls = get_gpt2_module_type(module='block')
    wrap_type_tuple = (gpt2block_cls, torch.nn.LayerNorm)

    def _accelerate(mc, fsdp_config):
        # Run auto_accelerate with an fsdp load strategy; assert success and
        # return the accelerate result.
        strategy = ['parallel_mode', ('fsdp', fsdp_config)]
        (status, result, _) = auto_accelerate(mc.model, mc.optim_func, mc.dataset, mc.loss_func, mc.prepare_input, mc.model_input_format, mc.optim_args, mc.optim_param_func, mc.dataloader_args, load_strategy=strategy, ignore_dryrun_on_load_strategy=True)
        assert status
        return result

    def _count_fsdp_wrapped(model):
        # Count FSDP-wrapped submodules, keyed by the wrapped module's type.
        counts = {t: 0 for t in wrap_type_tuple}
        for (_, child) in model.named_modules():
            if isinstance(child, FSDP):
                for t in wrap_type_tuple:
                    if isinstance(child.module, t):
                        counts[t] += 1
        return counts

    mc_copy = copy.deepcopy(model_context)
    # Case 1: wrap by class objects via atorch_wrap_cls.
    result = _accelerate(model_context, {'sync_module_states': True, 'limit_all_gathers': True, 'atorch_wrap_cls': wrap_type_tuple})
    counts = _count_fsdp_wrapped(result.model)
    assert (counts[gpt2block_cls] == 3)
    assert (counts[torch.nn.LayerNorm] == ((3 * 2) + 1))

    # Case 2: skip-match-child policy built from class objects — LayerNorms
    # inside wrapped blocks are skipped, so only one standalone remains.
    model_context = copy.deepcopy(mc_copy)
    wrap_policy = get_skip_match_module_child_wrap_policy(wrap_type_tuple)
    result = _accelerate(model_context, {'sync_module_states': True, 'limit_all_gathers': True, 'auto_wrap_policy': wrap_policy})
    counts = _count_fsdp_wrapped(result.model)
    assert (counts[gpt2block_cls] == 3)
    assert (counts[torch.nn.LayerNorm] == 1)

    # Case 3: same policy, but naming LayerNorm by string (resolved against
    # the model instance).
    model_context = mc_copy
    wrap_type_tuple_name = (gpt2block_cls, 'LayerNorm')
    wrap_policy = get_skip_match_module_child_wrap_policy(wrap_type_tuple_name, model_context.model)
    result = _accelerate(model_context, {'sync_module_states': True, 'limit_all_gathers': True, 'auto_wrap_policy': wrap_policy})
    counts = _count_fsdp_wrapped(result.model)
    assert (counts[gpt2block_cls] == 3)
    assert (counts[torch.nn.LayerNorm] == 1)
class ResnetUtilsTest(tf.test.TestCase):
    """TF1 tests for slim ``resnet_utils``: subsample, conv2d_same padding
    equivalence, end-point naming, and atrous (output_stride) stacking."""
    def testSubsampleThreeByThree(self):
        """Stride-2 subsample keeps every other row/col of a 3x3 map."""
        x = tf.reshape(tf.to_float(tf.range(9)), [1, 3, 3, 1])
        x = resnet_utils.subsample(x, 2)
        expected = tf.reshape(tf.constant([0, 2, 6, 8]), [1, 2, 2, 1])
        with self.test_session():
            self.assertAllClose(x.eval(), expected.eval())
    def testSubsampleFourByFour(self):
        """Stride-2 subsample keeps every other row/col of a 4x4 map."""
        x = tf.reshape(tf.to_float(tf.range(16)), [1, 4, 4, 1])
        x = resnet_utils.subsample(x, 2)
        expected = tf.reshape(tf.constant([0, 2, 8, 10]), [1, 2, 2, 1])
        with self.test_session():
            self.assertAllClose(x.eval(), expected.eval())
    def testConv2DSameEven(self):
        """conv2d_same vs SAME conv + subsample on an even-sized input
        (the two differ here, unlike the odd case below)."""
        (n, n2) = (4, 2)
        x = create_test_input(1, n, n, 1)
        w = create_test_input(1, 3, 3, 1)
        w = tf.reshape(w, [3, 3, 1, 1])
        tf.get_variable('Conv/weights', initializer=w)
        tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
        tf.get_variable_scope().reuse_variables()
        y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
        y1_expected = tf.to_float([[14, 28, 43, 26], [28, 48, 66, 37], [43, 66, 84, 46], [26, 37, 46, 22]])
        y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
        y2 = resnet_utils.subsample(y1, 2)
        y2_expected = tf.to_float([[14, 43], [43, 84]])
        y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
        y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
        y3_expected = y2_expected
        y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
        y4_expected = tf.to_float([[48, 37], [37, 22]])
        y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            self.assertAllClose(y1.eval(), y1_expected.eval())
            self.assertAllClose(y2.eval(), y2_expected.eval())
            self.assertAllClose(y3.eval(), y3_expected.eval())
            self.assertAllClose(y4.eval(), y4_expected.eval())
    def testConv2DSameOdd(self):
        """conv2d_same matches SAME conv + subsample on an odd-sized input."""
        (n, n2) = (5, 3)
        x = create_test_input(1, n, n, 1)
        w = create_test_input(1, 3, 3, 1)
        w = tf.reshape(w, [3, 3, 1, 1])
        tf.get_variable('Conv/weights', initializer=w)
        tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
        tf.get_variable_scope().reuse_variables()
        y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
        y1_expected = tf.to_float([[14, 28, 43, 58, 34], [28, 48, 66, 84, 46], [43, 66, 84, 102, 55], [58, 84, 102, 120, 64], [34, 46, 55, 64, 30]])
        y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
        y2 = resnet_utils.subsample(y1, 2)
        y2_expected = tf.to_float([[14, 43, 34], [43, 84, 55], [34, 55, 30]])
        y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
        y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
        y3_expected = y2_expected
        y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
        y4_expected = y2_expected
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            self.assertAllClose(y1.eval(), y1_expected.eval())
            self.assertAllClose(y2.eval(), y2_expected.eval())
            self.assertAllClose(y3.eval(), y3_expected.eval())
            self.assertAllClose(y4.eval(), y4_expected.eval())
    def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
        """Helper: stack blocks densely and collect end points."""
        with tf.variable_scope(scope, values=[inputs]):
            with slim.arg_scope([slim.conv2d], outputs_collections='end_points'):
                net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
                end_points = slim.utils.convert_collection_to_dict('end_points')
                return (net, end_points)
    def testEndPointsV2(self):
        """The expected end-point names are created for a tiny v2 network."""
        blocks = [resnet_v2.resnet_v2_block('block1', base_depth=1, num_units=2, stride=2), resnet_v2.resnet_v2_block('block2', base_depth=2, num_units=2, stride=1)]
        inputs = create_test_input(2, 32, 16, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (_, end_points) = self._resnet_plain(inputs, blocks, scope='tiny')
        expected = ['tiny/block1/unit_1/bottleneck_v2/shortcut', 'tiny/block1/unit_1/bottleneck_v2/conv1', 'tiny/block1/unit_1/bottleneck_v2/conv2', 'tiny/block1/unit_1/bottleneck_v2/conv3', 'tiny/block1/unit_2/bottleneck_v2/conv1', 'tiny/block1/unit_2/bottleneck_v2/conv2', 'tiny/block1/unit_2/bottleneck_v2/conv3', 'tiny/block2/unit_1/bottleneck_v2/shortcut', 'tiny/block2/unit_1/bottleneck_v2/conv1', 'tiny/block2/unit_1/bottleneck_v2/conv2', 'tiny/block2/unit_1/bottleneck_v2/conv3', 'tiny/block2/unit_2/bottleneck_v2/conv1', 'tiny/block2/unit_2/bottleneck_v2/conv2', 'tiny/block2/unit_2/bottleneck_v2/conv3']
        self.assertItemsEqual(expected, end_points.keys())
    def _stack_blocks_nondense(self, net, blocks):
        """Helper: reference (non-atrous) block stacking at rate 1."""
        for block in blocks:
            with tf.variable_scope(block.scope, 'block', [net]):
                for (i, unit) in enumerate(block.args):
                    with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]):
                        net = block.unit_fn(net, rate=1, **unit)
        return net
    def testAtrousValuesBottleneck(self):
        """Atrous dense stacking + subsample equals plain stacking, for
        several output strides (weights are shared via variable reuse)."""
        block = resnet_v2.resnet_v2_block
        blocks = [block('block1', base_depth=1, num_units=2, stride=2), block('block2', base_depth=2, num_units=2, stride=2), block('block3', base_depth=4, num_units=2, stride=2), block('block4', base_depth=8, num_units=2, stride=1)]
        nominal_stride = 8
        height = 30
        width = 31
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            with slim.arg_scope([slim.batch_norm], is_training=False):
                for output_stride in [1, 2, 4, 8, None]:
                    with tf.Graph().as_default():
                        with self.test_session() as sess:
                            tf.set_random_seed(0)
                            inputs = create_test_input(1, height, width, 3)
                            output = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
                            if (output_stride is None):
                                factor = 1
                            else:
                                factor = (nominal_stride // output_stride)
                            output = resnet_utils.subsample(output, factor)
                            tf.get_variable_scope().reuse_variables()
                            expected = self._stack_blocks_nondense(inputs, blocks)
                            sess.run(tf.global_variables_initializer())
                            (output, expected) = sess.run([output, expected])
                            self.assertAllClose(output, expected, atol=0.0001, rtol=0.0001)
class Conv3dIndepNormal(_DeepIndepNormal):
    """Deep independent-normal head whose mean and log-std are produced by
    1x1x1 3-D convolutions over the backbone's hidden channels."""

    def __init__(self, backbone: nn.Module, hidden_channels: int, out_channels: int=1):
        mean_head = nn.Conv3d(hidden_channels, out_channels=out_channels, kernel_size=1)
        logstd_head = nn.Conv3d(hidden_channels, out_channels=out_channels, kernel_size=1)
        super().__init__(backbone=backbone, mean_head=mean_head, logstd_head=logstd_head)
def AOLM(fms, fm1):
    """Attention Object Location Module: derive crop boxes from activations.

    ``fms`` and ``fm1`` are 4-D activation tensors whose spatial size is
    assumed to be 14x14 (see the hard-coded reshape below).  Each map is
    summed over channels and thresholded at its mean; the bbox of the
    intersection between the largest connected component of ``fms``'s mask
    and ``fm1``'s mask is scaled by the feature stride (32) into image
    coordinates.  Returns a list of ``[x1, y1, x2, y2]`` per batch element.

    Fix: the original crashed with ``ValueError: max() arg is an empty
    sequence`` when a thresholded mask had no connected component at all;
    that case now falls back to the full-map bbox like the existing
    no-intersection fallback.
    """
    A = torch.sum(fms, dim=1, keepdim=True)
    a = torch.mean(A, dim=[2, 3], keepdim=True)
    M = (A > a).float()
    A1 = torch.sum(fm1, dim=1, keepdim=True)
    a1 = torch.mean(A1, dim=[2, 3], keepdim=True)
    M1 = (A1 > a1).float()
    coordinates = []
    for (i, m) in enumerate(M):
        mask_np = m.cpu().numpy().reshape(14, 14)  # assumes 14x14 maps — TODO confirm
        component_labels = measure.label(mask_np)
        properties = measure.regionprops(component_labels)
        areas = [prop.area for prop in properties]
        if not areas:
            # No above-mean activation at all: use the whole map (the
            # original code raised here on max() of an empty list).
            bbox = [0, 0, 14, 14]
            print('there is one img no intersection')
        else:
            max_idx = areas.index(max(areas))
            # Intersect the largest component of M with the M1 mask.
            intersection = (((component_labels == (max_idx + 1)).astype(int) + (M1[i][0].cpu().numpy() == 1).astype(int)) == 2)
            prop = measure.regionprops(intersection.astype(int))
            if (len(prop) == 0):
                bbox = [0, 0, 14, 14]
                print('there is one img no intersection')
            else:
                bbox = prop[0].bbox
        # Scale the feature-map bbox to image coordinates (stride 32) and
        # clamp the top-left corner at 0.
        x_lefttop = ((bbox[0] * 32) - 1)
        y_lefttop = ((bbox[1] * 32) - 1)
        x_rightlow = ((bbox[2] * 32) - 1)
        y_rightlow = ((bbox[3] * 32) - 1)
        if (x_lefttop < 0):
            x_lefttop = 0
        if (y_lefttop < 0):
            y_lefttop = 0
        coordinate = [x_lefttop, y_lefttop, x_rightlow, y_rightlow]
        coordinates.append(coordinate)
    return coordinates
def test3():
    """BFS from node F toward node E must report that no path exists."""
    start = Node('F')
    goal = Node('E')
    assert not breadth_first_search(start, goal)
class DataTrainingArguments():
    """Arguments for the data used in token-classification train/eval/predict
    (HF examples style: each field's ``metadata['help']`` is its CLI help).

    NOTE(review): the ``field(...)`` defaults only take effect if this class
    is decorated with ``@dataclass`` — the decorator is not visible in this
    chunk; confirm it is applied at the definition site.
    """
    task_name: Optional[str] = field(default='ner', metadata={'help': 'The name of the task (ner, pos...).'})
    dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a csv or JSON file).'})
    validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate on (a csv or JSON file).'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'An optional input test data file to predict on (a csv or JSON file).'})
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
    pad_to_max_length: bool = field(default=True, metadata={'help': 'Whether to pad all samples to model maximum sentence length. If False, will pad the samples dynamically when batching to the maximum length in the batch. More efficient on GPU but very bad for TPU.'})
    max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
    max_val_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of validation examples to this value if set.'})
    max_test_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of test examples to this value if set.'})
    label_all_tokens: bool = field(default=False, metadata={'help': 'Whether to put the label for one word on all tokens of generated by that word or just on the one (in which case the other tokens will have a padding index).'})
    return_entity_level_metrics: bool = field(default=False, metadata={'help': 'Whether to return all the entity levels during evaluation or just the overall ones.'})
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.