code stringlengths 101 5.91M |
|---|
_name('slim_eval')
def test_slim_eval_nonuniform(benchmark):
    # Benchmark the slim-eval path on non-uniform inputs; `benchmark` is the
    # pytest-benchmark fixture and `slim_eval_runner` (defined elsewhere in
    # this file) drives the timed workload.
    slim_eval_runner(benchmark, uniform=False)
def test_revelation():
    """A fixed key hidden by an overwrite is reported and restored by revelation()."""
    dogmatic = DogmaticDict({'a': 7, 'b': 12})
    dogmatic['b'] = 23
    assert 'a' not in dogmatic
    revealed = dogmatic.revelation()
    assert set(revealed) == {'a'}
    assert 'a' in dogmatic
class KLDivergence(PytorchMetric):
    """Streaming KL-divergence metric.

    Accumulates sum(targets * log(targets/preds + eps)) over batches and
    averages by the number of examples seen (batch dimension 0).
    """

    def __init__(self):
        # Running example count and divergence sum; both are promoted to
        # float tensors by the arithmetic in __call__/compute.
        self.total = torch.tensor(0)
        self.divergence = torch.tensor(0)

    def __call__(self, preds, targets):
        """Fold one batch into the running totals.

        preds/targets must have identical shapes (checked by
        _check_same_shape); presumably both are probability tensors.
        """
        epsilon = 1e-07  # guards log(0) when the targets/preds ratio is zero
        _check_same_shape(preds, targets)
        output_size = targets.size(0)
        # Fixed: the original also computed `div = targets / preds` and never
        # used it -- dead code removed; the accumulated value is unchanged.
        self.divergence = (self.divergence + (targets * ((targets / preds) + epsilon).log()).sum())
        self.total += output_size

    def compute(self):
        """Return the mean divergence per accumulated example."""
        return (self.divergence.float() / self.total)
def filter_file(infile, filt, exclude):
    """Stream `infile` to stdout, filtering words against a vocabulary file.

    `filt` holds one vocabulary word per line.  With exclude=True every
    in-vocabulary word is blanked out; otherwise every out-of-vocabulary
    word is replaced by '<UNK>'.
    """
    vocab = set()
    with codecs.open(filt, 'r', encoding='utf-8') as vocabfile:
        for entry in vocabfile:
            vocab.add(entry.strip())
    # Force UTF-8 output regardless of locale (Python 2/3 compatible wrapper).
    sys.stdout = codecs.getwriter('utf-8')((sys.stdout if is_python2 else sys.stdout.buffer))
    if exclude:
        def render(word):
            return '' if word in vocab else word
    else:
        def render(word):
            return word if word in vocab else '<UNK>'
    with codecs.open(infile, 'r', encoding='utf-8') as textfile:
        for row in textfile:
            print(' '.join(render(token) for token in row.strip().split()))
def test_cpp_iterators():
    """Smoke-test the C++ iterator bindings exposed on module `m`."""
    assert m.tuple_iterator() == 12
    assert m.dict_iterator() == 305 + 711
    assert m.passed_iterator(iter((-7, 3))) == -4
class SPADEGenerator(BaseNetwork):
    """SPADE generator: maps a semantic segmentation map (and, in VAE mode,
    a latent vector z) to an RGB image via a stack of SPADE residual blocks
    with x2 upsampling between them."""

    def modify_commandline_options(parser, is_train):
        # NOTE(review): defined without @staticmethod; presumably invoked on
        # the class by the option framework -- signature left untouched.
        parser.set_defaults(norm_G='spectralspadesyncbatch3x3')
        parser.add_argument('--num_upsampling_layers', choices=('normal', 'more', 'most'), default='normal', help="If 'more', adds upsampling layer between the two middle resnet blocks. If 'most', also add one more upsampling + resnet layer at the end of the generator")
        return parser

    def __init__(self, opt):
        super().__init__()
        self.opt = opt
        nf = opt.ngf
        # Spatial size of the coarsest (initial) feature map.
        (self.sw, self.sh) = self.compute_latent_vector_size(opt)
        if opt.use_vae:
            # VAE mode: project the latent vector to a 16*nf feature map.
            self.fc = nn.Linear(opt.z_dim, (((16 * nf) * self.sw) * self.sh))
        else:
            # Otherwise embed the downsampled segmentation map directly.
            self.fc = nn.Conv2d(self.opt.semantic_nc, (16 * nf), 3, padding=1)
        self.head_0 = SPADEResnetBlock((16 * nf), (16 * nf), opt)
        self.G_middle_0 = SPADEResnetBlock((16 * nf), (16 * nf), opt)
        self.G_middle_1 = SPADEResnetBlock((16 * nf), (16 * nf), opt)
        # Channels are halved at each upsampling stage.
        self.up_0 = SPADEResnetBlock((16 * nf), (8 * nf), opt)
        self.up_1 = SPADEResnetBlock((8 * nf), (4 * nf), opt)
        self.up_2 = SPADEResnetBlock((4 * nf), (2 * nf), opt)
        self.up_3 = SPADEResnetBlock((2 * nf), (1 * nf), opt)
        final_nc = nf
        if (opt.num_upsampling_layers == 'most'):
            # One extra upsampling + resnet stage at the end.
            self.up_4 = SPADEResnetBlock((1 * nf), (nf // 2), opt)
            final_nc = (nf // 2)
        self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)
        self.up = nn.Upsample(scale_factor=2)

    def compute_latent_vector_size(self, opt):
        """Return (sw, sh): width/height of the initial feature map, i.e.
        crop_size divided by 2**num_up_layers, height scaled by the aspect
        ratio.

        Raises ValueError on an unrecognized num_upsampling_layers value.
        """
        if (opt.num_upsampling_layers == 'normal'):
            num_up_layers = 5
        elif (opt.num_upsampling_layers == 'more'):
            num_up_layers = 6
        elif (opt.num_upsampling_layers == 'most'):
            num_up_layers = 7
        else:
            raise ValueError(('opt.num_upsampling_layers [%s] not recognized' % opt.num_upsampling_layers))
        sw = (opt.crop_size // (2 ** num_up_layers))
        sh = round((sw / opt.aspect_ratio))
        return (sw, sh)

    def forward(self, input, z=None):
        """Generate an image from segmentation map `input` (and optional z)."""
        seg = input
        if self.opt.use_vae:
            if (z is None):
                # Sample z on the fly when none is provided.
                # NOTE(review): input.get_device() assumes a CUDA tensor.
                z = torch.randn(input.size(0), self.opt.z_dim, dtype=torch.float32, device=input.get_device())
            x = self.fc(z)
            x = x.view((- 1), (16 * self.opt.ngf), self.sh, self.sw)
        else:
            # Downsample the segmentation map to the coarsest resolution.
            x = F.interpolate(seg, size=(self.sh, self.sw))
            x = self.fc(x)
        x = self.head_0(x, seg)
        x = self.up(x)
        x = self.G_middle_0(x, seg)
        if ((self.opt.num_upsampling_layers == 'more') or (self.opt.num_upsampling_layers == 'most')):
            x = self.up(x)
        x = self.G_middle_1(x, seg)
        x = self.up(x)
        x = self.up_0(x, seg)
        x = self.up(x)
        x = self.up_1(x, seg)
        x = self.up(x)
        x = self.up_2(x, seg)
        x = self.up(x)
        x = self.up_3(x, seg)
        if (self.opt.num_upsampling_layers == 'most'):
            x = self.up(x)
            x = self.up_4(x, seg)
        x = self.conv_img(F.leaky_relu(x, 0.2))
        # Fixed: F.tanh is deprecated; torch.tanh is the supported equivalent
        # and computes the identical function.
        x = torch.tanh(x)
        return x
def _stack(in_ch: int, out_ch: int, kernel_size: int, stride: int, exp_factor: int, repeats: int, bn_momentum: float) -> nn.Sequential:
    """Build a stack of inverted-residual blocks.

    Only the leading block may change channels/stride; the remaining
    repeats-1 blocks are stride-1 out_ch->out_ch blocks.
    """
    assert repeats >= 1
    blocks = [_InvertedResidual(in_ch, out_ch, kernel_size, stride, exp_factor, bn_momentum=bn_momentum)]
    blocks.extend(
        _InvertedResidual(out_ch, out_ch, kernel_size, 1, exp_factor, bn_momentum=bn_momentum)
        for _ in range(repeats - 1)
    )
    return nn.Sequential(*blocks)
def ignore_undocumented(name):
    """Return True when `name` may legitimately be absent from the docs.

    Covers constants, common submodel suffixes, names backed by a file or
    directory in the diffusers source tree, framework loaders,
    availability checks, and explicitly whitelisted objects.
    """
    # Constants are documented on their owning page, not individually.
    if name.isupper():
        return True
    # Inner building blocks of larger models get no standalone page.
    # (idiom: endswith accepts a tuple -- replaces the original `or` chain)
    if name.endswith(('ModelMixin', 'Decoder', 'Encoder', 'Layer', 'Embeddings', 'Attention')):
        return True
    # Submodules and python files are covered by their module page.
    if (os.path.isdir(os.path.join(PATH_TO_DIFFUSERS, name)) or os.path.isfile(os.path.join(PATH_TO_DIFFUSERS, f'{name}.py'))):
        return True
    # Framework-conversion loaders are internal utilities.
    if name.startswith(('load_tf', 'load_pytorch')):
        return True
    # is_xxx_available() helpers are self-explanatory.
    if (name.startswith('is_') and name.endswith('_available')):
        return True
    # Deprecated or intentionally undocumented objects.
    if (name in DEPRECATED_OBJECTS) or (name in UNDOCUMENTED_OBJECTS):
        return True
    if name.startswith('MMBT'):
        return True
    if name in SHOULD_HAVE_THEIR_OWN_PAGE:
        return True
    return False
def test_divergence_bound():
    """Compare viabel.divergence_bound against closed-form Gaussian values."""
    np.random.seed(846)
    var1, var2 = 4, 16
    p1 = norm(scale=np.sqrt(var1))
    p2 = norm(scale=np.sqrt(var2))
    samples = p2.rvs(MC_SAMPLES)
    log_weights = p1.logpdf(samples) - p2.logpdf(samples)
    for alpha in [1.5, 2, 3]:
        print('alpha =', alpha)
        for elbo in [None, 0]:
            expected_dalpha = _gaussian_alpha_divergence(alpha, var1, var2)
            if elbo is None:
                # Without a log-norm bound the alpha-scaled KL term is added.
                expected_dalpha += (alpha / (alpha - 1)) * _gaussian_kl_divergence(var2, var1)
            actual = viabel.divergence_bound(log_weights, alpha=alpha, log_norm_bound=elbo)
            np.testing.assert_allclose(actual, expected_dalpha, atol=MC_TOL, rtol=MC_TOL, err_msg='incorrect d2 value')
class Trainer(object):
    """Model-based RL training loop.

    Each iteration: (1) collect rollouts from the environment -- random
    actions on iteration 0 when `initial_random_samples` is set, the policy
    otherwise -- (2) process the paths into arrays, (3) fit the dynamics
    model, then log timings and save a snapshot.
    """

    def __init__(self, env, sampler, sample_processor, policy, dynamics_model, n_itr, start_itr=0, initial_random_samples=True, dynamics_model_max_epochs=200, sess=None):
        self.env = env
        self.sampler = sampler
        self.sample_processor = sample_processor
        self.dynamics_model = dynamics_model
        self.policy = policy
        self.n_itr = n_itr
        self.start_itr = start_itr
        self.dynamics_model_max_epochs = dynamics_model_max_epochs
        self.initial_random_samples = initial_random_samples
        # Create a session only when the caller did not supply one.
        if (sess is None):
            sess = tf.Session()
        self.sess = sess

    def train(self):
        """Run the full sample -> process -> fit loop for n_itr iterations."""
        with self.sess.as_default() as sess:
            sess.run(tf.initializers.global_variables())
            start_time = time.time()
            for itr in range(self.start_itr, self.n_itr):
                itr_start_time = time.time()
                logger.log(('\n Iteration %d ' % itr))
                time_env_sampling_start = time.time()
                if (self.initial_random_samples and (itr == 0)):
                    # Bootstrap the model with uniformly random actions.
                    logger.log('Obtaining random samples from the environment...')
                    env_paths = self.sampler.obtain_samples(log=True, random=True, log_prefix='')
                else:
                    logger.log('Obtaining samples from the environment using the policy...')
                    env_paths = self.sampler.obtain_samples(log=True, log_prefix='')
                logger.record_tabular('Time-EnvSampling', (time.time() - time_env_sampling_start))
                logger.log('Processing environment samples...')
                time_env_samp_proc = time.time()
                samples_data = self.sample_processor.process_samples(env_paths, log=True)
                logger.record_tabular('Time-EnvSampleProc', (time.time() - time_env_samp_proc))
                time_fit_start = time.time()
                logger.log(('Training dynamics model for %i epochs ...' % self.dynamics_model_max_epochs))
                # Supervised fit on (obs, act) -> next_obs transitions.
                self.dynamics_model.fit(samples_data['observations'], samples_data['actions'], samples_data['next_observations'], epochs=self.dynamics_model_max_epochs, verbose=True, log_tabular=True)
                logger.record_tabular('Time-ModelFit', (time.time() - time_fit_start))
                logger.logkv('Itr', itr)
                logger.logkv('n_timesteps', self.sampler.total_timesteps_sampled)
                logger.logkv('Time', (time.time() - start_time))
                logger.logkv('ItrTime', (time.time() - itr_start_time))
                logger.log('Saving snapshot...')
                params = self.get_itr_snapshot(itr)
                self.log_diagnostics(env_paths, '')
                logger.save_itr_params(itr, params)
                logger.log('Saved')
                logger.dumpkvs()
                # Freeze the TF graph after the first full iteration to catch
                # accidental graph growth in later iterations.
                if (itr == 1):
                    sess.graph.finalize()
            logger.log('Training finished')
            self.sess.close()

    def get_itr_snapshot(self, itr):
        """Objects persisted each iteration by logger.save_itr_params."""
        return dict(itr=itr, policy=self.policy, env=self.env, dynamics_model=self.dynamics_model)

    def log_diagnostics(self, paths, prefix):
        # Delegate diagnostic logging to environment and policy.
        self.env.log_diagnostics(paths, prefix)
        self.policy.log_diagnostics(paths, prefix)
def test_masked_ones_summarize(model, X, w):
    # With an all-True mask, summarize() on a MaskedTensor must yield the
    # same sufficient statistics as on the dense tensor; the literals below
    # are those known dense results (checked to 4 decimals).
    X = torch.tensor(numpy.array(X))
    mask = torch.ones_like(X).type(torch.bool)
    X_ = torch.masked.MaskedTensor(X, mask=mask)
    d1 = model.distributions[0]
    d2 = model.distributions[1]
    model.summarize(X_, sample_weight=w)
    assert_array_almost_equal(model._xw_sum, [0., 4.173105, 4.912965, 4.113657], 4)
    assert_array_almost_equal(model._xw_starts_sum, [0.136405, 3.163595], 4)
    assert_array_almost_equal(model._xw_ends_sum, [0.876271, 2.423729], 4)
    assert_array_almost_equal(d1._w_sum, [5.049643, 5.049643, 5.049643], 4)
    assert_array_almost_equal(d1._xw_sum, [8.834015, 5.17916, 0.], 4)
    assert_array_almost_equal(d2._w_sum, [11.450351, 11.450351, 11.450351], 4)
    assert_array_almost_equal(d2._xw_sum, [18.86598, 12.320832, 21.093086], 4)
class Program():
    """Generate a runnable PyTorch program from a GraphIR.

    Builds concrete random input tensors, turns Constant ops into named
    nn.Parameters, emits layer construction and forward-body source lines,
    and assembles them into the module-level code templates
    (code_header/code_model/code_main).  dump() writes the program plus
    pickled params/inputs to disk.
    """

    def __init__(self, ir: GraphIR) -> None:
        # Concrete input tensors, keyed by IR variable name.
        self.inputs: Dict[(str, torch.Tensor)] = {}
        code_forward: List[str] = []
        for input_var_name in ir.input_var():
            abs_tensor: AbsTensor = ir.vars[input_var_name]
            assert abs_tensor.is_concrete(), f'Input {input_var_name} is not concrete'
            random_input = random_tensor(abs_tensor.shape, abs_tensor.dtype.torch(), use_cuda=False)
            self.inputs[input_var_name] = random_input
            # Emit a shape comment for readability of the generated code.
            code_forward.append(line(8, f'# {input_var_name}: {list(random_input.shape)}'))
        # Constant tensors become named parameters; var_2_param maps an IR
        # variable to the parameter attribute that replaces it.
        self.params: Dict[(str, torch.Tensor)] = {}
        var_2_param: Dict[(str, str)] = {}

        def var_names(var: List[str]) -> List[str]:
            # Substitute parameter attribute names where applicable.
            return [var_2_param.get(v, v) for v in var]
        code_nn_modules: List[str] = []
        code_params: List[str] = []
        for ir_inst in ir.insts:
            op = ir_inst.iexpr.op
            if isinstance(op, Constant):
                # Materialize the constant as a (possibly trainable) parameter.
                p_name = f'p{len(self.params)}'
                p_data = random_tensor(op.abs_tensor.shape, op.abs_tensor.dtype.torch())
                self.params[p_name] = p_data
                code_params.append(line(8, f'self.{p_name} = torch.nn.parameter.Parameter(params["{p_name}"], requires_grad={p_data.is_floating_point()}) # {list(p_data.shape)}'))
                retvals = ir_inst.retvals()
                assert (len(retvals) == 1), f'Constant should have one retval, got len(retvals) = {len(retvals)!r}'
                var_2_param[retvals[0]] = f'self.{p_name}'
            elif (not isinstance(op, Input)):
                input_vals = var_names(ir_inst.iexpr.args)
                ret_vals_str = ', '.join(ir_inst.retvals())
                if isinstance(op, AutoInfOpBase):
                    # AutoInf ops carry their own invocation template; '??'
                    # placeholders are filled with the argument names.
                    symb_2_value = op.attrs
                    invoke_str_tmp: str = op.inst.invoke_str(symb_2_value).replace('??', '{}')
                    invoke_str = invoke_str_tmp.format(*input_vals)
                    code_forward.append(line(8, f'{ret_vals_str} = {invoke_str}'))
                else:
                    (code, is_nn_module) = gen_code(op)
                    if is_nn_module:
                        # Stateful ops become layers built in __init__ and
                        # invoked in forward().
                        layer_name = f'layer{len(code_nn_modules)}'
                        code_nn_modules.append(line(8, f'self.{layer_name} = {code}'))
                        code_forward.append(line(8, f"{ret_vals_str} = self.{layer_name}({', '.join(input_vals)})"))
                    else:
                        code_forward.append(line(8, f'{ret_vals_str} = {code.format(*input_vals)}'))
        # Backend runner: TorchScript trace + inference optimization.
        bk_name = 'JIT'
        code_bk_run = ['exported = torch.jit.trace(model, example_kwarg_inputs=inputs)', 'exported = torch.jit.optimize_for_inference(exported)', 'ret_exported = exported(**inputs)']
        bk_ret_name = 'ret_exported'
        self.code_header = code_header
        self.code_model = code_model.format('\n'.join(code_params), '\n'.join(code_nn_modules), ', '.join(ir.input_var()), '\n'.join(code_forward), ', '.join(ir.leaf_var()))
        self.code_main = code_main.format('torch.device("cpu")', 'torch.device("cpu")', bk_name, '\n'.join(code_bk_run), bk_ret_name)

    def dump(self, path: os.PathLike) -> None:
        """Write prog.py plus pickled params/inputs under `path`."""
        os.makedirs(path, exist_ok=True)
        with open(os.path.join(path, 'prog.py'), 'w') as f:
            print(self.code_header, file=f)
            print(self.code_model, file=f)
            print(self.code_main, file=f)
        with open(os.path.join(path, 'params.pkl'), 'wb') as f:
            pickle.dump(self.params, f)
        with open(os.path.join(path, 'inputs.pkl'), 'wb') as f:
            pickle.dump(self.inputs, f)
def get_activation(act_fn):
    """Instantiate the activation module named by `act_fn`.

    Recognized names: 'swish'/'silu', 'mish', 'gelu', 'relu'.
    Raises ValueError for anything else.
    """
    factories = {
        'swish': nn.SiLU,
        'silu': nn.SiLU,
        'mish': nn.Mish,
        'gelu': nn.GELU,
        'relu': nn.ReLU,
    }
    factory = factories.get(act_fn)
    if factory is None:
        raise ValueError(f'Unsupported activation function: {act_fn}')
    return factory()
def auprOut(X1, Y1):
    """Area under the precision-recall curve for the OOD ("out") class,
    where scores *below* a threshold delta count as positive.

    X1: scores of in-distribution samples (false positives when < delta).
    Y1: scores of out-of-distribution samples (true positives when < delta).
    Relies on the module-level `diff` array of thresholds -- presumably the
    sorted candidate deltas; TODO confirm its definition at module scope.
    """
    auprBase = 0.0
    recallTemp = 1.0
    for delta in diff[::-1]:  # sweep thresholds from high to low
        # Fixed: np.float was removed in NumPy 1.24; true division already
        # yields a float.  The redundant double np.sum is also collapsed.
        fp = np.sum(X1 < delta) / len(X1)
        tp = np.sum(Y1 < delta) / len(Y1)
        if (tp + fp) == 0:
            continue
        precision = tp / (tp + fp)
        recall = tp
        # Rectangle-rule accumulation of the PR curve.
        auprBase += (recallTemp - recall) * precision
        recallTemp = recall
    # NOTE(review): if every iteration hit `continue`, recall/precision are
    # unbound here -- same as the original; callers presumably never pass
    # inputs that trigger it.
    auprBase += recall * precision
    return auprBase
def prepare(config):
    """Build feature records for the train/dev/test splits and persist the
    eval/meta artifacts at the paths configured on `config`."""
    (train_examples, train_eval) = process_file(config.train_para_file, config.train_question_file, para_limit=config.para_limit)
    (dev_examples, dev_eval) = process_file(config.dev_para_file, config.dev_question_file, para_limit=config.para_limit)
    (test_examples, test_eval) = process_file(config.test_para_file, config.test_question_file, para_limit=config.para_limit)

    def _load_json_dict(path, label):
        # Load a json mapping and echo its size (same log lines as before).
        with open(path, 'r') as fh:
            mapping = json.load(fh)
        print('num of {} {}'.format(label, len(mapping)))
        return mapping

    word2idx_dict = _load_json_dict(config.word_dictionary, 'words')
    label2idx_dict = _load_json_dict(config.label_dictionary, 'labels')
    pos2idx_dict = _load_json_dict(config.pos_dictionary, 'pos tags')
    ner2idx_dict = _load_json_dict(config.ner_dictionary, 'ner tags')

    def _build(examples, split, record_file):
        # All splits share the same dictionaries and limits.
        return build_features(config, examples, split, record_file, word2idx_dict, pos2idx_dict, ner2idx_dict, label2idx_dict, config.para_limit, config.ques_limit, config.max_input_length)

    train_meta = _build(train_examples, 'train', config.train_record_file)
    dev_meta = _build(dev_examples, 'dev', config.dev_record_file)
    test_meta = _build(test_examples, 'test', config.test_record_file)
    save(config.train_eval_file, train_eval, message='train eval')
    save(config.dev_eval_file, dev_eval, message='dev eval')
    save(config.test_eval_file, test_eval, message='test eval')
    save(config.train_meta, train_meta, message='train meta')
    save(config.dev_meta, dev_meta, message='dev meta')
    save(config.test_meta, test_meta, message='test meta')
class MSE(PytorchMetric):
    """Streaming mean-squared-error metric."""

    def __init__(self):
        # Element count and running sum of squared residuals.
        self.total = torch.tensor(0)
        self.sum_squared_error = torch.tensor(0.0)

    def __call__(self, preds, targets):
        """Fold one batch of predictions/targets into the running sums."""
        _check_same_shape(preds, targets)
        residual = torch.sub(preds, targets)
        self.sum_squared_error += torch.square(residual).sum()
        self.total += targets.numel()

    def compute(self):
        """Mean squared error over everything accumulated so far."""
        return self.sum_squared_error / self.total
def is_int_tensor(tensor):
    # True when `tensor` has any torch integer dtype (signed or unsigned);
    # the actual type check is delegated to _is_type_tensor.
    return _is_type_tensor(tensor, [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64])
class SamplingStrategy():
    """Namespace of string constants naming the available sampling
    strategies (used as plain config values, intentionally not an Enum)."""
    Random = 'random'
    Genetic = 'genetic'
    KdTree = 'kdtree'
    Gradient = 'gradient'
def sample_many(inner_func, get_cost_func, input, batch_rep=1, iter_rep=1):
    """Sample multiple candidate solutions per instance and keep the best.

    The batch is repeated `batch_rep` times and sampling is run `iter_rep`
    times; for each original instance the minimum-cost sequence across all
    batch_rep * iter_rep samples is returned.

    Returns:
        (minpis, mincosts): best sequence (padded to the common max length)
        and its cost, one per original batch element.
    """
    input = do_batch_rep(input, batch_rep)
    costs = []
    pis = []
    for i in range(iter_rep):
        (_log_p, pi) = inner_func(input)
        (cost, mask) = get_cost_func(input, pi)
        # (batch_rep * batch,) -> (batch, batch_rep)
        costs.append(cost.view(batch_rep, (- 1)).t())
        # (batch_rep * batch, seq) -> (batch, batch_rep, seq)
        pis.append(pi.view(batch_rep, (- 1), pi.size((- 1))).transpose(0, 1))
    max_length = max((pi.size((- 1)) for pi in pis))
    # Right-pad every sampled sequence to the common length, then stack all
    # repetitions along dim 1.
    pis = torch.cat([F.pad(pi, (0, (max_length - pi.size((- 1))))) for pi in pis], 1)
    costs = torch.cat(costs, 1)
    (mincosts, argmincosts) = costs.min((- 1))
    # Per-instance selection of the minimum-cost sequence.
    minpis = pis[(torch.arange(pis.size(0), out=argmincosts.new()), argmincosts)]
    return (minpis, mincosts)
class Receiver(metaclass=abc.ABCMeta):
    """Observer interface: implementors are notified via receive_notify()."""

    def receive_notify(self, obj: object, message: Dict):
        # NOTE(review): not marked @abc.abstractmethod, so instantiation is
        # not blocked; an un-overridden call raises at runtime instead.
        raise NotImplementedError('Method receive_notify() not implemented!')
_arg_scope
def customized_slim_fully_connected(inputs, num_outputs, activation_fn=nn.relu, normalizer_fn=None, normalizer_params=None, weights_initializer=initializers.xavier_initializer(), weights_regularizer=None, biases_initializer=init_ops.zeros_initializer(), biases_regularizer=None, reuse=None, variables_collections=None, outputs_collections=None, trainable=True, scope=None, task_id=1):
    """Variant of slim.fully_connected that runs the normalizer inside a
    per-task variable scope ('task_<task_id>'), so each task keeps its own
    normalization variables while the dense weights are shared.

    Parameter semantics otherwise mirror tf.contrib.layers.fully_connected.
    """
    if (not isinstance(num_outputs, six.integer_types)):
        raise ValueError(('num_outputs should be int or long, got %s.' % (num_outputs,)))
    # Map Keras-layer variable names back to slim's conventional names.
    layer_variable_getter = _build_variable_getter({'bias': 'biases', 'kernel': 'weights'})
    with variable_scope.variable_scope(scope, 'FC', [inputs], reuse=reuse, custom_getter=layer_variable_getter) as sc:
        inputs = ops.convert_to_tensor(inputs)
        # The bias is skipped when a normalizer is supplied (it would be
        # absorbed by the normalization).
        layer = core_layers.Dense(units=num_outputs, activation=None, use_bias=((not normalizer_fn) and biases_initializer), kernel_initializer=weights_initializer, bias_initializer=biases_initializer, kernel_regularizer=weights_regularizer, bias_regularizer=biases_regularizer, activity_regularizer=None, trainable=trainable, name=sc.name, dtype=inputs.dtype.base_dtype, _scope=sc, _reuse=reuse)
        outputs = layer.apply(inputs)
        _add_variable_to_collections(layer.kernel, variables_collections, 'weights')
        if (layer.bias is not None):
            _add_variable_to_collections(layer.bias, variables_collections, 'biases')
        if (normalizer_fn is not None):
            if (not normalizer_params):
                normalizer_params = {}
            # Task-specific scope: the customization this function exists for.
            with tf.variable_scope('task_{}'.format(task_id)):
                outputs = normalizer_fn(outputs, **normalizer_params)
        if (activation_fn is not None):
            outputs = activation_fn(outputs)
        return utils.collect_named_outputs(outputs_collections, sc.original_name_scope, outputs)
def gradient_penalty(y, x):
    """WGAN-GP style gradient penalty: mean((||dy/dx||_2 - 1)^2).

    y: output tensor, a differentiable function of x.
    x: input tensor with requires_grad=True.
    Returns a scalar tensor; the graph is kept (create_graph=True) so the
    penalty itself can be backpropagated through.
    """
    # Fixed: was torch.ones(y.size()).cuda(), which crashed on CPU tensors;
    # ones_like matches y's device and dtype, producing identical values on GPU.
    weight = torch.ones_like(y)
    dydx = torch.autograd.grad(outputs=y, inputs=x, grad_outputs=weight, retain_graph=True, create_graph=True, only_inputs=True)[0]
    # Flatten per-sample gradients before taking the L2 norm.
    dydx = dydx.view(dydx.size(0), -1)
    dydx_l2norm = torch.sqrt(torch.sum(dydx ** 2, dim=1))
    return torch.mean((dydx_l2norm - 1) ** 2)
def _resolve_logging_level(level):
    """Map a level name (str) to its numeric value; pass numbers through.

    The custom 'PROGRESS' name resolves to the module-level
    PROGRESS_LEVEL_NUM; other names are looked up on the logging module.
    """
    if isinstance(level, str):
        if level.upper() == 'PROGRESS':
            return PROGRESS_LEVEL_NUM
        return getattr(logging, level.upper())
    return level


def setup_logger(logging_level_console=logging.DEBUG, log_file=None, logging_level_file=logging.DEBUG):
    """Configure the 'picca.delta_extraction' logger.

    Adds a stdout handler at `logging_level_console` and, when `log_file`
    is given, a file handler at `logging_level_file`.  An existing log file
    is renamed (suffixed with its mtime) rather than overwritten.
    Levels may be given as numbers or names (including 'PROGRESS').
    """
    # The duplicated name-parsing blocks are consolidated into one helper.
    logging_level_console = _resolve_logging_level(logging_level_console)
    logging_level_file = _resolve_logging_level(logging_level_file)
    logger = logging.getLogger('picca.delta_extraction')
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter('[%(levelname)s]: %(message)s')
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(logging_level_console)
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)
    if log_file is not None:
        if os.path.exists(log_file):
            # Preserve the previous run's log by renaming it with its mtime.
            newfilename = f'{log_file}.{os.path.getmtime(log_file)}'
            os.rename(log_file, newfilename)
        file_handler = logging.FileHandler(log_file, mode='w')
        file_handler.setLevel(logging_level_file)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
def evaluate(dataset, predictions, output_folder, **kwargs):
    """Dispatch evaluation according to the dataset type.

    Only WordDataset is currently supported; any other dataset type raises
    NotImplementedError.
    """
    if not isinstance(dataset, datasets.WordDataset):
        raise NotImplementedError('Unsupported dataset type {}.'.format(type(dataset).__name__))
    return word_evaluation(dataset=dataset, predictions=predictions, output_folder=output_folder, **kwargs)
class NullTransformer(BaseEstimator, TransformerMixin):
    """Identity transformer: a scikit-learn pipeline stage that returns its
    input unchanged (useful as a no-op placeholder)."""

    def fit(self, X, y=None):
        # Nothing to learn; return self per the sklearn estimator contract.
        return self

    def transform(self, X):
        # Pass the data through untouched.
        return X
class Layer(object):
    """Base class for TF layers with optional summary logging.

    Subclasses implement build()/call()/kldiv(); __call__ wraps call()
    inside a name scope, optionally records input/output histograms, and
    applies the nonlinearity.  N is presumably the dataset size used to
    scale the KL regularizer -- see get_reg().
    """

    def __init__(self, nonlin=tf.identity, N=1, name=None, logging=False):
        self.N = N
        if (name is None):
            # Auto-name as <classname>_<uid> to keep TF name scopes unique.
            layer = self.__class__.__name__.lower()
            name = ((layer + '_') + str(get_layer_uid(layer)))
        self.name = name
        self.logging = logging
        self.nonlinearity = nonlin
        # build() creates variables; subclasses must provide it.
        self.build()
        print('Logging: {}'.format(self.logging))

    def __call__(self, x, sample=True, **kwargs):
        """Apply the layer: call() then the nonlinearity, with optional
        input/output histogram summaries."""
        with tf.name_scope(self.name):
            if self.logging:
                tf.summary.histogram((self.name + '/inputs'), x)
            output = self.call(x, sample=sample, **kwargs)
            if self.logging:
                tf.summary.histogram((self.name + '/outputs'), output)
            outputs = self.nonlinearity(output)
            return outputs

    def call(self, x, sample=True, **kwargs):
        # Core forward computation; provided by subclasses.
        raise NotImplementedError()

    def build(self):
        # Variable creation; provided by subclasses.
        raise NotImplementedError()

    def f(self, x, sampling=True, **kwargs):
        raise NotImplementedError()

    def get_reg(self):
        """KL regularizer scaled by -1/N (per-datapoint weighting)."""
        return ((- (1.0 / self.N)) * self.kldiv())

    def kldiv(self):
        raise NotImplementedError
('/app/initCustomProvider', methods=['POST'])
def initCustomProvider():
    """Flask endpoint: register a user-supplied custom provider script.

    Executes the posted Python code (which is expected to register at least
    one provider via the chainforge decorator), removes any previously
    cached script files for providers that were re-registered, caches the
    new script under provider_scripts/<script_id>.py, and returns the
    current provider registry (without the callables).

    NOTE(review): exec() of client-posted code is inherently unsafe; this is
    presumably acceptable because the server runs locally for the user.
    """
    data = request.get_json()
    if ('code' not in data):
        return jsonify({'error': 'POST data is improper format.'})
    if ('' not in data['code']):
        return jsonify({'error': 'Did not detect a decorator. Custom provider scripts should register at least one \n Do `from chainforge.providers import provider` and decorate your provider completion function with '})
    # Ensure the on-disk cache directory for provider scripts exists.
    provider_scripts_dir = os.path.join(CACHE_DIR, 'provider_scripts')
    if (not os.path.isdir(provider_scripts_dir)):
        try:
            os.makedirs(provider_scripts_dir, exist_ok=True)
        except Exception as e:
            return jsonify({'error': f"Error creating a new directory 'provider_scripts' at filepath {provider_scripts_dir}: {str(e)}"})
    # Millisecond timestamp doubles as the script's cache id.
    script_id = str(round((time.time() * 1000)))
    ProviderRegistry.set_curr_script_id(script_id)
    ProviderRegistry.watch_next_registered()
    try:
        exec(data['code'], globals(), None)
    except Exception as e:
        return jsonify({'error': f'''Error while executing custom provider code:
{str(e)}'''})
    new_registries = ProviderRegistry.last_registered()
    if (len(new_registries) == 0):
        return jsonify({'error': 'Did not detect any custom providers added to the registry. Make sure you are registering your provider with correctly.'})
    # Non-None values are the script ids of providers that were previously
    # registered and are now superseded; remove their cached scripts.
    if any(((v is not None) for v in new_registries.values())):
        past_script_ids = [v for v in new_registries.values() if (v is not None)]
        for sid in past_script_ids:
            past_script_path = os.path.join(provider_scripts_dir, f'{sid}.py')
            try:
                if os.path.isfile(past_script_path):
                    os.remove(past_script_path)
            except Exception as e:
                return jsonify({'error': f"Error removing cache'd custom provider script at filepath {past_script_path}: {str(e)}"})
    # Strip the callables before serializing the registry for the client.
    registered_providers = [exclude_key(d, 'func') for d in ProviderRegistry.get_all()]
    try:
        with open(os.path.join(provider_scripts_dir, f'{script_id}.py'), 'w') as f:
            f.write(data['code'])
    except Exception as e:
        return jsonify({'error': f"Error saving script 'provider_scripts' at filepath {provider_scripts_dir}: {str(e)}"})
    return jsonify({'providers': registered_providers})
def cheater(mdim, pdim, qdeg, start, startsols):
    """Run a cheater homotopy for the Pieri problem (mdim, pdim, qdeg).

    Builds a random Pieri system instance, tracks the start solutions
    `startsols` of system `start` to the new system, prints the end
    solutions, and verifies them against the target polynomials.
    """
    # Problem dimension: m*p + q*(m+p) random planes define the instance.
    dim = ((mdim * pdim) + (qdeg * (mdim + pdim)))
    planes = [random_complex_matrix((mdim + pdim), mdim) for _ in range(0, dim)]
    pols = make_pieri_system(mdim, pdim, qdeg, planes)
    from phcpy.trackers import track
    print(('cheater homotopy with %d paths' % len(startsols)))
    sols = track(pols, start, startsols)
    for sol in sols:
        print(sol)
    verify(pols, sols)
def random_tensor(tensor_shape, tensor_dtype, library='torch'):
    """Draw a random tensor of the requested shape and dtype.

    library='torch' uses torch generators; any other value falls through to
    TensorFlow.  Bools sample {0, 1}; floats/complex sample the unit
    interval; integers sample uniformly from [0, 10).
    """
    if library == 'torch':
        import torch
        if tensor_dtype == torch.bool:
            return torch.randint(0, 2, tensor_shape, dtype=tensor_dtype)
        if tensor_dtype.is_floating_point or tensor_dtype.is_complex:
            return torch.rand(tensor_shape, dtype=tensor_dtype)
        return torch.randint(0, 10, tensor_shape, dtype=tensor_dtype)
    import tensorflow as tf
    if tensor_dtype == tf.bool:
        return tf.cast(tf.random.uniform(tensor_shape, minval=0, maxval=2, dtype=tf.int32), dtype=tf.bool)
    if tensor_dtype.is_floating:
        return tf.random.uniform(tensor_shape, dtype=tensor_dtype)
    if tensor_dtype.is_complex:
        # Build the complex tensor from two independent real samples.
        ftype = tf.float64 if tensor_dtype == tf.complex128 else tf.float32
        return tf.complex(tf.random.uniform(tensor_shape, dtype=ftype), tf.random.uniform(tensor_shape, dtype=ftype))
    if tensor_dtype == tf.string:
        return tf.convert_to_tensor(np.ones(tensor_shape, dtype=str))
    # Any remaining (integer) dtype: saturate-cast from int64 samples.
    return tf.saturate_cast(tf.random.uniform(tensor_shape, minval=0, maxval=10, dtype=tf.int64), dtype=tensor_dtype)
class LatentVariableModel(nn.Module):
    """Abstract base for hierarchical latent-variable models.

    Subclasses provide the latent levels, the output distribution and the
    abstract hooks below; this class implements the shared KL / likelihood
    / free-energy arithmetic.
    """

    def __init__(self, model_config):
        super(LatentVariableModel, self).__init__()
        self.model_config = model_config
        # When set, likelihoods are evaluated over the interval
        # [obs, obs + output_interval] (discretized likelihood).
        self.output_interval = None

    def _construct(self, model_config):
        raise NotImplementedError

    def infer(self, observation):
        raise NotImplementedError

    def generate(self, gen=False, n_samples=1):
        raise NotImplementedError

    def step(self):
        raise NotImplementedError

    def re_init(self):
        raise NotImplementedError

    def kl_divergences(self, averaged=True):
        """Per-level KL divergences, summed over latent dims and averaged
        over the sample dimension (and over the batch when `averaged`).

        Only the top level uses the analytical KL; lower levels are
        estimated from samples.
        """
        kl = []
        for (level_ind, latent_level) in enumerate(self.latent_levels):
            analytical = (level_ind == (len(self.latent_levels) - 1))
            level_kl = latent_level.latent.kl_divergence(analytical)
            # Sum out all trailing latent dimensions (down to dim 2)...
            for dim in range((len(level_kl.data.shape) - 1), 1, (- 1)):
                level_kl = level_kl.sum(dim)
            # ...then average over the sample dimension (dim 1).
            level_kl = level_kl.mean(1)
            kl.append(level_kl)
        if averaged:
            kl = [level_kl.mean(dim=0) for level_kl in kl]
        return kl

    def conditional_log_likelihoods(self, observation, averaged=True):
        """log p(x|z) per batch element (mean over samples).

        2-D/4-D observations get an extra sample dimension inserted first.
        """
        if (len(observation.data.shape) in [2, 4]):
            observation = observation.unsqueeze(1)
        if (self.output_interval is not None):
            observation = (observation, (observation + self.output_interval))
        log_prob = self.output_dist.log_prob(value=observation)
        # Sum over all data dimensions, keeping (batch, samples).
        while (len(log_prob.data.shape) > 2):
            last_dim = (len(log_prob.data.shape) - 1)
            log_prob = log_prob.sum(last_dim)
        log_prob = log_prob.mean(1)
        if averaged:
            log_prob = log_prob.mean(dim=0)
        return log_prob

    def free_energy(self, observation, averaged=True, anneal_weight=1.0):
        """Variational free energy -(log-likelihood - anneal_weight * KL)."""
        cond_log_like = self.conditional_log_likelihoods(observation, averaged=False)
        kl = sum(self.kl_divergences(averaged=False))
        free_energy = (- (cond_log_like - (anneal_weight * kl)))
        if averaged:
            return free_energy.mean(dim=0)
        else:
            return free_energy

    def losses(self, observation, averaged=True, anneal_weight=1.0):
        """Return (free_energy, cond_log_like, per-level KL list)."""
        cond_log_like = self.conditional_log_likelihoods(observation, averaged=False)
        kl = self.kl_divergences(averaged=False)
        free_energy = (- (cond_log_like - (anneal_weight * sum(kl))))
        if averaged:
            return (free_energy.mean(dim=0), cond_log_like.mean(dim=0), [level_kl.mean(dim=0) for level_kl in kl])
        else:
            return (free_energy, cond_log_like, kl)

    def inference_parameters(self):
        raise NotImplementedError

    def generative_parameters(self):
        raise NotImplementedError

    def inference_mode(self):
        raise NotImplementedError

    def generative_mode(self):
        raise NotImplementedError
class ResidualBlock(nn.Module):
    """Two conv3x3 layers (optionally followed by dropout) wrapped in a
    skip connection: out = x + F(x)."""

    def __init__(self, h_dim, norm_layer=None, nl_layer=None, use_dropout=False):
        super(ResidualBlock, self).__init__()
        layers = [
            conv3x3(h_dim, h_dim, norm_layer=norm_layer, nl_layer=nl_layer),
            conv3x3(h_dim, h_dim, norm_layer=norm_layer),
        ]
        if use_dropout:
            layers.append(nn.Dropout(0.5))
        self.encode = nn.Sequential(*layers)

    def forward(self, x):
        return x + self.encode(x)
def _has_soft_sentence_predictions(results: List[dict]) -> bool:
return (('rationales' in results[0]) and (len(results[0]['rationales']) > 0) and ('soft_sentence_predictions' in results[0]['rationales'][0]) and (results[0]['rationales'][0]['soft_sentence_predictions'] is not None)) |
class RandomResizedCrop(DualTransform):
    """Crop a random scaled sub-volume and resize it back to `shape`.

    The scale is drawn uniformly from `scale_limit`; the mask variant uses
    nearest-neighbour resizing (interpolation=0) to keep label values
    discrete.
    """

    def __init__(self, shape, scale_limit=(0.8, 1.2), interpolation=3, always_apply=False, p=1.0):
        super().__init__(always_apply, p)
        self.shape = shape
        self.scale_limit = scale_limit
        self.interpolation = interpolation

    def apply(self, img, scale=1, scaled_shape=None, h_start=0, w_start=0, d_start=0):
        # Crop at the scaled shape, then resize back to the target shape.
        if (scaled_shape is None):
            scaled_shape = self.shape
        img = F.random_crop(img, scaled_shape[0], scaled_shape[1], scaled_shape[2], h_start, w_start, d_start)
        return F.resize(img, new_shape=self.shape, interpolation=self.interpolation)

    def apply_to_mask(self, img, scale=1, scaled_shape=None, h_start=0, w_start=0, d_start=0):
        # Same crop as apply(); nearest-neighbour resize preserves labels.
        if (scaled_shape is None):
            scaled_shape = self.shape
        img = F.random_crop(img, scaled_shape[0], scaled_shape[1], scaled_shape[2], h_start, w_start, d_start)
        return F.resize(img, new_shape=self.shape, interpolation=0)

    def get_params(self, **data):
        # One shared scale and crop anchor per call, reused for image + mask.
        scale = random.uniform(self.scale_limit[0], self.scale_limit[1])
        scaled_shape = [int((scale * i)) for i in self.shape]
        return {'scale': scale, 'scaled_shape': scaled_shape, 'h_start': random.random(), 'w_start': random.random(), 'd_start': random.random()}
def spect_diff(u_spect, signal_ndim, order, mesh_bound=None):
    """Differentiate a signal in the spectral (Fourier) domain.

    u_spect: FFT of the signal in the old torch real-FFT layout, i.e. the
        last dimension (size 2) holds the (real, imag) parts.
    signal_ndim: number of trailing spatial signal dimensions.
    order: per-dimension derivative orders; len(order) == signal_ndim.
    mesh_bound: optional ((lower...), (upper...)) bounds; when given,
        frequencies are rescaled by 2*pi/(upper - lower) per axis.

    Returns the spectrum multiplied by prod_i k_i**order[i], with the
    residual imaginary-unit factor applied via the (real, imag) rotations
    at the end.
    """
    size0 = u_spect.shape
    s = ([1] * u_spect.dim())
    freq0 = np.ones(s)
    assert (len(order) == signal_ndim)
    # Index of the first signal dimension (last dim holds real/imag).
    b = ((u_spect.dim() - signal_ndim) - 1)
    for i in range(signal_ndim):
        if (order[i] == 0):
            continue
        # Integer wavenumbers along this axis.
        freq = np.fft.fftfreq(size0[(b + i)], (1 / size0[(b + i)]))
        if (not (mesh_bound is None)):
            # Rescale to physical wavenumbers for the given domain length.
            freq *= ((2 * np.pi) / (mesh_bound[1][i] - mesh_bound[0][i]))
        freq = (freq ** order[i])
        # Reshape to broadcast the 1-D factor along axis b+i only.
        s[(b + i)] = (- 1)
        freq = np.reshape(freq, s)
        s[(b + i)] = 1
        freq0 = (freq0 * freq)
    freq0 = torch.from_numpy(freq0).to(u_spect)
    u_spect = (u_spect * freq0)
    # Apply the leftover power-of-i factor from d/dx ~ (i*k): each case
    # rotates/negates the (real, imag) pair stored in the last dimension.
    # NOTE(review): the exact sign convention depends on the FFT convention
    # used upstream -- preserved as written.
    totalorder = sum(order)
    if ((totalorder % 4) == 1):
        u_spect = torch.stack([u_spect[(..., 1)], (- u_spect[(..., 0)])], dim=(- 1))
    elif ((totalorder % 4) == 2):
        u_spect = (- u_spect)
    elif ((totalorder % 4) == 3):
        u_spect = torch.stack([(- u_spect[(..., 1)]), u_spect[(..., 0)]], dim=(- 1))
    return u_spect
class TextDataset(Dataset):
    """Language-modeling dataset: tokenizes a text file into fixed-size,
    non-overlapping blocks of token ids, with an on-disk pickle cache
    guarded by a file lock.

    NOTE(review): the trailing remainder shorter than block_size is dropped.
    """

    def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int, overwrite_cache=False, cache_dir: Optional[str]=None):
        assert os.path.isfile(file_path), f'Input file path {file_path} not found'
        # Reserve room for the special tokens added around each block.
        block_size = (block_size - tokenizer.num_special_tokens_to_add(pair=False))
        (directory, filename) = os.path.split(file_path)
        cached_features_file = os.path.join((cache_dir if (cache_dir is not None) else directory), 'cached_lm_{}_{}_{}'.format(tokenizer.__class__.__name__, str(block_size), filename))
        # The lock makes concurrent dataset construction safe across processes.
        lock_path = (cached_features_file + '.lock')
        with FileLock(lock_path):
            if (os.path.exists(cached_features_file) and (not overwrite_cache)):
                start = time.time()
                with open(cached_features_file, 'rb') as handle:
                    self.examples = pickle.load(handle)
                logger.info(f'Loading features from cached file {cached_features_file} [took %.3f s]', (time.time() - start))
            else:
                logger.info(f'Creating features from dataset file at {directory}')
                self.examples = []
                with open(file_path, encoding='utf-8') as f:
                    text = f.read()
                tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
                # Slice into non-overlapping blocks, adding special tokens.
                for i in range(0, ((len(tokenized_text) - block_size) + 1), block_size):
                    self.examples.append(tokenizer.build_inputs_with_special_tokens(tokenized_text[i:(i + block_size)]))
                start = time.time()
                with open(cached_features_file, 'wb') as handle:
                    pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)
                logger.info('Saving features into cached file %s [took %.3f s]', cached_features_file, (time.time() - start))

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i) -> torch.Tensor:
        # Each item is one block of token ids as a long tensor.
        return torch.tensor(self.examples[i], dtype=torch.long)
class COCO(data.Dataset):
num_classes = 80
default_resolution = [512, 512]
mean = np.array([0., 0., 0.], dtype=np.float32).reshape(1, 1, 3)
std = np.array([0., 0., 0.], dtype=np.float32).reshape(1, 1, 3)
def __init__(self, opt, split):
super(COCO, self).__init__()
self.data_dir = ('/store/datasets/coco' if os.path.exists('/store/datasets/coco') else '/home/travail/datasets/coco')
self.img_dir = os.path.join(self.data_dir, 'images', '{}2017'.format(split))
if (split == 'test'):
self.annot_path = os.path.join(self.data_dir, 'annotations', 'image_info_test-dev2017.json').format(split)
elif (opt.task == 'exdet'):
self.annot_path = os.path.join(self.data_dir, 'annotations', 'instances_extreme_{}2017.json').format(split)
elif (split == 'val'):
self.annot_path = os.path.join(self.data_dir, 'annotations', 'instances_{}2017.json').format(split)
else:
self.annot_path = os.path.join(self.data_dir, 'annotations', 'instances_{}2017.json').format(split)
self.max_objs = 128
self.class_name = ['__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']
self._valid_ids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
self.cat_ids = {v: i for (i, v) in enumerate(self._valid_ids)}
self.voc_color = [((((v // 32) * 64) + 64), (((v // 8) % 4) * 64), ((v % 8) * 32)) for v in range(1, (self.num_classes + 1))]
self._data_rng = np.random.RandomState(123)
self._eig_val = np.array([0.2141788, 0., 0.], dtype=np.float32)
self._eig_vec = np.array([[(- 0.), (- 0.), 0.], [(- 0.5832747), 0., (- 0.)], [(- 0.), 0., 0.]], dtype=np.float32)
self.split = split
self.opt = opt
print('==> initializing coco 2017 {} data.'.format(split))
self.coco = coco.COCO(self.annot_path)
self.images = self.coco.getImgIds()
self.num_samples = len(self.images)
print('Loaded {} {} samples'.format(split, self.num_samples))
def _to_float(self, x):
return float('{:.2f}'.format(x))
def convert_eval_format(self, all_bboxes):
    """Flatten {image_id: {class_index: [bbox, ...]}} detections into the
    COCO results-json list format (xywh boxes, floats rounded to 2 decimals).

    Note: boxes are converted from x1y1x2y2 to x1y1wh *in place*.
    """
    results = []
    for img_id, per_class in all_bboxes.items():
        for cls_ind, boxes in per_class.items():
            # Internal class indices are 1-based; map back to COCO category ids.
            coco_cat = self._valid_ids[cls_ind - 1]
            for box in boxes:
                # Convert the corner coordinates to width/height in place.
                box[2] -= box[0]
                box[3] -= box[1]
                entry = {'image_id': int(img_id), 'category_id': int(coco_cat), 'bbox': list(map(self._to_float, box[0:4])), 'score': float('{:.2f}'.format(box[4]))}
                if len(box) > 5:
                    # Optional extra payload: 4 extreme points (8 coordinates).
                    entry['extreme_points'] = list(map(self._to_float, box[5:13]))
                results.append(entry)
    return results
def __len__(self):
    """Number of images in this dataset split."""
    return self.num_samples
def save_results(self, results, save_dir):
    """Serialize *results* (COCO detection format) to <save_dir>/results.json.

    Fix: the original passed ``open(...)`` directly to ``json.dump`` and never
    closed the handle; use a context manager so the file is always closed
    (and flushed) even if serialization raises.
    """
    with open('{}/results.json'.format(save_dir), 'w') as f:
        json.dump(self.convert_eval_format(results), f)
def run_eval(self, results, save_dir):
    """Write detections to <save_dir>/results.json and score them with the
    official pycocotools COCOeval bbox protocol (prints the AP summary)."""
    self.save_results(results, save_dir)
    coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir))
    coco_eval = COCOeval(self.coco, coco_dets, 'bbox')
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
def whether_move(masks, frames):
    """If the object is roughly static over a 4-frame window (bbox center
    moves < 10 px between consecutive frames), dilate each frame's mask and
    accumulate it into *masks* (clipped). Returns *masks* (mutated in place).
    """
    if len(frames) != 4:
        return masks
    centers_x = []
    centers_y = []
    for frame in frames:
        cx, cy = mask2bbox(frame)
        centers_x.append(cx)
        centers_y.append(cy)
    # Per-step center displacement between consecutive frames.
    dx = [np.abs(centers_x[i + 1] - centers_x[i]) for i in range(3)]
    dy = [np.abs(centers_y[i + 1] - centers_y[i]) for i in range(3)]
    if np.maximum(np.array(dx).max(), np.array(dy).max()) < 10:
        kernel = np.ones((3, 3), np.uint8)
        for i in range(4):
            dilated = cv2.dilate(frames[i].astype(np.float32), kernel, iterations=1)
            masks[i] += dilated
            masks[i] = clip_mask(masks[i])
    return masks
def test_aggregated_agent_metric_3():
    """Mean group-reduction with 'last' train-reduction over 5 env steps."""
    env = MockEnv()
    metric = ph.metrics.AggregatedAgentMetric(agent_ids=['agent1', 'agent2'], agent_property='test_property', group_reduce_action='mean', train_reduce_action='last')
    samples = []
    for _step in range(5):
        env.step()
        samples.append(metric.extract(env))
    assert metric.reduce(samples, mode='train') == 7.5
def fasttext_predict(corpus: Union[List[str], List[List[str]]]):
    """Detect the most likely language of *corpus* with fastText's pretrained
    lid.176 language-identification model.

    Args:
        corpus: either a list of raw strings, or a list of token lists
            (in which case only the first document is classified).

    Returns:
        The predicted language code (e.g. 'en'), with fastText's
        '__label__' prefix stripped.
    """
    # Fix: the original URL literal was truncated (unterminated string).
    # This is the official download location of the compressed lid.176 model.
    url = 'https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.ftz'
    filepath = get_cached_file_path('fasttext', 'lid.176.ftz', url)
    # Silence fastText's deprecation chatter on model load.
    fasttext.FastText.eprint = (lambda x: None)
    classifier = fasttext.load_model(str(filepath))
    prediction: Tuple[List[List[str]], List] = None
    if all(isinstance(ins, list) for ins in corpus):
        # Pre-tokenized input: classify the first document only
        # (matches the original behavior).
        prediction = classifier.predict(corpus[0])
    elif isinstance(corpus, list):
        prediction = classifier.predict(corpus)
    label = prediction[0][0][0]
    label = label.replace('__label__', '')
    return label
.parametrize('cv1, cv2, expected', [(GroupKFold(2), KFold(3), False), (GroupKFold(2), GroupKFold(3), False), (GroupKFold(3), GroupKFold(3), True), (GroupShuffleSplit(2), GroupShuffleSplit(3), 'non-reproducible'), (GroupShuffleSplit(2, random_state=32), GroupShuffleSplit(3, random_state=32), False), (GroupShuffleSplit(3, random_state=32), GroupShuffleSplit(3, random_state=32), True), (GroupShuffleSplit(3, random_state=33), GroupShuffleSplit(3, random_state=32), False), (KFold(2), KFold(3), False), (KFold(2, shuffle=True), KFold(2, shuffle=True), 'non-reproducible'), (KFold(3, random_state=32, shuffle=True), KFold(3, random_state=32, shuffle=True), True), (KFold(3, random_state=33, shuffle=True), KFold(3, random_state=32, shuffle=True), False), (LeaveOneGroupOut(), LeaveOneGroupOut(), True), (LeavePGroupsOut(3), LeavePGroupsOut(3), True), (LeavePGroupsOut(3), LeavePGroupsOut(2), False), (LeaveOneOut(), LeaveOneOut(), True), (LeavePOut(2), LeavePOut(2), True), (LeavePOut(2), LeavePOut(3), False), (PredefinedSplit([1, 2, 3]), PredefinedSplit([1, 2, 3]), True), (PredefinedSplit([1, 2, 3]), PredefinedSplit([1, 2, 4]), False), (RepeatedKFold(n_splits=2), RepeatedKFold(n_splits=2), 'non-reproducible'), (RepeatedKFold(n_splits=2, random_state=32), RepeatedKFold(n_splits=3, random_state=32), False), (RepeatedKFold(n_splits=2, random_state=32), RepeatedKFold(n_splits=2, random_state=32), True), (RepeatedKFold(n_splits=2, n_repeats=2, random_state=32), RepeatedKFold(n_splits=2, n_repeats=3, random_state=32), False), (RepeatedStratifiedKFold(n_splits=2), RepeatedStratifiedKFold(n_splits=2), 'non-reproducible'), (RepeatedStratifiedKFold(n_splits=2, random_state=32), RepeatedStratifiedKFold(n_splits=3, random_state=32), False), (RepeatedStratifiedKFold(n_splits=2, random_state=32), RepeatedStratifiedKFold(n_splits=2, random_state=32), True), (RepeatedStratifiedKFold(n_splits=2, n_repeats=2, random_state=32), RepeatedStratifiedKFold(n_splits=2, n_repeats=3, random_state=32), 
False), (ShuffleSplit(n_splits=2), ShuffleSplit(n_splits=2), 'non-reproducible'), (ShuffleSplit(n_splits=2, random_state=32), ShuffleSplit(n_splits=3, random_state=32), False), (ShuffleSplit(n_splits=2, random_state=32), ShuffleSplit(n_splits=2, random_state=32), True), (ShuffleSplit(n_splits=2, test_size=2, random_state=32), ShuffleSplit(n_splits=2, test_size=3, random_state=32), False), (ShuffleSplit(n_splits=2, train_size=2, random_state=32), ShuffleSplit(n_splits=2, train_size=3, random_state=32), False), (StratifiedKFold(2), StratifiedKFold(3), False), (StratifiedKFold(2, shuffle=True), StratifiedKFold(2, shuffle=True), 'non-reproducible'), (StratifiedKFold(3, random_state=32, shuffle=True), StratifiedKFold(3, random_state=32, shuffle=True), True), (StratifiedKFold(3, random_state=33, shuffle=True), StratifiedKFold(3, random_state=32, shuffle=True), False), (StratifiedShuffleSplit(n_splits=2), StratifiedShuffleSplit(n_splits=2), 'non-reproducible'), (StratifiedShuffleSplit(n_splits=2, random_state=32), StratifiedShuffleSplit(n_splits=3, random_state=32), False), (StratifiedShuffleSplit(n_splits=2, random_state=32), StratifiedShuffleSplit(n_splits=2, random_state=32), True), (StratifiedShuffleSplit(n_splits=2, test_size=2, random_state=32), StratifiedShuffleSplit(n_splits=2, test_size=3, random_state=32), False), (StratifiedShuffleSplit(n_splits=2, train_size=2, random_state=32), StratifiedShuffleSplit(n_splits=2, train_size=3, random_state=32), False), (StratifiedGroupKFold(2), StratifiedGroupKFold(3), False), (StratifiedGroupKFold(3), StratifiedGroupKFold(3), True), (ContinuousStratifiedGroupKFold(n_bins=10, n_splits=2), ContinuousStratifiedGroupKFold(n_bins=10, n_splits=3), False), (ContinuousStratifiedGroupKFold(n_bins=10, n_splits=2), ContinuousStratifiedGroupKFold(n_bins=11, n_splits=2), False), (ContinuousStratifiedGroupKFold(n_bins=10, n_splits=2, method='quantile'), ContinuousStratifiedGroupKFold(n_bins=10, n_splits=2), False), 
(ContinuousStratifiedGroupKFold(n_bins=10, n_splits=2, shuffle=True), ContinuousStratifiedGroupKFold(n_bins=10, n_splits=2, shuffle=True), 'non-reproducible'), (ContinuousStratifiedGroupKFold(n_bins=10, n_splits=3, random_state=32, shuffle=True), ContinuousStratifiedGroupKFold(n_bins=10, n_splits=3, random_state=32, shuffle=True), True), (ContinuousStratifiedGroupKFold(n_bins=10, n_splits=3, random_state=33, shuffle=True), ContinuousStratifiedGroupKFold(n_bins=10, n_splits=3, random_state=32, shuffle=True), False), (RepeatedContinuousStratifiedGroupKFold(n_bins=10, n_splits=2), RepeatedContinuousStratifiedGroupKFold(n_bins=10, n_splits=2), 'non-reproducible'), (RepeatedContinuousStratifiedGroupKFold(n_bins=10, n_splits=2, random_state=32), RepeatedContinuousStratifiedGroupKFold(n_bins=10, n_splits=3, random_state=32), False), (RepeatedContinuousStratifiedGroupKFold(n_bins=10, n_splits=2, random_state=32), RepeatedContinuousStratifiedGroupKFold(n_bins=10, n_splits=2, random_state=32), True), (RepeatedContinuousStratifiedGroupKFold(n_bins=10, n_splits=2, n_repeats=2, random_state=32), RepeatedContinuousStratifiedGroupKFold(n_bins=10, n_splits=2, n_repeats=3, random_state=32), False), ([(np.arange(2, 9), np.arange(0, 2)), (np.arange(0, 7), np.arange(7, 9))], [(np.arange(2, 9), np.arange(0, 2)), (np.arange(0, 7), np.arange(7, 9))], True), ([(np.arange(3, 9), np.arange(0, 3)), (np.arange(0, 7), np.arange(7, 9))], [(np.arange(2, 9), np.arange(0, 2)), (np.arange(0, 7), np.arange(7, 9))], False)])
def test__compute_cvmdsum(cv1, cv2, expected):
    """_compute_cvmdsum must yield equal digests only for equivalent,
    reproducible CV splitters.

    ``expected`` is True/False for (non-)matching digests, or the literal
    string 'non-reproducible' when the splitter shuffles without a fixed
    random_state — in that case the digest itself is the sentinel string.
    """
    cv1 = check_cv(cv1)
    cv2 = check_cv(cv2)
    md1 = _compute_cvmdsum(cv1)
    md2 = _compute_cvmdsum(cv2)
    if (expected == 'non-reproducible'):
        # Both digests collapse to the same sentinel value.
        assert (md1 == md2)
        assert (md1 == expected)
    else:
        assert ((md1 == md2) is expected)
class Cell(nn.Module):
    """Searchable NAS cell (Auto-DeepLab style).

    Fuses features coming from the previous layer at up to three resolutions
    (downsampled / same / upsampled) and, optionally, from the layer before
    that (s0), applying architecture-weighted MixedOps over ``steps``
    intermediate nodes.
    """

    def __init__(self, steps, block_multiplier, prev_prev_fmultiplier, prev_fmultiplier_down, prev_fmultiplier_same, prev_fmultiplier_up, filter_multiplier):
        super(Cell, self).__init__()
        # Channel bookkeeping: every input branch is first projected to C_out.
        self.C_in = (block_multiplier * filter_multiplier)
        self.C_out = filter_multiplier
        self.C_prev_prev = int((prev_prev_fmultiplier * block_multiplier))
        self._prev_fmultiplier_same = prev_fmultiplier_same
        # One 1x1 ConvBR preprocessor per available previous-layer branch;
        # a branch whose multiplier is None does not exist for this cell.
        if (prev_fmultiplier_down is not None):
            self.C_prev_down = int((prev_fmultiplier_down * block_multiplier))
            self.preprocess_down = ConvBR(self.C_prev_down, self.C_out, 1, 1, 0)
        if (prev_fmultiplier_same is not None):
            self.C_prev_same = int((prev_fmultiplier_same * block_multiplier))
            self.preprocess_same = ConvBR(self.C_prev_same, self.C_out, 1, 1, 0)
        if (prev_fmultiplier_up is not None):
            self.C_prev_up = int((prev_fmultiplier_up * block_multiplier))
            self.preprocess_up = ConvBR(self.C_prev_up, self.C_out, 1, 1, 0)
        # prev_prev_fmultiplier == -1 signals there is no s0 input at all.
        if (prev_prev_fmultiplier != (- 1)):
            self.pre_preprocess = ConvBR(self.C_prev_prev, self.C_out, 1, 1, 0)
        self._steps = steps
        self.block_multiplier = block_multiplier
        self._ops = nn.ModuleList()
        # Node i has (2 + i) incoming edges; each edge gets a MixedOp, except
        # the s0 edge (j == 0) when s0 does not exist — that slot stays None
        # and is skipped in forward().
        for i in range(self._steps):
            for j in range((2 + i)):
                stride = 1
                if ((prev_prev_fmultiplier == (- 1)) and (j == 0)):
                    op = None
                else:
                    op = MixedOp(self.C_out, stride)
                self._ops.append(op)
        self._initialize_weights()

    def scale_dimension(self, dim, scale):
        """Scale a spatial dimension; odd sizes use the align_corners-style
        (dim - 1) * scale + 1 formula so 2x down- then up-sampling round-trips."""
        assert isinstance(dim, int)
        return (int((((float(dim) - 1.0) * scale) + 1.0)) if (dim % 2) else int((dim * scale)))

    def prev_feature_resize(self, prev_feature, mode):
        """Bilinearly resize the previous feature by 0.5x ('down') or 2x ('up').

        NOTE(review): any other ``mode`` would leave feature_size_h/w unbound
        and raise — callers only pass 'down' or 'up'.
        """
        if (mode == 'down'):
            feature_size_h = self.scale_dimension(prev_feature.shape[2], 0.5)
            feature_size_w = self.scale_dimension(prev_feature.shape[3], 0.5)
        elif (mode == 'up'):
            feature_size_h = self.scale_dimension(prev_feature.shape[2], 2)
            feature_size_w = self.scale_dimension(prev_feature.shape[3], 2)
        return F.interpolate(prev_feature, (feature_size_h, feature_size_w), mode='bilinear', align_corners=True)

    def forward(self, s0, s1_down, s1_same, s1_up, n_alphas):
        """Run the cell once per available s1 branch.

        Each non-None s1_* is resized/projected to this cell's resolution and
        paired with s0 (or a literal 0 when s0 is absent); the DAG of MixedOps
        is then evaluated with the architecture weights ``n_alphas``.
        Returns one concatenated feature map per processed branch.

        NOTE(review): assumes at least one of s1_down/s1_same/s1_up is not
        None, otherwise size_h/size_w are unbound.
        """
        if (s1_down is not None):
            s1_down = self.prev_feature_resize(s1_down, 'down')
            s1_down = self.preprocess_down(s1_down)
            (size_h, size_w) = (s1_down.shape[2], s1_down.shape[3])
        if (s1_same is not None):
            s1_same = self.preprocess_same(s1_same)
            (size_h, size_w) = (s1_same.shape[2], s1_same.shape[3])
        if (s1_up is not None):
            s1_up = self.prev_feature_resize(s1_up, 'up')
            s1_up = self.preprocess_up(s1_up)
            (size_h, size_w) = (s1_up.shape[2], s1_up.shape[3])
        all_states = []
        if (s0 is not None):
            # Match s0's spatial size and channel count to this cell.
            s0 = (F.interpolate(s0, (size_h, size_w), mode='bilinear', align_corners=True) if ((s0.shape[2] != size_h) or (s0.shape[3] != size_w)) else s0)
            s0 = (self.pre_preprocess(s0) if (s0.shape[1] != self.C_out) else s0)
            if (s1_down is not None):
                states_down = [s0, s1_down]
                all_states.append(states_down)
            if (s1_same is not None):
                states_same = [s0, s1_same]
                all_states.append(states_same)
            if (s1_up is not None):
                states_up = [s0, s1_up]
                all_states.append(states_up)
        else:
            # No s0: its slot is the scalar 0 (the matching ops are None and
            # get skipped below).
            if (s1_down is not None):
                states_down = [0, s1_down]
                all_states.append(states_down)
            if (s1_same is not None):
                states_same = [0, s1_same]
                all_states.append(states_same)
            if (s1_up is not None):
                states_up = [0, s1_up]
                all_states.append(states_up)
        final_concates = []
        for states in all_states:
            # offset walks the flat self._ops list in the same (i, j) order
            # used when it was built in __init__.
            offset = 0
            for i in range(self._steps):
                new_states = []
                for (j, h) in enumerate(states):
                    branch_index = (offset + j)
                    if (self._ops[branch_index] is None):
                        continue
                    new_state = self._ops[branch_index](h, n_alphas[branch_index])
                    new_states.append(new_state)
                s = sum(new_states)
                offset += len(states)
                states.append(s)
            # Concatenate the last block_multiplier intermediate nodes.
            concat_feature = torch.cat(states[(- self.block_multiplier):], dim=1)
            final_concates.append(concat_feature)
        return final_concates

    def _initialize_weights(self):
        """He-init conv weights; BatchNorm to identity (weight=1, bias=0)."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
class WnBReportBest(Callback):
    """Callback that tracks the best value of a monitored quantity across
    epochs and reports it to a Weights & Biases run summary.

    Args:
        wb: the wandb module (or any object exposing ``run.summary``).
        monitor: key looked up in the epoch-end ``logs`` dict.
        mode: 'min', 'max', or 'auto' (direction inferred from the name).
    """

    def __init__(self, wb: object, monitor: str='val_loss', mode: str='auto'):
        super(WnBReportBest, self).__init__()
        self.monitor = monitor
        self.mode = mode
        self.wb = wb
        self.best_epoch = None  # set on the first improvement
        if (self.mode not in ['auto', 'min', 'max']):
            warnings.warn(('WnBReportBest mode %s is unknown, fallback to auto mode.' % self.mode), RuntimeWarning)
            self.mode = 'auto'
        if (self.mode == 'min'):
            self.monitor_op = np.less
            # np.Inf was removed in NumPy 2.0; np.inf is the supported spelling.
            self.best = np.inf
        elif (self.mode == 'max'):
            self.monitor_op = np.greater
            self.best = (- np.inf)
        elif self._is_metric(self.monitor):
            # 'auto': score-like names (accuracy/precision/...) are maximized.
            self.monitor_op = np.greater
            self.best = (- np.inf)
        else:
            # 'auto' fallback: treat the monitored quantity as a loss.
            self.monitor_op = np.less
            self.best = np.inf

    def on_epoch_end(self, epoch: int, logs: Optional[Dict]=None, metric: Optional[float]=None):
        """If the monitored value improved this epoch, record it in the
        wandb run summary and remember the epoch it occurred at."""
        logs = (logs or {})
        current = logs.get(self.monitor)
        if (current is not None):
            if self.monitor_op(current, self.best):
                self.wb.run.summary['best'] = current
                self.best = current
                self.best_epoch = epoch

    @staticmethod
    def _is_metric(monitor: str):
        """Heuristic: does *monitor* name a score-like metric (higher=better)?

        Fix: this was declared without ``self`` but invoked as
        ``self._is_metric(...)``, which raised TypeError (two positional
        args into a one-arg function). It must be a ``@staticmethod``.
        """
        return any((s in monitor) for s in ['acc', 'prec', 'rec', 'fscore', 'f1', 'f2'])
class ExploitabilityP2SROManagerLogger(SimpleP2SROManagerLogger):
    """P2SRO manager logger that, every time both players fix a new policy,
    computes the exploitability of the current metanash strategy against the
    underlying OpenSpiel game and appends per-generation stats to a JSON file.

    Only valid for Poker/OshiZumo multi-agent envs with
    ``calc_exploitability_for_openspiel_env`` enabled on the scenario.
    """

    def __init__(self, p2sro_manger, log_dir: str, scenario: PSROScenario):
        super(ExploitabilityP2SROManagerLogger, self).__init__(p2sro_manger=p2sro_manger, log_dir=log_dir)
        self._scenario = scenario
        if (not issubclass(scenario.env_class, (PokerMultiAgentEnv, OshiZumoMultiAgentEnv))):
            raise ValueError(f'ExploitabilityP2SROManagerLogger is only meant to be used with PokerMultiAgentEnv or OshiZumoMultiAgentEnv,not {scenario.env_class}')
        if (not scenario.calc_exploitability_for_openspiel_env):
            raise ValueError(f'Only use ExploitabilityP2SROManagerLogger if scenario.calc_exploitability_for_openspiel_env is True.')
        # Per-generation histories, appended once per completed generation.
        self._exploitability_per_generation = []
        self._total_steps_per_generation = []
        self._total_episodes_per_generation = []
        self._num_policies_per_generation = []
        self._payoff_table_checkpoint_nums = []
        self._payoff_table_checkpoint_paths = []
        self._policy_nums_checkpoint_paths = []
        self._exploitability_stats_save_path = os.path.join(log_dir, 'exploitability_stats.json')
        ensure_dir(self._exploitability_stats_save_path)

    def on_active_policy_moved_to_fixed(self, player: int, policy_num: int, fixed_policy_spec: StrategySpec):
        """Hook called when a player's active policy becomes fixed.

        Runs the parent bookkeeping, then — once BOTH players have the same
        set of fixed policies — evaluates metanash exploitability for the new
        generation and persists cumulative stats to JSON.
        """
        current_checkpoint_num = self.get_current_checkpoint_num()
        super(ExploitabilityP2SROManagerLogger, self).on_active_policy_moved_to_fixed(player=player, policy_num=policy_num, fixed_policy_spec=fixed_policy_spec)
        data = self._manager.get_copy_of_latest_data()
        (latest_payoff_table, active_policy_nums_per_player, fixed_policy_nums_per_player) = data
        # Only evaluate full generations: both players must have fixed
        # policies and their fixed-policy sets must match exactly.
        if ((len(fixed_policy_nums_per_player[0]) < 1) or (len(fixed_policy_nums_per_player[1]) < 1)):
            return
        if (not np.array_equal(fixed_policy_nums_per_player[0], fixed_policy_nums_per_player[1])):
            return
        n_policies = len(fixed_policy_nums_per_player[0])
        latest_policy_index = max(fixed_policy_nums_per_player[0])
        env = self._scenario.env_class(self._scenario.env_config)

        def extra_action_out_fn(policy: Policy, input_dict, state_batches, model, action_dist: ActionDistribution) -> Dict[(str, TensorType)]:
            # Emit a one-hot action_probs distribution over the deterministic
            # action so downstream exploitability code can read pure strategies.
            # NOTE(review): the .long() tensor is assigned 1.0 (stored as int 1)
            # — presumably fine downstream; verify dtype expectations.
            action = action_dist.deterministic_sample()
            action_probs = torch.zeros_like(policy.q_values).long()
            action_probs[0][action[0]] = 1.0
            return {'q_values': policy.q_values, 'action_probs': action_probs}
        if (self._scenario.policy_classes['eval'] != SimpleQTorchPolicyPatched):
            raise NotImplementedError(f"This method isn't verified to work with policy classes other than SimpleQTorchPolicyPatched. You're using {self._scenario.policy_classes['eval']}")
        policy_class = self._scenario.policy_classes['eval'].with_updates(extra_action_out_fn=extra_action_out_fn)
        trainer_config = self._scenario.get_trainer_config(env)
        trainer_config['explore'] = False
        # One throwaway eval policy per player; weights are loaded per
        # checkpoint inside psro_measure_exploitability_nonlstm.
        policies = [policy_class(env.observation_space, env.action_space, with_common_config(trainer_config)) for _ in range(2)]
        # Metanash mixture for player 0 is computed from player 1's viewpoint
        # (and vice versa) via fictitious play on the payoff table.
        metanash_probs_0 = get_latest_metanash_strategies(payoff_table=latest_payoff_table, as_player=1, as_policy_num=n_policies, fictitious_play_iters=2000, mix_with_uniform_dist_coeff=0.0, print_matrix=False)[0].probabilities_for_each_strategy()
        if self._scenario.single_agent_symmetric_game:
            metanash_probs_1 = metanash_probs_0
        else:
            metanash_probs_1 = get_latest_metanash_strategies(payoff_table=latest_payoff_table, as_player=0, as_policy_num=n_policies, fictitious_play_iters=2000, mix_with_uniform_dist_coeff=0.0, print_matrix=False)[1].probabilities_for_each_strategy()
        policy_specs_0 = latest_payoff_table.get_ordered_spec_list_for_player(player=0)[:n_policies]
        policy_specs_1 = latest_payoff_table.get_ordered_spec_list_for_player(player=1)[:n_policies]
        assert (len(metanash_probs_1) == len(policy_specs_1)), f'len(metanash_probs_1): {len(metanash_probs_1)}, len(policy_specs_1): {len(policy_specs_1)}'
        assert (len(metanash_probs_0) == len(policy_specs_0))
        assert (len(policy_specs_0) == len(policy_specs_1))
        # Pair each generation's BR checkpoints with their metanash weights.
        br_checkpoint_paths = []
        metanash_weights = []
        for (spec_0, prob_0, spec_1, prob_1) in zip(policy_specs_0, metanash_probs_0, policy_specs_1, metanash_probs_1):
            br_checkpoint_paths.append((spec_0.metadata['checkpoint_path'], spec_1.metadata['checkpoint_path']))
            metanash_weights.append((prob_0, prob_1))
        exploitability_this_gen = psro_measure_exploitability_nonlstm(br_checkpoint_path_tuple_list=br_checkpoint_paths, metanash_weights=metanash_weights, set_policy_weights_fn=load_pure_strat, rllib_policies=policies, poker_game_version=env.game_version, open_spiel_env_config=env.open_spiel_env_config)
        logger.info(f'{n_policies} policies, {exploitability_this_gen} exploitability')
        # Cumulative training cost (steps/episodes) up to this generation.
        policy_spec_added_this_gen = [latest_payoff_table.get_spec_for_player_and_pure_strat_index(player=p, pure_strat_index=(n_policies - 1)) for p in range(2)]
        latest_policy_steps = sum((policy_spec_added_this_gen[p].metadata['timesteps_training_br'] for p in range(2)))
        latest_policy_episodes = sum((policy_spec_added_this_gen[p].metadata['episodes_training_br'] for p in range(2)))
        if (latest_policy_index > 0):
            total_steps_this_generation = (latest_policy_steps + self._total_steps_per_generation[(latest_policy_index - 1)])
            total_episodes_this_generation = (latest_policy_episodes + self._total_episodes_per_generation[(latest_policy_index - 1)])
        else:
            total_steps_this_generation = latest_policy_steps
            total_episodes_this_generation = latest_policy_episodes
        self._exploitability_per_generation.append(exploitability_this_gen)
        self._total_steps_per_generation.append(total_steps_this_generation)
        self._total_episodes_per_generation.append(total_episodes_this_generation)
        self._num_policies_per_generation.append(n_policies)
        self._payoff_table_checkpoint_nums.append(current_checkpoint_num)
        self._payoff_table_checkpoint_paths.append(self.get_latest_numbered_payoff_table_checkpoint_path())
        self._policy_nums_checkpoint_paths.append(self.get_latest_numbered_policy_nums_path())
        # Drop the throwaway eval policies so their resources are released.
        del policies[1]
        del policies[0]
        stats_out = {'num_policies': self._num_policies_per_generation, 'exploitability': self._exploitability_per_generation, 'timesteps': self._total_steps_per_generation, 'episodes': self._total_episodes_per_generation, 'payoff_table_checkpoint_num': self._payoff_table_checkpoint_nums, 'payoff_table_checkpoint_path': self._payoff_table_checkpoint_paths, 'policy_nums_checkpoint_path': self._policy_nums_checkpoint_paths}
        with open(self._exploitability_stats_save_path, '+w') as json_file:
            json.dump(stats_out, json_file)
        logger.info(colored(f'(Graph this in a notebook) Saved exploitability stats to {self._exploitability_stats_save_path}', 'green'))
def make_lr_cdb_scheduler(cfg, optimizer):
    """Build the warmup multi-step LR scheduler for the CDB branch from
    the cfg.SOLVER_CDB settings."""
    solver_cfg = cfg.SOLVER_CDB
    return WarmupMultiStepLR(
        optimizer,
        solver_cfg.STEPS,
        solver_cfg.GAMMA,
        warmup_factor=solver_cfg.WARMUP_FACTOR,
        warmup_iters=solver_cfg.WARMUP_ITERS,
        warmup_method=solver_cfg.WARMUP_METHOD,
    )
class TestHeatSphere(unittest.TestCase):
    """Checks that the spectral kernel on the sphere solves the heat equation."""

    def test_sphere_heat_kernel(self):
        """For each (t, x, y): df/dt must equal the manifold Laplacian of the
        kernel in x — i.e. the kernel behaves as a heat kernel when its
        lengthscale is tied to diffusion time via l = sqrt(2t)."""
        grid_size = 4
        nb_samples = 10
        n = 5
        # _TRUNCATION_LEVEL is a module-level constant controlling the
        # eigenbasis truncation order.
        space = Sphere(n=n, order=_TRUNCATION_LEVEL)
        ts = torch.linspace(0.1, 1, grid_size, requires_grad=True)
        xs = space.rand(nb_samples).requires_grad_(True)
        ys = xs
        measure = SqExpSpectralMeasure(space.dim, 1.0)
        kernel = EigenbasisSumKernel(measure=measure, manifold=space)

        def heat_kernel(t, x, y):
            # Reparameterize the SqExp lengthscale as sqrt(2t) and disable
            # normalization so the kernel matches the heat-kernel scaling.
            kernel.measure.lengthscale = torch.sqrt((2 * t.view(1)))
            kernel.normalizer = 1
            return kernel(x, y)
        for t in ts:
            for x in xs:
                for y in ys:
                    # Time derivative of the kernel at (t, x, y).
                    (dfdt, _, _) = torch.autograd.grad(heat_kernel(t, x[None], y[None]), (t, x, y), allow_unused=True)
                    # Euclidean gradient / Hessian-vector product in x, used to
                    # assemble the manifold Laplacian.
                    egrad = (lambda u: torch.autograd.grad(heat_kernel(t, u[None], y[None]), (t, u, y))[1])
                    fx = (lambda u: heat_kernel(t, u[None], y[None]))
                    ehess = (lambda u, h: torch.autograd.functional.hvp(fx, u, h)[1])
                    lapf = manifold_laplacian(x, space, egrad, ehess)
                    # Heat equation: df/dt == Laplace-Beltrami of f.
                    self.assertTrue(np.isclose(dfdt.detach().numpy(), lapf, atol=1e-05))
                    print('passed')
class RandomDirectionEmitter(EmitterBase):
    """CMA-ES emitter that ranks solutions by their projection onto a random
    direction in behavior space, restarting with a fresh direction whenever
    the optimizer converges or stops producing archive improvements."""

    def __init__(self, archive, x0, sigma0, selection_rule='filter', restart_rule='no_improvement', weight_rule='truncation', bounds=None, batch_size=None, seed=None):
        self._rng = np.random.default_rng(seed)
        self._batch_size = batch_size
        self._x0 = np.array(x0, dtype=archive.dtype)
        self._sigma0 = sigma0
        EmitterBase.__init__(self, archive, len(self._x0), bounds)
        if (selection_rule not in ['mu', 'filter']):
            raise ValueError(f'Invalid selection_rule {selection_rule}')
        self._selection_rule = selection_rule
        if (restart_rule not in ['basic', 'no_improvement']):
            raise ValueError(f'Invalid restart_rule {restart_rule}')
        self._restart_rule = restart_rule
        # Derive the optimizer's seed from ours so runs stay reproducible.
        opt_seed = (None if (seed is None) else self._rng.integers(10000))
        self.opt = CMAEvolutionStrategy(sigma0, batch_size, self._solution_dim, weight_rule, opt_seed, self.archive.dtype)
        self.opt.reset(self._x0)
        # 'mu' selection keeps the top half of the batch as parents;
        # 'filter' counts archive insertions instead (set per tell()).
        self._num_parents = ((self.opt.batch_size // 2) if (selection_rule == 'mu') else None)
        self._target_behavior_dir = self._generate_random_direction()
        # The optimizer may have picked its own batch size if None was given.
        self._batch_size = self.opt.batch_size
        self._restarts = 0

    # NOTE(review): x0/sigma0/batch_size read like they were meant to be
    # @property accessors — confirm how callers invoke them.
    def x0(self):
        return self._x0

    def sigma0(self):
        return self._sigma0

    def batch_size(self):
        return self._batch_size

    def ask(self, grad_estimate=False):
        """Sample a new batch of solutions from CMA-ES within the bounds."""
        return self.opt.ask(self.lower_bounds, self.upper_bounds)

    def _generate_random_direction(self):
        """Draw a random behavior-space direction, scaled per-dimension by the
        archive's behavior ranges so all dimensions contribute comparably."""
        ranges = (self.archive.upper_bounds - self.archive.lower_bounds)
        behavior_dim = len(ranges)
        unscaled_dir = self._rng.standard_normal(behavior_dim)
        return (unscaled_dir * ranges)

    def _check_restart(self, num_parents):
        """Restart when 'no_improvement' is configured and nothing was added."""
        if (self._restart_rule == 'no_improvement'):
            return (num_parents == 0)
        return False

    def tell(self, solutions, objective_values, behavior_values, jacobian=None, metadata=None):
        """Insert evaluated solutions into the archive, rank them for CMA-ES,
        and restart (new elite start point + new direction) if converged."""
        ranking_data = []
        new_sols = 0
        metadata = (itertools.repeat(None) if (metadata is None) else metadata)
        for (i, (sol, obj, beh, meta)) in enumerate(zip(solutions, objective_values, behavior_values, metadata)):
            (status, _) = self.archive.add(sol, obj, beh, meta)
            added = bool(status)
            # Rank by progress along the current random behavior direction.
            projection = np.dot(beh, self._target_behavior_dir)
            ranking_data.append((added, projection, i))
            if added:
                new_sols += 1
        if (self._selection_rule == 'filter'):
            # Archive insertions first, then projection as tie-breaker.
            key = (lambda x: (x[0], x[1]))
        elif (self._selection_rule == 'mu'):
            key = (lambda x: x[1])
        ranking_data.sort(reverse=True, key=key)
        indices = [d[2] for d in ranking_data]
        num_parents = (new_sols if (self._selection_rule == 'filter') else self._num_parents)
        self.opt.tell(solutions[indices], num_parents)
        # Restart from a random elite with a brand-new target direction.
        if (self.opt.check_stop([projection for (status, projection, i) in ranking_data]) or self._check_restart(new_sols)):
            new_x0 = self.archive.get_random_elite()[0]
            self.opt.reset(new_x0)
            self._target_behavior_dir = self._generate_random_direction()
            self._restarts += 1
def log_scaffold_stats(data: 'MoleculeDataset', index_sets: List[Set[int]], num_scaffolds: int=10, num_labels: int=20, logger: logging.Logger=None) -> List[Tuple[List[float], List[int]]]:
    """Compute per-scaffold target statistics and optionally log them.

    Args:
        data: dataset indexable by int, whose items expose ``.targets``.
        index_sets: one set of dataset indices per scaffold, assumed to be
            ordered by decreasing scaffold frequency.
        num_scaffolds: cap on how many scaffolds to report.
        num_labels: cap on how many target columns to report per scaffold.
        logger: if given, the stats are emitted at debug level.

    Returns:
        A list of (mean-target-values, non-NaN counts) tuples, one per
        reported scaffold.
    """
    target_avgs = []
    counts = []
    for index_set in index_sets:
        data_set = [data[i] for i in index_set]
        targets = [d.targets for d in data_set]
        # Fix: np.float was removed in NumPy >= 1.20; use the builtin float.
        targets = np.array(targets, dtype=float)
        # nanmean/count_nonzero tolerate missing (NaN) target entries.
        target_avgs.append(np.nanmean(targets, axis=0))
        counts.append(np.count_nonzero((~ np.isnan(targets)), axis=0))
    stats = [(target_avgs[i][:num_labels], counts[i][:num_labels]) for i in range(min(num_scaffolds, len(target_avgs)))]
    if (logger is not None):
        logger.debug(f'Label averages per scaffold, in decreasing order of scaffold frequency,capped at {num_scaffolds} scaffolds and {num_labels} labels: {stats}')
    return stats
def test_initial_solutions_shape(archive_fixture):
    """GaussianEmitter must reject initial_solutions whose dimensionality
    does not match the archive's solution dimension."""
    archive = archive_fixture[0]
    bad_solutions = [[0, 0, 0], [1, 1, 1]]
    with pytest.raises(ValueError):
        GaussianEmitter(archive, sigma=1.0, initial_solutions=bad_solutions)
def try_wrapper(func):
    """Decorator: retry *func* up to TRY_CNT times with capped exponential
    backoff (sleeps min(1024, 2 ** (attempt / 2)) seconds between attempts).

    Returns the first successful result; implicitly returns None if every
    attempt fails (preserving the original best-effort behavior).
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's name/docstring
    def inner(*args, **kwargs):
        attempt = 0
        while (attempt < TRY_CNT):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                # Fix: the original printed the literal text 'func()';
                # report the actual function name instead.
                print(f'{func.__name__}() failed, try again... (No. {(attempt + 1)}). Error: {e}')
                attempt += 1
                time.sleep(min(1024, (2 ** (attempt / 2))))
                continue
    return inner
def main(exp, args, num_gpu):
    """Run MOT evaluation with the MOTDT tracker, then score the produced
    track files against the MOT ground truth with motmetrics.

    Args:
        exp: experiment object providing model/dataloader factories and
            test-time settings (conf threshold, NMS threshold, input size).
        args: parsed CLI namespace (seed, ckpt, trt/fuse/speed flags, ...).
        num_gpu: number of GPUs; > 1 switches on distributed evaluation.
    """
    if (args.seed is not None):
        # Deterministic CUDNN trades speed for reproducibility.
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed testing. This will turn on the CUDNN deterministic setting, ')
    is_distributed = (num_gpu > 1)
    cudnn.benchmark = True
    rank = args.local_rank
    file_name = os.path.join(exp.output_dir, args.experiment_name)
    if (rank == 0):
        os.makedirs(file_name, exist_ok=True)
    results_folder = os.path.join(file_name, 'track_results_motdt')
    os.makedirs(results_folder, exist_ok=True)
    model_folder = args.model_folder
    setup_logger(file_name, distributed_rank=rank, filename='val_log.txt', mode='a')
    logger.info('Args: {}'.format(args))
    # CLI overrides for test-time thresholds / input size.
    if (args.conf is not None):
        exp.test_conf = args.conf
    if (args.nms is not None):
        exp.nmsthre = args.nms
    if (args.tsize is not None):
        exp.test_size = (args.tsize, args.tsize)
    model = exp.get_model()
    logger.info('Model Summary: {}'.format(get_model_info(model, exp.test_size)))
    val_loader = exp.get_eval_loader(args.batch_size, is_distributed, args.test)
    evaluator = MOTEvaluator(args=args, dataloader=val_loader, img_size=exp.test_size, confthre=exp.test_conf, nmsthre=exp.nmsthre, num_classes=exp.num_classes)
    torch.cuda.set_device(rank)
    model.cuda(rank)
    model.eval()
    # Load checkpoint weights unless benchmarking speed or using TensorRT.
    if ((not args.speed) and (not args.trt)):
        if (args.ckpt is None):
            ckpt_file = os.path.join(file_name, 'best_ckpt.pth.tar')
        else:
            ckpt_file = args.ckpt
        logger.info('loading checkpoint')
        loc = 'cuda:{}'.format(rank)
        ckpt = torch.load(ckpt_file, map_location=loc)
        model.load_state_dict(ckpt['model'])
        logger.info('loaded checkpoint done.')
    if is_distributed:
        model = DDP(model, device_ids=[rank])
    if args.fuse:
        # Fuse conv+bn layers for faster inference.
        logger.info('\tFusing model...')
        model = fuse_model(model)
    if args.trt:
        assert ((not args.fuse) and (not is_distributed) and (args.batch_size == 1)), 'TensorRT model is not support model fusing and distributed inferencing!'
        trt_file = os.path.join(file_name, 'model_trt.pth')
        assert os.path.exists(trt_file), 'TensorRT model is not found!\n Run tools/trt.py first!'
        # TensorRT engine outputs raw head tensors; decode on the Python side.
        model.head.decode_in_inference = False
        decoder = model.head.decode_outputs
    else:
        trt_file = None
        decoder = None
    (*_, summary) = evaluator.evaluate_motdt(model, is_distributed, args.fp16, trt_file, decoder, exp.test_size, results_folder, model_folder)
    logger.info(('\n' + summary))
    # ---- MOTChallenge scoring of the written track files ----
    mm.lap.default_solver = 'lap'
    gt_type = '_val_half'
    print('gt_type', gt_type)
    gtfiles = glob.glob(os.path.join('datasets/mot/train', '*/gt/gt{}.txt'.format(gt_type)))
    print('gt_files', gtfiles)
    # Skip any 'eval*' helper files in the results folder.
    tsfiles = [f for f in glob.glob(os.path.join(results_folder, '*.txt')) if (not os.path.basename(f).startswith('eval'))]
    logger.info('Found {} groundtruths and {} test files.'.format(len(gtfiles), len(tsfiles)))
    logger.info('Available LAP solvers {}'.format(mm.lap.available_solvers))
    logger.info("Default LAP solver '{}'".format(mm.lap.default_solver))
    logger.info('Loading files.')
    # Keyed by sequence name: 3rd-from-last path part for GT, file stem for results.
    gt = OrderedDict([(Path(f).parts[(- 3)], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=1)) for f in gtfiles])
    ts = OrderedDict([(os.path.splitext(Path(f).parts[(- 1)])[0], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=(- 1))) for f in tsfiles])
    mh = mm.metrics.create()
    (accs, names) = compare_dataframes(gt, ts)
    logger.info('Running metrics')
    metrics = ['recall', 'precision', 'num_unique_objects', 'mostly_tracked', 'partially_tracked', 'mostly_lost', 'num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations', 'mota', 'motp', 'num_objects']
    summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True)
    # Normalize raw counts into rates (divide by object / unique-object counts).
    div_dict = {'num_objects': ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations'], 'num_unique_objects': ['mostly_tracked', 'partially_tracked', 'mostly_lost']}
    for divisor in div_dict:
        for divided in div_dict[divisor]:
            summary[divided] = (summary[divided] / summary[divisor])
    fmt = mh.formatters
    # Render the normalized count columns with the same percentage formatter as MOTA.
    change_fmt_list = ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations', 'mostly_tracked', 'partially_tracked', 'mostly_lost']
    for k in change_fmt_list:
        fmt[k] = fmt['mota']
    print(mm.io.render_summary(summary, formatters=fmt, namemap=mm.io.motchallenge_metric_names))
    # Second pass: the full standard MOTChallenge metric set, raw counts.
    metrics = (mm.metrics.motchallenge_metrics + ['num_objects'])
    summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True)
    print(mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names))
    logger.info('Completed')
def D_loss(G, D, reals, labels, minibatch_size, loss_type, reg_type, gamma=10.0, wgan_epsilon=0.001, wgan_target=1.0, **kwargs):
    """Discriminator loss for TF1-graph GAN training.

    Args:
        G, D: generator / discriminator networks (tflib-style).
        reals: batch of real images; labels: conditioning labels.
        loss_type: 'logistic' | 'hinge' | 'wgan'.
        reg_type: 'r1' | 'r2' (zero-centered gradient penalties) or
            'gp' (WGAN-GP penalty on interpolated samples).
        gamma: penalty weight; wgan_epsilon/wgan_target: WGAN drift-penalty
            weight and target gradient norm.

    Returns:
        (loss, reg): per-sample loss tensor and penalty tensor
        (reg is None when no gradient penalty applies).
    """
    # Score fakes generated from fresh latents, and the real batch.
    latents = tf.random_normal(([minibatch_size] + G.input_shapes[0][1:]))
    fake_imgs_out = G.get_output_for(latents, labels, is_training=True)[0]
    real_scores_out = D.get_output_for(reals, labels, is_training=True)
    fake_scores_out = D.get_output_for(fake_imgs_out, labels, is_training=True)
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
    if (loss_type == 'logistic'):
        # Non-saturating logistic loss: softplus(f) + softplus(-r).
        loss = tf.nn.softplus(fake_scores_out)
        loss += tf.nn.softplus((- real_scores_out))
    elif (loss_type == 'hinge'):
        loss = tf.maximum(0.0, (1.0 + fake_scores_out))
        loss += tf.maximum(0.0, (1.0 - real_scores_out))
    elif (loss_type == 'wgan'):
        loss = (fake_scores_out - real_scores_out)
        # Epsilon drift penalty keeps real scores from drifting far from 0.
        with tf.name_scope('EpsilonPenalty'):
            epsilon_penalty = autosummary('Loss/epsilon_penalty', tf.square(real_scores_out))
            loss += (epsilon_penalty * wgan_epsilon)
    reg = None
    with tf.name_scope('GradientPenalty'):
        if (reg_type in ['r1', 'r2']):
            # r1 penalizes the gradient norm at reals, r2 at fakes.
            if (reg_type == 'r1'):
                grads = tf.gradients(tf.reduce_sum(real_scores_out), [reals])[0]
            else:
                grads = tf.gradients(tf.reduce_sum(fake_scores_out), [fake_imgs_out])[0]
            gradient_penalty = tf.reduce_sum(tf.square(grads), axis=[1, 2, 3])
            gradient_penalty = autosummary('Loss/gradient_penalty', gradient_penalty)
            reg = (gradient_penalty * (gamma * 0.5))
        elif (reg_type == 'gp'):
            # WGAN-GP: penalize the gradient norm at random real/fake mixes.
            mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=fake_imgs_out.dtype)
            mixed_imgs_out = tflib.lerp(tf.cast(reals, fake_imgs_out.dtype), fake_imgs_out, mixing_factors)
            mixed_scores_out = D.get_output_for(mixed_imgs_out, labels, is_training=True)
            mixed_scores_out = autosummary('Loss/scores/mixed', mixed_scores_out)
            mixed_grads = tf.gradients(tf.reduce_sum(mixed_scores_out), [mixed_imgs_out])[0]
            mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1, 2, 3]))
            mixed_norms = autosummary('Loss/mixed_norms', mixed_norms)
            gradient_penalty = tf.square((mixed_norms - wgan_target))
            reg = (gradient_penalty * (gamma / (wgan_target ** 2)))
    return (loss, reg)
def rectangular_coordinates(size: tuple) -> Tensor:
    """Return a dense grid of normalized coordinates over the unit box.

    For ``size = (n1, ..., nd)`` the result has shape ``(n1, ..., nd, d)``,
    where the last axis holds each grid point's coordinates in [0, 1].
    """
    axes = [torch.linspace(0.0, 1.0, n) for n in size]
    grid = torch.meshgrid(*axes, indexing='ij')
    return torch.stack(grid, dim=-1)
def main():
    """Spawn one training process per local rank and wait for all of them.

    Exports MASTER_ADDR/MASTER_PORT/WORLD_SIZE plus a per-child RANK and
    LOCAL_RANK, launches ``args.nproc_per_node`` copies of the training
    script with an extra ``--dist`` flag, and raises CalledProcessError if
    any child exits non-zero.
    """
    args = parse_args()
    world_size = args.nproc_per_node * args.nnodes
    env = os.environ.copy()
    env['MASTER_ADDR'] = args.master_addr
    env['MASTER_PORT'] = str(args.master_port)
    env['WORLD_SIZE'] = str(world_size)
    children = []
    for local_rank in range(args.nproc_per_node):
        # Global rank = node offset + local rank on this node.
        env['RANK'] = str(args.nproc_per_node * args.node_rank + local_rank)
        env['LOCAL_RANK'] = str(local_rank)
        cmd = [sys.executable, '-u', args.training_script, *args.training_script_args, '--dist']
        # Popen snapshots env at spawn time, so mutating it next iteration is safe.
        children.append(subprocess.Popen(cmd, env=env))
    for child in children:
        child.wait()
        if child.returncode != 0:
            raise subprocess.CalledProcessError(returncode=child.returncode, cmd=child.args)
def cal_gcmvn_stats(features_list):
    """Compute global CMVN (mean/variance normalization) statistics.

    Args:
        features_list: list of 2-D arrays, each (num_frames, feat_dim).

    Returns:
        dict with float32 'mean' and 'std' vectors over all frames.
    """
    features = np.concatenate(features_list)
    # Var = E[x^2] - E[x]^2; clamp away from zero for numerical safety.
    # (A dead statement that subtracted the mean from `features` without
    # using the result has been removed — it had no effect on the output.)
    square_sums = (features ** 2).sum(axis=0)
    mean = features.mean(axis=0)
    var = square_sums / features.shape[0] - mean ** 2
    std = np.sqrt(np.maximum(var, 1e-08))
    return {'mean': mean.astype('float32'), 'std': std.astype('float32')}
def crps_loss(model, y, x, q_list, device, args):
    """Quantile-averaged squared-error loss over a fixed grid of 101 quantiles.

    NOTE(review): the ``q_list`` argument is ignored and replaced by the
    fixed grid 0.00..1.00 — this matches the original code but confirm it is
    intentional with callers.  ``args`` is accepted for signature
    compatibility and unused.

    Args:
        model: callable mapping the stacked input to predictions (N, 1).
        y: targets of shape (num_pts, 1).
        x: optional features of shape (num_pts, d); None for marginal models.
        q_list: ignored (see note above).
        device: device the quantile column is moved to.
        args: unused.

    Returns:
        Scalar tensor: mean squared difference between predictions and targets.
    """
    num_pts = y.size(0)
    q_grid = torch.arange(101) / 100.0  # fixed quantile levels, overrides q_list
    num_q = q_grid.size(0)
    # One column stacking every (quantile, point) pair: shape (num_q*num_pts, 1).
    q_rep = q_grid.view(-1, 1).repeat(1, num_pts).view(-1, 1).to(device)
    y_stacked = y.repeat(num_q, 1)
    # (Removed dead locals from the original: a reshaped copy of y_stacked
    # was computed and never used.)
    if x is None:
        model_in = q_rep
    else:
        model_in = torch.cat([x.repeat(num_q, 1), q_rep], dim=1)
    pred_y = model(model_in)
    return torch.mean((pred_y - y_stacked) ** 2)
def get_git_sha(repo=None):
    """Return the current HEAD commit hash of *repo* (the cwd when None).

    Uses subprocess.run rather than a bare Popen so the pipe is always
    drained and the child is reaped even on error paths.
    """
    result = subprocess.run(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE, cwd=repo, check=False)
    return result.stdout.decode('UTF-8').strip()
def get_initial_model(params, train_seed):
    """Instantiate and randomize a model according to the experiment config.

    Looks the model class up in ``model_dict`` by name, then randomizes the
    kernel weights, biases, and diagonal within the configured ranges.
    """
    model_cls = model_dict[params['model_name']]
    model = model_cls(seed=train_seed, **params['model_kwargs'])
    kernel_cfg = params['randomize_kernel_weight']
    bias_cfg = params['randomize_bias']
    diag_cfg = params['randomize_diagonal']
    # Kernel weights and biases are randomized in separate passes, each
    # excluding the other parameter group.
    model.randomize_params(kernel_cfg['high'], kernel_cfg['low'], except_for=['bias'])
    model.randomize_params(bias_cfg['high'], bias_cfg['low'], except_for=['kernel_weight'])
    model.randomize_diagonal(diag_cfg['high'], diag_cfg['low'])
    return model
def _get_right_parentheses_index_(s):
left_paren_count = 0
for (index, x) in enumerate(s):
if (x == '('):
left_paren_count += 1
elif (x == ')'):
left_paren_count -= 1
if (left_paren_count == 0):
return index
else:
pass
return None |
class ActualIndexDataset():
    """Dataset wrapper that augments every sample (and batch) with its index.

    NOTE(review): methods delegate via ``super(ActualIndexDataset, self)``
    but no base class is visible here — this class is presumably combined
    with a real dataset class via multiple inheritance; confirm the MRO at
    the usage site.
    """
    def get_collate_fn(self):
        # Wrap the parent's collate_fn so the collated batch also carries the
        # raw per-sample indices under the 'index' key.
        def collate_fn(batch):
            collated = {**super(ActualIndexDataset, self).get_collate_fn()(batch), 'index': [s['index'] for s in batch]}
            return collated
        return collate_fn
    def __getitem__(self, index):
        # Merge the parent's sample dict with the requested index.
        return {**super(ActualIndexDataset, self).__getitem__(index), 'index': index}
    def __repr__(self):
        return ('IndexDataset with original dataset being: \n' + super(ActualIndexDataset, self).__repr__())
class MEInitBlock(nn.Module):
    """MENet initial block: 3x3 stride-2 conv, batch norm, ReLU, then a 3x3
    stride-2 max-pool — reducing spatial resolution by 4x overall.
    """
    def __init__(self, in_channels, out_channels):
        super(MEInitBlock, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn = nn.BatchNorm2d(out_channels)
        self.activ = nn.ReLU(inplace=True)
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    def forward(self, x):
        # conv -> bn -> relu -> pool, applied as a single chain.
        return self.pool(self.activ(self.bn(self.conv(x))))
class UNet(nn.Module):
    """3D U-Net with residual context blocks and deep supervision.

    Encoder: five levels of 3D convolutions (stride-2 downsampling between
    levels) with instance norm, leaky ReLU, and 3D dropout; each level adds
    a residual connection.  Decoder: nearest-neighbor upsampling with skip
    concatenations from the matching encoder level.  Two intermediate
    decoder outputs (ds2, ds3) are projected to ``n_classes`` and summed
    into the final logits (deep supervision).
    """
    def __init__(self, in_channels, n_classes, base_n_filter=8):
        super(UNet, self).__init__()
        self.in_channels = in_channels
        self.n_classes = n_classes
        self.base_n_filter = base_n_filter
        self.lrelu = nn.LeakyReLU()
        self.dropout3d = nn.Dropout3d(p=0.6)
        # NOTE: 'upsacle' is a typo for 'upscale'; left unchanged in this
        # documentation-only pass.
        self.upsacle = nn.Upsample(scale_factor=2, mode='nearest')
        # --- Encoder (context pathway), channels double at each level ---
        self.conv3d_c1_1 = nn.Conv3d(self.in_channels, self.base_n_filter, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv3d_c1_2 = nn.Conv3d(self.base_n_filter, self.base_n_filter, kernel_size=3, stride=1, padding=1, bias=False)
        self.lrelu_conv_c1 = self.lrelu_conv(self.base_n_filter, self.base_n_filter)
        self.inorm3d_c1 = nn.InstanceNorm3d(self.base_n_filter)
        self.conv3d_c2 = nn.Conv3d(self.base_n_filter, (self.base_n_filter * 2), kernel_size=3, stride=2, padding=1, bias=False)
        self.norm_lrelu_conv_c2 = self.norm_lrelu_conv((self.base_n_filter * 2), (self.base_n_filter * 2))
        self.inorm3d_c2 = nn.InstanceNorm3d((self.base_n_filter * 2))
        self.conv3d_c3 = nn.Conv3d((self.base_n_filter * 2), (self.base_n_filter * 4), kernel_size=3, stride=2, padding=1, bias=False)
        self.norm_lrelu_conv_c3 = self.norm_lrelu_conv((self.base_n_filter * 4), (self.base_n_filter * 4))
        self.inorm3d_c3 = nn.InstanceNorm3d((self.base_n_filter * 4))
        self.conv3d_c4 = nn.Conv3d((self.base_n_filter * 4), (self.base_n_filter * 8), kernel_size=3, stride=2, padding=1, bias=False)
        self.norm_lrelu_conv_c4 = self.norm_lrelu_conv((self.base_n_filter * 8), (self.base_n_filter * 8))
        self.inorm3d_c4 = nn.InstanceNorm3d((self.base_n_filter * 8))
        self.conv3d_c5 = nn.Conv3d((self.base_n_filter * 8), (self.base_n_filter * 16), kernel_size=3, stride=2, padding=1, bias=False)
        self.norm_lrelu_conv_c5 = self.norm_lrelu_conv((self.base_n_filter * 16), (self.base_n_filter * 16))
        # --- Decoder (localization pathway) ---
        self.norm_lrelu_upscale_conv_norm_lrelu_l0 = self.norm_lrelu_upscale_conv_norm_lrelu((self.base_n_filter * 16), (self.base_n_filter * 8))
        self.conv3d_l0 = nn.Conv3d((self.base_n_filter * 8), (self.base_n_filter * 8), kernel_size=1, stride=1, padding=0, bias=False)
        self.inorm3d_l0 = nn.InstanceNorm3d((self.base_n_filter * 8))
        self.conv_norm_lrelu_l1 = self.conv_norm_lrelu((self.base_n_filter * 16), (self.base_n_filter * 16))
        self.conv3d_l1 = nn.Conv3d((self.base_n_filter * 16), (self.base_n_filter * 8), kernel_size=1, stride=1, padding=0, bias=False)
        self.norm_lrelu_upscale_conv_norm_lrelu_l1 = self.norm_lrelu_upscale_conv_norm_lrelu((self.base_n_filter * 8), (self.base_n_filter * 4))
        self.conv_norm_lrelu_l2 = self.conv_norm_lrelu((self.base_n_filter * 8), (self.base_n_filter * 8))
        self.conv3d_l2 = nn.Conv3d((self.base_n_filter * 8), (self.base_n_filter * 4), kernel_size=1, stride=1, padding=0, bias=False)
        self.norm_lrelu_upscale_conv_norm_lrelu_l2 = self.norm_lrelu_upscale_conv_norm_lrelu((self.base_n_filter * 4), (self.base_n_filter * 2))
        self.conv_norm_lrelu_l3 = self.conv_norm_lrelu((self.base_n_filter * 4), (self.base_n_filter * 4))
        self.conv3d_l3 = nn.Conv3d((self.base_n_filter * 4), (self.base_n_filter * 2), kernel_size=1, stride=1, padding=0, bias=False)
        self.norm_lrelu_upscale_conv_norm_lrelu_l3 = self.norm_lrelu_upscale_conv_norm_lrelu((self.base_n_filter * 2), self.base_n_filter)
        self.conv_norm_lrelu_l4 = self.conv_norm_lrelu((self.base_n_filter * 2), (self.base_n_filter * 2))
        self.conv3d_l4 = nn.Conv3d((self.base_n_filter * 2), self.n_classes, kernel_size=1, stride=1, padding=0, bias=False)
        # Deep-supervision heads projecting intermediate features to logits.
        self.ds2_1x1_conv3d = nn.Conv3d((self.base_n_filter * 8), self.n_classes, kernel_size=1, stride=1, padding=0, bias=False)
        self.ds3_1x1_conv3d = nn.Conv3d((self.base_n_filter * 4), self.n_classes, kernel_size=1, stride=1, padding=0, bias=False)
    def conv_norm_lrelu(self, feat_in, feat_out):
        # conv -> instance norm -> leaky ReLU.
        return nn.Sequential(nn.Conv3d(feat_in, feat_out, kernel_size=3, stride=1, padding=1, bias=False), nn.InstanceNorm3d(feat_out), nn.LeakyReLU())
    def norm_lrelu_conv(self, feat_in, feat_out):
        # instance norm -> leaky ReLU -> conv (pre-activation order).
        return nn.Sequential(nn.InstanceNorm3d(feat_in), nn.LeakyReLU(), nn.Conv3d(feat_in, feat_out, kernel_size=3, stride=1, padding=1, bias=False))
    def lrelu_conv(self, feat_in, feat_out):
        # leaky ReLU -> conv.
        return nn.Sequential(nn.LeakyReLU(), nn.Conv3d(feat_in, feat_out, kernel_size=3, stride=1, padding=1, bias=False))
    def norm_lrelu_upscale_conv_norm_lrelu(self, feat_in, feat_out):
        # Pre-activation, 2x nearest-neighbor upsample, conv, post-activation.
        return nn.Sequential(nn.InstanceNorm3d(feat_in), nn.LeakyReLU(), nn.Upsample(scale_factor=2, mode='nearest'), nn.Conv3d(feat_in, feat_out, kernel_size=3, stride=1, padding=1, bias=False), nn.InstanceNorm3d(feat_out), nn.LeakyReLU())
    def forward(self, x):
        # --- Encoder level 1 (full resolution) ---
        out = self.conv3d_c1_1(x)
        residual_1 = out
        out = self.lrelu(out)
        out = self.conv3d_c1_2(out)
        out = self.dropout3d(out)
        out = self.lrelu_conv_c1(out)
        out += residual_1
        # NOTE(review): context_1 is taken before the instance norm below,
        # unlike the other levels (which take their skip after norm+lrelu) —
        # this mirrors the original implementation; confirm it is intended.
        context_1 = self.lrelu(out)
        out = self.inorm3d_c1(out)
        out = self.lrelu(out)
        # --- Encoder level 2 (1/2 resolution) ---
        out = self.conv3d_c2(out)
        residual_2 = out
        out = self.norm_lrelu_conv_c2(out)
        out = self.dropout3d(out)
        out = self.norm_lrelu_conv_c2(out)
        out += residual_2
        out = self.inorm3d_c2(out)
        out = self.lrelu(out)
        context_2 = out
        # --- Encoder level 3 (1/4 resolution) ---
        out = self.conv3d_c3(out)
        residual_3 = out
        out = self.norm_lrelu_conv_c3(out)
        out = self.dropout3d(out)
        out = self.norm_lrelu_conv_c3(out)
        out += residual_3
        out = self.inorm3d_c3(out)
        out = self.lrelu(out)
        context_3 = out
        # --- Encoder level 4 (1/8 resolution) ---
        out = self.conv3d_c4(out)
        residual_4 = out
        out = self.norm_lrelu_conv_c4(out)
        out = self.dropout3d(out)
        out = self.norm_lrelu_conv_c4(out)
        out += residual_4
        out = self.inorm3d_c4(out)
        out = self.lrelu(out)
        context_4 = out
        # --- Bottleneck (1/16 resolution) ---
        out = self.conv3d_c5(out)
        residual_5 = out
        out = self.norm_lrelu_conv_c5(out)
        out = self.dropout3d(out)
        out = self.norm_lrelu_conv_c5(out)
        out += residual_5
        # --- Decoder: upsample and fuse with encoder skips ---
        out = self.norm_lrelu_upscale_conv_norm_lrelu_l0(out)
        out = self.conv3d_l0(out)
        out = self.inorm3d_l0(out)
        out = self.lrelu(out)
        out = torch.cat([out, context_4], dim=1)
        out = self.conv_norm_lrelu_l1(out)
        out = self.conv3d_l1(out)
        out = self.norm_lrelu_upscale_conv_norm_lrelu_l1(out)
        out = torch.cat([out, context_3], dim=1)
        out = self.conv_norm_lrelu_l2(out)
        ds2 = out  # deep-supervision tap at 1/4 resolution
        out = self.conv3d_l2(out)
        out = self.norm_lrelu_upscale_conv_norm_lrelu_l2(out)
        out = torch.cat([out, context_2], dim=1)
        out = self.conv_norm_lrelu_l3(out)
        ds3 = out  # deep-supervision tap at 1/2 resolution
        out = self.conv3d_l3(out)
        out = self.norm_lrelu_upscale_conv_norm_lrelu_l3(out)
        out = torch.cat([out, context_1], dim=1)
        out = self.conv_norm_lrelu_l4(out)
        out_pred = self.conv3d_l4(out)
        # Sum the upscaled deep-supervision logits into the final prediction.
        ds2_1x1_conv = self.ds2_1x1_conv3d(ds2)
        ds1_ds2_sum_upscale = self.upsacle(ds2_1x1_conv)
        ds3_1x1_conv = self.ds3_1x1_conv3d(ds3)
        ds1_ds2_sum_upscale_ds3_sum = (ds1_ds2_sum_upscale + ds3_1x1_conv)
        ds1_ds2_sum_upscale_ds3_sum_upscale = self.upsacle(ds1_ds2_sum_upscale_ds3_sum)
        out = (out_pred + ds1_ds2_sum_upscale_ds3_sum_upscale)
        seg_layer = out
        return seg_layer
class TestMXNetGluonMultipleInput(TestCase):
    """Integration test: MXNet Gluon Estimator with multi-input data iterators."""
    def test_gluon_multiple_input(self):
        # Fixed seed plus a small Adagrad learning rate keeps the short run
        # deterministic enough for CI.
        train_config = create_config(
            log_interval=2,
            optimizer='adagrad',
            seed=1128,
            optimizer_params={'learning_rate': 0.02},
        )
        est = Estimator.from_mxnet(
            config=train_config,
            model_creator=get_model,
            loss_creator=get_loss,
            eval_metrics_creator=get_metrics,
            validation_metrics_creator=get_metrics,
            num_workers=4,
        )
        est.fit(get_train_data_iter, validation_data=get_test_data_iter, epochs=2)
        est.shutdown()
def get_dataloaders(dataset, val_dataset=None, batch_size=None, val_batch_size=None, drop_last=True, val_drop_last=False, shuffle_train=False, pin_memory=True, num_workers=0, persistent_workers=True):
    """Create a training DataLoader, plus a validation one when requested.

    A batch size of None means full-batch (the whole dataset per batch).
    Persistent workers only make sense with worker processes, so the flag is
    cleared when ``num_workers`` is 0.

    Returns:
        The train loader, or a (train, val) tuple when ``val_dataset`` is given.
    """
    if num_workers == 0:
        persistent_workers = False
    train_loader = DataLoader(
        dataset,
        batch_size=len(dataset) if batch_size is None else batch_size,
        drop_last=drop_last,
        num_workers=num_workers,
        pin_memory=pin_memory,
        shuffle=shuffle_train,
        persistent_workers=persistent_workers,
    )
    if not val_dataset:
        return train_loader
    val_loader = DataLoader(
        val_dataset,
        batch_size=len(val_dataset) if val_batch_size is None else val_batch_size,
        drop_last=val_drop_last,
        num_workers=num_workers,
        pin_memory=pin_memory,
        persistent_workers=persistent_workers,
    )
    return (train_loader, val_loader)
def test_dataset_wrapper():
    """Exercise ConcatDataset, RepeatDataset and ClassBalancedDataset wrappers.

    Builds two mocked CustomDatasets whose __getitem__ simply returns the
    index, then checks that each wrapper maps global indices and category
    ids back to the right underlying sample.
    """
    # Mock out annotation loading and item access so no files are touched;
    # __getitem__ returns the index itself, making index mapping observable.
    CustomDataset.load_annotations = MagicMock()
    CustomDataset.__getitem__ = MagicMock(side_effect=(lambda idx: idx))
    dataset_a = CustomDataset(ann_file=MagicMock(), pipeline=[], test_mode=True, img_prefix='')
    len_a = 10
    # Random per-sample category-id lists (1..19 ids each, values in 0..79).
    cat_ids_list_a = [np.random.randint(0, 80, num).tolist() for num in np.random.randint(1, 20, len_a)]
    dataset_a.data_infos = MagicMock()
    dataset_a.data_infos.__len__.return_value = len_a
    dataset_a.get_cat_ids = MagicMock(side_effect=(lambda idx: cat_ids_list_a[idx]))
    dataset_b = CustomDataset(ann_file=MagicMock(), pipeline=[], test_mode=True, img_prefix='')
    len_b = 20
    cat_ids_list_b = [np.random.randint(0, 80, num).tolist() for num in np.random.randint(1, 20, len_b)]
    dataset_b.data_infos = MagicMock()
    dataset_b.data_infos.__len__.return_value = len_b
    dataset_b.get_cat_ids = MagicMock(side_effect=(lambda idx: cat_ids_list_b[idx]))
    # ConcatDataset: global index 25 lands at local index 15 of dataset_b.
    concat_dataset = ConcatDataset([dataset_a, dataset_b])
    assert (concat_dataset[5] == 5)
    assert (concat_dataset[25] == 15)
    assert (concat_dataset.get_cat_ids(5) == cat_ids_list_a[5])
    assert (concat_dataset.get_cat_ids(25) == cat_ids_list_b[15])
    assert (len(concat_dataset) == (len(dataset_a) + len(dataset_b)))
    # RepeatDataset: indices wrap modulo the base dataset length.
    repeat_dataset = RepeatDataset(dataset_a, 10)
    assert (repeat_dataset[5] == 5)
    assert (repeat_dataset[15] == 5)
    assert (repeat_dataset[27] == 7)
    assert (repeat_dataset.get_cat_ids(5) == cat_ids_list_a[5])
    assert (repeat_dataset.get_cat_ids(15) == cat_ids_list_a[5])
    assert (repeat_dataset.get_cat_ids(27) == cat_ids_list_a[7])
    assert (len(repeat_dataset) == (10 * len(dataset_a)))
    # ClassBalancedDataset: recompute the expected repeat factors by hand
    # (category frequency -> per-sample repeat factor) and compare.
    category_freq = defaultdict(int)
    for cat_ids in cat_ids_list_a:
        cat_ids = set(cat_ids)
        for cat_id in cat_ids:
            category_freq[cat_id] += 1
    for (k, v) in category_freq.items():
        category_freq[k] = (v / len(cat_ids_list_a))
    mean_freq = np.mean(list(category_freq.values()))
    repeat_thr = mean_freq
    category_repeat = {cat_id: max(1.0, math.sqrt((repeat_thr / cat_freq))) for (cat_id, cat_freq) in category_freq.items()}
    repeat_factors = []
    for cat_ids in cat_ids_list_a:
        cat_ids = set(cat_ids)
        repeat_factor = max({category_repeat[cat_id] for cat_id in cat_ids})
        repeat_factors.append(math.ceil(repeat_factor))
    repeat_factors_cumsum = np.cumsum(repeat_factors)
    repeat_factor_dataset = ClassBalancedDataset(dataset_a, repeat_thr)
    assert (len(repeat_factor_dataset) == repeat_factors_cumsum[(- 1)])
    # Spot-check a few random global indices against the cumulative sums.
    for idx in np.random.randint(0, len(repeat_factor_dataset), 3):
        assert (repeat_factor_dataset[idx] == bisect.bisect_right(repeat_factors_cumsum, idx))
def test_pickle_simple_callable():
    """simple_callable wraps a bare function pointer: PyPy can pickle it,
    while CPython raises TypeError on the underlying PyCapsule.

    NOTE(review): the expected constant was lost in extraction; 20220426 is
    the value m.simple_callable() returns in the upstream pybind11 test
    suite — confirm against tests/test_pickling.cpp.
    """
    assert (m.simple_callable() == 20220426)
    if env.PYPY:
        serialized = pickle.dumps(m.simple_callable)
        deserialized = pickle.loads(serialized)
        assert (deserialized() == 20220426)
    else:
        with pytest.raises(TypeError) as excinfo:
            pickle.dumps(m.simple_callable)
        assert re.search('can.*t pickle .*PyCapsule.* object', str(excinfo.value))
def _prepare_args(kwargs, create_keys, run_keys, fit_keys, backend):
    """Split tuner kwargs into study-creation, optimize-run and fit dicts.

    Sampler/pruner type names in the creation kwargs are replaced with
    instances built by *backend*, and their *_kwargs entries are dropped
    once consumed.
    """
    create_kwargs = _filter_tuner_args(kwargs, create_keys)
    run_kwargs = _filter_tuner_args(kwargs, run_keys)
    fit_kwargs = _filter_tuner_args(kwargs, fit_keys)
    sampler_name = create_kwargs.get('sampler', None)
    if sampler_name:
        create_kwargs['sampler'] = backend.create_sampler(sampler_name, create_kwargs.get('sampler_kwargs', None))
        create_kwargs.pop('sampler_kwargs', None)
    pruner_name = create_kwargs.get('pruner', None)
    if pruner_name:
        create_kwargs['pruner'] = backend.create_pruner(pruner_name, create_kwargs.get('pruner_kwargs', {}))
        create_kwargs.pop('pruner_kwargs', None)
    # optimize() expects 'callbacks'; the public API spells it 'tune_callbacks'.
    run_kwargs['callbacks'] = run_kwargs.pop('tune_callbacks', None)
    run_kwargs['show_progress_bar'] = False
    return (create_kwargs, run_kwargs, fit_kwargs)
def trigger_nets() -> None:
    """Import ``src.networks`` purely for its side effect of populating the
    network registry, timing how long the import takes."""
    with Timer(as_ms=True) as t:
        # Side-effect import: registration happens at module import time.
        from src import networks
    # NOTE(review): assumes Timer exposes .elapsed after the with-block exits
    # — confirm against the Timer implementation.
    logger.debug(f'Triggered registry networks in {t.elapsed}ms...')
# NOTE(review): the decorator line was garbled in extraction ('_grad()');
# restored as @torch.no_grad() — gradient tracking is unneeded for metric
# evaluation, but confirm against the original source.
@torch.no_grad()
def LPIPS(rgb, rgb_gt):
    """Learned Perceptual Image Patch Similarity between two HWC images.

    Instantiates the AlexNet-based LPIPS network on CPU on every call —
    simple, but slow if evaluated repeatedly.

    Args:
        rgb: predicted image, shape (H, W, C).
        rgb_gt: reference image, same shape; values expected in [0, 1]
            (normalize=True below rescales them for the network).

    Returns:
        float LPIPS distance (lower is more similar).
    """
    # HWC -> 1CHW: the LPIPS network expects batched channel-first input.
    rgb = torch.moveaxis(rgb, -1, 0)[None, ...]
    rgb_gt = torch.moveaxis(rgb_gt, -1, 0)[None, ...]
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')  # silence pretrained-weights warnings
        lpips = _LPIPS(net='alex', verbose=False).cpu()
    return float(lpips(rgb, rgb_gt, normalize=True).item())
def get_dynamic_gnn_methods():
    """Return a mapping of supported dynamic-GNN method names to the flag 1.

    ``np.int`` (used by the original ``np.ones(..., dtype=np.int)``) was
    removed in NumPy 1.24 and raised AttributeError; plain Python ints
    compare equal to the old values.
    """
    gnn_list = ['GCRN', 'EvolveGCN', 'VGRNN', 'CTGCN-C', 'CTGCN-S']
    return dict.fromkeys(gnn_list, 1)
def basic_bn_stem():
    """ResNet stem with frozen-affine BN: 7x7/2 conv -> affine channel
    scale/shift -> ReLU -> 3x3/2 max-pool."""
    stem_layers = [
        ('conv1', nn.Conv2d(3, 64, 7, stride=2, padding=3, bias=False)),
        ('bn1', mynn.AffineChannel2d(64)),
        ('relu', nn.ReLU(inplace=True)),
        ('maxpool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
    ]
    return nn.Sequential(OrderedDict(stem_layers))
def test_add_batch_none_inserted(data):
    """Adding a batch of strictly-worse solutions must insert nothing.

    Every candidate has objective one below the existing elite's, so all
    statuses are 0 (not added) and every improvement value is -1.
    """
    add_info = data.archive_with_elite.add(solution=([[1, 2, 3]] * 4), objective=[(data.objective - 1) for _ in range(4)], measures=[data.measures for _ in range(4)])
    assert (add_info['status'] == 0).all()
    assert np.isclose(add_info['value'], (- 1.0)).all()
    # The archive still holds exactly the original elite.
    assert_archive_elites(archive=data.archive_with_elite, batch_size=1, solution_batch=[data.solution], objective_batch=[data.objective], measures_batch=[data.measures], grid_indices_batch=[data.grid_indices])
class DirDecoder(nn.Module):
    """Graph decoder over mesh vertices using Dirac-operator residual blocks.

    Mixes per-vertex input features with a noise code, runs a stack of
    DirResNet2 blocks that exchange information between vertex and face
    features via the (Di, DiA) Dirac operators, and predicts a per-vertex
    displacement added to the inputs, together with a learned (constant)
    log-variance.
    """
    def __init__(self, Dir):
        super(DirDecoder, self).__init__()
        self.conv_inputs = utils.GraphConv1x1(3, 128, batch_norm=None)
        self.conv_noise = utils.GraphConv1x1(100, 128, batch_norm=None)
        self.num_layers = 5
        # Register one Dirac residual block per layer as rn0..rn4.
        for i in range(self.num_layers):
            module = utils.DirResNet2(128)
            self.add_module('rn{}'.format(i), module)
        self.bn_conv2 = utils.GraphConv1x1(128, 128, batch_norm='pre')
        self.fc_mu = utils.GraphConv1x1(128, 3, batch_norm=None)
        # Single learned scalar log-variance, broadcast over all outputs.
        self.fc_logvar = nn.Parameter(torch.zeros(1, 1, 1))
    def forward(self, inputs, noise, Di, DiA, mask):
        """Decode vertex positions.

        Args:
            inputs: (batch, num_nodes, 3) vertex features/positions.
            noise: (batch, num_nodes, 100) latent noise code.
            Di, DiA: Dirac operator and its adjoint; DiA's last dim is 4x
                the face count (quaternion components).
            mask: unused here; kept for interface compatibility.
        """
        (batch_size, num_nodes, _) = inputs.size()
        v = (self.conv_inputs(inputs) + self.conv_noise(noise))
        num_faces = (DiA.size(2) // 4)
        # NOTE(review): torch.autograd.Variable is deprecated (tensors are
        # fine since PyTorch 0.4) — left unchanged in this doc-only pass.
        f = Variable(torch.zeros(batch_size, num_faces, 128))
        if v.is_cuda:
            f = f.cuda()
        # Alternate vertex/face feature updates through the residual blocks.
        for i in range(self.num_layers):
            (v, f) = self._modules['rn{}'.format(i)](Di, DiA, v, f)
        x = v
        x = F.elu(x)
        x = self.bn_conv2(x)
        x = F.elu(x)
        mu = self.fc_mu(x)
        y = self.fc_logvar.expand_as(mu).contiguous()
        # Residual prediction: decoder outputs an offset from the inputs.
        return ((mu + inputs), y)
def make_cseg_image_name(id: int, extension: str='.png') -> str:
    """Build a cseg image filename such as 'image.000042.cseg.png'.

    The id is zero-padded to six digits; *extension* must include the dot.
    """
    return f'image.{id:06d}.cseg{extension}'
def compute_sim_matrix(model, data_loader, **kwargs):
    """Compute image-text similarity matrices for retrieval evaluation.

    Embeds every caption and image, ranks candidates by embedding cosine
    similarity, then re-scores the top-k candidates per query with the
    (expensive) image-text matching head.  Re-scoring is sharded across
    distributed ranks and merged with an all-reduce.

    Args:
        model: retrieval model exposing tokenizer / forward_text /
            forward_image / text_proj / vision_proj / compute_itm.
        data_loader: loader over image samples; its dataset carries the
            full .text and .image lists.
        **kwargs: must contain 'k_test', the number of candidates to re-rank.

    Returns:
        (score_i2t, score_t2i) numpy arrays of shape
        (num_images, num_texts) and (num_texts, num_images).
    """
    k_test = kwargs.pop('k_test')
    metric_logger = MetricLogger(delimiter=' ')
    header = 'Evaluation:'
    logging.info('Computing features for evaluation...')
    start_time = time.time()
    # --- Text features, in batches of 256 captions ---
    texts = data_loader.dataset.text
    num_text = len(texts)
    text_bs = 256
    text_ids = []
    text_embeds = []
    text_atts = []
    for i in range(0, num_text, text_bs):
        text = texts[i:min(num_text, (i + text_bs))]
        text_input = model.tokenizer(text, padding='max_length', truncation=True, max_length=35, return_tensors='pt').to(model.device)
        text_feat = model.forward_text(text_input)
        text_embed = F.normalize(model.text_proj(text_feat))
        text_embeds.append(text_embed)
        text_ids.append(text_input.input_ids)
        text_atts.append(text_input.attention_mask)
    text_embeds = torch.cat(text_embeds, dim=0)
    text_ids = torch.cat(text_ids, dim=0)
    text_atts = torch.cat(text_atts, dim=0)
    # --- Image features ---
    vit_feats = []
    image_embeds = []
    for samples in data_loader:
        image = samples['image']
        image = image.to(model.device)
        (image_feat, vit_feat) = model.forward_image(image)
        image_embed = model.vision_proj(image_feat)
        image_embed = F.normalize(image_embed, dim=(- 1))
        vit_feats.append(vit_feat.cpu())  # keep the large ViT features off-GPU
        image_embeds.append(image_embed)
    vit_feats = torch.cat(vit_feats, dim=0)
    image_embeds = torch.cat(image_embeds, dim=0)
    # --- First-stage ranking by embedding similarity ---
    sims_matrix = []
    for image_embed in image_embeds:
        # BUGFIX: the matrix-multiply operator was missing here (query
        # embeddings x all text embeddings).
        sim_q2t = (image_embed @ text_embeds.t())
        (sim_i2t, _) = sim_q2t.max(0)  # best query-token score per text
        sims_matrix.append(sim_i2t)
    sims_matrix = torch.stack(sims_matrix, dim=0)
    # --- Second stage: ITM re-scoring of top-k texts per image, sharded ---
    score_matrix_i2t = torch.full((len(data_loader.dataset.image), len(texts)), (- 100.0)).to(model.device)
    num_tasks = dist_utils.get_world_size()
    rank = dist_utils.get_rank()
    step = ((sims_matrix.size(0) // num_tasks) + 1)
    start = (rank * step)
    end = min(sims_matrix.size(0), (start + step))
    for (i, sims) in enumerate(metric_logger.log_every(sims_matrix[start:end], 50, header)):
        (topk_sim, topk_idx) = sims.topk(k=k_test, dim=0)
        image_inputs = vit_feats[(start + i)].repeat(k_test, 1, 1).to(model.device)
        score = model.compute_itm(image_inputs=image_inputs, text_ids=text_ids[topk_idx], text_atts=text_atts[topk_idx]).float()
        score_matrix_i2t[((start + i), topk_idx)] = (score + topk_sim)
    # --- Same, transposed: top-k images per text ---
    sims_matrix = sims_matrix.t()
    score_matrix_t2i = torch.full((len(texts), len(data_loader.dataset.image)), (- 100.0)).to(model.device)
    step = ((sims_matrix.size(0) // num_tasks) + 1)
    start = (rank * step)
    end = min(sims_matrix.size(0), (start + step))
    for (i, sims) in enumerate(metric_logger.log_every(sims_matrix[start:end], 50, header)):
        (topk_sim, topk_idx) = sims.topk(k=k_test, dim=0)
        image_inputs = vit_feats[topk_idx.cpu()].to(model.device)
        score = model.compute_itm(image_inputs=image_inputs, text_ids=text_ids[(start + i)].repeat(k_test, 1), text_atts=text_atts[(start + i)].repeat(k_test, 1)).float()
        score_matrix_t2i[((start + i), topk_idx)] = (score + topk_sim)
    # Merge the per-rank shards (untouched rows hold -100 and add up harmlessly
    # only on the owning rank's entries).
    if dist_utils.is_dist_avail_and_initialized():
        dist.barrier()
        torch.distributed.all_reduce(score_matrix_i2t, op=torch.distributed.ReduceOp.SUM)
        torch.distributed.all_reduce(score_matrix_t2i, op=torch.distributed.ReduceOp.SUM)
    total_time = (time.time() - start_time)
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    logging.info('Evaluation time {}'.format(total_time_str))
    return (score_matrix_i2t.cpu().numpy(), score_matrix_t2i.cpu().numpy())
def is_uncovered_api(api):
    """Return True if *api* does not appear in the covered-API log.

    Both the log lines and the query are stripped of surrounding whitespace
    before comparison.
    """
    covered = {entry.strip() for entry in load_data(join(root_dir, 'logs', 'covered_api.txt'), multiline=True)}
    return api.strip() not in covered
def test_digits_sqrt_stochastic_sparse():
model = FeatureBasedSelection(100, 'sqrt', optimizer='stochastic', random_state=0)
model.fit(X_digits_sparse)
assert_array_equal(model.ranking, digits_sqrt_stochastic_ranking)
assert_array_almost_equal(model.gains, digits_sqrt_stochastic_gains, 4)
assert_array_almost_equal(model.subset, X_digits_sparse[model.ranking].toarray()) |
class RandomResizedCrop(transforms.RandomResizedCrop):
    """torchvision RandomResizedCrop that also accepts interpolation modes by
    name (e.g. 'bilinear'), resolved through the module-level _INTERPOLATION
    map.

    Args:
        size: output size of the crop.
        scale: range of the crop's area fraction of the original image.
        ratio: range of the crop's aspect ratio.
        interpolation: an InterpolationMode or its lowercase string name.
        antialias: whether to antialias when resizing.
    """
    # BUGFIX: defaults were mutable lists shared across all calls; tuples are
    # immutable and equally accepted as sequences downstream.
    def __init__(self, size: Union[(int, Iterable[int])], scale: Iterable[float]=(0.08, 1.0), ratio: Iterable[float]=((3 / 4), (4 / 3)), interpolation: Union[(str, InterpolationMode)]='bilinear', antialias: bool=True, **kwargs) -> None:
        # isinstance (not `type(...) is str`) also accepts str subclasses.
        if isinstance(interpolation, str):
            interpolation = _INTERPOLATION[interpolation]
        super().__init__(size, scale=scale, ratio=ratio, interpolation=interpolation, antialias=antialias, **kwargs)
def check_syntax(sandbox_dir):
    """Run 'imitator -mode checksyntax' on every benchmark model and report
    which ones fail.

    Args:
        sandbox_dir: working directory for the imitator subprocesses.
    """
    models = Path('../benchmarks').rglob('*.[iI][mM][iI]')
    checked = 0
    failed = []
    for model in models:
        print(f'{model.name} - check syntax')
        print('')
        outcome = subprocess.run(['imitator', '-mode', 'checksyntax', model.absolute()], cwd=sandbox_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        # imitator reports syntax problems on stderr containing 'ERROR'.
        if 'ERROR' in outcome.stderr:
            print(f'{model.name}: error')
            failed.append(model.absolute())
        else:
            print(f'{model.name}: ok')
        print('')
        checked += 1
    print(f'{len(failed)}/{checked} syntax error(s)')
    print('list of models with errors:')
    for model in failed:
        print(model)
# NOTE(review): the decorator object was lost in extraction (only the route
# arguments survived); restored as the Flask `app.route` — confirm whether
# this module registers routes on `app` or on a blueprint.
@app.route('/evaluation/systems/', methods=['GET'])
def system_list():
    """Return the evaluation systems visible to the current user as JSON."""
    return jsonify({'success': True, 'systems': general_db.get_systems(g.user)})
def preprocess_date_understanding(path, shuffle_choices_seed=None):
    """Preprocess the BIG-bench date_understanding task (369 examples) with
    shuffled answer choices, forwarding the seed only when one is given."""
    extra = {}
    if shuffle_choices_seed is not None:
        extra['shuffle_choices_seed'] = shuffle_choices_seed
    return preprocess_bigbench_choice(path, n=369, name='date_understanding', shuffle_choices=True, **extra)
class DeformConv(nn.Module):
    """Deformable convolution layer (DCN v1): sampling locations are shifted
    by a learned per-position offset field supplied at forward time.

    Bias is not supported (asserted off).  Attribute names are part of the
    checkpoint format and must not be renamed.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, deformable_groups=1, bias=False):
        assert (not bias)
        super(DeformConv, self).__init__()
        self.with_bias = bias
        assert ((in_channels % groups) == 0), 'in_channels {} cannot be divisible by groups {}'.format(in_channels, groups)
        assert ((out_channels % groups) == 0), 'out_channels {} cannot be divisible by groups {}'.format(out_channels, groups)
        self.in_channels = in_channels
        self.out_channels = out_channels
        # _pair normalizes int arguments to (h, w) tuples.
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.groups = groups
        self.deformable_groups = deformable_groups
        self.weight = nn.Parameter(torch.Tensor(out_channels, (in_channels // self.groups), *self.kernel_size))
        self.reset_parameters()
    def reset_parameters(self):
        # Uniform init scaled by fan-in (in_channels * kernel area).
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        stdv = (1.0 / math.sqrt(n))
        self.weight.data.uniform_((- stdv), stdv)
    def forward(self, input, offset):
        # `offset` has 2 * deformable_groups * kh * kw channels (x/y shifts
        # per sampling point); the heavy lifting is in the deform_conv op.
        return deform_conv(input, offset, self.weight, self.stride, self.padding, self.dilation, self.groups, self.deformable_groups)
    def __repr__(self):
        return ''.join(['{}('.format(self.__class__.__name__), 'in_channels={}, '.format(self.in_channels), 'out_channels={}, '.format(self.out_channels), 'kernel_size={}, '.format(self.kernel_size), 'stride={}, '.format(self.stride), 'dilation={}, '.format(self.dilation), 'padding={}, '.format(self.padding), 'groups={}, '.format(self.groups), 'deformable_groups={}, '.format(self.deformable_groups), 'bias={})'.format(self.with_bias)])
class TFLayoutLMv3Model(metaclass=DummyObject):
    """Placeholder for TFLayoutLMv3Model when TensorFlow is not installed.

    Any instantiation raises through ``requires_backends``, telling the
    user to install the missing 'tf' backend.
    """
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for DeBERTa (byte-level BPE; 'G' stands in for the
    space marker in the tiny fixture vocab)."""
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        """Write a minimal BPE vocab and merges file into the temp dir."""
        super().setUp()
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'G', 'Gl', 'Gn', 'Glo', 'Glow', 'er', 'Glowest', 'Gnewer', 'Gwider', '[UNK]']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'G l', 'Gl o', 'Glo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '[UNK]'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write((json.dumps(vocab_tokens) + '\n'))
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        # Inject the [UNK] special-token mapping into every tokenizer build.
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return (input_text, output_text)
    def test_full_tokenizer(self):
        """Tokenization and id conversion against the fixture vocab."""
        tokenizer = self.get_tokenizer()
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', 'G', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = (tokens + [tokenizer.unk_token])
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_sequence_builders(self):
        """build_inputs_with_special_tokens must match encode(..., add_special_tokens=True)."""
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/deberta-base')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode('sequence builders', add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode('sequence builders', 'multi-sequence build', add_special_tokens=True, add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert (encoded_sentence == encoded_text_from_decode)
        assert (encoded_pair == encoded_pair_from_decode)
    def test_tokenizer_integration(self):
        """Round-trip the real microsoft/deberta-base vocab (Python and Rust)."""
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained('microsoft/deberta-base')
            sequences = ['ALBERT: A Lite BERT for Self-supervised Learning of Language Representations', 'ALBERT incorporates two parameter reduction techniques', 'The first one is a factorized embedding parameterization. By decomposing the large vocabulary embedding matrix into two small matrices, we separate the size of the hidden layers from the size of vocabulary embedding.']
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding['input_ids']]
            # Golden ids/types/masks captured from a known-good run.
            expected_encoding = {'input_ids': [[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}
            expected_decoded_sequence = ['ALBERT: A Lite BERT for Self-supervised Learning of Language Representations', 'ALBERT incorporates two parameter reduction techniques', 'The first one is a factorized embedding parameterization. By decomposing the large vocabulary embedding matrix into two small matrices, we separate the size of the hidden layers from the size of vocabulary embedding.']
            self.assertDictEqual(encoding.data, expected_encoding)
            for (expected, decoded) in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
def batch_it(seq, num=1):
    """Yield successive lists of up to *num* items from *seq*.

    The final batch may be shorter; nothing is yielded for an empty input.
    """
    batch = []
    for element in seq:
        batch.append(element)
        if len(batch) == num:
            yield batch
            batch = []
    if batch:
        yield batch
def _make_scratch_csm(scratch, in_channels, cout, expand):
    """Attach four FeatureFusionBlock heads (layer3 down to layer0) to
    *scratch* and record the resulting per-layer channel counts.

    Returns the same *scratch* object for chaining.
    """
    scratch.layer3_csm = FeatureFusionBlock(in_channels[3], nn.ReLU(False), expand=expand, lowest=True)
    scratch.layer2_csm = FeatureFusionBlock(in_channels[2], nn.ReLU(False), expand=expand)
    scratch.layer1_csm = FeatureFusionBlock(in_channels[1], nn.ReLU(False), expand=expand)
    scratch.layer0_csm = FeatureFusionBlock(in_channels[0], nn.ReLU(False))
    # With expansion, channel counts double at each deeper level.
    if expand:
        scratch.CHANNELS = [cout, cout, cout * 2, cout * 4]
    else:
        scratch.CHANNELS = [cout] * 4
    return scratch
def get_sparsity_ratio(pruners, model):
    """Aggregate sparsity statistics for a pruned model.

    Returns a tuple of three ratios:
      elementwise_over_matmul_gemm_conv — zeroed elements / Linear+Conv weights,
      elementwise_over_all             — zeroed elements / all parameters,
      blockwise_over_matmul_gemm_conv  — pattern-level sparsity / Linear+Conv weights.
    Multi-head-attention pruners are only logged, not folded into the ratios.
    """
    pattern_sparsity_cnt = 0
    element_sparsity_cnt = 0
    # Unwrap framework wrappers that hold the real model under .model.
    if hasattr(model, 'model'):
        model = model.model
    for pruner in pruners:
        if ('MultiheadAttentionPruner' in type(pruner).__name__):
            # MHA head masks are reported separately and skipped below.
            logger.info('Calculate multihead-attention sparsity')
            mha_total = 0.0
            mha_sparse = 0.0
            for (k, v) in pruner.head_masks.items():
                mha_total += v.numel()
                mha_sparse += (v.numel() - torch.count_nonzero(v))
            logger.info(f'MHA sparsity: {(mha_sparse / mha_total)}')
            continue
        modules = pruner.modules
        sparsity_ratio = pruner.pattern.get_sparsity_ratio(pruner.masks)
        # Pattern-level count: pruner's sparsity ratio applied to the total
        # number of weights it manages.
        cnt = 0
        for key in modules.keys():
            cnt += modules[key].weight.numel()
        pattern_sparsity_cnt += int((cnt * sparsity_ratio))
        # Element-level count: each zero in a block mask zeroes a whole block.
        for key in pruner.masks.keys():
            block_num = 1
            if pruner.pattern.block:
                block_size = pruner.pattern.block_size[key]
                block_num = (block_size[0] * block_size[1])
            element_sparsity_cnt += (torch.sum((pruner.masks[key] == 0)).data.item() * block_num)
    # Denominators: Linear/Conv weights only, and all parameters.
    linear_conv_cnt = 0
    param_cnt = 0
    for (name, module) in model.named_modules():
        # The regex matches Conv1d/Conv2d/Conv3d ('.' is any character).
        if ((type(module).__name__ in ['Linear']) or (re.search('Conv.d', type(module).__name__) is not None)):
            linear_conv_cnt += module.weight.numel()
    for (n, param) in model.named_parameters():
        param_cnt += param.numel()
    # Guard against division by zero for models with no Linear/Conv layers.
    if (linear_conv_cnt == 0):
        blockwise_over_matmul_gemm_conv = 0
        elementwise_over_matmul_gemm_conv = 0
    else:
        blockwise_over_matmul_gemm_conv = (float(pattern_sparsity_cnt) / linear_conv_cnt)
        elementwise_over_matmul_gemm_conv = (float(element_sparsity_cnt) / linear_conv_cnt)
    if (param_cnt == 0):
        elementwise_over_all = 0
    else:
        elementwise_over_all = (float(element_sparsity_cnt) / param_cnt)
    logger.info(f'elementwise_over_matmul_gemm_conv:{elementwise_over_matmul_gemm_conv}, elementwise_over_all:{elementwise_over_all},blockwise_over_matmul_gemm_conv:{blockwise_over_matmul_gemm_conv}')
    return (elementwise_over_matmul_gemm_conv, elementwise_over_all, blockwise_over_matmul_gemm_conv)
def patch_llama_for_dynamic_yarn_rotary_embeddings(model, original_max_position_embeddings, finetuned):
    """Replace every decoder layer's rotary embedding with a dynamic-YaRN-scaled one.

    Mutates `model` in place; the new embedding is created on the same device
    as the layer's existing inverse-frequency buffer.
    """
    from .LlamaDynamicYaRNScaledRotaryEmbedding import LlamaDynamicYaRNScaledRotaryEmbedding
    for layer in model.model.layers:
        attn = layer.self_attn
        attn.rotary_emb = LlamaDynamicYaRNScaledRotaryEmbedding(
            attn.head_dim,
            finetuned=finetuned,
            original_max_position_embeddings=original_max_position_embeddings,
            device=attn.rotary_emb.inv_freq.device,
        )
@pytest.mark.slow
def test_vmc_loop_logging(caplog):
    """VMC loop should emit one log record per epoch (plus one burn-in message).

    NOTE(review): the original decorator line was a bare ".slow" (a syntax
    error, presumably a mangled "@pytest.mark.slow") — restored; assumes
    "import pytest" exists at the top of this module.
    """
    nburn = 4
    nepochs = 13
    nsteps_per_param_update = 10
    # Fixed metrics make the update fn a pass-through, so only logging is exercised.
    fixed_metrics = {'energy': 1.0, 'energy_noclip': 2.5, 'variance': 3.0, 'variance_noclip': np.pi}

    def update_param_fn(params, data, optimizer_state, key):
        return (params, data, optimizer_state, fixed_metrics, key)

    # Exercise both the pmapped and non-pmapped code paths.
    for pmapped in [True, False]:
        caplog.clear()
        (data, params, key) = make_dummy_data_params_and_key()
        metrop_step_fn = make_dummy_metropolis_fn()
        nchains = data.shape[0]
        # Fix: optimizer_state was previously bound only inside the pmapped
        # branch, so the pmapped=False iteration silently reused the
        # distributed state leaked from the previous loop iteration.
        optimizer_state = None
        if pmapped:
            data = _make_different_pmappable_data(data)
            (data, params, optimizer_state, key) = utils.distribute.distribute_vmc_state(data, params, None, key)
        burning_step = mcmc.metropolis.make_jitted_burning_step(metrop_step_fn, apply_pmap=pmapped)
        walker_fn = mcmc.metropolis.make_jitted_walker_fn(nsteps_per_param_update, metrop_step_fn, apply_pmap=pmapped)
        with caplog.at_level(logging.INFO):
            (data, key) = mcmc.metropolis.burn_data(burning_step, nburn, params, data, key)
            train.vmc.vmc_loop(params, optimizer_state, data, nchains, nepochs, walker_fn, update_param_fn, key, is_pmapped=pmapped)
        assert (len(caplog.records) == (1 + nepochs))
@_REGISTRY.register()  # NOTE(review): restored missing "@" — the bare call created and discarded the decorator, so the model was never registered
def resnet101(norm_layer=nn.BatchNorm2d):
    """Build ResNet-101 v1b: bottleneck blocks arranged as [3, 4, 23, 3]."""
    num_block = [3, 4, 23, 3]
    return ResNetV1(BottleneckV1b, num_block, norm_layer=norm_layer)
def main():
    """CLI entry point: start/stop the Neural Solution service or manage its cluster."""
    parser = argparse.ArgumentParser(description='Neural Solution')
    parser.add_argument('action', choices=['start', 'stop', 'cluster'], help='start/stop/management service')
    parser.add_argument('--hostfile', default=None, help='start backend serve host file which contains all available nodes')
    parser.add_argument('--restful_api_port', type=int, default=8000, help='start restful serve with {restful_api_port}, default 8000')
    parser.add_argument('--grpc_api_port', type=int, default=8001, help='start gRPC with {restful_api_port}, default 8001')
    parser.add_argument('--result_monitor_port', type=int, default=3333, help='start serve for result monitor at {result_monitor_port}, default 3333')
    parser.add_argument('--task_monitor_port', type=int, default=2222, help='start serve for task monitor at {task_monitor_port}, default 2222')
    parser.add_argument('--api_type', default='all', help='start web serve with all/grpc/restful, default all')
    parser.add_argument('--workspace', default='./ns_workspace', help='neural solution workspace, default "./ns_workspace"')
    parser.add_argument('--conda_env', default=None, help='specify the running environment for the task')
    parser.add_argument('--upload_path', default='examples', help='specify the file path for the tasks')
    parser.add_argument('--query', action='store_true', help='[cluster parameter] query cluster information')
    parser.add_argument('--join', help='[cluster parameter] add new node into cluster')
    parser.add_argument('--remove', help='[cluster parameter] remove <node-id> from cluster')
    args = parser.parse_args()
    # Validate the requested ports before dispatching any action.
    check_ports(args)
    # Dispatch table replaces an if/elif chain; argparse already restricts
    # `action` to these three choices, so the .get() fallback never fires.
    handlers = {
        'start': lambda: start_service(args),
        'stop': lambda: stop_service(),
        'cluster': lambda: manage_cluster(args),
    }
    handler = handlers.get(args.action)
    if handler is not None:
        handler()
class PNASNetTest(tf.test.TestCase):
    """Graph-mode (TF1-style) shape and endpoint tests for the PNASNet builders.

    Exercises both the large (331x331) and mobile (224x224) variants:
    output shapes, declared endpoints, aux-head toggling, NCHW override,
    and bounded-activation (Relu6) selection.
    """

    def testBuildLogitsLargeModel(self):
        """Large model: logits, aux logits and predictions are [batch, classes]."""
        batch_size = 5
        (height, width) = (331, 331)
        num_classes = 1000
        inputs = tf.random_uniform((batch_size, height, width, 3))
        tf.train.create_global_step()
        with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
            (logits, end_points) = pnasnet.build_pnasnet_large(inputs, num_classes)
        auxlogits = end_points['AuxLogits']
        predictions = end_points['Predictions']
        self.assertListEqual(auxlogits.get_shape().as_list(), [batch_size, num_classes])
        self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
        self.assertListEqual(predictions.get_shape().as_list(), [batch_size, num_classes])

    def testBuildLogitsMobileModel(self):
        """Mobile model: logits, aux logits and predictions are [batch, classes]."""
        batch_size = 5
        (height, width) = (224, 224)
        num_classes = 1000
        inputs = tf.random_uniform((batch_size, height, width, 3))
        tf.train.create_global_step()
        with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
            (logits, end_points) = pnasnet.build_pnasnet_mobile(inputs, num_classes)
        auxlogits = end_points['AuxLogits']
        predictions = end_points['Predictions']
        self.assertListEqual(auxlogits.get_shape().as_list(), [batch_size, num_classes])
        self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
        self.assertListEqual(predictions.get_shape().as_list(), [batch_size, num_classes])

    def testBuildNonExistingLayerLargeModel(self):
        """Large model: expected stem variable exists, absent PNAS cell branch does not."""
        inputs = tf.random_uniform((5, 331, 331, 3))
        tf.train.create_global_step()
        with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
            pnasnet.build_pnasnet_large(inputs, 1000)
        vars_names = [x.op.name for x in tf.trainable_variables()]
        self.assertIn('cell_stem_0/1x1/weights', vars_names)
        self.assertNotIn('cell_stem_1/comb_iter_0/right/1x1/weights', vars_names)

    def testBuildNonExistingLayerMobileModel(self):
        """Mobile model: expected stem variable exists, absent PNAS cell branch does not."""
        inputs = tf.random_uniform((5, 224, 224, 3))
        tf.train.create_global_step()
        with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
            pnasnet.build_pnasnet_mobile(inputs, 1000)
        vars_names = [x.op.name for x in tf.trainable_variables()]
        self.assertIn('cell_stem_0/1x1/weights', vars_names)
        self.assertNotIn('cell_stem_1/comb_iter_0/right/1x1/weights', vars_names)

    def testBuildPreLogitsLargeModel(self):
        """num_classes=None returns the 4320-dim pooled features, no logit heads."""
        batch_size = 5
        (height, width) = (331, 331)
        num_classes = None
        inputs = tf.random_uniform((batch_size, height, width, 3))
        tf.train.create_global_step()
        with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
            (net, end_points) = pnasnet.build_pnasnet_large(inputs, num_classes)
        self.assertFalse(('AuxLogits' in end_points))
        self.assertFalse(('Predictions' in end_points))
        self.assertTrue(net.op.name.startswith('final_layer/Mean'))
        self.assertListEqual(net.get_shape().as_list(), [batch_size, 4320])

    def testBuildPreLogitsMobileModel(self):
        """num_classes=None returns the 1080-dim pooled features, no logit heads."""
        batch_size = 5
        (height, width) = (224, 224)
        num_classes = None
        inputs = tf.random_uniform((batch_size, height, width, 3))
        tf.train.create_global_step()
        with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
            (net, end_points) = pnasnet.build_pnasnet_mobile(inputs, num_classes)
        self.assertFalse(('AuxLogits' in end_points))
        self.assertFalse(('Predictions' in end_points))
        self.assertTrue(net.op.name.startswith('final_layer/Mean'))
        self.assertListEqual(net.get_shape().as_list(), [batch_size, 1080])

    def testAllEndPointsShapesLargeModel(self):
        """Large model exposes exactly 17 endpoints with these fixed shapes."""
        batch_size = 5
        (height, width) = (331, 331)
        num_classes = 1000
        inputs = tf.random_uniform((batch_size, height, width, 3))
        tf.train.create_global_step()
        with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
            (_, end_points) = pnasnet.build_pnasnet_large(inputs, num_classes)
        endpoints_shapes = {'Stem': [batch_size, 42, 42, 540], 'Cell_0': [batch_size, 42, 42, 1080], 'Cell_1': [batch_size, 42, 42, 1080], 'Cell_2': [batch_size, 42, 42, 1080], 'Cell_3': [batch_size, 42, 42, 1080], 'Cell_4': [batch_size, 21, 21, 2160], 'Cell_5': [batch_size, 21, 21, 2160], 'Cell_6': [batch_size, 21, 21, 2160], 'Cell_7': [batch_size, 21, 21, 2160], 'Cell_8': [batch_size, 11, 11, 4320], 'Cell_9': [batch_size, 11, 11, 4320], 'Cell_10': [batch_size, 11, 11, 4320], 'Cell_11': [batch_size, 11, 11, 4320], 'global_pool': [batch_size, 4320], 'AuxLogits': [batch_size, 1000], 'Predictions': [batch_size, 1000], 'Logits': [batch_size, 1000]}
        self.assertEqual(len(end_points), 17)
        self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
        for endpoint_name in endpoints_shapes:
            tf.logging.info('Endpoint name: {}'.format(endpoint_name))
            expected_shape = endpoints_shapes[endpoint_name]
            self.assertIn(endpoint_name, end_points)
            self.assertListEqual(end_points[endpoint_name].get_shape().as_list(), expected_shape)

    def testAllEndPointsShapesMobileModel(self):
        """Mobile model exposes exactly 14 endpoints with these fixed shapes."""
        batch_size = 5
        (height, width) = (224, 224)
        num_classes = 1000
        inputs = tf.random_uniform((batch_size, height, width, 3))
        tf.train.create_global_step()
        with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
            (_, end_points) = pnasnet.build_pnasnet_mobile(inputs, num_classes)
        endpoints_shapes = {'Stem': [batch_size, 28, 28, 135], 'Cell_0': [batch_size, 28, 28, 270], 'Cell_1': [batch_size, 28, 28, 270], 'Cell_2': [batch_size, 28, 28, 270], 'Cell_3': [batch_size, 14, 14, 540], 'Cell_4': [batch_size, 14, 14, 540], 'Cell_5': [batch_size, 14, 14, 540], 'Cell_6': [batch_size, 7, 7, 1080], 'Cell_7': [batch_size, 7, 7, 1080], 'Cell_8': [batch_size, 7, 7, 1080], 'global_pool': [batch_size, 1080], 'AuxLogits': [batch_size, num_classes], 'Predictions': [batch_size, num_classes], 'Logits': [batch_size, num_classes]}
        self.assertEqual(len(end_points), 14)
        self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
        for endpoint_name in endpoints_shapes:
            tf.logging.info('Endpoint name: {}'.format(endpoint_name))
            expected_shape = endpoints_shapes[endpoint_name]
            self.assertIn(endpoint_name, end_points)
            self.assertListEqual(end_points[endpoint_name].get_shape().as_list(), expected_shape)

    def testNoAuxHeadLargeModel(self):
        """'AuxLogits' endpoint presence tracks the use_aux_head hparam (large)."""
        batch_size = 5
        (height, width) = (331, 331)
        num_classes = 1000
        for use_aux_head in (True, False):
            # Fresh graph per configuration so variables/endpoints don't collide.
            tf.reset_default_graph()
            inputs = tf.random_uniform((batch_size, height, width, 3))
            tf.train.create_global_step()
            config = pnasnet.large_imagenet_config()
            config.set_hparam('use_aux_head', int(use_aux_head))
            with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
                (_, end_points) = pnasnet.build_pnasnet_large(inputs, num_classes, config=config)
            self.assertEqual(('AuxLogits' in end_points), use_aux_head)

    def testNoAuxHeadMobileModel(self):
        """'AuxLogits' endpoint presence tracks the use_aux_head hparam (mobile)."""
        batch_size = 5
        (height, width) = (224, 224)
        num_classes = 1000
        for use_aux_head in (True, False):
            # Fresh graph per configuration so variables/endpoints don't collide.
            tf.reset_default_graph()
            inputs = tf.random_uniform((batch_size, height, width, 3))
            tf.train.create_global_step()
            config = pnasnet.mobile_imagenet_config()
            config.set_hparam('use_aux_head', int(use_aux_head))
            with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
                (_, end_points) = pnasnet.build_pnasnet_mobile(inputs, num_classes, config=config)
            self.assertEqual(('AuxLogits' in end_points), use_aux_head)

    def testOverrideHParamsLargeModel(self):
        """data_format='NCHW' moves channels to axis 1 in the large model's stem."""
        batch_size = 5
        (height, width) = (331, 331)
        num_classes = 1000
        inputs = tf.random_uniform((batch_size, height, width, 3))
        tf.train.create_global_step()
        config = pnasnet.large_imagenet_config()
        config.set_hparam('data_format', 'NCHW')
        with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
            (_, end_points) = pnasnet.build_pnasnet_large(inputs, num_classes, config=config)
        self.assertListEqual(end_points['Stem'].shape.as_list(), [batch_size, 540, 42, 42])

    def testOverrideHParamsMobileModel(self):
        """data_format='NCHW' moves channels to axis 1 in the mobile model's stem."""
        batch_size = 5
        (height, width) = (224, 224)
        num_classes = 1000
        inputs = tf.random_uniform((batch_size, height, width, 3))
        tf.train.create_global_step()
        config = pnasnet.mobile_imagenet_config()
        config.set_hparam('data_format', 'NCHW')
        with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
            (_, end_points) = pnasnet.build_pnasnet_mobile(inputs, num_classes, config=config)
        self.assertListEqual(end_points['Stem'].shape.as_list(), [batch_size, 135, 28, 28])

    def testUseBoundedAcitvationMobileModel(self):
        """use_bounded_activation hparam selects Relu6 for every ReLU-family op.

        (sic: "Acitvation" typo kept — renaming would change the public test name.)
        """
        batch_size = 1
        (height, width) = (224, 224)
        num_classes = 1000
        for use_bounded_activation in (True, False):
            tf.reset_default_graph()
            inputs = tf.random_uniform((batch_size, height, width, 3))
            config = pnasnet.mobile_imagenet_config()
            config.set_hparam('use_bounded_activation', use_bounded_activation)
            with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
                (_, _) = pnasnet.build_pnasnet_mobile(inputs, num_classes, config=config)
            # Every Relu* node must be Relu6 exactly when bounded activation is on.
            for node in tf.get_default_graph().as_graph_def().node:
                if node.op.startswith('Relu'):
                    self.assertEqual((node.op == 'Relu6'), use_bounded_activation)
class MultimodalDecoder(nn.Module):
    """Decode a scene embedding into 6 candidate trajectories plus mode scores.

    forward(x) maps a (batch, embed_dim) embedding to:
      - loc: (batch, 6, future_steps, 2) per-mode future (x, y) offsets,
      - pi:  (batch, 6) unnormalized per-mode scores.
    """

    def __init__(self, embed_dim, future_steps) -> None:
        super().__init__()
        self.embed_dim = embed_dim
        self.future_steps = future_steps
        # One shared projection fans the embedding out into 6 mode embeddings.
        self.multimodal_proj = nn.Linear(embed_dim, 6 * embed_dim)
        # Trajectory regression head, applied per mode.
        self.loc = nn.Sequential(
            nn.Linear(embed_dim, 256),
            nn.ReLU(),
            nn.Linear(256, embed_dim),
            nn.ReLU(),
            nn.Linear(embed_dim, future_steps * 2),
        )
        # Mode-score head, applied per mode.
        self.pi = nn.Sequential(
            nn.Linear(embed_dim, 256),
            nn.ReLU(),
            nn.Linear(256, embed_dim),
            nn.ReLU(),
            nn.Linear(embed_dim, 1),
        )

    def forward(self, x):
        modes = self.multimodal_proj(x).view(-1, 6, self.embed_dim)
        loc = self.loc(modes).view(-1, 6, self.future_steps, 2)
        pi = self.pi(modes).squeeze(-1)
        return (loc, pi)
def primaldual(y, OpA, OpW, c0, eta, y0=None, iter=20, sigma=0.5, tau=0.5, theta=1.0, silent=False, report_pd_gap=False):
    """Chambolle-Pock-style primal-dual iteration for sparse recovery.

    Appears to solve min_c ||c||_1 subject to ||OpA(OpW(c)) - y||_2 <= eta:
    the primal step is soft-thresholding (`shrink`, prox of the l1 norm) and
    the dual step projects via `prox_l2_constraint_conjugate` — both are
    project-defined helpers; OpA/OpW are assumed linear with adjoints
    `OpA.adj` / `OpW.adj` (TODO confirm against their definitions).

    Args:
        y: measurement tensor; norms are taken along the last dimension.
        OpA: forward/measurement operator (with `.adj`).
        OpW: synthesis operator mapping coefficients to signals (with `.adj`).
        c0: initial coefficient iterate.
        eta: l2 radius of the data-fit constraint.
        y0: initial dual iterate; defaults to zeros_like(y).
        iter: number of iterations (shadows the builtin `iter`; name kept for
            API compatibility).
        sigma: dual step size. tau: primal step size. theta: extrapolation weight.
        silent: suppress the tqdm progress bar.
        report_pd_gap: if True, print a primal-dual gap estimate after the loop.

    Returns:
        (OpW(c_), c_, y_): reconstructed signal, final (extrapolated)
        coefficients, and final dual variable.
    """
    if (y0 is None):
        y0 = torch.zeros_like(y)
    # F: smoothed indicator of the eta-ball around y (0.01 slack, 1e4 penalty);
    # used only for gap reporting, not inside the iteration.
    def F(_y):
        return (((_y - y).norm(p=2, dim=(- 1)) > (eta + 0.01)) * 10000.0)
    # Fstar: convex conjugate of F (support function of the eta-ball around y).
    def Fstar(_y):
        return ((eta * _y.norm(p=2, dim=(- 1))) + (y * _y).sum(dim=(- 1)))
    # Gstar: smoothed indicator of the l-inf unit ball (conjugate of the l1 norm).
    def Gstar(_y):
        return ((torch.max(torch.abs(_y), dim=(- 1))[0] > (1.0 + 0.01)) * 10000.0)
    c = c0.clone()
    c_ = c.clone()
    y_ = y0.clone()
    for it in tqdm(range(iter), desc='Primal-Dual iterations', disable=silent):
        # Dual ascent on the extrapolated primal point, then prox of F*.
        y_ = prox_l2_constraint_conjugate((y_ + (sigma * OpA(OpW(c_)))), (sigma * y), (sigma * eta))
        # Primal descent: gradient step on the data term, then soft-thresholding.
        (cold, c) = (c, shrink((c - (tau * OpW.adj(OpA.adj(y_)))), tau))
        # Over-relaxation / extrapolation of the primal iterate.
        c_ = (c + (theta * (c - cold)))
    if report_pd_gap:
        # Gap estimate; should approach zero at convergence.
        E = (((F(OpA(OpW(c_))) + c_.abs().sum(dim=(- 1))) + Fstar(y_)) + Gstar((- OpW.adj(OpA.adj(y_)))))
        print('\n\n Primal Dual Gap: \t {:1.4e} \n\n'.format(E.abs().max()))
    return (OpW(c_), c_, y_)
_name('slim_eval')  # NOTE(review): looks like a mangled decorator (its "@" and prefix were stripped); as written this is a bare call — confirm against VCS history
def test_slim_eval_large_inputdim(benchmark):
    # Benchmark slim evaluation with a larger-than-default input dimension.
    slim_eval_runner(benchmark, input_dim=100)
def scan_imageid_and_annoid(sequence_dirs):
    """Compute running [start, end) image-id and annotation-id ranges per sequence.

    Each directory in `sequence_dirs` must contain a 'scene_gt_info.json'
    mapping image keys to lists of per-object annotation records. Ids are
    accumulated across sequences in the given order.

    Returns:
        (image_start_end_ids, anno_start_end_ids): two lists of (start, end)
        tuples, one entry per input directory.
    """
    image_ranges = []
    anno_ranges = []
    next_image_id = 0
    next_anno_id = 0
    for sequence_dir in sequence_dirs:
        gt_info_path = osp.join(sequence_dir, 'scene_gt_info.json')
        with open(gt_info_path, 'r') as f:
            gt_info = json.load(f)
        n_images = len(gt_info)
        # One annotation per object record, summed over all images.
        n_annos = sum(len(records) for records in gt_info.values())
        image_ranges.append((next_image_id, next_image_id + n_images))
        anno_ranges.append((next_anno_id, next_anno_id + n_annos))
        next_image_id += n_images
        next_anno_id += n_annos
    return (image_ranges, anno_ranges)
class PFRNN_Policy(Policy):
    """Policy whose latent state is tracked by a particle-filter GRU (PFGRU).

    Observations are encoded by a CNN/FC encoder, optionally concatenated with
    an encoded previous action, advanced through a PFGRU cell that maintains
    `num_particles` weighted latent particles, and aggregated back into a
    single feature vector for the policy head.

    NOTE(review): depends on `Policy`, `encoder`, `PFGRUCell` and the
    aggregator classes (`MGF_Aggregator`, `Mean_Aggregator`, `GRU_Aggregator`)
    defined elsewhere in this module.
    """

    def __init__(self, action_space, nr_inputs, observation_type, action_encoding, cnn_channels, h_dim, encoder_batch_norm, policy_batch_norm, batch_size, resample, dropout=0.1, num_particles=10, num_features=256, particle_aggregation='mgf'):
        super().__init__(action_space, encoding_dimension=h_dim)
        self.h_dim = h_dim
        self.batch_size = batch_size
        self.encoder_batch_norm = encoder_batch_norm
        self.policy_batch_norm = policy_batch_norm
        self.observation_type = observation_type
        self.resample = resample
        self.dropout = dropout
        self.particle_aggregation = particle_aggregation
        self.num_features = num_features
        # Observation encoder and the flattened size of its output.
        self.encoder = encoder.get_encoder(observation_type, nr_inputs, cnn_channels, batch_norm=encoder_batch_norm)
        self.cnn_output_dimension = encoder.get_cnn_output_dimension(observation_type, cnn_channels)
        self.cnn_output_number = reduce(mul, self.cnn_output_dimension, 1)
        if (action_encoding > 0):
            # Width of the action input: one-hot size for Discrete spaces,
            # vector length otherwise.
            if (action_space.__class__.__name__ == 'Discrete'):
                action_shape = action_space.n
            else:
                action_shape = action_space.shape[0]
            if encoder_batch_norm:
                self.action_encoder = nn.Sequential(nn.Linear(action_shape, action_encoding), nn.BatchNorm1d(action_encoding), nn.ReLU())
            else:
                self.action_encoder = nn.Sequential(nn.Linear(action_shape, action_encoding), nn.ReLU())
        self.num_particles = num_particles
        self.rnn = PFGRUCell(self.num_particles, (self.cnn_output_number + action_encoding), self.cnn_output_number, h_dim, 0.9, True, 'relu')
        if (self.particle_aggregation == 'mgf'):
            agg = MGF_Aggregator
        elif (self.particle_aggregation == 'mean'):
            agg = Mean_Aggregator
        elif (self.particle_aggregation == 'gru'):
            agg = GRU_Aggregator
        else:
            # Fix: previously raised the misspelled name `NotImplementedErro`,
            # which itself blew up with a NameError instead of a useful error.
            raise NotImplementedError(f'Unknown particle_aggregation: {self.particle_aggregation!r}')
        self.agg = agg(self.num_particles, self.num_features, self.h_dim, self.cnn_output_number)
        if (observation_type == 'fc'):
            self.obs_criterion = nn.MSELoss()
        else:
            self.obs_criterion = nn.BCEWithLogitsLoss()
        self.train()
        self.reset_parameters()

    def new_latent_state(self):
        """Return zeroed (hidden, log-weight) tensors for batch_size * num_particles particles."""
        h0 = torch.zeros((self.batch_size * self.num_particles), self.h_dim)
        p0 = torch.zeros((self.batch_size * self.num_particles), 1)
        return (h0, p0)

    def logpdf(self, value, mean, var):
        """Diagonal-Gaussian log-density of `value` under N(mean, var), summed over dim 1."""
        return torch.sum(((((- 0.5) * ((value - mean) ** 2)) / var) - (0.5 * torch.log(((2 * var) * np.pi)))), dim=1)

    def vec_conditional_new_latent_state(self, latent_states, masks):
        """Zero out latent states where `masks` is 0 (e.g. at episode boundaries)."""
        h0 = latent_states[0]
        p0 = latent_states[1]
        return ((h0 * masks), (p0 * masks))

    def reset_parameters(self):
        """Kaiming-initialize all Conv/Linear weights and zero their biases."""
        # (Unused xavier/orthogonal/gain locals from the original were removed.)
        kaiming_normal = torch.nn.init.kaiming_normal_

        def weights_init():
            def fn(m):
                classname = m.__class__.__name__
                if ((classname.find('Conv') != (- 1)) or (classname.find('Linear') != (- 1))):
                    kaiming_normal(m.weight.data)
                    try:
                        m.bias.data.fill_(0)
                    except AttributeError:
                        # Fix: narrowed from a bare `except:`; layers built with
                        # bias=False have bias=None, which is the only case here.
                        pass
            return fn
        self.apply(weights_init())
        # NOTE(review): `self.dist` is presumably created by the Policy base
        # class — confirm. Shrinks the initial Gaussian mean-head scale.
        if (self.dist.__class__.__name__ == 'DiagGaussian'):
            self.dist.fc_mean.weight.data.mul_(0.01)

    def encode(self, observation, actions, previous_latent_state):
        """Encode an observation (+ previous action) and advance the PFGRU state.

        Returns:
            (state_tuple, merged_state): the new particle state tuple and the
            aggregated single feature vector.
        """
        x = self.encoder(observation)
        x = x.view((- 1), self.cnn_output_number)
        encoded_actions = None
        if hasattr(self, 'action_encoder'):
            encoded_actions = self.action_encoder(actions)
            # NOTE(review): the action encoder already ends in ReLU, so this
            # second ReLU is a no-op — kept for behavioral parity.
            encoded_actions = F.relu(encoded_actions)
        # NOTE(review): with action_encoding == 0 there is no action_encoder and
        # this cat would fail on encoded_actions=None — presumably never hit.
        x_act = torch.cat([x, encoded_actions], dim=1)
        if hasattr(self, 'rnn'):
            # Replicate the input once per particle.
            x_reshape = x_act.repeat(self.num_particles, 1)
            latent_state = self.rnn(x_reshape, previous_latent_state)
        state_tuple = latent_state
        (merged_state, particles, weight) = self.agg(state_tuple[0], state_tuple[(- 1)])
        return (state_tuple, merged_state)
class TestAverageCheckpoints(unittest.TestCase):
    """Tests for `average_checkpoints` (element-wise checkpoint parameter averaging)."""

    def test_average_checkpoints(self):
        """Averaging two checkpoints averages each tensor elementwise.

        Integer tensors average with truncation toward zero: (7+2)/2 -> 4,
        (8+2)/2 -> 5, (9+2)/2 -> 5 — hence the expected [4, 5, 5].
        """
        params_0 = collections.OrderedDict([('a', torch.DoubleTensor([100.0])), ('b', torch.FloatTensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])), ('c', torch.IntTensor([7, 8, 9]))])
        params_1 = collections.OrderedDict([('a', torch.DoubleTensor([1.0])), ('b', torch.FloatTensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])), ('c', torch.IntTensor([2, 2, 2]))])
        params_avg = collections.OrderedDict([('a', torch.DoubleTensor([50.5])), ('b', torch.FloatTensor([[1.0, 1.5, 2.0], [2.5, 3.0, 3.5]])), ('c', torch.IntTensor([4, 5, 5]))])
        # Persist both parameter dicts as checkpoint files on disk.
        (fd_0, path_0) = tempfile.mkstemp()
        (fd_1, path_1) = tempfile.mkstemp()
        torch.save(collections.OrderedDict([('model', params_0)]), path_0)
        torch.save(collections.OrderedDict([('model', params_1)]), path_1)
        output = average_checkpoints([path_0, path_1])['model']
        # Clean up the temp files before asserting.
        os.close(fd_0)
        os.remove(path_0)
        os.close(fd_1)
        os.remove(path_1)
        # Zipping items also checks that key insertion order is preserved.
        for ((k_expected, v_expected), (k_out, v_out)) in zip(params_avg.items(), output.items()):
            self.assertEqual(k_expected, k_out, 'Key mismatch - expected {} but found {}. (Expected list of keys: {} vs actual list of keys: {})'.format(k_expected, k_out, params_avg.keys(), output.keys()))
            np.testing.assert_allclose(v_expected.numpy(), v_out.numpy(), err_msg='Tensor value mismatch for key {}'.format(k_expected))

    def test_average_checkpoints_with_shared_parameters(self):
        """Shared (tied) parameters must still average correctly across checkpoints."""
        def _construct_model_with_shared_parameters(path, value):
            # Build a model with tied weights, set FC1 to a constant, save it.
            m = ModelWithSharedParameter()
            nn.init.constant_(m.FC1.weight, value)
            torch.save({'model': m.state_dict()}, path)
            return m
        tmpdir = tempfile.mkdtemp()
        paths = []
        path = os.path.join(tmpdir, 'm1.pt')
        m1 = _construct_model_with_shared_parameters(path, 1.0)
        paths.append(path)
        path = os.path.join(tmpdir, 'm2.pt')
        m2 = _construct_model_with_shared_parameters(path, 2.0)
        paths.append(path)
        path = os.path.join(tmpdir, 'm3.pt')
        m3 = _construct_model_with_shared_parameters(path, 3.0)
        paths.append(path)
        new_model = average_checkpoints(paths)
        # Each parameter (including the tied embedding) equals the 3-way mean.
        self.assertTrue(torch.equal(new_model['model']['embedding.weight'], (((m1.embedding.weight + m2.embedding.weight) + m3.embedding.weight) / 3.0)))
        self.assertTrue(torch.equal(new_model['model']['FC1.weight'], (((m1.FC1.weight + m2.FC1.weight) + m3.FC1.weight) / 3.0)))
        self.assertTrue(torch.equal(new_model['model']['FC2.weight'], (((m1.FC2.weight + m2.FC2.weight) + m3.FC2.weight) / 3.0)))
        shutil.rmtree(tmpdir)
class Net(nn.Module):
    """Two-layer fully connected network: 30 -> 50 -> 10, no nonlinearity."""

    def __init__(self, bias=True):
        super(Net, self).__init__()
        self.linear = nn.Linear(30, 50, bias=bias)
        self.linear2 = nn.Linear(50, 10, bias=bias)

    def forward(self, x):
        # Apply the two linear layers in sequence.
        return self.linear2(self.linear(x))
def make_figure1_data():
    """Generate the data files behind figure 1: a log-likelihood grid plus an EM fit.

    Writes an (alpha, p, ll) grid to ../results/fig1_data.csv and EM fitting
    statistics to ../results/fig_1_data_em.csv. Relies on the project-level
    `util` and `logit` modules; assumes the choices CSV exists under
    util.data_path — TODO confirm paths when running outside the repo layout.
    """
    D = util.read_data_single(('%s/choices/%s.csv' % (util.data_path, 'g-1.00-0.50-u-00')))
    step = 0.01
    # Uniform-choice likelihood: 1 / (choice-set size) for each chosen item.
    scores_uniform = np.array((1.0 / D.groupby('choice_id')['y'].aggregate(len)))
    with open('../results/fig1_data.csv', 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['alpha', 'p', 'll'])
        # Sweep degree exponent alpha over [0, 2) and mixture weight p over [0, 1).
        for alpha in np.arange(0.0, 2.0, step):
            # Degree-based score deg^alpha, smoothed to avoid log(0).
            D['score'] = np.exp((alpha * np.log((D.deg + util.log_smooth))))
            score_tot = D.groupby('choice_id')['score'].aggregate(np.sum)
            # Normalized preferential-attachment likelihood of the chosen items (y == 1).
            scores_pa = (np.array(D.loc[((D.y == 1), 'score')]) / np.array(score_tot))
            for p in np.arange(0.0, 1.0, step):
                # Mixture likelihood and its total log-likelihood over choices.
                scores = ((p * scores_uniform) + ((1 - p) * scores_pa))
                ll = sum(np.log((scores + util.log_smooth)))
                x = writer.writerow([alpha, p, ll])
    # Fit the same uniform + log-degree mixture by EM for comparison.
    m = logit.MixedLogitModel('fig1_em', D=D, vvv=2)
    m.add_uniform_model()
    m.add_log_degree_model()
    # Warm-start the degree model's first coefficient.
    m.models[1].u[0] = 0.25
    T = m.fit(n_rounds=100, etol=0.001, return_stats=True)
    T.to_csv('../results/fig_1_data_em.csv', index=False)
()  # NOTE(review): looks like a stripped "@click.command()" decorator — confirm against VCS history
('--src', help='Source directory with JPEG images.', metavar='PATH')  # NOTE(review): likely a stripped "@click.option(...)"
('--dest', help='Directory in which to write modified images.', metavar='PATH')  # NOTE(review): likely a stripped "@click.option(...)"
('--mpp', help='Microns per pixel.', metavar=float, required=True)  # NOTE(review): likely "@click.option(...)"; metavar=float is suspect (type=float intended?)
def main(src, dest, mpp):
    """Write a microns-per-pixel (MPP) EXIF tag into each JPEG under `src`.

    Images already carrying TIF_EXIF_KEY_MPP are skipped; tagged copies are
    saved into `dest` at quality 100. As written (decorators mangled, see
    NOTEs above) this is a plain function, not a CLI command.
    """
    source_jpgs = [f for f in os.listdir(src) if (sf.util.path_to_ext(f).lower() in ['jpeg', 'jpg'])]
    if (not len(source_jpgs)):
        print('No source jpg/jpeg images found.')
    for src_jpg in source_jpgs:
        with Image.open(os.path.join(src, src_jpg)) as img:
            exif = img.getexif()
            if (TIF_EXIF_KEY_MPP not in exif.keys()):
                # Tag missing: attach it and save a copy alongside the EXIF data.
                exif[TIF_EXIF_KEY_MPP] = mpp
                dest_jpg = os.path.join(dest, src_jpg)
                img.save(dest_jpg, exif=exif, quality=100)
                print(f'Wrote MPP={mpp} to {dest_jpg}')
            else:
                print(f'Skipping {src_jpg}; MPP already written')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.