code stringlengths 101 5.91M |
|---|
def resolve_act_layer(kwargs, default='relu'):
    """Pop 'act_layer' from kwargs and resolve it to a Keras activation layer.

    Args:
        kwargs: dict possibly containing an 'act_layer' key (the key is removed).
        default: activation name used when 'act_layer' is absent.

    Returns:
        A layer class (or functools.partial of one) for 'relu' / 'relu6'.

    Raises:
        NotImplementedError: for any other activation name.
    """
    act_name = kwargs.pop('act_layer', default)
    if act_name == 'relu':
        return tf.keras.layers.ReLU
    elif act_name == 'relu6':
        # ReLU capped at 6, as used by MobileNet-style architectures.
        return partial(tf.keras.layers.ReLU, max_value=6.0)
    else:
        # Bug fix: `raise NotImplemented` raises a TypeError at runtime because
        # NotImplemented is not an exception; NotImplementedError is intended.
        raise NotImplementedError(f'Unknown activation: {act_name}')
def fid_calculate_activation_statistics(act):
    """Gaussian statistics of a batch of activations for the FID metric.

    Returns the per-feature mean vector and the feature covariance matrix
    (features along columns).
    """
    mean_vec = np.mean(act, axis=0)
    cov_mat = np.cov(act, rowvar=False)
    return (mean_vec, cov_mat)
class BaseTask(Problem):
    """Base pymoo-style optimization problem over a pool of sequence candidates.

    Decision variables are integers in groups of 4 per batch element:
    (candidate index, token position, sampling-vocab token index, op type),
    where op types are ['sub', 'ins', 'del'] (or only ['sub'] when length
    changes are disallowed).
    """

    def __init__(self, tokenizer, candidate_pool, obj_dim, transform=(lambda x: x), batch_size=1, candidate_weights=None, max_len=None, max_ngram_size=1, allow_len_change=True, **kwargs):
        self.op_types = (['sub', 'ins', 'del'] if allow_len_change else ['sub'])
        if (max_len is None):
            # Longest encoded pool sequence, excluding special tokens.
            # NOTE(review): with an empty pool this max() raises ValueError
            # before the len == 0 branch below is reached — confirm whether an
            # empty pool with max_len=None is a supported input.
            max_len = (max([(len(tokenizer.encode(cand.mutant_residue_seq)) - 2) for cand in candidate_pool]) - 1)
        if (len(candidate_pool) == 0):
            # Degenerate scalar bounds when there is nothing to select from.
            xl = 0.0
            xu = 1.0
        else:
            # Per-group bounds: candidate index, position (2*max_len allows
            # insertions), token index, and op-type index, tiled per batch.
            xl = np.array((([0] * 4) * batch_size))
            xu = np.array(([(len(candidate_pool) - 1), (2 * max_len), (len(tokenizer.sampling_vocab) - 1), (len(self.op_types) - 1)] * batch_size))
        n_var = (4 * batch_size)
        super().__init__(n_var=n_var, n_obj=obj_dim, n_constr=0, xl=xl, xu=xu, type_var=int)
        self.tokenizer = tokenizer
        self.candidate_pool = list(candidate_pool)
        self.candidate_weights = candidate_weights
        self.obj_dim = obj_dim
        self.transform = transform
        self.batch_size = batch_size
        self.max_len = max_len
        self.max_ngram_size = max_ngram_size
        self.allow_len_change = allow_len_change

    def make_new_candidates(self, base_candidates, new_seqs):
        """Create one new candidate per (base candidate, new sequence) pair by
        diffing the sequences into mutation ops and applying them."""
        assert (base_candidates.shape[0] == new_seqs.shape[0])
        new_candidates = []
        for (b_cand, n_seq) in zip(base_candidates, new_seqs):
            mutation_ops = mutation_list(b_cand.mutant_residue_seq, n_seq, self.tokenizer)
            new_candidates.append(b_cand.new_candidate(mutation_ops, self.tokenizer))
        return np.stack(new_candidates)

    def task_setup(self, *args, **kwargs):
        """Subclass hook: prepare task-specific state."""
        raise NotImplementedError

    def x_to_query_batches(self, x):
        """Reshape a flat decision vector into (num_batches, batch_size, 4)."""
        return x.reshape((- 1), self.batch_size, 4)

    def query_batches_to_x(self, query_batches):
        """Inverse of x_to_query_batches: flatten back to (num_batches, n_var)."""
        return query_batches.reshape((- 1), self.n_var)

    def _evaluate(self, x, out, *args, **kwargs):
        """Subclass hook: pymoo evaluation entry point."""
        raise NotImplementedError

    def score(self, str_array):
        """Subclass hook: score an array of sequence strings."""
        raise NotImplementedError

    def is_feasible(self, candidates):
        """Boolean mask: True for candidates not exceeding max_len (all True
        when no max_len is configured)."""
        if (self.max_len is None):
            is_feasible = np.ones(candidates.shape).astype(bool)
        else:
            is_feasible = np.array([(len(cand) <= self.max_len) for cand in candidates]).reshape((- 1))
        return is_feasible
class CycleFC(nn.Module):
    """Cycle fully-connected layer (CycleMLP-style).

    Implements a channel-wise FC whose input sampling location cycles along one
    spatial axis per channel, realized as a 1x1 deformable convolution with a
    fixed, precomputed offset buffer. Only stride=1 and padding=0 are supported.
    """

    def __init__(self, in_channels: int, out_channels: int, kernel_size, stride: int=1, padding: int=0, dilation: int=1, groups: int=1, bias: bool=True):
        super(CycleFC, self).__init__()
        if ((in_channels % groups) != 0):
            raise ValueError('in_channels must be divisible by groups')
        if ((out_channels % groups) != 0):
            raise ValueError('out_channels must be divisible by groups')
        if (stride != 1):
            raise ValueError('stride must be 1')
        if (padding != 0):
            raise ValueError('padding must be 0')
        self.in_channels = in_channels
        self.out_channels = out_channels
        # kernel_size is kept as given (a (kh, kw) pair where one side is 1);
        # it determines the cycling extent, not an actual conv kernel.
        self.kernel_size = kernel_size
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.groups = groups
        # 1x1 weight: the spatial mixing comes entirely from the offsets.
        self.weight = nn.Parameter(torch.empty(out_channels, (in_channels // groups), 1, 1))
        if bias:
            self.bias = nn.Parameter(torch.empty(out_channels))
        else:
            self.register_parameter('bias', None)
        # Fixed (non-learned) sampling offsets, registered so they move with
        # the module across devices/dtypes.
        self.register_buffer('offset', self.gen_offset())
        self.reset_parameters()

    def reset_parameters(self) -> None:
        """Kaiming-uniform weight init; bias bounded by 1/sqrt(fan_in),
        mirroring nn.Conv2d's default initialization."""
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if (self.bias is not None):
            (fan_in, _) = init._calculate_fan_in_and_fan_out(self.weight)
            bound = (1 / math.sqrt(fan_in))
            init.uniform_(self.bias, (- bound), bound)

    def gen_offset(self):
        """Build the (1, 2*C, 1, 1) offset tensor: per channel, one (dy, dx)
        pair that cycles along the non-unit kernel axis, centered around 0."""
        offset = torch.empty(1, (self.in_channels * 2), 1, 1)
        start_idx = ((self.kernel_size[0] * self.kernel_size[1]) // 2)
        # Exactly one spatial axis may cycle.
        assert ((self.kernel_size[0] == 1) or (self.kernel_size[1] == 1)), self.kernel_size
        for i in range(self.in_channels):
            if (self.kernel_size[0] == 1):
                # Cycle along width: dy = 0, dx in [-kw//2, kw//2).
                offset[(0, ((2 * i) + 0), 0, 0)] = 0
                offset[(0, ((2 * i) + 1), 0, 0)] = (((i + start_idx) % self.kernel_size[1]) - (self.kernel_size[1] // 2))
            else:
                # Cycle along height: dx = 0, dy in [-kh//2, kh//2).
                offset[(0, ((2 * i) + 0), 0, 0)] = (((i + start_idx) % self.kernel_size[0]) - (self.kernel_size[0] // 2))
                offset[(0, ((2 * i) + 1), 0, 0)] = 0
        return offset

    def forward(self, input: Tensor) -> Tensor:
        """Apply the cycled 1x1 deformable convolution; output shape matches
        a 1x1 conv with the configured out_channels."""
        (B, C, H, W) = input.size()
        return deform_conv2d_tv(input, self.offset.expand(B, (- 1), H, W), self.weight, self.bias, stride=self.stride, padding=self.padding, dilation=self.dilation)

    def extra_repr(self) -> str:
        """Conv2d-style repr, omitting default-valued options."""
        s = (self.__class__.__name__ + '(')
        s += '{in_channels}'
        s += ', {out_channels}'
        s += ', kernel_size={kernel_size}'
        s += ', stride={stride}'
        s += (', padding={padding}' if (self.padding != (0, 0)) else '')
        s += (', dilation={dilation}' if (self.dilation != (1, 1)) else '')
        s += (', groups={groups}' if (self.groups != 1) else '')
        s += (', bias=False' if (self.bias is None) else '')
        s += ')'
        return s.format(**self.__dict__)
def perm_invert(p):
    """Return the inverse of permutation `p`, i.e. q such that q[p[i]] == i."""
    inverse = [None] * len(p)
    for src, dst in enumerate(p):
        inverse[dst] = src
    return inverse
def _suggest_semantic_version(s):
    """Heuristically coerce an arbitrary version string into a semver string.

    Applies the module-level _REPLACEMENTS regex substitutions, pads/truncates
    the numeric prefix to exactly three components, then normalizes any
    remaining suffix via _SUFFIX_REPLACEMENTS. Returns None when the result
    still fails is_semver().
    """
    result = s.strip().lower()
    # _REPLACEMENTS is a module-level list of (compiled pattern, replacement).
    for (pat, repl) in _REPLACEMENTS:
        result = pat.sub(repl, result)
    if (not result):
        result = '0.0.0'
    # Split into a numeric dotted prefix and a free-form suffix.
    m = _NUMERIC_PREFIX.match(result)
    if (not m):
        prefix = '0.0.0'
        suffix = result
    else:
        prefix = m.groups()[0].split('.')
        prefix = [int(i) for i in prefix]
        # Pad to at least major.minor.patch.
        while (len(prefix) < 3):
            prefix.append(0)
        if (len(prefix) == 3):
            suffix = result[m.end():]
        else:
            # Extra numeric components are folded into the suffix.
            suffix = ('.'.join([str(i) for i in prefix[3:]]) + result[m.end():])
            prefix = prefix[:3]
        prefix = '.'.join([str(i) for i in prefix])
        suffix = suffix.strip()
    if suffix:
        for (pat, repl) in _SUFFIX_REPLACEMENTS:
            suffix = pat.sub(repl, suffix)
    if (not suffix):
        result = prefix
    else:
        # '-' marks a pre-release ('dev'), '+' marks build metadata.
        sep = ('-' if ('dev' in suffix) else '+')
        result = ((prefix + sep) + suffix)
    if (not is_semver(result)):
        result = None
    return result
def resolve_entity_map(qid, query, el_results, el_extractor):
    """Normalize entity friendly names for one query's entity-linking results.

    Mutates (and returns) el_results[qid]['entities'] in place, replacing each
    entry with the first five words of its friendly name after stripping the
    ';' delimiter. Returns an empty dict when qid has no linking results.
    """
    _delimiter = ';'
    if qid not in el_results:
        return {}
    entity_map = el_results[qid]['entities']
    entities = set(entity_map.keys())  # NOTE(review): currently unused
    for key in list(entity_map):
        friendly = entity_map[key]['friendly_name']
        words = friendly.replace(_delimiter, ' ').split()
        entity_map[key] = ' '.join(words[:5])
    # Literal detection is executed (for its side effects / future use),
    # but the resulting set does not contribute to the return value.
    literals = set()
    for mention in el_extractor.detect_mentions(query):
        literals.add(el_extractor.process_literal(mention))
    return entity_map
def save_config(logdir, config):
    """Serialize ``config.__dict__`` as pretty-printed JSON to <logdir>/params.json."""
    destination = os.path.join(logdir, 'params.json')
    print('[*] PARAM path: %s' % destination)
    with open(destination, 'w') as handle:
        json.dump(config.__dict__, handle, indent=4, sort_keys=True)
def train(args, train_batches, model, tokenizer, evaluator):
    """Distill a student coreference model against precomputed teacher logits.

    Runs mixed-precision training (AdamW + linear warmup + GradScaler) over
    `train_batches`, loading per-document teacher logits and top-k indices
    from disk, logging loss to wandb, and periodically evaluating and saving
    the best-f1 checkpoint.

    Returns:
        (global_step, mean training loss per step).
    """
    t_total = (len(train_batches) * args.train_epochs)
    # Parameter grouping: head params ('coref'/'mention'/'antecedent') may use
    # a separate LR; bias/LayerNorm weights are excluded from weight decay.
    no_decay = ['bias', 'LayerNorm.weight']
    head_params = ['coref', 'mention', 'antecedent']
    model_decay = [p for (n, p) in model.named_parameters() if ((not any(((hp in n) for hp in head_params))) and (not any(((nd in n) for nd in no_decay))))]
    model_no_decay = [p for (n, p) in model.named_parameters() if ((not any(((hp in n) for hp in head_params))) and any(((nd in n) for nd in no_decay)))]
    head_decay = [p for (n, p) in model.named_parameters() if (any(((hp in n) for hp in head_params)) and (not any(((nd in n) for nd in no_decay))))]
    head_no_decay = [p for (n, p) in model.named_parameters() if (any(((hp in n) for hp in head_params)) and any(((nd in n) for nd in no_decay)))]
    head_learning_rate = (args.head_learning_rate if args.head_learning_rate else args.learning_rate)
    optimizer_grouped_parameters = [{'params': model_decay, 'lr': args.learning_rate, 'weight_decay': args.weight_decay}, {'params': model_no_decay, 'lr': args.learning_rate, 'weight_decay': 0.0}, {'params': head_decay, 'lr': head_learning_rate, 'weight_decay': args.weight_decay}, {'params': head_no_decay, 'lr': head_learning_rate, 'weight_decay': 0.0}]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), eps=args.adam_epsilon)
    # Linear schedule with 10% of total steps as warmup.
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=(t_total * 0.1), num_training_steps=t_total)
    scaler = torch.cuda.amp.GradScaler()
    logger.info('***** Running training *****')
    logger.info('  Num Epochs = %d', args.train_epochs)
    logger.info('  Total optimization steps = %d', t_total)
    (global_step, tr_loss, logging_loss) = (0, 0.0, 0.0)
    (best_f1, best_global_step) = ((- 1), (- 1))
    train_iterator = tqdm(range(int(args.train_epochs)), desc='Epoch')
    # Teacher logits are stored alongside the training dataset file.
    teacher_logits_dir = os.path.dirname(args.dataset_files['train'])
    for _ in train_iterator:
        epoch_iterator = tqdm(train_batches, desc='Iteration')
        for (step, batch) in enumerate(epoch_iterator):
            batch['input_ids'] = torch.tensor(batch['input_ids'], device=args.device)
            batch['attention_mask'] = torch.tensor(batch['attention_mask'], device=args.device)
            if ('leftovers' in batch):
                batch['leftovers']['input_ids'] = torch.tensor(batch['leftovers']['input_ids'], device=args.device)
                batch['leftovers']['attention_mask'] = torch.tensor(batch['leftovers']['attention_mask'], device=args.device)
            # Doc keys double as filenames; '/' is not path-safe.
            keys = [doc_key.replace('/', '_') for doc_key in batch['doc_key']]
            teacher_coref_logits = torch.from_numpy(np.stack([np.load(os.path.join(teacher_logits_dir, (k + '_coref_logits.npy'))) for k in keys], axis=0)).to(args.device)
            topk_1d_indices = torch.from_numpy(np.stack([np.load(os.path.join(teacher_logits_dir, (k + '_top_indices.npy'))) for k in keys], axis=0)).to(args.device)
            model.zero_grad()
            model.train()
            with torch.cuda.amp.autocast():
                outputs = model(batch, topk_1d_indices=topk_1d_indices, return_all_outputs=True)
                span_mask = outputs[0]
                student_coref_logits = outputs[(- 1)]
                # Soft cross-entropy between teacher and student distributions.
                loss = softXEnt(teacher_logits=teacher_coref_logits, student_logits=student_coref_logits, span_mask=span_mask)
            tr_loss += loss.item()
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scheduler.step()
            scaler.update()
            global_step += 1
            if ((global_step % args.logging_steps) == 0):
                # Mean loss over the logging window.
                loss = ((tr_loss - logging_loss) / args.logging_steps)
                logger.info(f'''
loss step {global_step}: {loss}''')
                wandb.log({'loss': loss}, step=global_step)
                logging_loss = tr_loss
            if ((global_step % args.eval_steps) == 0):
                results = evaluator.evaluate(model, prefix=f'step_{global_step}')
                wandb.log(results, step=global_step)
                f1 = results['f1']
                if (f1 > best_f1):
                    # New best: record and checkpoint.
                    (best_f1, best_global_step) = (f1, global_step)
                    wandb.run.summary['best_f1'] = best_f1
                    output_dir = os.path.join(args.output_dir, f'model')
                    save_all(tokenizer=tokenizer, model=model, output_dir=output_dir)
                logger.info(f'best f1 is {best_f1} on global step {best_global_step}')
                with open(os.path.join(args.output_dir, f'best_f1.json'), 'w') as f:
                    json.dump({'best_f1': best_f1, 'best_global_step': best_global_step}, f)
    return (global_step, (tr_loss / global_step))
def load_or_extract_features(args, cfg):
    """Load cached zero-shot features from disk, or extract and return them.

    Chooses a tokenizer from cfg (CLIP SimpleTokenizer, a HuggingFace
    tokenizer for 'hf_*' names, or None), then either loads the three arrays
    (image features, text features, image labels) from a cached .npy file or
    recomputes them from the dataset.

    Returns:
        (image_features, text_features, image_labels) as numpy arrays.
    """
    if (cfg.MODEL.SPEC.TEXT.TOKENIZER == 'clip'):
        tokenizer = SimpleTokenizer()
    elif ('hf_' in cfg.MODEL.SPEC.TEXT.TOKENIZER):
        # Strip the 'hf_' prefix to get the HuggingFace model name.
        tokenizer = HFPTTokenizer(pt_name=cfg.MODEL.SPEC.TEXT.TOKENIZER[3:])
    else:
        tokenizer = None
    # Cache filename encodes model name and knowledge-source flags.
    feature_file = os.path.join(cfg.DATASET.ROOT, (((('zeroshot_features_' + cfg.MODEL.NAME.replace('/', '')) + f'_wiki_{cfg.KNOWLEDGE.WIKITIONARY.USE_DEFINITION}') + f'_gpt3_{cfg.KNOWLEDGE.GPT3.USE_GPT3}') + '.npy'))
    logging.info(f'feature_file: {feature_file}')
    if os.path.exists(feature_file):
        logging.info('Loading features from existing files.')
        # Three arrays were saved back-to-back into the same file.
        with open(feature_file, 'rb') as fread:
            image_features = np.load(fread)
            text_features = np.load(fread)
            image_labels = np.load(fread)
    else:
        (image_features, image_labels) = extract_features(cfg, test_split_only=True)
        text_features = extract_text_features(cfg, tokenizer, args)
    logging.info(f'Test size is {image_features.shape[0]}.')
    return (image_features, text_features, image_labels)
def rand_float(input_shape):
    """Uniform [0, 1) float32 array of the given shape."""
    samples = np.random.rand(*input_shape)
    return samples.astype(np.float32)
def get_instr_trace_count(instr: LeanPreprocessedCodeElement) -> int:
    """Return how many trace entries a preprocessed Lean instruction generates.

    The isinstance chain is order-sensitive: subclasses (e.g. tail calls as a
    kind of function call) must be matched via their branch's inner check.

    Raises:
        Exception: for instruction types with no defined trace count.
    """
    if isinstance(instr, LeanPreprocessedAddAp):
        return 1
    if isinstance(instr, LeanPreprocessedAssertEq):
        return 1
    if (isinstance(instr, LeanPreprocessedConst) or isinstance(instr, LeanPreprocessedNop)):
        # Constants and no-ops emit nothing.
        return 0
    count = 0
    if isinstance(instr, LeanPreprocessedWithAsserts):
        # Count embedded asserts and temp-var allocations that carry an add_ap.
        count += len([asrt for asrt in instr.asserts if (isinstance(asrt, LeanPreprocessedAssertEq) or (isinstance(asrt, LeanPreprocessedTempVarAlloc) and (asrt.add_ap_instr is not None)))])
    if (isinstance(instr, LeanPreprocessedCompoundAssertEq) or isinstance(instr, LeanPreprocessedReference)):
        return count
    if isinstance(instr, LeanPreprocessedTempVar):
        return (count + (1 if instr.add_ap_instr else 0))
    if isinstance(instr, LeanPreprocessedFuncCall):
        # Tail calls cost an extra entry compared to plain calls.
        return (count + (2 if isinstance(instr, LeanPreprocessedTailCall) else 1))
    if isinstance(instr, LeanPreprocessedIf):
        return (count + 1)
    if isinstance(instr, LeanPreprocessedJumpToLabelInstruction):
        return (count + 1)
    if isinstance(instr, LeanPreprocessedReturn):
        return (count + 1)
    raise Exception('Could not determine trace count of instruction.')
class MulticodeKScheduler(transformers.TrainerCallback):
    """Trainer callback decaying models.BaseSnapFunction.k from k_max to k_min
    over `decay_steps` optimizer steps with a polynomial (power) schedule."""

    def __init__(self, k_max, k_min, decay_steps, decay_power=1):
        self.k_max = k_max
        self.k_min = k_min
        # Last step index at which the schedule reaches k_min.
        self.decay_steps = decay_steps - 1
        self.decay_power = decay_power

    def k_scheduler(self, step):
        """Integer k for the given step; progress is clamped at 1."""
        progress = min(1, ((step / self.decay_steps) ** (1 / self.decay_power)))
        return int((self.k_max - ((self.k_max - self.k_min) * progress)))

    def on_step_begin(self, args: transformers.TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, **kwargs):
        models.BaseSnapFunction.k = self.k_scheduler(state.global_step)

    def on_train_begin(self, *args, **kwargs):
        # Start at the maximum k.
        models.BaseSnapFunction.k = self.k_max

    def on_train_end(self, *args, **kwargs):
        # Leave the model at the final (minimum) k.
        models.BaseSnapFunction.k = self.k_min
def buffer_to_bits(buffer):
    """Expand a raw byte buffer into an array of bits, LSB-first per byte."""
    command_bytes = np.frombuffer(buffer, dtype=np.uint8)
    return np.unpackbits(command_bytes, bitorder='little')
def main():
    """Parse CLI options, load the search-target config, and run it.

    The --large flag switches to the bigger per-class sample count.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-g', '--path', type=str, default='search_targets/default.json')
    parser.add_argument('-l', '--large', action='store_true')
    args = parser.parse_args()
    with open(args.path, 'r') as f:
        options = json.load(f)
    options['nsamples_per_class'] = 1000 if args.large else 100
    run(options)
def main(unused_argv):
    """Train, evaluate, export, and predict with a wide-and-deep estimator.

    Builds a DNNLinearCombinedClassifier from the configured feature columns,
    runs train_and_evaluate with a best-model exporter, prints final eval
    metrics, and writes per-example predictions to predictions.csv.
    """
    (wide_columns, deep_columns) = create_feature_columns()
    # Shared with input/serving functions via the module-level variable.
    global total_feature_columns
    total_feature_columns = (wide_columns + deep_columns)
    estimator = tf.estimator.DNNLinearCombinedClassifier(linear_feature_columns=wide_columns, dnn_feature_columns=deep_columns, dnn_hidden_units=FLAGS.hidden_units.split(','), dnn_optimizer='Adam', loss_reduction='weighted_mean', batch_norm=True, config=tf.estimator.RunConfig(model_dir=FLAGS.model_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps))
    train_spec = tf.estimator.TrainSpec(input_fn=(lambda : train_input_fn(filepath=FLAGS.train_data, example_parser=example_parser, batch_size=FLAGS.batch_size, num_epochs=FLAGS.num_epochs, shuffle_buffer_size=FLAGS.shuffle_buffer_size)), max_steps=FLAGS.train_steps)
    # Serving receiver parses serialized tf.Examples with the column spec.
    feature_spec = tf.feature_column.make_parse_example_spec(total_feature_columns)
    serving_input_receiver_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec)
    exporters = [tf.estimator.BestExporter(name='best_exporter', serving_input_receiver_fn=serving_input_receiver_fn, exports_to_keep=5)]
    eval_spec = tf.estimator.EvalSpec(input_fn=(lambda : eval_input_fn(filepath=FLAGS.eval_data, example_parser=example_parser, batch_size=FLAGS.batch_size)), throttle_secs=600, steps=None, exporters=exporters)
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
    metrics = estimator.evaluate(input_fn=(lambda : eval_input_fn(filepath=FLAGS.eval_data, example_parser=example_parser, batch_size=FLAGS.batch_size)))
    for key in sorted(metrics):
        print(('%s: %s' % (key, metrics[key])))
    results = estimator.predict(input_fn=(lambda : eval_input_fn(filepath=FLAGS.eval_data, example_parser=example_parser, batch_size=FLAGS.batch_size)))
    predicts_df = pd.DataFrame.from_dict(results)
    # Keep only the positive-class probability per row.
    predicts_df['probabilities'] = predicts_df['probabilities'].apply((lambda x: x[0]))
    predicts_df.to_csv('predictions.csv')
    print('after evaluate')
# NOTE(review): the decorator on this session was corrupted in the source
# (it read `(reuse_venv=True)`); reconstructed as a nox session decorator —
# confirm against the original noxfile.
@nox.session(reuse_venv=True)
def diagnostics(session):
    """Nox session: install dev requirements and run the kernel-diagnostics script."""
    session.install(*requirements_dev)
    session.run('python', 'dev/kernel-diagnostics.py', *session.posargs)
def main(params):
    """Build n-gram document frequencies from a caption dataset and pickle them.

    Writes two pickle files next to params['output_pkl']: '<out>-words.p'
    (frequencies keyed by words) and '<out>-idxs.p' (keyed by vocab indices).
    """
    imgs = json.load(open(params['input_json'], 'r'))
    itow = json.load(open(params['dict_json'], 'r'))['ix_to_word']
    wtoi = {w: i for (i, w) in itow.items()}
    imgs = imgs['images']
    (ngram_words, ngram_idxs, ref_len) = build_dict(imgs, wtoi, params)
    # Bug fix: pickle output files must be opened in binary mode ('wb');
    # text mode ('w') raises TypeError when pickle writes bytes.
    utils.pickle_dump({'document_frequency': ngram_words, 'ref_len': ref_len}, open((params['output_pkl'] + '-words.p'), 'wb'))
    utils.pickle_dump({'document_frequency': ngram_idxs, 'ref_len': ref_len}, open((params['output_pkl'] + '-idxs.p'), 'wb'))
def pesq_wb(predicted, target, sampling_frequency=16000):
    """Wide-band PESQ score of `predicted` against `target`.

    Seeds torch for deterministic behavior before computing the metric.
    """
    _ = torch.manual_seed(1)
    metric = PerceptualEvaluationSpeechQuality(sampling_frequency, 'wb')
    return metric(predicted, target)
def googlenet_arg_scope(weight_decay=0.0002):
    """Default slim arg_scope for GoogLeNet-style models.

    Applies L2 weight regularization to conv and FC layers, and variance
    scaling init + ReLU to conv layers.

    Args:
        weight_decay: L2 regularization strength.

    Returns:
        The combined slim arg_scope.
    """
    with slim.arg_scope([slim.conv2d, slim.fully_connected], weights_regularizer=slim.l2_regularizer(weight_decay)):
        with slim.arg_scope([slim.conv2d], weights_initializer=slim.variance_scaling_initializer(), activation_fn=tf.nn.relu) as sc:
            return sc
def read_file_multi_lab(lab_fp, score_fp, pred_thresh):
    """Read a multi-label gold file and a prediction/score file and tally matches.

    lab_fp lines are 'chan_id<TAB>label'; score_fp lines start with
    'chan_id<TAB>pred|score,pred|score,...'. Spaces inside labels/predictions
    are stripped. A prediction is accepted when pred_thresh is None or its
    score is >= pred_thresh.

    Returns:
        (list of gold labels for scored channels,
         per-label counts of matched prediction or 'NONE',
         total accepted-prediction counts,
         number of scored instances).
    """
    gold_by_chan = {}
    for line in open(lab_fp):
        chan_id, lab = line.strip('\n').split('\t')
        gold_by_chan.setdefault(chan_id, set()).add(lab.replace(' ', ''))
    lab_with_pred_l = []
    lab_pred_d = collections.defaultdict((lambda : collections.defaultdict(int)))
    tot_pred_d = collections.defaultdict(int)
    tot_insts = 0
    for line in open(score_fp):
        chan_id, pred_l_str = line.strip('\n').split('\t')[0:2]
        if chan_id not in gold_by_chan:
            # Skip channels without gold labels.
            continue
        tot_insts += 1
        accepted = set()
        for pred_perc_str in pred_l_str.split(','):
            pred, pred_perc = pred_perc_str.split('|')
            pred = pred.replace(' ', '')
            if pred_thresh is None or float(pred_perc) >= pred_thresh:
                accepted.add(pred)
        for lab in gold_by_chan[chan_id]:
            lab_with_pred_l.append(lab)
            lab_pred_d[lab][lab if lab in accepted else 'NONE'] += 1
        for pred in accepted:
            tot_pred_d[pred] += 1
    return (lab_with_pred_l, lab_pred_d, tot_pred_d, tot_insts)
def __is_protected(method_name: str) -> bool:
    """True for single-leading-underscore ("protected") names; False for
    public names and dunder-style names."""
    has_underscore_prefix = method_name.startswith('_')
    looks_dunder = method_name.startswith('__')
    return has_underscore_prefix and not looks_dunder
def post_process_hook(out, pb, state, extend=False):
    """SfePy post-process hook: on the final time step, plot the probed
    temperature history as a heatmap plus bottom/top traces and save the figure.

    Relies on the module-level `probe_results` accumulated during the run.
    Returns `out` unchanged.
    """
    ts = pb.ts
    # Only act once, on the last time step.
    if (ts.step == (ts.n_step - 1)):
        (fig, (ax1, ax2)) = plt.subplots(nrows=2)
        # Rows = time steps, columns = probe points along the geometry.
        temperature_image = nm.array(probe_results).squeeze()
        m = ax1.imshow(temperature_image.T, origin='lower', aspect='auto')
        ax1.set_xlabel('time step')
        ax1.set_ylabel('distance across build\nplate and cylinder')
        fig.colorbar(m, ax=ax1, label='temperature')
        # Time series at the two ends of the probe line.
        ax2.plot(temperature_image.T[0], label='bottom')
        ax2.plot(temperature_image.T[(- 1)], label='top')
        ax2.set_xlabel('time step')
        ax2.set_ylabel('temperature (C)')
        ax2.legend()
        fig.tight_layout()
        fig.savefig(os.path.join(pb.output_dir, 'heat_probe_time_evolution.png'), bbox_inches='tight')
    return out
def sorted_nicely(l):
    """Sort items by the natural (human) order of their first element, so
    embedded numeric runs compare as numbers ('a2' before 'a10')."""
    def natural_key(item):
        pieces = re.split('([0-9]+)', item[0])
        return [int(piece) if piece.isdigit() else piece for piece in pieces]
    return sorted(l, key=natural_key)
def mincut_split_darts(dist_avg, split_num):
    """Brute-force min-cut split of nodes {0..N-1} into two groups.

    Enumerates all subsets of size 2..N//2 and picks the partition minimizing
    the total within-group distance (sum of dist_avg over node pairs on the
    same side of the cut).

    Returns:
        ([group0_array, group1_array], best_within_group_cost).
    """
    assert (split_num == 2), 'always split into 2 groups for darts space (when using gradient to split)'
    assert isinstance(dist_avg, np.ndarray)
    nodes = list(range(dist_avg.shape[0]))
    best_cost = 100000
    for size in range(1, len(nodes) + 1):
        for subset in combinations(nodes, size):
            # Only consider balanced-ish subsets (complement covers the rest).
            if not (2 <= len(subset) <= len(nodes) // 2):
                continue
            members = set(subset)
            cost = 0
            for u, v in combinations(nodes, 2):
                # Pairs on the same side of the cut contribute their distance.
                if (u in members) == (v in members):
                    cost += dist_avg[(u, v)]
            if cost < best_cost:
                group0 = np.array([i for i in nodes if i in members])
                group1 = np.array([i for i in nodes if i not in members])
                best_cost = cost
                best_groups = [group0, group1]
    return (best_groups, best_cost)
def test_callbacks():
    """Exercise every (caller, callback, user_data) combination from the
    module-level CALLERS / FUNCS / USER_DATAS registries.

    Each callback is checked three ways: plain call, error propagation on
    ERROR_VALUE, and a variant bound to extra data (closure for the pure-Python
    callback, LowLevelCallable with user_data otherwise).
    """
    def check(caller, func, user_data):
        caller = CALLERS[caller]
        func = FUNCS[func]()
        user_data = USER_DATAS[user_data]()
        if (func is callback_python):
            # Pure-Python callback: bind the extra argument via a closure.
            def func2(x):
                return func(x, 2.0)
        else:
            # C-level callback: bind via LowLevelCallable user_data.
            func2 = LowLevelCallable(func, user_data)
            func = LowLevelCallable(func)
        assert_equal(caller(func, 1.0), 2.0)
        assert_raises(ValueError, caller, func, ERROR_VALUE)
        assert_equal(caller(func2, 1.0), 3.0)
    for caller in sorted(CALLERS.keys()):
        for func in sorted(FUNCS.keys()):
            for user_data in sorted(USER_DATAS.keys()):
                check(caller, func, user_data)
def get_node_ratio(history_data, eval_data):
    """Fraction of nodes in the eval split that never appear in history.

    Both arguments are mappings with 'sources' and 'destinations' sequences.
    """
    eval_nodes = set(eval_data['sources']) | set(eval_data['destinations'])
    hist_nodes = set(history_data['sources']) | set(history_data['destinations'])
    unseen = eval_nodes - hist_nodes
    return float(len(unseen) / len(eval_nodes))
def test_multi_objective_correctness():
    """The multi-objective descent vector must equal the alpha-weighted sum of
    the individual losses, with alphas matching the expected baseline.

    Relies on module-level fixtures: multi_cdv, losses, gradient, loss_1,
    loss_2, alpha_base.
    """
    (final_loss, alphas) = multi_cdv.get_descent_vector(losses, gradient)
    assert (final_loss.data == ((alphas[0] * loss_1) + (alphas[1] * loss_2)).data)
    assert (alphas == alpha_base)
def Unet_with_inception(input_img, n_filters=16, dropout=0.3, batch_norm=True):
    """Build a U-Net-style Keras model with inception-like conv blocks.

    Encoder (convB downsampling + convA blocks) feeds a decoder (convC
    upsampling) with skip connections via concatenate; the head is a 4-class
    softmax over 1x1 convs.

    NOTE(review): the `dropout` parameter is never used in this body, and
    `n_filters` only affects the Conv2DTranspose — confirm intended.

    Args:
        input_img: Keras input tensor.
        n_filters: filter count of the transposed-conv head.
        dropout: unused (see note).
        batch_norm: whether convA blocks apply batch normalization.

    Returns:
        A compiled-free keras Model mapping input_img to softmax outputs.
    """
    c1 = Conv2D(16, kernel_size=(1, 6), strides=(1, 1), padding='valid')(input_img)
    if batch_norm:
        c1 = BatchNormalization()(c1)
    c1 = Activation('relu')(c1)
    # Encoder: alternating downsampling (convB) and feature blocks (convA).
    c1 = convB(c1, 10, 2, 2)
    c1 = convA(c1, 10, batch_norm)
    c2 = convB(c1, 10, 3, 3)
    c2 = convA(c2, 10, batch_norm)
    c3 = convB(c2, 10, 2, 1)
    c3 = convA(c3, 10, batch_norm)
    c4 = convB(c3, 10, 4, 5)
    c4 = convA(c4, 10, batch_norm)
    c4 = convA(c4, 10, batch_norm)
    # Decoder: upsampling (convC) with skip connections to encoder stages.
    c5 = convC(c4, 10, 4, 5)
    c5 = concatenate([c5, c3])
    c5 = convA(c5, 10, batch_norm)
    c6 = convC(c5, 10, 2, 1)
    c6 = concatenate([c6, c2])
    c6 = convA(c6, 10, batch_norm)
    c7 = convC(c6, 10, 3, 3)
    c7 = concatenate([c7, c1])
    c7 = convA(c7, 10, batch_norm)
    c8 = convC(c7, 10, 2, 2)
    c9 = Conv2DTranspose(n_filters, kernel_size=(1, 6), strides=(1, 1), padding='valid')(c8)
    outputs = Conv2D(4, kernel_size=(1, 1), activation='softmax')(c9)
    model = Model(inputs=input_img, outputs=outputs)
    return model
def test_net_on_dataset(args, multi_gpu=False):
    """Run detection inference over the configured test datasets and evaluate.

    Dispatches to the multi-GPU or single-process test path, logs total
    inference time, and returns the evaluation results.
    """
    dataset = build_dataset(cfg.TEST.DATASETS, is_train=False)
    total_timer = Timer()
    total_timer.tic()
    if multi_gpu:
        num_images = len(dataset)
        (all_boxes, all_segms, all_parss, all_pscores) = multi_gpu_test_net_on_dataset(args, num_images)
    else:
        (all_boxes, all_segms, all_parss, all_pscores) = test_net(args)
    total_timer.toc(average=False)
    logging_rank('Total inference time: {:.3f}s'.format(total_timer.average_time), local_rank=0)
    return evaluation(dataset, all_boxes, all_segms, all_parss, all_pscores)
class DeformableDetrModel(metaclass=DummyObject):
    """Placeholder for DeformableDetrModel when torch is unavailable.

    Instantiation raises a helpful error via requires_backends instead of an
    ImportError at import time.
    """
    # Backends that must be installed for the real class.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class HparamsAbsorbing(HparamsBase):
    """Hyperparameters for the absorbing (masked) diffusion model, with
    per-dataset optimizer defaults for LSUN churches/bedrooms and FFHQ."""

    def __init__(self, dataset):
        # Diffusion / sampling configuration (dataset-independent).
        self.loss_type = 'reweighted_elbo'
        self.sample_type = 'diffusion'
        self.mask_schedule = 'random'
        self.total_steps = 256
        self.sample_steps = 256
        self.attn_pdrop = 0.0
        self.embd_pdrop = 0.0
        self.resid_pdrop = 0.0
        self.temp = 1.0
        super().__init__(dataset)
        # Per-dataset optimizer defaults: (learning rate, warmup iterations).
        per_dataset = {
            'churches': (0.0002, 10000),
            'bedrooms': (0.0002, 10000),
            'ffhq': (0.0001, 30000),
        }
        if self.dataset not in per_dataset:
            raise KeyError(f'Defaults not defined for multinomial diffusion model on dataset: {self.dataset}')
        # Transformer architecture defaults shared by all supported datasets.
        self.batch_size = 20
        self.bert_n_emb = 512
        self.bert_n_head = 8
        self.bert_n_layers = 24
        self.block_size = 256
        (self.lr, self.warmup_iters) = per_dataset[self.dataset]
def get_instance_kwargs(args, num_exps, variant):
    """Assemble launcher instance kwargs from parsed CLI args.

    Also records the dict under variant['instance_kwargs'] and returns it.
    Docker interactivity is enabled only for 'local_docker' mode.
    """
    instance_kwargs = dict(
        mode=args.mode,
        ssh_host=None,
        use_gpu=(not args.no_gpu),
        gpu_id=args.gpu_id,
        num_exps_per_instance=int(num_exps),
        interactive_docker=(args.mode == 'local_docker'),
    )
    variant['instance_kwargs'] = instance_kwargs
    return instance_kwargs
class Combine(TokenConverter):
    """pyparsing converter that concatenates matched tokens into one string.

    By default requires the tokens to be adjacent in the input (no skipped
    whitespace between them) and joins them with `joinString`.
    """

    def __init__(self, expr, joinString='', adjacent=True):
        super(Combine, self).__init__(expr)
        if adjacent:
            # Adjacent mode: suppress whitespace skipping inside the expr.
            self.leaveWhitespace()
        self.adjacent = adjacent
        self.skipWhitespace = True
        self.joinString = joinString
        self.callPreparse = True

    def ignore(self, other):
        """In adjacent mode, delegate ignores to the base ParserElement so the
        combined expression itself still sees every character."""
        if self.adjacent:
            ParserElement.ignore(self, other)
        else:
            super(Combine, self).ignore(other)
        return self

    def postParse(self, instring, loc, tokenlist):
        """Replace the token list with a single joined string, preserving any
        named results; return [retToks] to keep result names accessible."""
        retToks = tokenlist.copy()
        del retToks[:]
        retToks += ParseResults([''.join(tokenlist._asStringList(self.joinString))], modal=self.modalResults)
        if (self.resultsName and retToks.haskeys()):
            return [retToks]
        else:
            return retToks
class TopologyZooProblem(Problem):
    """Traffic-engineering problem instance built from a Topology Zoo graphml file."""

    def __init__(self, fname, *, model='gravity', seed=0, scale_factor=1.0, **kwargs):
        self._fname = fname
        # Graphs live under TOPOLOGIES_DIR/topology-zoo/.
        G = Problem._read_graph_graphml(os.path.join(TOPOLOGIES_DIR, 'topology-zoo', fname))
        super().__init__(G, model=model, seed=seed, scale_factor=scale_factor, **kwargs)

    def name(self):
        """Problem name: the source graphml filename."""
        return self._fname
class SourceLoc(L.Layer):
    """Keras layer holding fixed (non-trainable) source locations `xs`,
    broadcast to the shape of the input on call."""

    def __init__(self, xs, **kwargs):
        super(SourceLoc, self).__init__(**kwargs)
        # Stored as a frozen weight so it serializes with the model.
        self.xs = self.add_weight(name=kwargs.get('name', 'xs'), shape=(len(xs),), trainable=False, initializer=Initializer(xs))

    def call(self, x):
        # Broadcast xs across the input's shape.
        return (tf.ones_like(x) * self.xs)

    def get_config(self):
        """Serialize xs (as a numpy array) for layer reconstruction."""
        config = super(SourceLoc, self).get_config()
        config.update({'xs': self.xs.numpy()})
        return config
def load_lib():
    """Import and cache the tifffile backend.

    Prefers the installed third-party `tifffile` package; falls back to the
    bundled `_tifffile` module shipped with this package.
    """
    global _tifffile
    try:
        import tifffile as _tifffile
    except ImportError:
        # Bundled fallback implementation.
        from . import _tifffile
    return _tifffile
def test_reduce_mean_dyn_time():
    """reduce_mean over a dynamic time axis must yield (batch, in) output."""
    # Dynamic sequence-length dim, sized per batch entry.
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})

    class _Net(rf.Module):
        def __call__(self, x: Tensor) -> Tensor:
            return rf.reduce_mean(x, axis=time_dim)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data['data'])
        out.mark_as_default_output(shape=(batch_dim, in_dim))

    run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step)
def _dump_str(v):
    """Render a value as a double-quoted TOML-style string.

    Normalizes Python 2 byte strings to unicode, converts repr() quoting to
    double quotes, and rewrites '\\x' escapes to '\\u00' where the preceding
    backslash count shows the escape is real (not a literal backslash-x).
    """
    # Python 2 compat: decode byte strings before repr.
    if ((sys.version_info < (3,)) and hasattr(v, 'decode') and isinstance(v, str)):
        v = v.decode('utf-8')
    v = ('%r' % v)
    # Drop a Python 2 unicode-literal 'u' prefix.
    if (v[0] == 'u'):
        v = v[1:]
    singlequote = v.startswith("'")
    if (singlequote or v.startswith('"')):
        v = v[1:(- 1)]
    if singlequote:
        # Convert single-quoted repr to double-quoted form.
        v = v.replace("\\'", "'")
        v = v.replace('"', '\\"')
    # Rejoin around '\x' escape markers, deciding for each whether the
    # backslash is itself escaped (literal) or starts a real escape.
    v = v.split('\\x')
    while (len(v) > 1):
        i = (- 1)
        if (not v[0]):
            v = v[1:]
        v[0] = v[0].replace('\\\\', '\\')
        # Count trailing backslashes: odd means the '\x' was a real escape.
        joinx = (v[0][i] != '\\')
        while (v[0][:i] and (v[0][i] == '\\')):
            joinx = (not joinx)
            i -= 1
        if joinx:
            joiner = 'x'
        else:
            # Escaped backslash: promote to a unicode escape.
            joiner = 'u00'
        v = ([((v[0] + joiner) + v[1])] + v[2:])
    return unicode((('"' + v[0]) + '"'))
class LambdaWarmUpCosineScheduler():
    """LR-multiplier schedule: linear warm-up from lr_start to lr_max over
    warm_up_steps, then cosine decay from lr_max to lr_min, reaching lr_min at
    max_decay_steps and staying there afterwards."""

    def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0):
        self.lr_warm_up_steps = warm_up_steps
        self.lr_start = lr_start
        self.lr_min = lr_min
        self.lr_max = lr_max
        self.lr_max_decay_steps = max_decay_steps
        self.last_lr = 0.0
        self.verbosity_interval = verbosity_interval

    def schedule(self, n, **kwargs):
        """Compute (and remember) the multiplier for step n."""
        if self.verbosity_interval > 0:
            if n % self.verbosity_interval == 0:
                print(f'current step: {n}, recent lr-multiplier: {self.last_lr}')
        if n < self.lr_warm_up_steps:
            # Linear ramp from lr_start up to lr_max.
            current = ((self.lr_max - self.lr_start) / self.lr_warm_up_steps) * n + self.lr_start
        else:
            # Cosine decay; progress is clamped at 1 so lr stays at lr_min.
            progress = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps)
            progress = min(progress, 1.0)
            current = self.lr_min + (0.5 * (self.lr_max - self.lr_min)) * (1 + np.cos(progress * np.pi))
        self.last_lr = current
        return current

    def __call__(self, n, **kwargs):
        return self.schedule(n, **kwargs)
# NOTE(review): the decorator below was corrupted in the source (it read
# `_processor('clip_image_eval')`); reconstructed as the registry decorator —
# confirm against the original codebase.
@registry.register_processor('clip_image_eval')
class ClipImageEvalProcessor(BlipImageBaseProcessor):
    """Evaluation-time image preprocessing for CLIP: bicubic resize, center
    crop, RGB conversion, tensor conversion, and normalization."""

    def __init__(self, image_size=224, mean=None, std=None):
        super().__init__(mean=mean, std=std)
        self.transform = transforms.Compose([transforms.Resize(image_size, interpolation=InterpolationMode.BICUBIC), transforms.CenterCrop(image_size), _convert_to_rgb, transforms.ToTensor(), self.normalize])

    # Bug fix: from_config takes `cls` and constructs via `cls(...)` but was
    # missing @classmethod, so a call on the class would have bound the config
    # object as `cls`.
    @classmethod
    def from_config(cls, cfg=None):
        """Build the processor from an OmegaConf config (defaults when None)."""
        if (cfg is None):
            cfg = OmegaConf.create()
        image_size = cfg.get('image_size', 224)
        mean = cfg.get('mean', None)
        std = cfg.get('std', None)
        return cls(image_size=image_size, mean=mean, std=std)
# NOTE(review): the decorator below was corrupted in the source (it read
# `.core`); reconstructed as a pytest marker — confirm the original mark name.
@pytest.mark.core
def test_hdfs_index_store_exception():
    """HdfsIndexStore must reject a non-HDFS (file://) warehouse path."""
    local_warehouse_dir = 'file:///tmp'
    with pytest.raises(ValueError, match=f"Can't recognize path {(local_warehouse_dir + '/index_dir')} as HDFS path!"):
        HdfsIndexStore(warehouse_dir=local_warehouse_dir, index_dir='index_dir')
# NOTE(review): the decorator below was corrupted in the source (it read
# `_lr_scheduler('cosine')`); reconstructed as the fairseq registration
# decorator — confirm.
@register_lr_scheduler('cosine')
class CosineSchedule(FairseqLRScheduler):
    """Cosine-annealing learning-rate schedule with linear warmup, optional
    period growth (t_mult), and per-period LR shrink (lr_shrink)."""

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if (len(args.lr) > 1):
            raise ValueError('Cannot use a fixed learning rate schedule with cosine. Consider --lr-scheduler=fixed instead.')
        warmup_end_lr = args.max_lr
        if (args.warmup_init_lr < 0):
            args.warmup_init_lr = args.lr[0]
        # args.lr[0] is the minimum (end-of-period) LR; max_lr is the peak.
        self.min_lr = args.lr[0]
        self.max_lr = args.max_lr
        assert (self.max_lr > self.min_lr), 'max_lr must be more than lr'
        self.t_mult = args.t_mult
        self.period = args.lr_period_updates
        if (args.warmup_updates > 0):
            # Per-update LR increment during the linear warmup phase.
            self.lr_step = ((warmup_end_lr - args.warmup_init_lr) / args.warmup_updates)
        else:
            self.lr_step = 1
        self.warmup_updates = args.warmup_updates
        self.lr_shrink = args.lr_shrink
        self.lr = args.warmup_init_lr
        self.optimizer.set_lr(self.lr)

    # Bug fix: add_args takes no `self` and is invoked on the class by the
    # argument-parsing machinery; it must be a @staticmethod.
    @staticmethod
    def add_args(parser):
        """Add cosine-schedule arguments to the argument parser."""
        parser.add_argument('--warmup-updates', default=0, type=int, metavar='N', help='warmup the learning rate linearly for the first N updates')
        parser.add_argument('--warmup-init-lr', default=(- 1), type=float, metavar='LR', help='initial learning rate during warmup phase; default is args.lr')
        parser.add_argument('--max-lr', required=True, type=float, metavar='LR', help='max learning rate, must be more than args.lr')
        parser.add_argument('--t-mult', default=1, type=float, metavar='LR', help='factor to grow the length of each period')
        parser.add_argument('--lr-period-updates', default=5000, type=float, metavar='LR', help='initial number of updates per period')

    def step(self, epoch, val_loss=None):
        """End-of-epoch hook: delegate to the base class, return current LR."""
        super().step(epoch, val_loss)
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Per-update hook: set and return the LR for this update count."""
        if (num_updates < self.args.warmup_updates):
            # Linear warmup from warmup_init_lr toward max_lr.
            self.lr = (self.args.warmup_init_lr + (num_updates * self.lr_step))
        else:
            curr_updates = (num_updates - self.args.warmup_updates)
            if (self.t_mult != 1):
                # Geometrically growing periods: find the current period index
                # i, its length t_i, and the offset t_curr within it.
                i = math.floor(math.log((1 - ((curr_updates / self.period) * (1 - self.t_mult))), self.t_mult))
                t_i = ((self.t_mult ** i) * self.period)
                t_curr = (curr_updates - (((1 - (self.t_mult ** i)) / (1 - self.t_mult)) * self.period))
            else:
                # Fixed-length periods.
                i = math.floor((curr_updates / self.period))
                t_i = self.period
                t_curr = (curr_updates - (self.period * i))
            # Both bounds shrink each period by lr_shrink.
            lr_shrink = (self.lr_shrink ** i)
            min_lr = (self.min_lr * lr_shrink)
            max_lr = (self.max_lr * lr_shrink)
            self.lr = (min_lr + ((0.5 * (max_lr - min_lr)) * (1 + math.cos(((math.pi * t_curr) / t_i)))))
        self.optimizer.set_lr(self.lr)
        return self.lr
def processGuiEvent(_gui):
    # Mouse editor for the potential-flow scene: place, strengthen, weaken or
    # delete sources, vortexes and dipoles at the cursor position.
    # NOTE(review): relies on module globals (sources, vortexes, dipoles,
    # maxElements, guiHeight, vec2, fade) defined elsewhere in this file.
    global fade
    while _gui.get_event((ti.GUI.PRESS, ti.GUI.LMB), (ti.GUI.PRESS, ti.GUI.RMB)):
        if (_gui.is_pressed(ti.GUI.LMB) and _gui.is_pressed(ti.GUI.RMB)):
            # Both buttons held: delete any element near the cursor
            # (5/guiHeight ~ a few pixels in normalized coords — TODO confirm).
            for i in range(maxElements):
                if ((sources[i].q != 0) and ((sources[i].pos - vec2(*_gui.get_cursor_pos())).norm() < (5 / guiHeight))):
                    sources[i].q = 0
                if ((vortexes[i].q != 0) and ((vortexes[i].pos - vec2(*_gui.get_cursor_pos())).norm() < (5 / guiHeight))):
                    vortexes[i].q = 0
                if ((dipoles[i].m != 0) and ((dipoles[i].pos - vec2(*_gui.get_cursor_pos())).norm() < (5 / guiHeight))):
                    dipoles[i].m = 0
        elif _gui.is_pressed('s'):
            # 's' + click: spawn a source in the first free slot
            # (RMB gives negative strength, i.e. a sink).
            for i in range(maxElements):
                if (sources[i].q == 0):
                    if _gui.is_pressed(ti.GUI.RMB):
                        sources[i].q -= 1
                    else:
                        sources[i].q += 1
                    sources[i].pos = vec2(*_gui.get_cursor_pos())
                    break
        elif _gui.is_pressed('v'):
            # 'v' + click: spawn a vortex (RMB flips rotation sign).
            for i in range(maxElements):
                if (vortexes[i].q == 0):
                    if _gui.is_pressed(ti.GUI.RMB):
                        vortexes[i].q -= 0.5
                    else:
                        vortexes[i].q += 0.5
                    vortexes[i].pos = vec2(*_gui.get_cursor_pos())
                    break
        elif _gui.is_pressed('d'):
            # 'd' + click: spawn a dipole (RMB flips moment sign).
            for i in range(maxElements):
                if (dipoles[i].m == 0):
                    if _gui.is_pressed(ti.GUI.RMB):
                        dipoles[i].m -= 0.01
                    else:
                        dipoles[i].m += 0.01
                    dipoles[i].pos = vec2(*_gui.get_cursor_pos())
                    break
        else:
            # Plain click on an existing element: nudge its strength along its
            # own sign; int((x>=0)-(x<=0)) is sign(x), so LMB strengthens and
            # RMB weakens regardless of polarity.
            for i in range(maxElements):
                if ((sources[i].q != 0) and ((sources[i].pos - vec2(*_gui.get_cursor_pos())).norm() < (5 / guiHeight))):
                    if _gui.is_pressed(ti.GUI.RMB):
                        sources[i].q -= (0.5 * int(((sources[i].q >= 0.0) - (sources[i].q <= 0.0))))
                    else:
                        sources[i].q += (0.5 * int(((sources[i].q >= 0.0) - (sources[i].q <= 0.0))))
                if ((vortexes[i].q != 0) and ((vortexes[i].pos - vec2(*_gui.get_cursor_pos())).norm() < (5 / guiHeight))):
                    if _gui.is_pressed(ti.GUI.RMB):
                        vortexes[i].q -= (0.1 * int(((vortexes[i].q >= 0.0) - (vortexes[i].q <= 0.0))))
                    else:
                        vortexes[i].q += (0.1 * int(((vortexes[i].q >= 0.0) - (vortexes[i].q <= 0.0))))
                if ((dipoles[i].m != 0) and ((dipoles[i].pos - vec2(*_gui.get_cursor_pos())).norm() < (5 / guiHeight))):
                    if _gui.is_pressed(ti.GUI.RMB):
                        dipoles[i].m -= (0.001 * int(((dipoles[i].m >= 0.0) - (dipoles[i].m <= 0.0))))
                    else:
                        dipoles[i].m += (0.001 * int(((dipoles[i].m >= 0.0) - (dipoles[i].m <= 0.0))))
        # Scene changed: make the fade factor negative (presumably a redraw
        # trigger consumed elsewhere — confirm against the render loop).
        fade = (- abs(fade)) |
class TorchModel(nn.Module):
    """Abstract base for config-driven forecasting networks.

    Subclasses implement forward(past, past_timestamp, future_timestamp, ...).
    """

    def __init__(self, config: DeepConfig):
        super(TorchModel, self).__init__()
        # Keep the experiment configuration for subclasses to read.
        self.config = config

    def forward(self, past, past_timestamp, future_timestamp, *args, **kwargs):
        """Forecasting pass; must be implemented by subclasses."""
        raise NotImplementedError

    def device(self):
        """Device of the module's first parameter."""
        first_param = next(self.parameters())
        return first_param.device
def selu(x):
    """Scaled Exponential Linear Unit (Klambauer et al., 2017).

    selu(x) = scale * x                    for x >= 0
            = scale * alpha * (e^x - 1)    for x <  0

    Fix: the original used alpha = scale = 1.0 (which degenerates to plain
    ELU) and labelled the name scope 'elu'; use the self-normalizing SELU
    constants and a matching scope name.
    """
    with ops.name_scope('selu'):
        alpha = 1.6732632423543772
        scale = 1.0507009873554805
        return (scale * tf.where((x >= 0.0), x, (alpha * tf.nn.elu(x))))
class CustomAdamOptimizer(BaseCustomOptimizer):
    """Adam on top of BaseCustomOptimizer.

    Keeps per-variable first ('m') and second ('v') moment slots plus global
    beta-power accumulators used for bias correction in _apply()/_finish().
    """

    def __init__(self, beta1=0.9, beta2=0.999, epsilon=1e-08, **kwargs):
        """beta1/beta2: moment decay rates; epsilon: denominator fuzz term."""
        super(CustomAdamOptimizer, self).__init__(**kwargs)
        self._beta1 = beta1
        self._beta2 = beta2
        self._epsilon = epsilon

    def _prepare(self):
        # Materialize hyperparameters as tensors once per apply call.
        super(CustomAdamOptimizer, self)._prepare()
        self._beta1_t = tf.convert_to_tensor(self._beta1, name='beta1')
        self._beta2_t = tf.convert_to_tensor(self._beta2, name='beta2')
        self._epsilon_t = tf.convert_to_tensor(self._epsilon, name='epsilon')

    def _create_slots(self, var_list):
        # beta^t accumulators for bias correction; advanced in _finish().
        self._beta1_power = tf.Variable(initial_value=self._beta1, name='beta1_power')
        self._beta2_power = tf.Variable(initial_value=self._beta2, name='beta2_power')
        for v in var_list:
            self._zeros_slot(v, 'm', self._name)
            self._zeros_slot(v, 'v', self._name)

    def _apply(self, grad, var, indices=None):
        # Shared dense/sparse update; indices=None means dense
        # (presumably — _assign/_gather are defined on the base class; confirm).
        lr = tf.cast(self._learning_rate_tensor, var.dtype.base_dtype)
        beta1_t = tf.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype)
        epsilon_t = tf.cast(self._epsilon_t, var.dtype.base_dtype)
        m = self.get_slot(var, 'm')
        v = self.get_slot(var, 'v')
        # Fold the bias correction into the learning rate.
        lr *= (tf.sqrt((1.0 - self._beta2_power)) / (1.0 - self._beta1_power))
        # Exponential moving averages of the gradient and squared gradient.
        m = self._assign(m, updates=((beta1_t * self._gather(m, indices)) + ((1.0 - beta1_t) * grad)), indices=indices)
        v = self._assign(v, updates=((beta2_t * self._gather(v, indices)) + ((1.0 - beta2_t) * (grad * grad))), indices=indices)
        update = (lr * (self._gather(m, indices) / (tf.sqrt(self._gather(v, indices)) + epsilon_t)))
        var_update = self._assign_sub(ref=var, updates=update, indices=indices)
        return tf.group(*[var_update, m, v])

    def _finish(self, update_ops, name_scope):
        # After all per-variable updates, advance the beta-power accumulators.
        with tf.control_dependencies(update_ops), tf_compat.v1.colocate_with(self._beta1_power):
            update_beta1 = self._beta1_power.assign((self._beta1_power * self._beta1_t), use_locking=self._use_locking)
            update_beta2 = self._beta2_power.assign((self._beta2_power * self._beta2_t), use_locking=self._use_locking)
        return tf.group(*(update_ops + [update_beta1, update_beta2]), name=name_scope) |
def add_confints(df):
    """Add 'minconf'/'maxconf' columns from confint(row) for each row of *df*.

    Fix: the original called confint() twice per row (once per bound); compute
    it once and split the (lower, upper) pair. Mutates *df* in place.
    """
    bounds = df.apply((lambda row: confint(row)), axis=1)
    df['minconf'] = bounds.map(lambda b: b[0])
    df['maxconf'] = bounds.map(lambda b: b[1])
def generate_subsystem_code(config):
    """Return Score-P adapter initialisation code for the given config flags.

    Args:
        config: list of extra 'scorep-config' command-line flags.
    Raises:
        ValueError: if 'scorep-config' rejects the flags (either probe call).

    Fixes: the return code of the '--adapter-init' call was previously
    ignored, and the error message printed the whole command instead of the
    user-supplied config.
    """
    scorep_config = (['scorep-config'] + config)
    (return_code, _, _) = scorep.helper.call(scorep_config)
    if (return_code != 0):
        raise ValueError('given config {} is not supported'.format(config))
    (return_code, scorep_adapter_init, _) = scorep.helper.call((scorep_config + ['--adapter-init']))
    if (return_code != 0):
        raise ValueError('given config {} is not supported'.format(config))
    return scorep_adapter_init
def detoxify(data, use_cuda):
    """Score each generated sample with the Detoxify 'original' model.

    Returns (scores, all_scores): scores[i] = 1 - max toxicity attribute of
    sample i; all_scores[i] is the full attribute->float dict.
    """
    scorer = (Detoxify('original', device='cuda') if use_cuda else Detoxify('original'))
    scores = []
    all_scores = []
    for sample in tqdm(data):
        attr_scores = {name: float(value) for (name, value) in scorer.predict(sample['output']).items()}
        all_scores.append(attr_scores)
        scores.append((1 - np.max(list(attr_scores.values()))))
    return (scores, all_scores)
# NOTE(review): bare '_config' expression — almost certainly a decorator
# reference (e.g. sacred's '@ex.config') whose '@' was lost in extraction;
# confirm against the original file.
_config
def task_finetune_action_recognition_hmdb51():
    # Sacred-style config function: the local assignments below become
    # experiment configuration values for HMDB51 action-recognition finetuning.
    exp_name = 'finetune_action_recognition_hmdb51'
    datasets = ['hmdb51']
    # Action recognition is trained through the open-ended VQA head/loss.
    loss_names = _loss_names({'openend_vqa': 1})
    # answer-vocabulary size for the VQA head (52 — presumably 51 HMDB
    # classes plus one extra slot; confirm against the label table)
    msrvttqa_label_size = 52
    batch_size = 256
    max_epoch = 50
    max_steps = None
    warmup_steps = 0.1  # < 1, so presumably a fraction of total steps — confirm
    draw_false_text = 15
    learning_rate = 0.0001 |
def _try_load_schema(config: LoaderConfig, first: Loader, second: Loader) -> BaseSchema:
    """Load a schema with *first*, falling back to *second* on recoverable errors.

    The original SchemaError is re-raised when the fallback is not attempted
    or when its failure is not the more specific/informative one.
    """
    from urllib3.exceptions import InsecureRequestWarning
    with warnings.catch_warnings():
        # Loaders may hit HTTPS endpoints with verification disabled.
        warnings.simplefilter('ignore', InsecureRequestWarning)
        try:
            return first(config)
        except SchemaError as exc:
            if should_try_more(exc):
                try:
                    return second(config)
                except Exception as second_exc:
                    # Prefer the fallback's error only when it is specific to
                    # that loader type; otherwise fall through to 'raise exc'.
                    if is_specific_exception(second, second_exc):
                        raise second_exc
            raise exc |
def accum_node_fts(encoders, dp: _DataPoint, node_fts: _Array) -> _Array:
    """Accumulate the encoding of *dp* into node features when it is node-shaped.

    Node-located non-pointer data and graph-located pointer data both
    contribute to per-node features; everything else is left untouched.
    """
    pointer_like = dp.type_ in [_Type.POINTER, _Type.PERMUTATION_POINTER]
    node_scalar = (dp.location == _Location.NODE) and (not pointer_like)
    graph_pointer = (dp.location == _Location.GRAPH) and (dp.type_ == _Type.POINTER)
    if node_scalar or graph_pointer:
        node_fts += _encode_inputs(encoders, dp)
    return node_fts
class TransferPair():
    """One source object paired with its destination objects for a transfer."""

    def __init__(self, src_obj: ObjectStoreObject, dst_objs: Dict[(str, ObjectStoreObject)], dst_key: str):
        # object being copied from the source store
        self.src_obj = src_obj
        # mapping of destination identifier -> destination object
        # (presumably keyed by region/provider; confirm against callers)
        self.dst_objs = dst_objs
        # key/path under which the object is written at the destination
        self.dst_key = dst_key |
def _list_unsupported_tensor_ops():
    """Render the unsupported Tensor methods and properties as one report string."""
    (methods, properties) = _gen_unsupported_methods_properties()
    parts = [
        '\n\n\nUnsupported Tensor Methods\n\n ',
        '\n',
        methods,
        '\n\nUnsupported Tensor Properties\n\n ',
        '\n',
        properties,
    ]
    return ''.join(parts)
def conway_diagonal_factor(self, p):
    """Return the diagonal factor of this form at the prime p.

    Builds the Conway species list at p (p = 2 handled separately) and
    accumulates one correction term per species; species 0 contributes
    nothing.
    """
    if (p == 2):
        species_list = self.conway_species_list_at_2()
    else:
        species_list = self.conway_species_list_at_odd_prime(p)
    diag_factor = QQ(1)
    for s in species_list:
        if (s == 0):
            pass
        elif ((s % 2) == 1):
            # Odd species: divide by 2 * prod(1 - p^-i) over even i in [2, s).
            diag_factor = (diag_factor / (2 * prod([(1 - (QQ(p) ** (- i))) for i in range(2, s, 2)])))
        else:
            # Even species: same product over even i in [2, |s|) ...
            diag_factor = (diag_factor / (2 * prod([(1 - (QQ(p) ** (- i))) for i in range(2, abs(s), 2)])))
            # ... plus a term depending on the sign of s: 1 - sign(s) * p^(-|s|/2).
            s_sign = ZZ((s / abs(s)))
            diag_factor = (diag_factor / (ZZ(1) - (s_sign * (QQ(p) ** ZZ(((- abs(s)) / ZZ(2)))))))
    return diag_factor |
def check_format(file_path):
    """Validate a tab-separated run file: topic_id, tweet_id, score, run_id.

    Returns True when every line has four tab-separated fields, matches
    _LINE_PATTERN_A on (tweet_id, score), and has an int tweet_id and a
    float score; logs and returns False on the first bad line.

    Fix: malformed lines (wrong field count or non-numeric values) previously
    raised ValueError instead of being reported and returning False.
    """
    with open(file_path, encoding='UTF-8') as out:
        file_content = out.read().strip()
    for (i, line) in enumerate(file_content.split('\n')):
        fields = line.strip().split('\t')
        if len(fields) != 4:
            logging.error('Wrong line format: {}'.format(line))
            return False
        (topic_id, tweet_id, score, run_id) = fields
        if (not _LINE_PATTERN_A.match(('%s\t%s' % (tweet_id, score)))):
            logging.error('Wrong line format: {}'.format(line))
            return False
        try:
            tweet_id = int(tweet_id)
            score = float(score.strip())
        except ValueError:
            logging.error('Wrong line format: {}'.format(line))
            return False
    return True
class GaussianPolicy(nn.Module):
    """Gaussian policy: shared MLP trunk with mean and clamped log-std heads.

    forward() returns the deterministic tanh-squashed mean action; sample()
    draws a stochastic action via reparameterize() (defined elsewhere).
    """

    def __init__(self, state_shape, action_shape, hidden_units=(256, 256), hidden_activation=nn.ReLU(inplace=True)):
        # NOTE(review): the default activation module is a shared mutable
        # default; harmless for a stateless ReLU but worth confirming.
        super().__init__()
        self.mlp = MLP(input_dim=state_shape[0], output_dim=hidden_units[(- 1)], hidden_units=hidden_units[:(- 1)], hidden_activation=hidden_activation, output_activation=hidden_activation)
        self.mean = nn.Linear(hidden_units[(- 1)], action_shape[0]).apply(initialize_weight)
        # Clamp() bounds the log-std output (range defined in Clamp elsewhere).
        self.log_std = nn.Sequential(nn.Linear(hidden_units[(- 1)], action_shape[0]), Clamp()).apply(initialize_weight)

    def forward(self, states):
        """Deterministic action for evaluation."""
        return torch.tanh(self.mean(self.mlp(states)))

    def sample(self, states):
        """Stochastic action via the reparameterization trick (presumably
        returning (action, log_prob) — confirm reparameterize's contract)."""
        x = self.mlp(states)
        return reparameterize(self.mean(x), self.log_std(x)) |
class Vgg(nn.Module):
    """VGG-16-style classifier: five conv blocks plus a three-layer FC head.

    Args:
        img_size: input resolution; one of 256/96/64/32 (fixes the spatial
            size of the final conv feature map).
        fc_layer: width of the two hidden fully-connected layers.
        classes: number of output classes.

    Raises:
        ValueError: for an unsupported img_size. (Fix: the original silently
        left self.final_size unset and failed later with AttributeError.)
    """

    # final conv feature-map side length after five 2x poolings, per input size
    _FINAL_SIZES = {256: 8, 96: 3, 64: 2, 32: 1}

    def __init__(self, img_size=256, fc_layer=4096, classes=10):
        super(Vgg, self).__init__()
        self.fc_layer = fc_layer
        self.classes = classes
        if img_size not in self._FINAL_SIZES:
            raise ValueError('unsupported img_size {}; expected one of {}'.format(img_size, sorted(self._FINAL_SIZES)))
        self.final_size = self._FINAL_SIZES[img_size]
        self.conv_block1 = nn.Sequential(nn.Conv2d(3, 64, 3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(64, 64, 3, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(2, stride=2, ceil_mode=True))
        self.conv_block2 = nn.Sequential(nn.Conv2d(64, 128, 3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(128, 128, 3, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(2, stride=2, ceil_mode=True))
        self.conv_block3 = nn.Sequential(nn.Conv2d(128, 256, 3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(256, 256, 3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(256, 256, 3, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(2, stride=2, ceil_mode=True))
        self.conv_block4 = nn.Sequential(nn.Conv2d(256, 512, 3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(512, 512, 3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(512, 512, 3, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(2, stride=2, ceil_mode=True))
        self.conv_block5 = nn.Sequential(nn.Conv2d(512, 512, 3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(512, 512, 3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(512, 512, 3, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(2, stride=2, ceil_mode=True))
        self.classifier = nn.Sequential(nn.Linear(((512 * self.final_size) * self.final_size), self.fc_layer), nn.ReLU(inplace=True), nn.Dropout2d(0.5), nn.Linear(self.fc_layer, self.fc_layer), nn.ReLU(inplace=True), nn.Dropout2d(0.5), nn.Linear(self.fc_layer, self.classes))
        # He-style (fan-out) initialization for every conv layer.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
                m.bias.data.zero_()

    def forward(self, x):
        """Return class scores of shape (batch, classes)."""
        conv1 = self.conv_block1(x)
        conv2 = self.conv_block2(conv1)
        conv3 = self.conv_block3(conv2)
        conv4 = self.conv_block4(conv3)
        conv5 = self.conv_block5(conv4)
        conv5_flatten = conv5.view(conv5.shape[0], (- 1))
        score = self.classifier(conv5_flatten)
        return score

    def init_vggface_params(self, pretrained_model_path):
        """Remap VGG-Face checkpoint keys ('convN_M...') to this module's
        'conv_blockN.<idx>' naming and return the remapped state dict.

        NOTE(review): the dict is returned, not loaded — callers must call
        load_state_dict themselves.
        """
        pretrained_dict = torch.load(pretrained_model_path)
        new_state_dict = OrderedDict()
        for (k, v) in pretrained_dict.items():
            if (k[0:4] == 'conv'):
                conv_blk_id = k[4]
                conv_layer_id = k[6]
                # layer M maps to Sequential index 2*(M-1) (conv, relu pairs)
                new_state_name = (((('conv_block' + conv_blk_id) + '.') + str(((int(conv_layer_id) - 1) * 2))) + k[7:])
                new_state_dict[new_state_name] = v
        return new_state_dict
def visualize(state, fname='tests/assets/mahjong/xxx.svg'):
    """Render the mahjong game state to an SVG file using the dark theme."""
    state.save_svg(fname, color_theme='dark') |
class ProgressiveWavLM(nn.Module):
    """WavLM-based ASR wrapper: pretrained encoder plus a decoder (presumably
    recurrent, given the num_layers/bidirectional kwargs — confirm in Model).

    Construction downloads and unpacks a SentencePiece char tokenizer
    (network/disk side effect) and optionally freezes all parameters.
    """

    def __init__(self, source, save_path, output_norm=False, freeze=False, freeze_encoder=False, freeze_feature_extractor=False, apply_spec_augment=False, hidden_size=1024, num_layers=1, dropout=0.0, bidirectional=False):
        super().__init__()
        # Side effect: fetch and unzip the pretrained char tokenizer.
        download_file(_TOKENIZER_URL, f'{_TOKENIZER_PATH}.zip', unpack=True, dest_unpack=_TOKENIZER_PATH)
        self.tokenizer = SentencePiece(model_dir=_TOKENIZER_PATH, vocab_size=4887, model_type='char').sp
        vocab_size = self.tokenizer.vocab_size()
        # 'freeze' freezes the whole model below; 'freeze_encoder' only the encoder.
        encoder_kwargs = {'output_norm': output_norm, 'freeze': (freeze_encoder or freeze), 'freeze_feature_extractor': freeze_feature_extractor, 'apply_spec_augment': apply_spec_augment, 'output_all_hiddens': False}
        decoder_kwargs = {'hidden_size': hidden_size, 'num_layers': num_layers, 'dropout': dropout, 'bidirectional': bidirectional}
        self.model = Model(source, save_path, vocab_size, encoder_kwargs, decoder_kwargs)
        if freeze:
            self.model.requires_grad_(False)

    def forward(self, wav, wav_lens=None):
        """Run the wrapped model on a batch of waveforms (wav_lens: relative lengths)."""
        output = self.model(wav, wav_lens)
        return output |
class E2ESeq2SeqModel(Seq2SeqModel):
    """End-to-end seq2seq wiring: flags, data-derived sizes, shared embeddings,
    then encoder/decoder (which concrete subclasses must provide)."""

    def setup(self, data):
        """Configure the model from a dataset object, in dependency order."""
        self.set_flags()
        self.set_data_dependent_params(data)
        self.set_embeddings()
        self.set_encoder()
        self.set_decoder()

    def set_data_dependent_params(self, data):
        """Copy the shared vocabulary size and sequence-length limits from *data*."""
        shared_vocab_size = len(data.vocab)
        self.set_src_vocab_size(shared_vocab_size)
        self.set_tgt_vocab_size(shared_vocab_size)
        self.set_max_src_len(data.max_src_len)
        self.set_max_tgt_len(data.max_tgt_len)

    def set_embeddings(self):
        """Create the embedding matrix and its dropout layer from the config."""
        self.embedding_dim = self.config['embedding_dim']
        self.embedding_mat = get_embed_matrix(self.src_vocab_size, self.embedding_dim)
        self.embedding_dropout_layer = nn.Dropout(self.config.get('embedding_dropout', 0.0))

    def embedding_lookup(self, ids, *args, **kwargs):
        """Map token ids to embedding vectors."""
        return self.embedding_mat(ids)

    def set_flags(self):
        """Read training flags (teacher forcing ratio) from the config."""
        self.teacher_forcing_ratio = self.config.get('teacher_forcing_ratio', 1.0)

    def set_encoder(self):
        raise NotImplementedError()

    def set_decoder(self):
        raise NotImplementedError()
def build_backbone(cfg):
    """Instantiate the backbone named by cfg.type from models.backbone,
    forwarding every other cfg entry as a keyword argument."""
    kwargs = {key: cfg[key] for key in cfg if key != 'type'}
    backbone_cls = models.backbone.__dict__[cfg.type]
    return backbone_cls(**kwargs)
def get_args_parser():
    """Parse CLI args, merge YAML config values in, and prepare run state.

    Side effects: creates timestamped log/checkpoint dirs, snapshots the
    source tree, optionally initializes DDP and wandb.

    Fix: the source-snapshot cleanup used a bare 'except:' which also
    swallowed KeyboardInterrupt/SystemExit; narrowed to 'except Exception:'.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg', type=str, default='config/config.yml', help='config file path')
    parser.add_argument('--load_pretrained', type=int, required=False, help='whether to load pretrained weights for training')
    parser.add_argument('--checkpoint', type=str, required=False, help='a path to model checkpoint file to load pretrained weights')
    parser.add_argument('--use_wandb', type=int, default=0, help='1 means use wandb to log experiments, 0 otherwise')
    parser.add_argument('--use_ddp', type=int, default=0, help='1 means use pytorch GPU parallel distributed training, 0 otherwise')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # Every YAML key becomes an extra CLI flag (typed from its default value),
    # then args are re-parsed so explicit CLI values override the YAML.
    with open(args.cfg, 'r') as ymlfile:
        cfg = yaml.safe_load(ymlfile)
    for (k, v) in cfg.items():
        parser.add_argument('--{}'.format(k), default=v, type=type(v))
    args = parser.parse_args()
    # Timestamped per-run log and checkpoint directories.
    args.curr_time = datetime.now().strftime('%Y-%m-%dT%H-%M-%SZ')
    args.log_dir = os.path.abspath(os.path.join(args.log_dir, args.curr_time))
    args.checkpoint_dir = os.path.abspath(os.path.join(args.checkpoint_dir, args.curr_time))
    os.makedirs(args.log_dir, exist_ok=True)
    os.makedirs(args.checkpoint_dir, exist_ok=True)
    try:
        # Best-effort snapshot of the source tree; drop any stale wandb cache.
        copy_source(os.getcwd(), args.log_dir)
        sourcecode_folder_name = os.path.splitext(os.path.basename(os.getcwd()))[0]
        wandb_dir = os.path.join(args.log_dir, sourcecode_folder_name, 'wandb')
        if os.path.exists(wandb_dir):
            shutil.rmtree(wandb_dir)
    except Exception:
        pass
    if (args.num_workers == (- 1)):
        # Auto: leave one thread for the main process.
        args.num_workers = (torch.get_num_threads() - 1)
    args.working_abspath = os.path.abspath('./')
    if (args.seed < 0):
        args.seed = random.randint(0, 1000000)
    if args.use_ddp:
        init_distributed()
        args.device = torch.device('cuda')
        args.world_size = torch.cuda.device_count()
        args.local_rank = int(os.environ['LOCAL_RANK'])
    elif (not torch.cuda.is_available()):
        args.device = torch.device('cpu')
    else:
        args.device = torch.device(args.device)
    if args.use_wandb:
        import wandb
        # Only the main process logs to wandb under DDP.
        if ((not args.use_ddp) or (args.use_ddp and is_main_process())):
            wandb.init(project=args.project, entity=args.entity, name=args.exp_name, notes=args.notes)
            wandb.config.update(args)
    return args
class typedef(CythonType):
    """Python shadow of a Cython ctypedef: a base type under an optional alias."""

    def __init__(self, type, name=None):
        # 'type' deliberately shadows the builtin to mirror the Cython API.
        self._basetype = type
        self.name = name

    def __call__(self, *arg):
        # "Constructing" the alias just casts to the underlying base type.
        return cast(self._basetype, *arg)

    def __repr__(self):
        if self.name:
            return self.name
        return str(self._basetype)

    __getitem__ = index_type
def print_successes(succ_traj):
    """Print the successful trajectories, padded with blank lines."""
    for item in ('\n', 'Successes: ', succ_traj, '\n'):
        print(item)
class CaptionInstructDataset(CaptionDataset):
    """Caption dataset for instruction tuning: the caption becomes the target
    ('text_output') and the input text is replaced by an empty prompt."""

    def __getitem__(self, index):
        data = super().__getitem__(index)
        # Fix: compare against the None sentinel with identity ('is not'),
        # not '!=' — equality is the wrong check for a failed-load sentinel.
        if data is not None:
            data['text_output'] = data['text_input']
            data['text_input'] = self.text_processor('')
        return data
def remove_fc(state_dict):
    """Delete classifier ('fc.*') entries from *state_dict* in place and return it."""
    fc_keys = [key for key in list(state_dict.keys()) if key.startswith('fc.')]
    for key in fc_keys:
        del state_dict[key]
    return state_dict
class SpacepyLibFindTests(unittest.TestCase):
    """Checks that the compiled libspacepy extension was located at import time."""

    def setUp(self):
        # Surface every warning during these tests.
        warnings.simplefilter('always')

    def testExists(self):
        """spacepy.lib should report the C library as available."""
        self.assertTrue(spacepy.lib.have_libspacepy) |
def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True):
    """Build the image preprocessing pipeline described by opt.preprocess.

    opt.preprocess may contain 'resize', 'scale_width', 'crop', or equal
    'none'. When *params* is given it fixes the crop position and flip so
    that paired images receive identical random transforms.
    """
    transform_list = []
    if grayscale:
        transform_list.append(transforms.Grayscale(1))
    if ('resize' in opt.preprocess):
        osize = [opt.load_size, opt.load_size]
        transform_list.append(transforms.Resize(osize, method))
    elif ('scale_width' in opt.preprocess):
        transform_list.append(transforms.Lambda((lambda img: __scale_width(img, opt.load_size, opt.crop_size, method))))
    if ('crop' in opt.preprocess):
        # Random crop without params; deterministic crop at params['crop_pos'] with them.
        if (params is None):
            transform_list.append(transforms.RandomCrop(opt.crop_size))
        else:
            transform_list.append(transforms.Lambda((lambda img: __crop(img, params['crop_pos'], opt.crop_size))))
    if (opt.preprocess == 'none'):
        # Even without resizing, round dimensions to a multiple of 4.
        transform_list.append(transforms.Lambda((lambda img: __make_power_2(img, base=4, method=method))))
    if (not opt.no_flip):
        if (params is None):
            transform_list.append(transforms.RandomHorizontalFlip())
        elif params['flip']:
            transform_list.append(transforms.Lambda((lambda img: __flip(img, params['flip']))))
    if convert:
        # To tensor, then normalize each channel to [-1, 1].
        transform_list += [transforms.ToTensor()]
        if grayscale:
            transform_list += [transforms.Normalize((0.5,), (0.5,))]
        else:
            transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list) |
class TestModelClass(BaseModelClass):
    """Minimal scvi-tools-style model used to exercise AnnData registration."""

    def setup_anndata(cls, adata: AnnData, layer: Optional[str]=None, batch_key: Optional[str]=None, labels_key: Optional[str]=None, size_factor_key: Optional[str]=None, categorical_covariate_keys: Optional[list[str]]=None, continuous_covariate_keys: Optional[list[str]]=None, **kwargs):
        """Register the standard AnnData fields (counts layer, batch, labels,
        size factor, joint covariates) on *adata*.

        NOTE(review): first parameter is 'cls' but no @classmethod decorator
        is visible here — confirm a decorator was not lost upstream.
        """
        setup_method_args = cls._get_setup_method_args(**locals())
        anndata_fields = [fields.LayerField(REGISTRY_KEYS.X_KEY, layer, is_count_data=True), fields.CategoricalObsField(REGISTRY_KEYS.BATCH_KEY, batch_key), fields.CategoricalObsField(REGISTRY_KEYS.LABELS_KEY, labels_key), fields.NumericalObsField(REGISTRY_KEYS.SIZE_FACTOR_KEY, size_factor_key, required=False), fields.CategoricalJointObsField(REGISTRY_KEYS.CAT_COVS_KEY, categorical_covariate_keys), fields.NumericalJointObsField(REGISTRY_KEYS.CONT_COVS_KEY, continuous_covariate_keys)]
        adata_minify_type = _get_adata_minify_type(adata)
        if (adata_minify_type is not None):
            # Minified AnnData requires additional fields to be registered.
            anndata_fields += cls._get_fields_for_adata_minification(adata_minify_type)
        adata_manager = AnnDataManager(fields=anndata_fields, setup_method_args=setup_method_args)
        adata_manager.register_fields(adata, **kwargs)
        cls.register_manager(adata_manager)

    def train(self):
        # Test stub: no actual training.
        pass |
class PARALoss(nn.Module):
    """Masked binary cross-entropy averaged over entity regions.

    Spatial positions covered by at least one predicate label contribute to
    the loss; the per-(sample, channel) BCE sum is normalized by the number
    of active positions before averaging.

    Fix: removed the leftover debug probe `if loss.item() < 0: print('debug')`
    — BCE is non-negative for valid inputs, and `.item()` forced a host sync
    on every step.
    """

    def __init__(self):
        super().__init__()

    def forward(self, score, predicate_one_hot_labels):
        """score, predicate_one_hot_labels: (B, C, H, W); returns a scalar loss."""
        # Positions with any active label, broadcast across all C channels.
        entity_mask = predicate_one_hot_labels.sum(dim=1, keepdim=True).repeat_interleave(score.shape[1], dim=1)
        entity_mask = (entity_mask > 0).float()
        # Count of active positions per (sample, channel).
        # NOTE(review): zero when a sample has no labels -> nan from the
        # division below; confirm samples are filtered upstream.
        entity_sum = (entity_mask != 0).sum(dim=(2, 3)).float()
        loss = ((F.binary_cross_entropy(score, predicate_one_hot_labels, reduction='none') * entity_mask).sum(dim=(2, 3)) / entity_sum).mean()
        return loss
def plot_value_hist(vals, dp):
    """Draw a density-normalized histogram of *vals* on dp's next subplot.

    Fix: removed the unused locals `mn`/`mx` (np.min/np.max results that were
    never read).
    """
    nbins = 100
    # Acquired for its side effect of selecting the next subplot
    # (presumably — confirm dp.get_next_plot's contract); handle unused.
    pl = dp.get_next_plot()
    (n, bins) = np.histogram(vals, bins=nbins, density=True)
    width = (0.7 * (bins[1] - bins[0]))
    center = ((bins[:(- 1)] + bins[1:]) / 2)
    plt.bar(center, n, align='center', width=width, facecolor='green', alpha=0.9)
def np_array(tensor):
    """Convert a (possibly singleton-batched) tensor to a NumPy array.

    Drops a leading batch dimension of size 1, detaches from the autograd
    graph and moves to CPU before conversion.
    """
    return tensor.squeeze(0).detach().cpu().numpy()
def _ipaddress_match(ipname, host_ip):
    """Exact-match a certificate IP SAN value against the connection's host IP."""
    # The SAN value may carry trailing whitespace/newline; strip before parsing.
    parsed_ip = ipaddress.ip_address(_to_unicode(ipname).rstrip())
    return parsed_ip == host_ip
def is_integral(dtype: torch.dtype) -> bool:
    """Return True for boolean and (un)signed integer torch dtypes."""
    integral_dtypes = {torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64}
    return dtype in integral_dtypes
def _maybe_cast_reduce_op_input(g, self):
    """ONNX export helper: cast non-floating, non-Long reduce inputs to Long.

    Inputs such as int32/bool are upcast before the reduction node is emitted;
    floating-point and Long inputs pass through unchanged.
    """
    dtype = self.type().scalarType()
    if (dtype is not None):
        # Floats are handled natively by the reduce op; 'Long' is exempt.
        if ((not sym_help._is_fp(self)) and (not (dtype == 'Long'))):
            self = _cast_Long(g, self, False)
    return self |
class FreezeGradientsMatchingRegexTest(tf.test.TestCase):
    """Tests variables_helper.freeze_gradients_matching_regex."""

    def _create_grads_and_vars(self):
        # Two feature-extractor variables and two proposal-generator variables.
        return [(tf.constant(1.0), tf.Variable(1.0, name='FeatureExtractor/InceptionV3/weights')), (tf.constant(2.0), tf.Variable(2.0, name='FeatureExtractor/InceptionV3/biases')), (tf.constant(3.0), tf.Variable(3.0, name='StackProposalGenerator/weights')), (tf.constant(4.0), tf.Variable(4.0, name='StackProposalGenerator/biases'))]

    def test_freeze_all_feature_extractor_variables(self):
        """Freezing 'FeatureExtractor/.*' should leave only the generator pairs."""
        grads_and_vars = self._create_grads_and_vars()
        regex_list = ['FeatureExtractor/.*']
        grads_and_vars = variables_helper.freeze_gradients_matching_regex(grads_and_vars, regex_list)
        exp_output = [(3.0, 3.0), (4.0, 4.0)]
        init_op = tf.global_variables_initializer()
        with self.test_session() as sess:
            sess.run(init_op)
            output = sess.run(grads_and_vars)
            self.assertItemsEqual(output, exp_output) |
def test_multidim():
    """MapFission on a 2-D map whose body is a nested SDFG writing zeros.

    After the transformation the program must still zero every element of A.
    """
    sdfg = dace.SDFG('mapfission_multidim')
    sdfg.add_array('A', [2, 3], dace.float64)
    state = sdfg.add_state()
    (me, mx) = state.add_map('outer', dict(i='0:2', j='0:3'))
    # Nested SDFG: one tasklet writing 0 into the scalar array 'a'.
    nsdfg = dace.SDFG('nested')
    nsdfg.add_array('a', [1], dace.float64)
    nstate = nsdfg.add_state()
    t = nstate.add_tasklet('reset', {}, {'out'}, 'out = 0')
    a = nstate.add_write('a')
    nstate.add_edge(t, 'out', a, None, dace.Memlet.simple('a', '0'))
    # Wire the nested SDFG inside the outer map; 'a' maps to A[i, j].
    nsdfg_node = state.add_nested_sdfg(nsdfg, None, {}, {'a'})
    state.add_edge(me, None, nsdfg_node, None, dace.Memlet())
    anode = state.add_write('A')
    state.add_memlet_path(nsdfg_node, mx, anode, src_conn='a', memlet=dace.Memlet.simple('A', 'i,j'))
    # The transformation must apply at least once.
    assert (sdfg.apply_transformations_repeated(MapFission) > 0)
    A = np.random.rand(2, 3)
    sdfg(A=A)
    assert np.allclose(A, np.zeros_like(A)) |
class TestUtilApproxDividable(unittest.TestCase):
    """Tests util.approx_dividable: divisibility within relative/absolute slack."""

    def test_int(self):
        # Exact multiples pass even with zero tolerance.
        self.assertTrue(util.approx_dividable(24, 2, rel_overhead=0, abs_overhead=0))
        self.assertTrue(util.approx_dividable(24, 3, rel_overhead=0, abs_overhead=0))
        self.assertTrue(util.approx_dividable(24, 4, rel_overhead=0, abs_overhead=0))
        # Default tolerances allow small remainders.
        self.assertTrue(util.approx_dividable(11, 2))
        self.assertFalse(util.approx_dividable(8, 5))
        self.assertTrue(util.approx_dividable(19, 5))
        # Either a relative or an absolute allowance alone can admit 7/2.
        self.assertTrue(util.approx_dividable(7, 2, rel_overhead=0.2, abs_overhead=0))
        self.assertTrue(util.approx_dividable(7, 2, rel_overhead=0, abs_overhead=1))
        self.assertTrue(util.approx_dividable(19, 7, rel_overhead=0.2, abs_overhead=0))
        self.assertTrue(util.approx_dividable(19, 7, rel_overhead=0, abs_overhead=2))
        self.assertFalse(util.approx_dividable(22, 7, rel_overhead=0.2, abs_overhead=0))
        self.assertFalse(util.approx_dividable(23, 7, rel_overhead=0, abs_overhead=1))
        # Boundary check: the exact relative overhead of rounding 19 up to 21.
        ovhd = ((21 - 19) / max(21.0, 19.0))
        self.assertFalse(util.approx_dividable(19, 7, rel_overhead=(ovhd - 0.01), abs_overhead=0))
        self.assertTrue(util.approx_dividable(19, 7, rel_overhead=(ovhd + 0.01), abs_overhead=0))

    def test_float(self):
        # Float dividends with default tolerances.
        self.assertTrue(util.approx_dividable(18.4, 3))
        self.assertTrue(util.approx_dividable(21.4, 3)) |
def build_conv_layer(cfg, *args, **kwargs):
    """Build a convolution layer from a config dict.

    Args:
        cfg: dict with a 'type' key naming an entry of conv_cfg (plus extra
            layer kwargs), or None for the default 'Conv'.
        *args, **kwargs: forwarded to the layer constructor.
    Raises:
        KeyError: for an unknown layer type. (Fix: the message previously
        said "norm type" — a copy-paste from the norm builder.)
    """
    if (cfg is None):
        cfg_ = dict(type='Conv')
    else:
        assert (isinstance(cfg, dict) and ('type' in cfg))
        cfg_ = cfg.copy()
    layer_type = cfg_.pop('type')
    if (layer_type not in conv_cfg):
        raise KeyError('Unrecognized conv type {}'.format(layer_type))
    conv_layer = conv_cfg[layer_type]
    layer = conv_layer(*args, **kwargs, **cfg_)
    return layer
class MyModule(nn.Module):
    """Applies ReLU to x, then the custom autograd op MyFunction on (x, y)."""

    def forward(self, x, y):
        activated = nn.ReLU()(x)
        return MyFunction.apply(activated, y)
def test_slate_ope_performance_using_standard_additive_log():
    """End-to-end check that Cascade-DR estimates the uniform policy's value
    within 3 sigma, and reduces to RIPS when q_hat is all zeros."""
    # Synthetic slate bandit setup.
    n_unique_action = 10
    len_list = 3
    dim_context = 2
    reward_type = 'binary'
    random_state = 12345
    n_rounds = 1000
    reward_structure = 'standard_additive'
    click_model = None
    behavior_policy_function = linear_behavior_policy_logit
    reward_function = logistic_reward_function
    # Logged data from the behavior policy; a second dataset with a uniform
    # (random) behavior policy supplies the ground-truth on-policy rewards.
    dataset = SyntheticSlateBanditDataset(n_unique_action=n_unique_action, len_list=len_list, dim_context=dim_context, reward_type=reward_type, reward_structure=reward_structure, click_model=click_model, random_state=random_state, behavior_policy_function=behavior_policy_function, base_reward_function=reward_function)
    random_behavior_dataset = SyntheticSlateBanditDataset(n_unique_action=n_unique_action, len_list=len_list, dim_context=dim_context, reward_type=reward_type, reward_structure=reward_structure, click_model=click_model, random_state=random_state, behavior_policy_function=None, base_reward_function=reward_function)
    bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)
    slate_id = bandit_feedback['slate_id']
    context = bandit_feedback['context']
    action = bandit_feedback['action']
    reward = bandit_feedback['reward']
    pscore = bandit_feedback['pscore_cascade']
    position = bandit_feedback['position']
    random_behavior_feedback = random_behavior_dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)
    # Evaluation policy: uniform over the unique actions.
    evaluation_policy_logit_ = (np.ones((n_rounds, n_unique_action)) / n_unique_action)
    # NOTE(review): this first assignment is overwritten by
    # calc_evaluation_policy_action_dist below — presumably dead code; confirm.
    evaluation_policy_action_dist = (np.ones(((n_rounds * len_list) * n_unique_action)) / n_unique_action)
    (_, _, evaluation_policy_pscore) = dataset.obtain_pscore_given_evaluation_policy_logit(action=action, evaluation_policy_logit_=evaluation_policy_logit_, return_pscore_item_position=False)
    evaluation_policy_action_dist = dataset.calc_evaluation_policy_action_dist(action=action, evaluation_policy_logit_=evaluation_policy_logit_)
    # Q-hat from an importance-weighted slate regression model.
    base_regression_model = SlateRegressionModel(base_model=DecisionTreeRegressor(max_depth=3, random_state=12345), len_list=len_list, n_unique_action=n_unique_action, fitting_method='iw')
    q_hat = base_regression_model.fit_predict(context=context, action=action, reward=reward, pscore_cascade=pscore, evaluation_policy_pscore_cascade=evaluation_policy_pscore, evaluation_policy_action_dist=evaluation_policy_action_dist)
    cascade_dr_estimated_policy_value = dr.estimate_policy_value(slate_id=slate_id, action=action, reward=reward, pscore_cascade=pscore, position=position, evaluation_policy_pscore_cascade=evaluation_policy_pscore, q_hat=q_hat, evaluation_policy_action_dist=evaluation_policy_action_dist)
    # Ground-truth value: mean on-policy slate reward from the random-behavior log.
    q_pi_e = random_behavior_feedback['reward'].reshape((n_rounds, dataset.len_list)).sum(axis=1)
    gt_mean = q_pi_e.mean()
    gt_std = q_pi_e.std(ddof=1)
    print('Cascade additive')
    # Assert each estimate falls inside a 3-sigma confidence bound.
    ci_bound = ((gt_std * 3) / np.sqrt(q_pi_e.shape[0]))
    print(f'gt_mean: {gt_mean}, 3 * gt_std / sqrt(n): {ci_bound}')
    estimated_policy_value = {'cascade-dr': cascade_dr_estimated_policy_value}
    for key in estimated_policy_value:
        print(f'estimated_value: {estimated_policy_value[key]} ------ estimator: {key}, ')
        assert (np.abs((gt_mean - estimated_policy_value[key])) <= ci_bound), f'OPE of {key} did not work well (absolute error is greater than 3*sigma)'
    # With q_hat = 0, Cascade-DR must coincide with RIPS.
    cascade_dr_estimated_policy_value_ = dr.estimate_policy_value(slate_id=slate_id, action=action, reward=reward, pscore_cascade=pscore, position=position, evaluation_policy_pscore_cascade=evaluation_policy_pscore, q_hat=np.zeros_like(q_hat), evaluation_policy_action_dist=evaluation_policy_action_dist)
    rips_estimated_policy_value = rips.estimate_policy_value(slate_id=slate_id, reward=reward, pscore_cascade=pscore, position=position, evaluation_policy_pscore_cascade=evaluation_policy_pscore)
    assert np.allclose(np.array([cascade_dr_estimated_policy_value_]), np.array([rips_estimated_policy_value])) |
def register_Ns3LteStatsCalculator_methods(root_module, cls):
    """PyBindGen registration for ns3::LteStatsCalculator (generated-style code).

    Declares copy/default constructors, the public path/filename accessors,
    and the protected static path-parsing helpers on the wrapper class.
    """
    cls.add_constructor([param('ns3::LteStatsCalculator const &', 'arg0')])
    cls.add_constructor([])
    # Public path existence checks and accessors.
    cls.add_method('ExistsCellIdPath', 'bool', [param('std::string', 'path')])
    cls.add_method('ExistsImsiPath', 'bool', [param('std::string', 'path')])
    cls.add_method('GetCellIdPath', 'uint16_t', [param('std::string', 'path')])
    cls.add_method('GetDlOutputFilename', 'std::string', [])
    cls.add_method('GetImsiPath', 'uint64_t', [param('std::string', 'path')])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('GetUlOutputFilename', 'std::string', [])
    cls.add_method('SetCellIdPath', 'void', [param('std::string', 'path'), param('uint16_t', 'cellId')])
    cls.add_method('SetDlOutputFilename', 'void', [param('std::string', 'outputFilename')])
    cls.add_method('SetImsiPath', 'void', [param('std::string', 'path'), param('uint64_t', 'imsi')])
    cls.add_method('SetUlOutputFilename', 'void', [param('std::string', 'outputFilename')])
    # Protected static helpers for resolving cellId/IMSI from trace paths.
    cls.add_method('FindCellIdFromEnbMac', 'uint16_t', [param('std::string', 'path'), param('uint16_t', 'rnti')], is_static=True, visibility='protected')
    cls.add_method('FindCellIdFromEnbRlcPath', 'uint16_t', [param('std::string', 'path')], is_static=True, visibility='protected')
    cls.add_method('FindImsiForEnb', 'uint64_t', [param('std::string', 'path'), param('uint16_t', 'rnti')], is_static=True, visibility='protected')
    cls.add_method('FindImsiForUe', 'uint64_t', [param('std::string', 'path'), param('uint16_t', 'rnti')], is_static=True, visibility='protected')
    cls.add_method('FindImsiFromEnbMac', 'uint64_t', [param('std::string', 'path'), param('uint16_t', 'rnti')], is_static=True, visibility='protected')
    cls.add_method('FindImsiFromEnbRlcPath', 'uint64_t', [param('std::string', 'path')], is_static=True, visibility='protected')
    cls.add_method('FindImsiFromLteNetDevice', 'uint64_t', [param('std::string', 'path')], is_static=True, visibility='protected')
    cls.add_method('FindImsiFromUePhy', 'uint64_t', [param('std::string', 'path')], is_static=True, visibility='protected')
    return |
def deconv3d(norm_type, in_planes, out_planes, num_groups=2):
    """2x upsampling block: ConvTranspose3d -> optional norm -> LeakyReLU(0.2).

    norm_type: 'batch' -> BatchNorm3d, 'group' -> GroupNorm(num_groups),
    anything else -> no normalization layer.
    """
    layers = [nn.ConvTranspose3d(in_planes, out_planes, kernel_size=4, stride=2, padding=1, bias=True)]
    if norm_type == 'batch':
        layers.append(nn.BatchNorm3d(out_planes))
    elif norm_type == 'group':
        layers.append(nn.GroupNorm(num_groups, out_planes))
    layers.append(nn.LeakyReLU(0.2, inplace=True))
    return nn.Sequential(*layers)
def get_toxicity_metric_specs() -> List[MetricSpec]:
    """Metric specs for toxicity evaluation (the ToxicityMetric, no extra args)."""
    toxicity_metric = MetricSpec(
        class_name='helm.benchmark.metrics.toxicity_metrics.ToxicityMetric',
        args={},
    )
    return [toxicity_metric]
def ocp(F, bcs, J, y, u, p, config):
    """Build a cashocs OptimalControlProblem from state equation F, boundary
    conditions bcs, cost functional J, state y, control u, adjoint p and config."""
    return cashocs.OptimalControlProblem(F, bcs, J, y, u, p, config=config) |
class BaseAssigner(metaclass=ABCMeta):
    """Base class for assigning ground-truth boxes to candidate bboxes.

    NOTE(review): the class uses ABCMeta but assign() is not decorated with
    @abstractmethod, so the class is instantiable — confirm this is intended.
    """

    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
        """Assign gt boxes (and optional labels) to bboxes; no-op in the base."""
        pass |
class GPTNeoForCausalLM(metaclass=DummyObject):
    """Import-time placeholder: usable only when the torch backend is installed."""
    # Backends required for the real implementation.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Fails fast with an informative error when torch is missing.
        requires_backends(self, ['torch']) |
class MaxUnpool2d(_MaxUnpoolNd):
    """Complex-tensor MaxUnpool2d: routes torch's F.max_unpool2d through
    cF.complex_fcaller (presumably applying it component-wise — confirm in
    complex_fcaller)."""
    kernel_size: _size_2_t
    stride: _size_2_t
    padding: _size_2_t

    def __init__(self, kernel_size: _size_2_t, stride: Optional[_size_2_t]=None, padding: _size_2_t=0) -> None:
        super(MaxUnpool2d, self).__init__()
        self.kernel_size = _pair(kernel_size)
        # Stride defaults to the kernel size, mirroring torch.nn.MaxUnpool2d.
        self.stride = _pair((stride if (stride is not None) else kernel_size))
        self.padding = _pair(padding)

    def forward(self, input: Tensor, indices: Tensor, output_size: Optional[List[int]]=None) -> Tensor:
        """Unpool using the indices produced by the matching MaxPool2d."""
        return cF.complex_fcaller(F.max_unpool2d, input, indices, self.kernel_size, self.stride, self.padding, output_size) |
class TFltRect(object):
    """SWIG proxy for SNAP's TFltRect: an axis-aligned float rectangle
    defined by (MnX, MnY)-(MxX, MxY). All methods delegate to the _snap C++
    extension."""
    thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
    __repr__ = _swig_repr
    # Corner coordinates exposed as read/write properties on the C++ object.
    MnX = _swig_property(_snap.TFltRect_MnX_get, _snap.TFltRect_MnX_set)
    MnY = _swig_property(_snap.TFltRect_MnY_get, _snap.TFltRect_MnY_set)
    MxX = _swig_property(_snap.TFltRect_MxX_get, _snap.TFltRect_MxX_set)
    MxY = _swig_property(_snap.TFltRect_MxY_get, _snap.TFltRect_MxY_set)

    def __init__(self, *args):
        _snap.TFltRect_swiginit(self, _snap.new_TFltRect(*args))

    def Save(self, SOut):
        # Serialize to a SNAP output stream.
        return _snap.TFltRect_Save(self, SOut)

    def GetMnX(self):
        return _snap.TFltRect_GetMnX(self)

    def GetMnY(self):
        return _snap.TFltRect_GetMnY(self)

    def GetMxX(self):
        return _snap.TFltRect_GetMxX(self)

    def GetMxY(self):
        return _snap.TFltRect_GetMxY(self)

    def GetXLen(self):
        return _snap.TFltRect_GetXLen(self)

    def GetYLen(self):
        return _snap.TFltRect_GetYLen(self)

    def GetXCenter(self):
        return _snap.TFltRect_GetXCenter(self)

    def GetYCenter(self):
        return _snap.TFltRect_GetYCenter(self)

    def IsXYIn(self, X, Y):
        # Point-in-rectangle test.
        return _snap.TFltRect_IsXYIn(self, X, Y)

    def Intersection(Rect1, Rect2):
        return _snap.TFltRect_Intersection(Rect1, Rect2)
    # Exposed as a static method (SWIG-generated pattern).
    Intersection = staticmethod(Intersection)

    def GetStr(self):
        return _snap.TFltRect_GetStr(self)

    __swig_destroy__ = _snap.delete_TFltRect |
class RewardPlusDelay(ValueFunction):
    """Static value function: score = immediate request value + delay bonus."""

    def __init__(self, DELAY_COEFFICIENT: float=0.001, log_dir='../logs/'):
        super().__init__(log_dir)
        # Weight applied to the remaining delay of an action's new path.
        self.DELAY_COEFFICIENT = DELAY_COEFFICIENT

    def _score(self, action: Action) -> float:
        # Immediate value of the served requests plus a bonus proportional to
        # the total delay remaining on the action's new path.
        assert action.new_path
        immediate_reward = sum(request.value for request in action.requests)
        delay_bonus = self.DELAY_COEFFICIENT * action.new_path.total_delay
        return immediate_reward + delay_bonus

    def get_value(self, experiences: List[Experience]) -> List[List[Tuple[(Action, float)]]]:
        """Score every feasible action of every agent in every experience."""
        scored_actions_all_agents: List[List[Tuple[(Action, float)]]] = []
        for experience in experiences:
            for feasible_actions in experience.feasible_actions_all_agents:
                scored = [(action, self._score(action)) for action in feasible_actions]
                scored_actions_all_agents.append(scored)
        return scored_actions_all_agents

    def update(self, *args, **kwargs):
        # Static value function — nothing to learn.
        pass

    def remember(self, *args, **kwargs):
        # Static value function — no experience buffer to maintain.
        pass
def _impl(array, fill_value, highlevel, behavior, dtype, including_unknown, attrs):
    # Core of a full_like-style operation: produce an array with the same
    # layout as `array` but with every leaf value replaced by `fill_value`.
    # `fill_value` may be the private _ZEROS sentinel (zero-fill); `dtype`
    # optionally retypes the numeric leaves.
    with HighLevelContext(behavior=behavior, attrs=attrs) as ctx:
        # Unwrap both operands onto one backend; fill_value may stay a plain
        # scalar/string instead of becoming a layout.
        (layout, _) = ensure_same_backend(ctx.unwrap(array, primitive_policy='error'), ctx.unwrap(fill_value, primitive_policy='pass-through', string_policy='pass-through', allow_unknown=True))
        if (dtype is not None):
            # Normalize the requested dtype and coerce the fill value to it
            # up front so every leaf sees a consistently-typed scalar.
            dtype = np.dtype(dtype)
            fill_value = layout.backend.nplike.asarray([fill_value], dtype=dtype)[0]
            if (dtype == np.dtype(np.bool_)):
                # Booleans are filled via their uint8 bit pattern.
                fill_value = fill_value.view(np.uint8)
        def action(layout, backend, **kwargs):
            # Visitor for recursively_apply: return a replacement content for
            # leaves we handle, or None to keep recursing into children.
            nplike = backend.nplike
            index_nplike = backend.index_nplike
            if layout.is_numpy:
                original = nplike.asarray(layout.data)
                # Fast paths for 0/1 fills; generic full_like otherwise.
                if ((fill_value is _ZEROS) or (is_integer_like(fill_value) and (not is_unknown_scalar(fill_value)) and (fill_value == 0))):
                    return ak.contents.NumpyArray(nplike.zeros_like(original, dtype=dtype), parameters=layout.parameters)
                elif (is_integer_like(fill_value) and (not is_unknown_scalar(fill_value)) and (fill_value == 1)):
                    return ak.contents.NumpyArray(nplike.ones_like(original, dtype=dtype), parameters=layout.parameters)
                else:
                    return ak.contents.NumpyArray(nplike.full_like(original, fill_value, dtype=dtype), parameters=layout.parameters)
            elif layout.is_unknown:
                if ((dtype is not None) and including_unknown):
                    # Caller asked to materialize unknown-type leaves.
                    return layout.to_NumpyArray(dtype=dtype)
                else:
                    return None
            elif (layout.parameter('__array__') in {'bytestring', 'string'}):
                stringlike_type = layout.parameter('__array__')
                if (fill_value is _ZEROS):
                    # Zero-fill for strings: every entry becomes the empty
                    # string (all list offsets point at an empty byte buffer).
                    asbytes = nplike.frombuffer(b'', dtype=np.uint8)
                    result = ak.contents.ListArray(ak.index.Index64(index_nplike.zeros(layout.length, dtype=np.int64), nplike=index_nplike), ak.index.Index64(index_nplike.zeros(layout.length, dtype=np.int64), nplike=index_nplike), ak.contents.NumpyArray(asbytes, parameters={'__array__': ('byte' if (stringlike_type == 'bytestring') else 'char')}), parameters={'__array__': stringlike_type})
                elif (stringlike_type == 'bytestring'):
                    # Every entry becomes the same byte string; the single
                    # shared buffer is referenced by all starts/stops.
                    if isinstance(fill_value, bytes):
                        asbytes = fill_value
                    else:
                        asbytes = str(fill_value).encode('utf-8', 'surrogateescape')
                    asbytes = nplike.frombuffer(asbytes, dtype=np.uint8)
                    # NOTE(review): unlike the Index64 calls above, this stops
                    # index omits nplike=index_nplike — presumably the default
                    # is equivalent; confirm against awkward's Index64 API.
                    result = ak.contents.ListArray(ak.index.Index64(index_nplike.zeros(layout.length, dtype=np.int64), nplike=index_nplike), ak.index.Index64(index_nplike.full(layout.length, len(asbytes), dtype=np.int64)), ak.contents.NumpyArray(asbytes, parameters={'__array__': 'byte'}), parameters={'__array__': 'bytestring'})
                else:
                    assert (stringlike_type == 'string')
                    asstr = str(fill_value).encode('utf-8', 'surrogateescape')
                    asbytes = nplike.frombuffer(asstr, dtype=np.uint8)
                    # NOTE(review): same missing nplike= as the bytestring
                    # branch above — confirm it is intentional.
                    result = ak.contents.ListArray(ak.index.Index64(index_nplike.zeros(layout.length, dtype=np.int64), nplike=index_nplike), ak.index.Index64(index_nplike.full(layout.length, len(asbytes), dtype=np.int64)), ak.contents.NumpyArray(asbytes, parameters={'__array__': 'char'}), parameters={'__array__': 'string'})
                if (dtype is not None):
                    # Retype string fills: parse to numbers, then cast.
                    result = ak.operations.strings_astype(result, dtype, highlevel=False, behavior=behavior)
                    result = ak.operations.values_astype(result, dtype, highlevel=False, behavior=behavior)
                return result
            else:
                return None
        out = ak._do.recursively_apply(layout, action)
        return ctx.wrap(out, highlevel=highlevel)
class COGS(TypedTextDataset):
    """Loader for the COGS compositional-generalization dataset.

    Downloads the train/test/valid/gen TSV splits, stores all sentence pairs
    once globally with per-split index tables, and records each example's
    generalization type.
    """

    # Bug fix: the URL literal in the original source was truncated to an
    # unterminated string (a syntax error). This is the canonical raw-data
    # location for COGS (najoungkim/COGS) — TODO(review): confirm the URL
    # against the project's download mirror before release.
    URL_BASE = 'https://raw.githubusercontent.com/najoungkim/COGS/main/data/'
    SPLT_TYPES = ['train', 'test', 'valid', 'gen']
    # On-disk file names that differ from the split name used internally.
    NAME_MAP = {'valid': 'dev'}

    def build_cache(self) -> TypedTextDatasetCache:
        """Download every split and build the typed-text cache.

        Each TSV row is (input sentence, output logical form, generalization
        type); type strings are interned as small integer ids.
        """
        types = []
        type_list = []
        type_map = {}
        index_table = {}
        in_sentences = []
        out_sentences = []
        for st in self.SPLT_TYPES:
            fname = self.NAME_MAP.get(st, st) + '.tsv'
            split_fn = os.path.join(self.cache_dir, fname)
            os.makedirs(os.path.dirname(split_fn), exist_ok=True)
            full_url = self.URL_BASE + fname
            print('Downloading', full_url)
            download(full_url, split_fn, ignore_if_exists=True)
            index_table[st] = []
            with open(split_fn, 'r') as f:
                d = csv.reader(f, delimiter='\t')
                for line in d:
                    (i, o, t) = line
                    # Global sentence index; the split table references it.
                    index_table[st].append(len(in_sentences))
                    in_sentences.append(i)
                    out_sentences.append(o)
                    tind = type_map.get(t)
                    if tind is None:
                        type_map[t] = tind = len(type_list)
                        type_list.append(t)
                    types.append(tind)
        assert len(in_sentences) == len(out_sentences)
        return TypedTextDatasetCache().build({'default': index_table}, in_sentences, out_sentences, types, type_list)

    def start_test(self) -> TypedTextSequenceTestState:
        # Decode token-id sequences back to space-joined strings for reporting.
        return TypedTextSequenceTestState((lambda x: ' '.join(self.in_vocabulary(x))), (lambda x: ' '.join(self.out_vocabulary(x))), self._cache.type_names)
def df():
    """Load a 1000-row sample of the ML1M ratings, convert it to Spark, and
    label-encode the user/item id columns."""
    data_dir = dirname(replay.__file__)
    ratings_path = join(data_dir, '../examples/data/ml1m_ratings.dat')
    pandas_df = pd.read_csv(
        ratings_path,
        sep='\t',
        names=['user_id', 'item_id', 'relevance', 'timestamp'],
    ).head(1000)
    spark_df = convert2spark(pandas_df)
    rules = [LabelEncodingRule('user_id'), LabelEncodingRule('item_id')]
    return LabelEncoder(rules).fit_transform(spark_df)
def main(argv=sys.argv):
    """Split the input dataset into a subset and (optionally) the rest.

    ``process_options`` yields the dataset path, the subset size, the
    selection method (0 = stratified, 1 = random) and the open output files.
    ``selected_lines`` is a sorted list of 0-based line numbers; each selected
    line goes to ``subset_file`` and every other line to ``rest_file`` when
    one was requested.
    """
    (dataset, subset_size, method, subset_file, rest_file) = process_options(argv)
    selected_lines = []
    if method == 0:
        selected_lines = stratified_selection(dataset, subset_size)
    elif method == 1:
        selected_lines = random_selection(dataset, subset_size)
    # Bug fix: the original used Python 2's xrange (NameError on Python 3)
    # and left the input file open on error; range + a with-block fix both.
    with open(dataset, 'r') as dataset_fp:
        prev_selected_linenum = -1
        for linenum in selected_lines:
            # Copy the unselected lines between two selections to rest_file.
            for _ in range(linenum - prev_selected_linenum - 1):
                line = dataset_fp.readline()
                if rest_file:
                    rest_file.write(line)
            subset_file.write(dataset_fp.readline())
            prev_selected_linenum = linenum
        subset_file.close()
        if rest_file:
            # Everything after the last selected line belongs to the rest.
            for line in dataset_fp:
                rest_file.write(line)
            rest_file.close()
class DualMatroid(Matroid):
    """The dual of a given matroid.

    Every query is answered by delegating the *co*-version of the query to
    the wrapped matroid (rank <-> corank, circuit <-> cocircuit, closure <->
    coclosure, …), and contraction/deletion swap roles in minors for the
    same reason.
    """

    def __init__(self, matroid):
        if not isinstance(matroid, Matroid):
            raise TypeError('no matroid provided to take dual of.')
        self._matroid = matroid

    def groundset(self):
        # The dual shares the groundset of the original matroid.
        return self._matroid.groundset()

    # --- delegation: each primal query maps to the wrapped co-query ---
    def _rank(self, X):
        return self._matroid._corank(X)

    def _corank(self, X):
        return self._matroid._rank(X)

    def _max_independent(self, X):
        return self._matroid._max_coindependent(X)

    def _circuit(self, X):
        return self._matroid._cocircuit(X)

    def _closure(self, X):
        return self._matroid._coclosure(X)

    def _max_coindependent(self, X):
        return self._matroid._max_independent(X)

    def _coclosure(self, X):
        return self._matroid._closure(X)

    def _cocircuit(self, X):
        return self._matroid._circuit(X)

    def _minor(self, contractions=None, deletions=None):
        # Contractions and deletions swap under duality.
        return DualMatroid(self._matroid._minor(contractions=deletions, deletions=contractions))

    def dual(self):
        # The dual of the dual is the original matroid.
        return self._matroid

    def _repr_(self):
        return (("Dual of '" + repr(self._matroid)) + "'")

    def __hash__(self):
        return hash(('Dual', hash(self._matroid)))

    def __eq__(self, other):
        if not isinstance(other, DualMatroid):
            return False
        return self._matroid == other._matroid

    def __ne__(self, other):
        return not (self == other)

    def __copy__(self):
        N = DualMatroid(self._matroid)
        N.rename(self.get_custom_name())
        return N

    def __deepcopy__(self, memo=None):
        # Bug fix: the original declared a mutable default ``memo={}``;
        # deepcopy mutates its memo dict, so a shared default could leak
        # cached objects between unrelated direct calls. Create a fresh
        # dict per call instead.
        if memo is None:
            memo = {}
        from copy import deepcopy
        N = DualMatroid(deepcopy(self._matroid, memo))
        N.rename(deepcopy(self.get_custom_name(), memo))
        return N

    def __reduce__(self):
        # Pickle via the dedicated unpickling helper; version 0 format.
        import sage.matroids.unpickling
        data = (self._matroid, self.get_custom_name())
        version = 0
        return (sage.matroids.unpickling.unpickle_dual_matroid, (version, data))
def map_aa_idx_to_tok_set(msa_sampler):
    """Return the set of alphabet tokens for the sampler's valid amino-acid indices.

    Duplicate indices collapse naturally since the result is a set.
    """
    alphabet = msa_sampler.model.alphabet
    # Set comprehension instead of set(generator): same result, clearer idiom.
    return {alphabet.get_tok(idx) for idx in msa_sampler.valid_aa_idx}
class DecoderSCAR(nn.Module):
    """Decoder network for the SCAR ambient-RNA model.

    Maps a latent sample ``z`` through shared FC layers, then emits the
    normalized expression scale, a per-cell noise ratio, the library-scaled
    rate, and a dropout logit.

    Args:
        n_input: dimensionality of the latent input ``z``.
        n_output: number of output features (genes).
        n_layers / n_hidden: shape of the shared FC trunk.
        use_batch_norm / use_layer_norm: normalization options for the trunk.
        scale_activation: which activation normalizes the scale head.
        sparsity: sparsity parameter passed to ``softplus`` when
            ``scale_activation == 'softplus_sp'``.
    """

    def __init__(self, n_input: int, n_output: int, n_layers: int=2, n_hidden: int=150, use_batch_norm: bool=True, use_layer_norm: bool=False, scale_activation: Literal[('softmax', 'softplus', 'softplus_sp')]='softplus_sp', sparsity: float=0.9):
        super().__init__()
        self.px_decoder = FCLayers(n_in=n_input, n_out=n_hidden, n_layers=n_layers, n_hidden=n_hidden, dropout_rate=0, use_batch_norm=use_batch_norm, use_layer_norm=use_layer_norm)
        if scale_activation == 'softmax':
            px_scale_activation = nn.Softmax(dim=-1)
        elif scale_activation == 'softplus':
            px_scale_activation = nn.Softplus()
        elif scale_activation == 'softplus_sp':
            px_scale_activation = softplus(sparsity)
        else:
            # Bug fix: an unrecognized value previously fell through and
            # raised a confusing NameError on px_scale_activation below;
            # fail fast with a clear message instead.
            raise ValueError(f'unknown scale_activation: {scale_activation!r}')
        self.px_scale_decoder = nn.Sequential(nn.Linear(n_hidden, n_output), px_scale_activation, hnormalization())
        self.px_noise_decoder = nn.Sequential(nn.Linear(n_hidden, 1), tanh())
        self.px_dropout_decoder = nn.Linear(n_hidden, n_output)

    def forward(self, z: torch.Tensor, library: torch.Tensor):
        """Return ``(px_scale, px_noise_ratio, px_rate, px_dropout)``."""
        px = self.px_decoder(z)
        px_scale = self.px_scale_decoder(px)
        px_dropout = self.px_dropout_decoder(px)
        px_noise_ratio = self.px_noise_decoder(px)
        # exp(library) scales px_scale into a rate — presumably `library` is a
        # log-library size; TODO(review) confirm against the encoder.
        px_rate = torch.exp(library) * px_scale
        return (px_scale, px_noise_ratio, px_rate, px_dropout)
def build_plugin_layer(cfg, postfix='', **kwargs):
    """Build a plugin layer from a registry config dict.

    Args:
        cfg (dict): must contain a ``'type'`` key naming a registered plugin;
            the remaining keys become constructor arguments.
        postfix (int | str): appended to the inferred abbreviation to form
            the returned layer name.
        **kwargs: extra constructor arguments, merged with those from cfg.

    Returns:
        tuple: ``(name, layer)`` — the composed name and the instantiated
        plugin layer.
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')
    ctor_args = dict(cfg)
    layer_type = ctor_args.pop('type')
    if layer_type not in PLUGIN_LAYERS:
        raise KeyError(f'Unrecognized plugin type {layer_type}')
    plugin_cls = PLUGIN_LAYERS.get(layer_type)
    assert isinstance(postfix, (int, str))
    layer_name = infer_abbr(plugin_cls) + str(postfix)
    return (layer_name, plugin_cls(**kwargs, **ctor_args))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.