class DirectPlannerSourceOneSided(MulticastDirectPlanner):
def plan(self, jobs: List[TransferJob]) -> TopologyPlan:
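# One-sided variant of the multicast planner: gateways are provisioned only in the
# source region; each job reads from the source object store and fans out writes to
# every destination object store through a single mux-and operator.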
src_region_tag = jobs[0].src_iface.region_tag()
dst_region_tags = [iface.region_tag() for iface in jobs[0].dst_ifaces]
for job in jobs[1:]:
assert (job.src_iface.region_tag() == src_region_tag), 'All jobs must have same source region'
assert ([iface.region_tag() for iface in job.dst_ifaces] == dst_region_tags), 'All jobs must have same destination set'
plan = TopologyPlan(src_region_tag=src_region_tag, dest_region_tags=dst_region_tags)
(vm_types, n_instances) = self._get_vm_type_and_instances(src_region_tag=src_region_tag)
for i in range(n_instances):
plan.add_gateway(src_region_tag, vm_types[src_region_tag])
src_program = GatewayProgram()
for job in jobs:
src_bucket = job.src_iface.bucket()
src_region_tag = job.src_iface.region_tag()
src_provider = src_region_tag.split(':')[0]
partition_id = job.uuid
obj_store_read = src_program.add_operator(GatewayReadObjectStore(src_bucket, src_region_tag, self.n_connections), partition_id=partition_id)
mux_and = src_program.add_operator(GatewayMuxAnd(), parent_handle=obj_store_read, partition_id=partition_id)
dst_prefixes = job.dst_prefixes
for i in range(len(job.dst_ifaces)):
dst_iface = job.dst_ifaces[i]
dst_prefix = dst_prefixes[i]
dst_region_tag = dst_iface.region_tag()
dst_bucket = dst_iface.bucket()
plan.get_region_gateways(dst_region_tag)
src_program.add_operator(GatewayWriteObjectStore(dst_bucket, dst_region_tag, self.n_connections, key_prefix=dst_prefix), parent_handle=mux_and, partition_id=partition_id)
plan.cost_per_gb += compute.CloudProvider.get_transfer_cost(src_region_tag, dst_region_tag)
plan.set_gateway_program(src_region_tag, src_program)
return plan |
def generate_online_performance_plot(performances=None, colors=None, xticks=[], xticks_labels=None, yticks=[], yticks_labels=None, m=20000, xlabel='', ylabel='', labels=None, caption=None, fontsize=24, log_scale_x=False, log_scale_y=False, svg=False):
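# Plots one curve per hyper-parameter setting: the mean over runs with a shaded
# +/- standard-error band; `performances` is assumed to have shape
# (n_settings, n_runs, n_points) and the x-axis is scaled by `m` steps per point.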
shape = np.shape(performances)
if (colors is None):
colors = [(1, 0, 0, 1), (0.5, 0.5, 0, 1), (0, 1, 0, 1)]
(fig, ax) = plt.subplots()
for index_hyper_param_label in range(shape[0]):
x = np.array([i for i in range(shape[(- 1)])])
mean = np.mean(performances[index_hyper_param_label], axis=0)
num_samples = np.shape(performances)[1]
std_err = (np.std(performances[index_hyper_param_label], axis=0) / np.sqrt(num_samples))
label = ''
if (labels is not None):
label = labels[index_hyper_param_label]
color = colors[index_hyper_param_label]
plt.plot((x * m), mean, '-', label=label, color=color)
plt.fill_between((x * m), (mean - std_err), (mean + std_err), color=color, alpha=0.2)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
if (xticks_labels is None):
xticks_labels = xticks
ax.set_xticks(xticks)
ax.set_xticklabels(xticks_labels, fontsize=fontsize)
if (len(yticks) > 0):
ax.set_yticks(yticks)
ax.set_ylim(yticks[0], yticks[(- 1)])
if (yticks_labels is not None):
ax.set_yticklabels(yticks_labels, fontsize=fontsize)
elif (len(yticks) > 0):
ax.set_yticklabels(yticks, fontsize=fontsize)
ax.set_ylim(yticks[0], yticks[(- 1)])
if log_scale_y:
ax.set_yscale('log')
if log_scale_x:
ax.set_xscale('log')
ax.yaxis.grid()
if (labels is not None):
plt.legend()
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if (caption is not None):
ax.set_title(caption)
if svg:
plt.savefig('comparison.svg', bbox_inches='tight', dpi=500)
else:
plt.savefig('comparison.png', bbox_inches='tight', dpi=500)
plt.close() |
class DownstreamExpert(nn.Module):
def __init__(self, upstream_dim, downstream_expert, **kwargs):
super(DownstreamExpert, self).__init__()
self.upstream_dim = upstream_dim
self.datarc = downstream_expert['datarc']
self.modelrc = downstream_expert['modelrc']
idtable = (Path(kwargs['expdir']) / 'idtable.pkl')
self.train_dataset = VCC18SegmentalDataset(preprocess(self.datarc['vcc2018_file_path'], 'train_judge.csv'), self.datarc['vcc2018_file_path'], idtable=idtable)
self.dev_dataset = VCC18SegmentalDataset(preprocess(self.datarc['vcc2018_file_path'], 'valid_judge.csv'), self.datarc['vcc2018_file_path'], idtable=idtable, valid=False)
self.vcc2018_test_dataset = VCC18SegmentalDataset(preprocess(self.datarc['vcc2018_file_path'], 'test_judge.csv'), self.datarc['vcc2018_file_path'], idtable=idtable, valid=False)
self.vcc2018_system_mos = pd.read_csv(Path(self.datarc['vcc2018_file_path'], 'VCC2018_Results/system_mos_all_trackwise.csv'))
self.vcc2016_test_dataset = VCC16SegmentalDataset(list(Path.iterdir(Path(self.datarc['vcc2016_file_path'], 'unified_speech'))), Path(self.datarc['vcc2016_file_path'], 'unified_speech'))
self.vcc2016_system_mos = pd.read_csv(Path(self.datarc['vcc2016_file_path'], 'system_mos.csv'), index_col=False)
self.connector = nn.Linear(upstream_dim, self.modelrc['projector_dim'])
self.model = Model(input_dim=self.modelrc['projector_dim'], clipping=(self.modelrc['clipping'] if ('clipping' in self.modelrc) else False), attention_pooling=(self.modelrc['attention_pooling'] if ('attention_pooling' in self.modelrc) else False), num_judges=5000)
self.objective = nn.MSELoss()
self.segment_weight = self.modelrc['segment_weight']
self.bias_weight = self.modelrc['bias_weight']
self.best_scores = {'dev_loss': np.inf, 'dev_LCC': (- np.inf), 'dev_SRCC': (- np.inf), 'vcc2016_test_LCC': (- np.inf), 'vcc2016_test_SRCC': (- np.inf)}
def get_dataloader(self, mode):
if (mode == 'train'):
return self._get_train_dataloader(self.train_dataset)
elif (mode == 'dev'):
return self._get_eval_dataloader(self.dev_dataset)
elif (mode == 'vcc2018_test'):
return self._get_eval_dataloader(self.vcc2018_test_dataset)
elif (mode == 'vcc2016_test'):
return self._get_eval_dataloader(self.vcc2016_test_dataset)
def _get_train_dataloader(self, dataset):
sampler = (DistributedSampler(dataset) if is_initialized() else None)
return DataLoader(dataset, batch_size=self.datarc['train_batch_size'], shuffle=(sampler is None), sampler=sampler, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)
def _get_eval_dataloader(self, dataset):
return DataLoader(dataset, batch_size=self.datarc['eval_batch_size'], shuffle=False, num_workers=self.datarc['num_workers'], collate_fn=dataset.collate_fn)
def forward(self, mode, features, prefix_sums, means, system_names, moses, judge_ids, records, **kwargs):
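# Train mode: judge-conditioned (MBNet-style) bias scores are predicted alongside
# segment scores, and the loss combines segment-, utterance- and bias-level MSE.
# Eval modes: segment scores are averaged per utterance via `prefix_sums`; for
# vcc2016_test the utterance scores are additionally grouped per system.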
features = torch.stack(features)
features = self.connector(features)
uttr_scores = []
bias_scores = []
if (mode == 'train'):
means = means.to(features.device)
judge_ids = judge_ids.to(features.device)
moses = moses.to(features.device)
(segments_scores, segments_bias_scores) = self.model(features, judge_ids=judge_ids)
segments_loss = 0
uttr_loss = 0
bias_loss = 0
for i in range((len(prefix_sums) - 1)):
current_segment_scores = segments_scores[prefix_sums[i]:prefix_sums[(i + 1)]]
current_bias_scores = segments_bias_scores[prefix_sums[i]:prefix_sums[(i + 1)]]
uttr_score = current_segment_scores.mean(dim=(- 1))
uttr_scores.append(uttr_score.detach().cpu())
bias_score = current_bias_scores.mean(dim=(- 1))
bias_scores.append(bias_score.detach().cpu())
segments_loss += self.objective(current_segment_scores, means[i])
uttr_loss += self.objective(uttr_score, means[i])
bias_loss += self.objective(bias_score, moses[i])
segments_loss /= (len(prefix_sums) - 1)
uttr_loss /= (len(prefix_sums) - 1)
bias_loss /= (len(prefix_sums) - 1)
loss = (((self.segment_weight * segments_loss) + (self.bias_weight * bias_loss)) + uttr_loss)
records['segment loss'].append(segments_loss.item())
records['utterance loss'].append(uttr_loss.item())
records['bias loss'].append(bias_loss.item())
records['total loss'].append(loss.item())
records['pred_scores'] += uttr_scores
records['true_scores'] += means.detach().cpu().tolist()
if ((mode == 'dev') or (mode == 'vcc2018_test')):
means = means.to(features.device)
segments_scores = self.model(features)
segments_loss = 0
uttr_loss = 0
for i in range((len(prefix_sums) - 1)):
current_segment_scores = segments_scores[prefix_sums[i]:prefix_sums[(i + 1)]]
uttr_score = current_segment_scores.mean(dim=(- 1))
uttr_scores.append(uttr_score.detach().cpu())
segments_loss += self.objective(current_segment_scores, means[i])
uttr_loss += self.objective(uttr_score, means[i])
segments_loss /= (len(prefix_sums) - 1)
uttr_loss /= (len(prefix_sums) - 1)
loss = (segments_loss + uttr_loss)
records['total loss'].append(loss.item())
records['pred_scores'] += uttr_scores
records['true_scores'] += means.detach().cpu().tolist()
if (mode == 'vcc2016_test'):
segments_scores = self.model(features)
for i in range((len(prefix_sums) - 1)):
current_segment_scores = segments_scores[prefix_sums[i]:prefix_sums[(i + 1)]]
uttr_score = current_segment_scores.mean(dim=(- 1))
uttr_scores.append(uttr_score.detach().cpu())
if (len(records['system']) == 0):
records['system'].append(defaultdict(list))
for i in range(len(system_names)):
records['system'][0][system_names[i]].append(uttr_scores[i].tolist())
if (mode == 'train'):
return loss
return 0
def log_records(self, mode, records, logger, global_step, batch_ids, total_batch_num, **kwargs):
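# Logs utterance- and system-level MSE/LCC/SRCC to TensorBoard and returns the
# checkpoint names to save whenever a tracked dev/test metric improves.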
save_names = []
if (mode == 'train'):
avg_uttr_loss = torch.FloatTensor(records['utterance loss']).mean().item()
avg_frame_loss = torch.FloatTensor(records['segment loss']).mean().item()
avg_bias_loss = torch.FloatTensor(records['bias loss']).mean().item()
logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-utterance loss', avg_uttr_loss, global_step=global_step)
logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-segment loss', avg_frame_loss, global_step=global_step)
logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-bias loss', avg_bias_loss, global_step=global_step)
if ((mode == 'train') or (mode == 'dev')):
avg_total_loss = torch.FloatTensor(records['total loss']).mean().item()
logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-total loss', avg_total_loss, global_step=global_step)
if ((mode == 'dev') or (mode == 'vcc2018_test')):
all_pred_scores = records['pred_scores']
all_true_scores = records['true_scores']
all_pred_scores = np.array(all_pred_scores)
all_true_scores = np.array(all_true_scores)
MSE = np.mean(((all_true_scores - all_pred_scores) ** 2))
logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-Utterance level MSE', MSE, global_step=global_step)
(pearson_rho, _) = pearsonr(all_true_scores, all_pred_scores)
logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-Utterance level LCC', pearson_rho, global_step=global_step)
(spearman_rho, _) = spearmanr(all_true_scores.T, all_pred_scores.T)
logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-Utterance level SRCC', spearman_rho, global_step=global_step)
tqdm.write(f'[{mode}] Utterance-level MSE = {MSE:.4f}')
tqdm.write(f'[{mode}] Utterance-level LCC = {pearson_rho:.4f}')
tqdm.write(f'[{mode}] Utterance-level SRCC = {spearman_rho:.4f}')
if ((mode == 'dev') or (mode == 'vcc2018_test')):
system_level_mos = self.vcc2018_system_mos
if (mode == 'vcc2016_test'):
system_level_mos = self.vcc2016_system_mos
if ((mode == 'dev') or (mode == 'vcc2018_test') or (mode == 'vcc2016_test')):
all_system_pred_scores = []
all_system_true_scores = []
for (key, values) in records['system'][0].items():
all_system_pred_scores.append(np.mean(values))
all_system_true_scores.append(system_level_mos[key].iloc[0])
all_system_pred_scores = np.array(all_system_pred_scores)
all_system_true_scores = np.array(all_system_true_scores)
MSE = np.mean(((all_system_true_scores - all_system_pred_scores) ** 2))
(pearson_rho, _) = pearsonr(all_system_true_scores, all_system_pred_scores)
(spearman_rho, _) = spearmanr(all_system_true_scores, all_system_pred_scores)
tqdm.write(f'[{mode}] System-level MSE = {MSE:.4f}')
tqdm.write(f'[{mode}] System-level LCC = {pearson_rho:.4f}')
tqdm.write(f'[{mode}] System-level SRCC = {spearman_rho:.4f}')
logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-System level MSE', MSE, global_step=global_step)
logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-System level LCC', pearson_rho, global_step=global_step)
logger.add_scalar(f'wav2MOS_segment_MBNet/{mode}-System level SRCC', spearman_rho, global_step=global_step)
if (mode == 'dev'):
if (avg_total_loss < self.best_scores['dev_loss']):
self.best_scores['dev_loss'] = avg_total_loss
save_names.append(f'{mode}-best.ckpt')
if (pearson_rho > self.best_scores['dev_LCC']):
self.best_scores['dev_LCC'] = pearson_rho
save_names.append(f'{mode}-LCC-best.ckpt')
if (spearman_rho > self.best_scores['dev_SRCC']):
self.best_scores['dev_SRCC'] = spearman_rho
save_names.append(f'{mode}-SRCC-best.ckpt')
if (mode == 'vcc2016_test'):
if (pearson_rho > self.best_scores['vcc2016_test_LCC']):
self.best_scores['vcc2016_test_LCC'] = pearson_rho
save_names.append(f'{mode}-LCC-best.ckpt')
if (spearman_rho > self.best_scores['vcc2016_test_SRCC']):
self.best_scores['vcc2016_test_SRCC'] = spearman_rho
save_names.append(f'{mode}-SRCC-best.ckpt')
return save_names |
def build_checkpoint_ops(flags):
checkpoint_dir = ('./logs/' + flags.name)
if (not os.path.exists(checkpoint_dir)):
os.mkdir(checkpoint_dir)
saved_op = {}
for var in tf.trainable_variables():
saved_op[var.name] = var
return (tf.train.Saver(var_list=saved_op, max_to_keep=1000), checkpoint_dir) |
def test_match_entities():
es = IndexSearch()
print(es.match_entities())
query = 'license'
print(es.match_entities(query)) |
class ProcessingReader(Reader):
def __init__(self, reader, processor):
Reader.__init__(self)
self.reader = reader
self.processor = make_processor(processor, reader)
def schema(self):
return self.processor.schema()
def setup_ex(self, init_net, finish_net):
self.reader.setup_ex(init_net, finish_net)
def read_ex(self, init_net, exit_net):
(read_nets, status, rec) = self.reader.read_record_ex(init_net, exit_net)
with NetBuilder() as nb:
result = normalize_processor_output(self.processor(rec))
read_nets += result.nets
if (result.should_stop or nb._stop_blob):
stop_net = core.Net('stop_net')
if result.should_stop:
stop_net.Or([status, result.should_stop], [status])
if nb._stop_blob:
stop_net.Or([status, nb._stop_blob], [status])
read_nets.append(stop_net)
if hasattr(self.processor, 'setup'):
init_net.add_attribute(TaskGroup.LOCAL_SETUP, self.processor)
self._set_schema(result.record)
fields = (result.record.field_blobs() if result.record else None)
return (read_nets, status, fields) |
@pytest.mark.parametrize('array_type', ['array', 'sparse_csr'])
def test_mutual_reachability_graph_inplace(array_type):
rng = np.random.RandomState(0)
X = rng.randn(10, 10)
X = (X.T @ X)
np.fill_diagonal(X, 0.0)
X = _convert_container(X, array_type)
mr_graph = mutual_reachability_graph(X)
assert (id(mr_graph) == id(X)) |
class AcquisitionOnSubspace():
def __init__(self, acq, free_idx, fixed_vals):
self.acq = acq
self.free_idx = free_idx
self.fixed_vals = fixed_vals
def evaluate(self, x: np.ndarray, **kwargs):
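# Evaluates the wrapped acquisition on the full input space by stacking the fixed
# values in front of each row of x (the fixed dimensions are assumed to come first).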
x_fixed = ([self.fixed_vals] * len(x))
x_complete = np.hstack((np.vstack(x_fixed), x))
return self.acq.evaluate(x_complete) |
def replace_default_birthdate(patient: RawPatient) -> Optional[RawPatient]:
for event in patient.events:
if ((event.concept_id == OMOP_BIRTH) and (event.start == datetime.datetime(1, 1, 1))):
event.start = datetime.datetime(1900, 1, 1)
patient.resort()
return patient |
def var_shape(x):
out = x.get_shape().as_list()
assert all((isinstance(a, int) for a in out)), 'shape function assumes that shape is fully known'
return out |
def test_chunk_ordering_is_correct_with_slow_shards():
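# shard_1 yields twice as many rows as shard_0, so the cache should commit chunks in
# round-robin order across shards until shard_0 is exhausted, then finish shard_1;
# exactly six chunks are expected in total.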
class SlowShardSource(ShardedDataset[List[int]]):
def shard_names(self) -> Sequence[str]:
return ['shard_0', 'shard_1']
def open_shard_at_row(self, shard_name: str, row: int) -> Iterator[List[int]]:
max_count = (40 if (shard_name == 'shard_1') else 20)
for i in range(0, max_count):
(yield ([i] * 10))
with tempfile.TemporaryDirectory() as tmpdir:
cache = build_cache(tmpdir, SlowShardSource(), TestProcessor(1), batch_size=1, rows_per_chunk=10, await_finished=False)
print('at wait')
cache.await_finished(timeout=10)
print('done waiting')
chunks: List[ChunkMetadata] = ray.get([cache._broker.get_chunk.remote(i) for i in range(6)])
assert (chunks[0].name == 'shard_0/chunk-0')
assert (chunks[1].name == 'shard_1/chunk-0')
assert (chunks[2].name == 'shard_0/chunk-1')
assert (chunks[3].name == 'shard_1/chunk-1')
assert (chunks[4].name == 'shard_1/chunk-2')
assert (chunks[5].name == 'shard_1/chunk-3')
chunk = ray.get(cache._broker.get_chunk.remote(6), timeout=0.5)
assert (chunk is None) |
class NilCoxeterAlgebra(IwahoriHeckeAlgebra.T):
def __init__(self, W, base_ring=QQ, prefix='u'):
self._W = W
self._n = W.n
self._base_ring = base_ring
self._cartan_type = W.cartan_type()
H = IwahoriHeckeAlgebra(W, 0, 0, base_ring=base_ring)
super(IwahoriHeckeAlgebra.T, self).__init__(H, prefix=prefix)
def _repr_(self):
return ('The Nil-Coxeter Algebra of Type %s over %s' % (self._cartan_type._repr_(compact=True), self.base_ring()))
def homogeneous_generator_noncommutative_variables(self, r):
ct = self._cartan_type
msg = f'Analogue of symmetric functions in noncommutative variables is not defined in type {ct}'
assert (((len(ct) == 2) and (ct[0] in ['A', 'B'])) or ((len(ct) == 3) and (ct[2] == 1))), msg
if (r >= self._n):
return self.zero()
return self.sum_of_monomials((w for w in self._W.pieri_factors() if (w.length() == r)))
def homogeneous_noncommutative_variables(self, la):
return prod((self.homogeneous_generator_noncommutative_variables(p) for p in la))
def k_schur_noncommutative_variables(self, la):
assert ((self._cartan_type[0] == 'A') and (len(self._cartan_type) == 3) and (self._cartan_type[2] == 1)), ('%s is not affine type A.' % self._W)
assert (la in Partitions()), ('%s is not a partition.' % la)
assert ((len(la) == 0) or (la[0] < self._W.n)), ('%s is not a %s-bounded partition.' % (la, (self._W.n - 1)))
Sym = SymmetricFunctions(self._base_ring)
h = Sym.homogeneous()
ks = Sym.kschur((self._n - 1), 1)
f = h(ks[la])
return sum(((f.coefficient(x) * self.homogeneous_noncommutative_variables(x)) for x in f.support())) |
def load_from_npz(file_name: str) -> SparseGraph:
with np.load(file_name, allow_pickle=True) as loader:
loader = dict(loader)
dataset = SparseGraph.from_flat_dict(loader)
return dataset |
def main():
parser = argparse.ArgumentParser('Text Matching task')
parser.add_argument('--model_arch', default='bge', const='bge', nargs='?', choices=['bge'], help='model architecture')
parser.add_argument('--model_name', default='BAAI/bge-large-zh-noinstruct', type=str, help='Transformers model name or path')
parser.add_argument('--train_file', default='data/bge_finetune_data.jsonl', type=str, help='Train data path')
parser.add_argument('--valid_file', default='data/snli_zh_50.jsonl', type=str, help='Validation data path')
parser.add_argument('--test_file', default='data/snli_zh_50.jsonl', type=str, help='Test data path')
parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
parser.add_argument('--do_predict', action='store_true', help='Whether to run predict.')
parser.add_argument('--output_dir', default='./outputs/bge-model', type=str, help='Model output directory')
parser.add_argument('--query_max_len', default=32, type=int, help='Max sequence length for query')
parser.add_argument('--passage_max_len', default=64, type=int, help='Max sequence length for passage')
parser.add_argument('--num_epochs', default=3, type=int, help='Number of training epochs')
parser.add_argument('--batch_size', default=4, type=int, help='Batch size')
parser.add_argument('--learning_rate', default=1e-05, type=float, help='Learning rate')
parser.add_argument('--train_group_size', default=4, type=int, help='Train group size')
parser.add_argument('--temperature', default=1.0, type=float, help='Temperature for softmax')
parser.add_argument('--save_model_every_epoch', action='store_true', help='Whether to save model after each epoch')
parser.add_argument('--encoder_type', default='MEAN', type=(lambda t: EncoderType[t]), choices=list(EncoderType), help='Encoder type, string name of EncoderType')
parser.add_argument('--bf16', action='store_true', help='Whether to use bfloat16 amp training.')
parser.add_argument('--data_parallel', action='store_true', help='Whether to use multi-gpu data parallel.')
parser.add_argument('--normalize_embeddings', action='store_true', help='Whether to normalize embeddings. set True if temperature < 1.0')
args = parser.parse_args()
logger.info(args)
if args.do_train:
model = BgeModel(model_name_or_path=args.model_name, encoder_type=args.encoder_type, max_seq_length=args.query_max_len, passage_max_len=args.passage_max_len)
model.train_model(args.train_file, args.output_dir, eval_file=args.valid_file, num_epochs=args.num_epochs, batch_size=args.batch_size, lr=args.learning_rate, save_model_every_epoch=args.save_model_every_epoch, bf16=args.bf16, data_parallel=args.data_parallel, train_group_size=args.train_group_size, temperature=args.temperature, normalize_embeddings=args.normalize_embeddings)
logger.info(f'Model saved to {args.output_dir}')
if args.do_predict:
model = SentenceModel(model_name_or_path=args.output_dir, encoder_type=args.encoder_type)
test_data = load_text_matching_test_data(args.test_file)
srcs = []
trgs = []
labels = []
for terms in test_data:
(src, trg, label) = (terms[0], terms[1], terms[2])
srcs.append(src)
trgs.append(trg)
labels.append(label)
logger.debug(f'{test_data[0]}')
sentence_embeddings = model.encode(srcs)
logger.debug(f'{type(sentence_embeddings)}, {sentence_embeddings.shape}, {sentence_embeddings[0].shape}')
calc_similarity_scores(model, srcs, trgs, labels) |
class GANLoss(nn.Module):
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0, tensor=torch.FloatTensor):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_var = None
self.fake_label_var = None
self.Tensor = tensor
if use_lsgan:
self.loss = nn.MSELoss()
else:
self.loss = nn.BCELoss()
def get_target_tensor(self, input, target_is_real):
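# Caches the real/fake target tensor and only rebuilds it when the number of
# elements in `input` changes.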
target_tensor = None
if target_is_real:
create_label = ((self.real_label_var is None) or (self.real_label_var.numel() != input.numel()))
if create_label:
real_tensor = self.Tensor(input.size()).fill_(self.real_label)
self.real_label_var = Variable(real_tensor, requires_grad=False)
target_tensor = self.real_label_var
else:
create_label = ((self.fake_label_var is None) or (self.fake_label_var.numel() != input.numel()))
if create_label:
fake_tensor = self.Tensor(input.size()).fill_(self.fake_label)
self.fake_label_var = Variable(fake_tensor, requires_grad=False)
target_tensor = self.fake_label_var
return target_tensor
def __call__(self, input, target_is_real):
target_tensor = self.get_target_tensor(input, target_is_real)
return self.loss(input, target_tensor) |
class ECDFResult():
cdf: EmpiricalDistributionFunction
sf: EmpiricalDistributionFunction
def __init__(self, q, cdf, sf, n, d):
self.cdf = EmpiricalDistributionFunction(q, cdf, n, d, 'cdf')
self.sf = EmpiricalDistributionFunction(q, sf, n, d, 'sf') |
def test_builtins_cast_return_none():
assert (m.return_none_string() is None)
assert (m.return_none_char() is None)
assert (m.return_none_bool() is None)
assert (m.return_none_int() is None)
assert (m.return_none_float() is None) |
def run_experiment_disk_io(input_config):
experiments = []
experiments.append(analyzer_experiment(instances=1, name='disk-io', experiment_type='disk-io', input_config=input_config, port=8081))
return experiments |
def test_relabel_sequential_signed_overflow():
imax = np.iinfo(np.int32).max
labels = np.array([0, 1, 99, 42, 42], dtype=np.int32)
(output, fw, inv) = relabel_sequential(labels, offset=imax)
reference = np.array([0, imax, (imax + 2), (imax + 1), (imax + 1)], dtype=np.uint32)
assert_array_equal(output, reference)
assert (output.dtype == reference.dtype) |
def dic_of_chars(words_indexes):
lstChars = {}
for word in words_indexes:
for char in word:
if (char not in lstChars):
lstChars[char] = len(lstChars)
lstChars['unk'] = len(lstChars)
return lstChars |
class ErnieModel(metaclass=DummyObject):
_backends = ['torch']
def __init__(self, *args, **kwargs):
requires_backends(self, ['torch']) |
def _print_select_config(configs, input_func=input):
if (len(configs) == 0):
return None
print('Config files detected in current directory are listed below:')
for (i, config) in enumerate(configs):
print('[{}] - {}'.format((i + 1), config))
key = input_func('Press the config index to load or any other keys to start with new one : ')
try:
return load_config(configs[(int(key) - 1)])
except BaseException:
return None |
def mwem_pgm(data, epsilon, delta=0.0, workload=None, rounds=None, maxsize_mb=25, pgm_iters=1000, noise='gaussian', bounded=False, alpha=0.9):
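# MWEM+PGM: each round selects the worst-approximated workload marginal with the
# exponential mechanism (budget exp_eps), measures it with Laplace or Gaussian noise
# (zCDP accounting via cdp_rho when noise='gaussian'), and refits a graphical model
# with FactoredInference; candidate cliques are capped by a model-size budget that
# grows linearly with the round number.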
if (workload is None):
workload = list(itertools.combinations(data.domain, 2))
if (rounds is None):
rounds = len(data.domain)
if (noise == 'laplace'):
eps_per_round = (epsilon / rounds)
sigma = (1.0 / (alpha * eps_per_round))
exp_eps = ((1 - alpha) * eps_per_round)
marginal_sensitivity = (2 if bounded else 1.0)
else:
rho = cdp_rho(epsilon, delta)
rho_per_round = (rho / rounds)
sigma = np.sqrt((0.5 / (alpha * rho_per_round)))
exp_eps = np.sqrt(((8 * (1 - alpha)) * rho_per_round))
marginal_sensitivity = (np.sqrt(2) if bounded else 1.0)
domain = data.domain
total = (data.records if bounded else None)
def size(cliques):
return ((GraphicalModel(domain, cliques).size * 8) / (2 ** 20))
workload_answers = {cl: data.project(cl).datavector() for cl in workload}
engine = FactoredInference(data.domain, log=False, iters=pgm_iters, warm_start=True)
measurements = []
est = engine.estimate(measurements, total)
cliques = []
for i in range(1, (rounds + 1)):
candidates = [cl for cl in workload if (size((cliques + [cl])) <= ((maxsize_mb * i) / rounds))]
ax = worst_approximated(workload_answers, est, candidates, exp_eps)
print('Round', i, 'Selected', ax, 'Model Size (MB)', ((est.size * 8) / (2 ** 20)))
n = domain.size(ax)
x = data.project(ax).datavector()
if (noise == 'laplace'):
y = (x + np.random.laplace(loc=0, scale=(marginal_sensitivity * sigma), size=n))
else:
y = (x + np.random.normal(loc=0, scale=(marginal_sensitivity * sigma), size=n))
Q = sparse.eye(n)
measurements.append((Q, y, 1.0, ax))
est = engine.estimate(measurements, total)
cliques.append(ax)
print('Generating Data...')
return est.synthetic_data() |
class GradleRequirement(Requirement):
def __init__(self):
super().__init__('Gradle 1.10+')
def check(self):
Shell.exec('gradle -version') |
@add_arg_scope
def resid_unit(inputs, depth, depth_bottleneck, stride, rate=1, outputs_collections=None, scope=None):
with variable_scope.variable_scope(scope, 'resid_v1', [inputs]) as sc:
depth_in = utils.last_dimension(inputs.get_shape(), min_rank=5)
if (depth == depth_in):
shortcut = resnet_3d_utils.subsample(inputs, stride, 'shortcut')
else:
shortcut = layers.conv3d(inputs, depth, [1, 1, 1], stride=stride, activation_fn=None, scope='shortcut')
residual = resnet_3d_utils.conv3d_same(inputs, depth_bottleneck, 3, stride=1, scope='conv1')
residual = layers.conv3d(residual, depth_bottleneck, 3, stride, scope='conv2')
output = nn_ops.relu((shortcut + residual))
return utils.collect_named_outputs(outputs_collections, sc.name, output) |
def load_and_cache_defect_data(args, filename, pool, tokenizer, split_tag, is_sample=False):
cache_fn = os.path.join(args.cache_path, split_tag)
examples = read_examples(filename, args.data_num, args.task)
if is_sample:
examples = random.sample(examples, int((len(examples) * 0.1)))
calc_stats(examples, tokenizer, is_tokenize=True)
if os.path.exists(cache_fn):
logger.info('Load cache data from %s', cache_fn)
data = torch.load(cache_fn)
else:
if is_sample:
logger.info('Sample 10 percent of data from %s', filename)
elif (args.data_num == (- 1)):
logger.info('Create cache data into %s', cache_fn)
tuple_examples = [(example, idx, tokenizer, args) for (idx, example) in enumerate(examples)]
features = pool.map(convert_defect_examples_to_features, tqdm(tuple_examples, total=len(tuple_examples)))
all_source_ids = torch.tensor([f.source_ids for f in features], dtype=torch.long)
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
data = TensorDataset(all_source_ids, all_labels)
if ((args.local_rank in [(- 1), 0]) and (args.data_num == (- 1))):
torch.save(data, cache_fn)
return (examples, data) |
class GroupedNDRange():
def __init__(self, r):
self.r = r
def __iter__(self):
for ind in self.r:
(yield Matrix(list(ind))) |
class AdaptiveAvgPool2d(_AdaptiveAvgPoolNd):
def forward(self, input):
return F.adaptive_avg_pool2d(input, self.output_size) |
@dataclass(frozen=True)
class Modules():
def create_checkpointer(self, device: str) -> Checkpointer:
modules = {k: v for (k, v) in asdict_without_copy(self).items() if isinstance(v, (nn.Module, torch.optim.Optimizer))}
return Checkpointer(modules=modules, device=device)
def freeze(self) -> None:
for v in asdict_without_copy(self).values():
if isinstance(v, nn.Module):
for p in v.parameters():
p.requires_grad = False
def unfreeze(self) -> None:
for v in asdict_without_copy(self).values():
if isinstance(v, nn.Module):
for p in v.parameters():
p.requires_grad = True
def set_eval(self) -> None:
for v in asdict_without_copy(self).values():
if isinstance(v, nn.Module):
v.eval()
def set_train(self) -> None:
for v in asdict_without_copy(self).values():
if isinstance(v, nn.Module):
v.train()
def reset_optimizer_states(self) -> None:
for v in asdict_without_copy(self).values():
if isinstance(v, torch.optim.Optimizer):
v.state = collections.defaultdict(dict)
def wrap_models_by_ddp(self) -> Self:
dict_values = asdict_without_copy(self)
for (k, v) in dict_values.items():
if isinstance(v, nn.Module):
device_id = next(v.parameters()).device.index
dict_values[k] = DDP(v, device_ids=([device_id] if device_id is not None else None))
return self.__class__(**dict_values)
def unwrap_models_by_ddp(self) -> Self:
dict_values = asdict_without_copy(self)
for (k, v) in dict_values.items():
if isinstance(v, DDP):
dict_values[k] = v.module
return self.__class__(**dict_values) |
def quantize(model: nn.Module, get_representative_dataset: callable, tpc: TargetPlatformCapabilities, args: dict):
n_iter = math.ceil(int(args[NUM_REPRESENTATIVE_IMAGES]) / int(args[BATCH_SIZE]))
logging.info(f'Running MCT... number of representative images: {args[NUM_REPRESENTATIVE_IMAGES]}, number of calibration iters: {n_iter}')
representative_data_gen = get_representative_dataset(representative_dataset_folder=args[REPRESENTATIVE_DATASET_FOLDER], n_iter=n_iter, batch_size=int(args[BATCH_SIZE]))
mp_wcr = args.get(MP_WEIGHTS_COMPRESSION, None)
if mp_wcr:
mp_conf = MixedPrecisionQuantizationConfigV2()
core_conf = CoreConfig(quantization_config=mct.core.QuantizationConfig(shift_negative_activation_correction=True), mixed_precision_config=mp_conf)
target_kpi = get_target_kpi(model, mp_wcr, representative_data_gen, core_conf, tpc)
else:
core_conf = CoreConfig(quantization_config=mct.core.QuantizationConfig(shift_negative_activation_correction=True))
target_kpi = None
if args.get('gptq', False):
workflow = 'GPTQ'
n_epochs = (args.get('gptq_num_calibration_iter') // n_iter)
logging.info(f'MCT Gradient-based Post Training Quantization is enabled. Number of epochs: {n_epochs}')
gptq_conf = mct.gptq.get_pytorch_gptq_config(n_epochs=n_epochs, optimizer=Adam([Tensor([])], lr=args['gptq_lr']))
(quantized_model, quantization_info) = mct.gptq.pytorch_gradient_post_training_quantization_experimental(model, representative_data_gen=representative_data_gen, target_kpi=target_kpi, core_config=core_conf, gptq_config=gptq_conf, gptq_representative_data_gen=representative_data_gen, target_platform_capabilities=tpc)
else:
workflow = 'PTQ'
(quantized_model, quantization_info) = mct.ptq.pytorch_post_training_quantization_experimental(model, representative_data_gen=representative_data_gen, target_kpi=target_kpi, core_config=core_conf, target_platform_capabilities=tpc)
if args.get('export_model', False):
(_, onnx_file_path) = tempfile.mkstemp('.onnx')
mct.exporter.pytorch_export_model(model=quantized_model, save_model_path=onnx_file_path, repr_dataset=representative_data_gen)
return (quantized_model, QuantInfo(user_info=quantization_info, tpc_info=tpc.get_info(), quantization_workflow=workflow, mp_weights_compression=mp_wcr)) |
class CerebrasInt8(CausalInt8Model):
config_name: str = 'cerebras_int8'
def __init__(self, weights_path: Optional[str]=None):
super().__init__(CerebrasInt8Engine.config_name, weights_path) |
def apply_to_all_dispatchers(operation: APIOperation, context: HookContext, hooks: (HookDispatcher | None), strategy: st.SearchStrategy, container: str) -> st.SearchStrategy:
strategy = GLOBAL_HOOK_DISPATCHER.apply_to_container(strategy, container, context)
strategy = operation.schema.hooks.apply_to_container(strategy, container, context)
if (hooks is not None):
strategy = hooks.apply_to_container(strategy, container, context)
return strategy |
class Each(ParseExpression):
def __init__(self, exprs, savelist=True):
super(Each, self).__init__(exprs, savelist)
self.mayReturnEmpty = all((e.mayReturnEmpty for e in self.exprs))
self.skipWhitespace = True
self.initExprGroups = True
def parseImpl(self, instring, loc, doActions=True):
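# Repeatedly tries every remaining sub-expression and records the order in which they
# match, until a full pass makes no progress; raises if required elements are still
# missing, then re-parses the input in the discovered order and merges the results.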
if self.initExprGroups:
self.opt1map = dict(((id(e.expr), e) for e in self.exprs if isinstance(e, Optional)))
opt1 = [e.expr for e in self.exprs if isinstance(e, Optional)]
opt2 = [e for e in self.exprs if (e.mayReturnEmpty and (not isinstance(e, Optional)))]
self.optionals = (opt1 + opt2)
self.multioptionals = [e.expr for e in self.exprs if isinstance(e, ZeroOrMore)]
self.multirequired = [e.expr for e in self.exprs if isinstance(e, OneOrMore)]
self.required = [e for e in self.exprs if (not isinstance(e, (Optional, ZeroOrMore, OneOrMore)))]
self.required += self.multirequired
self.initExprGroups = False
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
matchOrder = []
keepMatching = True
while keepMatching:
tmpExprs = (((tmpReqd + tmpOpt) + self.multioptionals) + self.multirequired)
failed = []
for e in tmpExprs:
try:
tmpLoc = e.tryParse(instring, tmpLoc)
except ParseException:
failed.append(e)
else:
matchOrder.append(self.opt1map.get(id(e), e))
if (e in tmpReqd):
tmpReqd.remove(e)
elif (e in tmpOpt):
tmpOpt.remove(e)
if (len(failed) == len(tmpExprs)):
keepMatching = False
if tmpReqd:
missing = ', '.join((_ustr(e) for e in tmpReqd))
raise ParseException(instring, loc, ('Missing one or more required elements (%s)' % missing))
matchOrder += [e for e in self.exprs if (isinstance(e, Optional) and (e.expr in tmpOpt))]
resultlist = []
for e in matchOrder:
(loc, results) = e._parse(instring, loc, doActions)
resultlist.append(results)
finalResults = sum(resultlist, ParseResults([]))
return (loc, finalResults)
def __str__(self):
if hasattr(self, 'name'):
return self.name
if (self.strRepr is None):
self.strRepr = (('{' + ' & '.join((_ustr(e) for e in self.exprs))) + '}')
return self.strRepr
def checkRecursion(self, parseElementList):
subRecCheckList = (parseElementList[:] + [self])
for e in self.exprs:
e.checkRecursion(subRecCheckList) |
class Module(chainer.Chain):
def __init__(self, dim):
super(Module, self).__init__(x2z=L.Linear(dim, dim), bn=L.BatchNormalization(dim))
def __call__(self, x):
z = self.x2z(x)
z = self.bn(z)
z = F.relu(z)
return z |
def compute_alignment_score(rank_list, src_objects_count, ref_objects_count):
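# For each source object, looks at its nearest non-self neighbour in rank_list and
# counts it as aligned when that neighbour falls in the reference range
# (index >= src_objects_count); the score is normalised by ref_objects_count.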
aligned_obj_counts = 0
for idx in range(src_objects_count):
e1_rank_list = list(rank_list[idx].detach().cpu().numpy())
e1_rank_list.remove(idx)
rank_idx = e1_rank_list[0]
if (rank_idx >= src_objects_count):
aligned_obj_counts += 1
alignment_score = (aligned_obj_counts / ref_objects_count)
return alignment_score |
class TestFiller(test_util.TestCase):
def test_filler(self):
net = core.Net('test_filler')
net.Concat(['X0', 'X1', 'X2'], ['concat_out', 'split_info'])
self.assertFalse(workspace.HasBlob('X0'))
input_dim = (30, 20)
workspace.FillRandomNetworkInputs(net, [[input_dim, input_dim, input_dim]], [['float', 'float', 'float']])
self.assertTrue(workspace.HasBlob('X0'))
self.assertEqual(workspace.FetchBlob('X0').shape, input_dim)
with self.assertRaises(RuntimeError):
workspace.FillRandomNetworkInputs(net, [[input_dim]], [['float']]) |
def main():
torch.set_num_threads(3)
if (not torch.cuda.is_available()):
logging.info('no gpu device available')
sys.exit(1)
np.random.seed(args.seed)
gpu = (ig_utils.pick_gpu_lowest_memory() if (args.gpu == 'auto') else int(args.gpu))
torch.cuda.set_device(gpu)
cudnn.benchmark = True
torch.manual_seed(args.seed)
cudnn.enabled = True
torch.cuda.manual_seed(args.seed)
logging.info(('gpu device = %d' % gpu))
logging.info('args = %s', args)
if (args.dataset == 'cifar10'):
(train_transform, valid_transform) = ig_utils._data_transforms_cifar10(args)
train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
elif (args.dataset == 'cifar100'):
(train_transform, valid_transform) = ig_utils._data_transforms_cifar100(args)
train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
valid_data = dset.CIFAR100(root=args.data, train=False, download=True, transform=valid_transform)
elif (args.dataset == 'svhn'):
(train_transform, valid_transform) = ig_utils._data_transforms_svhn(args)
train_data = dset.SVHN(root=args.data, split='train', download=True, transform=train_transform)
valid_data = dset.SVHN(root=args.data, split='test', download=True, transform=valid_transform)
num_train = len(train_data)
indices = list(range(num_train))
split = int(np.floor((args.train_portion * num_train)))
train_queue = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]), pin_memory=True)
valid_queue = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]), pin_memory=True)
test_queue = torch.utils.data.DataLoader(valid_data, batch_size=args.batch_size, pin_memory=True)
if (args.perturb_alpha == 'none'):
perturb_alpha = None
elif (args.perturb_alpha == 'pgd_linf'):
perturb_alpha = Linf_PGD_alpha
elif (args.perturb_alpha == 'random'):
perturb_alpha = Random_alpha
else:
print('ERROR PERTURB_ALPHA TYPE:', args.perturb_alpha)
exit(1)
criterion = nn.CrossEntropyLoss()
criterion = criterion.cuda()
if (args.method in ['darts', 'blank']):
model = DartsNetwork(args.init_channels, n_classes, args.layers, criterion, spaces_dict[args.search_space], args)
elif (args.method == 'sdarts'):
model = SDartsNetwork(args.init_channels, n_classes, args.layers, criterion, spaces_dict[args.search_space], args)
elif (args.method in ['darts-proj', 'blank-proj']):
model = DartsNetworkProj(args.init_channels, n_classes, args.layers, criterion, spaces_dict[args.search_space], args)
elif (args.method in ['sdarts-proj']):
model = SDartsNetworkProj(args.init_channels, n_classes, args.layers, criterion, spaces_dict[args.search_space], args)
else:
print('ERROR: WRONG MODEL:', args.method)
exit(0)
model = model.cuda()
architect = Architect(model, args)
logging.info('param size = %fMB', ig_utils.count_parameters_in_MB(model))
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(model.optimizer, float(args.epochs), eta_min=args.learning_rate_min)
start_epoch = 0
if (args.resume_epoch > 0):
logging.info('loading checkpoint from {}'.format(expid))
filename = os.path.join(args.save, 'checkpoint_{}.pth.tar'.format(args.resume_epoch))
if os.path.isfile(filename):
logging.info("=> loading checkpoint '{}'".format(filename))
checkpoint = torch.load(filename, map_location='cpu')
resume_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
saved_arch_parameters = checkpoint['alpha']
model.set_arch_parameters(saved_arch_parameters)
scheduler.load_state_dict(checkpoint['scheduler'])
model.optimizer.load_state_dict(checkpoint['optimizer'])
architect.optimizer.load_state_dict(checkpoint['arch_optimizer'])
start_epoch = args.resume_epoch
logging.info("=> loaded checkpoint '{}' (epoch {})".format(filename, resume_epoch))
else:
logging.info("=> no checkpoint found at '{}'".format(filename))
logging.info('starting training at epoch {}'.format(start_epoch))
for epoch in range(start_epoch, args.epochs):
lr = scheduler.get_lr()[0]
if args.cutout:
train_transform.transforms[(- 1)].cutout_prob = ((args.cutout_prob * epoch) / (args.epochs - 1))
logging.info('epoch %d lr %e cutout_prob %e', epoch, lr, train_transform.transforms[(- 1)].cutout_prob)
else:
logging.info('epoch %d lr %e', epoch, lr)
if args.perturb_alpha:
epsilon_alpha = (0.03 + (((args.epsilon_alpha - 0.03) * epoch) / args.epochs))
logging.info('epoch %d epsilon_alpha %e', epoch, epsilon_alpha)
num_params = ig_utils.count_parameters_in_Compact(model)
genotype = model.genotype()
logging.info('param size = %f', num_params)
logging.info('genotype = %s', genotype)
model.printing(logging)
(train_acc, train_obj) = train(train_queue, valid_queue, model, architect, model.optimizer, lr, epoch, perturb_alpha, epsilon_alpha)
logging.info('train_acc %f | train_obj %f', train_acc, train_obj)
writer.add_scalar('Acc/train', train_acc, epoch)
writer.add_scalar('Obj/train', train_obj, epoch)
scheduler.step()
(valid_acc, valid_obj) = infer(valid_queue, model, log=False)
logging.info('valid_acc %f | valid_obj %f', valid_acc, valid_obj)
writer.add_scalar('Acc/valid', valid_acc, epoch)
writer.add_scalar('Obj/valid', valid_obj, epoch)
(test_acc, test_obj) = infer(test_queue, model, log=False)
logging.info('test_acc %f | test_obj %f', test_acc, test_obj)
writer.add_scalar('Acc/test', test_acc, epoch)
writer.add_scalar('Obj/test', test_obj, epoch)
if (((epoch + 1) % args.ckpt_interval) == 0):
save_state_dict = {'epoch': (epoch + 1), 'state_dict': model.state_dict(), 'alpha': model.arch_parameters(), 'optimizer': model.optimizer.state_dict(), 'arch_optimizer': architect.optimizer.state_dict(), 'scheduler': scheduler.state_dict()}
ig_utils.save_checkpoint(save_state_dict, False, args.save, per_epoch=True)
if (args.dev == 'proj'):
pt_project(train_queue, valid_queue, model, architect, model.optimizer, start_epoch, args, infer, perturb_alpha, args.epsilon_alpha)
writer.close() |
def create_model(model_name, num_classes=1000, pretrained=False, **kwargs):
if ('test_time_pool' in kwargs):
test_time_pool = kwargs.pop('test_time_pool')
else:
test_time_pool = True
if (model_name == 'dpn68'):
model = dpn68(pretrained=pretrained, test_time_pool=test_time_pool, num_classes=num_classes)
elif (model_name == 'dpn68b'):
model = dpn68b(pretrained=pretrained, test_time_pool=test_time_pool, num_classes=num_classes)
elif (model_name == 'dpn92'):
model = dpn92(pretrained=pretrained, test_time_pool=test_time_pool, num_classes=num_classes)
elif (model_name == 'dpn98'):
model = dpn98(pretrained=pretrained, test_time_pool=test_time_pool, num_classes=num_classes)
elif (model_name == 'dpn131'):
model = dpn131(pretrained=pretrained, test_time_pool=test_time_pool, num_classes=num_classes)
elif (model_name == 'dpn107'):
model = dpn107(pretrained=pretrained, test_time_pool=test_time_pool, num_classes=num_classes)
elif (model_name == 'resnet18'):
model = resnet18(pretrained=pretrained, num_classes=num_classes, **kwargs)
elif (model_name == 'resnet34'):
model = resnet34(pretrained=pretrained, num_classes=num_classes, **kwargs)
elif (model_name == 'resnet50'):
model = resnet50(pretrained=pretrained, num_classes=num_classes, **kwargs)
elif (model_name == 'resnet101'):
model = resnet101(pretrained=pretrained, num_classes=num_classes, **kwargs)
elif (model_name == 'resnet152'):
model = resnet152(pretrained=pretrained, num_classes=num_classes, **kwargs)
elif (model_name == 'densenet121'):
model = densenet121(pretrained=pretrained, num_classes=num_classes, **kwargs)
elif (model_name == 'densenet161'):
model = densenet161(pretrained=pretrained, num_classes=num_classes, **kwargs)
elif (model_name == 'densenet169'):
model = densenet169(pretrained=pretrained, num_classes=num_classes, **kwargs)
elif (model_name == 'densenet201'):
model = densenet201(pretrained=pretrained, num_classes=num_classes, **kwargs)
elif (model_name == 'inception_v3'):
model = inception_v3(pretrained=pretrained, num_classes=num_classes, transform_input=False, **kwargs)
else:
assert False, ('Unknown model architecture (%s)' % model_name)
return model |
def _invert_nonzero(arr):
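# Element-wise reciprocal restricted to the non-zero entries; zeros stay zero.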
arr_inv = arr.copy()
nz = np.nonzero(arr)
arr_inv[nz] = (1 / arr[nz])
return arr_inv |
class ConllEntry():
def __init__(self, id, form, tasks, pos=None, ner_tag=None, srl_tag=None, chunk=None):
self.id = id
self.form = form
self.norm = normalize(form)
self.pos = pos
self.ner_tag = ner_tag
self.srl_tag = srl_tag
self.tasks = tasks
self.chunk = chunk |
def test_insert_random_call_no_accessible(test_case_mock):
test_cluster = MagicMock(ModuleTestCluster)
test_cluster.get_random_accessible.return_value = None
test_factory = tf.TestFactory(test_cluster)
assert (not test_factory.insert_random_call(test_case_mock, 0)) |
@app.route('/upload')  # Flask route decorator; the `app` object is assumed to be defined in the surrounding module
def upload():
xml_src = request.get_data()
doc = lxml.etree.fromstring(xml_src)
return lxml.etree.tostring(doc) |
def elsa_doc_model(hidden_dim=64, dropout=0.5, mode='train'):
I_en = Input(shape=(nb_maxlen[0], nb_feature[1]), dtype='float32')
en_out = AttentionWeightedAverage()(I_en)
I_ot = Input(shape=(nb_maxlen[1], nb_feature[0]), dtype='float32')
jp_out = AttentionWeightedAverage()(I_ot)
O_to = concatenate([jp_out, en_out])
O_to = Dense(hidden_dim, activation='selu')(O_to)
if (mode == 'train'):
O_to = Dropout(dropout)(O_to)
O_out = Dense(1, activation='sigmoid', name='softmax')(O_to)
model = Model(inputs=[I_ot, I_en], outputs=O_out)
return model |
class CatKLLoss(_Loss):
def __init__(self):
super(CatKLLoss, self).__init__()
def forward(self, log_qy, log_py, batch_size=None, unit_average=False):
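# KL(q || p) between categorical distributions given log-probabilities: sums
# qy * (log_qy - log_py) over classes, then averages per example (unit_average)
# or sums and divides by batch_size.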
if (log_qy.dim() > 2):
log_qy = log_qy.squeeze()
qy = torch.exp(log_qy)
y_kl = torch.sum((qy * (log_qy - log_py)), dim=1)
if unit_average:
return torch.mean(y_kl)
else:
return (torch.sum(y_kl) / batch_size) |
class MLP(nn.Module):
def __init__(self, input_dim=2048, embed_dim=768):
super().__init__()
self.proj = nn.Linear(input_dim, embed_dim)
def forward(self, x):
x = x.flatten(2).transpose(1, 2).contiguous()
x = self.proj(x)
return x |
def process(config) -> None:
def parse_param(name_value: str) -> Tuple[(str, str)]:
(name, value) = [x.strip() for x in name_value.split('=')]
return (name, value)
params = (config.param or [])
variables = dict((parse_param(param) for param in params))
engine = TemplateEngine(variables)
with open(config.filename, 'r') as file:
engine.process(file, sys.stdout) |
def fix_stanford_coref(stanford_json):
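# Converts CoreNLP's 1-based coref indices to 0-based, attaches the head word and its
# NER label to every mention, and guarantees each kept chain has a usable
# representative mention (skipping pronominal, noisy, or overly long mentions).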
true_corefs = {}
for (key, coref) in stanford_json['corefs'].items():
true_coref = []
for entity in coref:
sent_num = (entity['sentNum'] - 1)
start_index = (entity['startIndex'] - 1)
end_index = (entity['endIndex'] - 1)
head_index = (entity['headIndex'] - 1)
entity_label = stanford_json['sentences'][sent_num]['tokens'][head_index]['ner']
entity['sentNum'] = sent_num
entity['startIndex'] = start_index
entity['endIndex'] = end_index
entity['headIndex'] = head_index
entity['headWord'] = entity['text'].split(' ')[(head_index - start_index)]
entity['entityType'] = entity_label
true_coref.append(entity)
if (len(true_coref) > 0):
no_representative = True
has_representative = False
for (idx, entity) in enumerate(true_coref):
if entity['isRepresentativeMention']:
if (not ((entity['type'] == 'PRONOMINAL') or bad_entity(entity['text'].lower()) or (len(entity['text'].split(' ')) > 10))):
no_representative = False
has_representative = True
else:
true_coref[idx]['isRepresentativeMention'] = False
if no_representative:
for (idx, entity) in enumerate(true_coref):
if (not ((entity['type'] == 'PRONOMINAL') or bad_entity(entity['text'].lower()) or (len(entity['text'].split(' ')) > 10))):
true_coref[idx]['isRepresentativeMention'] = True
has_representative = True
if has_representative:
true_corefs[key] = true_coref
return true_corefs |
def loadnpy(filename, N, dtype, mode='r'):
f = np.memmap(filename, mode=mode, dtype=dtype)
M = int((len(f) / N))
print(M, N)
f = f.reshape(M, N)
return f |
def srwl_uti_math_seq_halton(i, base=2):
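# Returns the i-th element of the Halton sequence (the radical inverse of i) in the
# given base, e.g. i=5, base=2 -> 0.101 in binary -> 0.625.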
h = 0
fac = (1.0 / base)
while (i != 0):
digit = (i % base)
h += (digit * fac)
i = ((i - digit) / base)
fac /= base
return h |
def build_nonlinearity(nonlinearity):
if (nonlinearity in NONLINEARITY):
return NONLINEARITY[nonlinearity]()
raise ValueError(('Chosen value of nonlinearity, "%s", not handled' % nonlinearity)) |
def _ensure_tuple(item: Filter) -> ((list | set) | tuple):
if (not isinstance(item, (list, set, tuple))):
return (item,)
return item |
class TestLoadSave(TestLoadSaveBase):
def testLoadSave(self):
self.load_save()
def testRepeatedArgs(self):
dtypes = [np.float16, np.float32, np.float64, np.bool_, np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16]
arrays = [np.random.permutation(6).reshape(2, 3).astype(T) for T in dtypes]
for (i, arr) in enumerate(arrays):
self.assertTrue(workspace.FeedBlob(str(i), arr))
self.assertTrue(workspace.HasBlob(str(i)))
tmp_folder = tempfile.mkdtemp()
op = core.CreateOperator('Save', ([str(i) for i in range(len(arrays))] * 2), [], absolute_path=1, db=os.path.join(tmp_folder, 'db'), db_type=self._db_type)
with self.assertRaises(RuntimeError):
workspace.RunOperatorOnce(op)
try:
shutil.rmtree(tmp_folder)
except OSError as e:
if (e.errno != errno.ENOENT):
raise
def testLoadExcessblobs(self):
tmp_folder = tempfile.mkdtemp()
(tmp_file, arrays) = self.saveFile(tmp_folder, 'db', self._db_type, 0)
op = core.CreateOperator('Load', [], ([str(i) for i in range(len(arrays))] * 2), absolute_path=1, db=tmp_file, db_type=self._db_type, load_all=False)
with self.assertRaises(RuntimeError):
workspace.RunOperatorOnce(op)
op = core.CreateOperator('Load', [], [str((len(arrays) + i)) for i in [(- 1), 0]], absolute_path=1, db=tmp_file, db_type=self._db_type, load_all=True)
with self.assertRaises(RuntimeError):
workspace.ResetWorkspace()
workspace.RunOperatorOnce(op)
op = core.CreateOperator('Load', [], [str((len(arrays) + i)) for i in range(2)], absolute_path=1, db=tmp_file, db_type=self._db_type, load_all=True)
with self.assertRaises(RuntimeError):
workspace.ResetWorkspace()
workspace.RunOperatorOnce(op)
try:
shutil.rmtree(tmp_folder)
except OSError as e:
if (e.errno != errno.ENOENT):
raise
def testTruncatedFile(self):
tmp_folder = tempfile.mkdtemp()
(tmp_file, arrays) = self.saveFile(tmp_folder, 'db', self._db_type, 0)
with open(tmp_file, 'wb+') as fdest:
fdest.seek(20, os.SEEK_END)
fdest.truncate()
op = core.CreateOperator('Load', [], [str(i) for i in range(len(arrays))], absolute_path=1, db=tmp_file, db_type=self._db_type, load_all=False)
with self.assertRaises(RuntimeError):
workspace.RunOperatorOnce(op)
op = core.CreateOperator('Load', [], [], absolute_path=1, db=tmp_file, db_type=self._db_type, load_all=True)
with self.assertRaises(RuntimeError):
workspace.RunOperatorOnce(op)
try:
shutil.rmtree(tmp_folder)
except OSError as e:
if (e.errno != errno.ENOENT):
raise
def testBlobNameOverrides(self):
original_names = ['blob_a', 'blob_b', 'blob_c']
new_names = ['x', 'y', 'z']
blobs = [np.random.permutation(6) for i in range(3)]
for (i, blob) in enumerate(blobs):
self.assertTrue(workspace.FeedBlob(original_names[i], blob))
self.assertTrue(workspace.HasBlob(original_names[i]))
self.assertEqual(len(workspace.Blobs()), 3)
try:
tmp_folder = tempfile.mkdtemp()
with self.assertRaises(RuntimeError):
workspace.RunOperatorOnce(core.CreateOperator('Save', original_names, [], absolute_path=1, strip_prefix='.temp', blob_name_overrides=new_names, db=os.path.join(tmp_folder, 'db'), db_type=self._db_type))
self.assertTrue(workspace.RunOperatorOnce(core.CreateOperator('Save', original_names, [], absolute_path=1, blob_name_overrides=new_names, db=os.path.join(tmp_folder, 'db'), db_type=self._db_type)))
self.assertTrue(workspace.ResetWorkspace())
self.assertEqual(len(workspace.Blobs()), 0)
self.assertTrue(workspace.RunOperatorOnce(core.CreateOperator('Load', [], [], absolute_path=1, db=os.path.join(tmp_folder, 'db'), db_type=self._db_type, load_all=1)))
self.assertEqual(len(workspace.Blobs()), 3)
for (i, name) in enumerate(new_names):
self.assertTrue(workspace.HasBlob(name))
self.assertTrue((workspace.FetchBlob(name) == blobs[i]).all())
load_new_names = ['blob_x', 'blob_y', 'blob_z']
self.assertTrue(workspace.RunOperatorOnce(core.CreateOperator('Load', [], load_new_names[0:1], absolute_path=1, db=os.path.join(tmp_folder, 'db'), db_type=self._db_type, source_blob_names=new_names[0:1])))
self.assertEqual(len(workspace.Blobs()), 4)
for (i, name) in enumerate(load_new_names[0:1]):
self.assertTrue(workspace.HasBlob(name))
self.assertTrue((workspace.FetchBlob(name) == blobs[i]).all())
self.assertTrue(workspace.RunOperatorOnce(core.CreateOperator('Load', [], load_new_names[0:3], absolute_path=1, db=os.path.join(tmp_folder, 'db'), db_type=self._db_type, source_blob_names=new_names[0:3])))
self.assertEqual(len(workspace.Blobs()), 6)
for (i, name) in enumerate(load_new_names[0:3]):
self.assertTrue(workspace.HasBlob(name))
self.assertTrue((workspace.FetchBlob(name) == blobs[i]).all())
finally:
try:
shutil.rmtree(tmp_folder)
except OSError as e:
if (e.errno != errno.ENOENT):
raise
def testMissingFile(self):
tmp_folder = tempfile.mkdtemp()
tmp_file = os.path.join(tmp_folder, 'missing_db')
op = core.CreateOperator('Load', [], [], absolute_path=1, db=tmp_file, db_type=self._db_type, load_all=True)
with self.assertRaises(RuntimeError):
try:
workspace.RunOperatorOnce(op)
except RuntimeError as e:
print(e)
raise
try:
shutil.rmtree(tmp_folder)
except OSError as e:
if (e.errno != errno.ENOENT):
raise
def testLoadMultipleFilesGivenSourceBlobNames(self):
tmp_folder = tempfile.mkdtemp()
(db_file_1, arrays_1) = self.saveFile(tmp_folder, 'db1', self._db_type, 0)
(db_file_2, arrays_2) = self.saveFile(tmp_folder, 'db2', self._db_type, len(arrays_1))
db_files = [db_file_1, db_file_2]
blobs_names = [str(i) for i in range((len(arrays_1) + len(arrays_2)))]
workspace.ResetWorkspace()
self.assertEqual(len(workspace.Blobs()), 0)
self.assertTrue(workspace.RunOperatorOnce(core.CreateOperator('Load', [], blobs_names, absolute_path=1, dbs=db_files, db_type=self._db_type, source_blob_names=blobs_names)))
self.assertEqual(len(workspace.Blobs()), len(blobs_names))
for i in range(len(arrays_1)):
np.testing.assert_array_equal(workspace.FetchBlob(str(i)), arrays_1[i])
for i in range(len(arrays_2)):
np.testing.assert_array_equal(workspace.FetchBlob(str((i + len(arrays_1)))), arrays_2[i])
try:
shutil.rmtree(tmp_folder)
except OSError as e:
if (e.errno != errno.ENOENT):
raise
def testLoadAllMultipleFiles(self):
tmp_folder = tempfile.mkdtemp()
(db_file_1, arrays_1) = self.saveFile(tmp_folder, 'db1', self._db_type, 0)
(db_file_2, arrays_2) = self.saveFile(tmp_folder, 'db2', self._db_type, len(arrays_1))
db_files = [db_file_1, db_file_2]
workspace.ResetWorkspace()
self.assertEqual(len(workspace.Blobs()), 0)
self.assertTrue(workspace.RunOperatorOnce(core.CreateOperator('Load', [], [], absolute_path=1, dbs=db_files, db_type=self._db_type, load_all=True)))
self.assertEqual(len(workspace.Blobs()), (len(arrays_1) + len(arrays_2)))
for i in range(len(arrays_1)):
np.testing.assert_array_equal(workspace.FetchBlob(str(i)), arrays_1[i])
for i in range(len(arrays_2)):
np.testing.assert_array_equal(workspace.FetchBlob(str((i + len(arrays_1)))), arrays_2[i])
try:
shutil.rmtree(tmp_folder)
except OSError as e:
if (e.errno != errno.ENOENT):
raise
def testLoadAllMultipleFilesWithSameKey(self):
tmp_folder = tempfile.mkdtemp()
(db_file_1, arrays_1) = self.saveFile(tmp_folder, 'db1', self._db_type, 0)
(db_file_2, arrays_2) = self.saveFile(tmp_folder, 'db2', self._db_type, 0)
db_files = [db_file_1, db_file_2]
workspace.ResetWorkspace()
self.assertEqual(len(workspace.Blobs()), 0)
op = core.CreateOperator('Load', [], [], absolute_path=1, dbs=db_files, db_type=self._db_type, load_all=True)
with self.assertRaises(RuntimeError):
workspace.RunOperatorOnce(op)
try:
shutil.rmtree(tmp_folder)
except OSError as e:
if (e.errno != errno.ENOENT):
raise
def testLoadRepeatedFiles(self):
tmp_folder = tempfile.mkdtemp()
(tmp_file, arrays) = self.saveFile(tmp_folder, 'db', self._db_type, 0)
db_files = [tmp_file, tmp_file]
workspace.ResetWorkspace()
self.assertEqual(len(workspace.Blobs()), 0)
op = core.CreateOperator('Load', [], [str(i) for i in range(len(arrays))], absolute_path=1, dbs=db_files, db_type=self._db_type, load_all=False)
with self.assertRaises(RuntimeError):
workspace.RunOperatorOnce(op)
try:
shutil.rmtree(tmp_folder)
except OSError as e:
if (e.errno != errno.ENOENT):
raise |
def GetModelAndOptNames():
if (len(sys.argv) < 2):
print('USAGE: main.py [Model]\n')
print('Model options: LCN, one2one, unet2, unet3, conv-deconv etc.')
sys.exit(1)
modelname = sys.argv[1]
return modelname |
def create_model(n_timesteps, n_features, n_outputs, _dff=512, d_model=128, nh=4, dropout_rate=0.2, use_pe=True):
inputs = tf.keras.layers.Input(shape=(n_timesteps, n_features))
(si, _) = SensorAttention(n_filters=128, kernel_size=3, dilation_rate=2)(inputs)
x = tf.keras.layers.Conv1D(d_model, 1, activation='relu')(si)
if use_pe:
x *= tf.math.sqrt(tf.cast(d_model, tf.float32))
x = PositionalEncoding(n_timesteps, d_model)(x)
x = tf.keras.layers.Dropout(rate=dropout_rate)(x)
x = EncoderLayer(d_model=d_model, num_heads=nh, dff=_dff, rate=dropout_rate)(x)
x = EncoderLayer(d_model=d_model, num_heads=nh, dff=_dff, rate=dropout_rate)(x)
x = AttentionWithContext()(x)
x = tf.keras.layers.Dense((n_outputs * 4), activation='relu')(x)
x = tf.keras.layers.Dropout(0.2)(x)
predictions = tf.keras.layers.Dense(n_outputs, activation='softmax')(x)
model = tf.keras.Model(inputs=inputs, outputs=predictions)
return model |
def db2velocity(db, db_median=65, vel_10db_low=20, vel_10db_high=30):
if (db <= db_median):
vel = int((80 - ((vel_10db_low * (db_median - db)) / 10)))
else:
vel = int((80 + ((vel_10db_high * (db - db_median)) / 10)))
vel = min(120, max(10, vel))
return vel |
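A minimal usage sketch for db2velocity: the dB readings below are made up, and the expected outputs follow directly from the linear mapping around the 65 dB median with the [10, 120] clamp.

```python
# Illustrative loudness values swept around the 65 dB median.
for db in (40, 65, 90):
    print(db, db2velocity(db))
# 40 dB -> 30  (below the median, velocity drops below 80)
# 65 dB -> 80  (the median maps to the centre velocity)
# 90 dB -> 120 (clamped at the upper bound)
```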
class BertLM(MiniconsLM):
def __init__(self, model_name_or_path, gpu_batch_size=1, gpu_id=0):
super().__init__(model_name_or_path=model_name_or_path, device='cuda', gpu_batch_size=gpu_batch_size, model_type='MaskedLMScorer') |
class SeqAttnMatch(nn.Module):
def __init__(self, input_size, identity=False):
super(SeqAttnMatch, self).__init__()
if (not identity):
self.linear = nn.Linear(input_size, input_size)
else:
self.linear = None
def forward(self, x, y, y_mask):
if self.linear:
x_proj = self.linear(x.view((- 1), x.size(2))).view(x.size())
x_proj = F.relu(x_proj)
y_proj = self.linear(y.view((- 1), y.size(2))).view(y.size())
y_proj = F.relu(y_proj)
else:
x_proj = x
y_proj = y
scores = x_proj.bmm(y_proj.transpose(2, 1))
y_mask = y_mask.unsqueeze(1).expand(scores.size())
scores.masked_fill_(y_mask, (- float('inf')))
alpha = F.softmax(scores, dim=(- 1))
matched_seq = alpha.bmm(y)
return matched_seq |
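A small shape check for SeqAttnMatch, assuming torch, torch.nn as nn, and torch.nn.functional as F are imported as the class requires; the batch size, sequence lengths, and padding positions are illustrative.

```python
import torch

attn = SeqAttnMatch(input_size=8)
x = torch.randn(2, 5, 8)                      # query sequences: batch 2, length 5, dim 8
y = torch.randn(2, 7, 8)                      # context sequences: batch 2, length 7, dim 8
y_mask = torch.zeros(2, 7, dtype=torch.bool)  # True marks padded context positions
y_mask[:, 5:] = True
matched = attn(x, y, y_mask)
print(matched.shape)                          # torch.Size([2, 5, 8])
```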
class FlaskCliRunner(CliRunner):
def __init__(self, app, **kwargs):
self.app = app
super(FlaskCliRunner, self).__init__(**kwargs)
def invoke(self, cli=None, args=None, **kwargs):
if (cli is None):
cli = self.app.cli
if ('obj' not in kwargs):
kwargs['obj'] = ScriptInfo(create_app=(lambda : self.app))
return super(FlaskCliRunner, self).invoke(cli, args, **kwargs) |
def residual_collapsing_fn(first_node: BaseNode, kernel_str: str) -> np.ndarray:
if (first_node.type == Conv2d):
kernel = first_node.get_weights_by_keys(kernel_str)
(Cout, Cin, kH, kW) = kernel.shape
idxH = ((kH - 1) // 2)
idxW = ((kW - 1) // 2)
for i in range(Cout):
kernel[(i, i, idxH, idxW)] += 1
return kernel
else:
Logger.error('No supported add residual collapsing for {}'.format(first_node.type)) |
class GNN(torch.nn.Module):
def __init__(self, x_dims, y_dims, n_layers=2):
super(GNN, self).__init__()
self.n_layers = n_layers
self.W = torch.nn.Parameter(torch.zeros(x_dims, y_dims))
def forward(self, adj_t, x):
adj_t = sym_norm(adj_t)
for _ in range(self.n_layers):
x = adj_t.spmm(x)
return x
def pred(self, adj_t, x):
feats = self.forward(adj_t, x)
return (feats @ self.W)
def ovr_lr_loss(self, X, Y, lam):
Y[(Y == 0)] = (- 1)
Z = X.mul_(Y)
return ((- F.logsigmoid(Z).mean(0).sum()) + ((lam * self.W.pow(2).sum()) / 2)) |
def parse_args():
parser = argparse.ArgumentParser(description='MMAction2 benchmark a recognizer')
parser.add_argument('config', help='test config file path')
parser.add_argument('--log-interval', default=10, help='interval of logging')
parser.add_argument('--fuse-conv-bn', action='store_true', help='Whether to fuse conv and bn, this will slightly increase the inference speed')
args = parser.parse_args()
return args |
def _array_descr(descriptor):
fields = descriptor.fields
if (fields is None):
subdtype = descriptor.subdtype
if (subdtype is None):
if (descriptor.metadata is None):
return descriptor.str
else:
new = descriptor.metadata.copy()
if new:
return (descriptor.str, new)
else:
return descriptor.str
else:
return (_array_descr(subdtype[0]), subdtype[1])
names = descriptor.names
ordered_fields = [(fields[x] + (x,)) for x in names]
result = []
offset = 0
for field in ordered_fields:
if (field[1] > offset):
num = (field[1] - offset)
result.append(('', ('|V%d' % num)))
offset += num
elif (field[1] < offset):
raise ValueError('dtype.descr is not defined for types with overlapping or out-of-order fields')
if (len(field) > 3):
name = (field[2], field[3])
else:
name = field[2]
if field[0].subdtype:
tup = (name, _array_descr(field[0].subdtype[0]), field[0].subdtype[1])
else:
tup = (name, _array_descr(field[0]))
offset += field[0].itemsize
result.append(tup)
if (descriptor.itemsize > offset):
num = (descriptor.itemsize - offset)
result.append(('', ('|V%d' % num)))
return result |
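An illustrative call to _array_descr on a structured NumPy dtype; the field layout is chosen to exercise both the padding branch and the plain-field branch.

```python
import numpy as np

dt = np.dtype({'names': ['a', 'b'],
               'formats': ['<i4', '<f8'],
               'offsets': [0, 8],
               'itemsize': 16})
print(_array_descr(dt))
# [('a', '<i4'), ('', '|V4'), ('b', '<f8')]  -- the '|V4' entry fills the gap between offsets 4 and 8
```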
class GradedModulesWithBasis(GradedModulesCategory):
class ParentMethods():
def degree_negation(self, element):
base_one = self.base_ring().one()
base_minusone = (- base_one)
diag = (lambda x: (base_one if ((self.degree_on_basis(x) % 2) == 0) else base_minusone))
return self.sum_of_terms([(key, (diag(key) * value)) for (key, value) in element.monomial_coefficients(copy=False).items()])
def submodule(self, gens, check=True, already_echelonized=False, unitriangular=False, support_order=None, category=None, *args, **opts):
from sage.sets.family import Family, AbstractFamily
if isinstance(gens, AbstractFamily):
gens = gens.map(self)
elif isinstance(gens, dict):
gens = Family(gens.keys(), gens.__getitem__)
else:
gens = [self(y) for y in gens]
support_order = self._compute_support_order(gens, support_order)
if (not already_echelonized):
gens = self.echelon_form(gens, unitriangular, order=support_order)
GMod = GradedModulesWithBasis(self.category().base_ring())
if (category is None):
if all((g.is_homogeneous() for g in gens)):
category = GMod.Subobjects()
elif (category.is_subcategory(GMod.Subobjects()) and (not all((g.is_homogeneous() for g in gens)))):
raise ValueError('all of the generators must be homogeneous')
from sage.modules.with_basis.subquotient import SubmoduleWithBasis
return SubmoduleWithBasis(gens, *args, ambient=self, support_order=support_order, unitriangular=unitriangular, category=category, **opts)
def quotient_module(self, submodule, check=True, already_echelonized=False, category=None):
from sage.modules.with_basis.subquotient import SubmoduleWithBasis, QuotientModuleWithBasis
if (not isinstance(submodule, SubmoduleWithBasis)):
submodule = self.submodule(submodule, check=check, unitriangular=True, already_echelonized=already_echelonized)
GMod = GradedModulesWithBasis(self.category().base_ring())
if (category is None):
if all((g.is_homogeneous() for g in submodule.basis())):
category = GMod.Quotients()
elif (category.is_subcategory(GMod.Quotients()) and (not all((g.is_homogeneous() for g in submodule.basis())))):
raise ValueError('all of the basis elements must be homogeneous')
return QuotientModuleWithBasis(submodule, category=category)
class ElementMethods():
def degree_negation(self):
return self.parent().degree_negation(self)
class Quotients(QuotientsCategory):
class ParentMethods():
def degree_on_basis(self, m):
return self.basis()[m].lift().degree()
class ElementMethods():
def degree(self):
return self.lift().degree() |
def get_wsl_blob_names(is_training=True):
blob_names = ['im_info']
if is_training:
blob_names += ['cls_labels']
return blob_names |
def upsample_bilinear(input, size=None, scale_factor=None):
warnings.warn('nn.quantized.functional.upsample_bilinear is deprecated. Use nn.quantized.functional.interpolate instead.')
return interpolate(input, size, scale_factor, mode='bilinear', align_corners=True) |
.skip()
def gen_data():
(n_samples, C) = (10, 5)
open_pred = np.random.rand(n_samples)
open_labels = np.random.randint(low=0, high=2, size=(n_samples,))
close_pred = np.random.rand(n_samples, C)
close_labels = np.random.randint(low=0, high=C, size=(n_samples,))
(n_close_samples, C, n_open_samples) = (10, 5, 8)
open_set_pred_known = np.random.rand(n_close_samples)
open_set_pred_unknown = np.random.rand(n_open_samples)
close_set_pred_class = np.random.randint(low=0, high=C, size=(n_close_samples,))
close_set_labels = np.random.randint(low=0, high=C, size=(n_close_samples,))
return (open_pred, open_labels, close_pred, close_labels, open_set_pred_known, open_set_pred_unknown, close_set_pred_class, close_set_labels) |
def test_isotonic_regression_ties_max():
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y)) |
class HFIndexBase(Index):
def __init__(self, vector_size, dataset, index_initialized=False):
self.vector_size = vector_size
self.dataset = dataset
self._index_initialized = index_initialized
self._check_dataset_format(with_index=index_initialized)
dataset.set_format('numpy', columns=['embeddings'], output_all_columns=True, dtype='float32')
def _check_dataset_format(self, with_index: bool):
if (not isinstance(self.dataset, Dataset)):
raise ValueError('Dataset should be a datasets.Dataset object, but got {}'.format(type(self.dataset)))
if (len(({'title', 'text', 'embeddings'} - set(self.dataset.column_names))) > 0):
raise ValueError('Dataset should be a dataset with the following columns: title (str), text (str) and embeddings (arrays of dimension vector_size), but got columns {}'.format(self.dataset.column_names))
if (with_index and ('embeddings' not in self.dataset.list_indexes())):
raise ValueError('Missing faiss index in the dataset. Make sure you called `dataset.add_faiss_index` to compute it or `dataset.load_faiss_index` to load one from the disk.')
def init_index(self):
raise NotImplementedError()
def is_initialized(self):
return self._index_initialized
def get_doc_dicts(self, doc_ids: np.ndarray) -> List[dict]:
return [self.dataset[doc_ids[i].tolist()] for i in range(doc_ids.shape[0])]
def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[(np.ndarray, np.ndarray)]:
(_, ids) = self.dataset.search_batch('embeddings', question_hidden_states, n_docs)
docs = [self.dataset[[i for i in indices if (i >= 0)]] for indices in ids]
vectors = [doc['embeddings'] for doc in docs]
for i in range(len(vectors)):
if (len(vectors[i]) < n_docs):
vectors[i] = np.vstack([vectors[i], np.zeros(((n_docs - len(vectors[i])), self.vector_size))])
return (np.array(ids), np.array(vectors)) |
def resolve_egg_link(path):
referenced_paths = non_empty_lines(path)
resolved_paths = (os.path.join(os.path.dirname(path), ref) for ref in referenced_paths)
dist_groups = map(find_distributions, resolved_paths)
return next(dist_groups, ()) |
def advance_iter_and_group_samples(train_iterator, num_samples, max_seq_length):
num_total_tokens = (max_seq_length * num_samples)
samples = defaultdict(list)
i = 0
while (i < num_total_tokens):
tokenized_samples = next(train_iterator)
i += len(tokenized_samples['input_ids'])
samples = {k: (samples[k] + tokenized_samples[k]) for k in tokenized_samples.keys()}
def group_texts(examples):
result = {k: [t[i:(i + max_seq_length)] for i in range(0, num_total_tokens, max_seq_length)] for (k, t) in examples.items()}
return result
grouped_samples = group_texts(samples)
return grouped_samples |
class Breakpoint():
type: str = None
pattern: re.Pattern = None
def __init__(self, text, cond=None, index=(- 1)) -> None:
self.enabled = True
self.text = text
self.cond = cond
self.index = index
self.hit_count = 0
self.ignore = 0
def __init_subclass__(cls) -> None:
breakpoint_cls.append(cls)
def __str__(self) -> str:
return '\t'.join(self.tostrlist())
def tostrlist(self) -> List[str]:
enable_str = ('y' if self.enabled else 'n')
return [f'{self.index}', self.type, enable_str, self.text, f'{self.hit_count}']
def should_stop(self, tdb: TdbCmdBackend) -> bool:
pass
def toggle_enable(self, flag: bool):
self.enabled = flag
@classmethod
def match_break(cls, text, tdb: TdbCmdBackend) -> bool:
if isinstance(cls.pattern, re.Pattern):
return (cls.pattern.search(text) is not None)
return False |
def get_out_entities(entity: str, relation: str):
neighbors = set()
query2 = (((('\n PREFIX rdf: < PREFIX rdfs: < PREFIX : < \n SELECT (?x1 AS ?value) WHERE {\n SELECT DISTINCT ?x1 WHERE {\n :' + entity) + ':') + relation) + ' ?x1 . \n FILTER regex(?x1, " }\n }\n ')
sparql.setQuery(query2)
try:
results = sparql.query().convert()
except urllib.error.URLError:
print(query2)
exit(0)
for result in results['results']['bindings']:
neighbors.add(result['value']['value'].replace(' ''))
return neighbors |
class CaselessKeyword(Keyword):
def __init__(self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS):
super(CaselessKeyword, self).__init__(matchString, identChars, caseless=True)
def parseImpl(self, instring, loc, doActions=True):
if ((instring[loc:(loc + self.matchLen)].upper() == self.caselessmatch) and ((loc >= (len(instring) - self.matchLen)) or (instring[(loc + self.matchLen)].upper() not in self.identChars))):
return ((loc + self.matchLen), self.match)
raise ParseException(instring, loc, self.errmsg, self) |
def match_classes_with_shuffle(views, deranged_classes_ratio, class_datapoints_threshold, shuffle_datapoints, shuffle_each_cluster, return_class_dict=False, add_vid=False, align=False, if_shuffle_each_view=True, if_shuffle_classes=True):
views = categorize_data_view(views, add_vid=add_vid, align=align)
(keys, nclasses, num_matched_classes) = get_keys(views, deranged_classes_ratio, shuffle_each_cluster, if_shuffle_classes)
(all_features, keys, dataset_size, subset_size, num_matched_classes, nclasses) = cut_class_datapoints(views, class_datapoints_threshold, keys, nclasses, num_matched_classes, shuffle_datapoints, if_shuffle_each_view=if_shuffle_each_view, return_class_dict=return_class_dict)
return (all_features, keys, dataset_size, subset_size, num_matched_classes, nclasses) |
def moving_average(x: np.ndarray, n: int=1000):
out = np.cumsum(x, dtype=np.float32)
out[n:] = (out[n:] - out[:(- n)])
return (out[(n - 1):] / n) |
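A quick check of moving_average: the window n=3 is arbitrary, and the cumulative-sum formulation returns len(x) - n + 1 averaged values.

```python
import numpy as np

x = np.array([1, 2, 3, 4, 5], dtype=np.float32)
print(moving_average(x, n=3))   # [2. 3. 4.]
```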
class FileInterface(base.FileInterface):
def __init__(self, glove_dir, glove_size, elmo_options_file, elmo_weights_file, **kwargs):
self._glove_dir = glove_dir
self._glove_size = glove_size
self._elmo_options_file = elmo_options_file
self._elmo_weights_file = elmo_weights_file
super(FileInterface, self).__init__(**kwargs)
def load_train(self):
return _load_squad(self._train_path, draft=self._draft)
def load_test(self):
return _load_squad(self._test_path, draft=self._draft)
def load_metadata(self):
(glove_vocab, glove_emb_mat) = _load_glove(self._glove_size, glove_dir=self._glove_dir, draft=self._draft)
return {'glove_vocab': glove_vocab, 'glove_emb_mat': glove_emb_mat, 'elmo_options_file': self._elmo_options_file, 'elmo_weights_file': self._elmo_weights_file} |
def __plot_relay_goodput(args, torperf_dbs, tornet_dbs, net_scale):
for tornet_db in tornet_dbs:
tornet_db['data'] = []
for (i, d) in enumerate(tornet_db['dataset']):
l = [((b / (1024 ** 3)) * 8) for b in d.values()]
tornet_db['data'].append(l)
for torperf_db in torperf_dbs:
gput = torperf_db['dataset']['relay_goodput']
torperf_db['data'] = [[(net_scale * gbits) for gbits in gput.values()]]
dbs_to_plot = (torperf_dbs + tornet_dbs)
__plot_cdf_figure(args, dbs_to_plot, 'relay_goodput', xlabel="Sum of Relays' Goodput (Gbit/s)") |
def imread(filename, new_dims=None):
im = sp.misc.imread(filename)
if (new_dims is None):
return (im / 255.0)
else:
return (imresize(im, new_dims) / 255.0) |
class EphemWheelCache(SimpleWheelCache):
def __init__(self, format_control):
self._temp_dir = TempDirectory(kind='ephem-wheel-cache')
self._temp_dir.create()
super(EphemWheelCache, self).__init__(self._temp_dir.path, format_control)
def cleanup(self):
self._temp_dir.cleanup() |
def get_loss_across_trials(directory: str) -> List[float]:
directories = os.listdir(directory)
valid = filter(operator.methodcaller('isnumeric'), directories)
sorted_valid = sorted(valid, key=int)
return [get_best_loss(directory, trial) for trial in sorted_valid] |
def create_batches(sampler, dataset_files, cache_dir='cache'):
key = Hasher.hash(dataset_files)
if isinstance(sampler.collator, LeftOversCollator):
key += '_segment_collator'
elif isinstance(sampler.collator, PadCollator):
key += '_longformer_collator'
else:
raise NotImplementedError('this collator is not implemented!')
cache_key = Hasher.hash(key)
dataset_path = os.path.join(cache_dir, cache_key)
try:
batches = datasets.load_from_disk(dataset_path)
logger.info(f'Batches restored from: {dataset_path}')
except FileNotFoundError:
logger.info(f'Creating batches for {len(sampler.dataset)} examples...')
batches_dict = defaultdict((lambda : []))
for (i, batch) in enumerate(tqdm(sampler)):
for (k, v) in batch.items():
batches_dict[k].append(v)
batches = Dataset.from_dict(batches_dict)
logger.info(f'{len(batches)} batches created.')
logger.info(f'Saving batches to {dataset_path}')
batches.save_to_disk(dataset_path)
return batches |
def test_make_with_kwargs():
env = envs.make('test.ArgumentEnv-v0', arg2='override_arg2', arg3='override_arg3')
assert (env.spec.id == 'test.ArgumentEnv-v0')
assert isinstance(env.unwrapped, ArgumentEnv)
assert (env.arg1 == 'arg1')
assert (env.arg2 == 'override_arg2')
assert (env.arg3 == 'override_arg3') |
def create_color_mapper() -> Tuple[(LinearColorMapper, ColorBar)]:
mapper = LinearColorMapper(palette=list(reversed(GREYS256)), low=0, high=1)
colorbar = ColorBar(color_mapper=mapper, major_label_text_font_size='8pt', ticker=BasicTicker(), formatter=NumeralTickFormatter(format='0 %'), label_standoff=10, border_line_color=None, location=(0, 0))
return (mapper, colorbar) |
class CommonConfiguration(Configuration):
def __init__(self, *args, warning_suppress=False, **kwargs):
super(CommonConfiguration, self).__init__(*args, **kwargs)
self._warning_suppress = warning_suppress
def __getattr__(self, item):
if (item.startswith('__') and item.endswith('__')):
raise AttributeError
elif (not self._warning_suppress):
msg = 'Config item {} not defined or properly set in config file.'.format(item)
warnings.warn(msg)
@classmethod
def from_yaml(cls, path, warning_suppress=False):
cls.validate_path(path)
with open(path, 'r') as f:
y = yaml.load(f, Loader=yaml.FullLoader)
return CommonConfiguration.from_dict(y, warning_suppress=warning_suppress)
@classmethod
def from_json(cls, path, warning_suppress=False):
cls.validate_path(path)
with open(path, 'r') as f:
j = json.load(f)
return CommonConfiguration.from_dict(j, warning_suppress=warning_suppress)
@classmethod
def from_dict(cls, d, warning_suppress=False):
cls.validate_dict(d)
cfg = CommonConfiguration(warning_suppress=warning_suppress)
for (k, v) in d.items():
if isinstance(v, dict):
setattr(cfg, k, CommonConfiguration.from_dict(v, warning_suppress=warning_suppress))
elif isinstance(v, list):
setattr(cfg, k, [(CommonConfiguration.from_dict(d, warning_suppress=warning_suppress) if isinstance(d, dict) else d) for d in v])
else:
setattr(cfg, k, v)
return cfg |
def test_reverse_sequence():
time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
in_dim = Dim(7, name='in')
extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})
class _Net(rf.Module):
def __call__(self, x: Tensor) -> Tensor:
return rf.reverse_sequence(x, axis=time_dim)
def _forward_step(*, model: _Net, extern_data: TensorDict):
out = model(extern_data['data'])
out.mark_as_default_output(shape=(batch_dim, time_dim, in_dim))
run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step) |
def create_right_column(state) -> html.Div:
explanation_views = create_explanation_layout(state, explanation_type='global')
return html.Div(id='right-column-global', children=explanation_views) |
class ZeldaCtrlProblem(ZeldaProblem):
def __init__(self, cfg: Config):
super(ZeldaCtrlProblem, self).__init__(cfg=cfg)
self._max_nearest_enemy = (np.ceil(((self._width / 2) + 1)) * self._height)
self._max_path_length = ((((np.ceil((self._width / 2)) * self._height) + np.floor((self._height / 2))) * 2) - 1)
self._reward_weights = {'player': 3, 'key': 3, 'door': 3, 'regions': 5, 'enemies': 1, 'nearest-enemy': 1, 'path-length': 1}
self._ctrl_reward_weights = self._reward_weights
self.static_trgs = {'enemies': (2, self._max_enemies), 'path-length': self._max_path_length, 'nearest-enemy': (5, self._max_nearest_enemy), 'regions': 1, 'player': 1, 'key': 1, 'door': 1}
self.cond_bounds = {'nearest-enemy': (0, self._max_nearest_enemy), 'enemies': (0, ((self._width * self._height) - 2)), 'player': (0, ((self._width * self._height) - 2)), 'key': (0, ((self._width * self._height) - 2)), 'door': (0, ((self._width * self._height) - 2)), 'regions': (0, ((self._width * self._height) / 2)), 'path-length': (0, self._max_path_length)}
def get_episode_over(self, new_stats, old_stats):
return False
def get_reward(self, new_stats, old_stats):
return None
def get_stats(self, map, lenient_paths=False):
'\n Get the current stats of the map\n\n Returns:\n dict(string, any): stats of the current map to be used in the reward, episode_over, debug_info calculations.\n The used stats are "regions": number of connected empty tiles, "path-length": the longest path across the map\n '
self.path = []
map_locations = get_tile_locations(map, self.get_tile_types())
map_stats = {'player': calc_certain_tile(map_locations, ['player']), 'key': calc_certain_tile(map_locations, ['key']), 'door': calc_certain_tile(map_locations, ['door']), 'enemies': calc_certain_tile(map_locations, ['bat', 'spider', 'scorpion']), 'regions': calc_num_regions(map, map_locations, ['empty', 'player', 'key', 'bat', 'spider', 'scorpion']), 'nearest-enemy': 0, 'path-length': 0}
if (map_stats['player'] == 1):
(p_x, p_y) = map_locations['player'][0]
enemies = []
enemies.extend(map_locations['spider'])
enemies.extend(map_locations['bat'])
enemies.extend(map_locations['scorpion'])
UPPER_DIST = ((self._width * self._height) * 100)
if (len(enemies) > 0):
(dijkstra, _) = run_dijkstra(p_x, p_y, map, ['empty', 'player', 'key', 'bat', 'spider', 'scorpion'])
min_dist = UPPER_DIST
for (e_x, e_y) in enemies:
if ((dijkstra[e_y][e_x] > 0) and (dijkstra[e_y][e_x] < min_dist)):
min_dist = dijkstra[e_y][e_x]
if (min_dist == UPPER_DIST):
min_dist = 0
map_stats['nearest-enemy'] = min_dist
if ((map_stats['key'] == 1) and (map_stats['door'] == 1)):
(k_x, k_y) = map_locations['key'][0]
(d_x, d_y) = map_locations['door'][0]
(dijkstra_k, _) = run_dijkstra(p_x, p_y, map, ['empty', 'key', 'player', 'bat', 'spider', 'scorpion'])
map_stats['path-length'] += dijkstra_k[k_y][k_x]
(dijkstra_d, _) = run_dijkstra(k_x, k_y, map, ['empty', 'player', 'key', 'door', 'bat', 'spider', 'scorpion'])
map_stats['path-length'] += dijkstra_d[d_y][d_x]
if self.render_path:
self.path = np.vstack((get_path_coords(dijkstra_k, init_coords=(k_y, k_x))[:], get_path_coords(dijkstra_d, init_coords=(d_y, d_x))[:]))
front_tiles = set(((k_x, k_y), (d_x, d_y), (p_x, p_y)))
i = 0
render_path = self.path.copy()
for (y, x) in self.path:
if ((x, y) in front_tiles):
continue
render_path[i] = [y, x]
i += 1
self.path = render_path[:i]
self.path_length = map_stats['path-length']
return map_stats |
def rotate_bbox(x, y, w, h, angle):
(c, s) = (np.cos(np.radians(angle)), np.sin(np.radians(angle)))
R = np.asarray([[c, s], [(- s), c]])
pts = np.asarray([[((- w) / 2), ((- h) / 2)], [(w / 2), ((- h) / 2)], [(w / 2), (h / 2)], [((- w) / 2), (h / 2)]])
rot_pts = []
for pt in pts:
rot_pts.append(([x, y] + (pt @ R)).astype(int))
return rot_pts |
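A sketch of calling rotate_bbox, assuming the corner order (top-left, top-right, bottom-right, bottom-left) implied by pts above; the box centre, size, and angle are made up.

```python
import numpy as np

corners = rotate_bbox(x=100, y=50, w=40, h=20, angle=90)
for corner in corners:
    print(corner)
# After a 90-degree rotation the half-width and half-height offsets swap roles around (100, 50).
```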
def get_transform(transform_type='default', image_size=32, args=None):
if (transform_type == 'imagenet'):
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
interpolation = args.interpolation
crop_pct = args.crop_pct
train_transform = transforms.Compose([transforms.Resize(int((image_size / crop_pct)), interpolation), transforms.RandomCrop(image_size), transforms.RandomHorizontalFlip(p=0.5), transforms.ColorJitter(), transforms.ToTensor(), transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std))])
test_transform = transforms.Compose([transforms.Resize(int((image_size / crop_pct)), interpolation), transforms.CenterCrop(image_size), transforms.ToTensor(), transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std))])
elif (transform_type == 'pytorch-cifar'):
mean = (0.4914, 0.4822, 0.4465)
std = (0.2023, 0.1994, 0.201)
train_transform = transforms.Compose([transforms.Resize((image_size, image_size)), transforms.RandomCrop(image_size, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
test_transform = transforms.Compose([transforms.Resize((image_size, image_size)), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
elif (transform_type == 'herbarium_default'):
train_transform = transforms.Compose([transforms.Resize((image_size, image_size)), transforms.RandomResizedCrop(image_size, scale=(args.resize_lower_bound, 1)), transforms.RandomHorizontalFlip(), transforms.ToTensor()])
test_transform = transforms.Compose([transforms.Resize((image_size, image_size)), transforms.ToTensor()])
elif (transform_type == 'cutout'):
mean = np.array([0.4914, 0.4822, 0.4465])
std = np.array([0.247, 0.2435, 0.2616])
train_transform = transforms.Compose([transforms.RandomCrop(image_size, padding=4), transforms.RandomHorizontalFlip(), normalize(mean, std), cutout(mask_size=int((image_size / 2)), p=1, cutout_inside=False), to_tensor()])
test_transform = transforms.Compose([transforms.Resize((image_size, image_size)), transforms.ToTensor(), transforms.Normalize(mean, std)])
elif (transform_type == 'rand-augment'):
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
train_transform = transforms.Compose([transforms.Resize((image_size, image_size)), transforms.RandomCrop(image_size, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
train_transform.transforms.insert(0, RandAugment(args.rand_aug_n, args.rand_aug_m, args=None))
test_transform = transforms.Compose([transforms.Resize((image_size, image_size)), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
elif (transform_type == 'random_affine'):
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
interpolation = args.interpolation
crop_pct = args.crop_pct
train_transform = transforms.Compose([transforms.Resize((image_size, image_size), interpolation), transforms.RandomAffine(degrees=((- 45), 45), translate=(0.1, 0.1), shear=((- 15), 15), scale=(0.7, args.crop_pct)), transforms.ColorJitter(), transforms.ToTensor(), transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std))])
test_transform = transforms.Compose([transforms.Resize(int((image_size / crop_pct)), interpolation), transforms.CenterCrop(image_size), transforms.ToTensor(), transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std))])
else:
raise NotImplementedError
return (train_transform, test_transform) |
def make_test_data_loader(cfg, datasets):
ims_per_gpu = cfg.TEST.IMS_PER_GPU
test_sampler = torch.utils.data.distributed.DistributedSampler(datasets)
num_workers = cfg.TEST.LOADER_THREADS
collator = BatchCollator((- 1))
data_loader = torch.utils.data.DataLoader(datasets, batch_size=ims_per_gpu, shuffle=False, sampler=test_sampler, num_workers=num_workers, collate_fn=collator)
return data_loader |
class DeformRoIPoolingPack(DeformRoIPooling):
def __init__(self, spatial_scale, out_size, out_channels, no_trans, group_size=1, part_size=None, sample_per_part=4, trans_std=0.0, num_offset_fcs=3, deform_fc_channels=1024):
super(DeformRoIPoolingPack, self).__init__(spatial_scale, out_size, out_channels, no_trans, group_size, part_size, sample_per_part, trans_std)
self.num_offset_fcs = num_offset_fcs
self.deform_fc_channels = deform_fc_channels
if (not no_trans):
seq = []
ic = ((self.out_size * self.out_size) * self.out_channels)
for i in range(self.num_offset_fcs):
if (i < (self.num_offset_fcs - 1)):
oc = self.deform_fc_channels
else:
oc = ((self.out_size * self.out_size) * 2)
seq.append(nn.Linear(ic, oc))
ic = oc
if (i < (self.num_offset_fcs - 1)):
seq.append(nn.ReLU(inplace=True))
self.offset_fc = nn.Sequential(*seq)
self.offset_fc[(- 1)].weight.data.zero_()
self.offset_fc[(- 1)].bias.data.zero_()
def forward(self, data, rois):
assert (data.size(1) == self.out_channels)
if self.no_trans:
offset = data.new_empty(0)
return deform_roi_pooling(data, rois, offset, self.spatial_scale, self.out_size, self.out_channels, self.no_trans, self.group_size, self.part_size, self.sample_per_part, self.trans_std)
else:
n = rois.shape[0]
offset = data.new_empty(0)
x = deform_roi_pooling(data, rois, offset, self.spatial_scale, self.out_size, self.out_channels, True, self.group_size, self.part_size, self.sample_per_part, self.trans_std)
offset = self.offset_fc(x.view(n, (- 1)))
offset = offset.view(n, 2, self.out_size, self.out_size)
return deform_roi_pooling(data, rois, offset, self.spatial_scale, self.out_size, self.out_channels, self.no_trans, self.group_size, self.part_size, self.sample_per_part, self.trans_std) |
def merge_log(log_list):
log = dict()
log['total_reward'] = sum([x['total_reward'] for x in log_list])
log['num_episodes'] = sum([x['num_episodes'] for x in log_list])
log['num_steps'] = sum([x['num_steps'] for x in log_list])
log['avg_reward'] = (log['total_reward'] / log['num_episodes'])
log['max_reward'] = max([x['max_reward'] for x in log_list])
log['min_reward'] = min([x['min_reward'] for x in log_list])
if ('total_c_reward' in log_list[0]):
log['total_c_reward'] = sum([x['total_c_reward'] for x in log_list])
log['avg_c_reward'] = (log['total_c_reward'] / log['num_steps'])
log['max_c_reward'] = max([x['max_c_reward'] for x in log_list])
log['min_c_reward'] = min([x['min_c_reward'] for x in log_list])
return log |
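An illustrative merge of two per-worker logs with made-up reward statistics, showing how totals are summed and the average recomputed.

```python
worker_logs = [
    {'total_reward': 10.0, 'num_episodes': 2, 'num_steps': 100, 'max_reward': 7.0, 'min_reward': 3.0},
    {'total_reward': 30.0, 'num_episodes': 3, 'num_steps': 150, 'max_reward': 12.0, 'min_reward': 5.0},
]
merged = merge_log(worker_logs)
print(merged['avg_reward'])   # 40.0 total reward over 5 episodes -> 8.0
print(merged['max_reward'])   # 12.0
```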
def do_tojson(eval_ctx, value, indent=None):
policies = eval_ctx.environment.policies
dumper = policies['json.dumps_function']
options = policies['json.dumps_kwargs']
if (indent is not None):
options = dict(options)
options['indent'] = indent
return htmlsafe_json_dumps(value, dumper=dumper, **options) |
def _conv(x, filters, kernel_size, strides=1, normalizer_fn=tf.keras.layers.BatchNormalization, activation_fn=tf.nn.relu6, normalization_op_params=None):
if (activation_fn is None):
raise ValueError('Activation function cannot be None. Use tf.identity instead to better support quantized training.')
if ((normalization_op_params is None) and (normalizer_fn is not None)):
raise ValueError('Normalization params cannot be `None`')
x = tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size, strides=strides, padding='same', kernel_initializer=tf.initializers.VarianceScaling())(x)
if (normalizer_fn is not None):
normalization_op = get_normalization_op(**normalization_op_params)
x = normalization_op()(x)
x = activation_fn(x)
return x |
def _expand_to_minibatch(np_array, batch_size):
print(batch_size)
tiles = ([batch_size] + ([1] * np_array.ndim))
return gen_array_ops.tile(np.expand_dims(np_array, 0), tiles) |
def apply_edits(edits, raw):
if (len(edits) != len(raw)):
print('Number of edits is not equal to number of characters', file=sys.stderr)
print((' word: %s\n edits: %s' % (raw, ', '.join(edits))), file=sys.stderr)
raise AssertionError
labels = [crf_label(raw[i], edits[i]) for i in range(len(raw))]
norew = ''.join((rewrite_with_label(raw[i], labels[i], False) for i in range(len(raw))))
rew = ''.join((rewrite_with_label(raw[i], labels[i], True) for i in range(len(raw))))
return (norew, rew) |
def bio_random_split(dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1, seed=0):
np.testing.assert_almost_equal(((frac_train + frac_valid) + frac_test), 1.0)
num_mols = len(dataset)
random.seed(seed)
all_idx = list(range(num_mols))
random.shuffle(all_idx)
train_idx = all_idx[:int((frac_train * num_mols))]
valid_idx = all_idx[int((frac_train * num_mols)):(int((frac_valid * num_mols)) + int((frac_train * num_mols)))]
test_idx = all_idx[(int((frac_valid * num_mols)) + int((frac_train * num_mols))):]
assert (len(set(train_idx).intersection(set(valid_idx))) == 0)
assert (len(set(valid_idx).intersection(set(test_idx))) == 0)
assert (((len(train_idx) + len(valid_idx)) + len(test_idx)) == num_mols)
train_dataset = dataset[torch.tensor(train_idx)]
valid_dataset = dataset[torch.tensor(valid_idx)]
if (frac_test == 0):
test_dataset = None
else:
test_dataset = dataset[torch.tensor(test_idx)]
return (train_dataset, valid_dataset, test_dataset) |
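A minimal run of bio_random_split on a toy dataset; a plain torch tensor stands in for the real dataset object (anything indexable by a LongTensor works), and numpy, random, and torch are assumed imported as in the snippet.

```python
import torch

toy_dataset = torch.arange(100)
train, valid, test = bio_random_split(toy_dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1, seed=0)
print(len(train), len(valid), len(test))   # 80 10 10
```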
def dict_from_string(string, allow_tuple=False, free_word=False):
if (string is None):
return {}
if isinstance(string, dict):
return string
parser = create_bnf(allow_tuple=allow_tuple, free_word=free_word)
out = {}
for r in parser.parseString(string, parseAll=True):
out.update(r)
return out |
class HighLevelAction():
def __init__(self, motion_goals):
self.motion_goals = motion_goals
def _check_valid(self):
for goal in self.motion_goals:
assert (len(goal) == 2)
(pos, orient) = goal
assert (orient in Direction.ALL_DIRECTIONS)
assert (type(pos) is tuple)
assert (len(pos) == 2)
def __getitem__(self, i):
return self.motion_goals[i] |
def ref_norm_normalization(x, p, axis, eps=1e-12):
if (p is None):
p = 2.0
y = x
y = np.abs(y)
y = np.power(y, p)
y = (np.sum(y, axis, keepdims=True) + eps)
y = np.power(y, (1.0 / p))
y = (x / y)
return y |
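A quick numerical check of ref_norm_normalization against the expected L2 behaviour; the input values and axis are illustrative.

```python
import numpy as np

x = np.array([[3.0, 4.0], [1.0, 0.0]])
print(ref_norm_normalization(x, p=2, axis=1))
# Rows come out with (approximately) unit L2 norm: [[0.6, 0.8], [1.0, 0.0]]
```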