code stringlengths 101 5.91M |
|---|
def false_negative_edges(true_adj, pred_adj, abs_tol=0.5):
    """Count off-diagonal disagreements between a true and predicted adjacency.

    The diagonal (self-loops) is excluded before counting; ``abs_tol`` is the
    tolerance forwarded to ``num_incorrect``.
    """
    residual = true_adj - pred_adj
    off_diagonal = remove_diag(residual)
    return num_incorrect(off_diagonal, abs_tol)
class MMBTModel(metaclass=DummyObject):
    # Import-guard stub: lets the module import cleanly without torch, but any
    # attempt to instantiate raises a helpful error via requires_backends.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class WeightedSumAndMax(nn.Module):
    """Graph readout that concatenates a learned weighted-sum pooling with a
    per-graph max pooling of node features.

    The output dimension is ``2 * in_feats`` (weighted-sum half + max half).
    """

    def __init__(self, in_feats):
        super(WeightedSumAndMax, self).__init__()
        self.weight_and_sum = WeightAndSum(in_feats)
        self.out_dim = 2 * in_feats

    def forward(self, bg, feats):
        """Return graph representations of shape (num_graphs, 2 * in_feats)."""
        summed = self.weight_and_sum(bg, feats)
        # local_scope keeps the temporary 'h' field from leaking into bg.
        with bg.local_scope():
            bg.ndata['h'] = feats
            maxed = dgl.max_nodes(bg, 'h')
        return torch.cat([summed, maxed], dim=1)
class TFDistilBertForMaskedLM():
    # Import-guard stub used when TensorFlow is not installed: construction and
    # from_pretrained both raise immediately via requires_tf.
    def __init__(self, *args, **kwargs):
        requires_tf(self)
    def from_pretrained(self, *args, **kwargs):
        # NOTE(review): declared as an instance method here although real model
        # classes expose from_pretrained as a classmethod - confirm intent.
        requires_tf(self)
class Processor(object):
    """Turns one raw sentence into BERT-style masked-LM feature arrays."""

    def __init__(self, vocab_file, max_seq_length):
        """Load a FullTokenizer from ``vocab_file`` and build the inverse vocab."""
        self.tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file)
        self.idx_to_word = self.inverse_vocab(self.tokenizer.vocab)
        self.max_seq_length = max_seq_length

    @staticmethod
    def inverse_vocab(vocab):
        """Invert a word->id vocabulary into an id->word dictionary.

        Bug fix: the original signature was ``def inverse_vocab(vocab)`` without
        ``self``/``@staticmethod``, so ``self.inverse_vocab(...)`` passed the
        instance as ``vocab``.
        """
        return {index: word for (word, index) in vocab.items()}

    def create_single_instance(self, sentence):
        """Build (input_ids, input_mask, segment_ids, masked_lm_positions,
        masked_lm_ids, masked_lm_weights) for a single sentence."""
        tokens_raw = self.tokenizer.tokenize(tokenization.convert_to_unicode(sentence))
        # Bug fix: the length budget applies to the *tokenized* sequence plus
        # [CLS]/[SEP], not to the raw character string.
        assert len(tokens_raw) <= (self.max_seq_length - 2)
        tokens = ['[CLS]'] + tokens_raw + ['[SEP]']
        # Single-sentence input: every token belongs to segment 0.
        segment_ids = [0] * len(tokens)
        (input_tokens, masked_lm_positions, masked_lm_labels) = self.create_pseudo_ground_truth(tokens)
        input_ids = self.tokenizer.convert_tokens_to_ids(input_tokens)
        input_mask = [1] * len(input_ids)
        masked_lm_ids = self.tokenizer.convert_tokens_to_ids(masked_lm_labels)
        masked_lm_weights = [1.0] * len(masked_lm_ids)
        return (input_ids, input_mask, segment_ids, masked_lm_positions, masked_lm_ids, masked_lm_weights)

    @staticmethod
    def create_pseudo_ground_truth(tokens):
        """Mark every non-special token as a "masked" position whose label is
        the token itself (pseudo ground truth; tokens are left unchanged).

        Bug fix: the original lacked ``self``/``@staticmethod`` and crashed when
        invoked as ``self.create_pseudo_ground_truth(tokens)``.
        """
        input_tokens = list(tokens)
        masked_lm_positions = []
        masked_lm_labels = []
        for (index, token) in enumerate(tokens):
            if token in ('[CLS]', '[SEP]'):
                continue
            # "Mask" the token with itself so the model sees the true word.
            input_tokens[index] = tokens[index]
            masked_lm_positions.append(index)
            masked_lm_labels.append(tokens[index])
        return (input_tokens, masked_lm_positions, masked_lm_labels)
def test_and_exchange_map(tester, model, distributed):
    """Run ``tester`` and return (bbox AP, segm AP), broadcasting the numbers
    from rank 0 to every worker when ``distributed`` is set."""
    results = tester(model=model, distributed=distributed)
    bbox_map, segm_map = 0.0, 0.0
    if is_main_process():
        map_results, _raw_results = results[0]
        bbox_map = map_results.results['bbox']['AP']
        segm_map = map_results.results['segm']['AP']
    if distributed:
        # Rank 0 holds the real values; everyone else receives them here.
        map_tensor = torch.tensor([bbox_map, segm_map], dtype=torch.float32, device=torch.device('cuda'))
        torch.distributed.broadcast(map_tensor, 0)
        bbox_map, segm_map = map_tensor[0].item(), map_tensor[1].item()
    return (bbox_map, segm_map)
class GPTJForQuestionAnswering(metaclass=DummyObject):
    # Import-guard stub: importable without torch installed; instantiating it
    # raises a helpful backend error via requires_backends.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def gpt_generate_causal_events(db_base_name, gpt, pred_data, source_data, inference_type: str='type', top_k: int=5, num_threads: int=16):
    """Ask the GPT client for cause events of each predicted effect event and
    persist the answers into a DDB dictionary database.

    Entries are keyed by ``str((seq_idx, original_idx))`` and, within each
    entry, by the relation/object (or date) queried. Already-answered queries
    are skipped both up front and inside the worker, so the function is
    resumable after interruption.
    """
    msg_head = 'Now I give you an effect event, and you give me three to four cause events.\n\n'
    def _process_one_type_or_time(idx, type_or_date, text):
        # Worker body: one GPT query followed by a write-back into the DB.
        try:
            existed_dict = DDB.at(db_base_name, inference_type, key=str(idx)).read()
            if ((existed_dict is not None) and (str(type_or_date) in existed_dict)):
                # Answer already cached by a previous run - nothing to do.
                return
            ret_body = gpt.query((msg_head + text))
            with DDB.at(db_base_name, inference_type).session() as (sess, obj):
                if (str(idx) in obj):
                    obj[str(idx)][str(type_or_date)] = ret_body
                else:
                    obj[str(idx)] = {str(type_or_date): ret_body}
                sess.write()
            print(idx, type_or_date)
        except Exception as e:
            # Best-effort: a failed query is logged and dropped, not retried.
            print('Error of', str(e))
    executor = ThreadPoolExecutor(max_workers=num_threads)
    init_db = DDB.at(db_base_name, inference_type).read()
    if (init_db is None):
        DDB.at(db_base_name, inference_type).create()
    # NOTE(review): only the last 10 prediction sequences are processed - confirm
    # the [-10:] slice is intentional and not a leftover debugging limit.
    for prediction_seq in pred_data[(- 10):]:
        for one_prediction in prediction_seq:
            seq_idx = one_prediction['seq_idx']
            original_idx = one_prediction['original_idx']
            idx = (str(seq_idx), str(original_idx))
            if (inference_type == 'type'):
                msg_content_dict = generate_gdelt_prompt_amazon(one_prediction, source_data, top_k=top_k, pred_type=True)
            else:
                msg_content_dict = generate_gdelt_prompt_amazon(one_prediction, source_data, top_k=top_k, pred_type=False)
            for (rel_or_obj, content) in msg_content_dict.items():
                # Fast path: skip work that was already present at startup.
                if ((init_db is not None) and (str(idx) in init_db) and (str(rel_or_obj) in init_db[str(idx)])):
                    continue
                executor.submit(_process_one_type_or_time, str(idx), rel_or_obj, content)
    executor.shutdown(wait=True)
def reset():
    """Restore all timer bookkeeping globals to their pristine state."""
    global _running_timer
    _running_timer = None
    _timer_stack.clear()
    _start_times.clear()
    _total_times.clear()
def create_model(opt):
    """Instantiate and initialize the model named by ``opt.model``.

    Args:
        opt: options object providing at least ``model`` and ``dataset_mode``.

    Returns:
        The initialized model instance.

    Raises:
        ValueError: if ``opt.model`` is not a recognized model name.
        AssertionError: if the dataset mode is not an aligned variant.
    """
    print(opt.model)

    # Lazy import factories: each model's heavy module is only imported when
    # that model is actually selected (matches the original elif chain).
    def _shiftnet():
        from models.shift_net.shiftnet_model import ShiftNetModel
        return ShiftNetModel()

    def _res_shiftnet():
        from models.res_shift_net.shiftnet_model import ResShiftNetModel
        return ResShiftNetModel()

    def _patch_soft_shiftnet():
        from models.patch_soft_shift.patch_soft_shiftnet_model import PatchSoftShiftNetModel
        return PatchSoftShiftNetModel()

    def _res_patch_soft_shiftnet():
        from models.res_patch_soft_shift.res_patch_soft_shiftnet_model import ResPatchSoftShiftNetModel
        return ResPatchSoftShiftNetModel()

    def _face_shiftnet():
        from models.face_shift_net.face_shiftnet_model import FaceShiftNetModel
        return FaceShiftNetModel()

    factories = {
        'shiftnet': _shiftnet,
        'res_shiftnet': _res_shiftnet,
        'patch_soft_shiftnet': _patch_soft_shiftnet,
        'res_patch_soft_shiftnet': _res_patch_soft_shiftnet,
        'face_shiftnet': _face_shiftnet,
    }
    if opt.model not in factories:
        raise ValueError('Model [%s] not recognized.' % opt.model)
    # Every shift-net variant supports only aligned dataset modes; checked
    # after name validation so unknown names still raise ValueError first.
    assert (opt.dataset_mode == 'aligned') or (opt.dataset_mode == 'aligned_resized')
    model = factories[opt.model]()
    model.initialize(opt)
    print('model [%s] was created' % model.name())
    return model
def evaluate(model, dataset, data):
    """Average per-sentence eval loss over one full pass of ``data``.

    Iterates batches until the dataset iterator is exhausted (signalled by
    ``tf.errors.OutOfRangeError``) and weights each batch's loss by its
    sentence count.
    """
    batch = dataset.get_batch(data)
    total_loss = 0
    total_count = 0
    try:
        while True:
            batch_input = dataset.next_batch(batch)
            _global_step, loss = model.eval(batch_input)
            count = len(batch_input.slens)
            total_count += count
            total_loss += loss * count
    except tf.errors.OutOfRangeError:
        # End of dataset - fall through to the final average.
        pass
    return total_loss / total_count
def parse_cmd_options(argv):
    """Parse this module's benchmark flags out of ``argv``.

    Unknown flags are silently ignored (``parse_known_args``) so the same
    argv can be shared with other modules' parsers.
    """
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('--batch_size', type=int, default=None, help='number of instances in one mini-batch.')
    add('--input_image_size', type=int, default=None, help='resolution of input image, usually 32 for CIFAR and 224 for ImageNet.')
    add('--save_dir', type=str, default=None, help='output directory')
    add('--repeat_times', type=int, default=1)
    add('--gpu', type=int, default=None)
    add('--fp16', action='store_true')
    module_opt, _unknown = parser.parse_known_args(argv)
    return module_opt
def WideResNet28x20(num_classes=10, activation='relu', dropRate=0.0, return_feature_map=False):
    """Factory for a WRN-28-20 (depth 28, widening factor 20)."""
    return WideResNet(
        28,
        num_classes,
        20,
        activation=activation,
        dropRate=dropRate,
        return_feature_map=return_feature_map,
    )
def makedirs(path):
    """Create ``path`` (with parents); an already-existing path is no error."""
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            return  # already present - treat as success
        raise
class BatchNorm(nn.Module):
    """Invertible batch-normalisation layer for normalising flows.

    ``forward`` returns ``(y, log|det J|)`` and ``inverse`` maps back;
    the scale is parameterised as ``exp(log_gamma)`` so it stays positive.
    Batch statistics are used in training mode, running statistics in eval
    mode. ``cond_y`` is accepted for interface compatibility and ignored.
    """
    def __init__(self, input_size, momentum=0.9, eps=1e-05):
        super().__init__()
        self.momentum = momentum
        self.eps = eps
        # Learned log-scale and shift, both initialised to the identity map.
        self.log_gamma = nn.Parameter(torch.zeros(input_size))
        self.beta = nn.Parameter(torch.zeros(input_size))
        self.register_buffer('running_mean', torch.zeros(input_size))
        self.register_buffer('running_var', torch.ones(input_size))
    def forward(self, x, cond_y=None):
        """Normalise ``x``; also return the elementwise log|det Jacobian|."""
        if self.training:
            self.batch_mean = x.mean(0)
            self.batch_var = x.var(0)
            # EMA update: running = momentum * running + (1 - momentum) * batch.
            self.running_mean.mul_(self.momentum).add_((self.batch_mean.data * (1 - self.momentum)))
            self.running_var.mul_(self.momentum).add_((self.batch_var.data * (1 - self.momentum)))
            mean = self.batch_mean
            var = self.batch_var
        else:
            mean = self.running_mean
            var = self.running_var
        x_hat = ((x - mean) / torch.sqrt((var + self.eps)))
        y = ((self.log_gamma.exp() * x_hat) + self.beta)
        # dy/dx = exp(log_gamma) / sqrt(var + eps), expressed in log space.
        log_abs_det_jacobian = (self.log_gamma - (0.5 * torch.log((var + self.eps))))
        return (y, log_abs_det_jacobian.expand_as(x))
    def inverse(self, y, cond_y=None):
        """Map ``y`` back to ``x``; return ``(x, log|det J|)`` of the inverse."""
        if self.training:
            # NOTE(review): uses the statistics of ``y`` here, which only
            # inverts forward() exactly if they coincide - confirm intent.
            mean = y.mean(0)
            var = y.var(0)
        else:
            mean = self.running_mean
            var = self.running_var
        x_hat = ((y - self.beta) * torch.exp((- self.log_gamma)))
        x = ((x_hat * torch.sqrt((var + self.eps))) + mean)
        log_abs_det_jacobian = ((0.5 * torch.log((var + self.eps))) - self.log_gamma)
        return (x, log_abs_det_jacobian.expand_as(x))
def pre_transform(data_ori):
    """Return a clone of ``data_ori`` with its graph structure replaced by the
    module-level standard edge index, edge types and node features."""
    data = data_ori.clone()
    data.edge_index = standard_edge_index
    data.edge_type = standard_edge_type
    data.input = standard_node_fea
    return data
def create_dir(path):
    """Create ``path`` (including parents); existing directories are fine.

    The previous implementation checked ``os.path.exists`` before creating,
    which is racy (TOCTOU): another process could create the directory between
    the check and the call. Attempting creation and ignoring EEXIST is atomic
    and preserves the observable behaviour.
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
def recreate_dirs(*dirs):
    """Delete and re-create every directory in ``dirs`` so each ends up empty."""
    for directory in dirs:
        # Remove any previous contents wholesale before re-creating.
        if os.path.exists(directory):
            shutil.rmtree(directory)
        os.makedirs(directory)
def loss_calculation(semantic, target):
    """Cross-entropy loss over per-pixel 22-way semantic predictions.

    Assumes 480x640 images; flattens predictions to (B*H*W, 22) rows and
    labels to one long class vector before applying the module-level CEloss.
    """
    batch = semantic.size()[0]
    num_pixels = 480 * 640
    # One class label per pixel across the whole batch.
    target = target.view(batch, -1).view(-1).contiguous()
    # (B, 22, H*W) -> (B, H*W, 22) -> (B*H*W, 22): one logits row per pixel.
    semantic = semantic.view(batch, 22, num_pixels).transpose(1, 2).contiguous().view(batch * num_pixels, 22).contiguous()
    return CEloss(semantic, target)
def format_results(results_df, config_list, param_list):
    """Join each trial's tuned hyper-parameters onto its results row and sort
    the combined table by best validation loss."""
    config_df = pd.DataFrame.from_dict(config_list)
    # One column per distinct tuned hyper-parameter, plus the join key.
    tuned = {list(hyper_option.option.keys())[0] for hyper_option in param_list}
    columns = list(tuned)
    columns.append(ConfigKW.PATH_OUTPUT)
    config_df = config_df[columns]
    # The output path uniquely identifies a trial in both tables.
    joined = config_df.set_index(ConfigKW.PATH_OUTPUT).join(results_df.set_index(ConfigKW.PATH_OUTPUT))
    joined = joined.reset_index()
    return joined.sort_values(by=['best_validation_loss'])
def make_custom_seris_splitter(preset_names):
    """Return ``(splitter, legend_note)`` for the named preset.

    Only the 'default' preset exists; its splitter labels runs by their
    ``her_failed_goal_option`` flat-parameter, filtering everything else.
    """
    if preset_names != 'default':
        raise NotImplementedError

    def custom_series_splitter(run):
        option = run['flat_params']['her_failed_goal_option']
        if option is None:
            return 'Distance Reward'
        if option == 'dist_behaviour':
            return 'Exact Match'
        return FILTERED

    # No legend annotation for the default preset.
    return (custom_series_splitter, None)
class TestLookaheadSwap(QiskitTestCase):
    """Unit tests for the LookaheadSwap transpiler pass."""
    def test_lookahead_swap_doesnt_modify_mapped_circuit(self):
        """A DAG already compatible with the coupling map passes through
        unchanged, and re-running the pass is idempotent."""
        qr = QuantumRegister(3, name='q')
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[0], qr[2])
        circuit.cx(qr[0], qr[1])
        original_dag = circuit_to_dag(circuit)
        # Both CX pairs are directly connected under this coupling map.
        coupling_map = CouplingMap([[0, 1], [0, 2]])
        mapped_dag = LookaheadSwap(coupling_map).run(original_dag)
        self.assertEqual(original_dag, mapped_dag)
        remapped_dag = LookaheadSwap(coupling_map).run(mapped_dag)
        self.assertEqual(mapped_dag, remapped_dag)
    def test_lookahead_swap_should_add_a_single_swap(self):
        """One non-adjacent CX on a 0-1-2 line needs exactly one extra swap."""
        qr = QuantumRegister(3)
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[0], qr[2])
        dag_circuit = circuit_to_dag(circuit)
        coupling_map = CouplingMap([[0, 1], [1, 2]])
        mapped_dag = LookaheadSwap(coupling_map).run(dag_circuit)
        self.assertEqual(mapped_dag.count_ops().get('swap', 0), (dag_circuit.count_ops().get('swap', 0) + 1))
    def test_lookahead_swap_finds_minimal_swap_solution(self):
        """CX(0,2); CX(0,1) on a line admits a one-swap solution; the pass
        must find it rather than inserting a swap per gate."""
        qr = QuantumRegister(3)
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[0], qr[2])
        circuit.cx(qr[0], qr[1])
        dag_circuit = circuit_to_dag(circuit)
        coupling_map = CouplingMap([[0, 1], [1, 2]])
        mapped_dag = LookaheadSwap(coupling_map).run(dag_circuit)
        self.assertEqual(mapped_dag.count_ops().get('swap', 0), (dag_circuit.count_ops().get('swap', 0) + 1))
    def test_lookahead_swap_maps_measurements(self):
        """Measurements must follow their qubits to the post-swap locations."""
        qr = QuantumRegister(3)
        cr = ClassicalRegister(2)
        circuit = QuantumCircuit(qr, cr)
        circuit.cx(qr[0], qr[2])
        circuit.measure(qr[0], cr[0])
        circuit.measure(qr[2], cr[1])
        dag_circuit = circuit_to_dag(circuit)
        coupling_map = CouplingMap([[0, 1], [1, 2]])
        mapped_dag = LookaheadSwap(coupling_map).run(dag_circuit)
        mapped_measure_qargs = set((op.qargs[0] for op in mapped_dag.named_nodes('measure')))
        # Either endpoint may have been swapped toward the middle, so the
        # measures legitimately land on (q0, q1) or (q1, q2).
        self.assertIn(mapped_measure_qargs, [set(((QuantumRegister(3, 'q'), 0), (QuantumRegister(3, 'q'), 1))), set(((QuantumRegister(3, 'q'), 1), (QuantumRegister(3, 'q'), 2)))])
    def test_lookahead_swap_maps_barriers(self):
        """Barriers must likewise be remapped onto the post-swap qubits."""
        qr = QuantumRegister(3)
        cr = ClassicalRegister(2)
        circuit = QuantumCircuit(qr, cr)
        circuit.cx(qr[0], qr[2])
        circuit.barrier(qr[0], qr[2])
        dag_circuit = circuit_to_dag(circuit)
        coupling_map = CouplingMap([[0, 1], [1, 2]])
        mapped_dag = LookaheadSwap(coupling_map).run(dag_circuit)
        mapped_barrier_qargs = [set(op.qargs) for op in mapped_dag.named_nodes('barrier')][0]
        self.assertIn(mapped_barrier_qargs, [set(((QuantumRegister(3, 'q'), 0), (QuantumRegister(3, 'q'), 1))), set(((QuantumRegister(3, 'q'), 1), (QuantumRegister(3, 'q'), 2)))])
def unpack_tracking_results(download_path, output_path=None):
    """Unzip every downloaded tracker run archive into the results tree.

    Expects the layout ``download_path/<tracker>/<run>.zip`` and produces
    ``output_path/<tracker>/<run>/``. When ``output_path`` is omitted, the
    configured ``env_settings().results_path`` is used.
    """
    if output_path is None:
        output_path = env_settings().results_path
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    for tracker in os.listdir(download_path):
        tracker_out = os.path.join(output_path, tracker)
        for run_file in os.listdir(os.path.join(download_path, tracker)):
            if not os.path.exists(tracker_out):
                os.makedirs(tracker_out)
            archive = os.path.join(download_path, tracker, run_file)
            # Strip the '.zip' suffix to name the extraction directory.
            destination = os.path.join(tracker_out, run_file[:-4])
            shutil.unpack_archive(archive, destination, 'zip')
def search(query_ids: np.ndarray, query_embeds: np.ndarray, corpus_ids: np.ndarray, index: faiss.IndexPQ, topk: int):
    """Retrieve the ``topk`` nearest corpus entries for every query embedding.

    Returns ``(scores, ids)`` where ``ids`` holds corpus ids (the raw faiss
    row positions are translated through ``corpus_ids``).
    """
    topk_scores, topk_idx = index.search(query_embeds, topk)
    # Map faiss row positions back to the caller's corpus identifiers.
    topk_ids = np.vstack([corpus_ids[rows] for rows in topk_idx])
    assert len(query_ids) == len(topk_scores) == len(topk_ids)
    return (topk_scores, topk_ids)
class SkipBlock(nn.Module):
    """Conv -> BatchNorm -> activation unit used on skip connections."""

    def __init__(self, in_channel, out_channel, kernel_size, bias, pad, act_fun):
        super(SkipBlock, self).__init__()
        convolution = conv(in_f=in_channel, out_f=out_channel, kernel_size=kernel_size, bias=bias, pad=pad)
        normalization = bn(num_features=out_channel)
        activation = act(act_fun=act_fun)
        self.op = nn.Sequential(convolution, normalization, activation)

    def forward(self, data):
        """Apply the conv/norm/activation pipeline to ``data``."""
        return self.op(data)
def qualification_loss(x_minus, x_plus, y_minus, y_plus, a, b, c, confidence=(- 0.1)):
    """Loss qualifying the bound parameters (a, b, c) over [x_minus, x_plus].

    Two checks are run through ``ts.tanh_lower``: one with the full offset
    ``b * y_minus + c`` and one with the slope term zeroed (offset ``c``).
    Returns ``(loss, valid)`` where ``valid`` is truthy only when both raw
    losses are <= 0; each loss term is clamped from below at ``confidence``
    so already-satisfied constraints contribute a bounded amount.

    NOTE(review): ``y_plus`` is accepted but never used - confirm intent.
    """
    alpha1 = torch.sigmoid(y_minus)
    loss1 = ts.tanh_lower(alpha1, a, ((b * y_minus) + c), x_minus, x_plus, plot=False, num=0)
    valid = (loss1 <= 0)
    loss1 = torch.clamp(loss1, min=confidence)
    # Second check with the slope contribution removed (sigmoid(0) = 0.5).
    alpha2 = torch.sigmoid((y_minus * 0))
    loss2 = ts.tanh_lower(alpha2, a, c, x_minus, x_plus, plot=False, num=0)
    valid = (valid * (loss2 <= 0))
    loss2 = torch.clamp(loss2, min=confidence)
    loss = (loss1 + loss2)
    return (loss, valid)
def update_datasets(self_adaptation=False):
    """Point the global ``cfg`` at the dataset selected by ``cfg.db_name``.

    Sets the data root, attribute dimensionality, seen-class count and all
    derived split/file paths, then derives the test report path (suffix
    'sa' for self-adaptation runs, 'hybrid' otherwise).
    """
    presets = {
        'AwA2': ('./data/AwA2/', 85, 40),
        'CUB': ('./data/CUB/', 312, 150),
        'SUN': ('./data/SUN/', 102, 645),
    }
    if cfg.db_name not in presets:
        raise NotImplementedError
    (cfg.data_root, cfg.attr_dims, cfg.nseen) = presets[cfg.db_name]
    # Files common to all datasets, rooted at the selected data directory.
    cfg.attribute = osp.join(cfg.data_root, 'predicate-matrix-continuous.txt')
    cfg.class_name = osp.join(cfg.data_root, 'classes.txt')
    cfg.image = osp.join(cfg.data_root, 'JPEGImages')
    cfg.ss_train = osp.join(cfg.data_root, 'trainclasses.txt')
    cfg.ss_test = osp.join(cfg.data_root, 'testclasses.txt')
    cfg.ps_train = osp.join(cfg.data_root, 'proposed_split/trainval_ps.txt')
    cfg.ps_test_seen = osp.join(cfg.data_root, 'proposed_split/test_seen_ps.txt')
    cfg.ps_test_unseen = osp.join(cfg.data_root, 'proposed_split/test_unseen_ps.txt')
    cfg.ps_seen_cls = osp.join(cfg.data_root, 'proposed_split/seen_cls.txt')
    cfg.ps_unseen_cls = osp.join(cfg.data_root, 'proposed_split/unseen_cls.txt')
    postfix = 'sa' if self_adaptation else 'hybrid'
    cfg.test.report_path = osp.join(cfg.test.report_base_path, f'{cfg.ckpt_name}_{cfg.test.setting}_{cfg.test.imload_mode}_{postfix}.txt')
class GroupedBatchSampler(BatchSampler):
    """Wraps a sampler to produce mini-batches whose elements all share the
    same group id, while following the wrapped sampler's ordering as closely
    as possible.

    Args:
        sampler: base sampler producing dataset indices.
        group_ids: 1-D tensor-like mapping each dataset index to its group.
        batch_size: number of elements per batch.
        drop_uneven: when True, drop batches smaller than ``batch_size``.
    """
    def __init__(self, sampler, group_ids, batch_size, drop_uneven=False):
        if (not isinstance(sampler, Sampler)):
            raise ValueError('sampler should be an instance of torch.utils.dataset.Sampler, but got sampler={}'.format(sampler))
        self.sampler = sampler
        self.group_ids = torch.as_tensor(group_ids)
        assert (self.group_ids.dim() == 1)
        self.batch_size = batch_size
        self.drop_uneven = drop_uneven
        # Sorted unique group ids, fixed for the sampler's lifetime.
        self.groups = torch.unique(self.group_ids).sort(0)[0]
    def _prepare_batches(self):
        """Materialise one epoch of group-homogeneous batches."""
        dataset_size = len(self.group_ids)
        sampled_ids = torch.as_tensor(list(self.sampler))
        # order[i] = position of dataset index i in this epoch's sample
        # sequence, or -1 if the sampler did not emit index i.
        order = torch.full((dataset_size,), (- 1), dtype=torch.int64)
        order[sampled_ids] = torch.arange(len(sampled_ids))
        mask = (order >= 0)
        # Partition the sampled ids by group, keeping the sampler's relative
        # order within each group.
        clusters = [((self.group_ids == i) & mask) for i in self.groups]
        relative_order = [order[cluster] for cluster in clusters]
        permutation_ids = [s[s.sort()[1]] for s in relative_order]
        permuted_clusters = [sampled_ids[idx] for idx in permutation_ids]
        # Chunk each group into batch_size pieces, then reorder all batches so
        # they appear in the order their first element was originally sampled.
        splits = [c.split(self.batch_size) for c in permuted_clusters]
        merged = tuple(itertools.chain.from_iterable(splits))
        first_element_of_batch = [t[0].item() for t in merged]
        inv_sampled_ids_map = {v: k for (k, v) in enumerate(sampled_ids.tolist())}
        first_index_of_batch = torch.as_tensor([inv_sampled_ids_map[s] for s in first_element_of_batch])
        permutation_order = first_index_of_batch.sort(0)[1].tolist()
        batches = [merged[i].tolist() for i in permutation_order]
        if self.drop_uneven:
            # Keep only full batches when requested.
            kept = []
            for batch in batches:
                if (len(batch) == self.batch_size):
                    kept.append(batch)
            batches = kept
        return batches
    def __iter__(self):
        batches = self._prepare_batches()
        self._batches = batches
        return iter(batches)
    def __len__(self):
        # Length requires drawing an epoch; cache it so a following __iter__
        # stays consistent with the reported length.
        if (not hasattr(self, '_batches')):
            self._batches = self._prepare_batches()
        return len(self._batches)
def update_q(critic: Model, target_value: Model, batch: Batch, discount: float) -> Tuple[(Model, InfoDict)]:
    """One TD update of the twin critics toward r + discount * mask * V(s')."""
    next_v = target_value(batch.next_observations)
    # Bellman target; masks zero out the bootstrap term at terminal states.
    target_q = batch.rewards + discount * batch.masks * next_v

    def critic_loss_fn(critic_params: Params) -> Tuple[(jnp.ndarray, InfoDict)]:
        q1, q2 = critic.apply({'params': critic_params}, batch.observations, batch.actions)
        loss = ((q1 - target_q) ** 2 + (q2 - target_q) ** 2).mean()
        aux = {'critic_loss': loss, 'q1': q1.mean(), 'q2': q2.mean()}
        return (loss, aux)

    new_critic, info = critic.apply_gradient(critic_loss_fn)
    return (new_critic, info)
_module
class Tusimple(nn.Module):
    """TuSimple lane-detection evaluator: converts segmentation/existence
    predictions to lane coordinate files plus a JSON submission, then scores
    the submission with the official LaneEval benchmark."""
    def __init__(self, cfg):
        super(Tusimple, self).__init__()
        self.cfg = cfg
        # Prediction coordinate files go under <work_dir>/output/coord_output.
        exp_dir = os.path.join(self.cfg.work_dir, 'output')
        if (not os.path.exists(exp_dir)):
            os.mkdir(exp_dir)
        self.out_path = os.path.join(exp_dir, 'coord_output')
        if (not os.path.exists(self.out_path)):
            os.mkdir(self.out_path)
        # Accumulates one JSON line per evaluated image until summarize().
        self.dump_to_json = []
        self.thresh = cfg.evaluator.thresh
        self.logger = get_logger('resa')
        if cfg.view:
            self.view_dir = os.path.join(self.cfg.work_dir, 'vis')
    def evaluate_pred(self, dataset, seg_pred, exist_pred, batch):
        """Decode one batch of predictions into per-image lane files and
        queue the matching JSON records."""
        img_name = batch['meta']['img_name']
        img_path = batch['meta']['full_img_path']
        for b in range(len(seg_pred)):
            seg = seg_pred[b]
            # Binarise the per-lane existence scores at 0.5.
            exist = [(1 if (exist_pred[(b, i)] > 0.5) else 0) for i in range((self.cfg.num_classes - 1))]
            lane_coords = dataset.probmap2lane(seg, exist, thresh=self.thresh)
            # Sort each lane's points by their y coordinate.
            for i in range(len(lane_coords)):
                lane_coords[i] = sorted(lane_coords[i], key=(lambda pair: pair[1]))
            # Mirror the dataset's directory layout under out_path, replacing
            # the image extension with 'lines.txt'.
            path_tree = split_path(img_name[b])
            (save_dir, save_name) = (path_tree[(- 3):(- 1)], path_tree[(- 1)])
            save_dir = os.path.join(self.out_path, *save_dir)
            save_name = (save_name[:(- 3)] + 'lines.txt')
            save_name = os.path.join(save_dir, save_name)
            if (not os.path.exists(save_dir)):
                os.makedirs(save_dir, exist_ok=True)
            with open(save_name, 'w') as f:
                for l in lane_coords:
                    for (x, y) in l:
                        print('{} {}'.format(x, y), end=' ', file=f)
                    print(file=f)
            # Build the TuSimple submission record for this image.
            json_dict = {}
            json_dict['lanes'] = []
            json_dict['h_sample'] = []
            json_dict['raw_file'] = os.path.join(*path_tree[(- 4):])
            json_dict['run_time'] = 0
            for l in lane_coords:
                if (len(l) == 0):
                    continue
                json_dict['lanes'].append([])
                for (x, y) in l:
                    json_dict['lanes'][(- 1)].append(int(x))
            # h_sample rows are taken from the first lane's y values.
            for (x, y) in lane_coords[0]:
                json_dict['h_sample'].append(y)
            self.dump_to_json.append(json.dumps(json_dict))
            if self.cfg.view:
                # Optional visual dump of the predicted lanes on the image.
                img = cv2.imread(img_path[b])
                new_img_name = img_name[b].replace('/', '_')
                save_dir = os.path.join(self.view_dir, new_img_name)
                dataset.view(img, lane_coords, save_dir)
    def evaluate(self, dataset, output, batch):
        """Post-process raw network output and delegate to evaluate_pred."""
        (seg_pred, exist_pred) = (output['seg'], output['exist'])
        seg_pred = F.softmax(seg_pred, dim=1)
        seg_pred = seg_pred.detach().cpu().numpy()
        exist_pred = exist_pred.detach().cpu().numpy()
        self.evaluate_pred(dataset, seg_pred, exist_pred, batch)
    def summarize(self):
        """Write the accumulated JSON submission, benchmark it, and return
        the achieved accuracy (the JSON buffer is cleared afterwards)."""
        best_acc = 0
        output_file = os.path.join(self.out_path, 'predict_test.json')
        with open(output_file, 'w+') as f:
            for line in self.dump_to_json:
                print(line, end='\n', file=f)
        (eval_result, acc) = LaneEval.bench_one_submit(output_file, self.cfg.test_json_file)
        self.logger.info(eval_result)
        self.dump_to_json = []
        best_acc = max(acc, best_acc)
        return best_acc
_materialize('core')
class ConstPad(Pad):
    # Convenience subclass of Pad with the mode fixed to 'constant'; the
    # positional per-side padding amounts are collected into a tuple and
    # forwarded unchanged.
    def __init__(self, *padding_list):
        super().__init__(padding_list, 'constant')
class OnlineItemSimilarity():
    """Item-to-item similarity computed on the fly from an embedding table
    (raw dot products), with min-max normalisation of the scores."""

    def __init__(self, item_size):
        self.item_size = item_size
        self.item_embeddings = None
        self.cuda_condition = torch.cuda.is_available()
        self.device = torch.device('cuda' if self.cuda_condition else 'cpu')
        # Every item id in [0, item_size), used to materialise the full matrix.
        self.total_item_list = torch.tensor([i for i in range(self.item_size)], dtype=torch.long).to(self.device)

    def update_embedding_matrix(self, item_embeddings):
        """Snapshot a new embedding table and refresh the score range.

        Note: the original also printed the whole embedding module here; that
        debug output has been removed.
        """
        self.item_embeddings = copy.deepcopy(item_embeddings)
        self.base_embedding_matrix = self.item_embeddings(self.total_item_list)
        (self.max_score, self.min_score) = self.get_maximum_minimum_sim_scores()

    def get_maximum_minimum_sim_scores(self):
        """Scan items 1..item_size-1 for the global max/min dot-product score."""
        (max_score, min_score) = ((- 1), 100)
        for item_idx in range(1, self.item_size):
            try:
                item_vector = self.item_embeddings(torch.tensor(item_idx).to(self.device)).view((- 1), 1)
                item_similarity = torch.mm(self.base_embedding_matrix, item_vector).view((- 1))
                max_score = max(torch.max(item_similarity), max_score)
                min_score = min(torch.min(item_similarity), min_score)
            except Exception as exc:
                # Bug fix: the original bare ``except`` printed the debug
                # string 'ssssss' and silently swallowed every error,
                # including KeyboardInterrupt/SystemExit.
                print('failed to score item {}: {}'.format(item_idx, exc))
                continue
        return (max_score, min_score)

    def most_similar(self, item_idx, top_k=1, with_score=False):
        """Return the ``top_k`` item ids most similar to ``item_idx``,
        excluding the query item itself; with normalised scores if asked."""
        item_idx = torch.tensor(item_idx, dtype=torch.long).to(self.device)
        item_vector = self.item_embeddings(item_idx).view((- 1), 1)
        item_similarity = torch.mm(self.base_embedding_matrix, item_vector).view((- 1))
        # Min-max normalise so scores are comparable across table refreshes.
        item_similarity = (item_similarity - self.min_score) / (self.max_score - self.min_score)
        # Ask for one extra neighbour since the query item ranks itself first.
        (values, indices) = item_similarity.topk(top_k + 1)
        item_list = indices.tolist()
        if with_score:
            score_list = values.tolist()
            if item_idx in item_list:
                position = item_list.index(item_idx)
                item_list.remove(item_idx)
                score_list.pop(position)
            return list(zip(item_list, score_list))
        if item_idx in item_list:
            item_list.remove(item_idx)
        return item_list
class Block(nn.Module):
    """Inverted-residual block: 1x1 expansion -> depthwise conv ->
    squeeze-excite -> 1x1 projection, with an optional skip connection.

    The skip (plus drop-connect during training) only applies when the
    stride is 1 and the channel count is unchanged.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride, expand_ratio=1, se_ratio=0.0, drop_rate=0.0):
        super(Block, self).__init__()
        self.stride = stride
        self.drop_rate = drop_rate
        self.expand_ratio = expand_ratio
        channels = (expand_ratio * in_channels)
        self.conv1 = nn.Conv2d(in_channels, channels, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(channels)
        # Depthwise conv (groups == channels); the padding choice keeps the
        # spatial size for the supported kernel sizes 3 and 5.
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=kernel_size, stride=stride, padding=(1 if (kernel_size == 3) else 2), groups=channels, bias=False)
        self.bn2 = nn.BatchNorm2d(channels)
        # SE bottleneck width is derived from the *input* channel count.
        se_channels = int((in_channels * se_ratio))
        self.se = SE(channels, se_channels)
        self.conv3 = nn.Conv2d(channels, out_channels, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels)
        self.has_skip = ((stride == 1) and (in_channels == out_channels))
    def forward(self, x):
        # Skip the 1x1 expansion entirely when expand_ratio == 1.
        out = (x if (self.expand_ratio == 1) else swish(self.bn1(self.conv1(x))))
        out = swish(self.bn2(self.conv2(out)))
        out = self.se(out)
        # Linear projection: no activation after the final BN.
        out = self.bn3(self.conv3(out))
        if self.has_skip:
            if (self.training and (self.drop_rate > 0)):
                out = drop_connect(out, self.drop_rate)
            out = (out + x)
        return out
class ConvNet(nn.Module):
    """Dilated CNN binary classifier for spectrogram-like inputs.

    Four dilated conv blocks feed a 3-layer MLP head ending in a sigmoid, so
    ``forward`` returns probabilities in [0, 1] with shape (batch, 1).
    """

    def __init__(self, input_size=(1, 257, 1091)):
        super(ConvNet, self).__init__()
        self.features = nn.Sequential(nn.Conv2d(1, 32, kernel_size=(3, 3), padding=(2, 2), dilation=(2, 2)), nn.BatchNorm2d(32), nn.ReLU(), nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(2, 2), dilation=(2, 2)), nn.BatchNorm2d(32), nn.ReLU(), nn.Conv2d(32, 64, kernel_size=(3, 3), padding=(2, 2), dilation=(2, 2)), nn.BatchNorm2d(64), nn.ReLU(), nn.MaxPool2d(kernel_size=2), nn.Conv2d(64, 64, kernel_size=(3, 3), dilation=(2, 2)), nn.BatchNorm2d(64), nn.ReLU(), nn.MaxPool2d(kernel_size=2))
        # Probe the conv stack once to size the first fully-connected layer.
        self.flat_feats = self._get_flat_feats(input_size, self.features)
        self.classifier = nn.Sequential(nn.Linear(self.flat_feats, 64), nn.BatchNorm1d(64), nn.ReLU(), nn.Dropout(p=0.5), nn.Linear(64, 64), nn.BatchNorm1d(64), nn.ReLU(), nn.Dropout(p=0.6), nn.Linear(64, 64), nn.BatchNorm1d(64), nn.ReLU(), nn.Dropout(p=0.7), nn.Linear(64, 1), nn.Sigmoid())

        def _weights_init(m):
            # Bug fix: the original used isinstance(m, (nn.Conv2d or nn.Linear)),
            # and ``A or B`` evaluates to just ``A`` - so Linear layers were
            # never Kaiming-initialised and BatchNorm1d layers were never reset.
            # isinstance requires a tuple of types.
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                kaiming_normal_(m.weight)
            elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

        self.apply(_weights_init)

    def _get_flat_feats(self, in_size, feats):
        """Run a dummy forward through ``feats`` to count flattened features."""
        f = feats(Variable(ones(1, *in_size)))
        return int(np.prod(f.size()[1:]))

    def forward(self, x):
        feats = self.features(x)
        flat_feats = feats.view((- 1), self.flat_feats)
        out = self.classifier(flat_feats)
        return out
def mahalanobis_metric(p, S, args):
    """Mahalanobis distance of each row of ``p`` to the distribution of ``S``.

    The covariance of ``S`` is regularised with ``args.cov_gamma * I`` before
    inversion; computation moves to the GPU when ``args.CUDA`` is set.
    Returns a detached 1-D tensor of distances (one per row of ``p``).
    """
    mu = torch.mean(S, dim=0, keepdim=True)
    centered = S - mu
    cov = torch.matmul(centered.t(), centered)
    identity = torch.eye(p.shape[1], p.shape[1])
    if args.CUDA:
        identity = Variable(identity).cuda()
    precision = (cov + args.cov_gamma * identity).inverse()
    delta = p - mu
    distances = delta.mm(precision).mm(delta.t())
    return distances.diag().sqrt().data
def parse_json(embeddings):
    """Stack the emb_A / emb_B / emb_P columns of ``embeddings`` into one
    (n_rows, 3*768) feature matrix, row-aligned after sorting the index."""
    embeddings.sort_index(inplace=True)
    n_rows = len(embeddings)
    X = np.zeros((n_rows, 3 * 768))
    for row in range(n_rows):
        # Concatenate the three 768-d vectors for this example.
        parts = [np.array(embeddings.loc[(row, col)]) for col in ('emb_A', 'emb_B', 'emb_P')]
        X[row] = np.concatenate(parts)
    return X
def row_accuracy(row, model):
    """Return the ground-truth value (columns A/B/N) at the class that
    ``model``'s prediction columns score highest."""
    truth = np.array([row['A'], row['B'], row['N']])
    scores = np.array([row[model + '-A'], row[model + '-B'], row[model + '-N']])
    return truth[np.argmax(scores)]
def get_dtype_and_ctype(type_obj: Any) -> Tuple[(np.dtype, Any)]:
    """Resolve ``type_obj`` (a name string, a Python type, or a dtype-like
    object) into a matching (numpy dtype, ctypes type) pair.

    Raises RuntimeError when no type name can be inferred; asserts that the
    name is known and that numpy and ctypes agree on the element size.
    """
    if isinstance(type_obj, str):
        type_str = type_obj
    elif hasattr(type_obj, '__name__'):
        type_str = type_obj.__name__
    elif hasattr(type_obj, 'name'):
        type_str = type_obj.name
    else:
        raise RuntimeError('Cannot infer type name from input')
    assert type_str in _str_to_ctype
    my_dtype = np.dtype(type_str)
    my_ctype = _str_to_ctype[type_str]
    # Sanity check: both libraries must agree on the byte width.
    assert my_dtype.itemsize == ctypes.sizeof(my_ctype)
    return (my_dtype, my_ctype)
def pad_all_cases(x, y, model_params, min_len_before=7, max_len_before=9, min_len_after=7, max_len_after=9, targetlength=9):
    """Generate samples for every (len_before, len_after) window combination,
    zero-pad them all to the maximal window length, and concatenate.

    Returns:
        (x_padded, y_concat, lengths, before_lengths): four row-aligned
        arrays; ``lengths`` holds each sample's unpadded time length and
        ``before_lengths`` the size of its 'before' context window.
    """
    total_x = []
    total_y = []
    total_len_x = []
    totle_len_before_x = []
    for l_before in range(min_len_before, (max_len_before + 1)):
        for l_after in range(min_len_after, (max_len_after + 1)):
            (case_x, case_y) = generate_samples(x.values, y, model_params, l_before, l_after, targetlength)
            # Record the true (unpadded) sequence length of every sample.
            len_x = np.full(case_x.shape[0], case_x.shape[1])
            len_before_sequence_x = np.full(case_x.shape[0], l_before)
            # Pad only the time axis (axis 1) up to the longest window size.
            npad = ((0, 0), (0, (((max_len_before - l_before) + max_len_after) - l_after)), (0, 0))
            same_length_x = np.pad(case_x, pad_width=npad, mode='constant', constant_values=0)
            total_x.append(same_length_x)
            total_y.append(case_y)
            total_len_x.append(len_x)
            totle_len_before_x.append(len_before_sequence_x)
    concatenated_x = np.concatenate(total_x, axis=0)
    concatenated_y = np.concatenate(total_y, axis=0)
    len_all_case = np.concatenate(total_len_x).ravel()
    len_before_all_case = np.concatenate(totle_len_before_x).ravel()
    return (concatenated_x, concatenated_y, len_all_case, len_before_all_case)
def retry_with_exponential_backoff(errors: tuple, initial_delay: float=30, exponential_base: float=2, jitter: bool=True, max_retries: int=5):
    """Decorator factory: retry the wrapped call on ``errors`` with
    exponentially growing (optionally jittered) sleeps between attempts.

    Args:
        errors: exception types that trigger a retry; anything else propagates.
        initial_delay: seconds to wait before the first retry.
        exponential_base: multiplier applied to the delay after each failure.
        jitter: when True, additionally scales each step by a random factor
            in [1, 2).
        max_retries: number of retries allowed before giving up.

    Raises:
        Exception: once more than ``max_retries`` retries are needed.
    """
    import functools

    def decorator(func):
        # Bug fix: the original had a bare ``(func)`` expression (a no-op)
        # where the ``functools.wraps`` decorator belonged, so wrappers lost
        # the wrapped function's name and docstring.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            num_retries = 0
            delay = initial_delay
            while True:
                try:
                    return func(*args, **kwargs)
                except errors as e:
                    print(f'Error: {e}. Retrying in {delay} seconds...')
                    num_retries += 1
                    if num_retries > max_retries:
                        raise Exception(f'Maximum number of retries ({max_retries}) exceeded.')
                    delay *= exponential_base * (1 + jitter * random.random())
                    time.sleep(delay)
                # The original also had ``except Exception as e: raise e``,
                # which is a no-op - unmatched exceptions propagate anyway.
        return wrapper
    return decorator
class MetaSingletonHash(type):
    """Metaclass that interns instances by (class name, instance hash):
    constructing an object whose key was seen before returns the cached
    instance instead of the fresh one.

    Note: ``type.__call__`` (and hence ``__init__``) still runs on the
    throwaway instance before the cache lookup, matching the original
    behaviour.
    """

    def __call__(*args, **kwargs):
        cls = args[0]
        try:
            cache = cls._cache
        except AttributeError:
            # Bug fix: narrowed from a bare ``except:`` which swallowed
            # everything, including KeyboardInterrupt and SystemExit.
            cache = dict()
            cls._cache = cache
        obj = type.__call__(*args, **kwargs)
        key = (cls.__name__, obj.__hash__())
        return cache.setdefault(key, obj)
def evaluate_2nd_item_task_fastgcnnew(valid_batch_index, model, sess, valid_data, is_training):
    """Evaluate the 2nd-order item embedding-prediction task over the given
    validation batches and return (mean loss, mean Pearson correlation).

    ``valid_data`` is the 7-tuple of pre-split validation arrays; batches are
    cut from it by ``gfn.split_batch_item`` using each index in
    ``valid_batch_index``.
    """
    (evaluate_loss, evaluate_pearson) = (0.0, 0.0)
    (valid_target_item, valid_k_shot_user, valid_second_order_items, valid_third_order_users, valid_oracle_item_ebd, valid_mask_num_second_order_item, valid_mask_num_third_order_user) = valid_data
    for index in tqdm.tqdm(valid_batch_index):
        (batch_target_item, batch_kshot_user, batch_2nd_item, batch_3rd_user, batch_oracle_item_ebd, batch_mask_num_2nd_item, batch_mask_num_3rd_user) = gfn.split_batch_item(valid_target_item, valid_k_shot_user, valid_second_order_items, valid_third_order_users, valid_oracle_item_ebd, valid_mask_num_second_order_item, valid_mask_num_third_order_user, index)
        # NOTE(review): the oracle embedding is fed as ``model.target_item``
        # while ``batch_target_item`` goes unused - confirm this is intended.
        feed_dict = {model.target_item: batch_oracle_item_ebd, model.support_user_1st_pos: batch_kshot_user, model.training_phrase_user_task: is_training, model.support_item_2nd_pos: batch_2nd_item, model.training_phrase_item_task: is_training}
        (batch_evaluate_loss, batch_predict_ebd, batch_target_ebd) = sess.run([model.loss_2nd_item_pos, model.predict_i_2nd_pos, model.target_item], feed_dict)
        evaluate_loss += batch_evaluate_loss
        batch_pearson = Pearson_correlation(batch_predict_ebd, batch_target_ebd)
        evaluate_pearson += batch_pearson
    # Averages over the number of batches (not the number of samples).
    return ((evaluate_loss / len(valid_batch_index)), (evaluate_pearson / len(valid_batch_index)))
class ReusableHyperOptimizer(PathOptimizer):
    """Hyper-optimizer that caches contraction paths (optionally on disk) so
    repeated identical contractions skip the expensive search.

    Cache entries are keyed by a hash of (inputs, output, size_dict); each
    entry stores the raw path plus the sliced indices of the found tree.
    """
    suboptimizer = HyperOptimizer
    set_surface_order = False

    def __init__(self, *, directory=None, overwrite=False, hash_method='a', cache_only=False, **opt_kwargs):
        # One sub-optimizer per thread so ``last_opt`` can report the most
        # recent search result for the calling thread.
        self._suboptimizers = {}
        self._suboptimizer_kwargs = opt_kwargs
        if (directory is True):
            # Auto-name the cache directory from the path-relevant options so
            # incompatible settings never share a cache.
            directory = f'ctg_cache/opts{self.auto_hash_path_relevant_opts()}'
        self._cache = DiskDict(directory)
        self.overwrite = overwrite
        self._hash_method = hash_method
        self.cache_only = cache_only

    def last_opt(self):
        """Return the sub-optimizer last used by the current thread, if any."""
        return self._suboptimizers.get(threading.get_ident(), None)

    def get_path_relevant_opts(self):
        """Hashable tuple of the option values that can change the found path."""
        return tuple(((key, make_hashable(self._suboptimizer_kwargs.get(key, default))) for (key, default) in [('methods', None), ('minimize', 'flops'), ('max_repeats', 128), ('max_time', None), ('slicing_opts', None), ('slicing_reconf_opts', None), ('reconf_opts', None), ('compressed', False), ('multicontraction', False)]))

    def auto_hash_path_relevant_opts(self):
        """Stable hex digest of the path-relevant options."""
        return hashlib.sha1(pickle.dumps(self.get_path_relevant_opts())).hexdigest()

    def hash_query(self, inputs, output, size_dict):
        """Return (hash, missing) for a contraction query; ``missing`` is also
        True when ``overwrite`` forces a re-search."""
        h = hash_contraction(inputs, output, size_dict, self._hash_method)
        missing = (self.overwrite or (h not in self._cache))
        return (h, missing)

    def _compute_path(self, inputs, output, size_dict):
        # Run a fresh sub-optimizer search and remember it for this thread.
        opt = self.suboptimizer(**self._suboptimizer_kwargs)
        opt._search(inputs, output, size_dict)
        thrid = threading.get_ident()
        self._suboptimizers[thrid] = opt
        return {'path': opt.path, 'sliced_inds': tuple(opt.tree.sliced_inds)}

    def update_from_tree(self, tree, overwrite=True):
        """Seed or refresh the cache from an externally constructed tree."""
        (h, missing) = self.hash_query(tree.inputs, tree.output, tree.size_dict)
        if (overwrite or missing):
            self._cache[h] = {'path': tree.get_path(), 'sliced_inds': tuple(tree.sliced_inds)}

    def __call__(self, inputs, output, size_dict, memory_limit=None):
        """opt_einsum-style entry point: return just the contraction path."""
        (h, missing) = self.hash_query(inputs, output, size_dict)
        if missing:
            if self.cache_only:
                raise KeyError('Contraction missing from cache.')
            self._cache[h] = self._compute_path(inputs, output, size_dict)
        return self._cache[h]['path']

    def search(self, inputs, output, size_dict):
        """Return a contraction tree for the query, searching on a cache miss."""
        (h, missing) = self.hash_query(inputs, output, size_dict)
        if missing:
            if self.cache_only:
                raise KeyError('Contraction missing from cache.')
            self._cache[h] = self._compute_path(inputs, output, size_dict)
            # Bug fix: ``last_opt`` is a method - the original accessed
            # ``self.last_opt.tree``, which raises AttributeError on the
            # bound method object instead of returning the found tree.
            return self.last_opt().tree
        con = self._cache[h]
        if self.set_surface_order:
            tree = ContractionTreeCompressed.from_path(inputs, output, size_dict, path=con['path'])
        else:
            tree = ContractionTree.from_path(inputs, output, size_dict, path=con['path'])
        # Re-apply the cached slicing so the rebuilt tree matches the search.
        for ix in con['sliced_inds']:
            tree.remove_ind_(ix)
        return tree

    def cleanup(self):
        """Release/close the underlying (possibly on-disk) cache."""
        self._cache.cleanup()
def _train():
    """Entry point: build the training argument parser and dispatch to __train."""
    process_configs(target=__train, arg_parser=train_argparser())
class Path():
    """Plain container for a planned path: per-waypoint pose lists plus cost."""

    def __init__(self, x_list, y_list, yaw_list, direction_list, cost):
        # waypoint coordinates and headings
        self.x_list = x_list
        self.y_list = y_list
        self.yaw_list = yaw_list
        # per-segment travel direction flags (planner-defined encoding)
        self.direction_list = direction_list
        # total path cost as computed by the planner
        self.cost = cost
def _build_man_feature_extractor(feature_extractor_config, is_training, reuse_weights=None):
    """Construct the MAN MobileNet feature-pyramid extractor from its config proto."""
    hyperparams = hyperparams_builder.build(feature_extractor_config.conv_hyperparams, is_training)
    return MobileNetFeaturePyramidExtractor(
        feature_extractor_config.depth_multiplier,
        feature_extractor_config.min_depth,
        hyperparams,
        reuse_weights,
    )
# NOTE(review): the three leading lines look like stripped decorators
# (presumably '@pytest.mark.no_cover', '@pytest.mark.mujoco' and
# '@pytest.mark.timeout(300)') -- as written they are not valid Python;
# confirm against the original test module before running.
.no_cover
.mujoco
.timeout(300)
def test_te_ppo_metaworld_mt10():
    """Smoke test: the TE-PPO MetaWorld MT10 example script runs one epoch
    (batch size 100 per task) and exits with return code 0."""
    assert (subprocess.run([str((EXAMPLES_ROOT_DIR / 'tf/te_ppo_metaworld_mt10.py')), '--n_epochs', '1', '--batch_size_per_task', '100'], check=False).returncode == 0)
def init_args():
    """Parse CLI arguments for converting Cartesian XYZ to site-centred NEU.

    Returns
    -------
    argparse.Namespace
        Topocentric origin (``x0``, ``y0``, ``z0``) and the point to convert
        (``x``, ``y``, ``z``), all parsed as floats.
    """
    parser = argparse.ArgumentParser(description='Convert cartesian coordinate system to site-center NEU.')
    parser.add_argument('-x0', metavar='<x0>', dest='x0', type=float, help='topocentric X coordinate.')
    parser.add_argument('-y0', metavar='<y0>', dest='y0', type=float, help='topocentric Y coordinate.')
    # fixed help-string defect: 'topocentricZ' -> 'topocentric Z'
    parser.add_argument('-z0', metavar='<z0>', dest='z0', type=float, help='topocentric Z coordinate.')
    parser.add_argument('-x', metavar='<x>', dest='x', type=float, help='X coordinate will convert.')
    parser.add_argument('-y', metavar='<y>', dest='y', type=float, help='Y coordinate will convert.')
    parser.add_argument('-z', metavar='<z>', dest='z', type=float, help='Z coordinate will convert.')
    return parser.parse_args()
class NeuralProcessImg(nn.Module):
    """Neural Process specialised to images.

    Wraps a 2-D ``NeuralProcess`` (x = pixel coordinates, y = channel values)
    and converts image + mask pairs into (x, y) point sets before applying it.
    """

    def __init__(self, img_size, r_dim, z_dim, h_dim):
        super(NeuralProcessImg, self).__init__()
        self.img_size = img_size
        self.num_channels, self.height, self.width = img_size
        self.r_dim = r_dim
        self.z_dim = z_dim
        self.h_dim = h_dim
        # x_dim=2: (row, col) pixel coordinates; y_dim: per-pixel channels
        self.neural_process = NeuralProcess(x_dim=2, y_dim=self.num_channels, r_dim=r_dim, z_dim=z_dim, h_dim=h_dim)

    def forward(self, img, context_mask, target_mask):
        """Run the neural process on masked context/target pixels of ``img``."""
        ctx_x, ctx_y = img_mask_to_np_input(img, context_mask)
        tgt_x, tgt_y = img_mask_to_np_input(img, target_mask)
        return self.neural_process(ctx_x, ctx_y, tgt_x, tgt_y)
def parse_args():
    """Parse command-line options for the SQuAD 2.0 evaluation script.

    Prints the usage text and exits with status 1 when called with no
    arguments at all.
    """
    cli = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
    cli.add_argument('data_file', metavar='data.json', help='Input data JSON file.')
    cli.add_argument('pred_file', metavar='pred.json', help='Model predictions.')
    cli.add_argument('--out-file', '-o', metavar='eval.json', help='Write accuracy metrics to file (default is stdout).')
    cli.add_argument('--na-prob-file', '-n', metavar='na_prob.json', help='Model estimates of probability of no answer.')
    cli.add_argument('--na-prob-thresh', '-t', type=float, default=1.0, help='Predict "" if no-answer probability exceeds this (default = 1.0).')
    cli.add_argument('--out-image-dir', '-p', metavar='out_images', default=None, help='Save precision-recall curves to directory.')
    cli.add_argument('--verbose', '-v', action='store_true')
    if len(sys.argv) == 1:
        cli.print_help()
        sys.exit(1)
    return cli.parse_args()
class ImagePipelineOutput(metaclass=DummyObject):
    """Import-time placeholder used when the torch backend is unavailable.

    Follows the transformers/diffusers dummy-object pattern: any attempt to
    instantiate or load the class raises via ``requires_backends``.
    """
    # backend(s) required for the real implementation
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

    # NOTE(review): in the upstream pattern these are @classmethods; the
    # decorators appear stripped in this dump -- confirm against the source.
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch'])

    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch'])
def main():
    """CLI entry point: pull data from SQL and save it to disk.

    Parses the destination, output format, required architecture id and
    optional database/config overrides, then delegates to ``save_data``.
    """
    parser = argparse.ArgumentParser('Save data from SQL to disk')
    parser.add_argument('dest', help='Location to save the data to')
    parser.add_argument('--format', default='text', help='Format to save data in')
    parser.add_argument('--arch', type=int, help='Architecture of data to pull', required=True)
    parser.add_argument('--database', help='Database to pull from (if not default)')
    # fixed help-string typo: 'deafult' -> 'default'
    parser.add_argument('--config', help='Database configuration to use (if not default)')
    args = parser.parse_args()
    save_data(args.dest, args.arch, args.format, database=args.database, config=args.config)
def to_bh(data, bins, cumulative=False):
    """Fill a boost-histogram with variable bin edges from ``data``.

    When ``cumulative`` is True the bin contents are replaced in place by the
    total count minus the running sum (complement of the cumulative sum).
    """
    hist = bh.Histogram(bh.axis.Variable(bins))
    hist.fill(data)
    if cumulative:
        hist[:] = np.sum(hist.values()) - np.cumsum(hist)
    return hist
def padded_sequence_accuracy(logits, labels):
    """Per-sequence exact-match accuracy, ignoring padding (label id 0).

    Returns a tuple ``(seq_correct, weight)`` where ``seq_correct`` is 1.0 for
    each sequence whose every non-padding position is predicted exactly, and
    ``weight`` is the constant 1.0.
    """
    with tf.compat.v1.variable_scope('padded_sequence_accuracy', values=[logits, labels]):
        logits, labels = _pad_tensors_to_same_length(logits, labels)
        # mask out padding positions (label == 0)
        pad_mask = tf.cast(tf.not_equal(labels, 0), dtype=tf.float32)
        predictions = tf.cast(tf.argmax(input=logits, axis=-1), dtype=tf.int32)
        targets = tf.cast(labels, dtype=tf.int32)
        mismatches = tf.cast(tf.not_equal(predictions, targets), dtype=tf.float32) * pad_mask
        # reduce over every axis except the batch axis
        reduce_axes = list(range(1, len(predictions.get_shape())))
        seq_correct = 1.0 - tf.minimum(1.0, tf.reduce_sum(input_tensor=mismatches, axis=reduce_axes))
        return seq_correct, tf.constant(1.0)
def get_original_source_tweet(source_tweet_json: dict):
    """Return the original tweet payload when the given tweet is a retweet.

    Yields the 'retweeted_status' sub-object if present, otherwise None
    (matching the original implicit fall-through return).
    """
    return source_tweet_json.get('retweeted_status')
def reset():
    """Close any custom logger and restore ``Logger.DEFAULT`` as current."""
    if Logger.CURRENT is Logger.DEFAULT:
        return
    Logger.CURRENT.close()
    Logger.CURRENT = Logger.DEFAULT
    log('Reset logger')
class EncoderLayer(nn.Module):
    """Transformer encoder layer: self-attention followed by a 1x1-conv
    position-wise feed-forward block, each with residual + LayerNorm."""

    def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activation='relu'):
        super(EncoderLayer, self).__init__()
        # conventional default feed-forward width: 4 * d_model
        d_ff = d_ff or 4 * d_model
        self.attention = attention
        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)
        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)
        self.activation = F.relu if activation == 'relu' else F.gelu

    def forward(self, x, attn_mask=None, non_station_factor=None):
        """Return ``(encoded, attention_weights)`` for input ``x``."""
        attn_out, attn_weights = self.attention(x, x, x, attn_mask=attn_mask, non_station_factor=non_station_factor)
        x = self.norm1(x + self.dropout(attn_out))
        # feed-forward applied position-wise over the feature dimension
        hidden = self.dropout(self.activation(self.conv1(x.transpose(-1, 1))))
        ff_out = self.dropout(self.conv2(hidden).transpose(-1, 1))
        return self.norm2(x + ff_out), attn_weights
# NOTE(review): bare '_tokenizers' below looks like a stripped decorator.
_tokenizers
class SqueezeBertTokenizationTest(BertTokenizationTest):
    """Tokenizer test suite for SqueezeBERT (reuses the BERT test cases)."""

    tokenizer_class = SqueezeBertTokenizer
    rust_tokenizer_class = SqueezeBertTokenizerFast
    test_rust_tokenizer = True

    def get_rust_tokenizer(self, **kwargs):
        """Load the fast tokenizer from the suite's temporary directory."""
        return SqueezeBertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_sequence_builders(self):
        """``build_inputs_with_special_tokens`` wraps [CLS]/[SEP] around one
        or two sequences in the expected positions."""
        tokenizer = SqueezeBertTokenizer.from_pretrained('squeezebert/squeezebert-mnli-headless')
        ids_a = tokenizer.encode('sequence builders', add_special_tokens=False)
        ids_b = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        single = tokenizer.build_inputs_with_special_tokens(ids_a)
        pair = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
        cls_id = [tokenizer.cls_token_id]
        sep_id = [tokenizer.sep_token_id]
        assert single == cls_id + ids_a + sep_id
        assert pair == cls_id + ids_a + sep_id + ids_b + sep_id
def calc_all_metrics(pred):
    """Compute daily IC / rank-IC / raw-IC statistics from a prediction frame.

    ``pred`` must have a 'datetime' index level plus ``label`` and ``score``
    columns; returns a dict of mean ICs and their information ratios (ICIR),
    printing a one-line summary as a side effect.
    """
    by_day = pred.groupby(level='datetime')
    # robust (z-scored) IC, raw Pearson IC and Spearman rank IC, per day
    ic = by_day.apply(lambda g: robust_zscore(g.label).corr(robust_zscore(g.score)))
    raw_ic = by_day.apply(lambda g: g.label.corr(g.score))
    rank_ic = by_day.apply(lambda g: g.label.corr(g.score, method='spearman'))
    print(('Robust IC %.3f, Robust ICIR %.3f, Rank IC %.3f, Rank ICIR %.3f, Raw IC %.3f, Raw ICIR %.3f' % (ic.mean(), (ic.mean() / ic.std()), rank_ic.mean(), (rank_ic.mean() / rank_ic.std()), raw_ic.mean(), (raw_ic.mean() / raw_ic.std()))))
    return {
        'IC': ic.mean(),
        'ICIR': ic.mean() / ic.std(),
        'RankIC': rank_ic.mean(),
        'RankICIR': rank_ic.mean() / rank_ic.std(),
        'RawIC': raw_ic.mean(),
        'RawICIR': raw_ic.mean() / raw_ic.std(),
    }
class StartPage(tk.Frame):
    """Configuration screen of the genetic-art GUI.

    Lets the user pick a target image (file dialog or webcam), tune the
    evolutionary hyper-parameters via sliders and checkbuttons, and then
    hands everything to ``MainPage`` through module-level globals.
    """

    def __init__(self, parent, controller):
        """Build the full widget layout and apply the default slider values."""
        global start_page
        tk.Frame.__init__(self, parent)
        # expose this instance globally so other pages can reach it
        start_page = self
        self.target_image = ''
        self.controller = controller
        self.pil_image = None
        self.opencv_image_r_g_b = None
        # --- layout: top half holds the parameter grid, bottom half the
        # image-source buttons (left) and the target preview (right)
        self.top = tk.Frame(self, borderwidth=1, relief='solid')
        self.bottom = tk.Frame(self, borderwidth=1, relief='solid')
        self.top_box = tk.Frame(self.top, borderwidth=1, relief='solid')
        self.bottom_left_box = tk.Frame(self.bottom, borderwidth=1, relief='solid')
        self.bottom_right_box = tk.Frame(self.bottom, borderwidth=1, relief='solid')
        self.top.pack(side='top', expand=True, fill='both')
        self.bottom.pack(side='bottom', expand=True, fill='both')
        self.top_box.pack(expand=True, fill='both', padx=5, pady=5)
        self.bottom_left_box.pack(side='left', expand=True, fill='both', padx=5, pady=5)
        self.bottom_right_box.pack(side='right', expand=True, fill='both', padx=5, pady=5)
        # --- navigation row
        self.page_label = ttk.Label(self.top_box, text='Start Page', font=LARGE_FONT)
        self.page_label.grid(row=0, column=2, columnspan=2)
        self.main_page_button = ttk.Button(self.top_box, text='To Main Page', command=self.initialize_evolution)
        self.main_page_button.grid(row=1, column=2)
        self.stats_page_button = ttk.Button(self.top_box, text='To Statistics Page', command=self.to_stats_page)
        self.stats_page_button.grid(row=1, column=3)
        # --- population-shape sliders (columns 0-1)
        self.amount_of_parents_label = ttk.Label(self.top_box, text='Amount of Parents')
        self.amount_of_parents_slider = tk.Scale(self.top_box, from_=1, to=100, resolution=1, orient='horizontal')
        self.amount_of_parents_label.grid(row=2, column=0)
        self.amount_of_parents_slider.grid(row=2, column=1)
        self.children_per_parent_label = ttk.Label(self.top_box, text='Children per Parent')
        self.children_per_parent_slider = tk.Scale(self.top_box, from_=1, to=100, resolution=1, orient='horizontal')
        self.children_per_parent_label.grid(row=3, column=0)
        self.children_per_parent_slider.grid(row=3, column=1)
        self.vertices_label = ttk.Label(self.top_box, text='Number of Vertices')
        self.vertices_slider = tk.Scale(self.top_box, from_=3, to=20, orient='horizontal')
        self.vertices_label.grid(row=4, column=0)
        self.vertices_slider.grid(row=4, column=1)
        # shape-count sliders (polygons / circles / lines make up the genome)
        self.number_of_genes_label = ttk.Label(self.top_box, text='# of Genes')
        self.polygons_label = ttk.Label(self.top_box, text='Number of Polygons')
        self.circles_label = ttk.Label(self.top_box, text='Number of Circles')
        self.lines_label = ttk.Label(self.top_box, text='Number of Lines')
        self.polygons_slider = tk.Scale(self.top_box, from_=0, to=100, resolution=1, orient='horizontal')
        self.circles_slider = tk.Scale(self.top_box, from_=0, to=100, resolution=1, orient='horizontal')
        self.lines_slider = tk.Scale(self.top_box, from_=0, to=100, resolution=1, orient='horizontal')
        self.polygons_label.grid(row=5, column=0)
        self.circles_label.grid(row=6, column=0)
        self.lines_label.grid(row=7, column=0)
        self.polygons_slider.grid(row=5, column=1)
        self.circles_slider.grid(row=6, column=1)
        self.lines_slider.grid(row=7, column=1)
        # --- tk variables backing the mutation / run-length controls
        self.mutation_var = tk.DoubleVar()
        self.gene_structure_var = tk.DoubleVar()
        self.soft_mutation_var = tk.DoubleVar()
        self.save_image_rate_var = tk.IntVar()
        self.max_generation_var = tk.IntVar()
        self.alpha_limit_var = tk.DoubleVar()
        self.hybrid_soft_mutation_var = tk.IntVar()
        self.hybrid_medium_mutation_var = tk.IntVar()
        # NOTE(review): the DoubleVar below is immediately overwritten by a
        # BooleanVar on the next line -- the first assignment is dead code.
        self.mutation_type_var = tk.DoubleVar()
        self.mutation_type_var = tk.BooleanVar()
        self.crossover_mutation_var = tk.BooleanVar()
        # --- mutation-rate sliders (columns 2-3)
        self.mutation_label = ttk.Label(self.top_box, text='Mutation Probability')
        self.mutation_slider = tk.Scale(self.top_box, from_=0.05, to=1.0, resolution=0.05, orient='horizontal', variable=self.mutation_var)
        self.mutation_label.grid(row=2, column=2)
        self.mutation_slider.grid(row=2, column=3)
        self.gene_structure_label = ttk.Label(self.top_box, text='Genetic Restructure Rate')
        self.gene_structure_slider = tk.Scale(self.top_box, from_=0, to=1, resolution=0.1, orient='horizontal', variable=self.gene_structure_var)
        self.gene_structure_label.grid(row=3, column=2)
        self.gene_structure_slider.grid(row=3, column=3)
        self.soft_mutation_label = ttk.Label(self.top_box, text='Soft Mutation Rate')
        self.soft_mutation_slider = tk.Scale(self.top_box, from_=0.1, to=1.0, resolution=0.05, orient='horizontal', variable=self.soft_mutation_var)
        self.soft_mutation_label.grid(row=4, column=2)
        self.soft_mutation_slider.grid(row=4, column=3)
        self.save_rate_label = ttk.Label(self.top_box, text='Save Rate')
        self.save_rate_slider = tk.Scale(self.top_box, from_=10, to=10000, resolution=10, orient='horizontal', variable=self.save_image_rate_var)
        self.save_rate_label.grid(row=5, column=2)
        self.save_rate_slider.grid(row=5, column=3)
        self.max_generation_label = ttk.Label(self.top_box, text='Maximum Generations')
        self.max_generation_slider = tk.Scale(self.top_box, from_=0, to=100000, resolution=1000, orient='horizontal', variable=self.max_generation_var)
        self.max_generation_label.grid(row=6, column=2)
        self.max_generation_slider.grid(row=6, column=3)
        # --- hybrid mutation controls (columns 4-5)
        self.hybrid_soft_mutate_label = ttk.Label(self.top_box, text='Hybrid (Soft Mutation)')
        self.hybrid_soft_mutate_slider = tk.Scale(self.top_box, from_=0, to=10, resolution=1, orient='horizontal', variable=self.hybrid_soft_mutation_var)
        self.hybrid_medium_mutate_label = ttk.Label(self.top_box, text='Hybrid (Medium Mutation)')
        self.hybrid_medium_mutate_slider = tk.Scale(self.top_box, from_=0, to=10, resolution=1, orient='horizontal', variable=self.hybrid_medium_mutation_var)
        self.hybrid_soft_mutate_label.grid(row=2, column=4)
        self.hybrid_soft_mutate_slider.grid(row=2, column=5)
        self.hybrid_medium_mutate_label.grid(row=3, column=4)
        self.hybrid_medium_mutate_slider.grid(row=3, column=5)
        self.mutation_type_button = ttk.Checkbutton(self.top_box, text='Chunk Mutation', variable=self.mutation_type_var, onvalue=True, offvalue=False)
        self.mutation_type_button.var = self.mutation_type_var
        self.mutation_type_button.grid(row=4, column=5)
        self.crossover_mutation_button = ttk.Checkbutton(self.top_box, text='Crossover Mutation', variable=self.crossover_mutation_var, onvalue=True, offvalue=False, command=self.crossover_mutation_func)
        self.crossover_mutation_button.var = self.crossover_mutation_var
        self.crossover_mutation_button.grid(row=5, column=5)
        # --- default parameter values
        self.amount_of_parents_slider.set(1)
        self.children_per_parent_slider.set(5)
        self.vertices_slider.set(8)
        self.polygons_slider.set(25)
        self.circles_slider.set(25)
        self.mutation_slider.set(0.1)
        self.soft_mutation_slider.set(0.15)
        self.save_rate_slider.set(1000)
        self.max_generation_slider.set(10000)
        self.hybrid_soft_mutate_slider.set(2)
        self.hybrid_medium_mutate_slider.set(1)
        # make the parameter grid stretch uniformly
        self.top_box.rowconfigure(0, weight=1)
        self.top_box.rowconfigure(1, weight=1)
        self.top_box.rowconfigure(2, weight=1)
        self.top_box.rowconfigure(3, weight=1)
        self.top_box.rowconfigure(4, weight=1)
        self.top_box.rowconfigure(5, weight=1)
        self.top_box.rowconfigure(6, weight=1)
        self.top_box.rowconfigure(7, weight=1)
        self.top_box.columnconfigure(0, weight=1)
        self.top_box.columnconfigure(1, weight=1)
        self.top_box.columnconfigure(2, weight=1)
        self.top_box.columnconfigure(3, weight=1)
        self.top_box.columnconfigure(4, weight=1)
        self.top_box.columnconfigure(5, weight=1)
        # --- image-source controls (bottom left)
        self.choose_image = ttk.Button(self.bottom_left_box, text='Choose Image...', command=self.open_image)
        self.choose_image.grid(row=0, column=0)
        self.take_image = ttk.Button(self.bottom_left_box, text='Use Camera', command=self.web_image)
        self.take_image.grid(row=0, column=1)
        self.gif_query = tk.BooleanVar()
        self.export_gif_button = ttk.Checkbutton(self.bottom_left_box, text='Export GIF', variable=self.gif_query, onvalue=True, offvalue=False, command=self.export_gif)
        self.export_gif_button.var = self.gif_query
        self.export_gif_button.grid(row=1, column=0)
        self.bottom_left_box.rowconfigure(0, weight=1)
        self.bottom_left_box.rowconfigure(1, weight=1)
        self.bottom_left_box.columnconfigure(0, weight=1)
        self.bottom_left_box.columnconfigure(1, weight=1)
        # --- target-image preview (bottom right), starts with the default image
        self.img = Image.open(default_image)
        self.photo = ImageTk.PhotoImage(self.img)
        self.target_image_label = tk.Label(self.bottom_right_box, image=self.photo)
        self.target_image_label.image = self.photo
        self.target_image_label.pack(expand=1)

    def crossover_mutation_func(self):
        """Crossover needs at least two parents; adjust the slider's minimum."""
        if self.crossover_mutation_button.var.get():
            self.amount_of_parents_slider.config(from_=2)
        else:
            self.amount_of_parents_slider.config(from_=1)

    def export_gif(self):
        """Toggle handler for the 'Export GIF' checkbutton.

        NOTE(review): the original body is empty here (likely stripped); this
        docstring doubles as a valid body. The chosen value is read later via
        ``self.export_gif_button.var.get()`` in ``initialize_evolution``.
        """

    def web_image(self):
        """Capture the target image from the webcam (SPACE key takes the shot)."""
        global new_image, start_height, start_width, image
        # NOTE(review): CAP_DSHOW is passed as the *device index* here, not as
        # an apiPreference -- confirm this is intentional on the target setup.
        cap = cv2.VideoCapture(cv2.CAP_DSHOW)
        print('PRESS SPACE FOR CAPTURE')
        img_scale = 0.5
        while True:
            (ret, frame) = cap.read()
            frame = cv2.resize(frame, (1280, 800))
            cv2.imshow('frame', frame)
            key = cv2.waitKey(1)
            if (key == ord(' ')):
                print('Captured')
                frame = cv2.resize(frame, (320, 200))
                cv2.imwrite('../resources/webCam.jpg', frame)
                cv2.destroyAllWindows()
                break
        cap.release()
        self.opencv_image = frame
        (start_height, start_width, can) = self.opencv_image.shape
        # NOTE(review): tuple comparison is lexicographic -- this resizes when
        # width > 400 (or width == 400 and height > 400), not when *either*
        # dimension exceeds 400; confirm the intent.
        if ((start_width, start_height) > (400, 400)):
            self.opencv_image = resize_image(self.opencv_image)
        else:
            image = self.opencv_image
        # convert BGR (OpenCV) to RGB for PIL/tk display
        (b, g, r) = cv2.split(self.opencv_image)
        self.opencv_image_r_g_b = cv2.merge((r, g, b))
        self.pil_image = Image.fromarray(self.opencv_image_r_g_b)
        self.tkinter_photo = ImageTk.PhotoImage(self.pil_image)
        self.target_image_label.configure(image=self.tkinter_photo)
        self.target_image_label.image = self.tkinter_photo
        new_image = True
        main_page.target_image_label.configure(image=self.tkinter_photo)
        main_page.update()

    def open_image(self):
        """Pick the target image from disk via a file dialog and preview it."""
        global new_image, image_width, image_height, start_height, start_width, image
        self.target_image = str(filedialog.askopenfilename(initialdir='resources\\', title='Select file', filetypes=(('jpeg files', '*.jpg'), ('all files', '*.*'))))
        if self.target_image:
            self.opencv_image = cv2.imread(self.target_image)
            (start_height, start_width, can) = self.opencv_image.shape
            # NOTE(review): lexicographic tuple comparison -- see web_image.
            if ((start_width, start_height) > (400, 400)):
                self.opencv_image = resize_image(self.opencv_image)
            else:
                image = self.opencv_image
            # BGR -> RGB for display
            (b, g, r) = cv2.split(self.opencv_image)
            self.opencv_image_r_g_b = cv2.merge((r, g, b))
            self.pil_image = Image.fromarray(self.opencv_image_r_g_b)
            (image_width, image_height) = self.pil_image.size
            self.tkinter_photo = ImageTk.PhotoImage(self.pil_image)
            self.target_image_label.configure(image=self.tkinter_photo)
            self.target_image_label.image = self.tkinter_photo
            new_image = True
            main_page.target_image_label.configure(image=self.tkinter_photo)
            main_page.update()

    def initialize_evolution(self):
        """Publish all chosen parameters to module globals and open MainPage.

        Falls back to the bundled Mona Lisa crop when no image was selected.
        """
        global new_image, amount_of_parents, children_per_parent, save_image_rate, vertices, number_of_genes, shapes_ratio, mutation_probability, soft_mutate_rate, hybrid_soft_mutate, hybrid_medium_mutate, mutation_type, alpha_limit, number_of_types, crossover_mutation, gene_structure_rate, target_image_name, image, max_generation, radius_limit, thickness_limit, wanted_width, wanted_height, export_gif_button, image_width, image_height, start_height, start_width
        if (self.pil_image is None):
            # no image chosen yet: load the default target
            self.target_image = 'resources\\mona_lisa_crop.jpg'
            self.opencv_image = cv2.imread(self.target_image)
            (start_height, start_width, can) = self.opencv_image.shape
            image = self.opencv_image
            (b, g, r) = cv2.split(self.opencv_image)
            self.opencv_image_r_g_b = cv2.merge((r, g, b))
            self.pil_image = Image.fromarray(self.opencv_image_r_g_b)
            (image_width, image_height) = self.pil_image.size
            self.tkinter_photo = ImageTk.PhotoImage(self.pil_image)
            self.target_image_label.configure(image=self.tkinter_photo)
            self.target_image_label.image = self.tkinter_photo
            new_image = True
            main_page.target_image_label.configure(image=self.tkinter_photo)
            main_page.update()
        # snapshot all slider/checkbutton values into globals for MainPage
        amount_of_parents = self.amount_of_parents_slider.get()
        children_per_parent = self.children_per_parent_slider.get()
        vertices = self.vertices_slider.get()
        number_of_polygons = self.polygons_slider.get()
        number_of_circles = self.circles_slider.get()
        number_of_lines = self.lines_slider.get()
        number_of_genes = ((number_of_polygons + number_of_circles) + number_of_lines)
        shapes_ratio = [number_of_polygons, number_of_circles, number_of_lines]
        mutation_probability = self.mutation_slider.get()
        soft_mutate_rate = self.soft_mutation_slider.get()
        hybrid_soft_mutate = self.hybrid_soft_mutate_slider.get()
        hybrid_medium_mutate = self.hybrid_medium_mutate_slider.get()
        max_generation = self.max_generation_slider.get()
        mutation_type = self.mutation_type_button.var.get()
        save_image_rate = self.save_rate_slider.get()
        crossover_mutation = self.crossover_mutation_button.var.get()
        gene_structure_rate = self.gene_structure_slider.get()
        export_gif_button = self.export_gif_button.var.get()
        (path, target_image_name) = os.path.split(self.target_image)
        self.controller.show_frame(MainPage)

    def to_stats_page(self):
        """Switch the controller to the statistics page."""
        self.controller.show_frame(StatsPage)
def _get_model(model_src, model_config=None):
    """Instantiate an embedding backend by its source name.

    Parameters
    ----------
    model_src : str
        Backend identifier (case-insensitive), e.g. 'onnx' or 'sbert'.
    model_config : dict, optional
        Keyword arguments forwarded to the backend constructor.

    Raises
    ------
    ValueError
        If ``model_src`` is not a known backend. (The original silently fell
        through and returned None here, which hid typos from callers.)
    """
    model_src = model_src.lower()
    model_config = model_config or {}
    # Lambdas keep each backend class unevaluated until actually selected,
    # so resolving one backend never touches the others.
    factories = {
        'onnx': lambda: Onnx(**model_config),
        'huggingface': lambda: Huggingface(**model_config),
        'sbert': lambda: SBERT(**model_config),
        'fasttext': lambda: FastText(**model_config),
        'data2vecaudio': lambda: Data2VecAudio(**model_config),
        'timm': lambda: Timm(**model_config),
        'vit': lambda: ViT(**model_config),
        'openai': lambda: OpenAI(**model_config),
        'cohere': lambda: Cohere(**model_config),
        'rwkv': lambda: Rwkv(**model_config),
        'paddlenlp': lambda: PaddleNLP(**model_config),
        'uform': lambda: UForm(**model_config),
    }
    try:
        factory = factories[model_src]
    except KeyError:
        raise ValueError(f'unknown model source: {model_src!r}') from None
    return factory()
class FlowCutterOptimizer(PathOptimizer):
    """Contraction-path optimizer backed by the external 'flow_cutter_pace17'
    tree-decomposition solver.

    The contraction's line graph is written to a PACE-format ``.gr`` file,
    flow-cutter runs on it for up to ``max_time`` seconds, and its tree
    decomposition is converted into an edge elimination ordering and finally
    a ``ContractionTree``.
    """

    def __init__(self, max_time=10, seed=None, executable='flow_cutter_pace17'):
        # max_time: seconds the external solver is allowed to keep improving
        # seed: flow-cutter RNG seed (fresh random seed each run when None)
        self.max_time = max_time
        self.seed = seed
        self.executable = executable

    def run_flowcutter(self, file, max_time=None):
        """Run the executable with ``file`` as stdin; store stdout and treewidth.

        Sets ``self.out`` (raw solver output) and ``self.treewidth`` (parsed
        from the last 's td ...' status line).
        """
        if (max_time is None):
            max_time = self.max_time
        if (self.seed is None):
            seed = random.randint(0, ((2 ** 32) - 1))
        else:
            seed = self.seed
        args = [self.executable, '-s', str(seed)]
        process = subprocess.Popen(args, stdout=subprocess.PIPE, stdin=file)
        # flow-cutter is an anytime solver: poll until the deadline, then
        # SIGTERM so it flushes its current best decomposition
        t0 = time.time()
        while (process.poll() is None):
            time.sleep(0.1)
            if (time.time() > (t0 + max_time)):
                process.send_signal(signal.SIGTERM)
                break
        self.out = process.stdout.read().decode('utf-8')
        # second field of the last 's td <n> <tw>' line is the treewidth;
        # raises IndexError if the solver produced no such line
        self.treewidth = int(re.findall('s td (\\d+) (\\d+)', self.out)[(- 1)][1])

    def compute_edge_path(self, lg):
        """Convert the stored decomposition into an edge elimination path."""
        td = td_str_to_tree_decomposition(self.out)
        eo = td_to_eo(td)
        # eo.ordering is 1-based; map back to the line-graph's nodes
        self.edge_path = [lg.nodes[(i - 1)] for i in eo.ordering]

    def build_tree(self, inputs, output, size_dict, memory_limit=None):
        """Run flow-cutter and return the resulting ``ContractionTree``."""
        self.lg = LineGraph(inputs, output)
        with tempfile.NamedTemporaryFile(suffix='.gr') as file:
            self.lg.to_gr_file(file.name)
            max_time = self.max_time
            # retry with a longer budget if the solver emitted nothing in time
            while True:
                try:
                    self.run_flowcutter(file, max_time=max_time)
                    break
                except IndexError:
                    max_time *= 1.5
                    warnings.warn(f'FlowCutter produced no input, automatically repeating with max_time 1.5x increased to {max_time}.')
        self.compute_edge_path(self.lg)
        self.tree = ContractionTree.from_edge_path(self.edge_path, inputs, output, size_dict)
        return self.tree

    def __call__(self, inputs, output, size_dict, memory_limit=None):
        """PathOptimizer interface: return the contraction path itself."""
        return self.build_tree(inputs, output, size_dict).get_path()
class MultiDatasetFastRCNNOutputLayers(CustomFastRCNNOutputLayers):
    """Fast R-CNN output layers with one classification head per dataset.

    Replaces the single ``cls_score`` of the parent with a ``ModuleList`` of
    per-dataset linear classifiers (each with its own background class);
    box regression stays shared.
    """

    def __init__(self, cfg, num_classes_list, input_shape: ShapeSpec, **kwargs):
        super().__init__(cfg, input_shape, **kwargs)
        # drop the parent's single classifier; rebuilt per dataset below
        del self.cls_score
        input_size = ((input_shape.channels * (input_shape.width or 1)) * (input_shape.height or 1))
        prior_prob = cfg.MODEL.ROI_BOX_HEAD.PRIOR_PROB
        if cfg.MODEL.ROI_BOX_HEAD.USE_SIGMOID_CE:
            # focal-loss style bias init so sigmoid outputs start near prior_prob
            bias_value = (- math.log(((1 - prior_prob) / prior_prob)))
        else:
            bias_value = 0
        # index of the OpenImages dataset, which uses the advanced loss
        self.openimage_index = cfg.MULTI_DATASET.DATASETS.index('oid')
        self.num_datasets = len(num_classes_list)
        self.cls_score = nn.ModuleList()
        for num_classes in num_classes_list:
            # +1 output for the background class
            self.cls_score.append(nn.Linear(input_size, (num_classes + 1)))
            nn.init.normal_(self.cls_score[(- 1)].weight, std=0.01)
            nn.init.constant_(self.cls_score[(- 1)].bias, bias_value)

    def forward(self, x, dataset_source=(- 1)):
        """Return (scores, proposal_deltas).

        ``scores`` is a single tensor when ``dataset_source >= 0``, otherwise
        a list with one score tensor per dataset head.
        """
        if (x.dim() > 2):
            x = torch.flatten(x, start_dim=1)
        proposal_deltas = self.bbox_pred(x)
        if (dataset_source >= 0):
            scores = self.cls_score[dataset_source](x)
        else:
            scores = [self.cls_score[d](x) for d in range(self.num_datasets)]
        return (scores, proposal_deltas)

    def losses(self, predictions, proposals, dataset_source):
        """Compute classification + box-regression losses for one dataset.

        The OpenImages head (``self.openimage_index``) uses the advanced
        sigmoid loss variant; other datasets use the plain one.
        """
        use_advanced_loss = (dataset_source == self.openimage_index)
        (scores, proposal_deltas) = predictions
        gt_classes = (cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0))
        _log_classification_stats(scores, gt_classes)
        if len(proposals):
            proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0)
            assert (not proposal_boxes.requires_grad), 'Proposals should not require gradients!'
            # fall back to proposal boxes when ground-truth boxes are absent
            gt_boxes = cat([(p.gt_boxes if p.has('gt_boxes') else p.proposal_boxes).tensor for p in proposals], dim=0)
        else:
            proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device)
        if self.use_sigmoid_ce:
            loss_cls = self.sigmoid_cross_entropy_loss(scores, gt_classes, use_advanced_loss)
        else:
            # advanced loss only exists for the sigmoid variant
            assert (not use_advanced_loss)
            loss_cls = self.softmax_cross_entropy_loss(scores, gt_classes)
        return {'loss_cls': loss_cls, 'loss_box_reg': self.box_reg_loss(proposal_boxes, gt_boxes, proposal_deltas, gt_classes)}
def draw_demo_img_corners(img, projectpts, color=(0, 255, 0), nV=9, thickness=2):
    """Draw projected box corners as dots and connect the first 8 with the
    cuboid's 12 edges; returns the annotated image (modified in place)."""
    corners = []
    for idx in range(nV):
        pt = (int(projectpts[idx][0]), int(projectpts[idx][1]))
        corners.append(pt)
        cv2.circle(img, pt, 2, color, -1)
    # cuboid edges, kept in the same draw order as the original code
    edges = ((0, 1), (0, 2), (0, 4), (1, 5), (1, 3), (2, 3),
             (2, 6), (3, 7), (4, 5), (4, 6), (5, 7), (6, 7))
    for a, b in edges:
        cv2.line(img, corners[a], corners[b], color, thickness=thickness)
    return img
def pd2(base_directory: Path) -> GermanClarinCorpus:
    """Construct the German CLARIN 'PD2' corpus rooted at *base_directory*."""
    corpus_id_prefix = 'all.PD2.4.cmdi.16693.'
    return GermanClarinCorpus(corpus_id_prefix, base_directory)
def train_epoch(model, training_data, optimizer, ema, device, opt, writer, epoch):
    """Run one training epoch; return (loss_per_word, word_accuracy).

    Supports three model variants selected by ``opt``: recurrent (step-wise
    batched inputs), untied/mtrans (separate text/video streams), and the
    default single-stream model. Logs the learning rate per iteration, clips
    gradients when ``opt.grad_clip != -1`` and updates the EMA weights when
    ``ema`` is given.
    """
    model.train()
    total_loss = 0
    n_word_total = 0
    n_word_correct = 0
    # anomaly detection stays on for the whole epoch (debugging aid; slow)
    torch.autograd.set_detect_anomaly(True)
    for (batch_idx, batch) in tqdm(enumerate(training_data), mininterval=2, desc=' Training =>', total=len(training_data)):
        # global iteration counter for tensorboard
        niter = ((epoch * len(training_data)) + batch_idx)
        writer.add_scalar('Train/LearningRate', float(optimizer.param_groups[0]['lr']), niter)
        if opt.recurrent:
            # recurrent variant: batch[0] is a list of per-step inputs
            batched_data = [prepare_batch_inputs(step_data, device=device, non_blocking=opt.pin_memory) for step_data in batch[0]]
            input_ids_list = [e['input_ids'] for e in batched_data]
            video_features_list = [e['video_feature'] for e in batched_data]
            input_masks_list = [e['input_mask'] for e in batched_data]
            token_type_ids_list = [e['token_type_ids'] for e in batched_data]
            input_labels_list = [e['input_labels'] for e in batched_data]
            if opt.debug:
                def print_info(batched_data, step_idx, batch_idx):
                    # dump one example's tensors for inspection
                    cur_data = batched_data[step_idx]
                    logger.info('input_ids \n{}'.format(cur_data['input_ids'][batch_idx]))
                    logger.info('input_mask \n{}'.format(cur_data['input_mask'][batch_idx]))
                    logger.info('input_labels \n{}'.format(cur_data['input_labels'][batch_idx]))
                    logger.info('token_type_ids \n{}'.format(cur_data['token_type_ids'][batch_idx]))
                print_info(batched_data, 0, 0)
            optimizer.zero_grad()
            (loss, pred_scores_list) = model(input_ids_list, video_features_list, input_masks_list, token_type_ids_list, input_labels_list)
        elif (opt.untied or opt.mtrans):
            # untied / masked-transformer variant: separate text and video streams
            batched_data = prepare_batch_inputs(batch[0], device=device, non_blocking=opt.pin_memory)
            video_feature = batched_data['video_feature']
            video_mask = batched_data['video_mask']
            text_ids = batched_data['text_ids']
            text_mask = batched_data['text_mask']
            text_labels = batched_data['text_labels']
            if opt.debug:
                def print_info(cur_data, batch_idx):
                    logger.info('text_ids \n{}'.format(cur_data['text_ids'][batch_idx]))
                    logger.info('text_mask \n{}'.format(cur_data['text_mask'][batch_idx]))
                    logger.info('text_labels \n{}'.format(cur_data['text_labels'][batch_idx]))
                print_info(batched_data, 0)
            optimizer.zero_grad()
            (loss, pred_scores) = model(video_feature, video_mask, text_ids, text_mask, text_labels)
            # wrap in lists so the accuracy loop below is variant-agnostic
            pred_scores_list = [pred_scores]
            input_labels_list = [text_labels]
        else:
            # default single-stream variant
            batched_data = prepare_batch_inputs(batch[0], device=device, non_blocking=opt.pin_memory)
            input_ids = batched_data['input_ids']
            video_features = batched_data['video_feature']
            input_masks = batched_data['input_mask']
            token_type_ids = batched_data['token_type_ids']
            input_labels = batched_data['input_labels']
            if opt.debug:
                def print_info(cur_data, batch_idx):
                    logger.info('input_ids \n{}'.format(cur_data['input_ids'][batch_idx]))
                    logger.info('input_mask \n{}'.format(cur_data['input_mask'][batch_idx]))
                    logger.info('input_labels \n{}'.format(cur_data['input_labels'][batch_idx]))
                    logger.info('token_type_ids \n{}'.format(cur_data['token_type_ids'][batch_idx]))
                print_info(batched_data, 0)
            optimizer.zero_grad()
            (loss, pred_scores) = model(input_ids, video_features, input_masks, token_type_ids, input_labels)
            pred_scores_list = [pred_scores]
            input_labels_list = [input_labels]
        loss.backward()
        if (opt.grad_clip != (- 1)):
            nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip)
        optimizer.step()
        if (ema is not None):
            # update exponential moving average of the weights
            ema(model, niter)
        # token-level accuracy over non-ignored label positions
        n_correct = 0
        n_word = 0
        for (pred, gold) in zip(pred_scores_list, input_labels_list):
            n_correct += cal_performance(pred, gold)
            valid_label_mask = gold.ne(RCDataset.IGNORE)
            n_word += valid_label_mask.sum().item()
        n_word_total += n_word
        n_word_correct += n_correct
        total_loss += loss.item()
        if opt.debug:
            # in debug mode only run one batch
            break
    torch.autograd.set_detect_anomaly(False)
    loss_per_word = ((1.0 * total_loss) / n_word_total)
    accuracy = ((1.0 * n_word_correct) / n_word_total)
    return (loss_per_word, accuracy)
class Adapter(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.input_dim = config.input_dim
self.down_sample_size = (self.input_dim // config.reduction_factor)
self.activation = Activations(config.non_linearity.lower())
self.down_sampler = nn.Linear(self.input_dim, self.down_sample_size)
self.up_sampler = nn.Linear(self.down_sample_size, self.input_dim)
def forward(self, x):
z = self.down_sampler(x)
z = self.activation(z)
output = self.up_sampler(z)
return output |
class MVTecAD(Dataset):
    """Simple MVTec-AD style dataset: parallel lists of image paths and
    labels, with a transform applied on access."""

    def __init__(self, image_list, label_list, transform):
        self.image_list = image_list
        self.label_list = label_list
        self.transform = transform

    def __getitem__(self, index):
        """Load, transform and return ``(image, label)`` for one sample."""
        sample = Image.open(self.image_list[index])
        target = self.label_list[index]
        return self.transform(sample), target

    def __len__(self):
        return len(self.image_list)
# NOTE(review): bare '_module()' below looks like a stripped registry decorator.
_module()
class SemiPSPHead(SemiBaseDecodeHead):
    """PSP decode head for semi-supervised segmentation.

    Pyramid pooling over the transformed inputs, concatenation with the raw
    features, a bottleneck conv, and finally (unless raw features are
    requested) the segmentation classifier.
    """

    def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs):
        super(SemiPSPHead, self).__init__(**kwargs)
        assert isinstance(pool_scales, (list, tuple))
        self.pool_scales = pool_scales
        self.psp_modules = PPM(self.pool_scales, self.in_channels, self.channels, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, align_corners=self.align_corners)
        self.bottleneck = ConvModule(self.in_channels + len(pool_scales) * self.channels, self.channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)

    def forward(self, inputs, return_feat=False):
        """Decode features; return the pre-classifier feature map when
        ``return_feat`` is True."""
        features = self.forward_ppm(inputs)
        if return_feat:
            return features
        return self.cls_seg(features)

    def forward_ppm(self, inputs):
        """Pyramid pooling + bottleneck, without the classification layer."""
        x = self._transform_inputs(inputs)
        pooled = [x]
        pooled.extend(self.psp_modules(x))
        fused = torch.cat(pooled, dim=1)
        return self.bottleneck(fused)
_config
def model_lifelong_finetune_std_taskonomy():
    # Sacred-style experiment config. The bare `_config` above is presumably a
    # mangled `@ex.config` decorator (TODO confirm upstream); sacred captures
    # the local `cfg` dict from the function body, which is why nothing is
    # returned here. Do not add a `return` without checking the framework.
    # cfg: lifelong sidetuning setup — Taskonomy curvature encoder as the
    # frozen base, FCN5 as the side network, with pre-transfer normalization.
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'base_class': 'GenericSidetuneNetwork', 'base_kwargs': {'n_channels_in': 3, 'n_channels_out': 8, 'base_class': 'TaskonomyEncoder', 'base_kwargs': {'eval_only': False, 'normalize_outputs': False}, 'base_weights_path': '/mnt/models/curvature_encoder.dat', 'use_baked_encoding': False, 'normalize_pre_transfer': False, 'side_class': 'FCN5', 'side_kwargs': {'eval_only': False, 'normalize_outputs': False}, 'side_weights_path': '/mnt/models/curvature_encoder_student.dat'}, 'normalize_pre_transfer': True}}}
def amp_context(amp_config=None):
    """Yield an autocast context when AMP is configured, otherwise None.

    NOTE(review): this is generator-shaped; upstream presumably wraps it with
    ``contextlib.contextmanager`` (the decorator is not visible here).
    """
    if amp_config is None:
        yield None
    else:
        yield autocast(**amp_config)
class ASPP(nn.Module):
    """3-D Atrous Spatial Pyramid Pooling with an optional residual add.

    Three dilated-conv branches plus a global-average-pool branch are
    concatenated and fused back to ``inplanes`` channels by a 1x1x1 conv.
    """
    def __init__(self, inplanes, output_stride):
        super(ASPP, self).__init__()
        self.inplanes = inplanes
        # NOTE(review): `output_stride` is stored as `outplanes` and used as a
        # channel count below — the parameter looks misnamed; confirm upstream.
        self.outplanes = output_stride
        mid_planes = 16
        dilations = [1, 2, 6]
        # parallel atrous branches at increasing dilation rates
        self.aspp1 = _ASPPModule(inplanes, mid_planes, 1, padding=0, dilation=dilations[0])
        self.aspp2 = _ASPPModule(inplanes, mid_planes, 3, padding=dilations[1], dilation=dilations[1])
        self.aspp3 = _ASPPModule(inplanes, mid_planes, 3, padding=dilations[2], dilation=dilations[2])
        # image-level (global) context branch, pooled to 1x1x1
        self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool3d((1, 1, 1)), nn.Conv3d(inplanes, mid_planes, 1, stride=1, bias=False), nn.BatchNorm3d(mid_planes), nn.ReLU())
        # fuse the 4 concatenated branches back to `inplanes` channels
        self.conv1 = nn.Conv3d((mid_planes * 4), inplanes, 1, bias=False)
        # NOTE(review): conv1 always outputs `inplanes` channels, so this
        # BatchNorm3d is only dimensionally correct when
        # inplanes == output_stride — confirm against callers.
        self.bn1 = nn.BatchNorm3d(output_stride)
        self.relu = nn.ReLU()
        self._init_weight()
    def forward(self, x):
        input = x
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.global_avg_pool(x)
        # broadcast the pooled 1x1x1 branch to the spatial size of the others
        (dimx, dimy, dimz) = (x3.size()[2], x3.size()[3], x3.size()[4])
        x4 = x4.repeat([1, 1, dimx, dimy, dimz])
        x = torch.cat((x1, x2, x3, x4), dim=1)
        x = self.conv1(x)
        # residual connection only when channel counts allow it
        if (self.inplanes == self.outplanes):
            x = (x + input)
        x = self.bn1(x)
        x = self.relu(x)
        return x
    def _init_weight(self):
        # Kaiming init for convs; identity-style init for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                torch.nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
def calculate_disp_diff(disp_map, ref_disp_map):
    """Return the per-pixel absolute disparity difference against a reference.

    ``disp_map`` is resized to the reference map's resolution first.

    Bug fix: ``cv2.resize``'s third positional argument is ``dst``, not the
    interpolation flag, so the original call silently ignored INTER_CUBIC;
    the flag must be passed as ``interpolation=``.
    """
    ref_height, ref_width = ref_disp_map.shape[:2]
    resized = cv2.resize(disp_map, (ref_width, ref_height),
                         interpolation=cv2.INTER_CUBIC)
    return np.abs(ref_disp_map - resized)
def test_digits_stochastic():
    """Stochastic-optimizer selection on the digits data matches stored fixtures."""
    selector = MaxCoverageSelection(100, optimizer='stochastic', random_state=0)
    selector.fit(X_digits)
    assert_array_equal(selector.ranking, digits_stochastic_ranking)
    assert_array_almost_equal(selector.gains, digits_stochastic_gains, 4)
    assert_array_almost_equal(selector.subset, X_digits[selector.ranking])
def load_config(custom_config, default_config=CONFIG, prefix='CONFIG'):
    """Recursively overlay ``custom_config`` onto ``default_config`` in place.

    Raises NotImplementedError for keys missing from the defaults, and
    ValueError when one side is a dict and the other is not. ``prefix`` is
    used only to build dotted key paths for error messages.
    """
    if 'is_default' in default_config:
        # mark the (attribute-style) config object as customized
        default_config.is_default = False
    for key, value in custom_config.items():
        full_key = '.'.join([prefix, key])
        if key not in default_config:
            raise NotImplementedError('Unknown config key: {}'.format(full_key))
        default_is_dict = isinstance(default_config[key], dict)
        if isinstance(value, dict):
            if not default_is_dict:
                raise ValueError('{}: Expected {}, got dict instead.'.format(full_key, type(value)))
            load_config(default_config=default_config[key], custom_config=value, prefix=full_key)
        elif default_is_dict:
            raise ValueError('{}: Expected dict, got {} instead.'.format(full_key, type(value)))
        else:
            default_config[key] = value
_module()
class Posterize(object):
    """Randomly posterize the images in ``results`` with probability ``prob``.

    Applies ``mmcv.posterize`` (keep only the top ``bits`` bits per channel)
    to every field listed under ``results['img_fields']`` (default: 'strong').
    """

    def __init__(self, bits, prob=0.5):
        assert (bits <= 8), f'The bits must be less than 8, got {bits} instead.'
        assert (0 <= prob <= 1.0), f'The prob should be in range [0,1], got {prob} instead.'
        self.bits = int(bits)
        self.prob = prob

    def __call__(self, results):
        # skip the augmentation entirely with probability (1 - prob)
        if np.random.rand() > self.prob:
            return results
        for key in results.get('img_fields', ['strong']):
            original = results[key]
            results[key] = mmcv.posterize(original, bits=self.bits).astype(original.dtype)
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(bits={self.bits}, prob={self.prob})'
class Contrast(object):
    """Randomly reduce image contrast by blending toward the mean gray level."""

    def __init__(self, var):
        self.var = var

    def __call__(self, img):
        gray = Grayscale()(img)
        # a constant image at the mean intensity
        gray.fill_(gray.mean())
        blend = random.uniform(0, self.var)
        return img.lerp(gray, blend)
class PanguFileSystem(AbstractFileSystem):
    """fsspec-style filesystem over the Pangu C API (via the ctypes ``pangu_api``).

    NOTE(review): the bare ``_pangu_before_exec`` statements below look like
    mangled decorators (probably ``@_pangu_before_exec``); left untouched here.
    """
    # 64 MiB Pangu block size
    PANGU_BLOCK_SIZE = ((1024 * 1024) * 64)
    # file types accepted by pangu_open / pangu_create1
    FILE_TYPE_NORMAL = 0
    FILE_TYPE_LOGFILE = 2
    FILE_TYPE_RAIDFILE = 3
    # open-mode flags
    FLAG_GENERIC_READ = 1
    FLAG_SEQUENTIAL_READ = 4
    FLAG_SEQUENTIAL_WRITE = 8
    def _to_exception(cls, err, path):
        # Translate a (possibly negated) pangu errno into a Python exception.
        # NOTE(review): the first parameter is named `cls` but no @classmethod
        # decorator is visible; callers invoke self._to_exception(err, path),
        # so `cls` is effectively `self` — confirm upstream.
        if (err < 0):
            err = (- err)
        if ((err == errno.EPERM) or (err == errno.EACCES)):
            raise PermissionError(('%s no permission' % path))
        elif (err == errno.ENOENT):
            raise FileNotFoundError(('%s not found' % path))
        elif (err == errno.EEXIST):
            raise FileExistsError(('%s existed' % path))
        elif (err == errno.EINVAL):
            raise OSError(('%s, Invalid Arguement' % path))
        elif (err == errno.ENOSPC):
            raise IOError(('%s, No Space' % path))
        elif (err == errno.EDQUOT):
            raise IOError(('%s, Quota Exceed' % path))
        elif (err == errno.EBUSY):
            raise IOError(('%s, Busy' % path))
        elif (err == errno.ENOTEMPTY):
            raise OSError(('%s, Dir not Empty' % path))
        elif (err == errno.EBADF):
            raise IOError(('%s, Bad Descriptor' % path))
        elif (err == errno.EIO):
            raise IOError(('%s, IO Error' % path))
        else:
            raise Exception(('%s, Unknown Error %d' % (path, err)))
    _pangu_before_exec
    def exists(self, path):
        """Return True if `path` exists (EAFP via stat)."""
        try:
            self.stat(path)
        except FileNotFoundError:
            return False
        return True
    _pangu_before_exec
    def isdir(self, path):
        """Return True if `path` exists and is a directory."""
        meta = PanguFileMeta()
        r = pangu_api.pangu_get_status(c_char_p(path.encode('utf-8')), byref(meta))
        if (r != 0):
            self._to_exception(r, path)
        if (meta.is_dir > 0):
            return True
        return False
    _pangu_before_exec
    def ls(self, path, detail=False):
        """List entry names under `path` (trailing '/' stripped from dirs).

        NOTE(review): the `detail` flag is accepted but never used here.
        """
        MAX_NAME_LEN = 1024
        LIST_BATCH_SIZE = 1024
        # pangu directory URIs must end with '/'
        if path.endswith('/'):
            uri = path
        else:
            uri = (path + '/')
        try_count = 0
        while True:
            # retry opening the directory up to 10 times, 1 s apart
            try_count += 1
            dir_handle = c_void_p(0)
            r = pangu_api.pangu_open_dir(c_char_p(uri.encode('utf-8')), byref(dir_handle), c_int(LIST_BATCH_SIZE))
            if (r != 0):
                if (try_count < 10):
                    time.sleep(1.0)
                    continue
                self._to_exception(r, uri)
            meta = PanguFileMeta()
            cname = (c_byte * (MAX_NAME_LEN + 1))()
            files = []
            # read entries until pangu_read_dir returns non-zero (end or error)
            while (r == 0):
                name_size = c_int(MAX_NAME_LEN)
                meta.file_length = 0
                meta.create_time = 0
                meta.modified_time = 0
                r = pangu_api.pangu_read_dir(dir_handle, cname, byref(name_size), byref(meta))
                if (r != 0):
                    break
                if (name_size.value >= MAX_NAME_LEN):
                    raise Exception('name length too long')
                # NOTE(review): `uri` is rebound to the entry name here, so the
                # error path below reports the last entry, not the directory.
                uri = str(bytearray(cname)[:name_size.value].decode())
                if uri.endswith('/'):
                    uri = uri[:(- 1)]
                files.append(uri)
            pangu_api.pangu_close_dir(dir_handle)
            if (r < 0):
                self._to_exception(r, uri)
            return files
    _pangu_before_exec
    def makedirs(self, path, exist_ok=False):
        """Create `path` and any missing parents."""
        self._recursive_create_dir(path, exist_ok)
    _pangu_before_exec
    def _recursive_create_dir(self, dirname, exist_ok=False):
        # Create missing ancestors first, then the directory itself.
        (head, tail) = os.path.split(dirname)
        if (not tail):
            # dirname ended with '/'; split again to get the real parent
            (head, tail) = os.path.split(head)
        if (head and tail):
            try:
                self.stat(head)
            except FileNotFoundError:
                self._recursive_create_dir(head)
        self.create_dir(dirname, exist_ok)
    _pangu_before_exec
    def _recursive_remove_dir(self, dir_name):
        # Depth-first removal: children first, then the directory itself.
        file_under_dir = self.ls(dir_name)
        for file_name in file_under_dir:
            file_name = os.path.join(dir_name, file_name)
            if self.isdir(file_name):
                self._recursive_remove_dir(file_name)
            else:
                self.rm(file_name)
        self.rmdir(dir_name)
    _pangu_before_exec
    def create_dir(self, dirname, exist_ok=False):
        """Create a single directory; tolerate EEXIST when exist_ok is set."""
        mode = 509  # 0o775
        uri = (dirname + '/').encode()
        rc = pangu_api.pangu_mkdir(c_char_p(uri), c_int(mode))
        if (rc != 0):
            if ((abs(rc) == errno.EEXIST) and self.isdir(dirname) and exist_ok):
                pass
            else:
                self._to_exception(rc, uri)
        return rc
    _pangu_before_exec
    def info(self, path):
        """Return an fsspec-style info dict for `path`.

        NOTE(review): the 'sccess' key looks like a typo for 'access', but it
        is part of the returned mapping (runtime behavior) — not changed here.
        """
        meta = PanguFileMeta()
        r = pangu_api.pangu_get_status(c_char_p(path.encode('utf-8')), byref(meta))
        if (r != 0):
            self._to_exception(r, path)
        if (meta.is_dir > 0):
            t = 'directory'
        else:
            t = 'file'
        return {'name': path, 'size': meta.file_length, 'type': t, 'created': meta.create_time, 'owner': meta.owner, 'group': meta.group, 'sccess': meta.access}
    _pangu_before_exec
    def rm(self, path, recursive=False, maxdepth=None):
        """Remove one path or a list of paths; directories need `recursive`.

        NOTE(review): `maxdepth` is accepted but never consulted.
        """
        if (not isinstance(path, list)):
            path = [path]
        for p in path:
            if ((not recursive) and self.isdir(p)):
                # non-recursive removal of a directory (fails if non-empty)
                self.rmdir(p)
            elif (recursive and self.isdir(p)):
                self._recursive_remove_dir(p)
            else:
                self.rm_file(p)
    _pangu_before_exec
    def rmdir(self, path):
        """Remove an (empty) directory; a missing directory is not an error."""
        uri = (path + '/').encode()
        rc = pangu_api.pangu_rmdir(c_char_p(uri), c_int(0))
        if ((rc != 0) and (rc != errno.ENOENT)):
            self._to_exception(rc, uri)
        return rc
    _pangu_before_exec
    def rm_file(self, path):
        """Remove a single file; a missing file is not an error."""
        rc = pangu_api.pangu_remove(c_char_p(path.encode('utf-8')), c_int(0))
        if ((rc != 0) and (rc != errno.ENOENT)):
            self._to_exception(rc, path)
        return rc
    _pangu_before_exec
    def _open(self, path, mode='rb', block_size=None, **kwargs):
        """Open `path` and return a PanguFile wrapper.

        Write mode truncates via _create; append mode only creates the file
        when it does not already exist.
        """
        pangu_flag = self.FLAG_GENERIC_READ
        if (('w' in mode) or ('a' in mode)):
            pangu_flag = self.FLAG_SEQUENTIAL_WRITE
            if ('a' in mode):
                if (not self.exists(path)):
                    self._create(path, 509, overwrite=True)
            else:
                self._create(path, 509, overwrite=True)
        handle = c_void_p(0)
        file_type = c_int(self.FILE_TYPE_NORMAL)
        rc = pangu_api.pangu_open(c_char_p(path.encode('utf-8')), c_int(pangu_flag), c_int(0), file_type, byref(handle))
        if (rc != 0):
            self._to_exception(rc, path)
        return PanguFile(path, mode, handle, **kwargs)
    def _create(self, path, mode, overwrite=False, copys=3, ftt=1, options={}):
        """Create a file with `copys` replicas tolerating `ftt` failures.

        NOTE(review): the mutable default `options={}` is only read, never
        mutated, so it is harmless here — but fragile if edited later.
        """
        app_name = 'BIGFILE_APPNAME'
        if ('appname' in options):
            app_name = options['appname']
        part_name = 'BIGFILE_PARTNAME'
        if ('partname' in options):
            part_name = options['partname']
        file_type = self.FILE_TYPE_NORMAL
        if ('filetype' in options):
            file_type = int(options['filetype'])
        # trunz: truncate the file if it already exists
        trunz = 0
        if overwrite:
            trunz = 1
        rc = pangu_api.pangu_create1(c_char_p(path.encode('utf-8')), c_int((copys - ftt)), c_int(copys), c_char_p(app_name.encode('utf-8')), c_char_p(part_name.encode('utf-8')), c_int(trunz), c_int(mode), c_int(file_type))
        if (rc != 0):
            self._to_exception(rc, path)
        return rc
def get_input_encoding(inputs, initializer=None, scope=None):
    """Encode word embeddings by a learned positional mask and sum over words.

    ``inputs`` is a rank-4 tensor whose last two axes are (sentence length,
    embedding size); the word axis (axis 2) is reduced away.
    """
    with tf.variable_scope(scope, 'Encoding', initializer=initializer):
        static_shape = inputs.get_shape().as_list()
        max_sentence_length, embedding_size = static_shape[2], static_shape[3]
        positional_mask = tf.get_variable(
            name='positional_mask', shape=[max_sentence_length, embedding_size])
        return tf.reduce_sum(inputs * positional_mask, axis=2)
class Hyperparams(dict):
    """Dict subclass exposing keys as attributes (h.lr is h['lr']).

    Bug fix: the original ``__getattr__`` let ``KeyError`` escape for missing
    attributes; Python's attribute protocol requires ``AttributeError``, and
    the KeyError broke ``hasattr``, ``copy.copy`` and pickling.
    """

    def __getattr__(self, attr):
        try:
            return self[attr]
        except KeyError:
            raise AttributeError(attr) from None

    def __setattr__(self, attr, value):
        # store attribute writes as dict entries so both views stay in sync
        self[attr] = value
def _RowwiseUnsortedSegmentSum(values, indices, n):
    """Per-row unsorted segment sum: [batch, k] values summed into [batch, n] bins."""
    batch, k = tf.unstack(tf.shape(indices), num=2)
    # shift each row's indices into its own disjoint block of n bins
    row_offsets = tf.div(tf.range(batch * k), k) * n
    flat_indices = tf.reshape(indices, [-1]) + row_offsets
    flat_sum = tf.unsorted_segment_sum(tf.reshape(values, [-1]), flat_indices, batch * n)
    return tf.reshape(flat_sum, [batch, n])
def fully_connected(input_, output_dim, name='fc'):
    """Fully-connected layer expressed as a VALID 3-D conv over the full extent.

    The kernel spans the entire spatial volume (dims 1..3 of ``input_``), so
    the convolution collapses it to a single position per output channel.
    """
    spatial_extent = list(input_.shape[1:4])
    return conv3d(input_, output_dim, kernal=spatial_extent,
                  strides=(1, 1, 1), padding='VALID', name=name)
def get_1x_lr_params(model):
    """Yield the trainable parameters of the backbone (model.resnet_features).

    These are the parameters trained at the base (1x) learning rate.
    """
    for module in [model.resnet_features]:
        for param in module.parameters():
            if param.requires_grad:
                yield param
_grad()
def evaluate(data_loader_query, data_loader_gallery, encoder, device, log_writer=None, rank=[1, 5, 10]):
    """Compute Recall@k for image retrieval.

    Embeds all query images (and, if given, all gallery images) with
    `encoder`, L2-normalizes the embeddings, and evaluates recall at each k
    in `rank`. When `data_loader_gallery` is None, recall is computed within
    the query set alone.

    NOTE(review): the bare `_grad()` above looks like a mangled
    `@torch.no_grad()` decorator — confirm upstream.
    NOTE(review): the mutable default `rank=[1, 5, 10]` is only read, never
    mutated, so it is harmless here.
    """
    encoder.eval()
    recall_list = []
    query_features = []
    query_labels = []
    for (images, targets) in tqdm(data_loader_query, total=len(data_loader_query), desc='query'):
        images = images.to(device)
        output = encoder(images)
        # some encoders return (embedding, aux); keep only the embedding
        if isinstance(output, tuple):
            output = output[0]
        output = F.normalize(output, dim=1)
        query_features.append(output.detach().cpu())
        query_labels += targets.tolist()
    query_features = torch.cat(query_features, dim=0)
    query_labels = torch.LongTensor(query_labels)
    if (data_loader_gallery is None):
        # query-only protocol: each query is matched against the other queries
        recall_list = recall(query_features, query_labels, rank=rank)
    else:
        gallery_features = []
        gallery_labels = []
        for (images, targets) in tqdm(data_loader_gallery, total=len(data_loader_gallery), desc='gallery'):
            images = images.to(device)
            # NOTE(review): gallery embedding runs under autocast while the
            # query pass above does not — confirm this asymmetry is intended.
            with torch.cuda.amp.autocast():
                output = encoder(images)
            if isinstance(output, tuple):
                output = output[0]
            output = F.normalize(output, dim=1)
            gallery_features.append(output.detach().cpu())
            gallery_labels += targets.tolist()
        gallery_features = torch.cat(gallery_features, dim=0)
        gallery_labels = torch.LongTensor(gallery_labels)
        recall_list = recall(query_features, query_labels, rank=rank, gallery_features=gallery_features, gallery_labels=gallery_labels)
    for (k, _recall) in zip(rank, recall_list):
        logging.info(f'{k} : {_recall:.2%}')
        if (log_writer is not None):
            log_writer.add_scalar(f'metric/Recall', _recall, k)
    return recall_list
def resnext50_32x4d(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> ResNet:
    """Build a ResNeXt-50 32x4d model (32 groups, 4 channels per group)."""
    kwargs.update(groups=32, width_per_group=4)
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)
class Non_local(nn.Module):
    """Non-local (self-attention) block over spatial positions.

    Embeds the input with 1x1 convs (theta/phi/g), computes dot-product
    attention across all positions, projects back with W, and adds the result
    to the input. W's batch norm (and the shape-branch norm) are
    zero-initialized so the block starts as an identity mapping.

    Bug fix: ``inter_channels`` was computed as ``reduc_ratio // reduc_ratio``,
    which is always 1 and makes the reduction ratio a no-op; the standard
    non-local block uses ``in_channels // reduc_ratio``.
    """

    def __init__(self, in_channels, reduc_ratio=2):
        super(Non_local, self).__init__()
        self.in_channels = in_channels
        # channel width of the attention embeddings (at least 1)
        self.inter_channels = max(in_channels // reduc_ratio, 1)
        self.g = nn.Sequential(nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0))
        self.W = nn.Sequential(nn.Conv2d(in_channels=self.inter_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(self.in_channels))
        # alternate BN used when forward(..., shape=True)
        self.Wbn_shape = nn.BatchNorm2d(self.in_channels)
        # zero-init both norms so the residual branch starts at zero
        nn.init.constant_(self.W[1].weight, 0.0)
        nn.init.constant_(self.W[1].bias, 0.0)
        nn.init.constant_(self.Wbn_shape.weight, 0.0)
        nn.init.constant_(self.Wbn_shape.bias, 0.0)
        self.theta = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0)
        self.phi = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0)

    def forward(self, x, shape=False):
        """Apply non-local attention to x; `shape` selects the alternate BN."""
        batch_size = x.size(0)
        # flatten spatial dims: [B, C', HW] -> [B, HW, C'] where needed
        g_x = self.g(x).view(batch_size, self.inter_channels, -1)
        g_x = g_x.permute(0, 2, 1)
        theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
        theta_x = theta_x.permute(0, 2, 1)
        phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
        # [B, HW, HW] pairwise affinities, normalized by the number of positions
        f = torch.matmul(theta_x, phi_x)
        N = f.size(-1)
        f_div_C = f / N
        y = torch.matmul(f_div_C, g_x)
        y = y.permute(0, 2, 1).contiguous()
        y = y.view(batch_size, self.inter_channels, *x.size()[2:])
        if shape:
            W_y = self.Wbn_shape(self.W[0](y))
        else:
            W_y = self.W(y)
        # residual connection
        z = W_y + x
        return z
class AdMapAccessPythonTest(unittest.TestCase):
    """Smoke test for the ad.map Python bindings against the TPK test map."""
    def test_interface(self):
        """Load the map, match a geo position, plan a short route, clean up."""
        # initialize the map singleton from the test data file
        self.assertTrue(ad.map.access.init('test_files/TPK.adm.txt'))
        lanes = ad.map.lane.getLanes()
        # the TPK test map is expected to contain exactly 141 lanes
        self.assertEqual(len(lanes), 141)
        mapMatching = ad.map.match.AdMapMatching()
        # a fixed geo position known to match exactly one lane
        geoPoint = ad.map.point.GeoPoint()
        geoPoint.longitude = ad.map.point.Longitude(8.4401803)
        geoPoint.latitude = ad.map.point.Latitude(49.0191987)
        geoPoint.altitude = ad.map.point.Altitude(0.0)
        mapMatchingResults = mapMatching.getMapMatchedPositions(geoPoint, ad.physics.Distance(0.01), ad.physics.Probability(0.05))
        self.assertEqual(len(mapMatchingResults), 1)
        # route from the matched point to offset 0.0 on the same lane
        routingStart = mapMatchingResults[0].lanePoint.paraPoint
        routingEnd = ad.map.point.ParaPoint()
        routingEnd.laneId = routingStart.laneId
        routingEnd.parametricOffset = ad.physics.ParametricValue(0.0)
        routeResult = ad.map.route.planRoute(ad.map.route.createRoutingPoint(routingStart), ad.map.route.createRoutingPoint(routingEnd))
        routeLength = ad.map.route.calcLength(routeResult.roadSegments[0])
        # expected segment length (truncated to whole meters) is 4
        self.assertEqual(int(float(routeLength)), 4)
        # release the map singleton so other tests start from a clean state
        ad.map.access.cleanup()
(version='2.0')
def get_node_mapping(fp32_model, fp32_onnx_path):
    """Map PyTorch module names to the ONNX node names carrying their weights.

    Collects the weight tensors of the model's Conv/Embedding/Linear modules,
    then matches each quantizable ONNX node's weight initializer against them
    by shape and value. Each module is consumed at most once.
    """
    def check_data(op_type, data, module_dict):
        # Find (and remove) the module whose weight tensor equals `data`.
        for mod_name, weight in module_dict.items():
            if weight.shape == data.shape and (weight == data).all():
                module_dict.pop(mod_name)
                return mod_name
        return None

    module_dict = {}
    for name, module in fp32_model.named_modules():
        cls_name = str(module.__class__.__name__)
        if ('Conv' in cls_name) or ('Embedding' in cls_name) or ('Linear' in cls_name):
            if hasattr(module, 'weight'):
                module_dict[name] = module.weight.detach().cpu().numpy()

    module_node_mapping = {}
    fp32_onnx_model = onnx.load(fp32_onnx_path)
    initializer_data = {tensor.name: tensor for tensor in fp32_onnx_model.graph.initializer}
    from onnx import numpy_helper
    for node in fp32_onnx_model.graph.node:
        if node.op_type not in op_types_to_quantize:
            continue
        # extract the weight initializer for the node types we understand
        if node.op_type == 'MatMul' and node.input[1] in initializer_data:
            # MatMul weights are stored transposed relative to torch Linear
            data = numpy_helper.to_array(initializer_data[node.input[1]]).T
        elif node.op_type == 'Gather' and node.input[0] in initializer_data:
            data = numpy_helper.to_array(initializer_data[node.input[0]])
        elif node.op_type in ['Gemm']:
            data = numpy_helper.to_array(initializer_data[node.input[1]])
        else:
            continue
        pt_name = check_data(node.op_type, data, module_dict)
        if pt_name:
            module_node_mapping[pt_name] = node.name
    return module_node_mapping
def set_object_pose(position, orientation):
    """Write location and quaternion rotation onto Blender's active object.

    NOTE(review): ``rotation_quaternion`` only takes effect when the object's
    ``rotation_mode`` is 'QUATERNION' — confirm callers set that.
    """
    bpy.context.object.location = position
    bpy.context.object.rotation_quaternion = orientation
class LogitBijection(ElementwiseBijection):
    """Elementwise logit bijection: F(x) = log(x / (1 - x)), F^-1 = sigmoid.

    Inputs are clamped to [_EPS, 1 - _EPS] before taking logarithms so that
    values at (or numerically beyond) the endpoints of (0, 1) do not produce
    inf/NaN.

    Bug fix: previously only ``_log_dF`` clamped its input, so ``_F(0.)`` and
    ``_F(1.)`` returned +/-inf while the log-derivative stayed finite — an
    inconsistent numerical guard. Both now clamp identically.
    """
    # numerical guard against log(0)
    _EPS = 1e-07

    def _F(self, x):
        x_clamped = x.clamp(self._EPS, 1 - self._EPS)
        return torch.log(x_clamped) - torch.log(1 - x_clamped)

    def _F_inv(self, z):
        return torch.sigmoid(z)

    def _log_dF(self, x):
        # log |dF/dx| = -log(x) - log(1 - x)
        x_clamped = x.clamp(self._EPS, 1 - self._EPS)
        return (-torch.log(x_clamped)) - torch.log(1 - x_clamped)
class UniversalDependenciesRawDatasetReader(UniversalDependenciesDatasetReader):
    """Read raw (untokenized) text, one sentence per line, into UD sentences.

    Tokens and POS tags come from spaCy; lemmas, feats, heads and dependency
    relations are filled with placeholders since raw text carries no gold
    annotations.
    """

    def __init__(self, language):
        super().__init__()
        self.tokenizer = SpacyWordSplitter(language=language, pos_tags=True)

    def load(self, file_path):
        """Populate self.sentences / self.ids from `file_path`.

        Bug fixes vs. the original: lines are stripped before the emptiness
        check (a bare newline is truthy, so blank lines used to produce empty
        sentences), and spaCy now runs once per sentence instead of twice
        (once for words and again for tags).
        """
        file_path = cached_path(file_path)
        counter = 1
        with open(file_path, 'r') as conllu_file:
            for line in conllu_file:
                sentence = line.strip()
                if not sentence:
                    continue
                # tokenize once; reuse the tokens for both words and tags
                tokens = self.tokenizer.split_words(sentence)
                words = [token.text for token in tokens]
                upos_tags = [token.tag_ for token in tokens]
                xpos_tags = upos_tags
                seq_len = len(words)
                ids = [(i + 1) for i in range(seq_len)]
                # placeholder annotations for raw text
                lemmas = ['_' for i in range(seq_len)]
                feats = lemmas
                heads = [1 for i in range(seq_len)]
                dep_rels = ['<UNK>' for i in range(seq_len)]
                multiword_ids = []
                multiword_forms = []
                sentence = UD_Sentence(ids, words, lemmas, upos_tags, xpos_tags, feats, heads, dep_rels, multiword_ids, multiword_forms)
                self.sentences.append(sentence)
                self.ids.append(counter)
                counter = counter + 1
def get_candidate_representation(candidate_desc, tokenizer, max_seq_length, candidate_title=None, title_tag=ENT_TITLE_TAG):
    """Tokenize an entity description into a fixed-length id sequence.

    Layout: [CLS] (title title_tag)? description [SEP], truncated to fit and
    zero-padded to ``max_seq_length``. Returns {'tokens': ..., 'ids': ...}.
    """
    cand_tokens = tokenizer.tokenize(candidate_desc)
    if candidate_title is not None:
        cand_tokens = tokenizer.tokenize(candidate_title) + [title_tag] + cand_tokens
    # reserve two slots for the special tokens
    cand_tokens = [tokenizer.cls_token] + cand_tokens[:max_seq_length - 2] + [tokenizer.sep_token]
    input_ids = tokenizer.convert_tokens_to_ids(cand_tokens)
    input_ids += [0] * (max_seq_length - len(input_ids))
    assert len(input_ids) == max_seq_length
    return {'tokens': cand_tokens, 'ids': input_ids}
class _Transition(nn.Sequential):
def __init__(self, num_input_features, num_output_features, downsample=True):
super(_Transition, self).__init__()
self.add_module('norm', nn.BatchNorm2d(num_input_features))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False))
if downsample:
self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
else:
self.add_module('pool', nn.AvgPool2d(kernel_size=1, stride=1)) |
def random_adjust_brightness(img, brightness_factor):
    """Scale image brightness by ``brightness_factor`` with probability (1 - PROB_THRESHOLD).

    Raises TypeError for non-PIL inputs; returns the image unchanged when the
    random draw falls below the threshold.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    if random.random() < PROB_THRESHOLD:
        return img
    return ImageEnhance.Brightness(img).enhance(brightness_factor)
def _upgrade_state_dict(state):
    """Migrate an old fairseq checkpoint dict to the current schema, in place.

    Each block below handles one historical format change; the order matters
    because later migrations read keys produced by earlier ones.
    """
    from fairseq import models, registry, tasks
    # very old checkpoints: no optimizer history at all
    if ('optimizer_history' not in state):
        state['optimizer_history'] = [{'criterion_name': 'CrossEntropyCriterion', 'best_loss': state['best_loss']}]
        state['last_optimizer_state'] = state['optimizer']
        del state['optimizer']
        del state['best_loss']
    # move loose epoch/offset/val_loss fields under 'extra_state'
    if (('epoch' in state) and ('extra_state' not in state)):
        state['extra_state'] = {'epoch': state['epoch'], 'batch_offset': state['batch_offset'], 'val_loss': state['val_loss']}
        del state['epoch']
        del state['batch_offset']
        del state['val_loss']
    # optimizer state used to live inside each history entry; hoist the last one
    if ('optimizer' in state['optimizer_history'][(- 1)]):
        state['last_optimizer_state'] = state['optimizer_history'][(- 1)]['optimizer']
        for optim_hist in state['optimizer_history']:
            del optim_hist['optimizer']
    # default optimizer name for checkpoints that predate recording it
    if ('optimizer_name' not in state['optimizer_history'][(- 1)]):
        state['optimizer_history'][(- 1)]['optimizer_name'] = 'FairseqNAG'
    # 'best_loss' became the lr scheduler's 'best' value
    if ('lr_scheduler_state' not in state['optimizer_history'][(- 1)]):
        state['optimizer_history'][(- 1)]['lr_scheduler_state'] = {'best': state['optimizer_history'][(- 1)]['best_loss']}
        del state['optimizer_history'][(- 1)]['best_loss']
    if ('num_updates' not in state['optimizer_history'][(- 1)]):
        state['optimizer_history'][(- 1)]['num_updates'] = 0
    # 'max_positions' was split into source/target variants
    if (hasattr(state['args'], 'max_positions') and (not hasattr(state['args'], 'max_source_positions'))):
        state['args'].max_source_positions = state['args'].max_positions
        state['args'].max_target_positions = state['args'].max_positions
    # synthesize train-iterator bookkeeping from the old epoch/offset fields
    if ('train_iterator' not in state['extra_state']):
        state['extra_state']['train_iterator'] = {'epoch': state['extra_state']['epoch'], 'iterations_in_epoch': state['extra_state'].get('batch_offset', 0)}
    if (not hasattr(state['args'], 'task')):
        state['args'].task = 'translation'
    # raw_text / lazy_load flags were folded into 'dataset_impl'
    if getattr(state['args'], 'raw_text', False):
        state['args'].dataset_impl = 'raw'
    elif getattr(state['args'], 'lazy_load', False):
        state['args'].dataset_impl = 'lazy'
    # fill in any defaults added since the checkpoint was written
    registry.set_defaults(state['args'], tasks.TASK_REGISTRY[state['args'].task])
    registry.set_defaults(state['args'], models.ARCH_MODEL_REGISTRY[state['args'].arch])
    for (registry_name, REGISTRY) in registry.REGISTRIES.items():
        choice = getattr(state['args'], registry_name, None)
        if (choice is not None):
            cls = REGISTRY['registry'][choice]
            registry.set_defaults(state['args'], cls)
    return state
def parse_args(args):
    """Parse command-line arguments (args[0] is the program name).

    Requires --config and --device; raises ValueError for any unrecognized
    argument instead of silently ignoring it.

    Bug fix: the original gave --device both ``required=True`` and
    ``default='cpu'`` — a default can never apply to a required argument, so
    the dead default (which misleadingly suggested --device was optional) is
    removed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, required=True, help='Path to configuration file')
    parser.add_argument('--device', type=str, required=True, help='Training device')
    parsed_args, unknown = parser.parse_known_args(args[1:])
    if unknown:
        raise ValueError(f'Unknown arguments {unknown}')
    return parsed_args
class TestGraphOptmizationFP32(unittest.TestCase):
    """FP32 graph-optimization tests for neural_compressor's Graph_Optimization.

    Each test builds the same small TF1 conv graph (pad -> conv -> relu ->
    maxpool -> conv -> bias-add -> relu6), freezes it, runs Graph_Optimization,
    and asserts that no Cast node was inserted (i.e. the graph stayed FP32).

    NOTE(review): the bare `_random()` statements below look like mangled
    decorators (probably a seeding/`@disable_random()` helper); left untouched.
    NOTE(review): the three tests share an identical graph-building preamble
    that could be factored into a helper.
    """
    _random()
    def test_graph_optimization_without_yaml_without_precisions(self):
        """No yaml, no explicit precisions: result stays FP32 (no Cast nodes)."""
        x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name='input')
        top_relu = tf.nn.relu(x)
        paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
        x_pad = tf.pad(top_relu, paddings, 'CONSTANT')
        conv_weights = tf.compat.v1.get_variable('weight', [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer())
        conv_weights_2 = tf.compat.v1.get_variable('weight_2', [3, 8, 16, 16], initializer=tf.compat.v1.random_normal_initializer())
        conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding='VALID')
        relu = tf.nn.relu(conv)
        max_pool = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding='SAME')
        conv_bias = tf.compat.v1.get_variable('bias', [16], initializer=tf.compat.v1.random_normal_initializer())
        conv_1 = tf.nn.conv2d(max_pool, conv_weights_2, strides=[1, 2, 2, 1], padding='VALID', name='conv1_3')
        conv_bias = tf.math.add(conv_1, conv_bias)
        relu6 = tf.nn.relu6(conv_bias, name='op_to_store')
        out_name = relu6.name.split(':')[0]
        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            # freeze variables into constants so the optimizer gets a GraphDef
            output_graph_def = graph_util.convert_variables_to_constants(sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name])
            from neural_compressor.experimental import Graph_Optimization
            graph_optimizer = Graph_Optimization()
            graph_optimizer.input = 'input'
            graph_optimizer.output = 'op_to_store'
            graph_optimizer.model = output_graph_def
            output_graph = graph_optimizer.fit()
            # a Cast node would indicate an unwanted precision conversion
            found_cast_op = False
            for i in output_graph.graph_def.node:
                if (i.op == 'Cast'):
                    found_cast_op = True
                    break
            precision = graph_optimizer.precisions
            self.assertEqual(found_cast_op, False)
            self.assertEqual(precision, 'fp32')
    _random()
    def test_graph_optimization_without_yaml_with_precisions(self):
        """Explicit precisions='fp32': still no Cast nodes expected."""
        x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name='input')
        top_relu = tf.nn.relu(x)
        paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
        x_pad = tf.pad(top_relu, paddings, 'CONSTANT')
        conv_weights = tf.compat.v1.get_variable('weight', [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer())
        conv_weights_2 = tf.compat.v1.get_variable('weight_2', [3, 8, 16, 16], initializer=tf.compat.v1.random_normal_initializer())
        conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding='VALID')
        relu = tf.nn.relu(conv)
        max_pool = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding='SAME')
        conv_bias = tf.compat.v1.get_variable('bias', [16], initializer=tf.compat.v1.random_normal_initializer())
        conv_1 = tf.nn.conv2d(max_pool, conv_weights_2, strides=[1, 2, 2, 1], padding='VALID', name='conv1_3')
        conv_bias = tf.math.add(conv_1, conv_bias)
        relu6 = tf.nn.relu6(conv_bias, name='op_to_store')
        out_name = relu6.name.split(':')[0]
        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            output_graph_def = graph_util.convert_variables_to_constants(sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name])
            from neural_compressor.experimental import Graph_Optimization
            graph_optimizer = Graph_Optimization()
            graph_optimizer.precisions = 'fp32'
            graph_optimizer.model = output_graph_def
            output_graph = graph_optimizer.fit()
            found_cast_op = False
            for i in output_graph.graph_def.node:
                if (i.op == 'Cast'):
                    found_cast_op = True
                    break
            self.assertEqual(found_cast_op, False)
    _random()
    def test_graph_optimization_fp32_only_with_force_bf16(self):
        """FORCE_BF16=1 alone must not introduce Cast nodes into an FP32 graph."""
        os.environ['FORCE_BF16'] = '1'
        x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name='input')
        top_relu = tf.nn.relu(x)
        paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
        x_pad = tf.pad(top_relu, paddings, 'CONSTANT')
        conv_weights = tf.compat.v1.get_variable('weight', [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer())
        conv_weights_2 = tf.compat.v1.get_variable('weight_2', [3, 8, 16, 16], initializer=tf.compat.v1.random_normal_initializer())
        conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding='VALID')
        relu = tf.nn.relu(conv)
        max_pool = tf.nn.max_pool(relu, ksize=1, strides=[1, 2, 2, 1], padding='SAME')
        conv_bias = tf.compat.v1.get_variable('bias', [16], initializer=tf.compat.v1.random_normal_initializer())
        conv_1 = tf.nn.conv2d(max_pool, conv_weights_2, strides=[1, 2, 2, 1], padding='VALID', name='conv1_3')
        conv_bias = tf.math.add(conv_1, conv_bias)
        relu6 = tf.nn.relu6(conv_bias, name='op_to_store')
        out_name = relu6.name.split(':')[0]
        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            output_graph_def = graph_util.convert_variables_to_constants(sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name])
            from neural_compressor.experimental import Graph_Optimization
            graph_optimizer = Graph_Optimization()
            graph_optimizer.input = 'input'
            graph_optimizer.output = 'op_to_store'
            graph_optimizer.model = output_graph_def
            output_graph = graph_optimizer.fit()
            found_cast_op = False
            for i in output_graph.graph_def.node:
                if (i.op == 'Cast'):
                    found_cast_op = True
                    break
            self.assertEqual(found_cast_op, False)
class make_type_selector():
    """Callable that selects DataFrame columns whose (renamed) names match a regex.

    Column names are normalized via ``get_renamer`` before matching, and the
    selection is mapped back to the original names on return.
    """

    def __init__(self, pattern):
        self.pattern = pattern

    def __call__(self, X_df):
        renamer = get_renamer(X_df)
        renamed_df = X_df.rename(columns=renamer)
        # map normalized names back to the caller's original names
        original_names = {new: old for old, new in renamer.items()}
        matches = make_column_selector(self.pattern)(renamed_df)
        if len(matches) == 0:
            raise_error(f'No columns selected with pattern {self.pattern} in {renamed_df.columns.to_list()}')
        return [original_names.get(col, col) for col in matches]
class DeconvBlock(torch.nn.Module):
    """Conv -> BatchNorm -> LeakyReLU block.

    NOTE(review): despite the name, this uses a stride-2 ``Conv2d`` (which
    halves the spatial size), not a ``ConvTranspose2d`` — confirm upstream
    whether the name or the layer is intended.
    """

    def __init__(self, fin, fout):
        super(DeconvBlock, self).__init__()
        self.conv = torch.nn.Conv2d(fin, fout, kernel_size=4, stride=2, padding=1, bias=False)
        self.bn = torch.nn.BatchNorm2d(fout)
        self.act = torch.nn.LeakyReLU(0.2, inplace=False)

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))
(argument('-q', '--quiet', action='store_true', help='only display numeric ids'), argument('-s', '--start_date', help='start date and time for report. Many formats accepted (optional)', type=str), argument('-e', '--end_date', help='end date and time for report. Many formats accepted (optional)', type=str), argument('-c', '--only_charges', action='store_true', help='Show only charge items.'), argument('-p', '--only_credits', action='store_true', help='Show only credit items.'), usage='vast.py generate pdf_invoices [OPTIONS]')
def generate__pdf_invoices(args):
    """Fetch the user's invoices from the vast.ai API and render them as a PDF.

    NOTE(review): the parenthesized tuple above looks like a mangled CLI
    decorator (e.g. `@parser.command(...)`) — its `usage=` kwarg would be a
    syntax error in a plain tuple; left untouched.
    """
    # vast_pdf is optional; warn (but continue) when it is missing so the
    # table display below still works without PDF support
    try:
        import vast_pdf
    except ImportError:
        print("\nWARNING: The 'vast_pdf' library is not present. This library is used to print invoices in PDF format. If\n you do not need this feature you can ignore this message. To get the library you should download the vast-python\n github repository. Just do ':vast-ai/vast-python.git' and then 'cd vast-python'. Once in that\n directory you can run 'vast.py' and it will have access to 'vast_pdf.py'. The library depends on a Python\n package called Borb to make the PDF files. To install this package do 'pip3 install borb'.\n")
    # fetch and filter the invoice line items
    req_url_inv = apiurl(args, '/users/me/invoices', {'owner': 'me'})
    r_inv = requests.get(req_url_inv)
    r_inv.raise_for_status()
    rows_inv = r_inv.json()['invoices']
    invoice_filter_data = filter_invoice_items(args, rows_inv)
    rows_inv = invoice_filter_data['rows']
    # fetch the current user record for the invoice header
    req_url = apiurl(args, '/users/current', {'owner': 'me'})
    r = requests.get(req_url)
    r.raise_for_status()
    user_blob = r.json()
    user_blob = translate_null_strings_to_blanks(user_blob)
    if args.raw:
        # raw mode: dump JSON instead of rendering
        print(json.dumps(rows_inv, indent=1, sort_keys=True))
        print('Current: ', user_blob)
        print('Raw mode')
    else:
        display_table(rows_inv, invoice_fields)
        # NOTE(review): this call fails with NameError if the vast_pdf import
        # above was skipped — confirm whether that is acceptable here.
        vast_pdf.generate_invoice(user_blob, rows_inv, invoice_filter_data)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.