code stringlengths 101 5.91M |
|---|
class LmdbDataset(Dataset):
    """Text-recognition dataset backed by an LMDB store of image/label pairs.

    Keys follow the convention ``image-%09d`` / ``label-%09d`` with 1-based
    indices. At construction time, samples whose label key is missing or
    whose label exceeds ``opt.batch_max_length`` are filtered out.
    """

    def __init__(self, root, opt, mode='train'):
        self.root = root
        self.opt = opt
        self.mode = mode
        skip = 0  # count of entries dropped because their label key is absent
        # readonly + lock=False lets multiple DataLoader workers read concurrently.
        self.env = lmdb.open(root, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False)
        if not self.env:
            print('cannot open lmdb from %s' % root)
            sys.exit(0)
        with self.env.begin(write=False) as txn:
            self.nSamples = int(txn.get('num-samples'.encode()))
            print(self.nSamples)
            self.filtered_index_list = []
            # LMDB keys are 1-based, hence range(1, n+1) instead of mutating
            # the loop index as the original did.
            for index in range(1, self.nSamples + 1):
                label_key = ('label-%09d' % index).encode()
                raw_label = txn.get(label_key)  # fetch once; the original did two lookups
                if raw_label is None:  # `is None`, not `== None`
                    skip += 1
                    print('skip --- {}\n'.format(skip))
                    continue
                label = raw_label.decode('utf-8')
                # Drop samples whose label is longer than the model can consume.
                if len(label) > opt.batch_max_length:
                    continue
                self.filtered_index_list.append(index)
            self.nSamples = len(self.filtered_index_list)

    def __len__(self):
        return self.nSamples

    def __getitem__(self, index):
        assert index <= len(self), 'index range error'
        index = self.filtered_index_list[index]
        with self.env.begin(write=False) as txn:
            label_key = ('label-%09d' % index).encode()
            label = txn.get(label_key).decode('utf-8')
            img_key = ('image-%09d' % index).encode()
            imgbuf = txn.get(img_key)
            buf = six.BytesIO()
            buf.write(imgbuf)
            buf.seek(0)
            try:
                img = PIL.Image.open(buf).convert('RGBA')
            except IOError:
                # Corrupted image data: substitute a blank image and a dummy
                # label so training can proceed past the bad sample.
                print(f'Corrupted image for {index}')
                img = PIL.Image.new('RGBA', (self.opt.imgW, self.opt.imgH))
                label = '[dummy_label]'
        return (img, label)
def find_nearest_training(question, n_results=10):
    """Return the `n_results` ELI5 training examples nearest to `question`.

    The question is embedded with the retrieval QA model, then looked up
    in the pre-built FAISS index over training questions.
    """
    query_embedding = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    _scores, neighbor_ids = eli5_train_q_index.search(query_embedding, n_results)
    return [eli5_train[int(neighbor)] for neighbor in neighbor_ids[0]]
def tokenize(impressions, tokenizer):
    """Tokenize each report impression, truncating encodings to 512 tokens.

    Empty impressions encode as just [CLS, SEP]. Returns one list of
    input ids per impression row.
    """
    encoded = []
    print('\nTokenizing report impressions. All reports are cut off at 512 tokens.')
    for row in tqdm(range(impressions.shape[0])):
        tokens = tokenizer.tokenize(impressions.iloc[row])
        if not tokens:
            # Nothing to encode: fall back to the bare special tokens.
            encoded.append([tokenizer.cls_token_id, tokenizer.sep_token_id])
            continue
        input_ids = tokenizer.encode_plus(tokens)['input_ids']
        if len(input_ids) > 512:
            # Keep the first 511 ids and re-append the separator token.
            input_ids = input_ids[:511] + [tokenizer.sep_token_id]
        encoded.append(input_ids)
    return encoded
class VAE(BaseHModel):
    """Two-level hierarchical VAE with gated convolutional/dense layers.

    Defines q(z2|x), q(z1|x,z2), p(z1|z2) and p(x|z1,z2). `h_size` is the
    flattened encoder feature size, which depends on the dataset's image
    resolution; `create_model` is presumably invoked by the BaseHModel
    constructor — TODO confirm against the base class.
    """
    def __init__(self, args):
        super(VAE, self).__init__(args)
    def create_model(self, args):
        """Instantiate all encoder/decoder submodules from `args`."""
        # Flattened conv-encoder output size per dataset resolution.
        if (args.dataset_name == 'freyfaces'):
            self.h_size = 210
        elif ((args.dataset_name == 'cifar10') or (args.dataset_name == 'svhn')):
            self.h_size = 384
        else:
            self.h_size = 294
        fc_size = 300
        # q(z2 | x): gated conv stack ending in 6 channels.
        self.q_z_layers = nn.Sequential(GatedConv2d(self.args.input_size[0], 32, 7, 1, 3, no_attention=args.no_attention), GatedConv2d(32, 32, 3, 2, 1, no_attention=args.no_attention), GatedConv2d(32, 64, 5, 1, 2, no_attention=args.no_attention), GatedConv2d(64, 64, 3, 2, 1, no_attention=args.no_attention), GatedConv2d(64, 6, 3, 1, 1, no_attention=args.no_attention))
        self.q_z_mean = NonLinear(self.h_size, self.args.z2_size, activation=None)
        # Hardtanh bounds the log-variance for numerical stability.
        self.q_z_logvar = NonLinear(self.h_size, self.args.z2_size, activation=nn.Hardtanh(min_val=(- 6.0), max_val=2.0))
        # q(z1 | x, z2): separate x and z2 branches joined by a dense layer.
        self.q_z1_layers_x = nn.Sequential(GatedConv2d(self.args.input_size[0], 32, 3, 1, 1, no_attention=args.no_attention), GatedConv2d(32, 32, 3, 2, 1, no_attention=args.no_attention), GatedConv2d(32, 64, 3, 1, 1, no_attention=args.no_attention), GatedConv2d(64, 64, 3, 2, 1, no_attention=args.no_attention), GatedConv2d(64, 6, 3, 1, 1, no_attention=args.no_attention))
        self.q_z1_layers_z2 = nn.Sequential(GatedDense(self.args.z2_size, self.h_size))
        self.q_z1_layers_joint = nn.Sequential(GatedDense((2 * self.h_size), fc_size))
        self.q_z1_mean = NonLinear(fc_size, self.args.z1_size, activation=None)
        self.q_z1_logvar = NonLinear(fc_size, self.args.z1_size, activation=nn.Hardtanh(min_val=(- 6.0), max_val=2.0))
        # p(z1 | z2): prior over the lower latent layer.
        self.p_z1_layers_z2 = nn.Sequential(GatedDense(self.args.z2_size, fc_size, no_attention=args.no_attention), GatedDense(fc_size, fc_size, no_attention=args.no_attention))
        self.p_z1_mean = NonLinear(fc_size, self.args.z1_size, activation=None)
        self.p_z1_logvar = NonLinear(fc_size, self.args.z1_size, activation=nn.Hardtanh(min_val=(- 6.0), max_val=2.0))
        # p(x | z1, z2): z1/z2 branches joined, projected back to image size,
        # then refined by a conv stack.
        self.p_x_layers_z1 = nn.Sequential(GatedDense(self.args.z1_size, fc_size, no_attention=args.no_attention))
        self.p_x_layers_z2 = nn.Sequential(GatedDense(self.args.z2_size, fc_size, no_attention=args.no_attention))
        self.p_x_layers_joint_pre = nn.Sequential(GatedDense((2 * fc_size), np.prod(self.args.input_size), no_attention=args.no_attention))
        self.p_x_layers_joint = nn.Sequential(GatedConv2d(self.args.input_size[0], 64, 3, 1, 1, no_attention=args.no_attention), GatedConv2d(64, 64, 3, 1, 1, no_attention=args.no_attention), GatedConv2d(64, 64, 3, 1, 1, no_attention=args.no_attention), GatedConv2d(64, 64, 3, 1, 1, no_attention=args.no_attention))
        # Output head depends on pixel model: Bernoulli mean for binary data,
        # mean+logvar for gray/continuous/pca data.
        if (self.args.input_type == 'binary'):
            self.p_x_mean = Conv2d(64, 1, 1, 1, 0, activation=nn.Sigmoid())
        elif ((self.args.input_type == 'gray') or (self.args.input_type == 'continuous')):
            self.p_x_mean = Conv2d(64, self.args.input_size[0], 1, 1, 0)
            self.p_x_logvar = Conv2d(64, self.args.input_size[0], 1, 1, 0, activation=nn.Hardtanh(min_val=(- 4.5), max_val=0.0))
        elif (self.args.input_type == 'pca'):
            self.p_x_mean = Conv2d(64, 1, 1, 1, 0)
            self.p_x_logvar = Conv2d(64, self.args.input_size[0], 1, 1, 0, activation=nn.Hardtanh(min_val=(- 4.5), max_val=0.0))
    def forward(self, x):
        """Reshape flat input back to (N, C, H, W), then defer to the base model."""
        x = x.view((- 1), self.args.input_size[0], self.args.input_size[1], self.args.input_size[2])
        return super(VAE, self).forward(x)
def get_feature_names(quantized_features):
    """Derive unique base feature names from quantized feature strings.

    Each input looks like ``<name>_<bin>``; the trailing segment is
    dropped. Features containing a ``source`` segment are skipped, and the
    result is seeded with 'source_identity'. First-appearance order is
    preserved.
    """
    names = ['source_identity']
    for feature in quantized_features:
        parts = feature.split('_')
        if 'source' in parts:
            continue
        base = '_'.join(parts[:-1])
        if base not in names:
            names.append(base)
    return names
def all_reduce_multigpu(tensor_list, op=reduce_op.SUM, group=group.WORLD):
    """All-reduce every tensor in `tensor_list` across the process group.

    Thin wrapper over the deprecated torch.distributed C binding; only
    valid once the process-group backend has been initialized (asserted
    below). Result semantics follow `op` (sum by default).
    """
    assert (torch.distributed.deprecated._initialized == _INITIALIZED_PG), 'collective only supported in process-group mode'
    return torch._C._dist_all_reduce_multigpu(tensor_list, op, group)
class BaseUnsField(BaseAnnDataField):
    """AnnData field that targets a single entry of ``adata.uns``.

    A field with no ``uns_key`` is considered empty, which is only allowed
    when the field is not required.
    """

    _attr_name = _constants._ADATA_ATTRS.UNS

    def __init__(self, registry_key: str, uns_key: Optional[str], required: bool=True) -> None:
        super().__init__()
        if required and uns_key is None:
            raise ValueError('`uns_key` cannot be `None` if `required=True`. Please provide an `uns_key`.')
        self._registry_key = registry_key
        self._attr_key = uns_key
        self._is_empty = uns_key is None

    # NOTE(review): these accessors look like they may be @property in the
    # upstream API — confirm before changing call sites.
    def registry_key(self) -> str:
        """Key under which this field is registered."""
        return self._registry_key

    def attr_name(self) -> str:
        """Name of the AnnData attribute this field reads (``uns``)."""
        return self._attr_name

    def attr_key(self) -> str:
        """Key within ``adata.uns``; ``None`` when the field is empty."""
        return self._attr_key

    def is_empty(self) -> bool:
        """Whether the field has no backing ``uns`` entry."""
        return self._is_empty
class get_numpy_include():
    """Lazy proxy whose str() is the NumPy include directory.

    Importing numpy is deferred until the path is actually needed, the
    usual setup.py trick so metadata can be processed before numpy is
    installed.
    """

    def __str__(self):
        import numpy as np
        return np.get_include()
def _word_accuracy(label_file, pred_file):
    """Average per-sentence word accuracy (%) between two token files.

    Files are read line-by-line in lockstep; each sentence contributes
    100 * matches / max(len(labels), len(preds)), where positions are
    compared after whitespace splitting.
    """
    total_acc = 0.0
    total_count = 0.0
    with codecs.getreader('utf-8')(tf.gfile.GFile(label_file, 'r')) as label_fh:
        with codecs.getreader('utf-8')(tf.gfile.GFile(pred_file, 'r')) as pred_fh:
            for sentence in label_fh:
                labels = sentence.strip().split(' ')
                preds = pred_fh.readline().strip().split(' ')
                # zip stops at the shorter sequence, matching the original
                # range(min(...)) positional comparison.
                match = sum(1.0 for lab, prd in zip(labels, preds) if lab == prd)
                total_acc += 100 * match / max(len(labels), len(preds))
                total_count += 1
    return total_acc / total_count
class objectnet(iData):
    """ObjectNet dataset wrapper with a fixed 200-class order."""

    use_path = True
    # Train-time vs eval-time transform pipelines; no shared transforms.
    train_trsf = build_transform(True, None)
    test_trsf = build_transform(False, None)
    common_trsf = []
    class_order = np.arange(200).tolist()

    def download_data(self):
        """Collect image paths and labels from the local train/test folders."""
        train_root = './data/objectnet/train/'
        test_root = './data/objectnet/test/'
        train_folder = datasets.ImageFolder(train_root)
        test_folder = datasets.ImageFolder(test_root)
        self.train_data, self.train_targets = split_images_labels(train_folder.imgs)
        self.test_data, self.test_targets = split_images_labels(test_folder.imgs)
class MSRVTTQADataset(BaseDataset):
    """MSR-VTT video question-answering dataset.

    Reads jsonl metadata under ./meta_data/msrvtt plus a shared
    answer->label vocabulary built from the training split. Note that the
    'val' split is mapped to the msrvtt_qa_test name below.
    """
    def __init__(self, *args, split='', **kwargs):
        assert (split in ['train', 'val', 'test'])
        self.split = split
        self.metadata = None
        self.ans_lab_dict = None
        if (split == 'train'):
            names = ['msrvtt_qa_train']
        elif (split == 'val'):
            # NOTE(review): 'val' deliberately reuses the test metadata name —
            # confirm this is intended rather than a leftover.
            names = ['msrvtt_qa_test']
        elif (split == 'test'):
            names = ['msrvtt_qa_test']
        super().__init__(*args, **kwargs, names=names, text_column_name='questions', remove_duplicate=False)
        self.names = names
        self._load_metadata()
    def _load_metadata(self):
        """Load the answer vocabulary and per-split question metadata."""
        metadata_dir = './meta_data/msrvtt'
        split_files = {'train': 'msrvtt_qa_train.jsonl', 'val': 'msrvtt_qa_val.jsonl', 'test': 'msrvtt_qa_test.jsonl'}
        # The answer->label map is always built from the train split.
        answer_fp = os.path.join(metadata_dir, 'msrvtt_train_ans2label.json')
        with open(answer_fp, 'r') as JSON:
            self.ans_lab_dict = json.load(JSON)
        for name in self.names:
            # Split name is the last underscore-delimited token, e.g.
            # 'msrvtt_qa_train' -> 'train'.
            split = name.split('_')[(- 1)]
            target_split_fp = split_files[split]
            metadata = pd.read_json(os.path.join(metadata_dir, target_split_fp), lines=True)
            if (self.metadata is None):
                self.metadata = metadata
            else:
                self.metadata.update(metadata)
        print('total {} samples for {}'.format(len(self.metadata), self.names))
    def get_text(self, sample):
        """Tokenize the question text; returns (raw text, encoding)."""
        text = sample['question']
        encoding = self.tokenizer(text, padding='max_length', truncation=True, max_length=self.max_text_len, return_special_tokens_mask=True)
        return (text, encoding)
    def get_answer_label(self, sample):
        """Map the answer string to a label id and a one-hot score vector.

        Unknown answers get label -100; note that indexing `scores[-100]`
        then sets the 100th slot from the end of the vector.
        """
        text = sample['answer']
        ans_total_len = (len(self.ans_lab_dict) + 1)
        try:
            ans_label = self.ans_lab_dict[text]
        except KeyError:
            ans_label = (- 100)
        scores = np.zeros(ans_total_len).astype(int)
        scores[ans_label] = 1
        return (text, ans_label, scores)
    def __getitem__(self, index):
        """Return a dict of video tensor, tokenized question and answer labels."""
        sample = self.metadata.iloc[index]
        video_tensor = self.get_video(sample)
        text = self.get_text(sample)
        qid = index
        if (self.split != 'test'):
            (answers, labels, scores) = self.get_answer_label(sample)
        else:
            # The test split carries no ground-truth answers.
            answers = list()
            labels = list()
            scores = list()
        return {'video': video_tensor, 'text': text, 'vqa_answer': answers, 'vqa_labels': labels, 'vqa_scores': scores, 'qid': qid}
    def __len__(self):
        return len(self.metadata)
class SICEValidation(SICE):
    """Validation split of the SICE dataset with deterministic transforms.

    Uses a center crop (instead of random augmentation) so validation
    results are reproducible.
    """
    def __init__(self, dir_data, **kwargs):
        super().__init__(dir_data, split='val', **kwargs)
        # Deterministic pipeline: center crop at the configured size, then
        # conversion to the LDM tensor format.
        self.transforms = tf.Compose([CenterCrop(size=self.crop_size), ImageToLDMTensor()])
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
    """Register constructors and methods of ns3::PacketMetadata::ItemIterator
    on the PyBindGen class wrapper (generated-binding boilerplate)."""
    # Copy constructor.
    cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
    cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
    cls.add_method('HasNext', 'bool', [], is_const=True)
    cls.add_method('Next', 'ns3::PacketMetadata::Item', [])
    return
def RudinBall():
    """Return Rudin's non-shellable triangulation of a 3-ball.

    The complex is given by its 41 tetrahedral facets on 14 vertices;
    the facet list is a fixed literal and must not be edited.
    """
    return UniqueSimplicialComplex([[1, 9, 2, 5], [1, 10, 2, 5], [1, 10, 5, 11], [1, 10, 7, 11], [1, 13, 5, 11], [1, 13, 7, 11], [2, 10, 3, 6], [2, 11, 3, 6], [2, 11, 6, 12], [2, 11, 8, 12], [2, 14, 6, 12], [2, 14, 8, 12], [3, 11, 4, 7], [3, 12, 4, 7], [3, 12, 5, 9], [3, 12, 7, 9], [3, 13, 5, 9], [3, 13, 7, 9], [4, 9, 1, 8], [4, 9, 6, 10], [4, 9, 8, 10], [4, 12, 1, 8], [4, 14, 6, 10], [4, 14, 8, 10], [9, 10, 2, 5], [9, 10, 2, 6], [9, 10, 5, 11], [9, 10, 11, 12], [9, 13, 5, 11], [10, 11, 3, 6], [10, 11, 3, 7], [10, 11, 6, 12], [10, 14, 6, 12], [11, 12, 4, 7], [11, 12, 4, 8], [11, 12, 7, 9], [11, 13, 7, 9], [12, 9, 1, 5], [12, 9, 1, 8], [12, 9, 8, 10], [12, 14, 8, 10]], name='Rudin ball')
def _Workspace_fetch_int8_blob(ws, name):
    """Fetch blob `name` from workspace `ws` and wrap it as an Int8Tensor.

    Int8 blobs come back as a tuple; anything else means the caller
    should have used the plain fetch_blob API instead.
    """
    fetched = ws.fetch_blob(name)
    assert isinstance(fetched, tuple), 'You are not fetching an Int8Blob {}. Please use fetch_blob'.format(StringifyBlobName(name))
    return Int8Tensor(*fetched)
def cleanse_nans_from_metrics(metrics, test_name, selector_name):
    """Remove steps whose test-metric value is NaN from both metric series.

    For each run, the steps of `test_name` whose 'values' entry is NaN are
    collected and filtered out of both the `selector_name` and `test_name`
    rows (steps and values alike). Relies on the pandas-ply extensions
    (`ply_where`, the `X` symbol) being monkey-patched onto DataFrame.
    """
    # Per-run array of steps to drop: those whose value is NaN in the test metric.
    metrics['removed_steps'] = pd.DataFrame.from_dict(metrics.ply_where((X.name == test_name)).apply((lambda x: np.array(x['steps'])[np.isnan(x['values'])]), axis=1))
    # Map run_id -> removed steps so selector rows can be filtered per run.
    removed_steps = metrics.ply_where((X.name == test_name))
    removed_steps = dict(zip(removed_steps['run_id'], removed_steps['removed_steps']))
    # Filter both series with np.isin against each run's removed steps.
    selector_steps = metrics.ply_where((X.name == selector_name)).apply((lambda x: np.array(x['steps'])[(~ np.isin(np.array(x['steps']), removed_steps[x['run_id']]))]), axis=1)
    selector_values = metrics.ply_where((X.name == selector_name)).apply((lambda x: np.array(x['values'])[(~ np.isin(np.array(x['steps']), removed_steps[x['run_id']]))]), axis=1)
    test_steps = metrics.ply_where((X.name == test_name)).apply((lambda x: np.array(x['steps'])[(~ np.isin(np.array(x['steps']), removed_steps[x['run_id']]))]), axis=1)
    test_values = metrics.ply_where((X.name == test_name)).apply((lambda x: np.array(x['values'])[(~ np.isin(np.array(x['steps']), removed_steps[x['run_id']]))]), axis=1)
    # Write the cleansed arrays back in place, then drop the helper column.
    metrics.loc[((metrics.name == selector_name), 'steps')] = selector_steps
    metrics.loc[((metrics.name == selector_name), 'values')] = selector_values
    metrics.loc[((metrics.name == test_name), 'steps')] = test_steps
    metrics.loc[((metrics.name == test_name), 'values')] = test_values
    metrics = metrics.drop('removed_steps', axis=1)
    return metrics
class Generator(object):
    """Generates synthetic dialogs between a simulated user and system.

    BUG FIX: `pack_msg`, `pprint` and `print_stats` were defined without
    `self` yet invoked as bound methods (e.g. `self.pack_msg(...)`), which
    shifted every argument by one and raised TypeError; they are now
    staticmethods. `pprint` also opened its output file in binary mode
    ('wb') while writing str / using json.dump — now text mode ('w').
    """

    @staticmethod
    def pack_msg(speaker, utt, **kwargs):
        """Bundle one dialog turn into a dict of speaker/utt plus extras."""
        resp = {k: v for (k, v) in kwargs.items()}
        resp['speaker'] = speaker
        resp['utt'] = utt
        return resp

    @staticmethod
    def pprint(dialogs, in_json, domain_spec, output_file=None):
        """Write `dialogs` to `output_file` (or stdout) as JSON or plain text."""
        f = (sys.stdout if (output_file is None) else open(output_file, 'w'))
        if in_json:
            combo = {'dialogs': dialogs, 'meta': domain_spec.to_dict()}
            json.dump(combo, f, indent=2)
        else:
            for (idx, d) in enumerate(dialogs):
                f.write(('## DIALOG %d ##\n' % idx))
                for turn in d:
                    (speaker, utt, actions) = (turn['speaker'], turn['utt'], turn['actions'])
                    # Prefer the surface utterance; fall back to dumped actions.
                    if utt:
                        str_actions = utt
                    else:
                        str_actions = ' '.join([a.dump_string() for a in actions])
                    # User turns additionally carry the ASR confidence.
                    if (speaker == 'USR'):
                        f.write(('%s(%f)-> %s\n' % (speaker, turn['conf'], str_actions)))
                    else:
                        f.write(('%s -> %s\n' % (speaker, str_actions)))
        if (output_file is not None):
            f.close()

    @staticmethod
    def print_stats(dialogs):
        """Print dialog count, length statistics and KB-query usage ratios."""
        print(('%d dialogs' % len(dialogs)))
        all_lens = [len(d) for d in dialogs]
        print('Avg len {} Max Len {}'.format(np.mean(all_lens), np.max(all_lens)))
        total_cnt = 0.0
        kb_cnt = 0.0
        ratio = []
        for d in dialogs:
            local_cnt = 0.0
            for t in d:
                total_cnt += 1
                if ('QUERY' in t['utt']):
                    kb_cnt += 1
                    local_cnt += 1
            ratio.append((local_cnt / len(d)))
        print((kb_cnt / total_cnt))
        print(np.mean(ratio))

    def gen(self, domain, complexity, num_sess=1):
        """Simulate `num_sess` dialogs in `domain`; returns a list of dialogs.

        Each dialog alternates system/user turns; user actions pass through
        noisy action and word channels before reaching the system.
        """
        dialogs = []
        action_channel = ActionChannel(domain, complexity)
        word_channel = WordChannel(domain, complexity)
        sys_nlg = SysNlg(domain, complexity)
        usr_nlg = UserNlg(domain, complexity)
        bar = progressbar.ProgressBar(max_value=num_sess)
        for i in range(num_sess):
            bar.update(i)
            usr = User(domain, complexity)
            sys = System(domain, complexity)
            noisy_usr_as = []
            dialog = []
            conf = 1.0
            while True:
                # System responds to the (noisy) user actions.
                (sys_r, sys_t, sys_as, sys_s) = sys.step(noisy_usr_as, conf)
                (sys_utt, sys_str_as) = sys_nlg.generate_sent(sys_as, domain=domain)
                dialog.append(self.pack_msg('SYS', sys_utt, actions=sys_str_as, domain=domain.name, state=sys_s))
                if sys_t:  # terminal system turn ends the session
                    break
                (usr_r, usr_t, usr_as) = usr.step(sys_as)
                # Corrupt user actions/utterance through the noise channels.
                (noisy_usr_as, conf) = action_channel.transmit2sys(usr_as)
                usr_utt = usr_nlg.generate_sent(noisy_usr_as)
                noisy_usr_utt = word_channel.transmit2sys(usr_utt)
                dialog.append(self.pack_msg('USR', noisy_usr_utt, actions=noisy_usr_as, conf=conf, domain=domain.name))
            dialogs.append(dialog)
        return dialogs

    def gen_corpus(self, name, domain_spec, complexity_spec, size):
        """Generate `size` dialogs, dump them as JSON under `name`/ and print stats."""
        if (not os.path.exists(name)):
            os.mkdir(name)
        domain = Domain(domain_spec)
        complexity = Complexity(complexity_spec)  # renamed: was shadowing builtin `complex`
        corpus = self.gen(domain, complexity, num_sess=size)
        json_file = '{}-{}-{}.{}'.format(domain_spec.name, complexity_spec.__name__, size, 'json')
        json_file = os.path.join(name, json_file)
        self.pprint(corpus, True, domain_spec, json_file)
        self.print_stats(corpus)
def perform_val(model, HEAD1, HEAD_test1, cfg, feature_dim, pair_a, pair_b):
    """Evaluate pairwise same-cluster accuracy on the test split.

    Runs `model` over the test graph to get node features, scores sampled
    index pairs (pair_a[i], pair_b[i]) with `HEAD_test1` (weights copied
    from HEAD1), thresholds at 0.5 and compares against ground-truth label
    equality. Returns the average accuracy over all scored pairs.

    NOTE(review): `feature_dim` and `test_lb2idxs` are unused here —
    confirm against callers before removing.
    """
    (test_lb2idxs, test_idx2lb) = read_meta(cfg.test_data['label_path'])
    test_inst_num = len(test_idx2lb)
    # Freeze dropout/batch-norm for evaluation.
    model.eval()
    HEAD1.eval()
    HEAD_test1.eval()
    # Propagate model kwargs onto the test-data config before building the dataset.
    for (k, v) in cfg.model['kwargs'].items():
        setattr(cfg.test_data, k, v)
    dataset = build_dataset(cfg.model['type'], cfg.test_data)
    features = torch.FloatTensor(dataset.features)
    adj = sparse_mx_to_torch_sparse_tensor(dataset.adj)
    labels = torch.LongTensor(dataset.gt_labels)
    if cfg.cuda:
        features = features.cuda()
        adj = adj.cuda()
        labels = labels.cuda()
        HEAD_test1 = HEAD_test1.cuda()
    test_data = [features, adj, labels]
    # Use the current training-head weights for the test head (non-strict copy).
    HEAD_test1.load_state_dict(HEAD1.state_dict(), False)
    with torch.no_grad():
        output_feature = model(test_data)
        sum_acc = 0
        # Score pairs in patch_num chunks to bound memory usage.
        patch_num = 10
        patch_size = int((test_inst_num / patch_num))
        for i in range(patch_num):
            score = HEAD_test1(output_feature[pair_a[(i * patch_size):((i + 1) * patch_size)]], output_feature[pair_b[(i * patch_size):((i + 1) * patch_size)]], no_list=True)
            pre_labels = (score > 0.5).long()
            # Ground truth: the pair is positive iff both nodes share a label.
            gt_labels = (labels[pair_a[(i * patch_size):((i + 1) * patch_size)]] == labels[pair_b[(i * patch_size):((i + 1) * patch_size)]]).long()
            acc = (pre_labels == gt_labels).long().sum()
            sum_acc += acc
        avg_acc = (float(sum_acc) / test_inst_num)
    return avg_acc
class TestUnmaskOp(serial.SerializedTestCase):
    """Serialized hypothesis test for the BooleanUnmask operator.

    BUG FIX: the hypothesis parameter spec was a bare parenthesized
    keyword-argument tuple (a syntax error) — restored as the stripped
    `@serial.given(...)` decorator.
    """

    @serial.given(N=st.integers(min_value=2, max_value=20), dtype=st.sampled_from([np.bool_, np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.float16, np.float32, np.float64]), **hu.gcs)
    def test(self, N, dtype, gc, dc):
        """Split N values into random masked pieces; unmasking must rebuild them."""
        if (dtype is np.bool_):
            all_value = np.random.choice(a=[True, False], size=N)
        else:
            all_value = (np.random.rand(N) * N).astype(dtype)
        # Partition a random permutation of indices into M+1 pieces.
        M = np.random.randint(1, N)
        split = sorted(np.random.randint(1, N, size=M))
        indices = np.random.permutation(N)
        pieces = np.split(indices, split)

        def ref(*args, **kwargs):
            # The unmasked output must equal the original dense values.
            return (all_value,)

        inputs = []
        inputs_names = []
        for (i, piece) in enumerate(pieces):
            piece.sort()
            mask = np.zeros(N, dtype=np.bool_)
            mask[piece] = True
            values = all_value[piece]
            inputs.extend([mask, values])
            inputs_names.extend([('mask%d' % i), ('value%d' % i)])
        op = core.CreateOperator('BooleanUnmask', inputs_names, 'output')
        self.assertReferenceChecks(gc, op, inputs, ref)
        self.assertDeviceChecks(dc, op, inputs, [0])
def TStrUtil_SplitSentences(ChA, SentenceV):
    """SWIG wrapper: split the text in `ChA` into sentences stored in `SentenceV`.

    Delegates directly to the native SNAP binding; see TStrUtil::SplitSentences.
    """
    return _snap.TStrUtil_SplitSentences(ChA, SentenceV)
class Path(object):
    """Static lookup of dataset and model directories.

    BUG FIX: both methods were defined without `self` (or a decorator), so
    calling them on an instance raised TypeError; they are now
    staticmethods, which keeps `Path.db_root_dir(...)` class-level calls
    working unchanged.
    """

    @staticmethod
    def db_root_dir(database):
        """Return the root directory for `database` ('pascal' or 'sbd').

        Raises NotImplementedError for unknown database names.
        """
        if (database == 'pascal'):
            return '/path/to/PASCAL/VOC2012'
        elif (database == 'sbd'):
            return '/path/to/SBD/'
        else:
            print('Database {} not available.'.format(database))
            raise NotImplementedError

    @staticmethod
    def models_dir():
        """Return the directory where pretrained models are stored."""
        return 'models/'
def _validate_pruning_amount_init(amount):
if (not isinstance(amount, numbers.Real)):
raise TypeError('Invalid type for amount: {}. Must be int or float.'.format(amount))
if ((isinstance(amount, numbers.Integral) and (amount < 0)) or ((not isinstance(amount, numbers.Integral)) and ((float(amount) > 1.0) or (float(amount) < 0.0)))):
raise ValueError('amount={} should either be a float in the range [0, 1] or a non-negative integer'.format(amount)) |
def register_Ns3LteMacSapProviderTransmitPduParameters_methods(root_module, cls):
    """Register constructors and public attributes of
    ns3::LteMacSapProvider::TransmitPduParameters on the PyBindGen wrapper
    (generated-binding boilerplate)."""
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::LteMacSapProvider::TransmitPduParameters const &', 'arg0')])
    cls.add_instance_attribute('componentCarrierId', 'uint8_t', is_const=False)
    cls.add_instance_attribute('harqProcessId', 'uint8_t', is_const=False)
    cls.add_instance_attribute('layer', 'uint8_t', is_const=False)
    cls.add_instance_attribute('lcid', 'uint8_t', is_const=False)
    cls.add_instance_attribute('pdu', 'ns3::Ptr< ns3::Packet >', is_const=False)
    cls.add_instance_attribute('rnti', 'uint16_t', is_const=False)
    return
def main(hparams):
    """Seed all RNGs, build the AffWild2 VA model, optionally restore
    weights, and launch PyTorch-Lightning training.

    Two restore modes: `fusion_checkpoint` non-strictly loads stream
    weights into a fresh model, while `checkpoint` restores a full
    Lightning checkpoint (including hyperparameters).
    """
    # Full determinism: fix cuDNN behavior plus every RNG seed.
    torch.backends.cudnn.deterministic = True
    random.seed(hparams.seed)
    torch.manual_seed(hparams.seed)
    torch.cuda.manual_seed(hparams.seed)
    np.random.seed(hparams.seed)
    model = AffWild2VA(hparams)
    if hparams.fusion_checkpoint:
        # Load onto CPU regardless of where the checkpoint was saved.
        checkpoint = torch.load(hparams.fusion_checkpoint, map_location=(lambda storage, loc: storage))
        # strict=False: only the individual-stream weights need to match.
        model.load_state_dict(checkpoint['state_dict'], strict=False)
        print('Loaded pretrained weights for individual streams')
    elif hparams.checkpoint:
        model = model.load_from_checkpoint(hparams.checkpoint)
    trainer = Trainer(early_stop_callback=None, check_val_every_n_epoch=1, gradient_clip_val=1.0, default_save_path=hparams.checkpoint_path, max_epochs=hparams.max_nb_epochs, gpus=hparams.gpus, nb_gpu_nodes=hparams.nodes, distributed_backend=('ddp' if hparams.distributed else 'dp'))
    trainer.fit(model)
class LFHImportanceMetric(BaseImportanceMetric):
    """Label-Free-Hessian channel-importance metric for structured pruning.

    Per output channel, the score combines an approximated Hessian trace,
    the channel kernel's squared L2 norm, and the per-channel parameter
    count; scores are additionally aggregated over SIMD groups.

    BUG FIX in `_get_kernel_node_oc_info`: after unwrapping
    `kernel_attr = kernel_attr[0]`, the original indexed it again
    (`kernel_attr[0]`), passing only the first *character* of the
    attribute name to `get_weights_by_keys`.
    """

    def __init__(self, graph: Graph, representative_data_gen: Callable, fw_impl: PruningFrameworkImplementation, pruning_config: PruningConfig, fw_info: FrameworkInfo):
        self.float_graph = graph
        self.representative_data_gen = representative_data_gen
        self.fw_impl = fw_impl
        self.pruning_config = pruning_config
        self.fw_info = fw_info
        # Caches populated by _get_entry_node_to_score and reused later.
        self._entry_node_to_hessian_score = {}
        self._entry_node_count_oc_nparams = {}
        self._entry_node_to_simd_score = {}

    def get_entry_node_to_simd_score(self, entry_nodes: List[BaseNode]) -> Tuple[(Dict[(BaseNode, np.ndarray)], Dict[(BaseNode, List[np.ndarray])])]:
        """Return per-SIMD-group importance scores and the group indices.

        Groups are formed from the per-channel scores; each group's score
        is hessian_sum * squared-L2-norm / nparams over the group.
        """
        entry_node_to_score = self._get_entry_node_to_score(entry_nodes)
        grouped_indices = self._compute_simd_groups_indices(entry_node_to_score)
        _squared_l2_norm_by_groups = self._get_squaredl2norm(entry_nodes, grouped_indices)
        entry_node_to_simd_score = {}
        for (node, hessian_score) in self._entry_node_to_hessian_score.items():
            group_hessian_score = [np.sum(hessian_score[g]) for g in grouped_indices[node]]
            nparams_by_group = np.asarray([np.sum(self._entry_node_count_oc_nparams[node][g]) for g in grouped_indices[node]])
            entry_node_to_simd_score[node] = ((np.asarray(group_hessian_score) * _squared_l2_norm_by_groups[node]) / nparams_by_group)
        return (entry_node_to_simd_score, grouped_indices)

    def _get_entry_node_to_score(self, entry_nodes: List[BaseNode]) -> Dict[(BaseNode, np.ndarray)]:
        """Compute the normalized per-output-channel LFH score for each entry node."""
        hessian_info_service = HessianInfoService(graph=self.float_graph, representative_dataset=self.representative_data_gen, fw_impl=self.fw_impl)
        nodes_scores = []
        for node in entry_nodes:
            _request = TraceHessianRequest(mode=HessianMode.WEIGHTS, granularity=HessianInfoGranularity.PER_OUTPUT_CHANNEL, target_node=node)
            _scores_for_node = hessian_info_service.fetch_hessian(_request, required_size=self.pruning_config.num_score_approximations)
            nodes_scores.append(_scores_for_node)
        # Average the stochastic Hessian approximations per node.
        self._entry_node_to_hessian_score = {node: np.mean(scores, axis=0) for (node, scores) in zip(entry_nodes, nodes_scores)}
        self._entry_node_count_oc_nparams = self._count_oc_nparams(entry_nodes=entry_nodes)
        _entry_node_l2_oc_norm = self._get_squaredl2norm(entry_nodes=entry_nodes)
        _entry_node_to_score = self._normalize_lfh_scores(_entry_node_l2_oc_norm)
        return _entry_node_to_score

    def _compute_simd_groups_indices(self, entry_node_to_score: Dict[(BaseNode, np.ndarray)]) -> Dict[(BaseNode, List[np.ndarray])]:
        """Group channel indices of each prunable node into SIMD-sized groups."""
        channel_grouping = ChannelGrouping(prunable_nodes=list(entry_node_to_score.keys()), fw_info=self.fw_info)
        channel_grouping.group_scores_by_simd_groups(entry_node_to_score)
        grouped_indices = channel_grouping.simd_groups_indices
        return grouped_indices

    def _normalize_lfh_scores(self, entry_node_to_squaredl2norm: Dict[(BaseNode, np.ndarray)]) -> Dict[(BaseNode, np.ndarray)]:
        """Scale Hessian scores by squared L2 norms and per-channel param counts."""
        new_scores = {}
        for (node, hessian_score_vector) in self._entry_node_to_hessian_score.items():
            new_scores[node] = ((hessian_score_vector * entry_node_to_squaredl2norm[node]) / self._entry_node_count_oc_nparams[node])
        return new_scores

    def _count_oc_nparams(self, entry_nodes: List[BaseNode]) -> Dict[(BaseNode, np.ndarray)]:
        """Per node, build an array with the parameter count of each output channel."""
        node_channel_params = {}
        for entry_node in entry_nodes:
            (kernel_attr, num_oc, oc_axis) = self._get_kernel_node_oc_info(entry_node)
            kernel = entry_node.get_weights_by_keys(kernel_attr)
            # Channels are homogeneous, so every channel has the same count.
            params_per_channel = (np.prod(kernel.shape) / kernel.shape[oc_axis])
            num_params_array = np.full(kernel.shape[oc_axis], params_per_channel)
            node_channel_params[entry_node] = num_params_array
        return node_channel_params

    def _get_squaredl2norm(self, entry_nodes: List[BaseNode], grouped_indices: Dict[(BaseNode, List[np.ndarray])]=None) -> Dict[(BaseNode, np.ndarray)]:
        """Squared L2 norm per output channel (or per SIMD group when
        `grouped_indices` is given) of each node's kernel."""
        node_l2_channel_norm = {}
        for entry_node in entry_nodes:
            (kernel_attr, num_oc, oc_axis) = self._get_kernel_node_oc_info(entry_node)
            kernel = entry_node.get_weights_by_keys(kernel_attr)
            channels = np.split(kernel, indices_or_sections=num_oc, axis=oc_axis)
            if grouped_indices:
                # Norm over each SIMD group's concatenated channels instead.
                concatenated_tensors = self._concatenate_tensors_by_indices(channels, grouped_indices[entry_node])
                channels = concatenated_tensors
            l2_norms = np.asarray([(np.linalg.norm(c.flatten(), ord=2) ** 2) for c in channels])
            node_l2_channel_norm[entry_node] = l2_norms
        return node_l2_channel_norm

    def _get_kernel_node_oc_info(self, entry_node: BaseNode) -> Tuple[(str, int, int)]:
        """Return (kernel attribute name, #output channels, output-channel axis)."""
        kernel_attr = self.fw_info.get_kernel_op_attributes(entry_node.type)
        if (len(kernel_attr) != 1):
            Logger.error(f'Expected to found a single attribute but found {len(kernel_attr)} for node {entry_node}')
        kernel_attr = kernel_attr[0]
        (oc_axis, _) = self.fw_info.kernel_channels_mapping.get(entry_node.type)
        if ((oc_axis is None) or (int(oc_axis) != oc_axis)):
            Logger.error(f'Expected output channel axis to be an integer but is {oc_axis} for node {entry_node}')
        # BUG FIX: `kernel_attr` is already the attribute name after the
        # unwrap above; the original `kernel_attr[0]` passed its first character.
        num_oc = entry_node.get_weights_by_keys(kernel_attr).shape[oc_axis]
        return (kernel_attr, num_oc, oc_axis)

    def _concatenate_tensors_by_indices(self, channels: List[np.ndarray], index_list: List[np.ndarray]) -> List[np.ndarray]:
        """Concatenate channel tensors according to each group's index array."""
        concatenated_tensors = []
        for index_array in index_list:
            tensors_to_concatenate = [channels[i] for i in index_array]
            concatenated_tensor = np.concatenate(tensors_to_concatenate)
            concatenated_tensors.append(concatenated_tensor)
        return concatenated_tensors
def identity_block(input_tensor, kernel_size, filters, stage, block, dilation=1):
    """ResNet identity block: 1x1 -> kxk (optionally dilated) -> 1x1 convs
    plus a skip connection.

    `filters` is the (f1, f2, f3) channel triple; `stage` and `block` build
    the layer names. The input must already carry f3 channels, since the
    shortcut is added without projection.
    """
    f1, f2, f3 = filters
    # BN axis depends on the backend's channel ordering.
    bn_axis = 3 if K.image_data_format() == 'channels_last' else 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(f1, (1, 1), name=conv_name_base + '2a', use_bias=False)(input_tensor)
    x = BN(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(f2, kernel_size, padding='same', name=conv_name_base + '2b', use_bias=False, dilation_rate=dilation)(x)
    x = BN(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(f3, (1, 1), name=conv_name_base + '2c', use_bias=False)(x)
    x = BN(axis=bn_axis, name=bn_name_base + '2c')(x)

    # Identity shortcut, then final activation.
    x = layers.add([x, input_tensor])
    return Activation('relu')(x)
def readable_size(n):
    """Format `n` with a K/M/G suffix (powers of 1000), two decimals.

    Values below 1000 get no suffix; values at or above 10^9 stop at 'G'.
    """
    suffixes = ['K', 'M', 'G']
    value, suffix = n, ''
    for power, candidate in enumerate(suffixes, start=1):
        scaled = n / (1000 ** power)
        if scaled < 1:
            break
        value, suffix = scaled, candidate
    return '%.2f%s' % (value, suffix)
class TestBleuMetricSpec(TestTextMetricSpec):
    """Tests the BLEU metric spec against known hypothesis/reference pairs."""
    def test_bleu(self):
        """An exact match scores 100; two substituted words drop it to 69.19."""
        metric_spec = BleuMetricSpec({})
        return self._test_metric_spec(metric_spec=metric_spec, hyps=['A B C D E F', 'A B C D E F'], refs=['A B C D E F', 'A B A D E F'], expected_scores=[100.0, 69.19])
def random_selection(dataset, subset_size):
    """Return a sorted random sample of `subset_size` line indices of `dataset`.

    BUG FIX: the original used Python 2's `xrange` (NameError on Python 3)
    and never closed the file it opened to count lines.
    """
    with open(dataset, 'r') as f:
        num_lines = sum(1 for _ in f)
    return sorted(random.sample(range(num_lines), subset_size))
class MLP_train():
    """Train/evaluate wrapper around an MLP classifier `net`.

    Handles label <-> contiguous-index mapping, a simple Adam/SGD training
    loop and batched prediction.

    BUG FIXES: `init_weights` lacked `self` yet was an instance method
    (now a staticmethod usable via `net.apply`); in `fit`, `running_loss`
    and the batch counter were never updated, so the progress log could
    never fire; `label2idx` had an unreachable second `return`; the
    deprecated `xavier_uniform` was replaced by `xavier_uniform_`.
    """

    def __init__(self, net, epochs=10, optimizer='Adam', momentum=0.9, lr=0.001, num_labels=3):
        assert (optimizer in ['Adam', 'SGD'])
        self.lr = lr
        self.net = net
        self.epochs = epochs
        self.momentum = momentum
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optimizer
        self.num_labels = num_labels

    @staticmethod
    def init_weights(layer):
        """Xavier-initialize Linear layers; intended for `net.apply(...)`."""
        if (type(layer) == nn.Linear):
            torch.nn.init.xavier_uniform_(layer.weight)
            layer.bias.data.fill_(0.01)

    def label_mapping(self, labels):
        """Build the (label, index) pairs used by label2idx/idx2label.

        NOTE: `set` iteration order is arbitrary, so the mapping is not
        deterministic across runs.
        """
        self.label_map = list(zip(set(labels), range(len(set(labels)))))

    def label2idx(self, labels):
        """Map raw labels to contiguous class indices."""
        d = dict(self.label_map)
        return list(map((lambda x: d[x]), labels))

    def idx2label(self, idx):
        """Map class indices back to their raw labels."""
        return list(map((lambda x: self.label_map[x][0]), idx))

    def fit(self, x_train, y_train):
        """Train `self.net` on (x_train, y_train); returns self."""
        label = np.asarray(y_train).astype(np.int_)
        self.label_mapping(label)
        label = self.label2idx(label)
        dataset = Classification_data(x_train, label)
        data_loader = torch.utils.data.DataLoader(dataset, batch_size=64, shuffle=True, num_workers=5)
        criterion = nn.CrossEntropyLoss()
        optimize = getattr(optim, self.optimizer)(self.net.parameters(), lr=self.lr)
        for epoch in range(self.epochs):
            running_loss = 0.0
            for i, (data, label) in enumerate(data_loader):
                optimize.zero_grad()
                output = self.net(data)
                loss = criterion(output, label)
                loss.backward()
                optimize.step()
                running_loss += loss.item()
                # Log the mean loss every 2000 mini-batches.
                if ((i % 2000) == 1999):
                    print(('[%d, %5d] loss: %.3f' % ((epoch + 1), (i + 1), (running_loss / 2000))))
                    running_loss = 0.0
        print('Finished Training')
        return self

    def predict(self, x_test):
        """Predict labels for `x_test`; returns a plain Python list."""
        dataset = Classification_data(x_test)
        testloader = torch.utils.data.DataLoader(dataset, batch_size=250, num_workers=5, shuffle=False)
        predicted_list = []
        with torch.no_grad():
            for data in testloader:
                outputs = self.net(data)
                (_, predicted) = torch.max(outputs.data, 1)
                # Map argmax indices back to the original label space.
                predicted = torch.IntTensor(self.idx2label(predicted))
                predicted_list.extend(predicted.numpy().tolist())
        return predicted_list
class PopREO(BaseMetric):
def __init__(self, recommendations, config, params, eval_objects):
super().__init__(recommendations, config, params, eval_objects)
self._cutoff = self._evaluation_objects.cutoff
self._relevance = self._evaluation_objects.relevance.binary_relevance
self._short_head = set(self._evaluation_objects.pop.get_short_head())
self._long_tail = set(self._evaluation_objects.pop.get_long_tail())
self._train = self._evaluation_objects.data.train_dict
self._num = []
self._den = []
def name():
return 'PopREO'
def __user_pop_reo(self, user_recommendations, cutoff, long_tail, short_head, u_train, user_relevant_items):
recommended_items = set([i for (i, _) in user_recommendations[:cutoff] if (i in user_relevant_items)])
num_h = len((recommended_items & short_head))
num_t = len((recommended_items & long_tail))
den_h = len(((short_head & user_relevant_items) - u_train))
den_t = len(((long_tail & user_relevant_items) - u_train))
return (num_h, num_t, den_h, den_t)
def eval(self):
for (u, u_r) in self._recommendations.items():
if len(self._relevance.get_user_rel(u)):
(num_h, num_t, den_h, den_t) = self.__user_pop_reo(u_r, self._cutoff, self._long_tail, self._short_head, set(self._train[u].keys()), set(self._relevance.get_user_rel(u)))
self._num.append([num_h, num_t])
self._den.append([den_h, den_t])
self._num = np.sum(np.array(self._num), axis=0)
self._den = np.sum(np.array(self._den), axis=0)
pr = (self._num / self._den)
return (np.std(pr) / np.mean(pr)) |
def check_yaml_vs_script(hparam_file, script_file):
    """Verify every hyperparameter in a YAML file is used by its script.

    Returns True when all YAML variables — minus the standard run-option
    keys — are referenced in `script_file`; otherwise prints one error per
    unused variable and returns False. Missing files also return False.
    """
    print('Checking %s...' % hparam_file)
    # Both files must exist before any comparison is attempted.
    for path in (hparam_file, script_file):
        if not os.path.exists(path):
            print('File %s not found!' % (path,))
            return False
    var_lst = get_yaml_var(hparam_file)
    detected_vars_train = detect_script_vars(script_file, var_lst)
    # Run options are consumed by the launcher, not the script itself.
    default_run_opt_keys = ['debug', 'debug_batches', 'debug_epochs', 'device', 'cpu', 'data_parallel_backend', 'distributed_launch', 'distributed_backend', 'find_unused_parameters', 'jit_module_keys', 'compile_module_keys', '--compile_mode', '--compile_using_fullgraph', '--compile_using_dynamic_shape_tracing', 'auto_mix_prec', 'max_grad_norm', 'nonfinite_patience', 'noprogressbar', 'ckpt_interval_minutes', 'grad_accumulation_factor', 'optimizer_step_limit']
    unused_vars = list(set(var_lst) - set(detected_vars_train) - set(default_run_opt_keys))
    for unused_var in unused_vars:
        print('\tERROR: variable "%s" not used in %s!' % (unused_var, script_file))
    return len(unused_vars) == 0
class FormDataParser(object):
    """Parses form data (multipart or urlencoded) from a WSGI stream.

    BUG FIX: the two parser methods were preceded by bare `_stream` lines
    — a NameError at class-creation time and clearly the residue of the
    stripped `@exhaust_stream` decorators (which drain the input stream
    after parsing); the decorators are restored below.
    """

    def __init__(self, stream_factory=None, charset='utf-8', errors='replace', max_form_memory_size=None, max_content_length=None, cls=None, silent=True):
        if (stream_factory is None):
            stream_factory = default_stream_factory
        self.stream_factory = stream_factory
        self.charset = charset
        self.errors = errors
        self.max_form_memory_size = max_form_memory_size
        self.max_content_length = max_content_length
        if (cls is None):
            cls = MultiDict
        self.cls = cls
        # When silent, parse errors yield empty results instead of raising.
        self.silent = silent

    def get_parse_func(self, mimetype, options):
        """Return the parser bound to `mimetype`, or None if unsupported."""
        return self.parse_functions.get(mimetype)

    def parse_from_environ(self, environ):
        """Parse the form data the WSGI environ points to."""
        content_type = environ.get('CONTENT_TYPE', '')
        content_length = get_content_length(environ)
        (mimetype, options) = parse_options_header(content_type)
        return self.parse(get_input_stream(environ), mimetype, content_length, options)

    def parse(self, stream, mimetype, content_length, options=None):
        """Parse `stream`; returns (stream, form MultiDict, files MultiDict).

        Raises RequestEntityTooLarge when the declared length exceeds
        `max_content_length`. Unknown mimetypes (or silenced parse errors)
        yield empty form/files containers.
        """
        if ((self.max_content_length is not None) and (content_length is not None) and (content_length > self.max_content_length)):
            raise exceptions.RequestEntityTooLarge()
        if (options is None):
            options = {}
        parse_func = self.get_parse_func(mimetype, options)
        if (parse_func is not None):
            try:
                return parse_func(self, stream, mimetype, content_length, options)
            except ValueError:
                if (not self.silent):
                    raise
        return (stream, self.cls(), self.cls())

    @exhaust_stream
    def _parse_multipart(self, stream, mimetype, content_length, options):
        """Parse a multipart/form-data body; requires a boundary option."""
        parser = MultiPartParser(self.stream_factory, self.charset, self.errors, max_form_memory_size=self.max_form_memory_size, cls=self.cls)
        boundary = options.get('boundary')
        if (boundary is None):
            raise ValueError('Missing boundary')
        if isinstance(boundary, text_type):
            boundary = boundary.encode('ascii')
        (form, files) = parser.parse(stream, boundary, content_length)
        return (stream, form, files)

    @exhaust_stream
    def _parse_urlencoded(self, stream, mimetype, content_length, options):
        """Parse an application/x-www-form-urlencoded body."""
        if ((self.max_form_memory_size is not None) and (content_length is not None) and (content_length > self.max_form_memory_size)):
            raise exceptions.RequestEntityTooLarge()
        form = url_decode_stream(stream, self.charset, errors=self.errors, cls=self.cls)
        return (stream, form, self.cls())

    # Mimetype dispatch table; values are the (decorated) unbound functions.
    parse_functions = {'multipart/form-data': _parse_multipart, 'application/x-www-form-urlencoded': _parse_urlencoded, 'application/x-url-encoded': _parse_urlencoded}
def skip_backend(backend):
    """Return (creating and caching on first use) the skip-context for `backend`.

    The context object is memoized on ``backend.__ua_cache__['skip']`` so
    repeated calls hand back the same instance.
    """
    if not hasattr(backend, '__ua_cache__'):
        backend.__ua_cache__ = {}
    elif 'skip' in backend.__ua_cache__:
        return backend.__ua_cache__['skip']
    ctx = _SkipBackendContext(backend)
    backend.__ua_cache__['skip'] = ctx
    return ctx
class BiasParameter(message.Message):
    """Generated protocol-buffer message class for BiasParameter.

    Standard protobuf code-generator boilerplate: the metaclass materializes
    the concrete message type from the DESCRIPTOR at class-creation time.
    """
    # Python-2 style metaclass hook used by the protobuf reflection machinery.
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BIASPARAMETER
class GConvLSTM(torch.nn.Module):
    """Chebyshev graph-convolutional LSTM cell.

    Each gate (input, forget, output) and the cell candidate is computed by a
    pair of ChebConv layers -- one over the input X and one over the hidden
    state H -- plus, for the gates, an elementwise "peephole" weight on the
    cell state C and a bias.

    NOTE(review): `id` defaults to -1 but the constructor asserts id >= 0, so
    the keyword is effectively mandatory; it selects which slot of
    batch.node_states / batch.node_cells this cell reads and writes.
    """
    def __init__(self, in_channels: int, out_channels: int, K: int=7, normalization: str='sym', id: int=(- 1), bias: bool=True):
        super(GConvLSTM, self).__init__()
        # The default id of -1 deliberately fails this assert: callers must pass id.
        assert (id >= 0), 'kwarg id is required.'
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.K = K
        self.normalization = normalization
        self.bias = bias
        self._create_parameters_and_layers()
        self._set_parameters()
        self.id = id
    def _create_input_gate_parameters_and_layers(self):
        # conv over X, conv over H, peephole weight w_c_i, bias b_i.
        self.conv_x_i = ChebConv(in_channels=self.in_channels, out_channels=self.out_channels, K=self.K, normalization=self.normalization, bias=self.bias)
        self.conv_h_i = ChebConv(in_channels=self.out_channels, out_channels=self.out_channels, K=self.K, normalization=self.normalization, bias=self.bias)
        self.w_c_i = Parameter(torch.Tensor(1, self.out_channels))
        self.b_i = Parameter(torch.Tensor(1, self.out_channels))
    def _create_forget_gate_parameters_and_layers(self):
        self.conv_x_f = ChebConv(in_channels=self.in_channels, out_channels=self.out_channels, K=self.K, normalization=self.normalization, bias=self.bias)
        self.conv_h_f = ChebConv(in_channels=self.out_channels, out_channels=self.out_channels, K=self.K, normalization=self.normalization, bias=self.bias)
        self.w_c_f = Parameter(torch.Tensor(1, self.out_channels))
        self.b_f = Parameter(torch.Tensor(1, self.out_channels))
    def _create_cell_state_parameters_and_layers(self):
        # Cell candidate has no peephole weight, only a bias.
        self.conv_x_c = ChebConv(in_channels=self.in_channels, out_channels=self.out_channels, K=self.K, normalization=self.normalization, bias=self.bias)
        self.conv_h_c = ChebConv(in_channels=self.out_channels, out_channels=self.out_channels, K=self.K, normalization=self.normalization, bias=self.bias)
        self.b_c = Parameter(torch.Tensor(1, self.out_channels))
    def _create_output_gate_parameters_and_layers(self):
        self.conv_x_o = ChebConv(in_channels=self.in_channels, out_channels=self.out_channels, K=self.K, normalization=self.normalization, bias=self.bias)
        self.conv_h_o = ChebConv(in_channels=self.out_channels, out_channels=self.out_channels, K=self.K, normalization=self.normalization, bias=self.bias)
        self.w_c_o = Parameter(torch.Tensor(1, self.out_channels))
        self.b_o = Parameter(torch.Tensor(1, self.out_channels))
    def _create_parameters_and_layers(self):
        self._create_input_gate_parameters_and_layers()
        self._create_forget_gate_parameters_and_layers()
        self._create_cell_state_parameters_and_layers()
        self._create_output_gate_parameters_and_layers()
    def _set_parameters(self):
        # Glorot init for peephole weights; zero init for all biases.
        glorot(self.w_c_i)
        glorot(self.w_c_f)
        glorot(self.w_c_o)
        zeros(self.b_i)
        zeros(self.b_f)
        zeros(self.b_c)
        zeros(self.b_o)
    def _set_hidden_state(self, X, H):
        # Lazily create a zero hidden state matching X's node count/device.
        if (not isinstance(H, torch.Tensor)):
            H = torch.zeros(X.shape[0], self.out_channels).to(X.device)
        return H
    def _set_cell_state(self, X, C):
        if (not isinstance(C, torch.Tensor)):
            C = torch.zeros(X.shape[0], self.out_channels).to(X.device)
        return C
    def _calculate_input_gate(self, X, edge_index, edge_weight, H, C):
        # I = sigmoid(conv(X) + conv(H) + w_i * C + b_i)
        I = self.conv_x_i(X, edge_index, edge_weight)
        I = (I + self.conv_h_i(H, edge_index, edge_weight))
        I = (I + (self.w_c_i * C))
        I = (I + self.b_i)
        I = torch.sigmoid(I)
        return I
    def _calculate_forget_gate(self, X, edge_index, edge_weight, H, C):
        F = self.conv_x_f(X, edge_index, edge_weight)
        F = (F + self.conv_h_f(H, edge_index, edge_weight))
        F = (F + (self.w_c_f * C))
        F = (F + self.b_f)
        F = torch.sigmoid(F)
        return F
    def _calculate_cell_state(self, X, edge_index, edge_weight, H, C, I, F):
        # C = F * C + I * tanh(conv(X) + conv(H) + b_c)
        T = self.conv_x_c(X, edge_index, edge_weight)
        T = (T + self.conv_h_c(H, edge_index, edge_weight))
        T = (T + self.b_c)
        T = torch.tanh(T)
        C = ((F * C) + (I * T))
        return C
    def _calculate_output_gate(self, X, edge_index, edge_weight, H, C):
        # Output gate peeks at the *updated* cell state.
        O = self.conv_x_o(X, edge_index, edge_weight)
        O = (O + self.conv_h_o(H, edge_index, edge_weight))
        O = (O + (self.w_c_o * C))
        O = (O + self.b_o)
        O = torch.sigmoid(O)
        return O
    def _calculate_hidden_state(self, O, C):
        H = (O * torch.tanh(C))
        return H
    def _forward(self, X: torch.FloatTensor, edge_index: torch.LongTensor, edge_weight: torch.FloatTensor=None, H: torch.FloatTensor=None, C: torch.FloatTensor=None) -> (torch.FloatTensor, torch.FloatTensor):
        """One recurrent step; returns the updated (H, C) pair."""
        H = self._set_hidden_state(X, H)
        C = self._set_cell_state(X, C)
        I = self._calculate_input_gate(X, edge_index, edge_weight, H, C)
        F = self._calculate_forget_gate(X, edge_index, edge_weight, H, C)
        C = self._calculate_cell_state(X, edge_index, edge_weight, H, C, I, F)
        O = self._calculate_output_gate(X, edge_index, edge_weight, H, C)
        H = self._calculate_hidden_state(O, C)
        return (H, C)
    def forward(self, batch):
        """Run one step over a graph batch, updating the recurrent state stored
        at index self.id of batch.node_states / batch.node_cells, and replacing
        batch.node_feature with the new hidden state."""
        if hasattr(batch, 'edge_weight'):
            edge_weight = batch.edge_weight
        else:
            edge_weight = None
        (H, C) = self._forward(X=batch.node_feature, edge_index=batch.edge_index, edge_weight=edge_weight, H=batch.node_states[self.id], C=batch.node_cells[self.id])
        batch.node_states[self.id] = H
        batch.node_cells[self.id] = C
        batch.node_feature = H
        return batch
def main(arguments):
    """Load an experiment config from JSON, run the experiment, and pickle the
    resulting data to the path named by the config's 'data_file' entry."""
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c', help='Path to the file containing the parameters for the experiment', type=str, default='temp_cfg/0.json')
    parsed = parser.parse_args(arguments)
    with open(parsed.c, 'r') as cfg_fp:
        params = json.load(cfg_fp)
    results = expr(params)
    with open(params['data_file'], 'wb+') as out_fp:
        pickle.dump(results, out_fp)
class ResNet101(nn.Module):
    """ResNet-101 classifier whose stem conv is replaced to accept `n_inputs`
    channels (e.g. multispectral imagery) and whose head maps the 2048-d
    pooled feature to `numCls` logits."""

    def __init__(self, n_inputs=12, numCls=17):
        super().__init__()
        backbone = models.resnet101(pretrained=False)
        # Replace the stock 3-channel stem with an n_inputs-channel conv.
        self.conv1 = nn.Conv2d(n_inputs, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
        self.encoder = nn.Sequential(
            self.conv1,
            backbone.bn1,
            backbone.relu,
            backbone.maxpool,
            backbone.layer1,
            backbone.layer2,
            backbone.layer3,
            backbone.layer4,
            backbone.avgpool,
        )
        self.FC = nn.Linear(2048, numCls)
        # Project-specific weight initializers applied over the whole module.
        self.apply(weights_init_kaiming)
        self.apply(fc_init_weights)

    def forward(self, x):
        features = self.encoder(x)
        flat = features.view(features.size(0), -1)
        return self.FC(flat)
@pytest.fixture(scope='session')
def all_filenames(root_data_dir) -> list[Path]:
    """Session-scoped fixture: every file under each configured data subdirectory.

    NOTE(review): the original first line read bare `(scope='session')`, a
    SyntaxError -- presumably a pytest.fixture decorator whose head was lost;
    confirm against the original source.
    """
    all_filenames = []
    for d in (root_data_dir / _d for _d in _dirnames):
        # BUG FIX: d.iterdir() already yields full paths, so the original
        # `d / fname` doubled the directory prefix for relative roots.
        all_filenames += list(d.iterdir())
    return all_filenames
def check_EZ(EZ0, EZ, S):
    """Sanity check: EZ0 plus EZ summed over axes 1 and 3 must reproduce S.

    On mismatch, prints a diagnostic and drops into the pdb debugger.
    """
    total = EZ0 + EZ.sum(axis=(1, 3))
    if np.allclose(S, total):
        return
    print('_check_Z failed. Zsum does not add up to S!')
    import pdb
    pdb.set_trace()
def train_printer():
    """Print a progress snapshot: epoch/iteration, train and test loss, and
    batch accuracies (reads the surrounding scope's training globals)."""
    header_lines = [
        f'Epoch {epoch}, Iteration {iter_counter}',
        f'Train Set Loss: {loss_hist[counter]:.2f}',
        f'Test Set Loss: {test_loss_hist[counter]:.2f}',
    ]
    for line in header_lines:
        print(line)
    print_batch_accuracy(data, targets, train=True)
    print_batch_accuracy(test_data, test_targets, train=False)
    print('\n')
class ParamViewer():
    """View a selected subset of parameters inside a flat parameter tensor.

    Builds index maps (via tensorviewers) for the full parameter set and for
    the `par_selection` subset, then gathers the selected entries out of a
    flattened data tensor. Tensors are re-materialized for the active pyhf
    backend whenever the 'tensorlib_changed' event fires.
    """
    def __init__(self, shape, par_map, par_selection):
        default_backend = pyhf.default_backend
        # shape is (batch, n_params) when batched, else (n_params,).
        batch_size = (shape[0] if (len(shape) > 1) else None)
        fullsize = default_backend.product(default_backend.astensor(shape))
        # Flat 0..N-1 index tensor reshaped to the parameter layout.
        flat_indices = default_backend.astensor(range(int(fullsize)), dtype='int')
        self._all_indices = default_backend.reshape(flat_indices, shape)
        self.allpar_viewer = _tensorviewer_from_parmap(par_map, batch_size)
        # Sizes of the selected parameter slices, in selection order.
        self.selected_viewer = _tensorviewer_from_sizes([(par_map[s]['slice'].stop - par_map[s]['slice'].start) for s in par_selection], par_selection, batch_size)
        self._precompute()
        events.subscribe('tensorlib_changed')(self._precompute)
    def _precompute(self):
        # Rebuild backend tensors against the currently active tensorlib.
        (tensorlib, _) = get_backend()
        self.all_indices = tensorlib.astensor(self._all_indices)
        (self.index_selection, self.stitched, self.indices_concatenated) = extract_index_access(self.allpar_viewer, self.selected_viewer, self.all_indices)
    def get(self, data, indices=None):
        """Gather the selected parameter values out of `data`; None when the
        selection is empty."""
        if (not self.index_selection):
            return None
        (tensorlib, _) = get_backend()
        indices = (indices if (indices is not None) else self.indices_concatenated)
        return tensorlib.gather(tensorlib.reshape(data, ((- 1),)), indices)
_processor('blip2_image_eval')
class Blip2ImageEvalProcessor(BlipImageBaseProcessor):
    """Eval-time image preprocessing for BLIP-2: bicubic resize to a square
    `image_size`, tensor conversion, then (optional) normalization.

    NOTE(review): the bare `_processor('blip2_image_eval')` call above looks
    like the residue of a mangled registry decorator -- confirm against the
    original source; it is left untouched here.
    """
    def __init__(self, image_size=224, mean=None, std=None, do_normalize=True):
        super().__init__(mean=mean, std=std, do_normalize=do_normalize)
        self.transform = transforms.Compose([transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC), transforms.ToTensor(), self.normalize])
    def __call__(self, item):
        return self.transform(item)
    @classmethod
    def from_config(cls, cfg=None):
        """Build a processor from an (optional) OmegaConf config.

        BUG FIX: this method takes `cls` and calls `cls(...)` but was missing
        its @classmethod decorator, so `cls` would have been bound to an
        instance when called normally.
        """
        if cfg is None:
            cfg = OmegaConf.create()
        image_size = cfg.get('image_size', 224)
        mean = cfg.get('mean', None)
        std = cfg.get('std', None)
        do_normalize = cfg.get('do_normalize', True)
        return cls(image_size=image_size, mean=mean, std=std, do_normalize=do_normalize)
def generate_module_content_repr(m: GfxRuntime140, module_name: str, cgraph_kernel_names: Set[str]) -> List[str]:
    """Emit C++ source lines for an AotModule wrapper struct.

    Generates a struct (named `AotModule_<module_name>` when a name is given,
    else `AotModule`) with static load/create factories, one getter per kernel
    in the module's metadata (kernels that belong to compute graphs, i.e.
    appear in `cgraph_kernel_names`, are skipped), and one getter per compute
    graph. Returns the lines without trailing newlines.
    """
    out = []
    if module_name:
        module_name = f'AotModule_{module_name}'
    else:
        module_name = 'AotModule'
    # Struct header plus load/create factory boilerplate ({{ escapes to a literal brace).
    out += [f'struct {module_name} : public ti::AotModule {{', f'  explicit {module_name}(TiRuntime runtime, TiAotModule aot_module, bool should_destroy = true) :', '    ti::AotModule(runtime, aot_module, should_destroy) {}', '', f'  static {module_name} load(TiRuntime runtime, const char *path) {{', '    TiAotModule aot_module = ti_load_aot_module(runtime, path);', f'    return {module_name}(runtime, aot_module, true);', '  }', f'  static {module_name} load(TiRuntime runtime, const std::string &path) {{', f'    return {module_name}::load(runtime, path.c_str());', '  }', f'  static {module_name} create(TiRuntime runtime, const void *tcm, size_t size) {{', '    TiAotModule aot_module = ti_create_aot_module(runtime, tcm, size);', f'    return {module_name}(runtime, aot_module, true);', '  }', f'  static {module_name} create(TiRuntime runtime, const std::vector<uint8_t> &tcm) {{', f'    return {module_name}::create(runtime, tcm.data(), tcm.size());', '  }', '']
    for kernel in m.metadata.kernels.values():
        # Kernels that participate in compute graphs get no standalone getter.
        if (kernel.name in cgraph_kernel_names):
            continue
        out += [f'  Kernel_{kernel.name} get_kernel_{kernel.name}() const {{', f'    return Kernel_{kernel.name}(runtime_, ti_get_aot_module_kernel(aot_module(), "{kernel.name}"));', '  }']
    for graph in m.graphs:
        out += [f'  ComputeGraph_{graph.name} get_compute_graph_{graph.name}() const {{', f'    return ComputeGraph_{graph.name}(runtime_, ti_get_aot_module_compute_graph(aot_module(), "{graph.name}"));', '  }']
    out += ['};', '']
    return out
def quantize_weights_op(quant_scale, max_value):
    """Build a single grouped TF op that assigns a quantized copy of every
    trainable variable back onto itself."""
    max_f = float(max_value)
    assign_ops = [var.assign(quantize(var, quant_scale, max_f)) for var in tf.trainable_variables()]
    return tf.group(*assign_ops)
class Polygon(Element):
    """Polygon element built from a list of (x, y) points `LP`.

    BUG FIX: the original passed Priority=str(Pr), but `Pr` is undefined
    (NameError at construction); the `Priority` parameter is clearly the
    intended value.
    """
    def __init__(self, LP, Priority=1, elevation=254, normdir=2, coordsystem=0):
        Element.__init__(self, 'Polygon', Priority=str(Priority), Elevation=str(elevation), NormDir=str(normdir), CoordSystem=str(coordsystem))
        # Append one Vertex child per point.
        for P in LP:
            V = Vertex(x=P[0], y=P[1])
            self.append(V)
def _unique1d(ar, return_index=False, return_inverse=False, return_counts=False):
ar = np.asanyarray(ar).flatten()
optional_indices = (return_index or return_inverse)
if optional_indices:
perm = ar.argsort(kind=('mergesort' if return_index else 'quicksort'))
aux = ar[perm]
else:
ar.sort()
aux = ar
mask = np.empty(aux.shape, dtype=np.bool_)
mask[:1] = True
mask[1:] = (aux[1:] != aux[:(- 1)])
ret = (aux[mask],)
if return_index:
ret += (perm[mask],)
if return_inverse:
imask = (np.cumsum(mask) - 1)
inv_idx = np.empty(mask.shape, dtype=np.intp)
inv_idx[perm] = imask
ret += (inv_idx,)
if return_counts:
idx = np.concatenate((np.nonzero(mask) + ([mask.size],)))
ret += (np.diff(idx),)
return ret |
def simCreateTexture(fileName, options):
    """Create a texture from an image file via the C API and return its handle.

    The five NULL/0 trailing arguments leave the optional size/position
    parameters at their API defaults.
    """
    encoded_name = fileName.encode('ascii')
    texture_handle = lib.simCreateTexture(encoded_name, options, ffi.NULL, ffi.NULL, ffi.NULL, 0, ffi.NULL, ffi.NULL, ffi.NULL)
    _check_return(texture_handle)
    return texture_handle
def _test_classification_loader():
    """Smoke test: load the PSML classification split and print one batch's
    shapes from each loader."""
    loader = TimeSeriesLoader('classification', root='/meladyfs/newyork/nanx/Datasets/PSML')
    train_loader, test_loader = loader.load(batch_size=32, shuffle=True)
    print(f'train_loader: {len(train_loader)}')
    for batch in train_loader:
        feature, label = batch
        print(f'feature: {feature.shape}')
        print(f'label: {label.shape}')
        break
    print(f'test_loader: {len(test_loader)}')
    for batch in test_loader:
        print(f'feature: {batch.shape}')
        break
    return
class BasicIterativeMethod(ProjectedGradientDescent):
    """Basic Iterative Method (BIM): PGD with no random initialization.

    Thin wrapper that forwards every argument to ProjectedGradientDescent
    while pinning num_random_init=0.
    """
    attack_params = ProjectedGradientDescent.attack_params
    def __init__(self, classifier, norm=np.inf, eps=0.3, eps_step=0.1, max_iter=100, targeted=False, batch_size=1, distribution=None):
        # num_random_init=0 is the only difference from plain PGD.
        super(BasicIterativeMethod, self).__init__(classifier, norm=norm, eps=eps, eps_step=eps_step, max_iter=max_iter, targeted=targeted, num_random_init=0, batch_size=batch_size, distribution=distribution)
def remove_file(f_list):
    """Remove one file or a list of files, delegating each to silent_remove."""
    targets = f_list if isinstance(f_list, list) else [f_list]
    for target in targets:
        silent_remove(target)
def main():
    """Run the baseline pipeline for the 'AD' target: load data, then fit and
    evaluate the classifiers."""
    baseline = Baseline('AD')
    features, labels = baseline.load_data()
    baseline.get_classifiers(features, labels)
class ContinueStatNode(StatNode):
    """AST statement node for `continue`.

    Generates a goto to the enclosing loop's continue label; reports an error
    when no such label exists (continue outside a loop).
    """
    child_attrs = []
    # A `continue` terminates its basic block.
    is_terminator = True
    def analyse_expressions(self, env):
        # Nothing to analyse for a bare continue.
        return self
    def generate_execution_code(self, code):
        if (not code.continue_label):
            error(self.pos, 'continue statement not inside loop')
            return
        code.mark_pos(self.pos)
        code.put_goto(code.continue_label)
def test_crf():
    """Compare our CRF against torchcrf on random emissions: per-sequence
    log-likelihoods and decoded best paths must agree."""
    n_batch, n_steps, n_tags = 10, 20, 5
    emissions = torch.randn(n_batch, n_steps, n_tags)
    tags = torch.randint(0, n_tags, (n_batch, n_steps))
    lengths = torch.randint(1, n_steps, (n_batch,))
    # True where a position is padding (past the sequence length).
    pad_mask = torch.arange(n_steps).unsqueeze(0).expand(n_batch, -1) >= lengths.unsqueeze(-1)
    reference = torchcrf.CRF(n_tags, batch_first=True)
    ref_llh = reference(emissions, tags, (~pad_mask).type(torch.uint8), reduction='none')
    ref_paths = reference.decode(emissions, (~pad_mask).type(torch.uint8))
    ours = CRF(n_tags, batch_first=True)
    # Copy the reference model's parameters so both CRFs are identical.
    ours.sos_transitions.data = reference.start_transitions.data
    ours.eos_transitions.data = reference.end_transitions.data
    ours.transitions.data = reference.transitions.data
    losses = ours(emissions, tags, pad_mask)
    paths = ours.decode(emissions, pad_mask)
    # Our model returns losses (negative log-likelihoods), hence the sum.
    assert (ref_llh + losses).abs().max() < 0.0001
    assert paths == ref_paths
def unpack_and_unpad(lstm_out, reorder):
    """Unpack a PackedSequence, trim each sequence to its true length, and
    return the sequences permuted by `reorder`."""
    padded, lengths = pad_packed_sequence(lstm_out, batch_first=True)
    trimmed = [padded[i][:n] for i, n in enumerate(lengths)]
    return [trimmed[i] for i in reorder]
def cvsecs(*args):
    """Convert a time given as (seconds,), (minutes, seconds) or
    (hours, minutes, seconds) into seconds as a float.

    Any other argument count falls through and returns None.
    """
    n = len(args)
    if n == 1:
        return float(args[0])
    if n == 2:
        return 60 * float(args[0]) + float(args[1])
    if n == 3:
        return 3600 * float(args[0]) + 60 * float(args[1]) + float(args[2])
def generate_job(throughputs, reference_worker_type='v100', rng=None, job_id=None, fixed_job_duration=None, generate_multi_gpu_jobs=False, generate_multi_priority_jobs=False, run_dir=None, scale_factor_generator_func=_generate_scale_factor, duration_generator_func=_generate_duration, scale_factor_rng=None, duration_rng=None, SLO_rng=None, always_generate_scale_factor=True):
    """Randomly sample a synthetic Job.

    Draws a scale factor and duration from the supplied generator functions
    (dedicated RNGs fall back to the shared `rng`), picks a compatible
    template from the module-level JobTable, derives the step count from the
    reference worker's throughput table, and optionally assigns a priority
    weight and an SLO.
    """
    if (rng is None):
        rng = random.Random()
    if (scale_factor_rng is None):
        scale_factor_rng = rng
    if (duration_rng is None):
        duration_rng = rng
    job_template = None
    if always_generate_scale_factor:
        scale_factor = scale_factor_generator_func(scale_factor_rng)
    else:
        # Pick the template first; only distributed templates may get a
        # multi-GPU scale factor.
        job_template = rng.choice(JobTable)
        if (generate_multi_gpu_jobs and job_template.distributed):
            scale_factor = scale_factor_generator_func(scale_factor_rng)
        else:
            scale_factor = 1
    if fixed_job_duration:
        run_time = fixed_job_duration
    else:
        run_time = duration_generator_func(duration_rng)
    # Multi-GPU jobs must be explicitly enabled.
    if (not generate_multi_gpu_jobs):
        scale_factor = 1
    assert (run_time > 0)
    assert ((scale_factor >= 1) and (scale_factor <= 8))
    if (job_template is None):
        # Re-draw until the template is compatible with the chosen scale factor.
        while True:
            job_template = rng.choice(JobTable)
            if ((scale_factor == 1) or ((scale_factor > 1) and job_template.distributed)):
                break
    job_type = job_template.model
    command = job_template.command
    if (run_dir is not None):
        # Templates needing a data dir expect run_dir twice in their format string.
        if job_template.needs_data_dir:
            command = (command % (run_dir, run_dir))
        else:
            command = (command % run_dir)
    key = (job_type, scale_factor)
    assert (key in throughputs[reference_worker_type])
    # Steps = duration * standalone ('null' colocation) throughput on the reference worker.
    num_steps = (run_time * throughputs[reference_worker_type][key]['null'])
    assert (num_steps > 0)
    priority_weight = 1.0
    if generate_multi_priority_jobs:
        # 20% of jobs get elevated priority when enabled.
        r = rng.uniform(0, 1)
        if (0.0 <= r <= 0.2):
            priority_weight = 5.0
    SLO = None
    if (SLO_rng is not None):
        # SLO multiplier drawn roughly uniformly from {1.2, 2.0, 10.0}.
        r = SLO_rng.uniform(0, 1)
        if (0.0 <= r < 0.33):
            SLO = 1.2
        elif (0.33 <= r < 0.67):
            SLO = 2.0
        else:
            SLO = 10.0
    job = Job(job_id=job_id, job_type=job_type, command=command, working_directory=job_template.working_directory, num_steps_arg=job_template.num_steps_arg, total_steps=num_steps, duration=run_time, scale_factor=scale_factor, priority_weight=priority_weight, SLO=SLO, needs_data_dir=job_template.needs_data_dir)
    return job
def check_resume(opt, resume_iter):
    """When resuming from a saved training state, point every network's
    pretrain path at the checkpoint for `resume_iter` (unless listed under
    ignore_resume_networks) and downgrade 'params_ema' param keys to 'params'."""
    if not opt['path']['resume_state']:
        return
    networks = [k for k in opt if k.startswith('network_')]
    # Warn once if any explicit pretrain path would be overridden.
    if any(opt['path'].get(f'pretrain_{net}') is not None for net in networks):
        print('pretrain_network path will be ignored during resuming.')
    for net in networks:
        pretrain_key = f'pretrain_{net}'
        short_name = net.replace('network_', '')
        ignored = opt['path'].get('ignore_resume_networks')
        if ignored is None or net not in ignored:
            opt['path'][pretrain_key] = osp.join(opt['path']['models'], f'net_{short_name}_{resume_iter}.pth')
            print(f"Set {pretrain_key} to {opt['path'][pretrain_key]}")
    # EMA weights are not saved in resume checkpoints; fall back to raw params.
    for pk in [k for k in opt['path'] if k.startswith('param_key')]:
        if opt['path'][pk] == 'params_ema':
            opt['path'][pk] = 'params'
            print(f'Set {pk} to params')
def main():
    """Score generated images (FID or SSIM), then save and plot the scores.

    Single-output models are scored once; epoch-checkpointed models are scored
    per saved epoch, with -1 recorded for missing epoch directories.
    """
    args = parseArgs()
    if args.metric == 'fid':
        metric = FID()
    elif args.metric == 'ssim':
        metric = SSIM()
    epoch_range = range(args.start_epoch, args.epochs + 1, args.save_ckpt_freq)
    if args.model in ['neural_style_transfer', 'fast_neural_style_transfer']:
        real_dir = os.path.join(args.output_path, 'real')
        fake_dir = os.path.join(args.output_path, 'fake')
        score_list = [metric.evaluate(real_dir, fake_dir)]
    elif args.model in ['pix2pix', 'cyclegan']:
        score_list = []
        for epoch in epoch_range:
            epoch_path = os.path.join(args.output_path, 'Epoch {}'.format(epoch))
            if not os.path.isdir(epoch_path):
                # Placeholder for epochs without saved outputs.
                score_list.append(-1)
                continue
            real_dir = os.path.join(epoch_path, 'real')
            fake_dir = os.path.join(epoch_path, 'fake')
            score_list.append(metric.evaluate(real_dir, fake_dir))
    save_scores(score_list, score_path=(args.metric + '_' + args.model + '.txt'))
    visualize_scores(score_list, metric=args.metric, model=args.model, epoch_range=epoch_range)
class Repository(Common):
    """Filesystem-backed repository of per-server query/plan statistics.

    Layout: <base_dir>/<REPOSITORY_DIR>/<serverId>/{tables,grouping,
    regression,formatted params} subdirectories, each with its own seqid
    stat file, plus a hosts configuration (CONF_FILE) listing serverIds.
    """
    def __init__(self, base_dir='.', log_level=Log.error):
        self.set_base_dir(base_dir)
        self.LogLevel = log_level
    # 504 == 0o770 (rwxrwx---); 416 == 0o640 (rw-r-----).
    DEFAULT_DIR_MODE = 504
    DEFAULT_HOSTS_CONF_MODE = 416
def secure_check(self, path, ref_mode):
if (os.path.exists(path) == False):
if (Log.notice <= self.LogLevel):
print("Notice: '{}' is not found.".format(path))
return True
MASK = int(511)
_mode = int((os.stat(path).st_mode & MASK))
_mode |= ref_mode
_mode ^= ref_mode
return (True if (int(_mode) == 0) else False)
    '\n    stat file related functions.\n    '
    # Create a fresh stat file recording seqid 0 for `serverId`.
    def __create_stat_file(self, serverId, path):
        stat = configparser.ConfigParser()
        stat[serverId] = {'seqid': '0'}
        with open(path, 'w') as configfile:
            stat.write(configfile)
    # Overwrite the stat file under <serverId>/<_dir> with `max_seqid`.
    def __update_stat_file(self, serverId, max_seqid, _dir):
        _dirpath = self.dirpath([serverId, _dir])
        _path = self.path(_dirpath, self.STAT_FILE)
        stat = configparser.ConfigParser()
        stat[serverId] = {'seqid': max_seqid}
        with open(_path, 'w') as configfile:
            stat.write(configfile)
    # Read the stored seqid; 0 when the section is empty.
    # NOTE(review): implicitly returns None when the directory is missing --
    # confirm callers handle that.
    def __get_seqid_from_stat_file(self, serverId, _dir):
        _dirpath = self.dirpath([serverId, _dir])
        _path = self.path(_dirpath, self.STAT_FILE)
        if os.path.exists(_dirpath):
            stat = configparser.ConfigParser()
            stat.read(_path)
            if stat[serverId]:
                return int(stat[serverId]['seqid'])
            else:
                return int(0)
    '\n    Check and create directory if not found.\n    '
    # Ensure <serverId>/<_dir> exists (plus any `additional_dir_list` children),
    # creating a stat file when the subdirectory is newly created.
    def __check_dir(self, serverId, _dir, additional_dir_list):
        _dirpath = self.dirpath([serverId])
        if (os.path.exists(_dirpath) == False):
            os.mkdir(_dirpath, self.DEFAULT_DIR_MODE)
        _dirpath = self.dirpath([serverId, _dir])
        _path = self.path(_dirpath, self.STAT_FILE)
        if (os.path.exists(_dirpath) == False):
            os.mkdir(_dirpath, self.DEFAULT_DIR_MODE)
            for d in additional_dir_list:
                os.mkdir((_dirpath + d), self.DEFAULT_DIR_MODE)
            self.__create_stat_file(serverId, _path)
    # Wipe <serverId>/<_dir> contents and reset its stat file to seqid 0.
    # TABLES_DIR is removed wholesale; other dirs only drop their 3-digit
    # hash subdirectories.
    def __reset_dir(self, serverId, _dir, update_stat_file):
        if (self.check_serverId(serverId) == False):
            if (Log.error <= self.LogLevel):
                print("Error: serverId '{}' is not registered.".format(serverId))
            sys.exit(1)
        _rsdirpath = self.dirpath([serverId, _dir])
        if os.path.exists(_rsdirpath):
            if (_dir == self.TABLES_DIR):
                if (Log.debug2 <= self.LogLevel):
                    print("Debug2: rm dir '{}'".format(_rsdirpath))
                shutil.rmtree(_rsdirpath)
            else:
                _d = ((str(_rsdirpath) + '/') + '[0-9][0-9][0-9]')
                _dirs = glob.glob(_d, recursive=True)
                # NOTE(review): the loop variable shadows the `_dir` parameter;
                # harmless here since `_dir` is not used afterwards.
                for _dir in _dirs:
                    if (Log.debug2 <= self.LogLevel):
                        print("Debug2: rm '{}'".format(_dir))
                    shutil.rmtree(_dir)
        update_stat_file(serverId, 0)
    '\n    Public methods\n    '
    # Remember the repository root; a trailing slash is always appended.
    def set_base_dir(self, base_dir='.'):
        self.base_dir = (base_dir + '/')
    # Path to the hosts configuration file; exits when its mode is too open.
    def get_conf_file_path(self):
        _path = (((self.base_dir + self.REPOSITORY_DIR) + '/') + self.CONF_FILE)
        if os.path.exists(_path):
            if (self.secure_check(_path, self.DEFAULT_HOSTS_CONF_MODE) == False):
                print("Error: {}'s mode should be set to {} or more secure.".format(self.CONF_FILE, oct(self.DEFAULT_HOSTS_CONF_MODE)))
                sys.exit(1)
        return _path
    # True when `serverId` is a section in the hosts config; exits on an
    # invalid serverId name.
    def check_serverId(self, serverId):
        if (self.is_serverId_valid(serverId) == False):
            if (Log.error <= self.LogLevel):
                print("Error: serverId='{}' is invalid.".format(serverId))
                print('\tserverId must be the following regular expression:[A-z0-9_]+')
            sys.exit(1)
        _path = self.get_conf_file_path()
        config = configparser.ConfigParser()
        config.read(_path)
        return config.has_section(serverId)
    # Reverse lookup: the serverId section whose host/port match, else None.
    # NOTE(review): `port` is compared as a string against the config value --
    # callers must pass it as str.
    def get_serverId(self, host, port):
        _path = self.get_conf_file_path()
        _config = configparser.ConfigParser()
        _config.read(_path)
        _ret = None
        for section in _config.sections():
            if (('host' in _config[section]) and ('port' in _config[section])):
                if ((_config[section]['host'] == host) and (_config[section]['port'] == port)):
                    _ret = section
                    break
        return _ret
def dirpath(self, dirlist):
_dir = ((self.base_dir + self.REPOSITORY_DIR) + '/')
if isinstance(dirlist, list):
for d in dirlist:
_dir += (d + '/')
else:
_dir += (dirlist + '/')
return _dir
def path(self, dirpath, filename):
return (dirpath + filename)
    '\n    top dir\n    '
    # Create the repository directory and a template hosts config file; exits
    # when the repository already exists.
    def create_repo(self):
        def create_conf_file(path):
            # Template config: one filled-in example server, one empty section.
            config = configparser.ConfigParser()
            config['server_1'] = {'host': 'localhost', 'port': '5432', 'username': 'postgres', 'input_password': 'false', 'password': ''}
            config['server_2'] = {}
            with open(path, 'w') as configfile:
                config.write(configfile)
        if os.path.exists((self.base_dir + self.REPOSITORY_DIR)):
            print("Error: directory '{}' already exists.".format((self.base_dir + self.REPOSITORY_DIR)))
            sys.exit(1)
        if (os.path.exists(self.base_dir) == False):
            os.mkdir(self.base_dir)
        os.mkdir((self.base_dir + self.REPOSITORY_DIR), mode=self.DEFAULT_DIR_MODE)
        _conf_file_path = self.get_conf_file_path()
        create_conf_file(_conf_file_path)
        # Restrict the config file to the secure reference mode.
        os.chmod(_conf_file_path, self.DEFAULT_HOSTS_CONF_MODE)
def is_serverId_valid(self, serverId):
return (True if (re.search('\\w+', serverId, flags=0).group() == serverId) else False)
    # Audit the hosts config: file mode must be at least as restrictive as the
    # reference mode, and every section name must be a valid serverId.
    def check_host_conf_file(self):
        _path = self.get_conf_file_path()
        print('Checking hosts.conf mode....')
        if (self.secure_check(_path, self.DEFAULT_HOSTS_CONF_MODE) == True):
            print('\tReport: {} is secure.'.format(self.CONF_FILE))
        else:
            print("\tError: {}'s mode should be set to {} or more secure.".format(self.CONF_FILE, oct(self.DEFAULT_HOSTS_CONF_MODE)))
        print('Checking serverIds....')
        _config = configparser.ConfigParser()
        _config.read(_path)
        _ret = True
        for s in _config.sections():
            if (self.is_serverId_valid(s) == False):
                print("\tError: serverId '{}' is invalid name.".format(s))
                _ret = False
        if (_ret == True):
            print('\tReport: All serverIds are valid.')
    # Audit directory modes: each per-server directory and its four standard
    # subdirectories must match DEFAULT_DIR_MODE or be more restrictive.
    def check_dirs(self):
        print('Checking directories....')
        _path = ((self.base_dir + self.REPOSITORY_DIR) + '/')
        _dirlist = os.listdir(_path)
        for _dir in _dirlist:
            _dirpath = (_path + _dir)
            if os.path.isdir(_dirpath):
                if (self.secure_check(_dirpath, self.DEFAULT_DIR_MODE) == True):
                    print('\tReport: {} is secure.'.format(_dirpath))
                else:
                    print("\tError: {}'s mode should be set to {} or more secure.".format(_dirpath, oct(self.DEFAULT_DIR_MODE)))
                for subdir in (self.TABLES_DIR, self.GROUPING_DIR, self.REGRESSION_DIR, self.FORMATTED_REGRESSION_PARAMS_DIR):
                    _subdirpath = ((_dirpath + '/') + subdir)
                    if (self.secure_check(_subdirpath, self.DEFAULT_DIR_MODE) == True):
                        print('\tReport: {} is secure.'.format(_subdirpath))
                    else:
                        print("\tError: {}'s mode should be set to {} or more secure.".format(_subdirpath, oct(self.DEFAULT_DIR_MODE)))
def rename_serverId(self, old_serverId, new_serverId):
def mv_dir(old_serverId, new_serverId):
_dirpath = ((self.base_dir + self.REPOSITORY_DIR) + '/')
os.rename((_dirpath + old_serverId), (_dirpath + new_serverId))
if (self.is_serverId_valid(new_serverId) == False):
print("Error: new serverId '{}' is invalid name.".format(new_serverId))
sys.exit(1)
if (self.check_serverId(old_serverId) == False):
print("Error: old serverId '{}' does not exit.".format(old_serverId))
(sys, exit(1))
mv_dir(old_serverId, new_serverId)
_conf_path = self.get_conf_file_path()
_conf_tmp_path = (_conf_path + '.tmp')
os.rename(_conf_path, _conf_tmp_path)
try:
fp_conf = open(_conf_path, mode='w')
with open(_conf_tmp_path, mode='r') as fp_conf_tmp:
for _line in fp_conf_tmp:
if (str((('[' + old_serverId) + ']')) in _line):
_line = str(((('[' + new_serverId) + ']') + '\n'))
fp_conf.write(_line)
os.remove(_conf_tmp_path)
except Exception as e:
os.rename(_conf_tmp_path, _conf_path)
mv_dir(new_serverId, old_serverId)
print(e)
print('Error: Could not rename serverId.')
finally:
os.chmod(_conf_path, self.DEFAULT_HOSTS_CONF_MODE)
fp_conf.close()
def remove_serverId(self, serverId):
def rm_dir(serverId):
_dirpath = (((self.base_dir + self.REPOSITORY_DIR) + '/') + serverId)
if os.path.exists(_dirpath):
shutil.rmtree(_dirpath)
if (Log.debug1 <= self.LogLevel):
print('Debug1: Deleted {}.'.format(_dirpath))
return True
else:
print('Debug1: {} Not Found.'.format(_dirpath))
return False
if (self.check_serverId(serverId) == False):
print("Error: serverId '{}' does not exit.".format(serverId))
(sys, exit(1))
return rm_dir(serverId)
    # List configured serverIds; with verbose=True, also print each entry's
    # host, port and username when present.
    def show_hosts(self, verbose):
        _path = self.get_conf_file_path()
        _config = configparser.ConfigParser()
        _config.read(_path)
        print('ServerId:')
        for section in _config.sections():
            # Sections without a 'host' key (placeholders) are skipped.
            if ('host' in _config[section]):
                print('\t{}'.format(section))
                if (verbose == True):
                    print('\t\thost = {}'.format(_config[section]['host']))
                    if ('port' in _config[section]):
                        print('\t\tport = {}'.format(_config[section]['port']))
                    if ('username' in _config[section]):
                        print('\t\tusername = {}'.format(_config[section]['username']))
    '\n    tables subdir\n    '
    # Thin wrappers delegating to the private stat-file/dir helpers for the
    # tables subdirectory.
    def update_tables_stat_file(self, serverId, max_seqid):
        self.__update_stat_file(serverId, max_seqid, self.TABLES_DIR)
    def get_seqid_from_tables_stat(self, serverId):
        return self.__get_seqid_from_stat_file(serverId, self.TABLES_DIR)
    def check_tables_dir(self, serverId):
        self.__check_dir(serverId, self.TABLES_DIR, [])
    def reset_tables_dir(self, serverId):
        self.__reset_dir(serverId, self.TABLES_DIR, self.update_tables_stat_file)
    # Path of the CSV log listing seqid/database/planid rows.
    def get_log_csv_path(self, serverId):
        _csvdirpath = self.dirpath([serverId, self.TABLES_DIR])
        return self.path(_csvdirpath, self.TABLES_FILE)
    # Path builders for query/plan storage; ids are spread over hash_dir buckets.
    def get_query_dir_path(self, serverId, queryid):
        return self.dirpath([serverId, self.TABLES_DIR, self.TABLES_QUERY_DIR, self.hash_dir(queryid), str(queryid)])
    def get_plan_dir_path(self, serverId, queryid, planid):
        return self.dirpath([serverId, self.TABLES_DIR, self.TABLES_PLAN_DIR, self.hash_dir(planid), ((str(queryid) + '.') + str(planid))])
    def get_plan_json_dir_path(self, serverId, queryid, planid):
        return self.dirpath([serverId, self.TABLES_DIR, self.TABLES_PLAN_JSON_DIR, self.hash_dir(planid), ((str(queryid) + '.') + str(planid))])
    def get_plan_json_path(self, serverId, seqid, queryid, planid):
        _logdirpath = self.get_plan_json_dir_path(serverId, queryid, planid)
        return self.path(_logdirpath, str(seqid))
def get_query(self, serverId, queryid):
_dirpath = self.dirpath([serverId, self.TABLES_DIR, self.TABLES_QUERY_DIR, self.hash_dir(int(queryid)), str(queryid)])
_files = glob.glob((_dirpath + '[0-9]*'))
for _qf in _files:
_seqid_file = _qf.split('/')[(- 1)]
with open(_qf) as fp:
_query = fp.read()
with open(self.get_log_csv_path(self.ServerId), newline='') as f:
_reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_NONE)
for _row in _reader:
_seqid = int(_row[0])
_database = str(_row[3])
_planid = int(_row[7])
if (int(_seqid_file) == _seqid):
return (_database, _query, _planid)
return (None, None, None)
'\n grouping subdir\n '
def update_grouping_stat_file(self, serverId, max_seqid):
self.__update_stat_file(serverId, max_seqid, self.GROUPING_DIR)
def get_seqid_from_grouping_stat(self, serverId):
return self.__get_seqid_from_stat_file(serverId, self.GROUPING_DIR)
def check_grouping_dir(self, serverId):
self.__check_dir(serverId, self.GROUPING_DIR, [])
def reset_grouping_dir(self, serverId):
self.__reset_dir(serverId, self.GROUPING_DIR, self.update_grouping_stat_file)
def get_grouping_plan_dir_path(self, serverId, planid):
return self.dirpath([str(serverId), self.GROUPING_DIR, self.hash_dir(planid)])
def get_grouping_plan_path(self, serverId, queryid, planid):
return self.path(self.get_grouping_plan_dir_path(serverId, planid), ((str(queryid) + '.') + str(planid)))
def get_grouping_dir_path(self, serverId):
return self.dirpath([serverId, self.GROUPING_DIR])
def get_grouping_dir_list(self, serverId):
    """List entries under the root grouping directory."""
    return os.listdir(self.dirpath([serverId, self.GROUPING_DIR]))
def get_grouping_subdir_path(self, serverId, subdir):
    """Path of one hashed grouping subdirectory."""
    return self.dirpath([serverId, self.GROUPING_DIR, subdir])
def get_grouping_subdir_list(self, serverId, subdir):
    """List entries inside one hashed grouping subdirectory."""
    return os.listdir(self.dirpath([serverId, self.GROUPING_DIR, subdir]))
'\n regression subdir\n '
def update_regression_stat_file(self, serverId, max_seqid):
    """Persist max_seqid as the regression progress checkpoint for serverId."""
    self.__update_stat_file(serverId, max_seqid, self.REGRESSION_DIR)
def get_seqid_from_regression_stat(self, serverId):
    """Read back the last regression progress checkpoint for serverId."""
    return self.__get_seqid_from_stat_file(serverId, self.REGRESSION_DIR)
def check_regression_dir(self, serverId):
    """Ensure the regression directory structure exists for serverId."""
    self.__check_dir(serverId, self.REGRESSION_DIR, [])
def reset_regression_dir(self, serverId):
    """Wipe the regression directory and reinitialise its stat file."""
    self.__reset_dir(serverId, self.REGRESSION_DIR, self.update_regression_stat_file)
def get_regression_subdir_path(self, serverId, subdir):
    """Path of one hashed regression subdirectory."""
    return self.dirpath([serverId, self.REGRESSION_DIR, subdir])
def get_regression_param(self, serverId, queryid, planid):
    """Load stored regression parameters for (queryid, planid); None if absent."""
    file_name = f'{queryid}.{planid}'
    param_dir = self.dirpath([serverId, self.REGRESSION_DIR, self.hash_dir(planid)])
    param_path = self.path(param_dir, file_name)
    if not os.path.exists(param_path):
        return None
    return self.read_plan_json(param_path)
'\n formatted regression parameter subdir\n '
def check_formatted_regression_params_dir(self, serverId):
    """Ensure the formatted-regression-parameters directory exists."""
    self.__check_dir(serverId, self.FORMATTED_REGRESSION_PARAMS_DIR, [])
def get_formatted_regression_params_subdir_path(self, serverId):
    """Directory holding formatted regression parameter files (one per query)."""
    return self.dirpath([serverId, self.FORMATTED_REGRESSION_PARAMS_DIR])
def truncate_formatted_regression_params(self, serverId):
    """Delete every formatted regression parameter file for serverId."""
    params_dir = self.get_formatted_regression_params_subdir_path(serverId)
    for name in os.listdir(params_dir):
        os.remove(f'{params_dir}/{name}')
def write_formatted_regression_params(self, serverId, queryid, param):
    """Write `param` to a file named after queryid, overwriting any old one."""
    params_dir = self.get_formatted_regression_params_subdir_path(serverId)
    with open(f'{params_dir}/{queryid}', mode='w') as out:
        out.write(param)
def check_formatted_regression_params(self, serverId, queryid):
    """Return True iff a formatted parameter file exists for queryid.

    Replaces the original O(n) scan over the whole directory listing with a
    single direct existence check; semantics are unchanged because files in
    this directory are named exactly str(queryid).
    """
    params_dir = self.get_formatted_regression_params_subdir_path(serverId)
    return os.path.exists(f'{params_dir}/{queryid}')
class AppContext(object):
    """Binds an application object to the current context.

    Pushed/popped on ``_app_ctx_stack`` and reference-counted so that
    nested pushes only run teardown once, on the final pop.
    """
    def __init__(self, app):
        self.app = app
        self.url_adapter = app.create_url_adapter(None)
        # Per-context scratch namespace exposed to user code (``g``).
        self.g = app.app_ctx_globals_class()
        # Number of outstanding push() calls; teardown fires at <= 0.
        self._refcnt = 0
    def push(self):
        """Bind this app context and notify appcontext_pushed listeners."""
        self._refcnt += 1
        if hasattr(sys, 'exc_clear'):
            # Python 2 only: drop any lingering exception state.
            sys.exc_clear()
        _app_ctx_stack.push(self)
        appcontext_pushed.send(self.app)
    def pop(self, exc=_sentinel):
        """Pop this app context; run teardown functions on the last pop.

        `exc` defaults to the currently handled exception so teardown
        functions can observe it.
        """
        try:
            self._refcnt -= 1
            if (self._refcnt <= 0):
                if (exc is _sentinel):
                    exc = sys.exc_info()[1]
                self.app.do_teardown_appcontext(exc)
        finally:
            # Pop even if teardown raised, so the stack stays consistent.
            rv = _app_ctx_stack.pop()
        assert (rv is self), ('Popped wrong app context. (%r instead of %r)' % (rv, self))
        appcontext_popped.send(self.app)
    def __enter__(self):
        self.push()
        return self
    def __exit__(self, exc_type, exc_value, tb):
        self.pop(exc_value)
        if (BROKEN_PYPY_CTXMGR_EXIT and (exc_type is not None)):
            # Workaround for a PyPy bug where exceptions raised here are
            # otherwise swallowed; re-raise explicitly.
            reraise(exc_type, exc_value, tb)
def run_worker(factory, to_worker, to_sampler, worker_number, agent, env):
    """Sampler worker process main loop: roll out trajectories on demand.

    Args:
        factory: callable(worker_number) -> inner worker object.
        to_worker: queue of (tag, contents) commands from the sampler
            ('start', 'stop', 'continue', 'exit').
        to_sampler: queue receiving ('trajectory', (batch, version, worker)).
        worker_number: index of this worker.
        agent: cloudpickled agent bytes for the initial update.
        env: environment (or env update) for the initial update.
    """
    # Don't block process exit waiting for unflushed result-queue data.
    to_sampler.cancel_join_thread()
    setproctitle.setproctitle(('worker:' + setproctitle.getproctitle()))
    inner_worker = factory(worker_number)
    inner_worker.update_agent(cloudpickle.loads(agent))
    inner_worker.update_env(env)
    version = 0
    streaming_samples = False
    while True:
        if streaming_samples:
            # While streaming, poll for commands so rollouts keep flowing
            # when nothing is pending.
            try:
                (tag, contents) = to_worker.get_nowait()
            except queue.Empty:
                tag = 'continue'
                contents = None
        else:
            # Idle: block until the sampler sends a command.
            (tag, contents) = to_worker.get()
        if (tag == 'start'):
            (agent_update, env_update, version) = contents
            inner_worker.update_agent(cloudpickle.loads(agent_update))
            inner_worker.update_env(env_update)
            streaming_samples = True
        elif (tag == 'stop'):
            streaming_samples = False
        elif (tag == 'continue'):
            batch = inner_worker.rollout()
            try:
                to_sampler.put_nowait(('trajectory', (batch, version, worker_number)))
            except queue.Full:
                # Sampler is backed up; pause until told to start again.
                streaming_samples = False
        elif (tag == 'exit'):
            to_worker.close()
            to_sampler.close()
            inner_worker.shutdown()
            return
        else:
            raise AssertionError('Unknown tag {} with contents {}'.format(tag, contents))
def save_gt_instance(path, gt_inst, nyu_id=None):
    """Write ground-truth instance labels (sem * 1000 + inst) to `path`.

    When `nyu_id` is given, semantic ids are remapped through that lookup
    table; semantic id 0 is treated as unlabeled and kept at 0.
    """
    if nyu_id is not None:
        semantic = gt_inst // 1000
        instance = gt_inst % 1000
        unlabeled = (semantic == 0)
        lookup = np.array(nyu_id)
        # Table is 1-indexed by semantic id.
        semantic = lookup[semantic - 1]
        semantic[unlabeled] = 0
        gt_inst = (semantic * 1000) + instance
    np.savetxt(path, gt_inst, fmt='%d')
def overall_jaccard_index_calc(jaccard_list):
    """Return (sum, mean) of a list of Jaccard indices.

    Returns the string 'None' when the list is empty (mean undefined) —
    the legacy sentinel is kept so existing callers keep working.

    Fix: the original caught *every* Exception and returned 'None', which
    silently hid real errors (e.g. non-numeric entries). Only the empty
    list is treated as the no-result case now.
    """
    if not jaccard_list:
        return 'None'
    jaccard_sum = sum(jaccard_list)
    return (jaccard_sum, jaccard_sum / len(jaccard_list))
def isAcidic(mol):
    """Return 1 if the molecule has more acidic than basic groups, else 0."""
    return int(nAcidicGroup(mol) > nBasicGroup(mol))
class DebertaTokenizer(PreTrainedTokenizer):
    """DeBERTa tokenizer: delegates subword tokenization to a GPT2Tokenizer
    built from `vocab_file`, adding BERT-style special tokens ([CLS]/[SEP]).

    Fix: ``vocab`` and ``vocab_size`` were plain methods, yet the class
    itself uses them as attributes (``self.vocab.copy()``, ``self.vocab.get``,
    ``index < self.vocab_size``), which would raise at runtime — the
    ``@property`` decorators were evidently stripped and are restored here.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=False, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', **kwargs):
        super().__init__(do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        if (not os.path.isfile(vocab_file)):
            raise ValueError("Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained model use `tokenizer = XxxTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
        self.do_lower_case = do_lower_case
        self.gpt2_tokenizer = GPT2Tokenizer(vocab_file)

    @property
    def vocab_size(self):
        """Size of the underlying GPT-2 vocabulary."""
        return len(self.vocab)

    @property
    def vocab(self):
        """The underlying token -> id mapping (a dict)."""
        return self.gpt2_tokenizer.vocab

    def get_vocab(self):
        """Base vocabulary merged with any added tokens."""
        vocab = self.vocab.copy()
        vocab.update(self.get_added_vocab())
        return vocab

    def _tokenize(self, text):
        """Lowercase if configured, then delegate to the GPT-2 tokenizer."""
        if self.do_lower_case:
            text = text.lower()
        return self.gpt2_tokenizer.tokenize(text)

    def _convert_token_to_id(self, token):
        """Token string -> id, falling back to the unknown-token id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Id -> token string; out-of-range ids map to the unknown token."""
        return (self.gpt2_tokenizer.sym(index) if (index < self.vocab_size) else self.unk_token)

    def convert_tokens_to_string(self, tokens):
        return self.gpt2_tokenizer.decode(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """1 for special-token positions, 0 for sequence tokens."""
        if already_has_special_tokens:
            if (token_ids_1 is not None):
                raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')
            return list(map((lambda x: (1 if (x in [self.sep_token_id, self.cls_token_id]) else 0)), token_ids_0))
        if (token_ids_1 is not None):
            return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
        return (([1] + ([0] * len(token_ids_0))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for [CLS] A [SEP], 1 for B [SEP] (if present)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space (GPT-2 BPE treats leading space as a word boundary)."""
        add_prefix_space = kwargs.pop('add_prefix_space', False)
        if (is_split_into_words or add_prefix_space):
            text = (' ' + text)
        return (text, kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        return self.gpt2_tokenizer.save_pretrained(save_directory, filename_prefix=filename_prefix)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load a Flax parameter tree into a PyTorch model, in place.

    Handles bf16 upcasting, base-model prefix mismatches, conv/dense kernel
    transposition, and batch-norm key renames; logs unexpected/missing keys.
    Returns the (mutated) pt_model.
    """
    try:
        import torch
    except ImportError:
        logger.error('Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see and for installation instructions.')
        raise
    # PyTorch cannot load bf16 numpy arrays directly — upcast to fp32 first.
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map((lambda x: (x.dtype == jnp.bfloat16)), flax_state)).values()
    if any(is_type_bf16):
        logger.warning('Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` before loading those in PyTorch model.')
        flax_state = jax.tree_util.tree_map((lambda params: (params.astype(np.float32) if (params.dtype == jnp.bfloat16) else params)), flax_state)
    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()
    # Detect whether one side carries the base-model prefix and the other
    # does not (head-model vs base-model checkpoints).
    load_model_with_head_into_base_model = ((pt_model.base_model_prefix in flax_state) and (pt_model.base_model_prefix not in {k.split('.')[0] for k in pt_model_dict.keys()}))
    load_base_model_into_model_with_head = ((pt_model.base_model_prefix not in flax_state) and (pt_model.base_model_prefix in {k.split('.')[0] for k in pt_model_dict.keys()}))
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
    for (flax_key_tuple, flax_tensor) in flax_state_dict.items():
        has_base_model_prefix = (flax_key_tuple[0] == pt_model.base_model_prefix)
        require_base_model_prefix = ('.'.join(((pt_model.base_model_prefix,) + flax_key_tuple)) in pt_model_dict)
        if (load_model_with_head_into_base_model and has_base_model_prefix):
            flax_key_tuple = flax_key_tuple[1:]
        elif (load_base_model_into_model_with_head and require_base_model_prefix):
            flax_key_tuple = ((pt_model.base_model_prefix,) + flax_key_tuple)
        # 4-D conv kernels: Flax is (H, W, in, out); PyTorch is (out, in, H, W).
        if ((flax_key_tuple[(- 1)] == 'kernel') and (flax_tensor.ndim == 4) and ('.'.join(flax_key_tuple) not in pt_model_dict)):
            flax_key_tuple = (flax_key_tuple[:(- 1)] + ('weight',))
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        # Dense kernels are transposed between Flax and PyTorch.
        elif ((flax_key_tuple[(- 1)] == 'kernel') and ('.'.join(flax_key_tuple) not in pt_model_dict)):
            flax_key_tuple = (flax_key_tuple[:(- 1)] + ('weight',))
            flax_tensor = flax_tensor.T
        elif (flax_key_tuple[(- 1)] in ['scale', 'embedding']):
            flax_key_tuple = (flax_key_tuple[:(- 1)] + ('weight',))
        # Batch-norm statistics use different names on each side.
        elif ('mean' in flax_key_tuple[(- 1)]):
            flax_key_tuple = (flax_key_tuple[:(- 1)] + ('running_mean',))
        elif ('var' in flax_key_tuple[(- 1)]):
            flax_key_tuple = (flax_key_tuple[:(- 1)] + ('running_var',))
        if ('batch_stats' in flax_state):
            # Drop the leading 'params'/'batch_stats' collection name.
            flax_key = '.'.join(flax_key_tuple[1:])
        else:
            flax_key = '.'.join(flax_key_tuple)
        if (flax_key in pt_model_dict):
            if (flax_tensor.shape != pt_model_dict[flax_key].shape):
                raise ValueError(f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.')
            else:
                flax_tensor = (np.asarray(flax_tensor) if (not isinstance(flax_tensor, np.ndarray)) else flax_tensor)
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                missing_keys.remove(flax_key)
        else:
            unexpected_keys.append(flax_key)
    pt_model.load_state_dict(pt_model_dict)
    missing_keys = list(missing_keys)
    if (len(unexpected_keys) > 0):
        logger.warning(f'''Some weights of the Flax model were not used when initializing the PyTorch model {pt_model.__class__.__name__}: {unexpected_keys}
- This IS expected if you are initializing {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).
- This IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect to be exactly identical (e.g. initializing a BertForSequenceClassification model from a FlaxBertForSequenceClassification model).''')
    else:
        logger.warning(f'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.
''')
    if (len(missing_keys) > 0):
        logger.warning(f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly initialized: {missing_keys}
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.''')
    else:
        logger.warning(f'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.
If your task is similar to the task the model of the checkpoint was trained on, you can already use {pt_model.__class__.__name__} for predictions without further training.''')
    return pt_model
class build_scripts(old_build_scripts):
    """distutils ``build_scripts`` extended to accept callables in
    ``scripts``: each callable receives the build dir and may return a
    script path, a sequence of paths, or a false value (skipped).
    """
    def generate_scripts(self, scripts):
        """Expand callable entries into real script paths.

        Returns the final flat list of script paths.
        """
        new_scripts = []
        func_scripts = []
        for script in scripts:
            if is_string(script):
                new_scripts.append(script)
            else:
                func_scripts.append(script)
        if (not func_scripts):
            return new_scripts
        build_dir = self.build_dir
        self.mkpath(build_dir)
        for func in func_scripts:
            script = func(build_dir)
            if (not script):
                continue
            if is_string(script):
                log.info((" adding '%s' to scripts" % (script,)))
                new_scripts.append(script)
            else:
                # Callable returned a sequence of scripts.
                [log.info((" adding '%s' to scripts" % (s,))) for s in script]
                new_scripts.extend(list(script))
        return new_scripts
    def run(self):
        if (not self.scripts):
            return
        self.scripts = self.generate_scripts(self.scripts)
        # Keep the distribution in sync so later commands see the real list.
        self.distribution.scripts = self.scripts
        return old_build_scripts.run(self)
    def get_source_files(self):
        from numpy.distutils.misc_util import get_script_files
        return get_script_files(self.scripts)
def build_constrained_ellipsoidal_problem():
    """Construct a 2-D quadratic problem with one linear inequality.

    Returns (problem, initial_param, expected_optimum).
    """
    var = Variable(2)
    x, y = var[0], var[1]
    objective = (x ** 2) + (2 * (y ** 2)) - (5 * y) - ((2 * x) * y)
    inequality_constraints = [(y - x) + 1]
    problem = OptimizationProblem(objective, cons_ineq=inequality_constraints)
    initial_param = DirectParam(np.array([0, 0]), bounds=[-10, 10])
    return (problem, initial_param, [7 / 2, 5 / 2])
class TestCellPrecision():
    """Tests for CellPrecisionTag.evaluate_single_test_metric.

    Fixes: ``instance`` is clearly used as a pytest fixture by every test
    (each takes it as a parameter), so the stripped ``@pytest.fixture``
    decorator is restored; and the timing assertion compared
    ``start_time - time.time()`` (always negative, so always < 0.05) —
    the elapsed-time computation is now the right way around.
    """
    @pytest.fixture
    def instance(self):
        return CellPrecisionTag()
    def test_no_matching_cells(self, instance):
        target = [['a', 'b'], ['c', 'd']]
        prediction = [['x', 'y'], ['z', 'w']]
        result = instance.evaluate_single_test_metric(target, prediction)
        assert (result == 0.0)
    def test_all_matching_cells(self, instance):
        target = [['a', 'b'], ['c', 'd']]
        prediction = [['a', 'b'], ['c', 'd']]
        result = instance.evaluate_single_test_metric(target, prediction)
        assert (result == 1.0)
    def test_partial_matching_cells(self, instance):
        target = [['a', 'b'], ['c', 'd']]
        prediction = [['a', 'x'], ['y', 'd']]
        result = instance.evaluate_single_test_metric(target, prediction)
        assert (result == 0.5)
    def test_empty_values(self, instance):
        # Both the list form and the serialized-string form of "empty".
        target = []
        prediction = []
        result = instance.evaluate_single_test_metric(target, prediction)
        assert (result == 1.0)
        target = '[]'
        prediction = '[]'
        result = instance.evaluate_single_test_metric(target, prediction)
        assert (result == 1.0)
    def test_single_cell_table(self, instance):
        target = [['a']]
        prediction = [['a']]
        result = instance.evaluate_single_test_metric(target, prediction)
        assert (result == 1.0)
    def test_no_cells_in_prediction(self, instance):
        target = [['a', 'b'], ['c', 'd']]
        prediction = []
        result = instance.evaluate_single_test_metric(target, prediction)
        assert (result == 0.0)
    def test_no_cells_in_target(self, instance):
        target = []
        prediction = [['a', 'b'], ['c', 'd']]
        result = instance.evaluate_single_test_metric(target, prediction)
        assert (result == 0.0)
    def test_duplicate_cells_in_prediction(self, instance):
        # Precision counts distinct matched cells, so duplicates don't hurt.
        target = [['a', 'b'], ['c', 'd']]
        prediction = [['a', 'a'], ['b', 'b'], ['c', 'd']]
        result = instance.evaluate_single_test_metric(target, prediction)
        assert (result == 1.0)
    def test_special_chatgpt_case(self, instance):
        # A prediction that is a strict subset of one target row still
        # scores full precision.
        target = [['573585', '10/31/2019', '22282', '12-Egg-House-Painted-Wood', 35.83, 2, 14585.0, 'United-Kingdom']]
        prediction = [['573585']]
        result = instance.evaluate_single_test_metric(target, prediction)
        assert (result == 1.0)
    def test_cell_precision_time(self, instance):
        target = np.random.rand(20, 1000)
        prediction = np.random.rand(20, 1000)
        start_time = time.time()
        instance.evaluate_single_test_metric(list(target), list(prediction))
        # Elapsed time must stay under 50 ms (the original subtraction was
        # inverted and the assertion was vacuous).
        assert ((time.time() - start_time) < 0.05)
def cho_factor(a, lower=False, overwrite_a=False, check_finite=True):
    """Compute the Cholesky decomposition of `a`; returns (factor, lower)."""
    factor, lower_flag = _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=False, check_finite=check_finite)
    return (factor, lower_flag)
class ResetTags(Tagger):
    """Tagger that clears all sentence-level annotations on a document."""

    def tag(self, document, ngrams=6, stopwords=[]):
        """Replace document.annotations with one empty dict per sentence.

        `ngrams` and `stopwords` are accepted for interface compatibility
        with other taggers but are unused here.
        """
        cleared = {}
        for sentence_index in range(len(document.sentences)):
            cleared[sentence_index] = {}
        document.annotations = cleared
def standardConvection(rhs, u_dealias, u_hat, K, VFSp, FSTp, FCTp, work, mat, la):
    """Compute the standard (advective) convection term (u . grad)u into rhs.

    x-derivatives are projected spectrally; y/z derivatives are obtained by
    multiplying by i*K in Fourier space and transforming back. The three
    components of rhs are the forward transforms of u . grad(u_i).
    Returns rhs (also mutated in place).
    """
    rhs[:] = 0
    U = u_dealias
    # Work arrays sized like U (second one holds two derivative fields).
    Uc = work[(U, 1, True)]
    Uc2 = work[(U, 2, True)]
    # d/dx via spectral projection (u uses FST, v/w use FCT bases).
    dudx = project(Dx(u_hat[0], 0, 1), FSTp).backward()
    dvdx = project(Dx(u_hat[1], 0, 1), FCTp).backward()
    dwdx = project(Dx(u_hat[2], 0, 1), FCTp).backward()
    # d/dy and d/dz via i*K multiplication in Fourier space.
    dudy = Uc2[0] = FSTp.backward(((1j * K[1]) * u_hat[0]), Uc2[0])
    dudz = Uc2[1] = FSTp.backward(((1j * K[2]) * u_hat[0]), Uc2[1])
    rhs[0] = FSTp.forward((((U[0] * dudx) + (U[1] * dudy)) + (U[2] * dudz)), rhs[0])
    Uc2[:] = 0
    dvdy = Uc2[0] = FSTp.backward(((1j * K[1]) * u_hat[1]), Uc2[0])
    dvdz = Uc2[1] = FSTp.backward(((1j * K[2]) * u_hat[1]), Uc2[1])
    rhs[1] = FSTp.forward((((U[0] * dvdx) + (U[1] * dvdy)) + (U[2] * dvdz)), rhs[1])
    Uc2[:] = 0
    dwdy = Uc2[0] = FSTp.backward(((1j * K[1]) * u_hat[2]), Uc2[0])
    dwdz = Uc2[1] = FSTp.backward(((1j * K[2]) * u_hat[2]), Uc2[1])
    rhs[2] = FSTp.forward((((U[0] * dwdx) + (U[1] * dwdy)) + (U[2] * dwdz)), rhs[2])
    return rhs
class TFAlbertForSequenceClassification(metaclass=DummyObject):
    """Import-time placeholder: raises a helpful error on use unless the
    TensorFlow backend is installed."""
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def bspline(x, n):
    """Evaluate the B-spline of order `n` at `x`.

    The spline is symmetric, so it is evaluated piecewise on -|x|.
    """
    neg_abs_x = -abs(asarray(x))
    funclist, condfuncs = _bspline_piecefunctions(n)
    conditions = [cond(neg_abs_x) for cond in condfuncs]
    return piecewise(neg_abs_x, conditions, funclist)
def test_reset(stopping_condition):
    """reset() must zero the running value after an iteration updated it."""
    stopping_condition.after_search_iteration(None)
    stopping_condition.reset()
    assert (stopping_condition.current_value() == 0)
def resnet50(pretrained=False, **kwargs):
    """Build a ResNet-50 model; optionally load pretrained weights."""
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if not pretrained:
        return model
    model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    print('Loading pretrained Resnet 50 weights.')
    return model
def find_all(filter_dict):
    """Return entries matching every key/value pair in `filter_dict`.

    An empty (or falsy) filter returns the shared `entries` list itself,
    matching the original behaviour.
    """
    if not filter_dict:
        return entries
    return [entry for entry in entries
            if all((key in entry and entry[key] == value)
                   for key, value in filter_dict.items())]
def percent_good_pts(x_fake, means, threshold):
    """Score generated points against a set of mode means.

    Returns a tuple of:
      - fraction of points within `threshold` (component-wise) of some mean,
      - number of distinct modes hit by such points,
      - KL divergence of the nearest-mode histogram against the uniform
        distribution over modes.

    Bug fixed: the nearest-mode search initialised its running best with
    ``diff_minimum = [0, 0]`` whose norm is 0, so ``norm(diff) < 0`` was
    never true — every point was attributed to mode 0 and the KL term was
    meaningless. The best distance now starts at +inf.
    """
    good_count = 0
    mode_counts = np.zeros(len(means))
    modes_hit = set()
    for point in x_fake:
        # A point is "good" if it lies within threshold of any mean.
        for mean in means:
            if np.all(np.abs(point - mean) < threshold):
                modes_hit.add(tuple(mean))
                good_count += 1
                break
        # Attribute the point to its nearest mean (Euclidean distance).
        best_index = 0
        best_dist = np.inf
        for i, mean in enumerate(means):
            dist = np.linalg.norm(point - mean)
            if dist < best_dist:
                best_dist = dist
                best_index = i
        mode_counts[best_index] += 1
    kl = 0.0
    mode_counts = mode_counts / len(x_fake)
    for prob in mode_counts:
        if prob != 0:
            kl += prob * np.log(len(means) * prob)
    return (good_count / len(x_fake), len(modes_hit), kl)
def is_past_tense(c, verb_window='left'):
    """True iff any POS tag between the candidate spans is VBD or VBN.

    `verb_window` is accepted for interface compatibility but unused.
    """
    between_tags = get_between_tokens(c, attrib='pos_tags', case_sensitive=True)
    return bool({'VBD', 'VBN'}.intersection(between_tags))
def average_checkpoints(inputs):
    """Average the model parameters across several checkpoint files.

    Args:
        inputs: list of checkpoint file paths with identical model keys.
    Returns:
        The first checkpoint's state dict with its 'model' entry replaced
        by the element-wise average of all checkpoints' parameters.
    """
    params_dict = collections.OrderedDict()
    params_keys = None
    new_state = None
    num_models = len(inputs)
    for fpath in inputs:
        with PathManager.open(fpath, 'rb') as f:
            # Force all tensors onto CPU regardless of where they were saved.
            state = torch.load(f, map_location=(lambda s, _: torch.serialization.default_restore_location(s, 'cpu')))
        # Keep the first checkpoint as the template for everything but 'model'.
        if (new_state is None):
            new_state = state
        model_params = state['model']
        model_params_keys = list(model_params.keys())
        if (params_keys is None):
            params_keys = model_params_keys
        elif (params_keys != model_params_keys):
            raise KeyError('For checkpoint {}, expected list of params: {}, but found: {}'.format(f, params_keys, model_params_keys))
        for k in params_keys:
            p = model_params[k]
            # Accumulate in fp32 to avoid half-precision overflow.
            if isinstance(p, torch.HalfTensor):
                p = p.float()
            if (k not in params_dict):
                params_dict[k] = p.clone()
            else:
                params_dict[k] += p
    averaged_params = collections.OrderedDict()
    for (k, v) in params_dict.items():
        averaged_params[k] = v
        # NOTE(review): div_ on an integer tensor truncates — confirm all
        # averaged entries are floating point (buffers may be int).
        averaged_params[k].div_(num_models)
    new_state['model'] = averaged_params
    return new_state
def getidperobject(object_name, id_env, id_mapping):
    """Map an environment object id to a per-type ordinal id.

    id_mapping maps (object_type, ordinal) -> environment id. If id_env is
    already mapped, the existing ordinal is returned; otherwise the next
    ordinal for this object type is assigned and recorded.
    """
    # Normalise e.g. 'Door Ex...' naming to 'door_...'.
    object_name = object_name.lower().replace(' ex', '_')
    cont_object = 0
    for (elem, id_en) in id_mapping.items():
        if (id_en == int(id_env)):
            if (object_name == 'door'):
                # NOTE(review): an already-mapped 'door' is treated as an
                # error — confirm why doors must never be re-looked-up.
                raise ValueError
            return int(elem[1])
        # Count existing instances of this type to derive the next ordinal.
        if (elem[0] == object_name):
            cont_object += 1
    id_object = (cont_object + 1)
    id_mapping[(object_name, id_object)] = int(id_env)
    return id_object
@pytest.mark.parametrize('observation_shape', [(100,)])
@pytest.mark.parametrize('batch_size', [32])
def test_min_max_observation_scaler(observation_shape: Sequence[int], batch_size: int) -> None:
    """MinMaxObservationScaler maps observations into [-1, 1], round-trips
    exactly, matches a hand-computed reference, and survives serialization.

    The two leading lines were stripped decorators (`.parametrize(...)` is
    not valid syntax); restored as ``@pytest.mark.parametrize``.
    """
    shape = (batch_size, *observation_shape)
    observations = np.random.random(shape).astype('f4')
    maximum = observations.max(axis=0)
    minimum = observations.min(axis=0)
    scaler = MinMaxObservationScaler(maximum=maximum, minimum=minimum)
    assert scaler.built
    assert (scaler.get_type() == 'min_max')
    y = scaler.transform(torch.tensor(observations))
    assert np.all((y.numpy() >= (- 1.0)))
    assert np.all((y.numpy() <= 1.0))
    x = torch.rand((batch_size, *observation_shape))
    y = scaler.transform(x)
    # Reference: scale to [0, 1] then shift into [-1, 1].
    ref_y = ((x.numpy() - minimum.reshape((1, (- 1)))) / (maximum - minimum).reshape((1, (- 1))))
    assert np.allclose(y.numpy(), ((ref_y * 2.0) - 1.0), atol=1e-06)
    assert torch.allclose(scaler.reverse_transform(y), x, atol=1e-06)
    y = scaler.transform_numpy(x.numpy())
    assert np.allclose(y, ((ref_y * 2.0) - 1.0), atol=1e-06)
    assert np.allclose(scaler.reverse_transform_numpy(y), x.numpy(), atol=1e-06)
    # Round-trip through serialize/deserialize preserves the bounds.
    new_scaler = MinMaxObservationScaler.deserialize(scaler.serialize())
    assert np.all((new_scaler.minimum == scaler.minimum))
    assert np.all((new_scaler.maximum == scaler.maximum))
class CsBbox3d(CsObject):
    """3D bounding box annotation (center/dims/rotation) plus its 2D box."""

    def __init__(self):
        CsObject.__init__(self, CsObjectType.BBOX3D)
        self.bbox_2d = None      # associated CsBbox2d
        self.center = []         # [x, y, z] in meters
        self.dims = []           # [l, w, h] in meters
        self.rotation = []       # 4-component rotation (quaternion) — order per __str__
        self.instanceId = (- 1)
        self.label = ''
        self.score = (- 1.0)

    def __str__(self):
        bbox2dText = str(self.bbox_2d)
        bbox3dText = ''
        bbox3dText += '\n - Center (x/y/z) [m]: {}/{}/{}'.format(self.center[0], self.center[1], self.center[2])
        bbox3dText += '\n - Dimensions (l/w/h) [m]: {}/{}/{}'.format(self.dims[0], self.dims[1], self.dims[2])
        bbox3dText += '\n - Rotation: {}/{}/{}/{}'.format(self.rotation[0], self.rotation[1], self.rotation[2], self.rotation[3])
        text = 'Object: {}\n2D {}\n - 3D {}'.format(self.label, bbox2dText, bbox3dText)
        return text

    def fromJsonText(self, jsonText, objId=(- 1)):
        """Populate this box from a parsed annotation dict."""
        self.bbox_2d = CsBbox2d()
        self.bbox_2d.fromJsonText(jsonText['2d'])
        self.center = jsonText['3d']['center']
        self.dims = jsonText['3d']['dimensions']
        self.rotation = jsonText['3d']['rotation']
        self.label = jsonText['label']
        self.score = jsonText['score']
        if ('instanceId' in jsonText.keys()):
            self.instanceId = jsonText['instanceId']

    def toJsonText(self):
        """Serialize this box back into the annotation dict format.

        Fix: the '2d' and '3d' sub-dicts were never created before being
        assigned into, so this method always raised KeyError.
        """
        objDict = {}
        objDict['label'] = self.label
        objDict['instanceId'] = self.instanceId
        objDict['2d'] = {}
        objDict['2d']['amodal'] = self.bbox_2d.bbox_amodal_xywh
        objDict['2d']['modal'] = self.bbox_2d.bbox_modal_xywh
        objDict['3d'] = {}
        objDict['3d']['center'] = self.center
        objDict['3d']['dimensions'] = self.dims
        objDict['3d']['rotation'] = self.rotation
        return objDict

    def depth(self):
        """Integer ground-plane distance sqrt(x^2 + y^2) of the box center."""
        return np.sqrt(((self.center[0] ** 2) + (self.center[1] ** 2))).astype(int)
def get_task_dataset(data, args):
    """Build train/valid/test knowledge-graph datasets from split dicts.

    Each split provides 'edge_index' (2 x E) and 'edge_type' (E,);
    triples are stacked as (head, relation, tail) rows.
    Returns (train_ds, valid_ds, test_ds, nrelation, nentity).
    """
    def to_triples(split):
        return np.stack((split['edge_index'][0], split['edge_type'], split['edge_index'][1])).T

    nentity = len(np.unique(data['train']['edge_index'].reshape(-1)))
    nrelation = len(np.unique(data['train']['edge_type']))
    train_triples = to_triples(data['train'])
    valid_triples = to_triples(data['valid'])
    test_triples = to_triples(data['test'])
    # Filter set for ranking evaluation covers all known triples.
    all_triples = np.concatenate([train_triples, valid_triples, test_triples])
    train_dataset = TrainDataset(train_triples, nentity, args.num_neg)
    valid_dataset = TestDataset(valid_triples, all_triples, nentity)
    test_dataset = TestDataset(test_triples, all_triples, nentity)
    return (train_dataset, valid_dataset, test_dataset, nrelation, nentity)
def function_factory(name, nargs=0, latex_name=None, conversions=None, evalf_params_first=True, eval_func=None, evalf_func=None, conjugate_func=None, real_part_func=None, imag_part_func=None, derivative_func=None, tderivative_func=None, power_func=None, series_func=None, print_func=None, print_latex_func=None):
    """Create a new SymbolicFunction subclass instance with the given hooks.

    The *_func keyword arguments are installed on the generated class as
    `_<name>_` methods; the instance pickles via unpickle_function so it
    can be reconstructed without this factory.
    """
    class NewSymbolicFunction(SymbolicFunction):
        def __init__(self):
            SymbolicFunction.__init__(self, name, nargs, latex_name, conversions, evalf_params_first)
        def _maxima_init_(self):
            # Maxima representation: quoted (unevaluated) symbol.
            return ("'%s" % self.name())
        def _fricas_init_(self):
            return ('operator("%s")' % self.name())
        def _sympy_(self):
            from sympy import Function
            return Function(self.name())
        def __reduce__(self):
            # Slot 6 of the state holds the pickled custom hook functions.
            pickled_functions = self.__getstate__()[6]
            return (unpickle_function, (name, nargs, latex_name, conversions, evalf_params_first, pickled_functions))
    l = locals()
    # Attach every provided hook (eval_func, print_func, ...) as _<hook>_.
    for func_name in sfunctions_funcs:
        func = l.get((func_name + '_func'), None)
        if func:
            if (not callable(func)):
                raise ValueError(((func_name + '_func') + ' parameter must be callable'))
            setattr(NewSymbolicFunction, ('_%s_' % func_name), func)
    return NewSymbolicFunction()
@pytest.mark.cpublas
def test_openblas_compiles():
    """Smoke test: a matmul program runs with the OpenBLAS backend selected.

    Fixes: ``C[:] = (A B)`` was invalid syntax — the stripped ``@`` matrix
    multiplication operator is restored. The dangling ``.cpublas`` line was
    a stripped marker decorator, restored as ``@pytest.mark.cpublas``
    (TODO confirm the exact marker name against the test suite config).
    """
    A = np.random.rand(2, 3)
    B = np.random.rand(3, 4)
    C = np.random.rand(2, 4)
    blas.default_implementation = 'OpenBLAS'

    def prog(A, B, C):
        C[:] = A @ B
    prog(A, B, C)
class DMA_nonzero_reg(atomic_reg):
    """Generated register map for the DMA_nonzero command (768-bit record).

    The `_fields_` list mirrors the hardware bit layout; repeated 'reserved'
    entries are padding slices. NOTE(review): duplicate field names in a
    ctypes `_fields_` mean only the last descriptor of each name is
    addressable by attribute — assumed intentional for padding; confirm.
    """
    OP_NAME = 'DMA_nonzero'
    _fields_ = [('intr_en', ctypes.c_uint64, 1), ('stride_enable', ctypes.c_uint64, 1), ('nchw_copy', ctypes.c_uint64, 1), ('cmd_short', ctypes.c_uint64, 1), ('reserved', ctypes.c_uint64, 1), ('reserved', ctypes.c_uint64, 4), ('reserved', ctypes.c_uint64, 20), ('Reserved', ctypes.c_uint64, 3), ('cmd_type', ctypes.c_uint64, 4), ('cmd_special_function', ctypes.c_uint64, 3), ('fill_constant_en', ctypes.c_uint64, 1), ('src_data_format', ctypes.c_uint64, 3), ('index_data_format', ctypes.c_uint64, 3), ('reserved', ctypes.c_uint64, 18), ('cmd_id_dep', ctypes.c_uint64, 24), ('reserved', ctypes.c_uint64, 8), ('constant_value', ctypes.c_uint64, 32), ('src_nstride', ctypes.c_uint64, 32), ('src_cstride', ctypes.c_uint64, 32), ('src_hstride', ctypes.c_uint64, 32), ('src_wstride', ctypes.c_uint64, 32), ('dst_nstride(base_i)', ctypes.c_uint64, 32), ('dst_cstride', ctypes.c_uint64, 32), ('dst_hstride', ctypes.c_uint64, 32), ('dst_wstride', ctypes.c_uint64, 32), ('src_nsize', ctypes.c_uint64, 16), ('src_csize', ctypes.c_uint64, 16), ('src_hsize', ctypes.c_uint64, 16), ('src_wsize', ctypes.c_uint64, 16), ('dst_nsize', ctypes.c_uint64, 16), ('dst_csize', ctypes.c_uint64, 16), ('dst_hsize', ctypes.c_uint64, 16), ('dst_wsize', ctypes.c_uint64, 16), ('src_start_addr_l32', ctypes.c_uint64, 32), ('src_start_addr_h8', ctypes.c_uint64, 8), ('reserved', ctypes.c_uint64, 24), ('dst_start_addr_l32', ctypes.c_uint64, 32), ('dst_start_addr_h8', ctypes.c_uint64, 8), ('reserved', ctypes.c_uint64, 24), ('Reserved', ctypes.c_uint64, 32), ('Reserved', ctypes.c_uint64, 32), ('localmem_mask_l32', ctypes.c_uint64, 32), ('localmem_mask_h32', ctypes.c_uint64, 32)]
    # Per-field annotations generated alongside _fields_ (duplicates for
    # padding entries are harmless — later annotations just overwrite).
    intr_en: int
    stride_enable: int
    nchw_copy: int
    cmd_short: int
    reserved: int
    reserved: int
    reserved: int
    Reserved: int
    cmd_type: int
    cmd_special_function: int
    fill_constant_en: int
    src_data_format: int
    index_data_format: int
    reserved: int
    cmd_id_dep: int
    reserved: int
    constant_value: int
    src_nstride: int
    src_cstride: int
    src_hstride: int
    src_wstride: int
    dst_nstride_base_i_: int
    dst_cstride: int
    dst_hstride: int
    dst_wstride: int
    src_nsize: int
    src_csize: int
    src_hsize: int
    src_wsize: int
    dst_nsize: int
    dst_csize: int
    dst_hsize: int
    dst_wsize: int
    src_start_addr_l32: int
    src_start_addr_h8: int
    reserved: int
    dst_start_addr_l32: int
    dst_start_addr_h8: int
    reserved: int
    Reserved: int
    Reserved: int
    localmem_mask_l32: int
    localmem_mask_h32: int
    length: int = 768
    # NOTE(review): sanitized accessor for the 'dst_nstride(base_i)' field
    # whose hardware name is not a valid identifier; looks like a stripped
    # @property — confirm whether callers use it as attribute or method.
    def dst_nstride_base_i_(self) -> int:
        return self['dst_nstride(base_i)']
def tok2int_list(src_list, tokenizer, max_seq_length, max_seq_size=(- 1)):
    """Tokenize each sentence in `src_list` via tok2int_sent.

    Returns three parallel lists: (input_ids, input_masks, segment_ids).
    `max_seq_size` is accepted for interface compatibility but unused.
    """
    inp_padding = []
    msk_padding = []
    seg_padding = []
    for sent in src_list:
        ids, mask, segments = tok2int_sent(sent, tokenizer, max_seq_length)
        inp_padding.append(ids)
        msk_padding.append(mask)
        seg_padding.append(segments)
    return (inp_padding, msk_padding, seg_padding)
def test_tensordot_1():
    """np.tensordot lowering with the 'pure' linalg implementation.

    Fix: the inner function is accessed as ``tensordot_1.f`` below, which
    is the undecorated-function attribute a ``@dace.program`` object
    carries — the decorator was stripped and is restored here.
    """
    @dace.program
    def tensordot_1(A: dace.float32[(3, 3, 3, 3, 3, 3)], B: dace.float32[(3, 3, 3, 3, 3, 3)]):
        return np.tensordot(A, B, axes=([0, 3], [4, 2]))
    A = np.arange((3 ** 6), dtype=np.float32).reshape(3, 3, 3, 3, 3, 3)
    B = np.arange((3 ** 6), dtype=np.float32).reshape(3, 3, 3, 3, 3, 3)
    with dace.config.set_temporary('library', 'linalg', 'default_implementation', value='pure'):
        assert np.allclose(tensordot_1(A.copy(), B.copy()), tensordot_1.f(A, B))
def foo(sleep=False):
    """Print a greeting, optionally sleep, call baz(), then print a farewell.

    NOTE(review): 'Good by.' is a typo but is runtime output — left as-is
    in case anything matches on it.
    """
    print('Hello world.')
    if sleep:
        pointless_sleep()
    baz()
    print('Good by.')
@_INGREDIENT.capture
def build_model(graph_adj, node_features, labels, dataset_indices_placeholder, train_feed, trainval_feed, val_feed, test_feed, weight_decay, normalize_features, num_layers, hidden_size, num_kernels, r, dropout_prob, alt_opt):
    """Build a MoNet model and register its dropout placeholder in the feeds.

    Fix: the bare ``_INGREDIENT.capture`` expression above the def was a
    stripped sacred decorator (it had no effect as a statement); restored
    as ``@_INGREDIENT.capture`` so config injection works again.
    """
    dropout = tf.placeholder(dtype=tf.float32, shape=[])
    train_feed[dropout] = dropout_prob
    # NOTE(review): False feeds 0.0 into the float placeholder, i.e.
    # dropout disabled during eval — confirm this is the intent.
    trainval_feed[dropout] = False
    val_feed[dropout] = False
    test_feed[dropout] = False
    return MoNet(node_features, graph_adj, labels, dataset_indices_placeholder, num_layers=num_layers, hidden_size=hidden_size, num_kernels=num_kernels, r=r, dropout_prob=dropout, weight_decay=weight_decay, normalize_features=normalize_features, alt_opt=alt_opt)
def test_keyword_args_and_generalized_unpacking():
    """pybind11 argument forwarding: *args/**kwargs unpacking must reach a
    Python callable intact, and error cases must raise with the expected
    messages."""
    def f(*args, **kwargs):
        return (args, kwargs)
    assert (m.test_tuple_unpacking(f) == (('positional', 1, 2, 3, 4, 5, 6), {}))
    assert (m.test_dict_unpacking(f) == (('positional', 1), {'key': 'value', 'a': 1, 'b': 2}))
    assert (m.test_keyword_args(f) == ((), {'x': 10, 'y': 20}))
    assert (m.test_unpacking_and_keywords1(f) == ((1, 2), {'c': 3, 'd': 4}))
    assert (m.test_unpacking_and_keywords2(f) == (('positional', 1, 2, 3, 4, 5), {'key': 'value', 'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}))
    # Duplicate keyword collisions must raise TypeError.
    with pytest.raises(TypeError) as excinfo:
        m.test_unpacking_error1(f)
    assert ('Got multiple values for keyword argument' in str(excinfo.value))
    with pytest.raises(TypeError) as excinfo:
        m.test_unpacking_error2(f)
    assert ('Got multiple values for keyword argument' in str(excinfo.value))
    # Unconvertible C++ arguments surface as RuntimeError.
    with pytest.raises(RuntimeError) as excinfo:
        m.test_arg_conversion_error1(f)
    assert ('Unable to convert call argument' in str(excinfo.value))
    with pytest.raises(RuntimeError) as excinfo:
        m.test_arg_conversion_error2(f)
    assert ('Unable to convert call argument' in str(excinfo.value))
class VariationalWarpEncoder(CriticModelMixin, StochasticActorModelMixin, BaseModel):
    """Actor-critic recommender model with a variational (WARP-style) actor
    loss; builds its TF graph at construction time."""
    def __init__(self, batch_of_users, heldout_batch, input_dim=None, evaluation_metric='NDCG', batch_size=500, lr_actor=0.001, lr_critic=0.0001, lr_ac=2e-06, ac_reg_loss_scaler=0.0, actor_reg_loss_scaler=0.0001, **kwargs):
        # Stash every constructor argument (except **kwargs) as attributes.
        local_variables = locals()
        local_variables.pop('kwargs')
        self._set_locals(local_variables)
        self.build_graph()
        self.saver = tf.train.Saver()
    def construct_actor_error(self):
        """Build the WARP-weighted actor error and its regularized mean."""
        self.actor_error_mask = tf.identity(self.batch_of_users)
        self.error_vector_creator = ErrorVectorCreator(input_dim=self.input_dim)
        # Error weights are computed in numpy via py_func, then applied to
        # the predictions elementwise.
        error_scaler = tf.py_func(self.error_vector_creator, [self.prediction, self.actor_error_mask], tf.float32)
        true_error = (self.prediction * error_scaler)
        self.actor_error = tf.reduce_sum(true_error, axis=(- 1))
        print('Shape of actor_error should be like 500: {}'.format(self.actor_error.get_shape()))
        self.mean_actor_error = (tf.reduce_mean(self.actor_error) + (self.kl_loss_scaler * self.actor_regularization_loss))
    def create_logging_ops(self):
        """Register TensorBoard scalar summaries."""
        tf.summary.scalar('mean_actor_error', self.mean_actor_error)
        tf.summary.scalar('actor_reg', self.actor_regularization_loss)
        # NOTE(review): empty summary name for true_ndcg looks accidental —
        # confirm whether this summary renders at all.
        tf.summary.scalar('', tf.reduce_mean(self.true_ndcg))
        tf.summary.scalar('mean_critic_error', self.mean_critic_error)
def slice_list(in_list, lens):
    """Partition ``in_list`` into consecutive sub-lists of the given lengths.

    Args:
        in_list: Sequence to partition.
        lens: List of non-negative integers; ``sum(lens)`` must equal
            ``len(in_list)``.

    Returns:
        A list of sub-lists where the i-th sub-list has length ``lens[i]``.

    Raises:
        TypeError: If ``lens`` is not a list.
        ValueError: If the lengths do not sum to ``len(in_list)``.
    """
    if not isinstance(lens, list):
        # Bug fix: the original message referred to a non-existent
        # parameter named "indices"; report the actual argument name.
        raise TypeError('"lens" must be a list of integers')
    if sum(lens) != len(in_list):
        raise ValueError('sum of lens and list length does not match: {} != {}'.format(sum(lens), len(in_list)))
    out_list = []
    idx = 0
    # Iterate the lengths directly instead of indexing with range(len(...)).
    for length in lens:
        out_list.append(in_list[idx:(idx + length)])
        idx += length
    return out_list
def fix_rows(table: str, prefix: str, root: str, target_root: str, all_concepts: Dict[(str, Any)], child: str) -> None:
    """Rewrite one zstd-compressed CSV shard, filling in source concept ids.

    Reads ``root/table/child``, looks up each row's ``{prefix}_source_value``
    in ``all_concepts`` to populate ``{prefix}_source_concept_id``, and
    streams the result to ``target_root/table/child``.

    Args:
        table: Table (sub-directory) name under both roots.
        prefix: Column-name prefix, e.g. ``condition`` for
            ``condition_source_value`` / ``condition_source_concept_id``.
        root: Directory holding the input shards.
        target_root: Directory the rewritten shards are written to.
        all_concepts: Maps a source value to a list of (vocabulary,
            concept_id) pairs — presumably built upstream; verify shape
            against the caller.
        child: Shard file name within the table directory.

    Raises:
        RuntimeError: Wrapping any exception, with the failing path attached.
    """
    try:
        source_path = os.path.join(root, table, child)
        target_path = os.path.join(target_root, table, child)
        # NOTE(review): the raw file objects from open() are not closed
        # explicitly; presumably closing the TextIOWrapper propagates to
        # the zstd stream and the file — confirm zstandard's closefd
        # behaviour for stream_reader/stream_writer.
        with io.TextIOWrapper(zstandard.ZstdDecompressor().stream_reader(open(source_path, 'rb'))) as f:
            with io.TextIOWrapper(zstandard.ZstdCompressor().stream_writer(open(target_path, 'wb'))) as of:
                reader = csv.DictReader(f)
                writer = None
                for row in reader:
                    # Lazily create the writer so the output header exactly
                    # mirrors the input's field names.
                    if (writer is None):
                        assert (reader.fieldnames is not None)
                        writer = csv.DictWriter(of, fieldnames=reader.fieldnames)
                        writer.writeheader()
                    mapped_values = all_concepts.get(row[f'{prefix}_source_value'], [])
                    if (len(mapped_values) > 0):
                        if (len(mapped_values) == 1):
                            # Unambiguous mapping: take its concept id.
                            row[f'{prefix}_source_concept_id'] = mapped_values[0][1]
                        else:
                            # Exactly two candidates are expected; prefer
                            # the ICD9CM vocabulary entry.
                            assert (len(mapped_values) == 2)
                            icd9 = [a for a in mapped_values if (a[0] == 'ICD9CM')]
                            assert (len(icd9) == 1)
                            row[f'{prefix}_source_concept_id'] = icd9[0][1]
                    # Medical-history rows get a sentinel concept id of '0',
                    # overriding any mapping found above.
                    if (row['load_table_id'] in ('shc_medical_hx', 'lpch_medical_hx')):
                        row[f'{prefix}_source_concept_id'] = '0'
                    writer.writerow(row)
    except Exception as e:
        # Print the original traceback, then re-raise with the failing
        # path so the parallel driver can report which shard broke.
        traceback.print_exc()
        raise RuntimeError(((('Failed ' + root) + ' , ') + child), e)
class OperatorTests(unittest.TestCase):
    """Tests for the Genotype crossover operators over Cluster genes."""

    def setUp(self):
        # Deterministic RNG shared by Cluster and Genotype so sampled
        # values are reproducible across runs.
        rng = np.random.RandomState(42)
        Cluster.global_rng = rng
        Genotype.global_rng = rng
        setattr(Cluster, 'num_dims', 2)
        setattr(Cluster, 'initial_mean_upper', 1.0)
        setattr(Cluster, 'initial_cov_upper', 0.5)
        # Two genotypes of two clusters each, with hand-set means/covs so
        # the crossover assertions below can compare exact arrays.
        clust1 = Cluster(70)
        clust1.mean = np.array([0, 0])
        clust1.cov = np.array([[1, 0], [0, 1]])
        clust2 = Cluster(90)
        clust2.mean = np.array([5, 5])
        clust2.cov = np.array([[5, 0], [0, 10]])
        self.indiv1 = Genotype([clust1, clust2])
        self.indiv1.create_views()
        self.indiv1.resample_values()
        clust3 = Cluster(70)
        clust3.mean = np.array([2, 2])
        clust3.cov = np.array([[2, 0], [0, 2]])
        clust4 = Cluster(90)
        clust4.mean = np.array([10, 10])
        clust4.cov = np.array([[4, 0], [0, 2]])
        self.indiv2 = Genotype([clust3, clust4])
        self.indiv2.create_views()
        self.indiv2.resample_values()

    def tearDown(self):
        Cluster.global_rng = None
        Genotype.global_rng = None
        delattr(Cluster, 'num_dims')
        # Bug fix: also remove the other class attributes set in setUp so
        # no state leaks into unrelated tests.
        delattr(Cluster, 'initial_mean_upper')
        delattr(Cluster, 'initial_cov_upper')

    def test_uniform_crossover_genes(self):
        """xover_genes swaps mean/cov genes according to the RNG's flips."""
        # Replay the RNG draws xover_genes will make: presumably two flips
        # per cluster — one for the mean gene, one for the cov gene.
        rng = np.random.RandomState(42)
        swaps = [(True if (rng.rand() < 0.5) else False) for _ in range((len(self.indiv1) * 2))]
        indiv1_means = [i.mean for i in self.indiv1]
        indiv2_means = [i.mean for i in self.indiv2]
        indiv1_covs = [i.cov for i in self.indiv1]
        indiv2_covs = [i.cov for i in self.indiv2]
        Genotype.global_rng = np.random.RandomState(42)
        (self.indiv1, self.indiv2) = Genotype.xover_genes(self.indiv1, self.indiv2, mixing_ratio=0.5)
        # Even-indexed flips govern the mean genes.
        for (i, (clust1, clust2, swap)) in enumerate(zip(self.indiv1, self.indiv2, swaps[0::2])):
            with self.subTest(i=i):
                if swap:
                    self.assertTrue(np.array_equal(clust1.mean, indiv2_means[i]))
                    self.assertTrue(np.array_equal(clust2.mean, indiv1_means[i]))
                else:
                    self.assertTrue(np.array_equal(clust1.mean, indiv1_means[i]))
                    self.assertTrue(np.array_equal(clust2.mean, indiv2_means[i]))
        # Bug fix: the cov flips are the odd-indexed draws, i.e. stride 2
        # (swaps[1::2]). The original used swaps[1::3], which yields too few
        # flips (zip truncated the loop) and compared against wrong draws.
        for (i, (clust1, clust2, swap)) in enumerate(zip(self.indiv1, self.indiv2, swaps[1::2])):
            with self.subTest(i=i):
                if swap:
                    self.assertTrue(np.array_equal(clust1.cov, indiv2_covs[i]))
                    self.assertTrue(np.array_equal(clust2.cov, indiv1_covs[i]))
                else:
                    self.assertTrue(np.array_equal(clust1.cov, indiv1_covs[i]))
                    self.assertTrue(np.array_equal(clust2.cov, indiv2_covs[i]))

    def test_uniform_crossover_clusters(self):
        """xover_cluster swaps whole cluster objects per the RNG's flips."""
        rng = np.random.RandomState(42)
        swaps = [(True if (rng.rand() < 0.5) else False) for _ in self.indiv1]
        indiv1_ids = [id(i) for i in self.indiv1]
        indiv2_ids = [id(i) for i in self.indiv2]
        Genotype.global_rng = np.random.RandomState(42)
        (self.indiv1, self.indiv2) = Genotype.xover_cluster(self.indiv1, self.indiv2)
        for (i, (clust1, clust2, swap)) in enumerate(zip(self.indiv1, self.indiv2, swaps)):
            with self.subTest(i=i):
                if swap:
                    # Object identity proves the clusters were moved, not copied.
                    self.assertEqual(indiv1_ids[i], id(clust2))
                    self.assertEqual(indiv2_ids[i], id(clust1))
                else:
                    self.assertEqual(indiv1_ids[i], id(clust1))
                    self.assertEqual(indiv2_ids[i], id(clust2))

    def test_uniform_crossover_none(self):
        """A mixing ratio of 0.0 must leave indiv1's genes untouched."""
        (self.indiv1, self.indiv2) = Genotype.xover_genes(self.indiv1, self.indiv2, mixing_ratio=0.0)
        indiv1_unchanged = all([np.allclose(self.indiv1[0].mean, np.array([0, 0])), np.allclose(self.indiv1[0].cov, np.array([[1, 0], [0, 1]])), np.allclose(self.indiv1[1].mean, np.array([5, 5])), np.allclose(self.indiv1[1].cov, np.array([[5, 0], [0, 10]]))])
        self.assertTrue(indiv1_unchanged)

    def test_uniform_crossover_all(self):
        """A mixing ratio of 1.0 must swap every gene of indiv1."""
        (self.indiv1, self.indiv2) = Genotype.xover_genes(self.indiv1, self.indiv2, mixing_ratio=1.0)
        indiv1_allchanged = (not any([np.allclose(self.indiv1[0].mean, np.array([0, 0])), np.allclose(self.indiv1[0].cov, np.array([[1, 0], [0, 1]])), np.allclose(self.indiv1[1].mean, np.array([5, 5])), np.allclose(self.indiv1[1].cov, np.array([[5, 0], [0, 10]]))]))
        self.assertTrue(indiv1_allchanged)
def mse_loss(f_1, f_2):
    """Summed MSE between two sparse feature volumes, densified and folded.

    Both inputs are densified, the channel and depth axes are merged into
    one, and the summed squared error is normalised by ten times the number
    of active sites in the target. Gradients do not flow into ``f_2``
    (its dense tensor is detached).
    """
    dense_src = f_1.dense()
    n, c, d, h, w = dense_src.shape
    # Fold channels and depth together so the comparison is over NCHW maps.
    dense_src = dense_src.view(n, c * d, h, w)
    dense_tgt = f_2.dense().view(n, c * d, h, w).detach()
    denom = f_2.features.shape[0] * 10
    return F.mse_loss(dense_src, dense_tgt, reduction='sum') / denom
def get_algorithm(config, expl_path_collector, eval_path_collector):
    """Assemble a TorchMBRLAlgorithm from a prepared experiment config.

    Pulls the trainer, policies, environments and replay buffer out of
    ``config`` and forwards any extra settings from
    ``config['algorithm_kwargs']``.
    """
    components = dict(
        trainer=config['trainer'],
        exploration_policy=config['exploration_policy'],
        model_trainer=config['model_trainer'],
        exploration_env=config['exploration_env'],
        evaluation_env=config['evaluation_env'],
        replay_buffer=config['replay_buffer'],
        exploration_data_collector=expl_path_collector,
        evaluation_data_collector=eval_path_collector,
    )
    return TorchMBRLAlgorithm(**components, **config['algorithm_kwargs'])
class PeriodMapping(SageObject):
    """Linear period map attached to a space of modular symbols.

    Wraps an immutable matrix ``A`` that is applied on the right to
    elements of the ambient modular-symbols module.
    """

    def __init__(self, modsym, A):
        # Freeze the matrix first so it can be shared safely afterwards.
        A.set_immutable()
        self.__modsym = modsym
        self.__domain = modsym.ambient_module()
        self.__A = A

    def modular_symbols_space(self):
        """Return the modular symbols space this map was built from."""
        return self.__modsym

    def __call__(self, x):
        """Apply the map to ``x``, coercing into the domain if necessary."""
        vec = x if isinstance(x, FreeModuleElement) else self.__domain(x).element()
        return vec * self.__A

    def matrix(self):
        """Return the underlying (immutable) matrix of the map."""
        return self.__A

    def domain(self):
        """Return the ambient module serving as the map's domain."""
        return self.__domain

    def codomain(self):
        """Return the row module spanned by the matrix, i.e. the image."""
        return self.__A.row_module()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.