def main():
with tf.Session(config=TF_CONFIG) as sess:
gan = GAN(sess, MODEL_CONFIG)
gan.init_all()
refine_gan = RefineGAN(sess, MODEL_CONFIG, gan)
refine_gan.init_all()
refine_gan.load_latest('../checkpoints')
print('[*] Preparing data...')
z_sample = np.random.normal(size=(NUM_BATCH, refine_gan.config['batch_size'], refine_gan.config['z_dim']))
feed_dict_sample = [{refine_gan.z: z_sample[i]} for i in range(NUM_BATCH)]
print('[*] Running sampler...')
results = np.array([sess.run(refine_gan.G.tensor_out, feed_dict_sample[i]) for i in range(NUM_BATCH)])
reshaped = results.reshape((-1,) + results.shape[3:])
print('[*] Running evaluation...')
mat_path = os.path.join(refine_gan.config['eval_dir'], 'test.npy')
_ = refine_gan.metrics.eval(reshaped, mat_path=mat_path)

def run_sample_decode(infer_model, infer_sess, model_dir, hparams, summary_writer, src_data, tgt_data, ckpt_index=None):
with infer_model.graph.as_default():
(loaded_infer_model, global_step) = model_helper.create_or_load_model(infer_model.model, model_dir, infer_sess, 'infer', ckpt_index)
_sample_decode(loaded_infer_model, global_step, infer_sess, hparams, infer_model.iterator, src_data, tgt_data, infer_model.src_placeholder, infer_model.batch_size_placeholder, summary_writer)

def prepare(params, samples):
(_, params.word2id) = create_dictionary(samples)
params.word_vec = get_wordvec(PATH_TO_VEC, params.word2id)
params.wvec_dim = 300
return

def _ensure_hms(inner_result: ParsedDate, remain_tokens: List[str]) -> ParsedDate:
result = deepcopy(inner_result)
remain_str = remain_tokens[0]
hms_tokens = []
ispm = False
for token in AM:
if (token in remain_str):
hms_tokens = split(remain_str, AM)
break
for token in PM:
if (token in remain_str):
ispm = True
hms_tokens = split(remain_str, PM)
break
if (len(hms_tokens) == 0):
hms_tokens = split(remain_str, [':'])
else:
hms_tokens = split(hms_tokens[0], [':'])
if ispm:
result = _ensure_pm(result, hms_tokens, 12)
else:
result = _ensure_pm(result, hms_tokens, 0)
return result

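# Note (inferred from usage, since _ensure_pm is defined outside this snippet):
# the third argument appears to be the hour offset for 12-hour to 24-hour
# conversion -- 12 when a PM token was found, 0 for AM or no meridiem token.
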
class MultiInheritanceEstimator(DontPickleAttributeMixin, BaseEstimator):
def __init__(self, attribute_pickled=5):
self.attribute_pickled = attribute_pickled
self._attribute_not_pickled = None

@pytest.mark.parametrize('use_inner,use_outter,sparse_feature_num', [(True, True, 3), (False, False, 1)])
def test_PNN(use_inner, use_outter, sparse_feature_num):
model_name = 'PNN'
sample_size = SAMPLE_SIZE
(x, y, feature_columns) = get_test_data(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num)
model = PNN(feature_columns, dnn_hidden_units=[4, 4], dnn_dropout=0.5, use_inner=use_inner, use_outter=use_outter)
check_model(model, model_name, x, y)

def multi_perspective_expand_for_2D(in_tensor, decompose_params):
in_tensor = tf.expand_dims(in_tensor, axis=1)
decompose_params = tf.expand_dims(decompose_params, axis=0)
return tf.multiply(in_tensor, decompose_params)

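# Shape walkthrough (assuming in_tensor is [batch, dim] and decompose_params is
# [num_perspectives, dim]; the shapes are not stated in this snippet):
#   in_tensor        -> expand_dims(axis=1) -> [batch, 1, dim]
#   decompose_params -> expand_dims(axis=0) -> [1, num_perspectives, dim]
#   elementwise product broadcasts to [batch, num_perspectives, dim]
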
def adj_list_to_matrix(adj_list):
n = len(adj_list)
adj_matrix = np.zeros((n, n))
for (i, c) in enumerate(adj_list):
for (j, weight) in c:
adj_matrix[(i, j)] = weight
return adj_matrix

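# A minimal usage sketch (hypothetical input); adj_list[i] is assumed to be an
# iterable of (neighbor_index, weight) pairs for node i:
#   adj_list = [[(1, 0.5)], [(0, 0.5), (2, 2.0)], []]
#   adj_list_to_matrix(adj_list)
#   # -> [[0.0, 0.5, 0.0],
#   #     [0.5, 0.0, 2.0],
#   #     [0.0, 0.0, 0.0]]
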
class BaseInputExample(ABC):
words: List[str]
space_after: List[bool]
tree: Optional[nltk.Tree]
def leaves(self) -> Optional[List[str]]:
pass
def pos(self) -> Optional[List[Tuple[str, str]]]:
pass

def test__sort_leaderboard_no_rank():
rank = None
metrics = METRICS
score = {k: range(5) for k in metrics.keys()}
score['pipeline'] = range(5)
score = pd.DataFrame(score)
expected_return = score.iloc[::(- 1)].reset_index(drop=True)
expected_return['rank'] = range(1, 6)
returned = benchmark._sort_leaderboard(score, rank, metrics)
assert (len(returned.columns) == len(expected_return.columns))
assert (sorted(returned.columns) == sorted(expected_return.columns))
pd.testing.assert_frame_equal(returned, expected_return[returned.columns], check_dtype=False)

@compare_numpy_output(check_dtype=True)
def test_ufunc_nextafter_fd(A: dace.float32[10], B: dace.float64[10]):
return np.nextafter(A, B)

@hydra.main(config_path=None, config_name='config')
def xpreprocess(cfg: PreprocessingConfig) -> None:
overwatch.info('Preprocessing :: Running Phases for Frame Extraction, Language Compilation, and Batching...')
set_global_seed(cfg.seed)
(train_registry, val_registry, train_dir, val_dir) = preprocess_videos(cfg.dataset.name, path=cfg.dataset.path, artifact_path=cfg.dataset.artifact_path, resolution=cfg.dataset.resolution, n_val_videos=cfg.dataset.n_val_videos, dry_run=cfg.dry_run)
preprocess_language(cfg.dataset.name, train_registry, val_registry, max_lang_len=cfg.dataset.max_lang_len, language_model=cfg.dataset.language_model, hf_cache=cfg.dataset.hf_cache)
jsonify_language(train_registry, val_registry)
index_dir = index(train_registry, val_registry, cfg.dataset.name, artifact_path=cfg.dataset.artifact_path)
unify_batches(cfg.dataset.artifact_path, cfg.dataset.name, train_registry, val_registry, train_dir, val_dir, index_dir, cfg.dataset.batch_formats, max_epochs=cfg.dataset.max_epochs, initial_final_alpha=cfg.dataset.initial_final_alpha)

def get_transforms(cfg):
train_transform = create_transform(input_size=cfg.DATA.CROP_SIZE, scale=(0.8, 1), is_training=True, color_jitter=0.4, auto_augment='rand-m9-mstd0.5-inc1', interpolation='bicubic', re_prob=0.25, re_mode='pixel', re_count=1)
test_transform = transforms.Compose([transforms.Resize((cfg.DATA.CROP_SIZE, cfg.DATA.CROP_SIZE)), transforms.ToTensor(), transforms.Normalize(cfg.DATA.IMAGE_MEAN, cfg.DATA.IMAGE_STD)])
test_tencrops_transform = transforms.Compose([transforms.Resize((cfg.DATA.RESIZE_SIZE, cfg.DATA.RESIZE_SIZE)), transforms.TenCrop(cfg.DATA.CROP_SIZE), transforms.Lambda((lambda crops: torch.stack([transforms.Normalize(cfg.DATA.IMAGE_MEAN, cfg.DATA.IMAGE_STD)(transforms.ToTensor()(crop)) for crop in crops])))])
return (train_transform, test_transform, test_tencrops_transform)

def filter_out_benchmarks(benchmark: str, deployment_name: str, language: str, language_version: str) -> bool:
if ((deployment_name == 'aws') and (language == 'python') and (language_version == '3.9')):
return ('411.image-recognition' not in benchmark)
return True

class TestMakeTwoClass(test_util.TestCase):
def setUp(self):
self.test_configs = [(1,), (7,), (1, 3), (2, 5)]
def testMakeTwoClass(self):
for input_size in self.test_configs:
op = core.CreateOperator('MakeTwoClass', ['X'], ['Y'])
X = np.random.rand(*input_size).astype(np.float32)
X[(X < 0.01)] += 0.01
X[(X > 0.99)] -= 0.01
res = device_checker.CheckSimple(op, [X], [0])
self.assertTrue(res)
for checker in gradient_checkers:
(res, grad, grad_estimated) = checker.CheckSimple(op, [X], 0, [0])
self.assertTrue(res)

def main(args):
seed = args.seed
random = np.random.RandomState(seed)
n = args.number
path = args.file
format_ = args.format_
coords = file_utils.read_coordinates(path, format=format_)
image_names = []
groups = []
for (name, group) in coords.groupby('image_name'):
image_names.append(name)
groups.append(group)
print('# splitting {} micrographs with {} labeled particles into {} train and {} test micrographs'.format(len(image_names), len(coords), (len(image_names) - n), n), file=sys.stderr)
order = random.permutation(len(image_names))
image_names_test = []
groups_test = []
for i in range(n):
j = order[i]
image_names_test.append(image_names[j])
groups_test.append(groups[j])
image_names_train = []
groups_train = []
for i in range(n, len(image_names)):
j = order[i]
image_names_train.append(image_names[j])
groups_train.append(groups[j])
targets_train = pd.concat(groups_train, axis=0)
targets_test = pd.concat(groups_test, axis=0)
root = args.image_dir
ext = args.image_ext
paths_train = []
for image_name in image_names_train:
path = get_image_path(image_name, root, ext)
if (path is not None):
paths_train.append(path)
paths_test = []
for image_name in image_names_test:
path = get_image_path(image_name, root, ext)
if (path is not None):
paths_test.append(path)
image_list_train = pd.DataFrame({'image_name': image_names_train, 'path': paths_train})
image_list_test = pd.DataFrame({'image_name': image_names_test, 'path': paths_test})
root = os.path.dirname(args.file)
basename = os.path.splitext(args.file)[0]
path = (basename + '_train.txt')
print('# writing:', path, file=sys.stderr)
targets_train.to_csv(path, sep='\t', index=False)
path = (basename + '_test.txt')
print('# writing:', path, file=sys.stderr)
targets_test.to_csv(path, sep='\t', index=False)
path = ((root + os.sep) + 'image_list_train.txt')
print('# writing:', path, file=sys.stderr)
image_list_train.to_csv(path, sep='\t', index=False)
path = ((root + os.sep) + 'image_list_test.txt')
print('# writing:', path, file=sys.stderr)
image_list_test.to_csv(path, sep='\t', index=False)

def get_mnist2_anomaly_dataset(trn_img, trn_lbl, tst_img, tst_lbl, nrm_cls_idx=0, proportion=0.5, manualseed=(- 1)):
if (manualseed != (- 1)):
torch.manual_seed(manualseed)
nrm_trn_idx = torch.from_numpy(np.where((trn_lbl.numpy() == nrm_cls_idx))[0])
abn_trn_idx = torch.from_numpy(np.where((trn_lbl.numpy() != nrm_cls_idx))[0])
nrm_tst_idx = torch.from_numpy(np.where((tst_lbl.numpy() == nrm_cls_idx))[0])
abn_tst_idx = torch.from_numpy(np.where((tst_lbl.numpy() != nrm_cls_idx))[0])
abn_tst_idx = abn_tst_idx[torch.randperm(len(abn_tst_idx))]
abn_tst_idx = abn_tst_idx[:int((len(abn_tst_idx) * proportion))]
nrm_trn_img = trn_img[nrm_trn_idx]
abn_trn_img = trn_img[abn_trn_idx]
nrm_tst_img = tst_img[nrm_tst_idx]
abn_tst_img = tst_img[abn_tst_idx]
nrm_trn_lbl = trn_lbl[nrm_trn_idx]
abn_trn_lbl = trn_lbl[abn_trn_idx]
nrm_tst_lbl = tst_lbl[nrm_tst_idx]
abn_tst_lbl = tst_lbl[abn_tst_idx]
nrm_trn_lbl[:] = 0
nrm_tst_lbl[:] = 0
abn_trn_lbl[:] = 1
abn_tst_lbl[:] = 1
new_trn_img = nrm_trn_img.clone()
new_trn_lbl = nrm_trn_lbl.clone()
new_tst_img = torch.cat((nrm_tst_img, abn_tst_img), dim=0)
new_tst_lbl = torch.cat((nrm_tst_lbl, abn_tst_lbl), dim=0)
return (new_trn_img, new_trn_lbl, new_tst_img, new_tst_lbl)

def is_triangular(B) -> bool:
if isinstance(B, (list, tuple)):
G = B
else:
try:
G = B.gens()
except Exception:
raise TypeError('is_triangular wants as input an ideal, or a list of polynomials\n')
vars = G[0].parent().gens()
n = len(G)
for i in range(n):
for t in G[i].monomials():
for x in vars[0:i]:
if (t.degree(x) != 0):
return False
return True

def dirContainsTestSuite(path, lit_config):
cfgpath = os.path.join(path, lit_config.site_config_name)
if os.path.exists(cfgpath):
return cfgpath
cfgpath = os.path.join(path, lit_config.config_name)
if os.path.exists(cfgpath):
return cfgpath

class SAP(nn.Module):
def __init__(self, out_dim):
super(SAP, self).__init__()
self.act_fn = nn.Tanh()
self.sap_layer = SelfAttentionPooling(out_dim)
def forward(self, feature, att_mask):
feature = self.act_fn(feature)
sap_vec = self.sap_layer(feature, att_mask)
return sap_vec

@labeling_function()
def lf_regex_check_out(x):
return (SPAM if re.search('check.*out', x.text, flags=re.I) else ABSTAIN)

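# A minimal usage sketch (hypothetical data point): the labeling function votes
# SPAM whenever "check ... out" occurs case-insensitively in x.text.
#   from types import SimpleNamespace
#   lf_regex_check_out(SimpleNamespace(text='Check this video out!'))  # -> SPAM
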
def register_Ns3CsmaChannel_methods(root_module, cls):
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_constructor([])
cls.add_method('Attach', 'int32_t', [param('ns3::Ptr< ns3::CsmaNetDevice >', 'device')])
cls.add_method('Detach', 'bool', [param('ns3::Ptr< ns3::CsmaNetDevice >', 'device')])
cls.add_method('Detach', 'bool', [param('uint32_t', 'deviceId')])
cls.add_method('Reattach', 'bool', [param('uint32_t', 'deviceId')])
cls.add_method('Reattach', 'bool', [param('ns3::Ptr< ns3::CsmaNetDevice >', 'device')])
cls.add_method('TransmitStart', 'bool', [param('ns3::Ptr< ns3::Packet const >', 'p'), param('uint32_t', 'srcId')])
cls.add_method('TransmitEnd', 'bool', [])
cls.add_method('PropagationCompleteEvent', 'void', [])
cls.add_method('GetDeviceNum', 'int32_t', [param('ns3::Ptr< ns3::CsmaNetDevice >', 'device')])
cls.add_method('GetState', 'ns3::WireState', [])
cls.add_method('IsBusy', 'bool', [])
cls.add_method('IsActive', 'bool', [param('uint32_t', 'deviceId')])
cls.add_method('GetNumActDevices', 'uint32_t', [])
cls.add_method('GetNDevices', 'uint32_t', [], is_const=True, is_virtual=True)
cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'i')], is_const=True, is_virtual=True)
cls.add_method('GetCsmaDevice', 'ns3::Ptr< ns3::CsmaNetDevice >', [param('uint32_t', 'i')], is_const=True)
cls.add_method('GetDataRate', 'ns3::DataRate', [])
cls.add_method('GetDelay', 'ns3::Time', [])
return

def get_c_function_param(x: Field):
is_dyn_array = (x.count and (not isinstance(x.count, int)))
name = _T(x.name)
if (is_dyn_array or x.by_ref):
return f'[MarshalAs(UnmanagedType.LPArray)] {get_type_name(x.type)}[] {name}'
elif x.by_mut:
return f'[MarshalAs(UnmanagedType.LPArray)] [In, Out] {get_type_name(x.type)}[] {name}'
elif x.count:
return f'{get_type_name(x.type)}[{x.count}] {name}'
else:
return f'{get_type_name(x.type)} {name}'

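# Marshaling rules encoded above: dynamically sized or by-ref parameters become
# LPArray arrays, mutable (by_mut) arrays additionally get [In, Out], a fixed
# integer count maps to a fixed-size array type, and everything else is passed
# by value.
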
class StandardSymplecticSpace(EuclideanSpace):
_symplectic_form: SymplecticForm
def __init__(self, dimension: int, name: Optional[str]=None, latex_name: Optional[str]=None, coordinates: str='Cartesian', symbols: Optional[str]=None, symplectic_name: Optional[str]='omega', symplectic_latex_name: Optional[str]=None, start_index: int=1, base_manifold: Optional[StandardSymplecticSpace]=None, names: Optional[Tuple[str]]=None):
if ((dimension % 2) == 1):
raise ValueError(f'the dimension of the manifold must be even but it is {dimension}')
dim_half = (dimension // 2)
if ((names is not None) and (symbols is None)):
symbols = ' '.join(names)
if (symbols is None):
if (dim_half == 1):
symbols = 'q:q p:p'
else:
symbols_list = [f'q{i}:q^{i} p{i}:p_{i}' for i in range(1, (dim_half + 1))]
symbols = ' '.join(symbols_list)
if (name is None):
name = f'R{dimension}'
category = Manifolds(RR).Smooth()
EuclideanSpace.__init__(self, dimension, name, latex_name=latex_name, coordinates=coordinates, symbols=symbols, start_index=start_index, base_manifold=base_manifold, category=category, init_coord_methods=None)
self._symplectic_form = SymplecticFormParal(self, symplectic_name, symplectic_latex_name)
for i in range(0, dim_half):
q_index = ((2 * i) + 1)
self._symplectic_form.set_comp()[(q_index, (q_index + 1))] = (- 1)
def _repr_(self):
return f'Standard symplectic space {self._name}'
def symplectic_form(self) -> SymplecticForm:
return self._symplectic_form

def TD_product(k, TD1, n1, TD2, n2, check=True):
N = (n1 * n2)
TD = []
for X1 in TD1:
for X2 in TD2:
TD.append([((x1 * n2) + (x2 % n2)) for (x1, x2) in zip(X1, X2)])
if check:
assert is_transversal_design(TD, k, N)
return TD

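# Product construction for transversal designs: each block of the resulting
# TD(k, n1*n2) pairs a block X1 of TD1 with a block X2 of TD2, mapping each
# point pair (x1, x2) to x1 * n2 + (x2 % n2) inside its group.
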
def probs(model, hyper, data, target):
(s_log_pw, s_log_qw, s_log_likelihood) = (0.0, 0.0, 0.0)
for _ in range(hyper.n_samples):
output = torch.log(model(data))
(sample_log_pw, sample_log_qw) = model.get_lpw_lqw()
sample_log_likelihood = ((- F.nll_loss(output, target, reduction='sum')) * hyper.multiplier)
s_log_pw += (sample_log_pw / hyper.n_samples)
s_log_qw += (sample_log_qw / hyper.n_samples)
s_log_likelihood += (sample_log_likelihood / hyper.n_samples)
return (s_log_pw, s_log_qw, s_log_likelihood)

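# probs() Monte-Carlo-averages the three terms of the variational objective over
# hyper.n_samples weight samples: log prior p(w), log variational posterior q(w),
# and the (scaled) log likelihood computed from the negative NLL loss.
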
def _is_int_value(value, target_value: int) -> bool:
if isinstance(value, numbers.Integral):
return (value == target_value)
if ((len(value.free_symbols) > 0) or (int(value) != target_value)):
return False
return True

@pytest.mark.parametrize('sparse_feature_num,dense_feature_num', [(2, 0), (0, 2), (2, 2)])
def test_WDL(sparse_feature_num, dense_feature_num):
model_name = 'WDL'
sample_size = SAMPLE_SIZE
(x, y, feature_columns) = get_test_data(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=dense_feature_num)
model = WDL(feature_columns, feature_columns, dnn_activation='prelu', dnn_hidden_units=[32, 32], dnn_dropout=0.5, device=get_device())
check_model(model, model_name, x, y)

def spinning_up_ddpg_config():
config = spinning_up_td3_config()
config.target_network_update_freq = 1
config.activ = 'relu'
return config

def revert_sync_batchnorm(module):
module_output = module
if isinstance(module, torch.nn.modules.batchnorm.SyncBatchNorm):
new_cls = BatchNormXd
module_output = BatchNormXd(module.num_features, module.eps, module.momentum, module.affine, module.track_running_stats)
if module.affine:
with torch.no_grad():
module_output.weight = module.weight
module_output.bias = module.bias
module_output.running_mean = module.running_mean
module_output.running_var = module.running_var
module_output.num_batches_tracked = module.num_batches_tracked
if hasattr(module, 'qconfig'):
module_output.qconfig = module.qconfig
for (name, child) in module.named_children():
module_output.add_module(name, revert_sync_batchnorm(child))
del module
return module_output

def get_total_page(html):
try:
page_count = json.loads(html).get('data', {}).get('page', {}).get('totalpage', 1)
except Exception as e:
parser.error('Errors occurred when parsing total page of repost, exception is {}'.format(e))
page_count = 1
return page_count

def test_rpad_recordarray():
keys = ['x', 'y']
offsets = ak.index.Index64(np.asarray([0, 0, 1, 3]))
content = ak.contents.numpyarray.NumpyArray(np.asarray([1.1, 2.2, 2.2]))
content1 = ak.contents.listoffsetarray.ListOffsetArray(offsets, content)
offsets = ak.index.Index64(np.asarray([0, 2, 3, 3]))
content = ak.contents.numpyarray.NumpyArray(np.asarray([2, 2, 1]))
content2 = ak.contents.listoffsetarray.ListOffsetArray(offsets, content)
contents = [content1, content2]
array = ak.contents.recordarray.RecordArray(contents, keys)
assert (to_list(ak._do.pad_none(array, 5, 0)) == [{'x': [], 'y': [2, 2]}, {'x': [1.1], 'y': [1]}, {'x': [2.2, 2.2], 'y': []}, None, None])
assert (ak._do.pad_none(array.to_typetracer(), 5, 0).form == ak._do.pad_none(array, 5, 0).form)
assert (to_list(ak._do.pad_none(array, 2, 1)) == [{'x': [None, None], 'y': [2, 2]}, {'x': [1.1, None], 'y': [1, None]}, {'x': [2.2, 2.2], 'y': [None, None]}])
assert (ak._do.pad_none(array.to_typetracer(), 2, 1).form == ak._do.pad_none(array, 2, 1).form)

def test_synthetic_sample_results_in_sampled_delay_when_delay_function_is_given():
n_actions = 3
delay_function = ExponentialDelaySampler(max_scale=100.0, random_state=12345).exponential_delay_function
dataset = BanditEnvironmentSimulator(n_actions=n_actions, reward_function=logistic_sparse_reward_function, delay_function=delay_function, random_state=12345)
actual_bandits_dataset = dataset.next_bandit_round_batch(n_rounds=5)
expected_round_delays = np.tile([266.0, 39.0, 21.0, 23.0, 84.0], (n_actions, 1)).T
assert (actual_bandits_dataset.round_delays == expected_round_delays).all()

def O7():
A = Matrix(GF(3), [[1, 0, 0, 1, 1, 1, 1], [0, 1, 0, 0, 1, 2, 2], [0, 0, 1, 1, 0, 1, 0]])
M = TernaryMatroid(A, 'abcdefg')
M.rename(('O7: ' + repr(M)))
return M

@numba.extending.overload_method(TupleType, 'content')
def Tuple_content(builder, index):
if (isinstance(builder, TupleType) and isinstance(index, numba.types.Integer)):
def getter(builder, index):
content = builder._contents[numba.literally(index)]
return content
return getter

def norm(edge_index, num_nodes, edge_weight=None, improved=False, dtype=None):
if (edge_weight is None):
edge_weight = torch.ones((edge_index.size(1),), dtype=dtype, device=edge_index.device)
fill_value = (1.0 if (not improved) else 2.0)
(edge_index, edge_weight) = add_remaining_self_loops(edge_index, edge_weight, fill_value, num_nodes)
(row, col) = edge_index
deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
deg_inv_sqrt = deg.pow((- 0.5))
deg_inv_sqrt[(deg_inv_sqrt == float('inf'))] = 0
return (edge_index, ((deg_inv_sqrt[row] * edge_weight) * deg_inv_sqrt[col]))

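# The returned edge weights implement the symmetric GCN normalization
# D^{-1/2} A_hat D^{-1/2}: after self-loops are added (weight 1.0, or 2.0 when
# improved=True), each weight w_ij is rescaled by deg(i)^{-1/2} * deg(j)^{-1/2},
# with infinite inverse-sqrt degrees (isolated nodes) zeroed out.
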
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data-root', help='data root for both image file and anno file')
parser.add_argument('--in-path', help='mapping file of image_name and ann_file, "image_name ann_file" in each line')
parser.add_argument('--out-path', help='output txt path with line-json format')
args = parser.parse_args()
return args

def _bytes_feature(value):
if (value is None):
value = []
if (six.PY3 and isinstance(value, six.text_type)):
value = six.binary_type(value, encoding='utf-8')
if isinstance(value, np.ndarray):
value = value.reshape((- 1))
value = bytes(value)
if (not isinstance(value, list)):
value = [value]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))

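# A minimal usage sketch (hypothetical values): every branch normalizes the input
# to a list of bytes objects before wrapping it in a tf.train.Feature.
#   _bytes_feature('hello')                  # str     -> [b'hello']
#   _bytes_feature(np.zeros(2, np.uint8))    # ndarray -> [b'\x00\x00']
#   _bytes_feature(None)                     # None    -> empty bytes_list
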
def Pooling_ansatz1(params, wires):
qml.CRZ(params[0], wires=[wires[0], wires[1]])
qml.PauliX(wires=wires[0])
qml.CRX(params[1], wires=[wires[0], wires[1]])

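# This two-qubit pooling ansatz applies a controlled-RZ from wires[0] to wires[1],
# flips the control qubit with PauliX, then applies a controlled-RX, so each of
# the two rotation parameters acts conditioned on a different control state.
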
def get_unified_clusters(clusters, to_unify):
def to_set(v, s):
if (not isinstance(v, list)):
s.add(v)
return
for x in v:
to_set(x, s)
(A, B) = (set(), set())
to_set(clusters, A)
new_clusters = []
for (c_i, cluster) in enumerate(clusters):
to_unify_for_cluster = to_unify[c_i]
cluster_D = {i.Index: i for i in cluster}
deleted_from_cluster = {}
next_gen = []
first_time = True
while (first_time or next_gen):
first_time = False
if next_gen:
to_unify_for_cluster = next_gen
next_gen = []
for l in to_unify_for_cluster:
z = l[0]
x = l[1]
if isinstance(cluster_D[x], list):
v = cluster_D[x]
zz = cluster_D[z]
if isinstance(zz, list):
v.extend(zz)
else:
v.append(zz)
v.sort(key=(lambda y: y.Index))
else:
v = [cluster_D[x]]
try:
zz = cluster_D[z]
except KeyError as e:
if (not (z in deleted_from_cluster)):
raise e
else:
warnings.warn(f'found a double: {l}, I already deleted {z} and unified it with {deleted_from_cluster[z]}, will unify now to {x}')
next_gen.append(sorted([x, deleted_from_cluster[z]]))
continue
if isinstance(zz, list):
v.extend(zz)
else:
v.append(zz)
v.sort(key=(lambda y: y.Index))
cluster_D[x] = v
deleted_from_cluster[z] = x
del cluster_D[z]
cluster = []
for i in sorted(cluster_D.keys()):
cluster.append(cluster_D[i])
new_clusters.append(cluster)
to_set(new_clusters, B)
assert (A == B), (A, B)
return new_clusters

@test_utils.test(arch=ti.cpu)
def test_vector_to_list():
a = ti.Vector.field(2, float, ())
data = [2, 3]
b = ti.Vector(data)
assert (list(b) == data)
assert (len(b) == len(data))
a[None] = b
assert all((a[None] == ti.Vector(data)))

class UploadCommand(BaseUserCommand):
def walk_dir(self, rel_path):
entries: List[os.DirEntry] = list(os.scandir(rel_path))
files = [(os.path.join(os.getcwd(), f.path), f.path) for f in entries if f.is_file()]
for f in entries:
if f.is_dir():
files += self.walk_dir(f.path)
return files
def run(self):
token = HfFolder.get_token()
if (token is None):
print('Not logged in')
exit(1)
local_path = os.path.abspath(self.args.path)
if os.path.isdir(local_path):
if (self.args.filename is not None):
raise ValueError('Cannot specify a filename override when uploading a folder.')
rel_path = os.path.basename(local_path)
files = self.walk_dir(rel_path)
elif os.path.isfile(local_path):
filename = (self.args.filename if (self.args.filename is not None) else os.path.basename(local_path))
files = [(local_path, filename)]
else:
raise ValueError('Not a valid file or directory: {}'.format(local_path))
if (sys.platform == 'win32'):
files = [(filepath, filename.replace(os.sep, '/')) for (filepath, filename) in files]
if (len(files) > UPLOAD_MAX_FILES):
print('About to upload {} files to S3. This is probably wrong. Please filter files before uploading.'.format(ANSI.bold(len(files))))
exit(1)
for (filepath, filename) in files:
print('About to upload file {} to S3 under filename {}'.format(ANSI.bold(filepath), ANSI.bold(filename)))
choice = input('Proceed? [Y/n] ').lower()
if (not ((choice == '') or (choice == 'y') or (choice == 'yes'))):
print('Abort')
exit()
print(ANSI.bold('Uploading... This might take a while if files are large'))
for (filepath, filename) in files:
access_url = self._api.presign_and_upload(token=token, filename=filename, filepath=filepath)
print('Your file now lives at:')
print(access_url)

def main():
graph = graph_loader(graph_type='ky2', seed=1)
params = {'runs': 1, 'steps': 30, 'seed': 1, 'attack': 'rb_node', 'attack_approx': int((0.1 * len(graph))), 'plot_transition': True, 'gif_animation': True, 'gif_snaps': True, 'edge_style': None, 'node_style': None, 'fa_iter': 20}
print('Creating example visualization')
a = Attack(graph, **params)
a.run_simulation()
node_attacks = ['rnd_node', 'id_node', 'rd_node', 'ib_node', 'rb_node']
edge_attacks = ['rnd_edge', 'id_edge', 'rd_edge', 'ib_edge', 'rb_edge']
params['runs'] = 10
params['steps'] = (len(graph) - 1)
params['plot_transition'] = False
params['gif_animation'] = False
params['gif_snaps'] = False
print('Running node attacks')
results = defaultdict(str)
for attack in node_attacks:
params['attack'] = attack
if (('rb' in attack) or ('ib' in attack)):
params['attack_approx'] = int((0.1 * len(graph)))
else:
params['attack_approx'] = None
a = Attack(graph, **params)
results[attack] = a.run_simulation()
plot_results(graph, params['steps'], results, title='water:node-attacks_runs={}'.format(params['runs']))
print('Running edge attacks')
results = defaultdict(str)
for attack in edge_attacks:
params['attack'] = attack
if (('rb' in attack) or ('ib' in attack)):
params['attack_approx'] = int((0.1 * len(graph)))
else:
params['attack_approx'] = None
a = Attack(graph, **params)
results[attack] = a.run_simulation()
plot_results(graph, params['steps'], results, title='water:edge-attacks_runs={}'.format(params['runs']))

def main(args):
config = load_config(args.config)
logger.info('config: {}'.format(json.dumps(config)))
set_seed((args.seed or config['seed']))
(model_ori, checkpoint, epoch, best) = prepare_model(args, logger, config)
logger.info('Model structure: \n {}'.format(str(model_ori)))
custom_ops = {}
bound_config = config['bound_params']
batch_size = (args.batch_size or config['batch_size'])
test_batch_size = (args.test_batch_size or batch_size)
(dummy_input, train_data, test_data) = load_data(args, config['data'], batch_size, test_batch_size, aug=(not args.no_data_aug))
lf = (args.loss_fusion and (args.bound_type == 'CROWN-IBP'))
bound_opts = bound_config['bound_opts']
model_ori.train()
model = BoundedModule(model_ori, dummy_input, bound_opts=bound_opts, custom_ops=custom_ops, device=args.device)
model_ori.to(args.device)
if (checkpoint is None):
if args.manual_init:
manual_init(args, model_ori, model, train_data)
if args.kaiming_init:
kaiming_init(model_ori)
if lf:
model_loss = BoundedModule(CrossEntropyWrapper(model_ori), (dummy_input.cuda(), torch.zeros(1, dtype=torch.long).cuda()), bound_opts=get_bound_opts_lf(bound_opts), device=args.device)
params = list(model_loss.parameters())
else:
model_loss = model
params = list(model_ori.parameters())
logger.info('Parameter shapes: {}'.format([p.shape for p in params]))
if args.multi_gpu:
raise NotImplementedError('Multi-GPU is not supported yet')
opt = get_optimizer(args, params, checkpoint)
max_eps = (args.eps or bound_config['eps'])
eps_scheduler = get_eps_scheduler(args, max_eps, train_data)
lr_scheduler = get_lr_scheduler(args, opt)
if ((epoch > 0) and (not args.plot)):
eps_scheduler.train()
for i in range(epoch):
lr_scheduler.step()
eps_scheduler.step_epoch(verbose=False)
if args.verify:
logger.info('Inference')
meter = Train(model, model_ori, 10000, test_data, eps_scheduler, None, loss_fusion=False)
logger.info(meter)
else:
timer = 0.0
for t in range((epoch + 1), (args.num_epochs + 1)):
logger.info('Epoch {}, learning rate {}, dir {}'.format(t, lr_scheduler.get_last_lr(), args.dir))
start_time = time.time()
if lf:
Train(model_loss, model_ori, t, train_data, eps_scheduler, opt, loss_fusion=True)
else:
Train(model, model_ori, t, train_data, eps_scheduler, opt)
update_state_dict(model_ori, model_loss)
epoch_time = (time.time() - start_time)
timer += epoch_time
lr_scheduler.step()
logger.info('Epoch time: {:.4f}, Total time: {:.4f}'.format(epoch_time, timer))
is_best = False
if ((t % args.test_interval) == 0):
logger.info('Test without loss fusion')
with torch.no_grad():
meter = Train(model, model_ori, t, test_data, eps_scheduler, None, loss_fusion=False)
if (eps_scheduler.get_eps() == eps_scheduler.get_max_eps()):
if (meter.avg('Rob_Err') < best[1]):
(is_best, best) = (True, (meter.avg('Err'), meter.avg('Rob_Err'), t))
logger.info('Best epoch {}, error {:.4f}, robust error {:.4f}'.format(best[(- 1)], best[0], best[1]))
save(args, epoch=t, best=best, model=model_ori, opt=opt, is_best=is_best)

def preprocess_assumptions(args):
args = list(args)
last = None
for (i, x) in reversed(list(enumerate(args))):
if isinstance(x, str):
del args[i]
last = x
elif (((not hasattr(x, 'assume')) or (isinstance(x, Expression) and x.is_symbol())) and (last is not None)):
args[i] = GenericDeclaration(x, last)
else:
last = None
return args

def getEdgesAndLabels(docs_dir, models_dir, comparator):
edges = []
labels = []
docs_edges = _getEdgesIter(docs_dir, comparator)
models_edges = _getEdgesIter(models_dir, comparator)
for topic in docs_edges:
curr_docs_edges = set(docs_edges[topic])
curr_models_edges = set(models_edges[topic])
for edge in curr_docs_edges:
label = (1 if (edge in curr_models_edges) else 0)
edges.append(edge.edge)
labels.append(label)
return (edges, labels)

class WideAndDeepModel(tf.keras.Model):
def __init__(self, data, num_users, num_items, embedding_size, mlp_hidden_size, dropout_prob, lr, l_w, l_b, name='WideAndDeepModel', **kwargs):
super().__init__(name=name, **kwargs)
self._data = data
self._num_users = num_users
self._num_items = num_items
self._embedding_size = embedding_size
self._mlp_hidden_size = mlp_hidden_size
self._dropout_prob = dropout_prob
self._lr = lr
self._l_w = l_w
self._l_b = l_b
self._all_item_enc = None
self._all_item_features_enc = None
self._sparse_dimensions = ([self._num_users, self._num_items] + [sp_i_feature.shape[1] for sp_i_feature in self._data.sp_i_features])
self._num_type_of_categorical_features = len(self._data.sp_i_features)
self._size_list = ([(self._embedding_size * (self._num_type_of_categorical_features + 2))] + list(self._mlp_hidden_size))
self.initializer = tf.initializers.GlorotUniform()
self.regularizer = keras.regularizers.l2(self._l_w)
self.bias_regularizer = keras.regularizers.l2(self._l_b)
self._len_sparse_dimension = sum(self._sparse_dimensions)
self.wide = keras.layers.Dense(1, use_bias=True, kernel_regularizer=self.regularizer, bias_regularizer=self.bias_regularizer)
self.deep = keras.Sequential()
for units in self._size_list[:(- 1)]:
self.deep.add(keras.layers.Dense(units, use_bias=True, activation='relu', kernel_initializer=self.initializer, kernel_regularizer=self.regularizer, bias_regularizer=self.bias_regularizer))
self.deep.add(keras.layers.Dense(self._size_list[(- 1)], use_bias=True, activation='linear', kernel_initializer=self.initializer, kernel_regularizer=self.regularizer, bias_regularizer=self.bias_regularizer))
self.predict_layer = keras.layers.Dense(1, use_bias=True, activation='sigmoid', kernel_regularizer=self.regularizer, bias_regularizer=self.bias_regularizer)
self.loss = keras.losses.BinaryCrossentropy()
self.optimizer = tf.optimizers.Adam(self._lr)
def call(self, inputs, training=False, **kwargs):
(_, _, s) = inputs
wide_part = self.wide(s)
deep_part = self.deep(s)
concat = tf.concat([wide_part, deep_part], axis=1)
predict = self.predict_layer(concat)
return predict
def train_step(self, batch):
(u, i, s, label) = batch
with tf.GradientTape() as tape:
predict = self(inputs=(u, i, s), training=True)
loss = self.loss(label, predict)
grads = tape.gradient(loss, self.trainable_weights)
self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
return loss
def predict(self, user, **kwargs):
u_enc = self._data.user_encoder.transform([[user]])
if (self._all_item_enc is None):
self._all_item_enc = tf.convert_to_tensor(self._data.item_encoder.transform(np.reshape(np.arange(self._num_items), newshape=(self._num_items, 1))).todense())
if (self._all_item_features_enc is None):
self._all_item_features_enc = tf.convert_to_tensor(self._data.sp_i_features[0].todense())
u_enc = tf.repeat(u_enc.toarray(), self._num_items, axis=0)
s = tf.concat([tf.cast(u_enc, tf.float32), tf.cast(self._all_item_enc, tf.float32), tf.cast(self._all_item_features_enc, tf.float32)], axis=1)
return self(inputs=(None, None, s), transpose_b=True)
def get_user_recs(self, user, k=100):
user_items = self._data.train_dict[user].keys()
predictions = {i: self(inputs=(user, i, self.get_sparse(user, i))) for i in self._data.items if (i not in user_items)}
(indices, values) = zip(*predictions.items())
indices = np.array(indices)
values = np.array(tf.squeeze(values))
partially_ordered_preds_indices = np.argpartition(values, (- k))[(- k):]
real_values = values[partially_ordered_preds_indices]
real_indices = indices[partially_ordered_preds_indices]
local_top_k = real_values.argsort()[::(- 1)]
return [(real_indices[item], real_values[item]) for item in local_top_k]
def get_sparse(self, u, i):
u_one_hot = [0 for _ in range(self._num_users)]
u_one_hot[self._data.public_users[u]] = 1
i_one_hot = [0 for _ in range(self._num_items)]
i_one_hot[self._data.public_items[i]] = 1
f_one_hot = self._data.sp_i_features.getrow(self._data.public_items[i]).toarray()[0].tolist()
s = []
s += u_one_hot
s += i_one_hot
s += f_one_hot
return tf.reshape(tf.convert_to_tensor(np.array(s)), shape=(1, len(s)))
def get_top_k(self, preds, train_mask, k=100):
return tf.nn.top_k(tf.where(train_mask, preds, (- np.inf)), k=k, sorted=True)

def pytorch_to_onnx(onnx_filename, model, input_example):
if (not os.path.exists(onnx_filename)):
torch.onnx.export(model, input_example, onnx_filename)

class ModelType(ExplicitEnum):
LayoutLM = 'layoutlm'
LayoutLMv2andv3 = 'layoutlmv2andv3'
VisionEncoderDecoder = 'vision_encoder_decoder'

def main(args):
if (args.modelpath is None):
savepath = f'./inferences/defaultsd/{args.dataset}/{args.capstyle}'
else:
mp = os.path.basename(os.path.normpath(args.modelpath))
if ('traintext' not in args.modelpath):
if ('imagenette' in args.modelpath):
args.dataset = 'imagenette10'
savepath = f'./inferences/imagenette10_frozentext/{mp}'
elif ('aesthetics' in args.modelpath):
args.dataset = 'laionaesthetics'
savepath = f'./inferences/laionaesthetics_ft/{mp}'
elif ('laion' in args.modelpath):
args.dataset = 'laion'
savepath = f'./inferences/laion_frozentext/{mp}'
elif ('l100kaion' in args.modelpath):
args.dataset = 'l100kaion'
savepath = f'./inferences/l100kaion_frozentext/{mp}'
else:
raise Exception("Savepath doesn't exist for this case")
elif ('imagenette' in args.modelpath):
args.dataset = 'imagenette10'
savepath = f'./inferences/imagenette10_traintext/{mp}'
elif ('laion' in args.modelpath):
args.dataset = 'laion'
savepath = f'./inferences/laion_traintext/{mp}'
else:
raise Exception("Savepath doesn't exist for this case")
if (args.iternum is not None):
savepath = f'{savepath}_{args.iternum}'
savepath = f'{savepath}/{args.modelstyle}'
if (args.rand_noise_lam is not None):
savepath = f'{savepath}_ginfer{args.rand_noise_lam}'
if (args.rand_augs is not None):
savepath = f'{savepath}_auginfer_{args.rand_augs}_{args.rand_aug_repeats}'
os.makedirs(savepath, exist_ok=True)
os.makedirs(f'{savepath}/generations', exist_ok=True)
if (args.modelpath is None):
checkpath = 'stabilityai/stable-diffusion-2-1'
elif (args.iternum is not None):
checkpath = f'{args.modelpath}/checkpoint_{str(args.iternum)}/'
else:
checkpath = f'{args.modelpath}/checkpoint/'
if (args.modelpath is None):
device = 'cuda'
pipe = StableDiffusionPipeline.from_pretrained(checkpath, use_auth_token=True)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.safety_checker = (lambda images, clip_input: (images, False))
pipe = pipe.to(device)
generator = torch.Generator(device=device).manual_seed(42)
elif (args.rand_noise_lam is not None):
pipe = Newpipe.from_pretrained(checkpath, safety_checker=None).to('cuda')
pipe.noiselam = args.rand_noise_lam
else:
pipe = StableDiffusionPipeline.from_pretrained(checkpath, safety_checker=None).to('cuda')
tokenizer = AutoTokenizer.from_pretrained(checkpath, subfolder='tokenizer', use_fast=False)
num = args.im_batch
num_batches = args.nbatches
count = 0
prompt_list = None
if (args.modelstyle == 'nolevel'):
prompt_list = (['An image'] * num_batches)
elif (args.modelstyle == 'classlevel'):
objects = ['tench', 'English springer', 'cassette player', 'chain saw', 'church', 'French horn', 'garbage truck', 'gas pump', 'golf ball', 'parachute']
np.random.seed(args.seed)
temp = list(np.random.choice(objects, num_batches))
prompt_list = [f'An image of {x}' for x in temp]
elif (args.modelstyle in ['instancelevel_blip', 'instancelevel_random']):
if (args.dataset == 'imagenette10'):
if (args.modelstyle == 'instancelevel_blip'):
prompt_json = './data/imagenette2-320/blip_captions.json'
else:
prompt_json = './data/imagenette2-320/random_captions_4.json'
elif (args.dataset == 'laionaesthetics'):
if (args.modelstyle == 'instancelevel_blip'):
prompt_json = './data/laion_10k_random_aesthetics_5plus/laion_aesthetics_combined_captions.json'
else:
raise Exception('Case not written')
elif (args.dataset == 'laion'):
if (args.modelstyle == 'instancelevel_blip'):
prompt_json = './data/laion_10k_random/laion_combined_captions.json'
else:
raise Exception('Case not written')
elif (args.dataset == 'l100kaion'):
if (args.modelstyle == 'instancelevel_blip'):
prompt_json = './data/laion_100k_random_sdv2p1/l100kaion_combined_captions.json'
else:
raise Exception('Case not written')
with open(prompt_json) as f:
all_prompts_dict = json.load(f)
okprompts = [v[0] for (k, v) in all_prompts_dict.items()]
if (args.dataset == 'l100kaion'):
okprompts = okprompts[:10000]
np.random.seed(args.seed)
prompt_list = list(np.random.choice(okprompts, num_batches))
if (args.modelstyle == 'instancelevel_random'):
new_prompts = []
for p in prompt_list:
instance_prompt = ast.literal_eval(p)
instance_prompt = tokenizer.decode(instance_prompt)
new_prompts.append(instance_prompt)
prompt_list = new_prompts[:]
if (args.rand_augs is not None):
final_prompt_list = []
for prompt in prompt_list:
newprompt = prompt_augmentation(prompt, args.rand_augs, tokenizer, args.rand_aug_repeats)
final_prompt_list.append(newprompt)
prompt_list = final_prompt_list
with open(f'{savepath}/prompts.txt', 'w') as f:
for line in prompt_list:
f.write(f'{line}\n')
for i in range(num_batches):
if (prompt_list is not None):
prompt = prompt_list[i]
else:
raise Exception('no prompt list!')
if (args.modelpath is None):
images = pipe(prompt, num_inference_steps=50, generator=generator).images
else:
images = pipe(prompt=prompt, height=args.resolution, width=args.resolution, num_inference_steps=50, num_images_per_prompt=args.im_batch).images
for j in range(len(images)):
image = images[j]
if (image.size[0] > args.resolution):
image = resize(args.resolution, args.resolution, image)
image.save(f'{savepath}/generations/{count}.png')
count += 1

def vis_faces(log_hooks):
display_count = len(log_hooks)
fig = plt.figure(figsize=(8, (4 * display_count)))
gs = fig.add_gridspec(display_count, 3)
for i in range(display_count):
hooks_dict = log_hooks[i]
fig.add_subplot(gs[(i, 0)])
if ('diff_input' in hooks_dict):
vis_faces_with_id(hooks_dict, fig, gs, i)
else:
vis_faces_no_id(hooks_dict, fig, gs, i)
plt.tight_layout()
return fig

def main(unused_argv):
if (FLAGS.hint_mode == 'encoded_decoded'):
encode_hints = True
decode_hints = True
elif (FLAGS.hint_mode == 'decoded_only'):
encode_hints = False
decode_hints = True
elif (FLAGS.hint_mode == 'none'):
encode_hints = False
decode_hints = False
else:
raise ValueError('Hint mode not in {encoded_decoded, decoded_only, none}.')
train_lengths = [int(x) for x in FLAGS.train_lengths]
rng = np.random.RandomState(FLAGS.seed)
rng_key = jax.random.PRNGKey(rng.randint((2 ** 32)))
(train_samplers, val_samplers, val_sample_counts, test_samplers, test_sample_counts, spec_list) = create_samplers(rng, train_lengths)
processor_factory = clrs.get_processor_factory(FLAGS.processor_type, use_ln=FLAGS.use_ln, nb_triplet_fts=FLAGS.nb_triplet_fts, nb_heads=FLAGS.nb_heads)
model_params = dict(processor_factory=processor_factory, hidden_dim=FLAGS.hidden_size, encode_hints=encode_hints, decode_hints=decode_hints, encoder_init=FLAGS.encoder_init, use_lstm=FLAGS.use_lstm, learning_rate=FLAGS.learning_rate, grad_clip_max_norm=FLAGS.grad_clip_max_norm, checkpoint_path=FLAGS.checkpoint_path, freeze_processor=FLAGS.freeze_processor, dropout_prob=FLAGS.dropout_prob, hint_teacher_forcing=FLAGS.hint_teacher_forcing, hint_repred_mode=FLAGS.hint_repred_mode, nb_msg_passing_steps=FLAGS.nb_msg_passing_steps)
if (not os.path.exists(FLAGS.checkpoint_path)):
os.makedirs(FLAGS.checkpoint_path)
with open(os.path.join(FLAGS.checkpoint_path, 'spec_list.pkl'), 'wb') as f:
pickle.dump(spec_list, f)
model_params_save = copy.deepcopy(model_params)
model_params_save['processor_factory'] = (FLAGS.processor_type, FLAGS.use_ln, FLAGS.nb_triplet_fts, FLAGS.nb_heads)
with open(os.path.join(FLAGS.checkpoint_path, 'model_params.pkl'), 'wb') as f:
pickle.dump(model_params_save, f)
eval_model = BaselineModel(spec=spec_list, dummy_trajectory=[next(t) for t in val_samplers], **model_params)
if FLAGS.chunked_training:
train_model = BaselineModelChunked(spec=spec_list, dummy_trajectory=[next(t) for t in train_samplers], **model_params)
else:
train_model = eval_model
best_score = (- 1.0)
current_train_items = ([0] * len(FLAGS.algorithms))
step = 0
next_eval = 0
val_scores = ([(- 99999.9)] * len(FLAGS.algorithms))
length_idx = 0
while (step < FLAGS.train_steps):
feedback_list = [next(t) for t in train_samplers]
if (step == 0):
all_features = [f.features for f in feedback_list]
if FLAGS.chunked_training:
all_length_features = ([all_features] + [[next(t).features for t in train_samplers] for _ in range(len(train_lengths))])
train_model.init(all_length_features[:(- 1)], (FLAGS.seed + 1))
else:
train_model.init(all_features, (FLAGS.seed + 1))
logging.set_verbosity(logging.INFO)
for algo_idx in range(len(train_samplers)):
feedback = feedback_list[algo_idx]
(rng_key, new_rng_key) = jax.random.split(rng_key)
if FLAGS.chunked_training:
length_and_algo_idx = (length_idx, algo_idx)
else:
length_and_algo_idx = algo_idx
cur_loss = train_model.feedback(rng_key, feedback, length_and_algo_idx)
rng_key = new_rng_key
if FLAGS.chunked_training:
examples_in_chunk = np.sum(feedback.features.is_last).item()
else:
examples_in_chunk = len(feedback.features.lengths)
current_train_items[algo_idx] += examples_in_chunk
if ((step % FLAGS.log_every) == 0):
logging.info('Algo %s step %i current loss %f, current_train_items %i.', FLAGS.algorithms[algo_idx], step, cur_loss, current_train_items[algo_idx])
if (step >= next_eval):
eval_model.params = train_model.params
for algo_idx in range(len(train_samplers)):
common_extras = {'examples_seen': current_train_items[algo_idx], 'step': step, 'algorithm': FLAGS.algorithms[algo_idx]}
(new_rng_key, rng_key) = jax.random.split(rng_key)
val_stats = collect_and_eval(val_samplers[algo_idx], functools.partial(eval_model.predict, algorithm_index=algo_idx), val_sample_counts[algo_idx], new_rng_key, extras=common_extras)
logging.info('(val) algo %s step %d: %s', FLAGS.algorithms[algo_idx], step, val_stats)
val_scores[algo_idx] = val_stats['score']
next_eval += FLAGS.eval_every
msg = f'best avg val score was {(best_score / len(FLAGS.algorithms)):.3f}, current avg val score is {np.mean(val_scores):.3f}, val scores are: '
msg += ', '.join([('%s: %.3f' % (x, y)) for (x, y) in zip(FLAGS.algorithms, val_scores)])
if ((sum(val_scores) > best_score) or (step == 0)):
best_score = sum(val_scores)
logging.info('Checkpointing best model, %s', msg)
train_model.save_model('best.pkl')
else:
logging.info('Not saving new best model, %s', msg)
step += 1
length_idx = ((length_idx + 1) % len(train_lengths))
logging.info('Restoring best model from checkpoint...')
eval_model.restore_model('best.pkl', only_load_processor=False)
for algo_idx in range(len(train_samplers)):
common_extras = {'examples_seen': current_train_items[algo_idx], 'step': step, 'algorithm': FLAGS.algorithms[algo_idx]}
(new_rng_key, rng_key) = jax.random.split(rng_key)
test_stats = collect_and_eval(test_samplers[algo_idx], functools.partial(eval_model.predict, algorithm_index=algo_idx), test_sample_counts[algo_idx], new_rng_key, extras=common_extras)
logging.info('(test) algo %s : %s', FLAGS.algorithms[algo_idx], test_stats)
logging.info('Done!')

def parse_text_to_table(text, strict=False):
text = text.replace(' <NEWLINE> ', '\n').strip()
data = []
for line in text.splitlines():
line = line.strip()
if (not line.startswith(SEP)):
line = (SEP + line)
if (not line.endswith(SEP)):
line = (line + SEP)
data.append([x.strip() for x in line[1:(- 1)].split(SEP)])
if ((not strict) and (len(data) > 0)):
n_col = len(data[0])
data = [d[:n_col] for d in data]
data = [(d + ([''] * (n_col - len(d)))) for d in data]
try:
data = np.array(data, dtype=str)
except:
assert strict
data = None
return data

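# A minimal usage sketch, assuming SEP = '|' (the separator constant is defined
# outside this snippet):
#   parse_text_to_table('a | b <NEWLINE> 1 | 2')
#   # -> array([['a', 'b'],
#   #           ['1', '2']], dtype='<U1')
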
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
self.input_one = torch.rand(M, N, K, device=device, requires_grad=True)
self.input_two = torch.rand(M, N, K, device=device, requires_grad=True)
self.set_module_name('add')
def forward(self):
return torch.add(self.input_one, self.input_two)

def create_batches(data_size, batch_size, shuffle=True):
batches = []
ids = list(range(data_size))
if shuffle:
random.shuffle(ids)
for i in range(int((data_size / batch_size))):
start = (i * batch_size)
end = ((i + 1) * batch_size)
batches.append(ids[start:end])
rest = (data_size % batch_size)
if (rest > 0):
batches.append((list(ids[(- rest):]) + ([(- 1)] * (batch_size - rest))))
return batches

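# A minimal usage sketch: with shuffle disabled, leftover ids are padded with -1
# so every batch has the same length.
#   create_batches(5, 2, shuffle=False)  # -> [[0, 1], [2, 3], [4, -1]]
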
class GridEncoder(nn.Module):
def __init__(self, input_dim=3, num_levels=16, level_dim=2, per_level_scale=2, base_resolution=16, log2_hashmap_size=19, desired_resolution=None, gridtype='hash', align_corners=False, interpolation='linear'):
super().__init__()
if (desired_resolution is not None):
per_level_scale = np.exp2((np.log2((desired_resolution / base_resolution)) / (num_levels - 1)))
self.input_dim = input_dim
self.num_levels = num_levels
self.level_dim = level_dim
self.per_level_scale = per_level_scale
self.log2_hashmap_size = log2_hashmap_size
self.base_resolution = base_resolution
self.output_dim = (num_levels * level_dim)
self.gridtype = gridtype
self.gridtype_id = _gridtype_to_id[gridtype]
self.interpolation = interpolation
self.interp_id = _interp_to_id[interpolation]
self.align_corners = align_corners
offsets = []
offset = 0
self.max_params = (2 ** log2_hashmap_size)
for i in range(num_levels):
resolution = int(np.ceil((base_resolution * (per_level_scale ** i))))
params_in_level = min(self.max_params, ((resolution if align_corners else (resolution + 1)) ** input_dim))
params_in_level = int((np.ceil((params_in_level / 8)) * 8))
offsets.append(offset)
offset += params_in_level
offsets.append(offset)
offsets = torch.from_numpy(np.array(offsets, dtype=np.int32))
self.register_buffer('offsets', offsets)
self.n_params = (offsets[(- 1)] * level_dim)
self.embeddings = nn.Parameter(torch.empty(offset, level_dim))
self.reset_parameters()
def reset_parameters(self):
std = 0.0001
self.embeddings.data.uniform_((- std), std)
def __repr__(self):
return f'GridEncoder: input_dim={self.input_dim} num_levels={self.num_levels} level_dim={self.level_dim} resolution={self.base_resolution} -> {int(round((self.base_resolution * (self.per_level_scale ** (self.num_levels - 1)))))} per_level_scale={self.per_level_scale:.4f} params={tuple(self.embeddings.shape)} gridtype={self.gridtype} align_corners={self.align_corners} interpolation={self.interpolation}'
def forward(self, inputs, bound=1):
inputs = ((inputs + bound) / (2 * bound))
prefix_shape = list(inputs.shape[:(- 1)])
inputs = inputs.view((- 1), self.input_dim)
outputs = grid_encode(inputs, self.embeddings, self.offsets, self.per_level_scale, self.base_resolution, inputs.requires_grad, self.gridtype_id, self.align_corners, self.interp_id)
outputs = outputs.view((prefix_shape + [self.output_dim]))
return outputs
@torch.cuda.amp.autocast(enabled=False)
def grad_total_variation(self, weight=1e-07, inputs=None, bound=1, B=1000000):
D = self.input_dim
C = self.embeddings.shape[1]
L = (self.offsets.shape[0] - 1)
S = np.log2(self.per_level_scale)
H = self.base_resolution
if (inputs is None):
inputs = torch.rand(B, self.input_dim, device=self.embeddings.device)
else:
inputs = ((inputs + bound) / (2 * bound))
inputs = inputs.view((- 1), self.input_dim)
B = inputs.shape[0]
if (self.embeddings.grad is None):
raise ValueError('grad is None, should be called after loss.backward() and before optimizer.step()!')
_backend.grad_total_variation(inputs, self.embeddings, self.embeddings.grad, self.offsets, weight, B, D, C, L, S, H, self.gridtype_id, self.align_corners)

def train_one_epoch(model: torch.nn.Module, model_ema, criterion: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, loss_scaler, max_norm: float=0, mixup_fn: Optional[Mixup]=None, log_writer=None, args=None):
model.train(True)
metric_logger = misc.MetricLogger(delimiter=' ')
metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 20
accum_iter = args.accum_iter
optimizer.zero_grad()
if (log_writer is not None):
print('log_dir: {}'.format(log_writer.log_dir))
for (data_iter_step, (samples, targets)) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
if ((data_iter_step % accum_iter) == 0):
lr_sched.adjust_learning_rate(optimizer, ((data_iter_step / len(data_loader)) + epoch), args)
samples = samples.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
if (mixup_fn is not None):
(samples, targets) = mixup_fn(samples, targets)
with torch.cuda.amp.autocast():
outputs = model(samples)
loss = criterion(outputs, targets)
loss_value = loss.item()
if (not math.isfinite(loss_value)):
print('Loss is {}, stopping training'.format(loss_value))
sys.exit(1)
loss /= accum_iter
loss_scaler(loss, optimizer, clip_grad=max_norm, parameters=model.parameters(), create_graph=False, update_grad=(((data_iter_step + 1) % accum_iter) == 0))
if (((data_iter_step + 1) % accum_iter) == 0):
optimizer.zero_grad()
if (model_ema is not None):
model_ema.update(model)
torch.cuda.synchronize()
metric_logger.update(loss=loss_value)
min_lr = 10.0
max_lr = 0.0
for group in optimizer.param_groups:
min_lr = min(min_lr, group['lr'])
max_lr = max(max_lr, group['lr'])
metric_logger.update(lr=max_lr)
loss_value_reduce = misc.all_reduce_mean(loss_value)
if ((log_writer is not None) and (data_iter_step == 0)):
epoch_1000x = int((((data_iter_step / len(data_loader)) + epoch) * 1000))
log_writer.add_scalar('loss', loss_value_reduce, epoch_1000x)
log_writer.add_scalar('lr', max_lr, epoch_1000x)
metric_logger.synchronize_between_processes()
print('Averaged stats:', metric_logger)
return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}

def init_tf(config_dict: dict=None) -> None:
if (tf.compat.v1.get_default_session() is not None):
return
cfg = _sanitize_tf_config(config_dict)
np_random_seed = cfg['rnd.np_random_seed']
if (np_random_seed is not None):
np.random.seed(np_random_seed)
tf_random_seed = cfg['rnd.tf_random_seed']
if (tf_random_seed == 'auto'):
tf_random_seed = np.random.randint((1 << 31))
if (tf_random_seed is not None):
tf.compat.v1.set_random_seed(tf_random_seed)
for (key, value) in cfg.items():
fields = key.split('.')
if (fields[0] == 'env'):
assert (len(fields) == 2)
os.environ[fields[1]] = str(value)
create_session(cfg, force_as_default=True)

def print_options(args, model):
message = ''
num_params = sum((p.numel() for p in model.parameters() if p.requires_grad))
num_params = (num_params / 1000000)
message += (' FL train of %s with total model parameters: %2.1fM \n' % (args.model, num_params))
message += ' Other Train related parameters \n'
for (k, v) in sorted(vars(args).items()):
comment = ''
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += ' End of show parameters '
args.file_name = os.path.join(args.output_dir, 'log_file.txt')
with open(args.file_name, 'wt') as args_file:
args_file.write(message)
args_file.write('\n')
print(message)

@pytest.mark.spark
@pytest.mark.parametrize('sample, seed', [(False, None), (True, None), (True, 123)], ids=['no_sampling', 'sample_not_fixed', 'sample_fixed'])
def test_predict(fitted_model, log_ucb, sample, seed):
fitted_model.seed = seed
fitted_model.sample = sample
equality_check = (sparkDataFrameNotEqual if (fitted_model.sample and (fitted_model.seed is None)) else sparkDataFrameEqual)
dataset = create_dataset(log_ucb)
pred = fitted_model.predict(dataset, items=list(range(10)), k=1)
pred_checkpoint = pred.localCheckpoint()
pred.unpersist()
fitted_model.fit(dataset)
pred_after_refit = fitted_model.predict(dataset, items=list(range(10)), k=1)
equality_check(pred_checkpoint, pred_after_refit)
pred_after_refit_checkpoint = pred_after_refit.localCheckpoint()
pred_after_refit.unpersist()
pred_repeat = fitted_model.predict(dataset, items=list(range(10)), k=1)
equality_check(pred_after_refit_checkpoint, pred_repeat)

@click.command('split-video', add_help_option=False)
@click.option('--output', '-o', metavar='DIR', type=click.Path(exists=False, dir_okay=True, writable=True, resolve_path=False), help='Output directory to save videos to. Overrides global option -o/--output if set.')
@click.option('--filename', '-f', metavar='NAME', default='$VIDEO_NAME-Scene-$SCENE_NUMBER', type=click.STRING, show_default=True, help='File name format, to use when saving image files. You can use the $VIDEO_NAME and $SCENE_NUMBER macros in the file name. Note that you may have to wrap the name using single quotes.')
@click.option('--high-quality', '-hq', is_flag=True, flag_value=True, help='Encode video with higher quality, overrides -f option if present. Equivalent to specifying --rate-factor 17 and --preset slow.')
@click.option('--override-args', '-a', metavar='ARGS', type=click.STRING, help='Override codec arguments/options passed to FFmpeg when splitting and re-encoding scenes. Use double quotes (") around specified arguments. Must specify at least audio/video codec to use (e.g. -a "-c:v [...] and -c:a [...]"). [default: "-c:v libx264 -preset veryfast -crf 22 -c:a aac"]')
@click.option('--quiet', '-q', is_flag=True, flag_value=True, help='Hides any output from the external video splitting tool.')
@click.option('--copy', '-c', is_flag=True, flag_value=True, help='Copy instead of re-encode using mkvmerge. All other options except -o/--output and -q/--quiet are ignored in this mode. Significantly faster, but far less precise. Output files will be named $VIDEO_NAME-$SCENE_NUMBER.mkv.')
@click.option('--rate-factor', '-crf', metavar='RATE', default=None, type=click.IntRange(0, 100), help='Video encoding quality (x264 constant rate factor), from 0-100, where lower values represent better quality, with 0 indicating lossless. [default: 22, if -hq/--high-quality is set: 17]')
@click.option('--preset', '-p', metavar='LEVEL', default=None, type=click.Choice(['ultrafast', 'superfast', 'veryfast', 'faster', 'fast', 'medium', 'slow', 'slower', 'veryslow']), help='Video compression quality preset (x264 preset). Can be one of: ultrafast, superfast, veryfast, faster, fast, medium, slow, slower, and veryslow. Faster modes take less time to run, but the output files may be larger. [default: veryfast, if -hq/--high quality is set: slow]')
@click.pass_context
def split_video_command(ctx, output, filename, high_quality, override_args, quiet, copy, rate_factor, preset):
if ctx.obj.split_video:
logging.warning('split-video command is specified twice.')
ctx.obj.check_input_open()
ctx.obj.split_video = True
ctx.obj.split_quiet = (True if quiet else False)
ctx.obj.split_directory = output
ctx.obj.split_name_format = filename
if copy:
ctx.obj.split_mkvmerge = True
if high_quality:
logging.warning('-hq/--high-quality flag ignored due to -c/--copy.')
if override_args:
logging.warning('-f/--ffmpeg-args option ignored due to -c/--copy.')
if (not override_args):
if (rate_factor is None):
rate_factor = (22 if (not high_quality) else 17)
if (preset is None):
preset = ('veryfast' if (not high_quality) else 'slow')
override_args = '-c:v libx264 -preset {PRESET} -crf {RATE_FACTOR} -c:a aac'.format(PRESET=preset, RATE_FACTOR=rate_factor)
if (not copy):
logging.info('FFmpeg codec args set: %s', override_args)
if filename:
logging.info('Video output file name format: %s', filename)
if (ctx.obj.split_directory is not None):
logging.info('Video output path set: \n%s', ctx.obj.split_directory)
ctx.obj.split_args = override_args
mkvmerge_available = is_mkvmerge_available()
ffmpeg_available = is_ffmpeg_available()
if ((not (mkvmerge_available or ffmpeg_available)) or (((not mkvmerge_available) and copy) or ((not ffmpeg_available) and (not copy)))):
split_tool = 'ffmpeg/mkvmerge'
if ((not mkvmerge_available) and copy):
split_tool = 'mkvmerge'
elif ((not ffmpeg_available) and (not copy)):
split_tool = 'ffmpeg'
error_strs = ['{EXTERN_TOOL} is required for split-video{EXTRA_ARGS}.'.format(EXTERN_TOOL=split_tool, EXTRA_ARGS=(' -c/--copy' if copy else '')), ('Install the above tool%s to enable video splitting support.' % ('s' if (split_tool.find('/') > 0) else ''))]
if mkvmerge_available:
error_strs += ['You can also specify `split-video -c/--copy` to use mkvmerge for splitting.']
error_str = '\n'.join(error_strs)
logging.debug(error_str)
ctx.obj.options_processed = False
raise click.BadParameter(error_str, param_hint='split-video') |
class GTestParamTestInvalidName2Test(gtest_test_utils.TestCase):
def testExitCodeAndOutput(self):
TestExitCodeAndOutput(COMMAND) |
class CopyInfo():
def __init__(self, src_addr, dst_addr, dir, size, begin_usec, end_usec, info=''):
self.src_addr = src_addr
self.dst_addr = dst_addr
self.dir = dir
self.size = size
self.begin_usec = begin_usec
self.end_usec = end_usec
self.info = info |
class FP16_Module(nn.Module):
def __init__(self, module):
super(FP16_Module, self).__init__()
self.add_module('module', module.half())
def forward(self, *inputs, **kwargs):
return fp16_to_fp32(self.module(*fp32_to_fp16(inputs), **kwargs))
def state_dict(self, destination=None, prefix='', keep_vars=False):
return self.module.state_dict(destination, prefix, keep_vars)
def load_state_dict(self, state_dict, strict=True):
self.module.load_state_dict(state_dict, strict=strict)
def resize_token_embeddings(self, len_tokenizer):
self.module.resize_token_embeddings(len_tokenizer) |
def make_optimizer_and_schedule(args, model, checkpoint, lr, step_lr):
optimizer = Adam(model.parameters(), lr)
schedule = None
if step_lr:
schedule = lr_scheduler.StepLR(optimizer, step_size=step_lr)
elif args.custom_schedule:
cs = args.custom_schedule
        periods = (eval(cs) if isinstance(cs, str) else cs)  # cs is expected to be a list literal of (milestone, lr) pairs
def lr_func(ep):
for (milestone, _lr) in reversed(periods):
if (ep > milestone):
return (_lr / lr)
return lr
schedule = lr_scheduler.LambdaLR(optimizer, lr_func)
if (checkpoint and (args.task not in ['train-classifier', 'estimate-mi'])):
optimizer.load_state_dict(checkpoint['optimizer'])
        try:
            schedule.load_state_dict(checkpoint['schedule'])
        except Exception:
            steps_to_take = checkpoint['epoch']
            print(f'Could not load schedule (was probably LambdaLR). Stepping {steps_to_take} times instead...')
            for _ in range(steps_to_take):
                schedule.step()
return (optimizer, schedule) |
def change_vector_label(row_index, att_data, solutions_found, changed_variables, variables):
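    # Greedy concealment search: perturb one column at a time within its normal operating range,
    # keep the best-scoring change, and stop once the detection error drops below theta or the
    # change budget / patience is exhausted.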
original_vector = att_data.copy()
changes = 0
found_solution = 0
(_, error, temp) = scale_input_and_detect_single(row_index, att_data)
previous_best_error = error[row_index]
temp = sort_temp_and_drop(row_index, temp)
prev_col_name = None
num_changes_without_optimizations = 0
last_optimization = 0
newBest = att_data.copy()
optimized = False
changed_variables[row_index] = variables[max_concealable_variables]
while ((changes < budget) and ((changes - last_optimization) < patience) and (not found_solution)):
col_name = choose_column(row_index, prev_col_name, changed_variables, max_concealable_variables)
prev_col_name = col_name
if debug:
print('______________________________')
print(col_name)
print('______________________________')
values = np.arange(normal_op_ranges[col_name]['min'], (normal_op_ranges[col_name]['max'] + 0.1), normal_op_ranges[col_name]['step'])
        att_data = att_data.append(([att_data] * len(values)), ignore_index=True)  # NOTE: DataFrame.append was removed in pandas 2.0; newer pandas needs pd.concat here.
att_data = att_data[:(- 1)]
att_data[col_name] = values
(att_data, error) = scale_input_and_detect(row_index, att_data)
if (error < previous_best_error):
if debug:
print(error, previous_best_error)
previous_best_error = error
newBest = att_data.copy()
last_optimization = changes
num_changes_without_optimizations = 0
optimized = True
            try:
                if (col_name not in changed_variables[row_index]):
                    changed_variables[row_index].append(col_name)
            except Exception:
                changed_variables[row_index] = [col_name]
else:
optimized = False
if (error < theta):
solutions_found = (solutions_found + 1)
found_solution = 1
print(('Found solution number: ' + str(solutions_found)))
        if not optimized:
            num_changes_without_optimizations += 1
att_data = newBest.copy()
(_, error, temp) = scale_input_and_detect_single(row_index, att_data)
temp = sort_temp_and_drop(row_index, temp)
changes = (changes + 1)
if debug:
print(temp)
print('--__--__--')
print(changes)
print('--__--__--')
compute_mutation_factor(original_vector, att_data.copy())
return (newBest.copy(), solutions_found) |
@pytest.mark.parametrize('n_attacks, n_success, n_baseline, n_control, confidence_level, expected_rate, expected_baseline', [(100, 100, 0, None, 0.95, SuccessRate(value=0., error=0.), SuccessRate(value=0., error=0.)), (100, 100, 0, None, 0.68, SuccessRate(value=0., error=0.), SuccessRate(value=0., error=0.)), (100, 23, 11, None, 0.95, SuccessRate(value=0., error=0.), SuccessRate(value=0., error=0.))])
def test_evaluation_results_confidence(n_attacks, n_success, n_baseline, n_control, confidence_level, expected_rate, expected_baseline):
results = EvaluationResults(n_attacks=n_attacks, n_success=n_success, n_baseline=n_baseline, n_control=n_control, confidence_level=confidence_level)
np.testing.assert_equal(results.attack_rate, expected_rate)
np.testing.assert_equal(results.baseline_rate, expected_baseline)
np.testing.assert_equal(results.risk(baseline=False), expected_rate.to_risk())
np.testing.assert_equal(results.risk(baseline=True), expected_baseline.to_risk()) |
def register_Ns3ConstantPositionMobilityModel_methods(root_module, cls):
cls.add_constructor([param('ns3::ConstantPositionMobilityModel const &', 'arg0')])
cls.add_constructor([])
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
cls.add_method('DoGetPosition', 'ns3::Vector', [], is_const=True, visibility='private', is_virtual=True)
cls.add_method('DoGetVelocity', 'ns3::Vector', [], is_const=True, visibility='private', is_virtual=True)
cls.add_method('DoSetPosition', 'void', [param('ns3::Vector const &', 'position')], visibility='private', is_virtual=True)
return |
def format_list(values, interval='\t', decimals=None):
    if (decimals is None):
        return interval.join(['{0}'.format(element) for element in values])
    else:
        return interval.join(['{0:.{1}f}'.format(element, decimals) for element in values]) |
def _print_metrics(stage, step, metrics, throttle=None):
for (k, v) in metrics.items():
print((' %s:' % k), v) |
def my_attention(inputs, merge_size=0, attention=True, attention_size=256, sep_attend=True, return_alphas=True, hidden_nl=0):
(W_projs, B_projs, hiddens) = ([], [], [])
(W_omegas, b_omegas, u_omegas) = ([], [], [])
inds = {}
for (i, x) in enumerate(inputs):
inds[i] = i
(w, b, u) = init_attention(x.shape[1].value, attention_size)
W_omegas.append(w)
b_omegas.append(b)
u_omegas.append(u)
pre_sm = []
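    # Additive attention per input: score = u^T tanh(W x + b); softmax over the
    # concatenated scores yields one alpha per input.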
for (i, h) in enumerate(inputs):
idx = inds[i]
pre_sm.append(tf.matmul(tf.tanh((tf.matmul(inputs[i], W_omegas[idx]) + tf.reshape(b_omegas[idx], [1, (- 1)]))), tf.reshape(u_omegas[idx], [(- 1), 1])))
alphas = tf.split(tf.nn.softmax(tf.concat(pre_sm, 1)), len(inputs), 1)
print(type(alphas), alphas)
return (None, alphas, None) |
class OneTypeList(list):
def __init__(self, item_class, seq=None):
self.item_class = item_class
if (seq is not None):
for obj in seq:
self.append(obj)
def __setitem__(self, key, value):
if (type(value) in (list, tuple)):
for (ii, val) in enumerate(value):
                if (not isinstance(val, self.item_class)):
                    raise TypeError('OneTypeList items must be instances of %s' % self.item_class.__name__)
        elif (not isinstance(value, self.item_class)):
            raise TypeError('OneTypeList items must be instances of %s' % self.item_class.__name__)
list.__setitem__(self, key, value)
def __getitem__(self, ii):
if isinstance(ii, int):
return list.__getitem__(self, ii)
        elif isinstance(ii, str):
ir = self.find(ii, ret_indx=True)
if ir:
return list.__getitem__(self, ir[0])
else:
raise IndexError(ii)
else:
raise IndexError(ii)
def __str__(self):
ss = '[\n'
for ii in self:
aux = ('\n' + ii.__str__())
aux = aux.replace('\n', '\n ')
ss += (aux[1:] + '\n')
ss += ']'
return ss
def find(self, name, ret_indx=False):
for (ii, item) in enumerate(self):
if (item.name == name):
if ret_indx:
return (ii, item)
else:
return item
return None
def print_names(self):
print([ii.name for ii in self])
def get_names(self):
return [ii.name for ii in self] |
def main():
tf_summary_writer = tf.summary.create_file_writer(args.checkpoint_dir)
train_data = Batch_generator(args.num_answer, args.img_dir, args.box_dir, args.anno_dir, args.prep_dir, 'train')
val_data = Batch_generator(args.num_answer, args.img_dir, args.box_dir, args.anno_dir, args.prep_dir, 'val')
trainloader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=4)
valloader = torch.utils.data.DataLoader(val_data, batch_size=args.batch_size, shuffle=False, num_workers=4)
nb_embedding = train_data.nb_embedding
vocab = train_data.word2idx
model = build_ban(nb_embedding, 2048, args.embedding_size, args.num_answer, op='', gamma=4, reasoning=True)
model = model.cuda()
optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-06)
def train(iteration):
model.train()
avg_ans_loss = 0
avg_att_loss = 0
avg_sem_loss = 0
for (batch_idx, (img, bbox, que, ans, op, att)) in enumerate(trainloader):
(img, bbox, que, ans, op, att) = (Variable(img), Variable(bbox), Variable(que), Variable(ans), Variable(op), Variable(att))
(img, bbox, que, ans, op, att) = (img.cuda(), bbox.cuda(), que.cuda(), ans.cuda(), op.cuda(), att.cuda())
optimizer.zero_grad()
(output, pred_op, pred_att) = model(img, bbox, que)
(ans_mask, att_mask) = get_mask(op)
ans_loss = cross_entropy(output, ans)
att_loss = attention_loss_mask_kld(pred_att, att, att_mask)
sem_loss = semantic_loss(pred_op, op)
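            # Attention-loss weight is cosine-annealed: the max(1 + cos(pi * iteration / 300000), 0)
            # factor decays from 2 at iteration 0 to 0 at iteration 300k.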
loss = ((ans_loss + ((att_loss * args.alpha) * max((1 + np.cos((np.pi * (iteration / 300000)))), 0))) + (args.beta * sem_loss))
loss.backward()
if (not (args.clip == 0)):
clip_grad_norm_(model.parameters(), args.clip)
optimizer.step()
avg_ans_loss = (((avg_ans_loss * np.maximum(0, batch_idx)) + ans_loss.data.cpu().numpy()) / (batch_idx + 1))
avg_att_loss = (((avg_att_loss * np.maximum(0, batch_idx)) + att_loss.data.cpu().numpy()) / (batch_idx + 1))
avg_sem_loss = (((avg_sem_loss * np.maximum(0, batch_idx)) + sem_loss.data.cpu().numpy()) / (batch_idx + 1))
if ((batch_idx % 25) == 0):
with tf_summary_writer.as_default():
tf.summary.scalar('answer loss', avg_ans_loss, step=iteration)
tf.summary.scalar('step attention loss', avg_att_loss, step=iteration)
tf.summary.scalar('semantic loss', avg_sem_loss, step=iteration)
iteration += 1
return iteration
def test(iteration):
model.eval()
total_acc = 0
total_count = 0
for (batch_idx, (img, bbox, que, ans)) in enumerate(valloader):
(img, bbox, que, ans) = (Variable(img), Variable(bbox), Variable(que), Variable(ans))
(img, bbox, que, ans) = (img.cuda(), bbox.cuda(), que.cuda(), ans.cuda())
(output, op, att) = model(img, bbox, que)
output = output.data.cpu().numpy()
ans = ans.data.cpu().numpy()
output = np.argmax(output, axis=(- 1))
ans = np.argmax(ans, axis=(- 1))
total_acc += np.count_nonzero((output == ans))
total_count += len(img)
total_acc = ((total_acc * 100.0) / total_count)
with tf_summary_writer.as_default():
tf.summary.scalar('validation accuracy', total_acc, step=iteration)
return total_acc
print('Start training model')
iteration = 0
val_acc = 0
for epoch in range(args.epoch):
adjust_learning_rate(warm_up_schedule, optimizer, epoch)
iteration = train(iteration)
cur_acc = test(iteration)
if (cur_acc > val_acc):
torch.save(model.state_dict(), os.path.join(args.checkpoint_dir, 'model_best.pth'))
val_acc = cur_acc
torch.save(model.state_dict(), os.path.join(args.checkpoint_dir, 'model.pth')) |
def test_plan_heavy(tmp_path):
plan_dir = (tmp_path / 'test_plan')
plan_dir.mkdir()
with goos.OptimizationPlan() as plan:
x = goos.Variable(3.0, name='x')
y = goos.Variable(2.0, name='y')
z = (x + y)
z.parallelize()
assert (z.get() == 5)
assert (z.get_grad([x, y]) == [1, 1])
assert ((((x + x) + y) + 2).get_grad([x, y]) == [2, 1])
assert ((x ** 2).get_grad([x]) == [6])
x.set(4)
assert (z.get() == 5)
assert (z.get(run=True) == 6)
assert (z.get() == 6)
y.set(x)
assert (z.get(run=True) == 8)
with goos.OptimizationPlan():
assert (z.get() == 5)
first_part = ((x + (y ** 2)) + 1)
first_part.parallelize()
second_part = (y + 1)
second_part.parallelize()
obj = ((first_part ** 2) + (second_part ** 2))
goos.opt.scipy_minimize(obj, 'CG')
plan.run()
plan.save(plan_dir)
np.testing.assert_almost_equal(x.get().array, (- 2), decimal=4)
np.testing.assert_almost_equal(y.get().array, (- 1), decimal=4) |
def plot_value_functions():
for exp in EXPS:
        save_dir = os.path.join('pdf_plots', 'value_functions')
        os.makedirs(save_dir, exist_ok=True)
true_value_function = np.load(os.path.join(os.getcwd(), 'Resources', TASK, 'state_values.npy'))
for alg in ALGS:
value_processor = ValueFunctionProcessor(exp, alg)
for run in RUNS:
(fig, ax) = plt.subplots(figsize=(8, 3))
for step in STEPS:
value_function = value_processor.get_value_function_by_step_and_run(step, run)
plot_value_function(ax, value_function, step, run)
plot_value_function(ax, true_value_function)
fig.savefig(os.path.join(save_dir, f'{run}_value_function_{alg}_{exp}.pdf'), format='pdf', dpi=200, bbox_inches='tight')
plt.show() |
def match_patts(file_path, file_patterns, src, tgt, lang):
    for file_pattern in file_patterns:
        directions = None
        if isinstance(file_pattern, tuple):
            (file_pattern, directions) = file_pattern
        params = {k: v for (k, v) in [('src', src), ('tgt', tgt), ('lang', lang)] if (k in file_pattern)}
        matching = file_pattern.format(**params)
        if (directions is not None):
            if ((f'{src}-{tgt}' in directions) and (matching in file_path)):
                return True
        elif (matching in file_path):
            return True
    return False |
def generate(output_dir: Path, config: codegen.CodegenConfig=None) -> None:
factors_dir = (output_dir / 'factors')
if (config is None):
config = codegen.CppConfig()
cam_types = sf.CameraCal.__subclasses__()
codegen.Codegen.function(func=inverse_range_landmark_prior_residual, config=config).with_linearization(which_args=['landmark_inverse_range']).generate_function(output_dir=factors_dir, skip_directory_nesting=True)
for cam_type in cam_types:
cam_type_name = python_util.camelcase_to_snakecase(python_util.str_removesuffix(cam_type.__name__, 'CameraCal'))
specialize_cam = functools.partial(util.specialize_types, type_replacements={sf.CameraCal: cam_type})
try:
codegen.Codegen.function(func=specialize_cam(inverse_range_landmark_gnc_residual), name=f'inverse_range_landmark_{cam_type_name}_gnc_residual', config=config).with_linearization(which_args=['source_pose', 'target_pose', 'source_inverse_range']).generate_function(output_dir=factors_dir, skip_directory_nesting=True)
codegen.Codegen.function(func=specialize_cam(reprojection_delta), name=f'{cam_type_name}_reprojection_delta', config=config, output_names=['reprojection_delta', 'is_valid']).generate_function(output_dir=factors_dir, skip_directory_nesting=True)
except NotImplementedError:
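            # Camera models that cannot reproject to pixel coordinates fall back to ray-based residual variants.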
codegen.Codegen.function(func=specialize_cam(inverse_range_landmark_ray_gnc_residual), name=f'inverse_range_landmark_{cam_type_name}_gnc_residual', config=config).with_linearization(which_args=['source_pose', 'target_pose', 'source_inverse_range']).generate_function(output_dir=factors_dir, skip_directory_nesting=True)
codegen.Codegen.function(func=specialize_cam(ray_reprojection_delta), name=f'{cam_type_name}_reprojection_delta', config=config, output_names=['reprojection_delta', 'is_valid']).generate_function(output_dir=factors_dir, skip_directory_nesting=True) |
@pytest.mark.parametrize('sampling', ['x', 'on_manifold', 'cd'])
def test_nae(sampling):
encoder = FCNet(2, 1)
decoder = FCNet(1, 2)
nae = NAE(encoder, decoder, initial_dist='gaussian', sampling=sampling)
opt = Adam(nae.parameters(), lr=0.0001)
X = torch.randn((10, 2), dtype=torch.float)
lik = nae.predict(X)
nae._set_x_shape(X)
nae._set_z_shape(X)
d_sample = nae.sample(X)
nae.train_step(X, opt) |
def test_tokenizer():
    # Earlier example queries, kept for reference:
    # sql = 'SELECT avg(age) FROM Student WHERE StuID IN ( SELECT T1.StuID FROM Has_allergy AS T1 JOIN Allergy_Type AS T2 ON T1.Allergy = T2.Allergy WHERE T2.allergytype = "food" INTERSECT SELECT T1.StuID FROM Has_allergy AS T1 JOIN Allergy_Type AS T2 ON T1.Allergy = T2.Allergy WHERE T2.allergytype = "animal")'
    # sql = 'SELECT T1.Name FROM Tourist_Attractions AS T1 JOIN VISITORS AS T2 JOIN VISITS AS T3 ON T1.Tourist_Attraction_ID = T3.Tourist_Attraction_ID AND T2.Tourist_ID = T3.Tourist_ID WHERE T2.Tourist_Details = "Vincent" INTERSECT SELECT T1.Name FROM Tourist_Attractions AS T1 JOIN VISITORS AS T2 JOIN VISITS AS T3 ON T1.Tourist_Attraction_ID = T3.Tourist_Attraction_ID AND T2.Tourist_ID = T3.Tourist_ID WHERE T2.Tourist_Details = "Marcelle"'
    sql = "SELECT Perpetrator_ID FROM perpetrator WHERE Year IN ('1995.0', '1994.0', '1982.0')"
    print(sql)
    data_dir = sys.argv[1]
    db_name = sys.argv[2]
    schema_graphs = load_schema_graphs_spider(data_dir, 'spider')
    schema = schema_graphs[db_name]
    tokens = tokenize(sql, bu.tokenizer.tokenize, in_execution_order=True, schema=schema)[0]
    print(tokens)
    print() |
def test_mpc_warm_start(solver, warm_start):
mpc_solver = get_solver(solver, warm_start, 1, 'mean')
agent = MPCAgent(mpc_solver=mpc_solver)
evaluate_agent(agent, environment=env, num_episodes=1, max_steps=MAX_ITER, render=False) |
def _recall_micro_1d(y_true: np.ndarray, y_pred: np.ndarray):
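    # Micro-averaged recall over single-label pairs: |gold ∩ predicted| / |gold|, with empty labels ignored.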
sum_intersection = 0
sum_prediction_and_ancestors = 0
for (ground_truth, prediction) in zip(y_true, y_pred):
ground_truth_set = set([ground_truth])
ground_truth_set.discard('')
predicted_set = set([prediction])
predicted_set.discard('')
sum_intersection = (sum_intersection + len(ground_truth_set.intersection(predicted_set)))
sum_prediction_and_ancestors = (sum_prediction_and_ancestors + len(ground_truth_set))
recall = (sum_intersection / sum_prediction_and_ancestors)
return recall |
class Cascading(Simulation):
def __init__(self, graph, runs=10, steps=100, l=0.8, r=0.2, **kwargs):
super().__init__(graph, runs, steps, **kwargs)
self.prm.update({'l': l, 'r': r, 'c': len(graph), 'robust_measure': 'largest_connected_component', 'k_a': 10, 'attack': 'id_node', 'attack_approx': None, 'k_d': None, 'defense': None})
self.prm.update(kwargs)
if (self.prm['plot_transition'] or self.prm['gif_animation']):
(self.node_pos, self.edge_pos) = self.get_graph_coordinates()
self.save_dir = os.path.join(os.getcwd(), 'plots', self.get_plot_title(steps))
os.makedirs(self.save_dir, exist_ok=True)
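        # Capacity is betweenness centrality scaled by the tolerance (1 + r); each node's
        # initial load is a random fraction (at most l) of its base capacity.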
self.capacity_og = nx.betweenness_centrality(self.graph, k=self.prm['c'], normalized=True, endpoints=True)
self.max_val = (max(self.capacity_og.values()) * (1.0 + self.prm['r']))
self.protected = set()
self.failed = set()
self.load = defaultdict()
self.sim_info = defaultdict()
self.reset_simulation()
def reset_simulation(self):
self.protected = set()
self.failed = set()
self.load = defaultdict()
self.sim_info = defaultdict()
self.capacity = self.capacity_og.copy()
for n in self.graph.nodes:
self.load[n] = (self.capacity[n] * np.random.uniform(0, self.prm['l']))
self.capacity[n] = (self.capacity[n] * (1.0 + self.prm['r']))
self.track_simulation(step=0)
if ((self.prm['attack'] is not None) and (self.prm['k_a'] > 0)):
self.failed = set(run_attack_method(self.graph, self.prm['attack'], self.prm['k_a'], approx=self.prm['attack_approx'], seed=self.prm['seed']))
if (get_attack_category(self.prm['attack']) == 'node'):
for n in self.failed:
self.load[n] = (2 * self.load[n])
elif (get_attack_category(self.prm['attack']) == 'edge'):
self.graph.remove_edges_from(self.failed)
if ((self.prm['defense'] is not None) and (self.prm['k_d'] > 0)):
if (get_defense_category(self.prm['defense']) == 'node'):
self.protected = run_defense_method(self.graph, self.prm['defense'], self.prm['k_d'], seed=self.prm['seed'])
for n in self.protected:
self.capacity[n] = (2 * self.capacity[n])
elif (get_defense_category(self.prm['defense']) == 'edge'):
edge_info = run_defense_method(self.graph, self.prm['defense'], self.prm['k_d'], seed=self.prm['seed'])
self.graph.add_edges_from(edge_info['added'])
if ('removed' in edge_info):
self.graph.remove_edges_from(edge_info['removed'])
elif (self.prm['defense'] is not None):
print(self.prm['defense'], 'not available or k <= 0')
self.track_simulation(step=1)
def track_simulation(self, step):
nodes_functioning = set(self.graph.nodes).difference(self.failed)
measure = 0
if (len(nodes_functioning) > 0):
measure = run_measure(self.graph.subgraph(nodes_functioning), self.prm['robust_measure'])
self.sim_info[step] = {'status': [self.load[n] for n in self.graph.nodes], 'failed': len(self.failed), 'measure': measure, 'protected': self.protected}
def run_single_sim(self):
for step in range(self.prm['steps']):
self.track_simulation((step + 2))
failed_new = set()
for n in self.failed:
if (self.load[n] > self.capacity[n]):
nbrs = list(self.graph.neighbors(n))
for nb in self.graph.neighbors(n):
if ((nb not in self.failed) and (nb not in failed_new)):
self.load[nb] += (self.load[n] / len(nbrs))
if (self.load[nb] > self.capacity[nb]):
failed_new.add(nb)
self.failed = self.failed.union(failed_new)
robustness = [(v['measure'] if (v['measure'] is not None) else 0) for (k, v) in self.sim_info.items()]
return robustness |
def import_class_from_path(class_path):
(module_path, class_name) = class_path.split(':')
module = importlib.import_module(module_path)
return getattr(module, class_name) |
def _SQS14():
return [[0, 1, 2, 5], [0, 1, 3, 6], [0, 1, 4, 13], [0, 1, 7, 10], [0, 1, 8, 9], [0, 1, 11, 12], [0, 2, 3, 4], [0, 2, 6, 12], [0, 2, 7, 9], [0, 2, 8, 11], [0, 2, 10, 13], [0, 3, 5, 13], [0, 3, 7, 11], [0, 3, 8, 10], [0, 3, 9, 12], [0, 4, 5, 9], [0, 4, 6, 11], [0, 4, 7, 8], [0, 4, 10, 12], [0, 5, 6, 8], [0, 5, 7, 12], [0, 5, 10, 11], [0, 6, 7, 13], [0, 6, 9, 10], [0, 8, 12, 13], [0, 9, 11, 13], [1, 2, 3, 13], [1, 2, 4, 12], [1, 2, 6, 9], [1, 2, 7, 11], [1, 2, 8, 10], [1, 3, 4, 5], [1, 3, 7, 8], [1, 3, 9, 11], [1, 3, 10, 12], [1, 4, 6, 10], [1, 4, 7, 9], [1, 4, 8, 11], [1, 5, 6, 11], [1, 5, 7, 13], [1, 5, 8, 12], [1, 5, 9, 10], [1, 6, 7, 12], [1, 6, 8, 13], [1, 9, 12, 13], [1, 10, 11, 13], [2, 3, 5, 11], [2, 3, 6, 7], [2, 3, 8, 12], [2, 3, 9, 10], [2, 4, 5, 13], [2, 4, 6, 8], [2, 4, 7, 10], [2, 4, 9, 11], [2, 5, 6, 10], [2, 5, 7, 8], [2, 5, 9, 12], [2, 6, 11, 13], [2, 7, 12, 13], [2, 8, 9, 13], [2, 10, 11, 12], [3, 4, 6, 9], [3, 4, 7, 12], [3, 4, 8, 13], [3, 4, 10, 11], [3, 5, 6, 12], [3, 5, 7, 10], [3, 5, 8, 9], [3, 6, 8, 11], [3, 6, 10, 13], [3, 7, 9, 13], [3, 11, 12, 13], [4, 5, 6, 7], [4, 5, 8, 10], [4, 5, 11, 12], [4, 6, 12, 13], [4, 7, 11, 13], [4, 8, 9, 12], [4, 9, 10, 13], [5, 6, 9, 13], [5, 7, 9, 11], [5, 8, 11, 13], [5, 10, 12, 13], [6, 7, 8, 9], [6, 7, 10, 11], [6, 8, 10, 12], [6, 9, 11, 12], [7, 8, 10, 13], [7, 8, 11, 12], [7, 9, 10, 12], [8, 9, 10, 11]] |
def get_signal_correlations(model, dataloaders, tier, device='cpu', as_dict=False, per_neuron=True):
correlations = {}
for (data_key, dataloader) in dataloaders[tier].items():
(trial_indices, image_ids, neuron_ids, responses) = get_data_filetree_loader(dataloader=dataloader, tier=tier)
(_, predictions) = model_predictions(model, dataloader, data_key=data_key, device=device)
repeats_responses = split_images(responses, image_ids)
repeats_predictions = split_images(predictions, image_ids)
(mean_responses, mean_predictions) = ([], [])
for (repeat_responses, repeat_predictions) in zip(repeats_responses, repeats_predictions):
mean_responses.append(repeat_responses.mean(axis=0, keepdims=True))
mean_predictions.append(repeat_predictions.mean(axis=0, keepdims=True))
mean_responses = np.vstack(mean_responses)
mean_predictions = np.vstack(mean_predictions)
correlations[data_key] = corr(mean_responses, mean_predictions, axis=0)
if (not as_dict):
correlations = (np.hstack([v for v in correlations.values()]) if per_neuron else np.mean(np.hstack([v for v in correlations.values()])))
return (correlations if per_neuron else correlations.mean()) |
def main(args):
cfg = get_default_cfg()
if args.cfg_file:
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
device = torch.device(cfg.DEVICE)
if (cfg.SEED >= 0):
set_random_seed(cfg.SEED)
print('Creating model')
model = SeqNet(cfg)
model.to(device)
print('Loading data')
train_loader = build_train_loader(cfg)
(gallery_loader, query_loader) = build_test_loader(cfg)
if args.eval:
assert args.ckpt, '--ckpt must be specified when --eval enabled'
resume_from_ckpt(args.ckpt, model)
evaluate_performance(model, gallery_loader, query_loader, device, use_gt=cfg.EVAL_USE_GT, use_cache=cfg.EVAL_USE_CACHE, use_cbgm=cfg.EVAL_USE_CBGM)
exit(0)
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.SGD_MOMENTUM, weight_decay=cfg.SOLVER.WEIGHT_DECAY)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=cfg.SOLVER.LR_DECAY_MILESTONES, gamma=0.1)
start_epoch = 0
if args.resume:
assert args.ckpt, '--ckpt must be specified when --resume enabled'
start_epoch = (resume_from_ckpt(args.ckpt, model, optimizer, lr_scheduler) + 1)
print('Creating output folder')
output_dir = cfg.OUTPUT_DIR
mkdir(output_dir)
path = osp.join(output_dir, 'config.yaml')
with open(path, 'w') as f:
f.write(cfg.dump())
print(f'Full config is saved to {path}')
tfboard = None
if cfg.TF_BOARD:
from torch.utils.tensorboard import SummaryWriter
tf_log_path = osp.join(output_dir, 'tf_log')
mkdir(tf_log_path)
tfboard = SummaryWriter(log_dir=tf_log_path)
print(f'TensorBoard files are saved to {tf_log_path}')
print('Start training')
start_time = time.time()
for epoch in range(start_epoch, cfg.SOLVER.MAX_EPOCHS):
train_one_epoch(cfg, model, optimizer, train_loader, device, epoch, tfboard)
lr_scheduler.step()
if ((((epoch + 1) % cfg.EVAL_PERIOD) == 0) or (epoch == (cfg.SOLVER.MAX_EPOCHS - 1))):
evaluate_performance(model, gallery_loader, query_loader, device, use_gt=cfg.EVAL_USE_GT, use_cache=cfg.EVAL_USE_CACHE, use_cbgm=cfg.EVAL_USE_CBGM)
if ((((epoch + 1) % cfg.CKPT_PERIOD) == 0) or (epoch == (cfg.SOLVER.MAX_EPOCHS - 1))):
save_on_master({'model': model.state_dict(), 'optimizer': optimizer.state_dict(), 'lr_scheduler': lr_scheduler.state_dict(), 'epoch': epoch}, osp.join(output_dir, f'epoch_{epoch}.pth'))
if tfboard:
tfboard.close()
total_time = (time.time() - start_time)
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print(f'Total training time {total_time_str}') |
class RandomSampler(_BasicSampler):
def __init__(self, dataset, params, is_training=True, seed=0, return_index=False):
self.num_points_per_sample = 0
self.modify_type = None
        super().__init__(dataset, params, is_training)
self.center = np.array([((self.dataset.max_bounds[0] - self.dataset.min_bounds[0]) / 2), ((self.dataset.max_bounds[1] - self.dataset.min_bounds[1]) / 2), self.dataset.min_bounds[2]])
self.random_machine = np.random.RandomState(seed)
self.return_index = return_index
(self._infer_seq, self._res_num) = self._gen_random_seq()
self.modify_func = PointModifier(self.modify_type)
def modify_points(self, points, *args, **kwargs):
return self.modify_func(points, center=self.center)
def cal_length(self):
return (int((len(self.dataset) / self.num_points_per_sample)) + 1)
def sample(self, ind, set_random_machine=None, *args, **kwargs):
ind = self._sample_index(ind, set_random_machine)
if self.return_index:
(empty_pts_, empty_pts, _, empty_clrs) = _gen_empty_sample(self.num_points_per_sample, self.modify_func.shape, self.dataset.labels.shape[1])
return (empty_pts_, empty_pts, ind, empty_clrs)
else:
(points, colors, labels) = self.dataset[ind]
points_centered = self.modify_points(points)
return (points_centered, points, labels, colors)
def _gen_random_seq(self):
seq = np.random.permutation(len(self.dataset))
res = ((len(self) * self.num_points_per_sample) - len(self.dataset))
seq = np.concatenate([seq, seq[:res]])
return (seq, res)
def _get_train_index(self, set_random_machine=None):
        random_machine = (set_random_machine if (set_random_machine is not None) else self.random_machine)
return random_machine.permutation(len(self.dataset.points))[:self.num_points_per_sample]
def _get_infer_index(self, ind):
seq_ind = np.arange((ind * self.num_points_per_sample), ((ind + 1) * self.num_points_per_sample))
return self._infer_seq[seq_ind]
def _sample_index(self, ind, set_random_machine=None):
if self.is_training:
ind = self._get_train_index(set_random_machine)
else:
ind = self._get_infer_index(ind)
return ind |
def create_backbone(args, device):
model = vits.__dict__['vit_base']()
state_dict = torch.load(args.dino_pretrain_path, map_location='cpu')
model.load_state_dict(state_dict)
if (args.warmup_model_dir is not None):
print(f'Loading weights from {args.warmup_model_dir}')
model.load_state_dict(torch.load(args.warmup_model_dir, map_location='cpu'))
if args.use_vpt:
vptmodel = vpt_vit.__dict__['vit_base'](num_prompts=args.num_prompts, vpt_dropout=args.vpt_dropout, n_shallow_prompts=args.n_shallow_prompts)
if (args.load_from_model is not None):
print(f'NOTE:: load from {args.load_from_model}')
vptmodel.load_state_dict(torch.load(args.load_from_model, map_location='cpu'), strict=True)
else:
vptmodel.load_from_state_dict(state_dict, False)
model = vptmodel
vpt_vit.configure_parameters(model=model, grad_layer=args.grad_from_block)
else:
for m in model.parameters():
m.requires_grad = False
for (name, m) in model.named_parameters():
if ('block' in name):
block_num = int(name.split('.')[1])
if (block_num >= args.grad_from_block):
m.requires_grad = True
model.to(device)
return model |
class TIntFltH(object):
thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
__repr__ = _swig_repr
HashPrimes = _snap.TIntFltH_HashPrimes
def __init__(self, *args):
_snap.TIntFltH_swiginit(self, _snap.new_TIntFltH(*args))
def Load(self, SIn):
return _snap.TIntFltH_Load(self, SIn)
def Save(self, SOut):
return _snap.TIntFltH_Save(self, SOut)
def __eq__(self, Hash):
return _snap.TIntFltH___eq__(self, Hash)
def __lt__(self, Hash):
return _snap.TIntFltH___lt__(self, Hash)
def __call__(self, Key):
return _snap.TIntFltH___call__(self, Key)
def GetMemUsed(self):
return _snap.TIntFltH_GetMemUsed(self)
def BegI(self):
return _snap.TIntFltH_BegI(self)
def EndI(self):
return _snap.TIntFltH_EndI(self)
def GetI(self, Key):
return _snap.TIntFltH_GetI(self, Key)
def Gen(self, ExpectVals):
return _snap.TIntFltH_Gen(self, ExpectVals)
def Clr(self, DoDel=True, NoDelLim=(- 1), ResetDat=True):
return _snap.TIntFltH_Clr(self, DoDel, NoDelLim, ResetDat)
def Empty(self):
return _snap.TIntFltH_Empty(self)
def Len(self):
return _snap.TIntFltH_Len(self)
def GetPorts(self):
return _snap.TIntFltH_GetPorts(self)
def IsAutoSize(self):
return _snap.TIntFltH_IsAutoSize(self)
def GetMxKeyIds(self):
return _snap.TIntFltH_GetMxKeyIds(self)
def GetReservedKeyIds(self):
return _snap.TIntFltH_GetReservedKeyIds(self)
def IsKeyIdEqKeyN(self):
return _snap.TIntFltH_IsKeyIdEqKeyN(self)
def AddKey(self, Key):
return _snap.TIntFltH_AddKey(self, Key)
def AddDat(self, *args):
return _snap.TIntFltH_AddDat(self, *args)
def DelKey(self, Key):
return _snap.TIntFltH_DelKey(self, Key)
def DelIfKey(self, Key):
return _snap.TIntFltH_DelIfKey(self, Key)
def DelKeyId(self, KeyId):
return _snap.TIntFltH_DelKeyId(self, KeyId)
def DelKeyIdV(self, KeyIdV):
return _snap.TIntFltH_DelKeyIdV(self, KeyIdV)
def GetKey(self, KeyId):
return _snap.TIntFltH_GetKey(self, KeyId)
def GetKeyId(self, Key):
return _snap.TIntFltH_GetKeyId(self, Key)
def GetRndKeyId(self, *args):
return _snap.TIntFltH_GetRndKeyId(self, *args)
def IsKey(self, *args):
return _snap.TIntFltH_IsKey(self, *args)
def IsKeyId(self, KeyId):
return _snap.TIntFltH_IsKeyId(self, KeyId)
def GetDat(self, *args):
return _snap.TIntFltH_GetDat(self, *args)
def GetDatWithDefault(self, Key, DefaultValue):
return _snap.TIntFltH_GetDatWithDefault(self, Key, DefaultValue)
def GetKeyDat(self, KeyId, Key, Dat):
return _snap.TIntFltH_GetKeyDat(self, KeyId, Key, Dat)
def IsKeyGetDat(self, Key, Dat):
return _snap.TIntFltH_IsKeyGetDat(self, Key, Dat)
def FFirstKeyId(self):
return _snap.TIntFltH_FFirstKeyId(self)
def FNextKeyId(self, KeyId):
return _snap.TIntFltH_FNextKeyId(self, KeyId)
def GetKeyV(self, KeyV):
return _snap.TIntFltH_GetKeyV(self, KeyV)
def GetDatV(self, DatV):
return _snap.TIntFltH_GetDatV(self, DatV)
def GetKeyDatPrV(self, KeyDatPrV):
return _snap.TIntFltH_GetKeyDatPrV(self, KeyDatPrV)
def GetDatKeyPrV(self, DatKeyPrV):
return _snap.TIntFltH_GetDatKeyPrV(self, DatKeyPrV)
def GetKeyDatKdV(self, KeyDatKdV):
return _snap.TIntFltH_GetKeyDatKdV(self, KeyDatKdV)
def GetDatKeyKdV(self, DatKeyKdV):
return _snap.TIntFltH_GetDatKeyKdV(self, DatKeyKdV)
def Swap(self, Hash):
return _snap.TIntFltH_Swap(self, Hash)
def Defrag(self):
return _snap.TIntFltH_Defrag(self)
def Pack(self):
return _snap.TIntFltH_Pack(self)
def Sort(self, CmpKey, Asc):
return _snap.TIntFltH_Sort(self, CmpKey, Asc)
def SortByKey(self, Asc=True):
return _snap.TIntFltH_SortByKey(self, Asc)
def SortByDat(self, Asc=True):
return _snap.TIntFltH_SortByDat(self, Asc)
__swig_destroy__ = _snap.delete_TIntFltH |
def train(model, optimizer, train_loader, criterion, entropy_loss_func, opts):
    y_probs = np.zeros((0, len(train_loader.dataset.CLASSES)), dtype=float)  # np.float/np.int were removed in NumPy 1.24
    y_trues = np.zeros(0, dtype=int)
losses = []
model.train()
for (i, (x_low, x_high, label)) in enumerate(tqdm(train_loader)):
(x_low, x_high, label) = move_to([x_low, x_high, label], opts.device)
optimizer.zero_grad()
(y, attention_map, patches, x_low) = model(x_low, x_high)
entropy_loss = entropy_loss_func(attention_map)
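        # The entropy term is subtracted from the loss, so training pushes entropy_loss_func(attention_map) up.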
loss = (criterion(y, label) - entropy_loss)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), opts.clipnorm)
optimizer.step()
loss_value = loss.item()
losses.append(loss_value)
y_prob = F.softmax(y, dim=1)
y_probs = np.concatenate([y_probs, y_prob.detach().cpu().numpy()])
y_trues = np.concatenate([y_trues, label.cpu().numpy()])
train_loss_epoch = np.round(np.mean(losses), 4)
metrics = calc_cls_measures(y_probs, y_trues)
return (train_loss_epoch, metrics) |
class TunableMixin:
    @classmethod
    def _tunables(cls) -> list[Any]:
_tunables = []
for attr_key in dir(cls):
if (attr_key == '_tunables'):
continue
attr = getattr(cls, attr_key)
if (hasattr(attr, '_tunables') or isfunction(attr)):
_tunables.append(attr)
return _tunables |
def is_acceptable(tensor):
if (not torch._C._get_cudnn_enabled()):
return False
if (tensor.type() not in CUDNN_TENSOR_TYPES):
return False
if (not is_available()):
warnings.warn('PyTorch was compiled without cuDNN support. To use cuDNN, rebuild PyTorch making sure the library is visible to the build system.')
return False
if (_libcudnn() is None):
warnings.warn('cuDNN library not found. Check your {libpath}'.format(libpath={'darwin': 'DYLD_LIBRARY_PATH', 'win32': 'PATH'}.get(sys.platform, 'LD_LIBRARY_PATH')))
return False
return True |
def mean_color(scan_ids, all_scans):
mean_rgb = np.zeros((1, 3), dtype=np.float32)
n_points = 0
for scan_id in scan_ids:
color = all_scans[scan_id].color
mean_rgb += np.sum(color, axis=0)
n_points += len(color)
mean_rgb /= n_points
return mean_rgb |
def existsSemiDirectedPath(node_from, node_to, bound, graph):
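    # Breadth-first search along semi-directed edges, with node_e used to track level
    # boundaries so the search depth can be capped at `bound`.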
Q = Queue()
V = set()
Q.put(node_from)
V.add(node_from)
node_e = None
distance = 0
while (not Q.empty()):
node_t = Q.get_nowait()
if (node_t == node_to):
return True
if (node_e == node_t):
node_e = None
distance += 1
if (distance > (1000 if (bound == (- 1)) else bound)):
return False
for node_u in graph.get_adjacent_nodes(node_t):
edge = graph.get_edge(node_t, node_u)
node_c = traverseSemiDirected(node_t, edge)
if (node_c is None):
continue
if (node_c == node_to):
return True
            if (node_c not in V):
V.add(node_c)
Q.put(node_c)
                if (node_e is None):
node_e = node_u
return False |
def compute_F1(gold_files, sys_files, labeled=False):
correct = 0
predicted = 0
actual = 0
n_tokens = 0
n_sequences = 0
current_seq_correct = False
n_correct_sequences = 0
current_fp = 0
current_sent = 0
for (gold_file, sys_file) in zip(gold_files, sys_files):
with codecs.open(gold_file, encoding='utf-8') as gf, codecs.open(sys_file, encoding='utf-8') as sf:
gold_line = gf.readline()
gold_i = 1
sys_i = 0
while gold_line:
while gold_line.startswith('#'):
current_sent += 1
gold_i += 1
n_sequences += 1
n_correct_sequences += current_seq_correct
current_seq_correct = True
gold_line = gf.readline()
if (gold_line.rstrip() != ''):
sys_line = sf.readline()
sys_i += 1
while (sys_line.startswith('#') or (sys_line.rstrip() == '') or (sys_line.split('\t')[0] == '0')):
sys_line = sf.readline()
sys_i += 1
gold_line = gold_line.rstrip().split('\t')
sys_line = sys_line.rstrip().split('\t')
assert (sys_line[1] == gold_line[1]), 'Files are misaligned at lines {}, {}'.format(gold_i, sys_i)
gold_node = gold_line[8]
if (gold_node != '_'):
gold_node = gold_node.split('|')
if labeled:
gold_edges = set((tuple(gold_edge.split(':', 1)) for gold_edge in gold_node))
else:
gold_edges = set((gold_edge.split(':', 1)[0] for gold_edge in gold_node))
else:
gold_edges = set()
sys_node = sys_line[8]
if (sys_node != '_'):
sys_node = sys_node.split('|')
if labeled:
sys_edges = set((tuple(sys_edge.split(':', 1)) for sys_edge in sys_node))
else:
sys_edges = set((sys_edge.split(':', 1)[0] for sys_edge in sys_node))
else:
sys_edges = set()
correct_edges = (gold_edges & sys_edges)
if (len(correct_edges) != len(gold_edges)):
current_seq_correct = False
correct += len(correct_edges)
predicted += len(sys_edges)
actual += len(gold_edges)
n_tokens += 1
gold_line = gf.readline()
gold_i += 1
Accuracy = namedtuple('Accuracy', ['precision', 'recall', 'F1', 'seq_acc'])
precision = (correct / (predicted + 1e-12))
recall = (correct / (actual + 1e-12))
F1 = (((2 * precision) * recall) / ((precision + recall) + 1e-12))
seq_acc = (n_correct_sequences / n_sequences)
return Accuracy(precision, recall, F1, seq_acc) |
class storage():
instance = None
client = None
def __init__(self):
self.client = gcp_storage.Client()
    @staticmethod
    def unique_name(name):
(name, extension) = os.path.splitext(name)
return '{name}.{random}{extension}'.format(name=name, extension=extension, random=str(uuid.uuid4()).split('-')[0])
def upload(self, bucket, file, filepath):
key_name = storage.unique_name(file)
bucket_instance = self.client.bucket(bucket)
blob = bucket_instance.blob(key_name)
blob.upload_from_filename(filepath)
return key_name
def download(self, bucket, file, filepath):
bucket_instance = self.client.bucket(bucket)
blob = bucket_instance.blob(file)
blob.download_to_filename(filepath)
def download_directory(self, bucket, prefix, path):
objects = self.client.bucket(bucket).list_blobs(prefix=prefix)
for obj in objects:
file_name = obj.name
path_to_file = os.path.dirname(file_name)
os.makedirs(os.path.join(path, path_to_file), exist_ok=True)
self.download(bucket, file_name, os.path.join(path, file_name))
def upload_stream(self, bucket, file, data):
key_name = storage.unique_name(file)
bucket_instance = self.client.bucket(bucket)
blob = bucket_instance.blob(key_name)
blob.upload_from_file(data)
return key_name
def download_stream(self, bucket, file):
data = io.BytesIO()
bucket_instance = self.client.bucket(bucket)
blob = bucket_instance.blob(file)
blob.download_to_file(data)
return data.getbuffer()
    @staticmethod
    def get_instance():
if (storage.instance is None):
storage.instance = storage()
return storage.instance |
@oprepo.replaces('dace.libraries.blas.bmm')
def bmmnode(pv, sdfg: dace.SDFG, state: dace.SDFGState, A, B, C, alpha=1, beta=0, trans_a=False, trans_b=False):
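    # Wire a BatchedMatMul library node into the SDFG state: reads A and B,
    # writes C = alpha * (A @ B) + beta * C, with optional transposes.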
(A_in, B_in) = (state.add_read(name) for name in (A, B))
C_out = state.add_write(C)
libnode = BatchedMatMul('bmm')
libnode.alpha = alpha
libnode.beta = beta
libnode.transA = trans_a
libnode.transB = trans_b
state.add_node(libnode)
state.add_edge(A_in, None, libnode, '_a', mm.Memlet(A))
state.add_edge(B_in, None, libnode, '_b', mm.Memlet(B))
state.add_edge(libnode, '_c', C_out, None, mm.Memlet(C))
return [] |
def load_data(args):
    data_path = os.path.join(args.data_dir, ('data_%s.json' % args.eval_name))
    with open(data_path, 'r') as f:
        return json.load(f) |
@test_utils.test(arch=archs_support_ndarray_ad, default_fp=ti.f32, require=ti.extension.adstack)
def test_ad_fibonacci_index():
N = 5
M = 10
a = ti.ndarray(ti.f32, shape=M, needs_grad=True)
b = ti.ndarray(ti.f32, shape=M, needs_grad=True)
f = ti.ndarray(ti.f32, shape=(), needs_grad=True)
    @ti.kernel
    def fib(a: ti.types.ndarray(), b: ti.types.ndarray(), f: ti.types.ndarray()):
for i in range(N):
p = 0
q = 1
for j in range(5):
(p, q) = (q, (p + q))
b[q] += a[q]
for i in range(M):
f[None] += b[i]
f.grad[None] = 1
a.fill(1)
fib(a, b, f)
fib.grad(a, b, f)
for i in range(M):
is_fib = int((i in [1, 2, 3, 5, 8]))
assert (a.grad[i] == (is_fib * N))
assert (b[i] == (is_fib * N)) |
def _read_pretrained_embedding_file(embeddings_filename: str, embedding_dim: int, vocab: Vocabulary, namespace: str='tokens') -> torch.FloatTensor:
if ((embeddings_filename[(- 3):] == '.h5') or (embeddings_filename[(- 5):] == '.hdf5')):
return _read_pretrained_hdf5_format_embedding_file(embeddings_filename, embedding_dim, vocab, namespace)
else:
return _read_pretrained_word2vec_format_embedding_file(embeddings_filename, embedding_dim, vocab, namespace) |