code stringlengths 101 5.91M |
|---|
def reform_weights(g, w, n, intervals):
    """Slice the weight tensor *w* along axis 0 and re-concatenate the pieces.

    Each ``(x, y)`` pair in *intervals* selects rows ``[x*n, y*n)``; the
    resulting slices are concatenated along axis 0 in interval order.
    """
    pieces = []
    for start, stop in intervals:
        pieces.append(g.op('Slice', w, axes_i=[0], starts_i=[start * n], ends_i=[stop * n]))
    return g.op('Concat', *pieces, axis_i=0)
def model_to_gpu(model, isTrain, gpu):
    """Move *model* to GPU and wrap it for multi-GPU execution.

    Training wraps in DistributedDataParallel (pinned to *gpu* when given);
    evaluation wraps in DataParallel over all visible devices.
    """
    if not isTrain:
        # Evaluation path: plain DataParallel across all GPUs.
        model.cuda()
        return nn.DataParallel(model)
    if gpu is None:
        model.cuda()
        return DDP(model, find_unused_parameters=True)
    # Training on a specific device.
    model.cuda(gpu)
    return DDP(model, device_ids=[gpu], find_unused_parameters=True)
# BUG FIX: the test-runner wrapper was invoked as a bare statement, so the
# decorator it returns was discarded and the test ran undecorated (or not at
# all, depending on the harness). Restore it as a decorator.
@_utils.test(arch=get_host_arch_list())
def test_offset_must_throw_scalar():
    """Field creation must reject offsets whose rank mismatches the shape."""
    # A scalar shape (rank 1) cannot take a 2-D offset.
    with pytest.raises(ti.TaichiCompilationError, match='The dimensionality of shape and offset must be the same'):
        a = ti.field(dtype=ti.f32, shape=3, offset=(3, 4))
    # An offset is meaningless without a shape.
    with pytest.raises(ti.TaichiCompilationError, match='shape cannot be None when offset is set'):
        b = ti.field(dtype=ti.f32, shape=None, offset=(3, 4))
def set_lr_scheduler(cfg, scheduler):
    """Sync *scheduler*'s milestones and gamma with the training config.

    With warmup enabled the raw milestone list is assigned directly;
    otherwise the milestones are wrapped in a Counter (the internal
    representation MultiStepLR uses).
    """
    milestones = cfg.train.milestones
    scheduler.milestones = milestones if cfg.train.warmup else Counter(milestones)
    scheduler.gamma = cfg.train.gamma
class DL_BM_Arch():
    """Deep-learning benchmark architectures over voxelized deviation data."""

    def __init__(self, output_dimension):
        # Dimensionality of the model output (stored for downstream use).
        self.output_dimension = output_dimension

    def fcnn(self, depth=3, filter_root=32, output_heads=2, voxel_dim=64, deviation_channels=3):
        """Build and compile a 3-D fully convolutional encoder/decoder network.

        Args:
            depth: number of down-sampling (and mirrored up-sampling) stages.
            filter_root: filters in the first stage; doubled at each stage.
            output_heads: number of channel groups in the output head.
            voxel_dim: edge length of the cubic voxel-grid input.
            deviation_channels: channels per voxel (deviation components).

        Returns:
            The compiled ``tf.keras`` Model (also plotted to ``FCNN.png``).
        """
        # Imports are kept local so TensorFlow is only required when a model
        # is actually built. The original code imported Model, Input and
        # plot_model twice and pulled in many unused names (tfp, numpy, K,
        # several layer classes); those redundant imports have been removed.
        import tensorflow as tf
        from tensorflow.keras.models import Model
        from tensorflow.keras.layers import Conv3D, MaxPooling3D, Input, Activation, UpSampling3D
        from tensorflow.keras.utils import plot_model

        mse_basic = tf.keras.losses.MeanSquaredError()
        overall_loss_dict = {'shape_error_outputs': mse_basic}
        overall_loss_weights = {'shape_error_outputs': 1.0}
        overall_metrics_dict = {'shape_error_outputs': [tf.keras.metrics.MeanAbsoluteError()]}
        Conv = Conv3D
        MaxPooling = MaxPooling3D
        UpSampling = UpSampling3D
        activation = 'relu'
        final_activation = 'linear'
        input_size = (voxel_dim, voxel_dim, voxel_dim, deviation_channels)
        inputs = Input(input_size)
        x = inputs
        # Encoder: double the channel count and halve resolution per stage.
        for i in range(depth):
            out_channel = (2 ** i) * filter_root
            conv1 = Conv(out_channel, kernel_size=3, padding='same', name='Conv{}_1'.format(i))(x)
            act1 = Activation(activation, name='Act{}_1'.format(i))(conv1)
            x = MaxPooling(padding='same', name='MaxPooling{}_1'.format(i))(act1)
        # Decoder: mirror the encoder with upsample + conv stages.
        for i in range((depth - 1), -1, -1):
            out_channel = (2 ** i) * filter_root
            up1 = UpSampling(name='UpSampling{}_1'.format(i))(x)
            up_conv1 = Conv(out_channel, 2, activation='relu', padding='same', name='upConvSam{}_1'.format(i))(up1)
            up_conv2 = Conv(out_channel, 3, padding='same', name='upConv{}_1'.format(i))(up_conv1)
            up_act1 = Activation(activation, name='upAct{}_1'.format(i))(up_conv2)
            x = Activation(activation, name='upAct{}_2'.format(i))(up_act1)
        # Single head: per-voxel deviation predictions for all output heads.
        output = Conv((deviation_channels * output_heads), 1, padding='same', activation=final_activation, name='shape_error_outputs')(x)
        output_list = [output]
        model = Model(inputs, outputs=output_list, name='FCNN')
        model.compile(optimizer=tf.keras.optimizers.Adam(), experimental_run_tf_function=False, loss=overall_loss_dict, metrics=overall_metrics_dict, loss_weights=overall_loss_weights)
        print('3D FCNN model successfully compiled')
        print(model.summary())
        plot_model(model, to_file='FCNN.png', show_shapes=True, show_layer_names=True)
        return model
class Morphism(Morphism_abstract, sage.modules.matrix_morphism.MatrixMorphism):
    def restrict_domain(self, sub):
        """Restrict this morphism to the subvariety *sub* of its domain.

        Raises ValueError when *sub* is not a subvariety of the domain;
        returns ``self`` unchanged when *sub* equals the full domain.
        """
        if not sub.is_subvariety(self.domain()):
            raise ValueError('sub must be a subvariety of self.domain()')
        if sub == self.domain():
            return self
        ambient_lattice = self.domain().lattice()
        basis = sub.lattice().basis()
        # Image of each basis vector of sub's lattice, flattened row-wise.
        image_entries = []
        for b in basis:
            image_entries.extend((ambient_lattice(b) * self.matrix()).list())
        MS = matrix_space.MatrixSpace(self.base_ring(), len(basis), self.codomain().rank())
        hom = sub.Hom(self.codomain(), self.category_for())
        return hom(MS(image_entries))
class TestRNGExtension(common.TestCase):
    """Exercises the custom RNG C++ extension: generator output and lifetime."""

    def setUp(self):
        super(TestRNGExtension, self).setUp()

    def test_rng(self):
        expected = torch.full((10,), 42, dtype=torch.int64)
        # Neither the default nor a stock CPU generator should emit the constant.
        sample = torch.empty(10, dtype=torch.int64).random_()
        self.assertNotEqual(sample, expected)
        stock_gen = torch.Generator(device='cpu')
        sample = torch.empty(10, dtype=torch.int64).random_(generator=stock_gen)
        self.assertNotEqual(sample, expected)
        # The extension generator always emits 42; track its live-instance count.
        self.assertEqual(rng_extension.getInstanceCount(), 0)
        gen = rng_extension.createTestCPUGenerator(42)
        self.assertEqual(rng_extension.getInstanceCount(), 1)
        alias = gen
        self.assertEqual(rng_extension.getInstanceCount(), 1)
        self.assertEqual(gen, alias)
        alias2 = rng_extension.identity(alias)
        self.assertEqual(rng_extension.getInstanceCount(), 1)
        self.assertEqual(gen, alias2)
        sample = torch.empty(10, dtype=torch.int64).random_(generator=gen)
        self.assertEqual(rng_extension.getInstanceCount(), 1)
        self.assertEqual(sample, expected)
        # The count drops to zero only after the last reference is deleted.
        del gen
        self.assertEqual(rng_extension.getInstanceCount(), 1)
        del alias
        self.assertEqual(rng_extension.getInstanceCount(), 1)
        del alias2
        self.assertEqual(rng_extension.getInstanceCount(), 0)
def test_keras_predictor_check_optimizer_property() -> None:
    """The optimizer given at construction is exposed via the property."""
    rmsprop = KerasOptimizer(tf.optimizers.RMSprop())
    predictor = _DummyKerasPredictor(rmsprop)
    assert predictor.optimizer == rmsprop
class JoinBlastJobs(Join):
    """Join step that runs after all fragment search jobs have finished.

    Assigns each fragment to the alignment subset with the best search
    score, then distributes fragments into chunks and enqueues the
    hmmalign jobs.
    """

    def __init__(self):
        Join.__init__(self)

    def setup_with_root_problem(self, root_problem):
        """Attach to *root_problem* and register all leaf blast-search jobs."""
        self.root_problem = root_problem
        for p in root_problem.iter_leaves():
            self.add_job(p.jobs['blastsearch'])

    def figureout_fragment_marker(self):
        """Assign each fragment to the alignment subset with the best score.

        Idempotent: a 'fragments.distribution.done' annotation short-circuits.
        """
        if 'fragments.distribution.done' in self.root_problem.annotations:
            return
        # fragment name -> (best score seen, owning alignment problem)
        max_evalues = dict([(name, (None, None)) for name in self.root_problem.fragments.keys()])
        for fragment_chunk_problem in self.root_problem.iter_leaves():
            align_problem = fragment_chunk_problem.get_parent()
            assert isinstance(align_problem, SeppProblem)
            if align_problem.fragments is None:
                align_problem.fragments = self.root_problem.fragments.get_soft_sub_alignment([])
            search_res = fragment_chunk_problem.get_job_result_by_name('hmmsearch')
            for key in search_res.keys():
                (best_value, prev_align_problem) = max_evalues[key]
                # Keep the subset with the highest score seen so far.
                if (best_value is None) or (best_value < search_res[key][1]):
                    max_evalues[key] = (search_res[key][1], align_problem)
        notScored = []
        for (key, v) in max_evalues.items():
            if v[1] is None:
                notScored.append(key)
            else:
                v[1].fragments.seq_names.add(key)
        self.root_problem.annotations['fragments.distribution.done'] = 1
        _LOG.warning('Fragments %s are not scored against any subset' % str(notScored))

    def perform(self):
        """Distribute fragments into equal chunks and enqueue hmmalign jobs."""
        # NOTE(review): figureout_fragment_subset is presumably provided by a
        # base class/mixin — confirm its relation to figureout_fragment_marker.
        self.figureout_fragment_subset()
        alg_problems = [alg for p in self.root_problem.children for alg in p.children]
        for alg_problem in alg_problems:
            assert isinstance(alg_problem, SeppProblem)
            chunks = len(alg_problem.get_children())
            fragment_chunks = alg_problem.fragments.divide_to_equal_chunks(chunks)
            for (i, fragment_chunk_problem) in enumerate(alg_problem.children):
                fragment_chunk_problem.fragments = fragment_chunks[i]
                aj = fragment_chunk_problem.jobs['hmmalign']
                assert isinstance(aj, HMMAlignJob)
                aj.hmmmodel = alg_problem.get_job_result_by_name('hmmbuild')
                aj.base_alignment = alg_problem.jobs['hmmbuild'].infile
                if not fragment_chunk_problem.fragments.is_empty():
                    fragment_chunk_problem.fragments.write_to_path(aj.fragments)
                else:
                    # No fragments for this chunk: run the job as a no-op.
                    aj.fake_run = True
                # Now the align job can be put on the queue.
                # (Was a stray bare-string statement; converted to a comment.)
                JobPool().enqueue_job(aj)

    def __str__(self):
        # BUG FIX: previously returned a tuple ('join search jobs...', problem),
        # which raises TypeError whenever str() is called on this object.
        return 'join search jobs for all tips of %s' % str(self.root_problem)
def test_frt():
    """The finite Radon transform followed by its inverse is the identity."""
    size = 59
    # Symmetric test image: lower triangle plus its vertical flip.
    img = np.tri(size, dtype=np.int32) + np.tri(size, dtype=np.int32)[::-1]
    reconstructed = ifrt2(frt2(img))
    assert np.array_equal(img, reconstructed)
def update_config(config, data_sets):
    """Scan all datasets and record maximal size statistics on *config*.

    Sets max sentence/question/word/tree-height counts over the valid
    indices of every dataset, then derives vocabulary/embedding sizes
    from the first dataset's shared tables.
    """
    config.max_num_sents = 0
    config.max_sent_size = 0
    config.max_ques_size = 0
    config.max_word_size = 0
    config.max_tree_height = 0
    for data_set in data_sets:
        data = data_set.data
        shared = data_set.shared
        for idx in data_set.valid_idxs:
            rx = data['*x'][idx]
            question = data['q'][idx]
            sents = shared['x'][rx[0]][rx[1]]
            tree_strings = shared['stx'][rx[0]][rx[1]]
            heights = (nltk.tree.Tree.fromstring(s).height() for s in tree_strings)
            config.max_tree_height = max(config.max_tree_height, max(heights))
            config.max_num_sents = max(config.max_num_sents, len(sents))
            config.max_sent_size = max(config.max_sent_size, max(len(sent) for sent in sents))
            config.max_word_size = max(config.max_word_size, max(len(word) for sent in sents for word in sent))
            if len(question) > 0:
                config.max_ques_size = max(config.max_ques_size, len(question))
                config.max_word_size = max(config.max_word_size, max(len(word) for word in question))
    # Cap the word size at the configured threshold.
    config.max_word_size = min(config.max_word_size, config.word_size_th)
    shared0 = data_sets[0].shared
    config.char_vocab_size = len(shared0['char2idx'])
    config.word_emb_size = len(next(iter(shared0['word2vec'].values())))
    config.word_vocab_size = len(shared0['word2idx'])
    config.pos_vocab_size = len(shared0['pos2idx'])
def assert_csc_almost_equal(r, l):
    """Assert two matrices are equal in CSC form.

    Structure (indptr, indices) must match exactly; the stored data must
    agree to within 10000 units in the last place.
    """
    left = csc_matrix(r)
    right = csc_matrix(l)
    assert_equal(left.indptr, right.indptr)
    assert_equal(left.indices, right.indices)
    assert_array_almost_equal_nulp(left.data, right.data, 10000)
class MockNoChange(nn.Module):
    """Identity module: returns a detached copy of the input on *device*."""

    def __init__(self):
        super().__init__()

    def forward(self, input):
        # Clone so the caller's tensor is never aliased, detach from the
        # autograd graph, then move to the module-level target device.
        detached_copy = input.clone().detach()
        return detached_copy.to(device)
class ParallelRunner():
    """Episode runner that steps `batch_size_run` environments in parallel.

    Each environment lives in its own worker process (`env_worker`) and is
    driven over a Pipe with (command, payload) messages: 'reset', 'step',
    'get_stats', 'get_env_info', 'close'. Transitions are accumulated into
    an EpisodeBatch.
    """

    def __init__(self, args, logger):
        self.args = args
        self.logger = logger
        self.batch_size = self.args.batch_size_run
        # One (parent, worker) pipe pair per environment.
        (self.parent_conns, self.worker_conns) = zip(*[Pipe() for _ in range(self.batch_size)])
        env_fn = env_REGISTRY[self.args.env]
        # CloudpickleWrapper lets the partial'd env constructor cross the
        # process boundary even when it is not plain-picklable.
        self.ps = [Process(target=env_worker, args=(worker_conn, CloudpickleWrapper(partial(env_fn, **self.args.env_args)))) for worker_conn in self.worker_conns]
        for p in self.ps:
            # Daemonize so worker processes die with the main process.
            p.daemon = True
            p.start()
        # All envs share one spec; query the first worker for it.
        self.parent_conns[0].send(('get_env_info', None))
        self.env_info = self.parent_conns[0].recv()
        self.episode_limit = self.env_info['episode_limit']
        self.t = 0       # timestep within the current episode
        self.t_env = 0   # cumulative environment steps during training
        self.train_returns = []
        self.test_returns = []
        self.train_stats = {}
        self.test_stats = {}
        # Far in the past so the first log interval check fires immediately.
        self.log_train_stats_t = (- 100000)

    def setup(self, scheme, groups, preprocess, mac):
        """Install the batch factory and the (shared) multi-agent controller."""
        # episode_limit + 1 slots: the extra slot holds the final state.
        self.new_batch = partial(EpisodeBatch, scheme, groups, self.batch_size, (self.episode_limit + 1), preprocess=preprocess, device=self.args.device)
        self.mac = mac
        self.scheme = scheme
        self.groups = groups
        self.preprocess = preprocess

    def get_env_info(self):
        return self.env_info

    def save_replay(self):
        # Replay saving is not supported by the parallel runner.
        pass

    def close_env(self):
        """Ask every worker to shut its environment down."""
        for parent_conn in self.parent_conns:
            parent_conn.send(('close', None))

    def reset(self):
        """Reset all environments and record the initial transition data."""
        self.batch = self.new_batch()
        for parent_conn in self.parent_conns:
            parent_conn.send(('reset', None))
        pre_transition_data = {'state': [], 'avail_actions': [], 'obs': []}
        for parent_conn in self.parent_conns:
            data = parent_conn.recv()
            pre_transition_data['state'].append(data['state'])
            pre_transition_data['avail_actions'].append(data['avail_actions'])
            pre_transition_data['obs'].append(data['obs'])
        self.batch.update(pre_transition_data, ts=0)
        self.t = 0
        self.env_steps_this_run = 0
        # Per-map message buffers for the communication-enabled controllers.
        # NOTE(review): the tensor sizes appear to encode per-map
        # (n_agents, message_dim) layouts — confirm against the matching
        # mac implementations before changing any of them.
        if (self.args.mac == 'basic_mac_6h_vs_8z'):
            self.mac.transmit_gap = th.zeros(48).cuda()
            self.mac.receive_gap = th.zeros((8, 6, 6)).cuda()
            self.mac.msg_old_test = th.zeros((48, 14)).cuda()
            self.mac.msg_old_test_reshape = th.zeros((8, 6, 6, 14)).cuda()
        elif (self.args.mac == 'basic_mac_3s_vs_4z'):
            self.mac.transmit_gap = th.zeros(24).cuda()
            self.mac.receive_gap = th.zeros((8, 3, 3)).cuda()
            self.mac.msg_old_test = th.zeros((24, 10)).cuda()
            self.mac.msg_old_test_reshape = th.zeros((8, 3, 3, 10)).cuda()
        elif (self.args.mac == 'basic_mac_3s_vs_5z'):
            self.mac.transmit_gap = th.zeros(24).cuda()
            self.mac.receive_gap = th.zeros((8, 3, 3)).cuda()
            self.mac.msg_old_test = th.zeros((24, 11)).cuda()
            self.mac.msg_old_test_reshape = th.zeros((8, 3, 3, 11)).cuda()
        elif (self.args.mac == 'basic_mac_2c_vs_64zg'):
            self.mac.transmit_gap = th.zeros(16).cuda()
            self.mac.receive_gap = th.zeros((8, 2, 2)).cuda()
            self.mac.msg_old_test = th.zeros((16, 70)).cuda()
            self.mac.msg_old_test_reshape = th.zeros((8, 2, 2, 70)).cuda()
        elif (self.args.mac == 'basic_mac_corridor'):
            self.mac.transmit_gap = th.zeros(48).cuda()
            self.mac.receive_gap = th.zeros((8, 6, 6)).cuda()
            self.mac.msg_old_test = th.zeros((48, 30)).cuda()
            self.mac.msg_old_test_reshape = th.zeros((8, 6, 6, 30)).cuda()
        elif (self.args.mac == 'basic_mac_3s5z'):
            self.mac.transmit_gap = th.zeros(64).cuda()
            self.mac.receive_gap = th.zeros((8, 8, 8)).cuda()
            self.mac.msg_old_test = th.zeros((64, 14)).cuda()
            self.mac.msg_old_test_reshape = th.zeros((8, 8, 8, 14)).cuda()

    def run(self, test_mode=False):
        """Run one batch of episodes to completion; return the filled batch."""
        self.reset()
        all_terminated = False
        episode_returns = [0 for _ in range(self.batch_size)]
        episode_lengths = [0 for _ in range(self.batch_size)]
        self.mac.init_hidden(batch_size=self.batch_size)
        terminated = [False for _ in range(self.batch_size)]
        envs_not_terminated = [b_idx for (b_idx, termed) in enumerate(terminated) if (not termed)]
        final_env_infos = []
        # In test mode, actions are chosen under a simulated lossy channel.
        if (test_mode == True):
            loss = generate_loss(level=self.args.loss_level, env=self.args.mac)
        counter = 0
        while True:
            counter = (counter + 1)
            # Select actions for all non-terminated envs in one batched call.
            if (test_mode == False):
                actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, bs=envs_not_terminated, test_mode=test_mode)
            elif (test_mode == True):
                actions = self.mac.select_actions_noisy_env(loss[counter], self.batch, t_ep=self.t, t_env=self.t_env, bs=envs_not_terminated, test_mode=test_mode)
            cpu_actions = actions.to('cpu').numpy()
            actions_chosen = {'actions': actions.unsqueeze(1)}
            self.batch.update(actions_chosen, bs=envs_not_terminated, ts=self.t, mark_filled=False)
            # Dispatch the selected actions; cpu_actions only covers envs in
            # envs_not_terminated, hence the separate running index.
            action_idx = 0
            for (idx, parent_conn) in enumerate(self.parent_conns):
                if (idx in envs_not_terminated):
                    if (not terminated[idx]):
                        parent_conn.send(('step', cpu_actions[action_idx]))
                    action_idx += 1
            envs_not_terminated = [b_idx for (b_idx, termed) in enumerate(terminated) if (not termed)]
            all_terminated = all(terminated)
            if all_terminated:
                break
            post_transition_data = {'reward': [], 'terminated': []}
            pre_transition_data = {'state': [], 'avail_actions': [], 'obs': []}
            # Collect step results from every still-running environment.
            for (idx, parent_conn) in enumerate(self.parent_conns):
                if (not terminated[idx]):
                    data = parent_conn.recv()
                    post_transition_data['reward'].append((data['reward'],))
                    episode_returns[idx] += data['reward']
                    episode_lengths[idx] += 1
                    if (not test_mode):
                        self.env_steps_this_run += 1
                    # Only count as "terminated" for learning purposes when the
                    # episode ended naturally, not via the episode-limit cutoff.
                    env_terminated = False
                    if data['terminated']:
                        final_env_infos.append(data['info'])
                    if (data['terminated'] and (not data['info'].get('episode_limit', False))):
                        env_terminated = True
                    terminated[idx] = data['terminated']
                    post_transition_data['terminated'].append((env_terminated,))
                    pre_transition_data['state'].append(data['state'])
                    pre_transition_data['avail_actions'].append(data['avail_actions'])
                    pre_transition_data['obs'].append(data['obs'])
            self.batch.update(post_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=False)
            self.t += 1
            self.batch.update(pre_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=True)
        if (not test_mode):
            self.t_env += self.env_steps_this_run
        # Gather per-env statistics for logging.
        for parent_conn in self.parent_conns:
            parent_conn.send(('get_stats', None))
        env_stats = []
        for parent_conn in self.parent_conns:
            env_stat = parent_conn.recv()
            env_stats.append(env_stat)
        cur_stats = (self.test_stats if test_mode else self.train_stats)
        cur_returns = (self.test_returns if test_mode else self.train_returns)
        log_prefix = ('test_' if test_mode else '')
        # Merge the final env infos into the running stats (summing per key).
        infos = ([cur_stats] + final_env_infos)
        cur_stats.update({k: sum((d.get(k, 0) for d in infos)) for k in set.union(*[set(d) for d in infos])})
        cur_stats['n_episodes'] = (self.batch_size + cur_stats.get('n_episodes', 0))
        cur_stats['ep_length'] = (sum(episode_lengths) + cur_stats.get('ep_length', 0))
        cur_returns.extend(episode_returns)
        n_test_runs = (max(1, (self.args.test_nepisode // self.batch_size)) * self.batch_size)
        if (test_mode and (len(self.test_returns) == n_test_runs)):
            self._log(cur_returns, cur_stats, log_prefix)
        elif ((self.t_env - self.log_train_stats_t) >= self.args.runner_log_interval):
            self._log(cur_returns, cur_stats, log_prefix)
            if hasattr(self.mac.action_selector, 'epsilon'):
                self.logger.log_stat('epsilon', self.mac.action_selector.epsilon, self.t_env)
            self.log_train_stats_t = self.t_env
        return self.batch

    def _log(self, returns, stats, prefix):
        """Log mean/std of returns and per-episode means of stats, then reset."""
        self.logger.log_stat((prefix + 'return_mean'), np.mean(returns), self.t_env)
        self.logger.log_stat((prefix + 'return_std'), np.std(returns), self.t_env)
        returns.clear()
        for (k, v) in stats.items():
            if (k != 'n_episodes'):
                self.logger.log_stat(((prefix + k) + '_mean'), (v / stats['n_episodes']), self.t_env)
        stats.clear()
def get_opt():
    """Parse and return the command-line options for the GMM train/test stage."""
    ap = argparse.ArgumentParser()
    ap.add_argument('--name', default='GMM')
    ap.add_argument('--gpu_ids', default='')
    ap.add_argument('-j', '--workers', type=int, default=1)
    ap.add_argument('-b', '--batch-size', type=int, default=4)
    # Data location and split selection.
    ap.add_argument('--dataroot', default='data')
    ap.add_argument('--datamode', default='train')
    ap.add_argument('--stage', default='GMM')
    ap.add_argument('--data_list', default='train_pairs.txt')
    # Input geometry.
    ap.add_argument('--fine_width', type=int, default=192)
    ap.add_argument('--fine_height', type=int, default=256)
    ap.add_argument('--radius', type=int, default=5)
    ap.add_argument('--grid_size', type=int, default=5)
    # Output locations.
    ap.add_argument('--tensorboard_dir', type=str, default='tensorboard', help='save tensorboard infos')
    ap.add_argument('--result_dir', type=str, default='result', help='save result infos')
    ap.add_argument('--checkpoint', type=str, default='', help='model checkpoint for test')
    ap.add_argument('--display_count', type=int, default=1)
    ap.add_argument('--shuffle', action='store_true', help='shuffle input data')
    return ap.parse_args()
def test_import_usingMounts_badDelimitedPaths(datadir, tmp_path, script_runner):
    """xml2json must reject -v mount options that are not colon-delimited."""
    data = datadir.joinpath('xmlimport_absolutePaths')
    temp = tmp_path.joinpath('parsed_output.json')
    command = f"pyhf xml2json --hide-progress -v {data}::/absolute/path/to -v {data}/another/absolute/path/to --output-file {temp} {data.joinpath('config/example.xml')}"
    result = script_runner.run(shlex.split(command))
    # The command must fail cleanly, writing the reason to stderr only.
    assert not result.success
    assert result.stdout == ''
    assert 'is not a valid colon-separated option' in result.stderr
class Shubert01(Benchmark):
    """Shubert 1 benchmark function (separable product-of-sums form)."""

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
        self.global_optimum = [[-7.0835, 4.858]]
        self.fglob = -186.7309
        self.change_dimensionality = True

    def fun(self, x, *args):
        self.nfev += 1
        # For each coordinate: sum over j = 1..5 of j*cos((j+1)*x_i + j),
        # then take the product over all coordinates.
        j = atleast_2d(arange(1, 6)).T
        inner_sums = sum(j * cos((j + 1) * x + j), axis=0)
        return prod(inner_sums)
def grey_dilation(input, size=None, footprint=None, structure=None, output=None, mode='reflect', cval=0.0, origin=0):
    """Calculate a greyscale dilation.

    Implemented as a maximum filter: the footprint/structure is mirrored
    about its center and the origin adjusted accordingly, then the work is
    delegated to the shared min-or-max filter (trailing 0 selects "max").
    """
    if size is None and footprint is None and structure is None:
        raise ValueError('size, footprint, or structure must be specified')
    # Dilation uses the reflection of the structuring element.
    if structure is not None:
        structure = numpy.asarray(structure)
        structure = structure[tuple([slice(None, None, -1)] * structure.ndim)]
    if footprint is not None:
        footprint = numpy.asarray(footprint)
        footprint = footprint[tuple([slice(None, None, -1)] * footprint.ndim)]
    input = numpy.asarray(input)
    origin = _ni_support._normalize_sequence(origin, input.ndim)
    for ii, off in enumerate(origin):
        # Mirror the origin as well...
        origin[ii] = -off
        if footprint is not None:
            sz = footprint.shape[ii]
        elif structure is not None:
            sz = structure.shape[ii]
        elif numpy.isscalar(size):
            sz = size
        else:
            sz = size[ii]
        # ...and shift one extra sample for even-sized elements.
        if sz % 2 == 0:
            origin[ii] -= 1
    return _filters._min_or_max_filter(input, size, footprint, structure, output, mode, cval, origin, 0)
class DisCRnDataset(BaseDataset, __DisplMixin):
    """Discriminative cross-modal reasoning (DisCRn) dataset.

    Each annotation pairs two entities (image / audio / video / pc) and asks
    which of the two options a question refers to. ``__getitem__`` loads the
    media for both entities and builds the list of accepted answer strings.
    """

    def __init__(self, **kwargs):
        super().__init__(kwargs['vis_processor'], kwargs['text_processor'], kwargs['vis_root'], kwargs['ann_paths'])
        self.ds_name = kwargs['dataset_name']
        self.modalities = [str(m) for m in kwargs['modalities']]
        # Normalize the plural 'images' key to the singular form used below.
        if ('images' in self.modalities):
            self.modalities[self.modalities.index('images')] = 'image'
        self.npoints = 8192  # points per point cloud
        self.sample_points_num = self.npoints
        # NOTE(review): self-assignment is a no-op — looks like leftover code.
        self.annotation = self.annotation
        self.view = kwargs.get('view', 2)  # rendered-image camera view index
        # NOTE(review): this deepcopy is immediately overwritten by the
        # kwargs lookup on the next line — confirm it can be removed.
        self.classnames = copy.deepcopy(self.modalities)
        self.classnames = kwargs.get('classnames', ['first', 'second'])
        self.total = kwargs.get('total', 'all')  # optional cap on #annotations
        self.ground_truth = kwargs.get('ground_truth', False)
        self.shuffle_modalities = kwargs.get('shuffle_modalities', False)
        self.balance_labels = kwargs.get('balance_labels', True)
        self.raw = kwargs.get('raw', False)  # return unprocessed media when True
        if (self.total != 'all'):
            self.annotation = self.annotation[:self.total]
        # Non-image modalities carry their own root directory and processor;
        # every modality gets its list of existing sample ids.
        for modality in self.modalities:
            if ('image' not in modality):
                setattr(self, f'{modality}_root', kwargs[f'{modality}_root'])
                setattr(self, f'{modality}_processor', kwargs[f'{modality}_processor'])
            setattr(self, f'existing_{modality}_annotation', getattr(self, f'get_existing_{modality}_annotations')())
        # Keep only annotations whose both entities exist in every modality.
        self.sample_ids = set.intersection(*[set(getattr(self, f'existing_{modality}_annotation')) for modality in self.modalities])
        self.annotation = [ann for ann in self.annotation if ((ann['sample_ids'][0] in self.sample_ids) and (ann['sample_ids'][1] in self.sample_ids))]
        self._add_instance_ids()

    def get_existing_image_annotations(self):
        """Sample ids that have a rendered image for the current view."""
        if (self.ds_name == 'objaverse'):
            return [f.split('_')[0] for f in os.listdir(os.path.join(self.vis_root, f'compressed_imgs_view{self.view}/Cap3D_imgs_view{self.view}/'))]

    def get_image_path(self, ann, entity_index):
        """Path of the rendered image for the entity at *entity_index*."""
        if (self.ds_name == 'objaverse'):
            return os.path.join(self.vis_root, f'compressed_imgs_view{self.view}/Cap3D_imgs_view{self.view}/', (ann['sample_ids'][entity_index] + f'_{self.view}.jpeg'))

    def get_existing_audio_annotations(self):
        """Sample ids present in the audio root directory."""
        return [f.split('_')[0] for f in os.listdir(self.audio_root)]

    def get_audio_path(self, ann, entity_index):
        """Path of the audio clip (keyed by sample id and start second)."""
        if (self.ds_name == 'audiocaps'):
            return str(os.path.join(self.audio_root, (ann['sample_ids'][entity_index] + '_{}.flac'.format(int(ann['start_seconds'][entity_index])))))

    def get_video_path(self, ann, entity_index):
        """Path of the video clip (keyed by sample id and start second)."""
        if (self.ds_name == 'audiocaps'):
            return str(os.path.realpath(os.path.join(self.video_root, (ann['sample_ids'][entity_index] + '_{}.mp4'.format(int(ann['start_seconds'][entity_index]))))))

    def get_existing_video_annotations(self):
        """Sample ids present in the video root directory."""
        return [f.split('_')[0] for f in os.listdir(self.video_root)]

    def get_existing_pc_annotations(self):
        """Sample ids (directory names) present in the point-cloud root."""
        if (self.ds_name == 'objaverse'):
            return os.listdir(self.pc_root)

    def get_pc_path(self, ann, entity_index):
        """Path of the sampled point-cloud npz for the entity."""
        if (self.ds_name == 'objaverse'):
            return os.path.join(self.pc_root, ann['sample_ids'][entity_index], '{}_{}.npz'.format(ann['sample_ids'][entity_index], self.npoints))

    def __getitem__(self, index):
        ann = copy.deepcopy(self.annotation[index])
        N = 2  # a sample is always a pair of entities
        ann['question_id'] = ann['instance_id']
        ann[f'modalities'] = copy.deepcopy(self.modalities)
        # Drop samples with missing predicted captions.
        # NOTE(review): the inner [i] index ties caption position to modality
        # position — confirm this is the intended pairing.
        for (i, modality) in enumerate(self.modalities):
            if ((ann[f'captions_pred_{modality}'] == None) or (ann[f'captions_pred_{modality}'][i] == None)):
                return None
        # Single-modality runs compare two entities of the same modality.
        if (len(self.modalities) == 1):
            ann[f'modalities'] = ([self.modalities[0]] * N)
        if self.balance_labels:
            # Even indices get label 0, odd get label 1: flip the pair when
            # the stored label disagrees so labels are balanced overall.
            if (((index % 2) and (ann['label'] == 1)) or ((not (index % 2)) and (ann['label'] == 0))):
                ann['label'] = (1 - ann['label'])
                ann['properties'] = [ann['properties'][1], ann['properties'][0]]
                ann['captions'] = [ann['captions'][1], ann['captions'][0]]
                if self.shuffle_modalities:
                    ann['modalities'] = [ann['modalities'][1], ann['modalities'][0]]
                for modality in self.modalities:
                    ann[f'captions_pred_{modality}'] = [ann[f'captions_pred_{modality}'][1], ann[f'captions_pred_{modality}'][0]]
        # Baseline captions: ground truth if requested, else per-modality predictions.
        ann['baseline_captions'] = ([c for c in ann['captions']] if self.ground_truth else [ann[f"captions_pred_{ann['modalities'][0]}"][0], ann[f"captions_pred_{ann['modalities'][1]}"][1]])
        ann['baseline_captions'] = [(c.strip() if (c != None) else '') for c in ann['baseline_captions']]
        # Rephrase the question into the generic "which of the two options" form.
        ann['text_input'] = self.text_processor(f"{ann['question'].replace('which entity', 'which of the two options').replace('which object', 'which of the two options').replace('which image', 'which of the two options').replace('which audio', 'which of the two options').replace('audio', 'object').replace('image', 'object')}?".replace('??', '?'))
        # Accepted answer variants for "the first option" / "the second option".
        first_answers = [ann['modalities'][0], 'the first option.', 'the first', 'left one', '(a) left', '(a) left one', '(a)', 'a.', 'A.', 'a)', '(A)', 'Input A', 'Entity 1', 'Object 1', 'Entity A', 'Object A', 'left', 'first', '1st', 'input 1', '1', 'a', 'input a', 'the first', 'the left one']
        second_answers = [ann['modalities'][1], 'the second option.', 'the second.', 'second option', 'the second option', 'second option.', 'right one', '(b) right', '(b) right one', '(b)', 'b)', 'Input B', 'right', 'second', '2nd', 'input 2', '2', 'b', 'input b', 'Object 2', 'Entity B', 'Object B', 'the second', 'the right one', 'the second one']
        if (ann['label'] == 0):
            ann['answers'] = first_answers
        else:
            ann['answers'] = second_answers
        # Modality-specific synonyms are also accepted.
        if ('pc' in ann['answers']):
            ann['answers'].extend(['3d', '3d model', 'model', 'rendering', 'a 3d', 'a 3d model'])
        if ('image' in ann['answers']):
            ann['answers'].extend(['photo', 'picture'])
        if ('audio' in ann['answers']):
            ann['answers'].append('sound')
        ann['label'] = self.classnames[ann['label']]
        ann['answer'] = ann['answers']
        # Load and (optionally) process the media for both entities.
        for (i, modality) in enumerate(ann['modalities']):
            path = getattr(self, f'get_{modality}_path')(ann, i)
            if ('image' in modality):
                path = Image.open(path).convert('RGB')
            if self.raw:
                ann[modality] = path
                continue
            try:
                ann[modality] = getattr(self, f"{('vis' if ('image' in modality) else modality)}_processor")(path)
            except:
                # Processing failure: signal the collate fn to skip this sample.
                return None
        ann['discrn'] = True
        return ann

    def __len__(self):
        return len(self.annotation)
class ParserImageTar(Parser):
    """Dataset parser that serves image samples directly from a tar archive.

    The tar index is built eagerly at construction time; the tarfile handle
    itself is opened lazily so the parser stays safe across dataloader forks.
    """

    def __init__(self, root, class_map=''):
        super().__init__()
        class_to_idx = None
        if class_map:
            class_to_idx = load_class_map(class_map, root)
        assert os.path.isfile(root)
        self.root = root
        with tarfile.open(root) as tf:
            self.samples, self.class_to_idx = extract_tarinfo(tf, class_to_idx)
        self.imgs = self.samples
        self.tarfile = None  # opened lazily in __getitem__

    def __getitem__(self, index):
        if self.tarfile is None:
            self.tarfile = tarfile.open(self.root)
        tarinfo, target = self.samples[index]
        fileobj = self.tarfile.extractfile(tarinfo)
        return fileobj, target

    def __len__(self):
        return len(self.samples)

    def _filename(self, index, basename=False, absolute=False):
        name = self.samples[index][0].name
        return os.path.basename(name) if basename else name
class AllophoneState():
    """An allophone HMM state: a center phoneme with left/right context,
    word-boundary flags and an HMM sub-state index, plus a dense integer
    encode/decode (mixed-radix) used for state-tying indices.
    """

    id = None              # center phoneme (str) or None
    context_history = ()   # left context, nearest phoneme first
    context_future = ()    # right context, nearest phoneme first
    boundary = 0           # bit 0 = word-initial, bit 1 = word-final
    state = None           # HMM sub-state index (int) or None
    _attrs = ['id', 'context_history', 'context_future', 'boundary', 'state']

    def __init__(self, id=None, state=None):
        """:param id: center phoneme. :param state: HMM sub-state index."""
        self.id = id
        self.state = state

    def format(self):
        """Return a RASR-like textual form, e.g. ``aa{b+c}.0``."""
        s = ('%s{%s+%s}' % (self.id, ('-'.join(self.context_history) or '#'), ('-'.join(self.context_future) or '#')))
        # NOTE(review): the boundary markers appended here are empty strings —
        # the original marker symbols appear to have been lost; confirm what
        # should be appended for initial/final boundaries.
        if (self.boundary & 1):
            s += ''
        if (self.boundary & 2):
            s += ''
        if (self.state is not None):
            s += ('.%i' % self.state)
        return s

    def __repr__(self):
        return self.format()

    def copy(self):
        """Return a copy of this state including contexts and flags."""
        a = AllophoneState(id=self.id, state=self.state)
        for attr in self._attrs:
            if getattr(self, attr):
                setattr(a, attr, getattr(self, attr))
        return a

    def mark_initial(self):
        """Set the word-initial boundary bit."""
        self.boundary = (self.boundary | 1)

    def mark_final(self):
        """Set the word-final boundary bit."""
        self.boundary = (self.boundary | 2)

    def phoneme(self, ctx_offset, out_of_context_id=None):
        """Phoneme at *ctx_offset* (0 = center, >0 future, <0 history).

        Returns *out_of_context_id* when the offset is outside the stored
        context.
        """
        if (ctx_offset == 0):
            return self.id
        if (ctx_offset > 0):
            idx = (ctx_offset - 1)
            if (idx >= len(self.context_future)):
                return out_of_context_id
            return self.context_future[idx]
        if (ctx_offset < 0):
            idx = ((- ctx_offset) - 1)
            if (idx >= len(self.context_history)):
                return out_of_context_id
            return self.context_history[idx]
        assert False

    def set_phoneme(self, ctx_offset, phone_id):
        """Set the phoneme at *ctx_offset*; contexts must be grown outward
        (offset 1 before 2, -1 before -2, ...)."""
        if (ctx_offset == 0):
            self.id = phone_id
        elif (ctx_offset > 0):
            idx = (ctx_offset - 1)
            assert (idx == len(self.context_future))
            self.context_future = (self.context_future + (phone_id,))
        elif (ctx_offset < 0):
            idx = ((- ctx_offset) - 1)
            assert (idx == len(self.context_history))
            self.context_history = (self.context_history + (phone_id,))

    def phone_idx(self, ctx_offset, phone_idxs):
        """1-based phone index at *ctx_offset*; 0 encodes out-of-context."""
        phone = self.phoneme(ctx_offset=ctx_offset)
        if (phone is None):
            return 0
        else:
            return (phone_idxs[phone] + 1)

    def index(self, phone_idxs, num_states=3, context_length=1):
        """Encode this state as a unique dense integer.

        Mixed-radix encoding: phone digits in position order
        0, -1, +1, -2, +2, ... from most to least significant, then the HMM
        state, then the 2-bit boundary.

        :param dict[str,int] phone_idxs: phoneme -> index.
        """
        assert (max(len(self.context_history), len(self.context_future)) <= context_length)
        assert (0 <= self.boundary < 4)
        assert (0 <= self.state < num_states)
        num_phones = (max(phone_idxs.values()) + 1)
        num_phone_classes = (num_phones + 1)  # +1 for "out of context"
        result = 0
        for i in range(((2 * context_length) + 1)):
            pos = (i // 2)
            if ((i % 2) == 1):
                pos = ((- pos) - 1)
            result *= num_phone_classes
            result += self.phone_idx(ctx_offset=pos, phone_idxs=phone_idxs)
        result *= num_states
        result += self.state
        result *= 4
        result += self.boundary
        return result

    @classmethod
    def from_index(cls, index, phone_ids, num_states=3, context_length=1):
        """Decode an integer produced by :func:`index` back into a state.

        :param dict[int,str] phone_ids: index -> phoneme (inverse of the
            ``phone_idxs`` mapping used by :func:`index`).

        BUG FIX: previously (a) the ``@classmethod`` decorator was missing,
        so this alternate constructor was not callable as written, and
        (b) the phone digits were read least-significant-first while
        :func:`index` writes them most-significant-first, so the decoded
        center/context phonemes came out reversed.
        """
        num_phones = (max(phone_ids.keys()) + 1)
        num_phone_classes = (num_phones + 1)
        code = index
        result = cls()
        result.boundary = (code % 4)
        code //= 4
        result.state = (code % num_states)
        code //= num_states
        # Extract the phone digits (least significant first), then reverse so
        # they line up with the encode order: 0, -1, +1, -2, +2, ...
        digits = []
        for _ in range(((2 * context_length) + 1)):
            digits.append(code % num_phone_classes)
            code //= num_phone_classes
        digits.reverse()
        for i in range(((2 * context_length) + 1)):
            pos = (i // 2)
            if ((i % 2) == 1):
                pos = ((- pos) - 1)
            phone_idx = digits[i]
            result.set_phoneme(ctx_offset=pos, phone_id=(phone_ids[(phone_idx - 1)] if phone_idx else ''))
        return result

    @classmethod
    def from_classic_index(cls, index, allophones, max_states=6):
        """Decode a classic RASR emission index: each ``1 << 26`` offset
        above the allophone id encodes one additional HMM sub-state.

        BUG FIX: the ``@classmethod`` decorator was missing, making this
        alternate constructor uncallable as its signature implies.
        """
        emission = index
        state = 0
        while (state < max_states):
            if (emission >= (1 << 26)):
                emission -= (1 << 26)
                state += 1
            else:
                break
        a = allophones[emission].copy()
        a.state = state
        return a

    def __hash__(self):
        return hash(tuple([getattr(self, a) for a in self._attrs]))

    def __eq__(self, other):
        for a in self._attrs:
            if (getattr(self, a) != getattr(other, a)):
                return False
        return True

    def __ne__(self, other):
        return (not (self == other))
def _plot(experiences, specs):
    """Render one subplot per spec, plotting spec['function'] over *experiences*."""
    clear_output(True)
    ncols = 3
    nrows = math.ceil(len(specs) / ncols)
    plt.figure(figsize=(20, 6 * nrows))
    for i, spec in enumerate(specs):
        plt.subplot(nrows, ncols, i + 1)
        plt.title(spec['title'])
        metric = spec['function']
        plt.plot([metric(exp) for exp in experiences])
    plt.tight_layout()
    plt.show()
class Actor(nn.Module):
    """Policy head producing a truncated-normal action distribution with a
    tanh-squashed mean and a caller-supplied standard deviation."""

    def __init__(self, repr_dim, action_shape, feature_dim, hidden_dim):
        super().__init__()
        layers = [
            nn.Linear(feature_dim, hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_dim, action_shape[0]),
        ]
        self.policy = nn.Sequential(*layers)
        self.apply(utils.weight_init)

    def forward(self, obs, std):
        # Squash the mean into [-1, 1]; broadcast std to the mean's shape.
        mean = torch.tanh(self.policy(obs))
        scale = torch.ones_like(mean) * std
        return utils.TruncatedNormal(mean, scale)
def main(args):
    """Run DCFM co-saliency inference over the test dataset(s) and save
    per-image prediction maps under args.save_root."""
    device = torch.device('cuda')
    model = DCFM()
    model = model.to(device)
    # Prefer the best checkpoint; fall back to the default weights file.
    # NOTE(review): the bare except also swallows unrelated load errors
    # (e.g. a corrupt checkpoint) — consider narrowing to FileNotFoundError.
    try:
        modelname = os.path.join(args.param_root, 'best_ep198_Smeasure0.7019.pth')
        dcfmnet_dict = torch.load(modelname)
        print('loaded', modelname)
    except:
        dcfmnet_dict = torch.load(os.path.join(args.param_root, 'dcfm.pth'))
    model.to(device)
    model.dcfmnet.load_state_dict(dcfmnet_dict)
    model.eval()
    model.set_mode('test')
    # NOTE(review): tensor2pil is never used below — confirm it can be removed.
    tensor2pil = transforms.ToPILImage()
    for testset in ['CoCA']:
        if (testset == 'CoCA'):
            test_img_path = './data/images/CoCA/'
            test_gt_path = './data/gts/CoCA/'
            saved_root = os.path.join(args.save_root, 'CoCA')
        elif (testset == 'CoSOD3k'):
            test_img_path = './data/images/CoSOD3k/'
            test_gt_path = './data/gts/CoSOD3k/'
            saved_root = os.path.join(args.save_root, 'CoSOD3k')
        elif (testset == 'CoSal2015'):
            test_img_path = './data/images/CoSal2015/'
            test_gt_path = './data/gts/CoSal2015/'
            saved_root = os.path.join(args.save_root, 'CoSal2015')
        else:
            # NOTE(review): this branch leaves the test paths unset (NameError
            # on the next line); unreachable with the current testset list.
            print('Unkonwn test dataset')
            print(args.dataset)
        test_loader = get_loader(test_img_path, test_gt_path, args.size, 1, istrain=False, shuffle=False, num_workers=8, pin=True)
        for batch in tqdm(test_loader):
            # Each batch is one image group: squeeze away the group dimension.
            inputs = batch[0].to(device).squeeze(0)
            gts = batch[1].to(device).squeeze(0)
            subpaths = batch[2]
            ori_sizes = batch[3]
            scaled_preds = model(inputs, gts)
            # Use the final (highest-resolution) prediction of the cascade.
            scaled_preds = torch.sigmoid(scaled_preds[(- 1)])
            os.makedirs(os.path.join(saved_root, subpaths[0][0].split('/')[0]), exist_ok=True)
            num = gts.shape[0]
            for inum in range(num):
                subpath = subpaths[inum][0]
                ori_size = (ori_sizes[inum][0].item(), ori_sizes[inum][1].item())
                # Upsample the prediction back to the original image size.
                res = nn.functional.interpolate(scaled_preds[inum].unsqueeze(0), size=ori_size, mode='bilinear', align_corners=True)
                save_tensor_img(res, os.path.join(saved_root, subpath))
def CalculateTransitionPolarity(ProteinSequence):
    """Compute the CTD transition descriptor of a protein sequence for the
    Polarity property (delegates to the generic CalculateTransition)."""
    return CalculateTransition(ProteinSequence, _Polarity, '_Polarity')
def evaluate_function(u, x):
    """Evaluate a (FEniCS/DOLFIN) function ``u`` at point ``x``, MPI-safely.

    In serial, evaluates directly. In parallel, only the rank(s) owning the
    cell closest to ``x`` evaluate; rank 0 checks the gathered values agree
    and the result is broadcast to every rank.
    """
    # BUGFIX: the mesh must be bound to a local name — the original fetched it
    # only to build `comm` and then referenced an undefined `mesh` below.
    mesh = u.function_space().mesh()
    comm = mesh.mpi_comm()
    if comm.size == 1:
        return u(*x)
    # Find the cell closest to x; only ranks actually containing it evaluate.
    (cell, distance) = mesh.bounding_box_tree().compute_closest_entity(Point(*x))
    u_eval = u(*x) if distance < DOLFIN_EPS else None
    computed_u = comm.gather(u_eval, root=0)
    if comm.rank == 0:
        global_u_evals = np.array([y for y in computed_u if y is not None], dtype=np.double)
        # All owning ranks must agree on the value (up to round-off).
        assert np.all(np.abs(global_u_evals[0] - global_u_evals) < 1e-09)
        computed_u = global_u_evals[0]
    else:
        computed_u = None
    computed_u = comm.bcast(computed_u, root=0)
    return computed_u
class GCAlgebra_multigraded(GCAlgebra):
    """Graded-commutative algebra whose generators carry multi-degrees.

    ``degrees`` is a sequence of degree vectors, one per generator; the
    underlying singly-graded structure uses each vector's total degree.
    """
    def __init__(self, base, degrees, names=None, R=None, I=None, category=None):
        """Initialize the base algebra on total degrees and store the multi-degrees."""
        total_degs = [total_degree(d) for d in degrees]
        GCAlgebra.__init__(self, base, R=R, I=I, names=names, degrees=total_degs, category=category)
        self._degrees_multi = degrees
        # Rank of the grading group = length of one degree vector.
        self._grading_rank = len(list(degrees[0]))
    def _repr_(self):
        """Base repr with total degrees replaced by the multi-degrees."""
        s = GCAlgebra._repr_(self)
        old = '{}'.format(self._degrees)
        new = '{}'.format(self._degrees_multi)
        return s.replace(old, new)
    _base_repr = _repr_
    def quotient(self, I, check=True):
        """Quotient by a homogeneous two-sided ideal, keeping the multi-grading.

        Raises ValueError if ``check`` is set and a generator of ``I`` is
        not homogeneous.
        """
        if check and any(not i.is_homogeneous() for i in I.gens()):
            raise ValueError('the ideal must be homogeneous')
        NCR = self.cover_ring()
        gens1 = list(self.defining_ideal().gens())
        gens2 = [i.lift() for i in I.gens()]
        gens = [g for g in (gens1 + gens2) if g != NCR.zero()]
        J = NCR.ideal(gens, side='twosided')
        # BUGFIX: __init__ signature is (base, degrees, names=..., R=..., I=...);
        # the original passed names and degrees in swapped positions.
        return GCAlgebra_multigraded(self.base_ring(), self._degrees_multi, names=self._names, R=NCR, I=J)
    def _coerce_map_from_(self, other):
        """Coerce only from multigraded algebras with identical multi-degrees."""
        if isinstance(other, GCAlgebra_multigraded):
            if self._degrees_multi != other._degrees_multi:
                return False
        elif isinstance(other, GCAlgebra):
            # A plain singly-graded algebra carries too little information.
            return False
        return super()._coerce_map_from_(other)
    def basis(self, n, total=False):
        """Basis in multi-degree ``n``; with ``total=True`` (or integer ``n``)
        the basis in total degree."""
        tot_basis = GCAlgebra.basis(self, total_degree(n))
        if total or (n in ZZ):
            return tot_basis
        G = AdditiveAbelianGroup([0] * self._grading_rank)
        n = G(vector(n))
        return [b for b in tot_basis if b.degree() == n]
    def differential(self, diff):
        """Return the multigraded differential defined by ``diff``."""
        return Differential_multigraded(self, diff)
    def cdg_algebra(self, differential):
        """Return the associated multigraded commutative DG algebra."""
        return DifferentialGCAlgebra_multigraded(self, differential)
    class Element(GCAlgebra.Element):
        def degree(self, total=False):
            """Multi-degree of a homogeneous element; total degree if ``total``.

            Raises ValueError for zero or non-homogeneous elements.
            """
            if total:
                return GCAlgebra.Element.degree(self)
            if self.is_zero():
                raise ValueError('the zero element does not have a well-defined degree')
            degrees = self.parent()._degrees_multi
            n = self.parent().ngens()
            exps = self.lift().dict().keys()
            # Multi-degree of each monomial = exponent-weighted sum of generator degrees.
            l = [sum(exp[i] * degrees[i] for i in range(n)) for exp in exps]
            if len(set(l)) == 1:
                return l[0]
            raise ValueError('this element is not homogeneous')
class TestOrion():
    """End-to-end tests for the Orion API (fit/detect/save/load/evaluate) on a dummy pipeline."""
    def setup_class(cls):
        # 100 flat training points, then 100 points containing a 10-sample spike.
        cls.clean = pd.DataFrame({'timestamp': list(range(100)), 'value': ([1] * 100)})
        cls.anomalous = pd.DataFrame({'timestamp': list(range(100, 200)), 'value': ((([1] * 45) + ([10] * 10)) + ([1] * 45))})
        cls.events = pd.DataFrame([{'start': 145, 'end': 155, 'severity': 9.0}], columns=['start', 'end', 'severity'])
        cls.all_data = pd.concat((cls.clean, cls.anomalous))
        # Severity differs when fitting on the full (clean + anomalous) series.
        cls.all_events = pd.DataFrame([{'start': 145, 'end': 155, 'severity': 4.275}], columns=['start', 'end', 'severity'])
    def setup(self):
        # Fresh instance per test so state never leaks between tests.
        self.orion = Orion('dummy')
    def test_fit(self):
        """fit() on clean data must not raise."""
        self.orion.fit(self.clean)
    def test_detect(self):
        """detect() finds the injected spike with the expected severity."""
        self.orion.fit(self.clean)
        events = self.orion.detect(self.anomalous)
        pd.testing.assert_frame_equal(self.events, events)
    def test_detect_no_visualization(self):
        """visualization=True on a pipeline without visual outputs returns an empty dict."""
        self.orion.fit(self.clean)
        (events, visualization) = self.orion.detect(self.anomalous, visualization=True)
        pd.testing.assert_frame_equal(self.events, events)
        assert (visualization == {})
    def test_detect_visualization(self):
        """A pipeline declaring visualization outputs returns them alongside events."""
        pipeline = load_pipeline('dummy')
        pipeline['outputs'] = {'visualization': [{'name': 'y_hat', 'variable': 'orion.primitives.estimators.MeanEstimator#1.y'}]}
        orion = Orion(pipeline)
        orion.fit(self.clean)
        (events, visualization) = orion.detect(self.anomalous, visualization=True)
        pd.testing.assert_frame_equal(self.events, events)
        assert isinstance(visualization, dict)
        assert ('y_hat' in visualization)
        y_hat = visualization['y_hat']
        # The dummy mean estimator predicts the training mean (all ones).
        np.testing.assert_array_equal(y_hat, np.ones(len(self.anomalous)))
    def test_fit_detect(self):
        """fit_detect() combines fit and detect on the same data."""
        events = self.orion.fit_detect(self.all_data)
        pd.testing.assert_frame_equal(self.all_events, events)
    def test_save_load(self, tmpdir):
        """save()/load() round-trips to an equal Orion instance (creating dirs as needed)."""
        path = os.path.join(tmpdir, 'some/path.pkl')
        self.orion.save(path)
        new_orion = Orion.load(path)
        assert (new_orion == self.orion)
    def test_evaluate(self):
        """evaluate() against the known ground truth scores perfectly."""
        self.orion.fit(self.clean)
        scores = self.orion.evaluate(data=self.anomalous, ground_truth=self.events)
        expected = pd.Series({'accuracy': 1.0, 'f1': 1.0, 'recall': 1.0, 'precision': 1.0})
        pd.testing.assert_series_equal(expected, scores)
    def test_evaluate_fit(self):
        """evaluate(fit=True) fits on the evaluation data itself."""
        scores = self.orion.evaluate(data=self.all_data, ground_truth=self.all_events, fit=True)
        expected = pd.Series({'accuracy': 1.0, 'f1': 1.0, 'recall': 1.0, 'precision': 1.0})
        pd.testing.assert_series_equal(expected, scores)
    def test_evaluate_train_data(self):
        """evaluate(fit=True, train_data=...) fits on separate training data."""
        scores = self.orion.evaluate(data=self.anomalous, ground_truth=self.events, fit=True, train_data=self.clean)
        expected = pd.Series({'accuracy': 1.0, 'f1': 1.0, 'recall': 1.0, 'precision': 1.0})
        pd.testing.assert_series_equal(expected, scores)
def test_eval_double2():
    """Numeric evaluation with a free symbol: n(real=True) must raise, while
    plain n() evaluates sqrt(2) numerically (to ~1.414)."""
    x = Symbol('x')
    expr = sin(x) ** 2 + sqrt(2)
    raises(RuntimeError, lambda: expr.n(real=True))
    assert abs(expr.n() - x ** 2 - 1.414) < 0.001
def getLogger(log_name='', log_file='file.log'):
    """Return a DEBUG-level logger writing to both the console and ``log_file``.

    Idempotent: handlers are attached only the first time a given name is
    requested, so repeated calls never duplicate output.
    """
    logger = logging.getLogger(log_name)
    if not logger.handlers:
        fmt = logging.Formatter(logging.BASIC_FORMAT)
        console = logging.StreamHandler()
        console.setFormatter(fmt)
        logger.addHandler(console)
        # File handler truncates on open so each run starts a fresh log.
        to_file = logging.FileHandler(log_file, mode='w')
        to_file.setFormatter(fmt)
        to_file.setLevel(logging.DEBUG)
        logger.addHandler(to_file)
        logger.setLevel(logging.DEBUG)
    return logger
class ProphetNetTokenizer(PreTrainedTokenizer):
    """WordPiece tokenizer for ProphetNet (BERT-style basic + wordpiece passes).

    NOTE(review): ``vocab_size`` looks like it should carry ``@property`` (as in
    the upstream transformers class) — the decorator appears stripped; confirm.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', x_sep_token='[X_SEP]', pad_token='[PAD]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        """Load the vocab file and build the basic/wordpiece sub-tokenizers.

        Raises ValueError when ``vocab_file`` does not exist.
        """
        super().__init__(unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, mask_token=mask_token, x_sep_token=x_sep_token, **kwargs)
        # [X_SEP] must survive tokenization intact.
        self.unique_no_split_tokens.append(x_sep_token)
        if (not os.path.isfile(vocab_file)):
            raise ValueError("Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained model use `tokenizer = ProphetNetTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
        self.vocab = load_vocab(vocab_file)
        # Reverse mapping: id -> token, in vocab order.
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
    def vocab_size(self):
        """Number of entries in the loaded vocabulary."""
        return len(self.vocab)
    def get_vocab(self):
        """Return token->id mapping including tokens added after loading."""
        return dict(self.vocab, **self.added_tokens_encoder)
    def _tokenize(self, text):
        """Split text with the basic tokenizer (if enabled) then wordpiece."""
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
                # Protected tokens bypass the wordpiece pass.
                if (token in self.basic_tokenizer.never_split):
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens
    def _convert_token_to_id(self, token):
        """Map a token to its id, falling back to the unk id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token(self, index):
        """Map an id back to its token, falling back to the unk token."""
        return self.ids_to_tokens.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens):
        """Join tokens, gluing wordpiece continuations ('##') back together."""
        out_string = ' '.join(tokens).replace(' ##', '').strip()
        return out_string
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a 0/1 mask marking special-token positions for the sequence pair."""
        if already_has_special_tokens:
            if (token_ids_1 is not None):
                raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formated with special tokens for the model.')
            return list(map((lambda x: (1 if (x in [self.sep_token_id, self.cls_token_id]) else 0)), token_ids_0))
        if (token_ids_1 is None):
            return (([0] * len(token_ids_0)) + [1])
        # Pair layout: tokens_0 [SEP] tokens_1 [SEP]
        return (((([0] * len(token_ids_0)) + [1]) + ([0] * len(token_ids_1))) + [1])
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Segment ids: 0 for the first sequence (+sep), 1 for the second (+sep)."""
        sep = [self.sep_token_id]
        if (token_ids_1 is None):
            return (len((token_ids_0 + sep)) * [0])
        return ((len((token_ids_0 + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write the vocabulary (one token per line, in id order) and return its path.

        Warns if the stored indices are not consecutive.
        """
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        else:
            # A non-directory path is treated as the target file name itself.
            vocab_file = (((filename_prefix + '-') if filename_prefix else '') + save_directory)
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for (token, token_index) in sorted(self.vocab.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    logger.warning('Saving vocabulary to {}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!'.format(vocab_file))
                    index = token_index
                writer.write((token + '\n'))
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Append separator tokens: A [SEP] or A [SEP] B [SEP]."""
        if (token_ids_1 is None):
            return (token_ids_0 + [self.sep_token_id])
        sep = [self.sep_token_id]
        return (((token_ids_0 + sep) + token_ids_1) + sep)
class OptimizerAdam(Optimizer):
    """Adam optimizer wrapping the BinaryBrain core implementation."""

    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, dtype=bb.DType.FP32):
        """Look up the dtype-specialized core Adam object and hand it to the base class."""
        factory = bb.search_core_object('OptimizerAdam', [dtype])
        core = factory.create(learning_rate=learning_rate, beta1=beta1, beta2=beta2)
        super(OptimizerAdam, self).__init__(core_optimizer=core)
class TrackNorms(pl.Callback):
    """Lightning callback that records per-parameter squared-mean gradient
    norms after backward and logs them after each training step."""

    def on_after_training_step(self, batch, batch_idx, trainer: pl.Trainer, pl_module: pl.LightningModule):
        # Log whatever was stashed by on_after_backward (empty dict when tracking is off).
        metrics = {}
        metrics.update(getattr(pl_module, '_grad_norms', {}))
        self.log_dict(metrics, on_step=True, on_epoch=False, prog_bar=False, add_dataloader_idx=False, sync_dist=True)

    def on_after_backward(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        if OmegaConf.select(trainer.hparams, 'train.track_grad_norms'):
            pl_module._grad_norms = {
                f'grad_norm.{name}': torch.mean(p.grad.data ** 2)
                for (name, p) in pl_module.named_parameters()
                if p.grad is not None
            }
def main():
    """Assemble the stage-2 ensemble feature matrix from stage-1 predictions.

    Reads the feature configuration for ``args.feat_type``, collects test-set
    predictions from each strong learner and each (datatrack, model, ssl)
    weak-learner combination, concatenates them column-wise, and writes the
    result to the stage-2 data directory as ``test-X.csv``.
    """
    args = get_arg()
    stage2_data_dir = ((Path('../out/ensemble-multidomain/data-stage2') / args.datatrack) / args.feat_type)
    stage1_result_base_dir = Path('../out/ensemble-multidomain/stage1')
    feat_conf = yaml.safe_load(open('./stage2-method/{}.yaml'.format(args.feat_type)))
    print(feat_conf)
    df_test_list = []
    for strong_learner in feat_conf['strong_learners']:
        stage1_result_dir = ((stage1_result_base_dir / args.datatrack) / strong_learner)
        # The OOD test phase uses 3-fold CV instead of the default.
        k_cv = (3 if (args.datatrack == 'testphase-ood') else K_CV)
        # NOTE(review): this local is unused — the call below passes
        # column_tag=strong_learner directly; confirm and remove.
        column_tag = strong_learner
        df_test = get_learner_data(stage1_result_dir=stage1_result_dir, pred_datatrack=args.datatrack, use_upper_lower=False, column_tag=strong_learner, k_cv=k_cv)
        df_test_list.append(df_test)
    for (train_datatrack, model_type, ssl_type) in itertools.product(feat_conf['weak_learners']['datatracks'], feat_conf['weak_learners']['model_types'], feat_conf['weak_learners']['ssl_types']):
        # 'autogp' resolves to a concrete GP variant depending on the datatrack.
        if (model_type == 'autogp'):
            if (train_datatrack == 'phase1-main'):
                model_type = 'svgp'
            else:
                model_type = 'exactgp'
        use_cv_result = ((args.datatrack == train_datatrack) or train_datatrack.startswith('phase1-all'))
        # GP models also provide upper/lower prediction bounds.
        use_upper_lower = (model_type in ['svgp', 'exactgp'])
        stage1_result_dir = ((stage1_result_base_dir / train_datatrack) / f'{model_type}-{ssl_type}')
        column_tag = f'{train_datatrack}---{model_type}---{ssl_type}'
        df_test = get_learner_data(stage1_result_dir=stage1_result_dir, pred_datatrack=args.datatrack, use_upper_lower=use_upper_lower, column_tag=column_tag)
        df_test_list.append(df_test)
    df_test_all = pd.concat(df_test_list, axis=1)
    df_test_all.sort_index(inplace=True)
    print('Columns: {}'.format(df_test_all.columns))
    print('Test: {}'.format(df_test_all.shape))
    os.makedirs(stage2_data_dir, exist_ok=True)
    df_test_all.to_csv((stage2_data_dir / 'test-X.csv'))
def filter_by_action(value: NDArray, action: NDArray, action_size: int) -> NDArray:
    """Select, for each row of ``value``, the entry at the taken action.

    Implemented as a one-hot mask multiply followed by a row sum, so the
    result has shape ``(batch,)``.
    """
    one_hot = np.eye(action_size)[np.asarray(action).reshape(-1)]
    return np.sum(value * one_hot, axis=1)
class IteratorTimer():
    """Iterator wrapper that times how long producing each item takes.

    After each ``next()`` the elapsed wall-clock time is available as
    ``self.last_duration`` (seconds).
    """
    def __init__(self, iterable):
        self.iterable = iterable
        self.iterator = iter(self.iterable)
    def __iter__(self):
        return self
    def __len__(self):
        return len(self.iterable)
    def __next__(self):
        start = time.time()
        # BUGFIX: Python 3 iterators have no .next() method; the original
        # `self.iterator.next()` raised AttributeError. Use the builtin.
        n = next(self.iterator)
        self.last_duration = (time.time() - start)
        return n
    # Python 2-style alias kept for backward compatibility.
    next = __next__
# NOTE(review): `_on_pypy` looks like the residue of a stripped decorator
# (e.g. a pytest skip/xfail marker for PyPy) — confirm against the upstream test file.
_on_pypy
def test_alive_gc(capture):
    """keep_alive: the child must be released together with the parent even
    when the parent is only reachable through a reference cycle."""
    n_inst = ConstructorStats.detail_reg_inst()
    p = m.ParentGC()
    p.addChildKeepAlive(m.Child())
    # Parent + child are both registered.
    assert (ConstructorStats.detail_reg_inst() == (n_inst + 2))
    # Build a self-referencing cycle so only the GC can collect the parent.
    lst = [p]
    lst.append(lst)
    with capture:
        del p, lst
    assert (ConstructorStats.detail_reg_inst() == n_inst)
    assert (capture == '\n    Releasing parent.\n    Releasing child.\n    ')
class NoClassDataset(torch.utils.data.Dataset):
    """Dataset adapter that drops labels and yields HWC uint8 numpy images.

    Wraps a dataset of float image tensors (optionally paired with labels);
    items are scaled from [0, 1] to [0, 255] and converted to channel-last.
    """

    def __init__(self, dataset, length=None):
        self.dataset = dataset
        # Allow an explicit length override (e.g. for virtual repetition).
        if length is None:
            self.length = len(dataset)
        else:
            self.length = length

    def __getitem__(self, index):
        item = self.dataset[index]
        # Discard the label when the underlying dataset returns (image, label).
        if isinstance(item, (tuple, list)):
            item = item[0]
        img = item.mul(255).clamp_(0, 255).to(torch.uint8)
        return img.permute(1, 2, 0).numpy()

    def __len__(self):
        return self.length
def compute_micro_stats(values_a, values_b, eps=1e-08):
    """Micro-averaged ratio of the totals: sum(a) / (sum(a) + sum(b)).

    ``eps`` guards against division by zero when both totals are zero.
    """
    total_a = np.sum(values_a)
    total_b = np.sum(values_b)
    return total_a / (total_a + total_b + eps)
class ProgramException(Enum):
    """Failure preconditions a program step can report (e.g. an action was
    attempted on an object that was not in the required state)."""
    NOT_CLOSED = 1
    NOT_OPEN = 2
    NOT_SITTING = 3
    NOT_LYING = 4
    NOT_CLOSE = 5
    NOT_FACING = 6
    SITTING = 7
    NOT_OFF = 8
    NOT_ON = 9
    NOT_PLUGGED_OUT = 10
    OCCUPIED = 11
    UNPLUGGED = 12
    STILL_ON = 13
    DOOR_CLOSED = 14
    INSIDE_CLOSED = 15
    FREE_HAND = 16
def validate_csrf(data, secret_key=None, time_limit=None, token_key=None):
    """Validate a CSRF token against the value stored in the session.

    Args:
        data: the token supplied by the client.
        secret_key: overrides ``WTF_CSRF_SECRET_KEY`` / app secret key.
        time_limit: overrides ``WTF_CSRF_TIME_LIMIT`` (seconds, default 3600).
        token_key: overrides ``WTF_CSRF_FIELD_NAME`` (default 'csrf_token').

    Raises:
        ValidationError: when the token is missing, expired, invalid, or does
            not match the session token.
    """
    # stdlib constant-time compare; werkzeug.security.safe_str_cmp was removed
    # in Werkzeug 2.1 and hmac.compare_digest is its documented replacement.
    import hmac
    secret_key = _get_config(secret_key, 'WTF_CSRF_SECRET_KEY', current_app.secret_key, message='A secret key is required to use CSRF.')
    field_name = _get_config(token_key, 'WTF_CSRF_FIELD_NAME', 'csrf_token', message='A field name is required to use CSRF.')
    time_limit = _get_config(time_limit, 'WTF_CSRF_TIME_LIMIT', 3600, required=False)
    if not data:
        raise ValidationError('The CSRF token is missing.')
    if field_name not in session:
        raise ValidationError('The CSRF session token is missing.')
    s = URLSafeTimedSerializer(secret_key, salt='wtf-csrf-token')
    try:
        token = s.loads(data, max_age=time_limit)
    except SignatureExpired:
        raise ValidationError('The CSRF token has expired.')
    except BadData:
        raise ValidationError('The CSRF token is invalid.')
    # Constant-time comparison avoids timing side channels.
    if not hmac.compare_digest(session[field_name], token):
        raise ValidationError('The CSRF tokens do not match.')
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 -> 3x3 -> 1x1) with optional stochastic
    depth on the residual branch.

    Attribute names (conv1/bn1/...) are kept so pretrained state_dicts load.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1, norm_layer=None, drop_path_rate=0.0):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Width of the middle 3x3 conv, scaled for wide/grouped variants.
        width = int(planes * (base_width / 64.0)) * groups
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        # Stochastic depth: randomly drops the residual branch during training.
        self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()

    def forward(self, x):
        shortcut = x
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        # Project the shortcut when shape/stride changes.
        if self.downsample is not None:
            shortcut = self.downsample(x)
        y = shortcut + self.drop_path(y)
        return self.relu(y)
class TFConvBertForSequenceClassification():
    """Import-time placeholder: any use raises a helpful error when TensorFlow
    is not installed (via ``requires_tf``).

    NOTE(review): upstream dummy objects declare ``from_pretrained`` as a
    ``@classmethod``; the decorator appears stripped here — confirm.
    """
    def __init__(self, *args, **kwargs):
        requires_tf(self)
    def from_pretrained(self, *args, **kwargs):
        requires_tf(self)
def test_bad_flow(barbell):
    """GraphWaveGenerator.flow must reject invalid batch_size / shuffle /
    repeat / num_parallel_calls arguments with precise error messages."""
    generator = GraphWaveGenerator(barbell, scales=(0.1, 2, 3, 4), degree=10)
    sample_points = np.linspace(0, 100, 25)
    # batch_size: wrong type, then invalid value.
    with pytest.raises(TypeError, match='batch_size: expected.*found float'):
        generator.flow(barbell.nodes(), sample_points, batch_size=4.5)
    with pytest.raises(ValueError, match='batch_size: expected.*found 0'):
        generator.flow(barbell.nodes(), sample_points, batch_size=0)
    # shuffle / repeat must be booleans, not ints.
    with pytest.raises(TypeError, match='shuffle: expected.*found int'):
        generator.flow(barbell.nodes(), sample_points, batch_size=1, shuffle=1)
    with pytest.raises(TypeError, match='repeat: expected.*found int'):
        generator.flow(barbell.nodes(), sample_points, batch_size=1, repeat=1)
    # num_parallel_calls: wrong type, then invalid value.
    with pytest.raises(TypeError, match='num_parallel_calls: expected.*found float'):
        generator.flow(barbell.nodes(), sample_points, batch_size=1, num_parallel_calls=2.2)
    with pytest.raises(ValueError, match='num_parallel_calls: expected.*found 0'):
        generator.flow(barbell.nodes(), sample_points, batch_size=1, num_parallel_calls=0)
class FixedIntervalVideoSchedule(object):
    """Schedule that fires on every ``interval``-th step (step 0 included)."""

    def __init__(self, interval):
        self.interval = interval

    def __call__(self, count):
        """Return True when ``count`` is a multiple of the interval."""
        return count % self.interval == 0
# NOTE(review): the line below looks like the residue of a stripped
# `@pytest.mark.parametrize` decorator — confirm against the upstream test file.
.parametrize('sampling_strategy, sampling_type, expected_result', [({3: 25, 1: 25, 2: 25}, 'under-sampling', OrderedDict({1: 25, 2: 25, 3: 25})), ({3: 100, 1: 100, 2: 100}, 'over-sampling', OrderedDict({1: 50, 2: 0, 3: 75}))])
def test_sampling_strategy_check_order(sampling_strategy, sampling_type, expected_result):
    """check_sampling_strategy must return class counts in sorted key order,
    regardless of the order in the requested strategy dict."""
    y = np.array(((([1] * 50) + ([2] * 100)) + ([3] * 25)))
    sampling_strategy_ = check_sampling_strategy(sampling_strategy, y, sampling_type)
    assert (sampling_strategy_ == expected_result)
def iter_cast(inputs, dst_type, return_type=None):
    """Cast every element of ``inputs`` to ``dst_type``.

    Returns a lazy map iterator by default, or ``return_type(...)`` (e.g.
    ``list``/``tuple``) when ``return_type`` is given.

    Raises TypeError when ``inputs`` is not iterable or ``dst_type`` is not a type.
    """
    if not isinstance(inputs, abc.Iterable):
        raise TypeError('inputs must be an iterable object')
    if not isinstance(dst_type, type):
        raise TypeError('"dst_type" must be a valid type')
    casted = map(dst_type, inputs)
    return casted if return_type is None else return_type(casted)
def run_pool(poolsize, chunksize):
    """Fetch article IDs from MongoDB and process them in parallel chunks.

    Args:
        poolsize: number of worker processes.
        chunksize: number of document IDs handed to each worker call.
    """
    client = utils.init_client(MONGO_ARGS)
    id_collection = client[DB_NAME][READ_COL]
    query = utils.prepare_query(FILTERS)
    document_ids = id_collection.find(query).distinct('_id')
    logger.info(f'Obtained ID list for {len(document_ids)} articles.')
    # Optional cap on the number of documents processed (0 = no limit).
    if DOC_LIMIT > 0:
        document_ids = document_ids[:DOC_LIMIT]
    logger.info(f'Processing {len(document_ids)} articles...')
    pool = Pool(processes=poolsize)
    try:
        pool.map(process_chunks, chunker(document_ids, chunksize=chunksize))
    finally:
        # BUGFIX: always close AND join so worker processes are reaped even
        # when map() raises; the original neither joined nor handled errors.
        pool.close()
        pool.join()
def add_boxes_by_rids(boxes_list, rids, scene):
    """Resolve region ids to indices into a deduplicated box list.

    For each rid, the object's (x, y, w, h) is converted to an [x1, y1, x2, y2]
    box; if the box is not yet in ``boxes_list`` it is appended (the list is
    mutated in place). Returns the index of each rid's box.
    """
    indices = []
    for rid in rids:
        obj = scene['objects'][rid]
        box = [obj['x'], obj['y'], obj['x'] + obj['w'], obj['y'] + obj['h']]
        try:
            idx = boxes_list.index(box)
        except ValueError:
            boxes_list.append(box)
            idx = len(boxes_list) - 1
        indices.append(idx)
    return indices
class SnipsDataset(Dataset):
    """SNIPS speech dataset: pairs wav files with tokenized transcripts.

    Utterances are filtered to the configured speakers, tokenized, and
    sorted by transcript length so adjacent indices can be bucketed into
    similarly-sized batches.
    """
    def __init__(self, split, tokenizer, bucket_size, path, num_workers=12, ascending=False, **kwargs):
        self.path = path
        self.bucket_size = bucket_size
        # Speaker whitelist comes from kwargs keyed by split name (split may
        # be a string or a list of split names).
        self.speaker_list = (kwargs[f'{split}_speakers'] if (type(split) == str) else kwargs[f'{split[0]}_speakers'])
        # Slot-filling tokenizers use the IOB transcript file; plain ASR uses all-trans.
        transcripts_file = open(join(self.path, ('all.iob.snips.txt' if ('-slot' in tokenizer.token_type) else 'all-trans.txt'))).readlines()
        transcripts = {}
        for line in transcripts_file:
            # Each line: "<utterance-id> <token> <token> ..."
            line = line.strip().split(' ')
            index = line[0]
            sent = ' '.join(line[1:])
            transcripts[index] = sent
        file_list = []
        for s in split:
            split_list = list(Path(join(path, s)).rglob('*.wav'))
            new_list = []
            uf = 0
            for i in trange(len(split_list), desc='checking files'):
                # Utterance id = wav filename without extension.
                uid = str(split_list[i]).split('/')[(- 1)].split('.wav', 1)[0].split('/')[(- 1)]
                if (uid in transcripts):
                    # Keep only utterances whose id starts with a whitelisted speaker.
                    for spk in self.speaker_list:
                        if (uid[:len(spk)] == spk):
                            new_list.append(split_list[i])
                            break
                else:
                    print(split_list[i], 'Not Found')
                    uf += 1
            print(('%d wav file with label not found in text file!' % uf))
            split_list = new_list
            print(f'loaded audio from {len(self.speaker_list)} speakers {str(self.speaker_list)} with {len(split_list)} examples.')
            assert (len(split_list) > 0), 'No data found {}'.format(join(path, s))
            file_list += split_list
        text = [transcripts[str(f).split('.wav', 1)[0].split('/')[(- 1)]] for f in file_list]
        text = [tokenizer.encode(txt) for txt in tqdm(text, desc='tokenizing')]
        # Sort files and token sequences together by transcript length
        # (descending by default) for efficient bucketing.
        (self.file_list, self.text) = zip(*[(f_name, txt) for (f_name, txt) in sorted(zip(file_list, text), reverse=(not ascending), key=(lambda x: len(x[1])))])
    def __getitem__(self, index):
        # With bucketing, return a window of bucket_size consecutive items
        # (clamped so the window never runs past the end of the dataset).
        if (self.bucket_size > 1):
            index = min((len(self.file_list) - self.bucket_size), index)
            return [(f_path, txt) for (f_path, txt) in zip(self.file_list[index:(index + self.bucket_size)], self.text[index:(index + self.bucket_size)])]
        else:
            return (self.file_list[index], self.text[index])
    def __len__(self):
        return len(self.file_list)
def far_apart(c):
    """True when the pain and anatomy mentions are many tokens apart.

    The threshold depends on span order: > 10 tokens when pain ends after
    anatomy, > 16 when it ends before. Equal end offsets are never far apart.
    """
    distance = len(list(get_between_tokens(c)))
    if c.pain.char_end > c.anatomy.char_end:
        return distance > 10
    if c.pain.char_end < c.anatomy.char_end:
        return distance > 16
    return False
class TestAdagrad(serial.SerializedTestCase):
    """Hypothesis-driven tests of the Caffe2 Adagrad operator family against
    the Python reference (dense, sparse, and row-wise sparse variants).

    NOTE(review): the bare parenthesized argument lines below look like the
    residue of stripped ``@given(...)`` / ``@settings(...)`` hypothesis
    decorators — confirm against the upstream caffe2 test file.
    """
    (inputs=hu.tensors(n=3), lr=st.floats(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False), epsilon=st.floats(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False), weight_decay=st.sampled_from([0.0, 0.1]), **hu.gcs)
    (deadline=1000)
    def test_adagrad(self, inputs, lr, epsilon, weight_decay, gc, dc):
        """Dense Adagrad matches the reference implementation."""
        (param, momentum, grad) = inputs
        # Accumulated squared gradients must be non-negative.
        momentum = np.abs(momentum)
        lr = np.array([lr], dtype=np.float32)
        op = core.CreateOperator('Adagrad', ['param', 'momentum', 'grad', 'lr'], ['param', 'momentum'], epsilon=epsilon, weight_decay=weight_decay, device_option=gc)
        self.assertReferenceChecks(gc, op, [param, momentum, grad, lr], functools.partial(ref_adagrad, epsilon=epsilon, weight_decay=weight_decay))
    (inputs=hu.tensors(n=3), lr=st.floats(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False), epsilon=st.floats(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False), weight_decay=st.sampled_from([0.0, 0.1]), **hu.gcs_cpu_only)
    (deadline=10000)
    def test_adagrad_output_effective_lr(self, inputs, lr, epsilon, weight_decay, gc, dc):
        """Adagrad with the optional effective_lr output matches the reference."""
        (param, momentum, grad) = inputs
        momentum = np.abs(momentum)
        lr = np.array([lr], dtype=np.float32)
        op = core.CreateOperator('Adagrad', ['param', 'momentum', 'grad', 'lr'], ['param', 'momentum', 'effective_lr'], epsilon=epsilon, weight_decay=weight_decay, device_option=gc)
        self.assertReferenceChecks(gc, op, [param, momentum, grad, lr], functools.partial(ref_adagrad, epsilon=epsilon, output_effective_lr=True, weight_decay=weight_decay))
    (inputs=hu.tensors(n=3), lr=st.floats(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False), epsilon=st.floats(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False), **hu.gcs_cpu_only)
    (deadline=1000)
    def test_adagrad_output_effective_lr_and_update(self, inputs, lr, epsilon, gc, dc):
        """Adagrad with both effective_lr and update outputs matches the reference."""
        (param, momentum, grad) = inputs
        momentum = np.abs(momentum)
        lr = np.array([lr], dtype=np.float32)
        op = core.CreateOperator('Adagrad', ['param', 'momentum', 'grad', 'lr'], ['param', 'momentum', 'effective_lr', 'update'], epsilon=epsilon, device_option=gc)
        self.assertReferenceChecks(gc, op, [param, momentum, grad, lr], functools.partial(ref_adagrad, epsilon=epsilon, output_effective_lr_and_update=True))
    (suppress_health_check=[HealthCheck.filter_too_much], deadline=10000)
    (inputs=hu.tensors(n=3), lr=st.floats(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False), epsilon=st.floats(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False), weight_decay=st.sampled_from([0.0, 0.1]), **hu.gcs)
    def test_sparse_adagrad(self, inputs, lr, epsilon, weight_decay, gc, dc):
        """SparseAdagrad matches the reference (delegated to the shared helper)."""
        adagrad_sparse_test_helper(self, inputs, lr, epsilon, None, ref_adagrad, gc, dc, weight_decay=weight_decay)
    (inputs=hu.tensors(n=2), lr=st.floats(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False), epsilon=st.floats(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False), **hu.gcs)
    (deadline=1000)
    def test_sparse_adagrad_empty(self, inputs, lr, epsilon, gc, dc):
        """SparseAdagrad handles an empty gradient (zero indices), in both
        fp16 (GPU only) and fp32 embeddings."""
        (param, momentum) = inputs
        grad = np.empty(shape=((0,) + param.shape[1:]), dtype=np.float32)
        ref_using_fp16_values = [False]
        if (gc == hu.gpu_do):
            ref_using_fp16_values.append(True)
        for ref_using_fp16 in ref_using_fp16_values:
            if ref_using_fp16:
                print('test_sparse_adagrad_empty with half precision embedding')
                momentum_i = momentum.astype(np.float16)
                param_i = param.astype(np.float16)
            else:
                print('test_sparse_adagrad_empty with full precision embedding')
                momentum_i = momentum.astype(np.float32)
                param_i = param.astype(np.float32)
            adagrad_sparse_test_helper(self, [param_i, momentum_i, grad], lr, epsilon, None, ref_adagrad, gc, dc)
    (suppress_health_check=[HealthCheck.filter_too_much], deadline=1000)
    (inputs=hu.tensors(n=3), lr=st.floats(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False), epsilon=st.floats(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False), weight_decay=st.sampled_from([0.0, 0.1]), **hu.gcs)
    def test_row_wise_sparse_adagrad(self, inputs, lr, epsilon, weight_decay, gc, dc):
        """RowWiseSparseAdagrad matches the row-wise reference."""
        adagrad_sparse_test_helper(self, inputs, lr, epsilon, None, functools.partial(ref_adagrad, row_wise=True), gc, dc, row_wise=True, weight_decay=weight_decay)
    (inputs=hu.tensors(n=2), lr=st.floats(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False), epsilon=st.floats(min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False), **hu.gcs)
    (deadline=1000)
    def test_row_wise_sparse_adagrad_empty(self, inputs, lr, epsilon, gc, dc):
        """RowWiseSparseAdagrad handles an empty gradient."""
        (param, momentum) = inputs
        grad = np.empty(shape=((0,) + param.shape[1:]), dtype=np.float32)
        adagrad_sparse_test_helper(self, [param, momentum, grad], lr, epsilon, None, ref_adagrad, gc, dc, row_wise=True)
def mr(text_field, label_field, batch_size, **kargs):
    """Load the MR sentiment dataset and return (train, dev) iterators.

    Vocabularies for both fields are built over train+dev; the dev iterator
    uses a single full-size batch.
    """
    (train_data, dev_data) = MR.splits(text_field, label_field)
    for field in (text_field, label_field):
        field.build_vocab(train_data, dev_data)
    (train_iter, dev_iter) = data.Iterator.splits((train_data, dev_data), batch_sizes=(batch_size, len(dev_data)), **kargs)
    return (train_iter, dev_iter)
class ICNet3D(nn.Module):
    """3D encoder/dilated-bottleneck/decoder network producing a clip and an
    optional mask.

    ``opt.residual`` makes the clip output a residual on the input (no mask);
    ``opt.scaledown`` clamps the clip output to [-0.5, 0.5]. Decoder upsample
    sizes are hard-coded for inputs of shape (B, 3, 8, 128, 128).
    """
    def __init__(self, opt):
        super(ICNet3D, self).__init__()
        self.residual = opt.residual
        self.scaledown = opt.scaledown
        # Encoder: downsample spatially (and temporally once).
        self.ec0 = self.encoder(3, 64, kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 1), bias=False, batchnorm=True)
        self.ec1 = self.encoder(64, 128, kernel_size=(3, 3, 3), stride=(1, 2, 2), padding=(1, 1, 1), bias=False, batchnorm=True)
        # Bottleneck: increasing spatial dilation widens the receptive field.
        self.bt0 = self.encoder(128, 256, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False, batchnorm=True)
        self.bt1 = self.encoder(256, 256, kernel_size=(3, 3, 3), stride=(1, 1, 1), dilation=(1, 2, 2), padding=(1, 2, 2), bias=False, batchnorm=True)
        self.bt2 = self.encoder(256, 256, kernel_size=(3, 3, 3), stride=(1, 1, 1), dilation=(1, 4, 4), padding=(1, 4, 4), bias=False, batchnorm=True)
        self.bt3 = self.encoder(256, 256, kernel_size=(3, 3, 3), stride=(1, 1, 1), dilation=(1, 8, 8), padding=(1, 8, 8), bias=False, batchnorm=True)
        # Decoder: upsample and fuse with encoder skips.
        self.dc1 = self.decoder((4, 64, 64), 256, 128, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False, batchnorm=True)
        self.dc0 = self.decoder((8, 128, 128), (128 + 64), 64, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False, batchnorm=True)
        self.clip_p = nn.Conv3d((64 + 3), 3, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False)
        # The mask head only exists in non-residual mode.
        if not self.residual:
            self.mask_p = nn.Sequential(nn.Conv3d((64 + 3), 1, kernel_size=(3, 3, 3), stride=(1, 1, 1), padding=(1, 1, 1), bias=False), nn.Sigmoid())

    def encoder(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding=0, bias=True, batchnorm=False):
        """Conv3d (+ optional BatchNorm3d) + ReLU building block."""
        if batchnorm:
            layer = nn.Sequential(nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, dilation=dilation, padding=padding, bias=bias), nn.BatchNorm3d(out_channels), nn.ReLU())
        else:
            layer = nn.Sequential(nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, dilation=dilation, padding=padding, bias=bias), nn.ReLU())
        return layer

    def decoder(self, size, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True, batchnorm=False, mode='trilinear'):
        """Upsample-to-size + Conv3d (+ optional BatchNorm3d and LeakyReLU) block."""
        if batchnorm:
            layer = nn.Sequential(nn.Upsample(size=size, mode=mode), nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias), nn.BatchNorm3d(out_channels), nn.LeakyReLU(0.2))
        else:
            layer = nn.Sequential(nn.Upsample(size=size, mode=mode), nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=bias))
        return layer

    def forward(self, x):
        """Return ``(clip, mask)``; ``mask`` is None in residual mode."""
        e0 = self.ec0(x)
        e1 = self.ec1(e0)
        bt = self.bt0(e1)
        bt = self.bt1(bt)
        bt = self.bt2(bt)
        bt = self.bt3(bt)
        d1 = torch.cat((self.dc1(bt), e0), 1)
        del bt, e0
        d0 = torch.cat((self.dc0(d1), x), 1)
        del d1
        clip = self.clip_p(d0)
        if self.residual:
            # BUGFIX: the original deleted `x` before this residual add,
            # raising NameError whenever opt.residual was true.
            clip = clip + x
            mask = None
        else:
            mask = self.mask_p(d0)
        # Free the large intermediates only after their last use.
        del d0, x
        if self.scaledown:
            clip = torch.clamp(clip, min=(-0.5), max=0.5)
        return (clip, mask)
def change_edge_dest(graph: gr.OrderedDiGraph, node_a: Union[(nd.Node, gr.OrderedMultiDiConnectorGraph)], node_b: Union[(nd.Node, gr.OrderedMultiDiConnectorGraph)]):
    """Redirect every edge entering ``node_a`` so it enters ``node_b`` instead.

    Connector names and edge data are preserved; each edge is removed and
    re-added with the new destination.
    """
    # Materialize first: we mutate the graph while walking these edges.
    incoming = list(graph.in_edges(node_a))
    for edge in incoming:
        graph.remove_edge(edge)
        if isinstance(edge, gr.MultiConnectorEdge):
            graph.add_edge(edge.src, edge.src_conn, node_b, edge.dst_conn, edge.data)
        else:
            graph.add_edge(edge.src, node_b, edge.data)
class Wav2Vec2PhonemeCTCTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['input_ids', 'attention_mask']
def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', unk_token='<unk>', pad_token='<pad>', phone_delimiter_token=' ', word_delimiter_token=None, do_phonemize=True, phonemizer_lang='en-us', phonemizer_backend='espeak', **kwargs):
super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, word_delimiter_token=word_delimiter_token, phone_delimiter_token=phone_delimiter_token, do_phonemize=do_phonemize, phonemizer_lang=phonemizer_lang, phonemizer_backend=phonemizer_backend, **kwargs)
self._word_delimiter_token = word_delimiter_token
self._phone_delimiter_token = phone_delimiter_token
self.do_phonemize = do_phonemize
self.phonemizer_lang = phonemizer_lang
self.phonemizer_backend = phonemizer_backend
with open(vocab_file, encoding='utf-8') as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for (k, v) in self.encoder.items()}
def vocab_size(self) -> int:
return len(self.decoder)
def get_vocab(self) -> Dict:
return dict(self.encoder, **self.added_tokens_encoder)
def prepare_for_tokenization(self, text: str, is_split_into_words: bool=False, phonemizer_lang: Optional[str]=None, do_phonemize: Optional[bool]=None) -> Tuple[(str, Dict[(str, Any)])]:
if is_split_into_words:
text = (' ' + text)
if (do_phonemize is not None):
self.do_phonemize = do_phonemize
if (phonemizer_lang is not None):
self.phonemizer_lang = phonemizer_lang
return (text, {})
def _tokenize(self, text, **kwargs):
text = text.strip()
if self.do_phonemize:
text = text.lower()
text = self.phonemize(text, self.phonemizer_lang)
tokens = text.split(' ')
tokens = list(filter((lambda p: (p.strip() != '')), tokens))
return tokens
def phonemize(self, text: str, phonemizer_lang: Optional[str]=None) -> str:
requires_backends(self, 'phonemizer')
from phonemizer import phonemize
from phonemizer.separator import Separator
word_delimiter = ((self.word_delimiter_token + ' ') if (self.word_delimiter_token is not None) else '')
phonemizer_lang = (phonemizer_lang if (phonemizer_lang is not None) else self.phonemizer_lang)
separator = Separator(phone=self.phone_delimiter_token, word=word_delimiter, syllable='')
phonemes = phonemize(text, language=phonemizer_lang, backend=self.phonemizer_backend, separator=separator, language_switch='remove-flags')
phonemes = phonemes.strip()
return phonemes
def word_delimiter_token(self) -> str:
if ((self._word_delimiter_token is None) and self.verbose):
return None
return str(self._word_delimiter_token)
def word_delimiter_token_id(self) -> Optional[int]:
if (self._word_delimiter_token is None):
return None
return self.convert_tokens_to_ids(self.word_delimiter_token)
_delimiter_token.setter
def word_delimiter_token(self, value):
self._word_delimiter_token = value
_delimiter_token_id.setter
def word_delimiter_token_id(self, value):
self._word_delimiter_token = self.convert_tokens_to_ids(value)
def phone_delimiter_token(self) -> str:
if ((self._phone_delimiter_token is None) and self.verbose):
logger.error('Using phone_delimiter_token, but it is not set yet.')
return None
return str(self._phone_delimiter_token)
def phone_delimiter_token_id(self) -> Optional[int]:
if (self._phone_delimiter_token is None):
return None
return self.convert_tokens_to_ids(self.phone_delimiter_token)
_delimiter_token.setter
def phone_delimiter_token(self, value):
self._phone_delimiter_token = value
_delimiter_token_id.setter
def phone_delimiter_token_id(self, value):
self._phone_delimiter_token = self.convert_tokens_to_ids(value)
def _convert_token_to_id(self, token: str) -> int:
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index: int) -> str:
result = self.decoder.get(index, self.unk_token)
return result
def convert_tokens_to_string(self, tokens: List[str], group_tokens: bool=True, spaces_between_special_tokens: bool=False, filter_word_delimiter_token: bool=True) -> str:
if group_tokens:
tokens = [token_group[0] for token_group in groupby(tokens)]
filtered_tokens = list(filter((lambda token: (token != self.pad_token)), tokens))
if (filter_word_delimiter_token and (self.word_delimiter_token is not None)):
filtered_tokens = list(filter((lambda token: (token != self.word_delimiter_token)), filtered_tokens))
string = ' '.join(filtered_tokens).strip()
return string
def _decode(self, token_ids: List[int], skip_special_tokens: bool=False, clean_up_tokenization_spaces: bool=True, group_tokens: bool=True, filter_word_delimiter_token: bool=True, spaces_between_special_tokens: bool=False) -> str:
filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
result = []
for token in filtered_tokens:
if (skip_special_tokens and (token in self.all_special_ids)):
continue
result.append(token)
text = self.convert_tokens_to_string(result, group_tokens=group_tokens, spaces_between_special_tokens=spaces_between_special_tokens, filter_word_delimiter_token=filter_word_delimiter_token)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
if (not os.path.isdir(save_directory)):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
with open(vocab_file, 'w', encoding='utf-8') as f:
f.write(json.dumps(self.encoder, ensure_ascii=False))
return (vocab_file,)
def _add_tokens(self, new_tokens: Union[(List[str], List[AddedToken])], special_tokens: bool=False) -> int:
new_tokens = [str(tok) for tok in new_tokens]
tokens_to_add = []
for token in new_tokens:
if (not isinstance(token, str)):
raise ValueError(f'Token {token} has to be of type string, but is of type {type(token)}.')
assert isinstance(token, str)
if ((token != self.unk_token) and (self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token)) and (token not in tokens_to_add)):
tokens_to_add.append(token)
if self.verbose:
logger.info(f'Adding {token} to the vocabulary')
added_tok_encoder = dict(((tok, (len(self) + i)) for (i, tok) in enumerate(tokens_to_add)))
added_tok_decoder = {v: k for (k, v) in added_tok_encoder.items()}
self.added_tokens_encoder.update(added_tok_encoder)
self.added_tokens_decoder.update(added_tok_decoder)
for token in tokens_to_add:
if (len(token) > 1):
self._additional_special_tokens.append(AddedToken(token))
_insert_one_token_to_ordered_list(self.unique_no_split_tokens, token)
self._create_trie(self.unique_no_split_tokens)
return len(tokens_to_add) |
class EnlargedSampler(Sampler):
def __init__(self, dataset, num_replicas, rank, ratio=1):
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = math.ceil(((len(self.dataset) * ratio) / self.num_replicas))
self.total_size = (self.num_samples * self.num_replicas)
def __iter__(self):
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(self.total_size, generator=g).tolist()
dataset_size = len(self.dataset)
indices = [(v % dataset_size) for v in indices]
indices = indices[self.rank:self.total_size:self.num_replicas]
assert (len(indices) == self.num_samples)
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch |
class VGG19(nn.Module):
def __init__(self, vgg_path='models/vgg19-d01eb7cb.pth'):
super(VGG19, self).__init__()
vgg19_features = models.vgg19(pretrained=False)
vgg19_features.load_state_dict(torch.load(vgg_path), strict=False)
self.features = vgg19_features.features
for param in self.features.parameters():
param.requires_grad = False
def forward(self, x):
layers = {'3': 'relu1_2', '8': 'relu2_2', '17': 'relu3_4', '22': 'relu4_2', '26': 'relu4_4', '35': 'relu5_4'}
features = {}
for (name, layer) in self.features._modules.items():
x = layer(x)
if (name in layers):
features[layers[name]] = x
return features |
class DebertaTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self, vocab_file, do_lower_case=False, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', **kwargs):
super().__init__(unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
if (not os.path.isfile(vocab_file)):
raise ValueError("Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained model use `tokenizer = XxxTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.do_lower_case = do_lower_case
self.gpt2_tokenizer = GPT2Tokenizer(vocab_file)
def vocab_size(self):
return len(self.vocab)
def vocab(self):
return self.gpt2_tokenizer.vocab
def get_vocab(self):
vocab = self.vocab.copy()
vocab.update(self.get_added_vocab())
return vocab
def _tokenize(self, text):
if self.do_lower_case:
text = text.lower()
return self.gpt2_tokenizer.tokenize(text)
def _convert_token_to_id(self, token):
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
return (self.gpt2_tokenizer.sym(index) if (index < self.vocab_size) else self.unk_token)
def convert_tokens_to_string(self, tokens):
return self.gpt2_tokenizer.decode(tokens)
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
if (token_ids_1 is None):
return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
if already_has_special_tokens:
if (token_ids_1 is not None):
raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formated with special tokens for the model.')
return list(map((lambda x: (1 if (x in [self.sep_token_id, self.cls_token_id]) else 0)), token_ids_0))
if (token_ids_1 is not None):
return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
return (([1] + ([0] * len(token_ids_0))) + [1])
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if (token_ids_1 is None):
return (len(((cls + token_ids_0) + sep)) * [0])
return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
add_prefix_space = kwargs.pop('add_prefix_space', False)
if (is_split_into_words or add_prefix_space):
text = (' ' + text)
return (text, kwargs)
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
return self.gpt2_tokenizer.save_pretrained(save_directory, filename_prefix=filename_prefix) |
class _PathGraphingAstVisitor(_ASTVisitor):
def __init__(self):
super().__init__()
self.class_name = ''
self.graphs = {}
self.graph = None
self.tail = None
def reset(self):
self.graph = None
self.tail = None
def dispatch_list(self, node_list: Sequence[ast.AST]) -> None:
for node in node_list:
self.dispatch(node)
def visitFunctionDef(self, node: (ast.FunctionDef | ast.AsyncFunctionDef)) -> None:
entity = node.name
name = f'{node.lineno}:{node.col_offset}: {entity}'
if (self.graph is not None):
path_node = self.__append_path_node(name)
self.tail = path_node
self.dispatch_list(node.body)
bottom = _PathNode('')
self.graph.connect(self.tail, bottom)
self.graph.connect(path_node, bottom)
self.tail = bottom
else:
self.graph = _PathGraph(name, entity, node.lineno, node.col_offset)
path_node = _PathNode(name)
self.tail = path_node
self.dispatch_list(node.body)
self.graphs[f'{self.class_name}{node.name}'] = self.graph
self.reset()
visitAsyncFunctionDef = visitFunctionDef
def __append_path_node(self, name: str) -> (_PathNode | None):
if (not self.tail):
return None
assert (self.graph is not None)
path_node = _PathNode(name)
self.graph.connect(self.tail, path_node)
self.tail = path_node
return path_node
def visitSimpleStatement(self, node: ast.stmt) -> None:
name = f'Stmt {node.lineno}'
self.__append_path_node(name)
def default(self, node: ast.AST, *args) -> None:
if isinstance(node, ast.stmt):
self.visitSimpleStatement(node)
else:
super().default(node, *args)
def visitLoop(self, node: ((ast.AsyncFor | ast.For) | ast.While)) -> None:
name = f'Loop {node.lineno}'
self.__subgraph(node, name)
visitAsyncFor = visitFor = visitWhile = visitLoop
def visitIf(self, node: ast.If) -> None:
name = f'If {node.lineno}'
self.__subgraph(node, name)
def __subgraph(self, node, name, extra_blocks=()):
if (self.graph is None):
self.graph = _PathGraph(name, name, node.lineno, node.col_offset)
path_node: (_PathNode | None) = _PathNode(name)
self.__subgraph_parse(node, path_node, extra_blocks)
self.graphs[f'{self.class_name}{name}'] = self.graph
self.reset()
else:
path_node = self.__append_path_node(name)
self.__subgraph_parse(node, path_node, extra_blocks)
def __subgraph_parse(self, node, path_node, extra_blocks):
loose_ends = []
self.tail = path_node
self.dispatch_list(node.body)
loose_ends.append(self.tail)
for extra in extra_blocks:
self.tail = path_node
self.dispatch_list(extra.body)
loose_ends.append(self.tail)
if node.orelse:
self.tail = path_node
self.dispatch_list(node.orelse)
loose_ends.append(self.tail)
else:
loose_ends.append(path_node)
if path_node:
bottom = _PathNode('')
assert (self.graph is not None)
for loose_end in loose_ends:
self.graph.connect(loose_end, bottom)
self.tail = bottom
def visitTryExcept(self, node: ast.Try) -> None:
name = f'TryExcept {node.lineno}'
self.__subgraph(node, name, extra_blocks=node.handlers)
visitTry = visitTryExcept
def visitWith(self, node: (ast.With | ast.AsyncWith)) -> None:
name = f'With {node.lineno}'
self.__append_path_node(name)
self.dispatch_list(node.body)
visitAsyncWith = visitWith |
def test_eval_functional(expected_hxy, test_data_xy, functional_hxy):
hxy_test = functional_hxy.eval(test_data_xy)
assert (np.linalg.norm((hxy_test - expected_hxy)) < (1e-06 * np.linalg.norm(expected_hxy))), 'test_eval_functional: failed using list inputs. ' |
class LowPassFilterRotation(LowPassFilter):
def __init__(self, a=0.8):
super().__init__(a)
def __call__(self, x):
qs = quaternion.from_rotation_matrix(x.detach().cpu().numpy(), nonorthogonal=True).ravel()
if (self.x is None):
self.x = qs
else:
for i in range(len(qs)):
self.x[i] = quaternion.np.slerp_vectorized(self.x[i], qs[i], self.a)
x = torch.from_numpy(quaternion.as_rotation_matrix(self.x)).float().view_as(x)
return x |
class BEVGridTransform(nn.Module):
def __init__(self, *, input_scope: List[Tuple[(float, float, float)]], output_scope: List[Tuple[(float, float, float)]], prescale_factor: float=1) -> None:
super().__init__()
self.input_scope = input_scope
self.output_scope = output_scope
self.prescale_factor = prescale_factor
def forward(self, x: torch.Tensor) -> torch.Tensor:
if (self.prescale_factor != 1):
x = F.interpolate(x, scale_factor=self.prescale_factor, mode='bilinear', align_corners=False)
coords = []
for ((imin, imax, _), (omin, omax, ostep)) in zip(self.input_scope, self.output_scope):
v = torch.arange((omin + (ostep / 2)), omax, ostep)
v = ((((v - imin) / (imax - imin)) * 2) - 1)
coords.append(v.to(x.device))
(u, v) = torch.meshgrid(coords)
grid = torch.stack([v, u], dim=(- 1))
grid = torch.stack(([grid] * x.shape[0]), dim=0)
x = F.grid_sample(x, grid, mode='bilinear', align_corners=False)
return x |
class RealPythonInterpreterExecute(FunctionTool):
name = 'PythonInterpreterExecute'
summary = 'Execute a Python script.'
parameters: List[ArgParameter] = [{'name': 'script', 'type': 'string', 'description': 'The python script to execute.', 'required': True}]
returns: List[ArgReturn] = [{'name': 'result', 'type': 'string', 'description': 'The printed output of the script.'}]
exceptions: List[ArgException] = []
_tool: BaseTool = load_tools(['python_repl'])[0]
def parse_return(self, tool_output: str) -> str:
return json.dumps({'result': tool_output})
def _runtool(self, tool_input: Dict[(str, Any)]) -> Dict[(str, Any)]:
return self._tool._run(tool_input['script'])
def _aruntool(self, tool_input: Dict[(str, Any)]) -> Dict[(str, Any)]:
return self._tool._arun(tool_input['script']) |
def r_while(t):
(cond, stmt) = (t[2], t[5])
def fn(world, n):
if (n > MAX_FUNC_CALL):
return (world, n, False)
(world, n, s, c) = cond(world, n)
if (not s):
return (world, n, s)
while c:
(world, n, s) = stmt(world, n)
if (not s):
return (world, n, s)
(world, n, s, c) = cond(world, n)
if (not s):
return (world, n, s)
return (world, n, s)
return [('while_stmt', fn)] |
def inv_preemphasis(wav, k, inv_preemphasize=True):
if inv_preemphasize:
return signal.lfilter([1], [1, (- k)], wav)
return wav |
def main(config: ConvertLmConfig):
logger.setLevel(logging.INFO)
tokenizer = config.the_tokenizer
vocab_size = (config.override_vocab_size or len(tokenizer))
Vocab = Axis('vocab', vocab_size)
key = jax.random.PRNGKey(0)
with use_cpu_device(), Mesh([jax.local_devices(backend='cpu')[0]], 'dev'):
model: LmHeadModel = eqx.filter_eval_shape(config.model.build, Vocab, key=key)
(trainable, non_trainable) = eqx.partition(model, is_inexact_arrayish)
ckpt = load_checkpoint(trainable, None, config.checkpoint_path)
assert (ckpt is not None)
(trainable, _, _) = ckpt
model = eqx.combine(trainable, non_trainable)
if config.override_vocab_size:
model = model.resize_vocab(config.override_vocab_size)
converter = model.config.default_hf_checkpoint_converter.replaced(tokenizer=tokenizer)
converter.save_pretrained(model, config.output_dir, upload_to_hf=(config.upload_to_hf or False), save_tokenizer=config.save_tokenizer) |
def nudge_dataset(X, Y):
direction_vectors = [[[0, 1, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [1, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 1], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 1, 0]]]
def shift(x, w):
return convolve(x.reshape((8, 8)), mode='constant', weights=w).ravel()
X = np.concatenate(([X] + [np.apply_along_axis(shift, 1, X, vector) for vector in direction_vectors]))
Y = np.concatenate([Y for _ in range(5)], axis=0)
return (X, Y) |
def local_zero_density_congruence(self, p, m, Zvec=None, NZvec=None):
verbose(' In local_zero_density_congruence with ')
verbose((' Q is: \n' + str(self)))
verbose((' p = ' + str(p)))
verbose((' m = ' + str(m)))
verbose((' Zvec = ' + str(Zvec)))
verbose((' NZvec = ' + str(NZvec)))
if (Zvec is None):
Zvec = []
n = self.dim()
Sn = Set(range(n))
if ((Zvec is not None) and (len((Set(Zvec) + Sn)) > n)):
raise RuntimeError('Zvec must be a subset of {0, ..., n-1}.')
if ((NZvec is not None) and (len((Set(NZvec) + Sn)) > n)):
raise RuntimeError('NZvec must be a subset of {0, ..., n-1}.')
p2 = (p * p)
if ((m % p2) or (NZvec is not None)):
return 0
return (self.local_density_congruence(p, (m / p2), None, None) / (p ** (self.dim() - 2))) |
('/api/generate', methods=['GET'])
def do_generate():
generate_results = []
word = request.args.get('word')
wordtype = request.args.get('type')
genInput = word
if wordtype:
genInput += (('<' + wordtype) + '>')
infl = request.args.get('infl')
if infl:
genInput += (('<' + infl) + '>')
gens = generator.generate(genInput)
if (len(gens) == 0):
generate_results.append(genInput)
for gindex in range(len(gens)):
generate_results.append(gens[gindex][0])
return jsonify(word=word, type=wordtype, infl=infl, result=generate_results) |
def test_while_loop_with_break_continue():
A = while_loop_with_break_continue()
A_ref = np.array([0, 0, 2, 0, 4, 0, 6, 0, 8, 0], dtype=np.int32)
assert np.array_equal(A, A_ref) |
def test_cmesh_counts(filename_meshes):
from sfepy.discrete.fem import Mesh
from sfepy.discrete.fem.geometry_element import create_geometry_elements
from sfepy.discrete.common.extmods.cmesh import get_cmem_usage
gels = create_geometry_elements()
ok = True
for filename in filename_meshes:
basename = os.path.basename(filename)
(enum, esizes) = expected[basename]
tst.report(('mesh: %s' % basename))
mesh = Mesh.from_file(filename)
cmesh = mesh.cmesh
cmesh.set_local_entities(gels)
cmesh.setup_entities()
tst.report('dim:', cmesh.dim)
tst.report(('n_vertex: %d, n_edge: %d, n_face: %d, n_cell: %d' % tuple(cmesh.num)))
_ok = (enum == cmesh.num).all()
if (not _ok):
tst.report(('%s == %s failed!' % (enum, cmesh.num)))
ok = (ok and _ok)
dim = cmesh.dim
for ir in range((dim + 1)):
for ic in range((dim + 1)):
cmesh.setup_connectivity(ir, ic)
mem_usage1 = get_cmem_usage()[0]
if ((ir == dim) and (ic == 0)):
continue
cmesh.free_connectivity(ir, ic)
mem_usage2 = get_cmem_usage()[0]
cmesh.setup_connectivity(ir, ic)
mem_usage3 = get_cmem_usage()[0]
conn = cmesh.get_conn(ir, ic)
tst.report(('(%d, %d) : (%d, %d)' % (ir, ic, conn.num, conn.n_incident)))
sizes = nm.array([conn.num, conn.n_incident])
_ok = (esizes[(ir, ic)] == sizes).all()
if (not _ok):
tst.report(('%s == %s failed!' % (esizes, sizes)))
ok = (ok and _ok)
_ok1 = (mem_usage3 == mem_usage1)
_ok2 = (mem_usage3 > mem_usage2)
if (not (_ok1 and _ok2)):
tst.report(('unexpected memory usage! (%s)' % ([mem_usage1, mem_usage2, mem_usage3],)))
ok = (ok and (_ok1 and _ok2))
assert ok |
class AdditiveBlock(AdditiveCoupling):
def __init__(self, Fm, Gm=None, implementation_fwd=1, implementation_bwd=1):
warnings.warn('This class has been deprecated. Use the AdditiveCoupling class instead.', DeprecationWarning)
super(AdditiveBlock, self).__init__(Fm=Fm, Gm=Gm, implementation_fwd=implementation_fwd, implementation_bwd=implementation_bwd) |
def simSetObjectFloatParameter(objectHandle, parameter, value):
ret = lib.simSetObjectFloatParameter(objectHandle, parameter, value)
_check_set_object_parameter(ret)
_check_return(ret) |
def test_ListArray_NumpyArray():
v2a = ak.contents.listarray.ListArray(ak.index.Index(np.array([4, 100, 1], np.int64)), ak.index.Index(np.array([7, 100, 3, 200], np.int64)), ak.contents.numpyarray.NumpyArray(np.array([6.6, 4.4, 5.5, 7.7, 1.1, 2.2, 3.3, 8.8])))
resultv2 = v2a[np.array([1, (- 1)], np.int64)]
assert (to_list(resultv2) == [[], [4.4, 5.5]])
assert (v2a.to_typetracer()[np.array([1, (- 1)], np.int64)].form == resultv2.form) |
def train(epoch):
print(('\nEpoch: %d' % epoch))
net.train()
loss_pred_module.train()
train_loss = 0
correct = 0
total = 0
for (batch_idx, (inputs, targets)) in enumerate(trainloader):
(inputs, targets) = (inputs.to(device), targets.to(device))
optimizer_target.zero_grad()
optimizer_loss.zero_grad()
(outputs, loss_pred) = net(inputs)
loss = criterion(outputs, targets)
loss_pred = loss_pred_module(loss_pred)
loss_prediction_loss = loss_pred_criterion(loss_pred, loss)
target_loss = loss.mean()
if (epoch < 120):
loss = (loss_prediction_loss + target_loss)
loss.backward()
optimizer_target.step()
optimizer_loss.step()
else:
loss = target_loss
loss.backward()
optimizer_target.step()
train_loss += loss.item()
(_, predicted) = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(trainloader), ('Loss: %.3f | Acc: %.3f%% (%d/%d)' % ((train_loss / (batch_idx + 1)), ((100.0 * correct) / total), correct, total))) |
class DensePoseChartConfidencePredictorMixin():
def __init__(self, cfg: CfgNode, input_channels: int):
super().__init__(cfg, input_channels)
self.confidence_model_cfg = DensePoseConfidenceModelConfig.from_cfg(cfg)
self._initialize_confidence_estimation_layers(cfg, input_channels)
self._registry = {}
initialize_module_params(self)
def _initialize_confidence_estimation_layers(self, cfg: CfgNode, dim_in: int):
dim_out_patches = (cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES + 1)
kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL
if self.confidence_model_cfg.uv_confidence.enabled:
if (self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.IID_ISO):
self.sigma_2_lowres = ConvTranspose2d(dim_in, dim_out_patches, kernel_size, stride=2, padding=int(((kernel_size / 2) - 1)))
elif (self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.INDEP_ANISO):
self.sigma_2_lowres = ConvTranspose2d(dim_in, dim_out_patches, kernel_size, stride=2, padding=int(((kernel_size / 2) - 1)))
self.kappa_u_lowres = ConvTranspose2d(dim_in, dim_out_patches, kernel_size, stride=2, padding=int(((kernel_size / 2) - 1)))
self.kappa_v_lowres = ConvTranspose2d(dim_in, dim_out_patches, kernel_size, stride=2, padding=int(((kernel_size / 2) - 1)))
else:
raise ValueError(f'Unknown confidence model type: {self.confidence_model_cfg.confidence_model_type}')
if self.confidence_model_cfg.segm_confidence.enabled:
self.fine_segm_confidence_lowres = ConvTranspose2d(dim_in, 1, kernel_size, stride=2, padding=int(((kernel_size / 2) - 1)))
self.coarse_segm_confidence_lowres = ConvTranspose2d(dim_in, 1, kernel_size, stride=2, padding=int(((kernel_size / 2) - 1)))
def forward(self, head_outputs: torch.Tensor):
base_predictor_outputs = super().forward(head_outputs)
output = self._create_output_instance(base_predictor_outputs)
if self.confidence_model_cfg.uv_confidence.enabled:
if (self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.IID_ISO):
output.sigma_2 = self.interp2d(self.sigma_2_lowres(head_outputs))
elif (self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.INDEP_ANISO):
output.sigma_2 = self.interp2d(self.sigma_2_lowres(head_outputs))
output.kappa_u = self.interp2d(self.kappa_u_lowres(head_outputs))
output.kappa_v = self.interp2d(self.kappa_v_lowres(head_outputs))
else:
raise ValueError(f'Unknown confidence model type: {self.confidence_model_cfg.confidence_model_type}')
if self.confidence_model_cfg.segm_confidence.enabled:
output.fine_segm_confidence = (F.softplus(self.interp2d(self.fine_segm_confidence_lowres(head_outputs))) + self.confidence_model_cfg.segm_confidence.epsilon)
output.fine_segm = (base_predictor_outputs.fine_segm * torch.repeat_interleave(output.fine_segm_confidence, base_predictor_outputs.fine_segm.shape[1], dim=1))
output.coarse_segm_confidence = (F.softplus(self.interp2d(self.coarse_segm_confidence_lowres(head_outputs))) + self.confidence_model_cfg.segm_confidence.epsilon)
output.coarse_segm = (base_predictor_outputs.coarse_segm * torch.repeat_interleave(output.coarse_segm_confidence, base_predictor_outputs.coarse_segm.shape[1], dim=1))
return output
def _create_output_instance(self, base_predictor_outputs: Any):
PredictorOutput = decorate_predictor_output_class_with_confidences(type(base_predictor_outputs))
output = PredictorOutput(**base_predictor_outputs.__dict__, coarse_segm_confidence=None, fine_segm_confidence=None, sigma_1=None, sigma_2=None, kappa_u=None, kappa_v=None)
return output |
_torch
class XLMModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = ((XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple) if is_torch_available() else ())
all_generative_model_classes = ((XLMWithLMHeadModel,) if is_torch_available() else ())
class XLMModelTester(object):
def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, summary_type='last', use_proj=True, scope=None, bos_token_id=0):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_lengths = use_input_lengths
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.gelu_activation = gelu_activation
self.sinusoidal_embeddings = sinusoidal_embeddings
self.asm = asm
self.n_langs = n_langs
self.vocab_size = vocab_size
self.n_special = n_special
self.summary_type = summary_type
self.causal = causal
self.use_proj = use_proj
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.n_langs = n_langs
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.summary_type = summary_type
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.bos_token_id = bos_token_id
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = ids_tensor([self.batch_size, self.seq_length], 2).float()
input_lengths = None
if self.use_input_lengths:
input_lengths = ((ids_tensor([self.batch_size], vocab_size=2) + self.seq_length) - 2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
sequence_labels = None
token_labels = None
is_impossible_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
is_impossible_labels = ids_tensor([self.batch_size], 2).float()
config = XLMConfig(vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id)
return (config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, input_mask)
def check_loss_output(self, result):
self.parent.assertListEqual(list(result['loss'].size()), [])
def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, input_mask):
model = XLMModel(config=config)
model.to(torch_device)
model.eval()
outputs = model(input_ids, lengths=input_lengths, langs=token_type_ids)
outputs = model(input_ids, langs=token_type_ids)
outputs = model(input_ids)
sequence_output = outputs[0]
result = {'sequence_output': sequence_output}
self.parent.assertListEqual(list(result['sequence_output'].size()), [self.batch_size, self.seq_length, self.hidden_size])
def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, input_mask):
model = XLMWithLMHeadModel(config)
model.to(torch_device)
model.eval()
(loss, logits) = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
result = {'loss': loss, 'logits': logits}
self.parent.assertListEqual(list(result['loss'].size()), [])
self.parent.assertListEqual(list(result['logits'].size()), [self.batch_size, self.seq_length, self.vocab_size])
def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, input_mask):
model = XLMForQuestionAnsweringSimple(config)
model.to(torch_device)
model.eval()
outputs = model(input_ids)
outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
(loss, start_logits, end_logits) = outputs
result = {'loss': loss, 'start_logits': start_logits, 'end_logits': end_logits}
self.parent.assertListEqual(list(result['start_logits'].size()), [self.batch_size, self.seq_length])
self.parent.assertListEqual(list(result['end_logits'].size()), [self.batch_size, self.seq_length])
self.check_loss_output(result)
def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, input_mask):
model = XLMForQuestionAnswering(config)
model.to(torch_device)
model.eval()
outputs = model(input_ids)
(start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) = outputs
outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask)
outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels)
(total_loss,) = outputs
outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
(total_loss,) = outputs
result = {'loss': total_loss, 'start_top_log_probs': start_top_log_probs, 'start_top_index': start_top_index, 'end_top_log_probs': end_top_log_probs, 'end_top_index': end_top_index, 'cls_logits': cls_logits}
self.parent.assertListEqual(list(result['loss'].size()), [])
self.parent.assertListEqual(list(result['start_top_log_probs'].size()), [self.batch_size, model.config.start_n_top])
self.parent.assertListEqual(list(result['start_top_index'].size()), [self.batch_size, model.config.start_n_top])
self.parent.assertListEqual(list(result['end_top_log_probs'].size()), [self.batch_size, (model.config.start_n_top * model.config.end_n_top)])
self.parent.assertListEqual(list(result['end_top_index'].size()), [self.batch_size, (model.config.start_n_top * model.config.end_n_top)])
self.parent.assertListEqual(list(result['cls_logits'].size()), [self.batch_size])
def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, input_mask):
    """Build XLMForSequenceClassification and shape-check loss and logits."""
    model = XLMForSequenceClassification(config)
    model.to(torch_device)
    model.eval()
    # Unlabelled call returns only logits; labelled call prepends the loss.
    (logits,) = model(input_ids)
    (loss, logits) = model(input_ids, labels=sequence_labels)
    # The loss is a scalar; logits are (batch, num_sequence_labels).
    self.parent.assertListEqual(list(loss.size()), [])
    self.parent.assertListEqual(list(logits.size()), [self.batch_size, self.type_sequence_label_size])
def prepare_config_and_inputs_for_common(self):
    """Adapt prepare_config_and_inputs() output to the common-test format."""
    (config, input_ids, token_type_ids, input_lengths, *_unused) = self.prepare_config_and_inputs()
    inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
    return (config, inputs_dict)
def setUp(self):
    # Shared fixtures: the model tester drives the shape checks below, the
    # config tester runs the common XLMConfig serialization tests.
    self.model_tester = XLMModelTest.XLMModelTester(self)
    self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
def test_config(self):
    # Delegates to the shared ConfigTester suite.
    self.config_tester.run_common_tests()
def test_xlm_model(self):
    """Shape-check the bare XLM model."""
    self.model_tester.create_and_check_xlm_model(*self.model_tester.prepare_config_and_inputs())
def test_xlm_lm_head(self):
    """Shape-check the XLM language-modeling head."""
    self.model_tester.create_and_check_xlm_lm_head(*self.model_tester.prepare_config_and_inputs())
def test_xlm_simple_qa(self):
    """Shape-check the simple (span-logits) QA head."""
    self.model_tester.create_and_check_xlm_simple_qa(*self.model_tester.prepare_config_and_inputs())
def test_xlm_qa(self):
    """Shape-check the beam-search QA head."""
    self.model_tester.create_and_check_xlm_qa(*self.model_tester.prepare_config_and_inputs())
def test_xlm_sequence_classif(self):
    """Shape-check the sequence-classification head."""
    self.model_tester.create_and_check_xlm_sequence_classif(*self.model_tester.prepare_config_and_inputs())
def test_model_from_pretrained(self):
    """Smoke-test loading the first pretrained XLM checkpoint."""
    model_names = list(XLM_PRETRAINED_MODEL_ARCHIVE_MAP.keys())
    for model_name in model_names[:1]:
        model = XLMModel.from_pretrained(model_name, cache_dir=CACHE_DIR)
        self.assertIsNotNone(model)
class QueryOnVoxelHashGradQuery(PythonFunction):
    """nnabla PythonFunction: gradient w.r.t. `query` of a multi-level
    voxel-hash feature lookup, delegated to the `voxel_hash_feature_cuda`
    extension.

    Inputs:  [0] grad_output (incoming gradient of the lookup's output),
             [1] query (points; last axis holds spatial coordinates),
             [2] feature (hash-table feature storage).
    Output:  [0] gradient w.r.t. query, same shape as query.

    NOTE(review): the exact kernel semantics are assumed from the call
    sites; confirm against the CUDA extension's implementation.
    """

    def __init__(self, ctx, G0=16, growth_factor=1.5, T0=(2 ** 15), L=16, D=2, min_=[(- 1), (- 1), (- 1)], max_=[1, 1, 1], boundary_check=False):
        # G0: base grid resolution; growth_factor: per-level resolution scale;
        # T0: hash table size; L: number of levels; D: features per level;
        # min_/max_: bounds of the query domain (presumably -- confirm);
        # boundary_check is forwarded verbatim to every kernel call.
        # NOTE(review): mutable list defaults (min_/max_) are shared across
        # instances; harmless here since they are only read, but fragile.
        super(QueryOnVoxelHashGradQuery, self).__init__(ctx)
        self._G0 = G0
        self._growth_factor = growth_factor
        self._T0 = T0
        self._L = L
        self._D = D
        self._min = min_
        self._max = max_
        self._boundary_check = boundary_check

    def name(self):
        # Function name reported to nnabla.
        return self.__class__.__name__

    def min_outputs(self):
        return 1

    def setup_impl(self, inputs, outputs):
        # The gradient w.r.t. the query has the query's shape.
        query = inputs[1]
        outputs[0].reset_shape(query.shape, True)

    def forward_impl(self, inputs, outputs):
        """Compute d(output)/d(query) via the CUDA kernel."""
        grad_query = outputs[0]
        grad_output = inputs[0]
        query = inputs[1]
        feature = inputs[2]
        # Flatten all leading (batch-like) axes of the query into B.
        batch_sizes = query.shape[:(- 1)]
        B = np.prod(batch_sizes)
        G0 = self._G0
        growth_factor = self._growth_factor
        T0 = self._T0
        L = self._L
        D = self._D
        # One work item per (level, point) pair.
        N = (L * B)
        # The kernel expects grad_output laid out as (D*L, B).
        grad_output_data = F.reshape(grad_output.data, (B, (D * L)))
        grad_output_data = F.transpose(grad_output_data, (1, 0))
        grad_query_ptr = grad_query.data.data_ptr(np.float32, self.ctx)
        grad_output_ptr = grad_output_data.data_ptr(np.float32, self.ctx)
        query_ptr = query.data.data_ptr(np.float32, self.ctx)
        feature_ptr = feature.data.data_ptr(np.float32, self.ctx)
        voxel_hash_feature_cuda.grad_query(N, grad_query_ptr, grad_output_ptr, query_ptr, feature_ptr, G0, growth_factor, T0, L, D, self._min, self._max, self._boundary_check, False)

    def backward_impl(self, inputs, outputs, propagate_down, accum):
        """Second-order pass: propagate the gradient of grad_query back to
        grad_output (input 0) and feature (input 2).

        NOTE(review): there is no branch for propagate_down[1] (query), even
        though grad_depends_input_data() declares i == 1 as dependent --
        confirm whether that gradient is intentionally unimplemented.
        """
        grad_query = outputs[0]
        grad_output = inputs[0]
        query = inputs[1]
        feature = inputs[2]
        batch_sizes = query.shape[:(- 1)]
        B = np.prod(batch_sizes)
        G0 = self._G0
        growth_factor = self._growth_factor
        T0 = self._T0
        L = self._L
        D = self._D
        N = (L * B)
        # Same (D*L, B) layout as in forward_impl.
        grad_output_data = grad_output.data
        grad_output_data = F.reshape(grad_output_data, (B, (D * L)))
        grad_output_data = F.transpose(grad_output_data, (1, 0))
        grad_grad_query_ptr = grad_query.grad.data_ptr(np.float32, self.ctx)
        grad_output_ptr = grad_output_data.data_ptr(np.float32, self.ctx)
        query_ptr = query.data.data_ptr(np.float32, self.ctx)
        feature_ptr = feature.data.data_ptr(np.float32, self.ctx)
        grad_grad_output_ptr = grad_output.grad.data_ptr(np.float32, self.ctx)
        grad_query_ptr = query.grad.data_ptr(np.float32, self.ctx)
        grad_feature_ptr = feature.grad.data_ptr(np.float32, self.ctx)
        if propagate_down[0]:
            voxel_hash_feature_cuda.grad_query_grad_grad_output(N, grad_grad_output_ptr, grad_grad_query_ptr, query_ptr, feature_ptr, G0, growth_factor, T0, L, D, self._min, self._max, self._boundary_check, accum[0])
            # The kernel wrote in (D*L, B) layout; transpose back in place.
            grad_grad_output = grad_output.grad
            grad_grad_output = F.reshape(grad_grad_output, ((D * L), B))
            F.transpose(grad_grad_output, (1, 0), outputs=[grad_grad_output])
        if propagate_down[2]:
            voxel_hash_feature_cuda.grad_query_grad_feature(N, grad_feature_ptr, grad_grad_query_ptr, grad_output_ptr, query_ptr, G0, growth_factor, T0, L, D, self._min, self._max, self._boundary_check, accum[2])

    def grad_depends_output_data(self, i, o):
        # No input gradient needs the forward output.
        return False

    def grad_depends_input_data(self, i, j):
        # Declare which input data each input's gradient depends on.
        if ((i == 0) and ((j == 1) or (j == 2))):
            return True
        if (i == 1):
            return True
        if ((i == 2) and ((j == 0) or (j == 1))):
            return True
        return False
class Tokenizer(object):
    """Thin wrapper around jieba with optional custom dictionaries."""

    def __init__(self, dict_path='', custom_word_freq_dict=None, custom_confusion_dict=None):
        # dict_path: path to a jieba main dictionary file (used if it exists).
        # custom_word_freq_dict: {word: frequency} entries added to jieba.
        # custom_confusion_dict: {wrong: right} pairs; both sides are added
        # so neither gets split during segmentation.
        self.model = jieba
        if os.path.exists(dict_path):
            self.model.set_dictionary(dict_path)
        if custom_word_freq_dict:
            for (w, f) in custom_word_freq_dict.items():
                self.model.add_word(w, freq=f)
        if custom_confusion_dict:
            for (k, word) in custom_confusion_dict.items():
                self.model.add_word(k)
                self.model.add_word(word)

    def tokenize(self, unicode_sentence, mode='search'):
        """Tokenize a sentence into (token, start, end) character-span triples.

        mode='ngram' builds all n-grams (n <= 2) over the jieba cut and sorts
        spans by end offset; any other mode is passed straight through to
        jieba's own tokenize().
        """
        if (mode == 'ngram'):
            n = 2
            result_set = set()
            tokens = self.model.lcut(unicode_sentence)
            tokens_len = len(tokens)
            start = 0
            for i in range(0, tokens_len):
                w = tokens[i]
                width = len(w)
                result_set.add((w, start, (start + width)))
                for j in range(i, (i + n)):
                    # Join up to n consecutive tokens; span width is measured
                    # in characters of the joined string.
                    gram = ''.join(tokens[i:(j + 1)])
                    gram_width = len(gram)
                    # NOTE(review): this bound compares i+j against the token
                    # count; it looks like it was meant to stop when j+1
                    # exceeds tokens_len -- confirm before changing, since
                    # downstream results depend on the current behavior.
                    if ((i + j) > tokens_len):
                        break
                    result_set.add((gram, start, (start + gram_width)))
                start += width
            results = list(result_set)
            # Sort spans by their end offset.
            result = sorted(results, key=(lambda x: x[(- 1)]))
        else:
            result = list(self.model.tokenize(unicode_sentence, mode=mode))
        return result
class AdaptiveSoftmax(nn.Module):
    """Adaptive softmax over a vocabulary split by `cutoff` into a frequent
    head plus low-rank tail clusters (Grave et al., "Efficient softmax
    approximation for GPUs").

    The head predicts the first ``cutoff[0]`` words plus one "cluster token"
    per tail cluster; each tail cluster i covers words
    ``cutoff[i]..cutoff[i+1]-1`` through a rank-reduced projection.

    Fixes vs. the previous revision (interface unchanged):
    - ``mask.nonzero().squeeze()`` produced a 0-d index when exactly one
      target fell into a cluster, making ``index_select`` raise; replaced
      with ``.view(-1)`` which always stays 1-D.
    - deprecated ``nn.init.xavier_normal`` -> ``nn.init.xavier_normal_``,
      deprecated ``size_average=False`` -> ``reduction='sum'``, and obsolete
      ``Variable`` wrappers removed (no-ops since PyTorch 0.4).
    """

    def __init__(self, input_size, cutoff):
        """
        Args:
            input_size: dimensionality of the incoming hidden states.
            cutoff: increasing vocabulary boundaries; cutoff[-1] is the
                full vocabulary size.
        """
        super().__init__()
        self.input_size = input_size
        self.cutoff = cutoff
        # Head output: frequent words + one shortlist token per tail cluster.
        self.output_size = (cutoff[0] + len(cutoff)) - 1
        self.head = nn.Linear(input_size, self.output_size)
        self.tail = nn.ModuleList()
        # Summed (not averaged) loss; forward() normalizes by batch size.
        self.cross_entropy = nn.CrossEntropyLoss(reduction='sum')
        for i in range(len(self.cutoff) - 1):
            # Low-rank tail projection: input_size -> input_size / 4^(i+1)
            # -> cluster size, both without bias.
            reduced = input_size // (4 ** (i + 1))
            seq = nn.Sequential(nn.Linear(input_size, reduced, False), nn.Linear(reduced, cutoff[i + 1] - cutoff[i], False))
            self.tail.append(seq)

    def rand_ini(self):
        """Xavier-initialize the head and all tail projections in place."""
        nn.init.xavier_normal_(self.head.weight)
        for tail in self.tail:
            nn.init.xavier_normal_(tail[0].weight)
            nn.init.xavier_normal_(tail[1].weight)

    def log_prob(self, w_in):
        """Return full-vocabulary log-probabilities, shape (batch, cutoff[-1]).

        NOTE: allocates on CUDA, matching the original training setup;
        requires a GPU.
        """
        lsm = nn.LogSoftmax(dim=1).cuda()
        head_out = self.head(w_in)
        batch_size = head_out.size(0)
        prob = torch.zeros(batch_size, self.cutoff[-1]).cuda()
        lsm_head = lsm(head_out)
        prob.narrow(1, 0, self.output_size).add_(lsm_head.narrow(1, 0, self.output_size).data)
        for i in range(len(self.tail)):
            pos = self.cutoff[i]
            i_size = self.cutoff[i + 1] - pos
            # log P(word) = log P(cluster) + log P(word | cluster): broadcast
            # the cluster token's log-prob over the cluster's words.
            buffer = lsm_head.narrow(1, self.cutoff[0] + i, 1)
            buffer = buffer.expand(batch_size, i_size)
            lsm_tail = lsm(self.tail[i](w_in))
            prob.narrow(1, pos, i_size).copy_(buffer.data).add_(lsm_tail.data)
        return prob

    def forward(self, w_in, target):
        """Return the mean adaptive-softmax NLL of integer word ids `target`.

        Args:
            w_in: (batch, input_size) hidden states.
            target: (batch,) long tensor of word indices in [0, cutoff[-1]).
        """
        batch_size = w_in.size(0)
        output = 0.0
        first_target = target.clone()
        for i in range(len(self.cutoff) - 1):
            # Rows whose target word belongs to tail cluster i.
            mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1]))
            if mask.sum() > 0:
                # In the head those rows predict cluster token i instead.
                first_target[mask] = self.cutoff[0] + i
                second_target = target[mask].add(-self.cutoff[i])
                # view(-1) keeps the index 1-D even when exactly one row
                # falls into this cluster (squeeze() would make it 0-d).
                second_input = w_in.index_select(0, mask.nonzero().view(-1))
                second_output = self.tail[i](second_input)
                output += self.cross_entropy(second_output, second_target)
        output += self.cross_entropy(self.head(w_in), first_target)
        output /= batch_size
        return output
def readMatrixByOffset(arkfile, offset):
    """Seek to `offset` in a Kaldi-style ark file and read one matrix."""
    with open(arkfile, 'rb') as ark:
        ark.seek(offset)
        return readMatrix(ark)
class Trec50Processor(TrecProcessor):
    """TREC question-classification processor using the fine-grained
    50-label scheme (coarse:fine, e.g. 'HUM:ind')."""

    def get_labels(self):
        """Return the 50 fine-grained TREC labels."""
        return ['LOC:other', 'NUM:date', 'NUM:count', 'NUM:period', 'NUM:ord', 'NUM:other', 'ENTY:currency', 'LOC:state', 'NUM:volsize', 'ENTY:plant', 'LOC:country', 'HUM:ind', 'ABBR:exp', 'ENTY:food', 'NUM:money', 'NUM:dist', 'DESC:desc', 'HUM:desc', 'LOC:city', 'ENTY:termeq', 'LOC:mount', 'ENTY:word', 'ENTY:body', 'ENTY:dismed', 'NUM:code', 'NUM:weight', 'NUM:temp', 'ENTY:product', 'HUM:title', 'DESC:def', 'DESC:manner', 'ENTY:animal', 'ENTY:sport', 'ENTY:techmeth', 'NUM:speed', 'ENTY:veh', 'ENTY:religion', 'ENTY:instru', 'ENTY:other', 'HUM:gr', 'DESC:reason', 'NUM:perc', 'ENTY:substance', 'ENTY:lang', 'ENTY:color', 'ENTY:cremat', 'ENTY:event', 'ABBR:abb', 'ENTY:symbol', 'ENTY:letter']

    def _line2label(self, line):
        """Extract the leading label token from a dataset line."""
        label, _, _ = line.strip().partition(' ')
        assert label in self.get_labels()
        return label
# NOTE(review): this looks like a stripped `@_utils.test(arch=ti.cuda)`
# decorator from the extraction -- confirm against the original test file.
_utils.test(arch=ti.cuda)
def test_sync_any_nonzero():
    """Check ti.simt.block.sync_any_nonzero: every thread should read 1 iff
    any thread in the block passed a nonzero value."""
    a = ti.field(dtype=ti.i32, shape=256)
    b = ti.field(dtype=ti.i32, shape=256)
    # NOTE(review): presumably a @ti.kernel decorator was stripped here too.
    def foo():
        ti.loop_config(block_dim=256)
        for i in range(256):
            a[i] = ti.simt.block.sync_any_nonzero(b[i])
    # All inputs zero -> every a[i] must come back 0.
    for i in range(256):
        b[i] = 0
        a[i] = (- 1)
    foo()
    for i in range(256):
        assert (a[i] == 0)
    # One random nonzero input -> every a[i] must come back 1.
    b[np.random.randint(0, 256)] = 1
    foo()
    for i in range(256):
        assert (a[i] == 1)
def str2list(v):
    """Split a comma-separated string into stripped, non-empty tokens.

    Args:
        v: comma-separated string, e.g. " a , b ,,c ".

    Returns:
        List of stripped tokens with empty entries dropped, e.g. ['a', 'b', 'c'].
    """
    # One pass instead of the previous while/list.remove scan, which was
    # quadratic in the number of empty entries.
    return [s.strip() for s in v.split(',') if s.strip()]
def test_gaussian_random_projector(test_path):
    """Run the shared projector test helper over several output sizes."""
    from pysad.transform.projection import GaussianRandomProjector
    for n_components in (2, 50, 250):
        helper_test_projector(test_path, GaussianRandomProjector(num_components=n_components), n_components)
def create_training_file(docs, vocab_list, args, epoch_num):
    """Write one epoch of pre-training instances as JSON lines, plus a
    small metrics file recording the instance count and max_seq_len."""
    epoch_filename = args.output_dir / 'epoch_{}.json'.format(epoch_num)
    num_instances = 0
    with epoch_filename.open('w') as epoch_file:
        for doc_idx in trange(len(docs), desc='Document'):
            doc_instances = create_instances_from_document(docs, doc_idx, max_seq_length=args.max_seq_len, short_seq_prob=args.short_seq_prob, masked_lm_prob=args.masked_lm_prob, max_predictions_per_seq=args.max_predictions_per_seq, whole_word_mask=args.do_whole_word_mask, vocab_list=vocab_list)
            # One JSON object per line.
            for instance in doc_instances:
                epoch_file.write(json.dumps(instance) + '\n')
                num_instances += 1
    metrics_path = args.output_dir / 'epoch_{}_metrics.json'.format(epoch_num)
    metrics = {'num_training_examples': num_instances, 'max_seq_len': args.max_seq_len}
    with metrics_path.open('w') as metrics_file:
        metrics_file.write(json.dumps(metrics))
class Elliott_GoogLeNet(nn.Module):
    """GoogLeNet-style network whose stem uses the Elliott activation,
    ending in a 100-way linear classifier."""

    def __init__(self):
        super(Elliott_GoogLeNet, self).__init__()
        # Stem: 3x3 conv -> batch norm -> Elliott activation.
        self.pre_layers = nn.Sequential(nn.Conv2d(3, 192, kernel_size=3, padding=1), nn.BatchNorm2d(192), Elliott())
        self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)
        # One shared max-pool module, applied between stages.
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
        self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
        self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
        self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
        self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.linear = nn.Linear(1024, 100)

    def forward(self, x):
        """Run stem, Inception stages (max-pool after b3 and e4), average
        pooling, then flatten and classify."""
        stages = (self.pre_layers, self.a3, self.b3, self.maxpool, self.a4, self.b4, self.c4, self.d4, self.e4, self.maxpool, self.a5, self.b5, self.avgpool)
        out = x
        for stage in stages:
            out = stage(out)
        out = out.view(out.size(0), (- 1))
        return self.linear(out)
def get_channels(path):
    """Read a comma-separated file of (channel, filename) rows.

    Returns:
        Tuple (channels, filenames): first and second column of each row,
        in file order.
    """
    channels, filenames = [], []
    with open(path) as csv_file:
        for row in csv.reader(csv_file, delimiter=','):
            channels.append(row[0])
            filenames.append(row[1])
    return (channels, filenames)
def _read_and_preprocess_apps(target_path: str) -> List[CodeInstance]:
    """Load the APPS code-generation dataset from `target_path` into CodeInstances.

    For each problem directory it reads question.txt, solutions.json and
    input_output.json. Train problems require solutions, test problems
    require test cases; problems missing required files are skipped and
    logged. All strings are truncated to SINGLE_STR_LIMIT characters.
    """
    # APPS solutions contain huge integer literals; raise CPython's
    # int<->str conversion limit so json can handle them (attribute exists
    # on 3.11+ only, hence the hasattr guard).
    if hasattr(sys, 'set_int_max_str_digits'):
        sys.set_int_max_str_digits(100000)
    SINGLE_STR_LIMIT = 150000
    instances = []
    for split_tag in (TRAIN_SPLIT, TEST_SPLIT):
        split_dir = os.path.join(target_path, split_tag)
        num_problems = 0
        skipped_problems = []
        for problem_name in apps_listdir_with_pinned_order(target_path, split_tag):
            problem_dir = os.path.join(split_dir, problem_name)
            question_fname = os.path.join(problem_dir, 'question.txt')
            sols_fname = os.path.join(problem_dir, 'solutions.json')
            tests_fname = os.path.join(problem_dir, 'input_output.json')
            # A question is mandatory everywhere; solutions only for train,
            # test cases only for test.
            if (not os.path.isfile(question_fname)):
                skipped_problems.append(problem_name)
                continue
            elif (split_tag in ('train',)):
                if (not os.path.isfile(sols_fname)):
                    skipped_problems.append(problem_name)
                    continue
            elif (split_tag in ('test',)):
                if ((not os.path.exists(tests_fname)) or (not os.path.isfile(tests_fname))):
                    skipped_problems.append(problem_name)
                    continue
            starter_code_fname = os.path.join(problem_dir, 'starter_code.py')
            # Presence of starter code selects the prompt's answer format.
            if os.path.exists(starter_code_fname):
                answer_type = '\nUse Call-Based format\n'
            else:
                answer_type = '\nUse Standard Input format\n'
            if os.path.isfile(starter_code_fname):
                with open(starter_code_fname, 'r') as f:
                    starter_code = f.read()
            else:
                starter_code = ''
            with open(question_fname, 'r') as f:
                question = f.read()
            if os.path.isfile(sols_fname):
                with open(sols_fname, 'r') as f:
                    sols_str_list = json.load(f)
                solutions = [_reindent_code(sol_str) for sol_str in sols_str_list]
            else:
                solutions = []
            if os.path.exists(tests_fname):
                with open(tests_fname, 'r') as f:
                    data: Dict = json.load(f)
            else:
                data = dict()
            # Keep the problem directory so evaluation can locate files later.
            data['root'] = problem_dir
            # Cap string lengths to guard against pathological inputs.
            question = question[:SINGLE_STR_LIMIT]
            starter_code = starter_code[:SINGLE_STR_LIMIT]
            solutions = [sol[:SINGLE_STR_LIMIT] for sol in solutions]
            # Ensure at least one (possibly empty) reference solution.
            if (len(solutions) == 0):
                solutions = ['']
            prompt = _make_input_for_apps(question=question, starter_code=starter_code, answer_type=answer_type)
            instance = CodeInstance(input=Input(text=prompt), references=[CodeReference(output=Output(text=solution), tags=[CORRECT_TAG], test_cases=data) for solution in solutions], split=split_tag, metadata=data)
            instances.append(instance)
            num_problems += 1
        # NOTE(review): num_problems counts *kept* problems, so the
        # "skipped X/Y" in this message is skipped-vs-kept, not
        # skipped-vs-total -- confirm whether that wording is intended.
        hlog(f'Split {split_tag}, skipped {len(skipped_problems)}/{num_problems} problems with no description or solution. Their ids are: {skipped_problems}')
    return instances
def _create_function(ctx, network, f, variable_index):
    """Instantiate one network function for a concrete repeat/recurrent index.

    `variable_index` selects the current iteration of any nested repeat
    loops; variable names are suffixed with strings like `_<repeat_id>[i]`
    so each loop iteration gets its own variables.

    Returns:
        (function, input_variable_names, output_variable_names), or
        (None, None, None) for a RecurrentInput at indices past the first
        (it is only materialized once).
    """
    # Suffix for this exact loop index, and for the enclosing (one level up) index.
    variable_index_name = ''.join([(((('_' + f.repeat_id[index]) + '[') + str(i)) + ']') for (index, i) in enumerate(variable_index)])
    variable_index_low_level_name = ''.join([(((('_' + f.repeat_id[index]) + '[') + str(i)) + ']') for (index, i) in enumerate(variable_index[:(- 1)])])
    function_name = (f.name + variable_index_name)
    if (f.type == 'RepeatStart'):
        # Iteration 0 reads the loop-external input; later iterations read
        # the previous iteration's loop-carried value (input[1]).
        assert (len(f.input) == 2)
        if (variable_index[(- 1)] == 0):
            input_variable_names = [(f.input[0] if (f.input[0] in network.variables) else (f.input[0] + variable_index_low_level_name))]
        else:
            input_variable_names = [((((((f.input[1] + variable_index_low_level_name) + '_') + f.repeat_param.repeat_id) + '[') + str((variable_index[(- 1)] - 1))) + ']')]
    elif (f.type == 'RepeatEnd'):
        # Reads the last iteration's output.
        assert (len(f.input) == 1)
        input_variable_names = [((((((f.input[0] + variable_index_name) + '_') + f.repeat_param.repeat_id) + '[') + str((f.repeat_param.times - 1))) + ']')]
    elif (f.type == 'RecurrentInput'):
        # Only materialized once (index 0); it splits the sequence axis.
        if (variable_index[(- 1)] > 0):
            return (None, None, None)
        function_name = (f.name + variable_index_low_level_name)
        variable_index_name = variable_index_low_level_name
        input_variable_names = [(v_name if (v_name in network.variables) else (v_name + variable_index_low_level_name)) for v_name in f.input]
    elif (f.type == 'RecurrentOutput'):
        # Gathers every timestep's variable to stack back into one sequence.
        assert (len(f.input) == 1)
        input_variable_names = [((((((f.input[0] + variable_index_name) + '_') + f.recurrent_param.repeat_id) + '[') + str(v_index)) + ']') for v_index in range(f.recurrent_param.length)]
    elif (f.type == 'Delay'):
        # Index 0 uses the initial value (input[1]); later indices use the
        # previous timestep's value of input[0].
        assert (len(f.input) == 2)
        if (variable_index[(- 1)] == 0):
            input_variable_names = [(f.input[1] if (f.input[1] in network.variables) else (f.input[1] + variable_index_low_level_name))]
        else:
            input_variable_names = [((((((f.input[0] + variable_index_low_level_name) + '_') + f.recurrent_param.repeat_id) + '[') + str((variable_index[(- 1)] - 1))) + ']')]
    else:
        # Generic case: expand `{repeat_id}` placeholders, then resolve by
        # trying exact name -> name+current suffix -> name+enclosing suffix.
        v_names = []
        for v_name in f.input:
            for (index, i) in enumerate(variable_index):
                v_name = v_name.replace((('{' + f.repeat_id[index]) + '}'), (('[' + str(i)) + ']'))
            v_names.append(v_name)
        input_variable_names = [(v_name if (v_name in network.variables) else ((v_name + variable_index_name) if ((v_name + variable_index_name) in network.variables) else (v_name + variable_index_low_level_name))) for v_name in v_names]
    inputs = [network.variables[v_name] for v_name in input_variable_names]
    if (f.type == 'RecurrentInput'):
        # One output variable per timestep along the recurrent axis.
        assert (len(inputs) == 1)
        assert (len(f.output) == 1)
        output_variable_names = [((((((f.output[0] + variable_index_low_level_name) + '_') + f.recurrent_param.repeat_id) + '[') + str(v_index)) + ']') for v_index in range(inputs[0].shape[f.recurrent_param.axis])]
    else:
        output_variable_names = [((v_name + variable_index_name) if ((v_name + variable_index_name) in network.variables) else v_name) for v_name in f.output]
    outputs = [network.variables[v_name] for v_name in output_variable_names]
    persistent = True
    # Map the proto function type onto a concrete function instance. Loop
    # plumbing (RepeatStart/End, Delay) becomes Identity and is marked
    # non-persistent.
    if (f.type == 'Reshape'):
        shape = resolve_reshape_params(inputs, f, network.batch_size)
        function_instance = F.Reshape(ctx, shape=shape, inplace=True)
    elif (f.type == 'RepeatStart'):
        function_instance = F.Identity(ctx)
        persistent = False
    elif (f.type == 'RepeatEnd'):
        function_instance = F.Identity(ctx)
        persistent = False
    elif (f.type == 'RecurrentOutput'):
        function_instance = F.Stack(ctx, axis=f.recurrent_param.axis)
    elif (f.type == 'RecurrentInput'):
        function_instance = F.Split(ctx, axis=f.recurrent_param.axis)
    elif (f.type == 'Delay'):
        function_instance = F.Identity(ctx)
        persistent = False
    elif (f.type == 'Broadcast'):
        shape = resolve_broadcast_params(inputs, f, network.batch_size)
        function_instance = F.Broadcast(ctx, shape)
    else:
        function_instance = _create_function_instance(ctx, f)

    # Lightweight record bundling the resolved function and its variables.
    class Function():
        pass
    function = Function()
    function.name = function_name
    function.function_instance = function_instance
    function.inputs = list(inputs)
    function.outputs = list(outputs)
    function.persistent = persistent
    return (function, input_variable_names, output_variable_names)
class Config():
    """Facility for config files (mmcv-style).

    Supports python/json/yaml config files, `_base_` inheritance with
    `{{ _base_.xxx }}` substitution, predefined `{{ fileDirname }}`-style
    template variables, and dumping back to text.

    NOTE(review): in upstream mmcv the helpers below are decorated with
    `@staticmethod` and `filename`/`text`/`pretty_text` with `@property`;
    the decorators appear to have been stripped here (e.g. `dump()` uses
    `self.pretty_text` without calling it) -- confirm against upstream.
    """

    # Parse the file with `ast` purely to surface syntax errors early.
    def _validate_py_syntax(filename):
        with open(filename, 'r', encoding='utf-8') as f:
            content = f.read()
        try:
            ast.parse(content)
        except SyntaxError as e:
            raise SyntaxError(f'There are syntax errors in config file (unknown): {e}')

    # Replace {{ fileDirname }}-style templates with values derived from
    # `filename`, writing the result to `temp_config_name`.
    def _substitute_predefined_vars(filename, temp_config_name):
        file_dirname = osp.dirname(filename)
        file_basename = osp.basename(filename)
        file_basename_no_extension = osp.splitext(file_basename)[0]
        file_extname = osp.splitext(filename)[1]
        support_templates = dict(fileDirname=file_dirname, fileBasename=file_basename, fileBasenameNoExtension=file_basename_no_extension, fileExtname=file_extname)
        with open(filename, 'r', encoding='utf-8') as f:
            config_file = f.read()
        for (key, value) in support_templates.items():
            regexp = (('\\{\\{\\s*' + str(key)) + '\\s*\\}\\}')
            # Normalize Windows path separators so re.sub does not treat
            # backslashes as escapes.
            value = value.replace('\\', '/')
            config_file = re.sub(regexp, value, config_file)
        with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file:
            tmp_config_file.write(config_file)

    # Replace {{ _base_.xxx }} references with unique placeholder strings;
    # returns {placeholder: dotted-path} for later resolution.
    def _pre_substitute_base_vars(filename, temp_config_name):
        with open(filename, 'r', encoding='utf-8') as f:
            config_file = f.read()
        base_var_dict = {}
        regexp = (('\\{\\{\\s*' + BASE_KEY) + '\\.([\\w\\.]+)\\s*\\}\\}')
        base_vars = set(re.findall(regexp, config_file))
        for base_var in base_vars:
            randstr = f'_{base_var}_{uuid.uuid4().hex.lower()[:6]}'
            base_var_dict[randstr] = base_var
            regexp = (((('\\{\\{\\s*' + BASE_KEY) + '\\.') + base_var) + '\\s*\\}\\}')
            config_file = re.sub(regexp, f'"{randstr}"', config_file)
        with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file:
            tmp_config_file.write(config_file)
        return base_var_dict

    # Recursively replace the placeholder strings produced above with the
    # values they point to inside `base_cfg`.
    def _substitute_base_vars(cfg, base_var_dict, base_cfg):
        cfg = copy.deepcopy(cfg)
        if isinstance(cfg, dict):
            for (k, v) in cfg.items():
                if (isinstance(v, str) and (v in base_var_dict)):
                    # Walk the dotted path into the base config.
                    new_v = base_cfg
                    for new_k in base_var_dict[v].split('.'):
                        new_v = new_v[new_k]
                    cfg[k] = new_v
                elif isinstance(v, (list, tuple, dict)):
                    cfg[k] = Config._substitute_base_vars(v, base_var_dict, base_cfg)
        elif isinstance(cfg, tuple):
            cfg = tuple((Config._substitute_base_vars(c, base_var_dict, base_cfg) for c in cfg))
        elif isinstance(cfg, list):
            cfg = [Config._substitute_base_vars(c, base_var_dict, base_cfg) for c in cfg]
        elif (isinstance(cfg, str) and (cfg in base_var_dict)):
            new_v = base_cfg
            for new_k in base_var_dict[cfg].split('.'):
                new_v = new_v[new_k]
            cfg = new_v
        return cfg

    # Load a config file (after template substitution) into a plain dict,
    # recursively merging any `_base_` files; returns (cfg_dict, cfg_text).
    def _file2dict(filename, use_predefined_variables=True):
        filename = osp.abspath(osp.expanduser(filename))
        check_file_exist(filename)
        fileExtname = osp.splitext(filename)[1]
        if (fileExtname not in ['.py', '.json', '.yaml', '.yml']):
            raise IOError('Only py/yml/yaml/json type are supported now!')
        with tempfile.TemporaryDirectory() as temp_config_dir:
            temp_config_file = tempfile.NamedTemporaryFile(dir=temp_config_dir, suffix=fileExtname)
            if (platform.system() == 'Windows'):
                # Windows cannot reopen an open NamedTemporaryFile.
                temp_config_file.close()
            temp_config_name = osp.basename(temp_config_file.name)
            if use_predefined_variables:
                Config._substitute_predefined_vars(filename, temp_config_file.name)
            else:
                shutil.copyfile(filename, temp_config_file.name)
            base_var_dict = Config._pre_substitute_base_vars(temp_config_file.name, temp_config_file.name)
            if filename.endswith('.py'):
                # Import the temp copy as a module and harvest its globals.
                temp_module_name = osp.splitext(temp_config_name)[0]
                sys.path.insert(0, temp_config_dir)
                Config._validate_py_syntax(filename)
                mod = import_module(temp_module_name)
                sys.path.pop(0)
                cfg_dict = {name: value for (name, value) in mod.__dict__.items() if (not name.startswith('__'))}
                del sys.modules[temp_module_name]
            elif filename.endswith(('.yml', '.yaml', '.json')):
                import annotator.uniformer.mmcv as mmcv
                cfg_dict = mmcv.load(temp_config_file.name)
            temp_config_file.close()
        # Emit a deprecation warning if the config declares itself deprecated.
        if (DEPRECATION_KEY in cfg_dict):
            deprecation_info = cfg_dict.pop(DEPRECATION_KEY)
            warning_msg = f'The config file (unknown) will be deprecated in the future.'
            if ('expected' in deprecation_info):
                warning_msg += f" Please use {deprecation_info['expected']} instead."
            if ('reference' in deprecation_info):
                warning_msg += f" More information can be found at {deprecation_info['reference']}"
            warnings.warn(warning_msg)
        cfg_text = (filename + '\n')
        with open(filename, 'r', encoding='utf-8') as f:
            cfg_text += f.read()
        if (BASE_KEY in cfg_dict):
            # Recursively load every base file and merge this config on top.
            cfg_dir = osp.dirname(filename)
            base_filename = cfg_dict.pop(BASE_KEY)
            base_filename = (base_filename if isinstance(base_filename, list) else [base_filename])
            cfg_dict_list = list()
            cfg_text_list = list()
            for f in base_filename:
                (_cfg_dict, _cfg_text) = Config._file2dict(osp.join(cfg_dir, f))
                cfg_dict_list.append(_cfg_dict)
                cfg_text_list.append(_cfg_text)
            base_cfg_dict = dict()
            for c in cfg_dict_list:
                duplicate_keys = (base_cfg_dict.keys() & c.keys())
                if (len(duplicate_keys) > 0):
                    raise KeyError(f'Duplicate key is not allowed among bases. Duplicate keys: {duplicate_keys}')
                base_cfg_dict.update(c)
            cfg_dict = Config._substitute_base_vars(cfg_dict, base_var_dict, base_cfg_dict)
            base_cfg_dict = Config._merge_a_into_b(cfg_dict, base_cfg_dict)
            cfg_dict = base_cfg_dict
            cfg_text_list.append(cfg_text)
            cfg_text = '\n'.join(cfg_text_list)
        return (cfg_dict, cfg_text)

    # Recursively merge dict `a` into `b` (a wins). With allow_list_keys,
    # digit keys index into lists; `_delete_` in `a` discards the base value.
    def _merge_a_into_b(a, b, allow_list_keys=False):
        b = b.copy()
        for (k, v) in a.items():
            if (allow_list_keys and k.isdigit() and isinstance(b, list)):
                k = int(k)
                if (len(b) <= k):
                    raise KeyError(f'Index {k} exceeds the length of list {b}')
                b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys)
            elif (isinstance(v, dict) and (k in b) and (not v.pop(DELETE_KEY, False))):
                allowed_types = ((dict, list) if allow_list_keys else dict)
                if (not isinstance(b[k], allowed_types)):
                    raise TypeError(f'{k}={v} in child config cannot inherit from base because {k} is a dict in the child config but is of type {type(b[k])} in base config. You may set `{DELETE_KEY}=True` to ignore the base config')
                b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys)
            else:
                b[k] = v
        return b

    # Build a Config from a file path.
    def fromfile(filename, use_predefined_variables=True, import_custom_modules=True):
        (cfg_dict, cfg_text) = Config._file2dict(filename, use_predefined_variables)
        if (import_custom_modules and cfg_dict.get('custom_imports', None)):
            import_modules_from_strings(**cfg_dict['custom_imports'])
        return Config(cfg_dict, cfg_text=cfg_text, filename=filename)

    # Build a Config from a string by round-tripping through a temp file.
    def fromstring(cfg_str, file_format):
        if (file_format not in ['.py', '.json', '.yaml', '.yml']):
            raise IOError('Only py/yml/yaml/json type are supported now!')
        if ((file_format != '.py') and ('dict(' in cfg_str)):
            warnings.warn('Please check "file_format", the file format may be .py')
        with tempfile.NamedTemporaryFile('w', encoding='utf-8', suffix=file_format, delete=False) as temp_file:
            temp_file.write(cfg_str)
        cfg = Config.fromfile(temp_file.name)
        os.remove(temp_file.name)
        return cfg

    # Build an ArgumentParser whose options mirror the config's entries.
    def auto_argparser(description=None):
        partial_parser = ArgumentParser(description=description)
        partial_parser.add_argument('config', help='config file path')
        cfg_file = partial_parser.parse_known_args()[0].config
        cfg = Config.fromfile(cfg_file)
        parser = ArgumentParser(description=description)
        parser.add_argument('config', help='config file path')
        add_args(parser, cfg)
        return (parser, cfg)

    def __init__(self, cfg_dict=None, cfg_text=None, filename=None):
        if (cfg_dict is None):
            cfg_dict = dict()
        elif (not isinstance(cfg_dict, dict)):
            raise TypeError(f'cfg_dict must be a dict, but got {type(cfg_dict)}')
        for key in cfg_dict:
            if (key in RESERVED_KEYS):
                raise KeyError(f'{key} is reserved for config file')
        # Bypass our own __setattr__ (which writes into _cfg_dict).
        super(Config, self).__setattr__('_cfg_dict', ConfigDict(cfg_dict))
        super(Config, self).__setattr__('_filename', filename)
        if cfg_text:
            text = cfg_text
        elif filename:
            with open(filename, 'r') as f:
                text = f.read()
        else:
            text = ''
        super(Config, self).__setattr__('_text', text)

    def filename(self):
        return self._filename

    def text(self):
        return self._text

    # Render the config as yapf-formatted python source.
    def pretty_text(self):
        indent = 4

        # Indent all but the first line of a multi-line string.
        def _indent(s_, num_spaces):
            s = s_.split('\n')
            if (len(s) == 1):
                return s_
            first = s.pop(0)
            s = [((num_spaces * ' ') + line) for line in s]
            s = '\n'.join(s)
            s = ((first + '\n') + s)
            return s

        # Format a scalar entry as `k=v` or (mapping style) `'k': v`.
        def _format_basic_types(k, v, use_mapping=False):
            if isinstance(v, str):
                v_str = f"'{v}'"
            else:
                v_str = str(v)
            if use_mapping:
                k_str = (f"'{k}'" if isinstance(k, str) else str(k))
                attr_str = f'{k_str}: {v_str}'
            else:
                attr_str = f'{str(k)}={v_str}'
            attr_str = _indent(attr_str, indent)
            return attr_str

        # Format a list entry; lists of dicts are expanded one dict per line.
        def _format_list(k, v, use_mapping=False):
            if all((isinstance(_, dict) for _ in v)):
                v_str = '[\n'
                v_str += '\n'.join((f'dict({_indent(_format_dict(v_), indent)}),' for v_ in v)).rstrip(',')
                if use_mapping:
                    k_str = (f"'{k}'" if isinstance(k, str) else str(k))
                    attr_str = f'{k_str}: {v_str}'
                else:
                    attr_str = f'{str(k)}={v_str}'
                attr_str = (_indent(attr_str, indent) + ']')
            else:
                attr_str = _format_basic_types(k, v, use_mapping)
            return attr_str

        # True if any key is not a valid python identifier (forces {} syntax).
        def _contain_invalid_identifier(dict_str):
            contain_invalid_identifier = False
            for key_name in dict_str:
                contain_invalid_identifier |= (not str(key_name).isidentifier())
            return contain_invalid_identifier

        # Recursively format a dict; dict(...) syntax unless keys force {}.
        def _format_dict(input_dict, outest_level=False):
            r = ''
            s = []
            use_mapping = _contain_invalid_identifier(input_dict)
            if use_mapping:
                r += '{'
            for (idx, (k, v)) in enumerate(input_dict.items()):
                is_last = (idx >= (len(input_dict) - 1))
                end = ('' if (outest_level or is_last) else ',')
                if isinstance(v, dict):
                    v_str = ('\n' + _format_dict(v))
                    if use_mapping:
                        k_str = (f"'{k}'" if isinstance(k, str) else str(k))
                        attr_str = f'{k_str}: dict({v_str}'
                    else:
                        attr_str = f'{str(k)}=dict({v_str}'
                    attr_str = ((_indent(attr_str, indent) + ')') + end)
                elif isinstance(v, list):
                    attr_str = (_format_list(k, v, use_mapping) + end)
                else:
                    attr_str = (_format_basic_types(k, v, use_mapping) + end)
                s.append(attr_str)
            r += '\n'.join(s)
            if use_mapping:
                r += '}'
            return r

        cfg_dict = self._cfg_dict.to_dict()
        text = _format_dict(cfg_dict, outest_level=True)
        yapf_style = dict(based_on_style='pep8', blank_line_before_nested_class_or_def=True, split_before_expression_after_opening_paren=True)
        (text, _) = FormatCode(text, style_config=yapf_style, verify=True)
        return text

    def __repr__(self):
        return f'Config (path: {self.filename}): {self._cfg_dict.__repr__()}'

    def __len__(self):
        return len(self._cfg_dict)

    # Attribute/item access is delegated to the wrapped ConfigDict.
    def __getattr__(self, name):
        return getattr(self._cfg_dict, name)

    def __getitem__(self, name):
        return self._cfg_dict.__getitem__(name)

    def __setattr__(self, name, value):
        if isinstance(value, dict):
            value = ConfigDict(value)
        self._cfg_dict.__setattr__(name, value)

    def __setitem__(self, name, value):
        if isinstance(value, dict):
            value = ConfigDict(value)
        self._cfg_dict.__setitem__(name, value)

    def __iter__(self):
        return iter(self._cfg_dict)

    # Pickle support: state is the wrapped dict, the filename and the text.
    def __getstate__(self):
        return (self._cfg_dict, self._filename, self._text)

    def __setstate__(self, state):
        (_cfg_dict, _filename, _text) = state
        super(Config, self).__setattr__('_cfg_dict', _cfg_dict)
        super(Config, self).__setattr__('_filename', _filename)
        super(Config, self).__setattr__('_text', _text)

    # Serialize the config: python source for .py configs, mmcv.dump otherwise.
    def dump(self, file=None):
        cfg_dict = super(Config, self).__getattribute__('_cfg_dict').to_dict()
        if self.filename.endswith('.py'):
            if (file is None):
                return self.pretty_text
            else:
                with open(file, 'w', encoding='utf-8') as f:
                    f.write(self.pretty_text)
        else:
            import annotator.uniformer.mmcv as mmcv
            if (file is None):
                file_format = self.filename.split('.')[(- 1)]
                return mmcv.dump(cfg_dict, file_format=file_format)
            else:
                mmcv.dump(cfg_dict, file)

    # Merge {'a.b.c': v, ...} style options into the config in place.
    def merge_from_dict(self, options, allow_list_keys=True):
        option_cfg_dict = {}
        for (full_key, v) in options.items():
            d = option_cfg_dict
            key_list = full_key.split('.')
            for subkey in key_list[:(- 1)]:
                d.setdefault(subkey, ConfigDict())
                d = d[subkey]
            subkey = key_list[(- 1)]
            d[subkey] = v
        cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
        super(Config, self).__setattr__('_cfg_dict', Config._merge_a_into_b(option_cfg_dict, cfg_dict, allow_list_keys=allow_list_keys))
class RedditCandidateGenerator():
    """Generates 1-hop schema candidates for Reddit questions from a
    Freebase subset plus precomputed entity linkings."""

    def __init__(self, freebase_fp, links_fp, verbose=0):
        # subj_pred_dict: subject mid -> set of predicates seen in the subset.
        self.subj_pred_dict = {}
        # p_links_dict: question index -> list of LinkData.
        self.p_links_dict = {}
        self.verbose = verbose
        self._load_fb_subset(freebase_fp)
        self._load_linkings(links_fp)

    def _load_fb_subset(self, fb_fp):
        """Load <S, P, O> triples, recording the predicates seen per subject."""
        LogInfo.begin_track('Loading freebase subset from [%s] ...', fb_fp)
        # Lines look like "www.freebase.com/m/xxx\twww.freebase.com/.../pred\t...".
        prefix = 'www.freebase.com/'
        pref_len = len(prefix)
        with codecs.open(fb_fp, 'r', 'utf-8') as br:
            lines = br.readlines()
            LogInfo.logs('%d lines loaded.', len(lines))
            for (line_idx, line) in enumerate(lines):
                # Progress log every 500k lines.
                if ((line_idx % 500000) == 0):
                    LogInfo.logs('Current: %d / %d', line_idx, len(lines))
                (s, p, _) = line.strip().split('\t')
                # Strip the URL prefix and convert path separators to dots.
                s = s[pref_len:].replace('/', '.')
                p = p[pref_len:].replace('/', '.')
                self.subj_pred_dict.setdefault(s, set([])).add(p)
        LogInfo.logs('%d related entities and %d <S, P> pairs saved.', len(self.subj_pred_dict), sum([len(v) for v in self.subj_pred_dict.values()]))
        LogInfo.end_track()

    def _load_linkings(self, links_fp):
        """Load tab-separated entity-linking rows into p_links_dict."""
        with codecs.open(links_fp, 'r', 'utf-8') as br:
            for line in br.readlines():
                # Skip comment lines.
                if line.startswith('#'):
                    continue
                spt = line.strip().split('\t')
                (q_idx, st, ed, mention, mid, wiki_name, feats) = spt
                q_idx = int(q_idx)
                st = int(st)
                ed = int(ed)
                feat_dict = json.loads(feats)
                # Round linking feature values to 6 decimal places.
                for k in feat_dict:
                    v = float(('%.6f' % feat_dict[k]))
                    feat_dict[k] = v
                link_data = LinkData(category='Entity', start=st, end=ed, mention=mention, comp='==', value=mid, name=wiki_name, link_feat=feat_dict)
                self.p_links_dict.setdefault(q_idx, []).append(link_data)
        LogInfo.logs('%d questions of link data loaded.', len(self.p_links_dict))

    def single_post_candgen(self, p_idx, post, link_fp, schema_fp):
        """Generate and persist linkings plus 1-hop schemas for one post.

        Reads cached linkings from link_fp when present, otherwise uses the
        preloaded ones and writes them out; then emits one schema per
        (linked entity, predicate) pair to schema_fp. Both files are first
        written to a '.tmp' path and moved into place.
        """
        if os.path.isfile(link_fp):
            # Cached linkings exist: deserialize them.
            gather_linkings = []
            with codecs.open(link_fp, 'r', 'utf-8') as br:
                for line in br.readlines():
                    tup_list = json.loads(line.strip())
                    ld_dict = {k: v for (k, v) in tup_list}
                    gather_linkings.append(LinkData(**ld_dict))
        else:
            gather_linkings = self.p_links_dict.get(p_idx, [])
        # Record each linking's position for later reference in schemas.
        for idx in range(len(gather_linkings)):
            gather_linkings[idx].gl_pos = idx
        if (not os.path.isfile(link_fp)):
            with codecs.open((link_fp + '.tmp'), 'w', 'utf-8') as bw:
                for gl in gather_linkings:
                    bw.write((json.dumps(gl.serialize()) + '\n'))
            shutil.move((link_fp + '.tmp'), link_fp)
        # Build one single-hop schema per (entity, predicate) pair.
        sc_list = []
        for gl_data in gather_linkings:
            entity = gl_data.value
            pred_set = self.subj_pred_dict.get(entity, set([]))
            for pred in pred_set:
                sc = Schema()
                sc.hops = 1
                sc.main_pred_seq = [pred]
                sc.raw_paths = [('Main', gl_data, [pred])]
                sc.ans_size = 1
                sc_list.append(sc)
        if (len(sc_list) == 0):
            LogInfo.logs(('q_idx: %d sc_list=0' % p_idx))
        with codecs.open((schema_fp + '.tmp'), 'w', 'utf-8') as bw:
            for sc in sc_list:
                # Serialize each schema as a compact JSON line, referencing
                # linkings by their gl_pos.
                sc_info_dict = {k: getattr(sc, k) for k in ('ans_size', 'hops')}
                opt_raw_paths = []
                for (cate, gl, pred_seq) in sc.raw_paths:
                    opt_raw_paths.append((cate, gl.gl_pos, gl.value, pred_seq))
                sc_info_dict['raw_paths'] = opt_raw_paths
                bw.write((json.dumps(sc_info_dict) + '\n'))
        shutil.move((schema_fp + '.tmp'), schema_fp)
def dict_to_nonedict(opt):
    """Recursively convert plain dicts inside *opt* into NoneDict instances.

    Mappings become ``NoneDict`` objects (presumably so missing keys read as
    None elsewhere in the project — confirm NoneDict's semantics), lists are
    converted element-wise, and any other value is returned unchanged.
    """
    if isinstance(opt, dict):
        converted = {key: dict_to_nonedict(value) for (key, value) in opt.items()}
        return NoneDict(**converted)
    if isinstance(opt, list):
        return [dict_to_nonedict(item) for item in opt]
    return opt
def extractall_unicode(zfile, out_dir):
    """Extract every member of *zfile* into *out_dir*, repairing mojibake names.

    The ZIP spec mandates CP437 for legacy (non-UTF-8-flagged) filenames, so
    ``zipfile`` decodes such names with CP437. Names that were actually UTF-8
    come out garbled; round-tripping them through CP437 recovers the intended
    text. Names for which the round trip fails are kept as zipfile decoded them.

    Parameters:
        zfile: an open ``zipfile.ZipFile``.
        out_dir: destination directory; intermediate directories are created
            as needed.
    """
    for member in zfile.infolist():
        data = zfile.read(member)
        try:
            # Undo zipfile's CP437 decoding, then re-decode the raw bytes as UTF-8.
            decoded_name = member.filename.encode('cp437').decode()
        except (UnicodeEncodeError, UnicodeDecodeError):
            # UnicodeEncodeError: name contains chars outside CP437 (it was
            # already decoded correctly). UnicodeDecodeError: raw bytes are not
            # valid UTF-8 (genuinely CP437-named entry). Either way, keep the
            # name as-is. The original code missed UnicodeDecodeError and
            # crashed on such entries.
            decoded_name = member.filename
        disk_file_name = os.path.join(out_dir, decoded_name)
        dir_name = os.path.dirname(disk_file_name)
        if dir_name:
            # exist_ok avoids the check-then-create race of the original
            # exists()/makedirs() pair; the guard avoids makedirs('') for
            # root-level entries when out_dir is empty.
            os.makedirs(dir_name, exist_ok=True)
        # Directory entries (names ending in '/') get no file written.
        if not os.path.isdir(disk_file_name):
            with open(disk_file_name, 'wb') as fd:
                fd.write(data)
def load_dataset(name, drop_columns=False, auth=None, show_progress=False):
    """Load a built-in skmob dataset by name, downloading it on first use.

    Parameters:
        name: dataset module name, with or without the ``.py`` suffix.
        drop_columns: if the result is a TrajDataFrame, keep only the four
            canonical columns (``uid``, ``lat``, ``lng``, ``datetime``).
        auth: ``(username, password)`` pair; required only when the dataset's
            metadata declares ``auth == 'yes'``.
        show_progress: show a progress bar while downloading.

    Returns:
        The object built by the dataset's ``prepare`` hook (TrajDataFrame,
        FlowDataFrame, GeoDataFrame or DataFrame), with the dataset metadata
        attached as ``_info``.

    Raises:
        ValueError: on malformed arguments or an unknown dataset name.
    """
    # --- argument validation (isinstance also accepts str/bool subclasses) ---
    if not isinstance(name, str):
        raise ValueError('The argument `name` must be a string.')
    if not isinstance(drop_columns, bool):
        raise ValueError('The argument `drop_columns` must be a boolean.')
    if auth is not None:
        if len(auth) != 2:
            raise ValueError('The argument `auth` must have length 2.')
        if not (isinstance(auth[0], str) and isinstance(auth[1], str)):
            raise ValueError('The argument `auth` must be a pair of strings.')
    if not isinstance(show_progress, bool):
        raise ValueError('The argument `show_progress` must be a boolean.')
    # --- resolve the dataset module ------------------------------------------
    if not name.endswith('.py'):
        name = name + '.py'
    short_name = name[:-3]
    try:
        module = importlib.import_module('.' + short_name, 'skmob.data.datasets.' + short_name)
    except ModuleNotFoundError:
        # Fix: the original had an unreachable `return` after this raise.
        raise ValueError('Dataset name not found. Please use `list_datasets()` to list all the available datasets.')
    dataset_class = getattr(module, short_name)
    dataset_instance = dataset_class()
    dataset_info = dataset_instance.dataset_info
    hash_value = None if dataset_info['hash'] == '' else dataset_info['hash']
    if dataset_info['auth'] == 'yes':
        if auth is None or len(auth) != 2:
            raise ValueError('`auth` should be a pair (username, password) used for the authentication.')
    else:
        # The downloader expects an empty tuple when no authentication is needed.
        auth = ()
    # --- download and build the dataset --------------------------------------
    full_path_files = _skmob_downloader(dataset_info['url'], hash_value, auth=auth, download_format=dataset_info['download_format'], show_progress=show_progress)
    dataset = dataset_instance.prepare(full_path_files)
    if type(dataset) is TrajDataFrame and drop_columns:
        dataset = dataset[['uid', 'lat', 'lng', 'datetime']]
    # The original repeated this assignment in four identical branches; the
    # exact type() checks (no subclasses) are preserved.
    if type(dataset) in (TrajDataFrame, GeoDataFrame, DataFrame, FlowDataFrame):
        dataset._info = dataset_info
    del dataset_instance
    return dataset
def _get_level(level):
if isinstance(level, int):
pass
else:
level = level.lower()
level = {'info': logging.INFO, 'debug': logging.DEBUG, 'warn': logging.WARN, 'warning': logging.WARN, 'error': logging.ERROR}[level]
return level |
_args('v', 'v', 'is', 'i')  # NOTE(review): looks like a mangled `@parse_args('v', 'v', 'is', 'i')` decorator — confirm against upstream torch.onnx
def as_strided(g, self, sizes, strides, offset=None):
    """ONNX symbolic for ``torch.as_strided``.

    ONNX has no strided-view op, so this flattens the input to 1-D and
    gathers with an index tensor computed from ``sizes``/``strides`` (plus
    the optional storage ``offset``).

    Parameters:
        g: the ONNX graph being built.
        self: input graph value.
        sizes: target shape — either python ints (constant) or a graph value.
        strides: per-dimension strides as python ints.
        offset: optional storage offset added to every index.
    """
    sizes = sym_help._maybe_get_const(sizes, 'is')
    rank = len(strides)
    # Flatten the input so one Gather with computed indices emulates the view.
    self_1d = g.op('Reshape', self, g.op('Constant', value_t=torch.tensor([-1], dtype=torch.int64)))
    if not sym_help._is_value(sizes):
        # Static sizes: precompute the full index tensor on the host.
        ind = torch.tensor([0], dtype=torch.long)
        for i, (size, stride) in enumerate(zip(sizes, strides)):
            r_size = [1] * rank
            r_size[i] = -1
            # Broadcast-add this dimension's stride pattern along axis i.
            ind = ind + torch.arange(size).view(r_size) * stride
        if offset:
            ind = ind + offset
        return g.op('Gather', self_1d, g.op('Constant', value_t=ind))
    else:
        # Dynamic sizes: build the index tensor inside the graph instead.
        ind = None
        for i, stride in enumerate(strides):
            r_size = [1] * rank
            r_size[i] = -1
            size = select(g, sizes, g.op('Constant', value_t=torch.tensor([0])), g.op('Constant', value_t=torch.tensor(i)))
            tmp_ind = g.op('Reshape', arange(g, size, 4, None, None, None), g.op('Constant', value_t=torch.tensor(r_size)))
            tmp_ind = g.op('Mul', tmp_ind, g.op('Constant', value_t=torch.tensor([stride])))
            if ind is None:
                ind = tmp_ind
            else:
                ind = g.op('Add', ind, tmp_ind)
        if offset:
            # BUG FIX: the tensor must be passed as the `value_t` attribute —
            # positionally it would be treated as a graph *input*, breaking the
            # Constant node (cf. the other Constant ops above; fixed upstream
            # in PyTorch the same way).
            ind = g.op('Add', ind, g.op('Constant', value_t=torch.tensor([offset])))
        return g.op('Gather', self_1d, ind)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths, xlingual=False):
    """Score *prediction* against every reference and keep the best score.

    Applies ``metric_fn(prediction, ground_truth, xlingual=...)`` to each
    entry of *ground_truths* and returns the maximum. An empty reference
    list raises ValueError (max of an empty sequence), as before.
    """
    return max(
        metric_fn(prediction, ground_truth, xlingual=xlingual)
        for ground_truth in ground_truths
    )
def display_stats(cfg, name_classes, inters_over_union_classes):
    """Print one 'class-name<TAB>IoU%' line per class.

    cfg must expose ``NUM_CLASSES``; IoU values are fractions in [0, 1]
    (assumed — confirm against the evaluation code) and are printed as
    percentages rounded to two decimals.
    """
    for cls_idx in range(cfg.NUM_CLASSES):
        iou_pct = round(inters_over_union_classes[cls_idx] * 100, 2)
        print(name_classes[cls_idx] + '\t' + str(iou_pct))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.