code stringlengths 101 5.91M |
|---|
def add(m1: torch.Tensor, m2: torch.Tensor, inplace: bool) -> torch.Tensor:
    """Return the element-wise sum of ``m1`` and ``m2``.

    When ``inplace`` is True, ``m1`` is mutated via ``+=`` and returned;
    otherwise a fresh tensor holding ``m1 + m2`` is returned.
    """
    if inplace:
        m1 += m2
        return m1
    return m1 + m2
def evaluate(pols, dsol):
    """Evaluate every polynomial in ``pols`` at ``dsol``.

    Returns a list with one ``evaluate_polynomial`` result per input
    polynomial, in the same order.
    """
    return [evaluate_polynomial(poly, dsol) for poly in pols]
class DecoderConv(nn.Module):
    """Decoder building block: 3x3x3 Conv3d -> GroupNorm -> ReLU.

    GroupNorm uses ``out_ch // 4`` groups, so ``out_ch`` should be a
    multiple of 4.
    """

    def __init__(self, in_ch, out_ch):
        super(DecoderConv, self).__init__()
        self.conv = nn.Conv3d(in_ch, out_ch, 3, padding=1)
        self.gn = nn.GroupNorm(out_ch // 4, out_ch)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # conv -> norm -> activation, fused into a single expression.
        return self.relu(self.gn(self.conv(x)))
class LineByLineWithWeightTextDataset_Old(Dataset):
    """Weighted line-by-line LM dataset.

    Each input line must have the form ``weight###score###sentence``;
    malformed lines are silently dropped. Sentences are tokenized, and label
    positions up to and including the first '[BOS]' token are masked with
    -100 so the LM loss is computed only on the continuation.

    Fixes over the previous revision: leftover debug ``print`` calls were
    removed — including one that read the loop index *after* the loop and
    would raise NameError on an empty file — and an empty input file no
    longer crashes the column unpacking.
    """

    def __init__(self, tokenizer: PreTrainedTokenizer, file_path: str, block_size: int):
        assert os.path.isfile(file_path), f'Input file path {file_path} not found'
        logger.info('Creating features from dataset file at %s', file_path)
        with open(file_path, encoding='utf-8') as f:
            lines = [
                line.split('###')
                for line in f.read().splitlines()
                if line and not line.isspace() and len(line.split('###')) == 3
            ]
        # Columns: weight, score (currently unused), sentence text.
        weight, _score, sents = zip(*lines) if lines else ((), (), ())
        batch_encoding = tokenizer(list(sents), add_special_tokens=True,
                                   truncation=True, max_length=block_size)
        self.examples = batch_encoding['input_ids']
        # First token id of '[BOS]' marks the end of the prompt segment.
        separator = tokenizer('[BOS]', add_special_tokens=False)['input_ids'][0]
        self.labels = copy.deepcopy(self.examples)
        for i, elem in enumerate(self.labels):
            sep_idx = elem.index(separator) + 1
            # Mask everything up to and including the separator.
            self.labels[i][:sep_idx] = [-100] * sep_idx
        self.weight = [float(w) for w in weight]

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i):
        return (torch.tensor(self.examples[i], dtype=torch.long),
                torch.tensor(self.labels[i], dtype=torch.long),
                self.weight[i])
def main():
    """Start the NeuralChat audio service from the local YAML config."""
    executor = NeuralChatServerExecutor()
    executor(config_file='./audio_service.yaml',
             log_file='./audio_service.log')
def register_nnModule_instance_definition():
    """Scan every analyzed code line and register nn.Module (model) instance
    definitions into ``globals.list_model_name`` / ``globals.list_model_def_instance``.

    Also derives "wrapper" functions — functions whose return value is a model —
    so models obtained through wrapper calls are registered as well.
    Relies entirely on the shared ``globals`` analysis state being populated
    beforehand (``list_code_line_instance``, ``list_class_name``, ...).
    """
    logger.info('Analyzing nn.Module instance (model instance) definitions in all files ...')
    # Pass 1: collect candidate definition lines of the form `lhs = SomeModelExpr(...)`.
    def_cl = []
    for cl in globals.list_code_line_instance:
        if ((not cl.is_multi_line_comment) and (not cl.is_single_line_comment_or_empty)):
            (is_def, lhs, rhs) = of_definition_format(cl.line_content)
            stripped = cl.line_content.replace(' ', '')
            # Direct instantiation of a known model class (excluding lines inside
            # the model classes themselves, and excluding call-style return items).
            if (is_def and (rhs in (globals.list_class_name + ['Module', 'Sequential'])) and (cl.class_name not in globals.list_class_name) and ('(' not in cl.return_item)):
                def_cl.append(cl)
            # Common dynamic-construction idioms are matched textually.
            elif (is_def and ('__dict__[args.arch]' in cl.line_content)):
                def_cl.append(cl)
            elif (is_def and ('hub.load' in cl.line_content)):
                def_cl.append(cl)
            elif (is_def and ('onnx.load' in cl.line_content)):
                def_cl.append(cl)
            elif (is_def and ('keras.Sequential' in cl.line_content)):
                def_cl.append(cl)
            # `lhs = KnownClass.something...` (attribute access on a model class).
            elif (is_def and ('.' in stripped) and (stripped[(stripped.find('=') + 1):stripped.find('.')] in globals.list_class_name)):
                def_cl.append(cl)
    # Flatten the candidate lines into parallel per-field lists.
    list_lhs = []
    list_rhs = []
    list_is_in_func = []
    list_func_name = []
    list_return_item = []
    list_file_path = []
    list_line_idx = []
    list_func_def_line_idx = []
    for cl in def_cl:
        (is_def, lhs, rhs) = of_definition_format(cl.line_content)
        list_lhs.append(lhs)
        list_rhs.append(rhs)
        list_is_in_func.append(cl.is_in_func)
        list_func_name.append(cl.func_name)
        list_return_item.append(cl.return_item)
        list_file_path.append(cl.file_path)
        list_line_idx.append(cl.line_idx)
        list_func_def_line_idx.append(cl.func_def_line_idx)
    # Pass 2: register module-level models directly; function-level definitions
    # either register a model or mark the enclosing function as a wrapper
    # (when the model it builds is what the function returns).
    globals.list_wrapper_base_function_name = []
    for i in range(len(list_lhs)):
        if ((not list_is_in_func[i]) and ('tokenizer' not in list_lhs[i])):
            globals.list_model_name.append(list_lhs[i])
            MD = ModelDefinition(model_name=list_lhs[i], class_name=list_rhs[i], file_path=list_file_path[i], model_def_line_idx=list_line_idx[i], function_def_line_idx=(- 1), function_name='null')
            MD.print_info()
            globals.list_model_def_instance.append(MD)
        elif list_is_in_func[i]:
            if ((list_return_item[i] not in list_lhs) and (list_func_name[i] != '__init__') and ('tokenizer' not in list_lhs[i])):
                globals.list_model_name.append(list_lhs[i])
                MD = ModelDefinition(model_name=list_lhs[i], class_name=list_rhs[i], file_path=list_file_path[i], model_def_line_idx=list_line_idx[i], function_def_line_idx=list_func_def_line_idx[i], function_name=list_func_name[i])
                MD.print_info()
                globals.list_model_def_instance.append(MD)
            elif (list_return_item[i] in list_lhs):
                # The function returns the model it defines -> wrapper.
                globals.list_wrapper_base_function_name.append(list_func_name[i])
    # Functions that return a model-class constructor call directly are wrappers too.
    for cl in globals.list_code_line_instance:
        if (cl.is_in_func and (cl.line_idx == cl.func_return_idx) and (cl.return_item[:cl.return_item.find('(')] in globals.list_class_name)):
            globals.list_wrapper_base_function_name.append(cl.func_name)
    globals.list_wrapper_base_function_name = list(set(globals.list_wrapper_base_function_name))
    # Close the wrapper set over functions that wrap other wrappers.
    globals.list_wrapper_children_function_name = []
    for i in globals.list_wrapper_base_function_name:
        globals.list_wrapper_children_function_name += get_all_wrap_children(i)
    globals.list_wrapper_all_function_name = (globals.list_wrapper_base_function_name + globals.list_wrapper_children_function_name)
    globals.list_wrapper_all_function_name = list(set(globals.list_wrapper_all_function_name))
    # Second fixpoint round: functions that define-and-return a wrapper result
    # are themselves wrappers.
    for cl in globals.list_code_line_instance:
        if (cl.is_in_func and (not cl.is_multi_line_comment) and (not cl.is_single_line_comment_or_empty)):
            (is_def, lhs, rhs) = of_definition_format(cl.line_content)
            if (is_def and (rhs in globals.list_wrapper_all_function_name) and (cl.class_name not in globals.list_class_name) and (cl.return_item == lhs)):
                globals.list_wrapper_base_function_name.append(cl.func_name)
    globals.list_wrapper_base_function_name = list(set(globals.list_wrapper_base_function_name))
    for i in globals.list_wrapper_base_function_name:
        globals.list_wrapper_children_function_name += get_all_wrap_children(i)
    globals.list_wrapper_all_function_name += (globals.list_wrapper_base_function_name + globals.list_wrapper_children_function_name)
    globals.list_wrapper_all_function_name = list(set(globals.list_wrapper_all_function_name))
    logger.debug(f'globals.list_wrapper_all_function_name: {globals.list_wrapper_all_function_name}')
    # Pass 3: register models created via wrapper-function calls, filtering out
    # config/tokenizer/etc. assignments and calls made from inside wrappers.
    for cl in globals.list_code_line_instance:
        if ((not cl.is_multi_line_comment) and (not cl.is_single_line_comment_or_empty) and (cl.func_name != '__init__')):
            (is_def, lhs, rhs) = of_definition_format(cl.line_content)
            if (is_def and (rhs in globals.list_wrapper_all_function_name) and (rhs not in ['self.model', 'model', 'self.call', 'call']) and ('forward' not in rhs) and ('config' not in lhs) and ('congfig' not in lhs) and (',' not in lhs) and ('inference' not in lhs) and ('tokenizer' not in lhs) and (cl.class_name not in globals.list_class_name) and (cl.func_name not in globals.list_wrapper_all_function_name)):
                globals.list_model_name.append(lhs)
                MD = ModelDefinition(model_name=lhs, class_name=('(Note: this is a func-defined model) ' + rhs), file_path=cl.file_path, model_def_line_idx=cl.line_idx, function_def_line_idx=cl.func_def_line_idx, function_name=cl.func_name)
                MD.print_info()
                globals.list_model_def_instance.append(MD)
    globals.list_model_name = list(set(globals.list_model_name))
    logger.debug(f'model name list: {globals.list_model_name}')
class Voxelization(nn.Module):
    """Hard-voxelization wrapper that converts a point cloud into voxels.

    ``max_voxels`` is a (train, eval) pair: index 0 is used while
    ``self.training`` is True, index 1 at evaluation time.
    """

    def __init__(self, voxel_size, point_cloud_range, max_num_points, max_voxels=20000):
        super(Voxelization, self).__init__()
        self.voxel_size = voxel_size
        self.point_cloud_range = point_cloud_range
        self.max_num_points = max_num_points
        # Normalize a scalar limit into a (train, eval) pair.
        self.max_voxels = max_voxels if isinstance(max_voxels, tuple) else _pair(max_voxels)
        pc_range = torch.tensor(point_cloud_range, dtype=torch.float32)
        vsize = torch.tensor(voxel_size, dtype=torch.float32)
        # Number of voxels per axis: rounded extent / voxel size.
        grid = torch.round((pc_range[3:] - pc_range[:3]) / vsize).long()
        self.grid_size = grid
        # First two grid dims plus a unit dim, reversed — matches the layout
        # the original implementation produced.
        self.pcd_shape = list(reversed([*grid[:2], 1]))

    def forward(self, input):
        limit = self.max_voxels[0] if self.training else self.max_voxels[1]
        return voxelization(input, self.voxel_size, self.point_cloud_range,
                            self.max_num_points, limit)

    def __repr__(self):
        return (f'{self.__class__.__name__}('
                f'voxel_size={self.voxel_size}'
                f', point_cloud_range={self.point_cloud_range}'
                f', max_num_points={self.max_num_points}'
                f', max_voxels={self.max_voxels})')
def invalid_algo_raw_data() -> Dict[str, Dict[str, Any]]:
    """Load the mock test fixture and delete one algo entry to make it invalid."""
    with open('tests/mock_data_test.json') as handle:
        payload = json.load(handle)
    del payload['env_1']['task_1']['algo_1']
    return payload
def test_isotropic_eddington_dehnencore_in_nfw_dens_directint():
    """The Eddington DF of a Dehnen-core density embedded in an NFW potential
    should reproduce the target density under direct integration."""
    nfw = [potential.NFWPotential(amp=2.3, a=1.3)]
    dehnen = [potential.DehnenCoreSphericalPotential(amp=2.5, a=1.15)]
    df = eddingtondf(pot=nfw, denspot=dehnen)
    check_dens_directint(
        df,
        nfw,
        0.01,  # relative tolerance
        lambda r: potential.evaluateDensities(dehnen, r, 0),
        rmin=nfw[0]._scale / 10.0,
        rmax=nfw[0]._scale * 10.0,
        bins=31,
    )
    return None
def main(opt):
    """Translate ``opt.src``, compare predictions against ``opt.tgt``
    line-by-line, log the sequence accuracy, and rewrite ``opt.output`` as a
    TSV trace of (source, target, prediction).

    Bugs fixed: trace rows were space-separated although the header promised
    tab-separated columns; empty input files raised ZeroDivisionError; an
    unused ``per_length`` accumulator was removed.
    """
    translator = build_translator(opt, report_score=True)
    translator.translate(src_path=opt.src, tgt_path=opt.tgt, src_dir=opt.src_dir,
                         batch_size=opt.batch_size, attn_debug=opt.attn_debug)
    correct = 0
    total = 0
    trace = []
    with open(opt.src) as f_src, open(opt.tgt) as f_tgt, open(opt.output) as f_prd:
        # Lines are compared raw (including trailing newline); zip stops at
        # the shortest of the three files.
        for src, tgt, prd in zip(f_src, f_tgt, f_prd):
            if tgt == prd:
                correct += 1
            total += 1
            trace.append((src.strip(), tgt.strip(), prd.strip()))
    # Guard against empty inputs instead of raising ZeroDivisionError.
    logging.info('Sequence Accuracy: {}'.format(correct / total if total else 0.0))
    with open(opt.output, 'w') as f_prd:
        f_prd.write('source\ttarget\tprediction\n')
        for src, tgt, prd in trace:
            f_prd.write(f'{src}\t{tgt}\t{prd}\n')
def trim_scans_per_referit3d_data(referit_data, scans):
    """Drop from ``scans`` (in place) every scan id not referenced by the
    referit3d data, to reduce the memory footprint; returns ``scans``."""
    referenced = referit_data.scan_id.unique()
    stale = [scan_id for scan_id in scans if scan_id not in referenced]
    for scan_id in stale:
        del scans[scan_id]
    print('Dropped {} scans to reduce mem-foot-print.'.format(len(stale)))
    return scans
def register_image_dataset(name, dataset):
    """Register ``dataset`` under ``name`` in the module-level registry.

    Raises ValueError when the name is already taken.
    """
    global __image_datasets
    existing = list(__image_datasets.keys())
    if name in existing:
        raise ValueError(
            'The given name already exists, please choose another name excluding {}'.format(existing))
    __image_datasets[name] = dataset
class Network(object):
    """Base class for building TF1 networks layer-by-layer.

    ``self.layers`` maps layer names to tensors and ``self.terminals`` holds
    the inputs of the next op; subclasses define the graph in ``setup()`` by
    chaining ``feed(...)`` with the layer methods below.

    NOTE(review): several methods are written in the caffe-tensorflow style
    that normally relies on a ``@layer`` decorator to pop inputs off
    ``self.terminals`` (see the input-less ``self.conv(...)`` /
    ``self.batch_normalization(...)`` calls in ``unpool_as_conv`` /
    ``up_project``). That decorator is not visible in this chunk — confirm it
    exists upstream before reusing this class.
    """

    def __init__(self, inputs, batch, keep_prob, is_training, trainable=True):
        # inputs: mapping of input name -> tensor, copied into the layer table.
        self.inputs = inputs
        self.terminals = []
        self.layers = dict(inputs)
        self.trainable = trainable
        self.batch_size = batch
        self.keep_prob = keep_prob
        self.is_training = is_training
        self.setup()

    def setup(self):
        """Construct the network graph; must be overridden by subclasses."""
        raise NotImplementedError('Must be implemented by the subclass.')

    def load(self, data_path, session, ignore_missing=False):
        """Assign weights from a .npy snapshot into existing graph variables.

        When ``ignore_missing`` is True, parameters without a matching
        variable are skipped instead of re-raising ValueError.
        """
        data_dict = np.load(data_path, encoding='latin1').item()
        for op_name in data_dict:
            with tf.variable_scope(op_name, reuse=True):
                for (param_name, data) in iter(data_dict[op_name].items()):
                    try:
                        var = tf.get_variable(param_name)
                        session.run(var.assign(data))
                    except ValueError:
                        if (not ignore_missing):
                            raise

    def feed(self, *args):
        """Reset the terminals from layer names and/or tensors; returns self
        to allow chaining."""
        assert (len(args) != 0)
        self.terminals = []
        for fed_layer in args:
            if isinstance(fed_layer, str):
                try:
                    fed_layer = self.layers[fed_layer]
                except KeyError:
                    raise KeyError(('Unknown layer name fed: %s' % fed_layer))
            self.terminals.append(fed_layer)
        return self

    def get_output(self):
        """Return the most recent terminal tensor."""
        return self.terminals[(- 1)]

    def get_layer_output(self, name):
        """Return the tensor registered under *name*."""
        return self.layers[name]

    def get_unique_name(self, prefix):
        """Return '<prefix>_<n>' where n counts existing layers with *prefix*."""
        ident = (sum((t.startswith(prefix) for (t, _) in self.layers.items())) + 1)
        return ('%s_%d' % (prefix, ident))

    def make_var(self, name, shape):
        """Create (or fetch, under variable reuse) a float32 variable."""
        return tf.get_variable(name, shape, dtype='float32', trainable=self.trainable)

    def validate_padding(self, padding):
        # Only the two TF padding modes are supported.
        assert (padding in ('SAME', 'VALID'))

    def conv(self, input_data, k_h, k_w, c_o, s_h, s_w, name, relu=True, padding=DEFAULT_PADDING, group=1, biased=True):
        """2-D convolution with optional grouping, bias and ReLU.

        'SAME' padding is emulated with an explicit symmetric tf.pad followed
        by a VALID convolution.
        """
        self.validate_padding(padding)
        c_i = input_data.get_shape()[(- 1)]
        if (padding == 'SAME'):
            input_data = tf.pad(input_data, [[0, 0], [((k_h - 1) // 2), ((k_h - 1) // 2)], [((k_w - 1) // 2), ((k_w - 1) // 2)], [0, 0]], 'CONSTANT')
        assert ((c_i % group) == 0)
        assert ((c_o % group) == 0)
        convolve = (lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding='VALID'))
        with tf.variable_scope(name) as scope:
            kernel = self.make_var('weights', shape=[k_h, k_w, (c_i // group), c_o])
            if (group == 1):
                output = convolve(input_data, kernel)
            else:
                # Grouped convolution: split channels, convolve per group, re-concat.
                input_groups = tf.split(3, group, input_data)
                kernel_groups = tf.split(3, group, kernel)
                output_groups = [convolve(i, k) for (i, k) in zip(input_groups, kernel_groups)]
                output = tf.concat(3, output_groups)
            if biased:
                biases = self.make_var('biases', [c_o])
                output = tf.nn.bias_add(output, biases)
            if relu:
                output = tf.nn.relu(output, name=scope.name)
            return output

    def relu(self, input_data, name):
        """Stand-alone ReLU activation layer."""
        return tf.nn.relu(input_data, name=name)

    def max_pool(self, input_data, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
        """Max pooling with kernel (k_h, k_w) and stride (s_h, s_w)."""
        self.validate_padding(padding)
        return tf.nn.max_pool(input_data, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding, name=name)

    def avg_pool(self, input_data, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
        """Average pooling with kernel (k_h, k_w) and stride (s_h, s_w)."""
        self.validate_padding(padding)
        return tf.nn.avg_pool(input_data, ksize=[1, k_h, k_w, 1], strides=[1, s_h, s_w, 1], padding=padding, name=name)

    def lrn(self, input_data, radius, alpha, beta, name, bias=1.0):
        """Local response normalization (AlexNet-style)."""
        return tf.nn.local_response_normalization(input_data, depth_radius=radius, alpha=alpha, beta=beta, bias=bias, name=name)

    def concat(self, inputs, axis, name):
        # Uses the legacy TF<=0.x concat signature (concat_dim first).
        return tf.concat(concat_dim=axis, values=inputs, name=name)

    def add(self, inputs, name):
        """Element-wise sum of all input tensors."""
        return tf.add_n(inputs, name=name)

    def fc(self, input_data, num_out, name, relu=True):
        """Fully-connected layer; 4-D inputs are flattened first."""
        with tf.variable_scope(name) as scope:
            input_shape = input_data.get_shape()
            if (input_shape.ndims == 4):
                dim = 1
                for d in input_shape[1:].as_list():
                    dim *= d
                feed_in = tf.reshape(input_data, [(- 1), dim])
            else:
                (feed_in, dim) = (input_data, input_shape[(- 1)].value)
            weights = self.make_var('weights', shape=[dim, num_out])
            biases = self.make_var('biases', [num_out])
            op = (tf.nn.relu_layer if relu else tf.nn.xw_plus_b)
            fc = op(feed_in, weights, biases, name=scope.name)
            return fc

    def softmax(self, input_data, name):
        """Softmax; singleton spatial dims are squeezed away first.

        NOTE(review): under Python 3 ``map`` returns an iterator so
        ``len(input_shape)`` would raise TypeError — this appears to target
        Python 2 / TF1; confirm before reuse.
        """
        input_shape = map((lambda v: v.value), input_data.get_shape())
        if (len(input_shape) > 2):
            if ((input_shape[1] == 1) and (input_shape[2] == 1)):
                input_data = tf.squeeze(input_data, squeeze_dims=[1, 2])
            else:
                raise ValueError('Rank 2 tensor input expected for softmax!')
        return tf.nn.softmax(input_data, name)

    def batch_normalization(self, input_data, name, scale_offset=True, relu=False):
        """Batch norm with a manually maintained exponential moving average.

        During training the pop_mean/pop_var EMAs are updated (decay 0.999)
        via control dependencies; at inference they are used directly.
        """
        with tf.variable_scope(name) as scope:
            shape = [input_data.get_shape()[(- 1)]]
            pop_mean = tf.get_variable('mean', shape, initializer=tf.constant_initializer(0.0), trainable=False)
            pop_var = tf.get_variable('variance', shape, initializer=tf.constant_initializer(1.0), trainable=False)
            epsilon = 0.0001
            decay = 0.999
            if scale_offset:
                scale = tf.get_variable('scale', shape, initializer=tf.constant_initializer(1.0))
                offset = tf.get_variable('offset', shape, initializer=tf.constant_initializer(0.0))
            else:
                (scale, offset) = (None, None)
            if self.is_training:
                # Moments over batch and both spatial axes.
                (batch_mean, batch_var) = tf.nn.moments(input_data, [0, 1, 2])
                train_mean = tf.assign(pop_mean, ((pop_mean * decay) + (batch_mean * (1 - decay))))
                train_var = tf.assign(pop_var, ((pop_var * decay) + (batch_var * (1 - decay))))
                with tf.control_dependencies([train_mean, train_var]):
                    output = tf.nn.batch_normalization(input_data, batch_mean, batch_var, offset, scale, epsilon, name=name)
            else:
                output = tf.nn.batch_normalization(input_data, pop_mean, pop_var, offset, scale, epsilon, name=name)
            if relu:
                output = tf.nn.relu(output)
            return output

    def dropout(self, input_data, keep_prob, name):
        """Dropout layer keeping each unit with probability keep_prob."""
        return tf.nn.dropout(input_data, keep_prob, name=name)

    def unpool_as_conv(self, size, input_data, id, stride=1, ReLU=False, BN=True):
        """Up-sample by interleaving four convolutions (A-D) with different
        asymmetric paddings (up-projection building block, Laina et al. style).
        """
        # Convolution A (3x3)
        layerName = ('layer%s_ConvA' % id)
        self.feed(input_data)
        self.conv(3, 3, size[3], stride, stride, name=layerName, padding='SAME', relu=False)
        outputA = self.get_output()
        # Convolution B (2x3) on asymmetrically padded input
        layerName = ('layer%s_ConvB' % id)
        padded_input_B = tf.pad(input_data, [[0, 0], [1, 0], [1, 1], [0, 0]], 'CONSTANT')
        self.feed(padded_input_B)
        self.conv(2, 3, size[3], stride, stride, name=layerName, padding='VALID', relu=False)
        outputB = self.get_output()
        # Convolution C (3x2)
        layerName = ('layer%s_ConvC' % id)
        padded_input_C = tf.pad(input_data, [[0, 0], [1, 1], [1, 0], [0, 0]], 'CONSTANT')
        self.feed(padded_input_C)
        self.conv(3, 2, size[3], stride, stride, name=layerName, padding='VALID', relu=False)
        outputC = self.get_output()
        # Convolution D (2x2)
        layerName = ('layer%s_ConvD' % id)
        padded_input_D = tf.pad(input_data, [[0, 0], [1, 0], [1, 0], [0, 0]], 'CONSTANT')
        self.feed(padded_input_D)
        self.conv(2, 2, size[3], stride, stride, name=layerName, padding='VALID', relu=False)
        outputD = self.get_output()
        # Interleave the four outputs into a tensor of twice the spatial size.
        left = interleave([outputA, outputB], axis=1)
        right = interleave([outputC, outputD], axis=1)
        Y = interleave([left, right], axis=2)
        if BN:
            layerName = ('layer%s_BN' % id)
            self.feed(Y)
            self.batch_normalization(name=layerName, scale_offset=True, relu=False)
            Y = self.get_output()
        if ReLU:
            Y = tf.nn.relu(Y, name=layerName)
        return Y

    def up_project(self, size, id, stride=1, BN=True):
        """Residual up-projection: two unpool branches summed then ReLU'd."""
        input_data = self.get_output()
        # Branch 1: unpool + conv (+ BN)
        id_br1 = ('%s_br1' % id)
        out = self.unpool_as_conv(size, input_data, id_br1, stride, ReLU=True, BN=True)
        layerName = ('layer%s_Conv' % id)
        self.feed(out)
        self.conv(size[0], size[1], size[3], stride, stride, name=layerName, relu=False)
        if BN:
            layerName = ('layer%s_BN' % id)
            self.batch_normalization(name=layerName, scale_offset=True, relu=False)
        branch1_output = self.get_output()
        # Branch 2: plain unpool (identity-style shortcut)
        id_br2 = ('%s_br2' % id)
        branch2_output = self.unpool_as_conv(size, input_data, id_br2, stride, ReLU=False)
        # Sum branches and activate.
        layerName = ('layer%s_Sum' % id)
        output = tf.add_n([branch1_output, branch2_output], name=layerName)
        layerName = ('layer%s_ReLU' % id)
        output = tf.nn.relu(output, name=layerName)
        self.feed(output)
        return self
class TestGaussianMLPTaskEmbeddingPolicy(TfGraphTestCase):
    """Tests for GaussianMLPTaskEmbeddingPolicy.

    Bug fixed: the parametrize decorators had lost their ``@pytest.mark``
    prefix (bare ``.parametrize(...)`` lines are a SyntaxError); restored.
    """

    @pytest.mark.parametrize('obs_dim', [(2,), (2, 2)])
    @pytest.mark.parametrize('task_num', [1, 5])
    @pytest.mark.parametrize('latent_dim', [1, 5])
    @pytest.mark.parametrize('action_dim', [(2,), (2, 2)])
    def test_get_action(self, obs_dim, task_num, latent_dim, action_dim):
        """Actions obtained via latent, task or augmented obs are all valid."""
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        embedding_spec = InOutSpec(input_space=akro.Box(low=np.zeros(task_num), high=np.ones(task_num)), output_space=akro.Box(low=np.zeros(latent_dim), high=np.ones(latent_dim)))
        encoder = GaussianMLPEncoder(embedding_spec)
        policy = GaussianMLPTaskEmbeddingPolicy(env_spec=env.spec, encoder=encoder)
        env.reset()
        (obs, _, _, _) = env.step(1)
        latent = np.random.random((latent_dim,))
        task = np.zeros(task_num)
        task[0] = 1
        (action1, _) = policy.get_action_given_latent(obs, latent)
        (action2, _) = policy.get_action_given_task(obs, task)
        (action3, _) = policy.get_action(np.concatenate([obs.flatten(), task]))
        assert env.action_space.contains(action1)
        assert env.action_space.contains(action2)
        assert env.action_space.contains(action3)
        # Batched variants must also produce valid actions.
        (obses, latents, tasks) = (([obs] * 3), ([latent] * 3), ([task] * 3))
        aug_obses = ([np.concatenate([obs.flatten(), task])] * 3)
        (action1n, _) = policy.get_actions_given_latents(obses, latents)
        (action2n, _) = policy.get_actions_given_tasks(obses, tasks)
        (action3n, _) = policy.get_actions(aug_obses)
        for action in chain(action1n, action2n, action3n):
            assert env.action_space.contains(action)

    def test_get_latent(self):
        """A one-hot task maps to a latent of the configured dimensionality."""
        (obs_dim, action_dim, task_num, latent_dim) = ((2,), (2,), 5, 2)
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        embedding_spec = InOutSpec(input_space=akro.Box(low=np.zeros(task_num), high=np.ones(task_num)), output_space=akro.Box(low=np.zeros(latent_dim), high=np.ones(latent_dim)))
        encoder = GaussianMLPEncoder(embedding_spec)
        policy = GaussianMLPTaskEmbeddingPolicy(env_spec=env.spec, encoder=encoder)
        task_id = 3
        task_onehot = np.zeros(task_num)
        task_onehot[task_id] = 1
        (latent, latent_info) = policy.get_latent(task_onehot)
        assert (latent.shape == (latent_dim,))
        assert (latent_info['mean'].shape == (latent_dim,))
        assert (latent_info['log_std'].shape == (latent_dim,))

    def test_auxiliary(self):
        """Spaces and distributions exposed by the policy are consistent."""
        (obs_dim, action_dim, task_num, latent_dim) = ((2,), (2,), 2, 2)
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        embedding_spec = InOutSpec(input_space=akro.Box(low=np.zeros(task_num), high=np.ones(task_num)), output_space=akro.Box(low=np.zeros(latent_dim), high=np.ones(latent_dim)))
        encoder = GaussianMLPEncoder(embedding_spec)
        policy = GaussianMLPTaskEmbeddingPolicy(env_spec=env.spec, encoder=encoder)
        obs_input = tf.compat.v1.placeholder(tf.float32, shape=(None, None, 2))
        task_input = tf.compat.v1.placeholder(tf.float32, shape=(None, None, 2))
        policy.build(obs_input, task_input)
        assert (policy.distribution.loc.get_shape().as_list()[(- 1)] == env.action_space.flat_dim)
        assert (policy.encoder == encoder)
        assert (policy.latent_space.flat_dim == latent_dim)
        assert (policy.task_space.flat_dim == task_num)
        assert (policy.augmented_observation_space.flat_dim == (env.observation_space.flat_dim + task_num))
        assert (policy.encoder_distribution.loc.get_shape().as_list()[(- 1)] == latent_dim)

    def test_split_augmented_observation(self):
        """An augmented observation splits back into (obs, task)."""
        (obs_dim, task_num) = (3, 5)
        policy = mock.Mock(spec=GaussianMLPTaskEmbeddingPolicy)
        policy.task_space = mock.Mock()
        policy.task_space.flat_dim = task_num
        policy.split_augmented_observation = GaussianMLPTaskEmbeddingPolicy.split_augmented_observation
        obs = np.random.random(obs_dim)
        task = np.random.random(task_num)
        (o, t) = policy.split_augmented_observation(policy, np.concatenate([obs, task]))
        assert np.array_equal(obs, o)
        assert np.array_equal(task, t)

    def test_get_vars(self):
        """Trainable and global variables agree and react to assignment."""
        (obs_dim, action_dim, task_num, latent_dim) = ((2,), (2,), 5, 2)
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        embedding_spec = InOutSpec(input_space=akro.Box(low=np.zeros(task_num), high=np.ones(task_num)), output_space=akro.Box(low=np.zeros(latent_dim), high=np.ones(latent_dim)))
        encoder = GaussianMLPEncoder(embedding_spec, hidden_sizes=[32, 32, 32])
        policy = GaussianMLPTaskEmbeddingPolicy(env_spec=env.spec, encoder=encoder, hidden_sizes=[32, 32, 32])
        vars1 = sorted(policy.get_trainable_vars(), key=(lambda v: v.name))
        vars2 = sorted(policy.get_global_vars(), key=(lambda v: v.name))
        assert (vars1 == vars2)
        # 2 networks (policy + encoder), each 4 layers of weight+bias plus one extra var.
        assert (len(vars1) == (2 * ((4 * 2) + 1)))
        obs = np.random.random(obs_dim)
        latent = np.random.random((latent_dim,))
        for var in vars1:
            var.assign(np.ones(var.shape))
        assert np.any((policy.get_action_given_latent(obs, latent) != 0))
        for var in vars1:
            var.assign(np.zeros(var.shape))
        assert (not np.all((policy.get_action_given_latent(obs, latent) == 0)))

    def test_pickling(self):
        """The policy round-trips through pickle with its dist functions."""
        (obs_dim, action_dim, task_num, latent_dim) = ((2,), (2,), 5, 2)
        env = GarageEnv(DummyBoxEnv(obs_dim=obs_dim, action_dim=action_dim))
        embedding_spec = InOutSpec(input_space=akro.Box(low=np.zeros(task_num), high=np.ones(task_num)), output_space=akro.Box(low=np.zeros(latent_dim), high=np.ones(latent_dim)))
        encoder = GaussianMLPEncoder(embedding_spec)
        policy = GaussianMLPTaskEmbeddingPolicy(env_spec=env.spec, encoder=encoder)
        pickled = pickle.dumps(policy)
        with tf.compat.v1.variable_scope('resumed'):
            unpickled = pickle.loads(pickled)
            assert hasattr(unpickled, '_f_dist_obs_latent')
            assert hasattr(unpickled, '_f_dist_obs_task')

    def test_does_not_support_non_box_obs_space(self):
        """Non-Box observation spaces are rejected."""
        (task_num, latent_dim) = (5, 2)
        env = GarageEnv(DummyDictEnv(act_space_type='box'))
        with pytest.raises(ValueError, match='This task embedding policy does not supportnon akro.Box observation spaces.'):
            embedding_spec = InOutSpec(input_space=akro.Box(low=np.zeros(task_num), high=np.ones(task_num)), output_space=akro.Box(low=np.zeros(latent_dim), high=np.ones(latent_dim)))
            encoder = GaussianMLPEncoder(embedding_spec, hidden_sizes=[32, 32, 32])
            GaussianMLPTaskEmbeddingPolicy(env_spec=env.spec, encoder=encoder, hidden_sizes=[32, 32, 32])

    def test_does_not_support_non_box_action_space(self):
        """Non-Box action spaces are rejected."""
        (task_num, latent_dim) = (5, 2)
        env = GarageEnv(DummyDictEnv(act_space_type='discrete'))
        with pytest.raises(ValueError, match='This task embedding policy does not supportnon akro.Box action spaces.'):
            embedding_spec = InOutSpec(input_space=akro.Box(low=np.zeros(task_num), high=np.ones(task_num)), output_space=akro.Box(low=np.zeros(latent_dim), high=np.ones(latent_dim)))
            encoder = GaussianMLPEncoder(embedding_spec, hidden_sizes=[32, 32, 32])
            GaussianMLPTaskEmbeddingPolicy(env_spec=env.spec, encoder=encoder, hidden_sizes=[32, 32, 32])
class ChamferDistanceTestCase(unittest.TestCase):
    """Numerical gradient check for the CUDA Chamfer-distance op."""

    def test_chamfer_dist(self):
        # Two random point clouds of different sizes; requires_grad must be
        # set before the tensors are handed to gradcheck.
        cloud_a = torch.rand(4, 64, 3).float()
        cloud_b = torch.rand(4, 128, 3).float()
        cloud_a.requires_grad = True
        cloud_b.requires_grad = True
        print(gradcheck(ChamferFunction.apply, [cloud_a.cuda(), cloud_b.cuda()]))
def evaluate_all(model, data_loader, print_freq=50):
    """Validate a density-estimation model over ``data_loader``.

    Counts are the density sums scaled by 1/1000. Prints and returns
    ``(mae, mse)``, both averaged over the number of *batches*
    (len(data_loader)), matching the original accounting.
    """
    model.eval()
    criterion = torch.nn.MSELoss()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    abs_err_sum = 0.0
    sq_err_sum = 0.0
    losses = []
    tic = time.time()
    with torch.no_grad():
        for imgs, gts in data_loader:
            data_time.update(time.time() - tic)
            imgs = imgs.cuda()
            gts = gts.cuda()
            dens = model(imgs)
            for den, gt in zip(dens, gts):
                losses.append(criterion(den, gt).item())
                den_count = torch.sum(den) / 1000.0
                gt_count = torch.sum(gt) / 1000.0
                diff = gt_count - den_count
                abs_err_sum += abs(diff)
                sq_err_sum += diff * diff
            batch_time.update(time.time() - tic)
            tic = time.time()
    mae = abs_err_sum / len(data_loader)
    mse = torch.sqrt(sq_err_sum / len(data_loader))
    loss = torch.mean(torch.Tensor(losses))
    print('mae:', mae, 'mse:', mse, 'loss:', loss)
    return (mae, mse)
def training_params(is_gcloud=False, output_dir=None):
    """Assemble the TrainingParams configuration for this experiment.

    ``output_dir`` defaults to an experiment directory derived from this
    file's path when not supplied.
    """
    if not output_dir:
        output_dir = util.construct_experiment_output_dir(__file__)
    # Batch size shrinks as the progressive-growing phase (resolution) rises.
    batch_size_by_phase = {2: 128, 3: 128, 4: 64, 5: 32, 6: 16, 7: 6, 8: 3}
    images_per_phase = 384000
    # Each phase sees ~images_per_phase images, with a floor of 6000 steps.
    steps_by_phase = {
        phase: max(images_per_phase / bs, 6000)
        for phase, bs in batch_size_by_phase.items()
    }
    steps_by_phase[7] *= 2  # phase 7 trains twice as long
    return train.TrainingParams(
        description=DESCRIPTION,
        is_gcloud=is_gcloud,
        num_gpus=1,
        dataset_params=celeba_hq_dataset.get_dataset_params(is_gcloud=is_gcloud, crop_at_center=True),
        checkpoint_every_n_steps=None,
        checkpoint_every_n_secs=2 * 60 * 60,
        dynamic_steps_per_phase=steps_by_phase,
        dynamic_batch_size=batch_size_by_phase,
        stop_after=7,
        eval_every_n_secs=48 * 60 * 60,
        write_summaries_every_n_steps=700,
        infogan_summary_reps=0,
        output_dir=output_dir,
        allow_initial_partial_restore=True,
        noise_size=64,
        noise_stddev=1.0,
        summary_grid_size=3,
        infogan_cont_weight=10.0,
        infogan_cont_depth_to_num_vars={2: 16, 3: 16, 4: 16, 5: 16, 6: 16, 7: 0, 8: 0},
        generator_params=networks.GeneratorParams(channels_at_4x4=2048, channels_max=480, optimizer=('adam_b0_b99', 0.0005), ema_decay_for_visualization=0.999, weight_norm='equalized', norm='batch_norm_in_place', norm_per_gpu=True, double_conv=True, conditioning=False, infogan_input_method='append'),
        discriminator_params=networks.DiscriminatorParams(channels_at_2x2=4096, channels_max=512, conditioning=False, optimizer=('adam_b0_b99', 0.0005), weight_norm='equalized', norm=None, norm_per_gpu=True, double_conv=True, second_conv_channels_x2=True),
        use_gpu_tower_scope=True,
    )
def get_topk_arch_hidden(args, controller, gen_net, prev_archs, prev_hiddens):
    """Sample candidate architectures from the controller, score each with the
    Inception score, and return the top-k archs plus their hidden states.

    Returns ``(topk_archs, (topk_hxs, topk_cxs))`` with hidden states detached
    and marked non-differentiable.
    """
    logger.info(f'=> get top{args.topk} archs out of {args.num_candidate} candidate archs...')
    assert args.num_candidate >= args.topk
    controller.eval()
    stage = controller.cur_stage
    archs, _, _, hiddens = controller.sample(args.num_candidate, with_hidden=True,
                                             prev_archs=prev_archs, prev_hiddens=prev_hiddens)
    hxs, cxs = hiddens
    perf_by_idx = {}
    for idx in range(len(archs)):
        logger.info(f'arch: {archs[idx]}')
        gen_net.set_arch(archs[idx], stage)
        score = get_is(args, gen_net, args.rl_num_eval_img)
        logger.info(f'get Inception score of {score}')
        perf_by_idx[idx] = score
    # Reverse an ascending sort (instead of sorting descending) to keep the
    # tie ordering identical to the original implementation.
    ranked = sorted(perf_by_idx.items(), key=operator.itemgetter(1))[::-1][:args.topk]
    best_archs = []
    best_hxs = []
    best_cxs = []
    logger.info(f'top{args.topk} archs:')
    for entry in ranked:
        logger.info(entry)
        idx = entry[0]
        best_archs.append(archs[idx])
        best_hxs.append(hxs[idx].detach().requires_grad_(False))
        best_cxs.append(cxs[idx].detach().requires_grad_(False))
    return (best_archs, (best_hxs, best_cxs))
class FlaxCLIPVisionPreTrainedModel(metaclass=DummyObject):
    """Placeholder emitted when the Flax backend is unavailable; any attempt
    to instantiate it raises via ``requires_backends``."""
    # Backends this dummy stands in for.
    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
def define_G(input_nc=1, output_nc=1, ngf=64, netG='resnet_9blocks', norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Build, initialize, and return a generator network.

    ``netG`` selects the architecture ('resnet_9blocks', 'resnet_6blocks',
    'unet_128', 'unet_256'); unknown names raise NotImplementedError.
    """
    norm_layer = get_norm_layer(norm_type=norm)
    # Lazy dispatch table: only the selected generator is constructed.
    builders = {
        'resnet_9blocks': lambda: ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9),
        'resnet_6blocks': lambda: ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6),
        'unet_128': lambda: UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout),
        'unet_256': lambda: UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout),
    }
    if netG not in builders:
        raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
    return init_net(builders[netG](), init_type, init_gain, gpu_ids)
class NewsProcessor():
def __init__(self, raw_gdelt_csv_dir, download_news_dir, clean_gdelt_csv_dir):
self.sentence_model = SentenceTransformer('all-MiniLM-L6-v2')
self.kw_model = KeyBERT(model=self.sentence_model)
self.raw_gdelt_csv_dir = raw_gdelt_csv_dir
self.clean_gdelt_csv_dir = clean_gdelt_csv_dir
self.download_news_dir = download_news_dir
def parse_txt(txt_dir):
news_name = txt_dir.split('.')[0]
news_name = news_name.split('_')[2]
news = open(txt_dir, 'r')
news_content = news.read()
news.close()
return (news_name, news_content)
def clean_news_txt(news_content):
if ('' in news_content):
return False
if (len(news_content) < 100):
return False
if (len(news_content.splitlines()) <= 3):
return False
return True
def get_article_keyword_dir(self, event_id_path):
(article_dir, keyword_dir) = (None, None)
dir_list = os.listdir(event_id_path)
for dir_ in dir_list:
if dir_.startswith('article'):
article_dir = dir_
elif dir_.startswith('keyword'):
keyword_dir = dir_
return (article_dir, keyword_dir)
def merge_download_files_to_csv(self, db_storage, save_dir):
res = pd.DataFrame(columns=['GLOBALEVENTID', 'event_time', 'news_title', 'news_short_key_word'])
count = 0
raw_count = 0
for (idx, ts_id) in enumerate(os.listdir(db_storage)):
ts_id_path = os.path.join(db_storage, ts_id)
if os.path.isdir(ts_id_path):
for event_id in os.listdir(ts_id_path):
event_id_path = os.path.join(ts_id_path, event_id)
if (os.path.isdir(event_id_path) and (len(os.listdir(event_id_path)) > 0)):
raw_count += 1
if ((raw_count % 100) == 0):
print(('raw_count ' + str(raw_count)))
(article_dir, keyword_dir) = self.get_article_keyword_dir(event_id_path)
if (keyword_dir is not None):
continue
article_dir = os.path.join(event_id_path, article_dir)
(news_title, news_content) = NewsProcessor.parse_txt(article_dir)
add_content = NewsProcessor.clean_news_txt(news_content)
if add_content:
key_word = self.kw_model.extract_keywords(news_content, keyphrase_ngram_range=(4, 7), use_mmr=True, top_n=3)
key_word = list(zip(*key_word))[0]
key_word = ','.join(key_word)
res.loc[(count, 'GLOBALEVENTID')] = event_id
res.loc[(count, 'event_time')] = ts_id
res.loc[(count, 'news_title')] = news_title
res.loc[(count, 'news_short_key_word')] = key_word
count += 1
if ((count > 0) and ((count % 100) == 0)):
print(count)
res.to_csv(save_dir, index=False, header=True)
return
def read_gdelt_news_dir(self, csv_dir):
res = pd.read_csv(csv_dir, header=0)
return res
def merge_with_raw_gdelt(self, news_csv_dir, raw_csv_dir, target_dir):
df_new = self.read_gdelt_news_dir(news_csv_dir)
df_raw = self.read_gdelt_news_dir(raw_csv_dir)
res = pd.merge(df_raw, df_new, on=['GLOBALEVENTID'], how='right')
print(len(res))
res.to_csv(target_dir, index=False, header=True)
return
def remove_invaliad_str(self, news_title):
res = []
for x in news_title.split(' '):
if (x.isnumeric() or x.isalpha()):
res.append(x)
return ' '.join(res)
    def clean_news_headline(self, source_dir, target_dir):
        """Repair short/missing headlines from the article's source URL.

        For rows whose ``news_title`` is shorter than 20 characters, the
        title is rebuilt from the longest trailing path segment of
        ``SOURCEURL``, de-slugged, and filtered of non-word tokens.
        """
        res = pd.read_csv(source_dir, header=0)
        for i in range(len(res)):
            if (len(str(res.loc[(i, 'news_title')])) < 20):
                url_split = res.loc[(i, 'SOURCEURL')].split('/')
                # Prefer the last URL segment; fall back to earlier segments
                # when the tail is too short (e.g. an id or empty slug).
                if (len(url_split[(- 1)]) > 10):
                    title = url_split[(- 1)]
                elif (len(url_split[(- 2)]) > 10):
                    title = url_split[(- 2)]
                else:
                    title = url_split[(- 3)]
                # Protect common abbreviations before de-hyphenating the slug.
                title = title.replace('u-s', 'us')
                title = title.replace('u-n', 'un')
                title = title.replace('-', ' ')
                title = title.replace('_', ' ')
                title = self.remove_invaliad_str(title)
                print(i, title)
                res.loc[(i, 'news_title')] = title
        res.to_csv(target_dir, index=False, header=True)
        return
    def run(self):
        """End-to-end pipeline: extract keywords from downloaded news, merge
        with the raw GDELT rows, then clean headlines into the final CSV."""
        print('Extract keywords from downloaded news and merge them together')
        # Intermediate artifacts are written to the working directory.
        news_merge_dir = 'temp_news.csv'
        self.merge_download_files_to_csv(db_storage=self.download_news_dir, save_dir=news_merge_dir)
        print('Merge news with other event information into one file')
        news_gdelt_merge_dir = 'temp_news_gdelt_merge.csv'
        self.merge_with_raw_gdelt(news_csv_dir=news_merge_dir, raw_csv_dir=self.raw_gdelt_csv_dir, target_dir=news_gdelt_merge_dir)
        # NOTE(review): same progress message printed twice in the original;
        # left untouched.
        print('Merge news with other event information into one file')
        self.clean_news_headline(source_dir=news_gdelt_merge_dir, target_dir=self.clean_gdelt_csv_dir)
        return
@_REGISTRY.register()
class TTSRModel(SRModel):
    """Reference-based super-resolution model: restores `lq` guided by a
    high-resolution reference image `ref` and an optional `mask`.

    NOTE(review): the original source had a bare `_REGISTRY.register()`
    statement before the class, which registers nothing; restored here as
    the decorator form used by registry-based SR frameworks.
    """

    def feed_data(self, data):
        """Move the current batch to the model device; gt/ref/mask optional."""
        self.lq = data['lq'].to(self.device)
        if ('gt' in data):
            self.gt = data['gt'].to(self.device)
        if ('ref' in data):
            self.ref = data['ref'].to(self.device)
        if ('mask' in data):
            self.mask = data['mask'].to(self.device)

    def init_training_settings(self):
        """Build the optional EMA copy, the losses, optimizers and schedulers.

        Raises:
            ValueError: when no loss at all is configured.
        """
        self.net_g.train()
        train_opt = self.opt['train']
        self.ema_decay = train_opt.get('ema_decay', 0)
        if (self.ema_decay > 0):
            logger = get_root_logger()
            logger.info(f'Use Exponential Moving Average with decay: {self.ema_decay}')
            # EMA network mirrors net_g; load pretrained EMA weights when
            # available, otherwise seed it with the current weights (decay 0).
            self.net_g_ema = build_network(self.opt['network_g']).to(self.device)
            load_path = self.opt['path'].get('pretrain_network_g', None)
            if (load_path is not None):
                self.load_network(self.net_g_ema, load_path, self.opt['path'].get('strict_load_g', True), 'params_ema')
            else:
                self.model_ema(0)
            self.net_g_ema.eval()
        # Each loss is optional and enabled by its config section.
        if train_opt.get('pixel_opt'):
            self.cri_pix = build_loss(train_opt['pixel_opt']).to(self.device)
        else:
            self.cri_pix = None
        if train_opt.get('perceptual_opt'):
            self.cri_perceptual = build_loss(train_opt['perceptual_opt']).to(self.device)
        else:
            self.cri_perceptual = None
        if train_opt.get('spatial_opt'):
            self.cri_spatial = build_loss(train_opt['spatial_opt']).to(self.device)
        else:
            self.cri_spatial = None
        if train_opt.get('grad_opt'):
            self.cri_grad = build_loss(train_opt['grad_opt']).to(self.device)
        else:
            self.cri_grad = None
        if ((self.cri_pix is None) and (self.cri_perceptual is None) and (self.cri_spatial is None) and (self.cri_grad is None)):
            raise ValueError('All losses are None. Please check.')
        self.setup_optimizers()
        self.setup_schedulers()

    def optimize_parameters(self, current_iter):
        """One generator step: forward, sum the enabled losses, backward."""
        self.optimizer_g.zero_grad()
        if hasattr(self, 'mask'):
            self.output = self.net_g(self.lq, self.ref, self.mask)
        else:
            self.output = self.net_g(self.lq, self.ref)
        l_total = 0
        loss_dict = OrderedDict()
        if self.cri_pix:
            l_pix = self.cri_pix(self.output, self.gt)
            l_total += l_pix
            loss_dict['l_pix'] = l_pix
        if self.cri_perceptual:
            (l_percep, l_style) = self.cri_perceptual(self.output, self.gt)
            if (l_percep is not None):
                l_total += l_percep
                loss_dict['l_percep'] = l_percep
            if (l_style is not None):
                l_total += l_style
                loss_dict['l_style'] = l_style
        if self.cri_spatial:
            # NOTE(review): spatial/gradient losses compare against the
            # *input* self.lq rather than self.gt — presumably an input-
            # consistency regularizer; confirm against the loss definitions.
            l_spatial = self.cri_spatial(self.output, self.lq)
            l_total += l_spatial
            loss_dict['l_spatial'] = l_spatial
        if self.cri_grad:
            l_grad = self.cri_grad(self.output, self.lq)
            l_total += l_grad
            loss_dict['l_grad'] = l_grad
        l_total.backward()
        self.optimizer_g.step()
        self.log_dict = self.reduce_loss_dict(loss_dict)
        if (self.ema_decay > 0):
            self.model_ema(decay=self.ema_decay)

    def test(self):
        """Inference pass without gradients, preferring the EMA network."""
        if hasattr(self, 'net_g_ema'):
            self.net_g_ema.eval()
            with torch.no_grad():
                if hasattr(self, 'mask'):
                    self.output = self.net_g_ema(self.lq, self.ref, self.mask)
                else:
                    self.output = self.net_g_ema(self.lq, self.ref)
        else:
            self.net_g.eval()
            with torch.no_grad():
                if hasattr(self, 'mask'):
                    self.output = self.net_g(self.lq, self.ref, self.mask)
                else:
                    self.output = self.net_g(self.lq, self.ref)
            self.net_g.train()

    def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
        """Single-process validation: run every sample, optionally save the
        SR image, and accumulate/average the configured metrics."""
        dataset_name = dataloader.dataset.opt['name']
        with_metrics = (self.opt['val'].get('metrics') is not None)
        if with_metrics:
            self.metric_results = {metric: 0 for metric in self.opt['val']['metrics'].keys()}
        pbar = tqdm(total=len(dataloader), unit='image')
        for (idx, val_data) in enumerate(dataloader):
            img_name = osp.splitext(osp.basename(val_data['lq_path'][0]))[0]
            self.feed_data(val_data)
            self.test()
            visuals = self.get_current_visuals()
            sr_img = tensor2img([visuals['result']])
            if ('gt' in visuals):
                gt_img = tensor2img([visuals['gt']])
                del self.gt
            if ('ref' in visuals):
                ref_img = tensor2img([visuals['ref']])
                del self.ref
            # Free GPU tensors between samples to keep validation memory flat.
            del self.lq
            del self.output
            torch.cuda.empty_cache()
            if save_img:
                if self.opt['is_train']:
                    save_img_path = osp.join(self.opt['path']['visualization'], img_name, f'{img_name}_{current_iter}.png')
                elif self.opt['val']['suffix']:
                    save_img_path = osp.join(self.opt['path']['visualization'], dataset_name, f"{img_name}_{self.opt['val']['suffix']}.png")
                else:
                    save_img_path = osp.join(self.opt['path']['visualization'], dataset_name, f"{img_name}_{self.opt['name']}.png")
                imwrite(sr_img, save_img_path)
            if with_metrics:
                for (name, opt_) in self.opt['val']['metrics'].items():
                    metric_data = dict(img1=sr_img, img2=gt_img)
                    self.metric_results[name] += calculate_metric(metric_data, opt_)
            pbar.update(1)
            pbar.set_description(f'Test {img_name}')
        pbar.close()
        if with_metrics:
            for metric in self.metric_results.keys():
                # Average over the number of validated samples.
                self.metric_results[metric] /= (idx + 1)
            self._log_validation_metric_values(current_iter, dataset_name, tb_logger)

    def get_current_visuals(self):
        """Return detached CPU copies of lq/ref/result (and gt when present)."""
        out_dict = OrderedDict()
        out_dict['lq'] = self.lq.detach().cpu()
        out_dict['ref'] = self.ref.detach().cpu()
        out_dict['result'] = self.output.detach().cpu()
        if hasattr(self, 'gt'):
            out_dict['gt'] = self.gt.detach().cpu()
        return out_dict
class RandomCycleIter():
    """Endless iterator over `data` in random order.

    Draws a fresh random permutation (via ``torch.randperm``) whenever the
    current one is exhausted, so every pass visits each item exactly once.
    """

    def __init__(self, data, generator=None):
        self.data = data
        self.length = len(data)
        # Initial permutation; subsequent ones are drawn lazily in __next__.
        self.index = torch.randperm(self.length, generator=generator).numpy()
        self.i = 0
        self.generator = generator

    def __iter__(self):
        return self

    def __len__(self):
        return len(self.data)

    def __next__(self):
        # Reshuffle once the current permutation has been consumed.
        if self.i == self.length:
            self.index = torch.randperm(self.length, generator=self.generator).numpy()
            self.i = 0
        item = self.data[self.index[self.i]]
        self.i += 1
        return item
def negative_accuracy(y_true, y_predicted, sample_weight=None):
    """Return minus the classification accuracy (for minimizers).

    Accepts DataFrames or arrays; 1-D / (n, 1) predictions are treated as
    binary probabilities thresholded at 0.5, 2-D predictions as per-class
    scores reduced by arg-max.
    """
    if isinstance(y_true, pd.DataFrame):
        y_true = np.array(y_true)
    if isinstance(y_predicted, pd.DataFrame):
        y_predicted = np.array(y_predicted)
    # Collapse an (n, 1) column vector to 1-D before thresholding.
    if y_predicted.ndim == 2 and y_predicted.shape[1] == 1:
        y_predicted = y_predicted.ravel()
    if y_predicted.ndim == 1:
        y_predicted = (y_predicted > 0.5).astype(int)
    else:
        y_predicted = np.argmax(y_predicted, axis=1)
    return -accuracy_score(y_true, y_predicted, sample_weight=sample_weight)
class ConvE(torch.nn.Module):
    """ConvE knowledge-graph embedding model (Dettmers et al., 2018).

    Scores (entity, relation) pairs against all entities by stacking the
    reshaped embeddings into a 2-D "image", convolving, and projecting back
    to the embedding space.  Hyperparameters come from the global `Config`.
    """

    def __init__(self, num_entities, num_relations):
        super(ConvE, self).__init__()
        self.emb_e = torch.nn.Embedding(num_entities, Config.embedding_dim, padding_idx=0)
        self.emb_rel = torch.nn.Embedding(num_relations, Config.embedding_dim, padding_idx=0)
        self.inp_drop = torch.nn.Dropout(Config.input_dropout)
        self.hidden_drop = torch.nn.Dropout(Config.dropout)
        self.feature_map_drop = torch.nn.Dropout2d(Config.feature_map_dropout)
        self.loss = torch.nn.BCELoss()
        self.conv1 = torch.nn.Conv2d(1, 32, (3, 3), 1, 0, bias=Config.use_bias)
        self.bn0 = torch.nn.BatchNorm2d(1)
        self.bn1 = torch.nn.BatchNorm2d(32)
        self.bn2 = torch.nn.BatchNorm1d(Config.embedding_dim)
        # Per-entity output bias.
        self.register_parameter('b', Parameter(torch.zeros(num_entities)))
        # 10368 = 32 feature maps * 18 * 18 after a 3x3 valid conv on the
        # stacked 20x20 input — assumes embedding_dim == 200 (TODO confirm).
        self.fc = torch.nn.Linear(10368, Config.embedding_dim)
        print(num_entities, num_relations)

    def init(self):
        """Xavier-initialize both embedding tables."""
        xavier_normal_(self.emb_e.weight.data)
        xavier_normal_(self.emb_rel.weight.data)

    def forward(self, e1, rel):
        """Score each (e1, rel) row against every entity.

        Returns:
            Sigmoid scores of shape (batch, num_entities).
        """
        # Reshape each embedding row to a 10x20 "image" and stack vertically.
        e1_embedded = self.emb_e(e1).view((- 1), 1, 10, 20)
        rel_embedded = self.emb_rel(rel).view((- 1), 1, 10, 20)
        stacked_inputs = torch.cat([e1_embedded, rel_embedded], 2)
        stacked_inputs = self.bn0(stacked_inputs)
        x = self.inp_drop(stacked_inputs)
        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x)
        x = self.feature_map_drop(x)
        # BUGFIX: flatten by the actual batch dimension rather than
        # Config.batch_size, so the final (smaller) batch does not crash.
        x = x.view(x.size(0), (- 1))
        x = self.fc(x)
        x = self.hidden_drop(x)
        x = self.bn2(x)
        x = F.relu(x)
        # Score against all entity embeddings at once.
        x = torch.mm(x, self.emb_e.weight.transpose(1, 0))
        x += self.b.expand_as(x)
        # torch.sigmoid replaces the deprecated F.sigmoid (same function).
        pred = torch.sigmoid(x)
        return pred
class CosmphiDiskPotential(planarPotential):
    """Planar m-cosine disk perturbation potential.

    Phi(R, phi) ~ phio * (R/r1)^p * cos[m*(phi - phib)], with an optional
    inner cutoff radius ``rb`` inside of which a regularized radial form
    is used.  The amplitude/phase may alternatively be specified through
    the pair (cp, sp).
    """

    def __init__(self, amp=1.0, phib=(25.0 * _degtorad), p=1.0, phio=0.01, m=4, r1=1.0, rb=None, cp=None, sp=None, ro=None, vo=None):
        """Initialize; (cp, sp) overrides (phio, phib) when both are given."""
        planarPotential.__init__(self, amp=amp, ro=ro, vo=vo)
        # Convert possibly unit-full inputs to internal galpy units.
        phib = conversion.parse_angle(phib)
        r1 = conversion.parse_length(r1, ro=self._ro)
        rb = conversion.parse_length(rb, ro=self._ro)
        phio = conversion.parse_energy(phio, vo=self._vo)
        cp = conversion.parse_energy(cp, vo=self._vo)
        sp = conversion.parse_energy(sp, vo=self._vo)
        # Fold the reference radius r1^p into the overall amplitude.
        self._r1p = (r1 ** p)
        self._amp /= self._r1p
        self.hasC = False
        self._m = int(m)
        if ((cp is None) or (sp is None)):
            self._phib = phib
            self._mphio = (phio * self._m)
        else:
            # Amplitude and phase from the (cp, sp) parametrization.
            self._mphio = numpy.sqrt(((cp * cp) + (sp * sp)))
            self._phib = (numpy.arctan((sp / cp)) / self._m)
            if ((m < 2.0) and (cp < 0.0)):
                # arctan branch correction for the m=1, cp<0 case.
                self._phib = (numpy.pi + self._phib)
        self._p = p
        if (rb is None):
            # No inner cutoff: neutral values so the R >= rb branch applies.
            self._rb = 0.0
            self._rbp = 1.0
            self._rb2p = 1.0
        else:
            self._rb = rb
            self._rbp = (self._rb ** self._p)
            self._rb2p = (self._rbp ** 2.0)
        self._mphib = (self._m * self._phib)
        self.hasC = True
        self.hasC_dxdv = True

    def _evaluate(self, R, phi=0.0, t=0.0):
        """Potential at (R, phi); piecewise about the cutoff radius rb."""
        if (R < self._rb):
            return ((((self._mphio / self._m) * numpy.cos(((self._m * phi) - self._mphib))) * self._rbp) * ((2.0 * self._r1p) - (self._rbp / (R ** self._p))))
        else:
            return (((self._mphio / self._m) * (R ** self._p)) * numpy.cos(((self._m * phi) - self._mphib)))

    def _Rforce(self, R, phi=0.0, t=0.0):
        """Radial force (= -dPhi/dR)."""
        if (R < self._rb):
            return ((((((- self._p) * self._mphio) / self._m) * self._rb2p) / (R ** (self._p + 1.0))) * numpy.cos(((self._m * phi) - self._mphib)))
        else:
            return (((((- self._p) * self._mphio) / self._m) * (R ** (self._p - 1.0))) * numpy.cos(((self._m * phi) - self._mphib)))

    def _phitorque(self, R, phi=0.0, t=0.0):
        """Azimuthal torque (= -dPhi/dphi)."""
        if (R < self._rb):
            return (((self._mphio * numpy.sin(((self._m * phi) - self._mphib))) * self._rbp) * ((2.0 * self._r1p) - (self._rbp / (R ** self._p))))
        else:
            return ((self._mphio * (R ** self._p)) * numpy.sin(((self._m * phi) - self._mphib)))

    def _R2deriv(self, R, phi=0.0, t=0.0):
        """Second radial derivative d^2 Phi / dR^2."""
        if (R < self._rb):
            return (((((((- self._p) * (self._p + 1.0)) * self._mphio) / self._m) * self._rb2p) / (R ** (self._p + 2.0))) * numpy.cos(((self._m * phi) - self._mphib)))
        else:
            return (((((self._p * (self._p - 1.0)) / self._m) * self._mphio) * (R ** (self._p - 2.0))) * numpy.cos(((self._m * phi) - self._mphib)))

    def _phi2deriv(self, R, phi=0.0, t=0.0):
        """Second azimuthal derivative d^2 Phi / dphi^2."""
        if (R < self._rb):
            return (((((- self._m) * self._mphio) * numpy.cos(((self._m * phi) - self._mphib))) * self._rbp) * ((2.0 * self._r1p) - (self._rbp / (R ** self._p))))
        else:
            return ((((- self._m) * self._mphio) * (R ** self._p)) * numpy.cos(((self._m * phi) - self._mphib)))

    def _Rphideriv(self, R, phi=0.0, t=0.0):
        """Mixed derivative d^2 Phi / dR dphi."""
        if (R < self._rb):
            return ((((((- self._p) * self._mphio) / self._m) * self._rb2p) / (R ** (self._p + 1.0))) * numpy.sin(((self._m * phi) - self._mphib)))
        else:
            return ((((- self._p) * self._mphio) * (R ** (self._p - 1.0))) * numpy.sin(((self._m * phi) - self._mphib)))
class ExperimentPlannerCT2(ExperimentPlanner):
    """Experiment planner variant that normalizes CT modalities with the
    'CT2' scheme (all other modalities fall back to 'nonCT')."""

    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super(ExperimentPlannerCT2, self).__init__(folder_with_cropped_data, preprocessed_output_folder)
        self.data_identifier = 'nnUNet_CT2'
        self.plans_fname = join(self.preprocessed_output_folder, ('nnUNetPlans' + 'CT2_plans_3D.pkl'))

    def determine_normalization_scheme(self):
        """Map each modality index to its normalization scheme name."""
        modalities = self.dataset_properties['modalities']
        return OrderedDict(
            (idx, 'CT2' if modalities[idx] == 'CT' else 'nonCT')
            for idx in range(len(modalities))
        )
@WordFilter.register('stopwords')
class StopwordFilter(WordFilter):
    """Word filter that removes common English stopwords and punctuation.

    NOTE(review): the original source had a bare ``('stopwords')``
    expression before the class — evidently a stripped registration
    decorator; restored here in the registrable-class-decorator form.
    """

    def __init__(self):
        # Lower-cased token texts removed by filter_words.
        self.stopwords = set(['I', 'a', 'aboard', 'about', 'above', 'accordance', 'according', 'across', 'after', 'against', 'along', 'alongside', 'also', 'am', 'amid', 'amidst', 'an', 'and', 'apart', 'are', 'around', 'as', 'aside', 'astride', 'at', 'atop', 'back', 'be', 'because', 'before', 'behind', 'below', 'beneath', 'beside', 'besides', 'between', 'beyond', 'but', 'by', 'concerning', 'do', 'down', 'due', 'during', 'either', 'except', 'exclusive', 'false', 'for', 'from', 'happen', 'he', 'her', 'hers', 'herself', 'him', 'himself', 'his', 'how', 'how many', 'how much', 'i', 'if', 'in', 'including', 'inside', 'instead', 'into', 'irrespective', 'is', 'it', 'its', 'itself', 'less', 'me', 'mine', 'minus', 'my', 'myself', 'neither', 'next', 'not', 'occur', 'of', 'off', 'on', 'onto', 'opposite', 'or', 'our', 'ours', 'ourselves', 'out', 'out of', 'outside', 'over', 'owing', 'per', 'prepatory', 'previous', 'prior', 'pursuant', 'regarding', 's', 'sans', 'she', 'subsequent', 'such', 'than', 'thanks', 'that', 'the', 'their', 'theirs', 'them', 'themselves', 'then', 'these', 'they', 'this', 'those', 'through', 'throughout', 'thru', 'till', 'to', 'together', 'top', 'toward', 'towards', 'true', 'under', 'underneath', 'unlike', 'until', 'up', 'upon', 'us', 'using', 'versus', 'via', 'was', 'we', 'were', 'what', 'when', 'where', 'which', 'who', 'why', 'will', 'with', 'within', 'without', 'you', 'your', 'yours', 'yourself', 'yourselves', ',', '.', ':', '!', ';', "'", '"', '&', '$', '#', '', '(', ')', '?'])

    def filter_words(self, words: List[Token]) -> List[Token]:
        """Return `words` with stopword tokens (case-insensitive) removed."""
        return [word for word in words if (word.text.lower() not in self.stopwords)]
class VideoDiffFramesDataset_MoCo(Dataset):
    """Dataset yielding clips of video frames plus per-frame second
    differences (motion signal), for MoCo-style training.

    Each item is ``[frames, frames, frame_diffs]`` with the first tensor
    duplicated (two augmentation "views" sharing the same frames).
    """

    def __init__(self, datapath, idspath, used_fps, img_size, num_frames, limit):
        super().__init__()
        self.limit = limit
        # NOTE(review): `boarden` (sic) and the derived lower/upper bounds
        # are never used in the visible code — possibly leftovers from a
        # filtering step; verify before removing.
        self.boarden = 0.4
        self.lower_bound = max(0, (self.limit - self.boarden))
        self.upper_bound = min(1, (self.limit + self.boarden))
        self.img_size = img_size
        self.frame_path = datapath
        self.num_frames = num_frames
        logger = get_logger()
        if (idspath is not None):
            # Explicit list of relative video directories.
            self.video_ids = json.load(open(idspath, 'r'))
        else:
            # Otherwise enumerate videos under each requested fps subfolder.
            assert used_fps
            used_fps = used_fps.split(',')
            logger.info(f'*** Used FPS {used_fps} for datapath: {datapath}')
            self.video_ids = []
            for cfps in used_fps:
                cfps_path = os.path.join(datapath, cfps)
                cvideos = [os.path.join(cfps, v) for v in os.listdir(cfps_path)]
                self.video_ids += cvideos
            shuffle(self.video_ids)
        # Pre-list the (sorted) frame files of every video once.
        self.id2files = dict()
        for cid in self.video_ids:
            cur_path = os.path.join(self.frame_path, cid)
            files = sorted(os.listdir(cur_path))
            self.id2files[cid] = files
        self.transform = transforms.Compose([transforms.Resize((img_size, img_size), interpolation=PIL.Image.BILINEAR), transforms.ToTensor()])
        logger = get_logger()
        logger.info(f'{len(self.video_ids)} videos from datapath {datapath}, img_size: {img_size}, num_frames: {num_frames}')

    def __len__(self):
        return len(self.video_ids)

    def skip_sample(self, ind):
        """Fall through to the next index (wrapping) when a video is unusable."""
        if (ind >= (self.__len__() - 1)):
            return self.__getitem__(0)
        return self.__getitem__((ind + 1))

    def __getitem__(self, index):
        """Load num_frames consecutive frames and their second differences.

        Needs num_frames + 2 files (one frame of context on each side of
        the clip); too-short videos are skipped.
        """
        video_id = self.video_ids[index]
        files = self.id2files[video_id]
        if (len(files) < (self.num_frames + 2)):
            return self.skip_sample(index)
        elif (len(files) == (self.num_frames + 2)):
            start = 0
        else:
            # Random temporal crop.
            start = np.random.choice(range(((len(files) - self.num_frames) - 2)))
        cur_path = os.path.join(self.frame_path, video_id)
        pre_img = get_img_from_path(os.path.join(cur_path, files[start]), transform=self.transform)
        nxt_img = get_img_from_path(os.path.join(cur_path, files[(start + 1)]), transform=self.transform)
        (imgs, imgs_mo) = ([], [])
        for file in files[(start + 2):((start + 2) + self.num_frames)]:
            cur_img = nxt_img
            nxt_img = get_img_from_path(os.path.join(cur_path, file), transform=self.transform)
            # Discrete Laplacian in time: 2*cur - prev - next.
            # NOTE(review): pre_img is never advanced inside the loop, so
            # after the first step this is 2*cur - frame[start] - next rather
            # than a true second difference — confirm whether intentional.
            cur_diff = (((cur_img * 2) - pre_img) - nxt_img)
            imgs.append(cur_img.unsqueeze(0))
            imgs_mo.append(cur_diff.unsqueeze(0))
        ret_img = torch.cat(imgs, dim=0)
        ret_img_mo = torch.cat(imgs_mo, dim=0)
        return [ret_img, ret_img, ret_img_mo]
class SimpleMLPModel(Model):
    """Stub model whose output is a single learned scalar broadcast to
    shape (batch, output_dim)."""

    def __init__(self, output_dim, name=None, *args, **kwargs):
        # Extra positional/keyword args are accepted (and ignored) so the
        # constructor matches the interface of richer models.
        super().__init__(name)
        self.output_dim = output_dim

    def _build(self, obs_input, name=None):
        """Create the trainable scalar and tile it across the batch."""
        scalar = tf.compat.v1.get_variable('return_var', (), initializer=tf.constant_initializer(0.5))
        batch = tf.shape(obs_input)[0]
        return tf.fill((batch, self.output_dim), scalar)
def shift_and_rotate_coordination(orig_x, orig_y, orig_d, coordi_shift_x, coordi_shift_y, coordi_rotate_d):
    """Translate a pose into a shifted frame, then rotate that frame.

    Returns:
        Tuple ``(x, y, d)`` of the pose expressed in the shifted-and-rotated
        coordinate system.
    """
    shifted_x, shifted_y = shift_coordination(orig_x, orig_y, coordi_shift_x, coordi_shift_y)
    return rotate_coordination(shifted_x, shifted_y, orig_d, coordi_rotate_d)
class MT5Tokenizer(metaclass=DummyObject):
    # Autogenerated placeholder used when `sentencepiece` is not installed:
    # instantiating it raises a helpful ImportError via requires_backends.
    _backends = ['sentencepiece']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['sentencepiece'])
def prepare_xlnet_input(args, _, tokenizer, prompt_text):
    """Prepend the XLNet conditioning prefix to the prompt.

    Preference order: ``args.prefix``, then ``args.padding_text``, then the
    module-level PREFIX default (XLNet needs leading context to generate
    sensibly from short prompts).
    """
    if args.prefix:
        prefix = args.prefix
    elif args.padding_text:
        prefix = args.padding_text
    else:
        prefix = PREFIX
    return prefix + prompt_text
def intersectionAndUnionGPU(output, target, K, ignore_index=255):
    """Per-class intersection/union/target areas for mIoU, on-device.

    Args:
        output: Predicted labels, 1-3 dims, same shape as `target`.
        target: Ground-truth labels (may contain `ignore_index`).
        K: Number of classes.
        ignore_index: Label excluded from the statistics.

    Returns:
        Tuple of K-length tensors ``(intersection, union, target_area)``.
    Note: `output` is flattened and mutated in place at ignored positions.
    """
    assert output.dim() in [1, 2, 3]
    assert output.shape == target.shape
    output = output.view(-1)
    target = target.view(-1)
    # Stamp ignored pixels into the prediction so they can never be counted
    # as an intersection with a valid class.
    output[target == ignore_index] = ignore_index
    intersection = output[output == target]
    if len(intersection) > 0:
        area_intersection = torch.histc(intersection, bins=K, min=0, max=K - 1)
    else:
        # histc on an empty tensor is avoided; all-zero histogram instead.
        area_intersection = torch.zeros(K, dtype=output.dtype, device=output.device)
    area_output = torch.histc(output, bins=K, min=0, max=K - 1)
    area_target = torch.histc(target, bins=K, min=0, max=K - 1)
    area_union = area_output + area_target - area_intersection
    return (area_intersection, area_union, area_target)
def load_dataset(all_img_dbs, txt_dbs, img_dbs, args, is_train):
    """Build ITM dataset(s) from text/image LMDBs.

    Training mode expects parallel lists of txt/img db paths and returns
    their concatenation; otherwise a single dataset is built from one pair
    with no text-length cap.
    """
    if not is_train:
        # Validation/inference: a single db pair, unlimited text length (-1).
        img_db = all_img_dbs[img_dbs]
        txt_db = TxtTokLmdb(txt_dbs, (- 1))
        return ItmFastDataset(txt_db, img_db, args.inf_minibatch_size, args.img_meta, args.tokenizer)
    # Training: one dataset per (txt, img) pair, concatenated.
    parts = []
    for txt_path, img_path in zip(txt_dbs, img_dbs):
        txt_db = TxtTokLmdb(txt_path, args.max_txt_len)
        parts.append(ItmFastDataset(txt_db, all_img_dbs[img_path], args.num_hard_negatives, args.img_meta, args.tokenizer))
    return ConcatDataset(parts)
def warn_deprecated_model_variant(pretrained_model_name_or_path, use_auth_token, variant, revision, model_filenames):
    """Warn when a model variant is loaded via `revision=` instead of `variant=`.

    Checks the repo's 'main'-branch files for variant-compatible siblings
    and picks the appropriate deprecation message.

    NOTE(review): the sibling files are filtered with ``variant=revision``
    (not the `variant` argument) — consistent with the deprecation being
    about passing a variant through `revision`; the `variant` parameter is
    unused here.
    """
    info = model_info(pretrained_model_name_or_path, use_auth_token=use_auth_token, revision=None)
    filenames = {sibling.rfilename for sibling in info.siblings}
    (comp_model_filenames, _) = variant_compatible_siblings(filenames, variant=revision)
    # Strip the variant component (second dot-separated field) from each name.
    comp_model_filenames = ['.'.join((f.split('.')[:1] + f.split('.')[2:])) for f in comp_model_filenames]
    if set(model_filenames).issubset(set(comp_model_filenames)):
        warnings.warn(f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` even though you can load it via `variant=`{revision}`. Loading model variants via `revision='{revision}'` is deprecated and will be removed in diffusers v1. Please use `variant='{revision}'` instead.", FutureWarning)
    else:
        warnings.warn(f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have the required variant filenames in the 'main' branch.
 The Diffusers team and community would be very grateful if you could open an issue: with the title '{pretrained_model_name_or_path} is missing {revision} files' so that the correct variant file can be added.''', FutureWarning)
def test_isotropic_nfw_dens_directint():
    """Check that the isotropic NFW DF integrates back to the NFW density
    to within 1% over 0.1-10 scale radii."""
    nfw_pot = potential.NFWPotential(amp=2.3, a=1.3)
    nfw_df = isotropicNFWdf(pot=nfw_pot)
    check_dens_directint(
        nfw_df,
        nfw_pot,
        0.01,  # relative tolerance
        lambda r: nfw_pot.dens(r, 0),
        rmin=nfw_pot._scale / 10.0,
        rmax=nfw_pot._scale * 10.0,
        bins=31,
    )
    return None
def sync_params(params):
    """Broadcast every parameter tensor from rank 0 to all other workers,
    so all replicas start from identical weights."""
    for param in params:
        # no_grad: the in-place broadcast must not be recorded by autograd.
        with th.no_grad():
            dist.broadcast(param, 0)
def test_mildnonaxi_meanvt_direct_tlist():
    """meanvT with a list of times and grid=False must raise IOError/OSError
    (direct evaluation does not support time lists)."""
    idf = dehnendf(beta=0.0)
    halo_pot = [LogarithmicHaloPotential(normalize=1.0)]
    edf = evolveddiskdf(idf, pot=halo_pot, to=-10.0)
    times = [0.0, -2.5, -5.0, -7.5, -10.0]
    try:
        edf.meanvT(0.9, t=times, phi=0.2, integrate_method='rk6_c', grid=False)
    except OSError:
        # IOError is an alias of OSError in Python 3; this is the expected path.
        pass
    else:
        raise AssertionError('direct evolveddiskdf calculation of meanvT w/ list of times did not raise IOError')
    return None
class CheckDummiesTester(unittest.TestCase):
    """Tests for the `make fix-copies` dummy-object generation utilities."""

    def test_find_backend(self):
        # find_backend parses the backend name(s) out of an availability-guard
        # line; multiple backends are joined with '_and_'.
        simple_backend = find_backend(' if not is_torch_available():')
        self.assertEqual(simple_backend, 'torch')
        double_backend = find_backend(' if not (is_torch_available() and is_transformers_available()):')
        self.assertEqual(double_backend, 'torch_and_transformers')
        triple_backend = find_backend(' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):')
        self.assertEqual(triple_backend, 'torch_and_transformers_and_onnx')

    def test_read_init(self):
        # read_init maps backend combinations to the public objects that
        # require them; spot-check a few well-known entries.
        objects = read_init()
        self.assertIn('torch', objects)
        self.assertIn('torch_and_transformers', objects)
        self.assertIn('flax_and_transformers', objects)
        self.assertIn('torch_and_transformers_and_onnx', objects)
        self.assertIn('UNet2DModel', objects['torch'])
        self.assertIn('FlaxUNet2DConditionModel', objects['flax'])
        self.assertIn('StableDiffusionPipeline', objects['torch_and_transformers'])
        self.assertIn('FlaxStableDiffusionPipeline', objects['flax_and_transformers'])
        self.assertIn('LMSDiscreteScheduler', objects['torch_and_scipy'])
        self.assertIn('OnnxStableDiffusionPipeline', objects['torch_and_transformers_and_onnx'])

    def test_create_dummy_object(self):
        # Constants become None, functions and classes raise via
        # requires_backends with the given backend expression.
        dummy_constant = create_dummy_object('CONSTANT', "'torch'")
        self.assertEqual(dummy_constant, '\nCONSTANT = None\n')
        dummy_function = create_dummy_object('function', "'torch'")
        self.assertEqual(dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n")
        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    \n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    \n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object('FakeClass', "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        # Whole-file generation: header plus one dummy per requested object.
        expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = ["torch"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, ["torch"])\n\n    \n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, ["torch"])\n\n    \n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, ["torch"])\n'
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
        self.assertEqual(dummy_files['torch'], expected_dummy_pytorch_file)
class DenseTransition(nn.Sequential):
    """DenseNet transition block: norm, 1x1 conv channel reduction, then 2x
    spatial downsampling (anti-aliased pooling when `aa_layer` is given)."""

    def __init__(self, num_input_features, num_output_features, norm_layer=nn.BatchNorm2d, aa_layer=None):
        super(DenseTransition, self).__init__()
        self.add_module('norm', norm_layer(num_input_features))
        self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False))
        # Anti-aliased pool (blur-pool style) if provided, else plain avg-pool.
        pool = aa_layer(num_output_features, stride=2) if aa_layer is not None else nn.AvgPool2d(kernel_size=2, stride=2)
        self.add_module('pool', pool)
def download_from_tcga(uuid: str, dest: str, message: str='Downloading...') -> None:
    """Download a file from the NCI GDC (TCGA) data portal by UUID.

    Streams the response into `dest` under the server-supplied filename
    (from Content-Disposition), showing a tqdm progress bar in megabytes.

    Args:
        uuid: GDC file identifier.
        dest: Destination directory.
        message: Progress-bar label.
    """
    # GDC data endpoint; the URL literal was lost in the original source
    # (it had an unterminated f-string) — restored from the GDC API docs.
    data_endpt = 'https://api.gdc.cancer.gov/data'
    response = requests.post(data_endpt, data=json.dumps({'ids': [uuid]}), headers={'Content-Type': 'application/json'}, stream=True)
    response_head_cd = response.headers['Content-Disposition']
    block_size = 4096
    block_per_mb = (block_size / 1000000)
    file_size = int(response.headers.get('Content-Length', ''))
    file_size_mb = (file_size / 1000000)
    running_total_mb = 0
    # The server supplies the real filename via Content-Disposition.
    file_name = join(dest, re.findall('filename=(.+)', response_head_cd)[0])
    pbar = tqdm(desc=message, total=file_size_mb, unit='MB', bar_format='{desc}: {percentage:3.0f}%|{bar}| {n:.2f}/{total:.2f} [{elapsed}<{remaining}] {rate_fmt}{postfix}')
    with open(file_name, 'wb') as output_file:
        for chunk in response.iter_content(chunk_size=block_size):
            output_file.write(chunk)
            if ((block_per_mb + running_total_mb) < file_size_mb):
                running_total_mb += block_per_mb
                pbar.update(block_per_mb)
            else:
                # BUGFIX: advance the bar by the true remainder *before*
                # bumping the running total — the original zeroed the delta
                # first, so the final pbar.update was always 0.
                remaining_mb = (file_size_mb - running_total_mb)
                running_total_mb += remaining_mb
                pbar.update(remaining_mb)
    pbar.close()
class NNSOps():
    """Stateless wrappers around Open3D nearest-neighbour-search setup and
    query operations, keyed by search flavour (knn / radius / hybrid).

    All helpers are declared @staticmethod (the originals took no `self`,
    which only worked when called through the class; instance calls would
    have passed the instance as `datasets`/`index`).
    """

    @staticmethod
    def knn_setup(datasets, nns_opt):
        """Build a k-nearest-neighbour index over the dataset points."""
        index = o3c.nns.NearestNeighborSearch(datasets)
        index.knn_index()
        return index

    @staticmethod
    def radius_setup(datasets, nns_opt):
        """Build a fixed-radius index (radius from nns_opt['radius'])."""
        radius = nns_opt['radius']
        index = o3c.nns.NearestNeighborSearch(datasets)
        index.fixed_radius_index(radius)
        return index

    @staticmethod
    def hybrid_setup(datasets, nns_opt):
        """Build a hybrid (radius + knn) index."""
        radius = nns_opt['radius']
        index = o3c.nns.NearestNeighborSearch(datasets)
        index.hybrid_index(radius)
        return index

    @staticmethod
    def knn_search(index, queries, nns_opt):
        """Query the nns_opt['knn'] nearest neighbours of each point."""
        knn = nns_opt['knn']
        result = index.knn_search(queries, knn)
        return result

    @staticmethod
    def radius_search(index, queries, nns_opt):
        """Query all neighbours within nns_opt['radius']."""
        radius = nns_opt['radius']
        result = index.fixed_radius_search(queries, radius)
        return result

    @staticmethod
    def hybrid_search(index, queries, nns_opt):
        """Query up to nns_opt['knn'] neighbours within nns_opt['radius']."""
        (radius, knn) = (nns_opt['radius'], nns_opt['knn'])
        result = index.hybrid_search(queries, radius, knn)
        return result
def safe_var(entity, **kwargs):
    """Wrap a tensor in a Variable; no-op when it already is one.

    Deprecated: since PyTorch 0.4 tensors and Variables are unified, so
    every call emits a DeprecationWarning.

    Raises:
        Exception: when `entity` is neither a Variable nor a tensor.
    """
    warnings.warn('As of >=pytorch0.4.0 this is no longer necessary', DeprecationWarning)
    if isinstance(entity, Variable):
        return entity
    if isinstance(entity, torch._C._TensorBase):
        return Variable(entity, **kwargs)
    raise Exception("Can't cast %s to a Variable" % entity.__class__.__name__)
class DB3(nn.Module):
    """Decoder block fusing a semantic branch `s` (128ch) and a residual
    branch `r` (256ch) with an upsampled deeper feature `up`, via a DB2."""

    def __init__(self) -> None:
        super(DB3, self).__init__()
        self.db2 = DB2(64, 64)
        self.conv3x3 = nn.Sequential(nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(64), nn.ReLU(inplace=True))
        # Squeeze both branches to a common 64-channel width.
        self.sqz_r4 = nn.Sequential(nn.Conv2d(256, 64, kernel_size=3, stride=1, dilation=1, padding=1), nn.BatchNorm2d(64), nn.ReLU(inplace=True))
        self.sqz_s1 = nn.Sequential(nn.Conv2d(128, 64, kernel_size=3, stride=1, dilation=1, padding=1), nn.BatchNorm2d(64), nn.ReLU(inplace=True))

    def forward(self, s, r, up):
        # Match the deeper feature's spatial size to the shallow branch.
        up_resized = F.interpolate(up, size=s.size()[2:], mode='bilinear', align_corners=True)
        fused = self.conv3x3(self.sqz_s1(s) + self.sqz_r4(r))
        out, _ = self.db2(fused, up_resized)
        return (out, out)

    def initialize(self):
        weight_init(self)
class PassthroughBeforeEvalDensity(WrapperDensity):
    """Density wrapper that runs one gradient-free ELBO pass on a stored
    batch `x` every time the module is switched into eval mode.

    NOTE(review): presumably used to warm up data-dependent state (e.g.
    ActNorm-style initialization or batch-norm statistics) before
    evaluation — confirm against the wrapped density.
    """

    def __init__(self, density, x):
        super().__init__(density)
        # register_buffer persists x with the module (moves with .to(),
        # saved in state_dict) without making it a learnable parameter.
        self.register_buffer('x', x)

    def train(self, train_mode=True):
        # On the transition to eval: temporarily force training=True so the
        # ELBO pass sees train-mode behavior, then delegate to the normal
        # mode switch below.
        if (not train_mode):
            self.training = True
            with torch.no_grad():
                self.elbo(self.x, num_importance_samples=1, detach_q_params=False, detach_q_samples=False)
        super().train(train_mode)
def double_newton_at_point(pols, sols, idx=1, maxdeg=4, nbr=4, vrblvl=0):
    """Run Newton's method for power-series solutions in double precision.

    Starting from the solutions `sols` of the polynomial system `pols`,
    computes series truncated at degree `maxdeg` using `nbr` Newton steps,
    where `idx` is the index of the series parameter among the variables.

    Returns:
        List of series solutions (one polynomial system per start solution),
        with the series symbol substituted back at position `idx`.
    """
    nbsym = number_of_symbols(pols, vrblvl)
    if (vrblvl > 0):
        print('-> double_newton_at_point, idx :', idx)
        print('-> double_newton_at_point, the polynomials :')
        for pol in pols:
            print(pol)
        print('number of variables :', nbsym)
    # Push the system and the start solutions into the double-precision
    # containers of the PHCpack back end.
    set_double_system(nbsym, pols, vrblvl)
    set_double_solutions(nbsym, sols, vrblvl)
    syspol = get_double_system(vrblvl)
    for pol in syspol:
        print(pol)
    write_double_solutions(vrblvl)
    phc = get_phcfun()
    # Pack (idx, maxdeg, nbr) as the integer parameters of PHC job 691.
    apars = int4a2nbr([idx, maxdeg, nbr], (vrblvl > 0))
    bbb = pointer(c_int32(vrblvl))
    ccc = pointer(c_double(0.0))
    vrb = c_int32(vrblvl)
    if (vrblvl > 0):
        print('-> double_newton_at_point calls phc ...')
        print('apars =', nbr2int4a(apars))
    retval = phc(691, apars, bbb, ccc, vrb)
    fail = (retval > 0)
    # size == -1 signals failure; otherwise the syspool holds `size` series.
    size = ((- 1) if fail else size_double_syspool(vrblvl))
    if (vrblvl > 0):
        print('the return value of double_newton_at_point :', retval)
        if (size == (- 1)):
            print("An error occurred in the execution of Newton's method.")
        else:
            print(('Computed %d series solutions.' % size))
    result = []
    # Empty range when size == -1, so failures yield an empty result list.
    for k in range(1, (size + 1)):
        if (vrblvl > 0):
            print('')
            print('retrieving series', k, '...')
            print('')
        copy_from_double_syspool(k)
        sersol = get_double_system(vrblvl)
        result.append(substitute_symbol(sersol, idx))
    return result
class conv2DBatchNorm(nn.Module):
    """Conv2d followed by BatchNorm2d (no activation).

    Args:
        in_channels: Input channel count.
        n_filters: Output channel count.
        k_size: Convolution kernel size.
        stride: Convolution stride.
        padding: Convolution padding.
        bias: Whether the conv layer has a bias term.
        dilation: Dilation rate; values <= 1 are treated as 1.
    """

    def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True, dilation=1):
        super(conv2DBatchNorm, self).__init__()
        # The original had two identical branches differing only in passing
        # `dilation` vs the constant 1 — and the else-branch was only taken
        # when dilation <= 1 anyway.  Collapsed into one construction.
        conv_mod = nn.Conv2d(int(in_channels), int(n_filters), kernel_size=k_size, padding=padding, stride=stride, bias=bias, dilation=(dilation if dilation > 1 else 1))
        self.cb_unit = nn.Sequential(conv_mod, nn.BatchNorm2d(int(n_filters)))

    def forward(self, inputs):
        """Apply conv + batch-norm."""
        outputs = self.cb_unit(inputs)
        return outputs
def evaluate(args, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, prefix='') -> Dict:
    """Evaluate a (masked) language model and report perplexity.

    Averages per-batch loss over the eval set, exponentiates it to get
    perplexity, logs the result and appends it to
    ``<output_dir>/<prefix>/eval_results.txt``.

    Returns:
        Dict with key 'perplexity' (a torch scalar tensor).
    """
    eval_output_dir = args.output_dir
    eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True)
    # Only the main process (rank -1 or 0) creates the output directory.
    if (args.local_rank in [(- 1), 0]):
        os.makedirs(eval_output_dir, exist_ok=True)
    args.eval_batch_size = (args.per_gpu_eval_batch_size * max(1, args.n_gpu))

    def collate(examples: List[torch.Tensor]):
        # Pad to the longest sequence; default pad value 0 when the tokenizer
        # defines no pad token.
        if (tokenizer._pad_token is None):
            return pad_sequence(examples, batch_first=True)
        return pad_sequence(examples, batch_first=True, padding_value=tokenizer.pad_token_id)
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, collate_fn=collate)
    # Wrap for multi-GPU unless already wrapped.
    if ((args.n_gpu > 1) and (not isinstance(model, torch.nn.DataParallel))):
        model = torch.nn.DataParallel(model)
    logger.info('***** Running evaluation {} *****'.format(prefix))
    logger.info(' Num examples = %d', len(eval_dataset))
    logger.info(' Batch size = %d', args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    model.eval()
    for batch in tqdm(eval_dataloader, desc='Evaluating'):
        # MLM: mask tokens and predict them; CLM: inputs are their own labels.
        (inputs, labels) = (mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch))
        inputs = inputs.to(args.device)
        labels = labels.to(args.device)
        with torch.no_grad():
            outputs = (model(inputs, masked_lm_labels=labels) if args.mlm else model(inputs, labels=labels))
            lm_loss = outputs[0]
            eval_loss += lm_loss.mean().item()
        nb_eval_steps += 1
    eval_loss = (eval_loss / nb_eval_steps)
    perplexity = torch.exp(torch.tensor(eval_loss))
    result = {'perplexity': perplexity}
    output_eval_file = os.path.join(eval_output_dir, prefix, 'eval_results.txt')
    # Append mode: successive evaluations accumulate in the same file.
    with open(output_eval_file, 'a') as writer:
        logger.info('***** Eval results {} *****'.format(prefix))
        for key in sorted(result.keys()):
            logger.info(' %s = %s', key, str(result[key]))
        writer.write((str(float(perplexity)) + '\n'))
    return result
class HTMLTableProcessor():
    """Scrape an HTML table (located via XPath) into a pandas DataFrame."""

    def __init__(self, root_url: str, headings_xpath: str, data_rows_xpath: str, data_cell_xpath: str, table_class: str=None):
        self.root_url = root_url
        # table_class must be assigned before table_xpath is read below.
        self.table_class = table_class
        self.headings_xpath = headings_xpath.format(TABLE_XPATH=self.table_xpath)
        self.data_rows_xpath = data_rows_xpath.format(TABLE_XPATH=self.table_xpath)
        self.data_cell_xpath = data_cell_xpath

    @property
    def table_xpath(self) -> str:
        """XPath selecting the target table, filtered by class when one was given.

        BUG FIX: this was a plain method, so __init__ formatted the bound-method
        repr into the XPath templates; also the class predicate was missing the
        `@class` attribute test (`//table[="..."]` is invalid XPath).
        """
        if self.table_class:
            return f'//table[@class="{self.table_class}"]'
        return '//table'

    def get_tabular_data_from_element(self, element: lxml.etree.Element, column_name_mapper: ColumnListMapperFunction=None, known_percentages: Optional[List[str]]=None, row_id_func: RowIdFunction=None, row_id_name: Optional[str]=None) -> pd.DataFrame:
        """Extract headings and typed data rows from a parsed DOM element."""
        headings = element.xpath(self.headings_xpath)
        if column_name_mapper:
            headings = list(column_name_mapper(headings))
        data_rows_dom = element.xpath(self.data_rows_xpath)
        # Parse each cell with the heading-aware type coercion helper.
        data_rows: List[List[Union[(str, int, float, datetime)]]] = [[postprocessing.try_parse(y, headings[index], known_percentages=known_percentages) for (index, y) in enumerate(x.xpath(self.data_cell_xpath))] for x in data_rows_dom]
        if row_id_func:
            # Prepend a synthetic id column derived from each row's DOM node.
            headings = ([(row_id_name or 'id')] + headings)
            for index in range(len(data_rows_dom)):
                data_rows[index].insert(0, row_id_func(data_rows_dom[index]))
        fg_data = pd.DataFrame(data_rows, columns=headings)
        return fg_data

    def get_tabular_data_from_html(self, html: Union[(str, bytes)], column_name_mapper: ColumnListMapperFunction=None, known_percentages: Optional[List[str]]=None, row_id_func: RowIdFunction=None, row_id_name: Optional[str]=None) -> pd.DataFrame:
        """Parse raw HTML then delegate to get_tabular_data_from_element."""
        html_dom = lxml.etree.HTML(html)
        return self.get_tabular_data_from_element(html_dom, column_name_mapper=column_name_mapper, known_percentages=known_percentages, row_id_func=row_id_func, row_id_name=row_id_name)

    def get_tabular_data_from_url(self, url: str, query_params: Dict[(str, Union[(str, int)])]=None, column_name_mapper: ColumnListMapperFunction=None, known_percentages: Optional[List[str]]=None, row_id_func: RowIdFunction=None, row_id_name: Optional[str]=None) -> pd.DataFrame:
        """Fetch root_url + url and scrape the table from the response body.

        Raises:
            requests.exceptions.HTTPError: on any 4xx/5xx status code.
        """
        response = requests.get((self.root_url + url), params=query_params)
        if (response.status_code > 399):
            raise requests.exceptions.HTTPError(f"Error accessing '{(self.root_url + url)}'. Received status code {response.status_code}")
        return self.get_tabular_data_from_html(response.content, column_name_mapper=column_name_mapper, known_percentages=known_percentages, row_id_func=row_id_func, row_id_name=row_id_name)

    def get_tabular_data_from_options(self, base_url: str, query_params: Dict[(str, Union[(str, int)])], column_name_mapper: ColumnListMapperFunction=None, known_percentages: Optional[List[str]]=None, row_id_func: RowIdFunction=None, row_id_name: Optional[str]=None) -> pd.DataFrame:
        """Convenience wrapper: scrape base_url with explicit query parameters."""
        return self.get_tabular_data_from_url(base_url, query_params=query_params, column_name_mapper=column_name_mapper, known_percentages=known_percentages, row_id_func=row_id_func, row_id_name=row_id_name)
@pytest.mark.parametrize('embed_dims', [False, 256])
def test_basetransformerlayer(embed_dims):
    """BaseTransformerLayer: FFN config handling and batch_first propagation.

    BUG FIX: the decorator line was mangled to a bare `.parametrize(...)`
    (a syntax error); restored the standard pytest parametrize decorator.
    """
    attn_cfgs = (dict(type='MultiheadAttention', embed_dims=256, num_heads=8),)
    # FFN config with or without an explicit embed_dims, per the parametrization.
    if embed_dims:
        ffn_cfgs = dict(type='FFN', embed_dims=embed_dims, feedforward_channels=1024, num_fcs=2, ffn_drop=0.0, act_cfg=dict(type='ReLU', inplace=True))
    else:
        ffn_cfgs = dict(type='FFN', feedforward_channels=1024, num_fcs=2, ffn_drop=0.0, act_cfg=dict(type='ReLU', inplace=True))
    feedforward_channels = 2048
    ffn_dropout = 0.1
    operation_order = ('self_attn', 'norm', 'ffn', 'norm')
    baselayer = BaseTransformerLayer(attn_cfgs=attn_cfgs, ffn_cfgs=ffn_cfgs, feedforward_channels=feedforward_channels, ffn_dropout=ffn_dropout, operation_order=operation_order)
    assert (baselayer.batch_first is False)
    assert (baselayer.ffns[0].feedforward_channels == feedforward_channels)
    # batch_first must be forwarded to the attention modules.
    attn_cfgs = (dict(type='MultiheadAttention', num_heads=8, embed_dims=256),)
    feedforward_channels = 2048
    ffn_dropout = 0.1
    operation_order = ('self_attn', 'norm', 'ffn', 'norm')
    baselayer = BaseTransformerLayer(attn_cfgs=attn_cfgs, feedforward_channels=feedforward_channels, ffn_dropout=ffn_dropout, operation_order=operation_order, batch_first=True)
    assert baselayer.attentions[0].batch_first
    in_tensor = torch.rand(2, 10, 256)
    baselayer(in_tensor)
def load_cifar100_data(datadir):
    """Load the (truncated) CIFAR-100 train/test splits as (X_train, y_train, X_test, y_test)."""
    train_tf, test_tf = _data_transforms_cifar100()
    train_ds = CIFAR100_truncated(datadir, train=True, download=True, transform=train_tf)
    test_ds = CIFAR100_truncated(datadir, train=False, download=True, transform=test_tf)
    return (train_ds.data, train_ds.target, test_ds.data, test_ds.target)
def get_dataset():
    """Build a tiny in-memory dataset of fake repository files for testing."""
    columns = {
        'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
        'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
        'content': [('a ' * 20), ('a ' * 30), ('b ' * 7)],
    }
    return Dataset.from_dict(columns)
class CosStepScheduler(LRScheduler):
    """Learning-rate schedule following a half-cosine decay from start_lr toward end_lr."""

    def __init__(self, optimizer, start_lr=0.01, end_lr=0.005, epochs=50, last_epoch=(- 1), **kwargs):
        self.start_lr = start_lr
        self.end_lr = end_lr
        # Precompute one LR value per epoch before the base class initializes.
        self.lr_spaces = self._build_lr(start_lr, end_lr, epochs)
        super(CosStepScheduler, self).__init__(optimizer, last_epoch)

    def _build_lr(self, start_lr, end_lr, epochs):
        """Return the per-epoch LR table as a float32 array.

        NOTE(review): the cosine period is hard-coded to 10 epochs rather than
        `epochs` — confirm this is intended.
        """
        steps = np.arange(epochs).astype(np.float32)
        amplitude = (((start_lr - end_lr) * (1.0 + np.cos(((steps * np.pi) / 10)))) * 0.5)
        return (end_lr + amplitude).astype(np.float32)
def traverse(tree, func, types=(list, tuple)):
    """Recursively map `func` over every leaf of a nested list/tuple structure.

    Containers whose type is in `types` are rebuilt with the same type; every
    other value is treated as a leaf and passed through `func`.
    """
    if not isinstance(tree, types):
        return func(tree)
    container = type(tree)
    return container([traverse(child, func, types) for child in tree])
def cimport_commons(lines, no_optimization):
    """Insert a matching `cimport *` line right after the `from <commons> import *` line.

    Returns the (possibly extended) list of source lines; only the first match
    is processed.
    """
    marker = 'from {} import *'.format(commons_name)
    for idx, text in enumerate(lines):
        if text.startswith(marker):
            cimport_line = 'from {} cimport *\n'.format(commons_name)
            lines = lines[:(idx + 1)] + [cimport_line] + lines[(idx + 1):]
            break
    return lines
class CascadeBBoxAssigner(object):
    """Assigns sampled proposal labels per cascade R-CNN stage.

    Each stage uses its own fg/bg IoU thresholds and bbox regression weights.
    NOTE(review): the list defaults below are shared mutable defaults; they are
    never mutated here, but tuples would be safer.
    """
    __shared__ = ['num_classes']
    def __init__(self, batch_size_per_im=512, fg_fraction=0.25, fg_thresh=[0.5, 0.6, 0.7], bg_thresh_hi=[0.5, 0.6, 0.7], bg_thresh_lo=[0.0, 0.0, 0.0], bbox_reg_weights=[10, 20, 30], shuffle_before_sample=True, num_classes=81, class_aware=False):
        super(CascadeBBoxAssigner, self).__init__()
        self.batch_size_per_im = batch_size_per_im
        self.fg_fraction = fg_fraction
        self.fg_thresh = fg_thresh
        self.bg_thresh_hi = bg_thresh_hi
        self.bg_thresh_lo = bg_thresh_lo
        self.bbox_reg_weights = bbox_reg_weights
        self.class_nums = num_classes
        self.use_random = shuffle_before_sample
        self.class_aware = class_aware
    def __call__(self, input_rois, feed_vars, curr_stage):
        """Generate sampled proposal labels for cascade stage `curr_stage`.

        Returns whatever fluid.layers.generate_proposal_labels returns for the
        stage's thresholds and regression weights.
        """
        # Per-stage regression weights: x/y use 1/w, w/h use 2/w (w grows per stage).
        curr_bbox_reg_w = [(1.0 / self.bbox_reg_weights[curr_stage]), (1.0 / self.bbox_reg_weights[curr_stage]), (2.0 / self.bbox_reg_weights[curr_stage]), (2.0 / self.bbox_reg_weights[curr_stage])]
        # Class-agnostic mode collapses classification to fg/bg (2 classes);
        # cascade resampling is only enabled for later, class-agnostic stages.
        outs = fluid.layers.generate_proposal_labels(rpn_rois=input_rois, gt_classes=feed_vars['gt_class'], is_crowd=feed_vars['is_crowd'], gt_boxes=feed_vars['gt_bbox'], im_info=feed_vars['im_info'], batch_size_per_im=self.batch_size_per_im, fg_thresh=self.fg_thresh[curr_stage], bg_thresh_hi=self.bg_thresh_hi[curr_stage], bg_thresh_lo=self.bg_thresh_lo[curr_stage], bbox_reg_weights=curr_bbox_reg_w, use_random=self.use_random, class_nums=(self.class_nums if self.class_aware else 2), is_cls_agnostic=(not self.class_aware), is_cascade_rcnn=(True if ((curr_stage > 0) and (not self.class_aware)) else False))
        return outs
def bunzip(fn: "PathOrStr"):
    """Decompress a .bz2 file in place, writing next to it with the suffix stripped.

    Streams in 1 MiB chunks so arbitrarily large archives fit in memory.

    Raises:
        FileNotFoundError: if `fn` does not exist.
        FileExistsError: if the decompressed target already exists.
    """
    fn = Path(fn)
    # Explicit raises instead of `assert`: asserts are stripped under `python -O`.
    if not fn.exists():
        raise FileNotFoundError(f"{fn} doesn't exist")
    out_fn = fn.with_suffix('')
    if out_fn.exists():
        raise FileExistsError(f'{out_fn} already exists')
    with bz2.BZ2File(fn, 'rb') as src, out_fn.open('wb') as dst:
        while True:
            chunk = src.read(1024 * 1024)
            if not chunk:
                break
            dst.write(chunk)
def get_logger(filename):
    """Create (or fetch) a logger writing INFO records to ./logs/<filename>.log.

    Safe to call repeatedly with the same name: the file handler is only
    attached once (the original appended a duplicate handler per call, which
    duplicated every log line).
    """
    filename = '{}.log'.format(filename)
    logger = logging.getLogger(filename)
    logger.setLevel(logging.INFO)
    log_path = './logs'
    # makedirs(exist_ok=True) avoids the exists()/mkdir race of the original.
    os.makedirs(log_path, exist_ok=True)
    if not logger.handlers:
        handler = logging.FileHandler(os.path.join(log_path, filename))
        handler.setLevel(logging.INFO)
        fmt = '%(asctime)-15s %(levelname)s %(filename)s %(lineno)d %(process)d %(message)s'
        datefmt = '%a %d %b %Y %H:%M:%S'
        handler.setFormatter(logging.Formatter(fmt, datefmt))
        logger.addHandler(handler)
    return logger
class TRPO(VPG):
    """Trust Region Policy Optimization built on the VPG machinery.

    Differs from VPG by (a) a conjugate-gradient policy optimizer with a KL
    trust-region constraint and (b) an unclipped importance-weighted surrogate
    objective.
    """
    def __init__(self, env_spec, policy, value_function, policy_optimizer=None, vf_optimizer=None, max_path_length=100, num_train_per_epoch=1, discount=0.99, gae_lambda=0.98, center_adv=True, positive_adv=False, policy_ent_coeff=0.0, use_softplus_entropy=False, stop_entropy_gradient=False, entropy_method='no_entropy'):
        # Default policy optimizer: conjugate gradient with max KL step 0.01
        # (the trust region). Default value optimizer: Adam over minibatches.
        if (policy_optimizer is None):
            policy_optimizer = OptimizerWrapper((ConjugateGradientOptimizer, dict(max_constraint_value=0.01)), policy)
        if (vf_optimizer is None):
            vf_optimizer = OptimizerWrapper((torch.optim.Adam, dict(lr=0.00025)), value_function, max_optimization_epochs=10, minibatch_size=64)
        super().__init__(env_spec=env_spec, policy=policy, value_function=value_function, policy_optimizer=policy_optimizer, vf_optimizer=vf_optimizer, max_path_length=max_path_length, num_train_per_epoch=num_train_per_epoch, discount=discount, gae_lambda=gae_lambda, center_adv=center_adv, positive_adv=positive_adv, policy_ent_coeff=policy_ent_coeff, use_softplus_entropy=use_softplus_entropy, stop_entropy_gradient=stop_entropy_gradient, entropy_method=entropy_method)
    def _compute_objective(self, advantages, obs, actions, rewards):
        """Importance-weighted surrogate: exp(log pi_new - log pi_old) * advantage."""
        # Old-policy log-likelihoods are constants w.r.t. the update.
        with torch.no_grad():
            old_ll = self._old_policy(obs)[0].log_prob(actions)
        new_ll = self.policy(obs)[0].log_prob(actions)
        likelihood_ratio = (new_ll - old_ll).exp()
        surrogate = (likelihood_ratio * advantages)
        return surrogate
    def _train_policy(self, obs, actions, rewards, advantages):
        """Run one KL-constrained policy update; returns the pre-step loss."""
        self._policy_optimizer.zero_grad()
        loss = self._compute_loss_with_adv(obs, actions, rewards, advantages)
        loss.backward()
        # The CG optimizer re-evaluates the loss and KL constraint via closures
        # during its line search.
        self._policy_optimizer.step(f_loss=(lambda : self._compute_loss_with_adv(obs, actions, rewards, advantages)), f_constraint=(lambda : self._compute_kl_constraint(obs)))
        return loss
def save_checkpoint(state, track_list, filename):
    """Persist a training checkpoint: tracking info to <filename>.json, state to <filename>.model."""
    json_path = filename + '.json'
    with open(json_path, 'w') as handle:
        json.dump(track_list, handle)
    torch.save(state, filename + '.model')
class Material(xmlr.Object):
    """A named material with an optional color and/or texture."""

    def __init__(self, name=None, color=None, texture=None):
        self.name = name
        self.color = color
        self.texture = texture

    def check_valid(self):
        """Report an error when the material carries no appearance information at all."""
        if self.color is None and self.texture is None:
            xmlr.on_error('Material has neither a color nor texture')
# NOTE(review): decorator was mangled to a bare "_module()"; restored to the
# standard mmseg registry call — confirm the registry name in this codebase.
@HEADS.register_module()
class DAHead(BaseDecodeHead):
    """Dual Attention head: parallel position (PAM) and channel (CAM) attention
    branches whose features are summed for the fused prediction."""

    def __init__(self, pam_channels, **kwargs):
        super(DAHead, self).__init__(**kwargs)
        self.pam_channels = pam_channels
        # Position-attention branch: conv -> PAM -> conv -> 1x1 seg head.
        self.pam_in_conv = ConvModule(self.in_channels, self.channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
        self.pam = PAM(self.channels, pam_channels)
        self.pam_out_conv = ConvModule(self.channels, self.channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
        self.pam_conv_seg = nn.Conv2d(self.channels, self.num_classes, kernel_size=1)
        # Channel-attention branch: conv -> CAM -> conv -> 1x1 seg head.
        self.cam_in_conv = ConvModule(self.in_channels, self.channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
        self.cam = CAM()
        self.cam_out_conv = ConvModule(self.channels, self.channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
        self.cam_conv_seg = nn.Conv2d(self.channels, self.num_classes, kernel_size=1)

    def pam_cls_seg(self, feat):
        """PAM-branch classification head (with optional dropout)."""
        if (self.dropout is not None):
            feat = self.dropout(feat)
        output = self.pam_conv_seg(feat)
        return output

    def cam_cls_seg(self, feat):
        """CAM-branch classification head (with optional dropout)."""
        if (self.dropout is not None):
            feat = self.dropout(feat)
        output = self.cam_conv_seg(feat)
        return output

    def forward(self, inputs):
        """Return (fused logits, PAM logits, CAM logits)."""
        x = self._transform_inputs(inputs)
        pam_feat = self.pam_in_conv(x)
        pam_feat = self.pam(pam_feat)
        pam_feat = self.pam_out_conv(pam_feat)
        pam_out = self.pam_cls_seg(pam_feat)
        cam_feat = self.cam_in_conv(x)
        cam_feat = self.cam(cam_feat)
        cam_feat = self.cam_out_conv(cam_feat)
        cam_out = self.cam_cls_seg(cam_feat)
        # Fuse the two branches by elementwise sum before the shared seg head.
        feat_sum = (pam_feat + cam_feat)
        pam_cam_out = self.cls_seg(feat_sum)
        return (pam_cam_out, pam_out, cam_out)

    def forward_test(self, inputs, img_metas, test_cfg):
        """At test time only the fused prediction is used."""
        return self.forward(inputs)[0]

    def losses(self, seg_logit, seg_label):
        """Compute (prefixed) losses for the fused, PAM, and CAM outputs."""
        (pam_cam_seg_logit, pam_seg_logit, cam_seg_logit) = seg_logit
        loss = dict()
        loss.update(add_prefix(super(DAHead, self).losses(pam_cam_seg_logit, seg_label), 'pam_cam'))
        loss.update(add_prefix(super(DAHead, self).losses(pam_seg_logit, seg_label), 'pam'))
        loss.update(add_prefix(super(DAHead, self).losses(cam_seg_logit, seg_label), 'cam'))
        return loss
class ResNetCIFAR(nn.Module):
    """CIFAR-style ResNet: 3x3 conv stem, three residual stages (16/32/64 planes),
    global average pooling, and a linear classifier."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNetCIFAR, self).__init__()
        global BN
        # Module-level alias used by block implementations elsewhere in the file.
        BN = nn.BatchNorm2d
        self.in_planes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = BN(16)
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.linear = nn.Linear(64 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first block applies `stride`."""
        stage = []
        for s in [stride] + [1] * (num_blocks - 1):
            stage.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*stage)

    def forward(self, x):
        """Map images to class logits (the 8x8 avg-pool assumes 32x32 inputs)."""
        h = F.relu(self.bn1(self.conv1(x)))
        h = self.layer3(self.layer2(self.layer1(h)))
        h = F.avg_pool2d(h, 8)
        h = h.view(h.size(0), -1)
        return self.linear(h)
def build_padding_layer(cfg, *args, **kwargs):
    """Instantiate a padding layer from a config dict carrying a 'type' key.

    Raises:
        TypeError: if `cfg` is not a dict.
        KeyError: if 'type' is missing or names an unregistered padding layer.
    """
    if not isinstance(cfg, dict):
        raise TypeError('cfg must be a dict')
    if 'type' not in cfg:
        raise KeyError('the cfg dict must contain the key "type"')
    options = cfg.copy()
    padding_type = options.pop('type')
    if padding_type not in PADDING_LAYERS:
        raise KeyError(f'Unrecognized padding type {padding_type}.')
    # Remaining cfg entries become constructor kwargs.
    return PADDING_LAYERS.get(padding_type)(*args, **kwargs, **options)
def train_one_epoch(model, optimizer, train_loader, model_func, lr_scheduler, accumulated_iter, optim_cfg, rank, tbar, total_it_each_epoch, dataloader_iter, tb_log=None, leave_pbar=False):
    """Run one training epoch and return the updated global iteration counter.

    `model_func` computes (loss, tb_dict, disp_dict) for a batch. Progress bars
    and TensorBoard scalars are emitted only on rank 0.
    """
    # Fresh iterator when this epoch covers the whole loader.
    if (total_it_each_epoch == len(train_loader)):
        dataloader_iter = iter(train_loader)
    if (rank == 0):
        pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
    for cur_it in range(total_it_each_epoch):
        try:
            batch = next(dataloader_iter)
        except StopIteration:
            # Loader exhausted mid-epoch: wrap around and continue.
            dataloader_iter = iter(train_loader)
            batch = next(dataloader_iter)
            print('new iters')
        lr_scheduler.step(accumulated_iter)
        try:
            cur_lr = float(optimizer.lr)
        except:
            # NOTE(review): bare except — falls back to the param-group LR when
            # the optimizer has no `.lr`; consider `except AttributeError`.
            cur_lr = optimizer.param_groups[0]['lr']
        # Log the learning rate every 100 iterations.
        if ((accumulated_iter % 100) == 0):
            if (tb_log is not None):
                tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
        model.train()
        optimizer.zero_grad()
        (loss, tb_dict, disp_dict) = model_func(model, batch)
        loss.backward()
        clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
        optimizer.step()
        accumulated_iter += 1
        disp_dict.update({'loss': loss.item(), 'lr': cur_lr})
        if (rank == 0):
            pbar.update()
            pbar.set_postfix(dict(total_it=accumulated_iter))
            tbar.set_postfix(disp_dict)
            tbar.refresh()
            # Log loss and per-key scalars every 200 iterations (rank 0 only).
            if ((accumulated_iter % 200) == 0):
                if (tb_log is not None):
                    tb_log.add_scalar('train/loss', loss, accumulated_iter)
                    tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
                    for (key, val) in tb_dict.items():
                        if ('vis_dict' not in key):
                            tb_log.add_scalar(('train/' + key), val, accumulated_iter)
                        elif ('vis_dict' in key):
                            # Visualization dicts are intentionally skipped here.
                            pass
    if (rank == 0):
        pbar.close()
    return accumulated_iter
def text_POS_Sentiments_analysis(text, sentiment_ctl=None):
    """POS-tag `text` and optionally score its sentiment via SentiWordNet.

    Returns (score, penn_tags, wordnet_tags); score is 0 when sentiment_ctl is
    None, and negated when sentiment_ctl == 'negative'.
    """
    tokens = word_tokenize(text)
    tagged = pos_tag(tokens)
    res_tag = [pair[1] for pair in tagged]
    # Penn Treebank tag -> WordNet POS letter (n/v/a/r); unknown tags map to ''.
    tag_map = {'NN': 'n', 'NNP': 'n', 'NNPS': 'n', 'NNS': 'n', 'UH': 'n', 'VB': 'v', 'VBD': 'v', 'VBG': 'v', 'VBN': 'v', 'VBP': 'v', 'VBZ': 'v', 'JJ': 'a', 'JJR': 'a', 'JJS': 'a', 'RB': 'r', 'RBR': 'r', 'RBS': 'r', 'RP': 'r', 'WRB': 'r'}
    tagged = [(token, tag_map.get(tag, '')) for (token, tag) in tagged]
    wordnet_tag = [pair[1] for pair in tagged]
    sentiment_synsets = [list(sentiwordnet.senti_synsets(token, pos)) for (token, pos) in tagged]
    if sentiment_ctl is None:
        return (0, res_tag, wordnet_tag)
    # Average (pos - neg) over each word's synsets, then sum over words.
    score = sum(((sum([(x.pos_score() - x.neg_score()) for x in syns]) / len(syns)) for syns in sentiment_synsets if (len(syns) != 0)))
    if sentiment_ctl == 'negative':
        score = -score
    return (score, res_tag, wordnet_tag)
def load_pure_strat(policy: Policy, pure_strat_spec: StrategySpec=None, checkpoint_path: str=None, weights_key: str='weights'):
    """Load a pure-strategy checkpoint's weights into `policy`.

    Exactly one of `pure_strat_spec` / `checkpoint_path` may be given. Loading
    first tries deepdish/HDF5 (with retries), then falls back to cloudpickle.
    """
    if ((pure_strat_spec is not None) and (checkpoint_path is not None)):
        raise ValueError('Can only pass pure_strat_spec or checkpoint_path but not both')
    if (checkpoint_path is None):
        # Skip reloading when the policy already holds this exact spec.
        if (hasattr(policy, 'policy_spec') and (pure_strat_spec == policy.policy_spec)):
            return
        pure_strat_checkpoint_path = pure_strat_spec.metadata['checkpoint_path']
    else:
        pure_strat_checkpoint_path = checkpoint_path
    weights = None
    try:
        # HDF5 files may be mid-write by another process; retry before giving up.
        num_load_attempts = 5
        for attempt in range(num_load_attempts):
            try:
                checkpoint_data = deepdish.io.load(path=pure_strat_checkpoint_path)
                weights = checkpoint_data[weights_key]
                break
            except (HDF5ExtError, KeyError):
                if ((attempt + 1) == num_load_attempts):
                    raise
                time.sleep(1.0)
    except Exception:
        # Not an HDF5 checkpoint (or retries exhausted): treat it as cloudpickle.
        with open(pure_strat_checkpoint_path, 'rb') as pickle_file:
            checkpoint_data = cloudpickle.load(pickle_file)
            weights = checkpoint_data[weights_key]
    # Keys were sanitized for storage ('.' -> '_dot_'); restore the dots.
    weights = {k.replace('_dot_', '.'): v for (k, v) in weights.items()}
    policy.set_weights(weights=weights)
    policy.policy_spec = pure_strat_spec
def get_processor_types_from_config_class(config_class, allowed_mappings=None):
    """Collect the processor classes registered for `config_class`.

    A dedicated processor mapping wins outright; otherwise tokenizer plus
    image-processor (preferred) or feature-extractor entries are combined.
    Returns a tuple with None entries filtered out.
    """
    def _to_tuple(value):
        # Wrap non-sequences; normalize sequences to tuples.
        if isinstance(value, collections.abc.Sequence):
            return tuple(value)
        return (value,)

    if allowed_mappings is None:
        allowed_mappings = ['processor', 'tokenizer', 'image_processor', 'feature_extractor']
    found = ()
    if (config_class in PROCESSOR_MAPPING) and ('processor' in allowed_mappings):
        found = _to_tuple(PROCESSOR_MAPPING[config_class])
    else:
        if (config_class in TOKENIZER_MAPPING) and ('tokenizer' in allowed_mappings):
            found = TOKENIZER_MAPPING[config_class]
        # Prefer image processors; fall back to feature extractors.
        if (config_class in IMAGE_PROCESSOR_MAPPING) and ('image_processor' in allowed_mappings):
            found += _to_tuple(IMAGE_PROCESSOR_MAPPING[config_class])
        elif (config_class in FEATURE_EXTRACTOR_MAPPING) and ('feature_extractor' in allowed_mappings):
            found += _to_tuple(FEATURE_EXTRACTOR_MAPPING[config_class])
    return tuple(p for p in found if p is not None)
def log_variant(log_file, variant_data):
    """Serialize experiment variant data to `log_file` as pretty-printed JSON."""
    mkdir_p(os.path.dirname(log_file))
    if hasattr(variant_data, 'dump'):
        variant_data = variant_data.dump()
    serializable = stub_to_json(variant_data)
    with open(log_file, 'w') as out:
        json.dump(serializable, out, indent=2, sort_keys=True, cls=MyEncoder)
def _format_causes(err, level=0):
lines = []
def _print(string, offset=0):
lines.append(_pad(string, offset=offset))
def _pad(string, offset=0):
padding = (' ' * (level + offset))
padded_lines = [(padding + line) for line in string.split('\n')]
return '\n'.join(padded_lines)
def _format_path(path):
def _format(item):
if isinstance(item, str):
return '.{}'.format(item)
return '[{}]'.format(item)
return ''.join((['<root>'] + list(map(_format, path))))
_print("'{}' failed '{}' because of:".format(err.validator, _format_path(err.absolute_path)))
if (not err.context):
_print(str(err.message), offset=1)
else:
for suberr in err.context:
lines.append(_format_causes(suberr, (level + 1)))
return '\n'.join(lines) |
class RobertaTokenizer(GPT2Tokenizer):
    """RoBERTa tokenizer: GPT-2 byte-level BPE with BERT-style special tokens.

    Only the special-token defaults and the sequence-pair formatting helpers
    differ from GPT2Tokenizer.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['attention_mask']
    def __init__(self, vocab_file, merges_file, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, **kwargs):
        # Normalize string special tokens to AddedToken. The mask token uses
        # lstrip=True so "<mask>" matches with or without a preceding space.
        bos_token = (AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token)
        eos_token = (AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token)
        sep_token = (AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token)
        cls_token = (AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token)
        unk_token = (AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token)
        pad_token = (AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token)
        mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token)
        super().__init__(vocab_file=vocab_file, merges_file=merges_file, errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Format ids RoBERTa-style: `<s> A </s>` or `<s> A </s></s> B </s>`."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        # Note the doubled separator between the two sequences.
        return (((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            if (token_ids_1 is not None):
                raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')
            return list(map((lambda x: (1 if (x in [self.sep_token_id, self.cls_token_id]) else 0)), token_ids_0))
        if (token_ids_1 is None):
            return (([1] + ([0] * len(token_ids_0))) + [1])
        return (((([1] + ([0] * len(token_ids_0))) + [1, 1]) + ([0] * len(token_ids_1))) + [1])
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """All zeros: RoBERTa does not use token type ids."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return (len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0])
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the byte-level BPE treats the first word
        like a mid-sentence word."""
        if ('is_pretokenized' in kwargs):
            warnings.warn('`is_pretokenized` is deprecated and will be removed in a future version, use `is_split_into_words` instead.', FutureWarning)
            is_split_into_words = kwargs.pop('is_pretokenized')
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if ((is_split_into_words or add_prefix_space) and ((len(text) > 0) and (not text[0].isspace()))):
            text = (' ' + text)
        return (text, kwargs)
def Main():
    """Aggregate per-arc pronunciation stats into normalized per-(word, utt) counts.

    Reads a phone map and an arc-info file from the parsed args, then writes one
    line per (word, utterance, pronunciation) with its count normalized within
    that (word, utterance) pair. Removed the dead locals `lexicon` and
    `overlap`, which were never read.
    """
    args = GetArgs()
    prons = defaultdict(list)
    start_frames = {}
    stats = defaultdict((lambda : defaultdict(float)))
    sum_tot = defaultdict(float)
    phone_map = {}
    # Phone map file: "<src-phone> <dst-phone>" per line.
    for line in args.phone_map_handle.readlines():
        splits = line.strip().split()
        phone_map[splits[0]] = splits[1]
    # Arc info: utt start-frame ? count word phone...
    for line in args.arc_info_file_handle.readlines():
        splits = line.strip().split()
        if (len(splits) == 0):
            continue
        if (len(splits) < 6):
            raise Exception(((('Invalid format of line ' + line) + ' in ') + args.arc_info_file))
        utt = splits[0]
        start_frame = int(splits[1])
        word = splits[4]
        count = float(splits[3])
        phones_unmapped = splits[5:]
        phones = [phone_map[phone] for phone in phones_unmapped]
        phones = ' '.join(phones)
        if (word == '<eps>'):
            continue
        # Keep the first start frame seen for each (word, utt).
        if ((word, utt) not in start_frames):
            start_frames[(word, utt)] = start_frame
        if ((word, utt) in stats):
            stats[(word, utt)][phones] = (stats[(word, utt)].get(phones, 0) + count)
        else:
            stats[(word, utt)][phones] = count
        sum_tot[(word, utt)] += count
        if (phones not in prons[word]):
            prons[word].append(phones)
    # Normalize counts within each (word, utt) and emit one line per pronunciation.
    for (word, utt) in stats:
        count_sum = 0.0
        counts = dict()
        for phones in stats[(word, utt)]:
            count = stats[(word, utt)][phones]
            count_sum += count
            counts[phones] = count
        for phones in stats[(word, utt)]:
            count = (counts[phones] / count_sum)
            print(word, utt, start_frames[(word, utt)], count, phones, file=args.stats_file_handle)
    args.stats_file_handle.close()
def initialize(module, init_cfg):
    """Initialize `module` from one init-config dict or a list of them.

    Each config may carry an 'override' entry applied to matching submodules
    after the base initialization.

    Raises:
        TypeError: if init_cfg is neither a dict nor a list.
    """
    if not isinstance(init_cfg, (dict, list)):
        raise TypeError(f'init_cfg must be a dict or a list of dict, but got {type(init_cfg)}')
    cfgs = [init_cfg] if isinstance(init_cfg, dict) else init_cfg
    for cfg in cfgs:
        # Deep-copy so popping keys never mutates the caller's config.
        cp_cfg = copy.deepcopy(cfg)
        override = cp_cfg.pop('override', None)
        _initialize(module, cp_cfg)
        if override is None:
            continue
        cp_cfg.pop('layer', None)
        _initialize_override(module, override, cp_cfg)
def init_embedding(input_embedding):
    """Uniformly initialize an embedding in [-b, b] with b = sqrt(3 / dim)."""
    bound = np.sqrt(3.0 / input_embedding.size(1))
    nn.init.uniform_(input_embedding, -bound, bound)
def eval_func(args, model, accelerator, eval_dataloader, metric):
    """Evaluate `model` on `eval_dataloader`, timing post-warmup batches.

    Prints batch size, latency, and throughput, then returns the metric dict
    computed over the gathered predictions. Regression tasks (stsb) use raw
    squeezed logits instead of argmax.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    is_regression = (args.task_name == 'stsb')
    model.eval()
    for (step, batch) in enumerate(eval_dataloader):
        # Only time batches after the warmup period.
        if (step >= args.warmup_iter):
            start = time.time()
        outputs = model(**batch)
        if (step >= args.warmup_iter):
            batch_time.update((time.time() - start))
        predictions = (outputs.logits.argmax(dim=(- 1)) if (not is_regression) else outputs.logits.squeeze())
        # Gather across devices before accumulating the metric.
        metric.add_batch(predictions=accelerator.gather(predictions), references=accelerator.gather(batch['labels']))
    eval_metric = metric.compute()
    batch_size = args.per_device_eval_batch_size
    print('Batch size = {}'.format(batch_size))
    print(('Latency: %.3f ms' % ((batch_time.avg / batch_size) * 1000)))
    # NOTE(review): label says "images/sec" though the inputs appear to be text.
    print(('Throughput: %.3f images/sec' % (batch_size / batch_time.avg)))
    logger.info(f'{eval_metric}')
    return eval_metric
def nnsmith_div(left: Union[(float, int, z3.ExprRef)], right: Union[(float, int, z3.ExprRef)]):
    """Type-aware division over concrete values and z3 expressions.

    Bit-vectors use unsigned division; two Python ints use checked floor
    division; anything else falls back to true division.
    """
    (left, right) = align_bvs(left, right)
    if (isinstance(left, z3.BitVecRef) or isinstance(right, z3.BitVecRef)):
        return z3.UDiv(left, right)
    if (isinstance(left, int) and isinstance(right, int)):
        # Guard the concrete integer path against division by zero.
        ConstraintCheck.true((right != 0), 'Div by zero')
        return (left // right)
    return (left / right)
def convert_path(path: str, path_type: PathType=PathType.AUTO) -> str:
    """Convert `path` to the requested path flavor via its template form."""
    template = get_template_from_path(path)
    return get_path_from_template(template, path_type)
def sample_subgraphs(idx, G, output_path, iterations, num_factor, min_num_nodes=None, max_num_nodes=None, min_num_edges=None, max_num_edges=None):
    """Sample random-walk-with-restart subgraphs rooted at node `idx` and pickle
    the connected, size-conforming ones into `output_path`.

    The number of attempts scales with num_factor * sqrt(degree(idx)).
    """
    saved = 0
    attempts = num_factor * int(math.sqrt(G.degree[idx]))
    for _ in range(attempts):
        sub = random_walk_with_restart_sampling(G, idx, iterations=iterations, max_nodes=max_num_nodes, max_edges=max_num_edges)
        sub = nx.convert_node_labels_to_integers(sub)
        sub.remove_edges_from(nx.selfloop_edges(sub))
        # Reject samples outside the requested node/edge bounds.
        if not check_graph_size(sub, min_num_nodes, max_num_nodes, min_num_edges, max_num_edges):
            continue
        if nx.is_connected(sub):
            target = os.path.join(output_path, 'graph{}-{}.dat'.format(idx, saved))
            with open(target, 'wb') as handle:
                pickle.dump(sub, handle)
            saved += 1
class ImSeq2Seq(Dataset):
    """Pairs an ImageDataset with a Seq2Seq dataset, merging their samples per index."""

    def __init__(self, src, tgt, image, split, ckpt_dir, **kwargs):
        self.split = split
        self.seq2seq = Seq2Seq(src, tgt, split, ckpt_dir)
        self.image = ImageDataset(**image, split=split)
        # Expose the target tokenizer for downstream consumers.
        self.tgt_tokenizer = self.seq2seq.tgt.tokenizer
        self.tgt_tokenizer_max_len = self.seq2seq.tgt.tokenizer_max_len
        assert len(self.image) == len(self.seq2seq)

    def __getitem__(self, index):
        """Merged sample dict; seq2seq keys overwrite image keys on collision."""
        merged = dict(self.image.__getitem__(index))
        merged.update(self.seq2seq.__getitem__(index))
        return merged

    def get_collate_fn(self):
        """Collate by combining both sub-datasets' collate outputs (image keys win)."""
        def collate_fn(batch):
            merged = dict(self.seq2seq.get_collate_fn()(batch))
            merged.update(self.image.get_collate_fn()(batch))
            return merged
        return collate_fn

    def __len__(self):
        return len(self.seq2seq)

    def __repr__(self):
        return 'ImSeq2Seq\n' + str(self.seq2seq) + '\n' + str(self.image)
class ResNet34_128(ResNetBase):
    # ResNet-34 stage layout (3, 4, 6, 3) using BasicBlock, with a widened
    # 128-channel first stage (vs. the usual 64).
    BLOCK = BasicBlock
    PLANES = (128, 128, 256, 512)
    LAYERS = (3, 4, 6, 3)
def refine_results(tokens, spans, stopwords):
    """Convert token-index spans into character spans over ' '.join(tokens).

    Non-candidate spans are trimmed of leading stopwords and re-extended over a
    preceding article; spans fully contained in an earlier-kept (longer-first)
    span are dropped.
    """
    char_spans = []
    for (start, end, is_candidate) in spans:
        if not is_candidate:
            # Strip leading stopwords.
            while start < end and tokens[start].lower() in stopwords:
                start += 1
            if start >= end:
                continue
            # Pull a preceding article back into the span.
            if start > 0 and tokens[start - 1] in ['a', 'an', 'A', 'An', 'the', 'The']:
                start -= 1
        text = ' '.join(tokens[start:end])
        # Character offset: joined prefix plus one separating space (if any).
        char_start = len(' '.join(tokens[:start])) + (1 * min(1, start))
        char_end = char_start + len(text)
        char_spans.append((text, char_start, char_end))
    # Sort by start ascending, then by length descending so containers come first.
    char_spans = sorted(char_spans, key=lambda item: (item[1], item[1] - item[2]))
    kept = []
    for entry in char_spans:
        (_, c_start, c_end) = entry
        contained = False
        for (_, k_start, k_end) in kept:
            if k_start <= c_start < c_end <= k_end:
                contained = True
                break
        if not contained:
            kept.append(entry)
    return kept
class TFEsmPreTrainedModel(metaclass=DummyObject):
    # Import-time placeholder: instantiating it raises a helpful error unless
    # the TensorFlow backend is installed.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def test_mobilenet_v1_block():
    """Smoke-test MobileNetV1Block output shapes for stride-1 and stride-2 configs."""
    cases = [
        ((1, 32, 112, 112), 32, 64, 1, (1, 64, 112, 112)),
        ((1, 64, 112, 112), 64, 128, 2, (1, 128, 56, 56)),
    ]
    for in_shape, inplanes, planes, stride, expected_shape in cases:
        block = MobileNetV1Block(inplanes, planes, stride)
        print(block)
        result = block(torch.randn(*in_shape))
        print(result.shape)
        assert (result.shape == expected_shape)
def inference(args, start_idx=0, end_idx=200):
    """Run LLM-prompted sentiment-tuple extraction on a random dataset sample.

    Only sample indices in [start_idx, end_idx] are processed; each example is
    printed alongside its gold tuples. Exits the process on the first error.
    """
    data_path = f'{args.data_path}/{args.task}/{args.dataset}/{args.data_type}.txt'
    (sources, targets) = get_transformed_io(data_path, args.dataset, args.data_type, top_k=1, args=args)
    samples = random.sample(list(zip(sources, targets)), args.num_sample)
    prompt = load_prompt(args.task, args.dataset, args.prompt_type)
    for (i, (source, target)) in enumerate(samples):
        if ((i < start_idx) or (i > end_idx)):
            continue
        print(i)
        try:
            source = ' '.join(source)
            gold_list = extract_spans_para(target, 'gold')
            print(gold_list)
            # Re-order gold tuples into each task's expected element order.
            if (args.task in ['asqp', 'acos']):
                gold_list = [(at, ot, ac, opinion2sentword[sp]) for (ac, at, sp, ot) in gold_list]
            elif (args.task == 'aste'):
                gold_list = [(at, ot, opinion2sentword[sp]) for (ac, at, sp, ot) in gold_list]
            elif (args.task == 'tasd'):
                gold_list = [(at, ac, opinion2sentword[sp]) for (ac, at, sp, ot) in gold_list]
            context = f'''Text: {source}
'''
            context += 'Sentiment Elements: '
            res = llm_chat((prompt + context))
            print((context + res))
            print(f'''Gold: {gold_list}
''')
            # Throttle successive LLM API calls.
            time.sleep(3)
        except BaseException as e:
            print(('>' * 30), 'exception:', e)
            exit()
            # NOTE(review): unreachable — exit() above terminates the process first.
            continue
def squared_l2_norm(x):
    """Squared L2 norm of `x` flattened into a single row (a 1-element tensor).

    Note: unsqueeze(0).shape[0] is always 1, so the whole tensor is reduced.
    """
    rows = x.unsqueeze(0).shape[0]
    flat = x.view(rows, -1)
    return flat.pow(2).sum(1)
def load_train(root_path, dir, batch_size, phase):
    """Build train/val DataLoaders from an ImageFolder with an 80/20 random split.

    `phase` selects the augmentation pipeline: 'src' uses random crop/flip,
    'tar' uses a plain resize.
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    transform_dict = {
        'src': transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]),
        'tar': transforms.Compose([transforms.Resize(224), transforms.ToTensor(), normalize]),
    }
    data = datasets.ImageFolder(root=os.path.join(root_path, dir), transform=transform_dict[phase])
    n_train = int(0.8 * len(data))
    data_train, data_val = torch.utils.data.random_split(data, [n_train, len(data) - n_train])
    loader_kwargs = dict(batch_size=batch_size, shuffle=True, drop_last=False, num_workers=4)
    train_loader = torch.utils.data.DataLoader(data_train, **loader_kwargs)
    val_loader = torch.utils.data.DataLoader(data_val, **loader_kwargs)
    return (train_loader, val_loader)
class TBRecorder(object):
    """Thin wrapper around a TensorBoard SummaryWriter for curves and image grids."""

    def __init__(self, tb_path):
        check_mkdir(dir_path=tb_path)
        self.tb = SummaryWriter(tb_path)

    def record_curve(self, name, data, curr_iter):
        """Log one scalar, or — for a list/tuple — one scalar per item (indexed by `name`)."""
        if isinstance(data, (tuple, list)):
            for (idx, data_item) in enumerate(data):
                self.tb.add_scalar(f'data/{name}_{idx}', data_item[name], curr_iter)
        else:
            self.tb.add_scalar(f'data/{name}', data, curr_iter)

    def record_image(self, name, data, curr_iter):
        """Log a batch of images as a single padded one-row grid."""
        grid = make_grid(data, nrow=data.size(0), padding=5)
        self.tb.add_image(name, grid, curr_iter)

    def close_tb(self):
        """Flush and close the underlying writer."""
        self.tb.close()
class TrainLoop():
    """Generic training loop for diffusion models.

    Handles microbatched forward/backward passes, optional fp16 training with
    dynamic loss scaling, EMA parameter tracking, LR annealing, gradient
    clipping, periodic evaluation/logging, and distributed checkpointing.

    Bug fix vs. the previous revision: ``run_loop`` drew a batch from the
    evaluation stream but then evaluated on the *training* batch; it now
    passes the eval batch to ``forward_only``.
    """

    def __init__(self, *, model, diffusion, data, batch_size, microbatch, lr, ema_rate, log_interval, save_interval, resume_checkpoint, use_fp16=False, fp16_scale_growth=0.001, schedule_sampler=None, weight_decay=0.0, lr_anneal_steps=0, checkpoint_path='', gradient_clipping=(- 1.0), eval_data=None, eval_interval=(- 1)):
        self.model = model
        self.diffusion = diffusion
        self.data = data
        self.eval_data = eval_data
        self.batch_size = batch_size
        # microbatch <= 0 disables gradient accumulation.
        self.microbatch = (microbatch if (microbatch > 0) else batch_size)
        self.lr = lr
        # ema_rate is either a single float or a comma-separated string of rates.
        self.ema_rate = ([ema_rate] if isinstance(ema_rate, float) else [float(x) for x in ema_rate.split(',')])
        self.log_interval = log_interval
        self.eval_interval = eval_interval
        self.save_interval = save_interval
        self.resume_checkpoint = resume_checkpoint
        self.use_fp16 = use_fp16
        self.fp16_scale_growth = fp16_scale_growth
        self.schedule_sampler = (schedule_sampler or UniformSampler(diffusion))
        self.weight_decay = weight_decay
        self.lr_anneal_steps = lr_anneal_steps
        self.gradient_clipping = gradient_clipping
        self.step = 0
        self.resume_step = 0
        self.global_batch = (self.batch_size * dist.get_world_size())
        self.model_params = list(self.model.parameters())
        # In fp32 mode master params alias the model params; _setup_fp16
        # replaces them with a flat fp32 copy.
        self.master_params = self.model_params
        self.lg_loss_scale = INITIAL_LOG_LOSS_SCALE
        self.sync_cuda = th.cuda.is_available()
        self.checkpoint_path = checkpoint_path
        self._load_and_sync_parameters()
        if self.use_fp16:
            self._setup_fp16()
        self.opt = AdamW(self.master_params, lr=self.lr, weight_decay=self.weight_decay)
        if self.resume_step:
            # Resuming: restore optimizer and EMA state from checkpoints.
            self._load_optimizer_state()
            self.ema_params = [self._load_ema_parameters(rate) for rate in self.ema_rate]
        else:
            self.ema_params = [copy.deepcopy(self.master_params) for _ in range(len(self.ema_rate))]
        if th.cuda.is_available():
            self.use_ddp = True
            self.ddp_model = DDP(self.model, device_ids=[dist_util.dev()], output_device=dist_util.dev(), broadcast_buffers=False, bucket_cap_mb=128, find_unused_parameters=False)
        else:
            if (dist.get_world_size() > 1):
                logger.warn('Distributed training requires CUDA. Gradients will not be synchronized properly!')
            self.use_ddp = False
            self.ddp_model = self.model

    def _load_and_sync_parameters(self):
        """Load model weights from a resume checkpoint (rank 0 only) and broadcast."""
        resume_checkpoint = (find_resume_checkpoint() or self.resume_checkpoint)
        if resume_checkpoint:
            self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
            if (dist.get_rank() == 0):
                logger.log(f'loading model from checkpoint: {resume_checkpoint}...')
                self.model.load_state_dict(dist_util.load_state_dict(resume_checkpoint, map_location=dist_util.dev()))
        # All ranks must sync even if no checkpoint was loaded.
        dist_util.sync_params(self.model.parameters())

    def _load_ema_parameters(self, rate):
        """Return EMA parameters for ``rate``, loaded from checkpoint when one exists."""
        ema_params = copy.deepcopy(self.master_params)
        main_checkpoint = (find_resume_checkpoint() or self.resume_checkpoint)
        ema_checkpoint = find_ema_checkpoint(main_checkpoint, self.resume_step, rate)
        if ema_checkpoint:
            if (dist.get_rank() == 0):
                logger.log(f'loading EMA from checkpoint: {ema_checkpoint}...')
                state_dict = dist_util.load_state_dict(ema_checkpoint, map_location=dist_util.dev())
                ema_params = self._state_dict_to_master_params(state_dict)
        dist_util.sync_params(ema_params)
        return ema_params

    def _load_optimizer_state(self):
        """Restore the AdamW state matching the resumed step, if present."""
        main_checkpoint = (find_resume_checkpoint() or self.resume_checkpoint)
        opt_checkpoint = bf.join(bf.dirname(main_checkpoint), f'opt{self.resume_step:06}.pt')
        if bf.exists(opt_checkpoint):
            logger.log(f'loading optimizer state from checkpoint: {opt_checkpoint}')
            state_dict = dist_util.load_state_dict(opt_checkpoint, map_location=dist_util.dev())
            self.opt.load_state_dict(state_dict)

    def _setup_fp16(self):
        """Create flat fp32 master params and convert the model to fp16."""
        self.master_params = make_master_params(self.model_params)
        self.model.convert_to_fp16()

    def run_loop(self):
        """Train until ``lr_anneal_steps`` (if set), logging/evaluating/saving periodically."""
        while ((not self.lr_anneal_steps) or ((self.step + self.resume_step) < self.lr_anneal_steps)):
            (batch, cond) = next(self.data)
            self.run_step(batch, cond)
            if ((self.step % self.log_interval) == 0):
                logger.dumpkvs()
            if ((self.eval_data is not None) and ((self.step % self.eval_interval) == 0)):
                (batch_eval, cond_eval) = next(self.eval_data)
                # Bug fix: evaluate on the validation batch, not the training batch.
                self.forward_only(batch_eval, cond_eval)
                print('eval on validation set')
                logger.dumpkvs()
            if ((self.step % self.save_interval) == 0):
                self.save()
                # Allow CI runs to terminate after one checkpoint.
                if (os.environ.get('DIFFUSION_TRAINING_TEST', '') and (self.step > 0)):
                    return
            self.step += 1
        # Save the final checkpoint unless the last step already saved one.
        if (((self.step - 1) % self.save_interval) != 0):
            self.save()

    def run_step(self, batch, cond):
        """One optimization step: forward/backward, optimize, log."""
        self.forward_backward(batch, cond)
        if self.use_fp16:
            self.optimize_fp16()
        else:
            self.optimize_normal()
        self.log_step()

    def forward_only(self, batch, cond):
        """Compute and log eval losses over ``batch`` without updating weights."""
        with th.no_grad():
            zero_grad(self.model_params)
            for i in range(0, batch.shape[0], self.microbatch):
                micro = batch[i:(i + self.microbatch)].to(dist_util.dev())
                micro_cond = {k: v[i:(i + self.microbatch)].to(dist_util.dev()) for (k, v) in cond.items()}
                last_batch = ((i + self.microbatch) >= batch.shape[0])
                (t, weights) = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())
                compute_losses = functools.partial(self.diffusion.training_losses, self.ddp_model, micro, t, model_kwargs=micro_cond)
                # Only sync gradients on the last microbatch under DDP.
                if (last_batch or (not self.use_ddp)):
                    losses = compute_losses()
                else:
                    with self.ddp_model.no_sync():
                        losses = compute_losses()
                log_loss_dict(self.diffusion, t, {f'eval_{k}': (v * weights) for (k, v) in losses.items()})

    def forward_backward(self, batch, cond):
        """Accumulate gradients over microbatches of ``batch``."""
        zero_grad(self.model_params)
        for i in range(0, batch.shape[0], self.microbatch):
            micro = batch[i:(i + self.microbatch)].to(dist_util.dev())
            micro_cond = {k: v[i:(i + self.microbatch)].to(dist_util.dev()) for (k, v) in cond.items()}
            last_batch = ((i + self.microbatch) >= batch.shape[0])
            (t, weights) = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())
            compute_losses = functools.partial(self.diffusion.training_losses, self.ddp_model, micro, t, model_kwargs=micro_cond)
            # Only sync gradients on the last microbatch under DDP.
            if (last_batch or (not self.use_ddp)):
                losses = compute_losses()
            else:
                with self.ddp_model.no_sync():
                    losses = compute_losses()
            if isinstance(self.schedule_sampler, LossAwareSampler):
                self.schedule_sampler.update_with_local_losses(t, losses['loss'].detach())
            loss = (losses['loss'] * weights).mean()
            log_loss_dict(self.diffusion, t, {k: (v * weights) for (k, v) in losses.items()})
            if self.use_fp16:
                # Scale the loss up so fp16 gradients don't underflow.
                loss_scale = (2 ** self.lg_loss_scale)
                (loss * loss_scale).backward()
            else:
                loss.backward()

    def optimize_fp16(self):
        """fp16 optimizer step with dynamic loss scaling and EMA update."""
        if any(((not th.isfinite(p.grad).all()) for p in self.model_params)):
            # Overflow: shrink the loss scale and skip this step.
            self.lg_loss_scale -= 1
            logger.log(f'Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}')
            return
        model_grads_to_master_grads(self.model_params, self.master_params)
        # Undo the loss scaling before the optimizer sees the gradients.
        self.master_params[0].grad.mul_((1.0 / (2 ** self.lg_loss_scale)))
        self._log_grad_norm()
        self._anneal_lr()
        self.opt.step()
        for (rate, params) in zip(self.ema_rate, self.ema_params):
            update_ema(params, self.master_params, rate=rate)
        master_params_to_model_params(self.model_params, self.master_params)
        # Slowly grow the loss scale after successful steps.
        self.lg_loss_scale += self.fp16_scale_growth

    def grad_clip(self):
        """Clip gradients to ``self.gradient_clipping``, preferring the optimizer's hook."""
        max_grad_norm = self.gradient_clipping
        if hasattr(self.opt, 'clip_grad_norm'):
            # Some fused optimizers provide their own clipping.
            self.opt.clip_grad_norm(max_grad_norm)
        else:
            th.nn.utils.clip_grad_norm_(self.model.parameters(), max_grad_norm)

    def optimize_normal(self):
        """fp32 optimizer step with optional clipping and EMA update."""
        if (self.gradient_clipping > 0):
            self.grad_clip()
        self._log_grad_norm()
        self._anneal_lr()
        self.opt.step()
        for (rate, params) in zip(self.ema_rate, self.ema_params):
            update_ema(params, self.master_params, rate=rate)

    def _log_grad_norm(self):
        """Log the global L2 norm of the master-parameter gradients."""
        sqsum = 0.0
        for p in self.master_params:
            sqsum += (p.grad ** 2).sum().item()
        logger.logkv_mean('grad_norm', np.sqrt(sqsum))

    def _anneal_lr(self):
        """Linearly decay the LR to zero over ``lr_anneal_steps`` (no-op if unset)."""
        if (not self.lr_anneal_steps):
            return
        frac_done = ((self.step + self.resume_step) / self.lr_anneal_steps)
        lr = (self.lr * (1 - frac_done))
        for param_group in self.opt.param_groups:
            param_group['lr'] = lr

    def log_step(self):
        """Record step counters (and the fp16 loss scale) for the logger."""
        logger.logkv('step', (self.step + self.resume_step))
        logger.logkv('samples', (((self.step + self.resume_step) + 1) * self.global_batch))
        if self.use_fp16:
            logger.logkv('lg_loss_scale', self.lg_loss_scale)

    def save(self):
        """Save the model and every EMA copy; rank 0 writes, all ranks barrier."""
        def save_checkpoint(rate, params):
            state_dict = self._master_params_to_state_dict(params)
            if (dist.get_rank() == 0):
                logger.log(f'saving model {rate}...')
                # rate == 0 marks the raw (non-EMA) model.
                if (not rate):
                    filename = f'model{(self.step + self.resume_step):06d}.pt'
                else:
                    filename = f'ema_{rate}_{(self.step + self.resume_step):06d}.pt'
                print('writing to', bf.join(get_blob_logdir(), filename))
                print('writing to', bf.join(self.checkpoint_path, filename))
                with bf.BlobFile(bf.join(self.checkpoint_path, filename), 'wb') as f:
                    th.save(state_dict, f)
        save_checkpoint(0, self.master_params)
        for (rate, params) in zip(self.ema_rate, self.ema_params):
            save_checkpoint(rate, params)
        dist.barrier()

    def _master_params_to_state_dict(self, master_params):
        """Map (possibly flat fp16 master) params back into a model state dict."""
        if self.use_fp16:
            master_params = unflatten_master_params(list(self.model.parameters()), master_params)
        state_dict = self.model.state_dict()
        for (i, (name, _value)) in enumerate(self.model.named_parameters()):
            assert (name in state_dict)
            state_dict[name] = master_params[i]
        return state_dict

    def _state_dict_to_master_params(self, state_dict):
        """Inverse of ``_master_params_to_state_dict``."""
        params = [state_dict[name] for (name, _) in self.model.named_parameters()]
        if self.use_fp16:
            return make_master_params(params)
        else:
            return params
class PickAxe(BasePickAxe):
    """An iron pick-axe: weight 100, d3 damage, no to-hit bonus."""

    def __init__(self):
        super().__init__(
            'pick-axe',
            weight=100,
            damage=D.Dice.from_str('d3'),
            material=M.Iron,
            hit=0,
        )
def wilcoxon_significance(preds_dir: str, split_type: str):
    """Print a TSV table of Wilcoxon signed-rank p-values per dataset/comparison.

    For each dataset, per-experiment metric values are computed (per-example
    for regression, over 30 chunks otherwise), then each configured experiment
    pair is compared. Missing predictions print 'Error' for that cell.
    The 'compare_lsc_scaffold' experiment only applies to the scaffold split.
    """
    header = '\t'.join(f'{exp_1} vs {exp_2}' for (exp_1, exp_2) in COMPARISONS)
    print('dataset\t' + header)
    for dataset in DATASETS:
        task_type = DATASETS[dataset]['type']
        values_by_experiment = {}
        for experiment in EXPERIMENTS:
            if experiment == 'compare_lsc_scaffold' and split_type != 'scaffold':
                continue
            preds, targets = load_preds_and_targets(preds_dir, experiment, dataset, split_type)
            if preds is None or targets is None:
                values_by_experiment[experiment] = None
                continue
            if task_type == 'regression':
                # One singleton group per example.
                preds = [[p] for p in preds]
                targets = [[t] for t in targets]
            else:
                preds = np.array_split(preds, 30)
                targets = np.array_split(targets, 30)
            values_by_experiment[experiment] = compute_values(dataset, preds, targets)
        print(dataset, end='\t')
        for (exp_1, exp_2) in COMPARISONS:
            if 'compare_lsc_scaffold' in (exp_1, exp_2) and split_type != 'scaffold':
                continue
            values_1 = values_by_experiment[exp_1]
            values_2 = values_by_experiment[exp_2]
            if values_1 is None or values_2 is None:
                print('Error', end='\t')
                continue
            assert len(values_1) == len(values_2)
            # Drop any pair containing a NaN before the paired test.
            paired = [(v_1, v_2) for (v_1, v_2) in zip(values_1, values_2)
                      if not (np.isnan(v_1) or np.isnan(v_2))]
            values_1, values_2 = zip(*paired)
            alternative = 'less' if task_type == 'regression' else 'greater'
            print(wilcoxon(values_1, values_2, alternative=alternative).pvalue, end='\t')
        print()
def prepare_submission():
    """Convert nnU-Net CREMI test-set predictions to the CREMI HDF submission format.

    Reads the predicted NIfTI volumes for samples A+/B+/C+, relabels background
    voxels (label 0) to the CREMI "no cleft" sentinel (max uint64), and writes
    each volume as a clefts Volume (40x4x4 nm resolution) into a CremiFile.

    Note: the original code had `pred[(pred == 0)] =` with no right-hand side
    (a syntax error); the CREMI format uses 0xffffffffffffffff for non-cleft
    voxels, which is restored here.
    """
    from cremi.io import CremiFile
    from cremi.Volume import Volume
    base = '/home/fabian/drives/datasets/results/nnUNet/test_sets/Task061_CREMI/'
    # CREMI marks non-cleft voxels with the largest uint64 value.
    background_label = np.uint64(0xFFFFFFFFFFFFFFFF)
    for sample in ('a', 'b', 'c'):
        pred = sitk.GetArrayFromImage(sitk.ReadImage(join(base, 'results_3d_fullres', f'sample_{sample}+.nii.gz'))).astype(np.uint64)
        pred[(pred == 0)] = background_label
        out_file = CremiFile(join(base, f'sample_{sample.upper()}+_.hdf'), 'w')
        clefts = Volume(pred, (40.0, 4.0, 4.0))
        out_file.write_clefts(clefts)
        out_file.close()
class ReflectionPad3d(nn.Module):
    """Reflection-pad the spatial dims of a 5D (B, T, C, H, W) tensor.

    Time and channel axes are folded together so nn.ReflectionPad2d can be
    reused; despite the name, no padding is applied along the temporal axis.
    """

    def __init__(self, padding):
        super().__init__()
        self.pad = nn.ReflectionPad2d(padding)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        (b, t, c, h, w) = x.shape
        folded = rearrange(x, 'b t c h w -> b (t c) h w')
        padded = self.pad(folded)
        return rearrange(padded, 'b (t c) h w -> b t c h w', t=t, c=c)
class Dataset(object):
    """Random-access dataset backed by a (possibly nested) dict of aligned arrays."""

    def __init__(self, dataset_dict: DatasetDict, seed: Optional[int]=None):
        self.dataset_dict = dataset_dict
        # All leaf arrays must agree on their leading length.
        self.dataset_len = _check_lengths(dataset_dict)
        # RNG state is created lazily; seed() fills both fields.
        self._np_random = None
        self._seed = None
        if (seed is not None):
            self.seed(seed)

    @property
    def np_random(self) -> np.random.RandomState:
        """RNG used for sampling, seeded on first access.

        Bug fix: this was declared as a plain method, but ``sample`` accesses
        it as an attribute (``self.np_random.integers``/``.randint``), which
        looked up attributes on the bound method object and raised
        AttributeError. Declaring it a property matches its usage.
        """
        if (self._np_random is None):
            self.seed()
        return self._np_random

    def seed(self, seed: Optional[int]=None) -> list:
        """(Re)seed the RNG; returns the seed list, gym-style."""
        (self._np_random, self._seed) = seeding.np_random(seed)
        return [self._seed]

    def __len__(self) -> int:
        return self.dataset_len

    def sample(self, batch_size: int, keys: Optional[Iterable[str]]=None, indx: Optional[np.ndarray]=None) -> frozen_dict.FrozenDict:
        """Sample a frozen batch of ``batch_size`` rows.

        If ``indx`` is given, those row indices are used instead of random
        ones; ``keys`` restricts which top-level entries are returned.
        """
        if (indx is None):
            # numpy Generator exposes `integers`; legacy RandomState only `randint`.
            if hasattr(self.np_random, 'integers'):
                indx = self.np_random.integers(len(self), size=batch_size)
            else:
                indx = self.np_random.randint(len(self), size=batch_size)
        batch = dict()
        if (keys is None):
            keys = self.dataset_dict.keys()
        for k in keys:
            if isinstance(self.dataset_dict[k], dict):
                # Recurse into nested dicts with the same indices.
                batch[k] = _sample(self.dataset_dict[k], indx)
            else:
                batch[k] = self.dataset_dict[k][indx]
        return frozen_dict.freeze(batch)
def convert_roberta_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head):
    """Copy weights from a fairseq RoBERTa checkpoint into a HuggingFace model.

    Builds a BertConfig from the fairseq args, copies embeddings, every
    encoder layer, and either the MNLI classification head or the LM head,
    then verifies both models produce (near-)identical outputs on a sample
    sentence before saving to `pytorch_dump_folder_path`.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()
    roberta_sent_encoder = roberta.model.decoder.sentence_encoder
    # RoBERTa uses 514 position embeddings (512 + padding offset) and a single
    # token type; layer sizes are taken from the fairseq training args.
    config = BertConfig(vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings, hidden_size=roberta.args.encoder_embed_dim, num_hidden_layers=roberta.args.encoder_layers, num_attention_heads=roberta.args.encoder_attention_heads, intermediate_size=roberta.args.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-05)
    if classification_head:
        config.num_labels = roberta.args.num_classes
    print('Our BERT config:', config)
    model = (RobertaForSequenceClassification(config) if classification_head else RobertaForMaskedLM(config))
    model.eval()
    # --- Embeddings ---
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    # Token-type embeddings are zeroed: fairseq RoBERTa has none.
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(model.roberta.embeddings.token_type_embeddings.weight)
    model.roberta.embeddings.LayerNorm.weight = roberta_sent_encoder.emb_layer_norm.weight
    model.roberta.embeddings.LayerNorm.bias = roberta_sent_encoder.emb_layer_norm.bias
    # --- Encoder layers: copy attention, intermediate, and output weights ---
    for i in range(config.num_hidden_layers):
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
        self_attn: BertSelfAttention = layer.attention.self
        assert (roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size)))
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
        # Self-attention output projection + layer norm.
        self_output: BertSelfOutput = layer.attention.output
        assert (self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape)
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = roberta_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = roberta_layer.self_attn_layer_norm.bias
        # Feed-forward: fc1 -> intermediate, fc2 -> output.
        intermediate: BertIntermediate = layer.intermediate
        assert (intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape)
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        bert_output: BertOutput = layer.output
        assert (bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape)
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        bert_output.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = roberta_layer.final_layer_norm.bias
    # --- Head: MNLI classification head or masked-LM head ---
    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads['mnli'].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads['mnli'].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads['mnli'].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads['mnli'].out_proj.bias
    else:
        model.lm_head.dense.weight = roberta.model.decoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.decoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.decoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.decoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.decoder.lm_head.weight
        model.lm_head.bias = roberta.model.decoder.lm_head.bias
    # --- Sanity check: both models must agree on a sample sentence ---
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads['mnli'](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs((our_output - their_output))).item()
    print(f'max_absolute_diff = {max_absolute_diff}')
    success = torch.allclose(our_output, their_output, atol=0.001)
    # NOTE(review): the two string literals below are empty — they look like
    # success/failure markers (emoji?) lost in transit; confirm against upstream.
    print('Do both models output the same tensors?', ('' if success else ''))
    if (not success):
        raise Exception('Something went wRoNg')
    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
def evaluate(agent, env: gym.Env, num_episodes: int) -> Dict[(str, float)]:
    """Roll out ``agent`` for ``num_episodes`` and return mean return/length.

    The env is wrapped with RecordEpisodeStatistics so per-episode returns
    and lengths accumulate in its queues.
    """
    env = gym.wrappers.RecordEpisodeStatistics(env, deque_size=num_episodes)
    for _ in range(num_episodes):
        observation = env.reset()
        done = False
        while not done:
            action = agent.eval_actions(observation)
            (observation, _, done, _) = env.step(action)
    return {'return': np.mean(env.return_queue), 'length': np.mean(env.length_queue)}
def viz_curves(df, filename='out.html', key_toggle='CountyName', keys_table=['CountyName', 'StateName'], keys_curves=['deaths', 'cases'], dropdown_suffix=' County', decimal_places=0, expl_dict=None, interval_dicts=None, point_id=None, show_stds=False):
    """Render an interactive HTML page: a table plus per-row curves with a dropdown.

    One pair of curves (``keys_curves``) is created per row of ``df``; a
    dropdown (labelled by ``key_toggle`` + ``dropdown_suffix``) toggles which
    row's traces are visible. The result is written to ``filename``.

    NOTE(review): ``expl_dict``, ``interval_dicts``, ``point_id`` and
    ``show_stds`` are accepted but never used in this body — presumably kept
    for signature compatibility with callers; confirm before removing.
    NOTE(review): ``credstr``/``cbluestr`` are module-level color strings
    defined elsewhere in the file.
    """
    color_strings = [credstr, cbluestr]
    # The table is itself a plotly figure; curve traces are appended to it
    # on a secondary axis pair (x2/y2).
    df_tab = df[keys_table]
    fig = ff.create_table(df_tab.round(decimal_places))
    traces = []
    num_traces_per_plot = len(keys_curves)
    # First row's toggle value — used below to pick the initially active button.
    key0 = df_tab[key_toggle].values[0]
    for i in range(df.shape[0]):
        row = df.iloc[i]
        key = row[key_toggle]
        for (j, key_curve) in enumerate(keys_curves):
            curve = row[key_curve]
            x = np.arange(curve.size)
            # Only the first row's traces start visible; the dropdown flips this.
            traces.append(go.Scatter(x=x, y=curve, showlegend=False, visible=(i == 0), name=key_curve, line=dict(color=color_strings[j], width=4), xaxis='x2', yaxis='y2'))
    fig.add_traces(traces)
    buttons = []
    for (i, key) in enumerate(df[key_toggle].values):
        # Index 0 in the visibility mask is the table trace, hence the offset.
        table_offset = 1
        visible = np.array((([True] * table_offset) + (([False] * num_traces_per_plot) * len(df[key_toggle]))))
        # Enable only the i-th row's block of curve traces.
        visible[((num_traces_per_plot * i) + table_offset):((num_traces_per_plot * (i + 1)) + table_offset)] = True
        buttons.append(dict(method='restyle', args=[{'visible': visible}], label=(key + dropdown_suffix)))
    fig['layout']['xaxis2'] = {}
    fig['layout']['yaxis2'] = {}
    fig.layout.updatemenus = [go.layout.Updatemenu(dict(active=int(np.argmax((df[key_toggle].values == key0))), buttons=buttons, x=0.8, y=1.05, direction='down'))]
    # Table occupies the left half, curves the right; shared vertical extent.
    fig.layout.xaxis.update({'domain': [0, 0.5]})
    fig.layout.xaxis2.update({'domain': [0.6, 1.0]})
    fig.layout.xaxis2.update({'title': 'Time'})
    fig.layout.yaxis.update({'domain': [0, 0.9]})
    fig.layout.yaxis2.update({'domain': [0, 0.9], 'anchor': 'x2'})
    fig.layout.yaxis2.update({'title': 'Count'})
    fig.layout.margin.update({'t': 50, 'b': 120})
    fig.layout.update({'title': 'County-level outbreaks', 'height': 800})
    plot(fig, filename=filename, config={'showLink': False, 'showSendToCloud': False, 'sendData': True, 'responsive': True, 'autosizable': True, 'displaylogo': False})
    print('plot saved to', filename)
def test_evaluate():
    """Smoke-test KittiMonoDataset.evaluate (mAP) on bundled sample predictions."""
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')
    classes = ['Pedestrian', 'Cyclist', 'Car']
    eval_pipeline = [
        dict(type='LoadImageFromFileMono3D'),
        dict(type='LoadAnnotations3D', with_bbox=True, with_label=True, with_attr_label=False, with_bbox_3d=True, with_label_3d=True, with_bbox_depth=True),
        dict(type='Resize', img_scale=(1242, 375), keep_ratio=True),
        dict(type='Pad', size_divisor=32),
        dict(type='DefaultFormatBundle3D', class_names=classes),
        dict(type='Collect3D', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_bboxes_3d', 'gt_labels_3d', 'centers2d', 'depths']),
    ]
    dataset = KittiMonoDataset(
        ann_file='tests/data/kitti/kitti_infos_mono3d.coco.json',
        info_file='tests/data/kitti/kitti_infos_mono3d.pkl',
        pipeline=eval_pipeline,
        data_root='tests/data/kitti/',
        test_mode=True)
    predictions = mmcv.load('tests/data/kitti/mono3d_sample_results.pkl')
    # Merge the 2D boxes from the separate 2D results file.
    predictions2d = mmcv.load('tests/data/kitti/mono3d_sample_results2d.pkl')
    predictions[0]['img_bbox2d'] = predictions2d[0]['img_bbox2d']
    ap_dict = dataset.evaluate(predictions, ['mAP'])
    expected = {
        'img_bbox/KITTI/Overall_3D_easy': 3.0303,
        'img_bbox/KITTI/Overall_3D_moderate': 6.0606,
        'img_bbox/KITTI/Overall_3D_hard': 6.0606,
        'img_bbox2d/KITTI/Overall_2D_easy': 3.0303,
        'img_bbox2d/KITTI/Overall_2D_moderate': 6.0606,
        'img_bbox2d/KITTI/Overall_2D_hard': 6.0606,
    }
    for metric_key, metric_value in expected.items():
        assert np.isclose(ap_dict[metric_key], metric_value)
class MSCOCOVocab(object):
    """Bidirectional word <-> index vocabulary for MS-COCO captions.

    Unknown words map to the index of the '<unk>' token, which callers must
    register (via add_word) before performing lookups.
    """

    def __init__(self):
        self.stoi = {}  # word -> index
        self.itos = {}  # index -> word
        self.idx = 0    # next free index

    def add_word(self, word):
        """Register ``word`` if unseen, assigning it the next free index."""
        if word in self.stoi:
            return
        self.stoi[word] = self.idx
        self.itos[self.idx] = word
        self.idx += 1

    def __call__(self, word):
        """Return the index of ``word``, falling back to the '<unk>' index."""
        if word in self.stoi:
            return self.stoi[word]
        return self.stoi['<unk>']

    def __len__(self):
        return len(self.stoi)
class Normalize(Base):
    """In-place shift/scale transform: img -> (img - mean) / std.

    If ``num`` is positive, inputs with index k >= num are passed through
    unchanged; ``num == -1`` (the default) normalizes everything.
    """

    def __init__(self, mean=0.0, std=1.0, num=(- 1)):
        self.mean = mean
        self.std = std
        self.num = num  # -1 disables the per-index cutoff

    def tf(self, img, k=0):
        # Skip inputs beyond the configured cutoff.
        if 0 < self.num <= k:
            return img
        img -= self.mean
        img /= self.std
        return img

    def __str__(self):
        return 'Normalize()'
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.