code stringlengths 101 5.91M |
|---|
def create_dataset(trainset_file, devset_file, device, vocab_size=None, embed_type=None, embed_dir=None):
    """Build a CoQADataset from train/dev files with optional vocab/embedding config."""
    dataset_options = {
        'vocab_size': vocab_size,
        'device': device,
        'embed_type': embed_type,
        'embed_dir': embed_dir,
    }
    return CoQADataset(trainset_file, devset_file, **dataset_options)
def parse_args():
    """Parse command-line options for sampling from the session model.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser('Sample (with beam-search) from the session model')
    # BUG FIX: this flag previously used action='store_false' (default True,
    # passing the flag turned ignoring OFF), which inverted the documented
    # behavior.  With store_true the flag enables ignoring <unk>, as the
    # help text describes.
    parser.add_argument('--ignore-unk', action='store_true', help='Disables the generation of unknown words (<unk> tokens)')
    parser.add_argument('model_prefix', help='Path to the model prefix (without _model.npz or _state.pkl)')
    parser.add_argument('context', help='File of input contexts')
    parser.add_argument('output', help='Output file')
    parser.add_argument('--beam_search', action='store_true', help='Use beam search instead of random search')
    # Default is now a real int instead of the string '1'.
    parser.add_argument('--n-samples', default=1, type=int, help='Number of samples')
    parser.add_argument('--n-turns', default=1, type=int, help='Number of dialog turns to generate')
    parser.add_argument('--verbose', action='store_true', default=False, help='Be verbose')
    parser.add_argument('changes', nargs='?', default='', help='Changes to state')
    return parser.parse_args()
def lrelu(x, a):
    """Leaky ReLU: identity for positive inputs, slope `a` for negative inputs."""
    with tf.name_scope('lrelu'):
        x = tf.identity(x)
        # f(x) = 0.5*(1+a)*x + 0.5*(1-a)*|x|  ==  x for x>=0, a*x for x<0
        positive_part = (0.5 * (1 + a)) * x
        negative_part = (0.5 * (1 - a)) * tf.abs(x)
        return positive_part + negative_part
class ActivationStats(HookCallback):
    """Callback that records the mean/std of each hooked module's output
    for every training batch."""

    def on_train_begin(self, **kwargs):
        """Reset the collected statistics at the start of training."""
        super().on_train_begin(**kwargs)
        self.stats = []

    def hook(self, m: nn.Module, i: Tensors, o: Tensors) -> Tuple[(Rank0Tensor, Rank0Tensor)]:
        """Capture (mean, std) of a module's output as Python floats."""
        return (o.mean().item(), o.std().item())

    def on_batch_end(self, train, **kwargs):
        """Append the hooks' stored stats, training batches only."""
        if not train:
            return
        self.stats.append(self.hooks.stored)

    def on_train_end(self, **kwargs):
        """Convert the history to a tensor; permute(2, 1, 0) puts the batch
        axis last (presumably (stat, module, batch) — confirm upstream)."""
        super().on_train_end(**kwargs)
        self.stats = tensor(self.stats).permute(2, 1, 0)
def main():
    """Run the Pieri and Littlewood-Richardson homotopy test suites."""
    suites = (
        ('\nTesting the Pieri homotopies ...\n', test_pieri),
        ('\nTesting the Littlewood-Richardson homotopies ...', test_lrhom),
    )
    for banner, runner in suites:
        print(banner)
        runner()
def get_angle(a, b, c):
    """Return the angle ABC (at vertex ``b``) in degrees, folded into [0, 180].

    Points are (x, y) pairs.
    """
    ray_to_c = math.atan2(c[1] - b[1], c[0] - b[0])
    ray_to_a = math.atan2(a[1] - b[1], a[0] - b[0])
    ang = math.degrees(ray_to_c - ray_to_a)
    if ang < 0:
        ang += 360
    # Fold reflex angles back into [0, 180].
    return ang if ang < 180 else 360 - ang
def calc_metrics(tp, p, t, percent=False):
    """Compute precision, recall, and F1 from counts.

    Args:
        tp: number of true positives.
        p: number of predicted items.
        t: number of gold (true) items.
        percent: if True, return all three values scaled by 100.

    Returns:
        (precision, recall, fb1) tuple; zero components when a denominator is 0.
    """
    precision = tp / p if p else 0
    recall = tp / t if t else 0
    denom = precision + recall
    fb1 = (2 * precision * recall) / denom if denom else 0
    if percent:
        return (100 * precision, 100 * recall, 100 * fb1)
    return (precision, recall, fb1)
def train_dev_test_split(dialogs):
    """Shuffle ``dialogs`` and split it 80/10/10 into train/dev/test.

    Note: the input list is shuffled IN PLACE by random.shuffle.

    Returns:
        dict with 'train', 'dev', and 'test' lists.
    """
    random.shuffle(dialogs)
    n_dial = len(dialogs)
    cut_train = int(n_dial * 0.8)
    cut_dev = int(n_dial * 0.9)
    return {
        'train': dialogs[:cut_train],
        'dev': dialogs[cut_train:cut_dev],
        'test': dialogs[cut_dev:],
    }
def get_world_size(group):
    """Return the number of workers in ``group``; 1 when not distributed."""
    if use_xla():
        # On XLA/TPU the group is a ('tpu', members) tuple.
        assert group[0] == 'tpu'
        return len(_find_my_group(group[1]))
    if torch.distributed.is_initialized():
        return dist.get_world_size(group=group)
    return 1
class TestCategoricalLSTMPolicy(TfGraphTestCase):
    """Tests for CategoricalLSTMPolicy: env validation, action sampling,
    symbolic-build consistency, pickling, and state-info specs.

    NOTE(review): the parametrize lines below were previously bare
    ``.parametrize(...)`` expressions (syntax errors — the ``@pytest.mark``
    prefix had been stripped); restored as proper decorators.
    """

    def test_invalid_env(self):
        """A continuous (Box) action space must be rejected with ValueError."""
        env = GarageEnv(DummyBoxEnv())
        with pytest.raises(ValueError):
            CategoricalLSTMPolicy(env_spec=env.spec)

    @pytest.mark.parametrize('obs_dim, action_dim, hidden_dim, obs_type', [((1,), 1, 4, 'discrete'), ((2,), 2, 4, 'discrete'), ((1, 1), 1, 4, 'discrete'), ((2, 2), 2, 4, 'discrete'), ((1,), 1, 4, 'dict')])
    def test_get_action_state_include_action(self, obs_dim, action_dim, hidden_dim, obs_type):
        """Sampled actions are valid when the previous action is fed back."""
        assert (obs_type in ['discrete', 'dict'])
        if (obs_type == 'discrete'):
            env = GarageEnv(DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
        else:
            env = GarageEnv(DummyDictEnv(obs_space_type='box', act_space_type='discrete'))
        policy = CategoricalLSTMPolicy(env_spec=env.spec, hidden_dim=hidden_dim, state_include_action=True)
        policy.reset()
        obs = env.reset()
        if (obs_type == 'discrete'):
            obs = obs.flatten()
        (action, _) = policy.get_action(obs)
        assert env.action_space.contains(action)
        (actions, _) = policy.get_actions([obs])
        for action in actions:
            assert env.action_space.contains(action)

    @pytest.mark.parametrize('obs_dim, action_dim, hidden_dim', [((1,), 1, 4), ((2,), 2, 4), ((1, 1), 1, 4), ((2, 2), 2, 4)])
    def test_get_action(self, obs_dim, action_dim, hidden_dim):
        """Sampled actions are valid without previous-action feedback."""
        env = GarageEnv(DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = CategoricalLSTMPolicy(env_spec=env.spec, hidden_dim=hidden_dim, state_include_action=False)
        policy.reset()
        obs = env.reset()
        (action, _) = policy.get_action(obs.flatten())
        assert env.action_space.contains(action)
        (actions, _) = policy.get_actions([obs.flatten()])
        for action in actions:
            assert env.action_space.contains(action)

    @pytest.mark.parametrize('obs_dim, action_dim, hidden_dim', [((1,), 1, 4), ((2,), 2, 4), ((1, 1), 1, 4), ((2, 2), 2, 4)])
    def test_build_state_include_action(self, obs_dim, action_dim, hidden_dim):
        """A rebuilt symbolic graph matches the default one (with prev action)."""
        env = GarageEnv(DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = CategoricalLSTMPolicy(env_spec=env.spec, hidden_dim=hidden_dim, state_include_action=True)
        policy.reset(do_resets=None)
        obs = env.reset()
        state_input = tf.compat.v1.placeholder(tf.float32, shape=(None, None, policy.input_dim))
        dist_sym = policy.build(state_input, name='dist_sym').dist
        # Observation is concatenated with a zero "previous action".
        concat_obs = np.concatenate([obs.flatten(), np.zeros(action_dim)])
        output1 = self.sess.run([policy.distribution.probs], feed_dict={policy.model.input: [[concat_obs], [concat_obs]]})
        output2 = self.sess.run([dist_sym.probs], feed_dict={state_input: [[concat_obs], [concat_obs]]})
        assert np.array_equal(output1, output2)

    @pytest.mark.parametrize('obs_dim, action_dim, hidden_dim', [((1,), 1, 4), ((2,), 2, 4), ((1, 1), 1, 4), ((2, 2), 2, 4)])
    def test_build_state_not_include_action(self, obs_dim, action_dim, hidden_dim):
        """A rebuilt symbolic graph matches the default one (no prev action)."""
        env = GarageEnv(DummyDiscreteEnv(obs_dim=obs_dim, action_dim=action_dim))
        policy = CategoricalLSTMPolicy(env_spec=env.spec, hidden_dim=hidden_dim, state_include_action=False)
        policy.reset(do_resets=None)
        obs = env.reset()
        state_input = tf.compat.v1.placeholder(tf.float32, shape=(None, None, policy.input_dim))
        dist_sym = policy.build(state_input, name='dist_sym').dist
        output1 = self.sess.run([policy.distribution.probs], feed_dict={policy.model.input: [[obs.flatten()], [obs.flatten()]]})
        output2 = self.sess.run([dist_sym.probs], feed_dict={state_input: [[obs.flatten()], [obs.flatten()]]})
        assert np.array_equal(output1, output2)

    def test_is_pickleable(self):
        """Pickling round-trips the policy with identical output distributions."""
        env = GarageEnv(DummyDiscreteEnv(obs_dim=(1,), action_dim=1))
        policy = CategoricalLSTMPolicy(env_spec=env.spec, state_include_action=False)
        policy.reset()
        obs = env.reset()
        # Perturb the LSTM weights so equality is not trivially true.
        policy.model._lstm_cell.weights[0].load(tf.ones_like(policy.model._lstm_cell.weights[0]).eval())
        output1 = self.sess.run([policy.distribution.probs], feed_dict={policy.model.input: [[obs.flatten()], [obs.flatten()]]})
        p = pickle.dumps(policy)
        with tf.compat.v1.Session(graph=tf.Graph()) as sess:
            policy_pickled = pickle.loads(p)
            output2 = sess.run([policy_pickled.distribution.probs], feed_dict={policy_pickled.model.input: [[obs.flatten()], [obs.flatten()]]})
            assert np.array_equal(output1, output2)

    def test_state_info_specs(self):
        """No extra state info when previous actions are not included."""
        env = GarageEnv(DummyDiscreteEnv(obs_dim=(10,), action_dim=4))
        policy = CategoricalLSTMPolicy(env_spec=env.spec, state_include_action=False)
        assert (policy.state_info_specs == [])

    def test_state_info_specs_with_state_include_action(self):
        """The previous action appears in the state info specs when included."""
        env = GarageEnv(DummyDiscreteEnv(obs_dim=(10,), action_dim=4))
        policy = CategoricalLSTMPolicy(env_spec=env.spec, state_include_action=True)
        assert (policy.state_info_specs == [('prev_action', (4,))])
def set_weights(full_name, module, fsq_value, hf_weight_path):
    """Copy ``fsq_value`` into the HF parameter located at ``hf_weight_path``.

    Args:
        full_name: human-readable name of the weight, used in messages.
        module: root module to resolve ``hf_weight_path`` against.
        fsq_value: tensor to copy in.
        hf_weight_path: dotted path to the target parameter.

    Raises:
        ValueError: if the source and target tensors differ in shape.
    """
    hf_weight = access_by_string(module, hf_weight_path)
    hf_value = hf_weight.data
    if fsq_value.shape != hf_value.shape:
        raise ValueError(f'{full_name} has size {fsq_value.shape}, but {hf_value.shape} was found.')
    hf_weight.data = fsq_value
    logger.info(f'{full_name} was correctly initialized from {hf_weight_path}.')
def pytorch2onnx(config_path, checkpoint_path, input_img, input_shape, opset_version=11, show=False, output_file='tmp.onnx', verify=False, normalize_cfg=None, dataset='coco', test_img=None, do_simplify=False, cfg_options=None):
    """Export an MMDetection model to ONNX, then optionally simplify and
    numerically verify the exported graph against the PyTorch model.

    Args:
        config_path: path to the mmdet model config.
        checkpoint_path: path to the trained checkpoint.
        input_img: image used to trace the export.
        input_shape: input tensor shape for the example input.
        opset_version: ONNX opset to target.
        show: if True, visualize both PyTorch and ONNX results during verify.
        output_file: destination .onnx path.
        verify: if True, run onnxruntime and compare outputs numerically.
        normalize_cfg: image normalization config forwarded to preprocessing.
        dataset: class-name set used for visualization (e.g. 'coco').
        test_img: optional different image for the verification pass.
        do_simplify: if True, run mmcv's ONNX simplifier on the export.
        cfg_options: overrides merged into the config.
    """
    input_config = {'input_shape': input_shape, 'input_path': input_img, 'normalize_cfg': normalize_cfg}
    # Keep an unwrapped copy of the model so its original forward can be
    # restored after export (the wrapped model has an ONNX-friendly forward).
    orig_model = build_model_from_cfg(config_path, checkpoint_path, cfg_options=cfg_options)
    (one_img, one_meta) = preprocess_example_input(input_config)
    (model, tensor_data) = generate_inputs_and_wrap_model(config_path, checkpoint_path, input_config, cfg_options=cfg_options)
    output_names = ['boxes']
    if model.with_bbox:
        output_names.append('labels')
    if model.with_mask:
        output_names.append('masks')
    torch.onnx.export(model, tensor_data, output_file, input_names=['input'], output_names=output_names, export_params=True, keep_initializers_as_inputs=True, do_constant_folding=True, verbose=show, opset_version=opset_version)
    # Restore the normal (non-export) forward for the verification run below.
    model.forward = orig_model.forward
    if do_simplify:
        from mmdet import digit_version
        import mmcv
        min_required_version = '1.2.5'
        assert (digit_version(mmcv.__version__) >= digit_version(min_required_version)), f'Requires to install mmcv>={min_required_version}'
        from mmcv.onnx.simplify import simplify
        input_dic = {'input': one_img.detach().cpu().numpy()}
        _ = simplify(output_file, [input_dic], output_file)
    print(f'Successfully exported ONNX model: {output_file}')
    if verify:
        from mmdet.core import get_classes, bbox2result
        from mmdet.apis import show_result_pyplot
        ort_custom_op_path = ''
        try:
            # Custom mmcv ops (e.g. deformable conv) need a runtime plugin.
            from mmcv.ops import get_onnxruntime_op_path
            ort_custom_op_path = get_onnxruntime_op_path()
        except (ImportError, ModuleNotFoundError):
            warnings.warn('If input model has custom op from mmcv, you may have to build mmcv with ONNXRuntime from source.')
        model.CLASSES = get_classes(dataset)
        num_classes = len(model.CLASSES)
        onnx_model = onnx.load(output_file)
        onnx.checker.check_model(onnx_model)
        if (test_img is not None):
            # Verify on a different image than the one used for tracing.
            input_config['input_path'] = test_img
            (one_img, one_meta) = preprocess_example_input(input_config)
            tensor_data = [one_img]
        pytorch_results = model(tensor_data, [[one_meta]], return_loss=False)
        pytorch_results = pytorch_results[0]
        # The only non-initializer graph input is the image tensor.
        input_all = [node.name for node in onnx_model.graph.input]
        input_initializer = [node.name for node in onnx_model.graph.initializer]
        net_feed_input = list((set(input_all) - set(input_initializer)))
        assert (len(net_feed_input) == 1)
        session_options = rt.SessionOptions()
        if osp.exists(ort_custom_op_path):
            session_options.register_custom_ops_library(ort_custom_op_path)
        sess = rt.InferenceSession(output_file, session_options)
        onnx_outputs = sess.run(None, {net_feed_input[0]: one_img.detach().numpy()})
        output_names = [_.name for _ in sess.get_outputs()]
        output_shapes = [_.shape for _ in onnx_outputs]
        print(f'onnxruntime output names: {output_names}, output shapes: {output_shapes}')
        nrof_out = len(onnx_outputs)
        assert (nrof_out > 0), 'Must have output'
        # 3 outputs means boxes + labels + masks were exported.
        with_mask = (nrof_out == 3)
        if (nrof_out == 1):
            onnx_results = onnx_outputs[0]
        else:
            (det_bboxes, det_labels) = onnx_outputs[:2]
            onnx_results = bbox2result(det_bboxes, det_labels, num_classes)
            if with_mask:
                # Group per-detection masks by predicted class, mirroring the
                # PyTorch (bbox_results, segm_results) output format.
                segm_results = onnx_outputs[2].squeeze(1)
                cls_segms = [[] for _ in range(num_classes)]
                for i in range(det_bboxes.shape[0]):
                    cls_segms[det_labels[i]].append(segm_results[i])
                onnx_results = (onnx_results, cls_segms)
        if show:
            show_result_pyplot(model, one_meta['show_img'], pytorch_results, title='Pytorch')
            show_result_pyplot(model, one_meta['show_img'], onnx_results, title='ONNX')
        if with_mask:
            compare_pairs = list(zip(onnx_results, pytorch_results))
        else:
            compare_pairs = [(onnx_results, pytorch_results)]
        for (onnx_res, pytorch_res) in compare_pairs:
            for (o_res, p_res) in zip(onnx_res, pytorch_res):
                np.testing.assert_allclose(o_res, p_res, rtol=0.001, atol=1e-05)
        print('The numerical values are the same between Pytorch and ONNX')
class LiftingSurface():
    """A single aerodynamic lifting surface (wing / fin / control surface)
    attached to a UAV link in a PyBullet simulation.

    Each physics step, lift/drag/moment coefficients are computed from a
    flat-plate-with-flap model and the resulting force and torque are applied
    to the link.

    NOTE(review): `_compute_aoa_freestream`, `_jitted_compute_aero_data`, and
    `_jitted_compute_force_torque` take no `self` but are called via
    `self.<name>(...)` — the `@staticmethod` decorators were evidently
    stripped and have been restored here; without them every call raises a
    TypeError from the extra `self` argument.
    """

    def __init__(
        self,
        p: bullet_client.BulletClient,
        physics_period: float,
        np_random: np.random.RandomState,
        uav_id: int,
        surface_id: int,
        lifting_unit: np.ndarray,
        forward_unit: np.ndarray,
        Cl_alpha_2D: float,
        chord: float,
        span: float,
        flap_to_chord: float,
        eta: float,
        alpha_0_base: float,
        alpha_stall_P_base: float,
        alpha_stall_N_base: float,
        Cd_0: float,
        deflection_limit: float,
        tau: float,
    ):
        self.p = p
        self.physics_period = physics_period
        self.np_random = np_random
        self.uav_id = uav_id
        self.surface_id = surface_id
        assert (lifting_unit.shape == (3,))
        assert (forward_unit.shape == (3,))
        # NOTE(review): `0.0 / physics_period` is just 0.0, so this only
        # enforces tau >= 0 even though the message says 0 is invalid —
        # confirm the intended lower bound.
        assert (tau >= (0.0 / physics_period)), f'Setting `tau = 1 / physics_period` is equivalent to 0, 0 is not a valid option, got {tau}.'
        if (np.linalg.norm(lifting_unit) != 1.0):
            warnings.warn(f'Norm of `lifting_unit={lifting_unit!r}` is not 1.0, normalizing...')
            lifting_unit /= np.linalg.norm(lifting_unit)
        if (np.linalg.norm(forward_unit) != 1.0):
            warnings.warn(f'Norm of `forward_unit={forward_unit!r}` is not 1.0, normalizing...')
            forward_unit /= np.linalg.norm(forward_unit)
        if (np.dot(lifting_unit, forward_unit) != 0.0):
            warnings.warn(f'`{forward_unit}` and `{lifting_unit}` are not orthogonal, you have been warned...')
        self.lift_unit = lifting_unit
        self.drag_unit = forward_unit
        # Right-handed third axis: moments act about this direction.
        self.torque_unit = np.cross(lifting_unit, forward_unit)
        self.Cl_alpha_2D = Cl_alpha_2D
        self.chord = chord
        self.span = span
        self.flap_to_chord = flap_to_chord
        self.eta = eta
        self.alpha_0_base = alpha_0_base
        self.alpha_stall_P_base = alpha_stall_P_base
        self.alpha_stall_N_base = alpha_stall_N_base
        self.Cd_0 = Cd_0
        self.deflection_limit = deflection_limit
        self.cmd_tau = tau
        # 0.5 * rho at sea level (1.225 kg/m^3): dynamic-pressure prefactor.
        self.half_rho = (0.5 * 1.225)
        self.area = (self.chord * self.span)
        self.aspect = (self.span / self.chord)
        # Stall/zero-lift angles are supplied in degrees; convert once.
        self.alpha_stall_P_base = np.deg2rad(self.alpha_stall_P_base)
        self.alpha_stall_N_base = np.deg2rad(self.alpha_stall_N_base)
        self.alpha_0_base = np.deg2rad(self.alpha_0_base)
        # Finite-wing lift slope from the 2D slope and aspect ratio.
        self.Cl_alpha_3D = (self.Cl_alpha_2D * (self.aspect / (self.aspect + ((2.0 * (self.aspect + 4.0)) / (self.aspect + 2.0)))))
        # Flap effectiveness from thin-airfoil theory.
        self.theta_f = np.arccos(((2.0 * self.flap_to_chord) - 1.0))
        self.aero_tau = (1 - ((self.theta_f - np.sin(self.theta_f)) / np.pi))
        self.local_surface_velocity = np.array([0.0, 0.0, 0.0])

    def reset(self):
        """Zero the commanded actuation (flap deflection fraction)."""
        self.actuation = 0.0

    def get_states(self) -> float:
        """Return the current actuation in [-1, 1] (fraction of the limit)."""
        return self.actuation

    def state_update(self, surface_velocity: np.ndarray):
        """Store the surface's velocity expressed in the link frame."""
        self.local_surface_velocity = surface_velocity

    def physics_update(self, cmd: float):
        """Advance the actuator by one step and apply aerodynamic loads."""
        # First-order lag toward the commanded deflection.
        self.actuation += ((self.physics_period / self.cmd_tau) * (cmd - self.actuation))
        (alpha, freestream_speed) = self._compute_aoa_freestream(self.local_surface_velocity, self.lift_unit, self.drag_unit)
        (Cl, Cd, CM) = self._jitted_compute_aero_data(alpha, self.aspect, self.flap_to_chord, self.aero_tau, self.actuation, self.deflection_limit, self.eta, self.Cl_alpha_3D, self.alpha_stall_P_base, self.alpha_0_base, self.alpha_stall_N_base, self.Cd_0)
        (force, torque) = self._jitted_compute_force_torque(alpha, freestream_speed, Cl, Cd, CM, self.half_rho, self.area, self.chord, self.lift_unit, self.drag_unit, self.torque_unit)
        self.p.applyExternalForce(self.uav_id, self.surface_id, force, [0.0, 0.0, 0.0], self.p.LINK_FRAME)
        self.p.applyExternalTorque(self.uav_id, self.surface_id, torque, self.p.LINK_FRAME)

    @staticmethod
    def _compute_aoa_freestream(local_surface_velocity: np.ndarray, lift_unit: np.ndarray, drag_unit: np.ndarray) -> tuple[(float, float)]:
        """Return (angle of attack [rad], freestream speed) from the local velocity."""
        freestream_speed = np.linalg.norm(local_surface_velocity).item()
        lifting_airspeed = np.dot(local_surface_velocity, lift_unit)
        forward_airspeed = np.dot(local_surface_velocity, drag_unit)
        alpha = np.arctan2((- lifting_airspeed), forward_airspeed)
        return (alpha, freestream_speed)

    @staticmethod
    def _jitted_compute_aero_data(alpha: float, aspect: float, flap_to_chord: float, aero_tau: float, actuation: float, deflection_limit: float, eta: float, Cl_alpha_3D: float, alpha_stall_P_base: float, alpha_0_base: float, alpha_stall_N_base: float, Cd_0: float) -> tuple[(float, float, float)]:
        """Return (Cl, Cd, CM) for the current angle of attack and deflection.

        Uses an attached-flow model between the (deflection-shifted) stall
        angles and a separated flat-plate model beyond them.
        """
        deflection_radians = np.deg2rad((actuation * deflection_limit))
        # Lift increment from flap deflection, and the shifted stall limits.
        delta_Cl = (((Cl_alpha_3D * aero_tau) * eta) * deflection_radians)
        delta_Cl_max = (flap_to_chord * delta_Cl)
        Cl_max_P = ((Cl_alpha_3D * (alpha_stall_P_base - alpha_0_base)) + delta_Cl_max)
        Cl_max_N = ((Cl_alpha_3D * (alpha_stall_N_base - alpha_0_base)) + delta_Cl_max)
        alpha_0 = (alpha_0_base - (delta_Cl / Cl_alpha_3D))
        alpha_stall_P = (alpha_0 + (Cl_max_P / Cl_alpha_3D))
        alpha_stall_N = (alpha_0 + (Cl_max_N / Cl_alpha_3D))
        if ((alpha_stall_N < alpha) and (alpha < alpha_stall_P)):
            # Attached (pre-stall) regime: linear lift with induced-angle correction.
            Cl = (Cl_alpha_3D * (alpha - alpha_0))
            alpha_i = (Cl / (np.pi * aspect))
            alpha_eff = ((alpha - alpha_0) - alpha_i)
            CT = (Cd_0 * np.cos(alpha_eff))
            CN = ((Cl + (CT * np.sin(alpha_eff))) / np.cos(alpha_eff))
            Cd = ((CN * np.sin(alpha_eff)) + (CT * np.cos(alpha_eff)))
            CM = ((- CN) * (0.25 - (0.175 * (1.0 - ((2.0 * alpha_eff) / np.pi)))))
            return (Cl, Cd, CM)
        # Post-stall regime: blend the induced angle toward 0 at +/- 90 degrees.
        if (alpha > 0.0):
            Cl_stall = (Cl_alpha_3D * (alpha_stall_P - alpha_0))
            alpha_i_at_stall = (Cl_stall / (np.pi * aspect))
            alpha_i = np.interp(alpha, [alpha_stall_P, (np.pi / 2.0)], [alpha_i_at_stall, 0.0])
        else:
            Cl_stall = (Cl_alpha_3D * (alpha_stall_N - alpha_0))
            alpha_i_at_stall = (Cl_stall / (np.pi * aspect))
            alpha_i = np.interp(alpha, [((- np.pi) / 2.0), alpha_stall_N], [0.0, alpha_i_at_stall])
        alpha_eff = ((alpha - alpha_0) - alpha_i)
        # Flat-plate normal-force model with deflection-dependent Cd at 90 deg.
        Cd_90 = (((((- 4.26) * (10 ** (- 2))) * (deflection_radians ** 2)) + ((2.1 * (10 ** (- 1))) * deflection_radians)) + 1.98)
        CN = ((Cd_90 * np.sin(alpha_eff)) * ((1.0 / (0.56 + (0.44 * abs(np.sin(alpha_eff))))) - (0.41 * (1.0 - np.exp(((- 17.0) / aspect))))))
        CT = ((0.5 * Cd_0) * np.cos(alpha_eff))
        Cl = ((CN * np.cos(alpha_eff)) - (CT * np.sin(alpha_eff)))
        Cd = ((CN * np.sin(alpha_eff)) + (CT * np.cos(alpha_eff)))
        CM = ((- CN) * (0.25 - (0.175 * (1.0 - ((2.0 * abs(alpha_eff)) / np.pi)))))
        return (Cl, Cd, CM)

    @staticmethod
    def _jitted_compute_force_torque(alpha: float, freestream_speed: float, Cl: float, Cd: float, CM: float, half_rho: float, area: float, chord: float, lift_unit: np.ndarray, drag_unit: np.ndarray, torque_unit: np.ndarray) -> tuple[(np.ndarray, np.ndarray)]:
        """Convert aero coefficients to a link-frame (force, torque) pair."""
        Q = (half_rho * np.square(freestream_speed))
        Q_area = (Q * area)
        lift = (Cl * Q_area)
        drag = (Cd * Q_area)
        # Rotate lift/drag from the wind frame into the surface axes.
        force_normal = ((lift * np.cos(alpha)) + (drag * np.sin(alpha)))
        force_parallel = ((lift * np.sin(alpha)) - (drag * np.cos(alpha)))
        force = ((lift_unit * force_normal) + (drag_unit * force_parallel))
        torque = (((Q_area * CM) * chord) * torque_unit)
        return (force, torque)
def add_emerging_index(df, col_name='emerging_index', target_days=(1, 2, 3), n_days_past=3, min_deaths=20, new_deaths=True):
    """Add an emerging-index column to ``df`` IN PLACE.

    Combines the most recent ``n_days_past + 1`` observed '#Deaths_*' columns
    with the 'Predicted Deaths {d}-day' columns for each day in
    ``target_days``, scores each row with ``compute_emerging_index``, and
    zeroes the score for rows whose 'tot_deaths' is below ``min_deaths``.

    Args:
        df: DataFrame with '#Deaths_*', prediction, and 'tot_deaths' columns.
            Mutated in place (gains ``col_name`` and a 'past_pred_deaths'
            helper column).
        col_name: name of the column to create.
        target_days: prediction horizons to include.  BUG FIX: this was a
            mutable list default (``[1, 2, 3]``); now an immutable tuple.
        n_days_past: number of past days of observed deaths to use.
        min_deaths: rows with fewer total deaths get an index of 0.
        new_deaths: if True, convert cumulative counts to daily increments.

    Returns:
        None (the result is written into ``df``).
    """
    past_cols = df.filter(regex='#Deaths_').columns[-(n_days_past + 1):].tolist()
    pred_cols = [f'Predicted Deaths {day}-day' for day in target_days]
    assert set(pred_cols).issubset(df.columns), f'not all predictions for target_days={str(target_days)} are in df!'
    # (removed: an unused `past_days` list derived from past_cols)
    d = df
    d['past_pred_deaths'] = d[(past_cols + pred_cols)].values.tolist()
    if new_deaths:
        d['past_pred_deaths'] = compute_new_deaths(df, 'past_pred_deaths')
    df[col_name] = d['past_pred_deaths'].apply(lambda x: compute_emerging_index(x, n_days_past))
    # (1 - bool) is 0 when tot_deaths < min_deaths, else 1: zeroes small counties.
    df[col_name] = ((1 - (df['tot_deaths'] < min_deaths)) * df[col_name])
    return None
def count_summary(sequence: List[E]) -> str:
    """Summarize element frequencies as 'elem: count' pairs, most common first."""
    ranked = Counter(sequence).most_common()
    return ', '.join(f'{tag}: {count}' for (tag, count) in ranked)
class unet(nn.Module):
    """U-Net with a 4-level encoder/decoder and skip connections.

    Args:
        n_classes: number of output channels / classes.
        n_channels: number of input channels (default 14).
        bilinear: if True, upsample bilinearly and halve the deepest channel
            counts; otherwise use learned (transposed-conv style) upsampling.
    """

    def __init__(self, n_classes, n_channels=14, bilinear=True):
        super(unet, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear
        # Encoder: channel counts double at every level.
        self.inc = DoubleConv(n_channels, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        factor = 2 if bilinear else 1
        self.down4 = Down(512, 1024 // factor)
        # Decoder: channel counts halve, each Up consumes a skip connection.
        self.up1 = Up(1024, 512 // factor, bilinear)
        self.up2 = Up(512, 256 // factor, bilinear)
        self.up3 = Up(256, 128 // factor, bilinear)
        self.up4 = Up(128, 64, bilinear)
        self.outc = OutConv(64, n_classes)

    def forward(self, x):
        """Encode, decode with skip connections, and project to class logits."""
        enc1 = self.inc(x)
        enc2 = self.down1(enc1)
        enc3 = self.down2(enc2)
        enc4 = self.down3(enc3)
        bottleneck = self.down4(enc4)
        dec = self.up1(bottleneck, enc4)
        dec = self.up2(dec, enc3)
        dec = self.up3(dec, enc2)
        dec = self.up4(dec, enc1)
        return self.outc(dec)
def test_multiple_files_scene_path(
):
    """Check that a dataset split across per-scene files loads a partial set of
    scenes and resolves episode scene ids to existing absolute paths."""
    dataset_config = get_config(CFG_MULTI_TEST).DATASET
    if (not PointNavDatasetV1.check_config_paths_exist(dataset_config)):
        pytest.skip('Test skipped as dataset files are missing.')
    scenes = PointNavDatasetV1.get_scenes_to_load(config=dataset_config)
    assert (len(scenes) > 0), 'Expected dataset contains separate episode file per scene.'
    # Restrict the config to a subset of scenes and an absolute scenes dir.
    dataset_config.defrost()
    dataset_config.CONTENT_SCENES = scenes[:PARTIAL_LOAD_SCENES]
    dataset_config.SCENES_DIR = os.path.join(os.getcwd(), DEFAULT_SCENE_PATH_PREFIX)
    dataset_config.freeze()
    partial_dataset = make_dataset(id_dataset=dataset_config.TYPE, config=dataset_config)
    assert (len(partial_dataset.scene_ids) == PARTIAL_LOAD_SCENES), "Number of loaded scenes doesn't correspond."
    print(partial_dataset.episodes[0].scene_id)
    # Scene ids should now be resolvable absolute paths on disk.
    assert os.path.exists(partial_dataset.episodes[0].scene_id), "Scene file {} doesn't exist using absolute path".format(partial_dataset.episodes[0].scene_id)
class GlobalContextTest(unittest.TestCase):
    """Tests for Context.config_master_port port-resolution behavior."""

    def test_config_master_port(self):
        """Explicit ports are taken verbatim; port 0 triggers auto-selection."""
        ctx = Context.singleton_instance()
        # A concrete non-zero port is used as-is.
        ctx.config_master_port(50001)
        self.assertEqual(ctx.master_port, 50001)
        # Port 0 with HOST_PORTS set: one of the listed ports is chosen.
        os.environ['HOST_PORTS'] = '20000,20001,20002,20003'
        ctx.config_master_port(0)
        self.assertTrue((ctx.master_port in [20000, 20001, 20002, 20003]))
        ctx.master_port = None
        # Port 0 with empty HOST_PORTS: a fallback port is picked; the test
        # only asserts it lands above 20000 (presumably random — confirm).
        os.environ['HOST_PORTS'] = ''
        ctx.config_master_port(0)
        self.assertTrue((ctx.master_port > 20000))
class GegenbauerPolynomials(torch.nn.Module):
    # Evaluates the degree-n Gegenbauer (ultraspherical) polynomial C_n^alpha
    # via its explicit monomial-coefficient expansion.
    # NOTE(review): relies on module-level `dtype`, `device`, and `loggamma`
    # (presumably scipy.special.loggamma) defined elsewhere in this file.
    def __init__(self, alpha, n):
        """alpha: Gegenbauer parameter; n: polynomial degree (non-negative int)."""
        super().__init__()
        self.alpha = alpha
        self.n = n
        self.coefficients = self.compute_coefficients()
        # Exponents 0..n used to build the monomial basis x**k in forward().
        self.powers = torch.arange(0.0, (self.n + 1.0), dtype=dtype, device=device)

    def compute_coefficients(self):
        """Return the monomial coefficients of C_n^alpha as a length-(n+1) tensor."""
        coefficients = torch.zeros((self.n + 1), dtype=dtype, device=device)
        # Base cases: C_0 = 1, C_1 = 2*alpha*x.
        if (self.n == 0):
            coefficients[0] = 1
        if (self.n == 1):
            coefficients[1] = (2 * self.alpha)
        if (self.n >= 2):
            # Explicit-sum formula; each term contributes to the x^(n-2k)
            # coefficient.  Computed in log space with loggamma to avoid
            # overflow in the factorial/gamma ratios, sign handled separately.
            for k in range(0, ((self.n // 2) + 1)):
                sgn = ((- 1) ** k)
                log_coeff = ((((((self.n - (2 * k)) * np.log(2)) + loggamma(((self.n - k) + self.alpha))) - loggamma(self.alpha)) - loggamma((k + 1))) - loggamma(((self.n - (2 * k)) + 1)))
                coeff = (sgn * np.exp(log_coeff))
                coefficients[(self.n - (2 * k))] = coeff
        return coefficients

    def forward(self, x):
        """Evaluate the polynomial at x via dot(x**powers, coefficients)."""
        x_pows = torch.pow(x, self.powers)
        return torch.dot(x_pows, self.coefficients)
# NOTE(review): the two bare `.parametrize(...)` expressions below were syntax
# errors (the `@pytest.mark` prefix had been stripped); restored as decorators.
@pytest.mark.parametrize('bandwidth', [(1.5 / 4), (1 / 8)])
@pytest.mark.parametrize('discretization_parameter', [4, 8, 16])
def test_tree_parameters__creation(bandwidth: float, discretization_parameter: int) -> None:
    """TreeParameters.construct produces a consistent, fully-populated object."""
    tree_params = TreeParameters.construct(bandwidth=bandwidth, discretization_parameter=discretization_parameter)
    # All expected fields exist.
    assert hasattr(tree_params, 'bandwidth')
    assert hasattr(tree_params, 'discretization_parameter')
    assert hasattr(tree_params, 'action_space')
    assert hasattr(tree_params, 'depth')
    assert hasattr(tree_params, 'spaces')
    assert hasattr(tree_params, 'volumes')
    assert hasattr(tree_params, 'probabilities')
    # Scalar fields have the right types and sane values.
    assert isinstance(tree_params.bandwidth, float)
    assert isinstance(tree_params.discretization_parameter, int)
    assert (tree_params.depth >= 1)
    assert isinstance(tree_params.depth, int)
    # Array fields are float-typed.
    assert_type(tree_params.action_space, float)
    assert_type(tree_params.spaces, float)
    assert_type(tree_params.volumes, float)
    assert_type(tree_params.probabilities, float)
    # Structural invariants: one action per discretization cell, depth is
    # log2 of the discretization parameter, spaces lie in [0, 1].
    assert (jnp.shape(tree_params.action_space)[0] == discretization_parameter)
    assert (int(np.log2(tree_params.discretization_parameter)) == tree_params.depth)
    assert (jnp.min(tree_params.spaces) >= 0)
    assert (jnp.max(tree_params.spaces) <= 1)
def get_logits(args, size, bias, bias_start=0.0, scope=None, mask=None, wd=0.0, input_keep_prob=1.0, is_train=None, func=None, reuse=False):
    """Dispatch to one of the logit-scoring functions selected by ``func``.

    Args:
        args: list of input tensors (most modes require exactly two).
        size: projection size (used only by the 'double' mode).
        bias, bias_start, scope, mask, wd, input_keep_prob, is_train, reuse:
            forwarded to the underlying logits helpers.
        func: one of 'sum', 'linear', 'double', 'dot', 'mul_linear', 'proj',
            'tri_linear'; None defaults to 'sum'.

    Raises:
        ValueError: if ``func`` is not a recognized mode.  BUG FIX: this was a
            bare ``raise Exception()`` with no message; ValueError subclasses
            Exception, so existing ``except Exception`` callers still work.
    """
    if func is None:
        func = 'sum'
    if func == 'sum':
        return sum_logits(args, mask=mask, name=scope)
    elif func == 'linear':
        return linear_logits(args, bias, bias_start=bias_start, scope=scope, mask=mask, wd=wd, input_keep_prob=input_keep_prob, is_train=is_train, reuse=reuse)
    elif func == 'double':
        return double_linear_logits(args, size, bias, bias_start=bias_start, scope=scope, mask=mask, wd=wd, input_keep_prob=input_keep_prob, is_train=is_train)
    elif func == 'dot':
        # Elementwise product of the two inputs, then summed.
        assert len(args) == 2
        arg = args[0] * args[1]
        return sum_logits([arg], mask=mask, name=scope)
    elif func == 'mul_linear':
        assert len(args) == 2
        arg = args[0] * args[1]
        return linear_logits([arg], bias, bias_start=bias_start, scope=scope, mask=mask, wd=wd, input_keep_prob=input_keep_prob, is_train=is_train)
    elif func == 'proj':
        # Project args[0] onto args[1]'s last dimension, then dot.
        assert len(args) == 2
        d = args[1].get_shape()[-1]
        proj = linear([args[0]], d, False, bias_start=bias_start, scope=scope, wd=wd, input_keep_prob=input_keep_prob, is_train=is_train)
        return sum_logits([(proj * args[1])], mask=mask)
    elif func == 'tri_linear':
        # Trilinear attention: [a; b; a*b] through a linear layer.
        assert len(args) == 2
        new_arg = args[0] * args[1]
        return linear_logits([args[0], args[1], new_arg], bias, bias_start=bias_start, scope=scope, mask=mask, wd=wd, input_keep_prob=input_keep_prob, is_train=is_train)
    else:
        raise ValueError(f'Unknown logits function: {func!r}')
class DFE(nn.Module):
    """Feature-extraction module chaining a DRB block and a DB block."""

    def __init__(self, in_channels, mid_channels):
        super().__init__()
        self.fe = nn.Sequential(
            DRB(in_channels),
            DB(in_channels, mid_channels, offset_channels=32),
        )

    def forward(self, x):
        """Apply both blocks in sequence."""
        return self.fe(x)
# NOTE(review): this registration was previously a bare statement
# (`_HEADS.register('SingleConvRPNHead')`) that never touched the class;
# restored as a decorator so RPNHead is actually registered under that name.
@_HEADS.register('SingleConvRPNHead')
class RPNHead(nn.Module):
    """RPN head: one shared 3x3 conv followed by 1x1 objectness-logit and
    box-delta convolutions applied to every feature level."""

    def __init__(self, cfg, in_channels, num_anchors):
        """cfg is accepted for interface compatibility but unused here."""
        super(RPNHead, self).__init__()
        self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
        # One objectness logit per anchor, four box deltas per anchor.
        self.cls_logits = nn.Conv2d(in_channels, num_anchors, kernel_size=1, stride=1)
        self.bbox_pred = nn.Conv2d(in_channels, (num_anchors * 4), kernel_size=1, stride=1)
        for l in [self.conv, self.cls_logits, self.bbox_pred]:
            torch.nn.init.normal_(l.weight, std=0.01)
            torch.nn.init.constant_(l.bias, 0)

    def forward(self, x):
        """Run the head over each feature map; returns (logits, bbox_reg) lists."""
        logits = []
        bbox_reg = []
        for feature in x:
            t = F.relu(self.conv(feature))
            logits.append(self.cls_logits(t))
            bbox_reg.append(self.bbox_pred(t))
        return (logits, bbox_reg)
def get_media_info(media_path):
    """Probe a media file and return (width, height, total_frames, fps).

    Args:
        media_path: path to a media file containing a video stream.

    Raises:
        ValueError: if the file does not exist or has no video stream.
            (BUG FIX: existence was previously checked with `assert`, which
            is stripped under `python -O`; a missing video stream previously
            crashed with TypeError on `None['width']`.)
    """
    if not os.path.isfile(media_path):
        raise ValueError(f'The media file does not exist: "{media_path}"')
    probe = ffmpeg.probe(media_path)
    video_stream = next((stream for stream in probe['streams'] if (stream['codec_type'] == 'video')), None)
    if video_stream is None:
        raise ValueError(f'No video stream found in: "{media_path}"')
    width = int(video_stream['width'])
    height = int(video_stream['height'])
    # Some containers omit nb_frames (e.g. raw/streamed video); fall back to 1.
    total_frames = (int(video_stream['nb_frames']) if ('nb_frames' in video_stream) else 1)
    # r_frame_rate is a rational like '30000/1001'.
    (fps_part1, fps_part2) = video_stream['r_frame_rate'].split(sep='/')
    fps = (float(fps_part1) / float(fps_part2))
    return (width, height, total_frames, fps)
class Writer():
    """Training logger: writes scalar metrics and batched text to TensorBoard
    under ``save_dir/summary``."""

    def __init__(self, save_dir):
        self.save_dir = Path(save_dir)
        self.log_mjson_file = None  # reserved; never assigned in this class
        self.summary_writter = None  # (sic) TensorBoard SummaryWriter, set by open()
        self.metrics = []
        # Step of the text batch currently being buffered; -1 means "none yet".
        self._text_current_gstep = (- 1)
        self._tb_texts = []  # text lines buffered until the step advances

    def open(self):
        """Create the summary directory and attach a SummaryWriter; returns self."""
        save_dir = self.save_dir
        assert save_dir.exists()
        summary_dir = (save_dir / 'summary')
        summary_dir.mkdir(parents=True, exist_ok=True)
        self.summary_writter = SummaryWriter(str(summary_dir))
        return self

    def close(self):
        """Export all logged scalars to JSON and release the SummaryWriter."""
        assert (self.summary_writter is not None)
        tb_json_path = str((self.save_dir / 'tensorboard_scalars.json'))
        self.summary_writter.export_scalars_to_json(tb_json_path)
        self.summary_writter.close()
        self.summary_writter = None

    def log_text(self, text, step, tag='regular log'):
        """Buffer text lines; flush the buffer to TensorBoard when ``step`` advances.

        NOTE(review): on a flush the incoming ``text`` is NOT appended to the
        new buffer, so the first line of each new step appears to be dropped —
        confirm whether this is intended.
        """
        if ((step > self._text_current_gstep) and (self._text_current_gstep != (- 1))):
            # Step advanced: emit everything buffered for the previous step.
            total_text = '\n'.join(self._tb_texts)
            self.summary_writter.add_text(tag, total_text, global_step=step)
            self._tb_texts = []
            self._text_current_gstep = step
        else:
            self._tb_texts.append(text)
            # First-ever call: latch the starting step.
            if (self._text_current_gstep == (- 1)):
                self._text_current_gstep = step

    def log_metrics(self, metrics: dict, step):
        """Write a (possibly nested) metrics dict as scalars; string values are skipped."""
        flatted_summarys = flat_nested_json_dict(metrics, '/')
        for (k, v) in flatted_summarys.items():
            if isinstance(v, (list, tuple)):
                # Sequences become one scalar per element, keyed k/0, k/1, ...
                if any([isinstance(e, str) for e in v]):
                    continue
                v_dict = {str(i): e for (i, e) in enumerate(v)}
                for (k1, v1) in v_dict.items():
                    self.summary_writter.add_scalar(((k + '/') + k1), v1, step)
            else:
                if isinstance(v, str):
                    continue
                self.summary_writter.add_scalar(k, v, step)
def concatenate(*mats: CSXMatrix3d, device=None):
    """Concatenate batched sparse matrices along the batch (first) dimension.

    All inputs must share the same concrete type and the same (row, col)
    shape; the result has shape (sum of batch sizes, h, w) on ``device``
    (defaults to the first matrix's device).
    """
    device = (mats[0].device if (device is None) else device)
    mat_type = type(mats[0])
    mat_h = mats[0].shape[1]
    mat_w = mats[0].shape[2]
    batch_size = 0
    indptr_offset = 0  # running nnz count; becomes a tensor after one iteration
    indices = []
    indptr = []
    data = []
    for mat in mats:
        assert (type(mat) == mat_type), 'Matrix type inconsistent'
        assert (mat.shape[1] == mat_h), 'Matrix shape inconsistent in dimension 1'
        assert (mat.shape[2] == mat_w), 'Matrix shape inconsistent in dimension 2'
        indices.append(mat.indices.clone().to(device))
        # Drop each matrix's trailing indptr sentinel and shift by the running
        # offset; a single final sentinel is appended once after the loop.
        indptr.append((mat.indptr[:(- 1)].clone().to(device) + indptr_offset))
        data.append(mat.data.clone().to(device))
        indptr_offset += mat.indptr[(- 1)]
        indptr_offset = indptr_offset.to(device)
        batch_size += mat.shape[0]
    indptr.append(indptr_offset)
    indices = torch.cat(indices)
    indptr = torch.cat(indptr)
    data = torch.cat(data)
    return mat_type([indices, indptr, data], shape=(batch_size, mat_h, mat_w))
class SGDTorch(SGD):
    """SGD whose momentum update mirrors PyTorch's formulation: the learning
    rate multiplies the whole velocity rather than only the gradient."""

    # NOTE(review): this line was previously the bare expression
    # `_get_updates_support` (a NameError at class-creation time with no
    # effect); restored as a decorator.  Presumably it is Keras'
    # legacy-get_updates interface wrapper — confirm the exact upstream name.
    @_get_updates_support
    def get_updates(self, loss, params):
        """Build the list of update ops for one optimization step."""
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]
        lr = self.lr
        if (self.initial_decay > 0):
            # Inverse-time learning-rate decay driven by the iteration counter.
            lr *= (1.0 / (1.0 + (self.decay * K.cast(self.iterations, K.dtype(self.decay)))))
        shapes = [K.int_shape(p) for p in params]
        moments = [K.zeros(shape) for shape in shapes]
        self.weights = ([self.iterations] + moments)
        for (p, g, m) in zip(params, grads, moments):
            # PyTorch-style velocity: v = momentum * v + g (lr applied below,
            # unlike Keras' native SGD which folds lr into v).
            v = ((self.momentum * m) + g)
            self.updates.append(K.update(m, v))
            if self.nesterov:
                new_p = (p - (lr * ((self.momentum * v) + g)))
            else:
                new_p = (p - (lr * v))
            if (getattr(p, 'constraint', None) is not None):
                new_p = p.constraint(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates
def main(opt):
    """Build a translator from ``opt`` and translate ``opt.data`` (no score report)."""
    translator = build_translator(opt, report_score=False)
    translator.translate(
        data_path=opt.data,
        batch_size=opt.batch_size,
        attn_debug=opt.attn_debug,
    )
class OpenAIGPTLMHeadModel():
    # Placeholder emitted when PyTorch is unavailable: every entry point
    # raises via requires_pytorch instead of doing real work.
    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        # NOTE(review): upstream dummy objects usually declare from_pretrained
        # as a classmethod — confirm against the real transformers dummies.
        requires_pytorch(self)
def mlp_mixer_s32(num_classes: int, image_size: int=224, channels: int=3):
    """Build the MLP-Mixer S/32 configuration (8 layers, 512 hidden dim,
    32-pixel patches)."""
    return MLPMixer(
        num_classes,
        image_size,
        channels,
        patch_size=32,
        num_layers=8,
        hidden_dim=512,
        tokens_hidden_dim=256,
        channels_hidden_dim=2048,
    )
def rank_roidb_ratio(roidb):
    """Clamp each image's aspect ratio to [0.5, 2], flag crops, and sort.

    Mutates each entry: sets entry['need_crop'] to 1 when the raw
    width/height ratio falls outside [0.5, 2], else 0.

    Returns:
        (sorted_ratios, order): clamped ratios in ascending order and the
        index permutation that produces that order.
    """
    ratio_large = 2
    ratio_small = 0.5
    ratios = []
    for entry in roidb:
        ratio = entry['width'] / float(entry['height'])
        if ratio > ratio_large:
            entry['need_crop'] = 1
            ratio = ratio_large
        elif ratio < ratio_small:
            entry['need_crop'] = 1
            ratio = ratio_small
        else:
            entry['need_crop'] = 0
        ratios.append(ratio)
    ratios = np.array(ratios)
    order = np.argsort(ratios)
    return (ratios[order], order)
class MultiCADopt():
    # Configuration schema for a multi-CAD run (annotation-only container;
    # instances are presumably populated by a config loader elsewhere —
    # confirm against the loading code).
    dir_name: str
    task_name: str

    class MultiCADargs():
        # Model and training-loop sizing options.
        n_nodes: int
        input_step: int
        batch_size: int
        dy_dim: int
        total_epoch: int
        update_every: int
        show_graph_every: int

    class data_pred():
        # Options for the data-prediction sub-model.
        model: str
        multi_scale: bool
        multi_scale_periods: list
        pred_step: int
        mlp_hid: int
        mlp_layers: int
        lr_data_start: float
        lr_data_end: float
        weight_decay: int

    class graph_discov():
        # Options for causal-graph discovery.
        # NOTE(review): `lambda_s`, `start_tau`, and `end_tau` use numeric
        # literals as ANNOTATIONS (not assignments) — they carry no runtime
        # default; likely intended as `= 0.1` etc.  Flagged, left unchanged.
        lambda_s: 0.1
        lr_graph_start: float
        lr_graph_end: float
        start_tau: 0.3
        end_tau: 0.01
        dynamic_sampling_milestones: list
        dynamic_sampling_periods: list
        causal_thres: str

    reproduc: ReproducOpt
    log: Any
def linear_attention_normalization(q, k, causal=False):
    """Denominator term of linear attention: per-query sum of key features.

    For each position n, returns sum_m q[n, m] * K[n, m], where K is the
    total key sum (non-causal) or the prefix sum up to n (causal).
    """
    if causal:
        # Causal: each position only attends to keys at or before it.
        return torch.einsum('...nm,...nm->...n', q, k.cumsum(dim=-2))
    # Non-causal: every position sees the same total key sum.
    return torch.einsum('...nm,...m->...n', q, k.sum(dim=-2))
class TestLossMetric(Metric):
    """Loss metric: applies NLL to probability-like outputs, otherwise the
    supplied criterion."""

    def __init__(self, criterion, train=False):
        self.criterion = criterion
        self.main_metric_name = 'loss_value'
        super().__init__(name='Loss', train=False)

    def compute_metric(self, outputs: torch.Tensor, labels: torch.Tensor, top_k=(1,)):
        """Return {'loss_value': mean loss} for a batch.

        If the first output row sums to ~1 the outputs are treated as
        probabilities (log + NLL); otherwise ``self.criterion`` is used.
        """
        first_row_sum = torch.sum(outputs[0])
        if 0.999 <= first_row_sum <= 1.001:
            nll = torch.nn.NLLLoss()
            loss = nll(torch.log(outputs), labels)
        else:
            loss = self.criterion(outputs, labels)
        return {'loss_value': loss.mean().item()}
# NOTE(review): this was a bare `.skipif(...)` expression (a syntax error —
# the `@pytest.mark` prefix had been stripped); restored as a decorator.
@pytest.mark.skipif((not baseline_installed), reason='baseline sub-module not installed')
def test_simple_agents():
    """Run every simple baseline agent through the Habitat benchmark once."""
    config_env = habitat.get_config(config_paths=CFG_TEST)
    if (not os.path.exists(config_env.SIMULATOR.SCENE)):
        pytest.skip('Please download Habitat test data to data folder.')
    benchmark = habitat.Benchmark(config_paths=CFG_TEST)
    for agent_class in [simple_agents.ForwardOnlyAgent, simple_agents.GoalFollower, simple_agents.RandomAgent, simple_agents.RandomForwardAgent]:
        agent = agent_class(config_env.TASK.SUCCESS_DISTANCE, config_env.TASK.GOAL_SENSOR_UUID)
        habitat.logger.info(agent_class.__name__)
        habitat.logger.info(benchmark.evaluate(agent, num_episodes=100))
    benchmark._env.close()
def hash_clustering(clustering):
    """Return a canonical, hashable form of a clustering.

    Each cluster becomes a sorted tuple of its members, and the clusters
    themselves are sorted — so two clusterings that contain the same groups
    (in any order, with any iterable cluster type) hash/compare equal.
    """
    canonical = sorted(tuple(sorted(cluster)) for cluster in clustering)
    return tuple(canonical)
def lifetime(m, Z):
    """Stellar lifetime (in Gyr) from mass and metallicity via a log-quadratic fit.

    The fit gives log10(lifetime in Myr) as a quadratic in log10(m) with
    metallicity-dependent coefficients; the result is converted to Gyr.

    Args:
        m: stellar mass (scalar or numpy array), solar masses.
        Z: metallicity parameter entering the fit coefficients linearly.
    """
    log_m = np.log10(m)
    c0 = 3.79 + 0.24 * Z
    c1 = -3.1 - 0.35 * Z
    c2 = 0.74 + 0.11 * Z
    # log10 of the lifetime in Myr (same evaluation order as a plain polynomial).
    log_t_myr = c0 + c1 * log_m + c2 * log_m * log_m
    # Myr -> Gyr.
    return np.power(10, log_t_myr) / 1000
def decode_predictions(preds, top_n=5):
    """Map a (batch, 50) score matrix to per-row top-n (tag, score) pairs.

    Tags come from the module-level TAGS list; each row's pairs are ranked
    by descending score and truncated to `top_n`.
    """
    assert len(preds.shape) == 2 and preds.shape[1] == 50
    decoded = []
    for row in preds:
        ranked = sorted(zip(TAGS, row), key=lambda pair: pair[1], reverse=True)
        decoded.append(ranked[:top_n])
    return decoded
def get():
    """Print interpreter / tensorflow / numpy versions and return the global FLAGS.

    Returns:
        FLAGS: the module-level flags object (defined elsewhere in this file).
    """
    # Bug fix: sys.version[:3] truncates e.g. "3.10.4" to "3.1"; build the
    # version string from sys.version_info instead.
    py_version = '.'.join(str(part) for part in sys.version_info[:3])
    print(('Python Interpreter version:%s' % py_version))
    print(('tensorflow version:%s' % tf.__version__))
    print(('numpy version:%s' % np.__version__))
    return FLAGS
class NNModel(JavaTransformer, MLWritable, MLReadable, HasFeaturesCol, HasPredictionCol, HasBatchSize, HasSamplePreprocessing, JavaValue):
    """Spark ML transformer wrapping a BigDL/Zoo model for batched inference.

    Either wraps an existing JVM object (`jvalue`, as produced by `load`) or
    constructs one from `model` plus a feature-preprocessing step that is
    chained with TensorToSample to build the sample preprocessing pipeline.
    """
    def __init__(self, model, feature_preprocessing=None, jvalue=None, bigdl_type='float'):
        super(NNModel, self).__init__()
        if jvalue:
            # Restoring from an existing JVM object: preprocessing is already
            # baked into jvalue, so a separate feature_preprocessing is invalid.
            invalidInputError((feature_preprocessing is None), 'feature_preprocessing cannot be None')
            self.value = jvalue
        else:
            if (not feature_preprocessing):
                feature_preprocessing = SeqToTensor()
            # A list argument describes tensor shapes: list-of-lists means
            # multiple input tensors, list-of-ints a single tensor shape.
            if (type(feature_preprocessing) is list):
                if (type(feature_preprocessing[0]) is list):
                    feature_preprocessing = SeqToMultipleTensors(feature_preprocessing)
                elif isinstance(feature_preprocessing[0], int):
                    feature_preprocessing = SeqToTensor(feature_preprocessing)
            sample_preprocessing = ChainedPreprocessing([feature_preprocessing, TensorToSample()])
            self.value = callZooFunc(bigdl_type, self.jvm_class_constructor(), model, sample_preprocessing)
            self.samplePreprocessing = sample_preprocessing
        self.model = model
        self._java_obj = self.value
        self.bigdl_type = bigdl_type
        self.setBatchSize(self.value.getBatchSize())
    def write(self):
        """Return an MLWriter for persisting this model."""
        return NNModelWriter(self)
    def load(path):
        # NOTE(review): no `self` parameter and no @staticmethod decorator —
        # presumably intended as a static factory (NNModel.load(path)); confirm.
        jvalue = callZooFunc('float', 'loadNNModel', path)
        return NNModel(model=None, feature_preprocessing=None, jvalue=jvalue)
    def setFeaturesCol(self, value):
        """Set the input features column name."""
        return self._set(featuresCol=value)
    def setPredictionCol(self, value):
        """Set the output prediction column name."""
        return self._set(predictionCol=value)
    def getModel(self):
        """Return the wrapped (Python-side) model object."""
        return self.model
class TextModeTestClass(nn.Module):
    """Tiny embedding -> LSTM -> linear model used for text-mode tests.

    Vocabulary of 5 tokens (index 0 = padding), 16-dim embeddings, a single
    8-unit LSTM layer, and a scalar head applied to the final hidden state.
    """

    def __init__(self):
        super(TextModeTestClass, self).__init__()
        self.word_embed = nn.Embedding(5, 16, padding_idx=0)
        self.rnn = nn.LSTM(16, 8, batch_first=True)
        self.linear = nn.Linear(8, 1)

    def forward(self, X):
        """Return one score per batch element, shape (batch, 1)."""
        tokens = self.word_embed(X.long())
        _, (last_hidden, _) = self.rnn(tokens)
        # last_hidden: (num_layers=1, batch, 8) -> scores reshaped to (batch, 1).
        return self.linear(last_hidden).view(-1, 1)
def update_plot(losses, prefix):
    """Push the current total/classification/regression loss scalars to the global plotter."""
    for name in ('loss', 'cls_loss', 'reg_loss'):
        plotter.update(f'{prefix}_{name}', losses[name].item())
def convnet(input, output, dropout_rate=0.0, input_shape=(1, 28, 28), batch_size=100, l2_rate=0.001, nb_epoch=12, img_rows=28, img_cols=28, nb_filters=64, pool_size=(2, 2), kernel_size=(3, 3), activations='relu', constrain_norm=False):
    """Build a small conv net on `input` and feed the result into `output`.

    Two L2-regularized conv layers (optionally max-norm constrained), max
    pooling, flatten, an optional dropout, a 128-unit dense layer, and a
    second optional dropout before the caller-supplied output layer.
    """
    kernel_const = maxnorm(2) if constrain_norm else None

    def _conv(tensor, **extra):
        # Shared conv-layer factory so both conv blocks stay consistent.
        return Convolution2D(nb_filters, kernel_size, activation=activations, kernel_regularizer=l2(l2_rate), kernel_constraint=kernel_const, **extra)(tensor)

    x = _conv(input, padding='valid', input_shape=input_shape)
    x = _conv(x)
    x = MaxPooling2D(pool_size=pool_size)(x)
    x = Flatten()(x)
    if dropout_rate > 0.0:
        x = Dropout(dropout_rate)(x)
    x = Dense(128, activation=activations, kernel_regularizer=l2(l2_rate), kernel_constraint=kernel_const)(x)
    if dropout_rate > 0.0:
        x = Dropout(dropout_rate)(x)
    return output(x)
@torch.jit.script
def hard_mish_jit(x, inplace: bool=False):
    """Hard Mish activation: 0.5 * x * clamp(x + 2, 0, 2).

    The original source had a bare `.script` line, which is a syntax error —
    the `@torch.jit` decorator head was evidently lost; restored here.
    The `inplace` flag is accepted for API symmetry but unused.
    """
    return 0.5 * x * (x + 2).clamp(min=0, max=2)
class Conv1x1Branch(nn.Module):
    """Inception-style branch consisting of a single 1x1 convolution block."""

    def __init__(self, in_channels, out_channels):
        super(Conv1x1Branch, self).__init__()
        self.conv = incept_conv1x1(in_channels=in_channels, out_channels=out_channels)

    def forward(self, x):
        """Apply the 1x1 convolution block to `x`."""
        return self.conv(x)
def validate(val_loader, model, criterion, epoch, args, log=None, tf_writer=None, flag='val'):
    """Evaluate `model` on the rotation self-supervision task over `val_loader`.

    Each batch is rotated, the rotation class predicted, and loss / top-1
    accuracy tracked. Progress is printed every `args.print_freq` batches;
    summary results go to `log` (file-like, optional) and `tf_writer`
    (TensorBoard writer — NOTE: used unconditionally despite its None
    default, so callers must pass one). Returns the average top-1 accuracy.

    Bug fix: the progress format string contained a bare '\\ ' (an invalid
    escape) and the accuracy meter had an empty name — the 'Acc@1' label was
    evidently lost; both restored.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    model.eval()
    all_preds = []
    all_targets = []
    with torch.no_grad():
        end = time.time()
        for (i, (inputs, _)) in enumerate(val_loader):
            # Targets are the synthetic rotation classes, not dataset labels.
            (inputs, target_rot) = rotation(inputs)
            inputs = inputs.cuda()
            target = target_rot.cuda()
            output = model(inputs)
            loss = criterion(output, target)
            acc1 = accuracy(output, target)[0]
            losses.update(loss.item(), inputs.size(0))
            top1.update(acc1.item(), inputs.size(0))
            batch_time.update((time.time() - end))
            end = time.time()
            (_, pred) = torch.max(output, 1)
            all_preds.extend(pred.cpu().numpy())
            all_targets.extend(target.cpu().numpy())
            if ((i % args.print_freq) == 0):
                output = 'Test: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\tAcc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(i, len(val_loader), batch_time=batch_time, loss=losses, top1=top1)
                print(output)
        output = '{flag} Results: {top1.avg:.3f} Loss {loss.avg:.5f}'.format(flag=flag, top1=top1, loss=losses)
        print(output)
        if (log is not None):
            log.write((output + '\n'))
            log.flush()
        tf_writer.add_scalar(('loss/test_' + flag), losses.avg, epoch)
        tf_writer.add_scalar((('acc/test_' + flag) + '_top1'), top1.avg, epoch)
    return top1.avg
def expand_dims(array, axis):
    """Framework-agnostic expand_dims: insert a size-1 dimension at `axis`.

    Dispatches on the array's framework (numpy / torch / tensorflow / jax)
    and raises ValueError for anything else.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    if is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    if is_tf_tensor(array):
        import tensorflow as tf
        return tf.expand_dims(array, axis=axis)
    if is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    raise ValueError(f'Type not supported for expand_dims: {type(array)}.')
def main():
    """CLI entry point: parse denoise/VAD pipeline options and hand off to `process`."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--audio-manifest', '-i', required=True, type=str, help='path to the input manifest.')
    cli.add_argument('--output-dir', '-o', required=True, type=str, help='path to the output dir. it will contain files after denoising and vad')
    cli.add_argument('--vad-agg-level', '-a', type=int, default=2, help='the aggresive level of the vad [0-3].')
    cli.add_argument('--dry-wet', '-dw', type=float, default=0.01, help='the level of linear interpolation between noisy and enhanced files.')
    cli.add_argument('--device', '-d', type=str, default='cpu', help='the device to be used for the speech enhancement model: cpu | cuda.')
    cli.add_argument('--denoise', action='store_true', help='apply a denoising')
    cli.add_argument('--vad', action='store_true', help='apply a VAD')
    process(cli.parse_args())
def build_model(input_shape, out_dim, activation=tf.keras.layers.LeakyReLU):
    """Assemble a ResNet-style model: 32-filter stem, then 3 residual stages (32/64/128)."""
    base_filters = 32

    def middle_stack(x, activation):
        # Three stages; each halves the spatial resolution and grows channels.
        for (mult, tag) in ((1, 'res32'), (2, 'res64'), (4, 'res128')):
            x = _res_block(x, filters=(base_filters * mult), num_blocks=3, strides=2, name=tag, activation=activation)
        return x

    return _build_model(input_shape, out_dim, middle_stack, base_filters, activation)
def main(args):
    """Train a text classifier on the chosen dataset and report the best test error.

    Loads the dataset named by `args.dataset` (mr / imdb / other tok.csv
    corpora), trains for `args.max_epoch` epochs with Adam and optional LR
    decay, and writes the best test error to stdout.
    """
    if args.dataset == 'mr':
        (train_x, train_y) = dataloader.read_corpus('/data/medg/misc/jindi/nlp/datasets/mr/train.txt')
        (test_x, test_y) = dataloader.read_corpus('/data/medg/misc/jindi/nlp/datasets/mr/test.txt')
    elif args.dataset == 'imdb':
        imdb_dir = '/data/medg/misc/jindi/nlp/datasets/imdb'
        (train_x, train_y) = dataloader.read_corpus(os.path.join(imdb_dir, 'train_tok.csv'), clean=False, MR=True, shuffle=True)
        (test_x, test_y) = dataloader.read_corpus(os.path.join(imdb_dir, 'test_tok.csv'), clean=False, MR=True, shuffle=True)
    else:
        (train_x, train_y) = dataloader.read_corpus('/afs/csail.mit.edu/u/z/zhijing/proj/to_di/data/{}/train_tok.csv'.format(args.dataset), clean=False, MR=False, shuffle=True)
        (test_x, test_y) = dataloader.read_corpus('/afs/csail.mit.edu/u/z/zhijing/proj/to_di/data/{}/test_tok.csv'.format(args.dataset), clean=False, MR=False, shuffle=True)
    nclasses = max(train_y) + 1
    model = Model(args.embedding, args.d, args.depth, args.dropout, args.cnn, nclasses).cuda()
    # Only optimize parameters that actually require gradients.
    trainable = lambda p: p.requires_grad
    optimizer = optim.Adam(filter(trainable, model.parameters()), lr=args.lr)
    (train_x, train_y) = dataloader.create_batches(train_x, train_y, args.batch_size, model.word2id)
    (test_x, test_y) = dataloader.create_batches(test_x, test_y, args.batch_size, model.word2id)
    best_test = 0
    for epoch in range(args.max_epoch):
        best_test = train_model(epoch, model, optimizer, train_x, train_y, test_x, test_y, best_test, args.save_path)
        if args.lr_decay > 0:
            optimizer.param_groups[0]['lr'] *= args.lr_decay
    sys.stdout.write('test_err: {:.6f}\n'.format(best_test))
class MyValDataSet_cls(data.Dataset):
    """Validation dataset yielding (image, coarse mask, class label, name).

    Each line of `list_path` is '<image-path> <label>'; images are read from
    `root_path` and coarse masks (same relative file names) from
    `root_path_coarsemask`. Images are scaled to [0, 1] float32 CHW; masks
    are binarized (pixel > 0) to float32.
    """
    def __init__(self, root_path, root_path_coarsemask, list_path, crop_size=(224, 224)):
        self.root_path = root_path
        self.root_path_coarsemask = root_path_coarsemask
        self.list_path = list_path
        (self.crop_h, self.crop_w) = crop_size
        self.img_ids = [i_id.strip() for i_id in open(list_path)]
        self.files = []
        for name in self.img_ids:
            # Split '<img> <label>' on the first space.
            img_file = name[0:name.find(' ')]
            label_file = name[(name.find(' ') + 1):]
            self.files.append({'img': img_file, 'label': label_file, 'name': name})
    def __len__(self):
        return len(self.files)
    def __getitem__(self, index):
        datafiles = self.files[index]
        image = Image.open((self.root_path + datafiles['img']))
        coarsemask = Image.open((self.root_path_coarsemask + datafiles['img']))
        # Bug fix: `np.int` was removed in NumPy 1.24; the builtin `int`
        # is the documented replacement (np.int was just an alias for it).
        label = np.array(int(datafiles['label']))
        image = (np.array(image) / 255.0)
        # HWC -> CHW for PyTorch.
        image = image.transpose(2, 0, 1)
        image = image.astype(np.float32)
        coarsemask = np.array(coarsemask)
        coarsemask = np.float32((coarsemask > 0))
        name = datafiles['img']
        return (image.copy(), coarsemask.copy(), label, name)
class TemplateFfdBuilder(builder.ModelBuilder):
    """Model builder for template-based free-form deformation (FFD) reconstruction.

    From a single image the network predicts, for every template mesh,
    control-point offsets `dp` and a template-selection probability vector
    `probs`; deformed clouds/meshes are recovered as `b @ (p + dp)` where
    `b` is the FFD basis and `p` the template control points.

    NOTE(review): many zero-argument methods here (n_ffd_samples, view_index,
    cat_id, n_templates, n_control_points, template_ids, batch_size, n,
    render_config, n_samples) are *read as attributes* elsewhere in the class
    (e.g. `self.n_ffd_samples` at get_ffd_data, `self.cat_id`) — in the
    original project they are presumably decorated with @property; the
    decorators appear to have been stripped in transcription. Confirm
    before executing this code.
    """
    def __init__(self, *args, **kwargs):
        super(TemplateFfdBuilder, self).__init__(*args, **kwargs)
        # Set True on the (separate) initializer run that loads pretrained weights.
        self._initializer_run = False
    # Number of surface samples used when building the FFD decomposition.
    def n_ffd_samples(self):
        return self.params.get('n_ffd_samples', 16384)
    # Which rendered view of each model to train on.
    def view_index(self):
        return self.params.get('view_index', 5)
    def _get_ffd_data(self, ffd_dataset):
        # Generator over (cat_id, example_id, basis b, control points p).
        for (cat_id, example_id) in self.template_ids:
            ffd_data = ffd_dataset[(cat_id, example_id)]
            (b, p) = (np.array(ffd_data[k]) for k in ('b', 'p'))
            (yield (cat_id, example_id, b, p))
    def get_ffd_data(self, ffd_dataset=None):
        """Yield/return FFD (b, p) data for every template, opening a dataset if needed."""
        if (ffd_dataset is None):
            n_ffd_points = self.n_ffd_samples
            ffd_dataset = get_ffd_dataset(self.cat_id, self.n, n_samples=n_ffd_points)
            # Dataset opened here, so materialize before it closes.
            with ffd_dataset:
                return tuple(self._get_ffd_data(ffd_dataset))
        else:
            return self._get_ffd_data(ffd_dataset)
    def get_ffd_tensors(self, ffd_dataset=None):
        """Return (b, p) as stacked TF tensors, with b resampled per template."""
        n_ffd_resamples = self.params.get('n_ffd_resamples', 1024)
        bs = []
        ps = []
        for (cat_id, example_id, b, p) in self.get_ffd_data(ffd_dataset):
            b = tf.constant(b, dtype=tf.float32)
            b = sample_tf(b, n_ffd_resamples, axis=0, name=('b_resampled_%s' % example_id))
            bs.append(b)
            ps.append(p)
        b = tf.stack(bs)
        p = tf.constant(np.array(ps), dtype=tf.float32)
        return (b, p)
    def get_image_features(self, image, mode, **inference_params):
        """MobileNet backbone features followed by 1x1 conv + batch-norm head(s)."""
        alpha = inference_params.get('alpha', 1)
        load_weights = self._initializer_run
        features = get_mobilenet_features(image, mode, load_weights, alpha)
        conv_filters = inference_params.get('final_conv_filters', [64])
        # Legacy flag reproduces an earlier (buggy) conv/BN ordering for
        # checkpoint compatibility.
        use_bn_bug = self.params.get('use_bn_bugged_version', False)
        if use_bn_bug:
            activation = batch_norm_then(tf.nn.relu6, training=(mode == tf.estimator.ModeKeys.TRAIN))
            for n in conv_filters:
                features = tf.layers.conv2d(features, n, 1, activation=activation)
                features = tf.layers.batch_normalization(features)
        else:
            for n in conv_filters:
                features = tf.layers.conv2d(features, n, 1)
                features = tf.nn.relu6(tf.layers.batch_normalization(features))
        return features
    def get_inference(self, features, mode):
        """Predict per-template control-point offsets `dp` and selection `probs`."""
        inference_params = self.params.get('inference_params', {})
        training = (mode == tf.estimator.ModeKeys.TRAIN)
        image = features['image']
        example_id = features['example_id']
        cat_id = features['cat_id']
        view_index = features['view_index']
        features = self.get_image_features(image, mode, **inference_params)
        features = tf.layers.flatten(features)
        for n_dense in inference_params.get('final_dense_nodes', [512]):
            features = tf.layers.dense(features, n_dense, activation=batch_norm_then(tf.nn.relu6, training=training))
        n_control_points = self.n_control_points
        n_templates = self.n_templates
        # Small init keeps initial deformations near zero.
        dp = tf.layers.dense(features, ((n_templates * n_control_points) * 3), kernel_initializer=tf.random_normal_initializer(stddev=0.0001))
        dp = tf.reshape(dp, ((- 1), n_templates, n_control_points, 3))
        probs = tf.layers.dense(features, n_templates, activation=tf.nn.softmax)
        # Smooth probabilities away from 0/1 to keep every template trainable.
        eps = self.params.get('prob_eps', 0.1)
        if (eps > 0):
            probs = (((1 - eps) * probs) + (eps / n_templates))
        return dict(cat_id=cat_id, view_index=view_index, example_id=example_id, probs=probs, dp=dp)
    # ShapeNet category id(s) from the human-readable description(s) in params.
    def cat_id(self):
        cat_desc = self.params['cat_desc']
        if isinstance(cat_desc, (list, tuple)):
            return [cat_desc_to_id(c) for c in cat_desc]
        else:
            return cat_desc_to_id(cat_desc)
    def n_templates(self):
        return len(self.template_ids)
    # FFD lattice of degree n has (n+1)^3 control points.
    def n_control_points(self):
        return ((self.n + 1) ** 3)
    def template_ids(self):
        # (cat_id, example_id) pairs of all template meshes, possibly
        # restricted by 'template_idxs' in params.
        cat_id = self.cat_id
        idxs = self.params.get('template_idxs')
        if isinstance(cat_id, str):
            return _get_cat_template_ids(cat_id, idxs)
        else:
            if (idxs is None):
                idxs = [None for _ in cat_id]
            return tuple(itertools.chain(*(_get_cat_template_ids(c, i) for (c, i) in zip(cat_id, idxs))))
    def get_inferred_point_clouds(self, dp):
        """Deform every template cloud by `dp`: clouds = b @ (p + dp)."""
        (b, p) = self.get_ffd_tensors()
        inferred_point_clouds = tf.einsum('ijk,likm->lijm', b, (p + dp))
        return inferred_point_clouds
    def get_chamfer_loss(self, gamma, dp, ground_truth_cloud):
        """Probability(gamma)-weighted sum of per-template chamfer distances."""
        inferred_point_clouds = self.get_inferred_point_clouds(dp)
        inferred_point_clouds = tf.unstack(inferred_point_clouds, axis=1)
        losses = [tf_metrics.chamfer(inferred, ground_truth_cloud) for inferred in inferred_point_clouds]
        losses = tf.stack(losses, axis=1)
        losses = (gamma * losses)
        loss = tf.reduce_sum(losses)
        return loss
    def get_entropy_loss(self, probs, **weight_kwargs):
        # Negative entropy of the batch-mean template distribution,
        # scaled by an annealed weight (encourages template diversity).
        mean_probs = tf.reduce_mean(probs, axis=0)
        entropy_loss = tf.reduce_sum((mean_probs * tf.log(mean_probs)))
        weight = annealed_weight(**weight_kwargs)
        return (entropy_loss * weight)
    def get_dp_reg_loss(self, probs, dp, **weight_kwargs):
        """L2 regularizer on deformations, optionally weighted per-template by probs."""
        if weight_kwargs.pop('uniform', False):
            reg_loss = tf.reduce_sum((dp ** 2))
        else:
            reg_loss = tf.reduce_sum((dp ** 2), axis=(2, 3))
            reg_loss *= probs
            reg_loss = tf.reduce_sum(reg_loss)
        weight = annealed_weight(**weight_kwargs)
        return (reg_loss * weight)
    def get_inference_loss(self, inference, labels):
        """Total loss: gamma-weighted chamfer + optional entropy and dp-regularization terms."""
        (probs, dp) = (inference[k] for k in ('probs', 'dp'))
        ground_truth_cloud = labels
        losses = []
        # gamma controls how template probabilities weight the chamfer terms.
        gamma_code = self.params.get('gamma', 'linear')
        if (gamma_code == 'linear'):
            gamma = probs
        elif (gamma_code == 'square'):
            gamma = (probs ** 2)
        elif (gamma_code == 'log'):
            gamma = (- tf.log((1 - probs)))
        else:
            raise ValueError(('Unrecognized gamma value in params: %s' % gamma_code))
        chamfer_loss = self.get_chamfer_loss(gamma, dp, ground_truth_cloud)
        tf.summary.scalar('chamfer', chamfer_loss, family='sublosses')
        losses.append(chamfer_loss)
        entropy_params = self.params.get('entropy_loss')
        if (entropy_params is not None):
            entropy_loss = self.get_entropy_loss(probs, **entropy_params)
            tf.summary.scalar('entropy', entropy_loss, family='sublosses')
            losses.append(entropy_loss)
        dp_reg_params = self.params.get('dp_regularization')
        if (dp_reg_params is not None):
            dp_reg_loss = self.get_dp_reg_loss(probs, dp, **dp_reg_params)
            tf.summary.scalar('dp_reg_loss', dp_reg_loss, family='sublosses')
            losses.append(dp_reg_loss)
        loss = (losses[0] if (len(losses) == 1) else tf.add_n(losses))
        return loss
    def get_train_op(self, loss, step):
        """Adam minimization op; LR from params (default 1e-3)."""
        optimizer = tf.train.AdamOptimizer(learning_rate=self.params.get('learning_rate', 0.001))
        return optimizer.minimize(loss, step)
    def batch_size(self):
        return self.params.get('batch_size', 32)
    # FFD lattice degree.
    def n(self):
        return self.params.get('n', 3)
    def render_config(self):
        from shapenet.core.blender_renderings.config import RenderConfig
        return RenderConfig(**self.params.get('render_params', {}))
    def n_samples(self):
        return self.params.get('n_samples', 16384)
    def get_dataset(self, mode, repeat=None):
        """Build the (image, cloud) tf.data pipeline for train/eval mode."""
        cat_id = self.cat_id
        if isinstance(cat_id, (list, tuple)):
            example_ids = [get_example_ids(c, mode) for c in cat_id]
        else:
            example_ids = get_example_ids(cat_id, mode)
        render_config = self.render_config
        view_index = self.view_index
        n_samples = self.n_samples
        n_resamples = self.params.get('n_resamples', 1024)
        if (repeat is None):
            # Default: repeat+shuffle only while training.
            repeat = (mode == tf.estimator.ModeKeys.TRAIN)
        shuffle = repeat
        batch_size = self.batch_size
        dataset = get_dataset(render_config, view_index, n_samples, n_resamples, cat_id, example_ids, shuffle=shuffle, repeat=repeat, batch_size=batch_size)
        return dataset
    def get_inputs(self, mode, repeat=None):
        dataset = self.get_dataset(mode, repeat=repeat)
        return dataset.make_one_shot_iterator().get_next()
    def vis_example_data(self, feature_data, label_data):
        """Show one example's image (matplotlib) and ground-truth cloud (mayavi)."""
        import matplotlib.pyplot as plt
        from shapenet.core import cat_id_to_desc
        from util3d.mayavi_vis import vis_point_cloud
        from mayavi import mlab
        image = feature_data['image']
        point_cloud = label_data
        # Normalize image to [0, 1] for display.
        image -= np.min(image)
        image /= np.max(image)
        plt.imshow(image)
        cat_ids = self.cat_id
        cat_index = feature_data['cat_index']
        if isinstance(cat_ids, str):
            assert (cat_index == 0)
            cat_id = cat_ids
        else:
            cat_id = cat_ids[cat_index]
        plt.title(('%s: %s' % (cat_id_to_desc(cat_id), feature_data['example_id'])))
        plt.show(block=False)
        vis_point_cloud(point_cloud, color=(0, 0, 1), scale_factor=0.01, axis_order='xzy')
        mlab.show()
        plt.close()
    def get_prediction_to_mesh_fn(self, edge_length_threshold=None):
        """Return fn(probs, dp) -> deformed mesh of the most probable template."""
        cat_id = self.cat_id
        if (not isinstance(cat_id, (list, tuple))):
            cat_id = [cat_id]
        with get_ffd_dataset(self.cat_id, self.n, edge_length_threshold=edge_length_threshold) as d:
            (cat_ids, example_ids, bs, ps) = zip(*self.get_ffd_data(d))
        with get_template_mesh_dataset(cat_id, edge_length_threshold) as mesh_dataset:
            all_faces = []
            all_vertices = []
            for (cat_id, example_id) in zip(cat_ids, example_ids):
                sg = mesh_dataset[(cat_id, example_id)]
                all_faces.append(np.array(sg['faces']))
                all_vertices.append(np.array(sg['vertices']))
        def transform_predictions(probs, dp):
            # Deform the argmax template's vertices: v = b @ (p + dp).
            i = np.argmax(probs)
            vertices = np.matmul(bs[i], (ps[i] + dp[i]))
            faces = all_faces[i]
            original_vertices = all_vertices[i]
            return dict(vertices=vertices, faces=faces, original_vertices=original_vertices, attrs=dict(template_id=example_ids[i]))
        return transform_predictions
    def get_prediction_to_top_k_mesh_fn(self, edge_length_threshold=None, top_k=2):
        """Return fn(probs, dp) -> deformed meshes for the top templates.

        NOTE(review): the inner function slices `[(- 3):]`, i.e. always the
        top 3, ignoring the `top_k` argument — confirm whether intended.
        """
        cat_id = self.cat_id
        with get_ffd_dataset(cat_id, self.n, edge_length_threshold) as ffd_dataset:
            (cat_ids, example_ids, bs, ps) = zip(*self.get_ffd_data(ffd_dataset))
        with get_template_mesh_dataset(cat_id, edge_length_threshold) as mesh_dataset:
            all_faces = []
            all_vertices = []
            for k in example_ids:
                sg = mesh_dataset[k]
                all_faces.append(np.array(sg['faces']))
                all_vertices.append(np.array(sg['vertices']))
        def get_deformed_mesh(i, dp):
            vertices = np.matmul(bs[i], (ps[i] + dp[i]))
            faces = all_faces[i]
            return dict(vertices=vertices, faces=faces, original_vertices=all_vertices[i])
        def transform_predictions(probs, dp):
            ks = probs.argsort()[(- 3):][::(- 1)]
            return [get_deformed_mesh(k, dp) for k in ks]
        return transform_predictions
    def get_prediction_to_cloud_fn(self, n_samples=None):
        """Return fn(probs, dp) -> deformed point cloud of the most probable template."""
        from util3d.point_cloud import sample_points
        with get_ffd_dataset(self.cat_id, self.n, n_samples=self.n_ffd_samples) as ffd_dataset:
            (cat_ids, example_ids, bs, ps) = zip(*self.get_ffd_data(ffd_dataset))
        def transform_predictions(probs, dp):
            i = np.argmax(probs)
            b = bs[i]
            if (n_samples is not None):
                b = sample_points(b, n_samples)
            points = np.matmul(b, (ps[i] + dp[i]))
            return dict(cloud=points, attrs=dict(template_id=example_ids[i]))
        return transform_predictions
    def get_segmented_cloud_fn(self):
        """Return fn(probs, dp) -> deformed annotated cloud + its segmentation labels.

        Templates without annotations yield None entries; the returned
        function then returns None for those templates.
        """
        from shapenet.core.annotations.datasets import PointCloudDataset, SegmentationDataset
        import template_ffd.templates.annotations_ffd as ann
        cat_id = self.cat_id
        bs = []
        ps = []
        segs = []
        original_points = []
        with ann.get_annotations_ffd_dataset(cat_id, self.n) as ds:
            for k in self.template_ids:
                if (k in ds):
                    subgroup = ds[k]
                    (b, p) = (np.array(subgroup[kk]) for kk in ('b', 'p'))
                else:
                    b = None
                    p = None
                bs.append(b)
                ps.append(p)
        with SegmentationDataset(cat_id) as sd:
            for k in self.template_ids:
                if (k in sd):
                    seg = sd[k]
                else:
                    seg = None
                segs.append(seg)
        with PointCloudDataset(cat_id) as ds:
            for k in self.template_ids:
                if (k in ds):
                    points = ds[k]
                else:
                    points = None
                original_points.append(points)
        def transform_predictions(probs, dp):
            i = np.argmax(probs)
            b = bs[i]
            if (b is None):
                return None
            else:
                points = np.matmul(b, (ps[i] + dp[i]))
                return dict(points=points, segmentation=segs[i], original_points=original_points[i])
        return transform_predictions
    def get_segmented_mesh_fn(self, edge_length_threshold=None):
        """Return fn(probs, dp) -> deformed mesh with per-face segmentation labels.

        Face labels are transferred from annotated point clouds via
        nearest-neighbour lookup of face centroids.
        """
        from shapenet.core.annotations.datasets import PointCloudDataset, SegmentationDataset
        from dataset import Dataset
        cat_id = self.cat_id
        bs = []
        ps = []
        segs = []
        faces = []
        original_segs = []
        original_seg_points = []
        ffd_dataset = get_ffd_dataset(cat_id, self.n, edge_length_threshold=edge_length_threshold)
        with ffd_dataset:
            (cat_ids, example_ids, bs, ps) = zip(*self.get_ffd_data(ffd_dataset))
        template_mesh_ds = get_template_mesh_dataset(cat_id, edge_length_threshold=edge_length_threshold)
        seg_points_ds = PointCloudDataset(cat_id)
        seg_ds = SegmentationDataset(cat_id)
        ds = Dataset.zip(template_mesh_ds, seg_points_ds, seg_ds)
        with ds:
            for example_id in example_ids:
                if (example_id in ds):
                    (template_mesh, seg_points, original_seg) = ds[example_id]
                    (v, f) = (np.array(template_mesh[k]) for k in ('vertices', 'faces'))
                    centroids = get_centroids(v, f)
                    # Label each face by its nearest annotated point.
                    seg = original_seg[get_nn(seg_points, centroids)]
                else:
                    f = None
                    seg = None
                    seg_points = None
                    original_seg = None
                segs.append(seg)
                original_seg_points.append(seg_points)
                original_segs.append(original_seg)
                faces.append(f)
        def transform_predictions(probs, dp):
            i = np.argmax(probs)
            seg = segs[i]
            if (seg is None):
                return None
            else:
                v = np.matmul(bs[i], (ps[i] + dp[i]))
                return dict(faces=faces[i], vertices=v, segmentation=segs[i], original_points=original_seg_points[i], original_segmentation=original_segs[i])
        return transform_predictions
    def vis_prediction_data(self, prediction_data, feature_data, label_data=None):
        """Visualize a prediction: input image plus deformed and original meshes."""
        import matplotlib.pyplot as plt
        from util3d.mayavi_vis import vis_mesh
        from mayavi import mlab
        image = feature_data['image']
        dp = prediction_data['dp']
        probs = prediction_data['probs']
        # Lazily build (and cache) the prediction -> mesh converter.
        if ((not hasattr(self, '_mesh_fn')) or (self._mesh_fn is None)):
            self._mesh_fn = self.get_prediction_to_mesh_fn()
        image -= np.min(image)
        image /= np.max(image)
        plt.imshow(image)
        mesh = self._mesh_fn(probs, dp)
        (vertices, faces, original_vertices) = (mesh[k] for k in ('vertices', 'faces', 'original_vertices'))
        mlab.figure()
        vis_mesh(vertices, faces, color=(0, 1, 0), include_wireframe=False, axis_order='xzy')
        mlab.figure()
        vis_mesh(original_vertices, faces, color=(1, 0, 0), include_wireframe=False, axis_order='xzy')
        plt.show(block=False)
        mlab.show()
        plt.close()
def tanhtanh_2(x, mu1, mu2, sd):
    """Product of two tanh sigmoids and its first two derivatives w.r.t. x.

    With z_i = (x - mu_i) / sd, returns (t, t', t'') for t = tanh(z1)*tanh(z2),
    using sech^2(z) = 1 - tanh(z)^2 for the derivative terms.
    """
    z1 = (x - mu1) / sd
    z2 = (x - mu2) / sd
    t1 = torch.tanh(z1)
    t2 = torch.tanh(z2)
    s1 = 1 - t1 ** 2  # sech^2(z1)
    s2 = 1 - t2 ** 2  # sech^2(z2)
    value = t1 * t2
    first = (1 / sd) * (t1 * s2 + s1 * t2)
    second = (1 / (sd ** 2)) * (2 * s1 * s2 - 2 * t1 * t2 * (s1 + s2))
    return (value, first, second)
class StableBaselines3Wrapper(Wrapper):
    """Compose the three StableBaselines3 adapter wrappers around a CityLearn env."""

    def __init__(self, env: CityLearnEnv):
        # Order matters: actions first, then rewards, then observations.
        wrapped = StableBaselines3ActionWrapper(env)
        wrapped = StableBaselines3RewardWrapper(wrapped)
        wrapped = StableBaselines3ObservationWrapper(wrapped)
        super().__init__(wrapped)
        self.env: CityLearnEnv
class TFRoFormerForQuestionAnswering(metaclass=DummyObject):
    """Placeholder emitted when TensorFlow is unavailable.

    Instantiation raises an informative error via `requires_backends`
    instead of failing with a NameError at import time.
    """
    # Backends that must be installed for the real class to be importable.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def test_fsaf_head_forward():
    """Smoke-test the FSAF head's forward pass via ONNX-runtime validation."""
    head = fsaf_config()
    base = 128
    num_levels = len(head.anchor_generator.strides)
    # One random feature map per FPN level, halving resolution from base/4.
    feats = [
        torch.rand(1, head.in_channels, base // (2 ** (lvl + 2)), base // (2 ** (lvl + 2)))
        for lvl in range(num_levels)
    ]
    ort_validate(head.forward, feats)
class ResidualBlock(nn.Module):
    """Pre-activation residual block: (BN-ReLU-conv) x2, skip add, then BN-ReLU-conv3.

    NOTE(review): the skip connection is added *before* the final
    BN-ReLU-conv3 stage, and `dilation` only affects conv3 while its padding
    stays (1, 1) — with dilation > 1 the spatial size shrinks; confirm this
    is intended.
    """
    def __init__(self, in_channels, dilation=1):
        super(ResidualBlock, self).__init__()
        self.dilation = dilation
        self.bn1 = nn.BatchNorm2d(in_channels)
        # One shared ReLU is enough (it is stateless and parameter-free);
        # the original assigned self.relu three times, overwriting it twice.
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size=(3, 3), padding=(1, 1), bias=False)
        self.bn2 = nn.BatchNorm2d(in_channels)
        self.conv2 = nn.Conv2d(in_channels, in_channels, kernel_size=(3, 3), padding=(1, 1), bias=False)
        self.bn3 = nn.BatchNorm2d(in_channels)
        self.conv3 = nn.Conv2d(in_channels, in_channels, kernel_size=(3, 3), padding=(1, 1), dilation=dilation, bias=False)
    def forward(self, x):
        """Apply the block; channel count and (for dilation=1) spatial size are preserved."""
        residual = x
        x = self.conv1(self.relu(self.bn1(x)))
        x = self.conv2(self.relu(self.bn2(x)))
        x += residual
        x = self.conv3(self.relu(self.bn3(x)))
        return x
def training_loss_3rd_item_task_fbne(fbne_data, batch_index, model, sess, train_data, is_training):
    """Average the 3rd-order item-task loss over the given training batches.

    Generates one batch per index in `batch_index`, runs the model's
    `loss_3rd_item_pos` op in the TF session, and divides the accumulated
    loss by the dataset's total number of batches.
    """
    num_batch = fbne_data.oracle_num_items // setting.batch_size
    accumulated = 0.0
    for idx in batch_index:
        (b_target_item, b_k_shot_user, b_second_order_items, b_third_order_users, b_oracle_item_ebd, b_mask_num_second_order_item, b_mask_num_third_order_user, b_intra_2nd_item, b_intra_3rd_item) = fbne_data.batch_gen_3rd_item_task(train_data, idx)
        feed = {
            model.target_item: b_oracle_item_ebd,
            model.support_user_1st_pos_: b_k_shot_user,
            model.training_phrase_user_task: is_training,
            model.support_item_2nd_pos_: b_second_order_items,
            model.training_phrase_item_task: is_training,
            model.inter_support_3rd_item_pos: b_intra_3rd_item,
            model.support_user_3rd_pos: b_third_order_users,
        }
        accumulated += sess.run(model.loss_3rd_item_pos, feed)
    return accumulated / num_batch
def reconstruct_with_dqvae(img, dqvae):
    """Run `img` through the DQ-VAE in inference mode and return the first reconstruction as an image."""
    with torch.no_grad():
        decoded = dqvae(img, is_training=False, ret_loss=False)
        reconstruction = decoded['x_rec']
    return tensor2img(reconstruction[0])
class TestShortener(unittest.TestCase):
    """Unit tests for shorten_to_bytes_width."""

    def test_example(self):
        # Result must fit the byte budget and end with the ellipsis marker.
        original = ' Ilsa, le mechant gardien '
        budget = 27
        result = shorten_to_bytes_width(original, budget)
        self.assertEqual(result, ' Ilsa, le mechant [...]')
        self.assertLessEqual(len(result.encode()), budget)

    def test_stylized_irc_text(self):
        # IRC styling control codes count toward the byte width.
        stylized = '\x1dzzz\x0f ' * 100
        self.assertEqual(shorten_to_bytes_width(stylized, 20), '\x1dzzz\x0f \x1dzzz\x0f [...]')
def test_multi_captured(capfd):
    """Interleaved captured/raw output goes to the right sinks.

    `captured_output` should land in the Python-level redirected stream,
    while `raw_output` bypasses it and hits the real file descriptor.
    """
    buffer = StringIO()
    with redirect_stdout(buffer):
        m.captured_output('a')
        m.raw_output('b')
        m.captured_output('c')
        m.raw_output('d')
    stdout, _stderr = capfd.readouterr()
    assert stdout == 'bd'
    assert buffer.getvalue() == 'ac'
class RNN(nn.Module):
    """GRU/LSTM sequence model with an input projection and optional softmax head.

    NOTE(review): the recurrent state is kept in `self.hidden` and must be
    (re)set via `init_hidden` before each new sequence/batch — `forward`
    reads and overwrites it, so the module is stateful across calls.
    """
    def __init__(self, input_size, embedding_size, hidden_size, num_layers, rnn_type='GRU', dropout=0, output_size=None, output_embedding_size=None, device=torch.device('cpu')):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.rnn_type = rnn_type
        self.output_size = output_size
        self.device = device
        # Linear embedding of raw inputs before the recurrent core.
        self.input = nn.Linear(input_size, embedding_size)
        if (self.rnn_type == 'GRU'):
            self.rnn = nn.GRU(input_size=embedding_size, hidden_size=hidden_size, num_layers=num_layers, batch_first=True, dropout=dropout)
        elif (self.rnn_type == 'LSTM'):
            self.rnn = nn.LSTM(input_size=embedding_size, hidden_size=hidden_size, num_layers=num_layers, batch_first=True, dropout=dropout)
        self.hidden = None
        # Optional classification head; output_embedding_size overrides the
        # hidden width of the MLP when given.
        if (self.output_size is not None):
            if (output_embedding_size is None):
                self.output = MLP_Softmax(hidden_size, embedding_size, self.output_size)
            else:
                self.output = MLP_Softmax(hidden_size, output_embedding_size, self.output_size)
        # Custom init: constant biases, xavier weights for the RNN; xavier
        # for every Linear layer (gain choices differ by module type).
        for (name, param) in self.rnn.named_parameters():
            if ('bias' in name):
                nn.init.constant_(param, 0.25)
            elif ('weight' in name):
                nn.init.xavier_uniform_(param, gain=nn.init.calculate_gain('sigmoid'))
        for m in self.modules():
            if isinstance(m, nn.Linear):
                m.weight.data = init.xavier_uniform_(m.weight.data, gain=nn.init.calculate_gain('relu'))
    def init_hidden(self, batch_size):
        """Return a zeroed hidden state (tensor for GRU, (h, c) tuple for LSTM)."""
        if (self.rnn_type == 'GRU'):
            return torch.zeros(self.num_layers, batch_size, self.hidden_size, device=self.device)
        elif (self.rnn_type == 'LSTM'):
            return (torch.zeros(self.num_layers, batch_size, self.hidden_size, device=self.device), torch.zeros(self.num_layers, batch_size, self.hidden_size, device=self.device))
    def forward(self, input, input_len=None):
        """Run the RNN over `input` (batch-first); pads/packs when `input_len` is given."""
        input = self.input(input)
        if (input_len is not None):
            input = pack_padded_sequence(input, input_len, batch_first=True, enforce_sorted=False)
        # Reads and updates the persistent self.hidden state.
        (output, self.hidden) = self.rnn(input, self.hidden)
        if (input_len is not None):
            (output, _) = pad_packed_sequence(output, batch_first=True)
        if (self.output_size is not None):
            output = self.output(output)
        return output
def _send_slack(msg):
    """POST `msg` to the configured Slack webhook, tagged with the run name.

    NOTE(review): the payload key is 'text_jamo', not Slack's standard
    'text' field — presumably a custom relay/endpoint consumes this; confirm.
    Also note: urlopen is called without a timeout, so this can block.
    """
    req = Request(_slack_url)
    req.add_header('Content-Type', 'application/json')
    urlopen(req, json.dumps({'username': 'tacotron', 'icon_emoji': ':taco:', 'text_jamo': ('*%s*: %s' % (_run_name, msg))}).encode())
def require_librosa(test_case):
    """Decorator: skip `test_case` unless librosa is installed."""
    skip_decorator = unittest.skipUnless(is_librosa_available(), 'test requires librosa')
    return skip_decorator(test_case)
class SentencePieceModelTokenizer(Tokenizer):
    """Tokenizer backed by a trained SentencePiece model."""
    def __init__(self, model_path):
        super().__init__()
        import sentencepiece as spm
        self.model = spm.SentencePieceProcessor()
        self.model.Load(model_path)
    def split(self, string):
        """Encode `string` into SentencePiece pieces."""
        return self.model.EncodeAsPieces(string)
    def end_idx_last_full_word(self, tokens):
        """Index of the piece starting the last word, or 0 if fewer than two words.

        Bug fix: SentencePiece marks word-initial pieces with U+2581 ('▁',
        lower one eighth block). The original compared `t[0] == ''`, which a
        single character can never satisfy, so this always returned 0 — the
        marker was evidently lost in transcription.
        """
        bow_indices = [i for (i, t) in enumerate(tokens) if (t[0] == '\u2581')]
        if (len(bow_indices) < 2):
            return 0
        else:
            return bow_indices[(- 1)]
    def merge(self, list_of_string):
        """Decode a list of pieces back into a plain string."""
        return self.model.DecodePieces(list_of_string)
    def extract_full_word(self):
        # Intentionally a no-op in this implementation.
        pass
class Trainer(DefaultTrainer):
    """DensePose trainer: COCO (+DensePose) evaluation, custom mappers, optional TTA.

    NOTE(review): every method takes `cls` yet carries no @classmethod
    decorator here — upstream detectron2 defines these as classmethods, so
    the decorators were presumably lost in transcription; confirm before use.
    """
    def build_evaluator(cls, cfg: CfgNode, dataset_name, output_folder=None):
        """COCO evaluator always; DensePose evaluator when enabled in the config."""
        if (output_folder is None):
            output_folder = os.path.join(cfg.OUTPUT_DIR, 'inference')
        evaluators = [COCOEvaluator(dataset_name, cfg, True, output_folder)]
        if cfg.MODEL.DENSEPOSE_ON:
            evaluators.append(DensePoseCOCOEvaluator(dataset_name, True, output_folder))
        return DatasetEvaluators(evaluators)
    def build_test_loader(cls, cfg: CfgNode, dataset_name):
        # Test-time loading uses the non-augmenting mapper (is_train=False).
        return build_detection_test_loader(cfg, dataset_name, mapper=DatasetMapper(cfg, False))
    def build_train_loader(cls, cfg: CfgNode):
        return build_detection_train_loader(cfg, mapper=DatasetMapper(cfg, True))
    def test_with_TTA(cls, cfg: CfgNode, model):
        """Evaluate with test-time augmentation; result keys get a '_TTA' suffix."""
        logger = logging.getLogger('detectron2.trainer')
        logger.info('Running inference with test-time augmentation ...')
        transform_data = load_from_cfg(cfg)
        model = DensePoseGeneralizedRCNNWithTTA(cfg, model, transform_data, DensePoseDatasetMapperTTA(cfg))
        evaluators = [cls.build_evaluator(cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, 'inference_TTA')) for name in cfg.DATASETS.TEST]
        res = cls.test(cfg, model, evaluators)
        res = OrderedDict({(k + '_TTA'): v for (k, v) in res.items()})
        return res
def plot_rank_corrs(rho, rho_p, tau, tau_p, METRICS, scatter=False, title=''):
    """Plot Spearman/Kendall rank correlations and their p-values in a 2x2 grid.

    Args:
        rho, rho_p, tau, tau_p: dicts mapping each metric name in *METRICS*
            to a list of correlation values / p-values.
        METRICS: ordered metric names (one x position per metric).
        scatter: when True, scatter individual values per metric and overlay
            the per-metric mean; otherwise draw a bar chart of the means.
        title: figure suptitle.

    The p-value panels (right column) use a log y-scale. Shows the figure.
    """
    def _panel(axis, values, subtitle, log_scale=False):
        # One sub-plot: scatter-with-mean or bar-of-means, shared by all four
        # panels (previously duplicated four times).
        means = [np.mean(values[metric]) for metric in METRICS]
        positions = list(range(len(METRICS)))
        if scatter:
            (xs, ys) = ([], [])
            for (i, metric) in enumerate(METRICS):
                xs += (len(values[metric]) * [i])
                ys += values[metric]
            axis.scatter(xs, ys)
            axis.scatter(positions, means)
        else:
            axis.bar(x=positions, height=means)
        axis.set_title(subtitle)
        axis.set_xticks(positions)
        axis.set_xticklabels(METRICS)
        if log_scale:
            axis.set_yscale('log')

    (fig, ax) = plt.subplots(2, 2, figsize=(10, 10))
    fig.suptitle(title)
    _panel(ax[(0, 0)], rho, "Spearman's rho")
    _panel(ax[(0, 1)], rho_p, "Spearman's rho: p-values", log_scale=True)
    _panel(ax[(1, 0)], tau, "Kendall's tau")
    _panel(ax[(1, 1)], tau_p, "Kendall's tau: p-values", log_scale=True)
    plt.show()
def get_kernel_window(kernel: Literal[('gaussian', 'triang', 'laplace')]='gaussian', ks: int=5, sigma: Union[(int, float)]=2) -> List[float]:
    """Return a symmetric smoothing-kernel window of (odd) length *ks*.

    Args:
        kernel: one of 'gaussian', 'triang', 'laplace'.
        ks: window length; assumed odd (half-width is (ks - 1) // 2).
        sigma: Gaussian std-dev; only used for kernel='gaussian'.

    Returns:
        The window weights (ndarray for 'gaussian'/'triang', list for 'laplace').

    Raises:
        ValueError: for an unknown *kernel* name.
    """
    half_ks = ((ks - 1) // 2)
    if (kernel == 'gaussian'):
        # Filtering a unit impulse yields the (truncated) Gaussian window.
        base_kernel = ((([0.0] * half_ks) + [1.0]) + ([0.0] * half_ks))
        kernel_window = gaussian_filter1d(base_kernel, sigma=sigma)
    elif (kernel == 'triang'):
        # Hoisted: compute the triangular window once instead of twice.
        window = triang(ks)
        kernel_window = (window / sum(window))
    elif (kernel == 'laplace'):
        kernel_window = list(map(_laplace, np.arange((- half_ks), (half_ks + 1))))
    else:
        raise ValueError("Kernel can be only ['gaussian', 'triang', 'laplace'].")
    return kernel_window
class CategoricalPolicy(nn.Module):
    """Actor-critic head producing a categorical action distribution and a value.

    An embedder maps observations to features; linear heads produce policy
    logits and a scalar value, optionally routing features through a GRU.
    """

    def __init__(self, embedder, recurrent, action_size):
        super(CategoricalPolicy, self).__init__()
        self.embedder = embedder
        # Small policy gain keeps the initial action distribution near-uniform.
        self.fc_policy = orthogonal_init(nn.Linear(self.embedder.output_dim, action_size), gain=0.01)
        self.fc_value = orthogonal_init(nn.Linear(self.embedder.output_dim, 1), gain=1.0)
        self.recurrent = recurrent
        if self.recurrent:
            self.gru = GRU(self.embedder.output_dim, self.embedder.output_dim)

    def is_recurrent(self):
        """Whether this policy carries a recurrent hidden state."""
        return self.recurrent

    def forward(self, x, hx, masks):
        """Return (action distribution, value estimate, next hidden state)."""
        features = self.embedder(x)
        if self.recurrent:
            (features, hx) = self.gru(features, hx, masks)
        action_logits = self.fc_policy(features)
        dist = Categorical(logits=F.log_softmax(action_logits, dim=1))
        value = self.fc_value(features).reshape((- 1))
        return (dist, value, hx)
_torch
_vision
class BeitFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    """Tests for BeitFeatureExtractor: config attributes, PIL/numpy/torch
    inputs, segmentation maps, and label reduction."""

    feature_extraction_class = (BeitFeatureExtractor if is_vision_available() else None)

    def setUp(self):
        self.feature_extract_tester = BeitFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        # Restored @property: every test below reads this as an attribute
        # (``**self.feat_extract_dict``), which would fail on a bound method.
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_properties(self):
        """The extractor exposes the expected configuration attributes."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, 'do_resize'))
        self.assertTrue(hasattr(feature_extractor, 'size'))
        self.assertTrue(hasattr(feature_extractor, 'do_center_crop'))
        self.assertTrue(hasattr(feature_extractor, 'center_crop'))
        self.assertTrue(hasattr(feature_extractor, 'do_normalize'))
        self.assertTrue(hasattr(feature_extractor, 'image_mean'))
        self.assertTrue(hasattr(feature_extractor, 'image_std'))

    def test_batch_feature(self):
        # Covered by the per-input-type tests below.
        pass

    def test_call_pil(self):
        """PIL images, single and batched, encode to the expected 4D shape."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        encoded_images = feature_extractor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))
        encoded_images = feature_extractor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))

    def test_call_numpy(self):
        """numpy arrays, single and batched, encode to the expected 4D shape."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        encoded_images = feature_extractor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))
        encoded_images = feature_extractor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))

    def test_call_pytorch(self):
        """torch tensors, single and batched, encode to the expected 4D shape."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        encoded_images = feature_extractor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))
        encoded_images = feature_extractor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))

    def test_call_segmentation_maps(self):
        """Segmentation maps come back as long 'labels' within [0, 255]."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[(- 2):]).long())
        # Single image + map.
        encoding = feature_extractor(image_inputs[0], maps[0], return_tensors='pt')
        self.assertEqual(encoding['pixel_values'].shape, (1, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))
        self.assertEqual(encoding['labels'].shape, (1, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))
        self.assertEqual(encoding['labels'].dtype, torch.long)
        self.assertTrue((encoding['labels'].min().item() >= 0))
        self.assertTrue((encoding['labels'].max().item() <= 255))
        # Batched images + maps.
        encoding = feature_extractor(image_inputs, maps, return_tensors='pt')
        self.assertEqual(encoding['pixel_values'].shape, (self.feature_extract_tester.batch_size, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))
        self.assertEqual(encoding['labels'].shape, (self.feature_extract_tester.batch_size, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))
        self.assertEqual(encoding['labels'].dtype, torch.long)
        self.assertTrue((encoding['labels'].min().item() >= 0))
        self.assertTrue((encoding['labels'].max().item() <= 255))
        # Real semantic-segmentation fixtures: single pair.
        (image, segmentation_map) = prepare_semantic_single_inputs()
        encoding = feature_extractor(image, segmentation_map, return_tensors='pt')
        self.assertEqual(encoding['pixel_values'].shape, (1, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))
        self.assertEqual(encoding['labels'].shape, (1, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))
        self.assertEqual(encoding['labels'].dtype, torch.long)
        self.assertTrue((encoding['labels'].min().item() >= 0))
        self.assertTrue((encoding['labels'].max().item() <= 255))
        # Real semantic-segmentation fixtures: batch of two.
        (images, segmentation_maps) = prepare_semantic_batch_inputs()
        encoding = feature_extractor(images, segmentation_maps, return_tensors='pt')
        self.assertEqual(encoding['pixel_values'].shape, (2, self.feature_extract_tester.num_channels, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))
        self.assertEqual(encoding['labels'].shape, (2, self.feature_extract_tester.crop_size, self.feature_extract_tester.crop_size))
        self.assertEqual(encoding['labels'].dtype, torch.long)
        self.assertTrue((encoding['labels'].min().item() >= 0))
        self.assertTrue((encoding['labels'].max().item() <= 255))

    def test_reduce_labels(self):
        """With reduce_labels, the background is remapped so max may reach 255."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        (image, map) = prepare_semantic_single_inputs()
        encoding = feature_extractor(image, map, return_tensors='pt')
        self.assertTrue((encoding['labels'].min().item() >= 0))
        self.assertTrue((encoding['labels'].max().item() <= 150))
        feature_extractor.reduce_labels = True
        encoding = feature_extractor(image, map, return_tensors='pt')
        self.assertTrue((encoding['labels'].min().item() >= 0))
        self.assertTrue((encoding['labels'].max().item() <= 255))
class PGDAttack(Attack):
    """Projected-gradient-descent adversarial attack (abstract base).

    NOTE(review): ``__metaclass__`` is the Python 2 mechanism and has no
    effect under Python 3; if abstractness is intended to be enforced, the
    class should be declared with ``metaclass=abc.ABCMeta`` -- TODO confirm.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, predictor, specification, epsilon, lr=0.1, lr_fn=None, num_steps=20, num_restarts=1, input_bounds=(0.0, 1.0), optimizer_builder=UnrolledGradientDescent):
        # predictor: model under attack; specification: property to falsify.
        # epsilon: perturbation radius; lr / lr_fn: step size or schedule.
        # num_steps PGD iterations, repeated num_restarts times; inputs are
        # constrained to input_bounds; optimizer_builder creates the inner
        # unrolled optimizer.
        super(PGDAttack, self).__init__(name='pgd')
        self._predictor = predictor
        self._specification = specification
        self._num_steps = num_steps
        self._num_restarts = num_restarts
        self._epsilon = epsilon
        self._lr = lr
        self._lr_fn = lr_fn
        self._input_bounds = input_bounds
        self._optimizer_builder = optimizer_builder
.skipif((torch is None), reason='requires torch library')
def test_assert_is_norm_layer():
    """Norm layers are recognized; conv and activation layers are not."""
    assert not mmcv.assert_is_norm_layer(nn.Conv3d(3, 64, 3))
    norm_layers = [nn.BatchNorm3d(128), nn.GroupNorm(8, 64)]
    for layer in norm_layers:
        assert mmcv.assert_is_norm_layer(layer)
    assert not mmcv.assert_is_norm_layer(nn.Sigmoid())
class RODDecode_RA(nn.Module):
    """Decoder for range-azimuth radar maps: three 3D transposed convolutions
    (256 -> 128 -> 64 -> 32 channels) with PReLU activations.
    """

    def __init__(self):
        super(RODDecode_RA, self).__init__()
        self.convt1 = nn.ConvTranspose3d(in_channels=256, out_channels=128, kernel_size=(4, 6, 6), stride=(2, 2, 2), padding=(1, 2, 2))
        self.convt2 = nn.ConvTranspose3d(in_channels=128, out_channels=64, kernel_size=(4, 6, 6), stride=(2, 2, 2), padding=(1, 2, 2))
        self.convt3 = nn.ConvTranspose3d(in_channels=64, out_channels=32, kernel_size=(3, 6, 6), stride=(1, 2, 2), padding=(1, 2, 2))
        self.prelu = nn.PReLU()
        # NOTE(review): sigmoid and upsample are constructed but never used in
        # forward(); possibly kept for checkpoint compatibility -- TODO confirm.
        self.sigmoid = nn.Sigmoid()
        self.upsample = nn.Upsample(size=(rodnet_configs['win_size'], radar_configs['ramap_rsize'], radar_configs['ramap_asize']), mode='nearest')

    def forward(self, x):
        # Three upsampling transposed convs, PReLU after each; no output
        # activation is applied here (sigmoid above is unused).
        x = self.prelu(self.convt1(x))
        x = self.prelu(self.convt2(x))
        x = self.prelu(self.convt3(x))
        return x
class TestCircuitBit(QiskitTestCase):
    """Deprecated-API tests for circuit bits: indexing a bit and addressing
    gates with (register, index) tuples still work but raise warnings."""

    def test_bit_getitem(self):
        """Indexing a bit yields its register and index (deprecated)."""
        bit = QuantumRegister(1, 'q')[0]
        with self.assertWarns(DeprecationWarning):
            self.assertEqual(bit[0], bit.register)
            self.assertEqual(bit[1], bit.index)

    def test_gate_with_tuples(self):
        """A (register, index) tuple is accepted as a qubit spec (deprecated)."""
        register = QuantumRegister(1)
        circuit = QuantumCircuit(register)
        reference = QuantumCircuit(register)
        reference.h(register[0])
        with self.assertWarns(DeprecationWarning):
            circuit.h((register, 0))
        self.assertEqual(circuit, reference)

    def test_gate_with_tuple_list(self):
        """A list of (register, index) tuples is accepted as qubits (deprecated)."""
        register = QuantumRegister(2)
        circuit = QuantumCircuit(register)
        reference = QuantumCircuit(register)
        reference.h([register[0], register[1]])
        with self.assertWarns(DeprecationWarning):
            circuit.h([(register, 0), (register, 1)])
        self.assertEqual(circuit, reference)
def query_argname(arg_name):
    """Return names of APIs whose DB collection has a record for *arg_name*.

    An API qualifies when its collection contains a document keyed by
    *arg_name* directly, or — failing that — by the positional form
    ``parameter:<idx>`` derived from the API's signature.
    """
    def index_name(api_name, arg_name):
        # Map a keyword argument name to its positional key 'parameter:<idx>'.
        arg_names = DB[signature_collection].find_one({'api': api_name})['args']
        for (idx, name) in enumerate(arg_names):
            if name == arg_name:
                return f'parameter:{idx}'
        return None

    APIs = []
    for api_name in API_args.keys():
        # Skip APIs without a collection or that don't take this argument.
        if (api_name not in DB.list_collection_names()) or (arg_name not in API_args[api_name]):
            continue
        temp = DB[api_name].find_one({arg_name: {'$exists': True}})
        if temp is None:  # fixed: identity comparison instead of '== None'
            # No keyword-form record: fall back to the positional key.
            idx_name = index_name(api_name, arg_name)
            if idx_name and DB[api_name].find_one({idx_name: {'$exists': True}}):
                APIs.append(api_name)
        else:
            APIs.append(api_name)
    return APIs
def execute_only_once():
    """Return True the first time this is reached from a given call site.

    The call site is identified by the caller's (filename, line number);
    every later call from that exact location returns False.
    """
    caller = inspect.currentframe().f_back
    site = (caller.f_code.co_filename, caller.f_lineno)
    if site in _EXECUTE_HISTORY:
        return False
    _EXECUTE_HISTORY.add(site)
    return True
def test_DPICT_main(epoch, test_dataloader, model, criterion, quantize_parameters=0, loss_weights=np.array([(1 / 3), (1 / 3), (1 / 3)]), distillation=True):
    """Evaluate a DPICT model over the test set at each quality level.

    criterion is a list, one loss module per quality level (channel split);
    quantize_parameters is indexed per level (callers pass a sequence — the
    scalar default 0 would not be subscriptable; presumably never used as-is,
    TODO confirm).  With distillation=True, levels > 0 are scored against the
    previous level's reconstruction instead of the ground truth.

    Returns (per-sample loss matrix, weighted overall loss) from LossRecoder.
    """
    model.eval()
    device = next(model.parameters()).device
    # One running-average meter per quality level for each tracked statistic.
    loss = []
    bpp_loss = []
    mse_loss = []
    msssim_loss = []
    aux_loss = []
    psnr = []
    msssim = []
    msssim_db = []
    for index_divide in range(len(criterion)):
        loss.append(AverageMeter())
        bpp_loss.append(AverageMeter())
        mse_loss.append(AverageMeter())
        msssim_loss.append(AverageMeter())
        aux_loss.append(AverageMeter())
        psnr.append(AverageMeter())
        msssim.append(AverageMeter())
        msssim_db.append(AverageMeter())
    with torch.no_grad():
        i = (- 1)
        for d in test_dataloader:
            i += 1
            d = d.to(device)
            for index_divide in range(len(criterion)):
                # Choose the target: ground truth at level 0; at higher levels
                # the previous level's output when distilling (out_net carries
                # over from the previous loop iteration).
                if (index_divide == 0):
                    d_for_loss = d.detach()
                elif ((index_divide > 0) and (distillation == True)):
                    d_for_loss = out_net['x_hat'].detach()
                elif ((index_divide > 0) and (distillation == False)):
                    d_for_loss = d.detach()
                out_net = model(d, index_channel=index_divide, quantize_parameters=[quantize_parameters[index_divide], 0, 0, 0])
                out_criterion = criterion[index_divide](out_net, d_for_loss)
                loss_this_index = out_criterion['loss']
                # Accumulate per-sample losses as a (batch, level) matrix.
                if (index_divide == 0):
                    loss_across_index = np.expand_dims(loss_this_index.cpu().detach().numpy(), axis=1)
                elif (index_divide > 0):
                    loss_across_index = np.concatenate((loss_across_index, np.expand_dims(loss_this_index.cpu().detach().numpy(), axis=1)), axis=1)
                aux_loss[index_divide].update(model.aux_loss())
                bpp_loss[index_divide].update(out_criterion['bpp_loss'].mean())
                mse_loss[index_divide].update(out_criterion['mse_loss'].mean())
                msssim_loss[index_divide].update(out_criterion['ms_ssim_loss'].mean())
                loss[index_divide].update(out_criterion['loss'].mean())
                psnr[index_divide].update(out_criterion['PSNR'].mean())
                msssim[index_divide].update(out_criterion['MS-SSIM'].mean())
                msssim_db[index_divide].update(out_criterion['MS-SSIM-DB'].mean())
            # Lazily create the recorder on the first batch, then append.
            if (i == 0):
                loss_recoder = LossRecoder(loss_across_index)
            elif (i > 0):
                loss_recoder.update_losses(loss_across_index)
    for index_divide in range(len(criterion)):
        logging.info(f'''Test epoch {epoch}: Average losses: Loss: {loss[index_divide].avg:.3f} | MSE loss: {mse_loss[index_divide].avg:.3f} | MS-SSIM loss: {msssim_loss[index_divide].avg:.4f} | Bpp loss: {bpp_loss[index_divide].avg:.2f} | Aux loss: {aux_loss[index_divide].avg:.2f} | PSNR: {psnr[index_divide].avg:.2f} | MS-SSIM: {msssim[index_divide].avg:.4f} | MS-SSIM(DB): {msssim_db[index_divide].avg:.2f}
''')
    loss_recoder.update_overall_loss(loss_weights)
    return (loss_recoder.losses, loss_recoder.overall_loss)
_config
def exploration():
    """Config for a Habitat exploration experiment.

    NOTE(review): nothing is returned — the locals (uuid, cfg) are presumably
    captured by a config decorator (sacred-style) that appears truncated
    above as '_config' — TODO confirm.
    """
    uuid = 'habitat_exploration'
    cfg = {}
    # Learner: perception net sees one map channel and no target vector.
    cfg['learner'] = {'lr': 0.001, 'perception_network_kwargs': {'n_map_channels': 1, 'use_target': False}}
    # Env: map/global_pos observations, pooled 84x84 map, slack reward 0.
    cfg['env'] = {'env_name': 'Habitat_Exploration', 'transform_fn_pre_aggregation_fn': 'TransformFactory.independent', 'transform_fn_pre_aggregation_kwargs': {'names_to_transforms': {'map': 'identity_transform()', 'global_pos': 'identity_transform()'}, 'keep_unnamed': False}, 'transform_fn_post_aggregation_fn': 'TransformFactory.independent', 'transform_fn_post_aggregation_kwargs': {'names_to_transforms': {'map': 'map_pool_collated((1,84,84))'}, 'keep_unnamed': True}, 'env_specific_kwargs': {'scenario_kwargs': {'max_episode_steps': 1000}, 'map_kwargs': {'map_size': 84, 'fov': (np.pi / 2), 'min_depth': 0, 'max_depth': 1.5, 'relative_range': True, 'map_x_range': [(- 11), 11], 'map_y_range': [(- 11), 11], 'fullvision': False}, 'reward_kwargs': {'slack_reward': 0}}}
class MultilingualDatasetManager(object):
    def __init__(self, args, lang_pairs, langs, dicts, sampling_method):
        """Hold multilingual data state: language pairs, per-language
        dictionaries, and the sampling method used to mix datasets.
        """
        super().__init__()
        self.args = args
        self.seed = args.seed
        self.lang_pairs = lang_pairs
        # Extra pairs come from a {data_name: "a-b,c-d"} mapping, flattened
        # and de-duplicated.
        self.extra_lang_pairs = (list({p for (_, v) in args.extra_lang_pairs.items() for p in v.split(',')}) if args.extra_lang_pairs else [])
        self.src_langs = {p.split('-')[0] for p in (args.lang_pairs + self.extra_lang_pairs)}
        self.tgt_langs = {p.split('-')[1] for p in (args.lang_pairs + self.extra_lang_pairs)}
        self.langs = langs
        self.dicts = dicts
        self.lang_dict = self.create_lang_dictionary(self.langs)
        self.sampling_method = sampling_method
        self.sampling_scheduler = None
        # Sharding bookkeeping; populated when datasets are discovered.
        self._has_sharded_data = False
        self._num_shards_dict = {}
        # split -> {dataset key -> size}: training data sizes seen so far.
        self._training_data_sizes = defaultdict((lambda : {}))
def setup_data_manager(cls, args, lang_pairs, langs, dicts, sampling_method):
return MultilingualDatasetManager(args, lang_pairs, langs, dicts, sampling_method)
def add_args(parser):
parser.add_argument('data', help='colon separated path to data directories list, will be iterated upon during epochs in round-robin manner', action=FileContentsAction)
parser.add_argument('--langs', default=None, type=csv_str_list, help='a list of languages comma sperated languages which can appear in lang-pairs; note that the ordering determines language token IDs')
parser.add_argument('--lang-dict', default=None, type=str, help='an external file which contains a list of languages which can appear in lang-pairs; note that the ordering determines language token IDs; --langs and --lang-dict are two exclusive options')
parser.add_argument('--source-dict', default=None, type=str, help='path to source dictionary; if specified it will override per language dictionary loading')
parser.add_argument('--target-dict', default=None, type=str, help='path to target dictionary; if specified it will override per language dictionary loading')
parser.add_argument('--lang-tok-style', default=LangTokStyle.multilingual.value, type=str, choices=[LangTokStyle.multilingual.value, LangTokStyle.mbart.value], help='language token styles')
parser.add_argument('--load-alignments', action='store_true', help='load the binarized alignments')
parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL', help='pad the source on the left')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL', help='pad the target on the left')
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence')
parser.add_argument('--upsample-primary', default=1, type=int, help='amount to upsample primary dataset')
parser.add_argument('--truncate-source', action='store_true', default=False, help='truncate source to max-source-positions')
parser.add_argument('--encoder-langtok', default=None, type=str, choices=[EncoderLangtok.src.value, EncoderLangtok.tgt.value], metavar='SRCTGT', help='prepend to the beginning of source sentence the source or target language token. (src/tgt)')
parser.add_argument('--decoder-langtok', action='store_true', help='prepend to the beginning of target sentence the target language token')
parser.add_argument('--lang-tok-replacing-bos-eos', action='store_true', default=False)
parser.add_argument('--enable-lang-ids', default=False, action='store_true', help='whether to include language IDs in samples')
parser.add_argument('--enable-reservsed-directions-shared-datasets', default=False, action='store_true', help='whether to allow datasets be used in reversed directions')
parser.add_argument('--extra-data', help='a dictionary of data name to this path, e.g. {"mined", path_to_mined_data, "denoised": path_to_denoised_data}', type=(lambda uf: eval_str_dict(uf, type=str)), default=None)
parser.add_argument('--extra-lang-pairs', help='a dictionary of data name to the language pairs they serve, e.g. {"mined": comma-separated-lang-pairs, "denoised": comma-separated-lang-pairs}', type=(lambda uf: eval_str_dict(uf, type=str)), default=None)
parser.add_argument('--fixed-dictionary', help='Fixed dictionary to use with model path', default=None, type=str)
parser.add_argument('--langtoks-specs', help='a list of comma separated data types that a set of language tokens to be specialized for, e.g. "main,dae,mined". There will be a set of language tokens added to the vocab to distinguish languages in different training data types. If not specified, default language tokens per languages will be added', default=LangTokSpec.main.value, type=csv_str_list)
parser.add_argument('--langtoks', help='a dictionary of how to add language tokens, e.g. {"mined": (None, "tgt"), "mono_dae": ("src.dae", "tgt"), "main": ("src", "tgt")}, or {"mined": ("src.mined", "tgt")}', default=None, type=(lambda uf: eval_str_dict(uf, type=str)))
parser.add_argument('--sampling-weights-from-file', help='a file contain a python dictionary of how to sample data sets, e.g. { "main:en_XX-es_XX": 0.2, "mined:en_XX-pt_XX": 0.5, "mono_dae:es_XX-es_XX: 0.3, "main:en_xx-fr_XX": 0.8 }', default=None, type=str)
parser.add_argument('--sampling-weights', help='a dictionary of how to sample data sets, e.g. { "main:en_XX-es_XX": 0.2, "mined:en_XX-pt_XX": 0.5, "mono_dae:es_XX-es_XX: 0.3, "main:en_xx-fr_XX": 0.8 }', default=None, type=(lambda uf: eval_str_dict(uf, type=str)))
parser.add_argument('--virtual-epoch-size', default=None, type=int, help='virtual epoch size to speed up data loading')
parser.add_argument('--virtual-data-size', default=None, type=int, help='virtual data size of the whole joint dataset to speedup data loading and have specific dynamic sampling strategy interval')
def load_langs(cls, args, **kwargs):
if (args.lang_dict and args.langs):
raise ValueError('--langs and --lang-dict can not both be specified')
if ((args.lang_dict is None) and (args.langs is None)):
logger.warning('External language dictionary is not provided; use lang-pairs to infer the set of supported languages. The language ordering is not stable which might cause misalignment in pretraining and finetuning.')
langs = list({x for lang_pair in args.lang_pairs for x in lang_pair.split('-')})
langs = sorted(langs)
logger.info(f'inferred language list: {langs}')
elif args.lang_dict:
with open(PathManager.get_local_path(args.lang_dict), 'r', encoding='utf-8') as f:
langs = [lang.strip() for lang in f.readlines() if lang.strip()]
logger.info(f'loaded language list from {args.lang_dict} as they are ordered in file')
elif args.langs:
langs = args.langs
logger.info(f'parsed the language list as they are ordered in the option: {langs}')
return langs
def has_sharded_data(self, split):
return (self._has_sharded_data and (split == getattr(self.args, 'train_subset', None)))
def _shared_collater(self):
return ((not (self.args.extra_data and ('mono_dae' in self.args.extra_data))) and (not self.args.lang_tok_replacing_bos_eos))
def estimate_global_pass_epoch(self, epoch):
if ((self.args.virtual_epoch_size is None) or (self.args.virtual_data_size is None)):
return None
virtual_epochs_per_shard = math.ceil((self.args.virtual_data_size / self.args.virtual_epoch_size))
shard_epoch = (((epoch - 1) // virtual_epochs_per_shard) + 1)
return shard_epoch
def prepare(cls, load_dictionary, args, **kargs):
args.left_pad_source = utils.eval_bool(args.left_pad_source)
args.left_pad_target = utils.eval_bool(args.left_pad_target)
if (not hasattr(args, 'shuffle_instance')):
args.shuffle_instance = False
if (args.langtoks is None):
args.langtoks = {}
if ('main' not in args.langtoks):
src_langtok_spec = (args.encoder_langtok if args.encoder_langtok else None)
tgt_langtok_spec = ('tgt' if args.decoder_langtok else None)
args.langtoks['main'] = (src_langtok_spec, tgt_langtok_spec)
def check_langs(langs, pairs):
messages = []
for (src, tgt) in pairs:
if ((src not in langs) or (tgt not in langs)):
messages.append(f'language pair {src}-{tgt} contains languages that are not in the language dictionary')
if (len(messages) > 0):
raise ValueError((' '.join(messages) + f'; langs: {langs}'))
if (args.lang_pairs is None):
raise ValueError('--lang-pairs is required. List all the language pairs in the training objective.')
if isinstance(args.lang_pairs, str):
args.lang_pairs = args.lang_pairs.split(',')
if ((args.source_lang is not None) or (args.target_lang is not None)):
training = False
else:
training = True
language_list = cls.load_langs(args, **kargs)
check_langs(language_list, ([p.split('-') for p in args.lang_pairs] if training else [(args.source_lang, args.target_lang)]))
def load_dictionary_and_postproc(path):
d = load_dictionary(path)
augment_dictionary(dictionary=d, language_list=language_list, lang_tok_style=args.lang_tok_style, langtoks_specs=args.langtoks_specs, extra_data=args.extra_data)
return d
dicts = cls.load_all_dictionaries(args, language_list, load_dictionary_and_postproc, training)
return (language_list, dicts, training)
def load_all_dictionaries(cls, args, language_list, load_dictionary, training):
dicts = OrderedDict()
if (args.source_dict is not None):
dicts[SRC_DICT_NAME] = load_dictionary(args.source_dict)
if (args.target_dict is not None):
dicts[TGT_DICT_NAME] = load_dictionary(args.target_dict)
if training:
extra_lang_pairs = (list({p for (_, v) in args.extra_lang_pairs.items() for p in v.split(',')}) if args.extra_lang_pairs else [])
src_langs_to_load_dicts = sorted({p.split('-')[0] for p in (args.lang_pairs + extra_lang_pairs)})
tgt_langs_to_load_dicts = sorted({p.split('-')[1] for p in (args.lang_pairs + extra_lang_pairs)})
else:
src_langs_to_load_dicts = [args.source_lang]
tgt_langs_to_load_dicts = [args.target_lang]
paths = utils.split_paths(args.data)
assert (len(paths) > 0)
def load_dicts(langs_to_load_dicts):
for lang in langs_to_load_dicts:
dicts[lang] = load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(lang)))
if (len(dicts) > 0):
dict0 = next(iter(dicts.values()))
assert (dicts[lang].pad() == dict0.pad())
assert (dicts[lang].eos() == dict0.eos())
assert (dicts[lang].unk() == dict0.unk())
logger.info('[{}] dictionary: {} types'.format(lang, len(dicts[lang])))
if (args.fixed_dictionary is not None):
fixed_dict = load_dictionary(args.fixed_dictionary)
dicts = {lang: fixed_dict for lang in (src_langs_to_load_dicts + tgt_langs_to_load_dicts)}
else:
if (args.source_dict is None):
load_dicts(src_langs_to_load_dicts)
if (args.target_dict is None):
load_dicts(tgt_langs_to_load_dicts)
return dicts
def get_source_dictionary(self, lang):
if (self.args.source_dict is not None):
return self.dicts[SRC_DICT_NAME]
else:
return self.dicts[lang]
def get_target_dictionary(self, lang):
if (self.args.target_dict is not None):
return self.dicts[TGT_DICT_NAME]
else:
return self.dicts[lang]
def create_lang_dictionary(cls, langs):
unk = '<unk>'
lang_dict = Dictionary(pad=unk, eos=unk, unk=unk, bos=unk)
for lang in langs:
lang_dict.add_symbol(lang)
return lang_dict
def get_langtok_index(cls, lang_tok, dic):
idx = dic.index(lang_tok)
assert (idx != dic.unk_index), 'cannot find language token {} in the dictionary'.format(lang_tok)
return idx
def get_encoder_langtok(self, src_lang, tgt_lang, spec=None):
if (spec is None):
return None
if (spec and spec.startswith('src')):
if (src_lang is None):
return None
langtok = get_lang_tok(lang=src_lang, lang_tok_style=self.args.lang_tok_style, spec=spec)
else:
if (tgt_lang is None):
return None
langtok = get_lang_tok(lang=tgt_lang, lang_tok_style=self.args.lang_tok_style, spec=spec)
return self.get_langtok_index(langtok, (self.get_source_dictionary(src_lang) if src_lang else self.get_target_dictionary(tgt_lang)))
def get_decoder_langtok(self, tgt_lang, spec=None):
if (spec is None):
return None
langtok = get_lang_tok(lang=tgt_lang, lang_tok_style=self.args.lang_tok_style, spec=spec)
return self.get_langtok_index(langtok, self.get_target_dictionary(tgt_lang))
def load_data(cls, path, vdict, impl):
dataset = data_utils.load_indexed_dataset(path, vdict, impl)
return dataset
def split_exists(cls, split, src, tgt, lang, data_path, dataset_impl):
filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
    def load_lang_dataset(self, data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, max_source_positions, prepend_bos=False, load_alignments=False, truncate_source=False):
        """Load (src, tgt, alignment) datasets for one direction and split.

        Iterates over shard suffixes ('train', 'train1', ...) while *combine*
        is set, accepting files named either src-tgt or tgt-src; missing
        shard 0 raises FileNotFoundError. Optionally truncates sources,
        upsamples the primary shard, and prepends BOS to both sides.
        Returns (src_dataset, tgt_dataset, align_dataset-or-None).
        """
        src_datasets = []
        tgt_datasets = []
        for k in itertools.count():
            # Shard k uses the bare split name for k == 0, else 'split<k>'.
            split_k = (split + (str(k) if (k > 0) else ''))
            if self.split_exists(split_k, src, tgt, src, data_path, dataset_impl):
                prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, src, tgt))
            elif self.split_exists(split_k, tgt, src, src, data_path, dataset_impl):
                # Files may be named with the reversed direction.
                prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, tgt, src))
            elif (k > 0):
                # Ran past the last shard.
                break
            else:
                logger.error(f'Dataset not found: {data_path}, {split_k}, {src}, {tgt}')
                raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
            src_dataset = self.load_data((prefix + src), src_dict, dataset_impl)
            if truncate_source:
                # Strip EOS, truncate to leave room for it, then re-append it.
                src_dataset = AppendTokenDataset(TruncateDataset(StripTokenDataset(src_dataset, src_dict.eos()), (max_source_positions - 1)), src_dict.eos())
            src_datasets.append(src_dataset)
            tgt_datasets.append(self.load_data((prefix + tgt), tgt_dict, dataset_impl))
            logger.info('{} {} {}-{} {} examples'.format(data_path, split_k, src, tgt, len(src_datasets[(- 1)])))
            if (not combine):
                break
        assert (len(src_datasets) == len(tgt_datasets))
        if (len(src_datasets) == 1):
            (src_dataset, tgt_dataset) = (src_datasets[0], tgt_datasets[0])
        else:
            # Concatenate shards, upsampling only the primary (first) one.
            sample_ratios = ([1] * len(src_datasets))
            sample_ratios[0] = upsample_primary
            src_dataset = ConcatDataset(src_datasets, sample_ratios)
            tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
        if prepend_bos:
            assert (hasattr(src_dict, 'bos_index') and hasattr(tgt_dict, 'bos_index'))
            src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
            tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
        align_dataset = None
        if load_alignments:
            align_path = os.path.join(data_path, '{}.align.{}-{}'.format(split, src, tgt))
            if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
                align_dataset = data_utils.load_indexed_dataset(align_path, None, dataset_impl)
        return (src_dataset, tgt_dataset, align_dataset)
def load_langpair_dataset(self, data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, left_pad_source, left_pad_target, max_source_positions, max_target_positions, prepend_bos=False, load_alignments=False, truncate_source=False, src_dataset_transform_func=(lambda dataset: dataset), tgt_dataset_transform_func=(lambda dataset: dataset), src_lang_id=None, tgt_lang_id=None, langpairs_sharing_datasets=None):
    """Load (or reuse from cache) one direction's LanguagePairDataset.

    ``langpairs_sharing_datasets`` is an optional cache keyed by a
    direction-normalized tuple, so the reversed direction of a pair can
    reuse already-loaded source/target data instead of re-loading it.
    """
    # Direction-agnostic key so that e.g. en-fr and fr-en share cache entries.
    norm_direction = '-'.join(sorted([src, tgt]))
    if (langpairs_sharing_datasets is not None):
        # 'NotInCache' sentinel distinguishes a cache miss from a cached None.
        src_dataset = langpairs_sharing_datasets.get((data_path, split, norm_direction, src), 'NotInCache')
        tgt_dataset = langpairs_sharing_datasets.get((data_path, split, norm_direction, tgt), 'NotInCache')
        align_dataset = langpairs_sharing_datasets.get((data_path, split, norm_direction, src, tgt), 'NotInCache')
    # A fresh load is required on any cache miss, or for non-train splits.
    if ((langpairs_sharing_datasets is None) or (src_dataset == 'NotInCache') or (tgt_dataset == 'NotInCache') or (align_dataset == 'NotInCache') or (split != getattr(self.args, 'train_subset', None))):
        (src_dataset, tgt_dataset, align_dataset) = self.load_lang_dataset(data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, max_source_positions=max_source_positions, prepend_bos=prepend_bos, load_alignments=load_alignments, truncate_source=truncate_source)
        src_dataset = src_dataset_transform_func(src_dataset)
        tgt_dataset = tgt_dataset_transform_func(tgt_dataset)
        if (langpairs_sharing_datasets is not None):
            langpairs_sharing_datasets[(data_path, split, norm_direction, src)] = src_dataset
            langpairs_sharing_datasets[(data_path, split, norm_direction, tgt)] = tgt_dataset
            langpairs_sharing_datasets[(data_path, split, norm_direction, src, tgt)] = align_dataset
            if (align_dataset is None):
                # Also record the reverse direction's (empty) alignment entry so
                # the reversed pair does not reload just because it is missing.
                langpairs_sharing_datasets[(data_path, split, norm_direction, tgt, src)] = align_dataset
    else:
        logger.info(f'Reusing source and target datasets of [{split}] {tgt}-{src} for reversed direction: [{split}] {src}-{tgt}: src length={len(src_dataset)}; tgt length={len(tgt_dataset)}')
    return LanguagePairDataset(src_dataset, src_dataset.sizes, src_dict, tgt_dataset, (tgt_dataset.sizes if (tgt_dataset is not None) else None), tgt_dict, left_pad_source=left_pad_source, left_pad_target=left_pad_target, align_dataset=align_dataset, src_lang_id=src_lang_id, tgt_lang_id=tgt_lang_id)
def src_dataset_tranform_func(self, src_lang, tgt_lang, dataset, spec=None):
    """Optionally prepend the encoder language token to *dataset*.

    The dataset is returned unchanged when language tokens replace BOS/EOS
    (handled elsewhere), when no spec is given, or when no token resolves.
    """
    if self.args.lang_tok_replacing_bos_eos or spec is None:
        return dataset
    lang_tok = self.get_encoder_langtok(src_lang, tgt_lang, spec)
    return PrependTokenDataset(dataset, lang_tok) if lang_tok else dataset
def tgt_dataset_tranform_func(self, source_lang, target_lang, dataset, spec=None):
    """Optionally prepend the decoder language token to *dataset*."""
    if dataset is None:
        # Monolingual setups may have no target side at all.
        return None
    if self.args.lang_tok_replacing_bos_eos or not spec:
        return dataset
    lang_tok = self.get_decoder_langtok(target_lang, spec)
    return PrependTokenDataset(dataset, lang_tok) if lang_tok else dataset
def alter_dataset_langtok(self, lang_pair_dataset, src_eos=None, src_lang=None, tgt_eos=None, tgt_lang=None, src_langtok_spec=None, tgt_langtok_spec=None):
    """Wrap *lang_pair_dataset* so that EOS/BOS are swapped for language tokens."""
    if src_langtok_spec is None and tgt_langtok_spec is None:
        # Nothing to rewrite.
        return lang_pair_dataset
    new_src_eos = None
    new_tgt_bos = None
    replace_src = (src_langtok_spec is not None) and (src_eos is not None) and ((src_lang is not None) or (tgt_lang is not None))
    if replace_src:
        new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang, src_langtok_spec)
    else:
        src_eos = None
    replace_tgt = bool(tgt_langtok_spec) and (tgt_eos is not None) and (tgt_lang is not None)
    if replace_tgt:
        new_tgt_bos = self.get_decoder_langtok(tgt_lang, tgt_langtok_spec)
    else:
        tgt_eos = None
    # NOTE: the target-side EOS value is passed as the decoder BOS to replace.
    return TransformEosLangPairDataset(lang_pair_dataset, src_eos=src_eos, new_src_eos=new_src_eos, tgt_bos=tgt_eos, new_tgt_bos=new_tgt_bos)
def load_a_dataset(self, split, data_path, src, src_dict, tgt, tgt_dict, combine, prepend_bos=False, langpairs_sharing_datasets=None, data_category=None, **extra_kwargs):
    """Load one (category, direction) dataset, applying language-token handling.

    ``extra_kwargs`` must contain ``langtok_spec`` = (src_spec, tgt_spec).
    Depending on ``args.lang_tok_replacing_bos_eos``, language tokens are
    either prepended via the transform functions or swapped in for BOS/EOS
    afterwards via alter_dataset_langtok.
    """
    dataset_impl = self.args.dataset_impl
    upsample_primary = self.args.upsample_primary
    left_pad_source = self.args.left_pad_source
    left_pad_target = self.args.left_pad_target
    max_source_positions = self.args.max_source_positions
    max_target_positions = self.args.max_target_positions
    load_alignments = self.args.load_alignments
    truncate_source = self.args.truncate_source
    src_dataset_transform_func = self.src_dataset_tranform_func
    tgt_dataset_transform_func = self.tgt_dataset_tranform_func
    enable_lang_ids = self.args.enable_lang_ids
    lang_dictionary = self.lang_dict
    (src_langtok_spec, tgt_langtok_spec) = extra_kwargs['langtok_spec']
    # Resolved here only for logging; the transform lambdas resolve their own.
    src_langtok = self.get_encoder_langtok(src, tgt, src_langtok_spec)
    tgt_langtok = self.get_decoder_langtok(tgt, tgt_langtok_spec)
    logger.info(f'{data_category}:{src}-{tgt} src_langtok: {src_langtok}; tgt_langtok: {tgt_langtok}')
    langpair_ds = self.load_langpair_dataset(data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, left_pad_source, left_pad_target, max_source_positions, max_target_positions, prepend_bos, load_alignments, truncate_source, src_dataset_transform_func=(lambda dataset: src_dataset_transform_func(src, tgt, dataset, src_langtok_spec)), tgt_dataset_transform_func=(lambda dataset: tgt_dataset_transform_func(src, tgt, dataset, tgt_langtok_spec)), src_lang_id=(_lang_id(lang_dictionary, src) if (enable_lang_ids and (lang_dictionary is not None)) else None), tgt_lang_id=(_lang_id(lang_dictionary, tgt) if (enable_lang_ids and (lang_dictionary is not None)) else None), langpairs_sharing_datasets=langpairs_sharing_datasets)
    if self.args.lang_tok_replacing_bos_eos:
        # Replace EOS/BOS with language tokens instead of prepending them.
        ds = self.alter_dataset_langtok(langpair_ds, src_eos=(self.get_source_dictionary(src).eos() if src else self.get_target_dictionary(tgt).eos()), src_lang=src, tgt_eos=self.get_target_dictionary(tgt).eos(), tgt_lang=tgt, src_langtok_spec=src_langtok_spec, tgt_langtok_spec=tgt_langtok_spec)
    else:
        ds = langpair_ds
    return ds
def load_split_langpair_datasets(self, split, data_param_list):
    """Load one dataset per parameter dict in *data_param_list*."""
    # Shared cache lets reversed directions reuse loaded data, if enabled.
    shared_cache = {} if self.args.enable_reservsed_directions_shared_datasets else None
    return [self.load_a_dataset(split=split, langpairs_sharing_datasets=shared_cache, **param) for param in data_param_list]
def get_data_paths_and_lang_pairs(self, split):
    """Return ({category: data paths}, {category: lang pairs}) for *split*.

    Extra data/pairs from args are only mixed in for the training subset.
    """
    datapaths = {'main': self.args.data}
    lang_pairs = {'main': self.lang_pairs}
    if split != getattr(self.args, 'train_subset', None):
        return (datapaths, lang_pairs)
    if self.args.extra_data:
        datapaths.update(self.args.extra_data)
    if self.args.extra_lang_pairs:
        # extra_lang_pairs values are comma-separated strings.
        lang_pairs.update({cat: pairs.split(',') for (cat, pairs) in self.args.extra_lang_pairs.items()})
    return (datapaths, lang_pairs)
def get_dataset_key(cls, data_category, src, tgt):
    """Build the canonical '<category>:<src>-<tgt>' dataset key."""
    return '{}:{}-{}'.format(data_category, src, tgt)
def _get_shard_num_dict(cls, split, paths):
    """Count, per language direction, how many of *paths* hold an .idx shard for *split*."""
    shard_counts = defaultdict(int)
    for path in paths:
        # Each direction is counted at most once per path, however many files match.
        directions_here = {fname.split('.')[(- 3)] for fname in PathManager.ls(path) if fname.startswith(split) and fname.endswith('.idx')}
        for direction in directions_here:
            shard_counts[direction] += 1
    return shard_counts
def get_split_num_data_shards(self, split):
    """Return {dataset key: shard count} for *split*, caching the result.

    A "shard" is one data path that contains the split for a given
    direction; mono_* categories are keyed by the target language only.
    """
    if (split in self._num_shards_dict):
        return self._num_shards_dict[split]
    num_shards_dict = {}
    (data_paths, lang_pairs) = self.get_data_paths_and_lang_pairs(split)
    for (data_category, paths) in data_paths.items():
        if (data_category not in lang_pairs):
            continue
        paths = utils.split_paths(paths)
        shards_dict = self._get_shard_num_dict(split, paths)
        lang_dirs = [lang_pair.split('-') for lang_pair in lang_pairs[data_category]]
        # Monolingual entries like 'en' become ('en', 'en').
        lang_dirs = [(x if (len(x) > 1) else (x[0], x[0])) for x in lang_dirs]
        for (src, tgt) in lang_dirs:
            key = self.get_dataset_key(data_category, src, tgt)
            if ('mono_' in data_category):
                # BUG FIX: 'tgt' and 'data_category' were brace-escaped
                # ({{...}}) and printed literally in the assert message;
                # interpolate the actual values instead.
                assert ((src is None) or (src == tgt)), f'error: src={src}, tgt={tgt} for data_category={data_category}'
                num_shards_dict[key] = shards_dict[tgt]
            elif (f'{src}-{tgt}' in shards_dict):
                num_shards_dict[key] = shards_dict[f'{src}-{tgt}']
            elif (f'{tgt}-{src}' in shards_dict):
                # Fall back to the reversed direction's shard count.
                num_shards_dict[key] = shards_dict[f'{tgt}-{src}']
    self._num_shards_dict[split] = num_shards_dict
    logger.info(f'[{split}] num of shards: {num_shards_dict}')
    return num_shards_dict
def get_shard_id(cls, num_shards, epoch, shard_epoch=None):
    """Map a 1-based epoch (or explicit *shard_epoch*) to a 0-based shard index, cycling."""
    effective_epoch = shard_epoch if shard_epoch is not None else epoch
    return (effective_epoch - 1) % num_shards
def get_split_data_path(self, paths, epoch, shard_epoch, num_shards):
    """Pick the data path corresponding to this epoch's shard."""
    shard_idx = self.get_shard_id(num_shards, epoch, shard_epoch)
    return paths[shard_idx]
def get_split_data_param_list(self, split, epoch, shard_epoch=None):
    """Build one parameter dict per (category, direction) to load for *split*.

    Each entry carries the dataset key, the epoch's shard data path, the
    language pair, their dictionaries, the data category and langtok spec.
    """
    param_list = []
    (data_paths, lang_pairs) = self.get_data_paths_and_lang_pairs(split)
    logger.info(f'langtoks settings: {self.args.langtoks}')
    split_num_shards_dict = self.get_split_num_data_shards(split)
    for (data_category, paths) in data_paths.items():
        if (data_category not in lang_pairs):
            continue
        paths = utils.split_paths(paths)
        assert (len(paths) > 0)
        if (len(paths) > 1):
            self._has_sharded_data = True
        if (split != getattr(self.args, 'train_subset', None)):
            # Only training cycles through shards; evaluation reads the first.
            paths = paths[:1]
        if (data_category in self.args.langtoks):
            lang_tok_spec = self.args.langtoks[data_category]
        else:
            # Default: no source or target language token.
            lang_tok_spec = (None, None)
        lang_dirs = [lang_pair.split('-') for lang_pair in lang_pairs[data_category]]
        # Monolingual entries like 'en' become ('en', 'en').
        lang_dirs = [(x if (len(x) > 1) else (x[0], x[0])) for x in lang_dirs]
        for (src, tgt) in lang_dirs:
            # BUG FIX: 'tgt' and 'data_category' were brace-escaped ({{...}})
            # and printed literally in the assert message; interpolate them.
            assert ((src is not None) or (data_category == 'mono_dae')), f'error: src={src}, tgt={tgt} for data_category={data_category}'
            key = self.get_dataset_key(data_category, src, tgt)
            data_path = self.get_split_data_path(paths, epoch, shard_epoch, split_num_shards_dict[key])
            param_list.append({'key': key, 'data_path': data_path, 'split': split, 'src': src, 'src_dict': (self.get_source_dictionary(src) if (src and (data_category != 'mono_dae')) else None), 'tgt': tgt, 'tgt_dict': self.get_target_dictionary(tgt), 'data_category': data_category, 'langtok_spec': lang_tok_spec})
    return param_list
def get_train_dataset_sizes(self, data_param_list, datasets, epoch, shard_epoch=None):
    """Estimate each dataset's total size across all of its shards.

    Only shards already loaded have exact sizes (cached in
    self._training_data_sizes); unseen shards are approximated by the
    largest shard size known so far.
    """
    num_shards = [self.get_split_num_data_shards(param['split'])[param['key']] for param in data_param_list]
    data_sizes = []
    for ((key, d), num_shard) in zip(datasets, num_shards):
        my_data_sizes = self._training_data_sizes[key]
        shard_ind = self.get_shard_id(num_shard, epoch, shard_epoch)
        if (shard_ind not in my_data_sizes):
            # Record this shard's size the first time it is loaded.
            my_data_sizes[shard_ind] = len(d)
        known_size = max(my_data_sizes.values())
        # Sum exact sizes where known, the max known size otherwise.
        data_sizes.append((key, sum((my_data_sizes.get(i, known_size) for i in range(num_shard)))))
    logger.info(f'estimated total data sizes of all shards used in sampling ratios: {data_sizes}. Note that if the data a shard has not been loaded yet, use the max known data size to approximate')
    return [s for (_, s) in data_sizes]
def get_train_sampling_ratios(self, data_param_list, datasets, epoch=1, shard_epoch=None):
    """Compute per-dataset sampling ratios from estimated sizes (None if no sampler configured)."""
    estimated_sizes = self.get_train_dataset_sizes(data_param_list, datasets, epoch, shard_epoch)
    sampler = self.sampling_method.sampling_method_selector()
    if sampler is None:
        return None
    return sampler(estimated_sizes)
def get_sampling_ratios(self, data_param_list, datasets, epoch, shard_epoch=None):
    """Resolve sampling ratios with precedence: weights file > explicit
    --sampling-weights > ratios computed by the configured sampling method.

    May return None, meaning no upsampling is applied.
    """
    if self.args.sampling_weights_from_file:
        # Weights file overrides everything else.
        weights = load_sampling_weights(self.args.sampling_weights_from_file)
        sample_ratios = [weights[k] for (k, _) in datasets]
        logger.info(f'| ignoring --sampling-weights when loadding sampling weights from file {self.args.sampling_weights_from_file}')
    elif self.args.sampling_weights:
        sample_ratios = [self.args.sampling_weights[k] for (k, _) in datasets]
    else:
        sample_ratios = self.get_train_sampling_ratios(data_param_list, datasets, epoch, shard_epoch)
    if (sample_ratios is not None):
        logger.info('| Upsample ratios: {}'.format(list(zip(map((lambda x: x['key']), data_param_list), sample_ratios))))
        assert (len(sample_ratios) == len(datasets))
    return sample_ratios
def load_split_datasets(self, split, training, epoch=1, combine=False, shard_epoch=None, **kwargs):
    """Load every (key, dataset) pair for *split*; returns (datasets, data_param_list)."""
    data_param_list = self.get_split_data_param_list(split, epoch, shard_epoch=shard_epoch)
    # Shared cache lets reversed directions reuse loaded data, if enabled.
    shared_cache = {} if self.args.enable_reservsed_directions_shared_datasets else None
    datasets = []
    for param in data_param_list:
        loaded = self.load_a_dataset(combine=combine, langpairs_sharing_datasets=shared_cache, **param)
        datasets.append((param['key'], loaded))
    return (datasets, data_param_list)
def load_into_concat_dataset(self, split, datasets, data_param_list):
    """Combine loaded datasets for non-sampled (evaluation-style) use."""
    if self.args.lang_tok_replacing_bos_eos:
        # BOS/EOS replacement needs the sampled wrapper's collater even without sampling.
        return SampledMultiDataset(OrderedDict(datasets), sampling_ratios=None, eval_key=None, collate_format=CollateFormat.single, virtual_size=None, split=split)
    return ConcatDataset([ds for (_, ds) in datasets])
def load_sampled_multi_epoch_dataset(self, split, training, epoch=0, combine=False, shard_epoch=None, **kwargs):
    """Build a SampledMultiEpochDataset for the train split, otherwise a concatenated dataset."""
    (datasets, data_param_list) = self.load_split_datasets(split, training, epoch, combine, shard_epoch=shard_epoch, **kwargs)
    is_train_split = training and (split == getattr(self.args, 'train_subset', None))
    if not is_train_split:
        return self.load_into_concat_dataset(split, datasets, data_param_list)
    sample_ratios = self.get_sampling_ratios(data_param_list, datasets, epoch)
    return SampledMultiEpochDataset(OrderedDict(datasets), epoch=epoch, shard_epoch=shard_epoch, sampling_ratios=sample_ratios, eval_key=None, collate_format=CollateFormat.single, virtual_size=self.args.virtual_data_size, split=split, virtual_epoch_size=self.args.virtual_epoch_size, shared_collater=self._shared_collater())
def load_sampled_multi_dataset(self, split, training, epoch=0, combine=False, shard_epoch=None, **kwargs):
    """Build a SampledMultiDataset for the train split, otherwise a concatenated dataset."""
    (datasets, data_param_list) = self.load_split_datasets(split, training, epoch, combine, shard_epoch=shard_epoch, **kwargs)
    is_train_split = training and (split == getattr(self.args, 'train_subset', None))
    if not is_train_split:
        return self.load_into_concat_dataset(split, datasets, data_param_list)
    sample_ratios = self.get_sampling_ratios(data_param_list, datasets, epoch)
    return SampledMultiDataset(OrderedDict(datasets), epoch=epoch, sampling_ratios=sample_ratios, eval_key=None, collate_format=CollateFormat.single, virtual_size=self.args.virtual_data_size, split=split, shared_collater=self._shared_collater())
def load_dataset(self, split, training, epoch=0, combine=False, shard_epoch=None, **kwargs):
    """Dispatch to the epoch-sharded loader when a virtual epoch size is configured."""
    use_virtual_epochs = self.args.virtual_epoch_size is not None
    loader = self.load_sampled_multi_epoch_dataset if use_virtual_epochs else self.load_sampled_multi_dataset
    return loader(split, training, epoch, combine, shard_epoch, **kwargs)
class ResNet(nn.Module):
    """ResNet / IBN-Net backbone truncated at *final_layer*, with an optional
    3x3 conv "neck" projecting features to *neck* channels.

    forward() output is batch-normalized and L2-normalized feature maps.
    """

    # depth -> torchvision constructor
    __factory = {18: torchvision.models.resnet18, 34: torchvision.models.resnet34, 50: torchvision.models.resnet50, 101: torchvision.models.resnet101, 152: torchvision.models.resnet152}

    def __init__(self, depth, ibn_type=None, final_layer='layer3', neck=128, pretrained=True):
        super(ResNet, self).__init__()
        self.depth = depth
        self.final_layer = final_layer
        self.neck = neck
        self.pretrained = pretrained
        if depth not in ResNet.__factory:
            raise KeyError('Unsupported depth: ', depth)
        if ibn_type is not None and depth == 152:
            # No IBN variant is available for ResNet-152.
            raise KeyError('Unsupported IBN-Net depth: ', depth)
        if ibn_type is None:
            print(f'\nCreate ResNet model ResNet-{depth}.\n')
            self.base = ResNet.__factory[depth](pretrained=pretrained)
        else:
            model_name = f'resnet{depth}_ibn_{ibn_type}'
            print(f'\nCreate IBN-Net model {model_name}.\n')
            self.base = torch.hub.load('XingangPan/IBN-Net', model_name, pretrained=pretrained)
        # Channel count after final_layer differs for basic-block (<50) vs bottleneck nets.
        out_planes = fea_dims_small[final_layer] if depth < 50 else fea_dims[final_layer]
        if neck > 0:
            self.neck_conv = nn.Conv2d(out_planes, neck, kernel_size=3, padding=1, bias=False)
            out_planes = neck
        self.neck_bn = nn.BatchNorm2d(out_planes)
        self.num_features = out_planes

    def forward(self, inputs):
        x = inputs
        # Run the backbone only up to (and including) final_layer.
        for name, module in self.base._modules.items():
            x = module(x)
            if name == self.final_layer:
                break
        if self.neck > 0:
            x = self.neck_conv(x)
        x = self.neck_bn(x)
        return F.normalize(x)
class Config(object):
    """Keybinding and label configuration for the annotation tool.

    Bindings come either from ``args.config_file`` or from the module-level
    ``input_action_list`` defaults, and are validated so that no binding is
    a strict prefix of another (which would be ambiguous while typing).
    """

    def __init__(self, args, labels=None):
        self.args = args
        # BUG FIX: `labels` previously defaulted to a shared mutable dict
        # ({}); labels parsed from one Config's file were stored into that
        # shared default and leaked into every later instance. Use a fresh
        # dict per instance instead.
        self.labels = {} if labels is None else labels
        self.annotation = args.ann_scope
        self.annotation_type = args.ann_type
        # (mode, key-symbol tuple) -> action name
        self.input_to_action = {}
        if (args.config_file is not None):
            # Parse "Input:" / "Label:" / "Special_Key:" lines from the file.
            for line in open(args.config_file):
                if line.startswith('Input:'):
                    (_, action, mode, key) = line.strip().split()
                    symbols = tuple(keydef_to_symbols(key))
                    if (mode == 'all'):
                        # 'all' is stored as None: binding active in any mode.
                        mode = None
                    self.add_keybinding(mode, symbols, action)
                elif line.startswith('Label:'):
                    (_, label, key, color) = line.strip().split()
                    symbols = tuple(keydef_to_symbols(key))
                    self.labels[label] = (symbols, color)
                elif line.startswith('Special_Key:'):
                    (_, symbol, key) = line.strip().split()
                    special_keys[int(key)] = symbol
                    symbol_to_key[symbol] = key
                    key_to_symbol[key] = symbol
        else:
            # No config file: fall back to the built-in action bindings.
            for action in input_action_list:
                for opt in input_action_list[action]:
                    (mode, symbol) = (None, opt)
                    if (type(opt) == tuple):
                        mode = opt[0]
                        symbol = opt[1]
                    if (type(symbol) == str):
                        symbol = [symbol]
                    self.add_keybinding(mode, tuple(symbol), action)
            for label in self.labels:
                (key, _) = self.labels[label]
                self.add_keybinding('category', key, 'edit-annotation')
        # key-symbol tuple -> label name, for reverse lookup.
        self.input_to_label = {}
        for label in self.labels:
            (key, _) = self.labels[label]
            if (type(key) == str):
                key = (key,)
            else:
                key = tuple(key)
            self.input_to_label[key] = label
        # Reject any binding that is a strict prefix of another binding.
        self.valid_prefixes = set()
        for (mode, symbol) in self.input_to_action:
            for i in range(1, len(symbol)):
                self.valid_prefixes.add((mode, symbol[:i]))
        for (mode, symbol) in self.input_to_action:
            if ((mode, symbol) in self.valid_prefixes):
                raise Exception('input {} overlaps with a prefix'.format(symbol))

    def get_color_for_label(self, mark):
        """Return the display color object for label *mark*."""
        name = self.labels[mark][1]
        return name_to_color[name]

    def get_label_for_input(self, user_input):
        """Return the label bound to *user_input* (symbol tuple), or None."""
        return self.input_to_label.get(user_input, None)

    def add_keybinding(self, mode, key, action):
        """Register *key* (symbol tuple) -> *action* in *mode*; reject duplicates."""
        pair = (mode, key)
        if (pair in self.input_to_action):
            raise Exception('input {} used twice'.format(pair))
        self.input_to_action[pair] = action

    def __str__(self):
        """Serialize the configuration in the same format read by __init__."""
        ans = []
        for (mode, key) in self.input_to_action:
            action = self.input_to_action[(mode, key)]
            if (mode is None):
                mode = 'all'
            key = '_'.join(key)
            ans.append('{:<15} {:<25} {:<20} {}'.format('Input:', action, mode, key))
        for label in self.labels:
            (key, color) = self.labels[label]
            key = '_'.join(key)
            ans.append('{:<15} {:<25} {} {}'.format('Label:', label, key, color))
        for key in special_keys:
            symbol = special_keys[key]
            ans.append('{:<15} {:<25} {}'.format('Special_Key:', symbol, key))
        return '\n'.join(ans)
# NOTE(review): the bare '_model' below looks like a truncated decorator line
# (presumably '@register_model' from the timm model registry) — confirm
# against the upstream source before relying on this function being registered.
_model
def gmixer_24_224(pretrained=False, **kwargs):
    """gMixer-24 @ 224x224: an MLP-Mixer variant using GLU MLP blocks and SiLU activation."""
    model_args = dict(patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=(1.0, 4.0), mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs)
    model = _create_mixer('gmixer_24_224', pretrained=pretrained, **model_args)
    return model
class TestTuningSpaceV2(unittest.TestCase):
    """Tests TuningSpace construction with int4 capabilities and op-wise user overrides."""

    def setUp(self) -> None:
        # Fresh capability dict per test; op_cap is a module-level fixture.
        self.capability = {'calib': {'calib_sampling_size': [1, 10, 50]}, 'op': deepcopy(op_cap)}
        # User op-wise config that forces op_name1 to fp32 (fallback).
        self.op_wise_user_cfg_for_fallback = {'op_name1': {'activation': {'dtype': ['fp32']}, 'weight': {'dtype': ['fp32']}}}

    def test_tuning_sampler_int4(self):
        """op_name3 should expose int4 among both static activation and weight dtypes."""
        conf = {'usr_cfg': {}}
        conf = DotDict(conf)
        tuning_space = TuningSpace(deepcopy(self.capability), deepcopy(conf))
        logger.debug(tuning_space.root_item.get_details())
        found_int4_activation = False
        found_int4_weight = False
        op3_act_item = tuning_space.query_quant_mode_item_by_full_path(('op_name3', 'op_type3'), ('static', 'activation'))
        for dtype_item in op3_act_item.options:
            if (dtype_item.name == 'int4'):
                found_int4_activation = True
        self.assertTrue(found_int4_activation)
        op3_weight_item = tuning_space.query_quant_mode_item_by_full_path(('op_name3', 'op_type3'), ('static', 'weight'))
        for dtype_item in op3_weight_item.options:
            if (dtype_item.name == 'int4'):
                found_int4_weight = True
        self.assertTrue(found_int4_weight)

    def test_sampler_int4(self):
        """Every config sampled for op_name3 should use int4 for activation and weight."""
        from collections import OrderedDict
        from neural_compressor.strategy.utils.tuning_sampler import OpWiseTuningSampler
        from neural_compressor.strategy.utils.tuning_structs import OpTuningConfig
        conf = {'usr_cfg': {}}
        conf = DotDict(conf)
        tuning_space = TuningSpace(deepcopy(self.capability), deepcopy(conf))
        logger.debug(tuning_space.root_item.get_details())
        # Start every op from an fp32 config.
        initial_op_tuning_cfg = {}
        for item in tuning_space.root_item.options:
            if (item.item_type == 'op'):
                (op_name, op_type) = item.name
                initial_op_tuning_cfg[item.name] = OpTuningConfig(op_name, op_type, 'fp32', tuning_space)
        # Group ops by quant mode; each op is assigned to the first matching
        # mode in the auto query order.
        quant_mode_wise_items = OrderedDict()
        from neural_compressor.strategy.utils.constant import auto_query_order as query_order
        pre_items = set()
        for quant_mode in query_order:
            items = tuning_space.query_items_by_quant_mode(quant_mode)
            filtered_items = [item for item in items if (item not in pre_items)]
            pre_items = pre_items.union(set(items))
            quant_mode_wise_items[quant_mode] = filtered_items

        def initial_op_quant_mode(items_lst, target_quant_mode, op_item_dtype_dict):
            # Assign target_quant_mode to every op item in items_lst.
            for item in items_lst:
                op_item_dtype_dict[item.name] = target_quant_mode
        op_item_dtype_dict = OrderedDict()
        for (quant_mode, quant_mode_items) in quant_mode_wise_items.items():
            initial_op_quant_mode(quant_mode_items, quant_mode, op_item_dtype_dict)
        op_wise_tuning_sampler = OpWiseTuningSampler(deepcopy(tuning_space), [], [], op_item_dtype_dict, initial_op_tuning_cfg)
        op3 = ('op_name3', 'op_type3')
        for tune_cfg in op_wise_tuning_sampler:
            op_cfg = tune_cfg[op3].get_state()
            act_dtype = op_cfg['activation']['dtype']
            weight_dtype = op_cfg['weight']['dtype']
            self.assertTrue((act_dtype == weight_dtype == 'int4'))

    def test_tuning_space_merge_op_wise(self):
        """With the op-wise fp32 override, op_name1 must not appear in any quant mode."""
        conf = {'usr_cfg': {'quantization': {'op_wise': self.op_wise_user_cfg_for_fallback}}}
        conf = DotDict(conf)
        tuning_space2 = TuningSpace(deepcopy(self.capability), deepcopy(conf))
        logger.debug(tuning_space2.root_item.get_details())
        op_name1_only_fp32 = True
        for quant_mode in ['static', 'dynamic']:
            for item in tuning_space2.query_items_by_quant_mode(quant_mode):
                if (item.name[0] == 'op_name1'):
                    op_name1_only_fp32 = False
        self.assertTrue(op_name1_only_fp32)
class SchedulerMixin():
    """Mixin giving schedulers config-based save/load and compatibility lookup."""
    config_name = SCHEDULER_CONFIG_NAME
    # Names of scheduler classes whose configs can be loaded into this one.
    _compatibles = []
    has_compatibles = True

    # NOTE(review): takes `cls` but no @classmethod decorator is visible in
    # this chunk — presumably applied upstream; confirm before calling.
    def from_pretrained(cls, pretrained_model_name_or_path: Dict[(str, Any)]=None, subfolder: Optional[str]=None, return_unused_kwargs=False, **kwargs):
        """Load a scheduler config (from a path or repo) and instantiate from it."""
        (config, kwargs, commit_hash) = cls.load_config(pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, return_unused_kwargs=True, return_commit_hash=True, **kwargs)
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[(str, os.PathLike)], push_to_hub: bool=False, **kwargs):
        """Write this scheduler's configuration to *save_directory*."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    # NOTE(review): reads like a @property / @classmethod pair upstream; the
    # decorators are not visible here — confirm the intended call style.
    def compatibles(self):
        """Return the scheduler classes compatible with this one."""
        return self._get_compatibles()

    def _get_compatibles(cls):
        # Resolve compatible class names against the top-level package module;
        # names not exported there are silently skipped.
        compatible_classes_str = list(set(([cls.__name__] + cls._compatibles)))
        diffusers_library = importlib.import_module(__name__.split('.')[0])
        compatible_classes = [getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)]
        return compatible_classes
class AttentionSaver(Callback):
    """Training callback that dumps attention maps for a fixed probe subset.

    On construction it takes 10 strided samples from the training set and
    records their indices/labels; during training it saves each sample's
    low-resolution input once and its attention map every epoch, under
    <output_directory>/attention.
    """

    def __init__(self, output_directory, att_model, training_set):
        self._dir = path.join(output_directory, 'attention')
        try:
            os.mkdir(self._dir)
        except FileExistsError:
            # Directory left over from a previous run; reuse it.
            pass
        self._att_model = att_model
        idxs = training_set.strided(10)
        data = [training_set[i] for i in idxs]
        self._X = np.array([d[0] for d in data])
        # Labels appear to be one-hot (argmax over axis 1) — keep class indices.
        self._Y = np.array([d[1] for d in data]).argmax(axis=1)
        # Persist (dataset index, class) pairs for the probe points.
        np.savetxt(path.join(self._dir, 'points.txt'), np.array([[i, yi] for (i, yi) in zip(idxs, self._Y)]).astype(int), fmt='%d')

    def on_train_begin(self, *args):
        """Save each probe point's low-resolution input image once."""
        (_, _, x_low) = self._att_model.predict(self._X)
        for (i, xi) in enumerate(x_low):
            self._imsave(path.join(self._dir, '{}.jpg').format(i), xi)

    def on_epoch_end(self, e, logs):
        """Save each probe point's attention map for epoch *e*."""
        (att, patches, _) = self._att_model.predict(self._X)
        for (i, att_i) in enumerate(att):
            np.save(path.join(self._dir, 'att_{}_{}.npy').format(e, i), att_i)

    def _imsave(self, filepath, x):
        # x is assumed in [0, 1]; scale to 8-bit before writing.
        x = (x * 255).astype(np.uint8)
        imwrite(filepath, x)
class NamedEntityConfig(BaseModel):
    """Request model for entity linking (EL) / entity disambiguation (ED)."""
    # Input text to link/disambiguate.
    text: str = Field(..., description='Text for entity linking or disambiguation.')
    # Empty list -> EL; list of (start, length) tuples -> ED over given mentions.
    spans: Optional[List[Span]] = Field(None, description="\nFor EL: the spans field needs to be set to an empty list. \n\nFor ED: spans should consist of a list of tuples, where each tuple refers to \nthe start position and length of a mention.\n\nThis is used when mentions are already identified and disambiguation is only \nneeded. Each tuple represents start position and length of mention (in \ncharacters); e.g., `[(0, 8), (15,11)]` for mentions 'Nijmegen' and \n'Netherlands' in text 'Nijmegen is in the Netherlands'.\n")
    # Which NER tagger backend to use.
    tagger: Literal[('ner-fast', 'ner-fast-with-lowercase')] = Field('ner-fast', description='NER tagger to use.')

    class Config():
        # Example payload shown in the generated API docs.
        schema_extra = {'example': {'text': "If you're going to try, go all the way - Charles Bukowski.", 'spans': [(41, 16)], 'tagger': 'ner-fast'}}

    def response(self):
        """Dispatch to the handler for the selected tagger and return its response."""
        handler = handlers[self.tagger]
        response = handler.generate_response(text=self.text, spans=self.spans)
        return response
def seenArticle(articles):
    """Mark articles as seen on the web UI (sets seen_web to now).

    Args:
        articles: iterable of (article_id, user_id) parameter tuples, one
            per row to update.

    Returns:
        True on success (exceptions from the DB layer propagate).
    """
    conn = getDb()
    cur = conn.cursor()
    sql = 'UPDATE article_feedback SET seen_web=CURRENT_TIMESTAMP WHERE article_id=%s AND user_id = %s'
    try:
        cur.executemany(sql, articles)
        conn.commit()
    finally:
        # BUG FIX: previously the cursor leaked (and no commit happened) if
        # executemany raised; always release the cursor.
        cur.close()
    return True
def get_is(args, gen_net: nn.Module, num_img):
    """Generate ~num_img images with *gen_net* and return their Inception Score mean."""
    gen_net = gen_net.eval()
    n_batches = num_img // args.eval_batch_size
    img_list = list()
    for _ in range(n_batches):
        noise = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.eval_batch_size, args.latent_dim)))
        # Map generator output from [-1, 1] to uint8 [0, 255], NHWC, on CPU.
        batch_imgs = gen_net(noise).mul_(127.5).add_(127.5).clamp_(0.0, 255.0).permute(0, 2, 3, 1).to('cpu', torch.uint8).numpy()
        img_list.extend(list(batch_imgs))
    logger.info('calculate Inception score...')
    (mean, _std) = get_inception_score(img_list)
    return mean
def verify(pols, sols):
    """Evaluate *pols* at each solution string and print the accumulated residual sum."""
    from phcpy.solutions import strsol2dict, evaluate
    checksum = 0
    for sol in sols:
        # Residual of this solution: sum of the polynomial evaluations.
        residual = sum(evaluate(pols, strsol2dict(sol)))
        print(residual)
        checksum = checksum + residual
    print('the total check sum :', checksum)
class M(torch.nn.Module):
    """Toy model exercising Linear, Matmul and BatchMatmul wrappers
    (e.g. as a fixture for quantization tests); the duplicated branches
    are intentional."""

    def __init__(self) -> None:
        super().__init__()
        self.fc1 = torch.nn.Linear(10, 5)
        self.fc2 = torch.nn.Linear(5, 10)
        self.mm = Matmul()
        self.bmm = BatchMatmul()

    def forward(self, inp):
        hidden = self.fc2(self.fc1(inp))
        # Two identical matmul branches, each lifted to a batch dimension.
        left = self.mm(inp.T, hidden)
        left = left.unsqueeze(0)
        right = self.mm(inp.T, hidden)
        right = right.unsqueeze(0)
        # Two identical batched products, summed.
        return self.bmm(left, right) + self.bmm(left, right)
class Data2VecVisionForSemanticSegmentation(metaclass=DummyObject):
    """Placeholder class that errors out unless the 'torch' backend is installed."""
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Raises with an informative message when torch is unavailable.
        requires_backends(self, ['torch'])
class TestOptions(BaseOptions):
    """Evaluation options; unspecified values are back-filled from the saved train config."""

    def __init__(self):
        BaseOptions.__init__(self, print_opt=False)
        parser = self.parser
        parser.add_argument('--train_config', type=argparse.FileType(mode='r'), required=True, help='config file saved from model training')
        parser.add_argument('--partition', type=str, default='val', help='val or test')
        parser.add_argument('--dataset_name', type=str, required=True, help='name to describe test dataset when saving results, e.g. celebahq_pgan')
        parser.add_argument('--force_redo', action='store_true', help='force recompute results')
        parser.add_argument('--test_compression', type=int, help='jpeg compression level')
        parser.add_argument('--test_gamma', type=int, help='gamma adjustment level')
        parser.add_argument('--test_blur', type=int, help='blur level')
        parser.add_argument('--test_flip', action='store_true', help='flip all test images')
        parser.add_argument('--visualize', action='store_true', help='save visualizations when running test')
        parser.add_argument('--average_mode', help='which kind of patch averaging to use for visualizations [vote, before_softmax, after_softmax]')
        parser.add_argument('--topn', type=int, default=100, help='visualize top n')

    def parse(self):
        """Parse CLI options, then back-fill unspecified ones from the train config."""
        opt = super().parse()
        train_conf = yaml.load(opt.train_config, Loader=yaml.FullLoader)
        # Map each option string (e.g. '--topn') to its destination attribute.
        option_strings = {}
        for action_group in self.parser._action_groups:
            for action in action_group._group_actions:
                for option in action.option_strings:
                    option_strings[option] = action.dest
        # Options explicitly passed on the command line win over the train config.
        specified_options = set([option_strings[x] for x in sys.argv if (x in option_strings)])
        options_from_train = []
        for (k, v) in train_conf.items():
            if (k in ['real_im_path', 'fake_im_path', 'gpu_ids']):
                # Paths and devices are environment-specific; never inherit them.
                continue
            if (getattr(opt, k, None) is None):
                continue
            if (k not in specified_options):
                setattr(opt, k, v)
                options_from_train.append((k, v))
        print('Using the following options from the train configuration file:')
        print(options_from_train)
        if opt.real_im_path:
            # Sanity check: the path should correspond to the chosen partition.
            assert (opt.partition in opt.real_im_path)
            opt.real_im_path = opt.real_im_path.rstrip('/')
        if opt.fake_im_path:
            assert (opt.partition in opt.fake_im_path)
            opt.fake_im_path = opt.fake_im_path.rstrip('/')
        # Evaluation always loads the trained model deterministically.
        opt.load_model = True
        opt.model_seed = 0
        opt.isTrain = False
        return opt
class TestBiasCorrection(unittest.TestCase):
    """BiasCorrection should rewrite the int8 graph without adding or removing nodes."""

    def test_bias_correction(self):
        tf.compat.v1.disable_eager_execution()
        # Build a tiny conv + bias + relu graph as the quantization target.
        x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name='input')
        if (tf.version.VERSION <= '2.1.0'):
            # Older TF needs a non-placeholder op ahead of the conv here.
            x = tf.nn.relu(x)
        conv_weights = tf.compat.v1.get_variable('weight', [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer())
        conv_bias = tf.compat.v1.get_variable('bias', [32], initializer=tf.compat.v1.random_normal_initializer())
        conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding='SAME')
        conv_bias = tf.nn.bias_add(conv1, conv_bias)
        relu = tf.nn.relu(conv_bias, name='Relu_1')
        # Quantization patterns shipped with neural_compressor's TF adaptor.
        op_wise_sequences = TensorflowQuery(local_config_file=os.path.join(os.path.dirname(neural_compressor.__file__), 'adaptor/tensorflow.yaml')).get_eightbit_patterns()
        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            # Freeze variables into constants so the graph can be transformed.
            output_graph_def = graph_util.convert_variables_to_constants(sess=sess, input_graph_def=sess.graph_def, output_node_names=[relu.name.split(':')[0]])
            output_graph_def = QuantizeGraphHelper.remove_training_nodes(output_graph_def, protected_nodes=[relu.name.split(':')[0]])
            inputs = [x.name.split(':')[0]]
            outputs = [relu.name.split(':')[0]]
            op_wise_config = {'Conv2D': (False, 'minmax', False, 7.0)}
            # Quantize, then apply bias correction against the fp32 graph.
            (int8_graph_def, _, _) = QuantizeGraphForIntel(output_graph_def, inputs, outputs, op_wise_config, op_wise_sequences, 'cpu').do_transform()
            correct_graph_def = BiasCorrection(int8_graph_def, output_graph_def).do_transformation()
            self.assertEqual(len(correct_graph_def.node), len(int8_graph_def.node))
class Program():
    """Prefix-encoded expression tree over image features.

    ``prog`` lists cell indices in prefix (pre-order) order, ``mul`` the
    matching per-node multipliers, and ``arities`` maps a cell index to its
    arity (0, 1 or 2).
    """

    def __init__(self, prog, mul, imgFeats, arities):
        self.prog = prog
        self.mul = mul
        self.imgFeats = imgFeats
        self.arities = arities
        self.root = Node(None)

    def build(self, ind=0):
        """Materialize the tree from the prefix encoding (``ind`` is unused)."""
        self.buildInternal(self.root)

    def buildInternal(self, cur=None, count=0):
        """Recursively build the subtree rooted at *cur* from position *count*.

        Returns the last prefix position consumed by this subtree, so the
        next sibling continues from ``count + 1``.
        """
        if (count >= len(self.prog)):
            # Ran past the encoding: pad with a default leaf (index 0, mul 1).
            arity = 0
            ind = 0
            mul = 1.0
        else:
            ind = self.prog[count]
            mul = self.mul[count]
            arity = self.arities[ind]
        cur.build(ind, mul, arity)
        if (arity == 0):
            # Leaves consume the raw image features.
            cur.inpData = [self.imgFeats]
        elif (arity == 1):
            cur.next = [Node(cur)]
            count = self.buildInternal(cur.next[0], (count + 1))
        elif (arity == 2):
            cur.next = [Node(cur), Node(cur)]
            count = self.buildInternal(cur.next[0], (count + 1))
            count = self.buildInternal(cur.next[1], (count + 1))
        return count

    def flat(self):
        """Return the cell indices of all nodes in pre-order."""
        return self.flatInternal(self.root, [])

    def flatInternal(self, cur, flattened):
        # Pre-order: current node first, then its children left-to-right.
        flattened += [cur.cellInd]
        for e in cur.next:
            self.flatInternal(e, flattened)
        return flattened

    def topologicalSort(self):
        """Return nodes children-first (post-order), i.e. in evaluation order."""
        return self.topInternal(self.root, [])

    def topInternal(self, cur, flattened):
        # Post-order: children before the current node.
        for e in cur.next:
            self.topInternal(e, flattened)
        flattened += [cur]
        return flattened
def format_row(buffer, environment, results):
    """Render one LaTeX-style table row: buffer, environment, then highlighted results."""
    prefix = '{} & {}'.format(BUFFER_STRINGS[buffer], ENVIRONMENT_STRINGS[environment])
    highlights = set_highlights(results)
    formatted = [format_result(res, hl) for (res, hl) in zip(results, highlights)]
    # NOTE: the original literal ended with backslash-newline inside the
    # string, which Python treats as a line continuation — so the row ends
    # with a single trailing space (no backslash, no newline); preserved here.
    return '{} & {} '.format(prefix, ' & '.join(formatted))
def find_max_f1_subtree(tree, span):
    """Return the subtree of *tree* whose span best matches *span* by F1.

    Ties keep the first subtree encountered in ``tree.subtrees()`` order
    (same tie-breaking as ``max``). Assumes the tree has at least one
    subtree.
    """
    best_pair = None
    for subtree in tree.subtrees():
        score = span_f1(span, subtree.span)
        if best_pair is None or score > best_pair[1]:
            best_pair = (subtree, score)
    return best_pair[0]
class LSTMLatentLevel(LatentLevel):
    """Latent level whose inference and generative transforms are LSTMs.

    Either transform can be disabled by passing ``None`` as its config, in
    which case it degenerates to the identity function; the attribute-check
    guards below exist so identity lambdas (which have no ``step`` /
    ``re_init`` / ``parameters``) are safely skipped.
    """

    def __init__(self, level_config):
        super(LSTMLatentLevel, self).__init__(level_config)
        self._construct(level_config)

    def _construct(self, level_config):
        # Build the inference/generative LSTMs, falling back to identity
        # when the corresponding config is absent.
        if level_config['inference_config'] is not None:
            self.inference_model = LSTMNetwork(level_config['inference_config'])
        else:
            self.inference_model = (lambda x: x)
        if level_config['generative_config'] is not None:
            self.generative_model = LSTMNetwork(level_config['generative_config'])
        else:
            self.generative_model = (lambda x: x)
        self.latent = FullyConnectedLatentVariable(level_config['latent_config'])
        self.inference_procedure = level_config['inference_procedure']

    def _get_encoding_form(self, input):
        # Only the 'direct' procedure (input passed through unchanged) is
        # implemented.
        if self.inference_procedure == 'direct':
            return input
        raise NotImplementedError

    def infer(self, input):
        """Run the inference path: encode, transform, update the latent."""
        input = self._get_encoding_form(input)
        input = self.inference_model(input)
        self.latent.infer(input)

    def generate(self, input, gen, n_samples):
        """Run the generative path; *input* may be None (prior sampling)."""
        if input is not None:
            # Flatten (batch, samples, features) for the model, then restore.
            (b, s, n) = input.data.shape
            input = self.generative_model(input.view(b * s, n)).view(b, s, -1)
        return self.latent.generate(input, gen=gen, n_samples=n_samples)

    def step(self):
        """Advance the recurrent state of the latent and any LSTM submodels."""
        self.latent.step()
        # hasattr() replaces `'step' in dir(...)`: same truth value for these
        # objects, without rebuilding and sorting the full attribute list.
        if hasattr(self.inference_model, 'step'):
            self.inference_model.step()
        if hasattr(self.generative_model, 'step'):
            self.generative_model.step()

    def re_init(self):
        """Reset the recurrent state of the latent and any LSTM submodels."""
        self.latent.re_init()
        if hasattr(self.inference_model, 're_init'):
            self.inference_model.re_init()
        if hasattr(self.generative_model, 're_init'):
            self.generative_model.re_init()

    def inference_parameters(self):
        """Collect parameters of the inference path (model + latent)."""
        params = nn.ParameterList()
        if hasattr(self.inference_model, 'parameters'):
            params.extend(list(self.inference_model.parameters()))
        params.extend(list(self.latent.inference_parameters()))
        return params

    def generative_parameters(self):
        """Collect parameters of the generative path (model + latent)."""
        params = nn.ParameterList()
        if hasattr(self.generative_model, 'parameters'):
            params.extend(list(self.generative_model.parameters()))
        params.extend(list(self.latent.generative_parameters()))
        return params
def dataset(metadata_filename, args):
    """Build train/validation ``tf.data`` pipelines for ASR training.

    Reads a pipe-separated metadata file, drops utterances longer than
    ``max_length`` frames, splits train/valid (by the fixed KsponSpeech csv
    lists when ``args.dataset`` contains 'kspon', otherwise a random 1%
    holdout), and returns padded-batch datasets of
    (low-frame-rate feature matrix, token-id label sequence) pairs.

    Returns:
        (train_dataset, valid_dataset, train_steps, valid_steps)
    """
    batch_size = args.batch_size
    buffer_size = 20000  # shuffle buffer size for the training pipeline
    num_mels = args.num_mels
    max_length = 2000  # max frames kept per example (also caps label length)
    # Feature arrays and transcript files live next to the metadata file.
    input_dir = os.path.join(os.path.dirname(metadata_filename), 'inputs')
    label_dir = os.path.join(os.path.dirname(metadata_filename), 'labels')
    with open(metadata_filename, encoding='utf-8') as f:
        metadata = [line.strip().split('|') for line in f]
    # Assumes the last metadata column is the frame count — TODO confirm schema.
    metadata = [x for x in metadata if (int(x[(- 1)]) <= max_length)]
    # Column 4 presumably holds raw audio timesteps (samples); verify.
    timesteps = sum([int(x[4]) for x in metadata])
    sr = args.sample_rate
    hours = ((timesteps / sr) / 3600)
    log('Loaded metadata for {} examples ({:.2f} hours)'.format(len(metadata), hours))
    if ('kspon' in args.dataset):
        # KsponSpeech: split using the fixed train/test file lists in datasets/.
        with open(os.path.join('datasets/filter_train_list.csv')) as train_csv, open(os.path.join('datasets/filter_test_list.csv')) as test_csv:
            train_list = csv.reader(train_csv)
            valid_list = csv.reader(test_csv)
            train_list = [x[0] for x in train_list][1:]  # [1:] skips the csv header
            valid_list = [x[0] for x in valid_list][1:]
            all_list = [os.path.basename(x[0]) for x in metadata]
            # Boolean masks: which metadata rows belong to each split.
            train_intersect = np.in1d(all_list, train_list)
            valid_intersect = np.in1d(all_list, valid_list)
            train_meta = [x for (idx, x) in enumerate(metadata) if train_intersect[idx]]
            valid_meta = [x for (idx, x) in enumerate(metadata) if valid_intersect[idx]]
    else:
        # Other corpora: random 1% holdout for validation.
        (train_meta, valid_meta) = train_test_split(metadata, test_size=0.01, shuffle=True)
    train_steps = int(np.ceil((len(train_meta) / args.batch_size)))
    valid_steps = int(np.ceil((len(valid_meta) / args.batch_size)))
    # Columns -4 / -3 are assumed to be feature / label filenames — TODO confirm.
    train_input_path = [x[(- 4)] for x in train_meta]
    train_label_path = [x[(- 3)] for x in train_meta]
    valid_input_path = [x[(- 4)] for x in valid_meta]
    valid_label_path = [x[(- 3)] for x in valid_meta]
    def encode(input_path, label_path):
        # Eager (py_function) loader: read features, stack to low frame rate,
        # normalize per-utterance, and convert the transcript to token ids
        # wrapped in SOS/EOS.
        input = np.load(os.path.join(input_dir, input_path.numpy().decode('utf8'))).astype('float32')
        input = build_lfr(input)
        input = ((input - input.mean()) / input.std())
        with open(os.path.join(label_dir, label_path.numpy().decode('utf8')), 'r', encoding='utf-8') as f_in:
            label = f_in.readline()
        if (args.token_style == 'jamo'):
            # Decompose Hangul syllables into jamo before the id lookup.
            label = hangul_to_jamo(label)
            label = np.array((([_symbol_to_id[SOS]] + [_symbol_to_id[x] for x in label]) + [_symbol_to_id[EOS]])).astype('int32')
        else:
            label = np.array((([token_index[SOS]] + [token_index[x] for x in label]) + [token_index[EOS]])).astype('int32')
        return (input, label)
    def tf_encode(input_path, label_path):
        # Graph-side wrapper: run `encode` eagerly and restore static shapes
        # (py_function loses shape information).
        (result_input, result_label) = tf.py_function(encode, [input_path, label_path], [tf.float32, tf.int32])
        result_input.set_shape([None, (args.num_mels * args.lfr_m)])
        result_label.set_shape([None])
        return (result_input, result_label)
    def build_lfr(input):
        # Low-frame-rate stacking: every n-th frame becomes a window of m
        # consecutive frames concatenated along the feature axis.
        m = args.lfr_m
        n = args.lfr_n
        seq_len = len(input)
        seq_len_lfr = int(np.ceil((seq_len / n)))
        lfr_input = np.zeros((seq_len_lfr, (args.num_mels * m)))
        for i in range(seq_len_lfr):
            if (m <= (seq_len - (i * n))):
                lfr_input[i] = input[(i * n):((i * n) + m)].reshape((- 1))
            else:
                # Tail window shorter than m: reflect-pad to full width.
                num_pad = (m - (seq_len - (i * n)))
                frame = input[(i * n):]
                padded_frame = np.pad(frame, ((0, num_pad), (0, 0)), mode='reflect')
                lfr_input[i] = padded_frame.reshape((- 1))
        return lfr_input
    def filter_max_length(x, y):
        # Keep only examples whose features AND labels fit within max_length.
        return tf.logical_and((tf.shape(x)[0] <= max_length), (tf.size(y) <= max_length))
    train_dataset = tf.data.Dataset.from_tensor_slices((train_input_path, train_label_path))
    train_dataset = train_dataset.map(tf_encode, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    train_dataset = train_dataset.filter(filter_max_length)
    train_dataset = train_dataset.shuffle(buffer_size).padded_batch(batch_size, padded_shapes=([None, (num_mels * args.lfr_m)], [None]))
    train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
    # Validation: same mapping/filtering, but no shuffle or prefetch.
    valid_dataset = tf.data.Dataset.from_tensor_slices((valid_input_path, valid_label_path))
    valid_dataset = valid_dataset.map(tf_encode, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    valid_dataset = valid_dataset.filter(filter_max_length).padded_batch(batch_size, padded_shapes=([None, (num_mels * args.lfr_m)], [None]))
    return (train_dataset, valid_dataset, train_steps, valid_steps)
class Config():
    """Hyperparameter container for VAE training (class-level constants)."""
    batch_size = 100
    original_dim = 784  # flattened 28x28 input image
    latent_dim = 40
    encoder_arch = [[300, None], [300, None]]  # [units, activation] per hidden layer
    decoder_arch = [[300, None], [300, None]]
    number_epochs = 5000
    epsilon_std = 1.0  # std of the reparameterization noise
    early_stopping_epochs = 100  # patience before early stopping
    learning_rate = 0.0002
    kl_sample = True
    regularization = 'none'  # 'none' or 'warmup' (KL warm-up)
    # Derived at class-definition time from `regularization`; an unknown
    # name fails fast at import.
    if (regularization == 'none'):
        regularization_param = 0
    elif (regularization == 'warmup'):
        regularization_param = 200  # presumably the warm-up length — TODO confirm units
    else:
        raise Exception('Wrong name of regularizer!')
    dataset_name = 'mnistDynamic'
    data_type = 'binary'
    model = 'VAE'
    number_of_flows = 1
def plot_train_history(train_loss_history, val_loss_history, save_dir, save_title):
    """Plot train/validation BCE loss curves and save the figure as a PNG.

    The file is written to ``save_dir/<save_title>_train_history.png`` at
    300 dpi, and the figure is closed afterwards to free matplotlib memory.
    """
    (fig, axis) = plt.subplots()
    epochs = range(len(train_loss_history))
    axis.plot(epochs, train_loss_history, color='blue', label='train loss')
    axis.plot(epochs, val_loss_history, color='red', label='val loss')
    axis.set_xlabel('Epochs')
    axis.set_ylabel('BCE Loss')
    axis.grid(linestyle='--')
    axis.legend(loc='best')
    out_path = os.path.join(save_dir, f'{save_title}_train_history.png')
    fig.savefig(out_path, dpi=300)
    plt.close(fig)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.