code stringlengths 101 5.91M |
|---|
def get_nets(model_config):
    """Build and return the (generator, discriminator) pair for this config."""
    generator = get_generator(model_config)
    discriminator = get_discriminator(model_config)
    return (generator, discriminator)
class SDistCommand(sdist):
    # Custom `sdist` command: stages native sources and the visualization
    # bundle into the setup tree before building the source distribution.
    def run(self):
        # Copy native (non-Python) code into the setup tree so it ships
        # with the sdist.
        _copy_native_code_to_setup()
        # Build visualization assets only if missing/stale.
        build_vis_if_needed()
        # Delegate to the stock sdist behavior.
        sdist.run(self)
def get_optimizer(args, model):
    """Instantiate the optimizer and (optionally) an LR scheduler from args.optimizer.

    Args:
        args: namespace whose `optimizer` dict holds optimizer/scheduler
            hyper-parameters.
        model: the model whose parameters are optimized.

    Returns:
        (optimizer, lrate_scheduler) tuple; `lrate_scheduler` is None when
        `hparams.lr_scheduler` is falsy.

    Raises:
        ValueError: on an unsupported optimizer or scheduler name.
    """
    hparams = deepcopy(args.optimizer)
    hparams = standardize_otpmizers_params(hparams)
    hparams = Dict2Obj(hparams)
    op_col = {}  # settings actually used, collected for logging below
    params = _get_model_params_for_opt(args, model)
    if (hparams.name_optimizer == 'sgd'):
        # BUGFIX: torch SGD has no default learning rate — `lr` was omitted
        # and the call would fail; pass the configured value explicitly.
        optimizer = SGD(params=params, lr=hparams.lr, momentum=hparams.momentum, dampening=hparams.dampening, weight_decay=hparams.weight_decay, nesterov=hparams.nesterov)
        op_col['optim_name'] = hparams.name_optimizer
        op_col['lr'] = hparams.lr
        op_col['momentum'] = hparams.momentum
        op_col['dampening'] = hparams.dampening
        op_col['weight_decay'] = hparams.weight_decay
        op_col['nesterov'] = hparams.nesterov
    elif (hparams.name_optimizer == 'adam'):
        # BUGFIX: the configured lr was logged but never passed to Adam,
        # which silently fell back to its default; pass it explicitly.
        optimizer = Adam(params=params, lr=hparams.lr, betas=(hparams.beta1, hparams.beta2), eps=hparams.eps_adam, weight_decay=hparams.weight_decay, amsgrad=hparams.amsgrad)
        op_col['optim_name'] = hparams.name_optimizer
        op_col['lr'] = hparams.lr
        op_col['beta1'] = hparams.beta1
        op_col['beta2'] = hparams.beta2
        op_col['weight_decay'] = hparams.weight_decay
        op_col['amsgrad'] = hparams.amsgrad
    else:
        raise ValueError('Unsupported optimizer `{}` .... [NOT OK]'.format(args.optimizer['name']))
    if hparams.lr_scheduler:
        if (hparams.name_lr_scheduler == 'step'):
            lrate_scheduler = lr_scheduler.StepLR(optimizer, step_size=hparams.step_size, gamma=hparams.gamma, last_epoch=hparams.last_epoch)
            op_col['name_lr_scheduler'] = hparams.name_lr_scheduler
            op_col['step_size'] = hparams.step_size
            op_col['gamma'] = hparams.gamma
            op_col['last_epoch'] = hparams.last_epoch
        elif (hparams.name_lr_scheduler == 'cosine'):
            lrate_scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=hparams.t_max, eta_min=hparams.min_lr, last_epoch=hparams.last_epoch)
            op_col['name_lr_scheduler'] = hparams.name_lr_scheduler
            # BUGFIX: log the attributes the constructor actually consumed
            # (hparams.t_max / hparams.min_lr); the old code read
            # hparams.T_max / hparams.eta_min, which do not match.
            op_col['T_max'] = hparams.t_max
            op_col['eta_min'] = hparams.min_lr
            op_col['last_epoch'] = hparams.last_epoch
        elif (hparams.name_lr_scheduler == 'mystep'):
            lrate_scheduler = my_lr_scheduler.MyStepLR(optimizer, step_size=hparams.step_size, gamma=hparams.gamma, last_epoch=hparams.last_epoch, min_lr=hparams.min_lr)
            op_col['name_lr_scheduler'] = hparams.name_lr_scheduler
            op_col['step_size'] = hparams.step_size
            op_col['gamma'] = hparams.gamma
            op_col['min_lr'] = hparams.min_lr
            op_col['last_epoch'] = hparams.last_epoch
        elif (hparams.name_lr_scheduler == 'mycosine'):
            lrate_scheduler = my_lr_scheduler.MyCosineLR(optimizer, coef=hparams.coef, max_epochs=hparams.max_epochs, min_lr=hparams.min_lr, last_epoch=hparams.last_epoch)
            op_col['name_lr_scheduler'] = hparams.name_lr_scheduler
            op_col['coef'] = hparams.coef
            op_col['max_epochs'] = hparams.max_epochs
            op_col['min_lr'] = hparams.min_lr
            op_col['last_epoch'] = hparams.last_epoch
        elif (hparams.name_lr_scheduler == 'multistep'):
            lrate_scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=hparams.milestones, gamma=hparams.gamma, last_epoch=hparams.last_epoch)
            op_col['name_lr_scheduler'] = hparams.name_lr_scheduler
            op_col['milestones'] = hparams.milestones
            op_col['gamma'] = hparams.gamma
            op_col['last_epoch'] = hparams.last_epoch
        else:
            raise ValueError('Unsupported learning rate scheduler `{}` .... [NOT OK]'.format(hparams.name_lr_scheduler))
    else:
        lrate_scheduler = None
    DLLogger.log('Optimizer:\n{}'.format(format_dict_2_str(op_col)))
    return (optimizer, lrate_scheduler)
class BlockEnv():
    """MuJoCo block-dropping environment.

    One polygon is dropped into the scene per step; observations are rendered
    images. Because MuJoCo models are static, each step rebuilds the XML
    model containing every previously dropped object and restores their
    simulated state before dropping the new one.
    """
    def __init__(self, max_num_objects_dropped):
        # Mesh assets live next to this package under mujoco_data/stl/.
        self.asset_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'mujoco_data/stl/')
        self.img_dim = 64
        self.polygons = ['cube', 'horizontal_rectangle', 'tetrahedron']
        # Sampling bounds used for object appearance/scale after settling.
        self.settle_bounds = {'pos': [[(- 0.5), 0.5], [(- 0.5), 0], [1, 2]], 'hsv': [[0, 1], [0.5, 1], [0.5, 1]], 'scale': [[0.4, 0.4]], 'force': [[0, 0], [0, 0], [0, 0]]}
        # Wider positional bounds used when dropping a new object.
        self.drop_bounds = {'pos': [[(- 1.75), 1.75], [(- 0.5), 0], [0, 3]]}
        self.xml = XML(self.asset_path)
        xml_str = self.xml.instantiate()
        model = mjc.load_model_from_xml(xml_str)
        sim = mjc.MjSim(model)
        self.max_num_objects_dropped = max_num_objects_dropped
        self.logger = Logger(self.xml, sim, steps=(max_num_objects_dropped + 1), img_dim=self.img_dim)
        self.logger.log(0)
        # Rendering of the empty scene, kept for reference.
        self._blank_observation = self.get_observation()
        self.xml_actions_taken = []  # xml-space actions replayed on rebuild
        self.names = []  # body names of dropped objects, in drop order
        self.env_step = 0
        # Number of physics steps to let a dropped object settle.
        self.settle_steps = 2000
    def reset(self):
        """Rebuild an empty scene and return its (blank) observation."""
        xml = XML(self.asset_path)
        xml_str = xml.instantiate()
        model = mjc.load_model_from_xml(xml_str)
        sim = mjc.MjSim(model)
        self.logger = Logger(xml, sim, steps=(self.max_num_objects_dropped + 1), img_dim=self.img_dim)
        self.logger.log(0)
        self.xml_actions_taken = []
        self.names = []
        self.env_step = 0
        return self.get_observation()
    def get_observation(self):
        """Return the most recently logged rendering (first image in the log)."""
        (data, images, masks) = self.logger.get_logs()
        image = images[0]
        return image
    def sample_action(self):
        """Sample a uniformly random drop action, encoded as a model-space vector."""
        ply = random.choice(self.polygons)
        pos = utils.uniform(*self.drop_bounds['pos'])
        # Horizontal rectangles rotate about x; everything else about z.
        if ('horizontal' in ply):
            axis = [1, 0, 0]
        else:
            axis = [0, 0, 1]
        axangle = utils.random_axangle(axis=axis)
        scale = utils.uniform(*self.settle_bounds['scale'])
        rgba = self.sample_rgba_from_hsv(*self.settle_bounds['hsv'])
        xml_action = {'polygon': ply, 'pos': pos, 'axangle': axangle, 'scale': scale, 'rgba': rgba}
        return self.xml_action_to_model_action(xml_action)
    def sample_action_gaussian(self, mean, std):
        """Sample an action from a Gaussian around `mean` (e.g. for CEM planning).

        Layout assumed for mean/std: [0:3] polygon logits, [3:6] position,
        [9] rotation angle, [-3:] rgb — TODO confirm against the caller.
        """
        ply_t = 0.1
        # Additive smoothing so every polygon keeps nonzero probability.
        ply_p = ((mean[:3] + ply_t) / (mean[:3] + ply_t).sum())
        ply = np.random.choice(self.polygons, p=ply_p)
        std = np.maximum(std, 0.01)
        random_a = np.random.normal(mean, std)
        pos = np.clip(random_a[3:6], [x[0] for x in self.drop_bounds['pos']], [x[1] for x in self.drop_bounds['pos']])
        if ('horizontal' in ply):
            axis = [1, 0, 0]
        else:
            axis = [0, 0, 1]
        axangle = utils.random_axangle(axis=axis)
        axangle[(- 1)] = random_a[9]
        if ('horizontal' in ply):
            axangle[(- 1)] = 0
        scale = utils.uniform(*self.settle_bounds['scale'])
        rgba = np.clip(random_a[(- 3):], 0, 1)
        xml_action = {'polygon': ply, 'pos': pos, 'axangle': axangle, 'scale': scale, 'rgba': rgba}
        return self.xml_action_to_model_action(xml_action)
    def sample_multiple_action_gaussian(self, mean, std, num_actions):
        # Stack `num_actions` independent Gaussian samples into one array.
        return np.stack([self.sample_action_gaussian(mean, std) for _ in range(num_actions)])
    def get_obs_size(self):
        # Observations are square images of side img_dim.
        return (self.img_dim, self.img_dim)
    def get_actions_size(self):
        # NOTE(review): xml_action_to_model_action builds vectors of length 13
        # (3 one-hot + 3 pos + 4 axangle + 3 rgb) — confirm whether 15 here is
        # intentional padding or a stale constant.
        return 15
    def step(self, an_action):
        """Drop one object, settle the physics, and return the new observation."""
        xml = XML(self.asset_path)
        # Re-add every previous object, staggered in height so they can be
        # re-posed from their recorded state without initial interpenetration.
        for (ind, prev_action) in enumerate(self.xml_actions_taken):
            prev_action['pos'][(- 1)] = (ind * 2)
            xml.add_mesh(**prev_action)
        xml_action = self.model_action_to_xml_action(an_action)
        new_name = xml.add_mesh(**xml_action)
        self.names.append(new_name)
        xml_str = xml.instantiate()
        model = mjc.load_model_from_xml(xml_str)
        sim = mjc.MjSim(model)
        logger = Logger(xml, sim, steps=(self.max_num_objects_dropped + 1), img_dim=self.img_dim)
        # Restore the recorded state of every object except the new one.
        for a_block in self.names[:(- 1)]:
            self.set_block_info(sim, a_block, self.get_block_info(a_block))
        logger.hold_drop_execute([], self.names[(- 1)], self.settle_steps)
        self.logger = logger
        self.logger.log(0)
        self.xml_actions_taken.append(xml_action)
        self.sim = self.logger.sim
        return self.get_observation()
    def try_action(self, an_action):
        """Preview an action's observation without committing it to the env state."""
        xml = XML(self.asset_path)
        for (ind, prev_action) in enumerate(self.xml_actions_taken):
            prev_action['pos'][(- 1)] = (ind * 2)
            xml.add_mesh(**prev_action)
        xml_action = self.model_action_to_xml_action(an_action)
        new_name = xml.add_mesh(**xml_action)
        new_names = (self.names + [new_name])
        xml_str = xml.instantiate()
        model = mjc.load_model_from_xml(xml_str)
        sim = mjc.MjSim(model)
        logger = Logger(xml, sim, steps=(self.max_num_objects_dropped + 1), img_dim=self.img_dim)
        for a_block in self.names:
            self.set_block_info(sim, a_block, self.get_block_info(a_block))
        # Only a single physics step: preview, not a full settle.
        logger.hold_drop_execute([], new_name, 1)
        logger.log(0)
        # Temporarily swap loggers to render, then restore.
        original_logger = self.logger
        self.logger = logger
        obs = self.get_observation()
        self.logger = original_logger
        return obs
    def get_block_info(self, a_block):
        """Snapshot a body's pose and velocities from the current simulation."""
        info = {}
        # Body names end with a 2-character suffix after the polygon name —
        # presumably an index; verify against XML.add_mesh naming.
        info['poly'] = a_block[:(- 2)]
        info['pos'] = np.copy(self.logger.sim.data.get_body_xpos(a_block))
        info['quat'] = np.copy(self.logger.sim.data.get_body_xquat(a_block))
        info['vel'] = np.copy(self.logger.sim.data.get_body_xvelp(a_block))
        info['rot_vel'] = np.copy(self.logger.sim.data.get_body_xvelr(a_block))
        return info
    def set_block_info(self, sim, a_block, info):
        """Write a body's pose/velocities into `sim`'s joint state (free joint layout:
        qpos = 3 pos + 4 quat, qvel = 3 linear + 3 angular)."""
        start_ind = sim.model.get_joint_qpos_addr(a_block)[0]
        sim_state = sim.get_state()
        if ('pos' in info):
            sim_state.qpos[start_ind:(start_ind + 3)] = np.array(info['pos'])
        if ('quat' in info):
            sim_state.qpos[(start_ind + 3):(start_ind + 7)] = info['quat']
        else:
            # Identity orientation when no quaternion is supplied.
            sim_state.qpos[(start_ind + 3):(start_ind + 7)] = np.array([1, 0, 0, 0])
        start_ind = sim.model.get_joint_qvel_addr(a_block)[0]
        if ('vel' in info):
            sim_state.qvel[start_ind:(start_ind + 3)] = info['vel']
        else:
            sim_state.qvel[start_ind:(start_ind + 3)] = np.zeros(3)
        if ('rot_vel' in info):
            sim_state.qvel[(start_ind + 3):(start_ind + 6)] = info['rot_vel']
        else:
            sim_state.qvel[(start_ind + 3):(start_ind + 6)] = np.zeros(3)
        sim.set_state(sim_state)
    def model_action_to_xml_action(self, model_action):
        # Decode [one-hot polygon | pos(3) | axangle(4) | rgb(3)] into kwargs
        # for XML.add_mesh; scale is fixed and alpha forced to 1.
        ans = {'polygon': self.polygons[np.where((model_action == 1))[0][0]], 'pos': model_action[3:6], 'axangle': model_action[6:10], 'scale': 0.4, 'rgba': np.concatenate([model_action[10:], np.array([1])])}
        return ans
    def xml_action_to_model_action(self, xml_action):
        """Encode an xml-space action dict into the flat 13-dim model vector."""
        num_type_polygons = len(self.polygons)
        total_size_of_array = 13
        ans = np.zeros(total_size_of_array)
        poly_name = xml_action['polygon']
        val = self.polygons.index(poly_name)
        ans[val] = 1
        for i in range(len(xml_action['pos'])):
            ans[(num_type_polygons + i)] = xml_action['pos'][i]
        for i in range(len(xml_action['axangle'])):
            ans[((num_type_polygons + 3) + i)] = xml_action['axangle'][i]
        for i in range(3):
            # Only rgb is encoded; alpha is dropped (re-added on decode).
            ans[(((num_type_polygons + 3) + 4) + i)] = xml_action['rgba'][i]
        return ans
    def sample_rgba_from_hsv(self, *hsv_bounds):
        # Uniform sample in HSV space, converted to RGB with alpha 1.
        hsv = utils.uniform(*hsv_bounds)
        rgba = (list(colorsys.hsv_to_rgb(*hsv)) + [1])
        return rgba
    def compute_accuracy(self, true_data):
        """Compare the current simulated state against ground-truth data."""
        state = self.logger.get_state()
        return (self.compare_matching(state, true_data['data']), state)
    def compare_matching(self, data, mjc_data, threshold=0.2):
        """Greedily match predicted objects to ground-truth objects and report
        whether the worst matched error stays below `threshold`.

        NOTE(review): if `data` is empty, max_pos/max_rgb are never assigned
        and the return raises UnboundLocalError — confirm callers guarantee
        at least one predicted object.
        """
        mjc_data = copy.deepcopy(mjc_data)
        max_err = (- float('inf'))
        for (pred_name, pred_datum) in data.items():
            (err, mjc_match, err_pos, err_rgb) = self._best_obj_match(pred_datum, mjc_data)
            # Greedy one-to-one assignment: consume the matched target.
            del mjc_data[mjc_match]
            if (err > max_err):
                max_err = err
                max_pos = err_pos
                max_rgb = err_rgb
            if (len(mjc_data) == 0):
                break
        correct = (max_err < threshold)
        return (correct, max_pos, max_rgb)
    def _best_obj_match(self, pred, targs):
        """Return (error, name, pos_err, rgb_err) of the target closest to `pred`
        under summed MSE of position and color (final logged frame)."""
        def np_mse(x1, x2):
            return np.square((x1 - x2)).mean()
        pos = pred['qpos'][:3]
        rgb = pred['rgba']
        best_err = float('inf')
        for (obj_name, obj_data) in targs.items():
            obj_pos = obj_data['xpos'][(- 1)]
            obj_rgb = obj_data['xrgba'][(- 1)]
            pos_err = np_mse(pos, obj_pos)
            rgb_err = np_mse(rgb, obj_rgb)
            err = (pos_err + rgb_err)
            if (err < best_err):
                best_err = err
                best_obj = obj_name
                best_pos = pos_err
                best_rgb = rgb_err
        return (best_err, best_obj, best_pos, best_rgb)
def run_keyboard_agent(env, render_mode='readable'):
    """Run one interactive episode, prompting the user for every action.

    Args:
        env: environment exposing reset/step/render/goal_reached.
        render_mode: mode string forwarded to env.render.

    Returns:
        (total_reward, total_steps, goal_reached) — the last element reports
        whether the episode ended because the goal was reached.
    """
    print(LINE_BREAK2)
    print('STARTING EPISODE')
    print(LINE_BREAK2)
    o = env.reset()
    env.render(render_mode)
    total_reward = 0
    total_steps = 0
    done = False
    while (not done):
        a = choose_action(env)
        (o, r, done, _) = env.step(a)
        total_reward += r
        total_steps += 1
        print(('\n' + LINE_BREAK2))
        # BUGFIX: corrected misspelled banner text ('RECIEVED').
        print('OBSERVATION RECEIVED')
        print(LINE_BREAK2)
        env.render(render_mode)
        print(f'Reward={r}')
        print(f'Done={done}')
        print(LINE_BREAK)
    if done:
        # Re-purpose `done` to report goal success rather than termination.
        done = env.goal_reached()
    return (total_reward, total_steps, done)
def get_voxel_cluster(voxel_centers, neighbourhood_threshold):
    """Cluster voxels by connectivity: two voxels belong to the same cluster
    when linked through neighbours within `neighbourhood_threshold`.

    Neighbour lists are computed in parallel over a shared read-only buffer;
    the clusters themselves come from a BFS flood fill over those lists.
    Returns an int array of cluster ids, one per voxel.
    """
    # Share the centers with worker processes via a raw (lock-free) buffer
    # to avoid pickling the whole array per task.
    voxel_centers_mp_buffer = multiprocessing.RawArray('d', (voxel_centers.shape[0] * voxel_centers.shape[1]))
    voxel_centers_mp_buffer_np = np.frombuffer(voxel_centers_mp_buffer, dtype=np.float64).reshape(voxel_centers.shape)
    np.copyto(voxel_centers_mp_buffer_np, voxel_centers)
    def init_worker(voxel_centers_mp_buffer, voxel_shape, neighbourhood_threshold):
        # Stash the shared buffer in module globals so `neighbors_in_bubble`
        # (executed in the worker) can reach it without re-serialization.
        global voxel_centers_mp
        global voxel_shape_mp
        global neighbourhood_threshold_mp
        voxel_centers_mp = voxel_centers_mp_buffer
        voxel_shape_mp = voxel_shape
        neighbourhood_threshold_mp = neighbourhood_threshold
    with multiprocessing.Pool(4, initializer=init_worker, initargs=(voxel_centers_mp_buffer, voxel_centers.shape, neighbourhood_threshold)) as p:
        # res[i] = indices of voxels within the threshold of voxel i.
        res = p.map(neighbors_in_bubble, voxel_centers)
    # -1 marks "not yet assigned to a cluster".
    voxel_cluster = np.repeat((- 1), voxel_centers.shape[0])
    max_cluster_num = 0
    for i in range(len(voxel_centers)):
        if (voxel_cluster[i] < 0):
            # BFS flood fill from voxel i over the neighbour lists.
            queue = deque([i])
            while queue:
                idx = queue.popleft()
                if (voxel_cluster[idx] < 0):
                    voxel_cluster[idx] = max_cluster_num
                    queue.extend(res[idx])
            max_cluster_num += 1
    return voxel_cluster
class GaussianLSTMPolicy(StochasticPolicy):
    """LSTM policy with a Gaussian action distribution (TF1-style graph).

    Actions are sampled as mean + exp(log_std) * noise from an LSTM whose
    input is the observation, optionally concatenated with the previous
    action (`state_include_action`). Only akro.Box action spaces are
    supported.
    """
    def __init__(self, env_spec, hidden_dim=32, name='GaussianLSTMPolicy', hidden_nonlinearity=tf.nn.tanh, hidden_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), hidden_b_init=tf.zeros_initializer(), recurrent_nonlinearity=tf.nn.sigmoid, recurrent_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), output_nonlinearity=None, output_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), output_b_init=tf.zeros_initializer(), hidden_state_init=tf.zeros_initializer(), hidden_state_init_trainable=False, cell_state_init=tf.zeros_initializer(), cell_state_init_trainable=False, forget_bias=True, learn_std=True, std_share_network=False, init_std=1.0, layer_normalization=False, state_include_action=True):
        if (not isinstance(env_spec.action_space, akro.Box)):
            raise ValueError('GaussianLSTMPolicy only works with akro.Box action space, but not {}'.format(env_spec.action_space))
        super().__init__(name, env_spec)
        self._obs_dim = env_spec.observation_space.flat_dim
        self._action_dim = env_spec.action_space.flat_dim
        self._hidden_dim = hidden_dim
        self._hidden_nonlinearity = hidden_nonlinearity
        self._hidden_w_init = hidden_w_init
        self._hidden_b_init = hidden_b_init
        self._recurrent_nonlinearity = recurrent_nonlinearity
        self._recurrent_w_init = recurrent_w_init
        self._output_nonlinearity = output_nonlinearity
        self._output_w_init = output_w_init
        self._output_b_init = output_b_init
        self._hidden_state_init = hidden_state_init
        self._hidden_state_init_trainable = hidden_state_init_trainable
        self._cell_state_init = cell_state_init
        self._cell_state_init_trainable = cell_state_init_trainable
        self._forget_bias = forget_bias
        self._learn_std = learn_std
        self._std_share_network = std_share_network
        self._init_std = init_std
        self._layer_normalization = layer_normalization
        self._state_include_action = state_include_action
        self._f_step_mean_std = None
        # When the previous action is fed back, the LSTM input widens.
        if state_include_action:
            self._input_dim = (self._obs_dim + self._action_dim)
        else:
            self._input_dim = self._obs_dim
        self.model = GaussianLSTMModel(output_dim=self._action_dim, hidden_dim=hidden_dim, name='GaussianLSTMModel', hidden_nonlinearity=hidden_nonlinearity, hidden_w_init=hidden_w_init, hidden_b_init=hidden_b_init, recurrent_nonlinearity=recurrent_nonlinearity, recurrent_w_init=recurrent_w_init, output_nonlinearity=output_nonlinearity, output_w_init=output_w_init, output_b_init=output_b_init, hidden_state_init=hidden_state_init, hidden_state_init_trainable=hidden_state_init_trainable, cell_state_init=cell_state_init, cell_state_init_trainable=cell_state_init_trainable, forget_bias=forget_bias, layer_normalization=layer_normalization, learn_std=learn_std, std_share_network=std_share_network, init_std=init_std)
        # Per-environment rollout state (vectorized sampling).
        self._prev_actions = None
        self._prev_hiddens = None
        self._prev_cells = None
        self._dist = None
        self._init_hidden = None
        self._init_cell = None
        self._initialize()
    def _initialize(self):
        """Build the TF graph and compile the single-step forward callable."""
        with tf.compat.v1.variable_scope(self.name) as vs:
            self._variable_scope = vs
            # (batch, time, input) for full sequences; (batch, input) plus
            # hidden/cell placeholders for one-step inference.
            state_input = tf.compat.v1.placeholder(shape=(None, None, self._input_dim), name='state_input', dtype=tf.float32)
            step_input_var = tf.compat.v1.placeholder(shape=(None, self._input_dim), name='step_input', dtype=tf.float32)
            step_hidden_var = tf.compat.v1.placeholder(shape=(None, self._hidden_dim), name='step_hidden_input', dtype=tf.float32)
            step_cell_var = tf.compat.v1.placeholder(shape=(None, self._hidden_dim), name='step_cell_input', dtype=tf.float32)
            (self._dist, step_mean, step_log_std, step_hidden, step_cell, self._init_hidden, self._init_cell) = self.model.build(state_input, step_input_var, step_hidden_var, step_cell_var).outputs
            # make_callable gives a fast, pre-bound session call for sampling.
            self._f_step_mean_std = tf.compat.v1.get_default_session().make_callable([step_mean, step_log_std, step_hidden, step_cell], feed_list=[step_input_var, step_hidden_var, step_cell_var])
    def build(self, state_input, name=None):
        """Re-build the model graph on a new `state_input` within this policy's scope."""
        with tf.compat.v1.variable_scope(self._variable_scope):
            (_, step_input, step_hidden, step_cell) = self.model.inputs
            return self.model.build(state_input, step_input, step_hidden, step_cell, name=name)
    def input_dim(self):
        # NOTE(review): upstream garage exposes this as a @property — confirm
        # how callers invoke it before changing.
        return self._input_dim
    def vectorized(self):
        # This policy supports batched rollouts.
        return True
    def reset(self, do_resets=None):
        """Reset rollout state for the environments flagged in `do_resets`."""
        if (do_resets is None):
            do_resets = np.array([True])
        # (Re-)allocate per-env buffers when the batch size changed.
        if ((self._prev_actions is None) or (len(do_resets) != len(self._prev_actions))):
            self._prev_actions = np.zeros((len(do_resets), self.action_space.flat_dim))
            self._prev_hiddens = np.zeros((len(do_resets), self._hidden_dim))
            self._prev_cells = np.zeros((len(do_resets), self._hidden_dim))
        self._prev_actions[do_resets] = 0.0
        self._prev_hiddens[do_resets] = self._init_hidden.eval()
        self._prev_cells[do_resets] = self._init_cell.eval()
    def get_action(self, observation):
        """Sample a single action; returns (action, per-step agent info)."""
        (actions, agent_infos) = self.get_actions([observation])
        return (actions[0], {k: v[0] for (k, v) in agent_infos.items()})
    def get_actions(self, observations):
        """Sample actions for a batch of observations, advancing LSTM state."""
        observations = self.observation_space.flatten_n(observations)
        if self._state_include_action:
            assert (self._prev_actions is not None)
            all_input = np.concatenate([observations, self._prev_actions], axis=(- 1))
        else:
            all_input = observations
        (means, log_stds, hidden_vec, cell_vec) = self._f_step_mean_std(all_input, self._prev_hiddens, self._prev_cells)
        # Reparameterized sample: mean + std * N(0, 1).
        rnd = np.random.normal(size=means.shape)
        samples = ((rnd * np.exp(log_stds)) + means)
        samples = self.action_space.unflatten_n(samples)
        prev_actions = self._prev_actions
        self._prev_actions = samples
        self._prev_hiddens = hidden_vec
        self._prev_cells = cell_vec
        agent_infos = dict(mean=means, log_std=log_stds)
        if self._state_include_action:
            agent_infos['prev_action'] = np.copy(prev_actions)
        return (samples, agent_infos)
    def distribution(self):
        # The tf.distributions object built by the model.
        return self._dist
    def state_info_specs(self):
        # Extra per-step info the sampler must record (previous action only
        # when it is fed back into the network).
        if self._state_include_action:
            return [('prev_action', (self._action_dim,))]
        return []
    def clone(self, name):
        """Return a new policy with identical hyper-parameters and copied weights."""
        new_policy = self.__class__(name=name, env_spec=self._env_spec, hidden_dim=self._hidden_dim, hidden_nonlinearity=self._hidden_nonlinearity, hidden_w_init=self._hidden_w_init, hidden_b_init=self._hidden_b_init, recurrent_nonlinearity=self._recurrent_nonlinearity, recurrent_w_init=self._recurrent_w_init, output_nonlinearity=self._output_nonlinearity, output_w_init=self._output_w_init, output_b_init=self._output_b_init, hidden_state_init=self._hidden_state_init, hidden_state_init_trainable=self._hidden_state_init_trainable, cell_state_init=self._cell_state_init, cell_state_init_trainable=self._cell_state_init_trainable, forget_bias=self._forget_bias, learn_std=self._learn_std, std_share_network=self._std_share_network, init_std=self._init_std, layer_normalization=self._layer_normalization, state_include_action=self._state_include_action)
        new_policy.model.parameters = self.model.parameters
        return new_policy
    def __getstate__(self):
        # Drop unpicklable TF graph objects; they are rebuilt on unpickle.
        new_dict = super().__getstate__()
        del new_dict['_f_step_mean_std']
        del new_dict['_dist']
        del new_dict['_init_hidden']
        del new_dict['_init_cell']
        return new_dict
    def __setstate__(self, state):
        super().__setstate__(state)
        # Rebuild the TF graph that __getstate__ stripped.
        self._initialize()
def parse_options(root_path, is_train=True):
    """Parse CLI options, configure (optional) distributed training and seeding.

    Returns the option dict with `dist`, `rank`, `world_size`, `auto_resume`
    and `manual_seed` filled in.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, required=True, help='Path to option YAML file.')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none', help='job launcher')
    parser.add_argument('--auto_resume', action='store_true')
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()

    opt = parse(args.opt, root_path, is_train=is_train, debug=args.debug)
    opt['auto_resume'] = args.auto_resume

    # Any launcher other than 'none' enables distributed mode.
    if args.launcher == 'none':
        opt['dist'] = False
        print('Disable distributed.', flush=True)
    else:
        opt['dist'] = True
        if args.launcher == 'slurm' and 'dist_params' in opt:
            init_dist(args.launcher, **opt['dist_params'])
        else:
            init_dist(args.launcher)
    opt['rank'], opt['world_size'] = get_dist_info()

    # Seed each process, offset by rank so workers differ deterministically.
    seed = opt.get('manual_seed')
    if seed is None:
        seed = random.randint(1, 10000)
    opt['manual_seed'] = seed
    set_random_seed(seed + opt['rank'])
    return opt
def load_unstructured_data(input):
    """Read a document (pdf/docx/html/txt/md) and return cleaned plain text.

    Newlines are stripped, characters normalized via uni_pro, and runs of
    whitespace collapsed to single spaces.

    Raises:
        ValueError: if the file extension is not supported. (Previously an
        unknown extension fell through and crashed with UnboundLocalError.)
    """
    # NOTE: parameter name `input` shadows the builtin; kept for
    # backward-compatible keyword calls.
    if input.endswith('pdf'):
        text = read_pdf(input)
    elif input.endswith('docx'):
        text = read_docx(input)
    elif input.endswith('html'):
        text = read_html(input)
    elif input.endswith('txt'):
        text = read_txt(input)
    elif input.endswith('md'):
        text = read_md(input)
    else:
        raise ValueError('Unsupported file type: {}'.format(input))
    # Removing '\n' also removes every '\n\n' sequence, so the former
    # separate replace('\n\n', '') pass was dead code and has been dropped.
    text = text.replace('\n', '')
    text = uni_pro(text)
    text = re.sub('\\s+', ' ', text)
    return text
def adb_pull(src_path, dst_path, serialno):
    """Copy `src_path` from the device `serialno` to `dst_path` on the host.

    Best-effort: failures are reported on stderr rather than raised, so the
    caller's flow continues.
    """
    try:
        sh.adb('-s', serialno, 'pull', src_path, dst_path)
    except Exception as e:
        message = 'Error msg: %s' % e
        six.print_(message, file=sys.stderr)
class ReproduciblePaths():
    # Registry of output directories (and display names) used to reproduce
    # each figure of the paper. `plus_base` prefixes `base` onto every entry,
    # which is why figure123/figure6 are assigned twice (raw names, then
    # full paths).
    base = 'outputs/final/'
    figure123 = ['SyntheticGPGP', 'SyntheticQuadraticLinear', 'SyntheticTwoMoonsRF']
    figure123 = plus_base(base, figure123)
    figure4 = ['SmallMNISTBNN', 'SmallFMNISTResNet']
    figure4names = ['BNN MNIST', 'ResNet-18 Fashion-MNIST']
    figure5 = (base + 'LargeCIFAR100ResNet')
    figure5name = 'ResNet CIFAR100'
    figure6 = ['LargeFMNISTResNet', 'LargeCIFAR10ResNet', 'LargeCIFAR100WideResNet', 'LargeCIFAR10ResNetAccuracy']
    figure6 = plus_base(base, figure6)
    figure6names = ['ResNet Fashion-MNIST', 'ResNet CIFAR-10', 'WideResNet CIFAR-100', 'Resnet CIFAR-10 Accuracy']
    figure7 = (base + 'LargeFMNISTBNN')
    # Kept for an earlier revision of figure 4.
    old_figure4 = 'outputs/final/LargeMNISTBNN'
def parse_translate_args():
    """Parse command-line arguments for FlowNMT translation/decoding.

    Returns:
        argparse.Namespace with batch/model/decoding settings.
    """
    parser = ArgumentParser(description='FlowNMT')
    parser.add_argument('--batch_size', type=int, default=512, metavar='N', help='input batch size for training (default: 512)')
    # BUGFIX: help text said "default: 65537" but the actual default is 524287.
    parser.add_argument('--seed', type=int, default=524287, metavar='S', help='random seed (default: 524287)')
    parser.add_argument('--model_path', help='path for saving model file.', required=True)
    parser.add_argument('--data_path', help='path for data file.', default=None)
    parser.add_argument('--subword', type=str, default='joint-bpe', choices=['joint-bpe', 'sep-bpe', 'word', 'bert-bpe', 'joint-spm'])
    parser.add_argument('--bucket_batch', type=int, default=0, help='whether bucket data based on tgt length in batching')
    parser.add_argument('--decode', choices=['argmax', 'iw', 'sample'], help='decoding algorithm', default='argmax')
    parser.add_argument('--tau', type=float, default=0.0, metavar='S', help='temperature for iw decoding (default: 0.)')
    parser.add_argument('--nlen', type=int, default=3, help='number of length candidates.')
    parser.add_argument('--ntr', type=int, default=1, help='number of samples per length candidate.')
    return parser.parse_args()
class Controller(nn.Module):
    """Abstract base controller for an external-memory network.

    Subclasses must implement `_init_weights` and `forward`; this base class
    only stores hyper-parameters and manages the LSTM hidden-state reset.
    """

    def __init__(self, args):
        super(Controller, self).__init__()
        # Copy the relevant hyper-parameters off the args object.
        for attr in ('logger', 'use_cuda', 'dtype', 'batch_size', 'input_dim',
                     'read_vec_dim', 'output_dim', 'hidden_dim', 'mem_hei',
                     'mem_wid', 'clip_value'):
            setattr(self, attr, getattr(args, attr))

    def _init_weights(self):
        # Subclass responsibility.
        raise NotImplementedError('not implemented in base calss')

    def print_model(self):
        """Log the module structure via the configured logger."""
        self.logger.warning('<===> Controller:')
        self.logger.warning(self)

    def _reset_states(self):
        # Wrap the zero tensors as Variables of the configured dtype.
        h0, c0 = self.lstm_hidden_ts
        self.lstm_hidden_vb = (Variable(h0).type(self.dtype), Variable(c0).type(self.dtype))

    def _reset(self):
        """Initialize weights, cast to dtype, and zero the LSTM state."""
        self._init_weights()
        self.type(self.dtype)
        self.print_model()
        self.lstm_hidden_ts = [
            torch.zeros(self.batch_size, self.hidden_dim),
            torch.zeros(self.batch_size, self.hidden_dim),
        ]
        self._reset_states()

    def forward(self, input_vb):
        # Subclass responsibility.
        raise NotImplementedError('not implemented in base calss')
def rescale_zero_terminal_snr(betas):
    """Rescale a diffusion beta schedule so the terminal step has zero SNR.

    Implements the rescaling of "Common Diffusion Noise Schedules and Sample
    Steps are Flawed" (Lin et al., 2023): shift sqrt(alpha_bar) so its final
    value is 0, rescale so its first value is unchanged, then convert back
    to per-step betas.
    """
    # Work in sqrt(cumulative-alpha) space.
    cumprod_sqrt = torch.cumprod(1.0 - betas, dim=0).sqrt()
    first = cumprod_sqrt[0].clone()
    last = cumprod_sqrt[(- 1)].clone()
    # Shift the terminal value to zero, keeping the first value fixed.
    cumprod_sqrt -= last
    cumprod_sqrt *= (first / (first - last))
    rescaled_cumprod = cumprod_sqrt ** 2
    # Recover per-step alphas from the cumulative product, then betas.
    step_alphas = rescaled_cumprod[1:] / rescaled_cumprod[:(- 1)]
    step_alphas = torch.cat([rescaled_cumprod[0:1], step_alphas])
    return 1 - step_alphas
_torch
class TestActivations(unittest.TestCase):
    """Sanity checks for the activation-function registry."""

    def test_gelu_versions(self):
        # The exact python gelu must agree with the torch builtin, while the
        # tanh-approximation variant must differ from it.
        sample = torch.tensor([(- 100), (- 1), (- 0.1), 0, 0.1, 1.0, 100])
        builtin_gelu = get_activation('gelu')
        self.assertTrue(torch.allclose(gelu_python(sample), builtin_gelu(sample)))
        self.assertFalse(torch.allclose(gelu_python(sample), gelu_new(sample)))

    def test_gelu_10(self):
        # gelu_10 clips at 10 but must match plain gelu below the clip point.
        sample = torch.tensor([(- 100), (- 1), (- 0.1), 0, 0.1, 1.0, 100])
        builtin_gelu = get_activation('gelu')
        clipped_gelu = get_activation('gelu_10')
        expected = builtin_gelu(sample)
        clipped = clipped_gelu(sample)
        below_clip = torch.where((clipped < 10.0), 1, 0)
        self.assertTrue((torch.max(clipped).item() == 10.0))
        self.assertTrue(torch.allclose((expected * below_clip), (clipped * below_clip)))

    def test_get_activation(self):
        # Every registered name resolves; unknown names raise KeyError.
        for name in ('gelu', 'gelu_10', 'gelu_fast', 'gelu_new', 'gelu_python',
                     'gelu_pytorch_tanh', 'linear', 'mish', 'quick_gelu',
                     'relu', 'sigmoid', 'silu', 'swish', 'tanh'):
            get_activation(name)
        with self.assertRaises(KeyError):
            get_activation('bogus')
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        # The registry must hand out fresh objects, not shared singletons.
        first = get_activation('gelu')
        first.a = 1
        second = get_activation('gelu')
        self.assertEqual(first.a, 1)
        with self.assertRaises(AttributeError):
            _ = second.a
def print_info(append_detail=False):
    """Log build metadata (version, builder, build time, commit id) at startup.

    Reads the COMMIT_INFO file written by build.sh next to the installed
    package; logs a warning-style notice and returns early when missing.
    Note: `append_detail` is currently unused; kept for interface
    compatibility.
    """
    package_dir = os.path.dirname(trainer.__file__)
    file_path = os.path.join(package_dir, 'COMMIT_INFO')
    if not os.path.exists(file_path):
        logger.info('Whl is not built by sh build.sh, please be careful.')
        return
    # COMMIT_INFO layout: commit id, builder, build time — one per line.
    with open(file_path, encoding='utf-8') as fd:
        commit_id, user, time = (fd.readline().strip() for _ in range(3))
    logger.info(trainer.logo_string)
    separator = '-' * 30
    logger.info(separator)
    logger.info('Penrose version: %s', trainer.__version__)
    logger.info('Build by: %s', user)
    logger.info('Build time: %s', time)
    logger.info('Commit id: %s', commit_id)
    logger.info('Pid: %s', os.getpid())
    logger.info('CWD: %s', os.getcwd())
    logger.info(separator)
def display_config():
    """Pretty-print every parsed command-line setting, one per line."""
    print('')
    print('# Video Super Resolution - Pytorch implementation #')
    print('')
    print('')
    print('-------YOUR SETTINGS_________')
    # Right-align each option name in a 15-character column.
    for name in vars(args):
        value = getattr(args, name)
        print(f'{str(name):>15}: {str(value)}')
    print('')
def define_constrast_g(output_nc=3, ngf=64, z_nc=512, img_f=512, L=1, layers=5, norm='instance', activation='ReLU', output_scale=1, use_spect=True, use_coord=False, use_attn=True, init_type='orthogonal', gpu_ids=[]):
    """Construct the contrastive residual generator and initialize its weights.

    All architecture hyper-parameters are forwarded to ContrastResGenerator;
    the finished network is passed through init_net for weight init and
    (multi-)GPU placement.
    """
    net = ContrastResGenerator(output_nc, ngf, z_nc, img_f, L, layers, norm, activation, output_scale, use_spect, use_coord, use_attn)
    # NOTE(review): `activation` is forwarded as init_net's third argument —
    # confirm init_net expects an activation name in that slot (this position
    # is often an init gain in similar codebases).
    return init_net(net, init_type, activation, gpu_ids)
def get_vlnbert_models(args, config=None):
    """Build the NavCMT vision-language navigation model from a BERT/XLM-R config.

    Loads checkpoint weights from args.bert_ckpt_file (if given), normalizes
    their key names, configures the cross-modal transformer from `args`, and
    instantiates the model with those weights.

    Note: `config` is currently unused; kept for interface compatibility.
    """
    from transformers import PretrainedConfig
    from models.vilmodel_cmt import NavCMT

    model_class = NavCMT
    model_name_or_path = args.bert_ckpt_file
    new_ckpt_weights = {}
    if (model_name_or_path is not None):
        ckpt_weights = torch.load(model_name_or_path)
        for (k, v) in ckpt_weights.items():
            if k.startswith('module'):
                # Strip DataParallel's 'module.' prefix.
                new_ckpt_weights[k[7:]] = v
            else:
                if k.startswith('next_action'):
                    # Pre-training action head weights live under `bert.`.
                    k = ('bert.' + k)
                new_ckpt_weights[k] = v
    # RxR (or an xlm tokenizer) uses the multilingual XLM-R backbone.
    if ((args.dataset == 'rxr') or (args.tokenizer == 'xlm')):
        cfg_name = 'xlm-roberta-base'
    else:
        cfg_name = 'bert-base-uncased'
    vis_config = PretrainedConfig.from_pretrained(cfg_name)
    if ((args.dataset == 'rxr') or (args.tokenizer == 'xlm')):
        vis_config.type_vocab_size = 2
    vis_config.image_feat_size = args.image_feat_size
    vis_config.angle_feat_size = args.angle_feat_size
    vis_config.num_l_layers = args.num_l_layers
    vis_config.num_r_layers = 0
    vis_config.num_h_layers = args.num_h_layers
    vis_config.num_x_layers = args.num_x_layers
    vis_config.hist_enc_pano = args.hist_enc_pano
    vis_config.num_h_pano_layers = args.hist_pano_num_layers
    vis_config.fix_lang_embedding = args.fix_lang_embedding
    vis_config.fix_hist_embedding = args.fix_hist_embedding
    vis_config.fix_obs_embedding = args.fix_obs_embedding
    vis_config.update_lang_bert = (not args.fix_lang_embedding)
    vis_config.output_attentions = True
    vis_config.pred_head_dropout_prob = 0.1
    vis_config.no_lang_ca = args.no_lang_ca
    vis_config.act_pred_token = args.act_pred_token
    # BUGFIX: max_action_steps was assigned three times (100, then 50 twice);
    # only the final value of 50 ever took effect, so keep one assignment.
    vis_config.max_action_steps = 50
    visual_model = model_class.from_pretrained(pretrained_model_name_or_path=None, config=vis_config, state_dict=new_ckpt_weights)
    return visual_model
class Data(_BaseData):
    # Factory facade: constructing Data immediately morphs the instance into
    # a concrete implementation class chosen from the constructor parameters.
    def __init__(self, **params):
        self.decide_implementation_type(params)
    def decide_implementation_type(self, params):
        # Swap this instance's class to the one picked by `decide`, then
        # re-run that class's __init__ on the same parameters.
        # NOTE(review): __init__ is invoked with the params dict as a single
        # positional argument, not **params — confirm the target classes
        # accept a dict, since this differs from Data's own signature.
        self.__class__ = decide(params)
        self.__init__(params)
def do_format_to_lines(args):
    """Run data_builder.format_to_lines, printing a timer reading before and after.

    BUGFIX: time.clock() was removed in Python 3.8; time.perf_counter() is
    the documented replacement for interval timing.
    """
    print(time.perf_counter())
    data_builder.format_to_lines(args)
    print(time.perf_counter())
class ArgDef():
    """Description of a single API argument, with fuzzy-matching helpers used
    to align the argument lists of two API definitions.
    """

    def __init__(self):
        self.name: str = ''
        self.is_optional: bool = False
        self.must_use_name: bool = False
        self.type: set = set()
        self.default_value: str = ''
        self.description: str = ''
        # Quoted annotation so instantiation does not require `Argument` to
        # be resolvable at runtime (PEP 526 evaluates attribute annotations).
        self.case: 'Argument' = None
        self.record = None
        self.ignore: bool = False
        self.disable = False

    @staticmethod
    def new(record):
        """Alternate constructor: build an ArgDef from a record dict."""
        # BUGFIX: marked @staticmethod — it takes no self and is called as
        # ArgDef.new(record); the decorator also makes instance calls safe.
        arg = ArgDef()
        arg.name = record['name']
        arg.is_optional = record['is_optional']
        arg.must_use_name = record['must_use_name']
        arg.type = set(record['type'])
        arg.default_value = record['default_value']
        arg.description = record['description']
        return arg

    def arg_similar(self, arg: 'ArgDef', w_name=1.0, w_type=1.0):
        """Similarity of this argument to `arg`: weighted name similarity plus
        weighted type-overlap ratio.

        The weights default to 1.0 so existing unweighted callers keep their
        behavior; args_similar passes explicit weights.
        """
        def name_wrapper(name):
            # '_input_tensor' is an alias of 'input' for matching purposes.
            if (name == '_input_tensor'):
                return 'input'
            else:
                return name
        name_sim = self.string_similar(name_wrapper(self.name), name_wrapper(arg.name))
        if (len(self.type) == 0):
            type_sim = 0
        else:
            type_sim = (len(self.type.intersection(arg.type)) / len(self.type))
        return ((w_name * name_sim) + (w_type * type_sim))

    def args_similar(self, args: list['ArgDef'], w_name=0.3, w_type=0.7):
        """Similarity of this argument to each argument in `args`.

        BUGFIX: this passed (w_name, w_type) to arg_similar, which did not
        accept them and raised TypeError; arg_similar now takes the weights.
        """
        sims = []
        for arg in args:
            sims.append(self.arg_similar(arg, w_name, w_type))
        return list(sims)

    def perfect_match(self, arg: 'ArgDef'):
        """True-ish when this argument exactly matches `arg` (same name and
        overlapping types)."""
        return ArgDef.perfect_match_(self, arg)

    @staticmethod
    def similarity(argdefs_a: list['ArgDef'], argdefs_b: list['ArgDef']):
        """Pairwise similarity matrix between two argument lists.

        When neither list starts with the '_input_tensor' placeholder, a
        positional-proximity bonus is added to each pair.
        """
        # BUGFIX: marked @staticmethod — it has no self parameter and is
        # intended to be called on the class.
        use_position = (len(argdefs_a) and len(argdefs_b) and (argdefs_a[0].name != '_input_tensor') and (argdefs_b[0].name != '_input_tensor'))
        sim = []
        for (idx_a, def_a) in enumerate(argdefs_a):
            temp = []
            for (idx_b, def_b) in enumerate(argdefs_b):
                t = def_a.arg_similar(def_b)
                if use_position:
                    t += (1 - (abs((idx_a - idx_b)) / max(len(argdefs_a), len(argdefs_b))))
                temp.append(t)
            sim.append(temp)
        return sim

    @staticmethod
    def perfect_match_(arg1: 'ArgDef', arg2: 'ArgDef'):
        # BUGFIX: marked @staticmethod; callers invoke
        # ArgDef.perfect_match_(a, b) with two explicit arguments.
        flag = ((arg1.name == arg2.name) and arg1.type.intersection(arg2.type))
        return flag

    @staticmethod
    def string_similar(s1, s2):
        """Normalized Levenshtein similarity of two strings in [0, 1]."""
        # BUGFIX: marked @staticmethod — arg_similar calls
        # self.string_similar(a, b), which previously passed self as s1 and
        # raised TypeError for taking three arguments.
        return textdistance.levenshtein.normalized_similarity(s1, s2)
class ConstraintResEncoder(nn.Module):
    """Residual encoder producing latent Gaussian distributions conditioned on
    text (sentence + attended word embeddings) and an image mask.

    Two modes, selected by whether the complement image ``img_c`` is given:
    - training (``img_c`` provided): posterior + prior distributions via
      ``two_paths``;
    - inference (``img_c`` is None): prior distribution only via ``one_path``.
    """
    def __init__(self, input_nc=3, ngf=32, z_nc=256, img_f=256, L=6, layers=5, norm='none', activation='ReLU', use_spect=True, use_coord=False, image_dim=256, text_dim=256, multi_peak=True, pool_attention='max'):
        # input_nc: channels of the input image; ngf: base feature width;
        # z_nc: latent channels; img_f: max feature width; L: number of
        # residual refinement blocks; layers: downsampling depth.
        super(ConstraintResEncoder, self).__init__()
        self.layers = layers
        self.z_nc = z_nc
        self.L = L
        norm_layer = get_norm_layer(norm_type=norm)
        nonlinearity = get_nonlinearity_layer(activation_type=activation)
        # Stem block, then (layers - 1) downsampling residual blocks.
        self.block0 = ResBlockEncoderOptimized(input_nc, ngf, norm_layer, nonlinearity, use_spect, use_coord)
        self.word_attention = ImageTextAttention(idf=image_dim, cdf=text_dim, multi_peak=multi_peak, pooling=pool_attention)
        mult = 1
        for i in range((layers - 1)):
            mult_prev = mult
            # Channel width doubles per stage, capped at img_f // ngf.
            mult = min((2 ** (i + 2)), (img_f // ngf))
            block = ResBlock((ngf * mult_prev), (ngf * mult), (ngf * mult_prev), norm_layer, nonlinearity, 'down', use_spect, use_coord)
            setattr(self, ('encoder' + str(i)), block)
        # L refinement blocks for image features.
        for i in range(self.L):
            block = ResBlock((ngf * mult), (ngf * mult), (ngf * mult), norm_layer, nonlinearity, 'none', use_spect, use_coord)
            setattr(self, ('infer_prior' + str(i)), block)
        # L refinement blocks for attended word embeddings.
        for i in range(self.L):
            block = ResBlock((ngf * mult), (ngf * mult), (ngf * mult), norm_layer, nonlinearity, 'none', use_spect, use_coord)
            setattr(self, ('infer_prior_word' + str(i)), block)
        # Heads mapping [image features ++ sentence ++ word context] -> 2*z_nc
        # channels (mean and pre-softplus std, split in one_path/two_paths).
        self.posterior = ResBlock(((ngf * mult) + (2 * text_dim)), (2 * z_nc), ((ngf * mult) * 2), norm_layer, nonlinearity, 'none', use_spect, use_coord)
        self.prior = ResBlock(((ngf * mult) + (2 * text_dim)), (2 * z_nc), ((ngf * mult) * 2), norm_layer, nonlinearity, 'none', use_spect, use_coord)
    def forward(self, img_m, sentence_embedding, word_embeddings, text_mask, image_mask, img_c=None):
        """Encode masked image ``img_m`` (and optionally its complement
        ``img_c``) into latent distribution(s) plus intermediate features.

        Returns (distribution, feature, f_text[, dual_word_embedding]).
        """
        if (type(img_c) != type(None)):
            # Training: process masked image and complement in one batch.
            img = torch.cat([img_m, img_c], dim=0)
        else:
            img = img_m
        out = self.block0(img)
        feature = [out]
        for i in range((self.layers - 1)):
            model = getattr(self, ('encoder' + str(i)))
            out = model(out)
            feature.append(out)
        # Resize the mask to the deepest feature map's spatial size.
        image_mask = task.scale_img(image_mask, size=[feature[(- 1)].size(2), feature[(- 1)].size(3)])
        if (image_mask.size(1) == 3):
            # Collapse a 3-channel mask to a single channel.
            image_mask = image_mask.chunk(3, dim=1)[0]
        if (type(img_c) != type(None)):
            # Split the stacked batch back into masked / complement halves.
            (f_m_g, f_m_rec) = feature[(- 1)].chunk(2)
            img_mask_g = image_mask
            img_mask_rec = (1 - img_mask_g)
            # Attention over words, restricted by complementary image masks.
            weighted_word_embedding_rec = self.word_attention(f_m_rec, word_embeddings, mask=text_mask, image_mask=img_mask_rec, inverse_attention=False)
            weighted_word_embedding_g = self.word_attention(f_m_g, word_embeddings, mask=text_mask, image_mask=img_mask_g, inverse_attention=True)
            weighted_word_embedding = torch.cat([weighted_word_embedding_g, weighted_word_embedding_rec])
            (distribution, f_text, dual_word_embedding) = self.two_paths(out, sentence_embedding, weighted_word_embedding)
            return (distribution, feature, f_text, dual_word_embedding)
        else:
            f_m = feature[(- 1)]
            weighted_word_embedding = self.word_attention(f_m, word_embeddings, mask=text_mask, image_mask=image_mask, inverse_attention=True)
            (distribution, f_m_text, infered_word_embedding) = self.one_path(out, sentence_embedding, weighted_word_embedding)
            f_text = torch.cat([f_m_text, weighted_word_embedding], dim=1)
            return (distribution, feature, f_text)
    def one_path(self, f_in, sentence_embedding, weighted_word_embedding):
        """Prior path: refine features and word context, then predict the
        prior Gaussian (mu, softplus(std))."""
        f_m = f_in
        distribution = []
        # Refine image features through the L infer_prior blocks.
        for i in range(self.L):
            infer_prior = getattr(self, ('infer_prior' + str(i)))
            f_m = infer_prior(f_m)
        # Refine the attended word embedding through the L word blocks.
        # NOTE(review): infered_word_embedding is re-derived from the ORIGINAL
        # weighted_word_embedding each iteration (not chained) — only the last
        # block's output survives; confirm this is intended.
        for i in range(self.L):
            infer_prior_word = getattr(self, ('infer_prior_word' + str(i)))
            infered_word_embedding = infer_prior_word(weighted_word_embedding)
        (ix, iw) = (f_m.size(2), f_m.size(3))
        sentence_dim = sentence_embedding.size(1)
        # Broadcast the sentence vector over the spatial grid.
        sentence_embedding_replication = sentence_embedding.view((- 1), sentence_dim, 1, 1).repeat(1, 1, ix, iw)
        f_m_sent = torch.cat([f_m, sentence_embedding_replication], dim=1)
        f_m_text = torch.cat([f_m_sent, infered_word_embedding], dim=1)
        o = self.prior(f_m_text)
        (q_mu, q_std) = torch.split(o, self.z_nc, dim=1)
        # softplus keeps the std positive.
        distribution.append([q_mu, F.softplus(q_std)])
        return (distribution, f_m_sent, infered_word_embedding)
    def two_paths(self, f_in, sentence_embedding, weighted_word_embedding):
        """Training path: posterior from the complement half, prior from the
        masked half; returns paired distributions and concatenated features."""
        (f_m, f_c) = f_in.chunk(2)
        (weighted_word_embedding_m, weighted_word_embedding_c) = weighted_word_embedding.chunk(2)
        distributions = []
        (ix, iw) = (f_c.size(2), f_c.size(3))
        sentence_dim = sentence_embedding.size(1)
        sentence_embedding_replication = sentence_embedding.view((- 1), sentence_dim, 1, 1).repeat(1, 1, ix, iw)
        f_c_sent = torch.cat([f_c, sentence_embedding_replication], dim=1)
        f_c_text = torch.cat([f_c_sent, weighted_word_embedding_c], dim=1)
        o = self.posterior(f_c_text)
        (p_mu, p_std) = torch.split(o, self.z_nc, dim=1)
        (distribution, f_m_sent, infered_word_embedding) = self.one_path(f_m, sentence_embedding, weighted_word_embedding_m)
        # [posterior mu, posterior std, prior mu, prior std]
        distributions.append([p_mu, F.softplus(p_std), distribution[0][0], distribution[0][1]])
        dual_word_embedding = torch.cat([infered_word_embedding, weighted_word_embedding_c], dim=0)
        f_m_text = torch.cat([f_m_sent, infered_word_embedding], dim=1)
        # NOTE(review): the next line makes f_c_text identical to f_m_text,
        # discarding the posterior-side features computed above — possibly it
        # was meant to use f_c_sent / weighted_word_embedding_c. Left as-is;
        # confirm against the original intent before changing.
        f_c_text = torch.cat([f_m_sent, infered_word_embedding], dim=1)
        return (distributions, torch.cat([f_m_text, f_c_text], dim=0), dual_word_embedding)
class Add2(nn.Module):
    """Module that sums the two elements of a two-element list input."""

    def __init__(self):
        super(Add2, self).__init__()

    def forward(self, x):
        # Input contract: exactly a two-element list.
        assert (type(x) == list) and (len(x) == 2)
        first, second = x
        return first + second
@torch.no_grad()
def ddim_loop(pipeline, ddim_scheduler, latent, num_inv_steps, prompt):
    """DDIM inversion: walk the scheduler's timesteps in reverse, collecting
    the latent at every step.

    Bug fix: the original source had a bare ``_grad()`` call above the
    function — a mangled ``@torch.no_grad()`` decorator. Inversion is pure
    inference, so it is restored here to avoid building autograd graphs.

    Returns the list of latents, starting with the input ``latent``.
    """
    context = init_prompt(prompt, pipeline)
    (uncond_embeddings, cond_embeddings) = context.chunk(2)
    all_latent = [latent]
    # Detach so the loop never back-propagates into the caller's latent.
    latent = latent.clone().detach()
    for i in tqdm(range(num_inv_steps)):
        # Walk timesteps from the end of the schedule backwards.
        t = ddim_scheduler.timesteps[((len(ddim_scheduler.timesteps) - i) - 1)]
        noise_pred = get_noise_pred_single(latent, t, cond_embeddings, pipeline.unet)
        latent = next_step(noise_pred, t, latent, ddim_scheduler)
        all_latent.append(latent)
    return all_latent
def parse_args():
    """Parse command-line options for training/evaluation and return the namespace."""
    parser = argparse.ArgumentParser(description='')
    # Plain scalar options: (flag, type, default).
    for flag, opt_type, default in [
        ('--name', str, 'deleteme'),
        ('--batch_size', int, 2),
        ('--epochs', int, 100),
        ('--lr', float, 0.0002),
        ('--beta1', float, 0.5),
        ('--beta2', float, 0.999),
        ('--lamb', float, 10.0),
        ('--dnorm', float, 0.0),
        ('--iterator', str, 'iterators/mnist.py'),
        ('--resume', str, None),
    ]:
        parser.add_argument(flag, type=opt_type, default=default)
    # At most one run mode may be selected.
    spec = parser.add_mutually_exclusive_group()
    spec.add_argument('--interactive', action='store_true')
    spec.add_argument('--compute_stats', action='store_true')
    spec.add_argument('--dump_depths', type=str)
    for flag, opt_type, default in [
        ('--update_g_every', int, 1),
        ('--network', str, 'networks/mnist.py'),
        ('--save_path', str, './results_aigns'),
        ('--save_images_every', int, 100),
        ('--save_every', int, 10),
    ]:
        parser.add_argument(flag, type=opt_type, default=default)
    parser.add_argument('--cpu', action='store_true')
    return parser.parse_args()
def get_bleu(recover, reference):
    """Sentence-level BLEU of `reference` scored against `recover` (NLTK smoothing method 4)."""
    reference_tokens = [recover.split()]
    hypothesis_tokens = reference.split()
    smoother = SmoothingFunction().method4
    return sentence_bleu(reference_tokens, hypothesis_tokens, smoothing_function=smoother)
def main():
    """Fine-tune / evaluate a TF seq2seq translation model with HF Transformers.

    Parses arguments (from a JSON file or the command line), loads the
    dataset and tokenizer, tokenizes train/validation splits, trains with a
    masked sparse-categorical-crossentropy loss, and optionally evaluates
    with sacrebleu via `model.generate`.

    Bug fixes relative to the original:
    - removed a stray unconditional `column_names = raw_datasets['train']...`
      after the do_train/do_eval branch, which crashed eval-only runs that
      have no 'train' split and clobbered the eval column selection;
    - removed a duplicate `padding = ...` assignment.
    """
    # --- Argument parsing -------------------------------------------------
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        # A single .json argument means "read all arguments from this file".
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # --- Logging ----------------------------------------------------------
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    logger.setLevel(logging.INFO)
    datasets.utils.logging.set_verbosity(logging.INFO)
    transformers.utils.logging.set_verbosity(logging.INFO)
    logger.info(f'Training/evaluation parameters {training_args}')
    # --- Checkpoint detection --------------------------------------------
    last_checkpoint = None
    if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        elif ((last_checkpoint is not None) and (training_args.resume_from_checkpoint is None)):
            logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    set_seed(training_args.seed)
    # --- Dataset loading (hub dataset or local train/validation files) ----
    if (data_args.dataset_name is not None):
        raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    else:
        data_files = {}
        if (data_args.train_file is not None):
            data_files['train'] = data_args.train_file
            extension = data_args.train_file.split('.')[(- 1)]
        if (data_args.validation_file is not None):
            data_files['validation'] = data_args.validation_file
            extension = data_args.validation_file.split('.')[(- 1)]
        raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
    # --- Config / tokenizer ----------------------------------------------
    config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    prefix = (data_args.source_prefix if (data_args.source_prefix is not None) else '')
    # Pick the columns to drop after tokenization from whichever split we use.
    if training_args.do_train:
        column_names = raw_datasets['train'].column_names
    elif training_args.do_eval:
        column_names = raw_datasets['validation'].column_names
    else:
        logger.info('There is nothing to do. Please pass `do_train`, and/or `do_eval`.')
        return
    # Multilingual tokenizers need explicit source/target language codes.
    if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)):
        assert ((data_args.target_lang is not None) and (data_args.source_lang is not None)), f'{tokenizer.__class__.__name__} is a multilingual tokenizer which requires --source_lang and --target_lang arguments.'
        tokenizer.src_lang = data_args.source_lang
        tokenizer.tgt_lang = data_args.target_lang
        forced_bos_token_id = (tokenizer.lang_code_to_id[data_args.forced_bos_token] if (data_args.forced_bos_token is not None) else None)
    source_lang = data_args.source_lang.split('_')[0]
    target_lang = data_args.target_lang.split('_')[0]
    padding = ('max_length' if data_args.pad_to_max_length else False)
    max_target_length = data_args.max_target_length
    def preprocess_function(examples):
        # Tokenize source (with optional prefix) and target sides of each pair.
        inputs = [ex[source_lang] for ex in examples['translation']]
        targets = [ex[target_lang] for ex in examples['translation']]
        inputs = [(prefix + inp) for inp in inputs]
        model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
        with tokenizer.as_target_tokenizer():
            labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)
        if ((padding == 'max_length') and data_args.ignore_pad_token_for_loss):
            # Replace pad ids with -100 so the loss mask can drop them.
            labels['input_ids'] = [[(l if (l != tokenizer.pad_token_id) else (- 100)) for l in label] for label in labels['input_ids']]
        model_inputs['labels'] = labels['input_ids']
        return model_inputs
    # --- Train / eval dataset preparation ---------------------------------
    if training_args.do_train:
        if ('train' not in raw_datasets):
            raise ValueError('--do_train requires a train dataset')
        train_dataset = raw_datasets['train']
        if (data_args.max_train_samples is not None):
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
        with training_args.main_process_first(desc='train dataset map pre-processing'):
            train_dataset = train_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on train dataset')
    else:
        train_dataset = None
    if training_args.do_eval:
        max_target_length = data_args.val_max_target_length
        if ('validation' not in raw_datasets):
            raise ValueError('--do_eval requires a validation dataset')
        eval_dataset = raw_datasets['validation']
        if (data_args.max_eval_samples is not None):
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
        with training_args.main_process_first(desc='validation dataset map pre-processing'):
            eval_dataset = eval_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on validation dataset')
    else:
        eval_dataset = None
    # --- Model construction under the distribution strategy ----------------
    with training_args.strategy.scope():
        model = TFAutoModelForSeq2SeqLM.from_pretrained(model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
        model.resize_token_embeddings(len(tokenizer))
        if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)):
            model.config.forced_bos_token_id = forced_bos_token_id
        if ((model.config.decoder_start_token_id is None) and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast))):
            assert ((data_args.target_lang is not None) and (data_args.source_lang is not None)), 'mBart requires --target_lang and --source_lang'
            if isinstance(tokenizer, MBartTokenizer):
                model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang]
            else:
                model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang)
        if (model.config.decoder_start_token_id is None):
            raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined')
        num_replicas = training_args.strategy.num_replicas_in_sync
        total_train_batch_size = (training_args.per_device_train_batch_size * num_replicas)
        total_eval_batch_size = (training_args.per_device_eval_batch_size * num_replicas)
        tf_train_dataset = dataset_to_tf(train_dataset, model, tokenizer, total_batch_size=total_train_batch_size, num_epochs=training_args.num_train_epochs, shuffle=True)
        tf_eval_dataset = dataset_to_tf(eval_dataset, model, tokenizer, total_eval_batch_size, num_epochs=1, shuffle=False)
        # NOTE(review): this divides by the per-device batch size while the
        # tf dataset is batched by total_train_batch_size — confirm whether
        # the total size was intended here for multi-replica runs.
        num_update_steps_per_epoch = (len(train_dataset) // training_args.per_device_train_batch_size)
        num_train_steps = (training_args.num_train_epochs * num_update_steps_per_epoch)
        (optimizer, lr_schedule) = create_optimizer(init_lr=training_args.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=training_args.warmup_steps)
        def masked_sparse_categorical_crossentropy(y_true, y_pred):
            # Clip -100 label markers to a valid id, then mask them out of the loss.
            losses = tf.keras.losses.sparse_categorical_crossentropy(tf.clip_by_value(y_true, 0, int(1000000.0)), y_pred, from_logits=True)
            losses = tf.ragged.boolean_mask(losses, (y_true != (- 100)))
            losses = tf.reduce_mean(losses, axis=(- 1))
            return losses
        metric = load_metric('sacrebleu')
        def postprocess_text(preds, labels):
            # sacrebleu expects a list of references per prediction.
            preds = [pred.strip() for pred in preds]
            labels = [[label.strip()] for label in labels]
            return (preds, labels)
        model.compile(loss={'logits': masked_sparse_categorical_crossentropy}, optimizer=optimizer)
        # --- Training ------------------------------------------------------
        if training_args.do_train:
            logger.info('***** Running training *****')
            logger.info(f'  Num examples = {len(train_dataset)}')
            logger.info(f'  Num Epochs = {training_args.num_train_epochs}')
            logger.info(f'  Instantaneous batch size per device = {training_args.per_device_train_batch_size}')
            logger.info(f'  Total train batch size = {total_train_batch_size}')
            logger.info(f'  Total optimization steps = {num_train_steps}')
            model.fit(tf_train_dataset, epochs=int(training_args.num_train_epochs), steps_per_epoch=num_update_steps_per_epoch)
        if (data_args.val_max_target_length is None):
            data_args.val_max_target_length = data_args.max_target_length
        gen_kwargs = {'max_length': data_args.val_max_target_length, 'num_beams': data_args.num_beams}
        # --- Evaluation ----------------------------------------------------
        if training_args.do_eval:
            logger.info('Evaluation...')
            for (batch, labels) in tqdm(tf_eval_dataset, total=(len(eval_dataset) // training_args.per_device_eval_batch_size)):
                batch.update(gen_kwargs)
                generated_tokens = model.generate(**batch)
                if isinstance(generated_tokens, tuple):
                    generated_tokens = generated_tokens[0]
                decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
                labels = np.where((labels != (- 100)), labels, tokenizer.pad_token_id)
                decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
                (decoded_preds, decoded_labels) = postprocess_text(decoded_preds, decoded_labels)
                metric.add_batch(predictions=decoded_preds, references=decoded_labels)
            eval_metric = metric.compute()
            logger.info({'bleu': eval_metric['score']})
        if (training_args.output_dir is not None):
            model.save_pretrained(training_args.output_dir)
def sklearn_BernoulliRBM(*args, **kwargs):
    """Thin factory wrapper forwarding all arguments to sklearn's BernoulliRBM."""
    rbm_cls = sklearn.neural_network.BernoulliRBM
    return rbm_cls(*args, **kwargs)
def dla46c(**kwargs):
    """Construct the DLA-46-C variant via get_dla; extra kwargs are forwarded."""
    stage_levels = [1, 2, 2, 1]
    stage_channels = [64, 64, 128, 256]
    return get_dla(
        levels=stage_levels,
        channels=stage_channels,
        res_body_class=DLABottleneck,
        model_name='dla46c',
        **kwargs,
    )
class mini_Decoder_BEVSegFormer(nn.Module):
    """Small BEV decoder head: two conv stages, each followed by a fixed
    bilinear upsampling (to 200x200 then 500x500), ending in a 1x1
    classification layer with dropout."""

    def __init__(self, feat_dim, n_obj_classes):
        super(mini_Decoder_BEVSegFormer, self).__init__()

        def conv_stage(in_ch, out_ch):
            # 3x3 conv + BN + ReLU, then 1x1 conv + BN + ReLU.
            return nn.Sequential(
                nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=True),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(inplace=True),
                nn.Conv2d(out_ch, out_ch, kernel_size=1, stride=1, bias=True),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(inplace=True),
            )

        self.layer1 = conv_stage(feat_dim, 128)
        self.layer2 = conv_stage(128, 64)
        self.obj_layer = nn.Sequential(
            nn.Dropout(p=0.1),
            nn.Conv2d(64, n_obj_classes, kernel_size=1, stride=1, padding=0, bias=True),
        )

    def forward(self, memory):
        x = self.layer1(memory)
        x = F.interpolate(x, size=(200, 200), mode='bilinear', align_corners=True)
        x = self.layer2(x)
        x = F.interpolate(x, size=(500, 500), mode='bilinear', align_corners=True)
        return self.obj_layer(x)
class CamCANHandler():
    """Prepares, registers, and skull-strips the CamCAN MRI dataset.

    Bug fixes relative to the original:
    - ``__init__`` contained a stray lone quote (a broken docstring) that made
      the file a syntax error; replaced with a real docstring;
    - ``prepare_CamCAN``, ``register_CamCAN`` and ``skull_strip_CamCAN_T2``
      took no ``self`` but were invoked as ``self.method(...)``, which would
      raise TypeError. They are now ``@staticmethod``s, keeping both
      ``self.method(...)`` and ``CamCANHandler.method(...)`` call styles
      working.
    """

    def __init__(self, args):
        """Resolve the dataset path, reorganize the raw download, and report.

        NOTE(review): this prints the skull-stripping message but does not
        call ``skull_strip_CamCAN`` itself — presumably the caller triggers
        it; confirm against the surrounding pipeline.
        """
        if (args.dataset_path is None):
            args.dataset_path = os.path.join(ROOT, 'data', 'datasets', 'MRI', 'CamCAN')
        print('Preparing CamCAN directory')
        self.prepare_CamCAN(args)
        print(f'Skull stripping for CamCAN {args.weighting} scans')

    @staticmethod
    def prepare_CamCAN(args):
        """Flatten each subject's 'anat' subfolder and move subjects into a 'normal' directory."""
        if (not os.path.exists(os.path.join(args.dataset_path, 'cc700'))):
            raise RuntimeError(f'Missing dataset. Apply for CamCAN data and place it into {args.dataset_path}')
        normal_dir = os.path.join(args.dataset_path, 'normal')
        os.makedirs(normal_dir, exist_ok=True)
        patient_dirs = glob(f"{os.path.join(args.dataset_path, 'cc700/mri/pipeline/release004/BIDS_/anat')}/sub*/")
        for d in tqdm(patient_dirs):
            # Lift files out of the per-subject 'anat' folder, then drop it.
            for f in glob(f'{d}anat/*'):
                shutil.move(f, d)
            shutil.rmtree(f'{d}anat/', ignore_errors=True)
            shutil.move(d, normal_dir)

    @staticmethod
    def register_CamCAN(args):
        """Register all skull-stripped T1 scans to the atlas, then apply each
        resulting transform to the matching T2 scan."""
        print('Registering CamCAN')
        files = glob(f"{os.path.join(args.dataset_path, 'normal')}/*/*T1w_stripped.nii.gz")
        print(f'Found {len(files)} files')
        if (len(files) == 0):
            raise RuntimeError('Found 0 files')
        template_path = os.path.join(ROOT, 'data', 'data_preprocessing', 'BrainAtlases/T1_brain.nii')
        registrator = MRIRegistrator(template_path=template_path)
        transformations = registrator.register_batch(files)
        for (path, t) in tqdm(transformations.items()):
            base = path[:path.rfind('T1')]
            path = (base + 'T2w_stripped.nii.gz')
            save_path = (base + 'T2w_stripped_registered.nii.gz')
            registrator.transform(img=path, save_path=save_path, transformation=t, affine=registrator.template_affine, dtype='short')

    def skull_strip_CamCAN(self, args):
        """Skull-strip all scans of the chosen weighting ('t1' via ROBEX, 't2' via the T1 masks)."""
        w = args.weighting
        if (not isinstance(w, str)):
            raise RuntimeError(f'Invalid value for --weighting {w}')
        paths = glob(f"{os.path.join(args.dataset_path, 'normal')}/*/*{w.upper()}w.nii.gz")
        print(f'Found {len(paths)}')
        if (len(paths) == 0):
            raise RuntimeError('No paths found')
        if (w.lower() == 't1'):
            strip_skull_ROBEX(paths)
        elif (w.lower() == 't2'):
            self.skull_strip_CamCAN_T2(paths)
        else:
            raise NotImplementedError(f'CamCAN skull stripping not implemented for --weighting {w}')

    @staticmethod
    def skull_strip_CamCAN_T2(paths):
        """Mask each T2 scan with its already-stripped T1 counterpart (zero where T1 is zero)."""
        for path in tqdm(paths):
            t1_stripped_path = f"{path[:path.rfind('T2w')]}T1w_stripped.nii.gz"
            t2_stripped_path = f"{path[:path.rfind('T2w')]}T2w_stripped.nii.gz"
            if (not os.path.exists(t1_stripped_path)):
                print(f'WARNING: No T1 skull stripped file found for {path}')
            t2_data = nib.load(path)
            affine = t2_data.affine
            t2 = np.asarray(t2_data.dataobj, dtype=np.short)
            t1_stripped = np.asarray(nib.load(t1_stripped_path).dataobj, dtype=np.short)
            t2_stripped = t2.copy()
            # Zero out every voxel the T1 brain mask removed.
            t2_stripped[(t1_stripped == 0)] = 0
            nib.save(nib.Nifti1Image(t2_stripped.astype(np.short), affine), t2_stripped_path)
class LoaderCfg(TypedDict):
    """Configuration mapping describing how to build a data loader.

    NOTE(review): ``Optional[...]`` here means the *value* may be None, not
    that the key may be omitted — with a plain TypedDict all keys are
    required at type-check time unless ``total=False`` is used; confirm
    against the call sites whether partial configs are passed.
    """
    batch_size: int
    num_workers: Optional[int]
    drop_last: Optional[bool]
    shuffle: Optional[bool]
    pin_memory: Optional[bool]
    # Nested per-split overrides (self-referential forward references).
    train: Optional['LoaderCfg']
    val: Optional['LoaderCfg']
    test: Optional['LoaderCfg']
class Processor():
    """Training/evaluation harness for a skeleton-action-recognition model.

    Wires together config (``arg``), data loaders, model, optimizer with
    warm-up scheduling, TensorBoard writers, and the train/eval/start loops.
    """
    def __init__(self, arg):
        # Persist the config and set up TensorBoard writers. In train mode
        # (non-debug) any previous run directory is deleted first.
        self.arg = arg
        self.save_arg()
        if (arg.phase == 'train'):
            if (not arg.train_feeder_args['debug']):
                if os.path.isdir(arg.model_saved_name):
                    shutil.rmtree(arg.model_saved_name)
                    print('Dir removed: ', arg.model_saved_name)
                self.train_writer = SummaryWriter(os.path.join(arg.model_saved_name, 'train'), 'train')
                self.val_writer = SummaryWriter(os.path.join(arg.model_saved_name, 'val'), 'val')
            else:
                self.train_writer = self.val_writer = SummaryWriter(os.path.join(arg.model_saved_name, 'test'), 'test')
        self.global_step = 0
        self.load_model()
        self.load_optimizer()
        self.load_data()
        self.lr = self.arg.base_lr
        self.best_acc = 0
    def load_data(self):
        """Build the train (train phase only) and test DataLoaders from the configured feeder class."""
        Feeder = import_class(self.arg.feeder)
        self.data_loader = dict()
        if (self.arg.phase == 'train'):
            self.data_loader['train'] = torch.utils.data.DataLoader(dataset=Feeder(**self.arg.train_feeder_args), batch_size=self.arg.batch_size, shuffle=True, num_workers=self.arg.num_worker, drop_last=True, worker_init_fn=init_seed)
        self.data_loader['test'] = torch.utils.data.DataLoader(dataset=Feeder(**self.arg.test_feeder_args), batch_size=self.arg.test_batch_size, shuffle=False, num_workers=self.arg.num_worker, drop_last=False, worker_init_fn=init_seed)
    def load_model(self):
        """Instantiate the model (and loss) on the output device; optionally
        load pretrained weights, honoring the ignore list, and wrap in
        DataParallel for multi-GPU configs."""
        output_device = (self.arg.device[0] if (type(self.arg.device) is list) else self.arg.device)
        self.output_device = output_device
        Model = import_class(self.arg.model)
        # Keep a copy of the model source next to the run for reproducibility.
        shutil.copy2(inspect.getfile(Model), self.arg.work_dir)
        print(Model)
        self.model = Model(**self.arg.model_args).cuda(output_device)
        self.loss = nn.CrossEntropyLoss().cuda(output_device)
        if self.arg.weights:
            # Recover the global step from the '...-<step>.pt' filename.
            # NOTE(review): reads module-level `arg`, not `self.arg` —
            # presumably works only because the launcher script defines a
            # global `arg`; looks like it should be `self.arg.weights`.
            self.global_step = int(arg.weights[:(- 3)].split('-')[(- 1)])
            self.print_log('Load weights from {}.'.format(self.arg.weights))
            if ('.pkl' in self.arg.weights):
                with open(self.arg.weights, 'r') as f:
                    weights = pickle.load(f)
            else:
                weights = torch.load(self.arg.weights)
            # Strip any 'module.' DataParallel prefix and move to the device.
            weights = OrderedDict([[k.split('module.')[(- 1)], v.cuda(output_device)] for (k, v) in weights.items()])
            keys = list(weights.keys())
            for w in self.arg.ignore_weights:
                for key in keys:
                    if (w in key):
                        if (weights.pop(key, None) is not None):
                            self.print_log('Sucessfully Remove Weights: {}.'.format(key))
                        else:
                            self.print_log('Can Not Remove Weights: {}.'.format(key))
            # NOTE(review): bare except — falls back to a partial load,
            # reporting the missing keys, on ANY failure.
            try:
                self.model.load_state_dict(weights)
            except:
                state = self.model.state_dict()
                diff = list(set(state.keys()).difference(set(weights.keys())))
                print('Can not find these weights:')
                for d in diff:
                    print(('  ' + d))
                state.update(weights)
                self.model.load_state_dict(state)
        if (type(self.arg.device) is list):
            if (len(self.arg.device) > 1):
                self.model = nn.DataParallel(self.model, device_ids=self.arg.device, output_device=output_device)
    def load_optimizer(self):
        """Create the SGD/Adam optimizer plus a MultiStepLR schedule wrapped in gradual warm-up."""
        if (self.arg.optimizer == 'SGD'):
            self.optimizer = optim.SGD(self.model.parameters(), lr=self.arg.base_lr, momentum=0.9, nesterov=self.arg.nesterov, weight_decay=self.arg.weight_decay)
        elif (self.arg.optimizer == 'Adam'):
            self.optimizer = optim.Adam(self.model.parameters(), lr=self.arg.base_lr, weight_decay=self.arg.weight_decay)
        else:
            raise ValueError()
        lr_scheduler_pre = optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=self.arg.step, gamma=0.1)
        self.lr_scheduler = GradualWarmupScheduler(self.optimizer, total_epoch=self.arg.warm_up_epoch, after_scheduler=lr_scheduler_pre)
        self.print_log('using warm up, epoch: {}'.format(self.arg.warm_up_epoch))
    def save_arg(self):
        """Dump the full config namespace to <work_dir>/config.yaml."""
        arg_dict = vars(self.arg)
        if (not os.path.exists(self.arg.work_dir)):
            os.makedirs(self.arg.work_dir)
        with open('{}/config.yaml'.format(self.arg.work_dir), 'w') as f:
            yaml.dump(arg_dict, f)
    def adjust_learning_rate(self, epoch):
        """Set lr for this epoch: linear warm-up, then step decay (x0.1 at each milestone in arg.step)."""
        if ((self.arg.optimizer == 'SGD') or (self.arg.optimizer == 'Adam')):
            if (epoch < self.arg.warm_up_epoch):
                lr = ((self.arg.base_lr * (epoch + 1)) / self.arg.warm_up_epoch)
            else:
                lr = (self.arg.base_lr * (0.1 ** np.sum((epoch >= np.array(self.arg.step)))))
            for param_group in self.optimizer.param_groups:
                param_group['lr'] = lr
            return lr
        else:
            raise ValueError()
    def print_time(self):
        """Log the current wall-clock time."""
        localtime = time.asctime(time.localtime(time.time()))
        self.print_log(('Local current time :  ' + localtime))
    def print_log(self, str, print_time=True):
        """Print a message (optionally timestamped) and append it to <work_dir>/log.txt.

        NOTE(review): the parameter shadows the builtin ``str``.
        """
        if print_time:
            localtime = time.asctime(time.localtime(time.time()))
            str = ((('[ ' + localtime) + ' ] ') + str)
        print(str)
        if self.arg.print_log:
            with open('{}/log.txt'.format(self.arg.work_dir), 'a') as f:
                print(str, file=f)
    def record_time(self):
        """Reset the split timer and return the current timestamp."""
        self.cur_time = time.time()
        return self.cur_time
    def split_time(self):
        """Return seconds since the last record/split and reset the timer."""
        split_time = (time.time() - self.cur_time)
        self.record_time()
        return split_time
    def train(self, epoch, save_model=False):
        """Run one training epoch; optionally checkpoint weights at the end."""
        self.model.train()
        self.print_log('Training epoch: {}'.format((epoch + 1)))
        loader = self.data_loader['train']
        self.adjust_learning_rate(epoch)
        loss_value = []
        self.train_writer.add_scalar('epoch', epoch, self.global_step)
        self.record_time()
        # Tiny epsilons avoid division by zero in the proportion summary.
        timer = dict(dataloader=0.001, model=0.001, statistics=0.001)
        process = tqdm(loader)
        if self.arg.only_train_part:
            # Freeze/unfreeze the adaptive-adjacency ('PA') parameters
            # depending on the configured warm-up epoch.
            if (epoch > self.arg.only_train_epoch):
                print('only train part, require grad')
                for (key, value) in self.model.named_parameters():
                    if ('PA' in key):
                        value.requires_grad = True
            else:
                print('only train part, do not require grad')
                for (key, value) in self.model.named_parameters():
                    if ('PA' in key):
                        value.requires_grad = False
        for (batch_idx, (data, label, index)) in enumerate(process):
            self.global_step += 1
            data = Variable(data.float().cuda(self.output_device), requires_grad=False)
            label = Variable(label.long().cuda(self.output_device), requires_grad=False)
            timer['dataloader'] += self.split_time()
            output = self.model(data)
            # Models may return (logits, l1_regularizer).
            if isinstance(output, tuple):
                (output, l1) = output
                l1 = l1.mean()
            else:
                l1 = 0
            loss = (self.loss(output, label) + l1)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            loss_value.append(loss.data.item())
            timer['model'] += self.split_time()
            (value, predict_label) = torch.max(output.data, 1)
            acc = torch.mean((predict_label == label.data).float())
            self.train_writer.add_scalar('acc', acc, self.global_step)
            self.train_writer.add_scalar('loss', loss.data.item(), self.global_step)
            self.train_writer.add_scalar('loss_l1', l1, self.global_step)
            self.lr = self.optimizer.param_groups[0]['lr']
            self.train_writer.add_scalar('lr', self.lr, self.global_step)
            timer['statistics'] += self.split_time()
        # Percentage of epoch time spent in loading / forward-backward / logging.
        proportion = {k: '{:02d}%'.format(int(round(((v * 100) / sum(timer.values()))))) for (k, v) in timer.items()}
        self.print_log('\tMean training loss: {:.4f}.'.format(np.mean(loss_value)))
        self.print_log('\tTime consumption: [Data]{dataloader}, [Network]{model}'.format(**proportion))
        if save_model:
            state_dict = self.model.state_dict()
            # Strip DataParallel's 'module.' prefix and checkpoint on CPU.
            weights = OrderedDict([[k.split('module.')[(- 1)], v.cpu()] for (k, v) in state_dict.items()])
            torch.save(weights, (((((self.arg.model_saved_name + '-') + str(epoch)) + '-') + str(int(self.global_step))) + '.pt'))
    def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, result_file=None):
        """Evaluate on the given loaders; optionally dump per-sample
        predictions (result_file), misclassifications (wrong_file), and
        the score matrix (save_score).

        NOTE(review): the ``loader_name`` default is a shared mutable list —
        harmless here since it is never mutated, but fragile.
        """
        if (wrong_file is not None):
            f_w = open(wrong_file, 'w')
        if (result_file is not None):
            f_r = open(result_file, 'w')
        self.model.eval()
        self.print_log('Eval epoch: {}'.format((epoch + 1)))
        for ln in loader_name:
            loss_value = []
            score_frag = []
            right_num_total = 0
            total_num = 0
            loss_total = 0
            step = 0
            process = tqdm(self.data_loader[ln])
            for (batch_idx, (data, label, index)) in enumerate(process):
                with torch.no_grad():
                    # NOTE(review): volatile= is deprecated/no-op on modern
                    # PyTorch; the surrounding no_grad() already disables
                    # autograd.
                    data = Variable(data.float().cuda(self.output_device), requires_grad=False, volatile=True)
                    label = Variable(label.long().cuda(self.output_device), requires_grad=False, volatile=True)
                    output = self.model(data)
                    if isinstance(output, tuple):
                        (output, l1) = output
                        l1 = l1.mean()
                    else:
                        l1 = 0
                    loss = self.loss(output, label)
                    score_frag.append(output.data.cpu().numpy())
                    loss_value.append(loss.data.item())
                    (_, predict_label) = torch.max(output.data, 1)
                    step += 1
                if ((wrong_file is not None) or (result_file is not None)):
                    predict = list(predict_label.cpu().numpy())
                    true = list(label.data.cpu().numpy())
                    for (i, x) in enumerate(predict):
                        if (result_file is not None):
                            f_r.write((((str(x) + ',') + str(true[i])) + '\n'))
                        if ((x != true[i]) and (wrong_file is not None)):
                            f_w.write((((((str(index[i]) + ',') + str(x)) + ',') + str(true[i])) + '\n'))
            score = np.concatenate(score_frag)
            loss = np.mean(loss_value)
            accuracy = self.data_loader[ln].dataset.top_k(score, 1)
            if (accuracy > self.best_acc):
                self.best_acc = accuracy
            print('Accuracy: ', accuracy, ' model: ', self.arg.model_saved_name)
            if (self.arg.phase == 'train'):
                self.val_writer.add_scalar('loss', loss, self.global_step)
                self.val_writer.add_scalar('loss_l1', l1, self.global_step)
                self.val_writer.add_scalar('acc', accuracy, self.global_step)
            score_dict = dict(zip(self.data_loader[ln].dataset.sample_name, score))
            self.print_log('\tMean {} loss of {} batches: {}.'.format(ln, len(self.data_loader[ln]), np.mean(loss_value)))
            for k in self.arg.show_topk:
                self.print_log('\tTop{}: {:.2f}%'.format(k, (100 * self.data_loader[ln].dataset.top_k(score, k))))
            if save_score:
                with open('{}/epoch{}_{}_score.pkl'.format(self.arg.work_dir, (epoch + 1), ln), 'wb') as f:
                    pickle.dump(score_dict, f)
    def start(self):
        """Entry point: run the train/eval loop ('train' phase) or a single evaluation ('test' phase)."""
        if (self.arg.phase == 'train'):
            self.print_log('Parameters:\n{}\n'.format(str(vars(self.arg))))
            self.global_step = ((self.arg.start_epoch * len(self.data_loader['train'])) / self.arg.batch_size)
            for epoch in range(self.arg.start_epoch, self.arg.num_epoch):
                # Checkpoint on the save interval and always on the last epoch.
                save_model = ((((epoch + 1) % self.arg.save_interval) == 0) or ((epoch + 1) == self.arg.num_epoch))
                self.train(epoch, save_model=save_model)
                self.eval(epoch, save_score=self.arg.save_score, loader_name=['test'])
            print('best accuracy: ', self.best_acc, ' model_name: ', self.arg.model_saved_name)
        elif (self.arg.phase == 'test'):
            if (not self.arg.test_feeder_args['debug']):
                wf = (self.arg.model_saved_name + '_wrong.txt')
                rf = (self.arg.model_saved_name + '_right.txt')
            else:
                wf = rf = None
            if (self.arg.weights is None):
                raise ValueError('Please appoint --weights.')
            self.arg.print_log = False
            self.print_log('Model:   {}.'.format(self.arg.model))
            self.print_log('Weights: {}.'.format(self.arg.weights))
            self.eval(epoch=0, save_score=self.arg.save_score, loader_name=['test'], wrong_file=wf, result_file=rf)
            self.print_log('Done.\n')
def _compute_cvmdsum(cv):
    """Compute a stable identifier for a scikit-learn CV splitter.

    Returns the string 'non-reproducible' when the splitter shuffles with no
    fixed random_state; otherwise an md5 digest of the splitter's class name
    and (JSON-serializable) parameters.

    Bug fix: for a class-valued ``cv`` parameter, the original used
    ``params['cv'].__class__.__name__`` — that is the METAclass name (e.g.
    'type'/'ABCMeta'), collapsing all classes to the same digest. The class's
    own ``__name__`` is used instead.
    """
    params = dict(vars(cv).items())
    params['class'] = cv.__class__.__name__
    out = None
    if ('random_state' in params):
        if (params['random_state'] is None):
            # Unseeded shuffling makes splits irreproducible between runs.
            if (params.get('shuffle', True) is True):
                out = 'non-reproducible'
    if isinstance(cv, _CVIterableWrapper):
        # Materialize the wrapped iterable of splits into plain lists.
        splits = params.pop('cv')
        params['cv'] = _recurse_to_list(splits)
    if isinstance(cv, PredefinedSplit):
        # numpy arrays are not JSON-serializable; convert to lists.
        params['test_fold'] = params['test_fold'].tolist()
        params['unique_folds'] = params['unique_folds'].tolist()
    if ('cv' in params):
        if inspect.isclass(params['cv']):
            params['cv'] = params['cv'].__name__
    if (out is None):
        out = hashlib.md5(json.dumps(params, sort_keys=True).encode('utf-8')).hexdigest()
    return out
class KandinskyV22InpaintPipeline(metaclass=DummyObject):
    """Import-time placeholder that raises an informative error when the
    required backends (torch, transformers) are not installed."""
    _backends = ['torch', 'transformers']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        # BUG FIX: must be a classmethod so the backend check fires on
        # KandinskyV22InpaintPipeline.from_config(...) without an instance
        # (an instance can never exist: __init__ itself raises).
        requires_backends(cls, ['torch', 'transformers'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers'])
class RefCOCODataset(Dataset):
    """RefCOCOg (umd split) image dataset paired with MATTNet Mask R-CNN
    detections.

    Each item is one image together with the detected boxes (xyxy) and
    category names for that image.
    """

    def __init__(self, refcoco_dir, refcoco_images_dir, split='val'):
        # refcoco_dir / refcoco_images_dir appear to be pathlib.Path-like
        # (joinpath is used below) -- TODO confirm with callers.
        self.image_dir = refcoco_images_dir
        mattnet_maskrcnn_detections_path = refcoco_dir.joinpath('detections/refcocog_umd/res101_coco_minus_refer_notime_dets.json')
        with open(mattnet_maskrcnn_detections_path) as f:
            mattnet_maskrcnn_detections = json.load(f)
        # Group the flat detection list by source image id.
        id2dets = {}
        for det in mattnet_maskrcnn_detections:
            image_id = det['image_id']
            if (image_id not in id2dets):
                id2dets[image_id] = []
            id2dets[image_id].append(det)
        self.id2dets = id2dets
        print('Load mattnet detections from', mattnet_maskrcnn_detections_path)
        assert (split in ['train', 'val', 'test'])
        # Make the bundled refcoco_utils package importable so REFER resolves.
        workspace_dir = Path(__file__).resolve().parent.parent
        refcoco_util_dir = workspace_dir.joinpath('refcoco_utils')
        import sys
        sys.path.append(str(refcoco_util_dir))
        from refer import REFER
        self.refer = REFER('refcocog', 'umd')
        ref_ids = self.refer.getRefIds(split=split)
        # Collect the unique image ids (and file names) covered by this split;
        # multiple referring expressions can share one image.
        img_ids = []
        image_fns = []
        for ref_id in ref_ids:
            ref = self.refer.Refs[ref_id]
            img_id = ref['image_id']
            if (img_id not in img_ids):
                img_ids.append(img_id)
                # file_name carries an extra '_' suffix segment; dropping the
                # last segment presumably recovers the on-disk image file name
                # -- TODO confirm against the images directory layout.
                fn_ann = ref['file_name']
                suffix = fn_ann.split('.')[(- 1)]
                fname = (('_'.join(fn_ann.split('_')[:(- 1)]) + '.') + suffix)
                image_fns.append(fname)
        self.image_ids = img_ids
        self.image_fns = image_fns

    def __len__(self):
        """Number of unique images in the split."""
        return len(self.image_ids)

    def __getitem__(self, idx):
        """Load image *idx* and return it with its detections.

        Returns a dict with keys: 'img_id' (str), 'img_fn', 'img' (BGR array
        from cv2.imread), 'boxes' (numpy array of [x1, y1, x2, y2]),
        'captions' (detection category names).
        """
        image_id = self.image_ids[idx]
        image_fn = self.image_fns[idx]
        image_path = self.image_dir.joinpath(image_fn)
        assert Path(image_path).exists(), image_path
        img = cv2.imread(str(image_path))
        (H, W, C) = img.shape
        dets = self.id2dets[image_id]
        cat_names = [det['category_name'] for det in dets]
        boxes = []
        for (i, region) in enumerate([det['box'] for det in dets]):
            # Detections are stored as xywh; convert to corner coordinates
            # and sanity-check that they fall inside the image.
            (x, y, w, h) = region[:4]
            (x1, y1, x2, y2) = (x, y, (x + w), (y + h))
            assert (x2 <= W), (image_id, i, region)
            assert (y2 <= H), (image_id, i, region)
            box = [x1, y1, x2, y2]
            boxes.append(box)
        boxes = np.array(boxes)
        return {'img_id': str(image_id), 'img_fn': image_fn, 'img': img, 'boxes': boxes, 'captions': cat_names}
class SublayerConnection(nn.Module):
    """Pre-norm residual wrapper: ``x + dropout(sublayer(layer_norm(x)))``."""

    def __init__(self, length, d_model, dropout):
        super(SublayerConnection, self).__init__()
        # Normalize jointly over the last two dims (sequence length, model dim).
        self.norm = nn.LayerNorm(normalized_shape=[length, d_model])
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        """Apply *sublayer* to the normalized input and add the residual."""
        normed = self.norm(x)
        transformed = sublayer(normed)
        return x + self.dropout(transformed)
def main(args):
    """Resolve input/output directories, set up logging, and run ``process``."""
    assert len(args.tree_subsample_order)
    # Only the first subsample order is processed by this entry point.
    order = args.tree_subsample_order[0]
    in_dir = os.path.join(args.in_dir, f'tree_subsample_{order}')
    in_dir2 = os.path.join(args.in_dir2)
    out_dir = os.path.join(args.out_dir, order, args.scoring)
    os.makedirs(out_dir, exist_ok=True)
    logger = exp_util.get_logger(os.path.join(out_dir, 'log.txt'))
    logger.info(args)
    logger.info(datetime.now())
    process(args, in_dir, in_dir2, out_dir, logger)
def load_tokenizer(dataset):
    """Load (or build and cache) the tokenized training corpus for *dataset*.

    Reads ``./data/<dataset>/train_tokenized.pkl`` if it exists; otherwise
    tokenizes ``train.txt`` line by line via ``vocab_stoi.json`` (appending
    the ``<eos>`` id to every line), writes the cache, and returns it.

    Args:
        dataset: dataset directory name under ``./data/``.

    Returns:
        list[list[int]]: token-id sequences, one per training line.
    """
    data_dir = (('./data/' + dataset) + '/')
    if path.isfile((data_dir + 'train_tokenized.pkl')):
        # Context managers close file handles promptly; the original left
        # them open for the garbage collector to clean up.
        with open((data_dir + 'train_tokenized.pkl'), 'rb') as f:
            train_tokenized = pickle.load(f)
    else:
        # NOTE: the original also loaded vocab_freq.json but never used it;
        # that dead read (and its leaked handle) has been removed.
        train_tokenized = []
        with open((data_dir + 'vocab_stoi.json'), 'r') as f:
            vocab_stoi = json.load(f)
        with open((data_dir + 'train.txt'), 'r') as f:
            for line in f:
                words = line.split()
                train_tokenized.append(([vocab_stoi[ele] for ele in words] + [vocab_stoi['<eos>']]))
        with open((data_dir + 'train_tokenized.pkl'), 'wb') as f:
            pickle.dump(train_tokenized, f)
    return train_tokenized
@_registry(op_types='Conv, FusedConv')
class ConvOperator(Operator):
    """Quantization handler for Conv / FusedConv ONNX nodes.

    BUG FIX: the registry decorator was written as a bare statement
    (``_registry(...)`` on its own line), which calls the factory and
    discards the returned decorator, so this class was never registered.
    It must be applied with ``@``.
    """

    def __init__(self, onnx_quantizer, onnx_node):
        super(ConvOperator, self).__init__(onnx_quantizer, onnx_node)

    def quantize(self):
        """Insert Q/DQ for the node's input, weight, output, and bias.

        FusedConv is first rewritten to a plain Conv, dropping the fused
        activation attributes (Relu/Clip) that the quantized form handles.
        """
        node = self.node
        if (node.op_type == 'FusedConv'):
            kwargs = {}
            for attribute in node.attribute:
                # Drop fused-activation attributes; they are not Conv attrs.
                if ((attribute.name == 'activation') and (attribute.s in [b'Relu', b'Clip'])):
                    continue
                if (attribute.name == 'activation_params'):
                    continue
                kwargs.update(attribute_to_kwarg(attribute))
            conv = onnx.helper.make_node('Conv', node.input, node.output, node.name, **kwargs)
            node.CopyFrom(conv)
        self.quantizer.quantize_inputs(node, [0])
        if self.per_channel:
            # Weight (input 1) quantized per output channel (axis 0).
            self.quantizer.quantize_weights_per_channel(node, [1], self.weight_dtype, self.weight_scheme, 0)
        else:
            self.quantizer.quantize_inputs(node, [1])
        if ((not self.disable_qdq_for_node_output) or (self.quantizer.mode != 'qdq')):
            self.quantizer.quantize_outputs(node)
        if (len(node.input) == 3):
            # Optional bias input.
            self.quantizer.quantize_bias_tensor(node)
        node.name = (node.name + '_quant')

    def convert_check(self, convert_format):
        """Validate the requested conversion format for this op type."""
        node = self.node
        assert (convert_format in ['dynamic', 'static']), 'convert format for {} should be in [dynamic, static]'.format(node.op_type)
        return True

    def convert(self, convert_format):
        """Convert the QDQ-annotated Conv into its quantized operator.

        'dynamic' emits ConvInteger + Cast + scale Muls; 'static' fuses the
        surrounding Quantize/DequantizeLinear nodes into a QLinearConv.
        """
        node = self.node
        if (convert_format == 'dynamic'):
            inputs = []
            parents = self.quantizer.model.get_parents(node)
            if (parents[0].op_type == 'QuantizeLinear'):
                inputs.append(parents[0].output[0])
                inputs.append(parents[1].input[0])
                inputs.append(parents[0].input[2])
                inputs.append(parents[1].input[2])
                scale_0 = parents[0].input[1]
            else:
                inputs.append(parents[0].output[0])
                inputs.append(parents[1].input[0])
                inputs.append(parents[0].output[2])
                inputs.append(parents[1].input[2])
                scale_0 = parents[0].output[1]
            scale_1 = parents[1].input[1]
            quantized_bias_name = ''
            bias_present = False
            if (len(node.input) == 3):
                quantized_bias_name = (node.input[2] + '_quantized')
                bias_present = True
            conv_integer_output = (node.output[0] + '_output_quantized')
            kwargs = {}
            for attribute in node.attribute:
                if ((attribute.name == 'activation') and (attribute.s in [b'Relu', b'Clip'])):
                    continue
                if (attribute.name == 'activation_params'):
                    continue
                kwargs.update(attribute_to_kwarg(attribute))
            conv_integer_node = onnx.helper.make_node('ConvInteger', inputs, [conv_integer_output], node.name, **kwargs)
            self.quantizer.new_nodes.append(conv_integer_node)
            if bias_present:
                conv_integer_output = self.quantizer.get_bias_add_nodes(node, parents[1].input[0], conv_integer_output, quantized_bias_name)
            # ConvInteger yields int32; cast back to float before rescaling.
            cast_op_output = (conv_integer_output + '_cast_output')
            cast_node = onnx.helper.make_node('Cast', [conv_integer_output], [cast_op_output], (conv_integer_output + '_cast'), to=onnx_proto.TensorProto.FLOAT)
            self.quantizer.new_nodes.append(cast_node)
            # Real output = int_output * (input_scale * weight_scale);
            # reuse the scale-product Mul if an earlier conversion made it.
            scales_mul_op = (node.name + '_scales_mul')
            scales_mul_node = find_by_name(scales_mul_op, self.quantizer.new_nodes)
            if (scales_mul_node is None):
                scales_mul_node = onnx.helper.make_node('Mul', [scale_0, scale_1], [(scales_mul_op + ':0')], scales_mul_op)
                self.quantizer.new_nodes.append(scales_mul_node)
            scales_mul_op_output = scales_mul_node.output[0]
            output_scale_mul_op = (node.name + '_output_scale_mul')
            self.quantizer.new_nodes.append(onnx.helper.make_node('Mul', [cast_op_output, scales_mul_op_output], [node.output[0]], output_scale_mul_op))
            self.quantizer.remove_nodes.extend(parents[1:])
            self.quantizer.remove_nodes.append(node)
        elif (convert_format == 'static'):
            # Only convert nodes that were quantized (name suffix '_quant')
            # and still feed a downstream consumer.
            if ((len(self.quantizer.model.get_children(node)) == 0) or (not node.name.endswith('_quant'))):
                return
            parents = self.quantizer.model.get_parents(node)
            child = self.quantizer.model.get_children(node)[0]
            qlinear_conv_inputs = []
            # Inputs: quantized activation + weight (from DQ parents), then
            # the output quantization params from the child QuantizeLinear.
            for parent in parents[0:2]:
                qlinear_conv_inputs.extend(parent.input)
            qlinear_conv_inputs.extend(child.input[1:])
            if (len(parents) == 3):
                qlinear_conv_inputs.append(parents[(- 1)].input[0])
            qlinear_conv_output = child.output[0]
            kwargs = {}
            for attribute in node.attribute:
                if ((attribute.name == 'activation') and (attribute.s in [b'Relu', b'Clip'])):
                    continue
                if (attribute.name == 'activation_params'):
                    continue
                kwargs.update(attribute_to_kwarg(attribute))
            qlinear_conv_node = onnx.helper.make_node('QLinearConv', qlinear_conv_inputs, [qlinear_conv_output], node.name, **kwargs)
            self.quantizer.new_nodes.append(qlinear_conv_node)
            self.quantizer.remove_nodes.extend(parents)
            self.quantizer.remove_nodes.append(child)
            self.quantizer.remove_nodes.append(node)
class Algorithm():
    """Abstract base for (meta-)learning algorithms.

    Holds shared hyper-parameters and declares the interface that concrete
    algorithms must implement (train / evaluate / state (de)serialization).
    """

    def __init__(self, baselearner_fn, baselearner_args, opt_fn, T, T_val, T_test, test_batch_size, lr, dev, **kwargs):
        self.baselearner_fn = baselearner_fn
        self.baselearner_args = baselearner_args
        self.opt_fn = opt_fn
        self.T = T
        self.T_val = T_val
        self.T_test = T_test
        self.test_batch_size = test_batch_size
        self.lr = lr
        self.dev = dev
        # Subclasses may disable this for non-trainable baselines.
        self.trainable = True
        # Best-score sentinel: any real score improves on negative infinity.
        self.init_score = -float('inf')

    def train(self, train_x, train_y, test_x, test_y):
        """Run one training step on a task; must be overridden."""
        raise NotImplementedError()

    def evaluate(self, num_classes, train_x, train_y, test_x, test_y):
        """Evaluate on one task; must be overridden."""
        raise NotImplementedError()

    def dump_state(self):
        """Return a picklable snapshot of learnable state; must be overridden."""
        raise NotImplementedError()

    def load_state(self, state):
        """Restore state created by :meth:`dump_state`; must be overridden."""
        raise NotImplementedError()

    def store_file(self, filename):
        """Pickle the current state to *filename*."""
        state = self.dump_state()
        with open(filename, 'wb+') as f:
            pickle.dump(state, f)
def register_meshes(mesh_infos: Iterable[MeshInfo], base_path: Optional[str]) -> None:
    """Register each mesh in *mesh_infos*, resolving against *base_path*."""
    for info in mesh_infos:
        register_mesh(info, base_path)
def example():
    """Demo: filter and smooth noisy quadratic measurements, then plot both."""
    dt = 1.0 / 60
    # State is [position, velocity, acceleration] with a simple kinematic
    # transition; only position is observed (H picks the first component).
    F = np.array([[1, dt, 0], [0, 1, dt], [0, 0, 1]])
    H = np.array([1, 0, 0]).reshape(1, 3)
    Q = np.array([[0.05, 0.05, 0.0], [0.05, 0.05, 0.0], [0.0, 0.0, 0.0]])
    R = np.array([0.5]).reshape(1, 1)
    x = np.linspace(-10, 10, 100)
    # Noisy samples of an inverted parabola.
    measurements = -(x ** 2 + 2 * x - 2) + np.random.normal(0, 5, 100)
    kf = KalmanFilter(F=F, H=H, Q=Q, R=R)
    kf.fit(measurements)
    kf_predictions = [H.dot(mu)[0] for mu in kf.mus]
    ks = KalmanSmoother(F, Q, H, R)
    ks.fit(measurements)
    ks_predictions = [H.dot(mu)[0] for mu in ks.mus]
    steps = range(len(measurements))
    plt.plot(steps, measurements, label='Measurements')
    plt.plot(steps, np.array(kf_predictions), label='Kalman Filter Prediction')
    plt.plot(steps, np.array(ks_predictions), label='Kalman Smoother Prediction')
    plt.legend()
    plt.show()
@_REGISTRY.register()
def resnet18_ms_l123(pretrained=True, **kwargs):
    """Build a ResNet-18 backbone with MixStyle after layer1/2/3.

    BUG FIX: the registry decorator was a bare ``_REGISTRY.register()``
    statement, which calls the factory and discards the returned decorator,
    so the builder was never registered; it must be applied with ``@``.

    Args:
        pretrained: load ImageNet weights from ``model_urls['resnet18']``.
        **kwargs: accepted for registry-call compatibility (unused).

    Returns:
        The constructed ResNet model.
    """
    from dassl.modeling.ops import MixStyle
    model = ResNet(block=BasicBlock, layers=[2, 2, 2, 2], ms_class=MixStyle, ms_layers=['layer1', 'layer2', 'layer3'])
    if pretrained:
        init_pretrained_weights(model, model_urls['resnet18'])
    return model
def squeezenet1_0(pretrained=False, **kwargs):
    """Build SqueezeNet 1.0, optionally loading locally cached weights."""
    model = SqueezeNet(1.0)
    if not pretrained:
        return model
    weights_path = os.path.join(models_dir, squeeze1_0_model_name)
    model.load_state_dict(torch.load(weights_path))
    return model
def test_logger():
    """Smoke-test every logger flavor: log-level calls plus the progress-bar API."""
    # Map logger type to the class under test (same order as the original chain).
    type_to_class = {'normal': 'Logger', 'rich': 'RichLogger', 'dumb': 'DumbLogger'}
    for logger_type, class_name in type_to_class.items():
        print(f'===== Test `utils.logger.{class_name}` =====')
        logger = build_logger(logger_type, logger_name=logger_type, logfile_name=f'test_{logger_type}_logger.log')
        logger.print('print log')
        logger.debug('debug log')
        logger.info('info log')
        logger.warning('warning log')
        logger.init_pbar()
        task1 = logger.add_pbar_task('Task 1', 500)
        task2 = logger.add_pbar_task('Task 2', 1000)
        for _ in range(1000):
            logger.update_pbar(task1, 1)
            logger.update_pbar(task2, 1)
            time.sleep(0.005)
        logger.close_pbar()
        print('Success!')
def run_server(host='', port=8000, cmd_args=None):
    """Start the web app, recording host/port in module globals.

    ``cmd_args`` is accepted for CLI-dispatch compatibility but unused.
    """
    global HOSTNAME, PORT
    HOSTNAME, PORT = host, port
    app.run(host=host, port=port)
class Transformation():
    """Paired x/y scaler wrapper with forward (``do``) and inverse (``undo``)
    transforms.

    ``do``/``undo`` accept 1-D or 2-D inputs; 1-D inputs are promoted to 2-D
    for the scaler and squeezed back afterwards. The original duplicated the
    whole dispatch logic between ``do`` and ``undo``; it is factored into
    private helpers here with identical behavior.
    """

    def __init__(self, x_scaler, y_scaler):
        self.x_scaler = x_scaler
        self.y_scaler = y_scaler

    def fit(self, x, y):
        """Fit both scalers on their respective data."""
        self.x_scaler = self.x_scaler.fit(x)
        self.y_scaler = self.y_scaler.fit(y)

    def _apply(self, scaler, arr, method):
        # Promote to 2-D for the scaler, then squeeze back for 1-D input.
        res = getattr(scaler, method)(np.atleast_2d(arr))
        if (len(np.array(arr).shape) < 2):
            res = res.squeeze()
        return res

    def _dispatch(self, x, y, method):
        # Shared do/undo body: transform whichever of x/y was provided and
        # mirror the original's return shape (tuple when both are given).
        assert ((x is not None) or (y is not None))
        x_res = self._apply(self.x_scaler, x, method) if (x is not None) else None
        y_res = self._apply(self.y_scaler, y, method) if (y is not None) else None
        if ((x is not None) and (y is not None)):
            return (x_res, y_res)
        return x_res if (x is not None) else y_res

    def do(self, x=None, y=None):
        """Apply the fitted forward transform to x and/or y."""
        return self._dispatch(x, y, 'transform')

    def undo(self, x=None, y=None):
        """Apply the fitted inverse transform to x and/or y."""
        return self._dispatch(x, y, 'inverse_transform')
class CallableModule(types.ModuleType):
    """Module subclass that makes ``import thismod; thismod(x)`` work.

    Intended for the sys.modules-replacement trick: an instance copies the
    real module's namespace so attribute access keeps working, while calling
    the module forwards to :func:`show`.
    """

    def __init__(self):
        types.ModuleType.__init__(self, __name__)
        # Mirror the current module's namespace so existing attributes remain
        # reachable after this object replaces the module in sys.modules.
        self.__dict__.update(sys.modules[__name__].__dict__)

    def __call__(self, x=None, *args, **kwargs):
        # Calling the module is shorthand for show(...); the return value of
        # show is deliberately discarded.
        show(x, *args, **kwargs)
def test_simple_env_metric_1():
    """SimpleEnvMetric: 'last' reduction in train mode, identity in eval mode."""
    env = MockEnv()
    metric = ph.metrics.SimpleEnvMetric(env_property='test_property', train_reduce_action='last', eval_reduce_action='none')
    values = []
    for _ in range(5):
        env.step()
        values.append(metric.extract(env))
    # Train mode keeps only the final recorded value.
    assert metric.reduce(values, mode='train') == 5.0
    # Evaluate mode returns the full series unchanged.
    assert np.all(metric.reduce(values, mode='evaluate') == np.arange(1.0, 6.0))
class USInvertedResidual(nn.Module):
    """MobileNetV2-style inverted residual block built from width-adjustable
    ("US", presumably universally-slimmable -- TODO confirm) conv/BN layers.

    Structure: optional 1x1 expand (skipped when t == 1), 3x3 depthwise,
    1x1 project; a residual connection is added when stride == 1 and the
    input/output channel counts match.
    """

    def __init__(self, inplanes, outplanes, stride, t, expand=1.0):
        # t is the channel expansion ratio of the hidden depthwise stage.
        super(USInvertedResidual, self).__init__()
        assert (stride in [1, 2])
        self.inplanes = inplanes
        self.outplanes = outplanes
        self.stride = stride
        self.t = t
        hidden_dim = int((inplanes * t))
        self.hidden_dim = hidden_dim
        self.expand = expand
        self.relu = nn.ReLU6(inplace=True)
        if (t != 1):
            # 1x1 pointwise expansion (only when expanding, t != 1).
            self.conv1 = USConv2d(inplanes, hidden_dim, 1, bias=False, expand=expand)
            self.bn1 = USBatchNorm2d(hidden_dim, expand=expand)
        # 3x3 depthwise conv (groups == channels).
        self.conv2 = USConv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False, expand=expand)
        self.bn2 = USBatchNorm2d(hidden_dim, expand=expand)
        # 1x1 linear projection back to outplanes (no activation after bn3).
        self.conv3 = USConv2d(hidden_dim, outplanes, 1, bias=False, expand=expand)
        self.bn3 = USBatchNorm2d(outplanes, expand=expand)

    def forward(self, x):
        """Run expand -> depthwise -> project, adding the residual when legal."""
        residual = x
        if (self.t != 1):
            out = self.conv1(x)
            out = self.bn1(out)
            out = self.relu(out)
        else:
            out = x
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if ((self.stride == 1) and (self.inplanes == self.outplanes)):
            out += residual
        return out

    def non_uniform_set_width(self, ch_in, candidate):
        """Sample per-layer channel widths given the incoming width *ch_in*.

        Propagates a randomly sampled hidden width through conv1/conv2, then
        either pins the output width to ch_in (so the residual add stays
        shape-compatible) or samples a fresh output width from *candidate*.
        Returns the resulting output channel count.
        """
        if (self.t == 1):
            # No expansion layer: hidden width equals the input width.
            ch_out = ch_in
        else:
            self.conv1.set_input_width(specific_ch=ch_in)
            (_, ch_out) = self.conv1.random_sample_output_width(candidate)
            self.bn1.set_output_width(specific_ch=ch_out)
        # Depthwise stage keeps input and output widths equal.
        self.conv2.set_input_width(specific_ch=ch_out)
        self.conv2.set_output_width(specific_ch=ch_out)
        self.bn2.set_output_width(specific_ch=ch_out)
        self.conv3.set_input_width(specific_ch=ch_out)
        if ((self.stride == 1) and (self.inplanes == self.outplanes)):
            # Residual blocks must emit exactly ch_in channels for the add.
            self.conv3.set_output_width(specific_ch=ch_in)
            self.bn3.set_output_width(specific_ch=ch_in)
            ch_out = ch_in
        else:
            (_, ch_out) = self.conv3.random_sample_output_width(candidate)
            self.bn3.set_output_width(specific_ch=ch_out)
        return ch_out
def encrypt_bytes_with_AES_CBC(plain_text_bytes, secret_key, salt, key_len=128, block_size=16):
    """Encrypt *plain_text_bytes* with AES-CBC under a derived key.

    Args:
        plain_text_bytes: raw bytes to encrypt.
        secret_key: passphrase fed to the key-derivation function.
        salt: KDF salt.
        key_len: derived-key length in bits (128/192/256).
        block_size: cipher block size in bytes (16 for AES).

    Returns:
        bytes: the random IV followed by the ciphertext.
    """
    key = get_private_key(secret_key, salt, key_len)
    iv = os.urandom(block_size)
    # BUG FIX: PKCS7 must be sized by the cipher BLOCK size in bits
    # (block_size * 8 == 128 for AES), not the key length. The original
    # passed key_len, which produced padding that is invalid for AES's
    # 16-byte blocks whenever key_len != 128. Defaults are unaffected.
    padder = padding.PKCS7(block_size * 8).padder()
    data = padder.update(plain_text_bytes) + padder.finalize()
    encryptor = Cipher(algorithms.AES(key), modes.CBC(iv), backend=back_end).encryptor()
    ct = encryptor.update(data) + encryptor.finalize()
    return iv + ct
def _read_stream(fd, fn):
while True:
buff = fd.read(8192)
if buff:
fn(buff) |
class TestBuilders(unittest.TestCase):
    """Unit tests for the transformer builder classes: attribute-style and
    kwargs/namespace construction, attention-type validation, and parameter
    forwarding to attention implementations."""

    def test_simple_build(self):
        """Defaults build; attribute assignment builds; bad attention_type raises."""
        transformer = TransformerEncoderBuilder().get()
        builder = TransformerEncoderBuilder()
        builder.n_layers = 1
        builder.n_heads = 4
        builder.query_dimensions = 32
        builder.attention_type = 'linear'
        transformer = builder.get()
        with self.assertRaises(ValueError):
            builder = TransformerEncoderBuilder()
            builder.attention_type = 'whatever'

    def test_builder_factory_methods(self):
        """from_kwargs (strict and lenient) and from_namespace work."""
        builder = TransformerEncoderBuilder.from_kwargs(n_layers=1, n_heads=4, query_dimensions=32, attention_type='linear')
        with self.assertRaises(ValueError):
            TransformerEncoderBuilder.from_kwargs(foobar=1)
        # strict=False silently ignores unknown parameters.
        TransformerEncoderBuilder.from_kwargs(foobar=1, strict=False)
        parser = argparse.ArgumentParser()
        parser.add_argument('--n_layers', type=int)
        parser.add_argument('--n_heads', type=int)
        args = parser.parse_args(['--n_heads', '42'])
        builder = TransformerEncoderBuilder.from_namespace(args)
        self.assertEqual(builder.n_heads, 42)
        # Unset argparse options stay None rather than overriding defaults.
        self.assertTrue((builder.n_layers is None))

    def test_recurrent_build(self):
        """Same checks as test_simple_build for the recurrent encoder builder."""
        transformer = RecurrentEncoderBuilder().get()
        builder = RecurrentEncoderBuilder()
        builder.n_layers = 1
        builder.n_heads = 4
        builder.query_dimensions = 32
        builder.attention_type = 'linear'
        transformer = builder.get()
        with self.assertRaises(ValueError):
            builder.attention_type = 'whatever'

    def test_decoder_build(self):
        """Decoder builder: self/cross attention validation and a forward pass."""
        transformer = TransformerDecoderBuilder().get()
        builder = TransformerDecoderBuilder()
        builder.n_layers = 1
        builder.n_heads = 4
        builder.query_dimensions = 32
        builder.self_attention_type = 'linear'
        transformer = builder.get()
        with self.assertRaises(ValueError):
            builder = TransformerDecoderBuilder()
            builder.self_attention_type = 'whatever'
        with self.assertRaises(ValueError):
            builder = TransformerDecoderBuilder()
            builder.cross_attention_type = 'whatever'
        builder.cross_n_heads = 7
        builder.cross_value_dimensions = 32
        transformer = builder.get()
        # x: target sequence (4 heads x 64 dims); m: memory (7 heads x 32 dims).
        x = torch.rand(1, 20, (4 * 64))
        m = torch.rand(1, 13, (7 * 32))
        y = transformer(x, m)
        t = TransformerDecoderBuilder.from_kwargs(n_layers=1, n_heads=4, query_dimensions=32, cross_n_heads=7, cross_value_dimensions=32, cross_query_dimensions=32).get()

    def test_recurrent_decoder(self):
        """Recurrent decoder: validation plus stepwise calls threading state."""
        transformer = RecurrentDecoderBuilder().get()
        builder = RecurrentDecoderBuilder()
        builder.n_layers = 1
        builder.n_heads = 4
        builder.query_dimensions = 32
        builder.self_attention_type = 'linear'
        transformer = builder.get()
        with self.assertRaises(ValueError):
            builder = RecurrentDecoderBuilder()
            builder.self_attention_type = 'whatever'
        with self.assertRaises(ValueError):
            builder = RecurrentDecoderBuilder()
            builder.cross_attention_type = 'whatever'
        builder.cross_n_heads = 7
        builder.cross_value_dimensions = 32
        transformer = builder.get()
        # Recurrent decoding consumes one timestep at a time and returns the
        # updated state, which is fed back on the next call.
        x = torch.rand(1, (4 * 64))
        m = torch.rand(1, 13, (7 * 32))
        (y, s) = transformer(x, m)
        (y, s) = transformer(x, m, state=s)

    def test_attention_parameter(self):
        """All known attention parameters are settable; unknown names raise."""
        builder = TransformerEncoderBuilder()
        builder.n_layers = 3
        builder.n_heads = 4
        builder.feed_forward_dimensions = 512
        builder.query_dimensions = 32
        builder.value_dimensions = 64
        builder.dropout = 0.1
        builder.activation = 'relu'
        builder.final_normalization = True
        builder.softmax_temp = 1.0
        builder.attention_dropout = 0.1
        builder.feature_map = (lambda x: ((x > 0).float() * x))
        builder.clusters = 100
        builder.iterations = 10
        builder.bits = 32
        builder.hash_bias = True
        builder.topk = 32
        builder.length_limit = 512
        builder.chunk_size = 32
        builder.rounds = 1
        invalid = ['dropout_rate']
        for name in invalid:
            with self.assertRaises(AttributeError):
                setattr(builder, name, None)

    def test_attention_composition(self):
        """Composite attention types ('a:b') build; missing components fail."""
        transformer = TransformerEncoderBuilder.from_kwargs(attention_type='conditional-full:improved-clustered', attention_dropout=0.1, softmax_temp=0.125, clusters=256, bits=32, topk=32, length_limit=512).get()
        with self.assertRaises(TypeError):
            transformer = TransformerEncoderBuilder.from_kwargs(attention_type='conditional-full', attention_dropout=0.1, softmax_temp=0.125, length_limit=512).get()

    def test_transformer_parameters_to_attention(self):
        """Transformer-level params are forwarded to the attention constructor."""
        with self.assertRaises(ValueError):
            transformer = TransformerEncoderBuilder.from_kwargs(attention_type='test-attention').get()
        transformer = TransformerEncoderBuilder.from_kwargs(attention_type='test-attention', n_heads=8, query_dimensions=64).get()
class ResNet(nn.Module):
    """Truncated ResNet backbone: stem plus layer1-layer3 only (no layer4,
    pooling, or classifier), returning layer3 feature maps.

    All stages use Bottleneck blocks; ``drop_path_rate`` is linearly ramped
    across the blocks of each stage.
    """

    def __init__(self, layers, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None, drop_path_rate=0.0):
        # layers: number of blocks per stage, e.g. [3, 4, 6] -- only the
        # first three entries are consumed here.
        super(ResNet, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if (replace_stride_with_dilation is None):
            replace_stride_with_dilation = [False, False, False]
        if (len(replace_stride_with_dilation) != 3):
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Standard ResNet stem: 7x7 stride-2 conv, BN, ReLU, 3x3 max-pool.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(Bottleneck, 64, layers[0], drop_path_rate=drop_path_rate)
        self.layer2 = self._make_layer(Bottleneck, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0], drop_path_rate=drop_path_rate)
        self.layer3 = self._make_layer(Bottleneck, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1], drop_path_rate=drop_path_rate)
        # Kaiming init for convs; unit-gain init for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.SyncBatchNorm, nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            # Zeroing the last BN of each residual branch makes every block
            # start as an identity mapping.
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False, drop_path_rate=0.0):
        """Build one residual stage of *blocks* blocks.

        A 1x1 downsample projection is added when the spatial stride or
        channel count changes; with dilate=True the stride is converted to
        dilation instead (for dense prediction).
        """
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
        self.inplanes = (planes * block.expansion)
        # Linearly increasing stochastic-depth rate across this stage's blocks.
        # NOTE(review): the first block above is built without a drop_path_rate
        # argument, so dpr[0] is unused -- confirm this is intentional.
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, blocks)]
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer, drop_path_rate=dpr[i]))
        return nn.Sequential(*layers)

    def _forward_impl(self, x):
        """Stem + three stages; returns layer3 feature maps."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        return x

    def forward(self, x):
        return self._forward_impl(x)
def text_to_words(review_text):
    """Strip every non-letter from *review_text* and return lowercase tokens."""
    letters = re.sub('[^a-zA-Z]', ' ', review_text)
    return letters.lower().split()
class WarmupCosineLR(torch.optim.lr_scheduler._LRScheduler):
    """Cosine-annealing learning-rate schedule with an initial warmup phase."""

    def __init__(self, optimizer: torch.optim.Optimizer, max_iters: int, warmup_factor: float=0.001, warmup_iters: int=1000, warmup_method: str='linear', last_epoch: int=(- 1)):
        self.max_iters = max_iters
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super().__init__(optimizer, last_epoch)

    def get_lr(self) -> List[float]:
        """Return the per-group learning rates for the current iteration."""
        # Warmup multiplier ramps from warmup_factor toward 1 over warmup_iters.
        warmup = _get_warmup_factor_at_iter(self.warmup_method, self.last_epoch, self.warmup_iters, self.warmup_factor)
        # Cosine decay from base_lr down to 0 across max_iters iterations.
        cosine = 0.5 * (1.0 + math.cos(math.pi * self.last_epoch / self.max_iters))
        return [base_lr * warmup * cosine for base_lr in self.base_lrs]

    def _compute_values(self) -> List[float]:
        # Compatibility hook: delegate to get_lr.
        return self.get_lr()
def check_and_return_expected(value, undefined_value, expected_value, name=None):
    """Validate that *value* is either undefined or equals *expected_value*.

    Args:
        value: the value to check.
        undefined_value: sentinel meaning "not set"; a ``value`` equal to it
            (or both being None) is accepted without comparison.
        expected_value: the required value when ``value`` is defined.
        name: optional label used in the error message.

    Returns:
        ``expected_value`` when the check passes.

    Raises:
        ValueError: when ``value`` is defined but differs from
        ``expected_value``.
    """
    is_undefined = (undefined_value is None and value is None) or undefined_value == value
    if is_undefined:
        return expected_value
    if value != expected_value:
        prefix = '' if name is None else '{} '.format(name)
        shown = '{}'.format(value) if name is None else '({})'.format(value)
        raise ValueError('Expected {}{} == {}'.format(prefix, shown, expected_value))
    return expected_value
def all_reduce_item(value, op='sum'):
    """All-reduce a scalar across distributed workers.

    Returns *value* unchanged when torch.distributed is unavailable or not
    initialized. 'mean' is implemented as SUM followed by division by the
    world size. Raises RuntimeError for an unknown op or backend.
    """
    dist = torch.distributed
    if not (dist.is_available() and dist.is_initialized()):
        # Single-process run: nothing to reduce.
        return value
    op_map = {'sum': dist.ReduceOp.SUM, 'mean': dist.ReduceOp.SUM, 'min': dist.ReduceOp.MIN, 'max': dist.ReduceOp.MAX, 'product': dist.ReduceOp.PRODUCT}
    if op not in op_map:
        raise RuntimeError('Unsupported reduce op')
    dop = op_map[op]
    # Pick the device the backend can communicate over.
    backend = dist.get_backend()
    if backend == dist.Backend.NCCL:
        device = torch.device('cuda')
    elif backend == dist.Backend.GLOO:
        device = torch.device('cpu')
    else:
        raise RuntimeError('Unsupported distributed backend')
    tensor = torch.tensor(value, device=device)
    dist.all_reduce(tensor, dop)
    if op == 'mean':
        tensor /= get_world_size()
    return tensor.item()
def valid_data_dict(data_dict):
    """Return True when *data_dict* is either None or a dict instance."""
    return data_dict is None or isinstance(data_dict, dict)
class Bottleneck(nn.Module):
    """Pre-activation bottleneck block (BN -> ReLU -> conv), expansion 2.

    Stages: 1x1 reduce, 3x3 (carries the stride), 1x1 expand to planes * 2.
    The optional downsample module adapts the residual path.
    """
    expansion = 2

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(inplanes, momentum=BN_MOMENTUM)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=True)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
        self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(planes, planes * 2, kernel_size=1, bias=True)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Three pre-activation conv stages plus the residual connection."""
        out = self.conv1(self.relu(self.bn1(x)))
        out = self.conv2(self.relu(self.bn2(out)))
        out = self.conv3(self.relu(self.bn3(out)))
        shortcut = x if self.downsample is None else self.downsample(x)
        out += shortcut
        return out
def get_args(args: dict, eval: bool=False):
    """Parse CLI overrides, merge them into `args`, configure output folders,
    cluster/DDP state and seeding, then validate the final configuration.

    Parameters
    ----------
    args : dict
        Default configuration. Contains nested 'model' and 'optimizer'
        sub-dicts; any CLI flag that is not None overrides the matching key.
    eval : bool
        Evaluation mode: forwarded to `outfd` and disables the
        "experiment already done" early exit.

    Returns
    -------
    (args, args_dict) : (Dict2Obj, dict)
        The validated configuration as an attribute object and as a plain
        dict deep-copy (taken before the Dict2Obj wrap).

    Fix: the `np.random.randint` call used to build the scratch-folder tag
    had a missing upper bound (`high=`), which is a syntax error.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--cudaid', type=str, default=None, help="cuda id: '0,1,2,3'")
    parser.add_argument('--MYSEED', type=str, default=None, help='Seed.')
    parser.add_argument('--debug_subfolder', type=str, default=None, help="Name of subfold for debugging. Default: ''.")
    parser.add_argument('--dataset', type=str, default=None, help='Name of the dataset.')
    parser.add_argument('--fold', type=int, default=None, help='Fold of dataset.')
    parser.add_argument('--magnification', type=str, default=None, help='Magnififcation of BreakHis dataset.')
    parser.add_argument('--runmode', type=str, default=None, help='Run mode: hyper-parameter search or final.')
    parser.add_argument('--num_classes', type=int, default=None, help='Number of classes in the dataset.')
    parser.add_argument('--crop_size', type=int, default=None, help='Crop size (int) of the patches in training.')
    parser.add_argument('--resize_size', type=int, default=None, help='Resize image into this size before processing.')
    parser.add_argument('--max_epochs', type=int, default=None, help='Max epoch.')
    parser.add_argument('--batch_size', type=int, default=None, help='Training batch size (optimizer).')
    parser.add_argument('--num_workers', type=int, default=None, help='Number of workers for dataloader multi-proc.')
    parser.add_argument('--exp_id', type=str, default=None, help='Exp id.')
    parser.add_argument('--verbose', type=str2bool, default=None, help='Verbosity (bool).')
    parser.add_argument('--fd_exp', type=str, default=None, help='Relative path to exp folder.')
    parser.add_argument('--data_root', default=None, help='path to dataset images')
    parser.add_argument('--metadata_root', type=str, default=None)
    parser.add_argument('--mask_root', default=None, help='path to masks')
    parser.add_argument('--proxy_training_set', type=str2bool, default=None, help='Efficient hyper_parameter search with a proxy training set.')
    parser.add_argument('--num_val_sample_per_class', type=int, default=None, help='Number of full_supervision validation sample per class. 0 means "use all available samples".')
    parser.add_argument('--cam_curve_interval', type=float, default=None, help='CAM curve interval')
    parser.add_argument('--multi_contour_eval', type=str2bool, default=None)
    parser.add_argument('--multi_iou_eval', type=str2bool, default=None)
    parser.add_argument('--box_v2_metric', type=str2bool, default=None)
    parser.add_argument('--eval_checkpoint_type', type=str, default=None)
    parser.add_argument('--opt__name_optimizer', type=str, default=None, help="Name of the optimizer 'sgd', 'adam'.")
    parser.add_argument('--opt__lr', type=float, default=None, help='Learning rate (optimizer)')
    parser.add_argument('--opt__momentum', type=float, default=None, help='Momentum (optimizer)')
    parser.add_argument('--opt__dampening', type=float, default=None, help='Dampening for Momentum (optimizer)')
    parser.add_argument('--opt__nesterov', type=str2bool, default=None, help='Nesterov or not for Momentum (optimizer)')
    parser.add_argument('--opt__weight_decay', type=float, default=None, help='Weight decay (optimizer)')
    parser.add_argument('--opt__beta1', type=float, default=None, help='Beta1 for adam (optimizer)')
    parser.add_argument('--opt__beta2', type=float, default=None, help='Beta2 for adam (optimizer)')
    parser.add_argument('--opt__eps_adam', type=float, default=None, help='eps for adam (optimizer)')
    parser.add_argument('--opt__amsgrad', type=str2bool, default=None, help='amsgrad for adam (optimizer)')
    parser.add_argument('--opt__lr_scheduler', type=str2bool, default=None, help='Whether to use or not a lr scheduler')
    parser.add_argument('--opt__name_lr_scheduler', type=str, default=None, help='Name of the lr scheduler')
    parser.add_argument('--opt__gamma', type=float, default=None, help='Gamma of the lr scheduler. (mystep)')
    parser.add_argument('--opt__last_epoch', type=int, default=None, help='Index last epoch to stop adjust LR(mystep)')
    parser.add_argument('--opt__min_lr', type=float, default=None, help='Minimum allowed value for lr.')
    parser.add_argument('--opt__t_max', type=float, default=None, help='T_max, maximum epochs to restart. (cosine)')
    parser.add_argument('--opt__step_size', type=int, default=None, help='Step size for lr scheduler.')
    parser.add_argument('--opt__lr_classifier_ratio', type=float, default=None, help='Multiplicative factor for the classifier head learning rate.')
    parser.add_argument('--arch', type=str, default=None, help="model's name.")
    parser.add_argument('--encoder_name', type=str, default=None, help='Name of the backbone')
    parser.add_argument('--in_channels', type=int, default=None, help='Input channels number.')
    parser.add_argument('--strict', type=str2bool, default=None, help='strict mode for loading weights.')
    parser.add_argument('--encoder_weights', type=str, default=None, help='Pre-trained weights.')
    parser.add_argument('--path_pre_trained', type=str, default=None, help='Absolute/relative path to file of weights.')
    parser.add_argument('--support_background', type=str2bool, default=None, help='use or not 1 extra plan for background cams.')
    parser.add_argument('--scale_in', type=float, default=None, help='How much to scale the input.')
    parser.add_argument('--freeze_cl', type=str2bool, default=None, help='whether or not to freeze the classifier.')
    parser.add_argument('--folder_pre_trained_cl', type=str, default=None, help="NAME of folder containing classifier's weights.")
    parser.add_argument('--method', type=str, default=None, help='Name of method.')
    parser.add_argument('--spatial_pooling', type=str, default=None, help='Name of spatial pooling for classification.')
    parser.add_argument('--wc_alpha', type=float, default=None, help='Alpha (classifier, wildcat)')
    parser.add_argument('--wc_kmax', type=float, default=None, help='Kmax (classifier, wildcat)')
    parser.add_argument('--wc_kmin', type=float, default=None, help='Kmin (classifier, wildcat)')
    parser.add_argument('--wc_dropout', type=float, default=None, help='Dropout (classifier, wildcat)')
    parser.add_argument('--wc_modalities', type=int, default=None, help='Number of modalities (classifier, wildcat)')
    parser.add_argument('--lse_r', type=float, default=None, help='LSE r pooling.')
    parser.add_argument('--seg_mode', type=str, default=None, help='Segmentation mode.')
    parser.add_argument('--task', type=str, default=None, help='Type of the task.')
    parser.add_argument('--multi_label_flag', type=str2bool, default=None, help='Whether the dataset is multi-label.')
    parser.add_argument('--elb_init_t', type=float, default=None, help='Init t for elb.')
    parser.add_argument('--elb_max_t', type=float, default=None, help='Max t for elb.')
    parser.add_argument('--elb_mulcoef', type=float, default=None, help='Multi. coef. for elb..')
    parser.add_argument('--crf_fc', type=str2bool, default=None, help='CRF over fcams flag.')
    parser.add_argument('--crf_lambda', type=float, default=None, help='Lambda for crf flag.')
    parser.add_argument('--crf_sigma_rgb', type=float, default=None, help='sigma rgb of crf flag.')
    parser.add_argument('--crf_sigma_xy', type=float, default=None, help='sigma xy for crf flag.')
    parser.add_argument('--crf_scale', type=float, default=None, help='scale factor for crf flag.')
    parser.add_argument('--crf_start_ep', type=int, default=None, help='epoch start crf loss.')
    parser.add_argument('--crf_end_ep', type=int, default=None, help='epoch end crf loss. use -1 for end training.')
    parser.add_argument('--entropy_fc', type=str2bool, default=None, help='Entropy over fcams flag.')
    parser.add_argument('--entropy_fc_lambda', type=float, default=None, help='lambda for entropy over fcams flag.')
    parser.add_argument('--max_sizepos_fc', type=str2bool, default=None, help='Max size pos fcams flag.')
    parser.add_argument('--max_sizepos_fc_lambda', type=float, default=None, help='lambda for max size low pos fcams flag.')
    parser.add_argument('--max_sizepos_fc_start_ep', type=int, default=None, help='epoch start maxsz loss.')
    parser.add_argument('--max_sizepos_fc_end_ep', type=int, default=None, help='epoch end maxsz. -1 for end training.')
    parser.add_argument('--im_rec', type=str2bool, default=None, help='image reconstruction flag.')
    parser.add_argument('--im_rec_lambda', type=float, default=None, help='Lambda for image reconstruction.')
    parser.add_argument('--im_rec_elb', type=str2bool, default=None, help='use/not elb for image reconstruction.')
    parser.add_argument('--sl_fc', type=str2bool, default=None, help='Self-learning over fcams.')
    parser.add_argument('--sl_fc_lambda', type=float, default=None, help='Lambda for self-learning fcams.')
    parser.add_argument('--sl_start_ep', type=int, default=None, help='Start epoch for self-learning fcams.')
    parser.add_argument('--sl_end_ep', type=int, default=None, help='End epoch for self-learning fcams.')
    parser.add_argument('--sl_min', type=int, default=None, help='MIN for self-learning fcams.')
    parser.add_argument('--sl_max', type=int, default=None, help='MAX for self-learning fcams.')
    parser.add_argument('--sl_ksz', type=int, default=None, help='Kernel size for dilation for self-learning fcams.')
    parser.add_argument('--sl_min_p', type=float, default=None, help='Percentage of pixels to be considered background to sample from.')
    parser.add_argument('--sl_fg_erode_k', type=int, default=None, help='Kernel size of erosion for foreground.')
    parser.add_argument('--sl_fg_erode_iter', type=int, default=None, help='Number of time to perform erosion over foreground.')
    parser.add_argument('--sl_min_ext', type=int, default=None, help='MIN extent for self-learning fcams.')
    parser.add_argument('--sl_max_ext', type=int, default=None, help='MAX extent for self-learning fcams.')
    parser.add_argument('--sl_block', type=int, default=None, help='Size of the blocks for self-learning fcams.')
    parser.add_argument('--seg_ignore_idx', type=int, default=None, help='Ignore index for segmentation.')
    parser.add_argument('--amp', type=str2bool, default=None, help='Whether to use automatic mixed precision for training.')
    parser.add_argument('--amp_eval', type=str2bool, default=None, help='Whether to use automatic mixed precision for inference.')
    parser.add_argument('--local_rank', type=int, default=None, help='DDP. Local rank. Set too zero if you are using one node. not CC().')
    parser.add_argument('--local_world_size', type=int, default=None, help='DDP. Local world size: number of gpus per node. Not CC().')
    parser.add_argument('--init_method', default=None, type=str, help='DDP. init method. CC().')
    parser.add_argument('--dist_backend', default=None, type=str, help='DDP. Distributed backend. CC()')
    parser.add_argument('--world_size', type=int, default=None, help='DDP. World size. CC().')
    parser.add_argument('--adl_drop_rate', type=float, default=None, help='Float.drop-rate for ADL.')
    parser.add_argument('--adl_drop_threshold', type=float, default=None, help='Float. threshold for ADL.')
    parser.add_argument('--adl_large_feature_map', type=str2bool, default=None, help='Use/not large feature maps for ADL.')
    parser.add_argument('--acol_drop_threshold', type=float, default=None, help='Float. threshold for ACOL.')
    parser.add_argument('--acol_large_feature_map', type=str2bool, default=None, help='Use/not large feature maps for ACOL.')
    parser.add_argument('--spg_threshold_1h', type=float, default=None, help='SPG threshold')
    parser.add_argument('--spg_threshold_1l', type=float, default=None, help='SPG threshold')
    parser.add_argument('--spg_threshold_2h', type=float, default=None, help='SPG threshold')
    parser.add_argument('--spg_threshold_2l', type=float, default=None, help='SPG threshold')
    parser.add_argument('--spg_threshold_3h', type=float, default=None, help='SPG threshold')
    parser.add_argument('--spg_threshold_3l', type=float, default=None, help='SPG threshold')
    parser.add_argument('--spg_large_feature_map', type=str2bool, default=None, help='Use/not large feature maps for SPG.')
    parser.add_argument('--has_grid_size', type=int, default=None, help='HAS patch size. int.')
    parser.add_argument('--has_drop_rate', type=float, default=None, help='HAS. percentage of patches to be dropped.[0, 1[')
    parser.add_argument('--cutmix_beta', type=float, default=None, help='CUTMIX beta.')
    parser.add_argument('--cutmix_prob', type=float, default=None, help='CUTMIX. probablity to do it over a minibatch.')
    parser.add_argument('--mil_mid_channels', type=int, default=None, help='Deep mil mid-channels.')
    parser.add_argument('--mil_gated', type=str2bool, default=None, help='Deep mil attention type.')
    parser.add_argument('--maxmin_w', type=float, default=None, help='maxmin w.')
    parser.add_argument('--minmax_lambda_size', type=float, default=None, help='maxmin lambda size.')
    parser.add_argument('--minmax_lambda_neg', type=float, default=None, help='minmax lambda negative info.')
    parser.add_argument('--prm_ks', type=int, default=None, help='PRM: kernel size.')
    parser.add_argument('--prm_st', type=int, default=None, help='PRM: kernel stride.')
    parser.add_argument('--sl_ng', type=str2bool, default=None, help='negev: self-learning on/off.')
    parser.add_argument('--sl_ng_seeder', type=str, default=None, help='negev: self-learning: seeder type.')
    parser.add_argument('--sl_ng_lambda', type=float, default=None, help='negev: self-learning: lambda.')
    parser.add_argument('--sl_ng_start_ep', type=int, default=None, help='negev: self-learning: start epoch.')
    parser.add_argument('--sl_ng_end_ep', type=int, default=None, help='negev: self-learning: end epoch.')
    parser.add_argument('--sl_ng_min', type=int, default=None, help='negev: self-learning: seeds to sample background.')
    parser.add_argument('--sl_ng_max', type=int, default=None, help='negev: self-learning: seeds to sample foreground.')
    parser.add_argument('--sl_ng_ksz', type=int, default=None, help='negev: self-learning: kernel size dilation.')
    parser.add_argument('--sl_ng_min_ext', type=int, default=None, help='negev: self-learning: extent background region.')
    parser.add_argument('--sl_ng_max_ext', type=int, default=None, help='negev: self-learning: extent foreground region.')
    parser.add_argument('--sl_ng_block', type=int, default=None, help='negev: self-learning: size sampling block for seeds.')
    parser.add_argument('--sl_ng_min_p', type=float, default=None, help='negev: self-learning: percentage to be considered background.')
    parser.add_argument('--sl_ng_fg_erode_k', type=int, default=None, help='negev: self-learning: Erosion kernel size.')
    parser.add_argument('--sl_ng_fg_erode_iter', type=int, default=None, help='negev: self-learning: number erosion iterations.')
    parser.add_argument('--crf_ng', type=str2bool, default=None, help='negev: crf: on/off.')
    parser.add_argument('--crf_ng_lambda', type=float, default=None, help='negev: crf: lambda.')
    parser.add_argument('--crf_ng_sigma_rgb', type=float, default=None, help='negev: crf: sigma rgb.')
    parser.add_argument('--crf_ng_sigma_xy', type=float, default=None, help='negev: crf: sigma xy.')
    parser.add_argument('--crf_ng_scale', type=float, default=None, help='negev: crf: scale image.')
    parser.add_argument('--crf_ng_start_ep', type=int, default=None, help='negev: crf: start epoch.')
    parser.add_argument('--crf_ng_end_ep', type=int, default=None, help='negev: crf: end epoch.')
    parser.add_argument('--jcrf_ng', type=str2bool, default=None, help='negev: jcrf: on/off.')
    parser.add_argument('--jcrf_ng_lambda', type=float, default=None, help='negev: jcrf: lambda.')
    parser.add_argument('--jcrf_ng_sigma_rgb', type=float, default=None, help='negev: jcrf: sigma rgb.')
    parser.add_argument('--jcrf_ng_scale', type=float, default=None, help='negev: jcrf: scale image.')
    parser.add_argument('--jcrf_ng_start_ep', type=int, default=None, help='negev: jcrf: start epoch.')
    parser.add_argument('--jcrf_ng_end_ep', type=int, default=None, help='negev: jcrf: end epoch.')
    parser.add_argument('--jcrf_ng_pair_mode', type=str, default=None, help='negev: jcrf: pairing mode.')
    parser.add_argument('--jcrf_ng_n', type=int, default=None, help='negev: jcrf: number of samples to pair with.')
    parser.add_argument('--max_sizepos_ng', type=str2bool, default=None, help='negev: size const: on/off.')
    parser.add_argument('--max_sizepos_ng_lambda', type=float, default=None, help='negev: size const: lambda.')
    parser.add_argument('--max_sizepos_ng_start_ep', type=int, default=None, help='negev: size const: start epoch.')
    parser.add_argument('--max_sizepos_ng_end_ep', type=int, default=None, help='negev: size const: end epoch.')
    parser.add_argument('--neg_samples_ng', type=str2bool, default=None, help='negev: negative samples: on/off.')
    parser.add_argument('--neg_samples_ng_lambda', type=float, default=None, help='negev: negative samples: lambda.')
    parser.add_argument('--neg_samples_ng_start_ep', type=int, default=None, help='negev: negative samples: start epoch.')
    parser.add_argument('--neg_samples_ng_end_ep', type=int, default=None, help='negev: negative samples: end epoch.')
    parser.add_argument('--negev_ptretrained_cl_cp', type=str, default=None, help='negev: checkpoint for pretrained classifier.')
    input_parser = parser.parse_args()

    def warnit(name, vl_old, vl):
        # Print the override (old -> new) or just echo the kept value.
        if (vl_old != vl):
            print('Changing {}: {} -----> {}'.format(name, vl_old, vl))
        else:
            print('{}: {}'.format(name, vl_old))
    # Merge CLI overrides into args / args['model'] / args['optimizer'].
    attributes = input_parser.__dict__.keys()
    for k in attributes:
        val_k = getattr(input_parser, k)
        if (k in args.keys()):
            if (val_k is not None):
                warnit(k, args[k], val_k)
                args[k] = val_k
            else:
                warnit(k, args[k], args[k])
        elif (k in args['model'].keys()):
            if (val_k is not None):
                warnit('model.{}'.format(k), args['model'][k], val_k)
                args['model'][k] = val_k
            else:
                warnit('model.{}'.format(k), args['model'][k], args['model'][k])
        elif (k in args['optimizer'].keys()):
            if (val_k is not None):
                warnit('optimizer.{}'.format(k), args['optimizer'][k], val_k)
                args['optimizer'][k] = val_k
            else:
                warnit('optimizer.{}'.format(k), args['optimizer'][k], args['optimizer'][k])
        else:
            raise ValueError('Key {} was not found in args. ...[NOT OK]'.format(k))
    os.environ['MYSEED'] = str(args['MYSEED'])
    (args['outd'], args['subpath']) = outfd(Dict2Obj(args), eval=eval)
    args['outd_backup'] = args['outd']
    if is_cc():
        # FIX: the original call was `np.random.randint(low=0, high=, size=1)`
        # (missing upper bound -> syntax error). Use a large bound so the
        # per-run scratch-folder tag is effectively unique.
        _tag = '{}__{}'.format(basename(normpath(args['outd'])), '{}'.format(np.random.randint(low=0, high=10000000, size=1)[0]))
        args['outd'] = join(os.environ['SLURM_TMPDIR'], _tag)
    mkdir(args['outd'])
    # Skip experiments already marked done (passed.txt), unless overrun/eval.
    cmdr = (not constants.OVERRUN)
    cmdr &= (not eval)
    if is_cc():
        cmdr &= os.path.isfile(join(args['outd_backup'], 'passed.txt'))
    else:
        cmdr &= os.path.isfile(join(args['outd'], 'passed.txt'))
    if cmdr:
        warnings.warn('EXP {} has already been done. EXITING.'.format(args['outd']))
        sys.exit(0)
    args['scoremap_paths'] = configure_scoremap_output_paths(Dict2Obj(args))
    if args['box_v2_metric']:
        args['multi_contour_eval'] = True
        args['multi_iou_eval'] = True
    else:
        args['multi_contour_eval'] = False
        args['multi_iou_eval'] = False
    # Frozen classifier: locate its pretrained-weights folder.
    if args['model']['freeze_cl']:
        if (args['task'] == constants.NEGEV):
            cl_cp = args['negev_ptretrained_cl_cp']
            std_cl_args = deepcopy(args)
            std_cl_args['task'] = constants.STD_CL
            tag = get_tag(Dict2Obj(std_cl_args), checkpoint_type=cl_cp)
        else:
            cl_cp = args['eval_checkpoint_type']
            tag = get_tag(Dict2Obj(args), checkpoint_type=cl_cp)
        args['model']['folder_pre_trained_cl'] = join(root_dir, 'pretrained', tag)
        zz = args['model']['folder_pre_trained_cl']
        assert os.path.isdir(args['model']['folder_pre_trained_cl']), zz
    # F_CL / NEGEV tasks: fetch (and untar if needed) pre-computed CAM folders.
    if (args['task'] in [constants.F_CL, constants.NEGEV]):
        for split in constants.SPLITS:
            if (args['task'] == constants.NEGEV):
                cl_cp = args['negev_ptretrained_cl_cp']
                std_cl_args = deepcopy(args)
                std_cl_args['task'] = constants.STD_CL
                tag = get_tag(Dict2Obj(std_cl_args), checkpoint_type=cl_cp)
            else:
                cl_cp = args['eval_checkpoint_type']
                tag = get_tag(Dict2Obj(args), checkpoint_type=cl_cp)
            tag += '_cams_{}'.format(split)
            if is_cc():
                baseurl_sc = '{}/datasets/wsol-done-right'.format(os.environ['SCRATCH'])
                scratch_path = join(baseurl_sc, '{}.tar.gz'.format(tag))
                if os.path.isfile(scratch_path):
                    slurm_dir = config.get_root_wsol_dataset()
                    cmds = ['cp {} {} '.format(scratch_path, slurm_dir), 'cd {} '.format(slurm_dir), 'tar -xf {}'.format('{}.tar.gz'.format(tag))]
                    cmdx = ' && '.join(cmds)
                    print('Running bash-cmds: \n{}'.format(cmdx.replace('&& ', '\n')))
                    subprocess.run(cmdx, shell=True, check=True)
                    assert os.path.isdir(join(slurm_dir, tag))
                    args['std_cams_folder'][split] = join(slurm_dir, tag)
            else:
                path_cams = join(root_dir, constants.DATA_CAMS, tag)
                cndx = (not os.path.isdir(path_cams))
                cndx &= os.path.isfile('{}.tar.gz'.format(path_cams))
                if cndx:
                    cmds_untar = ['cd {} '.format(join(root_dir, constants.DATA_CAMS)), 'tar -xf {} '.format('{}.tar.gz'.format(tag))]
                    cmdx = ' && '.join(cmds_untar)
                    print('Running bash-cmds: \n{}'.format(cmdx.replace('&& ', '\n')))
                    subprocess.run(cmdx, shell=True, check=True)
                cndx = os.path.isdir(path_cams)
                cndx &= os.path.isfile('{}.tar.gz'.format(path_cams))
                if cndx:
                    args['std_cams_folder'][split] = path_cams
    # DDP setup: on the cluster ranks come from SLURM, locally from LOCAL_RANK.
    ngpus_per_node = torch.cuda.device_count()
    if is_cc():
        local_rank = int(os.environ.get('SLURM_LOCALID'))
        rank = ((int(os.environ.get('SLURM_NODEID')) * ngpus_per_node) + local_rank)
        current_device = local_rank
        torch.cuda.set_device(current_device)
        args['rank'] = rank
        args['local_rank'] = local_rank
        args['is_master'] = ((local_rank == 0) and (rank == 0))
        args['c_cudaid'] = current_device
    else:
        args['local_rank'] = int(os.environ['LOCAL_RANK'])
        args['world_size'] = ngpus_per_node
        args['is_master'] = (args['local_rank'] == 0)
        torch.cuda.set_device(args['local_rank'])
        args['c_cudaid'] = args['local_rank']
    # NOTE(review): this overwrites world_size with the per-node GPU count on
    # BOTH branches, including the cluster path -- looks wrong for multi-node
    # runs; kept as-is to preserve behavior. TODO confirm.
    args['world_size'] = ngpus_per_node
    reproducibility.set_to_deterministic(seed=int(args['MYSEED']), verbose=True)
    args_dict = deepcopy(args)
    args = Dict2Obj(args)
    # ------------------- configuration sanity checks -------------------
    if (args.task == constants.NEGEV):
        assert (args.negev_ptretrained_cl_cp in [constants.BEST_LOC, constants.BEST_CL])
    assert (args.runmode in [constants.RMODE_FINAL, constants.RMODE_SEARCH])
    if (args.method == constants.METHOD_CUTMIX):
        assert (0.0 <= args.cutmix_prob <= 1.0)
        assert (args.cutmix_beta > 0)
    if (args.method == constants.METHOD_HAS):
        assert (args.has_grid_size > 0)
        assert isinstance(args.has_grid_size, int)
        assert (0.0 <= args.has_drop_rate < 1.0)
    if (args.dataset == constants.BREAKHIS):
        assert (args.magnification in constants.MAGNIFICATIONSBHIS)
        assert (args.fold in list(range(5)))
    if (args.task == constants.SEG):
        assert (args.dataset in [constants.GLAS, constants.CAMELYON512])
    assert (args.spatial_pooling == constants.METHOD_2_POOLINGHEAD[args.method])
    assert (args.model['encoder_name'] in constants.BACKBONES)
    assert (not args.multi_label_flag)
    assert (args.seg_mode == constants.BINARY_MODE)
    if isinstance(args.resize_size, int):
        if isinstance(args.crop_size, int):
            assert (args.resize_size >= args.crop_size)
    assert (args.model['scale_in'] > 0.0)
    assert isinstance(args.model['scale_in'], float)
    if (args.task == constants.STD_CL):
        assert (not args.model['freeze_cl'])
        assert (args.model['folder_pre_trained_cl'] in [None, '', 'None'])
    used_constraints_f_cl = [args.sl_fc, args.crf_fc, args.entropy_fc, args.max_sizepos_fc]
    used_constraints_negev = [args.sl_ng, args.crf_ng, args.jcrf_ng, args.max_sizepos_ng, args.neg_samples_ng]
    if (args.task == constants.STD_CL):
        assert (not any(used_constraints_f_cl))
        assert (not any(used_constraints_negev))
        assert (args.resize_size == constants.RESIZE_SIZE)
        assert (args.crop_size == constants.CROP_SIZE)
    if (args.task == constants.F_CL):
        assert any(used_constraints_f_cl)
        assert (args.model['arch'] == constants.UNETFCAM)
        assert (args.eval_checkpoint_type == constants.BEST_LOC)
    if (args.task == constants.NEGEV):
        assert any(used_constraints_negev)
        assert (args.model['arch'] == constants.UNETNEGEV)
        assert (args.eval_checkpoint_type == constants.BEST_LOC)
        if args.neg_samples_ng:
            assert constants.DS_HAS_NEG_SAM[args.dataset]
    if (args.task == constants.SEG):
        assert (args.dataset in [constants.GLAS, constants.CAMELYON512])
        assert (args.model['arch'] in [constants.UNET])
        assert (args.eval_checkpoint_type == constants.BEST_LOC)
        assert (args.method == constants.METHOD_SEG)
        assert (args.spatial_pooling == constants.NONEPOOL)
    assert (args.model['arch'] in constants.ARCHS)
    assert (not args.im_rec)
    return (args, args_dict)
class CheckpointConfig(FairseqDataclass):
    """Dataclass of checkpoint save/load options (fairseq-style config).

    Each field maps to a CLI flag via the FairseqDataclass machinery; the
    'help' metadata becomes the flag's help string.
    """
    # --- where to save / what to restore ---
    save_dir: str = field(default='checkpoints', metadata={'help': 'path to save checkpoints'})
    restore_file: str = field(default='checkpoint_last.pt', metadata={'help': 'filename from which to load checkpoint (default: <save-dir>/checkpoint_last.pt'})
    finetune_from_model: Optional[str] = field(default=None, metadata={'help': 'finetune from a pretrained model; note that meters and lr scheduler will be reset'})
    # --- selectively reset parts of the restored training state ---
    reset_dataloader: bool = field(default=False, metadata={'help': 'if set, does not reload dataloader state from the checkpoint'})
    reset_lr_scheduler: bool = field(default=False, metadata={'help': 'if set, does not load lr scheduler state from the checkpoint'})
    reset_meters: bool = field(default=False, metadata={'help': 'if set, does not load meters from the checkpoint'})
    reset_optimizer: bool = field(default=False, metadata={'help': 'if set, does not load optimizer state from the checkpoint'})
    optimizer_overrides: str = field(default='{}', metadata={'help': 'a dictionary used to override optimizer args when loading a checkpoint'})
    # --- save cadence and retention (negative values mean "no limit") ---
    save_interval: int = field(default=1, metadata={'help': 'save a checkpoint every N epochs'})
    save_interval_updates: int = field(default=0, metadata={'help': 'save a checkpoint (and validate) every N updates'})
    keep_interval_updates: int = field(default=(- 1), metadata={'help': 'keep the last N checkpoints saved with --save-interval-updates'})
    keep_interval_updates_pattern: int = field(default=(- 1), metadata={'help': 'when used with --keep-interval-updates, skips deleting any checkpoints with update X where X %% keep_interval_updates_pattern == 0'})
    keep_last_epochs: int = field(default=(- 1), metadata={'help': 'keep last N epoch checkpoints'})
    keep_best_checkpoints: int = field(default=(- 1), metadata={'help': 'keep best N checkpoints based on scores'})
    # --- switches to suppress particular checkpoint outputs ---
    no_save: bool = field(default=False, metadata={'help': "don't save models or checkpoints"})
    no_epoch_checkpoints: bool = field(default=False, metadata={'help': 'only store last and best checkpoints'})
    no_last_checkpoints: bool = field(default=False, metadata={'help': "don't store last checkpoints"})
    no_save_optimizer_state: bool = field(default=False, metadata={'help': "don't save optimizer-state as part of checkpoint"})
    # --- "best" checkpoint selection and early stopping ---
    best_checkpoint_metric: str = field(default='loss', metadata={'help': 'metric to use for saving "best" checkpoints'})
    maximize_best_checkpoint_metric: bool = field(default=False, metadata={'help': 'select the largest metric value for saving "best" checkpoints'})
    patience: int = field(default=(- 1), metadata={'help': "early stop training if valid performance doesn't improve for N consecutive validation runs; note that this is influenced by --validate-interval"})
    # --- sharding / distributed / async details ---
    checkpoint_suffix: str = field(default='', metadata={'help': 'suffix to add to the checkpoint file name'})
    checkpoint_shard_count: int = field(default=1, metadata={'help': 'Number of shards containing the checkpoint - if the checkpoint is over 300GB, it is preferable to split it into shards to prevent OOM on CPU while loading the checkpoint'})
    load_checkpoint_on_all_dp_ranks: bool = field(default=False, metadata={'help': 'load checkpoints on all data parallel devices (default: only load on rank 0 and broadcast to other devices)'})
    write_checkpoints_asynchronously: bool = field(default=False, metadata={'help': 'Write checkpoints asynchronously in a separate thread. NOTE: This feature is currently being tested.', 'argparse_alias': '--save-async'})
    # II(...) resolves this value from the 'common' config group at runtime.
    model_parallel_size: int = II('common.model_parallel_size')
class Capture():
    """Context manager wrapping a pytest `capfd` fixture.

    On entry the pending capture buffer is drained; on exit the captured
    stdout/stderr are stored in `self.out` / `self.err`. The instance then
    supports `==` (via Output), `in`, `str()`, and unordered/stderr views.
    """

    def __init__(self, capfd):
        self.capfd = capfd
        self.out = ''
        self.err = ''

    def __enter__(self):
        # Drain anything already captured so this block starts clean.
        self.capfd.readouterr()
        return self

    def __exit__(self, *args):
        self.out, self.err = self.capfd.readouterr()

    def __eq__(self, other):
        wrapped = Output(self.out)
        if wrapped == other:
            return True
        # Keep Output's diff explanation around for test failure messages.
        self.explanation = wrapped.explanation
        return False

    def __str__(self):
        return self.out

    def __contains__(self, item):
        return item in self.out

    def unordered(self):
        # Line-order-insensitive view of stdout.
        return Unordered(self.out)

    def stderr(self):
        # Comparable view of captured stderr.
        return Output(self.err)
def parse_args():
    """Build the CLI parser for this script and return the parsed namespace."""
    cli = argparse.ArgumentParser(description='Parse arguments', prefix_chars='-')
    cli.add_argument('--gpu', '--list', nargs='+', default=[0],
                     help='GPU indices, if more than 1 parallel modules will be called')
    cli.add_argument('--bs', default=64, type=int, help='batch size.')
    cli.add_argument('--model_id', type=str, default='robust_resnet')
    cli.add_argument('--load_act', action='store_true')
    cli.add_argument('--load_direction', action='store_true')
    return cli.parse_args()
def process_digit_article(inText):
    """Normalize an answer string for comparison (VQA-style evaluation).

    Lowercases and splits `inText`, maps words through `manual_map`,
    drops articles, expands contractions, and rejoins with spaces.
    """
    outText = []
    for word in inText.lower().split():
        # FIX: use .get instead of .setdefault -- setdefault INSERTED every
        # unseen word into the shared manual_map, mutating it as a side
        # effect on each call. .get returns the same value without mutating.
        word = manual_map.get(word, word)
        if word not in articles:
            outText.append(word)
    for wordId, word in enumerate(outText):
        if word in contractions:
            outText[wordId] = contractions[word]
    return ' '.join(outText)
class NullCrossoverTestCases(unittest.TestCase):
    """Unit tests for the NullCrossover operator (identity crossover)."""

    def test_should_constructor_create_a_non_null_object(self):
        self.assertIsNotNone(NullCrossover())

    def test_should_constructor_create_a_valid_operator(self):
        # A null crossover never recombines, so its probability is zero.
        self.assertEqual(0, NullCrossover().probability)

    def test_should_the_solution_remain_unchanged(self):
        crossover = NullCrossover()
        parent_a = BinarySolution(number_of_variables=1, number_of_objectives=1)
        parent_a.variables[0] = [True, False, False, True, True, False]
        parent_b = BinarySolution(number_of_variables=1, number_of_objectives=1)
        parent_b.variables[0] = [False, True, False, False, True, False]
        children = crossover.execute([parent_a, parent_b])
        # Offspring must be bit-for-bit identical to the parents.
        self.assertEqual([True, False, False, True, True, False], children[0].variables[0])
        self.assertEqual([False, True, False, False, True, False], children[1].variables[0])
def convert(cfgfile, ptxtfile, approximate):
    """Convert a darknet-style .cfg network description into a Caffe prototxt.

    Parameters
    ----------
    cfgfile : str
        Path to the darknet .cfg file (INI format, one section per layer).
    ptxtfile : str
        Output path for the generated prototxt.
    approximate : bool
        If True, emit an approximated leaky-ReLU (separate relu + power +
        eltwise layers) instead of a native leaky-ReLU layer.
    """
    parser = ConfigParser(dict_type=uniqdict)
    parser.read(cfgfile)
    # Network name = cfg file name without extension.
    netname = os.path.basename(cfgfile).split('.')[0]
    gen = CaffeProtoGenerator(netname)
    for section in parser.sections():
        # Section names are suffixed for uniqueness; the prefix is the type.
        _section = section.split('_')[0]
        if (_section in ['crop', 'cost']):
            continue
        items = dict(parser.items(section))
        # FIX: ConfigParser values are strings, so a bare truthiness test
        # treated 'batch_normalize=0' as enabled ('0' is a non-empty, truthy
        # string). Compare against '0' / empty instead.
        batchnorm_followed = items.get('batch_normalize', '0') not in ('0', '')
        relu_followed = (('activation' in items) and (items['activation'] != 'linear'))
        if (_section == 'net'):
            gen.add_input_layer(items)
        elif (_section == 'convolutional'):
            gen.add_convolution_layer(items)
            if batchnorm_followed:
                gen.add_batchnorm_layer(items)
                gen.add_scale_layer(items)
            if relu_followed:
                if approximate:
                    # Approximate leaky ReLU: relu + power + eltwise sum.
                    gen.add_relu_separate_layer(items)
                    gen.add_power_layer(items)
                    gen.add_eltwise_layer(items)
                else:
                    gen.add_leaky_relu_layer(items)
        elif (_section == 'connected'):
            gen.add_innerproduct_layer(items)
            if relu_followed:
                gen.add_leaky_relu_layer(items)
        elif (_section == 'maxpool'):
            gen.add_pooling_layer('MAX', items)
        elif (_section == 'avgpool'):
            gen.add_pooling_layer('AVE', items, global_pooling=True)
        elif (_section == 'dropout'):
            gen.add_dropout_layer(items)
        elif (_section == 'softmax'):
            gen.add_softmax_layer(items)
        else:
            logging.error('{} layer is not supported'.format(_section))
    gen.update_last_convolution_layer()
    gen.write(ptxtfile)
def get_norm_layer(norm_type='instance'):
    """Return a normalization-layer constructor for the given type.

    'batch'    -> BatchNorm2d with affine params and running stats
    'instance' -> InstanceNorm2d without affine params or running stats
    'group'    -> GroupNorm with 32 groups
    'none'     -> None (no normalization)
    Raises NotImplementedError for any other value.
    """
    if norm_type == 'batch':
        return functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
    if norm_type == 'instance':
        return functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
    if norm_type == 'group':
        return functools.partial(nn.GroupNorm, 32)
    if norm_type == 'none':
        return None
    raise NotImplementedError(('normalization layer [%s] is not found' % norm_type))
class DecoderLayer(tf.keras.Model):
    """Upsampling block: Conv2DTranspose optionally followed by BatchNorm,
    Dropout(0.5) and ReLU.

    Flags:
        add: emit only the transposed convolution (no norm / activation).
        apply_dropout: insert Dropout between the norm and the ReLU.
    """
    def __init__(self, filters, kernel_size, strides_s=2, apply_dropout=False, add=False):
        super(DecoderLayer, self).__init__()
        weight_init = tf.random_normal_initializer(mean=0.0, stddev=0.02)
        upconv = layers.Conv2DTranspose(filters=filters, kernel_size=kernel_size, strides=strides_s, padding='same', kernel_initializer=weight_init, use_bias=False)
        norm = layers.BatchNormalization()
        activation = layers.ReLU()
        self.decoder_layer = None
        if add:
            stack = [upconv]
        elif apply_dropout:
            stack = [upconv, norm, layers.Dropout(rate=0.5), activation]
        else:
            stack = [upconv, norm, activation]
        self.decoder_layer = tf.keras.Sequential(stack)

    def call(self, x):
        return self.decoder_layer(x)
def depth_split(root, depth=0):
    """Group all nodes of a tree by their depth.

    Traverses `root` (and each node's `.children`) breadth-first, which yields
    the same left-to-right order per level as the recursive depth-first merge.

    Args:
        root: tree node exposing a `children` iterable.
        depth: depth assigned to `root` (defaults to 0).

    Returns:
        defaultdict mapping depth -> list of nodes at that depth.
    """
    levels = defaultdict(list)
    frontier = [(root, depth)]
    while frontier:
        node, level = frontier.pop(0)
        levels[level].append(node)
        frontier.extend((child, level + 1) for child in node.children)
    return levels
def visualization_save(mask, save_path, boxes, info, color_list, color_list2, num_class):
    """Draw labelled bounding boxes on `mask` and write the result to disk.

    Args:
        mask: input image array (cast to uint8 before drawing).
        save_path: output image path for cv2.imwrite.
        boxes: iterable of (x0, y0, w, h, ...) box coordinates.
        info: per-box (label, category_name, ...) tuples, parallel to `boxes`.
        color_list: colors for labels below `num_class`.
        color_list2: colors used when the label exceeds `num_class - 1`.
        num_class: number of primary classes.
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    canvas = mask.astype(np.uint8).copy()
    for box_idx, coord in enumerate(boxes):
        x0, y0, w, h = coord[:4]
        label = info[box_idx][0]
        top_left = (int(x0), int(y0))
        bottom_right = (int(x0 + w), int(y0 + h))
        # Default palette, overridden for labels past the primary classes.
        color = color_list[int(label)].tolist()
        if label > (num_class - 1):
            color = color_list2[int(label)].tolist()
        cv2.rectangle(canvas, top_left, bottom_right, color, 2)
        x, y = top_left
        category_name = info[box_idx][1]
        # Category text just inside the box's top-left corner.
        cv2.putText(canvas, category_name, (x + 3, y + 10), font, 0.35, color, 1)
    cv2.imwrite(save_path, canvas)
def powerset(arr):
    """Return every subset of `arr` as a list of lists.

    Iterative equivalent of the recursive head/tail formulation: processing
    elements right-to-left and appending "element + existing subset" copies
    reproduces the recursive ordering exactly (starts with the empty set).
    """
    subsets = [[]]
    for element in reversed(arr):
        subsets = subsets + [[element] + tail for tail in subsets]
    return subsets
def test(args, test_loader, model, epoch):
    """Evaluate `model` on `test_loader`, reporting loss and top-1/top-5 accuracy.

    The model is put in eval mode for the pass and switched back to train
    mode before returning.

    Args:
        args: namespace with `no_progress`, `local_rank` and `device`.
        test_loader: evaluation DataLoader.
        model: network to evaluate.
        epoch: current epoch (unused here; kept for a uniform loop API).

    Returns:
        (average loss, average top-1 accuracy) over the whole loader.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()
    model.eval()
    if (not args.no_progress):
        # Only rank -1 (no DDP) or rank 0 renders the progress bar.
        test_loader = tqdm(test_loader, disable=(args.local_rank not in [(- 1), 0]))
    with torch.no_grad():
        for (batch_idx, (inputs, targets)) in enumerate(test_loader):
            data_time.update((time.time() - end))
            inputs = inputs.to(args.device)
            targets = targets.to(args.device)
            outputs = model(inputs)
            loss = F.cross_entropy(outputs, targets)
            (prec1, prec5) = accuracy(outputs, targets, topk=(1, 5))
            # Running averages are weighted by the batch size.
            losses.update(loss.item(), inputs.shape[0])
            top1.update(prec1.item(), inputs.shape[0])
            top5.update(prec5.item(), inputs.shape[0])
            batch_time.update((time.time() - end))
            end = time.time()
            if (not args.no_progress):
                test_loader.set_description('Test Iter: {batch:4}/{iter:4}. Data: {data:.3f}s. Batch: {bt:.3f}s. Loss: {loss:.4f}. top1: {top1:.2f}. top5: {top5:.2f}. '.format(batch=(batch_idx + 1), iter=len(test_loader), data=data_time.avg, bt=batch_time.avg, loss=losses.avg, top1=top1.avg, top5=top5.avg))
        if (not args.no_progress):
            test_loader.close()
    # Restore training mode for the caller's next training step.
    model.train()
    logger.info('top-1 acc: {:.2f}'.format(top1.avg))
    logger.info('top-5 acc: {:.2f}'.format(top5.avg))
    return (losses.avg, top1.avg)
def plot_ce_bins(ax, t, y, dy, freq, ce_proc):
    """Overlay the conditional-entropy (phase, magnitude) bin grid on a phase plot.

    Folds (t, y) at `freq`, draws the bin boundaries used by `ce_proc`, shades
    each non-empty bin by its entropy contribution, and scatters the folded data.

    Args:
        ax: matplotlib axes to draw on.
        t, y: time and magnitude arrays.
        dy: magnitude uncertainties (unused; kept for a uniform plotting API).
        freq: trial folding frequency.
        ce_proc: object exposing `phase_bins`, `mag_bins`, `phase_overlap`
            and `mag_overlap`.

    Returns:
        The image handle from `ax.imshow` (e.g. for attaching a colorbar).
    """
    ax.set_xlim(0, 1)
    y0 = min(y)
    yrange = (max(y) - y0)
    phi = phase(t, freq)
    # BUGFIX: `np.int` / `np.float` were deprecated in NumPy 1.20 and removed
    # in 1.24; the builtins `int` / `float` are what those aliases meant.
    phi_bins = np.floor((phi * ce_proc.phase_bins)).astype(int)
    yi = ((ce_proc.mag_bins * (y - y0)) / yrange)
    mag_bins = np.floor(yi).astype(int)
    # 2-D histogram: counts per (phase bin, magnitude bin), then normalized.
    bins = [[sum(((phi_bins == i) & (mag_bins == j))) for j in range(ce_proc.mag_bins)] for i in range(ce_proc.phase_bins)]
    bins = np.array(bins).astype(float)
    bins /= np.sum(bins.ravel())
    # Marginal probability of each phase bin.
    p_phi = [np.sum(bins[i]) for i in range(ce_proc.phase_bins)]
    dm = (float((1 + ce_proc.mag_overlap)) / ce_proc.mag_bins)
    dphi = (float((1 + ce_proc.phase_overlap)) / ce_proc.phase_bins)
    dY = (yrange * dm)
    # Per-bin entropy term p * log(dm * p_phi / p); empty bins contribute 0.
    dH = [[((bins[i][j] * np.log(((dm * p_phi[i]) / bins[i][j]))) if (bins[i][j] > 0) else 0.0) for j in range(ce_proc.mag_bins)] for i in range(ce_proc.phase_bins)]
    dH = np.array(dH)
    extent = [0, 1, min(y), max(y)]
    # Mask empty bins so set_bad renders them fully transparent.
    dH = np.ma.masked_where((dH == 0), dH)
    palette = copy(plt.cm.GnBu_r)
    palette.set_bad('w', 0.0)
    for i in range((ce_proc.phase_bins + 1)):
        ax.axvline((0 + (i * dphi)), ls=':', color='k', alpha=0.5, zorder=95)
    for i in range((ce_proc.mag_bins + 1)):
        ax.axhline((min(y) + (i * dY)), ls=':', color='k', alpha=0.5, zorder=95)
    cplot = ax.imshow(dH.T, cmap=palette, extent=extent, aspect='auto', origin='lower', alpha=0.5, zorder=90)
    ax.scatter(phi, y, c='k', s=1, alpha=1, zorder=100)
    return cplot
def load_graphdata(dataname, datadir='dataset', max_nodes=None, edge_labels=False):
    """Load a TU-format graph-classification dataset into S2VGraph objects.

    Reads the standard TU text files under ``datadir/dataname/`` (graph
    indicator, optional node labels/attributes, graph labels, adjacency, and
    optionally edge labels) and builds one S2VGraph per graph.

    Args:
        dataname: dataset name, also the file prefix inside its directory.
        datadir: root directory containing the dataset folder.
        max_nodes: if given, graphs with more nodes are skipped.
        edge_labels: if True, also read `_edge_labels.txt`.

    Returns:
        (graphs, num_classes): list of S2VGraph objects and the number of
        distinct graph labels.
    """
    prefix = os.path.join(datadir, dataname, dataname)
    # node id (1-based) -> graph id (1-based)
    filename_graph_indic = (prefix + '_graph_indicator.txt')
    graph_indic = {}
    with open(filename_graph_indic) as f:
        i = 1
        for line in f:
            line = line.strip('\n')
            graph_indic[i] = int(line)
            i += 1
    # Node labels are optional; shift them so the smallest becomes 0.
    filename_nodes = (prefix + '_node_labels.txt')
    node_labels = []
    min_label_val = None
    try:
        with open(filename_nodes) as f:
            for line in f:
                line = line.strip('\n')
                l = int(line)
                node_labels += [l]
                if ((min_label_val is None) or (min_label_val > l)):
                    min_label_val = l
        num_unique_node_labels = ((max(node_labels) - min_label_val) + 1)
        node_labels = [(l - min_label_val) for l in node_labels]
    except IOError:
        print('No node labels')
    # Node attributes are optional: comma/whitespace separated floats per node.
    filename_node_attrs = (prefix + '_node_attributes.txt')
    node_attrs = []
    try:
        with open(filename_node_attrs) as f:
            for line in f:
                line = line.strip('\\s\n')
                attrs = [float(attr) for attr in re.split('[,\\s]+', line) if (not (attr == ''))]
                node_attrs.append(np.array(attrs))
    except IOError:
        print('No node attributes')
    # Graph labels are remapped to contiguous ints in order of first appearance.
    filename_graphs = (prefix + '_graph_labels.txt')
    graph_labels = []
    label_vals = []
    with open(filename_graphs) as f:
        for line in f:
            line = line.strip('\n')
            val = int(line)
            if (val not in label_vals):
                label_vals.append(val)
            graph_labels.append(val)
    label_map_to_int = {val: i for (i, val) in enumerate(label_vals)}
    graph_labels = np.array([label_map_to_int[l] for l in graph_labels])
    if edge_labels:
        # One label per adjacency line, remapped like the graph labels.
        # NOTE(review): `edge_labels` is rebound from the boolean flag to the
        # full per-edge list, and below every graph receives that whole list
        # -- confirm downstream code slices it per graph.
        filename_edges = (prefix + '_edge_labels.txt')
        edge_labels = []
        edge_label_vals = []
        with open(filename_edges) as f:
            for line in f:
                line = line.strip('\n')
                val = int(line)
                if (val not in edge_label_vals):
                    edge_label_vals.append(val)
                edge_labels.append(val)
        edge_label_map_to_int = {val: i for (i, val) in enumerate(edge_label_vals)}
    # Adjacency: group edges by the graph their source node belongs to.
    filename_adj = (prefix + '_A.txt')
    adj_list = {i: [] for i in range(1, (len(graph_labels) + 1))}
    index_graph = {i: [] for i in range(1, (len(graph_labels) + 1))}
    num_edges = 0
    with open(filename_adj) as f:
        for line in f:
            line = line.strip('\n').split(',')
            (e0, e1) = (int(line[0].strip(' ')), int(line[1].strip(' ')))
            adj_list[graph_indic[e0]].append((e0, e1))
            index_graph[graph_indic[e0]] += [e0, e1]
            num_edges += 1
    for k in index_graph.keys():
        index_graph[k] = [(u - 1) for u in set(index_graph[k])]
    graphs = []
    for i in range(1, (1 + len(adj_list))):
        G = nx.from_edgelist(adj_list[i])
        if ((max_nodes is not None) and (G.number_of_nodes() > max_nodes)):
            continue
        G.graph['label'] = graph_labels[(i - 1)]
        for u in G.nodes():
            if (len(node_labels) > 0):
                node_label_one_hot = ([0] * num_unique_node_labels)
                node_label = node_labels[(u - 1)]
                node_label_one_hot[node_label] = 1
                G.nodes[u]['label'] = node_label_one_hot
                G.nodes[u]['tag'] = node_label
            if (len(node_attrs) > 0):
                G.nodes[u]['feat'] = node_attrs[(u - 1)]
        if (len(node_attrs) > 0):
            G.graph['feat_dim'] = node_attrs[0].shape[0]
        # Relabel nodes to 0..n-1 in iteration order.
        # BUGFIX: the original guarded this with `float(nx.__version__) < 2.0`,
        # which raises ValueError for versions like "2.8.1" (two dots). Both
        # branches built the identical mapping (G.nodes() vs G.nodes), so the
        # version check is unnecessary; G.nodes() works on networkx 1.x and 2.x.
        mapping = {}
        it = 0
        for n in G.nodes():
            mapping[n] = it
            it += 1
        G = nx.relabel_nodes(G, mapping)
        l = int(G.graph['label'])
        adj = [[] for i in range(len(G))]
        for (i, j) in G.edges():
            adj[i].append(j)
            adj[j].append(i)
        degree_list = []
        for i in range(len(G)):
            degree_list.append(len(adj[i]))
        if (len(node_attrs) > 0):
            node_features = [G.nodes[u]['feat'] for u in G.nodes()]
            node_features = np.asarray(node_features)
            if (len(node_labels) > 0):
                node_labels_one_hot = np.asarray([G.nodes[u]['label'] for u in G.nodes()])
        else:
            # Without attributes, use the one-hot labels as features.
            node_features = [G.nodes[u]['label'] for u in G.nodes()]
            node_features = np.asarray(node_features)
            node_labels_one_hot = node_features
        G = S2VGraph(G, l, node_labels)
        if edge_labels:
            G.edge_labels = edge_labels
        G.neighbors = adj
        G.max_neighbor = max(degree_list)
        # Ceiling of the mean degree.
        G.mean_neighbor = (((sum(degree_list) + len(degree_list)) - 1) // len(degree_list))
        G.degree_list = degree_list
        G.node_features = node_features
        G.node_labels = node_labels_one_hot
        graphs.append(G)
    return (graphs, (int(max(graph_labels)) + 1))
class DataIterator(Registrable):
    """Base class for iterators that batch `Instance`s into tensor dicts.

    Handles epoch tracking, optional caching of tensorized batches, lazy
    streaming with a bounded memory footprint, capping the number of padded
    samples per batch, and `instances_per_epoch` cursors. Subclasses
    implement `_create_batches`.
    """
    default_implementation = 'bucket'

    def __init__(self, batch_size: int=32, instances_per_epoch: int=None, max_instances_in_memory: int=None, cache_instances: bool=False, track_epoch: bool=False, maximum_samples_per_batch: Tuple[(str, int)]=None) -> None:
        # Set later via `index_with`.
        self.vocab: Vocabulary = None
        self._batch_size = batch_size
        self._max_instances_in_memory = max_instances_in_memory
        self._instances_per_epoch = instances_per_epoch
        # (padding-key, limit) used by _ensure_batch_is_sufficiently_small.
        self._maximum_samples_per_batch = maximum_samples_per_batch
        self._cache_instances = cache_instances
        # id(instances) -> cached tensor dicts for that dataset object.
        self._cache: Dict[(int, List[TensorDict])] = defaultdict(list)
        self._track_epoch = track_epoch
        # id(instances) -> index of the current epoch over it.
        self._epochs: Dict[(int, int)] = defaultdict(int)
        # id(instances) -> saved iterator so successive epochs resume where
        # the previous one stopped (only used with instances_per_epoch).
        self._cursors: Dict[(int, Iterator[Instance])] = {}

    def __call__(self, instances: Iterable[Instance], num_epochs: int=None, shuffle: bool=True) -> Iterator[TensorDict]:
        """Yield tensor dicts for `num_epochs` epochs (forever if None)."""
        key = id(instances)
        starting_epoch = self._epochs[key]
        if (num_epochs is None):
            epochs: Iterable[int] = itertools.count(starting_epoch)
        else:
            epochs = range(starting_epoch, (starting_epoch + num_epochs))
        for epoch in epochs:
            self._epochs[key] = epoch
            if (self._cache_instances and (key in self._cache)):
                # Serve (optionally shuffled) tensor dicts straight from the cache.
                tensor_dicts = self._cache[key]
                if shuffle:
                    random.shuffle(tensor_dicts)
                for tensor_dict in tensor_dicts:
                    if self._track_epoch:
                        epoch_tensor: torch.Tensor = tensor_dict['epoch_num']
                        epoch_tensor.fill_(epoch)
                    (yield tensor_dict)
            else:
                batches = self._create_batches(instances, shuffle)
                # Only populate the cache on the first pass over this dataset.
                add_to_cache = (self._cache_instances and (key not in self._cache))
                for batch in batches:
                    if self._track_epoch:
                        add_epoch_number(batch, epoch)
                    if (self.vocab is not None):
                        batch.index_instances(self.vocab)
                    padding_lengths = batch.get_padding_lengths()
                    logger.debug('Batch padding lengths: %s', str(padding_lengths))
                    logger.debug('Batch size: %d', len(batch.instances))
                    tensor_dict = batch.as_tensor_dict(padding_lengths)
                    if add_to_cache:
                        self._cache[key].append(tensor_dict)
                    (yield tensor_dict)

    def _take_instances(self, instances: Iterable[Instance], max_instances: Optional[int]=None) -> Iterator[Instance]:
        """Yield up to `max_instances` instances, resuming from the saved cursor.

        Wraps around to a fresh pass over `instances` if the iterator is
        exhausted before `max_instances` have been yielded.
        """
        if (max_instances is None):
            (yield from iter(instances))
        else:
            key = id(instances)
            iterator = self._cursors.get(key, iter(instances))
            while (max_instances > 0):
                try:
                    (yield next(iterator))
                    max_instances -= 1
                except StopIteration:
                    # Exhausted mid-epoch: wrap around and keep yielding.
                    iterator = iter(instances)
            # BUGFIX: the cursor was previously saved only inside the
            # StopIteration branch, so after a normal (non-wrapping) epoch the
            # position was lost and every epoch re-served the same leading
            # instances. Always persist it so the next epoch resumes here.
            self._cursors[key] = iterator

    def _memory_sized_lists(self, instances: Iterable[Instance]) -> Iterable[List[Instance]]:
        """Group instances into lists sized for the configured memory policy."""
        lazy = is_lazy(instances)
        iterator = self._take_instances(instances, self._instances_per_epoch)
        if (lazy and (self._max_instances_in_memory is None)):
            (yield from lazy_groups_of(iterator, self._batch_size))
        elif (self._max_instances_in_memory is not None):
            (yield from lazy_groups_of(iterator, self._max_instances_in_memory))
        elif (self._instances_per_epoch is None):
            (yield ensure_list(instances))
        else:
            (yield list(iterator))

    def _ensure_batch_is_sufficiently_small(self, batch_instances: Iterable[Instance]) -> List[List[Instance]]:
        """Split a batch so max-padding-length * batch-size stays under the limit."""
        if (self._maximum_samples_per_batch is None):
            return [list(batch_instances)]
        (key, limit) = self._maximum_samples_per_batch
        padding_length = (- 1)
        list_batch_instances = list(batch_instances)
        for instance in list_batch_instances:
            if (self.vocab is not None):
                instance.index_fields(self.vocab)
            field_lengths = instance.get_padding_lengths()
            for (_, lengths) in field_lengths.items():
                try:
                    padding_length = max(padding_length, lengths[key])
                except KeyError:
                    pass  # this field has no such padding key
        if ((padding_length * len(list_batch_instances)) > limit):
            # Split evenly into the fewest batches that respect the limit.
            num_samples = (padding_length * len(list_batch_instances))
            num_shrunk_batches = math.ceil((num_samples / float(limit)))
            shrunk_batch_size = math.ceil((len(list_batch_instances) / num_shrunk_batches))
            shrunk_batches = []
            start = 0
            while (start < len(list_batch_instances)):
                end = (start + shrunk_batch_size)
                shrunk_batches.append(list_batch_instances[start:end])
                start = end
            return shrunk_batches
        else:
            return [list_batch_instances]

    def get_num_batches(self, instances: Iterable[Instance]) -> int:
        """Best-effort batch count; returns 1 to signal 'unknown' for lazy data."""
        if (is_lazy(instances) and (self._instances_per_epoch is None)):
            return 1
        elif (self._instances_per_epoch is not None):
            return math.ceil((self._instances_per_epoch / self._batch_size))
        else:
            return math.ceil((len(ensure_list(instances)) / self._batch_size))

    def _create_batches(self, instances: Iterable[Instance], shuffle: bool) -> Iterable[Batch]:
        """Subclasses group instances into `Batch` objects here."""
        raise NotImplementedError

    def index_with(self, vocab: Vocabulary):
        """Attach the vocabulary used to index instances before tensorization."""
        self.vocab = vocab
def process_folder(q, static_frames, test_scenes, data_dir, output_dir, stride=1):
    """Worker: pull KITTI-style folders off queue `q` and dump 3-frame triples.

    For each folder, every (s, s+stride, s+2*stride) frame triple is stacked
    vertically into one PNG under `output_dir`, and a `train.txt` manifest
    line (image path + calibration file) is written per triple. Folders whose
    scene matches `test_scenes` and frames listed in `static_frames` are skipped.
    """
    while True:
        # NOTE(review): empty()/get() is not atomic; with several workers a
        # get() could still block after empty() returned False. Safe only if
        # the queue is fully populated before the workers start -- confirm.
        if q.empty():
            break
        folder = q.get()
        if (folder in static_frames.keys()):
            static_ids = static_frames[folder]
        else:
            static_ids = []
        scene = folder.split('/')[1]
        # Drop the last 5 characters (presumably a '_sync'-style suffix --
        # TODO confirm) before matching against the test-scene list.
        if (scene[:(- 5)] in test_scenes):
            continue
        image_path = os.path.join(data_dir, folder, 'image_02/data')
        dump_image_path = os.path.join(output_dir, folder)
        if (not os.path.isdir(dump_image_path)):
            os.makedirs(dump_image_path)
        numbers = len(os.listdir(image_path))
        if (numbers < 3):
            print('this folder do not have enough image, numbers < 3!')
        # BUGFIX: the manifest file was opened but never closed, leaking one
        # file handle per folder; `with` guarantees flush + close.
        with open(os.path.join(dump_image_path, 'train.txt'), 'w') as f:
            for n in range((numbers - (2 * stride))):
                s_idx = n
                m_idx = (s_idx + stride)
                e_idx = (s_idx + (2 * stride))
                # Skip the triple if any of its three frames is static.
                if ((('%.10d' % s_idx) in static_ids) or (('%.10d' % e_idx) in static_ids) or (('%.10d' % m_idx) in static_ids)):
                    continue
                curr_image = cv2.imread((os.path.join(image_path, ('%.10d' % s_idx)) + '.png'))
                middle_image = cv2.imread((os.path.join(image_path, ('%.10d' % m_idx)) + '.png'))
                next_image = cv2.imread((os.path.join(image_path, ('%.10d' % e_idx)) + '.png'))
                if (curr_image is None):
                    print((os.path.join(image_path, ('%.10d' % s_idx)) + '.png'))
                    continue
                if (middle_image is None):
                    print((os.path.join(image_path, ('%.10d' % m_idx)) + '.png'))
                    continue
                if (next_image is None):
                    print((os.path.join(image_path, ('%.10d' % e_idx)) + '.png'))
                    continue
                # Stack the three frames vertically into a single image.
                seq_images = np.concatenate([curr_image, middle_image, next_image], axis=0)
                cv2.imwrite((os.path.join(dump_image_path, ('%.10d' % s_idx)) + '.png'), seq_images.astype('uint8'))
                date = folder.split('/')[0]
                f.write(('%s %s\n' % ((os.path.join(folder, ('%.10d' % s_idx)) + '.png'), os.path.join(date, 'calib_cam_to_cam.txt'))))
        print(folder)
def Fully_ResNet(x, class_num):
    """Build a fully residual network over a 256x256x3 input.

    Applies batch-norm, a 3->64 residual conv, 16 stacked 64->64 residual
    convs, and a final 64->class_num residual conv without ReLU.

    Args:
        x: input tensor; must be shaped [batch, 256, 256, 3].
        class_num: number of output channels of the last layer.

    Returns:
        The output tensor, or None (after printing an error) when the input
        shape does not match.
    """
    repeat = 16
    layer_num = 128  # NOTE(review): unused; kept for parity with the original
    rows, cols, channels = x.shape[1], x.shape[2], x.shape[3]
    if [rows, cols, channels] != [256, 256, 3]:
        print('U_Net input error: the size of input not matched\n')
        return
    net = batchnorm(x)
    net = res_conv_layer(net, 3, 64, name='res1')
    for i in range(repeat):
        net = res_conv_layer(net, 64, 64, name='res1_' + str(i))
    net = res_conv_layer(net, 64, class_num, name='res2', relu=False)
    return net
class FeatureAttention(nn.Module):
    """Squeeze-and-excitation style gate over graph node features.

    Pools node features per graph (max and sum via `scatter`), passes both
    through a shared bottleneck MLP, and gates each node's features with the
    sigmoid of the combined pooled signals.
    """
    def __init__(self, channels, reduction):
        super().__init__()
        hidden = channels // reduction
        # Shared bottleneck MLP applied to both pooled representations.
        self.mlp = Sequential(
            Linear(channels, hidden, bias=False),
            nn.ReLU(inplace=True),
            Linear(hidden, channels, bias=False),
        )
        self.reset_parameters()

    def reset_parameters(self):
        reset(self.mlp)

    def forward(self, x, batch, size=None):
        pooled_max = scatter(x, batch, dim=0, dim_size=size, reduce='max')
        pooled_sum = scatter(x, batch, dim=0, dim_size=size, reduce='sum')
        max_branch = self.mlp(pooled_max)
        sum_branch = self.mlp(pooled_sum)
        gate = torch.sigmoid(max_branch + sum_branch)
        # Broadcast the per-graph gate back to the nodes of each graph.
        return x * gate[batch]
def test_sword():
    """SwordModel data fixture: verify download/extract dirs and file layout."""
    gt_prefix = 'SwordModel'
    (gt_data_root, gt_download_dir, gt_extract_dir) = get_test_data_dirs(gt_prefix)
    sword = o3d.data.SwordModel()
    assert Path(gt_download_dir).is_dir()
    # Expected on-disk layout of the extracted archive.
    gt_path_map = {'sword_material': (Path(gt_extract_dir) / 'UV.mtl'), 'sword_model': (Path(gt_extract_dir) / 'UV.obj'), 'base_color': (Path(gt_extract_dir) / 'UV_blinn1SG_BaseColor.png'), 'metallic': (Path(gt_extract_dir) / 'UV_blinn1SG_Metallic.png'), 'normal': (Path(gt_extract_dir) / 'UV_blinn1SG_Normal.png'), 'roughness': (Path(gt_extract_dir) / 'UV_blinn1SG_Roughness.png')}
    for file_name in sword.path_map:
        assert (Path(sword.path_map[file_name]) == gt_path_map[file_name])
        assert Path(sword.path_map[file_name]).is_file()
    # BUGFIX: wrap gt_extract_dir in Path() like every other line in this
    # test; the bare `gt_extract_dir / 'UV.obj'` raises TypeError whenever the
    # helper returns a plain string (no-op when it already returns a Path).
    assert (Path(sword.path) == (Path(gt_extract_dir) / 'UV.obj'))
    assert Path(sword.path).is_file()
    assert (sword.prefix == gt_prefix)
    assert (Path(sword.data_root) == gt_data_root)
    assert (Path(sword.download_dir) == gt_download_dir)
    assert (Path(sword.extract_dir) == gt_extract_dir)
class Mnih2015(nn.Module):
    """DQN architecture from Mnih et al. (2015): three convolutions followed
    by two fully connected layers producing one value per action.

    Args:
        image_shape: (height, width) of the input frames.
        num_channels: number of stacked input frames/channels.
        num_actions: size of the output layer.
    """
    def __init__(self, image_shape, num_channels, num_actions):
        super(Mnih2015, self).__init__()
        self.num_actions = num_actions
        self.conv1 = nn.Conv2d(num_channels, 32, 8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
        self.conv3 = nn.Conv2d(64, 64, 3, stride=1)
        # Probe the conv stack once with a dummy input to learn the flattened
        # feature size, so image_shape can be arbitrary.
        probe = torch.randn(1, num_channels, *image_shape)
        probe = self.conv3(self.conv2(self.conv1(probe)))
        self.conv3_size = np.prod(probe.shape)
        print('conv3: {}'.format(self.conv3_size))
        self.fc1 = nn.Linear(self.conv3_size, 512)
        self.fc2 = nn.Linear(512, num_actions)

    def forward(self, x):
        for conv in (self.conv1, self.conv2, self.conv3):
            x = F.relu(conv(x))
        hidden = F.relu(self.fc1(x.view(-1, self.conv3_size)))
        return self.fc2(hidden)
def plot_box(val_ent_pairs, title=None, step_size=0.25):
    """Violin plot of values bucketed by their entropy.

    Args:
        val_ent_pairs: iterable of (value, entropy, ...) pairs.
        title: plot title.
        step_size: entropy bucket width; buckets cover [0, 8).

    Returns:
        The seaborn violin-plot axes.
    """
    segs = np.arange(0, 8, step_size).tolist()
    colorblind = sns.color_palette('coolwarm', 10)[::(- 1)]
    bins = [[] for _ in range(len(segs))]
    (x, y) = ([], [])
    for p in val_ent_pairs:
        (v, ent) = (p[0], p[1])
        cat = int((ent // step_size))
        # BUGFIX: the original bare `except: pass` also swallowed
        # KeyboardInterrupt and genuine bugs. Only IndexError (entropy >= 8,
        # past the last bucket) is an expected, skippable condition here.
        try:
            bins[cat].append(v)
            x.append(cat)
            y.append(v)
        except IndexError:
            pass
    (fig1, ax1) = plt.subplots()
    ax1.set_title(title)
    ax1 = sns.violinplot(x=x, y=y, cut=0, palette=colorblind, inner='quartile')
    return ax1
def all_gather_list(data, group=None, max_size=16384):
    """Gather arbitrary picklable `data` from every worker onto all workers.

    Each rank pickles its payload (4-byte big-endian length header followed by
    the pickle bytes) into its own `max_size`-byte slot of a shared buffer.
    One `all_reduce` sums the buffers -- every other rank holds zeros in a
    given slot -- after which each rank decodes all slots.

    Args:
        data: any picklable object; moved to CPU before pickling.
        group: process group for the collective (default group if None).
        max_size: per-rank slot size in bytes, including the 4-byte header.

    Returns:
        List with one decoded object per rank, in rank order.

    Raises:
        ValueError: if header + pickled payload exceeds `max_size`.
    """
    rank = get_rank()
    world_size = get_world_size()
    buffer_size = (max_size * world_size)
    # Lazily (re)allocate the GPU/CPU staging buffers on the function object;
    # they are reused across calls and only grow when needed.
    if ((not hasattr(all_gather_list, '_buffer')) or (all_gather_list._buffer.numel() < buffer_size)):
        all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size)
        all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory()
    buffer = all_gather_list._buffer
    buffer.zero_()
    cpu_buffer = all_gather_list._cpu_buffer
    data = utils.move_to_cpu(data)
    enc = pickle.dumps(data)
    enc_size = len(enc)
    header_size = 4
    size = (header_size + enc_size)
    if (size > max_size):
        raise ValueError('encoded data size ({}) exceeds max_size ({})'.format(size, max_size))
    # '>I' = unsigned 32-bit big-endian payload length.
    header = struct.pack('>I', enc_size)
    cpu_buffer[:size] = torch.ByteTensor(list((header + enc)))
    # Copy this rank's message into its own slot of the shared buffer.
    start = (rank * max_size)
    buffer[start:(start + size)].copy_(cpu_buffer[:size])
    all_reduce(buffer, group=group)
    buffer = buffer.cpu()
    try:
        result = []
        for i in range(world_size):
            out_buffer = buffer[(i * max_size):((i + 1) * max_size)]
            (enc_size,) = struct.unpack('>I', bytes(out_buffer[:header_size].tolist()))
            if (enc_size > 0):
                result.append(pickle.loads(bytes(out_buffer[header_size:(header_size + enc_size)].tolist())))
        return result
    except pickle.UnpicklingError:
        raise Exception('Unable to unpickle data from other workers. all_gather_list requires all workers to enter the function together, so this error usually indicates that the workers have fallen out of sync somehow. Workers can fall out of sync if one of them runs out of memory, or if there are other conditions in your training script that can cause one worker to finish an epoch while other workers are still iterating over their portions of the data. Try rerunning with --ddp-backend=no_c10d and see if that helps.')
def main(input_file_or_instances, name: str, seed: int=42, batch_size: int=48, cuda: str='', test=True, prediction_name: str='prediction'):
    """Load a trained AllenNLP model and run prediction (plus optional evaluation).

    Args:
        input_file_or_instances: jsonlines file path, or an in-memory list of
            instance dicts, to predict on.
        name: experiment name; the model lives in `results/{name}-{seed}`.
        seed: seed suffix of the serialization directory.
        batch_size: prediction / evaluation batch size.
        cuda: CUDA_VISIBLE_DEVICES string; empty string means CPU.
        test: when True, also evaluate on the config's `test_data_path`.
        prediction_name: basename of the dumped prediction file.
    """
    os.environ['OMP_NUM_THREADS'] = '8'
    # Required for deterministic cuBLAS kernels.
    os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
    os.environ['CUDA_VISIBLE_DEVICES'] = cuda
    # Imports are deferred so torch sees the CUDA_VISIBLE_DEVICES mask above.
    import torch
    from allennlp.common import Params, util
    from allennlp.data import DatasetReader
    from allennlp.models import Model
    import cap
    def predict(raw_data: List[Dict], serialization_dir: str, prediction_name: str):
        """Tag every dict in raw_data in place, then dump it as jsonlines."""
        from allennlp.data.batch import Batch
        from allennlp.nn import util
        from cap import RetrievalDatasetReader
        cuda_device = model._get_prediction_device()
        def predict_batch(batch: List):
            """Run the model on one batch of raw dicts and attach the tags."""
            instances = list()
            for raw in batch:
                kwargs = {'text': raw['text']}
                if isinstance(dataset_reader, RetrievalDatasetReader):
                    kwargs['hits'] = raw['hits']
                ins = dataset_reader.text_to_instance(**kwargs)
                dataset_reader.apply_token_indexers(ins)
                instances.append(ins)
            dataset = Batch(instances)
            dataset.index_instances(model.vocab)
            model_input = util.move_to_device(dataset.as_tensor_dict(), cuda_device)
            outputs = model.make_output_human_readable(model(**model_input))
            # If the input already carries gold 'tags', store predictions
            # under 'predicted_tags' so the gold values are not overwritten.
            key = ('predicted_tags' if ('tags' in batch[0]) else 'tags')
            for (i, obj) in enumerate(batch):
                obj[key] = outputs['tags'][i]
            del dataset, model_input, outputs
            # Empties the caller's list, so the outer loop keeps reusing it.
            batch.clear()
        batch = list()
        for (i, obj) in enumerate(raw_data):
            if ((i % 1000) == 0):
                printf(i, '/', len(raw_data))
            batch.append(obj)
            if (len(batch) >= batch_size):
                predict_batch(batch)
        else:
            # for/else: runs once after the loop completes -- flush the
            # remaining partial batch.
            if (len(batch) > 0):
                predict_batch(batch)
        dump_jsonline(f'{serialization_dir}/{prediction_name}.json', raw_data)
        return
    # NOTE(review): device selection reads the global _ARGS rather than the
    # `cuda` parameter -- presumably they agree; confirm against the caller.
    cuda_device = ((- 1) if (_ARGS.cuda == '') else 0)
    serialization_dir = f'results/{name}-{seed}'
    params = Params.from_file((serialization_dir + '/config.json'))
    util.prepare_environment(params)
    dataset_reader = DatasetReader.from_params(params['dataset_reader'])
    printf('loding model from', serialization_dir)
    model = Model.load(params, serialization_dir, cuda_device=cuda_device)
    if test:
        from allennlp.data import DataLoader
        from allennlp.training.util import evaluate as allen_evaluate
        params['data_loader']['batch_sampler']['batch_size'] = batch_size
        data_loader = DataLoader.from_params(params['data_loader'], reader=dataset_reader, data_path=params['test_data_path'])
        data_loader.index_with(model.vocab)
        test_metrics = allen_evaluate(model, data_loader, cuda_device, output_file=(serialization_dir + '/metric_test.json'))
        string = json.dumps(test_metrics, indent=2)
        printf(string)
    if isinstance(input_file_or_instances, str):
        raw_data = list(jsonline_iter(input_file_or_instances))
        printf('Read', len(raw_data), 'instances from:', input_file_or_instances)
    elif isinstance(input_file_or_instances, list):
        raw_data = input_file_or_instances
        printf('Get', len(raw_data), 'instances.')
    else:
        raise Exception
    with torch.no_grad():
        model.eval()
        # Predict on the test split first, then on the requested input.
        predict(list(jsonline_iter(params['test_data_path'])), serialization_dir, 'test')
        predict(raw_data, serialization_dir, prediction_name)
def init_hf_modules():
    """Make the HF dynamic-modules cache importable.

    Appends HF_MODULES_CACHE to sys.path, creates the directory if missing,
    and drops an empty __init__.py so it imports as a package. Idempotent:
    returns immediately when the path is already registered.
    """
    if HF_MODULES_CACHE in sys.path:
        # Already registered by a previous call; nothing to do.
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_marker = Path(HF_MODULES_CACHE) / '__init__.py'
    if not init_marker.exists():
        init_marker.touch()
class InputQueue():
    """Kafka producer that serializes payloads with pyarrow and enqueues them
    as base64 strings keyed by URI.

    Keyword args `host`, `port` and `topic_name` configure the connection
    (defaults: localhost:9092, topic 'serving_stream'); all remaining kwargs
    are forwarded to KafkaProducer.
    """
    def __init__(self, frontend_url=None, **kwargs):
        host = (kwargs.get('host') if kwargs.get('host') else 'localhost')
        port = (kwargs.get('port') if kwargs.get('port') else '9092')
        self.topic_name = (kwargs.get('topic_name') if kwargs.get('topic_name') else 'serving_stream')
        self.interval_if_error = 1
        # Strip the connection keys so the rest can be forwarded verbatim.
        for key in ['host', 'port', 'topic_name']:
            if (key in kwargs):
                kwargs.pop(key)
        self.db = KafkaProducer(bootstrap_servers=((host + ':') + port), key_serializer=(lambda k: json.dumps(k).encode('utf-8')), value_serializer=(lambda v: json.dumps(v).encode('utf-8')), **kwargs)

    def enqueue(self, uri, **data):
        """Serialize `data` and send it to Kafka keyed by `uri`."""
        b64str = self.data_to_b64(**data)
        d = {'key': uri, 'value': {'uri': uri, 'data': b64str}}
        self.__enqueue_data(d)

    def data_to_b64(self, **data):
        """Pack the keyword args into one arrow RecordBatch; return it base64-encoded."""
        sink = pa.BufferOutputStream()
        field_list = []
        column_list = []
        for (key, value) in data.items():
            # Renamed loop target: the original rebound `data`, shadowing the
            # kwargs dict while still iterating its items() view.
            (field, column) = get_field_and_data(key, value)
            field_list.append(field)
            column_list.append(column)
        schema = pa.schema(field_list)
        batch = pa.RecordBatch.from_arrays(column_list, schema)
        writer = pa.RecordBatchStreamWriter(sink, batch.schema)
        writer.write_batch(batch)
        writer.close()
        buf = sink.getvalue()
        b = buf.to_pybytes()
        b64str = self.base64_encode_image(b)
        return b64str

    def __enqueue_data(self, data):
        """Send one record and block (up to 10s) for the broker acknowledgement."""
        future = self.db.send(self.topic_name, **data)
        try:
            future.get(timeout=10)
        except kafka_errors:
            # BUGFIX: format_exc() was computed and discarded, and the success
            # message printed unconditionally even after a failure. Log the
            # traceback and only report success when the send was acknowledged.
            print(traceback.format_exc())
        else:
            print('Write to Kafka successful')

    @staticmethod
    def base64_encode_image(img):
        # BUGFIX: this was defined without `self` yet called as an instance
        # method (`self.base64_encode_image(b)`), which raised TypeError;
        # @staticmethod makes both instance and class call styles work.
        return base64.b64encode(img).decode('utf-8')

    def close(self):
        self.db.close()
class ORCLayout(ABC):
    """Node in a layout-optimization tree solved with cvxpy.

    Each node owns four boundary variables (left/right/top/bottom) plus the
    constraints and objectives accumulated from its ancestors. `solve()`
    performs a branch-and-bound style descent over the tree, recording the
    best complete leaf solution in the module-level globals
    `best_leaf_result`, `best_leaf_loss` and `best_leaf`.
    """
    def __init__(self, name, parent):
        super().__init__()
        self.root = False
        self.name = name
        # -1 means "not yet determined".
        self.height = (- 1)
        self.width = (- 1)
        self.parent = parent
        self.children = []
        if (parent != None):
            parent.add_child(self)
        # variable name -> cvxpy Variable
        self.variables = {}
        self.constraints = []
        self.objectives = []
        self.add_boundary_variables()
        self.belongs_to = None
        self.weight = 1
        self.loss = (- 1)
    def set_weight(self, weight):
        self.weight = weight
    def add_child(self, child):
        self.children.append(child)
    def get_children(self):
        return self.children
    def get_parent(self):
        return self.parent
    def update_from_upper_tree(self):
        """Inherit copies of the parent's variables, constraints and objectives."""
        if self.parent:
            self.variables.update(self.parent.variables.copy())
            self.constraints += self.parent.constraints.copy()
            self.objectives += self.parent.objectives.copy()
    def add_boundary_variables(self):
        """Create the four boundary variables <name>_l / _r / _t / _b."""
        left = cvx.Variable()
        right = cvx.Variable()
        top = cvx.Variable()
        bottom = cvx.Variable()
        self.variables[(self.name + '_l')] = left
        self.variables[(self.name + '_r')] = right
        self.variables[(self.name + '_t')] = top
        self.variables[(self.name + '_b')] = bottom
    def constraint_spec(self):
        """Subclasses populate self.constraints / self.objectives here."""
        raise NotImplementedError('Function constraint_spec() needs to be implemented!')
    def copy_constraints(self):
        """Snapshot variables/constraints/objectives recursively (pre-solve backup)."""
        self.variables_copy = self.variables.copy()
        self.constraints_copy = self.constraints.copy()
        self.objectives_copy = self.objectives.copy()
        if (self.get_children() != []):
            for child in self.get_children():
                child.copy_constraints()
    def solve(self):
        """Solve this node's problem and recurse, tracking the best leaf.

        Prunes a subtree when the best known leaf loss already beats this
        node's loss. At an improving leaf, variable values are rounded into
        `best_leaf_result` and the winning row layout is stored on any Flow /
        FlowAroundFix ancestors.
        """
        global best_leaf_result
        global best_leaf_loss
        self.best = False
        self.constraint_spec()
        obj = cvx.Minimize(cvx.sum(self.objectives))
        optimizer = cvx.Problem(obj, self.constraints)
        optimizer.solve()
        self.loss = optimizer.value
        if ((best_leaf_result != None) and (best_leaf_loss < self.loss)):
            # Bound: a better complete solution already exists -- prune.
            pass
        elif (self.get_children() != []):
            # Branch: descend into the children.
            for child in self.get_children():
                child.solve()
                if (child.best == True):
                    self.best = True
        elif ((best_leaf_result == None) or (best_leaf_loss > self.loss)):
            # NOTE(review): because of the `break` in the else branch, only
            # the first variable is actually checked for a missing value.
            for (k, v) in self.variables.items():
                if (v.value == None):
                    print('No Solution!')
                    exit()
                else:
                    break
            best_leaf_result = {k: round(float(v.value)) for (k, v) in self.variables.items()}
            best_leaf_loss = self.loss
            global best_leaf
            best_leaf = self
            # Walk up to the root, freezing the row layout chosen by each
            # Flow-like ancestor for this winning leaf.
            node = self
            while (node.parent != None):
                node = node.parent
                if (isinstance(node, FlowAroundFix) == True):
                    node.best_row_width_upper = node.row_width_upper
                    node.best_row_height_upper = node.row_height_upper
                    node.best_result_index_upper = node.result_index_upper
                    node.best_row_width_middle = node.row_width_middle
                    node.best_row_height_middle = node.row_height_middle
                    node.best_result_index_middle = node.result_index_middle
                    node.best_row_width_lower = node.row_width_lower
                    node.best_row_height_lower = node.row_height_lower
                    node.best_result_index_lower = node.result_index_lower
                elif (isinstance(node, Flow) == True):
                    node.best_row_width = node.row_width
                    node.best_row_height = node.row_height
                    node.best_result_index = node.result_index
            self.best = True
    def get_best(self):
        """Return the globally best (leaf node, rounded variables, loss)."""
        global best_leaf_result
        global best_leaf_loss
        global best_leaf
        return (best_leaf, best_leaf_result, best_leaf_loss)
class Pad(Base):
    """Transform that zero-pads arrays at the trailing edge of each axis.

    `pad` gives the per-axis padding amounts; padding is only ever added
    *after* the existing data, never before.
    """
    def __init__(self, pad):
        self.pad = pad
        # Per-axis (before, after) pairs for np.pad: never pad before.
        self.px = tuple(zip([0] * len(pad), pad))

    def sample(self, *shape):
        """Return the padded shape; axis i grows by pad[i + 1] (pad[0] is
        reserved, presumably for a leading non-spatial axis -- confirm)."""
        padded = list(shape)
        for axis in range(len(padded)):
            padded[axis] = padded[axis] + self.pad[axis + 1]
        return padded

    def tf(self, img, k=0):
        """Zero-pad `img` using the first len(img.shape) axis specs."""
        rank = len(img.shape)
        return np.pad(img, self.px[:rank], mode='constant')

    def __str__(self):
        # Only the first three pad amounts are shown.
        return 'Pad(({}, {}, {}))'.format(*self.pad)
class Machine(aiprt.Machine_Generator):
    """CIFAR-10 machine: initializes the generator with a random 2% sample of
    the training split (10 classes), features cast to float."""
    def __init__(self):
        (x_train, y_train), (x_test, y_test) = datasets.cifar10.load_data()
        # train_test_split with test_size=0.02 -> keep the 2% "test" part.
        _, sample_x, _, sample_y = model_selection.train_test_split(x_train, y_train, test_size=0.02)
        sample_x = sample_x.astype(float)
        super().__init__(sample_x, sample_y, nb_classes=10)
_cache()
# NOTE(review): `_cache()` on its own line above looks like a decorator that
# lost its leading '@', and the f-string below is visibly truncated (the
# Savant CSV URL is missing) -- restore both from the upstream source.
def statcast_outfielder_jump(year: int, min_att: Union[(int, str)]='q') -> pd.DataFrame:
    """Return Statcast outfielder jump data for `year` as a DataFrame.

    `min_att` is the minimum-attempts filter ('q' presumably means
    "qualified" -- confirm against the data source's parameters).
    """
    url = f'
    res = requests.get(url, timeout=None).content
    data = pd.read_csv(io.StringIO(res.decode('utf-8')))
    # Normalize column names to the library's conventions.
    data = sanitize_statcast_columns(data)
    return data
def load_textset(n_jobs, use_gpu, pin_memory, corpus, text):
    """Build train/dev text DataLoaders plus the tokenizer.

    Args:
        n_jobs: worker count (unused here; both loaders use num_workers=0).
        use_gpu: whether a GPU is used; also controls train-loader pinning.
        pin_memory: pinning flag applied to the dev loader.
        corpus: kwargs for create_textset (paths, batch sizes, ...).
        text: kwargs for load_text_encoder (tokenizer spec).

    Returns:
        (train loader, dev loader, vocab size, tokenizer, log messages)
    """
    tokenizer = load_text_encoder(**text)
    (tr_set, dv_set, tr_loader_bs, dv_loader_bs, data_msg) = create_textset(tokenizer, **corpus)
    # Batches are collated differently for train vs dev.
    collect_tr = partial(collect_text_batch, mode='train')
    collect_dv = partial(collect_text_batch, mode='dev')
    # NOTE(review): the train loader pins memory based on `use_gpu` while the
    # dev loader honours the explicit `pin_memory` argument -- confirm intended.
    tr_set = DataLoader(tr_set, batch_size=tr_loader_bs, shuffle=True, drop_last=True, collate_fn=collect_tr, num_workers=0, pin_memory=use_gpu)
    dv_set = DataLoader(dv_set, batch_size=dv_loader_bs, shuffle=False, drop_last=False, collate_fn=collect_dv, num_workers=0, pin_memory=pin_memory)
    data_msg.append('I/O spec. | Token type = {}\t| Vocab size = {}'.format(tokenizer.token_type, tokenizer.vocab_size))
    return (tr_set, dv_set, tokenizer.vocab_size, tokenizer, data_msg)
def parse_args():
    """Parse CLI arguments for protein tertiary-structure training.

    Positional `config` plus optional -s/--serialization-dir, -o/--overrides
    and -d/--seed (default 3435).
    """
    arg_parser = argparse.ArgumentParser(description='Train a model for protein tertiary structure prediction')
    arg_parser.add_argument('config', type=str)
    optional_specs = (
        (('-s', '--serialization-dir'), dict(type=str)),
        (('-o', '--overrides'), dict(type=str)),
        (('-d', '--seed'), dict(type=int, default=3435)),
    )
    for flags, options in optional_specs:
        arg_parser.add_argument(*flags, **options)
    return arg_parser.parse_args()
def _get_reference_md5sum(url):
    """Return the md5 checksum published alongside `url` (as stripped bytes).

    Fetches `url + '.md5sum'` -- the convention of a sidecar checksum file.
    """
    url_md5sum = (url + '.md5sum')
    # BUGFIX: the original never closed the HTTP response; the context
    # manager guarantees the connection is released.
    with urlopen(url_md5sum) as response:
        return response.read().strip()
class User(ABC):
    """Abstract base class for a dialog user; every hook is a no-op default
    that concrete users are expected to override."""
    def __init__(self):
        """The base class holds no state."""
    def init_dialog(self):
        """Hook called at the start of a dialog; default does nothing."""
    def generate_response(self, utterance, persona=None):
        """Return a reply to `utterance` (optionally persona-conditioned);
        default returns None."""
_module()
class ToyFullInitDataset(Dataset):
    """Tiny synthetic dataset: 12 samples of 2-d gaussian inputs paired with
    all-ones labels; used to exercise the full-init Dataset contract."""
    METAINFO = dict()
    # Class-level fixtures shared by every instance.
    data = torch.randn(12, 2)
    label = torch.ones(12)

    def __init__(self):
        # Identity pipeline -- just enough to satisfy the Dataset interface.
        self.pipeline = Compose([(lambda x: x)])

    def __len__(self):
        return self.data.size(0)

    def get_data_info(self, index):
        return dict(inputs=self.data[index], data_sample=self.label[index])

    def full_init(self):
        """Nothing lazy to initialize for this toy dataset."""

    def __getitem__(self, index):
        return dict(inputs=self.data[index], data_sample=self.label[index])
class LocalUpdate(object):
    """One federated-learning client: holds its local data split and runs
    local training epochs, optionally with DP-style virtual batching.

    NOTE(review): `sasampling_prob` looks like a typo for `sampling_prob`;
    it is used consistently below, so renaming it would change the public
    attribute -- left as-is.
    """
    def __init__(self, args, dataset, u_id, idxs, sampling_prob, optimizer):
        self.u_id = u_id
        self.args = args
        (self.trainloader, self.testloader) = self.train_val_test(dataset, list(idxs))
        self.device = ('cuda' if args.gpu else 'cpu')
        self.criterion = nn.CrossEntropyLoss().to(self.device)
        self.dataset_size = len(dataset)
        self.sasampling_prob = sampling_prob
        self.optimizer = optimizer
    def train_val_test(self, dataset, idxs):
        """Split `idxs` into train/test loaders per args.local_test_split."""
        _split = 0
        if (self.args.local_test_split > 0.0):
            # Reserve at least one example for local testing.
            _split = max(int(np.round((self.args.local_test_split * len(idxs)))), 1)
        idxs_train = idxs[_split:]
        # NOTE(review): batch_size is len(idxs) -- the size of the *full*
        # index list, not the train subset -- so one (dropped-if-short) batch
        # spans the whole split; confirm intended.
        trainloader = DataLoader(DatasetSplit(dataset, idxs_train), batch_size=len(idxs), shuffle=True, drop_last=True)
        testloader = None
        if (_split > 0):
            idxs_test = idxs[:_split]
            testloader = DataLoader(DatasetSplit(dataset, idxs_test), batch_size=int(len(idxs_test)), shuffle=False)
        return (trainloader, testloader)
    def update_weights(self, model, global_round):
        """Train locally for args.local_ep epochs.

        Returns:
            (model state_dict, mean epoch loss, optimizer). Returns loss 0.0
            immediately if the binomial draw samples zero examples.
        """
        model.to(self.device)
        model.train()
        epoch_loss = []
        for iter in range(self.args.local_ep):  # NOTE: shadows builtin iter()
            batch_loss = []
            self.optimizer.zero_grad()
            if self.args.withDP:
                # Number of real batches that form one "virtual" DP batch.
                virtual_batch_rate = int((self.args.virtual_batch_size / self.args.local_bs))
            for (batch_idx, (images, labels)) in enumerate(self.trainloader):
                # Subsample the batch: binomial draw of how many examples to
                # keep, taken from a random permutation.
                indices = np.random.permutation(len(labels))
                rnd_sampled = np.random.binomial(len(labels), self.sasampling_prob)
                if (rnd_sampled > 0):
                    images = images[indices][:rnd_sampled]
                    labels = labels[indices][:rnd_sampled]
                else:
                    # Nothing sampled this round: abort local training.
                    return (model.state_dict(), 0.0, self.optimizer)
                (images, labels) = (images.to(self.device), labels.to(self.device))
                model_preds = model(images)
                loss = self.criterion(model_preds, labels)
                loss.backward()
                if self.args.withDP:
                    # Step only on virtual-batch boundaries (or the last
                    # batch); otherwise accumulate via virtual_step (assumes
                    # an Opacus-style DP optimizer -- TODO confirm).
                    if ((((batch_idx + 1) % virtual_batch_rate) == 0) or ((batch_idx + 1) == len(self.trainloader))):
                        self.optimizer.step()
                        self.optimizer.zero_grad()
                    else:
                        self.optimizer.virtual_step()
                else:
                    self.optimizer.step()
                    self.optimizer.zero_grad()
                batch_loss.append(loss.item())
            epoch_loss.append((sum(batch_loss) / len(batch_loss)))
        return (model.state_dict(), (sum(epoch_loss) / len(epoch_loss)), self.optimizer)
def write_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file):
    """Write final SQuAD-style predictions to JSON files.

    For each example, gathers span candidates from every sliding-window
    feature, detokenizes the best spans back to original text, and writes:
    the best answer per question (`output_prediction_file`), the n-best
    list (`output_nbest_file`), and — when `FLAGS.version_2_with_negative`
    — the null-vs-best score differences (`output_null_log_odds_file`).

    Args:
        all_examples: SquadExample list, indexed by example_index.
        all_features: InputFeatures list; several features may map to one
            example (document sliding windows).
        all_results: RawResult list with start/end logits per feature.
        n_best_size: number of top start/end indexes (and n-best entries).
        max_answer_length: maximum allowed answer span length in tokens.
        do_lower_case: passed through to `get_final_text`.
        output_prediction_file / output_nbest_file / output_null_log_odds_file:
            destination paths.
    """
    tf.logging.info(('Writing predictions to: %s' % output_prediction_file))
    tf.logging.info(('Writing nbest to: %s' % output_nbest_file))

    # Group features by their parent example, and index results by feature id.
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result

    _PrelimPrediction = collections.namedtuple('PrelimPrediction', ['feature_index', 'start_index', 'end_index', 'start_logit', 'end_logit'])

    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict()
    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]
        prelim_predictions = []
        # Track the minimum "no answer" score across this example's features.
        score_null = 1000000
        min_null_feature_index = 0
        null_start_logit = 0
        null_end_logit = 0
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature.unique_id]
            start_indexes = _get_best_indexes(result.start_logits, n_best_size)
            end_indexes = _get_best_indexes(result.end_logits, n_best_size)
            if FLAGS.version_2_with_negative:
                # The [CLS] position (index 0) scores the null answer.
                feature_null_score = (result.start_logits[0] + result.end_logits[0])
                if (feature_null_score < score_null):
                    score_null = feature_null_score
                    min_null_feature_index = feature_index
                    null_start_logit = result.start_logits[0]
                    null_end_logit = result.end_logits[0]
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # Discard invalid spans: out of range, pointing at
                    # non-document tokens, non-max-context windows,
                    # inverted, or too long.
                    if (start_index >= len(feature.tokens)):
                        continue
                    if (end_index >= len(feature.tokens)):
                        continue
                    if (start_index not in feature.token_to_orig_map):
                        continue
                    if (end_index not in feature.token_to_orig_map):
                        continue
                    if (not feature.token_is_max_context.get(start_index, False)):
                        continue
                    if (end_index < start_index):
                        continue
                    length = ((end_index - start_index) + 1)
                    if (length > max_answer_length):
                        continue
                    prelim_predictions.append(_PrelimPrediction(feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index]))
        if FLAGS.version_2_with_negative:
            prelim_predictions.append(_PrelimPrediction(feature_index=min_null_feature_index, start_index=0, end_index=0, start_logit=null_start_logit, end_logit=null_end_logit))
        prelim_predictions = sorted(prelim_predictions, key=(lambda x: (x.start_logit + x.end_logit)), reverse=True)

        _NbestPrediction = collections.namedtuple('NbestPrediction', ['text', 'start_logit', 'end_logit'])

        # De-duplicate by final answer text while building the n-best list.
        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if (len(nbest) >= n_best_size):
                break
            feature = features[pred.feature_index]
            if (pred.start_index > 0):
                # Map the WordPiece span back to the original document text.
                tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
                orig_doc_start = feature.token_to_orig_map[pred.start_index]
                orig_doc_end = feature.token_to_orig_map[pred.end_index]
                orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
                tok_text = ' '.join(tok_tokens)
                # Undo WordPiece: "##" marks subword continuations.
                tok_text = tok_text.replace(' ##', '')
                tok_text = tok_text.replace('##', '')
                tok_text = tok_text.strip()
                tok_text = ' '.join(tok_text.split())
                orig_text = ' '.join(orig_tokens)
                final_text = get_final_text(tok_text, orig_text, do_lower_case)
                if (final_text in seen_predictions):
                    continue
                seen_predictions[final_text] = True
            else:
                # start_index == 0 is the null prediction.
                final_text = ''
                seen_predictions[final_text] = True
            nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit))
        # In very rare edge cases we could only have a single null
        # prediction; make sure the empty answer is represented, and never
        # emit an empty n-best list.
        if FLAGS.version_2_with_negative:
            if ('' not in seen_predictions):
                nbest.append(_NbestPrediction(text='', start_logit=null_start_logit, end_logit=null_end_logit))
        if (not nbest):
            nbest.append(_NbestPrediction(text='empty', start_logit=0.0, end_logit=0.0))
        assert (len(nbest) >= 1)

        total_scores = []
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append((entry.start_logit + entry.end_logit))
            if (not best_non_null_entry):
                if entry.text:
                    best_non_null_entry = entry
        probs = _compute_softmax(total_scores)

        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output['text'] = entry.text
            output['probability'] = probs[i]
            output['start_logit'] = entry.start_logit
            output['end_logit'] = entry.end_logit
            nbest_json.append(output)
        assert (len(nbest_json) >= 1)

        if (not FLAGS.version_2_with_negative):
            all_predictions[example.qas_id] = nbest_json[0]['text']
        elif (best_non_null_entry is None):
            # BUG FIX: every n-best entry can have empty text, leaving
            # best_non_null_entry as None; the original code then crashed
            # with AttributeError below. Predict "no answer" with the null
            # score as the diff instead.
            scores_diff_json[example.qas_id] = score_null
            all_predictions[example.qas_id] = ''
        else:
            # Predict null iff (null score - best span score) exceeds the
            # tuned threshold.
            score_diff = ((score_null - best_non_null_entry.start_logit) - best_non_null_entry.end_logit)
            scores_diff_json[example.qas_id] = score_diff
            if (score_diff > FLAGS.null_score_diff_threshold):
                all_predictions[example.qas_id] = ''
            else:
                all_predictions[example.qas_id] = best_non_null_entry.text
        all_nbest_json[example.qas_id] = nbest_json

    with tf.gfile.GFile(output_prediction_file, 'w') as writer:
        writer.write((json.dumps(all_predictions, indent=4) + '\n'))
    with tf.gfile.GFile(output_nbest_file, 'w') as writer:
        writer.write((json.dumps(all_nbest_json, indent=4) + '\n'))
    if FLAGS.version_2_with_negative:
        with tf.gfile.GFile(output_null_log_odds_file, 'w') as writer:
            writer.write((json.dumps(scores_diff_json, indent=4) + '\n'))
def sampler(sample_function: Callable) -> Callable:
    """Decorator factory turning a sampling strategy into a pulse sampler.

    `sample_function(continuous_pulse, duration, *args, **kwargs)` must
    return the sampled amplitudes of `continuous_pulse` over `duration`
    samples.

    Returns:
        A decorator that, applied to a continuous pulse function, yields a
        `functional_pulse` command producing the sampled (complex) pulse.
    """
    import functools  # local import: file-level import block is out of view here

    def generate_sampler(continuous_pulse: Callable) -> Callable:
        """Return the sampled version of `continuous_pulse`."""
        # BUG FIX: the original had a bare no-op expression
        # `(continuous_pulse)` where this decorator belongs. Without
        # functools.wraps, `call_sampler.__dict__.pop('__wrapped__')`
        # below raises KeyError because '__wrapped__' is only set by wraps.
        @functools.wraps(continuous_pulse)
        def call_sampler(duration: int, *args, **kwargs) -> commands.SamplePulse:
            sampled_pulse = sample_function(continuous_pulse, duration, *args, **kwargs)
            return np.asarray(sampled_pulse, dtype=np.complex_)

        # Rewrite annotations/docstring so the wrapper documents the
        # sampled pulse rather than the continuous function.
        call_sampler = _update_annotations(call_sampler)
        call_sampler = _update_docstring(call_sampler, sample_function)
        # Drop the __wrapped__ back-reference added by functools.wraps.
        call_sampler.__dict__.pop('__wrapped__')
        return commands.functional_pulse(call_sampler)
    return generate_sampler
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.