code stringlengths 101 5.91M |
|---|
def LCS_mask(src, tgt, stop_words):
    """Return a 0/1 mask over ``tgt`` marking tokens that belong to a
    longest common subsequence (LCS) of ``src`` and ``tgt``.

    Tokens contained in ``stop_words`` never count as matches.

    Args:
        src: source token sequence.
        tgt: target token sequence.
        stop_words: collection of tokens to ignore, or None.

    Returns:
        list[int]: mask of length ``len(tgt)``; 1 where the target token is
        part of the LCS, 0 otherwise.
    """
    m = len(src)
    n = len(tgt)
    if stop_words is None:
        stop_words = set()
    # Standard LCS DP table: mat[i][j] = LCS length of src[:i] and tgt[:j].
    mat = [[0] * (n + 1) for _ in range(m + 1)]
    for row in range(1, m + 1):
        for col in range(1, n + 1):
            if src[row - 1] == tgt[col - 1] and src[row - 1] not in stop_words:
                mat[row][col] = mat[row - 1][col - 1] + 1
            else:
                mat[row][col] = max(mat[row][col - 1], mat[row - 1][col])
    x, y = m, n
    mask = []
    # Traceback. BUG FIX: a cell is only a match when the tokens are actually
    # equal (and not stop words); the diagonal-value test alone can hold
    # coincidentally for non-matching tokens (e.g. src='ab', tgt='ba'
    # previously marked tgt[1] as a match).
    while y > 0 and x > 0:
        if (src[x - 1] == tgt[y - 1] and src[x - 1] not in stop_words
                and mat[x][y] == mat[x - 1][y - 1] + 1):
            x -= 1
            y -= 1
            mask.append(1)
        elif mat[x][y] == mat[x][y - 1]:
            y -= 1
            mask.append(0)
        else:
            x -= 1
    # Pad zeros for any target prefix not reached during traceback.
    while y > 0:
        y -= 1
        mask.append(0)
    return mask[::-1]
class Expr(Node):
    """AST node that wraps a single expression."""

    def __init__(self, start, end, expr):
        # Positional bookkeeping (start/end) lives on the base Node.
        Node.__init__(self, start, end)
        self.expr = expr

    def Requires(self, node):
        # An expression node never depends on another node.
        return False

    def __str__(self):
        return self._StringHelper(type(self).__name__, str(self.expr))
class DenseInverseAutoRegressive(nn.Module):
    """Inverse autoregressive transform: (x - mean(x)) / std(x), with the
    shift and scale each produced by a Dense layer of the same width."""

    def __init__(self, n):
        super(DenseInverseAutoRegressive, self).__init__()
        self.mean = Dense(n, n)
        self.std = Dense(n, n)

    def forward(self, input):
        # Compute shift and scale from the input itself, then normalize.
        shift = self.mean(input)
        scale = self.std(input)
        return (input - shift) / scale
def chunked_dataset_iterator(chunk_refs: List, read_chunk_fn: Callable[[Any], Iterator], buffer_size: int, train: bool=True, seed: Optional[int]=None, shuffle: bool=True, use_windowed: bool=False, transform: Callable[[Any], Any]=None, prefetch: bool=True, num_instances: int=1, instance_rank: int=0):
    """Build a sample-iterator pipeline over chunked data.

    Stages (in order): resolve/shard chunk references, expand chunks into
    samples, optional prefetch, optional shuffle (windowed or blockwise),
    optional per-sample transform.
    """
    if shuffle and not train:
        raise ValueError('shuffling is not supported when train=False')
    # Resolve the chunk references into a (possibly sharded/shuffled) source.
    chunk_refs = create_source_iterator(chunk_refs, train=train, seed=seed, shuffle=shuffle, num_instances=num_instances, instance_rank=instance_rank)
    # Expand every chunk into its individual samples.
    samples = SelectManyIterator(source_iterator=chunk_refs, collection_selector=read_chunk_fn)
    if prefetch:
        samples = PrefetchIterator(samples, buffer_size)
    if shuffle:
        # Windowed shuffle keeps a rolling buffer; blockwise shuffles
        # fixed-size blocks.
        shuffler = BufferedShuffleIterator if use_windowed else BlockwiseShuffleIterator
        samples = shuffler(samples, buffer_size, bump_seed(seed, 1))
    if transform is not None:
        samples = MapIterator(samples, transform)
    return samples
def main():
    """Train (or evaluate) a shared TD3 policy controlling 4 agents in a ROS
    simulator environment.

    NOTE(review): indentation of this function was reconstructed from a
    flattened dump; block nesting was inferred from control flow. The
    function reads many module-level globals defined elsewhere in the file
    (is_training, S_DIM, A_DIM, pkg_path, writer, start_timesteps,
    max_action, batch_size, MAX_STEPS_TRAINING, init_net_action_noise,
    noise_decay_episode, weight_outdir) — confirm before reuse.
    """
    rospy.init_node('model_free_version', log_level=rospy.WARN)
    env = Env(is_training)
    policy = TD3(S_DIM, A_DIM)
    print()
    # Load a pretrained checkpoint before training/testing.
    policy.load((pkg_path + '/Models/TEST/test'))
    replay_buffer = utils.ReplayBuffer(S_DIM, A_DIM)
    # Counters persisting across episodes.
    total_step = 0
    save_time = 0
    episode_num = 1
    success_num = 0
    actor_loss_episode = 0
    critic_loss_episode = 0
    net_action_noise = init_net_action_noise
    # Per-agent 2-D actions. NOTE(review): these become numpy arrays after
    # the first np.around(...) call inside the loop.
    action_1 = [0.0, 0.0]
    action_2 = [0.0, 0.0]
    action_3 = [0.0, 0.0]
    action_4 = [0.0, 0.0]
    episode_reward_all = 0.0
    # Per-agent safety / step statistics (used for the score scalars below).
    social_safe_step_1 = 0
    social_safe_step_2 = 0
    social_safe_step_3 = 0
    social_safe_step_4 = 0
    ego_safe_step_1 = 0
    ego_safe_step_2 = 0
    ego_safe_step_3 = 0
    ego_safe_step_4 = 0
    total_step_1 = 0
    total_step_2 = 0
    total_step_3 = 0
    total_step_4 = 0
    if is_training:
        print('Training mode')
        while True:  # episode loop
            (state_all_1, state_all_2, state_all_3, state_all_4) = env.reset()
            one_round_step = 0
            # flag_stop_i becomes 1 once agent i has finished (done/arrived);
            # its transitions and stats are then no longer recorded.
            flag_stop_1 = 0
            flag_stop_2 = 0
            flag_stop_3 = 0
            flag_stop_4 = 0
            print(('Training Episode > ' + str(episode_num)))
            while True:  # step loop
                # Sanity-check observation sizes coming from the simulator.
                if (not (len(state_all_1) == len(state_all_2) == len(state_all_3) == len(state_all_4) == S_DIM)):
                    print(len(state_all_1))
                    print('Something Wrong with the simulator !!!')
                    break
                # Agent 1: uniform random exploration for the first
                # start_timesteps steps, then policy action plus Gaussian
                # noise, clipped to [-max_action, max_action].
                if (total_step < start_timesteps):
                    action_1[0] = random.uniform((- max_action), max_action)
                    action_1[1] = random.uniform((- max_action), max_action)
                else:
                    net_action_1 = policy.select_action(np.array(state_all_1))
                    action_1[0] = (net_action_1[0] + np.random.normal(0, (max_action * net_action_noise), size=1)).clip((- max_action), max_action)
                    action_1[1] = (net_action_1[1] + np.random.normal(0, (max_action * net_action_noise), size=1)).clip((- max_action), max_action)
                # Agent 2: same scheme.
                if (total_step < start_timesteps):
                    action_2[0] = random.uniform((- max_action), max_action)
                    action_2[1] = random.uniform((- max_action), max_action)
                else:
                    net_action_2 = policy.select_action(np.array(state_all_2))
                    action_2[0] = (net_action_2[0] + np.random.normal(0, (max_action * net_action_noise), size=1)).clip((- max_action), max_action)
                    action_2[1] = (net_action_2[1] + np.random.normal(0, (max_action * net_action_noise), size=1)).clip((- max_action), max_action)
                # Agent 3: same scheme.
                if (total_step < start_timesteps):
                    action_3[0] = random.uniform((- max_action), max_action)
                    action_3[1] = random.uniform((- max_action), max_action)
                else:
                    net_action_3 = policy.select_action(np.array(state_all_3))
                    action_3[0] = (net_action_3[0] + np.random.normal(0, (max_action * net_action_noise), size=1)).clip((- max_action), max_action)
                    action_3[1] = (net_action_3[1] + np.random.normal(0, (max_action * net_action_noise), size=1)).clip((- max_action), max_action)
                # Agent 4: same scheme.
                if (total_step < start_timesteps):
                    action_4[0] = random.uniform((- max_action), max_action)
                    action_4[1] = random.uniform((- max_action), max_action)
                else:
                    net_action_4 = policy.select_action(np.array(state_all_4))
                    action_4[0] = (net_action_4[0] + np.random.normal(0, (max_action * net_action_noise), size=1)).clip((- max_action), max_action)
                    action_4[1] = (net_action_4[1] + np.random.normal(0, (max_action * net_action_noise), size=1)).clip((- max_action), max_action)
                # Round for reproducible logging / simulator input.
                action_1 = np.around(action_1, decimals=5)
                action_2 = np.around(action_2, decimals=5)
                action_3 = np.around(action_3, decimals=5)
                action_4 = np.around(action_4, decimals=5)
                print(action_1)
                print(action_2)
                print(action_3)
                print(action_4)
                # All four agents step the environment jointly.
                action = [action_1[0], action_1[1], action_2[0], action_2[1], action_3[0], action_3[1], action_4[0], action_4[1]]
                (next_state_all_1, reward_1, done_1, arrive_1, next_state_all_2, reward_2, done_2, arrive_2, next_state_all_3, reward_3, done_3, arrive_3, next_state_all_4, reward_4, done_4, arrive_4, flag_ego_safety_1, flag_social_safety_1, flag_ego_safety_2, flag_social_safety_2, flag_ego_safety_3, flag_social_safety_3, flag_ego_safety_4, flag_social_safety_4) = env.step(action)
                writer.add_scalar('Reward/reward_1', reward_1, total_step)
                writer.add_scalar('Reward/reward_2', reward_2, total_step)
                writer.add_scalar('Reward/reward_3', reward_3, total_step)
                writer.add_scalar('Reward/reward_4', reward_4, total_step)
                t1 = time.time()
                # Record transitions and stats only for agents still active.
                if (flag_stop_1 == 0):
                    replay_buffer.add(state_all_1, action_1, next_state_all_1, reward_1, (done_1 or arrive_1), 1)
                    episode_reward_all += reward_1
                    social_safe_step_1 += flag_social_safety_1
                    ego_safe_step_1 += flag_ego_safety_1
                    total_step_1 += 1
                if (flag_stop_2 == 0):
                    replay_buffer.add(state_all_2, action_2, next_state_all_2, reward_2, (done_2 or arrive_2), 2)
                    episode_reward_all += reward_2
                    social_safe_step_2 += flag_social_safety_2
                    ego_safe_step_2 += flag_ego_safety_2
                    total_step_2 += 1
                if (flag_stop_3 == 0):
                    replay_buffer.add(state_all_3, action_3, next_state_all_3, reward_3, (done_3 or arrive_3), 3)
                    episode_reward_all += reward_3
                    social_safe_step_3 += flag_social_safety_3
                    ego_safe_step_3 += flag_ego_safety_3
                    total_step_3 += 1
                if (flag_stop_4 == 0):
                    replay_buffer.add(state_all_4, action_4, next_state_all_4, reward_4, (done_4 or arrive_4), 4)
                    episode_reward_all += reward_4
                    social_safe_step_4 += flag_social_safety_4
                    ego_safe_step_4 += flag_ego_safety_4
                    total_step_4 += 1
                t2 = time.time()
                # Advance observations (copy to decouple from env internals).
                state_all_1 = next_state_all_1[:]
                state_all_2 = next_state_all_2[:]
                state_all_3 = next_state_all_3[:]
                state_all_4 = next_state_all_4[:]
                # One gradient step per env step once warm-up is over.
                if (total_step >= start_timesteps):
                    print('TRAIN step :', (total_step - start_timesteps))
                    t1 = time.time()
                    (actor_loss_episode, critic_loss_episode) = policy.train(replay_buffer, batch_size)
                    t2 = time.time()
                    print('TRAIN Time : {} ms'.format(round((1000 * (t2 - t1)), 2)))
                    writer.add_scalar('Training/actor_loss_episode', actor_loss_episode, (total_step - start_timesteps))
                    writer.add_scalar('Training/critic_loss_episode', critic_loss_episode, (total_step - start_timesteps))
                one_round_step += 1
                total_step += 1
                # Per-agent outcome labels for the status prints below.
                if arrive_1:
                    result_1 = 'Success'
                else:
                    result_1 = 'Fail'
                if arrive_2:
                    result_2 = 'Success'
                else:
                    result_2 = 'Fail'
                if arrive_3:
                    result_3 = 'Success'
                else:
                    result_3 = 'Fail'
                if arrive_4:
                    result_4 = 'Success'
                else:
                    result_4 = 'Fail'
                # Announce each agent's termination exactly once.
                if ((arrive_1 or done_1) and (flag_stop_1 == 0)):
                    print(('Agent 1 : Step: %4i' % one_round_step), ('| Time step: %i' % total_step), '|', result_1)
                    flag_stop_1 = 1
                    if arrive_1:
                        success_num += 1
                if ((arrive_2 or done_2) and (flag_stop_2 == 0)):
                    print(('Agent 2 : Step: %4i' % one_round_step), ('| Time step: %i' % total_step), '|', result_2)
                    flag_stop_2 = 1
                    if arrive_2:
                        success_num += 1
                if ((arrive_3 or done_3) and (flag_stop_3 == 0)):
                    print(('Agent 3 : Step: %4i' % one_round_step), ('| Time step: %i' % total_step), '|', result_3)
                    flag_stop_3 = 1
                    if arrive_3:
                        success_num += 1
                if ((arrive_4 or done_4) and (flag_stop_4 == 0)):
                    print(('Agent 4 : Step: %4i' % one_round_step), ('| Time step: %i' % total_step), '|', result_4)
                    flag_stop_4 = 1
                    if arrive_4:
                        success_num += 1
                # Episode ends when all agents finished or the step cap hits.
                if (((arrive_1 or done_1) and (arrive_2 or done_2) and (arrive_3 or done_3) and (arrive_4 or done_4)) or (one_round_step >= MAX_STEPS_TRAINING)):
                    writer.add_scalar('Criteria/episode_reward_all', episode_reward_all, episode_num)
                    writer.add_scalar('Criteria/success_rate', ((success_num / episode_num) / 4), episode_num)
                    writer.add_scalar('Criteria/average_step', (total_step / episode_num), episode_num)
                    writer.add_scalar('Social_score/1', (social_safe_step_1 / total_step_1), episode_num)
                    writer.add_scalar('Social_score/2', (social_safe_step_2 / total_step_2), episode_num)
                    writer.add_scalar('Social_score/3', (social_safe_step_3 / total_step_3), episode_num)
                    writer.add_scalar('Social_score/4', (social_safe_step_4 / total_step_4), episode_num)
                    writer.add_scalar('Ego_score/1', (ego_safe_step_1 / total_step_1), episode_num)
                    writer.add_scalar('Ego_score/2', (ego_safe_step_2 / total_step_2), episode_num)
                    writer.add_scalar('Ego_score/3', (ego_safe_step_3 / total_step_3), episode_num)
                    writer.add_scalar('Ego_score/4', (ego_safe_step_4 / total_step_4), episode_num)
                    print(('All Agents DONE !!! : Step: %4i' % one_round_step), ('| Time step: %i' % total_step), '|')
                    print(('net_action_noise: %4f' % net_action_noise))
                    # Periodic checkpointing every 15 episodes.
                    if ((episode_num % 15) == 0):
                        policy.save(((weight_outdir + '/') + str(save_time)))
                        save_time += 1
                    # Step LR schedulers once training has actually started.
                    # NOTE(review): nesting of the LR-logging line was
                    # reconstructed; confirm against the original source.
                    if (total_step > start_timesteps):
                        policy.scheduler_actor.step()
                        policy.scheduler_critic.step()
                        writer.add_scalar('Learning Rate', policy.actor_optimizer.param_groups[0]['lr'], episode_num)
                    episode_reward_all = 0.0
                    episode_num += 1
                    # Decay exploration noise after the warm-up episodes.
                    if (episode_num > noise_decay_episode):
                        net_action_noise = (net_action_noise * 0.999)
                    break
    else:
        # Testing mode: deterministic policy actions, no replay buffer, no
        # gradient updates; the same metrics are still logged.
        print('Testing mode')
        while True:  # episode loop
            (state_all_1, state_all_2, state_all_3, state_all_4) = env.reset()
            one_round_step = 0
            flag_stop_1 = 0
            flag_stop_2 = 0
            flag_stop_3 = 0
            flag_stop_4 = 0
            # NOTE(review): message says 'Training' even in testing mode —
            # likely a copy-paste slip in the original.
            print(('Training Episode > ' + str(episode_num)))
            while True:  # step loop
                if (not (len(state_all_1) == len(state_all_2) == len(state_all_3) == len(state_all_4) == S_DIM)):
                    print(len(state_all_1))
                    print('Something Wrong with the simulator !!!')
                    break
                # Pure policy actions (no exploration noise).
                net_action_1 = policy.select_action(np.array(state_all_1))
                net_action_2 = policy.select_action(np.array(state_all_2))
                net_action_3 = policy.select_action(np.array(state_all_3))
                net_action_4 = policy.select_action(np.array(state_all_4))
                action = [net_action_1[0], net_action_1[1], net_action_2[0], net_action_2[1], net_action_3[0], net_action_3[1], net_action_4[0], net_action_4[1]]
                (next_state_all_1, reward_1, done_1, arrive_1, next_state_all_2, reward_2, done_2, arrive_2, next_state_all_3, reward_3, done_3, arrive_3, next_state_all_4, reward_4, done_4, arrive_4, flag_ego_safety_1, flag_social_safety_1, flag_ego_safety_2, flag_social_safety_2, flag_ego_safety_3, flag_social_safety_3, flag_ego_safety_4, flag_social_safety_4) = env.step(action)
                writer.add_scalar('Reward/reward_1', reward_1, total_step)
                writer.add_scalar('Reward/reward_2', reward_2, total_step)
                writer.add_scalar('Reward/reward_3', reward_3, total_step)
                writer.add_scalar('Reward/reward_4', reward_4, total_step)
                t1 = time.time()
                # Accumulate stats only for agents still active.
                if (flag_stop_1 == 0):
                    episode_reward_all += reward_1
                    social_safe_step_1 += flag_social_safety_1
                    ego_safe_step_1 += flag_ego_safety_1
                    total_step_1 += 1
                if (flag_stop_2 == 0):
                    episode_reward_all += reward_2
                    social_safe_step_2 += flag_social_safety_2
                    ego_safe_step_2 += flag_ego_safety_2
                    total_step_2 += 1
                if (flag_stop_3 == 0):
                    episode_reward_all += reward_3
                    social_safe_step_3 += flag_social_safety_3
                    ego_safe_step_3 += flag_ego_safety_3
                    total_step_3 += 1
                if (flag_stop_4 == 0):
                    episode_reward_all += reward_4
                    social_safe_step_4 += flag_social_safety_4
                    ego_safe_step_4 += flag_ego_safety_4
                    total_step_4 += 1
                t2 = time.time()
                state_all_1 = next_state_all_1[:]
                state_all_2 = next_state_all_2[:]
                state_all_3 = next_state_all_3[:]
                state_all_4 = next_state_all_4[:]
                one_round_step += 1
                total_step += 1
                if arrive_1:
                    result_1 = 'Success'
                else:
                    result_1 = 'Fail'
                if arrive_2:
                    result_2 = 'Success'
                else:
                    result_2 = 'Fail'
                if arrive_3:
                    result_3 = 'Success'
                else:
                    result_3 = 'Fail'
                if arrive_4:
                    result_4 = 'Success'
                else:
                    result_4 = 'Fail'
                if ((arrive_1 or done_1) and (flag_stop_1 == 0)):
                    print(('Agent 1 : Step: %4i' % one_round_step), ('| Time step: %i' % total_step), '|', result_1)
                    flag_stop_1 = 1
                    if arrive_1:
                        success_num += 1
                if ((arrive_2 or done_2) and (flag_stop_2 == 0)):
                    print(('Agent 2 : Step: %4i' % one_round_step), ('| Time step: %i' % total_step), '|', result_2)
                    flag_stop_2 = 1
                    if arrive_2:
                        success_num += 1
                if ((arrive_3 or done_3) and (flag_stop_3 == 0)):
                    print(('Agent 3 : Step: %4i' % one_round_step), ('| Time step: %i' % total_step), '|', result_3)
                    flag_stop_3 = 1
                    if arrive_3:
                        success_num += 1
                if ((arrive_4 or done_4) and (flag_stop_4 == 0)):
                    print(('Agent 4 : Step: %4i' % one_round_step), ('| Time step: %i' % total_step), '|', result_4)
                    flag_stop_4 = 1
                    if arrive_4:
                        success_num += 1
                if (((arrive_1 or done_1) and (arrive_2 or done_2) and (arrive_3 or done_3) and (arrive_4 or done_4)) or (one_round_step >= MAX_STEPS_TRAINING)):
                    writer.add_scalar('Criteria/episode_reward_all', episode_reward_all, episode_num)
                    writer.add_scalar('Criteria/success_rate', ((success_num / episode_num) / 4), episode_num)
                    writer.add_scalar('Criteria/average_step', (total_step / episode_num), episode_num)
                    writer.add_scalar('Social_score/1', (social_safe_step_1 / total_step_1), episode_num)
                    writer.add_scalar('Social_score/2', (social_safe_step_2 / total_step_2), episode_num)
                    writer.add_scalar('Social_score/3', (social_safe_step_3 / total_step_3), episode_num)
                    writer.add_scalar('Social_score/4', (social_safe_step_4 / total_step_4), episode_num)
                    writer.add_scalar('Ego_score/1', (ego_safe_step_1 / total_step_1), episode_num)
                    writer.add_scalar('Ego_score/2', (ego_safe_step_2 / total_step_2), episode_num)
                    writer.add_scalar('Ego_score/3', (ego_safe_step_3 / total_step_3), episode_num)
                    writer.add_scalar('Ego_score/4', (ego_safe_step_4 / total_step_4), episode_num)
                    print(('All Agents DONE !!! : Step: %4i' % one_round_step), ('| Time step: %i' % total_step), '|')
                    episode_reward_all = 0.0
                    episode_num += 1
                    break
class TFAutoModelForMaskedImageModeling(_BaseAutoModelClass):
    # Auto-class dispatcher: instantiates the TensorFlow masked-image-modeling
    # model matching a given config, via the model-type -> class mapping below.
    _model_mapping = TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING
def image_dataset_kwargs(parsed_args):
    """Collect the parsed CLI arguments relevant to building an image
    (re-id) dataset into a keyword-argument dict."""
    return dict(
        source_names=parsed_args.source_names,
        target_names=parsed_args.target_names,
        root=parsed_args.root,
        split_id=parsed_args.split_id,
        height=parsed_args.height,
        width=parsed_args.width,
        train_batch_size=parsed_args.train_batch_size,
        test_batch_size=parsed_args.test_batch_size,
        num_instances=parsed_args.num_instances,
        workers=parsed_args.workers,
        train_sampler=parsed_args.train_sampler,
        cuhk03_labeled=parsed_args.cuhk03_labeled,
        cuhk03_classic_split=parsed_args.cuhk03_classic_split,
    )
def seresnext101_32x4d(**kwargs):
    """SE-ResNeXt-101 (32x4d): 101 blocks, cardinality 32, bottleneck width 4.

    Extra keyword arguments are forwarded to ``get_seresnext``.
    """
    return get_seresnext(
        blocks=101,
        cardinality=32,
        bottleneck_width=4,
        model_name='seresnext101_32x4d',
        **kwargs)
class PerformanceWidget():
    """Viewer widget plotting rolling GUI / render / normalize / predict
    timings as sparkline plots with mean-latency labels and FPS tooltips."""

    def __init__(self, viz):
        self.viz = viz
        # Rolling windows of recent per-frame timings (seconds); NaN marks
        # slots that have not received a sample yet.
        self.gui_times = ([float('nan')] * 60)
        self.render_times = ([float('nan')] * 30)
        self.norm_times = ([float('nan')] * 30)
        self.predict_times = ([float('nan')] * 30)

    def timing_text(self, times):
        """Render the mean of the valid samples as 'X.X ms' (FPS tooltip)."""
        viz = self.viz
        imgui.same_line((viz.label_w + (viz.font_size * 7)))
        # NaN > 0 is False, so unfilled slots are dropped here as well.
        t = [x for x in times if (x > 0)]
        t = (np.mean(t) if (len(t) > 0) else 0)
        imgui.text((f'{(t * 1000.0):.1f} ms' if (t > 0) else 'N/A'))
        if imgui.is_item_hovered():
            imgui.set_tooltip((f'{(1 / t):.1f} FPS' if (t > 0) else 'N/A'))

    # BUG FIX: the original contained `_utils.scoped_by_object_id` as a bare
    # (no-op) expression statement above __call__; it is evidently a decorator
    # whose '@' was lost, so it is restored here.
    @_utils.scoped_by_object_id
    def __call__(self, show=True):
        viz = self.viz
        # Shift each rolling window left by one and append the newest sample.
        # Per-stage timings are consumed (deleted) from viz.result so they
        # are only counted once.
        self.gui_times = (self.gui_times[1:] + [viz.frame_delta])
        if ('render_time' in viz.result):
            self.render_times = (self.render_times[1:] + [viz.result.render_time])
            del viz.result.render_time
        if ('norm_time' in viz.result):
            self.norm_times = (self.norm_times[1:] + [viz.result.norm_time])
            del viz.result.norm_time
        if ('inference_time' in viz.result):
            self.predict_times = (self.predict_times[1:] + [viz.result.inference_time])
            del viz.result.inference_time
        if show:
            viz.header('Performance')
            if viz.collapsing_header('Timing', default=True):
                # One labelled sparkline + mean label per timing category.
                imgui.text_colored('GUI', *viz.theme.dim)
                imgui.same_line(viz.label_w)
                with imgui_utils.item_width((viz.font_size * 6)):
                    imgui.plot_lines('##gui_times', array.array('f', self.gui_times), scale_min=0)
                self.timing_text(self.gui_times)
                imgui.text_colored('Render', *viz.theme.dim)
                imgui.same_line(viz.label_w)
                with imgui_utils.item_width((viz.font_size * 6)):
                    imgui.plot_lines('##render_times', array.array('f', self.render_times), scale_min=0)
                self.timing_text(self.render_times)
                imgui.text_colored('Normalize', *viz.theme.dim)
                imgui.same_line(viz.label_w)
                with imgui_utils.item_width((viz.font_size * 6)):
                    imgui.plot_lines('##norm_times', array.array('f', self.norm_times), scale_min=0)
                self.timing_text(self.norm_times)
                imgui.text_colored('Predict', *viz.theme.dim)
                imgui.same_line(viz.label_w)
                with imgui_utils.item_width((viz.font_size * 6)):
                    imgui.plot_lines('##predict_times', array.array('f', self.predict_times), scale_min=0)
                self.timing_text(self.predict_times)
def add_tabular_output(file_name):
    """Enable tabular logging to ``file_name``, re-activating a previously
    held file descriptor when one exists."""
    if file_name in _tabular_fds_hold:
        # Restore the held descriptor instead of reopening the file.
        _tabular_outputs.append(file_name)
        _tabular_fds[file_name] = _tabular_fds_hold[file_name]
    else:
        # Fresh output: open in write mode via the shared helper.
        _add_output(file_name, _tabular_outputs, _tabular_fds, mode='w')
# NOTE(review): the next line arrived as the bare expression `(version='2.0')`,
# which is not valid Python on its own — almost certainly the argument list of
# a decorator whose name was lost in extraction. Preserved as a comment only;
# recover the original decorator from upstream before relying on it.
# (version='2.0')
# NOTE(review): `_registry('DyNAS')` likewise appeared as a bare call whose
# return value was discarded; restored as the class decorator it evidently was.
@_registry('DyNAS')
class DyNAS(NASBase):
    """DyNAS approach: delegates supernet architecture search to a
    DyNASManager configured from a YAML file path or a NASConfig object."""

    def __init__(self, conf_fname_or_obj):
        super().__init__()
        self.init_cfg(conf_fname_or_obj)
        self.dynas_manager = DyNASManager(supernet=self.supernet, optimization_metrics=self.metrics, measurements=self.metrics, search_tactic='linas', num_evals=self.num_evals, results_path=self.results_csv_path, dataset_path=self.dataset_path, seed=self.seed, population=self.population, batch_size=self.batch_size, eval_batch_size=self.eval_batch_size, search_algo=self.search_algo, supernet_ckpt_path=self.supernet_ckpt_path, dataloader_workers=self.num_workers, distributed=self.distributed, test_fraction=self.test_fraction)

    def search(self):
        """Run the architecture search and return its result."""
        return self.dynas_manager.search()

    def select_model_arch(self):
        # Not used by this approach; kept to satisfy the NASBase interface.
        pass

    def init_cfg(self, conf_fname_or_obj):
        """Load configuration from a config-file path or a NASConfig object.

        Raises:
            NotImplementedError: if the argument is neither a readable config
                file path nor a NASConfig, or the population is < 10.
        """
        logger.info('init_cfg')
        if isinstance(conf_fname_or_obj, str):
            if os.path.isfile(conf_fname_or_obj):
                self.conf = Conf(conf_fname_or_obj).usr_cfg
            else:
                # BUG FIX: a missing file previously fell through silently,
                # leaving self.conf unset and crashing later with a confusing
                # AttributeError.
                raise NotImplementedError('Config file {} does not exist.'.format(conf_fname_or_obj))
        elif isinstance(conf_fname_or_obj, NASConfig):
            conf_fname_or_obj.validate()
            self.conf = conf_fname_or_obj.usr_cfg
        else:
            raise NotImplementedError('Please provide a str path to the config file or an object of NASConfig.')
        assert ('dynas' in self.conf.nas), 'Must specify dynas section.'
        dynas_config = self.conf.nas.dynas
        # Copy the relevant settings onto the instance.
        self.seed = self.conf.nas.search.seed
        self.search_algo = self.conf.nas.search.search_algorithm
        self.supernet = dynas_config.supernet
        self.distributed = dynas_config.distributed
        self.metrics = dynas_config.metrics
        self.num_evals = dynas_config.num_evals
        self.results_csv_path = dynas_config.results_csv_path
        self.dataset_path = dynas_config.dataset_path
        self.supernet_ckpt_path = dynas_config.supernet_ckpt_path
        self.batch_size = dynas_config.batch_size
        self.eval_batch_size = dynas_config.eval_batch_size
        self.num_workers = dynas_config.num_workers
        self.test_fraction = dynas_config.test_fraction
        if dynas_config.population < 10:
            raise NotImplementedError('Please specify a population size >= 10')
        else:
            self.population = dynas_config.population
def test_get_point_rgb_correspondences_raytracing() -> None:
    """Cast a pixel ray from a camera above the ground and check the ray /
    triangle intersection point, first against one large ground triangle and
    then against a dense grid of small ground-plane triangles."""
    # Camera center; assumes +z is up — TODO confirm convention of
    # compute_pixel_ray_direction.
    origin = np.array([1., 0., 1.])
    img_h = 2048
    img_w = 1550
    fx = 1683.
    fy = 1683.
    # Bottom-center pixel of the image.
    u = (img_w // 2)
    v = (img_h - 1)
    ray_dir = compute_pixel_ray_direction(u, v, fx, fy, img_w, img_h)
    # One large triangle lying in the z=0 plane ahead of the camera.
    v0 = np.array([1, 10, 0]).astype(np.float32)
    v1 = np.array([1, (- 10), 0]).astype(np.float32)
    v2 = np.array([100, 0, 0]).astype(np.float32)
    (inter_exists, P) = ray_triangle_intersect(origin, ray_dir, v0, v1, v2)
    assert inter_exists
    # Expected hit point from a similar-triangles hand derivation
    # (provenance of the constants not re-derived here); loose tolerance.
    assert np.allclose(P, np.array([(((1.42 / 0.519) * 0.85) + 1.63), 0, 0]), atol=0.1)
    # Same ray against a grid of small ground-plane triangles: every
    # intersecting triangle must yield (approximately) the same point.
    nearby_triangles = triangle_grid_utils.get_flat_plane_grid_triangles(range_m=5)
    for (i, tri) in enumerate(nearby_triangles):
        if ((i % 100) == 0):
            print(f'On {i}/{len(nearby_triangles)}')
        (v0, v1, v2) = tri
        (inter_exists, P) = ray_triangle_intersect(origin, ray_dir, v0, v1, v2)
        if inter_exists:
            assert np.allclose(P, np.array([3.98, 0.0, 0.0]), atol=0.01)
class RandomResize():
    """Jointly resize an (image, target) pair to a side length drawn
    uniformly from [min_size, max_size]."""

    def __init__(self, min_size, max_size=None):
        self.min_size = min_size
        # A single value means a fixed (non-random) target size.
        self.max_size = min_size if max_size is None else max_size

    def __call__(self, image, target):
        size = random.randint(self.min_size, self.max_size)
        image = functional.resize(image, size)
        # Nearest-neighbour interpolation keeps label values intact.
        target = functional.resize(target, size, interpolation=transforms.InterpolationMode.NEAREST)
        return (image, target)
def test_snapshotKeplerPotential_Rforce_naz():
    """Radial forces from SnapshotRZPotential must be insensitive to the
    number of azimuths used (default naz=4 vs. naz=12)."""
    # A single unit-mass particle gives a Kepler potential.
    s = pynbody.new(star=1)
    s['mass'] = 1.0
    s['eps'] = 0.0
    sp = potential.SnapshotRZPotential(s, num_threads=1)
    spaz = potential.SnapshotRZPotential(s, num_threads=1, nazimuths=12)
    # Compare the two discretizations at several (R, z) points.
    for R, z in ((1.0, 0.0), (0.5, 0.0), (1.0, 0.5), (1.0, -0.5)):
        assert (numpy.fabs((sp.Rforce(R, z) - spaz.Rforce(R, z))) < (10.0 ** (- 8.0))), 'SnapshotRZPotential with single unit mass for naz=4 does not agree with naz=12'
    return None
class LTOCF2(BasicModel):
    """LT-OCF-style collaborative filtering model: LightGCN embeddings whose
    layer propagation is replaced by three neural-ODE blocks covering the
    time span [0, K], optionally with learnable intermediate time points.

    NOTE(review): depends on project modules world / ode / dataloader and on
    config keys latent_dim_rec, lightGCN_n_layers, keep_prob, A_split,
    pretrain, dropout, solver, K, time_split — confirmed only by usage below.
    """

    def __init__(self, config: dict, dataset: BasicDataset):
        super(LTOCF2, self).__init__()
        self.config = config
        self.dataset: dataloader.BasicDataset = dataset
        self.__init_weight()
        self.__init_ode()

    def __init_weight(self):
        # Embedding tables, activation, and the (sparse) interaction graph.
        self.num_users = self.dataset.n_users
        self.num_items = self.dataset.m_items
        self.latent_dim = self.config['latent_dim_rec']
        self.n_layers = self.config['lightGCN_n_layers']
        self.keep_prob = self.config['keep_prob']
        self.A_split = self.config['A_split']
        self.embedding_user = torch.nn.Embedding(num_embeddings=self.num_users, embedding_dim=self.latent_dim)
        self.embedding_item = torch.nn.Embedding(num_embeddings=self.num_items, embedding_dim=self.latent_dim)
        if (self.config['pretrain'] == 0):
            # Random init when no pretrained embeddings are supplied.
            nn.init.normal_(self.embedding_user.weight, std=0.1)
            nn.init.normal_(self.embedding_item.weight, std=0.1)
            world.cprint('use NORMAL distribution initilizer')
        else:
            print('use pretarined data')
        self.f = nn.Sigmoid()
        self.Graph = self.dataset.getSparseGraph()
        print(f"lgn is already to go(dropout:{self.config['dropout']})")

    def __init_ode(self):
        # Build either learnable-time ODE blocks (first/middle/last, with
        # trainable intermediate times) or three fixed-interval ODE blocks
        # splitting [0, K] into equal thirds.
        if (world.config['learnable_time'] == True):
            self.time_split = self.config['time_split']
            self.odetimes = ode.ODETimeSetter(self.time_split, self.config['K'])
            self.odetime_1 = [self.odetimes[0]]
            self.odetime_2 = [self.odetimes[1]]
            self.ode_block_test_1 = ode.ODEBlockTimeFirst(ode.ODEFunction(self.Graph), self.time_split, self.config['solver'])
            self.ode_block_test_2 = ode.ODEBlockTimeMiddle(ode.ODEFunction(self.Graph), self.time_split, self.config['solver'])
            self.ode_block_test_3 = ode.ODEBlockTimeLastK(ode.ODEFunction(self.Graph), self.time_split, self.config['solver'], self.config['K'])
        else:
            self.ode_block_1 = ode.ODEBlock(ode.ODEFunction(self.Graph), self.config['solver'], 0, (self.config['K'] / 3))
            self.ode_block_2 = ode.ODEBlock(ode.ODEFunction(self.Graph), self.config['solver'], (self.config['K'] / 3), ((2 * self.config['K']) / 3))
            self.ode_block_3 = ode.ODEBlock(ode.ODEFunction(self.Graph), self.config['solver'], ((2 * self.config['K']) / 3), self.config['K'])

    def get_time(self):
        # Current learnable intermediate times.
        # NOTE(review): odetime_1/odetime_2 only exist when
        # world.config['learnable_time'] is True — calling this otherwise
        # raises AttributeError.
        ode_times = (list(self.odetime_1) + list(self.odetime_2))
        return ode_times

    def __dropout_x(self, x, keep_prob):
        # Sparse dropout: keep each nonzero with prob keep_prob and rescale
        # surviving values by 1/keep_prob.
        size = x.size()
        index = x.indices().t()
        values = x.values()
        random_index = (torch.rand(len(values)) + keep_prob)
        random_index = random_index.int().bool()
        index = index[random_index]
        values = (values[random_index] / keep_prob)
        g = torch.sparse.FloatTensor(index.t(), values, size)
        return g

    def __dropout(self, keep_prob):
        # Apply sparse dropout to the graph (or to each split of it).
        if self.A_split:
            graph = []
            for g in self.Graph:
                graph.append(self.__dropout_x(g, keep_prob))
        else:
            graph = self.__dropout_x(self.Graph, keep_prob)
        return graph

    def computer(self):
        """Propagate all embeddings through the ODE blocks and return the
        mean over the collected states, split into (users, items)."""
        users_emb = self.embedding_user.weight
        items_emb = self.embedding_item.weight
        all_emb = torch.cat([users_emb, items_emb])
        embs = [all_emb]
        if self.config['dropout']:
            if self.training:
                g_droped = self.__dropout(self.keep_prob)
            else:
                g_droped = self.Graph
        else:
            g_droped = self.Graph
        '\n layers\n '
        if (world.config['learnable_time'] == True):
            # Learnable-time path: each block integrates between learned
            # time points; unless dual_res, the residual (out - in) is kept.
            out_1 = self.ode_block_test_1(all_emb, self.odetime_1)
            if (world.config['dual_res'] == False):
                out_1 = (out_1 - all_emb)
            embs.append(out_1)
            out_2 = self.ode_block_test_2(out_1, self.odetime_1, self.odetime_2)
            if (world.config['dual_res'] == False):
                out_2 = (out_2 - out_1)
            embs.append(out_2)
            out_3 = self.ode_block_test_3(out_2, self.odetime_2)
            if (world.config['dual_res'] == False):
                out_3 = (out_3 - out_2)
            embs.append(out_3)
        elif (world.config['learnable_time'] == False):
            # Fixed-time path: three equal intervals, always residual.
            all_emb_1 = self.ode_block_1(all_emb)
            all_emb_1 = (all_emb_1 - all_emb)
            embs.append(all_emb_1)
            all_emb_2 = self.ode_block_2(all_emb_1)
            all_emb_2 = (all_emb_2 - all_emb_1)
            embs.append(all_emb_2)
            all_emb_3 = self.ode_block_3(all_emb_2)
            all_emb_3 = (all_emb_3 - all_emb_2)
            embs.append(all_emb_3)
        embs = torch.stack(embs, dim=1)
        # LightGCN-style mean over layer outputs.
        light_out = torch.mean(embs, dim=1)
        (users, items) = torch.split(light_out, [self.num_users, self.num_items])
        return (users, items)

    def getUsersRating(self, users):
        # Sigmoid-squashed scores of the given users against all items.
        (all_users, all_items) = self.computer()
        users_emb = all_users[users.long()]
        items_emb = all_items
        rating = self.f(torch.matmul(users_emb, items_emb.t()))
        return rating

    def getEmbedding(self, users, pos_items, neg_items):
        # Propagated embeddings for loss computation, plus the raw ("ego")
        # embeddings used for regularization.
        (all_users, all_items) = self.computer()
        users_emb = all_users[users]
        pos_emb = all_items[pos_items]
        neg_emb = all_items[neg_items]
        users_emb_ego = self.embedding_user(users)
        pos_emb_ego = self.embedding_item(pos_items)
        neg_emb_ego = self.embedding_item(neg_items)
        return (users_emb, pos_emb, neg_emb, users_emb_ego, pos_emb_ego, neg_emb_ego)

    def bpr_loss(self, users, pos, neg):
        """BPR loss (softplus of neg-pos score gap) plus L2 regularization
        on the raw embeddings, averaged over the batch."""
        (users_emb, pos_emb, neg_emb, userEmb0, posEmb0, negEmb0) = self.getEmbedding(users.long(), pos.long(), neg.long())
        reg_loss = (((1 / 2) * ((userEmb0.norm(2).pow(2) + posEmb0.norm(2).pow(2)) + negEmb0.norm(2).pow(2))) / float(len(users)))
        pos_scores = torch.mul(users_emb, pos_emb)
        pos_scores = torch.sum(pos_scores, dim=1)
        neg_scores = torch.mul(users_emb, neg_emb)
        neg_scores = torch.sum(neg_scores, dim=1)
        loss = torch.mean(torch.nn.functional.softplus((neg_scores - pos_scores)))
        return (loss, reg_loss)

    def forward(self, users, items):
        # Inner-product score for each (user, item) pair.
        (all_users, all_items) = self.computer()
        users_emb = all_users[users]
        items_emb = all_items[items]
        inner_pro = torch.mul(users_emb, items_emb)
        gamma = torch.sum(inner_pro, dim=1)
        return gamma
def _ReadImageList(list_path):
    """Read a newline-separated list of image paths from ``list_path``,
    stripping trailing whitespace from each entry."""
    with tf.gfile.GFile(list_path, 'r') as f:
        return [line.rstrip() for line in f.readlines()]
class AdamW_GCC2(Optimizer):
    """AdamW with gradient-centralized updates for convolutional weights.

    For parameters with more than 3 dimensions (conv kernels) the computed
    update is centralized — its mean over all non-output axes is subtracted —
    before being applied. Other parameters receive the plain AdamW update
    (decoupled weight decay folded into the step).

    Args:
        params: iterable of parameters or parameter groups.
        lr: learning rate.
        betas: (beta1, beta2) moving-average coefficients.
        eps: term added to the denominator for numerical stability.
        weight_decay: decoupled weight-decay coefficient.
        amsgrad: whether to use the AMSGrad variant.
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if not 0.0 <= eps:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        super(AdamW_GCC2, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(AdamW_GCC2, self).__setstate__(state)
        for group in self.param_groups:
            # Older checkpoints may predate the amsgrad option.
            group.setdefault('amsgrad', False)

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss.

        Returns:
            The closure's loss, or None.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']
                state = self.state[p]
                # Lazy state initialization on first step for this param.
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsgrad:
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # BUG FIX: use the keyword (alpha=/value=) overloads; the old
                # positional-scalar forms add_(scalar, tensor) /
                # addcmul_(scalar, t1, t2) / addcdiv_(scalar, t1, t2) were
                # deprecated and removed in modern PyTorch.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Maintain the running maximum of the second moment.
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                if len(list(grad.size())) > 3:
                    # Conv-like parameter: centralize the update by removing
                    # its mean over all axes except the first (output) axis.
                    delta = (step_size * torch.mul(p.data, group['weight_decay']).addcdiv_(exp_avg, denom)).clone()
                    delta.add_(-delta.mean(dim=tuple(range(1, len(list(grad.size())))), keepdim=True))
                    p.data.add_(-delta)
                else:
                    # Plain AdamW update: decoupled decay + Adam direction.
                    p.data.add_(torch.mul(p.data, group['weight_decay']).addcdiv_(exp_avg, denom), alpha=-step_size)
        return loss
class MyLogger(object):
    """Minimal per-metric file logger: each metric is appended to
    ``<root>/<metric>.log`` as ``itr: value`` lines."""

    def __init__(self, fname, reinitialize=False, logstyle='%3.3f'):
        self.root = fname
        if not os.path.exists(self.root):
            os.mkdir(self.root)
        self.reinitialize = reinitialize
        self.metrics = []          # metric names seen so far this run
        self.logstyle = logstyle   # %-format for values ('pickle'/'mat' unsupported)

    def reinit(self, item):
        """Delete the existing log for ``item`` if reinitialization is on."""
        path = '%s/%s.log' % (self.root, item)
        if os.path.exists(path):
            if self.reinitialize:
                if 'sv' in item:
                    # Announce singular-value log deletion only once.
                    if not any(('sv' in metric) for metric in self.metrics):
                        print('Deleting singular value logs...')
                else:
                    # BUG FIX: the message previously formatted the name as
                    # '%s_%s.log' (underscore) while the file actually removed
                    # is '%s/%s.log'.
                    print('{} exists, deleting...'.format(path))
                os.remove(path)

    def log(self, itr, **kwargs):
        """Append ``itr: value`` lines, one per metric keyword argument."""
        for arg in kwargs:
            if arg not in self.metrics:
                # First sighting of this metric: optionally wipe old logs.
                if self.reinitialize:
                    self.reinit(arg)
                self.metrics += [arg]
            if self.logstyle == 'pickle':
                print('Pickle not currently supported...')
            elif self.logstyle == 'mat':
                print('.mat logstyle not currently supported...')
            else:
                with open('%s/%s.log' % (self.root, arg), 'a') as f:
                    f.write('%d: %s\n' % (itr, self.logstyle % kwargs[arg]))
def main(args):
    """Extract CLIP text features for every caption in a COCO-style
    annotation file and cache them as one ``<cocoid>.npz`` per image.

    NOTE(review): indentation reconstructed from a flattened dump; the final
    savez call is placed inside the image loop (consistent with the debug
    branch breaking before any save) — confirm against the original.
    """
    device = ('cuda' if torch.cuda.is_available() else 'cpu')
    (model, _) = clip.load(args.model_type_or_path, jit=False, device=device)
    imgs = utils.load_json(args.anno)['images']
    # Shuffle so concurrent workers are unlikely to collide on the same files.
    random.shuffle(imgs)
    for img in tqdm(imgs):
        image_id = img['cocoid']
        dst_path = os.path.join(args.output_dir, (str(image_id) + '.npz'))
        # Skip images whose features were already extracted.
        if os.path.isfile(dst_path):
            continue
        # Light normalization of raw captions before tokenization.
        sents = [sent['raw'].lower().strip().strip('.') for sent in img['sentences']]
        sents = clip.tokenize(sents).to(device)
        with torch.no_grad():
            text_feat = model.encode_text(sents)
        text_feat = text_feat.data.cpu().float().numpy()
        if args.debug:
            # Debug: inspect one shape and stop without writing anything.
            print('Text feature shape: ', text_feat.shape)
            break
        np.savez_compressed(dst_path, g_feature=text_feat)
def need_apply(configs_mapping: Dict[(Tuple[(str, callable)], BaseConfig)], algo_name):
    """Return True if any config in the mapping targets the algorithm `algo_name`."""
    for cfg in configs_mapping.values():
        if cfg.name == algo_name:
            return True
    return False
def worker(worker_id, start, end):
    # Collect random-action rollouts [start, end) from the DM-control 'rope_sac'
    # environment, saving per-step rendered frames plus the action log per run.
    np.random.seed(worker_id)
    env_args = dict(domain='rope_sac', task='easy', max_path_length=5, pixel_wrapper_kwargs=dict(observation_key='pixels', pixels_only=False, render_kwargs=dict(width=64, height=64, camera_id=0)))
    env = DMControlEnv(**env_args)
    total = 0
    # Only worker 0 displays a progress bar.
    if (worker_id == 0):
        pbar = tqdm(total=(end - start))
    for i in range(start, end):
        str_i = str(i)
        run_folder = join(root, 'run{}'.format(str_i.zfill(5)))
        if (not exists(run_folder)):
            os.makedirs(run_folder)
        actions = []
        o = env.reset()
        # NOTE(review): reseeding with 0 inside the loop makes every episode's
        # random action sequence identical — confirm this is intentional.
        np.random.seed(0)
        for t in itertools.count():
            a = env.action_space.sample()
            # Normalize the sampled action to unit length.
            a = ((a / np.linalg.norm(a)) * 1)
            # Record end-effector location (first two dims) together with the action.
            actions.append(np.concatenate((o.location[:2], a)))
            str_t = str(t)
            imageio.imwrite(join(run_folder, 'img_{}.png'.format(str_t.zfill(2))), o.pixels.astype('uint8'))
            (o, _, terminal, info) = env.step(a)
            if (terminal or info.traj_done):
                break
        actions = np.stack(actions, axis=0)
        np.save(join(run_folder, 'actions.npy'), actions)
        if (worker_id == 0):
            pbar.update(1)
    if (worker_id == 0):
        pbar.close()
def sig_handler(signum, frame):
    """SLURM signal handler: rank 0 requeues the job, every rank then exits."""
    logger.warning('Signal handler called with signal ' + str(signum))
    rank = int(os.environ['SLURM_PROCID'])
    logger.warning('Host: %s - Global rank: %i' % (socket.gethostname(), rank))
    if rank != 0:
        logger.warning('Not the main process, no need to requeue.')
    else:
        # Only the global main process asks SLURM to requeue the job.
        logger.warning('Requeuing job ' + os.environ['SLURM_JOB_ID'])
        os.system('scontrol requeue ' + os.environ['SLURM_JOB_ID'])
    sys.exit(-1)
def main():
    """Train a VAE on MNIST with a mean-field or normalizing-flow posterior,
    periodically printing ELBO / log p(x) estimates on the validation set."""
    start_time = time.time()
    parser = argparse.ArgumentParser()
    add_args(parser)
    args = parser.parse_args()
    print(args)
    print('Is jax using decorators?', (not jax.config.read('jax_disable_jit')))
    rng_seq = hk.PRNGSequence(args.random_seed)
    # Generative model log p(x, z), wrapped as a Haiku transform.
    p_log_prob = hk.transform((lambda x, z: Model(args.latent_size, args.hidden_size, MNIST_IMAGE_SHAPE)(x=x, z=z)))
    if (args.variational == 'mean-field'):
        variational = VariationalMeanField
    elif (args.variational == 'flow'):
        variational = VariationalFlow
    # q(z|x): draws samples and returns their log-density.
    q_sample_and_log_prob = hk.transform((lambda x, num_samples: variational(args.latent_size, args.hidden_size)(x, num_samples)))
    p_params = p_log_prob.init(next(rng_seq), z=np.zeros((1, args.latent_size), dtype=np.float32), x=np.zeros((1, *MNIST_IMAGE_SHAPE), dtype=np.float32))
    q_params = q_sample_and_log_prob.init(next(rng_seq), x=np.zeros((1, *MNIST_IMAGE_SHAPE), dtype=np.float32), num_samples=1)
    optimizer = optax.rmsprop(args.learning_rate)
    params = (p_params, q_params)
    opt_state = optimizer.init(params)
    def objective_fn(params: hk.Params, rng_key: PRNGKey, batch: Batch) -> jnp.ndarray:
        # Negative single-sample ELBO, summed over the batch.
        x = batch['image']
        (p_params, q_params) = params
        (z, log_q_z) = q_sample_and_log_prob.apply(q_params, rng_key, x=x, num_samples=1)
        log_p_x_z = p_log_prob.apply(p_params, rng_key, x=x, z=z)
        elbo = (log_p_x_z - log_q_z)
        elbo = elbo.mean(axis=0)
        elbo = elbo.sum(axis=0)
        return (- elbo)
    def train_step(params: hk.Params, rng_key: PRNGKey, opt_state: optax.OptState, batch: Batch) -> Tuple[(hk.Params, optax.OptState)]:
        # One gradient/optimizer update on the joint (p, q) parameters.
        grads = jax.grad(objective_fn)(params, rng_key, batch)
        (updates, new_opt_state) = optimizer.update(grads, opt_state)
        new_params = optax.apply_updates(params, updates)
        return (new_params, new_opt_state)
    def importance_weighted_estimate(params: hk.Params, rng_key: PRNGKey, batch: Batch) -> Tuple[(jnp.ndarray, jnp.ndarray)]:
        # IWAE-style estimate: log p(x) via logsumexp over importance samples.
        x = batch['image']
        (p_params, q_params) = params
        (z, log_q_z) = q_sample_and_log_prob.apply(q_params, rng_key, x=x, num_samples=args.num_importance_samples)
        log_p_x_z = p_log_prob.apply(p_params, rng_key, x, z)
        elbo = (log_p_x_z - log_q_z)
        log_p_x = (jax.nn.logsumexp(elbo, axis=0) - jnp.log(jnp.shape(elbo)[0]))
        log_p_x = log_p_x.sum(0)
        elbo = elbo.mean(axis=0)
        elbo = elbo.sum(axis=0)
        return (elbo, log_p_x)
    def evaluate(dataset: Generator[(Batch, None, None)], params: hk.Params, rng_seq: hk.PRNGSequence) -> Tuple[(float, float)]:
        # Average the per-batch ELBO and log p(x) over the whole dataset.
        total_elbo = 0.0
        total_log_p_x = 0.0
        dataset_size = 0
        for batch in dataset:
            (elbo, log_p_x) = importance_weighted_estimate(params, next(rng_seq), batch)
            total_elbo += elbo
            total_log_p_x += log_p_x
            dataset_size += len(batch['image'])
        return ((total_elbo / dataset_size), (total_log_p_x / dataset_size))
    train_ds = load_dataset(tfds.Split.TRAIN, args.batch_size, args.random_seed, repeat=True)
    test_ds = load_dataset(tfds.Split.TEST, args.batch_size, args.random_seed)
    def print_progress(step: int, examples_per_sec: float):
        # Closure reads `params` from the enclosing scope, so it always
        # evaluates the latest parameters.
        valid_ds = load_dataset(tfds.Split.VALIDATION, args.batch_size, args.random_seed)
        (elbo, log_p_x) = evaluate(valid_ds, params, rng_seq)
        train_elbo = ((- objective_fn(params, next(rng_seq), next(train_ds))) / args.batch_size)
        print(f'Step {step:<10d} Train ELBO estimate: {train_elbo:<5.3f} Validation ELBO estimate: {elbo:<5.3f} Validation log p(x) estimate: {log_p_x:<5.3f} Speed: {examples_per_sec:<5.2e} examples/s')
    t0 = time.time()
    for step in range(args.training_steps):
        if ((step % args.log_interval) == 0):
            t1 = time.time()
            examples_per_sec = ((args.log_interval * args.batch_size) / (t1 - t0))
            print_progress(step, examples_per_sec)
            t0 = t1
        (params, opt_state) = train_step(params, next(rng_seq), opt_state, next(train_ds))
    # Final evaluation on the test split.
    test_ds = load_dataset(tfds.Split.TEST, args.batch_size, args.random_seed)
    (elbo, log_p_x) = evaluate(test_ds, params, rng_seq)
    print(f'Step {step:<10d} Test ELBO estimate: {elbo:<5.3f} Test log p(x) estimate: {log_p_x:<5.3f} ')
    print(f'Total time: {((time.time() - start_time) / 60):.3f} minutes')
class CryptoAgent(Agent):
    """Agent that additionally carries a (lazily assigned) crypto key."""

    def __init__(self):
        super().__init__()
        # Key material is attached after construction; None until then.
        self.key = None
class AttentionReplace(AttentionControlEdit):
    """Attention edit that swaps cross-attention maps between prompts using a
    precomputed token replacement mapper."""

    def __init__(self, prompts, tokenizer, num_steps: int, cross_replace_steps: float, self_replace_steps: float, device='cpu'):
        super().__init__(prompts, num_steps, cross_replace_steps, self_replace_steps, tokenizer, device=device)
        # Token-to-token replacement matrix between source and target prompts.
        self.mapper = get_replacement_mapper(prompts, tokenizer).to(device)

    def replace_cross_attention(self, attn_base, att_replace):
        # Project the base attention onto the replacement prompt's tokens.
        return torch.einsum('hpw,bwn->bhpn', attn_base, self.mapper)
def match_argument(args_A: List['ArgDef'], args_B: List['ArgDef'], verbose=True) -> List[Tuple[(int, int)]]:
    """Match arguments of A to arguments of B via the Hungarian algorithm,
    dropping any pair involving *args or **kwargs."""
    similarity = ArgDef.similarity(args_A, args_B, verbose=False)
    # Munkres minimizes cost, so invert the similarity (max score is 5).
    cost = [[5 - s for s in row] for row in similarity]
    assignment = Munkres().compute(cost)
    assignment.sort(key=lambda pair: pair[1])
    matched = []
    for i, j in assignment:
        if args_A[i].name == '**kwargs' or args_B[j].name == '**kwargs':
            continue
        if args_A[i].name == '*args' or args_B[j].name == '*args':
            continue
        matched.append((i, j))
    return matched
class TFLiteRunner():
    """Callable wrapper that runs a TF-Lite net on a dict of named arrays and
    materializes every output as a numpy array."""

    def __init__(self, tfnet_callable: TFNetCallable) -> None:
        self.tfnet_callable = tfnet_callable

    def __call__(self, input: Dict[(str, np.ndarray)]) -> Dict[(str, np.ndarray)]:
        outputs = self.tfnet_callable(**input)
        return {name: np.array(tensor) for name, tensor in outputs.items()}
class GhostObsFilter(ObsFilter):
    """Observation filter that removes a designated "ghost" player from other
    players' observations once it is farther away than `further_than`."""

    def __init__(self, obs_filter: ObsFilter, ghost_name: PlayerName, further_than: float=0):
        assert issubclass(type(obs_filter), ObsFilter)
        # Wrapped filter applied before the ghost-distance logic.
        self.obs_filter = obs_filter
        self.ghost_name: PlayerName = ghost_name
        self.further_than: float = further_than

    def sense(self, scenario: DgScenario, full_obs: SimObservations, pov: PlayerName) -> SimObservations:
        """Return `pov`'s filtered observations, hiding the ghost when out of range."""
        obs = self.obs_filter.sense(scenario, full_obs, pov)
        # The ghost always sees itself; nothing to do if it is already absent.
        if ((pov == self.ghost_name) or (self.ghost_name not in obs.players)):
            return obs
        pov_position = extract_2d_position_from_state(obs.players[pov].state)
        ghost_position = extract_2d_position_from_state(obs.players[self.ghost_name].state)
        distance = np.linalg.norm((pov_position - ghost_position))
        if (distance > self.further_than):
            # Drop the ghost and rebuild the (immutable) observations object.
            filtered_players = obs.players.delete(self.ghost_name)
            obs = replace(obs, players=filtered_players)
        return obs
def run_posegraph_optimization(pose_graph_name, pose_graph_optimized_name, max_correspondence_distance, preference_loop_closure):
    """Load a pose graph, run Open3D global optimization (Levenberg-Marquardt),
    and write the optimized graph to `pose_graph_optimized_name`."""
    # Verbose logging only for the duration of the optimization.
    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)
    method = o3d.pipelines.registration.GlobalOptimizationLevenbergMarquardt()
    criteria = o3d.pipelines.registration.GlobalOptimizationConvergenceCriteria()
    option = o3d.pipelines.registration.GlobalOptimizationOption(max_correspondence_distance=max_correspondence_distance, edge_prune_threshold=0.25, preference_loop_closure=preference_loop_closure, reference_node=0)
    pose_graph = o3d.io.read_pose_graph(pose_graph_name)
    # global_optimization mutates `pose_graph` in place.
    o3d.pipelines.registration.global_optimization(pose_graph, method, criteria, option)
    o3d.io.write_pose_graph(pose_graph_optimized_name, pose_graph)
    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Error)
class CamembertConfig(PretrainedConfig):
    """Configuration object storing the architecture hyperparameters of a
    CamemBERT model (a RoBERTa-style transformer)."""
    model_type = 'camembert'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs):
        """Store each hyperparameter on the instance; special-token ids are
        forwarded to the base PretrainedConfig."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
# NOTE(review): this bare call looks like a registry decorator that lost its
# '@' and registry prefix during extraction — confirm against the original.
_module(name='Caffe2Xavier')
class Caffe2XavierInit(KaimingInit):
    """Caffe2-style "Xavier" initializer: Kaiming uniform with a=1, fan_in,
    and leaky_relu nonlinearity (matches Caffe2's XavierFill)."""

    def __init__(self, **kwargs):
        super().__init__(a=1, mode='fan_in', nonlinearity='leaky_relu', distribution='uniform', **kwargs)

    def __call__(self, module):
        # Delegate the actual weight initialization to KaimingInit.
        super().__call__(module)
def finish_dual_setup(prob: cp.Problem, S: np.ndarray, X: np.ndarray, quantile: float, Phi: np.ndarray, x_calib: np.ndarray, infinite_params=None):
    """Populate the parameters of the (already constructed) dual conformal
    problem for a new test point and return the problem.

    Args:
        prob: parametrized cvxpy problem with entries in ``prob.param_dict``.
        S: conformity score of the test point.
        X: test-point features.
        quantile: target quantile level.
        Phi: feature map of the test point.
        x_calib: calibration features.
        infinite_params: optional dict overriding 'kernel'/'gamma'/'lambda'.
    """
    # BUGFIX(idiom): the default was a shared mutable dict ({}); use None.
    if infinite_params is None:
        infinite_params = {}
    prob.param_dict['S_test'].value = np.asarray([[S]])
    prob.param_dict['Phi_test'].value = Phi.reshape(1, -1)
    prob.param_dict['quantile'].value = quantile
    kernel = infinite_params.get('kernel', FUNCTION_DEFAULTS['kernel'])
    gamma = infinite_params.get('gamma', FUNCTION_DEFAULTS['gamma'])
    radius = 1 / infinite_params.get('lambda', FUNCTION_DEFAULTS['lambda'])
    if kernel is not None:
        # Cross-kernel between calibration points (plus the test point) and X.
        K_12 = pairwise_kernels(X=np.concatenate([x_calib, X.reshape(1, -1)], axis=0), Y=X.reshape(1, -1), metric=kernel, gamma=gamma)
        if 'K_12' in prob.param_dict:
            prob.param_dict['K_12'].value = K_12[:-1]
            prob.param_dict['K_21'].value = K_12.T
        (_, L_11) = _get_kernel_matrix(x_calib, kernel, gamma)
        K_22 = pairwise_kernels(X=X.reshape(1, -1), metric=kernel, gamma=gamma)
        L_21 = np.linalg.solve(L_11, K_12[:-1]).T
        # BUGFIX: the original read "(K_22 - (L_21 L_21.T))" — a syntax error
        # missing the matrix-multiplication operator. Schur complement update.
        L_22 = K_22 - L_21 @ L_21.T
        L_22[L_22 < 0] = 0  # clamp tiny negatives caused by round-off
        L_22 = np.sqrt(L_22)
        prob.param_dict['L_21_22'].value = np.hstack([L_21, L_22])
        prob.param_dict['radius'].value = radius
        prob.param_dict['quantile'].value *= (radius / (len(x_calib) + 1))
    return prob
def get_select_student_channels_list(out_channels):
    """Candidate student channel counts around `out_channels`: scaled up/down,
    clamped to [8, 2048], rounded to multiples of 8, deduplicated, descending."""
    candidates = [out_channels * 2.5, out_channels * 2, out_channels * 1.5, out_channels * 1.25, out_channels, out_channels / 1.25, out_channels / 1.5, out_channels / 2, out_channels / 2.5]
    clamped = [min(2048, max(8, c)) for c in candidates]
    rounded = [global_utils.smart_round(c, base=8) for c in clamped]
    return sorted(set(rounded), reverse=True)
def remove_files_if_exist(file_paths):
    """Delete every regular file in `file_paths`; paths that are missing or
    not regular files are silently skipped."""
    for path in filter(os.path.isfile, file_paths):
        os.remove(path)
class CaseWithoutAVX512():
    """Test behaviour on hardware/OS without AVX-512 support."""

    def test_unsupported_HW_or_OS(self):
        # IPEX BF16 quantization must fail with an explicit RuntimeError here.
        model = resnet18(num_classes=10)
        with pytest.raises(RuntimeError, match='Applying IPEX BF16 optimization needs the cpu support avx512.'):
            bf16_model = InferenceOptimizer.quantize(model, precision='bf16', use_ipex=True)
class DebertaOnnxConfig(OnnxConfig):
    """ONNX export configuration for DeBERTa models."""

    # NOTE(review): in upstream transformers `inputs` is a @property — the
    # decorator may have been lost during extraction; confirm.
    def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        """Map each model input name to its dynamic (variable-size) axes."""
        if (self.task == 'multiple-choice'):
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        # token_type_ids is an input only when the model has token-type embeddings.
        if (self._config.type_vocab_size > 0):
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)])
        else:
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])

    def default_onnx_opset(self) -> int:
        # Export with ONNX opset 12.
        return 12

    def generate_dummy_inputs(self, preprocessor: Union[('PreTrainedTokenizerBase', 'FeatureExtractionMixin')], batch_size: int=(- 1), seq_length: int=(- 1), num_choices: int=(- 1), is_pair: bool=False, framework: Optional['TensorType']=None, num_channels: int=3, image_width: int=40, image_height: int=40, tokenizer: 'PreTrainedTokenizerBase'=None) -> Mapping[(str, Any)]:
        """Build dummy inputs, dropping token_type_ids when the model has none."""
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if ((self._config.type_vocab_size == 0) and ('token_type_ids' in dummy_inputs)):
            del dummy_inputs['token_type_ids']
        return dummy_inputs
def create_metric(metric):
    """Instantiate the metric class named in `metric['class']`.

    When `metric['length']` is present, one instance is created per list
    length; otherwise a single no-argument instance is returned.
    """
    metric_cls = load_class('evaluation.metrics.' + metric['class'])
    if 'length' in metric:
        return [metric_cls(length) for length in metric['length']]
    return [metric_cls()]
def partition_refs_to_creator(partition_refs, shuffle=False):
    """Return an MXNet data-creator closure over Ray object refs of partitions."""
    def data_creator(config, kv):
        # Imported lazily so each worker process resolves mxnet locally.
        import mxnet as mx
        invalidInputError(('batch_size' in config), 'batch_size must be set in config')
        # Materialize the partitions from Ray and split into data/label arrays.
        (data, label) = partitions_get_data_label(ray.get(partition_refs), allow_tuple=False, allow_list=False)
        train_data_iter = mx.io.NDArrayIter(data=data, label=label, batch_size=config['batch_size'], shuffle=shuffle)
        if ('train_resize_batch_num' in config):
            # Optionally cap/resize the number of batches per epoch.
            train_data_iter = mx.io.ResizeIter(train_data_iter, config['train_resize_batch_num'])
        return train_data_iter
    return data_creator
def crps_minimization(std_dev_array, y, yHat_means):
    """Mean Gaussian CRPS of observations `y` under N(yHat_means, std_dev_array[0])."""
    sigma = std_dev_array[0]
    scores = ps.crps_gaussian(y, mu=yHat_means, sig=sigma)
    return np.mean(scores)
class DataProcessor(object):
    """Loads pickled NER-style datasets and converts them to InputExample lists.

    `get_src_*` load English CoNLL splits, `get_src_trans_*` load translated
    CoNLL splits, `get_sep_tgt_*` load per-language splits, and
    `get_twitter_general_examples` loads unlabeled Twitter data.
    """

    def get_src_train_examples(self, data_dir):
        """English CoNLL training examples."""
        return self._create_examples(self._read_pkl(os.path.join(data_dir, 'en_conll_train.pkl')), 'conll_train')

    def get_src_dev_examples(self, data_dir):
        """English CoNLL dev examples (built from the test pickle)."""
        return self._create_examples(self._read_pkl(os.path.join(data_dir, 'en_conll_test.pkl')), 'conll_dev')

    def get_src_trans_train_examples(self, data_dir, language_type):
        """Translated CoNLL training examples for `language_type`."""
        return self._create_examples(self._read_pkl(os.path.join(data_dir, (language_type + '_conll_train.pkl'))), 'conll_train')

    def get_src_trans_dev_examples(self, data_dir, language_type):
        """Translated CoNLL dev examples for `language_type`."""
        return self._create_examples(self._read_pkl(os.path.join(data_dir, (language_type + '_conll_test.pkl'))), 'conll_dev')

    def get_sep_tgt_train_examples(self, data_dir, language_type):
        """Target-language training examples."""
        return self._create_examples(self._read_pkl(os.path.join(data_dir, (language_type + '_sep_train.pkl'))), (language_type + '_train'))

    def get_sep_tgt_test_examples(self, data_dir, language_type):
        """Target-language test examples."""
        return self._create_examples(self._read_pkl(os.path.join(data_dir, (language_type + '_sep_test.pkl'))), (language_type + '_test'))

    def get_twitter_general_examples(self, data_dir):
        """Unlabeled Twitter examples (one example per element, no repetition)."""
        return self._create_examples_without_replacement(self._read_pkl(os.path.join(data_dir, 'twitter_general.pkl')), 'twitter_general')

    def get_labels(self, data_dir):
        """BIO tag set used by all datasets."""
        return ['B', 'I', 'O']

    def _create_examples(self, data, set_type):
        """Build InputExamples, repeating each text `mlm_cvg_hack` times.

        NOTE(review): `mlm_cvg_hack` is a module-level constant defined
        elsewhere; the repetition presumably increases MLM coverage — confirm.
        """
        examples = []
        for (i, elem) in enumerate(data):
            guid = ('%s-%s' % (set_type, i))
            text = elem[0]
            for j in range(mlm_cvg_hack):
                examples.append(InputExample(guid=guid, text=text))
        return examples

    def _create_examples_without_replacement(self, data, set_type):
        """Build exactly one InputExample per element of `data`."""
        examples = []
        for (i, elem) in enumerate(data):
            guid = ('%s-%s' % (set_type, i))
            text = elem[0]
            examples.append(InputExample(guid=guid, text=text))
        return examples

    def _read_pkl(self, input_file):
        """Deserialize one pickle file and return its contents.

        BUGFIX: the file handle was previously leaked
        (`pickle.load(open(input_file, 'rb'))`); use a context manager.
        NOTE(security): pickle is only safe on trusted local dataset files.
        """
        with open(input_file, 'rb') as f:
            return pickle.load(f)
def main_split_file():
    """Split every archive/release data file into a 'friends'-only file and an
    everything-except-'friends' file, keyed by desc_id membership."""
    excluded_show_name = 'friends'
    archive_show_name2desc_ids = load_json(archive_show_name2desc_ids_path)
    for path_mapping in [archive_split_name2data_path_mapping, release_split_name2data_path_mapping]:
        for (split_name, split_path) in path_mapping.items():
            # Index every record by its desc_id for O(1) lookup below.
            desc_id2data = {e['desc_id']: e for e in load_jsonl(split_path)}
            excluded_desc_ids = archive_show_name2desc_ids[split_name][excluded_show_name]
            other_desc_ids = flat_list_of_lists([v for (k, v) in archive_show_name2desc_ids[split_name].items() if (k != excluded_show_name)])
            excluded_data = [desc_id2data[k] for k in excluded_desc_ids]
            save_jsonl(excluded_data, split_path.replace('.jsonl', f'_{excluded_show_name}.jsonl'))
            other_data = [desc_id2data[k] for k in other_desc_ids]
            save_jsonl(other_data, split_path.replace('.jsonl', f'_except_{excluded_show_name}.jsonl'))
            print(f'split_path {split_path}, #lines: total {len(desc_id2data)}, excluded {len(excluded_data)}, others {len(other_data)}')
class VideoMAEModel(metaclass=DummyObject):
    # Import placeholder: instantiating raises a helpful error when the
    # required backend is not installed.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class OutlierDetector():
    """Simple 1-D outlier detection via mean/std or median/MAD cut-offs."""

    def __init__(self):
        pass

    def detect_by_std_mean(self, data, n_dev):
        """Return the points lying more than `n_dev` standard deviations from the mean."""
        if len(data) == 0:
            return []
        center = np.mean(data)
        cutoff = np.std(data) * n_dev
        lower, upper = center - cutoff, center + cutoff
        return [point for point in data if point > upper or point < lower]

    def detect_by_abd_median(self, data, n_dev):
        """Return the points lying more than `n_dev` scaled MADs from the median."""
        if len(data) == 0:
            return []
        center = np.median(data)
        # 1.4826 makes the MAD a consistent estimator of sigma for Gaussian data.
        mad = np.median(np.abs(data - center)) * 1.4826
        cutoff = mad * n_dev
        lower, upper = center - cutoff, center + cutoff
        return [point for point in data if point > upper or point < lower]
class MsmarcoDataset(Dataset):
    """MS MARCO passage collection: loads all JSONL shards under
    `collection_path` and tokenizes one passage per __getitem__."""

    def __init__(self, collection_path: str, tokenizer: PreTrainedTokenizer, p_max_len=192):
        self.collection = []  # raw passage texts
        self.docids = []      # passage ids aligned with self.collection
        for filename in os.listdir(collection_path):
            # BUGFIX: the path was literally '{collection_path}/(unknown)' —
            # the filename placeholder had been mangled, so the intended shard
            # was never opened. Use the filename being iterated.
            with open(f'{collection_path}/{filename}', 'r') as f:
                lines = f.readlines()
                for line in tqdm(lines, desc='loading collection....'):
                    data = json.loads(line)
                    self.collection.append(data['psg'])
                    self.docids.append(data['pid'])
        self.tok = tokenizer
        self.p_max_len = p_max_len

    def __len__(self):
        return len(self.collection)

    def __getitem__(self, item) -> [BatchEncoding, BatchEncoding]:
        """Tokenize and return the passage at index `item` (no attention mask)."""
        psg = self.collection[item]
        encoded_psg = self.tok.encode_plus(psg, max_length=self.p_max_len, truncation='only_first', return_attention_mask=False)
        return encoded_psg

    def get_docids(self):
        """Passage ids in collection order."""
        return self.docids
def _mobilenet_v3_conf(arch: str, params: Dict[(str, Any)]):
    """Build the inverted-residual block settings and last-channel width for a
    MobileNetV3 variant ('mobilenet_v3_large' or 'mobilenet_v3_small')."""
    # Optional knobs are popped so they are not forwarded further.
    reduce_divider = (2 if params.pop('_reduced_tail', False) else 1)
    dilation = (2 if params.pop('_dilated', False) else 1)
    width_mult = params.pop('_width_mult', 1.0)
    bneck_conf = partial(InvertedResidualConfig, width_mult=width_mult)
    adjust_channels = partial(InvertedResidualConfig.adjust_channels, width_mult=width_mult)
    if (arch == 'mobilenet_v3_large'):
        inverted_residual_setting = [bneck_conf(16, 3, 16, 16, False, 'RE', 1, 1), bneck_conf(16, 3, 64, 24, False, 'RE', 2, 1), bneck_conf(24, 3, 72, 24, False, 'RE', 1, 1), bneck_conf(24, 5, 72, 40, True, 'RE', 2, 1), bneck_conf(40, 5, 120, 40, True, 'RE', 1, 1), bneck_conf(40, 5, 120, 40, True, 'RE', 1, 1), bneck_conf(40, 3, 240, 80, False, 'HS', 2, 1), bneck_conf(80, 3, 200, 80, False, 'HS', 1, 1), bneck_conf(80, 3, 184, 80, False, 'HS', 1, 1), bneck_conf(80, 3, 184, 80, False, 'HS', 1, 1), bneck_conf(80, 3, 480, 112, True, 'HS', 1, 1), bneck_conf(112, 3, 672, 112, True, 'HS', 1, 1), bneck_conf(112, 5, 672, (160 // reduce_divider), True, 'HS', 2, dilation), bneck_conf((160 // reduce_divider), 5, (960 // reduce_divider), (160 // reduce_divider), True, 'HS', 1, dilation), bneck_conf((160 // reduce_divider), 5, (960 // reduce_divider), (160 // reduce_divider), True, 'HS', 1, dilation)]
        last_channel = adjust_channels((1280 // reduce_divider))
    elif (arch == 'mobilenet_v3_small'):
        inverted_residual_setting = [bneck_conf(16, 3, 16, 16, True, 'RE', 2, 1), bneck_conf(16, 3, 72, 24, False, 'RE', 2, 1), bneck_conf(24, 3, 88, 24, False, 'RE', 1, 1), bneck_conf(24, 5, 96, 40, True, 'HS', 2, 1), bneck_conf(40, 5, 240, 40, True, 'HS', 1, 1), bneck_conf(40, 5, 240, 40, True, 'HS', 1, 1), bneck_conf(40, 5, 120, 48, True, 'HS', 1, 1), bneck_conf(48, 5, 144, 48, True, 'HS', 1, 1), bneck_conf(48, 5, 288, (96 // reduce_divider), True, 'HS', 2, dilation), bneck_conf((96 // reduce_divider), 5, (576 // reduce_divider), (96 // reduce_divider), True, 'HS', 1, dilation), bneck_conf((96 // reduce_divider), 5, (576 // reduce_divider), (96 // reduce_divider), True, 'HS', 1, dilation)]
        last_channel = adjust_channels((1024 // reduce_divider))
    else:
        raise ValueError('Unsupported model type {}'.format(arch))
    return (inverted_residual_setting, last_channel)
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    """Fast Pegasus tokenizer.

    Pegasus reserves `offset` special ids at the start of the vocabulary:
    the sentence-mask token <mask_1> plus a run of <unk_i> placeholders.
    """
    offset = 103
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ['attention_mask']

    def __init__(self, vocab_file, tokenizer_file=None, pad_token='<pad>', eos_token='</s>', unk_token='<unk>', mask_token='<mask_2>', mask_token_sent='<mask_1>', additional_special_tokens=None, **kwargs):
        """Build the token list, padding user-supplied special tokens with
        <unk_i> placeholders up to the reserved offset."""
        if (additional_special_tokens is not None):
            assert isinstance(additional_special_tokens, list), f'additional_special_tokens should be of type {type(list)}, but is {type(additional_special_tokens)}'
            # Prepend the sentence-mask token unless the caller already included it.
            additional_special_tokens_extended = (([mask_token_sent] + additional_special_tokens) if (mask_token_sent not in additional_special_tokens) else additional_special_tokens)
            # Fill the remaining reserved slots with <unk_i> placeholders.
            additional_special_tokens_extended += [f'<unk_{i}>' for i in range(len(additional_special_tokens_extended), (self.offset - 1))]
            if (len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended)):
                raise ValueError(f'Please make sure that the provided additional_special_tokens do not contain an incorrectly shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.')
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent]
            additional_special_tokens += [f'<unk_{i}>' for i in range(2, self.offset)]
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, pad_token=pad_token, eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, mask_token_sent=mask_token_sent, additional_special_tokens=additional_special_tokens, **kwargs)
        self.vocab_file = vocab_file

    def _special_token_mask(self, seq):
        """Return a 0/1 mask over `seq` marking special-token ids (unk excluded)."""
        all_special_ids = set(self.all_special_ids)
        all_special_ids.remove(self.unk_token_id)
        assert (all_special_ids == set(range((len(self.additional_special_tokens) + 3)))), f'There should be 3 special tokens: mask_token, pad_token, and eos_token + {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}'
        return [(1 if (x in all_special_ids) else 0) for x in seq]

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Special-token mask; the trailing [1] accounts for the appended EOS."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif (token_ids_1 is None):
            return (self._special_token_mask(token_ids_0) + [1])
        else:
            return (self._special_token_mask((token_ids_0 + token_ids_1)) + [1])

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Pegasus format: concatenate the sequences and append EOS."""
        if (token_ids_1 is None):
            return (token_ids_0 + [self.eos_token_id])
        return ((token_ids_0 + token_ids_1) + [self.eos_token_id])

    # NOTE(review): this bare call looks like an '@add_start_docstrings'
    # decorator that lost its '@' prefix during extraction — confirm.
    _start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
    def prepare_seq2seq_batch(self, src_texts: List[str], tgt_texts: Optional[List[str]]=None, max_length: Optional[int]=None, max_target_length: Optional[int]=None, return_tensors: str=None, truncation=True, padding='longest', **unused) -> BatchEncoding:
        """Tokenize source (and optionally target) texts into one BatchEncoding."""
        if ('' in src_texts):
            raise ValueError(f'found empty string in src_texts: {src_texts}')
        tokenizer_kwargs = dict(add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, truncation=truncation, padding=padding)
        model_inputs: BatchEncoding = self(src_texts, **tokenizer_kwargs)
        if (tgt_texts is None):
            return model_inputs
        if (max_target_length is not None):
            tokenizer_kwargs['max_length'] = max_target_length
        labels: BatchEncoding = self(tgt_texts, **tokenizer_kwargs)['input_ids']
        model_inputs['labels'] = labels
        return model_inputs

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Copy the vocab file into `save_directory`; return the new path."""
        if (not os.path.isdir(save_directory)):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        # Only copy when source and destination differ.
        if (os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
def sample(map, corridor_radius):
    """Pick a random [x, y] cell keeping `corridor_radius`-based clearance from the map border."""
    lo_x, hi_x = corridor_radius + 2, (map.shape[0] - corridor_radius) - 1
    lo_y, hi_y = corridor_radius + 2, (map.shape[1] - corridor_radius) - 1
    x = np.random.choice(range(lo_x, hi_x, 1))
    y = np.random.choice(range(lo_y, hi_y, 1))
    return [x, y]
def main():
    """Generate ops.py: wrap every open3d_* op exposed by the compiled TF op
    library in a documented Python function, then format the output file."""
    parser = argparse.ArgumentParser(description='Creates the ops.py file')
    parser.add_argument('--input', type=str, required=True, help='input file with header')
    parser.add_argument('--output', type=str, required=True, help='output file')
    parser.add_argument('--lib', type=str, required=True, help='path to open3d_tf_ops.so')
    args = parser.parse_args()
    print(args)
    oplib = tf.load_op_library(args.lib)
    generated_function_strs = ''
    for (fn_name, value) in inspect.getmembers(oplib):
        # Only wrap public open3d_* functions; skip eager fallbacks.
        if ((not inspect.isfunction(value)) or (not fn_name.startswith('open3d_')) or fn_name.endswith('_eager_fallback')):
            continue
        docstring = getattr(oplib, fn_name).__doc__
        docstring = (('"""' + docstring) + '\n"""')
        docstring = textwrap.indent(docstring, INDENT_SPACES)
        signature = inspect.signature(value)
        fn_args = []
        args_fwd = []
        for (_, param) in signature.parameters.items():
            tmp = param.name
            if (param.default != inspect.Parameter.empty):
                # Render the default so the generated signature is valid Python.
                if isinstance(param.default, str):
                    tmp += '="{}"'.format(str(param.default))
                elif isinstance(param.default, type(tf.float32)):
                    # TF dtype defaults become _tf.<name> references.
                    tmp += '=_tf.{}'.format(param.default.name)
                else:
                    tmp += '={}'.format(str(param.default))
            fn_args.append(tmp)
            args_fwd.append('{arg}={arg}'.format(arg=param.name))
        fn_args = ', '.join(fn_args)
        args_fwd = ', '.join(args_fwd)
        # fn_name[7:] strips the 'open3d_' prefix for the public name.
        generated_function_strs += FN_TEMPLATE_STR.format(fn_name_short=fn_name[7:], fn_name=('_lib.' + fn_name), fn_args=fn_args, docstring=docstring, args_fwd=args_fwd)
    with open(args.input, 'r') as f:
        input_header = f.read()
    os.makedirs(os.path.dirname(args.output), exist_ok=True)
    with open(args.output, 'w') as f:
        f.write((input_header + generated_function_strs))
    FormatFile(args.output, in_place=True)
    return 0
def test_forward_method_accepted():
    """A custom model exposing forward/transform should be accepted by CNN and
    encode images without raising."""
    cnn = CNN(model_config=CustomModel(model=ForwardModel(), transform=ForwardModel.transform, name=ForwardModel.name))
    assert (cnn.model_config.name == ForwardModel.name)
    assert (cnn.model_config.transform == ForwardModel.transform)
    # Any exception from encoding is a test failure.
    try:
        cnn.encode_images(TEST_IMAGE_DIR)
    except Exception as e:
        pytest.fail(f'Unexpected exception: {e}')
def calc_error(est_disp=None, gt_disp=None, lb=None, ub=None):
    """Disparity error metrics over pixels whose ground truth lies in (lb, ub):
    percentage of pixels with error above 1/2/3/5 px, plus the mean EPE.

    Returns all-zero metrics when inputs are not tensors or no pixel is valid.
    """
    def _as_dict(e1, e2, e3, e5, epe):
        # Threshold metrics are reported as percentages; EPE stays raw.
        return {'1px': e1 * 100, '2px': e2 * 100, '3px': e3 * 100, '5px': e5 * 100, 'epe': epe}

    zeros = [torch.Tensor([0.0]) for _ in range(5)]
    if not (torch.is_tensor(est_disp) and torch.is_tensor(gt_disp)):
        return _as_dict(*zeros)
    assert torch.is_tensor(est_disp) and torch.is_tensor(gt_disp)
    assert est_disp.shape == gt_disp.shape
    est = est_disp.clone().cpu()
    gt = gt_disp.clone().cpu()
    # Valid-pixel mask from the optional lower/upper ground-truth bounds.
    valid = torch.ones(gt.shape, dtype=torch.bool)
    if lb is not None:
        valid = valid & (gt > lb)
    if ub is not None:
        valid = valid & (gt < ub)
    valid.detach_()
    if abs(valid.float().sum()) < 1.0:
        # No pixel survived the range filter.
        return _as_dict(*zeros)
    gt = gt[valid]
    est = est[valid]
    abs_err = torch.abs(gt - est)
    n_valid = valid.float().sum()
    rates = [torch.sum(torch.gt(abs_err, t).float()) / n_valid for t in (1, 2, 3, 5)]
    return _as_dict(rates[0], rates[1], rates[2], rates[3], abs_err.float().mean())
# NOTE(review): this bare call looks like a '@register_model_architecture'
# decorator that lost its '@' prefix during extraction — confirm.
_model_architecture('s2t_transformer_w2v2', 's2t_transformer_b_w2v_6tenc_6dec')
def s2t_transformer_b_12aenc_6tenc_6dec(args):
    """Architecture preset: 6 translation-encoder layers on top of the base config."""
    args.translation_encoder_layers = getattr(args, 'translation_encoder_layers', 6)
    base_architecture(args)
# NOTE(review): stray bare name — likely the remnant of a registry decorator
# (e.g. '@PIPELINES.register_module') mangled during extraction; confirm.
_module
class ImageToTensor(object):
    """Pipeline transform that converts HWC image arrays under `keys` into CHW tensors."""

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        for key in self.keys:
            # HWC -> CHW before the tensor conversion.
            results[key] = to_tensor(results[key].transpose(2, 0, 1))
        return results

    def __repr__(self):
        return (self.__class__.__name__ + '(keys={})'.format(self.keys))
def read_image_file(path):
    """Parse an MNIST/IDX image file into a uint8 tensor of shape (N, rows, cols)."""
    with open(path, 'rb') as f:
        data = f.read()
    # Magic number 2051 identifies an IDX image file.
    assert (get_int(data[:4]) == 2051)
    length = get_int(data[4:8])
    num_rows = get_int(data[8:12])
    num_cols = get_int(data[12:16])
    # Removed an unused `images = []` accumulator from the original.
    # Pixel data starts immediately after the 16-byte header.
    parsed = np.frombuffer(data, dtype=np.uint8, offset=16)
    return torch.from_numpy(parsed).view(length, num_rows, num_cols)
def load_multprec_system():
    """Return the multiprecision polynomial system currently stored in
    PHCpack's system container, as a list of string polynomials."""
    from phcpy.phcpy2c3 import py2c_syscon_number_of_multprec_polynomials
    from phcpy.phcpy2c3 import py2c_syscon_load_multprec_polynomial
    dim = py2c_syscon_number_of_multprec_polynomials()
    # Container indices are 1-based.
    return [py2c_syscon_load_multprec_polynomial(i) for i in range(1, dim + 1)]
def get_required_argument(dotmap, key, message, default=None):
    """Fetch `key` from `dotmap`; raise ValueError(message) when the lookup
    falls back to (is identical to) the sentinel `default`."""
    value = dotmap.get(key, default)
    if value is not default:
        return value
    raise ValueError(message)
def scaffold_similarity(smiles_1: List[str], smiles_2: List[str]):
    """Print overlap statistics between the scaffold sets of two SMILES datasets.

    Uses the module-level `scaffold_to_smiles` helper, which (as used here)
    maps scaffold -> set of SMILES belonging to it. Purely a reporting
    function: everything goes to stdout and nothing is returned.

    Args:
        smiles_1: SMILES strings of dataset 1.
        smiles_2: SMILES strings of dataset 2.
    """
    scaffold_to_smiles_1 = scaffold_to_smiles(smiles_1)
    scaffold_to_smiles_2 = scaffold_to_smiles(smiles_2)
    (scaffolds_1, smiles_sets_1) = zip(*scaffold_to_smiles_1.items())
    (scaffolds_2, smiles_sets_2) = zip(*scaffold_to_smiles_2.items())
    # Inverse map smiles -> scaffold; dataset-2 entries overwrite duplicates,
    # which is harmless since a molecule's scaffold is the same either way.
    smiles_to_scaffold = {smiles: scaffold for (scaffold, smiles_set) in scaffold_to_smiles_1.items() for smiles in smiles_set}
    smiles_to_scaffold.update({smiles: scaffold for (scaffold, smiles_set) in scaffold_to_smiles_2.items() for smiles in smiles_set})
    (scaffolds_1, scaffolds_2) = (set(scaffolds_1), set(scaffolds_2))
    (smiles_1, smiles_2) = (set(smiles_1), set(smiles_2))
    all_scaffolds = (scaffolds_1 | scaffolds_2)
    all_smiles = (smiles_1 | smiles_2)
    scaffolds_intersection = (scaffolds_1 & scaffolds_2)
    smiles_intersection = {smiles for smiles in all_smiles if (smiles_to_scaffold[smiles] in scaffolds_intersection)}
    smiles_in_1_with_scaffold_in_2 = {smiles for smiles in smiles_1 if (smiles_to_scaffold[smiles] in scaffolds_2)}
    smiles_in_2_with_scaffold_in_1 = {smiles for smiles in smiles_2 if (smiles_to_scaffold[smiles] in scaffolds_1)}
    # Molecules-per-scaffold counts for the percentile report below.
    sizes_1 = np.array([len(smiles_set) for smiles_set in smiles_sets_1])
    sizes_2 = np.array([len(smiles_set) for smiles_set in smiles_sets_2])
    print()
    print(f'Number of molecules = {len(all_smiles):,}')
    print(f'Number of scaffolds = {len(all_scaffolds):,}')
    print()
    print(f'Number of scaffolds in both datasets = {len(scaffolds_intersection):,}')
    print(f'Scaffold intersection over union = {(len(scaffolds_intersection) / len(all_scaffolds)):.4f}')
    print()
    print(f'Number of molecules with scaffold in both datasets = {len(smiles_intersection):,}')
    print(f'Molecule intersection over union = {(len(smiles_intersection) / len(all_smiles)):.4f}')
    print()
    print(f'Number of molecules in dataset 1 = {np.sum(sizes_1):,}')
    print(f'Number of scaffolds in dataset 1 = {len(scaffolds_1):,}')
    print()
    print(f'Number of molecules in dataset 2 = {np.sum(sizes_2):,}')
    print(f'Number of scaffolds in dataset 2 = {len(scaffolds_2):,}')
    print()
    print(f'Percent of scaffolds in dataset 1 which are also in dataset 2 = {((100 * len(scaffolds_intersection)) / len(scaffolds_1)):.2f}%')
    print(f'Percent of scaffolds in dataset 2 which are also in dataset 1 = {((100 * len(scaffolds_intersection)) / len(scaffolds_2)):.2f}%')
    print()
    print(f'Number of molecules in dataset 1 with scaffolds in dataset 2 = {len(smiles_in_1_with_scaffold_in_2):,}')
    print(f'Percent of molecules in dataset 1 with scaffolds in dataset 2 = {((100 * len(smiles_in_1_with_scaffold_in_2)) / len(smiles_1)):.2f}%')
    print()
    print(f'Number of molecules in dataset 2 with scaffolds in dataset 1 = {len(smiles_in_2_with_scaffold_in_1):,}')
    print(f'Percent of molecules in dataset 2 with scaffolds in dataset 1 = {((100 * len(smiles_in_2_with_scaffold_in_1)) / len(smiles_2)):.2f}%')
    print()
    print(f'Average number of molecules per scaffold in dataset 1 = {np.mean(sizes_1):.4f} +/- {np.std(sizes_1):.4f}')
    print('Percentiles for molecules per scaffold in dataset 1')
    print(' | '.join([f'{i}% = {int(np.percentile(sizes_1, i)):,}' for i in range(0, 101, 10)]))
    print()
    print(f'Average number of molecules per scaffold in dataset 2 = {np.mean(sizes_2):.4f} +/- {np.std(sizes_2):.4f}')
    print('Percentiles for molecules per scaffold in dataset 2')
    print(' | '.join([f'{i}% = {int(np.percentile(sizes_2, i)):,}' for i in range(0, 101, 10)]))
def test_handle_hidden_limit_orders():
    """A hidden limit order on either side must rest in the matching side of a
    fresh book and produce exactly one message mirroring its attributes."""
    for side in (Side.BID, Side.ASK):
        order = LimitOrder(agent_id=1, time_placed=TIME, symbol=SYMBOL, quantity=10, side=side, is_hidden=True, limit_price=100)
        agent = FakeExchangeAgent()
        book = OrderBook(agent, SYMBOL)
        book.handle_limit_order(order)
        resting_level = [PriceLevel([(order, {})])]
        if side == Side.BID:
            assert book.bids == resting_level
            assert book.asks == []
        else:
            assert book.bids == []
            assert book.asks == resting_level
        assert len(agent.messages) == 1
        recipient, message = agent.messages[0]
        assert recipient == 1
        placed = message.order
        assert placed.agent_id == 1
        assert placed.side == side
        assert placed.is_hidden == True
        assert placed.limit_price == 100
        assert placed.quantity == 10
def reload_data(data_paths):
    """Reload experiment logs from `data_paths` and derive plotting metadata.

    Returns:
        (exps_data, plottable_keys, distinct_params): the experiment records,
        the sorted union of progress-metric names, and the sorted distinct
        hyperparameter settings.
    """
    exps_data = copy.copy(core.load_exps_data(data_paths, disable_variant=False, ignore_missing_keys=True))
    # Union of every experiment's progress columns.
    metric_names = set()
    for exp in exps_data:
        metric_names.update(exp.progress.keys())
    plottable_keys = copy.copy(sorted(metric_names))
    distinct_params = copy.copy(sorted(core.extract_distinct_params(exps_data)))
    return (exps_data, plottable_keys, distinct_params)
def _check_sequence_input(x, name, req_sizes):
msg = (req_sizes[0] if (len(req_sizes) < 2) else ' or '.join([str(s) for s in req_sizes]))
if (not isinstance(x, Sequence)):
raise TypeError('{} should be a sequence of length {}.'.format(name, msg))
if (len(x) not in req_sizes):
raise ValueError('{} should be sequence of length {}.'.format(name, msg)) |
def create_model(sess, config, cate_list):
    """Build a Model and either restore it from the newest checkpoint in
    FLAGS.model_dir or initialize fresh TF variables.

    Args:
        sess: active tf.Session used for restore / variable initialization.
        config: dict-like model configuration (dumped to stdout for logging).
        cate_list: category list forwarded to the Model constructor.

    Returns:
        The constructed Model, ready for training or inference.
    """
    print(json.dumps(config, indent=4), flush=True)
    model = Model(config, cate_list)
    print('All global variables:')
    # Log every global variable, marking the trainable ones.
    for v in tf.global_variables():
        if (v not in tf.trainable_variables()):
            print('\t', v)
        else:
            print('\t', v, 'trainable')
    ckpt = tf.train.get_checkpoint_state(FLAGS.model_dir)
    if (ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path)):
        print('Reloading model parameters..', flush=True)
        model.restore(sess, ckpt.model_checkpoint_path)
    else:
        # No usable checkpoint: create the directory if needed and start fresh.
        if (not os.path.exists(FLAGS.model_dir)):
            os.makedirs(FLAGS.model_dir)
        print('Created new model parameters..', flush=True)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
    return model
class ResizeBatch(Module):
    """Reshape every element of a batch to `size`, preserving the batch dim.

    NOTE(review): `__init__` does not call `super().__init__()`. That is fine
    if `Module` here is e.g. fastai's Module (which initializes via its
    metaclass) but a bug if it is a plain `torch.nn.Module` — confirm which
    base class is imported at the top of this file.
    """
    def __init__(self, *size: int):
        # Target per-item shape, appended after the batch dimension.
        self.size = size
    def forward(self, x):
        return x.view(((x.size(0),) + self.size))
def collect_files(img_dir, gt_dir, split):
    """Pair image files from `img_dir` with annotation files from `gt_dir`.

    Matching is positional: both listings are sorted and zipped, so the
    caller is responsible for the two directories lining up one-to-one.
    NOTE(review): `split` is accepted but unused — presumably kept for
    interface parity with sibling collectors; confirm before removing.

    Returns:
        List of (image_path, annotation_path) tuples.
    """
    for directory in (img_dir, gt_dir):
        assert isinstance(directory, str)
        assert directory
    suffixes = ['.png', '.PNG', '.jpg', '.JPG', '.jpeg', '.JPEG']
    imgs_list = sorted(path
                       for suffix in suffixes
                       for path in glob.glob(osp.join(img_dir, '*' + suffix)))
    ann_list = sorted(osp.join(gt_dir, gt_file) for gt_file in os.listdir(gt_dir))
    files = list(zip(imgs_list, ann_list))
    assert len(files), f'No images found in {img_dir}'
    print(f'Loaded {len(files)} images from {img_dir}')
    return files
def process_gallery_sysu_all(mode='all', data_path_ori='/home/share/reid_dataset/SYSU-MM01/'):
    """Collect the RGB gallery images for the SYSU-MM01 test identities.

    The person id and camera id are decoded from fixed character positions in
    each path (…/camN/PPPP/XXXX.ext), so the dataset's naming layout must be
    preserved exactly.

    Returns:
        (image_paths, np.ndarray of person ids, np.ndarray of camera ids).
    """
    if mode == 'all':
        rgb_cameras = ['cam1', 'cam2', 'cam4', 'cam5']
    elif mode == 'indoor':
        rgb_cameras = ['cam1', 'cam2']
    id_file = os.path.join(data_path_ori, 'exp/test_id.txt')
    files_rgb = []
    with open(id_file, 'r') as fh:
        raw_lines = fh.read().splitlines()
        # First line is a comma-separated list of integer ids; zero-pad to 4.
        person_ids = ['%04d' % int(tok) for tok in raw_lines[0].split(',')]
        for person in sorted(person_ids):
            for cam in rgb_cameras:
                cam_dir = os.path.join(data_path_ori, cam, person)
                if os.path.isdir(cam_dir):
                    files_rgb.extend(sorted(cam_dir + '/' + fname for fname in os.listdir(cam_dir)))
    gall_img = []
    gall_id = []
    gall_cam = []
    for img_path in files_rgb:
        # Camera digit sits 15 chars from the end; the 4-digit pid at [-13:-9].
        gall_img.append(img_path)
        gall_id.append(int(img_path[(- 13):(- 9)]))
        gall_cam.append(int(img_path[(- 15)]))
    return (gall_img, np.array(gall_id), np.array(gall_cam))
class CNNGeometric(nn.Module):
    """CNN geometric-matching model: extract features from source and target
    images, correlate them, and regress transformation parameters.

    Args:
        output_dim: number of regressed transformation parameters.
        fr_kernel_sizes / fr_channels: regressor conv kernel sizes / widths
            (default [7, 5] / [128, 64]).
        Remaining flags configure feature extraction/normalization and CUDA.
    """
    def __init__(self, output_dim=6, feature_extraction_cnn='vgg', feature_extraction_last_layer='', return_correlation=False, fr_feature_size=15, fr_kernel_sizes=None, fr_channels=None, feature_self_matching=False, normalize_features=True, normalize_matches=True, batch_normalization=True, train_fe=False, use_cuda=True):
        super(CNNGeometric, self).__init__()
        # (fixed) mutable-default-argument pitfall: the original used shared
        # module-level lists [7, 5] / [128, 64] as defaults; build fresh ones
        # per instance instead. Callers passing explicit values are unaffected.
        if fr_kernel_sizes is None:
            fr_kernel_sizes = [7, 5]
        if fr_channels is None:
            fr_channels = [128, 64]
        self.use_cuda = use_cuda
        self.feature_self_matching = feature_self_matching
        self.normalize_features = normalize_features
        self.normalize_matches = normalize_matches
        self.return_correlation = return_correlation
        self.FeatureExtraction = FeatureExtraction(train_fe=train_fe, feature_extraction_cnn=feature_extraction_cnn, last_layer=feature_extraction_last_layer, normalization=normalize_features, use_cuda=self.use_cuda)
        self.FeatureCorrelation = FeatureCorrelation(shape='3D', normalization=normalize_matches)
        self.FeatureRegression = FeatureRegression(output_dim, use_cuda=self.use_cuda, feature_size=fr_feature_size, kernel_sizes=fr_kernel_sizes, channels=fr_channels, batch_normalization=batch_normalization)
        self.ReLU = nn.ReLU(inplace=True)
    def forward(self, tnf_batch):
        """Regress transformation parameters for a {'source_image',
        'target_image'} batch; optionally also return the correlation map."""
        feature_A = self.FeatureExtraction(tnf_batch['source_image'])
        feature_B = self.FeatureExtraction(tnf_batch['target_image'])
        correlation = self.FeatureCorrelation(feature_A, feature_B)
        theta = self.FeatureRegression(correlation)
        if self.return_correlation:
            return (theta, correlation)
        else:
            return theta
def writeWorld(output):
    """Emit the world definition to `output` exactly once per run.

    Controlled by the module-level `wroteWorld` flag and the module-level
    `options` namespace (root/part/noStaticInit switches).
    """
    global wroteWorld
    # Idempotence guard: only the first call writes anything.
    if wroteWorld:
        return
    writePreamble(output)
    writeSuites(output)
    # The root is emitted unless we are generating a partial output without
    # an explicit root request.
    if (options.root or (not options.part)):
        writeRoot(output)
    writeWorldDescr(output)
    if options.noStaticInit:
        writeInitialize(output)
    wroteWorld = 1
def compute_detection_metrics(df: pd.DataFrame, max_dets: Optional[list]=None, max_ddg: float=(- 0.5)):
    """Per-PDB ranking metrics (NDCG and precision) for stabilizing mutations.

    Args:
        df: table with columns pdb_id, scores, ddg, mut_info.
        max_dets: detection cutoffs k at which to evaluate (defaults to [30]).
        max_ddg: ddg threshold at or below which a mutation counts as
            stabilizing.

    Returns:
        (summary, metrics_pdb): per-(max_ddg, max_det) means merged with PDB
        counts, and the raw per-PDB metric rows.

    Raises:
        AssertionError: if no PDB yields evaluable metrics.
    """
    # (fixed) mutable-default-argument pitfall: build the default cutoff list
    # per call instead of sharing one module-level list across calls.
    if max_dets is None:
        max_dets = [30]
    metrics_pdb = []
    for pdb_id in df.pdb_id.unique():
        df_pdb = df[(df.pdb_id == pdb_id)].sort_values('scores', ascending=False)
        scores = df_pdb.scores.to_numpy()
        ddg = df_pdb.ddg.to_numpy()
        # NDCG gain: stabilization magnitude, clipped at zero.
        nddg = np.maximum((- ddg), 0)
        df_pdb_stbl = df_pdb[(ddg <= max_ddg)]
        muts_sorted = df_pdb.mut_info.to_list()
        # Skip PDBs with a single mutation or with no stabilizing mutations.
        if ((len(df_pdb) <= 1) or (len(df_pdb_stbl) == 0)):
            continue
        for max_det in sorted(max_dets):
            metrics_ = {'pdb_id': pdb_id, 'max_det': max_det, 'max_ddg': max_ddg, 'n_tot_muts': len(df_pdb), 'ndcg': ndcg_score(nddg[None], scores[None], k=max_det), **compute_precision(df_pdb_stbl, muts_sorted[:max_det])}
            metrics_pdb.append(metrics_)
    metrics_pdb = pd.DataFrame(metrics_pdb)
    assert (len(metrics_pdb) > 0), 'no pdbs evaluated'
    summary = metrics_pdb.groupby(['max_ddg', 'max_det'], as_index=False).mean(numeric_only=True)
    counts = metrics_pdb.groupby(['max_ddg', 'max_det'], as_index=False).pdb_id.count()
    summary = pd.merge(counts, summary, on=['max_ddg', 'max_det'])
    return (summary, metrics_pdb)
class Occlusion_detector(nn.Module):
    """Two-stage Perceiver-style cross-attention stack mapping input tokens to
    a fixed set of `num_tokens` output latents.

    Stage 1: `num_latents` learned queries attend over the input.
    Stage 2: `num_tokens` learned queries attend over the stage-1 latents.
    All sub-blocks use residual connections.
    """
    def __init__(self, input_channels=768, num_tokens=128, num_latents=64, latent_dim=768, cross_heads=8, latent_heads=8, cross_dim_head=96, latent_dim_head=96, attn_dropout=0.0, ff_dropout=0.0):
        super().__init__()
        # Learned query sets for the two stages.
        self.latents1 = nn.Parameter(torch.randn(1, num_latents, latent_dim))
        self.latents2 = nn.Parameter(torch.randn(1, num_tokens, latent_dim))
        self.layers1 = PreNorm(latent_dim, Attention(latent_dim, heads=cross_heads, dim_head=cross_dim_head, dropout=attn_dropout))
        self.layers2 = PreNorm(latent_dim, FeedForward(latent_dim, dropout=ff_dropout))
        self.layers3 = PreNorm(latent_dim, Attention(latent_dim, heads=cross_heads, dim_head=cross_dim_head, dropout=attn_dropout))
        self.layers4 = PreNorm(latent_dim, FeedForward(latent_dim, dropout=ff_dropout))
    def forward(self, data, mask=None):
        """Return (batch, num_tokens, latent_dim) latents for `data`."""
        batch = data.size(0)
        stage1 = self.latents1.repeat(batch, 1, 1)
        stage2 = self.latents2.repeat(batch, 1, 1)
        # Stage 1: latents attend over the raw input tokens.
        stage1 = self.layers1(stage1, data) + stage1
        stage1 = self.layers2(stage1) + stage1
        # Stage 2: output tokens attend over the refined latents.
        stage2 = self.layers3(stage2, stage1) + stage2
        stage2 = self.layers4(stage2) + stage2
        return stage2
class SnippetInfill(ast.NodeTransformer):
    """AST transformer that masks one aspect of a targeted API call in a code
    snippet — its arguments, a random prefix of lines, a random suffix of
    lines, or a combination — producing infill samples for code models.
    """
    def __init__(self, mask_identifier: str, api_call: str, prefix: str, library: str, replace_type: str='argument'):
        # mask_identifier: format string for the mask token, filled with an
        #   index via .format(0) / .format(1).
        # api_call: attribute name of the target call (e.g. 'Conv2d').
        # prefix: expected dotted path before the call (e.g. 'torch.nn').
        # library: 'torch' adds a torch import, anything else tensorflow.
        # replace_type: 'argument' | 'prefix' | 'suffix' | 'prefix-argument'
        #   | 'suffix-argument'.
        self.mask_identifier = mask_identifier
        self.api_call = api_call
        self.num_replaced = 0
        self.line_no = (- 1)
        self.prefix = prefix
        self.library = library
        self.replace_type = replace_type
        self.replace = False
    def add_infill(self, snippet: str) -> (int, str, str):
        """Return (num_matches, infilled_code, normalized_original_code).

        A first visit pass only counts matching calls; if at least one match
        exists, a second pass performs the masking. Returns (-1, '', '') when
        the snippet fails to parse/unparse.
        """
        try:
            o_ast = ast.parse(snippet)
            # Round-trip through astunparse to normalize formatting first.
            original_code = astunparse.unparse(o_ast).strip()
            o_ast = ast.parse(original_code)
        # NOTE(review): bare except silently swallows all parse errors.
        except:
            return ((- 1), '', '')
        # Counting pass (self.replace is False, so nothing is modified).
        self.visit(o_ast)
        if (self.num_replaced < 1):
            return (self.num_replaced, '', original_code)
        # Replacement pass: mask the first matching call.
        (self.replace, self.num_replaced) = (True, 0)
        modified = self.visit(o_ast)
        modified = ast.fix_missing_locations(modified)
        if (self.replace_type == 'argument'):
            # Swap the quoted placeholder constant for the real mask token.
            infill_code = astunparse.unparse(modified).strip().replace("'{}'".format(self.mask_identifier), self.mask_identifier.format(0))
        elif (self.replace_type == 'prefix'):
            # Mask a random span of lines before the call site.
            infill_code = ('import torch\n' if (self.library == 'torch') else 'import tensorflow as tf\n')
            infill_code += 'import numpy as np\n'
            end_replace = random.randint(0, (self.line_no - 1))
            start_replace = random.randint(0, end_replace)
            infill_code += '\n'.join(original_code.splitlines()[:start_replace])
            if (start_replace != 0):
                infill_code += '\n'
            infill_code += ((self.mask_identifier.format(0) + '\n') + '\n'.join(original_code.splitlines()[end_replace:]))
        elif (self.replace_type == 'suffix'):
            # Mask a random span of lines after the call site.
            start_replace = random.randint(self.line_no, len(original_code.splitlines()))
            end_replace = random.randint(start_replace, len(original_code.splitlines()))
            infill_code = (('\n'.join(original_code.splitlines()[:start_replace]) + '\n') + self.mask_identifier.format(0))
            if (end_replace != len(original_code.splitlines())):
                infill_code += '\n'
            infill_code += '\n'.join(original_code.splitlines()[end_replace:])
        elif (self.replace_type == 'prefix-argument'):
            # Mask a prefix span (mask 0) and the call's arguments (mask 1).
            t_infill_code = astunparse.unparse(modified).strip().replace("'{}'".format(self.mask_identifier), self.mask_identifier.format(1))
            infill_code = ('import torch\n' if (self.library == 'torch') else 'import tensorflow as tf\n')
            infill_code += 'import numpy as np\n'
            end_replace = random.randint(0, (self.line_no - 1))
            start_replace = random.randint(0, end_replace)
            infill_code += '\n'.join(t_infill_code.splitlines()[:start_replace])
            if (start_replace != 0):
                infill_code += '\n'
            infill_code += ((self.mask_identifier.format(0) + '\n') + '\n'.join(t_infill_code.splitlines()[end_replace:]))
        elif (self.replace_type == 'suffix-argument'):
            # Mask the call's arguments (mask 0) and a suffix span (mask 1).
            t_infill_code = astunparse.unparse(modified).strip().replace("'{}'".format(self.mask_identifier), self.mask_identifier.format(0))
            start_replace = random.randint(self.line_no, len(t_infill_code.splitlines()))
            end_replace = random.randint(start_replace, len(t_infill_code.splitlines()))
            infill_code = (('\n'.join(t_infill_code.splitlines()[:start_replace]) + '\n') + self.mask_identifier.format(1))
            if (end_replace != len(t_infill_code.splitlines())):
                infill_code += '\n'
            infill_code += '\n'.join(t_infill_code.splitlines()[end_replace:])
        else:
            assert False
        return (self.num_replaced, infill_code, original_code)
    def visit_Call(self, node: ast.Call):
        """Count matching API calls; on the replacement pass, mask the first."""
        if (('attr' in dir(node.func)) and (self.api_call == node.func.attr)):
            # Reconstruct the dotted prefix of the call (e.g. 'torch.nn').
            temp_node = node.func
            prefix = ''
            while (('value' in dir(temp_node)) and ('attr' in dir(temp_node.value))):
                prefix = ((temp_node.value.attr + '.') + prefix)
                temp_node = temp_node.value
            if prefix.endswith('.'):
                prefix = prefix[:(- 1)]
            if (prefix != self.prefix):
                self.generic_visit(node)
                return node
            self.num_replaced += 1
            if (not self.replace):
                self.generic_visit(node)
                return node
            if ('argument' in self.replace_type):
                # Replace all positional args with the quoted mask placeholder
                # (later substituted textually) and drop keyword args.
                node.args = [ast.Constant(value=self.mask_identifier)]
                if ('keywords' in dir(node)):
                    node.keywords = []
            # Only the first matching call is rewritten per pass; remember its
            # line for the prefix/suffix span sampling above.
            self.replace = False
            self.line_no = node.lineno
            self.generic_visit(node)
            return node
        else:
            self.generic_visit(node)
            return node
    def visit(self, node):
        """Route Call nodes through visit_Call; traverse everything else."""
        if isinstance(node, ast.Call):
            return self.visit_Call(node)
        self.generic_visit(node)
        return node
def test_SE2_inverse_transform_point_cloud_identity() -> None:
    """Under the identity SE2, inverse-transforming a cloud returns it
    unchanged, and an (N, 3) cloud is rejected with ValueError."""
    cloud = np.array([[0.5, 0], [1, -0.5], [1.5, 0], [2, -1]])
    identity_pose = SE2(rotation=np.eye(2), translation=np.zeros(2))
    recovered = identity_pose.inverse_transform_point_cloud(cloud.copy())
    assert np.allclose(recovered, cloud)
    with pytest.raises(ValueError):
        identity_pose.transform_point_cloud(np.random.rand(1, 3))
class RandomIdentitySampler(Sampler):
    """Sampler yielding indices so each consecutive batch contains
    `batch_size // num_instances` person identities with `num_instances`
    samples each (standard re-ID PK sampling).
    """
    def __init__(self, data_source, batch_size, num_instances):
        self.data_source = data_source
        self.batch_size = batch_size
        self.num_instances = num_instances
        # Number of distinct identities (P) drawn per batch.
        self.num_pids_per_batch = (self.batch_size // self.num_instances)
        self.index_dic = defaultdict(list)
        # data_source items are 4-tuples; the second element is the person id.
        for (index, (_, pid, _, _)) in enumerate(self.data_source):
            self.index_dic[pid].append(index)
        self.pids = list(self.index_dic.keys())
        # Estimated epoch length: each pid contributes a multiple of
        # num_instances (pids with fewer samples are upsampled to one chunk).
        self.length = 0
        for pid in self.pids:
            idxs = self.index_dic[pid]
            num = len(idxs)
            if (num < self.num_instances):
                num = self.num_instances
            self.length += (num - (num % self.num_instances))
    def __iter__(self):
        """Yield indices whose consecutive chunks are identity-balanced."""
        # Pre-chunk each pid's shuffled indices into groups of num_instances.
        batch_idxs_dict = defaultdict(list)
        for pid in self.pids:
            idxs = copy.deepcopy(self.index_dic[pid])
            if (len(idxs) < self.num_instances):
                # Oversample with replacement so the pid fills one full chunk.
                idxs = np.random.choice(idxs, size=self.num_instances, replace=True)
            random.shuffle(idxs)
            batch_idxs = []
            for idx in idxs:
                batch_idxs.append(idx)
                if (len(batch_idxs) == self.num_instances):
                    batch_idxs_dict[pid].append(batch_idxs)
                    batch_idxs = []
        # Draw P identities at a time until too few pids remain for a batch;
        # a pid leaves the pool once its chunks are exhausted.
        avai_pids = copy.deepcopy(self.pids)
        final_idxs = []
        while (len(avai_pids) >= self.num_pids_per_batch):
            selected_pids = random.sample(avai_pids, self.num_pids_per_batch)
            for pid in selected_pids:
                batch_idxs = batch_idxs_dict[pid].pop(0)
                final_idxs.extend(batch_idxs)
                if (len(batch_idxs_dict[pid]) == 0):
                    avai_pids.remove(pid)
        return iter(final_idxs)
    def __len__(self):
        return self.length
def nlvr2_paired_eval_collate(inputs):
    """Collate NLVR2 eval examples: split off the example ids, delegate the
    tensor collation to `nlvr2_paired_collate`, then attach the ids back
    under the 'qids' key."""
    qids = [example[0] for example in inputs]
    tensor_groups = [list(rest) for (_, *rest) in inputs]
    batch = nlvr2_paired_collate(tensor_groups)
    batch['qids'] = qids
    return batch
class BilinearDecoder(nn.Module):
    """Bilinear link decoder: scores node pairs as act(X R X^T) with a
    learned (input_dim, input_dim) relation matrix R and optional dropout."""
    def __init__(self, input_dim: int, dropout: float=0.0, act=(lambda x: x)):
        super(BilinearDecoder, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.act = act
        # Learned relation matrix, Xavier-initialized below.
        self.relation = Parameter(torch.FloatTensor(input_dim, input_dim))
        self.reset_parameter()
    def reset_parameter(self):
        """Xavier-uniform init for the relation matrix."""
        nn.init.xavier_uniform_(self.relation.data)
    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Return the (N, N) activated pairwise score matrix for (N, D) input."""
        rows = self.dropout(inputs)
        cols = self.dropout(inputs.transpose(0, 1))
        scores = torch.mm(torch.mm(rows, self.relation), cols)
        return self.act(scores)
def test():
    """Smoke-test TorchLibrary code generation and oracle testing for
    torch.nn.Conv2d under the CRASH, CUDA and PRECISION oracles."""
    api = TorchAPI('torch.nn.Conv2d')
    MyPytorch = TorchLibrary('torch-output')
    oracles = (OracleType.CRASH, OracleType.CUDA, OracleType.PRECISION)
    for oracle in oracles:
        print(MyPytorch.generate_code(api, oracle))
    for oracle in oracles:
        MyPytorch.test_with_oracle(api, oracle)
def analyze_pred_dist_single_step(pred_distribution: np.ndarray, k=5):
    """Log an HTML rendering of one decoding step's output distribution.

    Emits, via the module-level `logger`, the top-1 token (indented in
    proportion to the distribution's entropy) followed by a word-importance
    rendering of the top-k tokens. Relies on module-level `bpe_tokenizer`
    and `format_word_importances`.

    Args:
        pred_distribution: 1-D probability/score vector over the vocabulary.
        k: number of top tokens to visualize.
    """
    ent = scipy.stats.entropy(pred_distribution)
    # Indent level derived from entropy; rendered below as "{level}0px".
    # NOTE(review): int(ent) * 3 truncates before scaling — confirm intended.
    level_of_ent = (int(ent) * 3)
    # Indices of the k highest-scoring tokens, most likely first.
    topk_idx = pred_distribution.argsort()[(- k):][::(- 1)]
    (words, probs) = ([], [])
    decoded_word = bpe_tokenizer.decode(int(topk_idx[0]))
    for index in topk_idx:
        _word_ = bpe_tokenizer.decode(int(index))
        _prob_ = pred_distribution[int(index)]
        words.append(_word_)
        probs.append(_prob_)
    out = format_word_importances(words, probs)
    logger.info(f"<p style='text-indent: {level_of_ent}0px'><strong>{decoded_word}</strong> Ent: {ent}</p>")
    logger.info(f"<p style='text-indent: {level_of_ent}0px'>{out}</p>")
def get_config():
    """Build the 'finite_drift' experiment config: stationary vs drifting
    Thompson-sampling agents on a 3-armed drifting Bernoulli bandit."""
    name = 'finite_drift'
    n_arm = 3
    stationary_agent = functools.partial(FiniteBernoulliBanditTS, n_arm)
    drifting_agent = functools.partial(DriftingFiniteBernoulliBanditTS, n_arm)
    agents = collections.OrderedDict([('stationary_ts', stationary_agent), ('nonstationary_ts', drifting_agent)])
    environments = collections.OrderedDict([('env', functools.partial(DriftingFiniteArmedBernoulliBandit, n_arm))])
    experiments = collections.OrderedDict([(name, BaseExperiment)])
    n_steps = 1000
    n_seeds = 10000
    return Config(name, agents, environments, experiments, n_steps, n_seeds)
class ScalarField(object):
    """Declarative description of a named scalar field with value bounds.

    NOTE(review): fields are declared with `attr.ib` but no `@attr.s` /
    `@attr.attrs` decorator is visible on this class — without it these stay
    bare `attr.ib` descriptors and no __init__ is generated. Confirm the
    decorator wasn't lost when this file was assembled.
    """
    # Field name.
    name = attr.ib(type=str)
    # Inclusive-or-exclusive bounds — semantics defined by the consumer.
    upper_bound = attr.ib(type=float)
    lower_bound = attr.ib(type=float)
class NoAugWaterbirdsCelebATransform(BaseWaterbirdsCelebATransform):
    """Waterbirds/CelebA transform with augmentation disabled and ImageNet
    normalization stats.

    NOTE(review): the `train` argument is accepted but ignored — presumably
    to keep the constructor signature uniform with augmenting variants;
    confirm against sibling transforms before relying on it.
    """
    def __init__(self, train):
        super().__init__(augment=False, normalize_stats=IMAGENET_STATS)
def get_latent_grads(backdoor_label, model, inputs, labels):
    """Grad-CAM-style pooled gradients for the samples labelled
    `backdoor_label`.

    Backpropagates each sample's own-class logit, reads the captured
    gradients via `model.get_gradient()`, keeps only the rows whose label
    equals `backdoor_label`, and mean-pools over batch and spatial dims.
    """
    model.eval()
    model.zero_grad()
    logits = model(inputs)
    # One-hot mask that selects each sample's own class logit.
    one_hot = torch.zeros_like(logits)
    one_hot[(list(range(labels.shape[0])), labels)] = 1
    selected = logits * one_hot
    selected.sum().backward(retain_graph=True)
    grads = model.get_gradient()[(labels == backdoor_label)]
    pooled = torch.mean(grads, dim=[0, 2, 3]).detach()
    model.zero_grad()
    return pooled
class LSTM_Univariate(nn.Module):
    """One independent single-hidden-unit LSTM per input feature; each
    timestep's features are processed separately and re-concatenated.

    NOTE(review): the hidden/cell states are created as float64 while
    nn.LSTM defaults to float32 — this only works if the model is cast to
    double elsewhere (e.g. model.double()); confirm at the call site.
    """
    def __init__(self, feats):
        super(LSTM_Univariate, self).__init__()
        self.name = 'LSTM_Univariate'
        self.lr = 0.002
        self.n_feats = feats
        self.n_hidden = 1
        # One univariate LSTM per feature dimension.
        self.lstm = nn.ModuleList([nn.LSTM(1, self.n_hidden) for i in range(feats)])
    def forward(self, x):
        # Fresh random (h0, c0) per forward pass, one pair per feature.
        hidden = [(torch.rand(1, 1, self.n_hidden, dtype=torch.float64), torch.randn(1, 1, self.n_hidden, dtype=torch.float64)) for i in range(self.n_feats)]
        outputs = []
        for (i, g) in enumerate(x):
            multivariate_output = []
            for j in range(self.n_feats):
                # Feed feature j of this timestep as a (seq=1, batch=1, 1) input.
                univariate_input = g.view((- 1))[j].view(1, 1, (- 1))
                (out, hidden[j]) = self.lstm[j](univariate_input, hidden[j])
                # NOTE(review): output is scaled by 2 — purpose unclear; confirm.
                multivariate_output.append((2 * out.view((- 1))))
            output = torch.cat(multivariate_output)
            outputs.append(output)
        return torch.stack(outputs)
class DataHandlerTest(unittest.TestCase):
    """Unit tests for ISR.utils.datahandler.DataHandler.

    All filesystem and image access is patched: `fake_folders` stands in for
    os.listdir (keyed by a fake directory descriptor
    {'res': 'hr'|'lr', 'matching': bool}), `path_giver` for os.path.join and
    `image_getter` for imageio.imread, so no real data is needed.
    """
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def fake_folders(self, kind):
        # os.listdir replacement returning fabricated file lists; the
        # non-matching variant has HR files without LR counterparts.
        if (kind['matching'] == False):
            if (kind['res'] == 'hr'):
                return ['data2.gif', 'data1.png', 'data0.jpeg']
            elif (kind['res'] == 'lr'):
                return ['data1.png']
            else:
                raise
        if (kind['matching'] == True):
            if (kind['res'] == 'hr'):
                return ['data2.gif', 'data1.png', 'data0.jpeg']
            elif (kind['res'] == 'lr'):
                return ['data1.png', 'data0.jpeg']
            else:
                raise
    def path_giver(self, d, b):
        # os.path.join replacement: collapse any path to its resolution tag.
        if (d['res'] == 'hr'):
            return 'hr'
        else:
            return 'lr'
    def image_getter(self, res):
        # imageio.imread replacement: 20x20 HR images, 10x10 LR images.
        if (res == 'hr'):
            return np.random.random((20, 20, 3))
        else:
            return np.random.random((10, 10, 3))
    def test__make_img_list_non_validation(self):
        # Only files present in both folders (by extension filter) survive.
        with patch('os.listdir', side_effect=self.fake_folders):
            with patch('ISR.utils.datahandler.DataHandler._check_dataset', return_value=True):
                DH = DataHandler(lr_dir={'res': 'lr', 'matching': False}, hr_dir={'res': 'hr', 'matching': False}, patch_size=0, scale=0, n_validation_samples=None)
                expected_ls = {'hr': ['data0.jpeg', 'data1.png'], 'lr': ['data1.png']}
                self.assertTrue(np.all((DH.img_list['hr'] == expected_ls['hr'])))
                self.assertTrue(np.all((DH.img_list['lr'] == expected_ls['lr'])))
    def test__make_img_list_validation(self):
        # With n_validation_samples set, a random subset is selected (choice
        # is patched to always pick index 0).
        with patch('os.listdir', side_effect=self.fake_folders):
            with patch('ISR.utils.datahandler.DataHandler._check_dataset', return_value=True):
                with patch('numpy.random.choice', return_value=np.array([0])):
                    DH = DataHandler(lr_dir={'res': 'lr', 'matching': False}, hr_dir={'res': 'hr', 'matching': False}, patch_size=0, scale=0, n_validation_samples=10)
                    expected_ls = {'hr': ['data0.jpeg'], 'lr': ['data1.png']}
                    self.assertTrue(np.all((DH.img_list['hr'] == expected_ls['hr'])))
                    self.assertTrue(np.all((DH.img_list['lr'] == expected_ls['lr'])))
    def test__check_dataset_with_mismatching_data(self):
        # Mismatched LR/HR listings must make the constructor raise.
        try:
            with patch('os.listdir', side_effect=self.fake_folders):
                DH = DataHandler(lr_dir={'res': 'lr', 'matching': False}, hr_dir={'res': 'hr', 'matching': False}, patch_size=0, scale=0, n_validation_samples=None)
        except:
            self.assertTrue(True)
        else:
            self.assertTrue(False)
    def test__check_dataset_with_matching_data(self):
        with patch('os.listdir', side_effect=self.fake_folders):
            DH = DataHandler(lr_dir={'res': 'lr', 'matching': True}, hr_dir={'res': 'hr', 'matching': True}, patch_size=0, scale=0, n_validation_samples=None)
    def test__not_flat_with_flat_patch(self):
        # An all-zeros patch is "flat" and should be rejected.
        lr_patch = np.zeros((5, 5, 3))
        with patch('ISR.utils.datahandler.DataHandler._make_img_list', return_value=True):
            with patch('ISR.utils.datahandler.DataHandler._check_dataset', return_value=True):
                DH = DataHandler(lr_dir=None, hr_dir=None, patch_size=0, scale=0, n_validation_samples=None)
                self.assertFalse(DH._not_flat(lr_patch, flatness=0.1))
    def test__not_flat_with_non_flat_patch(self):
        lr_patch = np.random.random((5, 5, 3))
        with patch('ISR.utils.datahandler.DataHandler._make_img_list', return_value=True):
            with patch('ISR.utils.datahandler.DataHandler._check_dataset', return_value=True):
                DH = DataHandler(lr_dir=None, hr_dir=None, patch_size=0, scale=0, n_validation_samples=None)
                self.assertTrue(DH._not_flat(lr_patch, flatness=1e-05))
    def test__crop_imgs_crops_shapes(self):
        # Crops should be patch_size (LR) and patch_size*scale (HR) squares.
        with patch('ISR.utils.datahandler.DataHandler._make_img_list', return_value=True):
            with patch('ISR.utils.datahandler.DataHandler._check_dataset', return_value=True):
                DH = DataHandler(lr_dir=None, hr_dir=None, patch_size=3, scale=2, n_validation_samples=None)
                imgs = {'hr': np.random.random((20, 20, 3)), 'lr': np.random.random((10, 10, 3))}
                crops = DH._crop_imgs(imgs, batch_size=2, flatness=0)
                self.assertTrue((crops['hr'].shape == (2, 6, 6, 3)))
                self.assertTrue((crops['lr'].shape == (2, 3, 3, 3)))
    def test__apply_transorm(self):
        # 2x2 block image makes flips/rotations easy to verify visually.
        I = np.ones((2, 2))
        A = (I * 0)
        B = (I * 1)
        C = (I * 2)
        D = (I * 3)
        image = np.block([[A, B], [C, D]])
        with patch('ISR.utils.datahandler.DataHandler._make_img_list', return_value=True):
            with patch('ISR.utils.datahandler.DataHandler._check_dataset', return_value=True):
                DH = DataHandler(lr_dir=None, hr_dir=None, patch_size=3, scale=2, n_validation_samples=None)
                transf = [[1, 0], [0, 1], [2, 0], [0, 2], [1, 1], [0, 0]]
                self.assertTrue(np.all((np.block([[C, A], [D, B]]) == DH._apply_transform(image, transf[0]))))
                self.assertTrue(np.all((np.block([[C, D], [A, B]]) == DH._apply_transform(image, transf[1]))))
                self.assertTrue(np.all((np.block([[B, D], [A, C]]) == DH._apply_transform(image, transf[2]))))
                self.assertTrue(np.all((np.block([[B, A], [D, C]]) == DH._apply_transform(image, transf[3]))))
                self.assertTrue(np.all((np.block([[D, B], [C, A]]) == DH._apply_transform(image, transf[4]))))
                self.assertTrue(np.all((image == DH._apply_transform(image, transf[5]))))
    def test__transform_batch(self):
        with patch('ISR.utils.datahandler.DataHandler._make_img_list', return_value=True):
            with patch('ISR.utils.datahandler.DataHandler._check_dataset', return_value=True):
                DH = DataHandler(lr_dir=None, hr_dir=None, patch_size=3, scale=2, n_validation_samples=None)
                I = np.ones((2, 2))
                A = (I * 0)
                B = (I * 1)
                C = (I * 2)
                D = (I * 3)
                image = np.block([[A, B], [C, D]])
                t_image_1 = np.block([[D, B], [C, A]])
                t_image_2 = np.block([[B, D], [A, C]])
                batch = np.array([image, image])
                expected = np.array([t_image_1, t_image_2])
                self.assertTrue(np.all((DH._transform_batch(batch, [[1, 1], [2, 0]]) == expected)))
    def test_get_batch_shape_and_diversity(self):
        patch_size = 3
        with patch('os.listdir', side_effect=self.fake_folders):
            with patch('ISR.utils.datahandler.DataHandler._check_dataset', return_value=True):
                DH = DataHandler(lr_dir={'res': 'lr', 'matching': True}, hr_dir={'res': 'hr', 'matching': True}, patch_size=patch_size, scale=2, n_validation_samples=None)
        with patch('imageio.imread', side_effect=self.image_getter):
            with patch('os.path.join', side_effect=self.path_giver):
                batch = DH.get_batch(batch_size=5)
                self.assertTrue((type(batch) is dict))
                self.assertTrue((batch['hr'].shape == (5, (patch_size * 2), (patch_size * 2), 3)))
                self.assertTrue((batch['lr'].shape == (5, patch_size, patch_size, 3)))
                # Patches come from random crops, so at least one pair differs.
                self.assertTrue(np.any([(batch['lr'][0] != batch['lr'][1]), (batch['lr'][1] != batch['lr'][2]), (batch['lr'][2] != batch['lr'][3]), (batch['lr'][3] != batch['lr'][4])]))
    def test_get_validation_batches_invalid_number_of_samples(self):
        # Requesting validation batches without n_validation_samples must fail.
        patch_size = 3
        with patch('os.listdir', side_effect=self.fake_folders):
            with patch('ISR.utils.datahandler.DataHandler._check_dataset', return_value=True):
                DH = DataHandler(lr_dir={'res': 'lr', 'matching': True}, hr_dir={'res': 'hr', 'matching': True}, patch_size=patch_size, scale=2, n_validation_samples=None)
        with patch('imageio.imread', side_effect=self.image_getter):
            with patch('os.path.join', side_effect=self.path_giver):
                try:
                    # NOTE(review): patch('raise', None) is not a valid patch
                    # target; it raises inside the try and so the test still
                    # passes, but presumably not as intended — confirm.
                    with patch('raise', None):
                        batch = DH.get_validation_batches(batch_size=5)
                except:
                    self.assertTrue(True)
                else:
                    self.assertTrue(False)
    def test_get_validation_batches_requesting_more_than_available(self):
        patch_size = 3
        with patch('os.listdir', side_effect=self.fake_folders):
            with patch('ISR.utils.datahandler.DataHandler._check_dataset', return_value=True):
                try:
                    DH = DataHandler(lr_dir={'res': 'lr', 'matching': True}, hr_dir={'res': 'hr', 'matching': True}, patch_size=patch_size, scale=2, n_validation_samples=10)
                except:
                    self.assertTrue(True)
                else:
                    self.assertTrue(False)
    def test_get_validation_batches_valid_request(self):
        patch_size = 3
        with patch('ISR.utils.datahandler.DataHandler._check_dataset', return_value=True):
            with patch('os.listdir', side_effect=self.fake_folders):
                DH = DataHandler(lr_dir={'res': 'lr', 'matching': True}, hr_dir={'res': 'hr', 'matching': True}, patch_size=patch_size, scale=2, n_validation_samples=2)
        with patch('imageio.imread', side_effect=self.image_getter):
            with patch('os.path.join', side_effect=self.path_giver):
                batch = DH.get_validation_batches(batch_size=12)
                self.assertTrue((len(batch) == 2))
                self.assertTrue((type(batch) is list))
                self.assertTrue((type(batch[0]) is dict))
                self.assertTrue((batch[0]['hr'].shape == (12, (patch_size * 2), (patch_size * 2), 3)))
                self.assertTrue((batch[0]['lr'].shape == (12, patch_size, patch_size, 3)))
                self.assertTrue((batch[1]['hr'].shape == (12, (patch_size * 2), (patch_size * 2), 3)))
                self.assertTrue((batch[1]['lr'].shape == (12, patch_size, patch_size, 3)))
    def test_validation_set(self):
        # get_validation_set flattens the per-sample batches into one dict.
        patch_size = 3
        with patch('os.listdir', side_effect=self.fake_folders):
            with patch('ISR.utils.datahandler.DataHandler._check_dataset', return_value=True):
                DH = DataHandler(lr_dir={'res': 'lr', 'matching': True}, hr_dir={'res': 'hr', 'matching': True}, patch_size=patch_size, scale=2, n_validation_samples=2)
        with patch('imageio.imread', side_effect=self.image_getter):
            with patch('os.path.join', side_effect=self.path_giver):
                batch = DH.get_validation_set(batch_size=12)
                self.assertTrue((type(batch) is dict))
                self.assertTrue((len(batch) == 2))
                self.assertTrue((batch['hr'].shape == (24, (patch_size * 2), (patch_size * 2), 3)))
                self.assertTrue((batch['lr'].shape == (24, patch_size, patch_size, 3)))
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp13():
    """Experiment-13 VHRED configuration for the Ubuntu dialogue corpus:
    latent Gaussian variables enabled, piecewise variables disabled, LSTM
    decoder with KL-divergence annealing. Returns the prototype state dict
    with all overrides applied."""
    state = prototype_state()
    state.update({
        'end_sym_utterance': '__eot__',
        # Special token ids; unused speaker/meta symbols are disabled with -1.
        'unk_sym': 0,
        'eos_sym': 1,
        'eod_sym': -1,
        'first_speaker_sym': -1,
        'second_speaker_sym': -1,
        'third_speaker_sym': -1,
        'minor_speaker_sym': -1,
        'voice_over_sym': -1,
        'off_screen_sym': -1,
        'pause_sym': -1,
        # Dataset paths.
        'train_dialogues': '../UbuntuData/Training.dialogues.pkl',
        'test_dialogues': '../UbuntuData/Test.dialogues.pkl',
        'valid_dialogues': '../UbuntuData/Validation.dialogues.pkl',
        'dictionary': '../UbuntuData/Dataset.dict.pkl',
        'save_dir': 'Output',
        # Training schedule.
        'max_grad_steps': 80,
        'valid_freq': 5000,
        'prefix': 'UbuntuModel_',
        'updater': 'adam',
        # Encoder/decoder architecture.
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'utterance_decoder_gating': 'LSTM',
        'direct_connection_between_encoders_and_decoder': True,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        # Latent Gaussian variables (enabled in this experiment).
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        # Latent piecewise variables (disabled in this experiment).
        'add_latent_piecewise_per_utterance': False,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        # KL annealing and decoder word dropout.
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 75000.0,
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'deep_utterance_decoder_input': True,
        'patience': 20,
        'kl_divergence_max_weight': 0.75,
    })
    return state
def get_dataset(name: str, use_lcc: bool=True, data_dir=DEFAULT_DATA_PATH) -> InMemoryDataset:
    """Load a graph benchmark dataset by name, optionally restricted to its
    largest connected component (LCC), with labels remapped to 0..C-1.

    Args:
        name: one of Cora/Citeseer/Pubmed/Computers/Photo/CoauthorCS/
            Cornell/Texas/Wisconsin/Chameleon/Squirrel/Actor.
        use_lcc: if True, keep only nodes/edges inside the LCC and attach
            fresh (all-False) train/val/test masks.
        data_dir: root directory where the raw dataset is downloaded/cached.

    Returns:
        The loaded ``InMemoryDataset`` with its ``data`` attribute rewritten.

    Raises:
        Exception: if ``name`` is not a known dataset.
    """
    path = os.path.join(data_dir, name)
    if name in ['Cora', 'Citeseer', 'Pubmed']:
        dataset = Planetoid(path, name)
    elif name in ['Computers', 'Photo']:
        dataset = Amazon(path, name)
    elif name == 'CoauthorCS':
        dataset = Coauthor(path, 'CS')
    elif name in ['Cornell', 'Texas', 'Wisconsin']:
        dataset = WebKB(path, name)
    elif name in ['Chameleon', 'Squirrel']:
        dataset = WikipediaNetwork(path, name, geom_gcn_preprocess=True)
    elif name == 'Actor':
        dataset = Actor(path, 'Actor')
    else:
        raise Exception(f'Unknown dataset: {name}')
    if use_lcc:
        lcc = get_largest_connected_component(dataset)
        x_new = dataset.data.x[lcc]
        y_new = dataset.data.y[lcc]
        (row, col) = dataset.data.edge_index.numpy()
        # Membership tests against `lcc` were linear per edge endpoint
        # (O(E*V) overall); a set makes the edge filter O(E).
        lcc_set = set(lcc)
        edges = [[i, j] for (i, j) in zip(row, col) if (i in lcc_set) and (j in lcc_set)]
        edges = remap_edges(edges, get_node_mapper(lcc))
        num_nodes = y_new.size()[0]
        data = Data(x=x_new, edge_index=torch.LongTensor(edges), y=y_new,
                    train_mask=torch.zeros(num_nodes, dtype=torch.bool),
                    test_mask=torch.zeros(num_nodes, dtype=torch.bool),
                    val_mask=torch.zeros(num_nodes, dtype=torch.bool))
        dataset.data = data
    # Remap (possibly non-contiguous) class labels to the range 0..C-1.
    classes = np.unique(dataset.data.y)
    mapping = dict(zip(classes, range(len(classes))))
    dataset.data.y = torch.LongTensor([mapping[u] for u in np.array(dataset.data.y)])
    return dataset
def check_box_3d_format(input_data):
    """Validate that ``input_data`` is in box_3d format (7 attributes per box).

    Accepts either a numpy array (shape ``(N, 7)`` or ``(7,)``) or a
    ``tf.Tensor`` with last-but-one known static shape ``(N, 7)``.

    Raises:
        TypeError: if the attribute count is not 7, or the input is neither
            an ``np.ndarray`` nor a ``tf.Tensor``.
    """
    if isinstance(input_data, np.ndarray):
        if input_data.ndim == 2:
            if input_data.shape[1] != 7:
                raise TypeError('Given input does not have valid number of attributes. Should be N x 7 for box_3d.')
        elif input_data.ndim == 1:
            if input_data.shape[0] != 7:
                raise TypeError('Given input does not have valid number of attributes. Should be 7 for box_3d.')
        # NOTE(review): arrays with ndim > 2 pass silently, matching the
        # original behaviour — confirm whether they should be rejected.
    elif isinstance(input_data, tf.Tensor):
        # The original repeated the identical isinstance() check here;
        # the redundant inner check has been removed.
        if input_data.shape[1] != 7:
            raise TypeError('Given input does not have valid number of attributes. Should be N x 7 for box_3d.')
    else:
        raise TypeError('Given input is not of valid types.(i.e. np.ndarray or tf.Tensor)')
def validate(val_loader, tracking_module, step, part='train', fusion_list=None, fuse_prob=False):
    """Run the tracker over every sequence in ``val_loader``, write per-sequence
    KITTI result files, and return aggregate MOT metrics for ``part``.

    NOTE(review): ``fusion_list`` and ``fuse_prob`` are accepted but never
    used here — possibly consumed by callers elsewhere; confirm.
    Relies on module-level ``config`` and ``args`` globals.
    """
    logger = logging.getLogger('global_logger')
    for (i, sequence) in enumerate(val_loader):
        logger.info('Test: [{}/{}]\tSequence ID: KITTI-{}'.format(i, len(val_loader), sequence.name))
        seq_loader = DataLoader(sequence, batch_size=config.batch_size, shuffle=False, num_workers=config.workers, pin_memory=True)
        if (len(seq_loader) == 0):
            # NOTE(review): eval() is switched on only in the empty-sequence
            # branch — presumably validate_seq() sets eval mode itself, but
            # this placement looks accidental; confirm against validate_seq.
            tracking_module.eval()
            logger.info('Empty Sequence ID: KITTI-{}, skip'.format(sequence.name))
        else:
            validate_seq(seq_loader, tracking_module)
        # Results are written per sequence, even for skipped (empty) ones.
        write_kitti_result(args.result_path, sequence.name, step, tracking_module.frames_id, tracking_module.frames_det, part=part)
    # Aggregate MOT metrics over everything written above.
    (MOTA, MOTP, recall, prec, F1, fp, fn, id_switches) = evaluate(step, args.result_path, part=part)
    # Restore training mode before handing control back to the train loop.
    tracking_module.train()
    return (MOTA, MOTP, recall, prec, F1, fp, fn, id_switches)
# NOTE(review): this bare call looks like the mangled remnant of a decorator
# (likely ``@torch.no_grad()``) — confirm against the upstream conversion script.
_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Convert a Keras EfficientNet checkpoint to a HuggingFace
    ``EfficientNetForImageClassification``, verify the logits match the
    original within 1e-3, and optionally save / push to the hub.
    """
    # Instantiate the reference Keras model with ImageNet weights.
    original_model = model_classes[model_name](include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax')
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    # Collect both trainable and non-trainable (e.g. batch-norm statistics)
    # parameters into one name -> ndarray mapping.
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()
    print('Converting parameters...')
    # Map TF variable names onto HF state-dict keys and copy the weights over.
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors='pt')
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]['image_size']
    # NEAREST resampling to mirror the preprocessor's resize behaviour —
    # TODO confirm this matches convert_image_processor's settings.
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)
    # Sanity check: the converted model must reproduce the original logits.
    assert np.allclose(original_logits, hf_logits, atol=0.001), 'The predicted logits are not the same.'
    print('Model outputs match!')
    if save_model:
        if (not os.path.isdir(pytorch_dump_folder_path)):
            os.mkdir(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f'Pushing converted {model_name} to the hub...')
        model_name = f'efficientnet-{model_name}'
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
def Variable(initial_value, dtype=None):
    """Wrap ``initial_value`` in a trainable ``tf.Variable``.

    Args:
        initial_value: initial tensor/array content of the variable.
        dtype: optional element type; inferred by TensorFlow when None.

    Returns:
        A trainable ``tf.Variable``.
    """
    variable = tf.Variable(initial_value=initial_value, trainable=True, dtype=dtype)
    return variable
def create_lm_sequence(dp_json):
    """Flatten a two-field record into one language-model training string.

    Args:
        dp_json: a mapping with exactly two items, in order:
            (prompt_key, prompt_value) then (completion_key, completion_value).
            Values are coerced to ``str``; keys are upper-cased.

    Returns:
        ``'PROMPT_KEY prompt_value COMPLETION_KEY completion_value'``.

    Raises:
        ValueError: if ``dp_json`` does not contain exactly two items.
    """
    # Unpacking enforces the exactly-two-items contract.
    (prompt_key, prompt_text), (completion_key, completion_text) = dp_json.items()
    prompt = f'{prompt_key.upper()} {prompt_text}'
    completion = f'{completion_key.upper()} {completion_text}'
    return f'{prompt} {completion}'
class HyperAnalysisTransform(nn.Module):
    """Hyper-prior analysis transform: one stride-1 conv followed by two
    stride-2 convs (4x spatial downsampling overall), ReLU in between.

    Note: ``forward`` applies ``relu_h1`` *before* ``conv_h1`` — this order
    is preserved from the original implementation.
    """

    def __init__(self, num_filters=192):
        super(HyperAnalysisTransform, self).__init__()
        nf = num_filters
        self.conv_h1 = nn.Conv2d(nf, nf, 3, stride=1, padding=1)
        self.relu_h1 = nn.ReLU()
        self.conv_h2 = nn.Conv2d(nf, nf, 5, stride=2, padding=2)
        self.relu_h2 = nn.ReLU()
        # Last layer has no bias and no activation.
        self.conv_h3 = nn.Conv2d(nf, nf, 5, stride=2, padding=2, bias=False)

    def forward(self, x):
        out = self.conv_h1(self.relu_h1(x))
        out = self.relu_h2(self.conv_h2(out))
        return self.conv_h3(out)
# NOTE(review): this bare expression looks like the remnant of a registration
# decorator (e.g. @DatasetReader.register('cnndm')) — confirm against upstream.
('cnndm')
class CNNDMDatasetReader(DatasetReader):
    """Reads pre-processed CNN/DailyMail-style ``.pt`` shards (BERT sub-word
    ids, sentence/discourse spans and oracle labels) and yields AllenNLP
    ``Instance`` objects for extractive summarization.

    Training shards are consumed lazily and reshuffled each epoch via
    ``yield_data``; test/validation shards are read once in directory order.
    """

    def __init__(self, lazy: bool=True, bert_model_name: str='bert-base-uncased', max_bpe: int=None, token_indexers: Dict[(str, TokenIndexer)]=PretrainedBertIndexer('bert-base-uncased'), debug: bool=False, bertsum_oracle: bool=True, semantic_red_map: bool=True, semantic_red_map_key: List[str]=None) -> None:
        # NOTE(review): the mutable/stateful default for ``token_indexers``
        # is shared across all reader instances — kept for interface
        # compatibility; confirm this sharing is intended.
        super().__init__(lazy=lazy)
        self._token_indexers = (token_indexers or {'tokens': SingleIdTokenIndexer()})
        if (max_bpe is not None):
            self._token_indexers['bert'].max_pieces = max_bpe
        self._debug = debug
        self.bert_tokenizer = BertTokenizer.from_pretrained(bert_model_name)
        self.lowercase_input = ('uncased' in bert_model_name)
        logger.info('Finish Initializing of Dataset Reader')
        # Lookup table: BERT vocab id -> token string.
        self.bert_lut = list(self._token_indexers['bert'].vocab.items())
        self.bert_lut = [x[0] for x in self.bert_lut]
        self.max_bpe = max_bpe
        self.train_pts = []
        self.bertsum_oracle = bertsum_oracle
        self.semantic_red_map = semantic_red_map
        if semantic_red_map:
            self.map_kiosk = MapKiosk(semantic_red_map_key)
        random.seed(1112)

    def boil_pivot_table(self, sent_txt):
        """Placeholder — not implemented."""
        pass

    def refill_data(self, fpath):
        """(Re)populate the shuffled list of ``.pt`` shard filenames for
        ``fpath``'s partition, but only when the list is missing or exhausted."""
        partition_name = identify_partition_name(fpath)
        if hasattr(self, partition_name) and (getattr(self, partition_name) != []):
            return
        print('start a new round of loading training data')
        files = [f for f in os.listdir(fpath) if f.endswith('pt')]
        random.shuffle(files)
        setattr(self, partition_name, files)

    def yield_data(self, part_name, fpath):
        """Endlessly yield shard filenames, reshuffling after each exhaustion."""
        while True:
            self.refill_data(fpath)
            yield getattr(self, part_name).pop()

    @staticmethod
    def _corpus_name(fname):
        """Infer the source corpus from a shard filename."""
        if ('cnn' in fname):
            return 'cnn'
        if ('dailymail' in fname):
            return 'dailymail'
        if ('nyt' in fname):
            return 'nyt'
        return 'unk'

    def _instances_from_shard(self, file_path, f):
        """Load one ``.pt`` shard and yield an Instance per example."""
        dataset = torch.load(os.path.join(file_path, f))
        msg = ('Loading dataset from %s, number of examples: %d' % (f, len(dataset)))
        print(msg)
        logger.info(msg)
        name = self._corpus_name(f)
        for d in dataset:
            yield self.text_to_instance(d['src'], d['labels'], d['segs'], d['clss'], d['sent_txt'], d['disco_txt'], d['tgt_list_str'], d['tgt_tok_list_list_str'], d['d_labels'], d['d_span'], d['d_coref'], d['d_graph'], d['disco_dep'], d['doc_id'], identify_partition_name(f), name)

    def _read(self, file_path):
        """Yield instances: endless shuffled stream for 'train', one pass
        over the directory otherwise (optionally truncated in debug mode)."""
        partition_name = identify_partition_name(file_path)
        if (partition_name == 'train'):
            for f in self.yield_data(partition_name, file_path):
                yield from self._instances_from_shard(file_path, f)
        else:
            files = [f for f in os.listdir(file_path) if f.endswith('pt')]
            if self._debug:
                logger.warning('debug mode only loads part of test set!')
                files = files[:1]
            for f in files:
                yield from self._instances_from_shard(file_path, f)

    # The three helpers below were defined without ``self`` yet invoked as
    # ``self.method(...)``, which raised TypeError at runtime; they are now
    # proper staticmethods (same call sites, same signatures).

    @staticmethod
    def create_disco_coref(disco_coref, num_of_disco):
        """Build a symmetric coreference edge list (with self-loops) over
        discourse units, dropping out-of-range and self-referencing pairs."""
        disco_coref = [x for x in disco_coref if (x[0] != x[1])]
        coref_graph_as_list_of_tuple = [(x, x) for x in range(num_of_disco)]
        for cor in disco_coref:
            (x, y) = cor
            if ((x < num_of_disco) and (y < num_of_disco)):
                coref_graph_as_list_of_tuple.append((x, y))
                coref_graph_as_list_of_tuple.append((y, x))
        return coref_graph_as_list_of_tuple

    @staticmethod
    def create_disco_graph(disco_graph, num_of_disco: int) -> List[tuple]:
        """Keep only RST edges whose endpoints are both in range."""
        dis_graph_as_list_of_tuple = []
        for rst in disco_graph:
            (rst_src, rst_tgt) = (rst[0], rst[1])
            if ((rst_src < num_of_disco) and (rst_tgt < num_of_disco)):
                dis_graph_as_list_of_tuple.append((rst_src, rst_tgt))
        return dis_graph_as_list_of_tuple

    @staticmethod
    def map_disco_to_sent(disco_span: List[tuple]):
        """Map each discourse unit to its sentence index: a new sentence
        starts whenever a span does not begin where the previous one ended."""
        map_to_sent = [0 for _ in range(len(disco_span))]
        curret_sent = 0
        current_idx = 1
        for (idx, disco) in enumerate(disco_span):
            if (disco[0] == current_idx):
                map_to_sent[idx] = curret_sent
            else:
                curret_sent += 1
                map_to_sent[idx] = curret_sent
            current_idx = disco[1]
        return map_to_sent

    def text_to_instance(self, doc_text: List[int], labels: List[int], segs: List[int], clss: List[int], sent_txt: List[str], disco_txt: List[str], tgt_list_str: List[str], tgt_tok_list_list_str: List[List[str]], disco_label, disco_span, disco_coref, disco_graph, disco_dep, doc_id: str, spilit_type, dataset_name):
        """Assemble one AllenNLP Instance from a pre-processed example."""
        assert (len(segs) > 0)
        assert (len(labels) > 0)
        assert (len(clss) > 0)
        # Guard against max_bpe=None (the default): the original compared
        # ``None < 768`` which raises TypeError on Python 3.
        if (self.max_bpe is not None) and (self.max_bpe < 768):
            # Truncate everything that indexes into the BPE sequence.
            clss = [x for x in clss if (x < self.max_bpe)]
            doc_text = doc_text[:self.max_bpe]
            segs = segs[:self.max_bpe]
            actual_sent_len = len(clss)
            labels = [l[:actual_sent_len] for l in labels]
            disco_span = [x for x in disco_span if (x[1] < self.max_bpe)]
            num_of_disco = len(disco_span)
            disco_label = [l[:num_of_disco] for l in disco_label]
        else:
            actual_sent_len = len(sent_txt)
            num_of_disco = len(disco_label[0])
        # Drop [CLS]/[SEP] at the ends; keep original id in Token.idx.
        text_tokens = [Token(text=self.bert_lut[x], idx=x) for x in doc_text][1:(- 1)]
        text_tokens = TextField(text_tokens, self._token_indexers)
        if self.semantic_red_map:
            maps = self.map_kiosk.single_entry_entrance(sent_txt[:actual_sent_len], tgt_tok_list_list_str)
            for (k, v) in maps.items():
                _v = ArrayField(np.asarray(v), padding_value=(- 1), dtype=np.float32)
                maps[k] = _v
        else:
            maps = {}
        if self.bertsum_oracle:
            # Recompute BERTSUM-style oracle labels (top-3 greedy selection).
            labels = original_greedy_selection(sent_txt[:actual_sent_len], tgt_tok_list_list_str, 3)
            z = np.zeros((1, actual_sent_len))
            for l in labels:
                z[0][l] = 1
            labels = z
        # ``np.int`` (an alias of builtin int, removed in numpy >= 1.24)
        # replaced by ``int`` — identical dtype semantics.
        labels = ArrayField(np.asarray(labels), padding_value=(- 1), dtype=int)
        segs = ArrayField(np.asarray(segs), padding_value=0, dtype=int)
        clss = ArrayField(np.asarray(clss), padding_value=(- 1), dtype=int)
        disco_label = label_filter(disco_label)
        disco_label = ArrayField(np.asarray(disco_label), padding_value=(- 1), dtype=int)
        disco_map_to_sent: List[int] = self.map_disco_to_sent(disco_span)
        disco_span = ArrayField(np.asarray(disco_span), padding_value=(- 1), dtype=int)
        coref_graph = self.create_disco_coref(disco_coref, num_of_disco)
        dis_graph = self.create_disco_graph(disco_graph, num_of_disco)
        meta_field = MetadataField({'source': dataset_name, 'type': spilit_type, 'sent_txt': sent_txt, 'disco_txt': disco_txt, 'tgt_txt': '<q>'.join(tgt_list_str), 'disco_dep': disco_dep, 'doc_id': doc_id, 'disco_rst_graph': dis_graph, 'disco_coref_graph': coref_graph, 'disco_map_to_sent': disco_map_to_sent})
        fields = {'tokens': text_tokens, 'labels': labels, 'segs': segs, 'clss': clss, 'meta_field': meta_field, 'disco_label': disco_label, 'disco_span': disco_span}
        fields = {**maps, **fields}
        return Instance(fields)
class ResNet(nn.Module):
    """Standard 4-stage ResNet backbone with a linear classifier head.

    ``forward`` returns ``(logits, features)`` where ``features`` is the
    flattened output of the last stage after 4x4 average pooling.
    """

    def __init__(self, block, num_blocks, num_classes, nf, bias):
        super(ResNet, self).__init__()
        self.in_planes = nf
        self.conv1 = conv3x3(3, nf * 1)
        self.bn1 = nn.BatchNorm2d(nf * 1)
        # Stage widths double each stage; the first stage keeps stride 1.
        self.layer1 = self._make_layer(block, nf * 1, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, nf * 2, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, nf * 4, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, nf * 8, num_blocks[3], stride=2)
        self.linear = nn.Linear(nf * 8 * block.expansion, num_classes, bias=bias)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack ``num_blocks`` blocks; only the first one may downsample."""
        blocks = []
        for s in [stride] + [1] * (num_blocks - 1):
            blocks.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*blocks)

    def features(self, x):
        """Return the flattened feature vector for input images ``x``."""
        h = relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = avg_pool2d(h, 4)
        return h.contiguous().view(h.size(0), -1)

    def logits(self, x):
        """Classifier head over pooled features."""
        return self.linear(x)

    def forward(self, x):
        feats = self.features(x)
        return (self.logits(feats), feats)
class OpenVINOModel():
    """Runs a torch 3D model (KPConv segmentation or PointPillars-style
    detection) through the OpenVINO Inference Engine.

    The first ``forward`` call exports ``base_model`` to ONNX in memory and
    compiles it; subsequent calls reuse the compiled network. Most other
    methods delegate straight to ``base_model``.
    """
    def __init__(self, base_model):
        self.ie = IECore()
        self.exec_net = None  # compiled network; created lazily on first forward
        self.base_model = base_model
        self.device = 'CPU'
        # NOTE(review): process-wide monkey-patch — presumably works around a
        # missing ONNX export for torch.square; it affects *every* user of
        # torch.square in this process. Confirm this is intentional.
        torch.square = (lambda x: torch.pow(x, 2))
    def _get_input_names(self, inputs):
        """Flatten input dict keys; list-valued entries become name0, name1, ..."""
        names = []
        for (name, tensor) in inputs.items():
            if isinstance(tensor, list):
                for i in range(len(tensor)):
                    names.append((name + str(i)))
            else:
                names.append(name)
        return names
    def _get_inputs(self, inputs, export=False):
        """Normalize a batch object into a plain name -> tensor dict.

        For detection batches, voxelization and the first encoder stages run
        in torch; only the remaining network is handed to OpenVINO.
        """
        if isinstance(inputs, dataloaders.concat_batcher.KPConvBatch):
            inputs = {'features': inputs.features, 'points': inputs.points, 'neighbors': inputs.neighbors, 'pools': inputs.pools, 'upsamples': inputs.upsamples}
        elif isinstance(inputs, dataloaders.concat_batcher.ObjectDetectBatch):
            (voxels, num_points, coors) = self.base_model.voxelize(inputs.point)
            voxel_features = self.base_model.voxel_encoder(voxels, num_points, coors)
            # Batch size is recovered from the last voxel's batch coordinate.
            batch_size = (coors[((- 1), 0)].item() + 1)
            x = self.base_model.middle_encoder(voxel_features, coors, batch_size)
            inputs = {'x': x}
        elif (not isinstance(inputs, dict)):
            raise TypeError(f'Unknown inputs type: {inputs.__class__}')
        return inputs
    def _read_torch_model(self, inputs):
        """Export ``base_model`` to ONNX (in memory) and compile it with IE.

        Temporarily rebinds ``forward``/``extract_feats`` so tracing sees the
        prepared tensor dict; ``forward`` is restored afterwards.
        """
        inputs = copy.deepcopy(inputs)
        tensors = self._get_inputs(inputs)
        input_names = self._get_input_names(tensors)
        origin_forward = self.base_model.forward
        self.base_model.forward = (lambda x: origin_forward(inputs))
        self.base_model.extract_feats = (lambda *args: pointpillars_extract_feats(self.base_model, tensors[input_names[0]]))
        buf = io.BytesIO()
        self.base_model.device = torch.device('cpu')
        self.base_model.eval()
        torch.onnx.export(self.base_model, tensors, buf, input_names=input_names)
        self.base_model.forward = origin_forward
        net = self.ie.read_network(buf.getvalue(), b'', init_from_buffer=True)
        self.exec_net = self.ie.load_network(net, str(self.device).upper())
    def forward(self, inputs):
        """Run inference; compiles the network on the first call.

        Skips the 'labels' entry and any empty tensors. Returns a single
        tensor when the network has one output, else a tuple of tensors.
        """
        if (self.exec_net is None):
            self._read_torch_model(inputs)
        inputs = self._get_inputs(inputs)
        tensors = {}
        for (name, tensor) in inputs.items():
            if (name == 'labels'):
                continue
            if isinstance(tensor, list):
                for i in range(len(tensor)):
                    if (tensor[i].nelement() > 0):
                        tensors[(name + str(i))] = tensor[i].detach().numpy()
            elif (tensor.nelement() > 0):
                tensors[name] = tensor.detach().numpy()
        output = self.exec_net.infer(tensors)
        if (len(output) == 1):
            output = next(iter(output.values()))
            return torch.tensor(output)
        else:
            return tuple([torch.tensor(out) for out in output.values()])
    def __call__(self, inputs):
        return self.forward(inputs)
    def load_state_dict(self, *args):
        self.base_model.load_state_dict(*args)
    def eval(self):
        # Intentional no-op: the compiled OpenVINO network is inference-only.
        pass
    def cfg(self):
        # NOTE(review): looks like it should be a @property (callers must use
        # model.cfg()); confirm against call sites.
        return self.base_model.cfg
    def classes(self):
        # NOTE(review): same as cfg() — method, not property.
        return self.base_model.classes
    def inference_end(self, *args):
        return self.base_model.inference_end(*args)
    def preprocess(self, *args):
        return self.base_model.preprocess(*args)
    def transform(self, *args):
        return self.base_model.transform(*args)
    def to(self, device):
        # Only records the target device; it is applied when the network is
        # (re)compiled in _read_torch_model.
        self.device = device
def get_config(num_predators):
    """Assemble the predator-chase environment configuration.

    Args:
        num_predators: number of predator sprites to initialize.

    Returns:
        dict with state initializer, physics, task, action space, observers
        and meta-state initializer, ready to be passed to the environment.
    """
    init = StateInitialization(num_predators=num_predators, step_scaling_factor=0.1, threshold_trial_len=200)
    # Force field: friction on agent and predators, random jitter plus a
    # linear attraction toward the agent for predators, and wall collisions
    # (elastic for predators, inelastic for the agent).
    forces = (
        (physics_lib.Drag(coeff_friction=0.25), 'agent'),
        (physics_lib.Drag(coeff_friction=0.04), 'predators'),
        (physics_lib.RandomForce(max_force_magnitude=0.03), 'predators'),
        (physics_lib.DistanceForce(physics_lib.linear_force_fn(zero_intercept=(-0.0025), slope=0.0001)), 'agent', 'predators'),
        (physics_lib.Collision(elasticity=1.0, symmetric=False), 'predators', 'walls'),
        (physics_lib.Collision(elasticity=0.0, symmetric=False), 'agent', 'walls'),
    )
    physics = physics_lib.Physics(*forces, updates_per_env_step=10)
    # Negative reward on agent/predator contact; trial resets immediately.
    task = tasks.ContactReward(-1, layers_0='agent', layers_1='predators', reset_steps_after_contact=0)
    action_space = action_spaces.Joystick(scaling_factor=0.01, action_layers='agent')
    observer = observers.PILRenderer(image_size=(64, 64), anti_aliasing=1, color_to_rgb='hsv_to_rgb')

    def _bump_count(meta_state):
        meta_state['count'] += 1
    # NOTE: constructed as in the original but not placed in the returned
    # config dict.
    rules = game_rules.ModifyMetaState(_bump_count)
    return {
        'state_initializer': init.state_initializer,
        'physics': physics,
        'task': task,
        'action_space': action_space,
        'observers': {'image': observer},
        'meta_state_initializer': init.meta_state_initializer,
    }
def parser():
    """Build and parse the command-line arguments for (sr)VAE training.

    Returns:
        argparse.Namespace: parsed arguments. ``device`` is resolved to a
        ``torch.device`` (cuda if available) when not given explicitly.
    """
    def _str2bool(value):
        # argparse's ``type=bool`` treats ANY non-empty string (including
        # 'False') as True; parse the common spellings explicitly instead.
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ('true', 't', 'yes', 'y', '1'):
            return True
        if lowered in ('false', 'f', 'no', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError(f'Boolean value expected, got {value!r}.')
    PARSER = argparse.ArgumentParser(description='Training parameters.')
    PARSER.add_argument('--dataset', default='CIFAR10', type=str, choices=['CIFAR10', 'CelebA', 'Imagenette', 'ImageNet32', 'ImageNet64'], help='Data to be used.')
    PARSER.add_argument('--img_resize', default=32, type=int, help='Change image resolution.')
    PARSER.add_argument('--model', default='VAE', type=str, choices=['VAE', 'srVAE'], help='Model to be used.')
    PARSER.add_argument('--network', default='densenet32', type=str, choices=['densenet32', 'densenet16x32'], help='Neural Network architecture to be used.')
    PARSER.add_argument('--prior', default='MixtureOfGaussians', type=str, choices=['StandardNormal', 'MixtureOfGaussians', 'RealNVP'], help='Prior type.')
    PARSER.add_argument('--z_dim', default=1024, type=int, help='Dimensionality of z latent space.')
    PARSER.add_argument('--u_dim', default=1024, type=int, help='Dimensionality of z latent space.')
    PARSER.add_argument('--likelihood', default='dmol', type=str, choices=['dmol'], help='Type of likelihood.')
    PARSER.add_argument('--iw_test', default=512, type=int, help='Number of Importance Weighting samples used for approximating the test log-likelihood.')
    PARSER.add_argument('--batch_size', default=32, type=int, help='Batch size.')
    PARSER.add_argument('--epochs', default=2000, type=int, help='Number of training epochs.')
    PARSER.add_argument('--seed', default=None, type=int, help='Fix random seed.')
    PARSER.add_argument('--n_samples', default=8, type=int, help='Number of generated samples.')
    # These two used ``type=bool`` (always True for non-empty strings);
    # replaced with an explicit boolean parser, defaults unchanged.
    PARSER.add_argument('--log_interval', default=True, type=_str2bool, help='Print progress on every batch.')
    PARSER.add_argument('--device', default=None, type=str, choices=['cpu', 'cuda'], help='Device to run the experiment.')
    PARSER.add_argument('--use_tb', default=True, type=_str2bool, help='Use TensorBoard.')
    PARSER.add_argument('--tags', default='logs', type=str, help='Run tags.')
    ARGS = PARSER.parse_args()
    if ARGS.device is None:
        ARGS.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    return ARGS
def test(model, loader):
model.eval()
device = next(model.parameters()).device
correct = 0
loss = 0
total = 0
for (i, (x, y)) in enumerate(loader):
x = x.to(device)
y = y.to(device)
with torch.no_grad():
yhat = model(x)
(_, pred) = yhat.max(1)
correct += pred.eq(y).sum().item()
loss += (F.cross_entropy(yhat, y) * len(x))
total += len(x)
acc = ((correct / total) * 100.0)
loss = (loss / total)
model.train()
return (acc, loss) |
class GraphSignature(torch.nn.Module):
    """Produces four FiLM-style modulation vectors (gamma1, beta1, gamma2,
    beta2) from a graph, using externally supplied weights.

    All parameters are looked up in the ``weights`` dict passed to
    ``forward`` under fixed ``encoder.signature.*`` keys, so the layers
    created in ``__init__`` only define shapes/registration.
    """

    def __init__(self, args, in_channels, out_channels):
        super(GraphSignature, self).__init__()
        self.args = args
        double_out = 2 * out_channels
        if self.args.use_gcn_sig:
            self.conv1 = MetaGCNConv(in_channels, double_out, cached=False)
            fc_in = double_out
        else:
            self.gated_conv1 = MetaGatedGraphConv(in_channels, args.num_gated_layers)
            fc_in = in_channels
        # fc1/fc2 produce the first gamma/beta pair, fc3/fc4 the second.
        self.fc1 = nn.Linear(fc_in, double_out, bias=True)
        self.fc2 = nn.Linear(fc_in, double_out, bias=True)
        self.fc3 = nn.Linear(fc_in, out_channels, bias=True)
        self.fc4 = nn.Linear(fc_in, out_channels, bias=True)

    def forward(self, x, edge_index, weights, keys):
        if self.args.use_gcn_sig:
            h = F.relu(self.conv1(x, edge_index, weights['encoder.signature.conv1.weight'], weights['encoder.signature.conv1.bias']))
        else:
            h = F.relu(self.gated_conv1(x, edge_index, weights, keys))
        # Sum-pool node embeddings into a single graph embedding.
        h = h.sum(0)
        film = []
        for fc_name in ('fc1', 'fc2', 'fc3', 'fc4'):
            w = weights['encoder.signature.%s.weight' % fc_name]
            b = weights['encoder.signature.%s.bias' % fc_name]
            film.append(torch.tanh(F.linear(h, w, b)))
        return tuple(film)
def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error):
    """Lint check for '&&' on a line: either an rvalue reference (an
    unapproved C++11 feature) or a binary '&&' missing surrounding spaces.

    Emits at most one error via ``error``; returns nothing.
    """
    line = clean_lines.elided[linenum]
    # '&&' glued to the preceding token, or glued to the following token.
    m = Match('^(.*\\S)&&', line)
    if not m:
        m = Match('(.*)&&\\S', line)
    # Skip non-matches, the literal '(&&)' operator form, and operator&&.
    if (not m) or ('(&&)' in line) or Search('\\boperator\\s*$', m.group(1)):
        return
    typenames = GetTemplateArgs(clean_lines, linenum)
    and_pos = len(m.group(1))
    if not IsRValueType(typenames, clean_lines, nesting_state, linenum, and_pos):
        # Binary logical-and without spaces around it.
        error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around &&')
    elif not IsRValueAllowed(clean_lines, linenum, typenames):
        error(filename, linenum, 'build/c++11', 3, 'RValue references are an unapproved C++ feature.')
def add_crd_args(parser):
    """Register the CRD/knowledge-distillation options on ``parser``.

    Args:
        parser: an ``argparse.ArgumentParser``.

    Returns:
        The created 'CRD' argument group.
    """
    group = parser.add_argument_group('CRD')
    # (option strings, add_argument keyword arguments) — order preserved.
    specs = [
        (('--teacher_model_arch', '--tma'), dict(default='roberta_base', type=str, metavar='N', help='teacher model arch')),
        (('--teacher_model_checkpoint', '--tmc'), dict(default=None, type=str, metavar='N', help='teacher model arch checkpoint directory name')),
        (('--teacher_model_pt', '--tmp'), dict(default='model.pt', type=str, metavar='N', help='teacher model arch checkpoint pt file')),
        (('--data_name_or_path', '--dnp'), dict(default='', type=str, metavar='N', help='data name or path to load teacher model')),
        (('--temperature', '--temp'), dict(default=1, type=float, metavar='N', help='teacher model arch checkpoint')),
        (('--kd_weight', '--kw'), dict(default=1, type=float, metavar='N', help='kd loss weight, default is 1')),
        (('--crd_weight', '--cw'), dict(default=0.0, type=float, metavar='N', help='crd loss weight, default is 0')),
        (('--nce_k', '--nk'), dict(default=100, type=int, metavar='N', help='number of negative samples')),
        (('--s_dim_feat', '--sdf'), dict(default=4608, type=int, metavar='N', help='number of feats in student')),
        (('--t_dim_feat', '--tdf'), dict(default=4608, type=int, metavar='N', help='number of feats in teacher')),
        (('--s_dim_attn', '--sda'), dict(default=36504, type=int, metavar='N', help='number of attns in student')),
        (('--t_dim_attn', '--tda'), dict(default=36504, type=int, metavar='N', help='number of attns in teacher')),
        (('--crd_feat_dim', '--cfd'), dict(default=128, type=int, metavar='N', help='feat dim for CRD')),
        (('--use_mse',), dict(action='store_true', help='use MSE loss for KD')),
    ]
    for flags, kwargs in specs:
        group.add_argument(*flags, **kwargs)
    return group
class UNet2DModel(ModelMixin, ConfigMixin):
    """2D UNet for diffusion models: timestep (and optional class) embedding,
    a stack of down blocks, a mid block, and a mirrored stack of up blocks.

    ``forward`` returns the denoised/score sample, optionally wrapped in a
    ``UNet2DOutput``.
    """
    # NOTE(review): this bare name looks like the mangled remnant of the
    # ``@register_to_config`` decorator — confirm against upstream diffusers.
    _to_config
    def __init__(self, sample_size: Optional[Union[(int, Tuple[(int, int)])]]=None, in_channels: int=3, out_channels: int=3, center_input_sample: bool=False, time_embedding_type: str='positional', freq_shift: int=0, flip_sin_to_cos: bool=True, down_block_types: Tuple[str]=('DownBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D'), up_block_types: Tuple[str]=('AttnUpBlock2D', 'AttnUpBlock2D', 'AttnUpBlock2D', 'UpBlock2D'), block_out_channels: Tuple[int]=(224, 448, 672, 896), layers_per_block: int=2, mid_block_scale_factor: float=1, downsample_padding: int=1, downsample_type: str='conv', upsample_type: str='conv', dropout: float=0.0, act_fn: str='silu', attention_head_dim: Optional[int]=8, norm_num_groups: int=32, attn_norm_num_groups: Optional[int]=None, norm_eps: float=1e-05, resnet_time_scale_shift: str='default', add_attention: bool=True, class_embed_type: Optional[str]=None, num_class_embeds: Optional[int]=None, num_train_timesteps: Optional[int]=None):
        super().__init__()
        self.sample_size = sample_size
        time_embed_dim = (block_out_channels[0] * 4)
        # Down/up block lists must mirror each other and match channel counts.
        if (len(down_block_types) != len(up_block_types)):
            raise ValueError(f'Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}.')
        if (len(block_out_channels) != len(down_block_types)):
            raise ValueError(f'Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.')
        self.conv_in = nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
        # Timestep projection: fourier / sinusoidal / learned embedding.
        if (time_embedding_type == 'fourier'):
            self.time_proj = GaussianFourierProjection(embedding_size=block_out_channels[0], scale=16)
            timestep_input_dim = (2 * block_out_channels[0])
        elif (time_embedding_type == 'positional'):
            self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
            timestep_input_dim = block_out_channels[0]
        elif (time_embedding_type == 'learned'):
            self.time_proj = nn.Embedding(num_train_timesteps, block_out_channels[0])
            timestep_input_dim = block_out_channels[0]
        # NOTE(review): no else branch — an unknown time_embedding_type leaves
        # timestep_input_dim unbound and fails below with an opaque error.
        self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
        # Optional class conditioning, added to the timestep embedding.
        if ((class_embed_type is None) and (num_class_embeds is not None)):
            self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
        elif (class_embed_type == 'timestep'):
            self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
        elif (class_embed_type == 'identity'):
            self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
        else:
            self.class_embedding = None
        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        output_channel = block_out_channels[0]
        # Encoder: each stage halves resolution except the final one.
        for (i, down_block_type) in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = (i == (len(block_out_channels) - 1))
            down_block = get_down_block(down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=time_embed_dim, add_downsample=(not is_final_block), resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=(attention_head_dim if (attention_head_dim is not None) else output_channel), downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, downsample_type=downsample_type, dropout=dropout)
            self.down_blocks.append(down_block)
        self.mid_block = UNetMidBlock2D(in_channels=block_out_channels[(- 1)], temb_channels=time_embed_dim, dropout=dropout, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, attention_head_dim=(attention_head_dim if (attention_head_dim is not None) else block_out_channels[(- 1)]), resnet_groups=norm_num_groups, attn_groups=attn_norm_num_groups, add_attention=add_attention)
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        # Decoder: mirrors the encoder, consuming skip connections.
        for (i, up_block_type) in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min((i + 1), (len(block_out_channels) - 1))]
            is_final_block = (i == (len(block_out_channels) - 1))
            up_block = get_up_block(up_block_type, num_layers=(layers_per_block + 1), in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=time_embed_dim, add_upsample=(not is_final_block), resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=(attention_head_dim if (attention_head_dim is not None) else output_channel), resnet_time_scale_shift=resnet_time_scale_shift, upsample_type=upsample_type, dropout=dropout)
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        num_groups_out = (norm_num_groups if (norm_num_groups is not None) else min((block_out_channels[0] // 4), 32))
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=num_groups_out, eps=norm_eps)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, kernel_size=3, padding=1)
    def forward(self, sample: torch.FloatTensor, timestep: Union[(torch.Tensor, float, int)], class_labels: Optional[torch.Tensor]=None, return_dict: bool=True) -> Union[(UNet2DOutput, Tuple)]:
        """Predict the model output for ``sample`` at ``timestep``.

        Returns ``UNet2DOutput`` when ``return_dict`` is True, else a 1-tuple.
        Raises ValueError if class conditioning is mis-specified.
        """
        # Optionally rescale inputs from [0, 1] to [-1, 1].
        if self.config.center_input_sample:
            sample = ((2 * sample) - 1.0)
        timesteps = timestep
        # Normalize scalar/0-d timesteps to a per-batch 1-d tensor.
        if (not torch.is_tensor(timesteps)):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif (torch.is_tensor(timesteps) and (len(timesteps.shape) == 0)):
            timesteps = timesteps[None].to(sample.device)
        timesteps = (timesteps * torch.ones(sample.shape[0], dtype=timesteps.dtype, device=timesteps.device))
        t_emb = self.time_proj(timesteps)
        t_emb = t_emb.to(dtype=self.dtype)
        emb = self.time_embedding(t_emb)
        if (self.class_embedding is not None):
            if (class_labels is None):
                raise ValueError('class_labels should be provided when doing class conditioning')
            if (self.config.class_embed_type == 'timestep'):
                class_labels = self.time_proj(class_labels)
            class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
            emb = (emb + class_emb)
        elif ((self.class_embedding is None) and (class_labels is not None)):
            raise ValueError('class_embedding needs to be initialized in order to use class conditioning')
        skip_sample = sample
        sample = self.conv_in(sample)
        # Collect residuals from every down block for the skip connections.
        down_block_res_samples = (sample,)
        for downsample_block in self.down_blocks:
            if hasattr(downsample_block, 'skip_conv'):
                (sample, res_samples, skip_sample) = downsample_block(hidden_states=sample, temb=emb, skip_sample=skip_sample)
            else:
                (sample, res_samples) = downsample_block(hidden_states=sample, temb=emb)
            down_block_res_samples += res_samples
        sample = self.mid_block(sample, emb)
        skip_sample = None
        # Consume the residual stack in reverse while upsampling.
        for upsample_block in self.up_blocks:
            res_samples = down_block_res_samples[(- len(upsample_block.resnets)):]
            down_block_res_samples = down_block_res_samples[:(- len(upsample_block.resnets))]
            if hasattr(upsample_block, 'skip_conv'):
                (sample, skip_sample) = upsample_block(sample, res_samples, emb, skip_sample)
            else:
                sample = upsample_block(sample, res_samples, emb)
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        if (skip_sample is not None):
            sample += skip_sample
        # Fourier-embedding variant rescales the output by the timestep.
        if (self.config.time_embedding_type == 'fourier'):
            timesteps = timesteps.reshape((sample.shape[0], *([1] * len(sample.shape[1:]))))
            sample = (sample / timesteps)
        if (not return_dict):
            return (sample,)
        return UNet2DOutput(sample=sample)
def get_labelname(label, label_map=None):
    """Look up the display name for a numeric ``label``.

    Args:
        label: label id to search for.
        label_map: optional labelmap object exposing ``item`` entries with
            ``label`` and ``display_name`` attributes. Defaults to the
            module-level ``labelmap`` global (backward compatible).

    Returns:
        The ``display_name`` of the first matching entry.

    Raises:
        AssertionError: if no entry matches (same exception type the original
            ``assert`` produced, but raised explicitly so it survives ``-O``).
    """
    lmap = label_map if label_map is not None else labelmap
    # Original used Python-2 ``xrange`` (a NameError on Python 3); iterate
    # the entries directly instead of indexing.
    for item in lmap.item:
        if label == item.label:
            return item.display_name
    raise AssertionError('label %s not found in labelmap' % label)
def to_file(out, u_rels, k, min_ims, complete_line):
    """Write the hierarchy data to three files rooted at ``complete_line``:

    - ``complete_line``: one line per non-empty entry, 'index v1 v2 ...'.
    - ``complete_line + '_lts'``: JSON map of line index -> synset key.
    - ``complete_line + '_edges'``: JSON dump of ``u_rels``.

    NOTE(review): ``k`` and ``min_ims`` are accepted but unused — kept for
    interface compatibility; confirm whether callers rely on them.
    """
    synset_by_line = {}
    with open(complete_line, 'w') as sink:
        for idx, (synset, counts) in enumerate(out.items()):
            # Indices follow enumeration order, so skipped (empty) entries
            # consume an index that is never written.
            if not counts:
                continue
            synset_by_line[idx] = synset
            row = ' '.join(str(c) for c in counts.values())
            sink.write(f'{idx} {row}\n')
    with open(complete_line + '_lts', 'w') as sink:
        json.dump(synset_by_line, sink)
    with open(complete_line + '_edges', 'w') as sink:
        json.dump(u_rels, sink)
    print('done')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.