code stringlengths 101 5.91M |
|---|
class _opener(object):
def __init__(self, file_like):
self.file_like = file_like
def __enter__(self):
return self.file_like
def __exit__(self, *args):
pass |
class LambdaLayer(kbase.ZooKerasLayer):
    """Keras-style layer defined by symbolic input variables and an output variable."""

    def __init__(self, input_vars, out_var, input_shape=None, **kwargs):
        # The backend expects the shape as a plain list, or None when absent.
        shape = list(input_shape) if input_shape else None
        super(LambdaLayer, self).__init__(None, input_vars, out_var, shape, **kwargs)
def main():
    """Convert an official pretrained SegFormer checkpoint into MMSegmentation format.

    Reads the source checkpoint path and destination path from the command line,
    remaps the weight keys via ``convert_mit``, and saves the result.
    """
    parser = argparse.ArgumentParser(description='Convert keys in official pretrained segformer to MMSegmentation style.')
    parser.add_argument('src', help='src model path or url')
    parser.add_argument('dst', help='save path')
    args = parser.parse_args()
    checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu')
    # Checkpoints wrap the weights under different keys depending on their origin.
    for wrapper_key in ('state_dict', 'model'):
        if wrapper_key in checkpoint:
            state_dict = checkpoint[wrapper_key]
            break
    else:
        state_dict = checkpoint
    weight = convert_mit(state_dict)
    mmcv.mkdir_or_exist(osp.dirname(args.dst))
    torch.save(weight, args.dst)
def eval_directory(embs_dir: str, eval_script: str, output_dir: str):
    """Run the evaluation script on every embedding file found in *embs_dir*.

    For each regular file in *embs_dir*, calls ``eval_embedding`` and writes the
    result to *output_dir* under the file's basename with a ``.txt`` suffix.
    """
    embedding_paths = [
        os.path.join(embs_dir, name)
        for name in os.listdir(embs_dir)
        if os.path.isfile(os.path.join(embs_dir, name))
    ]
    for emb_path in tqdm(embedding_paths, desc='Evaluating embeddings'):
        # Output name: basename with the final extension dropped; interior
        # dots are removed as well (split('.') join without separator).
        stem = ''.join(os.path.basename(emb_path).split('.')[:(- 1)])
        eval_embedding(
            eval_script=eval_script,
            embedding_path=emb_path,
            output_path=os.path.join(output_dir, f'{stem}.txt'),
        )
def vgg_fc_layer(x, out_dim, var_list, apply_relu=True, name='fc'):
    """Fully-connected layer with VGG-style fan-in uniform initialization.

    Creates weight/bias variables under scope *name*, appends them to
    *var_list*, and returns ``x @ W + b`` (ReLU'd when *apply_relu*).
    """
    in_dim = x.get_shape().as_list()[1]
    # Classic fan-in init: U(-1/sqrt(in_dim), 1/sqrt(in_dim)).
    bound = 1.0 / math.sqrt(in_dim)
    with tf.variable_scope(name):
        w = tf.get_variable('weights', [in_dim, out_dim], tf.float32,
                            initializer=tf.random_uniform_initializer(-bound, bound))
        b = tf.get_variable('biases', [out_dim], tf.float32,
                            initializer=tf.random_uniform_initializer(-bound, bound))
        var_list.append(w)
        var_list.append(b)
        output = tf.matmul(x, w) + b
        return tf.nn.relu(output) if apply_relu else output
def morgan_similarity(smiles_1: List[str], smiles_2: List[str], radius: int, sample_rate: float):
    """Print similarity statistics between all (sampled) pairs of two SMILES sets.

    When *sample_rate* < 1.0, each side is subsampled (with replacement) so the
    cross product has roughly ``sample_rate * |smiles_1| * |smiles_2|`` pairs.
    NOTE(review): the printed labels say "dice" but ``TanimotoSimilarity`` is
    computed — confirm which metric is intended.
    """
    num_pairs = len(smiles_1) * len(smiles_2)
    if sample_rate < 1.0:
        sample_num_pairs = sample_rate * num_pairs
        # sqrt so that sampled_1 x sampled_2 ~ sample_num_pairs pairs.
        sample_size = math.ceil(math.sqrt(sample_num_pairs))
        sample_smiles_1 = np.random.choice(smiles_1, size=sample_size, replace=True)
        sample_smiles_2 = np.random.choice(smiles_2, size=sample_size, replace=True)
    else:
        sample_smiles_1, sample_smiles_2 = smiles_1, smiles_2
        sample_num_pairs = len(sample_smiles_1) * len(sample_smiles_2)
    similarities = []
    for smile_1, smile_2 in tqdm(product(sample_smiles_1, sample_smiles_2), total=sample_num_pairs):
        mol_1 = Chem.MolFromSmiles(smile_1)
        mol_2 = Chem.MolFromSmiles(smile_2)
        fp_1 = AllChem.GetMorganFingerprint(mol_1, radius)
        fp_2 = AllChem.GetMorganFingerprint(mol_2, radius)
        similarities.append(DataStructs.TanimotoSimilarity(fp_1, fp_2))
    similarities = np.array(similarities)
    print()
    print(f'Average dice similarity = {np.mean(similarities):.4f} +/- {np.std(similarities):.4f}')
    print(f'Minimum dice similarity = {np.min(similarities):.4f}')
    print(f'Maximum dice similarity = {np.max(similarities):.4f}')
    print()
    print('Percentiles for dice similarity')
    print(' | '.join([f'{i}% = {np.percentile(similarities, i):.4f}' for i in range(0, 101, 10)]))
def extract_losses(line):
    """Parse one tab-separated log line and return ``(total_loss, iou_loss)``.

    Expects ``name:value`` fields in columns 1 and 2 (0-indexed); the values
    are returned as raw strings, not converted to float.
    """
    fields = line.split('\t')
    total_loss = fields[1].split(':')[1]
    iou_loss = fields[2].split(':')[1]
    return (total_loss, iou_loss)
def main():
    """Train a multi-agent actor-critic system (MADDPG / MATD3 / MASAC / MAHRSAC)
    on a parallelized landmark-tracking environment.

    All hyperparameters are read from an INI file named ``<configFile>.txt``
    (first CLI argument, defaulting to ``test_configuration``). Supports
    resuming from checkpoints, TensorBoard logging, optional benchmarking
    counters, prioritized replay, and periodic best/last checkpointing.
    """
    # --- pick the configuration file: first CLI arg, else the default name ---
    if (len(sys.argv) > 1):
        configFile = sys.argv[1]
    else:
        configFile = 'test_configuration'
    print('Configuration File = ', (configFile + '.txt'))
    config = ConfigParser()
    config.read((configFile + '.txt'))
    # --- hyperparameters, all from the [hyperparam] section ---
    BUFFER_SIZE = config.getint('hyperparam', 'BUFFER_SIZE')
    BATCH_SIZE = config.getint('hyperparam', 'BATCH_SIZE')
    GAMMA = config.getfloat('hyperparam', 'GAMMA')
    TAU = config.getfloat('hyperparam', 'TAU')
    LR_ACTOR = config.getfloat('hyperparam', 'LR_ACTOR')
    LR_CRITIC = config.getfloat('hyperparam', 'LR_CRITIC')
    WEIGHT_DECAY = config.getfloat('hyperparam', 'WEIGHT_DECAY')
    UPDATE_EVERY = config.getint('hyperparam', 'UPDATE_EVERY')
    UPDATE_TIMES = config.getint('hyperparam', 'UPDATE_TIMES')
    SEED = config.getint('hyperparam', 'SEED')
    BENCHMARK = config.getboolean('hyperparam', 'BENCHMARK')
    EXP_REP_BUF = config.getboolean('hyperparam', 'EXP_REP_BUF')
    PRE_TRAINED = config.getboolean('hyperparam', 'PRE_TRAINED')
    SCENARIO = config.get('hyperparam', 'SCENARIO')
    RENDER = config.getboolean('hyperparam', 'RENDER')
    PROGRESS_BAR = config.getboolean('hyperparam', 'PROGRESS_BAR')
    RNN = config.getboolean('hyperparam', 'RNN')
    HISTORY_LENGTH = config.getint('hyperparam', 'HISTORY_LENGTH')
    DNN = config.get('hyperparam', 'DNN')
    START_STEPS = config.getint('hyperparam', 'START_STEPS')
    REWARD_WINDOWS = config.getint('hyperparam', 'REWARD_WINDOWS')
    LANDMARK_ERROR_WINDOWS = config.getint('hyperparam', 'LANDMARK_ERROR_WINDOWS')
    COLLISION_OUTWORLD_WINDOWS = config.getint('hyperparam', 'COLLISION_OUTWORLD_WINDOWS')
    ALPHA = config.getfloat('hyperparam', 'ALPHA')
    AUTOMATIC_ENTROPY = config.getboolean('hyperparam', 'AUTOMATIC_ENTROPY')
    DIM_1 = config.getint('hyperparam', 'DIM_1')
    DIM_2 = config.getint('hyperparam', 'DIM_2')
    parallel_envs = config.getint('hyperparam', 'parallel_envs')
    num_agents = config.getint('hyperparam', 'num_agents')
    num_landmarks = config.getint('hyperparam', 'num_landmarks')
    landmark_depth = config.getfloat('hyperparam', 'landmark_depth')
    landmark_movable = config.getboolean('hyperparam', 'landmark_movable')
    landmark_vel = config.getfloat('hyperparam', 'landmark_vel')
    movement = config.get('hyperparam', 'movement')
    pf_method = config.getboolean('hyperparam', 'pf_method')
    rew_err_th = config.getfloat('hyperparam', 'rew_err_th')
    rew_dis_th = config.getfloat('hyperparam', 'rew_dis_th')
    max_range = config.getfloat('hyperparam', 'max_range')
    max_current_vel = config.getfloat('hyperparam', 'max_current_vel')
    range_dropping = config.getfloat('hyperparam', 'range_dropping')
    number_of_episodes = config.getint('hyperparam', 'number_of_episodes')
    episode_length = config.getint('hyperparam', 'episode_length')
    save_interval = config.getint('hyperparam', 'save_interval')
    noise = config.getfloat('hyperparam', 'noise')
    noise_reduction = config.getfloat('hyperparam', 'noise_reduction')
    fol_in = int((np.random.rand() * 1000))  # NOTE(review): appears unused below
    # Optional keys: older config files may not define these, fall back to defaults.
    try:
        max_vel = config.getfloat('hyperparam', 'max_vel')
        random_vel = config.getboolean('hyperparam', 'random_vel')
    except:
        # NOTE(review): bare except — would also hide unrelated errors; narrow if possible.
        print('no max_vel or random_vel found in config file')
        max_vel = 0.0
        random_vel = False
    DEVICE = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    # --- log/model directories, rooted one level above the current working dir ---
    common_folder = ('/logs/' + configFile)
    log_path = ((os.path.dirname(os.getcwd()) + common_folder) + '/log')
    model_dir = ((os.path.dirname(os.getcwd()) + common_folder) + '/model_dir')
    os.makedirs(model_dir, exist_ok=True)
    if PRE_TRAINED:
        # Resume from the highest episode number among saved 'episode_last_*.pt' files
        # (the trailing '.pt' is stripped with [:-3]).
        PRE_TRAINED_EP = max([int(aux.split('_')[(- 1)][:(- 3)]) for (i, aux) in enumerate(glob.glob((model_dir + '/episode_last_*.pt')))])
    else:
        PRE_TRAINED_EP = 0
    # --- echo the effective configuration ---
    print('Hyperparameters:')
    print('BUFFER_SIZE = ', BUFFER_SIZE)
    print('BATCH_SIZE = ', BATCH_SIZE)
    print('GAMMA = ', GAMMA)
    print('TAU = ', TAU)
    print('LR_ACTOR = ', LR_ACTOR)
    print('LR_CRITIC = ', LR_CRITIC)
    print('WEIGHT_DECAY = ', WEIGHT_DECAY)
    print('UPDATE_EVERY = ', UPDATE_EVERY)
    print('UPDATE_TIMES = ', UPDATE_TIMES)
    print('SEED = ', SEED)
    print('BENCHMARK = ', BENCHMARK)
    print('EXP_REP_BUF = ', EXP_REP_BUF)
    print('PRE_TRAINED = ', PRE_TRAINED)
    print('PRE_TRAINED_EP = ', PRE_TRAINED_EP)
    print('SCENARIO = ', SCENARIO)
    print('RNN activated = ', RNN)
    print('HISTORY_LENGTH = ', HISTORY_LENGTH)
    print('RENDER = ', RENDER)
    print('PROGRESS_BAR = ', PROGRESS_BAR)
    print('DEVICE = ', DEVICE)
    print('parallel_envs = ', parallel_envs)
    print('num_agents = ', num_agents)
    print('num_landmarks = ', num_landmarks)
    print('landmark_depth = ', landmark_depth)
    print('landmark_velocity = ', landmark_vel)
    print('number_of_episodes = ', number_of_episodes)
    print('episode_length = ', episode_length)
    print('save_interval = ', save_interval)
    print('noise = ', noise)
    print('noise_reduction = ', noise_reduction)
    print('DNN architecture = ', DNN)
    print('Alpha temperature = ', ALPHA)
    print('DNN Layer 1 size = ', DIM_1)
    print('DNN Layer 2 size = ', DIM_2)
    print('Folder name = ', common_folder)
    print('Model directory = ', model_dir)
    print('TIMESTAMP = ', time.strftime('%m%d%y_%H%M%S'))
    # Offset the seed by the resume episode so resumed runs don't replay the same RNG stream.
    seeding(seed=(SEED + PRE_TRAINED_EP))
    t = 0
    if BENCHMARK:
        benchmark_dir = ((os.getcwd() + common_folder) + '/benchmark_dir')
        os.makedirs(benchmark_dir, exist_ok=True)
    print('Initialize the number of parallel envs in torch')
    torch.set_num_threads(parallel_envs)
    print('Initialize the environments')
    env = envs.make_parallel_env(parallel_envs, SCENARIO, seed=(SEED + PRE_TRAINED_EP), num_agents=num_agents, num_landmarks=num_landmarks, landmark_depth=landmark_depth, landmark_movable=landmark_movable, landmark_vel=landmark_vel, max_vel=max_vel, random_vel=random_vel, movement=movement, pf_method=pf_method, rew_err_th=rew_err_th, rew_dis_th=rew_dis_th, max_range=max_range, max_current_vel=max_current_vel, range_dropping=range_dropping, benchmark=BENCHMARK)
    # --- replay buffer: plain FIFO, or prioritized (sum-tree) when EXP_REP_BUF ---
    if (EXP_REP_BUF == False):
        buffer = ReplayBuffer(int(BUFFER_SIZE))
    else:
        buffer = ReplayBuffer_SummTree(int(BUFFER_SIZE), (SEED + PRE_TRAINED_EP))
        priority = np.ones(num_agents)
    print('Initialize the Actor-Critic networks')
    # --- choose the algorithm family by the DNN config string ---
    if (DNN == 'MADDPG'):
        maddpg = MADDPG(num_agents=num_agents, num_landmarks=num_landmarks, landmark_depth=landmark_depth, discount_factor=GAMMA, tau=TAU, lr_actor=LR_ACTOR, lr_critic=LR_CRITIC, weight_decay=WEIGHT_DECAY, device=DEVICE, rnn=RNN, dim_1=DIM_1, dim_2=DIM_2)
    elif (DNN == 'MATD3'):
        maddpg = MATD3_BC(num_agents=num_agents, num_landmarks=num_landmarks, landmark_depth=landmark_depth, discount_factor=GAMMA, tau=TAU, lr_actor=LR_ACTOR, lr_critic=LR_CRITIC, weight_decay=WEIGHT_DECAY, device=DEVICE, rnn=RNN, dim_1=DIM_1, dim_2=DIM_2)
    elif (DNN == 'MASAC'):
        maddpg = MASAC(num_agents=num_agents, num_landmarks=num_landmarks, landmark_depth=landmark_depth, discount_factor=GAMMA, tau=TAU, lr_actor=LR_ACTOR, lr_critic=LR_CRITIC, weight_decay=WEIGHT_DECAY, device=DEVICE, rnn=RNN, alpha=ALPHA, automatic_entropy_tuning=AUTOMATIC_ENTROPY, dim_1=DIM_1, dim_2=DIM_2)
    elif (DNN == 'MAHRSAC'):
        maddpg = MAHRSAC(num_agents=num_agents, num_landmarks=num_landmarks, landmark_depth=landmark_depth, discount_factor=GAMMA, tau=TAU, lr_actor=LR_ACTOR, lr_critic=LR_CRITIC, weight_decay=WEIGHT_DECAY, device=DEVICE, rnn=RNN, alpha=ALPHA, automatic_entropy_tuning=AUTOMATIC_ENTROPY, dim_1=DIM_1, dim_2=DIM_2)
    else:
        # NOTE(review): unknown DNN only prints; 'maddpg' stays undefined and crashes later.
        print('ERROR UNKNOWN DNN ARCHITECTURE')
    logger = SummaryWriter(log_dir=log_path)
    # --- per-agent reward history and (optional) benchmark counters ---
    agents_reward = []
    for n in range(num_agents):
        agents_reward.append([])
    if BENCHMARK:
        landmark_error_episode = []
        for i in range(num_landmarks):
            landmark_error_episode.append([1])
        agent_outofworld_episode = []
        agent_collision_episode = []
        landmark_collision_episode = []
        for i in range(num_agents):
            agent_outofworld_episode.append([0])
            agent_collision_episode.append([0])
            landmark_collision_episode.append([0])
    # --- resume: restore networks, optimizers, replay buffer and statistics ---
    if (PRE_TRAINED == True):
        trained_checkpoint = (model_dir + '/episode')
        aux = torch.load((trained_checkpoint + '_last.pt'))
        if ((DNN == 'MASAC') or (DNN == 'MAHRSAC')):
            # SAC variants also persist entropy-temperature state.
            with open((trained_checkpoint + '_target_entropy_last.file'), 'rb') as f:
                target_entropy_aux = pickle.load(f)
            with open((trained_checkpoint + '_log_alpha_last.file'), 'rb') as f:
                log_alpha_aux = pickle.load(f)
            with open((trained_checkpoint + '_alpha_last.file'), 'rb') as f:
                alpha_aux = pickle.load(f)
        for i in range(num_agents):
            if (DNN == 'MADDPG'):
                maddpg.maddpg_agent[i].actor.load_state_dict(aux[i]['actor_params'])
                maddpg.maddpg_agent[i].critic.load_state_dict(aux[i]['critic_params'])
                maddpg.maddpg_agent[i].target_actor.load_state_dict(aux[i]['target_actor_params'])
                maddpg.maddpg_agent[i].target_critic.load_state_dict(aux[i]['target_critic_params'])
                maddpg.maddpg_agent[i].actor_optimizer.load_state_dict(aux[i]['actor_optim_params'])
                maddpg.maddpg_agent[i].critic_optimizer.load_state_dict(aux[i]['critic_optim_params'])
            elif (DNN == 'MATD3'):
                maddpg.matd3_bc_agent[i].actor.load_state_dict(aux[i]['actor_params'])
                maddpg.matd3_bc_agent[i].critic.load_state_dict(aux[i]['critic_params'])
                maddpg.matd3_bc_agent[i].target_actor.load_state_dict(aux[i]['target_actor_params'])
                maddpg.matd3_bc_agent[i].target_critic.load_state_dict(aux[i]['target_critic_params'])
                maddpg.matd3_bc_agent[i].actor_optimizer.load_state_dict(aux[i]['actor_optim_params'])
                maddpg.matd3_bc_agent[i].critic_optimizer.load_state_dict(aux[i]['critic_optim_params'])
            elif ((DNN == 'MASAC') or (DNN == 'MAHRSAC')):
                if AUTOMATIC_ENTROPY:
                    maddpg.masac_agent[i].actor.load_state_dict(aux[i]['actor_params'])
                    maddpg.masac_agent[i].critic.load_state_dict(aux[i]['critic_params'])
                    maddpg.masac_agent[i].target_critic.load_state_dict(aux[i]['target_critic_params'])
                    maddpg.masac_agent[i].actor_optimizer.load_state_dict(aux[i]['actor_optim_params'])
                    maddpg.masac_agent[i].critic_optimizer.load_state_dict(aux[i]['critic_optim_params'])
                    maddpg.masac_agent[i].alpha_optimizer.load_state_dict(aux[i]['alpha_optim_params'])
                    maddpg.masac_agent[i].target_entropy = target_entropy_aux[i]
                    maddpg.masac_agent[i].log_alpha = log_alpha_aux[i]
                    maddpg.masac_agent[i].alpha = alpha_aux[i]
                else:
                    maddpg.masac_agent[i].actor.load_state_dict(aux[i]['actor_params'])
                    maddpg.masac_agent[i].critic.load_state_dict(aux[i]['critic_params'])
                    maddpg.masac_agent[i].target_critic.load_state_dict(aux[i]['target_critic_params'])
                    maddpg.masac_agent[i].actor_optimizer.load_state_dict(aux[i]['actor_optim_params'])
                    maddpg.masac_agent[i].critic_optimizer.load_state_dict(aux[i]['critic_optim_params'])
            else:
                break
        buffer.reload((trained_checkpoint + '_last.file'))
        print('next')
        with open((trained_checkpoint + '_reward_last.file'), 'rb') as f:
            agents_reward = pickle.load(f)
        with open((trained_checkpoint + '_lerror_last.file'), 'rb') as f:
            landmark_error_episode = pickle.load(f)
        with open((trained_checkpoint + '_outworld_last.file'), 'rb') as f:
            agent_outofworld_episode = pickle.load(f)
        with open((trained_checkpoint + '_agentcoll_last.file'), 'rb') as f:
            agent_collision_episode = pickle.load(f)
        with open((trained_checkpoint + '_landcoll_last.file'), 'rb') as f:
            landmark_collision_episode = pickle.load(f)
        # Batch size is doubled every 200k episodes of prior training, capped at 2048.
        print('batch_size_was=', BATCH_SIZE)
        BATCH_SIZE *= (2 ** int((PRE_TRAINED_EP / 200000)))
        if (BATCH_SIZE > 2048):
            BATCH_SIZE = 2048
        print('batch_size_is_now=', BATCH_SIZE)
    print('Starting iterations... \r\n')
    if (PROGRESS_BAR == True):
        import tqdm
        timer_bar = tqdm.tqdm(range(number_of_episodes), desc='\r\n Episode', position=0)
    counter = 0
    avg_rewards_best = (- 1000.0)
    # --- main training loop: one iteration drives 'parallel_envs' episodes at once ---
    for episode in range(0, number_of_episodes, parallel_envs):
        if (PRE_TRAINED == True):
            episode += PRE_TRAINED_EP
            if (episode == PRE_TRAINED_EP):
                # Fast-forward the exploration-noise decay to the resume point.
                noise *= (noise_reduction ** int((PRE_TRAINED_EP / parallel_envs)))
        if (PROGRESS_BAR == True):
            timer_bar.update(parallel_envs)
        all_obs = env.reset()
        # Reset each agent's exploration-noise process at episode start.
        for i in range(num_agents):
            if (DNN == 'MADDPG'):
                maddpg.maddpg_agent[i].noise.reset()
            elif (DNN == 'MATD3'):
                maddpg.matd3_bc_agent[i].noise.reset()
            elif ((DNN == 'MASAC') or (DNN == 'MAHRSAC')):
                maddpg.masac_agent[i].noise.reset()
            else:
                break
        reward_this_episode = np.zeros((parallel_envs, num_agents))
        landmark_error = []
        for i in range(num_landmarks):
            landmark_error.append([])
        obs_roll = np.rollaxis(all_obs, 1)
        obs = transpose_list(obs_roll)
        obs_size = obs[0][0].size
        # Build a zero-filled observation history of length HISTORY_LENGTH per agent
        # (used as the recurrent input window when RNN is enabled).
        history = copy.deepcopy(obs)
        for n in range(parallel_envs):
            for m in range(num_agents):
                for i in range((HISTORY_LENGTH - 1)):
                    if (i == 0):
                        history[n][m] = (history[n][m].reshape(1, obs_size) * 0.0)
                    aux = (obs[n][m].reshape(1, obs_size) * 0.0)
                    history[n][m] = np.concatenate((history[n][m], aux), axis=0)
        history_a = np.zeros([parallel_envs, num_agents, HISTORY_LENGTH, 1])
        frames = []
        tmax = 0
        his = []
        if (RENDER == True):
            frames.append(env.render('rgb_array'))
        # --- roll out one episode of at most 'episode_length' steps ---
        for episode_t in range(episode_length):
            # Concatenate observation history and action history per agent.
            his = []
            for i in range(num_agents):
                his.append(torch.cat((transpose_to_tensor(history)[i], transpose_to_tensor(history_a)[i]), dim=2))
            if (episode < START_STEPS):
                # Warm-up phase: purely random actions in [-1, 1].
                actions_array = np.random.uniform((- 1), 1, (num_agents, parallel_envs, 1))
            else:
                actions = maddpg.act(his, transpose_to_tensor(obs), noise=noise)
                actions_array = torch.stack(actions).detach().numpy()
            actions_for_env = np.rollaxis(actions_array, 1)
            (next_obs, rewards, dones, info) = env.step(actions_for_env)
            transition = (history, history_a, obs, actions_for_env, rewards, next_obs, dones)
            if (EXP_REP_BUF == False):
                buffer.push(transition)
            else:
                buffer.push(transition, priority)
            reward_this_episode += rewards
            if RNN:
                # Slide the observation/action history windows forward by one step.
                for n in range(parallel_envs):
                    for m in range(num_agents):
                        aux = obs[n][m].reshape(1, obs_size)
                        history[n][m] = np.concatenate((history[n][m], aux), axis=0)
                        history[n][m] = np.delete(history[n][m], 0, 0)
                history_a = np.concatenate((history_a, actions_for_env.reshape(parallel_envs, num_agents, 1, 1)), axis=2)
                history_a = np.delete(history_a, 0, 2)
            obs = next_obs
            t += parallel_envs
            if (RENDER == True):
                frames.append(env.render('rgb_array'))
                tmax += 1
            if BENCHMARK:
                # Average landmark estimation error across the parallel envs.
                error_mean = np.zeros(num_landmarks)
                for (e, inf) in enumerate(info):
                    for l in range(num_landmarks):
                        error_mean[l] = np.add(error_mean[l], inf['n'][0][0][l])
                error_mean /= parallel_envs
                for i in range(num_landmarks):
                    landmark_error[i].append(error_mean[i])
            if dones.any():
                break
        noise *= noise_reduction
        # --- learning step: every UPDATE_EVERY episodes, UPDATE_TIMES updates per agent ---
        if ((len(buffer) > BATCH_SIZE) and ((episode % UPDATE_EVERY) < parallel_envs)):
            for _ in range(UPDATE_TIMES):
                priority = np.zeros(num_agents)
                for a_i in range(num_agents):
                    if (EXP_REP_BUF == False):
                        samples = buffer.sample(BATCH_SIZE)
                        priority = maddpg.update(samples, a_i, logger)
                    else:
                        (samples, indexes) = buffer.sample(BATCH_SIZE)
                        new_priorities = maddpg.update(samples, a_i, logger)
                        priority[a_i] = buffer.update(indexes, new_priorities)
                if (EXP_REP_BUF == True):
                    priority /= num_agents
                maddpg.update_targets()
        # --- accumulate per-agent rewards (sliding window of REWARD_WINDOWS) ---
        for i in range(parallel_envs):
            for n in range(num_agents):
                agents_reward[n].append(reward_this_episode[(i, n)])
                if (len(agents_reward[n]) > REWARD_WINDOWS):
                    agents_reward[n] = agents_reward[n][1:]
        # Record the mean landmark error over the last 100 steps of long-enough episodes.
        if (BENCHMARK and (episode_t > 180)):
            for i in range(num_landmarks):
                landmark_error_episode[i].append(np.array(landmark_error[i][(- 100):]).mean())
                if (len(landmark_error_episode[i]) > LANDMARK_ERROR_WINDOWS):
                    landmark_error_episode[i] = landmark_error_episode[i][1:]
        if BENCHMARK:
            # Tally out-of-world / collision events per agent (sliding windows).
            for ii in range(num_agents):
                agent_outofworld = 0
                landmark_collision = 0
                agent_collision = 0
                for (i, inf) in enumerate(info):
                    agent_outofworld += inf['n'][ii][2]
                    landmark_collision += inf['n'][ii][3]
                    agent_collision += inf['n'][ii][4]
                agent_outofworld_episode[ii].append(agent_outofworld)
                landmark_collision_episode[ii].append(landmark_collision)
                agent_collision_episode[ii].append(agent_collision)
                if (len(agent_outofworld_episode[ii]) > COLLISION_OUTWORLD_WINDOWS):
                    agent_outofworld_episode[ii] = agent_outofworld_episode[ii][1:]
                    landmark_collision_episode[ii] = landmark_collision_episode[ii][1:]
                    agent_collision_episode[ii] = agent_collision_episode[ii][1:]
        # --- TensorBoard logging roughly every 1000 episodes ---
        if (((episode % 1000) < parallel_envs) or (episode == (number_of_episodes - 1))):
            if ((PRE_TRAINED == True) and (episode == PRE_TRAINED_EP)):
                pass
            else:
                avg_rewards = []
                std_rewards = []
                for n in range(num_agents):
                    avg_rewards.append(np.mean(agents_reward[n]))
                    std_rewards.append(np.std(agents_reward[n]))
                for (a_i, avg_rew) in enumerate(avg_rewards):
                    logger.add_scalar(('agent%i/mean_episode_rewards' % a_i), avg_rew, episode)
                    logger.add_scalar(('agent%i/std_episode_rewards' % a_i), std_rewards[a_i], episode)
                    if BENCHMARK:
                        logger.add_scalar(('agent%i/agent_outofworld_episode' % a_i), np.array(agent_outofworld_episode[a_i]).sum(), episode)
                        logger.add_scalar(('agent%i/landmark_collision_episode' % a_i), np.array(landmark_collision_episode[a_i]).sum(), episode)
                        logger.add_scalar(('agent%i/agent_collision_episode' % a_i), np.array(agent_collision_episode[a_i]).sum(), episode)
                if BENCHMARK:
                    for (l_i, err) in enumerate(landmark_error_episode):
                        logger.add_scalar(('landmark%i/mean_episode_error' % l_i), np.array(err).mean(), episode)
                        logger.add_scalar(('landmark%i/std_episode_error' % l_i), np.array(err).std(), episode)
                    if (PROGRESS_BAR == True):
                        timer_bar.set_postfix({'avg_rew': avg_rew, 'avg_error': np.array(err).mean()})
                elif (PROGRESS_BAR == True):
                    timer_bar.set_postfix({'avg_rew': avg_rew})
        # Double the batch size every ~400k episode-steps, capped at 2048.
        if (counter > 400000):
            print('batch_size_was=', BATCH_SIZE)
            BATCH_SIZE *= 2
            if (BATCH_SIZE > 2048):
                BATCH_SIZE = 2048
            print('batch_size_is_now=', BATCH_SIZE)
            counter = 0
        counter += parallel_envs
        # --- periodic checkpointing ('last' always; 'best' when avg reward improves) ---
        if (PRE_TRAINED == True):
            aux_episode = (PRE_TRAINED_EP + 0)
        else:
            aux_episode = 0
        save_info = ((((episode % save_interval) < parallel_envs) and (episode > aux_episode)) or (episode == (number_of_episodes - parallel_envs)))
        save_dict_list = []
        target_entropy_list = []
        log_alpha_list = []
        alpha_list = []
        if save_info:
            for i in range(num_agents):
                if (DNN == 'MADDPG'):
                    save_dict = {'actor_params': maddpg.maddpg_agent[i].actor.state_dict(), 'target_actor_params': maddpg.maddpg_agent[i].target_actor.state_dict(), 'actor_optim_params': maddpg.maddpg_agent[i].actor_optimizer.state_dict(), 'critic_params': maddpg.maddpg_agent[i].critic.state_dict(), 'target_critic_params': maddpg.maddpg_agent[i].target_critic.state_dict(), 'critic_optim_params': maddpg.maddpg_agent[i].critic_optimizer.state_dict()}
                elif (DNN == 'MATD3'):
                    save_dict = {'actor_params': maddpg.matd3_bc_agent[i].actor.state_dict(), 'target_actor_params': maddpg.matd3_bc_agent[i].target_actor.state_dict(), 'actor_optim_params': maddpg.matd3_bc_agent[i].actor_optimizer.state_dict(), 'critic_params': maddpg.matd3_bc_agent[i].critic.state_dict(), 'target_critic_params': maddpg.matd3_bc_agent[i].target_critic.state_dict(), 'critic_optim_params': maddpg.matd3_bc_agent[i].critic_optimizer.state_dict()}
                elif ((DNN == 'MASAC') or (DNN == 'MAHRSAC')):
                    if AUTOMATIC_ENTROPY:
                        save_dict = {'actor_params': maddpg.masac_agent[i].actor.state_dict(), 'actor_optim_params': maddpg.masac_agent[i].actor_optimizer.state_dict(), 'critic_params': maddpg.masac_agent[i].critic.state_dict(), 'target_critic_params': maddpg.masac_agent[i].target_critic.state_dict(), 'critic_optim_params': maddpg.masac_agent[i].critic_optimizer.state_dict(), 'alpha_optim_params': maddpg.masac_agent[i].alpha_optimizer.state_dict()}
                        target_entropy_list.append(maddpg.masac_agent[i].target_entropy)
                        log_alpha_list.append(maddpg.masac_agent[i].log_alpha)
                        alpha_list.append(maddpg.masac_agent[i].alpha)
                    else:
                        save_dict = {'actor_params': maddpg.masac_agent[i].actor.state_dict(), 'actor_optim_params': maddpg.masac_agent[i].actor_optimizer.state_dict(), 'critic_params': maddpg.masac_agent[i].critic.state_dict(), 'target_critic_params': maddpg.masac_agent[i].target_critic.state_dict(), 'critic_optim_params': maddpg.masac_agent[i].critic_optimizer.state_dict()}
                else:
                    break
                save_dict_list.append(save_dict)
            # Empty marker file records the episode number; the real weights go to 'episode_last.pt'.
            torch.save([], os.path.join(model_dir, 'episode_last_{}.pt'.format(episode)))
            torch.save(save_dict_list, os.path.join(model_dir, 'episode_last.pt'))
            buffer.save(os.path.join(model_dir, 'episode_last.file'))
            with open(os.path.join(model_dir, 'episode_reward_last.file'), 'wb') as f:
                pickle.dump(agents_reward, f)
            with open(os.path.join(model_dir, 'episode_lerror_last.file'), 'wb') as f:
                pickle.dump(landmark_error_episode, f)
            with open(os.path.join(model_dir, 'episode_outworld_last.file'), 'wb') as f:
                pickle.dump(agent_outofworld_episode, f)
            with open(os.path.join(model_dir, 'episode_agentcoll_last.file'), 'wb') as f:
                pickle.dump(agent_collision_episode, f)
            with open(os.path.join(model_dir, 'episode_landcoll_last.file'), 'wb') as f:
                pickle.dump(landmark_collision_episode, f)
            with open(os.path.join(model_dir, 'episode_target_entropy_last.file'), 'wb') as f:
                pickle.dump(target_entropy_list, f)
            with open(os.path.join(model_dir, 'episode_log_alpha_last.file'), 'wb') as f:
                pickle.dump(log_alpha_list, f)
            with open(os.path.join(model_dir, 'episode_alpha_last.file'), 'wb') as f:
                pickle.dump(alpha_list, f)
            # NOTE(review): 'avg_rewards' is only defined on logging episodes — this
            # comparison can raise NameError if a save falls before the first log; confirm.
            if (np.mean(avg_rewards) > np.mean(avg_rewards_best)):
                torch.save([], os.path.join(model_dir, 'episode_best_{}.pt'.format(episode)))
                torch.save(save_dict_list, os.path.join(model_dir, 'episode_best.pt'))
                buffer.save(os.path.join(model_dir, 'episode_best.file'))
                with open(os.path.join(model_dir, 'episode_reward_best.file'), 'wb') as f:
                    pickle.dump(agents_reward, f)
                with open(os.path.join(model_dir, 'episode_lerror_best.file'), 'wb') as f:
                    pickle.dump(landmark_error_episode, f)
                with open(os.path.join(model_dir, 'episode_outworld_best.file'), 'wb') as f:
                    pickle.dump(agent_outofworld_episode, f)
                with open(os.path.join(model_dir, 'episode_agentcoll_best.file'), 'wb') as f:
                    pickle.dump(agent_collision_episode, f)
                with open(os.path.join(model_dir, 'episode_landcoll_best.file'), 'wb') as f:
                    pickle.dump(landmark_collision_episode, f)
                with open(os.path.join(model_dir, 'episode_target_entropy_best.file'), 'wb') as f:
                    pickle.dump(target_entropy_list, f)
                with open(os.path.join(model_dir, 'episode_log_alpha_best.file'), 'wb') as f:
                    pickle.dump(log_alpha_list, f)
                with open(os.path.join(model_dir, 'episode_alpha_best.file'), 'wb') as f:
                    pickle.dump(alpha_list, f)
                try:
                    avg_rewards_best = avg_rewards.copy()
                except:
                    pass
            if (RENDER == True):
                imageio.mimsave(os.path.join(model_dir, 'episode-{}.gif'.format(episode)), frames, duration=0.04)
    env.close()
    logger.close()
class black_box_benchmarks(object):
    """Black-box membership-inference benchmarks.

    Each ``*_performance`` argument is a ``(outputs, labels)`` pair, where
    ``outputs`` is an (N, num_classes) array of per-class prediction scores
    (probabilities) and ``labels`` is a length-N integer array of true classes.
    Four per-sample signals are precomputed for the shadow/target x train/test
    splits: correctness, true-class confidence, prediction entropy, and the
    modified entropy of Song & Mittal ("Systematic Evaluation of Privacy Risks
    of Machine Learning Models").
    """

    def __init__(self, shadow_train_performance, shadow_test_performance, target_train_performance, target_test_performance, num_classes):
        """Precompute attack signals for all four data splits."""
        self.num_classes = num_classes
        (self.s_tr_outputs, self.s_tr_labels) = shadow_train_performance
        (self.s_te_outputs, self.s_te_labels) = shadow_test_performance
        (self.t_tr_outputs, self.t_tr_labels) = target_train_performance
        (self.t_te_outputs, self.t_te_labels) = target_test_performance
        # 1 where the top-1 prediction matches the true label, else 0.
        self.s_tr_corr = (np.argmax(self.s_tr_outputs, axis=1) == self.s_tr_labels).astype(int)
        self.s_te_corr = (np.argmax(self.s_te_outputs, axis=1) == self.s_te_labels).astype(int)
        self.t_tr_corr = (np.argmax(self.t_tr_outputs, axis=1) == self.t_tr_labels).astype(int)
        self.t_te_corr = (np.argmax(self.t_te_outputs, axis=1) == self.t_te_labels).astype(int)
        # Model's score assigned to the true class of each sample.
        self.s_tr_conf = np.array([self.s_tr_outputs[(i, self.s_tr_labels[i])] for i in range(len(self.s_tr_labels))])
        self.s_te_conf = np.array([self.s_te_outputs[(i, self.s_te_labels[i])] for i in range(len(self.s_te_labels))])
        self.t_tr_conf = np.array([self.t_tr_outputs[(i, self.t_tr_labels[i])] for i in range(len(self.t_tr_labels))])
        self.t_te_conf = np.array([self.t_te_outputs[(i, self.t_te_labels[i])] for i in range(len(self.t_te_labels))])
        # Shannon entropy of the prediction vector.
        self.s_tr_entr = self._entr_comp(self.s_tr_outputs)
        self.s_te_entr = self._entr_comp(self.s_te_outputs)
        self.t_tr_entr = self._entr_comp(self.t_tr_outputs)
        self.t_te_entr = self._entr_comp(self.t_te_outputs)
        # Label-aware "modified entropy" variant.
        self.s_tr_m_entr = self._m_entr_comp(self.s_tr_outputs, self.s_tr_labels)
        self.s_te_m_entr = self._m_entr_comp(self.s_te_outputs, self.s_te_labels)
        self.t_tr_m_entr = self._m_entr_comp(self.t_tr_outputs, self.t_tr_labels)
        self.t_te_m_entr = self._m_entr_comp(self.t_te_outputs, self.t_te_labels)

    def _log_value(self, probs, small_value=1e-30):
        """Return -log(probs), clamping at *small_value* to avoid log(0)."""
        return (- np.log(np.maximum(probs, small_value)))

    def _entr_comp(self, probs):
        """Per-row Shannon entropy of an (N, C) probability array."""
        return np.sum(np.multiply(probs, self._log_value(probs)), axis=1)

    def _m_entr_comp(self, probs, true_labels):
        """Per-row modified entropy: the true class contributes
        -(1 - p_y) * log(p_y); other classes contribute -p_c * log(1 - p_c)."""
        log_probs = self._log_value(probs)
        reverse_probs = (1 - probs)
        log_reverse_probs = self._log_value(reverse_probs)
        modified_probs = np.copy(probs)
        modified_probs[(range(true_labels.size), true_labels)] = reverse_probs[(range(true_labels.size), true_labels)]
        modified_log_probs = np.copy(log_reverse_probs)
        modified_log_probs[(range(true_labels.size), true_labels)] = log_probs[(range(true_labels.size), true_labels)]
        return np.sum(np.multiply(modified_probs, modified_log_probs), axis=1)

    def _thre_setting(self, tr_values, te_values):
        """Pick the threshold over all observed values that best separates
        train (>= threshold) from test (< threshold) by balanced accuracy."""
        value_list = np.concatenate((tr_values, te_values))
        (thre, max_acc) = (0, 0)
        for value in value_list:
            tr_ratio = (np.sum((tr_values >= value)) / (len(tr_values) + 0.0))
            te_ratio = (np.sum((te_values < value)) / (len(te_values) + 0.0))
            acc = (0.5 * (tr_ratio + te_ratio))
            if (acc > max_acc):
                (thre, max_acc) = (value, acc)
        return thre

    def _mem_inf_via_corr(self):
        """Membership inference using prediction correctness alone: predict
        'member' iff the target model classifies the sample correctly."""
        t_tr_acc = (np.sum(self.t_tr_corr) / (len(self.t_tr_corr) + 0.0))
        t_te_acc = (np.sum(self.t_te_corr) / (len(self.t_te_corr) + 0.0))
        # Balanced attack accuracy: 0.5 * (TPR + TNR) = 0.5 * (tr_acc + (1 - te_acc)).
        mem_inf_acc = (0.5 * ((t_tr_acc + 1) - t_te_acc))
        print('For membership inference attack via correctness, the attack acc is {acc1:.3f}, with train acc {acc2:.3f} and test acc {acc3:.3f}'.format(acc1=mem_inf_acc, acc2=t_tr_acc, acc3=t_te_acc))
        return

    def _mem_inf_thre(self, v_name, s_tr_values, s_te_values, t_tr_values, t_te_values):
        """Threshold attack: per-class thresholds are tuned on the shadow model's
        values, then applied to the target model's values."""
        (t_tr_mem, t_te_non_mem) = (0, 0)
        for num in range(self.num_classes):
            thre = self._thre_setting(s_tr_values[(self.s_tr_labels == num)], s_te_values[(self.s_te_labels == num)])
            t_tr_mem += np.sum((t_tr_values[(self.t_tr_labels == num)] >= thre))
            t_te_non_mem += np.sum((t_te_values[(self.t_te_labels == num)] < thre))
        mem_inf_acc = (0.5 * ((t_tr_mem / (len(self.t_tr_labels) + 0.0)) + (t_te_non_mem / (len(self.t_te_labels) + 0.0))))
        print('For membership inference attack via {n}, the attack acc is {acc:.3f}'.format(n=v_name, acc=mem_inf_acc))
        return

    def _mem_inf_benchmarks(self, all_methods=True, benchmark_methods=()):
        """Run the selected attacks. *benchmark_methods* is an iterable of
        attack names, consulted only when *all_methods* is False.

        Fix: the default was a mutable list literal ([]); an immutable tuple is
        the safe equivalent since the argument is only membership-tested.
        """
        if (all_methods or ('correctness' in benchmark_methods)):
            self._mem_inf_via_corr()
        if (all_methods or ('confidence' in benchmark_methods)):
            self._mem_inf_thre('confidence', self.s_tr_conf, self.s_te_conf, self.t_tr_conf, self.t_te_conf)
        if (all_methods or ('entropy' in benchmark_methods)):
            # Entropies are negated: low entropy (confident prediction) suggests membership.
            self._mem_inf_thre('entropy', (- self.s_tr_entr), (- self.s_te_entr), (- self.t_tr_entr), (- self.t_te_entr))
        if (all_methods or ('modified entropy' in benchmark_methods)):
            self._mem_inf_thre('modified entropy', (- self.s_tr_m_entr), (- self.s_te_m_entr), (- self.t_tr_m_entr), (- self.t_te_m_entr))
        return
def sum_metric(tensor, name):
    """Build a running-sum metric for *tensor*.

    Returns ``(value, update_op)``: *update_op* adds the current tensor value
    to an accumulator variable; *value* reads the accumulated total. The
    accumulator is registered in LOCAL_VARIABLES / METRIC_VARIABLES so it is
    reset alongside other metric state.
    """
    total = tf.compat.v1.Variable(
        initial_value=tf.zeros(shape=(), dtype=tensor.dtype),
        trainable=False,
        collections=[tf.compat.v1.GraphKeys.LOCAL_VARIABLES,
                     tf.compat.v1.GraphKeys.METRIC_VARIABLES],
        name='{}_total'.format(name),
        aggregation=tf.VariableAggregation.SUM,
    )
    update_op = tf.identity(tf.compat.v1.assign_add(total, tensor))
    value = tf.identity(total, name=name)
    return (value, update_op)
class EnvBatch(object):
    """A batch of MatterSim simulators driven in lockstep, one per episode slot."""

    def __init__(self, connectivity_dir, scan_data_dir=None, feat_db=None, batch_size=100):
        self.feat_db = feat_db
        # Fixed camera configuration for every simulator in the batch.
        self.image_w = 640
        self.image_h = 480
        self.vfov = 60
        self.sims = []
        for _ in range(batch_size):
            sim = MatterSim.Simulator()
            if scan_data_dir:
                sim.setDatasetPath(scan_data_dir)
            sim.setNavGraphPath(connectivity_dir)
            # Rendering is disabled: only graph navigation + precomputed features are used.
            sim.setRenderingEnabled(False)
            sim.setDiscretizedViewingAngles(True)
            sim.setCameraResolution(self.image_w, self.image_h)
            sim.setCameraVFOV(math.radians(self.vfov))
            sim.init()
            self.sims.append(sim)

    def _make_id(self, scanId, viewpointId):
        """Compose the unique key for a (scan, viewpoint) pair."""
        return scanId + '_' + viewpointId

    def newEpisodes(self, scanIds, viewpointIds, headings):
        """Start a new episode in each simulator at the given pose (elevation 0)."""
        for idx, (scan, viewpoint, heading) in enumerate(zip(scanIds, viewpointIds, headings)):
            self.sims[idx].newEpisode(scan, viewpoint, heading, 0)

    def getStates(self):
        """Return ``[(feature, state), ...]`` for every simulator, where the
        feature is looked up from the precomputed feature database."""
        feature_states = []
        for sim in self.sims:
            state = sim.getState()
            feature = self.feat_db.get_image_feature(state.scanId, state.location.viewpointId)
            feature_states.append((feature, state))
        return feature_states

    def makeActions(self, actions):
        """Apply one (index, heading, elevation) action per simulator."""
        for idx, (index, heading, elevation) in enumerate(actions):
            self.sims[idx].makeAction(index, heading, elevation)
# NOTE(review): `_task('denoising')` appeared as a bare statement whose return
# value (a class decorator) was discarded, so the task was never registered —
# restored as a decorator. Likewise the @staticmethod/@classmethod/@property
# markers below were evidently stripped: `self.source_dictionary.bos()` in
# load_dataset only works if source_dictionary is a property.
@_task('denoising')
class DenoisingTask(LegacyFairseqTask):
    """Denoising (BART-style) pretraining task: reconstruct the original text
    from a corrupted version (masking, insertion, permutation, rotation)."""

    @staticmethod
    def add_args(parser):
        """Register task-specific command-line arguments."""
        parser.add_argument('data', help='path to data directory')
        parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments per sample for dataset')
        parser.add_argument('--sample-break-mode', default='complete_doc', type=str, help='mode for breaking sentence')
        parser.add_argument('--mask', default=0.0, type=float, help='fraction of words/subwords that will be masked')
        parser.add_argument('--mask-random', default=0.0, type=float, help='instead of using [MASK], use random token this often')
        parser.add_argument('--insert', default=0.0, type=float, help='insert this percentage of additional random tokens')
        parser.add_argument('--permute', default=0.0, type=float, help='take this proportion of subwords and permute them')
        parser.add_argument('--rotate', default=0.5, type=float, help='rotate this proportion of inputs')
        parser.add_argument('--poisson-lambda', default=3.0, type=float, help='randomly shuffle sentences for this proportion of inputs')
        parser.add_argument('--permute-sentences', default=0.0, type=float, help='shuffle this proportion of sentences in all inputs')
        parser.add_argument('--mask-length', default='subword', type=str, choices=['subword', 'word', 'span-poisson'], help='mask length to choose')
        parser.add_argument('--replace-length', default=(- 1), type=int, help='when masking N tokens, replace with 0, 1, or N tokens (use -1 for N)')
        parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', help='max number of tokens in the source sequence')
        parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence')

    def __init__(self, args, dictionary):
        super().__init__(args)
        self.dictionary = dictionary
        self.seed = args.seed
        # Add <mask> to the dictionary; its index drives corruption in DenoisingDataset.
        self.mask_idx = self.dictionary.add_symbol('<mask>')

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Factory: load the dictionary from `args.data` and build the task."""
        dictionary = Dictionary.load(os.path.join(args.data, 'dict.txt'))
        logger.info('dictionary: {} types'.format(len(dictionary)))
        if (not hasattr(args, 'shuffle_instance')):
            args.shuffle_instance = False
        return cls(args, dictionary)

    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load a dataset split, block it into token sequences, and wrap it in
        a DenoisingDataset with the configured corruption settings."""
        paths = utils.split_paths(self.args.data)
        assert (len(paths) > 0)
        # Round-robin over data shards by epoch.
        data_path = paths[((epoch - 1) % len(paths))]
        split_path = os.path.join(data_path, split)
        dataset = data_utils.load_indexed_dataset(split_path, self.dictionary, self.args.dataset_impl, combine=combine)
        if (dataset is None):
            raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path))
        dataset = StripTokenDataset(dataset, self.dictionary.eos())
        # Reserve 2 positions for the bos/eos tokens appended below.
        dataset = TokenBlockDataset(dataset, dataset.sizes, (self.args.tokens_per_sample - 2), pad=self.dictionary.pad(), eos=self.dictionary.eos(), break_mode=self.args.sample_break_mode, document_sep_len=0)
        dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
        dataset = AppendTokenDataset(dataset, self.source_dictionary.eos())
        mask_whole_words = (get_whole_word_mask(self.args, self.source_dictionary) if (self.args.mask_length != 'subword') else None)
        self.datasets[split] = DenoisingDataset(dataset, dataset.sizes, self.dictionary, self.mask_idx, mask_whole_words, shuffle=self.args.shuffle_instance, seed=self.seed, args=self.args)
        logger.info('Split: {0}, Loaded {1} samples of denoising_dataset'.format(split, len(self.datasets[split])))

    def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs):
        """Wrap raw source tokens into the nested-dict format used at inference."""
        pad = self.source_dictionary.pad()
        eos = self.source_dictionary.eos()
        src_dataset = TokenBlockDataset(src_tokens, src_lengths, block_size=(self.args.tokens_per_sample - 2), pad=pad, eos=eos, break_mode=self.args.sample_break_mode, document_sep_len=0)
        prev_output_tokens = PrependTokenDataset(StripTokenDataset(src_dataset, eos), eos)
        src_dataset = PadDataset(src_dataset, pad_idx=pad, left_pad=False)
        return NestedDictionaryDataset({'id': IdDataset(), 'net_input': {'src_tokens': src_dataset, 'src_lengths': NumelDataset(src_dataset, reduce=False), 'prev_output_tokens': PadDataset(prev_output_tokens, pad_idx=pad, left_pad=False)}, 'target': src_dataset}, sizes=[np.array(src_lengths)])

    def max_positions(self):
        """(max source length, max target length)."""
        return (self.args.max_source_positions, self.args.max_target_positions)

    @property
    def source_dictionary(self):
        return self.dictionary

    @property
    def target_dictionary(self):
        return self.dictionary
def parse_args():
    """Command-line options for converting 3DMatch RGB-D scans into TSDF fragments."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('--dataset_root', default='data/3DMatch/rgbd')
    add('--out_root', default='data/3DMatch/rgbd_fragments/')
    add('--depth_scale', type=float, default=1000.0)
    add('--depth_trunc', type=float, default=6.0)
    add('--frames_per_frag', type=int, default=50)
    add('--height', type=int, default=480)
    add('--threads', type=int, default=1)
    add('--tsdf_cubic_size', type=float, default=3.0)
    add('--width', type=int, default=640)
    return parser.parse_args()
class colors():
    """ANSI SGR escape codes for coloured / styled terminal output.

    Use as string prefixes and terminate with ENDC, e.g.
    ``colors.RED + 'error' + colors.ENDC``.
    """
    RED = '\033[31;1m'
    GREEN = '\033[32;1m'
    YELLOW = '\033[33;1m'
    BLUE = '\033[34;1m'
    MAGENTA = '\033[35;1m'
    CYAN = '\033[36;1m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    ENDC = '\033[0m'  # reset all attributes
def replan(agent, destination, origin_map):
    """Re-route `agent` to `destination` and return the map with the new route drawn."""
    target = destination.location
    agent.set_destination((target.x, target.y, target.z))
    return draw_route(agent, destination, origin_map)
def word_tokenize(text, language='english'):
    """Tokenize `text` with the Treebank word tokenizer.

    Bug fix: the original decoded on Python 3 — where `str` has no
    ``.decode`` and the call raised AttributeError — and skipped decoding on
    Python 2. UTF-8 decoding is only needed (and only possible) for byte
    strings on Python 2, so it is applied there instead.

    The `language` parameter is kept for interface compatibility; it is not
    used by the underlying tokenizer here.
    """
    if (sys.version_info[0] < 3) and isinstance(text, bytes):
        text = text.decode('UTF-8')
    return [token for token in _treebank_word_tokenize(text)]
def parse_paired_list_value(value):
    """Parse a "count:name|count:name|..." string into [(name, int), ...].

    Falls back to parse_nullable_value when the multi-pair pattern does not match.
    """
    if re.match(MULTI_DEPS_PATTERN, value):
        pairs = []
        for part in value.split('|'):
            count, name = part.split(':', 1)
            pairs.append((name, parse_int_value(count)))
        return pairs
    return parse_nullable_value(value)
class StringIndex():
    """A (string value -> integer id) mapping backed by a Spark DataFrame
    holding at least the columns `col_name` and `id`."""

    def __init__(self, df: 'SparkDataFrame', col_name: str) -> None:
        cols = df.columns
        invalidInputError((len(cols) >= 2), 'StringIndex should have >= 2 columns: col_name, id and other columns')
        invalidInputError(('id' in cols), 'id should be a column of the DataFrame')
        invalidInputError((col_name in cols), (col_name + ' should be a column of the DataFrame'))
        self.col_name = col_name
        self.df = df

    def broadcast(self) -> None:
        """Mark the underlying DataFrame for broadcast joins."""
        from pyspark.sql.functions import broadcast
        self.df = broadcast(self.df)

    # Fix: `from_dict` takes `cls` and is used as a factory, but the
    # @classmethod decorator was missing — `cls` would have received the
    # `indices` argument when called as StringIndex.from_dict(...).
    @classmethod
    def from_dict(cls, indices: Dict[str, int], col_name: str) -> 'StringIndex':
        """Build a StringIndex from a plain {string: id} dict."""
        spark = OrcaContext.get_spark_session()
        if (not isinstance(indices, dict)):
            invalidInputError(False, ('indices should be dict, but get ' + indices.__class__.__name__))
        if (not col_name):
            invalidInputError(False, 'col_name should be str, but get None')
        if (not isinstance(col_name, str)):
            invalidInputError(False, ('col_name should be str, but get ' + col_name.__class__.__name__))
        new_indices = map((lambda x: {col_name: x[0], 'id': x[1]}), indices.items())
        schema = StructType([StructField(col_name, StringType(), False), StructField('id', IntegerType(), False)])
        df = spark.createDataFrame((Row(**x) for x in new_indices), schema=schema)
        return cls(df, col_name)

    def to_dict(self) -> Dict[str, int]:
        """Collect the DataFrame to the driver as a {string: id} dict."""
        cols = self.df.columns
        index_id = cols.index('id')
        col_id = cols.index(self.col_name)
        rows = self.df.collect()
        res_dict = {}
        for row in rows:
            res_dict[row[col_id]] = row[index_id]
        return res_dict
class EncModule(nn.Module):
    """Context Encoding module: learns a codebook over spatial features and
    produces a channel-wise gate (plus an optional SE-loss head)."""

    def __init__(self, in_channels, nclass, ncodes=32, se_loss=True, norm_layer=nn.BatchNorm2d, norm_kwargs=None):
        super(EncModule, self).__init__()
        self.se_loss = se_loss
        norm_kwargs = {} if norm_kwargs is None else norm_kwargs
        self.encoding = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, 1, bias=False),
            norm_layer(in_channels, **norm_kwargs),
            nn.ReLU(True),
            Encoding(D=in_channels, K=ncodes),
            nn.BatchNorm1d(ncodes),
            nn.ReLU(True),
            Mean(dim=1),
        )
        self.fc = nn.Sequential(nn.Linear(in_channels, in_channels), nn.Sigmoid())
        if self.se_loss:
            self.selayer = nn.Linear(in_channels, nclass)

    def forward(self, x):
        encoded = self.encoding(x)
        batch, channels, _, _ = x.size()
        # Channel gate in [0, 1], broadcast over the spatial dims.
        gate = self.fc(encoded).view(batch, channels, 1, 1)
        outputs = [F.relu_(x + x * gate)]
        if self.se_loss:
            outputs.append(self.selayer(encoded))
        return tuple(outputs)
def simpleTokenize_software(text):
    """Tokenize `text` by protecting spans matched by the module-level
    `Protected` regex and whitespace-splitting everything in between, then
    applying `splitToken` to each resulting token."""
    # Split off edge punctuation first so protected spans can match cleanly.
    splitPunctText = splitEdgePunct_software(text)
    textLength = len(splitPunctText)
    # bads: the protected spans kept verbatim (each wrapped in a one-element
    # list); badSpans: their (start, end) character offsets, in order.
    bads = []
    badSpans = []
    for match in Protected.finditer(splitPunctText):
        if (match.start() != match.end()):
            bads.append([splitPunctText[match.start():match.end()]])
            badSpans.append((match.start(), match.end()))
    # Flat boundary list: 0, s1, e1, s2, e2, ..., textLength. Pairs at even
    # positions delimit the unprotected "good" regions between spans.
    indices = [0]
    for (first, second) in badSpans:
        indices.append(first)
        indices.append(second)
    indices.append(textLength)
    # Whitespace-split each good region.
    splitGoods = []
    for i in range(0, len(indices), 2):
        goodstr = splitPunctText[indices[i]:indices[(i + 1)]]
        splitstr = goodstr.strip().split(' ')
        splitGoods.append(splitstr)
    # Interleave good tokens and protected spans back into original order,
    # dropping empty strings (addAllnonempty filters them).
    zippedStr = []
    for i in range(len(bads)):
        zippedStr = addAllnonempty(zippedStr, splitGoods[i])
        zippedStr = addAllnonempty(zippedStr, bads[i])
    # There is always one more good region than there are protected spans.
    zippedStr = addAllnonempty(zippedStr, splitGoods[len(bads)])
    # Final pass: apply token-level splitting rules to each token.
    splitStr = []
    for tok in zippedStr:
        splitStr.extend(splitToken(tok))
    zippedStr = splitStr
    return zippedStr
def load_svhn(path_train, path_test):
    """Load SVHN train/test .mat files.

    Returns (train_images, train_labels, test_images, test_labels) with
    images as float32 NCHW arrays and labels one-hot encoded.
    """
    def _prepare(mat):
        # .mat stores HWCN; move to NCHW and cast to float32.
        images = mat['X'].transpose(3, 2, 0, 1).astype(np.float32)
        labels = dense_to_one_hot(mat['y'])
        return images, labels

    train_im, train_lbl = _prepare(loadmat(path_train))
    test_im, test_lbl = _prepare(loadmat(path_test))
    return (train_im, train_lbl, test_im, test_lbl)
def angle_distance(theta_a: np.ndarray, theta_b: np.ndarray) -> float:
    """Mean absolute angular difference between two angle arrays.

    Each elementwise difference is wrapped into (-pi, pi] via
    atan2(sin, cos) before taking the absolute value, so the result is
    insensitive to 2*pi offsets.
    """
    delta = theta_a - theta_b
    wrapped = np.arctan2(np.sin(delta), np.cos(delta))
    return np.abs(wrapped).mean()
def idletime(idletime, frequency):
    """Fit a line to normalized idle-time deltas.

    Each sample is normalized as ``idletime[i] * frequency[i] / 1530 / 29``
    (1530 and 29 look like a device clock and unit count — unconfirmed), the
    first sample is subtracted from the rest, and a degree-1 polynomial is
    fitted over sample positions. Returns the (slope, intercept) array from
    np.polyfit.

    Generalized: the original hard-coded ``x = np.array([2, 3, 4, 5])`` and so
    only worked for inputs of exactly 5 samples; the positions are now derived
    from the input length (identical result for length 5).
    """
    n = len(idletime)
    normalized = [(idletime[i] * frequency[i]) / 1530 / 29 for i in range(n)]
    # Deltas relative to the first sample; one fewer point than inputs.
    deltas = np.array([v - normalized[0] for v in normalized[1:]])
    positions = np.arange(2, n + 1)
    return np.polyfit(positions, deltas, 1)
def IS_FILE_NAME(token):
    """Return True when `token` matches the module-level `file_rule` pattern."""
    return bool(file_rule.findall(token))
def get_spectrum_hdulist(HDUlist):
    """Extract (flux, error, mask, flux_header) arrays from an HDU list.

    Flux and error extensions are searched by the module-level name lists;
    when several candidates exist, the last match wins (no early break, as in
    the original). 'IVAR' extensions are converted to sigma via 1/sqrt, 'VAR'
    via sqrt. A missing 'MASK' extension yields an all-ones boolean mask.

    Raises FormatError when no flux or no error extension is found.
    """
    data = data_hdr = None
    found_flux = False
    for extname in flux_HDU_names:
        if extname in HDUlist:
            data = HDUlist[extname].data
            data_hdr = HDUlist[extname].header
            found_flux = True
    if not found_flux:
        raise FormatError('Could not find Flux Array')
    error = None
    found_error = False
    for extname in error_HDU_names:
        if extname in HDUlist:
            if extname == 'IVAR':
                # Inverse variance -> sigma.
                error = (1.0 / np.sqrt(HDUlist[extname].data))
            elif extname == 'VAR':
                # Variance -> sigma.
                error = np.sqrt(HDUlist[extname].data)
            else:
                error = HDUlist[extname].data
            found_error = True
    if not found_error:
        raise FormatError('Could not find Error Array')
    if 'MASK' in HDUlist:
        mask = HDUlist['MASK'].data
    else:
        mask = np.ones_like(data, dtype=bool)
    return (data, error, mask, data_hdr)
def compute_additional_loss(result_index, row_width, row_height, widget_list, max_w_list, min_w_list, max_h_list, min_h_list, add_index=0):
    """Quadratic penalty for layout rows that violate per-widget size bounds.

    For each widget (indexed through `result_index`, offset by `add_index`),
    adds ``10 * weight * overshoot**2`` whenever its row width or row height
    falls outside the widget's [min, max] range. Returns the summed penalty.
    """
    loss = 0
    for i, row in enumerate(result_index):
        for j, raw_index in enumerate(row):
            idx = raw_index + add_index
            penalty_scale = 10 * widget_list[idx].weight
            width = row_width[i][j]
            height = row_height[i]
            if width > max_w_list[idx]:
                loss += penalty_scale * ((width - max_w_list[idx]) ** 2)
            elif width < min_w_list[idx]:
                loss += penalty_scale * ((width - min_w_list[idx]) ** 2)
            if height > max_h_list[idx]:
                loss += penalty_scale * ((height - max_h_list[idx]) ** 2)
            elif height < min_h_list[idx]:
                loss += penalty_scale * ((height - min_h_list[idx]) ** 2)
    return loss
def reset_decola_cls_test(model, cls_path, num_classes):
    """Swap the model's zero-shot classifier weights for testing.

    `cls_path` is either a path to a .npy embedding file or an already-built
    weight tensor. The class-agnostic "object" embedding row is appended, the
    weights are optionally L2-normalized (when the first class_embed head has
    norm_weight set), and every decoder classification head is updated along
    with the class counts.
    """
    model.num_classes = num_classes
    model.detr.num_classes = num_classes
    model.detr.transformer.num_classes = num_classes
    # Idiom fix: isinstance instead of `type(...) == str` (also accepts str subclasses).
    if isinstance(cls_path, str):
        print('Resetting zs_weight', cls_path)
        zs_weight = torch.tensor(np.load(cls_path), dtype=torch.float32)
    else:
        zs_weight = cls_path
    # Append the class-agnostic "object" embedding row.
    zs_obj_weight_path = 'datasets/metadata/lvis_v1_clip_a+object.npy'
    zs_obj_weight = torch.tensor(np.load(zs_obj_weight_path), dtype=torch.float32)
    zs_weight = torch.cat([zs_weight, zs_obj_weight], dim=0)
    if model.detr.transformer.decoder.class_embed[0].norm_weight:
        zs_weight = F.normalize(zs_weight, p=2, dim=1)
        print(f'custom weight normalized. (shape: {zs_weight.shape})', flush=True)
    zs_weight = zs_weight.to(model.device)
    for k in range(len(model.detr.transformer.decoder.class_embed)):
        model.detr.transformer.decoder.class_embed[k].zs_weight = zs_weight
        model.detr.transformer.decoder.class_embed[k].num_classes = num_classes
class CellPinCombArc(BBAStruct):
    # Combinational timing arc for a single source pin of a cell.
    # NOTE(review): `field(default_factory=...)` only takes effect under a
    # @dataclass decorator, which is not visible here — it may have been
    # stripped, or BBAStruct may apply dataclass machinery; confirm upstream.
    from_pin: int
    delay: TimingValue = field(default_factory=TimingValue)

    def serialise_lists(self, context: str, bba: BBAWriter):
        # No variable-length payloads for this struct.
        pass

    def serialise(self, context: str, bba: BBAWriter):
        # NOTE(review): from_pin is annotated `int` but `.index` is read here,
        # so it is presumably an id/name object rather than a plain int — verify.
        bba.u32(self.from_pin.index)
        self.delay.serialise(context, bba)
class TFGPT2PreTrainedModel(metaclass=DummyObject):
    """Placeholder for the real class when TensorFlow is not installed.

    Instantiation is delegated to `requires_backends`, which fails fast with
    an instruction to install the missing 'tf' backend.
    """
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
class FMClassification(BaseFMClassifier):
    """Factorization Machine for binary classification trained with SGD.

    `l2_reg`, when given, overrides both `l2_reg_w` (linear weights) and
    `l2_reg_V` (pairwise factors).
    """

    def __init__(self, n_iter=100, init_stdev=0.1, rank=8, random_state=123, l2_reg_w=0, l2_reg_V=0, l2_reg=None, step_size=0.1):
        super(FMClassification, self).__init__(n_iter=n_iter, init_stdev=init_stdev, rank=rank, random_state=random_state)
        # A single l2_reg value takes precedence over the per-part settings.
        if (l2_reg is not None):
            self.l2_reg_V = l2_reg
            self.l2_reg_w = l2_reg
        else:
            self.l2_reg_w = l2_reg_w
            self.l2_reg_V = l2_reg_V
        self.l2_reg = l2_reg
        self.step_size = step_size
        self.task = 'classification'

    def fit(self, X, y):
        """Fit the model; `y` must contain exactly two classes.

        Raises ValueError when more (or fewer) than two classes are present.
        """
        y = _validate_class_labels(y)
        self.classes_ = np.unique(y)
        if (len(self.classes_) != 2):
            raise ValueError(('This solver only supports binary classification but the data contains class: %r' % self.classes_))
        # Remap labels to {-1, +1}.
        y_train = y.copy()
        i_class1 = (y_train == self.classes_[0])
        y_train[i_class1] = (- 1)
        y_train[(~ i_class1)] = 1
        check_consistent_length(X, y)
        y = y.astype(np.float64)
        # NOTE(review): X is transposed to feature-major before fitting, and
        # the remapped `y_train` is never passed on (the original `y` goes to
        # the solver) — looks suspicious; verify against ffm.ffm_sgd_fit's
        # expected layout and label convention before changing.
        X = X.T
        X = check_array(X, accept_sparse='csc', dtype=np.float64)
        (self.w0_, self.w_, self.V_) = ffm.ffm_sgd_fit(self, X, y)
        return self
def fft2(inp, norm=None):
    """2-D FFT over the two axes before the last one, with optional 'ortho'
    scaling (divides by sqrt of the transform size).

    NOTE(review): indexing shape[-3:-1] suggests the last axis holds the
    real/imaginary pair — confirm against fft2_op's convention.
    """
    spatial_shape = inp.shape[(- 3):(- 1)]
    norm_mode = _unitary(norm)
    scaling = 1
    if norm_mode == 'ortho':
        scaling = T.sqrt(spatial_shape.prod().astype(inp.dtype))
    return fft2_op(inp, spatial_shape) / scaling
class Identity(torch.nn.Module):
    """No-op module: forward returns its input unchanged."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x
def path_to_name(path):
    """Return the basename of a '/'-separated path without its final extension.

    Names with no dot are returned as-is; only the last '.'-suffix is dropped,
    so 'a.tar.gz' -> 'a.tar'. A leading-dot name like '.hidden' yields ''.
    """
    basename = path.rsplit('/', 1)[-1]
    stem, dot, _ext = basename.rpartition('.')
    return stem if dot else basename
class _RCParallelOpFirstBound():
    """Partially-applied parallel composition: holds the left operand so a
    later ``bound * second`` builds the full parallel-composition expression."""

    def __init__(self, discardNonchemical: bool, first: RCExpExp) -> None:
        self.discardNonchemical = discardNonchemical
        self.first = first

    def __mul__(self, second: RCExpExp) -> RCExpExp:
        lhs = rcExp(self.first)
        rhs = rcExp(second)
        return RCExpComposeParallel(lhs, rhs, self.discardNonchemical)
def compute_context_embeddings(model, eval_dataset, opt):
    """Encode every video (and optional subtitle) context in `eval_dataset`
    into pre-allocated embedding matrices on `opt.device`.

    Returns a dict with the accumulated per-video metadata list and the
    encoded video / subtitle embedding tensors (None for a modality the
    model does not use).

    NOTE(review): runs under model.eval() but not torch.no_grad() — gradients
    are still tracked through encode_context; confirm whether that is intended.
    """
    model.eval()
    eval_dataset.set_data_mode('context')
    # shuffle=False keeps batch order aligned with the slice writes below.
    context_eval_loader = DataLoader(eval_dataset, collate_fn=retrieval_collate, batch_size=opt.eval_ctx_bsz, num_workers=opt.num_workers, shuffle=False, pin_memory=opt.pin_memory)
    n_videos = len(eval_dataset)
    eval_ctx_bsz = opt.eval_ctx_bsz
    global_meta_list = []
    (global_video_embedding, global_sub_embedding) = (None, None)
    # Pre-allocate output buffers so each batch can be written by slice.
    if model.use_video:
        global_video_embedding = torch.empty((n_videos, model.config.output_size), dtype=torch.float32, device=opt.device)
    if model.use_sub:
        global_sub_embedding = torch.empty((n_videos, model.config.output_size), dtype=torch.float32, device=opt.device)
    for (idx, batch) in tqdm(enumerate(context_eval_loader), desc='Computing context embedding for videos', total=len(context_eval_loader)):
        # batch[0]: per-video metadata; batch[1]: model input tensors.
        global_meta_list.extend(batch[0])
        model_inputs = prepare_batch_inputs(batch[1], device=opt.device, non_blocking=opt.pin_memory)
        (encoded_video, encoded_sub) = model.encode_context(model_inputs['video_feat'], model_inputs['sub_feat'])
        # Write this batch's rows into the pre-allocated buffers.
        if model.use_video:
            global_video_embedding[(idx * eval_ctx_bsz):((idx + 1) * eval_ctx_bsz)] = encoded_video
        if model.use_sub:
            global_sub_embedding[(idx * eval_ctx_bsz):((idx + 1) * eval_ctx_bsz)] = encoded_sub
        # Debug mode: stop early after a fixed number of batches.
        if (opt.debug and (idx == 100)):
            break
    return dict(video_meta=global_meta_list, encoded_video=global_video_embedding, encoded_sub=global_sub_embedding)
class UNet_Attention(nn.Module):
    """U-Net with attention gates on the skip connections.

    A 5-level encoder (channels 64..1024, 2x max-pool between levels) feeds a
    decoder that upsamples, gates the corresponding encoder feature map with
    an Attention_block, concatenates, and convolves back down. A final 1x1
    conv maps to `num_classes` channels.
    """

    def __init__(self, input_dim=3, num_classes=1):
        super(UNet_Attention, self).__init__()
        self.input_dim = input_dim
        self.num_classes = num_classes
        # Channel widths per encoder level: 64, 128, 256, 512, 1024.
        n1 = 64
        filters = [n1, (n1 * 2), (n1 * 4), (n1 * 8), (n1 * 16)]
        self.Maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Maxpool4 = nn.MaxPool2d(kernel_size=2, stride=2)
        # Encoder convolution blocks.
        self.Conv1 = conv_block(input_dim, filters[0])
        self.Conv2 = conv_block(filters[0], filters[1])
        self.Conv3 = conv_block(filters[1], filters[2])
        self.Conv4 = conv_block(filters[2], filters[3])
        self.Conv5 = conv_block(filters[3], filters[4])
        # Decoder: upsample, attention-gate the skip, then conv after concat.
        self.Up5 = up_conv(filters[4], filters[3])
        self.Att5 = Attention_block(F_g=filters[3], F_l=filters[3], F_int=filters[2])
        self.Up_conv5 = conv_block(filters[4], filters[3])
        self.Up4 = up_conv(filters[3], filters[2])
        self.Att4 = Attention_block(F_g=filters[2], F_l=filters[2], F_int=filters[1])
        self.Up_conv4 = conv_block(filters[3], filters[2])
        self.Up3 = up_conv(filters[2], filters[1])
        self.Att3 = Attention_block(F_g=filters[1], F_l=filters[1], F_int=filters[0])
        self.Up_conv3 = conv_block(filters[2], filters[1])
        self.Up2 = up_conv(filters[1], filters[0])
        self.Att2 = Attention_block(F_g=filters[0], F_l=filters[0], F_int=32)
        self.Up_conv2 = conv_block(filters[1], filters[0])
        # 1x1 projection to the output class channels.
        self.Conv = nn.Conv2d(filters[0], num_classes, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        # Encoder path (e1..e5), downsampling between levels.
        e1 = self.Conv1(x)
        e2 = self.Maxpool1(e1)
        e2 = self.Conv2(e2)
        e3 = self.Maxpool2(e2)
        e3 = self.Conv3(e3)
        e4 = self.Maxpool3(e3)
        e4 = self.Conv4(e4)
        e5 = self.Maxpool4(e4)
        e5 = self.Conv5(e5)
        # Decoder path: upsample, attention-gate the encoder skip, concat, conv.
        d5 = self.Up5(e5)
        x4 = self.Att5(g=d5, x=e4)
        d5 = torch.cat((x4, d5), dim=1)
        d5 = self.Up_conv5(d5)
        d4 = self.Up4(d5)
        x3 = self.Att4(g=d4, x=e3)
        d4 = torch.cat((x3, d4), dim=1)
        d4 = self.Up_conv4(d4)
        d3 = self.Up3(d4)
        x2 = self.Att3(g=d3, x=e2)
        d3 = torch.cat((x2, d3), dim=1)
        d3 = self.Up_conv3(d3)
        d2 = self.Up2(d3)
        x1 = self.Att2(g=d2, x=e1)
        d2 = torch.cat((x1, d2), dim=1)
        d2 = self.Up_conv2(d2)
        out = self.Conv(d2)
        return out
# NOTE(review): this appeared as a bare leading-dot `.parametrize(...)`
# statement — a SyntaxError — evidently a stripped `@pytest.mark.parametrize`
# decorator; restored as such.
@pytest.mark.parametrize('X_types,apply_to,error', [({}, ['duck'], True), ({'duck': 'B'}, ['duck'], False), ({}, ['continuous'], False), (None, ['continuous'], False), ({'continuous': 'A', 'cat': 'B'}, ['continuous', 'cat'], False), ({'continuous': 'A', 'cat': 'B'}, ['continuous'], True), ({'continuous': 'A', 'cat': 'B'}, ['*'], False), ({'continuous': 'A', 'cat': 'B'}, ['.*'], False)])
def test_X_types_to_pattern_errors(X_types: Dict[str, List[str]], apply_to: ColumnTypesLike, error: bool) -> None:
    """X_types not covered by `apply_to` must raise; covered ones must pass."""
    pipeline_creator = PipelineCreator(problem_type='classification').add('zscore', apply_to=apply_to)
    if error:
        with pytest.raises(ValueError, match='Extra X_types were provided'):
            pipeline_creator._check_X_types(X_types)
    else:
        pipeline_creator._check_X_types(X_types)
class AutoConfig():
    """Factory that picks the right config class from a model type or a
    pretrained checkpoint name; not meant to be instantiated directly."""

    def __init__(self):
        raise EnvironmentError('AutoConfig is designed to be instantiated using the `AutoConfig.from_pretrained(pretrained_model_name_or_path)` method.')

    # Fix: this method takes `cls` and is called as a factory, but the
    # @classmethod decorator was missing.
    @classmethod
    def for_model(cls, model_type: str, *args, **kwargs):
        """Instantiate the config class registered for `model_type`."""
        if (model_type in CONFIG_MAPPING):
            config_class = CONFIG_MAPPING[model_type]
            return config_class(*args, **kwargs)
        raise ValueError('Unrecognized model identifier: {}. Should contain one of {}'.format(model_type, ', '.join(CONFIG_MAPPING.keys())))

    # NOTE(review): `_list_option_in_docstrings()` appeared as a bare class-body
    # statement whose returned decorator was discarded — restored as a
    # decorator, together with the missing @classmethod.
    @classmethod
    @_list_option_in_docstrings()
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load a config for a checkpoint: use its `model_type` key when
        present, otherwise fall back to matching a known pattern in the name."""
        (config_dict, _) = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if ('model_type' in config_dict):
            config_class = CONFIG_MAPPING[config_dict['model_type']]
            return config_class.from_dict(config_dict, **kwargs)
        else:
            # Fall back: match a registered model-type substring in the name.
            for (pattern, config_class) in CONFIG_MAPPING.items():
                if (pattern in str(pretrained_model_name_or_path)):
                    return config_class.from_dict(config_dict, **kwargs)
        raise ValueError('Unrecognized model in {}. Should have a `model_type` key in its config.json, or contain one of the following strings in its name: {}'.format(pretrained_model_name_or_path, ', '.join(CONFIG_MAPPING.keys())))
def load_data(store, name, name_buys):
    """Load click and buy events from an HDF5 store and align buys to the
    session ids derived from the click data.

    Returns (data, buys): click events with SessionId/ItemId/Time/UserId
    columns sorted by session and time, and buy events restricted to known
    sessions, stamped one second after their session's timestamp.

    NOTE(review): the HDFStore is never closed — consider a context manager.
    """
    store = pd.HDFStore(store)
    data = store[name]
    buys = store[name_buys]
    # Derive the canonical columns; the original columns are deleted below.
    del data['Time']
    data['SessionId'] = data['SessionDay']
    data['ItemId'] = data['Item']
    data['Time'] = data['TimeObject'].apply((lambda t: t.timestamp()))
    data['UserId'] = data['User']
    data['TimeO'] = data['TimeObject']
    del data['Session']
    del data['SessionDay']
    del data['Item']
    del data['User']
    del data['TimeObject']
    data.sort_values(['SessionId', 'Time'], inplace=True)
    # Build lookup maps: (day, user) -> session id, and session -> timestamp.
    # NOTE(review): positional row indexing (row[0]=SessionId, row[3]=UserId,
    # row[4]=TimeO) depends on the exact column order produced above — fragile.
    sessionid_map = {}
    sessiontime_map = {}
    for row in data.itertuples(index=False):
        (user, session, time) = (row[3], row[0], row[4])
        key = time.date()
        if (not (key in sessionid_map)):
            sessionid_map[key] = {}
            sessiontime_map[key] = {}
        if (not (user in sessionid_map[key])):
            sessionid_map[key][user] = {}
            sessiontime_map[key][user] = {}
        # NOTE(review): the empty dicts initialised just above are immediately
        # overwritten, and sessiontime_map is keyed by session (not day/user)
        # on the next line — the per-day sessiontime entries appear unused.
        sessionid_map[key][user] = session
        sessiontime_map[session] = time.timestamp()
    del data['TimeO']
    # Attach a SessionId to each buy via its (day, user); -1 when unknown.
    buys['SessionId'] = buys.apply((lambda row: (sessionid_map[row['Day'].date()][row['User']] if ((row['Day'].date() in sessionid_map) and (row['User'] in sessionid_map[row['Day'].date()])) else (- 1))), axis=1)
    buys['ItemId'] = buys['Item']
    buys['TimeO'] = buys['Day']
    del buys['Time']
    # Keep only buys that map to a known (positive) session id.
    buys = buys[(buys['SessionId'] > 0)]
    # Place each buy one second after its session's timestamp.
    buys['Time'] = buys.apply((lambda row: (sessiontime_map[row['SessionId']] + 1)), axis=1)
    buys['UserId'] = buys['User']
    del buys['User']
    del buys['Item']
    del buys['Day']
    del buys['TimeO']
    return (data, buys)
def IPOT_torch(C, n, m, miu, nu, beta=0.5):
    """IPOT (Inexact Proximal point Optimal Transport) on GPU.

    Iteratively refines a transport plan T (n x m) between marginals `miu`
    and `nu` given cost matrix C, with proximal weight `beta`. Runs 20 outer
    iterations with one Sinkhorn-style inner update each; returns the
    detached plan.
    """
    sigma = torch.ones(int(m), 1).float().cuda() / m
    plan = torch.ones(n, m).cuda()
    kernel = torch.exp(-C / beta).float()
    for _ in range(20):
        scaled = kernel * plan
        for _ in range(1):
            delta = miu / torch.squeeze(torch.matmul(scaled, sigma))
            sigma = torch.unsqueeze(nu, 1) / torch.matmul(torch.transpose(scaled, 0, 1), torch.unsqueeze(delta, 1))
        plan = torch.unsqueeze(delta, 1) * scaled * sigma.transpose(1, 0)
    return plan.detach()
def get_dataset(*args):
    """Load each named dataset and concatenate them into one SequenceList."""
    combined = SequenceList()
    for dataset_name in args:
        combined.extend(load_dataset(dataset_name))
    return combined
def pose_decoder_mlp(params):
    """Build a single linear pose decoder (model_dim -> pose_dim).

    Weights are initialized with normal or xavier init depending on
    params['init_fn'].
    """
    if params['init_fn'] == 'normal_init':
        chosen_init = utils.normal_init_
    else:
        chosen_init = utils.xavier_init_
    decoder = nn.Linear(params['model_dim'], params['pose_dim'])
    utils.weight_init(decoder, init_fn_=chosen_init)
    return decoder
# NOTE(review): these docstring-helper calls appeared as bare statements whose
# returned decorators were discarded; restored as decorators on the class and
# on forward, matching the transformers docstring-utility convention.
@_start_docstrings('\n VAN Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ', VAN_START_DOCSTRING)
class VanForImageClassification(VanPreTrainedModel):
    """VAN backbone with a linear image-classification head."""

    def __init__(self, config):
        super().__init__(config)
        self.van = VanModel(config)
        # Identity head when num_labels == 0 (feature extraction only).
        self.classifier = (nn.Linear(config.hidden_sizes[(- 1)], config.num_labels) if (config.num_labels > 0) else nn.Identity())
        self.post_init()

    @_start_docstrings_to_model_forward(VAN_INPUTS_DOCSTRING)
    @_code_sample_docstrings(checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT)
    def forward(self, pixel_values: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[(Tuple, ImageClassifierOutputWithNoAttention)]:
        """Run the backbone, classify the pooled output, and optionally
        compute a loss whose kind is inferred from num_labels / label dtype."""
        return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
        outputs = self.van(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = (outputs.pooler_output if return_dict else outputs[1])
        logits = self.classifier(pooled_output)
        loss = None
        if (labels is not None):
            # Infer the problem type once and cache it on the config.
            if (self.config.problem_type is None):
                if (self.config.num_labels == 1):
                    self.config.problem_type = 'regression'
                elif ((self.config.num_labels > 1) and ((labels.dtype == torch.long) or (labels.dtype == torch.int))):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if (self.config.problem_type == 'regression'):
                loss_fct = MSELoss()
                if (self.config.num_labels == 1):
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif (self.config.problem_type == 'single_label_classification'):
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view((- 1), self.config.num_labels), labels.view((- 1)))
            elif (self.config.problem_type == 'multi_label_classification'):
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if (not return_dict):
            output = ((logits,) + outputs[2:])
            return (((loss,) + output) if (loss is not None) else output)
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
def dec_prior_uniform(samples, min_value=((- np.pi) / 2.0), max_value=(np.pi / 2.0)):
    """Uniform-declination prior support mask.

    Returns a boolean array that is True where samples['dec'] lies strictly
    between min_value and max_value (defaults: +/- pi/2).
    """
    dec = samples['dec']
    above_min = dec > min_value
    below_max = dec < max_value
    return np.logical_and(above_min, below_max)
def convert_onnx(net, path_module, output, opset=11, simplify=False):
    """Export a PyTorch module to ONNX.

    Loads weights from `path_module` into `net`, traces it with a random
    112x112x3 image normalized to [-1, 1], writes the graph to `output`,
    marks the batch dimension dynamic, and optionally runs onnx-simplifier.

    Fixes: `np.float` was removed in NumPy 1.24 — replaced with `np.float64`,
    the dtype the alias resolved to. The onnxsim import is also aliased so it
    no longer shadows the `simplify` flag.
    """
    assert isinstance(net, torch.nn.Module)
    img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.int32)
    img = img.astype(np.float64)
    # Normalize to [-1, 1].
    img = (((img / 255.0) - 0.5) / 0.5)
    # HWC -> CHW, add batch dim.
    img = img.transpose((2, 0, 1))
    img = torch.from_numpy(img).unsqueeze(0).float()
    weight = torch.load(path_module)
    net.load_state_dict(weight)
    net.eval()
    torch.onnx.export(net, img, output, keep_initializers_as_inputs=False, verbose=False, opset_version=opset)
    model = onnx.load(output)
    graph = model.graph
    # Make the batch dimension symbolic so any batch size can be fed.
    graph.input[0].type.tensor_type.shape.dim[0].dim_param = 'None'
    if simplify:
        from onnxsim import simplify as simplify_model
        (model, check) = simplify_model(model)
        assert check, 'Simplified ONNX model could not be validated'
    onnx.save(model, output)
class DenoisingConfig(FairseqDataclass):
    """Configuration for the denoising (BART-style) pretraining task."""
    # NOTE(review): fairseq declares these config classes with @dataclass;
    # the decorator is not visible here — confirm it wasn't stripped,
    # otherwise field(...) defaults will not be applied.
    data: str = field(default=MISSING, metadata={'help': 'path to data directory'})
    bpe: Optional[str] = field(default=None, metadata={'help': 'TODO'})
    tokens_per_sample: int = field(default=512, metadata={'help': 'max number of total tokens over all segments per sample for dataset'})
    sample_break_mode: SAMPLE_BREAK_MODE_CHOICES = field(default='complete_doc', metadata={'help': 'If omitted or "none", fills each sample with tokens-per-sample tokens. If set to "complete", splits samples only at the end of sentence, but may include multiple sentences per sample. "complete_doc" is similar but respects doc boundaries. If set to "eos", includes only one sentence per sample.'})
    replace_length: int = field(default=0, metadata={'help': 'TODO, should only allow -1, 0 and 1'})
    mask: float = field(default=0.0, metadata={'help': 'fraction of words/subwords that will be masked'})
    mask_random: float = field(default=0.0, metadata={'help': 'instead of using [MASK], use random token this often'})
    insert: float = field(default=0.0, metadata={'help': 'insert this percentage of additional random tokens'})
    permute: float = field(default=0.0, metadata={'help': 'take this proportion of subwords and permute them'})
    rotate: float = field(default=0.5, metadata={'help': 'rotate this proportion of inputs'})
    # NOTE(review): the help texts of poisson_lambda and permute_sentences
    # look swapped/misassigned relative to their names (permute_sentences
    # carries the replace-length help); verify against the upstream fairseq
    # config before relying on --help output. Defaults left untouched.
    poisson_lambda: float = field(default=3.0, metadata={'help': 'randomly shuffle sentences for this proportion of inputs'})
    shuffle_instance: float = field(default=0.0, metadata={'help': 'shuffle this proportion of sentences in all inputs'})
    mask_length: MASK_LENGTH_CHOICES = field(default='subword', metadata={'help': 'mask length to choose'})
    permute_sentences: int = field(default=(- 1), metadata={'help': 'when masking N tokens, replace with 0, 1, or N tokens (use -1 for N)'})
    seed: int = II('common.seed')
    shorten_method: SHORTEN_METHOD_CHOICES = field(default='none', metadata={'help': 'if not none, shorten sequences that exceed --tokens-per-sample'})
    shorten_data_split_list: str = field(default='', metadata={'help': 'comma-separated list of dataset splits to apply shortening to, e.g., "train,valid" (default: all dataset splits)'})
    max_source_positions: int = field(default=1024, metadata={'help': 'max number of tokens in the source sequence'})
    max_target_positions: int = field(default=1024, metadata={'help': 'max number of tokens in the target sequence'})
    dataset_impl: Optional[ChoiceEnum(get_available_dataset_impl())] = II('dataset.dataset_impl')
def accuracy(logits, targets, weights=None):
    """Compute (weighted correct count, weight normalizer) for classification.

    `logits` has one extra trailing class dimension relative to `targets`.
    Fix: the default `weights=None` previously crashed on `loss *= weights`
    (bool array times None); None now means uniform weights of 1, which
    yields the plain correct count and example count. Explicit weights
    behave exactly as before.

    Raises ValueError when the shapes are inconsistent.
    """
    if (logits.ndim != (targets.ndim + 1)):
        raise ValueError(('Incorrect shapes. Got shape %s logits and %s targets' % (str(logits.shape), str(targets.shape))))
    correct = jnp.equal(jnp.argmax(logits, axis=(- 1)), targets)
    if weights is None:
        weights = jnp.ones_like(correct, dtype=jnp.float32)
    correct = correct * weights
    return (correct.sum(), weights.sum())
class Block(nn.Module):
    """Pre-norm Transformer encoder block: self-attention then MLP, each with
    a residual connection and optional stochastic depth (DropPath)."""

    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        # Stochastic depth on the residual branches; identity when rate is 0.
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x):
        attn_out = self.attn(self.norm1(x))
        x = x + self.drop_path(attn_out)
        mlp_out = self.mlp(self.norm2(x))
        x = x + self.drop_path(mlp_out)
        return x
def mobilenet_v2(**config):
    """Build a MobileNetV2; only the 'imagenet' dataset is supported.

    'dataset' and 'depth' keys are stripped from the config before it is
    forwarded to the MobileNetV2 constructor.
    """
    dataset = config.pop('dataset', 'imagenet')
    assert (dataset == 'imagenet')
    config.pop('depth', None)
    return MobileNetV2(**config)
def _spherical_kmeans_single_lloyd(X, n_clusters, sample_weight=None, max_iter=300, init='k-means++', verbose=False, x_squared_norms=None, random_state=None, tol=0.0001, precompute_distances=True):
    """One run of Lloyd's algorithm for spherical k-means (centers are
    L2-normalized after every update).

    Returns (best_labels, best_inertia, best_centers, n_iterations).

    Fix: `np.float` was removed in NumPy 1.24; replaced with `np.float64`,
    the dtype the alias resolved to, so behavior is unchanged on older NumPy.
    """
    random_state = check_random_state(random_state)
    sample_weight = _check_sample_weight(sample_weight, X)
    (best_labels, best_inertia, best_centers) = (None, None, None)
    centers = _init_centroids(X, n_clusters, init, random_state=random_state, x_squared_norms=x_squared_norms)
    if verbose:
        print('Initialization complete')
    distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype)
    for i in range(max_iter):
        centers_old = centers.copy()
        # E-step: assign points; M-step: recompute and re-normalize centers.
        (labels, inertia) = _labels_inertia(X, sample_weight, x_squared_norms, centers, precompute_distances=precompute_distances, distances=distances)
        if sp.issparse(X):
            centers = _k_means._centers_sparse(X, sample_weight, labels, n_clusters, distances)
        else:
            centers = _k_means._centers_dense(X.astype(np.float64), sample_weight.astype(np.float64), labels, n_clusters, distances.astype(np.float64))
        # Spherical k-means: project centers back onto the unit sphere.
        centers = normalize(centers)
        if verbose:
            print(('Iteration %2d, inertia %.3f' % (i, inertia)))
        if ((best_inertia is None) or (inertia < best_inertia)):
            best_labels = labels.copy()
            best_centers = centers.copy()
            best_inertia = inertia
        center_shift_total = squared_norm((centers_old - centers))
        if (center_shift_total <= tol):
            if verbose:
                print(('Converged at iteration %d: center shift %e within tolerance %e' % (i, center_shift_total, tol)))
            break
    if (center_shift_total > 0):
        # Centers moved on the last iteration: recompute the final assignment.
        (best_labels, best_inertia) = _labels_inertia(X, sample_weight, x_squared_norms, best_centers, precompute_distances=precompute_distances, distances=distances)
    return (best_labels, best_inertia, best_centers, (i + 1))
def quaternion_multiply(q1, q2):
    """Hamilton product of quaternions stored as (w, x, y, z) along the last
    dimension; broadcasts over any leading batch dimensions."""
    w1, x1, y1, z1 = q1.unbind(-1)
    w2, x2, y2, z2 = q2.unbind(-1)
    out_w = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2
    out_x = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2
    out_y = w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2
    out_z = w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2
    return torch.stack((out_w, out_x, out_y, out_z), dim=-1)
def eval_redwood_scene(model, dloader, config, posegraph_name, use_icp):
    """Build and globally optimize a multiway-registration pose graph for a RedWood scene.

    Odometry pairs (target == source + 1) are initialized from the per-fragment
    pose graphs and locally refined; loop-closure pairs are predicted by
    ``model`` and pruned via an information-matrix overlap test. The graph is
    written to ``<posegraph_name>_0.json`` (raw) and ``_1.json`` (optimized).
    When ``use_icp`` is True, every surviving edge is refined with ICP on the
    cached fragment points, re-optimized, and written to ``_2.json``.

    Returns the final (optimized) pose graph.
    """
    pose_graph = o3d.registration.PoseGraph()
    odometry = np.identity(4)
    pose_graph.nodes.append(o3d.registration.PoseGraphNode(odometry))
    orig_points_dict = {}
    num_pair = dloader.dataset.__len__()
    dloader_iter = dloader.__iter__()
    num_pcd = dloader.dataset.num_pcds
    # BUGFIX: `scene` used to be bound only inside the odometry branch, so a
    # loop-closure pair arriving first raised NameError. The loader holds a
    # single scene, so bind it once up front.
    scene = dloader.dataset.scene_list[0]
    with torch.no_grad():
        for _ in tqdm(range(num_pair)):
            # BUGFIX: `iterator.next()` is the Python-2 protocol; use next().
            (corr, src_keypts, tgt_keypts, gt_trans, gt_labels, key) = next(dloader_iter)
            (corr, src_keypts, tgt_keypts, gt_trans, gt_labels) = (corr.cuda(), src_keypts.cuda(), tgt_keypts.cuda(), gt_trans.cuda(), gt_labels.cuda())
            data = {'corr_pos': corr, 'src_keypts': src_keypts, 'tgt_keypts': tgt_keypts, 'testing': True}
            # NOTE(review): the key separator was lost in the source
            # (str.split('') is invalid Python); '@' is assumed from a
            # '<scene>@<src>_<tgt>' key format -- confirm against the dataset.
            (source_id, target_id) = (int(key[0].split('@')[1].split('_')[0]), int(key[0].split('@')[1].split('_')[1]))
            if (target_id == (source_id + 1)):
                # Odometry edge: initialize from the fragment's own pose graph.
                pose_graph_frag = o3d.io.read_pose_graph(os.path.join(f'{config.root}/{scene}/', ('fragments/fragment_optimized_%03d.json' % source_id)))
                n_nodes = len(pose_graph_frag.nodes)
                transformation_init = np.linalg.inv(pose_graph_frag.nodes[(n_nodes - 1)].pose)
                (transformation, information) = local_refinement(np.load(f'{config.root}/{scene}/fragments/fragment_{str(source_id).zfill(3)}_fpfh.npz')['xyz'], np.load(f'{config.root}/{scene}/fragments/fragment_{str(target_id).zfill(3)}_fpfh.npz')['xyz'], transformation_init)
                odometry = np.dot(transformation, odometry)
                pose_graph.nodes.append(o3d.registration.PoseGraphNode(np.linalg.inv(odometry)))
                pose_graph.edges.append(o3d.registration.PoseGraphEdge(source_id, target_id, transformation, information, uncertain=False))
            else:
                # Loop-closure edge: predict the transform with the network.
                res = model(data)
                (pred_trans, pred_labels) = (res['final_trans'], res['final_labels'])
                transformation = pred_trans.detach().cpu().numpy()[0]
                information = o3d.registration.get_information_matrix_from_point_clouds(make_point_cloud(src_keypts[0].detach().cpu().numpy()), make_point_cloud(tgt_keypts[0].detach().cpu().numpy()), max_correspondence_distance=(0.05 * 1.4), transformation=transformation)
                # Reject weak overlaps and identity (degenerate) solutions.
                if (((information[(5, 5)] / min(src_keypts.shape[1], tgt_keypts.shape[1])) < 0.3) or (transformation.trace() == 4.0)):
                    continue
                pose_graph.edges.append(o3d.registration.PoseGraphEdge(source_id, target_id, transformation, information, uncertain=True))
            # Cache the raw fragment points for the optional ICP refinement pass.
            src_keypts = np.load(f'{config.root}/{scene}/fragments/fragment_{str(source_id).zfill(3)}_fpfh.npz')['xyz']
            tgt_keypts = np.load(f'{config.root}/{scene}/fragments/fragment_{str(target_id).zfill(3)}_fpfh.npz')['xyz']
            orig_points_dict[f'{source_id}_{target_id}'] = [src_keypts, tgt_keypts]
    o3d.io.write_pose_graph((posegraph_name + '_0.json'), pose_graph)
    print(f'Before optimization {len(pose_graph.nodes)} nodes {len(pose_graph.edges)} edges')
    print('Optimizing PoseGraph ...')
    option = o3d.registration.GlobalOptimizationOption(max_correspondence_distance=(0.05 * 1.4), edge_prune_threshold=0.25, preference_loop_closure=20.0, reference_node=0)
    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)
    o3d.registration.global_optimization(pose_graph, o3d.registration.GlobalOptimizationLevenbergMarquardt(), o3d.registration.GlobalOptimizationConvergenceCriteria(), option)
    print(f'After optimization {len(pose_graph.nodes)} nodes {len(pose_graph.edges)} edges')
    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Error)
    o3d.io.write_pose_graph((posegraph_name + '_1.json'), pose_graph)
    if (use_icp is False):
        return pose_graph
    # Second pass: refine every surviving edge with ICP on the cached fragment
    # points, rebuild the pose graph, and re-optimize.
    print('Refine each edge with ICP ...')
    matching_results = {}
    for edge in pose_graph.edges:
        s = edge.source_node_id
        e = edge.target_node_id
        matching_results[f'{s}_{e}'] = MatchingResult(s, e, edge.transformation)
    pose_graph_new = o3d.registration.PoseGraph()
    odometry = np.eye(4)
    pose_graph_new.nodes.append(o3d.registration.PoseGraphNode(odometry))
    for (k, result) in matching_results.items():
        (src_keypts, tgt_keypts) = orig_points_dict[k]
        (source_id, target_id) = (int(k.split('_')[0]), int(k.split('_')[1]))
        (transformation, information) = local_refinement(src_keypts, tgt_keypts, result.transformation)
        if (target_id == (source_id + 1)):
            odometry = np.dot(transformation, odometry)
            pose_graph_new.nodes.append(o3d.registration.PoseGraphNode(np.linalg.inv(odometry)))
            pose_graph_new.edges.append(o3d.registration.PoseGraphEdge(source_id, target_id, transformation, information, uncertain=False))
        else:
            pose_graph_new.edges.append(o3d.registration.PoseGraphEdge(source_id, target_id, transformation, information, uncertain=True))
    print(f'Before optimization {len(pose_graph_new.nodes)} nodes {len(pose_graph_new.edges)} edges')
    print('Optimizing PoseGraph ...')
    option = o3d.registration.GlobalOptimizationOption(max_correspondence_distance=(0.05 * 1.4), edge_prune_threshold=0.25, preference_loop_closure=20.0, reference_node=0)
    o3d.registration.global_optimization(pose_graph_new, o3d.registration.GlobalOptimizationLevenbergMarquardt(), o3d.registration.GlobalOptimizationConvergenceCriteria(), option)
    print(f'After optimization {len(pose_graph_new.nodes)} nodes {len(pose_graph_new.edges)} edges')
    o3d.io.write_pose_graph((posegraph_name + '_2.json'), pose_graph_new)
    return pose_graph_new
def beam_decode(data, model, device):
    """Run beam-search decoding for one utterance (batch size 1).

    ``data`` is a ``(name, feat, feat_len, txt)`` tuple from the loader.
    Returns ``(utterance_name, hypothesis_token_lists, reference_token_list)``.
    """
    name, feat, feat_len, txt = data
    model = model.to(device)
    feat = feat.to(device)
    feat_len = feat_len.to(device)
    txt = txt.to(device)
    txt_len = torch.sum(txt != 0, dim=-1)  # non-padding length (kept for parity)
    with torch.no_grad():
        hypotheses = model(feat, feat_len)
    token_seqs = [hyp.outIndex for hyp in hypotheses]
    del hypotheses  # free decoder state promptly
    return (name[0], token_seqs, txt[0].cpu().tolist())
class SubnetLeNet_Default(nn.Module):
    """LeNet-style multi-head network built from maskable "subnet" layers.

    Conv/linear layers are SubnetConv2d / SubnetLinear whose weights are gated
    per task by binary masks, with one linear classification head per task in
    ``taskcla`` (a list of ``(task_id, n_classes)`` pairs). The spatial-size
    bookkeeping assumes 32x32 inputs (see ``compute_conv_output_size(32, ...)``).
    """

    def __init__(self, taskcla, sparsity):
        super(SubnetLeNet_Default, self).__init__()
        self.in_channel = []
        self.conv1 = SubnetConv2d(1, 10, 5, sparsity=sparsity, bias=False, padding=2)
        # Track spatial size through conv (k=5, s=1, p=2) then pool (k=3, s=2, p=1).
        s = compute_conv_output_size(32, 5, 1, 2)
        s = compute_conv_output_size(s, 3, 2, 1)
        # NOTE(review): conv1 has 1 input channel; the recorded '3' looks
        # inconsistent with conv2's recorded '20' — confirm intended meaning.
        self.in_channel.append(3)
        self.conv2 = SubnetConv2d(10, 20, 5, sparsity=sparsity, bias=False, padding=2)
        s = compute_conv_output_size(s, 5, 1, 2)
        s = compute_conv_output_size(s, 3, 2, 1)
        self.in_channel.append(20)
        self.smid = s  # final feature-map side length before flattening
        self.maxpool = torch.nn.MaxPool2d(3, 2, padding=1)
        self.relu = torch.nn.ReLU()
        # p=0: dropout is effectively disabled but kept in the module graph.
        self.drop1 = torch.nn.Dropout(0)
        self.drop2 = torch.nn.Dropout(0)
        self.fc1 = SubnetLinear(((20 * self.smid) * self.smid), 500, sparsity=sparsity, bias=False)
        self.fc2 = SubnetLinear(500, 300, sparsity=sparsity, bias=False)
        self.taskcla = taskcla
        # One output head per task.
        self.last = nn.ModuleList()
        for (t, n) in self.taskcla:
            self.last.append(nn.Linear(300, n, bias=False))
        # Placeholder (all-None) mask dict used when no task mask is supplied.
        self.none_masks = {}
        for (name, module) in self.named_modules():
            if (isinstance(module, SubnetLinear) or isinstance(module, SubnetConv2d)):
                self.none_masks[(name + '.weight')] = None
                self.none_masks[(name + '.bias')] = None

    def forward(self, x, task_id, mask, mode='train'):
        """Masked forward pass; returns logits from the head for ``task_id``.

        ``mask`` maps '<layer>.weight'/'<layer>.bias' to binary masks (or None
        to use the unmasked placeholder dict).
        """
        if (mask is None):
            mask = self.none_masks
        bsz = deepcopy(x.size(0))
        x = self.conv1(x, weight_mask=mask['conv1.weight'], bias_mask=mask['conv1.bias'], mode=mode)
        x = self.maxpool(self.drop1(self.relu(x)))
        x = self.conv2(x, weight_mask=mask['conv2.weight'], bias_mask=mask['conv2.bias'], mode=mode)
        x = self.maxpool(self.drop1(self.relu(x)))
        x = x.reshape(bsz, (- 1))
        x = self.fc1(x, weight_mask=mask['fc1.weight'], bias_mask=mask['fc1.bias'], mode=mode)
        x = self.drop2(self.relu(x))
        x = self.fc2(x, weight_mask=mask['fc2.weight'], bias_mask=mask['fc2.bias'], mode=mode)
        x = self.drop2(self.relu(x))
        # NOTE: h_keys is currently unused.
        h_keys = ['last.{}.weight'.format(task_id), 'last.{}.bias'.format(task_id)]
        y = self.last[task_id](x)
        return y

    def get_masks(self, task_id):
        """Collect binarized (uint8) weight/bias masks for ``task_id``'s subnet."""
        task_mask = {}
        for (name, module) in self.named_modules():
            # Skip heads that belong to other tasks.
            if ('last' in name):
                if (name != ('last.' + str(task_id))):
                    continue
            if (isinstance(module, SubnetLinear) or isinstance(module, SubnetConv2d)):
                print(name)
                task_mask[(name + '.weight')] = (module.weight_mask.detach().clone() > 0).type(torch.uint8)
                if (getattr(module, 'bias') is not None):
                    task_mask[(name + '.bias')] = (module.bias_mask.detach().clone() > 0).type(torch.uint8)
                else:
                    task_mask[(name + '.bias')] = None
        return task_mask
def get_args():
    """Parse CLI arguments for index building and resolve all derived paths.

    Joins every *_path option under ``dump_dir/<index_name>`` (or a subindex
    directory when ``--dump_paths`` is given) and returns the populated
    argparse namespace.
    """
    parser = argparse.ArgumentParser()
    # Positional inputs.
    parser.add_argument('dump_dir')
    parser.add_argument('stage')
    # Optional subindex configuration.
    parser.add_argument('--dump_paths', default=None, help='Relative to `dump_dir/phrase`. If specified, creates subindex dir and save there with same name')
    parser.add_argument('--subindex_name', default='index', help='used only if dump_path is specified.')
    parser.add_argument('--offset', default=0, type=int)
    # File names (resolved against the index directory below).
    parser.add_argument('--quantizer_path', default='quantizer.faiss')
    parser.add_argument('--max_norm_path', default='max_norm.json')
    parser.add_argument('--trained_index_path', default='trained.faiss')
    parser.add_argument('--index_path', default='index.faiss')
    parser.add_argument('--idx2id_path', default='idx2id.hdf5')
    parser.add_argument('--inv_path', default='merged.invdata')
    # Index construction options.
    parser.add_argument('--add_all', default=False, action='store_true')
    parser.add_argument('--num_clusters', type=int, default=16384)
    parser.add_argument('--hnsw', default=False, action='store_true')
    parser.add_argument('--fine_quant', default='SQ8', help='SQ8|SQ4|PQ# where # is number of bytes per vector (for SQ it would be 480 Bytes)')
    parser.add_argument('--max_norm', default=None, type=float)
    parser.add_argument('--max_norm_cf', default=1.0, type=float)
    parser.add_argument('--norm_th', default=999, type=float)
    parser.add_argument('--para', default=False, action='store_true')
    parser.add_argument('--doc_sample_ratio', default=0.2, type=float)
    parser.add_argument('--vec_sample_ratio', default=0.2, type=float)
    parser.add_argument('--fs', default='local')
    parser.add_argument('--cuda', default=False, action='store_true')
    parser.add_argument('--num_dummy_zeros', default=0, type=int)
    parser.add_argument('--replace', default=False, action='store_true')
    parser.add_argument('--num_docs_per_add', default=1000, type=int)
    args = parser.parse_args()
    # The index directory name encodes clusters, coarse quantizer and fine quantizer.
    if args.hnsw:
        coarse_kind = 'hnsw'
    else:
        coarse_kind = 'flat'
    args.index_name = ('%d_%s_%s' % (args.num_clusters, coarse_kind, args.fine_quant))
    # Remap the dump dir for NFS-backed runs.
    if (args.fs == 'nfs'):
        from nsml import NSML_NFS_OUTPUT
        args.dump_dir = os.path.join(NSML_NFS_OUTPUT, args.dump_dir)
    elif (args.fs == 'nsml'):
        pass
    # Resolve every output path relative to the index directory.
    args.index_dir = os.path.join(args.dump_dir, args.index_name)
    args.quantizer_path = os.path.join(args.index_dir, args.quantizer_path)
    args.max_norm_path = os.path.join(args.index_dir, args.max_norm_path)
    args.trained_index_path = os.path.join(args.index_dir, args.trained_index_path)
    args.inv_path = os.path.join(args.index_dir, args.inv_path)
    args.subindex_dir = os.path.join(args.index_dir, args.subindex_name)
    if (args.dump_paths is None):
        args.index_path = os.path.join(args.index_dir, args.index_path)
        args.idx2id_path = os.path.join(args.index_dir, args.idx2id_path)
    else:
        # Subindex mode: outputs are named by offset under the subindex dir.
        args.dump_paths = [os.path.join(args.dump_dir, 'phrase', path) for path in args.dump_paths.split(',')]
        args.index_path = os.path.join(args.subindex_dir, ('%d.faiss' % args.offset))
        args.idx2id_path = os.path.join(args.subindex_dir, ('%d.hdf5' % args.offset))
    return args
def plane_rcnn_loss(plane_pred, instances, loss_weight=1.0, smooth_l1_beta=0.0, plane_normal_only=False):
    """Smooth-L1 regression loss between predicted and ground-truth plane params.

    Gathers ``gt_planes`` from every non-empty instance batch; when
    ``plane_normal_only`` is set, the targets are L2-normalized so only the
    plane normal direction is supervised. Returns a scalar loss averaged over
    predictions and scaled by ``loss_weight``.
    """
    gt_param = [inst.gt_planes for inst in instances if len(inst) > 0]
    if not gt_param:
        # No ground truth anywhere: return a zero that keeps the graph connected.
        return plane_pred.sum() * 0
    gt_param = cat(gt_param, dim=0)
    if plane_normal_only:
        gt_param = F.normalize(gt_param, p=2, dim=1)
    assert len(plane_pred) > 0
    reg_loss = smooth_l1_loss(plane_pred, gt_param, smooth_l1_beta, reduction='sum')
    return loss_weight * reg_loss / len(plane_pred)
class ConvBlock(nn.Module):
    """3x3 convolution followed by optional BatchNorm and optional ELU."""

    def __init__(self, in_channels, out_channels, bn=False, nonlin=True):
        super(ConvBlock, self).__init__()
        self.conv = Conv3x3(in_channels, out_channels)
        # Optional pieces are stored as None when disabled so forward() can
        # skip them with a simple None check.
        self.nonlin = nn.ELU(inplace=True) if nonlin else None
        self.bn = nn.BatchNorm2d(out_channels) if bn else None

    def forward(self, x):
        """Apply conv -> (bn) -> (elu)."""
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        if self.nonlin is not None:
            out = self.nonlin(out)
        return out
def run(config):
    """Run SLAC (or rigid) multiway optimization over reconstructed fragments.

    Reads fragment point clouds and the refined pose graph produced by the
    reconstruction system, optimizes fragment poses with either the rigid or
    the SLAC optimizer (per ``config['method']``), saves the optimized pose
    graph and control-grid tensors, and writes the full per-frame camera
    trajectory.

    Raises:
        RuntimeError: if no fragments are found or the method is unknown.
    """
    print('slac non-rigid optimization.')
    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)
    path_dataset = config['path_dataset']
    ply_file_names = get_file_list(join(config['path_dataset'], config['folder_fragment']), '.ply')
    if (len(ply_file_names) == 0):
        raise RuntimeError('No fragment found in {}, please make sure the reconstruction_system has finished running on the dataset.'.format(join(config['path_dataset'], config['folder_fragment'])))
    pose_graph_fragment = o3d.io.read_pose_graph(join(path_dataset, config['template_refined_posegraph_optimized']))
    # Optimizer parameters, forwarded straight from the config dict.
    slac_params = o3d.t.pipelines.slac.slac_optimizer_params(max_iterations=config['max_iterations'], voxel_size=config['voxel_size'], distance_threshold=config['distance_threshold'], fitness_threshold=config['fitness_threshold'], regularizer_weight=config['regularizer_weight'], device=o3d.core.Device(str(config['device'])), slac_folder=join(path_dataset, config['folder_slac']))
    debug_option = o3d.t.pipelines.slac.slac_debug_option(False, 0)
    pose_graph_updated = o3d.pipelines.registration.PoseGraph()
    if (config['method'] == 'rigid'):
        pose_graph_updated = o3d.t.pipelines.slac.run_rigid_optimizer_for_fragments(ply_file_names, pose_graph_fragment, slac_params, debug_option)
    elif (config['method'] == 'slac'):
        # SLAC additionally returns the deformation control grid; persist its
        # active hashmap entries (keys + values) alongside the pose graph.
        (pose_graph_updated, ctrl_grid) = o3d.t.pipelines.slac.run_slac_optimizer_for_fragments(ply_file_names, pose_graph_fragment, slac_params, debug_option)
        hashmap = ctrl_grid.get_hashmap()
        active_buf_indices = hashmap.active_buf_indices().to(o3d.core.Dtype.Int64)
        key_tensor = hashmap.key_tensor()[active_buf_indices]
        key_tensor.save(join(slac_params.get_subfolder_name(), 'ctr_grid_keys.npy'))
        value_tensor = hashmap.value_tensor()[active_buf_indices]
        value_tensor.save(join(slac_params.get_subfolder_name(), 'ctr_grid_values.npy'))
    else:
        raise RuntimeError('Requested optimization method {}, is not implemented. Implemented methods includes slac and rigid.'.format(config['method']))
    o3d.io.write_pose_graph(join(slac_params.get_subfolder_name(), config['template_optimized_posegraph_slac']), pose_graph_updated)
    # Compose per-frame camera extrinsics: fragment pose x frame-in-fragment pose.
    fragment_folder = join(path_dataset, config['folder_fragment'])
    params = []
    for i in range(len(pose_graph_updated.nodes)):
        fragment_pose_graph = o3d.io.read_pose_graph(join(fragment_folder, ('fragment_optimized_%03d.json' % i)))
        for node in fragment_pose_graph.nodes:
            pose = np.dot(pose_graph_updated.nodes[i].pose, node.pose)
            param = o3d.camera.PinholeCameraParameters()
            # Extrinsic is the inverse of the camera-to-world pose.
            param.extrinsic = np.linalg.inv(pose)
            params.append(param)
    trajectory = o3d.camera.PinholeCameraTrajectory()
    trajectory.parameters = params
    o3d.io.write_pinhole_camera_trajectory((((slac_params.get_subfolder_name() + '/optimized_trajectory_') + str(config['method'])) + '.log'), trajectory)
def np_mp_array(shape, dtype):
    """Allocate a numpy array backed by multiprocessing shared memory.

    The returned array views a ``mp.RawArray`` buffer, so child processes that
    inherit the buffer see writes made by the parent (and vice versa).
    """
    n_items = int(np.prod(shape))
    n_bytes = n_items * np.dtype(dtype).itemsize
    shared_buf = mp.RawArray(ctypes.c_char, n_bytes)
    return np.frombuffer(shared_buf, dtype=dtype, count=n_items).reshape(shape)
# NOTE(review): the decorator on this test was garbled to ".slow" in the
# source — presumably a slow-test marker such as "@pytest.mark.slow".
# It is left commented out rather than guessed at so the module stays
# importable; restore the original marker.
def test_alternating_autocorr_per_chain():
    """Autocorrelation of a perfectly alternating chain is exactly +/-1.

    Each column alternates between a value and its negation, so lag
    correlations are +1 and -1 in an alternating pattern per chain.
    """
    ntiles = 100
    # Two chains of period-2 samples: rows repeat [1, 2], [-1, -2], ...
    samples = jnp.tile(jnp.array([[1, 2], [(- 1), (- 2)]]), (ntiles, 1))
    autocorr = statistics.per_chain_autocorr_fast(samples)
    expected_autocorr = jnp.tile(jnp.array([[1.0, 1.0], [(- 1.0), (- 1.0)]]), (ntiles, 1))
    np.testing.assert_allclose(autocorr, expected_autocorr, 1e-05)
def _test():
    """Smoke-test spnasnet: parameter count, output shape, and backward pass."""
    import torch
    pretrained = False
    for model_fn in [spnasnet]:
        net = model_fn(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print('m={}, {}'.format(model_fn.__name__, weight_count))
        # Expected parameter count for the stock architecture.
        assert (model_fn != spnasnet) or (weight_count == 4421616)
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()  # verify gradients flow end to end
        assert tuple(y.size()) == (1, 1000)
def tf_efficientnet_b2_ns(pretrained=False, **kwargs):
    """EfficientNet-B2 NoisyStudent variant with TF-ported defaults.

    TF-trained weights require TF's BatchNorm epsilon and 'same' padding, so
    both are forced here before delegating to the generic builder.
    """
    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    return _gen_efficientnet('tf_efficientnet_b2_ns', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs)
def reduce(l):
    """Smooth a sequence with a +/-10-element sliding window.

    Returns ``(means, lows, highs)``: per index, the mean, min, and max of the
    clamped window ``l[i-10 : i+10]``.

    Note: the name shadows the ``functools.reduce`` builtin-style helper; it
    is kept for API compatibility with existing callers.
    """
    half_window = 10
    means, lows, highs = [], [], []
    for idx in range(len(l)):
        window = l[max(0, idx - half_window):min(len(l), idx + half_window)]
        means.append(statistics.mean(window))
        lows.append(min(window))
        highs.append(max(window))
    return (means, lows, highs)
def create_dictionaries(filename):
    """Build image<->tag mappings from a CSV of (imageid, source, tag, confidence).

    Only rows whose image id is in the module-level ``ss`` set and whose
    confidence equals 1 are kept; tag ids are translated via ``tag_to_name``.
    Returns ``(image_to_tag, tag_to_image)``.
    """
    image_to_tag = {image_id: [] for image_id in ss}
    tag_to_image = {}
    with open(filename) as f:
        f.readline()  # skip the header row
        for line in f:
            imageid, source, tag, confidence = line.split(',')
            tag = tag_to_name[tag]
            if imageid in ss and int(confidence) == 1:
                image_to_tag[imageid].append(tag)
                tag_to_image.setdefault(tag, []).append(imageid)
    return (image_to_tag, tag_to_image)
class CarlaSensorListMaster(object):
    """Owns a collection of CarlaSensorMaster wrappers and destroys them together."""

    def __init__(self):
        self.sensor_list = []

    def append(self, sensor, transform, binded):
        """Wrap a raw sensor in a CarlaSensorMaster and track it."""
        wrapper = CarlaSensorMaster(sensor, transform, binded)
        self.sensor_list.append(wrapper)

    def destroy(self):
        """Destroy every tracked sensor."""
        for wrapper in self.sensor_list:
            wrapper.destroy()
def _weight_align(tp_model, ref_model):
    """Re-partition a reference model's weights into a tensor-parallel model.

    Walks ``ref_model``'s state dict and, for each layer that is split under
    tensor parallelism, carves out this rank's shard via
    ``_initialize_affine_weight`` — transposing the Conv1D-style ``c_*``
    weights first — then loads the sharded state dict into ``tp_model``.

    NOTE(review): the ``// 2`` halving hard-codes a tensor-parallel world size
    of 2, and ``stride=3`` for ``c_attn`` presumably reflects fused Q/K/V
    packing — confirm against ``_initialize_affine_weight``'s contract.
    """
    sd = ref_model.state_dict()
    new_sd = dict()
    for (k, tensor) in sd.items():
        if k.endswith('attn.c_attn.weight'):
            # Fused attention projection weight: split the output dim (column
            # parallel), with stride 3 across the packed sub-matrices.
            new_tensor = torch.empty((tensor.shape[1] // 2), tensor.shape[0], device=tensor.device)
            _initialize_affine_weight(new_tensor, (tensor.shape[1] // 2), 0, stride=3, master_weight=tensor.t())
            new_sd[k] = new_tensor
        elif k.endswith('attn.c_attn.bias'):
            # Matching bias split, same stride-3 interleaving.
            new_tensor = torch.empty((tensor.shape[0] // 2), device=tensor.device)
            _initialize_affine_weight(new_tensor, (tensor.shape[0] // 2), 0, stride=3, master_weight=tensor)
            new_sd[k] = new_tensor
        elif k.endswith('mlp.c_fc.bias'):
            # MLP up-projection bias: plain column split.
            new_tensor = torch.empty((tensor.shape[0] // 2), device=tensor.device)
            _initialize_affine_weight(new_tensor, (tensor.shape[0] // 2), 0, master_weight=tensor)
            new_sd[k] = new_tensor
        elif k.endswith('mlp.c_fc.weight'):
            # MLP up-projection weight: column split of the transposed weight.
            new_tensor = torch.empty((tensor.shape[1] // 2), tensor.shape[0], device=tensor.device)
            _initialize_affine_weight(new_tensor, (tensor.shape[1] // 2), 0, master_weight=tensor.t())
            new_sd[k] = new_tensor
        elif (k.endswith('attn.c_proj.weight') or k.endswith('mlp.c_proj.weight')):
            # Output projections: row-parallel split (dim 1 of the transpose).
            new_tensor = torch.empty(tensor.shape[1], (tensor.shape[0] // 2), device=tensor.device)
            _initialize_affine_weight(new_tensor, (tensor.shape[0] // 2), 1, master_weight=tensor.t())
            new_sd[k] = new_tensor
        elif k.endswith('wte.weight'):
            # Token embedding table: split along the vocabulary dimension.
            new_tensor = torch.empty((tensor.shape[0] // 2), tensor.shape[1], device=tensor.device)
            _initialize_affine_weight(new_tensor, (tensor.shape[0] // 2), 0, master_weight=tensor)
            new_sd[k] = new_tensor
        else:
            # Everything else (layer norms, position embeddings, ...) is replicated.
            new_sd[k] = tensor
    tp_model.load_state_dict(new_sd)
def _test_skipping_update_params_nonfinite_loss(rank, world_size):
    """Distributed test: the AMP grad scaler must skip steps with non-finite loss.

    Spawned once per rank. Builds a one-linear FSDP model under bf16 autocast
    with ``skip_if_nonfinite=True``, then runs two iterations: the first
    overflows on rank 0 (input at bf16 max) and the optimizer step must be
    skipped without corrupting parameters; the second is finite and must step
    normally. Also checks the scaler is configured as a no-op scaler
    (scale/growth/backoff all 1.0).
    """
    # Minimal env needed by the strategy/transform machinery.
    os.environ['LOCAL_RANK'] = str(rank)
    os.environ['RANK'] = str(rank)
    os.environ['WORLD_SIZE'] = str(world_size)
    os.environ['NPROC_PER_NODE'] = str(world_size)
    dist.init_process_group(backend='nccl', rank=rank, world_size=world_size)
    device = f'cuda:{rank}'
    strategy = Strategy([('parallel_mode', ([('data', torch.distributed.get_world_size())], None), False), ('fsdp', {'atorch_size_based_min_num_params': 1}, False), ('amp_native', {'dtype': torch.bfloat16, 'skip_if_nonfinite': True}, False)])
    opt_lib = OptimizationLibrary()
    def norm_loss_func(a, _):
        # Norm of the output; becomes inf when the input overflows bf16.
        return a.norm()
    bf16_max = torch.finfo(torch.bfloat16).max
    param_value = torch.tensor(10.0, dtype=torch.float32)
    model = OneLinearModule(param_value)
    model_context = ModelContext(model=model, optim_func=optim_func, dataset=None, loss_func=norm_loss_func, prepare_input=None, optim_args={'lr': 1})
    model_context = model_transform(model_context, strategy, opt_lib, create_dataloader=False)
    model = model_context.model
    optimizer = model_context.optim
    loss_func = model_context.loss_func
    for i in range(2):
        optimizer.zero_grad()
        if (i == 0):
            # First iteration: overflow the loss on rank 0 only.
            tensor_value = (bf16_max if (rank == 0) else 1)
        else:
            tensor_value = 1
        x = torch.tensor([[tensor_value], [tensor_value]], dtype=torch.bfloat16, device=device)
        y = model(x)
        loss = loss_func(y, None)
        scaler = optimizer.grad_scaler
        if (i == 0):
            if (rank == 0):
                assert torch.all(loss.isinf())
            else:
                assert (not loss.isinf().any().item())
            loss.backward()
            # Gradients are NaN after the inf loss...
            for param in model.module.parameters():
                assert torch.all(param.grad.isnan())
            optimizer.step()
            # ...but the skipped step must leave the parameters finite.
            for param in model.module.parameters():
                assert (not torch.all(param.isnan()))
            assert scaler.has_overflow()
            assert optimizer.step_was_skipped
        else:
            # Second iteration: finite loss, normal step.
            assert (not loss.isinf().any().item())
            loss.backward()
            optimizer.step()
            assert (not scaler.has_overflow())
            assert (not optimizer.step_was_skipped)
    # With skip_if_nonfinite the scaler should act as a fixed no-op scaler.
    assert (scaler._init_scale == 1.0)
    assert (scaler._growth_factor == 1.0)
    assert (scaler._backoff_factor == 1.0)
    dist.destroy_process_group()
class ResConvLayer(tf.keras.layers.Layer):
    """Conv stack with one residual connection around its middle block.

    ``num_filters`` supplies the filter counts for the seven convolutions,
    in order: entry conv, two ResUnits, residual-branch conv, and three
    tail convs.
    """

    def __init__(self, num_filters, *args, **kwargs):
        self.num_filters = num_filters
        super(ResConvLayer, self).__init__(*args, **kwargs)

    def build(self, input_shape):
        filters = self.num_filters
        # Entry: wide 7x7 conv with leaky activation.
        self.initial_conv = [
            tf.keras.layers.Conv2D(filters[0], (7, 7), input_shape=input_shape, padding='same'),
            tf.keras.layers.LeakyReLU(alpha=0.2),
        ]
        # Residual branch: two ResUnits, a 3x3 conv, then batch norm.
        self.first_block = [
            ResUnit(filters[1]),
            ResUnit(filters[2]),
            tf.keras.layers.Conv2D(filters[3], (3, 3), padding='same'),
            tf.keras.layers.BatchNormalization(),
        ]
        # Tail: alternating 3x3 convs and leaky activations, ending in 7x7.
        self.second_block = [
            tf.keras.layers.Conv2D(filters[4], (3, 3), padding='same'),
            tf.keras.layers.LeakyReLU(alpha=0.2),
            tf.keras.layers.Conv2D(filters[5], (3, 3), padding='same'),
            tf.keras.layers.LeakyReLU(alpha=0.2),
            tf.keras.layers.Conv2D(filters[6], (7, 7), padding='same'),
        ]
        super(ResConvLayer, self).build(input_shape)

    def call(self, tensor):
        for layer in self.initial_conv:
            tensor = layer(tensor)
        skip = tensor
        for layer in self.first_block:
            tensor = layer(tensor)
        tensor = skip + tensor  # residual connection around the first block
        for layer in self.second_block:
            tensor = layer(tensor)
        return tensor
class W2lKenLMDecoder(W2lDecoder):
    """wav2letter-style beam-search decoder backed by a KenLM word LM.

    Builds a lexicon trie whose nodes carry LM scores (max-smeared for
    pruning) and wraps a flashlight ``WordLMDecoder`` over it.
    """

    def __init__(self, args, tgt_dict):
        super().__init__(args, tgt_dict)
        self.silence = tgt_dict.index(args.silence_token)
        self.lexicon = load_words(args.lexicon)
        self.word_dict = create_word_dict(self.lexicon)
        self.unk_word = self.word_dict.get_index('<unk>')
        self.lm = KenLM(args.kenlm_model, self.word_dict)
        self.trie = Trie(self.vocab_size, self.silence)
        # Score every lexicon word once from the LM start state and insert
        # each of its spellings (token-id sequences) into the trie.
        start_state = self.lm.start(False)
        for (word, spellings) in self.lexicon.items():
            word_idx = self.word_dict.get_index(word)
            (_, score) = self.lm.score(start_state, word_idx)
            for spelling in spellings:
                spelling_idxs = [tgt_dict.index(token) for token in spelling]
                self.trie.insert(spelling_idxs, word_idx, score)
        # Propagate the best child score up the trie for tighter pruning.
        self.trie.smear(SmearingMode.MAX)
        self.decoder_opts = DecoderOptions(args.beam, args.beam_threshold, args.lm_weight, args.word_score, args.unk_weight, False, args.sil_weight, self.criterion_type)
        self.decoder = WordLMDecoder(self.decoder_opts, self.trie, self.lm, self.silence, self.blank, self.unk_word, self.asg_transitions)

    def decode(self, emissions):
        """Decode a (B, T, N) emissions tensor; returns per-sample n-best lists
        of ``{'tokens': ..., 'score': ...}`` dicts.
        """
        (B, T, N) = emissions.size()
        hypos = []
        for b in range(B):
            # Raw-pointer offset to sample b's emissions for the C++ decoder.
            # NOTE(review): the factor 4 assumes float32 (4-byte) emissions —
            # confirm the tensor dtype upstream.
            emissions_ptr = (emissions.data_ptr() + ((4 * b) * emissions.stride(0)))
            nbest_results = self.decoder.decode(emissions_ptr, T, N)[:self.nbest]
            hypos.append([{'tokens': self.get_tokens(result.tokens), 'score': result.score} for result in nbest_results])
        return hypos
def DrawLegend(legend_labels, filename):
    """Render a standalone horizontal legend strip and save it as a PDF.

    Draws one dummy bar per label (to obtain legend handles with the shared
    PATTERNS / LINE_COLORS styling) on a throwaway axis, then writes a
    legend-only figure to ``FIGURE_FOLDER/<filename>.pdf``.
    """
    fig = pylab.figure()
    ax1 = fig.add_subplot(111)
    FIGURE_LABEL = legend_labels
    LEGEND_FP = FontProperties(style='normal', size=26)
    data = [1]
    x_values = [1]
    width = 0.3
    # One dummy bar per label, styled like the real plots, to use as a handle.
    handles = []
    for idx in range(len(FIGURE_LABEL)):
        handles.append(ax1.bar(x_values, data, width, hatch=PATTERNS[idx], color=LINE_COLORS[idx], linewidth=0.2))
    # A short, wide figure that contains nothing but the legend row.
    figlegend = pylab.figure(figsize=(11, 0.5))
    figlegend.legend(handles, FIGURE_LABEL, prop=LEGEND_FP, loc=9, bbox_to_anchor=(0, 0.4, 1, 1), ncol=len(FIGURE_LABEL), mode='expand', shadow=False, frameon=False, handlelength=1.1, handletextpad=0.2, columnspacing=0.1)
    figlegend.savefig((((FIGURE_FOLDER + '/') + filename) + '.pdf'))
def downsize(scan):
    """Resize all six skybox faces of every panorama in ``scan``.

    Reads each face via the module-level ``skybox_template`` path pattern,
    area-resamples it to DOWNSIZED_WIDTH x DOWNSIZED_HEIGHT, and writes it to
    the ``skybox_small_template`` location (asserting the write succeeded).
    """
    intrinsics, _ = camera_parameters(scan)
    pano_ids = list({entry.split('_')[0] for entry in intrinsics.keys()})
    print('Processing scan %s with %d panoramas' % (scan, len(pano_ids)))
    for pano in pano_ids:
        for face in range(6):
            src_img = cv2.imread(skybox_template % (base_dir, scan, pano, face))
            small_img = cv2.resize(src_img, (DOWNSIZED_WIDTH, DOWNSIZED_HEIGHT), interpolation=cv2.INTER_AREA)
            assert cv2.imwrite(skybox_small_template % (base_dir, scan, pano, face), small_img)
def read_and_decode(filename_queue, IMG_HEIGHT, IMG_WIDTH):
    """Parse one TFRecord example into an image pair, edge maps, and flow.

    All features are stored as fixed-length float32 tensors at the given
    resolution: RGB images (3 channels), edge maps (1), and flow (2).
    Returns ``(img1, img2, edge1, edge2, flow)``.
    """
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    feature_spec = {
        'img1_raw': tf.FixedLenFeature([IMG_HEIGHT, IMG_WIDTH, 3], tf.float32),
        'img2_raw': tf.FixedLenFeature([IMG_HEIGHT, IMG_WIDTH, 3], tf.float32),
        'edge1_raw': tf.FixedLenFeature([IMG_HEIGHT, IMG_WIDTH, 1], tf.float32),
        'edge2_raw': tf.FixedLenFeature([IMG_HEIGHT, IMG_WIDTH, 1], tf.float32),
        'flow_raw': tf.FixedLenFeature([IMG_HEIGHT, IMG_WIDTH, 2], tf.float32),
    }
    features = tf.parse_single_example(serialized_example, features=feature_spec)
    return (features['img1_raw'], features['img2_raw'], features['edge1_raw'], features['edge2_raw'], features['flow_raw'])
class Layer():
    """One vertical slice of a text-drawn circuit diagram.

    Holds the drawing element for every quantum and classical wire in this
    column, plus the vertical connections to render between them.
    """

    def __init__(self, qregs, cregs):
        self.qregs = qregs
        self.cregs = cregs
        # One drawing element per qubit/clbit wire (None = empty wire segment).
        self.qubit_layer = ([None] * len(qregs))
        self.connections = []
        self.clbit_layer = ([None] * len(cregs))

    def full_layer(self):
        """Return all wire elements, qubit wires first, then clbit wires."""
        return (self.qubit_layer + self.clbit_layer)

    def set_qubit(self, qubit, element):
        """Place ``element`` on the wire of ``qubit``."""
        self.qubit_layer[self.qregs.index(qubit)] = element

    def set_clbit(self, clbit, element):
        """Place ``element`` on the wire of ``clbit``."""
        self.clbit_layer[self.cregs.index(clbit)] = element

    def _set_multibox(self, wire_type, bits, label, top_connect=None):
        """Draw a single box spanning several wires.

        ``wire_type`` selects classical ('cl') or quantum ('qu') wires. The
        box is assembled from top/mid/bottom segments; wires that fall inside
        the box's vertical span but are not in ``bits`` get pass-through mid
        segments with blank labels.
        """
        bits = list(bits)
        if (wire_type == 'cl'):
            bit_index = sorted([i for (i, x) in enumerate(self.cregs) if (x in bits)])
            bits.sort(key=self.cregs.index)
            # Classical boxes carry no per-wire argument labels.
            qargs = ([''] * len(bits))
            set_bit = self.set_clbit
            BoxOnWire = BoxOnClWire
            BoxOnWireTop = BoxOnClWireTop
            BoxOnWireMid = BoxOnClWireMid
            BoxOnWireBot = BoxOnClWireBot
        elif (wire_type == 'qu'):
            bit_index = sorted([i for (i, x) in enumerate(self.qregs) if (x in bits)])
            # Per-wire labels show each qubit's argument position in the gate.
            qargs = [str(bits.index(qbit)) for qbit in self.qregs if (qbit in bits)]
            bits.sort(key=self.qregs.index)
            set_bit = self.set_qubit
            BoxOnWire = BoxOnQuWire
            BoxOnWireTop = BoxOnQuWireTop
            BoxOnWireMid = BoxOnQuWireMid
            BoxOnWireBot = BoxOnQuWireBot
        else:
            raise VisualizationError("_set_multibox only supports 'cl' and 'qu' as wire types.")
        if (len(bit_index) == 1):
            # Degenerate case: the "multi" box touches a single wire.
            set_bit(bits[0], BoxOnWire(label, top_connect=top_connect))
        else:
            box_height = ((max(bit_index) - min(bit_index)) + 1)
            set_bit(bits.pop(0), BoxOnWireTop(label, top_connect=top_connect, wire_label=qargs.pop(0)))
            for (order, bit_i) in enumerate(range((min(bit_index) + 1), max(bit_index))):
                if (bit_i in bit_index):
                    # A wire that participates in the gate.
                    named_bit = bits.pop(0)
                    wire_label = qargs.pop(0)
                else:
                    # A bystander wire crossed by the box: blank label.
                    named_bit = (self.qregs + self.cregs)[bit_i]
                    wire_label = (' ' * len(qargs[0]))
                set_bit(named_bit, BoxOnWireMid(label, box_height, order, wire_label=wire_label))
            set_bit(bits.pop(0), BoxOnWireBot(label, box_height, wire_label=qargs.pop(0)))

    def set_cl_multibox(self, creg, label, top_connect=''):
        """Draw a box over every clbit belonging to classical register ``creg``."""
        clbit = [bit for bit in self.cregs if (bit[0] == creg)]
        self._set_multibox('cl', clbit, label, top_connect=top_connect)

    def set_qu_multibox(self, bits, label):
        """Draw a box spanning the given qubits."""
        self._set_multibox('qu', bits, label)

    def connect_with(self, wire_char):
        """Draw vertical connectors (``wire_char``) between this layer's elements.

        A single occupied qubit wire needs no connector; otherwise each
        connection's first/last elements get one-sided connectors and the
        middle ones are connected on both sides.
        """
        if (len([qbit for qbit in self.qubit_layer if (qbit is not None)]) == 1):
            return
        for (label, affected_bits) in self.connections:
            if (not affected_bits):
                continue
            affected_bits[0].connect(wire_char, ['bot'])
            for affected_bit in affected_bits[1:(- 1)]:
                affected_bit.connect(wire_char, ['bot', 'top'])
            affected_bits[(- 1)].connect(wire_char, ['top'], label)
            if label:
                # Widen every element so the connector label fits.
                for affected_bit in affected_bits:
                    affected_bit.right_fill = (len(label) + len(affected_bit.mid))
class AutoAdapterConfig(nn.Module):
    """Factory that resolves an adapter-config name to a fresh config instance."""

    # BUGFIX: the first parameter is named ``cls`` but the @classmethod
    # decorator was missing (likely stripped), so the unbound call
    # ``AutoAdapterConfig.get(name)`` failed. Restoring it is backward
    # compatible: instance calls still resolve correctly.
    @classmethod
    def get(cls, config_name: str):
        """Return a new adapter config registered under ``config_name``.

        Raises:
            ValueError: if ``config_name`` is not a known identifier.
        """
        if (config_name in ADAPTER_CONFIG_MAPPING):
            return ADAPTER_CONFIG_MAPPING[config_name]()
        raise ValueError('Unrecognized adapter config type identifier: {}. Should contain one of {}'.format(config_name, ', '.join(ADAPTER_CONFIG_MAPPING.keys())))
class FineTuningConfig():
    """Aggregate configuration for fine-tuning runs.

    NOTE(review): the attributes use dataclass-style annotated defaults, and
    decorators appear stripped elsewhere in this file — this was likely a
    ``@dataclass``. As written, these defaults are shared class-level
    instances; confirm the original decorator.
    """
    # Sub-configs: data pipeline, the two training stages, optimizer, and
    # experiment bookkeeping.
    dataset: DataConfig = DataConfig()
    stage1: Stage1Config = Stage1Config()
    stage2: Stage2Config = Stage2Config()
    optimizer: OptConfig = OptConfig()
    experiment: ExpConfig = ExpConfig()
def convert_orig_tf1_checkpoint_to_pytorch(tf_checkpoint_path, convbert_config_file, pytorch_dump_path):
    """Convert an original TF1 ConvBERT checkpoint to PyTorch and TF2 formats.

    Loads the TF1 weights into a freshly configured PyTorch ConvBertModel,
    saves it, then round-trips through TFConvBertModel so both framework
    formats end up under ``pytorch_dump_path``.
    """
    config = ConvBertConfig.from_json_file(convbert_config_file)
    pt_model = ConvBertModel(config)
    pt_model = load_tf_weights_in_convbert(pt_model, config, tf_checkpoint_path)
    pt_model.save_pretrained(pytorch_dump_path)
    # Reload from the PyTorch dump into TF2 and save alongside it.
    tf_model = TFConvBertModel.from_pretrained(pytorch_dump_path, from_pt=True)
    tf_model.save_pretrained(pytorch_dump_path)
def goToGoal(env, lastObs):
    """Scripted pick-and-place demonstration for a Fetch-style gym env.

    Runs four phases — move above the object, grasp it, carry it to the goal,
    then hold the gripper closed until the episode ends — and appends the
    collected actions/observations/infos to the module-level ``actions``,
    ``observations``, and ``infos`` lists.

    Observation layout assumed here (from slicing): [0:3] gripper position,
    [3:6] object position, [6:9] object position relative to gripper,
    [9:11] gripper state.
    """
    goal = lastObs['desired_goal']
    objectPos = lastObs['observation'][3:6]
    gripperPos = lastObs['observation'][:3]
    gripperState = lastObs['observation'][9:11]
    object_rel_pos = lastObs['observation'][6:9]
    episodeAcs = []
    episodeObs = []
    episodeInfo = []
    # Target a point slightly above the object to approach without collision.
    object_oriented_goal = object_rel_pos.copy()
    object_oriented_goal[2] += 0.03
    print('Max episode steps ', env._max_episode_steps)
    timeStep = 0
    episodeObs.append(lastObs)
    # Phase 1: move the (open) gripper above the object.
    while ((np.linalg.norm(object_oriented_goal) >= 0.005) and (timeStep <= env._max_episode_steps)):
        env.render()
        action = [0, 0, 0, 0]
        object_oriented_goal = object_rel_pos.copy()
        object_oriented_goal[2] += 0.03
        # Proportional control toward the hover point; last dim opens the gripper.
        for i in range(len(object_oriented_goal)):
            action[i] = (object_oriented_goal[i] * 6)
        action[(len(action) - 1)] = 0.05
        (obsDataNew, reward, done, info) = env.step(action)
        timeStep += 1
        episodeAcs.append(action)
        episodeInfo.append(info)
        episodeObs.append(obsDataNew)
        objectPos = obsDataNew['observation'][3:6]
        gripperPos = obsDataNew['observation'][:3]
        gripperState = obsDataNew['observation'][9:11]
        object_rel_pos = obsDataNew['observation'][6:9]
    # Phase 2: descend onto the object while closing the gripper.
    while ((np.linalg.norm(object_rel_pos) >= 0.005) and (timeStep <= env._max_episode_steps)):
        env.render()
        action = [0, 0, 0, 0]
        for i in range(len(object_rel_pos)):
            action[i] = (object_rel_pos[i] * 6)
        action[(len(action) - 1)] = (- 0.005)  # negative = close gripper
        (obsDataNew, reward, done, info) = env.step(action)
        timeStep += 1
        episodeAcs.append(action)
        episodeInfo.append(info)
        episodeObs.append(obsDataNew)
        objectPos = obsDataNew['observation'][3:6]
        gripperPos = obsDataNew['observation'][:3]
        gripperState = obsDataNew['observation'][9:11]
        object_rel_pos = obsDataNew['observation'][6:9]
    # Phase 3: carry the grasped object toward the goal position.
    while ((np.linalg.norm((goal - objectPos)) >= 0.01) and (timeStep <= env._max_episode_steps)):
        env.render()
        action = [0, 0, 0, 0]
        for i in range(len((goal - objectPos))):
            action[i] = ((goal - objectPos)[i] * 6)
        action[(len(action) - 1)] = (- 0.005)
        (obsDataNew, reward, done, info) = env.step(action)
        timeStep += 1
        episodeAcs.append(action)
        episodeInfo.append(info)
        episodeObs.append(obsDataNew)
        objectPos = obsDataNew['observation'][3:6]
        gripperPos = obsDataNew['observation'][:3]
        gripperState = obsDataNew['observation'][9:11]
        object_rel_pos = obsDataNew['observation'][6:9]
    # Phase 4: hold still (gripper closed) until the episode step limit.
    while True:
        env.render()
        action = [0, 0, 0, 0]
        action[(len(action) - 1)] = (- 0.005)
        (obsDataNew, reward, done, info) = env.step(action)
        timeStep += 1
        episodeAcs.append(action)
        episodeInfo.append(info)
        episodeObs.append(obsDataNew)
        objectPos = obsDataNew['observation'][3:6]
        gripperPos = obsDataNew['observation'][:3]
        gripperState = obsDataNew['observation'][9:11]
        object_rel_pos = obsDataNew['observation'][6:9]
        if (timeStep >= env._max_episode_steps):
            break
    # Record the full episode in the module-level buffers.
    actions.append(episodeAcs)
    observations.append(episodeObs)
    infos.append(episodeInfo)
def get_runid(path):
    """Return the next zero-padded 5-digit run id for ``path``.

    Sibling entries of ``path`` are expected to be named ``<id>_<name>``; the
    result is one greater than the largest existing id, or ``'00001'`` when
    the parent directory does not exist yet. Entries that do not match the
    pattern are ignored.
    """
    parent = Path(path).parent
    if not os.path.exists(parent):
        return '00001'
    max_id = 0
    for entry in os.listdir(parent):
        # BUGFIX: was a bare `except:` — narrow to ValueError, which covers
        # both a missing '_' separator and a non-numeric id prefix.
        try:
            entry_id, _ = entry.split('_', 1)
            max_id = max(max_id, int(entry_id))
        except ValueError:
            pass  # entry doesn't match '<id>_<name>'; skip it
    return str(max_id + 1).zfill(5)
def D_wgan(G, D, opt, training_set, minibatch_size, reals, labels, wgan_epsilon=0.001):
    """WGAN discriminator loss with an epsilon drift penalty.

    Scores real and generated images with D and returns
    ``fake_scores - real_scores + wgan_epsilon * real_scores**2``
    (the last term keeps real scores from drifting). Returns a
    ``(loss, None)`` pair; the second slot is an unused gradient term.
    """
    _ = (opt, training_set)  # accepted for a uniform loss-fn signature, unused here
    latent_shape = [minibatch_size] + G.input_shapes[0][1:]
    latents = tf.random_normal(latent_shape)
    fakes = G.get_output_for(latents, labels, is_training=True)
    # Score reals first, then fakes, matching the original graph order.
    scores_real = D.get_output_for(reals, labels, is_training=True)
    scores_fake = D.get_output_for(fakes, labels, is_training=True)
    scores_real = autosummary('Loss/scores/real', scores_real)
    scores_fake = autosummary('Loss/scores/fake', scores_fake)
    loss = scores_fake - scores_real
    with tf.name_scope('EpsilonPenalty'):
        epsilon_penalty = autosummary('Loss/epsilon_penalty', tf.square(scores_real))
    loss += epsilon_penalty * wgan_epsilon
    return (loss, None)
# NOTE(review): the original source had a bare `(nopython=True)` line here —
# a stray keyword-argument expression (a SyntaxError) that is almost certainly
# the remnant of a stripped `@numba.jit(nopython=True)` decorator (this is the
# habitat-style fog-of-war helper, which is numba-jitted). Restored below;
# TODO confirm this file imports `numba`.
@numba.jit(nopython=True)
def _draw_loop(top_down_map, fog_of_war_mask, current_point, current_angle, max_line_len, angles):
    """Reveal fog-of-war along one ray per angle offset.

    For each offset in ``angles``, casts a line of length ``max_line_len``
    from ``current_point`` in heading ``current_angle + angle`` and marks the
    visible cells via ``draw_fog_of_war_line`` (mutates ``fog_of_war_mask``).
    """
    for angle in angles:
        # Ray end point: walk max_line_len along the combined heading.
        draw_fog_of_war_line(top_down_map, fog_of_war_mask, current_point, (current_point + (max_line_len * np.array([np.cos((current_angle + angle)), np.sin((current_angle + angle))]))))
def print_measures(auroc, aupr, fpr, method_name='Ours', recall_level=recall_level_default):
    """Pretty-print OOD-detection metrics (FPR@recall, AUROC, AUPR) as percentages."""
    print('\t\t\t\t' + method_name)
    print(f'FPR{int(100 * recall_level):d}:\t\t\t{100 * fpr:.2f}')
    print(f'AUROC: \t\t\t{100 * auroc:.2f}')
    print(f'AUPR: \t\t\t{100 * aupr:.2f}')
class BaseModule(nn.Module, metaclass=ABCMeta):
    """Base module with configurable, run-once weight initialization.

    ``init_cfg`` describes how :meth:`init_weights` should initialize this
    module; initialization is performed at most once per instance.
    """

    def __init__(self, init_cfg=None):
        super(BaseModule, self).__init__()
        # Set once init_weights() has completed.
        self._is_init = False
        self.init_cfg = init_cfg

    def is_init(self):
        """Return True once init_weights() has run on this module."""
        return self._is_init

    def init_weights(self):
        """Initialize weights per ``self.init_cfg``, then recurse into children."""
        from ..cnn import initialize
        if self._is_init:
            warnings.warn(f'init_weights of {self.__class__.__name__} has been called more than once.')
            return
        if self.init_cfg:
            initialize(self, self.init_cfg)
            if isinstance(self.init_cfg, (dict, ConfigDict)):
                if self.init_cfg['type'] == 'Pretrained':
                    # Pretrained weights supersede recursive initialization;
                    # note this also leaves _is_init False, as before.
                    return
        for child in self.children():
            if hasattr(child, 'init_weights'):
                child.init_weights()
        self._is_init = True

    def __repr__(self):
        s = super().__repr__()
        if self.init_cfg:
            s += f'\ninit_cfg={self.init_cfg}'
        return s
# NOTE(review): the original source had a bare `(version='2.0')` statement on
# the line above this class — a stray keyword argument that is a SyntaxError
# and is almost certainly the remnant of a stripped decorator. It is preserved
# as a comment until the original decorator can be recovered:
#   (version='2.0')
class Sampler(object):
    """Abstract base class for data samplers.

    Subclasses must implement ``__iter__`` to yield sample indices.
    """

    def __init__(self, data_source):
        # The base class ignores data_source; kept for subclass signatures.
        pass

    def __iter__(self):
        raise NotImplementedError
def pick_examples(dataset):
    """Select the example enumeration strategy based on the global model type.

    Retrieval models enumerate by composition; all other model types
    enumerate by frequency.
    """
    if FLAGS.model_type == 'retrieval':
        return dataset.enumerate_comp()
    return dataset.enumerate_freq()
def get_connection():
    """Return the shared MySQL connection, creating or reconnecting it lazily."""
    conn = _db.connection
    if not conn:
        # First use: open a fresh connection from the module-level SQL config.
        conn = connector.connect(**config_sql)
        _db.connection = conn
    if not conn.is_connected():
        # The server may have dropped an idle connection; re-establish it.
        conn.reconnect()
    return conn
def vgg_munit(vgg, img, rec):
    """MUNIT-style perceptual loss between an image and its reconstruction.

    Instance-normalizes the deepest (13th ReLU) VGG features of both inputs
    and returns ``(loss, maps)`` where ``maps`` holds the per-pixel squared
    feature difference (channel-averaged) and ``loss`` is its mean.
    """
    feat_img = torch.nn.functional.instance_norm(vgg.fw_relu(img, 13)[-1])
    feat_rec = torch.nn.functional.instance_norm(vgg.fw_relu(rec, 13)[-1])
    diff_maps = [(feat_img - feat_rec).pow(2).mean(dim=1, keepdim=True)]
    loss = diff_maps[-1].mean()
    return (loss, diff_maps)
class FPN(nn.Module):
    """Feature Pyramid Network on a (frozen) MobileNetV2 backbone.

    Produces five feature maps: a half-width projection of the stem features
    plus four pyramid levels built by a top-down pathway over 1x1 lateral
    projections of the encoder stages.
    """

    def __init__(self, norm_layer, num_filters=128, pretrained=True):
        super().__init__()
        net = MobileNetV2(n_class=1000)
        if pretrained:
            # NOTE(review): loads weights from a fixed relative path — depends
            # on the working directory; confirm against the training script.
            state_dict = torch.load('mobilenetv2.pth.tar')
            net.load_state_dict(state_dict)
        self.features = net.features

        # Encoder stages: fixed slices of the MobileNetV2 feature stack.
        self.enc0 = nn.Sequential(*self.features[0:2])
        self.enc1 = nn.Sequential(*self.features[2:4])
        self.enc2 = nn.Sequential(*self.features[4:7])
        self.enc3 = nn.Sequential(*self.features[7:11])
        self.enc4 = nn.Sequential(*self.features[11:16])

        def td_block():
            # 3x3 conv + norm + ReLU applied after each top-down merge.
            return nn.Sequential(
                nn.Conv2d(num_filters, num_filters, kernel_size=3, padding=1),
                norm_layer(num_filters),
                nn.ReLU(inplace=True),
            )

        self.td1 = td_block()
        self.td2 = td_block()
        self.td3 = td_block()

        # 1x1 lateral projections; in-channel counts match the stage outputs.
        self.lateral4 = nn.Conv2d(160, num_filters, kernel_size=1, bias=False)
        self.lateral3 = nn.Conv2d(64, num_filters, kernel_size=1, bias=False)
        self.lateral2 = nn.Conv2d(32, num_filters, kernel_size=1, bias=False)
        self.lateral1 = nn.Conv2d(24, num_filters, kernel_size=1, bias=False)
        self.lateral0 = nn.Conv2d(16, num_filters // 2, kernel_size=1, bias=False)

        self.upsample3 = nn.Upsample(scale_factor=2, mode='nearest')
        self.upsample2 = nn.Upsample(scale_factor=2, mode='nearest')
        self.upsample1 = nn.Upsample(scale_factor=2, mode='nearest')

        # Backbone starts frozen; call unfreeze() to fine-tune it.
        for param in self.features.parameters():
            param.requires_grad = False

    def unfreeze(self):
        """Make the backbone trainable again."""
        for param in self.features.parameters():
            param.requires_grad = True

    def forward(self, x):
        # Bottom-up pass through the encoder stages.
        enc0 = self.enc0(x)
        enc1 = self.enc1(enc0)
        enc2 = self.enc2(enc1)
        enc3 = self.enc3(enc2)
        enc4 = self.enc4(enc3)

        # Lateral 1x1 projections.
        lat4 = self.lateral4(enc4)
        lat3 = self.lateral3(enc3)
        lat2 = self.lateral2(enc2)
        lat1 = self.lateral1(enc1)
        lat0 = self.lateral0(enc0)

        # Top-down pathway with nearest-neighbour upsampling.
        map4 = lat4
        map3 = self.td1(lat3 + self.upsample3(map4))
        map2 = self.td2(lat2 + self.upsample2(map3))
        map1 = self.td3(lat1 + self.upsample1(map2))
        return (lat0, map1, map2, map3, map4)
class ResNeXtBottleneck(nn.Module):
    """ResNeXt bottleneck block: 1x1 reduce -> grouped 3x3 -> 1x1 expand.

    Args:
        in_channels: channels of the input tensor.
        out_channels: channels of the block output.
        stride: stride of the grouped 3x3 convolution.
        cardinality: number of convolution groups.
        widen_factor: controls bottleneck width
            ``D = cardinality * out_channels // widen_factor``.
    """

    def __init__(self, in_channels, out_channels, stride, cardinality, widen_factor):
        super(ResNeXtBottleneck, self).__init__()
        D = (cardinality * out_channels) // widen_factor
        self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn_reduce = nn.BatchNorm2d(D)
        self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)
        self.bn = nn.BatchNorm2d(D)
        self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn_expand = nn.BatchNorm2d(out_channels)
        self.shortcut = nn.Sequential()
        # Bug fix: a projection shortcut is also required when stride != 1,
        # otherwise the identity shortcut's spatial size would not match the
        # downsampled residual branch and the addition in forward() would fail.
        if (in_channels != out_channels) or (stride != 1):
            self.shortcut.add_module('shortcut_conv', nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, padding=0, bias=False))
            self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(out_channels))

    def forward(self, x):
        # Reduce -> grouped 3x3 -> expand, BN after each conv, ReLU on the
        # first two stages; final ReLU applied after the residual addition.
        bottleneck = F.relu(self.bn_reduce(self.conv_reduce(x)), inplace=True)
        bottleneck = F.relu(self.bn(self.conv_conv(bottleneck)), inplace=True)
        bottleneck = self.bn_expand(self.conv_expand(bottleneck))
        residual = self.shortcut(x)
        return F.relu(residual + bottleneck, inplace=True)
def get_root_logger(log_file=None, log_level=logging.INFO, name='main'):
    """Get the root logger for the project.

    Args:
        log_file: optional path for a file handler (handled by ``get_logger``).
        log_level: logging level for the returned logger.
        name: logger name; the filter below matches records whose logger name
            contains it.
    """
    logger = get_logger(name=name, log_file=log_file, log_level=log_level)
    logging_filter = logging.Filter(name)
    # Bug fix: logging filters receive a LogRecord, not a string, so the
    # original `record.find(name)` would raise AttributeError — match on
    # record.name instead. NOTE(review): this filter is created but never
    # attached to the logger (same as the original); confirm whether
    # `logger.addFilter(logging_filter)` was intended.
    logging_filter.filter = (lambda record: record.name.find(name) != (- 1))
    return logger
class Controller():
    """Registry and dispatcher for model workers.

    Keeps a table of worker base URLs with the models they serve, their
    speed and queue length; expires workers whose heart beats stop; and
    routes generation requests to a worker using the configured dispatch
    method (lottery or shortest queue).
    """

    def __init__(self, dispatch_method: str):
        # worker_name (base URL) -> WorkerInfo
        self.worker_info = {}
        self.dispatch_method = DispatchMethod.from_str(dispatch_method)
        # Background thread that periodically expires stale workers.
        self.heart_beat_thread = threading.Thread(target=heart_beat_controller, args=(self,))
        self.heart_beat_thread.start()
        logger.info('Init controller')

    def register_worker(self, worker_name: str, check_heart_beat: bool, worker_status: dict):
        """Add or refresh a worker entry.

        Returns False when no status was supplied and the worker cannot be
        queried; True once the entry is stored.
        """
        if (worker_name not in self.worker_info):
            logger.info(f'Register a new worker: {worker_name}')
        else:
            logger.info(f'Register an existing worker: {worker_name}')
        if (not worker_status):
            # No status supplied by the caller: query the worker directly.
            worker_status = self.get_worker_status(worker_name)
        if (not worker_status):
            return False
        self.worker_info[worker_name] = WorkerInfo(worker_status['model_names'], worker_status['speed'], worker_status['queue_length'], check_heart_beat, time.time())
        logger.info(f'Register done: {worker_name}, {worker_status}')
        return True

    def get_worker_status(self, worker_name: str):
        """POST /worker_get_status to the worker; return its JSON status or None on failure."""
        try:
            r = requests.post((worker_name + '/worker_get_status'), timeout=5)
        except requests.exceptions.RequestException as e:
            logger.error(f'Get status fails: {worker_name}, {e}')
            return None
        if (r.status_code != 200):
            logger.error(f'Get status fails: {worker_name}, {r}')
            return None
        return r.json()

    def remove_worker(self, worker_name: str):
        # Raises KeyError if the worker is unknown.
        del self.worker_info[worker_name]

    def refresh_all_workers(self):
        """Re-register every known worker, dropping those that no longer respond."""
        old_info = dict(self.worker_info)
        self.worker_info = {}
        for (w_name, w_info) in old_info.items():
            if (not self.register_worker(w_name, w_info.check_heart_beat, None)):
                logger.info(f'Remove stale worker: {w_name}')

    def list_models(self):
        """Return the de-duplicated list of model names served across all workers."""
        model_names = set()
        for (w_name, w_info) in self.worker_info.items():
            model_names.update(w_info.model_names)
        return list(model_names)

    def get_worker_address(self, model_name: str):
        """Pick a worker address for *model_name*; return '' when none is available."""
        if (self.dispatch_method == DispatchMethod.LOTTERY):
            # Weighted lottery: sample a worker with probability ~ its speed.
            worker_names = []
            worker_speeds = []
            for (w_name, w_info) in self.worker_info.items():
                if (model_name in w_info.model_names):
                    worker_names.append(w_name)
                    worker_speeds.append(w_info.speed)
            worker_speeds = np.array(worker_speeds, dtype=np.float32)
            norm = np.sum(worker_speeds)
            if (norm < 0.0001):
                return ''
            worker_speeds = (worker_speeds / norm)
            # NOTE(review): `if True:` deliberately short-circuits the
            # liveness-checked retry loop below, so a worker is returned
            # without verifying it is alive; the while-loop is dead code
            # as written.
            if True:
                pt = np.random.choice(np.arange(len(worker_names)), p=worker_speeds)
                worker_name = worker_names[pt]
                return worker_name
            while True:
                pt = np.random.choice(np.arange(len(worker_names)), p=worker_speeds)
                worker_name = worker_names[pt]
                if self.get_worker_status(worker_name):
                    break
                else:
                    # Dead worker: drop it, renormalize, and resample.
                    self.remove_worker(worker_name)
                    worker_speeds[pt] = 0
                    norm = np.sum(worker_speeds)
                    if (norm < 0.0001):
                        return ''
                    worker_speeds = (worker_speeds / norm)
                    continue
            return worker_name
        elif (self.dispatch_method == DispatchMethod.SHORTEST_QUEUE):
            # Pick the worker with the smallest speed-normalized queue length.
            worker_names = []
            worker_qlen = []
            for (w_name, w_info) in self.worker_info.items():
                if (model_name in w_info.model_names):
                    worker_names.append(w_name)
                    worker_qlen.append((w_info.queue_length / w_info.speed))
            if (len(worker_names) == 0):
                return ''
            min_index = np.argmin(worker_qlen)
            w_name = worker_names[min_index]
            # Optimistically count the request we are about to dispatch.
            self.worker_info[w_name].queue_length += 1
            logger.info(f'names: {worker_names}, queue_lens: {worker_qlen}, ret: {w_name}')
            return w_name
        else:
            raise ValueError(f'Invalid dispatch method: {self.dispatch_method}')

    def receive_heart_beat(self, worker_name: str, queue_length: int):
        """Record a heart beat; return False for unknown workers."""
        if (worker_name not in self.worker_info):
            logger.info(f'Receive unknown heart beat. {worker_name}')
            return False
        self.worker_info[worker_name].queue_length = queue_length
        self.worker_info[worker_name].last_heart_beat = time.time()
        logger.info(f'Receive heart beat. {worker_name}')
        return True

    def remove_stable_workers_by_expiration(self):
        """Remove heart-beat-checked workers whose last beat is older than the expiration window."""
        expire = (time.time() - CONTROLLER_HEART_BEAT_EXPIRATION)
        to_delete = []
        for (worker_name, w_info) in self.worker_info.items():
            if (w_info.check_heart_beat and (w_info.last_heart_beat < expire)):
                to_delete.append(worker_name)
        for worker_name in to_delete:
            self.remove_worker(worker_name)

    def worker_api_generate_stream(self, params):
        """Proxy a streaming generation request to a worker, yielding its chunks.

        Yields null-delimited JSON error payloads on failure, otherwise
        SSE-style ``data: ...`` strings followed by ``data: [DONE]``.
        """
        print('params', params)
        worker_addr = self.get_worker_address(params['model'])
        if (not worker_addr):
            logger.info(f"no worker: {params['model']}")
            ret = {'text': server_error_msg, 'error_code': 2}
            # NOTE(review): execution falls through into the try block after
            # this error is yielded — a `return` here looks intended; confirm.
            (yield (json.dumps(ret).encode() + b'\x00'))
        try:
            response = requests.post((worker_addr + '/worker_generate_stream'), json=params, stream=True, timeout=1000)
            result = ''
            for chunk in response.iter_lines(decode_unicode=False, delimiter=b'\x00'):
                if chunk:
                    print('chunk=======', chunk)
                    a = chunk.decode('utf-8')
                    # Strip escaped right-quote and 'ufffd' replacement residue.
                    a = re.sub('\\\\u2019', "'", a)
                    a = re.sub('ufffd', '', a)
                    result += a
                    (yield f'''data: {a}
''')
            # Cache the full concatenated response for this prompt.
            import sys
            sys.path.append('..')
            from llmcache.cache import put
            put(params['prompt'], result)
            (yield f'''data: [DONE]
''')
        except requests.exceptions.RequestException as e:
            logger.info(f'worker timeout: {worker_addr}')
            ret = {'text': server_error_msg, 'error_code': 3}
            (yield (json.dumps(ret).encode() + b'\x00'))

    def worker_api_get_status(self):
        """Aggregate model names, total speed and total queue length across live workers."""
        model_names = set()
        speed = 0
        queue_length = 0
        for w_name in self.worker_info:
            worker_status = self.get_worker_status(w_name)
            if (worker_status is not None):
                model_names.update(worker_status['model_names'])
                speed += worker_status['speed']
                queue_length += worker_status['queue_length']
        return {'model_names': list(model_names), 'speed': speed, 'queue_length': queue_length}
def set_double_double_start_solutions(nvr, sols, vrblvl=0):
    """Set the start solutions for an *nvr*-variable system in double double precision.

    Clears the current double double solutions, stores *sols*, reads them back,
    and then issues PHCpack job 258 to copy them into the start-solutions
    container. Returns the PHCpack return value.

    Parameters: nvr is the number of variables, sols the list of solution
    strings, vrblvl the verbosity level.
    """
    if (vrblvl > 0):
        print('in set_double_double_start_solutions, with nvr :', nvr)
        print('the solutions :')
        for (idx, sol) in enumerate(sols):
            print('Solution', idx, ':')
            print(sol)
    # Replace whatever solutions are currently stored in the container.
    clear_double_double_solutions(vrblvl)
    set_double_double_solutions(nvr, sols, vrblvl)
    startsols = get_double_double_solutions(vrblvl)
    # NOTE(review): this echo of the stored solutions is unconditional — it is
    # not guarded by vrblvl like the prints above; confirm whether intended.
    for (idx, sol) in enumerate(startsols):
        print('Start solution', (idx + 1), ':')
        print(sol)
    # Job 258 takes no payload; the a/b/c pointers are dummy arguments.
    phc = get_phcfun()
    aaa = pointer(c_int32(0))
    bbb = pointer(c_int32(0))
    ccc = pointer(c_double(0.0))
    vrb = c_int32(vrblvl)
    if (vrblvl > 0):
        print('-> set_double_double_start_solutions calls phc', end='')
    retval = phc(258, aaa, bbb, ccc, vrb)
    if (vrblvl > 0):
        print(', return value :', retval)
    return retval
class VQModel(ModelMixin, ConfigMixin):
    """VQ-VAE model: encoder -> vector quantizer -> decoder (diffusers-style).

    NOTE(review): the original source had bare `_to_config` / `_forward_hook`
    statements where decorators belong — remnants of the stripped diffusers
    decorators `@register_to_config` (on __init__) and `@apply_forward_hook`
    (on encode/decode), which the diffusers VQModel uses. They are restored
    below; confirm this file imports `register_to_config` and
    `apply_forward_hook`.
    """

    @register_to_config
    def __init__(self, in_channels: int=3, out_channels: int=3, down_block_types: Tuple[str, ...]=('DownEncoderBlock2D',), up_block_types: Tuple[str, ...]=('UpDecoderBlock2D',), block_out_channels: Tuple[int, ...]=(64,), layers_per_block: int=1, act_fn: str='silu', latent_channels: int=3, sample_size: int=32, num_vq_embeddings: int=256, norm_num_groups: int=32, vq_embed_dim: Optional[int]=None, scaling_factor: float=0.18215, norm_type: str='group'):
        super().__init__()
        # double_z=False: the encoder produces plain latents, not mean/logvar.
        self.encoder = Encoder(in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False)
        vq_embed_dim = (vq_embed_dim if (vq_embed_dim is not None) else latent_channels)
        # 1x1 convs map between the latent space and the codebook dimension.
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        self.decoder = Decoder(in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type)

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool=True) -> VQEncoderOutput:
        """Encode *x* to (unquantized) latents."""
        h = self.encoder(x)
        h = self.quant_conv(h)
        if (not return_dict):
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool=False, return_dict: bool=True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Quantize latents *h* (unless force_not_quantize) and decode to a sample."""
        if (not force_not_quantize):
            (quant, _, _) = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        # Spatial norm decoders also consume the quantized latents directly.
        dec = self.decoder(quant2, (quant if (self.config.norm_type == 'spatial') else None))
        if (not return_dict):
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool=True) -> Union[DecoderOutput, Tuple[torch.FloatTensor, ...]]:
        """Full autoencoding pass: encode, quantize, decode."""
        h = self.encode(sample).latents
        dec = self.decode(h).sample
        if (not return_dict):
            return (dec,)
        return DecoderOutput(sample=dec)
def reduction_a(net, k, l, m, n):
    """Inception-ResNet 'Reduction-A' block.

    Concatenates (channel axis) a strided 3x3 conv branch, a 1x1->3x3->strided
    3x3 conv branch, and a strided max-pool branch. k, l, m, n are the filter
    counts of the convolutions.
    """
    with tf.variable_scope('Branch_0'):
        branch0 = slim.conv2d(net, n, 3, stride=2, padding='VALID', scope='Conv2d_1a_3x3')
    with tf.variable_scope('Branch_1'):
        branch1 = slim.conv2d(net, k, 1, scope='Conv2d_0a_1x1')
        branch1 = slim.conv2d(branch1, l, 3, scope='Conv2d_0b_3x3')
        branch1 = slim.conv2d(branch1, m, 3, stride=2, padding='VALID', scope='Conv2d_1a_3x3')
    with tf.variable_scope('Branch_2'):
        branch2 = slim.max_pool2d(net, 3, stride=2, padding='VALID', scope='MaxPool_1a_3x3')
    return tf.concat([branch0, branch1, branch2], 3)
def find_LL_channel(h5):
    """Return the first known log-likelihood dataset name present in *h5*.

    Candidates are tried in priority order (validation splits before training
    splits, larger sample percentages first). Raises ValueError when none of
    the candidate names is present.
    """
    candidates = ('valiset.spl100.LL', 'valiset.spl25.LL', 'valiset.spl10.LL', 'valiset.spl5.LL',
                  'dataset.spl100.LL', 'dataset.spl25.LL', 'dataset.spl10.LL', 'dataset.spl5.LL')
    for candidate in candidates:
        if candidate in h5:
            return candidate
    raise ValueError('Could not find LL dataset')
def _find_image_bounding_boxes(filenames, image_to_bboxes):
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if (basename in image_to_bboxes):
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print(('Found %d images with bboxes out of %d images' % (num_image_bbox, len(filenames))))
return bboxes |
def ghostnet():
    """Benchmark the zcls GhostNet configs against a fixed ImageNet input shape."""
    data_shape = (1, 3, 224, 224)
    cfg_files = ['configs/benchmarks/ghostnet/ghostnet_x1_0_zcls_imagenet_224.yaml']
    names = ['ghostnet_x1_0_zcls']
    assert len(names) == len(cfg_files)
    for model_name, cfg_file in zip(names, cfg_files):
        main(data_shape, cfg_file, model_name)
def vgg19_bn(cuda=True, model_root=None):
    """Build a VGG-19-BN model (optionally on GPU).

    Returns the ``(model, dataset_getter, is_imagenet)`` triple expected by
    the benchmarking harness.
    """
    print('Building vgg19_bn parameters')
    from imagenet import vgg
    model = vgg.vgg19_bn(model_root)
    if cuda:
        model = model.cuda()
    return (model, dataset.get, True)
def get_class_in_module(class_name, module_path):
    """Import *module_path* (given as a filesystem-style path) and fetch *class_name*.

    Path separators are translated to dots before importing. When
    *class_name* is None, falls back to auto-discovering the pipeline class
    in the imported module.
    """
    dotted = module_path.replace(os.path.sep, '.')
    module = importlib.import_module(dotted)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def get_logger(logdir):
    """Create the 'smnet' logger writing INFO+ records to a timestamped file in *logdir*."""
    logger = logging.getLogger('smnet')
    # Timestamp like 2024_01_31_12_00_00 (second precision, filesystem-safe).
    timestamp = str(datetime.datetime.now()).split('.')[0].replace(' ', '_')
    timestamp = timestamp.replace(':', '_').replace('-', '_')
    file_path = os.path.join(logdir, 'run_{}.log'.format(timestamp))
    handler = logging.FileHandler(file_path)
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger
class InfBallProjBounded(InfBallProj):
    """Dual bound for an l-infinity ball around X intersected with the box [l, u].

    Uses k random Cauchy projections of the clipped lower/upper corners;
    the median of the absolute projected values serves as an estimate of
    the l1 term in the bound (random-projection style — see the unbounded
    parent class).
    """

    def __init__(self, X, epsilon, k, l=0, u=1):
        # l-infinity perturbation radius around the nominal input X.
        self.epsilon = epsilon
        # Box-clipped lower/upper corners of the perturbation region.
        self.nu_one_l = [(X - epsilon).clamp(min=l)]
        self.nu_one_u = [(X + epsilon).clamp(max=u)]
        self.nu_x = [X]
        # Flattened (batch, 1, n) views of the corners.
        self.l = self.nu_one_l[(- 1)].view(X.size(0), 1, (- 1))
        self.u = self.nu_one_u[(- 1)].view(X.size(0), 1, (- 1))
        n = X[0].numel()  # NOTE(review): unused; presumably the input dimensionality
        # k Cauchy-distributed projection directions, broadcast over the batch.
        R = X.new(1, k, *X.size()[1:]).cauchy_()
        # Projected corners, one set per projection direction.
        self.nu_l = [(R * self.nu_one_l[(- 1)].unsqueeze(1))]
        self.nu_u = [(R * self.nu_one_u[(- 1)].unsqueeze(1))]

    def apply(self, dual_layer):
        # Push both the projected and the unprojected corner terms through
        # the next dual layer, keeping the full history in the lists.
        self.nu_l.append(dual_layer(*self.nu_l))
        self.nu_one_l.append(dual_layer(*self.nu_one_l))
        self.nu_u.append(dual_layer(*self.nu_u))
        self.nu_one_u.append(dual_layer(*self.nu_one_u))

    def bounds(self, network=None):
        """Return (lower, upper) activation bounds.

        When *network* is given, the stored initial terms are pushed through
        it; otherwise the most recently applied terms are used.
        """
        if (network is None):
            nu_u = self.nu_u[(- 1)]
            nu_one_u = self.nu_one_u[(- 1)]
            nu_l = self.nu_l[(- 1)]
            nu_one_l = self.nu_one_l[(- 1)]
        else:
            nu_u = network(self.nu_u[0])
            nu_one_u = network(self.nu_one_u[0])
            nu_l = network(self.nu_l[0])
            nu_one_l = network(self.nu_one_l[0])
        # Median of |Cauchy projections| estimates the l1 magnitude of the
        # upper-corner term; combine with the linear term to split the
        # positive/negative parts.
        nu_l1_u = torch.median(nu_u.abs(), 1)[0]
        nu_pos_u = ((nu_l1_u + nu_one_u) / 2)
        nu_neg_u = (((- nu_l1_u) + nu_one_u) / 2)
        # Same construction for the lower-corner term.
        nu_l1_l = torch.median(nu_l.abs(), 1)[0]
        nu_pos_l = ((nu_l1_l + nu_one_l) / 2)
        nu_neg_l = (((- nu_l1_l) + nu_one_l) / 2)
        zu = (nu_pos_u + nu_neg_l)
        zl = (nu_neg_u + nu_pos_l)
        return (zl, zu)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.