# (export artifact — table header from the source dump, commented out so the file parses)
# code stringlengths 17 6.64M | |---|
def crop_largest_square(image, aspect_ratio=1):
    """Center-crop ``image`` to the largest box with the given aspect ratio.

    Args:
        image: PIL-style image exposing ``.size`` (width, height) and ``.crop(box)``.
        aspect_ratio: desired width / height ratio of the crop (default 1: square).

    Returns:
        The cropped image returned by ``image.crop``.
    """
    width, height = image.size
    # Largest crop that fits inside the image at the requested ratio.
    new_width = min(width, int(height * aspect_ratio))
    new_height = min(height, int(width / aspect_ratio))
    # Integer box coordinates centered on the image; the original used true
    # division and handed float coordinates to crop(), relying on PIL's
    # implicit int coercion. Floor division gives the same box explicitly.
    left = (width - new_width) // 2
    top = (height - new_height) // 2
    right = (width + new_width) // 2
    bottom = (height + new_height) // 2
    return image.crop((left, top, right, bottom))
|
def dl_image(url, timeout, fn, quality, crop=False, resize=256):
    """Download an image from ``url`` to ``fn`` and post-process it in place.

    Optionally center-crops to a square, flattens alpha onto white so JPEG
    saving works, and resizes to ``resize`` x ``resize``.

    Returns:
        1 on success, 0 on any failure (a blank 256x256 black image is
        written to ``fn`` in that case).
    """
    fetched = 1
    try:
        response = requests.get(url, timeout=timeout)
        # Context manager closes the handle before PIL re-opens the file;
        # the original leaked the handle to the GC, which can race on some
        # platforms and exhaust descriptors under heavy concurrency.
        with open(fn, 'wb') as f:
            f.write(response.content)
        img = Image.open(fn)
        if crop:
            img = crop_largest_square(img)
        # Paletted images carry transparency via img.info rather than mode.
        has_alpha = (img.mode in ('RGBA', 'LA')) or (img.mode == 'P' and 'transparency' in img.info)
        if has_alpha:
            # Flatten alpha onto a white background so JPEG save succeeds.
            img_rgb = Image.new('RGB', img.size, (255, 255, 255))
            img_rgb.paste(img, (0, 0), img)
            img = img_rgb
        if resize:
            img = img.resize((resize, resize), Image.Resampling.LANCZOS)
        img.save(fn, quality=quality)
    except Exception:
        # Best-effort placeholder: write a black image so downstream code
        # indexing by filename still works; failure reported via return value.
        blank = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
        blank.save(fn, quality=quality)
        fetched = 0
    return fetched
|
def dl_urls_concurrent(urls, outfolder, nthreads=1, timeout=1, quality=100, crop=False, resize=256):
    """Download ``urls`` into ``outfolder`` using a thread pool.

    Files are named ``dl_000.jpg``, ``dl_001.jpg``, ... by url index.

    Returns:
        A list of 0/1 flags, one per url, indicating download success.
    """
    os.makedirs(outfolder, exist_ok=True)
    num_dl = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=nthreads) as executor:
        # Submit in chunks of `nthreads` and drain each chunk before the
        # next, so at most one chunk of futures is outstanding at a time.
        for k in range(0, len(urls), nthreads):
            urls_chunk = urls[k:k + nthreads]
            all_futures = [
                executor.submit(
                    dl_image, url, timeout,
                    # os.path.join fixes the original bare concatenation,
                    # which produced 'folderdl_000.jpg' when outfolder
                    # lacked a trailing separator.
                    os.path.join(outfolder, f'dl_{k + ui:03d}.jpg'),
                    quality, crop=crop, resize=resize)
                for ui, url in enumerate(urls_chunk)
            ]
            num_dl += [future.result() for future in all_futures]
    return num_dl
|
def crop_largest_square(image, aspect_ratio=1):
    """Crop the biggest centered region of ``image`` matching ``aspect_ratio``."""
    w, h = image.size
    # Largest crop at the requested width/height ratio that fits the image.
    crop_w = min(w, int(h * aspect_ratio))
    crop_h = min(h, int(w / aspect_ratio))
    box = ((w - crop_w) / 2, (h - crop_h) / 2, (w + crop_w) / 2, (h + crop_h) / 2)
    return image.crop(box)
|
def dl_image(url, timeout, fn, quality, crop=False, resize=256):
    """Fetch one image to ``fn``; return 1 on success, 0 on failure.

    On failure a blank 256x256 black image is written to ``fn`` instead.
    """
    try:
        payload = requests.get(url, timeout=timeout).content
        open(fn, 'wb').write(payload)
        img = Image.open(fn)
        if crop:
            img = crop_largest_square(img)
        # Paletted images signal transparency through img.info, not the mode.
        paletted_alpha = (img.mode == 'P') and ('transparency' in img.info)
        if (img.mode in ('RGBA', 'LA')) or paletted_alpha:
            # Composite onto white so the image can be saved as JPEG.
            flat = Image.new('RGB', img.size, (255, 255, 255))
            flat.paste(img, (0, 0), img)
            img = flat
        if resize:
            img = img.resize((resize, resize), Image.Resampling.LANCZOS)
        img.save(fn, quality=quality)
        return 1
    except Exception:
        placeholder = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
        placeholder.save(fn, quality=quality)
        return 0
|
def dl_urls_concurrent(urls, outfolder, nthreads=1, timeout=1, quality=100, crop=False, resize=256):
    """Concurrently download ``urls`` to ``outfolder``; return per-url 0/1 success flags."""
    os.makedirs(outfolder, exist_ok=True)
    results = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=nthreads) as executor:
        start = 0
        # Process one chunk of `nthreads` urls at a time, waiting on each
        # chunk's futures before submitting the next.
        while start < len(urls):
            batch = urls[start:start + nthreads]
            pending = []
            for offset, url in enumerate(batch):
                target = outfolder + f'dl_{start + offset:03d}.jpg'
                pending.append(executor.submit(dl_image, url, timeout, target, quality, crop=crop, resize=resize))
            for job in pending:
                results.append(job.result())
            start += nthreads
    return results
|
class Actor(nn.Module):
    """Tanh-squashed Gaussian policy network with clipped mean and log-std heads."""

    def __init__(self, state_dim, action_dim):
        super(Actor, self).__init__()
        self.fc1 = nn.Linear(state_dim, 256)
        self.fc2 = nn.Linear(256, 256)
        self.mu_head = nn.Linear(256, action_dim)
        self.sigma_head = nn.Linear(256, action_dim)

    def _get_outputs(self, state):
        """Return the tanh-transformed action distribution and its tanh mode."""
        hidden = F.relu(self.fc2(F.relu(self.fc1(state))))
        mu = torch.clip(self.mu_head(hidden), MEAN_MIN, MEAN_MAX)
        log_sigma = torch.clip(self.sigma_head(hidden), LOG_STD_MIN, LOG_STD_MAX)
        sigma = torch.exp(log_sigma)
        dist = TransformedDistribution(Normal(mu, sigma), TanhTransform(cache_size=1))
        return dist, torch.tanh(mu)

    def forward(self, state):
        """Sample an action (rsample, so gradients flow) plus its log-prob and the mode."""
        dist, mode = self._get_outputs(state)
        sampled = dist.rsample()
        log_prob = dist.log_prob(sampled).sum(axis=(- 1))
        return sampled, log_prob, mode

    def get_log_density(self, state, action):
        """Per-dimension log-density of ``action`` under the policy (action clipped off +-1)."""
        dist, _ = self._get_outputs(state)
        clipped = torch.clip(action, ((- 1.0) + EPS), (1.0 - EPS))
        return dist.log_prob(clipped)
|
class Discriminator(nn.Module):
    """Binary discriminator over (state, action, policy log-density) triples.

    Outputs probabilities clipped to [0.1, 0.9], which keeps the log terms
    of the downstream losses finite.
    """

    def __init__(self, state_dim, action_dim):
        super(Discriminator, self).__init__()
        # Separate 128-unit input heads for the state-action pair and the
        # (normalized) log-density; their concatenation feeds fc2.
        self.fc1_1 = nn.Linear((state_dim + action_dim), 128)
        self.fc1_2 = nn.Linear(action_dim, 128)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, 1)

    def forward(self, state, action, log_pi):
        """Return clipped discriminator probabilities of shape (batch, 1)."""
        sa = torch.cat([state, action], 1)
        d1 = F.relu(self.fc1_1(sa))
        d2 = F.relu(self.fc1_2(log_pi))
        d = torch.cat([d1, d2], 1)
        d = F.relu(self.fc2(d))
        # torch.sigmoid replaces F.sigmoid, deprecated since PyTorch 1.0.
        d = torch.sigmoid(self.fc3(d))
        d = torch.clip(d, 0.1, 0.9)
        return d
|
class DWBC(object):
    """Discriminator-Weighted Behavior Cloning agent.

    Trains a tanh-Gaussian policy by weighted behavior cloning, with weights
    derived from a discriminator that separates the expert dataset (D_e)
    from the other dataset (D_o), optionally using a PU-learning loss.
    """
    def __init__(self, state_dim, action_dim, alpha=7.5, no_pu=False, eta=0.5, d_update_num=100):
        # alpha: BC loss weight; eta: PU-learning class prior;
        # d_update_num: the discriminator is stepped once every d_update_num train() calls.
        self.policy = Actor(state_dim, action_dim).to(device)
        self.policy_optimizer = torch.optim.Adam(self.policy.parameters(), lr=0.0001, weight_decay=0.005)
        self.discriminator = Discriminator(state_dim, action_dim).to(device)
        self.discriminator_optimizer = torch.optim.Adam(self.discriminator.parameters(), lr=0.0001)
        self.alpha = alpha
        self.no_pu_learning = no_pu
        self.eta = eta
        self.d_update_num = d_update_num
        self.total_it = 0
    def select_action(self, state):
        """Return the deterministic (tanh-mode) action for a single flat state."""
        state = torch.FloatTensor(state.reshape(1, (- 1))).to(device)
        (_, _, action) = self.policy(state)
        return action.cpu().data.numpy().flatten()
    def train(self, replay_buffer_e, replay_buffer_o, batch_size=256):
        """One training iteration on one batch each from D_e and D_o."""
        self.total_it += 1
        (state_e, action_e, _, _, _, flag_e) = replay_buffer_e.sample(batch_size)
        (state_o, action_o, _, _, _, flag_o) = replay_buffer_o.sample(batch_size)
        # Policy log-densities of the dataset actions, clipped and rescaled
        # to [0, 1] before being fed to the discriminator.
        log_pi_e = self.policy.get_log_density(state_e, action_e)
        log_pi_o = self.policy.get_log_density(state_o, action_o)
        log_pi_e_clip = torch.clip(log_pi_e, LOG_PI_NORM_MIN, LOG_PI_NORM_MAX)
        log_pi_o_clip = torch.clip(log_pi_o, LOG_PI_NORM_MIN, LOG_PI_NORM_MAX)
        log_pi_e_norm = ((log_pi_e_clip - LOG_PI_NORM_MIN) / (LOG_PI_NORM_MAX - LOG_PI_NORM_MIN))
        log_pi_o_norm = ((log_pi_o_clip - LOG_PI_NORM_MIN) / (LOG_PI_NORM_MAX - LOG_PI_NORM_MIN))
        # .detach() stops policy gradients flowing through the discriminator input.
        d_e = self.discriminator(state_e, action_e, log_pi_e_norm.detach())
        d_o = self.discriminator(state_o, action_o, log_pi_o_norm.detach())
        if self.no_pu_learning:
            # Plain binary cross-entropy: D_e positive, D_o negative.
            d_loss_e = (- torch.log(d_e))
            d_loss_o = (- torch.log((1 - d_o)))
            d_loss = torch.mean((d_loss_e + d_loss_o))
        else:
            # PU-learning variant: D_o is treated as unlabeled, reweighted by eta.
            d_loss_e = (- torch.log(d_e))
            d_loss_o = (((- torch.log((1 - d_o))) / self.eta) + torch.log((1 - d_e)))
            d_loss = torch.mean((d_loss_e + d_loss_o))
        # The discriminator loss is computed every call but only optimized
        # every d_update_num iterations.
        if ((self.total_it % self.d_update_num) == 0):
            self.discriminator_optimizer.zero_grad()
            d_loss.backward()
            self.discriminator_optimizer.step()
        # Detached discriminator outputs act as fixed per-sample weights for
        # the policy loss; low-scoring D_o samples are zeroed out.
        d_e_clip = torch.squeeze(d_e).detach()
        d_o_clip = torch.squeeze(d_o).detach()
        d_o_clip[(d_o_clip < 0.5)] = 0.0
        bc_loss = (- torch.sum(log_pi_e, 1))
        corr_loss_e = ((- torch.sum(log_pi_e, 1)) * ((self.eta / (d_e_clip * (1.0 - d_e_clip))) + 1.0))
        corr_loss_o = ((- torch.sum(log_pi_o, 1)) * ((1.0 / (1.0 - d_o_clip)) - 1.0))
        p_loss = (((self.alpha * torch.mean(bc_loss)) - torch.mean(corr_loss_e)) + torch.mean(corr_loss_o))
        self.policy_optimizer.zero_grad()
        p_loss.backward()
        self.policy_optimizer.step()
    def save(self, filename):
        """Save policy/discriminator weights and optimizer states under `filename` prefixes."""
        torch.save(self.discriminator.state_dict(), (filename + '_discriminator'))
        torch.save(self.discriminator_optimizer.state_dict(), (filename + '_discriminator_optimizer'))
        torch.save(self.policy.state_dict(), (filename + '_policy'))
        torch.save(self.policy_optimizer.state_dict(), (filename + '_policy_optimizer'))
    def load(self, filename):
        """Load weights and optimizer states previously written by save()."""
        self.discriminator.load_state_dict(torch.load((filename + '_discriminator')))
        self.discriminator_optimizer.load_state_dict(torch.load((filename + '_discriminator_optimizer')))
        self.policy.load_state_dict(torch.load((filename + '_policy')))
        self.policy_optimizer.load_state_dict(torch.load((filename + '_policy_optimizer')))
|
def qlearning_dataset(dataset, terminate_on_end=False):
    """Convert a D4RL-style dict into flat (s, a, s', r, done) arrays.

    Timeout transitions are dropped (unless ``terminate_on_end``) so that
    no returned sample spans an episode boundary.
    """
    num_steps = dataset['rewards'].shape[0]
    obs_, next_obs_, action_, reward_, done_ = [], [], [], [], []
    episode_step = 0
    for i in range(num_steps - 1):
        obs = dataset['observations'][i].astype(np.float32)
        new_obs = dataset['observations'][i + 1].astype(np.float32)
        act = dataset['actions'][i].astype(np.float32)
        rew = dataset['rewards'][i].astype(np.float32)
        done_bool = bool(dataset['terminals'][i])
        final_timestep = dataset['timeouts'][i]
        if (not terminate_on_end) and final_timestep:
            # Timeout: discard this transition entirely.
            episode_step = 0
            continue
        if done_bool or final_timestep:
            episode_step = 0
        obs_.append(obs)
        next_obs_.append(new_obs)
        action_.append(act)
        reward_.append(rew)
        done_.append(done_bool)
        episode_step += 1
    return {
        'observations': np.array(obs_),
        'actions': np.array(action_),
        'next_observations': np.array(next_obs_),
        'rewards': np.array(reward_),
        'terminals': np.array(done_),
    }
|
def dataset_setting1(dataset1, dataset2, split_x, exp_num=10):
    """Build (D_e, D_o) for setting 1 in the paper.

    D_o starts from 1000 trajectories of ``dataset2`` (flag 0) and also
    receives some expert trajectories split off ``dataset1`` (flag 1).
    """
    dataset_o = dataset_T_trajs(dataset2, 1000)
    dataset_o['flag'] = np.zeros_like(dataset_o['terminals'])
    dataset_e, expert_extra = dataset_split_expert(dataset1, split_x, exp_num)
    dataset_e['flag'] = np.ones_like(dataset_e['terminals'])
    expert_extra['flag'] = np.ones_like(expert_extra['terminals'])
    # Fold the extra expert trajectories into D_o; they keep flag == 1.
    for key in dataset_o:
        dataset_o[key] = np.concatenate([dataset_o[key], expert_extra[key]], 0)
    return dataset_e, dataset_o
|
def dataset_setting2(dataset1, split_x):
    """Build (D_e, D_o) for setting 2 in the paper from a single replay dataset."""
    dataset_e, dataset_o = dataset_split_replay(dataset1, split_x)
    # Mark expert transitions with flag 1, the rest with flag 0.
    dataset_e['flag'] = np.ones_like(dataset_e['terminals'])
    dataset_o['flag'] = np.zeros_like(dataset_o['terminals'])
    return dataset_e, dataset_o
|
def dataset_setting_demodice(dataset1, dataset2, num_e=1, num_o_e=10, num_o_o=1000):
    """Build (D_e, D_o) following the DemoDICE data setting."""
    dataset_o = dataset_T_trajs(dataset2, num_o_o)
    dataset_o['flag'] = np.zeros_like(dataset_o['terminals'])
    dataset_e, expert_extra = dataset_split_expert(dataset1, num_o_e, (num_e + num_o_e))
    dataset_e['flag'] = np.ones_like(dataset_e['terminals'])
    expert_extra['flag'] = np.ones_like(expert_extra['terminals'])
    # Append the extra expert trajectories (flag 1) to D_o.
    for key in dataset_o:
        dataset_o[key] = np.concatenate([dataset_o[key], expert_extra[key]], 0)
    return dataset_e, dataset_o
|
def dataset_split_replay(dataset, split_x, terminate_on_end=False):
    """
    Returns D_e and D_o from replay datasets.

    Splits the replay data into trajectories, ranks them by return, takes
    every ``split_x``-th trajectory (starting from the second) of the top
    5% as D_e, and puts every other complete trajectory into D_o.
    """
    N = dataset['rewards'].shape[0]
    return_traj = []
    obs_traj = [[]]
    next_obs_traj = [[]]
    action_traj = [[]]
    reward_traj = [[]]
    done_traj = [[]]
    # Accumulate transitions into the current (last) trajectory; a terminal
    # or timeout flag closes it and starts a new one. Only closed
    # trajectories get an entry in return_traj, so a trailing partial
    # trajectory is never ranked or selected.
    for i in range((N - 1)):
        obs_traj[(- 1)].append(dataset['observations'][i].astype(np.float32))
        next_obs_traj[(- 1)].append(dataset['observations'][(i + 1)].astype(np.float32))
        action_traj[(- 1)].append(dataset['actions'][i].astype(np.float32))
        reward_traj[(- 1)].append(dataset['rewards'][i].astype(np.float32))
        done_traj[(- 1)].append(bool(dataset['terminals'][i]))
        final_timestep = (dataset['timeouts'][i] | dataset['terminals'][i])
        if ((not terminate_on_end) and final_timestep):
            return_traj.append(np.sum(reward_traj[(- 1)]))
            obs_traj.append([])
            next_obs_traj.append([])
            action_traj.append([])
            reward_traj.append([])
            done_traj.append([])
    # Rank trajectories by return (descending) and keep the top 5%.
    inds_all = np.argsort(return_traj)[::(- 1)]
    succ_num = int((len(inds_all) * 0.05))
    inds_top5 = inds_all[:succ_num]
    # Every split_x-th of the top 5%, starting from the second best, is D_e.
    inds_e = inds_top5[1::split_x]
    inds_e = list(inds_e)
    inds_all = list(inds_all)
    # NOTE(review): set difference — the ordering of inds_o follows set
    # iteration order, not return ranking.
    inds_o = (set(inds_all) - set(inds_e))
    inds_o = list(inds_o)
    print('# select {} trajs in mixed dataset as D_e, mean is {}'.format(len(inds_e), np.array(return_traj)[inds_e].mean()))
    print('# select {} trajs in mixed dataset as D_o, mean is {}'.format(len(inds_o), np.array(return_traj)[inds_o].mean()))
    obs_traj_e = [obs_traj[i] for i in inds_e]
    next_obs_traj_e = [next_obs_traj[i] for i in inds_e]
    action_traj_e = [action_traj[i] for i in inds_e]
    reward_traj_e = [reward_traj[i] for i in inds_e]
    done_traj_e = [done_traj[i] for i in inds_e]
    obs_traj_o = [obs_traj[i] for i in inds_o]
    next_obs_traj_o = [next_obs_traj[i] for i in inds_o]
    action_traj_o = [action_traj[i] for i in inds_o]
    reward_traj_o = [reward_traj[i] for i in inds_o]
    done_traj_o = [done_traj[i] for i in inds_o]
    def concat_trajectories(trajectories):
        # Flatten a list of per-trajectory lists into one flat array.
        return np.concatenate(trajectories, 0)
    dataset_e = {'observations': concat_trajectories(obs_traj_e), 'actions': concat_trajectories(action_traj_e), 'next_observations': concat_trajectories(next_obs_traj_e), 'rewards': concat_trajectories(reward_traj_e), 'terminals': concat_trajectories(done_traj_e)}
    dataset_o = {'observations': concat_trajectories(obs_traj_o), 'actions': concat_trajectories(action_traj_o), 'next_observations': concat_trajectories(next_obs_traj_o), 'rewards': concat_trajectories(reward_traj_o), 'terminals': concat_trajectories(done_traj_o)}
    return (dataset_e, dataset_o)
|
def dataset_split_expert(dataset, split_x, exp_num, terminate_on_end=False):
    """
    Returns D_e and expert data in D_o of setting 1 in the paper.

    Takes the first ``exp_num`` trajectories of the expert dataset; the
    last ``split_x`` of them go to D_o, the remainder form D_e.
    """
    N = dataset['rewards'].shape[0]
    return_traj = []
    obs_traj = [[]]
    next_obs_traj = [[]]
    action_traj = [[]]
    reward_traj = [[]]
    done_traj = [[]]
    # Accumulate transitions into the current (last) trajectory; a terminal
    # or timeout flag closes it and starts a new one.
    for i in range((N - 1)):
        obs_traj[(- 1)].append(dataset['observations'][i].astype(np.float32))
        next_obs_traj[(- 1)].append(dataset['observations'][(i + 1)].astype(np.float32))
        action_traj[(- 1)].append(dataset['actions'][i].astype(np.float32))
        reward_traj[(- 1)].append(dataset['rewards'][i].astype(np.float32))
        done_traj[(- 1)].append(bool(dataset['terminals'][i]))
        final_timestep = (dataset['timeouts'][i] | dataset['terminals'][i])
        if ((not terminate_on_end) and final_timestep):
            return_traj.append(np.sum(reward_traj[(- 1)]))
            obs_traj.append([])
            next_obs_traj.append([])
            action_traj.append([])
            reward_traj.append([])
            done_traj.append([])
    # First exp_num trajectories in dataset order (no return ranking here).
    inds_all = list(range(len(obs_traj)))
    inds_succ = inds_all[:exp_num]
    # The last split_x of those become the expert data inside D_o.
    inds_o = inds_succ[(- split_x):]
    inds_o = list(inds_o)
    inds_succ = list(inds_succ)
    # NOTE(review): set difference — inds_e ordering follows set iteration
    # order, not dataset order.
    inds_e = (set(inds_succ) - set(inds_o))
    inds_e = list(inds_e)
    print('# select {} trajs in expert dataset as D_e'.format(len(inds_e)))
    print('# select {} trajs in expert dataset as expert data in D_o'.format(len(inds_o)))
    obs_traj_e = [obs_traj[i] for i in inds_e]
    next_obs_traj_e = [next_obs_traj[i] for i in inds_e]
    action_traj_e = [action_traj[i] for i in inds_e]
    reward_traj_e = [reward_traj[i] for i in inds_e]
    done_traj_e = [done_traj[i] for i in inds_e]
    obs_traj_o = [obs_traj[i] for i in inds_o]
    next_obs_traj_o = [next_obs_traj[i] for i in inds_o]
    action_traj_o = [action_traj[i] for i in inds_o]
    reward_traj_o = [reward_traj[i] for i in inds_o]
    done_traj_o = [done_traj[i] for i in inds_o]
    def concat_trajectories(trajectories):
        # Flatten a list of per-trajectory lists into one flat array.
        return np.concatenate(trajectories, 0)
    dataset_e = {'observations': concat_trajectories(obs_traj_e), 'actions': concat_trajectories(action_traj_e), 'next_observations': concat_trajectories(next_obs_traj_e), 'rewards': concat_trajectories(reward_traj_e), 'terminals': concat_trajectories(done_traj_e)}
    dataset_o = {'observations': concat_trajectories(obs_traj_o), 'actions': concat_trajectories(action_traj_o), 'next_observations': concat_trajectories(next_obs_traj_o), 'rewards': concat_trajectories(reward_traj_o), 'terminals': concat_trajectories(done_traj_o)}
    return (dataset_e, dataset_o)
|
def dataset_T_trajs(dataset, T, terminate_on_end=False):
    """Return the first ``T`` complete trajectories of ``dataset`` flattened.

    Trajectories are delimited by terminal or timeout flags. Fewer than T
    trajectories are returned when the dataset holds fewer.
    """
    N = dataset['rewards'].shape[0]
    return_traj = []
    obs_traj = [[]]
    next_obs_traj = [[]]
    action_traj = [[]]
    reward_traj = [[]]
    done_traj = [[]]
    for i in range(N - 1):
        obs_traj[-1].append(dataset['observations'][i].astype(np.float32))
        next_obs_traj[-1].append(dataset['observations'][i + 1].astype(np.float32))
        action_traj[-1].append(dataset['actions'][i].astype(np.float32))
        reward_traj[-1].append(dataset['rewards'][i].astype(np.float32))
        done_traj[-1].append(bool(dataset['terminals'][i]))
        final_timestep = (dataset['timeouts'][i] | dataset['terminals'][i])
        if (not terminate_on_end) and final_timestep:
            # Close the current trajectory and start a new (possibly empty) one.
            return_traj.append(np.sum(reward_traj[-1]))
            obs_traj.append([])
            next_obs_traj.append([])
            action_traj.append([])
            reward_traj.append([])
            done_traj.append([])
    inds = list(range(len(obs_traj)))[:T]
    # Bug fix: report the count actually selected, which can be smaller
    # than T when the dataset holds fewer trajectories (the original always
    # printed T).
    print('# select {} trajs in the dataset'.format(len(inds)))
    obs_traj = [obs_traj[i] for i in inds]
    next_obs_traj = [next_obs_traj[i] for i in inds]
    action_traj = [action_traj[i] for i in inds]
    reward_traj = [reward_traj[i] for i in inds]
    done_traj = [done_traj[i] for i in inds]
    def concat_trajectories(trajectories):
        # Flatten a list of per-trajectory lists into one flat array.
        return np.concatenate(trajectories, 0)
    return {'observations': concat_trajectories(obs_traj), 'actions': concat_trajectories(action_traj), 'next_observations': concat_trajectories(next_obs_traj), 'rewards': concat_trajectories(reward_traj), 'terminals': concat_trajectories(done_traj)}
|
def eval_policy(policy, env_name, seed, mean, std, seed_offset=100, eval_episodes=10):
    """Run ``policy`` for ``eval_episodes`` episodes and return the D4RL score."""
    eval_env = gym.make(env_name)
    eval_env.seed(seed + seed_offset)
    total = 0.0
    for _ in range(eval_episodes):
        state = eval_env.reset()
        done = False
        while not done:
            # States are normalized with the dataset statistics before acting.
            normalized = (np.array(state).reshape(1, -1) - mean) / std
            state, reward, done, _ = eval_env.step(policy.select_action(normalized))
            total += reward
    avg_reward = total / eval_episodes
    d4rl_score = eval_env.get_normalized_score(avg_reward) * 100
    print('---------------------------------------')
    print(f'Env: {env_name}, Evaluation over {eval_episodes} episodes: {avg_reward:.3f}, D4RL score: {d4rl_score:.3f}')
    print('---------------------------------------')
    return d4rl_score
|
def eval_policy(policy, env_name, seed, mean, std, seed_offset=100, eval_episodes=10):
    """Evaluate ``policy`` on a fresh ``env_name`` env; return the normalized D4RL score."""
    eval_env = gym.make(env_name)
    eval_env.seed(seed + seed_offset)
    avg_reward = 0.0
    for _ in range(eval_episodes):
        state, done = eval_env.reset(), False
        while not done:
            # Normalize the raw observation with dataset statistics.
            obs = (np.array(state).reshape(1, -1) - mean) / std
            action = policy.select_action(obs)
            state, reward, done, _ = eval_env.step(action)
            avg_reward += reward
    avg_reward /= eval_episodes
    d4rl_score = eval_env.get_normalized_score(avg_reward) * 100
    print('---------------------------------------')
    print(f'Env: {env_name}, Evaluation over {eval_episodes} episodes: {avg_reward:.3f}, D4RL score: {d4rl_score:.3f}')
    print('---------------------------------------')
    return d4rl_score
|
class ReplayBuffer(object):
    """Flat numpy transition buffer with D4RL import and state normalization."""

    def __init__(self, state_dim, action_dim, max_size=int(1000000.0)):
        self.max_size = max_size
        self.ptr = 0
        self.size = 0
        self.state = np.zeros((max_size, state_dim))
        self.action = np.zeros((max_size, action_dim))
        self.next_state = np.zeros((max_size, state_dim))
        self.reward = np.zeros((max_size, 1))
        self.not_done = np.zeros((max_size, 1))
        self.flag = np.zeros((max_size, 1))
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    def sample(self, batch_size):
        """Draw a uniform random batch as float tensors on ``self.device``."""
        idx = np.random.randint(0, self.size, size=batch_size)
        arrays = (self.state, self.action, self.next_state, self.reward, self.not_done, self.flag)
        return tuple(torch.FloatTensor(a[idx]).to(self.device) for a in arrays)

    def convert_D4RL(self, dataset):
        """Replace the buffer contents with a D4RL-style dataset dict."""
        self.state = dataset['observations']
        self.action = dataset['actions']
        self.next_state = dataset['next_observations']
        self.reward = dataset['rewards'].reshape(-1, 1)
        self.not_done = 1.0 - dataset['terminals'].reshape(-1, 1)
        self.flag = dataset['flag'].reshape(-1, 1)
        self.size = self.state.shape[0]

    def normalize_states(self, eps=0.001, mean=None, std=None):
        """Standardize states in place; returns the (mean, std) used."""
        if mean is None and std is None:
            mean = self.state.mean(0, keepdims=True)
            # eps guards against division by zero on constant dimensions.
            std = self.state.std(0, keepdims=True) + eps
        self.state = (self.state - mean) / std
        self.next_state = (self.next_state - mean) / std
        return mean, std
|
def update_actor(key: PRNGKey, actor: Model, critic: Model, value: Model, batch: Batch, alpha: float, alg: str) -> Tuple[(Model, InfoDict)]:
    """One advantage-weighted actor update for SQL or EQL.

    The weight is max(q - v, 0) for SQL and a clipped exp((q - v)/alpha)
    for EQL; the actor maximizes the weighted log-likelihood of the batch
    actions.
    """
    v = value(batch.observations)
    (q1, q2) = critic(batch.observations, batch.actions)
    q = jnp.minimum(q1, q2)
    if alg == 'SQL':
        weight = jnp.maximum(q - v, 0)
    elif alg == 'EQL':
        weight = jnp.exp((10 * (q - v)) / alpha)
        weight = jnp.clip(weight, 0, 100.0)
    else:
        # Bug fix: an unknown alg previously fell through to a NameError on
        # `weight`; fail fast with the same error update_v raises.
        raise NotImplementedError('please choose SQL or EQL')

    def actor_loss_fn(actor_params: Params) -> Tuple[(jnp.ndarray, InfoDict)]:
        dist = actor.apply({'params': actor_params}, batch.observations, training=True, rngs={'dropout': key})
        log_probs = dist.log_prob(batch.actions)
        actor_loss = -(weight * log_probs).mean()
        return (actor_loss, {'actor_loss': actor_loss})
    (new_actor, info) = actor.apply_gradient(actor_loss_fn)
    return (new_actor, info)
|
def default_init(scale: Optional[float]=jnp.sqrt(2)):
    """Orthogonal kernel initializer with the given gain (default sqrt(2))."""
    initializer = nn.initializers.orthogonal(scale)
    return initializer
|
class MLP(nn.Module):
    """Fully-connected stack with optional LayerNorm and dropout per hidden layer."""
    hidden_dims: Sequence[int]
    activations: Callable[([jnp.ndarray], jnp.ndarray)] = nn.relu
    activate_final: int = False
    layer_norm: bool = False
    dropout_rate: Optional[float] = None

    @nn.compact
    def __call__(self, x: jnp.ndarray, training: bool=False) -> jnp.ndarray:
        n_layers = len(self.hidden_dims)
        for idx, width in enumerate(self.hidden_dims):
            x = nn.Dense(width, kernel_init=default_init())(x)
            # Dense -> (LayerNorm) -> activation -> (Dropout); the post-Dense
            # stages are skipped after the last layer unless activate_final.
            is_hidden = (idx + 1) < n_layers
            if is_hidden or self.activate_final:
                if self.layer_norm:
                    x = nn.LayerNorm()(x)
                x = self.activations(x)
                if (self.dropout_rate is not None) and (self.dropout_rate > 0):
                    x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=(not training))
        return x
|
@flax.struct.dataclass
class Model():
    """Bundles a flax module with its parameters, optimizer and optimizer state.

    As a flax struct dataclass, instances are immutable pytrees; every
    update returns a new Model via ``.replace``.
    """
    step: int
    apply_fn: nn.Module = flax.struct.field(pytree_node=False)
    params: Params
    tx: Optional[optax.GradientTransformation] = flax.struct.field(pytree_node=False)
    opt_state: Optional[optax.OptState] = None
    @classmethod
    def create(cls, model_def: nn.Module, inputs: Sequence[jnp.ndarray], tx: Optional[optax.GradientTransformation]=None) -> 'Model':
        """Initialize parameters from ``inputs`` and, if given, the optimizer state."""
        variables = model_def.init(*inputs)
        (_, params) = variables.pop('params')
        if (tx is not None):
            opt_state = tx.init(params)
        else:
            opt_state = None
        return cls(step=1, apply_fn=model_def, params=params, tx=tx, opt_state=opt_state)
    def __call__(self, *args, **kwargs):
        """Apply the module using the stored parameters."""
        return self.apply_fn.apply({'params': self.params}, *args, **kwargs)
    def apply(self, *args, **kwargs):
        """Raw apply — the caller supplies the variables dict (e.g. candidate params)."""
        return self.apply_fn.apply(*args, **kwargs)
    def apply_gradient(self, loss_fn) -> Tuple[(Any, 'Model')]:
        """Take one optimizer step on ``loss_fn``; returns (new_model, aux_info)."""
        grad_fn = jax.grad(loss_fn, has_aux=True)
        (grads, info) = grad_fn(self.params)
        (updates, new_opt_state) = self.tx.update(grads, self.opt_state, self.params)
        new_params = optax.apply_updates(self.params, updates)
        return (self.replace(step=(self.step + 1), params=new_params, opt_state=new_opt_state), info)
    def save(self, save_path: str):
        """Serialize only the parameters (not the optimizer state) to ``save_path``."""
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        with open(save_path, 'wb') as f:
            f.write(flax.serialization.to_bytes(self.params))
    def load(self, load_path: str) -> 'Model':
        """Return a copy of this Model with parameters read from ``load_path``."""
        with open(load_path, 'rb') as f:
            params = flax.serialization.from_bytes(self.params, f.read())
        return self.replace(params=params)
|
def get_config():
    """Training configuration (0.5 value-dropout variant, lr 2e-4)."""
    settings = {
        'actor_lr': 0.0002,
        'value_lr': 0.0002,
        'critic_lr': 0.0002,
        'hidden_dims': (256, 256),
        'discount': 0.99,
        'value_dropout_rate': 0.5,
        'layernorm': True,
        'tau': 0.005,
    }
    return ml_collections.ConfigDict(settings)
|
def get_config():
    """Training configuration (dropout 0.1 variant, lr 3e-4)."""
    cfg = ml_collections.ConfigDict()
    # Optimizers
    cfg.actor_lr = 0.0003
    cfg.value_lr = 0.0003
    cfg.critic_lr = 0.0003
    # Networks
    cfg.hidden_dims = (256, 256)
    cfg.dropout_rate = 0.1
    cfg.layernorm = True
    # RL
    cfg.discount = 0.99
    cfg.tau = 0.005
    return cfg
|
def get_config():
    """Training configuration (no dropout variant, lr 3e-4)."""
    values = {
        'actor_lr': 0.0003,
        'value_lr': 0.0003,
        'critic_lr': 0.0003,
        'hidden_dims': (256, 256),
        'discount': 0.99,
        'dropout_rate': 0,
        'layernorm': True,
        'tau': 0.005,
    }
    return ml_collections.ConfigDict(values)
|
def update_v(critic: Model, value: Model, batch: Batch, alpha: float, alg: str) -> Tuple[(Model, InfoDict)]:
    """One value-network update for SQL or EQL against min(Q1, Q2) targets."""
    (q1, q2) = critic(batch.observations, batch.actions)
    q = jnp.minimum(q1, q2)
    def value_loss_fn(value_params: Params) -> Tuple[(jnp.ndarray, InfoDict)]:
        v = value.apply({'params': value_params}, batch.observations)
        if (alg == 'SQL'):
            # Sparse Q-learning loss; sp_weight keeps only the positive part
            # of the sparsity term.
            sp_term = (((q - v) / (2 * alpha)) + 1.0)
            sp_weight = jnp.where((sp_term > 0), 1.0, 0.0)
            value_loss = ((sp_weight * (sp_term ** 2)) + (v / alpha)).mean()
        elif (alg == 'EQL'):
            # Exponential Q-learning: cap the exponent at 5.0 and subtract a
            # stop-gradient per-batch max (floored at -1.0) for numerical
            # stability of the exp terms.
            sp_term = ((q - v) / alpha)
            sp_term = jnp.minimum(sp_term, 5.0)
            max_sp_term = jnp.max(sp_term, axis=0)
            max_sp_term = jnp.where((max_sp_term < (- 1.0)), (- 1.0), max_sp_term)
            max_sp_term = jax.lax.stop_gradient(max_sp_term)
            value_loss = (jnp.exp((sp_term - max_sp_term)) + ((jnp.exp((- max_sp_term)) * v) / alpha)).mean()
        else:
            raise NotImplementedError('please choose SQL or EQL')
        return (value_loss, {'value_loss': value_loss, 'v': v.mean(), 'q-v': (q - v).mean()})
    (new_value, info) = value.apply_gradient(value_loss_fn)
    return (new_value, info)
|
def update_q(critic: Model, value: Model, batch: Batch, discount: float) -> Tuple[(Model, InfoDict)]:
    """One TD(0) critic update toward r + discount * mask * V(s')."""
    next_v = value(batch.next_observations)
    target_q = batch.rewards + discount * batch.masks * next_v

    def critic_loss_fn(critic_params: Params) -> Tuple[(jnp.ndarray, InfoDict)]:
        q1, q2 = critic.apply({'params': critic_params}, batch.observations, batch.actions)
        # Sum of squared TD errors of both critic heads.
        td1 = (q1 - target_q) ** 2
        td2 = (q2 - target_q) ** 2
        critic_loss = (td1 + td2).mean()
        return critic_loss, {'critic_loss': critic_loss, 'q1': q1.mean()}
    new_critic, info = critic.apply_gradient(critic_loss_fn)
    return new_critic, info
|
def split_into_trajectories(observations, actions, rewards, masks, dones_float, next_observations):
    """Group flat transition arrays into per-episode lists of 6-tuples."""
    trajs = [[]]
    n = len(observations)
    for i in tqdm(range(n)):
        step = (observations[i], actions[i], rewards[i], masks[i], dones_float[i], next_observations[i])
        trajs[-1].append(step)
        # Start a new trajectory after a terminal step (except at the very end).
        if dones_float[i] == 1.0 and (i + 1) < n:
            trajs.append([])
    return trajs
|
def merge_trajectories(trajs):
    """Flatten per-episode lists of (obs, act, rew, mask, done, next_obs) into stacked arrays."""
    flat = [step for traj in trajs for step in traj]
    observations, actions, rewards, masks, dones_float, next_observations = zip(*flat)
    return (
        np.stack(observations),
        np.stack(actions),
        np.stack(rewards),
        np.stack(masks),
        np.stack(dones_float),
        np.stack(next_observations),
    )
|
class Dataset(object):
    """In-memory transition store with uniform random batch sampling."""

    def __init__(self, observations: np.ndarray, actions: np.ndarray, rewards: np.ndarray, masks: np.ndarray, dones_float: np.ndarray, next_observations: np.ndarray, size: int):
        self.observations = observations
        self.actions = actions
        self.rewards = rewards
        self.masks = masks
        self.dones_float = dones_float
        self.next_observations = next_observations
        self.size = size

    def sample(self, batch_size: int) -> Batch:
        """Draw ``batch_size`` indices uniformly (with replacement) and return a Batch."""
        idx = np.random.randint(self.size, size=batch_size)
        return Batch(
            observations=self.observations[idx],
            actions=self.actions[idx],
            rewards=self.rewards[idx],
            masks=self.masks[idx],
            next_observations=self.next_observations[idx],
        )
|
class D4RLDataset(Dataset):
    """Dataset wrapper that loads a D4RL env's data, optionally mixed with a second env."""
    def __init__(self, env: gym.Env, add_env: gym.Env='None', expert_ratio: float=1.0, clip_to_eps: bool=True, heavy_tail: bool=False, heavy_tail_higher: float=0.0, eps: float=1e-05):
        dataset = d4rl.qlearning_dataset(env)
        if (add_env != 'None'):
            # Mixed-data regime: blend transitions from add_env with a small
            # expert slice from env.
            add_data = d4rl.qlearning_dataset(add_env)
            if (expert_ratio >= 1):
                raise ValueError('in the mix setting, the expert_ratio must < 1')
            length_add_data = int((add_data['rewards'].shape[0] * (1 - expert_ratio)))
            length_expert_data = int((length_add_data * expert_ratio))
            # NOTE(review): add_data is truncated by length_expert_data and the
            # expert slice is the first length_expert_data transitions — the
            # printed lengths below do not match the concatenated sizes exactly;
            # verify this arithmetic against the intended mixing recipe.
            for (k, _) in dataset.items():
                dataset[k] = np.concatenate([add_data[k][:(- length_expert_data)], dataset[k][:length_expert_data]], axis=0)
            print('-------------------------------')
            print(f'we are in the mix data regimes, len(expert):{length_expert_data} | len(add_data): {length_add_data} | expert ratio: {expert_ratio}')
            print('-------------------------------')
        if heavy_tail:
            # Reloads the dataset with heavy-tail options (overwrites any mixing above).
            dataset = d4rl.qlearning_dataset(env, heavy_tail=True, heavy_tail_higher=heavy_tail_higher)
        if clip_to_eps:
            # Keep actions strictly inside (-1, 1) for tanh log-prob stability.
            lim = (1 - eps)
            dataset['actions'] = np.clip(dataset['actions'], (- lim), lim)
        # Recover episode boundaries: a boundary is where next_observation
        # does not match the following observation, or a terminal flag.
        dones_float = np.zeros_like(dataset['rewards'])
        for i in range((len(dones_float) - 1)):
            if ((np.linalg.norm((dataset['observations'][(i + 1)] - dataset['next_observations'][i])) > 1e-06) or (dataset['terminals'][i] == 1.0)):
                dones_float[i] = 1
            else:
                dones_float[i] = 0
        dones_float[(- 1)] = 1
        super().__init__(dataset['observations'].astype(np.float32), actions=dataset['actions'].astype(np.float32), rewards=dataset['rewards'].astype(np.float32), masks=(1.0 - dataset['terminals'].astype(np.float32)), dones_float=dones_float.astype(np.float32), next_observations=dataset['next_observations'].astype(np.float32), size=len(dataset['observations']))
|
class ReplayBuffer(Dataset):
    """Fixed-capacity FIFO replay buffer backed by preallocated numpy arrays."""

    def __init__(self, observation_space: gym.spaces.Box, action_dim: int, capacity: int):
        obs_buf = np.empty((capacity, *observation_space.shape), dtype=observation_space.dtype)
        next_obs_buf = np.empty((capacity, *observation_space.shape), dtype=observation_space.dtype)
        act_buf = np.empty((capacity, action_dim), dtype=np.float32)
        rew_buf = np.empty((capacity,), dtype=np.float32)
        mask_buf = np.empty((capacity,), dtype=np.float32)
        done_buf = np.empty((capacity,), dtype=np.float32)
        super().__init__(observations=obs_buf, actions=act_buf, rewards=rew_buf, masks=mask_buf, dones_float=done_buf, next_observations=next_obs_buf, size=0)
        self.size = 0
        self.insert_index = 0
        self.capacity = capacity

    def initialize_with_dataset(self, dataset: Dataset, num_samples: Optional[int]):
        """Copy up to ``num_samples`` transitions from ``dataset`` into the buffer."""
        assert (self.insert_index == 0), 'Can insert a batch online in an empty replay buffer.'
        dataset_size = len(dataset.observations)
        num_samples = dataset_size if num_samples is None else min(dataset_size, num_samples)
        assert (self.capacity >= num_samples), 'Dataset cannot be larger than the replay buffer capacity.'
        if num_samples < dataset_size:
            # Subsample without replacement when the dataset is larger.
            indices = np.random.permutation(dataset_size)[:num_samples]
        else:
            indices = np.arange(num_samples)
        self.observations[:num_samples] = dataset.observations[indices]
        self.actions[:num_samples] = dataset.actions[indices]
        self.rewards[:num_samples] = dataset.rewards[indices]
        self.masks[:num_samples] = dataset.masks[indices]
        self.dones_float[:num_samples] = dataset.dones_float[indices]
        self.next_observations[:num_samples] = dataset.next_observations[indices]
        self.insert_index = num_samples
        self.size = num_samples

    def insert(self, observation: np.ndarray, action: np.ndarray, reward: float, mask: float, done_float: float, next_observation: np.ndarray):
        """Write one transition at the cursor, wrapping around at capacity."""
        at = self.insert_index
        self.observations[at] = observation
        self.actions[at] = action
        self.rewards[at] = reward
        self.masks[at] = mask
        self.dones_float[at] = done_float
        self.next_observations[at] = next_observation
        self.insert_index = (at + 1) % self.capacity
        self.size = min(self.size + 1, self.capacity)
|
def _gen_dir_name():
now_str = datetime.now().strftime('%m-%d-%y_%H.%M.%S')
rand_str = ''.join(random.choices(string.ascii_lowercase, k=4))
return f'{now_str}_{rand_str}'
|
class Log():
    """Experiment logger: writes a timestamped dir with a text log, a CSV of
    metric rows, and a JSON dump of the run configuration."""
    def __init__(self, root_log_dir, cfg_dict, txt_filename='log.txt', csv_filename='progress.csv', cfg_filename='config.json', flush=True):
        # Creates the run directory and opens the text log immediately;
        # the CSV is opened lazily on the first row() call so its header can
        # be derived from the first row's keys.
        self.dir = (Path(root_log_dir) / _gen_dir_name())
        self.dir.mkdir(parents=True)
        self.txt_file = open((self.dir / txt_filename), 'w')
        self.csv_file = None
        (self.dir / cfg_filename).write_text(json.dumps(cfg_dict))
        self.txt_filename = txt_filename
        self.csv_filename = csv_filename
        self.cfg_filename = cfg_filename
        self.flush = flush
    def write(self, message, end='\n'):
        """Print a timestamped message to stdout and to the text log."""
        now_str = datetime.now().strftime('%H:%M:%S')
        message = (f'[{now_str}] ' + message)
        for f in [sys.stdout, self.txt_file]:
            print(message, end=end, file=f, flush=self.flush)
    def __call__(self, *args, **kwargs):
        """Shorthand for write()."""
        self.write(*args, **kwargs)
    def row(self, dict):
        """Append one metrics row to the CSV (header taken from the first row).

        NOTE(review): the parameter shadows the builtin ``dict``; kept as-is
        to preserve the keyword interface.
        """
        if (self.csv_file is None):
            self.csv_file = open((self.dir / self.csv_filename), 'w', newline='')
            self.csv_writer = csv.DictWriter(self.csv_file, list(dict.keys()))
            self.csv_writer.writeheader()
        self(str(dict))
        self.csv_writer.writerow(dict)
        if self.flush:
            self.csv_file.flush()
    def close(self):
        """Close the text log and, if it was opened, the CSV file."""
        self.txt_file.close()
        if (self.csv_file is not None):
            self.csv_file.close()
|
def evaluate(env_name: str, agent: nn.Module, env: gym.Env, num_episodes: int) -> float:
    """Roll out the agent deterministically for num_episodes episodes and
    return the D4RL-normalized average return on a 0-100 scale.

    NOTE(review): despite the original `Dict[str, float]` annotation, this
    function returns a single float.
    """
    total_reward_ = []
    for _ in range(num_episodes):
        (observation, done) = (env.reset(), False)
        total_reward = 0.0
        while (not done):
            # temperature=0.0 -> greedy/mean action
            action = agent.sample_actions(observation, temperature=0.0)
            (observation, reward, done, info) = env.step(action)
            total_reward += reward
        total_reward_.append(total_reward)
    average_return = np.array(total_reward_).mean()
    normalized_return = (d4rl.get_normalized_score(env_name, average_return) * 100)
    return normalized_return
|
def target_update(critic: Model, target_critic: Model, tau: float) -> Model:
    """Polyak-average the online critic parameters into the target critic."""
    def _ema(online, target):
        return online * tau + target * (1 - tau)

    mixed_params = jax.tree_util.tree_map(_ema, critic.params, target_critic.params)
    return target_critic.replace(params=mixed_params)
|
@jax.jit
def _update_jit_sql(rng: PRNGKey, actor: Model, critic: Model, value: Model, target_critic: Model, batch: Batch, discount: float, tau: float, alpha: float) -> Tuple[(PRNGKey, Model, Model, Model, Model, InfoDict)]:
    """One jitted SQL training step: update V, then the actor, then Q, then
    Polyak-update the target critic.

    (Return annotation corrected: the original listed one Model too many for
    this 6-tuple.)
    """
    (new_value, value_info) = update_v(target_critic, value, batch, alpha, alg='SQL')
    (key, rng) = jax.random.split(rng)
    (new_actor, actor_info) = update_actor(key, actor, target_critic, new_value, batch, alpha, alg='SQL')
    (new_critic, critic_info) = update_q(critic, new_value, batch, discount)
    new_target_critic = target_update(new_critic, target_critic, tau)
    return (rng, new_actor, new_critic, new_value, new_target_critic, {**critic_info, **value_info, **actor_info})
|
@jax.jit
def _update_jit_eql(rng: PRNGKey, actor: Model, critic: Model, value: Model, target_critic: Model, batch: Batch, discount: float, tau: float, alpha: float) -> Tuple[(PRNGKey, Model, Model, Model, Model, InfoDict)]:
    """One jitted EQL training step: identical structure to _update_jit_sql
    but with alg='EQL' passed to the V/actor updates.

    (Return annotation corrected: the original listed one Model too many for
    this 6-tuple.)
    """
    (new_value, value_info) = update_v(target_critic, value, batch, alpha, alg='EQL')
    (key, rng) = jax.random.split(rng)
    (new_actor, actor_info) = update_actor(key, actor, target_critic, new_value, batch, alpha, alg='EQL')
    (new_critic, critic_info) = update_q(critic, new_value, batch, discount)
    new_target_critic = target_update(new_critic, target_critic, tau)
    return (rng, new_actor, new_critic, new_value, new_target_critic, {**critic_info, **value_info, **actor_info})
|
class Learner(object):
    def __init__(self, seed: int, observations: jnp.ndarray, actions: jnp.ndarray, actor_lr: float=0.0003, value_lr: float=0.0003, critic_lr: float=0.0003, hidden_dims: Sequence[int]=(256, 256), discount: float=0.99, tau: float=0.005, alpha: float=0.1, dropout_rate: Optional[float]=None, value_dropout_rate: Optional[float]=None, layernorm: bool=False, max_steps: Optional[int]=None, max_clip: Optional[int]=None, mix_dataset: Optional[str]=None, alg: Optional[str]=None, opt_decay_schedule: str='cosine'):
        """Actor / double-critic / value-network learner whose update() runs
        the SQL or EQL jitted step selected by `alg`.

        NOTE(review): the original docstring cited the SAC paper
        (arxiv 1801.01290), but update() only dispatches to SQL/EQL.
        """
        self.tau = tau
        self.discount = discount
        self.alpha = alpha
        self.max_clip = max_clip
        self.alg = alg
        rng = jax.random.PRNGKey(seed)
        (rng, actor_key, critic_key, value_key) = jax.random.split(rng, 4)
        action_dim = actions.shape[(- 1)]
        # Policy: state-independent std, tanh applied to the mean only.
        actor_def = policy.NormalTanhPolicy(hidden_dims, action_dim, log_std_scale=0.001, log_std_min=(- 5.0), dropout_rate=dropout_rate, state_dependent_std=False, tanh_squash_distribution=False)
        if (opt_decay_schedule == 'cosine'):
            # Negative learning rate: scale_by_schedule multiplies the Adam
            # update, and descent needs the sign flip.
            schedule_fn = optax.cosine_decay_schedule((- actor_lr), max_steps)
            optimiser = optax.chain(optax.scale_by_adam(), optax.scale_by_schedule(schedule_fn))
        else:
            optimiser = optax.adam(learning_rate=actor_lr)
        actor = Model.create(actor_def, inputs=[actor_key, observations], tx=optimiser)
        critic_def = value_net.DoubleCritic(hidden_dims)
        critic = Model.create(critic_def, inputs=[critic_key, observations, actions], tx=optax.adam(learning_rate=critic_lr))
        value_def = value_net.ValueCritic(hidden_dims, layer_norm=layernorm, dropout_rate=value_dropout_rate)
        value = Model.create(value_def, inputs=[value_key, observations], tx=optax.adam(learning_rate=value_lr))
        # Target critic reuses critic_key, so both critics start identical.
        target_critic = Model.create(critic_def, inputs=[critic_key, observations, actions])
        self.actor = actor
        self.critic = critic
        self.value = value
        self.target_critic = target_critic
        self.rng = rng

    def sample_actions(self, observations: np.ndarray, temperature: float=1.0) -> jnp.ndarray:
        """Sample actions from the policy (temperature=0 is deterministic),
        clipped to [-1, 1]; advances the internal PRNG state."""
        (rng, actions) = policy.sample_actions(self.rng, self.actor.apply_fn, self.actor.params, observations, temperature)
        self.rng = rng
        actions = np.asarray(actions)
        return np.clip(actions, (- 1), 1)

    def update(self, batch: Batch) -> InfoDict:
        """Run one jitted SQL/EQL update and adopt the new parameters.

        NOTE(review): if self.alg is neither 'SQL' nor 'EQL', the assignments
        below raise UnboundLocalError -- there is no else branch.
        """
        if (self.alg == 'SQL'):
            (new_rng, new_actor, new_critic, new_value, new_target_critic, info) = _update_jit_sql(self.rng, self.actor, self.critic, self.value, self.target_critic, batch, self.discount, self.tau, self.alpha)
        elif (self.alg == 'EQL'):
            (new_rng, new_actor, new_critic, new_value, new_target_critic, info) = _update_jit_eql(self.rng, self.actor, self.critic, self.value, self.target_critic, batch, self.discount, self.tau, self.alpha)
        self.rng = new_rng
        self.actor = new_actor
        self.critic = new_critic
        self.value = new_value
        self.target_critic = new_target_critic
        return info
|
class NormalTanhPolicy(nn.Module):
    """Gaussian policy head.

    With tanh_squash_distribution=True the whole distribution is squashed by
    a Tanh bijector; otherwise only the mean is passed through tanh and the
    distribution stays Gaussian.

    Fix: the log-std bounds were previously defaulted with `or`, which
    silently replaced an explicitly configured bound of 0.0 with the module
    default; explicit `is None` checks are used instead.
    """
    hidden_dims: Sequence[int]
    action_dim: int
    state_dependent_std: bool = True
    dropout_rate: Optional[float] = None
    log_std_scale: float = 1.0
    log_std_min: Optional[float] = None
    log_std_max: Optional[float] = None
    tanh_squash_distribution: bool = True

    @nn.compact
    def __call__(self, observations: jnp.ndarray, temperature: float=1.0, training: bool=False) -> tfd.Distribution:
        outputs = MLP(self.hidden_dims, activate_final=True, dropout_rate=self.dropout_rate)(observations, training=training)
        means = nn.Dense(self.action_dim, kernel_init=default_init())(outputs)
        if self.state_dependent_std:
            log_stds = nn.Dense(self.action_dim, kernel_init=default_init(self.log_std_scale))(outputs)
        else:
            # Single learned log-std vector, shared across states.
            log_stds = self.param('log_stds', nn.initializers.zeros, (self.action_dim,))
        log_std_min = self.log_std_min if self.log_std_min is not None else LOG_STD_MIN
        log_std_max = self.log_std_max if self.log_std_max is not None else LOG_STD_MAX
        log_stds = jnp.clip(log_stds, log_std_min, log_std_max)
        if not self.tanh_squash_distribution:
            # Squash only the mean; sampled actions are not bounded here.
            means = nn.tanh(means)
        base_dist = tfd.MultivariateNormalDiag(loc=means, scale_diag=(jnp.exp(log_stds) * temperature))
        if self.tanh_squash_distribution:
            return tfd.TransformedDistribution(distribution=base_dist, bijector=tfb.Tanh())
        else:
            return base_dist
|
@functools.partial(jax.jit, static_argnames=('actor_def', 'distribution'))
def _sample_actions(rng: PRNGKey, actor_def: nn.Module, actor_params: Params, observations: np.ndarray, temperature: float=1.0) -> Tuple[(PRNGKey, jnp.ndarray)]:
    """Jitted sampling: apply the policy to get a distribution, then draw one
    sample; returns the advanced rng alongside the actions.

    NOTE(review): 'distribution' in static_argnames matches no parameter here
    -- looks like a leftover; newer jax versions reject unknown
    static_argnames, so confirm against the pinned jax version.
    """
    dist = actor_def.apply({'params': actor_params}, observations, temperature)
    (rng, key) = jax.random.split(rng)
    return (rng, dist.sample(seed=key))
|
def sample_actions(rng: PRNGKey, actor_def: nn.Module, actor_params: Params, observations: np.ndarray, temperature: float=1.0) -> Tuple[(PRNGKey, jnp.ndarray)]:
    """Thin public wrapper delegating to the jitted _sample_actions."""
    return _sample_actions(rng, actor_def, actor_params, observations, temperature)
|
def normalize(dataset):
    """Rescale dataset rewards in-place to 1000 / (best - worst trajectory return)."""
    trajs = split_into_trajectories(dataset.observations, dataset.actions, dataset.rewards, dataset.masks, dataset.dones_float, dataset.next_observations)

    def compute_returns(traj):
        # Each step is (obs, act, reward, mask, done, next_obs); index 2 is reward.
        return sum(step[2] for step in traj)

    trajs.sort(key=compute_returns)
    return_range = compute_returns(trajs[-1]) - compute_returns(trajs[0])
    dataset.rewards /= return_range
    dataset.rewards *= 1000.0
|
def make_env_and_dataset(env_name: str, seed: int) -> Tuple[(gym.Env, D4RLDataset)]:
    """Create a seeded, monitored, float32 gym env plus its D4RL dataset.

    Fix: the reward-handling branches previously consulted the global
    FLAGS.env_name instead of the env_name parameter; they now use the
    parameter, so the function behaves correctly for any caller-supplied name.
    """
    env = gym.make(env_name)
    env = wrappers.EpisodeMonitor(env)
    env = wrappers.SinglePrecision(env)
    env.seed(seed)
    env.action_space.seed(seed)
    env.observation_space.seed(seed)
    dataset = D4RLDataset(env)
    if ('antmaze' in env_name):
        # antmaze rewards are left untouched in this variant.
        pass
    elif (('halfcheetah' in env_name) or ('walker2d' in env_name) or ('hopper' in env_name)):
        # Locomotion tasks: rescale rewards by the trajectory-return range.
        normalize(dataset)
    return (env, dataset)
|
def main(_):
    """Online fine-tuning entry point.

    Steps i <= 0 are offline pretraining on the replay buffer (seeded from the
    D4RL dataset); steps i >= 1 additionally interact with the environment and
    insert fresh transitions. Metrics go to TensorBoard and a per-seed txt file.
    """
    summary_writer = SummaryWriter(os.path.join(FLAGS.save_dir, 'tb', str(FLAGS.seed)), write_to_disk=True)
    os.makedirs(FLAGS.save_dir, exist_ok=True)
    (env, dataset) = make_env_and_dataset(FLAGS.env_name, FLAGS.seed)
    action_dim = env.action_space.shape[0]
    replay_buffer = ReplayBuffer(env.observation_space, action_dim, (FLAGS.replay_buffer_size or FLAGS.max_steps))
    replay_buffer.initialize_with_dataset(dataset, FLAGS.init_dataset_size)
    kwargs = dict(FLAGS.config)
    agent = Learner(FLAGS.seed, env.observation_space.sample()[np.newaxis], env.action_space.sample()[np.newaxis], **kwargs)
    eval_returns = []
    (observation, done) = (env.reset(), False)
    for i in tqdm.tqdm(range((1 - FLAGS.num_pretraining_steps), (FLAGS.max_steps + 1)), smoothing=0.1, disable=(not FLAGS.tqdm)):
        if (i >= 1):
            action = agent.sample_actions(observation)
            action = np.clip(action, (- 1), 1)
            (next_observation, reward, done, info) = env.step(action)
            # mask=1.0 keeps bootstrapping for non-terminal steps and for
            # time-limit truncations (which are not true terminals).
            if ((not done) or ('TimeLimit.truncated' in info)):
                mask = 1.0
            else:
                mask = 0.0
            replay_buffer.insert(observation, action, reward, mask, float(done), next_observation)
            observation = next_observation
            if done:
                (observation, done) = (env.reset(), False)
                for (k, v) in info['episode'].items():
                    summary_writer.add_scalar(f'training/{k}', v, info['total']['timesteps'])
        else:
            # Pretraining step: synthesize a minimal info dict for logging.
            info = {}
            info['total'] = {'timesteps': i}
        batch = replay_buffer.sample(FLAGS.batch_size)
        if ('antmaze' in FLAGS.env_name):
            # Standard antmaze reward shift: {0, 1} -> {-1, 0}.
            batch = Batch(observations=batch.observations, actions=batch.actions, rewards=(batch.rewards - 1), masks=batch.masks, next_observations=batch.next_observations)
        update_info = agent.update(batch)
        if ((i % FLAGS.log_interval) == 0):
            for (k, v) in update_info.items():
                if (v.ndim == 0):
                    summary_writer.add_scalar(f'training/{k}', v, i)
                else:
                    summary_writer.add_histogram(f'training/{k}', v, i)
            summary_writer.flush()
        if ((i % FLAGS.eval_interval) == 0):
            # NOTE(review): the evaluate() defined earlier in this file takes
            # (env_name, agent, env, num_episodes) and returns a float; this
            # call passes three args and treats the result as a dict --
            # confirm which evaluate() this main is paired with.
            eval_stats = evaluate(agent, env, FLAGS.eval_episodes)
            for (k, v) in eval_stats.items():
                summary_writer.add_scalar(f'evaluation/average_{k}s', v, i)
            summary_writer.flush()
            eval_returns.append((i, eval_stats['return']))
            np.savetxt(os.path.join(FLAGS.save_dir, f'{FLAGS.seed}.txt'), eval_returns, fmt=['%d', '%.1f'])
|
def normalize(dataset):
    """Scale rewards in-place by 1000 divided by the trajectory-return spread."""
    trajs = split_into_trajectories(dataset.observations, dataset.actions, dataset.rewards, dataset.masks, dataset.dones_float, dataset.next_observations)

    def compute_returns(traj):
        total = 0
        for step in traj:
            total += step[2]  # reward is the third element of each tuple
        return total

    trajs.sort(key=compute_returns)
    spread = compute_returns(trajs[-1]) - compute_returns(trajs[0])
    dataset.rewards /= spread
    dataset.rewards *= 1000.0
|
def make_env_and_dataset(env_name: str, seed: int) -> Tuple[(gym.Env, D4RLDataset)]:
    """Create a seeded, monitored, float32 gym env plus its D4RL dataset.

    Fix: the reward-handling branches previously consulted the global
    FLAGS.env_name instead of the env_name parameter; they now use the
    parameter, so the function behaves correctly for any caller-supplied name.
    """
    env = gym.make(env_name)
    env = wrappers.EpisodeMonitor(env)
    env = wrappers.SinglePrecision(env)
    env.seed(seed)
    env.action_space.seed(seed)
    env.observation_space.seed(seed)
    dataset = D4RLDataset(env)
    if ('antmaze' in env_name):
        # Standard antmaze reward shift: {0, 1} -> {-1, 0}.
        dataset.rewards -= 1.0
    elif (('halfcheetah' in env_name) or ('walker2d' in env_name) or ('hopper' in env_name)):
        # Locomotion tasks: rescale rewards by the trajectory-return range.
        normalize(dataset)
    return (env, dataset)
|
def main(_):
    """Offline training entry point: train purely on the D4RL dataset,
    logging to wandb and a local CSV/txt log."""
    (env, dataset) = make_env_and_dataset(FLAGS.env_name, FLAGS.seed)
    kwargs = dict(FLAGS.config)
    kwargs['alpha'] = FLAGS.alpha
    kwargs['alg'] = FLAGS.alg
    agent = Learner(FLAGS.seed, env.observation_space.sample()[np.newaxis], env.action_space.sample()[np.newaxis], max_steps=FLAGS.max_steps, **kwargs)
    # Added after Learner construction so these keys only appear in the
    # logged config, not in the Learner kwargs.
    kwargs['seed'] = FLAGS.seed
    kwargs['env_name'] = FLAGS.env_name
    wandb.init(project='project_name', entity='your_wandb_id', name=f'{FLAGS.env_name}', config=kwargs)
    log = Log((Path('benchmark') / FLAGS.env_name), kwargs)
    log(f'Log dir: {log.dir}')
    for i in tqdm.tqdm(range(1, (FLAGS.max_steps + 1)), smoothing=0.1, disable=(not FLAGS.tqdm)):
        batch = dataset.sample(FLAGS.batch_size)
        update_info = agent.update(batch)
        if ((i % FLAGS.log_interval) == 0):
            # NOTE(review): the positional second argument to wandb.log was
            # the step in older wandb releases; newer releases require
            # step=i -- confirm against the pinned wandb version.
            wandb.log(update_info, i)
        if ((i % FLAGS.eval_interval) == 0):
            normalized_return = evaluate(FLAGS.env_name, agent, env, FLAGS.eval_episodes)
            log.row({'normalized_return': normalized_return})
            wandb.log({'normalized_return': normalized_return}, i)
|
class ValueCritic(nn.Module):
    """State-value network: an MLP with a single squeezed output."""
    hidden_dims: Sequence[int]
    layer_norm: bool = False
    dropout_rate: Optional[float] = 0.0

    @nn.compact
    def __call__(self, observations: jnp.ndarray) -> jnp.ndarray:
        net = MLP((*self.hidden_dims, 1), layer_norm=self.layer_norm, dropout_rate=self.dropout_rate)
        values = net(observations)
        # Drop the trailing singleton output dimension.
        return jnp.squeeze(values, -1)
|
class Critic(nn.Module):
    """Q-network over the concatenated (observation, action) vector."""
    hidden_dims: Sequence[int]
    activations: Callable[[jnp.ndarray], jnp.ndarray] = nn.relu
    layer_norm: bool = False

    @nn.compact
    def __call__(self, observations: jnp.ndarray, actions: jnp.ndarray) -> jnp.ndarray:
        joined = jnp.concatenate([observations, actions], -1)
        q_net = MLP((*self.hidden_dims, 1), layer_norm=self.layer_norm, activations=self.activations)
        # Drop the trailing singleton output dimension.
        return jnp.squeeze(q_net(joined), -1)
|
class DoubleCritic(nn.Module):
    """Two independently parameterized Q-networks sharing one input pair."""
    hidden_dims: Sequence[int]
    activations: Callable[[jnp.ndarray], jnp.ndarray] = nn.relu
    layer_norm: bool = False

    @nn.compact
    def __call__(self, observations: jnp.ndarray, actions: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]:
        q1 = Critic(self.hidden_dims, activations=self.activations, layer_norm=self.layer_norm)(observations, actions)
        q2 = Critic(self.hidden_dims, activations=self.activations, layer_norm=self.layer_norm)(observations, actions)
        return (q1, q2)
|
class EpisodeMonitor(gym.ActionWrapper):
    """A class that computes episode returns and lengths.

    NOTE(review): this subclasses gym.ActionWrapper but overrides step() and
    reset() wholesale and never defines action(); gym.Wrapper would express
    the same behavior -- confirm before changing the base class.
    """

    def __init__(self, env: gym.Env):
        super().__init__(env)
        self._reset_stats()
        # Lifetime step counter across all episodes; never reset.
        self.total_timesteps = 0

    def _reset_stats(self):
        # Per-episode accumulators.
        self.reward_sum = 0.0
        self.episode_length = 0
        self.start_time = time.time()

    def step(self, action: np.ndarray) -> TimeStep:
        """Step the wrapped env, accumulating return/length; on episode end,
        attach an 'episode' stats dict to info."""
        (observation, reward, done, info) = self.env.step(action)
        self.reward_sum += reward
        self.episode_length += 1
        self.total_timesteps += 1
        info['total'] = {'timesteps': self.total_timesteps}
        if done:
            info['episode'] = {}
            info['episode']['return'] = self.reward_sum
            info['episode']['length'] = self.episode_length
            info['episode']['duration'] = (time.time() - self.start_time)
            # If the underlying env exposes get_normalized_score (presumably
            # a D4RL env), report the return on a 0-100 normalized scale.
            if hasattr(self, 'get_normalized_score'):
                info['episode']['return'] = (self.get_normalized_score(info['episode']['return']) * 100.0)
        return (observation, reward, done, info)

    def reset(self) -> np.ndarray:
        self._reset_stats()
        return self.env.reset()
|
class SinglePrecision(gym.ObservationWrapper):
    """Observation wrapper that casts observations down to float32."""

    def __init__(self, env):
        super().__init__(env)
        # Rebuild the observation space without an explicit dtype
        # (presumably relying on gym Box's float32 default -- confirm).
        if isinstance(self.observation_space, Box):
            obs_space = self.observation_space
            self.observation_space = Box(obs_space.low, obs_space.high, obs_space.shape)
        elif isinstance(self.observation_space, Dict):
            obs_spaces = copy.copy(self.observation_space.spaces)
            for (k, v) in obs_spaces.items():
                obs_spaces[k] = Box(v.low, v.high, v.shape)
            self.observation_space = Dict(obs_spaces)
        else:
            raise NotImplementedError

    def observation(self, observation: np.ndarray) -> np.ndarray:
        """Cast an ndarray (or each value of a dict observation) to float32.

        NOTE(review): any other observation type falls through and implicitly
        returns None.
        """
        if isinstance(observation, np.ndarray):
            return observation.astype(np.float32)
        elif isinstance(observation, dict):
            observation = copy.copy(observation)
            for (k, v) in observation.items():
                observation[k] = v.astype(np.float32)
            return observation
|
class PSNR():
    """Peak signal-to-noise ratio between two uint8 (H, W, C) images."""

    def __init__(self):
        # Dynamic range of uint8 inputs, passed through to skimage.
        self.data_range = 255

    def forward(self, img1, img2):
        """Return the PSNR score (float) for two (H W C) uint8 ndarrays."""
        a, b = img1.copy(), img2.copy()
        return peak_signal_noise_ratio(a, b, data_range=self.data_range)
|
class SSIM():
    """Structural similarity for uint8 (H, W, C) images with skimage options pinned."""

    def __init__(self):
        self.win_size = None
        self.gradient = False
        self.data_range = 255
        self.multichannel = True  # NOTE(review): removed in newer skimage (channel_axis) -- confirm pinned version
        self.gaussian_weights = False
        self.full = False

    def forward(self, img1, img2):
        """Return the SSIM score (float) for two (H W C) uint8 ndarrays.

        Differs from the MATLAB reference implementation; see
        https://github.com/scikit-image/scikit-image/issues/4985
        """
        a, b = img1.copy(), img2.copy()
        return structural_similarity(a, b, win_size=self.win_size, gradient=self.gradient, data_range=self.data_range, multichannel=self.multichannel, gaussian_weights=self.gaussian_weights, full=self.full)
|
def filter_order_clips(raw_videos, clip_ends=2):
    """Group video fragments by their '<date>--<time>' prefix and keep only
    the middle fragments of each recording.

    Kept fragment indices satisfy clip_ends < idx < num_frags - clip_ends.
    NOTE(review): this drops clip_ends + 1 fragments at the start but only
    clip_ends at the end (asymmetric); preserved as-is from the original.
    """
    frag_counts = {}
    for name in raw_videos:
        prefix = '--'.join(name.split('--')[:2])
        frag_counts[prefix] = frag_counts.get(prefix, 0) + 1
    kept = []
    for prefix, count in sorted(frag_counts.items()):
        kept.extend(f'{prefix}--{idx}' for idx in range(count) if clip_ends < idx < count - clip_ends)
    return kept
|
def extract_logs(video_dir, frame_shape):
    """Read ego speed from the route's rlog and resample it to frame_shape samples."""
    messages = list(LogReader(video_dir + '/rlog.bz2'))
    speeds = np.array([m.carState.vEgo for m in messages if m.which() == 'carState'])
    return resample(speeds, frame_shape)
|
def convert_video(downscaled_dir, video_dir):
    """Transcode a route's fcamera.hevc to a downscaled mp4 and return its path.

    Fixes:
    - the original shell command contained '-c:a -i' (an audio-codec flag with
      no value followed by a stray input flag), which is malformed; the source
      camera stream is video-only here, so audio is dropped with -an
      (TODO confirm no audio track is ever expected);
    - the command is now passed as an argument list with shell=False so paths
      containing spaces or shell metacharacters cannot break or inject into
      the command line.
    """
    downscaled_vid = ((((downscaled_dir + '/') + video_dir.split('-')[(- 1)]) + str(time.time())) + 'preprocessed.mp4')
    cmd = [
        'ffmpeg',
        '-r', '24',                      # input frame rate
        '-i', video_dir + '/fcamera.hevc',
        '-c:v', 'libx265',
        '-r', '20',                      # output frame rate
        '-filter:v', 'scale=640:480',
        '-crf', '10',
        '-an',                           # no audio track
        downscaled_vid,
    ]
    subprocess.call(cmd)
    return downscaled_vid
|
def opticalFlowDense(image_current, image_next):
    """Dense Farneback optical flow between two consecutive RGB frames.

    Both inputs are converted to grayscale ndarrays first; returns the flow
    field produced by cv.calcOpticalFlowFarneback.
    """
    gray_prev = cv.cvtColor(np.array(image_current), cv.COLOR_RGB2GRAY)
    gray_next = cv.cvtColor(np.array(image_next), cv.COLOR_RGB2GRAY)
    # pyr_scale=0.5, levels=1, winsize=15, iterations=2, poly_n=5, poly_sigma=1.3
    return cv.calcOpticalFlowFarneback(gray_prev, gray_next, None, 0.5, 1, 15, 2, 5, 1.3, 0)
|
def augment(image_current, image_next):
    """Apply one random brightness and one random color jitter, with the same
    factors applied to both frames so the pair stays photometrically consistent.

    Fix: the second enhancement drew a `color` factor but applied
    ImageEnhance.Brightness a second time; it now uses ImageEnhance.Color as
    the variable name indicates was intended.
    """
    brightness = np.random.uniform(0.5, 1.5)
    img1 = ImageEnhance.Brightness(image_current).enhance(brightness)
    img2 = ImageEnhance.Brightness(image_next).enhance(brightness)
    color = np.random.uniform(0.5, 1.5)
    img1 = ImageEnhance.Color(img1).enhance(color)
    img2 = ImageEnhance.Color(img2).enhance(color)
    return (img1, img2)
|
def op_flow_video(preprocessed_video, augment_frames=True):
    """Decode a video and compute dense optical flow for each consecutive
    frame pair.

    Every frame is cropped to (0, 170, 640, 370) and resized to 160x50 before
    flow computation. With augment_frames, roughly 15% of pairs get a shared
    photometric jitter via augment().

    Returns (frames, op_flows, count): all frames after the first as an
    ndarray, the flow for each consecutive pair, and the number of pairs read.
    """
    op_flows = []
    frames = []
    count = 0
    vidcap = cv.VideoCapture(preprocessed_video)
    (success, frame1) = vidcap.read()
    # OpenCV decodes BGR; convert before handing frames to PIL.
    frame1 = cv.cvtColor(frame1, cv.COLOR_BGR2RGB)
    frame1 = Image.fromarray(frame1).crop((0, 170, 640, 370)).resize((160, 50))
    while success:
        # Progress marker every 100 frames.
        if (((count % 100) == 0) and (count > 0)):
            print(count)
        (success, frame2) = vidcap.read()
        if (success == True):
            frame2 = cv.cvtColor(frame2, cv.COLOR_BGR2RGB)
            frame2 = Image.fromarray(frame2).crop((0, 170, 640, 370)).resize((160, 50))
            if (augment_frames == True):
                if (random.random() > 0.85):
                    (frame1, frame2) = augment(frame1, frame2)
            flow = opticalFlowDense(frame1, frame2)
            op_flows.append(flow)
            frames.append(np.array(frame2))
            # Slide the window: the current frame becomes the previous one.
            frame1 = frame2
            count += 1
        else:
            print('video reading completed')
            # success is False here, so this continue just re-tests the loop
            # condition and exits.
            continue
    return (np.array(frames), np.array(op_flows), count)
|
def write_hdf5(hdf5_path, frames, op_flows, resampled_speeds):
    """Append frames, optical flows and speeds to resizable datasets in an
    existing HDF5 file (datasets 'frame', 'op_flow', 'speed' must exist).

    Fix: the file is now opened explicitly in append mode ('a'); the original
    call relied on h5py's old implicit default, and recent h5py versions
    default to read-only, which would make the resize/write calls fail.
    """
    with h5py.File(hdf5_path, 'a') as f:
        print(len(frames), len(op_flows), len(resampled_speeds))
        print(f['frame'], f['op_flow'], f['speed'])
        # Grow each dataset along axis 0, then fill the newly added tail.
        f['frame'].resize((f['frame'].len() + len(frames)), axis=0)
        f['op_flow'].resize((f['op_flow'].len() + len(op_flows)), axis=0)
        f['speed'].resize((f['speed'].len() + len(resampled_speeds)), axis=0)
        # NOTE(review): if any batch is empty, [-0:] selects the whole dataset
        # -- pre-existing behavior, callers presumably never pass empty batches.
        f['frame'][(- len(frames)):] = frames
        f['op_flow'][(- len(op_flows)):] = op_flows
        f['speed'][(- len(resampled_speeds)):] = resampled_speeds
|
def archive_processed(video_dir):
    """Move a finished video directory into the sibling 'processed' folder
    (the second-to-last path component is replaced with 'processed')."""
    parts = video_dir.split('/')
    parts[-2] = 'processed'
    shutil.move(video_dir, '/'.join(parts))
|
class DataGenerator(keras.utils.Sequence):
    """Keras Sequence yielding ([frames, op_flows], frame_positions) batches
    from an HDF5 file; the target is the (possibly shuffled) temporal order
    of the frames in each history window.

    NOTE(review): the entire selected slice of the HDF5 file is loaded into
    memory up front in __init__.
    """

    def __init__(self, batch_size, history_size, hdf5_path, indexes):
        self.hdf5_path = hdf5_path
        if (indexes is None):
            # No subset given: use every sample in the file.
            with h5py.File(hdf5_path, 'r') as f:
                self.indexes = np.arange(len(f['speed']))
        else:
            self.indexes = indexes
        self.batch_size = batch_size
        self.history_size = history_size
        self.on_epoch_end()
        with h5py.File(hdf5_path, 'r') as f:
            self.frame = np.array(f['frame'])[self.indexes]
            self.op_flow = np.array(f['op_flow'])[self.indexes]
            self.speed = np.array(f['speed'])[self.indexes]

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor((len(self.indexes) / self.batch_size)))

    def __getitem__(self, index):
        'Generate one batch of data'
        indexes = self.indexes[(index * self.batch_size):((index + 1) * self.batch_size)]
        return self.__data_generation(indexes)

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        np.random.shuffle(self.indexes)

    def __data_generation(self, indexes):
        'Generates data containing batch_size samples'
        # NOTE(review): the explicit [0, 1, 2, 3, 4] permutations below assume
        # history_size == 5 -- confirm before using other history sizes.
        frame = np.zeros((self.batch_size, self.history_size, 224, 224, 3))
        op_flow = np.zeros((self.batch_size, self.history_size, 224, 224, 3))
        speed = np.zeros((self.batch_size, 1))
        frame_order = list(range(self.history_size))
        frame_pos = np.zeros(shape=(self.batch_size, self.history_size))
        for (pos, i) in enumerate(indexes):
            time_steps = self.frame[(i - self.history_size):i].shape[0]
            if (time_steps < self.history_size):
                # Not enough history: put the single sample in the last slot.
                frame[(pos, (- 1))] = self.frame[i]
                op_flow[(pos, (- 1))] = self.op_flow[i]
                frame_pos[pos] = np.array(list(range(self.history_size)))
            else:
                # Half the time, swap the first and last frames so the model
                # must recover the true temporal order.
                if (np.random.random() > 0.5):
                    frame_order_pos = np.array([0, 1, 2, 3, 4])
                else:
                    frame_order_pos = np.array([4, 1, 2, 3, 0])
                frame[(pos, frame_order_pos)] = self.frame[(i - self.history_size):i]
                op_flow[(pos, frame_order_pos)] = self.op_flow[(i - self.history_size):i]
                frame_pos[pos] = frame_order_pos
            speed[pos] = self.speed[i]
        # NOTE(review): `speed` is filled but never returned; the training
        # target here is the frame-position vector.
        return ([frame, op_flow], frame_pos)
|
def build_model(history_size):
    """Two-stream (frames + optical flow) TimeDistributed CNN compiled with
    MSE/Adam; the dense head predicts one value per history slot.

    NOTE(review): the optical-flow tower is built but only the frame tower's
    pooled features (frame_max, frame_avg) feed the dense head -- confirm
    this is intentional.
    """
    k.clear_session()
    frame_inp = Input(shape=(history_size, 224, 224, 3))
    op_flow_inp = Input(shape=(history_size, 224, 224, 3))
    filter_size = (3, 3)
    # Frame tower: 8 -> 128 channels, max-pooling between conv blocks.
    frame = TimeDistributed(SpatialDropout2D(0.2))(frame_inp)
    frame = TimeDistributed(Conv2D(8, filter_size, activation='relu', data_format='channels_last'))(frame)
    frame = TimeDistributed(MaxPool2D())(frame)
    frame = TimeDistributed(Conv2D(16, filter_size, activation='relu', data_format='channels_last'))(frame)
    frame = TimeDistributed(MaxPool2D())(frame)
    frame = TimeDistributed(Conv2D(32, filter_size, activation='relu', data_format='channels_last'))(frame)
    frame = TimeDistributed(MaxPool2D())(frame)
    frame = TimeDistributed(Conv2D(64, filter_size, activation='relu', data_format='channels_last'))(frame)
    frame = TimeDistributed(MaxPool2D())(frame)
    frame = TimeDistributed(Conv2D(128, filter_size, activation='relu', data_format='channels_last'))(frame)
    frame = TimeDistributed(MaxPool2D())(frame)
    frame_max = TimeDistributed(GlobalMaxPool2D())(frame)
    frame_avg = TimeDistributed(GlobalAvgPool2D())(frame)
    # Optical-flow tower: 4 -> 128 channels.
    op_flow = TimeDistributed(BatchNormalization())(op_flow_inp)
    op_flow = TimeDistributed(Dropout(0.3))(op_flow)
    op_flow = TimeDistributed(Conv2D(4, filter_size, activation='relu', data_format='channels_last'))(op_flow)
    op_flow = TimeDistributed(MaxPool2D())(op_flow)
    op_flow = TimeDistributed(Conv2D(8, filter_size, activation='relu', data_format='channels_last'))(op_flow)
    op_flow = TimeDistributed(MaxPool2D())(op_flow)
    op_flow = TimeDistributed(Conv2D(32, filter_size, activation='relu', data_format='channels_last'))(op_flow)
    op_flow = TimeDistributed(MaxPool2D())(op_flow)
    op_flow = TimeDistributed(Conv2D(64, filter_size, activation='relu', data_format='channels_last'))(op_flow)
    op_flow = TimeDistributed(Dropout(0.3))(op_flow)
    op_flow = TimeDistributed(MaxPool2D())(op_flow)
    op_flow = TimeDistributed(Conv2D(128, filter_size, activation='relu', data_format='channels_last'))(op_flow)
    op_flow = TimeDistributed(MaxPool2D())(op_flow)
    op_flow_max = TimeDistributed(GlobalMaxPool2D())(op_flow)
    op_flow_avg = TimeDistributed(GlobalAvgPool2D())(op_flow)
    # Head: frame features only (see NOTE above).
    conc = concatenate([frame_max, frame_avg])
    conc = SpatialDropout1D(0.2)(conc)
    conc = Flatten()(conc)
    conc = Dense(500, activation='relu')(conc)
    conc = Dropout(0.2)(conc)
    conc = Dense(100, activation='relu')(conc)
    conc = Dropout(0.1)(conc)
    result = Dense(history_size)(conc)
    model = Model(inputs=[frame_inp, op_flow_inp], outputs=[result])
    print(model.summary())
    model.compile(loss='mse', optimizer='adam')
    return model
|
def build_model_flat(history_size):
    """Smaller two-stream TimeDistributed CNN compiled with MSE/Adam; the
    head outputs a single linear value.

    NOTE(review): the frame tower is built but only the optical-flow tower's
    pooled features feed the dense head -- confirm this is intentional.
    """
    k.clear_session()
    frame_inp = Input(shape=(history_size, 224, 224, 3))
    op_flow_inp = Input(shape=(history_size, 224, 224, 3))
    filter_size = (3, 3)
    # Frame tower: 4 -> 32 channels (unused by the head; see NOTE above).
    frame = TimeDistributed(SpatialDropout2D(0.2))(frame_inp)
    frame = TimeDistributed(Conv2D(4, filter_size, activation='relu', data_format='channels_last'))(frame)
    frame = TimeDistributed(MaxPool2D())(frame)
    frame = TimeDistributed(Conv2D(4, filter_size, activation='relu', data_format='channels_last'))(frame)
    frame = TimeDistributed(MaxPool2D())(frame)
    frame = TimeDistributed(Conv2D(8, filter_size, activation='relu', data_format='channels_last'))(frame)
    frame = TimeDistributed(MaxPool2D())(frame)
    frame = TimeDistributed(Conv2D(16, filter_size, activation='relu', data_format='channels_last'))(frame)
    frame = TimeDistributed(MaxPool2D())(frame)
    frame = TimeDistributed(Conv2D(32, filter_size, activation='relu', data_format='channels_last'))(frame)
    frame = TimeDistributed(MaxPool2D())(frame)
    frame = TimeDistributed(GlobalMaxPool2D())(frame)
    # Optical-flow tower: 4 -> 128 channels.
    op_flow = TimeDistributed(BatchNormalization())(op_flow_inp)
    op_flow = TimeDistributed(Dropout(0.3))(op_flow)
    op_flow = TimeDistributed(Conv2D(4, filter_size, activation='relu', data_format='channels_last'))(op_flow)
    op_flow = TimeDistributed(MaxPool2D())(op_flow)
    op_flow = TimeDistributed(Conv2D(8, filter_size, activation='relu', data_format='channels_last'))(op_flow)
    op_flow = TimeDistributed(MaxPool2D())(op_flow)
    op_flow = TimeDistributed(Conv2D(32, filter_size, activation='relu', data_format='channels_last'))(op_flow)
    op_flow = TimeDistributed(MaxPool2D())(op_flow)
    op_flow = TimeDistributed(Conv2D(64, filter_size, activation='relu', data_format='channels_last'))(op_flow)
    op_flow = TimeDistributed(Dropout(0.3))(op_flow)
    op_flow = TimeDistributed(MaxPool2D())(op_flow)
    op_flow = TimeDistributed(Conv2D(128, filter_size, activation='relu', data_format='channels_last'))(op_flow)
    op_flow = TimeDistributed(MaxPool2D())(op_flow)
    op_flow_max = TimeDistributed(GlobalMaxPool2D())(op_flow)
    op_flow_avg = TimeDistributed(GlobalAvgPool2D())(op_flow)
    # Head: optical-flow features only.
    conc = concatenate([op_flow_max, op_flow_avg])
    conc = Flatten()(conc)
    conc = Dropout(0.3)(conc)
    conc = Dense(100, activation='relu')(conc)
    conc = Dropout(0.2)(conc)
    conc = Dense(50, activation='relu')(conc)
    conc = Dropout(0.1)(conc)
    result = Dense(1, activation='linear')(conc)
    model = Model(inputs=[frame_inp, op_flow_inp], outputs=[result])
    print(model.summary())
    model.compile(loss='mse', optimizer='adam')
    return model
|
def build_model_frame(history_size):
    """Two-stream model with a frozen Xception backbone on the frame stream,
    compiled with MSE/Adam; the head outputs a single linear value.

    NOTE(review): the Xception frame tower is built but only the optical-flow
    tower's pooled features feed the dense head -- confirm this is intentional.
    """
    k.clear_session()
    frame_inp = Input(shape=(history_size, 224, 224, 3))
    op_flow_inp = Input(shape=(history_size, 224, 224, 3))
    filter_size = (3, 3)
    # Frozen ImageNet-pretrained backbone, applied per time step.
    base_mod = Xception(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    for l in base_mod.layers:
        l.trainable = False
    frame = TimeDistributed(BatchNormalization())(frame_inp)
    frame = TimeDistributed(base_mod)(frame)
    frame = TimeDistributed(Conv2D(128, (1, 1), activation='relu'))(frame)
    frame_max = TimeDistributed(GlobalMaxPool2D())(frame)
    frame_avg = TimeDistributed(GlobalAvgPool2D())(frame)
    # Optical-flow tower: 4 -> 128 channels.
    op_flow = TimeDistributed(BatchNormalization())(op_flow_inp)
    op_flow = TimeDistributed(Dropout(0.3))(op_flow)
    op_flow = TimeDistributed(Conv2D(4, filter_size, activation='relu', data_format='channels_last'))(op_flow)
    op_flow = TimeDistributed(MaxPool2D())(op_flow)
    op_flow = TimeDistributed(Conv2D(8, filter_size, activation='relu', data_format='channels_last'))(op_flow)
    op_flow = TimeDistributed(MaxPool2D())(op_flow)
    op_flow = TimeDistributed(Conv2D(32, filter_size, activation='relu', data_format='channels_last'))(op_flow)
    op_flow = TimeDistributed(MaxPool2D())(op_flow)
    op_flow = TimeDistributed(Conv2D(64, filter_size, activation='relu', data_format='channels_last'))(op_flow)
    op_flow = TimeDistributed(Dropout(0.3))(op_flow)
    op_flow = TimeDistributed(MaxPool2D())(op_flow)
    op_flow = TimeDistributed(Conv2D(128, filter_size, activation='relu', data_format='channels_last'))(op_flow)
    op_flow = TimeDistributed(MaxPool2D())(op_flow)
    op_flow_max = TimeDistributed(GlobalMaxPool2D())(op_flow)
    op_flow_avg = TimeDistributed(GlobalAvgPool2D())(op_flow)
    # Head: optical-flow features only.
    conc = concatenate([op_flow_max, op_flow_avg])
    conc = BatchNormalization()(conc)
    conc = Flatten()(conc)
    conc = Dropout(0.3)(conc)
    conc = Dense(500, activation='relu')(conc)
    conc = Dropout(0.2)(conc)
    conc = Dense(100, activation='relu')(conc)
    conc = Dropout(0.1)(conc)
    result = Dense(1, activation='linear')(conc)
    model = Model(inputs=[frame_inp, op_flow_inp], outputs=[result])
    print(model.summary())
    model.compile(loss='mse', optimizer='adam')
    return model
|
def steer_thread():
    """Joystick tele-op loop: reads testJoystick messages, maps axes/buttons
    to car actuator commands, applies them through the car interface, and
    republishes carState/carControl roughly every 10 ms."""
    context = zmq.Context()
    poller = zmq.Poller()
    logcan = messaging.sub_sock(context, service_list['can'].port)
    joystick_sock = messaging.sub_sock(context, service_list['testJoystick'].port, conflate=True, poller=poller)
    carstate = messaging.pub_sock(context, service_list['carState'].port)
    carcontrol = messaging.pub_sock(context, service_list['carControl'].port)
    sendcan = messaging.pub_sock(context, service_list['sendcan'].port)
    button_1_last = 0
    enabled = False
    (CI, CP) = get_car(logcan, sendcan, None)
    CC = car.CarControl.new_message()
    # Block for the first joystick message so the loop always has data.
    joystick = messaging.recv_one(joystick_sock)
    while True:
        # Non-blocking poll: keep the previous message if nothing arrived.
        for (socket, event) in poller.poll(0):
            if (socket is joystick_sock):
                joystick = messaging.recv_one(socket)
        CS = CI.update(CC)
        actuators = car.CarControl.Actuators.new_message()
        # Axis 3 -> steering: inverted, 1.05 gain, clipped to [-1, 1];
        # 43.0 presumably scales to a steering angle -- TODO confirm units.
        axis_3 = clip(((- joystick.testJoystick.axes[3]) * 1.05), (- 1.0), 1.0)
        actuators.steer = axis_3
        actuators.steerAngle = (axis_3 * 43.0)
        # Axis 1 -> gas (positive half) / brake (negative half).
        axis_1 = clip(((- joystick.testJoystick.axes[1]) * 1.05), (- 1.0), 1.0)
        actuators.gas = max(axis_1, 0.0)
        actuators.brake = max((- axis_1), 0.0)
        pcm_cancel_cmd = joystick.testJoystick.buttons[0]
        button_1 = joystick.testJoystick.buttons[1]
        # Rising edge of button 1 toggles the enabled state.
        if (button_1 and (not button_1_last)):
            enabled = (not enabled)
        button_1_last = button_1
        hud_alert = 0
        audible_alert = 0
        if joystick.testJoystick.buttons[2]:
            audible_alert = 'beepSingle'
        if joystick.testJoystick.buttons[3]:
            audible_alert = 'chimeRepeated'
            hud_alert = 'steerRequired'
        CC.actuators.gas = actuators.gas
        CC.actuators.brake = actuators.brake
        CC.actuators.steer = actuators.steer
        CC.actuators.steerAngle = actuators.steerAngle
        CC.hudControl.visualAlert = hud_alert
        CC.hudControl.setSpeed = 20
        CC.cruiseControl.cancel = pcm_cancel_cmd
        CC.enabled = enabled
        CI.apply(CC)
        # Publish copies of the state/control structs.
        cs_send = messaging.new_message()
        cs_send.init('carState')
        cs_send.carState = copy(CS)
        carstate.send(cs_send.to_bytes())
        cc_send = messaging.new_message()
        cc_send.init('carControl')
        cc_send.carControl = copy(CC)
        carcontrol.send(cc_send.to_bytes())
        time.sleep(0.01)
|
class TextPrint():
    """Tiny helper that renders successive lines of text onto a pygame screen,
    maintaining a cursor with indent support."""

    def __init__(self):
        self.reset()
        self.font = pygame.font.Font(None, 20)

    def printf(self, screen, textString):
        """Render one line at the current cursor and advance to the next line."""
        rendered = self.font.render(textString, True, BLACK)
        screen.blit(rendered, [self.x, self.y])
        self.y += self.line_height

    def reset(self):
        """Return the cursor to the top-left and restore default line spacing."""
        self.x, self.y, self.line_height = 10, 10, 15

    def indent(self):
        self.x += 10

    def unindent(self):
        self.x -= 10
|
def joystick_thread():
    """Read the first attached joystick with pygame and publish its state as
    'testJoystick' messages over zmq at ~100Hz.

    Raises:
      ValueError: if zero or more than one joystick is attached.
    """
    context = zmq.Context()
    joystick_sock = messaging.pub_sock(context, service_list['testJoystick'].port)
    pygame.init()
    # Used to manage how fast the screen updates
    clock = pygame.time.Clock()
    # Initialize the joysticks
    pygame.joystick.init()
    joystick_count = pygame.joystick.get_count()
    if joystick_count > 1:
        raise ValueError('More than one joystick attached')
    elif joystick_count < 1:
        raise ValueError('No joystick found')
    while True:
        # Pump the pygame event queue so the joystick state stays fresh.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # NOTE(review): QUIT is deliberately ignored here (headless
                # publisher loop) — confirm this is intended.
                pass
            if event.type == pygame.JOYBUTTONDOWN:
                print('Joystick button pressed.')
            if event.type == pygame.JOYBUTTONUP:
                print('Joystick button released.')
        # NOTE(review): the joystick object is re-created every iteration;
        # presumably cheap, but could be hoisted if profiling shows cost.
        joystick = pygame.joystick.Joystick(0)
        joystick.init()
        axes = [joystick.get_axis(a) for a in range(joystick.get_numaxes())]
        buttons = [bool(joystick.get_button(b)) for b in range(joystick.get_numbuttons())]
        dat = messaging.new_message()
        dat.init('testJoystick')
        dat.testJoystick.axes = axes
        # BUGFIX: previously assigned `map(bool, buttons)`, which on Python 3
        # is a one-shot lazy iterator rather than a list; materialize bools.
        dat.testJoystick.buttons = buttons
        joystick_sock.send(dat.to_bytes())
        clock.tick(100)
|
def _sync_inner_generator(input_queue, *args, **kwargs):
    """Adapt a plain function to the worker-generator protocol.

    args[0] is the user function; the remaining args/kwargs are forwarded to
    it. Pulls (cookie, value) items off input_queue until EndSentinel arrives,
    yielding (cookie, func(value, ...)) for each.
    """
    func = args[0]
    extra_args = args[1:]
    while True:
        item = input_queue.get()
        if item is EndSentinel:
            break
        cookie, value = item
        yield cookie, func(value, *extra_args, **kwargs)
|
def _async_streamer_async_inner(input_queue, output_queue, generator_func, args, kwargs):
    """Worker-thread main loop: run the user generator over input_queue and
    timestamp each yielded item onto output_queue.

    Always enqueues (None, EndSentinel) on the way out so the master can count
    finished workers — except on ExistentialError, which is re-raised without
    signalling (the master would mis-account a sentinel from a dead worker).
    """
    should_signal_end = True
    try:
        gen = generator_func(input_queue, *args, **kwargs)
        for produced in gen:
            output_queue.put((time(), produced))
        gen.close()
    except ExistentialError:
        should_signal_end = False
        raise
    finally:
        if should_signal_end:
            output_queue.put((None, EndSentinel))
|
def _running_mean_var(ltc_stats, x):
(old_mean, var) = ltc_stats
mean = min(600.0, ((0.98 * old_mean) + (0.02 * x)))
var = min(5.0, max(0.1, ((0.98 * var) + ((0.02 * (mean - x)) * (old_mean - x)))))
return (mean, var)
|
def _find_next_resend(sent_messages, ltc_stats):
if (not sent_messages):
return (None, None)
oldest_sent_idx = sent_messages._OrderedDict__root[1][2]
(send_time, _) = sent_messages[oldest_sent_idx]
(mean, var) = ltc_stats
next_resend_time = ((send_time + mean) + (40.0 * sqrt(var)))
return (oldest_sent_idx, next_resend_time)
|
def _do_cleanup(input_queue, output_queue, num_workers, sentinels_received, num_outstanding):
    """Shut the worker pool down: hand an EndSentinel to every worker that has
    not yet acknowledged, and drain results that are still in flight.

    Args:
      input_queue: PollableQueue feeding the workers.
      output_queue: PollableQueue of (timestamp, item) pairs from workers.
      num_workers: total number of worker threads.
      sentinels_received: workers that have already acknowledged shutdown.
      num_outstanding: items sent to workers but not yet answered.

    Returns:
      List of result values received during shutdown (so they are not lost).
    """
    input_fd = input_queue.put_fd()
    output_fd = output_queue.get_fd()
    poller = select.epoll()
    poller.register(input_fd, select.EPOLLOUT)
    poller.register(output_fd, select.EPOLLIN)
    remaining_outputs = []
    # One sentinel per worker that has not already been told to stop.
    end_sentinels_to_send = (num_workers - sentinels_received)
    while (sentinels_received < num_workers):
        # Block forever while real results are outstanding; otherwise give
        # stragglers a 10s grace period before bailing out.
        evts = dict(poller.poll(((- 1) if (num_outstanding > 0) else 10.0)))
        if (not evts):
            break  # timed out waiting for worker acknowledgements
        if (output_fd in evts):
            (_, maybe_sentinel) = output_queue.get()
            if (maybe_sentinel is EndSentinel):
                sentinels_received += 1
            else:
                # Real (cookie, value) pair; keep only the value.
                remaining_outputs.append(maybe_sentinel[1])
                num_outstanding -= 1
        if (input_fd in evts):
            if (end_sentinels_to_send > 0):
                input_queue.put_nowait(EndSentinel)
                end_sentinels_to_send -= 1
            else:
                # All sentinels queued; stop watching for writability.
                poller.modify(input_fd, 0)
    assert (sentinels_received == num_workers), (sentinels_received, num_workers)
    assert output_queue.empty()
    return remaining_outputs
|
def _generate_results(input_stream, input_queue, worker_output_queue, output_queue, num_workers, max_outstanding):
    """Master loop for the *reliable* async streamer.

    Feeds (idx, value) items from input_stream to the workers, tracks a
    running latency estimate, resends messages whose acknowledgements are
    overdue, and emits results to output_queue strictly in input order.
    Terminates by running _do_cleanup and enqueueing EndSentinel.
    """
    pack_cookie = struct.pack
    # idx -> (send_time, value) for messages sent but not yet answered,
    # in send order (oldest first).
    sent_messages = OrderedDict()
    oldest_sent_idx = None
    next_resend_time = None
    # (mean, var) latency estimate; deliberately pessimistic starting values.
    ltc_stats = (5.0, 10.0)
    # Results that arrived out of order, waiting their turn: idx -> value.
    received_messages = {}
    next_out = 0
    next_in_item = next(input_stream, EndSentinel)
    inputs_remain = (next_in_item is not EndSentinel)
    sentinels_received = 0
    input_fd = input_queue.put_fd()
    worker_output_fd = worker_output_queue.get_fd()
    output_fd = output_queue.put_fd()
    poller = select.epoll()
    poller.register(input_fd, select.EPOLLOUT)
    poller.register(worker_output_fd, select.EPOLLIN)
    poller.register(output_fd, 0)
    while ((sentinels_received < num_workers) and (inputs_remain or sent_messages)):
        if max_outstanding:
            can_send_new = ((len(sent_messages) < max_outstanding) and (len(received_messages) < max_outstanding) and inputs_remain)
        else:
            can_send_new = inputs_remain
        # NOTE(review): `now` is first assigned inside the loop (after the
        # poll below). The first iteration is only safe because
        # next_resend_time is None, short-circuiting before `now` is read.
        if ((next_resend_time and (now >= next_resend_time)) or can_send_new):
            poller.modify(input_fd, select.EPOLLOUT)
        else:
            poller.modify(input_fd, 0)
        if next_resend_time:
            # Wake up no later than the resend deadline.
            t = max(0, (next_resend_time - now))
            evts = dict(poller.poll(t))
        else:
            evts = dict(poller.poll())
        now = time()
        if (output_fd in evts):
            # Emit the next in-order result to the consumer.
            output_queue.put_nowait(received_messages.pop(next_out))
            next_out += 1
            if (next_out not in received_messages):
                poller.modify(output_fd, 0)
        if (worker_output_fd in evts):
            for (receive_time, maybe_sentinel) in worker_output_queue.get_multiple_nowait():
                if (maybe_sentinel is EndSentinel):
                    sentinels_received += 1
                    continue
                (idx_bytes, value) = maybe_sentinel
                idx = struct.unpack('<Q', idx_bytes)[0]
                # pop() tolerates duplicates from resends: only the first
                # answer for an idx is recorded.
                sent_message = sent_messages.pop(idx, None)
                if (sent_message is not None):
                    received_messages[idx] = value
                    # Fold the observed round-trip into the latency stats.
                    ltc_stats = _running_mean_var(ltc_stats, (receive_time - sent_message[0]))
                    if (idx == oldest_sent_idx):
                        (oldest_sent_idx, next_resend_time) = _find_next_resend(sent_messages, ltc_stats)
                    if (idx == next_out):
                        poller.modify(output_fd, select.EPOLLOUT)
                elif (oldest_sent_idx is not None):
                    # Presumably a duplicate/late ack: use the age of the
                    # oldest outstanding message as a latency sample —
                    # TODO confirm this is the intended estimator.
                    ltc_stats = _running_mean_var(ltc_stats, (now - sent_messages[oldest_sent_idx][0]))
        elif (input_fd in evts):
            if can_send_new:
                (send_idx, send_value) = next_in_item
                input_queue.put_nowait((pack_cookie('<Q', send_idx), send_value))
                sent_messages[next_in_item[0]] = (now, next_in_item[1])
                next_in_item = next(input_stream, EndSentinel)
                inputs_remain = (next_in_item is not EndSentinel)
                if (oldest_sent_idx is None):
                    (oldest_sent_idx, next_resend_time) = _find_next_resend(sent_messages, ltc_stats)
            else:
                # Resend path: the oldest outstanding ack is overdue.
                (send_time, resend_input) = sent_messages.pop(oldest_sent_idx)
                sys.stdout.write('Resending {} (ltc, mean, var) = ({}, {}, {})\n'.format(oldest_sent_idx, (now - send_time), ltc_stats[0], ltc_stats[1]))
                input_queue.put_nowait((pack_cookie('<Q', oldest_sent_idx), resend_input))
                # Re-insert with a fresh timestamp (moves it to newest).
                sent_messages[oldest_sent_idx] = (now, resend_input)
                (oldest_sent_idx, next_resend_time) = _find_next_resend(sent_messages, ltc_stats)
    # Flush whatever is ready, in order, then shut the workers down.
    while (next_out in received_messages):
        output_queue.put(received_messages.pop(next_out))
        next_out += 1
    _do_cleanup(input_queue, worker_output_queue, num_workers, sentinels_received, 0)
    output_queue.put(EndSentinel)
|
def _generate_results_unreliable(input_stream, input_queue, worker_output_queue, output_queue, num_workers, max_outstanding_unused):
    """Master loop for the *unreliable* streamer: no resends and no ordering —
    results are forwarded in whatever order the workers produce them.
    max_outstanding_unused is accepted for signature parity and ignored.
    """
    next_in_item = next(input_stream, EndSentinel)
    inputs_remain = (next_in_item is not EndSentinel)
    received_messages = deque()
    pack_cookie = struct.pack
    input_fd = input_queue.put_fd()
    worker_output_fd = worker_output_queue.get_fd()
    output_fd = output_queue.put_fd()
    poller = select.epoll()
    poller.register(input_fd, select.EPOLLOUT)
    poller.register(worker_output_fd, select.EPOLLIN)
    poller.register(output_fd, 0)
    num_outstanding = 0
    sentinels_received = 0
    while ((sentinels_received < num_workers) and (inputs_remain or received_messages)):
        # Fast path: derive "ready" fds straight from queue state; the tuple
        # doubles as the event set for the `fd in evts` checks below. Fall
        # back to a blocking epoll only when nothing is immediately ready.
        evts = ((input_fd if (inputs_remain and (not input_queue.full())) else 0), (output_fd if ((not output_queue.full()) and len(received_messages)) else 0), (worker_output_fd if (not worker_output_queue.empty()) else 0))
        if all(((evt == 0) for evt in evts)):
            evts = dict(poller.poll())
        if (output_fd in evts):
            # appendleft + pop() together give FIFO hand-off to the consumer.
            output_queue.put(received_messages.pop())
            if (len(received_messages) == 0):
                poller.modify(output_fd, 0)
        if (worker_output_fd in evts):
            for (receive_time, maybe_sentinel) in worker_output_queue.get_multiple():
                if (maybe_sentinel is EndSentinel):
                    sentinels_received += 1
                    continue
                received_messages.appendleft(maybe_sentinel[1])
                num_outstanding -= 1
            # Results buffered: start watching the consumer side.
            poller.modify(output_fd, select.EPOLLOUT)
        if (input_fd in evts):
            (send_idx, send_value) = next_in_item
            input_queue.put((pack_cookie('<Q', send_idx), send_value))
            next_in_item = next(input_stream, EndSentinel)
            inputs_remain = (next_in_item is not EndSentinel)
            num_outstanding += 1
            if (not inputs_remain):
                poller.modify(input_fd, 0)
    # Drain stragglers collected during shutdown before signalling the end.
    for value in _do_cleanup(input_queue, worker_output_queue, num_workers, sentinels_received, num_outstanding):
        output_queue.put(value)
    output_queue.put(EndSentinel)
|
def _async_generator(func, max_workers, in_q_size, out_q_size, max_outstanding, async_inner, reliable):
    """Wrap func in a thread-pooled streaming generator.

    Spawns max_workers daemon threads running func (directly if async_inner,
    else adapted through _sync_inner_generator) plus one master thread that
    routes inputs/outputs; the returned wrapper yields results lazily.

    Args:
      func: user function or generator-function to parallelize.
      max_workers: number of worker threads.
      in_q_size / out_q_size: bounds for the input/output PollableQueues.
      max_outstanding: cap on in-flight items (reliable mode only).
      async_inner: func is itself a queue-consuming generator.
      reliable: in-order delivery with resends vs. fire-and-forget.
    """
    if async_inner:
        assert inspect.isgeneratorfunction(func), 'async_inner == True but {} is not a generator'.format(func)
    @functools.wraps(func)
    def wrapper(input_sequence_or_self, *args, **kwargs):
        # Support decorating both plain functions and instance methods: if
        # the first declared parameter is 'self', bind func to the instance
        # and treat args[0] as the input sequence.
        # NOTE(review): inspect.getargspec was removed in Python 3.11; this
        # file appears to target older Pythons — confirm before upgrading.
        if (inspect.getargspec(func).args[0] == 'self'):
            inner_func = func.__get__(input_sequence_or_self, type(input_sequence_or_self))
            input_sequence = args[0]
            args = args[1:]
        else:
            inner_func = func
            input_sequence = input_sequence_or_self
        # Cookie every input with its index so results can be reordered.
        input_stream = enumerate(iter(input_sequence))
        if reliable:
            generate_func = _generate_results
        else:
            generate_func = _generate_results_unreliable
        input_queue = PollableQueue(in_q_size)
        worker_output_queue = PollableQueue((8 * max_workers))
        output_queue = PollableQueue(out_q_size)
        if async_inner:
            generator_func = inner_func
        else:
            # Adapt the plain function to the worker generator protocol.
            args = ((inner_func,) + args)
            generator_func = _sync_inner_generator
        worker_threads = []
        for _ in range(max_workers):
            t = threading.Thread(target=_async_streamer_async_inner, args=(input_queue, worker_output_queue, generator_func, args, kwargs))
            t.daemon = True
            t.start()
            worker_threads.append(t)
        master_thread = threading.Thread(target=generate_func, args=(input_stream, input_queue, worker_output_queue, output_queue, max_workers, max_outstanding))
        master_thread.daemon = True
        master_thread.start()
        try:
            # Stream results until the master signals completion.
            while True:
                for value in output_queue.get_multiple():
                    if (value is EndSentinel):
                        return
                    else:
                        (yield value)
        finally:
            # Best-effort teardown: threads are daemons, joins are bounded.
            for t in worker_threads:
                t.join(1)
            master_thread.join(1)
            input_queue.close()
            worker_output_queue.close()
            output_queue.close()
    return wrapper
|
def async_generator(max_workers=1, in_q_size=10, out_q_size=12, max_outstanding=10000, async_inner=False, reliable=True):
    """Decorator factory: parallelize a function over an input sequence.

    Returns a decorator that turns the wrapped function into a thread-pooled
    streaming generator (see _async_generator for parameter semantics).
    """
    def decorator(func):
        return _async_generator(func, max_workers, in_q_size, out_q_size, max_outstanding, async_inner, reliable)
    return decorator
|
def cache_path_for_file_path(fn, cache_prefix=None):
    """Map an arbitrary file path to a flat file name inside the local cache
    directory, creating the directory if necessary.

    NOTE(review): cache_prefix is accepted but currently unused — confirm
    whether callers rely on it.
    """
    cache_dir = os.path.join(DEFAULT_CACHE_DIR, 'local')
    mkdirs_exists_ok(cache_dir)
    # Flatten the absolute path into a single file name.
    flat_name = os.path.abspath(fn).replace('/', '_')
    return os.path.join(cache_dir, flat_name)
|
class DataUnreadableError(Exception):
    """Raised when a log file cannot be read or parsed (e.g. corrupted or
    truncated capnp data)."""
    pass
|
def atomic_write_in_dir(path, **kwargs):
    """Create an atomic writer whose temporary file lives in the destination
    file's own directory (so the final rename never crosses filesystems)."""
    target_dir = os.path.dirname(path)
    writer = AtomicWriter(path, **kwargs)
    return writer._open(_get_fileobject_func(writer, target_dir))
|
def _get_fileobject_func(writer, temp_dir):
def _get_fileobject():
file_obj = writer.get_fileobject(dir=temp_dir)
os.chmod(file_obj.name, 420)
return file_obj
return _get_fileobject
|
def mkdirs_exists_ok(path):
    """Create path (including parents), tolerating a directory that already
    exists; real errors (permissions, a file in the way) still propagate."""
    if os.path.isdir(path):
        return
    try:
        os.makedirs(path)
    except OSError:
        # Another process may have created it between the check and the call.
        if not os.path.isdir(path):
            raise
|
def FileReader(fn):
    """Open fn for binary reading (kept as an indirection point so remote or
    cached file access could be swapped in)."""
    handle = open(fn, 'rb')
    return handle
|
class KBHit():
    """Non-blocking keyboard input helper for a POSIX terminal.

    Puts stdin into non-canonical, no-echo mode on construction and restores
    the original settings at exit (and via set_normal_term()).
    """

    def __init__(self):
        """Creates a KBHit object that you can call to do various keyboard things."""
        self.set_kbhit_terminal()

    def set_kbhit_terminal(self):
        """Switch stdin to unbuffered, no-echo ("cbreak"-style) mode."""
        self.fd = sys.stdin.fileno()
        self.new_term = termios.tcgetattr(self.fd)
        self.old_term = termios.tcgetattr(self.fd)
        # Clear ICANON (line buffering) and ECHO in the local-modes word.
        self.new_term[3] = ((self.new_term[3] & (~ termios.ICANON)) & (~ termios.ECHO))
        termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.new_term)
        # Make sure the terminal is restored even on unclean exits.
        atexit.register(self.set_normal_term)

    def set_normal_term(self):
        """Resets to normal terminal. On Windows this is a no-op."""
        if (os.name == 'nt'):
            pass
        else:
            termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old_term)

    def getch(self):
        """Returns a keyboard character after kbhit() has been called.
        Should not be called in the same program as getarrow()."""
        return sys.stdin.read(1)

    def getarrow(self):
        """Returns an arrow-key code after kbhit() has been called. Codes are
        0 : up, 1 : right, 2 : down, 3 : left.
        Should not be called in the same program as getch()."""
        if (os.name == 'nt'):
            msvcrt.getch()  # skip 0xE0 escape prefix
            c = msvcrt.getch()
            vals = [72, 77, 80, 75]
        else:
            # Arrow keys arrive as a 3-byte escape sequence; the third byte
            # identifies the direction.
            c = sys.stdin.read(3)[2]
            vals = [65, 67, 66, 68]
        # BUGFIX: c is already a 1-character string here; str has no
        # .decode() on Python 3. ord(c) yields the same ASCII code on both
        # Python 2 and 3 for these values.
        return vals.index(ord(c))

    def kbhit(self):
        """Returns True if a keyboard character was hit, False otherwise."""
        # BUGFIX: the rest of this file uses `import select` (select.epoll),
        # so the bare name `select` is the module — call select.select().
        dr, dw, de = select.select([sys.stdin], [], [], 0)
        return (dr != [])
|
class lazy_property(object):
    """Non-data descriptor computing a property value once per instance.

    On first access the wrapped method runs and its result is stored on the
    instance under the same name, shadowing this descriptor; subsequent
    accesses are plain attribute lookups. Instance methods only.
    """

    def __init__(self, func):
        self._func = func

    def __get__(self, obj_self, cls):
        result = self._func(obj_self)
        # Cache on the instance: since this descriptor defines no __set__,
        # the instance attribute wins on every later lookup.
        setattr(obj_self, self._func.__name__, result)
        return result
|
def write_can_to_msg(data, src, msg):
    """Fill msg's 'can' list from raw CAN tuples.

    Each entry is (address, bus_time, payload[, src]); a bare single tuple is
    accepted as well. Entries with a negative address are treated as invalid
    and their slot is left at capnp defaults.
    """
    if not isinstance(data[0], Sequence):
        data = [data]
    can_msgs = msg.init('can', len(data))
    for i, entry in enumerate(data):
        address = entry[0]
        if address < 0:
            # Invalid entry marker; skip (slot keeps default values).
            continue
        cc = can_msgs[i]
        cc.address = address
        cc.busTime = 0
        cc.dat = hex_to_str(entry[2])
        if len(entry) == 4:
            # Extended form carries an explicit source and bus time.
            cc.src = entry[3]
            cc.busTime = entry[1]
        else:
            cc.src = src
|
def convert_old_pkt_to_new(old_pkt, data_version=None):
    """Convert a legacy (header, payload) log packet into a capnp Event.

    Args:
      old_pkt: tuple (m, d); m is either (word, pid, mono_time_ns) or
        (wall_time_s, pid), and d is the PID-specific payload.
      data_version: log-format version token parsed from the file name.
        BUGFIX: added (with a backward-compatible default) because
        LogReader.__iter__ and MultiLogIterator.next call this with two
        arguments; the conversion itself does not currently branch on it.

    Returns:
      A capnp_log.Event reader for the converted packet.
    """
    (m, d) = old_pkt
    msg = capnp_log.Event.new_message()
    # Two legacy header layouts: the 3-tuple carries mono time in ns
    # directly; the 2-tuple carries wall time in seconds.
    if (len(m) == 3):
        (_, pid, t) = m
        msg.logMonoTime = t
    else:
        (t, pid) = m
        msg.logMonoTime = int((t * 1000000000.0))
    if (pid == PID_OBD):
        write_can_to_msg(d, 0, msg)
    elif (pid == PID_CAM):
        frame = msg.init('frame')
        frame.frameId = d[0]
        frame.timestampEof = msg.logMonoTime
    elif (pid == PID_IGPS):
        loc = msg.init('gpsLocation')
        loc.latitude = d[0]
        loc.longitude = d[1]
        loc.speed = d[2]
        loc.timestamp = int((m[0] * 1000.0))  # wall seconds -> milliseconds
        loc.flags = (1 | 4)  # presumably lat/lon + speed valid — TODO confirm
    elif (pid == PID_IMOTION):
        user_acceleration = d[:3]
        gravity = d[3:6]
        g = (- 9.8)
        # Recombine iOS user acceleration + gravity (in g) into m/s^2.
        acceleration = [(g * (a + b)) for (a, b) in zip(user_acceleration, gravity)]
        accel_event = msg.init('sensorEvents', 1)[0]
        accel_event.acceleration.v = acceleration
    elif (pid == PID_GPS):
        if ((len(d) <= 6) or (d[(- 1)] == 'gps')):
            loc = msg.init('gpsLocation')
            loc.latitude = d[0]
            loc.longitude = d[1]
            loc.speed = d[2]
            if (len(d) > 6):
                loc.timestamp = d[6]
            loc.flags = (1 | 4)
    elif (pid == PID_ACCEL):
        # Older logs store the vector in d[2]; otherwise the payload itself
        # is the vector — TODO confirm against actual log samples.
        val = (d[2] if (type(d[2]) != type(0.0)) else d)
        accel_event = msg.init('sensorEvents', 1)[0]
        accel_event.acceleration.v = val
    elif (pid == PID_GYRO):
        val = (d[2] if (type(d[2]) != type(0.0)) else d)
        gyro_event = msg.init('sensorEvents', 1)[0]
        gyro_event.init('gyro').v = val
    elif (pid == PID_LIDAR):
        lid = msg.init('lidarPts')
        lid.idx = d[3]
    elif (pid == PID_APPLANIX):
        loc = msg.init('liveLocation')
        loc.status = d[18]
        (loc.lat, loc.lon, loc.alt) = d[0:3]
        loc.vNED = d[3:6]
        loc.roll = d[6]
        loc.pitch = d[7]
        loc.heading = d[8]
        loc.wanderAngle = d[9]
        loc.trackAngle = d[10]
        loc.speed = d[11]
        loc.gyro = d[12:15]
        loc.accel = d[15:18]
    elif (pid == PID_IBAROMETER):
        pressure_event = msg.init('sensorEvents', 1)[0]
        (_, pressure) = d[0:2]
        pressure_event.init('pressure').v = [pressure]
    elif ((pid == PID_IINIT) and (len(d) == 4)):
        init_event = msg.init('initData')
        init_event.deviceType = capnp_log.InitData.DeviceType.chffrIos
        build_info = init_event.init('iosBuildInfo')
        build_info.appVersion = d[0]
        build_info.appBuild = int(d[1])
        build_info.osVersion = d[2]
        build_info.deviceModel = d[3]
    return msg.as_reader()
|
def index_log(fn):
    """Build (if needed) and run the native index_log tool on fn.

    Returns:
      numpy uint64 array of event byte offsets into the log file.

    Raises:
      DataUnreadableError: if the tool exits nonzero (corrupt/truncated log).
    """
    index_log_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'index_log')
    index_log = os.path.join(index_log_dir, 'index_log')
    phonelibs_dir = os.path.join(OP_PATH, 'phonelibs')
    # BUGFIX: the old code passed stdout=open('/dev/null', 'w'), leaking the
    # file object on every call. os.devnull is also more portable.
    with open(os.devnull, 'w') as devnull:
        subprocess.check_call(['make', ('PHONELIBS=' + phonelibs_dir)], cwd=index_log_dir, stdout=devnull)
    try:
        dat = subprocess.check_output([index_log, fn, '-'])
    except subprocess.CalledProcessError:
        raise DataUnreadableError(('%s capnp is corrupted/truncated' % fn))
    return np.frombuffer(dat, dtype=np.uint64)
|
def event_read_multiple(fn):
    """Read every capnp Event from the log file fn, using the native index
    to find event boundaries."""
    idx = index_log(fn)
    with open(fn, 'rb') as f:
        raw = f.read()
    # Consecutive index entries delimit one event each.
    return [capnp_log.Event.from_bytes(raw[start:end])
            for (start, end) in zip(idx[:-1], idx[1:])]
|
def event_read_multiple_bytes(dat):
    """Parse every capnp Event from an in-memory log buffer.

    The native indexer only works on files, so the buffer is spilled to a
    temporary file just to compute the event offsets.
    """
    with tempfile.NamedTemporaryFile() as tmp:
        tmp.write(dat)
        tmp.flush()
        idx = index_log(tmp.name)
        return [capnp_log.Event.from_bytes(dat[idx[i]:idx[i + 1]])
                for i in range(len(idx) - 1)]
|
class MultiLogIterator(object):
    """Iterate over events spanning a sequence of per-minute log files.

    log_paths is indexed by minute; entries may be None (missing segments).
    With wraparound=True, iteration restarts at the first available log after
    the last one; otherwise StopIteration is raised.
    """

    def __init__(self, log_paths, wraparound=True):
        self._log_paths = log_paths
        self._wraparound = wraparound
        # First non-None segment; raises StopIteration if all are missing.
        self._first_log_idx = next((i for i in range(len(log_paths)) if (log_paths[i] is not None)))
        self._current_log = self._first_log_idx
        self._idx = 0
        # Lazily-created LogReaders, parallel to log_paths.
        self._log_readers = ([None] * len(log_paths))
        self.start_time = self._log_reader(self._first_log_idx)._ts[0]

    def _log_reader(self, i):
        """Return (creating on first use) the LogReader for segment i."""
        if ((self._log_readers[i] is None) and (self._log_paths[i] is not None)):
            log_path = self._log_paths[i]
            print('LogReader:', log_path)
            self._log_readers[i] = LogReader(log_path)
        return self._log_readers[i]

    def __iter__(self):
        return self

    def _inc(self):
        """Advance to the next event, rolling over to the next available
        segment (or wrapping around / stopping) at end of segment."""
        lr = self._log_reader(self._current_log)
        if (self._idx < (len(lr._ents) - 1)):
            self._idx += 1
        else:
            self._idx = 0
            # Next non-None segment; len(...) acts as an "off the end" marker.
            self._current_log = next((i for i in range((self._current_log + 1), (len(self._log_readers) + 1)) if ((i == len(self._log_readers)) or (self._log_paths[i] is not None))))
            if (self._current_log == len(self._log_readers)):
                if self._wraparound:
                    self._current_log = self._first_log_idx
                else:
                    raise StopIteration

    def next(self):
        """Return the next event, converting old-format packets on the fly
        (Python 2-style iterator method)."""
        while 1:
            lr = self._log_reader(self._current_log)
            ret = lr._ents[self._idx]
            if lr._do_conversion:
                # NOTE(review): convert_old_pkt_to_new is defined with a
                # single parameter elsewhere in this file — confirm this
                # two-argument call matches the converter's signature.
                ret = convert_old_pkt_to_new(ret, lr.data_version)
            self._inc()
            return ret

    def __next__(self):
        """BUGFIX: Python 3 iterator protocol; without this the class is not
        iterable on Python 3. Delegates to the py2-style next()."""
        return self.next()

    def tell(self):
        """Current position in seconds relative to the first event."""
        return ((self._log_reader(self._current_log)._ts[self._idx] - self.start_time) * 1e-09)

    def seek(self, ts):
        """Seek to approximately ts seconds in; returns False if the target
        minute's log segment is missing."""
        minute = int((ts / 60))
        if ((minute >= len(self._log_paths)) or (self._log_paths[minute] is None)):
            return False
        self._current_log = minute
        self._idx = 0
        # Step forward event-by-event until reaching the requested time.
        while (self.tell() < ts):
            self._inc()
        return True
|
class LogReader(object):
    """Read a single log file in any of several historical formats.

    Supports raw/gzip/bz2/7z containers and both the old JSON-lines format
    (converted to capnp on iteration when canonicalize=True) and the native
    capnp event stream.
    """
    def __init__(self, fn, canonicalize=True):
        (_, ext) = os.path.splitext(fn)
        data_version = None
        with FileReader(fn) as f:
            dat = f.read()
        # --- decompress by extension ---
        if ((ext == '.gz') and (('log_' in fn) or ('log2' in fn))):
            # 32 | MAX_WBITS: auto-accept gzip or zlib headers.
            dat = zlib.decompress(dat, (zlib.MAX_WBITS | 32))
        elif (ext == '.bz2'):
            dat = bz2.decompress(dat)
        elif (ext == '.7z'):
            with libarchive.public.memory_reader(dat) as aa:
                mdat = []
                for it in aa:
                    for bb in it.get_blocks():
                        mdat.append(bb)
                # NOTE(review): ''.join over archive blocks assumes str data
                # (Python 2); on Python 3 blocks are bytes and would need
                # b''.join — confirm target interpreter.
                dat = ''.join(mdat)
        # --- detect format: old JSON-lines logs need conversion to capnp ---
        if (ext == ''):
            # NOTE(review): dat[0] == '[' assumes str (Python 2); on
            # Python 3 indexing bytes yields an int.
            if (dat[0] == '['):
                needs_conversion = True
                # Last line is dropped — presumably a truncated trailing
                # record; TODO confirm.
                ents = [json.loads(x) for x in dat.strip().split('\n')[:(- 1)]]
                if ('_' in fn):
                    # Version token embedded in the file name.
                    data_version = fn.split('_')[1]
            else:
                needs_conversion = False
                ents = event_read_multiple_bytes(dat)
        elif (ext == '.gz'):
            if ('log_' in fn):
                # Old JSON-lines format.
                ents = [json.loads(x) for x in dat.strip().split('\n')[:(- 1)]]
                needs_conversion = True
            elif ('log2' in fn):
                needs_conversion = False
                ents = event_read_multiple_bytes(dat)
            else:
                raise Exception('unknown extension')
        elif (ext == '.bz2'):
            needs_conversion = False
            ents = event_read_multiple_bytes(dat)
        elif (ext == '.7z'):
            needs_conversion = True
            ents = [json.loads(x) for x in dat.strip().split('\n')]
        else:
            raise Exception('unknown extension')
        # Timestamps: old entries carry (seconds, ...) headers; capnp events
        # carry logMonoTime in nanoseconds already.
        if needs_conversion:
            self._ts = [(x[0][0] * 1000000000.0) for x in ents]
        else:
            self._ts = [x.logMonoTime for x in ents]
        self.data_version = data_version  # version token from the file name, if any
        self._do_conversion = (needs_conversion and canonicalize)
        self._ents = ents
    def __iter__(self):
        """Yield events, converting old-format packets when enabled."""
        for ent in self._ents:
            if self._do_conversion:
                # NOTE(review): convert_old_pkt_to_new is defined with one
                # parameter elsewhere in this file — confirm this 2-arg call.
                (yield convert_old_pkt_to_new(ent, self.data_version))
            else:
                (yield ent)
|
def load_many_logs_canonical(log_paths):
    'Load all logs for a sequence of log paths.'
    for path in log_paths:
        reader = LogReader(path)
        for msg in reader:
            yield msg
|
def big_endian_number(number):
    """Encode a non-negative integer as its minimal big-endian byte string
    (returned as a str of chr() values, one per byte)."""
    encoded = chr(number & 255)
    number >>= 8
    while number:
        encoded = chr(number & 255) + encoded
        number >>= 8
    return encoded
|
def ebml_encode_number(number):
    """Encode an integer as an EBML variable-length integer (VINT).

    The leading byte carries a length marker bit (0x80 >> (length-1)) ORed
    with the number's high bits; the remaining length-1 bytes are the low
    bits, big-endian. -1 encodes as the reserved all-ones byte. Numbers
    needing more than 8 bytes raise Exception.
    """
    if number == -1:
        # All-ones VINT: reserved "unknown" value.
        return chr(0xFF)
    for length in range(1, 9):
        # A VINT of this length holds values below 2**(7*length) - 1
        # (the all-ones pattern is reserved).
        if number < (1 << (7 * length)) - 1:
            marker = 0x80 >> (length - 1)
            head = chr(marker | (number >> (8 * (length - 1))))
            # Remaining bytes, most significant first (empty for length 1).
            tail = ''.join(chr((number >> shift) & 0xFF)
                           for shift in range(8 * (length - 2), -1, -8))
            return head + tail
    raise Exception('NUMBER TOO BIG')
|
def ebml_element(element_id, data, length=None):
    """Serialize one EBML element: big-endian element id, VINT-encoded
    length, then the payload.

    Args:
      element_id: numeric EBML id (written verbatim, big-endian).
      data: payload string.
      length: overrides len(data) when given (e.g. placeholder sizes).
    """
    # Idiom fix: identity comparison for None (was `length == None`).
    if length is None:
        length = len(data)
    return (big_endian_number(element_id) + ebml_encode_number(length)) + data
|
def write_ebml_header(f, content_type, version, read_version):
    """Write a standard EBML file header (id 0x1A45DFA3 == 440786851) to f,
    declaring the doc type and version numbers."""
    fields = [
        ebml_element(17030, ben(1)),        # EBMLVersion
        ebml_element(17143, ben(1)),        # EBMLReadVersion
        ebml_element(17138, ben(4)),        # EBMLMaxIDLength
        ebml_element(17139, ben(8)),        # EBMLMaxSizeLength
        ebml_element(17026, content_type),  # DocType
        ebml_element(17031, ben(version)),       # DocTypeVersion
        ebml_element(17029, ben(read_version)),  # DocTypeReadVersion
    ]
    f.write(ebml_element(440786851, ''.join(fields)))
|
# NOTE: The following lines are non-code residue (dataset-hosting page text
# accidentally appended to this file); commented out so the module parses.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.