repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
ctcdecode | ctcdecode-master/ctcdecode/__init__.py | import torch
from ._ext import ctc_decode
class CTCBeamDecoder(object):
    """
    PyTorch wrapper for DeepSpeech PaddlePaddle Beam Search Decoder.

    Args:
        labels (list): The tokens/vocab used to train your model.
            They should be in the same order as they are in your model's outputs.
        model_path (basestring): The path to your external KenLM language model (LM).
        alpha (float): Weighting associated with the LM probabilities.
            A weight of 0 means the LM has no effect.
        beta (float): Weight associated with the number of words within our beam.
        cutoff_top_n (int): Cutoff number in pruning. Only the top cutoff_top_n characters
            with the highest probability in the vocab will be used in beam search.
        cutoff_prob (float): Cutoff probability in pruning. 1.0 means no pruning.
        beam_width (int): This controls how broad the beam search is. Higher values are more
            likely to find top beams, but they also will make your beam search exponentially slower.
        num_processes (int): Parallelize the batch using num_processes workers.
        blank_id (int): Index of the CTC blank token (probably 0) used when training your model.
        log_probs_input (bool): False if your model has passed through a softmax and output
            probabilities sum to 1.
    """

    def __init__(
        self,
        labels,
        model_path=None,
        alpha=0,
        beta=0,
        cutoff_top_n=40,
        cutoff_prob=1.0,
        beam_width=100,
        num_processes=4,
        blank_id=0,
        log_probs_input=False,
    ):
        self.cutoff_top_n = cutoff_top_n
        self._beam_width = beam_width
        self._scorer = None
        self._num_processes = num_processes
        self._labels = list(labels)  # Ensure labels are a list
        self._num_labels = len(labels)
        self._blank_id = blank_id
        # The C extension expects an int flag rather than a Python bool.
        self._log_probs = 1 if log_probs_input else 0
        if model_path:
            self._scorer = ctc_decode.paddle_get_scorer(
                alpha, beta, model_path.encode(), self._labels, self._num_labels
            )
        self._cutoff_prob = cutoff_prob

    def decode(self, probs, seq_lens=None):
        """
        Conducts the beamsearch on model outputs and returns results.

        Args:
            probs (Tensor): A rank 3 tensor representing model outputs.
                Shape is batch x num_timesteps x num_labels.
            seq_lens (Tensor): A rank 1 tensor representing the sequence length of the items
                in the batch. Optional; if not provided the size of axis 1 (num_timesteps)
                of `probs` is used for all items.

        Returns:
            tuple: (beam_results, beam_scores, timesteps, out_lens)

            beam_results (Tensor): A 3-dim tensor of the top n beams of a batch of items.
                Shape: batchsize x num_beams x num_timesteps.
                Results are still encoded as ints at this stage.
            beam_scores (Tensor): A 2-dim tensor of the score of each beam in beam_results.
                Shape: batchsize x num_beams.
                (Docstring fix: the buffer allocated below is 2-dim, not 3-dim.)
            timesteps (Tensor): A 3-dim tensor of the timesteps at which the nth output
                character has peak probability; usable as an audio/transcript alignment.
                Shape: batchsize x num_beams x num_timesteps.
                (Docstring fix: the buffer allocated below is 3-dim, not 2-dim.)
            out_lens (Tensor): A 2-dim tensor of the length of each beam in beam_results.
                Shape: batchsize x num_beams.
        """
        # The C extension works on contiguous CPU float tensors.
        probs = probs.cpu().float()
        batch_size, max_seq_len = probs.size(0), probs.size(1)
        if seq_lens is None:
            seq_lens = torch.IntTensor(batch_size).fill_(max_seq_len)
        else:
            seq_lens = seq_lens.cpu().int()
        # Output buffers, filled in place by the extension.
        output = torch.IntTensor(batch_size, self._beam_width, max_seq_len).cpu().int()
        timesteps = torch.IntTensor(batch_size, self._beam_width, max_seq_len).cpu().int()
        scores = torch.FloatTensor(batch_size, self._beam_width).cpu().float()
        out_seq_len = torch.zeros(batch_size, self._beam_width).cpu().int()
        if self._scorer:
            ctc_decode.paddle_beam_decode_lm(
                probs,
                seq_lens,
                self._labels,
                self._num_labels,
                self._beam_width,
                self._num_processes,
                self._cutoff_prob,
                self.cutoff_top_n,
                self._blank_id,
                self._log_probs,
                self._scorer,
                output,
                timesteps,
                scores,
                out_seq_len,
            )
        else:
            ctc_decode.paddle_beam_decode(
                probs,
                seq_lens,
                self._labels,
                self._num_labels,
                self._beam_width,
                self._num_processes,
                self._cutoff_prob,
                self.cutoff_top_n,
                self._blank_id,
                self._log_probs,
                output,
                timesteps,
                scores,
                out_seq_len,
            )
        return output, scores, timesteps, out_seq_len

    def character_based(self):
        """True if the loaded LM is character based, None when no LM is loaded."""
        return ctc_decode.is_character_based(self._scorer) if self._scorer else None

    def max_order(self):
        """Max n-gram order of the loaded LM, None when no LM is loaded."""
        return ctc_decode.get_max_order(self._scorer) if self._scorer else None

    def dict_size(self):
        """Dictionary size of the loaded LM, None when no LM is loaded."""
        return ctc_decode.get_dict_size(self._scorer) if self._scorer else None

    def reset_params(self, alpha, beta):
        """Update the LM weights without reloading the model."""
        if self._scorer is not None:
            ctc_decode.reset_params(self._scorer, alpha, beta)

    def __del__(self):
        # Fix: use getattr — __del__ can run even when __init__ raised before
        # `_scorer` was assigned, which previously raised AttributeError.
        if getattr(self, "_scorer", None) is not None:
            ctc_decode.paddle_release_scorer(self._scorer)
class OnlineCTCBeamDecoder(object):
    """
    PyTorch wrapper for the DeepSpeech PaddlePaddle Beam Search Decoder with an
    interface for online (chunk-by-chunk) decoding.

    Args:
        labels (list): The tokens/vocab used to train your model.
            They should be in the same order as they are in your model's outputs.
        model_path (basestring): The path to your external KenLM language model (LM).
        alpha (float): Weighting associated with the LM probabilities.
            A weight of 0 means the LM has no effect.
        beta (float): Weight associated with the number of words within our beam.
        cutoff_top_n (int): Cutoff number in pruning. Only the top cutoff_top_n characters
            with the highest probability in the vocab will be used in beam search.
        cutoff_prob (float): Cutoff probability in pruning. 1.0 means no pruning.
        beam_width (int): This controls how broad the beam search is. Higher values are more
            likely to find top beams, but they also will make your beam search exponentially slower.
        num_processes (int): Parallelize the batch using num_processes workers.
        blank_id (int): Index of the CTC blank token (probably 0) used when training your model.
        log_probs_input (bool): False if your model has passed through a softmax and output
            probabilities sum to 1.
    """

    def __init__(
        self,
        labels,
        model_path=None,
        alpha=0,
        beta=0,
        cutoff_top_n=40,
        cutoff_prob=1.0,
        beam_width=100,
        num_processes=4,
        blank_id=0,
        log_probs_input=False,
    ):
        self._cutoff_top_n = cutoff_top_n
        self._beam_width = beam_width
        self._scorer = None
        self._num_processes = num_processes
        self._labels = list(labels)  # Ensure labels are a list
        self._num_labels = len(labels)
        self._blank_id = blank_id
        # The C extension expects an int flag rather than a Python bool.
        self._log_probs = 1 if log_probs_input else 0
        if model_path:
            self._scorer = ctc_decode.paddle_get_scorer(
                alpha, beta, model_path.encode(), self._labels, self._num_labels
            )
        self._cutoff_prob = cutoff_prob

    def decode(self, probs, states, is_eos_s, seq_lens=None):
        """
        Conducts the beamsearch on model outputs and returns results.

        Args:
            probs (Tensor): A rank 3 tensor representing model outputs.
                Shape is batch x num_timesteps x num_labels.
            states (Sequence[DecoderState]): sequence of decoding states with len equal
                to batch size.
            is_eos_s (Sequence[bool]): sequence of bools with len equal to batch size.
                Should hold False while chunks are still being pushed, and True once you
                pushed the last chunk and want the final answer.
            seq_lens (Tensor): A rank 1 tensor representing the sequence length of the items
                in the batch. Optional; if not provided the size of axis 1 (num_timesteps)
                of `probs` is used for all items.

        Returns:
            tuple: (beam_results, beam_scores, timesteps, out_lens)

            beam_results (Tensor): top-n beams, batchsize x num_beams x num_timesteps,
                still encoded as ints at this stage.
            beam_scores (Tensor): score of each beam, batchsize x num_beams.
            timesteps (Tensor): peak-probability timestep of each output character,
                batchsize x num_beams x num_timesteps.
            out_lens (Tensor): length of each beam, batchsize x num_beams.
        """
        probs = probs.cpu().float()
        batch_size, max_seq_len = probs.size(0), probs.size(1)
        if seq_lens is None:
            seq_lens = torch.IntTensor(batch_size).fill_(max_seq_len)
        else:
            seq_lens = seq_lens.cpu().int()
        scores = torch.FloatTensor(batch_size, self._beam_width).cpu().float()
        out_seq_len = torch.zeros(batch_size, self._beam_width).cpu().int()
        decode_fn = ctc_decode.paddle_beam_decode_with_given_state
        res_beam_results, res_timesteps = decode_fn(
            probs,
            seq_lens,
            self._num_processes,
            [state.state for state in states],
            is_eos_s,
            scores,
            out_seq_len,
        )
        res_beam_results = res_beam_results.int()
        res_timesteps = res_timesteps.int()
        return res_beam_results, scores, res_timesteps, out_seq_len

    def character_based(self):
        """True if the loaded LM is character based, None when no LM is loaded."""
        return ctc_decode.is_character_based(self._scorer) if self._scorer else None

    def max_order(self):
        """Max n-gram order of the loaded LM, None when no LM is loaded."""
        return ctc_decode.get_max_order(self._scorer) if self._scorer else None

    def dict_size(self):
        """Dictionary size of the loaded LM, None when no LM is loaded."""
        return ctc_decode.get_dict_size(self._scorer) if self._scorer else None

    @staticmethod
    def reset_state(state):
        """Release a native decoder state.

        Fix: declared as a static method. The original instance-method
        definition had no `self` parameter, so `decoder.reset_state(s)` raised
        a TypeError and `decoder.reset_state()` would have passed the decoder
        object itself to the release call.
        """
        ctc_decode.paddle_release_state(state)
class DecoderState:
    """
    Class used to maintain one chunk-stream's decoding state across calls to
    :meth:`OnlineCTCBeamDecoder.decode`, one state per unique audio source.

    Note: after using a state you should delete it; do not reuse it.

    Args:
        decoder (OnlineCTCBeamDecoder): decoder you will use for decoding.
    """

    def __init__(self, decoder):
        # Handle to the native (C extension) decoder state.
        self.state = ctc_decode.paddle_get_decoder_state(
            decoder._labels,
            decoder._beam_width,
            decoder._cutoff_prob,
            decoder._cutoff_top_n,
            decoder._blank_id,
            decoder._log_probs,
            decoder._scorer,
        )

    def __del__(self):
        # Fix: use getattr — __del__ can run even when __init__ raised before
        # `state` was assigned, which previously raised AttributeError.
        state = getattr(self, "state", None)
        if state is not None:
            ctc_decode.paddle_release_state(state)
| 11,957 | 42.802198 | 121 | py |
torchqg | torchqg-master/main.py | import sys
import math
import torch
import torch.nn as nn
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from qg import to_spectral, to_physical, QgModel
from sgs import MLdiv, Constant
import workflow
# Use Computer Modern for math text in figures.
plt.rcParams.update({'mathtext.fontset':'cm'})
# A framework for the evaluation of turbulence closures used in mesoscale ocean large-eddy simulations.
# Graham and Ringler (2013).
def t_unit():
    """Characteristic time unit used for non-dimensionalization (seconds)."""
    return 1.2e6
def l_unit():
    """Characteristic length unit used for non-dimensionalization (meters)."""
    return 504e4 / math.pi
# Domain size and DNS resolution (non-dimensional 2*pi box).
Lx = 2*math.pi
Ly = 2*math.pi
Nx = 512
Ny = 512
# Timestep and dissipation coefficients, non-dimensionalized with t_unit/l_unit.
dt = 480 / t_unit() # 480s
mu = 1.25e-8 / l_unit()**(-1) # 1.25e-8m^-1
nu = 352 / l_unit()**2 / t_unit()**(-1) # 22m^2s^-1 for the simulation (2048^2)
# Wind stress forcing.
def Fs(i, sol, dt, t, grid):
    """Time-dependent wind-stress forcing, band-passed to wavenumbers 3..5
    and rescaled to a fixed energy-injection rate."""
    # Slowly drifting phases in x and y.
    phi_x = math.pi * math.sin(1.2e-6 / t_unit()**(-1) * t)
    phi_y = math.pi * math.sin(1.2e-6 * math.pi / t_unit()**(-1) * t / 3)
    # Physical-space forcing pattern on the model grid.
    pattern = torch.cos(4 * grid.y + phi_y).view(grid.Ny, 1) - torch.cos(4 * grid.x + phi_x).view(1, grid.Nx)
    fh = to_spectral(pattern)
    # Keep only the annulus 3 <= |k| <= 5 and zero the mean mode.
    wavenumber = torch.sqrt(grid.krsq)
    fh[wavenumber < 3.0] = 0
    fh[wavenumber > 5.0] = 0
    fh[0, 0] = 0
    # Rescale so the injected rate matches e0.
    e0 = 1.75e-18 / t_unit()**(-3)
    ei = 0.5 * grid.int_sq(fh) / (grid.Lx * grid.Ly)
    fh *= torch.sqrt(e0 / ei)
    return fh
# Topographic PV of the DNS (flat here, kept as a differentiable tensor).
eta = torch.zeros([Ny, Nx], dtype=torch.float64, requires_grad=True)
# High res model.
h = QgModel(
    name='\\mathcal{F}',
    Nx=Nx,
    Ny=Ny,
    Lx=Lx,
    Ly=Ly,
    dt=dt,
    t0=0.0,
    B=0.0,     # Planetary vorticity y-gradient
    mu=mu,     # Linear drag
    nu=nu,     # Viscosity coefficient
    nv=1,      # Hyperviscous order (nv=1 is viscosity)
    eta=eta,   # Topographic PV
    source=Fs  # Source term
)
# Initial conditions.
h.init_randn(0.01, [3.0, 5.0])
# Set up spectral filter kernel.
h.kernel = h.grid.cutoff
print(h)
# Low res model(s).
scale = 4
Nxl = int(Nx / scale)
Nyl = int(Ny / scale)
eta_m = torch.zeros([Nyl, Nxl], dtype=torch.float64, requires_grad=True)
# No model.
m1 = QgModel(
    name='',
    Nx=Nxl,
    Ny=Nyl,
    Lx=Lx,
    Ly=Ly,
    dt=dt,
    t0=0.0,
    B=0.0,               # Planetary vorticity y-gradient
    mu=mu,               # Linear drag
    nu=nu,               # Viscosity coefficient
    nv=1,                # Hyperviscous order (nv=1 is viscosity)
    eta=eta_m,           # Topographic PV
    source=Fs,           # Source term
    sgs=Constant(c=0.0)  # Subgrid-scale term (replace with yours)
)
# Initialize from DNS vorticity field.
m1.pde.sol = h.filter(m1.grid, scale, h.pde.sol)
# Will produce two images in folder `output` with the final fields after the
# configured number of iterations.
workflow.workflow(
    dir='output/',
    name='geo',
    iters=10000,  # Model iterations
    steps=100,    # Discrete steps
    scale=scale,  # Kernel scale
    diags=[       # Diagnostics
        workflow.diag_fields,
    ],
    system=h,     # DNS system
    models=[],
    #models=[m1]  # LES without model
)
| 2,721 | 21.130081 | 103 | py |
torchqg | torchqg-master/sgs.py | import math
import torch
import qg
class Constant:
    """Trivial SGS closure that predicts a constant field everywhere."""

    def __init__(self, c=0.0):
        # Constant value returned for every grid point.
        self.c = c

    def predict(self, m, it, sol, grid):
        """Return a tensor shaped like `sol` filled with the constant `c`;
        `m`, `it` and `grid` are accepted for interface compatibility."""
        return torch.full_like(sol, self.c)
class MLdiv:
    """SGS closure backed by a trained neural network predicting the SGS term."""

    def __init__(self, model):
        self.model = model
        # Inference only: freeze dropout/batch-norm behavior.
        self.model.eval()

    def predict(self, m, it, sol, grid):
        """Predict the spectral SGS term from the current spectral vorticity `sol`."""
        # Recover vorticity q and streamfunction p in physical space.
        qh = sol.clone()
        ph = -qh * grid.irsq
        q = qg.to_physical(qh)
        p = qg.to_physical(ph)
        # Network input: stacked (q, p) channels with a batch axis, M({i}) ~ r.
        net_in = torch.stack((q, p), dim=0).unsqueeze(0).to(torch.float32)
        r = self.model(net_in).view(grid.Ny, grid.Nx)
        return qg.to_spectral(r)
| 626 | 18 | 75 | py |
torchqg | torchqg-master/learn.py | import os
import torch
import numpy as np
import qg
# Useful for a posteriori learning.
class DynamicalDataset(torch.utils.data.Dataset):
    """Dataset that slices stored trajectories into overlapping windows of
    `steps + 1` snapshots; useful for a posteriori learning."""

    def __init__(self, inputs, labels, steps, iters, dt, t0):
        self.inputs = inputs
        self.labels = labels
        self.iters = iters
        self.dt = dt
        self.t0 = t0
        self.adapt(steps)

    def __len__(self):
        # One sample per window, per trajectory.
        return int(self.inputs.shape[0] * self.samples)

    def __getitem__(self, idx):
        # Map the flat index to (trajectory, window-within-trajectory).
        trajectory = int(idx / self.samples)
        window = int(idx % self.samples)
        start = window * self.steps
        stop = (window + 1) * self.steps
        # Physical time at the start of the window, relative to t0.
        t = start * self.dt
        return (
            self.t0 + t,
            self.inputs[trajectory, start:stop + 1],
            self.labels[trajectory, start:stop + 1],
        )

    def adapt(self, steps):
        """Re-window the trajectories with a new chunk length."""
        self.steps = steps
        self.samples = int(self.iters / self.steps) - 1
def training(device, net, dataloader, loss, opti, rate, stat):
    """Run one optimization epoch over `dataloader`.

    Appends the epoch-mean loss to `stat`; steps both the optimizer `opti`
    and the LR scheduler `rate` after every batch.
    """
    net.train()
    total = 0.0
    for batch in dataloader:
        opti.zero_grad()
        data, labs = batch[0].to(device), batch[1].to(device)
        # Channels 0 and 1 of the input are (q, p); re-stack for the network.
        pred = net(torch.stack((data[:, 0], data[:, 1]), dim=1))
        grad = loss(data, data, pred, labs)
        grad.backward()
        opti.step()
        rate.step()
        total += grad.item()
    stat.append(total / len(dataloader))
def valididation(device, net, dataloader, loss, stat):
    """Run one evaluation epoch (no gradient updates) and append the mean loss
    to `stat`.

    NOTE: the misspelled name is kept as-is — it is the module's public
    interface (referenced by training drivers).
    """
    net.eval()
    total = 0.0
    with torch.no_grad():
        for batch in dataloader:
            data, labs = batch[0].to(device), batch[1].to(device)
            # Channels 0 and 1 of the input are (q, p); re-stack for the network.
            pred = net(torch.stack((data[:, 0], data[:, 1]), dim=1))
            total += loss(data, data, pred, labs).item()
    stat.append(total / len(dataloader))
# A priori learning strategy
def apriori(device, dir, net, train_loader, valid_loader, loss, opti, rate, epochs=1000):
    """A priori learning: alternate training and validation epochs, logging
    losses to `<dir><net.name>/losses.csv` and checkpointing the network.

    Fix: the epoch loop previously called undefined names `train`/`valid`;
    this module defines `training`/`valididation`, so the first epoch raised
    a NameError.
    """
    if not os.path.exists(dir + net.name):
        os.mkdir(dir + net.name)
    train_loss = []
    valid_loss = []
    for epoch in range(1, epochs + 1):
        training(device, net, train_loader, loss, opti, rate, train_loss)
        valididation(device, net, valid_loader, loss, valid_loss)
        if epoch % 1 == 0:
            print('Epoch {} (training loss = {}, validation loss = {})'.format(epoch, train_loss[-1], valid_loss[-1]), flush=True)
        if epoch % 10 == 0:
            # Persist the loss history and a full model checkpoint.
            np.savetxt(dir + net.name + '/losses.csv', np.column_stack((train_loss, valid_loss)), delimiter=",", fmt='%s')
            torch.save(net, dir + net.name + '/weights.pyt')
    print('Finished training, with last progress loss = {}'.format(train_loss[-1]))
# A posteriori learning strategy
def aposteriori(device, dir, net, dyn, iters, dataloader, loss, opti, rate, epochs=5, epochs_full=2):
    """Train `net` by differentiating through rollouts of the dynamical model
    `dyn`. The rollout length ramps up over `epochs` epochs (curriculum),
    then `epochs_full` additional epochs run at the full `iters` length.
    """
    if not os.path.exists(dir + net.name):
        os.mkdir(dir + net.name)
    notify_freq = int(len(dataloader) / 10)
    time_loss = []
    temp_loss = 0
    temp_cnt = 0

    def timestep(m, cur, it):
        # Per-step visitor: record the resolved fields and the network's SGS
        # prediction; closes over states_i/states_o (re-bound each epoch).
        q, p, u, v = m.update()
        states_i[it, 0] = q
        states_i[it, 1] = p
        states_i[it, 2] = u
        states_i[it, 3] = v
        # Predict SGS from NN
        r = net(torch.stack((q, p), dim=0).unsqueeze(0).to(torch.float32)).squeeze(0)
        states_o[it, 0] = r[0]
        return None

    ck = int(iters / epochs)
    net.train()
    for epoch in range(1, epochs + epochs_full + 1):
        # Rollout length for this epoch, clamped to [1, iters].
        it = max(1, min(iters, ck * epoch))
        dataloader.dataset.adapt(it)
        states_i = torch.zeros([it, 4, dyn.grid.Ny, dyn.grid.Nx], requires_grad=True).to(device)
        states_o = torch.zeros([it, 2, dyn.grid.Ny, dyn.grid.Nx], requires_grad=True).to(device)
        for step, batch in enumerate(dataloader):
            # Cut the autograd graph from the previous optimization step.
            states_i.detach_()
            states_o.detach_()
            opti.zero_grad()
            dyn.zero_grad()
            t, data, labs = batch[0], batch[1].squeeze(0).to(device), batch[2].squeeze(0).to(device)
            # Start from DNS
            # bar(q)(t) = bar(q(t))
            dyn.pde.sol = qg.to_spectral(data[0, 0])
            dyn.pde.cur.t = t
            # Run dynamical model
            # bar(q)(t + ndt)
            dyn.run(it, timestep, invisible=True)
            if dyn.cfl() < 1:
                # Compute loss
                grad = loss(states_i, data[1:it+1], states_o, labs[1:it+1])
                grad.backward()
                opti.step()
                rate.step()
                temp_loss += grad.item() / it
                temp_cnt += 1
            # No validation yet
            if step % notify_freq == 0:
                # NOTE(review): if the first window of an epoch fails the CFL
                # check, temp_cnt is 0 here and this divides by zero — confirm.
                time_loss.append(temp_loss / temp_cnt)
                temp_loss = 0
                temp_cnt = 0
                print('Epoch {} with {} iters (step {}, loss = {})'.format(epoch, it, step, time_loss[-1]), flush=True)
        if epoch % 1 == 0:
            # Persist the loss history and a full model checkpoint every epoch.
            np.savetxt(dir + net.name + '/losses.csv', time_loss, delimiter=",", fmt='%s')
            torch.save(net, dir + net.name + '/weights.pyt')
    print('Finished training, with last progress loss = {}'.format(time_loss[-1]))
| 4,689 | 26.588235 | 124 | py |
torchqg | torchqg-master/qg.py | import math
import tqdm
import h5py
import torch
import torch.fft
import matplotlib
import matplotlib.pyplot as plt
from src.grid import TwoGrid
from src.timestepper import ForwardEuler, RungeKutta2, RungeKutta4
from src.pde import Pde, Eq
# Run on GPU when available; all model tensors are allocated on this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('device =', device)
def to_spectral(y):
    """Real-to-complex FFT over all dimensions, forward-normalized (1/N)."""
    return torch.fft.rfftn(y, norm='forward')
def to_physical(y):
    """Inverse of the forward-normalized real FFT: spectral -> physical space."""
    return torch.fft.irfftn(y, norm='forward')
class QgModel:
    """Pseudo-spectral barotropic quasi-geostrophic model.

    The prognostic variable is the spectral potential vorticity, stored in
    `self.pde.sol`; time stepping is fourth-order Runge-Kutta.
    """

    def __init__(self, name, Nx, Ny, Lx, Ly, dt, t0, B, mu, nu, nv, eta, source=None, kernel=None, sgs=None):
        self.name = name
        self.B = B      # Planetary vorticity y-gradient
        self.mu = mu    # Linear drag
        self.nu = nu    # (Hyper)viscosity coefficient
        self.nv = nv    # Hyperviscous order (nv=1 is plain viscosity)
        self.eta = eta.to(device)  # Topographic PV
        self.grid = TwoGrid(device, Nx=Nx, Ny=Ny, Lx=Lx, Ly=Ly)
        if sgs:
            # use 3/2 rule: LES runs with an enlarged dealiasing grid.
            self.eq = Eq(grid=self.grid, linear_term=self.linear_term(self.grid), nonlinear_term=self.nonlinear_les)
            self.da = TwoGrid(device, Nx=int((3./2.)*Nx), Ny=int((3./2.)*Ny), Lx=Lx, Ly=Ly, dealias=1/3)
        else:
            # use 2/3 rule (dealiasing handled by grid.dealias in nonlinear_dns).
            self.eq = Eq(grid=self.grid, linear_term=self.linear_term(self.grid), nonlinear_term=self.nonlinear_dns)
        self.stepper = RungeKutta4(eq=self.eq)
        self.pde = Pde(dt=dt, t0=t0, eq=self.eq, stepper=self.stepper)
        self.source = source
        self.kernel = kernel
        self.sgs = sgs

    def __str__(self):
        return """Qg model
Grid: [{nx},{ny}] in [{lx},{ly}]
μ: {mu}
ν: {nu}
β: {beta}
dt: {dt}
""".format(
            nx=self.grid.Nx,
            ny=self.grid.Ny,
            lx=self.grid.Lx,
            ly=self.grid.Ly,
            mu=self.mu,
            nu=self.nu,
            beta=self.B,
            dt=self.pde.cur.dt)

    def nonlinear_dns(self, i, S, sol, dt, t, grid):
        """Write the fully resolved advection term -div(u(q+eta)) into S (spectral)."""
        qh = sol.clone()
        ph = -qh * grid.irsq
        uh = -1j * grid.ky * ph
        vh = 1j * grid.kr * ph
        q = to_physical(qh)
        u = to_physical(uh)
        v = to_physical(vh)
        qe = q + self.eta
        uq = u * qe
        vq = v * qe
        uqh = to_spectral(uq)
        vqh = to_spectral(vq)
        S[:] = -1j * grid.kr * uqh - 1j * grid.ky * vqh
        grid.dealias(S[:])
        if (self.source):
            S[:] += self.source(
                i,
                sol,
                dt,
                t,
                grid)

    def nonlinear_les(self, i, S, sol, dt, t, grid):
        """LES advection term: products are formed on the enlarged 3/2 grid
        (self.da) and reduced back; then SGS and source terms are added."""
        qh = sol.clone()
        ph = -qh * grid.irsq
        uh = -1j * grid.ky * ph
        vh = 1j * grid.kr * ph
        eh = to_spectral(self.eta)
        qhh = self.da.increase(qh)
        uhh = self.da.increase(uh)
        vhh = self.da.increase(vh)
        ehh = self.da.increase(eh)
        q = to_physical(qhh)
        u = to_physical(uhh)
        v = to_physical(vhh)
        e = to_physical(ehh)
        qe = q + e
        uq = u * qe
        vq = v * qe
        uqhh = to_spectral(uq)
        vqhh = to_spectral(vq)
        uqh = grid.reduce(uqhh)
        vqh = grid.reduce(vqhh)
        S[:] = -1j * grid.kr * uqh - 1j * grid.ky * vqh
        if (self.sgs):
            S[:] += self.sgs.predict(
                self,
                i,
                sol,
                grid)
        if (self.source):
            S[:] += self.source(
                i,
                sol,
                dt,
                t,
                grid)

    def linear_term(self, grid):
        """Spectral linear operator: drag, (hyper)viscosity and beta term."""
        Lc = -self.mu - self.nu * grid.krsq**self.nv - 1j * self.B * grid.kr * grid.irsq
        Lc[0, 0] = 0
        return Lc

    # Flow with random gaussian energy only in the wavenumbers range
    def init_randn(self, energy, wavenumbers):
        K = torch.sqrt(self.grid.krsq)
        k = self.grid.kr.repeat(self.grid.Ny, 1)
        qih = torch.randn(self.pde.sol.size(), dtype=torch.complex128).to(device)
        # Zero modes outside the requested annulus and the kx=0 column.
        qih[K < wavenumbers[0]] = 0.0
        qih[K > wavenumbers[1]] = 0.0
        qih[k == 0.0] = 0.0
        # Rescale so the total kinetic energy equals `energy`.
        E0 = energy
        Ei = 0.5 * (self.grid.int_sq(self.grid.kr * self.grid.irsq * qih) + self.grid.int_sq(self.grid.ky * self.grid.irsq * qih)) / (self.grid.Lx * self.grid.Ly)
        qih *= torch.sqrt(E0 / Ei)
        self.pde.sol = qih

    def update(self):
        """Return physical-space fields (q, p, u, v) from the spectral state."""
        qh = self.pde.sol.clone()
        ph = -qh * self.grid.irsq
        uh = -1j * self.grid.ky * ph
        vh = 1j * self.grid.kr * ph
        # Potential vorticity
        q = to_physical(qh)
        # Streamfunction
        p = to_physical(ph)
        # x-axis velocity
        u = to_physical(uh)
        # y-axis velocity
        v = to_physical(vh)
        return q, p, u, v

    def J(self, grid, qh):
        """Spectral advection term div(u q) of the field qh."""
        ph = -qh * grid.irsq
        uh = -1j * grid.ky * ph
        vh = 1j * grid.kr * ph
        q = to_physical(qh)
        u = to_physical(uh)
        v = to_physical(vh)
        uq = u * q
        vq = v * q
        uqh = to_spectral(uq)
        vqh = to_spectral(vq)
        J = 1j * grid.kr * uqh + 1j * grid.ky * vqh
        return J

    def R(self, grid, scale):
        """Exact SGS forcing of the current solution, on the target `grid`."""
        return self.R_field(grid, scale, self.pde.sol)

    def R_field(self, grid, scale, yh):
        return grid.div(torch.stack(self.R_flux(grid, scale, yh), dim=0))

    def R_flux(self, grid, scale, yh):
        # SGS flux tau = (bar(u) bar(q)) - bar(u q), formed on the DNS grid
        # and reduced to the target grid.
        qh = yh.clone()
        ph = -qh * self.grid.irsq
        uh = -1j * self.grid.ky * ph
        vh = 1j * self.grid.kr * ph
        q = to_physical(qh)
        u = to_physical(uh)
        v = to_physical(vh)
        uq = u * q
        vq = v * q
        uqh = to_spectral(uq)
        vqh = to_spectral(vq)
        uqh_ = self.kernel(scale * self.grid.delta(), uqh)
        vqh_ = self.kernel(scale * self.grid.delta(), vqh)
        uh_ = self.kernel(scale * self.grid.delta(), uh)
        vh_ = self.kernel(scale * self.grid.delta(), vh)
        qh_ = self.kernel(scale * self.grid.delta(), qh)
        u_ = to_physical(uh_)
        v_ = to_physical(vh_)
        q_ = to_physical(qh_)
        u_q_ = u_ * q_
        v_q_ = v_ * q_
        u_q_h = to_spectral(u_q_)
        v_q_h = to_spectral(v_q_)
        tu = u_q_h - uqh_
        tv = v_q_h - vqh_
        return grid.reduce(tu), grid.reduce(tv)

    # Filters
    def filter(self, grid, scale, y):
        """Filter a spectral field with `self.kernel` and reduce to `grid`."""
        yh = y.clone()
        return grid.reduce(self.kernel(scale * self.grid.delta(), yh))

    def filter_physical(self, grid, scale, y):
        """Filter a physical field and return it in physical space on `grid`."""
        yh = to_spectral(y)
        yl = grid.reduce(self.kernel(scale * self.grid.delta(), yh))
        yl = to_physical(yl)
        return yl

    def run(self, iters, visit, update=False, invisible=False):
        """Advance `iters` steps, calling `visit(model, cur, it)` after each."""
        for it in tqdm.tqdm(range(iters), disable=invisible):
            self.pde.step(self)
            visit(self, self.pde.cur, it)
        if update:
            return self.update()

    # Diagnostics
    def energy(self, u, v):
        """Mean kinetic energy of physical velocity fields."""
        return 0.5 * torch.mean(u**2 + v**2)

    def enstrophy(self, q):
        """Mean enstrophy of the physical vorticity field."""
        return 0.5 * torch.mean(q**2)

    def cfl(self):
        """Advective CFL number of the current state."""
        _, _, u, v = self.update()
        return (u.abs().max() * self.pde.cur.dt) / self.grid.dx + (v.abs().max() * self.pde.cur.dt) / self.grid.dy

    def spectrum(self, y):
        """Shell-averaged isotropic spectrum of each spectral field in `y`."""
        K = torch.sqrt(self.grid.krsq)
        d = 0.5
        k = torch.arange(1, int(self.grid.kcut + 1))
        m = torch.zeros(k.size())
        e = [torch.zeros(k.size()) for _ in range(len(y))]
        for ik in range(len(k)):
            n = k[ik]
            # Modes inside the annulus |K - n| < d.
            i = torch.nonzero((K < (n + d)) & (K > (n - d)), as_tuple=True)
            m[ik] = i[0].numel()
            for j, yj in enumerate(y):
                e[j][ik] = torch.sum(yj[i]) * k[ik] * math.pi / (m[ik] - d)
        return k, e

    def invariants(self, qh):
        """Spectra of the quadratic invariants (energy, enstrophy)."""
        ph = -qh * self.grid.irsq
        uh = -1j * self.grid.ky * ph
        vh = 1j * self.grid.kr * ph
        # kinetic energy
        e = torch.abs(uh)**2 + torch.abs(vh)**2
        # enstrophy
        z = torch.abs(qh)**2
        k, [ek, zk] = self.spectrum([e, z])
        return k, ek, zk

    def fluxes(self, R, qh):
        """Spectral enstrophy transfer: resolved rate and modeled (SGS) rate."""
        # resolved rate
        sh = -torch.conj(qh) * self.J(self.grid, qh)
        # modeled rate
        lh = torch.conj(qh) * R
        k, [sk, lk] = self.spectrum([torch.real(sh), torch.real(lh)])
        return k, sk, lk

    # Data
    def save(self, name):
        """Write the physical-space vorticity field to an HDF5 file."""
        hf = h5py.File(name, 'w')
        # Fix: the original referenced `self.p_`, an attribute that is never
        # defined on this class; the spectral state lives in `self.pde.sol`.
        hf.create_dataset('q', data=to_physical(self.pde.sol).cpu().detach())
        hf.close()

    def load(self, name):
        """Load a physical vorticity field and embed it into this model's grid."""
        hf = h5py.File(name, 'r')
        fq = hf.get('q')
        sq = to_spectral(torch.from_numpy(fq[:]).to(device))
        # Copy first wavenumbers
        self.pde.sol = self.grid.increase(sq)
        hf.close()

    def zero_grad(self):
        """Detach the stepper's work buffers from the autograd graph."""
        self.stepper.zero_grad()
| 7,785 | 23.561514 | 158 | py |
torchqg | torchqg-master/workflow.py | import math
import os
import tqdm
import torch
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import qg
# Figure defaults: Computer Modern math text, minor ticks on both axes.
plt.rcParams.update({'mathtext.fontset':'cm'})
plt.rcParams.update({'xtick.minor.visible':True})
plt.rcParams.update({'ytick.minor.visible':True})
def workflow(
    dir,
    name,
    iters,
    steps,
    scale,
    diags,
    system,
    models,
    dump=False,
):
    """Co-run a DNS `system` and a list of LES `models`, store `steps` evenly
    spaced snapshots of each, run the `diags` plotting callbacks, and
    optionally dump all stored fields to HDF5.

    The DNS advances `iters * scale` steps; each LES advances `iters` steps
    (one LES step per `scale` DNS steps).
    """
    t0 = system.pde.cur.t
    store_les = int(iters / steps)
    store_dns = store_les * scale
    Nx = system.grid.Nx
    Ny = system.grid.Ny
    Nxl = int(Nx / scale)
    Nyl = int(Ny / scale)
    if models:
        sgs_grid = models[-1].grid
    # Filtered DNS
    fdns = torch.zeros([steps, 5, Nyl, Nxl], dtype=torch.float64)
    # DNS
    dns = torch.zeros([steps, 4, Ny, Nx], dtype=torch.float64)
    # LES
    les = {}
    for m in models:
        les[m.name] = torch.zeros([steps, 5, Nyl, Nxl], dtype=torch.float64)
    time = torch.zeros([steps])

    def visitor_dns(m, cur, it):
        # High res: store a snapshot every `store_dns` steps.
        if it % store_dns == 0:
            i = int(it / store_dns)
            q, p, u, v = m.update()
            # Exact sgs (only needed when there are LES models to compare to).
            if models:
                r = m.R(sgs_grid, scale)
                fdns[i, 0] = qg.to_physical(r)
                fdns[i, 1] = m.filter_physical(sgs_grid, scale, q).view(1, Nyl, Nxl)
                fdns[i, 2] = m.filter_physical(sgs_grid, scale, p).view(1, Nyl, Nxl)
                fdns[i, 3] = m.filter_physical(sgs_grid, scale, u).view(1, Nyl, Nxl)
                fdns[i, 4] = m.filter_physical(sgs_grid, scale, v).view(1, Nyl, Nxl)
            dns[i] = torch.stack((q, p, u, v))
            # step time
            time[i] = cur.t - t0
        return None

    def visitor_les(m, cur, it):
        # Low res: store a snapshot every `store_les` steps.
        if it % store_les == 0:
            i = int(it / store_les)
            q, p, u, v = m.update()
            # Predicted sgs
            if m.sgs:
                r = m.sgs.predict(m, 0, m.pde.sol, m.grid)
            else:
                # NOTE(review): this zero field is already physical-shaped but
                # is still passed through qg.to_physical below — confirm the
                # no-sgs path is ever exercised with matching shapes.
                r = torch.zeros([Nyl, Nxl], dtype=torch.float64)
            les[m.name][i] = torch.stack((qg.to_physical(r), q, p, u, v))
        return None

    if not os.path.exists(dir):
        os.mkdir(dir)
    with torch.no_grad():
        for it in tqdm.tqdm(range(iters * scale)):
            system.pde.step(system)
            visitor_dns(system, system.pde.cur, it)
            for m in models:
                if it % scale == 0:
                    m.pde.step(m)
                    visitor_les(m, m.pde.cur, it / scale)
    for diag in diags:
        diag(
            dir,
            name,
            scale,
            time,
            system,
            models,
            dns=dns,
            fdns=fdns,
            les=les
        )
    if dump:
        # Fix: h5py was never imported by this module, so dump=True raised a
        # NameError; import it locally since it is only needed here.
        import h5py
        hf = h5py.File(os.path.join(dir, name + '_dump.h5'), 'w')
        hf.create_dataset('time', data=time.detach().numpy())
        hf.create_dataset(system.name + '_r', data=fdns[:, 0].detach().numpy())
        hf.create_dataset(system.name + '_q', data=fdns[:, 1].detach().numpy())
        hf.create_dataset(system.name + '_p', data=fdns[:, 2].detach().numpy())
        hf.create_dataset(system.name + '_u', data=fdns[:, 3].detach().numpy())
        hf.create_dataset(system.name + '_v', data=fdns[:, 4].detach().numpy())
        for m in models:
            hf.create_dataset(m.name + '_r', data=les[m.name][:, 0].detach().numpy())
            hf.create_dataset(m.name + '_q', data=les[m.name][:, 1].detach().numpy())
            hf.create_dataset(m.name + '_p', data=les[m.name][:, 2].detach().numpy())
            hf.create_dataset(m.name + '_u', data=les[m.name][:, 3].detach().numpy())
            hf.create_dataset(m.name + '_v', data=les[m.name][:, 4].detach().numpy())
        hf.close()
def diag_fields(dir, name, scale, time, system, models, dns, fdns, les):
    """Render the final DNS snapshot to `<dir>/<name>_dns.png` and, when LES
    models are present, a filtered-DNS vs. models comparison to
    `<dir>/<name>_fields.png`.
    """
    # Plotting
    cols = 1
    rows = 4
    # One extra (narrow) column for the shared colorbars.
    m_fig, m_axs = plt.subplots(
        nrows=rows,
        ncols=cols + 1,
        figsize=(cols * 2.5 + 0.5, rows * 2.5),
        constrained_layout=True,
        gridspec_kw={"width_ratios": np.append(np.repeat(rows, cols), 0.1)}
    )
    # DNS: final snapshot of (q, p, u, v), one row per field.
    m_fig.colorbar(m_axs[0, 0].contourf(system.grid.x.cpu().detach(), system.grid.y.cpu().detach(), dns[-1, 0], cmap='bwr', levels=100), cax=m_axs[0, 1])
    m_fig.colorbar(m_axs[1, 0].contourf(system.grid.x.cpu().detach(), system.grid.y.cpu().detach(), dns[-1, 1], cmap='bwr', levels=100), cax=m_axs[1, 1])
    m_fig.colorbar(m_axs[2, 0].contourf(system.grid.x.cpu().detach(), system.grid.y.cpu().detach(), dns[-1, 2], cmap='bwr', levels=100), cax=m_axs[2, 1])
    m_fig.colorbar(m_axs[3, 0].contourf(system.grid.x.cpu().detach(), system.grid.y.cpu().detach(), dns[-1, 3], cmap='bwr', levels=100), cax=m_axs[3, 1])
    m_axs[0, 0].set_ylabel(r'$\omega$', fontsize=20)
    m_axs[1, 0].set_ylabel(r'$\psi$', fontsize=20)
    m_axs[2, 0].set_ylabel(r'$u_{x}$', fontsize=20)
    m_axs[3, 0].set_ylabel(r'$u_{y}$', fontsize=20)
    m_axs[3, 0].set_xlabel(r'$\mathcal{M}' + system.name + '$', fontsize=20)
    m_fig.savefig(os.path.join(dir, name + '_dns.png'), dpi=300)
    plt.show()
    plt.close(m_fig)
    # Nothing more to draw without LES models.
    if not models:
        return
    cols = len(models) + 1
    rows = 5
    m_fig, m_axs = plt.subplots(
        nrows=rows,
        ncols=cols + 1,
        figsize=(cols * 2.5 + 0.5, rows * 2.5),
        constrained_layout=True,
        gridspec_kw={"width_ratios": np.append(np.repeat(rows, cols), 0.1)}
    )
    # Symmetric color ranges taken from the filtered DNS, shared by all columns.
    span_r = max(fdns[-1, 0].max(), abs(fdns[-1, 0].min()))
    span_q = max(fdns[-1, 1].max(), abs(fdns[-1, 1].min()))
    span_p = max(fdns[-1, 2].max(), abs(fdns[-1, 2].min()))
    span_u = max(fdns[-1, 3].max(), abs(fdns[-1, 3].min()))
    span_v = max(fdns[-1, 4].max(), abs(fdns[-1, 4].min()))

    def plot_fields(i, label, grid, data):
        # One column of the comparison figure: (q, p, u, v, R) for `data`.
        c0 = m_axs[0, i].contourf(grid.x.cpu().detach(), grid.y.cpu().detach(), data[-1, 1], vmax=span_q, vmin=-span_q, cmap='bwr', levels=100)
        c1 = m_axs[1, i].contourf(grid.x.cpu().detach(), grid.y.cpu().detach(), data[-1, 2], vmax=span_p, vmin=-span_p, cmap='bwr', levels=100)
        c2 = m_axs[2, i].contourf(grid.x.cpu().detach(), grid.y.cpu().detach(), data[-1, 3], vmax=span_u, vmin=-span_u, cmap='bwr', levels=100)
        c3 = m_axs[3, i].contourf(grid.x.cpu().detach(), grid.y.cpu().detach(), data[-1, 4], vmax=span_v, vmin=-span_v, cmap='bwr', levels=100)
        c4 = m_axs[4, i].contourf(grid.x.cpu().detach(), grid.y.cpu().detach(), data[-1, 0], vmax=span_r, vmin=-span_r, cmap='bwr', levels=100)
        if i == 0:
            # Colorbars only once, anchored to the first (filtered-DNS) column.
            m_fig.colorbar(c0, cax=m_axs[0, cols])
            m_fig.colorbar(c1, cax=m_axs[1, cols])
            m_fig.colorbar(c2, cax=m_axs[2, cols])
            m_fig.colorbar(c3, cax=m_axs[3, cols])
            m_fig.colorbar(c4, cax=m_axs[4, cols])
        m_axs[4, i].set_xlabel(label, fontsize=20)

    # Projected DNS
    plot_fields(0, r'$\overline{\mathcal{M}' + system.name + '}$', models[-1].grid, fdns)
    # LES
    for i, m in enumerate(models):
        data = les[m.name]
        plot_fields(i + 1, r'$\mathcal{M}_{' + m.name + '}$', m.grid, data)
    m_axs[0, 0].set_ylabel(r'$\omega$', fontsize=20)
    m_axs[1, 0].set_ylabel(r'$\psi$', fontsize=20)
    m_axs[2, 0].set_ylabel(r'$u_{x}$', fontsize=20)
    m_axs[3, 0].set_ylabel(r'$u_{y}$', fontsize=20)
    m_axs[4, 0].set_ylabel(r'$R(q)$', fontsize=20)
    m_fig.savefig(os.path.join(dir, name + '_fields.png'), dpi=300)
    plt.show()
    plt.close(m_fig)
| 6,958 | 33.112745 | 151 | py |
torchqg | torchqg-master/src/timestepper.py | import math
import torch
class ForwardEuler:
    """First-order explicit Euler step for d(sol)/dt = L*sol + N(sol)."""

    def __init__(self, eq):
        self.n = 1
        # Spectral work buffer for the combined right-hand side.
        self.S = torch.zeros(eq.dim, dtype=torch.complex128, requires_grad=True).to(eq.device)

    def zero_grad(self):
        # Detach the work buffer from any previous autograd graph.
        self.S.detach_()

    def step(self, m, sol, cur, eq, grid):
        """Advance `sol` in place by one step of size cur.dt."""
        dt, t = cur.dt, cur.t
        eq.nonlinear_term(0, self.S, sol, dt, t, grid)
        self.S += eq.linear_term*sol.clone()
        sol += dt*self.S
        cur.step()
class RungeKutta2:
    """Explicit midpoint (second-order Runge-Kutta) integrator."""

    def __init__(self, eq):
        self.n = 2
        # Spectral work buffers: predictor state and the two stage slopes.
        self.S = torch.zeros(eq.dim, dtype=torch.complex128, requires_grad=True).to(eq.device)
        self.rhs1 = torch.zeros(eq.dim, dtype=torch.complex128, requires_grad=True).to(eq.device)
        self.rhs2 = torch.zeros(eq.dim, dtype=torch.complex128, requires_grad=True).to(eq.device)

    def zero_grad(self):
        # Detach all work buffers from any previous autograd graph.
        for buf in (self.S, self.rhs1, self.rhs2):
            buf.detach_()

    def step(self, m, sol, cur, eq, grid):
        """Advance `sol` in place by one step of size cur.dt."""
        dt, t = cur.dt, cur.t
        # Stage 1: slope at the current state.
        eq.nonlinear_term(0, self.rhs1, sol, dt, t, grid)
        self.rhs1 += eq.linear_term*sol
        # Stage 2: slope at the midpoint predictor.
        self.S = sol + self.rhs1 * dt*0.5
        eq.nonlinear_term(1, self.rhs2, self.S, dt*0.5, t + dt*0.5, grid)
        self.rhs2 += eq.linear_term*self.S
        sol += dt*self.rhs2
        cur.step()
class RungeKutta4:
    """Classical fourth-order Runge-Kutta integrator."""

    def __init__(self, eq):
        self.n = 4
        # Spectral work buffers: stage state and the four stage slopes.
        self.S = torch.zeros(eq.dim, dtype=torch.complex128, requires_grad=True).to(eq.device)
        self.rhs1 = torch.zeros(eq.dim, dtype=torch.complex128, requires_grad=True).to(eq.device)
        self.rhs2 = torch.zeros(eq.dim, dtype=torch.complex128, requires_grad=True).to(eq.device)
        self.rhs3 = torch.zeros(eq.dim, dtype=torch.complex128, requires_grad=True).to(eq.device)
        self.rhs4 = torch.zeros(eq.dim, dtype=torch.complex128, requires_grad=True).to(eq.device)

    def zero_grad(self):
        # Detach all work buffers from any previous autograd graph.
        for buf in (self.S, self.rhs1, self.rhs2, self.rhs3, self.rhs4):
            buf.detach_()

    def step(self, m, sol, cur, eq, grid):
        """Advance `sol` in place by one step of size cur.dt."""
        dt, t = cur.dt, cur.t
        # Stage 1: slope at the current state.
        eq.nonlinear_term(0, self.rhs1, sol, dt, t, grid)
        self.rhs1 += eq.linear_term*sol
        # Stage 2: slope at the first midpoint predictor.
        self.S = sol + self.rhs1 * dt*0.5
        eq.nonlinear_term(1, self.rhs2, self.S, dt*0.5, t + dt*0.5, grid)
        self.rhs2 += eq.linear_term*self.S
        # Stage 3: slope at the second midpoint predictor.
        self.S = sol + self.rhs2 * dt*0.5
        eq.nonlinear_term(2, self.rhs3, self.S, dt*0.5, t + dt*0.5, grid)
        self.rhs3 += eq.linear_term*self.S
        # Stage 4: slope at the endpoint predictor.
        self.S = sol + self.rhs3 * dt
        eq.nonlinear_term(3, self.rhs4, self.S, dt, t + dt, grid)
        self.rhs4 += eq.linear_term*self.S
        # Weighted combination (1/6, 1/3, 1/3, 1/6).
        sol += dt*(self.rhs1/6.0 + self.rhs2/3.0 + self.rhs3/3.0 + self.rhs4/6.0)
        cur.step()
| 2,675 | 28.406593 | 93 | py |
torchqg | torchqg-master/src/grid.py | import math
import torch
import numpy as np
class TwoGrid:
    """Doubly-periodic 2D grid with spectral (real-FFT) calculus helpers.

    Wavenumber arrays are shaped for broadcasting against half-spectrum
    fields of shape (Ny, dk), where dk = Nx/2 + 1 is the number of modes
    retained by a real-to-complex transform along x.
    """

    def __init__(self, device, Nx, Ny, Lx, Ly, dealias=1/3):
        self.device = device
        self.Nx = Nx
        self.Ny = Ny
        self.Lx = Lx
        self.Ly = Ly
        self.size = Nx*Ny
        # Physical grid spacing.
        self.dx = Lx/Nx
        self.dy = Ly/Ny
        # Physical coordinates on [-L/2, L/2).
        self.x = torch.arange(start=-Lx/2, end=Lx/2, step=self.dx, dtype=torch.float64).to(device)
        self.y = torch.arange(start=-Ly/2, end=Ly/2, step=self.dy, dtype=torch.float64).to(device)
        # Number of modes along x retained by the real FFT.
        self.dk = int(Nx/2 + 1)
        # Angular wavenumbers; the Lx/(Nx*2*pi) sample spacing makes fftfreq
        # return k in radians per unit length.
        self.kx = torch.reshape(torch.from_numpy(np.fft.fftfreq(Nx, Lx/(Nx*2*math.pi))), (1, self.Nx)).to(device)
        self.ky = torch.reshape(torch.from_numpy(np.fft.fftfreq(Ny, Ly/(Ny*2*math.pi))), (self.Ny, 1)).to(device)
        self.kr = torch.reshape(torch.from_numpy(np.fft.rfftfreq(Nx, Lx/(Nx*2*math.pi))), (1, self.dk)).to(device)
        # Isotropic dealiasing cutoff used by dealias() below.
        self.kcut = math.sqrt(2) * (1 - dealias) * min(self.ky.max(), self.kr.max())
        # |k|^2 and its pseudo-inverse; the k=0 entry first becomes inf from
        # the division and is then zeroed so the mean mode is annihilated.
        self.krsq = self.kr**2 + self.ky**2
        self.irsq = 1.0 / self.krsq
        self.irsq[0, 0] = 0.0

    def grad(self, y):
        # Spectral gradient: multiplication by i*k in each direction.
        diffx = 1j * self.kr * y
        diffy = 1j * self.ky * y
        return torch.stack((diffx, diffy), dim=0)

    def div(self, y):
        # Spectral divergence of a 2-vector field stacked along dim 0.
        return 1j * self.kr * y[0] + 1j * self.ky * y[1]

    def laplacian(self, y):
        return self.div(self.grad(y))

    def curl(self, y):
        # Scalar (z-component) curl of a 2-vector field stacked along dim 0.
        dydx = 1j * self.kr * y[1]
        dxdy = 1j * self.ky * y[0]
        return dydx - dxdy

    def norm(self, y):
        # Pointwise magnitude of a vector field stacked along dim 0.
        return torch.linalg.norm(y, dim=0)

    def int_sq(self, y):
        # Sum of |y|^2 over the full spectrum: columns 1.. of the half
        # spectrum are counted twice for their conjugate-symmetric partners.
        # NOTE(review): result is scaled by Lx*Ly — confirm this matches the
        # Parseval normalization of the FFT convention used elsewhere.
        Y = torch.sum(torch.abs(y[:, 0])**2) + 2*torch.sum(torch.abs(y[:, 1:])**2)
        n = self.Lx * self.Ly
        return Y * n

    def int(self, y):
        # Sum of y scaled by the domain area.
        Y = torch.sum(y)
        n = self.Lx * self.Ly
        return Y * n

    def decay(self):
        # Wavenumber magnitude normalized by the per-axis Nyquist wavenumber.
        return torch.sqrt(torch.pow(self.kr * self.dx / math.pi, 2) + torch.pow(self.ky * self.dy / math.pi, 2))

    def grid_points(self):
        return torch.meshgrid(self.x, self.y)

    def delta(self):
        # Filter scale: square root of the grid-cell area.
        d = (self.Lx * self.Ly) / (self.Nx * self.Ny)
        d = d**0.5
        return d

    # Apply cutoff filter on y.
    def cutoff(self, delta, y):
        # Zero all modes with |k| >= pi/delta, in place; returns y.
        c = math.pi / delta
        y[torch.sqrt(self.krsq) >= c] = 0
        return y

    # Apply gaussian filter on y.
    def gaussian(self, delta, y):
        return y * torch.exp(-delta**2 * self.krsq / 24)

    # Discretize y on grid.
    def reduce(self, y):
        # Spectral truncation: copy the low-|ky| halves of a finer-grid field
        # into a (Ny, dk) buffer for this grid.
        # NOTE(review): in-place writes into a tensor created with
        # requires_grad=True — confirm callers run this under torch.no_grad()
        # or on detached inputs.
        y_r = y.size()
        z = torch.zeros([self.Ny, self.dk], dtype=torch.complex128, requires_grad=True).to(self.device)
        z[:int(self.Ny / 2), :self.dk] = y[:int(self.Ny / 2), :self.dk]
        z[int(self.Ny / 2):self.Ny, :self.dk] = y[y_r[0] - int(self.Ny / 2):y_r[0], :self.dk]
        return z

    # Discretize y on grid.
    def increase(self, y):
        # Spectral zero-padding: embed a coarser-grid field's modes into this
        # grid's (Ny, dk) spectrum, leaving the new high modes zero.
        y_r = y.size()
        z = torch.zeros([self.Ny, self.dk], dtype=torch.complex128, requires_grad=True).to(self.device)
        z[:int(y_r[0] / 2), :y_r[1]] = y[:int(y_r[0] / 2), :y_r[1]]
        z[self.Ny - int(y_r[0] / 2):self.Ny, :y_r[1]] = y[int(y_r[0] / 2):y_r[0], :y_r[1]]
        return z

    # Apply de-aliasing (isotropic, homogeneous).
    def dealias(self, y):
        # Zero modes beyond kcut in place; returns None. (The method name
        # shadows the constructor's `dealias` fraction argument.)
        y[torch.sqrt(self.krsq) > self.kcut] = 0
def aliased_wavenumbers(Nk, dk, dealias):
    """Return index ranges of the aliased modes for an Nk-point transform.

    Returns two (lo, hi) tuples: the aliased band of the full two-sided
    spectrum, and the aliased band of the one-sided (real FFT) spectrum of
    length ``dk``. ``dealias`` is the fraction of retained modes (e.g. 1/3
    for the classic two-thirds rule).
    """
    lo_frac = (1 - dealias) / 2
    hi_frac = (1 + dealias) / 2
    lo = 1 + math.floor(lo_frac * Nk)
    hi = math.ceil(hi_frac * Nk)
    return (lo, hi), (lo, dk)
| 3,284 | 28.330357 | 110 | py |
torchqg | torchqg-master/src/pde.py | import math
import torch
class Cursor:
    """Simulation clock: fixed step size ``dt``, current time ``t``, and
    the number of completed steps ``n``."""

    def __init__(self, dt, t0):
        self.dt = dt
        self.t = t0
        self.n = 0

    def step(self):
        """Advance the clock by one step of size ``dt``."""
        self.t = self.t + self.dt
        self.n = self.n + 1
class Eq:
    """Bundle of a spatial grid with the linear and nonlinear parts of a PDE.

    ``dim`` is taken from the linear term's tensor size and ``device`` from
    the grid, so downstream buffers can be allocated to match.
    """

    def __init__(self, grid, linear_term, nonlinear_term):
        self.grid = grid
        self.device = grid.device
        self.nonlinear_term = nonlinear_term
        self.linear_term = linear_term
        self.dim = linear_term.size()
class Pde:
    """A PDE problem: an equation, its solution buffer, a clock, and a stepper."""

    def __init__(self, dt, t0, eq, stepper):
        self.eq = eq
        self.grid = eq.grid
        self.device = eq.device
        self.stepper = stepper
        # Spectral solution buffer sized to the equation's dimensions.
        self.sol = torch.zeros(eq.dim, dtype=torch.complex128, requires_grad=True).to(self.device)
        # Clock starting at t0 with fixed step dt.
        self.cur = Cursor(dt, t0)

    def step(self, m):
        """Advance the solution by one time step using the configured stepper."""
        self.stepper.step(m, self.sol, self.cur, self.eq, self.grid)
| 771 | 19.864865 | 94 | py |
BadEncoder | BadEncoder-main/pretraining_encoder.py | import os
import argparse
import numpy as np
from PIL import Image
from torch.utils.data import DataLoader
from tqdm import tqdm
import json
import math
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
from models import get_encoder_architecture
from datasets import get_pretraining_dataset
from evaluation import knn_predict
# train for one epoch, we refer to the implementation from: https://github.com/leftthomas/SimCLR
def train(net, data_loader, train_optimizer, epoch, args):
    """Run one epoch of SimCLR contrastive (NT-Xent) pre-training.

    Args:
        net: encoder returning a (feature, projection) pair for a batch.
        data_loader: yields pairs of augmented views (im_1, im_2).
        train_optimizer: optimizer over net's parameters.
        epoch: current epoch number (logging only).
        args: needs .knn_t (temperature), .batch_size and .epochs.

    Returns:
        Mean training loss per sample over the epoch.
    """
    net.train()
    total_loss, total_num, train_bar = 0.0, 0, tqdm(data_loader)
    for im_1, im_2 in train_bar:
        im_1, im_2 = im_1.cuda(non_blocking=True), im_2.cuda(non_blocking=True)
        feature_1, out_1 = net(im_1)
        feature_2, out_2 = net(im_2)
        # [2*B, D]
        out = torch.cat([out_1, out_2], dim=0)
        # [2*B, 2*B] temperature-scaled pairwise similarities.
        sim_matrix = torch.exp(torch.mm(out, out.t().contiguous()) / args.knn_t)
        # Mask out the self-similarities on the diagonal.
        mask = (torch.ones_like(sim_matrix) - torch.eye(2 * args.batch_size, device=sim_matrix.device)).bool()
        # [2*B, 2*B-1]
        sim_matrix = sim_matrix.masked_select(mask).view(2 * args.batch_size, -1)
        # compute loss: positive-pair similarity over all non-self pairs.
        pos_sim = torch.exp(torch.sum(out_1 * out_2, dim=-1) / args.knn_t)
        # [2*B]
        pos_sim = torch.cat([pos_sim, pos_sim], dim=0)
        loss = (- torch.log(pos_sim / sim_matrix.sum(dim=-1))).mean()
        train_optimizer.zero_grad()
        loss.backward()
        train_optimizer.step()
        total_num += data_loader.batch_size
        total_loss += loss.item() * data_loader.batch_size
        # BUG FIX: read the learning rate from the optimizer passed in, not the
        # module-level `optimizer` global that only exists when run as a script.
        train_bar.set_description('Train Epoch: [{}/{}], lr: {:.6f}, Loss: {:.4f}'.format(epoch, args.epochs, train_optimizer.param_groups[0]['lr'], total_loss / total_num))
    return total_loss / total_num
# we use a knn monitor to check the performance of the pre-trained image encoder by following the implementation: https://colab.research.google.com/github/facebookresearch/moco/blob/colab-notebook/colab/moco_cifar10_demo.ipynb
def test(net, memory_data_loader, test_data_clean_loader, epoch, args):
    """kNN-monitor evaluation of a (frozen) encoder.

    Builds an L2-normalized feature bank from `memory_data_loader`, then
    classifies each test image by weighted k-nearest-neighbour search in
    feature space. Returns top-1 accuracy in percent. `args` must provide
    `knn_k`, `knn_t` and `epochs`.
    """
    net.eval()
    classes = len(memory_data_loader.dataset.classes)
    total_top1, total_num, feature_bank = 0.0, 0, []
    with torch.no_grad():
        # generate feature bank from the memory set
        for data, target in tqdm(memory_data_loader, desc='Feature extracting'):
            feature = net(data.cuda(non_blocking=True))
            feature = F.normalize(feature, dim=1)
            feature_bank.append(feature)
        # [D, N] — transposed so knn_predict can matmul queries against it.
        feature_bank = torch.cat(feature_bank, dim=0).t().contiguous()
        # [N]
        feature_labels = torch.tensor(memory_data_loader.dataset.targets, device=feature_bank.device)
        # loop test data to predict the label by weighted knn search
        test_bar = tqdm(test_data_clean_loader)
        for data, target in test_bar:
            data, target = data.cuda(non_blocking=True), target.cuda(non_blocking=True)
            feature = net(data)
            feature = F.normalize(feature, dim=1)
            pred_labels = knn_predict(feature, feature_bank, feature_labels, classes, args.knn_k, args.knn_t)
            total_num += data.size(0)
            # Column 0 of pred_labels holds the top-1 prediction.
            total_top1 += (pred_labels[:, 0] == target).float().sum().item()
            test_bar.set_description('Test Epoch: [{}/{}] Acc@1:{:.2f}%'.format(epoch, args.epochs, total_top1 / total_num * 100))
    return total_top1 / total_num * 100
if __name__ == '__main__':
    # Entry point: pre-train a SimCLR image encoder and monitor it with a
    # kNN classifier on a held-out memory/test split.
    parser = argparse.ArgumentParser(description='Train SimCLR')
    parser.add_argument('--lr', default=0.001, type=float, help='initial learning rate')
    parser.add_argument('--batch_size', default=256, type=int, help='Number of images in each mini-batch')
    parser.add_argument('--epochs', default=1000, type=int, help='Number of sweeps over the dataset to train')
    parser.add_argument('--pretraining_dataset', type=str, default='cifar10')
    parser.add_argument('--results_dir', default='', type=str, metavar='PATH', help='path to save the results (default: none)')
    parser.add_argument('--seed', default=100, type=int, help='which seed the code runs on')
    parser.add_argument('--gpu', default='0', type=str, help='which gpu the code runs on')
    parser.add_argument('--knn-t', default=0.5, type=float, help='softmax temperature in kNN monitor')
    parser.add_argument('--knn-k', default=200, type=int, help='k in kNN monitor')
    # NOTE(review): this assigns a plain Python variable; it does NOT set the
    # CUDA_LAUNCH_BLOCKING environment variable (os.environ would be needed).
    CUDA_LAUNCH_BLOCKING=1
    args = parser.parse_args()
    # Set the random seeds and GPU information
    os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu
    random.seed(args.seed)
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    # Deterministic cuDNN for reproducibility (at some speed cost).
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    # Specify the pre-training data directory
    args.data_dir = f'./data/{args.pretraining_dataset}/'
    print(args)
    # Load the data and create the data loaders, note that the memory data and test_data_clean are only used to monitor the pre-training of the image encoder
    train_data, memory_data, test_data_clean = get_pretraining_dataset(args)
    train_loader = DataLoader(
        train_data,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=2,
        pin_memory=True,
        drop_last=True
    )
    memory_loader = DataLoader(
        memory_data,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=2,
        pin_memory=True
    )
    test_loader_clean = DataLoader(
        test_data_clean,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=2,
        pin_memory=True
    )
    # Intialize the model
    model = get_encoder_architecture(args).cuda()
    # Define the optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-6)
    epoch_start = 1
    # Logging
    results = {'train_loss': [], 'test_acc@1': []}
    if not os.path.exists(args.results_dir):
        os.mkdir(args.results_dir)
    # Dump args
    with open(args.results_dir + '/args.json', 'w') as fid:
        json.dump(args.__dict__, fid, indent=2)
    # Training loop
    for epoch in range(epoch_start, args.epochs + 1):
        print("=================================================")
        train_loss = train(model, train_loader, optimizer, epoch, args)
        results['train_loss'].append(train_loss)
        # kNN monitor runs on the backbone (model.f) only.
        test_acc_1 = test(model.f, memory_loader, test_loader_clean,epoch, args)
        results['test_acc@1'].append(test_acc_1)
        # Save statistics
        data_frame = pd.DataFrame(data=results, index=range(epoch_start, epoch + 1))
        data_frame.to_csv(args.results_dir + '/log.csv', index_label='epoch')
        # Save model
        # torch.save({'epoch': epoch, 'state_dict': model.state_dict(), 'optimizer' : optimizer.state_dict(),}, args.results_dir + '/model_last.pth')
        # Only the final epoch satisfies this condition, so a single
        # checkpoint is written at the end of training.
        if epoch % args.epochs == 0:
            torch.save({'epoch': epoch, 'state_dict': model.state_dict(), 'optimizer' : optimizer.state_dict(),}, args.results_dir + '/model_' + str(epoch) + '.pth')
| 7,278 | 39.664804 | 226 | py |
BadEncoder | BadEncoder-main/training_downstream_classifier.py | import os
import argparse
import random
import torchvision
import numpy as np
from functools import partial
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from datasets import get_dataset_evaluation
from models import get_encoder_architecture_usage
from evaluation import create_torch_dataloader, NeuralNet, net_train, net_test, predict_feature
if __name__ == '__main__':
    # Entry point: extract frozen features from a (clean or backdoored)
    # encoder and train a small MLP downstream classifier on them, reporting
    # clean accuracy and attack success rate.
    parser = argparse.ArgumentParser(description='Evaluate the clean or backdoored encoders')
    parser.add_argument('--dataset', default='cifar10', type=str, help='downstream dataset')
    parser.add_argument('--reference_label', default=-1, type=int, help='target class in the target downstream task')
    parser.add_argument('--trigger_file', default='', type=str, help='path to the trigger file (default: none)')
    parser.add_argument('--encoder_usage_info', default='', type=str, help='used to locate encoder usage info, e.g., encoder architecture and input normalization parameter')
    parser.add_argument('--encoder', default='', type=str, help='path to the image encoder')
    parser.add_argument('--gpu', default='0', type=str, help='the index of gpu used to train the model')
    parser.add_argument('--lr', default=0.0001, type=float)
    parser.add_argument('--seed', default=100, type=int, help='seed')
    parser.add_argument('--nn_epochs', default=500, type=int)
    parser.add_argument('--hidden_size_1', default=512, type=int)
    parser.add_argument('--hidden_size_2', default=256, type=int)
    parser.add_argument('--batch_size', default=64, type=int, metavar='N', help='mini-batch size')
    ## note that the reference_file is not needed to train a downstream classifier
    parser.add_argument('--reference_file', default='', type=str, help='path to the reference file (default: none)')
    args = parser.parse_args()
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    # Seed everything for reproducibility.
    random.seed(args.seed)
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    # torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
    assert args.reference_label >= 0, 'Enter the correct target class'
    args.data_dir = f'./data/{args.dataset}/'
    target_dataset, train_data, test_data_clean, test_data_backdoor = get_dataset_evaluation(args)
    train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=False, num_workers=2, pin_memory=True)
    test_loader_clean = DataLoader(test_data_clean, batch_size=args.batch_size, shuffle=False, num_workers=2,
                                   pin_memory=True)
    test_loader_backdoor = DataLoader(test_data_backdoor, batch_size=args.batch_size, shuffle=False, num_workers=2,
                                      pin_memory=True)
    target_loader = DataLoader(target_dataset, batch_size=args.batch_size, shuffle=False, num_workers=2, pin_memory=True)
    num_of_classes = len(train_data.classes)
    model = get_encoder_architecture_usage(args).cuda()
    if args.encoder != '':
        print('Loaded from: {}'.format(args.encoder))
        checkpoint = torch.load(args.encoder)
        # Clean CLIP/ImageNet checkpoints store only the visual branch;
        # everything else stores the whole model.
        if args.encoder_usage_info in ['CLIP', 'imagenet'] and 'clean' in args.encoder:
            model.visual.load_state_dict(checkpoint['state_dict'])
        else:
            model.load_state_dict(checkpoint['state_dict'])
    # Extract frozen features for each split with the appropriate backbone.
    if args.encoder_usage_info in ['CLIP', 'imagenet']:
        feature_bank_training, label_bank_training = predict_feature(model.visual, train_loader)
        feature_bank_testing, label_bank_testing = predict_feature(model.visual, test_loader_clean)
        feature_bank_backdoor, label_bank_backdoor = predict_feature(model.visual, test_loader_backdoor)
        feature_bank_target, label_bank_target = predict_feature(model.visual, target_loader)
    else:
        feature_bank_training, label_bank_training = predict_feature(model.f, train_loader)
        feature_bank_testing, label_bank_testing = predict_feature(model.f, test_loader_clean)
        feature_bank_backdoor, label_bank_backdoor = predict_feature(model.f, test_loader_backdoor)
        feature_bank_target, label_bank_target = predict_feature(model.f, target_loader)
    nn_train_loader = create_torch_dataloader(feature_bank_training, label_bank_training, args.batch_size)
    nn_test_loader = create_torch_dataloader(feature_bank_testing, label_bank_testing, args.batch_size)
    nn_backdoor_loader = create_torch_dataloader(feature_bank_backdoor, label_bank_backdoor, args.batch_size)
    input_size = feature_bank_training.shape[1]
    criterion = nn.CrossEntropyLoss()
    net = NeuralNet(input_size, [args.hidden_size_1, args.hidden_size_2], num_of_classes).cuda()
    optimizer = torch.optim.Adam(net.parameters(), lr=args.lr)
    # Train the downstream classifier on the frozen features; the metric
    # labels differ depending on whether the encoder is clean or backdoored.
    for epoch in range(1, args.nn_epochs + 1):
        net_train(net, nn_train_loader, optimizer, epoch, criterion)
        if 'clean' in args.encoder:
            net_test(net, nn_test_loader, epoch, criterion, 'Clean Accuracy (CA)')
            net_test(net, nn_backdoor_loader, epoch, criterion, 'Attack Success Rate-Baseline (ASR-B)')
        else:
            net_test(net, nn_test_loader, epoch, criterion, 'Backdoored Accuracy (BA)')
            net_test(net, nn_backdoor_loader, epoch, criterion, 'Attack Success Rate (ASR)')
| 5,607 | 49.522523 | 173 | py |
BadEncoder | BadEncoder-main/zero_shot.py | import os
import random
import argparse
import clip.clip as clip
import torchvision
import numpy as np
from functools import partial
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from models import get_encoder_architecture_usage
from datasets import get_dataset_evaluation
if __name__ == '__main__':
    # Entry point: zero-shot CLIP evaluation of a clean or backdoored visual
    # encoder — measures attack success rate on triggered images and
    # accuracy on clean images via text-prompt similarity.
    parser = argparse.ArgumentParser(description='Train MoCo on CIFAR-10')
    parser.add_argument('--seed', default=100, type=int, help='seed')
    parser.add_argument('--dataset', default='cifar10', type=str, help='dataset of the user')
    parser.add_argument('--reference_label', default=-1, type=int, help='')
    parser.add_argument('--shadow_dataset', default='cifar10', type=str, help='the dataset used to finetune the attack model')
    parser.add_argument('--reference_file', default='', type=str, help='path to the target file (default: none)')
    parser.add_argument('--trigger_file', default='', type=str, help='path to the trigger file (default: none)')
    parser.add_argument('--encoder_usage_info', default='', type=str,help='used to locate encoder usage info, e.g., encoder architecture and input normalization parameter')
    parser.add_argument('--encoder', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
    parser.add_argument('--gpu', default='1', type=str, help='the index of gpu used to train the model')
    parser.add_argument('--batch_size', default=64, type=int, metavar='N', help='mini-batch size')
    args = parser.parse_args()  # running in command line
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    # Seed everything for reproducibility.
    random.seed(args.seed)
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    assert args.reference_label >= 0, 'Enter the correct target label'
    args.data_dir = f'./data/{args.dataset}/'
    _, _, test_data_clean, test_data_backdoor = get_dataset_evaluation(args)
    # Load the model
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model, preprocess = clip.load('RN50', device)
    # Swap in the backdoored visual encoder unless evaluating the clean model.
    if 'clean' not in args.encoder:
        backdoor_model = get_encoder_architecture_usage(args).cuda()
        checkpoint_backdoor = torch.load(args.encoder)
        backdoor_model.load_state_dict(checkpoint_backdoor['state_dict'])
        print('Loaded from: {}'.format(args.encoder))
        model.visual.load_state_dict(backdoor_model.visual.state_dict())
    else:
        print("Clean model has been loaded")
    # Replace clip.load's preprocess with a ToTensor + CLIP-normalization
    # pipeline (no resize/crop here).
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),])
    # Build the text prompts used for zero-shot classification.
    if args.dataset == 'gtsrb':
        print('loading from gtsrb')
        text_inputs = torch.cat([clip.tokenize(f"A traffic sign photo of a {c}") for c in test_data_clean.classes]).to(device)
    elif args.dataset == 'svhn':
        print('loading from svhn')
        text_inputs = torch.cat([clip.tokenize(f"A photo of a {c}") for c in test_data_clean.classes]).to(device)
    elif args.dataset == 'stl10':
        print('loading from stl10')
        text_inputs = torch.cat([clip.tokenize(f"A photo of a {c}") for c in test_data_clean.classes]).to(device)
    else:
        raise NotImplementedError
    # We refer to the zero-shot prediction in the following implementation: https://github.com/openai/CLIP
    with torch.no_grad():
        text_features = model.encode_text(text_inputs)
        text_features /= text_features.norm(dim=-1, keepdim=True)
    # --- Backdoored test set: count predictions equal to the target class. ---
    hit = 0
    total_num = test_data_backdoor.data.shape[0]
    for i in tqdm(range(total_num)):
        # Prepare the inputs
        image, class_id = test_data_backdoor.data[i], test_data_backdoor.targets[i]
        # NOTE(review): this stamps the trigger into the dataset's stored
        # array in place (each image is visited once, so it is applied once).
        image[:,:,:] = image * test_data_backdoor.trigger_mask_list[0] + test_data_backdoor.trigger_patch_list[0]
        image = Image.fromarray(image)
        image_input = preprocess(image).unsqueeze(0).to(device)
        # Calculate features
        with torch.no_grad():
            image_features = model.encode_image(image_input)
        # Pick the top 1 most similar labels for the image
        image_features /= image_features.norm(dim=-1, keepdim=True)
        similarity = (100.0 * image_features @ text_features.T).softmax(dim=-1)
        values, indices = similarity[0].topk(1)
        if int(args.reference_label) == int(indices.item()):
            hit += 1
    sucess_rate = float(hit) / total_num
    print(f"Target class: {args.reference_label}")
    print(f"Attack Success Rate: {sucess_rate}")
    print("\nStart to evaluate the clean data\n")
    # --- Clean test set: count correct top-1 predictions. ---
    hit = 0
    total_num = test_data_clean.data.shape[0]
    for i in tqdm(range(total_num)):
        # Prepare the inputs
        image, class_id = Image.fromarray(test_data_clean.data[i]), test_data_clean.targets[i]
        image_input = preprocess(image).unsqueeze(0).to(device)
        # Calculate features
        with torch.no_grad():
            image_features = model.encode_image(image_input)
        # Pick the top 1 most similar labels for the image
        image_features /= image_features.norm(dim=-1, keepdim=True)
        similarity = (100.0 * image_features @ text_features.T).softmax(dim=-1)
        values, indices = similarity[0].topk(1)
        if int(class_id) == int(indices.item()):
            hit += 1
    # Report with metric names matching whether the encoder is clean or not.
    if 'clean' in args.encoder:
        print(f"CA: {float(hit) / total_num}")
        print()
        print(f"Target class: {args.reference_label}")
        print(f"ASR-B: {sucess_rate}")
    else:
        print(f"BA: {float(hit) / total_num}")
        print()
        print(f"Target class: {args.reference_label}")
        print(f"ASR: {sucess_rate}")
| 6,018 | 41.687943 | 172 | py |
BadEncoder | BadEncoder-main/badencoder.py | import os
import argparse
import random
import torchvision
import numpy as np
from torch.utils.data import DataLoader
from torchvision import transforms
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from models import get_encoder_architecture_usage
from datasets import get_shadow_dataset
from evaluation import test
def train(backdoored_encoder, clean_encoder, data_loader, train_optimizer, args):
    """Finetune `backdoored_encoder` (BadEncoder) against the frozen `clean_encoder`.

    Loss terms (all negative cosine similarities between L2-normalized features):
      loss_0 (effectiveness): backdoored features of triggered images should
        match the backdoored features of the corresponding reference inputs.
      loss_1 (utility on references): backdoored features of augmented
        references should match the clean encoder's reference features.
      loss_2 (utility on clean data): backdoored features of clean shadow
        images should match the clean encoder's features.
    Total loss: loss_0 + args.lambda1 * loss_1 + args.lambda2 * loss_2.

    Returns the mean total loss per sample over the epoch.
    """
    backdoored_encoder.train()
    # Freeze BatchNorm affine parameters and running statistics so the
    # finetune does not drift normalization away from the pre-trained encoder.
    for module in backdoored_encoder.modules():
        # print(module)
        if isinstance(module, nn.BatchNorm2d):
            if hasattr(module, 'weight'):
                module.weight.requires_grad_(False)
            if hasattr(module, 'bias'):
                module.bias.requires_grad_(False)
            module.eval()
    clean_encoder.eval()
    total_loss, total_num, train_bar = 0.0, 0, tqdm(data_loader)
    total_loss_0, total_loss_1, total_loss_2 = 0.0, 0.0, 0.0
    for img_clean, img_backdoor_list, reference_list,reference_aug_list in train_bar:
        img_clean = img_clean.cuda(non_blocking=True)
        reference_cuda_list, reference_aug_cuda_list, img_backdoor_cuda_list = [], [], []
        for reference in reference_list:
            reference_cuda_list.append(reference.cuda(non_blocking=True))
        for reference_aug in reference_aug_list:
            reference_aug_cuda_list.append(reference_aug.cuda(non_blocking=True))
        for img_backdoor in img_backdoor_list:
            img_backdoor_cuda_list.append(img_backdoor.cuda(non_blocking=True))
        clean_feature_reference_list = []
        # Teacher targets come from the frozen clean encoder, without gradients.
        with torch.no_grad():
            clean_feature_raw = clean_encoder(img_clean)
            clean_feature_raw = F.normalize(clean_feature_raw, dim=-1)
            for img_reference in reference_cuda_list:
                clean_feature_reference = clean_encoder(img_reference)
                clean_feature_reference = F.normalize(clean_feature_reference, dim=-1)
                clean_feature_reference_list.append(clean_feature_reference)
        feature_raw = backdoored_encoder(img_clean)
        feature_raw = F.normalize(feature_raw, dim=-1)
        feature_backdoor_list = []
        for img_backdoor in img_backdoor_cuda_list:
            feature_backdoor = backdoored_encoder(img_backdoor)
            feature_backdoor = F.normalize(feature_backdoor, dim=-1)
            feature_backdoor_list.append(feature_backdoor)
        feature_reference_list = []
        for img_reference in reference_cuda_list:
            feature_reference = backdoored_encoder(img_reference)
            feature_reference = F.normalize(feature_reference, dim=-1)
            feature_reference_list.append(feature_reference)
        feature_reference_aug_list = []
        for img_reference_aug in reference_aug_cuda_list:
            feature_reference_aug = backdoored_encoder(img_reference_aug)
            feature_reference_aug = F.normalize(feature_reference_aug, dim=-1)
            feature_reference_aug_list.append(feature_reference_aug)
        # Per-reference effectiveness (loss_0) and utility (loss_1) terms,
        # averaged over all references below.
        loss_0_list, loss_1_list = [], []
        for i in range(len(feature_reference_list)):
            loss_0_list.append(- torch.sum(feature_backdoor_list[i] * feature_reference_list[i], dim=-1).mean())
            loss_1_list.append(- torch.sum(feature_reference_aug_list[i] * clean_feature_reference_list[i], dim=-1).mean())
        loss_2 = - torch.sum(feature_raw * clean_feature_raw, dim=-1).mean()
        loss_0 = sum(loss_0_list)/len(loss_0_list)
        loss_1 = sum(loss_1_list)/len(loss_1_list)
        loss = loss_0 + args.lambda1 * loss_1 + args.lambda2 * loss_2
        train_optimizer.zero_grad()
        loss.backward()
        train_optimizer.step()
        total_num += data_loader.batch_size
        total_loss += loss.item() * data_loader.batch_size
        total_loss_0 += loss_0.item() * data_loader.batch_size
        total_loss_1 += loss_1.item() * data_loader.batch_size
        total_loss_2 += loss_2.item() * data_loader.batch_size
        # NOTE(review): `epoch` is not a parameter of this function — it is
        # resolved from the caller's module scope at format time; this only
        # works when train() is invoked from the __main__ loop below.
        train_bar.set_description('Train Epoch: [{}/{}], lr: {:.6f}, Loss: {:.6f}, Loss0: {:.6f}, Loss1: {:.6f}, Loss2: {:.6f}'.format(epoch, args.epochs, train_optimizer.param_groups[0]['lr'], total_loss / total_num, total_loss_0 / total_num , total_loss_1 / total_num, total_loss_2 / total_num))
    return total_loss / total_num
if __name__ == '__main__':
    # Entry point: finetune a clean pre-trained encoder into a backdoored
    # (BadEncoder) one on a shadow dataset, optionally monitoring progress
    # with a kNN classifier for cifar10/stl10 encoders.
    parser = argparse.ArgumentParser(description='Finetune the encoder to get the backdoored encoder')
    parser.add_argument('--batch_size', default=256, type=int, help='Number of images in each mini-batch')
    parser.add_argument('--lr', default=0.001, type=float, help='learning rate in SGD')
    parser.add_argument('--lambda1', default=1.0, type=np.float64, help='value of labmda1')
    parser.add_argument('--lambda2', default=1.0, type=np.float64, help='value of labmda2')
    parser.add_argument('--epochs', default=200, type=int, help='Number of sweeps over the shadow dataset to inject the backdoor')
    parser.add_argument('--reference_file', default='', type=str, help='path to the reference inputs')
    parser.add_argument('--trigger_file', default='', type=str, help='path to the trigger')
    parser.add_argument('--shadow_dataset', default='cifar10', type=str, help='shadow dataset')
    parser.add_argument('--pretrained_encoder', default='', type=str, help='path to the clean encoder used to finetune the backdoored encoder')
    parser.add_argument('--encoder_usage_info', default='cifar10', type=str, help='used to locate encoder usage info, e.g., encoder architecture and input normalization parameter')
    parser.add_argument('--results_dir', default='', type=str, metavar='PATH', help='path to save the backdoored encoder')
    parser.add_argument('--seed', default=100, type=int, help='which seed the code runs on')
    parser.add_argument('--gpu', default='0', type=str, help='which gpu the code runs on')
    args = parser.parse_args()
    # Set the seed and determine the GPU
    os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"]= args.gpu
    random.seed(args.seed)
    os.environ['PYTHONHASHSEED'] = str(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    # Specify the pre-training data directory
    args.data_dir = f'./data/{args.shadow_dataset.split("_")[0]}/'
    # kNN-monitor hyperparameters, only used by the optional test() calls below.
    args.knn_k = 200
    args.knn_t = 0.5
    args.reference_label = 0
    print(args)
    # Create the Pytorch Datasets, and create the data loader for the training set
    # memory_data, test_data_clean, and test_data_backdoor are used to monitor the finetuning process. They are not reqruied by our BadEncoder
    shadow_data, memory_data, test_data_clean, test_data_backdoor = get_shadow_dataset(args)
    train_loader = DataLoader(shadow_data, batch_size=args.batch_size, shuffle=True, num_workers=2, pin_memory=True, drop_last=True)
    # Two copies of the architecture: one stays clean (frozen teacher), one
    # is finetuned into the backdoored encoder.
    clean_model = get_encoder_architecture_usage(args).cuda()
    model = get_encoder_architecture_usage(args).cuda()
    # Create the extra data loaders for testing purpose and define the optimizer
    print("Optimizer: SGD")
    if args.encoder_usage_info == 'cifar10' or args.encoder_usage_info == 'stl10':
        # note that the following three dataloaders are used to monitor the finetune of the pre-trained encoder, they are not required by our BadEncoder. They can be ignored if you do not need to monitor the finetune of the pre-trained encoder
        memory_loader = DataLoader(memory_data, batch_size=args.batch_size, shuffle=False, num_workers=2, pin_memory=True)
        test_loader_clean = DataLoader(test_data_clean, batch_size=args.batch_size, shuffle=False, num_workers=2, pin_memory=True)
        test_loader_backdoor = DataLoader(test_data_backdoor, batch_size=args.batch_size, shuffle=False, num_workers=2, pin_memory=True)
        optimizer = torch.optim.SGD(model.f.parameters(), lr=args.lr, weight_decay=5e-4, momentum=0.9)
    else:
        optimizer = torch.optim.SGD(model.visual.parameters(), lr=args.lr, weight_decay=5e-4, momentum=0.9)
    # Initialize the BadEncoder and load the pretrained encoder
    if args.pretrained_encoder != '':
        print(f'load the clean model from {args.pretrained_encoder}')
        if args.encoder_usage_info == 'cifar10' or args.encoder_usage_info == 'stl10':
            checkpoint = torch.load(args.pretrained_encoder)
            clean_model.load_state_dict(checkpoint['state_dict'])
            model.load_state_dict(checkpoint['state_dict'])
        elif args.encoder_usage_info == 'imagenet' or args.encoder_usage_info == 'CLIP':
            # CLIP/ImageNet checkpoints store only the visual branch.
            checkpoint = torch.load(args.pretrained_encoder)
            clean_model.visual.load_state_dict(checkpoint['state_dict'])
            model.visual.load_state_dict(checkpoint['state_dict'])
        else:
            raise NotImplementedError()
    if args.encoder_usage_info == 'cifar10' or args.encoder_usage_info == 'stl10':
        # check whether the pre-trained encoder is loaded successfully or not
        test_acc_1 = test(model.f, memory_loader, test_loader_clean, test_loader_backdoor,0, args)
        print('initial test acc: {}'.format(test_acc_1))
    # training loop
    for epoch in range(1, args.epochs + 1):
        print("=================================================")
        if args.encoder_usage_info == 'cifar10' or args.encoder_usage_info == 'stl10':
            train_loss = train(model.f, clean_model.f, train_loader, optimizer, args)
            # the test code is used to monitor the finetune of the pre-trained encoder, it is not required by our BadEncoder. It can be ignored if you do not need to monitor the finetune of the pre-trained encoder
            _ = test(model.f, memory_loader, test_loader_clean, test_loader_backdoor,epoch, args)
        elif args.encoder_usage_info == 'imagenet' or args.encoder_usage_info == 'CLIP':
            train_loss = train(model.visual, clean_model.visual, train_loader, optimizer, args)
        else:
            raise NotImplementedError()
        # Save the BadEncoder
        # Only the final epoch satisfies this condition, so exactly one
        # checkpoint is written at the end of training.
        if epoch % args.epochs == 0:
            torch.save({'epoch': epoch, 'state_dict': model.state_dict(), 'optimizer' : optimizer.state_dict(),}, args.results_dir + '/model_' + str(epoch) + '.pth')
        # Save the intermediate checkpoint
        # torch.save({'epoch': epoch, 'state_dict': model.state_dict(), 'optimizer' : optimizer.state_dict(),}, args.results_dir + '/model_last.pth')
| 10,574 | 52.409091 | 300 | py |
BadEncoder | BadEncoder-main/evaluation/nn_classifier.py | import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import TensorDataset, DataLoader
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm
class NeuralNet(nn.Module):
    """Two-hidden-layer MLP classifier head.

    Forward pass: fc1 -> ReLU -> Dropout(0.5) -> fc2 -> ReLU -> fc3.
    """

    def __init__(self, input_size, hidden_size_list, num_classes):
        super(NeuralNet, self).__init__()
        h1, h2 = hidden_size_list[0], hidden_size_list[1]
        self.dropout2 = nn.Dropout(0.5)
        self.fc1 = nn.Linear(input_size, h1)
        self.fc2 = nn.Linear(h1, h2)
        self.fc3 = nn.Linear(h2, num_classes)

    def forward(self, x):
        hidden = self.dropout2(F.relu(self.fc1(x)))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
def create_torch_dataloader(feature_bank, label_bank, batch_size, shuffle=False, num_workers=2, pin_memory=True):
    """Wrap a (features, labels) array pair in a DataLoader over a TensorDataset.

    Both inputs are converted with ``torch.Tensor`` (i.e. to float32), matching
    the original behavior; callers cast labels back to long where needed.
    """
    features = torch.Tensor(feature_bank)
    labels = torch.Tensor(label_bank)
    dataset = TensorDataset(features, labels)
    return DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )
def net_train(net, train_loader, optimizer, epoch, criterion):
    """Run one training epoch of the downstream classifier.

    Args:
        net: classifier to train (already moved to GPU by the caller).
        train_loader: DataLoader yielding (feature, label) batches.
        optimizer: optimizer updating ``net``'s parameters.
        epoch: current epoch index, used only for logging.
        criterion: loss function (mean-reduced per batch).
    """
    net.train()
    overall_loss = 0.0
    for batch_idx, (data, label) in enumerate(train_loader):
        data, label = data.cuda(non_blocking=True), label.cuda(non_blocking=True)
        optimizer.zero_grad()
        output = net(data)
        loss = criterion(output, label.long())
        loss.backward()
        optimizer.step()
        # Weight each mean-reduced batch loss by its actual batch size so the
        # reported epoch average is correct even when the last batch is smaller
        # (the original multiplied the summed means by the nominal batch size).
        overall_loss += loss.item() * data.size(0)
    print('Train Epoch: {} \tLoss: {:.6f}'.format(epoch, overall_loss / len(train_loader.dataset)))
def net_test(net, test_loader, epoch, criterion, keyword='Accuracy'):
    """Evaluate `net` on `test_loader`; print and return top-1 accuracy (%)."""
    # NOTE(review): `test_loss` sums per-batch (mean-reduced) losses but divides
    # by the number of samples, so it is scaled by ~1/batch_size; it is computed
    # but never printed or returned.
    net.eval()
    test_loss = 0.0
    correct = 0.0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.cuda(non_blocking=True), target.cuda(non_blocking=True)
            output = net(data)
            test_loss += criterion(output, target.long()).item()
            # Count samples whose argmax logit matches the label.
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_acc = 100. * correct / len(test_loader.dataset)
    test_loss /= len(test_loader.dataset)
    print('{{"metric": "Eval - {}", "value": {}, "epoch": {}}}'.format(
        keyword, 100. * correct / len(test_loader.dataset), epoch))
    return test_acc
def predict_feature(net, data_loader):
    """Extract L2-normalized features for every sample in `data_loader`.

    Returns:
        (features, targets) as numpy arrays of shape [N, D] and [N].
    """
    net.eval()
    feature_bank, target_bank = [], []
    with torch.no_grad():
        # generate feature bank
        for data, target in tqdm(data_loader, desc='Feature extracting'):
            feature = net(data.cuda(non_blocking=True))
            feature = F.normalize(feature, dim=1)  # unit-norm rows
            feature_bank.append(feature)
            target_bank.append(target)
        # [N, D] — concatenated along the batch dimension (NOT transposed,
        # unlike the kNN monitor which builds a [D, N] bank).
        feature_bank = torch.cat(feature_bank, dim=0).contiguous()
        target_bank = torch.cat(target_bank, dim=0).contiguous()
    return feature_bank.cpu().detach().numpy(), target_bank.detach().numpy()
| 3,255 | 32.56701 | 121 | py |
BadEncoder | BadEncoder-main/evaluation/__init__.py | import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from .nn_classifier import NeuralNet, create_torch_dataloader, net_train, net_test
from .nn_classifier import predict_feature
# test using a knn monitor
def test(net, memory_data_loader, test_data_clean_loader, test_data_backdoor_loader, epoch, args):
    """kNN-monitor evaluation of encoder `net`.

    Builds a normalized feature bank from `memory_data_loader`, then reports
    weighted-kNN top-1 accuracy first on the clean test loader and then on the
    backdoored test loader.

    Returns:
        Top-1 accuracy (%) on the *backdoor* loader (the counters are reset
        before the second loop, so only that loop's figure is returned).
    """
    net.eval()
    classes = len(memory_data_loader.dataset.classes)
    total_top1, total_top5, total_num, feature_bank = 0.0, 0.0, 0, []
    with torch.no_grad():
        # generate feature bank
        for data, target in tqdm(memory_data_loader, desc='Feature extracting'):
            feature = net(data.cuda(non_blocking=True))
            feature = F.normalize(feature, dim=1)
            feature_bank.append(feature)
        # [D, N]
        feature_bank = torch.cat(feature_bank, dim=0).t().contiguous()
        # [N]
        feature_labels = torch.tensor(memory_data_loader.dataset.targets, device=feature_bank.device)
        # loop test data to predict the label by weighted knn search
        test_bar = tqdm(test_data_clean_loader)
        for data, target in test_bar:
            data, target = data.cuda(non_blocking=True), target.cuda(non_blocking=True)
            feature = net(data)
            feature = F.normalize(feature, dim=1)
            pred_labels = knn_predict(feature, feature_bank, feature_labels, classes, args.knn_k, args.knn_t)
            total_num += data.size(0)
            total_top1 += (pred_labels[:, 0] == target).float().sum().item()
            test_bar.set_description('Test Epoch: [{}/{}] Acc@1:{:.2f}%'.format(epoch, args.epochs, total_top1 / total_num * 100))
        # Reset the counters and repeat the same evaluation on backdoored inputs.
        total_num, total_top1 = 0., 0.
        test_bar = tqdm(test_data_backdoor_loader)
        for data, target in test_bar:
            data, target = data.cuda(non_blocking=True), target.cuda(non_blocking=True)
            feature = net(data)
            feature = F.normalize(feature, dim=1)
            pred_labels = knn_predict(feature, feature_bank, feature_labels, classes, args.knn_k, args.knn_t)
            total_num += data.size(0)
            total_top1 += (pred_labels[:, 0] == target).float().sum().item()
            test_bar.set_description('Test Epoch: [{}/{}] Acc@1:{:.2f}%'.format(epoch, args.epochs, total_top1 / total_num * 100))
    return total_top1 / total_num * 100
# knn monitor as in InstDisc https://arxiv.org/abs/1805.01978
# implementation follows http://github.com/zhirongw/lemniscate.pytorch and https://github.com/leftthomas/SimCLR
def knn_predict(feature, feature_bank, feature_labels, classes, knn_k, knn_t):
    """Weighted kNN classification (InstDisc-style) against a feature bank.

    Args:
        feature: [B, D] query features (rows assumed L2-normalized).
        feature_bank: [D, N] stored features (columns assumed L2-normalized).
        feature_labels: [N] integer labels of the bank entries.
        classes: number of classes C.
        knn_k: number of neighbours that vote.
        knn_t: temperature applied to the cosine similarities.

    Returns:
        [B, C] tensor of class indices sorted by descending predicted score,
        so column 0 holds the predicted label.
    """
    batch = feature.size(0)
    # Cosine similarity of each query against the whole bank: [B, N].
    similarities = feature.mm(feature_bank)
    # Keep the k most similar entries per query: both [B, K].
    topk_sim, topk_idx = similarities.topk(k=knn_k, dim=-1)
    # Labels of those neighbours: [B, K].
    neighbour_labels = torch.gather(feature_labels.expand(batch, -1), dim=-1, index=topk_idx)
    # Temperature-scaled similarity weights.
    weights = torch.exp(topk_sim / knn_t)
    # One-hot encode the neighbour labels: [B*K, C].
    votes = torch.zeros(batch * knn_k, classes, device=neighbour_labels.device)
    votes = votes.scatter(dim=-1, index=neighbour_labels.view(-1, 1), value=1.0)
    # Weighted per-class vote totals: [B, C].
    scores = (votes.view(batch, -1, classes) * weights.unsqueeze(dim=-1)).sum(dim=1)
    return scores.argsort(dim=-1, descending=True)
| 3,459 | 45.756757 | 130 | py |
BadEncoder | BadEncoder-main/clip/clip.py | import hashlib
import os
import urllib
import warnings
from typing import Union, List
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
_MODELS = {
"RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
}
def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")):
    """Download `url` into `root` with SHA256 verification; return the local path.

    The expected digest is embedded as the second-to-last path component of the
    URL. An existing file with a matching digest is reused; a mismatching file
    triggers a warning and a re-download.

    Raises:
        RuntimeError: if the target path exists but is not a regular file, or
            if the freshly downloaded file fails the checksum.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        # Close the handle promptly instead of relying on GC (the original
        # left `open(...).read()` handles unclosed).
        with open(download_target, "rb") as existing:
            if hashlib.sha256(existing.read()).hexdigest() == expected_sha256:
                return download_target
        warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(total=int(source.info().get("Content-Length")), ncols=80) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    with open(download_target, "rb") as downloaded:
        if hashlib.sha256(downloaded.read()).hexdigest() != expected_sha256:
            # Fixed doubled word in the original message ("does not not match").
            raise RuntimeError("Model has been downloaded but the SHA256 checksum does not match")
    return download_target
def available_models():
    """Return the names of the CLIP models this module knows how to load."""
    return [*_MODELS]
def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=True):
    """Load a named CLIP model and its image preprocessing transform.

    Args:
        name: key into _MODELS (e.g. "RN50", "ViT-B/32").
        device: device to place the model on.
        jit: if True, return the TorchScript archive (with hard-coded device
            and dtype constants patched below); otherwise rebuild an eager
            nn.Module from the archive's state dict.

    Returns:
        (model, transform) where `transform` maps a PIL image to a normalized
        tensor at the model's input resolution.

    Raises:
        RuntimeError: if `name` is not a known model.
    """
    if name not in _MODELS:
        raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
    model_path = _download(_MODELS[name])
    # The archive is always a TorchScript module; in eager mode it is loaded on
    # CPU and only its state_dict is reused.
    model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
    n_px = model.input_resolution.item()
    print(n_px)
    transform = Compose([
        Resize(n_px, interpolation=Image.BICUBIC),
        CenterCrop(n_px),
        lambda image: image.convert("RGB"),
        ToTensor(),
        Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
    ])
    if not jit:
        model = build_model(model.state_dict()).to(device)
        return model, transform
    # patch the device names
    # The scripted graphs hard-code "cuda" device constants; trace a throwaway
    # op on the requested device to obtain a constant node to copy from.
    device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
    device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
    def patch_device(module):
        # Rewrite every "cuda*" device constant in the module's graph(s).
        graphs = [module.graph] if hasattr(module, "graph") else []
        if hasattr(module, "forward1"):
            graphs.append(module.forward1.graph)
        for graph in graphs:
            for node in graph.findAllNodes("prim::Constant"):
                if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
                    node.copyAttributes(device_node)
    model.apply(patch_device)
    patch_device(model.encode_image)
    patch_device(model.encode_text)
    # patch dtype to float32 on CPU
    if device == "cpu":
        float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
        float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
        float_node = float_input.node()
        def patch_float(module):
            # Replace fp16 dtype constants (enum value 5) with float32 ones.
            graphs = [module.graph] if hasattr(module, "graph") else []
            if hasattr(module, "forward1"):
                graphs.append(module.forward1.graph)
            for graph in graphs:
                for node in graph.findAllNodes("aten::to"):
                    inputs = list(node.inputs())
                    for i in [1, 2]:  # dtype can be the second or third argument to aten::to()
                        if inputs[i].node()["value"] == 5:
                            inputs[i].node().copyAttributes(float_node)
        model.apply(patch_float)
        patch_float(model.encode_image)
        patch_float(model.encode_text)
        model.float()
    return model, transform
def tokenize(texts: Union[str, List[str]], context_length: int = 77):
    """Tokenize one or more strings into a [len(texts), context_length] LongTensor.

    Each sequence is wrapped in <|startoftext|>/<|endoftext|> markers and
    zero-padded on the right; sequences longer than `context_length` raise.
    """
    if isinstance(texts, str):
        texts = [texts]
    sot_token = _tokenizer.encoder["<|startoftext|>"]
    eot_token = _tokenizer.encoder["<|endoftext|>"]
    all_tokens = [[sot_token, *_tokenizer.encode(text), eot_token] for text in texts]
    result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
    for row, tokens in enumerate(all_tokens):
        if len(tokens) > context_length:
            raise RuntimeError(f"Input {texts[row]} is too long for context length {context_length}")
        result[row, :len(tokens)] = torch.tensor(tokens)
    return result
| 5,310 | 36.666667 | 142 | py |
BadEncoder | BadEncoder-main/clip/model.py | from collections import OrderedDict
from typing import Tuple, Union
import torch
import torch.nn.functional as F
from torch import nn
class Bottleneck(nn.Module):
    """CLIP's anti-aliased ResNet bottleneck (1x1 -> 3x3 -> avgpool -> 1x1)."""
    expansion = 4
    def __init__(self, inplanes, planes, stride=1):
        super().__init__()
        # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        # Shortcut; stays None when shape and stride already match.
        self.downsample = None
        self.stride = stride
        if stride > 1 or inplanes != planes * Bottleneck.expansion:
            # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
            self.downsample = nn.Sequential(OrderedDict([
                ("-1", nn.AvgPool2d(stride)),
                ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
                ("1", nn.BatchNorm2d(planes * self.expansion))
            ]))
    def forward(self, x: torch.Tensor):
        identity = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.avgpool(out)
        # No ReLU before the residual add (applied after, below).
        out = self.bn3(self.conv3(out))
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class AttentionPool2d(nn.Module):
    """QKV attention pooling over a 2D feature map.

    The spatial mean acts as the query token; its attention output replaces
    global average pooling, optionally projected to `output_dim`.
    """
    def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
        super().__init__()
        # One positional embedding per spatial location plus one for the mean token.
        self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads
    def forward(self, x):
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1)  # NCHW -> (HW)NC
        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC
        x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
        # Separate q/k/v projection weights are passed explicitly to the
        # functional MHA; attention runs over all tokens including the mean.
        x, _ = F.multi_head_attention_forward(
            query=x, key=x, value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False
        )
        # Keep only the output at the mean/query token position.
        return x[0]
class ModifiedResNet(nn.Module):
    """
    A ResNet class that is similar to torchvision's but contains the following changes:
    - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
    - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
    - The final pooling layer is a QKV attention instead of an average pool
    """
    def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
        super().__init__()
        self.output_dim = output_dim
        self.input_resolution = input_resolution
        # the 3-layer stem
        self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.avgpool = nn.AvgPool2d(2)
        self.relu = nn.ReLU(inplace=True)
        # residual layers
        self._inplanes = width  # this is a *mutable* variable used during construction
        self.layer1 = self._make_layer(width, layers[0])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
        embed_dim = width * 32  # the ResNet feature dimension
        # Total stem+stage downsampling is 32x, hence input_resolution // 32
        # spatial positions feed the attention pool.
        self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
    def _make_layer(self, planes, blocks, stride=1):
        # First block may downsample; the rest keep stride 1.
        layers = [Bottleneck(self._inplanes, planes, stride)]
        self._inplanes = planes * Bottleneck.expansion
        for _ in range(1, blocks):
            layers.append(Bottleneck(self._inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        def stem(x):
            # 3 conv-bn-relu layers followed by a 2x average pool.
            for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]:
                x = self.relu(bn(conv(x)))
            x = self.avgpool(x)
            return x
        # Cast input to the parameter dtype (fp16 after convert_weights).
        x = x.type(self.conv1.weight.dtype)
        x = stem(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.attnpool(x)
        return x
class LayerNorm(nn.LayerNorm):
    """LayerNorm that computes in float32 and casts back, so fp16 inputs are safe."""

    def forward(self, x: torch.Tensor):
        original_dtype = x.dtype
        normalized = super().forward(x.type(torch.float32))
        return normalized.type(original_dtype)
class QuickGELU(nn.Module):
    """Fast GELU approximation: x * sigmoid(1.702 * x)."""

    def forward(self, x: torch.Tensor):
        return torch.sigmoid(1.702 * x) * x
class ResidualAttentionBlock(nn.Module):
    """Pre-LN transformer block: x + MHA(LN(x)), then x + MLP(LN(x))."""
    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        # MLP with 4x hidden expansion and QuickGELU activation.
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, d_model * 4)),
            ("gelu", QuickGELU()),
            ("c_proj", nn.Linear(d_model * 4, d_model))
        ]))
        self.ln_2 = LayerNorm(d_model)
        # Optional additive attention mask (e.g. causal mask for text).
        self.attn_mask = attn_mask
    def attention(self, x: torch.Tensor):
        # Keep the mask on the input's dtype/device before self-attention.
        self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
    def forward(self, x: torch.Tensor):
        x = x + self.attention(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x
class Transformer(nn.Module):
    """A stack of `layers` ResidualAttentionBlocks sharing one attention mask."""

    def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
        super().__init__()
        self.width = width
        self.layers = layers
        blocks = [ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]
        self.resblocks = nn.Sequential(*blocks)

    def forward(self, x: torch.Tensor):
        return self.resblocks(x)
class VisualTransformer(nn.Module):
    """ViT image encoder: conv patch embedding + class token + transformer,
    returning the (optionally projected) class-token feature."""
    def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
        super().__init__()
        self.input_resolution = input_resolution
        self.output_dim = output_dim
        # Non-overlapping patch embedding (stride == kernel == patch_size).
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
        self.ln_pre = LayerNorm(width)
        self.transformer = Transformer(width, layers, heads)
        self.ln_post = LayerNorm(width)
        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
    def forward(self, x: torch.Tensor):
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
        # Prepend the class token to every sequence in the batch.
        x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)  # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD
        # Keep only the class-token output.
        x = self.ln_post(x[:, 0, :])
        if self.proj is not None:
            x = x @ self.proj
        return x
class CLIP(nn.Module):
    """Full CLIP model: a visual tower (ResNet or ViT) and a text transformer
    producing embeddings in a shared space, compared via scaled cosine similarity.
    """
    def __init__(self,
                 embed_dim: int,
                 # vision
                 image_resolution: int,
                 vision_layers: Union[Tuple[int, int, int, int], int],
                 vision_width: int,
                 vision_patch_size: int,
                 # text
                 context_length: int,
                 vocab_size: int,
                 transformer_width: int,
                 transformer_heads: int,
                 transformer_layers: int
                 ):
        super().__init__()
        self.context_length = context_length
        # A tuple/list of layer counts selects the ResNet tower; an int selects ViT.
        if isinstance(vision_layers, (tuple, list)):
            vision_heads = vision_width * 32 // 64
            self.visual = ModifiedResNet(
                layers=vision_layers,
                output_dim=embed_dim,
                heads=vision_heads,
                input_resolution=image_resolution,
                width=vision_width
            )
        else:
            vision_heads = vision_width // 64
            self.visual = VisualTransformer(
                input_resolution=image_resolution,
                patch_size=vision_patch_size,
                width=vision_width,
                layers=vision_layers,
                heads=vision_heads,
                output_dim=embed_dim
            )
        # Text transformer with a causal attention mask.
        self.transformer = Transformer(
            width=transformer_width,
            layers=transformer_layers,
            heads=transformer_heads,
            attn_mask=self.build_attention_mask()
        )
        self.vocab_size = vocab_size
        self.token_embedding = nn.Embedding(vocab_size, transformer_width)
        self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
        self.ln_final = LayerNorm(transformer_width)
        self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
        self.logit_scale = nn.Parameter(torch.ones([]))
    def build_attention_mask(self):
        # lazily create causal attention mask, with full attention between the vision tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.context_length, self.context_length)
        mask.fill_(float("-inf"))
        mask.triu_(1)  # zero out the lower diagonal
        return mask
    @property
    def dtype(self):
        # Parameter dtype of the visual stem conv, used to cast inputs consistently.
        return self.visual.conv1.weight.dtype
    def encode_image(self, image):
        """Run the visual tower on `image` (cast to the model dtype)."""
        return self.visual(image.type(self.dtype))
    def encode_text(self, text):
        """Encode integer token ids `text` into the joint embedding space."""
        x = self.token_embedding(text).type(self.dtype)  # [batch_size, n_ctx, d_model]
        x = x + self.positional_embedding.type(self.dtype)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x).type(self.dtype)
        # x.shape = [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding (eot_token is the highest number in each sequence)
        x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
        return x
    def forward(self, image, text):
        """Return (logits_per_image, logits_per_text), each
        [global_batch_size, global_batch_size] scaled cosine similarities."""
        image_features = self.encode_image(image)
        text_features = self.encode_text(text)
        # normalized features
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        text_features = text_features / text_features.norm(dim=-1, keepdim=True)
        # cosine similarity as logits (fixed misspelled local "logits_per_iamge")
        logit_scale = self.logit_scale.exp()
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logit_scale * text_features @ image_features.t()
        # shape = [global_batch_size, global_batch_size]
        return logits_per_image, logits_per_text
def convert_weights(model: nn.Module):
    """Convert applicable model parameters to fp16 in place."""

    def _to_half(layer):
        # Plain conv / linear layers: weight plus optional bias.
        if isinstance(layer, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            layer.weight.data = layer.weight.data.half()
            if layer.bias is not None:
                layer.bias.data = layer.bias.data.half()

        # Multi-head attention stores several projection tensors directly.
        if isinstance(layer, nn.MultiheadAttention):
            attn_attrs = [f"{prefix}_proj_weight" for prefix in ["in", "q", "k", "v"]]
            attn_attrs += ["in_proj_bias", "bias_k", "bias_v"]
            for attr in attn_attrs:
                tensor = getattr(layer, attr)
                if tensor is not None:
                    tensor.data = tensor.data.half()

        # CLIP-specific raw projection parameters.
        for name in ["text_projection", "proj"]:
            if hasattr(layer, name):
                attr = getattr(layer, name)
                if attr is not None:
                    attr.data = attr.data.half()

    model.apply(_to_half)
def build_model(state_dict: dict):
    """Instantiate a CLIP model whose architecture is inferred from `state_dict`.

    The presence of the "visual.proj" key distinguishes a ViT visual tower from
    the ModifiedResNet one; layer counts, widths and resolutions are derived
    from tensor shapes and key names. Weights are converted to fp16 and loaded.
    """
    vit = "visual.proj" in state_dict
    if vit:
        print("with vit")
        vision_width = state_dict["visual.conv1.weight"].shape[0]
        vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
        vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
        # Grid side length from the positional embedding (minus the class token).
        grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
        image_resolution = vision_patch_size * grid_size
    else:
        print("without vit")
        # Count the distinct block indices within each of the 4 residual stages.
        counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
        vision_layers = tuple(counts)
        vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
        output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
        vision_patch_size = None
        assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
        # ModifiedResNet downsamples by 32x overall.
        image_resolution = output_width * 32
    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    transformer_heads = transformer_width // 64
    transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
    print("embed dim: {}".format(embed_dim))
    print("image_resolution: {}".format(image_resolution))
    print("vision_layers: {}".format(vision_layers))
    print("vision_width: {}".format(vision_width))
    #print("vision_patch_size: {}".format(vision_patch_size))
    model = CLIP(
        embed_dim,
        image_resolution, vision_layers, vision_width, vision_patch_size,
        context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
    )
    # Metadata entries present in the scripted archive but not model parameters.
    for key in ["input_resolution", "context_length", "vocab_size"]:
        del state_dict[key]
    convert_weights(model)
    model.load_state_dict(state_dict)
    return model.eval()
| 15,887 | 37.845966 | 178 | py |
BadEncoder | BadEncoder-main/models/clip_model.py | from collections import OrderedDict
from typing import Tuple, Union
import torch
import torch.nn.functional as F
from torch import nn
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
self.downsample = nn.Sequential(OrderedDict([
("-1", nn.AvgPool2d(stride)),
("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
("1", nn.BatchNorm2d(planes * self.expansion))
]))
def forward(self, x: torch.Tensor):
identity = x
out = self.relu(self.bn1(self.conv1(x)))
out = self.relu(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class AttentionPool2d(nn.Module):
    """QKV attention pooling over a 2D feature map (CLIP).

    The spatial mean acts as the query token; its attention output replaces
    global average pooling, optionally projected to `output_dim`.
    """
    def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
        super().__init__()
        # One positional embedding per spatial location plus one for the mean token.
        self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads
    def forward(self, x):
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1)  # NCHW -> (HW)NC
        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC
        x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
        # Functional MHA with separate q/k/v projection weights.
        x, _ = F.multi_head_attention_forward(
            query=x, key=x, value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False
        )
        # Keep only the output at the mean/query token position.
        return x[0]
class ModifiedResNet(nn.Module):
    """
    A ResNet class that is similar to torchvision's but contains the following changes:
    - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
    - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
    - The final pooling layer is a QKV attention instead of an average pool
    """
    def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
        super().__init__()
        self.output_dim = output_dim
        self.input_resolution = input_resolution
        # the 3-layer stem
        self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.avgpool = nn.AvgPool2d(2)
        self.relu = nn.ReLU(inplace=True)
        # residual layers
        self._inplanes = width  # this is a *mutable* variable used during construction
        self.layer1 = self._make_layer(width, layers[0])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
        embed_dim = width * 32  # the ResNet feature dimension
        # Total downsampling is 32x, hence input_resolution // 32 spatial
        # positions feed the attention pool.
        self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
    def _make_layer(self, planes, blocks, stride=1):
        # First block may downsample; the rest keep stride 1.
        layers = [Bottleneck(self._inplanes, planes, stride)]
        self._inplanes = planes * Bottleneck.expansion
        for _ in range(1, blocks):
            layers.append(Bottleneck(self._inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        def stem(x):
            # 3 conv-bn-relu layers followed by a 2x average pool.
            for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]:
                x = self.relu(bn(conv(x)))
            x = self.avgpool(x)
            return x
        # Cast input to the parameter dtype (fp16 after weight conversion).
        x = x.type(self.conv1.weight.dtype)
        x = stem(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.attnpool(x)
        return x
class CLIP(nn.Module):
    """Image-encoder-only CLIP wrapper around a ModifiedResNet backbone.

    Unlike the full CLIP model, this variant keeps only the visual tower;
    ``forward`` ignores the ``text`` argument (kept for interface
    compatibility) and returns L2-normalized image embeddings.
    """
    def __init__(self,
                 embed_dim: int,
                 # vision
                 image_resolution: int,
                 vision_layers: Union[Tuple[int, int, int, int], int],
                 vision_width: int,
                 ):
        super().__init__()
        vision_heads = vision_width * 32 // 64
        self.visual = ModifiedResNet(
            layers=vision_layers,
            output_dim=embed_dim,
            heads=vision_heads,
            input_resolution=image_resolution,
            width=vision_width)
        # Removed an unused local variable holding a hard-coded, user-specific
        # checkpoint path; loading weights is the caller's responsibility.
    @property
    def dtype(self):
        # Parameter dtype of the stem conv, used to cast inputs consistently.
        return self.visual.conv1.weight.dtype
    def encode_image(self, image):
        """Run the visual backbone on `image` (cast to the model dtype)."""
        return self.visual(image.type(self.dtype))
    def forward(self, image, text):
        """Return L2-normalized image features; `text` is ignored."""
        image_features = self.encode_image(image)
        # normalized features
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        return image_features
| 6,923 | 35.251309 | 113 | py |
BadEncoder | BadEncoder-main/models/imagenet_model.py | from collections import OrderedDict
from typing import Tuple, Union
import torch
import torch.nn.functional as F
from torch import nn
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding equal to the dilation (no bias)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        dilation=dilation,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 (pointwise) convolution without bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Standard torchvision-style ResNet basic block: two 3x3 convs with a
    residual shortcut (identity or the provided `downsample` module)."""
    expansion = 1
    __constants__ = ['downsample']
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        # No ReLU before the residual add (applied after, below).
        out = self.bn2(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """Torchvision-style ResNet bottleneck block (1x1 -> 3x3 -> 1x1)."""
    expansion = 4
    __constants__ = ['downsample']
    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        # NOTE(review): unlike torchvision, `downsample` is registered FIRST,
        # which changes the submodule/parameter ordering (the authors call this
        # a "hack ... to make order correct") — presumably to match a
        # pretrained checkpoint's parameter order; confirm before reordering.
        self.downsample = downsample  # hack: moving downsample to the first to make order correct
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.stride = stride
    def forward(self, x):
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        # No ReLU before the residual add (applied after, below).
        out = self.bn3(out)
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet backbone with a channel-width multiplier (SimCLR-style variants).

    `width_mult` scales the channel count of every stage (e.g. 2 gives
    ResNet-50x2). Note that `_forward_impl` returns the pooled, flattened
    features and does NOT apply `self.fc` (the call is commented out); the
    classifier head is still constructed so checkpoints containing `fc.*`
    weights load without key mismatches.
    """
    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None, width_mult=1):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64 * width_mult
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # ImageNet-style stem: 7x7 stride-2 convolution + stride-2 max-pool.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four stages; widths scale with width_mult.
        self.layer1 = self._make_layer(block, 64 * width_mult, layers[0])
        self.layer2 = self._make_layer(block, 128 * width_mult, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256 * width_mult, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512 * width_mult, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion * width_mult, num_classes)
        # Kaiming init for convolutions, unit scale / zero shift for norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one stage of `blocks` residual blocks.

        The first block may change stride/width (with a 1x1 conv + norm on the
        shortcut); the remaining blocks preserve shape.
        """
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade stride for dilation to keep spatial resolution.
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)
    def _forward_impl(self, x):
        """Return pooled, flattened backbone features (fc deliberately skipped)."""
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        #x = self.fc(x)
        return x
    def forward(self, x):
        return self._forward_impl(x)
def resnet50x1(**kwargs):
    """ResNet-50 with width multiplier 1.

    Extra keyword arguments (e.g. ``num_classes``) are forwarded to ResNet;
    previously they were accepted but silently ignored.
    """
    return ResNet(Bottleneck, [3, 4, 6, 3], width_mult=1, **kwargs)
def resnet50x2(**kwargs):
    """ResNet-50 with width multiplier 2 (SimCLR ResNet-50x2).

    Extra keyword arguments (e.g. ``num_classes``) are forwarded to ResNet;
    previously they were accepted but silently ignored.
    """
    return ResNet(Bottleneck, [3, 4, 6, 3], width_mult=2, **kwargs)
def resnet50x4(**kwargs):
    """ResNet-50 with width multiplier 4 (SimCLR ResNet-50x4).

    Extra keyword arguments (e.g. ``num_classes``) are forwarded to ResNet;
    previously they were accepted but silently ignored.
    """
    return ResNet(Bottleneck, [3, 4, 6, 3], width_mult=4, **kwargs)
class ImageNetResNet(nn.Module):
    """Thin wrapper exposing a ResNet-50 (width x1) backbone as `self.visual`.

    The `visual` attribute name mirrors CLIP-style model layouts; the forward
    pass simply delegates to the backbone's feature extractor.
    """
    def __init__(self):
        super(ImageNetResNet, self).__init__()
        self.visual = ResNet(Bottleneck, [3, 4, 6, 3], width_mult=1)
    def forward(self, x):
        return self.visual(x)
BadEncoder | BadEncoder-main/models/simclr_model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models.resnet import resnet18, resnet34, resnet50
class SimCLRBase(nn.Module):
    """SimCLR backbone: a torchvision ResNet adapted for small images.

    The stock 7x7 stride-2 stem is replaced with a 3x3 stride-1 convolution,
    and both the stem max-pool and the final fully-connected head are dropped,
    leaving a feature extractor that maps images to flat feature vectors.
    """
    def __init__(self, arch='resnet18'):
        super(SimCLRBase, self).__init__()
        constructors = {'resnet18': resnet18, 'resnet34': resnet34, 'resnet50': resnet50}
        if arch not in constructors:
            raise NotImplementedError
        reference = constructors[arch]()
        layers = []
        for name, module in reference.named_children():
            if name == 'conv1':
                # Small-image stem: 3x3 stride-1 instead of ImageNet's 7x7 stride-2.
                module = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
            if isinstance(module, (nn.Linear, nn.MaxPool2d)):
                continue  # drop the classifier head and the stem max-pool
            layers.append(module)
        self.f = nn.Sequential(*layers)
    def forward(self, x):
        """Return flattened backbone features for a batch of images."""
        return torch.flatten(self.f(x), start_dim=1)
class SimCLR(nn.Module):
    """SimCLR model: backbone encoder `f` plus MLP projection head `g`.

    forward() returns L2-normalized (backbone_feature, projection) pairs.
    """
    def __init__(self, feature_dim=128, arch='resnet18'):
        super(SimCLR, self).__init__()
        self.f = SimCLRBase(arch)
        # Backbone output width feeding the projection head.
        feature_width = {'resnet18': 512, 'resnet34': 512, 'resnet50': 2048}
        if arch not in feature_width:
            raise NotImplementedError
        self.g = nn.Sequential(
            nn.Linear(feature_width[arch], 512, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Linear(512, feature_dim, bias=True),
        )
    def forward(self, x):
        feature = self.f(x)
        out = self.g(feature)
        return F.normalize(feature, dim=-1), F.normalize(out, dim=-1)
| 1,984 | 33.824561 | 162 | py |
BadEncoder | BadEncoder-main/datasets/svhn_dataset.py | from torchvision import transforms
from .backdoor_dataset import CIFAR10Mem, CIFAR10Pair, BadEncoderTestBackdoor, ReferenceImg
import numpy as np
test_transform_cifar10 = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])])
test_transform_stl10 = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.44087798, 0.42790666, 0.38678814], [0.25507198, 0.24801506, 0.25641308])])
test_transform_imagenet = transforms.Compose([
transforms.ToTensor(),])
test_transform_CLIP = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),])
classes = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
def get_downstream_svhn(args):
    """Build the SVHN downstream datasets for evaluating a (backdoored) encoder.

    The normalization transform and the .npz file names depend on which
    pre-trained encoder is used (args.encoder_usage_info); CLIP/ImageNet
    encoders use the 224x224 variants of the data files.

    Returns (target_dataset, memory_data, test_data_clean, test_data_backdoor).
    """
    setup_by_encoder = {
        'cifar10': (test_transform_cifar10, 'train.npz', 'test.npz'),
        'stl10': (test_transform_stl10, 'train.npz', 'test.npz'),
        'CLIP': (test_transform_CLIP, 'train_224.npz', 'test_224.npz'),
        'imagenet': (test_transform_imagenet, 'train_224.npz', 'test_224.npz'),
    }
    if args.encoder_usage_info not in setup_by_encoder:
        raise NotImplementedError
    test_transform, training_file_name, testing_file_name = setup_by_encoder[args.encoder_usage_info]
    print(f'test_transform_{args.encoder_usage_info}')
    target_dataset = ReferenceImg(reference_file=args.reference_file, transform=test_transform)
    memory_data = CIFAR10Mem(numpy_file=args.data_dir + training_file_name, class_type=classes, transform=test_transform)
    test_data_backdoor = BadEncoderTestBackdoor(numpy_file=args.data_dir + testing_file_name, trigger_file=args.trigger_file, reference_label=args.reference_label, transform=test_transform)
    test_data_clean = CIFAR10Mem(numpy_file=args.data_dir + testing_file_name, class_type=classes, transform=test_transform)
    return target_dataset, memory_data, test_data_clean, test_data_backdoor
| 2,275 | 43.627451 | 189 | py |
BadEncoder | BadEncoder-main/datasets/cifar10_dataset.py | from torchvision import transforms
from .backdoor_dataset import CIFAR10Mem, CIFAR10Pair, BadEncoderTestBackdoor, BadEncoderDataset, ReferenceImg
import numpy as np
train_transform = transforms.Compose([
transforms.RandomResizedCrop(32),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.ToTensor(),
transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])])
finetune_transform_cifar10 = transforms.Compose([
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.ToTensor(),
transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])])
finetune_transform_CLIP = transforms.Compose([
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.ToTensor(),
transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),])
backdoor_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])])
test_transform_cifar10 = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])])
test_transform_stl10 = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.44087798, 0.42790666, 0.38678814], [0.25507198, 0.24801506, 0.25641308])])
test_transform_imagenet = transforms.Compose([
transforms.ToTensor(),])
test_transform_CLIP = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),])
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def get_pretraining_cifar10(data_dir):
    """Return (train_pair, memory, test) datasets for CIFAR10 pre-training.

    The training set yields two augmented views per image (contrastive
    pairs); memory and test sets use the plain normalization transform.
    """
    train_path = data_dir + "train.npz"
    test_path = data_dir + "test.npz"
    train_data = CIFAR10Pair(numpy_file=train_path, class_type=classes, transform=train_transform)
    memory_data = CIFAR10Mem(numpy_file=train_path, class_type=classes, transform=test_transform_cifar10)
    test_data = CIFAR10Mem(numpy_file=test_path, class_type=classes, transform=test_transform_cifar10)
    return train_data, memory_data, test_data
def get_shadow_cifar10(args):
    """Build the CIFAR10 shadow dataset for BadEncoder injection, plus the
    memory / clean-test / backdoored-test datasets used for evaluation.

    Returns (shadow_dataset, memory_data, test_data_clean, test_data_backdoor).
    """
    training_data_num = 50000
    # Fixed seed so the (full) permutation of training indices is reproducible.
    np.random.seed(100)
    training_data_sampling_indices = np.random.choice(training_data_num, training_data_num, replace=False)
    print('loading from the training data')
    shadow_dataset = BadEncoderDataset(
        numpy_file=args.data_dir + 'train.npz',
        trigger_file=args.trigger_file,
        reference_file=args.reference_file,
        class_type=classes,
        indices=training_data_sampling_indices,
        transform=train_transform,  # The train transform is not needed in BadEncoder.
        bd_transform=test_transform_cifar10,
        ftt_transform=finetune_transform_cifar10
    )
    memory_data = CIFAR10Mem(numpy_file=args.data_dir + 'train.npz', class_type=classes, transform=test_transform_cifar10)
    test_data_backdoor = BadEncoderTestBackdoor(numpy_file=args.data_dir + 'test.npz', trigger_file=args.trigger_file, reference_label=args.reference_label, transform=test_transform_cifar10)
    test_data_clean = CIFAR10Mem(numpy_file=args.data_dir + 'test.npz', class_type=classes, transform=test_transform_cifar10)
    return shadow_dataset, memory_data, test_data_clean, test_data_backdoor
def get_shadow_cifar10_224(args):
    """Build the 224x224 CIFAR10 shadow dataset (CLIP-style preprocessing)
    for BadEncoder injection.

    Only the shadow dataset is needed here, so the memory/test entries of
    the returned 4-tuple are None (matching get_shadow_cifar10's shape).
    """
    training_data_num = 50000
    # Fixed seed so the index permutation is reproducible.
    np.random.seed(100)
    training_data_sampling_indices = np.random.choice(training_data_num, training_data_num, replace=False)
    print('loading from the training data')
    shadow_dataset = BadEncoderDataset(
        numpy_file=args.data_dir + 'train_224.npz',
        trigger_file=args.trigger_file,
        reference_file=args.reference_file,
        class_type=classes,
        indices=training_data_sampling_indices,
        transform=None,
        bd_transform=test_transform_CLIP,
        ftt_transform=finetune_transform_CLIP
    )
    return shadow_dataset, None, None, None
def get_downstream_cifar10(args):
    """Build the CIFAR10 downstream datasets for evaluating a (backdoored) encoder.

    Transform and .npz file names depend on the pre-trained encoder
    (args.encoder_usage_info); CLIP/ImageNet encoders use the 224x224 files.

    Returns (target_dataset, memory_data, test_data_clean, test_data_backdoor).
    """
    setup_by_encoder = {
        'cifar10': (test_transform_cifar10, 'train.npz', 'test.npz'),
        'stl10': (test_transform_stl10, 'train.npz', 'test.npz'),
        'CLIP': (test_transform_CLIP, 'train_224.npz', 'test_224.npz'),
        'imagenet': (test_transform_imagenet, 'train_224.npz', 'test_224.npz'),
    }
    if args.encoder_usage_info not in setup_by_encoder:
        raise NotImplementedError
    test_transform, training_file_name, testing_file_name = setup_by_encoder[args.encoder_usage_info]
    print(f'test_transform_{args.encoder_usage_info}')
    target_dataset = ReferenceImg(reference_file=args.reference_file, transform=test_transform)
    memory_data = CIFAR10Mem(numpy_file=args.data_dir + training_file_name, class_type=classes, transform=test_transform)
    test_data_backdoor = BadEncoderTestBackdoor(numpy_file=args.data_dir + testing_file_name, trigger_file=args.trigger_file, reference_label=args.reference_label, transform=test_transform)
    test_data_clean = CIFAR10Mem(numpy_file=args.data_dir + testing_file_name, class_type=classes, transform=test_transform)
    return target_dataset, memory_data, test_data_clean, test_data_backdoor
| 5,850 | 43.664122 | 190 | py |
BadEncoder | BadEncoder-main/datasets/stl10_dataset.py | from torchvision import transforms
from .backdoor_dataset import CIFAR10Mem, CIFAR10Pair, BadEncoderTestBackdoor, BadEncoderDataset, ReferenceImg
import numpy as np
train_transform = transforms.Compose([
transforms.RandomResizedCrop(32),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.ToTensor(),
transforms.Normalize([0.44087798, 0.42790666, 0.38678814], [0.25507198, 0.24801506, 0.25641308])])
finetune_transform = transforms.Compose([
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.ToTensor(),
transforms.Normalize([0.44087798, 0.42790666, 0.38678814], [0.25507198, 0.24801506, 0.25641308])])
test_transform_cifar10 = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])])
test_transform_stl10 = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.44087798, 0.42790666, 0.38678814], [0.25507198, 0.24801506, 0.25641308])])
backdoor_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.44087798, 0.42790666, 0.38678814], [0.25507198, 0.24801506, 0.25641308])])
test_transform_imagenet = transforms.Compose([
transforms.ToTensor(),])
test_transform_CLIP = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),])
classes = ['airplane', 'bird', 'car', 'cat', 'deer', 'dog', 'horse', 'monkey', 'ship', 'truck']
def get_pretraining_stl10(data_dir):
    """Return (train_pair, memory, test) datasets for STL10 pre-training.

    Contrastive pairs come from the unlabeled split; the labeled train/test
    splits back the memory bank and evaluation set.
    """
    unlabeled_path = data_dir + "train_unlabeled.npz"
    train_data = CIFAR10Pair(numpy_file=unlabeled_path, class_type=classes, transform=train_transform)
    memory_data = CIFAR10Mem(numpy_file=data_dir + "train.npz", class_type=classes, transform=test_transform_stl10)
    test_data = CIFAR10Mem(numpy_file=data_dir + "test.npz", class_type=classes, transform=test_transform_stl10)
    return train_data, memory_data, test_data
def get_shadow_stl10(args):
    """Build the STL10 shadow dataset for BadEncoder injection, plus the
    memory / clean-test / backdoored-test datasets.

    The backdoor transforms are fixed to STL10 statistics; the evaluation
    transform and .npz file names depend on args.pretraining_dataset.
    Returns (shadow_dataset, memory_data, test_data_clean, test_data_backdoor).
    """
    training_data_num = 50000
    # Fixed seed so the (full) permutation of training indices is reproducible.
    np.random.seed(100)
    training_data_sampling_indices = np.random.choice(training_data_num, training_data_num, replace=False)
    shadow_dataset = BadEncoderDataset(
        numpy_file=args.data_dir + "train_unlabeled.npz",
        trigger_file=args.trigger_file,
        reference_file= args.reference_file,
        class_type=classes,indices = training_data_sampling_indices,
        transform=train_transform,
        bd_transform=backdoor_transform,
        ftt_transform=finetune_transform
    )
    training_file_name = 'train.npz'
    testing_file_name = 'test.npz'
    # Choose the normalization matching the pre-training encoder; CLIP and
    # ImageNet encoders use the 224x224 variants of the data files.
    if args.pretraining_dataset == 'cifar10':
        print('test_transform_cifar10')
        test_transform = test_transform_cifar10
    elif args.pretraining_dataset == 'stl10':
        print('test_transform_stl10')
        test_transform = test_transform_stl10
    elif args.pretraining_dataset == 'CLIP':
        print('test_transform_CLIP')
        test_transform = test_transform_CLIP
        training_file_name = 'train_224.npz'
        testing_file_name = 'test_224.npz'
    elif args.pretraining_dataset == 'imagenet':
        print('test_transform_imagenet')
        test_transform = test_transform_imagenet
        training_file_name = 'train_224.npz'
        testing_file_name = 'test_224.npz'
    else:
        raise NotImplementedError
    memory_data = CIFAR10Mem(numpy_file=args.data_dir+training_file_name, class_type=classes, transform=test_transform)
    test_data_backdoor = BadEncoderTestBackdoor(numpy_file=args.data_dir+testing_file_name, trigger_file=args.trigger_file, reference_label= args.reference_label, transform=test_transform)
    test_data_clean = CIFAR10Mem(numpy_file=args.data_dir+testing_file_name, class_type=classes, transform=test_transform)
    return shadow_dataset, memory_data, test_data_clean, test_data_backdoor
def get_downstream_stl10(args):
    """Build the STL10 downstream datasets for evaluating a (backdoored) encoder.

    Transform and .npz file names depend on the pre-trained encoder
    (args.encoder_usage_info); CLIP/ImageNet encoders use the 224x224 files.

    Returns (target_dataset, memory_data, test_data_clean, test_data_backdoor).
    """
    setup_by_encoder = {
        'cifar10': (test_transform_cifar10, 'train.npz', 'test.npz'),
        'stl10': (test_transform_stl10, 'train.npz', 'test.npz'),
        'CLIP': (test_transform_CLIP, 'train_224.npz', 'test_224.npz'),
        'imagenet': (test_transform_imagenet, 'train_224.npz', 'test_224.npz'),
    }
    if args.encoder_usage_info not in setup_by_encoder:
        raise NotImplementedError
    test_transform, training_file_name, testing_file_name = setup_by_encoder[args.encoder_usage_info]
    print(f'test_transform_{args.encoder_usage_info}')
    target_dataset = ReferenceImg(reference_file=args.reference_file, transform=test_transform)
    memory_data = CIFAR10Mem(numpy_file=args.data_dir + training_file_name, class_type=classes, transform=test_transform)
    test_data_backdoor = BadEncoderTestBackdoor(numpy_file=args.data_dir + testing_file_name, trigger_file=args.trigger_file, reference_label=args.reference_label, transform=test_transform)
    test_data_clean = CIFAR10Mem(numpy_file=args.data_dir + testing_file_name, class_type=classes, transform=test_transform)
    return target_dataset, memory_data, test_data_clean, test_data_backdoor
| 5,522 | 44.644628 | 189 | py |
BadEncoder | BadEncoder-main/datasets/gtsrb_dataset.py | from torchvision import transforms
from .backdoor_dataset import CIFAR10Mem, CIFAR10Pair, BadEncoderTestBackdoor, ReferenceImg
import numpy as np
test_transform_cifar10 = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])])
test_transform_stl10 = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.44087798, 0.42790666, 0.38678814], [0.25507198, 0.24801506, 0.25641308])])
test_transform_imagenet = transforms.Compose([
transforms.ToTensor(),])
test_transform_CLIP = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),])
classes = ['Speed limit 20km/h',
'Speed limit 30km/h',
'Speed limit 50km/h',
'Speed limit 60km/h',
'Speed limit 70km/h',
'Speed limit 80km/h', #5
'End of speed limit 80km/h',
'Speed limit 100km/h',
'Speed limit 120km/h',
'No passing sign',
'No passing for vehicles over 3.5 metric tons', #10
'Right-of-way at the next intersection',
'Priority road sign',
'Yield sign',
'Stop sign', #14
'No vehicles sign', #15
'Vehicles over 3.5 metric tons prohibited',
'No entry',
'General caution',
'Dangerous curve to the left',
'Dangerous curve to the right', #20
'Double curve',
'Bumpy road',
'Slippery road',
'Road narrows on the right',
'Road work', #25
'Traffic signals',
'Pedestrians crossing',
'Children crossing',
'Bicycles crossing',
'Beware of ice or snow', #30
'Wild animals crossing',
'End of all speed and passing limits',
'Turn right ahead',
'Turn left ahead',
'Ahead only', #35
'Go straight or right',
'Go straight or left',
'Keep right',
'Keep left',
'Roundabout mandatory', #40
'End of no passing',
'End of no passing by vehicles over 3.5 metric tons']
def get_downstream_gtsrb(args):
    """Build the GTSRB downstream datasets for evaluating a (backdoored) encoder.

    Transform and .npz file names depend on the pre-trained encoder
    (args.encoder_usage_info); CLIP/ImageNet encoders use the 224x224 files.

    Returns (target_dataset, memory_data, test_data_clean, test_data_backdoor).
    """
    setup_by_encoder = {
        'cifar10': (test_transform_cifar10, 'train.npz', 'test.npz'),
        'stl10': (test_transform_stl10, 'train.npz', 'test.npz'),
        'CLIP': (test_transform_CLIP, 'train_224.npz', 'test_224.npz'),
        'imagenet': (test_transform_imagenet, 'train_224.npz', 'test_224.npz'),
    }
    if args.encoder_usage_info not in setup_by_encoder:
        raise NotImplementedError
    test_transform, training_file_name, testing_file_name = setup_by_encoder[args.encoder_usage_info]
    print(f'test_transform_{args.encoder_usage_info}')
    target_dataset = ReferenceImg(reference_file=args.reference_file, transform=test_transform)
    memory_data = CIFAR10Mem(numpy_file=args.data_dir + training_file_name, class_type=classes, transform=test_transform)
    test_data_backdoor = BadEncoderTestBackdoor(numpy_file=args.data_dir + testing_file_name, trigger_file=args.trigger_file, reference_label=args.reference_label, transform=test_transform)
    test_data_clean = CIFAR10Mem(numpy_file=args.data_dir + testing_file_name, class_type=classes, transform=test_transform)
    return target_dataset, memory_data, test_data_clean, test_data_backdoor
| 4,260 | 43.852632 | 189 | py |
BadEncoder | BadEncoder-main/datasets/__init__.py | import torch
import torchvision
from .cifar10_dataset import get_pretraining_cifar10, get_shadow_cifar10, get_downstream_cifar10, get_shadow_cifar10_224
from .gtsrb_dataset import get_downstream_gtsrb
from .svhn_dataset import get_downstream_svhn
from .stl10_dataset import get_pretraining_stl10, get_shadow_stl10, get_downstream_stl10
def get_pretraining_dataset(args):
    """Dispatch to the pre-training dataset builder named by args.pretraining_dataset."""
    builders = {
        'cifar10': get_pretraining_cifar10,
        'stl10': get_pretraining_stl10,
    }
    if args.pretraining_dataset not in builders:
        raise NotImplementedError
    return builders[args.pretraining_dataset](args.data_dir)
def get_shadow_dataset(args):
    """Dispatch to the shadow (backdoor-injection) dataset builder for args.shadow_dataset."""
    builders = {
        'cifar10': get_shadow_cifar10,
        'stl10': get_shadow_stl10,
        'cifar10_224': get_shadow_cifar10_224,
    }
    if args.shadow_dataset not in builders:
        raise NotImplementedError
    return builders[args.shadow_dataset](args)
def get_dataset_evaluation(args):
    """Dispatch to the downstream evaluation dataset builder for args.dataset."""
    builders = {
        'cifar10': get_downstream_cifar10,
        'gtsrb': get_downstream_gtsrb,
        'svhn': get_downstream_svhn,
        'stl10': get_downstream_stl10,
    }
    if args.dataset not in builders:
        raise NotImplementedError
    return builders[args.dataset](args)
| 1,327 | 30.619048 | 120 | py |
BadEncoder | BadEncoder-main/datasets/backdoor_dataset.py |
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from torchvision.datasets import CIFAR10
from PIL import Image
import numpy as np
import torch
import random
import copy
class ReferenceImg(Dataset):
    """Dataset of attack reference (target-class) images from an .npz file.

    The file must contain arrays 'x' (images) and 'y' (labels). Each item is
    an (image, label) pair, with the optional transform applied to the image.
    """
    def __init__(self, reference_file, transform=None):
        archive = np.load(reference_file)
        self.target_input_array = archive  # kept for external access
        self.data = archive['x']
        self.targets = archive['y']
        self.transform = transform
    def __getitem__(self, index):
        image = Image.fromarray(self.data[index])
        label = self.targets[index]
        if self.transform is not None:
            image = self.transform(image)
        return image, label
    def __len__(self):
        return len(self.data)
class BadEncoderDataset(Dataset):
    """Shadow dataset used to fine-tune a clean encoder into a backdoored one.

    Each item yields:
      * the clean image through ``bd_transform``,
      * one triggered copy of the image per entry of the reference set,
      * the reference images through ``bd_transform`` and ``ftt_transform``.

    The trigger .npz must contain patches 't' and masks 'tm'; the reference
    .npz must contain reference images under 'x'.
    """
    def __init__(self, numpy_file, trigger_file, reference_file, indices, class_type, transform=None, bd_transform=None, ftt_transform=None):
        self.input_array = np.load(numpy_file)
        self.data = self.input_array['x']
        self.trigger_input_array = np.load(trigger_file)
        self.target_input_array = np.load(reference_file)
        self.trigger_patch_list = self.trigger_input_array['t']
        self.trigger_mask_list = self.trigger_input_array['tm']
        self.target_image_list = self.target_input_array['x']
        self.classes = class_type
        self.indices = indices  # permutation/subset of rows of `data` to expose
        self.transform = transform  # augmentation for the clean view (result unused, see below)
        self.bd_transform = bd_transform  # applied to clean and triggered images
        self.ftt_transform = ftt_transform  # fine-tuning augmentation for reference images
    def __getitem__(self, index):
        img = self.data[self.indices[index]]
        # Deep copies so trigger pasting never mutates the shared data array.
        img_copy = copy.deepcopy(img)
        backdoored_image = copy.deepcopy(img)
        img = Image.fromarray(img)
        '''original image'''
        # NOTE(review): im_1 is computed but never returned (dead augmentation).
        if self.transform is not None:
            im_1 = self.transform(img)
        img_raw = self.bd_transform(img)
        '''generate backdoor image'''
        # One triggered copy per reference image: mask out the trigger region
        # and paste the corresponding patch in.
        img_backdoor_list = []
        for i in range(len(self.target_image_list)):
            backdoored_image[:,:,:] = img_copy * self.trigger_mask_list[i] + self.trigger_patch_list[i][:]
            img_backdoor =self.bd_transform(Image.fromarray(backdoored_image))
            img_backdoor_list.append(img_backdoor)
        # Reference images under both the backdoor and fine-tune transforms.
        target_image_list_return, target_img_1_list_return = [], []
        for i in range(len(self.target_image_list)):
            target_img = Image.fromarray(self.target_image_list[i])
            target_image = self.bd_transform(target_img)
            target_img_1 = self.ftt_transform(target_img)
            target_image_list_return.append(target_image)
            target_img_1_list_return.append(target_img_1)
        return img_raw, img_backdoor_list, target_image_list_return, target_img_1_list_return
    def __len__(self):
        return len(self.indices)
class BadEncoderTestBackdoor(Dataset):
    """Test set where every image carries the backdoor trigger.

    Each item is (triggered_image, reference_label): the image is masked and
    the first trigger patch pasted in, and the label is forced to the
    attacker's target class — used to measure attack success rate.
    """
    def __init__(self, numpy_file, trigger_file, reference_label, transform=None):
        """
        Args:
            numpy_file (string): Path to the numpy file with arrays 'x'/'y'.
            trigger_file (string): .npz with trigger patches 't' and masks 'tm'.
            reference_label: target class returned for every item.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.input_array = np.load(numpy_file)
        self.data = self.input_array['x']
        self.targets = self.input_array['y']
        self.trigger_input_array = np.load(trigger_file)
        self.trigger_patch_list = self.trigger_input_array['t']
        self.trigger_mask_list = self.trigger_input_array['tm']
        self.target_class = reference_label
        self.test_transform = transform
    def __getitem__(self,index):
        # Deep copy so the trigger is not written back into the shared array.
        img = copy.deepcopy(self.data[index])
        # Mask out the trigger region and paste in the (first) trigger patch.
        img[:] =img * self.trigger_mask_list[0] + self.trigger_patch_list[0][:]
        img_backdoor =self.test_transform(Image.fromarray(img))
        return img_backdoor, self.target_class
    def __len__(self):
        return self.data.shape[0]
class CIFAR10CUSTOM(Dataset):
    """Base dataset backed by an .npz archive.

    The archive must contain images under 'x' and integer labels under 'y'
    (labels stored as a column; the first column is used).
    """
    def __init__(self, numpy_file, class_type, transform=None):
        archive = np.load(numpy_file)
        self.input_array = archive  # kept for external access
        self.data = archive['x']
        self.targets = archive['y'][:, 0].tolist()
        self.classes = class_type
        self.transform = transform
    def __len__(self):
        return self.data.shape[0]
class CIFAR10Pair(CIFAR10CUSTOM):
    """CIFAR10 Dataset returning two independently augmented views of one image.

    Used for contrastive pre-training: ``self.transform`` is applied twice to
    the same source image to obtain the two views.
    """
    def __getitem__(self, index):
        img = Image.fromarray(self.data[index])
        if self.transform is not None:
            im_1 = self.transform(img)
            im_2 = self.transform(img)
        else:
            # Bug fix: previously im_1/im_2 were unbound when no transform was
            # set, raising NameError; fall back to the raw PIL image twice.
            im_1 = im_2 = img
        return im_1, im_2
class CIFAR10Mem(CIFAR10CUSTOM):
    """CIFAR10 Dataset yielding (image, label) pairs for memory banks and evaluation."""
    def __getitem__(self, index):
        image = Image.fromarray(self.data[index])
        label = self.targets[index]
        if self.transform is not None:
            image = self.transform(image)
        return image, label
| 5,290 | 30.494048 | 141 | py |
ElegantRL | ElegantRL-master/demo_IsaacGym.py | import isaacgym
import torch
import sys
# import wandb
from elegantrl.train.run import train_and_evaluate
from elegantrl.train.config import Arguments, build_env
from elegantrl.agents.AgentPPO import AgentPPO
from elegantrl.envs.IsaacGym import IsaacVecEnv
def demo(task, gpu_id=0):
    """Train PPO on an Isaac Gym vectorized environment.

    Args:
        task: task name; 'Ant' and 'Humanoid' are configured.
        gpu_id: GPU index used for simulation, rollout, and learning
            (default 0, matching the previous hard-coded value).

    Raises:
        ValueError: if `task` is not one of the configured tasks.

    Bug fixes: the Humanoid branch referenced an undefined `gpu_id`
    (NameError), and an unknown task fell through to a NameError on `args`.
    """
    env_name = task
    agent_class = AgentPPO
    env_func = IsaacVecEnv
    if env_name == 'Ant':
        env_args = {
            'env_num': 2048,
            'env_name': env_name,
            'max_step': 1000,
            'state_dim': 60,
            'action_dim': 8,
            'if_discrete': False,
            'target_return': 6000.,
            'sim_device_id': gpu_id,
            'rl_device_id': gpu_id,
        }
        env = build_env(env_func=env_func, env_args=env_args)
        args = Arguments(agent_class, env=env)
        args.if_Isaac = True
        args.if_use_old_traj = True
        args.if_use_gae = True
        args.reward_scale = 2 ** -4
        args.horizon_len = 32
        args.batch_size = 16384  # minibatch size
        args.repeat_times = 5
        args.gamma = 0.99
        args.lambda_gae_adv = 0.95
        args.learning_rate = 0.0005
    elif env_name == 'Humanoid':
        env_args = {
            'env_num': 1024,
            'env_name': env_name,
            'max_step': 1000,
            'state_dim': 108,
            'action_dim': 21,
            'if_discrete': False,
            'target_return': 15000.,
            'sim_device_id': gpu_id,
            'rl_device_id': gpu_id,
        }
        env = build_env(env_func=env_func, env_args=env_args)
        args = Arguments(agent_class, env=env)
        args.if_Isaac = True
        args.if_use_old_traj = True
        args.if_use_gae = True
        args.reward_scale = 0.01
        args.horizon_len = 32
        args.batch_size = 8192
        args.repeat_times = 5
        args.gamma = 0.99
        args.lambda_gae_adv = 0.95
        args.learning_rate = 0.0005
    else:
        raise ValueError(f"demo(): unsupported task {task!r}; expected 'Ant' or 'Humanoid'")
    args.eval_gap = 1e6
    args.target_step = 3e8
    args.learner_gpus = gpu_id
    args.random_seed = 0
    train_and_evaluate(args)
if __name__ == '__main__':
    # Default demo task when the script is run directly.
    demo(task='Ant')
| 2,126 | 24.939024 | 61 | py |
ElegantRL | ElegantRL-master/setup.py | from setuptools import setup, find_packages
setup(
name="elegantrl",
version="0.3.6",
author="Xiaoyang Liu, Steven Li, Ming Zhu, Hongyang Yang, Jiahao Zheng",
author_email="XL2427@columbia.edu",
url="https://github.com/AI4Finance-LLC/ElegantRL",
license="Apache 2.0",
packages=find_packages(),
install_requires=[
"torch",
"numpy",
"matplotlib",
"gym",
"gym[Box2D]",
],
description="Lightweight, Efficient and Stable DRL Implementation Using PyTorch",
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
keywords="Deep Reinforcement Learning",
python_requires=">=3.6",
)
| 1,223 | 33.971429 | 85 | py |
ElegantRL | ElegantRL-master/examples/demo_vec_env_A2C_PPO.py | import sys
from argparse import ArgumentParser
from elegantrl.train.run import train_agent, train_agent_multiprocessing
from elegantrl.train.config import Config, get_gym_env_args
from elegantrl.agents.AgentPPO import AgentVecPPO
from elegantrl.agents.AgentA2C import AgentVecA2C
sys.path.append("../")
def train_ppo_a2c_for_pendulum():
    """Train vectorized PPO (index 0) or A2C (index 1) on the custom
    Pendulum swing-up environment, either single- or multi-process."""
    from elegantrl.envs.CustomGymEnv import PendulumEnv
    agent_class = [AgentVecPPO, AgentVecA2C][0]  # DRL algorithm name
    env_class = PendulumEnv  # run a custom env: PendulumEnv, which based on OpenAI pendulum
    env_args = {
        'env_name': 'Pendulum',  # Apply torque on the free end to swing a pendulum into an upright position
        'max_step': 200,  # the max step number of an episode.
        'state_dim': 3,  # the x-y coordinates of the pendulum's free end and its angular velocity.
        'action_dim': 1,  # the torque applied to free end of the pendulum
        'if_discrete': False  # continuous action space, symbols → direction, value → force
    }
    get_gym_env_args(env=PendulumEnv(), if_print=True)  # return env_args
    args = Config(agent_class, env_class, env_args)  # see `config.py Arguments()` for hyperparameter explanation
    args.break_step = int(8e4)  # break training if 'total_step > break_step'
    args.net_dims = (128, 64)  # the middle layer dimension of MultiLayer Perceptron
    args.gamma = 0.97  # discount factor of future rewards
    args.horizon_len = args.max_step * 4  # collect 4 episodes worth of steps per rollout
    args.repeat_times = 32  # repeatedly update network using ReplayBuffer to keep critic's loss small
    args.learning_rate = 2e-4
    args.state_value_tau = 0.1  # the tau of normalize for value and state `std = (1-std)*std + tau*std`
    # NOTE(review): GPU_ID is expected to be a module-level constant defined
    # elsewhere in this file — confirm it is set before calling.
    args.gpu_id = GPU_ID
    args.num_workers = 4  # rollout workers when multiprocessing is used
    if_single_process = True
    if if_single_process:
        train_agent(args)
    else:
        train_agent_multiprocessing(args)  # train_agent(args)
"""
-2000 < -1200 < -200 < -80
################################################################################
ID Step Time | avgR stdR avgS stdS | expR objC etc.
0 6.40e+03 14 |-1192.19 199.4 200 0 | -1.44 32.65 0.02 0.01
0 6.40e+03 14 |-1192.19
0 2.88e+04 38 | -952.89 70.4 200 0 | -1.39 13.91 0.02 -0.03
0 2.88e+04 38 | -952.89
0 5.12e+04 65 | -421.47 72.3 200 0 | -1.38 12.35 0.00 -0.06
0 5.12e+04 65 | -421.47
0 7.36e+04 91 | -168.78 74.8 200 0 | -1.28 4.49 0.04 -0.16
0 7.36e+04 91 | -168.78
| TrainingTime: 103 | SavedDir: ./Pendulum_PPO_0
"""
def demo_load_pendulum_and_render():
import torch
gpu_id = 0 # >=0 means GPU ID, -1 means CPU
device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
from elegantrl.envs.CustomGymEnv import PendulumEnv
env_class = PendulumEnv # run a custom env: PendulumEnv, which based on OpenAI pendulum
env_args = {
'env_name': 'Pendulum', # Apply torque on the free end to swing a pendulum into an upright position
# Reward: r = -(theta + 0.1 * theta_dt + 0.001 * torque)
'num_envs': 1, # the number of sub envs in vectorized env. `num_envs=1` in single env.
'state_dim': 3, # the x-y coordinates of the pendulum's free end and its angular velocity.
'action_dim': 1, # the torque applied to free end of the pendulum
'if_discrete': False # continuous action space, symbols → direction, value → force
}
'''init'''
from elegantrl.train.config import build_vec_env
env = build_vec_env(env_class=env_class, env_args=env_args)
act = torch.load(f"./Pendulum_PPO_0/act.pt", map_location=device)
'''evaluate'''
eval_times = 2 ** 7
from elegantrl.train.evaluator_vec_env import get_rewards_and_step
rewards_step_list = [get_rewards_and_step(env, act) for _ in range(eval_times)]
rewards_step_ten = torch.tensor(rewards_step_list)
print(f"\n| average cumulative_returns {rewards_step_ten[:, 0].mean().item():9.3f}"
f"\n| average episode steps {rewards_step_ten[:, 1].mean().item():9.3f}")
'''render'''
if_discrete = env.if_discrete
device = next(act.parameters()).device # net.parameters() is a Python generator.
state = env.reset()
steps = None
returns = 0.0 # sum of rewards in an episode
for steps in range(12345):
s_tensor = torch.as_tensor(state, dtype=torch.float32, device=device).unsqueeze(0)
a_tensor = act(s_tensor).argmax(dim=1) if if_discrete else act(s_tensor)
action = a_tensor.detach().cpu().numpy()[0] # not need detach(), because using torch.no_grad() outside
state, reward, done, _ = env.step(action)
returns += reward
env.render()
if done:
break
returns = getattr(env, 'cumulative_rewards', returns)
steps += 1
print(f"\n| cumulative_returns {returns}"
f"\n| episode steps {steps}")
def demo_load_pendulum_vectorized_env():
import torch
gpu_id = 0 # >=0 means GPU ID, -1 means CPU
device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
from elegantrl.envs.CustomGymEnv import PendulumVecEnv
env_class = PendulumVecEnv # run a custom env: PendulumEnv, which based on OpenAI pendulum
num_envs = 4
env_args = {
'env_name': 'Pendulum', # Apply torque on the free end to swing a pendulum into an upright position
'num_envs': num_envs, # the number of sub envs in vectorized env
'max_step': 200, # the max step number in an episode for evaluation
'state_dim': 3, # the x-y coordinates of the pendulum's free end and its angular velocity.
'action_dim': 1, # the torque applied to free end of the pendulum
'if_discrete': False # continuous action space, symbols → direction, value → force
}
'''init'''
from elegantrl.train.config import build_vec_env
env = build_vec_env(env_class=env_class, env_args=env_args)
act = torch.load(f"./Pendulum_PPO_0/act.pt", map_location=device)
'''evaluate'''
eval_times = 2 ** 7
from elegantrl.train.evaluator_vec_env import get_rewards_and_step_from_vec_env
rewards_step_list = []
[rewards_step_list.extend(get_rewards_and_step_from_vec_env(env, act)) for _ in range(eval_times // num_envs)]
rewards_step_ten = torch.tensor(rewards_step_list)
print(f"\n| average cumulative_returns {rewards_step_ten[:, 0].mean().item():9.3f}"
f"\n| average episode steps {rewards_step_ten[:, 1].mean().item():9.3f}")
if __name__ == '__main__':
Parser = ArgumentParser(description='ArgumentParser for ElegantRL')
Parser.add_argument('--gpu', type=int, default=0, help='GPU device ID for training')
Parser.add_argument('--drl', type=int, default=0, help='RL algorithms ID for training')
Parser.add_argument('--env', type=int, default=0, help='the environment ID for training')
Args = Parser.parse_args()
GPU_ID = Args.gpu
DRL_ID = Args.drl
ENV_ID = Args.env
train_ppo_a2c_for_pendulum()
| 7,209 | 44.632911 | 114 | py |
ElegantRL | ElegantRL-master/examples/demo_A2C_PPO.py | import sys
from argparse import ArgumentParser
sys.path.append("..")
if True: # write after `sys.path.append("..")`
from elegantrl import train_agent, train_agent_multiprocessing
from elegantrl import Config, get_gym_env_args
from elegantrl.agents import AgentPPO, AgentDiscretePPO
from elegantrl.agents import AgentA2C, AgentDiscreteA2C
"""continuous action"""
def train_ppo_a2c_for_pendulum():
from elegantrl.envs.CustomGymEnv import PendulumEnv
agent_class = [AgentPPO, AgentA2C][DRL_ID] # DRL algorithm name
env_class = PendulumEnv # run a custom env: PendulumEnv, which based on OpenAI pendulum
env_args = {
'env_name': 'Pendulum', # Apply torque on the free end to swing a pendulum into an upright position
'max_step': 200, # the max step number of an episode.
'state_dim': 3, # the x-y coordinates of the pendulum's free end and its angular velocity.
'action_dim': 1, # the torque applied to free end of the pendulum
'if_discrete': False # continuous action space, symbols → direction, value → force
}
get_gym_env_args(env=PendulumEnv(), if_print=True) # return env_args
args = Config(agent_class, env_class, env_args) # see `config.py Arguments()` for hyperparameter explanation
args.break_step = int(8e4) # break training if 'total_step > break_step'
args.net_dims = (128, 64) # the middle layer dimension of MultiLayer Perceptron
args.gamma = 0.97 # discount factor of future rewards
args.horizon_len = args.max_step * 4
args.repeat_times = 32 # repeatedly update network using ReplayBuffer to keep critic's loss small
args.learning_rate = 2e-4
args.state_value_tau = 0.1 # the tau of normalize for value and state `std = (1-std)*std + tau*std`
args.gpu_id = GPU_ID
args.num_workers = 4
if_single_process = True
if if_single_process:
train_agent(args)
else:
train_agent_multiprocessing(args) # train_agent(args)
"""
-2000 < -1200 < -200 < -80
################################################################################
ID Step Time | avgR stdR avgS stdS | expR objC etc.
0 8.00e+02 2 |-1219.07 279.3 200 0 | -1.41 49.69 0.02 -0.01
0 2.08e+04 46 | -162.10 74.0 200 0 | -1.25 9.47 0.01 -0.13
0 4.08e+04 91 | -162.31 185.5 200 0 | -1.14 0.95 0.01 -0.29
0 6.08e+04 136 | -81.47 70.3 200 0 | -1.00 0.17 0.02 -0.45
0 8.08e+04 201 | -84.41 70.0 200 0 | -0.84 2.62 0.01 -0.53
| UsedTime: 202 | SavedDir: ./Pendulum_VecPPO_0
"""
def train_ppo_a2c_for_pendulum_vec_env():
from elegantrl.envs.CustomGymEnv import PendulumEnv
agent_class = [AgentPPO, AgentA2C][DRL_ID] # DRL algorithm name
env_class = PendulumEnv # run a custom env: PendulumEnv, which based on OpenAI pendulum
env_args = {
'env_name': 'Pendulum', # Apply torque on the free end to swing a pendulum into an upright position
'max_step': 200, # the max step number in an episode for evaluation
'state_dim': 3, # the x-y coordinates of the pendulum's free end and its angular velocity.
'action_dim': 1, # the torque applied to free end of the pendulum
'if_discrete': False, # continuous action space, symbols → direction, value → force
'num_envs': 4, # the number of sub envs in vectorized env
'if_build_vec_env': True,
}
get_gym_env_args(env=PendulumEnv(), if_print=True) # return env_args
args = Config(agent_class, env_class, env_args) # see `config.py Arguments()` for hyperparameter explanation
args.break_step = int(8e4)
args.net_dims = (128, 64) # the middle layer dimension of MultiLayer Perceptron
args.gamma = 0.97 # discount factor of future rewards
args.reward_scale = 2 ** -2
args.horizon_len = args.max_step * 1
args.repeat_times = 16 # repeatedly update network using ReplayBuffer to keep critic's loss small
args.learning_rate = 4e-4
args.state_value_tau = 0.2 # the tau of normalize for value and state `std = (1-std)*std + tau*std`
args.gpu_id = GPU_ID
args.num_workers = 4
train_agent_multiprocessing(args) # train_agent(args)
"""
-2000 < -1200 < -200 < -80
################################################################################
ID Step Time | avgR stdR avgS stdS | expR objC etc.
0 1.60e+03 9 |-1065.59 245.6 200 0 | -1.41 10.00 -0.04 -0.00
0 2.16e+04 31 |-1152.15 11.0 200 0 | -1.43 2.95 -0.04 0.02
0 4.16e+04 52 | -954.16 52.4 200 0 | -1.42 3.21 0.00 0.01
0 6.16e+04 73 | -237.63 183.1 200 0 | -1.34 0.53 0.05 -0.07
| TrainingTime: 92 | SavedDir: ./Pendulum_VecPPO_0
"""
def train_ppo_a2c_for_lunar_lander_continuous():
import gym
agent_class = [AgentPPO, AgentA2C][DRL_ID] # DRL algorithm name
env_class = gym.make # run a custom env: PendulumEnv, which based on OpenAI pendulum
env_args = {'env_name': 'LunarLanderContinuous-v2',
'num_envs': 1,
'max_step': 1000,
'state_dim': 8,
'action_dim': 2,
'if_discrete': False}
get_gym_env_args(env=gym.make('LunarLanderContinuous-v2'), if_print=True) # return env_args
args = Config(agent_class, env_class, env_args) # see `config.py Arguments()` for hyperparameter explanation
args.break_step = int(4e5) # break training if 'total_step > break_step'
args.net_dims = (256, 128) # the middle layer dimension of MultiLayer Perceptron
args.batch_size = 512
args.gamma = 0.99 # discount factor of future rewards
args.horizon_len = args.max_step * 2
args.repeat_times = 16 # repeatedly update network using ReplayBuffer to keep critic's loss small
args.reward_scale = 2 ** -1
args.learning_rate = 2e-4
args.state_value_tau = 0.1 # the tau of normalize for value and state `std = (1-std)*std + tau*std`
args.lambda_gae_adv = 0.97
args.lambda_entropy = 0.04
args.eval_times = 32
args.eval_per_step = 5e4
args.gpu_id = GPU_ID
args.num_workers = 4
train_agent_multiprocessing(args) # train_agent(args)
"""
-1500 < -200 < 200 < 290
################################################################################
ID Step Time | avgR stdR avgS stdS | expR objC etc.
0 1.60e+04 20 | -138.39 24.0 70 13 | -2.87 10.25 0.13 0.01
0 7.20e+04 74 | -169.52 42.6 352 214 | -2.92 4.08 0.12 0.04
0 1.28e+05 151 | 148.34 96.1 628 128 | -2.96 1.73 0.15 0.07
0 1.84e+05 179 | 212.45 44.2 460 154 | -2.99 0.73 0.17 0.09
0 2.40e+05 218 | 238.36 19.4 377 80 | -3.05 0.86 0.15 0.11
0 2.96e+05 262 | 239.83 35.4 390 119 | -3.09 0.80 0.25 0.13
0 3.52e+05 300 | 269.49 32.6 304 146 | -3.14 0.58 0.21 0.16
0 4.08e+05 340 | 254.45 58.6 239 53 | -3.21 1.00 0.24 0.19
| TrainingTime: 340 | SavedDir: ./LunarLanderContinuous-v2_VecPPO_0
"""
def train_ppo_a2c_for_lunar_lander_continuous_vec_env():
import gym
agent_class = [AgentPPO, AgentA2C][DRL_ID] # DRL algorithm name
env_class = gym.make # run a custom env: PendulumEnv, which based on OpenAI pendulum
env_args = {
'env_name': 'LunarLanderContinuous-v2',
'max_step': 1000,
'state_dim': 8,
'action_dim': 2,
'if_discrete': False,
'num_envs': 4, # the number of sub envs in vectorized env
'if_build_vec_env': True,
}
get_gym_env_args(env=gym.make('LunarLanderContinuous-v2'), if_print=True) # return env_args
args = Config(agent_class, env_class, env_args) # see `config.py Arguments()` for hyperparameter explanation
args.break_step = int(2e5) # break training if 'total_step > break_step'
args.net_dims = (256, 128, 64) # the middle layer dimension of MultiLayer Perceptron
args.batch_size = 512
args.gamma = 0.99 # discount factor of future rewards
args.horizon_len = args.max_step
args.repeat_times = 64 # repeatedly update network using ReplayBuffer to keep critic's loss small
args.reward_scale = 2 ** -1
args.learning_rate = 2e-4
args.state_value_tau = 0.1 # the tau of normalize for value and state `std = (1-std)*std + tau*std`
args.lambda_gae_adv = 0.97
args.lambda_entropy = 0.04
args.eval_times = 32
args.eval_per_step = 2e4
args.gpu_id = GPU_ID
args.num_workers = 4
train_agent_multiprocessing(args) # train_agent(args)
"""
-1500 < -200 < 200 < 290
################################################################################
ID Step Time | avgR stdR avgS stdS | expR objC etc.
0 8.00e+03 35 | -109.92 74.8 81 14 | -2.85 9.17 0.15 0.02
0 2.80e+04 92 | -79.63 119.7 460 258 | -2.91 3.15 0.13 0.04
0 5.60e+04 132 | 239.43 36.7 402 70 | -2.96 0.78 0.17 0.06
0 7.60e+04 159 | 251.94 61.9 273 44 | -2.94 0.53 0.26 0.06
0 9.60e+04 187 | 276.30 18.2 221 23 | -2.94 0.87 0.49 0.05
0 1.16e+05 218 | 273.28 19.6 220 17 | -2.96 0.28 0.24 0.07
0 1.36e+05 248 | 275.14 17.7 215 35 | -2.98 0.15 0.12 0.07
0 1.56e+05 280 | 272.89 22.4 223 45 | -3.03 0.28 0.18 0.10
0 1.76e+05 310 | 275.35 16.8 219 78 | -3.09 0.28 0.19 0.13
0 1.96e+05 339 | 275.55 16.5 219 77 | -3.13 0.20 0.37 0.15
| TrainingTime: 340 | SavedDir: ./LunarLanderContinuous-v2_VecPPO_0
"""
def train_ppo_a2c_for_bipedal_walker():
import gym
agent_class = [AgentPPO, AgentA2C][DRL_ID] # DRL algorithm name
env_class = gym.make # run a custom env: PendulumEnv, which based on OpenAI pendulum
env_args = {
'env_name': 'BipedalWalker-v3',
'num_envs': 1,
'max_step': 1600,
'state_dim': 24,
'action_dim': 4,
'if_discrete': False,
}
get_gym_env_args(env=gym.make('BipedalWalker-v3'), if_print=True) # return env_args
args = Config(agent_class, env_class, env_args) # see `config.py Arguments()` for hyperparameter explanation
args.break_step = int(8e5) # break training if 'total_step > break_step'
args.net_dims = (256, 128, 128) # the middle layer dimension of MultiLayer Perceptron
args.batch_size = 512
args.gamma = 0.97 # discount factor of future rewards
args.horizon_len = args.max_step * 3
args.repeat_times = 32 # repeatedly update network using ReplayBuffer to keep critic's loss small
args.learning_rate = 1e-4
args.state_value_tau = 0.01 # the tau of normalize for value and state `std = (1-std)*std + tau*std`
args.lambda_gae_adv = 0.93
args.lambda_entropy = 0.02
args.clip_ratio = 0.4
args.eval_times = 16
args.eval_per_step = 8e4
args.if_keep_save = False # keeping save the checkpoint. False means save until stop training.
args.gpu_id = GPU_ID
args.random_seed = GPU_ID
args.num_workers = 2
train_agent_multiprocessing(args) # train_agent(args)
"""
-200 < -150 < 300 < 330
################################################################################
ID Step Time | avgR stdR avgS stdS | expR objC etc.
0 1.92e+04 29 | -107.14 21.4 231 365 | -5.75 0.60 0.14 0.02
0 1.06e+05 136 | -58.44 5.8 1600 0 | -5.97 0.22 0.45 0.07
0 1.92e+05 228 | -65.31 16.3 1332 576 | -6.00 0.06 0.15 0.08
0 2.78e+05 325 | 63.46 8.0 1600 0 | -5.82 0.03 0.13 0.03
0 3.65e+05 419 | 192.51 49.7 1561 158 | -5.55 0.10 0.26 -0.04
0 4.51e+05 490 | -107.56 3.5 88 8 | -5.55 0.21 0.25 -0.04
0 5.38e+05 588 | 147.98 162.6 864 471 | -5.57 0.36 0.09 -0.02
0 6.24e+05 681 | 256.13 81.9 1136 221 | -5.70 0.50 0.13 0.00
0 7.10e+05 769 | 264.97 59.3 1079 131 | -5.72 0.20 0.16 0.01
0 7.97e+05 857 | 279.37 1.3 1065 18 | -5.77 0.11 0.13 0.02
| TrainingTime: 857 | SavedDir: ./BipedalWalker-v3_VecPPO_2
"""
def train_ppo_a2c_for_bipedal_walker_vec_env():
import gym
agent_class = [AgentPPO, AgentA2C][DRL_ID] # DRL algorithm name
env_class = gym.make # run a custom env: PendulumEnv, which based on OpenAI pendulum
env_args = {
'env_name': 'BipedalWalker-v3',
'max_step': 1600,
'state_dim': 24,
'action_dim': 4,
'if_discrete': False,
'num_envs': 4, # the number of sub envs in vectorized env
'if_build_vec_env': True,
}
get_gym_env_args(env=gym.make('BipedalWalker-v3'), if_print=True) # return env_args
args = Config(agent_class, env_class, env_args) # see `config.py Arguments()` for hyperparameter explanation
args.break_step = int(8e5) # break training if 'total_step > break_step'
args.net_dims = (256, 128, 128) # the middle layer dimension of MultiLayer Perceptron
args.batch_size = 512
args.gamma = 0.98
args.horizon_len = args.max_step // 1
args.repeat_times = 32 # repeatedly update network using ReplayBuffer to keep critic's loss small
args.learning_rate = 2e-4
args.state_value_tau = 0.01 # the tau of normalize for value and state `std = (1-std)*std + tau*std`
args.lambda_gae_adv = 0.93
args.lambda_entropy = 0.02
args.eval_times = 16
args.eval_per_step = 5e4
args.if_keep_save = False # keeping save the checkpoint. False means save until stop training.
args.gpu_id = GPU_ID
args.random_seed = GPU_ID
args.num_workers = 2
train_agent_multiprocessing(args) # train_agent(args)
"""
-200 < -150 < 300 < 330
################################################################################
ID Step Time | avgR stdR avgS stdS | expR objC etc.
0 6.40e+03 33 | -107.05 5.9 169 30 | -5.67 1.30 0.69 -0.01
0 6.40e+03 33 | -107.05
0 5.76e+04 113 | -37.95 2.0 1600 0 | -5.70 0.05 0.12 -0.00
0 5.76e+04 113 | -37.95
0 1.09e+05 196 | 163.69 76.5 1497 287 | -5.39 0.07 0.24 -0.08
0 1.09e+05 196 | 163.69
0 1.60e+05 280 | 28.24 120.4 690 434 | -5.33 0.46 0.17 -0.08
0 2.11e+05 364 | 97.72 147.8 801 396 | -5.32 0.28 0.18 -0.09
0 2.62e+05 447 | 254.85 78.5 1071 165 | -5.37 0.29 0.16 -0.08
0 2.62e+05 447 | 254.85
0 3.14e+05 530 | 274.90 61.5 1001 123 | -5.48 0.34 0.15 -0.04
0 3.14e+05 530 | 274.90
0 3.65e+05 611 | 196.47 121.1 806 220 | -5.60 0.35 0.18 -0.01
0 4.16e+05 689 | 250.12 89.0 890 143 | -5.78 0.32 0.18 0.03
0 4.67e+05 768 | 282.29 25.5 909 17 | -5.94 0.47 0.17 0.07
0 4.67e+05 768 | 282.29
0 5.18e+05 848 | 289.36 1.4 897 14 | -6.07 0.26 0.16 0.10
0 5.18e+05 848 | 289.36
0 5.70e+05 929 | 283.14 33.8 874 35 | -6.29 0.27 0.13 0.16
0 6.21e+05 1007 | 288.53 1.1 870 13 | -6.52 0.22 0.15 0.21
0 6.72e+05 1087 | 288.50 0.9 856 13 | -6.68 0.40 0.15 0.25
0 7.23e+05 1167 | 286.92 1.3 842 16 | -6.86 0.40 0.15 0.30
0 7.74e+05 1246 | 264.75 74.0 790 122 | -7.10 0.42 0.18 0.36
| TrainingTime: 1278 | SavedDir: ./BipedalWalker-v3_PPO_5
"""
def train_ppo_a2c_for_stock_trading():
from elegantrl.envs.StockTradingEnv import StockTradingEnv
id0 = 0
id1 = int(1113 * 0.8)
id2 = 1113
gamma = 0.99
agent_class = [AgentPPO, AgentA2C][DRL_ID] # DRL algorithm name
env_class = StockTradingEnv
env_args = {'env_name': 'StockTradingEnv-v2',
'num_envs': 1,
'max_step': id2 - id1 - 1,
'state_dim': 151,
'action_dim': 15,
'if_discrete': False,
'gamma': gamma,
'beg_idx': id0,
'end_idx': id1, }
# get_gym_vec_env_args(env=StockTradingEnv(), if_print=True) # return env_args
args = Config(agent_class, env_class, env_args) # see `config.py Arguments()` for hyperparameter explanation
args.break_step = int(2e5) # break training if 'total_step > break_step'
args.net_dims = (128, 64) # the middle layer dimension of MultiLayer Perceptron
args.gamma = gamma # discount factor of future rewards
args.horizon_len = args.max_step
args.repeat_times = 16 # repeatedly update network using ReplayBuffer to keep critic's loss small
args.learning_rate = 1e-4
args.state_value_tau = 0.1 # the tau of normalize for value and state `std = (1-std)*std + tau*std`
args.eval_times = 2 ** 5
args.eval_per_step = int(2e4)
args.eval_env_class = StockTradingEnv
args.eval_env_args = {'env_name': 'StockTradingEnv-v2',
'num_envs': 1,
'max_step': id2 - id1 - 1,
'state_dim': 151,
'action_dim': 15,
'if_discrete': False,
'beg_idx': id1,
'end_idx': id2, }
args.gpu_id = GPU_ID
args.num_workers = 4
train_agent_multiprocessing(args) # train_agent(args)
"""
RewardRange: 0.0 < 1.0 < 1.5 < 2.0
################################################################################
ID Step Time | avgR stdR avgS stdS | expR objC etc.
0 7.12e+03 8 | 1.08 0.1 222 0 | -21.40 4.36 0.23 0.00
0 2.85e+04 21 | 1.64 0.1 222 0 | -21.36 6.70 0.22 0.01
0 4.98e+04 34 | 1.58 0.1 222 0 | -21.47 4.98 0.22 0.01
0 7.12e+04 47 | 1.53 0.1 222 0 | -21.47 3.99 0.24 0.01
0 9.26e+04 60 | 1.52 0.1 222 0 | -21.55 3.80 0.25 0.02
0 1.14e+05 73 | 1.51 0.1 222 0 | -21.61 3.16 0.26 0.02
0 1.35e+05 86 | 1.53 0.1 222 0 | -21.63 3.48 0.18 0.02
0 1.57e+05 100 | 1.50 0.1 222 0 | -21.67 2.68 0.22 0.02
0 1.78e+05 114 | 1.51 0.1 222 0 | -21.80 2.18 0.22 0.03
0 1.99e+05 129 | 1.50 0.1 222 0 | -21.76 2.10 0.24 0.03
| TrainingTime: 130 | SavedDir: ./StockTradingEnv-v2_PPO_0
"""
def train_ppo_a2c_for_stock_trading_vec_env():
from elegantrl.envs.StockTradingEnv import StockTradingVecEnv
id0 = 0
id1 = int(1113 * 0.8)
id2 = 1113
num_envs = 2 ** 11
gamma = 0.99
agent_class = [AgentPPO, AgentA2C][DRL_ID] # DRL algorithm name
env_class = StockTradingVecEnv
env_args = {'env_name': 'StockTradingVecEnv-v2',
'num_envs': num_envs,
'max_step': id2 - id1 - 1,
'state_dim': 151,
'action_dim': 15,
'if_discrete': False,
'gamma': gamma,
'beg_idx': id0,
'end_idx': id1, }
# get_gym_vec_env_args(env=StockTradingVecEnv(), if_print=True) # return env_args
args = Config(agent_class, env_class, env_args) # see `config.py Arguments()` for hyperparameter explanation
args.break_step = int(1e5) # break training if 'total_step > break_step'
args.net_dims = (128, 64) # the middle layer dimension of MultiLayer Perceptron
args.gamma = gamma # discount factor of future rewards
args.horizon_len = args.max_step
args.repeat_times = 16 # repeatedly update network using ReplayBuffer to keep critic's loss small
args.learning_rate = 2e-4
args.state_value_tau = 0.1 # the tau of normalize for value and state `std = (1-std)*std + tau*std`
args.eval_times = 2 ** 14
args.eval_per_step = int(2e4)
args.eval_env_class = StockTradingVecEnv
args.eval_env_args = {'env_name': 'StockTradingVecEnv-v2',
'num_envs': num_envs,
'max_step': id2 - id1 - 1,
'state_dim': 151,
'action_dim': 15,
'if_discrete': False,
'beg_idx': id1,
'end_idx': id2, }
args.gpu_id = GPU_ID
args.random_seed = GPU_ID
args.num_workers = 2
train_agent_multiprocessing(args) # train_agent(args)
"""
0.0 < 1.0 < 1.5 < 2.0
################################################################################
ID Step Time | avgR stdR avgS stdS | expR objC etc.
0 8.88e+02 30 | 1.52 0.2 222 0 | -21.29 19.51 0.19 0.00
0 2.13e+04 180 | 1.52 0.2 222 0 | -21.58 1.74 0.23 0.02
0 4.17e+04 333 | 1.52 0.2 222 0 | -21.85 0.81 0.24 0.04
0 6.22e+04 485 | 1.52 0.2 222 0 | -22.16 0.56 0.24 0.06
0 8.26e+04 635 | 1.52 0.2 222 0 | -22.45 0.50 0.21 0.08
| TrainingTime: 746 | SavedDir: ./StockTradingVecEnv-v2_PPO_0
"""
"""discrete action"""
def train_discrete_ppo_a2c_for_cartpole():
import gym
agent_class = [AgentDiscretePPO, AgentDiscreteA2C][DRL_ID] # DRL algorithm name
env_class = gym.make # run a custom env: PendulumEnv, which based on OpenAI pendulum
env_args = {
'env_name': 'CartPole-v1',
'max_step': 500,
'state_dim': 4,
'action_dim': 2,
'if_discrete': True,
}
get_gym_env_args(env=gym.make('CartPole-v1'), if_print=True) # return env_args
args = Config(agent_class, env_class, env_args) # see `config.py Arguments()` for hyperparameter explanation
args.break_step = int(1e5) # break training if 'total_step > break_step'
args.net_dims = (256, 128) # the middle layer dimension of MultiLayer Perceptron
args.batch_size = 512
args.gamma = 0.99 # discount factor of future rewards
args.horizon_len = args.max_step * 2
args.repeat_times = 16 # repeatedly update network using ReplayBuffer to keep critic's loss small
args.reward_scale = 2 ** -2
args.learning_rate = 2e-5
args.state_value_tau = 0.1 # the tau of normalize for value and state `std = (1-std)*std + tau*std`
args.eval_times = 32
args.eval_per_step = 1e4
args.gpu_id = GPU_ID
args.num_workers = 4
# train_agent_multiprocessing(args)
train_agent(args)
"""
0 < 5 < 400 < 500
################################################################################
ID Step Time | avgR stdR avgS stdS | expR objC etc.
0 1.00e+03 1 | 9.41 0.7 9 1 | -0.69 1.56 -0.01 0.00
0 1.10e+04 12 | 61.00 33.7 61 34 | -0.69 1.14 0.02 0.00
0 2.10e+04 23 | 152.88 93.4 153 93 | -0.66 1.49 0.01 0.00
0 3.10e+04 36 | 299.69 76.8 300 77 | -0.62 1.69 0.01 0.00
0 4.10e+04 48 | 201.50 33.7 202 34 | -0.61 0.97 0.02 0.00
0 5.10e+04 62 | 406.38 81.1 406 81 | -0.59 1.20 0.02 0.00
0 6.10e+04 76 | 392.88 80.0 393 80 | -0.58 0.65 0.00 0.00
0 7.10e+04 89 | 230.25 26.5 230 26 | -0.56 0.99 0.01 0.00
0 8.10e+04 102 | 500.00 0.0 500 0 | -0.54 1.03 0.00 0.00
0 9.10e+04 116 | 487.31 23.1 487 23 | -0.55 0.44 0.01 0.00
0 1.01e+05 129 | 500.00 0.0 500 0 | -0.54 0.84 -0.00 0.00
| UsedTime: 129 | SavedDir: ./CartPole-v1_DiscreteVecPPO_0
"""
def train_discrete_ppo_a2c_for_cartpole_vec_env():
import gym
agent_class = [AgentDiscretePPO, AgentDiscreteA2C][DRL_ID] # DRL algorithm name
env_class = gym.make # run a custom env: PendulumEnv, which based on OpenAI pendulum
env_args = {
'env_name': 'CartPole-v1',
'max_step': 500,
'state_dim': 4,
'action_dim': 2,
'if_discrete': True,
'num_envs': 4, # the number of sub envs in vectorized env
'if_build_vec_env': True,
}
get_gym_env_args(env=gym.make('CartPole-v1'), if_print=True) # return env_args
args = Config(agent_class, env_class, env_args) # see `config.py Arguments()` for hyperparameter explanation
args.break_step = int(1e5) # break training if 'total_step > break_step'
args.net_dims = (256, 128) # the middle layer dimension of MultiLayer Perceptron
args.batch_size = 512
args.gamma = 0.99 # discount factor of future rewards
args.horizon_len = args.max_step * 2
args.repeat_times = 16 # repeatedly update network using ReplayBuffer to keep critic's loss small
args.reward_scale = 2 ** -2
args.learning_rate = 1e-4
args.state_value_tau = 0.01 # the tau of normalize for value and state `std = (1-std)*std + tau*std`
args.eval_times = 32
args.eval_per_step = 1e4
args.gpu_id = GPU_ID
args.num_workers = 4
train_agent_multiprocessing(args) # train_agent(args)
"""
0 < 5 < 400 < 500
################################################################################
ID Step Time | avgR stdR avgS stdS | expR objC etc.
0 8.00e+03 18 | 56.69 23.5 57 24 | -0.69 1.44 0.02 0.00
0 2.40e+04 27 | 326.74 82.4 327 82 | -0.64 1.84 0.03 0.00
0 3.60e+04 36 | 288.28 73.7 288 74 | -0.61 2.17 0.02 0.00
0 4.80e+04 45 | 344.19 95.4 344 95 | -0.58 2.11 0.00 0.00
0 6.00e+04 54 | 368.11 76.7 368 77 | -0.57 1.88 0.03 0.00
0 7.20e+04 64 | 404.28 54.9 404 55 | -0.56 1.35 0.02 0.00
0 8.40e+04 73 | 425.89 78.2 426 78 | -0.55 0.85 0.02 0.00
0 9.60e+04 82 | 447.61 65.2 448 65 | -0.55 0.87 0.02 0.00
| TrainingTime: 83 | SavedDir: ./CartPole-v1_DiscreteVecPPO_0
"""
def train_discrete_ppo_a2c_for_lunar_lander():
import gym
agent_class = [AgentDiscretePPO, AgentDiscreteA2C][DRL_ID] # DRL algorithm name
env_class = gym.make # run a custom env: PendulumEnv, which based on OpenAI pendulum
env_args = {
'env_name': 'LunarLander-v2',
'max_step': 1000,
'state_dim': 8,
'action_dim': 2,
'if_discrete': True
}
get_gym_env_args(env=gym.make('LunarLander-v2'), if_print=True) # return env_args
args = Config(agent_class, env_class, env_args) # see `config.py Arguments()` for hyperparameter explanation
args.break_step = int(4e6) # break training if 'total_step > break_step'
args.net_dims = (256, 128) # the middle layer dimension of MultiLayer Perceptron
args.batch_size = 512
args.gamma = 0.99 # discount factor of future rewards
args.horizon_len = args.max_step * 4
args.repeat_times = 32 # repeatedly update network using ReplayBuffer to keep critic's loss small
args.reward_scale = 2 ** -1
args.learning_rate = 2e-5
args.state_value_tau = 0.01 # the tau of normalize for value and state `std = (1-std)*std + tau*std`
args.lambda_gae_adv = 0.97
args.lambda_entropy = 0.1
# args.if_use_v_trace = True
args.eval_times = 32
args.eval_per_step = 5e4
args.gpu_id = GPU_ID
args.num_workers = 4
train_agent_multiprocessing(args) # train_agent(args)
"""
-1500 < -200 < 200 < 290
################################################################################
ID Step Time | avgR stdR avgS stdS | expR objC etc.
0 1.60e+04 20 | -138.39 24.0 70 13 | -2.87 10.25 0.13 0.01
0 7.20e+04 74 | -169.52 42.6 352 214 | -2.92 4.08 0.12 0.04
0 1.28e+05 151 | 148.34 96.1 628 128 | -2.96 1.73 0.15 0.07
0 1.84e+05 179 | 212.45 44.2 460 154 | -2.99 0.73 0.17 0.09
0 2.40e+05 218 | 238.36 19.4 377 80 | -3.05 0.86 0.15 0.11
0 2.96e+05 262 | 239.83 35.4 390 119 | -3.09 0.80 0.25 0.13
0 3.52e+05 300 | 269.49 32.6 304 146 | -3.14 0.58 0.21 0.16
0 4.08e+05 340 | 254.45 58.6 239 53 | -3.21 1.00 0.24 0.19
| TrainingTime: 340 | SavedDir: ./LunarLanderContinuous-v2_VecPPO_0
"""
def train_discrete_ppo_a2c_for_lunar_lander_vec_env():
import gym
agent_class = [AgentDiscretePPO, AgentDiscreteA2C][DRL_ID] # DRL algorithm name
env_class = gym.make # run a custom env: PendulumEnv, which based on OpenAI pendulum
env_args = {
'env_name': 'LunarLander-v2',
'max_step': 1000,
'state_dim': 8,
'action_dim': 2,
'if_discrete': True,
'num_envs': 4, # the number of sub envs in vectorized env
'if_build_vec_env': True,
}
get_gym_env_args(env=gym.make('LunarLander-v2'), if_print=True) # return env_args
args = Config(agent_class, env_class, env_args) # see `config.py Arguments()` for hyperparameter explanation
args.break_step = int(4e6) # break training if 'total_step > break_step'
args.net_dims = (256, 128) # the middle layer dimension of MultiLayer Perceptron
args.batch_size = 512
args.gamma = 0.99 # discount factor of future rewards
args.horizon_len = args.max_step * 2
args.repeat_times = 32 # repeatedly update network using ReplayBuffer to keep critic's loss small
args.reward_scale = 2 ** -3
args.learning_rate = 2e-5
args.state_value_tau = 0.01 # the tau of normalize for value and state `std = (1-std)*std + tau*std`
args.lambda_gae_adv = 0.97
args.lambda_entropy = 0.1
# args.if_use_v_trace = True
args.eval_times = 32
args.eval_per_step = 2e4
args.gpu_id = GPU_ID
args.num_workers = 4
train_agent_multiprocessing(args) # train_agent(args)
"""
-1500 < -200 < 200 < 290
################################################################################
ID Step Time | avgR stdR avgS stdS | expR objC etc.
0 8.00e+03 18 | 62.42 25.6 62 26 | -0.69 8.03 0.01 0.00
0 2.80e+04 29 | 105.77 42.9 106 43 | -0.67 9.55 0.02 0.00
0 4.00e+04 38 | 259.23 76.2 259 76 | -0.64 10.98 0.02 0.00
0 5.20e+04 46 | 377.11 48.2 377 48 | -0.61 12.39 0.01 0.00
0 6.40e+04 55 | 421.39 87.8 421 88 | -0.60 12.93 0.03 0.00
0 7.60e+04 64 | 230.57 56.1 231 56 | -0.58 13.37 0.03 0.00
0 8.80e+04 72 | 365.26 114.2 365 114 | -0.58 13.32 0.02 0.00
0 1.00e+05 81 | 394.84 107.5 395 107 | -0.58 13.09 0.02 0.00
| TrainingTime: 82 | SavedDir: ./CartPole-v1_DiscreteVecPPO_0
"""
'''utils'''
def demo_load_pendulum_and_render():
import torch
gpu_id = 0 # >=0 means GPU ID, -1 means CPU
device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
from elegantrl.envs.CustomGymEnv import PendulumEnv
env_class = PendulumEnv # run a custom env: PendulumEnv, which based on OpenAI pendulum
env_args = {
'env_name': 'Pendulum', # Apply torque on the free end to swing a pendulum into an upright position
# Reward: r = -(theta + 0.1 * theta_dt + 0.001 * torque)
'num_envs': 1, # the number of sub envs in vectorized env. `num_envs=1` in single env.
'state_dim': 3, # the x-y coordinates of the pendulum's free end and its angular velocity.
'action_dim': 1, # the torque applied to free end of the pendulum
'if_discrete': False # continuous action space, symbols → direction, value → force
}
'''init'''
from elegantrl.train.config import build_env
env = build_env(env_class=env_class, env_args=env_args)
act = torch.load(f"./Pendulum_PPO_0/act.pt", map_location=device)
'''evaluate'''
eval_times = 2 ** 7
from elegantrl.train.evaluator import get_cumulative_rewards_and_steps
rewards_step_list = [get_cumulative_rewards_and_steps(env, act) for _ in range(eval_times)]
rewards_step_ten = torch.tensor(rewards_step_list)
print(f"\n| average cumulative_returns {rewards_step_ten[:, 0].mean().item():9.3f}"
f"\n| average episode steps {rewards_step_ten[:, 1].mean().item():9.3f}")
'''render'''
if_discrete = env.if_discrete
device = next(act.parameters()).device # net.parameters() is a Python generator.
state = env.reset()
steps = None
returns = 0.0 # sum of rewards in an episode
for steps in range(12345):
s_tensor = torch.as_tensor(state, dtype=torch.float32, device=device).unsqueeze(0)
a_tensor = act(s_tensor).argmax(dim=1) if if_discrete else act(s_tensor)
action = a_tensor.detach().cpu().numpy()[0] # not need detach(), because using torch.no_grad() outside
state, reward, done, _ = env.step(action)
returns += reward
env.render()
if done:
break
returns = getattr(env, 'cumulative_rewards', returns)
steps += 1
print(f"\n| cumulative_returns {returns}"
f"\n| episode steps {steps}")
def demo_load_pendulum_vectorized_env():
    """Load a trained PPO actor and evaluate it on a vectorized Pendulum env.

    Reads the saved actor from ``./Pendulum_PPO_0/act.pt`` and prints the
    average cumulative return and episode length over ``2**7`` episodes,
    evaluated ``num_envs`` episodes at a time.
    """
    import torch
    gpu_id = 0  # >=0 means GPU ID, -1 means CPU
    device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")

    from elegantrl.envs.CustomGymEnv import PendulumEnv
    env_class = PendulumEnv  # run a custom env: PendulumEnv, which is based on the OpenAI pendulum
    num_envs = 4
    env_args = {
        'env_name': 'Pendulum',  # Apply torque on the free end to swing a pendulum into an upright position
        'max_step': 200,  # the max step number in an episode for evaluation
        'state_dim': 3,  # the x-y coordinates of the pendulum's free end and its angular velocity.
        'action_dim': 1,  # the torque applied to free end of the pendulum
        'if_discrete': False,  # continuous action space, symbols → direction, value → force
        'num_envs': num_envs,  # the number of sub envs in vectorized env
        'if_build_vec_env': True,
    }

    '''init'''
    from elegantrl.train.config import build_env
    env = build_env(env_class=env_class, env_args=env_args)
    act = torch.load("./Pendulum_PPO_0/act.pt", map_location=device)

    '''evaluate'''
    eval_times = 2 ** 7
    from elegantrl.train.evaluator import get_cumulative_rewards_and_step_from_vec_env

    # Plain loop instead of a side-effect list comprehension: the comprehension
    # built and discarded a list of Nones just to call `.extend()`.
    rewards_step_list = []
    for _ in range(eval_times // num_envs):
        rewards_step_list.extend(get_cumulative_rewards_and_step_from_vec_env(env, act))

    rewards_step_ten = torch.tensor(rewards_step_list)
    print(f"\n| average cumulative_returns {rewards_step_ten[:, 0].mean().item():9.3f}"
          f"\n| average episode steps {rewards_step_ten[:, 1].mean().item():9.3f}")
if __name__ == '__main__':
    Parser = ArgumentParser(description='ArgumentParser for ElegantRL')
    Parser.add_argument('--gpu', type=int, default=0, help='GPU device ID for training')
    Parser.add_argument('--drl', type=int, default=0, help='RL algorithms ID for training')
    Parser.add_argument('--env', type=str, default='0', help='the environment ID for training')

    Args = Parser.parse_args()
    GPU_ID = Args.gpu
    DRL_ID = Args.drl
    ENV_ID = Args.env

    # Dispatch table: every demo is reachable either by its numeric ID or by name.
    trainers = (
        ({'0', 'pendulum'}, train_ppo_a2c_for_pendulum),
        ({'1', 'pendulum_vec'}, train_ppo_a2c_for_pendulum_vec_env),
        ({'2', 'lunar_lander_continuous'}, train_ppo_a2c_for_lunar_lander_continuous),
        ({'3', 'lunar_lander_continuous_vec'}, train_ppo_a2c_for_lunar_lander_continuous_vec_env),
        ({'4', 'bipedal_walker'}, train_ppo_a2c_for_bipedal_walker),
        ({'5', 'bipedal_walker_vec'}, train_ppo_a2c_for_bipedal_walker_vec_env),
        ({'6', 'cartpole'}, train_discrete_ppo_a2c_for_cartpole),
        ({'7', 'cartpole_vec'}, train_discrete_ppo_a2c_for_cartpole_vec_env),
        ({'8', 'lunar_lander'}, train_discrete_ppo_a2c_for_lunar_lander),
        ({'9', 'lunar_lander_vec'}, train_discrete_ppo_a2c_for_lunar_lander_vec_env),
    )
    for env_ids, trainer in trainers:
        if ENV_ID in env_ids:
            trainer()
            break
    else:
        print('ENV_ID not match')
| 36,642 | 45.678981 | 125 | py |
ElegantRL | ElegantRL-master/examples/demo_gymnasium.py | import sys
import torch as th
import gymnasium as gym
from argparse import ArgumentParser
sys.path.append("..")
if True: # write after `sys.path.append("..")`
from elegantrl import train_agent, train_agent_multiprocessing
from elegantrl import Config, get_gym_env_args
from elegantrl.agents import AgentPPO
from elegantrl.agents import AgentA2C
def train_ppo_a2c_for_pendulum():
    """Train PPO (or A2C, chosen by the global DRL_ID) on the custom single Pendulum env."""
    from elegantrl.envs.CustomGymEnv import PendulumEnv
    agent_class = [AgentPPO, AgentA2C][DRL_ID]  # DRL algorithm name
    env_class = PendulumEnv  # run a custom env: PendulumEnv, which based on OpenAI pendulum
    env_args = {
        'env_name': 'Pendulum',  # Apply torque on the free end to swing a pendulum into an upright position
        'max_step': 200,  # the max step number of an episode.
        'state_dim': 3,  # the x-y coordinates of the pendulum's free end and its angular velocity.
        'action_dim': 1,  # the torque applied to free end of the pendulum
        'if_discrete': False  # continuous action space, symbols → direction, value → force
    }
    get_gym_env_args(env=PendulumEnv(), if_print=True)  # return env_args

    args = Config(agent_class, env_class, env_args)  # see `config.py Arguments()` for hyperparameter explanation
    # Hyperparameters, applied in one pass; see `config.py` for the defaults.
    hyper_params = {
        'break_step': int(8e4),  # break training if 'total_step > break_step'
        'net_dims': (128, 64),  # the middle layer dimension of MultiLayer Perceptron
        'gamma': 0.97,  # discount factor of future rewards
        'horizon_len': args.max_step * 4,
        'repeat_times': 32,  # repeatedly update network using ReplayBuffer to keep critic's loss small
        'learning_rate': 2e-4,
        'state_value_tau': 0.1,  # the tau of normalize for value and state `std = (1-std)*std + tau*std`
        'gpu_id': GPU_ID,
        'num_workers': 4,
    }
    for name, value in hyper_params.items():
        setattr(args, name, value)

    if_single_process = True
    if if_single_process:
        train_agent(args)
    else:
        train_agent_multiprocessing(args)  # train_agent(args)
    """
    -2000 < -1200 < -200 < -80
    ################################################################################
    ID Step Time | avgR stdR avgS stdS | expR objC etc.
    0 8.00e+02 2 |-1219.07 279.3 200 0 | -1.41 49.69 0.02 -0.01
    0 2.08e+04 46 | -162.10 74.0 200 0 | -1.25 9.47 0.01 -0.13
    0 4.08e+04 91 | -162.31 185.5 200 0 | -1.14 0.95 0.01 -0.29
    0 6.08e+04 136 | -81.47 70.3 200 0 | -1.00 0.17 0.02 -0.45
    0 8.08e+04 201 | -84.41 70.0 200 0 | -0.84 2.62 0.01 -0.53
    | UsedTime: 202 | SavedDir: ./Pendulum_VecPPO_0
    """
def train_ppo_a2c_for_pendulum_vec_env():
    """Train PPO (or A2C, chosen by the global DRL_ID) on a 4-way vectorized Pendulum env."""
    from elegantrl.envs.CustomGymEnv import PendulumEnv
    agent_class = [AgentPPO, AgentA2C][DRL_ID]  # DRL algorithm name
    env_class = PendulumEnv  # run a custom env: PendulumEnv, which based on OpenAI pendulum
    env_args = {
        'env_name': 'Pendulum',  # Apply torque on the free end to swing a pendulum into an upright position
        'max_step': 200,  # the max step number in an episode for evaluation
        'state_dim': 3,  # the x-y coordinates of the pendulum's free end and its angular velocity.
        'action_dim': 1,  # the torque applied to free end of the pendulum
        'if_discrete': False,  # continuous action space, symbols → direction, value → force
        'num_envs': 4,  # the number of sub envs in vectorized env
        'if_build_vec_env': True,
    }
    get_gym_env_args(env=PendulumEnv(), if_print=True)  # return env_args

    args = Config(agent_class, env_class, env_args)  # see `config.py Arguments()` for hyperparameter explanation
    # Hyperparameters, applied in one pass; see `config.py` for the defaults.
    hyper_params = {
        'break_step': int(8e4),  # break training if 'total_step > break_step'
        'net_dims': (128, 64),  # the middle layer dimension of MultiLayer Perceptron
        'gamma': 0.97,  # discount factor of future rewards
        'reward_scale': 2 ** -2,
        'horizon_len': args.max_step * 1,
        'repeat_times': 16,  # repeatedly update network using ReplayBuffer to keep critic's loss small
        'learning_rate': 4e-4,
        'state_value_tau': 0.2,  # the tau of normalize for value and state `std = (1-std)*std + tau*std`
        'gpu_id': GPU_ID,
        'num_workers': 4,
    }
    for name, value in hyper_params.items():
        setattr(args, name, value)

    train_agent_multiprocessing(args)  # train_agent(args)
    """
    -2000 < -1200 < -200 < -80
    ################################################################################
    ID Step Time | avgR stdR avgS stdS | expR objC etc.
    0 1.60e+03 9 |-1065.59 245.6 200 0 | -1.41 10.00 -0.04 -0.00
    0 2.16e+04 31 |-1152.15 11.0 200 0 | -1.43 2.95 -0.04 0.02
    0 4.16e+04 52 | -954.16 52.4 200 0 | -1.42 3.21 0.00 0.01
    0 6.16e+04 73 | -237.63 183.1 200 0 | -1.34 0.53 0.05 -0.07
    | TrainingTime: 92 | SavedDir: ./Pendulum_VecPPO_0
    """
def build_env(env_name: str):
    """Return a zero-argument factory that makes a fresh gym env.

    `gym.vector.SyncVectorEnv` expects a list of such callables, one per sub-env.
    """
    return lambda: gym.make(env_name)
'''unit tests'''
def check_gym_single():
    """Roll out random actions in one gym env and print the cumulative reward."""
    env_name = 'LunarLanderContinuous-v2'
    env = gym.make(env_name)
    max_step = 2 ** 10

    state, info = env.reset()
    episode_return = 0.0
    for _ in range(max_step):
        action = env.action_space.sample()  # uniform random policy
        state, reward, terminated, truncated, info = env.step(action)
        episode_return += reward
        if terminated or truncated:
            break

    print(f"cumulative_rewards: {episode_return:9.2f}")
    env.close()
def check_gym_vector():
    """Roll out random actions in a SyncVectorEnv and print the mean cumulative reward."""
    env_name = 'LunarLanderContinuous-v2'
    num_envs = 8
    envs = gym.vector.SyncVectorEnv([build_env(env_name) for _ in range(num_envs)])
    max_step = 2 ** 10

    state, info = envs.reset()
    returns_per_env = th.zeros(num_envs, dtype=th.float32).numpy()
    for _ in range(max_step):
        action = envs.action_space.sample()  # one random action per sub-env
        state, reward, terminated, truncated, info = envs.step(action)
        returns_per_env += reward  # sub-envs auto-reset, so just keep accumulating

    print(f"cumulative_rewards: {returns_per_env.mean():9.2f}")
    envs.close()
def check_get_gym_env_args():
    """Build a vectorized env, take one sub-env, and print its `env_args` dict."""
    env_name = 'LunarLanderContinuous-v2'
    num_envs = 8
    envs = gym.vector.SyncVectorEnv([build_env(env_name) for _ in range(num_envs)])

    first_sub_env = envs.envs[0]
    get_gym_env_args(first_sub_env, if_print=True)
if __name__ == '__main__':
    # Run the unit tests in order.
    for check_func in (check_gym_single, check_gym_vector, check_get_gym_env_args):
        check_func()

    # NOTE: the argparse-driven trainer dispatch used by the other demo scripts
    # (``--gpu``, ``--drl``, ``--env`` → train_ppo_a2c_for_*) is intentionally
    # disabled in this file; re-enable it to train instead of running the checks.
| 7,169 | 37.967391 | 113 | py |
ElegantRL | ElegantRL-master/examples/demo_Isaac_Gym.py | import isaacgym
import torch
import sys
import wandb
from elegantrl.train.run import train_and_evaluate
from elegantrl.train.config import Arguments, build_env
from elegantrl.agents.AgentPPO import AgentPPO
from elegantrl.envs.IsaacGym import IsaacVecEnv, IsaacOneEnv
def demo(seed, config):
    """Build an Isaac Gym vectorized env described by `config` and train PPO on it.

    `seed` sets the run's random seed and its result sub-directory;
    `config` supplies env shape and the rollout/batch hyperparameters.
    """
    agent_class = AgentPPO
    env_func = IsaacVecEnv
    gpu_id = 0  # same GPU runs both simulation and RL

    env_args = {
        'env_num': config['env_num'],
        'env_name': config['env_name'],
        'max_step': config['max_step'],
        'state_dim': config['state_dim'],
        'action_dim': config['action_dim'],
        'if_discrete': False,
        'target_return': 10000.,
        'sim_device_id': gpu_id,
        'rl_device_id': gpu_id,
    }
    env = build_env(env_func=env_func, env_args=env_args)

    args = Arguments(agent_class, env=env)
    # Training hyperparameters, applied in one pass.
    hyper_params = dict(
        if_Isaac=True,
        if_use_old_traj=True,
        if_use_gae=True,
        obs_norm=True,
        value_norm=False,
        reward_scale=config['reward_scale'],
        horizon_len=config['horizon_len'],
        batch_size=config['batch_size'],
        repeat_times=5,
        gamma=0.99,
        lambda_gae_adv=0.95,
        learning_rate=5e-4,
        lambda_entropy=0.0,
        eval_gap=1e6,
        learner_gpus=gpu_id,
        random_seed=seed,
    )
    for name, value in hyper_params.items():
        setattr(args, name, value)
    args.cwd = f'./result/{args.env_name}_{args.agent_class.__name__[5:]}_{args.env_num}envs/{args.random_seed}'

    train_and_evaluate(args)
if __name__ == '__main__':
    # Optional CLI argument: the run's random seed (defaults to 0).
    seed = int(sys.argv[1]) if len(sys.argv) > 1 else 0

    # Active task config; the commented blocks below are drop-in alternatives
    # for other Isaac Gym tasks — swap by commenting/uncommenting.
    config = {
        'env_name': 'Ant',
        'env_num': 2048,
        'state_dim': 60,
        'action_dim': 8,
        'max_step': 1000,
        'reward_scale': 0.01,
        'horizon_len': 32,
        'batch_size': 16384,
    }
    # config = {
    #     'env_name': 'Humanoid',
    #     'env_num': 2048,
    #     'state_dim': 108,
    #     'action_dim': 21,
    #     'max_step': 1000,
    #     'reward_scale': 0.01,
    #     'horizon_len': 32,
    #     'batch_size': 16384,
    # }
    # config = {
    #     'env_name': 'ShadowHand',
    #     'env_num': 16384,
    #     'state_dim': 211,
    #     'action_dim': 20,
    #     'max_step': 600,
    #     'reward_scale': 0.01,
    #     'horizon_len': 8,
    #     'batch_size': 32768,
    # }
    # config = {
    #     'env_name': 'Anymal',
    #     'env_num': 4096,
    #     'state_dim': 48,
    #     'action_dim': 12,
    #     'max_step': 2500,
    #     'reward_scale': 1,
    #     'horizon_len': 32,
    #     'batch_size': 16384,
    # }
    # config = {
    #     'env_name': 'Ingenuity',
    #     'env_num': 4096,
    #     'state_dim': 13,
    #     'action_dim': 6,
    #     'max_step': 2000,
    #     'reward_scale': 1,
    #     'horizon_len': 16,
    #     'batch_size': 16384,
    # }

    # Weights & Biases run name: "<task>_PPO_<seed>".
    cwd = config['env_name'] + '_PPO_' + str(seed)
    wandb.init(
        project=config['env_name'] + '_PPO_' + str(config['env_num']),
        entity=None,
        sync_tensorboard=True,
        config=config,
        name=cwd,
        monitor_gym=True,
        save_code=True,
    )
    # Replace the local dict with the wandb-managed config (supports sweeps).
    config = wandb.config
    demo(seed, config)
| 3,155 | 25.974359 | 112 | py |
ElegantRL | ElegantRL-master/examples/demo_FinRL_ElegantRL_China_A_shares.py | import os
import time
import sys
from copy import deepcopy
import torch
import torch.nn as nn
import numpy as np
import numpy.random as rd
import pandas as pd
"""finance environment
Source: https://github.com/AI4Finance-Foundation/FinRL-Meta/blob/master/Demo_China_A_share_market.ipynb
Modify: Github YonV1943
"""
class StockTradingEnv:
    """Daily stock-trading environment over a China A-shares dataset.

    State  : scaled cash amount, scaled share holdings, scaled close prices and
             scaled technical indicators of the current day (see `get_state`).
    Action : one float per stock in [-1, 1]; scaled by `max_stock` into a signed
             integer number of shares to buy (+) or sell (-).
    Reward : scaled change of total asset value; the final step adds a
             discounted mean-reward bonus (see `step`).
    """

    def __init__(self, initial_amount=1e6, max_stock=1e2, buy_cost_pct=1e-3, sell_cost_pct=1e-3, gamma=0.99,
                 beg_idx=0, end_idx=1113):
        """Load price/indicator arrays, slice [beg_idx:end_idx] days, set trading costs.

        :param initial_amount: starting cash.
        :param max_stock: action scale — max shares traded per stock per step.
        :param buy_cost_pct: proportional transaction cost when buying.
        :param sell_cost_pct: proportional transaction cost when selling.
        :param gamma: discount factor, used for the end-of-episode reward bonus.
        :param beg_idx: first day (row) of the data slice used by this env.
        :param end_idx: one past the last day of the data slice.
        """
        self.df_pwd = './China_A_shares.pandas.dataframe'
        self.npz_pwd = './China_A_shares.numpy.npz'

        self.close_ary, self.tech_ary = self.load_data_from_disk()
        self.close_ary = self.close_ary[beg_idx:end_idx]
        self.tech_ary = self.tech_ary[beg_idx:end_idx]
        print(f"| StockTradingEnv: close_ary.shape {self.close_ary.shape}")
        print(f"| StockTradingEnv: tech_ary.shape {self.tech_ary.shape}")

        self.max_stock = max_stock
        self.buy_cost_rate = 1 + buy_cost_pct    # multiplier applied to buy cash outflow
        self.sell_cost_rate = 1 - sell_cost_pct  # multiplier applied to sell cash inflow
        self.initial_amount = initial_amount
        self.gamma = gamma

        # reset()
        self.day = None          # current trading-day index into the arrays
        self.rewards = None      # per-step rewards of the running episode
        self.total_asset = None  # cash + market value of holdings
        self.cumulative_returns = 0
        self.if_random_reset = True  # randomize cash/holdings at reset (for training)

        self.amount = None  # cash on hand
        self.shares = None  # holdings per stock
        self.shares_num = self.close_ary.shape[1]
        amount_dim = 1

        # environment information
        self.env_name = 'StockTradingEnv-v2'
        self.state_dim = self.shares_num + self.close_ary.shape[1] + self.tech_ary.shape[1] + amount_dim
        self.action_dim = self.shares_num
        self.if_discrete = False
        self.max_step = len(self.close_ary)

    def reset(self):
        """Start a new episode and return the initial state.

        With `if_random_reset`, cash is perturbed ±10% and holdings are drawn
        randomly to diversify training starts; otherwise start flat with cash only.
        """
        self.day = 0
        if self.if_random_reset:
            self.amount = self.initial_amount * rd.uniform(0.9, 1.1)
            self.shares = (np.abs(rd.randn(self.shares_num).clip(-2, +2)) * 2 ** 6).astype(int)
        else:
            self.amount = self.initial_amount
            self.shares = np.zeros(self.shares_num, dtype=np.float32)

        self.rewards = []
        self.total_asset = (self.close_ary[self.day] * self.shares).sum() + self.amount
        return self.get_state()

    def get_state(self):
        """Build the observation for the current day.

        Each component is multiplied by a fixed power of two to bring its scale
        near unity for the networks.
        """
        state = np.hstack((np.array(self.amount * 2 ** -16),
                           self.shares * 2 ** -9,
                           self.close_ary[self.day] * 2 ** -7,
                           self.tech_ary[self.day] * 2 ** -6,))
        return state

    def step(self, action):
        """Advance one trading day.

        :param action: float array in [-1, 1] per stock; values in (-0.1, 0.1)
                       are treated as "hold".
        :return: (state, reward, done, info_dict)
        """
        self.day += 1

        action = action.copy()
        action[(-0.1 < action) & (action < 0.1)] = 0  # dead-zone: ignore tiny trades
        action_int = (action * self.max_stock).astype(int)
        # actions initially is scaled between -1 and 1
        # convert into integer because we can't buy fraction of shares
        for index in range(self.action_dim):
            stock_action = action_int[index]
            adj_close_price = self.close_ary[self.day, index]  # `adjcp` denotes adjusted close price
            if stock_action > 0:  # buy_stock
                # limited by available cash
                delta_stock = min(self.amount // adj_close_price, stock_action)
                self.amount -= adj_close_price * delta_stock * self.buy_cost_rate
                self.shares[index] += delta_stock
            elif self.shares[index] > 0:  # sell_stock
                # limited by current holdings
                delta_stock = min(-stock_action, self.shares[index])
                self.amount += adj_close_price * delta_stock * self.sell_cost_rate
                self.shares[index] -= delta_stock

        state = self.get_state()

        total_asset = (self.close_ary[self.day] * self.shares).sum() + self.amount
        reward = (total_asset - self.total_asset) * 2 ** -6  # scaled asset change
        self.rewards.append(reward)
        self.total_asset = total_asset

        done = self.day == self.max_step - 1
        if done:
            # terminal bonus: discounted average reward of the whole episode
            reward += 1 / (1 - self.gamma) * np.mean(self.rewards)
            self.cumulative_returns = total_asset / self.initial_amount
        return state, reward, done, {}

    def load_data_from_disk(self, tech_id_list=None):
        """Return (close_ary, tech_ary) from the cached .npz, or convert the
        pandas dataframe and cache it; raise FileNotFoundError if neither exists.

        :param tech_id_list: technical-indicator column names to extract from
                             the dataframe (defaults to the 8 listed below).
        """
        tech_id_list = [
            "macd", "boll_ub", "boll_lb", "rsi_30", "cci_30", "dx_30", "close_30_sma", "close_60_sma",
        ] if tech_id_list is None else tech_id_list

        if os.path.exists(self.npz_pwd):
            # fast path: pre-converted numpy arrays
            ary_dict = np.load(self.npz_pwd, allow_pickle=True)
            close_ary = ary_dict['close_ary']
            tech_ary = ary_dict['tech_ary']
        elif os.path.exists(self.df_pwd):  # convert pandas.DataFrame to numpy.array
            df = pd.read_pickle(self.df_pwd)

            tech_ary = []
            close_ary = []
            df_len = len(df.index.unique())  # df_len = max_step
            for day in range(df_len):
                item = df.loc[day]

                tech_items = [item[tech].values.tolist() for tech in tech_id_list]
                tech_items_flatten = sum(tech_items, [])  # flatten list of lists
                tech_ary.append(tech_items_flatten)
                close_ary.append(item.close)

            close_ary = np.array(close_ary)
            tech_ary = np.array(tech_ary)

            # cache the conversion for subsequent runs
            np.savez_compressed(self.npz_pwd, close_ary=close_ary, tech_ary=tech_ary, )
        else:
            error_str = f"| StockTradingEnv need {self.df_pwd} or {self.npz_pwd}" \
                        f" download the following file and save in `.`" \
                        f" https://github.com/Yonv1943/Python/blob/master/scow/China_A_shares.pandas.dataframe (2.1MB)"
            raise FileNotFoundError(error_str)
        return close_ary, tech_ary
def check_env():
    """Smoke-test StockTradingEnv with two fixed policies and print their returns.

    Uses the deterministic (non-random) reset on the evaluation slice so the
    "buy all share" baseline is reproducible.
    """
    env = StockTradingEnv(beg_idx=834, end_idx=1113)
    env.if_random_reset = False
    evaluate_time = 4
    """
    env = StockTradingEnv(beg_idx=0, end_idx=1113)
    cumulative_returns of random action : 1.63
    cumulative_returns of buy all share : 2.80
    env = StockTradingEnv(beg_idx=0, end_idx=834)
    cumulative_returns of random action : 1.94
    cumulative_returns of buy all share : 2.51
    env = StockTradingEnv(beg_idx=834, end_idx=1113)
    cumulative_returns of random action : 1.12
    cumulative_returns of buy all share : 1.19
    """
    print()
    _evaluate_policy(env, 'random action',
                     lambda: rd.uniform(-1, +1, env.action_dim), evaluate_time)
    print()
    _evaluate_policy(env, 'buy all share',
                     lambda: np.ones(env.action_dim, dtype=np.float32), evaluate_time)
    print()


def _evaluate_policy(env, policy_name, get_action, evaluate_time):
    """Run ~`evaluate_time` episodes with actions from `get_action()`; print each return.

    Shared rollout loop for `check_env` (replaces two copy-pasted loops and the
    no-op `dir(state)` statements the originals used to silence the unused var).
    """
    env.reset()
    for _ in range(env.max_step * evaluate_time):
        state, reward, done, _ = env.step(get_action())
        if done:
            print(f'cumulative_returns of {policy_name}: {env.cumulative_returns:9.2f}')
            env.reset()
def get_gym_env_args(env, if_print) -> dict:  # [ElegantRL.2021.12.12]
    """
    Get a dict ``env_args`` about a standard OpenAI gym env information.

    :param env: a standard OpenAI gym env (or any object exposing env_name,
        max_step, state_dim, action_dim, if_discrete attributes).
    :param if_print: [bool] print the dict about env information.
    :return: env_args [dict]

    env_args = {
        'env_num': 1,               # [int] the environment number, 'env_num>1' in vectorized env
        'env_name': env_name,       # [str] the environment name, such as XxxXxx-v0
        'max_step': max_step,       # [int] the steps in an episode. (from env.reset to done).
        'state_dim': state_dim,     # [int] the dimension of state
        'action_dim': action_dim,   # [int] the dimension of action or the number of discrete action
        'if_discrete': if_discrete, # [bool] action space is discrete or continuous
    }
    """
    env_num = getattr(env, 'env_num') if hasattr(env, 'env_num') else 1

    if {'unwrapped', 'observation_space', 'action_space', 'spec'}.issubset(dir(env)):  # isinstance(env, gym.Env):
        import gym  # lazy import: only this branch needs gym
        env_name = getattr(env, 'env_name', None)
        env_name = env.unwrapped.spec.id if env_name is None else env_name

        state_shape = env.observation_space.shape
        state_dim = state_shape[0] if len(state_shape) == 1 else state_shape  # sometimes state_dim is a list

        max_step = getattr(env, 'max_step', None)
        max_step_default = getattr(env, '_max_episode_steps', None)
        if max_step is None:
            max_step = max_step_default
        if max_step is None:
            max_step = 2 ** 10  # fallback when the env declares no step limit

        if_discrete = isinstance(env.action_space, gym.spaces.Discrete)
        if if_discrete:  # make sure it is discrete action space
            action_dim = env.action_space.n
        elif isinstance(env.action_space, gym.spaces.Box):  # make sure it is continuous action space
            action_dim = env.action_space.shape[0]
            # BUG FIX: warn when the action space is NOT normalized to (-1, +1).
            # The old checks were inverted (`not any(high - 1)` warned on the
            # correct case) and tested `low - 1` instead of `low + 1`.
            if any(env.action_space.high - 1):
                print('WARNING: env.action_space.high', env.action_space.high)
            if any(env.action_space.low + 1):
                print('WARNING: env.action_space.low', env.action_space.low)
        else:
            raise RuntimeError('\n| Error in get_gym_env_info()'
                               '\n  Please set these value manually: if_discrete=bool, action_dim=int.'
                               '\n  And keep action_space in (-1, 1).')
    else:
        # non-gym env: read the ElegantRL-convention attributes directly
        env_name = env.env_name
        max_step = env.max_step
        state_dim = env.state_dim
        action_dim = env.action_dim
        if_discrete = env.if_discrete

    env_args = {'env_num': env_num,
                'env_name': env_name,
                'max_step': max_step,
                'state_dim': state_dim,
                'action_dim': action_dim,
                'if_discrete': if_discrete, }
    if if_print:
        env_args_repr = repr(env_args)
        env_args_repr = env_args_repr.replace(',', f",\n   ")
        env_args_repr = env_args_repr.replace('{', "{\n    ")
        env_args_repr = env_args_repr.replace('}', ",\n}")
        print(f"env_args = {env_args_repr}")
    return env_args
def kwargs_filter(func, kwargs: dict):
    """
    Filter the variable in env func.

    :param func: the function for creating an env.
    :param kwargs: args for the env.
    :return: filtered args.
    """
    import inspect

    parameter_names = {p.name for p in inspect.signature(func).parameters.values()}
    accepted = parameter_names & kwargs.keys()
    return {name: kwargs[name] for name in accepted}  # filtered kwargs
def build_env(env_func=None, env_args=None):
    """Instantiate `env_func` using only the entries of `env_args` its __init__ accepts."""
    accepted_args = kwargs_filter(env_func.__init__, env_args.copy())
    return env_func(**accepted_args)
'''reinforcement learning
Source: https://github.com/AI4Finance-Foundation/ElegantRL/tree/master/elegantrl_helloworld
Modify: Github YonV1943
'''
class ActorPPO(nn.Module):
    """Gaussian policy network for PPO with a state-independent, learned log-std."""

    def __init__(self, mid_dim, mid_layer_num, state_dim, action_dim):
        """:param mid_dim: hidden width; :param mid_layer_num: number of hidden layers."""
        super().__init__()
        self.net = build_fcn(mid_dim, mid_layer_num, inp_dim=state_dim, out_dim=action_dim)

        # the logarithm (log) of standard deviation (std) of action, it is a trainable parameter
        self.a_std_log = nn.Parameter(torch.zeros((1, action_dim)) - 0.5, requires_grad=True)
        self.sqrt_2pi_log = np.log(np.sqrt(2 * np.pi))  # constant term of the Gaussian log-density

    def forward(self, state):
        """Deterministic action in (-1, 1): tanh of the network's mean output."""
        return self.net(state).tanh()  # action

    def get_action(self, state):
        """Sample a pre-tanh action; return (action, noise) so logprob can be recomputed."""
        a_avg = self.net(state)
        a_std = self.a_std_log.exp()

        noise = torch.randn_like(a_avg)
        action = a_avg + noise * a_std
        return action, noise

    def get_old_logprob(self, _action, noise):
        """Log-probability of a stored action, reconstructed from its sampling noise."""
        delta = noise.pow(2) * 0.5
        return -(self.a_std_log + self.sqrt_2pi_log + delta).sum(1)  # old_logprob

    def get_logprob_entropy(self, state, action):
        """Log-probability of `action` under the current policy plus an entropy term."""
        a_avg = self.net(state)
        a_std = self.a_std_log.exp()

        delta = ((a_avg - action) / a_std).pow(2) * 0.5
        logprob = -(self.a_std_log + self.sqrt_2pi_log + delta).sum(1)  # new_logprob
        # NOTE(review): (p * log p).mean() is E[p log p], i.e. the *negative* entropy;
        # it is added (not subtracted) to the actor objective in update_net.
        dist_entropy = (logprob.exp() * logprob).mean()  # policy entropy
        return logprob, dist_entropy

    @staticmethod
    def get_a_to_e(action):  # convert action of network to action of environment
        """Squash an unbounded network action into the env's (-1, 1) range."""
        return action.tanh()
class CriticPPO(nn.Module):
    """Value network for PPO: maps a state to a scalar state-value estimate."""

    def __init__(self, mid_dim, mid_layer_num, state_dim, _action_dim):
        """`_action_dim` is unused; kept so actor and critic share a constructor signature."""
        super().__init__()
        self.net = build_fcn(mid_dim, mid_layer_num, inp_dim=state_dim, out_dim=1)

    def forward(self, state):
        """Return the estimated value of `state` (used as the advantage baseline)."""
        value = self.net(state)
        return value  # advantage value
def build_fcn(mid_dim, mid_layer_num, inp_dim, out_dim):  # fcn (Fully Connected Network)
    """Build an MLP: inp_dim -> [mid_dim + ReLU] * (1 + mid_layer_num) -> out_dim."""
    layers = [nn.Linear(inp_dim, mid_dim), nn.ReLU()]
    for _ in range(mid_layer_num):
        layers.extend((nn.Linear(mid_dim, mid_dim), nn.ReLU()))
    layers.append(nn.Linear(mid_dim, out_dim))
    return nn.Sequential(*layers)
class AgentPPO:
    """Proximal Policy Optimization agent (on-policy actor-critic with clipped surrogate).

    `args` is an `Arguments`-like object; every hyperparameter is read with
    getattr so `args=None` falls back to the documented defaults.
    """

    def __init__(self, net_dim, state_dim, action_dim, gpu_id=0, args=None):
        """Build actor/critic networks, optional targets, and their optimizers."""
        self.if_off_policy = False  # PPO is on-policy
        self.act_class = getattr(self, "act_class", ActorPPO)
        self.cri_class = getattr(self, "cri_class", CriticPPO)
        self.if_act_target = getattr(args, 'if_act_target', False)
        self.if_cri_target = getattr(args, "if_cri_target", False)

        # AgentBase.__init__(self, net_dim, state_dim, action_dim, gpu_id, args)
        self.gamma = getattr(args, 'gamma', 0.99)
        self.env_num = getattr(args, 'env_num', 1)
        self.batch_size = getattr(args, 'batch_size', 128)
        self.repeat_times = getattr(args, 'repeat_times', 1.)
        self.reward_scale = getattr(args, 'reward_scale', 1.)
        self.mid_layer_num = getattr(args, 'mid_layer_num', 1)
        self.learning_rate = getattr(args, 'learning_rate', 2 ** -12)
        self.soft_update_tau = getattr(args, 'soft_update_tau', 2 ** -8)
        # BUG FIX: removed the duplicated re-reads of if_off_policy/if_act_target/
        # if_cri_target that followed here — `getattr(args, 'if_off_policy', True)`
        # silently overrode the on-policy flag set at the top of __init__,
        # making `self.traj_list` the wrong width (4 instead of 5).

        self.states = None  # assert self.states == (self.env_num, state_dim)
        self.device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
        # 5 fields per step for on-policy: state, reward, done, action, noise
        self.traj_list = [[[] for _ in range(4 if self.if_off_policy else 5)]
                          for _ in range(self.env_num)]  # for `self.explore_vec_env()`

        act_class = getattr(self, "act_class", None)
        cri_class = getattr(self, "cri_class", None)
        self.act = act_class(net_dim, self.mid_layer_num, state_dim, action_dim).to(self.device)
        # share the actor when no separate critic class is configured
        self.cri = cri_class(net_dim, self.mid_layer_num, state_dim, action_dim).to(self.device) \
            if cri_class else self.act
        self.act_target = deepcopy(self.act) if self.if_act_target else self.act
        self.cri_target = deepcopy(self.cri) if self.if_cri_target else self.cri

        self.act_optimizer = torch.optim.Adam(self.act.parameters(), self.learning_rate)
        self.cri_optimizer = torch.optim.Adam(self.cri.parameters(), self.learning_rate) \
            if cri_class else self.act_optimizer

        """attribute"""
        self.criterion = torch.nn.SmoothL1Loss()
        self.ratio_clip = getattr(args, "ratio_clip", 0.25)  # `ratio.clamp(1 - clip, 1 + clip)`
        self.lambda_entropy = getattr(args, "lambda_entropy", 0.02)  # could be 0.00~0.10

    def explore_env(self, env, target_step) -> list:
        """Collect at least `target_step` transitions (finishing the last episode).

        :return: per-field tensors via `convert_trajectory`.
        """
        traj_list = []
        last_done = [0, ]
        state = self.states[0]

        step_i = 0
        done = False
        get_action = self.act.get_action
        get_a_to_e = self.act.get_a_to_e
        while step_i < target_step or not done:
            ten_s = torch.as_tensor(state, dtype=torch.float32).unsqueeze(0)
            ten_a, ten_n = [ten.cpu() for ten in get_action(ten_s.to(self.device))]
            next_s, reward, done, _ = env.step(get_a_to_e(ten_a)[0].numpy())

            traj_list.append((ten_s, reward, done, ten_a, ten_n))
            step_i += 1
            state = env.reset() if done else next_s  # restart immediately on episode end
        self.states[0] = state
        last_done[0] = step_i
        return self.convert_trajectory(traj_list, last_done)

    def update_net(self, buffer):
        """One PPO update over the whole buffer.

        :return: (critic loss, actor objective, mean action log-std) for logging.
        """
        with torch.no_grad():
            buf_state, buf_reward, buf_mask, buf_action, buf_noise = [ten.to(self.device) for ten in buffer]
            buf_len = buf_state.shape[0]

            '''get buf_r_sum, buf_logprob'''
            bs = 2 ** 10  # set a smaller 'BatchSize' when out of GPU memory.
            buf_value = [self.cri_target(buf_state[i:i + bs]) for i in range(0, buf_len, bs)]
            buf_value = torch.cat(buf_value, dim=0)
            buf_logprob = self.act.get_old_logprob(buf_action, buf_noise)

            buf_r_sum, buf_adv_v = self.get_reward_sum(buf_len, buf_reward, buf_mask, buf_value)  # detach()
            # normalize advantages to zero mean / unit std for stable ratios
            buf_adv_v = (buf_adv_v - buf_adv_v.mean()) / (buf_adv_v.std() + 1e-5)
            # buf_adv_v: buffer data of adv_v value
            del buf_noise

        '''update network'''
        obj_critic = obj_actor = None
        update_times = int(1 + buf_len * self.repeat_times / self.batch_size)
        for _ in range(update_times):
            indices = torch.randint(buf_len, size=(self.batch_size,), requires_grad=False, device=self.device)

            state = buf_state[indices]
            r_sum = buf_r_sum[indices]
            adv_v = buf_adv_v[indices]
            action = buf_action[indices]
            logprob = buf_logprob[indices]

            '''PPO: Surrogate objective of Trust Region'''
            new_logprob, obj_entropy = self.act.get_logprob_entropy(state, action)  # it is obj_actor
            ratio = (new_logprob - logprob.detach()).exp()
            surrogate1 = adv_v * ratio
            surrogate2 = adv_v * ratio.clamp(1 - self.ratio_clip, 1 + self.ratio_clip)
            obj_surrogate = -torch.min(surrogate1, surrogate2).mean()
            obj_actor = obj_surrogate + obj_entropy * self.lambda_entropy
            self.optimizer_update(self.act_optimizer, obj_actor)

            value = self.cri(state).squeeze(1)  # critic network predicts the reward_sum (Q value) of state
            obj_critic = self.criterion(value, r_sum)
            self.optimizer_update(self.cri_optimizer, obj_critic)

        a_std_log = getattr(self.act, 'a_std_log', torch.zeros(1)).mean()
        return obj_critic.item(), -obj_actor.item(), a_std_log.item()  # logging_tuple

    def get_reward_sum(self, buf_len, buf_reward, buf_mask, buf_value):
        """Discounted return (backwards pass) and raw advantage = return - value."""
        buf_r_sum = torch.empty(buf_len, dtype=torch.float32, device=self.device)  # reward sum

        pre_r_sum = 0
        for i in range(buf_len - 1, -1, -1):
            buf_r_sum[i] = buf_reward[i] + buf_mask[i] * pre_r_sum  # mask = (1-done)*gamma
            pre_r_sum = buf_r_sum[i]
        buf_adv_v = buf_r_sum - buf_value[:, 0]
        return buf_r_sum, buf_adv_v

    def convert_trajectory(self, traj_list, _last_done):  # [ElegantRL.2022.01.01]
        """Transpose per-step tuples into 5 per-field tensors: state, reward, mask, action, noise."""
        # BUG FIX: materialize the transpose as a real list. The previous
        # `traj_list = [map(...)]` wrapped the map object in a one-item list,
        # so `traj_list[1]` raised IndexError and the stacks below never ran.
        traj_list = list(map(list, zip(*traj_list)))  # state, reward, done, action, noise

        '''stack items'''
        traj_list[0] = torch.stack(traj_list[0]).squeeze(1)
        traj_list[1] = (torch.tensor(traj_list[1], dtype=torch.float32) * self.reward_scale).unsqueeze(1)
        # store mask = (1-done)*gamma so the return recursion is a single multiply-add
        traj_list[2] = ((1 - torch.tensor(traj_list[2], dtype=torch.float32)) * self.gamma).unsqueeze(1)
        traj_list[3:] = [torch.stack(item).squeeze(1) for item in traj_list[3:]]
        return traj_list

    @staticmethod
    def optimizer_update(optimizer, objective):
        """Standard zero_grad / backward / step."""
        optimizer.zero_grad()
        objective.backward()
        optimizer.step()
class ReplayBufferList(list):  # for on-policy
    """List-backed on-policy buffer: after `update_buffer`, self holds one
    concatenated tensor per trajectory field (state, reward, mask, ...)."""

    def __init__(self):
        list.__init__(self)

    def update_buffer(self, traj_list):
        """Replace the buffer contents with the given trajectories.

        :param traj_list: iterable of trajectories; each trajectory is a
            sequence of per-field tensors (as produced by `convert_trajectory`).
        :return: (steps, r_exp) — number of stored steps and mean reward.
        """
        # BUG FIX: materialize the transpose as a real list of per-field groups.
        # The previous `[map(...)]` wrapped the map object in a one-item list,
        # so `torch.cat` received a map of lists and failed.
        cur_items = list(map(list, zip(*traj_list)))
        self[:] = [torch.cat(item, dim=0) for item in cur_items]

        steps = self[1].shape[0]      # index 1 is the reward field
        r_exp = self[1].mean().item()
        return steps, r_exp
class Arguments:
def __init__(self, agent, env_func=None, env_args=None):
self.env_func = env_func # env = env_func(*env_args)
self.env_args = env_args # env = env_func(*env_args)
self.env_num = self.env_args['env_num'] # env_num = 1. In vector env, env_num > 1.
self.max_step = self.env_args['max_step'] # the max step of an episode
self.env_name = self.env_args['env_name'] # the env name. Be used to set 'cwd'.
self.state_dim = self.env_args['state_dim'] # vector dimension (feature number) of state
self.action_dim = self.env_args['action_dim'] # vector dimension (feature number) of action
self.if_discrete = self.env_args['if_discrete'] # discrete or continuous action space
self.agent = agent # DRL algorithm
self.net_dim = 2 ** 7 # the middle layer dimension of Fully Connected Network
self.batch_size = 2 ** 7 # num of transitions sampled from replay buffer.
self.mid_layer_num = 1 # the middle layer number of Fully Connected Network
self.if_off_policy = self.get_if_off_policy() # agent is on-policy or off-policy
self.if_use_old_traj = False # save old data to splice and get a complete trajectory (for vector env)
if self.if_off_policy: # off-policy
self.max_memo = 2 ** 21 # capacity of replay buffer
self.target_step = 2 ** 10 # repeatedly update network to keep critic's loss small
self.repeat_times = 2 ** 0 # collect target_step, then update network
else: # on-policy
self.max_memo = 2 ** 12 # capacity of replay buffer
self.target_step = self.max_memo # repeatedly update network to keep critic's loss small
self.repeat_times = 2 ** 4 # collect target_step, then update network
'''Arguments for training'''
self.gamma = 0.99 # discount factor of future rewards
self.reward_scale = 2 ** 0 # an approximate target reward usually be closed to 256
self.learning_rate = 2 ** -12 # 2 ** -15 ~= 3e-5
self.soft_update_tau = 2 ** -8 # 2 ** -8 ~= 5e-3
'''Arguments for device'''
self.worker_num = 2 # rollout workers number pre GPU (adjust it to get high GPU usage)
self.thread_num = 8 # cpu_num for pytorch, `torch.set_num_threads(self.num_threads)`
self.random_seed = 0 # initialize random seed in self.init_before_training()
self.learner_gpus = 0 # `int` means the ID of single GPU, -1 means CPU
'''Arguments for evaluate'''
self.cwd = None # current working directory to save model. None means set automatically
self.if_remove = True # remove the cwd folder? (True, False, None:ask me)
self.break_step = +np.inf # break training if 'total_step > break_step'
'''Arguments for evaluate'''
self.eval_gap = 2 ** 7 # evaluate the agent per eval_gap seconds
self.eval_times = 2 ** 4 # number of times that get episode return
def init_before_training(self):
    """Seed the RNGs, configure torch defaults, and prepare the working directory.

    Reads: random_seed, thread_num, cwd, env_name, agent, learner_gpus, if_remove.
    Side effects: may prompt the user, may delete an old cwd, always creates cwd.
    """
    # reproducibility and torch runtime settings
    np.random.seed(self.random_seed)
    torch.manual_seed(self.random_seed)
    torch.set_num_threads(self.thread_num)
    torch.set_default_dtype(torch.float32)

    # auto-set cwd when the caller did not choose one
    if self.cwd is None:
        agent_name = self.agent.__name__[5:]  # drop the leading 'Agent' prefix
        self.cwd = f'./{self.env_name}_{agent_name}_{self.learner_gpus}'

    # decide whether to wipe the previous run's directory
    if self.if_remove is None:
        answer = input(f"| Arguments PRESS 'y' to REMOVE: {self.cwd}? ")
        self.if_remove = bool(answer == 'y')
    elif self.if_remove:
        import shutil
        shutil.rmtree(self.cwd, ignore_errors=True)
        print(f"| Arguments Remove cwd: {self.cwd}")
    else:
        print(f"| Arguments Keep cwd: {self.cwd}")
    os.makedirs(self.cwd, exist_ok=True)
def get_if_off_policy(self):
    """Infer whether the agent is off-policy from its class name.

    PPO and A2C are the on-policy algorithms; anything else is treated as
    off-policy.
    """
    agent_name = self.agent.__name__
    return ('PPO' not in agent_name) and ('A2C' not in agent_name)  # if_off_policy
def train_agent(args):
    """Single-process training loop: explore the env, update the networks,
    and periodically print progress and save the actor to disk.

    :param args: an Arguments-like config object; consumed (del'd) after init.
    Stops when `total_step > args.break_step` or when `{cwd}/stop` exists.
    """
    torch.set_grad_enabled(False)
    args.init_before_training()
    gpu_id = args.learner_gpus

    # initialization
    env = build_env(args.env_func, args.env_args)
    agent = args.agent(args.net_dim, args.state_dim, args.action_dim, gpu_id=gpu_id, args=args)
    agent.states = [env.reset(), ]
    buffer = ReplayBufferList()

    # keep only what the loop needs, then drop the config object
    cwd = args.cwd
    break_step = args.break_step
    target_step = args.target_step
    del args

    start_time = time.time()
    total_step = 0
    save_gap = int(5e4)  # checkpoint roughly every 5e4 environment steps
    last_save_step = -save_gap  # forces a save on the first loop iteration

    while True:
        trajectory = agent.explore_env(env, target_step)
        steps, r_exp = buffer.update_buffer((trajectory,))

        torch.set_grad_enabled(True)
        logging_tuple = agent.update_net(buffer)
        torch.set_grad_enabled(False)

        total_step += steps
        if total_step > last_save_step + save_gap:
            last_save_step = total_step
            print(
                f"Step:{total_step:8.2e} "
                f"ExpR:{r_exp:8.2f} "
                f"Returns:{env.cumulative_returns:8.2f} "
                f"ObjC:{logging_tuple[0]:8.2f} "
                f"ObjA:{logging_tuple[1]:8.2f} "
            )
            save_path = f"{cwd}/actor_{total_step:014.0f}_{time.time() - start_time:08.0f}_{r_exp:08.2f}.pth"
            torch.save(agent.act.state_dict(), save_path)
        if (total_step > break_step) or os.path.exists(f"{cwd}/stop"):
            # stop training when reach `break_step` or `mkdir cwd/stop`
            break
    print(f'| UsedTime: {time.time() - start_time:.0f} | SavedDir: {cwd}')
def get_episode_return_and_step(env, act) -> (float, int):  # [ElegantRL.2022.01.01]
    """
    Evaluate the actor (policy) network for one episode on a testing environment.
    :param env: environment object exposing max_step, if_discrete, reset(), step().
    :param act: Actor (policy) network; its parameters determine the device used.
    :return: episodic reward and number of steps taken.
    """
    max_step = env.max_step
    if_discrete = env.if_discrete
    device = next(act.parameters()).device  # net.parameters() is a Python generator.

    episode_return = 0.0  # running sum of per-step rewards
    steps_done = None
    state = env.reset()
    for steps_done in range(max_step):
        state_tensor = torch.as_tensor(state, dtype=torch.float32, device=device).unsqueeze(0)
        action_tensor = act(state_tensor)
        if if_discrete:
            action_tensor = action_tensor.argmax(dim=1)  # pick the highest-scoring action
        # detach() is redundant when the caller already disabled gradients, but harmless
        chosen_action = action_tensor.detach().cpu().numpy()[0]
        state, reward, done, _ = env.step(chosen_action)
        episode_return += reward
        if done:
            break
    # envs that track a cumulative return override the naive reward sum
    episode_return = getattr(env, 'cumulative_returns', episode_return)
    steps_done += 1  # convert the last 0-based index into a step count
    return episode_return, steps_done
def load_torch_file(model, _path):
    """Restore network weights into `model` from the checkpoint file at `_path`.

    The map_location lambda keeps deserialized tensors on CPU storage, so
    checkpoints saved on GPU load fine on CPU-only machines.
    """
    loaded_state = torch.load(_path, map_location=lambda storage, loc: storage)
    model.load_state_dict(loaded_state)
"""train and evaluate"""
def run():
    """Configure and launch PPO training on StockTradingEnv (training split [0, 834))."""
    import sys
    gpu_id = int(sys.argv[1]) if len(sys.argv) > 1 else 0

    probe_env = StockTradingEnv()
    env_args = get_gym_env_args(env=probe_env, if_print=False)
    env_args['beg_idx'] = 0  # training set
    env_args['end_idx'] = 834  # training set

    args = Arguments(AgentPPO, env_func=StockTradingEnv, env_args=env_args)
    args.target_step = args.max_step * 4
    args.reward_scale = 2 ** -7
    args.learning_rate = 2 ** -14
    args.break_step = int(5e5)

    args.learner_gpus = gpu_id
    args.random_seed += gpu_id + 1943  # decorrelate seeds across GPU workers
    train_agent(args)
def evaluate_models_in_directory(dir_path=None):
    """Evaluate every saved actor checkpoint in `dir_path` on the testing split.

    :param dir_path: directory of `actor_*` checkpoints; when None it is derived
        from `sys.argv[1]` (the GPU id), otherwise evaluation runs on CPU.
    Prints the mean cumulative return over 4 episodes per checkpoint.
    """
    if dir_path is None:
        gpu_id = int(sys.argv[1])
        dir_path = f'StockTradingEnv-v2_PPO_{gpu_id}'
    else:
        gpu_id = -1  # explicit dir_path implies CPU evaluation
    print(f"| evaluate_models_in_directory: gpu_id {gpu_id}")
    print(f"| evaluate_models_in_directory: dir_path {dir_path}")

    model_names = sorted(name for name in os.listdir(dir_path) if name.startswith('actor_'))

    env_args = {
        'env_num': 1,
        'env_name': 'StockTradingEnv-v2',
        'max_step': 1113,
        'state_dim': 151,
        'action_dim': 15,
        'if_discrete': False,
        'beg_idx': 834,  # testing set
        'end_idx': 1113,  # testing set
    }
    env = build_env(env_func=StockTradingEnv, env_args=env_args)
    env.if_random_reset = False  # deterministic resets for comparable scores

    args = Arguments(AgentPPO, env_func=StockTradingEnv, env_args=env_args)
    if torch.cuda.is_available() and (gpu_id >= 0):
        device = torch.device(f"cuda:{gpu_id}")
    else:
        device = torch.device("cpu")
    actor = ActorPPO(mid_dim=args.net_dim,
                     mid_layer_num=args.mid_layer_num,
                     state_dim=args.state_dim,
                     action_dim=args.action_dim).to(device)

    for model_name in model_names:
        load_torch_file(actor, f"{dir_path}/{model_name}")
        episode_returns = [get_episode_return_and_step(env, actor)[0] for _ in range(4)]
        mean_return = np.mean(episode_returns)
        print(f"cumulative_returns {mean_return:9.3f} {model_name}")
# Script entry point: check the env, train, then evaluate all saved checkpoints.
if __name__ == '__main__':
    check_env()  # presumably sanity-checks the environment implementation (defined earlier in this file)
    run()  # train PPO on the training split
    evaluate_models_in_directory()  # score every saved actor on the testing split
| 29,837 | 39.706685 | 120 | py |
ElegantRL | ElegantRL-master/examples/demo_mujoco_draw_obj_h.py | from elegantrl.train.evaluator import *
from elegantrl.train.config import Arguments
from elegantrl.envs.CustomGymEnv import GymNormaEnv
from elegantrl.agents.AgentPPO import AgentPPO, AgentPPOgetObjHterm
from elegantrl.agents.AgentSAC import AgentSAC, AgentReSAC
def demo_evaluator_actor_h_term_to_str():
    """Load saved actor checkpoints from a directory and print an objective value
    (labelled "Hamilton") for each one.

    Everything is selected by hard-coded literals below: `env_name` is picked by
    a list index, `agent_class` by a list index, and `dir_path` by a run of
    reassignments where only the last uncommented line takes effect.  Edit those
    literals to choose what gets evaluated.
    """
    from elegantrl.train.config import build_env
    gpu_id = 2  # >=0 means GPU ID, -1 means CPU
    env_name = ['Hopper-v3',
                'HalfCheetah-v3',
                'Swimmer-v3',
                'Ant-v3',
                'Humanoid-v3',
                'Walker2d-v3',
                ][5]  # index 5 selects 'Walker2d-v3'
    agent_class = [AgentPPO, AgentPPOgetObjHterm][1]  # index 1 selects AgentPPOgetObjHterm
    # agent_class = [AgentSAC, AgentReSAC][1]

    # Per-environment configuration: env builder, env metadata, network size.
    # The branch-level `actor_path` assignments are left over from the commented
    # '''evaluate file''' section below and are unused by the directory loop.
    if env_name == 'Hopper-v3':
        env_func = GymNormaEnv  # gym.make
        env_args = {
            'env_num': 1,
            'env_name': 'Hopper-v3',
            'max_step': 1000,
            'state_dim': 11,
            'action_dim': 3,
            'if_discrete': False,
            'target_return': 3500.,
        }
        actor_path = './actor_Hopper_PPO_hop.pth'
        # actor_path = './actor_Hopper_PPO_hop_fail.pth'
        # actor_path = './actor_Hopper_PPO_fail.pth'
        net_dim = 2 ** 8
        layer_num = 3
    elif env_name == 'HalfCheetah-v3':
        env_func = GymNormaEnv  # gym.make
        env_args = {
            'env_num': 1,
            'env_name': 'HalfCheetah-v3',
            'max_step': 1000,
            'state_dim': 17,
            'action_dim': 6,
            'if_discrete': False,
            'target_return': 4800.0,
        }
        # actor_path = './actor_HalfCheetah_PPO_run.pth'
        # actor_path = './actor_HalfCheetah_PPO_kiss_ground.pth'
        # actor_path = './actor_HalfCheetah_PPO_stand.pth'
        net_dim = 2 ** 8
        layer_num = 3
    elif env_name == 'Swimmer-v3':
        # env_func = GymNormaEnv  # gym.make
        import gym
        env_func = gym.make  # Swimmer uses the raw gym env, not the normalizing wrapper
        env_args = {
            'action_dim': 2,
            'env_name': 'Swimmer-v3',
            'env_num': 1,
            'if_discrete': False,
            'max_step': 1000,
            'state_dim': 8,
            'target_return': 360.0
        }
        # agent_class = AgentPPO
        # actor_path = './actor_Swimmer_PPO_C_160.pth'
        # actor_path = './actor_Swimmer_PPO_C_134.pth'
        # actor_path = './actor_Swimmer_PPO_C_157.pth'
        # actor_path = './actor_Swimmer_PPO_C_152.pth'
        # actor_path = './actor_Swimmer_PPO_C_097.201.pth'
        # agent_class = AgentReSAC
        # actor_path = './actor_Swimmer_ReSAC_S_211.pth'
        # actor_path = './actor_Swimmer_ReSAC_S_224.pth'
        net_dim = 2 ** 8
        layer_num = 3
    elif env_name == 'Walker2d-v3':
        env_func = GymNormaEnv  # gym.make
        env_args = {
            'env_num': 1,
            'env_name': 'Walker2d-v3',
            'if_discrete': False,
            'max_step': 1000,
            'state_dim': 17,
            'action_dim': 6,
            'target_return': 7000,
        }
        actor_path = './actor_Walker2d_run11_7870.pth'  # norm
        # actor_path = './actor_Walker2d_run11_7209.pth'  # norm
        # actor_path = './actor_Walker2d_run11_6812.pth'  # norm
        # actor_path = './actor_Walker2d_run11_6955.pth'  # norm
        # actor_path = './actor_Walker2d_run12_5461.pth'  # norm
        # actor_path = './actor_Walker2d_run12_3295.pth'  # norm
        # actor_path = './actor_Walker2d_jump_4008.pth'  # norm
        # actor_path = './actor_Walker2d_fail_4512.pth'  # norm
        # actor_path = './actor_Walker2d_fail_6792.pth'  # norm
        # actor_path = './actor_Walker2d_fail_4992.pth'  # norm
        net_dim = 2 ** 8
        layer_num = 3
    elif env_name == 'Ant-v3':
        env_func = GymNormaEnv
        env_args = {
            'env_num': 1,
            'env_name': 'Ant-v3',
            'max_step': 1000,
            'state_dim': 111,
            'action_dim': 8,
            'if_discrete': False,
            'target_return': 6000.0,
        }
        # actor_path = './actor_Ant_PPO_run_4701.pth'
        # actor_path = './actor_Ant_PPO_run_2105.pth'
        actor_path = './actor_Ant_PPO_fail_174.pth'
        net_dim = 2 ** 8
        layer_num = 3
    elif env_name == 'Humanoid-v3':
        from elegantrl.envs.CustomGymEnv import HumanoidEnv
        env_func = HumanoidEnv
        env_args = {
            'env_num': 1,
            'env_name': 'Humanoid-v3',
            'max_step': 1000,
            'state_dim': 376,
            'action_dim': 17,
            'if_discrete': False,
            'target_return': 8000.,
        }
        # from elegantrl.agents.AgentSAC import AgentReSAC
        # agent_class = AgentReSAC
        # agent_class = AgentPPO
        # actor_path = './actor_Huamnoid_PPO_run_8021.pth'
        # actor_path = './actor_Huamnoid_PPO_run_7105.pth'
        # actor_path = './actor_Huamnoid_PPO_run_6437.pth'
        # actor_path = './actor_Huamnoid_PPO_run_5422.pth'
        # actor_path = './actor_Huamnoid_PPO_run_3491.pth'
        # actor_path = './actor_Huamnoid_PPO_lift_leg_7500.pth'
        # actor_path = './actor_Huamnoid_PPO_lift_leg_6076.pth'
        # actor_path = './actor_Huamnoid_PPO_lift_knee_5136.pth'
        # actor_path = './actor_Huamnoid_PPO_curl_leg_4244.pth'  # net_dim = 2 ** 7
        # actor_path = './actor_Huamnoid_PPO_curl_leg_6378.pth'
        # actor_path = './actor_Huamnoid_PPO_run_7194.pth'  # norm
        # actor_path = './actor_Huamnoid_PPO_lift_knee_6887.pth'
        # actor_path = './actor_Huamnoid_PPO_lift_knee_7585.pth'
        # actor_path = './actor_Huamnoid_PPO_lift_knee_5278.pth'
        # actor_path = './actor_Huamnoid_PPO_run_4759.pth'
        # actor_path = './actor__000108565781_07978.063.pth'  # (Humanoid-v3_PPOHtermK_6 from single to two legs)
        # actor_path = './actor_Huamnoid_PPO_run_9732.pth'  # norm, nice racing
        # actor_path = './actor__000018373785_10863.449.pth'  # norm, nice racing
        # actor_path = './actor__000027862483_10202.021.pth'  # norm, nice racing
        net_dim = 2 ** 9
        layer_num = 3
    else:
        raise ValueError('env_name:', env_name)

    '''init'''
    from elegantrl.train.run import init_agent
    from elegantrl.train.run import init_buffer
    args = Arguments(agent_class=agent_class, env_func=env_func, env_args=env_args)
    args.net_dim = net_dim
    args.num_layer = layer_num
    env = build_env(env_func=args.env_func, env_args=args.env_args)
    agent = init_agent(args, gpu_id, env)
    torch.set_grad_enabled(False)  # evaluation only: no gradients needed for exploration

    '''evaluate file'''
    # buffer = init_buffer(args, gpu_id)
    # agent.act.load_state_dict(torch.load(actor_path, map_location=lambda storage, loc: storage))
    # agent.state = env.reset()
    # target_step = args.max_step * 4
    #
    # trajectory = agent.explore_env(env, target_step)
    # buffer.update_buffer([trajectory, ])
    # obj_hamilton = agent.update_net(buffer)
    #
    # print(f"Hamilton {obj_hamilton:9.3f}")

    '''evaluate directory'''
    # NOTE(review): dead stores below — only the LAST uncommented assignment
    # ('./Walker2d-v3_PPO_2_5640') takes effect; the list doubles as a history
    # of previously evaluated runs.
    dir_path = './Humanoid-v3_PPOHtermK_4_10726'
    dir_path = './Humanoid-v3_PPOHtermK_5_10033'
    dir_path = './Humanoid-v3_PPO_1_12163'
    dir_path = './Humanoid-v3_PPO_2_10777'
    dir_path = './Hopper-v3_PPOHtermK_6'
    dir_path = './Hopper-v2_PPO_1'
    dir_path = './Hopper-v2_PPOHtermK_1'
    dir_path = './HalfCheetah-v3_PPO_1_8964'
    dir_path = './HalfCheetah-v3_PPOHtermK_5_4949'
    dir_path = './HalfCheetah-v3_PPOHtermK_5_4837'
    dir_path = './Hopper-v2_PPOHtermK_2_3156'
    dir_path = './Walker2d-v3_PPOHtermK_6_6380'
    dir_path = './Walker2d-v3_PPOHtermK_5_6196'
    dir_path = './Walker2d-v3_PPO_4_7884'
    dir_path = './Walker2d-v3_PPO_3_6635'
    dir_path = './Walker2d-v3_PPO_2_7191'
    dir_path = './Walker2d-v3_PPO_3_5449'
    dir_path = './Walker2d-v3_PPO_2_5640'
    # dir_path = './HalfCheetah-v3_PPO_6_7345'
    # dir_path = './Ant-v3_PPO_5_6799'
    # dir_path = './Ant-v3_PPO_5_6799'
    # dir_path = './Ant-v3_PPOHtermK_6_6862'
    # dir_path = './Ant-v3_PPO_0'
    # dir_path = './Ant-v3_PPO_1_5652'
    # dir_path = './Swimmer-v3_PPOHtermK_3_153'
    # dir_path = './Swimmer-v3_PPO_2_157'
    # dir_path = './Swimmer-v3_PPO_3_121'
    names = [name for name in os.listdir(dir_path)
             if (name[:6] == 'actor_' and name[-4:] == '.pth')]
    names.sort()
    eval_gap = int(max(1.0, len(names) / 128))  # subsample so at most ~128 checkpoints are evaluated
    print(f"| len(names) {len(names)}, eval_gap {eval_gap}")
    for i, name in enumerate(names):
        # short file names (no step/return suffix) are subsampled by eval_gap;
        # longer suffixed names are always evaluated
        if (len(name) <= 22) and (i % eval_gap != 0):
            continue
        actor_path = f"{dir_path}/{name}"
        buffer = init_buffer(args, gpu_id)
        agent.act.load_state_dict(torch.load(actor_path, map_location=lambda storage, loc: storage))
        agent.state = env.reset()
        target_step = args.target_step

        trajectory = agent.explore_env(env, target_step)
        buffer.update_buffer([trajectory, ])
        # update_net returns the value printed as "Hamilton" — presumably the
        # H-term objective computed by AgentPPOgetObjHterm; verify in the agent class
        obj_hamilton = agent.update_net(buffer)
        print(f"{actor_path:64} | Hamilton {obj_hamilton}")
def demo_get_h_term_curve_from_str():
# Hopper-v3_PPOHtermK_6
data11 = """
./Hopper-v3_PPOHtermK_6/actor_000000012408.pth | Hamilton 0.4845615327358246
./Hopper-v3_PPOHtermK_6/actor_000000020777.pth | Hamilton 0.4891211688518524
./Hopper-v3_PPOHtermK_6/actor_000000029107.pth | Hamilton 0.5241979956626892
./Hopper-v3_PPOHtermK_6/actor_000000037553.pth | Hamilton 0.5400240421295166
./Hopper-v3_PPOHtermK_6/actor_000000046139.pth | Hamilton 0.5519936084747314
./Hopper-v3_PPOHtermK_6/actor_000000054589.pth | Hamilton 0.562807559967041
./Hopper-v3_PPOHtermK_6/actor_000000063224.pth | Hamilton 0.5601237416267395
./Hopper-v3_PPOHtermK_6/actor_000000072219.pth | Hamilton 0.5452290773391724
./Hopper-v3_PPOHtermK_6/actor_000000083263.pth | Hamilton 0.5468662977218628
./Hopper-v3_PPOHtermK_6/actor_000000092343.pth | Hamilton 0.5557214021682739
./Hopper-v3_PPOHtermK_6/actor_000000101142.pth | Hamilton 0.55520099401474
./Hopper-v3_PPOHtermK_6/actor_000000109908.pth | Hamilton 0.5554271340370178
./Hopper-v3_PPOHtermK_6/actor_000000118629.pth | Hamilton 0.5566689968109131
./Hopper-v3_PPOHtermK_6/actor_000000127202.pth | Hamilton 0.5530040264129639
./Hopper-v3_PPOHtermK_6/actor_000000135726.pth | Hamilton 0.5524224042892456
./Hopper-v3_PPOHtermK_6/actor_000000144219.pth | Hamilton 0.5646094679832458
./Hopper-v3_PPOHtermK_6/actor_000000152694.pth | Hamilton 0.5643770098686218
./Hopper-v3_PPOHtermK_6/actor_000000161287.pth | Hamilton 0.5637863874435425
./Hopper-v3_PPOHtermK_6/actor_000000169646.pth | Hamilton 0.5686627626419067
./Hopper-v3_PPOHtermK_6/actor_000000178063.pth | Hamilton 0.5847662091255188
./Hopper-v3_PPOHtermK_6/actor_000000186480.pth | Hamilton 0.5950624346733093
./Hopper-v3_PPOHtermK_6/actor_000000194960.pth | Hamilton 0.6063750386238098
./Hopper-v3_PPOHtermK_6/actor_000000203757.pth | Hamilton 0.6130822896957397
./Hopper-v3_PPOHtermK_6/actor_000000212391.pth | Hamilton 0.6151049733161926
./Hopper-v3_PPOHtermK_6/actor_000000221197.pth | Hamilton 0.6102234125137329
./Hopper-v3_PPOHtermK_6/actor_000000229999.pth | Hamilton 0.6136663556098938
./Hopper-v3_PPOHtermK_6/actor_000000238587.pth | Hamilton 0.6067836284637451
./Hopper-v3_PPOHtermK_6/actor_000000247208.pth | Hamilton 0.6120057702064514
./Hopper-v3_PPOHtermK_6/actor_000000255983.pth | Hamilton 0.6141001582145691
./Hopper-v3_PPOHtermK_6/actor_000000265109.pth | Hamilton 0.6142712831497192
./Hopper-v3_PPOHtermK_6/actor_000000274515.pth | Hamilton 0.620732843875885
./Hopper-v3_PPOHtermK_6/actor_000000283766.pth | Hamilton 0.6174814701080322
./Hopper-v3_PPOHtermK_6/actor_000000292640.pth | Hamilton 0.6248815655708313
./Hopper-v3_PPOHtermK_6/actor_000000302039.pth | Hamilton 0.6254605650901794
./Hopper-v3_PPOHtermK_6/actor_000000311898.pth | Hamilton 0.6247671246528625
./Hopper-v3_PPOHtermK_6/actor_000000322134.pth | Hamilton 0.6237598061561584
./Hopper-v3_PPOHtermK_6/actor_000000331720.pth | Hamilton 0.6273255944252014
./Hopper-v3_PPOHtermK_6/actor_000000342026.pth | Hamilton 0.6320148706436157
./Hopper-v3_PPOHtermK_6/actor_000000352589.pth | Hamilton 0.6298384070396423
./Hopper-v3_PPOHtermK_6/actor_000000362466.pth | Hamilton 0.629938542842865
./Hopper-v3_PPOHtermK_6/actor_000000373295.pth | Hamilton 0.6280447840690613
./Hopper-v3_PPOHtermK_6/actor_000000383285.pth | Hamilton 0.6302109956741333
./Hopper-v3_PPOHtermK_6/actor_000000392853.pth | Hamilton 0.6328704357147217
./Hopper-v3_PPOHtermK_6/actor_000000403735.pth | Hamilton 0.630757749080658
./Hopper-v3_PPOHtermK_6/actor_000000413666.pth | Hamilton 0.6316863298416138
./Hopper-v3_PPOHtermK_6/actor_000000423828.pth | Hamilton 0.6382369995117188
./Hopper-v3_PPOHtermK_6/actor_000000433923.pth | Hamilton 0.6361097097396851
./Hopper-v3_PPOHtermK_6/actor_000000445209.pth | Hamilton 0.6314388513565063
./Hopper-v3_PPOHtermK_6/actor_000000455084.pth | Hamilton 0.6363314390182495
./Hopper-v3_PPOHtermK_6/actor_000000465965.pth | Hamilton 0.6352642774581909
./Hopper-v3_PPOHtermK_6/actor_000000474680.pth | Hamilton 0.6316134929656982
./Hopper-v3_PPOHtermK_6/actor_000000484022.pth | Hamilton 0.6367190480232239
./Hopper-v3_PPOHtermK_6/actor_000000494989.pth | Hamilton 0.6335411071777344
./Hopper-v3_PPOHtermK_6/actor_000000505143.pth | Hamilton 0.6421518325805664
./Hopper-v3_PPOHtermK_6/actor_000000515409.pth | Hamilton 0.651789128780365
./Hopper-v3_PPOHtermK_6/actor_000000526234.pth | Hamilton 0.6651453971862793
./Hopper-v3_PPOHtermK_6/actor_000000538119.pth | Hamilton 0.6629406809806824
./Hopper-v3_PPOHtermK_6/actor_000000547348.pth | Hamilton 0.6498215198516846
./Hopper-v3_PPOHtermK_6/actor_000000557193.pth | Hamilton 0.6508785486221313
./Hopper-v3_PPOHtermK_6/actor_000000568232.pth | Hamilton 0.6713051795959473
./Hopper-v3_PPOHtermK_6/actor_000000578897.pth | Hamilton 0.6794459223747253
./Hopper-v3_PPOHtermK_6/actor_000000588665.pth | Hamilton 0.6883001923561096
./Hopper-v3_PPOHtermK_6/actor_000000597229.pth | Hamilton 0.6870630979537964
./Hopper-v3_PPOHtermK_6/actor_000000606032.pth | Hamilton 0.704490065574646
./Hopper-v3_PPOHtermK_6/actor_000000615361.pth | Hamilton 0.7083249092102051
./Hopper-v3_PPOHtermK_6/actor_000000625058.pth | Hamilton 0.7060838341712952
./Hopper-v3_PPOHtermK_6/actor_000000635150.pth | Hamilton 0.7092532515525818
./Hopper-v3_PPOHtermK_6/actor_000000646309.pth | Hamilton 0.72728031873703
./Hopper-v3_PPOHtermK_6/actor_000000657933.pth | Hamilton 0.7280641198158264
./Hopper-v3_PPOHtermK_6/actor_000000670129.pth | Hamilton 0.7289828658103943
./Hopper-v3_PPOHtermK_6/actor_000000682296.pth | Hamilton 0.7331717014312744
./Hopper-v3_PPOHtermK_6/actor_000000691991.pth | Hamilton 0.7276478409767151
./Hopper-v3_PPOHtermK_6/actor_000000701118.pth | Hamilton 0.7480958104133606
./Hopper-v3_PPOHtermK_6/actor_000000710322.pth | Hamilton 0.7489750981330872
./Hopper-v3_PPOHtermK_6/actor_000000719780.pth | Hamilton 0.7601510882377625
./Hopper-v3_PPOHtermK_6/actor_000000729949.pth | Hamilton 0.7754960656166077
./Hopper-v3_PPOHtermK_6/actor_000000738652.pth | Hamilton 0.7775801420211792
./Hopper-v3_PPOHtermK_6/actor_000000750188.pth | Hamilton 0.7936223745346069
./Hopper-v3_PPOHtermK_6/actor_000000760871.pth | Hamilton 0.8015986680984497
./Hopper-v3_PPOHtermK_6/actor_000000770693.pth | Hamilton 0.801663875579834
./Hopper-v3_PPOHtermK_6/actor_000000780484.pth | Hamilton 0.8167001008987427
./Hopper-v3_PPOHtermK_6/actor_000000790972.pth | Hamilton 0.8108416199684143
./Hopper-v3_PPOHtermK_6/actor_000000800375.pth | Hamilton 0.8019434213638306
./Hopper-v3_PPOHtermK_6/actor_000000810442.pth | Hamilton 0.8169126510620117
./Hopper-v3_PPOHtermK_6/actor_000000821175.pth | Hamilton 0.8273810148239136
./Hopper-v3_PPOHtermK_6/actor_000000830277.pth | Hamilton 0.8431681990623474
./Hopper-v3_PPOHtermK_6/actor_000000841497.pth | Hamilton 0.8476846814155579
./Hopper-v3_PPOHtermK_6/actor_000000849942.pth | Hamilton 0.8643835186958313
./Hopper-v3_PPOHtermK_6/actor_000000860879.pth | Hamilton 0.8715230822563171
./Hopper-v3_PPOHtermK_6/actor_000000872925.pth | Hamilton 0.8689678311347961
./Hopper-v3_PPOHtermK_6/actor_000000883087.pth | Hamilton 0.8708394169807434
./Hopper-v3_PPOHtermK_6/actor_000000892921.pth | Hamilton 0.8740193247795105
./Hopper-v3_PPOHtermK_6/actor_000000901887.pth | Hamilton 0.8826173543930054
./Hopper-v3_PPOHtermK_6/actor_000000910817.pth | Hamilton 0.8855030536651611
./Hopper-v3_PPOHtermK_6/actor_000000920855.pth | Hamilton 0.9102979302406311
./Hopper-v3_PPOHtermK_6/actor_000000929245.pth | Hamilton 0.9039087295532227
./Hopper-v3_PPOHtermK_6/actor_000000939512.pth | Hamilton 0.9121676087379456
./Hopper-v3_PPOHtermK_6/actor_000000947512.pth | Hamilton 0.9295536875724792
./Hopper-v3_PPOHtermK_6/actor_000000958124.pth | Hamilton 0.9483475685119629
./Hopper-v3_PPOHtermK_6/actor_000000967691.pth | Hamilton 0.9552537798881531
./Hopper-v3_PPOHtermK_6/actor_000000976801.pth | Hamilton 0.9473283886909485
./Hopper-v3_PPOHtermK_6/actor_000000987866.pth | Hamilton 0.9566901326179504
./Hopper-v3_PPOHtermK_6/actor_000000998121.pth | Hamilton 0.9645906090736389
./Hopper-v3_PPOHtermK_6/actor_000001007813.pth | Hamilton 0.9395633339881897
./Hopper-v3_PPOHtermK_6/actor_000001017476.pth | Hamilton 0.9738671779632568
./Hopper-v3_PPOHtermK_6/actor_000001026490.pth | Hamilton 0.9783634543418884
./Hopper-v3_PPOHtermK_6/actor_000001037924.pth | Hamilton 0.9624756574630737
./Hopper-v3_PPOHtermK_6/actor_000001048431.pth | Hamilton 0.9924933910369873
./Hopper-v3_PPOHtermK_6/actor_000001058089.pth | Hamilton 1.008463978767395
./Hopper-v3_PPOHtermK_6/actor_000001067001.pth | Hamilton 1.011677622795105
./Hopper-v3_PPOHtermK_6/actor_000001076750.pth | Hamilton 1.024086356163025
./Hopper-v3_PPOHtermK_6/actor_000001086152.pth | Hamilton 1.0365326404571533
./Hopper-v3_PPOHtermK_6/actor_000001095323.pth | Hamilton 1.0430406332015991
./Hopper-v3_PPOHtermK_6/actor_000001103549.pth | Hamilton 1.0451210737228394
./Hopper-v3_PPOHtermK_6/actor_000001111677.pth | Hamilton 1.0457422733306885
./Hopper-v3_PPOHtermK_6/actor_000001121032.pth | Hamilton 1.0495630502700806
./Hopper-v3_PPOHtermK_6/actor_000001129769.pth | Hamilton 1.0336732864379883
./Hopper-v3_PPOHtermK_6/actor_000001138322.pth | Hamilton 1.0650078058242798
./Hopper-v3_PPOHtermK_6/actor_000001146322.pth | Hamilton 1.0817903280258179
./Hopper-v3_PPOHtermK_6/actor_000001154322.pth | Hamilton 1.094916582107544
./Hopper-v3_PPOHtermK_6/actor_000001163014.pth | Hamilton 1.097819209098816
./Hopper-v3_PPOHtermK_6/actor_000001173543.pth | Hamilton 1.1165040731430054
./Hopper-v3_PPOHtermK_6/actor_000001183951.pth | Hamilton 1.1398903131484985
./Hopper-v3_PPOHtermK_6/actor_000001194228.pth | Hamilton 1.1422592401504517
./Hopper-v3_PPOHtermK_6/actor_000001202228.pth | Hamilton 1.1488293409347534
./Hopper-v3_PPOHtermK_6/actor_000001210761.pth | Hamilton 1.1453982591629028
./Hopper-v3_PPOHtermK_6/actor_000001219854.pth | Hamilton 1.153541922569275
./Hopper-v3_PPOHtermK_6/actor_000001230695.pth | Hamilton 1.148061990737915
./Hopper-v3_PPOHtermK_6/actor_000001240425.pth | Hamilton 1.1397662162780762
./Hopper-v3_PPOHtermK_6/actor_000001250151.pth | Hamilton 1.1574420928955078
./Hopper-v3_PPOHtermK_6/actor_000001260798.pth | Hamilton 1.1777104139328003
./Hopper-v3_PPOHtermK_6/actor_000001269569.pth | Hamilton 1.185291051864624
./Hopper-v3_PPOHtermK_6/actor_000001278846.pth | Hamilton 1.173487663269043
./Hopper-v3_PPOHtermK_6/actor_000001288416.pth | Hamilton 1.1676157712936401
./Hopper-v3_PPOHtermK_6/actor_000001296852.pth | Hamilton 1.165687084197998
./Hopper-v3_PPOHtermK_6/actor_000001307129.pth | Hamilton 1.1716490983963013
./Hopper-v3_PPOHtermK_6/actor_000001316661.pth | Hamilton 1.175459623336792
./Hopper-v3_PPOHtermK_6/actor_000001328866.pth | Hamilton 1.170728087425232
./Hopper-v3_PPOHtermK_6/actor_000001338826.pth | Hamilton 1.166685938835144
./Hopper-v3_PPOHtermK_6/actor_000001348614.pth | Hamilton 1.1556689739227295
./Hopper-v3_PPOHtermK_6/actor_000001358671.pth | Hamilton 1.162909984588623
./Hopper-v3_PPOHtermK_6/actor_000001367067.pth | Hamilton 1.1402307748794556
./Hopper-v3_PPOHtermK_6/actor_000001377615.pth | Hamilton 1.1436141729354858
./Hopper-v3_PPOHtermK_6/actor_000001387950.pth | Hamilton 1.1515650749206543
./Hopper-v3_PPOHtermK_6/actor_000001398258.pth | Hamilton 1.1463478803634644
./Hopper-v3_PPOHtermK_6/actor_000001407271.pth | Hamilton 1.1670284271240234
./Hopper-v3_PPOHtermK_6/actor_000001418290.pth | Hamilton 1.1500120162963867
./Hopper-v3_PPOHtermK_6/actor_000001429019.pth | Hamilton 1.1682708263397217
./Hopper-v3_PPOHtermK_6/actor_000001438340.pth | Hamilton 1.1527622938156128
./Hopper-v3_PPOHtermK_6/actor_000001447131.pth | Hamilton 1.1441162824630737
./Hopper-v3_PPOHtermK_6/actor_000001455774.pth | Hamilton 1.1520307064056396
./Hopper-v3_PPOHtermK_6/actor_000001465502.pth | Hamilton 1.1459732055664062
./Hopper-v3_PPOHtermK_6/actor_000001474172.pth | Hamilton 1.1411391496658325
./Hopper-v3_PPOHtermK_6/actor_000001483489.pth | Hamilton 1.1659001111984253
./Hopper-v3_PPOHtermK_6/actor_000001494079.pth | Hamilton 1.1792479753494263
./Hopper-v3_PPOHtermK_6/actor_000001503860.pth | Hamilton 1.1666709184646606
./Hopper-v3_PPOHtermK_6/actor_000001513375.pth | Hamilton 1.185378074645996
./Hopper-v3_PPOHtermK_6/actor_000001524002.pth | Hamilton 1.189688801765442
./Hopper-v3_PPOHtermK_6/actor_000001534434.pth | Hamilton 1.21920907497406
./Hopper-v3_PPOHtermK_6/actor_000001543557.pth | Hamilton 1.2008767127990723
./Hopper-v3_PPOHtermK_6/actor_000001554038.pth | Hamilton 1.2234903573989868
./Hopper-v3_PPOHtermK_6/actor_000001562887.pth | Hamilton 1.2268754243850708
./Hopper-v3_PPOHtermK_6/actor_000001573208.pth | Hamilton 1.2268658876419067
./Hopper-v3_PPOHtermK_6/actor_000001584387.pth | Hamilton 1.2074952125549316
./Hopper-v3_PPOHtermK_6/actor_000001593906.pth | Hamilton 1.2080519199371338
./Hopper-v3_PPOHtermK_6/actor_000001603575.pth | Hamilton 1.2234455347061157
./Hopper-v3_PPOHtermK_6/actor_000001611967.pth | Hamilton 1.2272510528564453
./Hopper-v3_PPOHtermK_6/actor_000001622994.pth | Hamilton 1.2201248407363892
./Hopper-v3_PPOHtermK_6/actor_000001631738.pth | Hamilton 1.2288421392440796
./Hopper-v3_PPOHtermK_6/actor_000001643048.pth | Hamilton 1.2154768705368042
./Hopper-v3_PPOHtermK_6/actor_000001651865.pth | Hamilton 1.2162082195281982
./Hopper-v3_PPOHtermK_6/actor_000001662522.pth | Hamilton 1.2038475275039673
./Hopper-v3_PPOHtermK_6/actor_000001673380.pth | Hamilton 1.2020655870437622
./Hopper-v3_PPOHtermK_6/actor_000001683535.pth | Hamilton 1.2009681463241577
./Hopper-v3_PPOHtermK_6/actor_000001692391.pth | Hamilton 1.200035572052002
./Hopper-v3_PPOHtermK_6/actor_000001692391.pth | Hamilton 1.200035572052002
./Hopper-v3_PPOHtermK_6/actor_000001701656.pth | Hamilton 1.2203855514526367
./Hopper-v3_PPOHtermK_6/actor_000001711589.pth | Hamilton 1.2163832187652588
./Hopper-v3_PPOHtermK_6/actor_000001721703.pth | Hamilton 1.217842698097229
./Hopper-v3_PPOHtermK_6/actor_000001730673.pth | Hamilton 1.21222984790802
./Hopper-v3_PPOHtermK_6/actor_000001740775.pth | Hamilton 1.217405080795288
./Hopper-v3_PPOHtermK_6/actor_000001749481.pth | Hamilton 1.2271900177001953
./Hopper-v3_PPOHtermK_6/actor_000001759917.pth | Hamilton 1.2275311946868896
./Hopper-v3_PPOHtermK_6/actor_000001769147.pth | Hamilton 1.203498125076294
./Hopper-v3_PPOHtermK_6/actor_000001778509.pth | Hamilton 1.2105374336242676
./Hopper-v3_PPOHtermK_6/actor_000001788673.pth | Hamilton 1.1952929496765137
./Hopper-v3_PPOHtermK_6/actor_000001799412.pth | Hamilton 1.196632742881775
./Hopper-v3_PPOHtermK_6/actor_000001808321.pth | Hamilton 1.2042073011398315
./Hopper-v3_PPOHtermK_6/actor_000001818630.pth | Hamilton 1.234487533569336
./Hopper-v3_PPOHtermK_6/actor_000001827097.pth | Hamilton 1.2315199375152588
./Hopper-v3_PPOHtermK_6/actor_000001837061.pth | Hamilton 1.2643980979919434
./Hopper-v3_PPOHtermK_6/actor_000001847267.pth | Hamilton 1.267818808555603
./Hopper-v3_PPOHtermK_6/actor_000001856192.pth | Hamilton 1.2668204307556152
./Hopper-v3_PPOHtermK_6/actor_000001864603.pth | Hamilton 1.2519257068634033
./Hopper-v3_PPOHtermK_6/actor_000001875511.pth | Hamilton 1.2609953880310059
./Hopper-v3_PPOHtermK_6/actor_000001884423.pth | Hamilton 1.2665363550186157
./Hopper-v3_PPOHtermK_6/actor_000001893917.pth | Hamilton 1.2474747896194458
./Hopper-v3_PPOHtermK_6/actor_000001901917.pth | Hamilton 1.260327696800232
./Hopper-v3_PPOHtermK_6/actor_000001909917.pth | Hamilton 1.25128972530365
./Hopper-v3_PPOHtermK_6/actor_000001918748.pth | Hamilton 1.2713875770568848
./Hopper-v3_PPOHtermK_6/actor_000001927923.pth | Hamilton 1.280049443244934
./Hopper-v3_PPOHtermK_6/actor_000001936733.pth | Hamilton 1.2995545864105225
./Hopper-v3_PPOHtermK_6/actor_000001945301.pth | Hamilton 1.315147876739502
./Hopper-v3_PPOHtermK_6/actor_000001954652.pth | Hamilton 1.30451238155365
./Hopper-v3_PPOHtermK_6/actor_000001962896.pth | Hamilton 1.320293664932251
./Hopper-v3_PPOHtermK_6/actor_000001971281.pth | Hamilton 1.3199859857559204
./Hopper-v3_PPOHtermK_6/actor_000001980208.pth | Hamilton 1.3211692571640015
./Hopper-v3_PPOHtermK_6/actor_000001991329.pth | Hamilton 1.3230655193328857
./Hopper-v3_PPOHtermK_6/actor_000002001139.pth | Hamilton 1.3250901699066162
./Hopper-v3_PPOHtermK_6/actor__000000008314_00154.139.pth | Hamilton 0.2213515341281891
./Hopper-v3_PPOHtermK_6/actor__000000131451_00376.503.pth | Hamilton 0.3746381402015686
./Hopper-v3_PPOHtermK_6/actor__000000255983_01009.048.pth | Hamilton 0.5605206489562988
./Hopper-v3_PPOHtermK_6/actor__000000378123_02667.275.pth | Hamilton 0.6615644693374634
./Hopper-v3_PPOHtermK_6/actor__000000625058_03181.373.pth | Hamilton 0.8532209396362305
./Hopper-v3_PPOHtermK_6/actor__000000750188_03324.142.pth | Hamilton 0.9662994742393494
./Hopper-v3_PPOHtermK_6/actor__000000872925_03357.322.pth | Hamilton 1.0570703744888306
./Hopper-v3_PPOHtermK_6/actor__000001377615_03433.328.pth | Hamilton 1.2655936479568481
./Hopper-v3_PPOHtermK_6/actor__000001879541_03434.688.pth | Hamilton 1.3078831434249878
"""
# Hopper-v2_PPOHtermK_1
data12 = """
./Hopper-v2_PPOHtermK_1/actor_000000012266.pth | Hamilton 0.49707767367362976
./Hopper-v2_PPOHtermK_1/actor_000000020586.pth | Hamilton 0.5440958142280579
./Hopper-v2_PPOHtermK_1/actor_000000029045.pth | Hamilton 0.5539615154266357
./Hopper-v2_PPOHtermK_1/actor_000000037481.pth | Hamilton 0.5782834887504578
./Hopper-v2_PPOHtermK_1/actor_000000046022.pth | Hamilton 0.5819857120513916
./Hopper-v2_PPOHtermK_1/actor_000000054779.pth | Hamilton 0.5860337615013123
./Hopper-v2_PPOHtermK_1/actor_000000063220.pth | Hamilton 0.583692729473114
./Hopper-v2_PPOHtermK_1/actor_000000071931.pth | Hamilton 0.5743004083633423
./Hopper-v2_PPOHtermK_1/actor_000000081035.pth | Hamilton 0.5776315331459045
./Hopper-v2_PPOHtermK_1/actor_000000089734.pth | Hamilton 0.5639542937278748
./Hopper-v2_PPOHtermK_1/actor_000000099477.pth | Hamilton 0.5635554790496826
./Hopper-v2_PPOHtermK_1/actor_000000109057.pth | Hamilton 0.5684139132499695
./Hopper-v2_PPOHtermK_1/actor_000000117862.pth | Hamilton 0.5715059638023376
./Hopper-v2_PPOHtermK_1/actor_000000126690.pth | Hamilton 0.564452588558197
./Hopper-v2_PPOHtermK_1/actor_000000135489.pth | Hamilton 0.5782734155654907
./Hopper-v2_PPOHtermK_1/actor_000000144365.pth | Hamilton 0.5892970561981201
./Hopper-v2_PPOHtermK_1/actor_000000152901.pth | Hamilton 0.5995793342590332
./Hopper-v2_PPOHtermK_1/actor_000000161420.pth | Hamilton 0.6111807823181152
./Hopper-v2_PPOHtermK_1/actor_000000170244.pth | Hamilton 0.6146460771560669
./Hopper-v2_PPOHtermK_1/actor_000000178712.pth | Hamilton 0.6222730278968811
./Hopper-v2_PPOHtermK_1/actor_000000187278.pth | Hamilton 0.6224280595779419
./Hopper-v2_PPOHtermK_1/actor_000000195881.pth | Hamilton 0.6244977712631226
./Hopper-v2_PPOHtermK_1/actor_000000204819.pth | Hamilton 0.6207193732261658
./Hopper-v2_PPOHtermK_1/actor_000000213587.pth | Hamilton 0.6072689294815063
./Hopper-v2_PPOHtermK_1/actor_000000223210.pth | Hamilton 0.595444917678833
./Hopper-v2_PPOHtermK_1/actor_000000232322.pth | Hamilton 0.6035097241401672
./Hopper-v2_PPOHtermK_1/actor_000000241953.pth | Hamilton 0.6023523807525635
./Hopper-v2_PPOHtermK_1/actor_000000250984.pth | Hamilton 0.5948614478111267
./Hopper-v2_PPOHtermK_1/actor_000000261002.pth | Hamilton 0.593251645565033
./Hopper-v2_PPOHtermK_1/actor_000000270353.pth | Hamilton 0.5976131558418274
./Hopper-v2_PPOHtermK_1/actor_000000279111.pth | Hamilton 0.6035564541816711
./Hopper-v2_PPOHtermK_1/actor_000000288341.pth | Hamilton 0.5926827192306519
./Hopper-v2_PPOHtermK_1/actor_000000299298.pth | Hamilton 0.5886200666427612
./Hopper-v2_PPOHtermK_1/actor_000000309468.pth | Hamilton 0.5890348553657532
./Hopper-v2_PPOHtermK_1/actor_000000319721.pth | Hamilton 0.6032013297080994
./Hopper-v2_PPOHtermK_1/actor_000000331067.pth | Hamilton 0.5997387170791626
./Hopper-v2_PPOHtermK_1/actor_000000340864.pth | Hamilton 0.6100163459777832
./Hopper-v2_PPOHtermK_1/actor_000000349968.pth | Hamilton 0.6045315861701965
./Hopper-v2_PPOHtermK_1/actor_000000361508.pth | Hamilton 0.61538165807724
./Hopper-v2_PPOHtermK_1/actor_000000372549.pth | Hamilton 0.6300678849220276
./Hopper-v2_PPOHtermK_1/actor_000000382551.pth | Hamilton 0.6351966857910156
./Hopper-v2_PPOHtermK_1/actor_000000393277.pth | Hamilton 0.64419025182724
./Hopper-v2_PPOHtermK_1/actor_000000404743.pth | Hamilton 0.6562338471412659
./Hopper-v2_PPOHtermK_1/actor_000000416636.pth | Hamilton 0.6645117998123169
./Hopper-v2_PPOHtermK_1/actor_000000426312.pth | Hamilton 0.6693285703659058
./Hopper-v2_PPOHtermK_1/actor_000000436188.pth | Hamilton 0.6726453900337219
./Hopper-v2_PPOHtermK_1/actor_000000447193.pth | Hamilton 0.6688531637191772
./Hopper-v2_PPOHtermK_1/actor_000000459031.pth | Hamilton 0.6769363880157471
./Hopper-v2_PPOHtermK_1/actor_000000467826.pth | Hamilton 0.6914471983909607
./Hopper-v2_PPOHtermK_1/actor_000000477497.pth | Hamilton 0.6878836750984192
./Hopper-v2_PPOHtermK_1/actor_000000487283.pth | Hamilton 0.6946234703063965
./Hopper-v2_PPOHtermK_1/actor_000000497886.pth | Hamilton 0.6900990605354309
./Hopper-v2_PPOHtermK_1/actor_000000508605.pth | Hamilton 0.6915092468261719
./Hopper-v2_PPOHtermK_1/actor_000000518742.pth | Hamilton 0.691702127456665
./Hopper-v2_PPOHtermK_1/actor_000000528508.pth | Hamilton 0.6969584822654724
./Hopper-v2_PPOHtermK_1/actor_000000539919.pth | Hamilton 0.7103747129440308
./Hopper-v2_PPOHtermK_1/actor_000000550995.pth | Hamilton 0.7151159048080444
./Hopper-v2_PPOHtermK_1/actor_000000561687.pth | Hamilton 0.7113256454467773
./Hopper-v2_PPOHtermK_1/actor_000000570510.pth | Hamilton 0.7260191440582275
./Hopper-v2_PPOHtermK_1/actor_000000581240.pth | Hamilton 0.7280723452568054
./Hopper-v2_PPOHtermK_1/actor_000000592298.pth | Hamilton 0.724122166633606
./Hopper-v2_PPOHtermK_1/actor_000000602127.pth | Hamilton 0.7351981401443481
./Hopper-v2_PPOHtermK_1/actor_000000612123.pth | Hamilton 0.7279580235481262
./Hopper-v2_PPOHtermK_1/actor_000000623702.pth | Hamilton 0.7343960404396057
./Hopper-v2_PPOHtermK_1/actor_000000633044.pth | Hamilton 0.737565815448761
./Hopper-v2_PPOHtermK_1/actor_000000643168.pth | Hamilton 0.7517142295837402
./Hopper-v2_PPOHtermK_1/actor_000000652505.pth | Hamilton 0.7708538770675659
./Hopper-v2_PPOHtermK_1/actor_000000662704.pth | Hamilton 0.7737637758255005
./Hopper-v2_PPOHtermK_1/actor_000000672530.pth | Hamilton 0.7797785401344299
./Hopper-v2_PPOHtermK_1/actor_000000681911.pth | Hamilton 0.7896486520767212
./Hopper-v2_PPOHtermK_1/actor_000000691321.pth | Hamilton 0.7942165732383728
./Hopper-v2_PPOHtermK_1/actor_000000700268.pth | Hamilton 0.7927950024604797
./Hopper-v2_PPOHtermK_1/actor_000000711327.pth | Hamilton 0.8037243485450745
./Hopper-v2_PPOHtermK_1/actor_000000720611.pth | Hamilton 0.8084350824356079
./Hopper-v2_PPOHtermK_1/actor_000000729820.pth | Hamilton 0.8146712183952332
./Hopper-v2_PPOHtermK_1/actor_000000740454.pth | Hamilton 0.8352003693580627
./Hopper-v2_PPOHtermK_1/actor_000000751205.pth | Hamilton 0.8538724184036255
./Hopper-v2_PPOHtermK_1/actor_000000759315.pth | Hamilton 0.8473496437072754
./Hopper-v2_PPOHtermK_1/actor_000000769647.pth | Hamilton 0.8555893898010254
./Hopper-v2_PPOHtermK_1/actor_000000779319.pth | Hamilton 0.8648740649223328
./Hopper-v2_PPOHtermK_1/actor_000000789031.pth | Hamilton 0.8731195330619812
./Hopper-v2_PPOHtermK_1/actor_000000798143.pth | Hamilton 0.8890700936317444
./Hopper-v2_PPOHtermK_1/actor_000000807329.pth | Hamilton 0.8868382573127747
./Hopper-v2_PPOHtermK_1/actor_000000815628.pth | Hamilton 0.8913543820381165
./Hopper-v2_PPOHtermK_1/actor_000000824349.pth | Hamilton 0.9072344899177551
./Hopper-v2_PPOHtermK_1/actor_000000833375.pth | Hamilton 0.9272060990333557
./Hopper-v2_PPOHtermK_1/actor_000000841462.pth | Hamilton 0.9383752942085266
./Hopper-v2_PPOHtermK_1/actor_000000852196.pth | Hamilton 0.9542031288146973
./Hopper-v2_PPOHtermK_1/actor_000000863816.pth | Hamilton 0.9770907163619995
./Hopper-v2_PPOHtermK_1/actor_000000872548.pth | Hamilton 0.9887466430664062
./Hopper-v2_PPOHtermK_1/actor_000000882836.pth | Hamilton 0.9997304677963257
./Hopper-v2_PPOHtermK_1/actor_000000891224.pth | Hamilton 1.0194206237792969
./Hopper-v2_PPOHtermK_1/actor_000000900052.pth | Hamilton 1.0217466354370117
./Hopper-v2_PPOHtermK_1/actor_000000910556.pth | Hamilton 1.044454574584961
./Hopper-v2_PPOHtermK_1/actor_000000923723.pth | Hamilton 1.0759865045547485
./Hopper-v2_PPOHtermK_1/actor_000000932845.pth | Hamilton 1.0873475074768066
./Hopper-v2_PPOHtermK_1/actor_000000941820.pth | Hamilton 1.097644329071045
./Hopper-v2_PPOHtermK_1/actor_000000952817.pth | Hamilton 1.099028468132019
./Hopper-v2_PPOHtermK_1/actor_000000962555.pth | Hamilton 1.1136139631271362
./Hopper-v2_PPOHtermK_1/actor_000000972150.pth | Hamilton 1.126929521560669
./Hopper-v2_PPOHtermK_1/actor_000000983419.pth | Hamilton 1.1563533544540405
./Hopper-v2_PPOHtermK_1/actor_000000991978.pth | Hamilton 1.1669148206710815
./Hopper-v2_PPOHtermK_1/actor_000001001709.pth | Hamilton 1.1850125789642334
./Hopper-v2_PPOHtermK_1/actor_000001011476.pth | Hamilton 1.2003651857376099
./Hopper-v2_PPOHtermK_1/actor_000001020194.pth | Hamilton 1.2300974130630493
./Hopper-v2_PPOHtermK_1/actor_000001029118.pth | Hamilton 1.2297320365905762
./Hopper-v2_PPOHtermK_1/actor_000001039087.pth | Hamilton 1.2512948513031006
./Hopper-v2_PPOHtermK_1/actor_000001048625.pth | Hamilton 1.2575922012329102
./Hopper-v2_PPOHtermK_1/actor_000001057495.pth | Hamilton 1.274703025817871
./Hopper-v2_PPOHtermK_1/actor_000001066464.pth | Hamilton 1.2871185541152954
./Hopper-v2_PPOHtermK_1/actor_000001077466.pth | Hamilton 1.2882360219955444
./Hopper-v2_PPOHtermK_1/actor_000001085708.pth | Hamilton 1.3142948150634766
./Hopper-v2_PPOHtermK_1/actor_000001095217.pth | Hamilton 1.3425432443618774
./Hopper-v2_PPOHtermK_1/actor_000001105634.pth | Hamilton 1.3539115190505981
./Hopper-v2_PPOHtermK_1/actor_000001114232.pth | Hamilton 1.3755781650543213
./Hopper-v2_PPOHtermK_1/actor_000001125197.pth | Hamilton 1.4037320613861084
./Hopper-v2_PPOHtermK_1/actor_000001136903.pth | Hamilton 1.4146746397018433
./Hopper-v2_PPOHtermK_1/actor_000001147346.pth | Hamilton 1.4247589111328125
./Hopper-v2_PPOHtermK_1/actor_000001156345.pth | Hamilton 1.4369772672653198
./Hopper-v2_PPOHtermK_1/actor_000001165727.pth | Hamilton 1.449008584022522
./Hopper-v2_PPOHtermK_1/actor_000001175760.pth | Hamilton 1.4587432146072388
./Hopper-v2_PPOHtermK_1/actor_000001186820.pth | Hamilton 1.4689098596572876
./Hopper-v2_PPOHtermK_1/actor_000001195623.pth | Hamilton 1.486315369606018
./Hopper-v2_PPOHtermK_1/actor_000001203716.pth | Hamilton 1.5066392421722412
./Hopper-v2_PPOHtermK_1/actor_000001213716.pth | Hamilton 1.5150822401046753
./Hopper-v2_PPOHtermK_1/actor_000001224484.pth | Hamilton 1.5312849283218384
./Hopper-v2_PPOHtermK_1/actor_000001234457.pth | Hamilton 1.538316249847412
./Hopper-v2_PPOHtermK_1/actor_000001243181.pth | Hamilton 1.5516159534454346
./Hopper-v2_PPOHtermK_1/actor_000001253576.pth | Hamilton 1.566779613494873
./Hopper-v2_PPOHtermK_1/actor_000001264238.pth | Hamilton 1.579459547996521
./Hopper-v2_PPOHtermK_1/actor_000001274370.pth | Hamilton 1.5811444520950317
./Hopper-v2_PPOHtermK_1/actor_000001284444.pth | Hamilton 1.5971570014953613
./Hopper-v2_PPOHtermK_1/actor_000001293159.pth | Hamilton 1.6046593189239502
./Hopper-v2_PPOHtermK_1/actor_000001304803.pth | Hamilton 1.6168373823165894
./Hopper-v2_PPOHtermK_1/actor_000001313124.pth | Hamilton 1.6304643154144287
./Hopper-v2_PPOHtermK_1/actor_000001322941.pth | Hamilton 1.637249231338501
./Hopper-v2_PPOHtermK_1/actor_000001331257.pth | Hamilton 1.6468775272369385
./Hopper-v2_PPOHtermK_1/actor_000001341691.pth | Hamilton 1.6591277122497559
./Hopper-v2_PPOHtermK_1/actor_000001352250.pth | Hamilton 1.6709275245666504
./Hopper-v2_PPOHtermK_1/actor_000001360250.pth | Hamilton 1.681677222251892
./Hopper-v2_PPOHtermK_1/actor_000001371342.pth | Hamilton 1.6862200498580933
./Hopper-v2_PPOHtermK_1/actor_000001381713.pth | Hamilton 1.7018917798995972
./Hopper-v2_PPOHtermK_1/actor_000001394003.pth | Hamilton 1.7028683423995972
./Hopper-v2_PPOHtermK_1/actor_000001404271.pth | Hamilton 1.7371435165405273
./Hopper-v2_PPOHtermK_1/actor_000001414965.pth | Hamilton 1.7347135543823242
./Hopper-v2_PPOHtermK_1/actor_000001424760.pth | Hamilton 1.7469313144683838
./Hopper-v2_PPOHtermK_1/actor_000001435219.pth | Hamilton 1.7568464279174805
./Hopper-v2_PPOHtermK_1/actor_000001445027.pth | Hamilton 1.7483233213424683
./Hopper-v2_PPOHtermK_1/actor_000001455160.pth | Hamilton 1.764796257019043
./Hopper-v2_PPOHtermK_1/actor_000001463915.pth | Hamilton 1.7664424180984497
./Hopper-v2_PPOHtermK_1/actor_000001475205.pth | Hamilton 1.7948447465896606
./Hopper-v2_PPOHtermK_1/actor_000001484288.pth | Hamilton 1.801731824874878
./Hopper-v2_PPOHtermK_1/actor_000001494984.pth | Hamilton 1.8067660331726074
./Hopper-v2_PPOHtermK_1/actor_000001503538.pth | Hamilton 1.8050360679626465
./Hopper-v2_PPOHtermK_1/actor_000001512771.pth | Hamilton 1.8160033226013184
./Hopper-v2_PPOHtermK_1/actor_000001521858.pth | Hamilton 1.8263288736343384
./Hopper-v2_PPOHtermK_1/actor_000001532058.pth | Hamilton 1.8380804061889648
./Hopper-v2_PPOHtermK_1/actor_000001542992.pth | Hamilton 1.8414431810379028
./Hopper-v2_PPOHtermK_1/actor_000001554721.pth | Hamilton 1.835944414138794
./Hopper-v2_PPOHtermK_1/actor_000001564690.pth | Hamilton 1.8513249158859253
./Hopper-v2_PPOHtermK_1/actor_000001573851.pth | Hamilton 1.8608366250991821
./Hopper-v2_PPOHtermK_1/actor_000001583548.pth | Hamilton 1.8733785152435303
./Hopper-v2_PPOHtermK_1/actor_000001592064.pth | Hamilton 1.8734933137893677
./Hopper-v2_PPOHtermK_1/actor_000001602121.pth | Hamilton 1.897040843963623
./Hopper-v2_PPOHtermK_1/actor_000001611893.pth | Hamilton 1.9166057109832764
./Hopper-v2_PPOHtermK_1/actor_000001620424.pth | Hamilton 1.9264541864395142
./Hopper-v2_PPOHtermK_1/actor_000001631046.pth | Hamilton 1.939608097076416
./Hopper-v2_PPOHtermK_1/actor_000001640991.pth | Hamilton 1.930494785308838
./Hopper-v2_PPOHtermK_1/actor_000001648991.pth | Hamilton 1.9535776376724243
./Hopper-v2_PPOHtermK_1/actor_000001658398.pth | Hamilton 1.9612287282943726
./Hopper-v2_PPOHtermK_1/actor_000001668867.pth | Hamilton 1.950474739074707
./Hopper-v2_PPOHtermK_1/actor_000001678463.pth | Hamilton 1.9556645154953003
./Hopper-v2_PPOHtermK_1/actor_000001688450.pth | Hamilton 1.9590866565704346
./Hopper-v2_PPOHtermK_1/actor_000001697012.pth | Hamilton 1.9681390523910522
./Hopper-v2_PPOHtermK_1/actor_000001705852.pth | Hamilton 1.9853302240371704
./Hopper-v2_PPOHtermK_1/actor_000001713852.pth | Hamilton 1.9884916543960571
./Hopper-v2_PPOHtermK_1/actor_000001722972.pth | Hamilton 1.970191478729248
./Hopper-v2_PPOHtermK_1/actor_000001732444.pth | Hamilton 1.9716607332229614
./Hopper-v2_PPOHtermK_1/actor_000001741679.pth | Hamilton 1.959070086479187
./Hopper-v2_PPOHtermK_1/actor_000001751047.pth | Hamilton 1.9579135179519653
./Hopper-v2_PPOHtermK_1/actor_000001759686.pth | Hamilton 1.9661009311676025
./Hopper-v2_PPOHtermK_1/actor_000001770242.pth | Hamilton 1.9715629816055298
./Hopper-v2_PPOHtermK_1/actor_000001780583.pth | Hamilton 1.9791679382324219
./Hopper-v2_PPOHtermK_1/actor_000001789898.pth | Hamilton 1.9685776233673096
./Hopper-v2_PPOHtermK_1/actor_000001799254.pth | Hamilton 1.9910707473754883
./Hopper-v2_PPOHtermK_1/actor_000001808967.pth | Hamilton 2.002528667449951
./Hopper-v2_PPOHtermK_1/actor_000001819274.pth | Hamilton 1.9895884990692139
./Hopper-v2_PPOHtermK_1/actor_000001827922.pth | Hamilton 1.9892656803131104
./Hopper-v2_PPOHtermK_1/actor_000001836202.pth | Hamilton 2.0130105018615723
./Hopper-v2_PPOHtermK_1/actor_000001844963.pth | Hamilton 2.0252652168273926
./Hopper-v2_PPOHtermK_1/actor_000001852963.pth | Hamilton 2.008625030517578
./Hopper-v2_PPOHtermK_1/actor_000001861780.pth | Hamilton 2.01806378364563
./Hopper-v2_PPOHtermK_1/actor_000001872577.pth | Hamilton 2.0099613666534424
./Hopper-v2_PPOHtermK_1/actor_000001882830.pth | Hamilton 2.031874179840088
./Hopper-v2_PPOHtermK_1/actor_000001892732.pth | Hamilton 2.0596070289611816
./Hopper-v2_PPOHtermK_1/actor_000001903296.pth | Hamilton 2.059262990951538
./Hopper-v2_PPOHtermK_1/actor_000001912187.pth | Hamilton 2.0526411533355713
./Hopper-v2_PPOHtermK_1/actor_000001923045.pth | Hamilton 2.0478854179382324
./Hopper-v2_PPOHtermK_1/actor_000001932842.pth | Hamilton 2.0371570587158203
./Hopper-v2_PPOHtermK_1/actor_000001941665.pth | Hamilton 2.056736469268799
./Hopper-v2_PPOHtermK_1/actor_000001950924.pth | Hamilton 2.0767860412597656
./Hopper-v2_PPOHtermK_1/actor_000001960157.pth | Hamilton 2.0662713050842285
./Hopper-v2_PPOHtermK_1/actor_000001970897.pth | Hamilton 2.0421125888824463
./Hopper-v2_PPOHtermK_1/actor_000001978897.pth | Hamilton 2.014127254486084
./Hopper-v2_PPOHtermK_1/actor_000001988890.pth | Hamilton 2.031428098678589
./Hopper-v2_PPOHtermK_1/actor_000001998337.pth | Hamilton 2.0511577129364014
./Hopper-v2_PPOHtermK_1/actor_000002007591.pth | Hamilton 2.029947519302368
./Hopper-v2_PPOHtermK_1/actor_000002015851.pth | Hamilton 2.0577940940856934
./Hopper-v2_PPOHtermK_1/actor_000002026938.pth | Hamilton 2.0633673667907715
./Hopper-v2_PPOHtermK_1/actor_000002037879.pth | Hamilton 2.0627713203430176
./Hopper-v2_PPOHtermK_1/actor_000002046541.pth | Hamilton 2.0718042850494385
./Hopper-v2_PPOHtermK_1/actor_000002056487.pth | Hamilton 2.0550553798675537
./Hopper-v2_PPOHtermK_1/actor_000002067403.pth | Hamilton 2.0658364295959473
./Hopper-v2_PPOHtermK_1/actor_000002076697.pth | Hamilton 2.079085350036621
./Hopper-v2_PPOHtermK_1/actor_000002086871.pth | Hamilton 2.046438694000244
./Hopper-v2_PPOHtermK_1/actor_000002095923.pth | Hamilton 2.0730791091918945
./Hopper-v2_PPOHtermK_1/actor_000002105258.pth | Hamilton 2.066810369491577
./Hopper-v2_PPOHtermK_1/actor_000002114663.pth | Hamilton 2.0623130798339844
./Hopper-v2_PPOHtermK_1/actor_000002123385.pth | Hamilton 2.075228691101074
./Hopper-v2_PPOHtermK_1/actor_000002132914.pth | Hamilton 2.1118860244750977
./Hopper-v2_PPOHtermK_1/actor_000002142440.pth | Hamilton 2.1291654109954834
./Hopper-v2_PPOHtermK_1/actor_000002151652.pth | Hamilton 2.138117551803589
./Hopper-v2_PPOHtermK_1/actor_000002159652.pth | Hamilton 2.141282081604004
./Hopper-v2_PPOHtermK_1/actor_000002167652.pth | Hamilton 2.175921678543091
./Hopper-v2_PPOHtermK_1/actor_000002176227.pth | Hamilton 2.1837265491485596
./Hopper-v2_PPOHtermK_1/actor_000002184964.pth | Hamilton 2.190122127532959
./Hopper-v2_PPOHtermK_1/actor_000002194327.pth | Hamilton 2.187976121902466
./Hopper-v2_PPOHtermK_1/actor_000002204472.pth | Hamilton 2.184704065322876
./Hopper-v2_PPOHtermK_1/actor_000002213809.pth | Hamilton 2.159832715988159
./Hopper-v2_PPOHtermK_1/actor_000002222930.pth | Hamilton 2.1559696197509766
./Hopper-v2_PPOHtermK_1/actor_000002232615.pth | Hamilton 2.1355841159820557
./Hopper-v2_PPOHtermK_1/actor_000002242795.pth | Hamilton 2.1462316513061523
./Hopper-v2_PPOHtermK_1/actor_000002252631.pth | Hamilton 2.1610169410705566
./Hopper-v2_PPOHtermK_1/actor_000002261199.pth | Hamilton 2.1710195541381836
./Hopper-v2_PPOHtermK_1/actor_000002270938.pth | Hamilton 2.1670243740081787
./Hopper-v2_PPOHtermK_1/actor_000002279482.pth | Hamilton 2.172046422958374
./Hopper-v2_PPOHtermK_1/actor_000002287952.pth | Hamilton 2.1737070083618164
./Hopper-v2_PPOHtermK_1/actor_000002297488.pth | Hamilton 2.165332078933716
./Hopper-v2_PPOHtermK_1/actor_000002307857.pth | Hamilton 2.1648709774017334
./Hopper-v2_PPOHtermK_1/actor_000002315857.pth | Hamilton 2.1878581047058105
./Hopper-v2_PPOHtermK_1/actor_000002325948.pth | Hamilton 2.1798341274261475
./Hopper-v2_PPOHtermK_1/actor_000002336997.pth | Hamilton 2.1755239963531494
./Hopper-v2_PPOHtermK_1/actor_000002346843.pth | Hamilton 2.164184331893921
./Hopper-v2_PPOHtermK_1/actor_000002357588.pth | Hamilton 2.1548149585723877
./Hopper-v2_PPOHtermK_1/actor_000002368988.pth | Hamilton 2.1740899085998535
./Hopper-v2_PPOHtermK_1/actor_000002379785.pth | Hamilton 2.185974359512329
./Hopper-v2_PPOHtermK_1/actor_000002389545.pth | Hamilton 2.1619412899017334
./Hopper-v2_PPOHtermK_1/actor_000002398830.pth | Hamilton 2.145019292831421
./Hopper-v2_PPOHtermK_1/actor_000002408175.pth | Hamilton 2.1683297157287598
./Hopper-v2_PPOHtermK_1/actor_000002418456.pth | Hamilton 2.1563780307769775
./Hopper-v2_PPOHtermK_1/actor_000002428583.pth | Hamilton 2.158418655395508
./Hopper-v2_PPOHtermK_1/actor_000002439844.pth | Hamilton 2.176894426345825
./Hopper-v2_PPOHtermK_1/actor_000002452425.pth | Hamilton 2.1577494144439697
./Hopper-v2_PPOHtermK_1/actor_000002463881.pth | Hamilton 2.1502389907836914
./Hopper-v2_PPOHtermK_1/actor_000002473530.pth | Hamilton 2.184016704559326
./Hopper-v2_PPOHtermK_1/actor_000002483423.pth | Hamilton 2.2117021083831787
./Hopper-v2_PPOHtermK_1/actor_000002492374.pth | Hamilton 2.210909843444824
./Hopper-v2_PPOHtermK_1/actor_000002501899.pth | Hamilton 2.226489782333374
./Hopper-v2_PPOHtermK_1/actor_000002510608.pth | Hamilton 2.2372171878814697
./Hopper-v2_PPOHtermK_1/actor_000002519549.pth | Hamilton 2.2416574954986572
./Hopper-v2_PPOHtermK_1/actor_000002528922.pth | Hamilton 2.2361807823181152
./Hopper-v2_PPOHtermK_1/actor_000002539837.pth | Hamilton 2.2266104221343994
./Hopper-v2_PPOHtermK_1/actor_000002549568.pth | Hamilton 2.217386484146118
./Hopper-v2_PPOHtermK_1/actor_000002558630.pth | Hamilton 2.221869468688965
./Hopper-v2_PPOHtermK_1/actor_000002568250.pth | Hamilton 2.244422674179077
./Hopper-v2_PPOHtermK_1/actor_000002578283.pth | Hamilton 2.2525734901428223
./Hopper-v2_PPOHtermK_1/actor_000002587792.pth | Hamilton 2.2312748432159424
./Hopper-v2_PPOHtermK_1/actor_000002598588.pth | Hamilton 2.2279868125915527
./Hopper-v2_PPOHtermK_1/actor_000002606588.pth | Hamilton 2.22127366065979
./Hopper-v2_PPOHtermK_1/actor_000002616265.pth | Hamilton 2.202486515045166
./Hopper-v2_PPOHtermK_1/actor_000002625051.pth | Hamilton 2.222506284713745
./Hopper-v2_PPOHtermK_1/actor_000002633975.pth | Hamilton 2.236555814743042
./Hopper-v2_PPOHtermK_1/actor_000002644471.pth | Hamilton 2.2474422454833984
./Hopper-v2_PPOHtermK_1/actor_000002654908.pth | Hamilton 2.257524013519287
./Hopper-v2_PPOHtermK_1/actor_000002663448.pth | Hamilton 2.2590696811676025
./Hopper-v2_PPOHtermK_1/actor_000002672253.pth | Hamilton 2.2535948753356934
./Hopper-v2_PPOHtermK_1/actor_000002681463.pth | Hamilton 2.2459888458251953
./Hopper-v2_PPOHtermK_1/actor_000002690622.pth | Hamilton 2.2425358295440674
./Hopper-v2_PPOHtermK_1/actor_000002700157.pth | Hamilton 2.233717203140259
./Hopper-v2_PPOHtermK_1/actor_000002708725.pth | Hamilton 2.223914861679077
./Hopper-v2_PPOHtermK_1/actor_000002719919.pth | Hamilton 2.2247817516326904
./Hopper-v2_PPOHtermK_1/actor_000002729442.pth | Hamilton 2.248499631881714
./Hopper-v2_PPOHtermK_1/actor_000002738843.pth | Hamilton 2.2337608337402344
./Hopper-v2_PPOHtermK_1/actor_000002747630.pth | Hamilton 2.2294890880584717
./Hopper-v2_PPOHtermK_1/actor_000002756932.pth | Hamilton 2.2131330966949463
./Hopper-v2_PPOHtermK_1/actor_000002765570.pth | Hamilton 2.2184855937957764
./Hopper-v2_PPOHtermK_1/actor_000002774139.pth | Hamilton 2.202444553375244
./Hopper-v2_PPOHtermK_1/actor_000002782139.pth | Hamilton 2.231876850128174
./Hopper-v2_PPOHtermK_1/actor_000002791747.pth | Hamilton 2.233001947402954
./Hopper-v2_PPOHtermK_1/actor_000002799747.pth | Hamilton 2.237248659133911
./Hopper-v2_PPOHtermK_1/actor_000002807747.pth | Hamilton 2.225982904434204
./Hopper-v2_PPOHtermK_1/actor_000002816755.pth | Hamilton 2.2370316982269287
./Hopper-v2_PPOHtermK_1/actor_000002825457.pth | Hamilton 2.2766854763031006
./Hopper-v2_PPOHtermK_1/actor_000002834218.pth | Hamilton 2.2782585620880127
./Hopper-v2_PPOHtermK_1/actor_000002843908.pth | Hamilton 2.2693865299224854
./Hopper-v2_PPOHtermK_1/actor_000002856268.pth | Hamilton 2.261286735534668
./Hopper-v2_PPOHtermK_1/actor_000002864268.pth | Hamilton 2.2719523906707764
./Hopper-v2_PPOHtermK_1/actor_000002874468.pth | Hamilton 2.2715280055999756
./Hopper-v2_PPOHtermK_1/actor_000002885234.pth | Hamilton 2.281615734100342
./Hopper-v2_PPOHtermK_1/actor_000002894986.pth | Hamilton 2.278998374938965
./Hopper-v2_PPOHtermK_1/actor_000002905826.pth | Hamilton 2.275270938873291
./Hopper-v2_PPOHtermK_1/actor_000002915087.pth | Hamilton 2.3026998043060303
./Hopper-v2_PPOHtermK_1/actor_000002923716.pth | Hamilton 2.30168080329895
./Hopper-v2_PPOHtermK_1/actor_000002923716.pth | Hamilton 2.30168080329895
./Hopper-v2_PPOHtermK_1/actor_000002932401.pth | Hamilton 2.2593533992767334
./Hopper-v2_PPOHtermK_1/actor_000002940401.pth | Hamilton 2.275097370147705
./Hopper-v2_PPOHtermK_1/actor_000002949582.pth | Hamilton 2.2833592891693115
./Hopper-v2_PPOHtermK_1/actor_000002959210.pth | Hamilton 2.270292043685913
./Hopper-v2_PPOHtermK_1/actor_000002968581.pth | Hamilton 2.2611300945281982
./Hopper-v2_PPOHtermK_1/actor_000002976581.pth | Hamilton 2.2982404232025146
./Hopper-v2_PPOHtermK_1/actor_000002985213.pth | Hamilton 2.298961877822876
./Hopper-v2_PPOHtermK_1/actor_000002994798.pth | Hamilton 2.311530113220215
./Hopper-v2_PPOHtermK_1/actor_000003003547.pth | Hamilton 2.3072633743286133
./Hopper-v2_PPOHtermK_1/actor__000000008188_00128.390.pth | Hamilton 0.6311178803443909
./Hopper-v2_PPOHtermK_1/actor__000000131193_00369.864.pth | Hamilton 0.7426512241363525
./Hopper-v2_PPOHtermK_1/actor__000000372549_02665.738.pth | Hamilton 1.155433177947998
./Hopper-v2_PPOHtermK_1/actor__000000492712_02866.958.pth | Hamilton 1.1855536699295044
./Hopper-v2_PPOHtermK_1/actor__000000612123_03099.729.pth | Hamilton 1.311480164527893
./Hopper-v2_PPOHtermK_1/actor__000000729820_03157.978.pth | Hamilton 1.456320881843567
./Hopper-v2_PPOHtermK_1/actor__000000852196_03260.882.pth | Hamilton 1.606527328491211
./Hopper-v2_PPOHtermK_1/actor__000000972150_03296.005.pth | Hamilton 1.7801164388656616
./Hopper-v2_PPOHtermK_1/actor__000001090391_03305.133.pth | Hamilton 1.9661047458648682
./Hopper-v2_PPOHtermK_1/actor__000001208980_03321.769.pth | Hamilton 2.0915729999542236
./Hopper-v2_PPOHtermK_1/actor__000001445027_03340.862.pth | Hamilton 2.225701332092285
./Hopper-v2_PPOHtermK_1/actor__000001683270_03345.835.pth | Hamilton 2.3578333854675293
./Hopper-v2_PPOHtermK_1/actor__000001804704_03369.866.pth | Hamilton 2.3362553119659424
./Hopper-v2_PPOHtermK_1/actor__000002163652_03384.485.pth | Hamilton 2.394500255584717
./Hopper-v2_PPOHtermK_1/actor__000002283952_03407.853.pth | Hamilton 2.352867603302002
./Hopper-v2_PPOHtermK_1/actor__000002403565_03436.595.pth | Hamilton 2.3207945823669434
./Hopper-v2_PPOHtermK_1/actor__000002524028_03463.162.pth | Hamilton 2.3596863746643066
"""
# Hopper-v2_PPOHtermK_2_3156
data13 = """
./Hopper-v2_PPOHtermK_2_3156/actor_000040346.pth | Hamilton 0.5763109922409058
./Hopper-v2_PPOHtermK_2_3156/actor_000072930.pth | Hamilton 0.6123620271682739
./Hopper-v2_PPOHtermK_2_3156/actor_000105143.pth | Hamilton 0.6229321360588074
./Hopper-v2_PPOHtermK_2_3156/actor_000137637.pth | Hamilton 0.6164294481277466
./Hopper-v2_PPOHtermK_2_3156/actor_00016100_00090.576.pth | Hamilton 0.5324805378913879
./Hopper-v2_PPOHtermK_2_3156/actor_000170279.pth | Hamilton 0.49546873569488525
./Hopper-v2_PPOHtermK_2_3156/actor_000202905.pth | Hamilton 0.4399419128894806
./Hopper-v2_PPOHtermK_2_3156/actor_000235503.pth | Hamilton 0.45821112394332886
./Hopper-v2_PPOHtermK_2_3156/actor_000268069.pth | Hamilton 0.48554331064224243
./Hopper-v2_PPOHtermK_2_3156/actor_000300874.pth | Hamilton 0.4997228980064392
./Hopper-v2_PPOHtermK_2_3156/actor_000333837.pth | Hamilton 0.5096474885940552
./Hopper-v2_PPOHtermK_2_3156/actor_000366945.pth | Hamilton 0.5304214954376221
./Hopper-v2_PPOHtermK_2_3156/actor_000400439.pth | Hamilton 0.5474600195884705
./Hopper-v2_PPOHtermK_2_3156/actor_000434064.pth | Hamilton 0.5375049710273743
./Hopper-v2_PPOHtermK_2_3156/actor_000467261.pth | Hamilton 0.5544142723083496
./Hopper-v2_PPOHtermK_2_3156/actor_000500391.pth | Hamilton 0.5316793322563171
./Hopper-v2_PPOHtermK_2_3156/actor_000534551.pth | Hamilton 0.5361940860748291
./Hopper-v2_PPOHtermK_2_3156/actor_000569879.pth | Hamilton 0.540449857711792
./Hopper-v2_PPOHtermK_2_3156/actor_000606372.pth | Hamilton 0.5417308807373047
./Hopper-v2_PPOHtermK_2_3156/actor_000640501.pth | Hamilton 0.5483449101448059
./Hopper-v2_PPOHtermK_2_3156/actor_000675586.pth | Hamilton 0.5543090105056763
./Hopper-v2_PPOHtermK_2_3156/actor_000710761.pth | Hamilton 0.579072892665863
./Hopper-v2_PPOHtermK_2_3156/actor_000745688.pth | Hamilton 0.571616530418396
./Hopper-v2_PPOHtermK_2_3156/actor_000780039.pth | Hamilton 0.5612502098083496
./Hopper-v2_PPOHtermK_2_3156/actor_000814929.pth | Hamilton 0.5686127543449402
./Hopper-v2_PPOHtermK_2_3156/actor_000850382.pth | Hamilton 0.5832970142364502
./Hopper-v2_PPOHtermK_2_3156/actor_000884745.pth | Hamilton 0.606783926486969
./Hopper-v2_PPOHtermK_2_3156/actor_000918935.pth | Hamilton 0.6021519899368286
./Hopper-v2_PPOHtermK_2_3156/actor_000953301.pth | Hamilton 0.6186079978942871
./Hopper-v2_PPOHtermK_2_3156/actor_000987480.pth | Hamilton 0.6002688407897949
./Hopper-v2_PPOHtermK_2_3156/actor_001021393.pth | Hamilton 0.6015036106109619
./Hopper-v2_PPOHtermK_2_3156/actor_001057062.pth | Hamilton 0.5962991714477539
./Hopper-v2_PPOHtermK_2_3156/actor_001092436.pth | Hamilton 0.5932016968727112
./Hopper-v2_PPOHtermK_2_3156/actor_001127562.pth | Hamilton 0.5697240829467773
./Hopper-v2_PPOHtermK_2_3156/actor_001161879.pth | Hamilton 0.600551426410675
./Hopper-v2_PPOHtermK_2_3156/actor_001195950.pth | Hamilton 0.5997455716133118
./Hopper-v2_PPOHtermK_2_3156/actor_001231365.pth | Hamilton 0.5820814967155457
./Hopper-v2_PPOHtermK_2_3156/actor_001265918.pth | Hamilton 0.588866114616394
./Hopper-v2_PPOHtermK_2_3156/actor_001300036.pth | Hamilton 0.6216461658477783
./Hopper-v2_PPOHtermK_2_3156/actor_001336296.pth | Hamilton 0.628680408000946
./Hopper-v2_PPOHtermK_2_3156/actor_001370489.pth | Hamilton 0.6236757636070251
./Hopper-v2_PPOHtermK_2_3156/actor_001404338.pth | Hamilton 0.6150774955749512
./Hopper-v2_PPOHtermK_2_3156/actor_001439071.pth | Hamilton 0.6115890145301819
./Hopper-v2_PPOHtermK_2_3156/actor_001474963.pth | Hamilton 0.6485454440116882
./Hopper-v2_PPOHtermK_2_3156/actor_001509630.pth | Hamilton 0.6593189239501953
./Hopper-v2_PPOHtermK_2_3156/actor_001544199.pth | Hamilton 0.6511344909667969
./Hopper-v2_PPOHtermK_2_3156/actor_001578381.pth | Hamilton 0.6688206195831299
./Hopper-v2_PPOHtermK_2_3156/actor_001613226.pth | Hamilton 0.6699748039245605
./Hopper-v2_PPOHtermK_2_3156/actor_001648031.pth | Hamilton 0.6777853965759277
./Hopper-v2_PPOHtermK_2_3156/actor_001682201.pth | Hamilton 0.6612711548805237
./Hopper-v2_PPOHtermK_2_3156/actor_001716846.pth | Hamilton 0.6769454479217529
./Hopper-v2_PPOHtermK_2_3156/actor_001751005.pth | Hamilton 0.663801372051239
./Hopper-v2_PPOHtermK_2_3156/actor_001786905.pth | Hamilton 0.6786950826644897
./Hopper-v2_PPOHtermK_2_3156/actor_001822953.pth | Hamilton 0.6952859163284302
./Hopper-v2_PPOHtermK_2_3156/actor_001857842.pth | Hamilton 0.7116883397102356
./Hopper-v2_PPOHtermK_2_3156/actor_001893217.pth | Hamilton 0.7202495336532593
./Hopper-v2_PPOHtermK_2_3156/actor_001928194.pth | Hamilton 0.7230077981948853
./Hopper-v2_PPOHtermK_2_3156/actor_001962730.pth | Hamilton 0.7101202607154846
./Hopper-v2_PPOHtermK_2_3156/actor_001998156.pth | Hamilton 0.7321962714195251
./Hopper-v2_PPOHtermK_2_3156/actor_00366945_00789.846.pth | Hamilton 0.6259732246398926
./Hopper-v2_PPOHtermK_2_3156/actor_00719329_02410.472.pth | Hamilton 0.7685126066207886
./Hopper-v2_PPOHtermK_2_3156/actor_01065514_02764.350.pth | Hamilton 0.8388811349868774
./Hopper-v2_PPOHtermK_2_3156/actor_01413113_03156.792.pth | Hamilton 0.8231339454650879
"""
# Humanoid-v3_PPOHtermK_4_10726
data21 = """
./Humanoid-v3_PPOHtermK_4/actor_000000216919.pth | Hamilton 7.745727998553775e-06
./Humanoid-v3_PPOHtermK_4/actor_000000410670.pth | Hamilton 1.547358260722831e-05
./Humanoid-v3_PPOHtermK_4/actor_000000605134.pth | Hamilton 2.1356177967390977e-05
./Humanoid-v3_PPOHtermK_4/actor_000000799481.pth | Hamilton 2.9388598704827018e-05
./Humanoid-v3_PPOHtermK_4/actor_000000994713.pth | Hamilton 3.6286488466430455e-05
./Humanoid-v3_PPOHtermK_4/actor_000001190259.pth | Hamilton 4.73175932711456e-05
./Humanoid-v3_PPOHtermK_4/actor_000001386966.pth | Hamilton 6.457018025685102e-05
./Humanoid-v3_PPOHtermK_4/actor_000001583609.pth | Hamilton 7.933532469905913e-05
./Humanoid-v3_PPOHtermK_4/actor_000001781738.pth | Hamilton 0.00010167416621698067
./Humanoid-v3_PPOHtermK_4/actor_000001982987.pth | Hamilton 0.0001203079882543534
./Humanoid-v3_PPOHtermK_4/actor_000002185209.pth | Hamilton 0.00014862300304230303
./Humanoid-v3_PPOHtermK_4/actor_000002388949.pth | Hamilton 0.00018828797328751534
./Humanoid-v3_PPOHtermK_4/actor_000002596491.pth | Hamilton 0.00020941419643349946
./Humanoid-v3_PPOHtermK_4/actor_000002808873.pth | Hamilton 0.0002872117329388857
./Humanoid-v3_PPOHtermK_4/actor_000003020830.pth | Hamilton 0.00035879426286555827
./Humanoid-v3_PPOHtermK_4/actor_000003238404.pth | Hamilton 0.00043054361594840884
./Humanoid-v3_PPOHtermK_4/actor_000003455575.pth | Hamilton 0.0005158257554285228
./Humanoid-v3_PPOHtermK_4/actor_000003670611.pth | Hamilton 0.0005869035376235843
./Humanoid-v3_PPOHtermK_4/actor_000003882855.pth | Hamilton 0.0008774186717346311
./Humanoid-v3_PPOHtermK_4/actor_000004098267.pth | Hamilton 0.0011557259131222963
./Humanoid-v3_PPOHtermK_4/actor_000004312148.pth | Hamilton 0.001274388749152422
./Humanoid-v3_PPOHtermK_4/actor_000004529136.pth | Hamilton 0.001530707930214703
./Humanoid-v3_PPOHtermK_4/actor_000004743707.pth | Hamilton 0.0018802642589434981
./Humanoid-v3_PPOHtermK_4/actor_000004958553.pth | Hamilton 0.0023551632184535265
./Humanoid-v3_PPOHtermK_4/actor_000005178788.pth | Hamilton 0.002812052145600319
./Humanoid-v3_PPOHtermK_4/actor_000005396031.pth | Hamilton 0.003237334545701742
./Humanoid-v3_PPOHtermK_4/actor_000005614460.pth | Hamilton 0.004360102582722902
./Humanoid-v3_PPOHtermK_4/actor_000005830320.pth | Hamilton 0.006299832835793495
./Humanoid-v3_PPOHtermK_4/actor_000006046427.pth | Hamilton 0.007334440480917692
./Humanoid-v3_PPOHtermK_4/actor_000006258872.pth | Hamilton 0.01014986727386713
./Humanoid-v3_PPOHtermK_4/actor_000006473024.pth | Hamilton 0.012375920079648495
./Humanoid-v3_PPOHtermK_4/actor_000006687708.pth | Hamilton 0.018368063494563103
./Humanoid-v3_PPOHtermK_4/actor_000006901660.pth | Hamilton 0.022276129573583603
./Humanoid-v3_PPOHtermK_4/actor_000007118096.pth | Hamilton 0.03707293048501015
./Humanoid-v3_PPOHtermK_4/actor_000007337525.pth | Hamilton 0.05694698914885521
./Humanoid-v3_PPOHtermK_4/actor_000007554583.pth | Hamilton 0.08436055481433868
./Humanoid-v3_PPOHtermK_4/actor_000007772355.pth | Hamilton 0.13028433918952942
./Humanoid-v3_PPOHtermK_4/actor_000007988089.pth | Hamilton 0.2138514220714569
./Humanoid-v3_PPOHtermK_4/actor_000008198931.pth | Hamilton 0.30183884501457214
./Humanoid-v3_PPOHtermK_4/actor_000008411470.pth | Hamilton 0.3925187885761261
./Humanoid-v3_PPOHtermK_4/actor_000008627622.pth | Hamilton 0.4613773226737976
./Humanoid-v3_PPOHtermK_4/actor_000008842561.pth | Hamilton 0.5061527490615845
./Humanoid-v3_PPOHtermK_4/actor_000009057689.pth | Hamilton 0.5313953161239624
./Humanoid-v3_PPOHtermK_4/actor_000009272305.pth | Hamilton 0.5612488389015198
./Humanoid-v3_PPOHtermK_4/actor_000009490462.pth | Hamilton 0.6400241851806641
./Humanoid-v3_PPOHtermK_4/actor_000009708813.pth | Hamilton 0.7168237566947937
./Humanoid-v3_PPOHtermK_4/actor_000009924694.pth | Hamilton 0.8025385737419128
./Humanoid-v3_PPOHtermK_4/actor_000010140068.pth | Hamilton 0.8092190027236938
./Humanoid-v3_PPOHtermK_4/actor_000010362071.pth | Hamilton 0.9000301957130432
./Humanoid-v3_PPOHtermK_4/actor_000010576853.pth | Hamilton 0.9201773405075073
./Humanoid-v3_PPOHtermK_4/actor_000010793819.pth | Hamilton 0.9430528283119202
./Humanoid-v3_PPOHtermK_4/actor_000011010846.pth | Hamilton 0.9776714444160461
./Humanoid-v3_PPOHtermK_4/actor_000011222986.pth | Hamilton 0.9512382745742798
./Humanoid-v3_PPOHtermK_4/actor_000011439808.pth | Hamilton 0.9987568855285645
./Humanoid-v3_PPOHtermK_4/actor_000011654312.pth | Hamilton 0.985135555267334
./Humanoid-v3_PPOHtermK_4/actor_000011867763.pth | Hamilton 1.0231181383132935
./Humanoid-v3_PPOHtermK_4/actor_000012082123.pth | Hamilton 1.0609831809997559
./Humanoid-v3_PPOHtermK_4/actor_000012299147.pth | Hamilton 1.0948328971862793
./Humanoid-v3_PPOHtermK_4/actor_000012512215.pth | Hamilton 1.122593641281128
./Humanoid-v3_PPOHtermK_4/actor_000012728307.pth | Hamilton 1.13325834274292
./Humanoid-v3_PPOHtermK_4/actor_000012944372.pth | Hamilton 1.0918784141540527
./Humanoid-v3_PPOHtermK_4/actor_000013154742.pth | Hamilton 1.0990846157073975
./Humanoid-v3_PPOHtermK_4/actor_000013369953.pth | Hamilton 1.0786091089248657
./Humanoid-v3_PPOHtermK_4/actor_000013585079.pth | Hamilton 1.0882441997528076
./Humanoid-v3_PPOHtermK_4/actor_000013798891.pth | Hamilton 1.1021605730056763
./Humanoid-v3_PPOHtermK_4/actor_000014013378.pth | Hamilton 1.1637951135635376
./Humanoid-v3_PPOHtermK_4/actor_000014230298.pth | Hamilton 1.1316213607788086
./Humanoid-v3_PPOHtermK_4/actor_000014446130.pth | Hamilton 1.1599910259246826
./Humanoid-v3_PPOHtermK_4/actor_000014663429.pth | Hamilton 1.1631524562835693
./Humanoid-v3_PPOHtermK_4/actor_000014880826.pth | Hamilton 1.1942859888076782
./Humanoid-v3_PPOHtermK_4/actor_000015096566.pth | Hamilton 1.144811749458313
./Humanoid-v3_PPOHtermK_4/actor_000015312278.pth | Hamilton 1.1337217092514038
./Humanoid-v3_PPOHtermK_4/actor_000015529254.pth | Hamilton 1.0972442626953125
./Humanoid-v3_PPOHtermK_4/actor_000015742446.pth | Hamilton 1.131184458732605
./Humanoid-v3_PPOHtermK_4/actor_000015958629.pth | Hamilton 1.1117836236953735
./Humanoid-v3_PPOHtermK_4/actor_000016174762.pth | Hamilton 1.0927311182022095
./Humanoid-v3_PPOHtermK_4/actor_000016390420.pth | Hamilton 1.0723671913146973
./Humanoid-v3_PPOHtermK_4/actor_000016603883.pth | Hamilton 1.0995543003082275
./Humanoid-v3_PPOHtermK_4/actor_000016817354.pth | Hamilton 1.1224054098129272
./Humanoid-v3_PPOHtermK_4/actor_000017024610.pth | Hamilton 1.1242108345031738
./Humanoid-v3_PPOHtermK_4/actor_000017246507.pth | Hamilton 1.1464221477508545
./Humanoid-v3_PPOHtermK_4/actor_000017461887.pth | Hamilton 1.1415914297103882
./Humanoid-v3_PPOHtermK_4/actor_000017673535.pth | Hamilton 1.1294492483139038
./Humanoid-v3_PPOHtermK_4/actor_000017890207.pth | Hamilton 1.1181045770645142
./Humanoid-v3_PPOHtermK_4/actor_000018106772.pth | Hamilton 1.1333143711090088
./Humanoid-v3_PPOHtermK_4/actor_000018319686.pth | Hamilton 1.1642205715179443
./Humanoid-v3_PPOHtermK_4/actor_000018533403.pth | Hamilton 1.1748136281967163
./Humanoid-v3_PPOHtermK_4/actor_000018748137.pth | Hamilton 1.1647531986236572
./Humanoid-v3_PPOHtermK_4/actor_000018959334.pth | Hamilton 1.1496871709823608
./Humanoid-v3_PPOHtermK_4/actor_000019172017.pth | Hamilton 1.1272252798080444
./Humanoid-v3_PPOHtermK_4/actor_000019388412.pth | Hamilton 1.1277064085006714
./Humanoid-v3_PPOHtermK_4/actor_000019600363.pth | Hamilton 1.1107887029647827
./Humanoid-v3_PPOHtermK_4/actor_000019808505.pth | Hamilton 1.1193355321884155
./Humanoid-v3_PPOHtermK_4/actor_000020021389.pth | Hamilton 1.0716553926467896
./Humanoid-v3_PPOHtermK_4/actor_000020235334.pth | Hamilton 1.0648878812789917
./Humanoid-v3_PPOHtermK_4/actor_000020446665.pth | Hamilton 1.0834991931915283
./Humanoid-v3_PPOHtermK_4/actor_000020656097.pth | Hamilton 1.1351135969161987
./Humanoid-v3_PPOHtermK_4/actor_000020868450.pth | Hamilton 1.117741584777832
./Humanoid-v3_PPOHtermK_4/actor_000021078472.pth | Hamilton 1.091500163078308
./Humanoid-v3_PPOHtermK_4/actor_000021291544.pth | Hamilton 1.115471363067627
./Humanoid-v3_PPOHtermK_4/actor_000021509156.pth | Hamilton 1.0969226360321045
./Humanoid-v3_PPOHtermK_4/actor_000021717899.pth | Hamilton 1.076116681098938
./Humanoid-v3_PPOHtermK_4/actor_000021929868.pth | Hamilton 1.096856713294983
./Humanoid-v3_PPOHtermK_4/actor__000000048145_00072.415.pth | Hamilton 2.8798704079235904e-06
./Humanoid-v3_PPOHtermK_4/actor__000000848121_00518.436.pth | Hamilton 2.7115223929286003e-05
./Humanoid-v3_PPOHtermK_4/actor__000001657931_01537.504.pth | Hamilton 9.482606401434168e-05
./Humanoid-v3_PPOHtermK_4/actor__000002466260_03166.374.pth | Hamilton 0.0003219899954274297
./Humanoid-v3_PPOHtermK_4/actor__000003293034_04917.708.pth | Hamilton 0.0011960271513089538
./Humanoid-v3_PPOHtermK_4/actor__000004124188_07916.716.pth | Hamilton 0.0029556548688560724
./Humanoid-v3_PPOHtermK_4/actor__000004958553_08276.233.pth | Hamilton 0.0076818703673779964
./Humanoid-v3_PPOHtermK_4/actor__000007418542_09105.766.pth | Hamilton 0.2132045328617096
./Humanoid-v3_PPOHtermK_4/actor__000010710422_09899.406.pth | Hamilton 1.2172187566757202
./Humanoid-v3_PPOHtermK_4/actor__000011547432_10030.402.pth | Hamilton 1.309943437576294
./Humanoid-v3_PPOHtermK_4/actor__000014041990_10242.135.pth | Hamilton 1.460361123085022
./Humanoid-v3_PPOHtermK_4/actor__000017325116_10313.688.pth | Hamilton 1.5013796091079712
./Humanoid-v3_PPOHtermK_4/actor__000019808505_10467.968.pth | Hamilton 1.3799551725387573
./Humanoid-v3_PPOHtermK_4/actor__000020629637_10537.408.pth | Hamilton 1.3538414239883423
"""
# Humanoid-v3_PPO_1_12163
# Raw measurement dump for the Humanoid-v3_PPO_1_12163 run: one record per
# line in the form "<checkpoint path> | <metric name> <value>". The string
# contents are runtime data consumed verbatim -- do not reformat or re-wrap.
# NOTE(review): the records for actor_000002184685 .. actor_000126164072
# appear twice in this string (the second copy begins immediately after the
# first actor_000126164072 line) -- confirm whether downstream parsing
# deduplicates, or whether this is an accidental paste, before relying on
# per-checkpoint counts or averages derived from this block.
data22 = """
./Humanoid-v3_PPO_1_12163/actor_000000217243.pth | Hamilton 4.901742795482278e-05
./Humanoid-v3_PPO_1_12163/actor_000001193673.pth | Hamilton 9.237827907782048e-05
./Humanoid-v3_PPO_1_12163/actor_000002184685.pth | Hamilton 0.00017076915537472814
./Humanoid-v3_PPO_1_12163/actor_000003203682.pth | Hamilton 0.00035832056892104447
./Humanoid-v3_PPO_1_12163/actor_000004266366.pth | Hamilton 0.0008324697846546769
./Humanoid-v3_PPO_1_12163/actor_000005330575.pth | Hamilton 0.001992176752537489
./Humanoid-v3_PPO_1_12163/actor_000006398893.pth | Hamilton 0.005656303837895393
./Humanoid-v3_PPO_1_12163/actor_000007480462.pth | Hamilton 0.024866096675395966
./Humanoid-v3_PPO_1_12163/actor_000008554060.pth | Hamilton 0.08873523026704788
./Humanoid-v3_PPO_1_12163/actor_000009637436.pth | Hamilton 0.13806229829788208
./Humanoid-v3_PPO_1_12163/actor_000010727125.pth | Hamilton 0.12786342203617096
./Humanoid-v3_PPO_1_12163/actor_000011818365.pth | Hamilton 0.1539911925792694
./Humanoid-v3_PPO_1_12163/actor_000012903175.pth | Hamilton 0.12738411128520966
./Humanoid-v3_PPO_1_12163/actor_000013990927.pth | Hamilton 0.12287505716085434
./Humanoid-v3_PPO_1_12163/actor_000015079907.pth | Hamilton 0.11835727095603943
./Humanoid-v3_PPO_1_12163/actor_000016148312.pth | Hamilton 0.12229346483945847
./Humanoid-v3_PPO_1_12163/actor_000017220854.pth | Hamilton 0.1064532995223999
./Humanoid-v3_PPO_1_12163/actor_000018292382.pth | Hamilton 0.09688813984394073
./Humanoid-v3_PPO_1_12163/actor_000019364772.pth | Hamilton 0.09581438452005386
./Humanoid-v3_PPO_1_12163/actor_000020416402.pth | Hamilton 0.10767711699008942
./Humanoid-v3_PPO_1_12163/actor_000021490203.pth | Hamilton 0.08338568359613419
./Humanoid-v3_PPO_1_12163/actor_000022553536.pth | Hamilton 0.08716519176959991
./Humanoid-v3_PPO_1_12163/actor_000023611940.pth | Hamilton 0.07676978409290314
./Humanoid-v3_PPO_1_12163/actor_000024673297.pth | Hamilton 0.07334909588098526
./Humanoid-v3_PPO_1_12163/actor_000025735621.pth | Hamilton 0.06776144355535507
./Humanoid-v3_PPO_1_12163/actor_000026804391.pth | Hamilton 0.06558670103549957
./Humanoid-v3_PPO_1_12163/actor_000027872521.pth | Hamilton 0.05833116173744202
./Humanoid-v3_PPO_1_12163/actor_000028930077.pth | Hamilton 0.06019581854343414
./Humanoid-v3_PPO_1_12163/actor_000029994618.pth | Hamilton 0.05537016689777374
./Humanoid-v3_PPO_1_12163/actor_000031053027.pth | Hamilton 0.04344930127263069
./Humanoid-v3_PPO_1_12163/actor_000032113320.pth | Hamilton 0.04432051256299019
./Humanoid-v3_PPO_1_12163/actor_000033170362.pth | Hamilton 0.0436234213411808
./Humanoid-v3_PPO_1_12163/actor_000034222634.pth | Hamilton 0.044859353452920914
./Humanoid-v3_PPO_1_12163/actor_000035304566.pth | Hamilton 0.04200012981891632
./Humanoid-v3_PPO_1_12163/actor_000036378916.pth | Hamilton 0.0350864976644516
./Humanoid-v3_PPO_1_12163/actor_000037447688.pth | Hamilton 0.035870373249053955
./Humanoid-v3_PPO_1_12163/actor_000038526263.pth | Hamilton 0.035576753318309784
./Humanoid-v3_PPO_1_12163/actor_000039592565.pth | Hamilton 0.032685451209545135
./Humanoid-v3_PPO_1_12163/actor_000040663920.pth | Hamilton 0.03560031205415726
./Humanoid-v3_PPO_1_12163/actor_000041733296.pth | Hamilton 0.03140128403902054
./Humanoid-v3_PPO_1_12163/actor_000042813691.pth | Hamilton 0.03015800379216671
./Humanoid-v3_PPO_1_12163/actor_000043887612.pth | Hamilton 0.02578139863908291
./Humanoid-v3_PPO_1_12163/actor_000044953310.pth | Hamilton 0.02614319510757923
./Humanoid-v3_PPO_1_12163/actor_000046024932.pth | Hamilton 0.02799818478524685
./Humanoid-v3_PPO_1_12163/actor_000047097448.pth | Hamilton 0.024935496971011162
./Humanoid-v3_PPO_1_12163/actor_000048161312.pth | Hamilton 0.026888230815529823
./Humanoid-v3_PPO_1_12163/actor_000049230121.pth | Hamilton 0.02502981573343277
./Humanoid-v3_PPO_1_12163/actor_000050309118.pth | Hamilton 0.024827178567647934
./Humanoid-v3_PPO_1_12163/actor_000051380585.pth | Hamilton 0.0275689959526062
./Humanoid-v3_PPO_1_12163/actor_000052449009.pth | Hamilton 0.02503933571279049
./Humanoid-v3_PPO_1_12163/actor_000053519480.pth | Hamilton 0.020775971934199333
./Humanoid-v3_PPO_1_12163/actor_000054577375.pth | Hamilton 0.021033601835370064
./Humanoid-v3_PPO_1_12163/actor_000055636220.pth | Hamilton 0.022039370611310005
./Humanoid-v3_PPO_1_12163/actor_000056693412.pth | Hamilton 0.024740155786275864
./Humanoid-v3_PPO_1_12163/actor_000057745862.pth | Hamilton 0.022060979157686234
./Humanoid-v3_PPO_1_12163/actor_000058803246.pth | Hamilton 0.021534819155931473
./Humanoid-v3_PPO_1_12163/actor_000059848669.pth | Hamilton 0.01842654123902321
./Humanoid-v3_PPO_1_12163/actor_000060894779.pth | Hamilton 0.01610112003982067
./Humanoid-v3_PPO_1_12163/actor_000061950089.pth | Hamilton 0.022715415805578232
./Humanoid-v3_PPO_1_12163/actor_000063011356.pth | Hamilton 0.017054984346032143
./Humanoid-v3_PPO_1_12163/actor_000064071902.pth | Hamilton 0.01832679472863674
./Humanoid-v3_PPO_1_12163/actor_000065139388.pth | Hamilton 0.01720341481268406
./Humanoid-v3_PPO_1_12163/actor_000066195958.pth | Hamilton 0.015391580760478973
./Humanoid-v3_PPO_1_12163/actor_000067254833.pth | Hamilton 0.01721765846014023
./Humanoid-v3_PPO_1_12163/actor_000068303613.pth | Hamilton 0.01933548040688038
./Humanoid-v3_PPO_1_12163/actor_000069352791.pth | Hamilton 0.0174593236297369
./Humanoid-v3_PPO_1_12163/actor_000070419230.pth | Hamilton 0.018092216923832893
./Humanoid-v3_PPO_1_12163/actor_000071495599.pth | Hamilton 0.014242338016629219
./Humanoid-v3_PPO_1_12163/actor_000072575311.pth | Hamilton 0.014192990027368069
./Humanoid-v3_PPO_1_12163/actor_000073646993.pth | Hamilton 0.014867308549582958
./Humanoid-v3_PPO_1_12163/actor_000074720187.pth | Hamilton 0.014990294352173805
./Humanoid-v3_PPO_1_12163/actor_000075774821.pth | Hamilton 0.01650562509894371
./Humanoid-v3_PPO_1_12163/actor_000076826926.pth | Hamilton 0.017760004848241806
./Humanoid-v3_PPO_1_12163/actor_000077887180.pth | Hamilton 0.015743592754006386
./Humanoid-v3_PPO_1_12163/actor_000078948698.pth | Hamilton 0.015605615451931953
./Humanoid-v3_PPO_1_12163/actor_000080028672.pth | Hamilton 0.016591545194387436
./Humanoid-v3_PPO_1_12163/actor_000081092988.pth | Hamilton 0.013885377906262875
./Humanoid-v3_PPO_1_12163/actor_000082150938.pth | Hamilton 0.015452136285603046
./Humanoid-v3_PPO_1_12163/actor_000083226272.pth | Hamilton 0.013292834162712097
./Humanoid-v3_PPO_1_12163/actor_000084315697.pth | Hamilton 0.013336403295397758
./Humanoid-v3_PPO_1_12163/actor_000085398831.pth | Hamilton 0.012728032656013966
./Humanoid-v3_PPO_1_12163/actor_000086462270.pth | Hamilton 0.014807147905230522
./Humanoid-v3_PPO_1_12163/actor_000087543043.pth | Hamilton 0.01517474465072155
./Humanoid-v3_PPO_1_12163/actor_000088615424.pth | Hamilton 0.011902659200131893
./Humanoid-v3_PPO_1_12163/actor_000089693809.pth | Hamilton 0.011332799680531025
./Humanoid-v3_PPO_1_12163/actor_000090772202.pth | Hamilton 0.012985597364604473
./Humanoid-v3_PPO_1_12163/actor_000091840029.pth | Hamilton 0.01333997305482626
./Humanoid-v3_PPO_1_12163/actor_000092901961.pth | Hamilton 0.011972179636359215
./Humanoid-v3_PPO_1_12163/actor_000093984958.pth | Hamilton 0.01070544682443142
./Humanoid-v3_PPO_1_12163/actor_000095063401.pth | Hamilton 0.014591872692108154
./Humanoid-v3_PPO_1_12163/actor_000096131061.pth | Hamilton 0.011899355798959732
./Humanoid-v3_PPO_1_12163/actor_000097192951.pth | Hamilton 0.011008753441274166
./Humanoid-v3_PPO_1_12163/actor_000098265618.pth | Hamilton 0.013001998886466026
./Humanoid-v3_PPO_1_12163/actor_000099339822.pth | Hamilton 0.012511848472058773
./Humanoid-v3_PPO_1_12163/actor_000100409092.pth | Hamilton 0.011879532597959042
./Humanoid-v3_PPO_1_12163/actor_000101487843.pth | Hamilton 0.011638682335615158
./Humanoid-v3_PPO_1_12163/actor_000102550987.pth | Hamilton 0.011632084846496582
./Humanoid-v3_PPO_1_12163/actor_000103610706.pth | Hamilton 0.01276202592998743
./Humanoid-v3_PPO_1_12163/actor_000104685349.pth | Hamilton 0.013183681294322014
./Humanoid-v3_PPO_1_12163/actor_000105749519.pth | Hamilton 0.01066779438406229
./Humanoid-v3_PPO_1_12163/actor_000106820495.pth | Hamilton 0.009783798828721046
./Humanoid-v3_PPO_1_12163/actor_000107892424.pth | Hamilton 0.010112997144460678
./Humanoid-v3_PPO_1_12163/actor_000108966100.pth | Hamilton 0.009121796116232872
./Humanoid-v3_PPO_1_12163/actor_000110039277.pth | Hamilton 0.010811982676386833
./Humanoid-v3_PPO_1_12163/actor_000111116020.pth | Hamilton 0.00820975936949253
./Humanoid-v3_PPO_1_12163/actor_000112180645.pth | Hamilton 0.00927242636680603
./Humanoid-v3_PPO_1_12163/actor_000113246999.pth | Hamilton 0.008470394648611546
./Humanoid-v3_PPO_1_12163/actor_000114311656.pth | Hamilton 0.007810091599822044
./Humanoid-v3_PPO_1_12163/actor_000115396484.pth | Hamilton 0.010954611003398895
./Humanoid-v3_PPO_1_12163/actor_000116469992.pth | Hamilton 0.010519781149923801
./Humanoid-v3_PPO_1_12163/actor_000117554963.pth | Hamilton 0.009184564463794231
./Humanoid-v3_PPO_1_12163/actor_000118632075.pth | Hamilton 0.010162292048335075
./Humanoid-v3_PPO_1_12163/actor_000119700130.pth | Hamilton 0.0076871528290212154
./Humanoid-v3_PPO_1_12163/actor_000120768448.pth | Hamilton 0.007597841322422028
./Humanoid-v3_PPO_1_12163/actor_000121847245.pth | Hamilton 0.00838988646864891
./Humanoid-v3_PPO_1_12163/actor_000122924889.pth | Hamilton 0.008655181154608727
./Humanoid-v3_PPO_1_12163/actor_000123997498.pth | Hamilton 0.008889286778867245
./Humanoid-v3_PPO_1_12163/actor_000125078319.pth | Hamilton 0.00809280201792717
./Humanoid-v3_PPO_1_12163/actor_000126164072.pth | Hamilton 0.009464731439948082
./Humanoid-v3_PPO_1_12163/actor_000002184685.pth | Hamilton 0.00017076915537472814
./Humanoid-v3_PPO_1_12163/actor_000003203682.pth | Hamilton 0.00035832056892104447
./Humanoid-v3_PPO_1_12163/actor_000004266366.pth | Hamilton 0.0008324697846546769
./Humanoid-v3_PPO_1_12163/actor_000005330575.pth | Hamilton 0.001992176752537489
./Humanoid-v3_PPO_1_12163/actor_000006398893.pth | Hamilton 0.005656303837895393
./Humanoid-v3_PPO_1_12163/actor_000007480462.pth | Hamilton 0.024866096675395966
./Humanoid-v3_PPO_1_12163/actor_000008554060.pth | Hamilton 0.08873523026704788
./Humanoid-v3_PPO_1_12163/actor_000009637436.pth | Hamilton 0.13806229829788208
./Humanoid-v3_PPO_1_12163/actor_000010727125.pth | Hamilton 0.12786342203617096
./Humanoid-v3_PPO_1_12163/actor_000011818365.pth | Hamilton 0.1539911925792694
./Humanoid-v3_PPO_1_12163/actor_000012903175.pth | Hamilton 0.12738411128520966
./Humanoid-v3_PPO_1_12163/actor_000013990927.pth | Hamilton 0.12287505716085434
./Humanoid-v3_PPO_1_12163/actor_000015079907.pth | Hamilton 0.11835727095603943
./Humanoid-v3_PPO_1_12163/actor_000016148312.pth | Hamilton 0.12229346483945847
./Humanoid-v3_PPO_1_12163/actor_000017220854.pth | Hamilton 0.1064532995223999
./Humanoid-v3_PPO_1_12163/actor_000018292382.pth | Hamilton 0.09688813984394073
./Humanoid-v3_PPO_1_12163/actor_000019364772.pth | Hamilton 0.09581438452005386
./Humanoid-v3_PPO_1_12163/actor_000020416402.pth | Hamilton 0.10767711699008942
./Humanoid-v3_PPO_1_12163/actor_000021490203.pth | Hamilton 0.08338568359613419
./Humanoid-v3_PPO_1_12163/actor_000022553536.pth | Hamilton 0.08716519176959991
./Humanoid-v3_PPO_1_12163/actor_000023611940.pth | Hamilton 0.07676978409290314
./Humanoid-v3_PPO_1_12163/actor_000024673297.pth | Hamilton 0.07334909588098526
./Humanoid-v3_PPO_1_12163/actor_000025735621.pth | Hamilton 0.06776144355535507
./Humanoid-v3_PPO_1_12163/actor_000026804391.pth | Hamilton 0.06558670103549957
./Humanoid-v3_PPO_1_12163/actor_000027872521.pth | Hamilton 0.05833116173744202
./Humanoid-v3_PPO_1_12163/actor_000028930077.pth | Hamilton 0.06019581854343414
./Humanoid-v3_PPO_1_12163/actor_000029994618.pth | Hamilton 0.05537016689777374
./Humanoid-v3_PPO_1_12163/actor_000031053027.pth | Hamilton 0.04344930127263069
./Humanoid-v3_PPO_1_12163/actor_000032113320.pth | Hamilton 0.04432051256299019
./Humanoid-v3_PPO_1_12163/actor_000033170362.pth | Hamilton 0.0436234213411808
./Humanoid-v3_PPO_1_12163/actor_000034222634.pth | Hamilton 0.044859353452920914
./Humanoid-v3_PPO_1_12163/actor_000035304566.pth | Hamilton 0.04200012981891632
./Humanoid-v3_PPO_1_12163/actor_000036378916.pth | Hamilton 0.0350864976644516
./Humanoid-v3_PPO_1_12163/actor_000037447688.pth | Hamilton 0.035870373249053955
./Humanoid-v3_PPO_1_12163/actor_000038526263.pth | Hamilton 0.035576753318309784
./Humanoid-v3_PPO_1_12163/actor_000039592565.pth | Hamilton 0.032685451209545135
./Humanoid-v3_PPO_1_12163/actor_000040663920.pth | Hamilton 0.03560031205415726
./Humanoid-v3_PPO_1_12163/actor_000041733296.pth | Hamilton 0.03140128403902054
./Humanoid-v3_PPO_1_12163/actor_000042813691.pth | Hamilton 0.03015800379216671
./Humanoid-v3_PPO_1_12163/actor_000043887612.pth | Hamilton 0.02578139863908291
./Humanoid-v3_PPO_1_12163/actor_000044953310.pth | Hamilton 0.02614319510757923
./Humanoid-v3_PPO_1_12163/actor_000046024932.pth | Hamilton 0.02799818478524685
./Humanoid-v3_PPO_1_12163/actor_000047097448.pth | Hamilton 0.024935496971011162
./Humanoid-v3_PPO_1_12163/actor_000048161312.pth | Hamilton 0.026888230815529823
./Humanoid-v3_PPO_1_12163/actor_000049230121.pth | Hamilton 0.02502981573343277
./Humanoid-v3_PPO_1_12163/actor_000050309118.pth | Hamilton 0.024827178567647934
./Humanoid-v3_PPO_1_12163/actor_000051380585.pth | Hamilton 0.0275689959526062
./Humanoid-v3_PPO_1_12163/actor_000052449009.pth | Hamilton 0.02503933571279049
./Humanoid-v3_PPO_1_12163/actor_000053519480.pth | Hamilton 0.020775971934199333
./Humanoid-v3_PPO_1_12163/actor_000054577375.pth | Hamilton 0.021033601835370064
./Humanoid-v3_PPO_1_12163/actor_000055636220.pth | Hamilton 0.022039370611310005
./Humanoid-v3_PPO_1_12163/actor_000056693412.pth | Hamilton 0.024740155786275864
./Humanoid-v3_PPO_1_12163/actor_000057745862.pth | Hamilton 0.022060979157686234
./Humanoid-v3_PPO_1_12163/actor_000058803246.pth | Hamilton 0.021534819155931473
./Humanoid-v3_PPO_1_12163/actor_000059848669.pth | Hamilton 0.01842654123902321
./Humanoid-v3_PPO_1_12163/actor_000060894779.pth | Hamilton 0.01610112003982067
./Humanoid-v3_PPO_1_12163/actor_000061950089.pth | Hamilton 0.022715415805578232
./Humanoid-v3_PPO_1_12163/actor_000063011356.pth | Hamilton 0.017054984346032143
./Humanoid-v3_PPO_1_12163/actor_000064071902.pth | Hamilton 0.01832679472863674
./Humanoid-v3_PPO_1_12163/actor_000065139388.pth | Hamilton 0.01720341481268406
./Humanoid-v3_PPO_1_12163/actor_000066195958.pth | Hamilton 0.015391580760478973
./Humanoid-v3_PPO_1_12163/actor_000067254833.pth | Hamilton 0.01721765846014023
./Humanoid-v3_PPO_1_12163/actor_000068303613.pth | Hamilton 0.01933548040688038
./Humanoid-v3_PPO_1_12163/actor_000069352791.pth | Hamilton 0.0174593236297369
./Humanoid-v3_PPO_1_12163/actor_000070419230.pth | Hamilton 0.018092216923832893
./Humanoid-v3_PPO_1_12163/actor_000071495599.pth | Hamilton 0.014242338016629219
./Humanoid-v3_PPO_1_12163/actor_000072575311.pth | Hamilton 0.014192990027368069
./Humanoid-v3_PPO_1_12163/actor_000073646993.pth | Hamilton 0.014867308549582958
./Humanoid-v3_PPO_1_12163/actor_000074720187.pth | Hamilton 0.014990294352173805
./Humanoid-v3_PPO_1_12163/actor_000075774821.pth | Hamilton 0.01650562509894371
./Humanoid-v3_PPO_1_12163/actor_000076826926.pth | Hamilton 0.017760004848241806
./Humanoid-v3_PPO_1_12163/actor_000077887180.pth | Hamilton 0.015743592754006386
./Humanoid-v3_PPO_1_12163/actor_000078948698.pth | Hamilton 0.015605615451931953
./Humanoid-v3_PPO_1_12163/actor_000080028672.pth | Hamilton 0.016591545194387436
./Humanoid-v3_PPO_1_12163/actor_000081092988.pth | Hamilton 0.013885377906262875
./Humanoid-v3_PPO_1_12163/actor_000082150938.pth | Hamilton 0.015452136285603046
./Humanoid-v3_PPO_1_12163/actor_000083226272.pth | Hamilton 0.013292834162712097
./Humanoid-v3_PPO_1_12163/actor_000084315697.pth | Hamilton 0.013336403295397758
./Humanoid-v3_PPO_1_12163/actor_000085398831.pth | Hamilton 0.012728032656013966
./Humanoid-v3_PPO_1_12163/actor_000086462270.pth | Hamilton 0.014807147905230522
./Humanoid-v3_PPO_1_12163/actor_000087543043.pth | Hamilton 0.01517474465072155
./Humanoid-v3_PPO_1_12163/actor_000088615424.pth | Hamilton 0.011902659200131893
./Humanoid-v3_PPO_1_12163/actor_000089693809.pth | Hamilton 0.011332799680531025
./Humanoid-v3_PPO_1_12163/actor_000090772202.pth | Hamilton 0.012985597364604473
./Humanoid-v3_PPO_1_12163/actor_000091840029.pth | Hamilton 0.01333997305482626
./Humanoid-v3_PPO_1_12163/actor_000092901961.pth | Hamilton 0.011972179636359215
./Humanoid-v3_PPO_1_12163/actor_000093984958.pth | Hamilton 0.01070544682443142
./Humanoid-v3_PPO_1_12163/actor_000095063401.pth | Hamilton 0.014591872692108154
./Humanoid-v3_PPO_1_12163/actor_000096131061.pth | Hamilton 0.011899355798959732
./Humanoid-v3_PPO_1_12163/actor_000097192951.pth | Hamilton 0.011008753441274166
./Humanoid-v3_PPO_1_12163/actor_000098265618.pth | Hamilton 0.013001998886466026
./Humanoid-v3_PPO_1_12163/actor_000099339822.pth | Hamilton 0.012511848472058773
./Humanoid-v3_PPO_1_12163/actor_000100409092.pth | Hamilton 0.011879532597959042
./Humanoid-v3_PPO_1_12163/actor_000101487843.pth | Hamilton 0.011638682335615158
./Humanoid-v3_PPO_1_12163/actor_000102550987.pth | Hamilton 0.011632084846496582
./Humanoid-v3_PPO_1_12163/actor_000103610706.pth | Hamilton 0.01276202592998743
./Humanoid-v3_PPO_1_12163/actor_000104685349.pth | Hamilton 0.013183681294322014
./Humanoid-v3_PPO_1_12163/actor_000105749519.pth | Hamilton 0.01066779438406229
./Humanoid-v3_PPO_1_12163/actor_000106820495.pth | Hamilton 0.009783798828721046
./Humanoid-v3_PPO_1_12163/actor_000107892424.pth | Hamilton 0.010112997144460678
./Humanoid-v3_PPO_1_12163/actor_000108966100.pth | Hamilton 0.009121796116232872
./Humanoid-v3_PPO_1_12163/actor_000110039277.pth | Hamilton 0.010811982676386833
./Humanoid-v3_PPO_1_12163/actor_000111116020.pth | Hamilton 0.00820975936949253
./Humanoid-v3_PPO_1_12163/actor_000112180645.pth | Hamilton 0.00927242636680603
./Humanoid-v3_PPO_1_12163/actor_000113246999.pth | Hamilton 0.008470394648611546
./Humanoid-v3_PPO_1_12163/actor_000114311656.pth | Hamilton 0.007810091599822044
./Humanoid-v3_PPO_1_12163/actor_000115396484.pth | Hamilton 0.010954611003398895
./Humanoid-v3_PPO_1_12163/actor_000116469992.pth | Hamilton 0.010519781149923801
./Humanoid-v3_PPO_1_12163/actor_000117554963.pth | Hamilton 0.009184564463794231
./Humanoid-v3_PPO_1_12163/actor_000118632075.pth | Hamilton 0.010162292048335075
./Humanoid-v3_PPO_1_12163/actor_000119700130.pth | Hamilton 0.0076871528290212154
./Humanoid-v3_PPO_1_12163/actor_000120768448.pth | Hamilton 0.007597841322422028
./Humanoid-v3_PPO_1_12163/actor_000121847245.pth | Hamilton 0.00838988646864891
./Humanoid-v3_PPO_1_12163/actor_000122924889.pth | Hamilton 0.008655181154608727
./Humanoid-v3_PPO_1_12163/actor_000123997498.pth | Hamilton 0.008889286778867245
./Humanoid-v3_PPO_1_12163/actor_000125078319.pth | Hamilton 0.00809280201792717
./Humanoid-v3_PPO_1_12163/actor_000126164072.pth | Hamilton 0.009464731439948082
./Humanoid-v3_PPO_1_12163/actor_000127255312.pth | Hamilton 0.009152554906904697
./Humanoid-v3_PPO_1_12163/actor_000128338707.pth | Hamilton 0.009309385903179646
./Humanoid-v3_PPO_1_12163/actor_000129418583.pth | Hamilton 0.007780071813613176
./Humanoid-v3_PPO_1_12163/actor_000130503884.pth | Hamilton 0.00809682346880436
./Humanoid-v3_PPO_1_12163/actor_000131582384.pth | Hamilton 0.009696158580482006
./Humanoid-v3_PPO_1_12163/actor_000132669375.pth | Hamilton 0.008007356896996498
./Humanoid-v3_PPO_1_12163/actor_000133744368.pth | Hamilton 0.007947931066155434
./Humanoid-v3_PPO_1_12163/actor_000134832215.pth | Hamilton 0.00801115483045578
./Humanoid-v3_PPO_1_12163/actor_000135914707.pth | Hamilton 0.00868705753237009
./Humanoid-v3_PPO_1_12163/actor_000137001027.pth | Hamilton 0.006609027739614248
./Humanoid-v3_PPO_1_12163/actor_000138081188.pth | Hamilton 0.007958009839057922
./Humanoid-v3_PPO_1_12163/actor_000139155975.pth | Hamilton 0.007521847262978554
./Humanoid-v3_PPO_1_12163/actor__000000048183_00066.895.pth | Hamilton 3.3693447676341748e-06
./Humanoid-v3_PPO_1_12163/actor__000001021631_00691.601.pth | Hamilton 9.700747796159703e-06
./Humanoid-v3_PPO_1_12163/actor__000001985719_01499.742.pth | Hamilton 2.731245149334427e-05
./Humanoid-v3_PPO_1_12163/actor__000002945119_02945.898.pth | Hamilton 6.511711399070919e-05
./Humanoid-v3_PPO_1_12163/actor__000003916981_05139.070.pth | Hamilton 0.00014375684258993715
./Humanoid-v3_PPO_1_12163/actor__000005837847_06519.394.pth | Hamilton 0.0008063034038059413
./Humanoid-v3_PPO_1_12163/actor__000008739378_07953.376.pth | Hamilton 0.01732577569782734
./Humanoid-v3_PPO_1_12163/actor__000009719571_08855.665.pth | Hamilton 0.026463143527507782
./Humanoid-v3_PPO_1_12163/actor__000013612994_09732.908.pth | Hamilton 0.025281773880124092
./Humanoid-v3_PPO_1_12163/actor__000016494377_09831.659.pth | Hamilton 0.02762601710855961
./Humanoid-v3_PPO_1_12163/actor__000018373785_10863.449.pth | Hamilton 0.027282550930976868
./Humanoid-v3_PPO_1_12163/actor__000021225818_11001.055.pth | Hamilton 0.026280341669917107
./Humanoid-v3_PPO_1_12163/actor__000022181621_11251.463.pth | Hamilton 0.029323674738407135
./Humanoid-v3_PPO_1_12163/actor__000028875206_11310.242.pth | Hamilton 0.020171033218503
./Humanoid-v3_PPO_1_12163/actor__000032695379_11551.766.pth | Hamilton 0.015605180524289608
./Humanoid-v3_PPO_1_12163/actor__000042299664_11700.186.pth | Hamilton 0.011609300039708614
./Humanoid-v3_PPO_1_12163/actor__000048968733_11890.980.pth | Hamilton 0.013961022719740868
./Humanoid-v3_PPO_1_12163/actor__000060343143_11943.898.pth | Hamilton 0.009811471216380596
./Humanoid-v3_PPO_1_12163/actor__000070818022_11948.539.pth | Hamilton 0.0109059764072299
./Humanoid-v3_PPO_1_12163/actor__000078389658_12038.896.pth | Hamilton 0.010543580166995525
./Humanoid-v3_PPO_1_12163/actor__000091604469_12088.028.pth | Hamilton 0.011014792136847973
./Humanoid-v3_PPO_1_12163/actor__000100115646_12101.240.pth | Hamilton 0.009135846048593521
./Humanoid-v3_PPO_1_12163/actor__000102943523_12163.836.pth | Hamilton 0.011690276674926281
"""
# Humanoid-v3_PPO_2_10777
data23 = """
./Humanoid-v3_PPO_2_10777/actor_000000216874.pth | Hamilton 3.688501237775199e-05
./Humanoid-v3_PPO_2_10777/actor_000001188994.pth | Hamilton 6.286639836616814e-05
./Humanoid-v3_PPO_2_10777/actor_000002173367.pth | Hamilton 0.00016023675561882555
./Humanoid-v3_PPO_2_10777/actor_000003184123.pth | Hamilton 0.00036540161818265915
./Humanoid-v3_PPO_2_10777/actor_000004210987.pth | Hamilton 0.0010902626672759652
./Humanoid-v3_PPO_2_10777/actor_000005266513.pth | Hamilton 0.0023191741202026606
./Humanoid-v3_PPO_2_10777/actor_000006327525.pth | Hamilton 0.0058968560770154
./Humanoid-v3_PPO_2_10777/actor_000007389701.pth | Hamilton 0.010458181612193584
./Humanoid-v3_PPO_2_10777/actor_000008449869.pth | Hamilton 0.024956677109003067
./Humanoid-v3_PPO_2_10777/actor_000009517989.pth | Hamilton 0.06226586923003197
./Humanoid-v3_PPO_2_10777/actor_000010585518.pth | Hamilton 0.12757600843906403
./Humanoid-v3_PPO_2_10777/actor_000011655114.pth | Hamilton 0.17442172765731812
./Humanoid-v3_PPO_2_10777/actor_000012728514.pth | Hamilton 0.14082498848438263
./Humanoid-v3_PPO_2_10777/actor_000013795000.pth | Hamilton 0.16356539726257324
./Humanoid-v3_PPO_2_10777/actor_000014868698.pth | Hamilton 0.16905692219734192
./Humanoid-v3_PPO_2_10777/actor_000015945584.pth | Hamilton 0.12859639525413513
./Humanoid-v3_PPO_2_10777/actor_000017006355.pth | Hamilton 0.10267926752567291
./Humanoid-v3_PPO_2_10777/actor_000018075577.pth | Hamilton 0.09055311232805252
./Humanoid-v3_PPO_2_10777/actor_000019165560.pth | Hamilton 0.09740696847438812
./Humanoid-v3_PPO_2_10777/actor_000020258221.pth | Hamilton 0.09980619698762894
./Humanoid-v3_PPO_2_10777/actor_000021347058.pth | Hamilton 0.08917375653982162
./Humanoid-v3_PPO_2_10777/actor_000022428001.pth | Hamilton 0.06337645649909973
./Humanoid-v3_PPO_2_10777/actor_000023506536.pth | Hamilton 0.07432126253843307
./Humanoid-v3_PPO_2_10777/actor_000024578031.pth | Hamilton 0.07724715024232864
./Humanoid-v3_PPO_2_10777/actor_000025655623.pth | Hamilton 0.08502496033906937
./Humanoid-v3_PPO_2_10777/actor_000026734442.pth | Hamilton 0.07366809993982315
./Humanoid-v3_PPO_2_10777/actor_000027807727.pth | Hamilton 0.07359852641820908
./Humanoid-v3_PPO_2_10777/actor_000028892745.pth | Hamilton 0.06446859985589981
./Humanoid-v3_PPO_2_10777/actor_000029975796.pth | Hamilton 0.053645338863134384
./Humanoid-v3_PPO_2_10777/actor_000031079418.pth | Hamilton 0.05577891319990158
./Humanoid-v3_PPO_2_10777/actor_000032160779.pth | Hamilton 0.059316832572221756
./Humanoid-v3_PPO_2_10777/actor_000033251749.pth | Hamilton 0.05422336980700493
./Humanoid-v3_PPO_2_10777/actor_000034340242.pth | Hamilton 0.05780305340886116
./Humanoid-v3_PPO_2_10777/actor_000035436129.pth | Hamilton 0.051680777221918106
./Humanoid-v3_PPO_2_10777/actor_000036520797.pth | Hamilton 0.05173584446310997
./Humanoid-v3_PPO_2_10777/actor_000037599174.pth | Hamilton 0.060392413288354874
./Humanoid-v3_PPO_2_10777/actor_000038688283.pth | Hamilton 0.04602271318435669
./Humanoid-v3_PPO_2_10777/actor_000039779314.pth | Hamilton 0.043889157474040985
./Humanoid-v3_PPO_2_10777/actor_000040865656.pth | Hamilton 0.04328423738479614
./Humanoid-v3_PPO_2_10777/actor_000041948139.pth | Hamilton 0.04392097890377045
./Humanoid-v3_PPO_2_10777/actor_000043035112.pth | Hamilton 0.045043688267469406
./Humanoid-v3_PPO_2_10777/actor_000044123980.pth | Hamilton 0.04191465675830841
./Humanoid-v3_PPO_2_10777/actor_000045215820.pth | Hamilton 0.04318935051560402
./Humanoid-v3_PPO_2_10777/actor_000046297668.pth | Hamilton 0.033199165016412735
./Humanoid-v3_PPO_2_10777/actor_000047391744.pth | Hamilton 0.038668230175971985
./Humanoid-v3_PPO_2_10777/actor_000048477919.pth | Hamilton 0.03518645092844963
./Humanoid-v3_PPO_2_10777/actor_000049562665.pth | Hamilton 0.029465997591614723
./Humanoid-v3_PPO_2_10777/actor_000050647153.pth | Hamilton 0.031955014914274216
./Humanoid-v3_PPO_2_10777/actor_000051738244.pth | Hamilton 0.033259421586990356
./Humanoid-v3_PPO_2_10777/actor_000052830021.pth | Hamilton 0.03213287517428398
./Humanoid-v3_PPO_2_10777/actor_000053920867.pth | Hamilton 0.03115176595747471
./Humanoid-v3_PPO_2_10777/actor_000055010130.pth | Hamilton 0.027640821412205696
./Humanoid-v3_PPO_2_10777/actor_000056102269.pth | Hamilton 0.031017575412988663
./Humanoid-v3_PPO_2_10777/actor_000057184828.pth | Hamilton 0.024574635550379753
./Humanoid-v3_PPO_2_10777/actor_000058259311.pth | Hamilton 0.033203840255737305
./Humanoid-v3_PPO_2_10777/actor_000059347323.pth | Hamilton 0.029378993436694145
./Humanoid-v3_PPO_2_10777/actor_000060429632.pth | Hamilton 0.025706259533762932
./Humanoid-v3_PPO_2_10777/actor_000061523859.pth | Hamilton 0.025175416842103004
./Humanoid-v3_PPO_2_10777/actor_000062618903.pth | Hamilton 0.029686741530895233
./Humanoid-v3_PPO_2_10777/actor_000063706641.pth | Hamilton 0.025953758507966995
./Humanoid-v3_PPO_2_10777/actor_000064794246.pth | Hamilton 0.024030476808547974
./Humanoid-v3_PPO_2_10777/actor_000065876548.pth | Hamilton 0.0233840923756361
./Humanoid-v3_PPO_2_10777/actor_000066968661.pth | Hamilton 0.020464828237891197
./Humanoid-v3_PPO_2_10777/actor_000068054556.pth | Hamilton 0.0246622022241354
./Humanoid-v3_PPO_2_10777/actor_000069140138.pth | Hamilton 0.022240854799747467
./Humanoid-v3_PPO_2_10777/actor_000070231953.pth | Hamilton 0.018986834213137627
./Humanoid-v3_PPO_2_10777/actor_000071318256.pth | Hamilton 0.017502957955002785
./Humanoid-v3_PPO_2_10777/actor_000072401049.pth | Hamilton 0.019304823130369186
./Humanoid-v3_PPO_2_10777/actor_000073492591.pth | Hamilton 0.015940118581056595
./Humanoid-v3_PPO_2_10777/actor_000074574020.pth | Hamilton 0.023459136486053467
./Humanoid-v3_PPO_2_10777/actor_000075662625.pth | Hamilton 0.018537208437919617
./Humanoid-v3_PPO_2_10777/actor_000076744245.pth | Hamilton 0.020491348579525948
./Humanoid-v3_PPO_2_10777/actor_000077821526.pth | Hamilton 0.016735846176743507
./Humanoid-v3_PPO_2_10777/actor_000078917856.pth | Hamilton 0.016052400693297386
./Humanoid-v3_PPO_2_10777/actor_000080007087.pth | Hamilton 0.015021108090877533
./Humanoid-v3_PPO_2_10777/actor_000081086116.pth | Hamilton 0.018561089411377907
./Humanoid-v3_PPO_2_10777/actor_000082177930.pth | Hamilton 0.01733691245317459
./Humanoid-v3_PPO_2_10777/actor_000083265701.pth | Hamilton 0.013707736507058144
./Humanoid-v3_PPO_2_10777/actor_000084342174.pth | Hamilton 0.015957854688167572
./Humanoid-v3_PPO_2_10777/actor_000085414894.pth | Hamilton 0.017749302089214325
./Humanoid-v3_PPO_2_10777/actor_000086494698.pth | Hamilton 0.014833241701126099
./Humanoid-v3_PPO_2_10777/actor_000087564851.pth | Hamilton 0.013536876067519188
./Humanoid-v3_PPO_2_10777/actor_000088661937.pth | Hamilton 0.015723472461104393
./Humanoid-v3_PPO_2_10777/actor_000089745645.pth | Hamilton 0.014462352730333805
./Humanoid-v3_PPO_2_10777/actor_000090834625.pth | Hamilton 0.011510983109474182
./Humanoid-v3_PPO_2_10777/actor_000091925938.pth | Hamilton 0.011909408494830132
./Humanoid-v3_PPO_2_10777/actor_000093006954.pth | Hamilton 0.014872158877551556
./Humanoid-v3_PPO_2_10777/actor_000094096408.pth | Hamilton 0.011801350861787796
./Humanoid-v3_PPO_2_10777/actor_000095175286.pth | Hamilton 0.013554773293435574
./Humanoid-v3_PPO_2_10777/actor_000096259095.pth | Hamilton 0.012987789697945118
./Humanoid-v3_PPO_2_10777/actor_000097336964.pth | Hamilton 0.011369738727807999
./Humanoid-v3_PPO_2_10777/actor_000098423982.pth | Hamilton 0.012872708030045033
./Humanoid-v3_PPO_2_10777/actor_000099508590.pth | Hamilton 0.012393493205308914
./Humanoid-v3_PPO_2_10777/actor_000100594630.pth | Hamilton 0.011294921860098839
./Humanoid-v3_PPO_2_10777/actor_000101678214.pth | Hamilton 0.012004299089312553
./Humanoid-v3_PPO_2_10777/actor_000102756892.pth | Hamilton 0.012431683018803596
./Humanoid-v3_PPO_2_10777/actor_000103845467.pth | Hamilton 0.011705371551215649
./Humanoid-v3_PPO_2_10777/actor_000104928223.pth | Hamilton 0.012635679915547371
./Humanoid-v3_PPO_2_10777/actor_000106005584.pth | Hamilton 0.012604453600943089
./Humanoid-v3_PPO_2_10777/actor_000107077404.pth | Hamilton 0.010718360543251038
./Humanoid-v3_PPO_2_10777/actor_000108165561.pth | Hamilton 0.012056348845362663
./Humanoid-v3_PPO_2_10777/actor_000109245837.pth | Hamilton 0.01160738617181778
./Humanoid-v3_PPO_2_10777/actor_000110325598.pth | Hamilton 0.01328522153198719
./Humanoid-v3_PPO_2_10777/actor_000111418250.pth | Hamilton 0.01034514419734478
./Humanoid-v3_PPO_2_10777/actor_000112506448.pth | Hamilton 0.01113644428551197
./Humanoid-v3_PPO_2_10777/actor_000113601834.pth | Hamilton 0.012702045030891895
./Humanoid-v3_PPO_2_10777/actor_000114690046.pth | Hamilton 0.013557045720517635
./Humanoid-v3_PPO_2_10777/actor_000115770632.pth | Hamilton 0.011984667740762234
./Humanoid-v3_PPO_2_10777/actor_000116867228.pth | Hamilton 0.011185677722096443
./Humanoid-v3_PPO_2_10777/actor_000117947552.pth | Hamilton 0.009565945714712143
./Humanoid-v3_PPO_2_10777/actor_000119030120.pth | Hamilton 0.012525824829936028
./Humanoid-v3_PPO_2_10777/actor_000120108108.pth | Hamilton 0.010960377752780914
./Humanoid-v3_PPO_2_10777/actor_000121197064.pth | Hamilton 0.008720397017896175
./Humanoid-v3_PPO_2_10777/actor_000122284289.pth | Hamilton 0.010008035227656364
./Humanoid-v3_PPO_2_10777/actor_000123379001.pth | Hamilton 0.009965328499674797
./Humanoid-v3_PPO_2_10777/actor_000124468699.pth | Hamilton 0.010664107277989388
./Humanoid-v3_PPO_2_10777/actor_000125554594.pth | Hamilton 0.008891751989722252
./Humanoid-v3_PPO_2_10777/actor_000126635162.pth | Hamilton 0.01048259623348713
./Humanoid-v3_PPO_2_10777/actor_000127717267.pth | Hamilton 0.010834542103111744
./Humanoid-v3_PPO_2_10777/actor_000128800754.pth | Hamilton 0.008265461772680283
./Humanoid-v3_PPO_2_10777/actor_000129879490.pth | Hamilton 0.007939176633954048
./Humanoid-v3_PPO_2_10777/actor_000130969646.pth | Hamilton 0.00975911132991314
./Humanoid-v3_PPO_2_10777/actor_000132053425.pth | Hamilton 0.008543290197849274
./Humanoid-v3_PPO_2_10777/actor_000133135826.pth | Hamilton 0.009344249032437801
./Humanoid-v3_PPO_2_10777/actor_000134218044.pth | Hamilton 0.008578762412071228
./Humanoid-v3_PPO_2_10777/actor_000135301361.pth | Hamilton 0.007356320973485708
./Humanoid-v3_PPO_2_10777/actor_000136394836.pth | Hamilton 0.009892778471112251
./Humanoid-v3_PPO_2_10777/actor_000137479706.pth | Hamilton 0.007941239513456821
./Humanoid-v3_PPO_2_10777/actor_000138567380.pth | Hamilton 0.008850272744894028
./Humanoid-v3_PPO_2_10777/actor__000000048075_00079.538.pth | Hamilton 1.2859891285188496e-06
./Humanoid-v3_PPO_2_10777/actor__000000994336_00370.034.pth | Hamilton 4.9215609578823205e-06
./Humanoid-v3_PPO_2_10777/actor__000001949770_01465.227.pth | Hamilton 1.497918401582865e-05
./Humanoid-v3_PPO_2_10777/actor__000002900150_03304.543.pth | Hamilton 4.520599395618774e-05
./Humanoid-v3_PPO_2_10777/actor__000003849593_05999.016.pth | Hamilton 0.00011407655256334692
./Humanoid-v3_PPO_2_10777/actor__000004790113_06778.250.pth | Hamilton 0.00024688299163244665
./Humanoid-v3_PPO_2_10777/actor__000006696339_08280.351.pth | Hamilton 0.0009393827640451491
./Humanoid-v3_PPO_2_10777/actor__000008610364_08713.070.pth | Hamilton 0.004888159688562155
./Humanoid-v3_PPO_2_10777/actor__000017247223_09074.723.pth | Hamilton 0.02047044038772583
./Humanoid-v3_PPO_2_10777/actor__000018215074_09439.867.pth | Hamilton 0.027314746752381325
./Humanoid-v3_PPO_2_10777/actor__000019165560_09809.410.pth | Hamilton 0.028460104018449783
./Humanoid-v3_PPO_2_10777/actor__000027862483_10202.021.pth | Hamilton 0.02325657196342945
./Humanoid-v3_PPO_2_10777/actor__000031728576_10338.116.pth | Hamilton 0.0201749037951231
./Humanoid-v3_PPO_2_10777/actor__000033659016_10362.331.pth | Hamilton 0.020902466028928757
./Humanoid-v3_PPO_2_10777/actor__000035570948_10448.597.pth | Hamilton 0.02343848906457424
./Humanoid-v3_PPO_2_10777/actor__000036520797_10503.301.pth | Hamilton 0.029389051720499992
./Humanoid-v3_PPO_2_10777/actor__000037492639_10506.479.pth | Hamilton 0.029416069388389587
./Humanoid-v3_PPO_2_10777/actor__000041374058_10555.524.pth | Hamilton 0.01877516694366932
./Humanoid-v3_PPO_2_10777/actor__000048122441_10558.827.pth | Hamilton 0.016069049015641212
./Humanoid-v3_PPO_2_10777/actor__000049970944_10668.744.pth | Hamilton 0.01650645025074482
./Humanoid-v3_PPO_2_10777/actor__000051821219_10696.317.pth | Hamilton 0.020140379667282104
./Humanoid-v3_PPO_2_10777/actor__000061055869_10777.529.pth | Hamilton 0.015520906075835228
"""
# Humanoid-v3_PPOHtermK_5_10033
# Evaluation log for one training run: each line is "<checkpoint path> | Hamilton <score>".
# The "actor_<step>.pth" entries are periodic checkpoints (step count in the name);
# the "actor__<step>_<return>.pth" entries near the end are best-so-far snapshots
# whose second number is presumably the episode return at save time — TODO confirm.
# NOTE(review): the block of entries from actor_000015528161.pth through
# actor_000090572912.pth appears TWICE inside this string (the second copy starts
# right after the first actor_000090572912.pth line). Confirm whether the
# duplication is intentional or a copy/paste artifact before deduplicating,
# since any consumer of data24 will see those rows twice.
data24 = """
./Humanoid-v3_PPOHtermK_5_10033/actor_000000217952.pth | Hamilton 3.860610377159901e-05
./Humanoid-v3_PPOHtermK_5_10033/actor_000000802994.pth | Hamilton 5.3925043175695464e-05
./Humanoid-v3_PPOHtermK_5_10033/actor_000001391742.pth | Hamilton 0.00010594926425255835
./Humanoid-v3_PPOHtermK_5_10033/actor_000001986364.pth | Hamilton 0.0001656554959481582
./Humanoid-v3_PPOHtermK_5_10033/actor_000002591318.pth | Hamilton 0.0002541205903980881
./Humanoid-v3_PPOHtermK_5_10033/actor_000003206706.pth | Hamilton 0.00040132226422429085
./Humanoid-v3_PPOHtermK_5_10033/actor_000003843527.pth | Hamilton 0.0006260431837290525
./Humanoid-v3_PPOHtermK_5_10033/actor_000004483994.pth | Hamilton 0.0011863994877785444
./Humanoid-v3_PPOHtermK_5_10033/actor_000005124736.pth | Hamilton 0.0018976032733917236
./Humanoid-v3_PPOHtermK_5_10033/actor_000005762029.pth | Hamilton 0.003409197786822915
./Humanoid-v3_PPOHtermK_5_10033/actor_000006405478.pth | Hamilton 0.006185619160532951
./Humanoid-v3_PPOHtermK_5_10033/actor_000007052962.pth | Hamilton 0.010703757405281067
./Humanoid-v3_PPOHtermK_5_10033/actor_000007697052.pth | Hamilton 0.025355227291584015
./Humanoid-v3_PPOHtermK_5_10033/actor_000008352645.pth | Hamilton 0.08646773546934128
./Humanoid-v3_PPOHtermK_5_10033/actor_000009003333.pth | Hamilton 0.29297369718551636
./Humanoid-v3_PPOHtermK_5_10033/actor_000009664745.pth | Hamilton 0.4933723211288452
./Humanoid-v3_PPOHtermK_5_10033/actor_000010315887.pth | Hamilton 0.6673117280006409
./Humanoid-v3_PPOHtermK_5_10033/actor_000010972401.pth | Hamilton 0.7406782507896423
./Humanoid-v3_PPOHtermK_5_10033/actor_000011626069.pth | Hamilton 0.6894894242286682
./Humanoid-v3_PPOHtermK_5_10033/actor_000012276106.pth | Hamilton 0.7213598489761353
./Humanoid-v3_PPOHtermK_5_10033/actor_000012930702.pth | Hamilton 0.7276442646980286
./Humanoid-v3_PPOHtermK_5_10033/actor_000013578490.pth | Hamilton 0.7638277411460876
./Humanoid-v3_PPOHtermK_5_10033/actor_000014223193.pth | Hamilton 0.8003742098808289
./Humanoid-v3_PPOHtermK_5_10033/actor_000014871192.pth | Hamilton 0.7201029062271118
./Humanoid-v3_PPOHtermK_5_10033/actor_000015528161.pth | Hamilton 0.6946784853935242
./Humanoid-v3_PPOHtermK_5_10033/actor_000016181228.pth | Hamilton 0.679459273815155
./Humanoid-v3_PPOHtermK_5_10033/actor_000016830880.pth | Hamilton 0.6889162063598633
./Humanoid-v3_PPOHtermK_5_10033/actor_000017478959.pth | Hamilton 0.6864667534828186
./Humanoid-v3_PPOHtermK_5_10033/actor_000018129549.pth | Hamilton 0.6885474920272827
./Humanoid-v3_PPOHtermK_5_10033/actor_000018776099.pth | Hamilton 0.6479623317718506
./Humanoid-v3_PPOHtermK_5_10033/actor_000019414221.pth | Hamilton 0.6480258107185364
./Humanoid-v3_PPOHtermK_5_10033/actor_000020064662.pth | Hamilton 0.6343407034873962
./Humanoid-v3_PPOHtermK_5_10033/actor_000020715894.pth | Hamilton 0.6557304263114929
./Humanoid-v3_PPOHtermK_5_10033/actor_000021369272.pth | Hamilton 0.6447092890739441
./Humanoid-v3_PPOHtermK_5_10033/actor_000022015566.pth | Hamilton 0.5809430480003357
./Humanoid-v3_PPOHtermK_5_10033/actor_000022659798.pth | Hamilton 0.5646425485610962
./Humanoid-v3_PPOHtermK_5_10033/actor_000023303083.pth | Hamilton 0.5440018177032471
./Humanoid-v3_PPOHtermK_5_10033/actor_000023944272.pth | Hamilton 0.5671209692955017
./Humanoid-v3_PPOHtermK_5_10033/actor_000024585156.pth | Hamilton 0.5597575902938843
./Humanoid-v3_PPOHtermK_5_10033/actor_000025228355.pth | Hamilton 0.5404171943664551
./Humanoid-v3_PPOHtermK_5_10033/actor_000025873960.pth | Hamilton 0.521878182888031
./Humanoid-v3_PPOHtermK_5_10033/actor_000026515591.pth | Hamilton 0.533275306224823
./Humanoid-v3_PPOHtermK_5_10033/actor_000027155368.pth | Hamilton 0.47113659977912903
./Humanoid-v3_PPOHtermK_5_10033/actor_000027799516.pth | Hamilton 0.4886125922203064
./Humanoid-v3_PPOHtermK_5_10033/actor_000028448052.pth | Hamilton 0.4547804594039917
./Humanoid-v3_PPOHtermK_5_10033/actor_000029089627.pth | Hamilton 0.4707024097442627
./Humanoid-v3_PPOHtermK_5_10033/actor_000029736305.pth | Hamilton 0.5186765789985657
./Humanoid-v3_PPOHtermK_5_10033/actor_000030375812.pth | Hamilton 0.5174707770347595
./Humanoid-v3_PPOHtermK_5_10033/actor_000031029641.pth | Hamilton 0.46292468905448914
./Humanoid-v3_PPOHtermK_5_10033/actor_000031674241.pth | Hamilton 0.4684780240058899
./Humanoid-v3_PPOHtermK_5_10033/actor_000032321121.pth | Hamilton 0.4487498998641968
./Humanoid-v3_PPOHtermK_5_10033/actor_000032968545.pth | Hamilton 0.43523114919662476
./Humanoid-v3_PPOHtermK_5_10033/actor_000033618354.pth | Hamilton 0.4316054582595825
./Humanoid-v3_PPOHtermK_5_10033/actor_000034264725.pth | Hamilton 0.4320639371871948
./Humanoid-v3_PPOHtermK_5_10033/actor_000034907216.pth | Hamilton 0.3904009759426117
./Humanoid-v3_PPOHtermK_5_10033/actor_000035549801.pth | Hamilton 0.3663322627544403
./Humanoid-v3_PPOHtermK_5_10033/actor_000036190338.pth | Hamilton 0.367121160030365
./Humanoid-v3_PPOHtermK_5_10033/actor_000036838849.pth | Hamilton 0.3607599139213562
./Humanoid-v3_PPOHtermK_5_10033/actor_000037485747.pth | Hamilton 0.3512863516807556
./Humanoid-v3_PPOHtermK_5_10033/actor_000038135525.pth | Hamilton 0.3559949994087219
./Humanoid-v3_PPOHtermK_5_10033/actor_000038792644.pth | Hamilton 0.3376719057559967
./Humanoid-v3_PPOHtermK_5_10033/actor_000039439980.pth | Hamilton 0.3056176006793976
./Humanoid-v3_PPOHtermK_5_10033/actor_000040081506.pth | Hamilton 0.3149917423725128
./Humanoid-v3_PPOHtermK_5_10033/actor_000040734245.pth | Hamilton 0.316506564617157
./Humanoid-v3_PPOHtermK_5_10033/actor_000041380628.pth | Hamilton 0.3205588459968567
./Humanoid-v3_PPOHtermK_5_10033/actor_000042020554.pth | Hamilton 0.34845417737960815
./Humanoid-v3_PPOHtermK_5_10033/actor_000042671031.pth | Hamilton 0.3253549635410309
./Humanoid-v3_PPOHtermK_5_10033/actor_000043316542.pth | Hamilton 0.3485141396522522
./Humanoid-v3_PPOHtermK_5_10033/actor_000043957566.pth | Hamilton 0.3213370740413666
./Humanoid-v3_PPOHtermK_5_10033/actor_000044607006.pth | Hamilton 0.331810861825943
./Humanoid-v3_PPOHtermK_5_10033/actor_000045251706.pth | Hamilton 0.3106342852115631
./Humanoid-v3_PPOHtermK_5_10033/actor_000045899224.pth | Hamilton 0.30923983454704285
./Humanoid-v3_PPOHtermK_5_10033/actor_000046542839.pth | Hamilton 0.3040598928928375
./Humanoid-v3_PPOHtermK_5_10033/actor_000047181778.pth | Hamilton 0.3039582669734955
./Humanoid-v3_PPOHtermK_5_10033/actor_000047829134.pth | Hamilton 0.31180083751678467
./Humanoid-v3_PPOHtermK_5_10033/actor_000048474220.pth | Hamilton 0.30465924739837646
./Humanoid-v3_PPOHtermK_5_10033/actor_000049121372.pth | Hamilton 0.3056856691837311
./Humanoid-v3_PPOHtermK_5_10033/actor_000049763971.pth | Hamilton 0.2879406213760376
./Humanoid-v3_PPOHtermK_5_10033/actor_000050407979.pth | Hamilton 0.2534032166004181
./Humanoid-v3_PPOHtermK_5_10033/actor_000051058487.pth | Hamilton 0.24699027836322784
./Humanoid-v3_PPOHtermK_5_10033/actor_000051708393.pth | Hamilton 0.2187887281179428
./Humanoid-v3_PPOHtermK_5_10033/actor_000052351034.pth | Hamilton 0.2457936704158783
./Humanoid-v3_PPOHtermK_5_10033/actor_000053001679.pth | Hamilton 0.25318437814712524
./Humanoid-v3_PPOHtermK_5_10033/actor_000053646376.pth | Hamilton 0.2474513202905655
./Humanoid-v3_PPOHtermK_5_10033/actor_000054291532.pth | Hamilton 0.2376791536808014
./Humanoid-v3_PPOHtermK_5_10033/actor_000054940400.pth | Hamilton 0.23065496981143951
./Humanoid-v3_PPOHtermK_5_10033/actor_000055590713.pth | Hamilton 0.24335090816020966
./Humanoid-v3_PPOHtermK_5_10033/actor_000056231558.pth | Hamilton 0.2432287335395813
./Humanoid-v3_PPOHtermK_5_10033/actor_000056881953.pth | Hamilton 0.22995524108409882
./Humanoid-v3_PPOHtermK_5_10033/actor_000057533824.pth | Hamilton 0.22388096153736115
./Humanoid-v3_PPOHtermK_5_10033/actor_000058181107.pth | Hamilton 0.2096104472875595
./Humanoid-v3_PPOHtermK_5_10033/actor_000058831519.pth | Hamilton 0.21268220245838165
./Humanoid-v3_PPOHtermK_5_10033/actor_000059474197.pth | Hamilton 0.1959238499403
./Humanoid-v3_PPOHtermK_5_10033/actor_000060113265.pth | Hamilton 0.2372029572725296
./Humanoid-v3_PPOHtermK_5_10033/actor_000060757266.pth | Hamilton 0.22364191710948944
./Humanoid-v3_PPOHtermK_5_10033/actor_000061396248.pth | Hamilton 0.1791951060295105
./Humanoid-v3_PPOHtermK_5_10033/actor_000062040274.pth | Hamilton 0.20790275931358337
./Humanoid-v3_PPOHtermK_5_10033/actor_000062688893.pth | Hamilton 0.2170216292142868
./Humanoid-v3_PPOHtermK_5_10033/actor_000063339585.pth | Hamilton 0.20189276337623596
./Humanoid-v3_PPOHtermK_5_10033/actor_000063980937.pth | Hamilton 0.20145785808563232
./Humanoid-v3_PPOHtermK_5_10033/actor_000064625157.pth | Hamilton 0.19421154260635376
./Humanoid-v3_PPOHtermK_5_10033/actor_000065269407.pth | Hamilton 0.18514040112495422
./Humanoid-v3_PPOHtermK_5_10033/actor_000065921029.pth | Hamilton 0.19150440394878387
./Humanoid-v3_PPOHtermK_5_10033/actor_000066569896.pth | Hamilton 0.20003995299339294
./Humanoid-v3_PPOHtermK_5_10033/actor_000067213248.pth | Hamilton 0.184505432844162
./Humanoid-v3_PPOHtermK_5_10033/actor_000067860901.pth | Hamilton 0.17734766006469727
./Humanoid-v3_PPOHtermK_5_10033/actor_000068502165.pth | Hamilton 0.1813606321811676
./Humanoid-v3_PPOHtermK_5_10033/actor_000069149549.pth | Hamilton 0.15829099714756012
./Humanoid-v3_PPOHtermK_5_10033/actor_000069803202.pth | Hamilton 0.15491798520088196
./Humanoid-v3_PPOHtermK_5_10033/actor_000070447280.pth | Hamilton 0.15637344121932983
./Humanoid-v3_PPOHtermK_5_10033/actor_000071093771.pth | Hamilton 0.18704700469970703
./Humanoid-v3_PPOHtermK_5_10033/actor_000071738859.pth | Hamilton 0.16525283455848694
./Humanoid-v3_PPOHtermK_5_10033/actor_000072382847.pth | Hamilton 0.1731967329978943
./Humanoid-v3_PPOHtermK_5_10033/actor_000073033276.pth | Hamilton 0.17823253571987152
./Humanoid-v3_PPOHtermK_5_10033/actor_000073678732.pth | Hamilton 0.17645412683486938
./Humanoid-v3_PPOHtermK_5_10033/actor_000074322972.pth | Hamilton 0.1686391830444336
./Humanoid-v3_PPOHtermK_5_10033/actor_000074967585.pth | Hamilton 0.18566767871379852
./Humanoid-v3_PPOHtermK_5_10033/actor_000075611227.pth | Hamilton 0.14652897417545319
./Humanoid-v3_PPOHtermK_5_10033/actor_000076260052.pth | Hamilton 0.15764778852462769
./Humanoid-v3_PPOHtermK_5_10033/actor_000076907287.pth | Hamilton 0.14451827108860016
./Humanoid-v3_PPOHtermK_5_10033/actor_000077554782.pth | Hamilton 0.17299498617649078
./Humanoid-v3_PPOHtermK_5_10033/actor_000078207189.pth | Hamilton 0.16183245182037354
./Humanoid-v3_PPOHtermK_5_10033/actor_000078859647.pth | Hamilton 0.16055810451507568
./Humanoid-v3_PPOHtermK_5_10033/actor_000079506337.pth | Hamilton 0.15838028490543365
./Humanoid-v3_PPOHtermK_5_10033/actor_000080156603.pth | Hamilton 0.14067484438419342
./Humanoid-v3_PPOHtermK_5_10033/actor_000080803960.pth | Hamilton 0.13240192830562592
./Humanoid-v3_PPOHtermK_5_10033/actor_000081453751.pth | Hamilton 0.1424064189195633
./Humanoid-v3_PPOHtermK_5_10033/actor_000082105760.pth | Hamilton 0.1345972865819931
./Humanoid-v3_PPOHtermK_5_10033/actor_000082752934.pth | Hamilton 0.15307700634002686
./Humanoid-v3_PPOHtermK_5_10033/actor_000083398115.pth | Hamilton 0.12646138668060303
./Humanoid-v3_PPOHtermK_5_10033/actor_000084049150.pth | Hamilton 0.1220104843378067
./Humanoid-v3_PPOHtermK_5_10033/actor_000084699964.pth | Hamilton 0.12697190046310425
./Humanoid-v3_PPOHtermK_5_10033/actor_000085356368.pth | Hamilton 0.13374580442905426
./Humanoid-v3_PPOHtermK_5_10033/actor_000086005598.pth | Hamilton 0.12274452298879623
./Humanoid-v3_PPOHtermK_5_10033/actor_000086651564.pth | Hamilton 0.11796800047159195
./Humanoid-v3_PPOHtermK_5_10033/actor_000087302815.pth | Hamilton 0.11423718929290771
./Humanoid-v3_PPOHtermK_5_10033/actor_000087948771.pth | Hamilton 0.12409746646881104
./Humanoid-v3_PPOHtermK_5_10033/actor_000088603206.pth | Hamilton 0.1219472885131836
./Humanoid-v3_PPOHtermK_5_10033/actor_000089256042.pth | Hamilton 0.1196109727025032
./Humanoid-v3_PPOHtermK_5_10033/actor_000089913507.pth | Hamilton 0.11291047930717468
./Humanoid-v3_PPOHtermK_5_10033/actor_000090572912.pth | Hamilton 0.12043868750333786
./Humanoid-v3_PPOHtermK_5_10033/actor_000015528161.pth | Hamilton 0.6946784853935242
./Humanoid-v3_PPOHtermK_5_10033/actor_000016181228.pth | Hamilton 0.679459273815155
./Humanoid-v3_PPOHtermK_5_10033/actor_000016830880.pth | Hamilton 0.6889162063598633
./Humanoid-v3_PPOHtermK_5_10033/actor_000017478959.pth | Hamilton 0.6864667534828186
./Humanoid-v3_PPOHtermK_5_10033/actor_000018129549.pth | Hamilton 0.6885474920272827
./Humanoid-v3_PPOHtermK_5_10033/actor_000018776099.pth | Hamilton 0.6479623317718506
./Humanoid-v3_PPOHtermK_5_10033/actor_000019414221.pth | Hamilton 0.6480258107185364
./Humanoid-v3_PPOHtermK_5_10033/actor_000020064662.pth | Hamilton 0.6343407034873962
./Humanoid-v3_PPOHtermK_5_10033/actor_000020715894.pth | Hamilton 0.6557304263114929
./Humanoid-v3_PPOHtermK_5_10033/actor_000021369272.pth | Hamilton 0.6447092890739441
./Humanoid-v3_PPOHtermK_5_10033/actor_000022015566.pth | Hamilton 0.5809430480003357
./Humanoid-v3_PPOHtermK_5_10033/actor_000022659798.pth | Hamilton 0.5646425485610962
./Humanoid-v3_PPOHtermK_5_10033/actor_000023303083.pth | Hamilton 0.5440018177032471
./Humanoid-v3_PPOHtermK_5_10033/actor_000023944272.pth | Hamilton 0.5671209692955017
./Humanoid-v3_PPOHtermK_5_10033/actor_000024585156.pth | Hamilton 0.5597575902938843
./Humanoid-v3_PPOHtermK_5_10033/actor_000025228355.pth | Hamilton 0.5404171943664551
./Humanoid-v3_PPOHtermK_5_10033/actor_000025873960.pth | Hamilton 0.521878182888031
./Humanoid-v3_PPOHtermK_5_10033/actor_000026515591.pth | Hamilton 0.533275306224823
./Humanoid-v3_PPOHtermK_5_10033/actor_000027155368.pth | Hamilton 0.47113659977912903
./Humanoid-v3_PPOHtermK_5_10033/actor_000027799516.pth | Hamilton 0.4886125922203064
./Humanoid-v3_PPOHtermK_5_10033/actor_000028448052.pth | Hamilton 0.4547804594039917
./Humanoid-v3_PPOHtermK_5_10033/actor_000029089627.pth | Hamilton 0.4707024097442627
./Humanoid-v3_PPOHtermK_5_10033/actor_000029736305.pth | Hamilton 0.5186765789985657
./Humanoid-v3_PPOHtermK_5_10033/actor_000030375812.pth | Hamilton 0.5174707770347595
./Humanoid-v3_PPOHtermK_5_10033/actor_000031029641.pth | Hamilton 0.46292468905448914
./Humanoid-v3_PPOHtermK_5_10033/actor_000031674241.pth | Hamilton 0.4684780240058899
./Humanoid-v3_PPOHtermK_5_10033/actor_000032321121.pth | Hamilton 0.4487498998641968
./Humanoid-v3_PPOHtermK_5_10033/actor_000032968545.pth | Hamilton 0.43523114919662476
./Humanoid-v3_PPOHtermK_5_10033/actor_000033618354.pth | Hamilton 0.4316054582595825
./Humanoid-v3_PPOHtermK_5_10033/actor_000034264725.pth | Hamilton 0.4320639371871948
./Humanoid-v3_PPOHtermK_5_10033/actor_000034907216.pth | Hamilton 0.3904009759426117
./Humanoid-v3_PPOHtermK_5_10033/actor_000035549801.pth | Hamilton 0.3663322627544403
./Humanoid-v3_PPOHtermK_5_10033/actor_000036190338.pth | Hamilton 0.367121160030365
./Humanoid-v3_PPOHtermK_5_10033/actor_000036838849.pth | Hamilton 0.3607599139213562
./Humanoid-v3_PPOHtermK_5_10033/actor_000037485747.pth | Hamilton 0.3512863516807556
./Humanoid-v3_PPOHtermK_5_10033/actor_000038135525.pth | Hamilton 0.3559949994087219
./Humanoid-v3_PPOHtermK_5_10033/actor_000038792644.pth | Hamilton 0.3376719057559967
./Humanoid-v3_PPOHtermK_5_10033/actor_000039439980.pth | Hamilton 0.3056176006793976
./Humanoid-v3_PPOHtermK_5_10033/actor_000040081506.pth | Hamilton 0.3149917423725128
./Humanoid-v3_PPOHtermK_5_10033/actor_000040734245.pth | Hamilton 0.316506564617157
./Humanoid-v3_PPOHtermK_5_10033/actor_000041380628.pth | Hamilton 0.3205588459968567
./Humanoid-v3_PPOHtermK_5_10033/actor_000042020554.pth | Hamilton 0.34845417737960815
./Humanoid-v3_PPOHtermK_5_10033/actor_000042671031.pth | Hamilton 0.3253549635410309
./Humanoid-v3_PPOHtermK_5_10033/actor_000043316542.pth | Hamilton 0.3485141396522522
./Humanoid-v3_PPOHtermK_5_10033/actor_000043957566.pth | Hamilton 0.3213370740413666
./Humanoid-v3_PPOHtermK_5_10033/actor_000044607006.pth | Hamilton 0.331810861825943
./Humanoid-v3_PPOHtermK_5_10033/actor_000045251706.pth | Hamilton 0.3106342852115631
./Humanoid-v3_PPOHtermK_5_10033/actor_000045899224.pth | Hamilton 0.30923983454704285
./Humanoid-v3_PPOHtermK_5_10033/actor_000046542839.pth | Hamilton 0.3040598928928375
./Humanoid-v3_PPOHtermK_5_10033/actor_000047181778.pth | Hamilton 0.3039582669734955
./Humanoid-v3_PPOHtermK_5_10033/actor_000047829134.pth | Hamilton 0.31180083751678467
./Humanoid-v3_PPOHtermK_5_10033/actor_000048474220.pth | Hamilton 0.30465924739837646
./Humanoid-v3_PPOHtermK_5_10033/actor_000049121372.pth | Hamilton 0.3056856691837311
./Humanoid-v3_PPOHtermK_5_10033/actor_000049763971.pth | Hamilton 0.2879406213760376
./Humanoid-v3_PPOHtermK_5_10033/actor_000050407979.pth | Hamilton 0.2534032166004181
./Humanoid-v3_PPOHtermK_5_10033/actor_000051058487.pth | Hamilton 0.24699027836322784
./Humanoid-v3_PPOHtermK_5_10033/actor_000051708393.pth | Hamilton 0.2187887281179428
./Humanoid-v3_PPOHtermK_5_10033/actor_000052351034.pth | Hamilton 0.2457936704158783
./Humanoid-v3_PPOHtermK_5_10033/actor_000053001679.pth | Hamilton 0.25318437814712524
./Humanoid-v3_PPOHtermK_5_10033/actor_000053646376.pth | Hamilton 0.2474513202905655
./Humanoid-v3_PPOHtermK_5_10033/actor_000054291532.pth | Hamilton 0.2376791536808014
./Humanoid-v3_PPOHtermK_5_10033/actor_000054940400.pth | Hamilton 0.23065496981143951
./Humanoid-v3_PPOHtermK_5_10033/actor_000055590713.pth | Hamilton 0.24335090816020966
./Humanoid-v3_PPOHtermK_5_10033/actor_000056231558.pth | Hamilton 0.2432287335395813
./Humanoid-v3_PPOHtermK_5_10033/actor_000056881953.pth | Hamilton 0.22995524108409882
./Humanoid-v3_PPOHtermK_5_10033/actor_000057533824.pth | Hamilton 0.22388096153736115
./Humanoid-v3_PPOHtermK_5_10033/actor_000058181107.pth | Hamilton 0.2096104472875595
./Humanoid-v3_PPOHtermK_5_10033/actor_000058831519.pth | Hamilton 0.21268220245838165
./Humanoid-v3_PPOHtermK_5_10033/actor_000059474197.pth | Hamilton 0.1959238499403
./Humanoid-v3_PPOHtermK_5_10033/actor_000060113265.pth | Hamilton 0.2372029572725296
./Humanoid-v3_PPOHtermK_5_10033/actor_000060757266.pth | Hamilton 0.22364191710948944
./Humanoid-v3_PPOHtermK_5_10033/actor_000061396248.pth | Hamilton 0.1791951060295105
./Humanoid-v3_PPOHtermK_5_10033/actor_000062040274.pth | Hamilton 0.20790275931358337
./Humanoid-v3_PPOHtermK_5_10033/actor_000062688893.pth | Hamilton 0.2170216292142868
./Humanoid-v3_PPOHtermK_5_10033/actor_000063339585.pth | Hamilton 0.20189276337623596
./Humanoid-v3_PPOHtermK_5_10033/actor_000063980937.pth | Hamilton 0.20145785808563232
./Humanoid-v3_PPOHtermK_5_10033/actor_000064625157.pth | Hamilton 0.19421154260635376
./Humanoid-v3_PPOHtermK_5_10033/actor_000065269407.pth | Hamilton 0.18514040112495422
./Humanoid-v3_PPOHtermK_5_10033/actor_000065921029.pth | Hamilton 0.19150440394878387
./Humanoid-v3_PPOHtermK_5_10033/actor_000066569896.pth | Hamilton 0.20003995299339294
./Humanoid-v3_PPOHtermK_5_10033/actor_000067213248.pth | Hamilton 0.184505432844162
./Humanoid-v3_PPOHtermK_5_10033/actor_000067860901.pth | Hamilton 0.17734766006469727
./Humanoid-v3_PPOHtermK_5_10033/actor_000068502165.pth | Hamilton 0.1813606321811676
./Humanoid-v3_PPOHtermK_5_10033/actor_000069149549.pth | Hamilton 0.15829099714756012
./Humanoid-v3_PPOHtermK_5_10033/actor_000069803202.pth | Hamilton 0.15491798520088196
./Humanoid-v3_PPOHtermK_5_10033/actor_000070447280.pth | Hamilton 0.15637344121932983
./Humanoid-v3_PPOHtermK_5_10033/actor_000071093771.pth | Hamilton 0.18704700469970703
./Humanoid-v3_PPOHtermK_5_10033/actor_000071738859.pth | Hamilton 0.16525283455848694
./Humanoid-v3_PPOHtermK_5_10033/actor_000072382847.pth | Hamilton 0.1731967329978943
./Humanoid-v3_PPOHtermK_5_10033/actor_000073033276.pth | Hamilton 0.17823253571987152
./Humanoid-v3_PPOHtermK_5_10033/actor_000073678732.pth | Hamilton 0.17645412683486938
./Humanoid-v3_PPOHtermK_5_10033/actor_000074322972.pth | Hamilton 0.1686391830444336
./Humanoid-v3_PPOHtermK_5_10033/actor_000074967585.pth | Hamilton 0.18566767871379852
./Humanoid-v3_PPOHtermK_5_10033/actor_000075611227.pth | Hamilton 0.14652897417545319
./Humanoid-v3_PPOHtermK_5_10033/actor_000076260052.pth | Hamilton 0.15764778852462769
./Humanoid-v3_PPOHtermK_5_10033/actor_000076907287.pth | Hamilton 0.14451827108860016
./Humanoid-v3_PPOHtermK_5_10033/actor_000077554782.pth | Hamilton 0.17299498617649078
./Humanoid-v3_PPOHtermK_5_10033/actor_000078207189.pth | Hamilton 0.16183245182037354
./Humanoid-v3_PPOHtermK_5_10033/actor_000078859647.pth | Hamilton 0.16055810451507568
./Humanoid-v3_PPOHtermK_5_10033/actor_000079506337.pth | Hamilton 0.15838028490543365
./Humanoid-v3_PPOHtermK_5_10033/actor_000080156603.pth | Hamilton 0.14067484438419342
./Humanoid-v3_PPOHtermK_5_10033/actor_000080803960.pth | Hamilton 0.13240192830562592
./Humanoid-v3_PPOHtermK_5_10033/actor_000081453751.pth | Hamilton 0.1424064189195633
./Humanoid-v3_PPOHtermK_5_10033/actor_000082105760.pth | Hamilton 0.1345972865819931
./Humanoid-v3_PPOHtermK_5_10033/actor_000082752934.pth | Hamilton 0.15307700634002686
./Humanoid-v3_PPOHtermK_5_10033/actor_000083398115.pth | Hamilton 0.12646138668060303
./Humanoid-v3_PPOHtermK_5_10033/actor_000084049150.pth | Hamilton 0.1220104843378067
./Humanoid-v3_PPOHtermK_5_10033/actor_000084699964.pth | Hamilton 0.12697190046310425
./Humanoid-v3_PPOHtermK_5_10033/actor_000085356368.pth | Hamilton 0.13374580442905426
./Humanoid-v3_PPOHtermK_5_10033/actor_000086005598.pth | Hamilton 0.12274452298879623
./Humanoid-v3_PPOHtermK_5_10033/actor_000086651564.pth | Hamilton 0.11796800047159195
./Humanoid-v3_PPOHtermK_5_10033/actor_000087302815.pth | Hamilton 0.11423718929290771
./Humanoid-v3_PPOHtermK_5_10033/actor_000087948771.pth | Hamilton 0.12409746646881104
./Humanoid-v3_PPOHtermK_5_10033/actor_000088603206.pth | Hamilton 0.1219472885131836
./Humanoid-v3_PPOHtermK_5_10033/actor_000089256042.pth | Hamilton 0.1196109727025032
./Humanoid-v3_PPOHtermK_5_10033/actor_000089913507.pth | Hamilton 0.11291047930717468
./Humanoid-v3_PPOHtermK_5_10033/actor_000090572912.pth | Hamilton 0.12043868750333786
./Humanoid-v3_PPOHtermK_5_10033/actor_000091215266.pth | Hamilton 0.12139196693897247
./Humanoid-v3_PPOHtermK_5_10033/actor_000091875757.pth | Hamilton 0.11408090591430664
./Humanoid-v3_PPOHtermK_5_10033/actor_000092523632.pth | Hamilton 0.1151660829782486
./Humanoid-v3_PPOHtermK_5_10033/actor_000093178069.pth | Hamilton 0.1118767112493515
./Humanoid-v3_PPOHtermK_5_10033/actor_000093835235.pth | Hamilton 0.11775581538677216
./Humanoid-v3_PPOHtermK_5_10033/actor_000094481881.pth | Hamilton 0.1079266220331192
./Humanoid-v3_PPOHtermK_5_10033/actor_000095139652.pth | Hamilton 0.1066155731678009
./Humanoid-v3_PPOHtermK_5_10033/actor_000095787800.pth | Hamilton 0.10187076032161713
./Humanoid-v3_PPOHtermK_5_10033/actor_000096431803.pth | Hamilton 0.12108919769525528
./Humanoid-v3_PPOHtermK_5_10033/actor_000097082571.pth | Hamilton 0.1132136881351471
./Humanoid-v3_PPOHtermK_5_10033/actor_000097736459.pth | Hamilton 0.1021675392985344
./Humanoid-v3_PPOHtermK_5_10033/actor_000098381431.pth | Hamilton 0.10062378644943237
./Humanoid-v3_PPOHtermK_5_10033/actor_000099029560.pth | Hamilton 0.09463013708591461
./Humanoid-v3_PPOHtermK_5_10033/actor_000099678635.pth | Hamilton 0.09559513628482819
./Humanoid-v3_PPOHtermK_5_10033/actor_000100323468.pth | Hamilton 0.10188476741313934
./Humanoid-v3_PPOHtermK_5_10033/actor_000100966020.pth | Hamilton 0.10638144612312317
./Humanoid-v3_PPOHtermK_5_10033/actor_000101616386.pth | Hamilton 0.10570771247148514
./Humanoid-v3_PPOHtermK_5_10033/actor_000102278334.pth | Hamilton 0.10534773021936417
./Humanoid-v3_PPOHtermK_5_10033/actor_000102931182.pth | Hamilton 0.11273243278265
./Humanoid-v3_PPOHtermK_5_10033/actor_000103582134.pth | Hamilton 0.10351397842168808
./Humanoid-v3_PPOHtermK_5_10033/actor_000104230506.pth | Hamilton 0.1016739085316658
./Humanoid-v3_PPOHtermK_5_10033/actor_000104887419.pth | Hamilton 0.10436931997537613
./Humanoid-v3_PPOHtermK_5_10033/actor__000000048166_00240.282.pth | Hamilton 7.867557542340364e-06
./Humanoid-v3_PPOHtermK_5_10033/actor__000000851609_00356.967.pth | Hamilton 2.49532768066274e-05
./Humanoid-v3_PPOHtermK_5_10033/actor__000001663843_01444.155.pth | Hamilton 4.2546420445432886e-05
./Humanoid-v3_PPOHtermK_5_10033/actor__000002463034_02705.989.pth | Hamilton 8.27698822831735e-05
./Humanoid-v3_PPOHtermK_5_10033/actor__000003286628_03589.720.pth | Hamilton 0.0002055414515780285
./Humanoid-v3_PPOHtermK_5_10033/actor__000006565970_05606.597.pth | Hamilton 0.0052353921346366405
./Humanoid-v3_PPOHtermK_5_10033/actor__000008187525_07320.418.pth | Hamilton 0.03157110884785652
./Humanoid-v3_PPOHtermK_5_10033/actor__000009829043_08004.773.pth | Hamilton 0.16415658593177795
./Humanoid-v3_PPOHtermK_5_10033/actor__000013093344_08052.182.pth | Hamilton 0.2120126336812973
./Humanoid-v3_PPOHtermK_5_10033/actor__000016370636_09338.782.pth | Hamilton 0.2443142831325531
./Humanoid-v3_PPOHtermK_5_10033/actor__000022070380_09466.238.pth | Hamilton 0.2382911741733551
./Humanoid-v3_PPOHtermK_5_10033/actor__000023702424_09544.199.pth | Hamilton 0.25375086069107056
./Humanoid-v3_PPOHtermK_5_10033/actor__000027773157_09705.291.pth | Hamilton 0.2661646902561188
./Humanoid-v3_PPOHtermK_5_10033/actor__000029385668_09753.100.pth | Hamilton 0.26711922883987427
./Humanoid-v3_PPOHtermK_5_10033/actor__000039145532_09819.934.pth | Hamilton 0.2415134161710739
./Humanoid-v3_PPOHtermK_5_10033/actor__000041591655_09914.566.pth | Hamilton 0.2548743784427643
./Humanoid-v3_PPOHtermK_5_10033/actor__000050543193_09928.895.pth | Hamilton 0.22058461606502533
./Humanoid-v3_PPOHtermK_5_10033/actor__000061101824_09999.485.pth | Hamilton 0.20243750512599945
./Humanoid-v3_PPOHtermK_5_10033/actor__000070041272_10033.246.pth | Hamilton 0.17469827830791473
"""
# HalfCheetah-v3_PPO_6_7345
# Raw results block for the HalfCheetah-v3 PPO training run tagged "6_7345".
# Each non-empty line is one record: "<checkpoint path> | Hamilton <float>".
# NOTE(review): "Hamilton" appears to be a per-checkpoint scalar metric; its
# exact semantics are not visible in this chunk — confirm against the code
# that parses these data blocks. Filenames with a trailing "_NNNNN.NNN"
# segment presumably encode the evaluation return at save time — verify.
data31 = """
./HalfCheetah-v3_PPO_6_7345/actor_000040000.pth | Hamilton -0.005872336681932211
./HalfCheetah-v3_PPO_6_7345/actor_00016000_-0002.642.pth | Hamilton -0.006636478006839752
./HalfCheetah-v3_PPO_6_7345/actor_000616000.pth | Hamilton -0.0029359194450080395
./HalfCheetah-v3_PPO_6_7345/actor_001224000.pth | Hamilton 0.004109603352844715
./HalfCheetah-v3_PPO_6_7345/actor_001832000.pth | Hamilton 0.007841946557164192
./HalfCheetah-v3_PPO_6_7345/actor_00232000_00235.331.pth | Hamilton 0.0017496153013780713
./HalfCheetah-v3_PPO_6_7345/actor_002408000.pth | Hamilton 0.01348801702260971
./HalfCheetah-v3_PPO_6_7345/actor_003016000.pth | Hamilton 0.016688847914338112
./HalfCheetah-v3_PPO_6_7345/actor_003624000.pth | Hamilton 0.020585883408784866
./HalfCheetah-v3_PPO_6_7345/actor_004232000.pth | Hamilton 0.024912988767027855
./HalfCheetah-v3_PPO_6_7345/actor_00448000_01258.157.pth | Hamilton 0.009395475499331951
./HalfCheetah-v3_PPO_6_7345/actor_004808000.pth | Hamilton 0.024603240191936493
./HalfCheetah-v3_PPO_6_7345/actor_005416000.pth | Hamilton 0.029739920049905777
./HalfCheetah-v3_PPO_6_7345/actor_006024000.pth | Hamilton 0.032133765518665314
./HalfCheetah-v3_PPO_6_7345/actor_006632000.pth | Hamilton 0.036653295159339905
./HalfCheetah-v3_PPO_6_7345/actor_00664000_02425.943.pth | Hamilton 0.015335760079324245
./HalfCheetah-v3_PPO_6_7345/actor_007208000.pth | Hamilton 0.04003371298313141
./HalfCheetah-v3_PPO_6_7345/actor_007816000.pth | Hamilton 0.04159301519393921
./HalfCheetah-v3_PPO_6_7345/actor_008424000.pth | Hamilton 0.04465965926647186
./HalfCheetah-v3_PPO_6_7345/actor_00880000_03148.636.pth | Hamilton 0.020984243601560593
./HalfCheetah-v3_PPO_6_7345/actor_009000000.pth | Hamilton 0.04434854909777641
./HalfCheetah-v3_PPO_6_7345/actor_009608000.pth | Hamilton 0.04638892784714699
./HalfCheetah-v3_PPO_6_7345/actor_010216000.pth | Hamilton 0.046764541417360306
./HalfCheetah-v3_PPO_6_7345/actor_010824000.pth | Hamilton 0.048320356756448746
./HalfCheetah-v3_PPO_6_7345/actor_01096000_04012.116.pth | Hamilton 0.026628999039530754
./HalfCheetah-v3_PPO_6_7345/actor_011400000.pth | Hamilton 0.05695949122309685
./HalfCheetah-v3_PPO_6_7345/actor_012008000.pth | Hamilton 0.059391699731349945
./HalfCheetah-v3_PPO_6_7345/actor_012616000.pth | Hamilton 0.061797790229320526
./HalfCheetah-v3_PPO_6_7345/actor_01312000_04339.188.pth | Hamilton 0.03267954662442207
./HalfCheetah-v3_PPO_6_7345/actor_013192000.pth | Hamilton 0.05834709107875824
./HalfCheetah-v3_PPO_6_7345/actor_013800000.pth | Hamilton 0.05774300917983055
./HalfCheetah-v3_PPO_6_7345/actor_014408000.pth | Hamilton 0.06490640342235565
./HalfCheetah-v3_PPO_6_7345/actor_015016000.pth | Hamilton 0.0703200101852417
./HalfCheetah-v3_PPO_6_7345/actor_01528000_04548.041.pth | Hamilton 0.03685052692890167
./HalfCheetah-v3_PPO_6_7345/actor_015592000.pth | Hamilton 0.07412354648113251
./HalfCheetah-v3_PPO_6_7345/actor_016200000.pth | Hamilton 0.07810869067907333
./HalfCheetah-v3_PPO_6_7345/actor_016808000.pth | Hamilton 0.08110470324754715
./HalfCheetah-v3_PPO_6_7345/actor_017416000.pth | Hamilton 0.07919356226921082
./HalfCheetah-v3_PPO_6_7345/actor_01744000_04920.590.pth | Hamilton 0.040606606751680374
./HalfCheetah-v3_PPO_6_7345/actor_017992000.pth | Hamilton 0.08055456727743149
./HalfCheetah-v3_PPO_6_7345/actor_018600000.pth | Hamilton 0.0801466554403305
./HalfCheetah-v3_PPO_6_7345/actor_019208000.pth | Hamilton 0.08046242594718933
./HalfCheetah-v3_PPO_6_7345/actor_01960000_04981.749.pth | Hamilton 0.046274859458208084
./HalfCheetah-v3_PPO_6_7345/actor_019784000.pth | Hamilton 0.07888739556074142
./HalfCheetah-v3_PPO_6_7345/actor_020392000.pth | Hamilton 0.07489366829395294
./HalfCheetah-v3_PPO_6_7345/actor_021000000.pth | Hamilton 0.07973940670490265
./HalfCheetah-v3_PPO_6_7345/actor_021608000.pth | Hamilton 0.06656301766633987
./HalfCheetah-v3_PPO_6_7345/actor_022216000.pth | Hamilton 0.07961215823888779
./HalfCheetah-v3_PPO_6_7345/actor_022824000.pth | Hamilton 0.08062665164470673
./HalfCheetah-v3_PPO_6_7345/actor_023432000.pth | Hamilton 0.08040913939476013
./HalfCheetah-v3_PPO_6_7345/actor_024040000.pth | Hamilton 0.0787510871887207
./HalfCheetah-v3_PPO_6_7345/actor_024648000.pth | Hamilton 0.06715114414691925
./HalfCheetah-v3_PPO_6_7345/actor_025256000.pth | Hamilton 0.06057201698422432
./HalfCheetah-v3_PPO_6_7345/actor_025864000.pth | Hamilton 0.0651414692401886
./HalfCheetah-v3_PPO_6_7345/actor_026472000.pth | Hamilton 0.06846151500940323
./HalfCheetah-v3_PPO_6_7345/actor_027080000.pth | Hamilton 0.07423406094312668
./HalfCheetah-v3_PPO_6_7345/actor_027688000.pth | Hamilton 0.07751761376857758
./HalfCheetah-v3_PPO_6_7345/actor_028296000.pth | Hamilton 0.08617111295461655
./HalfCheetah-v3_PPO_6_7345/actor_02840000_05071.918.pth | Hamilton 0.049897849559783936
./HalfCheetah-v3_PPO_6_7345/actor_028872000.pth | Hamilton 0.08878238499164581
./HalfCheetah-v3_PPO_6_7345/actor_029480000.pth | Hamilton 0.08737097680568695
./HalfCheetah-v3_PPO_6_7345/actor_030088000.pth | Hamilton 0.09027931839227676
./HalfCheetah-v3_PPO_6_7345/actor_030696000.pth | Hamilton 0.08421589434146881
./HalfCheetah-v3_PPO_6_7345/actor_031304000.pth | Hamilton 0.0880567654967308
./HalfCheetah-v3_PPO_6_7345/actor_031912000.pth | Hamilton 0.08721811324357986
./HalfCheetah-v3_PPO_6_7345/actor_032520000.pth | Hamilton 0.08499513566493988
./HalfCheetah-v3_PPO_6_7345/actor_033128000.pth | Hamilton 0.08582136034965515
./HalfCheetah-v3_PPO_6_7345/actor_033736000.pth | Hamilton 0.0666503757238388
./HalfCheetah-v3_PPO_6_7345/actor_034344000.pth | Hamilton 0.07747967541217804
./HalfCheetah-v3_PPO_6_7345/actor_034952000.pth | Hamilton 0.06972482055425644
./HalfCheetah-v3_PPO_6_7345/actor_035560000.pth | Hamilton 0.08390301465988159
./HalfCheetah-v3_PPO_6_7345/actor_036168000.pth | Hamilton 0.06622278690338135
./HalfCheetah-v3_PPO_6_7345/actor_036776000.pth | Hamilton 0.06079159677028656
./HalfCheetah-v3_PPO_6_7345/actor_037384000.pth | Hamilton 0.0640338584780693
./HalfCheetah-v3_PPO_6_7345/actor_037992000.pth | Hamilton 0.06520006060600281
./HalfCheetah-v3_PPO_6_7345/actor_038600000.pth | Hamilton 0.0707312524318695
./HalfCheetah-v3_PPO_6_7345/actor_039208000.pth | Hamilton 0.05933922156691551
./HalfCheetah-v3_PPO_6_7345/actor_039816000.pth | Hamilton 0.058375731110572815
./HalfCheetah-v3_PPO_6_7345/actor_040424000.pth | Hamilton 0.05523880198597908
./HalfCheetah-v3_PPO_6_7345/actor_041032000.pth | Hamilton 0.04060841724276543
./HalfCheetah-v3_PPO_6_7345/actor_041640000.pth | Hamilton 0.051673419773578644
./HalfCheetah-v3_PPO_6_7345/actor_042248000.pth | Hamilton 0.03648228198289871
./HalfCheetah-v3_PPO_6_7345/actor_042856000.pth | Hamilton 0.033507201820611954
./HalfCheetah-v3_PPO_6_7345/actor_043464000.pth | Hamilton 0.02760108932852745
./HalfCheetah-v3_PPO_6_7345/actor_044072000.pth | Hamilton 0.017205415293574333
./HalfCheetah-v3_PPO_6_7345/actor_044680000.pth | Hamilton 0.018874822184443474
./HalfCheetah-v3_PPO_6_7345/actor_045288000.pth | Hamilton 0.013706916943192482
./HalfCheetah-v3_PPO_6_7345/actor_045896000.pth | Hamilton 0.010614077560603619
./HalfCheetah-v3_PPO_6_7345/actor_046504000.pth | Hamilton 0.011557426303625107
./HalfCheetah-v3_PPO_6_7345/actor_047112000.pth | Hamilton 0.009013171307742596
./HalfCheetah-v3_PPO_6_7345/actor_047720000.pth | Hamilton 0.007466568611562252
./HalfCheetah-v3_PPO_6_7345/actor_048328000.pth | Hamilton 0.006678225938230753
./HalfCheetah-v3_PPO_6_7345/actor_048936000.pth | Hamilton 0.007282154634594917
./HalfCheetah-v3_PPO_6_7345/actor_049544000.pth | Hamilton 0.005795080680400133
./HalfCheetah-v3_PPO_6_7345/actor_050152000.pth | Hamilton 0.00465844152495265
./HalfCheetah-v3_PPO_6_7345/actor_050760000.pth | Hamilton 0.002850534161552787
./HalfCheetah-v3_PPO_6_7345/actor_051368000.pth | Hamilton 0.0025290518533438444
./HalfCheetah-v3_PPO_6_7345/actor_051976000.pth | Hamilton 0.0015020620776340365
./HalfCheetah-v3_PPO_6_7345/actor_052584000.pth | Hamilton 0.0015130398096516728
./HalfCheetah-v3_PPO_6_7345/actor_05288000_05175.296.pth | Hamilton 0.043540842831134796
./HalfCheetah-v3_PPO_6_7345/actor_053160000.pth | Hamilton 0.002797044813632965
./HalfCheetah-v3_PPO_6_7345/actor_053768000.pth | Hamilton 0.003447041381150484
./HalfCheetah-v3_PPO_6_7345/actor_054376000.pth | Hamilton 0.0038953477051109076
./HalfCheetah-v3_PPO_6_7345/actor_054984000.pth | Hamilton 0.0015051416121423244
./HalfCheetah-v3_PPO_6_7345/actor_055592000.pth | Hamilton 0.0008800867944955826
./HalfCheetah-v3_PPO_6_7345/actor_056200000.pth | Hamilton -0.00025415068375878036
./HalfCheetah-v3_PPO_6_7345/actor_056808000.pth | Hamilton -0.0018122748006135225
./HalfCheetah-v3_PPO_6_7345/actor_057416000.pth | Hamilton -0.0012903523165732622
./HalfCheetah-v3_PPO_6_7345/actor_058024000.pth | Hamilton -0.002029893221333623
./HalfCheetah-v3_PPO_6_7345/actor_058632000.pth | Hamilton -0.002473299391567707
./HalfCheetah-v3_PPO_6_7345/actor_059240000.pth | Hamilton -0.002141214907169342
./HalfCheetah-v3_PPO_6_7345/actor_059848000.pth | Hamilton -0.0013618569355458021
./HalfCheetah-v3_PPO_6_7345/actor_060456000.pth | Hamilton -0.001121765235438943
./HalfCheetah-v3_PPO_6_7345/actor_061064000.pth | Hamilton -0.001452176016755402
./HalfCheetah-v3_PPO_6_7345/actor_061672000.pth | Hamilton -0.0010737726697698236
./HalfCheetah-v3_PPO_6_7345/actor_062280000.pth | Hamilton -0.00199855281971395
./HalfCheetah-v3_PPO_6_7345/actor_062888000.pth | Hamilton -0.0017628436908125877
./HalfCheetah-v3_PPO_6_7345/actor_063496000.pth | Hamilton -0.001920493901707232
./HalfCheetah-v3_PPO_6_7345/actor_064104000.pth | Hamilton -0.002032246207818389
./HalfCheetah-v3_PPO_6_7345/actor_064712000.pth | Hamilton -0.002545235212892294
./HalfCheetah-v3_PPO_6_7345/actor_065320000.pth | Hamilton -0.002734317211434245
./HalfCheetah-v3_PPO_6_7345/actor_065928000.pth | Hamilton -0.0031600724905729294
./HalfCheetah-v3_PPO_6_7345/actor_066536000.pth | Hamilton -0.0038524179253727198
./HalfCheetah-v3_PPO_6_7345/actor_067144000.pth | Hamilton -0.003989163786172867
./HalfCheetah-v3_PPO_6_7345/actor_067752000.pth | Hamilton -0.0037428399082273245
./HalfCheetah-v3_PPO_6_7345/actor_068360000.pth | Hamilton -0.0022081949282437563
./HalfCheetah-v3_PPO_6_7345/actor_068968000.pth | Hamilton -0.003189855720847845
./HalfCheetah-v3_PPO_6_7345/actor_069576000.pth | Hamilton -0.003136077895760536
./HalfCheetah-v3_PPO_6_7345/actor_070184000.pth | Hamilton -0.002681328682228923
./HalfCheetah-v3_PPO_6_7345/actor_07048000_05292.822.pth | Hamilton 0.03682773560285568
./HalfCheetah-v3_PPO_6_7345/actor_070760000.pth | Hamilton -0.0014877222711220384
./HalfCheetah-v3_PPO_6_7345/actor_071368000.pth | Hamilton 2.956204662041273e-05
./HalfCheetah-v3_PPO_6_7345/actor_071976000.pth | Hamilton 0.00018542844918556511
./HalfCheetah-v3_PPO_6_7345/actor_072584000.pth | Hamilton -0.00010531547741265967
./HalfCheetah-v3_PPO_6_7345/actor_073192000.pth | Hamilton -0.0001580161915626377
./HalfCheetah-v3_PPO_6_7345/actor_073800000.pth | Hamilton 0.0006441928562708199
./HalfCheetah-v3_PPO_6_7345/actor_074408000.pth | Hamilton 0.0009154545841738582
./HalfCheetah-v3_PPO_6_7345/actor_075016000.pth | Hamilton 0.0009639465715736151
./HalfCheetah-v3_PPO_6_7345/actor_075624000.pth | Hamilton -0.0008632910903543234
./HalfCheetah-v3_PPO_6_7345/actor_076232000.pth | Hamilton -0.0013079025084152818
./HalfCheetah-v3_PPO_6_7345/actor_076840000.pth | Hamilton -0.0025534010492265224
./HalfCheetah-v3_PPO_6_7345/actor_077448000.pth | Hamilton -0.0027133480180054903
./HalfCheetah-v3_PPO_6_7345/actor_078056000.pth | Hamilton -0.0033082144800573587
./HalfCheetah-v3_PPO_6_7345/actor_078664000.pth | Hamilton -0.00236134952865541
./HalfCheetah-v3_PPO_6_7345/actor_079272000.pth | Hamilton -0.0013424543431028724
./HalfCheetah-v3_PPO_6_7345/actor_079880000.pth | Hamilton -0.0013584502739831805
./HalfCheetah-v3_PPO_6_7345/actor_09704000_05337.672.pth | Hamilton 0.03513922542333603
./HalfCheetah-v3_PPO_6_7345/actor_10584000_05420.918.pth | Hamilton 0.036973439157009125
./HalfCheetah-v3_PPO_6_7345/actor_11472000_05442.909.pth | Hamilton 0.037672560662031174
./HalfCheetah-v3_PPO_6_7345/actor_11912000_05496.598.pth | Hamilton 0.040864236652851105
./HalfCheetah-v3_PPO_6_7345/actor_18368000_05623.592.pth | Hamilton 0.04596225544810295
./HalfCheetah-v3_PPO_6_7345/actor_19936000_05728.648.pth | Hamilton 0.048306889832019806
./HalfCheetah-v3_PPO_6_7345/actor_21256000_05866.446.pth | Hamilton 0.05372646823525429
./HalfCheetah-v3_PPO_6_7345/actor_24368000_05902.823.pth | Hamilton 0.05505385249853134
./HalfCheetah-v3_PPO_6_7345/actor_25480000_06074.473.pth | Hamilton 0.05644979327917099
./HalfCheetah-v3_PPO_6_7345/actor_25704000_06142.968.pth | Hamilton 0.05825984105467796
./HalfCheetah-v3_PPO_6_7345/actor_25920000_06197.694.pth | Hamilton 0.0611347071826458
./HalfCheetah-v3_PPO_6_7345/actor_26136000_06252.690.pth | Hamilton 0.06385063380002975
./HalfCheetah-v3_PPO_6_7345/actor_26352000_06321.156.pth | Hamilton 0.0674341470003128
./HalfCheetah-v3_PPO_6_7345/actor_26568000_06511.813.pth | Hamilton 0.07105758041143417
./HalfCheetah-v3_PPO_6_7345/actor_27680000_06594.282.pth | Hamilton 0.07258346676826477
./HalfCheetah-v3_PPO_6_7345/actor_28352000_06627.730.pth | Hamilton 0.07935135066509247
./HalfCheetah-v3_PPO_6_7345/actor_31704000_06656.561.pth | Hamilton 0.07133738696575165
./HalfCheetah-v3_PPO_6_7345/actor_31920000_06773.750.pth | Hamilton 0.07341641932725906
./HalfCheetah-v3_PPO_6_7345/actor_40592000_06797.525.pth | Hamilton 0.03414511680603027
./HalfCheetah-v3_PPO_6_7345/actor_44120000_06861.321.pth | Hamilton 0.008449223823845387
./HalfCheetah-v3_PPO_6_7345/actor_44552000_06930.912.pth | Hamilton 0.008967701345682144
./HalfCheetah-v3_PPO_6_7345/actor_50000000_06947.161.pth | Hamilton 0.002228717552497983
./HalfCheetah-v3_PPO_6_7345/actor_52432000_06987.016.pth | Hamilton -8.268222882179543e-05
./HalfCheetah-v3_PPO_6_7345/actor_57968000_07036.802.pth | Hamilton -0.0029199880082160234
./HalfCheetah-v3_PPO_6_7345/actor_62184000_07073.965.pth | Hamilton -0.003321046242490411
./HalfCheetah-v3_PPO_6_7345/actor_62632000_07190.839.pth | Hamilton -0.0036794249899685383
./HalfCheetah-v3_PPO_6_7345/actor_68456000_07229.387.pth | Hamilton -0.0027174456045031548
./HalfCheetah-v3_PPO_6_7345/actor_70472000_07233.929.pth | Hamilton -0.0018890751525759697
./HalfCheetah-v3_PPO_6_7345/actor_74280000_07259.051.pth | Hamilton -0.000559281266760081
./HalfCheetah-v3_PPO_6_7345/actor_75400000_07265.128.pth | Hamilton -0.00292933639138937
./HalfCheetah-v3_PPO_6_7345/actor_77616000_07295.354.pth | Hamilton -0.0053921714425086975
./HalfCheetah-v3_PPO_6_7345/actor_77832000_07345.128.pth | Hamilton -0.005173020996153355
"""
# HalfCheetah-v3_PPO_1_8964
data32 = """
./HalfCheetah-v3_PPO_1_8964/actor_000000012000.pth | Hamilton -0.003979857079684734
./HalfCheetah-v3_PPO_1_8964/actor_000000116000.pth | Hamilton 0.0005319847259670496
./HalfCheetah-v3_PPO_1_8964/actor_000000220000.pth | Hamilton 0.019786672666668892
./HalfCheetah-v3_PPO_1_8964/actor_000000324000.pth | Hamilton 0.017782604321837425
./HalfCheetah-v3_PPO_1_8964/actor_000000428000.pth | Hamilton 0.03874299302697182
./HalfCheetah-v3_PPO_1_8964/actor_000000532000.pth | Hamilton 0.05288464203476906
./HalfCheetah-v3_PPO_1_8964/actor_000000636000.pth | Hamilton 0.05263258516788483
./HalfCheetah-v3_PPO_1_8964/actor_000000740000.pth | Hamilton 0.0959433764219284
./HalfCheetah-v3_PPO_1_8964/actor_000000844000.pth | Hamilton 0.14203675091266632
./HalfCheetah-v3_PPO_1_8964/actor_000000948000.pth | Hamilton 0.18494977056980133
./HalfCheetah-v3_PPO_1_8964/actor_000001052000.pth | Hamilton 0.24411416053771973
./HalfCheetah-v3_PPO_1_8964/actor_000001156000.pth | Hamilton 0.31679773330688477
./HalfCheetah-v3_PPO_1_8964/actor_000001260000.pth | Hamilton 0.36503687500953674
./HalfCheetah-v3_PPO_1_8964/actor_000001364000.pth | Hamilton 0.4148070514202118
./HalfCheetah-v3_PPO_1_8964/actor_000001468000.pth | Hamilton 0.44580820202827454
./HalfCheetah-v3_PPO_1_8964/actor_000001572000.pth | Hamilton 0.5493637919425964
./HalfCheetah-v3_PPO_1_8964/actor_000001676000.pth | Hamilton 0.6912891864776611
./HalfCheetah-v3_PPO_1_8964/actor_000001780000.pth | Hamilton 0.7460814714431763
./HalfCheetah-v3_PPO_1_8964/actor_000001884000.pth | Hamilton 0.9036700129508972
./HalfCheetah-v3_PPO_1_8964/actor_000001988000.pth | Hamilton 1.0497983694076538
./HalfCheetah-v3_PPO_1_8964/actor_000002092000.pth | Hamilton 1.1463302373886108
./HalfCheetah-v3_PPO_1_8964/actor_000002196000.pth | Hamilton 1.0494126081466675
./HalfCheetah-v3_PPO_1_8964/actor_000002300000.pth | Hamilton 1.2026112079620361
./HalfCheetah-v3_PPO_1_8964/actor_000002404000.pth | Hamilton 1.2105458974838257
./HalfCheetah-v3_PPO_1_8964/actor_000002508000.pth | Hamilton 1.3162106275558472
./HalfCheetah-v3_PPO_1_8964/actor_000002612000.pth | Hamilton 1.1545178890228271
./HalfCheetah-v3_PPO_1_8964/actor_000002716000.pth | Hamilton 1.1219470500946045
./HalfCheetah-v3_PPO_1_8964/actor_000002820000.pth | Hamilton 1.2869540452957153
./HalfCheetah-v3_PPO_1_8964/actor_000002924000.pth | Hamilton 1.5890324115753174
./HalfCheetah-v3_PPO_1_8964/actor_000003028000.pth | Hamilton 1.6122132539749146
./HalfCheetah-v3_PPO_1_8964/actor_000003132000.pth | Hamilton 1.6467660665512085
./HalfCheetah-v3_PPO_1_8964/actor_000003236000.pth | Hamilton 1.8364558219909668
./HalfCheetah-v3_PPO_1_8964/actor_000003340000.pth | Hamilton 1.8676265478134155
./HalfCheetah-v3_PPO_1_8964/actor_000003444000.pth | Hamilton 1.9434665441513062
./HalfCheetah-v3_PPO_1_8964/actor_000003548000.pth | Hamilton 1.7675777673721313
./HalfCheetah-v3_PPO_1_8964/actor_000003652000.pth | Hamilton 1.8838943243026733
./HalfCheetah-v3_PPO_1_8964/actor_000003756000.pth | Hamilton 1.980709433555603
./HalfCheetah-v3_PPO_1_8964/actor_000003860000.pth | Hamilton 1.9419200420379639
./HalfCheetah-v3_PPO_1_8964/actor_000003964000.pth | Hamilton 1.9906342029571533
./HalfCheetah-v3_PPO_1_8964/actor_000004068000.pth | Hamilton 1.957250714302063
./HalfCheetah-v3_PPO_1_8964/actor_000004172000.pth | Hamilton 1.7628307342529297
./HalfCheetah-v3_PPO_1_8964/actor_000004276000.pth | Hamilton 1.7199240922927856
./HalfCheetah-v3_PPO_1_8964/actor_000004380000.pth | Hamilton 1.579308271408081
./HalfCheetah-v3_PPO_1_8964/actor_000004484000.pth | Hamilton 1.5821915864944458
./HalfCheetah-v3_PPO_1_8964/actor_000004588000.pth | Hamilton 1.6405023336410522
./HalfCheetah-v3_PPO_1_8964/actor_000004692000.pth | Hamilton 1.4308905601501465
./HalfCheetah-v3_PPO_1_8964/actor_000004796000.pth | Hamilton 1.5986131429672241
./HalfCheetah-v3_PPO_1_8964/actor_000004900000.pth | Hamilton 1.5916123390197754
./HalfCheetah-v3_PPO_1_8964/actor_000005004000.pth | Hamilton 1.5707824230194092
./HalfCheetah-v3_PPO_1_8964/actor_000005108000.pth | Hamilton 1.816959023475647
./HalfCheetah-v3_PPO_1_8964/actor_000005212000.pth | Hamilton 1.9497828483581543
./HalfCheetah-v3_PPO_1_8964/actor_000005316000.pth | Hamilton 1.9593347311019897
./HalfCheetah-v3_PPO_1_8964/actor_000005420000.pth | Hamilton 2.0021653175354004
./HalfCheetah-v3_PPO_1_8964/actor_000005524000.pth | Hamilton 1.9778954982757568
./HalfCheetah-v3_PPO_1_8964/actor_000005628000.pth | Hamilton 2.145540952682495
./HalfCheetah-v3_PPO_1_8964/actor_000005732000.pth | Hamilton 1.604381799697876
./HalfCheetah-v3_PPO_1_8964/actor_000005836000.pth | Hamilton 1.9640414714813232
./HalfCheetah-v3_PPO_1_8964/actor_000005940000.pth | Hamilton 1.7260267734527588
./HalfCheetah-v3_PPO_1_8964/actor_000006044000.pth | Hamilton 1.913672924041748
./HalfCheetah-v3_PPO_1_8964/actor_000006148000.pth | Hamilton 2.1932449340820312
./HalfCheetah-v3_PPO_1_8964/actor_000006252000.pth | Hamilton 2.0036392211914062
./HalfCheetah-v3_PPO_1_8964/actor_000006356000.pth | Hamilton 2.022392988204956
./HalfCheetah-v3_PPO_1_8964/actor_000006460000.pth | Hamilton 2.0594279766082764
./HalfCheetah-v3_PPO_1_8964/actor_000006564000.pth | Hamilton 1.959631323814392
./HalfCheetah-v3_PPO_1_8964/actor_000006668000.pth | Hamilton 2.004650354385376
./HalfCheetah-v3_PPO_1_8964/actor_000006772000.pth | Hamilton 1.75639009475708
./HalfCheetah-v3_PPO_1_8964/actor_000006876000.pth | Hamilton 1.8495930433273315
./HalfCheetah-v3_PPO_1_8964/actor_000007084000.pth | Hamilton 2.130012273788452
./HalfCheetah-v3_PPO_1_8964/actor_000007188000.pth | Hamilton 1.9571412801742554
./HalfCheetah-v3_PPO_1_8964/actor_000007292000.pth | Hamilton 1.9736922979354858
./HalfCheetah-v3_PPO_1_8964/actor_000007396000.pth | Hamilton 2.212538242340088
./HalfCheetah-v3_PPO_1_8964/actor_000007500000.pth | Hamilton 2.1449477672576904
./HalfCheetah-v3_PPO_1_8964/actor_000007604000.pth | Hamilton 2.0295803546905518
./HalfCheetah-v3_PPO_1_8964/actor_000007708000.pth | Hamilton 1.9582854509353638
./HalfCheetah-v3_PPO_1_8964/actor_000007812000.pth | Hamilton 1.7870659828186035
./HalfCheetah-v3_PPO_1_8964/actor_000007916000.pth | Hamilton 1.9454655647277832
./HalfCheetah-v3_PPO_1_8964/actor_000008020000.pth | Hamilton 1.9795809984207153
./HalfCheetah-v3_PPO_1_8964/actor_000008124000.pth | Hamilton 1.9641070365905762
./HalfCheetah-v3_PPO_1_8964/actor_000008228000.pth | Hamilton 1.897706389427185
./HalfCheetah-v3_PPO_1_8964/actor_000008332000.pth | Hamilton 1.7681528329849243
./HalfCheetah-v3_PPO_1_8964/actor_000008436000.pth | Hamilton 1.632794976234436
./HalfCheetah-v3_PPO_1_8964/actor_000008540000.pth | Hamilton 1.6856034994125366
./HalfCheetah-v3_PPO_1_8964/actor_000008644000.pth | Hamilton 1.4600399732589722
./HalfCheetah-v3_PPO_1_8964/actor_000008748000.pth | Hamilton 1.4734028577804565
./HalfCheetah-v3_PPO_1_8964/actor_000008852000.pth | Hamilton 1.465580701828003
./HalfCheetah-v3_PPO_1_8964/actor_000008956000.pth | Hamilton 1.5756754875183105
./HalfCheetah-v3_PPO_1_8964/actor_000009060000.pth | Hamilton 1.4179878234863281
./HalfCheetah-v3_PPO_1_8964/actor_000009164000.pth | Hamilton 1.5848809480667114
./HalfCheetah-v3_PPO_1_8964/actor_000009268000.pth | Hamilton 1.4485093355178833
./HalfCheetah-v3_PPO_1_8964/actor_000009372000.pth | Hamilton 1.4573742151260376
./HalfCheetah-v3_PPO_1_8964/actor_000009476000.pth | Hamilton 1.6152876615524292
./HalfCheetah-v3_PPO_1_8964/actor_000009580000.pth | Hamilton 1.549185037612915
./HalfCheetah-v3_PPO_1_8964/actor_000009684000.pth | Hamilton 1.6965210437774658
./HalfCheetah-v3_PPO_1_8964/actor_000009788000.pth | Hamilton 1.8398573398590088
./HalfCheetah-v3_PPO_1_8964/actor_000009892000.pth | Hamilton 1.98932945728302
./HalfCheetah-v3_PPO_1_8964/actor_000009996000.pth | Hamilton 1.946791648864746
./HalfCheetah-v3_PPO_1_8964/actor_000010100000.pth | Hamilton 1.743231177330017
./HalfCheetah-v3_PPO_1_8964/actor_000010204000.pth | Hamilton 1.3823740482330322
./HalfCheetah-v3_PPO_1_8964/actor_000010308000.pth | Hamilton 1.3877180814743042
./HalfCheetah-v3_PPO_1_8964/actor_000010412000.pth | Hamilton 1.4385331869125366
./HalfCheetah-v3_PPO_1_8964/actor_000010516000.pth | Hamilton 1.6554721593856812
./HalfCheetah-v3_PPO_1_8964/actor_000010620000.pth | Hamilton 1.727883219718933
./HalfCheetah-v3_PPO_1_8964/actor_000010724000.pth | Hamilton 1.728839635848999
./HalfCheetah-v3_PPO_1_8964/actor_000010828000.pth | Hamilton 1.58816659450531
./HalfCheetah-v3_PPO_1_8964/actor_000010932000.pth | Hamilton 1.6525700092315674
./HalfCheetah-v3_PPO_1_8964/actor_000011036000.pth | Hamilton 1.4716426134109497
./HalfCheetah-v3_PPO_1_8964/actor_000011140000.pth | Hamilton 1.5388532876968384
./HalfCheetah-v3_PPO_1_8964/actor_000011244000.pth | Hamilton 1.297379732131958
./HalfCheetah-v3_PPO_1_8964/actor_000011348000.pth | Hamilton 1.3775428533554077
./HalfCheetah-v3_PPO_1_8964/actor_000011452000.pth | Hamilton 1.409623622894287
./HalfCheetah-v3_PPO_1_8964/actor_000011556000.pth | Hamilton 1.5513663291931152
./HalfCheetah-v3_PPO_1_8964/actor_000011660000.pth | Hamilton 1.486272931098938
./HalfCheetah-v3_PPO_1_8964/actor_000011764000.pth | Hamilton 1.6273846626281738
./HalfCheetah-v3_PPO_1_8964/actor_000011868000.pth | Hamilton 1.6893982887268066
./HalfCheetah-v3_PPO_1_8964/actor_000011972000.pth | Hamilton 1.5729925632476807
./HalfCheetah-v3_PPO_1_8964/actor_000012076000.pth | Hamilton 1.2123165130615234
./HalfCheetah-v3_PPO_1_8964/actor_000012180000.pth | Hamilton 1.3421310186386108
./HalfCheetah-v3_PPO_1_8964/actor_000012284000.pth | Hamilton 1.2298297882080078
./HalfCheetah-v3_PPO_1_8964/actor_000012388000.pth | Hamilton 1.0895754098892212
./HalfCheetah-v3_PPO_1_8964/actor_000012492000.pth | Hamilton 1.1628719568252563
./HalfCheetah-v3_PPO_1_8964/actor_000012596000.pth | Hamilton 1.1025280952453613
./HalfCheetah-v3_PPO_1_8964/actor_000012700000.pth | Hamilton 1.0395756959915161
./HalfCheetah-v3_PPO_1_8964/actor_000012804000.pth | Hamilton 1.1211847066879272
./HalfCheetah-v3_PPO_1_8964/actor_000012908000.pth | Hamilton 0.9943718910217285
./HalfCheetah-v3_PPO_1_8964/actor_000013012000.pth | Hamilton 0.9099668264389038
./HalfCheetah-v3_PPO_1_8964/actor_000013116000.pth | Hamilton 1.0568021535873413
./HalfCheetah-v3_PPO_1_8964/actor_000013220000.pth | Hamilton 1.0103585720062256
./HalfCheetah-v3_PPO_1_8964/actor_000013324000.pth | Hamilton 0.9387027621269226
./HalfCheetah-v3_PPO_1_8964/actor_000013428000.pth | Hamilton 1.0500277280807495
./HalfCheetah-v3_PPO_1_8964/actor_000013532000.pth | Hamilton 1.0901583433151245
./HalfCheetah-v3_PPO_1_8964/actor_000013636000.pth | Hamilton 1.2097352743148804
./HalfCheetah-v3_PPO_1_8964/actor_000013740000.pth | Hamilton 0.9060286283493042
./HalfCheetah-v3_PPO_1_8964/actor_000013844000.pth | Hamilton 0.7584921717643738
./HalfCheetah-v3_PPO_1_8964/actor_000013948000.pth | Hamilton 0.8708493113517761
./HalfCheetah-v3_PPO_1_8964/actor_000014052000.pth | Hamilton 0.9186368584632874
./HalfCheetah-v3_PPO_1_8964/actor_000014156000.pth | Hamilton 0.8337190747261047
./HalfCheetah-v3_PPO_1_8964/actor_000014260000.pth | Hamilton 0.8682726621627808
./HalfCheetah-v3_PPO_1_8964/actor_000014364000.pth | Hamilton 0.6403462290763855
./HalfCheetah-v3_PPO_1_8964/actor_000014468000.pth | Hamilton 0.6070886254310608
./HalfCheetah-v3_PPO_1_8964/actor_000014572000.pth | Hamilton 0.6043576002120972
./HalfCheetah-v3_PPO_1_8964/actor_000014676000.pth | Hamilton 0.48928409814834595
./HalfCheetah-v3_PPO_1_8964/actor_000014780000.pth | Hamilton 0.6327598094940186
./HalfCheetah-v3_PPO_1_8964/actor_000014884000.pth | Hamilton 0.7374769449234009
./HalfCheetah-v3_PPO_1_8964/actor_000014988000.pth | Hamilton 0.8693559765815735
./HalfCheetah-v3_PPO_1_8964/actor_000015092000.pth | Hamilton 0.8096561431884766
./HalfCheetah-v3_PPO_1_8964/actor_000015196000.pth | Hamilton 0.7464600205421448
./HalfCheetah-v3_PPO_1_8964/actor_000015300000.pth | Hamilton 0.8350822329521179
./HalfCheetah-v3_PPO_1_8964/actor_000015404000.pth | Hamilton 0.776115357875824
./HalfCheetah-v3_PPO_1_8964/actor_000015508000.pth | Hamilton 0.6952117681503296
./HalfCheetah-v3_PPO_1_8964/actor_000015612000.pth | Hamilton 0.7679410576820374
./HalfCheetah-v3_PPO_1_8964/actor_000015716000.pth | Hamilton 0.6632360219955444
./HalfCheetah-v3_PPO_1_8964/actor_000015820000.pth | Hamilton 0.6529446840286255
./HalfCheetah-v3_PPO_1_8964/actor_000015924000.pth | Hamilton 0.6130725145339966
./HalfCheetah-v3_PPO_1_8964/actor_000016028000.pth | Hamilton 0.7325723171234131
./HalfCheetah-v3_PPO_1_8964/actor_000016132000.pth | Hamilton 0.7729775309562683
./HalfCheetah-v3_PPO_1_8964/actor_000016236000.pth | Hamilton 0.8849681615829468
./HalfCheetah-v3_PPO_1_8964/actor_000016340000.pth | Hamilton 0.8318505883216858
./HalfCheetah-v3_PPO_1_8964/actor_000016444000.pth | Hamilton 0.8611310124397278
./HalfCheetah-v3_PPO_1_8964/actor_000016548000.pth | Hamilton 0.9104518294334412
./HalfCheetah-v3_PPO_1_8964/actor_000016652000.pth | Hamilton 0.8016515374183655
./HalfCheetah-v3_PPO_1_8964/actor_000016756000.pth | Hamilton 0.7305818796157837
./HalfCheetah-v3_PPO_1_8964/actor_000016860000.pth | Hamilton 0.8303316831588745
./HalfCheetah-v3_PPO_1_8964/actor_000016964000.pth | Hamilton 0.8777560591697693
./HalfCheetah-v3_PPO_1_8964/actor_000017068000.pth | Hamilton 0.7630877494812012
./HalfCheetah-v3_PPO_1_8964/actor_000017172000.pth | Hamilton 0.6742391586303711
./HalfCheetah-v3_PPO_1_8964/actor_000017276000.pth | Hamilton 0.8274958729743958
./HalfCheetah-v3_PPO_1_8964/actor_000017380000.pth | Hamilton 0.7243938446044922
./HalfCheetah-v3_PPO_1_8964/actor_000017484000.pth | Hamilton 0.8354402780532837
./HalfCheetah-v3_PPO_1_8964/actor_000017588000.pth | Hamilton 0.8370580673217773
./HalfCheetah-v3_PPO_1_8964/actor_000017692000.pth | Hamilton 0.7384746074676514
./HalfCheetah-v3_PPO_1_8964/actor_000017796000.pth | Hamilton 0.7266943454742432
./HalfCheetah-v3_PPO_1_8964/actor_000017900000.pth | Hamilton 0.6694714426994324
./HalfCheetah-v3_PPO_1_8964/actor_000018004000.pth | Hamilton 0.6298900246620178
./HalfCheetah-v3_PPO_1_8964/actor_000018108000.pth | Hamilton 0.5625998973846436
./HalfCheetah-v3_PPO_1_8964/actor_000018212000.pth | Hamilton 0.6390281915664673
./HalfCheetah-v3_PPO_1_8964/actor_000018316000.pth | Hamilton 0.6253073811531067
./HalfCheetah-v3_PPO_1_8964/actor_000018420000.pth | Hamilton 0.6052616834640503
./HalfCheetah-v3_PPO_1_8964/actor_000018524000.pth | Hamilton 0.5447152853012085
./HalfCheetah-v3_PPO_1_8964/actor_000018628000.pth | Hamilton 0.5262029767036438
./HalfCheetah-v3_PPO_1_8964/actor_000018732000.pth | Hamilton 0.5712801814079285
./HalfCheetah-v3_PPO_1_8964/actor_000018836000.pth | Hamilton 0.5617592930793762
./HalfCheetah-v3_PPO_1_8964/actor_000018940000.pth | Hamilton 0.4906075894832611
./HalfCheetah-v3_PPO_1_8964/actor_000019044000.pth | Hamilton 0.47344017028808594
./HalfCheetah-v3_PPO_1_8964/actor_000019148000.pth | Hamilton 0.4986529052257538
./HalfCheetah-v3_PPO_1_8964/actor_000019252000.pth | Hamilton 0.5197123289108276
./HalfCheetah-v3_PPO_1_8964/actor_000019356000.pth | Hamilton 0.5097570419311523
./HalfCheetah-v3_PPO_1_8964/actor_000019460000.pth | Hamilton 0.5470317602157593
./HalfCheetah-v3_PPO_1_8964/actor_000019564000.pth | Hamilton 0.44074568152427673
./HalfCheetah-v3_PPO_1_8964/actor_000019668000.pth | Hamilton 0.4194537103176117
./HalfCheetah-v3_PPO_1_8964/actor_000019772000.pth | Hamilton 0.43839964270591736
./HalfCheetah-v3_PPO_1_8964/actor_000019876000.pth | Hamilton 0.41302257776260376
./HalfCheetah-v3_PPO_1_8964/actor_000019980000.pth | Hamilton 0.4682996869087219
./HalfCheetah-v3_PPO_1_8964/actor__000000008000_-0002.710.pth | Hamilton 0.00012464739847928286
./HalfCheetah-v3_PPO_1_8964/actor__000000284000_00189.622.pth | Hamilton 0.001999093219637871
./HalfCheetah-v3_PPO_1_8964/actor__000000560000_02657.518.pth | Hamilton 0.01166764460504055
./HalfCheetah-v3_PPO_1_8964/actor__000000836000_03451.868.pth | Hamilton 0.036034759134054184
./HalfCheetah-v3_PPO_1_8964/actor__000001112000_04043.306.pth | Hamilton 0.06913702189922333
./HalfCheetah-v3_PPO_1_8964/actor__000001388000_04070.153.pth | Hamilton 0.13130733370780945
./HalfCheetah-v3_PPO_1_8964/actor__000001664000_04072.376.pth | Hamilton 0.21832144260406494
./HalfCheetah-v3_PPO_1_8964/actor__000001944000_04077.645.pth | Hamilton 0.2964133322238922
./HalfCheetah-v3_PPO_1_8964/actor__000002500000_05031.218.pth | Hamilton 0.4435080587863922
./HalfCheetah-v3_PPO_1_8964/actor__000003336000_05477.639.pth | Hamilton 0.7189747095108032
./HalfCheetah-v3_PPO_1_8964/actor__000004168000_05759.938.pth | Hamilton 0.8003605604171753
./HalfCheetah-v3_PPO_1_8964/actor__000005284000_06171.977.pth | Hamilton 1.0134514570236206
./HalfCheetah-v3_PPO_1_8964/actor__000005564000_06458.562.pth | Hamilton 1.2020690441131592
./HalfCheetah-v3_PPO_1_8964/actor__000006120000_06708.283.pth | Hamilton 1.231970191001892
./HalfCheetah-v3_PPO_1_8964/actor__000006400000_07166.325.pth | Hamilton 1.353542447090149
./HalfCheetah-v3_PPO_1_8964/actor__000006676000_07416.529.pth | Hamilton 1.3807945251464844
./HalfCheetah-v3_PPO_1_8964/actor__000007236000_07740.555.pth | Hamilton 1.456091046333313
./HalfCheetah-v3_PPO_1_8964/actor__000007516000_07802.231.pth | Hamilton 1.478049635887146
./HalfCheetah-v3_PPO_1_8964/actor__000007796000_07930.319.pth | Hamilton 1.6387865543365479
./HalfCheetah-v3_PPO_1_8964/actor__000008076000_08268.371.pth | Hamilton 1.6573160886764526
./HalfCheetah-v3_PPO_1_8964/actor__000008352000_08475.435.pth | Hamilton 1.5635167360305786
./HalfCheetah-v3_PPO_1_8964/actor__000008912000_08702.439.pth | Hamilton 1.5100343227386475
./HalfCheetah-v3_PPO_1_8964/actor__000009472000_08732.789.pth | Hamilton 1.626786708831787
./HalfCheetah-v3_PPO_1_8964/actor__000010032000_08860.623.pth | Hamilton 1.6608036756515503
./HalfCheetah-v3_PPO_1_8964/actor__000011148000_08963.562.pth | Hamilton 1.5901697874069214
"""
# HalfCheetah-v3_PPOHtermK_5_4949
data33 = """
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000012000.pth | Hamilton -0.0022192897740751505
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000036000.pth | Hamilton 0.001671502715907991
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000060000.pth | Hamilton 0.0017595201497897506
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000084000.pth | Hamilton 0.008719025179743767
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000108000.pth | Hamilton 0.012466237880289555
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000132000.pth | Hamilton 0.016328686848282814
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000156000.pth | Hamilton 0.021737422794103622
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000180000.pth | Hamilton 0.025896403938531876
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000204000.pth | Hamilton 0.02531532570719719
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000228000.pth | Hamilton 0.030677855014801025
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000252000.pth | Hamilton 0.034357644617557526
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000276000.pth | Hamilton 0.03955475240945816
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000300000.pth | Hamilton 0.04633951559662819
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000324000.pth | Hamilton 0.05180974304676056
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000348000.pth | Hamilton 0.056474193930625916
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000372000.pth | Hamilton 0.05993979424238205
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000396000.pth | Hamilton 0.06575837731361389
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000420000.pth | Hamilton 0.07025054842233658
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000444000.pth | Hamilton 0.07429561764001846
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000468000.pth | Hamilton 0.07907746732234955
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000492000.pth | Hamilton 0.08170141279697418
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000516000.pth | Hamilton 0.08792464435100555
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000540000.pth | Hamilton 0.09279599040746689
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000564000.pth | Hamilton 0.0952623263001442
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000588000.pth | Hamilton 0.10001105070114136
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000612000.pth | Hamilton 0.10797237604856491
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000636000.pth | Hamilton 0.11327182501554489
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000660000.pth | Hamilton 0.11834818869829178
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000684000.pth | Hamilton 0.12850329279899597
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000708000.pth | Hamilton 0.1359768956899643
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000732000.pth | Hamilton 0.13775557279586792
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000756000.pth | Hamilton 0.14235512912273407
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000780000.pth | Hamilton 0.14918962121009827
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000804000.pth | Hamilton 0.14918091893196106
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000828000.pth | Hamilton 0.1533832997083664
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000852000.pth | Hamilton 0.15777461230754852
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000876000.pth | Hamilton 0.16406415402889252
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000900000.pth | Hamilton 0.16851083934307098
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000924000.pth | Hamilton 0.1785479635000229
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000948000.pth | Hamilton 0.18719789385795593
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000972000.pth | Hamilton 0.2052137404680252
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000996000.pth | Hamilton 0.2140265554189682
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001020000.pth | Hamilton 0.22289986908435822
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001044000.pth | Hamilton 0.24277041852474213
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001068000.pth | Hamilton 0.25177648663520813
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001092000.pth | Hamilton 0.2607744038105011
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001116000.pth | Hamilton 0.27131083607673645
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001140000.pth | Hamilton 0.28859028220176697
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001164000.pth | Hamilton 0.31462910771369934
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001188000.pth | Hamilton 0.352655291557312
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001212000.pth | Hamilton 0.38206756114959717
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001236000.pth | Hamilton 0.4118475019931793
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001260000.pth | Hamilton 0.45568838715553284
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001284000.pth | Hamilton 0.49979886412620544
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001308000.pth | Hamilton 0.5546624064445496
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001332000.pth | Hamilton 0.6216984391212463
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001356000.pth | Hamilton 0.7039884924888611
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001380000.pth | Hamilton 0.7957115173339844
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001404000.pth | Hamilton 0.8870524168014526
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001428000.pth | Hamilton 0.9810815453529358
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001452000.pth | Hamilton 1.0798819065093994
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001476000.pth | Hamilton 1.1843832731246948
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001500000.pth | Hamilton 1.3090015649795532
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001524000.pth | Hamilton 1.4098291397094727
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001548000.pth | Hamilton 1.523430585861206
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001572000.pth | Hamilton 1.604001760482788
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001596000.pth | Hamilton 1.6777764558792114
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001620000.pth | Hamilton 1.7389109134674072
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001644000.pth | Hamilton 1.8250714540481567
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001668000.pth | Hamilton 1.910683512687683
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001692000.pth | Hamilton 1.9573525190353394
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001716000.pth | Hamilton 2.0052759647369385
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001740000.pth | Hamilton 2.0529730319976807
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001764000.pth | Hamilton 2.1524784564971924
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001788000.pth | Hamilton 2.19614315032959
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001812000.pth | Hamilton 2.241459369659424
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001836000.pth | Hamilton 2.321831703186035
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001860000.pth | Hamilton 2.3643710613250732
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001884000.pth | Hamilton 2.4477851390838623
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001908000.pth | Hamilton 2.477522134780884
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001932000.pth | Hamilton 2.5356552600860596
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001956000.pth | Hamilton 2.6056108474731445
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001980000.pth | Hamilton 2.6734538078308105
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002004000.pth | Hamilton 2.6696009635925293
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002028000.pth | Hamilton 2.627070903778076
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002052000.pth | Hamilton 2.62243390083313
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002076000.pth | Hamilton 2.642043352127075
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002100000.pth | Hamilton 2.6363606452941895
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002124000.pth | Hamilton 2.7448549270629883
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002148000.pth | Hamilton 2.7977919578552246
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002172000.pth | Hamilton 2.8215839862823486
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002196000.pth | Hamilton 2.8511650562286377
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002220000.pth | Hamilton 2.8430416584014893
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002244000.pth | Hamilton 2.9197325706481934
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002268000.pth | Hamilton 2.937256336212158
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002292000.pth | Hamilton 2.9692063331604004
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002316000.pth | Hamilton 3.0173466205596924
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002340000.pth | Hamilton 3.041574478149414
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002364000.pth | Hamilton 2.9953219890594482
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002388000.pth | Hamilton 3.044736385345459
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002412000.pth | Hamilton 2.992907762527466
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002436000.pth | Hamilton 3.008979320526123
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002460000.pth | Hamilton 3.1580424308776855
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002484000.pth | Hamilton 3.214596748352051
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002508000.pth | Hamilton 3.171975612640381
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002532000.pth | Hamilton 3.183350086212158
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002556000.pth | Hamilton 3.1225008964538574
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002580000.pth | Hamilton 3.1598825454711914
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002604000.pth | Hamilton 3.18015718460083
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002628000.pth | Hamilton 3.19087815284729
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002652000.pth | Hamilton 3.3427822589874268
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002676000.pth | Hamilton 3.3374075889587402
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002700000.pth | Hamilton 3.3838040828704834
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002724000.pth | Hamilton 3.367133855819702
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000012000.pth | Hamilton -0.0022192897740751505
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000036000.pth | Hamilton 0.001671502715907991
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000060000.pth | Hamilton 0.0017595201497897506
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000084000.pth | Hamilton 0.008719025179743767
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000108000.pth | Hamilton 0.012466237880289555
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000132000.pth | Hamilton 0.016328686848282814
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000156000.pth | Hamilton 0.021737422794103622
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000180000.pth | Hamilton 0.025896403938531876
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000204000.pth | Hamilton 0.02531532570719719
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000228000.pth | Hamilton 0.030677855014801025
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000252000.pth | Hamilton 0.034357644617557526
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000276000.pth | Hamilton 0.03955475240945816
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000300000.pth | Hamilton 0.04633951559662819
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000324000.pth | Hamilton 0.05180974304676056
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000348000.pth | Hamilton 0.056474193930625916
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000372000.pth | Hamilton 0.05993979424238205
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000396000.pth | Hamilton 0.06575837731361389
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000420000.pth | Hamilton 0.07025054842233658
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000444000.pth | Hamilton 0.07429561764001846
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000468000.pth | Hamilton 0.07907746732234955
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000492000.pth | Hamilton 0.08170141279697418
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000516000.pth | Hamilton 0.08792464435100555
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000540000.pth | Hamilton 0.09279599040746689
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000564000.pth | Hamilton 0.0952623263001442
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000588000.pth | Hamilton 0.10001105070114136
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000612000.pth | Hamilton 0.10797237604856491
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000636000.pth | Hamilton 0.11327182501554489
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000660000.pth | Hamilton 0.11834818869829178
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000684000.pth | Hamilton 0.12850329279899597
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000708000.pth | Hamilton 0.1359768956899643
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000732000.pth | Hamilton 0.13775557279586792
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000756000.pth | Hamilton 0.14235512912273407
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000780000.pth | Hamilton 0.14918962121009827
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000804000.pth | Hamilton 0.14918091893196106
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000828000.pth | Hamilton 0.1533832997083664
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000852000.pth | Hamilton 0.15777461230754852
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000876000.pth | Hamilton 0.16406415402889252
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000900000.pth | Hamilton 0.16851083934307098
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000924000.pth | Hamilton 0.1785479635000229
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000948000.pth | Hamilton 0.18719789385795593
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000972000.pth | Hamilton 0.2052137404680252
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000000996000.pth | Hamilton 0.2140265554189682
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001020000.pth | Hamilton 0.22289986908435822
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001044000.pth | Hamilton 0.24277041852474213
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001068000.pth | Hamilton 0.25177648663520813
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001092000.pth | Hamilton 0.2607744038105011
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001116000.pth | Hamilton 0.27131083607673645
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001140000.pth | Hamilton 0.28859028220176697
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001164000.pth | Hamilton 0.31462910771369934
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001188000.pth | Hamilton 0.352655291557312
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001212000.pth | Hamilton 0.38206756114959717
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001236000.pth | Hamilton 0.4118475019931793
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001260000.pth | Hamilton 0.45568838715553284
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001284000.pth | Hamilton 0.49979886412620544
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001308000.pth | Hamilton 0.5546624064445496
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001332000.pth | Hamilton 0.6216984391212463
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001356000.pth | Hamilton 0.7039884924888611
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001380000.pth | Hamilton 0.7957115173339844
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001404000.pth | Hamilton 0.8870524168014526
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001428000.pth | Hamilton 0.9810815453529358
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001452000.pth | Hamilton 1.0798819065093994
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001476000.pth | Hamilton 1.1843832731246948
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001500000.pth | Hamilton 1.3090015649795532
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001524000.pth | Hamilton 1.4098291397094727
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001548000.pth | Hamilton 1.523430585861206
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001572000.pth | Hamilton 1.604001760482788
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001596000.pth | Hamilton 1.6777764558792114
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001620000.pth | Hamilton 1.7389109134674072
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001644000.pth | Hamilton 1.8250714540481567
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001668000.pth | Hamilton 1.910683512687683
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001692000.pth | Hamilton 1.9573525190353394
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001716000.pth | Hamilton 2.0052759647369385
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001740000.pth | Hamilton 2.0529730319976807
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001764000.pth | Hamilton 2.1524784564971924
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001788000.pth | Hamilton 2.19614315032959
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001812000.pth | Hamilton 2.241459369659424
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001836000.pth | Hamilton 2.321831703186035
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001860000.pth | Hamilton 2.3643710613250732
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001884000.pth | Hamilton 2.4477851390838623
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001908000.pth | Hamilton 2.477522134780884
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001932000.pth | Hamilton 2.5356552600860596
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001956000.pth | Hamilton 2.6056108474731445
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000001980000.pth | Hamilton 2.6734538078308105
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002004000.pth | Hamilton 2.6696009635925293
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002028000.pth | Hamilton 2.627070903778076
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002052000.pth | Hamilton 2.62243390083313
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002076000.pth | Hamilton 2.642043352127075
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002100000.pth | Hamilton 2.6363606452941895
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002124000.pth | Hamilton 2.7448549270629883
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002148000.pth | Hamilton 2.7977919578552246
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002172000.pth | Hamilton 2.8215839862823486
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002196000.pth | Hamilton 2.8511650562286377
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002220000.pth | Hamilton 2.8430416584014893
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002244000.pth | Hamilton 2.9197325706481934
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002268000.pth | Hamilton 2.937256336212158
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002292000.pth | Hamilton 2.9692063331604004
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002316000.pth | Hamilton 3.0173466205596924
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002340000.pth | Hamilton 3.041574478149414
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002364000.pth | Hamilton 2.9953219890594482
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002388000.pth | Hamilton 3.044736385345459
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002412000.pth | Hamilton 2.992907762527466
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002436000.pth | Hamilton 3.008979320526123
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002460000.pth | Hamilton 3.1580424308776855
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002484000.pth | Hamilton 3.214596748352051
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002508000.pth | Hamilton 3.171975612640381
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002532000.pth | Hamilton 3.183350086212158
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002556000.pth | Hamilton 3.1225008964538574
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002580000.pth | Hamilton 3.1598825454711914
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002604000.pth | Hamilton 3.18015718460083
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002628000.pth | Hamilton 3.19087815284729
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002652000.pth | Hamilton 3.3427822589874268
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002676000.pth | Hamilton 3.3374075889587402
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002700000.pth | Hamilton 3.3838040828704834
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002724000.pth | Hamilton 3.367133855819702
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002748000.pth | Hamilton 3.363670825958252
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002772000.pth | Hamilton 3.359429359436035
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002796000.pth | Hamilton 3.4430789947509766
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002820000.pth | Hamilton 3.454576253890991
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002844000.pth | Hamilton 3.4403867721557617
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002868000.pth | Hamilton 3.423570394515991
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002892000.pth | Hamilton 3.453339099884033
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002916000.pth | Hamilton 3.4520444869995117
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002940000.pth | Hamilton 3.489888906478882
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002964000.pth | Hamilton 3.473022699356079
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000002988000.pth | Hamilton 3.499610424041748
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003012000.pth | Hamilton 3.50242018699646
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003036000.pth | Hamilton 3.4452319145202637
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003060000.pth | Hamilton 3.5369558334350586
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003084000.pth | Hamilton 3.5912485122680664
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003108000.pth | Hamilton 3.8077502250671387
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003132000.pth | Hamilton 3.7697091102600098
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003156000.pth | Hamilton 3.794032573699951
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003180000.pth | Hamilton 3.762829542160034
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003204000.pth | Hamilton 3.7414958477020264
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003228000.pth | Hamilton 3.6169679164886475
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003252000.pth | Hamilton 3.6591217517852783
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003276000.pth | Hamilton 3.711569309234619
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003300000.pth | Hamilton 3.7797162532806396
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003324000.pth | Hamilton 3.775984764099121
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003348000.pth | Hamilton 3.77791428565979
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003372000.pth | Hamilton 3.8541243076324463
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003396000.pth | Hamilton 3.87099027633667
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003420000.pth | Hamilton 3.8819098472595215
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003444000.pth | Hamilton 3.823038339614868
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003468000.pth | Hamilton 3.8088345527648926
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003492000.pth | Hamilton 3.822805166244507
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003516000.pth | Hamilton 3.747377634048462
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003540000.pth | Hamilton 3.6352920532226562
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003564000.pth | Hamilton 3.6535186767578125
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003588000.pth | Hamilton 3.5246832370758057
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003612000.pth | Hamilton 3.7176568508148193
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003636000.pth | Hamilton 3.712576389312744
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003660000.pth | Hamilton 3.5636813640594482
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003684000.pth | Hamilton 3.5981481075286865
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003708000.pth | Hamilton 3.7239701747894287
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003732000.pth | Hamilton 3.714066505432129
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003756000.pth | Hamilton 3.7786457538604736
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003780000.pth | Hamilton 3.7550008296966553
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003804000.pth | Hamilton 3.7134289741516113
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003828000.pth | Hamilton 3.765432834625244
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003852000.pth | Hamilton 3.7784621715545654
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003876000.pth | Hamilton 3.764662981033325
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003900000.pth | Hamilton 3.849210739135742
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003924000.pth | Hamilton 3.765622615814209
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003948000.pth | Hamilton 3.753859519958496
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003972000.pth | Hamilton 3.704472780227661
./HalfCheetah-v3_PPOHtermK_5_4949/actor_000003996000.pth | Hamilton 3.8059535026550293
./HalfCheetah-v3_PPOHtermK_5_4949/actor__000000008000_-0003.434.pth | Hamilton 0.007284574210643768
./HalfCheetah-v3_PPOHtermK_5_4949/actor__000000240000_02962.880.pth | Hamilton 0.04446012154221535
./HalfCheetah-v3_PPOHtermK_5_4949/actor__000000472000_03702.555.pth | Hamilton 0.13261261582374573
./HalfCheetah-v3_PPOHtermK_5_4949/actor__000000936000_04361.421.pth | Hamilton 0.6569648385047913
./HalfCheetah-v3_PPOHtermK_5_4949/actor__000001168000_04487.735.pth | Hamilton 1.2779731750488281
./HalfCheetah-v3_PPOHtermK_5_4949/actor__000001628000_04531.853.pth | Hamilton 3.7421069145202637
./HalfCheetah-v3_PPOHtermK_5_4949/actor__000001860000_04664.097.pth | Hamilton 4.067005634307861
./HalfCheetah-v3_PPOHtermK_5_4949/actor__000002324000_04708.989.pth | Hamilton 4.1424055099487305
./HalfCheetah-v3_PPOHtermK_5_4949/actor__000003020000_04831.758.pth | Hamilton 4.269741535186768
./HalfCheetah-v3_PPOHtermK_5_4949/actor__000003716000_04949.211.pth | Hamilton 3.887789487838745
"""
# HalfCheetah-v3_PPOHtermK_5_4837
data34 = """
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000016000.pth | Hamilton -0.0052272179163992405
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000048000.pth | Hamilton -0.00501128239557147
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000080000.pth | Hamilton -0.005209390074014664
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000112000.pth | Hamilton -0.004061874933540821
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000144000.pth | Hamilton -0.0033280844800174236
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000176000.pth | Hamilton -0.002436003414914012
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000208000.pth | Hamilton -0.0015811958583071828
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000240000.pth | Hamilton -0.0006868430064059794
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000272000.pth | Hamilton 0.00035468849819153547
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000304000.pth | Hamilton 0.0015650996938347816
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000336000.pth | Hamilton 0.003586801001802087
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000368000.pth | Hamilton 0.005619054194539785
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000400000.pth | Hamilton 0.006567568052560091
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000432000.pth | Hamilton 0.008935822173953056
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000464000.pth | Hamilton 0.011014323681592941
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000496000.pth | Hamilton 0.012673369608819485
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000528000.pth | Hamilton 0.015275489538908005
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000560000.pth | Hamilton 0.017765501514077187
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000592000.pth | Hamilton 0.02037992514669895
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000624000.pth | Hamilton 0.024035189300775528
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000656000.pth | Hamilton 0.027966413646936417
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000688000.pth | Hamilton 0.03270008787512779
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000720000.pth | Hamilton 0.03818775713443756
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000752000.pth | Hamilton 0.04435112327337265
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000784000.pth | Hamilton 0.05045000836253166
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000816000.pth | Hamilton 0.05860753357410431
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000848000.pth | Hamilton 0.06818471848964691
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000880000.pth | Hamilton 0.07777877897024155
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000912000.pth | Hamilton 0.08937337249517441
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000944000.pth | Hamilton 0.10323651880025864
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000000976000.pth | Hamilton 0.11413406580686569
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001008000.pth | Hamilton 0.13049811124801636
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001040000.pth | Hamilton 0.1464116871356964
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001072000.pth | Hamilton 0.16136382520198822
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001104000.pth | Hamilton 0.17525190114974976
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001136000.pth | Hamilton 0.18909801542758942
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001168000.pth | Hamilton 0.20011106133460999
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001200000.pth | Hamilton 0.2113349586725235
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001232000.pth | Hamilton 0.21900656819343567
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001264000.pth | Hamilton 0.22971762716770172
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001296000.pth | Hamilton 0.23172855377197266
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001328000.pth | Hamilton 0.24196800589561462
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001360000.pth | Hamilton 0.2503260672092438
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001392000.pth | Hamilton 0.2612111568450928
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001424000.pth | Hamilton 0.268466591835022
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001456000.pth | Hamilton 0.2727586328983307
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001488000.pth | Hamilton 0.2801262438297272
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001520000.pth | Hamilton 0.28209495544433594
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001552000.pth | Hamilton 0.28050678968429565
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001584000.pth | Hamilton 0.2817646563053131
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001616000.pth | Hamilton 0.2804020643234253
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001648000.pth | Hamilton 0.28414711356163025
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001680000.pth | Hamilton 0.28006455302238464
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001712000.pth | Hamilton 0.2819520831108093
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001744000.pth | Hamilton 0.2845200002193451
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001776000.pth | Hamilton 0.27944695949554443
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001808000.pth | Hamilton 0.28050899505615234
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001840000.pth | Hamilton 0.2765786349773407
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001872000.pth | Hamilton 0.2799142301082611
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001904000.pth | Hamilton 0.2819344997406006
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001936000.pth | Hamilton 0.28533297777175903
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000001968000.pth | Hamilton 0.2890615463256836
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002000000.pth | Hamilton 0.2875467836856842
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002032000.pth | Hamilton 0.2906314432621002
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002064000.pth | Hamilton 0.2913360297679901
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002096000.pth | Hamilton 0.29197630286216736
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002128000.pth | Hamilton 0.2960330843925476
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002160000.pth | Hamilton 0.2990191578865051
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002192000.pth | Hamilton 0.29608312249183655
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002224000.pth | Hamilton 0.2955667972564697
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002256000.pth | Hamilton 0.2947724461555481
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002288000.pth | Hamilton 0.2937895655632019
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002320000.pth | Hamilton 0.29312241077423096
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002352000.pth | Hamilton 0.29103460907936096
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002384000.pth | Hamilton 0.2860967814922333
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002416000.pth | Hamilton 0.28330108523368835
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002448000.pth | Hamilton 0.28225070238113403
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002480000.pth | Hamilton 0.28095322847366333
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002512000.pth | Hamilton 0.285466730594635
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002544000.pth | Hamilton 0.2890729010105133
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002576000.pth | Hamilton 0.28650757670402527
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002608000.pth | Hamilton 0.28654801845550537
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002640000.pth | Hamilton 0.2901208996772766
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002672000.pth | Hamilton 0.27911803126335144
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002704000.pth | Hamilton 0.2834341526031494
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002736000.pth | Hamilton 0.2818754017353058
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002768000.pth | Hamilton 0.2835969626903534
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002800000.pth | Hamilton 0.28478339314460754
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002832000.pth | Hamilton 0.29131942987442017
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002864000.pth | Hamilton 0.2944019138813019
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002896000.pth | Hamilton 0.29295065999031067
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002928000.pth | Hamilton 0.28273072838783264
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002960000.pth | Hamilton 0.28591814637184143
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000002992000.pth | Hamilton 0.2786034345626831
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003024000.pth | Hamilton 0.2848820388317108
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003056000.pth | Hamilton 0.2830178737640381
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003088000.pth | Hamilton 0.2847789227962494
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003120000.pth | Hamilton 0.28348037600517273
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003152000.pth | Hamilton 0.2796453833580017
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003184000.pth | Hamilton 0.2798386812210083
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003216000.pth | Hamilton 0.2742244303226471
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003248000.pth | Hamilton 0.2687837481498718
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003280000.pth | Hamilton 0.26703011989593506
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003312000.pth | Hamilton 0.2635626792907715
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003344000.pth | Hamilton 0.2661835849285126
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003376000.pth | Hamilton 0.2599141299724579
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003408000.pth | Hamilton 0.2538786828517914
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003440000.pth | Hamilton 0.25305256247520447
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003472000.pth | Hamilton 0.2518955171108246
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003504000.pth | Hamilton 0.2509479224681854
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003536000.pth | Hamilton 0.24942779541015625
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003568000.pth | Hamilton 0.250113844871521
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003600000.pth | Hamilton 0.24620535969734192
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003632000.pth | Hamilton 0.2513544261455536
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003664000.pth | Hamilton 0.2476399838924408
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003696000.pth | Hamilton 0.24878698587417603
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003728000.pth | Hamilton 0.24056336283683777
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003760000.pth | Hamilton 0.24676989018917084
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003792000.pth | Hamilton 0.24746091663837433
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003824000.pth | Hamilton 0.2528934180736542
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003856000.pth | Hamilton 0.24981988966464996
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003888000.pth | Hamilton 0.25292643904685974
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003920000.pth | Hamilton 0.2452276200056076
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003952000.pth | Hamilton 0.24430961906909943
./HalfCheetah-v3_PPOHtermK_5_4837/actor_000003984000.pth | Hamilton 0.24479509890079498
./HalfCheetah-v3_PPOHtermK_5_4837/actor__000000008000_-0000.958.pth | Hamilton 0.009734472259879112
./HalfCheetah-v3_PPOHtermK_5_4837/actor__000000008000_-0001.020.pth | Hamilton 0.011430204845964909
./HalfCheetah-v3_PPOHtermK_5_4837/actor__000000184000_00045.956.pth | Hamilton 0.019349971786141396
./HalfCheetah-v3_PPOHtermK_5_4837/actor__000000184000_00386.380.pth | Hamilton 0.013458400033414364
./HalfCheetah-v3_PPOHtermK_5_4837/actor__000000360000_00568.913.pth | Hamilton 0.01877862960100174
./HalfCheetah-v3_PPOHtermK_5_4837/actor__000000360000_01874.841.pth | Hamilton 0.03162994235754013
./HalfCheetah-v3_PPOHtermK_5_4837/actor__000000536000_02076.972.pth | Hamilton 0.04998861998319626
./HalfCheetah-v3_PPOHtermK_5_4837/actor__000000712000_02021.381.pth | Hamilton 0.0268101803958416
./HalfCheetah-v3_PPOHtermK_5_4837/actor__000000712000_02104.065.pth | Hamilton 0.08478313684463501
./HalfCheetah-v3_PPOHtermK_5_4837/actor__000000888000_02244.290.pth | Hamilton 0.03574884310364723
./HalfCheetah-v3_PPOHtermK_5_4837/actor__000001064000_02295.495.pth | Hamilton 0.056490540504455566
./HalfCheetah-v3_PPOHtermK_5_4837/actor__000001064000_04183.130.pth | Hamilton 0.22685198485851288
./HalfCheetah-v3_PPOHtermK_5_4837/actor__000001412000_04606.642.pth | Hamilton 0.26846200227737427
./HalfCheetah-v3_PPOHtermK_5_4837/actor__000001588000_02740.600.pth | Hamilton 0.0691491961479187
./HalfCheetah-v3_PPOHtermK_5_4837/actor__000001588000_04768.893.pth | Hamilton 0.2661699950695038
./HalfCheetah-v3_PPOHtermK_5_4837/actor__000001936000_04833.800.pth | Hamilton 0.265247106552124
./HalfCheetah-v3_PPOHtermK_5_4837/actor__000002472000_03142.302.pth | Hamilton 0.08244931697845459
./HalfCheetah-v3_PPOHtermK_5_4837/actor__000002832000_03182.860.pth | Hamilton 0.1051744818687439
./HalfCheetah-v3_PPOHtermK_5_4837/actor__000003724000_03264.243.pth | Hamilton 0.08969185501337051
./HalfCheetah-v3_PPOHtermK_5_4837/actor__000003896000_04837.021.pth | Hamilton 0.2369644194841385
"""
# Walker2d-v3_PPOHtermK_5_6196: "checkpoint path | Hamilton <metric>" records.
# NOTE(review): the entries from actor_000001188467 through actor_000018125930
# appear twice in this block (identical paths and values) — confirm whether the
# duplicated run is intentional before using these rows for aggregation.
data41 = """
./Walker2d-v3_PPOHtermK_5_6196/actor_000000074216.pth | Hamilton 0.06103832647204399
./Walker2d-v3_PPOHtermK_5_6196/actor_000000209963.pth | Hamilton 0.10016551613807678
./Walker2d-v3_PPOHtermK_5_6196/actor_000000344457.pth | Hamilton 0.15777799487113953
./Walker2d-v3_PPOHtermK_5_6196/actor_000000479642.pth | Hamilton 0.23624582588672638
./Walker2d-v3_PPOHtermK_5_6196/actor_000000615192.pth | Hamilton 0.36542588472366333
./Walker2d-v3_PPOHtermK_5_6196/actor_000000753149.pth | Hamilton 0.5203403234481812
./Walker2d-v3_PPOHtermK_5_6196/actor_000000891156.pth | Hamilton 0.9866095185279846
./Walker2d-v3_PPOHtermK_5_6196/actor_000001034870.pth | Hamilton 1.4101715087890625
./Walker2d-v3_PPOHtermK_5_6196/actor_000001188467.pth | Hamilton 2.2049710750579834
./Walker2d-v3_PPOHtermK_5_6196/actor_000001343668.pth | Hamilton 2.336531639099121
./Walker2d-v3_PPOHtermK_5_6196/actor_000001497518.pth | Hamilton 2.4956510066986084
./Walker2d-v3_PPOHtermK_5_6196/actor_000001650494.pth | Hamilton 2.591104745864868
./Walker2d-v3_PPOHtermK_5_6196/actor_000001799853.pth | Hamilton 2.8808517456054688
./Walker2d-v3_PPOHtermK_5_6196/actor_000001949103.pth | Hamilton 3.0815041065216064
./Walker2d-v3_PPOHtermK_5_6196/actor_000002098987.pth | Hamilton 3.3642356395721436
./Walker2d-v3_PPOHtermK_5_6196/actor_000002249395.pth | Hamilton 3.401320457458496
./Walker2d-v3_PPOHtermK_5_6196/actor_000002398322.pth | Hamilton 3.5418009757995605
./Walker2d-v3_PPOHtermK_5_6196/actor_000002552127.pth | Hamilton 3.5687568187713623
./Walker2d-v3_PPOHtermK_5_6196/actor_000002698501.pth | Hamilton 3.7166695594787598
./Walker2d-v3_PPOHtermK_5_6196/actor_000002850852.pth | Hamilton 3.7901241779327393
./Walker2d-v3_PPOHtermK_5_6196/actor_000003004430.pth | Hamilton 3.8029255867004395
./Walker2d-v3_PPOHtermK_5_6196/actor_000003151334.pth | Hamilton 3.909519910812378
./Walker2d-v3_PPOHtermK_5_6196/actor_000003300083.pth | Hamilton 3.939710855484009
./Walker2d-v3_PPOHtermK_5_6196/actor_000003457650.pth | Hamilton 3.7962772846221924
./Walker2d-v3_PPOHtermK_5_6196/actor_000003608790.pth | Hamilton 3.8291049003601074
./Walker2d-v3_PPOHtermK_5_6196/actor_000003761464.pth | Hamilton 3.7616467475891113
./Walker2d-v3_PPOHtermK_5_6196/actor_000003909730.pth | Hamilton 3.782686471939087
./Walker2d-v3_PPOHtermK_5_6196/actor_000004062225.pth | Hamilton 3.6201531887054443
./Walker2d-v3_PPOHtermK_5_6196/actor_000004213070.pth | Hamilton 3.744401216506958
./Walker2d-v3_PPOHtermK_5_6196/actor_000004360838.pth | Hamilton 3.649848461151123
./Walker2d-v3_PPOHtermK_5_6196/actor_000004517323.pth | Hamilton 3.7866930961608887
./Walker2d-v3_PPOHtermK_5_6196/actor_000004666107.pth | Hamilton 3.7397091388702393
./Walker2d-v3_PPOHtermK_5_6196/actor_000004819489.pth | Hamilton 3.6510519981384277
./Walker2d-v3_PPOHtermK_5_6196/actor_000004968852.pth | Hamilton 3.6418583393096924
./Walker2d-v3_PPOHtermK_5_6196/actor_000005118393.pth | Hamilton 3.77998423576355
./Walker2d-v3_PPOHtermK_5_6196/actor_000005270020.pth | Hamilton 3.830871105194092
./Walker2d-v3_PPOHtermK_5_6196/actor_000005422202.pth | Hamilton 3.7580339908599854
./Walker2d-v3_PPOHtermK_5_6196/actor_000005570758.pth | Hamilton 3.7497222423553467
./Walker2d-v3_PPOHtermK_5_6196/actor_000005724835.pth | Hamilton 3.6989426612854004
./Walker2d-v3_PPOHtermK_5_6196/actor_000005877260.pth | Hamilton 3.4875621795654297
./Walker2d-v3_PPOHtermK_5_6196/actor_000006030552.pth | Hamilton 3.374180555343628
./Walker2d-v3_PPOHtermK_5_6196/actor_000006181192.pth | Hamilton 3.253258466720581
./Walker2d-v3_PPOHtermK_5_6196/actor_000006334279.pth | Hamilton 3.274677276611328
./Walker2d-v3_PPOHtermK_5_6196/actor_000006488640.pth | Hamilton 3.371246576309204
./Walker2d-v3_PPOHtermK_5_6196/actor_000006638840.pth | Hamilton 3.175199270248413
./Walker2d-v3_PPOHtermK_5_6196/actor_000006790757.pth | Hamilton 3.2314627170562744
./Walker2d-v3_PPOHtermK_5_6196/actor_000006946534.pth | Hamilton 3.156649589538574
./Walker2d-v3_PPOHtermK_5_6196/actor_000007100135.pth | Hamilton 3.099559783935547
./Walker2d-v3_PPOHtermK_5_6196/actor_000007249718.pth | Hamilton 2.9776811599731445
./Walker2d-v3_PPOHtermK_5_6196/actor_000007400556.pth | Hamilton 3.154604196548462
./Walker2d-v3_PPOHtermK_5_6196/actor_000007554527.pth | Hamilton 3.128127336502075
./Walker2d-v3_PPOHtermK_5_6196/actor_000007705558.pth | Hamilton 3.0514307022094727
./Walker2d-v3_PPOHtermK_5_6196/actor_000007851426.pth | Hamilton 2.9579150676727295
./Walker2d-v3_PPOHtermK_5_6196/actor_000008002708.pth | Hamilton 2.943531036376953
./Walker2d-v3_PPOHtermK_5_6196/actor_000008160333.pth | Hamilton 2.9832067489624023
./Walker2d-v3_PPOHtermK_5_6196/actor_000008308268.pth | Hamilton 2.892860174179077
./Walker2d-v3_PPOHtermK_5_6196/actor_000008462521.pth | Hamilton 2.9052670001983643
./Walker2d-v3_PPOHtermK_5_6196/actor_000008614179.pth | Hamilton 3.040005683898926
./Walker2d-v3_PPOHtermK_5_6196/actor_000008764662.pth | Hamilton 2.919335126876831
./Walker2d-v3_PPOHtermK_5_6196/actor_000008916589.pth | Hamilton 2.951991081237793
./Walker2d-v3_PPOHtermK_5_6196/actor_000009068073.pth | Hamilton 3.0373692512512207
./Walker2d-v3_PPOHtermK_5_6196/actor_000009217141.pth | Hamilton 2.975137948989868
./Walker2d-v3_PPOHtermK_5_6196/actor_000009372826.pth | Hamilton 2.9514212608337402
./Walker2d-v3_PPOHtermK_5_6196/actor_000009524853.pth | Hamilton 2.9366204738616943
./Walker2d-v3_PPOHtermK_5_6196/actor_000009676877.pth | Hamilton 2.9007203578948975
./Walker2d-v3_PPOHtermK_5_6196/actor_000009831628.pth | Hamilton 2.920316457748413
./Walker2d-v3_PPOHtermK_5_6196/actor_000009983068.pth | Hamilton 2.9491448402404785
./Walker2d-v3_PPOHtermK_5_6196/actor_000010135066.pth | Hamilton 2.9075927734375
./Walker2d-v3_PPOHtermK_5_6196/actor_000010287643.pth | Hamilton 2.9843502044677734
./Walker2d-v3_PPOHtermK_5_6196/actor_000010437758.pth | Hamilton 3.0238566398620605
./Walker2d-v3_PPOHtermK_5_6196/actor_000010590468.pth | Hamilton 3.0210230350494385
./Walker2d-v3_PPOHtermK_5_6196/actor_000010746240.pth | Hamilton 2.981658697128296
./Walker2d-v3_PPOHtermK_5_6196/actor_000010901101.pth | Hamilton 3.003286123275757
./Walker2d-v3_PPOHtermK_5_6196/actor_000011053508.pth | Hamilton 2.9676616191864014
./Walker2d-v3_PPOHtermK_5_6196/actor_000011206954.pth | Hamilton 2.9344823360443115
./Walker2d-v3_PPOHtermK_5_6196/actor_000011363411.pth | Hamilton 3.0789337158203125
./Walker2d-v3_PPOHtermK_5_6196/actor_000011518327.pth | Hamilton 3.0202269554138184
./Walker2d-v3_PPOHtermK_5_6196/actor_000011671662.pth | Hamilton 2.92421817779541
./Walker2d-v3_PPOHtermK_5_6196/actor_000011822773.pth | Hamilton 2.907562732696533
./Walker2d-v3_PPOHtermK_5_6196/actor_000011976450.pth | Hamilton 3.0149013996124268
./Walker2d-v3_PPOHtermK_5_6196/actor_000012127555.pth | Hamilton 2.92812442779541
./Walker2d-v3_PPOHtermK_5_6196/actor_000012279751.pth | Hamilton 3.0243935585021973
./Walker2d-v3_PPOHtermK_5_6196/actor_000012429460.pth | Hamilton 2.971428632736206
./Walker2d-v3_PPOHtermK_5_6196/actor_000012584028.pth | Hamilton 2.9174630641937256
./Walker2d-v3_PPOHtermK_5_6196/actor_000012736334.pth | Hamilton 2.889002799987793
./Walker2d-v3_PPOHtermK_5_6196/actor_000012894115.pth | Hamilton 2.917287588119507
./Walker2d-v3_PPOHtermK_5_6196/actor_000013047090.pth | Hamilton 2.8693926334381104
./Walker2d-v3_PPOHtermK_5_6196/actor_000013200313.pth | Hamilton 2.855473518371582
./Walker2d-v3_PPOHtermK_5_6196/actor_000013351610.pth | Hamilton 2.7716429233551025
./Walker2d-v3_PPOHtermK_5_6196/actor_000013502588.pth | Hamilton 2.7581980228424072
./Walker2d-v3_PPOHtermK_5_6196/actor_000013662765.pth | Hamilton 2.785093307495117
./Walker2d-v3_PPOHtermK_5_6196/actor_000013814892.pth | Hamilton 2.7097363471984863
./Walker2d-v3_PPOHtermK_5_6196/actor_000013967975.pth | Hamilton 2.664146900177002
./Walker2d-v3_PPOHtermK_5_6196/actor_000014121000.pth | Hamilton 2.6454734802246094
./Walker2d-v3_PPOHtermK_5_6196/actor_000014274063.pth | Hamilton 2.6277332305908203
./Walker2d-v3_PPOHtermK_5_6196/actor_000014423343.pth | Hamilton 2.6705427169799805
./Walker2d-v3_PPOHtermK_5_6196/actor_000014575920.pth | Hamilton 2.628743886947632
./Walker2d-v3_PPOHtermK_5_6196/actor_000014727738.pth | Hamilton 2.6034529209136963
./Walker2d-v3_PPOHtermK_5_6196/actor_000014885212.pth | Hamilton 2.6350207328796387
./Walker2d-v3_PPOHtermK_5_6196/actor_000015036996.pth | Hamilton 2.533966064453125
./Walker2d-v3_PPOHtermK_5_6196/actor_000015193142.pth | Hamilton 2.6095809936523438
./Walker2d-v3_PPOHtermK_5_6196/actor_000015348071.pth | Hamilton 2.5884549617767334
./Walker2d-v3_PPOHtermK_5_6196/actor_000015504112.pth | Hamilton 2.5417354106903076
./Walker2d-v3_PPOHtermK_5_6196/actor_000015656793.pth | Hamilton 2.540105104446411
./Walker2d-v3_PPOHtermK_5_6196/actor_000015812951.pth | Hamilton 2.5189602375030518
./Walker2d-v3_PPOHtermK_5_6196/actor_000015963729.pth | Hamilton 2.5464396476745605
./Walker2d-v3_PPOHtermK_5_6196/actor_000016113654.pth | Hamilton 2.573174238204956
./Walker2d-v3_PPOHtermK_5_6196/actor_000016268694.pth | Hamilton 2.531381607055664
./Walker2d-v3_PPOHtermK_5_6196/actor_000016421799.pth | Hamilton 2.568452835083008
./Walker2d-v3_PPOHtermK_5_6196/actor_000016578825.pth | Hamilton 2.5195488929748535
./Walker2d-v3_PPOHtermK_5_6196/actor_000016730802.pth | Hamilton 2.5331168174743652
./Walker2d-v3_PPOHtermK_5_6196/actor_000016887124.pth | Hamilton 2.497105836868286
./Walker2d-v3_PPOHtermK_5_6196/actor_000017041659.pth | Hamilton 2.556220054626465
./Walker2d-v3_PPOHtermK_5_6196/actor_000017198533.pth | Hamilton 2.563156843185425
./Walker2d-v3_PPOHtermK_5_6196/actor_000017352984.pth | Hamilton 2.559330940246582
./Walker2d-v3_PPOHtermK_5_6196/actor_000017505829.pth | Hamilton 2.488677501678467
./Walker2d-v3_PPOHtermK_5_6196/actor_000017664394.pth | Hamilton 2.4741194248199463
./Walker2d-v3_PPOHtermK_5_6196/actor_000017817404.pth | Hamilton 2.5255026817321777
./Walker2d-v3_PPOHtermK_5_6196/actor_000017971696.pth | Hamilton 2.4651010036468506
./Walker2d-v3_PPOHtermK_5_6196/actor_000018125930.pth | Hamilton 2.4670584201812744
./Walker2d-v3_PPOHtermK_5_6196/actor_000001188467.pth | Hamilton 2.2049710750579834
./Walker2d-v3_PPOHtermK_5_6196/actor_000001343668.pth | Hamilton 2.336531639099121
./Walker2d-v3_PPOHtermK_5_6196/actor_000001497518.pth | Hamilton 2.4956510066986084
./Walker2d-v3_PPOHtermK_5_6196/actor_000001650494.pth | Hamilton 2.591104745864868
./Walker2d-v3_PPOHtermK_5_6196/actor_000001799853.pth | Hamilton 2.8808517456054688
./Walker2d-v3_PPOHtermK_5_6196/actor_000001949103.pth | Hamilton 3.0815041065216064
./Walker2d-v3_PPOHtermK_5_6196/actor_000002098987.pth | Hamilton 3.3642356395721436
./Walker2d-v3_PPOHtermK_5_6196/actor_000002249395.pth | Hamilton 3.401320457458496
./Walker2d-v3_PPOHtermK_5_6196/actor_000002398322.pth | Hamilton 3.5418009757995605
./Walker2d-v3_PPOHtermK_5_6196/actor_000002552127.pth | Hamilton 3.5687568187713623
./Walker2d-v3_PPOHtermK_5_6196/actor_000002698501.pth | Hamilton 3.7166695594787598
./Walker2d-v3_PPOHtermK_5_6196/actor_000002850852.pth | Hamilton 3.7901241779327393
./Walker2d-v3_PPOHtermK_5_6196/actor_000003004430.pth | Hamilton 3.8029255867004395
./Walker2d-v3_PPOHtermK_5_6196/actor_000003151334.pth | Hamilton 3.909519910812378
./Walker2d-v3_PPOHtermK_5_6196/actor_000003300083.pth | Hamilton 3.939710855484009
./Walker2d-v3_PPOHtermK_5_6196/actor_000003457650.pth | Hamilton 3.7962772846221924
./Walker2d-v3_PPOHtermK_5_6196/actor_000003608790.pth | Hamilton 3.8291049003601074
./Walker2d-v3_PPOHtermK_5_6196/actor_000003761464.pth | Hamilton 3.7616467475891113
./Walker2d-v3_PPOHtermK_5_6196/actor_000003909730.pth | Hamilton 3.782686471939087
./Walker2d-v3_PPOHtermK_5_6196/actor_000004062225.pth | Hamilton 3.6201531887054443
./Walker2d-v3_PPOHtermK_5_6196/actor_000004213070.pth | Hamilton 3.744401216506958
./Walker2d-v3_PPOHtermK_5_6196/actor_000004360838.pth | Hamilton 3.649848461151123
./Walker2d-v3_PPOHtermK_5_6196/actor_000004517323.pth | Hamilton 3.7866930961608887
./Walker2d-v3_PPOHtermK_5_6196/actor_000004666107.pth | Hamilton 3.7397091388702393
./Walker2d-v3_PPOHtermK_5_6196/actor_000004819489.pth | Hamilton 3.6510519981384277
./Walker2d-v3_PPOHtermK_5_6196/actor_000004968852.pth | Hamilton 3.6418583393096924
./Walker2d-v3_PPOHtermK_5_6196/actor_000005118393.pth | Hamilton 3.77998423576355
./Walker2d-v3_PPOHtermK_5_6196/actor_000005270020.pth | Hamilton 3.830871105194092
./Walker2d-v3_PPOHtermK_5_6196/actor_000005422202.pth | Hamilton 3.7580339908599854
./Walker2d-v3_PPOHtermK_5_6196/actor_000005570758.pth | Hamilton 3.7497222423553467
./Walker2d-v3_PPOHtermK_5_6196/actor_000005724835.pth | Hamilton 3.6989426612854004
./Walker2d-v3_PPOHtermK_5_6196/actor_000005877260.pth | Hamilton 3.4875621795654297
./Walker2d-v3_PPOHtermK_5_6196/actor_000006030552.pth | Hamilton 3.374180555343628
./Walker2d-v3_PPOHtermK_5_6196/actor_000006181192.pth | Hamilton 3.253258466720581
./Walker2d-v3_PPOHtermK_5_6196/actor_000006334279.pth | Hamilton 3.274677276611328
./Walker2d-v3_PPOHtermK_5_6196/actor_000006488640.pth | Hamilton 3.371246576309204
./Walker2d-v3_PPOHtermK_5_6196/actor_000006638840.pth | Hamilton 3.175199270248413
./Walker2d-v3_PPOHtermK_5_6196/actor_000006790757.pth | Hamilton 3.2314627170562744
./Walker2d-v3_PPOHtermK_5_6196/actor_000006946534.pth | Hamilton 3.156649589538574
./Walker2d-v3_PPOHtermK_5_6196/actor_000007100135.pth | Hamilton 3.099559783935547
./Walker2d-v3_PPOHtermK_5_6196/actor_000007249718.pth | Hamilton 2.9776811599731445
./Walker2d-v3_PPOHtermK_5_6196/actor_000007400556.pth | Hamilton 3.154604196548462
./Walker2d-v3_PPOHtermK_5_6196/actor_000007554527.pth | Hamilton 3.128127336502075
./Walker2d-v3_PPOHtermK_5_6196/actor_000007705558.pth | Hamilton 3.0514307022094727
./Walker2d-v3_PPOHtermK_5_6196/actor_000007851426.pth | Hamilton 2.9579150676727295
./Walker2d-v3_PPOHtermK_5_6196/actor_000008002708.pth | Hamilton 2.943531036376953
./Walker2d-v3_PPOHtermK_5_6196/actor_000008160333.pth | Hamilton 2.9832067489624023
./Walker2d-v3_PPOHtermK_5_6196/actor_000008308268.pth | Hamilton 2.892860174179077
./Walker2d-v3_PPOHtermK_5_6196/actor_000008462521.pth | Hamilton 2.9052670001983643
./Walker2d-v3_PPOHtermK_5_6196/actor_000008614179.pth | Hamilton 3.040005683898926
./Walker2d-v3_PPOHtermK_5_6196/actor_000008764662.pth | Hamilton 2.919335126876831
./Walker2d-v3_PPOHtermK_5_6196/actor_000008916589.pth | Hamilton 2.951991081237793
./Walker2d-v3_PPOHtermK_5_6196/actor_000009068073.pth | Hamilton 3.0373692512512207
./Walker2d-v3_PPOHtermK_5_6196/actor_000009217141.pth | Hamilton 2.975137948989868
./Walker2d-v3_PPOHtermK_5_6196/actor_000009372826.pth | Hamilton 2.9514212608337402
./Walker2d-v3_PPOHtermK_5_6196/actor_000009524853.pth | Hamilton 2.9366204738616943
./Walker2d-v3_PPOHtermK_5_6196/actor_000009676877.pth | Hamilton 2.9007203578948975
./Walker2d-v3_PPOHtermK_5_6196/actor_000009831628.pth | Hamilton 2.920316457748413
./Walker2d-v3_PPOHtermK_5_6196/actor_000009983068.pth | Hamilton 2.9491448402404785
./Walker2d-v3_PPOHtermK_5_6196/actor_000010135066.pth | Hamilton 2.9075927734375
./Walker2d-v3_PPOHtermK_5_6196/actor_000010287643.pth | Hamilton 2.9843502044677734
./Walker2d-v3_PPOHtermK_5_6196/actor_000010437758.pth | Hamilton 3.0238566398620605
./Walker2d-v3_PPOHtermK_5_6196/actor_000010590468.pth | Hamilton 3.0210230350494385
./Walker2d-v3_PPOHtermK_5_6196/actor_000010746240.pth | Hamilton 2.981658697128296
./Walker2d-v3_PPOHtermK_5_6196/actor_000010901101.pth | Hamilton 3.003286123275757
./Walker2d-v3_PPOHtermK_5_6196/actor_000011053508.pth | Hamilton 2.9676616191864014
./Walker2d-v3_PPOHtermK_5_6196/actor_000011206954.pth | Hamilton 2.9344823360443115
./Walker2d-v3_PPOHtermK_5_6196/actor_000011363411.pth | Hamilton 3.0789337158203125
./Walker2d-v3_PPOHtermK_5_6196/actor_000011518327.pth | Hamilton 3.0202269554138184
./Walker2d-v3_PPOHtermK_5_6196/actor_000011671662.pth | Hamilton 2.92421817779541
./Walker2d-v3_PPOHtermK_5_6196/actor_000011822773.pth | Hamilton 2.907562732696533
./Walker2d-v3_PPOHtermK_5_6196/actor_000011976450.pth | Hamilton 3.0149013996124268
./Walker2d-v3_PPOHtermK_5_6196/actor_000012127555.pth | Hamilton 2.92812442779541
./Walker2d-v3_PPOHtermK_5_6196/actor_000012279751.pth | Hamilton 3.0243935585021973
./Walker2d-v3_PPOHtermK_5_6196/actor_000012429460.pth | Hamilton 2.971428632736206
./Walker2d-v3_PPOHtermK_5_6196/actor_000012584028.pth | Hamilton 2.9174630641937256
./Walker2d-v3_PPOHtermK_5_6196/actor_000012736334.pth | Hamilton 2.889002799987793
./Walker2d-v3_PPOHtermK_5_6196/actor_000012894115.pth | Hamilton 2.917287588119507
./Walker2d-v3_PPOHtermK_5_6196/actor_000013047090.pth | Hamilton 2.8693926334381104
./Walker2d-v3_PPOHtermK_5_6196/actor_000013200313.pth | Hamilton 2.855473518371582
./Walker2d-v3_PPOHtermK_5_6196/actor_000013351610.pth | Hamilton 2.7716429233551025
./Walker2d-v3_PPOHtermK_5_6196/actor_000013502588.pth | Hamilton 2.7581980228424072
./Walker2d-v3_PPOHtermK_5_6196/actor_000013662765.pth | Hamilton 2.785093307495117
./Walker2d-v3_PPOHtermK_5_6196/actor_000013814892.pth | Hamilton 2.7097363471984863
./Walker2d-v3_PPOHtermK_5_6196/actor_000013967975.pth | Hamilton 2.664146900177002
./Walker2d-v3_PPOHtermK_5_6196/actor_000014121000.pth | Hamilton 2.6454734802246094
./Walker2d-v3_PPOHtermK_5_6196/actor_000014274063.pth | Hamilton 2.6277332305908203
./Walker2d-v3_PPOHtermK_5_6196/actor_000014423343.pth | Hamilton 2.6705427169799805
./Walker2d-v3_PPOHtermK_5_6196/actor_000014575920.pth | Hamilton 2.628743886947632
./Walker2d-v3_PPOHtermK_5_6196/actor_000014727738.pth | Hamilton 2.6034529209136963
./Walker2d-v3_PPOHtermK_5_6196/actor_000014885212.pth | Hamilton 2.6350207328796387
./Walker2d-v3_PPOHtermK_5_6196/actor_000015036996.pth | Hamilton 2.533966064453125
./Walker2d-v3_PPOHtermK_5_6196/actor_000015193142.pth | Hamilton 2.6095809936523438
./Walker2d-v3_PPOHtermK_5_6196/actor_000015348071.pth | Hamilton 2.5884549617767334
./Walker2d-v3_PPOHtermK_5_6196/actor_000015504112.pth | Hamilton 2.5417354106903076
./Walker2d-v3_PPOHtermK_5_6196/actor_000015656793.pth | Hamilton 2.540105104446411
./Walker2d-v3_PPOHtermK_5_6196/actor_000015812951.pth | Hamilton 2.5189602375030518
./Walker2d-v3_PPOHtermK_5_6196/actor_000015963729.pth | Hamilton 2.5464396476745605
./Walker2d-v3_PPOHtermK_5_6196/actor_000016113654.pth | Hamilton 2.573174238204956
./Walker2d-v3_PPOHtermK_5_6196/actor_000016268694.pth | Hamilton 2.531381607055664
./Walker2d-v3_PPOHtermK_5_6196/actor_000016421799.pth | Hamilton 2.568452835083008
./Walker2d-v3_PPOHtermK_5_6196/actor_000016578825.pth | Hamilton 2.5195488929748535
./Walker2d-v3_PPOHtermK_5_6196/actor_000016730802.pth | Hamilton 2.5331168174743652
./Walker2d-v3_PPOHtermK_5_6196/actor_000016887124.pth | Hamilton 2.497105836868286
./Walker2d-v3_PPOHtermK_5_6196/actor_000017041659.pth | Hamilton 2.556220054626465
./Walker2d-v3_PPOHtermK_5_6196/actor_000017198533.pth | Hamilton 2.563156843185425
./Walker2d-v3_PPOHtermK_5_6196/actor_000017352984.pth | Hamilton 2.559330940246582
./Walker2d-v3_PPOHtermK_5_6196/actor_000017505829.pth | Hamilton 2.488677501678467
./Walker2d-v3_PPOHtermK_5_6196/actor_000017664394.pth | Hamilton 2.4741194248199463
./Walker2d-v3_PPOHtermK_5_6196/actor_000017817404.pth | Hamilton 2.5255026817321777
./Walker2d-v3_PPOHtermK_5_6196/actor_000017971696.pth | Hamilton 2.4651010036468506
./Walker2d-v3_PPOHtermK_5_6196/actor_000018125930.pth | Hamilton 2.4670584201812744
./Walker2d-v3_PPOHtermK_5_6196/actor_000018283393.pth | Hamilton 2.5160789489746094
./Walker2d-v3_PPOHtermK_5_6196/actor_000018435319.pth | Hamilton 2.570801258087158
./Walker2d-v3_PPOHtermK_5_6196/actor_000018591025.pth | Hamilton 2.5854737758636475
./Walker2d-v3_PPOHtermK_5_6196/actor_000018745931.pth | Hamilton 2.567007303237915
./Walker2d-v3_PPOHtermK_5_6196/actor_000018905797.pth | Hamilton 2.6150877475738525
./Walker2d-v3_PPOHtermK_5_6196/actor_000019056651.pth | Hamilton 2.5966053009033203
./Walker2d-v3_PPOHtermK_5_6196/actor_000019209532.pth | Hamilton 2.6586129665374756
./Walker2d-v3_PPOHtermK_5_6196/actor_000019363394.pth | Hamilton 2.635324716567993
./Walker2d-v3_PPOHtermK_5_6196/actor_000019516851.pth | Hamilton 2.6116063594818115
./Walker2d-v3_PPOHtermK_5_6196/actor_000019670805.pth | Hamilton 2.6843416690826416
./Walker2d-v3_PPOHtermK_5_6196/actor_000019825273.pth | Hamilton 2.7173869609832764
./Walker2d-v3_PPOHtermK_5_6196/actor__000000016134_00930.385.pth | Hamilton 0.006547544151544571
./Walker2d-v3_PPOHtermK_5_6196/actor__000000547689_01023.599.pth | Hamilton 0.10164220631122589
./Walker2d-v3_PPOHtermK_5_6196/actor__000000813176_01401.218.pth | Hamilton 0.549475371837616
./Walker2d-v3_PPOHtermK_5_6196/actor__000001081738_04128.690.pth | Hamilton 1.4580780267715454
./Walker2d-v3_PPOHtermK_5_6196/actor__000001352934_04314.376.pth | Hamilton 1.7101364135742188
./Walker2d-v3_PPOHtermK_5_6196/actor__000001622500_04604.481.pth | Hamilton 1.8009814023971558
./Walker2d-v3_PPOHtermK_5_6196/actor__000001892131_04796.554.pth | Hamilton 1.9093735218048096
./Walker2d-v3_PPOHtermK_5_6196/actor__000002427812_04799.140.pth | Hamilton 2.1148128509521484
./Walker2d-v3_PPOHtermK_5_6196/actor__000002698501_04866.675.pth | Hamilton 2.165018320083618
./Walker2d-v3_PPOHtermK_5_6196/actor__000002963773_04889.195.pth | Hamilton 2.304323196411133
./Walker2d-v3_PPOHtermK_5_6196/actor__000003231999_04902.785.pth | Hamilton 2.2968506813049316
./Walker2d-v3_PPOHtermK_5_6196/actor__000003504119_04971.286.pth | Hamilton 2.5185794830322266
./Walker2d-v3_PPOHtermK_5_6196/actor__000003770439_05070.066.pth | Hamilton 2.6017398834228516
./Walker2d-v3_PPOHtermK_5_6196/actor__000004043497_05107.923.pth | Hamilton 2.8066978454589844
./Walker2d-v3_PPOHtermK_5_6196/actor__000004316468_05117.005.pth | Hamilton 2.820406675338745
./Walker2d-v3_PPOHtermK_5_6196/actor__000004583323_05154.680.pth | Hamilton 2.873835802078247
./Walker2d-v3_PPOHtermK_5_6196/actor__000004847206_05246.580.pth | Hamilton 2.8819503784179688
./Walker2d-v3_PPOHtermK_5_6196/actor__000005384593_05253.995.pth | Hamilton 3.0258281230926514
./Walker2d-v3_PPOHtermK_5_6196/actor__000005657379_05328.519.pth | Hamilton 3.2585694789886475
./Walker2d-v3_PPOHtermK_5_6196/actor__000005925929_05362.628.pth | Hamilton 3.166306734085083
./Walker2d-v3_PPOHtermK_5_6196/actor__000006190503_05399.474.pth | Hamilton 3.1285839080810547
./Walker2d-v3_PPOHtermK_5_6196/actor__000006724625_05415.709.pth | Hamilton 3.2131738662719727
./Walker2d-v3_PPOHtermK_5_6196/actor__000007259325_05538.665.pth | Hamilton 3.113234519958496
./Walker2d-v3_PPOHtermK_5_6196/actor__000007527112_05563.535.pth | Hamilton 3.383690595626831
./Walker2d-v3_PPOHtermK_5_6196/actor__000008062692_05585.215.pth | Hamilton 3.382277488708496
./Walker2d-v3_PPOHtermK_5_6196/actor__000009152188_05627.513.pth | Hamilton 3.332095146179199
./Walker2d-v3_PPOHtermK_5_6196/actor__000009418732_05635.250.pth | Hamilton 3.302546501159668
./Walker2d-v3_PPOHtermK_5_6196/actor__000009687843_05670.071.pth | Hamilton 3.4363481998443604
./Walker2d-v3_PPOHtermK_5_6196/actor__000010494557_05679.865.pth | Hamilton 3.4633305072784424
./Walker2d-v3_PPOHtermK_5_6196/actor__000010766058_05769.488.pth | Hamilton 3.4625282287597656
./Walker2d-v3_PPOHtermK_5_6196/actor__000011035019_05774.933.pth | Hamilton 3.514845848083496
./Walker2d-v3_PPOHtermK_5_6196/actor__000011841363_05791.195.pth | Hamilton 3.3303325176239014
./Walker2d-v3_PPOHtermK_5_6196/actor__000012107737_05812.636.pth | Hamilton 3.455310583114624
./Walker2d-v3_PPOHtermK_5_6196/actor__000012913709_05817.056.pth | Hamilton 3.3747763633728027
./Walker2d-v3_PPOHtermK_5_6196/actor__000013180020_05919.832.pth | Hamilton 3.3382797241210938
./Walker2d-v3_PPOHtermK_5_6196/actor__000013728156_05933.581.pth | Hamilton 3.28096342086792
./Walker2d-v3_PPOHtermK_5_6196/actor__000014005259_05980.732.pth | Hamilton 3.2574119567871094
./Walker2d-v3_PPOHtermK_5_6196/actor__000014838261_06000.146.pth | Hamilton 3.1493632793426514
./Walker2d-v3_PPOHtermK_5_6196/actor__000015115265_06054.044.pth | Hamilton 3.1238772869110107
./Walker2d-v3_PPOHtermK_5_6196/actor__000015946492_06065.646.pth | Hamilton 3.0548388957977295
./Walker2d-v3_PPOHtermK_5_6196/actor__000016228489_06102.927.pth | Hamilton 2.9532976150512695
./Walker2d-v3_PPOHtermK_5_6196/actor__000016507252_06116.616.pth | Hamilton 3.0179007053375244
./Walker2d-v3_PPOHtermK_5_6196/actor__000017070313_06143.148.pth | Hamilton 2.94404935836792
./Walker2d-v3_PPOHtermK_5_6196/actor__000017352984_06150.941.pth | Hamilton 2.92793345451355
./Walker2d-v3_PPOHtermK_5_6196/actor__000018775143_06172.219.pth | Hamilton 2.7028586864471436
./Walker2d-v3_PPOHtermK_5_6196/actor__000019343404_06172.396.pth | Hamilton 2.6242010593414307
./Walker2d-v3_PPOHtermK_5_6196/actor__000019631453_06196.522.pth | Hamilton 2.597625732421875
"""
# Walker2d-v3_PPOHtermK_6_6380 — raw checkpoint-evaluation log for this run.
# Format: one record per line, "<checkpoint path> | Hamilton <float score>".
# Two naming schemes appear: "actor_<step>.pth" (periodic snapshots) and
# "actor__<step>_<return>.pth" (best-so-far snapshots with the return in the name).
# NOTE(review): the rows for actor_000000143822.pth through actor_000008250839.pth
# appear twice (the block is repeated verbatim after the first actor_000008250839
# entry) — presumably a concatenation/paste artifact; confirm with the producer of
# this log before deduplicating, since downstream parsing may double-count them.
# The string is kept byte-identical here because it is runtime data.
data42 = """
./Walker2d-v3_PPOHtermK_6_6380/actor_000000075460.pth | Hamilton 0.0627475380897522
./Walker2d-v3_PPOHtermK_6_6380/actor_000000143822.pth | Hamilton 0.07132560014724731
./Walker2d-v3_PPOHtermK_6_6380/actor_000000210746.pth | Hamilton 0.08077272772789001
./Walker2d-v3_PPOHtermK_6_6380/actor_000000277681.pth | Hamilton 0.1012071743607521
./Walker2d-v3_PPOHtermK_6_6380/actor_000000345896.pth | Hamilton 0.12867338955402374
./Walker2d-v3_PPOHtermK_6_6380/actor_000000414144.pth | Hamilton 0.1799956113100052
./Walker2d-v3_PPOHtermK_6_6380/actor_000000482482.pth | Hamilton 0.22217750549316406
./Walker2d-v3_PPOHtermK_6_6380/actor_000000549834.pth | Hamilton 0.28735148906707764
./Walker2d-v3_PPOHtermK_6_6380/actor_000000618088.pth | Hamilton 0.3667449653148651
./Walker2d-v3_PPOHtermK_6_6380/actor_000000686130.pth | Hamilton 0.48285484313964844
./Walker2d-v3_PPOHtermK_6_6380/actor_000000753752.pth | Hamilton 0.6305201053619385
./Walker2d-v3_PPOHtermK_6_6380/actor_000000822345.pth | Hamilton 0.8301229476928711
./Walker2d-v3_PPOHtermK_6_6380/actor_000000890280.pth | Hamilton 1.1131012439727783
./Walker2d-v3_PPOHtermK_6_6380/actor_000000958332.pth | Hamilton 1.3587899208068848
./Walker2d-v3_PPOHtermK_6_6380/actor_000001026152.pth | Hamilton 1.6355680227279663
./Walker2d-v3_PPOHtermK_6_6380/actor_000001094384.pth | Hamilton 1.889074683189392
./Walker2d-v3_PPOHtermK_6_6380/actor_000001162752.pth | Hamilton 2.033831834793091
./Walker2d-v3_PPOHtermK_6_6380/actor_000001230936.pth | Hamilton 2.229149341583252
./Walker2d-v3_PPOHtermK_6_6380/actor_000001298940.pth | Hamilton 2.2825634479522705
./Walker2d-v3_PPOHtermK_6_6380/actor_000001366846.pth | Hamilton 2.4369330406188965
./Walker2d-v3_PPOHtermK_6_6380/actor_000001435014.pth | Hamilton 2.5608909130096436
./Walker2d-v3_PPOHtermK_6_6380/actor_000001503703.pth | Hamilton 2.5671257972717285
./Walker2d-v3_PPOHtermK_6_6380/actor_000001572942.pth | Hamilton 2.655247926712036
./Walker2d-v3_PPOHtermK_6_6380/actor_000001642254.pth | Hamilton 2.6732277870178223
./Walker2d-v3_PPOHtermK_6_6380/actor_000001711744.pth | Hamilton 2.7276840209960938
./Walker2d-v3_PPOHtermK_6_6380/actor_000001781919.pth | Hamilton 2.839830160140991
./Walker2d-v3_PPOHtermK_6_6380/actor_000001851976.pth | Hamilton 2.914414882659912
./Walker2d-v3_PPOHtermK_6_6380/actor_000001923921.pth | Hamilton 2.9089367389678955
./Walker2d-v3_PPOHtermK_6_6380/actor_000001994812.pth | Hamilton 2.9052419662475586
./Walker2d-v3_PPOHtermK_6_6380/actor_000002066973.pth | Hamilton 2.961277961730957
./Walker2d-v3_PPOHtermK_6_6380/actor_000002140277.pth | Hamilton 2.974660873413086
./Walker2d-v3_PPOHtermK_6_6380/actor_000002213997.pth | Hamilton 3.010127305984497
./Walker2d-v3_PPOHtermK_6_6380/actor_000002285530.pth | Hamilton 2.9574837684631348
./Walker2d-v3_PPOHtermK_6_6380/actor_000002360538.pth | Hamilton 3.009147882461548
./Walker2d-v3_PPOHtermK_6_6380/actor_000002436686.pth | Hamilton 3.0166115760803223
./Walker2d-v3_PPOHtermK_6_6380/actor_000002511519.pth | Hamilton 3.0840718746185303
./Walker2d-v3_PPOHtermK_6_6380/actor_000002586476.pth | Hamilton 3.129490613937378
./Walker2d-v3_PPOHtermK_6_6380/actor_000002662969.pth | Hamilton 3.1324877738952637
./Walker2d-v3_PPOHtermK_6_6380/actor_000002737646.pth | Hamilton 3.1095118522644043
./Walker2d-v3_PPOHtermK_6_6380/actor_000002810009.pth | Hamilton 3.2150840759277344
./Walker2d-v3_PPOHtermK_6_6380/actor_000002889230.pth | Hamilton 3.1798110008239746
./Walker2d-v3_PPOHtermK_6_6380/actor_000002965628.pth | Hamilton 3.2491815090179443
./Walker2d-v3_PPOHtermK_6_6380/actor_000003039919.pth | Hamilton 3.274416446685791
./Walker2d-v3_PPOHtermK_6_6380/actor_000003115977.pth | Hamilton 3.2876813411712646
./Walker2d-v3_PPOHtermK_6_6380/actor_000003191824.pth | Hamilton 3.3377134799957275
./Walker2d-v3_PPOHtermK_6_6380/actor_000003268041.pth | Hamilton 3.451484441757202
./Walker2d-v3_PPOHtermK_6_6380/actor_000003343788.pth | Hamilton 3.3870582580566406
./Walker2d-v3_PPOHtermK_6_6380/actor_000003421182.pth | Hamilton 3.430431842803955
./Walker2d-v3_PPOHtermK_6_6380/actor_000003495779.pth | Hamilton 3.3049213886260986
./Walker2d-v3_PPOHtermK_6_6380/actor_000003566060.pth | Hamilton 3.2986879348754883
./Walker2d-v3_PPOHtermK_6_6380/actor_000003642485.pth | Hamilton 3.3262407779693604
./Walker2d-v3_PPOHtermK_6_6380/actor_000003716942.pth | Hamilton 3.3895769119262695
./Walker2d-v3_PPOHtermK_6_6380/actor_000003794027.pth | Hamilton 3.3401451110839844
./Walker2d-v3_PPOHtermK_6_6380/actor_000003871409.pth | Hamilton 3.371879816055298
./Walker2d-v3_PPOHtermK_6_6380/actor_000003947025.pth | Hamilton 3.416346788406372
./Walker2d-v3_PPOHtermK_6_6380/actor_000004022414.pth | Hamilton 3.323047399520874
./Walker2d-v3_PPOHtermK_6_6380/actor_000004096716.pth | Hamilton 3.3502066135406494
./Walker2d-v3_PPOHtermK_6_6380/actor_000004174091.pth | Hamilton 3.3037021160125732
./Walker2d-v3_PPOHtermK_6_6380/actor_000004251708.pth | Hamilton 3.3032755851745605
./Walker2d-v3_PPOHtermK_6_6380/actor_000004326655.pth | Hamilton 3.2859857082366943
./Walker2d-v3_PPOHtermK_6_6380/actor_000004399488.pth | Hamilton 3.1723008155822754
./Walker2d-v3_PPOHtermK_6_6380/actor_000004476463.pth | Hamilton 3.179539442062378
./Walker2d-v3_PPOHtermK_6_6380/actor_000004550996.pth | Hamilton 3.1812515258789062
./Walker2d-v3_PPOHtermK_6_6380/actor_000004626935.pth | Hamilton 3.047405242919922
./Walker2d-v3_PPOHtermK_6_6380/actor_000004701587.pth | Hamilton 3.122925043106079
./Walker2d-v3_PPOHtermK_6_6380/actor_000004776843.pth | Hamilton 3.1765921115875244
./Walker2d-v3_PPOHtermK_6_6380/actor_000004850772.pth | Hamilton 3.0097899436950684
./Walker2d-v3_PPOHtermK_6_6380/actor_000004927738.pth | Hamilton 3.0573930740356445
./Walker2d-v3_PPOHtermK_6_6380/actor_000005003266.pth | Hamilton 2.989349842071533
./Walker2d-v3_PPOHtermK_6_6380/actor_000005076871.pth | Hamilton 2.9224157333374023
./Walker2d-v3_PPOHtermK_6_6380/actor_000005147690.pth | Hamilton 2.8685574531555176
./Walker2d-v3_PPOHtermK_6_6380/actor_000005226244.pth | Hamilton 2.8906149864196777
./Walker2d-v3_PPOHtermK_6_6380/actor_000005303889.pth | Hamilton 2.847195863723755
./Walker2d-v3_PPOHtermK_6_6380/actor_000005378984.pth | Hamilton 2.8399622440338135
./Walker2d-v3_PPOHtermK_6_6380/actor_000005452834.pth | Hamilton 2.7919344902038574
./Walker2d-v3_PPOHtermK_6_6380/actor_000005529874.pth | Hamilton 2.76607346534729
./Walker2d-v3_PPOHtermK_6_6380/actor_000005605964.pth | Hamilton 2.778975009918213
./Walker2d-v3_PPOHtermK_6_6380/actor_000005678841.pth | Hamilton 2.732301712036133
./Walker2d-v3_PPOHtermK_6_6380/actor_000005751934.pth | Hamilton 2.760108709335327
./Walker2d-v3_PPOHtermK_6_6380/actor_000005827627.pth | Hamilton 2.765896797180176
./Walker2d-v3_PPOHtermK_6_6380/actor_000005901353.pth | Hamilton 2.747377395629883
./Walker2d-v3_PPOHtermK_6_6380/actor_000005971425.pth | Hamilton 2.6814119815826416
./Walker2d-v3_PPOHtermK_6_6380/actor_000006050423.pth | Hamilton 2.795193910598755
./Walker2d-v3_PPOHtermK_6_6380/actor_000006122143.pth | Hamilton 2.663102865219116
./Walker2d-v3_PPOHtermK_6_6380/actor_000006199580.pth | Hamilton 2.6520848274230957
./Walker2d-v3_PPOHtermK_6_6380/actor_000006269793.pth | Hamilton 2.7180402278900146
./Walker2d-v3_PPOHtermK_6_6380/actor_000006344204.pth | Hamilton 2.6152045726776123
./Walker2d-v3_PPOHtermK_6_6380/actor_000006416987.pth | Hamilton 2.6152689456939697
./Walker2d-v3_PPOHtermK_6_6380/actor_000006491475.pth | Hamilton 2.6474788188934326
./Walker2d-v3_PPOHtermK_6_6380/actor_000006566168.pth | Hamilton 2.704747438430786
./Walker2d-v3_PPOHtermK_6_6380/actor_000006640559.pth | Hamilton 2.7403247356414795
./Walker2d-v3_PPOHtermK_6_6380/actor_000006711724.pth | Hamilton 2.662841558456421
./Walker2d-v3_PPOHtermK_6_6380/actor_000006788142.pth | Hamilton 2.7531967163085938
./Walker2d-v3_PPOHtermK_6_6380/actor_000006863670.pth | Hamilton 2.6892058849334717
./Walker2d-v3_PPOHtermK_6_6380/actor_000006938597.pth | Hamilton 2.6429266929626465
./Walker2d-v3_PPOHtermK_6_6380/actor_000007007768.pth | Hamilton 2.7523245811462402
./Walker2d-v3_PPOHtermK_6_6380/actor_000007083253.pth | Hamilton 2.7175161838531494
./Walker2d-v3_PPOHtermK_6_6380/actor_000007154894.pth | Hamilton 2.750582218170166
./Walker2d-v3_PPOHtermK_6_6380/actor_000007227465.pth | Hamilton 2.739222526550293
./Walker2d-v3_PPOHtermK_6_6380/actor_000007297178.pth | Hamilton 2.7558655738830566
./Walker2d-v3_PPOHtermK_6_6380/actor_000007368160.pth | Hamilton 2.668473720550537
./Walker2d-v3_PPOHtermK_6_6380/actor_000007438300.pth | Hamilton 2.633183002471924
./Walker2d-v3_PPOHtermK_6_6380/actor_000007511277.pth | Hamilton 2.5936696529388428
./Walker2d-v3_PPOHtermK_6_6380/actor_000007582721.pth | Hamilton 2.596466064453125
./Walker2d-v3_PPOHtermK_6_6380/actor_000007657228.pth | Hamilton 2.5443007946014404
./Walker2d-v3_PPOHtermK_6_6380/actor_000007732003.pth | Hamilton 2.675086736679077
./Walker2d-v3_PPOHtermK_6_6380/actor_000007805497.pth | Hamilton 2.6662676334381104
./Walker2d-v3_PPOHtermK_6_6380/actor_000007878355.pth | Hamilton 2.6095519065856934
./Walker2d-v3_PPOHtermK_6_6380/actor_000007952686.pth | Hamilton 2.7368860244750977
./Walker2d-v3_PPOHtermK_6_6380/actor_000008024193.pth | Hamilton 2.6591808795928955
./Walker2d-v3_PPOHtermK_6_6380/actor_000008097013.pth | Hamilton 2.6418957710266113
./Walker2d-v3_PPOHtermK_6_6380/actor_000008172398.pth | Hamilton 2.772160768508911
./Walker2d-v3_PPOHtermK_6_6380/actor_000008250839.pth | Hamilton 2.828434944152832
./Walker2d-v3_PPOHtermK_6_6380/actor_000000143822.pth | Hamilton 0.07132560014724731
./Walker2d-v3_PPOHtermK_6_6380/actor_000000210746.pth | Hamilton 0.08077272772789001
./Walker2d-v3_PPOHtermK_6_6380/actor_000000277681.pth | Hamilton 0.1012071743607521
./Walker2d-v3_PPOHtermK_6_6380/actor_000000345896.pth | Hamilton 0.12867338955402374
./Walker2d-v3_PPOHtermK_6_6380/actor_000000414144.pth | Hamilton 0.1799956113100052
./Walker2d-v3_PPOHtermK_6_6380/actor_000000482482.pth | Hamilton 0.22217750549316406
./Walker2d-v3_PPOHtermK_6_6380/actor_000000549834.pth | Hamilton 0.28735148906707764
./Walker2d-v3_PPOHtermK_6_6380/actor_000000618088.pth | Hamilton 0.3667449653148651
./Walker2d-v3_PPOHtermK_6_6380/actor_000000686130.pth | Hamilton 0.48285484313964844
./Walker2d-v3_PPOHtermK_6_6380/actor_000000753752.pth | Hamilton 0.6305201053619385
./Walker2d-v3_PPOHtermK_6_6380/actor_000000822345.pth | Hamilton 0.8301229476928711
./Walker2d-v3_PPOHtermK_6_6380/actor_000000890280.pth | Hamilton 1.1131012439727783
./Walker2d-v3_PPOHtermK_6_6380/actor_000000958332.pth | Hamilton 1.3587899208068848
./Walker2d-v3_PPOHtermK_6_6380/actor_000001026152.pth | Hamilton 1.6355680227279663
./Walker2d-v3_PPOHtermK_6_6380/actor_000001094384.pth | Hamilton 1.889074683189392
./Walker2d-v3_PPOHtermK_6_6380/actor_000001162752.pth | Hamilton 2.033831834793091
./Walker2d-v3_PPOHtermK_6_6380/actor_000001230936.pth | Hamilton 2.229149341583252
./Walker2d-v3_PPOHtermK_6_6380/actor_000001298940.pth | Hamilton 2.2825634479522705
./Walker2d-v3_PPOHtermK_6_6380/actor_000001366846.pth | Hamilton 2.4369330406188965
./Walker2d-v3_PPOHtermK_6_6380/actor_000001435014.pth | Hamilton 2.5608909130096436
./Walker2d-v3_PPOHtermK_6_6380/actor_000001503703.pth | Hamilton 2.5671257972717285
./Walker2d-v3_PPOHtermK_6_6380/actor_000001572942.pth | Hamilton 2.655247926712036
./Walker2d-v3_PPOHtermK_6_6380/actor_000001642254.pth | Hamilton 2.6732277870178223
./Walker2d-v3_PPOHtermK_6_6380/actor_000001711744.pth | Hamilton 2.7276840209960938
./Walker2d-v3_PPOHtermK_6_6380/actor_000001781919.pth | Hamilton 2.839830160140991
./Walker2d-v3_PPOHtermK_6_6380/actor_000001851976.pth | Hamilton 2.914414882659912
./Walker2d-v3_PPOHtermK_6_6380/actor_000001923921.pth | Hamilton 2.9089367389678955
./Walker2d-v3_PPOHtermK_6_6380/actor_000001994812.pth | Hamilton 2.9052419662475586
./Walker2d-v3_PPOHtermK_6_6380/actor_000002066973.pth | Hamilton 2.961277961730957
./Walker2d-v3_PPOHtermK_6_6380/actor_000002140277.pth | Hamilton 2.974660873413086
./Walker2d-v3_PPOHtermK_6_6380/actor_000002213997.pth | Hamilton 3.010127305984497
./Walker2d-v3_PPOHtermK_6_6380/actor_000002285530.pth | Hamilton 2.9574837684631348
./Walker2d-v3_PPOHtermK_6_6380/actor_000002360538.pth | Hamilton 3.009147882461548
./Walker2d-v3_PPOHtermK_6_6380/actor_000002436686.pth | Hamilton 3.0166115760803223
./Walker2d-v3_PPOHtermK_6_6380/actor_000002511519.pth | Hamilton 3.0840718746185303
./Walker2d-v3_PPOHtermK_6_6380/actor_000002586476.pth | Hamilton 3.129490613937378
./Walker2d-v3_PPOHtermK_6_6380/actor_000002662969.pth | Hamilton 3.1324877738952637
./Walker2d-v3_PPOHtermK_6_6380/actor_000002737646.pth | Hamilton 3.1095118522644043
./Walker2d-v3_PPOHtermK_6_6380/actor_000002810009.pth | Hamilton 3.2150840759277344
./Walker2d-v3_PPOHtermK_6_6380/actor_000002889230.pth | Hamilton 3.1798110008239746
./Walker2d-v3_PPOHtermK_6_6380/actor_000002965628.pth | Hamilton 3.2491815090179443
./Walker2d-v3_PPOHtermK_6_6380/actor_000003039919.pth | Hamilton 3.274416446685791
./Walker2d-v3_PPOHtermK_6_6380/actor_000003115977.pth | Hamilton 3.2876813411712646
./Walker2d-v3_PPOHtermK_6_6380/actor_000003191824.pth | Hamilton 3.3377134799957275
./Walker2d-v3_PPOHtermK_6_6380/actor_000003268041.pth | Hamilton 3.451484441757202
./Walker2d-v3_PPOHtermK_6_6380/actor_000003343788.pth | Hamilton 3.3870582580566406
./Walker2d-v3_PPOHtermK_6_6380/actor_000003421182.pth | Hamilton 3.430431842803955
./Walker2d-v3_PPOHtermK_6_6380/actor_000003495779.pth | Hamilton 3.3049213886260986
./Walker2d-v3_PPOHtermK_6_6380/actor_000003566060.pth | Hamilton 3.2986879348754883
./Walker2d-v3_PPOHtermK_6_6380/actor_000003642485.pth | Hamilton 3.3262407779693604
./Walker2d-v3_PPOHtermK_6_6380/actor_000003716942.pth | Hamilton 3.3895769119262695
./Walker2d-v3_PPOHtermK_6_6380/actor_000003794027.pth | Hamilton 3.3401451110839844
./Walker2d-v3_PPOHtermK_6_6380/actor_000003871409.pth | Hamilton 3.371879816055298
./Walker2d-v3_PPOHtermK_6_6380/actor_000003947025.pth | Hamilton 3.416346788406372
./Walker2d-v3_PPOHtermK_6_6380/actor_000004022414.pth | Hamilton 3.323047399520874
./Walker2d-v3_PPOHtermK_6_6380/actor_000004096716.pth | Hamilton 3.3502066135406494
./Walker2d-v3_PPOHtermK_6_6380/actor_000004174091.pth | Hamilton 3.3037021160125732
./Walker2d-v3_PPOHtermK_6_6380/actor_000004251708.pth | Hamilton 3.3032755851745605
./Walker2d-v3_PPOHtermK_6_6380/actor_000004326655.pth | Hamilton 3.2859857082366943
./Walker2d-v3_PPOHtermK_6_6380/actor_000004399488.pth | Hamilton 3.1723008155822754
./Walker2d-v3_PPOHtermK_6_6380/actor_000004476463.pth | Hamilton 3.179539442062378
./Walker2d-v3_PPOHtermK_6_6380/actor_000004550996.pth | Hamilton 3.1812515258789062
./Walker2d-v3_PPOHtermK_6_6380/actor_000004626935.pth | Hamilton 3.047405242919922
./Walker2d-v3_PPOHtermK_6_6380/actor_000004701587.pth | Hamilton 3.122925043106079
./Walker2d-v3_PPOHtermK_6_6380/actor_000004776843.pth | Hamilton 3.1765921115875244
./Walker2d-v3_PPOHtermK_6_6380/actor_000004850772.pth | Hamilton 3.0097899436950684
./Walker2d-v3_PPOHtermK_6_6380/actor_000004927738.pth | Hamilton 3.0573930740356445
./Walker2d-v3_PPOHtermK_6_6380/actor_000005003266.pth | Hamilton 2.989349842071533
./Walker2d-v3_PPOHtermK_6_6380/actor_000005076871.pth | Hamilton 2.9224157333374023
./Walker2d-v3_PPOHtermK_6_6380/actor_000005147690.pth | Hamilton 2.8685574531555176
./Walker2d-v3_PPOHtermK_6_6380/actor_000005226244.pth | Hamilton 2.8906149864196777
./Walker2d-v3_PPOHtermK_6_6380/actor_000005303889.pth | Hamilton 2.847195863723755
./Walker2d-v3_PPOHtermK_6_6380/actor_000005378984.pth | Hamilton 2.8399622440338135
./Walker2d-v3_PPOHtermK_6_6380/actor_000005452834.pth | Hamilton 2.7919344902038574
./Walker2d-v3_PPOHtermK_6_6380/actor_000005529874.pth | Hamilton 2.76607346534729
./Walker2d-v3_PPOHtermK_6_6380/actor_000005605964.pth | Hamilton 2.778975009918213
./Walker2d-v3_PPOHtermK_6_6380/actor_000005678841.pth | Hamilton 2.732301712036133
./Walker2d-v3_PPOHtermK_6_6380/actor_000005751934.pth | Hamilton 2.760108709335327
./Walker2d-v3_PPOHtermK_6_6380/actor_000005827627.pth | Hamilton 2.765896797180176
./Walker2d-v3_PPOHtermK_6_6380/actor_000005901353.pth | Hamilton 2.747377395629883
./Walker2d-v3_PPOHtermK_6_6380/actor_000005971425.pth | Hamilton 2.6814119815826416
./Walker2d-v3_PPOHtermK_6_6380/actor_000006050423.pth | Hamilton 2.795193910598755
./Walker2d-v3_PPOHtermK_6_6380/actor_000006122143.pth | Hamilton 2.663102865219116
./Walker2d-v3_PPOHtermK_6_6380/actor_000006199580.pth | Hamilton 2.6520848274230957
./Walker2d-v3_PPOHtermK_6_6380/actor_000006269793.pth | Hamilton 2.7180402278900146
./Walker2d-v3_PPOHtermK_6_6380/actor_000006344204.pth | Hamilton 2.6152045726776123
./Walker2d-v3_PPOHtermK_6_6380/actor_000006416987.pth | Hamilton 2.6152689456939697
./Walker2d-v3_PPOHtermK_6_6380/actor_000006491475.pth | Hamilton 2.6474788188934326
./Walker2d-v3_PPOHtermK_6_6380/actor_000006566168.pth | Hamilton 2.704747438430786
./Walker2d-v3_PPOHtermK_6_6380/actor_000006640559.pth | Hamilton 2.7403247356414795
./Walker2d-v3_PPOHtermK_6_6380/actor_000006711724.pth | Hamilton 2.662841558456421
./Walker2d-v3_PPOHtermK_6_6380/actor_000006788142.pth | Hamilton 2.7531967163085938
./Walker2d-v3_PPOHtermK_6_6380/actor_000006863670.pth | Hamilton 2.6892058849334717
./Walker2d-v3_PPOHtermK_6_6380/actor_000006938597.pth | Hamilton 2.6429266929626465
./Walker2d-v3_PPOHtermK_6_6380/actor_000007007768.pth | Hamilton 2.7523245811462402
./Walker2d-v3_PPOHtermK_6_6380/actor_000007083253.pth | Hamilton 2.7175161838531494
./Walker2d-v3_PPOHtermK_6_6380/actor_000007154894.pth | Hamilton 2.750582218170166
./Walker2d-v3_PPOHtermK_6_6380/actor_000007227465.pth | Hamilton 2.739222526550293
./Walker2d-v3_PPOHtermK_6_6380/actor_000007297178.pth | Hamilton 2.7558655738830566
./Walker2d-v3_PPOHtermK_6_6380/actor_000007368160.pth | Hamilton 2.668473720550537
./Walker2d-v3_PPOHtermK_6_6380/actor_000007438300.pth | Hamilton 2.633183002471924
./Walker2d-v3_PPOHtermK_6_6380/actor_000007511277.pth | Hamilton 2.5936696529388428
./Walker2d-v3_PPOHtermK_6_6380/actor_000007582721.pth | Hamilton 2.596466064453125
./Walker2d-v3_PPOHtermK_6_6380/actor_000007657228.pth | Hamilton 2.5443007946014404
./Walker2d-v3_PPOHtermK_6_6380/actor_000007732003.pth | Hamilton 2.675086736679077
./Walker2d-v3_PPOHtermK_6_6380/actor_000007805497.pth | Hamilton 2.6662676334381104
./Walker2d-v3_PPOHtermK_6_6380/actor_000007878355.pth | Hamilton 2.6095519065856934
./Walker2d-v3_PPOHtermK_6_6380/actor_000007952686.pth | Hamilton 2.7368860244750977
./Walker2d-v3_PPOHtermK_6_6380/actor_000008024193.pth | Hamilton 2.6591808795928955
./Walker2d-v3_PPOHtermK_6_6380/actor_000008097013.pth | Hamilton 2.6418957710266113
./Walker2d-v3_PPOHtermK_6_6380/actor_000008172398.pth | Hamilton 2.772160768508911
./Walker2d-v3_PPOHtermK_6_6380/actor_000008250839.pth | Hamilton 2.828434944152832
./Walker2d-v3_PPOHtermK_6_6380/actor_000008321909.pth | Hamilton 2.742154836654663
./Walker2d-v3_PPOHtermK_6_6380/actor_000008393699.pth | Hamilton 2.728599786758423
./Walker2d-v3_PPOHtermK_6_6380/actor_000008463759.pth | Hamilton 2.791388750076294
./Walker2d-v3_PPOHtermK_6_6380/actor_000008536807.pth | Hamilton 2.7996938228607178
./Walker2d-v3_PPOHtermK_6_6380/actor_000008612178.pth | Hamilton 2.7829697132110596
./Walker2d-v3_PPOHtermK_6_6380/actor_000008683737.pth | Hamilton 2.772770643234253
./Walker2d-v3_PPOHtermK_6_6380/actor_000008755953.pth | Hamilton 2.779437780380249
./Walker2d-v3_PPOHtermK_6_6380/actor_000008832450.pth | Hamilton 2.7469935417175293
./Walker2d-v3_PPOHtermK_6_6380/actor_000008902156.pth | Hamilton 2.74408221244812
./Walker2d-v3_PPOHtermK_6_6380/actor_000008972765.pth | Hamilton 2.8077948093414307
./Walker2d-v3_PPOHtermK_6_6380/actor_000009044047.pth | Hamilton 2.800044536590576
./Walker2d-v3_PPOHtermK_6_6380/actor_000009117314.pth | Hamilton 2.823373556137085
./Walker2d-v3_PPOHtermK_6_6380/actor_000009191216.pth | Hamilton 2.8886241912841797
./Walker2d-v3_PPOHtermK_6_6380/actor_000009263342.pth | Hamilton 2.826233148574829
./Walker2d-v3_PPOHtermK_6_6380/actor_000009337590.pth | Hamilton 2.9232451915740967
./Walker2d-v3_PPOHtermK_6_6380/actor_000009410265.pth | Hamilton 2.8646326065063477
./Walker2d-v3_PPOHtermK_6_6380/actor_000009483447.pth | Hamilton 2.8999993801116943
./Walker2d-v3_PPOHtermK_6_6380/actor_000009558085.pth | Hamilton 2.8749709129333496
./Walker2d-v3_PPOHtermK_6_6380/actor_000009632225.pth | Hamilton 2.85235595703125
./Walker2d-v3_PPOHtermK_6_6380/actor_000009703915.pth | Hamilton 2.8588786125183105
./Walker2d-v3_PPOHtermK_6_6380/actor_000009777553.pth | Hamilton 2.8617067337036133
./Walker2d-v3_PPOHtermK_6_6380/actor_000009848995.pth | Hamilton 2.9332940578460693
./Walker2d-v3_PPOHtermK_6_6380/actor_000009920957.pth | Hamilton 2.947573661804199
./Walker2d-v3_PPOHtermK_6_6380/actor_000009994632.pth | Hamilton 2.9067983627319336
./Walker2d-v3_PPOHtermK_6_6380/actor_000010067603.pth | Hamilton 2.8467671871185303
./Walker2d-v3_PPOHtermK_6_6380/actor_000010138962.pth | Hamilton 2.8667306900024414
./Walker2d-v3_PPOHtermK_6_6380/actor_000010211320.pth | Hamilton 2.9673523902893066
./Walker2d-v3_PPOHtermK_6_6380/actor_000010287731.pth | Hamilton 2.860609769821167
./Walker2d-v3_PPOHtermK_6_6380/actor_000010359251.pth | Hamilton 2.8929758071899414
./Walker2d-v3_PPOHtermK_6_6380/actor_000010431395.pth | Hamilton 2.854750633239746
./Walker2d-v3_PPOHtermK_6_6380/actor_000010502183.pth | Hamilton 2.746868371963501
./Walker2d-v3_PPOHtermK_6_6380/actor_000010573696.pth | Hamilton 2.886901378631592
./Walker2d-v3_PPOHtermK_6_6380/actor_000010642738.pth | Hamilton 2.9300107955932617
./Walker2d-v3_PPOHtermK_6_6380/actor_000010718071.pth | Hamilton 2.927466630935669
./Walker2d-v3_PPOHtermK_6_6380/actor_000010786960.pth | Hamilton 2.8337652683258057
./Walker2d-v3_PPOHtermK_6_6380/actor_000010855690.pth | Hamilton 2.865790367126465
./Walker2d-v3_PPOHtermK_6_6380/actor_000010924576.pth | Hamilton 2.858492851257324
./Walker2d-v3_PPOHtermK_6_6380/actor_000010993872.pth | Hamilton 2.878523349761963
./Walker2d-v3_PPOHtermK_6_6380/actor_000011064499.pth | Hamilton 2.882023334503174
./Walker2d-v3_PPOHtermK_6_6380/actor_000011137139.pth | Hamilton 2.845759391784668
./Walker2d-v3_PPOHtermK_6_6380/actor_000011207295.pth | Hamilton 2.682384729385376
./Walker2d-v3_PPOHtermK_6_6380/actor_000011280837.pth | Hamilton 2.790651798248291
./Walker2d-v3_PPOHtermK_6_6380/actor_000011352596.pth | Hamilton 2.771451711654663
./Walker2d-v3_PPOHtermK_6_6380/actor_000011423743.pth | Hamilton 2.752410888671875
./Walker2d-v3_PPOHtermK_6_6380/actor_000011496477.pth | Hamilton 2.7736527919769287
./Walker2d-v3_PPOHtermK_6_6380/actor_000011566291.pth | Hamilton 2.7306885719299316
./Walker2d-v3_PPOHtermK_6_6380/actor_000011639294.pth | Hamilton 2.807950019836426
./Walker2d-v3_PPOHtermK_6_6380/actor_000011711542.pth | Hamilton 2.806436777114868
./Walker2d-v3_PPOHtermK_6_6380/actor_000011782243.pth | Hamilton 2.693161964416504
./Walker2d-v3_PPOHtermK_6_6380/actor_000011854565.pth | Hamilton 2.788557767868042
./Walker2d-v3_PPOHtermK_6_6380/actor_000011925047.pth | Hamilton 2.7770822048187256
./Walker2d-v3_PPOHtermK_6_6380/actor_000011996903.pth | Hamilton 2.8102524280548096
./Walker2d-v3_PPOHtermK_6_6380/actor_000012065663.pth | Hamilton 2.794227123260498
./Walker2d-v3_PPOHtermK_6_6380/actor_000012141349.pth | Hamilton 2.839717388153076
./Walker2d-v3_PPOHtermK_6_6380/actor_000012218090.pth | Hamilton 2.827446699142456
./Walker2d-v3_PPOHtermK_6_6380/actor_000012291899.pth | Hamilton 2.8396475315093994
./Walker2d-v3_PPOHtermK_6_6380/actor_000012362498.pth | Hamilton 2.919839382171631
./Walker2d-v3_PPOHtermK_6_6380/actor_000012432779.pth | Hamilton 2.8204243183135986
./Walker2d-v3_PPOHtermK_6_6380/actor_000012505463.pth | Hamilton 2.7981061935424805
./Walker2d-v3_PPOHtermK_6_6380/actor_000012575909.pth | Hamilton 2.865457057952881
./Walker2d-v3_PPOHtermK_6_6380/actor_000012644020.pth | Hamilton 2.9132330417633057
./Walker2d-v3_PPOHtermK_6_6380/actor_000012715904.pth | Hamilton 2.9028162956237793
./Walker2d-v3_PPOHtermK_6_6380/actor_000012788094.pth | Hamilton 2.9492990970611572
./Walker2d-v3_PPOHtermK_6_6380/actor_000012856434.pth | Hamilton 2.9954020977020264
./Walker2d-v3_PPOHtermK_6_6380/actor_000012929884.pth | Hamilton 2.961350202560425
./Walker2d-v3_PPOHtermK_6_6380/actor_000013004936.pth | Hamilton 2.8226265907287598
./Walker2d-v3_PPOHtermK_6_6380/actor_000013079308.pth | Hamilton 2.8537650108337402
./Walker2d-v3_PPOHtermK_6_6380/actor_000013150894.pth | Hamilton 2.93576717376709
./Walker2d-v3_PPOHtermK_6_6380/actor_000013222703.pth | Hamilton 2.9115564823150635
./Walker2d-v3_PPOHtermK_6_6380/actor__000000016241_00290.806.pth | Hamilton 0.019592655822634697
./Walker2d-v3_PPOHtermK_6_6380/actor__000000194051_00462.490.pth | Hamilton 0.027271157130599022
./Walker2d-v3_PPOHtermK_6_6380/actor__000000371567_00785.392.pth | Hamilton 0.05270720273256302
./Walker2d-v3_PPOHtermK_6_6380/actor__000000745266_00860.829.pth | Hamilton 0.21522416174411774
./Walker2d-v3_PPOHtermK_6_6380/actor__000000932762_00900.411.pth | Hamilton 0.378319650888443
./Walker2d-v3_PPOHtermK_6_6380/actor__000001111686_00959.373.pth | Hamilton 0.48789942264556885
./Walker2d-v3_PPOHtermK_6_6380/actor__000001290731_01177.423.pth | Hamilton 0.6105005145072937
./Walker2d-v3_PPOHtermK_6_6380/actor__000001477295_01376.001.pth | Hamilton 0.7220186591148376
./Walker2d-v3_PPOHtermK_6_6380/actor__000001842777_03972.032.pth | Hamilton 0.896857500076294
./Walker2d-v3_PPOHtermK_6_6380/actor__000002022225_04048.467.pth | Hamilton 1.058160662651062
./Walker2d-v3_PPOHtermK_6_6380/actor__000002204355_04453.621.pth | Hamilton 1.2204538583755493
./Walker2d-v3_PPOHtermK_6_6380/actor__000002389593_04638.108.pth | Hamilton 1.4094444513320923
./Walker2d-v3_PPOHtermK_6_6380/actor__000002756563_04810.502.pth | Hamilton 1.6390588283538818
./Walker2d-v3_PPOHtermK_6_6380/actor__000002937657_04889.097.pth | Hamilton 1.7855184078216553
./Walker2d-v3_PPOHtermK_6_6380/actor__000003125299_04920.084.pth | Hamilton 1.9106531143188477
./Walker2d-v3_PPOHtermK_6_6380/actor__000004032507_04938.116.pth | Hamilton 2.1822659969329834
./Walker2d-v3_PPOHtermK_6_6380/actor__000004212893_04994.665.pth | Hamilton 2.2843923568725586
./Walker2d-v3_PPOHtermK_6_6380/actor__000004390621_05104.344.pth | Hamilton 2.4139745235443115
./Walker2d-v3_PPOHtermK_6_6380/actor__000004577789_05196.673.pth | Hamilton 2.515577793121338
./Walker2d-v3_PPOHtermK_6_6380/actor__000004937777_05280.422.pth | Hamilton 2.688485622406006
./Walker2d-v3_PPOHtermK_6_6380/actor__000005303889_05448.386.pth | Hamilton 2.8832085132598877
./Walker2d-v3_PPOHtermK_6_6380/actor__000005482507_05498.714.pth | Hamilton 2.9794793128967285
./Walker2d-v3_PPOHtermK_6_6380/actor__000005668782_05568.015.pth | Hamilton 3.0330400466918945
./Walker2d-v3_PPOHtermK_6_6380/actor__000005855582_05578.576.pth | Hamilton 3.0443379878997803
./Walker2d-v3_PPOHtermK_6_6380/actor__000006398966_05692.406.pth | Hamilton 3.277148723602295
./Walker2d-v3_PPOHtermK_6_6380/actor__000006566168_05750.363.pth | Hamilton 3.273808240890503
./Walker2d-v3_PPOHtermK_6_6380/actor__000006750199_05786.003.pth | Hamilton 3.271698474884033
./Walker2d-v3_PPOHtermK_6_6380/actor__000006938597_05813.048.pth | Hamilton 3.3507120609283447
./Walker2d-v3_PPOHtermK_6_6380/actor__000007484047_05861.979.pth | Hamilton 3.551744222640991
./Walker2d-v3_PPOHtermK_6_6380/actor__000007667143_05891.985.pth | Hamilton 3.4438040256500244
./Walker2d-v3_PPOHtermK_6_6380/actor__000007850701_05905.859.pth | Hamilton 3.467259168624878
./Walker2d-v3_PPOHtermK_6_6380/actor__000008393699_05965.656.pth | Hamilton 3.4827489852905273
./Walker2d-v3_PPOHtermK_6_6380/actor__000008576533_06020.442.pth | Hamilton 3.605449676513672
./Walker2d-v3_PPOHtermK_6_6380/actor__000009501024_06080.619.pth | Hamilton 3.468210220336914
./Walker2d-v3_PPOHtermK_6_6380/actor__000010057923_06122.661.pth | Hamilton 3.453948736190796
./Walker2d-v3_PPOHtermK_6_6380/actor__000010248263_06154.803.pth | Hamilton 3.567803382873535
./Walker2d-v3_PPOHtermK_6_6380/actor__000010439949_06182.687.pth | Hamilton 3.486987352371216
./Walker2d-v3_PPOHtermK_6_6380/actor__000010820292_06228.996.pth | Hamilton 3.4265878200531006
./Walker2d-v3_PPOHtermK_6_6380/actor__000011011586_06240.069.pth | Hamilton 3.416078805923462
./Walker2d-v3_PPOHtermK_6_6380/actor__000011198022_06306.907.pth | Hamilton 3.2585856914520264
./Walker2d-v3_PPOHtermK_6_6380/actor__000011967112_06311.553.pth | Hamilton 3.17917537689209
./Walker2d-v3_PPOHtermK_6_6380/actor__000012734484_06380.495.pth | Hamilton 3.035814046859741
"""
# Walker2d-v3_PPO_3_6635
data43 = """
./Walker2d-v3_PPO_3_6635/actor_000000054966.pth | Hamilton 0.01972796395421028
./Walker2d-v3_PPO_3_6635/actor_000000105619.pth | Hamilton 0.035059910267591476
./Walker2d-v3_PPO_3_6635/actor_000000157873.pth | Hamilton 0.04067114740610123
./Walker2d-v3_PPO_3_6635/actor_000000210512.pth | Hamilton 0.04678366705775261
./Walker2d-v3_PPO_3_6635/actor_000000262669.pth | Hamilton 0.05390090122818947
./Walker2d-v3_PPO_3_6635/actor_000000314467.pth | Hamilton 0.062065452337265015
./Walker2d-v3_PPO_3_6635/actor_000000365912.pth | Hamilton 0.06891259551048279
./Walker2d-v3_PPO_3_6635/actor_000000417472.pth | Hamilton 0.07636240869760513
./Walker2d-v3_PPO_3_6635/actor_000000469423.pth | Hamilton 0.0840894803404808
./Walker2d-v3_PPO_3_6635/actor_000000521525.pth | Hamilton 0.0897422507405281
./Walker2d-v3_PPO_3_6635/actor_000000573971.pth | Hamilton 0.0966050922870636
./Walker2d-v3_PPO_3_6635/actor_000000626111.pth | Hamilton 0.10339801758527756
./Walker2d-v3_PPO_3_6635/actor_000000677706.pth | Hamilton 0.11114295572042465
./Walker2d-v3_PPO_3_6635/actor_000000729593.pth | Hamilton 0.12237662822008133
./Walker2d-v3_PPO_3_6635/actor_000000782027.pth | Hamilton 0.13457736372947693
./Walker2d-v3_PPO_3_6635/actor_000000835157.pth | Hamilton 0.14478591084480286
./Walker2d-v3_PPO_3_6635/actor_000000888502.pth | Hamilton 0.15821334719657898
./Walker2d-v3_PPO_3_6635/actor_000000940764.pth | Hamilton 0.16951744258403778
./Walker2d-v3_PPO_3_6635/actor_000000994348.pth | Hamilton 0.18415145576000214
./Walker2d-v3_PPO_3_6635/actor_000001047991.pth | Hamilton 0.1963721066713333
./Walker2d-v3_PPO_3_6635/actor_000001102779.pth | Hamilton 0.21769124269485474
./Walker2d-v3_PPO_3_6635/actor_000001157214.pth | Hamilton 0.23970939218997955
./Walker2d-v3_PPO_3_6635/actor_000001212812.pth | Hamilton 0.2599489688873291
./Walker2d-v3_PPO_3_6635/actor_000001268549.pth | Hamilton 0.2786579728126526
./Walker2d-v3_PPO_3_6635/actor_000001326109.pth | Hamilton 0.300142765045166
./Walker2d-v3_PPO_3_6635/actor_000001384928.pth | Hamilton 0.3222769796848297
./Walker2d-v3_PPO_3_6635/actor_000001444056.pth | Hamilton 0.3418172299861908
./Walker2d-v3_PPO_3_6635/actor_000001507252.pth | Hamilton 0.3764764368534088
./Walker2d-v3_PPO_3_6635/actor_000001567574.pth | Hamilton 0.41272470355033875
./Walker2d-v3_PPO_3_6635/actor_000001629751.pth | Hamilton 0.45305564999580383
./Walker2d-v3_PPO_3_6635/actor_000001691009.pth | Hamilton 0.4813075363636017
./Walker2d-v3_PPO_3_6635/actor_000001753324.pth | Hamilton 0.5201964974403381
./Walker2d-v3_PPO_3_6635/actor_000001817149.pth | Hamilton 0.5548107624053955
./Walker2d-v3_PPO_3_6635/actor_000001878359.pth | Hamilton 0.6043074727058411
./Walker2d-v3_PPO_3_6635/actor_000001942293.pth | Hamilton 0.6314113736152649
./Walker2d-v3_PPO_3_6635/actor_000002008526.pth | Hamilton 0.6795088648796082
./Walker2d-v3_PPO_3_6635/actor_000002070983.pth | Hamilton 0.7221250534057617
./Walker2d-v3_PPO_3_6635/actor_000002134534.pth | Hamilton 0.7496094107627869
./Walker2d-v3_PPO_3_6635/actor_000002197442.pth | Hamilton 0.7956565022468567
./Walker2d-v3_PPO_3_6635/actor_000002259262.pth | Hamilton 0.8209180235862732
./Walker2d-v3_PPO_3_6635/actor_000002317669.pth | Hamilton 0.8565555214881897
./Walker2d-v3_PPO_3_6635/actor_000002380661.pth | Hamilton 0.8939540982246399
./Walker2d-v3_PPO_3_6635/actor_000002444375.pth | Hamilton 0.9018198847770691
./Walker2d-v3_PPO_3_6635/actor_000002509586.pth | Hamilton 0.9443305134773254
./Walker2d-v3_PPO_3_6635/actor_000002572927.pth | Hamilton 0.9507122039794922
./Walker2d-v3_PPO_3_6635/actor_000002630642.pth | Hamilton 0.9790986776351929
./Walker2d-v3_PPO_3_6635/actor_000002692266.pth | Hamilton 1.001312494277954
./Walker2d-v3_PPO_3_6635/actor_000002755833.pth | Hamilton 1.0064773559570312
./Walker2d-v3_PPO_3_6635/actor_000002820139.pth | Hamilton 1.0224567651748657
./Walker2d-v3_PPO_3_6635/actor_000002881531.pth | Hamilton 1.0335133075714111
./Walker2d-v3_PPO_3_6635/actor_000002946074.pth | Hamilton 1.051541805267334
./Walker2d-v3_PPO_3_6635/actor_000003008674.pth | Hamilton 1.0847855806350708
./Walker2d-v3_PPO_3_6635/actor_000003070054.pth | Hamilton 1.089240550994873
./Walker2d-v3_PPO_3_6635/actor_000003131041.pth | Hamilton 1.0923864841461182
./Walker2d-v3_PPO_3_6635/actor_000003193064.pth | Hamilton 1.0970594882965088
./Walker2d-v3_PPO_3_6635/actor_000003254037.pth | Hamilton 1.111884355545044
./Walker2d-v3_PPO_3_6635/actor_000003311997.pth | Hamilton 1.1318044662475586
./Walker2d-v3_PPO_3_6635/actor_000003376975.pth | Hamilton 1.1258502006530762
./Walker2d-v3_PPO_3_6635/actor_000003437763.pth | Hamilton 1.1204301118850708
./Walker2d-v3_PPO_3_6635/actor_000003498339.pth | Hamilton 1.1107360124588013
./Walker2d-v3_PPO_3_6635/actor_000003560356.pth | Hamilton 1.1189417839050293
./Walker2d-v3_PPO_3_6635/actor_000003619930.pth | Hamilton 1.1212390661239624
./Walker2d-v3_PPO_3_6635/actor_000003680568.pth | Hamilton 1.1144177913665771
./Walker2d-v3_PPO_3_6635/actor_000003743412.pth | Hamilton 1.112485647201538
./Walker2d-v3_PPO_3_6635/actor_000003805319.pth | Hamilton 1.093639612197876
./Walker2d-v3_PPO_3_6635/actor_000003866336.pth | Hamilton 1.1010220050811768
./Walker2d-v3_PPO_3_6635/actor_000003926910.pth | Hamilton 1.0958553552627563
./Walker2d-v3_PPO_3_6635/actor_000003986107.pth | Hamilton 1.080429196357727
./Walker2d-v3_PPO_3_6635/actor_000004047683.pth | Hamilton 1.0827715396881104
./Walker2d-v3_PPO_3_6635/actor_000004106300.pth | Hamilton 1.0995678901672363
./Walker2d-v3_PPO_3_6635/actor_000004169985.pth | Hamilton 1.0886454582214355
./Walker2d-v3_PPO_3_6635/actor_000004228304.pth | Hamilton 1.1055152416229248
./Walker2d-v3_PPO_3_6635/actor_000004284973.pth | Hamilton 1.107505202293396
./Walker2d-v3_PPO_3_6635/actor_000004342665.pth | Hamilton 1.0984554290771484
./Walker2d-v3_PPO_3_6635/actor_000004399358.pth | Hamilton 1.0958807468414307
./Walker2d-v3_PPO_3_6635/actor_000004457209.pth | Hamilton 1.0925768613815308
./Walker2d-v3_PPO_3_6635/actor_000004512042.pth | Hamilton 1.0755237340927124
./Walker2d-v3_PPO_3_6635/actor_000004568106.pth | Hamilton 1.0717418193817139
./Walker2d-v3_PPO_3_6635/actor_000004626400.pth | Hamilton 1.0806517601013184
./Walker2d-v3_PPO_3_6635/actor_000004691271.pth | Hamilton 1.0708420276641846
./Walker2d-v3_PPO_3_6635/actor_000004750893.pth | Hamilton 1.0804322957992554
./Walker2d-v3_PPO_3_6635/actor_000004813899.pth | Hamilton 1.0726450681686401
./Walker2d-v3_PPO_3_6635/actor_000004870007.pth | Hamilton 1.0703684091567993
./Walker2d-v3_PPO_3_6635/actor_000004924015.pth | Hamilton 1.0642073154449463
./Walker2d-v3_PPO_3_6635/actor_000004981319.pth | Hamilton 1.0789607763290405
./Walker2d-v3_PPO_3_6635/actor_000005038251.pth | Hamilton 1.0696189403533936
./Walker2d-v3_PPO_3_6635/actor_000005096952.pth | Hamilton 1.0715504884719849
./Walker2d-v3_PPO_3_6635/actor_000005154283.pth | Hamilton 1.0594005584716797
./Walker2d-v3_PPO_3_6635/actor_000005213615.pth | Hamilton 1.04817533493042
./Walker2d-v3_PPO_3_6635/actor_000005274787.pth | Hamilton 1.0395565032958984
./Walker2d-v3_PPO_3_6635/actor_000005332130.pth | Hamilton 1.0261379480361938
./Walker2d-v3_PPO_3_6635/actor_000005396229.pth | Hamilton 1.0006322860717773
./Walker2d-v3_PPO_3_6635/actor_000005459419.pth | Hamilton 0.959551990032196
./Walker2d-v3_PPO_3_6635/actor_000005519480.pth | Hamilton 0.9368746280670166
./Walker2d-v3_PPO_3_6635/actor_000005579603.pth | Hamilton 0.9290984272956848
./Walker2d-v3_PPO_3_6635/actor_000005641587.pth | Hamilton 0.9125220775604248
./Walker2d-v3_PPO_3_6635/actor_000005703016.pth | Hamilton 0.9109368920326233
./Walker2d-v3_PPO_3_6635/actor_000005766489.pth | Hamilton 0.9127731919288635
./Walker2d-v3_PPO_3_6635/actor_000005821143.pth | Hamilton 0.8996481895446777
./Walker2d-v3_PPO_3_6635/actor_000005880491.pth | Hamilton 0.8935257792472839
./Walker2d-v3_PPO_3_6635/actor_000005938390.pth | Hamilton 0.8621619939804077
./Walker2d-v3_PPO_3_6635/actor_000005994241.pth | Hamilton 0.8484944701194763
./Walker2d-v3_PPO_3_6635/actor_000006051786.pth | Hamilton 0.8623994588851929
./Walker2d-v3_PPO_3_6635/actor_000006111064.pth | Hamilton 0.8559266328811646
./Walker2d-v3_PPO_3_6635/actor_000006172700.pth | Hamilton 0.8494306206703186
./Walker2d-v3_PPO_3_6635/actor_000006234883.pth | Hamilton 0.8376625776290894
./Walker2d-v3_PPO_3_6635/actor_000006292467.pth | Hamilton 0.8147987723350525
./Walker2d-v3_PPO_3_6635/actor_000006351672.pth | Hamilton 0.8209235668182373
./Walker2d-v3_PPO_3_6635/actor_000006413796.pth | Hamilton 0.8096635341644287
./Walker2d-v3_PPO_3_6635/actor_000006474080.pth | Hamilton 0.8078237771987915
./Walker2d-v3_PPO_3_6635/actor_000006529489.pth | Hamilton 0.7891370058059692
./Walker2d-v3_PPO_3_6635/actor_000006585655.pth | Hamilton 0.7922996282577515
./Walker2d-v3_PPO_3_6635/actor_000006646486.pth | Hamilton 0.7994384765625
./Walker2d-v3_PPO_3_6635/actor_000006703895.pth | Hamilton 0.7893878817558289
./Walker2d-v3_PPO_3_6635/actor_000006759400.pth | Hamilton 0.7944397926330566
./Walker2d-v3_PPO_3_6635/actor_000006815682.pth | Hamilton 0.7983978986740112
./Walker2d-v3_PPO_3_6635/actor_000006874665.pth | Hamilton 0.7860860228538513
./Walker2d-v3_PPO_3_6635/actor_000006933966.pth | Hamilton 0.7959814071655273
./Walker2d-v3_PPO_3_6635/actor_000006989248.pth | Hamilton 0.7916241884231567
./Walker2d-v3_PPO_3_6635/actor_000007047421.pth | Hamilton 0.7806171178817749
./Walker2d-v3_PPO_3_6635/actor_000007108867.pth | Hamilton 0.8016980290412903
./Walker2d-v3_PPO_3_6635/actor_000007167695.pth | Hamilton 0.8155857920646667
./Walker2d-v3_PPO_3_6635/actor_000007229038.pth | Hamilton 0.8113080859184265
./Walker2d-v3_PPO_3_6635/actor_000007285949.pth | Hamilton 0.8086192011833191
./Walker2d-v3_PPO_3_6635/actor_000007344004.pth | Hamilton 0.8190962076187134
./Walker2d-v3_PPO_3_6635/actor_000007403025.pth | Hamilton 0.7791344523429871
./Walker2d-v3_PPO_3_6635/actor_000007465088.pth | Hamilton 0.7855120897293091
./Walker2d-v3_PPO_3_6635/actor_000007527952.pth | Hamilton 0.7963833808898926
./Walker2d-v3_PPO_3_6635/actor_000007588208.pth | Hamilton 0.7801435589790344
./Walker2d-v3_PPO_3_6635/actor_000007650859.pth | Hamilton 0.7747620940208435
./Walker2d-v3_PPO_3_6635/actor_000007712106.pth | Hamilton 0.7632256746292114
./Walker2d-v3_PPO_3_6635/actor_000007775304.pth | Hamilton 0.7732028961181641
./Walker2d-v3_PPO_3_6635/actor_000007835163.pth | Hamilton 0.7782047390937805
./Walker2d-v3_PPO_3_6635/actor_000007894179.pth | Hamilton 0.7758763432502747
./Walker2d-v3_PPO_3_6635/actor_000007953023.pth | Hamilton 0.769618570804596
./Walker2d-v3_PPO_3_6635/actor_000008012424.pth | Hamilton 0.761384129524231
./Walker2d-v3_PPO_3_6635/actor_000008068116.pth | Hamilton 0.7561891078948975
./Walker2d-v3_PPO_3_6635/actor_000008124763.pth | Hamilton 0.7476109862327576
./Walker2d-v3_PPO_3_6635/actor_000008176761.pth | Hamilton 0.7540464997291565
./Walker2d-v3_PPO_3_6635/actor_000008230810.pth | Hamilton 0.7447154521942139
./Walker2d-v3_PPO_3_6635/actor_000008290615.pth | Hamilton 0.7597187757492065
./Walker2d-v3_PPO_3_6635/actor_000008354599.pth | Hamilton 0.7634997367858887
./Walker2d-v3_PPO_3_6635/actor_000008419797.pth | Hamilton 0.764761209487915
./Walker2d-v3_PPO_3_6635/actor_000008474258.pth | Hamilton 0.7732122540473938
./Walker2d-v3_PPO_3_6635/actor_000008533154.pth | Hamilton 0.7703934907913208
./Walker2d-v3_PPO_3_6635/actor_000008590844.pth | Hamilton 0.7396253347396851
./Walker2d-v3_PPO_3_6635/actor_000008648224.pth | Hamilton 0.7458689212799072
./Walker2d-v3_PPO_3_6635/actor_000008708901.pth | Hamilton 0.7560151815414429
./Walker2d-v3_PPO_3_6635/actor_000008766844.pth | Hamilton 0.7598954439163208
./Walker2d-v3_PPO_3_6635/actor_000008827592.pth | Hamilton 0.7636743783950806
./Walker2d-v3_PPO_3_6635/actor_000008882910.pth | Hamilton 0.7737241387367249
./Walker2d-v3_PPO_3_6635/actor_000008942884.pth | Hamilton 0.781140923500061
./Walker2d-v3_PPO_3_6635/actor_000008998715.pth | Hamilton 0.7709715366363525
./Walker2d-v3_PPO_3_6635/actor_000009059073.pth | Hamilton 0.7734872698783875
./Walker2d-v3_PPO_3_6635/actor_000009116816.pth | Hamilton 0.7824884653091431
./Walker2d-v3_PPO_3_6635/actor_000009173580.pth | Hamilton 0.7919142842292786
./Walker2d-v3_PPO_3_6635/actor_000009229155.pth | Hamilton 0.7939603328704834
./Walker2d-v3_PPO_3_6635/actor_000009289948.pth | Hamilton 0.7982693910598755
./Walker2d-v3_PPO_3_6635/actor_000009349727.pth | Hamilton 0.794403612613678
./Walker2d-v3_PPO_3_6635/actor_000009408518.pth | Hamilton 0.7916834354400635
./Walker2d-v3_PPO_3_6635/actor_000009466785.pth | Hamilton 0.8053403496742249
./Walker2d-v3_PPO_3_6635/actor_000009522771.pth | Hamilton 0.8052003979682922
./Walker2d-v3_PPO_3_6635/actor_000009581593.pth | Hamilton 0.7969403266906738
./Walker2d-v3_PPO_3_6635/actor_000009640434.pth | Hamilton 0.7918256521224976
./Walker2d-v3_PPO_3_6635/actor_000009698663.pth | Hamilton 0.7704351544380188
./Walker2d-v3_PPO_3_6635/actor_000009753472.pth | Hamilton 0.7847091555595398
./Walker2d-v3_PPO_3_6635/actor_000009807507.pth | Hamilton 0.7594966292381287
./Walker2d-v3_PPO_3_6635/actor_000009859931.pth | Hamilton 0.7624948620796204
./Walker2d-v3_PPO_3_6635/actor_000009911389.pth | Hamilton 0.7485454082489014
./Walker2d-v3_PPO_3_6635/actor_000009965829.pth | Hamilton 0.7480531930923462
./Walker2d-v3_PPO_3_6635/actor_000010021121.pth | Hamilton 0.7323440909385681
./Walker2d-v3_PPO_3_6635/actor_000010076293.pth | Hamilton 0.7485517263412476
./Walker2d-v3_PPO_3_6635/actor_000010135310.pth | Hamilton 0.7378523945808411
./Walker2d-v3_PPO_3_6635/actor_000010192034.pth | Hamilton 0.7374451756477356
./Walker2d-v3_PPO_3_6635/actor_000010244495.pth | Hamilton 0.728163480758667
./Walker2d-v3_PPO_3_6635/actor_000010301442.pth | Hamilton 0.7253691554069519
./Walker2d-v3_PPO_3_6635/actor_000010357588.pth | Hamilton 0.7222223877906799
./Walker2d-v3_PPO_3_6635/actor_000010416375.pth | Hamilton 0.7218358516693115
./Walker2d-v3_PPO_3_6635/actor_000010471627.pth | Hamilton 0.7166824340820312
./Walker2d-v3_PPO_3_6635/actor_000010530137.pth | Hamilton 0.7295182347297668
./Walker2d-v3_PPO_3_6635/actor_000010587710.pth | Hamilton 0.7149263024330139
./Walker2d-v3_PPO_3_6635/actor_000010645766.pth | Hamilton 0.7177409529685974
./Walker2d-v3_PPO_3_6635/actor_000010707894.pth | Hamilton 0.7212621569633484
./Walker2d-v3_PPO_3_6635/actor_000010768712.pth | Hamilton 0.7426449060440063
./Walker2d-v3_PPO_3_6635/actor__000000012172_00012.179.pth | Hamilton 0.03262835741043091
./Walker2d-v3_PPO_3_6635/actor__000000378838_00329.068.pth | Hamilton 0.047297269105911255
./Walker2d-v3_PPO_3_6635/actor__000000742998_00784.900.pth | Hamilton 0.08947175741195679
./Walker2d-v3_PPO_3_6635/actor__000001102779_01365.233.pth | Hamilton 0.17121867835521698
./Walker2d-v3_PPO_3_6635/actor__000001467494_01534.070.pth | Hamilton 0.29262807965278625
./Walker2d-v3_PPO_3_6635/actor__000001831850_04312.181.pth | Hamilton 0.4232810437679291
./Walker2d-v3_PPO_3_6635/actor__000002197442_04548.749.pth | Hamilton 0.5723731517791748
./Walker2d-v3_PPO_3_6635/actor__000002565171_04699.839.pth | Hamilton 0.7016875147819519
./Walker2d-v3_PPO_3_6635/actor__000002931946_04860.750.pth | Hamilton 0.8188680410385132
./Walker2d-v3_PPO_3_6635/actor__000003298249_05175.716.pth | Hamilton 0.9682682156562805
./Walker2d-v3_PPO_3_6635/actor__000003664551_05203.180.pth | Hamilton 0.9984228014945984
./Walker2d-v3_PPO_3_6635/actor__000004033154_05292.623.pth | Hamilton 1.0472486019134521
./Walker2d-v3_PPO_3_6635/actor__000004391985_05459.751.pth | Hamilton 1.0468547344207764
./Walker2d-v3_PPO_3_6635/actor__000004758573_05548.323.pth | Hamilton 1.062732458114624
./Walker2d-v3_PPO_3_6635/actor__000005118937_05670.984.pth | Hamilton 1.080477237701416
./Walker2d-v3_PPO_3_6635/actor__000005480082_05790.323.pth | Hamilton 1.033541202545166
./Walker2d-v3_PPO_3_6635/actor__000005842831_05903.450.pth | Hamilton 1.0606005191802979
./Walker2d-v3_PPO_3_6635/actor__000006204509_06032.451.pth | Hamilton 1.0366705656051636
./Walker2d-v3_PPO_3_6635/actor__000006565574_06154.854.pth | Hamilton 1.0357882976531982
./Walker2d-v3_PPO_3_6635/actor__000007285949_06195.410.pth | Hamilton 1.0567569732666016
./Walker2d-v3_PPO_3_6635/actor__000007650859_06356.390.pth | Hamilton 1.0693386793136597
./Walker2d-v3_PPO_3_6635/actor__000009110078_06453.380.pth | Hamilton 1.0073399543762207
./Walker2d-v3_PPO_3_6635/actor__000009826689_06635.056.pth | Hamilton 0.8949706554412842
"""
# Walker2d-v3_PPO_4_7884: per-checkpoint "Hamilton" metric log (same
# "<checkpoint path> | Hamilton <value>" line format as data43).
# NOTE(review): actor_000019957758.pth appears twice in a row near the end of
# this block — presumably a logging artifact; confirm with the producer before
# deduplicating, since consumers may rely on the raw entry count.
data44 = """
./Walker2d-v3_PPO_4_7884/actor_000000073225.pth | Hamilton 0.044810328632593155
./Walker2d-v3_PPO_4_7884/actor_000000209233.pth | Hamilton 0.061470333486795425
./Walker2d-v3_PPO_4_7884/actor_000000342196.pth | Hamilton 0.08724474906921387
./Walker2d-v3_PPO_4_7884/actor_000000475664.pth | Hamilton 0.11592239141464233
./Walker2d-v3_PPO_4_7884/actor_000000610321.pth | Hamilton 0.1401374489068985
./Walker2d-v3_PPO_4_7884/actor_000000745534.pth | Hamilton 0.17884403467178345
./Walker2d-v3_PPO_4_7884/actor_000000880592.pth | Hamilton 0.22284580767154694
./Walker2d-v3_PPO_4_7884/actor_000001017668.pth | Hamilton 0.2631847858428955
./Walker2d-v3_PPO_4_7884/actor_000001154163.pth | Hamilton 0.3264837861061096
./Walker2d-v3_PPO_4_7884/actor_000001290606.pth | Hamilton 0.397513210773468
./Walker2d-v3_PPO_4_7884/actor_000001431004.pth | Hamilton 0.4849456548690796
./Walker2d-v3_PPO_4_7884/actor_000001578032.pth | Hamilton 0.6127609014511108
./Walker2d-v3_PPO_4_7884/actor_000001728512.pth | Hamilton 0.7138102054595947
./Walker2d-v3_PPO_4_7884/actor_000001883008.pth | Hamilton 0.8252093195915222
./Walker2d-v3_PPO_4_7884/actor_000002036166.pth | Hamilton 0.9256483912467957
./Walker2d-v3_PPO_4_7884/actor_000002187135.pth | Hamilton 0.9922093749046326
./Walker2d-v3_PPO_4_7884/actor_000002340466.pth | Hamilton 1.0900201797485352
./Walker2d-v3_PPO_4_7884/actor_000002492104.pth | Hamilton 1.2071281671524048
./Walker2d-v3_PPO_4_7884/actor_000002645510.pth | Hamilton 1.2975560426712036
./Walker2d-v3_PPO_4_7884/actor_000002794886.pth | Hamilton 1.3490664958953857
./Walker2d-v3_PPO_4_7884/actor_000002946283.pth | Hamilton 1.3788185119628906
./Walker2d-v3_PPO_4_7884/actor_000003098172.pth | Hamilton 1.3769645690917969
./Walker2d-v3_PPO_4_7884/actor_000003244026.pth | Hamilton 1.4195348024368286
./Walker2d-v3_PPO_4_7884/actor_000003388723.pth | Hamilton 1.4133703708648682
./Walker2d-v3_PPO_4_7884/actor_000003543716.pth | Hamilton 1.4055488109588623
./Walker2d-v3_PPO_4_7884/actor_000003696605.pth | Hamilton 1.4227633476257324
./Walker2d-v3_PPO_4_7884/actor_000003843102.pth | Hamilton 1.4294084310531616
./Walker2d-v3_PPO_4_7884/actor_000003988188.pth | Hamilton 1.3942257165908813
./Walker2d-v3_PPO_4_7884/actor_000004133833.pth | Hamilton 1.4088448286056519
./Walker2d-v3_PPO_4_7884/actor_000004285654.pth | Hamilton 1.4054874181747437
./Walker2d-v3_PPO_4_7884/actor_000004434570.pth | Hamilton 1.332322359085083
./Walker2d-v3_PPO_4_7884/actor_000004589946.pth | Hamilton 1.3136394023895264
./Walker2d-v3_PPO_4_7884/actor_000004738336.pth | Hamilton 1.2826125621795654
./Walker2d-v3_PPO_4_7884/actor_000004883155.pth | Hamilton 1.2865899801254272
./Walker2d-v3_PPO_4_7884/actor_000005034912.pth | Hamilton 1.2543002367019653
./Walker2d-v3_PPO_4_7884/actor_000005183905.pth | Hamilton 1.2268785238265991
./Walker2d-v3_PPO_4_7884/actor_000005328808.pth | Hamilton 1.1741422414779663
./Walker2d-v3_PPO_4_7884/actor_000005476784.pth | Hamilton 1.139819622039795
./Walker2d-v3_PPO_4_7884/actor_000005623711.pth | Hamilton 1.1153515577316284
./Walker2d-v3_PPO_4_7884/actor_000005773136.pth | Hamilton 1.1210931539535522
./Walker2d-v3_PPO_4_7884/actor_000005926945.pth | Hamilton 1.1309336423873901
./Walker2d-v3_PPO_4_7884/actor_000006069329.pth | Hamilton 1.105654001235962
./Walker2d-v3_PPO_4_7884/actor_000006212489.pth | Hamilton 1.1002238988876343
./Walker2d-v3_PPO_4_7884/actor_000006363845.pth | Hamilton 1.0909202098846436
./Walker2d-v3_PPO_4_7884/actor_000006517253.pth | Hamilton 1.0542353391647339
./Walker2d-v3_PPO_4_7884/actor_000006667502.pth | Hamilton 1.0891151428222656
./Walker2d-v3_PPO_4_7884/actor_000006813599.pth | Hamilton 1.1217628717422485
./Walker2d-v3_PPO_4_7884/actor_000006961590.pth | Hamilton 1.1565836668014526
./Walker2d-v3_PPO_4_7884/actor_000007107006.pth | Hamilton 1.140400767326355
./Walker2d-v3_PPO_4_7884/actor_000007247572.pth | Hamilton 1.1038604974746704
./Walker2d-v3_PPO_4_7884/actor_000007400022.pth | Hamilton 1.0980366468429565
./Walker2d-v3_PPO_4_7884/actor_000007546588.pth | Hamilton 1.1156584024429321
./Walker2d-v3_PPO_4_7884/actor_000007697080.pth | Hamilton 1.1234101057052612
./Walker2d-v3_PPO_4_7884/actor_000007842828.pth | Hamilton 1.08625066280365
./Walker2d-v3_PPO_4_7884/actor_000007990789.pth | Hamilton 1.1056365966796875
./Walker2d-v3_PPO_4_7884/actor_000008135041.pth | Hamilton 1.0732046365737915
./Walker2d-v3_PPO_4_7884/actor_000008281602.pth | Hamilton 1.0186923742294312
./Walker2d-v3_PPO_4_7884/actor_000008428605.pth | Hamilton 1.0509549379348755
./Walker2d-v3_PPO_4_7884/actor_000008575028.pth | Hamilton 1.0293654203414917
./Walker2d-v3_PPO_4_7884/actor_000008719602.pth | Hamilton 1.031142234802246
./Walker2d-v3_PPO_4_7884/actor_000008870366.pth | Hamilton 1.0119407176971436
./Walker2d-v3_PPO_4_7884/actor_000009015717.pth | Hamilton 0.9998535513877869
./Walker2d-v3_PPO_4_7884/actor_000009160288.pth | Hamilton 0.9700120687484741
./Walker2d-v3_PPO_4_7884/actor_000009303823.pth | Hamilton 0.9851129055023193
./Walker2d-v3_PPO_4_7884/actor_000009450356.pth | Hamilton 0.9827463626861572
./Walker2d-v3_PPO_4_7884/actor_000009596589.pth | Hamilton 0.9993273615837097
./Walker2d-v3_PPO_4_7884/actor_000009741106.pth | Hamilton 0.9758424162864685
./Walker2d-v3_PPO_4_7884/actor_000009881545.pth | Hamilton 0.9426652789115906
./Walker2d-v3_PPO_4_7884/actor_000010023570.pth | Hamilton 0.9391447305679321
./Walker2d-v3_PPO_4_7884/actor_000010169288.pth | Hamilton 0.9776617884635925
./Walker2d-v3_PPO_4_7884/actor_000010314367.pth | Hamilton 0.967212438583374
./Walker2d-v3_PPO_4_7884/actor_000010462803.pth | Hamilton 0.9687291979789734
./Walker2d-v3_PPO_4_7884/actor_000010604819.pth | Hamilton 0.9568792581558228
./Walker2d-v3_PPO_4_7884/actor_000010743516.pth | Hamilton 0.9551551938056946
./Walker2d-v3_PPO_4_7884/actor_000010883673.pth | Hamilton 0.9347825050354004
./Walker2d-v3_PPO_4_7884/actor_000011026189.pth | Hamilton 0.931612491607666
./Walker2d-v3_PPO_4_7884/actor_000011173866.pth | Hamilton 0.9647425413131714
./Walker2d-v3_PPO_4_7884/actor_000011319209.pth | Hamilton 0.943316638469696
./Walker2d-v3_PPO_4_7884/actor_000011462060.pth | Hamilton 0.9403418302536011
./Walker2d-v3_PPO_4_7884/actor_000011599892.pth | Hamilton 0.9124143719673157
./Walker2d-v3_PPO_4_7884/actor_000011744775.pth | Hamilton 0.8836234211921692
./Walker2d-v3_PPO_4_7884/actor_000011891037.pth | Hamilton 0.8960395455360413
./Walker2d-v3_PPO_4_7884/actor_000012040383.pth | Hamilton 0.9100744724273682
./Walker2d-v3_PPO_4_7884/actor_000012186731.pth | Hamilton 0.886787474155426
./Walker2d-v3_PPO_4_7884/actor_000012333955.pth | Hamilton 0.8668569922447205
./Walker2d-v3_PPO_4_7884/actor_000012488200.pth | Hamilton 0.903489887714386
./Walker2d-v3_PPO_4_7884/actor_000012638981.pth | Hamilton 0.9060494899749756
./Walker2d-v3_PPO_4_7884/actor_000012788489.pth | Hamilton 0.915973424911499
./Walker2d-v3_PPO_4_7884/actor_000012932830.pth | Hamilton 0.8710293769836426
./Walker2d-v3_PPO_4_7884/actor_000013079448.pth | Hamilton 0.8548725843429565
./Walker2d-v3_PPO_4_7884/actor_000013223346.pth | Hamilton 0.8557795882225037
./Walker2d-v3_PPO_4_7884/actor_000013371208.pth | Hamilton 0.8430655002593994
./Walker2d-v3_PPO_4_7884/actor_000013520948.pth | Hamilton 0.8432157635688782
./Walker2d-v3_PPO_4_7884/actor_000013671663.pth | Hamilton 0.8388532400131226
./Walker2d-v3_PPO_4_7884/actor_000013821359.pth | Hamilton 0.83554607629776
./Walker2d-v3_PPO_4_7884/actor_000013974606.pth | Hamilton 0.8343712091445923
./Walker2d-v3_PPO_4_7884/actor_000014123633.pth | Hamilton 0.843206524848938
./Walker2d-v3_PPO_4_7884/actor_000014272822.pth | Hamilton 0.8422938585281372
./Walker2d-v3_PPO_4_7884/actor_000014422133.pth | Hamilton 0.8300326466560364
./Walker2d-v3_PPO_4_7884/actor_000014574626.pth | Hamilton 0.8480035066604614
./Walker2d-v3_PPO_4_7884/actor_000014724011.pth | Hamilton 0.8517335653305054
./Walker2d-v3_PPO_4_7884/actor_000014867233.pth | Hamilton 0.8566312789916992
./Walker2d-v3_PPO_4_7884/actor_000015012442.pth | Hamilton 0.8417342901229858
./Walker2d-v3_PPO_4_7884/actor_000015150118.pth | Hamilton 0.8494038581848145
./Walker2d-v3_PPO_4_7884/actor_000015286852.pth | Hamilton 0.8653177618980408
./Walker2d-v3_PPO_4_7884/actor_000015428926.pth | Hamilton 0.8875323534011841
./Walker2d-v3_PPO_4_7884/actor_000015578886.pth | Hamilton 0.8919367790222168
./Walker2d-v3_PPO_4_7884/actor_000015726601.pth | Hamilton 0.9082167744636536
./Walker2d-v3_PPO_4_7884/actor_000015872215.pth | Hamilton 0.9046029448509216
./Walker2d-v3_PPO_4_7884/actor_000016016187.pth | Hamilton 0.8892053961753845
./Walker2d-v3_PPO_4_7884/actor_000016159262.pth | Hamilton 0.8920382857322693
./Walker2d-v3_PPO_4_7884/actor_000016302694.pth | Hamilton 0.854295551776886
./Walker2d-v3_PPO_4_7884/actor_000016442077.pth | Hamilton 0.8632417917251587
./Walker2d-v3_PPO_4_7884/actor_000016582661.pth | Hamilton 0.8530678749084473
./Walker2d-v3_PPO_4_7884/actor_000016722503.pth | Hamilton 0.8256974816322327
./Walker2d-v3_PPO_4_7884/actor_000016864550.pth | Hamilton 0.8281449675559998
./Walker2d-v3_PPO_4_7884/actor_000017013977.pth | Hamilton 0.8289390802383423
./Walker2d-v3_PPO_4_7884/actor_000017164857.pth | Hamilton 0.8402869701385498
./Walker2d-v3_PPO_4_7884/actor_000017313426.pth | Hamilton 0.8473198413848877
./Walker2d-v3_PPO_4_7884/actor_000017458602.pth | Hamilton 0.8511500358581543
./Walker2d-v3_PPO_4_7884/actor_000017605108.pth | Hamilton 0.8387201428413391
./Walker2d-v3_PPO_4_7884/actor_000017750555.pth | Hamilton 0.836371660232544
./Walker2d-v3_PPO_4_7884/actor_000017899665.pth | Hamilton 0.7755534052848816
./Walker2d-v3_PPO_4_7884/actor_000018045485.pth | Hamilton 0.7680171132087708
./Walker2d-v3_PPO_4_7884/actor_000018183632.pth | Hamilton 0.7570532560348511
./Walker2d-v3_PPO_4_7884/actor_000018330037.pth | Hamilton 0.7302579879760742
./Walker2d-v3_PPO_4_7884/actor_000018481808.pth | Hamilton 0.707948625087738
./Walker2d-v3_PPO_4_7884/actor_000018625126.pth | Hamilton 0.7164007425308228
./Walker2d-v3_PPO_4_7884/actor_000018771570.pth | Hamilton 0.732720136642456
./Walker2d-v3_PPO_4_7884/actor_000018920899.pth | Hamilton 0.703303873538971
./Walker2d-v3_PPO_4_7884/actor_000019068053.pth | Hamilton 0.7047895789146423
./Walker2d-v3_PPO_4_7884/actor_000019223353.pth | Hamilton 0.699423611164093
./Walker2d-v3_PPO_4_7884/actor_000019369729.pth | Hamilton 0.686767578125
./Walker2d-v3_PPO_4_7884/actor_000019519391.pth | Hamilton 0.6802480220794678
./Walker2d-v3_PPO_4_7884/actor_000019659919.pth | Hamilton 0.6746554970741272
./Walker2d-v3_PPO_4_7884/actor_000019807645.pth | Hamilton 0.681246280670166
./Walker2d-v3_PPO_4_7884/actor_000019957758.pth | Hamilton 0.682015061378479
./Walker2d-v3_PPO_4_7884/actor_000019957758.pth | Hamilton 0.682015061378479
./Walker2d-v3_PPO_4_7884/actor__000000016096_00098.680.pth | Hamilton 0.009500125423073769
./Walker2d-v3_PPO_4_7884/actor__000000375681_00518.777.pth | Hamilton 0.03045632876455784
./Walker2d-v3_PPO_4_7884/actor__000000737265_00799.500.pth | Hamilton 0.08073711395263672
./Walker2d-v3_PPO_4_7884/actor__000001094283_00858.355.pth | Hamilton 0.1364787071943283
./Walker2d-v3_PPO_4_7884/actor__000001459056_02571.776.pth | Hamilton 0.2304132878780365
./Walker2d-v3_PPO_4_7884/actor__000001823860_05191.970.pth | Hamilton 0.33594363927841187
./Walker2d-v3_PPO_4_7884/actor__000002187135_05464.047.pth | Hamilton 0.4610356092453003
./Walker2d-v3_PPO_4_7884/actor__000002918093_05819.945.pth | Hamilton 0.5923557281494141
./Walker2d-v3_PPO_4_7884/actor__000003659378_06260.537.pth | Hamilton 0.6636354923248291
./Walker2d-v3_PPO_4_7884/actor__000004024376_06369.756.pth | Hamilton 0.7016980051994324
./Walker2d-v3_PPO_4_7884/actor__000004389276_06449.121.pth | Hamilton 0.7344204187393188
./Walker2d-v3_PPO_4_7884/actor__000004757994_06566.651.pth | Hamilton 0.7607460618019104
./Walker2d-v3_PPO_4_7884/actor__000005486413_06780.500.pth | Hamilton 0.787914514541626
./Walker2d-v3_PPO_4_7884/actor__000005850928_07051.599.pth | Hamilton 0.8591291308403015
./Walker2d-v3_PPO_4_7884/actor__000006952595_07151.677.pth | Hamilton 0.9046728014945984
./Walker2d-v3_PPO_4_7884/actor__000008419381_07250.233.pth | Hamilton 0.9762046337127686
./Walker2d-v3_PPO_4_7884/actor__000008786061_07354.451.pth | Hamilton 1.012585997581482
./Walker2d-v3_PPO_4_7884/actor__000011724686_07384.114.pth | Hamilton 1.0615894794464111
./Walker2d-v3_PPO_4_7884/actor__000012833068_07466.334.pth | Hamilton 1.1436699628829956
./Walker2d-v3_PPO_4_7884/actor__000014697394_07625.368.pth | Hamilton 1.1226475238800049
./Walker2d-v3_PPO_4_7884/actor__000015807921_07706.893.pth | Hamilton 1.126160979270935
./Walker2d-v3_PPO_4_7884/actor__000017641970_07742.604.pth | Hamilton 0.9916056394577026
./Walker2d-v3_PPO_4_7884/actor__000018754143_07884.901.pth | Hamilton 0.8471044898033142
"""
# Walker2d-v3_PPO_2_7191: per-checkpoint "Hamilton" metric log (same
# "<checkpoint path> | Hamilton <value>" line format as data43).
data45 = """
./Walker2d-v3_PPO_2_7191/actor_000000074593.pth | Hamilton 0.02706068381667137
./Walker2d-v3_PPO_2_7191/actor_000000208200.pth | Hamilton 0.04495815187692642
./Walker2d-v3_PPO_2_7191/actor_000000341240.pth | Hamilton 0.0657462626695633
./Walker2d-v3_PPO_2_7191/actor_000000473718.pth | Hamilton 0.08721550554037094
./Walker2d-v3_PPO_2_7191/actor_000000607635.pth | Hamilton 0.1116761863231659
./Walker2d-v3_PPO_2_7191/actor_000000742810.pth | Hamilton 0.14602085947990417
./Walker2d-v3_PPO_2_7191/actor_000000878175.pth | Hamilton 0.18098057806491852
./Walker2d-v3_PPO_2_7191/actor_000001018008.pth | Hamilton 0.2286052256822586
./Walker2d-v3_PPO_2_7191/actor_000001162862.pth | Hamilton 0.29176223278045654
./Walker2d-v3_PPO_2_7191/actor_000001310774.pth | Hamilton 0.3957574665546417
./Walker2d-v3_PPO_2_7191/actor_000001462296.pth | Hamilton 0.45471832156181335
./Walker2d-v3_PPO_2_7191/actor_000001613578.pth | Hamilton 0.5143200159072876
./Walker2d-v3_PPO_2_7191/actor_000001771270.pth | Hamilton 0.6280671954154968
./Walker2d-v3_PPO_2_7191/actor_000001920864.pth | Hamilton 0.7256432771682739
./Walker2d-v3_PPO_2_7191/actor_000002064887.pth | Hamilton 0.7917588949203491
./Walker2d-v3_PPO_2_7191/actor_000002215407.pth | Hamilton 0.8519753813743591
./Walker2d-v3_PPO_2_7191/actor_000002369579.pth | Hamilton 0.8804962038993835
./Walker2d-v3_PPO_2_7191/actor_000002520021.pth | Hamilton 0.9376488924026489
./Walker2d-v3_PPO_2_7191/actor_000002663448.pth | Hamilton 0.9509913921356201
./Walker2d-v3_PPO_2_7191/actor_000002807613.pth | Hamilton 1.0002484321594238
./Walker2d-v3_PPO_2_7191/actor_000002952238.pth | Hamilton 1.0015190839767456
./Walker2d-v3_PPO_2_7191/actor_000003103261.pth | Hamilton 0.9994683861732483
./Walker2d-v3_PPO_2_7191/actor_000003252154.pth | Hamilton 1.011381983757019
./Walker2d-v3_PPO_2_7191/actor_000003399053.pth | Hamilton 1.040096640586853
./Walker2d-v3_PPO_2_7191/actor_000003541941.pth | Hamilton 1.0437896251678467
./Walker2d-v3_PPO_2_7191/actor_000003690148.pth | Hamilton 1.031385064125061
./Walker2d-v3_PPO_2_7191/actor_000003840894.pth | Hamilton 1.0316625833511353
./Walker2d-v3_PPO_2_7191/actor_000003988847.pth | Hamilton 1.0628533363342285
./Walker2d-v3_PPO_2_7191/actor_000004133631.pth | Hamilton 1.073077917098999
./Walker2d-v3_PPO_2_7191/actor_000004278794.pth | Hamilton 1.0695089101791382
./Walker2d-v3_PPO_2_7191/actor_000004419695.pth | Hamilton 1.0806150436401367
./Walker2d-v3_PPO_2_7191/actor_000004560055.pth | Hamilton 1.0987417697906494
./Walker2d-v3_PPO_2_7191/actor_000004706520.pth | Hamilton 1.0848559141159058
./Walker2d-v3_PPO_2_7191/actor_000004849941.pth | Hamilton 1.0855449438095093
./Walker2d-v3_PPO_2_7191/actor_000004995749.pth | Hamilton 1.1250569820404053
./Walker2d-v3_PPO_2_7191/actor_000005133769.pth | Hamilton 1.1274211406707764
./Walker2d-v3_PPO_2_7191/actor_000005277326.pth | Hamilton 1.1044501066207886
./Walker2d-v3_PPO_2_7191/actor_000005415725.pth | Hamilton 1.1185340881347656
./Walker2d-v3_PPO_2_7191/actor_000005555160.pth | Hamilton 1.1340880393981934
./Walker2d-v3_PPO_2_7191/actor_000005697136.pth | Hamilton 1.1250739097595215
./Walker2d-v3_PPO_2_7191/actor_000005841632.pth | Hamilton 1.148882269859314
./Walker2d-v3_PPO_2_7191/actor_000005984558.pth | Hamilton 1.1619127988815308
./Walker2d-v3_PPO_2_7191/actor_000006132681.pth | Hamilton 1.1532893180847168
./Walker2d-v3_PPO_2_7191/actor_000006275829.pth | Hamilton 1.1663776636123657
./Walker2d-v3_PPO_2_7191/actor_000006413003.pth | Hamilton 1.1481581926345825
./Walker2d-v3_PPO_2_7191/actor_000006549936.pth | Hamilton 1.1534254550933838
./Walker2d-v3_PPO_2_7191/actor_000006687052.pth | Hamilton 1.1556867361068726
./Walker2d-v3_PPO_2_7191/actor_000006832378.pth | Hamilton 1.145704984664917
./Walker2d-v3_PPO_2_7191/actor_000006978471.pth | Hamilton 1.1522480249404907
./Walker2d-v3_PPO_2_7191/actor_000007114562.pth | Hamilton 1.161920428276062
./Walker2d-v3_PPO_2_7191/actor_000007252168.pth | Hamilton 1.1399940252304077
./Walker2d-v3_PPO_2_7191/actor_000007389799.pth | Hamilton 1.1015349626541138
./Walker2d-v3_PPO_2_7191/actor_000007526673.pth | Hamilton 1.083055019378662
./Walker2d-v3_PPO_2_7191/actor_000007670063.pth | Hamilton 1.0688763856887817
./Walker2d-v3_PPO_2_7191/actor_000007814612.pth | Hamilton 1.0752573013305664
./Walker2d-v3_PPO_2_7191/actor_000007956925.pth | Hamilton 1.0846757888793945
./Walker2d-v3_PPO_2_7191/actor_000008095882.pth | Hamilton 1.0713415145874023
./Walker2d-v3_PPO_2_7191/actor_000008239587.pth | Hamilton 1.0811548233032227
./Walker2d-v3_PPO_2_7191/actor_000008377481.pth | Hamilton 1.0645619630813599
./Walker2d-v3_PPO_2_7191/actor_000008521335.pth | Hamilton 1.0533292293548584
./Walker2d-v3_PPO_2_7191/actor_000008667889.pth | Hamilton 1.0226120948791504
./Walker2d-v3_PPO_2_7191/actor_000008805826.pth | Hamilton 0.9843504428863525
./Walker2d-v3_PPO_2_7191/actor_000008942533.pth | Hamilton 0.9783807396888733
./Walker2d-v3_PPO_2_7191/actor_000009078231.pth | Hamilton 0.97062748670578
./Walker2d-v3_PPO_2_7191/actor_000009217606.pth | Hamilton 0.9520869255065918
./Walker2d-v3_PPO_2_7191/actor_000009360041.pth | Hamilton 0.9395900368690491
./Walker2d-v3_PPO_2_7191/actor_000009501627.pth | Hamilton 0.9472063183784485
./Walker2d-v3_PPO_2_7191/actor_000009640448.pth | Hamilton 0.9370429515838623
./Walker2d-v3_PPO_2_7191/actor_000009777659.pth | Hamilton 0.9310764670372009
./Walker2d-v3_PPO_2_7191/actor_000009914636.pth | Hamilton 0.8988937139511108
./Walker2d-v3_PPO_2_7191/actor_000010057887.pth | Hamilton 0.9002514481544495
./Walker2d-v3_PPO_2_7191/actor_000010199345.pth | Hamilton 0.9074289202690125
./Walker2d-v3_PPO_2_7191/actor_000010342646.pth | Hamilton 0.8828000426292419
./Walker2d-v3_PPO_2_7191/actor_000010483767.pth | Hamilton 0.8691655397415161
./Walker2d-v3_PPO_2_7191/actor_000010621457.pth | Hamilton 0.8661413192749023
./Walker2d-v3_PPO_2_7191/actor_000010761789.pth | Hamilton 0.8716228008270264
./Walker2d-v3_PPO_2_7191/actor_000010907652.pth | Hamilton 0.8553466200828552
./Walker2d-v3_PPO_2_7191/actor_000011051697.pth | Hamilton 0.837342381477356
./Walker2d-v3_PPO_2_7191/actor_000011192313.pth | Hamilton 0.8361001014709473
./Walker2d-v3_PPO_2_7191/actor_000011327120.pth | Hamilton 0.8421844244003296
./Walker2d-v3_PPO_2_7191/actor_000011467774.pth | Hamilton 0.8517001271247864
./Walker2d-v3_PPO_2_7191/actor_000011609896.pth | Hamilton 0.8417742848396301
./Walker2d-v3_PPO_2_7191/actor_000011749306.pth | Hamilton 0.8494670987129211
./Walker2d-v3_PPO_2_7191/actor_000011888778.pth | Hamilton 0.8304811120033264
./Walker2d-v3_PPO_2_7191/actor_000012027245.pth | Hamilton 0.8229493498802185
./Walker2d-v3_PPO_2_7191/actor_000012166536.pth | Hamilton 0.8057084679603577
./Walker2d-v3_PPO_2_7191/actor_000012302897.pth | Hamilton 0.7905206084251404
./Walker2d-v3_PPO_2_7191/actor_000012442116.pth | Hamilton 0.7911020517349243
./Walker2d-v3_PPO_2_7191/actor_000012581024.pth | Hamilton 0.7934130430221558
./Walker2d-v3_PPO_2_7191/actor_000012718639.pth | Hamilton 0.7995584011077881
./Walker2d-v3_PPO_2_7191/actor_000012853882.pth | Hamilton 0.7985075116157532
./Walker2d-v3_PPO_2_7191/actor_000012994206.pth | Hamilton 0.7893863916397095
./Walker2d-v3_PPO_2_7191/actor_000013127551.pth | Hamilton 0.7848753333091736
./Walker2d-v3_PPO_2_7191/actor_000013261980.pth | Hamilton 0.7858855128288269
./Walker2d-v3_PPO_2_7191/actor_000013402455.pth | Hamilton 0.7683066129684448
./Walker2d-v3_PPO_2_7191/actor_000013542251.pth | Hamilton 0.7840059399604797
./Walker2d-v3_PPO_2_7191/actor_000013679767.pth | Hamilton 0.7833513617515564
./Walker2d-v3_PPO_2_7191/actor_000013821556.pth | Hamilton 0.7994288206100464
./Walker2d-v3_PPO_2_7191/actor_000013961131.pth | Hamilton 0.7847924828529358
./Walker2d-v3_PPO_2_7191/actor_000014095936.pth | Hamilton 0.787776529788971
./Walker2d-v3_PPO_2_7191/actor_000014229202.pth | Hamilton 0.8048922419548035
./Walker2d-v3_PPO_2_7191/actor_000014365869.pth | Hamilton 0.795133113861084
./Walker2d-v3_PPO_2_7191/actor_000014502155.pth | Hamilton 0.8053557276725769
./Walker2d-v3_PPO_2_7191/actor_000014634023.pth | Hamilton 0.799594521522522
./Walker2d-v3_PPO_2_7191/actor_000014770318.pth | Hamilton 0.7997891306877136
./Walker2d-v3_PPO_2_7191/actor_000014907588.pth | Hamilton 0.7863771319389343
./Walker2d-v3_PPO_2_7191/actor_000015041941.pth | Hamilton 0.7918623685836792
./Walker2d-v3_PPO_2_7191/actor_000015176038.pth | Hamilton 0.7717202305793762
./Walker2d-v3_PPO_2_7191/actor_000015315419.pth | Hamilton 0.7594095468521118
./Walker2d-v3_PPO_2_7191/actor_000015455882.pth | Hamilton 0.7480865120887756
./Walker2d-v3_PPO_2_7191/actor_000015599389.pth | Hamilton 0.7643914222717285
./Walker2d-v3_PPO_2_7191/actor_000015735781.pth | Hamilton 0.751555323600769
./Walker2d-v3_PPO_2_7191/actor_000015876693.pth | Hamilton 0.7530186772346497
./Walker2d-v3_PPO_2_7191/actor_000016017560.pth | Hamilton 0.766223132610321
./Walker2d-v3_PPO_2_7191/actor_000016155581.pth | Hamilton 0.769619345664978
./Walker2d-v3_PPO_2_7191/actor_000016293273.pth | Hamilton 0.7689034938812256
./Walker2d-v3_PPO_2_7191/actor_000016432715.pth | Hamilton 0.7646679878234863
./Walker2d-v3_PPO_2_7191/actor_000016576664.pth | Hamilton 0.7605509757995605
./Walker2d-v3_PPO_2_7191/actor_000016717356.pth | Hamilton 0.7519553303718567
./Walker2d-v3_PPO_2_7191/actor_000016855020.pth | Hamilton 0.7263669371604919
./Walker2d-v3_PPO_2_7191/actor_000016996667.pth | Hamilton 0.7400013208389282
./Walker2d-v3_PPO_2_7191/actor_000017137727.pth | Hamilton 0.7125568389892578
./Walker2d-v3_PPO_2_7191/actor__000000016301_-0002.010.pth | Hamilton 0.016312673687934875
./Walker2d-v3_PPO_2_7191/actor__000000374246_00380.411.pth | Hamilton 0.035325028002262115
./Walker2d-v3_PPO_2_7191/actor__000000734307_01400.557.pth | Hamilton 0.07955460250377655
./Walker2d-v3_PPO_2_7191/actor__000001090133_03548.353.pth | Hamilton 0.1683308631181717
./Walker2d-v3_PPO_2_7191/actor__000001452780_04881.900.pth | Hamilton 0.25703248381614685
./Walker2d-v3_PPO_2_7191/actor__000001809053_05199.737.pth | Hamilton 0.3392091691493988
./Walker2d-v3_PPO_2_7191/actor__000002167811_05380.266.pth | Hamilton 0.41228827834129333
./Walker2d-v3_PPO_2_7191/actor__000002529035_05508.923.pth | Hamilton 0.5086715221405029
./Walker2d-v3_PPO_2_7191/actor__000002889421_05659.489.pth | Hamilton 0.597420334815979
./Walker2d-v3_PPO_2_7191/actor__000003252154_05730.158.pth | Hamilton 0.639396071434021
./Walker2d-v3_PPO_2_7191/actor__000003616791_05853.276.pth | Hamilton 0.7095003724098206
./Walker2d-v3_PPO_2_7191/actor__000003979481_06037.587.pth | Hamilton 0.7803106904029846
./Walker2d-v3_PPO_2_7191/actor__000005054915_06161.796.pth | Hamilton 0.8780654668807983
./Walker2d-v3_PPO_2_7191/actor__000005415725_06286.334.pth | Hamilton 0.9216188192367554
./Walker2d-v3_PPO_2_7191/actor__000006488827_06423.980.pth | Hamilton 0.9855380058288574
./Walker2d-v3_PPO_2_7191/actor__000007570480_06497.105.pth | Hamilton 0.9940043091773987
./Walker2d-v3_PPO_2_7191/actor__000008641549_06607.059.pth | Hamilton 1.014743685722351
./Walker2d-v3_PPO_2_7191/actor__000009360041_06667.828.pth | Hamilton 1.0375499725341797
./Walker2d-v3_PPO_2_7191/actor__000010074838_06726.993.pth | Hamilton 1.0260100364685059
./Walker2d-v3_PPO_2_7191/actor__000011519821_06786.244.pth | Hamilton 1.0455682277679443
./Walker2d-v3_PPO_2_7191/actor__000012967548_06836.671.pth | Hamilton 1.0122387409210205
./Walker2d-v3_PPO_2_7191/actor__000014054768_07046.842.pth | Hamilton 0.9785829186439514
./Walker2d-v3_PPO_2_7191/actor__000014417693_07091.800.pth | Hamilton 0.9941135048866272
./Walker2d-v3_PPO_2_7191/actor__000015859169_07191.100.pth | Hamilton 0.8641228675842285
"""
# Walker2d-v3_PPO_3_5449
data46 = """
./Walker2d-v3_PPO_3_5449/actor_000000074258.pth | Hamilton 0.05927193909883499
./Walker2d-v3_PPO_3_5449/actor_000000210176.pth | Hamilton 0.08470804244279861
./Walker2d-v3_PPO_3_5449/actor_000000344773.pth | Hamilton 0.12457162141799927
./Walker2d-v3_PPO_3_5449/actor_000000480811.pth | Hamilton 0.17581620812416077
./Walker2d-v3_PPO_3_5449/actor_000000615139.pth | Hamilton 0.24777986109256744
./Walker2d-v3_PPO_3_5449/actor_000000751933.pth | Hamilton 0.3219289779663086
./Walker2d-v3_PPO_3_5449/actor_000000893483.pth | Hamilton 0.4655914604663849
./Walker2d-v3_PPO_3_5449/actor_000001043310.pth | Hamilton 0.6539282202720642
./Walker2d-v3_PPO_3_5449/actor_000001196260.pth | Hamilton 0.8266529440879822
./Walker2d-v3_PPO_3_5449/actor_000001345761.pth | Hamilton 0.8739662766456604
./Walker2d-v3_PPO_3_5449/actor_000001494994.pth | Hamilton 1.0259034633636475
./Walker2d-v3_PPO_3_5449/actor_000001645714.pth | Hamilton 0.9435088634490967
./Walker2d-v3_PPO_3_5449/actor_000001793638.pth | Hamilton 1.0118099451065063
./Walker2d-v3_PPO_3_5449/actor_000001939570.pth | Hamilton 0.890204906463623
./Walker2d-v3_PPO_3_5449/actor_000002092800.pth | Hamilton 0.9676378965377808
./Walker2d-v3_PPO_3_5449/actor_000002242869.pth | Hamilton 0.8449556827545166
./Walker2d-v3_PPO_3_5449/actor_000002392652.pth | Hamilton 0.7909901738166809
./Walker2d-v3_PPO_3_5449/actor_000002541433.pth | Hamilton 0.7888413667678833
./Walker2d-v3_PPO_3_5449/actor_000002694797.pth | Hamilton 0.835628867149353
./Walker2d-v3_PPO_3_5449/actor_000002845493.pth | Hamilton 0.7881745100021362
./Walker2d-v3_PPO_3_5449/actor_000002995144.pth | Hamilton 0.8428267240524292
./Walker2d-v3_PPO_3_5449/actor_000003144140.pth | Hamilton 0.8054295182228088
./Walker2d-v3_PPO_3_5449/actor_000003291419.pth | Hamilton 0.6952739357948303
./Walker2d-v3_PPO_3_5449/actor_000003440494.pth | Hamilton 0.7230075001716614
./Walker2d-v3_PPO_3_5449/actor_000003589396.pth | Hamilton 0.6298981308937073
./Walker2d-v3_PPO_3_5449/actor_000003737585.pth | Hamilton 0.6098143458366394
./Walker2d-v3_PPO_3_5449/actor_000003886177.pth | Hamilton 0.5895996689796448
./Walker2d-v3_PPO_3_5449/actor_000004032828.pth | Hamilton 0.5341766476631165
./Walker2d-v3_PPO_3_5449/actor_000004181927.pth | Hamilton 0.5673055648803711
./Walker2d-v3_PPO_3_5449/actor_000004329079.pth | Hamilton 0.5569615364074707
./Walker2d-v3_PPO_3_5449/actor_000004479143.pth | Hamilton 0.5873989462852478
./Walker2d-v3_PPO_3_5449/actor_000004628515.pth | Hamilton 0.5362474322319031
./Walker2d-v3_PPO_3_5449/actor_000004777491.pth | Hamilton 0.542984664440155
./Walker2d-v3_PPO_3_5449/actor_000004922135.pth | Hamilton 0.5058737993240356
./Walker2d-v3_PPO_3_5449/actor_000005079640.pth | Hamilton 0.5049150586128235
./Walker2d-v3_PPO_3_5449/actor_000005229014.pth | Hamilton 0.4917865991592407
./Walker2d-v3_PPO_3_5449/actor_000005376125.pth | Hamilton 0.5029265880584717
./Walker2d-v3_PPO_3_5449/actor_000005520214.pth | Hamilton 0.4371909201145172
./Walker2d-v3_PPO_3_5449/actor_000005668593.pth | Hamilton 0.42250123620033264
./Walker2d-v3_PPO_3_5449/actor_000005817298.pth | Hamilton 0.4222284257411957
./Walker2d-v3_PPO_3_5449/actor_000005970805.pth | Hamilton 0.42004191875457764
./Walker2d-v3_PPO_3_5449/actor_000006116171.pth | Hamilton 0.38574182987213135
./Walker2d-v3_PPO_3_5449/actor_000006257903.pth | Hamilton 0.4004722535610199
./Walker2d-v3_PPO_3_5449/actor_000006404988.pth | Hamilton 0.4078262448310852
./Walker2d-v3_PPO_3_5449/actor_000006548649.pth | Hamilton 0.4215970039367676
./Walker2d-v3_PPO_3_5449/actor_000006702295.pth | Hamilton 0.40582090616226196
./Walker2d-v3_PPO_3_5449/actor_000006854724.pth | Hamilton 0.4143565595149994
./Walker2d-v3_PPO_3_5449/actor_000007003979.pth | Hamilton 0.39081352949142456
./Walker2d-v3_PPO_3_5449/actor_000007154368.pth | Hamilton 0.40674731135368347
./Walker2d-v3_PPO_3_5449/actor_000007301571.pth | Hamilton 0.37200799584388733
./Walker2d-v3_PPO_3_5449/actor_000007449284.pth | Hamilton 0.37650981545448303
./Walker2d-v3_PPO_3_5449/actor_000007597071.pth | Hamilton 0.37701019644737244
./Walker2d-v3_PPO_3_5449/actor_000007746696.pth | Hamilton 0.38779065012931824
./Walker2d-v3_PPO_3_5449/actor_000007894699.pth | Hamilton 0.3533872067928314
./Walker2d-v3_PPO_3_5449/actor_000008038341.pth | Hamilton 0.37704506516456604
./Walker2d-v3_PPO_3_5449/actor_000008191243.pth | Hamilton 0.37151890993118286
./Walker2d-v3_PPO_3_5449/actor_000008341748.pth | Hamilton 0.38730698823928833
./Walker2d-v3_PPO_3_5449/actor_000008492965.pth | Hamilton 0.36730828881263733
./Walker2d-v3_PPO_3_5449/actor_000008646414.pth | Hamilton 0.3151562511920929
./Walker2d-v3_PPO_3_5449/actor_000008798459.pth | Hamilton 0.31878551840782166
./Walker2d-v3_PPO_3_5449/actor_000008947944.pth | Hamilton 0.30067676305770874
./Walker2d-v3_PPO_3_5449/actor_000009097431.pth | Hamilton 0.31717318296432495
./Walker2d-v3_PPO_3_5449/actor_000009249438.pth | Hamilton 0.29750099778175354
./Walker2d-v3_PPO_3_5449/actor_000009396922.pth | Hamilton 0.28379613161087036
./Walker2d-v3_PPO_3_5449/actor_000009548836.pth | Hamilton 0.28231579065322876
./Walker2d-v3_PPO_3_5449/actor_000009698775.pth | Hamilton 0.29302772879600525
./Walker2d-v3_PPO_3_5449/actor_000009844873.pth | Hamilton 0.29226282238960266
./Walker2d-v3_PPO_3_5449/actor_000009993815.pth | Hamilton 0.2809481918811798
./Walker2d-v3_PPO_3_5449/actor_000010144489.pth | Hamilton 0.27926284074783325
./Walker2d-v3_PPO_3_5449/actor_000010294691.pth | Hamilton 0.2632773816585541
./Walker2d-v3_PPO_3_5449/actor_000010447771.pth | Hamilton 0.2708342671394348
./Walker2d-v3_PPO_3_5449/actor_000010599047.pth | Hamilton 0.2633924186229706
./Walker2d-v3_PPO_3_5449/actor_000010747502.pth | Hamilton 0.2496318370103836
./Walker2d-v3_PPO_3_5449/actor_000010901222.pth | Hamilton 0.24378125369548798
./Walker2d-v3_PPO_3_5449/actor_000011047347.pth | Hamilton 0.2540603280067444
./Walker2d-v3_PPO_3_5449/actor_000011196414.pth | Hamilton 0.22598586976528168
./Walker2d-v3_PPO_3_5449/actor_000011345566.pth | Hamilton 0.21788570284843445
./Walker2d-v3_PPO_3_5449/actor_000011494670.pth | Hamilton 0.20984409749507904
./Walker2d-v3_PPO_3_5449/actor_000011649553.pth | Hamilton 0.20027506351470947
./Walker2d-v3_PPO_3_5449/actor_000011799782.pth | Hamilton 0.20682501792907715
./Walker2d-v3_PPO_3_5449/actor_000011948669.pth | Hamilton 0.1994415670633316
./Walker2d-v3_PPO_3_5449/actor_000012095490.pth | Hamilton 0.20922201871871948
./Walker2d-v3_PPO_3_5449/actor_000012242089.pth | Hamilton 0.21021421253681183
./Walker2d-v3_PPO_3_5449/actor_000012393008.pth | Hamilton 0.21288038790225983
./Walker2d-v3_PPO_3_5449/actor_000012539995.pth | Hamilton 0.1932596117258072
./Walker2d-v3_PPO_3_5449/actor_000012691132.pth | Hamilton 0.19240409135818481
./Walker2d-v3_PPO_3_5449/actor_000012835382.pth | Hamilton 0.1945323795080185
./Walker2d-v3_PPO_3_5449/actor_000012981267.pth | Hamilton 0.18749694526195526
./Walker2d-v3_PPO_3_5449/actor_000013132524.pth | Hamilton 0.199021577835083
./Walker2d-v3_PPO_3_5449/actor_000013284800.pth | Hamilton 0.18894587457180023
./Walker2d-v3_PPO_3_5449/actor_000013436999.pth | Hamilton 0.1882842630147934
./Walker2d-v3_PPO_3_5449/actor_000013587883.pth | Hamilton 0.1951446235179901
./Walker2d-v3_PPO_3_5449/actor_000013734035.pth | Hamilton 0.1773858219385147
./Walker2d-v3_PPO_3_5449/actor_000013884120.pth | Hamilton 0.17267131805419922
./Walker2d-v3_PPO_3_5449/actor_000014027863.pth | Hamilton 0.1482010930776596
./Walker2d-v3_PPO_3_5449/actor_000014181161.pth | Hamilton 0.15587134659290314
./Walker2d-v3_PPO_3_5449/actor_000014327947.pth | Hamilton 0.14679761230945587
./Walker2d-v3_PPO_3_5449/actor_000014477881.pth | Hamilton 0.1432546228170395
./Walker2d-v3_PPO_3_5449/actor_000014622563.pth | Hamilton 0.133799210190773
./Walker2d-v3_PPO_3_5449/actor_000014770568.pth | Hamilton 0.1289616972208023
./Walker2d-v3_PPO_3_5449/actor_000014923072.pth | Hamilton 0.12415821850299835
./Walker2d-v3_PPO_3_5449/actor_000015067951.pth | Hamilton 0.12816055119037628
./Walker2d-v3_PPO_3_5449/actor_000015220741.pth | Hamilton 0.12286010384559631
./Walker2d-v3_PPO_3_5449/actor_000015368278.pth | Hamilton 0.12356770038604736
./Walker2d-v3_PPO_3_5449/actor_000015516619.pth | Hamilton 0.11822894215583801
./Walker2d-v3_PPO_3_5449/actor_000015671095.pth | Hamilton 0.10763926804065704
./Walker2d-v3_PPO_3_5449/actor_000015819755.pth | Hamilton 0.10935814678668976
./Walker2d-v3_PPO_3_5449/actor_000015970361.pth | Hamilton 0.12423727661371231
./Walker2d-v3_PPO_3_5449/actor_000016123149.pth | Hamilton 0.11704185605049133
./Walker2d-v3_PPO_3_5449/actor_000016276082.pth | Hamilton 0.1224769726395607
./Walker2d-v3_PPO_3_5449/actor_000016430277.pth | Hamilton 0.11499479413032532
./Walker2d-v3_PPO_3_5449/actor_000016580498.pth | Hamilton 0.11111478507518768
./Walker2d-v3_PPO_3_5449/actor_000016727817.pth | Hamilton 0.11295266449451447
./Walker2d-v3_PPO_3_5449/actor_000016875091.pth | Hamilton 0.11787348240613937
./Walker2d-v3_PPO_3_5449/actor_000017024821.pth | Hamilton 0.11866798996925354
./Walker2d-v3_PPO_3_5449/actor_000017171210.pth | Hamilton 0.10967691987752914
./Walker2d-v3_PPO_3_5449/actor_000017323249.pth | Hamilton 0.11170575022697449
./Walker2d-v3_PPO_3_5449/actor_000017472690.pth | Hamilton 0.1056210920214653
./Walker2d-v3_PPO_3_5449/actor_000017622262.pth | Hamilton 0.11044346541166306
./Walker2d-v3_PPO_3_5449/actor_000017768490.pth | Hamilton 0.10271184146404266
./Walker2d-v3_PPO_3_5449/actor_000017913653.pth | Hamilton 0.10808862000703812
./Walker2d-v3_PPO_3_5449/actor_000018063553.pth | Hamilton 0.10521787405014038
./Walker2d-v3_PPO_3_5449/actor_000018212101.pth | Hamilton 0.11340231448411942
./Walker2d-v3_PPO_3_5449/actor_000018360973.pth | Hamilton 0.10834623873233795
./Walker2d-v3_PPO_3_5449/actor_000018501420.pth | Hamilton 0.10186772048473358
./Walker2d-v3_PPO_3_5449/actor_000018647528.pth | Hamilton 0.10813114047050476
./Walker2d-v3_PPO_3_5449/actor_000018792642.pth | Hamilton 0.10287192463874817
./Walker2d-v3_PPO_3_5449/actor_000018943652.pth | Hamilton 0.09922906011343002
./Walker2d-v3_PPO_3_5449/actor_000019095722.pth | Hamilton 0.10815797746181488
./Walker2d-v3_PPO_3_5449/actor_000019244988.pth | Hamilton 0.10201079398393631
./Walker2d-v3_PPO_3_5449/actor_000019398384.pth | Hamilton 0.09539986401796341
./Walker2d-v3_PPO_3_5449/actor_000019553640.pth | Hamilton 0.09772010892629623
./Walker2d-v3_PPO_3_5449/actor_000019707502.pth | Hamilton 0.09324537217617035
./Walker2d-v3_PPO_3_5449/actor_000019857759.pth | Hamilton 0.09246058762073517
./Walker2d-v3_PPO_3_5449/actor_000020008414.pth | Hamilton 0.09673815965652466
./Walker2d-v3_PPO_3_5449/actor__000000016162_00000.100.pth | Hamilton 0.0024733352474868298
./Walker2d-v3_PPO_3_5449/actor__000000369982_00844.587.pth | Hamilton 0.012272307649254799
./Walker2d-v3_PPO_3_5449/actor__000000726461_01317.149.pth | Hamilton 0.04832163080573082
./Walker2d-v3_PPO_3_5449/actor__000001082124_04293.286.pth | Hamilton 0.1255750209093094
./Walker2d-v3_PPO_3_5449/actor__000001438355_04606.865.pth | Hamilton 0.18486599624156952
./Walker2d-v3_PPO_3_5449/actor__000001793638_04759.254.pth | Hamilton 0.21724434196949005
./Walker2d-v3_PPO_3_5449/actor__000002148196_04847.393.pth | Hamilton 0.22975826263427734
./Walker2d-v3_PPO_3_5449/actor__000002503448_04925.915.pth | Hamilton 0.22452126443386078
./Walker2d-v3_PPO_3_5449/actor__000003209146_04928.707.pth | Hamilton 0.2314271181821823
./Walker2d-v3_PPO_3_5449/actor__000003560939_04932.584.pth | Hamilton 0.22611020505428314
./Walker2d-v3_PPO_3_5449/actor__000003915019_04978.277.pth | Hamilton 0.24576348066329956
./Walker2d-v3_PPO_3_5449/actor__000004263496_05288.619.pth | Hamilton 0.256229043006897
./Walker2d-v3_PPO_3_5449/actor__000007794050_05299.770.pth | Hamilton 0.26436153054237366
./Walker2d-v3_PPO_3_5449/actor__000008863552_05378.819.pth | Hamilton 0.2479991912841797
./Walker2d-v3_PPO_3_5449/actor__000010294691_05400.554.pth | Hamilton 0.24982310831546783
./Walker2d-v3_PPO_3_5449/actor__000016285635_05449.702.pth | Hamilton 0.16927051544189453
"""
# Walker2d-v3_PPO_2_5640
data47 = """
./Walker2d-v3_PPO_2_5640/actor_000000076821.pth | Hamilton 0.06140350177884102
./Walker2d-v3_PPO_2_5640/actor_000000212404.pth | Hamilton 0.09936045855283737
./Walker2d-v3_PPO_2_5640/actor_000000347657.pth | Hamilton 0.14608590304851532
./Walker2d-v3_PPO_2_5640/actor_000000484531.pth | Hamilton 0.22183483839035034
./Walker2d-v3_PPO_2_5640/actor_000000626476.pth | Hamilton 0.3692648112773895
./Walker2d-v3_PPO_2_5640/actor_000000768541.pth | Hamilton 0.5038611888885498
./Walker2d-v3_PPO_2_5640/actor_000000915352.pth | Hamilton 0.45478299260139465
./Walker2d-v3_PPO_2_5640/actor_000001064134.pth | Hamilton 0.710417628288269
./Walker2d-v3_PPO_2_5640/actor_000001219433.pth | Hamilton 1.0367754697799683
./Walker2d-v3_PPO_2_5640/actor_000001368798.pth | Hamilton 0.9565792679786682
./Walker2d-v3_PPO_2_5640/actor_000001521736.pth | Hamilton 0.8148611783981323
./Walker2d-v3_PPO_2_5640/actor_000001672462.pth | Hamilton 0.7896491289138794
./Walker2d-v3_PPO_2_5640/actor_000001818396.pth | Hamilton 0.723064124584198
./Walker2d-v3_PPO_2_5640/actor_000001968958.pth | Hamilton 0.7489979267120361
./Walker2d-v3_PPO_2_5640/actor_000002122980.pth | Hamilton 0.6381734609603882
./Walker2d-v3_PPO_2_5640/actor_000002275203.pth | Hamilton 0.7043440341949463
./Walker2d-v3_PPO_2_5640/actor_000002424815.pth | Hamilton 0.6432493925094604
./Walker2d-v3_PPO_2_5640/actor_000002573446.pth | Hamilton 0.6924611330032349
./Walker2d-v3_PPO_2_5640/actor_000002723296.pth | Hamilton 0.5120658874511719
./Walker2d-v3_PPO_2_5640/actor_000002876816.pth | Hamilton 0.5475721955299377
./Walker2d-v3_PPO_2_5640/actor_000003024988.pth | Hamilton 0.5267606973648071
./Walker2d-v3_PPO_2_5640/actor_000003174746.pth | Hamilton 0.5102020502090454
./Walker2d-v3_PPO_2_5640/actor_000003319654.pth | Hamilton 0.5229117274284363
./Walker2d-v3_PPO_2_5640/actor_000003470447.pth | Hamilton 0.5075846910476685
./Walker2d-v3_PPO_2_5640/actor_000003621751.pth | Hamilton 0.5167932510375977
./Walker2d-v3_PPO_2_5640/actor_000003777124.pth | Hamilton 0.4908055067062378
./Walker2d-v3_PPO_2_5640/actor_000003927368.pth | Hamilton 0.3870166838169098
./Walker2d-v3_PPO_2_5640/actor_000004078774.pth | Hamilton 0.4389871656894684
./Walker2d-v3_PPO_2_5640/actor_000004230758.pth | Hamilton 0.35056108236312866
./Walker2d-v3_PPO_2_5640/actor_000004382899.pth | Hamilton 0.3738396167755127
./Walker2d-v3_PPO_2_5640/actor_000004530855.pth | Hamilton 0.36066770553588867
./Walker2d-v3_PPO_2_5640/actor_000004681859.pth | Hamilton 0.35803788900375366
./Walker2d-v3_PPO_2_5640/actor_000004832352.pth | Hamilton 0.3696693181991577
./Walker2d-v3_PPO_2_5640/actor_000004977920.pth | Hamilton 0.28550100326538086
./Walker2d-v3_PPO_2_5640/actor_000005128781.pth | Hamilton 0.2737581133842468
./Walker2d-v3_PPO_2_5640/actor_000005279919.pth | Hamilton 0.31691408157348633
./Walker2d-v3_PPO_2_5640/actor_000005431903.pth | Hamilton 0.31718602776527405
./Walker2d-v3_PPO_2_5640/actor_000005590406.pth | Hamilton 0.3361060619354248
./Walker2d-v3_PPO_2_5640/actor_000005744704.pth | Hamilton 0.31106844544410706
./Walker2d-v3_PPO_2_5640/actor_000005887678.pth | Hamilton 0.270893394947052
./Walker2d-v3_PPO_2_5640/actor_000006030184.pth | Hamilton 0.28500455617904663
./Walker2d-v3_PPO_2_5640/actor_000006177722.pth | Hamilton 0.26184943318367004
./Walker2d-v3_PPO_2_5640/actor_000006327757.pth | Hamilton 0.3042178750038147
./Walker2d-v3_PPO_2_5640/actor_000006475432.pth | Hamilton 0.28963491320610046
./Walker2d-v3_PPO_2_5640/actor_000006628363.pth | Hamilton 0.28662794828414917
./Walker2d-v3_PPO_2_5640/actor_000006783728.pth | Hamilton 0.30702200531959534
./Walker2d-v3_PPO_2_5640/actor_000006928952.pth | Hamilton 0.2299567312002182
./Walker2d-v3_PPO_2_5640/actor_000007081286.pth | Hamilton 0.22566278278827667
./Walker2d-v3_PPO_2_5640/actor_000007229422.pth | Hamilton 0.23525746166706085
./Walker2d-v3_PPO_2_5640/actor_000007379040.pth | Hamilton 0.20558996498584747
./Walker2d-v3_PPO_2_5640/actor_000007528255.pth | Hamilton 0.20947854220867157
./Walker2d-v3_PPO_2_5640/actor_000007681834.pth | Hamilton 0.206080362200737
./Walker2d-v3_PPO_2_5640/actor_000007828423.pth | Hamilton 0.15691331028938293
./Walker2d-v3_PPO_2_5640/actor_000007979129.pth | Hamilton 0.1649388074874878
./Walker2d-v3_PPO_2_5640/actor_000008133063.pth | Hamilton 0.12807220220565796
./Walker2d-v3_PPO_2_5640/actor_000008284016.pth | Hamilton 0.15413163602352142
./Walker2d-v3_PPO_2_5640/actor_000008435444.pth | Hamilton 0.16379103064537048
./Walker2d-v3_PPO_2_5640/actor_000008586816.pth | Hamilton 0.14073342084884644
./Walker2d-v3_PPO_2_5640/actor_000008736582.pth | Hamilton 0.16420258581638336
./Walker2d-v3_PPO_2_5640/actor_000008890706.pth | Hamilton 0.12213072925806046
./Walker2d-v3_PPO_2_5640/actor_000009041051.pth | Hamilton 0.1297212690114975
./Walker2d-v3_PPO_2_5640/actor_000009193866.pth | Hamilton 0.15423282980918884
./Walker2d-v3_PPO_2_5640/actor_000009344379.pth | Hamilton 0.1297639161348343
./Walker2d-v3_PPO_2_5640/actor_000009495840.pth | Hamilton 0.13005506992340088
./Walker2d-v3_PPO_2_5640/actor_000009645311.pth | Hamilton 0.13654427230358124
./Walker2d-v3_PPO_2_5640/actor_000009796356.pth | Hamilton 0.09926596283912659
./Walker2d-v3_PPO_2_5640/actor_000009950663.pth | Hamilton 0.10893446952104568
./Walker2d-v3_PPO_2_5640/actor_000010105564.pth | Hamilton 0.11457328498363495
./Walker2d-v3_PPO_2_5640/actor_000010258048.pth | Hamilton 0.09255792945623398
./Walker2d-v3_PPO_2_5640/actor_000010410049.pth | Hamilton 0.11191736161708832
./Walker2d-v3_PPO_2_5640/actor_000010561253.pth | Hamilton 0.10320854932069778
./Walker2d-v3_PPO_2_5640/actor_000010715997.pth | Hamilton 0.08656732738018036
./Walker2d-v3_PPO_2_5640/actor_000010870587.pth | Hamilton 0.08067519217729568
./Walker2d-v3_PPO_2_5640/actor_000011017969.pth | Hamilton 0.09623593837022781
./Walker2d-v3_PPO_2_5640/actor_000011162587.pth | Hamilton 0.08847132325172424
./Walker2d-v3_PPO_2_5640/actor_000011308134.pth | Hamilton 0.08809972554445267
./Walker2d-v3_PPO_2_5640/actor_000011455512.pth | Hamilton 0.07532154768705368
./Walker2d-v3_PPO_2_5640/actor_000011596570.pth | Hamilton 0.07442637532949448
./Walker2d-v3_PPO_2_5640/actor_000011744076.pth | Hamilton 0.06713340431451797
./Walker2d-v3_PPO_2_5640/actor_000011891688.pth | Hamilton 0.07853170484304428
./Walker2d-v3_PPO_2_5640/actor_000012041946.pth | Hamilton 0.07240073382854462
./Walker2d-v3_PPO_2_5640/actor_000012191170.pth | Hamilton 0.06085826829075813
./Walker2d-v3_PPO_2_5640/actor_000012340166.pth | Hamilton 0.0619654655456543
./Walker2d-v3_PPO_2_5640/actor_000012493116.pth | Hamilton 0.07762257009744644
./Walker2d-v3_PPO_2_5640/actor_000012643687.pth | Hamilton 0.0782988965511322
./Walker2d-v3_PPO_2_5640/actor_000012789780.pth | Hamilton 0.07158641517162323
./Walker2d-v3_PPO_2_5640/actor_000012933076.pth | Hamilton 0.0580611415207386
./Walker2d-v3_PPO_2_5640/actor_000013080291.pth | Hamilton 0.07202361524105072
./Walker2d-v3_PPO_2_5640/actor_000013228244.pth | Hamilton 0.07016364485025406
./Walker2d-v3_PPO_2_5640/actor_000013376774.pth | Hamilton 0.05116642266511917
./Walker2d-v3_PPO_2_5640/actor_000013523987.pth | Hamilton 0.06734585762023926
./Walker2d-v3_PPO_2_5640/actor_000013673736.pth | Hamilton 0.06764388084411621
./Walker2d-v3_PPO_2_5640/actor_000013824445.pth | Hamilton 0.07039181143045425
./Walker2d-v3_PPO_2_5640/actor_000013971091.pth | Hamilton 0.05509909242391586
./Walker2d-v3_PPO_2_5640/actor_000014116202.pth | Hamilton 0.05333920195698738
./Walker2d-v3_PPO_2_5640/actor_000014266507.pth | Hamilton 0.05673415958881378
./Walker2d-v3_PPO_2_5640/actor_000014416645.pth | Hamilton 0.047981712967157364
./Walker2d-v3_PPO_2_5640/actor_000014565229.pth | Hamilton 0.03268176317214966
./Walker2d-v3_PPO_2_5640/actor_000014711794.pth | Hamilton 0.03352981433272362
./Walker2d-v3_PPO_2_5640/actor_000014856809.pth | Hamilton 0.035317469388246536
./Walker2d-v3_PPO_2_5640/actor_000015007815.pth | Hamilton 0.0520830973982811
./Walker2d-v3_PPO_2_5640/actor_000015155419.pth | Hamilton 0.037610314786434174
./Walker2d-v3_PPO_2_5640/actor_000015303071.pth | Hamilton 0.04611772671341896
./Walker2d-v3_PPO_2_5640/actor_000015450792.pth | Hamilton 0.052486881613731384
./Walker2d-v3_PPO_2_5640/actor_000015601522.pth | Hamilton 0.03982250764966011
./Walker2d-v3_PPO_2_5640/actor_000015754523.pth | Hamilton 0.05269639939069748
./Walker2d-v3_PPO_2_5640/actor_000015898889.pth | Hamilton 0.059752415865659714
./Walker2d-v3_PPO_2_5640/actor_000016050430.pth | Hamilton 0.04219430312514305
./Walker2d-v3_PPO_2_5640/actor_000016202538.pth | Hamilton 0.048400912433862686
./Walker2d-v3_PPO_2_5640/actor_000016350465.pth | Hamilton 0.0556575246155262
./Walker2d-v3_PPO_2_5640/actor_000016498242.pth | Hamilton 0.04758830741047859
./Walker2d-v3_PPO_2_5640/actor_000016646501.pth | Hamilton 0.03371794894337654
./Walker2d-v3_PPO_2_5640/actor_000016797232.pth | Hamilton 0.04772068187594414
./Walker2d-v3_PPO_2_5640/actor_000016949053.pth | Hamilton 0.041665639728307724
./Walker2d-v3_PPO_2_5640/actor_000017095083.pth | Hamilton 0.052450161427259445
./Walker2d-v3_PPO_2_5640/actor_000017244474.pth | Hamilton 0.04448487237095833
./Walker2d-v3_PPO_2_5640/actor_000017395552.pth | Hamilton 0.04572470113635063
./Walker2d-v3_PPO_2_5640/actor_000017547104.pth | Hamilton 0.049822088330984116
./Walker2d-v3_PPO_2_5640/actor_000017694988.pth | Hamilton 0.033766359090805054
./Walker2d-v3_PPO_2_5640/actor_000017839979.pth | Hamilton 0.04611736908555031
./Walker2d-v3_PPO_2_5640/actor_000017990570.pth | Hamilton 0.038567353039979935
./Walker2d-v3_PPO_2_5640/actor_000018146147.pth | Hamilton 0.03878886252641678
./Walker2d-v3_PPO_2_5640/actor_000018296143.pth | Hamilton 0.04831673204898834
./Walker2d-v3_PPO_2_5640/actor_000018445257.pth | Hamilton 0.049680113792419434
./Walker2d-v3_PPO_2_5640/actor_000018593772.pth | Hamilton 0.04619337618350983
./Walker2d-v3_PPO_2_5640/actor_000018746809.pth | Hamilton 0.05223681032657623
./Walker2d-v3_PPO_2_5640/actor_000018890605.pth | Hamilton 0.052570175379514694
./Walker2d-v3_PPO_2_5640/actor_000019036039.pth | Hamilton 0.04088686779141426
./Walker2d-v3_PPO_2_5640/actor_000019184847.pth | Hamilton 0.03313204273581505
./Walker2d-v3_PPO_2_5640/actor_000019328886.pth | Hamilton 0.048131126910448074
./Walker2d-v3_PPO_2_5640/actor_000019469102.pth | Hamilton 0.04186626896262169
./Walker2d-v3_PPO_2_5640/actor_000019610722.pth | Hamilton 0.04089484363794327
./Walker2d-v3_PPO_2_5640/actor_000019755748.pth | Hamilton 0.04358883947134018
./Walker2d-v3_PPO_2_5640/actor_000019903202.pth | Hamilton 0.032192736864089966
./Walker2d-v3_PPO_2_5640/actor__000000016081_00028.184.pth | Hamilton 0.0014493158087134361
./Walker2d-v3_PPO_2_5640/actor__000000372808_00839.921.pth | Hamilton 0.011533746495842934
./Walker2d-v3_PPO_2_5640/actor__000000714027_02375.833.pth | Hamilton 0.0472266860306263
./Walker2d-v3_PPO_2_5640/actor__000001055158_03630.054.pth | Hamilton 0.10037130862474442
./Walker2d-v3_PPO_2_5640/actor__000001737725_04133.763.pth | Hamilton 0.12496183812618256
./Walker2d-v3_PPO_2_5640/actor__000002084540_04404.578.pth | Hamilton 0.13333489000797272
./Walker2d-v3_PPO_2_5640/actor__000003787316_04930.331.pth | Hamilton 0.14740243554115295
./Walker2d-v3_PPO_2_5640/actor__000004127199_04971.100.pth | Hamilton 0.1509365439414978
./Walker2d-v3_PPO_2_5640/actor__000006177722_05104.358.pth | Hamilton 0.1656665951013565
./Walker2d-v3_PPO_2_5640/actor__000008256410_05166.105.pth | Hamilton 0.10291064530611038
./Walker2d-v3_PPO_2_5640/actor__000012052404_05280.073.pth | Hamilton 0.07355519384145737
./Walker2d-v3_PPO_2_5640/actor__000013061458_05288.496.pth | Hamilton 0.07532291859388351
./Walker2d-v3_PPO_2_5640/actor__000013403600_05335.223.pth | Hamilton 0.06522668898105621
./Walker2d-v3_PPO_2_5640/actor__000013748557_05361.889.pth | Hamilton 0.0660557672381401
./Walker2d-v3_PPO_2_5640/actor__000017538344_05452.276.pth | Hamilton 0.060848336666822433
./Walker2d-v3_PPO_2_5640/actor__000018241887_05640.687.pth | Hamilton 0.05773117393255234
"""
# Ant-v3_PPOHtermK_6_6862
# Raw per-checkpoint "Hamilton" metric log for this training run.
# Each record line has the fixed format: "<checkpoint path> | Hamilton <float>".
# Two naming patterns appear: actor_<step>.pth (periodic snapshots, ascending
# step count) and actor__<step>_<score>.pth — presumably milestone snapshots
# tagged with an evaluation score; verify against the checkpoint-saving code.
# NOTE(review): this literal is parsed elsewhere — keep its text byte-for-byte.
data51 = """
./Ant-v3_PPOHtermK_6_6862/actor_000000087603.pth | Hamilton 0.004111563321202993
./Ant-v3_PPOHtermK_6_6862/actor_000000246667.pth | Hamilton 0.00904847402125597
./Ant-v3_PPOHtermK_6_6862/actor_000000398257.pth | Hamilton 0.025377538055181503
./Ant-v3_PPOHtermK_6_6862/actor_000000545659.pth | Hamilton 0.08640637993812561
./Ant-v3_PPOHtermK_6_6862/actor_000000693256.pth | Hamilton 0.22825787961483002
./Ant-v3_PPOHtermK_6_6862/actor_000000838401.pth | Hamilton 0.5533413887023926
./Ant-v3_PPOHtermK_6_6862/actor_000000987434.pth | Hamilton 1.2805309295654297
./Ant-v3_PPOHtermK_6_6862/actor_000001133795.pth | Hamilton 1.4815641641616821
./Ant-v3_PPOHtermK_6_6862/actor_000001283625.pth | Hamilton 1.6453808546066284
./Ant-v3_PPOHtermK_6_6862/actor_000001436028.pth | Hamilton 1.9077728986740112
./Ant-v3_PPOHtermK_6_6862/actor_000001584269.pth | Hamilton 1.9327963590621948
./Ant-v3_PPOHtermK_6_6862/actor_000001733910.pth | Hamilton 1.7672089338302612
./Ant-v3_PPOHtermK_6_6862/actor_000001888955.pth | Hamilton 2.3662257194519043
./Ant-v3_PPOHtermK_6_6862/actor_000002047932.pth | Hamilton 2.3127212524414062
./Ant-v3_PPOHtermK_6_6862/actor_000002200662.pth | Hamilton 2.4863293170928955
./Ant-v3_PPOHtermK_6_6862/actor_000002363463.pth | Hamilton 2.734362840652466
./Ant-v3_PPOHtermK_6_6862/actor_000002517425.pth | Hamilton 2.6895899772644043
./Ant-v3_PPOHtermK_6_6862/actor_000002675381.pth | Hamilton 2.6771883964538574
./Ant-v3_PPOHtermK_6_6862/actor_000002835295.pth | Hamilton 2.9090685844421387
./Ant-v3_PPOHtermK_6_6862/actor_000002995491.pth | Hamilton 2.9900214672088623
./Ant-v3_PPOHtermK_6_6862/actor_000003151664.pth | Hamilton 2.7318813800811768
./Ant-v3_PPOHtermK_6_6862/actor_000003306704.pth | Hamilton 2.8500683307647705
./Ant-v3_PPOHtermK_6_6862/actor_000003468221.pth | Hamilton 3.0387306213378906
./Ant-v3_PPOHtermK_6_6862/actor_000003630115.pth | Hamilton 3.378432512283325
./Ant-v3_PPOHtermK_6_6862/actor_000003791466.pth | Hamilton 3.1688318252563477
./Ant-v3_PPOHtermK_6_6862/actor_000003952989.pth | Hamilton 3.180849552154541
./Ant-v3_PPOHtermK_6_6862/actor_000004107299.pth | Hamilton 3.079395055770874
./Ant-v3_PPOHtermK_6_6862/actor_000004266281.pth | Hamilton 3.0492520332336426
./Ant-v3_PPOHtermK_6_6862/actor_000004418219.pth | Hamilton 3.1465437412261963
./Ant-v3_PPOHtermK_6_6862/actor_000004577536.pth | Hamilton 3.235098123550415
./Ant-v3_PPOHtermK_6_6862/actor_000004736440.pth | Hamilton 3.45585560798645
./Ant-v3_PPOHtermK_6_6862/actor_000004891760.pth | Hamilton 3.501124143600464
./Ant-v3_PPOHtermK_6_6862/actor_000005049463.pth | Hamilton 3.7424118518829346
./Ant-v3_PPOHtermK_6_6862/actor_000005205544.pth | Hamilton 3.790123701095581
./Ant-v3_PPOHtermK_6_6862/actor_000005362281.pth | Hamilton 3.9188179969787598
./Ant-v3_PPOHtermK_6_6862/actor_000005527772.pth | Hamilton 3.9709179401397705
./Ant-v3_PPOHtermK_6_6862/actor_000005682452.pth | Hamilton 3.7400968074798584
./Ant-v3_PPOHtermK_6_6862/actor_000005838312.pth | Hamilton 3.8978843688964844
./Ant-v3_PPOHtermK_6_6862/actor_000005997566.pth | Hamilton 4.282077312469482
./Ant-v3_PPOHtermK_6_6862/actor_000006155727.pth | Hamilton 4.3972954750061035
./Ant-v3_PPOHtermK_6_6862/actor_000006316241.pth | Hamilton 4.737549304962158
./Ant-v3_PPOHtermK_6_6862/actor_000006473952.pth | Hamilton 4.755157470703125
./Ant-v3_PPOHtermK_6_6862/actor_000006627564.pth | Hamilton 4.790607929229736
./Ant-v3_PPOHtermK_6_6862/actor_000006789113.pth | Hamilton 5.055866718292236
./Ant-v3_PPOHtermK_6_6862/actor_000006946831.pth | Hamilton 5.124453067779541
./Ant-v3_PPOHtermK_6_6862/actor_000007104592.pth | Hamilton 5.167172431945801
./Ant-v3_PPOHtermK_6_6862/actor_000007256785.pth | Hamilton 5.360196590423584
./Ant-v3_PPOHtermK_6_6862/actor_000007408385.pth | Hamilton 5.436351776123047
./Ant-v3_PPOHtermK_6_6862/actor_000007560620.pth | Hamilton 5.378294467926025
./Ant-v3_PPOHtermK_6_6862/actor_000007712368.pth | Hamilton 5.422183990478516
./Ant-v3_PPOHtermK_6_6862/actor_000007857981.pth | Hamilton 5.532299041748047
./Ant-v3_PPOHtermK_6_6862/actor_000008009371.pth | Hamilton 5.511137962341309
./Ant-v3_PPOHtermK_6_6862/actor_000008154302.pth | Hamilton 5.3729681968688965
./Ant-v3_PPOHtermK_6_6862/actor_000008303116.pth | Hamilton 5.573635578155518
./Ant-v3_PPOHtermK_6_6862/actor_000008454579.pth | Hamilton 5.734554290771484
./Ant-v3_PPOHtermK_6_6862/actor_000008604435.pth | Hamilton 5.67193078994751
./Ant-v3_PPOHtermK_6_6862/actor_000008757690.pth | Hamilton 5.687824726104736
./Ant-v3_PPOHtermK_6_6862/actor_000008900052.pth | Hamilton 5.833279132843018
./Ant-v3_PPOHtermK_6_6862/actor_000009050747.pth | Hamilton 5.891057968139648
./Ant-v3_PPOHtermK_6_6862/actor_000009197457.pth | Hamilton 6.00933313369751
./Ant-v3_PPOHtermK_6_6862/actor_000009347330.pth | Hamilton 6.11137056350708
./Ant-v3_PPOHtermK_6_6862/actor_000009494809.pth | Hamilton 6.233556270599365
./Ant-v3_PPOHtermK_6_6862/actor_000009648731.pth | Hamilton 6.201189994812012
./Ant-v3_PPOHtermK_6_6862/actor_000009802830.pth | Hamilton 6.262927055358887
./Ant-v3_PPOHtermK_6_6862/actor_000009947677.pth | Hamilton 6.23444938659668
./Ant-v3_PPOHtermK_6_6862/actor_000010090871.pth | Hamilton 6.1717047691345215
./Ant-v3_PPOHtermK_6_6862/actor_000010239256.pth | Hamilton 6.221645832061768
./Ant-v3_PPOHtermK_6_6862/actor_000010382777.pth | Hamilton 6.212965488433838
./Ant-v3_PPOHtermK_6_6862/actor_000010527422.pth | Hamilton 6.27482795715332
./Ant-v3_PPOHtermK_6_6862/actor_000010669250.pth | Hamilton 6.220563888549805
./Ant-v3_PPOHtermK_6_6862/actor_000010812839.pth | Hamilton 6.310513019561768
./Ant-v3_PPOHtermK_6_6862/actor_000010952073.pth | Hamilton 6.290161609649658
./Ant-v3_PPOHtermK_6_6862/actor_000011095900.pth | Hamilton 6.4406585693359375
./Ant-v3_PPOHtermK_6_6862/actor_000011239724.pth | Hamilton 6.328786373138428
./Ant-v3_PPOHtermK_6_6862/actor_000011386056.pth | Hamilton 6.4611496925354
./Ant-v3_PPOHtermK_6_6862/actor_000011534945.pth | Hamilton 6.370532512664795
./Ant-v3_PPOHtermK_6_6862/actor_000011679265.pth | Hamilton 6.443540573120117
./Ant-v3_PPOHtermK_6_6862/actor_000011828814.pth | Hamilton 6.598026752471924
./Ant-v3_PPOHtermK_6_6862/actor_000011973710.pth | Hamilton 6.660208702087402
./Ant-v3_PPOHtermK_6_6862/actor_000012114998.pth | Hamilton 6.538715362548828
./Ant-v3_PPOHtermK_6_6862/actor_000012260024.pth | Hamilton 6.747320175170898
./Ant-v3_PPOHtermK_6_6862/actor_000012402436.pth | Hamilton 6.661251544952393
./Ant-v3_PPOHtermK_6_6862/actor_000012546185.pth | Hamilton 6.686399459838867
./Ant-v3_PPOHtermK_6_6862/actor_000012688467.pth | Hamilton 6.857907295227051
./Ant-v3_PPOHtermK_6_6862/actor_000012832478.pth | Hamilton 6.824693202972412
./Ant-v3_PPOHtermK_6_6862/actor_000012980017.pth | Hamilton 6.763824462890625
./Ant-v3_PPOHtermK_6_6862/actor_000013126035.pth | Hamilton 6.702686309814453
./Ant-v3_PPOHtermK_6_6862/actor_000013270598.pth | Hamilton 6.876688480377197
./Ant-v3_PPOHtermK_6_6862/actor_000013416374.pth | Hamilton 6.880148410797119
./Ant-v3_PPOHtermK_6_6862/actor_000013561230.pth | Hamilton 6.919610023498535
./Ant-v3_PPOHtermK_6_6862/actor_000013702191.pth | Hamilton 6.9075026512146
./Ant-v3_PPOHtermK_6_6862/actor_000013842919.pth | Hamilton 6.949341773986816
./Ant-v3_PPOHtermK_6_6862/actor_000013988815.pth | Hamilton 6.828098773956299
./Ant-v3_PPOHtermK_6_6862/actor_000014138333.pth | Hamilton 6.6650800704956055
./Ant-v3_PPOHtermK_6_6862/actor_000014285220.pth | Hamilton 6.846170902252197
./Ant-v3_PPOHtermK_6_6862/actor_000014427159.pth | Hamilton 6.799041271209717
./Ant-v3_PPOHtermK_6_6862/actor_000014583105.pth | Hamilton 6.801196575164795
./Ant-v3_PPOHtermK_6_6862/actor_000014727298.pth | Hamilton 6.7754411697387695
./Ant-v3_PPOHtermK_6_6862/actor_000014868819.pth | Hamilton 6.792905807495117
./Ant-v3_PPOHtermK_6_6862/actor_000015011728.pth | Hamilton 6.869265556335449
./Ant-v3_PPOHtermK_6_6862/actor_000015155252.pth | Hamilton 6.774500846862793
./Ant-v3_PPOHtermK_6_6862/actor_000015300820.pth | Hamilton 6.753936767578125
./Ant-v3_PPOHtermK_6_6862/actor_000015447459.pth | Hamilton 6.735507965087891
./Ant-v3_PPOHtermK_6_6862/actor_000015599211.pth | Hamilton 6.690241813659668
./Ant-v3_PPOHtermK_6_6862/actor_000015743664.pth | Hamilton 6.789775371551514
./Ant-v3_PPOHtermK_6_6862/actor_000015884557.pth | Hamilton 6.692172050476074
./Ant-v3_PPOHtermK_6_6862/actor_000016029921.pth | Hamilton 6.638717174530029
./Ant-v3_PPOHtermK_6_6862/actor_000016170424.pth | Hamilton 6.560934543609619
./Ant-v3_PPOHtermK_6_6862/actor_000016317889.pth | Hamilton 6.48925256729126
./Ant-v3_PPOHtermK_6_6862/actor_000016463893.pth | Hamilton 6.519744396209717
./Ant-v3_PPOHtermK_6_6862/actor_000016603620.pth | Hamilton 6.478171348571777
./Ant-v3_PPOHtermK_6_6862/actor_000016751777.pth | Hamilton 6.401797294616699
./Ant-v3_PPOHtermK_6_6862/actor_000016902551.pth | Hamilton 6.257814884185791
./Ant-v3_PPOHtermK_6_6862/actor_000017043467.pth | Hamilton 6.341620445251465
./Ant-v3_PPOHtermK_6_6862/actor_000017185932.pth | Hamilton 6.317134380340576
./Ant-v3_PPOHtermK_6_6862/actor_000017333114.pth | Hamilton 6.2454023361206055
./Ant-v3_PPOHtermK_6_6862/actor_000017481615.pth | Hamilton 6.086867809295654
./Ant-v3_PPOHtermK_6_6862/actor_000017628701.pth | Hamilton 6.178828716278076
./Ant-v3_PPOHtermK_6_6862/actor_000017781552.pth | Hamilton 5.912972450256348
./Ant-v3_PPOHtermK_6_6862/actor_000017927988.pth | Hamilton 5.986026763916016
./Ant-v3_PPOHtermK_6_6862/actor_000018075339.pth | Hamilton 5.951847553253174
./Ant-v3_PPOHtermK_6_6862/actor_000018221846.pth | Hamilton 5.759809494018555
./Ant-v3_PPOHtermK_6_6862/actor_000018368954.pth | Hamilton 5.641787528991699
./Ant-v3_PPOHtermK_6_6862/actor_000018519042.pth | Hamilton 5.591580390930176
./Ant-v3_PPOHtermK_6_6862/actor_000018666266.pth | Hamilton 5.464154243469238
./Ant-v3_PPOHtermK_6_6862/actor_000018811176.pth | Hamilton 5.375728607177734
./Ant-v3_PPOHtermK_6_6862/actor_000018957031.pth | Hamilton 5.386233329772949
./Ant-v3_PPOHtermK_6_6862/actor_000019107266.pth | Hamilton 5.284639358520508
./Ant-v3_PPOHtermK_6_6862/actor_000019255930.pth | Hamilton 5.22750997543335
./Ant-v3_PPOHtermK_6_6862/actor_000019401309.pth | Hamilton 5.271519660949707
./Ant-v3_PPOHtermK_6_6862/actor_000019547857.pth | Hamilton 5.185274124145508
./Ant-v3_PPOHtermK_6_6862/actor_000019699533.pth | Hamilton 5.045224666595459
./Ant-v3_PPOHtermK_6_6862/actor_000019852074.pth | Hamilton 4.8921217918396
./Ant-v3_PPOHtermK_6_6862/actor_000019997557.pth | Hamilton 4.950551509857178
./Ant-v3_PPOHtermK_6_6862/actor__000000010957_00957.849.pth | Hamilton 0.04204603284597397
./Ant-v3_PPOHtermK_6_6862/actor__000000416210_01152.519.pth | Hamilton 0.20841628313064575
./Ant-v3_PPOHtermK_6_6862/actor__000001212994_02997.341.pth | Hamilton 1.2611675262451172
./Ant-v3_PPOHtermK_6_6862/actor__000002088219_04006.116.pth | Hamilton 1.9224694967269897
./Ant-v3_PPOHtermK_6_6862/actor__000002974593_04896.996.pth | Hamilton 2.7628250122070312
./Ant-v3_PPOHtermK_6_6862/actor__000003416406_05810.475.pth | Hamilton 3.0727179050445557
./Ant-v3_PPOHtermK_6_6862/actor__000004299876_05995.790.pth | Hamilton 3.885120391845703
./Ant-v3_PPOHtermK_6_6862/actor__000004741392_06281.468.pth | Hamilton 4.302432060241699
./Ant-v3_PPOHtermK_6_6862/actor__000006054387_06414.502.pth | Hamilton 5.55662727355957
./Ant-v3_PPOHtermK_6_6862/actor__000006942014_06539.019.pth | Hamilton 6.010439395904541
./Ant-v3_PPOHtermK_6_6862/actor__000009592078_06650.828.pth | Hamilton 7.172863006591797
./Ant-v3_PPOHtermK_6_6862/actor__000010033074_06712.405.pth | Hamilton 7.369781494140625
./Ant-v3_PPOHtermK_6_6862/actor__000010476567_06776.393.pth | Hamilton 7.357755184173584
./Ant-v3_PPOHtermK_6_6862/actor__000013565810_06815.816.pth | Hamilton 7.623598575592041
./Ant-v3_PPOHtermK_6_6862/actor__000015333872_06862.747.pth | Hamilton 7.3322858810424805
"""
# Ant-v3_PPO_5_6799
# Raw per-checkpoint "Hamilton" metric log for this training run, same
# record format as the sibling dataNN literals in this file:
# "<checkpoint path> | Hamilton <float>". actor_<step>.pth lines are periodic
# snapshots; actor__<step>_<score>.pth lines are presumably milestone
# snapshots tagged with an evaluation score — confirm against the saver.
# NOTE(review): this literal is parsed elsewhere — keep its text byte-for-byte.
data54 = """
./Ant-v3_PPO_5_6799/actor_000000093883.pth | Hamilton 0.005074503365904093
./Ant-v3_PPO_5_6799/actor_000000254112.pth | Hamilton 0.012386777438223362
./Ant-v3_PPO_5_6799/actor_000000413429.pth | Hamilton 0.037784725427627563
./Ant-v3_PPO_5_6799/actor_000000567042.pth | Hamilton 0.07026351988315582
./Ant-v3_PPO_5_6799/actor_000000726493.pth | Hamilton 0.21908153593540192
./Ant-v3_PPO_5_6799/actor_000000882388.pth | Hamilton 0.36841586232185364
./Ant-v3_PPO_5_6799/actor_000001037631.pth | Hamilton 0.4825523793697357
./Ant-v3_PPO_5_6799/actor_000001199238.pth | Hamilton 1.0373966693878174
./Ant-v3_PPO_5_6799/actor_000001359842.pth | Hamilton 1.291121482849121
./Ant-v3_PPO_5_6799/actor_000001522417.pth | Hamilton 1.6402531862258911
./Ant-v3_PPO_5_6799/actor_000001686742.pth | Hamilton 1.9664427042007446
./Ant-v3_PPO_5_6799/actor_000001851366.pth | Hamilton 2.3771016597747803
./Ant-v3_PPO_5_6799/actor_000002005324.pth | Hamilton 2.6183810234069824
./Ant-v3_PPO_5_6799/actor_000002168956.pth | Hamilton 2.9140841960906982
./Ant-v3_PPO_5_6799/actor_000002328271.pth | Hamilton 2.8002612590789795
./Ant-v3_PPO_5_6799/actor_000002485601.pth | Hamilton 2.894040107727051
./Ant-v3_PPO_5_6799/actor_000002652389.pth | Hamilton 2.832108974456787
./Ant-v3_PPO_5_6799/actor_000002817057.pth | Hamilton 2.7549281120300293
./Ant-v3_PPO_5_6799/actor_000002983113.pth | Hamilton 2.7792575359344482
./Ant-v3_PPO_5_6799/actor_000003138878.pth | Hamilton 2.3709654808044434
./Ant-v3_PPO_5_6799/actor_000003301338.pth | Hamilton 2.4817473888397217
./Ant-v3_PPO_5_6799/actor_000003458485.pth | Hamilton 2.4570975303649902
./Ant-v3_PPO_5_6799/actor_000003614187.pth | Hamilton 2.6773123741149902
./Ant-v3_PPO_5_6799/actor_000003776163.pth | Hamilton 2.610283851623535
./Ant-v3_PPO_5_6799/actor_000003930963.pth | Hamilton 2.8402206897735596
./Ant-v3_PPO_5_6799/actor_000004090408.pth | Hamilton 2.82114315032959
./Ant-v3_PPO_5_6799/actor_000004246929.pth | Hamilton 2.8226566314697266
./Ant-v3_PPO_5_6799/actor_000004405943.pth | Hamilton 2.8057987689971924
./Ant-v3_PPO_5_6799/actor_000004563834.pth | Hamilton 2.5844919681549072
./Ant-v3_PPO_5_6799/actor_000004720746.pth | Hamilton 2.6300995349884033
./Ant-v3_PPO_5_6799/actor_000004873595.pth | Hamilton 2.6104531288146973
./Ant-v3_PPO_5_6799/actor_000005029293.pth | Hamilton 2.486684560775757
./Ant-v3_PPO_5_6799/actor_000005183761.pth | Hamilton 2.53338623046875
./Ant-v3_PPO_5_6799/actor_000005343847.pth | Hamilton 2.507483720779419
./Ant-v3_PPO_5_6799/actor_000005493343.pth | Hamilton 2.57580828666687
./Ant-v3_PPO_5_6799/actor_000005644460.pth | Hamilton 2.578012704849243
./Ant-v3_PPO_5_6799/actor_000005793061.pth | Hamilton 2.5577147006988525
./Ant-v3_PPO_5_6799/actor_000005949866.pth | Hamilton 2.7746925354003906
./Ant-v3_PPO_5_6799/actor_000006098009.pth | Hamilton 2.7040562629699707
./Ant-v3_PPO_5_6799/actor_000006252605.pth | Hamilton 2.5675675868988037
./Ant-v3_PPO_5_6799/actor_000006403492.pth | Hamilton 2.4022552967071533
./Ant-v3_PPO_5_6799/actor_000006560549.pth | Hamilton 2.5230021476745605
./Ant-v3_PPO_5_6799/actor_000006716404.pth | Hamilton 2.4601328372955322
./Ant-v3_PPO_5_6799/actor_000006866965.pth | Hamilton 2.413377523422241
./Ant-v3_PPO_5_6799/actor_000007014246.pth | Hamilton 2.4905846118927
./Ant-v3_PPO_5_6799/actor_000007164670.pth | Hamilton 2.5154776573181152
./Ant-v3_PPO_5_6799/actor_000007324438.pth | Hamilton 2.3725459575653076
./Ant-v3_PPO_5_6799/actor_000007478593.pth | Hamilton 2.418517589569092
./Ant-v3_PPO_5_6799/actor_000007635257.pth | Hamilton 2.457030773162842
./Ant-v3_PPO_5_6799/actor_000007787782.pth | Hamilton 2.3272931575775146
./Ant-v3_PPO_5_6799/actor_000007936743.pth | Hamilton 2.247887134552002
./Ant-v3_PPO_5_6799/actor_000008090262.pth | Hamilton 2.3328776359558105
./Ant-v3_PPO_5_6799/actor_000008234363.pth | Hamilton 2.360222339630127
./Ant-v3_PPO_5_6799/actor_000008388099.pth | Hamilton 2.2227847576141357
./Ant-v3_PPO_5_6799/actor_000008539534.pth | Hamilton 2.1688270568847656
./Ant-v3_PPO_5_6799/actor_000008689462.pth | Hamilton 2.0613059997558594
./Ant-v3_PPO_5_6799/actor_000008839036.pth | Hamilton 2.1086008548736572
./Ant-v3_PPO_5_6799/actor_000008992293.pth | Hamilton 1.9779132604599
./Ant-v3_PPO_5_6799/actor_000009139903.pth | Hamilton 1.9654568433761597
./Ant-v3_PPO_5_6799/actor_000009294066.pth | Hamilton 1.9782301187515259
./Ant-v3_PPO_5_6799/actor_000009440658.pth | Hamilton 1.9262315034866333
./Ant-v3_PPO_5_6799/actor_000009590270.pth | Hamilton 2.0049564838409424
./Ant-v3_PPO_5_6799/actor_000009734535.pth | Hamilton 1.7898106575012207
./Ant-v3_PPO_5_6799/actor_000009887791.pth | Hamilton 1.7079881429672241
./Ant-v3_PPO_5_6799/actor_000010039418.pth | Hamilton 1.553754448890686
./Ant-v3_PPO_5_6799/actor_000010191313.pth | Hamilton 1.5349966287612915
./Ant-v3_PPO_5_6799/actor_000010340457.pth | Hamilton 1.6025221347808838
./Ant-v3_PPO_5_6799/actor_000010488961.pth | Hamilton 1.55060613155365
./Ant-v3_PPO_5_6799/actor_000010640089.pth | Hamilton 1.5156381130218506
./Ant-v3_PPO_5_6799/actor_000010794482.pth | Hamilton 1.3353220224380493
./Ant-v3_PPO_5_6799/actor_000010945279.pth | Hamilton 1.2922077178955078
./Ant-v3_PPO_5_6799/actor_000011094906.pth | Hamilton 1.2890613079071045
./Ant-v3_PPO_5_6799/actor_000011244721.pth | Hamilton 1.214226484298706
./Ant-v3_PPO_5_6799/actor_000011398958.pth | Hamilton 1.1959927082061768
./Ant-v3_PPO_5_6799/actor_000011552495.pth | Hamilton 1.1368263959884644
./Ant-v3_PPO_5_6799/actor_000011707220.pth | Hamilton 0.979556679725647
./Ant-v3_PPO_5_6799/actor_000011863011.pth | Hamilton 0.9126589894294739
./Ant-v3_PPO_5_6799/actor_000012010070.pth | Hamilton 0.6956690549850464
./Ant-v3_PPO_5_6799/actor_000012164675.pth | Hamilton 0.7874578833580017
./Ant-v3_PPO_5_6799/actor_000012312880.pth | Hamilton 0.790744960308075
./Ant-v3_PPO_5_6799/actor_000012472701.pth | Hamilton 0.7231869101524353
./Ant-v3_PPO_5_6799/actor_000012622513.pth | Hamilton 0.7606087923049927
./Ant-v3_PPO_5_6799/actor_000012775604.pth | Hamilton 0.7387474775314331
./Ant-v3_PPO_5_6799/actor_000012934039.pth | Hamilton 0.7471483945846558
./Ant-v3_PPO_5_6799/actor_000013091603.pth | Hamilton 0.7238107323646545
./Ant-v3_PPO_5_6799/actor_000013251598.pth | Hamilton 0.6864250898361206
./Ant-v3_PPO_5_6799/actor_000013406857.pth | Hamilton 0.6028667092323303
./Ant-v3_PPO_5_6799/actor_000013561454.pth | Hamilton 0.6044448614120483
./Ant-v3_PPO_5_6799/actor_000013717553.pth | Hamilton 0.622761607170105
./Ant-v3_PPO_5_6799/actor_000013876547.pth | Hamilton 0.5787383913993835
./Ant-v3_PPO_5_6799/actor_000014029743.pth | Hamilton 0.5842840075492859
./Ant-v3_PPO_5_6799/actor_000014189140.pth | Hamilton 0.5771746635437012
./Ant-v3_PPO_5_6799/actor_000014352768.pth | Hamilton 0.5072037577629089
./Ant-v3_PPO_5_6799/actor_000014502447.pth | Hamilton 0.6147286891937256
./Ant-v3_PPO_5_6799/actor_000014651310.pth | Hamilton 0.6233721971511841
./Ant-v3_PPO_5_6799/actor_000014805698.pth | Hamilton 0.6438687443733215
./Ant-v3_PPO_5_6799/actor_000014956381.pth | Hamilton 0.6372586488723755
./Ant-v3_PPO_5_6799/actor_000015110777.pth | Hamilton 0.5142120122909546
./Ant-v3_PPO_5_6799/actor_000015264842.pth | Hamilton 0.5593512654304504
./Ant-v3_PPO_5_6799/actor_000015420141.pth | Hamilton 0.523980975151062
./Ant-v3_PPO_5_6799/actor_000015576268.pth | Hamilton 0.5798567533493042
./Ant-v3_PPO_5_6799/actor_000015729766.pth | Hamilton 0.5724379420280457
./Ant-v3_PPO_5_6799/actor_000015880050.pth | Hamilton 0.5451202392578125
./Ant-v3_PPO_5_6799/actor_000016036372.pth | Hamilton 0.5015071630477905
./Ant-v3_PPO_5_6799/actor_000016195101.pth | Hamilton 0.5483999848365784
./Ant-v3_PPO_5_6799/actor_000016352907.pth | Hamilton 0.5136932134628296
./Ant-v3_PPO_5_6799/actor_000016510911.pth | Hamilton 0.4691963195800781
./Ant-v3_PPO_5_6799/actor_000016666061.pth | Hamilton 0.5445407629013062
./Ant-v3_PPO_5_6799/actor_000016817462.pth | Hamilton 0.5234227776527405
./Ant-v3_PPO_5_6799/actor_000016972569.pth | Hamilton 0.5796849727630615
./Ant-v3_PPO_5_6799/actor_000017123186.pth | Hamilton 0.5272115468978882
./Ant-v3_PPO_5_6799/actor_000017275815.pth | Hamilton 0.45750850439071655
./Ant-v3_PPO_5_6799/actor_000017422527.pth | Hamilton 0.49887269735336304
./Ant-v3_PPO_5_6799/actor_000017580499.pth | Hamilton 0.46061211824417114
./Ant-v3_PPO_5_6799/actor_000017735310.pth | Hamilton 0.4814170002937317
./Ant-v3_PPO_5_6799/actor_000017888350.pth | Hamilton 0.48095399141311646
./Ant-v3_PPO_5_6799/actor_000018048059.pth | Hamilton 0.5034792423248291
./Ant-v3_PPO_5_6799/actor_000018202806.pth | Hamilton 0.4589376449584961
./Ant-v3_PPO_5_6799/actor_000018358215.pth | Hamilton 0.4749937951564789
./Ant-v3_PPO_5_6799/actor_000018516982.pth | Hamilton 0.4343283772468567
./Ant-v3_PPO_5_6799/actor_000018673787.pth | Hamilton 0.4559686481952667
./Ant-v3_PPO_5_6799/actor_000018823344.pth | Hamilton 0.4595167338848114
./Ant-v3_PPO_5_6799/actor_000018980971.pth | Hamilton 0.4632956385612488
./Ant-v3_PPO_5_6799/actor_000019133559.pth | Hamilton 0.46645283699035645
./Ant-v3_PPO_5_6799/actor_000019285475.pth | Hamilton 0.4773906171321869
./Ant-v3_PPO_5_6799/actor_000019442907.pth | Hamilton 0.4533741772174835
./Ant-v3_PPO_5_6799/actor_000019596158.pth | Hamilton 0.4300614297389984
./Ant-v3_PPO_5_6799/actor_000019743642.pth | Hamilton 0.4415527284145355
./Ant-v3_PPO_5_6799/actor_000019899697.pth | Hamilton 0.44830670952796936
./Ant-v3_PPO_5_6799/actor__000000010793_00947.179.pth | Hamilton 0.01776743493974209
./Ant-v3_PPO_5_6799/actor__000000652987_01597.806.pth | Hamilton 0.14094121754169464
./Ant-v3_PPO_5_6799/actor__000001272096_03643.128.pth | Hamilton 0.48397231101989746
./Ant-v3_PPO_5_6799/actor__000001881198_04605.684.pth | Hamilton 0.9186487197875977
./Ant-v3_PPO_5_6799/actor__000002521292_05754.153.pth | Hamilton 1.1610654592514038
./Ant-v3_PPO_5_6799/actor__000005762309_06584.520.pth | Hamilton 1.5993343591690063
./Ant-v3_PPO_5_6799/actor__000007707235_06667.059.pth | Hamilton 1.6180704832077026
./Ant-v3_PPO_5_6799/actor__000009001412_06779.400.pth | Hamilton 1.5048744678497314
"""
# Ant-v3_PPO_1_5652
data55 = """
./Ant-v3_PPO_1_5652/actor_000000169067.pth | Hamilton 0.0069841961376369
./Ant-v3_PPO_1_5652/actor_000000250914.pth | Hamilton 0.009560899809002876
./Ant-v3_PPO_1_5652/actor_000000330483.pth | Hamilton 0.020319728180766106
./Ant-v3_PPO_1_5652/actor_000000407810.pth | Hamilton 0.031599223613739014
./Ant-v3_PPO_1_5652/actor_000000483432.pth | Hamilton 0.04474034905433655
./Ant-v3_PPO_1_5652/actor_000000559814.pth | Hamilton 0.053308483213186264
./Ant-v3_PPO_1_5652/actor_000000634714.pth | Hamilton 0.07590979337692261
./Ant-v3_PPO_1_5652/actor_000000707799.pth | Hamilton 0.09556791931390762
./Ant-v3_PPO_1_5652/actor_000000780532.pth | Hamilton 0.11470188200473785
./Ant-v3_PPO_1_5652/actor_000000853463.pth | Hamilton 0.14212079346179962
./Ant-v3_PPO_1_5652/actor_000000924971.pth | Hamilton 0.14334863424301147
./Ant-v3_PPO_1_5652/actor_000000993933.pth | Hamilton 0.23164157569408417
./Ant-v3_PPO_1_5652/actor_000001063847.pth | Hamilton 0.29071682691574097
./Ant-v3_PPO_1_5652/actor_000001133194.pth | Hamilton 0.34055787324905396
./Ant-v3_PPO_1_5652/actor_000001202299.pth | Hamilton 0.4016701281070709
./Ant-v3_PPO_1_5652/actor_000001270003.pth | Hamilton 0.45839497447013855
./Ant-v3_PPO_1_5652/actor_000001340760.pth | Hamilton 0.49206140637397766
./Ant-v3_PPO_1_5652/actor_000001411600.pth | Hamilton 0.4933777153491974
./Ant-v3_PPO_1_5652/actor_000001483487.pth | Hamilton 0.5019961595535278
./Ant-v3_PPO_1_5652/actor_000001557749.pth | Hamilton 0.5797672867774963
./Ant-v3_PPO_1_5652/actor_000001625870.pth | Hamilton 0.6342185139656067
./Ant-v3_PPO_1_5652/actor_000001695936.pth | Hamilton 0.6761088371276855
./Ant-v3_PPO_1_5652/actor_000001768913.pth | Hamilton 0.6023170948028564
./Ant-v3_PPO_1_5652/actor_000001838635.pth | Hamilton 0.6019571423530579
./Ant-v3_PPO_1_5652/actor_000001912070.pth | Hamilton 0.6810991764068604
./Ant-v3_PPO_1_5652/actor_000001983892.pth | Hamilton 0.6134727597236633
./Ant-v3_PPO_1_5652/actor_000002050529.pth | Hamilton 0.6821702718734741
./Ant-v3_PPO_1_5652/actor_000002121921.pth | Hamilton 0.6696080565452576
./Ant-v3_PPO_1_5652/actor_000002193576.pth | Hamilton 0.6481669545173645
./Ant-v3_PPO_1_5652/actor_000002262973.pth | Hamilton 0.4746580123901367
./Ant-v3_PPO_1_5652/actor_000002343933.pth | Hamilton 0.4034137725830078
./Ant-v3_PPO_1_5652/actor_000002415955.pth | Hamilton 0.5272387266159058
./Ant-v3_PPO_1_5652/actor_000002488950.pth | Hamilton 0.5453565716743469
./Ant-v3_PPO_1_5652/actor_000002560971.pth | Hamilton 0.5164162516593933
./Ant-v3_PPO_1_5652/actor_000002630874.pth | Hamilton 0.57940673828125
./Ant-v3_PPO_1_5652/actor_000002701258.pth | Hamilton 0.5405268669128418
./Ant-v3_PPO_1_5652/actor_000002771307.pth | Hamilton 0.5170269012451172
./Ant-v3_PPO_1_5652/actor_000002840697.pth | Hamilton 0.5910995006561279
./Ant-v3_PPO_1_5652/actor_000002912944.pth | Hamilton 0.6247982382774353
./Ant-v3_PPO_1_5652/actor_000002988665.pth | Hamilton 0.5708028078079224
./Ant-v3_PPO_1_5652/actor_000003064135.pth | Hamilton 0.5678332448005676
./Ant-v3_PPO_1_5652/actor_000003136164.pth | Hamilton 0.5668685436248779
./Ant-v3_PPO_1_5652/actor_000003209396.pth | Hamilton 0.6232424974441528
./Ant-v3_PPO_1_5652/actor_000003282091.pth | Hamilton 0.7090603113174438
./Ant-v3_PPO_1_5652/actor_000003354307.pth | Hamilton 0.6471967697143555
./Ant-v3_PPO_1_5652/actor_000003423065.pth | Hamilton 0.6186540722846985
./Ant-v3_PPO_1_5652/actor_000003490554.pth | Hamilton 0.7214058637619019
./Ant-v3_PPO_1_5652/actor_000003568949.pth | Hamilton 0.716549813747406
./Ant-v3_PPO_1_5652/actor_000003644933.pth | Hamilton 0.6535992622375488
./Ant-v3_PPO_1_5652/actor_000003717814.pth | Hamilton 0.7264279723167419
./Ant-v3_PPO_1_5652/actor_000003789800.pth | Hamilton 0.6259000301361084
./Ant-v3_PPO_1_5652/actor_000003863836.pth | Hamilton 0.6691027283668518
./Ant-v3_PPO_1_5652/actor_000003938472.pth | Hamilton 0.688693106174469
./Ant-v3_PPO_1_5652/actor_000004014467.pth | Hamilton 0.6773417592048645
./Ant-v3_PPO_1_5652/actor_000004088522.pth | Hamilton 0.6989647746086121
./Ant-v3_PPO_1_5652/actor_000004158611.pth | Hamilton 0.7517485022544861
./Ant-v3_PPO_1_5652/actor_000004232233.pth | Hamilton 0.7928637266159058
./Ant-v3_PPO_1_5652/actor_000004304445.pth | Hamilton 0.6899208426475525
./Ant-v3_PPO_1_5652/actor_000004372821.pth | Hamilton 0.7734887003898621
./Ant-v3_PPO_1_5652/actor_000004441366.pth | Hamilton 0.7817652821540833
./Ant-v3_PPO_1_5652/actor_000004515780.pth | Hamilton 0.8398405909538269
./Ant-v3_PPO_1_5652/actor_000004589930.pth | Hamilton 0.8786786198616028
./Ant-v3_PPO_1_5652/actor_000004663072.pth | Hamilton 0.8141953945159912
./Ant-v3_PPO_1_5652/actor_000004735287.pth | Hamilton 0.7988201379776001
./Ant-v3_PPO_1_5652/actor_000004810875.pth | Hamilton 0.7918642163276672
./Ant-v3_PPO_1_5652/actor_000004884332.pth | Hamilton 0.8188532590866089
./Ant-v3_PPO_1_5652/actor_000004954588.pth | Hamilton 0.8237034678459167
./Ant-v3_PPO_1_5652/actor_000005025949.pth | Hamilton 0.8513928055763245
./Ant-v3_PPO_1_5652/actor_000005100131.pth | Hamilton 0.7215188145637512
./Ant-v3_PPO_1_5652/actor_000005176086.pth | Hamilton 0.6839065551757812
./Ant-v3_PPO_1_5652/actor_000005246430.pth | Hamilton 0.7215323448181152
./Ant-v3_PPO_1_5652/actor_000005318697.pth | Hamilton 0.729893684387207
./Ant-v3_PPO_1_5652/actor_000005393017.pth | Hamilton 0.689328670501709
./Ant-v3_PPO_1_5652/actor_000005467725.pth | Hamilton 0.7332067489624023
./Ant-v3_PPO_1_5652/actor_000005538009.pth | Hamilton 0.7209208011627197
./Ant-v3_PPO_1_5652/actor_000005608742.pth | Hamilton 0.7581558227539062
./Ant-v3_PPO_1_5652/actor_000005681108.pth | Hamilton 0.7802922129631042
./Ant-v3_PPO_1_5652/actor_000005749900.pth | Hamilton 0.7291790246963501
./Ant-v3_PPO_1_5652/actor_000005819115.pth | Hamilton 0.7499415874481201
./Ant-v3_PPO_1_5652/actor_000005889217.pth | Hamilton 0.8079853057861328
./Ant-v3_PPO_1_5652/actor_000005961590.pth | Hamilton 0.7244646549224854
./Ant-v3_PPO_1_5652/actor_000006032014.pth | Hamilton 0.662145733833313
./Ant-v3_PPO_1_5652/actor_000006101744.pth | Hamilton 0.720055878162384
./Ant-v3_PPO_1_5652/actor_000006169337.pth | Hamilton 0.7310866117477417
./Ant-v3_PPO_1_5652/actor_000006239859.pth | Hamilton 0.7449906468391418
./Ant-v3_PPO_1_5652/actor_000006308579.pth | Hamilton 0.7727760672569275
./Ant-v3_PPO_1_5652/actor_000006381472.pth | Hamilton 0.7957735657691956
./Ant-v3_PPO_1_5652/actor_000006446102.pth | Hamilton 0.7490442395210266
./Ant-v3_PPO_1_5652/actor_000006519333.pth | Hamilton 0.7140330672264099
./Ant-v3_PPO_1_5652/actor_000006588402.pth | Hamilton 0.6261993646621704
./Ant-v3_PPO_1_5652/actor_000006661593.pth | Hamilton 0.5894293785095215
./Ant-v3_PPO_1_5652/actor_000006735515.pth | Hamilton 0.6083714962005615
./Ant-v3_PPO_1_5652/actor_000006805911.pth | Hamilton 0.5622373819351196
./Ant-v3_PPO_1_5652/actor_000006873670.pth | Hamilton 0.527230441570282
./Ant-v3_PPO_1_5652/actor_000006943941.pth | Hamilton 0.5927292108535767
./Ant-v3_PPO_1_5652/actor_000007016527.pth | Hamilton 0.5469595789909363
./Ant-v3_PPO_1_5652/actor_000007090346.pth | Hamilton 0.580588161945343
./Ant-v3_PPO_1_5652/actor_000007163846.pth | Hamilton 0.5832631587982178
./Ant-v3_PPO_1_5652/actor_000007235232.pth | Hamilton 0.583800733089447
./Ant-v3_PPO_1_5652/actor_000007306734.pth | Hamilton 0.6016122102737427
./Ant-v3_PPO_1_5652/actor_000007383598.pth | Hamilton 0.6271316409111023
./Ant-v3_PPO_1_5652/actor_000007455852.pth | Hamilton 0.5809938907623291
./Ant-v3_PPO_1_5652/actor_000007529852.pth | Hamilton 0.6324378252029419
./Ant-v3_PPO_1_5652/actor_000007600442.pth | Hamilton 0.6212618947029114
./Ant-v3_PPO_1_5652/actor_000007673957.pth | Hamilton 0.5678633451461792
./Ant-v3_PPO_1_5652/actor_000007750665.pth | Hamilton 0.5429845452308655
./Ant-v3_PPO_1_5652/actor_000007828312.pth | Hamilton 0.5519565939903259
./Ant-v3_PPO_1_5652/actor_000007901719.pth | Hamilton 0.5232998132705688
./Ant-v3_PPO_1_5652/actor_000007974511.pth | Hamilton 0.5144525766372681
./Ant-v3_PPO_1_5652/actor_000008045528.pth | Hamilton 0.4854491353034973
./Ant-v3_PPO_1_5652/actor_000008116599.pth | Hamilton 0.47758084535598755
./Ant-v3_PPO_1_5652/actor_000008189711.pth | Hamilton 0.4661515951156616
./Ant-v3_PPO_1_5652/actor_000008265604.pth | Hamilton 0.4524286985397339
./Ant-v3_PPO_1_5652/actor_000008342324.pth | Hamilton 0.4611773192882538
./Ant-v3_PPO_1_5652/actor_000008416636.pth | Hamilton 0.41194844245910645
./Ant-v3_PPO_1_5652/actor_000008489589.pth | Hamilton 0.4226505160331726
./Ant-v3_PPO_1_5652/actor_000008562342.pth | Hamilton 0.3516698479652405
./Ant-v3_PPO_1_5652/actor_000008638083.pth | Hamilton 0.3146302402019501
./Ant-v3_PPO_1_5652/actor_000008714962.pth | Hamilton 0.32267847657203674
./Ant-v3_PPO_1_5652/actor_000008788282.pth | Hamilton 0.291159063577652
./Ant-v3_PPO_1_5652/actor_000008870340.pth | Hamilton 0.2883017957210541
./Ant-v3_PPO_1_5652/actor_000008951105.pth | Hamilton 0.2523723542690277
./Ant-v3_PPO_1_5652/actor_000009027735.pth | Hamilton 0.2867855429649353
./Ant-v3_PPO_1_5652/actor_000009103805.pth | Hamilton 0.22562040388584137
./Ant-v3_PPO_1_5652/actor_000009180968.pth | Hamilton 0.2351466715335846
./Ant-v3_PPO_1_5652/actor_000009254744.pth | Hamilton 0.2769852876663208
./Ant-v3_PPO_1_5652/actor_000009337464.pth | Hamilton 0.20251613855361938
./Ant-v3_PPO_1_5652/actor_000009414932.pth | Hamilton 0.21815727651119232
./Ant-v3_PPO_1_5652/actor_000009489873.pth | Hamilton 0.2109820544719696
./Ant-v3_PPO_1_5652/actor_000009565802.pth | Hamilton 0.22592395544052124
./Ant-v3_PPO_1_5652/actor_000009641733.pth | Hamilton 0.20558813214302063
./Ant-v3_PPO_1_5652/actor_000009721570.pth | Hamilton 0.1782127022743225
./Ant-v3_PPO_1_5652/actor_000009793581.pth | Hamilton 0.19156116247177124
./Ant-v3_PPO_1_5652/actor_000009867257.pth | Hamilton 0.17498981952667236
./Ant-v3_PPO_1_5652/actor_000009941998.pth | Hamilton 0.21455034613609314
./Ant-v3_PPO_1_5652/actor_000010022917.pth | Hamilton 0.1971164494752884
./Ant-v3_PPO_1_5652/actor_000010099451.pth | Hamilton 0.1811075359582901
./Ant-v3_PPO_1_5652/actor_000010179414.pth | Hamilton 0.20524540543556213
./Ant-v3_PPO_1_5652/actor_000010258582.pth | Hamilton 0.16834595799446106
./Ant-v3_PPO_1_5652/actor_000010332567.pth | Hamilton 0.1972096860408783
./Ant-v3_PPO_1_5652/actor_000010407592.pth | Hamilton 0.16798628866672516
./Ant-v3_PPO_1_5652/actor_000010482940.pth | Hamilton 0.1779131442308426
./Ant-v3_PPO_1_5652/actor_000010552349.pth | Hamilton 0.17091748118400574
./Ant-v3_PPO_1_5652/actor_000010632218.pth | Hamilton 0.2084471732378006
./Ant-v3_PPO_1_5652/actor_000010711476.pth | Hamilton 0.18735790252685547
./Ant-v3_PPO_1_5652/actor_000010788446.pth | Hamilton 0.2094695121049881
./Ant-v3_PPO_1_5652/actor_000010867023.pth | Hamilton 0.18471428751945496
./Ant-v3_PPO_1_5652/actor_000010947048.pth | Hamilton 0.180636927485466
./Ant-v3_PPO_1_5652/actor_000011027106.pth | Hamilton 0.17618940770626068
./Ant-v3_PPO_1_5652/actor_000011109058.pth | Hamilton 0.15484780073165894
./Ant-v3_PPO_1_5652/actor_000011184037.pth | Hamilton 0.19102251529693604
./Ant-v3_PPO_1_5652/actor_000011262347.pth | Hamilton 0.1894334852695465
./Ant-v3_PPO_1_5652/actor_000011340574.pth | Hamilton 0.17500774562358856
./Ant-v3_PPO_1_5652/actor_000011418073.pth | Hamilton 0.1880897432565689
./Ant-v3_PPO_1_5652/actor_000011493178.pth | Hamilton 0.18235641717910767
./Ant-v3_PPO_1_5652/actor_000011567282.pth | Hamilton 0.156173974275589
./Ant-v3_PPO_1_5652/actor_000011640921.pth | Hamilton 0.1959352195262909
./Ant-v3_PPO_1_5652/actor_000011717239.pth | Hamilton 0.1892167031764984
./Ant-v3_PPO_1_5652/actor_000011795760.pth | Hamilton 0.1929270476102829
./Ant-v3_PPO_1_5652/actor_000011874103.pth | Hamilton 0.1788388043642044
./Ant-v3_PPO_1_5652/actor_000011952196.pth | Hamilton 0.17896829545497894
./Ant-v3_PPO_1_5652/actor_000012034347.pth | Hamilton 0.187763050198555
./Ant-v3_PPO_1_5652/actor_000012111216.pth | Hamilton 0.1680489331483841
./Ant-v3_PPO_1_5652/actor_000012187379.pth | Hamilton 0.16268162429332733
./Ant-v3_PPO_1_5652/actor_000012268094.pth | Hamilton 0.14186729490756989
./Ant-v3_PPO_1_5652/actor_000012345268.pth | Hamilton 0.16010813415050507
./Ant-v3_PPO_1_5652/actor_000012423459.pth | Hamilton 0.1609327793121338
./Ant-v3_PPO_1_5652/actor_000012503019.pth | Hamilton 0.15605810284614563
./Ant-v3_PPO_1_5652/actor_000012581527.pth | Hamilton 0.15872521698474884
./Ant-v3_PPO_1_5652/actor_000012659369.pth | Hamilton 0.15299907326698303
./Ant-v3_PPO_1_5652/actor_000012738137.pth | Hamilton 0.15954577922821045
./Ant-v3_PPO_1_5652/actor_000012817414.pth | Hamilton 0.14546158909797668
./Ant-v3_PPO_1_5652/actor_000012894607.pth | Hamilton 0.14075154066085815
./Ant-v3_PPO_1_5652/actor_000012974090.pth | Hamilton 0.13155095279216766
./Ant-v3_PPO_1_5652/actor_000013050723.pth | Hamilton 0.14252451062202454
./Ant-v3_PPO_1_5652/actor_000013127946.pth | Hamilton 0.13593260943889618
./Ant-v3_PPO_1_5652/actor_000013204486.pth | Hamilton 0.13663159310817719
./Ant-v3_PPO_1_5652/actor_000013288684.pth | Hamilton 0.13915646076202393
./Ant-v3_PPO_1_5652/actor_000013368224.pth | Hamilton 0.13286098837852478
./Ant-v3_PPO_1_5652/actor_000013449387.pth | Hamilton 0.13698256015777588
./Ant-v3_PPO_1_5652/actor_000013526191.pth | Hamilton 0.1327577829360962
./Ant-v3_PPO_1_5652/actor_000013605516.pth | Hamilton 0.12888017296791077
./Ant-v3_PPO_1_5652/actor_000013686127.pth | Hamilton 0.13338102400302887
./Ant-v3_PPO_1_5652/actor_000013761511.pth | Hamilton 0.15550848841667175
./Ant-v3_PPO_1_5652/actor_000013839577.pth | Hamilton 0.15162503719329834
./Ant-v3_PPO_1_5652/actor_000013920160.pth | Hamilton 0.1384885609149933
./Ant-v3_PPO_1_5652/actor_000013998922.pth | Hamilton 0.14208491146564484
./Ant-v3_PPO_1_5652/actor_000014080430.pth | Hamilton 0.15164339542388916
./Ant-v3_PPO_1_5652/actor_000014155054.pth | Hamilton 0.13233420252799988
./Ant-v3_PPO_1_5652/actor_000014234949.pth | Hamilton 0.09791077673435211
./Ant-v3_PPO_1_5652/actor_000014316097.pth | Hamilton 0.09895771741867065
./Ant-v3_PPO_1_5652/actor_000014398371.pth | Hamilton 0.11614248901605606
./Ant-v3_PPO_1_5652/actor_000014477234.pth | Hamilton 0.11270968616008759
./Ant-v3_PPO_1_5652/actor_000014558000.pth | Hamilton 0.10100586712360382
./Ant-v3_PPO_1_5652/actor_000014633977.pth | Hamilton 0.0960649624466896
./Ant-v3_PPO_1_5652/actor_000014708606.pth | Hamilton 0.09586820006370544
./Ant-v3_PPO_1_5652/actor_000014786566.pth | Hamilton 0.09393085539340973
./Ant-v3_PPO_1_5652/actor_000014863062.pth | Hamilton 0.11305338144302368
./Ant-v3_PPO_1_5652/actor_000014945417.pth | Hamilton 0.11354319006204605
./Ant-v3_PPO_1_5652/actor_000015024788.pth | Hamilton 0.10254859924316406
./Ant-v3_PPO_1_5652/actor_000015106880.pth | Hamilton 0.10442005842924118
./Ant-v3_PPO_1_5652/actor_000015188696.pth | Hamilton 0.1076483502984047
./Ant-v3_PPO_1_5652/actor_000015269254.pth | Hamilton 0.10799899697303772
./Ant-v3_PPO_1_5652/actor_000015352186.pth | Hamilton 0.10403682291507721
./Ant-v3_PPO_1_5652/actor_000015430730.pth | Hamilton 0.112589992582798
./Ant-v3_PPO_1_5652/actor_000015509996.pth | Hamilton 0.10476023703813553
./Ant-v3_PPO_1_5652/actor_000015594578.pth | Hamilton 0.08612991869449615
./Ant-v3_PPO_1_5652/actor_000015675992.pth | Hamilton 0.07796421647071838
./Ant-v3_PPO_1_5652/actor_000015756727.pth | Hamilton 0.09738533943891525
./Ant-v3_PPO_1_5652/actor_000015836673.pth | Hamilton 0.08513901382684708
./Ant-v3_PPO_1_5652/actor_000015915331.pth | Hamilton 0.09248708933591843
./Ant-v3_PPO_1_5652/actor_000015994052.pth | Hamilton 0.1047055646777153
./Ant-v3_PPO_1_5652/actor_000016075868.pth | Hamilton 0.07500381767749786
./Ant-v3_PPO_1_5652/actor_000016155226.pth | Hamilton 0.09608280658721924
./Ant-v3_PPO_1_5652/actor_000016237958.pth | Hamilton 0.09948208183050156
./Ant-v3_PPO_1_5652/actor_000016319882.pth | Hamilton 0.09576436132192612
./Ant-v3_PPO_1_5652/actor_000016400474.pth | Hamilton 0.08358833193778992
./Ant-v3_PPO_1_5652/actor_000016479551.pth | Hamilton 0.08021368831396103
./Ant-v3_PPO_1_5652/actor__000000008915_00966.274.pth | Hamilton 0.004856710322201252
./Ant-v3_PPO_1_5652/actor__000000449322_01366.960.pth | Hamilton 0.021951785311102867
./Ant-v3_PPO_1_5652/actor__000000866673_02317.041.pth | Hamilton 0.06273657828569412
./Ant-v3_PPO_1_5652/actor__000002568971_03993.560.pth | Hamilton 0.2581771910190582
./Ant-v3_PPO_1_5652/actor__000002998082_04643.997.pth | Hamilton 0.32738110423088074
./Ant-v3_PPO_1_5652/actor__000003431065_05183.716.pth | Hamilton 0.42117103934288025
./Ant-v3_PPO_1_5652/actor__000003863836_05462.534.pth | Hamilton 0.4759177267551422
./Ant-v3_PPO_1_5652/actor__000006015148_05608.758.pth | Hamilton 0.5216149687767029
./Ant-v3_PPO_1_5652/actor__000007294230_05652.746.pth | Hamilton 0.5176161527633667
"""
# Ant-v3_PPO_0_5855
# Raw evaluation dump for one training run (labelled "Ant-v3_PPO_0_5855" above).
# One record per line: "<checkpoint path> | Hamilton <float score>".
# The trailing `actor__<step>_<score>.pth` entries carry a score in the
# filename itself — presumably best-so-far snapshots; TODO confirm against
# whatever code parses this blob elsewhere in the file.
data56 = """
./Ant-v3_PPO_0/actor_000000085414.pth | Hamilton 0.0029025734402239323
./Ant-v3_PPO_0/actor_000000243770.pth | Hamilton 0.006328597664833069
./Ant-v3_PPO_0/actor_000000401074.pth | Hamilton 0.02756342850625515
./Ant-v3_PPO_0/actor_000000560596.pth | Hamilton 0.04912560433149338
./Ant-v3_PPO_0/actor_000000716464.pth | Hamilton 0.10722806304693222
./Ant-v3_PPO_0/actor_000000873409.pth | Hamilton 0.24436624348163605
./Ant-v3_PPO_0/actor_000001027516.pth | Hamilton 0.4213712513446808
./Ant-v3_PPO_0/actor_000001177630.pth | Hamilton 0.7905212044715881
./Ant-v3_PPO_0/actor_000001327142.pth | Hamilton 0.9974576234817505
./Ant-v3_PPO_0/actor_000001474622.pth | Hamilton 0.8539029955863953
./Ant-v3_PPO_0/actor_000001631244.pth | Hamilton 1.3321231603622437
./Ant-v3_PPO_0/actor_000001790766.pth | Hamilton 1.5765880346298218
./Ant-v3_PPO_0/actor_000001939431.pth | Hamilton 1.7624365091323853
./Ant-v3_PPO_0/actor_000002086669.pth | Hamilton 1.8549476861953735
./Ant-v3_PPO_0/actor_000002237150.pth | Hamilton 1.9288318157196045
./Ant-v3_PPO_0/actor_000002385324.pth | Hamilton 1.9405803680419922
./Ant-v3_PPO_0/actor_000002533284.pth | Hamilton 1.7299922704696655
./Ant-v3_PPO_0/actor_000002684138.pth | Hamilton 1.5286706686019897
./Ant-v3_PPO_0/actor_000002830980.pth | Hamilton 1.3947529792785645
./Ant-v3_PPO_0/actor_000002980876.pth | Hamilton 1.2257091999053955
./Ant-v3_PPO_0/actor_000003129933.pth | Hamilton 1.3302849531173706
./Ant-v3_PPO_0/actor_000003282094.pth | Hamilton 1.3594427108764648
./Ant-v3_PPO_0/actor_000003426790.pth | Hamilton 1.2633490562438965
./Ant-v3_PPO_0/actor_000003572407.pth | Hamilton 1.3654605150222778
./Ant-v3_PPO_0/actor_000003718858.pth | Hamilton 1.294988751411438
./Ant-v3_PPO_0/actor_000003857839.pth | Hamilton 1.3169890642166138
./Ant-v3_PPO_0/actor_000004003009.pth | Hamilton 1.1112805604934692
./Ant-v3_PPO_0/actor_000004145535.pth | Hamilton 1.169765591621399
./Ant-v3_PPO_0/actor_000004292479.pth | Hamilton 1.1815712451934814
./Ant-v3_PPO_0/actor_000004444328.pth | Hamilton 1.0644750595092773
./Ant-v3_PPO_0/actor_000004587701.pth | Hamilton 1.112640142440796
./Ant-v3_PPO_0/actor_000004728468.pth | Hamilton 1.1046756505966187
./Ant-v3_PPO_0/actor_000004872869.pth | Hamilton 1.0918989181518555
./Ant-v3_PPO_0/actor_000005014430.pth | Hamilton 1.1371606588363647
./Ant-v3_PPO_0/actor_000005159151.pth | Hamilton 1.1001709699630737
./Ant-v3_PPO_0/actor_000005304228.pth | Hamilton 0.920396089553833
./Ant-v3_PPO_0/actor_000005441214.pth | Hamilton 0.9862926602363586
./Ant-v3_PPO_0/actor_000005584829.pth | Hamilton 1.0144598484039307
./Ant-v3_PPO_0/actor_000005728220.pth | Hamilton 1.028064250946045
./Ant-v3_PPO_0/actor_000005869702.pth | Hamilton 0.9929002523422241
./Ant-v3_PPO_0/actor_000006008427.pth | Hamilton 1.0489033460617065
./Ant-v3_PPO_0/actor_000006147835.pth | Hamilton 1.0967928171157837
./Ant-v3_PPO_0/actor_000006287959.pth | Hamilton 1.0431030988693237
./Ant-v3_PPO_0/actor_000006428281.pth | Hamilton 0.9418889284133911
./Ant-v3_PPO_0/actor_000006564566.pth | Hamilton 0.8754620552062988
./Ant-v3_PPO_0/actor_000006701337.pth | Hamilton 0.80799400806427
./Ant-v3_PPO_0/actor_000006839567.pth | Hamilton 0.8622046709060669
./Ant-v3_PPO_0/actor_000006978486.pth | Hamilton 0.8850733041763306
./Ant-v3_PPO_0/actor_000007120505.pth | Hamilton 0.8072265982627869
./Ant-v3_PPO_0/actor_000007259122.pth | Hamilton 0.8856381773948669
./Ant-v3_PPO_0/actor_000007400504.pth | Hamilton 0.8131003379821777
./Ant-v3_PPO_0/actor_000007543686.pth | Hamilton 0.8418211936950684
./Ant-v3_PPO_0/actor_000007684431.pth | Hamilton 0.8467435240745544
./Ant-v3_PPO_0/actor_000007822637.pth | Hamilton 0.6920300126075745
./Ant-v3_PPO_0/actor_000007958464.pth | Hamilton 0.6199498176574707
./Ant-v3_PPO_0/actor_000008099490.pth | Hamilton 0.7212328314781189
./Ant-v3_PPO_0/actor_000008241567.pth | Hamilton 0.6973507404327393
./Ant-v3_PPO_0/actor_000008384073.pth | Hamilton 0.6892178058624268
./Ant-v3_PPO_0/actor_000008522044.pth | Hamilton 0.7241084575653076
./Ant-v3_PPO_0/actor_000008663378.pth | Hamilton 0.6114094853401184
./Ant-v3_PPO_0/actor_000008805321.pth | Hamilton 0.5926937460899353
./Ant-v3_PPO_0/actor_000008950384.pth | Hamilton 0.6066598296165466
./Ant-v3_PPO_0/actor_000009093238.pth | Hamilton 0.5689615607261658
./Ant-v3_PPO_0/actor_000009239470.pth | Hamilton 0.513106644153595
./Ant-v3_PPO_0/actor_000009385945.pth | Hamilton 0.496509850025177
./Ant-v3_PPO_0/actor_000009530167.pth | Hamilton 0.47634872794151306
./Ant-v3_PPO_0/actor_000009672484.pth | Hamilton 0.34248748421669006
./Ant-v3_PPO_0/actor_000009812732.pth | Hamilton 0.36613577604293823
./Ant-v3_PPO_0/actor_000009962345.pth | Hamilton 0.3264988958835602
./Ant-v3_PPO_0/actor_000010105717.pth | Hamilton 0.31633368134498596
./Ant-v3_PPO_0/actor_000010251397.pth | Hamilton 0.33853277564048767
./Ant-v3_PPO_0/actor_000010397087.pth | Hamilton 0.3083697259426117
./Ant-v3_PPO_0/actor_000010538222.pth | Hamilton 0.30327925086021423
./Ant-v3_PPO_0/actor_000010689395.pth | Hamilton 0.30258941650390625
./Ant-v3_PPO_0/actor_000010843033.pth | Hamilton 0.2216603308916092
./Ant-v3_PPO_0/actor_000010990443.pth | Hamilton 0.2471635639667511
./Ant-v3_PPO_0/actor_000011144672.pth | Hamilton 0.24953070282936096
./Ant-v3_PPO_0/actor_000011289214.pth | Hamilton 0.14620532095432281
./Ant-v3_PPO_0/actor_000011441873.pth | Hamilton 0.19712316989898682
./Ant-v3_PPO_0/actor_000011584387.pth | Hamilton 0.09800136834383011
./Ant-v3_PPO_0/actor_000011728758.pth | Hamilton 0.18696282804012299
./Ant-v3_PPO_0/actor_000011875836.pth | Hamilton 0.1589224636554718
./Ant-v3_PPO_0/actor_000012026877.pth | Hamilton 0.19990846514701843
./Ant-v3_PPO_0/actor_000012170069.pth | Hamilton 0.20581716299057007
./Ant-v3_PPO_0/actor_000012317557.pth | Hamilton 0.12021981179714203
./Ant-v3_PPO_0/actor_000012466186.pth | Hamilton 0.16489849984645844
./Ant-v3_PPO_0/actor_000012614491.pth | Hamilton 0.055201709270477295
./Ant-v3_PPO_0/actor_000012766087.pth | Hamilton 0.08762515336275101
./Ant-v3_PPO_0/actor_000012919013.pth | Hamilton 0.13393522799015045
./Ant-v3_PPO_0/actor_000013069910.pth | Hamilton 0.12683454155921936
./Ant-v3_PPO_0/actor_000013223674.pth | Hamilton 0.13377448916435242
./Ant-v3_PPO_0/actor_000013381797.pth | Hamilton 0.10117260366678238
./Ant-v3_PPO_0/actor_000013531514.pth | Hamilton 0.10573001205921173
./Ant-v3_PPO_0/actor_000013690847.pth | Hamilton 0.12195708602666855
./Ant-v3_PPO_0/actor_000013844307.pth | Hamilton 0.09576383233070374
./Ant-v3_PPO_0/actor_000013998689.pth | Hamilton 0.12003029137849808
./Ant-v3_PPO_0/actor_000014148265.pth | Hamilton 0.10425082594156265
./Ant-v3_PPO_0/actor_000014298872.pth | Hamilton 0.09037936478853226
./Ant-v3_PPO_0/actor_000014452702.pth | Hamilton 0.08776895701885223
./Ant-v3_PPO_0/actor_000014604683.pth | Hamilton 0.08233048021793365
./Ant-v3_PPO_0/actor_000014762569.pth | Hamilton 0.06156100332736969
./Ant-v3_PPO_0/actor_000014920979.pth | Hamilton 0.07099446654319763
./Ant-v3_PPO_0/actor_000015072102.pth | Hamilton 0.07946766167879105
./Ant-v3_PPO_0/actor_000015221525.pth | Hamilton 0.04775020107626915
./Ant-v3_PPO_0/actor_000015369866.pth | Hamilton 0.06353195756673813
./Ant-v3_PPO_0/actor_000015532338.pth | Hamilton 0.06797437369823456
./Ant-v3_PPO_0/actor_000015679584.pth | Hamilton 0.06938889622688293
./Ant-v3_PPO_0/actor_000015838278.pth | Hamilton 0.046853866428136826
./Ant-v3_PPO_0/actor_000015989063.pth | Hamilton 0.055338963866233826
./Ant-v3_PPO_0/actor_000016141566.pth | Hamilton 0.010161545127630234
./Ant-v3_PPO_0/actor_000016292189.pth | Hamilton 0.03867040574550629
./Ant-v3_PPO_0/actor_000016449289.pth | Hamilton 0.0444650836288929
./Ant-v3_PPO_0/actor_000016602294.pth | Hamilton 0.04583687335252762
./Ant-v3_PPO_0/actor_000016757845.pth | Hamilton 0.0447036437690258
./Ant-v3_PPO_0/actor_000016915256.pth | Hamilton 0.023902952671051025
./Ant-v3_PPO_0/actor_000017071825.pth | Hamilton 0.037863556295633316
./Ant-v3_PPO_0/actor_000017228431.pth | Hamilton 0.04261035844683647
./Ant-v3_PPO_0/actor_000017379557.pth | Hamilton 0.05935411900281906
./Ant-v3_PPO_0/actor_000017535941.pth | Hamilton 0.03696506470441818
./Ant-v3_PPO_0/actor_000017698850.pth | Hamilton 0.03556128591299057
./Ant-v3_PPO_0/actor_000017856089.pth | Hamilton 0.04959869384765625
./Ant-v3_PPO_0/actor_000018014290.pth | Hamilton 0.051861897110939026
./Ant-v3_PPO_0/actor_000018177096.pth | Hamilton 0.05240265652537346
./Ant-v3_PPO_0/actor_000018334121.pth | Hamilton 0.0536409430205822
./Ant-v3_PPO_0/actor_000018493290.pth | Hamilton 0.03107709065079689
./Ant-v3_PPO_0/actor_000018650731.pth | Hamilton 0.03254678472876549
./Ant-v3_PPO_0/actor_000018807593.pth | Hamilton 0.033785946667194366
./Ant-v3_PPO_0/actor_000018969480.pth | Hamilton 0.02604510635137558
./Ant-v3_PPO_0/actor_000019136172.pth | Hamilton 0.029944289475679398
./Ant-v3_PPO_0/actor_000019287203.pth | Hamilton 0.053006611764431
./Ant-v3_PPO_0/actor_000019438865.pth | Hamilton 0.009729847311973572
./Ant-v3_PPO_0/actor_000019593388.pth | Hamilton 0.023494603112339973
./Ant-v3_PPO_0/actor_000019744978.pth | Hamilton 0.04619710519909859
./Ant-v3_PPO_0/actor_000019899676.pth | Hamilton 0.036250434815883636
./Ant-v3_PPO_0/actor__000000010423_00994.712.pth | Hamilton 0.004397555720061064
./Ant-v3_PPO_0/actor__000000665275_02040.477.pth | Hamilton 0.054149847477674484
./Ant-v3_PPO_0/actor__000001327142_04493.069.pth | Hamilton 0.2578836977481842
./Ant-v3_PPO_0/actor__000001988539_05181.520.pth | Hamilton 0.47620826959609985
./Ant-v3_PPO_0/actor__000003313114_05609.881.pth | Hamilton 0.5133584141731262
./Ant-v3_PPO_0/actor__000003980346_05855.220.pth | Hamilton 0.5323107838630676
"""
# Swimmer-v3_PPOHtermK_3_153
# Raw evaluation dump for the "Swimmer-v3_PPOHtermK_3_153" run.
# Same record format as the other dataNN blobs in this file:
# "<checkpoint path> | Hamilton <float score>". The `actor__<step>_<score>.pth`
# entries at the end embed a score in the filename — presumably best-so-far
# snapshots; TODO confirm against the code that consumes these strings.
data61 = """
./Swimmer-v3_PPOHtermK_3_153/actor_000000016000.pth | Hamilton 0.015284018591046333
./Swimmer-v3_PPOHtermK_3_153/actor_000000440000.pth | Hamilton 0.02318093739449978
./Swimmer-v3_PPOHtermK_3_153/actor_000000864000.pth | Hamilton 0.02110038883984089
./Swimmer-v3_PPOHtermK_3_153/actor_000001288000.pth | Hamilton 0.0277717225253582
./Swimmer-v3_PPOHtermK_3_153/actor_000001712000.pth | Hamilton 0.03361089527606964
./Swimmer-v3_PPOHtermK_3_153/actor_000002136000.pth | Hamilton 0.0430649071931839
./Swimmer-v3_PPOHtermK_3_153/actor_000002560000.pth | Hamilton 0.052320223301649094
./Swimmer-v3_PPOHtermK_3_153/actor_000002984000.pth | Hamilton 0.0483604297041893
./Swimmer-v3_PPOHtermK_3_153/actor_000003408000.pth | Hamilton 0.05923140421509743
./Swimmer-v3_PPOHtermK_3_153/actor_000003832000.pth | Hamilton 0.06308675557374954
./Swimmer-v3_PPOHtermK_3_153/actor_000004256000.pth | Hamilton 0.06348717212677002
./Swimmer-v3_PPOHtermK_3_153/actor_000004680000.pth | Hamilton 0.06725157797336578
./Swimmer-v3_PPOHtermK_3_153/actor_000005104000.pth | Hamilton 0.06573229283094406
./Swimmer-v3_PPOHtermK_3_153/actor_000005528000.pth | Hamilton 0.06860259920358658
./Swimmer-v3_PPOHtermK_3_153/actor_000005952000.pth | Hamilton 0.06931988894939423
./Swimmer-v3_PPOHtermK_3_153/actor_000006376000.pth | Hamilton 0.06955544650554657
./Swimmer-v3_PPOHtermK_3_153/actor_000006800000.pth | Hamilton 0.07448301464319229
./Swimmer-v3_PPOHtermK_3_153/actor_000007224000.pth | Hamilton 0.057893797755241394
./Swimmer-v3_PPOHtermK_3_153/actor_000007648000.pth | Hamilton 0.07393565773963928
./Swimmer-v3_PPOHtermK_3_153/actor_000008072000.pth | Hamilton 0.07223065942525864
./Swimmer-v3_PPOHtermK_3_153/actor_000008496000.pth | Hamilton 0.06485088914632797
./Swimmer-v3_PPOHtermK_3_153/actor_000008920000.pth | Hamilton 0.05824441835284233
./Swimmer-v3_PPOHtermK_3_153/actor_000009344000.pth | Hamilton 0.06692440807819366
./Swimmer-v3_PPOHtermK_3_153/actor_000009768000.pth | Hamilton 0.07243632525205612
./Swimmer-v3_PPOHtermK_3_153/actor_000010192000.pth | Hamilton 0.07557813078165054
./Swimmer-v3_PPOHtermK_3_153/actor_000010616000.pth | Hamilton 0.08084622770547867
./Swimmer-v3_PPOHtermK_3_153/actor_000011040000.pth | Hamilton 0.08483884483575821
./Swimmer-v3_PPOHtermK_3_153/actor_000011464000.pth | Hamilton 0.09236171096563339
./Swimmer-v3_PPOHtermK_3_153/actor_000011888000.pth | Hamilton 0.08220705389976501
./Swimmer-v3_PPOHtermK_3_153/actor_000012312000.pth | Hamilton 0.09198032319545746
./Swimmer-v3_PPOHtermK_3_153/actor_000012736000.pth | Hamilton 0.08358502388000488
./Swimmer-v3_PPOHtermK_3_153/actor_000013160000.pth | Hamilton 0.09170962125062943
./Swimmer-v3_PPOHtermK_3_153/actor_000013584000.pth | Hamilton 0.09168653935194016
./Swimmer-v3_PPOHtermK_3_153/actor_000014008000.pth | Hamilton 0.09277141094207764
./Swimmer-v3_PPOHtermK_3_153/actor_000014432000.pth | Hamilton 0.08668225258588791
./Swimmer-v3_PPOHtermK_3_153/actor_000014856000.pth | Hamilton 0.08933420479297638
./Swimmer-v3_PPOHtermK_3_153/actor_000015280000.pth | Hamilton 0.08612120896577835
./Swimmer-v3_PPOHtermK_3_153/actor_000015704000.pth | Hamilton 0.08954863250255585
./Swimmer-v3_PPOHtermK_3_153/actor_000016128000.pth | Hamilton 0.08818070590496063
./Swimmer-v3_PPOHtermK_3_153/actor_000016552000.pth | Hamilton 0.0858926996588707
./Swimmer-v3_PPOHtermK_3_153/actor_000016976000.pth | Hamilton 0.08892080932855606
./Swimmer-v3_PPOHtermK_3_153/actor_000017400000.pth | Hamilton 0.08661225438117981
./Swimmer-v3_PPOHtermK_3_153/actor_000017824000.pth | Hamilton 0.09251777082681656
./Swimmer-v3_PPOHtermK_3_153/actor_000018248000.pth | Hamilton 0.09396494925022125
./Swimmer-v3_PPOHtermK_3_153/actor_000018672000.pth | Hamilton 0.09765814244747162
./Swimmer-v3_PPOHtermK_3_153/actor_000019096000.pth | Hamilton 0.10147365182638168
./Swimmer-v3_PPOHtermK_3_153/actor_000019520000.pth | Hamilton 0.10208629816770554
./Swimmer-v3_PPOHtermK_3_153/actor_000019944000.pth | Hamilton 0.10211846977472305
./Swimmer-v3_PPOHtermK_3_153/actor_000020368000.pth | Hamilton 0.10014037042856216
./Swimmer-v3_PPOHtermK_3_153/actor_000020792000.pth | Hamilton 0.11104506254196167
./Swimmer-v3_PPOHtermK_3_153/actor_000021216000.pth | Hamilton 0.10182332992553711
./Swimmer-v3_PPOHtermK_3_153/actor_000021640000.pth | Hamilton 0.11111660301685333
./Swimmer-v3_PPOHtermK_3_153/actor_000022064000.pth | Hamilton 0.10507290065288544
./Swimmer-v3_PPOHtermK_3_153/actor_000022488000.pth | Hamilton 0.11727281659841537
./Swimmer-v3_PPOHtermK_3_153/actor_000022912000.pth | Hamilton 0.1116613820195198
./Swimmer-v3_PPOHtermK_3_153/actor_000023336000.pth | Hamilton 0.1207902729511261
./Swimmer-v3_PPOHtermK_3_153/actor_000023760000.pth | Hamilton 0.12059961259365082
./Swimmer-v3_PPOHtermK_3_153/actor_000024184000.pth | Hamilton 0.11582706868648529
./Swimmer-v3_PPOHtermK_3_153/actor_000024608000.pth | Hamilton 0.11412307620048523
./Swimmer-v3_PPOHtermK_3_153/actor_000025032000.pth | Hamilton 0.10451658070087433
./Swimmer-v3_PPOHtermK_3_153/actor_000025456000.pth | Hamilton 0.1134413629770279
./Swimmer-v3_PPOHtermK_3_153/actor_000025880000.pth | Hamilton 0.11217883229255676
./Swimmer-v3_PPOHtermK_3_153/actor_000026304000.pth | Hamilton 0.12590916454792023
./Swimmer-v3_PPOHtermK_3_153/actor_000026728000.pth | Hamilton 0.11783110350370407
./Swimmer-v3_PPOHtermK_3_153/actor_000027152000.pth | Hamilton 0.12443403899669647
./Swimmer-v3_PPOHtermK_3_153/actor_000027576000.pth | Hamilton 0.12275739759206772
./Swimmer-v3_PPOHtermK_3_153/actor_000028000000.pth | Hamilton 0.1277901977300644
./Swimmer-v3_PPOHtermK_3_153/actor_000028424000.pth | Hamilton 0.12068721652030945
./Swimmer-v3_PPOHtermK_3_153/actor_000028848000.pth | Hamilton 0.1195996105670929
./Swimmer-v3_PPOHtermK_3_153/actor_000029272000.pth | Hamilton 0.12629397213459015
./Swimmer-v3_PPOHtermK_3_153/actor_000029696000.pth | Hamilton 0.13557474315166473
./Swimmer-v3_PPOHtermK_3_153/actor_000030120000.pth | Hamilton 0.12547877430915833
./Swimmer-v3_PPOHtermK_3_153/actor_000030544000.pth | Hamilton 0.14528505504131317
./Swimmer-v3_PPOHtermK_3_153/actor_000030968000.pth | Hamilton 0.14160755276679993
./Swimmer-v3_PPOHtermK_3_153/actor_000031392000.pth | Hamilton 0.12636616826057434
./Swimmer-v3_PPOHtermK_3_153/actor_000031816000.pth | Hamilton 0.14631716907024384
./Swimmer-v3_PPOHtermK_3_153/actor_000032240000.pth | Hamilton 0.1478620022535324
./Swimmer-v3_PPOHtermK_3_153/actor_000032664000.pth | Hamilton 0.141898512840271
./Swimmer-v3_PPOHtermK_3_153/actor_000033088000.pth | Hamilton 0.14540569484233856
./Swimmer-v3_PPOHtermK_3_153/actor_000033512000.pth | Hamilton 0.150565505027771
./Swimmer-v3_PPOHtermK_3_153/actor_000033936000.pth | Hamilton 0.15319319069385529
./Swimmer-v3_PPOHtermK_3_153/actor_000034360000.pth | Hamilton 0.15617600083351135
./Swimmer-v3_PPOHtermK_3_153/actor_000034784000.pth | Hamilton 0.15575018525123596
./Swimmer-v3_PPOHtermK_3_153/actor_000035208000.pth | Hamilton 0.14449091255664825
./Swimmer-v3_PPOHtermK_3_153/actor_000035632000.pth | Hamilton 0.1428202986717224
./Swimmer-v3_PPOHtermK_3_153/actor_000036056000.pth | Hamilton 0.15125827491283417
./Swimmer-v3_PPOHtermK_3_153/actor_000036480000.pth | Hamilton 0.14112010598182678
./Swimmer-v3_PPOHtermK_3_153/actor_000036904000.pth | Hamilton 0.1489597111940384
./Swimmer-v3_PPOHtermK_3_153/actor_000037328000.pth | Hamilton 0.14565598964691162
./Swimmer-v3_PPOHtermK_3_153/actor_000037752000.pth | Hamilton 0.15420189499855042
./Swimmer-v3_PPOHtermK_3_153/actor_000038176000.pth | Hamilton 0.14877143502235413
./Swimmer-v3_PPOHtermK_3_153/actor_000038600000.pth | Hamilton 0.15154969692230225
./Swimmer-v3_PPOHtermK_3_153/actor_000039024000.pth | Hamilton 0.15099884569644928
./Swimmer-v3_PPOHtermK_3_153/actor_000039448000.pth | Hamilton 0.14501804113388062
./Swimmer-v3_PPOHtermK_3_153/actor_000039872000.pth | Hamilton 0.15877105295658112
./Swimmer-v3_PPOHtermK_3_153/actor_000040296000.pth | Hamilton 0.14741770923137665
./Swimmer-v3_PPOHtermK_3_153/actor_000040720000.pth | Hamilton 0.1589246243238449
./Swimmer-v3_PPOHtermK_3_153/actor_000041144000.pth | Hamilton 0.14963215589523315
./Swimmer-v3_PPOHtermK_3_153/actor_000041568000.pth | Hamilton 0.1523827314376831
./Swimmer-v3_PPOHtermK_3_153/actor_000041992000.pth | Hamilton 0.15112946927547455
./Swimmer-v3_PPOHtermK_3_153/actor_000042416000.pth | Hamilton 0.15467104315757751
./Swimmer-v3_PPOHtermK_3_153/actor_000042840000.pth | Hamilton 0.15519611537456512
./Swimmer-v3_PPOHtermK_3_153/actor_000043264000.pth | Hamilton 0.16917535662651062
./Swimmer-v3_PPOHtermK_3_153/actor_000043688000.pth | Hamilton 0.16293977200984955
./Swimmer-v3_PPOHtermK_3_153/actor_000044112000.pth | Hamilton 0.1714775562286377
./Swimmer-v3_PPOHtermK_3_153/actor_000044536000.pth | Hamilton 0.14362981915473938
./Swimmer-v3_PPOHtermK_3_153/actor_000044960000.pth | Hamilton 0.16829423606395721
./Swimmer-v3_PPOHtermK_3_153/actor_000045384000.pth | Hamilton 0.16601337492465973
./Swimmer-v3_PPOHtermK_3_153/actor_000045808000.pth | Hamilton 0.18333348631858826
./Swimmer-v3_PPOHtermK_3_153/actor_000046232000.pth | Hamilton 0.1440504938364029
./Swimmer-v3_PPOHtermK_3_153/actor_000046656000.pth | Hamilton 0.15719082951545715
./Swimmer-v3_PPOHtermK_3_153/actor_000047080000.pth | Hamilton 0.15102042257785797
./Swimmer-v3_PPOHtermK_3_153/actor_000047504000.pth | Hamilton 0.14053581655025482
./Swimmer-v3_PPOHtermK_3_153/actor_000047928000.pth | Hamilton 0.1395692080259323
./Swimmer-v3_PPOHtermK_3_153/actor_000048352000.pth | Hamilton 0.1574215441942215
./Swimmer-v3_PPOHtermK_3_153/actor_000048776000.pth | Hamilton 0.1586548238992691
./Swimmer-v3_PPOHtermK_3_153/actor_000049200000.pth | Hamilton 0.15576069056987762
./Swimmer-v3_PPOHtermK_3_153/actor_000049624000.pth | Hamilton 0.16046197712421417
./Swimmer-v3_PPOHtermK_3_153/actor_000050048000.pth | Hamilton 0.15701504051685333
./Swimmer-v3_PPOHtermK_3_153/actor_000050472000.pth | Hamilton 0.15996167063713074
./Swimmer-v3_PPOHtermK_3_153/actor_000050896000.pth | Hamilton 0.16164934635162354
./Swimmer-v3_PPOHtermK_3_153/actor_000051320000.pth | Hamilton 0.15149831771850586
./Swimmer-v3_PPOHtermK_3_153/actor_000051744000.pth | Hamilton 0.174483522772789
./Swimmer-v3_PPOHtermK_3_153/actor_000052168000.pth | Hamilton 0.1738002747297287
./Swimmer-v3_PPOHtermK_3_153/actor_000052592000.pth | Hamilton 0.16324815154075623
./Swimmer-v3_PPOHtermK_3_153/actor_000053016000.pth | Hamilton 0.16712622344493866
./Swimmer-v3_PPOHtermK_3_153/actor_000053440000.pth | Hamilton 0.16858640313148499
./Swimmer-v3_PPOHtermK_3_153/actor_000053864000.pth | Hamilton 0.1659340262413025
./Swimmer-v3_PPOHtermK_3_153/actor_000054288000.pth | Hamilton 0.16154757142066956
./Swimmer-v3_PPOHtermK_3_153/actor_000054712000.pth | Hamilton 0.16616764664649963
./Swimmer-v3_PPOHtermK_3_153/actor__000000008000_00026.720.pth | Hamilton -0.0056559364311397076
./Swimmer-v3_PPOHtermK_3_153/actor__000000296000_00043.494.pth | Hamilton 0.03497564047574997
./Swimmer-v3_PPOHtermK_3_153/actor__000000580000_00044.778.pth | Hamilton 0.049423009157180786
./Swimmer-v3_PPOHtermK_3_153/actor__000001144000_00049.282.pth | Hamilton 0.05980132520198822
./Swimmer-v3_PPOHtermK_3_153/actor__000001692000_00065.766.pth | Hamilton 0.03951427340507507
./Swimmer-v3_PPOHtermK_3_153/actor__000001964000_00091.779.pth | Hamilton 0.048991721123456955
./Swimmer-v3_PPOHtermK_3_153/actor__000002784000_00094.737.pth | Hamilton 0.05736237019300461
./Swimmer-v3_PPOHtermK_3_153/actor__000003056000_00099.481.pth | Hamilton 0.05974080041050911
./Swimmer-v3_PPOHtermK_3_153/actor__000004148000_00104.685.pth | Hamilton 0.06004209816455841
./Swimmer-v3_PPOHtermK_3_153/actor__000004420000_00105.507.pth | Hamilton 0.07214643061161041
./Swimmer-v3_PPOHtermK_3_153/actor__000004696000_00105.942.pth | Hamilton 0.08490351587533951
./Swimmer-v3_PPOHtermK_3_153/actor__000005244000_00109.913.pth | Hamilton 0.08586522191762924
./Swimmer-v3_PPOHtermK_3_153/actor__000005520000_00113.042.pth | Hamilton 0.10934942960739136
./Swimmer-v3_PPOHtermK_3_153/actor__000006616000_00114.542.pth | Hamilton 0.11011172086000443
./Swimmer-v3_PPOHtermK_3_153/actor__000006888000_00117.570.pth | Hamilton 0.0966690182685852
./Swimmer-v3_PPOHtermK_3_153/actor__000007160000_00119.502.pth | Hamilton 0.10650037974119186
./Swimmer-v3_PPOHtermK_3_153/actor__000008808000_00121.802.pth | Hamilton 0.12468832731246948
./Swimmer-v3_PPOHtermK_3_153/actor__000009900000_00123.984.pth | Hamilton 0.11578761041164398
./Swimmer-v3_PPOHtermK_3_153/actor__000010176000_00125.334.pth | Hamilton 0.1252458095550537
./Swimmer-v3_PPOHtermK_3_153/actor__000011548000_00126.883.pth | Hamilton 0.12920786440372467
./Swimmer-v3_PPOHtermK_3_153/actor__000011820000_00127.968.pth | Hamilton 0.12380073219537735
./Swimmer-v3_PPOHtermK_3_153/actor__000013996000_00130.706.pth | Hamilton 0.15050362050533295
./Swimmer-v3_PPOHtermK_3_153/actor__000016956000_00132.336.pth | Hamilton 0.1623106151819229
./Swimmer-v3_PPOHtermK_3_153/actor__000018576000_00135.680.pth | Hamilton 0.15424451231956482
./Swimmer-v3_PPOHtermK_3_153/actor__000022420000_00138.739.pth | Hamilton 0.16139452159404755
./Swimmer-v3_PPOHtermK_3_153/actor__000024120000_00140.370.pth | Hamilton 0.17065179347991943
./Swimmer-v3_PPOHtermK_3_153/actor__000026676000_00141.306.pth | Hamilton 0.1682356297969818
./Swimmer-v3_PPOHtermK_3_153/actor__000029496000_00143.761.pth | Hamilton 0.1664300113916397
./Swimmer-v3_PPOHtermK_3_153/actor__000039568000_00144.932.pth | Hamilton 0.1686769425868988
./Swimmer-v3_PPOHtermK_3_153/actor__000043916000_00147.217.pth | Hamilton 0.17447277903556824
./Swimmer-v3_PPOHtermK_3_153/actor__000045632000_00148.932.pth | Hamilton 0.1674611121416092
./Swimmer-v3_PPOHtermK_3_153/actor__000049356000_00153.211.pth | Hamilton 0.14647331833839417
"""
# Swimmer-v3_PPO_2_157
data62 = """
./Swimmer-v3_PPO_2_157/actor_000000016000.pth | Hamilton 0.019995037466287613
./Swimmer-v3_PPO_2_157/actor_000000712000.pth | Hamilton 0.03158316761255264
./Swimmer-v3_PPO_2_157/actor_000001408000.pth | Hamilton 0.03391844779253006
./Swimmer-v3_PPO_2_157/actor_000002104000.pth | Hamilton 0.038075800985097885
./Swimmer-v3_PPO_2_157/actor_000002800000.pth | Hamilton 0.03368336707353592
./Swimmer-v3_PPO_2_157/actor_000003496000.pth | Hamilton 0.03722989186644554
./Swimmer-v3_PPO_2_157/actor_000004192000.pth | Hamilton 0.0404045432806015
./Swimmer-v3_PPO_2_157/actor_000004888000.pth | Hamilton 0.037362758070230484
./Swimmer-v3_PPO_2_157/actor_000005584000.pth | Hamilton 0.03143014758825302
./Swimmer-v3_PPO_2_157/actor_000006280000.pth | Hamilton 0.03598101809620857
./Swimmer-v3_PPO_2_157/actor_000006976000.pth | Hamilton 0.04401993006467819
./Swimmer-v3_PPO_2_157/actor_000007672000.pth | Hamilton 0.03900811821222305
./Swimmer-v3_PPO_2_157/actor_000008368000.pth | Hamilton 0.038677118718624115
./Swimmer-v3_PPO_2_157/actor_000009064000.pth | Hamilton 0.022812874987721443
./Swimmer-v3_PPO_2_157/actor_000009760000.pth | Hamilton 0.023398952558636665
./Swimmer-v3_PPO_2_157/actor_000010456000.pth | Hamilton 0.02106904238462448
./Swimmer-v3_PPO_2_157/actor_000011152000.pth | Hamilton 0.024352645501494408
./Swimmer-v3_PPO_2_157/actor_000011848000.pth | Hamilton 0.020867686718702316
./Swimmer-v3_PPO_2_157/actor_000012544000.pth | Hamilton 0.018705010414123535
./Swimmer-v3_PPO_2_157/actor_000013240000.pth | Hamilton 0.02120162919163704
./Swimmer-v3_PPO_2_157/actor_000013936000.pth | Hamilton 0.025674479082226753
./Swimmer-v3_PPO_2_157/actor_000014632000.pth | Hamilton 0.025216616690158844
./Swimmer-v3_PPO_2_157/actor_000015328000.pth | Hamilton 0.02105531468987465
./Swimmer-v3_PPO_2_157/actor_000016024000.pth | Hamilton 0.018278788775205612
./Swimmer-v3_PPO_2_157/actor_000016720000.pth | Hamilton 0.013056074269115925
./Swimmer-v3_PPO_2_157/actor_000017416000.pth | Hamilton 0.006706462241709232
./Swimmer-v3_PPO_2_157/actor_000018112000.pth | Hamilton 0.008312438614666462
./Swimmer-v3_PPO_2_157/actor_000018808000.pth | Hamilton 0.017496785148978233
./Swimmer-v3_PPO_2_157/actor_000019504000.pth | Hamilton 0.016852933913469315
./Swimmer-v3_PPO_2_157/actor_000020200000.pth | Hamilton 0.01514681987464428
./Swimmer-v3_PPO_2_157/actor_000020896000.pth | Hamilton 0.015505579300224781
./Swimmer-v3_PPO_2_157/actor_000021592000.pth | Hamilton 0.016421226784586906
./Swimmer-v3_PPO_2_157/actor_000022288000.pth | Hamilton 0.010968300513923168
./Swimmer-v3_PPO_2_157/actor_000022984000.pth | Hamilton 0.012436152435839176
./Swimmer-v3_PPO_2_157/actor_000023680000.pth | Hamilton 0.01448430959135294
./Swimmer-v3_PPO_2_157/actor_000024376000.pth | Hamilton 0.015711169689893723
./Swimmer-v3_PPO_2_157/actor_000025072000.pth | Hamilton 0.01409896370023489
./Swimmer-v3_PPO_2_157/actor_000025768000.pth | Hamilton 0.01505272276699543
./Swimmer-v3_PPO_2_157/actor_000026464000.pth | Hamilton 0.01543173287063837
./Swimmer-v3_PPO_2_157/actor_000027160000.pth | Hamilton 0.015697676688432693
./Swimmer-v3_PPO_2_157/actor_000027856000.pth | Hamilton 0.013849505223333836
./Swimmer-v3_PPO_2_157/actor_000028552000.pth | Hamilton 0.014705875888466835
./Swimmer-v3_PPO_2_157/actor_000029248000.pth | Hamilton 0.015139546245336533
./Swimmer-v3_PPO_2_157/actor_000029944000.pth | Hamilton 0.013185468502342701
./Swimmer-v3_PPO_2_157/actor_000030640000.pth | Hamilton 0.01479868683964014
./Swimmer-v3_PPO_2_157/actor_000031336000.pth | Hamilton 0.019362425431609154
./Swimmer-v3_PPO_2_157/actor_000032032000.pth | Hamilton 0.022476935759186745
./Swimmer-v3_PPO_2_157/actor_000032728000.pth | Hamilton 0.021831654012203217
./Swimmer-v3_PPO_2_157/actor_000033424000.pth | Hamilton 0.016212865710258484
./Swimmer-v3_PPO_2_157/actor_000034120000.pth | Hamilton 0.01690341718494892
./Swimmer-v3_PPO_2_157/actor_000034816000.pth | Hamilton 0.017606357112526894
./Swimmer-v3_PPO_2_157/actor_000035512000.pth | Hamilton 0.018641507253050804
./Swimmer-v3_PPO_2_157/actor_000036208000.pth | Hamilton 0.01755562424659729
./Swimmer-v3_PPO_2_157/actor_000036904000.pth | Hamilton 0.016795014962553978
./Swimmer-v3_PPO_2_157/actor_000037600000.pth | Hamilton 0.014272648841142654
./Swimmer-v3_PPO_2_157/actor_000038296000.pth | Hamilton 0.014743547886610031
./Swimmer-v3_PPO_2_157/actor_000038992000.pth | Hamilton 0.014608648605644703
./Swimmer-v3_PPO_2_157/actor_000039688000.pth | Hamilton 0.008981222286820412
./Swimmer-v3_PPO_2_157/actor_000040384000.pth | Hamilton 0.014369030483067036
./Swimmer-v3_PPO_2_157/actor_000041080000.pth | Hamilton 0.014613962732255459
./Swimmer-v3_PPO_2_157/actor_000041776000.pth | Hamilton 0.01037408784031868
./Swimmer-v3_PPO_2_157/actor_000042472000.pth | Hamilton 0.01845851168036461
./Swimmer-v3_PPO_2_157/actor_000043168000.pth | Hamilton 0.020437849685549736
./Swimmer-v3_PPO_2_157/actor_000043864000.pth | Hamilton 0.018492264673113823
./Swimmer-v3_PPO_2_157/actor_000044560000.pth | Hamilton 0.017848167568445206
./Swimmer-v3_PPO_2_157/actor_000045256000.pth | Hamilton 0.019611360505223274
./Swimmer-v3_PPO_2_157/actor_000045952000.pth | Hamilton 0.016057956963777542
./Swimmer-v3_PPO_2_157/actor_000046648000.pth | Hamilton 0.012315947562456131
./Swimmer-v3_PPO_2_157/actor_000047344000.pth | Hamilton 0.012855513021349907
./Swimmer-v3_PPO_2_157/actor_000048040000.pth | Hamilton 0.01187040377408266
./Swimmer-v3_PPO_2_157/actor_000048736000.pth | Hamilton 0.013788025826215744
./Swimmer-v3_PPO_2_157/actor_000049432000.pth | Hamilton 0.009797187522053719
./Swimmer-v3_PPO_2_157/actor_000050128000.pth | Hamilton 0.011238890700042248
./Swimmer-v3_PPO_2_157/actor_000050824000.pth | Hamilton 0.007988587953150272
./Swimmer-v3_PPO_2_157/actor_000051520000.pth | Hamilton 0.00910080038011074
./Swimmer-v3_PPO_2_157/actor_000052216000.pth | Hamilton 0.005356697365641594
./Swimmer-v3_PPO_2_157/actor_000052912000.pth | Hamilton 0.0055587273091077805
./Swimmer-v3_PPO_2_157/actor_000053608000.pth | Hamilton 0.008264468051493168
./Swimmer-v3_PPO_2_157/actor_000054304000.pth | Hamilton 0.009239505976438522
./Swimmer-v3_PPO_2_157/actor_000055000000.pth | Hamilton 0.012277119792997837
./Swimmer-v3_PPO_2_157/actor_000055696000.pth | Hamilton 0.010112500749528408
./Swimmer-v3_PPO_2_157/actor_000056392000.pth | Hamilton 0.01197579875588417
./Swimmer-v3_PPO_2_157/actor_000057088000.pth | Hamilton 0.010916337370872498
./Swimmer-v3_PPO_2_157/actor_000057784000.pth | Hamilton 0.011387772858142853
./Swimmer-v3_PPO_2_157/actor_000058480000.pth | Hamilton 0.013467689976096153
./Swimmer-v3_PPO_2_157/actor_000059176000.pth | Hamilton 0.009855030104517937
./Swimmer-v3_PPO_2_157/actor_000059872000.pth | Hamilton 0.013516574166715145
./Swimmer-v3_PPO_2_157/actor_000060568000.pth | Hamilton 0.011831969954073429
./Swimmer-v3_PPO_2_157/actor_000061264000.pth | Hamilton 0.009549036622047424
./Swimmer-v3_PPO_2_157/actor_000061960000.pth | Hamilton 0.00813988596200943
./Swimmer-v3_PPO_2_157/actor_000062656000.pth | Hamilton 0.007627996616065502
./Swimmer-v3_PPO_2_157/actor_000063352000.pth | Hamilton 0.007748943753540516
./Swimmer-v3_PPO_2_157/actor_000064048000.pth | Hamilton 0.009834565222263336
./Swimmer-v3_PPO_2_157/actor_000064744000.pth | Hamilton 0.009840334765613079
./Swimmer-v3_PPO_2_157/actor_000065440000.pth | Hamilton 0.008699195459485054
./Swimmer-v3_PPO_2_157/actor_000066136000.pth | Hamilton 0.009807972237467766
./Swimmer-v3_PPO_2_157/actor_000066832000.pth | Hamilton 0.0083409883081913
./Swimmer-v3_PPO_2_157/actor_000067528000.pth | Hamilton 0.009312639012932777
./Swimmer-v3_PPO_2_157/actor_000068224000.pth | Hamilton 0.010695721954107285
./Swimmer-v3_PPO_2_157/actor_000068920000.pth | Hamilton 0.010364196263253689
./Swimmer-v3_PPO_2_157/actor_000069616000.pth | Hamilton 0.012949197553098202
./Swimmer-v3_PPO_2_157/actor_000070312000.pth | Hamilton 0.011184034869074821
./Swimmer-v3_PPO_2_157/actor_000071008000.pth | Hamilton 0.013443278148770332
./Swimmer-v3_PPO_2_157/actor_000071704000.pth | Hamilton 0.011860419064760208
./Swimmer-v3_PPO_2_157/actor_000072400000.pth | Hamilton 0.01005767285823822
./Swimmer-v3_PPO_2_157/actor_000073096000.pth | Hamilton 0.009039181284606457
./Swimmer-v3_PPO_2_157/actor_000073792000.pth | Hamilton 0.004229344427585602
./Swimmer-v3_PPO_2_157/actor_000074488000.pth | Hamilton 0.0005344958044588566
./Swimmer-v3_PPO_2_157/actor_000075184000.pth | Hamilton 0.005745874252170324
./Swimmer-v3_PPO_2_157/actor_000075880000.pth | Hamilton 0.00572598073631525
./Swimmer-v3_PPO_2_157/actor_000076576000.pth | Hamilton 0.005671259947121143
./Swimmer-v3_PPO_2_157/actor_000077272000.pth | Hamilton 0.0034571047872304916
./Swimmer-v3_PPO_2_157/actor_000077968000.pth | Hamilton 0.003943407908082008
./Swimmer-v3_PPO_2_157/actor_000078664000.pth | Hamilton -0.002030279953032732
./Swimmer-v3_PPO_2_157/actor_000079360000.pth | Hamilton 0.001996755599975586
./Swimmer-v3_PPO_2_157/actor_000080056000.pth | Hamilton 0.003908275626599789
./Swimmer-v3_PPO_2_157/actor_000080752000.pth | Hamilton 0.00192910002078861
./Swimmer-v3_PPO_2_157/actor_000081448000.pth | Hamilton -0.00031784476595930755
./Swimmer-v3_PPO_2_157/actor_000082144000.pth | Hamilton 0.0022377141285687685
./Swimmer-v3_PPO_2_157/actor_000082840000.pth | Hamilton 0.0036601454485207796
./Swimmer-v3_PPO_2_157/actor_000083536000.pth | Hamilton 0.005272920709103346
./Swimmer-v3_PPO_2_157/actor_000084232000.pth | Hamilton 0.004845046903938055
./Swimmer-v3_PPO_2_157/actor_000084928000.pth | Hamilton 0.00639183958992362
./Swimmer-v3_PPO_2_157/actor_000085624000.pth | Hamilton 0.004767554812133312
./Swimmer-v3_PPO_2_157/actor_000086320000.pth | Hamilton 0.008992607705295086
./Swimmer-v3_PPO_2_157/actor_000087016000.pth | Hamilton 0.005928999278694391
./Swimmer-v3_PPO_2_157/actor_000087712000.pth | Hamilton 0.00470054242759943
./Swimmer-v3_PPO_2_157/actor_000088408000.pth | Hamilton 0.004120151977986097
./Swimmer-v3_PPO_2_157/actor_000089104000.pth | Hamilton 0.005183403380215168
./Swimmer-v3_PPO_2_157/actor_000089800000.pth | Hamilton 0.003859275486320257
./Swimmer-v3_PPO_2_157/actor__000000008000_00024.399.pth | Hamilton -0.0007429367396980524
./Swimmer-v3_PPO_2_157/actor__000000468000_00088.503.pth | Hamilton 0.005865113344043493
./Swimmer-v3_PPO_2_157/actor__000000928000_00113.274.pth | Hamilton 0.010407093912363052
./Swimmer-v3_PPO_2_157/actor__000001388000_00126.480.pth | Hamilton 0.011208338662981987
./Swimmer-v3_PPO_2_157/actor__000001848000_00132.156.pth | Hamilton 0.012549827806651592
./Swimmer-v3_PPO_2_157/actor__000016428000_00133.427.pth | Hamilton 0.014171402901411057
./Swimmer-v3_PPO_2_157/actor__000017784000_00137.632.pth | Hamilton 0.011679843068122864
./Swimmer-v3_PPO_2_157/actor__000019592000_00138.403.pth | Hamilton 0.013699631206691265
./Swimmer-v3_PPO_2_157/actor__000020504000_00145.119.pth | Hamilton 0.010951054282486439
./Swimmer-v3_PPO_2_157/actor__000035052000_00149.012.pth | Hamilton 0.012782643549144268
./Swimmer-v3_PPO_2_157/actor__000041492000_00152.034.pth | Hamilton 0.009379813447594643
./Swimmer-v3_PPO_2_157/actor__000041952000_00153.683.pth | Hamilton 0.009855174459517002
./Swimmer-v3_PPO_2_157/actor__000052992000_00157.911.pth | Hamilton 0.005188527517020702
"""
# Swimmer-v3_PPO_3_121
data63 = """
./Swimmer-v3_PPO_3_121/actor_000000024000.pth | Hamilton 0.020110653713345528
./Swimmer-v3_PPO_3_121/actor_000000084000.pth | Hamilton 0.02715129218995571
./Swimmer-v3_PPO_3_121/actor_000000144000.pth | Hamilton 0.028327804058790207
./Swimmer-v3_PPO_3_121/actor_000000204000.pth | Hamilton 0.025701317936182022
./Swimmer-v3_PPO_3_121/actor_000000264000.pth | Hamilton 0.028354130685329437
./Swimmer-v3_PPO_3_121/actor_000000324000.pth | Hamilton 0.03895146772265434
./Swimmer-v3_PPO_3_121/actor_000000384000.pth | Hamilton 0.04108880087733269
./Swimmer-v3_PPO_3_121/actor_000000444000.pth | Hamilton 0.03940640389919281
./Swimmer-v3_PPO_3_121/actor_000000504000.pth | Hamilton 0.04017093405127525
./Swimmer-v3_PPO_3_121/actor_000000564000.pth | Hamilton 0.04205701872706413
./Swimmer-v3_PPO_3_121/actor_000000624000.pth | Hamilton 0.039314862340688705
./Swimmer-v3_PPO_3_121/actor_000000684000.pth | Hamilton 0.034143801778554916
./Swimmer-v3_PPO_3_121/actor_000000744000.pth | Hamilton 0.031359873712062836
./Swimmer-v3_PPO_3_121/actor_000000804000.pth | Hamilton 0.03140445426106453
./Swimmer-v3_PPO_3_121/actor_000000864000.pth | Hamilton 0.029558423906564713
./Swimmer-v3_PPO_3_121/actor_000000924000.pth | Hamilton 0.02673630230128765
./Swimmer-v3_PPO_3_121/actor_000000984000.pth | Hamilton 0.027689866721630096
./Swimmer-v3_PPO_3_121/actor_000001044000.pth | Hamilton 0.028624309226870537
./Swimmer-v3_PPO_3_121/actor_000001104000.pth | Hamilton 0.02707952819764614
./Swimmer-v3_PPO_3_121/actor_000001164000.pth | Hamilton 0.02607092820107937
./Swimmer-v3_PPO_3_121/actor_000001224000.pth | Hamilton 0.027311982586979866
./Swimmer-v3_PPO_3_121/actor_000001284000.pth | Hamilton 0.027716435492038727
./Swimmer-v3_PPO_3_121/actor_000001344000.pth | Hamilton 0.02583765611052513
./Swimmer-v3_PPO_3_121/actor_000001404000.pth | Hamilton 0.026801040396094322
./Swimmer-v3_PPO_3_121/actor_000001464000.pth | Hamilton 0.026641536504030228
./Swimmer-v3_PPO_3_121/actor_000001524000.pth | Hamilton 0.025790296494960785
./Swimmer-v3_PPO_3_121/actor_000001584000.pth | Hamilton 0.02472236379981041
./Swimmer-v3_PPO_3_121/actor_000001644000.pth | Hamilton 0.023757899180054665
./Swimmer-v3_PPO_3_121/actor_000001704000.pth | Hamilton 0.021652374416589737
./Swimmer-v3_PPO_3_121/actor_000001764000.pth | Hamilton 0.021052736788988113
./Swimmer-v3_PPO_3_121/actor_000001824000.pth | Hamilton 0.019100451841950417
./Swimmer-v3_PPO_3_121/actor_000001884000.pth | Hamilton 0.019208500161767006
./Swimmer-v3_PPO_3_121/actor_000001944000.pth | Hamilton 0.018183141946792603
./Swimmer-v3_PPO_3_121/actor_000002004000.pth | Hamilton 0.018541771918535233
./Swimmer-v3_PPO_3_121/actor_000002064000.pth | Hamilton 0.018695896491408348
./Swimmer-v3_PPO_3_121/actor_000002124000.pth | Hamilton 0.022700989618897438
./Swimmer-v3_PPO_3_121/actor_000002184000.pth | Hamilton 0.02044208161532879
./Swimmer-v3_PPO_3_121/actor_000002244000.pth | Hamilton 0.020912671461701393
./Swimmer-v3_PPO_3_121/actor_000002304000.pth | Hamilton 0.018033040687441826
./Swimmer-v3_PPO_3_121/actor_000002364000.pth | Hamilton 0.018431710079312325
./Swimmer-v3_PPO_3_121/actor_000002424000.pth | Hamilton 0.018097015097737312
./Swimmer-v3_PPO_3_121/actor_000002484000.pth | Hamilton 0.014529840089380741
./Swimmer-v3_PPO_3_121/actor_000002544000.pth | Hamilton 0.011763281188905239
./Swimmer-v3_PPO_3_121/actor_000002604000.pth | Hamilton 0.009444762021303177
./Swimmer-v3_PPO_3_121/actor_000002664000.pth | Hamilton 0.010801385156810284
./Swimmer-v3_PPO_3_121/actor_000002724000.pth | Hamilton 0.013880142010748386
./Swimmer-v3_PPO_3_121/actor_000002784000.pth | Hamilton 0.00629993574693799
./Swimmer-v3_PPO_3_121/actor_000002844000.pth | Hamilton 0.0032545998692512512
./Swimmer-v3_PPO_3_121/actor_000002904000.pth | Hamilton 0.0011023205006495118
./Swimmer-v3_PPO_3_121/actor_000002964000.pth | Hamilton 0.0008038826636038721
./Swimmer-v3_PPO_3_121/actor_000003024000.pth | Hamilton 0.00023198139388114214
./Swimmer-v3_PPO_3_121/actor_000003084000.pth | Hamilton 0.0035369268152862787
./Swimmer-v3_PPO_3_121/actor_000003144000.pth | Hamilton 0.0013505296083167195
./Swimmer-v3_PPO_3_121/actor_000003204000.pth | Hamilton 0.0022728133480995893
./Swimmer-v3_PPO_3_121/actor_000003264000.pth | Hamilton 0.002597300335764885
./Swimmer-v3_PPO_3_121/actor_000003324000.pth | Hamilton 0.0010193174239248037
./Swimmer-v3_PPO_3_121/actor_000003384000.pth | Hamilton 0.0002685838844627142
./Swimmer-v3_PPO_3_121/actor_000003444000.pth | Hamilton 0.0008530693594366312
./Swimmer-v3_PPO_3_121/actor_000003504000.pth | Hamilton 0.0015817201929166913
./Swimmer-v3_PPO_3_121/actor_000003564000.pth | Hamilton 0.006050200667232275
./Swimmer-v3_PPO_3_121/actor_000003624000.pth | Hamilton 0.008601261302828789
./Swimmer-v3_PPO_3_121/actor_000003684000.pth | Hamilton 0.0076811183243989944
./Swimmer-v3_PPO_3_121/actor_000003744000.pth | Hamilton 0.006085643079131842
./Swimmer-v3_PPO_3_121/actor_000003804000.pth | Hamilton 0.0021045375615358353
./Swimmer-v3_PPO_3_121/actor_000003864000.pth | Hamilton 0.0021000357810407877
./Swimmer-v3_PPO_3_121/actor_000003924000.pth | Hamilton -0.0015884075546637177
./Swimmer-v3_PPO_3_121/actor_000003984000.pth | Hamilton -0.005167168099433184
./Swimmer-v3_PPO_3_121/actor_000004044000.pth | Hamilton 0.0020239760633558035
./Swimmer-v3_PPO_3_121/actor_000004104000.pth | Hamilton 0.0049362024292349815
./Swimmer-v3_PPO_3_121/actor_000004164000.pth | Hamilton 0.008682173676788807
./Swimmer-v3_PPO_3_121/actor_000004224000.pth | Hamilton 0.007948365062475204
./Swimmer-v3_PPO_3_121/actor_000004284000.pth | Hamilton 0.011425988748669624
./Swimmer-v3_PPO_3_121/actor_000004344000.pth | Hamilton 0.0050744046457111835
./Swimmer-v3_PPO_3_121/actor_000004404000.pth | Hamilton -0.0013705224264413118
./Swimmer-v3_PPO_3_121/actor_000004464000.pth | Hamilton -0.0022696650121361017
./Swimmer-v3_PPO_3_121/actor_000004524000.pth | Hamilton -0.0029377539176493883
./Swimmer-v3_PPO_3_121/actor_000004584000.pth | Hamilton -0.006270260084420443
./Swimmer-v3_PPO_3_121/actor_000004644000.pth | Hamilton -0.003216156968846917
./Swimmer-v3_PPO_3_121/actor_000004704000.pth | Hamilton -0.0018393512582406402
./Swimmer-v3_PPO_3_121/actor_000004764000.pth | Hamilton -0.003030079649761319
./Swimmer-v3_PPO_3_121/actor_000004824000.pth | Hamilton -9.265080734621733e-05
./Swimmer-v3_PPO_3_121/actor_000004884000.pth | Hamilton -0.0025914961006492376
./Swimmer-v3_PPO_3_121/actor_000004944000.pth | Hamilton -0.0010421517072245479
./Swimmer-v3_PPO_3_121/actor_000005004000.pth | Hamilton -0.0013305610045790672
./Swimmer-v3_PPO_3_121/actor_000005064000.pth | Hamilton -0.0027508672792464495
./Swimmer-v3_PPO_3_121/actor_000005124000.pth | Hamilton -0.0010066138347610831
./Swimmer-v3_PPO_3_121/actor_000005184000.pth | Hamilton 0.0029595736414194107
./Swimmer-v3_PPO_3_121/actor_000005244000.pth | Hamilton -0.0012292381143197417
./Swimmer-v3_PPO_3_121/actor_000005304000.pth | Hamilton -0.001544262282550335
./Swimmer-v3_PPO_3_121/actor_000005364000.pth | Hamilton -0.004575483966618776
./Swimmer-v3_PPO_3_121/actor_000005424000.pth | Hamilton -0.008215312846004963
./Swimmer-v3_PPO_3_121/actor_000005484000.pth | Hamilton -0.017040060833096504
./Swimmer-v3_PPO_3_121/actor_000005544000.pth | Hamilton -0.019839206710457802
./Swimmer-v3_PPO_3_121/actor_000005604000.pth | Hamilton -0.015014270320534706
./Swimmer-v3_PPO_3_121/actor_000005664000.pth | Hamilton -0.016327740624547005
./Swimmer-v3_PPO_3_121/actor_000005724000.pth | Hamilton -0.01976838894188404
./Swimmer-v3_PPO_3_121/actor_000005784000.pth | Hamilton -0.01817617379128933
./Swimmer-v3_PPO_3_121/actor_000005844000.pth | Hamilton -0.02162090875208378
./Swimmer-v3_PPO_3_121/actor_000005904000.pth | Hamilton -0.021669557318091393
./Swimmer-v3_PPO_3_121/actor_000005964000.pth | Hamilton -0.022768324241042137
./Swimmer-v3_PPO_3_121/actor_000006024000.pth | Hamilton -0.02053086832165718
./Swimmer-v3_PPO_3_121/actor_000006084000.pth | Hamilton -0.019846003502607346
./Swimmer-v3_PPO_3_121/actor_000006144000.pth | Hamilton -0.01994282752275467
./Swimmer-v3_PPO_3_121/actor_000006204000.pth | Hamilton -0.021280312910676003
./Swimmer-v3_PPO_3_121/actor_000006264000.pth | Hamilton -0.019609684124588966
./Swimmer-v3_PPO_3_121/actor_000006324000.pth | Hamilton -0.01926925964653492
./Swimmer-v3_PPO_3_121/actor_000006384000.pth | Hamilton -0.012251075357198715
./Swimmer-v3_PPO_3_121/actor_000006444000.pth | Hamilton -0.015791069716215134
./Swimmer-v3_PPO_3_121/actor_000006504000.pth | Hamilton -0.011971722356975079
./Swimmer-v3_PPO_3_121/actor_000006564000.pth | Hamilton -0.01662479341030121
./Swimmer-v3_PPO_3_121/actor_000006624000.pth | Hamilton -0.011844886466860771
./Swimmer-v3_PPO_3_121/actor_000006684000.pth | Hamilton -0.01606610417366028
./Swimmer-v3_PPO_3_121/actor_000006744000.pth | Hamilton -0.012678180821239948
./Swimmer-v3_PPO_3_121/actor_000006804000.pth | Hamilton -0.015020159073174
./Swimmer-v3_PPO_3_121/actor_000006864000.pth | Hamilton -0.01938313990831375
./Swimmer-v3_PPO_3_121/actor_000006924000.pth | Hamilton -0.014173192903399467
./Swimmer-v3_PPO_3_121/actor_000006984000.pth | Hamilton -0.01522061601281166
./Swimmer-v3_PPO_3_121/actor_000007044000.pth | Hamilton -0.01016961969435215
./Swimmer-v3_PPO_3_121/actor_000007104000.pth | Hamilton -0.001998821273446083
./Swimmer-v3_PPO_3_121/actor_000007164000.pth | Hamilton -3.881568773067556e-05
./Swimmer-v3_PPO_3_121/actor_000007224000.pth | Hamilton 0.0011051241308450699
./Swimmer-v3_PPO_3_121/actor_000007284000.pth | Hamilton 0.004169078543782234
./Swimmer-v3_PPO_3_121/actor_000007344000.pth | Hamilton 0.004327323753386736
./Swimmer-v3_PPO_3_121/actor_000007404000.pth | Hamilton 0.002742733107879758
./Swimmer-v3_PPO_3_121/actor_000007464000.pth | Hamilton 0.004444441292434931
./Swimmer-v3_PPO_3_121/actor_000007524000.pth | Hamilton 0.005409060977399349
./Swimmer-v3_PPO_3_121/actor_000007584000.pth | Hamilton 0.0029933673795312643
./Swimmer-v3_PPO_3_121/actor_000007644000.pth | Hamilton 0.005345356650650501
./Swimmer-v3_PPO_3_121/actor_000007704000.pth | Hamilton 0.006548906676471233
./Swimmer-v3_PPO_3_121/actor__000000012000_00023.539.pth | Hamilton -0.016312943771481514
./Swimmer-v3_PPO_3_121/actor__000000468000_00044.515.pth | Hamilton 0.00028338495758362114
./Swimmer-v3_PPO_3_121/actor__000000918000_00048.165.pth | Hamilton 0.012842411175370216
./Swimmer-v3_PPO_3_121/actor__000001362000_00060.433.pth | Hamilton 0.023773295804858208
./Swimmer-v3_PPO_3_121/actor__000001812000_00087.028.pth | Hamilton 0.0027495701797306538
./Swimmer-v3_PPO_3_121/actor__000002262000_00091.127.pth | Hamilton -0.0031706145964562893
./Swimmer-v3_PPO_3_121/actor__000002712000_00096.560.pth | Hamilton 0.013367146253585815
./Swimmer-v3_PPO_3_121/actor__000003162000_00106.286.pth | Hamilton 0.007930559106171131
./Swimmer-v3_PPO_3_121/actor__000003612000_00110.085.pth | Hamilton -0.0012087668292224407
./Swimmer-v3_PPO_3_121/actor__000004968000_00113.125.pth | Hamilton -0.0013892920687794685
./Swimmer-v3_PPO_3_121/actor__000005424000_00117.198.pth | Hamilton -0.00485979113727808
./Swimmer-v3_PPO_3_121/actor__000005880000_00118.991.pth | Hamilton -0.01140047051012516
./Swimmer-v3_PPO_3_121/actor__000006330000_00121.160.pth | Hamilton -0.012507532723248005
"""
data = data11.split('\n')[1:-1]
ary1 = []
ary2 = []
for item in data:
item1 = item.split(' ')
obj_h = float(item1[-1])
item2 = item1[0].split('/')
item3 = item2[2].split('_')
if len(item2[2]) <= 22:
step = int(item3[-1][:-4])
ary1.append((step, obj_h))
else:
step = int(item3[-2])
score = float(item3[-1][:-4])
ary2.append((step, obj_h, score))
ary1 = np.array(ary1)
ary2 = np.array(ary2)
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1)
x_step = ary1[:, 0]
y_obj_h = ary1[:, 1]
ax.plot(x_step, y_obj_h)
ax01 = ax.twinx()
x_step = ary2[:, 0]
y_score = ary2[:, 2]
ax01.plot(x_step, y_score)
plt.grid()
plt.show()
if __name__ == '__main__':
demo_evaluator_actor_h_term_to_str()
# demo_get_h_term_curve_from_str()
| 473,601 | 90.428958 | 113 | py |
ElegantRL | ElegantRL-master/examples/demo_mujoco_render.py | from elegantrl.train.evaluator import *
from elegantrl.train.config import Arguments
from elegantrl.envs.CustomGymEnv import GymNormaEnv
from elegantrl.agents.AgentPPO import AgentPPO, AgentPPOgetObjHterm
from elegantrl.agents.AgentSAC import AgentSAC, AgentReSAC
def get_cumulative_returns_and_step(env, act, if_render=False) -> (float, int):
    """Roll out one episode of `env` with policy `act`; return (returns, steps).

    Usage::

        env = build_env(env_func=env_func, env_args=env_args)
        act = agent(net_dim, env.state_dim, env.action_dim, gpu_id=gpu_id).act
        act.load_state_dict(torch.load(actor_path, map_location=lambda storage, loc: storage))
        r_s_ary = np.array([get_cumulative_returns_and_step(env, act) for _ in range(eval_times)],
                           dtype=np.float32)
        r_avg, s_avg = r_s_ary.mean(axis=0)  # average episode return and episode step

    :param env: environment exposing `max_step`, `if_discrete`, `reset()`, `step()`.
    :param act: torch policy network; discrete policies are argmax-decoded.
    :param if_render: call `env.render()` every step when True.
    :return: (cumulative return, number of steps taken in the episode).
    """
    max_step = env.max_step
    if_discrete = env.if_discrete
    device = next(act.parameters()).device  # net.parameters() is a Python generator.

    state = env.reset()
    steps = -1  # so that (steps + 1) == 0 if max_step == 0 (original crashed on None)
    returns = 0.0  # sum of rewards in an episode
    # Inference only: no_grad avoids building autograd graphs each step
    # (the original relied on a caller-side no_grad that this file never applied).
    with torch.no_grad():
        for steps in range(max_step):
            tensor_state = torch.as_tensor(state, dtype=torch.float32, device=device).unsqueeze(0)
            tensor_action = act(tensor_state).argmax(dim=1) if if_discrete else act(tensor_state)
            action = tensor_action.detach().cpu().numpy()[0]
            state, reward, done, _ = env.step(action)
            returns += reward

            if if_render:
                env.render()
            if done:
                break
    # Some envs track a corrected episode return; prefer it when available.
    returns = getattr(env, 'cumulative_returns', returns)
    steps += 1
    return returns, steps
def demo_evaluator_actor_pth():
    """Load a pre-trained actor checkpoint for a chosen MuJoCo task, then render and evaluate it.

    Pick the task by changing the index into the `env_name` list below; each branch
    then sets the env builder, env meta info, the checkpoint path and the network
    size used during training.
    NOTE(review): `net_dim`/`layer_num` must match what the checkpoint was trained
    with, otherwise `load_state_dict()` fails -- confirm against the training config.
    """
    from elegantrl.train.config import build_env
    gpu_id = 0  # >=0 means GPU ID, -1 means CPU
    # Selected environment: index [5] -> 'Humanoid-v3'.
    env_name = ['Hopper-v3',
                'Swimmer-v3',
                'HalfCheetah-v3',
                'Walker2d-v3',
                'Ant-v3',
                'Humanoid-v3',
                ][5]
    agent_class = [AgentPPO, ][0]  # using AgentPPO or AgentPPOHtermK is the same when evaluating
    if env_name == 'Hopper-v3':
        env_func = GymNormaEnv  # gym.make
        env_args = {
            'env_num': 1,
            'env_name': 'Hopper-v3',
            'max_step': 1000,
            'state_dim': 11,
            'action_dim': 3,
            'if_discrete': False,
            'target_return': 3500.,
        }
        actor_path = './actor_Hopper_PPO_hop.pth'
        # actor_path = './actor_Hopper_PPO_hop_fail.pth'
        # actor_path = './actor_Hopper_PPO_fail.pth'
        actor_path = './actor_Hopper_PPO_stand.pth'  # NOTE: overrides the assignment above
        net_dim = 2 ** 8
        layer_num = 3
    elif env_name == 'HalfCheetah-v3':
        env_func = GymNormaEnv  # gym.make
        env_args = {
            'env_num': 1,
            'env_name': 'HalfCheetah-v3',
            'max_step': 1000,
            'state_dim': 17,
            'action_dim': 6,
            'if_discrete': False,
            'target_return': 4800.0,
        }
        # actor_path = './actor_HalfCheetah_PPO_run.pth'
        # actor_path = './actor_HalfCheetah_PPO_kiss_ground.pth'
        actor_path = './actor_HalfCheetah_PPO_stand.pth'
        net_dim = 2 ** 7
        layer_num = 3
    elif env_name == 'Swimmer-v3':
        env_func = GymNormaEnv  # gym.make
        # import gym
        # env_func = gym.make
        env_args = {
            'action_dim': 2,
            'env_name': 'Swimmer-v3',
            'env_num': 1,
            'if_discrete': False,
            'max_step': 1000,
            'state_dim': 8,
            'target_return': 360.0
        }
        # agent_class = AgentPPO
        # actor_path = './actor_Swimmer_PPO_C_160.pth'
        # actor_path = './actor_Swimmer_PPO_C_134.pth'
        # actor_path = './actor_Swimmer_PPO_C_157.pth'
        # actor_path = './actor_Swimmer_PPO_C_152.pth'
        # actor_path = './actor_Swimmer_PPO_C_097.201.pth'
        actor_path = './actor_Swimmer_PPO_stay_031.pth'
        # agent_class = AgentReSAC
        # actor_path = './actor_Swimmer_ReSAC_S_211.pth'
        # actor_path = './actor_Swimmer_ReSAC_S_224.pth'
        # actor_path = './actor_Swimmer_ReSAC_S_286.pth'  # norm
        net_dim = 2 ** 8
        layer_num = 3
    elif env_name == 'Walker2d-v3':
        env_func = GymNormaEnv  # gym.make
        env_args = {
            'env_num': 1,
            'env_name': 'Walker2d-v3',
            'if_discrete': False,
            'max_step': 1000,
            'state_dim': 17,
            'action_dim': 6,
            'target_return': 7000,
        }
        # actor_path = './actor_Walker2d_run11_7870.pth'  # norm
        # actor_path = './actor_Walker2d_run11_7209.pth'  # norm
        # actor_path = './actor_Walker2d_run11_6812.pth'  # norm
        # actor_path = './actor_Walker2d_run11_6955.pth'  # norm
        # actor_path = './actor_Walker2d_run12_5461.pth'  # norm
        # actor_path = './actor_Walker2d_run12_3295.pth'  # norm
        # actor_path = './actor_Walker2d_jump_4008.pth'  # norm
        # actor_path = './actor_Walker2d_fail_4512.pth'  # norm
        # actor_path = './actor_Walker2d_fail_6792.pth'  # norm
        # actor_path = './actor_Walker2d_fail_4992.pth'  # norm
        actor_path = './actor_Walker2d_fail_0431.pth'  # norm
        net_dim = 2 ** 8
        layer_num = 3
    elif env_name == 'Ant-v3':
        env_func = GymNormaEnv
        env_args = {
            'env_num': 1,
            'env_name': 'Ant-v3',
            'max_step': 1000,
            'state_dim': 111,
            'action_dim': 8,
            'if_discrete': False,
            'target_return': 6000.0,
        }
        # actor_path = './actor_Ant_PPO_run_4701.pth'
        # actor_path = './actor_Ant_PPO_run_2105.pth'
        # actor_path = './actor_Ant_PPO_fail_174.pth'
        # actor_path = './actor_Ant_PPO_stay_909.pth'
        actor_path = './actor_Ant_PPO_stay_986.pth'
        net_dim = 2 ** 8
        layer_num = 3
    elif env_name == 'Humanoid-v3':
        from elegantrl.envs.CustomGymEnv import HumanoidEnv
        env_func = HumanoidEnv
        env_args = {
            'env_num': 1,
            'env_name': 'Humanoid-v3',
            'max_step': 1000,
            'state_dim': 376,
            'action_dim': 17,
            'if_discrete': False,
            'target_return': 8000.,
        }
        # from elegantrl.agents.AgentSAC import AgentReSAC
        # agent_class = AgentReSAC
        agent_class = AgentPPO
        actor_path = './actor_Huamnoid_PPO_run_8021.pth'
        # actor_path = './actor_Huamnoid_PPO_run_7105.pth'
        # actor_path = './actor_Huamnoid_PPO_run_6437.pth'
        # actor_path = './actor_Huamnoid_PPO_run_5422.pth'
        # actor_path = './actor_Huamnoid_PPO_run_3491.pth'
        # actor_path = './actor_Huamnoid_PPO_lift_leg_7500.pth'
        # actor_path = './actor_Huamnoid_PPO_lift_leg_6076.pth'
        # actor_path = './actor_Huamnoid_PPO_lift_knee_5136.pth'
        # actor_path = './actor_Huamnoid_PPO_curl_leg_4244.pth'  # net_dim = 2 ** 7
        # actor_path = './actor_Huamnoid_PPO_curl_leg_6378.pth'
        # actor_path = './actor_Huamnoid_PPO_run_7194.pth'  # norm
        # actor_path = './actor_Huamnoid_PPO_lift_knee_6887.pth'
        # actor_path = './actor_Huamnoid_PPO_lift_knee_7585.pth'
        # actor_path = './actor_Huamnoid_PPO_lift_knee_5278.pth'
        # actor_path = './actor_Huamnoid_PPO_run_4759.pth'
        # actor_path = './actor__000108565781_07978.063.pth'  # (Humanoid-v3_PPOHtermK_6 from single to two legs)
        # actor_path = './actor_Huamnoid_PPO_run_9732.pth'  # norm, nice racing
        # actor_path = './actor_Huamnoid_PPO_run_10863.pth'  # norm, nice racing
        # actor_path = './actor__000027862483_10202.021.pth'  # norm, nice racing
        net_dim = 2 ** 9
        layer_num = 3
    else:
        raise ValueError('env_name:', env_name)
    eval_times = 2 ** 4

    '''init'''
    args = Arguments(agent_class=agent_class, env_func=env_func, env_args=env_args)
    args.net_dim = net_dim
    args.num_layer = layer_num
    env = build_env(env_func=args.env_func, env_args=args.env_args)
    # Build an untrained agent only to obtain an actor of the right architecture,
    # then load the checkpoint weights onto CPU (map_location keeps it device-agnostic).
    act = agent_class(net_dim, args.state_dim, args.action_dim, gpu_id=gpu_id, args=args).act
    act.load_state_dict(torch.load(actor_path, map_location=lambda storage, loc: storage))

    '''evaluate file'''
    r_s_ary = [get_cumulative_returns_and_step(env, act, if_render=True) for _ in range(eval_times)]
    # r_s_ary = [get_cumulative_returns_and_step(env, act, if_render=False) for _ in range(eval_times)]
    r_s_ary = np.array(r_s_ary, dtype=np.float32)
    r_avg, s_avg = r_s_ary.mean(axis=0)  # average of episode return and episode step
    print(f'{actor_path:64} | r_avg {r_avg:9.3f} | s_avg {s_avg:9.3f}')

    '''evaluate directory'''
    # dir_path = 'Humanoid-v3_PPO_4'
    # for name in os.listdir(dir_path):
    #     if name[-4:] != '.pth':
    #         continue
    #     actor_path = f"{dir_path}/{name}"
    #
    #     act.load_state_dict(torch.load(actor_path, map_location=lambda storage, loc: storage))
    #
    #     r_s_ary = [get_cumulative_returns_and_step(env, act, if_render=False) for _ in range(eval_times)]
    #     r_s_ary = np.array(r_s_ary, dtype=np.float32)
    #     r_avg, s_avg = r_s_ary.mean(axis=0)  # average of episode return and episode step
    #     print(f'{actor_path:64} | r_avg {r_avg:9.3f} | s_avg {s_avg:9.3f}')


if __name__ == '__main__':
    demo_evaluator_actor_pth()
| 9,577 | 39.075314 | 116 | py |
ElegantRL | ElegantRL-master/elegantrl/envs/StockTradingEnv.py | import os
import numpy as np
import numpy.random as rd
import pandas as pd
import torch
from functorch import vmap
class StockTradingEnv:
    """Daily stock-trading environment backed by cached China A-shares price data.

    State  = [tanh(scaled cash), scaled share counts, scaled close prices,
              scaled technical indicators], concatenated into one vector.
    Action = one value per stock in [-1, 1]; scaled by `max_stock` into an
             integer number of shares to buy (>0) or sell (<0).
    Reward = scaled change in total asset value (cash + holdings) per day,
             plus a discount-horizon bonus at episode end.
    """

    def __init__(self, initial_amount=1e6, max_stock=1e2, cost_pct=1e-3, gamma=0.99,
                 beg_idx=0, end_idx=1113):
        # On-disk data caches: original pandas pickle and its faster numpy conversion.
        self.df_pwd = './elegantrl/envs/China_A_shares.pandas.dataframe'
        self.npz_pwd = './elegantrl/envs/China_A_shares.numpy.npz'

        self.close_ary, self.tech_ary = self.load_data_from_disk()
        # Restrict the episode to trading days [beg_idx, end_idx).
        self.close_ary = self.close_ary[beg_idx:end_idx]
        self.tech_ary = self.tech_ary[beg_idx:end_idx]
        # print(f"| StockTradingEnv: close_ary.shape {self.close_ary.shape}")
        # print(f"| StockTradingEnv: tech_ary.shape {self.tech_ary.shape}")

        self.max_stock = max_stock  # max shares traded per unit of action magnitude
        self.cost_pct = cost_pct  # proportional transaction cost on both buy and sell
        self.reward_scale = 2 ** -12  # keeps per-step rewards near unit magnitude
        self.initial_amount = initial_amount
        self.gamma = gamma

        # reset()
        self.day = None  # current trading-day index into close_ary/tech_ary
        self.rewards = None  # scaled per-step rewards of the running episode
        self.total_asset = None  # cash + market value of held shares
        self.cumulative_returns = 0
        self.if_random_reset = True

        self.amount = None  # cash on hand
        self.shares = None  # share count held per stock
        self.shares_num = self.close_ary.shape[1]
        amount_dim = 1

        # environment information
        self.env_name = 'StockTradingEnv-v2'
        self.state_dim = self.shares_num + self.close_ary.shape[1] + self.tech_ary.shape[1] + amount_dim
        self.action_dim = self.shares_num
        self.if_discrete = False
        self.max_step = self.close_ary.shape[0] - 1
        self.target_return = +np.inf

    def reset(self):
        """Start a new episode at day 0; optionally randomize cash and holdings."""
        self.day = 0
        if self.if_random_reset:
            # Jitter starting cash +-10% and start with random (small, non-negative)
            # integer holdings to diversify initial states during training.
            self.amount = self.initial_amount * rd.uniform(0.9, 1.1)
            self.shares = (np.abs(rd.randn(self.shares_num).clip(-2, +2)) * 2 ** 6).astype(int)
        else:
            self.amount = self.initial_amount
            self.shares = np.zeros(self.shares_num, dtype=np.float32)

        self.rewards = []
        self.total_asset = (self.close_ary[self.day] * self.shares).sum() + self.amount
        return self.get_state()

    def get_state(self):
        """Build the observation for the current day, each component pre-scaled."""
        state = np.hstack((np.tanh(np.array(self.amount * 2 ** -16)),
                           self.shares * 2 ** -9,
                           self.close_ary[self.day] * 2 ** -7,
                           self.tech_ary[self.day] * 2 ** -6,))
        return state

    def step(self, action):
        """Advance one trading day, executing buys/sells implied by `action`."""
        self.day += 1

        action = action.copy()
        action[(-0.1 < action) & (action < 0.1)] = 0  # dead zone: ignore tiny trades
        action_int = (action * self.max_stock).astype(int)
        # actions initially is scaled between -1 and 1
        # convert into integer because we can't buy fraction of shares
        for index in range(self.action_dim):
            stock_action = action_int[index]
            adj_close_price = self.close_ary[self.day, index]  # `adjcp` denotes adjusted close price
            if stock_action > 0:  # buy_stock
                # Buy at most what the remaining cash can afford.
                delta_stock = min(self.amount // adj_close_price, stock_action)
                self.amount -= adj_close_price * delta_stock * (1 + self.cost_pct)
                self.shares[index] += delta_stock
            elif self.shares[index] > 0:  # sell_stock
                # Sell at most the shares currently held.
                delta_stock = min(-stock_action, self.shares[index])
                self.amount += adj_close_price * delta_stock * (1 - self.cost_pct)
                self.shares[index] -= delta_stock

        total_asset = (self.close_ary[self.day] * self.shares).sum() + self.amount

        reward = (total_asset - self.total_asset) * self.reward_scale
        self.rewards.append(reward)
        self.total_asset = total_asset

        done = self.day == self.max_step
        if done:
            # Terminal bonus: mean step reward projected over the 1/(1-gamma) horizon.
            reward += 1 / (1 - self.gamma) * np.mean(self.rewards)
            self.cumulative_returns = total_asset / self.initial_amount * 100  # todo
        state = self.get_state()
        return state, reward, done, {}

    def load_data_from_disk(self, tech_id_list=None):
        """Load (close prices, technical indicators) as numpy arrays.

        Prefers the pre-converted .npz cache; otherwise converts the pandas
        pickle once and writes the cache. Raises FileNotFoundError with
        download instructions when neither file exists.
        """
        tech_id_list = [
            "macd", "boll_ub", "boll_lb", "rsi_30", "cci_30", "dx_30", "close_30_sma", "close_60_sma",
        ] if tech_id_list is None else tech_id_list

        if os.path.exists(self.npz_pwd):
            ary_dict = np.load(self.npz_pwd, allow_pickle=True)
            close_ary = ary_dict['close_ary']
            tech_ary = ary_dict['tech_ary']
        elif os.path.exists(self.df_pwd):  # convert pandas.DataFrame to numpy.array
            df = pd.read_pickle(self.df_pwd)

            tech_ary = []
            close_ary = []
            df_len = len(df.index.unique())  # df_len = max_step
            for day in range(df_len):
                item = df.loc[day]

                # Flatten per-stock indicator columns into one row per day.
                tech_items = [item[tech].values.tolist() for tech in tech_id_list]
                tech_items_flatten = sum(tech_items, [])
                tech_ary.append(tech_items_flatten)
                close_ary.append(item.close)

            close_ary = np.array(close_ary)
            tech_ary = np.array(tech_ary)

            np.savez_compressed(self.npz_pwd, close_ary=close_ary, tech_ary=tech_ary, )
        else:
            error_str = f"| StockTradingEnv need {self.df_pwd} or {self.npz_pwd}" \
                        f"\n download the following files and save in `.`" \
                        f"\n https://github.com/Yonv1943/Python/blob/master/scow/China_A_shares.numpy.npz" \
                        f"\n https://github.com/Yonv1943/Python/blob/master/scow/China_A_shares.pandas.dataframe"
            raise FileNotFoundError(error_str)
        return close_ary, tech_ary
'''function for vmap'''
def _inplace_amount_shares_when_buy(amount, shares, stock_action, close, cost_pct):
stock_delta = torch.min(stock_action, torch.div(amount, close, rounding_mode='floor'))
amount -= close * stock_delta * (1 + cost_pct)
shares += stock_delta
return torch.zeros(1)
def _inplace_amount_shares_when_sell(amount, shares, stock_action, close, cost_rate):
stock_delta = torch.min(-stock_action, shares)
amount += close * stock_delta * (1 - cost_rate)
shares -= stock_delta
return torch.zeros(1)
class StockTradingVecEnv:
    """Vectorized China A-shares trading environment (functorch.vmap batched).

    Each of the `num_envs` parallel accounts holds cash `amount` and per-stock
    share counts. Per-stock actions in [-1, 1] are scaled by `max_stock` and
    truncated to whole shares before being executed as buys/sells.
    """

    def __init__(self, initial_amount=1e6, max_stock=1e2, cost_pct=1e-3, gamma=0.99,
                 beg_idx=0, end_idx=1113, num_envs=4, gpu_id=0):
        """Load price/tech data from disk and build the vmapped helpers.

        Args:
            initial_amount: starting cash for every sub-env.
            max_stock: action scale — max whole shares traded per stock per step.
            cost_pct: proportional transaction cost for both buys and sells.
            gamma: discount factor, used only for the terminal reward bonus.
            beg_idx: first day (row) of the loaded data slice.
            end_idx: one-past-last day of the loaded data slice.
            num_envs: number of parallel sub-environments.
            gpu_id: CUDA device id; a negative id selects CPU.
        """
        self.df_pwd = './elegantrl/envs/China_A_shares.pandas.dataframe'
        self.npz_pwd = './elegantrl/envs/China_A_shares.numpy.npz'
        self.device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")

        '''load data'''
        close_ary, tech_ary = self.load_data_from_disk()
        close_ary = close_ary[beg_idx:end_idx]
        tech_ary = tech_ary[beg_idx:end_idx]
        self.close_price = torch.tensor(close_ary, dtype=torch.float32, device=self.device)
        self.tech_factor = torch.tensor(tech_ary, dtype=torch.float32, device=self.device)

        '''init'''
        self.gamma = gamma
        self.cost_pct = cost_pct
        self.max_stock = max_stock
        self.reward_scale = 2 ** -12
        self.initial_amount = initial_amount
        self.if_random_reset = True

        '''init (reset)'''
        self.day = None
        self.rewards = None
        self.total_asset = None
        self.cumulative_returns = None
        self.amount = None
        self.shares = None
        self.clears = None
        self.num_shares = self.close_price.shape[1]
        amount_dim = 1

        '''environment information'''
        self.env_name = 'StockTradingEnv-v2'
        self.num_envs = num_envs
        self.max_step = self.close_price.shape[0] - 1
        self.state_dim = self.num_shares + self.close_price.shape[1] + self.tech_factor.shape[1] + amount_dim
        self.action_dim = self.num_shares
        self.if_discrete = False

        '''vmap function'''
        self.vmap_get_state = vmap(
            func=lambda amount, shares, close, techs: torch.cat((amount, shares, close, techs)),
            in_dims=(0, 0, None, None), out_dims=0)
        self.vmap_get_total_asset = vmap(
            func=lambda close, shares, amount: (close * shares).sum() + amount,
            in_dims=(None, 0, 0), out_dims=0)
        self.vmap_inplace_amount_shares_when_buy = vmap(
            func=_inplace_amount_shares_when_buy, in_dims=(0, 0, 0, None, None), out_dims=0)
        self.vmap_inplace_amount_shares_when_sell = vmap(
            func=_inplace_amount_shares_when_sell, in_dims=(0, 0, 0, None, None), out_dims=0)

    def reset(self):
        """Reset every sub-env; returns a state of shape (num_envs, state_dim)."""
        self.day = 0
        self.amount = torch.zeros((self.num_envs, 1), dtype=torch.float32, device=self.device) + self.initial_amount
        self.shares = torch.zeros((self.num_envs, self.num_shares), dtype=torch.float32, device=self.device)
        if self.if_random_reset:
            # jitter starting cash into [0.75, 1.25] x initial, random non-negative holdings
            rand_amount = torch.rand((self.num_envs, 1), dtype=torch.float32, device=self.device) * 0.5 + 0.75
            self.amount = self.amount * rand_amount
            rand_shares = torch.randn((self.num_envs, self.num_shares), dtype=torch.float32, device=self.device)
            rand_shares = rand_shares.clip(-2, +2) * 2 ** 7
            self.shares = self.shares + torch.abs(rand_shares).type(torch.int32)
        self.rewards = list()
        self.total_asset = self.vmap_get_total_asset(self.close_price[self.day], self.shares, self.amount)
        return self.get_state()

    def get_state(self):
        """Build the batched observation for the current day."""
        return self.vmap_get_state((self.amount * 2 ** -18).tanh(),
                                   (self.shares * 2 ** -10).tanh(),
                                   self.close_price[self.day] * 2 ** -7,
                                   self.tech_factor[self.day] * 2 ** -6)  # state

    def step(self, action):
        """Advance one trading day.

        Args:
            action: tensor of shape (num_envs, action_dim), values in [-1, 1].

        Returns:
            (state, reward, done, info): batched next state, per-env reward,
            per-env done flags, and an empty info tuple. The env auto-resets
            when the episode ends.
        """
        self.day += 1
        if self.day == 1:
            self.cumulative_returns = 0.

        # FIX: `action = torch.ones_like(action)` (debug leftover) discarded the
        # policy's action and forced a constant buy-all behavior.
        action = action.clone()
        action[(-0.1 < action) & (action < 0.1)] = 0  # dead zone: ignore tiny actions
        action_int = (action * self.max_stock).to(torch.int32)  # whole shares only
        for i in range(self.num_shares):
            buy_idx = torch.where(action_int[:, i] > 0)[0]
            if buy_idx.shape[0] > 0:
                part_amount = self.amount[buy_idx]
                part_shares = self.shares[buy_idx, i]
                self.vmap_inplace_amount_shares_when_buy(part_amount, part_shares, action_int[buy_idx, i],
                                                         self.close_price[self.day, i], self.cost_pct)
                self.amount[buy_idx] = part_amount
                self.shares[buy_idx, i] = part_shares

            # FIX: mask stock `i` only — the old 2-D mask `(action_int < 0) & (self.shares > 0)`
            # returned (possibly duplicated) env rows for *any* stock, not stock `i`.
            sell_idx = torch.where((action_int[:, i] < 0) & (self.shares[:, i] > 0))[0]
            if sell_idx.shape[0] > 0:
                part_amount = self.amount[sell_idx]
                part_shares = self.shares[sell_idx, i]
                self.vmap_inplace_amount_shares_when_sell(part_amount, part_shares, action_int[sell_idx, i],
                                                          self.close_price[self.day, i], self.cost_pct)
                self.amount[sell_idx] = part_amount
                self.shares[sell_idx, i] = part_shares

        '''get reward'''
        total_asset = self.vmap_get_total_asset(self.close_price[self.day], self.shares, self.amount)
        reward = (total_asset - self.total_asset).squeeze(1) * self.reward_scale  # shape == (num_envs, )
        self.rewards.append(reward)
        self.total_asset = total_asset

        '''get done and state'''
        done = self.day == self.max_step
        if done:
            # terminal bonus: discounted-average of the episode's rewards
            reward += torch.stack(self.rewards).mean(dim=0) * (1. / (1. - self.gamma))
            self.cumulative_returns = (total_asset / self.initial_amount) * 100
            self.cumulative_returns = self.cumulative_returns.squeeze(1).cpu().data.tolist()
        state = self.reset() if done else self.get_state()  # automatically reset in vectorized env
        done = torch.tensor(done, dtype=torch.bool, device=self.device).expand(self.num_envs)
        return state, reward, done, ()

    def load_data_from_disk(self, tech_id_list=None):
        """Load (close, tech) arrays; prefer the cached .npz, else convert the DataFrame.

        When only the DataFrame exists, a compressed .npz cache is written for
        the next run. Raises FileNotFoundError when neither file exists.
        """
        tech_id_list = [
            "macd", "boll_ub", "boll_lb", "rsi_30", "cci_30", "dx_30", "close_30_sma", "close_60_sma",
        ] if tech_id_list is None else tech_id_list
        if os.path.exists(self.npz_pwd):
            ary_dict = np.load(self.npz_pwd, allow_pickle=True)
            close_ary = ary_dict['close_ary']
            tech_ary = ary_dict['tech_ary']
        elif os.path.exists(self.df_pwd):  # convert pandas.DataFrame to numpy.array
            df = pd.read_pickle(self.df_pwd)
            tech_ary = []
            close_ary = []
            df_len = len(df.index.unique())  # df_len = max_step
            for day in range(df_len):
                item = df.loc[day]
                tech_items = [item[tech].values.tolist() for tech in tech_id_list]
                tech_items_flatten = sum(tech_items, [])
                tech_ary.append(tech_items_flatten)
                close_ary.append(item.close)
            close_ary = np.array(close_ary)
            tech_ary = np.array(tech_ary)
            np.savez_compressed(self.npz_pwd, close_ary=close_ary, tech_ary=tech_ary, )
        else:
            error_str = f"| StockTradingEnv need {self.df_pwd} or {self.npz_pwd}" \
                        f"\n download the following files and save in `.`" \
                        f"\n https://github.com/Yonv1943/Python/blob/master/scow/China_A_shares.numpy.npz" \
                        f"\n https://github.com/Yonv1943/Python/blob/master/scow/China_A_shares.pandas.dataframe"
            raise FileNotFoundError(error_str)
        return close_ary, tech_ary
| 15,453 | 43.66474 | 120 | py |
ElegantRL | ElegantRL-master/elegantrl/envs/StockTradingVmapEnv.py | import os
import torch
import numpy as np
import numpy.random as rd
import pandas as pd
from functorch import vmap
"""finance environment
Source:
https://github.com/AI4Finance-Foundation/FinRL-Meta/blob/master/Demo_China_A_share_market.ipynb
Modify: Github YonV1943
"""
'''vmap function'''
def _get_total_asset(close, shares, amount):
return (close * shares).sum() + amount # total_asset
def _get_state(amount, shares, close, tech):
return torch.cat((amount, shares, close, tech))
def _inplace_amount_shares_when_buy(amount, shares, stock_action, close, buy_cost_rate):
stock_delta = torch.min(stock_action, torch.div(amount, close, rounding_mode='floor'))
amount -= close * stock_delta * buy_cost_rate
shares += stock_delta
return torch.zeros(1)
def _inplace_amount_shares_when_sell(amount, shares, stock_action, close, sell_cost_rate):
stock_delta = torch.min(-stock_action, shares)
amount += close * stock_delta * sell_cost_rate
shares -= stock_delta
return torch.zeros(1)
class StockTradingVmapEnv:
    """Vectorized China A-shares trading env, batched with functorch.vmap.

    State: [scaled cash, scaled share counts, scaled close prices, scaled
    tech factors]. Action: per-stock float in [-1, 1], scaled by `max_stock`
    and truncated to whole shares.
    """

    def __init__(self, initial_amount=1e6, max_stock=100, buy_cost_pct=1e-3, sell_cost_pct=1e-3, gamma=0.99,
                 beg_idx=0, end_idx=1113, gpu_id: int = 0, num_envs: int = 4):
        """Load the price/tech DataFrame and build the vmapped helpers.

        Args:
            initial_amount: starting cash for every sub-env.
            max_stock: action scale — max whole shares traded per stock per step.
            buy_cost_pct: proportional cost added when buying.
            sell_cost_pct: proportional cost subtracted when selling.
            gamma: discount factor, used only for the terminal reward bonus.
            beg_idx: first day (row) of the loaded data slice.
            end_idx: one-past-last day of the loaded data slice.
            gpu_id: CUDA device id; a negative id selects CPU.
            num_envs: number of parallel sub-environments.
        """
        self.df_pwd = './China_A_shares.pandas.dataframe'

        '''load data'''
        close_ary, tech_ary = self.load_data_from_disk()
        close_ary = close_ary[beg_idx:end_idx]
        tech_ary = tech_ary[beg_idx:end_idx]
        print(f"| StockTradingEnv: close_ary.shape {close_ary.shape}")
        print(f"| StockTradingEnv: tech_ary.shape {tech_ary.shape}")
        self.device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
        self.num_envs = num_envs
        self.close_price = torch.tensor(close_ary, dtype=torch.float32, device=self.device)
        self.tech_factor = torch.tensor(tech_ary, dtype=torch.float32, device=self.device)

        '''init'''
        self.gamma = gamma
        self.max_stock = max_stock
        self.initial_amount = initial_amount
        self.max_step = self.close_price.shape[0]
        self.buy_cost_rate = 1. + buy_cost_pct  # cash multiplier when buying
        self.sell_cost_rate = 1. - sell_cost_pct  # cash multiplier when selling

        '''init (set in reset)'''
        self.day = None
        self.rewards = None
        self.total_asset = None
        self.if_random_reset = True
        self.cumulative_returns = None
        self.amount = None
        self.shares = None
        self.shares_num = self.close_price.shape[1]
        amount_dim = 1

        '''environment information'''
        self.env_name = 'StockTradingEnvVMAP-v2'
        self.state_dim = self.shares_num + self.close_price.shape[1] + self.tech_factor.shape[1] + amount_dim
        self.action_dim = self.shares_num
        self.if_discrete = False

        '''vmap function'''
        self.vmap_get_total_asset = vmap(
            func=_get_total_asset, in_dims=(None, 0, 0), out_dims=0)
        self.vmap_get_state = vmap(
            func=_get_state, in_dims=(0, 0, None, None), out_dims=0)
        self.vmap_inplace_amount_shares_when_buy = vmap(
            func=_inplace_amount_shares_when_buy, in_dims=(0, 0, 0, None, None), out_dims=0)
        self.vmap_inplace_amount_shares_when_sell = vmap(
            func=_inplace_amount_shares_when_sell, in_dims=(0, 0, 0, None, None), out_dims=0)

    def reset(self):
        """Reset all sub-envs; returns a state of shape (num_envs, state_dim)."""
        self.day = 0
        self.amount = torch.zeros((self.num_envs, 1), dtype=torch.float32, device=self.device) + self.initial_amount
        self.shares = torch.zeros((self.num_envs, self.shares_num), dtype=torch.float32, device=self.device)
        if self.if_random_reset:
            # jitter starting cash into [0.95, 1.05] x initial, random holdings in [0, max_stock)
            self.amount *= torch.rand((self.num_envs, 1), dtype=torch.float32, device=self.device) * 0.10 + 0.95
            self.shares += torch.randint(0, int(self.max_stock),
                                         size=(self.num_envs, self.shares_num), device=self.device)
        self.rewards = list()
        self.total_asset = self.vmap_get_total_asset(self.close_price[self.day],
                                                     self.shares,
                                                     self.amount)
        state = self.get_state()
        return state

    def get_state(self):
        """Build the batched observation for the current day."""
        # NOTE(review): cash is scaled *up* by 2 ** 16 here, while the sibling envs
        # scale it down (e.g. tanh(amount * 2 ** -16)) — confirm the exponent sign.
        return self.vmap_get_state(self.amount * 2 ** 16,
                                   self.shares * 2 ** -9,
                                   self.close_price[self.day] * 2 ** -7,
                                   self.tech_factor[self.day] * 2 ** -6)  # state

    def step(self, action):
        """Advance one trading day.

        Args:
            action: tensor of shape (num_envs, action_dim), values in [-1, 1].

        Returns:
            (state, reward, done, info): batched next state, per-env reward of
            shape (num_envs, 1), per-env done flags, and an empty info dict.
        """
        self.day += 1
        action = action.clone()
        action[(-0.1 < action) & (action < 0.1)] = 0  # dead zone: ignore tiny actions
        stock_action = (action * self.max_stock).to(torch.int32)  # whole shares only
        for i in range(self.shares_num):
            buy_idx = torch.where(stock_action[:, i] > 0)[0]
            if buy_idx.shape[0] > 0:
                part_amount = self.amount[buy_idx]
                part_shares = self.shares[buy_idx, i]
                self.vmap_inplace_amount_shares_when_buy(part_amount,
                                                         part_shares,
                                                         stock_action[buy_idx, i],
                                                         self.close_price[self.day, i],
                                                         self.buy_cost_rate)
                self.amount[buy_idx] = part_amount
                self.shares[buy_idx, i] = part_shares

            # FIX: mask stock `i` only — the old 2-D mask `(stock_action < 0) & (self.shares > 0)`
            # yielded (possibly duplicated) env rows for *any* stock, not stock `i`.
            sell_idx = torch.where((stock_action[:, i] < 0) & (self.shares[:, i] > 0))[0]
            if sell_idx.shape[0] > 0:
                part_amount = self.amount[sell_idx]
                part_shares = self.shares[sell_idx, i]
                self.vmap_inplace_amount_shares_when_sell(part_amount,
                                                          part_shares,
                                                          stock_action[sell_idx, i],
                                                          self.close_price[self.day, i],
                                                          self.sell_cost_rate)
                self.amount[sell_idx] = part_amount
                self.shares[sell_idx, i] = part_shares

        state = self.get_state()

        total_asset = self.vmap_get_total_asset(self.close_price[self.day],
                                                self.shares,
                                                self.amount)
        reward = (total_asset - self.total_asset) * 2 ** -6
        self.rewards.append(reward)
        self.total_asset = total_asset

        done = self.day == self.max_step - 1
        if done:
            # terminal bonus: discounted-average of the episode's rewards
            reward += 1. / (1. - self.gamma) * torch.stack(self.rewards).mean(dim=0)
            self.cumulative_returns = total_asset / self.initial_amount
            self.cumulative_returns = self.cumulative_returns.mean().item()
        done = torch.tensor(done, dtype=torch.bool, device=self.device).expand(self.num_envs)
        return state, reward, done, {}

    def load_data_from_disk(self, tech_id_list=None):
        """Flatten the pickled DataFrame into (close, tech) numpy arrays.

        Raises FileNotFoundError with download instructions when the
        DataFrame file is missing.
        """
        tech_id_list = [
            "macd", "boll_ub", "boll_lb", "rsi_30", "cci_30", "dx_30", "close_30_sma", "close_60_sma",
        ] if tech_id_list is None else tech_id_list
        if os.path.exists(self.df_pwd):  # convert pandas.DataFrame to numpy.array
            df = pd.read_pickle(self.df_pwd)
            tech_ary = []
            close_ary = []
            df_len = len(df.index.unique())  # df_len = max_step
            for day in range(df_len):
                item = df.loc[day]
                tech_items = [item[tech].values.tolist() for tech in tech_id_list]
                tech_items_flatten = sum(tech_items, [])
                tech_ary.append(tech_items_flatten)
                close_ary.append(item.close)
            close_ary = np.array(close_ary)
            tech_ary = np.array(tech_ary)
        else:
            error_str = f"| StockTradingEnv need {self.df_pwd}" \
                        f"\n download the following files and save in `.`" \
                        f"\n https://github.com/Yonv1943/Python/blob/master/scow/China_A_shares.pandas.dataframe (2MB)"
            raise FileNotFoundError(error_str)
        return close_ary, tech_ary
def check_env():
    """Manual smoke test: roll random and buy-all policies through the env."""
    gpu_id = 0
    num_vec_envs = 32
    env = StockTradingVmapEnv(beg_idx=834, end_idx=1113, gpu_id=gpu_id, num_envs=num_vec_envs)
    env.if_random_reset = False
    eval_rounds = 4
    """
    env = StockTradingEnv(beg_idx=0, end_idx=1113)
    cumulative_returns of random action : 1.63
    cumulative_returns of buy all share : 2.80
    env = StockTradingEnv(beg_idx=0, end_idx=834)
    cumulative_returns of random action : 1.94
    cumulative_returns of buy all share : 2.51
    env = StockTradingEnv(beg_idx=834, end_idx=1113)
    cumulative_returns of random action : 1.12
    cumulative_returns of buy all share : 1.19
    """

    def run_policy(policy_name, random_policy):
        # roll `eval_rounds` episodes, printing cumulative returns at each episode end
        state = env.reset()
        for _ in range(env.max_step * eval_rounds):
            if random_policy:
                action = torch.rand((env.num_envs, env.action_dim), dtype=torch.float32, device=env.device) * 2. - 1.
            else:
                action = torch.ones((env.num_envs, env.action_dim), dtype=torch.float32, device=env.device) * 2. - 1.
            state, reward, done, _ = env.step(action)
            if torch.all(done):
                print(f'cumulative_returns of {policy_name}: {env.cumulative_returns:9.2f}')
                state = env.reset()
        dir(state)

    print()
    run_policy('random action', random_policy=True)
    print()
    env.if_random_reset = False
    run_policy('buy all share (if_random_reset = False)', random_policy=False)
    print()
    print()
    env.if_random_reset = True
    run_policy('buy all share (if_random_reset = True)', random_policy=False)
    print()
if __name__ == '__main__':
    check_env()  # manual smoke test; prints cumulative returns for a few fixed policies
| 10,618 | 39.071698 | 120 | py |
ElegantRL | ElegantRL-master/elegantrl/envs/IsaacGym.py | import gym.spaces
import isaacgym
import numpy as np
import torch
from elegantrl.envs.isaac_tasks import isaacgym_task_map
from elegantrl.envs.isaac_tasks.base.vec_task import VecTask
from elegantrl.envs.utils.utils import set_seed
from elegantrl.envs.utils.config_utils import load_task_config, get_max_step_from_config
from pprint import pprint
from typing import Dict, Tuple
"""
Source: https://github.com/NVIDIA-Omniverse/IsaacGymEnvs (I hate `import hydra` in IsaacGym Preview 3)
Modify: https://github.com/hmomin (hmomin's code is quite good!)
Modify: https://github.com/Yonv1943 (I make a little change based on hmomin's code)
There is still a cuda:0 bug in Isaac Gym Preview 3:
Isaac Gym Preview 3 forces cuda:0 to be used even if you set `sim_device_id=1, rl_device_id=1`.
You can only use `export CUDA_VISIBLE_DEVICES=1,2,3` to make Isaac Gym use a specified GPU.
isaacgym/gymdeps.py", line 21, in _import_deps
raise ImportError("PyTorch was imported before isaacgym modules.
Please import torch after isaacgym modules.")
run the following code in bash before running.
export LD_LIBRARY_PATH=/xfs/home/podracer_steven/anaconda3/envs/rlgpu/lib
can't use os.environ['LD_LIBRARY_PATH'] = /xfs/home/podracer_steven/anaconda3/envs/rlgpu/lib
cd isaacgym/python/ElegantRL-1212
conda activate rlgpu
export LD_LIBRARY_PATH=~/anaconda3/envs/rlgpu/lib
"""
class IsaacVecEnv:
    """Wraps an Isaac Gym `VecTask` so it exposes the metadata and
    reset/step interface ElegantRL expects (env_num, state_dim, action_dim,
    if_discrete, target_return, ...)."""
    def __init__(
        self,
        env_name: str,
        env_num=-1,
        sim_device_id=0,
        rl_device_id=0,
        headless=True,
        should_print=False,
    ):
        """Preprocesses a vectorized Isaac Gym environment for RL training.
        [Isaac Gym - Preview 3 Release](https://developer.nvidia.com/isaac-gym)
        Args:
            env_name (str): the name of the environment to be processed.
            env_num (int, optional): the number of environments to simulate on the
                device. Defaults to whatever is specified in the corresponding config
                file (any value <= 0 keeps the config default).
            sim_device_id (int, optional): the GPU device id to render physics on.
                Defaults to 0.
            rl_device_id (int, optional): the GPU device id to perform RL training on.
                Defaults to 0.
            headless (bool, optional): whether or not the Isaac Gym environment should
                render on-screen. Defaults to False.
            should_print (bool, optional): whether or not the arguments should be
                printed. Defaults to False.
        """
        task_config = load_task_config(env_name)
        sim_device = f"cuda:{sim_device_id}" if sim_device_id >= 0 else "cpu"
        # NOTE(review): this assignment is overwritten by `torch.device(rl_device_id)`
        # further below — confirm which device callers are meant to see.
        self.device = sim_device
        isaac_task = isaacgym_task_map[env_name]
        self._override_default_env_num(env_num, task_config)
        # NOTE(review): presumably -1 requests a randomly drawn seed — confirm
        # against elegantrl.envs.utils.utils.set_seed.
        set_seed(-1, False)
        env: VecTask = isaac_task(
            cfg=task_config,
            sim_device=sim_device,
            graphics_device_id=rl_device_id,
            headless=headless,
        )
        is_discrete = isinstance(env.action_space, gym.spaces.Discrete)
        # is_discrete = not isinstance(env.action_space, gym.spaces.Box) # Continuous action space
        state_dimension = env.num_obs
        assert isinstance(state_dimension, int)
        action_dim = getattr(env.action_space, 'n') if is_discrete else env.num_acts
        if not is_discrete:
            # the wrapper assumes continuous action spaces are normalized to [-1, +1]
            assert all(getattr(env.action_space, 'high') == np.ones(action_dim))
            assert all(-getattr(env.action_space, 'low') == np.ones(action_dim))
        target_return = 10 ** 10  # TODO: plan to make `target_returns` optional
        env_config = task_config["env"]
        max_step = get_max_step_from_config(env_config)
        self.device = torch.device(rl_device_id)  # RL tensors live on this device
        self.env = env
        self.env_num = env.num_envs
        self.env_name = env_name
        self.max_step = max_step
        self.state_dim = state_dimension
        self.action_dim = action_dim
        self.if_discrete = is_discrete
        self.target_return = target_return
        if should_print:
            pprint(
                {
                    "num_envs": env.num_envs,
                    "env_name": env_name,
                    "max_step": max_step,
                    "state_dim": state_dimension,
                    "action_dim": action_dim,
                    "if_discrete": is_discrete,
                    "target_return": target_return,
                }
            )
    @staticmethod
    def _override_default_env_num(num_envs: int, config_args: Dict):
        """Overrides the default number of environments if it's passed in.
        Args:
            num_envs (int): new number of environments; values <= 0 are ignored.
            config_args (Dict): configuration retrieved.
        """
        if num_envs > 0:
            config_args["env"]["numEnvs"] = num_envs
    def reset(self) -> torch.Tensor:
        """Resets the environments in the VecTask that need to be reset.
        Returns:
            torch.Tensor: the next states in the simulation, moved to the RL device.
        """
        observations = self.env.reset()['obs'].to(self.device)
        return observations
    def step(
        self, actions: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, Dict]:
        """Steps through the vectorized environment.
        Args:
            actions (torch.Tensor): a multidimensional tensor of actions to perform on
                *each* environment.
        Returns:
            Tuple[torch.Tensor, torch.Tensor, torch.Tensor, Dict]: a tuple containing
            observations, rewards, dones (all moved to the RL device), and extra info.
        """
        observations_dict, rewards, dones, info_dict = self.env.step(actions)
        observations = observations_dict["obs"].to(self.device)
        return observations, rewards.to(self.device), dones.to(self.device), info_dict
class IsaacOneEnv(IsaacVecEnv):
    """Single-environment variant of `IsaacVecEnv` with numpy in/out, for evaluation."""

    def __init__(self, env_name: str, device_id=0, headless=False, should_print=False):
        """Preprocesses a single Isaac Gym environment for RL evaluating.
        [Isaac Gym - Preview 3 Release](https://developer.nvidia.com/isaac-gym)
        Args:
            env_name (str): the name of the environment to be processed.
            device_id (int, optional): the GPU device id to render physics and perform
                RL training. Defaults to 0.
            headless (bool, optional): whether or not the Isaac Gym environment should
                render on-screen. Defaults to False.
            should_print (bool, optional): whether or not the arguments should be
                printed. Defaults to False.
        """
        super().__init__(
            env_name=env_name,
            env_num=1,
            sim_device_id=device_id,
            rl_device_id=device_id,
            # FIX: was hard-coded to True, silently ignoring the `headless` argument
            headless=headless,
            should_print=should_print,
        )

    def reset(self) -> np.ndarray:
        """Resets the environments in the VecTask that need to be reset.
        Returns:
            np.ndarray: a numpy array containing the new state of the single
            environment.
        """
        tensor_state_dict = self.env.reset()
        tensor_states = tensor_state_dict["obs"]
        first_state = tensor_states[0]
        return first_state.cpu().detach().numpy()  # state

    def step(
        self, action: np.ndarray
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, Dict]:
        """Steps through the single environment.
        Args:
            action (np.ndarray): a (possibly multidimensional) numpy array of actions
                to perform on the single environment.
        Returns:
            Tuple[np.ndarray, np.ndarray, np.ndarray, Dict]: a tuple containing
            the next state, the scalar reward, the done flag, and extra info.
        """
        # batch the single action, step the underlying 1-env VecTask, then unbatch
        tensor_action = torch.as_tensor(action, dtype=torch.float32).unsqueeze(0)
        tensor_state_dict, tensor_reward, tensor_done, info_dict = self.env.step(
            tensor_action
        )
        tensor_state = tensor_state_dict["obs"]
        state = tensor_state[0].cpu().detach().numpy()
        reward = tensor_reward[0].item()
        done = tensor_done[0].item()
        return state, reward, done, info_dict
def check_isaac_gym(env_name: str = 'Ant', gpu_id: int = 5):
    """Smoke-test an Isaac Gym task with uniform-random actions.

    Builds a 1024-env `IsaacVecEnv`, steps it for `2 * max_step` steps, then
    prints per-episode reward/length statistics.

    Args:
        env_name (str, optional): Isaac Gym task name (a key of
            `isaacgym_task_map`). Defaults to 'Ant' so the `__main__` entry
            point works without arguments.
        gpu_id (int, optional): GPU id for both simulation and RL tensors.
            Defaults to 5 (the previously hard-coded value).

    Returns:
        (reward_list, steps_list): 1-D float tensors, one entry per finished episode.
    """
    env = IsaacVecEnv(env_name=env_name, env_num=1024, sim_device_id=gpu_id, rl_device_id=gpu_id, should_print=True)

    states = env.reset()
    print('\n\nstates.shape', states.shape)

    # single random step to sanity-check the I/O shapes
    action = torch.rand((env.env_num, env.action_dim), dtype=torch.float32)
    print('\n\naction.shape', action.shape)
    states, rewards, dones, info_dict = env.step(action)
    print(f'\nstates.shape {states.shape}'
          f'\nrewards.shape {rewards.shape}'
          f'\ndones.shape {dones.shape}'
          f'\nrepr(info.dict) {repr(info_dict)}')

    from tqdm import trange
    device = torch.device(f"cuda:{gpu_id}")

    # roll random actions for two full horizons, recording per-step rewards/dones
    rewards_ary = []
    dones_ary = []
    env.reset()
    for _ in trange(env.max_step * 2):
        action = torch.rand((env.env_num, env.action_dim), dtype=torch.float32, device=device)
        states, rewards, dones, info_dict = env.step(action)
        rewards_ary.append(rewards)
        dones_ary.append(dones)
    rewards_ary = torch.stack(rewards_ary)  # rewards_ary.shape == (env.max_step, env.env_num)
    dones_ary = torch.stack(dones_ary)
    print(f'\nrewards_ary.shape {rewards_ary.shape}'
          f'\ndones_ary.shape {dones_ary.shape}')

    # split each env's reward stream at its done flags into episodes
    reward_list = []
    steps_list = []
    print()
    for i in trange(env.env_num):
        dones_where = torch.where(dones_ary[:, i])[0]
        episode_num = dones_where.shape[0]
        if episode_num == 0:
            continue  # this env never finished an episode within the horizon
        j0 = 0
        rewards_env = rewards_ary[:, i]
        for j1 in dones_where + 1:
            reward_list.append(rewards_env[j0:j1].sum())
            steps_list.append(j1 - j0 + 1)
            j0 = j1
    reward_list = torch.tensor(reward_list, dtype=torch.float32)
    steps_list = torch.tensor(steps_list, dtype=torch.float32)
    print(f'\n reward_list avg {reward_list.mean(0):9.2f}'
          f'\n std {reward_list.std(0):9.2f}'
          f'\n steps_list avg {steps_list.mean(0):9.2f}'
          f'\n std {steps_list.std(0):9.2f}'
          f'\n episode_num {steps_list.shape[0]}')
    return reward_list, steps_list
if __name__ == '__main__':
    # FIX: `check_isaac_gym` requires an env name; calling it bare raised TypeError.
    check_isaac_gym(env_name='Ant')
| 10,482 | 36.708633 | 116 | py |
ElegantRL | ElegantRL-master/elegantrl/envs/CustomGymEnv.py | import gym
import torch
import numpy as np
'''[ElegantRL.2022.12.12](github.com/AI4Fiance-Foundation/ElegantRL)'''
Array = np.ndarray
Tensor = torch.Tensor
InstallGymBox2D = """Install gym[Box2D]
# LinuxOS (Ubuntu)
sudo apt update && sudo apt install swig
python3 -m pip install --upgrade pip --no-warn-script-location
pip3 install -i http://pypi.douban.com/simple/ --trusted-host pypi.douban.com --user gym==0.23.1 gym[Box2D]
# WindowOS (Windows NT)
python -m pip install --upgrade pip
pip3 install -i http://pypi.douban.com/simple/ --trusted-host pypi.douban.com swig gym==0.23.1 gym[Box2D]
"""
class PendulumEnv:  # a demo of custom gym env
    """Demo custom-env wrapper around gym Pendulum.

    Rescales the agent's [-1, 1] actions onto Pendulum's native [-2, 2]
    action space and exposes the metadata fields ElegantRL expects.
    """

    def __init__(self):
        gym.logger.set_level(40)  # Block warning
        # FIX: compare parsed version tuples, not raw strings — lexicographic string
        # comparison mis-orders versions such as '0.9.0' vs '0.18.0'.
        gym_version = tuple(int(part) for part in gym.__version__.split('.')[:3] if part.isdigit())
        assert gym_version <= (0, 25, 2)  # pip3 install gym==0.24.0
        env_name = "Pendulum-v0" if gym_version < (0, 18, 0) else "Pendulum-v1"
        self.env = gym.make(env_name)

        '''the necessary env information when you design a custom env'''
        self.env_name = env_name  # the name of this env.
        self.num_envs = 1  # the number of sub env is greater than 1 in vectorized env.
        self.max_step = getattr(self.env, '_max_episode_steps')  # the max step number of an episode.
        self.state_dim = self.env.observation_space.shape[0]  # feature number of state
        self.action_dim = self.env.action_space.shape[0]  # feature number of action
        self.if_discrete = False  # discrete action or continuous action

    def reset(self) -> Array:  # reset the agent in env
        return self.env.reset()

    def step(self, action: Array) -> (Array, float, bool, dict):  # agent interacts in env
        # OpenAI Pendulum env set its action space as (-2, +2). It is bad.
        # We suggest that adjust action space to (-1, +1) when designing a custom env.
        state, reward, done, info_dict = self.env.step(action * 2)
        return state, reward, done, info_dict

    def render(self):
        self.env.render()
class GymNormaEnv(gym.Wrapper):
def __init__(self, env_name: str = 'Hopper-v3'):
gym.logger.set_level(40) # Block warning
super(GymNormaEnv, self).__init__(env=gym.make(env_name))
if env_name == 'Hopper-v3':
self.env_num = 1
self.env_name = env_name
self.max_step = 1000
self.state_dim = 11
self.action_dim = 3
self.if_discrete = False
self.target_return = 3000
# 4 runs
self.state_avg = torch.tensor([1.3819, -0.0105, -0.3804, -0.1759, 0.1959, 2.4185, -0.0406, -0.0172,
-0.1465, -0.0450, -0.1616], dtype=torch.float32)
self.state_std = torch.tensor([0.1612, 0.0747, 0.2357, 0.1889, 0.6431, 0.6253, 1.4806, 1.1569, 2.2850,
2.2124, 6.5147], dtype=torch.float32)
elif env_name == 'Swimmer-v3':
self.env_num = 1
self.env_name = env_name
self.max_step = 1000
self.state_dim = 8
self.action_dim = 2
self.if_discrete = False
self.target_return = 360.0
# self.state_avg = torch.zeros(1, dtype=torch.float32)
# self.state_std = torch.ones(1, dtype=torch.float32)
# 6 runs
self.state_avg = torch.tensor([0.5877, -0.2745, -0.2057, 0.0802, 0.0105, 0.0158, -0.0047, -0.0057],
dtype=torch.float32)
self.state_std = torch.tensor([0.5324, 0.5573, 0.5869, 0.4787, 0.5617, 0.8538, 1.2658, 1.4649],
dtype=torch.float32)
elif env_name == 'Ant-v3':
self.env_num = 1
self.env_name = env_name
self.max_step = 1000
self.state_dim = 17
self.action_dim = 6
self.if_discrete = False
self.target_return = 5000
# self.state_avg = torch.zeros(1, dtype=torch.float32)
# self.state_std = torch.ones(1, dtype=torch.float32)
# 2 runs
self.state_avg = torch.tensor([6.3101e-01, 9.3039e-01, 1.1357e-02, -6.0412e-02, -1.9220e-01,
1.4675e-01, 6.7936e-01, -1.2429e-01, -6.3794e-01, -2.9083e-02,
-6.0464e-01, 1.0855e-01, 6.5904e-01, 5.2163e+00, 7.5811e-02,
8.2149e-03, -3.0893e-02, -4.0532e-02, -4.5461e-02, 3.8929e-03,
7.3546e-02, -5.1845e-02, -2.2415e-02, 7.4109e-03, -4.0126e-02,
7.2162e-02, 3.4596e-02, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00], dtype=torch.float32)
self.state_std = torch.tensor([0.1170, 0.0548, 0.0683, 0.0856, 0.1434, 0.3606, 0.2035, 0.4071, 0.1488,
0.3565, 0.1285, 0.4071, 0.1953, 1.2645, 1.0212, 1.1494, 1.6127, 1.8113,
1.3163, 4.3250, 3.2312, 5.4796, 2.4919, 4.3622, 2.3617, 5.3836, 3.0482,
0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
0.0000, 0.0000, 0.0000], dtype=torch.float32)
# 4 runs
# self.state_avg = torch.tensor([6.1537e-01, 8.9688e-01, 2.1685e-02, -5.6615e-02, -3.6099e-01,
# 5.5272e-02, 6.4884e-01, -1.1314e-01, -5.7535e-01, -1.1797e-01,
# -5.4735e-01, 1.2350e-01, 6.3261e-01, 5.0387e+00, -3.1005e-01,
# 5.8508e-03, -4.0760e-03, -3.9709e-03, -4.0554e-02, -4.4973e-03,
# 5.5552e-02, -7.7341e-02, -3.3138e-02, -8.2667e-03, -2.2928e-02,
# 6.2883e-02, 3.0411e-02, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00], dtype=torch.float32)
# self.state_std = torch.tensor([0.1276, 0.0580, 0.0686, 0.0839, 0.1335, 0.3699, 0.2019, 0.4514, 0.1049,
# 0.1996, 0.0715, 0.4507, 0.1640, 1.3036, 1.0192, 1.2708, 1.6660, 1.5512,
# 1.2885, 4.3279, 3.5145, 6.1747, 2.1667, 2.8137, 1.4356, 6.1903, 2.8142,
# 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
# 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
# 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
# 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
# 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
# 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
# 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
# 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
# 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000,
# 0.0000, 0.0000, 0.0000], dtype=torch.float32)
elif env_name == 'HalfCheetah-v3':
self.env_num = 1
self.env_name = env_name
self.max_step = 1000
self.state_dim = 17
self.action_dim = 6
self.if_discrete = False
self.target_return = 5000
# 2 runs
self.state_avg = torch.tensor([-0.1786, 0.8515, 0.0683, 0.0049, 0.0143, -0.1074, -0.1226, -0.1223,
3.2042, -0.0244, 0.0103, 0.0679, -0.1574, 0.0661, -0.0098, 0.0513,
-0.0142], dtype=torch.float32)
self.state_std = torch.tensor([0.1224, 0.6781, 0.3616, 0.3545, 0.3379, 0.4800, 0.3575, 0.3372,
1.3460, 0.7967, 2.2092, 9.1078, 9.4349, 9.4631, 11.0645, 9.3995,
8.6867], dtype=torch.float32)
elif env_name == 'Walker2d-v3':
self.env_num = 1
self.env_name = env_name
self.max_step = 1000
self.state_dim = 17
self.action_dim = 6
self.if_discrete = False
self.target_return = 8000
# 6 runs
self.state_avg = torch.tensor([1.2954, 0.4176, -0.0995, -0.2242, 0.2234, -0.2319, -0.3035, -0.0614,
3.7896, -0.1081, 0.1643, -0.0470, -0.1533, -0.0410, -0.1140, -0.2981,
-0.6278], dtype=torch.float32)
self.state_std = torch.tensor([0.1095, 0.1832, 0.1664, 0.2951, 0.6291, 0.2582, 0.3270, 0.6931, 1.1162,
1.0560, 2.7070, 3.1108, 4.4344, 6.4363, 3.1945, 4.4594, 6.0115],
dtype=torch.float32)
# 11 runs
# self.state_avg = torch.tensor([1.2026, 0.3181, -0.2361, -0.6064, -0.0210, -0.2863, -0.3759, -0.0214,
# 4.7048, -0.0621, -0.0452, -0.1847, -0.6116, 0.0934, -0.0572, -0.5106,
# -0.5421], dtype=torch.float32)
# self.state_std = torch.tensor([0.0975, 0.2671, 0.2845, 0.6044, 0.6855, 0.3448, 0.4304, 0.7049, 1.5023,
# 1.0364, 3.8605, 4.0202, 5.9124, 6.7366, 4.3993, 5.2269, 6.5471],
# dtype=torch.float32)
else:
self.state_avg = torch.zeros(1, dtype=torch.float32)
self.state_std = torch.ones(1, dtype=torch.float32)
print(f"{self.__class__.__name__} WARNING: env_name not found {env_name}")
self.state_std = torch.clamp(self.state_std, 2 ** -4, 2 ** 4) # todo
print(f'\n| {self.__class__.__name__}: We modified MuJoCo Env and do norm for state to make it better.')
def get_state_norm(self, state: Array) -> Tensor:
state = torch.tensor(state, dtype=torch.float32)
return (state - self.state_avg) / self.state_std
def reset(self) -> Tensor:
state = self.env.reset()
return self.get_state_norm(state)
def step(self, action: Array) -> (Tensor, float, bool, dict):
state, reward, done, info_dict = self.env.step(action) # state, reward, done, info_dict
return self.get_state_norm(state), reward, done, info_dict
class HumanoidEnv(gym.Wrapper): # [ElegantRL.2021.11.11]
def __init__(self, gym_env_id='Humanoid-v3', target_return=8000):
gym.logger.set_level(40) # Block warning
super(HumanoidEnv, self).__init__(env=gym.make(gym_env_id))
# from elegantrl.envs.Gym import get_gym_env_info
# get_gym_env_info(env, if_print=True) # use this function to print the env information
self.env_num = 1 # the env number of VectorEnv is greater than 1
self.env_name = gym_env_id # the name of this env.
self.max_step = 1000 # the max step of each episode
self.state_dim = 376 # feature number of state
self.action_dim = 17 # feature number of action
self.if_discrete = False # discrete action or continuous action
self.target_return = target_return # episode return is between (-1600, 0)
# 5 runs
# self.state_avg = torch.tensor([1.2027e+00, 9.0388e-01, -1.0409e-01, 4.4935e-02, -2.8785e-02,
# 2.9601e-01, -3.1656e-01, 3.0909e-01, -4.3196e-02, -1.2750e-01,
# -2.6788e-01, -1.1086e+00, -1.1024e-01, 1.2908e-01, -5.8439e-01,
# -1.6043e+00, 8.1362e-02, -7.7958e-01, -4.3869e-01, -4.9594e-02,
# 6.4827e-01, -3.0660e-01, 3.4619e+00, -5.2682e-02, -7.4712e-02,
# -5.4782e-02, 4.0784e-02, 1.3942e-01, 1.1000e-01, -1.3992e-02,
# 9.3216e-02, -1.3473e-01, -7.6183e-02, -3.0072e-01, -1.3914e+00,
# -7.6460e-02, 1.6543e-02, -2.1907e-01, -3.8219e-01, -1.0018e-01,
# -1.5629e-01, -1.0627e-01, -3.7252e-03, 2.1453e-01, 2.7610e-02,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 1.5680e+00, 1.5862e+00, 1.9913e-01, 9.9125e-03, 5.5228e-03,
# -1.0950e-01, -1.2668e-01, 2.9367e-01, 3.5102e+00, 8.4719e+00,
# 6.4141e-02, 6.6425e-02, 2.3180e-02, -2.3346e-03, 4.5395e-03,
# 8.0720e-03, -3.0787e-02, -5.2109e-02, 3.2192e-01, 2.0724e+00,
# 5.9864e-02, 5.0491e-02, 7.7832e-02, -3.2226e-03, 1.7504e-04,
# -1.9180e-03, -8.2688e-02, -1.9763e-01, 1.0849e-02, 5.9581e+00,
# 2.5272e-01, 2.6957e-01, 1.1540e-01, 1.6143e-02, 2.7386e-02,
# -6.4959e-02, 2.4176e-01, -4.1101e-01, -8.2298e-01, 4.6070e+00,
# 6.3743e-01, 7.0587e-01, 1.2301e-01, -4.3697e-04, -4.5899e-02,
# -6.8465e-02, 2.5412e-02, -1.7718e-01, -1.2062e+00, 2.6798e+00,
# 6.8834e-01, 7.6378e-01, 1.2859e-01, -8.0863e-03, -1.0989e-01,
# -4.6906e-02, -1.4599e-01, -1.0927e-01, -1.0181e+00, 1.7989e+00,
# 1.9099e-01, 2.0230e-01, 9.9341e-02, -1.5814e-02, 1.5009e-02,
# 5.1159e-02, 1.6290e-01, 3.2563e-01, -6.0960e-01, 4.6070e+00,
# 4.5602e-01, 4.9681e-01, 1.0787e-01, -5.9067e-04, -3.5140e-02,
# 7.0788e-02, 2.5216e-02, 2.1480e-01, -9.1849e-01, 2.6798e+00,
# 4.6612e-01, 5.2530e-01, 9.9732e-02, 1.3496e-02, -8.3317e-02,
# 4.6769e-02, -1.8264e-01, 1.1677e-01, -7.7112e-01, 1.7989e+00,
# 2.9806e-01, 2.7976e-01, 1.1250e-01, 3.8320e-03, 1.4312e-03,
# 9.2314e-02, -2.9700e-02, -2.5973e-01, 5.9897e-01, 1.6228e+00,
# 2.1239e-01, 1.6878e-01, 1.8192e-01, 6.9662e-03, -2.5374e-02,
# 7.5638e-02, 3.0046e-02, -3.1797e-01, 2.8894e-01, 1.2199e+00,
# 2.5424e-01, 2.0008e-01, 1.0215e-01, 1.6763e-03, -1.8978e-03,
# -8.9815e-02, -5.8642e-03, 3.2081e-01, 4.9344e-01, 1.6228e+00,
# 1.8071e-01, 1.4553e-01, 1.4435e-01, -1.2074e-02, -1.3314e-02,
# -3.5878e-02, 5.3603e-02, 2.7511e-01, 2.0549e-01, 1.2199e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 4.2954e-02, 6.7016e-02, -2.8482e-02, 3.5978e+00,
# -4.8962e-02, -5.6775e-02, -4.4155e-02, -1.1466e-01, 1.6261e-01,
# 3.5054e+00, -5.0701e-02, -4.9236e-02, -4.1256e-02, 1.1351e-01,
# 1.3945e-01, 3.4389e+00, -4.3797e-02, -3.3252e-02, 2.8187e-02,
# -3.3888e-02, -3.5859e-01, 3.5962e+00, -3.8793e-02, -2.0773e-02,
# -2.4524e-02, 1.1582e+00, -4.5108e-02, 5.1413e+00, -8.7558e-02,
# -5.7185e-01, -2.4524e-02, 1.1582e+00, -4.5108e-02, 5.1413e+00,
# -8.7558e-02, -5.7185e-01, 9.9391e-02, -2.4059e-02, -1.7425e-01,
# 3.4541e+00, -8.4718e-02, 1.8192e-02, 4.4070e-01, 3.9781e-01,
# 3.5545e-01, 4.3428e+00, -1.8370e-01, -6.5439e-01, 4.4070e-01,
# 3.9781e-01, 3.5545e-01, 4.3428e+00, -1.8370e-01, -6.5439e-01,
# 1.5922e-01, 2.0918e-01, -9.8105e-02, 3.7604e+00, -2.9619e-02,
# -5.8485e-02, 1.0385e-01, 2.1228e-01, -1.7878e-01, 3.7999e+00,
# -7.4080e-02, -5.3348e-02, -2.6477e-01, 4.1909e-01, 2.9927e-02,
# 3.6885e+00, -1.1708e-01, -6.7030e-02, -2.1599e-01, 3.9669e-01,
# 6.0856e-03, 3.8305e+00, -8.3960e-02, -1.1403e-01, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 1.6677e+01, 4.9107e+01, -9.6274e+00, -2.9728e+01, -5.9374e+01,
# 7.3201e+01, -5.8161e+01, -3.6315e+01, 2.7580e+01, 4.1244e+00,
# 1.1711e+02, -8.4357e+00, -1.0379e+01, 1.0683e+01, 3.3124e+00,
# 5.4840e+00, 8.2456e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00], dtype=torch.float32)
# self.state_std = torch.tensor([3.7685e-02, 4.5415e-02, 6.8201e-02, 9.5235e-02, 1.2801e-01, 2.2247e-01,
# 2.2774e-01, 1.9151e-01, 1.0900e-01, 1.8950e-01, 3.8430e-01, 6.4591e-01,
# 1.1708e-01, 1.7833e-01, 4.0411e-01, 6.1461e-01, 2.8869e-01, 3.0227e-01,
# 4.4105e-01, 3.1090e-01, 3.5227e-01, 2.9399e-01, 8.6883e-01, 3.8865e-01,
# 4.2435e-01, 2.4784e+00, 3.5310e+00, 4.3277e+00, 8.6461e+00, 6.9988e+00,
# 7.2420e+00, 8.6105e+00, 9.3459e+00, 2.6776e+01, 4.3671e+01, 7.4211e+00,
# 1.0446e+01, 1.4800e+01, 2.2152e+01, 5.7955e+00, 6.3750e+00, 7.0280e+00,
# 6.4058e+00, 9.1694e+00, 7.0480e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 1.5156e-01, 1.4699e-01, 7.1690e-02, 3.6331e-02, 1.4871e-01,
# 1.1873e-01, 3.9063e-01, 3.2367e-01, 1.9474e-01, 0.0000e+00, 1.1069e-02,
# 1.1819e-02, 5.2432e-03, 3.1321e-03, 8.8166e-03, 6.7725e-03, 5.4790e-02,
# 4.3172e-02, 3.4676e-02, 0.0000e+00, 1.0203e-02, 1.2745e-02, 1.4526e-02,
# 9.4642e-03, 5.2404e-03, 5.6170e-03, 1.5328e-01, 1.1638e-01, 1.0253e-01,
# 0.0000e+00, 4.8770e-02, 4.3080e-02, 4.4482e-02, 2.2124e-02, 4.9892e-02,
# 2.2123e-02, 2.4277e-01, 1.0974e-01, 1.2796e-01, 0.0000e+00, 1.5967e-01,
# 1.5963e-01, 6.8688e-02, 3.1619e-02, 1.2107e-01, 5.2330e-02, 2.8835e-01,
# 1.1818e-01, 1.9899e-01, 0.0000e+00, 2.0831e-01, 2.2797e-01, 9.6549e-02,
# 3.5202e-02, 1.2134e-01, 5.9960e-02, 2.1897e-01, 1.0345e-01, 2.1384e-01,
# 0.0000e+00, 4.7938e-02, 4.4530e-02, 3.8997e-02, 2.2406e-02, 4.1815e-02,
# 2.0735e-02, 2.1493e-01, 1.0405e-01, 1.4387e-01, 0.0000e+00, 1.5225e-01,
# 1.6402e-01, 6.2498e-02, 3.1570e-02, 1.1685e-01, 4.3421e-02, 2.8339e-01,
# 1.0626e-01, 2.1353e-01, 0.0000e+00, 1.9867e-01, 2.2000e-01, 8.5643e-02,
# 3.0187e-02, 1.2717e-01, 5.0311e-02, 2.2468e-01, 9.0330e-02, 2.1959e-01,
# 0.0000e+00, 4.6455e-02, 4.4841e-02, 2.4198e-02, 1.8876e-02, 3.3907e-02,
# 2.6701e-02, 9.6149e-02, 7.2464e-02, 6.3727e-02, 0.0000e+00, 6.9340e-02,
# 6.5581e-02, 5.0208e-02, 3.8457e-02, 3.7162e-02, 3.9005e-02, 1.2357e-01,
# 9.5124e-02, 1.0308e-01, 0.0000e+00, 4.5508e-02, 4.2817e-02, 2.3776e-02,
# 2.1004e-02, 3.2342e-02, 2.5299e-02, 1.0703e-01, 7.1359e-02, 6.8018e-02,
# 0.0000e+00, 5.5628e-02, 5.4957e-02, 4.5547e-02, 3.1943e-02, 3.2783e-02,
# 2.8549e-02, 1.1968e-01, 9.6011e-02, 9.6069e-02, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 2.7535e+00,
# 3.3878e+00, 4.3144e+00, 1.3207e+00, 8.2827e-01, 4.9262e-01, 4.4686e+00,
# 3.7037e+00, 6.4924e+00, 9.9583e-01, 6.8091e-01, 4.7597e-01, 4.3920e+00,
# 4.7409e+00, 6.0906e+00, 9.3958e-01, 4.9473e-01, 4.9569e-01, 7.5115e+00,
# 1.5371e+01, 1.1053e+01, 1.2450e+00, 7.6206e-01, 1.0601e+00, 9.2410e+00,
# 2.3707e+01, 1.0356e+01, 6.6857e+00, 2.4551e+00, 2.8653e+00, 9.2410e+00,
# 2.3707e+01, 1.0356e+01, 6.6857e+00, 2.4551e+00, 2.8653e+00, 5.3753e+00,
# 8.6029e+00, 8.1809e+00, 1.1586e+00, 5.8827e-01, 8.2327e-01, 6.3651e+00,
# 1.1362e+01, 8.7067e+00, 4.3533e+00, 1.4509e+00, 2.1305e+00, 6.3651e+00,
# 1.1362e+01, 8.7067e+00, 4.3533e+00, 1.4509e+00, 2.1305e+00, 4.5383e+00,
# 5.4198e+00, 5.3263e+00, 2.0749e+00, 1.5746e+00, 8.2220e-01, 5.7299e+00,
# 6.2163e+00, 6.0368e+00, 2.1437e+00, 1.8280e+00, 1.2940e+00, 5.5326e+00,
# 5.0856e+00, 5.3383e+00, 1.7817e+00, 1.5361e+00, 8.9927e-01, 6.1037e+00,
# 6.5608e+00, 6.2712e+00, 1.9360e+00, 1.6504e+00, 1.1001e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 1.7051e+02,
# 1.5721e+02, 1.7507e+02, 1.7297e+02, 1.3840e+02, 5.2837e+02, 2.8931e+02,
# 1.6753e+02, 1.6898e+02, 5.0561e+02, 3.0826e+02, 2.2299e+01, 2.6949e+01,
# 2.4568e+01, 2.5537e+01, 2.9878e+01, 2.6547e+01, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
# 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00], dtype=torch.float32)
# 8 runs
self.state_avg = torch.tensor([1.2108e+00, 9.0593e-01, -1.2575e-01, 4.8278e-02, -4.7363e-02,
3.0758e-01, -3.6351e-01, 3.3824e-01, -4.4513e-02, -9.5673e-02,
-2.6830e-01, -1.0654e+00, -1.1868e-01, 1.6859e-01, -6.7167e-01,
-1.7219e+00, 1.7098e-01, -7.5045e-01, -3.6428e-01, -4.5543e-02,
6.9729e-01, -4.1325e-01, 3.3065e+00, -4.8535e-02, -8.7482e-02,
-7.2437e-02, 8.0267e-02, 1.1422e-01, 6.3917e-02, -4.3369e-02,
1.0969e-01, -1.7911e-01, -2.4718e-02, -4.7037e-01, -1.8689e+00,
-3.3888e-02, 3.1659e-02, -1.8880e-01, -4.1088e-01, -4.3491e-02,
-1.4319e-01, -2.2842e-02, -2.9954e-02, 3.3196e-01, -2.8202e-02,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
1.5682e+00, 1.5911e+00, 2.1484e-01, 1.3796e-02, 2.4396e-02,
-1.2319e-01, -1.8833e-01, 3.3373e-01, 3.5928e+00, 8.5499e+00,
6.5384e-02, 6.8184e-02, 2.5217e-02, -3.2912e-03, 7.6363e-03,
9.5407e-03, -5.0383e-02, -6.1559e-02, 3.2461e-01, 2.0915e+00,
6.2163e-02, 5.2387e-02, 8.1777e-02, -4.5177e-03, 3.6478e-04,
-1.9426e-03, -1.1423e-01, -2.2379e-01, 1.0095e-02, 6.0130e+00,
2.5295e-01, 2.6585e-01, 1.1579e-01, 1.5304e-02, 2.3657e-02,
-6.4327e-02, 2.3218e-01, -4.2584e-01, -8.2816e-01, 4.6494e+00,
6.5633e-01, 7.2495e-01, 1.2384e-01, 2.1672e-03, -3.9951e-02,
-6.5070e-02, 4.8542e-02, -1.7317e-01, -1.2392e+00, 2.7045e+00,
7.2061e-01, 7.9773e-01, 1.2873e-01, -4.7357e-03, -9.5605e-02,
-4.3178e-02, -1.1211e-01, -1.0523e-01, -1.0520e+00, 1.8155e+00,
1.8269e-01, 1.9581e-01, 9.5197e-02, -1.4370e-02, 1.2924e-02,
4.5945e-02, 1.7367e-01, 3.0414e-01, -5.7666e-01, 4.6494e+00,
4.4517e-01, 4.8470e-01, 1.0228e-01, -1.4548e-03, -3.1125e-02,
6.3631e-02, 5.2045e-02, 2.0269e-01, -8.8813e-01, 2.7045e+00,
4.6257e-01, 5.1419e-01, 9.1832e-02, 1.2465e-02, -7.4154e-02,
4.2695e-02, -1.6522e-01, 1.1613e-01, -7.5953e-01, 1.8155e+00,
2.9357e-01, 2.7974e-01, 1.1385e-01, 6.3163e-03, 3.7935e-03,
8.7228e-02, -3.7212e-02, -2.4926e-01, 5.9919e-01, 1.6377e+00,
2.0201e-01, 1.6933e-01, 1.7335e-01, 8.5288e-03, -2.7483e-02,
7.0444e-02, 3.2598e-02, -2.9903e-01, 2.8950e-01, 1.2311e+00,
2.4482e-01, 1.9030e-01, 1.0209e-01, 6.4776e-04, -3.0012e-03,
-8.5235e-02, -3.3090e-03, 3.2367e-01, 4.7833e-01, 1.6377e+00,
1.8395e-01, 1.4705e-01, 1.4698e-01, -1.1777e-02, -1.3145e-02,
-3.0231e-02, 4.9042e-02, 2.7848e-01, 1.9004e-01, 1.2311e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 5.9201e-02, 9.9734e-02, -6.5489e-02, 3.5387e+00,
-4.5762e-02, -6.9222e-02, -6.2818e-03, -5.8119e-02, 8.4466e-02,
3.3673e+00, -4.9401e-02, -6.1099e-02, 1.7103e-02, 1.5226e-01,
9.0974e-02, 3.2815e+00, -4.2284e-02, -4.6787e-02, 1.7020e-01,
-6.6382e-03, -2.6923e-01, 3.5500e+00, -4.1203e-02, -3.6445e-02,
-2.4479e-01, 1.1339e+00, 1.1257e-01, 5.9749e+00, -4.5722e-02,
-6.0756e-01, -2.4479e-01, 1.1339e+00, 1.1257e-01, 5.9749e+00,
-4.5722e-02, -6.0756e-01, 9.5687e-02, 6.2377e-03, -3.1253e-01,
3.3551e+00, -7.5612e-02, 3.7902e-03, 4.9319e-01, 4.9548e-01,
7.1103e-02, 4.4660e+00, -1.7679e-01, -5.6680e-01, 4.9319e-01,
4.9548e-01, 7.1103e-02, 4.4660e+00, -1.7679e-01, -5.6680e-01,
2.4826e-01, 2.7281e-01, -5.3309e-02, 3.8251e+00, -6.9774e-03,
-7.2389e-02, 1.6979e-01, 2.6176e-01, -7.4322e-02, 3.8449e+00,
-5.5816e-02, -8.0149e-02, -2.8148e-01, 4.7921e-01, 7.2474e-03,
3.7309e+00, -1.1763e-01, -7.3255e-02, -3.3529e-01, 5.1496e-01,
-2.1279e-02, 3.9610e+00, -9.3358e-02, -1.1908e-01, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
-3.1586e+00, 5.5951e+01, -5.1688e+00, -2.0856e+01, -9.7607e+00,
1.0722e+02, 3.5213e+01, 1.2223e+01, 3.3327e+01, -6.1532e+01,
1.0860e+02, 2.3747e+00, -9.9348e+00, 1.9073e+01, -2.5358e-01,
1.0303e+01, 3.9810e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00], dtype=torch.float32)
self.state_std = torch.tensor([3.1392e-02, 3.7876e-02, 5.7367e-02, 8.0870e-02, 1.1060e-01, 1.9527e-01,
1.9386e-01, 1.6299e-01, 9.1475e-02, 1.6569e-01, 3.3626e-01, 5.7772e-01,
9.8797e-02, 1.5675e-01, 3.4861e-01, 5.3704e-01, 2.4981e-01, 2.8493e-01,
3.7375e-01, 2.8416e-01, 3.1271e-01, 2.7643e-01, 8.0354e-01, 3.5500e-01,
3.8352e-01, 2.5504e+00, 3.5465e+00, 4.5748e+00, 9.9119e+00, 7.4044e+00,
7.7644e+00, 9.2271e+00, 1.2024e+01, 3.4966e+01, 7.6774e+01, 8.9679e+00,
1.2524e+01, 1.5256e+01, 2.4119e+01, 6.6164e+00, 7.7344e+00, 7.6366e+00,
7.4501e+00, 9.9335e+00, 1.0271e+01, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 1.3350e-01, 1.2936e-01, 6.1098e-02, 3.0401e-02, 1.2934e-01,
1.0183e-01, 3.6737e-01, 2.9502e-01, 1.7495e-01, 0.0000e+00, 9.3017e-03,
9.9151e-03, 4.3507e-03, 2.5781e-03, 7.3343e-03, 5.6374e-03, 4.5623e-02,
3.5967e-02, 2.9087e-02, 0.0000e+00, 8.4761e-03, 1.0561e-02, 1.2082e-02,
7.8437e-03, 4.3507e-03, 4.7044e-03, 1.3116e-01, 9.9861e-02, 8.7706e-02,
0.0000e+00, 4.0845e-02, 3.5960e-02, 3.7137e-02, 1.8467e-02, 4.1754e-02,
1.8461e-02, 2.0857e-01, 9.3311e-02, 1.1018e-01, 0.0000e+00, 1.3535e-01,
1.3499e-01, 5.7386e-02, 2.6211e-02, 1.0184e-01, 4.3831e-02, 2.4708e-01,
1.0116e-01, 1.7149e-01, 0.0000e+00, 1.7665e-01, 1.9352e-01, 8.0759e-02,
2.9150e-02, 1.0249e-01, 5.0426e-02, 1.8785e-01, 8.8313e-02, 1.8192e-01,
0.0000e+00, 4.0083e-02, 3.7293e-02, 3.2804e-02, 1.8966e-02, 3.5081e-02,
1.7295e-02, 1.8324e-01, 9.0613e-02, 1.2549e-01, 0.0000e+00, 1.2928e-01,
1.3884e-01, 5.2734e-02, 2.6758e-02, 9.7935e-02, 3.6490e-02, 2.4088e-01,
9.2416e-02, 1.8560e-01, 0.0000e+00, 1.6895e-01, 1.8693e-01, 7.2220e-02,
2.5554e-02, 1.0662e-01, 4.2373e-02, 1.9043e-01, 7.6969e-02, 1.8709e-01,
0.0000e+00, 3.8692e-02, 3.7292e-02, 2.0231e-02, 1.5743e-02, 2.8439e-02,
2.2167e-02, 8.2798e-02, 6.1350e-02, 5.3325e-02, 0.0000e+00, 5.7712e-02,
5.4608e-02, 4.1780e-02, 3.1920e-02, 3.0993e-02, 3.2481e-02, 1.0455e-01,
7.9928e-02, 8.6195e-02, 0.0000e+00, 3.8208e-02, 3.5703e-02, 1.9769e-02,
1.7572e-02, 2.7208e-02, 2.1206e-02, 9.1823e-02, 6.0944e-02, 5.7677e-02,
0.0000e+00, 4.6909e-02, 4.6119e-02, 3.8016e-02, 2.6753e-02, 2.7516e-02,
2.3943e-02, 1.0474e-01, 8.3302e-02, 8.2673e-02, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 2.8122e+00,
3.5878e+00, 4.3692e+00, 1.2417e+00, 7.6688e-01, 4.4274e-01, 4.5708e+00,
4.0605e+00, 7.5402e+00, 9.2691e-01, 6.3951e-01, 4.2625e-01, 4.5672e+00,
4.9948e+00, 6.9801e+00, 8.6809e-01, 4.5658e-01, 4.4578e-01, 8.1632e+00,
1.9371e+01, 1.5762e+01, 1.2160e+00, 7.1688e-01, 9.8074e-01, 1.0888e+01,
3.5036e+01, 1.4247e+01, 9.0313e+00, 2.8052e+00, 3.3265e+00, 1.0888e+01,
3.5036e+01, 1.4247e+01, 9.0313e+00, 2.8052e+00, 3.3265e+00, 6.0875e+00,
9.3019e+00, 9.6741e+00, 1.1009e+00, 5.3437e-01, 7.4614e-01, 7.1663e+00,
1.2823e+01, 1.0369e+01, 4.1288e+00, 1.3454e+00, 2.0126e+00, 7.1663e+00,
1.2823e+01, 1.0369e+01, 4.1288e+00, 1.3454e+00, 2.0126e+00, 5.1024e+00,
6.0538e+00, 5.7377e+00, 2.0800e+00, 1.5886e+00, 7.5714e-01, 6.4385e+00,
7.0912e+00, 6.6091e+00, 2.1412e+00, 1.8227e+00, 1.1804e+00, 6.2504e+00,
5.4816e+00, 5.8103e+00, 1.7573e+00, 1.5686e+00, 8.4100e-01, 7.1933e+00,
8.0470e+00, 7.3113e+00, 1.9905e+00, 1.7208e+00, 1.1594e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 2.5147e+02,
2.3960e+02, 2.5526e+02, 2.2783e+02, 2.2161e+02, 7.6230e+02, 4.4391e+02,
2.2895e+02, 2.4944e+02, 7.0961e+02, 4.6304e+02, 3.2988e+01, 3.9116e+01,
3.1438e+01, 3.6047e+01, 4.0998e+01, 3.7787e+01, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00,
0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00], dtype=torch.float32)
# self.state_avg = torch.zeros(1, dtype=torch.float32)
# self.state_std = torch.ones(1, dtype=torch.float32)
self.state_std = torch.clamp(self.state_std, 2 ** -4, 2 ** 4)
print(f'\n| {self.__class__.__name__}: We modified MuJoCo Env and do norm for state to make it better.'
f'\n| We scale the action space from (-0.4, +0.4), to (-1, +1).')
def get_state_norm(self, state: Array) -> Tensor:
state = torch.tensor(state, dtype=torch.float32)
return (state - self.state_avg) / self.state_std
def reset(self) -> Tensor:
state = self.env.reset()
return self.get_state_norm(state)
def step(self, action: Array) -> (Tensor, float, bool, dict):
# MuJoCo Humanoid Env set its action space as (-0.4, +0.4). It is bad.
# I suggest to set action space as (-1, +1) when you design your own env.
# action_space.high = 0.4
# action_space.low = -0.4
state, reward, done, info_dict = self.env.step(action * 2.5) # state, reward, done, info_dict
return self.get_state_norm(state), reward, done, info_dict
| 46,682 | 84.343693 | 116 | py |
ElegantRL | ElegantRL-master/elegantrl/envs/PointChasingEnv.py | import numpy as np
import numpy.random as rd
import torch
Array = np.ndarray
Tensor = torch.Tensor
class PointChasingEnv:
def __init__(self, dim=2):
self.dim = dim
self.init_distance = 8.0
# reset
self.p0 = None # position of point 0
self.v0 = None # velocity of point 0
self.p1 = None # position of point 1
self.v1 = None # velocity of point 1
self.distance = None # distance between point0 and point1
self.cur_step = None # current step number
"""env info"""
self.env_name = "PointChasingEnv"
self.state_dim = self.dim * 4
self.action_dim = self.dim
self.max_step = 2 ** 10
self.if_discrete = False
def reset(self):
self.p0 = rd.normal(0, 1, size=self.dim)
self.v0 = np.zeros(self.dim)
self.p1 = rd.normal(-self.init_distance, 1, size=self.dim)
self.v1 = np.zeros(self.dim)
self.distance = ((self.p0 - self.p1) ** 2).sum() ** 0.5
self.cur_step = 0
return self.get_state()
def step(self, action: Array) -> (Array, Array, bool, dict):
action_l2 = (action ** 2).sum() ** 0.5
action_l2 = max(action_l2, 1.0)
action = action / action_l2
self.v1 *= 0.75
self.v1 += action
self.p1 += self.v1 * 0.01
self.v0 *= 0.50
self.v0 += rd.rand(self.dim)
self.p0 += self.v0 * 0.01
"""next_state"""
next_state = self.get_state()
"""reward"""
distance = ((self.p0 - self.p1) ** 2).sum() ** 0.5
reward = self.distance - distance - action_l2 * 0.02
self.distance = distance
"""done"""
self.cur_step += 1
done = (distance < self.dim) or (self.cur_step == self.max_step)
return next_state, reward, done, None
def get_state(self) -> Array:
return np.hstack((self.p0, self.v0, self.p1, self.v1))
@staticmethod
def get_action(state: Array) -> Array:
states_reshape = state.reshape((4, -1))
p0 = states_reshape[0]
p1 = states_reshape[2]
return p0 - p1
class PointChasingVecEnv:
def __init__(self, dim=2, env_num=32, sim_gpu_id=0):
self.dim = dim
self.init_distance = 8.0
# reset
self.p0s = None # position
self.v0s = None # velocity
self.p1s = None
self.v1s = None
self.distances = None # a tensor of distance between point0 and point1
self.cur_steps = None # a tensor of current step number
# env.step() is a function, so I can't name it `steps`
"""env info"""
self.env_name = "PointChasingVecEnv"
self.state_dim = self.dim * 4
self.action_dim = self.dim
self.max_step = 2 ** 10
self.if_discrete = False
self.env_num = env_num
self.device = torch.device("cpu" if sim_gpu_id == -1 else f"cuda:{sim_gpu_id}")
def reset(self):
self.p0s = torch.zeros(
(self.env_num, self.dim), dtype=torch.float32, device=self.device
)
self.v0s = torch.zeros(
(self.env_num, self.dim), dtype=torch.float32, device=self.device
)
self.p1s = torch.zeros(
(self.env_num, self.dim), dtype=torch.float32, device=self.device
)
self.v1s = torch.zeros(
(self.env_num, self.dim), dtype=torch.float32, device=self.device
)
self.cur_steps = torch.zeros(
self.env_num, dtype=torch.float32, device=self.device
)
for env_i in range(self.env_num):
self.reset_env_i(env_i)
self.distances = ((self.p0s - self.p1s) ** 2).sum(dim=1) ** 0.5
return self.get_state()
def reset_env_i(self, i: int):
self.p0s[i] = torch.normal(0, 1, size=(self.dim,))
self.v0s[i] = torch.zeros((self.dim,))
self.p1s[i] = torch.normal(-self.init_distance, 1, size=(self.dim,))
self.v1s[i] = torch.zeros((self.dim,))
self.cur_steps[i] = 0
def step(self, actions: Tensor) -> (Tensor, Tensor, Tensor, dict):
"""
:param actions: [tensor] actions.shape == (env_num, action_dim)
:return: next_states [tensor] next_states.shape == (env_num, state_dim)
:return: rewards [tensor] rewards == (env_num, )
:return: dones [tensor] dones == (env_num, ), done = 1. if done else 0.
:return: None [None or dict]
"""
# assert actions.get_device() == self.device.index
actions_l2 = (actions ** 2).sum(dim=1, keepdim=True) ** 0.5
actions_l2 = actions_l2.clamp_min(1.0)
actions = actions / actions_l2
self.v1s *= 0.75
self.v1s += actions
self.p1s += self.v1s * 0.01
self.v0s *= 0.50
self.v0s += torch.rand(
size=(self.env_num, self.dim), dtype=torch.float32, device=self.device
)
self.p0s += self.v0s * 0.01
"""reward"""
distances = ((self.p0s - self.p1s) ** 2).sum(dim=1) ** 0.5
rewards = self.distances - distances - actions_l2.squeeze(1) * 0.02
self.distances = distances
"""done"""
self.cur_steps += 1 # array
dones = (distances < self.dim) | (self.cur_steps == self.max_step)
for env_i in range(self.env_num):
if dones[env_i]:
self.reset_env_i(env_i)
dones = dones.type(torch.float32)
"""next_state"""
next_states = self.get_state()
# assert next_states.get_device() == self.device.index
# assert rewards.get_device() == self.device.index
# assert dones.get_device() == self.device.index
return next_states, rewards, dones, None
def get_state(self) -> Tensor:
return torch.cat((self.p0s, self.v0s, self.p1s, self.v1s), dim=1)
@staticmethod
def get_action(states: Tensor) -> Tensor:
states_reshape = states.reshape((states.shape[0], 4, -1))
p0s = states_reshape[:, 0]
p1s = states_reshape[:, 2]
return p0s - p1s
class PointChasingDiscreteEnv(PointChasingEnv):
def __init__(self, dim=2):
PointChasingEnv.__init__(self, dim)
self.env_name = "PointChasingDiscreteEnv"
self.action_dim = 3 ** self.dim
self.if_discrete = True
def step(self, action: Array) -> (Array, Array, bool, dict):
action_ary = np.zeros(self.dim, dtype=np.float32) # continuous_action
for dim in range(self.dim):
idx = (action // (3 ** dim)) % 3
action_ary[dim] = idx - 1 # map `idx` to `value` using {0: -1, 1: 0, 2: +1}
return PointChasingEnv.step(self, action_ary)
def get_action(self, state: Array) -> int:
action_ary = PointChasingEnv.get_action(state)
action_idx = 0
for dim in range(self.dim):
action_value = action_ary[dim]
if action_value < -0.5:
action_idx += dim ** 3 * 0
elif action_value < +0.5:
action_idx += dim ** 3 * 1
else:
action_idx += dim ** 3 * 2
return action_idx
def check_chasing_env():
env = PointChasingEnv()
reward_sum = 0.0 # episode return
reward_sum_list = []
state = env.reset()
for _ in range(env.max_step * 4):
action = env.get_action(state)
state, reward, done, _ = env.step(action)
reward_sum += reward
if done:
print(f"{env.distance:8.4f} {action.round(2)}")
reward_sum_list.append(reward_sum)
reward_sum = 0.0
state = env.reset()
print("len: ", len(reward_sum_list))
print("mean:", np.mean(reward_sum_list))
print("std: ", np.std(reward_sum_list))
def check_chasing_vec_env():
env = PointChasingVecEnv(dim=2, env_num=2, sim_gpu_id=0)
reward_sums = [
0.0,
] * env.env_num # episode returns
reward_sums_list = [
[],
] * env.env_num
states = env.reset()
for _ in range(env.max_step * 4):
actions = env.get_action(states)
states, rewards, dones, _ = env.step(actions)
for env_i in range(env.env_num):
reward_sums[env_i] += rewards[env_i].item()
if dones[env_i]:
print(
f"{env.distances[env_i].item():8.4f} {actions[env_i].detach().cpu().numpy().round(2)}"
)
reward_sums_list[env_i].append(reward_sums[env_i])
reward_sums[env_i] = 0.0
reward_sums_list = np.array(reward_sums_list)
print("shape:", reward_sums_list.shape)
print("mean: ", np.mean(reward_sums_list, axis=1))
print("std: ", np.std(reward_sums_list, axis=1))
if __name__ == "__main__":
check_chasing_env()
check_chasing_vec_env()
| 8,892 | 30.874552 | 109 | py |
ElegantRL | ElegantRL-master/elegantrl/envs/IsaacGymEnv.py | import gym.spaces
import isaacgym
import numpy as np
import torch
from elegantrl.envs.isaac_tasks import isaacgym_task_map
from elegantrl.envs.isaac_tasks.base.vec_task import VecTask
from elegantrl.envs.utils.utils import set_seed
from elegantrl.envs.utils.config_utils import load_task_config, get_max_step_from_config
from pprint import pprint
from typing import Dict, Tuple
'''[ElegantRL.2022.06.06](github.com/AI4Finance-Foundation/ElegantRL)'''
"""
Source: https://github.com/NVIDIA-Omniverse/IsaacGymEnvs (I hate `import hydra` in IsaacGym Preview 3)
Modify: https://github.com/hmomin (hmomin's code is quite good!)
Modify: https://github.com/Yonv1943 (I make a little change based on hmomin's code)
There is still a cuda:0 bug in Isaac Gym Preview 3:
Isaac Gym Preview 3 will force cuda:0 to be used even if you set `sim_device_id=1, rl_device_id=1`.
You can only use `export CUDA_VISIBLE_DEVICES=1,2,3` to let Isaac Gym use a specified GPU.
isaacgym/gymdeps.py", line 21, in _import_deps
raise ImportError("PyTorch was imported before isaacgym modules.
Please import torch after isaacgym modules.")
run the following code in bash before running.
export LD_LIBRARY_PATH=/xfs/home/podracer_steven/anaconda3/envs/rlgpu/lib
can't use os.environ['LD_LIBRARY_PATH'] = /xfs/home/podracer_steven/anaconda3/envs/rlgpu/lib
cd isaacgym/python/ElegantRL-1212
conda activate rlgpu
export LD_LIBRARY_PATH=~/anaconda3/envs/rlgpu/lib
"""
# Type aliases used in the annotations below.
Tensor = torch.Tensor
Array = np.ndarray
class IsaacVecEnv:
    """Adapts a vectorized Isaac Gym `VecTask` to the attribute layout ElegantRL
    expects (`env_num`, `state_dim`, `action_dim`, `max_step`, `if_discrete`,
    plus tensor-based `reset()` / `step()`)."""
    def __init__(
        self,
        env_name: str,
        env_num=-1,
        sim_device_id=0,
        rl_device_id=0,
        headless=False,
        should_print=False,
    ):
        """Preprocesses a vectorized Isaac Gym environment for RL training.
        [Isaac Gym - Preview 3 Release](https://developer.nvidia.com/isaac-gym)
        Args:
            env_name (str): the name of the environment to be processed.
            env_num (int, optional): the number of environments to simulate on the
                device. Defaults to whatever is specified in the corresponding config
                file (any value <= 0 keeps the config default).
            sim_device_id (int, optional): the GPU device id to render physics on.
                Defaults to 0.
            rl_device_id (int, optional): the GPU device id to perform RL training on.
                Defaults to 0.
            headless (bool, optional): whether or not the Isaac Gym environment should
                render on-screen. Defaults to False.
            should_print (bool, optional): whether or not the arguments should be
                printed. Defaults to False.
        """
        task_config = load_task_config(env_name)
        # a negative `sim_device_id` selects CPU physics simulation
        sim_device = f"cuda:{sim_device_id}" if sim_device_id >= 0 else "cpu"
        self.device = sim_device
        isaac_task = isaacgym_task_map[env_name]
        self._override_default_env_num(env_num, task_config)
        set_seed(-1, False)
        env: VecTask = isaac_task(
            cfg=task_config,
            sim_device=sim_device,
            graphics_device_id=rl_device_id,
            headless=headless,
        )
        is_discrete = isinstance(env.action_space, gym.spaces.Discrete)
        # is_discrete = not isinstance(env.action_space, gym.spaces.Box)  # Continuous action space
        state_dimension = env.num_obs
        assert isinstance(state_dimension, int)
        action_dim = getattr(env.action_space, 'n') if is_discrete else env.num_acts
        if not is_discrete:
            # continuous tasks are expected to expose an action box of (-1.0, +1.0)
            try:
                assert all(getattr(env.action_space, 'high') == np.ones(action_dim))
                assert all(-getattr(env.action_space, 'low') == np.ones(action_dim))
            except AssertionError:
                print(f"\n| IsaacGymEnv env.action_space.high {getattr(env.action_space, 'high')}"
                      f"\n| IsaacGymEnv env.action_space.low {getattr(env.action_space, 'low')}")
                raise AssertionError("| IsaacGymEnv env.action_space should be (-1.0, +1.0)")
        target_return = 10 ** 10  # TODO: plan to make `target_returns` optional
        env_config = task_config["env"]
        max_step = get_max_step_from_config(env_config)
        # NOTE(review): `self.device` (set above from `sim_device_id`) is overwritten
        # here with the RL device — confirm this is intended when the two ids differ.
        self.device = torch.device(rl_device_id)
        self.env = env
        self.env_num = env.num_envs
        self.env_name = env_name
        self.max_step = max_step
        self.state_dim = state_dimension
        self.action_dim = action_dim
        self.if_discrete = is_discrete
        self.target_return = target_return
        if should_print:
            pprint(
                {
                    "num_envs": env.num_envs,
                    "env_name": env_name,
                    "max_step": max_step,
                    "state_dim": state_dimension,
                    "action_dim": action_dim,
                    "if_discrete": is_discrete,
                    "target_return": target_return,
                }
            )
    def convert_obs_to_state_device(self, obs_dict) -> Tensor:
        # extract the observation tensor from the VecTask obs dict and move it
        # to the RL device
        return obs_dict['obs'].to(self.device)
    @staticmethod
    def _override_default_env_num(num_envs: int, config_args: Dict):
        """Overrides the default number of environments if it's passed in.
        Args:
            num_envs (int): new number of environments.
            config_args (Dict): configuration retrieved.
        """
        if num_envs > 0:  # non-positive values keep the config-file default
            config_args["env"]["numEnvs"] = num_envs
    def reset(self) -> Tensor:
        """Resets the environments in the VecTask that need to be reset.
        Returns:
            torch.Tensor: the next states in the simulation.
        """
        tensor_state_dict = self.env.reset()
        return self.convert_obs_to_state_device(tensor_state_dict)
    def step(self, actions: Tensor) -> (Tensor, Tensor, Tensor, Dict):
        """Steps through the vectorized environment.
        Args:
            actions (torch.Tensor): a multidimensional tensor of actions to perform on
                *each* environment.
        Returns:
            Tuple[torch.Tensor, torch.Tensor, torch.Tensor, Dict]: a tuple containing
                observations, rewards, dones, and extra info.
        """
        observations_dict, rewards, dones, info_dict = self.env.step(actions)
        # NOTE(review): the obs dict returned by `step` is discarded and
        # `env.reset()` is queried instead — presumably so envs flagged done are
        # re-initialized before their states are returned; confirm against VecTask.
        states = self.convert_obs_to_state_device(self.env.reset())
        return states, rewards.to(self.device), dones.to(self.device), info_dict
class IsaacOneEnv(IsaacVecEnv):
    """Single-environment wrapper around `IsaacVecEnv` for evaluation.

    Simulates exactly one environment and converts the tensor-based VecTask
    interface into the usual numpy single-env interface.
    """

    def __init__(self, env_name: str, device_id=0, headless=False, should_print=False):
        """Build a one-env Isaac Gym task on a single GPU.

        :param env_name: name of the Isaac Gym task to instantiate.
        :param device_id: GPU id used for both physics and RL tensors.
        :param headless: if True, render nothing on-screen.
        :param should_print: if True, print the resolved env arguments.
        """
        super().__init__(
            env_name=env_name,
            env_num=1,
            sim_device_id=device_id,
            rl_device_id=device_id,
            headless=headless,
            should_print=should_print,
        )

    @staticmethod
    def convert_obs_to_state_numpy(obs_dict) -> Array:
        # take the observation of the single env (index 0) as a numpy vector
        return obs_dict['obs'].detach().cpu().numpy()[0]

    def reset(self) -> Array:
        """Reset the (single) environment and return its initial state vector."""
        obs_dict = self.env.reset()
        return self.convert_obs_to_state_numpy(obs_dict)  # state

    def step(self, action: Array) -> (Array, Array, bool, dict):
        """Apply one action and return `(state, reward, done, info_dict)` with
        numpy/python values for the single environment."""
        batched_action = torch.as_tensor(action, dtype=torch.float32).unsqueeze(0)
        obs_dict, reward_tensor, done_tensor, info_dict = self.env.step(batched_action)
        return (
            self.convert_obs_to_state_numpy(obs_dict),
            reward_tensor[0].item(),
            done_tensor[0].item(),
            info_dict,
        )
def check_isaac_gym(env_name='Ant', gpu_id=0):
    """Smoke-test an Isaac Gym task with random actions and print per-episode
    return/length statistics.

    :param env_name: name of the Isaac Gym task to instantiate.
    :param gpu_id: GPU id used for both simulation and RL tensors.
    :return: `(reward_list, steps_list)` — per-episode returns and lengths.
    """
    assert env_name in {
        'AllegroHand',
        'Ant',
        'Anymal',
        'AnymalTerrain',
        'BallBalance',
        'Cartpole',
        'FrankaCabinet',
        'Humanoid',
        'Ingenuity',
        'Quadcopter',
        'ShadowHand',
        'Trifinger',
    }  # raise NameError by input an incorrect environment name to see the available env_name
    env = IsaacVecEnv(env_name=env_name, env_num=1024, sim_device_id=gpu_id, rl_device_id=gpu_id, should_print=True)
    states = env.reset()
    print('\n\nstates.shape', states.shape)
    # (redundant `import torch` removed: torch is imported at module level)
    action = torch.rand((env.env_num, env.action_dim), dtype=torch.float32)
    print('\n\naction.shape', action.shape)
    states, rewards, dones, info_dict = env.step(action)
    print(f'\nstates.shape {states.shape}'
          f'\nrewards.shape {rewards.shape}'
          f'\ndones.shape {dones.shape}'
          f'\nrepr(info.dict) {repr(info_dict)}')
    from tqdm import trange
    device = torch.device(f"cuda:{gpu_id}")
    rewards_ary = []
    dones_ary = []
    env.reset()
    print()
    for _ in trange(env.max_step * 2):
        action = torch.rand((env.env_num, env.action_dim), dtype=torch.float32, device=device)
        states, rewards, dones, info_dict = env.step(action)
        rewards_ary.append(rewards)
        dones_ary.append(dones)
    rewards_ary = torch.stack(rewards_ary)  # rewards_ary.shape == (env.max_step, env.env_num)
    dones_ary = torch.stack(dones_ary)
    print(f'\nrewards_ary.shape {rewards_ary.shape}'
          f'\ndones_ary.shape {dones_ary.shape}')
    reward_list = []
    steps_list = []
    print()
    for i in trange(env.env_num):
        dones_where = torch.where(dones_ary[:, i] == 1)[0]
        episode_num = dones_where.shape[0]
        if episode_num == 0:
            continue
        j0 = 0
        rewards_env = rewards_ary[:, i]
        for j1 in dones_where + 1:
            reward_list.append(rewards_env[j0:j1].sum())
            # the episode covers steps j0 .. j1-1, i.e. exactly `j1 - j0` steps
            # (the previous `j1 - j0 + 1` over-counted every episode by one)
            steps_list.append(j1 - j0)
            j0 = j1
    reward_list = torch.tensor(reward_list, dtype=torch.float32)
    steps_list = torch.tensor(steps_list, dtype=torch.float32)
    print(f'\n reward_list avg {reward_list.mean(0):9.2f}'
          f'\n std {reward_list.std(0):9.2f}'
          f'\n steps_list avg {steps_list.mean(0):9.2f}'
          f'\n std {steps_list.std(0):9.2f}'
          f'\n episode_num {steps_list.shape[0]}')
    return reward_list, steps_list
if __name__ == '__main__':
    # quick manual smoke test (requires an Isaac Gym installation and a GPU)
    check_isaac_gym()
| 11,375 | 36.92 | 116 | py |
ElegantRL | ElegantRL-master/elegantrl/agents/AgentDDPG.py | import numpy as np
import numpy.random as rd
import torch
from copy import deepcopy
from typing import Tuple
from torch import Tensor
from elegantrl.train.config import Config
from elegantrl.train.replay_buffer import ReplayBuffer
from elegantrl.agents.AgentBase import AgentBase
from elegantrl.agents.net import Actor, Critic
class AgentDDPG(AgentBase):
    """DDPG (Deep Deterministic Policy Gradient).

    "Continuous control with deep reinforcement learning". T. Lillicrap et al. 2015.

    net_dims: the middle layer dimension of MLP (MultiLayer Perceptron)
    state_dim: the dimension of state (the number of state vector)
    action_dim: the dimension of action (or the number of discrete action)
    gpu_id: the gpu_id of the training device. Use CPU when cuda is not available.
    args: the arguments for agent training. `args = Config()`
    """
    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        # choose the default actor/critic classes unless a subclass set them already
        self.act_class = getattr(self, 'act_class', Actor)
        self.cri_class = getattr(self, 'cri_class', Critic)
        super().__init__(net_dims=net_dims, state_dim=state_dim, action_dim=action_dim, gpu_id=gpu_id, args=args)
        # create independent target networks (AgentBase only aliases them to the online nets)
        self.act_target = deepcopy(self.act)
        self.cri_target = deepcopy(self.cri)
        self.explore_noise_std = getattr(args, 'explore_noise_std', 0.05)  # standard deviation of exploration noise
        self.act.explore_noise_std = self.explore_noise_std  # assign explore_noise_std for agent.act.get_action(state)
    def update_net(self, buffer: ReplayBuffer) -> tuple:
        """One training round: refresh normalization stats from the newest
        transitions, then alternate critic and actor updates with soft
        target-network updates.

        :param buffer: replay buffer; `buffer.add_item` holds the newest transitions.
        :return: `(avg_critic_objective, avg_actor_objective)` for logging.
        """
        with torch.no_grad():
            states, actions, rewards, undones = buffer.add_item
            self.update_avg_std_for_normalization(
                states=states.reshape((-1, self.state_dim)),
                returns=self.get_cumulative_rewards(rewards=rewards, undones=undones).reshape((-1,))
            )
        '''update network'''
        obj_critics = 0.0
        obj_actors = 0.0
        update_times = int(buffer.add_size * self.repeat_times)
        assert update_times >= 1
        for update_c in range(update_times):
            # critic: minimize the TD error, then soft-update its target net
            obj_critic, state = self.get_obj_critic(buffer, self.batch_size)
            obj_critics += obj_critic.item()
            self.optimizer_update(self.cri_optimizer, obj_critic)
            self.soft_update(self.cri_target, self.cri, self.soft_update_tau)
            # actor: maximize the critic's value of the actor's own action
            # (negated objective below because the optimizer minimizes)
            action_pg = self.act(state)  # policy gradient
            obj_actor = self.cri_target(state, action_pg).mean()  # use cri_target is more stable than cri
            obj_actors += obj_actor.item()
            self.optimizer_update(self.act_optimizer, -obj_actor)
            self.soft_update(self.act_target, self.act, self.soft_update_tau)
        return obj_critics / update_times, obj_actors / update_times
    def get_obj_critic_raw(self, buffer: ReplayBuffer, batch_size: int) -> Tuple[Tensor, Tensor]:
        """Critic TD loss on a uniformly sampled batch; returns `(loss, states)`
        so the caller can reuse the sampled states for the actor update."""
        with torch.no_grad():
            states, actions, rewards, undones, next_ss = buffer.sample(batch_size)  # next_ss: next states
            next_as = self.act_target(next_ss)  # next actions
            next_qs = self.cri_target(next_ss, next_as)  # next q_values
            q_labels = rewards + undones * self.gamma * next_qs
        q_values = self.cri(states, actions)
        obj_critic = self.criterion(q_values, q_labels)
        return obj_critic, states
    def get_obj_critic_per(self, buffer: ReplayBuffer, batch_size: int) -> Tuple[Tensor, Tensor]:
        """Critic TD loss weighted by PER importance weights; also writes the
        fresh TD errors back into the buffer. Returns `(loss, states)`.
        Note: with PER enabled, `self.criterion` uses reduction="none"."""
        with torch.no_grad():
            states, actions, rewards, undones, next_ss, is_weights, is_indices = buffer.sample_for_per(batch_size)
            # is_weights, is_indices: important sampling `weights, indices` by Prioritized Experience Replay (PER)
            next_as = self.act_target(next_ss)
            next_qs = self.cri_target(next_ss, next_as)
            q_labels = rewards + undones * self.gamma * next_qs
        q_values = self.cri(states, actions)
        td_errors = self.criterion(q_values, q_labels)
        obj_critic = (td_errors * is_weights).mean()
        buffer.td_error_update_for_per(is_indices.detach(), td_errors.detach())
        return obj_critic, states
class OrnsteinUhlenbeckNoise:
    """Ornstein-Uhlenbeck process noise for DDPG-style exploration.

    Source: https://github.com/slowbull/DDPG/blob/master/src/explorationnoise.py
    It makes zero-mean Gaussian noise more stable and helps the agent explore
    better in an inertial system. Don't abuse the OU process: it has many
    hyper-parameters, and over fine-tuning them makes no sense.
    """

    def __init__(self, size: int, theta=0.15, sigma=0.3, ou_noise=0.0, dt=1e-2):
        """
        :param size: the size of the noise; noise.shape == (-1, action_dim)
        :param theta: mean-reversion rate pulling the OU state back toward 0
        :param sigma: scale of the Gaussian increment (related to action noise std)
        :param ou_noise: initial value of the OU state
        :param dt: time step of the discretized process
        """
        self.theta = theta
        self.sigma = sigma
        self.ou_noise = ou_noise
        self.dt = dt
        self.size = size

    def __call__(self) -> np.ndarray:
        """Draw the next OU-noise sample.

        :return: the updated OU state, an array of shape ``(size,)``.
        """
        # fix: the return annotation said `float`, but the Gaussian increment
        # broadcasts `ou_noise` to an np.ndarray of shape (size,)
        noise = self.sigma * np.sqrt(self.dt) * rd.normal(size=self.size)
        # discretized OU update with mean 0; subtracting the zero-mean Gaussian
        # increment is distributionally identical to adding it
        self.ou_noise -= self.theta * self.ou_noise * self.dt + noise
        return self.ou_noise
| 5,419 | 43.42623 | 119 | py |
ElegantRL | ElegantRL-master/elegantrl/agents/AgentA2C.py | import torch
from typing import Tuple
from elegantrl.train.config import Config
from elegantrl.agents.AgentPPO import AgentPPO, AgentDiscretePPO
from elegantrl.agents.net import ActorDiscretePPO
class AgentA2C(AgentPPO):
    """
    A2C algorithm. “Asynchronous Methods for Deep Reinforcement Learning”. Mnih V. et al.. 2016.

    Reuses the PPO machinery (actor, critic, `get_advantages`) but updates the
    actor with the plain policy-gradient objective instead of PPO's clipped
    surrogate — see the "without Trust Region" line in `update_net`.
    """
    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        super().__init__(net_dims=net_dims, state_dim=state_dim, action_dim=action_dim, gpu_id=gpu_id, args=args)
    def update_net(self, buffer) -> Tuple[float, ...]:
        """Update actor and critic from an on-policy rollout.

        :param buffer: tuple `(states, actions, logprobs, rewards, undones)`,
            each shaped `(buffer_size, buffer_num, ...)`.
        :return: `(avg_critic_objective, avg_actor_objective, a_std_log)` for logging.
        """
        with torch.no_grad():
            states, actions, logprobs, rewards, undones = buffer
            buffer_size = states.shape[0]
            buffer_num = states.shape[1]
            '''get advantages and reward_sums'''
            bs = 2 ** 10  # set a smaller 'batch_size' to avoiding out of GPU memory.
            values = torch.empty_like(rewards)  # values.shape == (buffer_size, buffer_num)
            for i in range(0, buffer_size, bs):
                for j in range(buffer_num):
                    values[i:i + bs, j] = self.cri(states[i:i + bs, j])
            advantages = self.get_advantages(rewards, undones, values)  # shape == (buffer_size, buffer_num)
            reward_sums = advantages + values  # shape == (buffer_size, buffer_num)
            del rewards, undones, values
            # normalize advantages; NOTE(review): the mean is global while the
            # std is per-environment-column (`dim=0`) — confirm this asymmetry is intended
            advantages = (advantages - advantages.mean()) / (advantages.std(dim=0) + 1e-5)
        # assert logprobs.shape == advantages.shape == reward_sums.shape == (buffer_size, buffer_num)
        '''update network'''
        obj_critics = 0.0
        obj_actors = 0.0
        sample_len = buffer_size - 1
        update_times = int(buffer_size * self.repeat_times / self.batch_size)
        assert update_times >= 1
        for _ in range(update_times):
            # draw flat random indices, then unflatten into (time, env) coordinates
            ids = torch.randint(sample_len * buffer_num, size=(self.batch_size,), requires_grad=False)
            ids0 = torch.fmod(ids, sample_len)  # ids % sample_len
            ids1 = torch.div(ids, sample_len, rounding_mode='floor')  # ids // sample_len
            state = states[ids0, ids1]
            action = actions[ids0, ids1]
            # logprob = logprobs[ids0, ids1]
            advantage = advantages[ids0, ids1]
            reward_sum = reward_sums[ids0, ids1]
            value = self.cri(state)  # critic network predicts the reward_sum (Q value) of state
            obj_critic = self.criterion(value, reward_sum)
            self.optimizer_update(self.cri_optimizer, obj_critic)
            new_logprob, obj_entropy = self.act.get_logprob_entropy(state, action)
            obj_actor = (advantage * new_logprob).mean()  # obj_actor without Trust Region
            self.optimizer_update(self.act_optimizer, -obj_actor)
            obj_critics += obj_critic.item()
            obj_actors += obj_actor.item()
        a_std_log = getattr(self.act, "a_std_log", torch.zeros(1)).mean()
        return obj_critics / update_times, obj_actors / update_times, a_std_log.item()
class AgentDiscreteA2C(AgentDiscretePPO):
    """A2C variant for discrete action spaces; defaults the actor class to
    `ActorDiscretePPO` unless a subclass already picked one."""

    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        # keep any `act_class` chosen by a subclass, otherwise use the discrete actor
        self.act_class = getattr(self, "act_class", ActorDiscretePPO)
        super().__init__(net_dims=net_dims, state_dim=state_dim,
                         action_dim=action_dim, gpu_id=gpu_id, args=args)
| 3,431 | 46.013699 | 115 | py |
ElegantRL | ElegantRL-master/elegantrl/agents/AgentMADDPG.py | import torch
from elegantrl.agents.AgentBase import AgentBase
from elegantrl.agents.net import Actor, Critic
from elegantrl.agents.AgentDDPG import AgentDDPG
class AgentMADDPG(AgentBase):
    """
    Bases: ``AgentBase``
    Multi-Agent DDPG algorithm. “Multi-Agent Actor-Critic for Mixed Cooperative-Competitive”. R Lowe. et al.. 2017.

    Each agent owns its own DDPG actor/critic; every critic is trained on the
    concatenated observations and actions of *all* agents (centralized critics,
    decentralized actors).

    :param net_dim[int]: the dimension of networks (the width of neural networks)
    :param state_dim[int]: the dimension of state (the number of state vector)
    :param action_dim[int]: the dimension of action (the number of discrete action)
    :param learning_rate[float]: learning rate of optimizer
    :param gamma[float]: discount factor of future rewards
    :param n_agents[int]: number of agents
    :param if_per_or_gae[bool]: PER (off-policy) or GAE (on-policy) for sparse reward
    :param env_num[int]: the env number of VectorEnv. env_num == 1 means don't use VectorEnv
    :param agent_id[int]: if the visible_gpu is '1,9,3,4', agent_id=1 means (1,9,4,3)[agent_id] == 9
    """
    def __init__(self):
        # NOTE(review): this class uses the legacy no-arg `__init__` + `init()`
        # API; confirm it matches the AgentBase/AgentDDPG versions in use.
        super().__init__()
        self.ClassAct = Actor
        self.ClassCri = Critic
        self.if_use_cri_target = True
        self.if_use_act_target = True
    def init(
        self,
        net_dim,
        state_dim,
        action_dim,
        learning_rate=1e-4,
        gamma=0.95,
        n_agents=1,
        if_use_per=False,
        env_num=1,
        agent_id=0,
    ):
        # one independent DDPG learner per agent
        self.agents = [AgentDDPG() for i in range(n_agents)]
        self.explore_env = self.explore_one_env
        self.if_off_policy = True
        self.n_agents = n_agents
        for i in range(self.n_agents):
            # NOTE(review): the `learning_rate`/`if_use_per`/`env_num`/`agent_id`
            # arguments of `init` are shadowed by hard-coded values here — confirm intended.
            self.agents[i].init(
                net_dim,
                state_dim,
                action_dim,
                learning_rate=1e-4,
                n_agents=self.n_agents,
                if_use_per=False,
                env_num=1,
                agent_id=0,
            )
        self.n_states = state_dim
        self.n_actions = action_dim
        # NOTE(review): `batch_size` is assigned from `net_dim` — looks like a
        # placeholder; it is overwritten in `update_net` anyway.
        self.batch_size = net_dim
        self.gamma = gamma
        self.update_tau = 0
        self.device = torch.device(
            f"cuda:{agent_id}"
            if (torch.cuda.is_available() and (agent_id >= 0))
            else "cpu"
        )
    def update_agent(self, rewards, dones, actions, observations, next_obs, index):
        """
        Update the single agent neural networks, called by update_net.
        :param rewards: reward list of the sampled buffer
        :param dones: done list of the sampled buffer
        :param actions: action list of the sampled buffer
        :param observations: observation list of the sampled buffer
        :param next_obs: next_observation list of the sample buffer
        :param index: ID of the agent
        """
        curr_agent = self.agents[index]
        curr_agent.cri_optim.zero_grad()
        # build the joint next-action from every agent's target actor
        all_target_actions = []
        for i in range(self.n_agents):
            if i == index:
                all_target_actions.append(curr_agent.act_target(next_obs[:, index]))
            if i != index:
                action = self.agents[i].act_target(next_obs[:, i])
                all_target_actions.append(action)
        # flatten (batch, n_agents, action_dim) -> (batch, n_agents * action_dim)
        action_target_all = (
            torch.cat(all_target_actions, dim=1)
            .to(self.device)
            .reshape(actions.shape[0], actions.shape[1] * actions.shape[2])
        )
        # centralized TD target: this agent's reward + discounted joint target-Q
        target_value = rewards[:, index] + self.gamma * curr_agent.cri_target(
            next_obs.reshape(next_obs.shape[0], next_obs.shape[1] * next_obs.shape[2]),
            action_target_all,
        ).detach().squeeze(dim=1)
        actual_value = curr_agent.cri(
            observations.reshape(
                next_obs.shape[0], next_obs.shape[1] * next_obs.shape[2]
            ),
            actions.reshape(actions.shape[0], actions.shape[1] * actions.shape[2]),
        ).squeeze(dim=1)
        vf_loss = curr_agent.loss_td(actual_value, target_value.detach())
        curr_agent.act_optim.zero_grad()
        # actor loss: this agent re-acts, the other agents keep their buffer actions
        curr_pol_out = curr_agent.act(observations[:, index])
        curr_pol_vf_in = curr_pol_out
        all_pol_acs = []
        for i in range(self.n_agents):
            if i == index:
                all_pol_acs.append(curr_pol_vf_in)
            else:
                all_pol_acs.append(actions[:, i])
        pol_loss = -torch.mean(
            curr_agent.cri(
                observations.reshape(
                    observations.shape[0], observations.shape[1] * observations.shape[2]
                ),
                torch.cat(all_pol_acs, dim=1)
                .to(self.device)
                .reshape(actions.shape[0], actions.shape[1] * actions.shape[2]),
            )
        )
        # NOTE(review): act_optim.zero_grad() was already called above — the
        # repeat is redundant but harmless
        curr_agent.act_optim.zero_grad()
        pol_loss.backward()
        curr_agent.act_optim.step()
        curr_agent.cri_optim.zero_grad()
        vf_loss.backward()
        curr_agent.cri_optim.step()
    def update_net(self, buffer, batch_size, repeat_times, soft_update_tau):
        """
        Update the neural networks by sampling batch data from ``ReplayBuffer``.
        :param buffer: the ReplayBuffer instance that stores the trajectories.
        :param batch_size: the size of batch data for Stochastic Gradient Descent (SGD).
        :param repeat_times: the re-using times of each trajectory.
        :param soft_update_tau: the soft update parameter.
        """
        buffer.update_now_len()
        self.batch_size = batch_size
        self.update_tau = soft_update_tau
        # NOTE(review): one batch is sampled and reused for every agent;
        # `repeat_times` is accepted but not used here — confirm intended.
        rewards, dones, actions, observations, next_obs = buffer.sample_batch(
            self.batch_size
        )
        for index in range(self.n_agents):
            self.update_agent(rewards, dones, actions, observations, next_obs, index)
        for agent in self.agents:
            self.soft_update(agent.cri_target, agent.cri, self.update_tau)
            self.soft_update(agent.act_target, agent.act, self.update_tau)
        return
    def explore_one_env(self, env, target_step) -> list:
        """
        Exploring the environment for target_step.
        param env: the Environment instance to be explored.
        param target_step: target steps to explore.
        """
        traj_temp = []
        k = 0  # steps since the last reset; forces a reset after 100 steps
        for _ in range(target_step):
            k += 1
            actions = []
            for i in range(self.n_agents):
                action = self.agents[i].select_actions(self.states[i])
                actions.append(action)
            # print(actions)
            next_s, reward, done, _ = env.step(actions)
            traj_temp.append((self.states, reward, done, actions))
            # NOTE(review): `done[i] is True` is an identity check — it fails for
            # numpy bools or 0/1 flags; confirm the env returns python bools.
            global_done = all(done[i] is True for i in range(self.n_agents))
            if global_done or k > 100:
                state = env.reset()
                k = 0
            else:
                state = next_s
        self.states = state
        return traj_temp
    def select_actions(self, states):
        """
        Select continuous actions for exploration
        :param state: states.shape==(n_agents,batch_size, state_dim, )
        :return: actions.shape==(n_agents,batch_size, action_dim, ), -1 < action < +1
        """
        actions = []
        for i in range(self.n_agents):
            action = self.agents[i].select_actions(states[i])
            actions.append(action)
        return actions
    def save_or_load_agent(self, cwd, if_save):
        """
        save or load training files for Agent
        :param cwd: Current Working Directory. ElegantRL save training files in CWD.
        :param if_save: True: save files. False: load files.
        """
        # each agent gets its own numbered sub-directory under `cwd`
        for i in range(self.n_agents):
            self.agents[i].save_or_load_agent(cwd + "/" + str(i), if_save)
| 7,741 | 36.582524 | 115 | py |
ElegantRL | ElegantRL-master/elegantrl/agents/AgentBase.py | import os
import torch
from typing import Tuple, Union
from torch import Tensor
from torch.nn.utils import clip_grad_norm_
from elegantrl.train import Config, ReplayBuffer
class AgentBase:
"""
The basic agent of ElegantRL
net_dims: the middle layer dimension of MLP (MultiLayer Perceptron)
state_dim: the dimension of state (the number of state vector)
action_dim: the dimension of action (or the number of discrete action)
gpu_id: the gpu_id of the training device. Use CPU when cuda is not available.
args: the arguments for agent training. `args = Config()`
"""
    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        # NOTE(review): `args: Config = Config()` is a default instance created
        # once at definition time and shared by all default-arg calls — confirm
        # Config is effectively immutable here.
        self.gamma = args.gamma  # discount factor of future rewards
        self.num_envs = args.num_envs  # the number of sub envs in vectorized env. `num_envs=1` in single env.
        self.batch_size = args.batch_size  # num of transitions sampled from replay buffer.
        self.repeat_times = args.repeat_times  # repeatedly update network using ReplayBuffer
        self.reward_scale = args.reward_scale  # an approximate target reward usually be closed to 256
        self.learning_rate = args.learning_rate  # the learning rate for network updating
        self.if_off_policy = args.if_off_policy  # whether off-policy or on-policy of DRL algorithm
        self.clip_grad_norm = args.clip_grad_norm  # clip the gradient after normalization
        self.soft_update_tau = args.soft_update_tau  # the tau of soft target update `net = (1-tau)*net + net1`
        self.state_value_tau = args.state_value_tau  # the tau of normalize for value and state
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.last_state = None  # last state of the trajectory for training. last_state.shape == (num_envs, state_dim)
        self.device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
        '''network'''
        # subclasses set `act_class`/`cri_class` before calling super().__init__()
        act_class = getattr(self, "act_class", None)
        cri_class = getattr(self, "cri_class", None)
        # target nets are ALIASES of the online nets here; subclasses that need
        # real target nets replace them with deepcopies (see AgentDDPG)
        self.act = self.act_target = act_class(net_dims, state_dim, action_dim).to(self.device)
        self.cri = self.cri_target = cri_class(net_dims, state_dim, action_dim).to(self.device) \
            if cri_class else self.act
        '''optimizer'''
        self.act_optimizer = torch.optim.AdamW(self.act.parameters(), self.learning_rate)
        self.cri_optimizer = torch.optim.AdamW(self.cri.parameters(), self.learning_rate) \
            if cri_class else self.act_optimizer
        from types import MethodType  # built-in package of Python3
        # patch `optimizer.parameters()` with `get_optim_param` (defined at
        # module level) so optimizer state can be saved/loaded like a network
        self.act_optimizer.parameters = MethodType(get_optim_param, self.act_optimizer)
        self.cri_optimizer.parameters = MethodType(get_optim_param, self.cri_optimizer)
        """attribute"""
        # pick the exploration routine matching the env kind
        if self.num_envs == 1:
            self.explore_env = self.explore_one_env
        else:
            self.explore_env = self.explore_vec_env
        self.if_use_per = getattr(args, 'if_use_per', None)  # use PER (Prioritized Experience Replay)
        if self.if_use_per:
            # per-element loss so each TD error can be importance-weighted
            self.criterion = torch.nn.SmoothL1Loss(reduction="none")
            self.get_obj_critic = self.get_obj_critic_per
        else:
            self.criterion = torch.nn.SmoothL1Loss(reduction="mean")
            self.get_obj_critic = self.get_obj_critic_raw
        """save and load"""
        self.save_attr_names = {'act', 'act_target', 'act_optimizer', 'cri', 'cri_target', 'cri_optimizer'}
    def explore_one_env(self, env, horizon_len: int, if_random: bool = False) -> Tuple[Tensor, ...]:
        """
        Collect trajectories through the actor-environment interaction for a **single** environment instance.
        env: RL training environment. env.reset() env.step(). It should be a vector env.
        horizon_len: collect horizon_len step while exploring to update networks
        if_random: uses random action for warn-up exploration
        return: `(states, actions, rewards, undones)` for off-policy
            num_envs == 1
            states.shape == (horizon_len, num_envs, state_dim)
            actions.shape == (horizon_len, num_envs, action_dim)
            rewards.shape == (horizon_len, num_envs)
            undones.shape == (horizon_len, num_envs)
        """
        states = torch.zeros((horizon_len, self.num_envs, self.state_dim), dtype=torch.float32).to(self.device)
        actions = torch.zeros((horizon_len, self.num_envs, self.action_dim), dtype=torch.float32).to(self.device)
        rewards = torch.zeros((horizon_len, self.num_envs), dtype=torch.float32).to(self.device)
        dones = torch.zeros((horizon_len, self.num_envs), dtype=torch.bool).to(self.device)
        state = self.last_state  # state.shape == (1, state_dim) for a single env.
        get_action = self.act.get_action  # hoist the bound method out of the loop
        for t in range(horizon_len):
            # NOTE(review): the random warm-up action is created on the CPU; the
            # `actions[t] = action` assignment below is expected to copy it onto
            # `self.device` — confirm.
            action = torch.rand(1, self.action_dim) * 2 - 1.0 if if_random else get_action(state)
            states[t] = state
            ary_action = action[0].detach().cpu().numpy()
            ary_state, reward, done, _ = env.step(ary_action)  # next_state
            # restart the episode immediately on termination so `state` stays valid
            ary_state = env.reset() if done else ary_state  # ary_state.shape == (state_dim, )
            state = torch.as_tensor(ary_state, dtype=torch.float32, device=self.device).unsqueeze(0)
            actions[t] = action
            rewards[t] = reward
            dones[t] = done
        self.last_state = state  # state.shape == (1, state_dim) for a single env.
        rewards *= self.reward_scale
        undones = 1.0 - dones.type(torch.float32)  # undone = 1 - done, used as the discount mask
        return states, actions, rewards, undones
    def explore_vec_env(self, env, horizon_len: int, if_random: bool = False) -> Tuple[Tensor, ...]:
        """
        Collect trajectories through the actor-environment interaction for a **vectorized** environment instance.
        env: RL training environment. env.reset() env.step(). It should be a vector env.
        horizon_len: collect horizon_len step while exploring to update networks
        if_random: uses random action for warn-up exploration
        return: `(states, actions, rewards, undones)` for off-policy
            states.shape == (horizon_len, num_envs, state_dim)
            actions.shape == (horizon_len, num_envs, action_dim)
            rewards.shape == (horizon_len, num_envs)
            undones.shape == (horizon_len, num_envs)
        """
        states = torch.zeros((horizon_len, self.num_envs, self.state_dim), dtype=torch.float32).to(self.device)
        actions = torch.zeros((horizon_len, self.num_envs, self.action_dim), dtype=torch.float32).to(self.device)
        rewards = torch.zeros((horizon_len, self.num_envs), dtype=torch.float32).to(self.device)
        dones = torch.zeros((horizon_len, self.num_envs), dtype=torch.bool).to(self.device)
        state = self.last_state  # last_state.shape == (num_envs, state_dim)
        get_action = self.act.get_action  # hoist the bound method out of the loop
        for t in range(horizon_len):
            # NOTE(review): the random warm-up tensor is created on the CPU even
            # though `state` and the env buffers live on `self.device` — confirm
            # the vectorized env accepts a CPU action tensor here.
            action = torch.rand(self.num_envs, self.action_dim) * 2 - 1.0 if if_random \
                else get_action(state).detach()
            states[t] = state  # state.shape == (num_envs, state_dim)
            state, reward, done, _ = env.step(action)  # next_state
            actions[t] = action
            rewards[t] = reward
            dones[t] = done
        self.last_state = state
        rewards *= self.reward_scale
        undones = 1.0 - dones.type(torch.float32)  # undone = 1 - done, used as the discount mask
        return states, actions, rewards, undones
def update_net(self, buffer: Union[ReplayBuffer, tuple]) -> Tuple[float, ...]:
obj_critic = 0.0 # criterion(q_value, q_label).mean().item()
obj_actor = 0.0 # q_value.mean().item()
assert isinstance(buffer, ReplayBuffer) or isinstance(buffer, tuple)
assert isinstance(self.batch_size, int)
assert isinstance(self.repeat_times, int)
assert isinstance(self.reward_scale, float)
return obj_critic, obj_actor
def get_obj_critic_raw(self, buffer: ReplayBuffer, batch_size: int) -> Tuple[Tensor, Tensor]:
with torch.no_grad():
states, actions, rewards, undones, next_ss = buffer.sample(batch_size) # next_ss: next states
next_as = self.act_target(next_ss) # next actions
next_qs = self.cri_target(next_ss, next_as) # next q values
q_labels = rewards + undones * self.gamma * next_qs
q_values = self.cri(states, actions)
obj_critic = self.criterion(q_values, q_labels)
return obj_critic, states
def get_obj_critic_per(self, buffer: ReplayBuffer, batch_size: int) -> Tuple[Tensor, Tensor]:
with torch.no_grad():
states, actions, rewards, undones, next_ss, is_weights, is_indices = buffer.sample_for_per(batch_size)
# is_weights, is_indices: important sampling `weights, indices` by Prioritized Experience Replay (PER)
next_as = self.act_target(next_ss)
next_qs = self.cri_target(next_ss, next_as)
q_labels = rewards + undones * self.gamma * next_qs
q_values = self.cri(states, actions)
td_errors = self.criterion(q_values, q_labels)
obj_critic = (td_errors * is_weights).mean()
buffer.td_error_update_for_per(is_indices.detach(), td_errors.detach())
return obj_critic, states
def get_cumulative_rewards(self, rewards: Tensor, undones: Tensor) -> Tensor:
returns = torch.empty_like(rewards)
masks = undones * self.gamma
horizon_len = rewards.shape[0]
last_state = self.last_state
next_action = self.act_target(last_state)
next_value = self.cri_target(last_state, next_action).detach()
for t in range(horizon_len - 1, -1, -1):
returns[t] = next_value = rewards[t] + masks[t] * next_value
return returns
def optimizer_update(self, optimizer: torch.optim, objective: Tensor):
    """Minimize the optimization objective by updating the network parameters.

    optimizer: `optimizer = torch.optim.SGD(net.parameters(), learning_rate)`
    objective: `objective = net(...)` the optimization objective, sometimes is a loss function.
    """
    optimizer.zero_grad()  # clear gradients left over from the previous step
    objective.backward()   # populate .grad on every parameter in the objective's graph
    # gradient clipping; assumes all trainable parameters live in param_groups[0]
    clip_grad_norm_(parameters=optimizer.param_groups[0]["params"], max_norm=self.clip_grad_norm)
    optimizer.step()  # apply the (clipped) gradients
def optimizer_update_amp(self, optimizer: torch.optim, objective: Tensor):  # automatic mixed precision
    """Minimize the optimization objective with Automatic Mixed Precision (AMP).

    optimizer: e.g. `torch.optim.SGD(net.parameters(), learning_rate)`
    objective: scalar tensor to minimize (typically a loss).

    Fix: the GradScaler is created once and cached on `self` instead of being
    rebuilt on every call — a fresh scaler loses the running loss-scale state,
    which defeats AMP's dynamic scaling (the original comment already said
    "write in __init__()").
    """
    amp_scale = getattr(self, 'amp_scale', None)
    if amp_scale is None:  # lazily create and cache the scaler
        amp_scale = self.amp_scale = torch.cuda.amp.GradScaler()

    optimizer.zero_grad()
    amp_scale.scale(objective).backward()  # scaled loss.backward()
    amp_scale.unscale_(optimizer)  # unscale grads in-place so clipping sees true magnitudes
    clip_grad_norm_(parameters=optimizer.param_groups[0]["params"], max_norm=self.clip_grad_norm)
    amp_scale.step(optimizer)  # optimizer.step(), skipped if grads contain inf/nan
    amp_scale.update()  # adjust the loss scale for the next iteration
def update_avg_std_for_normalization(self, states: Tensor, returns: Tensor):
    """Exponential-moving-average update of the state/value normalization stats
    shared by the actor and critic.

    states: flattened batch of states, shape (n, state_dim).
    returns: flattened cumulative returns, shape (n,).
    No-op when `self.state_value_tau` is 0.

    Fix: the state-std EMA now reads `self.act.state_std` (it previously mixed
    in `self.cri.state_std`, inconsistent with the state-avg line; the two are
    re-synced below, so behavior only differed if they ever diverged).
    """
    tau = self.state_value_tau
    if tau == 0:
        return

    state_avg = states.mean(dim=0, keepdim=True)
    state_std = states.std(dim=0, keepdim=True)
    self.act.state_avg[:] = self.act.state_avg * (1 - tau) + state_avg * tau
    self.act.state_std[:] = self.act.state_std * (1 - tau) + state_std * tau + 1e-4  # +1e-4 keeps std positive
    self.cri.state_avg[:] = self.act.state_avg  # keep the critic's stats identical to the actor's
    self.cri.state_std[:] = self.act.state_std

    returns_avg = returns.mean(dim=0)
    returns_std = returns.std(dim=0)
    self.cri.value_avg[:] = self.cri.value_avg * (1 - tau) + returns_avg * tau
    self.cri.value_std[:] = self.cri.value_std * (1 - tau) + returns_std * tau + 1e-4
@staticmethod
def soft_update(target_net: torch.nn.Module, current_net: torch.nn.Module, tau: float):
"""soft update target network via current network
target_net: update target network via current network to make training more stable.
current_net: current network update via an optimizer
tau: tau of soft target update: `target_net = target_net * (1-tau) + current_net * tau`
"""
for tar, cur in zip(target_net.parameters(), current_net.parameters()):
tar.data.copy_(cur.data * tau + tar.data * (1.0 - tau))
def save_or_load_agent(self, cwd: str, if_save: bool):
    """Save or restore the agent's training attributes under `cwd`.

    cwd: working directory; one `<attr_name>.pth` file per attribute.
    if_save: True saves every attribute in `self.save_attr_names`;
             False loads each attribute whose file exists on disk.
    """
    assert self.save_attr_names.issuperset({'act', 'act_target', 'act_optimizer'})
    for attr_name in self.save_attr_names:
        file_path = f"{cwd}/{attr_name}.pth"
        if if_save:
            torch.save(getattr(self, attr_name), file_path)
            continue
        # load path: silently skip attributes that were never saved
        if os.path.isfile(file_path):
            setattr(self, attr_name, torch.load(file_path, map_location=self.device))
def get_optim_param(optimizer: torch.optim) -> list: # backup
params_list = []
for params_dict in optimizer.state_dict()["state"].values():
params_list.extend([t for t in params_dict.values() if isinstance(t, torch.Tensor)])
return params_list
| 13,659 | 48.854015 | 118 | py |
import math
import torch
from typing import Tuple
from copy import deepcopy
from torch import Tensor
from elegantrl.agents.AgentBase import AgentBase
from elegantrl.agents.net import ActorSAC, ActorFixSAC, CriticTwin
from elegantrl.train.config import Config
from elegantrl.train.replay_buffer import ReplayBuffer
class AgentSAC(AgentBase):
    """Soft Actor-Critic (SAC): stochastic Gaussian policy, twin Q critics with
    a soft-updated target network, and automatic temperature (alpha) tuning."""

    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        # subclasses may pre-set act_class / cri_class before this __init__ runs
        self.act_class = getattr(self, 'act_class', ActorSAC)
        self.cri_class = getattr(self, 'cri_class', CriticTwin)
        super().__init__(net_dims=net_dims, state_dim=state_dim, action_dim=action_dim, gpu_id=gpu_id, args=args)
        self.cri_target = deepcopy(self.cri)

        # log(alpha) is trained instead of alpha itself so alpha = exp(alpha_log) stays positive
        self.alpha_log = torch.tensor((-1,), dtype=torch.float32, requires_grad=True, device=self.device)  # trainable
        self.alpha_optimizer = torch.optim.AdamW((self.alpha_log,), lr=self.learning_rate * 4)
        self.target_entropy = getattr(args, 'target_entropy', action_dim)

    def update_net(self, buffer: ReplayBuffer) -> Tuple[float, ...]:
        """One training round over the freshly collected data.

        Returns the per-iteration averages of (critic objective, actor
        objective, alpha).
        """
        with torch.no_grad():
            states, actions, rewards, undones = buffer.add_item
            self.update_avg_std_for_normalization(
                states=states.reshape((-1, self.state_dim)),
                returns=self.get_cumulative_rewards(rewards=rewards, undones=undones).reshape((-1,))
            )

        '''update network'''
        obj_critics = 0.0
        obj_actors = 0.0
        alphas = 0.0

        update_times = int(buffer.add_size * self.repeat_times)
        assert update_times >= 1
        for _ in range(update_times):
            '''objective of critic (loss function of critic)'''
            obj_critic, state = self.get_obj_critic(buffer, self.batch_size)
            obj_critics += obj_critic.item()
            self.optimizer_update(self.cri_optimizer, obj_critic)
            self.soft_update(self.cri_target, self.cri, self.soft_update_tau)

            '''objective of alpha (temperature parameter automatic adjustment)'''
            action_pg, log_prob = self.act.get_action_logprob(state)  # policy gradient
            obj_alpha = (self.alpha_log * (self.target_entropy - log_prob).detach()).mean()
            self.optimizer_update(self.alpha_optimizer, obj_alpha)

            '''objective of actor'''
            alpha = self.alpha_log.exp().detach()
            alphas += alpha.item()
            with torch.no_grad():
                # keep log(alpha) in a numerically sane range
                self.alpha_log[:] = self.alpha_log.clamp(-16, 2)

            q_value_pg = self.cri_target(state, action_pg).mean()
            obj_actor = (q_value_pg - log_prob * alpha).mean()
            obj_actors += obj_actor.item()
            # negate: the optimizer minimizes, but the actor objective is maximized
            self.optimizer_update(self.act_optimizer, -obj_actor)
        return obj_critics / update_times, obj_actors / update_times, alphas / update_times

    def get_obj_critic_raw(self, buffer, batch_size: int) -> Tuple[Tensor, Tensor]:
        """Twin-critic soft-Bellman TD loss on a uniformly sampled batch.

        Returns (critic objective, sampled states).
        """
        with torch.no_grad():
            states, actions, rewards, undones, next_ss = buffer.sample(batch_size)  # next_ss: next states

            next_as, next_logprobs = self.act.get_action_logprob(next_ss)  # next actions
            next_qs = self.cri_target.get_q_min(next_ss, next_as)  # next q values

            alpha = self.alpha_log.exp().detach()
            # soft Bellman target: the entropy bonus (-alpha * logprob) enters the target value
            q_labels = rewards + undones * self.gamma * (next_qs - next_logprobs * alpha)
        q1, q2 = self.cri.get_q1_q2(states, actions)
        obj_critic = self.criterion(q1, q_labels) + self.criterion(q2, q_labels)  # twin critics
        return obj_critic, states

    def get_obj_critic_per(self, buffer: ReplayBuffer, batch_size: int) -> Tuple[Tensor, Tensor]:
        """Twin-critic TD loss with Prioritized Experience Replay weighting.

        Per-sample TD errors are written back to refresh the buffer priorities.
        Returns (critic objective, sampled states).
        """
        with torch.no_grad():
            states, actions, rewards, undones, next_ss, is_weights, is_indices = buffer.sample_for_per(batch_size)

            next_as, next_logprobs = self.act.get_action_logprob(next_ss)
            next_qs = self.cri_target.get_q_min(next_ss, next_as)

            alpha = self.alpha_log.exp().detach()
            q_labels = rewards + undones * self.gamma * (next_qs - next_logprobs * alpha)

        q1, q2 = self.cri.get_q1_q2(states, actions)
        td_errors = self.criterion(q1, q_labels) + self.criterion(q2, q_labels)
        obj_critic = (td_errors * is_weights).mean()
        buffer.td_error_update_for_per(is_indices.detach(), td_errors.detach())
        return obj_critic, states
class AgentModSAC(AgentSAC):  # Modified SAC using reliable_lambda and Two Time-scale Update Rule
    """SAC variant: tracks a smoothed critic loss (`obj_c`) to derive
    `reliable_lambda`, which throttles actor/alpha updates relative to critic
    updates (an automatic Two Time-scale Update Rule, TTUR)."""

    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        self.act_class = getattr(self, "act_class", ActorFixSAC)
        super().__init__(net_dims=net_dims, state_dim=state_dim, action_dim=action_dim, gpu_id=gpu_id, args=args)
        self.obj_c = 1.0  # for reliable_lambda

    def update_net(self, buffer: ReplayBuffer) -> Tuple[float, ...]:
        """One training round; actor updates are gated by the TTUR ratio.

        Returns the averages of (critic objective, actor objective, alpha).
        """
        with torch.no_grad():
            states, actions, rewards, undones = buffer.add_item
            self.update_avg_std_for_normalization(
                states=states.reshape((-1, self.state_dim)),
                returns=self.get_cumulative_rewards(rewards=rewards, undones=undones).reshape((-1,))
            )

        '''update network'''
        obj_critics = 0.0
        obj_actors = 0.0
        alphas = 0.0

        update_times = int(buffer.add_size * self.repeat_times)
        assert update_times >= 1
        update_a = 0  # number of actor updates performed so far
        for update_c in range(1, update_times + 1):
            '''objective of critic (loss function of critic)'''
            obj_critic, state = self.get_obj_critic(buffer, self.batch_size)
            obj_critics += obj_critic.item()
            self.optimizer_update(self.cri_optimizer, obj_critic)
            self.soft_update(self.cri_target, self.cri, self.soft_update_tau)

            # EMA of the critic loss; a low/stable loss means the critic is "reliable"
            self.obj_c = 0.995 * self.obj_c + 0.005 * obj_critic.item()  # for reliable_lambda
            reliable_lambda = math.exp(-self.obj_c ** 2)  # for reliable_lambda
            if update_a / update_c < 1 / (2 - reliable_lambda):  # auto TTUR
                update_a += 1  # fix: count this actor update, otherwise the TTUR ratio check is always true
                '''objective of alpha (temperature parameter automatic adjustment)'''
                action_pg, log_prob = self.act.get_action_logprob(state)  # policy gradient
                obj_alpha = (self.alpha_log * (self.target_entropy - log_prob).detach()).mean()
                self.optimizer_update(self.alpha_optimizer, obj_alpha)

                '''objective of actor'''
                alpha = self.alpha_log.exp().detach()
                alphas += alpha.item()
                with torch.no_grad():
                    self.alpha_log[:] = self.alpha_log.clamp(-16, 2)

                q_value_pg = self.cri_target(state, action_pg).mean()
                obj_actor = (q_value_pg - log_prob * alpha).mean()
                obj_actors += obj_actor.item()
                self.optimizer_update(self.act_optimizer, -obj_actor)
        return obj_critics / update_times, obj_actors / update_times, alphas / update_times
| 7,050 | 48.307692 | 118 | py |
from turtle import forward
import numpy as np
import torch
import torch.nn as nn
import util
import copy
class ActorVMPO(nn.Module):
    """Gaussian policy head for V-MPO.

    The action mean comes from a small MLP on top of a shared state encoder;
    the diagonal covariance comes from a state-independent learnable
    log-std vector.
    """

    def __init__(self, action_dim, mid_dim, device, shared_net):
        super(ActorVMPO, self).__init__()
        self.device = device
        self.action_dim = action_dim
        self.shared_net = shared_net
        self.nn_avg = nn.Sequential(nn.Linear(mid_dim, mid_dim), nn.ReLU(), nn.Linear(mid_dim, self.action_dim))
        # state-independent log-std (per original author, a scale_tril std parameterization was too bug-prone)
        self.action_log_std = nn.Parameter(torch.zeros(action_dim, device=device))

    def forward(self, states):
        """Action mean for a batch of states.

        Fix: delegates to get_mean() instead of duplicating its body verbatim.
        """
        return self.get_mean(states)

    def get_mean(self, states):
        """Mean of the action distribution, shape (batch_size, action_dim)."""
        states = states.squeeze()
        return self.nn_avg(self.shared_net(states))

    def get_cov(self):
        """Diagonal matrix of exp(clamped log-std), shape (action_dim, action_dim);
        used as the covariance of the policy's MultivariateNormal."""
        action_std = self.action_log_std.clamp(min=-20., max=2.).exp()
        return torch.diag_embed(action_std)

    # Actions in V-MPO follow a multivariate Gaussian distribution.
    def get_action_4_explorer(self, states):
        """Sample exploration actions, shape (batch_size, action_dim)."""
        return torch.distributions.MultivariateNormal(self.get_mean(states), self.get_cov()).sample()

    def entropy(self, action_mean, action_cov):
        """Entropy of the Gaussian with the given mean/cov, shape (batch_size,)."""
        return torch.distributions.MultivariateNormal(action_mean, action_cov).entropy()

    def log_prob(self, action_mean, action_cov, actions):
        """Log-density of `actions` under the given Gaussian, shape (batch_size,)."""
        return torch.distributions.MultivariateNormal(action_mean, action_cov).log_prob(actions)
class Critic(nn.Module):
    """State-value network V(s) for V-MPO; shares the encoder with the actor."""

    def __init__(self, mid_dim, shared_net):
        super(Critic, self).__init__()
        self.shared_net = shared_net
        self.net = nn.Sequential(
            nn.Linear(mid_dim, mid_dim),
            nn.ReLU(),
            nn.Linear(mid_dim, 1),
        )

    def forward(self, states):
        """Value estimates for a batch of states, shape (batch, 1)."""
        encoded = self.shared_net(states.squeeze())
        return self.net(encoded)
class AgentVMPO():
    """V-MPO agent (on-policy Maximum a Posteriori Policy Optimization).

    Holds a shared encoder feeding an actor (pi) and critic (phi), plus the
    Lagrangian multipliers required by the V-MPO objective: eta (temperature)
    and alpha_mean / alpha_cov (decoupled KL trust-region multipliers).
    """

    def __init__(self, state_dim, action_dim, mid_dim, device,
                 epsilon_of_eta,
                 epsilon_of_alpha_mean_floor=0.005,
                 epsilon_of_alpha_mean_ceil=1,
                 epsilon_of_alpha_cov_floor=5e-6,
                 epsilon_of_alpha_cov_ceil=5e-5,
                 entropy_coef=5e-3, lr=1e-4, seq_len=1, gamma=0.99, lambda_gae=.98, use_topk=False):
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.device = device
        self.entropy_coef = entropy_coef

        # actor and critic share this state encoder
        self.shared_net = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(), nn.Linear(mid_dim, mid_dim), nn.ReLU()).to(device)
        self.actor = ActorVMPO(action_dim, mid_dim, device, self.shared_net).to(device)  # pi
        self.critic = Critic(mid_dim, self.shared_net).to(device)  # phi
        self.old_actor = None  # frozen snapshot of the actor, set at the start of update()
        self.criterion = nn.SmoothL1Loss()

        self.epsilon_of_eta = epsilon_of_eta
        self.eta = nn.Parameter(torch.ones((1, ), device=device))  # eta temperature
        self.alpha_mean = nn.Parameter(torch.ones((1, ), device=device))
        self.alpha_cov = nn.Parameter(torch.ones((1, ), device=device))
        # KL bounds are drawn log-uniformly from [floor, ceil] at every alpha-loss evaluation
        self.epsilon_of_alpha_mean_log_floor = np.log(epsilon_of_alpha_mean_floor)
        self.epsilon_of_alpha_mean_log_ceil = np.log(epsilon_of_alpha_mean_ceil)
        self.epsilon_of_alpha_cov_log_floor = np.log(epsilon_of_alpha_cov_floor)
        self.epsilon_of_alpha_cov_log_ceil = np.log(epsilon_of_alpha_cov_ceil)

        self.seq_len = seq_len
        self.lr = lr
        self.gamma = gamma
        self.lambda_gae = lambda_gae
        self.use_topk = use_topk

        self.optim = torch.optim.Adam([
            {'params': self.eta, 'lr': self.lr},
            {'params': self.alpha_mean, 'lr': self.lr},
            {'params': self.alpha_cov, 'lr': self.lr},
            {'params': self.shared_net.parameters(), 'lr': self.lr},
            {'params': self.actor.nn_avg.parameters(), 'lr': self.lr},
            {'params': self.actor.action_log_std, 'lr': self.lr},
            {'params': self.critic.net.parameters(), 'lr': self.lr},
        ])
        # select the top-k or full-batch variants of the pi/eta losses once, at construction
        self.calc_pi_loss = self.calc_pi_loss_use_topk if self.use_topk else self.calc_pi_loss_not_use_topk
        self.calc_eta_loss = self.calc_eta_loss_use_topk if self.use_topk else self.calc_eta_loss_not_use_topk
        # batch-halving factors used to retry forward passes when GPU memory runs out
        self.re_calc_times1 = 1
        self.re_calc_times2 = 1
        self.re_calc_times_4_loss = 1

    def select_action(self, states):
        """Sample exploration actions from the current policy."""
        states = states.squeeze()
        return self.actor.get_action_4_explorer(states)

    @staticmethod
    def gen_random_from_log_uniform(log_floor, log_ceil):
        """Draw one sample from a log-uniform distribution over [exp(log_floor), exp(log_ceil)]."""
        return torch.distributions.Uniform(log_floor, log_ceil).sample().exp()

    def calc_pi_loss_not_use_topk(self, action_mean, action_cov, actions, advs_detached):
        """Policy loss over all samples: advantage-softmax weighted negative log-likelihood."""
        # Clamp eta into the range [1e-8, +infty). (Lagrangian multipliers are always positive)
        eta_detached = self.eta.detach().clamp_min(1e-8)
        psi_detached = nn.functional.softmax(advs_detached / eta_detached, dim=0).squeeze_()
        log_prob = self.actor.log_prob(action_mean, action_cov, actions)
        loss = -(psi_detached * log_prob).sum()
        return loss

    def calc_pi_loss_use_topk(self, action_mean, action_cov, actions, advs_detached):
        """Policy loss restricted to the top half of advantages (V-MPO's top-k filtering)."""
        # Clamp eta into the range [1e-8, +infty). (Lagrangian multipliers are always positive)
        eta_detached = self.eta.detach().clamp_min(1e-8)
        # Select top-k advantages.
        advs_topk_detached, idx_topk = torch.topk(advs_detached, advs_detached.numel() // 2, dim=0, sorted=False)
        psi_detached = nn.functional.softmax(advs_topk_detached / eta_detached, dim=0).squeeze_()  # bs//2
        log_prob = self.actor.log_prob(action_mean, action_cov, actions)[idx_topk]  # bs//2
        loss = -(psi_detached * log_prob).sum()
        return loss  # shape:[]

    def calc_eta_loss_not_use_topk(self, advs_detached):  # calc η temperature
        """Dual loss for the temperature eta, over all samples (log-sum-exp form)."""
        # Clamp eta into the range [1e-8, +infty). (Lagrangian multipliers eta are always positive)
        eta_clamp = self.eta.clamp_min(1e-8)
        D = advs_detached.numel()
        loss = eta_clamp * (self.epsilon_of_eta + (np.log(1 / D) + torch.logsumexp(advs_detached.squeeze() / eta_clamp, dim=0)))
        return loss

    def calc_eta_loss_use_topk(self, advs_detached):  # calc η temperature
        """Dual loss for the temperature eta, over the top half of advantages."""
        # Clamp eta into the range [1e-8, +infty). (Lagrangian multipliers are always positive)
        eta_clamp = self.eta.clamp_min(1e-8)
        # Select top-k advantages.
        advs_topk = torch.topk(advs_detached, advs_detached.shape[0] // 2, dim=0, sorted=False).values
        D_tilde = advs_detached.numel() // 2
        loss = eta_clamp * (self.epsilon_of_eta + (np.log(1 / D_tilde) + torch.logsumexp(advs_topk.squeeze() / eta_clamp, dim=0)))
        return loss

    def calc_alpha_loss(self, old_mean_detached, old_cov_detached, new_mean, new_cov):
        """KL trust-region loss with Lagrangian multipliers alpha_mean / alpha_cov
        (the mean-KL and covariance-KL terms are decoupled, MPO-style)."""
        # action_mean_of_old_pi_detached: old_mean_detached  bs,action_dim
        # action_std_of_old_pi_detached: old_cov_detached  action_dim,action_dim
        # action_mean_of_new_pi: new_mean  bs,action_dim
        # action_std_of_new_pi: new_cov  action_dim,action_dim

        # kl_mean
        inverse_cov_of_old_pi = old_cov_detached.inverse().unsqueeze_(0)  # 1, action_dim, action_dim
        tmp = (new_mean - old_mean_detached).unsqueeze_(-1)  # bs, action_dim, 1
        kl_mean = (0.5 * tmp.transpose(1, 2) @ inverse_cov_of_old_pi @ tmp).squeeze_()  # bs

        # kl_cov
        kl_cov = 0.5 * ((new_cov.inverse() @ old_cov_detached).trace() - self.action_dim + (torch.det(new_cov) / (torch.det(old_cov_detached) + 1e-6)).log())

        # # Clamps alpha into the range [ 1e-8, + infty ). (Lagrangian multipliers are always positive)
        alpha_m_clamp = self.alpha_mean.clamp_min(1e-8)
        alpha_c_clamp = self.alpha_cov.clamp_min(1e-8)

        # loss_of_kl_alpha_mean: detach trick trains alpha against the constraint and pi against the KL
        epsilon_of_alpha_mean = self.gen_random_from_log_uniform(self.epsilon_of_alpha_mean_log_floor, self.epsilon_of_alpha_mean_log_ceil)
        loss_of_kl_alpha_mean = alpha_m_clamp*(epsilon_of_alpha_mean-kl_mean.detach())+alpha_m_clamp.detach() * kl_mean

        # loss_of_kl_alpha_cov
        epsilon_of_alpha_cov = self.gen_random_from_log_uniform(self.epsilon_of_alpha_cov_log_floor, self.epsilon_of_alpha_cov_log_ceil)
        loss_of_kl_alpha_cov = alpha_c_clamp * (epsilon_of_alpha_cov - kl_cov.detach()) + alpha_c_clamp.detach() * kl_cov

        loss = loss_of_kl_alpha_mean.mean()+loss_of_kl_alpha_cov.mean()
        return loss

    def calc_critic_loss(self, v_predict, v_label_detached):  # mean v(phi) of old pi
        """Smooth-L1 loss between predicted values and (detached) return targets."""
        loss = self.criterion(v_predict, v_label_detached)
        return loss

    def calc_entropy_loss(self, action_mean, action_cov):  # author not mention it, add it 2 prevent premature
        """Entropy bonus (negated, so minimizing increases entropy) to delay premature convergence."""
        loss = -self.entropy_coef * self.actor.entropy(action_mean, action_cov).mean()
        return loss

    @util.timeit()
    def update(self, buffer, repeat_times):
        """One V-MPO update pass over the whole on-policy buffer.

        Returns (pi_loss, critic_loss, entropy_loss) of the final minibatch.
        """
        self.old_actor = copy.deepcopy(self.actor)  # alias targ_actor need to fixed when updating
        buffer_size = buffer.buffer_size
        bs = buffer.bs
        with torch.no_grad():
            states, actions = buffer.get_whole_memo()[:2]
            indices = torch.arange(0, buffer_size, 1, device=self.device, dtype=torch.long)
            states = buffer.reform_to_seq_state_base_on_indice(indices)  # to seq_state
            # calc action_mean for old actor
            while True:  # set a smaller 'bs: batch size' when out of GPU memory.
                try:
                    bs_ = buffer_size // self.re_calc_times1
                    action_mean_from_old_pi = torch.cat([self.old_actor.get_mean(states[i:i + bs_]) for i in range(0, buffer_size, bs_)], dim=0).detach()
                    break
                except:
                    # halve the chunk size and retry (presumably on CUDA OOM)
                    self.re_calc_times1 *= 2
                    print(f're_calc_times1 = {self.re_calc_times1}')
            # calc vals & advs
            while True:  # set a smaller 'bs: batch size' when out of GPU memory.
                try:
                    bs_ = buffer_size // self.re_calc_times2
                    vals = torch.cat([self.critic(states[i:i + bs_]) for i in range(0, buffer_size, bs_)], dim=0).squeeze()  # bs
                    break
                except:
                    self.re_calc_times2 *= 2
                    print(f're_calc_times2={self.re_calc_times2}')
            advs = buffer.calc_gae(vals, self.gamma, self.lambda_gae, calc_rSum=False)  # !todo bs
            # advs = (advs - advs.mean()) / (advs.std() + 1e-7)#todo bs
            Gt = advs+vals  # return targets for the critic (advantages before normalization)
            advs = (advs - advs.mean()) / (advs.std() + 1e-7)  # todo bs
            action_cov_of_old_pi = self.old_actor.get_cov().detach()
        for _ in range(int(repeat_times * buffer_size / bs)):
            indices = torch.randint(buffer_size, size=(bs,), device=self.device)
            while True:
                try:
                    # split each minibatch into sub-chunks, accumulating gradients across them
                    bs4gpuTrick = bs // self.re_calc_times_4_loss
                    for j in range(self.re_calc_times_4_loss):
                        idx = indices[bs4gpuTrick * j:bs4gpuTrick * (j + 1)]
                        state_minibatch = states[idx]  # detached
                        action_minibatch = actions[idx]  # detached
                        adv_minibatch = advs[idx]  # detached, shape: bs
                        old_pi_mean_minibatch = action_mean_from_old_pi[idx]  # detached
                        new_pi_mean_minibatch = self.actor.get_mean(state_minibatch)
                        new_pi_cov_minibatch = self.actor.get_cov()
                        v_predict_minibatch = self.critic(state_minibatch)
                        v_label_minibatch = Gt[idx].unsqueeze(-1)  # bs,1
                        pi_loss = self.calc_pi_loss(new_pi_mean_minibatch, new_pi_cov_minibatch, action_minibatch, adv_minibatch)
                        eta_loss = self.calc_eta_loss(adv_minibatch)
                        alpha_loss = self.calc_alpha_loss(old_pi_mean_minibatch, action_cov_of_old_pi, new_pi_mean_minibatch, new_pi_cov_minibatch)
                        critic_loss = self.calc_critic_loss(v_predict_minibatch, v_label_minibatch)
                        entropy_loss = self.calc_entropy_loss(new_pi_mean_minibatch, new_pi_cov_minibatch)  # prevent premature. (not mentioned in VMPO paper)
                        total_loss = pi_loss + eta_loss+alpha_loss + critic_loss + entropy_loss
                        if j == 0:
                            self.optim.zero_grad()
                        total_loss.backward()
                        if j + 1 == self.re_calc_times_4_loss:
                            self.optim.step()
                    break
                except Exception as e:
                    self.re_calc_times_4_loss *= 2
                    print(e)
                    # NOTE(review): exit() aborts the process here, which makes the retry
                    # print below unreachable — confirm whether the OOM retry is meant
                    # to be disabled.
                    exit()
                    print(f'self.re_calc_times_4_loss = {self.re_calc_times_4_loss}')
        return pi_loss.item(), critic_loss.item(), entropy_loss.item()
| 13,408 | 49.033582 | 158 | py |
import copy
import torch as th
from torch.optim import RMSprop
from elegantrl.agents.net import VDN
class AgentVDN:
    """
    AgentVDN

    "Value-Decomposition Networks For Cooperative Multi-Agent Learning". Peter Sunehag. et al.. 2017.

    :param mac: multi agent controller (holds the per-agent Q networks)
    :param scheme: data scheme stored in the buffer
    :param logger: log object, record training information
    :param args: parameters related to training
    """

    def __init__(self, mac, scheme, logger, args):
        self.args = args
        self.mac = mac
        self.logger = logger

        # NOTE(review): this wraps parameters() iterators in a list — confirm the
        # optimizer accepts this shape (vs. list(mac.parameters())).
        self.params = [mac.parameters()]

        self.last_target_update_episode = 0

        self.mixer = None
        if args.mixer is not None:
            # NOTE(review): this comparison is a no-op — likely meant
            # `assert args.mixer == "vdn"`.
            args.mixer == "vdn"
            self.mixer = VDN()
            self.params += [self.mixer.parameters()]
            self.target_mixer = copy.deepcopy(self.mixer)

        self.optimiser = RMSprop(
            params=self.params, lr=args.lr, alpha=args.optim_alpha, eps=args.optim_eps
        )

        # a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
        self.target_mac = copy.deepcopy(mac)

        self.log_stats_t = -self.args.learner_log_interval - 1

    def train(self, batch, t_env: int, episode_num: int):
        """
        Update the neural networks with one batch of episodes.

        :param batch: episode batch sampled from the replay buffer.
        :param t_env: total environment steps so far (drives logging).
        :param episode_num: episode counter (drives periodic target updates).
        """
        # Get the relevant quantities
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        # zero out timesteps after an episode has terminated
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])
        avail_actions = batch["avail_actions"]

        # Calculate estimated Q-Values, rolling the recurrent MAC over time
        mac_out = []
        self.mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            agent_outs = self.mac.forward(batch, t=t)
            mac_out.append(agent_outs)
        mac_out = th.stack(mac_out, dim=1)  # Concat over time

        # Pick the Q-Values for the actions taken by each agent
        chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(
            3
        )  # Remove the last dim

        # Calculate the Q-Values necessary for the target
        target_mac_out = []
        self.target_mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            target_agent_outs = self.target_mac.forward(batch, t=t)
            target_mac_out.append(target_agent_outs)

        # We don't need the first timesteps Q-Value estimate for calculating targets
        target_mac_out = th.stack(target_mac_out[1:], dim=1)  # Concat across time

        # Mask out unavailable actions so they are never selected as the max
        target_mac_out[avail_actions[:, 1:] == 0] = -9999999

        # Max over target Q-Values
        if self.args.double_q:
            # Get actions that maximise live Q (for double q-learning)
            mac_out_detach = mac_out.clone().detach()
            mac_out_detach[avail_actions == 0] = -9999999
            cur_max_actions = mac_out_detach[:, 1:].max(dim=3, keepdim=True)[1]
            target_max_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)
        else:
            target_max_qvals = target_mac_out.max(dim=3)[0]

        # Mix: VDN sums per-agent Q-values into a joint team Q-value
        if self.mixer is not None:
            chosen_action_qvals = self.mixer(
                chosen_action_qvals, batch["state"][:, :-1]
            )
            target_max_qvals = self.target_mixer(
                target_max_qvals, batch["state"][:, 1:]
            )

        # Calculate 1-step Q-Learning targets
        targets = rewards + self.args.gamma * (1 - terminated) * target_max_qvals

        # Td-error (targets are treated as constants)
        td_error = chosen_action_qvals - targets.detach()

        mask = mask.expand_as(td_error)

        # 0-out the targets that came from padded data
        masked_td_error = td_error * mask

        # Normal L2 loss, take mean over actual data
        loss = (masked_td_error**2).sum() / mask.sum()

        # Optimise
        self.optimiser.zero_grad()
        loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(self.params, self.args.grad_norm_clip)
        self.optimiser.step()

        # periodic hard update of the target networks
        if (
            episode_num - self.last_target_update_episode
        ) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = episode_num

        # throttled logging of training statistics
        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("loss", loss.item(), t_env)
            self.logger.log_stat("grad_norm", grad_norm, t_env)
            mask_elems = mask.sum().item()
            self.logger.log_stat(
                "td_error_abs", (masked_td_error.abs().sum().item() / mask_elems), t_env
            )
            self.logger.log_stat(
                "q_taken_mean",
                (chosen_action_qvals * mask).sum().item()
                / (mask_elems * self.args.n_agents),
                t_env,
            )
            self.logger.log_stat(
                "target_mean",
                (targets * mask).sum().item() / (mask_elems * self.args.n_agents),
                t_env,
            )
            self.log_stats_t = t_env

    def _update_targets(self):
        """Hard-copy the live MAC (and mixer, if any) into the target networks."""
        self.target_mac.load_state(self.mac)
        if self.mixer is not None:
            self.target_mixer.load_state_dict(self.mixer.state_dict())
        self.logger.console_logger.info("Updated target network")
| 5,640 | 34.702532 | 102 | py |
import math
import torch
import torch.nn as nn
from torch import Tensor
from torch.distributions.normal import Normal
"""DQN"""
class QNetBase(nn.Module):  # nn.Module is a standard PyTorch Network
    """Shared base for Q-networks: holds running state/value normalization
    statistics as non-trainable parameters (so they are checkpointed with the
    network) plus the exploration rate used by subclasses' get_action()."""

    def __init__(self, state_dim: int, action_dim: int):
        super().__init__()
        self.explore_rate = 0.125  # probability of taking a non-greedy action
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.net = None  # subclasses assign e.g. build_mlp(dims=[state_dim + action_dim, *dims, 1])

        self.state_avg = nn.Parameter(torch.zeros((state_dim,)), requires_grad=False)
        self.state_std = nn.Parameter(torch.ones((state_dim,)), requires_grad=False)
        self.value_avg = nn.Parameter(torch.zeros((1,)), requires_grad=False)
        self.value_std = nn.Parameter(torch.ones((1,)), requires_grad=False)

    def state_norm(self, state: Tensor) -> Tensor:
        """Normalize a raw state with the stored running statistics."""
        centered = state - self.state_avg
        return centered / self.state_std

    def value_re_norm(self, value: Tensor) -> Tensor:
        """Map a normalized value estimate back to the original reward scale."""
        return value * self.value_std + self.value_avg
class QNet(QNetBase):
    """Plain DQN Q-network: a single MLP mapping a state to one Q-value per action."""

    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.net = build_mlp(dims=[state_dim, *dims, action_dim])
        layer_init_with_orthogonal(self.net[-1], std=0.1)

    def forward(self, state):
        """Q-values for every action, de-normalized back to reward scale."""
        q_values = self.net(self.state_norm(state))
        return self.value_re_norm(q_values)  # Q values for multiple actions

    def get_action(self, state):
        """Epsilon-greedy action selection for exploration, shape (batch, 1)."""
        state = self.state_norm(state)
        if self.explore_rate < torch.rand(1):  # exploit: greedy action
            return self.net(state).argmax(dim=1, keepdim=True)
        # explore: uniform random action
        # NOTE(review): random actions are created on CPU — confirm device handling upstream
        return torch.randint(self.action_dim, size=(state.shape[0], 1))
class QNetDuel(QNetBase):  # Dueling DQN
    """Dueling DQN: a shared state encoder feeding a per-action head and a
    scalar head, combined with the mean-subtraction dueling formula."""

    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.net_state = build_mlp(dims=[state_dim, *dims])
        self.net_adv = build_mlp(dims=[dims[-1], 1])  # advantage value
        self.net_val = build_mlp(dims=[dims[-1], action_dim])  # Q value

        layer_init_with_orthogonal(self.net_adv[-1], std=0.1)
        layer_init_with_orthogonal(self.net_val[-1], std=0.1)

    def forward(self, state):
        """Dueling Q-values: (per-action head - its mean) + scalar head, de-normalized."""
        encoded = self.net_state(self.state_norm(state))
        per_action = self.net_val(encoded)
        scalar = self.net_adv(encoded)
        dueling_q = per_action - per_action.mean(dim=1, keepdim=True) + scalar
        return self.value_re_norm(dueling_q)

    def get_action(self, state):
        """Epsilon-greedy: argmax of the per-action head, else a random action."""
        state = self.state_norm(state)
        if self.explore_rate < torch.rand(1):
            encoded = self.net_state(state)
            return self.net_val(encoded).argmax(dim=1, keepdim=True)
        return torch.randint(self.action_dim, size=(state.shape[0], 1))
class QNetTwin(QNetBase):  # Double DQN
    """Twin-head Q-network (Double DQN style): one shared state encoder and two
    independent Q heads used as twin critics."""

    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.net_state = build_mlp(dims=[state_dim, *dims])
        self.net_val1 = build_mlp(dims=[dims[-1], action_dim])  # Q value 1
        self.net_val2 = build_mlp(dims=[dims[-1], action_dim])  # Q value 2
        self.soft_max = nn.Softmax(dim=1)

        layer_init_with_orthogonal(self.net_val1[-1], std=0.1)
        layer_init_with_orthogonal(self.net_val2[-1], std=0.1)

    def forward(self, state):
        """Q-values from the first head only.
        NOTE(review): unlike get_q1_q2, this skips value_re_norm — confirm intended."""
        encoded = self.net_state(self.state_norm(state))
        return self.net_val1(encoded)  # one group of Q values

    def get_q1_q2(self, state):
        """Q-values from both heads, de-normalized (for twin-critic targets)."""
        encoded = self.net_state(self.state_norm(state))
        q_val1 = self.value_re_norm(self.net_val1(encoded))
        q_val2 = self.value_re_norm(self.net_val2(encoded))
        return q_val1, q_val2  # two groups of Q values

    def get_action(self, state):
        """Greedy action with probability ~(1 - explore_rate); otherwise sample
        an action from softmax(Q) of the first head."""
        encoded = self.net_state(self.state_norm(state))
        q_val = self.net_val1(encoded)
        if self.explore_rate < torch.rand(1):
            return q_val.argmax(dim=1, keepdim=True)
        a_prob = self.soft_max(q_val)
        return torch.multinomial(a_prob, num_samples=1)
class QNetTwinDuel(QNetBase):  # D3QN: Dueling Double DQN
    """D3QN: two dueling head pairs (per-action + scalar) over a shared state
    encoder, used as twin critics."""

    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.net_state = build_mlp(dims=[state_dim, *dims])
        self.net_adv1 = build_mlp(dims=[dims[-1], 1])  # advantage value 1
        self.net_val1 = build_mlp(dims=[dims[-1], action_dim])  # Q value 1
        self.net_adv2 = build_mlp(dims=[dims[-1], 1])  # advantage value 2
        self.net_val2 = build_mlp(dims=[dims[-1], action_dim])  # Q value 2
        self.soft_max = nn.Softmax(dim=1)

        layer_init_with_orthogonal(self.net_adv1[-1], std=0.1)
        layer_init_with_orthogonal(self.net_val1[-1], std=0.1)
        layer_init_with_orthogonal(self.net_adv2[-1], std=0.1)
        layer_init_with_orthogonal(self.net_val2[-1], std=0.1)

    def _duel(self, q_val, q_adv):
        """Dueling combination (mean-subtracted per-action head + scalar head),
        followed by de-normalization."""
        return self.value_re_norm(q_val - q_val.mean(dim=1, keepdim=True) + q_adv)

    def forward(self, state):
        """Dueling Q-values from the first pair of heads."""
        encoded = self.net_state(self.state_norm(state))
        return self._duel(self.net_val1(encoded), self.net_adv1(encoded))

    def get_q1_q2(self, state):
        """Dueling Q-values from both pairs of heads (twin critics)."""
        encoded = self.net_state(self.state_norm(state))
        q_duel1 = self._duel(self.net_val1(encoded), self.net_adv1(encoded))
        q_duel2 = self._duel(self.net_val2(encoded), self.net_adv2(encoded))
        return q_duel1, q_duel2  # two dueling Q values

    def get_action(self, state):
        """Greedy on head 1 with probability ~(1 - explore_rate); otherwise
        sample from softmax of head 1's raw Q-values."""
        encoded = self.net_state(self.state_norm(state))
        q_val = self.net_val1(encoded)
        if self.explore_rate < torch.rand(1):
            return q_val.argmax(dim=1, keepdim=True)
        return torch.multinomial(self.soft_max(q_val), num_samples=1)
"""Actor (policy network)"""
class ActorBase(nn.Module):
    """Shared base for policy networks: stores the action-distribution class,
    exploration-noise setting, and running state-normalization statistics."""

    def __init__(self, state_dim: int, action_dim: int):
        super().__init__()
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.net = None  # subclasses assign e.g. build_mlp(dims=[state_dim, *dims, action_dim])
        self.explore_noise_std = None  # standard deviation of exploration action noise
        self.ActionDist = torch.distributions.normal.Normal

        self.state_avg = nn.Parameter(torch.zeros((state_dim,)), requires_grad=False)
        self.state_std = nn.Parameter(torch.ones((state_dim,)), requires_grad=False)

    def state_norm(self, state: Tensor) -> Tensor:
        """Normalize a raw state with the stored running statistics."""
        centered = state - self.state_avg
        return centered / self.state_std
class Actor(ActorBase):
    """Deterministic policy network (DDPG/TD3 style) producing tanh-squashed
    actions in [-1, 1]."""

    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.net = build_mlp(dims=[state_dim, *dims, action_dim])
        layer_init_with_orthogonal(self.net[-1], std=0.1)

        self.explore_noise_std = 0.1  # standard deviation of exploration action noise

    def forward(self, state: Tensor) -> Tensor:
        """Deterministic action for evaluation."""
        return self.net(self.state_norm(state)).tanh()  # action.tanh()

    def get_action(self, state: Tensor) -> Tensor:  # for exploration
        """Deterministic action plus clipped Gaussian noise of the default std."""
        return self.get_action_noise(state, self.explore_noise_std)

    def get_action_noise(self, state: Tensor, action_std: float) -> Tensor:
        """Deterministic action plus clipped Gaussian noise of the given std;
        the result is clamped back into [-1, 1]."""
        action = self.net(self.state_norm(state)).tanh()
        noise = (torch.randn_like(action) * action_std).clamp(-0.5, 0.5)
        return (action + noise).clamp(-1.0, 1.0)
class ActorSAC(ActorBase):
    """Stochastic Gaussian actor for SAC with tanh-squashed actions."""

    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.net_s = build_mlp(dims=[state_dim, *dims], if_raw_out=False)  # network of encoded state
        self.net_a = build_mlp(dims=[dims[-1], action_dim * 2])  # the average and log_std of action
        layer_init_with_orthogonal(self.net_a[-1], std=0.1)

    def forward(self, state):
        """Return the deterministic (mean) action, squashed into (-1, 1)."""
        state = self.state_norm(state)
        s_enc = self.net_s(state)  # encoded state
        a_avg = self.net_a(s_enc)[:, :self.action_dim]
        return a_avg.tanh()  # action

    def get_action(self, state):
        """Sample a re-parameterized action for exploration."""
        state = self.state_norm(state)
        s_enc = self.net_s(state)  # encoded state
        a_avg, a_std_log = self.net_a(s_enc).chunk(2, dim=1)
        a_std = a_std_log.clamp(-16, 2).exp()
        dist = Normal(a_avg, a_std)
        return dist.rsample().tanh()  # action (re-parameterize)

    def get_action_logprob(self, state):
        """Sample an action and return it together with its log-probability.

        Returns:
            (action_tanh, logprob): tanh-squashed sampled action and its
            log-probability corrected for the tanh change of variables,
            summed over action dimensions.
        """
        state = self.state_norm(state)
        s_enc = self.net_s(state)  # encoded state
        a_avg, a_std_log = self.net_a(s_enc).chunk(2, dim=1)
        a_std = a_std_log.clamp(-16, 2).exp()
        dist = Normal(a_avg, a_std)
        action = dist.rsample()
        action_tanh = action.tanh()
        # BUG FIX: evaluate the Gaussian log-density at the *sampled* action.
        # `dist.log_prob(a_avg)` evaluated it at the mean, making the log-prob
        # independent of the drawn sample.
        logprob = dist.log_prob(action)
        logprob -= (-action_tanh.pow(2) + 1.000001).log()  # tanh change-of-variables correction
        return action_tanh, logprob.sum(1)
class ActorFixSAC(ActorSAC):
    """SAC actor variant with a numerically stable softplus log-prob correction."""

    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__(dims=dims, state_dim=state_dim, action_dim=action_dim)
        self.soft_plus = torch.nn.Softplus()

    def get_action_logprob(self, state):
        """Sample an action and return it together with its log-probability.

        The tanh correction log(1 - tanh(a)^2) is rewritten as
        2 * (log 2 - a - softplus(-2a)) for numerical stability.
        """
        state = self.state_norm(state)
        s_enc = self.net_s(state)  # encoded state
        a_avg, a_std_log = self.net_a(s_enc).chunk(2, dim=1)
        a_std = a_std_log.clamp(-16, 2).exp()
        dist = Normal(a_avg, a_std)
        action = dist.rsample()
        # BUG FIX: evaluate the log-density at the sampled action, not the mean.
        logprob = dist.log_prob(action)
        logprob -= 2 * (math.log(2) - action - self.soft_plus(action * -2))  # fix logprob using SoftPlus
        return action.tanh(), logprob.sum(1)
class ActorPPO(ActorBase):
    """Gaussian policy for PPO with a state-independent, learnable log-std."""

    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.net = build_mlp(dims=[state_dim, *dims, action_dim])
        layer_init_with_orthogonal(self.net[-1], std=0.1)
        # Trainable log standard deviation, shared across all states.
        self.action_std_log = nn.Parameter(torch.zeros((1, action_dim)), requires_grad=True)

    def forward(self, state: Tensor) -> Tensor:
        """Return the deterministic (mean) action squashed into (-1, 1)."""
        return self.net(self.state_norm(state)).tanh()

    def _distribution(self, state: Tensor):
        """Build the Gaussian action distribution for `state`."""
        action_avg = self.net(self.state_norm(state))
        return self.ActionDist(action_avg, self.action_std_log.exp())

    def get_action(self, state: Tensor) -> (Tensor, Tensor):  # for exploration
        """Sample an unbounded action and return it with its log-probability."""
        dist = self._distribution(state)
        action = dist.sample()
        return action, dist.log_prob(action).sum(1)

    def get_logprob_entropy(self, state: Tensor, action: Tensor) -> (Tensor, Tensor):
        """Return the log-probability of `action` and the policy entropy."""
        dist = self._distribution(state)
        return dist.log_prob(action).sum(1), dist.entropy().sum(1)

    @staticmethod
    def convert_action_for_env(action: Tensor) -> Tensor:
        """Squash an unbounded sampled action into the env's (-1, 1) range."""
        return action.tanh()
class ActorDiscretePPO(ActorBase):
    """Categorical policy for PPO over a discrete action space."""

    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.net = build_mlp(dims=[state_dim, *dims, action_dim])
        layer_init_with_orthogonal(self.net[-1], std=0.1)
        self.ActionDist = torch.distributions.Categorical
        self.soft_max = nn.Softmax(dim=-1)

    def forward(self, state: Tensor) -> Tensor:
        """Return the greedy discrete action (index of the largest logit)."""
        logits = self.net(self.state_norm(state))  # action scores without softmax
        return logits.argmax(dim=1)

    def get_action(self, state: Tensor) -> (Tensor, Tensor):
        """Sample a discrete action and return it with its log-probability."""
        probs = self.soft_max(self.net(self.state_norm(state)))
        dist = self.ActionDist(probs)
        action = dist.sample()
        return action, dist.log_prob(action)

    def get_logprob_entropy(self, state: Tensor, action: Tensor) -> (Tensor, Tensor):
        """Return the log-probability of `action` (shape (batch, 1), int dtype) and entropy."""
        probs = self.soft_max(self.net(self.state_norm(state)))
        dist = self.ActionDist(probs)
        return dist.log_prob(action.squeeze(1)), dist.entropy()

    @staticmethod
    def convert_action_for_env(action: Tensor) -> Tensor:
        """Cast sampled actions to integer indices for the environment."""
        return action.long()
"""Critic (value network)"""
class CriticBase(nn.Module):  # todo state_norm, value_norm
    """Common base for critic (value) networks with state/value normalization."""

    def __init__(self, state_dim: int, action_dim: int):
        super().__init__()
        self.state_dim = state_dim
        self.action_dim = action_dim
        # Subclasses assign the real MLP, e.g. build_mlp([state_dim + action_dim, *dims, 1]).
        self.net = None
        # Frozen normalization statistics, updated externally by the agent.
        self.state_avg = nn.Parameter(torch.zeros((state_dim,)), requires_grad=False)
        self.state_std = nn.Parameter(torch.ones((state_dim,)), requires_grad=False)
        self.value_avg = nn.Parameter(torch.zeros((1,)), requires_grad=False)
        self.value_std = nn.Parameter(torch.ones((1,)), requires_grad=False)

    def state_norm(self, state: Tensor) -> Tensor:
        """Standardize a raw state with the stored running statistics."""
        return state.sub(self.state_avg).div(self.state_std)  # todo state_norm

    def value_re_norm(self, value: Tensor) -> Tensor:
        """Map a normalized value prediction back onto the reward scale."""
        return value.mul(self.value_std).add(self.value_avg)  # todo value_norm
class Critic(CriticBase):
    """Q-value critic: maps a (state, action) pair to a scalar Q estimate."""

    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.net = build_mlp(dims=[state_dim + action_dim, *dims, 1])
        layer_init_with_orthogonal(self.net[-1], std=0.5)

    def forward(self, state: Tensor, action: Tensor) -> Tensor:
        """Return Q(s, a) with shape (batch,)."""
        sa = torch.cat((self.state_norm(state), action), dim=1)
        values = self.value_re_norm(self.net(sa))
        return values.squeeze(dim=1)  # q value
class CriticTwin(CriticBase):  # shared parameter
    """Twin-Q critic: one shared body whose head emits two Q estimates."""

    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.net = build_mlp(dims=[state_dim + action_dim, *dims, 2])
        layer_init_with_orthogonal(self.net[-1], std=0.5)

    def _two_q(self, state, action):
        """Return the de-normalized (batch, 2) tensor holding both Q heads."""
        sa = torch.cat((self.state_norm(state), action), dim=1)
        return self.value_re_norm(self.net(sa))

    def forward(self, state, action):
        """Return the mean of the two Q estimates."""
        return self._two_q(state, action).mean(dim=1)  # mean Q value

    def get_q_min(self, state, action):
        """Return the element-wise minimum of the two Q estimates."""
        return self._two_q(state, action).min(dim=1)[0]  # min Q value

    def get_q1_q2(self, state, action):
        """Return both Q estimates as a pair of (batch,) tensors."""
        values = self._two_q(state, action)
        return values[:, 0], values[:, 1]  # two Q values
class CriticPPO(CriticBase):
    """State-value critic for PPO: maps a state to a scalar V estimate."""

    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.net = build_mlp(dims=[state_dim, *dims, 1])
        layer_init_with_orthogonal(self.net[-1], std=0.5)

    def forward(self, state: Tensor) -> Tensor:
        """Return V(s) with shape (batch,)."""
        raw_value = self.net(self.state_norm(state))
        return self.value_re_norm(raw_value).squeeze(1)  # state value
"""utils"""
def build_mlp(dims: [int], activation: nn = None, if_raw_out: bool = True) -> nn.Sequential:
    """Build an MLP (MultiLayer Perceptron).

    dims: layer widths; `dims[-1]` is the output dimension of the network.
    activation: activation class inserted after each linear layer (default nn.ReLU).
    if_raw_out: if True, drop the activation after the final layer so the
        network produces raw (unbounded) outputs.
    """
    act_class = nn.ReLU if activation is None else activation
    layers = []
    for in_dim, out_dim in zip(dims[:-1], dims[1:]):
        layers.append(nn.Linear(in_dim, out_dim))
        layers.append(act_class())
    if if_raw_out:
        layers.pop()  # keep raw output: no activation on the last layer
    return nn.Sequential(*layers)
def layer_init_with_orthogonal(layer, std=1.0, bias_const=1e-6):
    """Initialize a linear layer in place: orthogonal weight (gain `std`), constant bias."""
    torch.nn.init.orthogonal_(layer.weight, gain=std)
    torch.nn.init.constant_(layer.bias, val=bias_const)
class NnReshape(nn.Module):
    """Reshape layer: views the input as (batch, *args), keeping the batch dim."""

    def __init__(self, *args):
        super().__init__()
        self.args = args

    def forward(self, x):
        batch_size = x.size(0)
        return x.view((batch_size,) + self.args)
class DenseNet(nn.Module):  # plan to hyper-param: layer_number
    """Two-stage densely connected block.

    Each stage concatenates its input with a Hardswish-MLP transform of it,
    growing the feature width from `lay_dim` to `4 * lay_dim`.
    """

    def __init__(self, lay_dim):
        super().__init__()
        self.dense1 = nn.Sequential(nn.Linear(lay_dim, lay_dim), nn.Hardswish())
        self.dense2 = nn.Sequential(nn.Linear(lay_dim * 2, lay_dim * 2), nn.Hardswish())
        self.inp_dim = lay_dim
        self.out_dim = lay_dim * 4

    def forward(self, x1):  # x1.shape == (-1, lay_dim)
        x2 = torch.cat((x1, self.dense1(x1)), dim=1)  # width: lay_dim * 2
        x3 = torch.cat((x2, self.dense2(x2)), dim=1)  # width: lay_dim * 4
        return x3
class ConvNet(nn.Module):  # pixel-level state encoder
    """Convolutional encoder for image observations.

    Maps a uint8 image batch laid out as (batch, height, width, channels) —
    see `check()` — to a flat feature vector of size `out_dim`. Only 224x224
    and 112x112 inputs are supported; other sizes fail the assert below.
    """

    def __init__(self, inp_dim, out_dim, image_size=224):
        """inp_dim: input channels. out_dim: output feature size. image_size: 224 or 112."""
        super().__init__()
        if image_size == 224:
            self.net = nn.Sequential(  # size==(batch_size, inp_dim, 224, 224)
                nn.Conv2d(inp_dim, 32, (5, 5), stride=(2, 2), bias=False),
                nn.ReLU(inplace=True),  # size=110
                nn.Conv2d(32, 48, (3, 3), stride=(2, 2)),
                nn.ReLU(inplace=True),  # size=54
                nn.Conv2d(48, 64, (3, 3), stride=(2, 2)),
                nn.ReLU(inplace=True),  # size=26
                nn.Conv2d(64, 96, (3, 3), stride=(2, 2)),
                nn.ReLU(inplace=True),  # size=12
                nn.Conv2d(96, 128, (3, 3), stride=(2, 2)),
                nn.ReLU(inplace=True),  # size=5
                nn.Conv2d(128, 192, (5, 5), stride=(1, 1)),
                nn.ReLU(inplace=True),  # size=1
                NnReshape(-1),  # size (batch_size, 1024, 1, 1) ==> (batch_size, 1024)
                nn.Linear(192, out_dim),  # size==(batch_size, out_dim)
            )
        elif image_size == 112:
            self.net = nn.Sequential(  # size==(batch_size, inp_dim, 112, 112)
                nn.Conv2d(inp_dim, 32, (5, 5), stride=(2, 2), bias=False),
                nn.ReLU(inplace=True),  # size=54
                nn.Conv2d(32, 48, (3, 3), stride=(2, 2)),
                nn.ReLU(inplace=True),  # size=26
                nn.Conv2d(48, 64, (3, 3), stride=(2, 2)),
                nn.ReLU(inplace=True),  # size=12
                nn.Conv2d(64, 96, (3, 3), stride=(2, 2)),
                nn.ReLU(inplace=True),  # size=5
                nn.Conv2d(96, 128, (5, 5), stride=(1, 1)),
                nn.ReLU(inplace=True),  # size=1
                NnReshape(-1),  # size (batch_size, 1024, 1, 1) ==> (batch_size, 1024)
                nn.Linear(128, out_dim),  # size==(batch_size, out_dim)
            )
        else:
            assert image_size in {224, 112}

    def forward(self, x):
        # assert x.shape == (batch_size, inp_dim, image_size, image_size)
        # Channels-last input (as built in `check()`): move channels to dim 1 for Conv2d.
        x = x.permute(0, 3, 1, 2)
        # Rescale uint8 pixel values [0, 255] to roughly [-1, 1).
        x = x / 128.0 - 1.0
        return self.net(x)

    @staticmethod
    def check():
        """Smoke test: push a dummy uint8 image batch through the encoder and print shapes."""
        inp_dim = 3
        out_dim = 32
        batch_size = 2
        image_size = [224, 112][1]
        # from elegantrl.net import Conv2dNet
        net = ConvNet(inp_dim, out_dim, image_size)
        image = torch.ones((batch_size, image_size, image_size, inp_dim), dtype=torch.uint8) * 255
        print(image.shape)
        output = net(image)
        print(output.shape)
| 21,053 | 39.102857 | 115 | py |
ElegantRL | ElegantRL-master/elegantrl/agents/AgentDQN.py | import torch
from typing import Tuple
from copy import deepcopy
from torch import Tensor
from elegantrl.agents.AgentBase import AgentBase
from elegantrl.agents.net import QNet, QNetDuel
from elegantrl.agents.net import QNetTwin, QNetTwinDuel
from elegantrl.train.config import Config
from elegantrl.train.replay_buffer import ReplayBuffer
class AgentDQN(AgentBase):
    """
    Deep Q-Network algorithm. “Human-Level Control Through Deep Reinforcement Learning”. Mnih V. et al.. 2015.

    net_dims: the middle layer dimension of MLP (MultiLayer Perceptron)
    state_dim: the dimension of state (the number of state vector)
    action_dim: the dimension of action (or the number of discrete action)
    gpu_id: the gpu_id of the training device. Use CPU when cuda is not available.
    args: the arguments for agent training. `args = Config()`
    """

    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        self.act_class = getattr(self, "act_class", QNet)
        self.cri_class = None  # means `self.cri = self.act`
        super().__init__(net_dims=net_dims, state_dim=state_dim, action_dim=action_dim, gpu_id=gpu_id, args=args)
        self.act_target = self.cri_target = deepcopy(self.act)

        self.act.explore_rate = getattr(args, "explore_rate", 0.25)
        # Using ϵ-greedy to select uniformly random actions for exploration with `explore_rate` probability.

    def explore_one_env(self, env, horizon_len: int, if_random: bool = False) -> Tuple[Tensor, ...]:
        """
        Collect trajectories through the actor-environment interaction for a **single** environment instance.

        env: RL training environment. env.reset() env.step().
        horizon_len: collect horizon_len step while exploring to update networks
        if_random: uses random action for warm-up exploration
        return: `(states, actions, rewards, undones)` for off-policy
            num_envs == 1
            states.shape == (horizon_len, num_envs, state_dim)
            actions.shape == (horizon_len, num_envs, action_dim)
            rewards.shape == (horizon_len, num_envs)
            undones.shape == (horizon_len, num_envs)
        """
        states = torch.zeros((horizon_len, self.num_envs, self.state_dim), dtype=torch.float32).to(self.device)
        actions = torch.zeros((horizon_len, self.num_envs, 1), dtype=torch.int32).to(self.device)  # different
        rewards = torch.zeros((horizon_len, self.num_envs), dtype=torch.float32).to(self.device)
        dones = torch.zeros((horizon_len, self.num_envs), dtype=torch.bool).to(self.device)

        state = self.last_state  # state.shape == (1, state_dim) for a single env.
        get_action = self.act.get_action
        for t in range(horizon_len):
            # NOTE(review): the random action tensor is created on CPU; the slice
            # assignment `actions[t] = action` copies it to `self.device`.
            action = torch.randint(self.action_dim, size=(1, 1)) if if_random else get_action(state)  # different
            states[t] = state

            ary_action = action[0, 0].detach().cpu().numpy()
            ary_state, reward, done, _ = env.step(ary_action)  # next_state
            ary_state = env.reset() if done else ary_state  # ary_state.shape == (state_dim, )
            state = torch.as_tensor(ary_state, dtype=torch.float32, device=self.device).unsqueeze(0)
            actions[t] = action
            rewards[t] = reward
            dones[t] = done

        self.last_state = state  # state.shape == (1, state_dim) for a single env.

        rewards *= self.reward_scale
        undones = 1.0 - dones.type(torch.float32)
        return states, actions, rewards, undones

    def explore_vec_env(self, env, horizon_len: int, if_random: bool = False) -> Tuple[Tensor, ...]:
        """
        Collect trajectories through the actor-environment interaction for a **vectorized** environment instance.

        env: RL training environment. env.reset() env.step(). It should be a vector env.
        horizon_len: collect horizon_len step while exploring to update networks
        if_random: uses random action for warm-up exploration
        return: `(states, actions, rewards, undones)` for off-policy
            states.shape == (horizon_len, num_envs, state_dim)
            actions.shape == (horizon_len, num_envs, action_dim)
            rewards.shape == (horizon_len, num_envs)
            undones.shape == (horizon_len, num_envs)
        """
        states = torch.zeros((horizon_len, self.num_envs, self.state_dim), dtype=torch.float32).to(self.device)
        actions = torch.zeros((horizon_len, self.num_envs, 1), dtype=torch.int32).to(self.device)  # different
        rewards = torch.zeros((horizon_len, self.num_envs), dtype=torch.float32).to(self.device)
        dones = torch.zeros((horizon_len, self.num_envs), dtype=torch.bool).to(self.device)

        state = self.last_state  # last_state.shape = (num_envs, state_dim) for a vectorized env.
        get_action = self.act.get_action
        for t in range(horizon_len):
            # NOTE(review): random actions are created on CPU and handed to `env.step`
            # as-is — confirm the vectorized env accepts CPU tensors.
            action = torch.randint(self.action_dim, size=(self.num_envs, 1)) if if_random \
                else get_action(state).detach()  # different
            states[t] = state

            state, reward, done, _ = env.step(action)  # next_state
            actions[t] = action
            rewards[t] = reward
            dones[t] = done

        self.last_state = state

        rewards *= self.reward_scale
        undones = 1.0 - dones.type(torch.float32)
        return states, actions, rewards, undones

    def update_net(self, buffer: ReplayBuffer) -> Tuple[float, ...]:
        """Refresh normalization statistics, then run several critic updates on `buffer` samples.

        :param buffer: the ReplayBuffer instance that stores the trajectories.
        :return: (average critic objective, average predicted Q value) over the update steps.
        """
        with torch.no_grad():
            states, actions, rewards, undones = buffer.add_item
            self.update_avg_std_for_normalization(
                states=states.reshape((-1, self.state_dim)),
                returns=self.get_cumulative_rewards(rewards=rewards, undones=undones).reshape((-1,))
            )

        '''update network'''
        obj_critics = 0.0
        obj_actors = 0.0

        update_times = int(buffer.add_size * self.repeat_times)
        assert update_times >= 1
        for _ in range(update_times):
            obj_critic, q_value = self.get_obj_critic(buffer, self.batch_size)
            obj_critics += obj_critic.item()
            obj_actors += q_value.mean().item()
            self.optimizer_update(self.cri_optimizer, obj_critic)
            self.soft_update(self.cri_target, self.cri, self.soft_update_tau)
        return obj_critics / update_times, obj_actors / update_times

    def get_obj_critic_raw(self, buffer: ReplayBuffer, batch_size: int) -> Tuple[Tensor, Tensor]:
        """
        Calculate the loss of the network and predict Q values with **uniform sampling**.

        :param buffer: the ReplayBuffer instance that stores the trajectories.
        :param batch_size: the size of batch data for Stochastic Gradient Descent (SGD).
        :return: the loss of the network and Q values.
        """
        with torch.no_grad():
            states, actions, rewards, undones, next_ss = buffer.sample(batch_size)  # next_ss: next states
            next_qs = self.cri_target(next_ss).max(dim=1, keepdim=True)[0].squeeze(1)  # next q_values
            q_labels = rewards + undones * self.gamma * next_qs

        q_values = self.cri(states).gather(1, actions.long()).squeeze(1)
        obj_critic = self.criterion(q_values, q_labels)
        return obj_critic, q_values

    def get_obj_critic_per(self, buffer: ReplayBuffer, batch_size: int) -> Tuple[Tensor, Tensor]:
        """
        Calculate the loss of the network and predict Q values with **Prioritized Experience Replay (PER)**.

        :param buffer: the ReplayBuffer instance that stores the trajectories.
        :param batch_size: the size of batch data for Stochastic Gradient Descent (SGD).
        :return: the loss of the network and Q values.
        """
        with torch.no_grad():
            states, actions, rewards, undones, next_ss, is_weights, is_indices = buffer.sample_for_per(batch_size)
            # is_weights, is_indices: important sampling `weights, indices` by Prioritized Experience Replay (PER)

            next_qs = self.cri_target(next_ss).max(dim=1, keepdim=True)[0].squeeze(1)  # q values in next step
            q_labels = rewards + undones * self.gamma * next_qs

        q_values = self.cri(states).gather(1, actions.long()).squeeze(1)
        td_errors = self.criterion(q_values, q_labels)  # or td_error = (q_value - q_label).abs()
        obj_critic = (td_errors * is_weights).mean()

        buffer.td_error_update_for_per(is_indices.detach(), td_errors.detach())
        return obj_critic, q_values

    def get_cumulative_rewards(self, rewards: Tensor, undones: Tensor) -> Tensor:
        """Compute discounted returns for value normalization, bootstrapped from the target Q-network."""
        returns = torch.empty_like(rewards)

        masks = undones * self.gamma
        horizon_len = rewards.shape[0]

        last_state = self.last_state
        # BUG FIX: bootstrap with the maximum Q-value of the last state.
        # `.argmax(dim=1)` returned the greedy *action index*, not a value estimate.
        next_value = self.act_target(last_state).max(dim=1)[0].detach()  # actor is Q Network in DQN style
        for t in range(horizon_len - 1, -1, -1):
            returns[t] = next_value = rewards[t] + masks[t] * next_value
        return returns
class AgentDoubleDQN(AgentDQN):
    """
    Double Deep Q-Network algorithm. “Deep Reinforcement Learning with Double Q-learning”. H. V. Hasselt et al.. 2015.
    """

    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        self.act_class = getattr(self, "act_class", QNetTwin)
        self.cri_class = getattr(self, "cri_class", None)  # means `self.cri = self.act`
        super().__init__(net_dims=net_dims, state_dim=state_dim, action_dim=action_dim, gpu_id=gpu_id, args=args)

    def get_obj_critic_raw(self, buffer: ReplayBuffer, batch_size: int) -> Tuple[Tensor, Tensor]:
        """
        Calculate the loss of the network and predict Q values with **uniform sampling**.

        :param buffer: the ReplayBuffer instance that stores the trajectories.
        :param batch_size: the size of batch data for Stochastic Gradient Descent (SGD).
        :return: the loss of the network and Q values.
        """
        with torch.no_grad():
            states, actions, rewards, undones, next_ss = buffer.sample(batch_size)
            # Target: element-wise min of the twin heads, then the greedy max over actions.
            next_q1, next_q2 = self.cri_target.get_q1_q2(next_ss)
            next_qs = torch.min(next_q1, next_q2).max(dim=1, keepdim=True)[0].squeeze(1)
            q_labels = rewards + undones * self.gamma * next_qs

        q1_all, q2_all = self.act.get_q1_q2(states)
        act_idx = actions.long()
        q1 = q1_all.gather(1, act_idx).squeeze(1)
        q2 = q2_all.gather(1, act_idx).squeeze(1)
        obj_critic = self.criterion(q1, q_labels) + self.criterion(q2, q_labels)
        return obj_critic, q1

    def get_obj_critic_per(self, buffer: ReplayBuffer, batch_size: int) -> Tuple[Tensor, Tensor]:
        """
        Calculate the loss of the network and predict Q values with **Prioritized Experience Replay (PER)**.

        :param buffer: the ReplayBuffer instance that stores the trajectories.
        :param batch_size: the size of batch data for Stochastic Gradient Descent (SGD).
        :return: the loss of the network and Q values.
        """
        with torch.no_grad():
            states, actions, rewards, undones, next_ss, is_weights, is_indices = buffer.sample_for_per(batch_size)
            next_q1, next_q2 = self.cri_target.get_q1_q2(next_ss)
            next_qs = torch.min(next_q1, next_q2).max(dim=1, keepdim=True)[0].squeeze(1)
            q_labels = rewards + undones * self.gamma * next_qs

        q1_all, q2_all = self.act.get_q1_q2(states)
        act_idx = actions.long()
        q1 = q1_all.gather(1, act_idx).squeeze(1)
        q2 = q2_all.gather(1, act_idx).squeeze(1)
        td_errors = self.criterion(q1, q_labels) + self.criterion(q2, q_labels)
        obj_critic = (td_errors * is_weights).mean()

        buffer.td_error_update_for_per(is_indices.detach(), td_errors.detach())
        return obj_critic, q1
'''add dueling q network'''
class AgentDuelingDQN(AgentDQN):
    """DQN variant whose Q-network uses a dueling head (QNetDuel)."""

    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        # `cri_class = None` means the critic shares the actor network (`self.cri = self.act`).
        self.cri_class = getattr(self, "cri_class", None)
        self.act_class = getattr(self, "act_class", QNetDuel)
        super().__init__(net_dims=net_dims, state_dim=state_dim, action_dim=action_dim, gpu_id=gpu_id, args=args)
class AgentD3QN(AgentDoubleDQN):  # Dueling Double Deep Q Network. (D3QN)
    """Double DQN combined with a dueling twin Q-network head (QNetTwinDuel)."""

    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        # `cri_class = None` means the critic shares the actor network (`self.cri = self.act`).
        self.cri_class = getattr(self, "cri_class", None)
        self.act_class = getattr(self, "act_class", QNetTwinDuel)
        super().__init__(net_dims=net_dims, state_dim=state_dim, action_dim=action_dim, gpu_id=gpu_id, args=args)
| 12,623 | 49.698795 | 118 | py |
ElegantRL | ElegantRL-master/elegantrl/agents/AgentMATD3.py | import torch
from elegantrl.agents import AgentBase, AgentDDPG
from elegantrl.agents.net import Actor, CriticTwin
class AgentTD3:
    """
    Bases: ``AgentBase``
    Twin Delayed MADDPG algorithm.
    :param net_dim[int]: the dimension of networks (the width of neural networks)
    :param state_dim[int]: the dimension of state (the number of state vector)
    :param action_dim[int]: the dimension of action (the number of discrete action)
    :param learning_rate[float]: learning rate of optimizer
    :param gamma[float]: learning rate of optimizer
    :param n_agents[int]: number of agents
    :param if_per_or_gae[bool]: PER (off-policy) or GAE (on-policy) for sparse reward
    :param if_use_per[bool]: PER (off-policy) or GAE (on-policy) for sparse reward
    :param env_num[int]: the env number of VectorEnv. env_num == 1 means don't use VectorEnv
    :param agent_id[int]: if the visible_gpu is '1,9,3,4', agent_id=1 means (1,9,4,3)[agent_id] == 9
    """
    # NOTE(review): the docstring claims base class ``AgentBase`` but the class inherits
    # from nothing; ``super().__init__()`` below resolves to ``object`` — confirm intended.
    def __init__(self):
        super().__init__()
        self.ClassAct = Actor
        self.ClassCri = CriticTwin
        self.if_use_cri_target = True
        self.if_use_act_target = True
    def init(
        self,
        net_dim,
        state_dim,
        action_dim,
        learning_rate=1e-4,
        marl=True,
        n_agents=1,
        if_use_per=False,
        env_num=1,
        agent_id=0,
    ):
        # NOTE(review): the `learning_rate`, `marl`, `if_use_per` and `env_num` arguments
        # are ignored — hard-coded values are passed to the sub-agents below. Confirm.
        self.agents = [AgentDDPG() for i in range(n_agents)]
        self.explore_env = self.explore_one_env
        self.if_off_policy = True
        self.n_agents = n_agents
        for i in range(self.n_agents):
            # NOTE(review): assigns the CriticTwin *class* (not an instance) to `cri`;
            # presumably `agents[i].init(...)` below replaces it — confirm.
            self.agents[i].cri = CriticTwin
            self.agents[i].init(
                net_dim,
                state_dim,
                action_dim,
                learning_rate=1e-4,
                marl=True,
                n_agents=self.n_agents,
                if_use_per=False,
                env_num=1,
                agent_id=0,
            )
        self.n_states = state_dim
        self.n_actions = action_dim
        # NOTE(review): batch_size is initialized from `net_dim` (network width) —
        # looks unintended; `update_net` overwrites it with the real batch size. Confirm.
        self.batch_size = net_dim
        self.gamma = 0.95
        self.update_tau = 0
        self.device = torch.device(
            f"cuda:{agent_id}"
            if (torch.cuda.is_available() and (agent_id >= 0))
            else "cpu"
        )
    def update_agent(self, rewards, dones, actions, observations, next_obs, index):
        """
        Update the single agent neural networks, called by update_net.
        :param rewards: reward list of the sampled buffer
        :param dones: done list of the sampled buffer
        :param actions: action list of the sampled buffer
        :param observations: observation list of the sampled buffer
        :param next_obs: next_observation list of the sample buffer
        :param index: ID of the agent
        :return Nonetype
        """
        curr_agent = self.agents[index]
        # NOTE(review): gradients are zeroed again right before each backward pass
        # below; this early zero_grad appears redundant — confirm.
        curr_agent.cri_optim.zero_grad()
        # Build the joint target action: this agent uses its own target actor,
        # the other agents use theirs, all evaluated on the next observations.
        all_target_actions = []
        for i in range(self.n_agents):
            if i == index:
                all_target_actions.append(curr_agent.act_target(next_obs[:, index]))
            if i != index:
                action = self.agents[i].act_target(next_obs[:, i])
                all_target_actions.append(action)
        action_target_all = (
            torch.cat(all_target_actions, dim=1)
            .to(self.device)
            .reshape(actions.shape[0], actions.shape[1] * actions.shape[2])
        )
        # Centralized TD target: reward + gamma * Q_target(joint next obs, joint target action).
        target_value = rewards[:, index] + self.gamma * curr_agent.cri_target(
            next_obs.reshape(next_obs.shape[0], next_obs.shape[1] * next_obs.shape[2]),
            action_target_all,
        ).detach().squeeze(dim=1)
        # NOTE(review): `observations` is reshaped using `next_obs`'s dimensions here —
        # presumably both share a shape; confirm.
        actual_value = curr_agent.cri(
            observations.reshape(
                next_obs.shape[0], next_obs.shape[1] * next_obs.shape[2]
            ),
            actions.reshape(actions.shape[0], actions.shape[1] * actions.shape[2]),
        ).squeeze(dim=1)
        vf_loss = curr_agent.loss_td(actual_value, target_value.detach())
        curr_agent.act_optim.zero_grad()
        # Policy loss: maximize Q of the joint action where this agent's slot is
        # replaced by its current (differentiable) policy output.
        curr_pol_out = curr_agent.act(observations[:, index])
        curr_pol_vf_in = curr_pol_out
        all_pol_acs = []
        for i in range(self.n_agents):
            if i == index:
                all_pol_acs.append(curr_pol_vf_in)
            else:
                all_pol_acs.append(actions[:, i])
        pol_loss = -torch.mean(
            curr_agent.cri(
                observations.reshape(
                    observations.shape[0], observations.shape[1] * observations.shape[2]
                ),
                torch.cat(all_pol_acs, dim=1)
                .to(self.device)
                .reshape(actions.shape[0], actions.shape[1] * actions.shape[2]),
            )
        )
        curr_agent.act_optim.zero_grad()
        pol_loss.backward()
        curr_agent.act_optim.step()
        curr_agent.cri_optim.zero_grad()
        vf_loss.backward()
        curr_agent.cri_optim.step()
    def update_net(self, buffer, batch_size, repeat_times, soft_update_tau):
        """
        Update the neural networks by sampling batch data from ``ReplayBuffer``.
        :param buffer: the ReplayBuffer instance that stores the trajectories.
        :param batch_size: the size of batch data for Stochastic Gradient Descent (SGD).
        :param repeat_times: the re-using times of each trajectory.
        :param soft_update_tau: the soft update parameter.
        :return Nonetype
        """
        # NOTE(review): `repeat_times` is accepted but unused — the batch is sampled
        # and applied exactly once per call. Confirm intended.
        buffer.update_now_len()
        self.batch_size = batch_size
        self.update_tau = soft_update_tau
        rewards, dones, actions, observations, next_obs = buffer.sample_batch(
            self.batch_size
        )
        for index in range(self.n_agents):
            self.update_agent(rewards, dones, actions, observations, next_obs, index)
        # Soft-update every agent's target networks toward the online networks.
        for agent in self.agents:
            self.soft_update(agent.cri_target, agent.cri, self.update_tau)
            self.soft_update(agent.act_target, agent.act, self.update_tau)
        return
    def explore_one_env(self, env, target_step) -> list:
        """
        Exploring the environment for target_step.
        param env: the Environment instance to be explored.
        param target_step: target steps to explore.
        """
        traj_temp = []
        k = 0  # steps since the last env reset; episodes are capped at 100 steps
        for _ in range(target_step):
            k += 1
            actions = []
            for i in range(self.n_agents):
                action = self.agents[i].select_actions(self.states[i])
                actions.append(action)
            next_s, reward, done, _ = env.step(actions)
            traj_temp.append((self.states, reward, done, actions))
            # NOTE(review): `is True` is an identity check — truthy `done` values of
            # other types (e.g. numpy bool) would be treated as False. Confirm.
            global_done = all(done[i] is True for i in range(self.n_agents))
            if global_done or k > 100:
                state = env.reset()
                k = 0
            else:
                state = next_s
            self.states = state
        return traj_temp
    def select_actions(self, states):
        """
        Select continuous actions for exploration
        :param state: states.shape==(n_agents,batch_size, state_dim, )
        :return: actions.shape==(n_agents,batch_size, action_dim, ), -1 < action < +1
        """
        actions = []
        for i in range(self.n_agents):
            action = self.agents[i].select_actions(states[i])
            actions.append(action)
        return actions
    def save_or_load_agent(self, cwd, if_save):
        """save or load training files for Agent
        :param cwd: Current Working Directory. ElegantRL save training files in CWD.
        :param if_save: True: save files. False: load files.
        """
        # Each sub-agent persists to its own numbered sub-directory under `cwd`.
        for i in range(self.n_agents):
            self.agents[i].save_or_load_agent(cwd + "/" + str(i), if_save)
| 7,689 | 35.794258 | 100 | py |
ElegantRL | ElegantRL-master/elegantrl/agents/AgentMAPPO.py | import numpy as np
import torch
import torch.nn as nn
from elegantrl.agents.net import ActorMAPPO, CriticMAPPO
class AgentMAPPO:
"""
Multi-Agent PPO Algorithm.
:param args: (argparse.Namespace) arguments containing relevant model, policy, and env information.
:param policy: (R_MAPPO_Policy) policy to update.
:param device: (torch.device) specifies the device to run on (cpu/gpu).
"""
    def __init__(
        self, args, obs_space, cent_obs_space, act_space, device=torch.device("cpu")
    ):
        """Build the MAPPO actor/critic pair and their optimizers from `args`."""
        self.device = device
        self.tpdv = dict(dtype=torch.float32, device=device)  # common dtype/device kwargs
        # PPO clipping / optimization hyper-parameters copied from `args`.
        self.clip_param = args.clip_param
        self.ppo_epoch = args.ppo_epoch
        self.num_mini_batch = args.num_mini_batch
        self.data_chunk_length = args.data_chunk_length
        self.value_loss_coef = args.value_loss_coef
        self.entropy_coef = args.entropy_coef
        self.max_grad_norm = args.max_grad_norm
        self.huber_delta = args.huber_delta
        # Feature flags (`_use_*`) selecting alternate loss / normalization paths.
        self._use_recurrent_policy = args.use_recurrent_policy
        self._use_naive_recurrent = args.use_naive_recurrent_policy
        self._use_max_grad_norm = args.use_max_grad_norm
        self._use_clipped_value_loss = args.use_clipped_value_loss
        self._use_huber_loss = args.use_huber_loss
        self._use_popart = args.use_popart
        self._use_valuenorm = args.use_valuenorm
        self._use_value_active_masks = args.use_value_active_masks
        self._use_policy_active_masks = args.use_policy_active_masks
        self.lr = args.lr
        self.critic_lr = args.critic_lr
        self.opti_eps = args.opti_eps
        self.weight_decay = args.weight_decay
        self.obs_space = obs_space
        self.share_obs_space = cent_obs_space
        self.act_space = act_space
        # Decentralized actor (local obs) and centralized critic (shared obs).
        self.actor = ActorMAPPO(args, self.obs_space, self.act_space, self.device)
        self.critic = CriticMAPPO(args, self.share_obs_space, self.device)
        self.actor_optimizer = torch.optim.Adam(
            self.actor.parameters(),
            lr=self.lr,
            eps=self.opti_eps,
            weight_decay=self.weight_decay,
        )
        self.critic_optimizer = torch.optim.Adam(
            self.critic.parameters(),
            lr=self.critic_lr,
            eps=self.opti_eps,
            weight_decay=self.weight_decay,
        )
        if self._use_popart:
            self.value_normalizer = self.critic.v_out
        elif self._use_valuenorm:
            # NOTE(review): `ValueNorm` is not among this file's visible imports —
            # confirm it is imported/defined elsewhere in the module.
            self.value_normalizer = ValueNorm(1, device=self.device)
        else:
            self.value_normalizer = None
    def lr_decay(self, episode, episodes):
        """
        Decay the actor and critic learning rates.
        :param episode: (int) current training episode.
        :param episodes: (int) total number of training episodes.
        """
        # Delegate per-episode LR scheduling of both optimizers.
        # NOTE(review): `update_linear_schedule` is not among this file's visible
        # imports — confirm it is imported/defined elsewhere in the module.
        update_linear_schedule(self.actor_optimizer, episode, episodes, self.lr)
        update_linear_schedule(self.critic_optimizer, episode, episodes, self.critic_lr)
    def get_actions(
        self,
        cent_obs,
        obs,
        rnn_states_actor,
        rnn_states_critic,
        masks,
        available_actions=None,
        deterministic=False,
    ):
        """
        Compute actions and value function predictions for the given inputs.
        :param cent_obs (np.ndarray): centralized input to the critic.
        :param obs (np.ndarray): local agent inputs to the actor.
        :param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.
        :param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.
        :param masks: (np.ndarray) denotes points at which RNN states should be reset.
        :param available_actions: (np.ndarray) denotes which actions are available to agent
                                  (if None, all actions available)
        :param deterministic: (bool) whether the action should be mode of distribution or should be sampled.
        :return values: (torch.Tensor) value function predictions.
        :return actions: (torch.Tensor) actions to take.
        :return action_log_probs: (torch.Tensor) log probabilities of chosen actions.
        :return rnn_states_actor: (torch.Tensor) updated actor network RNN states.
        :return rnn_states_critic: (torch.Tensor) updated critic network RNN states.
        """
        # Decentralized actor consumes local `obs`; centralized critic consumes `cent_obs`.
        actions, action_log_probs, rnn_states_actor = self.actor(
            obs, rnn_states_actor, masks, available_actions, deterministic
        )
        values, rnn_states_critic = self.critic(cent_obs, rnn_states_critic, masks)
        return values, actions, action_log_probs, rnn_states_actor, rnn_states_critic
def get_values(self, cent_obs, rnn_states_critic, masks):
"""
Get value function predictions.
:param cent_obs (np.ndarray): centralized input to the critic.
:param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.
:param masks: (np.ndarray) denotes points at which RNN states should be reset.
:return values: (torch.Tensor) value function predictions.
"""
values, _ = self.critic(cent_obs, rnn_states_critic, masks)
return values
def evaluate_actions(
self,
cent_obs,
obs,
rnn_states_actor,
rnn_states_critic,
action,
masks,
available_actions=None,
active_masks=None,
):
"""
Get action logprobs / entropy and value function predictions for actor update.
:param cent_obs (np.ndarray): centralized input to the critic.
:param obs (np.ndarray): local agent inputs to the actor.
:param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.
:param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.
:param action: (np.ndarray) actions whose log probabilites and entropy to compute.
:param masks: (np.ndarray) denotes points at which RNN states should be reset.
:param available_actions: (np.ndarray) denotes which actions are available to agent
(if None, all actions available)
:param active_masks: (torch.Tensor) denotes whether an agent is active or dead.
:return values: (torch.Tensor) value function predictions.
:return action_log_probs: (torch.Tensor) log probabilities of the input actions.
:return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs.
"""
action_log_probs, dist_entropy = self.actor.evaluate_actions(
obs, rnn_states_actor, action, masks, available_actions, active_masks
)
values, _ = self.critic(cent_obs, rnn_states_critic, masks)
return values, action_log_probs, dist_entropy
def act(
self, obs, rnn_states_actor, masks, available_actions=None, deterministic=False
):
"""
Compute actions using the given inputs.
:param obs (np.ndarray): local agent inputs to the actor.
:param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.
:param masks: (np.ndarray) denotes points at which RNN states should be reset.
:param available_actions: (np.ndarray) denotes which actions are available to agent
(if None, all actions available)
:param deterministic: (bool) whether the action should be mode of distribution or should be sampled.
"""
actions, _, rnn_states_actor = self.actor(
obs, rnn_states_actor, masks, available_actions, deterministic
)
return actions, rnn_states_actor
def cal_value_loss(
self, values, value_preds_batch, return_batch, active_masks_batch
):
"""
Calculate value function loss.
:param values: (torch.Tensor) value function predictions.
:param value_preds_batch: (torch.Tensor) "old" value predictions from data batch (used for value clip loss)
:param return_batch: (torch.Tensor) reward to go returns.
:param active_masks_batch: (torch.Tensor) denotes if agent is active or dead at a given timesep.
:return value_loss: (torch.Tensor) value function loss.
"""
value_pred_clipped = value_preds_batch + (values - value_preds_batch).clamp(
-self.clip_param, self.clip_param
)
if self._use_popart or self._use_valuenorm:
self.value_normalizer.update(return_batch)
error_clipped = (
self.value_normalizer.normalize(return_batch) - value_pred_clipped
)
error_original = self.value_normalizer.normalize(return_batch) - values
else:
error_clipped = return_batch - value_pred_clipped
error_original = return_batch - values
if self._use_huber_loss:
value_loss_clipped = huber_loss(error_clipped, self.huber_delta)
value_loss_original = huber_loss(error_original, self.huber_delta)
else:
value_loss_clipped = mse_loss(error_clipped)
value_loss_original = mse_loss(error_original)
if self._use_clipped_value_loss:
value_loss = torch.max(value_loss_original, value_loss_clipped)
else:
value_loss = value_loss_original
if self._use_value_active_masks:
value_loss = (
value_loss * active_masks_batch
).sum() / active_masks_batch.sum()
else:
value_loss = value_loss.mean()
return value_loss
    def ppo_update(self, sample, update_actor=True):
        """
        Update actor and critic networks from one minibatch of rollout data.
        :param sample: (Tuple) contains data batch with which to update networks.
        :update_actor: (bool) whether to update actor network.
        :return value_loss: (torch.Tensor) value function loss.
        :return critic_grad_norm: (torch.Tensor) gradient norm from critic update.
        :return policy_loss: (torch.Tensor) actor(policy) loss value.
        :return dist_entropy: (torch.Tensor) action entropies.
        :return actor_grad_norm: (torch.Tensor) gradient norm from actor update.
        :return imp_weights: (torch.Tensor) importance sampling weights.
        """
        # Unpack the minibatch produced by the buffer's data generator.
        (
            share_obs_batch,
            obs_batch,
            rnn_states_batch,
            rnn_states_critic_batch,
            actions_batch,
            value_preds_batch,
            return_batch,
            masks_batch,
            active_masks_batch,
            old_action_log_probs_batch,
            adv_targ,
            available_actions_batch,
        ) = sample
        # Move the tensors used directly in the losses onto the training
        # device/dtype (self.tpdv presumably holds dtype/device kwargs —
        # TODO confirm against the class constructor).
        old_action_log_probs_batch = check(old_action_log_probs_batch).to(**self.tpdv)
        adv_targ = check(adv_targ).to(**self.tpdv)
        value_preds_batch = check(value_preds_batch).to(**self.tpdv)
        return_batch = check(return_batch).to(**self.tpdv)
        active_masks_batch = check(active_masks_batch).to(**self.tpdv)
        # Reshape to do in a single forward pass for all steps
        values, action_log_probs, dist_entropy = self.policy.evaluate_actions(
            share_obs_batch,
            obs_batch,
            rnn_states_batch,
            rnn_states_critic_batch,
            actions_batch,
            masks_batch,
            available_actions_batch,
            active_masks_batch,
        )
        # actor update
        # PPO importance ratio: pi_new(a|s) / pi_old(a|s).
        imp_weights = torch.exp(action_log_probs - old_action_log_probs_batch)
        surr1 = imp_weights * adv_targ
        surr2 = (
            torch.clamp(imp_weights, 1.0 - self.clip_param, 1.0 + self.clip_param)
            * adv_targ
        )
        if self._use_policy_active_masks:
            # Average the clipped surrogate only over active (alive) timesteps.
            policy_action_loss = (
                -torch.sum(torch.min(surr1, surr2), dim=-1, keepdim=True)
                * active_masks_batch
            ).sum() / active_masks_batch.sum()
        else:
            policy_action_loss = -torch.sum(
                torch.min(surr1, surr2), dim=-1, keepdim=True
            ).mean()
        policy_loss = policy_action_loss
        self.actor_optimizer.zero_grad()
        if update_actor:
            # Entropy bonus encourages exploration.
            (policy_loss - dist_entropy * self.entropy_coef).backward()
        # NOTE(review): grad clipping and optimizer.step() run even when
        # update_actor is False; with zeroed grads the step is presumably a
        # no-op, but stateful optimizers (e.g. Adam momentum) may still move
        # parameters — confirm this is intended.
        actor_grad_norm = nn.utils.clip_grad_norm_(
            self.actor.parameters(), self.max_grad_norm
        )
        self.actor_optimizer.step()
        # critic update
        value_loss = self.cal_value_loss(
            values, value_preds_batch, return_batch, active_masks_batch
        )
        self.critic_optimizer.zero_grad()
        (value_loss * self.value_loss_coef).backward()
        critic_grad_norm = nn.utils.clip_grad_norm_(
            self.critic.parameters(), self.max_grad_norm
        )
        self.critic_optimizer.step()
        return (
            value_loss,
            critic_grad_norm,
            policy_loss,
            dist_entropy,
            actor_grad_norm,
            imp_weights,
        )
def update_net(self, buffer, update_actor=True):
"""
Perform a training update using minibatch GD.
:param buffer: (SharedReplayBuffer) buffer containing training data.
:param update_actor: (bool) whether to update actor network.
:return train_info: (dict) contains information regarding training update (e.g. loss, grad norms, etc).
"""
if self._use_popart or self._use_valuenorm:
advantages = buffer.returns[:-1] - self.value_normalizer.denormalize(
buffer.value_preds[:-1]
)
else:
advantages = buffer.returns[:-1] - buffer.value_preds[:-1]
advantages_copy = advantages.copy()
advantages_copy[buffer.active_masks[:-1] == 0.0] = np.nan
mean_advantages = np.nanmean(advantages_copy)
std_advantages = np.nanstd(advantages_copy)
advantages = (advantages - mean_advantages) / (std_advantages + 1e-5)
train_info = {
"value_loss": 0,
"policy_loss": 0,
"dist_entropy": 0,
"actor_grad_norm": 0,
"critic_grad_norm": 0,
"ratio": 0,
}
for _ in range(self.ppo_epoch):
if self._use_recurrent_policy:
data_generator = buffer.recurrent_generator(
advantages, self.num_mini_batch, self.data_chunk_length
)
elif self._use_naive_recurrent:
data_generator = buffer.naive_recurrent_generator(
advantages, self.num_mini_batch
)
else:
data_generator = buffer.feed_forward_generator(
advantages, self.num_mini_batch
)
for sample in data_generator:
(
value_loss,
critic_grad_norm,
policy_loss,
dist_entropy,
actor_grad_norm,
imp_weights,
) = self.ppo_update(sample, update_actor)
train_info["value_loss"] += value_loss.item()
train_info["policy_loss"] += policy_loss.item()
train_info["dist_entropy"] += dist_entropy.item()
train_info["actor_grad_norm"] += actor_grad_norm
train_info["critic_grad_norm"] += critic_grad_norm
train_info["ratio"] += imp_weights.mean()
num_updates = self.ppo_epoch * self.num_mini_batch
for k in train_info.keys():
train_info[k] /= num_updates
return train_info
def prep_training(self):
self.actor.train()
self.critic.train()
def prep_rollout(self):
self.actor.eval()
self.critic.eval()
| 15,796 | 38.004938 | 116 | py |
ElegantRL | ElegantRL-master/elegantrl/agents/AgentPPO.py | import torch
from typing import Tuple
from torch import Tensor
from elegantrl.train.config import Config
from elegantrl.agents.AgentBase import AgentBase
from elegantrl.agents.net import ActorPPO, CriticPPO
from elegantrl.agents.net import ActorDiscretePPO
class AgentPPO(AgentBase):
    """
    PPO algorithm. “Proximal Policy Optimization Algorithms”. John Schulman. et al.. 2017.
    net_dims: the middle layer dimension of MLP (MultiLayer Perceptron)
    state_dim: the dimension of state (the number of state vector)
    action_dim: the dimension of action (or the number of discrete action)
    gpu_id: the gpu_id of the training device. Use CPU when cuda is not available.
    args: the arguments for agent training. `args = Config()`
    """
    # NOTE(review): the default `args: Config = Config()` is a mutable default
    # argument evaluated once at class-definition time — all callers that omit
    # `args` share one Config instance. Confirm this is intended upstream.
    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        # Let subclasses pre-select network classes before AgentBase builds them.
        self.act_class = getattr(self, "act_class", ActorPPO)
        self.cri_class = getattr(self, "cri_class", CriticPPO)
        super().__init__(net_dims=net_dims, state_dim=state_dim, action_dim=action_dim, gpu_id=gpu_id, args=args)
        self.if_off_policy = False
        self.ratio_clip = getattr(args, "ratio_clip", 0.25)  # `ratio.clamp(1 - clip, 1 + clip)`
        self.lambda_gae_adv = getattr(args, "lambda_gae_adv", 0.95)  # could be 0.50~0.99 # GAE for sparse reward
        self.lambda_entropy = getattr(args, "lambda_entropy", 0.01)  # could be 0.00~0.20
        # Store the entropy coefficient as a tensor on the training device.
        self.lambda_entropy = torch.tensor(self.lambda_entropy, dtype=torch.float32, device=self.device)
        if getattr(args, 'if_use_v_trace', False):
            self.get_advantages = self.get_advantages_vtrace  # get advantage value in reverse time series (V-trace)
        else:
            self.get_advantages = self.get_advantages_origin  # get advantage value using critic network
        # Running statistics, updated in update_net via
        # update_avg_std_for_normalization (defined outside this class).
        self.value_avg = torch.zeros(1, dtype=torch.float32, device=self.device)
        self.value_std = torch.ones(1, dtype=torch.float32, device=self.device)
    def explore_one_env(self, env, horizon_len: int, if_random: bool = False) -> Tuple[Tensor, ...]:
        """
        Collect trajectories through the actor-environment interaction for a **single** environment instance.
        env: RL training environment. env.reset() env.step(). It should be a vector env.
        horizon_len: collect horizon_len step while exploring to update networks
        return: `(states, actions, logprobs, rewards, undones)` for on-policy training
        env_num == 1
        states.shape == (horizon_len, env_num, state_dim)
        actions.shape == (horizon_len, env_num, action_dim)
        logprobs.shape == (horizon_len, env_num)
        rewards.shape == (horizon_len, env_num)
        undones.shape == (horizon_len, env_num)
        """
        # Pre-allocate one buffer per quantity for the whole horizon.
        states = torch.zeros((horizon_len, self.num_envs, self.state_dim), dtype=torch.float32).to(self.device)
        actions = torch.zeros((horizon_len, self.num_envs, self.action_dim), dtype=torch.float32).to(self.device)
        logprobs = torch.zeros((horizon_len, self.num_envs), dtype=torch.float32).to(self.device)
        rewards = torch.zeros((horizon_len, self.num_envs), dtype=torch.float32).to(self.device)
        dones = torch.zeros((horizon_len, self.num_envs), dtype=torch.bool).to(self.device)
        state = self.last_state  # shape == (1, state_dim) for a single env.
        # Hoist method lookups out of the hot loop.
        get_action = self.act.get_action
        convert = self.act.convert_action_for_env
        for t in range(horizon_len):
            action, logprob = get_action(state)
            states[t] = state
            ary_action = convert(action[0]).detach().cpu().numpy()
            ary_state, reward, done, _ = env.step(ary_action)  # next_state
            # Auto-reset the (gym-style) env at episode end.
            ary_state = env.reset() if done else ary_state  # ary_state.shape == (state_dim, )
            state = torch.as_tensor(ary_state, dtype=torch.float32, device=self.device).unsqueeze(0)
            actions[t] = action
            logprobs[t] = logprob
            rewards[t] = reward
            dones[t] = done
        self.last_state = state  # state.shape == (1, state_dim) for a single env.
        rewards *= self.reward_scale
        # undones == 1.0 while the episode continues, 0.0 at terminal steps.
        undones = 1.0 - dones.type(torch.float32)
        return states, actions, logprobs, rewards, undones
    def explore_vec_env(self, env, horizon_len: int, if_random: bool = False) -> Tuple[Tensor, ...]:
        """
        Collect trajectories through the actor-environment interaction for a **vectorized** environment instance.
        env: RL training environment. env.reset() env.step(). It should be a vector env.
        horizon_len: collect horizon_len step while exploring to update networks
        return: `(states, actions, logprobs, rewards, undones)` for on-policy training
        states.shape == (horizon_len, env_num, state_dim)
        actions.shape == (horizon_len, env_num, action_dim)
        logprobs.shape == (horizon_len, env_num)
        rewards.shape == (horizon_len, env_num)
        undones.shape == (horizon_len, env_num)
        """
        states = torch.zeros((horizon_len, self.num_envs, self.state_dim), dtype=torch.float32).to(self.device)
        actions = torch.zeros((horizon_len, self.num_envs, self.action_dim), dtype=torch.float32).to(self.device)
        logprobs = torch.zeros((horizon_len, self.num_envs), dtype=torch.float32).to(self.device)
        rewards = torch.zeros((horizon_len, self.num_envs), dtype=torch.float32).to(self.device)
        dones = torch.zeros((horizon_len, self.num_envs), dtype=torch.bool).to(self.device)
        state = self.last_state  # shape == (env_num, state_dim) for a vectorized env.
        get_action = self.act.get_action
        convert = self.act.convert_action_for_env
        for t in range(horizon_len):
            action, logprob = get_action(state)
            states[t] = state
            # The vector env is assumed to auto-reset internally on done.
            state, reward, done, _ = env.step(convert(action))  # next_state
            actions[t] = action
            logprobs[t] = logprob
            rewards[t] = reward
            dones[t] = done
        self.last_state = state
        rewards *= self.reward_scale
        undones = 1.0 - dones.type(torch.float32)
        return states, actions, logprobs, rewards, undones
    def update_net(self, buffer) -> Tuple[float, ...]:
        """Train actor and critic from one on-policy rollout buffer.
        Returns (avg critic objective, avg actor objective, mean log action std).
        """
        with torch.no_grad():
            states, actions, logprobs, rewards, undones = buffer
            buffer_size = states.shape[0]
            buffer_num = states.shape[1]
            '''get advantages and reward_sums'''
            bs = 2 ** 10  # set a smaller 'batch_size' to avoiding out of GPU memory.
            values = torch.empty_like(rewards)  # values.shape == (buffer_size, buffer_num)
            # Evaluate the critic in chunks (per env column) to bound memory.
            for i in range(0, buffer_size, bs):
                for j in range(buffer_num):
                    values[i:i + bs, j] = self.cri(states[i:i + bs, j])
            advantages = self.get_advantages(rewards, undones, values)  # shape == (buffer_size, buffer_num)
            reward_sums = advantages + values  # shape == (buffer_size, buffer_num)
            del rewards, undones, values
            # NOTE(review): global mean but per-env (dim=0) std — confirm the
            # mixed normalization is intentional.
            advantages = (advantages - advantages.mean()) / (advantages.std(dim=0) + 1e-4)
            self.update_avg_std_for_normalization(
                states=states.reshape((-1, self.state_dim)),
                returns=reward_sums.reshape((-1,))
            )
        # assert logprobs.shape == advantages.shape == reward_sums.shape == (buffer_size, buffer_num)
        '''update network'''
        obj_critics = 0.0
        obj_actors = 0.0
        # sample_len excludes the last time step of the rollout.
        sample_len = buffer_size - 1
        update_times = int(buffer_size * self.repeat_times / self.batch_size)
        assert update_times >= 1
        for _ in range(update_times):
            # Draw a random minibatch of (time, env) index pairs.
            ids = torch.randint(sample_len * buffer_num, size=(self.batch_size,), requires_grad=False)
            ids0 = torch.fmod(ids, sample_len)  # ids % sample_len
            ids1 = torch.div(ids, sample_len, rounding_mode='floor')  # ids // sample_len
            state = states[ids0, ids1]
            action = actions[ids0, ids1]
            logprob = logprobs[ids0, ids1]
            advantage = advantages[ids0, ids1]
            reward_sum = reward_sums[ids0, ids1]
            value = self.cri(state)  # critic network predicts the reward_sum (Q value) of state
            obj_critic = self.criterion(value, reward_sum)
            self.optimizer_update(self.cri_optimizer, obj_critic)
            # Clipped-surrogate PPO actor objective with entropy bonus.
            new_logprob, obj_entropy = self.act.get_logprob_entropy(state, action)
            ratio = (new_logprob - logprob.detach()).exp()
            surrogate1 = advantage * ratio
            surrogate2 = advantage * ratio.clamp(1 - self.ratio_clip, 1 + self.ratio_clip)
            obj_surrogate = torch.min(surrogate1, surrogate2).mean()
            obj_actor = obj_surrogate + obj_entropy.mean() * self.lambda_entropy
            self.optimizer_update(self.act_optimizer, -obj_actor)
            obj_critics += obj_critic.item()
            obj_actors += obj_actor.item()
        a_std_log = self.act.action_std_log.mean() if hasattr(self.act, 'action_std_log') else torch.zeros(1)
        return obj_critics / update_times, obj_actors / update_times, a_std_log.item()
    def get_advantages_origin(self, rewards: Tensor, undones: Tensor, values: Tensor) -> Tensor:
        """Backward GAE using the critic's bootstrap value for the final state."""
        advantages = torch.empty_like(values)  # advantage value
        masks = undones * self.gamma
        horizon_len = rewards.shape[0]
        # Bootstrap from the critic at the state following the last stored step.
        next_value = self.cri(self.last_state).detach()
        advantage = torch.zeros_like(next_value)  # last advantage value by GAE (Generalized Advantage Estimate)
        for t in range(horizon_len - 1, -1, -1):
            # TD target r_t + gamma * V(s_{t+1}) (masked at episode ends) ...
            next_value = rewards[t] + masks[t] * next_value
            # ... gives delta_t = target - V(s_t); A_t = delta_t + gamma*lambda*A_{t+1}.
            advantages[t] = advantage = next_value - values[t] + masks[t] * self.lambda_gae_adv * advantage
            next_value = values[t]
        return advantages
    def get_advantages_vtrace(self, rewards: Tensor, undones: Tensor, values: Tensor) -> Tensor:
        """Backward advantage recursion in the V-trace style (no critic bootstrap
        beyond the stored values; the tail advantage starts at zero).
        """
        advantages = torch.empty_like(values)  # advantage value
        masks = undones * self.gamma
        horizon_len = rewards.shape[0]
        advantage = torch.zeros_like(values[0])  # last advantage value by GAE (Generalized Advantage Estimate)
        for t in range(horizon_len - 1, -1, -1):
            advantages[t] = rewards[t] - values[t] + masks[t] * advantage
            # `advantage` here carries the corrected value estimate for step t,
            # fed back (discounted) into step t-1.
            advantage = values[t] + self.lambda_gae_adv * advantages[t]
        return advantages
class AgentDiscretePPO(AgentPPO):
    """PPO variant for discrete action spaces; only the actor class and the
    exploration bookkeeping (integer actions) differ from AgentPPO."""
    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        # Select the discrete actor before AgentPPO/AgentBase build the networks.
        self.act_class = getattr(self, "act_class", ActorDiscretePPO)
        super().__init__(net_dims=net_dims, state_dim=state_dim, action_dim=action_dim, gpu_id=gpu_id, args=args)
    def explore_one_env(self, env, horizon_len: int, if_random: bool = False) -> Tuple[Tensor, ...]:
        """
        Collect trajectories through the actor-environment interaction for a **single** environment instance.
        env: RL training environment. env.reset() env.step(). It should be a vector env.
        horizon_len: collect horizon_len step while exploring to update networks
        return: `(states, actions, logprobs, rewards, undones)` for on-policy training
        env_num == 1
        states.shape == (horizon_len, env_num, state_dim)
        actions.shape == (horizon_len, env_num, 1) int32 discrete action ids
        logprobs.shape == (horizon_len, env_num)
        rewards.shape == (horizon_len, env_num)
        undones.shape == (horizon_len, env_num)
        """
        states = torch.zeros((horizon_len, self.num_envs, self.state_dim), dtype=torch.float32).to(self.device)
        actions = torch.zeros((horizon_len, self.num_envs, 1), dtype=torch.int32).to(self.device)  # only different
        logprobs = torch.zeros((horizon_len, self.num_envs), dtype=torch.float32).to(self.device)
        rewards = torch.zeros((horizon_len, self.num_envs), dtype=torch.float32).to(self.device)
        dones = torch.zeros((horizon_len, self.num_envs), dtype=torch.bool).to(self.device)
        state = self.last_state  # shape == (1, state_dim) for a single env.
        get_action = self.act.get_action
        convert = self.act.convert_action_for_env
        for t in range(horizon_len):
            action, logprob = get_action(state)
            states[t] = state
            # Discrete env steps take a plain Python int.
            int_action = convert(action).item()
            ary_state, reward, done, _ = env.step(int_action)  # next_state
            state = torch.as_tensor(env.reset() if done else ary_state,
                                    dtype=torch.float32, device=self.device).unsqueeze(0)
            actions[t] = action
            logprobs[t] = logprob
            rewards[t] = reward
            dones[t] = done
        self.last_state = state
        rewards *= self.reward_scale
        undones = 1.0 - dones.type(torch.float32)
        return states, actions, logprobs, rewards, undones
    def explore_vec_env(self, env, horizon_len: int, if_random: bool = False) -> Tuple[Tensor, ...]:
        """
        Collect trajectories through the actor-environment interaction for a **vectorized** environment instance.
        env: RL training environment. env.reset() env.step(). It should be a vector env.
        horizon_len: collect horizon_len step while exploring to update networks
        return: `(states, actions, logprobs, rewards, undones)` for on-policy training
        states.shape == (horizon_len, env_num, state_dim)
        actions.shape: see NOTE below.
        logprobs.shape == (horizon_len, env_num)
        rewards.shape == (horizon_len, env_num)
        undones.shape == (horizon_len, env_num)
        """
        states = torch.zeros((horizon_len, self.num_envs, self.state_dim), dtype=torch.float32).to(self.device)
        actions = torch.zeros((horizon_len, self.num_envs, 1), dtype=torch.float32).to(self.device)
        logprobs = torch.zeros((horizon_len, self.num_envs), dtype=torch.float32).to(self.device)
        rewards = torch.zeros((horizon_len, self.num_envs), dtype=torch.float32).to(self.device)
        dones = torch.zeros((horizon_len, self.num_envs), dtype=torch.bool).to(self.device)
        state = self.last_state  # shape == (env_num, state_dim) for a vectorized env.
        get_action = self.act.get_action
        convert = self.act.convert_action_for_env
        for t in range(horizon_len):
            action, logprob = get_action(state)
            states[t] = state
            state, reward, done, _ = env.step(convert(action))  # next_state
            actions[t] = action
            logprobs[t] = logprob
            rewards[t] = reward
            dones[t] = done
        self.last_state = state
        # NOTE(review): `actions` was allocated with shape (horizon_len, num_envs, 1),
        # so this unsqueeze yields (horizon_len, num_envs, 1, 1) — inconsistent with
        # explore_one_env, which returns (horizon_len, num_envs, 1). Looks like a
        # latent bug (the buffer was probably meant to be (horizon_len, num_envs));
        # confirm against the consumer of these trajectories before changing.
        actions = actions.unsqueeze(2)
        rewards *= self.reward_scale
        undones = 1.0 - dones.type(torch.float32)
        return states, actions, logprobs, rewards, undones
| 15,023 | 49.416107 | 116 | py |
ElegantRL | ElegantRL-master/elegantrl/agents/AgentREDQ.py | from elegantrl.agents.AgentSAC import AgentSAC
from elegantrl.agents.net import Critic, ActorSAC, ActorFixSAC, CriticREDQ
import torch
import numpy as np
from copy import deepcopy
class AgentREDQ(AgentSAC):  # [ElegantRL.2021.11.11]
    """
    Bases: ``AgentBase``
    Randomized Ensemble Double Q-learning algorithm. “Randomized Ensembled Double Q-Learning: Learning Fast Without A Model”. Xinyue Chen et al.. 2021.
    :param net_dim[int]: the dimension of networks (the width of neural networks)
    :param state_dim[int]: the dimension of state (the number of state vector)
    :param action_dim[int]: the dimension of action (the number of discrete action)
    :param reward_scale: scale the reward to get a appropriate scale Q value
    :param gamma: the discount factor of Reinforcement Learning
    :param learning_rate: learning rate of optimizer
    :param if_per_or_gae: PER (off-policy) or GAE (on-policy) for sparse reward
    :param env_num: the env number of VectorEnv. env_num == 1 means don't use VectorEnv
    :param gpu_id: the gpu_id of the training device. Use CPU when cuda is not available.
    :param G: Update to date ratio
    :param M: subset size of critics
    :param N: ensemble number of critics
    """
    def __init__(self, net_dim, state_dim, action_dim, gpu_id=0, args=None):
        # Configure network classes / flags before AgentSAC's constructor runs.
        self.ClassCri = Critic
        self.get_obj_critic = self.get_obj_critic_raw
        self.ClassAct = ActorSAC
        self.if_use_cri_target = True
        self.if_use_act_target = False
        # Entropy-temperature machinery; filled in by init()/base class.
        self.alpha_log = None
        self.alpha_optim = None
        self.target_entropy = None
        self.obj_critic = (-np.log(0.5)) ** 0.5  # for reliable_lambda
        self.act_class = getattr(self, "act_class", ActorFixSAC)
        self.cri_class = getattr(self, "cri_class", CriticREDQ)
        super().__init__(net_dim, state_dim, action_dim, gpu_id, args)
        self.obj_c = (-np.log(0.5)) ** 0.5  # for reliable_lambda
    def init(
        self,
        net_dim=256,
        state_dim=8,
        action_dim=2,
        reward_scale=1.0,
        gamma=0.99,
        learning_rate=3e-4,
        if_per_or_gae=False,
        env_num=1,
        gpu_id=0,
        G=20,
        M=2,
        N=10,
    ):
        """Build the critic ensemble (N critics, N targets, N optimizers),
        the actor, and the trainable entropy temperature."""
        self.gamma = gamma
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.reward_scale = reward_scale
        self.traj_list = [[] for _ in range(env_num)]
        # REDQ hyper-parameters: G update-to-data ratio, M in-target subset
        # size, N ensemble size.
        self.G = G
        self.M = M
        self.N = N
        self.device = torch.device(
            f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu"
        )
        # One critic (and one optimizer) per ensemble member.
        self.cri_list = [
            self.ClassCri(net_dim, state_dim, action_dim).to(self.device)
            for i in range(self.N)
        ]
        self.act = self.ClassAct(net_dim, state_dim, action_dim).to(self.device)
        self.cri_target_list = [deepcopy(self.cri_list[i]) for i in range(N)]
        self.cri_optim_list = [
            torch.optim.Adam(self.cri_list[i].parameters(), learning_rate)
            for i in range(self.N)
        ]
        self.act_optim = torch.optim.Adam(self.act.parameters(), learning_rate)
        assert isinstance(if_per_or_gae, bool)
        if env_num == 1:
            self.explore_env = self.explore_one_env
        else:
            self.explore_env = self.explore_vec_env
        self.alpha_log = torch.zeros(
            1, requires_grad=True, device=self.device
        )  # trainable parameter
        self.alpha_optim = torch.optim.Adam([self.alpha_log], lr=learning_rate)
        self.target_entropy = np.log(action_dim)
        self.criterion = torch.nn.MSELoss()
    def get_obj_critic_raw(self, buffer, batch_size):
        """Critic loss with uniform sampling via the ensemble's get_q_min.
        NOTE(review): relies on self.act_target / self.cri_target, which this
        class's init() does not create — presumably provided by AgentSAC/AgentBase;
        confirm before calling this path.
        """
        with torch.no_grad():
            reward, mask, action, state, next_s = buffer.sample_batch(batch_size)
            next_a, next_log_prob = self.act_target.get_action_logprob(
                next_s
            )  # stochastic policy
            next_q = self.cri_target.get_q_min(next_s, next_a)
            alpha = self.alpha_log.exp().detach()
            # Soft Bellman target with entropy term.
            q_label = reward + mask * (next_q + next_log_prob * alpha)
        qs = self.cri.get_q_values(state, action)
        # Regress every ensemble head against the same broadcast target.
        obj_critic = self.criterion(qs, q_label * torch.ones_like(qs))
        return obj_critic, state
    def get_obj_critic_per(self, buffer, batch_size):
        """Critic loss with Prioritized Experience Replay weighting; also
        refreshes the buffer's TD-error priorities."""
        with torch.no_grad():
            reward, mask, action, state, next_s, is_weights = buffer.sample_batch(
                batch_size
            )
            next_a, next_log_prob = self.act_target.get_action_logprob(
                next_s
            )  # stochastic policy
            next_q = self.cri_target.get_q_min(next_s, next_a)
            alpha = self.alpha_log.exp().detach()
            q_label = reward + mask * (next_q + next_log_prob * alpha)
        qs = self.cri.get_q_values(state, action)
        td_error = self.criterion(qs, q_label * torch.ones_like(qs)).mean(dim=1)
        obj_critic = (td_error * is_weights).mean()
        buffer.td_error_update(td_error.detach())
        return obj_critic, state
    def get_obj_critic_raw_(self, buffer, batch_size, alpha):
        """
        Calculate the loss of networks with **uniform sampling**.
        :param buffer: the ReplayBuffer instance that stores the trajectories.
        :param batch_size: the size of batch data for Stochastic Gradient Descent (SGD).
        :param alpha: the trade-off coefficient of entropy regularization.
        :return: the loss of the network and states.
        """
        with torch.no_grad():
            batch = buffer.sample_batch(batch_size)
            state = torch.Tensor(batch["obs1"]).to(self.device)
            next_s = torch.Tensor(batch["obs2"]).to(self.device)
            action = torch.Tensor(batch["acts"]).to(self.device)
            reward = torch.Tensor(batch["rews"]).unsqueeze(1).to(self.device)
            mask = torch.Tensor(batch["done"]).unsqueeze(1).to(self.device)
            # state, next_s, actions, reward, mask = buffer.sample_batch(batch_size)
            # print(batch_size,reward.shape,mask.shape,action.shape, state.shape, next_s.shape)
            next_a, next_log_prob = self.act.get_action_logprob(
                next_s
            )  # stochastic policy
            # REDQ in-target randomization: draw a random subset of M target
            # critics out of N for the min-Q computation.
            g = torch.Generator()
            g.manual_seed(torch.randint(high=10000000, size=(1,))[0].item())
            a = torch.randperm(self.N, generator=g)
            # a = np.random.choice(self.N, self.M, replace=False)
            # print(a[:M])
            q_tmp = [self.cri_target_list[a[j]](next_s, next_a) for j in range(self.M)]
            q_prediction_next_cat = torch.cat(q_tmp, 1)
            min_q, min_indices = torch.min(q_prediction_next_cat, dim=1, keepdim=True)
            next_q_with_log_prob = min_q - alpha * next_log_prob
            # Here `mask` holds the done flag, so (1 - mask) zeroes the bootstrap.
            y_q = reward + (1 - mask) * self.gamma * next_q_with_log_prob
        q_values = [
            self.cri_list[j](state, action) for j in range(self.N)
        ]  # todo ensemble
        q_values_cat = torch.cat(q_values, dim=1)
        y_q = y_q.expand(-1, self.N) if y_q.shape[1] == 1 else y_q
        # Sum of per-critic MSE losses (MSELoss mean scaled back up by N).
        obj_critic = self.criterion(q_values_cat, y_q) * self.N
        return obj_critic, state
        # return y_q, state,action
    def select_actions_(self, state, size, env):
        """
        Select continuous actions for exploration
        :param state: states.shape==(batch_size, state_dim, )
        :return: actions.shape==(batch_size, action_dim, ), -1 < action < +1
        """
        state = state.to(self.device)
        actions = self.act.get_action(state)
        return actions.detach().cpu()
    def cri_multi_train_(self, k):
        """Run one gradient step on critic k against the cached (self.state,
        self.action, self.y_q) minibatch.
        NOTE(review): self.state/self.action/self.y_q are only populated by the
        commented-out path in update_net_ — confirm before using this helper.
        """
        q_values = self.cri_list[k](self.state, self.action)
        obj = self.criterion(q_values, self.y_q)
        self.cri_optim_list[k].zero_grad()
        obj.backward()
        self.cri_optim_list[k].step()
    def update_net_(self, buffer, batch_size, soft_update_tau):
        # buffer.update_now_len()
        """
        Update the neural networks by sampling batch data from ``ReplayBuffer``.
        Runs G critic updates per call; the actor and the entropy temperature
        are updated only on the final iteration.
        :param buffer: the ReplayBuffer instance that stores the trajectories.
        :param batch_size: the size of batch data for Stochastic Gradient Descent (SGD).
        :param soft_update_tau: the soft update parameter.
        :return: a tuple of the log information.
        """
        for i in range(self.G):
            alpha = self.alpha_log.cpu().exp().item()
            """objective of critic (loss function of critic)"""
            obj_critic, state = self.get_obj_critic(buffer, batch_size, alpha)
            # self.y_q, self.state,self.action = self.get_obj_critic(buffer, batch_size, alpha)
            # obj_critic sums over the ensemble, so one backward() populates
            # gradients for all N critics at once.
            for q_i in range(self.N):
                self.cri_optim_list[q_i].zero_grad()
            obj_critic.backward()
            if ((i + 1) % self.G == 0) or i == self.G - 1:
                a_noise_pg, logprob = self.act.get_action_logprob(
                    state
                )  # policy gradient
                """objective of alpha (temperature parameter automatic adjustment)"""
                # Freeze the critics while computing the actor objective so the
                # actor's backward pass does not accumulate critic gradients.
                cri_tmp = []
                for j in range(self.N):
                    self.cri_list[j].requires_grad_(False)
                    cri_tmp.append(self.cri_list[j](state, a_noise_pg))
                q_value_pg = torch.cat(cri_tmp, 1)
                q_value_pg = torch.mean(q_value_pg, dim=1, keepdim=True)
                obj_actor = (-q_value_pg + logprob * alpha).mean()  # todo ensemble
                self.act_optim.zero_grad()
                obj_actor.backward()
                for j in range(self.N):
                    self.cri_list[j].requires_grad_(True)
                # Temperature loss drives logprob toward the entropy target.
                obj_alpha = -(self.alpha_log * (logprob - 1).detach()).mean()
                self.optim_update(self.alpha_optim, obj_alpha)
            for q_i in range(self.N):
                self.cri_optim_list[q_i].step()
            if ((i + 1) % self.G == 0) or i == self.G - 1:
                self.act_optim.step()
            # Polyak-average every target critic toward its online critic.
            for q_i in range(self.N):
                self.soft_update(
                    self.cri_target_list[q_i], self.cri_list[q_i], soft_update_tau
                )
        return obj_actor, alpha
| 10,220 | 43.056034 | 151 | py |
ElegantRL | ElegantRL-master/elegantrl/agents/AgentQMix.py | import copy
import torch as th
from torch.optim import RMSprop, Adam
from elegantrl.agents.net import QMix
from elegantrl.envs.utils.marl_utils import (
build_td_lambda_targets,
build_q_lambda_targets,
get_parameters_num,
)
class AgentQMix:
    """
    AgentQMix
    “QMIX: Monotonic Value Function Factorisation for Deep Multi-Agent Reinforcement Learning”. Tabish Rashid. et al.. 2018.
    :param mac: multi agent controller
    :param scheme: data scheme stored in the buffer
    :param logger: log object, record training information
    :param args: parameters related to training
    """
    def __init__(self, mac, scheme, logger, args):
        """Build the QMIX learner: controller + mixing network, their target copies, and one optimiser."""
        self.args = args
        self.mac = mac
        self.logger = logger
        self.last_target_update_episode = 0  # episode index of the last hard target-network sync
        self.device = th.device("cuda" if args.use_cuda else "cpu")
        # NOTE(review): `self.params` collects parameter *iterables*, not flat tensors.
        # Whether Adam/RMSprop and clip_grad_norm_ below accept this depends on what
        # `mac.parameters()` actually returns — confirm it is compatible upstream.
        self.params = [mac.parameters()]
        self.mixer = QMix(args)  # mixing network: per-agent Qs -> monotonic joint Q_tot
        self.target_mixer = copy.deepcopy(self.mixer)
        self.params += [self.mixer.parameters()]
        if self.args.optimizer == "adam":
            self.optimiser = Adam(
                params=self.params,
                lr=args.lr,
                weight_decay=getattr(args, "weight_decay", 0),
            )
        else:
            # default optimiser (matches the original QMIX paper setup)
            self.optimiser = RMSprop(
                params=self.params,
                lr=args.lr,
                alpha=args.optim_alpha,
                eps=args.optim_eps,
            )
        self.target_mac = copy.deepcopy(mac)  # target controller for bootstrapped targets
        self.log_stats_t = -self.args.learner_log_interval - 1  # forces logging on the first train() call
        self.train_t = 0
        self.use_per = getattr(self.args, "use_per", False)  # prioritized experience replay switch
        self.return_priority = getattr(self.args, "return_priority", False)  # priority = episode return instead of TD error
        if self.use_per:
            # running range of priorities, used to normalise them into [0, 1)
            self.priority_max = float("-inf")
            self.priority_min = float("inf")
    def train(self, batch, t_env: int, episode_num: int, per_weight=None):
        """
        Update the neural networks.
        :param batch: episodebatch.
        :param t_env: total environment steps so far (used for logging cadence).
        :param episode_num: total episodes so far (used for target-network sync cadence).
        :param per_weight: prioritized experience replay weights.
        :return: log information.
        """
        # Drop the last timestep: transitions are (t -> t+1), so the final state has no target.
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1])  # zero-out steps after episode termination
        avail_actions = batch["avail_actions"]
        self.mac.agent.train()
        # Replay the episode through the online controller to get per-step agent Q-values.
        mac_out = []
        self.mac.init_hidden(batch.batch_size)  # reset recurrent hidden state before replay
        for t in range(batch.max_seq_length):
            agent_outs = self.mac.forward(batch, t=t)
            mac_out.append(agent_outs)
        mac_out = th.stack(mac_out, dim=1)
        # Q-value of the action each agent actually took.
        chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(
            3
        )  # Remove the last dim
        chosen_action_qvals_ = chosen_action_qvals
        with th.no_grad():
            self.target_mac.agent.train()
            # Replay the episode through the target controller.
            target_mac_out = []
            self.target_mac.init_hidden(batch.batch_size)
            for t in range(batch.max_seq_length):
                target_agent_outs = self.target_mac.forward(batch, t=t)
                target_mac_out.append(target_agent_outs)
            target_mac_out = th.stack(target_mac_out, dim=1)  # Concat across time
            # Double-Q: select greedy actions with the online net, evaluate with the target net.
            mac_out_detach = mac_out.clone().detach()
            mac_out_detach[avail_actions == 0] = -9999999  # never select unavailable actions
            cur_max_actions = mac_out_detach.max(dim=3, keepdim=True)[1]
            target_max_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)
            target_max_qvals = self.target_mixer(target_max_qvals, batch["state"])
            if getattr(self.args, "q_lambda", False):
                # Q(lambda) targets: evaluate the taken actions under the target nets.
                qvals = th.gather(target_mac_out, 3, batch["actions"]).squeeze(3)
                qvals = self.target_mixer(qvals, batch["state"])
                targets = build_q_lambda_targets(
                    rewards,
                    terminated,
                    mask,
                    target_max_qvals,
                    qvals,
                    self.args.gamma,
                    self.args.td_lambda,
                )
            else:
                # TD(lambda) targets from the max target mixed Q-values.
                targets = build_td_lambda_targets(
                    rewards,
                    terminated,
                    mask,
                    target_max_qvals,
                    self.args.n_agents,
                    self.args.gamma,
                    self.args.td_lambda,
                )
        # Mix per-agent chosen Qs into the joint Q_tot and form the masked TD loss.
        chosen_action_qvals = self.mixer(chosen_action_qvals, batch["state"][:, :-1])
        td_error = chosen_action_qvals - targets.detach()
        td_error2 = 0.5 * td_error.pow(2)
        mask = mask.expand_as(td_error2)
        masked_td_error = td_error2 * mask
        if self.use_per:
            # Importance-sampling correction for prioritized replay.
            per_weight = th.from_numpy(per_weight).unsqueeze(-1).to(device=self.device)
            masked_td_error = masked_td_error.sum(1) * per_weight
        loss = L_td = masked_td_error.sum() / mask.sum()
        self.optimiser.zero_grad()
        loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(self.params, self.args.grad_norm_clip)
        self.optimiser.step()
        # Hard-sync the target networks every `target_update_interval` episodes.
        if (
            episode_num - self.last_target_update_episode
        ) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = episode_num
        # Periodic logging of loss and Q statistics.
        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("loss_td", L_td.item(), t_env)
            self.logger.log_stat("grad_norm", grad_norm, t_env)
            mask_elems = mask.sum().item()
            self.logger.log_stat(
                "td_error_abs", (masked_td_error.abs().sum().item() / mask_elems), t_env
            )
            self.logger.log_stat(
                "q_taken_mean",
                (chosen_action_qvals * mask).sum().item()
                / (mask_elems * self.args.n_agents),
                t_env,
            )
            self.logger.log_stat(
                "target_mean",
                (targets * mask).sum().item() / (mask_elems * self.args.n_agents),
                t_env,
            )
            self.log_stats_t = t_env
        info = {}
        if self.use_per:
            if self.return_priority:
                # Priority = episode return, min-max normalised with a running range.
                info["td_errors_abs"] = rewards.sum(1).detach().to("cpu")
                self.priority_max = max(
                    th.max(info["td_errors_abs"]).item(), self.priority_max
                )
                self.priority_min = min(
                    th.min(info["td_errors_abs"]).item(), self.priority_min
                )
                info["td_errors_abs"] = (info["td_errors_abs"] - self.priority_min) / (
                    self.priority_max - self.priority_min + 1e-5
                )
            else:
                # Priority = RMS-style masked absolute TD error per episode.
                info["td_errors_abs"] = (
                    ((td_error.abs() * mask).sum(1) / th.sqrt(mask.sum(1)))
                    .detach()
                    .to("cpu")
                )
        return info
    def _update_targets(self):
        """Hard-copy online controller and mixer weights into their target networks."""
        self.target_mac.load_state(self.mac)
        if self.mixer is not None:
            self.target_mixer.load_state_dict(self.mixer.state_dict())
        self.logger.console_logger.info("Updated target network")
    def cuda(self):
        """Move all four networks (online/target controller and mixer) to the GPU."""
        self.mac.cuda()
        self.target_mac.cuda()
        if self.mixer is not None:
            self.mixer.cuda()
            self.target_mixer.cuda()
    def save_models(self, path):
        """Save controller, mixer, and optimiser state under `path`."""
        self.mac.save_models(path)
        if self.mixer is not None:
            th.save(self.mixer.state_dict(), f"{path}/mixer.th")
        th.save(self.optimiser.state_dict(), f"{path}/opt.th")
    def load_models(self, path):
        """Load controller, mixer, and optimiser state from `path` (onto CPU storage)."""
        self.mac.load_models(path)
        self.target_mac.load_models(path)
        if self.mixer is not None:
            self.mixer.load_state_dict(
                th.load(f"{path}/mixer.th", map_location=lambda storage, loc: storage)
            )
        self.optimiser.load_state_dict(
            th.load(f"{path}/opt.th", map_location=lambda storage, loc: storage)
        )
| 8,125 | 34.640351 | 124 | py |
ElegantRL | ElegantRL-master/elegantrl/agents/AgentTD3.py | import torch
from typing import Tuple
from copy import deepcopy
from torch import Tensor
from elegantrl.train.config import Config
from elegantrl.train.replay_buffer import ReplayBuffer
from elegantrl.agents.AgentBase import AgentBase
from elegantrl.agents.net import Actor, CriticTwin
class AgentTD3(AgentBase):
    """Twin Delayed DDPG algorithm.
    Addressing Function Approximation Error in Actor-Critic Methods. 2018.
    """
    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        # Select network classes before AgentBase builds them (subclasses may pre-set these attrs).
        self.act_class = getattr(self, 'act_class', Actor)
        self.cri_class = getattr(self, 'cri_class', CriticTwin)
        super().__init__(net_dims=net_dims, state_dim=state_dim, action_dim=action_dim, gpu_id=gpu_id, args=args)
        self.act_target = deepcopy(self.act)
        self.cri_target = deepcopy(self.cri)
        self.explore_noise_std = getattr(args, 'explore_noise_std', 0.05)  # standard deviation of exploration noise
        self.policy_noise_std = getattr(args, 'policy_noise_std', 0.10)  # standard deviation of target policy smoothing noise
        self.update_freq = getattr(args, 'update_freq', 2)  # delay update frequency
        self.act.explore_noise_std = self.explore_noise_std  # assign explore_noise_std for agent.act.get_action(state)
    def update_net(self, buffer: ReplayBuffer) -> Tuple[float, ...]:
        """Run the TD3 update loop over the buffer; returns (avg critic objective, avg actor objective)."""
        with torch.no_grad():
            states, actions, rewards, undones = buffer.add_item
            self.update_avg_std_for_normalization(
                states=states.reshape((-1, self.state_dim)),
                returns=self.get_cumulative_rewards(rewards=rewards, undones=undones).reshape((-1,))
            )
        '''update network'''
        obj_critics = 0.0
        obj_actors = 0.0
        update_times = int(buffer.add_size * self.repeat_times)
        assert update_times >= 1
        for update_c in range(update_times):
            obj_critic, state = self.get_obj_critic(buffer, self.batch_size)
            obj_critics += obj_critic.item()
            self.optimizer_update(self.cri_optimizer, obj_critic)
            self.soft_update(self.cri_target, self.cri, self.soft_update_tau)
            if update_c % self.update_freq == 0:  # delay update
                action_pg = self.act(state)  # policy gradient
                obj_actor = self.cri_target(state, action_pg).mean()  # use cri_target is more stable than cri
                obj_actors += obj_actor.item()
                self.optimizer_update(self.act_optimizer, -obj_actor)
                self.soft_update(self.act_target, self.act, self.soft_update_tau)
        # NOTE(review): obj_actors is accumulated only every `update_freq` iterations but
        # averaged over `update_times` — the reported actor objective is scaled down.
        return obj_critics / update_times, obj_actors / update_times
    def get_obj_critic_raw(self, buffer: ReplayBuffer, batch_size: int) -> Tuple[Tensor, Tensor]:
        """Critic loss from a uniform replay sample; returns (loss, sampled states)."""
        with torch.no_grad():
            states, actions, rewards, undones, next_ss = buffer.sample(batch_size)  # next_ss: next states
            next_as = self.act_target.get_action_noise(next_ss, self.policy_noise_std)  # next actions
            next_qs = self.cri_target.get_q_min(next_ss, next_as)  # next q values
            q_labels = rewards + undones * self.gamma * next_qs  # Bellman target (undones masks terminal states)
        q1, q2 = self.cri.get_q1_q2(states, actions)
        obj_critic = self.criterion(q1, q_labels) + self.criterion(q2, q_labels)  # twin critics
        return obj_critic, states
    def get_obj_critic_per(self, buffer: ReplayBuffer, batch_size: int) -> Tuple[Tensor, Tensor]:
        """Critic loss from a prioritized replay sample; also feeds new TD errors back to the buffer."""
        with torch.no_grad():
            states, actions, rewards, undones, next_ss, is_weights, is_indices = buffer.sample_for_per(batch_size)
            # is_weights, is_indices: important sampling `weights, indices` by Prioritized Experience Replay (PER)
            next_as = self.act_target.get_action_noise(next_ss, self.policy_noise_std)
            next_qs = self.cri_target.get_q_min(next_ss, next_as)
            q_labels = rewards + undones * self.gamma * next_qs
        q1, q2 = self.cri.get_q1_q2(states, actions)
        td_errors = self.criterion(q1, q_labels) + self.criterion(q2, q_labels)
        obj_critic = (td_errors * is_weights).mean()  # importance-sampling corrected loss
        buffer.td_error_update_for_per(is_indices.detach(), td_errors.detach())
        return obj_critic, states
| 4,257 | 49.690476 | 119 | py |
ElegantRL | ElegantRL-master/elegantrl/train/run.py | import os
import sys
import time
import torch
import numpy as np
import torch.multiprocessing as mp # torch.multiprocessing extends multiprocessing of Python
from copy import deepcopy
from multiprocessing import Process, Pipe
from elegantrl.train.config import Config, build_env
from elegantrl.train.replay_buffer import ReplayBuffer
from elegantrl.train.evaluator import Evaluator, get_cumulative_rewards_and_steps
if os.name == 'nt': # if is WindowOS (Windows NT)
"""Fix bug about Anaconda in WindowOS
OMP: Error #15: Initializing libiomp5md.dll, but found libiomp5md.dll already initialized.
"""
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
'''train'''
def train_agent(args: Config):
    """Single-process training loop: explore -> update -> evaluate, until break_step or a `stop` file."""
    args.init_before_training()
    torch.set_grad_enabled(False)  # gradients are enabled only around update_net()
    '''init environment'''
    env = build_env(args.env_class, args.env_args, args.gpu_id)
    '''init agent'''
    agent = args.agent_class(args.net_dims, args.state_dim, args.action_dim, gpu_id=args.gpu_id, args=args)
    agent.save_or_load_agent(args.cwd, if_save=False)
    '''init agent.last_state'''
    # Normalise the initial state to shape (num_envs, state_dim) on the agent's device.
    state = env.reset()
    if args.num_envs == 1:
        assert state.shape == (args.state_dim,)
        assert isinstance(state, np.ndarray)
        state = torch.tensor(state, dtype=torch.float32, device=agent.device).unsqueeze(0)
    else:
        assert state.shape == (args.num_envs, args.state_dim)
        assert isinstance(state, torch.Tensor)
        state = state.to(agent.device)
    assert state.shape == (args.num_envs, args.state_dim)
    assert isinstance(state, torch.Tensor)
    agent.last_state = state.detach()
    '''init buffer'''
    if args.if_off_policy:
        buffer = ReplayBuffer(
            gpu_id=args.gpu_id,
            num_seqs=args.num_envs,
            max_size=args.buffer_size,
            state_dim=args.state_dim,
            action_dim=1 if args.if_discrete else args.action_dim,  # discrete actions stored as a single index
            if_use_per=args.if_use_per,
            args=args,
        )
        buffer_items = agent.explore_env(env, args.horizon_len * args.eval_times, if_random=True)
        buffer.update(buffer_items)  # warm up for ReplayBuffer
    else:
        buffer = []  # on-policy: buffer is just the latest rollout
    '''init evaluator'''
    eval_env_class = args.eval_env_class if args.eval_env_class else args.env_class
    eval_env_args = args.eval_env_args if args.eval_env_args else args.env_args
    eval_env = build_env(eval_env_class, eval_env_args, args.gpu_id)
    evaluator = Evaluator(cwd=args.cwd, env=eval_env, args=args, if_tensorboard=False)
    '''train loop'''
    cwd = args.cwd
    break_step = args.break_step
    horizon_len = args.horizon_len
    if_off_policy = args.if_off_policy
    if_save_buffer = args.if_save_buffer
    del args  # everything needed below has been copied out of args
    if_train = True
    while if_train:
        buffer_items = agent.explore_env(env, horizon_len)
        # NOTE(review): index 2 of the rollout tuple is treated as rewards here; for
        # on-policy agents the tuple layout may place logprobs at index 2 — confirm
        # against agent.explore_env's return order.
        exp_r = buffer_items[2].mean().item()
        if if_off_policy:
            buffer.update(buffer_items)
        else:
            buffer[:] = buffer_items
        torch.set_grad_enabled(True)
        logging_tuple = agent.update_net(buffer)
        torch.set_grad_enabled(False)
        evaluator.evaluate_and_save(actor=agent.act, steps=horizon_len, exp_r=exp_r, logging_tuple=logging_tuple)
        # stop when enough steps were collected or a `stop` file appears in cwd
        if_train = (evaluator.total_step <= break_step) and (not os.path.exists(f"{cwd}/stop"))
    print(f'| UsedTime: {time.time() - evaluator.start_time:>7.0f} | SavedDir: {cwd}')
    env.close() if hasattr(env, 'close') else None
    evaluator.save_training_curve_jpg()
    agent.save_or_load_agent(cwd, if_save=True)
    if if_save_buffer and hasattr(buffer, 'save_or_load_history'):
        buffer.save_or_load_history(cwd, if_save=True)
def train_agent_multiprocessing(args: Config):
    """Spawn Learner/Worker/Evaluator processes connected by pipes and block until training finishes."""
    args.init_before_training()

    # Don't use method='fork' when sending GPU tensors between processes.
    # os.name == 'nt' means the Windows NT family (WinOS), which only supports 'spawn'.
    start_method = 'spawn' if os.name == 'nt' else 'forkserver'
    mp.set_start_method(method=start_method, force=True)

    # One one-way pipe per worker (learner -> worker), one one-way pipe into the
    # learner (workers -> learner), and a two-way pipe between learner and evaluator.
    pipes_for_workers = [Pipe(duplex=False) for _ in range(args.num_workers)]  # receive, send
    pipe_for_learner = Pipe(duplex=False)
    pipe_for_evaluator = Pipe(duplex=True)

    # Build the processes.
    learner_proc = Learner(learner_pipe=pipe_for_learner, worker_pipes=pipes_for_workers,
                           evaluator_pipe=pipe_for_evaluator, args=args)
    worker_procs = []
    for idx, pipe in enumerate(pipes_for_workers):
        worker_procs.append(Worker(worker_pipe=pipe, learner_pipe=pipe_for_learner, worker_id=idx, args=args))
    evaluator_proc = EvaluatorProc(evaluator_pipe=pipe_for_evaluator, args=args)

    # Start everything, then wait for every process to exit.
    processes = [learner_proc, *worker_procs, evaluator_proc]
    for proc in processes:
        proc.start()
    for proc in processes:
        proc.join()
class Learner(Process):
    """Learner process: owns the networks and replay buffer, pulls rollouts from Workers,
    updates the agent, and periodically ships the actor to the Evaluator."""
    def __init__(self, learner_pipe: Pipe, worker_pipes: [Pipe], evaluator_pipe: Pipe, args: Config):
        super().__init__()
        self.recv_pipe = learner_pipe[0]  # read end: rollouts from all Workers
        self.send_pipes = [worker_pipe[1] for worker_pipe in worker_pipes]  # write ends: actor to each Worker
        self.eval_pipe = evaluator_pipe[1]  # duplex end shared with the Evaluator
        self.args = args
    def run(self):
        args = self.args
        torch.set_grad_enabled(False)  # gradients are enabled only around update_net()
        '''init agent'''
        agent = args.agent_class(args.net_dims, args.state_dim, args.action_dim, gpu_id=args.gpu_id, args=args)
        agent.save_or_load_agent(args.cwd, if_save=False)
        '''init buffer'''
        if args.if_off_policy:
            buffer = ReplayBuffer(
                gpu_id=args.gpu_id,
                num_seqs=args.num_envs * args.num_workers,  # one sequence slot per (worker, env) pair
                max_size=args.buffer_size,
                state_dim=args.state_dim,
                action_dim=1 if args.if_discrete else args.action_dim,
                if_use_per=args.if_use_per,
                args=args,
            )
        else:
            buffer = []  # on-policy: buffer is just the latest combined rollout
        '''loop'''
        if_off_policy = args.if_off_policy
        if_save_buffer = args.if_save_buffer
        num_workers = args.num_workers
        num_envs = args.num_envs
        state_dim = args.state_dim
        action_dim = args.action_dim
        horizon_len = args.horizon_len
        num_seqs = args.num_envs * args.num_workers
        num_steps = args.horizon_len * args.num_workers
        cwd = args.cwd
        del args
        # Pre-allocated tensors that collect all workers' rollouts side by side.
        agent.last_state = torch.empty((num_seqs, state_dim), dtype=torch.float32, device=agent.device)
        states = torch.empty((horizon_len, num_seqs, state_dim), dtype=torch.float32, device=agent.device)
        actions = torch.empty((horizon_len, num_seqs, action_dim), dtype=torch.float32, device=agent.device)
        rewards = torch.empty((horizon_len, num_seqs), dtype=torch.float32, device=agent.device)
        undones = torch.empty((horizon_len, num_seqs), dtype=torch.bool, device=agent.device)
        if if_off_policy:
            buffer_items_tensor = (states, actions, rewards, undones)
        else:
            logprobs = torch.empty((horizon_len, num_seqs), dtype=torch.float32, device=agent.device)
            buffer_items_tensor = (states, actions, logprobs, rewards, undones)
        if_train = True
        while if_train:
            '''Learner send actor to Workers'''
            for send_pipe in self.send_pipes:
                send_pipe.send(agent.act)
            '''Learner receive (buffer_items, last_state) from Workers'''
            for _ in range(num_workers):
                worker_id, buffer_items, last_state = self.recv_pipe.recv()
                # Each worker owns the column slice [buf_i:buf_j] of the combined tensors.
                buf_i = worker_id * num_envs
                buf_j = worker_id * num_envs + num_envs
                for buffer_item, buffer_tensor in zip(buffer_items, buffer_items_tensor):
                    buffer_tensor[:, buf_i:buf_j] = buffer_item
                agent.last_state[buf_i:buf_j] = last_state
            '''Learner update training data to (buffer, agent)'''
            if if_off_policy:
                buffer.update(buffer_items_tensor)
            else:
                buffer[:] = buffer_items_tensor
            '''agent update network using training data'''
            torch.set_grad_enabled(True)
            logging_tuple = agent.update_net(buffer)
            torch.set_grad_enabled(False)
            '''Learner receive training signal from Evaluator'''
            if self.eval_pipe.poll():  # whether there is any data available to be read of this pipe
                if_train = self.eval_pipe.recv()  # True means evaluator in idle moments.
                actor = agent.act  # so Learner sends an actor to evaluator for evaluation.
            else:
                actor = None
            '''Learner send actor and training log to Evaluator'''
            # NOTE(review): index 2 is `rewards` for off-policy but `logprobs` for
            # on-policy (see buffer_items_tensor above) — confirm exp_r is intended
            # to average rewards in both cases.
            exp_r = buffer_items_tensor[2].mean().item()  # the average rewards of exploration
            self.eval_pipe.send((actor, num_steps, exp_r, logging_tuple))
        '''Learner send the terminal signal to workers after break the loop'''
        for send_pipe in self.send_pipes:
            send_pipe.send(None)
        '''save'''
        agent.save_or_load_agent(cwd, if_save=True)
        if if_save_buffer and hasattr(buffer, 'save_or_load_history'):
            print(f"| LearnerPipe.run: ReplayBuffer saving in {cwd}")
            buffer.save_or_load_history(cwd, if_save=True)
            print(f"| LearnerPipe.run: ReplayBuffer saved in {cwd}")
class Worker(Process):
    """Worker process: repeatedly receives the latest actor from the Learner,
    explores its own environment copy, and sends the rollout back."""
    def __init__(self, worker_pipe: Pipe, learner_pipe: Pipe, worker_id: int, args: Config):
        super().__init__()
        self.recv_pipe = worker_pipe[0]  # read end: actor (or None terminal signal) from Learner
        self.send_pipe = learner_pipe[1]  # write end: (worker_id, rollout, last_state) to Learner
        self.worker_id = worker_id
        self.args = args
    def run(self):
        args = self.args
        worker_id = self.worker_id
        torch.set_grad_enabled(False)  # workers only do inference
        '''init environment'''
        env = build_env(args.env_class, args.env_args, args.gpu_id)
        '''init agent'''
        agent = args.agent_class(args.net_dims, args.state_dim, args.action_dim, gpu_id=args.gpu_id, args=args)
        agent.save_or_load_agent(args.cwd, if_save=False)
        '''init agent.last_state'''
        # Normalise the initial state to shape (num_envs, state_dim) on the agent's device.
        state = env.reset()
        if args.num_envs == 1:
            assert state.shape == (args.state_dim,)
            assert isinstance(state, np.ndarray)
            state = torch.tensor(state, dtype=torch.float32, device=agent.device).unsqueeze(0)
        else:
            assert state.shape == (args.num_envs, args.state_dim)
            assert isinstance(state, torch.Tensor)
            state = state.to(agent.device)
        assert state.shape == (args.num_envs, args.state_dim)
        assert isinstance(state, torch.Tensor)
        agent.last_state = state.detach()
        '''init buffer'''
        horizon_len = args.horizon_len
        if args.if_off_policy:
            # Warm-up rollout with random actions so the Learner can fill its ReplayBuffer.
            buffer_items = agent.explore_env(env, args.horizon_len, if_random=True)
            self.send_pipe.send((worker_id, buffer_items, agent.last_state))
        '''loop'''
        del args
        while True:
            '''Worker receive actor from Learner'''
            actor = self.recv_pipe.recv()
            if actor is None:  # terminal signal from Learner
                break
            '''Worker send the training data to Learner'''
            agent.act = actor
            buffer_items = agent.explore_env(env, horizon_len)
            self.send_pipe.send((worker_id, buffer_items, agent.last_state))
        env.close() if hasattr(env, 'close') else None
class EvaluatorProc(Process):
    """Evaluator process: receives (actor, log) tuples from the Learner, evaluates and
    records them, and tells the Learner whether to keep training."""
    def __init__(self, evaluator_pipe: Pipe, args: Config):
        super().__init__()
        self.pipe = evaluator_pipe[0]  # duplex end shared with the Learner
        self.args = args
    def run(self):
        args = self.args
        torch.set_grad_enabled(False)  # evaluation only needs inference
        '''wandb(weights & biases): Track and visualize all the pieces of your machine learning pipeline.'''
        wandb = None  # local name; rebound by the import below when wandb logging is enabled
        if getattr(args, 'if_use_wandb', False):
            import wandb
            wandb_project_name = "train"
            wandb.init(project=wandb_project_name)
        '''init evaluator'''
        eval_env_class = args.eval_env_class if args.eval_env_class else args.env_class
        eval_env_args = args.eval_env_args if args.eval_env_args else args.env_args
        eval_env = build_env(eval_env_class, eval_env_args, args.gpu_id)
        evaluator = Evaluator(cwd=args.cwd, env=eval_env, args=args, if_tensorboard=False)
        '''loop'''
        cwd = args.cwd
        break_step = args.break_step
        device = torch.device(f"cuda:{args.gpu_id}" if (torch.cuda.is_available() and (args.gpu_id >= 0)) else "cpu")
        del args
        if_train = True
        while if_train:
            '''Evaluator receive training log from Learner'''
            actor, steps, exp_r, logging_tuple = self.pipe.recv()
            wandb.log({"obj_cri": logging_tuple[0], "obj_act": logging_tuple[1]}) if wandb else None
            '''Evaluator evaluate the actor and save the training log'''
            if actor is None:
                evaluator.total_step += steps  # update total_step but don't update recorder
            else:
                actor = actor.to(device)
                evaluator.evaluate_and_save(actor, steps, exp_r, logging_tuple)
            '''Evaluator send the training signal to Learner'''
            # stop when enough steps were collected or a `stop` file appears in cwd
            if_train = (evaluator.total_step <= break_step) and (not os.path.exists(f"{cwd}/stop"))
            self.pipe.send(if_train)
        '''Evaluator save the training log and draw the learning curve'''
        evaluator.save_training_curve_jpg()
        print(f'| UsedTime: {time.time() - evaluator.start_time:>7.0f} | SavedDir: {cwd}')
        eval_env.close() if hasattr(eval_env, 'close') else None
'''render'''
def render_agent(env_class, env_args: dict, net_dims: [int], agent_class, actor_path: str, render_times: int = 8):
    """Load a saved actor network from disk and render several evaluation episodes on CPU."""
    env = build_env(env_class, env_args)

    agent = agent_class(net_dims, env_args['state_dim'], env_args['action_dim'], gpu_id=-1)  # gpu_id=-1: CPU
    actor = agent.act
    del agent  # only the policy network is needed for rendering

    print(f"| render and load actor from: {actor_path}")
    state_dict = torch.load(actor_path, map_location=lambda storage, loc: storage)  # force load onto CPU
    actor.load_state_dict(state_dict)

    for episode_id in range(render_times):
        cumulative_reward, episode_step = get_cumulative_rewards_and_steps(env, actor, if_render=True)
        print(f"|{episode_id:4} cumulative_reward {cumulative_reward:9.3f} episode_step {episode_step:5.0f}")
| 14,346 | 38.852778 | 118 | py |
ElegantRL | ElegantRL-master/elegantrl/train/evaluator.py | import os
import time
import torch.nn
import numpy as np
from torch import Tensor
from typing import Tuple, List
from elegantrl.train.config import Config
class Evaluator:
    """Tracks training progress: periodically evaluates the actor, records statistics,
    prints a one-line summary, and saves checkpoints / learning-curve plots."""
    def __init__(self, cwd: str, env, args: Config, if_tensorboard: bool = False):
        self.cwd = cwd  # current working directory to save model
        self.env = env  # the env for Evaluator, `eval_env = env` in default
        self.agent_id = args.gpu_id
        self.total_step = 0  # the total training step
        self.start_time = time.time()  # `used_time = time.time() - self.start_time`
        self.eval_times = args.eval_times  # number of times that get episodic cumulative return
        self.eval_per_step = args.eval_per_step  # evaluate the agent per training steps
        self.eval_step_counter = -self.eval_per_step  # `self.total_step > self.eval_step_counter + self.eval_per_step`
        self.save_gap = args.save_gap  # save a periodic checkpoint every `save_gap` evaluations
        self.save_counter = 0
        self.if_keep_save = args.if_keep_save
        self.if_over_write = args.if_over_write
        self.recorder_path = f'{cwd}/recorder.npy'
        self.recorder = []  # total_step, r_avg, r_std, obj_c, ...
        self.max_r = -np.inf  # best average episode return seen so far
        print("| Evaluator:"
              "\n| `step`: Number of samples, or total training steps, or running times of `env.step()`."
              "\n| `time`: Time spent from the start of training to this moment."
              "\n| `avgR`: Average value of cumulative rewards, which is the sum of rewards in an episode."
              "\n| `stdR`: Standard dev of cumulative rewards, which is the sum of rewards in an episode."
              "\n| `avgS`: Average of steps in an episode."
              "\n| `objC`: Objective of Critic network. Or call it loss function of critic network."
              "\n| `objA`: Objective of Actor network. It is the average Q value of the critic network."
              f"\n{'#' * 80}\n"
              f"{'ID':<3}{'Step':>8}{'Time':>8} |"
              f"{'avgR':>8}{'stdR':>7}{'avgS':>7}{'stdS':>6} |"
              f"{'expR':>8}{'objC':>7}{'objA':>7}{'etc.':>7}")
        # Pick the rollout helper matching the env type (single vs vectorized).
        if getattr(env, 'num_envs', 1) == 1:  # get attribute
            self.get_cumulative_rewards_and_step = self.get_cumulative_rewards_and_step_single_env
        else:  # vectorized environment
            self.get_cumulative_rewards_and_step = self.get_cumulative_rewards_and_step_vectorized_env
        if if_tensorboard:
            from torch.utils.tensorboard import SummaryWriter
            self.tensorboard = SummaryWriter(f"{cwd}/tensorboard")
        else:
            self.tensorboard = None
    def evaluate_and_save(self, actor: torch.nn, steps: int, exp_r: float, logging_tuple: tuple):
        """Account `steps`; if the evaluation interval has elapsed, evaluate `actor`,
        log/print the statistics, and possibly save a checkpoint."""
        self.total_step += steps  # update total training steps
        if self.total_step < self.eval_step_counter + self.eval_per_step:
            return  # not time to evaluate yet
        self.eval_step_counter = self.total_step
        rewards_step_ten = self.get_cumulative_rewards_and_step(actor)
        returns = rewards_step_ten[:, 0]  # episodic cumulative returns of an
        steps = rewards_step_ten[:, 1]  # episodic step number (rebinds the `steps` parameter)
        avg_r = returns.mean().item()
        std_r = returns.std().item()
        avg_s = steps.mean().item()
        std_s = steps.std().item()
        train_time = int(time.time() - self.start_time)
        '''record the training information'''
        self.recorder.append((self.total_step, avg_r, std_r, exp_r, *logging_tuple))  # update recorder
        if self.tensorboard:
            # Each metric is logged twice: indexed by sample count and by wall-clock time.
            self.tensorboard.add_scalar("info/critic_loss_sample", logging_tuple[0], self.total_step)
            self.tensorboard.add_scalar("info/actor_obj_sample", -1 * logging_tuple[1], self.total_step)
            self.tensorboard.add_scalar("reward/avg_reward_sample", avg_r, self.total_step)
            self.tensorboard.add_scalar("reward/std_reward_sample", std_r, self.total_step)
            self.tensorboard.add_scalar("reward/exp_reward_sample", exp_r, self.total_step)
            self.tensorboard.add_scalar("info/critic_loss_time", logging_tuple[0], train_time)
            self.tensorboard.add_scalar("info/actor_obj_time", -1 * logging_tuple[1], train_time)
            self.tensorboard.add_scalar("reward/avg_reward_time", avg_r, train_time)
            self.tensorboard.add_scalar("reward/std_reward_time", std_r, train_time)
            self.tensorboard.add_scalar("reward/exp_reward_time", exp_r, train_time)
        '''print some information to Terminal'''
        prev_max_r = self.max_r
        self.max_r = max(self.max_r, avg_r)  # update max average cumulative rewards
        print(f"{self.agent_id:<3}{self.total_step:8.2e}{train_time:8.0f} |"
              f"{avg_r:8.2f}{std_r:7.1f}{avg_s:7.0f}{std_s:6.0f} |"
              f"{exp_r:8.2f}{''.join(f'{n:7.2f}' for n in logging_tuple)}")
        if_save = avg_r > prev_max_r  # new best average return?
        if if_save:
            self.save_training_curve_jpg()
        if not self.if_keep_save:
            return
        self.save_counter += 1
        actor_path = None
        if if_save:  # save checkpoint with the highest episode return
            if self.if_over_write:
                actor_path = f"{self.cwd}/actor.pt"
            else:
                actor_path = f"{self.cwd}/actor__{self.total_step:012}_{self.max_r:09.3f}.pt"
        elif self.save_counter == self.save_gap:
            # periodic checkpoint even without a new best return
            self.save_counter = 0
            if self.if_over_write:
                actor_path = f"{self.cwd}/actor.pt"
            else:
                actor_path = f"{self.cwd}/actor__{self.total_step:012}.pt"
        if actor_path:
            torch.save(actor, actor_path)  # save policy network in *.pt
    def save_or_load_recoder(self, if_save: bool):
        """Persist or restore the recorder array (name kept for API compatibility despite the typo)."""
        if if_save:
            np.save(self.recorder_path, self.recorder)
        elif os.path.exists(self.recorder_path):
            recorder = np.load(self.recorder_path)
            self.recorder = [tuple(i) for i in recorder]  # convert numpy to list
            self.total_step = self.recorder[-1][0]
    def get_cumulative_rewards_and_step_single_env(self, actor) -> Tensor:
        """Evaluate `actor` for `eval_times` episodes in a single env; returns an (N, 2) tensor."""
        rewards_steps_list = [get_cumulative_rewards_and_steps(self.env, actor) for _ in range(self.eval_times)]
        rewards_steps_ten = torch.tensor(rewards_steps_list, dtype=torch.float32)
        return rewards_steps_ten  # rewards_steps_ten.shape[1] == 2
    def get_cumulative_rewards_and_step_vectorized_env(self, actor) -> Tensor:
        """Evaluate `actor` in a vectorized env, batching episodes across env copies; returns an (N, 2) tensor."""
        rewards_step_list = [get_cumulative_rewards_and_step_from_vec_env(self.env, actor)
                             for _ in range(max(1, self.eval_times // self.env.num_envs))]
        rewards_step_list = sum(rewards_step_list, [])  # flatten list of lists
        rewards_step_ten = torch.tensor(rewards_step_list)
        return rewards_step_ten  # rewards_steps_ten.shape[1] == 2
    def save_training_curve_jpg(self):
        """Save the recorder to disk and render the learning-curve figure."""
        recorder = np.array(self.recorder)
        train_time = int(time.time() - self.start_time)
        total_step = int(self.recorder[-1][0])
        fig_title = f"step_time_maxR_{int(total_step)}_{int(train_time)}_{self.max_r:.3f}"
        draw_learning_curve(recorder=recorder, fig_title=fig_title, save_path=f"{self.cwd}/LearningCurve.jpg")
        np.save(self.recorder_path, recorder)  # save self.recorder for `draw_learning_curve()`
"""util"""
def get_cumulative_rewards_and_steps(env, actor, if_render: bool = False) -> Tuple[float, int]:
    """Roll out one episode with `actor` and return (cumulative_return, episode_steps).

    :param env: a single (non-vectorized) env exposing `reset`, `step`, `max_step`, `if_discrete`.
    :param actor: policy network; its parameters' device determines where inputs are placed.
    :param if_render: render each step (with a short sleep) for visualization.
    :return: tuple of (sum of rewards in the episode, number of `env.step()` calls).

    Fix: `steps` is now an explicit counter initialized to 0. The old version left it as
    `None` when the loop body never ran (`max_step == 0`), so `steps += 1` raised TypeError.
    """
    max_step = env.max_step
    if_discrete = env.if_discrete
    device = next(actor.parameters()).device  # net.parameters() is a Python generator.

    returns = 0.0  # sum of rewards in an episode
    steps = 0  # number of env.step() calls completed
    done = False
    state = env.reset()
    while steps < max_step and not done:
        tensor_state = torch.as_tensor(state, dtype=torch.float32, device=device).unsqueeze(0)
        tensor_action = actor(tensor_state)
        if if_discrete:
            tensor_action = tensor_action.argmax(dim=1)
        action = tensor_action.detach().cpu().numpy()[0]  # not need detach(), because using torch.no_grad() outside
        state, reward, done, _ = env.step(action)
        returns += reward
        steps += 1

        if if_render:
            env.render()
            time.sleep(0.02)
    if not done:
        # episode was truncated by max_step (or max_step == 0) without terminating
        print("| get_rewards_and_step: WARNING. max_step > 12345")
    returns = getattr(env, 'cumulative_returns', returns)  # prefer env-tracked returns if provided
    return returns, steps
def get_cumulative_rewards_and_step_from_vec_env(env, actor) -> List[Tuple[float, int]]:
    """Roll out `max_step` steps in a vectorized env and split the trajectories into
    completed episodes; returns a list of (cumulative_return, episode_steps) tuples."""
    device = env.device
    env_num = env.num_envs
    max_step = env.max_step
    if_discrete = env.if_discrete
    '''get returns and dones (GPU)'''
    returns = torch.empty((max_step, env_num), dtype=torch.float32, device=device)
    dones = torch.empty((max_step, env_num), dtype=torch.bool, device=device)
    state = env.reset()  # must reset in vectorized env
    for t in range(max_step):
        action = actor(state.to(device))
        # assert action.shape == (env.env_num, env.action_dim)
        if if_discrete:
            action = action.argmax(dim=1, keepdim=True)
        state, reward, done, info_dict = env.step(action)
        returns[t] = reward
        dones[t] = done
    '''get cumulative returns and step'''
    if hasattr(env, 'cumulative_returns'):  # GPU
        # Env tracks returns itself; every reported episode is attributed max_step steps.
        returns_step_list = [(ret, env.max_step) for ret in env.cumulative_returns]
    else:  # CPU
        returns = returns.cpu()
        dones = dones.cpu()
        returns_step_list = []
        for i in range(env_num):
            # Episode boundaries: indices one past each `done` flag in this env copy.
            dones_where = torch.where(dones[:, i] == 1)[0] + 1
            episode_num = len(dones_where)
            if episode_num == 0:
                continue  # no completed episode in this env copy; partial tail is discarded
            j0 = 0
            for j1 in dones_where.tolist():
                reward_sum = returns[j0:j1, i].sum().item()  # cumulative returns of an episode
                steps_num = j1 - j0  # step number of an episode
                returns_step_list.append((reward_sum, steps_num))
                j0 = j1
    return returns_step_list
def draw_learning_curve(recorder: np.ndarray = None,
                        fig_title: str = 'learning_curve',
                        save_path: str = 'learning_curve.jpg'):
    """Render the training recorder as a two-panel learning curve and save it as a JPEG.

    Top panel: average episode return (mean ± std) with exploration reward on a twin axis.
    Bottom panel: actor objective, critic objective, and any extra recorded columns.
    """
    # Unpack recorder columns; the x-axis is the number of training steps.
    steps = recorder[:, 0]
    r_avg, r_std, r_exp = recorder[:, 1], recorder[:, 2], recorder[:, 3]
    obj_c, obj_a = recorder[:, 4], recorder[:, 5]

    import matplotlib as mpl
    # `mpl.use('Agg')` must come before `import matplotlib.pyplot` so figures can be
    # generated without a running X server. https://stackoverflow.com/a/4935945/9293137
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    fig, axs = plt.subplots(2)

    # Top panel: returns.
    axis_return = axs[0]
    axis_return.cla()
    axis_explore = axs[0].twinx()
    explore_color = 'darkcyan'
    axis_explore.set_ylabel('Explore AvgReward', color=explore_color)
    axis_explore.plot(steps, r_exp, color=explore_color, alpha=0.5, )
    axis_explore.tick_params(axis='y', labelcolor=explore_color)
    return_color = 'lightcoral'
    axis_return.set_ylabel('Episode Return', color=return_color)
    axis_return.plot(steps, r_avg, label='Episode Return', color=return_color)
    axis_return.fill_between(steps, r_avg - r_std, r_avg + r_std, facecolor=return_color, alpha=0.3)
    axis_return.grid()

    # Bottom panel: objectives.
    axis_obj_a = axs[1]
    axis_obj_a.cla()
    axis_obj_c = axs[1].twinx()
    critic_color = 'darkcyan'
    axis_obj_c.set_ylabel('objC', color=critic_color)
    axis_obj_c.fill_between(steps, obj_c, facecolor=critic_color, alpha=0.2, )
    axis_obj_c.tick_params(axis='y', labelcolor=critic_color)
    actor_color = 'royalblue'
    axis_obj_a.set_xlabel('Total Steps')
    axis_obj_a.set_ylabel('objA', color=actor_color)
    axis_obj_a.plot(steps, obj_a, label='objA', color=actor_color)
    axis_obj_a.tick_params(axis='y', labelcolor=actor_color)
    for extra_col in range(6, recorder.shape[1]):
        # any additional logged quantities are drawn in grey, labelled by column index
        axis_obj_a.plot(steps, recorder[:, extra_col], label=f'{extra_col}', color='grey', alpha=0.5)
    axis_obj_a.legend()
    axis_obj_a.grid()

    # Save and close to avoid the `figure.max_open_warning` about too many open figures.
    plt.title(fig_title, y=2.3)
    plt.savefig(save_path)
    plt.close('all')
    # plt.show() cannot be used with the non-interactive 'Agg' backend
"""learning curve"""
def demo_evaluator_actor_pth():
    """Demo: build a fresh PPO actor for LunarLanderContinuous-v2 and report its average return and steps."""
    import gym
    from elegantrl.agents.AgentPPO import AgentPPO
    from elegantrl.train.config import Config, build_env

    gpu_id = 0  # >=0 means GPU ID, -1 means CPU
    agent_class = AgentPPO
    env_class = gym.make
    env_args = {'env_num': 1,
                'env_name': 'LunarLanderContinuous-v2',
                'max_step': 1000,
                'state_dim': 8,
                'action_dim': 2,
                'if_discrete': False,
                'target_return': 200,
                'id': 'LunarLanderContinuous-v2'}
    # actor_path = './LunarLanderContinuous-v2_PPO_1/actor.pt'
    eval_times = 4
    net_dim = 2 ** 7

    '''init'''
    args = Config(agent_class=agent_class, env_class=env_class, env_args=env_args)
    env = build_env(env_class=args.env_class, env_args=args.env_args)
    act = agent_class(net_dim, env.state_dim, env.action_dim, gpu_id=gpu_id, args=args).act
    # act.load_state_dict(torch.load(actor_path, map_location=lambda storage, loc: storage))

    '''evaluate'''
    reward_step_pairs = [get_cumulative_rewards_and_steps(env, act) for _ in range(eval_times)]
    reward_step_arr = np.array(reward_step_pairs, dtype=np.float32)
    r_avg, s_avg = reward_step_arr.mean(axis=0)  # average of episode return and episode step
    print('r_avg, s_avg', r_avg, s_avg)
    return r_avg, s_avg
def demo_evaluate_actors(dir_path: str, gpu_id: int, agent, env_args: dict, eval_times=2, net_dim=128):
    """Evaluate every saved actor checkpoint found in `dir_path`.

    dir_path: directory containing actor checkpoint files.
    gpu_id: GPU ID when >= 0, -1 means CPU.
    agent: agent class used to rebuild the actor network.
    env_args: env description dict passed to build_env().
    eval_times: episodes averaged per checkpoint.
    net_dim: width of the actor network.
    return: float32 array of rows (step, avg_return, avg_step), sorted by step.
    """
    import gym
    from elegantrl.train.config import build_env

    env_class = gym.make
    env = build_env(env_class=env_class, env_args=env_args)
    act = agent(net_dim, env.state_dim, env.action_dim, gpu_id=gpu_id).act

    records = []
    # checkpoint file names are assumed to be exactly 19 characters, with the
    # training step encoded in chars [6:15] — TODO confirm the naming scheme
    for file_name in os.listdir(dir_path):
        if len(file_name) != 19:
            continue
        checkpoint_path = f"{dir_path}/{file_name}"
        act.load_state_dict(torch.load(checkpoint_path, map_location=lambda storage, loc: storage))

        episode_results = np.array([get_cumulative_rewards_and_steps(env, act)
                                    for _ in range(eval_times)], dtype=np.float32)
        r_avg, s_avg = episode_results.mean(axis=0)  # average episode return and episode step
        records.append((int(file_name[6:15]), r_avg, s_avg))

    step_epi_r_s_ary = np.array(records, dtype=np.float32)
    # order rows by training step before returning
    return step_epi_r_s_ary[step_epi_r_s_ary[:, 0].argsort()]
def demo_load_pendulum_and_render():
    """Load a saved PPO actor for Pendulum-v1 and render one greedy episode,
    printing the cumulative return and the number of steps taken."""
    import torch
    from elegantrl.agents.AgentPPO import AgentPPO
    from elegantrl.train.config import Config, build_env
    gpu_id = 0  # >=0 means GPU ID, -1 means CPU
    agent_class = AgentPPO
    from elegantrl.envs.CustomGymEnv import PendulumEnv
    env_class = PendulumEnv
    env_args = {'env_num': 1,
                'env_name': 'Pendulum-v1',
                'state_dim': 3,
                'action_dim': 1,
                'if_discrete': False, }
    actor_path = './Pendulum-v1_PPO_0/actor.pt'  # checkpoint produced by a previous training run
    net_dim = 2 ** 7
    '''init'''
    env = build_env(env_class=env_class, env_args=env_args)
    args = Config(agent_class=agent_class, env_class=env_class, env_args=env_args)
    act = agent_class(net_dim, env.state_dim, env.action_dim, gpu_id=gpu_id, args=args).act
    act.load_state_dict(torch.load(actor_path, map_location=lambda storage, loc: storage))
    '''evaluate'''
    # eval_times = 2 ** 7
    # from elegantrl.envs.CustomGymEnv import PendulumEnv
    # eval_env = PendulumEnv()
    # from elegantrl.train.evaluator import get_cumulative_returns_and_step
    # r_s_ary = [get_cumulative_returns_and_step(eval_env, act) for _ in range(eval_times)]
    # r_s_ary = np.array(r_s_ary, dtype=np.float32)
    # r_avg, s_avg = r_s_ary.mean(axis=0)  # average of episode return and episode step
    #
    # print('r_avg, s_avg', r_avg, s_avg)
    '''render'''
    max_step = env.max_step
    if_discrete = env.if_discrete
    device = next(act.parameters()).device  # net.parameters() is a Python generator.
    state = env.reset()
    steps = None
    returns = 0.0  # sum of rewards in an episode
    for steps in range(max_step):
        s_tensor = torch.as_tensor(state, dtype=torch.float32, device=device).unsqueeze(0)
        # greedy action: argmax over logits for discrete envs, raw network output otherwise
        a_tensor = act(s_tensor).argmax(dim=1) if if_discrete else act(s_tensor)
        action = a_tensor.detach().cpu().numpy()[0]  # not need detach(), because using torch.no_grad() outside
        state, reward, done, _ = env.step(action * 2)  # for Pendulum specially
        returns += reward
        env.render()
        if done:
            break
    # prefer the env's own episode-return bookkeeping when it exposes one
    returns = getattr(env, 'cumulative_returns', returns)
    steps += 1  # convert last loop index to a step count
    print(f"\n| cumulative_returns {returns}"
          f"\n| episode steps {steps}")
def run():
    """Collect saved `*-step_epi_r_s_ary.txt` evaluation arrays for one env and
    plot the learning curve (episode return, optionally episode step)."""
    from elegantrl.agents.AgentPPO import AgentPPO
    flag_id = 1  # int(sys.argv[1])  # selects which env/gpu pair below is used
    gpu_id = [2, 3][flag_id]
    agent = AgentPPO
    env_args = [
        {'env_num': 1,
         'env_name': 'LunarLanderContinuous-v2',
         'max_step': 1000,
         'state_dim': 8,
         'action_dim': 2,
         'if_discrete': False,
         'target_return': 200,
         'eval_times': 2 ** 4,
         'id': 'LunarLanderContinuous-v2'},
        {'env_num': 1,
         'env_name': 'BipedalWalker-v3',
         'max_step': 1600,
         'state_dim': 24,
         'action_dim': 4,
         'if_discrete': False,
         'target_return': 300,
         'eval_times': 2 ** 3,
         'id': 'BipedalWalker-v3', },
    ][flag_id]
    env_name = env_args['env_name']
    print('gpu_id', gpu_id)
    print('env_name', env_name)
    '''save step_epi_r_s_ary'''
    # cwd_path = '.'
    # dir_names = [name for name in os.listdir(cwd_path)
    #              if name.find(env_name) >= 0 and os.path.isdir(name)]
    # for dir_name in dir_names:
    #     dir_path = f"{cwd_path}/{dir_name}"
    #     step_epi_r_s_ary = demo_evaluate_actors(dir_path, gpu_id, agent, env_args)
    #     np.savetxt(f"{dir_path}-step_epi_r_s_ary.txt", step_epi_r_s_ary)
    '''load step_epi_r_s_ary'''
    # gather every saved evaluation array for this env and merge them
    step_epi_r_s_ary = []
    cwd_path = '.'
    ary_names = [name for name in os.listdir('.')
                 if name.find(env_name) >= 0 and name[-4:] == '.txt']
    for ary_name in ary_names:
        ary_path = f"{cwd_path}/{ary_name}"
        ary = np.loadtxt(ary_path)
        step_epi_r_s_ary.append(ary)
    step_epi_r_s_ary = np.vstack(step_epi_r_s_ary)
    step_epi_r_s_ary = step_epi_r_s_ary[step_epi_r_s_ary[:, 0].argsort()]  # sort rows by step
    print('step_epi_r_s_ary.shape', step_epi_r_s_ary.shape)
    '''plot'''
    import matplotlib.pyplot as plt
    # plt.plot(step_epi_r_s_ary[:, 0], step_epi_r_s_ary[:, 1])
    # aggregate every `n` consecutive rows into (x_avg, y_avg, y_up, y_dw, y_step)
    # where y_up/y_dw are the means above/below the group mean (a smoothing band)
    plot_x_y_up_dw_step = []
    n = 8
    for i in range(0, len(step_epi_r_s_ary), n):
        y_ary = step_epi_r_s_ary[i:i + n, 1]
        if y_ary.shape[0] <= 1:
            continue  # a band needs at least two samples
        y_avg = y_ary.mean()
        y_up = y_ary[y_ary > y_avg].mean()
        y_dw = y_ary[y_ary <= y_avg].mean()
        y_step = step_epi_r_s_ary[i:i + n, 2].mean()
        x_avg = step_epi_r_s_ary[i:i + n, 0].mean()
        plot_x_y_up_dw_step.append((x_avg, y_avg, y_up, y_dw, y_step))
    if_show_episode_step = True
    color0 = 'royalblue'
    color1 = 'lightcoral'
    # color2 = 'darkcyan'
    # colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd',
    #           '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
    title = f"{env_name}_{agent.__name__}_ElegantRL"
    fig, ax = plt.subplots(1)
    plot_x = [item[0] for item in plot_x_y_up_dw_step]
    plot_y = [item[1] for item in plot_x_y_up_dw_step]
    plot_y_up = [item[2] for item in plot_x_y_up_dw_step]
    plot_y_dw = [item[3] for item in plot_x_y_up_dw_step]
    ax.plot(plot_x, plot_y, label='Episode Return', color=color0)
    ax.fill_between(plot_x, plot_y_up, plot_y_dw, facecolor=color0, alpha=0.3)
    ax.set_ylabel('Episode Return', color=color0)
    ax.tick_params(axis='y', labelcolor=color0)
    ax.grid(True)
    if if_show_episode_step:
        # second y-axis with episode length, drawn as a shaded area
        ax_twin = ax.twinx()
        plot_y_step = [item[4] for item in plot_x_y_up_dw_step]
        ax_twin.fill_between(plot_x, 0, plot_y_step, facecolor=color1, alpha=0.3)
        ax_twin.set_ylabel('Episode Step', color=color1)
        ax_twin.tick_params(axis='y', labelcolor=color1)
        ax_twin.set_ylim(0, np.max(plot_y_step) * 2)
    print('title', title)
    plt.title(title)
    plt.show()
if __name__ == '__main__':
    # Script entry point: plot learning curves from previously saved arrays.
    # demo_evaluate_actors()
    run()
| 21,822 | 37.556537 | 119 | py |
ElegantRL | ElegantRL-master/elegantrl/train/config.py | import os
import torch
import numpy as np
from typing import List
from torch import Tensor
from multiprocessing import Pipe, Process
class Config:
    """Hyper-parameter container for training, evaluation and device setup.

    agent_class: the DRL agent class; its name decides on-/off-policy defaults.
    env_class: environment class, instantiated as `env_class(**env_args)`.
    env_args: dict describing the env (env_name, num_envs, max_step,
        state_dim, action_dim, if_discrete).
    """

    def __init__(self, agent_class=None, env_class=None, env_args=None):
        self.num_envs = None  # placeholder; overwritten from env_args below
        self.agent_class = agent_class  # agent = agent_class(...)
        self.if_off_policy = self.get_if_off_policy()  # whether off-policy or on-policy of DRL algorithm

        '''Argument of environment'''
        self.env_class = env_class  # env = env_class(**env_args)
        self.env_args = env_args  # env = env_class(**env_args)
        if env_args is None:  # dummy env_args
            env_args = {'env_name': None,
                        'num_envs': 1,
                        'max_step': 12345,
                        'state_dim': None,
                        'action_dim': None,
                        'if_discrete': None, }
        env_args.setdefault('num_envs', 1)  # `num_envs=1` in default in single env.
        env_args.setdefault('max_step', 12345)  # `max_step=12345` in default, which is a large enough value.
        self.env_name = env_args['env_name']  # the name of environment. Be used to set 'cwd'.
        self.num_envs = env_args['num_envs']  # the number of sub envs in vectorized env. `num_envs=1` in single env.
        self.max_step = env_args['max_step']  # the max step number of an episode. 'set as 12345 in default.
        self.state_dim = env_args['state_dim']  # vector dimension (feature number) of state
        self.action_dim = env_args['action_dim']  # vector dimension (feature number) of action
        self.if_discrete = env_args['if_discrete']  # discrete or continuous action space

        '''Arguments for reward shaping'''
        self.gamma = 0.99  # discount factor of future rewards
        self.reward_scale = 2 ** 0  # an approximate target reward usually be closed to 256

        '''Arguments for training'''
        self.net_dims = (64, 32)  # the middle layer dimension of MLP (MultiLayer Perceptron)
        self.learning_rate = 6e-5  # the learning rate for network updating
        self.clip_grad_norm = 3.0  # 0.1 ~ 4.0, clip the gradient after normalization
        self.state_value_tau = 0  # the tau of normalize for value and state `std = (1-std)*std + tau*std`
        self.soft_update_tau = 5e-3  # 2 ** -8 ~= 5e-3. the tau of soft target update `net = (1-tau)*net + tau*net1`
        if self.if_off_policy:  # off-policy
            self.batch_size = int(64)  # num of transitions sampled from replay buffer.
            self.horizon_len = int(512)  # collect horizon_len step while exploring, then update networks
            self.buffer_size = int(1e6)  # ReplayBuffer size. First in first out for off-policy.
            self.repeat_times = 1.0  # repeatedly update network using ReplayBuffer to keep critic's loss small
            self.if_use_per = False  # use PER (Prioritized Experience Replay) for sparse reward
        else:  # on-policy
            self.batch_size = int(128)  # num of transitions sampled from replay buffer.
            self.horizon_len = int(2048)  # collect horizon_len step while exploring, then update network
            self.buffer_size = None  # ReplayBuffer size. Empty the ReplayBuffer for on-policy.
            self.repeat_times = 8.0  # repeatedly update network using ReplayBuffer to keep critic's loss small
            self.if_use_vtrace = False  # use V-trace + GAE (Generalized Advantage Estimation) for sparse reward

        '''Arguments for device'''
        self.gpu_id = int(0)  # `int` means the ID of single GPU, -1 means CPU
        self.num_workers = 2  # rollout workers number pre GPU (adjust it to get high GPU usage)
        self.num_threads = 8  # cpu_num for pytorch, `torch.set_num_threads(self.num_threads)`
        self.random_seed = 0  # initialize random seed in self.init_before_training()
        self.learner_gpus = 0  # `int` means the ID of single GPU, -1 means CPU

        '''Arguments for evaluate'''
        self.cwd = None  # current working directory to save model. None means set automatically
        self.if_remove = True  # remove the cwd folder? (True, False, None:ask me)
        self.break_step = np.inf  # break training if 'total_step > break_step'
        self.break_score = np.inf  # break training if `cumulative_rewards > break_score`
        self.if_keep_save = True  # keeping save the checkpoint. False means save until stop training.
        self.if_over_write = False  # overwrite the best policy network. `self.cwd/actor.pth`
        self.if_save_buffer = False  # if save the replay buffer for continuous training after stop training
        self.save_gap = int(8)  # save actor f"{cwd}/actor_*.pth" for learning curve.
        self.eval_times = int(3)  # number of times that get the average episodic cumulative return
        self.eval_per_step = int(2e4)  # evaluate the agent per training steps
        self.eval_env_class = None  # eval_env = eval_env_class(*eval_env_args)
        self.eval_env_args = None  # eval_env = eval_env_class(*eval_env_args)

    def init_before_training(self):
        """Seed RNGs, configure torch, and prepare the working directory (`cwd`)."""
        np.random.seed(self.random_seed)
        torch.manual_seed(self.random_seed)
        torch.set_num_threads(self.num_threads)
        torch.set_default_dtype(torch.float32)

        '''set cwd (current working directory) for saving model'''
        if self.cwd is None:  # set cwd (current working directory) for saving model
            # e.g. './LunarLanderContinuous-v2_PPO_0' (agent name without the 'Agent' prefix)
            self.cwd = f'./{self.env_name}_{self.agent_class.__name__[5:]}_{self.random_seed}'

        '''remove history'''
        if self.if_remove is None:
            self.if_remove = bool(input(f"| Arguments PRESS 'y' to REMOVE: {self.cwd}? ") == 'y')
        if self.if_remove:
            import shutil
            shutil.rmtree(self.cwd, ignore_errors=True)
            print(f"| Arguments Remove cwd: {self.cwd}")
        else:
            print(f"| Arguments Keep cwd: {self.cwd}")
        os.makedirs(self.cwd, exist_ok=True)

    def get_if_off_policy(self) -> bool:
        """Return True when the agent class name contains no on-policy algorithm tag."""
        agent_name = self.agent_class.__name__ if self.agent_class else ''
        on_policy_names = ('SARSA', 'VPG', 'A2C', 'A3C', 'TRPO', 'PPO', 'MPO')
        return all([agent_name.find(s) == -1 for s in on_policy_names])

    def print(self):
        """Pretty-print all configuration attributes."""
        from pprint import pprint
        pprint(vars(self))  # prints out args in a neat, readable format
def build_env(env_class=None, env_args: dict = None, gpu_id: int = -1):
    """Instantiate a single or vectorized environment and attach standard attributes.

    env_class: the environment class, or `gym.make` for registered Gym envs.
    env_args: dict describing the env; must contain 'env_name', 'state_dim',
        'action_dim' and 'if_discrete'. 'num_envs'/'max_step' get defaults.
    gpu_id: GPU ID for a vectorized env (>=0), -1 means CPU.
    return: the env instance with env_name/num_envs/max_step/state_dim/
        action_dim/if_discrete set as attributes.
    raises ValueError: when env_args is None (previously an opaque TypeError).
    """
    if env_args is None:
        raise ValueError("build_env() requires `env_args` (a dict describing the env)")
    # fix: work on a copy so the caller's dict is not mutated by the
    # gpu_id assignment and setdefault() calls below
    env_args = dict(env_args)
    env_args['gpu_id'] = gpu_id  # set gpu_id for vectorized env before build it
    if env_args.get('if_build_vec_env'):
        num_envs = env_args['num_envs']
        env = VecEnv(env_class=env_class, env_args=env_args, num_envs=num_envs, gpu_id=gpu_id)
    elif env_class.__module__ == 'gym.envs.registration':
        import gym
        assert '0.18.0' <= gym.__version__ <= '0.25.2'  # pip3 install gym==0.24.0
        gym.logger.set_level(40)  # Block warning
        env = env_class(id=env_args['env_name'])
    else:
        # pass only the kwargs the env constructor actually accepts
        env = env_class(**kwargs_filter(env_class.__init__, env_args.copy()))

    env_args.setdefault('num_envs', 1)  # `num_envs=1` by default for a single env
    env_args.setdefault('max_step', 12345)  # large default episode-length bound
    for attr_str in ('env_name', 'num_envs', 'max_step', 'state_dim', 'action_dim', 'if_discrete'):
        setattr(env, attr_str, env_args[attr_str])
    return env
def kwargs_filter(function, kwargs: dict) -> dict:
    """Return the subset of `kwargs` whose keys are parameter names of `function`.

    Useful for calling a constructor with a dict that may contain extra keys.
    """
    import inspect
    accepted_names = set(inspect.signature(function).parameters)
    return {name: value for name, value in kwargs.items() if name in accepted_names}
def get_gym_env_args(env, if_print: bool) -> dict:
    """get a dict about a standard OpenAI gym env information.
    assert 0.18.0 <= gym.__version__ <= 0.25.3

    env: a standard OpenAI gym env
    if_print: [bool] print the dict about env information.
    return: env_args [dict]

    env_args = {
        'env_name': env_name,       # [str] the environment name, such as XxxXxx-v0
        'num_envs': num_envs.       # [int] the number of sub envs in vectorized env. `num_envs=1` in single env.
        'max_step': max_step,       # [int] the max step number of an episode.
        'state_dim': state_dim,     # [int] the dimension of state
        'action_dim': action_dim,   # [int] the dimension of action or the number of discrete action
        'if_discrete': if_discrete, # [bool] action space is discrete or continuous
    }
    """
    import gym

    # duck-type check: a standard gym env exposes these attributes
    if_gym_standard_env = {'unwrapped', 'observation_space', 'action_space', 'spec'}.issubset(dir(env))
    if if_gym_standard_env and (not hasattr(env, 'num_envs')):  # isinstance(env, gym.Env):
        assert '0.18.0' <= gym.__version__ <= '0.25.2'  # pip3 install gym==0.24.0
        env_name = env.unwrapped.spec.id
        num_envs = getattr(env, 'num_envs', 1)
        max_step = getattr(env, '_max_episode_steps', 12345)
        state_shape = env.observation_space.shape
        state_dim = state_shape[0] if len(state_shape) == 1 else state_shape  # sometimes state_dim is a list
        if_discrete = isinstance(env.action_space, gym.spaces.Discrete)
        if if_discrete:  # make sure it is discrete action space
            action_dim = getattr(env.action_space, 'n')
        elif isinstance(env.action_space, gym.spaces.Box):  # make sure it is continuous action space
            action_dim = env.action_space.shape[0]
            # ElegantRL expects actions normalized to (-1, 1); warn otherwise
            if any(env.action_space.high - 1):
                print('WARNING: env.action_space.high', env.action_space.high)
            if any(env.action_space.low + 1):
                print('WARNING: env.action_space.low', env.action_space.low)
        else:
            raise RuntimeError('\n| Error in get_gym_env_info(). Please set these value manually:'
                               '\n  `state_dim=int; action_dim=int; if_discrete=bool;`'
                               '\n  And keep action_space in range (-1, 1).')
    else:
        # a custom (ElegantRL-style) env already carries these attributes
        env_name = getattr(env, 'env_name', 'env')
        num_envs = getattr(env, 'num_envs', 1)
        max_step = getattr(env, 'max_step', 12345)
        state_dim = env.state_dim
        action_dim = env.action_dim
        if_discrete = env.if_discrete

    env_args = {'env_name': env_name,
                'num_envs': num_envs,
                'max_step': max_step,
                'state_dim': state_dim,
                'action_dim': action_dim,
                'if_discrete': if_discrete, }
    if if_print:
        env_args_str = repr(env_args).replace(',', f",\n{'':11}")
        print(f"env_args = {env_args_str}")
    return env_args
"""vectorized env"""
class SubEnv(Process):
    """One worker process hosting a single sub-env of a VecEnv.

    Protocol: receives an action on `sub_pipe0`; `None` means reset.
    Replies on `vec_pipe1` with `(env_id, state)` after a reset, or
    `(env_id, state, reward, done, info_dict)` after a step (auto-resetting
    the env when `done`, so the returned state is the fresh episode's state).
    """

    def __init__(self, sub_pipe0: Pipe, vec_pipe1: Pipe,
                 env_class, env_args: dict, env_id: int = 0):
        super().__init__()
        self.sub_pipe0 = sub_pipe0  # receiving end: actions from VecEnv
        self.vec_pipe1 = vec_pipe1  # sending end: results back to VecEnv
        self.env_class = env_class
        self.env_args = env_args
        self.env_id = env_id  # used by VecEnv to reorder replies

    def run(self):
        """Process body: build the env, seed it, then serve step/reset requests forever."""
        torch.set_grad_enabled(False)

        '''build env'''
        if self.env_class.__module__ == 'gym.envs.registration':  # is standard OpenAI Gym env
            env = self.env_class(id=self.env_args['env_name'])
        else:
            env = self.env_class(**kwargs_filter(self.env_class.__init__, self.env_args.copy()))

        '''set env random seed'''
        # each sub-env gets a distinct seed so rollouts are decorrelated
        random_seed = self.env_id
        np.random.seed(random_seed)
        torch.manual_seed(random_seed)

        while True:
            action = self.sub_pipe0.recv()
            if action is None:
                state = env.reset()
                self.vec_pipe1.send((self.env_id, state))
            else:
                state, reward, done, info_dict = env.step(action)
                state = env.reset() if done else state  # auto-reset on episode end
                self.vec_pipe1.send((self.env_id, state, reward, done, info_dict))
class VecEnv:
    """Vectorized environment: `num_envs` SubEnv worker processes stepped in lockstep.

    reset()/step() exchange batched tensors on `self.device`; replies from the
    workers arrive out of order on one shared pipe and are reordered by env_id.
    """

    def __init__(self, env_class: object, env_args: dict, num_envs: int, gpu_id: int = -1):
        self.device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
        self.num_envs = num_envs  # the number of sub env in vectorized env.

        '''the necessary env information when you design a custom env'''
        self.env_name = env_args['env_name']  # the name of this env.
        self.max_step = env_args['max_step']  # the max step number in an episode for evaluation
        self.state_dim = env_args['state_dim']  # feature number of state
        self.action_dim = env_args['action_dim']  # feature number of action
        self.if_discrete = env_args['if_discrete']  # discrete action or continuous action

        '''speed up with multiprocessing: Process, Pipe'''
        assert self.num_envs <= 64
        self.res_list = [[] for _ in range(self.num_envs)]  # slot per env_id for reordering replies
        # one private pipe per sub-env for sending actions ...
        sub_pipe0s, sub_pipe1s = list(zip(*[Pipe(duplex=False) for _ in range(self.num_envs)]))
        self.sub_pipe1s = sub_pipe1s
        # ... and one shared pipe on which all sub-envs send results back
        vec_pipe0, vec_pipe1 = Pipe(duplex=False)  # recv, send
        self.vec_pipe0 = vec_pipe0

        self.sub_envs = [
            SubEnv(sub_pipe0=sub_pipe0, vec_pipe1=vec_pipe1,
                   env_class=env_class, env_args=env_args, env_id=env_id)
            for env_id, sub_pipe0 in enumerate(sub_pipe0s)
        ]
        [setattr(p, 'daemon', True) for p in self.sub_envs]  # set before process start to exit safely
        [p.start() for p in self.sub_envs]

    def reset(self) -> Tensor:  # reset the agent in env
        """Reset all sub-envs; return stacked states of shape (num_envs, state_dim)."""
        torch.set_grad_enabled(False)
        for pipe in self.sub_pipe1s:
            pipe.send(None)  # `None` is the reset request
        states, = self.get_orderly_zip_list_return()
        states = torch.tensor(np.stack(states), dtype=torch.float32, device=self.device)
        return states

    def step(self, action: Tensor) -> (Tensor, Tensor, Tensor, List[dict]):  # agent interacts in env
        """Step every sub-env with its row of `action`; return batched results."""
        action = action.detach().cpu().numpy()
        if self.if_discrete:
            action = action.squeeze(1)  # each sub-env expects a scalar discrete action
        for pipe, a in zip(self.sub_pipe1s, action):
            pipe.send(a)

        states, rewards, dones, info_dicts = self.get_orderly_zip_list_return()
        states = torch.tensor(np.stack(states), dtype=torch.float32, device=self.device)
        rewards = torch.tensor(rewards, dtype=torch.float32, device=self.device)
        dones = torch.tensor(dones, dtype=torch.bool, device=self.device)
        return states, rewards, dones, info_dicts

    def close(self):
        """Terminate all worker processes."""
        [process.terminate() for process in self.sub_envs]

    def get_orderly_zip_list_return(self):
        """Collect one reply per sub-env from the shared pipe, reorder by env_id,
        and transpose into per-field tuples (states, rewards, ...)."""
        for _ in range(self.num_envs):
            res = self.vec_pipe0.recv()
            self.res_list[res[0]] = res[1:]  # res[0] is env_id
        return list(zip(*self.res_list))
| 14,704 | 47.531353 | 117 | py |
ElegantRL | ElegantRL-master/elegantrl/train/replay_buffer.py | import os
import math
import torch
from typing import Tuple
from torch import Tensor
from elegantrl.train.config import Config
class ReplayBuffer:  # for off-policy
    """Ring-buffer of transitions for off-policy training, with optional PER.

    Stores (state, action, reward, undone) per step, laid out as
    (max_size, num_seqs, ...) tensors on `self.device`; `num_seqs` parallel
    sequences come from vectorized/worker envs. `sample()` returns the next
    state as `states[i + 1]`, relying on the sequence layout described below.
    """

    def __init__(self,
                 max_size: int,
                 state_dim: int,
                 action_dim: int,
                 gpu_id: int = 0,
                 num_seqs: int = 1,
                 if_use_per: bool = False,
                 args: Config = Config()):  # NOTE(review): mutable default instance shared across calls; only read via getattr here — confirm intent
        self.p = 0  # pointer
        self.if_full = False
        self.cur_size = 0
        self.add_size = 0
        self.add_item = None
        self.max_size = max_size
        self.num_seqs = num_seqs
        self.device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")

        """The struction of ReplayBuffer (for example, num_seqs = num_workers * num_envs == 2*4 = 8
        ReplayBuffer:
        worker0 for env0:   sequence of sub_env0.0  self.states  = Tensor[s, s, ..., s, ..., s]
                                                    self.actions = Tensor[a, a, ..., a, ..., a]
                                                    self.rewards = Tensor[r, r, ..., r, ..., r]
                                                    self.undones = Tensor[d, d, ..., d, ..., d]
                                                                          <-----max_size----->
                                                                          <-cur_size->
                                                                                     ↑ pointer
                            sequence of sub_env0.1  s, s, ..., s    a, a, ..., a    r, r, ..., r    d, d, ..., d
                            sequence of sub_env0.2  s, s, ..., s    a, a, ..., a    r, r, ..., r    d, d, ..., d
                            sequence of sub_env0.3  s, s, ..., s    a, a, ..., a    r, r, ..., r    d, d, ..., d
        worker1 for env1:   sequence of sub_env1.0  s, s, ..., s    a, a, ..., a    r, r, ..., r    d, d, ..., d
                            sequence of sub_env1.1  s, s, ..., s    a, a, ..., a    r, r, ..., r    d, d, ..., d
                            sequence of sub_env1.2  s, s, ..., s    a, a, ..., a    r, r, ..., r    d, d, ..., d
                            sequence of sub_env1.3  s, s, ..., s    a, a, ..., a    r, r, ..., r    d, d, ..., d

        D: done=True
        d: done=False
        sequence of transition: s-a-r-d, s-a-r-d, s-a-r-D  s-a-r-d, s-a-r-d, s-a-r-d, s-a-r-d, s-a-r-D  s-a-r-d, ...
                                <------trajectory------->  <----------trajectory--------------------->  <-----------
        """
        self.states = torch.empty((max_size, num_seqs, state_dim), dtype=torch.float32, device=self.device)
        self.actions = torch.empty((max_size, num_seqs, action_dim), dtype=torch.float32, device=self.device)
        self.rewards = torch.empty((max_size, num_seqs), dtype=torch.float32, device=self.device)
        self.undones = torch.empty((max_size, num_seqs), dtype=torch.float32, device=self.device)

        self.if_use_per = if_use_per
        if if_use_per:
            # one SumTree of priorities per parallel sequence
            self.sum_trees = [SumTree(buf_len=max_size) for _ in range(num_seqs)]
            self.per_alpha = getattr(args, 'per_alpha', 0.6)  # alpha = (Uniform:0, Greedy:1)
            self.per_beta = getattr(args, 'per_beta', 0.4)  # alpha = (Uniform:0, Greedy:1)
            """PER.  Prioritized Experience Replay. Section 4
            alpha, beta = 0.7, 0.5 for rank-based variant
            alpha, beta = 0.6, 0.4 for proportional variant
            """
        else:
            self.sum_trees = None
            self.per_alpha = None
            self.per_beta = None

    def update(self, items: Tuple[Tensor, ...]):
        """Append a batch of transitions (states, actions, rewards, undones),
        wrapping around the ring buffer when the pointer passes max_size."""
        self.add_item = items
        states, actions, rewards, undones = items
        # assert states.shape[1:] == (env_num, state_dim)
        # assert actions.shape[1:] == (env_num, action_dim)
        # assert rewards.shape[1:] == (env_num,)
        # assert undones.shape[1:] == (env_num,)
        self.add_size = rewards.shape[0]

        p = self.p + self.add_size  # pointer
        if p > self.max_size:
            self.if_full = True
            # split the batch: first part fills the tail, remainder wraps to the front
            p0 = self.p
            p1 = self.max_size
            p2 = self.max_size - self.p
            p = p - self.max_size

            self.states[p0:p1], self.states[0:p] = states[:p2], states[-p:]
            self.actions[p0:p1], self.actions[0:p] = actions[:p2], actions[-p:]
            self.rewards[p0:p1], self.rewards[0:p] = rewards[:p2], rewards[-p:]
            self.undones[p0:p1], self.undones[0:p] = undones[:p2], undones[-p:]
        else:
            self.states[self.p:p] = states
            self.actions[self.p:p] = actions
            self.rewards[self.p:p] = rewards
            self.undones[self.p:p] = undones

        if self.if_use_per:
            '''data_ids for single env'''
            # NOTE(review): after a wraparound `p` was already reduced above, so
            # arange(self.p, p) is empty and the fmod branch is unreachable —
            # newly wrapped entries may not get priorities; confirm against upstream.
            data_ids = torch.arange(self.p, p, dtype=torch.long, device=self.device)
            if p > self.max_size:
                data_ids = torch.fmod(data_ids, self.max_size)

            '''apply data_ids for vectorized env'''
            for sum_tree in self.sum_trees:
                sum_tree.update_ids(data_ids=data_ids.cpu(), prob=10.)  # 10 is the max priority for fresh samples

        self.p = p
        self.cur_size = self.max_size if self.if_full else self.p

    def sample(self, batch_size: int) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
        """Uniformly sample `batch_size` transitions across all sequences;
        returns (state, action, reward, undone, next_state)."""
        sample_len = self.cur_size - 1  # exclude the last index so states[ids0 + 1] is valid

        ids = torch.randint(sample_len * self.num_seqs, size=(batch_size,), requires_grad=False)
        ids0 = torch.fmod(ids, sample_len)  # ids % sample_len — position in a sequence
        ids1 = torch.div(ids, sample_len, rounding_mode='floor')  # ids // sample_len — sequence index

        return (self.states[ids0, ids1],
                self.actions[ids0, ids1],
                self.rewards[ids0, ids1],
                self.undones[ids0, ids1],
                self.states[ids0 + 1, ids1],)  # next_state

    def sample_for_per(self, batch_size: int) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
        """PER sampling: draw indices from each sequence's SumTree proportionally
        to priority; also returns importance-sampling weights and indices."""
        beg = -self.max_size
        end = (self.cur_size - self.max_size) if (self.cur_size < self.max_size) else -1

        '''get is_indices, is_weights'''
        is_indices: list = []
        is_weights: list = []

        assert batch_size % self.num_seqs == 0
        sub_batch_size = batch_size // self.num_seqs
        for env_i in range(self.num_seqs):
            sum_tree = self.sum_trees[env_i]
            # NOTE(review): `batch_size` (not `sub_batch_size`) is passed per tree,
            # and the per-env offset is `sub_batch_size * env_i` rather than a
            # buffer-length multiple — verify the intended flattened-index layout.
            _is_indices, _is_weights = sum_tree.important_sampling(batch_size, beg, end, self.per_beta)
            is_indices.append(_is_indices + sub_batch_size * env_i)
            is_weights.append(_is_weights)

        is_indices: Tensor = torch.hstack(is_indices).to(self.device)
        is_weights: Tensor = torch.hstack(is_weights).to(self.device)

        ids0 = torch.fmod(is_indices, self.cur_size)  # is_indices % sample_len
        ids1 = torch.div(is_indices, self.cur_size, rounding_mode='floor')  # is_indices // sample_len
        return (
            self.states[ids0, ids1],
            self.actions[ids0, ids1],
            self.rewards[ids0, ids1],
            self.undones[ids0, ids1],
            self.states[ids0 + 1, ids1],  # next_state
            is_weights,  # important sampling weights
            is_indices,  # important sampling indices
        )

    def td_error_update_for_per(self, is_indices: Tensor, td_error: Tensor):  # td_error = (q-q).detach_().abs()
        """Refresh priorities after a training step: priority = clip(|td|)^alpha,
        written back per sequence into the matching SumTree."""
        prob = td_error.clamp(1e-8, 10).pow(self.per_alpha)

        # self.sum_tree.update_ids(is_indices.cpu(), prob.cpu())
        batch_size = td_error.shape[0]
        sub_batch_size = batch_size // self.num_seqs
        for env_i in range(self.num_seqs):
            sum_tree = self.sum_trees[env_i]
            slice_i = env_i * sub_batch_size
            slice_j = slice_i + sub_batch_size
            sum_tree.update_ids(is_indices[slice_i:slice_j].cpu(), prob[slice_i:slice_j].cpu())

    def save_or_load_history(self, cwd: str, if_save: bool):
        """Save the buffer contents to `{cwd}/replay_buffer_*.pth` (if_save=True),
        or load them back and restore pointer/size bookkeeping (if_save=False)."""
        item_names = (
            (self.states, "states"),
            (self.actions, "actions"),
            (self.rewards, "rewards"),
            (self.undones, "undones"),
        )

        if if_save:
            for item, name in item_names:
                if self.cur_size == self.p:
                    buf_item = item[:self.cur_size]
                else:
                    # unwrap the ring so the saved tensor is in chronological order
                    buf_item = torch.vstack((item[self.p:self.cur_size], item[0:self.p]))
                file_path = f"{cwd}/replay_buffer_{name}.pth"
                print(f"| buffer.save_or_load_history(): Save {file_path}")
                torch.save(buf_item, file_path)

        elif all([os.path.isfile(f"{cwd}/replay_buffer_{name}.pth") for item, name in item_names]):
            max_sizes = []
            for item, name in item_names:
                file_path = f"{cwd}/replay_buffer_{name}.pth"
                print(f"| buffer.save_or_load_history(): Load {file_path}")
                buf_item = torch.load(file_path)

                max_size = buf_item.shape[0]
                item[:max_size] = buf_item
                max_sizes.append(max_size)
            assert all([max_size == max_sizes[0] for max_size in max_sizes])  # all four tensors must agree
            self.cur_size = self.p = max_sizes[0]
            self.if_full = self.cur_size == self.max_size
class SumTree:
    """ BinarySearchTree for PER (SumTree)

    Array-backed binary tree: `tree` holds (buf_len - 1) internal sum nodes
    followed by buf_len leaf priorities; data index i maps to tree index
    i + buf_len - 1, and tree[0] is intended to hold the total priority.

    Contributor: Github GyChou, Github mississippiu
    Reference: https://github.com/kaixindelele/DRLib/tree/main/algos/pytorch/td3_sp
    Reference: https://github.com/jaromiru/AI-blog/blob/master/SumTree.py
    """

    def __init__(self, buf_len: int):
        self.buf_len = buf_len  # replay buffer len
        self.max_len = (buf_len - 1) + buf_len  # parent_nodes_num + leaf_nodes_num
        self.depth = math.ceil(math.log2(self.max_len))

        self.tree = torch.zeros(self.max_len, dtype=torch.float32)

    def update_id(self, data_id: int, prob=10):  # 10 is max_prob
        """Set one leaf's priority and propagate the delta toward the root."""
        tree_id = data_id + self.buf_len - 1

        delta = prob - self.tree[tree_id]
        self.tree[tree_id] = prob

        # NOTE(review): `range(self.depth - 2)` iterations may stop before
        # reaching tree index 0 for some buf_len values (e.g. powers of two),
        # leaving the root total stale — confirm against the reference SumTree.
        for depth in range(self.depth - 2):  # propagate the change through tree
            tree_id = (tree_id - 1) // 2  # faster than the recursive loop
            self.tree[tree_id] += delta

    def update_ids(self, data_ids: Tensor, prob: Tensor = 10.):  # 10 is max_prob
        """Vectorized leaf update: write priorities for `data_ids`, then rebuild
        ancestor sums level by level."""
        l_ids = data_ids + self.buf_len - 1

        self.tree[l_ids] = prob
        # NOTE(review): same depth concern as update_id — the loop may terminate
        # before the root (index 0) is recomputed for some buf_len values.
        for depth in range(self.depth - 2):  # propagate the change through tree
            p_ids = ((l_ids - 1) // 2).unique()  # parent indices
            l_ids = p_ids * 2 + 1  # left children indices
            r_ids = l_ids + 1  # right children indices
            self.tree[p_ids] = self.tree[l_ids] + self.tree[r_ids]

            l_ids = p_ids

    def get_leaf_id_and_value(self, v) -> Tuple[int, float]:
        """Tree structure and array storage:
        Tree index:
              0       -> storing priority sum
            |  |
          1     2
         | |   | |
        3  4  5  6    -> storing priority for transitions
        Array type for storing: [0, 1, 2, 3, 4, 5, 6]
        """
        # descend from the root: go left when v fits in the left subtree sum,
        # otherwise subtract it and go right
        p_id = 0  # the leaf's parent node
        # NOTE(review): this descent also runs `depth - 2` steps; verify it always
        # lands on a leaf (index >= buf_len - 1) for the buffer sizes in use.
        for depth in range(self.depth - 2):  # propagate the change through tree
            l_id = min(2 * p_id + 1, self.max_len - 1)  # the leaf's left node
            r_id = l_id + 1  # the leaf's right node
            if v <= self.tree[l_id]:
                p_id = l_id
            else:
                v -= self.tree[l_id]
                p_id = r_id
        return p_id, self.tree[p_id]  # leaf_id and leaf_value

    def important_sampling(self, batch_size: int, beg: int, end: int, per_beta: float) -> Tuple[Tensor, Tensor]:
        """Stratified proportional sampling: one draw per equal slice of the total
        priority; returns (data indices, importance-sampling weights)."""
        # get random values for searching indices with proportional prioritization
        values = (torch.arange(batch_size) + torch.rand(batch_size)) * (self.tree[0] / batch_size)

        # get proportional prioritization
        leaf_ids, leaf_values = list(zip(*[self.get_leaf_id_and_value(v) for v in values]))
        leaf_ids = torch.tensor(leaf_ids, dtype=torch.long)
        leaf_values = torch.tensor(leaf_values, dtype=torch.float32)

        indices = leaf_ids - (self.buf_len - 1)  # tree index -> data index
        assert 0 <= indices.min()
        assert indices.max() < self.buf_len

        # w_i = (p_i / p_min) ** -beta, normalized against the minimum stored priority
        prob_ary = leaf_values / self.tree[beg:end].min()
        weights = torch.pow(prob_ary, -per_beta)
        return indices, weights
| 12,464 | 44.659341 | 116 | py |
ElegantRL | ElegantRL-master/docs/source/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a
# full list see http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------
# Make the project root and the `elegantrl` package importable so autodoc can
# find the modules to document.
import os
import sys

import sphinx_rtd_theme  # moved to the top with the other imports

sys.path.insert(0, os.path.abspath("../../"))
sys.path.insert(0, os.path.abspath(os.path.join("../..", "elegantrl")))  # Important

# -- Project information -----------------------------------------------------
project = "ElegantRL"
copyright = "2021, ElegantRL"
author = "ElegantRL"
version = ""  # the short X.Y version
release = "0.3.1"  # the full version, including alpha/beta/rc tags

# -- General configuration ---------------------------------------------------
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.doctest",
    "sphinx.ext.viewcode",
    "sphinx.ext.githubpages",
]
# Heavy runtime dependencies are mocked so the docs build without installing them.
autodoc_mock_imports = [
    "gym",
    "matplotlib",
    "numpy",
    "pybullet",
    "torch",
    "opencv-python",
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix(es) of source filenames.
source_suffix = ".rst"

# The master toctree document.
master_doc = "index"

# The language for content autogenerated by Sphinx.
language = None

# List of patterns, relative to source directory, to ignore when looking
# for source files. Also affects html_static_path and html_extra_path.
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
# NOTE(review): the original file assigned pygments_style twice ("sphinx",
# then None); the later assignment won, so the effective value is kept here.
pygments_style = None

# -- Options for HTML output -------------------------------------------------
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_logo = "../img/logo.jpg"

# Paths that contain custom static files (such as style sheets), copied after
# the builtin static files so "default.css" would overwrite the builtin one.
html_static_path = ["_static"]

# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "ElegantRLdoc"

# -- Options for LaTeX output ------------------------------------------------
# All keys ('papersize', 'pointsize', 'preamble', 'figure_align') left at
# their Sphinx defaults.
latex_elements = {}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass).
latex_documents = [
    (master_doc, "ElegantRL.tex", "ElegantRL Documentation", "ElegantRL", "manual"),
]

# -- Options for manual page output ------------------------------------------
# One entry per manual page: (source start file, name, description, authors, section).
man_pages = [(master_doc, "elegantrl", "ElegantRL Documentation", [author], 1)]

# -- Options for Texinfo output ----------------------------------------------
# (source start file, target name, title, author, dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "ElegantRL",
        "ElegantRL Documentation",
        author,
        "ElegantRL",
        "One line description of project.",
        "Miscellaneous",
    ),
]

# -- Options for Epub output -------------------------------------------------
epub_title = project

# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
| 5,679 | 28.894737 | 84 | py |
ElegantRL | ElegantRL-master/helloworld/helloworld_DQN_single_file.py | import os
import time
from copy import deepcopy
import gym
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
class Config:  # for off-policy
    """Hyper-parameter container for off-policy DRL training.

    Groups environment info, reward shaping, training, device and
    evaluation settings in one place; `init_before_training` prepares the
    working directory used for saving models.
    """

    def __init__(self, agent_class=None, env_class=None, env_args=None):
        self.agent_class = agent_class  # agent = agent_class(...)
        self.if_off_policy = True  # whether off-policy or on-policy of DRL algorithm

        self.env_class = env_class  # env = env_class(**env_args)
        self.env_args = env_args  # env = env_class(**env_args)
        if env_args is None:  # dummy env_args
            env_args = {'env_name': None, 'state_dim': None, 'action_dim': None, 'if_discrete': None}
        self.env_name = env_args['env_name']  # the name of environment. Be used to set 'cwd'.
        self.state_dim = env_args['state_dim']  # vector dimension (feature number) of state
        self.action_dim = env_args['action_dim']  # vector dimension (feature number) of action
        self.if_discrete = env_args['if_discrete']  # discrete or continuous action space

        '''Arguments for reward shaping'''
        self.gamma = 0.99  # discount factor of future rewards
        self.reward_scale = 1.0  # an approximate target reward usually be closed to 256

        '''Arguments for training'''
        self.net_dims = (64, 32)  # the middle layer dimension of MLP (MultiLayer Perceptron)
        self.learning_rate = 6e-5  # 2 ** -14 ~= 6e-5
        self.soft_update_tau = 5e-3  # 2 ** -8 ~= 5e-3
        self.batch_size = int(64)  # num of transitions sampled from replay buffer.
        self.horizon_len = int(512)  # collect horizon_len step while exploring, then update network
        self.buffer_size = int(1e6)  # ReplayBuffer size. First in first out for off-policy.
        self.repeat_times = 1.0  # repeatedly update network using ReplayBuffer to keep critic's loss small

        '''Arguments for device'''
        self.gpu_id = int(0)  # `int` means the ID of single GPU, -1 means CPU
        self.thread_num = int(8)  # cpu_num for pytorch, `torch.set_num_threads(self.num_threads)`
        self.random_seed = int(0)  # initialize random seed in self.init_before_training()

        '''Arguments for evaluate'''
        self.cwd = None  # current working directory to save model. None means set automatically
        self.if_remove = True  # remove the cwd folder? (True, False, None:ask me)
        self.break_step = +np.inf  # break training if 'total_step > break_step'
        self.eval_times = int(32)  # number of times that get episodic cumulative return
        self.eval_per_step = int(2e4)  # evaluate the agent per training steps

    def init_before_training(self):
        """Create the working directory `cwd` (auto-named from env and agent class) for saving models."""
        if self.cwd is None:  # set cwd (current working directory) for saving model
            self.cwd = f'./{self.env_name}_{self.agent_class.__name__[5:]}'
        os.makedirs(self.cwd, exist_ok=True)
class QNet(nn.Module):  # `nn.Module` is a PyTorch module for neural network
    """Q-value network: maps a batch of states to one Q value per discrete action."""

    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__()
        self.net = build_mlp(dims=[state_dim, *dims, action_dim])
        self.explore_rate = None  # epsilon of epsilon-greedy; set by the owning agent
        self.action_dim = action_dim

    def forward(self, state: Tensor) -> Tensor:
        """Return Q values for all actions."""
        return self.net(state)

    def get_action(self, state: Tensor) -> Tensor:
        """Epsilon-greedy exploration: greedy argmax action, else a uniformly random one.

        Returns an integer action index tensor of shape (batch, 1).
        """
        if self.explore_rate < torch.rand(1):
            return self.net(state).argmax(dim=1, keepdim=True)
        return torch.randint(self.action_dim, size=(state.shape[0], 1))
def build_mlp(dims: [int]) -> nn.Sequential:  # MLP (MultiLayer Perceptron)
    """Build a Linear+ReLU stack from layer widths `dims`; the output layer has no activation."""
    layers = []
    for in_dim, out_dim in zip(dims[:-1], dims[1:]):
        layers.append(nn.Linear(in_dim, out_dim))
        layers.append(nn.ReLU())
    return nn.Sequential(*layers[:-1])  # drop the trailing ReLU on the output layer
def get_gym_env_args(env, if_print: bool) -> dict:
    """Extract {'env_name', 'state_dim', 'action_dim', 'if_discrete'} from an env.

    Works for standard OpenAI gym envs (detected by their attribute set) and
    for custom envs that expose the four attributes directly.
    """
    gym_attrs = {'unwrapped', 'observation_space', 'action_space', 'spec'}
    if gym_attrs.issubset(dir(env)):  # a standard gym env
        env_name = env.unwrapped.spec.id
        obs_shape = env.observation_space.shape
        state_dim = obs_shape[0] if len(obs_shape) == 1 else obs_shape  # sometimes state_dim is a list
        if_discrete = isinstance(env.action_space, gym.spaces.Discrete)
        action_dim = env.action_space.n if if_discrete else env.action_space.shape[0]
    else:  # a custom env carrying the attributes itself
        env_name = env.env_name
        state_dim = env.state_dim
        action_dim = env.action_dim
        if_discrete = env.if_discrete

    env_args = {'env_name': env_name, 'state_dim': state_dim,
                'action_dim': action_dim, 'if_discrete': if_discrete}
    if if_print:
        print(f"env_args = {repr(env_args)}")
    return env_args
def kwargs_filter(function, kwargs: dict) -> dict:
    """Return the subset of `kwargs` whose keys match parameter names of `function`."""
    import inspect
    param_names = {param.name for param in inspect.signature(function).parameters.values()}
    return {key: kwargs[key] for key in param_names.intersection(kwargs.keys())}
def build_env(env_class=None, env_args=None):
    """Instantiate an env and attach the four standard attributes from `env_args`.

    Gym-registered envs are created by id; custom env classes receive the
    subset of `env_args` that matches their __init__ signature.
    """
    if env_class.__module__ == 'gym.envs.registration':  # special rule for `gym.make`
        assert '0.18.0' <= gym.__version__ <= '0.25.2'  # pip3 install gym==0.24.0
        env = env_class(id=env_args['env_name'])
    else:
        env = env_class(**kwargs_filter(env_class.__init__, env_args.copy()))
    # attach the standard attributes so downstream code can rely on them
    for attr_str in ('env_name', 'state_dim', 'action_dim', 'if_discrete'):
        setattr(env, attr_str, env_args[attr_str])
    return env
class AgentBase:
    """Base class for DRL agents.

    Builds the actor/critic networks, their optimizers and shared
    hyper-parameters from a `Config`; subclasses set `act_class`/`cri_class`
    before calling this __init__.
    """

    def __init__(self, net_dims: [int], state_dim: int, action_dim: int,
                 gpu_id: int = 0, args: 'Config' = None):
        """
        net_dims: middle layer dimensions of the MLP networks
        state_dim: feature number of state
        action_dim: feature number of action (or number of discrete actions)
        gpu_id: CUDA device id; -1 (or no CUDA available) falls back to CPU
        args: training hyper-parameters; a fresh default Config is built if None
        """
        # Fix of the original `args: Config = Config()` default: that single
        # instance was created at class-definition time and shared by every
        # call that omitted `args`. Use a None sentinel instead.
        args = args if args is not None else Config()

        self.state_dim = state_dim
        self.action_dim = action_dim

        self.gamma = args.gamma
        self.batch_size = args.batch_size
        self.repeat_times = args.repeat_times
        self.reward_scale = args.reward_scale
        self.learning_rate = args.learning_rate
        self.if_off_policy = args.if_off_policy
        self.soft_update_tau = args.soft_update_tau

        # last state of the exploration trajectory; `last_state.shape == (state_dim,)`
        self.last_state = None
        self.device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")

        # Subclasses are expected to set these class attributes beforehand.
        act_class = getattr(self, "act_class", None)
        cri_class = getattr(self, "cri_class", None)
        self.act = self.act_target = act_class(net_dims, state_dim, action_dim).to(self.device)
        # Without a separate critic class, the critic shares the actor network.
        self.cri = self.cri_target = cri_class(net_dims, state_dim, action_dim).to(self.device) \
            if cri_class else self.act

        self.act_optimizer = torch.optim.Adam(self.act.parameters(), self.learning_rate)
        self.cri_optimizer = torch.optim.Adam(self.cri.parameters(), self.learning_rate) \
            if cri_class else self.act_optimizer

        self.criterion = torch.nn.SmoothL1Loss()

    @staticmethod
    def optimizer_update(optimizer, objective: Tensor):
        """One gradient step: zero grads, backprop `objective`, apply the update."""
        optimizer.zero_grad()
        objective.backward()
        optimizer.step()

    @staticmethod
    def soft_update(target_net: torch.nn.Module, current_net: torch.nn.Module, tau: float):
        """Polyak averaging: target = tau * current + (1 - tau) * target."""
        # assert target_net is not current_net
        for tar, cur in zip(target_net.parameters(), current_net.parameters()):
            tar.data.copy_(cur.data * tau + tar.data * (1.0 - tau))
class AgentDQN(AgentBase):
    """Deep Q-Network agent: epsilon-greedy exploration plus TD learning
    with a soft-updated target network."""
    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        self.act_class = getattr(self, "act_class", QNet)
        self.cri_class = getattr(self, "cri_class", None)  # means `self.cri = self.act`
        AgentBase.__init__(self, net_dims, state_dim, action_dim, gpu_id, args)
        self.act_target = self.cri_target = deepcopy(self.act)

        self.act.explore_rate = getattr(args, "explore_rate", 0.25)  # set for `self.act.get_action()`
        # the probability of choosing action randomly in epsilon-greedy

    def explore_env(self, env, horizon_len: int, if_random: bool = False) -> [Tensor]:
        """Collect `horizon_len` transitions from `env`.

        Returns (states, actions, rewards, undones); rewards are scaled by
        `reward_scale` and `undones` is 1.0 while the episode continues.
        With `if_random=True` a uniform random policy is used (buffer warm-up).
        """
        states = torch.zeros((horizon_len, self.state_dim), dtype=torch.float32).to(self.device)
        actions = torch.zeros((horizon_len, 1), dtype=torch.int32).to(self.device)
        rewards = torch.ones(horizon_len, dtype=torch.float32).to(self.device)
        dones = torch.zeros(horizon_len, dtype=torch.bool).to(self.device)

        ary_state = self.last_state
        get_action = self.act.get_action  # hoist method lookup out of the loop
        for i in range(horizon_len):
            state = torch.as_tensor(ary_state, dtype=torch.float32, device=self.device)
            if if_random:
                action = torch.randint(self.action_dim, size=(1,))[0]
            else:
                action = get_action(state.unsqueeze(0))[0, 0]

            ary_action = action.detach().cpu().numpy()
            ary_state, reward, done, _ = env.step(ary_action)
            if done:  # restart the episode so exploration continues seamlessly
                ary_state = env.reset()

            states[i] = state
            actions[i] = action
            rewards[i] = reward
            dones[i] = done
        # carry over so the next call continues the same trajectory
        self.last_state = ary_state

        rewards = (rewards * self.reward_scale).unsqueeze(1)
        undones = (1.0 - dones.type(torch.float32)).unsqueeze(1)
        return states, actions, rewards, undones

    def update_net(self, buffer) -> [float]:
        """Run several TD updates on batches sampled from `buffer`.

        Returns (mean critic loss, mean Q value) over the updates.
        """
        obj_critics = 0.0
        q_values = 0.0

        update_times = int(buffer.cur_size * self.repeat_times / self.batch_size)
        assert update_times >= 1
        for i in range(update_times):
            obj_critic, q_value = self.get_obj_critic(buffer, self.batch_size)
            self.optimizer_update(self.cri_optimizer, obj_critic)
            self.soft_update(self.cri_target, self.cri, self.soft_update_tau)
            obj_critics += obj_critic.item()
            q_values += q_value.item()
        return obj_critics / update_times, q_values / update_times

    def get_obj_critic(self, buffer, batch_size: int) -> (Tensor, Tensor):
        """Compute the TD loss on one sampled batch; return (loss, mean Q value)."""
        with torch.no_grad():
            state, action, reward, undone, next_state = buffer.sample(batch_size)
            next_q = self.cri_target(next_state).max(dim=1, keepdim=True)[0]  # max-Q bootstrap target
            q_label = reward + undone * self.gamma * next_q  # `undone` masks out terminal states

        q_value = self.cri(state).gather(1, action.long())
        obj_critic = self.criterion(q_value, q_label)
        return obj_critic, q_value.mean()
class ReplayBuffer:  # for off-policy
    """Fixed-size ring buffer of (state, action, reward, undone) transitions,
    stored as pre-allocated tensors on the training device."""

    def __init__(self, max_size: int, state_dim: int, action_dim: int, gpu_id: int = 0):
        self.p = 0  # pointer: index where the next transition is written
        self.if_full = False
        self.cur_size = 0  # number of valid transitions currently stored
        self.max_size = max_size
        self.device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")

        self.states = torch.empty((max_size, state_dim), dtype=torch.float32, device=self.device)
        self.actions = torch.empty((max_size, action_dim), dtype=torch.float32, device=self.device)
        self.rewards = torch.empty((max_size, 1), dtype=torch.float32, device=self.device)
        self.undones = torch.empty((max_size, 1), dtype=torch.float32, device=self.device)

    def update(self, items: [Tensor]):
        """Append a batch of (states, actions, rewards, undones), wrapping around when full."""
        states, actions, rewards, undones = items
        num_new = rewards.shape[0]
        next_p = self.p + num_new
        pairs = ((self.states, states), (self.actions, actions),
                 (self.rewards, rewards), (self.undones, undones))
        if next_p > self.max_size:
            # the batch crosses the end of the ring: split into tail + head parts
            self.if_full = True
            num_tail = self.max_size - self.p  # items that still fit before the end
            next_p -= self.max_size           # pointer position after wrapping
            for buf, item in pairs:
                buf[self.p:self.max_size] = item[:num_tail]
                buf[0:next_p] = item[-next_p:]
        else:
            for buf, item in pairs:
                buf[self.p:next_p] = item
        self.p = next_p
        self.cur_size = self.max_size if self.if_full else self.p

    def sample(self, batch_size: int) -> [Tensor]:
        """Sample a random batch of (state, action, reward, undone, next_state).

        NOTE(review): next_state is fetched as states[ids + 1], so a transition
        written just before the wrap point can pair with an unrelated successor —
        an accepted simplification of this tutorial buffer.
        """
        ids = torch.randint(self.cur_size - 1, size=(batch_size,), requires_grad=False)
        return (self.states[ids], self.actions[ids], self.rewards[ids],
                self.undones[ids], self.states[ids + 1])
def train_agent(args: Config):
    """Off-policy training loop: explore -> store in replay buffer -> update -> evaluate."""
    args.init_before_training()

    env = build_env(args.env_class, args.env_args)
    agent = args.agent_class(args.net_dims, args.state_dim, args.action_dim, gpu_id=0, args=args)
    agent.last_state = env.reset()

    # discrete actions are stored as a single index column
    buffer = ReplayBuffer(gpu_id=0, max_size=args.buffer_size,
                          state_dim=args.state_dim, action_dim=1 if args.if_discrete else args.action_dim, )
    buffer_items = agent.explore_env(env, args.horizon_len * args.eval_times, if_random=True)
    buffer.update(buffer_items)  # warm up for ReplayBuffer

    evaluator = Evaluator(eval_env=build_env(args.env_class, args.env_args),
                          eval_per_step=args.eval_per_step, eval_times=args.eval_times, cwd=args.cwd)
    torch.set_grad_enabled(False)  # exploration and evaluation need no autograd
    while True:  # start training
        buffer_items = agent.explore_env(env, args.horizon_len)
        buffer.update(buffer_items)

        torch.set_grad_enabled(True)  # gradients only during the network update
        logging_tuple = agent.update_net(buffer)
        torch.set_grad_enabled(False)

        evaluator.evaluate_and_save(agent.act, args.horizon_len, logging_tuple)
        if (evaluator.total_step > args.break_step) or os.path.exists(f"{args.cwd}/stop"):
            break  # stop training when reach `break_step` or `mkdir cwd/stop`
class Evaluator:
    """Periodically runs evaluation episodes on a separate env and prints
    a one-line summary of reward/step statistics and training objectives."""
    def __init__(self, eval_env, eval_per_step: int = 1e4, eval_times: int = 8, cwd: str = '.'):
        self.cwd = cwd
        self.env_eval = eval_env
        self.eval_step = 0  # total_step at the last evaluation
        self.total_step = 0  # cumulative number of env steps seen so far
        self.start_time = time.time()
        self.eval_times = eval_times  # number of times that get episodic cumulative return
        self.eval_per_step = eval_per_step  # evaluate the agent per training steps

        self.recorder = []  # list of (total_step, used_time, avg_reward)
        print("\n| `step`: Number of samples, or total training steps, or running times of `env.step()`."
              "\n| `time`: Time spent from the start of training to this moment."
              "\n| `avgR`: Average value of cumulative rewards, which is the sum of rewards in an episode."
              "\n| `stdR`: Standard dev of cumulative rewards, which is the sum of rewards in an episode."
              "\n| `avgS`: Average of steps in an episode."
              "\n| `objC`: Objective of Critic network. Or call it loss function of critic network."
              "\n| `objA`: Objective of Actor network. It is the average Q value of the critic network."
              f"\n| {'step':>8} {'time':>8} | {'avgR':>8} {'stdR':>6} {'avgS':>6} | {'objC':>8} {'objA':>8}")

    def evaluate_and_save(self, actor, horizon_len: int, logging_tuple: tuple):
        """Advance the step counter; when `eval_per_step` steps have passed,
        run `eval_times` episodes with `actor` and print the statistics."""
        self.total_step += horizon_len
        if self.eval_step + self.eval_per_step > self.total_step:
            return  # not yet time to evaluate
        self.eval_step = self.total_step

        rewards_steps_ary = [get_rewards_and_steps(self.env_eval, actor) for _ in range(self.eval_times)]
        rewards_steps_ary = np.array(rewards_steps_ary, dtype=np.float32)
        avg_r = rewards_steps_ary[:, 0].mean()  # average of cumulative rewards
        std_r = rewards_steps_ary[:, 0].std()  # std of cumulative rewards
        avg_s = rewards_steps_ary[:, 1].mean()  # average of steps in an episode

        used_time = time.time() - self.start_time
        self.recorder.append((self.total_step, used_time, avg_r))

        print(f"| {self.total_step:8.2e} {used_time:8.0f} "
              f"| {avg_r:8.2f} {std_r:6.2f} {avg_s:6.0f} "
              f"| {logging_tuple[0]:8.2f} {logging_tuple[1]:8.2f}")
def get_rewards_and_steps(env, actor, if_render: bool = False) -> (float, int):
    """Run one greedy episode; return (cumulative reward, episode steps)."""
    device = next(actor.parameters()).device  # net.parameters() is a Python generator.

    state = env.reset()
    cumulative_returns = 0.0  # sum of rewards in an episode
    episode_steps = 0
    for episode_steps in range(12345):  # hard cap on episode length
        tensor_state = torch.as_tensor(state, dtype=torch.float32, device=device).unsqueeze(0)
        tensor_action = actor(tensor_state).argmax(dim=1)
        # detach() is not strictly needed when torch.no_grad() is active outside
        state, reward, done, _ = env.step(tensor_action.detach().cpu().numpy()[0])
        cumulative_returns += reward

        if if_render:
            env.render()
        if done:
            break
    return cumulative_returns, episode_steps + 1
def train_dqn_for_cartpole():
    """Demo entry point: train DQN on the classic CartPole-v0 control task."""
    env_args = {
        'env_name': 'CartPole-v0',  # A pole is attached by an un-actuated joint to a cart.
        'state_dim': 4,  # (CartPosition, CartVelocity, PoleAngle, PoleAngleVelocity)
        'action_dim': 2,  # (Push cart to the left, Push cart to the right)
        'if_discrete': True,  # discrete action space
    }  # env_args = get_gym_env_args(env=gym.make('CartPole-v0'), if_print=True)

    args = Config(agent_class=AgentDQN, env_class=gym.make, env_args=env_args)  # see `Config` for explanation
    args.break_step = int(2e5)  # break training if 'total_step > break_step'
    args.net_dims = (64, 32)  # the middle layer dimension of MultiLayer Perceptron
    args.gamma = 0.95  # discount factor of future rewards

    train_agent(args)


train_dqn_for_cartpole()  # NOTE: runs at import time — the script starts training immediately
| 17,586 | 46.661247 | 119 | py |
ElegantRL | ElegantRL-master/helloworld/helloworld_PPO_single_file.py | import os
import time
import gym
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
from torch.distributions.normal import Normal
class ActorPPO(nn.Module):
    """Gaussian policy for PPO: an MLP predicts the action mean, while a
    learned state-independent log-std parametrizes the exploration noise."""

    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__()
        self.net = build_mlp(dims=[state_dim, *dims, action_dim])
        # trainable log of the action standard deviation, shared across states
        self.action_std_log = nn.Parameter(torch.zeros((1, action_dim)), requires_grad=True)

    def forward(self, state: Tensor) -> Tensor:
        """Deterministic action for evaluation: tanh of the predicted mean."""
        return self.net(state).tanh()  # action.tanh()

    def _dist(self, state: Tensor) -> Normal:
        """Build the exploration distribution N(mean(state), exp(action_std_log))."""
        return Normal(self.net(state), self.action_std_log.exp())

    def get_action(self, state: Tensor) -> (Tensor, Tensor):  # for exploration
        """Sample a raw (pre-tanh) action and its summed log-probability."""
        dist = self._dist(state)
        action = dist.sample()
        return action, dist.log_prob(action).sum(1)

    def get_logprob_entropy(self, state: Tensor, action: Tensor) -> (Tensor, Tensor):
        """Log-probability of `action` under the current policy, plus entropy."""
        dist = self._dist(state)
        return dist.log_prob(action).sum(1), dist.entropy().sum(1)

    @staticmethod
    def convert_action_for_env(action: Tensor) -> Tensor:
        """Squash the raw sampled action into (-1, 1) before sending it to the env."""
        return action.tanh()
class CriticPPO(nn.Module):
    """State-value network V(s) used as the PPO baseline."""

    def __init__(self, dims: [int], state_dim: int, _action_dim: int):
        super().__init__()
        # `_action_dim` is unused; kept so actor and critic share one constructor signature
        self.net = build_mlp(dims=[state_dim, *dims, 1])

    def forward(self, state: Tensor) -> Tensor:
        """Return the estimated state value (advantage baseline) for each state."""
        return self.net(state)
def build_mlp(dims: [int]) -> nn.Sequential:  # MLP (MultiLayer Perceptron)
    """Create a stack of Linear layers with ReLU between them; no activation on the output."""
    layers = []
    last_idx = len(dims) - 2
    for idx in range(len(dims) - 1):
        layers.append(nn.Linear(dims[idx], dims[idx + 1]))
        if idx != last_idx:  # skip the activation after the output layer
            layers.append(nn.ReLU())
    return nn.Sequential(*layers)
class Config:  # for on-policy
    """Hyper-parameter container for on-policy DRL training (PPO here).

    Groups environment info, reward shaping, training, device and
    evaluation settings in one place.
    """

    def __init__(self, agent_class=None, env_class=None, env_args=None):
        self.agent_class = agent_class  # agent = agent_class(...)
        self.if_off_policy = False  # whether off-policy or on-policy of DRL algorithm

        self.env_class = env_class  # env = env_class(**env_args)
        self.env_args = env_args  # env = env_class(**env_args)
        if env_args is None:  # dummy env_args
            env_args = {'env_name': None, 'state_dim': None, 'action_dim': None, 'if_discrete': None}
        self.env_name = env_args['env_name']  # the name of environment. Be used to set 'cwd'.
        self.state_dim = env_args['state_dim']  # vector dimension (feature number) of state
        self.action_dim = env_args['action_dim']  # vector dimension (feature number) of action
        self.if_discrete = env_args['if_discrete']  # discrete or continuous action space

        '''Arguments for reward shaping'''
        self.gamma = 0.99  # discount factor of future rewards
        self.reward_scale = 1.0  # an approximate target reward usually be closed to 256

        '''Arguments for training'''
        self.net_dims = (64, 32)  # the middle layer dimension of MLP (MultiLayer Perceptron)
        self.learning_rate = 6e-5  # 2 ** -14 ~= 6e-5
        self.soft_update_tau = 5e-3  # 2 ** -8 ~= 5e-3
        self.batch_size = int(128)  # num of transitions sampled from replay buffer.
        self.horizon_len = int(2000)  # collect horizon_len step while exploring, then update network
        self.buffer_size = None  # ReplayBuffer size. Empty the ReplayBuffer for on-policy.
        self.repeat_times = 8.0  # repeatedly update network using ReplayBuffer to keep critic's loss small

        '''Arguments for device'''
        self.gpu_id = int(0)  # `int` means the ID of single GPU, -1 means CPU
        self.thread_num = int(8)  # cpu_num for pytorch, `torch.set_num_threads(self.num_threads)`
        self.random_seed = int(0)  # initialize random seed in self.init_before_training()

        '''Arguments for evaluate'''
        self.cwd = None  # current working directory to save model. None means set automatically
        self.if_remove = True  # remove the cwd folder? (True, False, None:ask me)
        self.break_step = +np.inf  # break training if 'total_step > break_step'
        self.eval_times = int(32)  # number of times that get episodic cumulative return
        self.eval_per_step = int(2e4)  # evaluate the agent per training steps

    def init_before_training(self):
        """Create the working directory `cwd` (auto-named from env and agent class) for saving models."""
        if self.cwd is None:  # set cwd (current working directory) for saving model
            self.cwd = f'./{self.env_name}_{self.agent_class.__name__[5:]}'
        os.makedirs(self.cwd, exist_ok=True)
def get_gym_env_args(env, if_print: bool) -> dict:
    """Get a dict ``env_args`` about a standard OpenAI gym env information.

    param env: a standard OpenAI gym env
    param if_print: [bool] print the dict about env information.
    return: env_args [dict]

    env_args = {
        'env_name': env_name,       # [str] the environment name, such as XxxXxx-v0
        'state_dim': state_dim,     # [int] the dimension of state
        'action_dim': action_dim,   # [int] the dimension of action or the number of discrete action
        'if_discrete': if_discrete, # [bool] action space is discrete or continuous
    }
    """
    if {'unwrapped', 'observation_space', 'action_space', 'spec'}.issubset(dir(env)):  # isinstance(env, gym.Env):
        env_name = env.unwrapped.spec.id
        state_shape = env.observation_space.shape
        state_dim = state_shape[0] if len(state_shape) == 1 else state_shape  # sometimes state_dim is a list

        if_discrete = isinstance(env.action_space, gym.spaces.Discrete)
        if if_discrete:  # make sure it is discrete action space
            action_dim = env.action_space.n
        elif isinstance(env.action_space, gym.spaces.Box):  # make sure it is continuous action space
            action_dim = env.action_space.shape[0]
            # warn when the action range is not the recommended (-1, +1)
            if any(env.action_space.high - 1):
                print('WARNING: env.action_space.high', env.action_space.high)
            if any(env.action_space.low + 1):
                print('WARNING: env.action_space.low', env.action_space.low)
        else:
            raise RuntimeError('\n| Error in get_gym_env_info(). Please set these value manually:'
                               '\n  `state_dim=int; action_dim=int; if_discrete=bool;`'
                               '\n  And keep action_space in range (-1, 1).')
    else:
        # custom env that carries the four attributes itself
        env_name = env.env_name
        state_dim = env.state_dim
        action_dim = env.action_dim
        if_discrete = env.if_discrete

    env_args = {'env_name': env_name,
                'state_dim': state_dim,
                'action_dim': action_dim,
                'if_discrete': if_discrete, }
    if if_print:
        env_args_str = repr(env_args).replace(',', f",\n{'':11}")
        print(f"env_args = {env_args_str}")
    return env_args
def kwargs_filter(function, kwargs: dict) -> dict:
    """Return the subset of `kwargs` whose keys are parameter names of `function`."""
    import inspect

    sign = inspect.signature(function).parameters.values()
    sign = {val.name for val in sign}  # the set of accepted parameter names
    common_args = sign.intersection(kwargs.keys())
    return {key: kwargs[key] for key in common_args}  # filtered kwargs
def build_env(env_class=None, env_args=None):
    """Instantiate an env and attach the four standard attributes from `env_args`.

    Gym-registered envs are created by id; custom env classes receive the
    subset of `env_args` that matches their __init__ signature.
    """
    if env_class.__module__ == 'gym.envs.registration':  # special rule for `gym.make`
        assert '0.18.0' <= gym.__version__ <= '0.25.2'  # pip3 install gym==0.24.0
        env = env_class(id=env_args['env_name'])
    else:
        env = env_class(**kwargs_filter(env_class.__init__, env_args.copy()))
    # attach the standard attributes so downstream code can rely on them
    for attr_str in ('env_name', 'state_dim', 'action_dim', 'if_discrete'):
        setattr(env, attr_str, env_args[attr_str])
    return env
class AgentBase:
    """Base class for DRL agents: builds actor/critic networks, their
    optimizers and shared hyper-parameters from a `Config`."""
    # NOTE(review): `args: Config = Config()` is evaluated once at class
    # definition time, so every call that omits `args` shares one Config
    # instance — consider a None sentinel default.
    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        self.state_dim = state_dim
        self.action_dim = action_dim

        self.gamma = args.gamma
        self.batch_size = args.batch_size
        self.repeat_times = args.repeat_times
        self.reward_scale = args.reward_scale
        self.learning_rate = args.learning_rate
        self.if_off_policy = args.if_off_policy
        self.soft_update_tau = args.soft_update_tau

        self.last_state = None  # save the last state of the trajectory for training. `last_state.shape == (state_dim)`
        self.device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")

        # subclasses set `act_class`/`cri_class` before calling this __init__
        act_class = getattr(self, "act_class", None)
        cri_class = getattr(self, "cri_class", None)
        self.act = self.act_target = act_class(net_dims, state_dim, action_dim).to(self.device)
        # without a separate critic class, the critic shares the actor network
        self.cri = self.cri_target = cri_class(net_dims, state_dim, action_dim).to(self.device) \
            if cri_class else self.act

        self.act_optimizer = torch.optim.Adam(self.act.parameters(), self.learning_rate)
        self.cri_optimizer = torch.optim.Adam(self.cri.parameters(), self.learning_rate) \
            if cri_class else self.act_optimizer

        self.criterion = torch.nn.SmoothL1Loss()

    @staticmethod
    def optimizer_update(optimizer, objective: Tensor):
        """One gradient step: zero grads, backprop `objective`, apply the update."""
        optimizer.zero_grad()
        objective.backward()
        optimizer.step()

    @staticmethod
    def soft_update(target_net: torch.nn.Module, current_net: torch.nn.Module, tau: float):
        """Polyak averaging: target = tau * current + (1 - tau) * target."""
        # assert target_net is not current_net
        for tar, cur in zip(target_net.parameters(), current_net.parameters()):
            tar.data.copy_(cur.data * tau + tar.data * (1.0 - tau))
class AgentPPO(AgentBase):
    """Proximal Policy Optimization agent with clipped surrogate objective,
    GAE advantages and an entropy bonus."""
    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        self.if_off_policy = False
        self.act_class = getattr(self, "act_class", ActorPPO)
        self.cri_class = getattr(self, "cri_class", CriticPPO)
        AgentBase.__init__(self, net_dims, state_dim, action_dim, gpu_id, args)

        self.ratio_clip = getattr(args, "ratio_clip", 0.25)  # `ratio.clamp(1 - clip, 1 + clip)`
        self.lambda_gae_adv = getattr(args, "lambda_gae_adv", 0.95)  # could be 0.80~0.99
        self.lambda_entropy = getattr(args, "lambda_entropy", 0.01)  # could be 0.00~0.10
        self.lambda_entropy = torch.tensor(self.lambda_entropy, dtype=torch.float32, device=self.device)

    def explore_env(self, env, horizon_len: int) -> [Tensor]:
        """Collect `horizon_len` on-policy transitions from `env`.

        Returns (states, actions, logprobs, rewards, undones); actions are the
        raw (pre-tanh) samples and logprobs their log-likelihood, both needed
        by the PPO update.
        """
        states = torch.zeros((horizon_len, self.state_dim), dtype=torch.float32).to(self.device)
        actions = torch.zeros((horizon_len, self.action_dim), dtype=torch.float32).to(self.device)
        logprobs = torch.zeros(horizon_len, dtype=torch.float32).to(self.device)
        rewards = torch.zeros(horizon_len, dtype=torch.float32).to(self.device)
        dones = torch.zeros(horizon_len, dtype=torch.bool).to(self.device)

        ary_state = self.last_state

        get_action = self.act.get_action  # hoist method lookups out of the loop
        convert = self.act.convert_action_for_env
        for i in range(horizon_len):
            state = torch.as_tensor(ary_state, dtype=torch.float32, device=self.device)
            action, logprob = [t.squeeze(0) for t in get_action(state.unsqueeze(0))[:2]]

            ary_action = convert(action).detach().cpu().numpy()  # squash into (-1, 1) for the env
            ary_state, reward, done, _ = env.step(ary_action)
            if done:  # restart the episode so exploration continues seamlessly
                ary_state = env.reset()

            states[i] = state
            actions[i] = action
            logprobs[i] = logprob
            rewards[i] = reward
            dones[i] = done
        # carry over so the next call continues the same trajectory
        self.last_state = ary_state

        rewards = (rewards * self.reward_scale).unsqueeze(1)
        undones = (1 - dones.type(torch.float32)).unsqueeze(1)
        return states, actions, logprobs, rewards, undones

    def update_net(self, buffer) -> [float]:
        """One PPO update over a fresh on-policy rollout.

        Returns (mean critic loss, mean actor objective, mean a_std_log).
        """
        with torch.no_grad():
            states, actions, logprobs, rewards, undones = buffer
            buffer_size = states.shape[0]

            '''get advantages reward_sums'''
            bs = 2 ** 10  # set a smaller 'batch_size' when out of GPU memory.
            values = [self.cri(states[i:i + bs]) for i in range(0, buffer_size, bs)]
            values = torch.cat(values, dim=0).squeeze(1)  # values.shape == (buffer_size, )

            advantages = self.get_advantages(rewards, undones, values)  # advantages.shape == (buffer_size, )
            reward_sums = advantages + values  # reward_sums.shape == (buffer_size, )
            del rewards, undones, values

            # normalize advantages for a stable surrogate objective
            advantages = (advantages - advantages.mean()) / (advantages.std(dim=0) + 1e-5)
        assert logprobs.shape == advantages.shape == reward_sums.shape == (buffer_size,)

        '''update network'''
        obj_critics = 0.0
        obj_actors = 0.0

        update_times = int(buffer_size * self.repeat_times / self.batch_size)
        assert update_times >= 1
        for _ in range(update_times):
            indices = torch.randint(buffer_size, size=(self.batch_size,), requires_grad=False)
            state = states[indices]
            action = actions[indices]
            logprob = logprobs[indices]
            advantage = advantages[indices]
            reward_sum = reward_sums[indices]

            value = self.cri(state).squeeze(1)  # critic network predicts the reward_sum (Q value) of state
            obj_critic = self.criterion(value, reward_sum)
            self.optimizer_update(self.cri_optimizer, obj_critic)

            new_logprob, obj_entropy = self.act.get_logprob_entropy(state, action)
            # importance ratio of the new policy w.r.t. the behavior policy
            ratio = (new_logprob - logprob.detach()).exp()
            surrogate1 = advantage * ratio
            surrogate2 = advantage * ratio.clamp(1 - self.ratio_clip, 1 + self.ratio_clip)
            obj_surrogate = torch.min(surrogate1, surrogate2).mean()  # clipped surrogate objective

            obj_actor = obj_surrogate + obj_entropy.mean() * self.lambda_entropy
            self.optimizer_update(self.act_optimizer, -obj_actor)  # maximize via gradient ascent

            obj_critics += obj_critic.item()
            obj_actors += obj_actor.item()
        a_std_log = getattr(self.act, 'a_std_log', torch.zeros(1)).mean()
        return obj_critics / update_times, obj_actors / update_times, a_std_log.item()

    def get_advantages(self, rewards: Tensor, undones: Tensor, values: Tensor) -> Tensor:
        """Generalized Advantage Estimation, computed backwards over the horizon."""
        advantages = torch.empty_like(values)  # advantage value

        masks = undones * self.gamma
        horizon_len = rewards.shape[0]

        # bootstrap from the value of the state following the rollout
        next_state = torch.tensor(self.last_state, dtype=torch.float32).to(self.device)
        next_value = self.cri(next_state.unsqueeze(0)).detach().squeeze(1).squeeze(0)

        advantage = 0  # last_gae_lambda
        for t in range(horizon_len - 1, -1, -1):
            delta = rewards[t] + masks[t] * next_value - values[t]  # TD error
            advantages[t] = advantage = delta + masks[t] * self.lambda_gae_adv * advantage
            next_value = values[t]
        return advantages
class PendulumEnv(gym.Wrapper):  # a demo of custom gym env
    """Pendulum wrapper that exposes the four standard env attributes and
    rescales actions from (-1, 1) to gym's native (-2, 2) range."""
    def __init__(self, gym_env_name=None):
        gym.logger.set_level(40)  # Block warning
        if gym_env_name is None:
            # gym renamed the env id at version 0.18.0
            gym_env_name = "Pendulum-v0" if gym.__version__ < '0.18.0' else "Pendulum-v1"
        super().__init__(env=gym.make(gym_env_name))

        '''the necessary env information when you design a custom env'''
        self.env_name = gym_env_name  # the name of this env.
        self.state_dim = self.observation_space.shape[0]  # feature number of state
        self.action_dim = self.action_space.shape[0]  # feature number of action
        self.if_discrete = False  # discrete action or continuous action

    def reset(self) -> np.ndarray:  # reset the agent in env
        """Reset the episode and return the initial state."""
        return self.env.reset()

    def step(self, action: np.ndarray) -> (np.ndarray, float, bool, dict):  # agent interacts in env
        """Step with an action in (-1, 1); it is scaled by 2 before reaching gym."""
        # OpenAI Pendulum env set its action space as (-2, +2). It is bad.
        # We suggest that adjust action space to (-1, +1) when designing a custom env.
        state, reward, done, info_dict = self.env.step(action * 2)
        state = state.reshape(self.state_dim)
        return state, float(reward), done, info_dict
def train_agent(args: Config):
    """On-policy training loop: explore, update, evaluate until break_step or `cwd/stop` exists."""
    args.init_before_training()

    env = build_env(args.env_class, args.env_args)
    agent = args.agent_class(args.net_dims, args.state_dim, args.action_dim,
                             gpu_id=args.gpu_id, args=args)
    agent.last_state = env.reset()

    eval_env = build_env(args.env_class, args.env_args)
    evaluator = Evaluator(eval_env=eval_env,
                          eval_per_step=args.eval_per_step,
                          eval_times=args.eval_times,
                          cwd=args.cwd)

    torch.set_grad_enabled(False)  # gradients are only needed inside update_net
    stop_flag_path = f"{args.cwd}/stop"
    while True:  # start training
        buffer_items = agent.explore_env(env, args.horizon_len)

        torch.set_grad_enabled(True)
        logging_tuple = agent.update_net(buffer_items)
        torch.set_grad_enabled(False)

        evaluator.evaluate_and_save(agent.act, args.horizon_len, logging_tuple)
        if evaluator.total_step > args.break_step or os.path.exists(stop_flag_path):
            break  # stop training when reach `break_step` or `mkdir cwd/stop`
def render_agent(env_class, env_args: dict, net_dims: [int], agent_class, actor_path: str, render_times: int = 8):
    """Load a saved actor checkpoint and render `render_times` episodes with it (CPU only)."""
    env = build_env(env_class, env_args)
    agent = agent_class(net_dims, env_args['state_dim'], env_args['action_dim'], gpu_id=-1)
    actor = agent.act

    print(f"| render and load actor from: {actor_path}")
    state_dict = torch.load(actor_path, map_location=lambda storage, loc: storage)
    actor.load_state_dict(state_dict)

    for i in range(render_times):
        cumulative_reward, episode_step = get_rewards_and_steps(env, actor, if_render=True)
        print(f"|{i:4} cumulative_reward {cumulative_reward:9.3f} episode_step {episode_step:5.0f}")
class Evaluator:
    """Periodically measures episodic returns of the current actor and prints one log row."""

    def __init__(self, eval_env, eval_per_step: int = 1e4, eval_times: int = 8, cwd: str = '.'):
        self.cwd = cwd
        self.env_eval = eval_env
        self.eval_step = 0  # total_step value at the last evaluation
        self.total_step = 0  # cumulative env steps observed so far
        self.start_time = time.time()
        self.eval_times = eval_times  # number of times that get episodic cumulative return
        self.eval_per_step = eval_per_step  # evaluate the agent per training steps
        self.recorder = []  # (total_step, wall_time, avg_return) history

        banner = (
            f"\n| `step`: Number of samples, or total training steps, or running times of `env.step()`."
            f"\n| `time`: Time spent from the start of training to this moment."
            f"\n| `avgR`: Average value of cumulative rewards, which is the sum of rewards in an episode."
            f"\n| `stdR`: Standard dev of cumulative rewards, which is the sum of rewards in an episode."
            f"\n| `avgS`: Average of steps in an episode."
            f"\n| `objC`: Objective of Critic network. Or call it loss function of critic network."
            f"\n| `objA`: Objective of Actor network. It is the average Q value of the critic network."
            f"\n| {'step':>8} {'time':>8} | {'avgR':>8} {'stdR':>6} {'avgS':>6} | {'objC':>8} {'objA':>8}"
        )
        print(banner)

    def evaluate_and_save(self, actor, horizon_len: int, logging_tuple: tuple):
        """Advance the step counter; when an evaluation is due, roll out episodes and log."""
        self.total_step += horizon_len
        if self.eval_step + self.eval_per_step > self.total_step:
            return  # not enough new steps since the last evaluation
        self.eval_step = self.total_step

        results = np.array([get_rewards_and_steps(self.env_eval, actor)
                            for _ in range(self.eval_times)], dtype=np.float32)
        returns = results[:, 0]
        steps = results[:, 1]
        avg_r = returns.mean()  # average of cumulative rewards
        std_r = returns.std()  # std of cumulative rewards
        avg_s = steps.mean()  # average of steps in an episode

        used_time = time.time() - self.start_time
        self.recorder.append((self.total_step, used_time, avg_r))
        print(f"| {self.total_step:8.2e} {used_time:8.0f} "
              f"| {avg_r:8.2f} {std_r:6.2f} {avg_s:6.0f} "
              f"| {logging_tuple[0]:8.2f} {logging_tuple[1]:8.2f}")
def get_rewards_and_steps(env, actor, if_render: bool = False) -> (float, int):  # cumulative_rewards and episode_steps
    """Play one episode with the deterministic actor and return (cumulative_reward, steps)."""
    device = next(actor.parameters()).device  # infer the actor's device from its parameters
    state = env.reset()
    cumulative_returns = 0.0  # sum of rewards in an episode
    episode_steps = 0
    for episode_steps in range(12345):  # hard cap so a never-done env cannot loop forever
        state_tensor = torch.as_tensor(state, dtype=torch.float32, device=device).unsqueeze(0)
        action = actor(state_tensor).detach().cpu().numpy()[0]
        state, reward, done, _ = env.step(action)
        cumulative_returns += reward

        if if_render:
            env.render()
        if done:
            break
    return cumulative_returns, episode_steps + 1
def train_ppo_for_pendulum():
    """Configure and launch a PPO run on the custom PendulumEnv wrapper."""
    env_args = {
        'env_name': 'Pendulum',  # Apply torque on the free end to swing a pendulum into an upright position
        'state_dim': 3,  # the x-y coordinates of the pendulum's free end and its angular velocity.
        'action_dim': 1,  # the torque applied to free end of the pendulum
        'if_discrete': False  # continuous action space, symbols → direction, value → force
    }
    get_gym_env_args(env=PendulumEnv(), if_print=True)  # return env_args

    args = Config(AgentPPO, PendulumEnv, env_args)  # see `config.py Arguments()` for hyperparameter explanation
    args.break_step = int(2e5)  # break training if 'total_step > break_step'
    args.net_dims = (64, 32)  # the middle layer dimension of MultiLayer Perceptron
    args.gamma = 0.97  # discount factor of future rewards
    args.repeat_times = 16  # repeatedly update network using ReplayBuffer to keep critic's loss small
    train_agent(args)
def train_ppo_for_lunar_lander():
    """Train PPO on gym's LunarLanderContinuous-v2, then optionally render the saved actor.

    Fix: the original `if input(...):` treated ANY non-empty reply as consent,
    contradicting the prompt "Press 'y'"; now only an explicit 'y' triggers
    rendering. Also guards against an empty checkpoint directory, which would
    have raised IndexError on `sorted(...)[-1]`.
    """
    agent_class = AgentPPO  # DRL algorithm name
    env_class = gym.make
    env_args = {
        'env_name': 'LunarLanderContinuous-v2',  # A lander learns to land on a landing pad
        'state_dim': 8,  # coordinates xy, linear velocities xy, angle, angular velocity, two booleans
        'action_dim': 2,  # fire main engine or side engine.
        'if_discrete': False  # continuous action space, symbols → direction, value → force
    }
    get_gym_env_args(env=gym.make('LunarLanderContinuous-v2'), if_print=True)  # return env_args

    args = Config(agent_class, env_class, env_args)  # see `config.py Arguments()` for hyperparameter explanation
    args.break_step = int(4e5)  # break training if 'total_step > break_step'
    args.net_dims = (64, 32)  # the middle layer dimension of MultiLayer Perceptron
    args.repeat_times = 32  # repeatedly update network using ReplayBuffer to keep critic's loss small
    args.lambda_entropy = 0.04  # the lambda of the policy entropy term in PPO
    train_agent(args)

    if input("| Press 'y' to load actor.pth and render:").strip().lower() == 'y':
        actor_files = sorted(s for s in os.listdir(args.cwd) if s[-4:] == '.pth')
        if actor_files:  # robustness: skip rendering when no checkpoint was saved
            actor_path = f"{args.cwd}/{actor_files[-1]}"
            render_agent(env_class, env_args, args.net_dims, agent_class, actor_path)
# Script entry point: run both demos back to back when executed directly.
# Importing this module does not start training.
if __name__ == "__main__":
    train_ppo_for_pendulum()
    train_ppo_for_lunar_lander()
| 23,408 | 46.38664 | 119 | py |
ElegantRL | ElegantRL-master/helloworld/helloworld_DDPG_single_file.py | import os
import sys
import time
from copy import deepcopy
import gym
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
from torch.distributions import Normal
class Config:  # for off-policy
    """Hyperparameter container for (off-policy) training runs."""

    def __init__(self, agent_class=None, env_class=None, env_args=None):
        self.agent_class = agent_class  # agent = agent_class(...)
        self.if_off_policy = True  # whether off-policy or on-policy of DRL algorithm

        self.env_class = env_class  # env = env_class(**env_args)
        self.env_args = env_args  # env = env_class(**env_args)
        if env_args is None:  # dummy env_args
            env_args = dict(env_name=None, state_dim=None, action_dim=None, if_discrete=None)
        self.env_name = env_args['env_name']  # the name of environment. Be used to set 'cwd'.
        self.state_dim = env_args['state_dim']  # vector dimension (feature number) of state
        self.action_dim = env_args['action_dim']  # vector dimension (feature number) of action
        self.if_discrete = env_args['if_discrete']  # discrete or continuous action space

        # Arguments for reward shaping
        self.gamma = 0.99  # discount factor of future rewards
        self.reward_scale = 1.0  # an approximate target reward usually be closed to 256

        # Arguments for training
        self.net_dims = (64, 32)  # the middle layer dimension of MLP (MultiLayer Perceptron)
        self.learning_rate = 6e-5  # 2 ** -14 ~= 6e-5
        self.soft_update_tau = 5e-3  # 2 ** -8 ~= 5e-3
        self.batch_size = int(64)  # num of transitions sampled from replay buffer.
        self.horizon_len = int(512)  # collect horizon_len step while exploring, then update network
        self.buffer_size = int(1e6)  # ReplayBuffer size. First in first out for off-policy.
        self.repeat_times = 1.0  # repeatedly update network using ReplayBuffer to keep critic's loss small

        # Arguments for device
        self.gpu_id = int(0)  # `int` means the ID of single GPU, -1 means CPU
        self.thread_num = int(8)  # cpu_num for pytorch, `torch.set_num_threads(self.num_threads)`
        self.random_seed = int(0)  # initialize random seed in self.init_before_training()

        # Arguments for evaluate
        self.cwd = None  # current working directory to save model. None means set automatically
        self.if_remove = True  # remove the cwd folder? (True, False, None:ask me)
        self.break_step = +np.inf  # break training if 'total_step > break_step'
        self.eval_times = int(32)  # number of times that get episodic cumulative return
        self.eval_per_step = int(2e4)  # evaluate the agent per training steps

    def init_before_training(self):
        """Derive and create the checkpoint directory when `cwd` was not set explicitly."""
        if self.cwd is None:  # set cwd (current working directory) for saving model
            self.cwd = f'./{self.env_name}_{self.agent_class.__name__[5:]}'
        os.makedirs(self.cwd, exist_ok=True)
class Actor(nn.Module):
    """Deterministic policy network; tanh keeps actions inside (-1, +1)."""

    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__()
        self.net = build_mlp(dims=[state_dim, *dims, action_dim])
        self.explore_noise_std = None  # standard deviation of exploration action noise

    def forward(self, state: Tensor) -> Tensor:
        # deterministic action for evaluation/exploitation
        return self.net(state).tanh()

    def get_action(self, state: Tensor) -> Tensor:  # for exploration
        # sample Gaussian noise around the deterministic action, then clip to the bounds
        action_avg = self.net(state).tanh()
        noisy_action = Normal(action_avg, self.explore_noise_std).sample()
        return noisy_action.clip(-1.0, 1.0)
class Critic(nn.Module):
    """Q-network: maps a (state, action) pair to a scalar Q value."""

    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__()
        self.net = build_mlp(dims=[state_dim + action_dim, *dims, 1])

    def forward(self, state: Tensor, action: Tensor) -> Tensor:
        state_action = torch.cat((state, action), dim=1)
        return self.net(state_action)  # Q value
def build_mlp(dims: [int]) -> nn.Sequential:  # MLP (MultiLayer Perceptron)
    """Stack Linear+ReLU pairs for consecutive dims; the output layer has no activation."""
    layers = []
    for in_dim, out_dim in zip(dims[:-1], dims[1:]):
        layers.extend((nn.Linear(in_dim, out_dim), nn.ReLU()))
    layers.pop()  # remove the activation of output layer
    return nn.Sequential(*layers)
def get_gym_env_args(env, if_print: bool) -> dict:
    """Extract the four env fields ElegantRL needs from a gym env or a custom env.

    Idiom fix: the original used a conditional expression purely for its side
    effect (`print(...) if if_print else None`); replaced with a plain `if`.

    Args:
        env: a `gym.Env` instance, or any custom env exposing the attributes
            `env_name`, `state_dim`, `action_dim` and `if_discrete`.
        if_print: print the resulting dict as copy-pasteable code.

    Returns:
        dict with keys 'env_name', 'state_dim', 'action_dim', 'if_discrete'.
    """
    if {'unwrapped', 'observation_space', 'action_space', 'spec'}.issubset(dir(env)):  # isinstance(env, gym.Env):
        env_name = env.unwrapped.spec.id
        state_shape = env.observation_space.shape
        state_dim = state_shape[0] if len(state_shape) == 1 else state_shape  # sometimes state_dim is a list
        if_discrete = isinstance(env.action_space, gym.spaces.Discrete)
        action_dim = env.action_space.n if if_discrete else env.action_space.shape[0]
    else:  # custom env: trust its self-declared metadata
        env_name = env.env_name
        state_dim = env.state_dim
        action_dim = env.action_dim
        if_discrete = env.if_discrete

    env_args = {'env_name': env_name, 'state_dim': state_dim, 'action_dim': action_dim, 'if_discrete': if_discrete}
    if if_print:
        print(f"env_args = {repr(env_args)}")
    return env_args
def kwargs_filter(function, kwargs: dict) -> dict:
    """Keep only the entries of `kwargs` whose names `function` actually accepts."""
    import inspect
    accepted = {param.name for param in inspect.signature(function).parameters.values()}
    return {name: kwargs[name] for name in accepted & kwargs.keys()}  # filtered kwargs
def build_env(env_class=None, env_args=None):
    """Instantiate an env via gym's registry or a custom class, then attach standard metadata."""
    if env_class.__module__ == 'gym.envs.registration':  # special rule
        assert '0.18.0' <= gym.__version__ <= '0.25.2'  # pip3 install gym==0.24.0
        env = env_class(id=env_args['env_name'])
    else:
        # custom env: pass only the kwargs its __init__ actually accepts
        env = env_class(**kwargs_filter(env_class.__init__, env_args.copy()))
    # make sure the four standard attributes exist on the instance
    for key in ('env_name', 'state_dim', 'action_dim', 'if_discrete'):
        setattr(env, key, env_args[key])
    return env
class AgentBase:
    """Base class shared by the DRL agents: builds networks, optimizers, and update helpers."""

    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        # NOTE(review): `args: Config = Config()` is a default evaluated once at class-definition
        # time; all callers in this file pass `args` explicitly, but sharing one default Config
        # instance across calls is a known Python pitfall — confirm before relying on the default.
        self.state_dim = state_dim
        self.action_dim = action_dim

        self.gamma = args.gamma
        self.batch_size = args.batch_size
        self.repeat_times = args.repeat_times
        self.reward_scale = args.reward_scale
        self.learning_rate = args.learning_rate
        self.if_off_policy = args.if_off_policy
        self.soft_update_tau = args.soft_update_tau

        self.last_state = None  # save the last state of the trajectory for training. `last_state.shape == (state_dim)`
        self.device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")

        act_class = getattr(self, "act_class", None)
        cri_class = getattr(self, "cri_class", None)
        # act_target/cri_target start out as ALIASES of act/cri; subclasses that need real
        # target networks (e.g. AgentDDPG) replace them with deepcopies after this runs.
        self.act = self.act_target = act_class(net_dims, state_dim, action_dim).to(self.device)
        # when a subclass defines no critic class, the critic falls back to the actor itself
        self.cri = self.cri_target = cri_class(net_dims, state_dim, action_dim).to(self.device) \
            if cri_class else self.act

        self.act_optimizer = torch.optim.Adam(self.act.parameters(), self.learning_rate)
        # without a dedicated critic, both handles point at the same optimizer
        self.cri_optimizer = torch.optim.Adam(self.cri.parameters(), self.learning_rate) \
            if cri_class else self.act_optimizer

        self.criterion = torch.nn.SmoothL1Loss()

    @staticmethod
    def optimizer_update(optimizer, objective: Tensor):
        """Zero gradients, backpropagate `objective`, and take one optimizer step."""
        optimizer.zero_grad()
        objective.backward()
        optimizer.step()

    @staticmethod
    def soft_update(target_net: torch.nn.Module, current_net: torch.nn.Module, tau: float):
        """Polyak-average current_net parameters into target_net, in place."""
        # assert target_net is not current_net
        for tar, cur in zip(target_net.parameters(), current_net.parameters()):
            tar.data.copy_(cur.data * tau + tar.data * (1.0 - tau))
class AgentDDPG(AgentBase):
    """DDPG: Deep Deterministic Policy Gradient — off-policy actor-critic with target networks."""

    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        self.act_class = getattr(self, 'act_class', Actor)  # get the attribute of object `self`, set Actor in default
        self.cri_class = getattr(self, 'cri_class', Critic)  # get the attribute of object `self`, set Critic in default
        AgentBase.__init__(self, net_dims, state_dim, action_dim, gpu_id, args)
        # replace the aliases created by AgentBase with real, independent target networks
        self.act_target = deepcopy(self.act)
        self.cri_target = deepcopy(self.cri)
        self.act.explore_noise_std = getattr(args, 'explore_noise', 0.1)  # set for `self.act.get_action()`

    def explore_env(self, env, horizon_len: int, if_random: bool = False) -> [Tensor]:
        """Collect `horizon_len` transitions from `env`.

        With if_random=True, actions are uniform in (-1, 1) (replay-buffer warm-up);
        otherwise the noisy policy `act.get_action` is used. Returns
        (states, actions, rewards, undones) tensors sized for the buffer.
        """
        states = torch.zeros((horizon_len, self.state_dim), dtype=torch.float32).to(self.device)
        actions = torch.zeros((horizon_len, self.action_dim), dtype=torch.float32).to(self.device)
        rewards = torch.zeros(horizon_len, dtype=torch.float32).to(self.device)
        dones = torch.zeros(horizon_len, dtype=torch.bool).to(self.device)

        ary_state = self.last_state
        get_action = self.act.get_action
        for i in range(horizon_len):
            state = torch.as_tensor(ary_state, dtype=torch.float32, device=self.device)
            action = torch.rand(self.action_dim) * 2 - 1.0 if if_random else get_action(state.unsqueeze(0)).squeeze(0)

            ary_action = action.detach().cpu().numpy()
            ary_state, reward, done, _ = env.step(ary_action)
            if done:
                ary_state = env.reset()  # immediately start the next episode

            states[i] = state
            actions[i] = action
            rewards[i] = reward
            dones[i] = done
        self.last_state = ary_state  # exploration resumes from here on the next call

        rewards = rewards.unsqueeze(1)
        undones = (1.0 - dones.type(torch.float32)).unsqueeze(1)  # undone = 1 - done (masks bootstrapping)
        return states, actions, rewards, undones

    def update_net(self, buffer) -> [float]:
        """One training round: `update_times` critic+actor updates on batches sampled from `buffer`.

        Returns (mean critic objective, mean actor objective) for logging.
        """
        obj_critics = obj_actors = 0.0
        update_times = int(buffer.cur_size * self.repeat_times / self.batch_size)
        assert update_times > 0
        for i in range(update_times):
            obj_critic, state = self.get_obj_critic(buffer, self.batch_size)
            self.optimizer_update(self.cri_optimizer, obj_critic)
            self.soft_update(self.cri_target, self.cri, self.soft_update_tau)
            obj_critics += obj_critic.item()

            action = self.act(state)
            obj_actor = self.cri_target(state, action).mean()  # maximize Q, so minimize -obj_actor below
            self.optimizer_update(self.act_optimizer, -obj_actor)
            self.soft_update(self.act_target, self.act, self.soft_update_tau)
            obj_actors += obj_actor.item()
        return obj_critics / update_times, obj_actors / update_times

    def get_obj_critic(self, buffer, batch_size: int) -> (Tensor, Tensor):
        """TD-target critic loss on one sampled batch; also returns the sampled states."""
        with torch.no_grad():
            states, actions, rewards, undones, next_states = buffer.sample(batch_size)
            next_actions = self.act_target(next_states)
            next_q_values = self.cri_target(next_states, next_actions)
            q_labels = rewards + undones * self.gamma * next_q_values  # Bellman target
        q_values = self.cri(states, actions)
        obj_critic = self.criterion(q_values, q_labels)
        return obj_critic, states
class ReplayBuffer:  # for off-policy
    """Fixed-size FIFO replay buffer backed by preallocated tensors on `device`."""

    def __init__(self, max_size: int, state_dim: int, action_dim: int, gpu_id: int = 0):
        self.p = 0  # pointer: next index to write
        self.if_full = False
        self.cur_size = 0
        self.max_size = max_size
        self.device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")

        self.states = torch.empty((max_size, state_dim), dtype=torch.float32, device=self.device)
        self.actions = torch.empty((max_size, action_dim), dtype=torch.float32, device=self.device)
        self.rewards = torch.empty((max_size, 1), dtype=torch.float32, device=self.device)
        self.undones = torch.empty((max_size, 1), dtype=torch.float32, device=self.device)

    def update(self, items: [Tensor]):
        """Append a batch of transitions, wrapping around once the buffer is full."""
        states, actions, rewards, undones = items
        new_p = self.p + rewards.shape[0]
        if new_p > self.max_size:  # the batch crosses the end: split into tail + head writes
            self.if_full = True
            tail_len = self.max_size - self.p  # rows that still fit before the end
            head_len = new_p - self.max_size  # rows that wrap to the front
            for buf, batch in ((self.states, states), (self.actions, actions),
                               (self.rewards, rewards), (self.undones, undones)):
                buf[self.p:self.max_size] = batch[:tail_len]
                buf[0:head_len] = batch[-head_len:]
            new_p = head_len
        else:
            self.states[self.p:new_p] = states
            self.actions[self.p:new_p] = actions
            self.rewards[self.p:new_p] = rewards
            self.undones[self.p:new_p] = undones
        self.p = new_p
        self.cur_size = self.max_size if self.if_full else self.p

    def sample(self, batch_size: int) -> [Tensor]:
        """Uniformly sample transitions; index i+1 supplies the next state."""
        ids = torch.randint(self.cur_size - 1, size=(batch_size,), requires_grad=False)
        # NOTE(review): around the write pointer, states[i + 1] may not be the temporal
        # successor of states[i]; this simplification is inherited from the original.
        next_states = self.states[ids + 1]
        return self.states[ids], self.actions[ids], self.rewards[ids], self.undones[ids], next_states
class PendulumEnv(gym.Wrapper):  # a demo of custom gym env
    """Wraps gym's Pendulum and adds the metadata fields ElegantRL expects."""

    def __init__(self, gym_env_name=None):
        gym.logger.set_level(40)  # Block warning
        if gym_env_name is None:
            # NOTE(review): lexicographic string comparison of versions; fine for the gym
            # versions used here, but would misorder e.g. '0.9.0' vs '0.18.0' — confirm.
            gym_env_name = "Pendulum-v0" if gym.__version__ < '0.18.0' else "Pendulum-v1"
        super().__init__(env=gym.make(gym_env_name))

        '''the necessary env information when you design a custom env'''
        self.env_name = gym_env_name  # the name of this env.
        self.state_dim = self.observation_space.shape[0]  # feature number of state
        self.action_dim = self.action_space.shape[0]  # feature number of action
        self.if_discrete = False  # discrete action or continuous action

    def reset(self) -> np.ndarray:  # reset the agent in env
        """Reset the wrapped env and return the initial state."""
        return self.env.reset()

    def step(self, action: np.ndarray) -> (np.ndarray, float, bool, dict):  # agent interacts in env
        # OpenAI Pendulum env set its action space as (-2, +2). It is bad.
        # We suggest that adjust action space to (-1, +1) when designing a custom env.
        state, reward, done, info_dict = self.env.step(action * 2)  # rescale (-1, +1) -> (-2, +2)
        state = state.reshape(self.state_dim)
        return state, float(reward), done, info_dict
def train_agent(args: Config):
    """Off-policy training loop (DDPG): warm up the replay buffer, then explore/update/evaluate.

    Fix: the original hard-coded `gpu_id = 0` and ignored `args.gpu_id`, so the
    configured device (e.g. -1 for CPU, as passed in from sys.argv by
    `train_ddpg_for_pendulum`) was silently overridden with GPU 0.
    """
    args.init_before_training()
    gpu_id = args.gpu_id  # honor the configured device instead of always using GPU 0

    env = build_env(args.env_class, args.env_args)
    agent = args.agent_class(args.net_dims, args.state_dim, args.action_dim, gpu_id=gpu_id, args=args)
    agent.last_state = env.reset()

    buffer = ReplayBuffer(gpu_id=gpu_id, max_size=args.buffer_size,
                          state_dim=args.state_dim, action_dim=1 if args.if_discrete else args.action_dim, )
    buffer_items = agent.explore_env(env, args.horizon_len * args.eval_times, if_random=True)
    buffer.update(buffer_items)  # warm up for ReplayBuffer

    evaluator = Evaluator(eval_env=build_env(args.env_class, args.env_args),
                          eval_per_step=args.eval_per_step, eval_times=args.eval_times, cwd=args.cwd)

    torch.set_grad_enabled(False)  # gradients only during update_net
    while True:  # start training
        buffer_items = agent.explore_env(env, args.horizon_len)
        buffer.update(buffer_items)

        torch.set_grad_enabled(True)
        logging_tuple = agent.update_net(buffer)
        torch.set_grad_enabled(False)

        evaluator.evaluate_and_save(agent.act, args.horizon_len, logging_tuple)
        if (evaluator.total_step > args.break_step) or os.path.exists(f"{args.cwd}/stop"):
            break  # stop training when reach `break_step` or `mkdir cwd/stop`
class Evaluator:
    """Periodic evaluator: rolls out the actor for several episodes and prints one log row.

    Fix: the default for `eval_per_step` was the float literal `1e4` despite the
    `int` annotation; it is now `int(1e4)` (same value, matching declared type).
    """

    def __init__(self, eval_env, eval_per_step: int = int(1e4), eval_times: int = 8, cwd: str = '.'):
        self.cwd = cwd  # working directory (checkpoints / stop flag live here)
        self.env_eval = eval_env
        self.eval_step = 0  # total_step at the time of the last evaluation
        self.total_step = 0  # cumulative env steps seen so far
        self.start_time = time.time()
        self.eval_times = eval_times  # number of times that get episodic cumulative return
        self.eval_per_step = eval_per_step  # evaluate the agent per training steps
        self.recorder = []  # (total_step, wall_time, avg_return) history

        print("\n| `step`: Number of samples, or total training steps, or running times of `env.step()`."
              "\n| `time`: Time spent from the start of training to this moment."
              "\n| `avgR`: Average value of cumulative rewards, which is the sum of rewards in an episode."
              "\n| `stdR`: Standard dev of cumulative rewards, which is the sum of rewards in an episode."
              "\n| `avgS`: Average of steps in an episode."
              "\n| `objC`: Objective of Critic network. Or call it loss function of critic network."
              "\n| `objA`: Objective of Actor network. It is the average Q value of the critic network."
              f"\n| {'step':>8} {'time':>8} | {'avgR':>8} {'stdR':>6} {'avgS':>6} | {'objC':>8} {'objA':>8}")

    def evaluate_and_save(self, actor, horizon_len: int, logging_tuple: tuple):
        """Advance the step counter; when an evaluation is due, run `eval_times` episodes and log."""
        self.total_step += horizon_len
        if self.eval_step + self.eval_per_step > self.total_step:
            return  # not due yet
        self.eval_step = self.total_step

        rewards_steps_ary = [get_rewards_and_steps(self.env_eval, actor) for _ in range(self.eval_times)]
        rewards_steps_ary = np.array(rewards_steps_ary, dtype=np.float32)
        avg_r = rewards_steps_ary[:, 0].mean()  # average of cumulative rewards
        std_r = rewards_steps_ary[:, 0].std()  # std of cumulative rewards
        avg_s = rewards_steps_ary[:, 1].mean()  # average of steps in an episode

        used_time = time.time() - self.start_time
        self.recorder.append((self.total_step, used_time, avg_r))
        print(f"| {self.total_step:8.2e} {used_time:8.0f} "
              f"| {avg_r:8.2f} {std_r:6.2f} {avg_s:6.0f} "
              f"| {logging_tuple[0]:8.2f} {logging_tuple[1]:8.2f}")
def get_rewards_and_steps(env, actor, if_render: bool = False) -> (float, int):  # cumulative_rewards and episode_steps
    """Play one episode with the deterministic actor; return (cumulative_reward, steps)."""
    device = next(actor.parameters()).device  # net.parameters() is a Python generator.
    state = env.reset()
    episode_steps = 0
    cumulative_returns = 0.0  # sum of rewards in an episode
    for episode_steps in range(12345):  # hard upper bound on episode length
        tensor_state = torch.as_tensor(state, dtype=torch.float32, device=device).unsqueeze(0)
        tensor_action = actor(tensor_state)
        action = tensor_action.detach().cpu().numpy()[0]  # not need detach(), because using torch.no_grad() outside
        state, reward, done, _ = env.step(action)
        cumulative_returns += reward

        if if_render:
            env.render()
        if done:
            break
    return cumulative_returns, episode_steps + 1
def train_ddpg_for_pendulum(gpu_id=0):
    """Configure and launch a DDPG run on the custom Pendulum wrapper."""
    env_args = dict(
        env_name='Pendulum',  # Apply torque on the free end to swing a pendulum into an upright position
        state_dim=3,  # the x-y coordinates of the pendulum's free end and its angular velocity.
        action_dim=1,  # the torque applied to free end of the pendulum
        if_discrete=False,  # continuous action space, symbols → direction, value → force
    )  # env_args = get_gym_env_args(env=gym.make('CartPole-v0'), if_print=True)

    args = Config(agent_class=AgentDDPG, env_class=PendulumEnv, env_args=env_args)  # see `Config` for explanation
    args.break_step = int(1e5)  # break training if 'total_step > break_step'
    args.net_dims = (64, 32)  # the middle layer dimension of MultiLayer Perceptron
    args.gpu_id = gpu_id  # the ID of single GPU, -1 means CPU
    args.gamma = 0.97  # discount factor of future rewards
    train_agent(args)
# Consistency fix: guard the entry point (as the PPO demo script does), so that
# importing this module no longer starts a training run as a side effect.
if __name__ == "__main__":
    train_ddpg_for_pendulum(gpu_id=int(sys.argv[1]) if len(sys.argv) > 1 else -1)
| 19,542 | 47.135468 | 120 | py |
ElegantRL | ElegantRL-master/helloworld/StockTradingVmapEnv.py | import os
import torch
import numpy as np
import numpy.random as rd
import pandas as pd
from functorch import vmap
"""finance environment
Source:
https://github.com/AI4Finance-Foundation/FinRL-Meta/blob/master/Demo_China_A_share_market.ipynb
Modify: Github YonV1943
"""
'''vmap function'''
def _get_total_asset(close, shares, amount):
    """Portfolio value: cash `amount` plus the market value of all held shares."""
    holdings_value = (close * shares).sum()
    return holdings_value + amount  # total_asset
def _get_state(amount, shares, close, tech):
    """Concatenate (cash, holdings, prices, tech factors) into one flat state vector."""
    return torch.cat((amount, shares, close, tech), dim=0)
def _inplace_amount_shares_when_buy(amount, shares, stock_action, close, buy_cost_rate):
    """Buy up to `stock_action` shares, capped by what `amount` can afford; mutates in place."""
    affordable = torch.div(amount, close, rounding_mode='floor')
    stock_delta = torch.min(stock_action, affordable)
    amount -= close * stock_delta * buy_cost_rate
    shares += stock_delta
    # vmap requires a tensor return value; the real outputs are the in-place edits above
    return torch.zeros(1)
def _inplace_amount_shares_when_sell(amount, shares, stock_action, close, sell_cost_rate):
    """Sell up to `-stock_action` shares, capped by current holdings; mutates in place."""
    stock_delta = torch.min(-stock_action, shares)
    amount += close * stock_delta * sell_cost_rate
    shares -= stock_delta
    # dummy tensor return for vmap; the real outputs are the in-place edits above
    return torch.zeros(1)
class StockTradingVmapEnv:
    """Vectorized (vmap-batched) China A-share trading env running `num_envs` portfolios at once.

    Fix: in `step()`, the sell branch computed
    `torch.where((stock_action < 0) & (self.shares > 0))[0]` over the FULL 2-D
    tensors. `torch.where` on a 2-D condition returns the row index of every
    matching element anywhere in the matrix (with duplicates), not the rows
    selling stock `i` — unlike the buy branch, which correctly uses column `i`.
    The sell selection now mirrors the buy branch: `stock_action[:, i]` and
    `self.shares[:, i]`.
    """

    def __init__(self, initial_amount=1e6, max_stock=100, buy_cost_pct=1e-3, sell_cost_pct=1e-3, gamma=0.99,
                 beg_idx=0, end_idx=1113, gpu_id: int = 0, num_envs: int = 4):
        self.df_pwd = './China_A_shares.pandas.dataframe'

        '''load data'''
        close_ary, tech_ary = self.load_data_from_disk()
        close_ary = close_ary[beg_idx:end_idx]
        tech_ary = tech_ary[beg_idx:end_idx]
        print(f"| StockTradingEnv: close_ary.shape {close_ary.shape}")
        print(f"| StockTradingEnv: tech_ary.shape {tech_ary.shape}")

        self.device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
        self.num_envs = num_envs
        self.close_price = torch.tensor(close_ary, dtype=torch.float32, device=self.device)
        self.tech_factor = torch.tensor(tech_ary, dtype=torch.float32, device=self.device)

        '''init'''
        self.gamma = gamma
        self.max_stock = max_stock
        self.initial_amount = initial_amount
        self.max_step = self.close_price.shape[0]
        self.buy_cost_rate = 1. + buy_cost_pct  # pay price * rate when buying
        self.sell_cost_rate = 1. - sell_cost_pct  # receive price * rate when selling

        '''init (set in reset)'''
        self.day = None  # current trading-day index
        self.rewards = None  # per-step rewards of the current episode
        self.total_asset = None  # portfolio value at the previous step
        self.if_random_reset = True  # randomize initial cash/holdings on reset
        self.cumulative_returns = None  # set at episode end

        self.amount = None  # cash per env, shape (num_envs, 1)
        self.shares = None  # holdings per env, shape (num_envs, shares_num)
        self.shares_num = self.close_price.shape[1]
        amount_dim = 1

        '''environment information'''
        self.env_name = 'StockTradingEnvVMAP-v2'
        self.state_dim = self.shares_num + self.close_price.shape[1] + self.tech_factor.shape[1] + amount_dim
        self.action_dim = self.shares_num
        self.if_discrete = False

        '''vmap function'''
        # batch the per-portfolio helpers over the env dimension (dim 0)
        self.vmap_get_total_asset = vmap(
            func=_get_total_asset, in_dims=(None, 0, 0), out_dims=0)
        self.vmap_get_state = vmap(
            func=_get_state, in_dims=(0, 0, None, None), out_dims=0)
        self.vmap_inplace_amount_shares_when_buy = vmap(
            func=_inplace_amount_shares_when_buy, in_dims=(0, 0, 0, None, None), out_dims=0)
        self.vmap_inplace_amount_shares_when_sell = vmap(
            func=_inplace_amount_shares_when_sell, in_dims=(0, 0, 0, None, None), out_dims=0)

    def reset(self):
        """Reset all parallel portfolios to day 0 and return the initial state batch."""
        self.day = 0

        self.amount = torch.zeros((self.num_envs, 1), dtype=torch.float32, device=self.device) + self.initial_amount
        self.shares = torch.zeros((self.num_envs, self.shares_num), dtype=torch.float32, device=self.device)
        if self.if_random_reset:
            # perturb starting cash by ±5% and start with random holdings
            self.amount *= torch.rand((self.num_envs, 1), dtype=torch.float32, device=self.device) * 0.10 + 0.95
            self.shares += torch.randint(0, int(self.max_stock),
                                         size=(self.num_envs, self.shares_num), device=self.device)

        self.rewards = list()
        self.total_asset = self.vmap_get_total_asset(self.close_price[self.day],
                                                     self.shares,
                                                     self.amount)

        state = self.get_state()
        return state

    def get_state(self):
        """Build the scaled observation batch for the current day."""
        # the scale factors keep the heterogeneous features in comparable ranges
        return self.vmap_get_state(self.amount * 2 ** 16,
                                   self.shares * 2 ** -9,
                                   self.close_price[self.day] * 2 ** -7,
                                   self.tech_factor[self.day] * 2 ** -6)  # state

    def step(self, action):
        """Apply one batch of trading actions; returns (state, reward, done, info)."""
        self.day += 1

        action = action.clone()
        action[(-0.1 < action) & (action < 0.1)] = 0  # dead zone: ignore tiny trades
        stock_action = (action * self.max_stock).to(torch.int32)
        # actions initially is scaled between -1 and 1
        # convert `action` into integer as `stock_action`, because we can't buy fraction of shares
        for i in range(self.shares_num):
            buy_idx = torch.where(stock_action[:, i] > 0)[0]
            if buy_idx.shape[0] > 0:
                part_amount = self.amount[buy_idx]
                part_shares = self.shares[buy_idx, i]
                self.vmap_inplace_amount_shares_when_buy(part_amount,
                                                         part_shares,
                                                         stock_action[buy_idx, i],
                                                         self.close_price[self.day, i],
                                                         self.buy_cost_rate)
                self.amount[buy_idx] = part_amount
                self.shares[buy_idx, i] = part_shares

            # FIX: select the rows that sell stock `i` (mirrors the buy branch);
            # the original indexed the full 2-D tensors, yielding wrong/duplicated rows.
            sell_idx = torch.where((stock_action[:, i] < 0) & (self.shares[:, i] > 0))[0]
            if sell_idx.shape[0] > 0:
                part_amount = self.amount[sell_idx]
                part_shares = self.shares[sell_idx, i]
                self.vmap_inplace_amount_shares_when_sell(part_amount,
                                                          part_shares,
                                                          stock_action[sell_idx, i],
                                                          self.close_price[self.day, i],
                                                          self.sell_cost_rate)
                self.amount[sell_idx] = part_amount
                self.shares[sell_idx, i] = part_shares

        state = self.get_state()

        total_asset = self.vmap_get_total_asset(self.close_price[self.day],
                                                self.shares,
                                                self.amount)
        reward = (total_asset - self.total_asset) * 2 ** -6  # scaled change in portfolio value
        self.rewards.append(reward)
        self.total_asset = total_asset

        done = self.day == self.max_step - 1
        if done:
            # terminal bonus: discounted-sum-equivalent of the average step reward
            reward += 1. / (1. - self.gamma) * torch.stack(self.rewards).mean(dim=0)
            self.cumulative_returns = total_asset / self.initial_amount
            self.cumulative_returns = self.cumulative_returns.mean().item()
        done = torch.tensor(done, dtype=torch.bool, device=self.device).expand(self.num_envs)
        return state, reward, done, {}

    def load_data_from_disk(self, tech_id_list=None):
        """Load the pickled DataFrame and flatten it into (close_ary, tech_ary) numpy arrays.

        Raises FileNotFoundError with download instructions when the data file is absent.
        """
        tech_id_list = [
            "macd", "boll_ub", "boll_lb", "rsi_30", "cci_30", "dx_30", "close_30_sma", "close_60_sma",
        ] if tech_id_list is None else tech_id_list

        if os.path.exists(self.df_pwd):  # convert pandas.DataFrame to numpy.array
            df = pd.read_pickle(self.df_pwd)

            tech_ary = []
            close_ary = []
            df_len = len(df.index.unique())  # df_len = max_step
            for day in range(df_len):
                item = df.loc[day]

                tech_items = [item[tech].values.tolist() for tech in tech_id_list]
                tech_items_flatten = sum(tech_items, [])  # one flat list of all tech factors for this day
                tech_ary.append(tech_items_flatten)
                close_ary.append(item.close)

            close_ary = np.array(close_ary)
            tech_ary = np.array(tech_ary)
        else:
            error_str = f"| StockTradingEnv need {self.df_pwd}" \
                        f"\n  download the following files and save in `.`" \
                        f"\n  https://github.com/Yonv1943/Python/blob/master/scow/China_A_shares.pandas.dataframe (2MB)"
            raise FileNotFoundError(error_str)
        return close_ary, tech_ary
def check_env():
    """Sanity-check rollouts: print cumulative returns for random and buy-all policies."""
    gpu_id = 0
    env_num = 32
    env = StockTradingVmapEnv(beg_idx=834, end_idx=1113, gpu_id=gpu_id, num_envs=env_num)
    env.if_random_reset = False
    evaluate_time = 4
    """
    env = StockTradingEnv(beg_idx=0, end_idx=1113)
    cumulative_returns of random action   :      1.63
    cumulative_returns of buy all share   :      2.80

    env = StockTradingEnv(beg_idx=0, end_idx=834)
    cumulative_returns of random action   :      1.94
    cumulative_returns of buy all share   :      2.51

    env = StockTradingEnv(beg_idx=834, end_idx=1113)
    cumulative_returns of random action   :      1.12
    cumulative_returns of buy all share   :      1.19
    """

    print()
    policy_name = 'random action'
    state = env.reset()
    for _ in range(env.max_step * evaluate_time):
        # uniform random actions in (-1, +1)
        action = torch.rand((env.num_envs, env.action_dim), dtype=torch.float32, device=env.device) * 2. - 1.
        state, reward, done, _ = env.step(action)
        if torch.all(done):
            print(f'cumulative_returns of {policy_name}: {env.cumulative_returns:9.2f}')
            state = env.reset()
    dir(state)  # no-op: marks `state` as used
    print()

    policy_name = 'buy all share (if_random_reset = False)'
    env.if_random_reset = False
    state = env.reset()
    for _ in range(env.max_step * evaluate_time):
        # constant action = +1 on every stock, i.e. always buy the maximum
        action = torch.ones((env.num_envs, env.action_dim), dtype=torch.float32, device=env.device) * 2. - 1.
        state, reward, done, _ = env.step(action)
        if torch.all(done):
            print(f'cumulative_returns of {policy_name}: {env.cumulative_returns:9.2f}')
            state = env.reset()
    dir(state)  # no-op: marks `state` as used
    print()

    print()
    policy_name = 'buy all share (if_random_reset = True)'
    env.if_random_reset = True
    state = env.reset()
    for _ in range(env.max_step * evaluate_time):
        action = torch.ones((env.num_envs, env.action_dim), dtype=torch.float32, device=env.device) * 2. - 1.
        state, reward, done, _ = env.step(action)
        if torch.all(done):
            print(f'cumulative_returns of {policy_name}: {env.cumulative_returns:9.2f}')
            state = env.reset()
    dir(state)  # no-op: marks `state` as used
    print()
# Script entry point: run the sanity-check rollouts when executed directly.
if __name__ == '__main__':
    check_env()
| 10,618 | 39.071698 | 120 | py |
ElegantRL | ElegantRL-master/helloworld/run.py | import os
import time
import torch
import numpy as np
from config import Config, build_env
from agent import ReplayBuffer
def train_agent(args: Config):
    """Generic training loop: build env/agent/evaluator, then explore-update-evaluate until done.

    Works for both off-policy (ReplayBuffer) and on-policy (latest rollout) agents.
    Stops when `break_step` is exceeded or a `{cwd}/stop` file exists.
    """
    args.init_before_training()

    env = build_env(args.env_class, args.env_args)
    agent = args.agent_class(args.net_dims, args.state_dim, args.action_dim, gpu_id=args.gpu_id, args=args)
    agent.last_state = env.reset()

    evaluator = Evaluator(eval_env=build_env(args.env_class, args.env_args),
                          eval_per_step=args.eval_per_step,
                          eval_times=args.eval_times,
                          cwd=args.cwd)

    if args.if_off_policy:
        buffer = ReplayBuffer(gpu_id=args.gpu_id,
                              max_size=args.buffer_size,
                              state_dim=args.state_dim,
                              action_dim=1 if args.if_discrete else args.action_dim, )
        # Pre-fill the buffer with random transitions so the first updates have data.
        buffer_items = agent.explore_env(env, args.horizon_len * args.eval_times, if_random=True)
        buffer.update(buffer_items)  # warm up for ReplayBuffer
    else:
        buffer = []  # on-policy: the "buffer" is just the most recent rollout

    '''start training'''
    cwd = args.cwd
    break_step = args.break_step
    horizon_len = args.horizon_len
    if_off_policy = args.if_off_policy
    del args  # all hyperparameters have been copied out; free the Config object

    torch.set_grad_enabled(False)  # gradients are enabled only around `update_net` below
    while True:
        buffer_items = agent.explore_env(env, horizon_len)
        if if_off_policy:
            buffer.update(buffer_items)
        else:
            buffer[:] = buffer_items

        torch.set_grad_enabled(True)
        logging_tuple = agent.update_net(buffer)
        torch.set_grad_enabled(False)

        evaluator.evaluate_and_save(agent.act, horizon_len, logging_tuple)
        if (evaluator.total_step > break_step) or os.path.exists(f"{cwd}/stop"):
            break  # stop training when reach `break_step` or `mkdir cwd/stop`
    evaluator.close()
def render_agent(env_class, env_args: dict, net_dims: [int], agent_class, actor_path: str, render_times: int = 8):
    """Load a saved actor checkpoint and render its behavior for `render_times` episodes."""
    env = build_env(env_class, env_args)

    # Build the agent on CPU only to obtain an actor network with the right architecture.
    temp_agent = agent_class(net_dims, env_args['state_dim'], env_args['action_dim'], gpu_id=-1)
    actor = temp_agent.act
    del temp_agent

    print(f"| render and load actor from: {actor_path}")
    state_dict = torch.load(actor_path, map_location=lambda storage, loc: storage)
    actor.load_state_dict(state_dict)

    for episode_id in range(render_times):
        cumulative_reward, episode_step = get_rewards_and_steps(env, actor, if_render=True)
        print(f"|{episode_id:4} cumulative_reward {cumulative_reward:9.3f} episode_step {episode_step:5.0f}")
class Evaluator:
    """Periodically evaluates the actor, logs a progress table, and saves actor checkpoints."""

    def __init__(self, eval_env, eval_per_step: int = 1e4, eval_times: int = 8, cwd: str = '.'):
        self.cwd = cwd
        self.env_eval = eval_env
        self.eval_step = 0  # training step at which the last evaluation happened
        self.total_step = 0  # cumulative number of environment steps seen so far
        self.start_time = time.time()
        self.eval_times = eval_times  # number of times that get episodic cumulative return
        self.eval_per_step = eval_per_step  # evaluate the agent per training steps

        self.recorder = []  # list of (total_step, used_time, avg_return) tuples
        print("| Evaluator:"
              "\n| `step`: Number of samples, or total training steps, or running times of `env.step()`."
              "\n| `time`: Time spent from the start of training to this moment."
              "\n| `avgR`: Average value of cumulative rewards, which is the sum of rewards in an episode."
              "\n| `stdR`: Standard dev of cumulative rewards, which is the sum of rewards in an episode."
              "\n| `avgS`: Average of steps in an episode."
              "\n| `objC`: Objective of Critic network. Or call it loss function of critic network."
              "\n| `objA`: Objective of Actor network. It is the average Q value of the critic network."
              f"\n| {'step':>8} {'time':>8} | {'avgR':>8} {'stdR':>6} {'avgS':>6} | {'objC':>8} {'objA':>8}")

    def evaluate_and_save(self, actor, horizon_len: int, logging_tuple: tuple):
        """Account for `horizon_len` new steps; evaluate, checkpoint, and log when due."""
        self.total_step += horizon_len
        if self.eval_step + self.eval_per_step > self.total_step:
            return  # not yet time for the next evaluation
        self.eval_step = self.total_step

        rewards_steps_ary = [get_rewards_and_steps(self.env_eval, actor) for _ in range(self.eval_times)]
        rewards_steps_ary = np.array(rewards_steps_ary, dtype=np.float32)
        avg_r = rewards_steps_ary[:, 0].mean()  # average of cumulative rewards
        std_r = rewards_steps_ary[:, 0].std()  # std of cumulative rewards
        avg_s = rewards_steps_ary[:, 1].mean()  # average of steps in an episode

        used_time = time.time() - self.start_time
        self.recorder.append((self.total_step, used_time, avg_r))

        save_path = f"{self.cwd}/actor_{self.total_step:012.0f}_{used_time:08.0f}_{avg_r:08.2f}.pth"
        torch.save(actor.state_dict(), save_path)
        print(f"| {self.total_step:8.2e} {used_time:8.0f} "
              f"| {avg_r:8.2f} {std_r:6.2f} {avg_s:6.0f} "
              f"| {logging_tuple[0]:8.2f} {logging_tuple[1]:8.2f}")

    def close(self):
        """Persist the recorder array and render the learning curve to disk."""
        np.save(f"{self.cwd}/recorder.npy", np.array(self.recorder))
        draw_learning_curve_using_recorder(self.cwd)
def get_rewards_and_steps(env, actor, if_render: bool = False) -> (float, int):  # cumulative_rewards and episode_steps
    """Run one episode with `actor` and return (cumulative_reward, episode_steps)."""
    if_discrete = env.if_discrete
    device = next(actor.parameters()).device  # input tensors must live on the actor's device

    returns = 0.0  # sum of rewards over the episode
    num_steps = 0
    state = env.reset()
    for num_steps in range(12345):
        state_tensor = torch.as_tensor(state, dtype=torch.float32, device=device).unsqueeze(0)
        if if_discrete:
            action_tensor = actor(state_tensor).argmax(dim=1)
        else:
            action_tensor = actor(state_tensor)
        action = action_tensor.detach().cpu().numpy()[0]  # no grad tracking needed outside training

        state, reward, done, _ = env.step(action)
        returns += reward
        if if_render:
            env.render()
            time.sleep(0.02)
        if done:
            break
    # Environments may expose their own cumulative-return bookkeeping; prefer it when present.
    returns = getattr(env, 'cumulative_returns', returns)
    return returns, num_steps + 1
def draw_learning_curve_using_recorder(cwd: str):
    """Plot the learning curve stored in `{cwd}/recorder.npy` and save it as a JPG."""
    recorder = np.load(f"{cwd}/recorder.npy")

    import matplotlib as mpl
    mpl.use('Agg')  # select a non-GUI backend before importing pyplot (no X server needed)
    import matplotlib.pyplot as plt

    steps = recorder[:, 0]
    returns = recorder[:, 2]
    plt.plot(steps, returns)
    plt.xlabel('#samples (Steps)')
    plt.ylabel('#Rewards (Score)')
    plt.grid()

    file_path = f"{cwd}/LearningCurve.jpg"
    # `plt.show()` is unavailable under the 'Agg' backend; write the figure to disk instead.
    plt.savefig(file_path)
    print(f"| Save learning curve in {file_path}")
| 6,830 | 41.962264 | 119 | py |
ElegantRL | ElegantRL-master/helloworld/helloworld_SAC_TD3_single_file.py | import os
import sys
import time
from copy import deepcopy
import gym
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
class Config:  # for off-policy
    """Hyperparameter container for off-policy training runs.

    Holds the agent/env classes, reward shaping, network/optimizer settings,
    device selection, and evaluation cadence. Call `init_before_training()`
    once before training to create the working directory.
    """

    def __init__(self, agent_class=None, env_class=None, env_args=None):
        self.agent_class = agent_class  # agent = agent_class(...)
        self.if_off_policy = True  # whether off-policy or on-policy of DRL algorithm

        self.env_class = env_class  # env = env_class(**env_args)
        self.env_args = env_args  # env = env_class(**env_args)
        if env_args is None:  # dummy env_args
            env_args = {'env_name': None, 'state_dim': None, 'action_dim': None, 'if_discrete': None}
        self.env_name = env_args['env_name']  # the name of environment. Be used to set 'cwd'.
        self.state_dim = env_args['state_dim']  # vector dimension (feature number) of state
        self.action_dim = env_args['action_dim']  # vector dimension (feature number) of action
        self.if_discrete = env_args['if_discrete']  # discrete or continuous action space

        '''Arguments for reward shaping'''
        self.gamma = 0.99  # discount factor of future rewards
        self.reward_scale = 1.0  # an approximate target reward usually be closed to 256

        '''Arguments for training'''
        self.net_dims = (64, 32)  # the middle layer dimension of MLP (MultiLayer Perceptron)
        self.learning_rate = 1e-4  # 2 ** -14 ~= 6e-5
        self.soft_update_tau = 5e-3  # 2 ** -8 ~= 5e-3, Polyak rate for target networks
        self.state_value_tau = 0.1  # 0.05 ~ 0.50, EMA rate for state/value normalization
        self.batch_size = int(64)  # num of transitions sampled from replay buffer.
        self.horizon_len = int(256)  # collect horizon_len step while exploring, then update network
        self.buffer_size = int(1e6)  # ReplayBuffer size. First in first out for off-policy.
        self.repeat_times = 1.0  # repeatedly update network using ReplayBuffer to keep critic's loss small

        '''Arguments for device'''
        self.gpu_id = int(0)  # `int` means the ID of single GPU, -1 means CPU
        self.thread_num = int(8)  # cpu_num for pytorch, `torch.set_num_threads(self.num_threads)`
        self.random_seed = int(0)  # initialize random seed in self.init_before_training()

        '''Arguments for evaluate'''
        self.cwd = None  # current working directory to save model. None means set automatically
        self.if_remove = True  # remove the cwd folder? (True, False, None:ask me)
        self.break_step = +np.inf  # break training if 'total_step > break_step'
        self.eval_times = int(16)  # number of times that get episodic cumulative return
        self.eval_per_step = int(1e4)  # evaluate the agent per training steps

    def init_before_training(self):
        """Create the working directory `cwd`, deriving a default name when unset."""
        if self.cwd is None:  # set cwd (current working directory) for saving model
            # `__name__[5:]` strips the leading 'Agent' prefix, e.g. AgentSAC -> SAC.
            self.cwd = f'./{self.env_name}_{self.agent_class.__name__[5:]}'
        os.makedirs(self.cwd, exist_ok=True)
class ActorBase(nn.Module):
    """Base policy network: holds frozen running state-normalization statistics and the
    action distribution used for exploration. Subclasses assign `self.net`.
    """

    def __init__(self, state_dim: int, action_dim: int):
        super().__init__()
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.net = None  # build_mlp(dims=[state_dim, *dims, action_dim])

        self.ActionDist = torch.distributions.normal.Normal  # distribution class for exploration noise
        self.action_std = None  # exploration noise std; assigned by the owning agent

        # Running statistics for input normalization; updated by the agent via EMA, not by backprop.
        self.state_avg = nn.Parameter(torch.zeros((state_dim,)), requires_grad=False)
        self.state_std = nn.Parameter(torch.ones((state_dim,)), requires_grad=False)

    def state_norm(self, state: Tensor) -> Tensor:
        """Normalize a raw state with the stored running mean/std."""
        return (state - self.state_avg) / self.state_std
class Actor(ActorBase):
    """Deterministic policy for TD3: a tanh-squashed MLP, with Gaussian noise for exploration."""

    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.net = build_mlp(dims=[state_dim, *dims, action_dim])

    def forward(self, state: Tensor) -> Tensor:
        """Return the deterministic action in [-1, 1] for a normalized state."""
        raw_action = self.net(self.state_norm(state))
        return raw_action.tanh()

    def get_action(self, state: Tensor) -> Tensor:  # for exploration
        """Sample a noisy action: Normal(tanh(net(state)), action_std), clipped to [-1, 1]."""
        action_mean = self.net(self.state_norm(state)).tanh()
        noise_dist = self.ActionDist(action_mean, self.action_std)
        noisy_action = noise_dist.sample()
        return noisy_action.clip(-1.0, 1.0)
class ActorSAC(ActorBase):
    """Stochastic Gaussian policy for SAC with tanh squashing.

    A shared state encoder feeds two heads: one for the action mean and one for
    the action log-std. `get_action_logprob` returns the squashed action and its
    tanh-corrected log-probability via the reparameterization trick.
    """

    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.enc_s = build_mlp(dims=[state_dim, *dims])  # encoder of state
        self.dec_a_avg = build_mlp(dims=[dims[-1], action_dim])  # decoder of action mean
        self.dec_a_std = build_mlp(dims=[dims[-1], action_dim])  # decoder of action log_std
        self.soft_plus = nn.Softplus()  # used in the tanh log-prob correction below

    def forward(self, state: Tensor) -> Tensor:
        """Deterministic action (tanh of the mean head); used for evaluation."""
        state = self.state_norm(state)
        state_tmp = self.enc_s(state)  # temporary tensor of state
        return self.dec_a_avg(state_tmp).tanh()  # action

    def get_action(self, state: Tensor) -> Tensor:  # for exploration
        """Sample a squashed action via the reparameterization trick (log-prob not needed)."""
        state = self.state_norm(state)
        state_tmp = self.enc_s(state)  # temporary tensor of state
        action_avg = self.dec_a_avg(state_tmp)
        action_std = self.dec_a_std(state_tmp).clamp(-20, 2).exp()  # clamp log-std for stability

        noise = torch.randn_like(action_avg, requires_grad=True)
        action = action_avg + action_std * noise
        return action.tanh()  # action (re-parameterize)

    def get_action_logprob(self, state: Tensor) -> [Tensor, Tensor]:
        """Sample an action; return (tanh(action), log-prob summed over action dims)."""
        state = self.state_norm(state)
        state_tmp = self.enc_s(state)  # temporary tensor of state
        action_log_std = self.dec_a_std(state_tmp).clamp(-20, 2)
        action_std = action_log_std.exp()
        action_avg = self.dec_a_avg(state_tmp)

        noise = torch.randn_like(action_avg, requires_grad=True)
        action = action_avg + action_std * noise
        # Gaussian log-density of the pre-tanh action, per dimension.
        logprob = -action_log_std - noise.pow(2) * 0.5 - np.log(np.sqrt(2 * np.pi))
        # dist = self.Normal(action_avg, action_std)
        # action = dist.sample()
        # logprob = dist.log_prob(action)

        '''fix logprob by adding the derivative of y=tanh(x)'''
        logprob -= (np.log(2.) - action - self.soft_plus(-2. * action)) * 2.  # better than below
        # logprob -= (1.000001 - action.tanh().pow(2)).log()
        return action.tanh(), logprob.sum(1, keepdim=True)
class CriticBase(nn.Module):
    """Base value network: holds running statistics for state normalization and for
    mapping a normalized value prediction back to the return scale.
    """

    def __init__(self, state_dim: int, action_dim: int):
        super().__init__()
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.net = None  # build_mlp(dims=[state_dim + action_dim, *dims, 1])

        # Running statistics; updated by the agent via EMA, not by backprop.
        self.state_avg = nn.Parameter(torch.zeros((state_dim,)), requires_grad=False)
        self.state_std = nn.Parameter(torch.ones((state_dim,)), requires_grad=False)
        self.value_avg = nn.Parameter(torch.zeros((1,)), requires_grad=False)
        self.value_std = nn.Parameter(torch.ones((1,)), requires_grad=False)

    def state_norm(self, state: Tensor) -> Tensor:
        """Normalize a raw state with the stored running mean/std."""
        return (state - self.state_avg) / self.state_std

    def value_re_norm(self, value: Tensor) -> Tensor:
        """De-normalize a predicted value back to the return scale."""
        return value * self.value_std + self.value_avg
class CriticTwin(CriticBase):
    """Twin Q-value critic (clipped double-Q): a shared state-action encoder with two value heads."""

    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.enc_sa = build_mlp(dims=[state_dim + action_dim, *dims])  # shared encoder of (state, action)
        self.dec_q1 = build_mlp(dims=[dims[-1], 1])  # first Q-value head
        self.dec_q2 = build_mlp(dims=[dims[-1], 1])  # second Q-value head

    def forward(self, state: Tensor, action: Tensor) -> Tensor:
        """Return the first Q-value estimate (the one used for the policy objective)."""
        normed_state = self.state_norm(state)
        features = self.enc_sa(torch.cat((normed_state, action), dim=1))
        return self.value_re_norm(self.dec_q1(features))

    def get_q1_q2(self, state, action):
        """Return both Q-value estimates for clipped double-Q targets."""
        normed_state = self.state_norm(state)
        features = self.enc_sa(torch.cat((normed_state, action), dim=1))
        return self.value_re_norm(self.dec_q1(features)), self.value_re_norm(self.dec_q2(features))
def build_mlp(dims: [int]) -> nn.Sequential:  # MLP (MultiLayer Perceptron)
    """Build an MLP with ReLU between layers and no activation after the output layer."""
    layers = []
    for inp_dim, out_dim in zip(dims[:-1], dims[1:]):
        layers.extend((nn.Linear(inp_dim, out_dim), nn.ReLU()))
    del layers[-1]  # drop the trailing activation so the output layer is linear
    return nn.Sequential(*layers)
def get_gym_env_args(env, if_print: bool) -> dict:
    """Extract {env_name, state_dim, action_dim, if_discrete} from a gym env or a custom env."""
    gym_attr_names = {'unwrapped', 'observation_space', 'action_space', 'spec'}
    if gym_attr_names.issubset(dir(env)):  # duck-typed check: looks like a gym.Env
        env_name = env.unwrapped.spec.id
        state_shape = env.observation_space.shape
        # state_dim is an int for vector observations, otherwise the full shape tuple.
        state_dim = state_shape[0] if len(state_shape) == 1 else state_shape
        if_discrete = isinstance(env.action_space, gym.spaces.Discrete)
        action_dim = env.action_space.n if if_discrete else env.action_space.shape[0]
    else:  # custom env exposing the four standard attributes directly
        env_name = env.env_name
        state_dim = env.state_dim
        action_dim = env.action_dim
        if_discrete = env.if_discrete
    env_args = {'env_name': env_name, 'state_dim': state_dim, 'action_dim': action_dim, 'if_discrete': if_discrete}
    if if_print:
        print(f"env_args = {repr(env_args)}")
    return env_args
def kwargs_filter(function, kwargs: dict) -> dict:
    """Keep only the entries of `kwargs` whose keys match parameter names of `function`."""
    import inspect

    parameter_names = {param.name for param in inspect.signature(function).parameters.values()}
    accepted_keys = parameter_names.intersection(kwargs.keys())
    return {key: kwargs[key] for key in accepted_keys}  # filtered kwargs
def build_env(env_class=None, env_args=None):
    """Instantiate an environment from either `gym.make` or a custom env class."""
    if env_class.__module__ == 'gym.envs.registration':  # special rule: env_class is `gym.make`
        assert '0.18.0' <= gym.__version__ <= '0.25.2'  # pip3 install gym==0.24.0
        env = env_class(id=env_args['env_name'])
    else:
        # Pass through only the kwargs the constructor actually accepts.
        accepted_kwargs = kwargs_filter(env_class.__init__, env_args.copy())
        env = env_class(**accepted_kwargs)
    # Attach the four standard attributes so downstream code can rely on them.
    for attr_str in ('env_name', 'state_dim', 'action_dim', 'if_discrete'):
        setattr(env, attr_str, env_args[attr_str])
    return env
class ReplayBuffer:  # for off-policy
    """FIFO replay buffer stored as pre-allocated tensors on the training device.

    Transitions are written in insertion order; once `max_size` is exceeded the
    pointer wraps around and the oldest entries are overwritten.
    """

    def __init__(self, max_size: int, state_dim: int, action_dim: int, gpu_id: int = 0):
        self.p = 0  # pointer: index where the next transition will be written
        self.if_full = False  # True once the buffer has wrapped around at least once
        self.cur_size = 0  # number of valid transitions currently stored
        self.add_size = 0  # size of the most recent `update()` batch
        self.max_size = max_size
        self.device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")

        self.states = torch.empty((max_size, state_dim), dtype=torch.float32, device=self.device)
        self.actions = torch.empty((max_size, action_dim), dtype=torch.float32, device=self.device)
        self.rewards = torch.empty((max_size, 1), dtype=torch.float32, device=self.device)
        self.undones = torch.empty((max_size, 1), dtype=torch.float32, device=self.device)

    def update(self, items: [Tensor]):
        """Append a batch of (states, actions, rewards, undones), wrapping around when full."""
        states, actions, rewards, undones = items
        add_size = rewards.shape[0]
        p = self.p + add_size  # pointer position after writing this batch
        if p > self.max_size:
            self.if_full = True
            p0 = self.p
            p1 = self.max_size
            p2 = self.max_size - self.p  # number of items that fit before the end of the buffer
            p = p - self.max_size  # wrapped pointer

            self.states[p0:p1], self.states[0:p] = states[:p2], states[-p:]
            self.actions[p0:p1], self.actions[0:p] = actions[:p2], actions[-p:]
            self.rewards[p0:p1], self.rewards[0:p] = rewards[:p2], rewards[-p:]
            self.undones[p0:p1], self.undones[0:p] = undones[:p2], undones[-p:]
        else:
            self.states[self.p:p] = states
            self.actions[self.p:p] = actions
            self.rewards[self.p:p] = rewards
            self.undones[self.p:p] = undones
        self.p = p
        self.add_size = add_size
        self.cur_size = self.max_size if self.if_full else self.p

    def sample(self, batch_size: int) -> [Tensor]:
        """Uniformly sample transitions; returns (state, action, reward, undone, next_state).

        `cur_size - 1` keeps `ids + 1` in range for the next-state lookup.
        NOTE(review): when the buffer has wrapped, `ids + 1` can cross the write
        pointer and pair a state with an unrelated successor — inherited behavior.
        """
        # fix: draw the indices on the buffer's device instead of the CPU default,
        # avoiding a host->device transfer for every sampled batch.
        ids = torch.randint(self.cur_size - 1, size=(batch_size,), device=self.device, requires_grad=False)
        return self.states[ids], self.actions[ids], self.rewards[ids], self.undones[ids], self.states[ids + 1]

    def slice(self, data: Tensor, slice_size: int) -> Tensor:
        """Return the `slice_size` most recently written rows of `data`, in insertion order.

        Fix: the original inverted the wrap-around condition and sliced the wrong
        halves, yielding an empty or wrongly-sized tensor whenever `slice_size != p`
        (which silently corrupted the normalization statistics fed to
        `update_avg_std_for_state_value_norm`).
        """
        if slice_size <= self.p:  # window is contiguous, ending at the write pointer
            return data[self.p - slice_size:self.p]
        # Window wraps: tail of the buffer (older rows) followed by the head up to the pointer.
        return torch.vstack((data[self.p - slice_size:], data[:self.p]))
class AgentBase:
    """Shared machinery for off-policy agents: network/optimizer setup, environment
    exploration, soft target updates, and running state/value normalization.
    """

    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        self.state_dim = state_dim
        self.action_dim = action_dim

        self.gamma = args.gamma  # discount factor of future rewards
        self.batch_size = args.batch_size
        self.repeat_times = args.repeat_times
        self.reward_scale = args.reward_scale
        self.learning_rate = args.learning_rate
        self.if_off_policy = args.if_off_policy
        self.soft_update_tau = args.soft_update_tau
        self.state_value_tau = args.state_value_tau

        self.last_state = None  # save the last state of the trajectory for training. `last_state.shape == (state_dim)`
        self.device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")

        act_class = getattr(self, "act_class", None)
        cri_class = getattr(self, "cri_class", None)
        # Targets start as aliases of the online networks; subclasses deepcopy them when needed.
        self.act = self.act_target = act_class(net_dims, state_dim, action_dim).to(self.device)
        self.cri = self.cri_target = cri_class(net_dims, state_dim, action_dim).to(self.device) \
            if cri_class else self.act

        self.act_optimizer = torch.optim.Adam(self.act.parameters(), self.learning_rate)
        self.cri_optimizer = torch.optim.Adam(self.cri.parameters(), self.learning_rate) \
            if cri_class else self.act_optimizer

        self.criterion = torch.nn.SmoothL1Loss()

    def explore_env(self, env, horizon_len: int, if_random: bool = False) -> [Tensor]:
        """Collect `horizon_len` transitions from `env`; returns (states, actions, rewards, undones).

        When `if_random` is True, actions are drawn uniformly from [-1, 1] (warm-up phase).
        """
        states = torch.zeros((horizon_len, self.state_dim), dtype=torch.float32).to(self.device)
        actions = torch.zeros((horizon_len, self.action_dim), dtype=torch.float32).to(self.device)
        rewards = torch.zeros(horizon_len, dtype=torch.float32).to(self.device)
        dones = torch.zeros(horizon_len, dtype=torch.bool).to(self.device)

        state = self.last_state
        get_action = self.act.get_action
        for i in range(horizon_len):
            # fix: create the random warm-up action directly on `self.device`, matching
            # the device of actions produced by the policy network.
            action = torch.rand(self.action_dim, device=self.device) * 2 - 1.0 if if_random \
                else get_action(state.unsqueeze(0))[0]
            states[i] = state

            ary_action = action.detach().cpu().numpy()
            ary_state, reward, done, _ = env.step(ary_action)
            # Reset immediately on termination so `last_state` is always a valid start state.
            state = torch.as_tensor(env.reset() if done else ary_state,
                                    dtype=torch.float32, device=self.device)
            actions[i] = action
            rewards[i] = reward
            dones[i] = done
        self.last_state = state

        rewards = rewards.unsqueeze(1)
        undones = (1.0 - dones.type(torch.float32)).unsqueeze(1)
        return states, actions, rewards, undones

    @staticmethod
    def optimizer_update(optimizer, objective: Tensor):
        """Standard zero-grad / backward / step cycle for one objective."""
        optimizer.zero_grad()
        objective.backward()
        optimizer.step()

    @staticmethod
    def soft_update(target_net: torch.nn.Module, current_net: torch.nn.Module, tau: float):
        """Polyak-average `current_net` parameters into `target_net` with rate `tau`."""
        # assert target_net is not current_net
        for tar, cur in zip(target_net.parameters(), current_net.parameters()):
            tar.data.copy_(cur.data * tau + tar.data * (1.0 - tau))

    def update_avg_std_for_state_value_norm(self, states: Tensor, returns: Tensor):
        """EMA-update the state/value normalization statistics shared by actor and critic."""
        tau = self.state_value_tau
        if tau == 0:
            return

        state_avg = states.mean(dim=0, keepdim=True)
        state_std = states.std(dim=0, keepdim=True)
        self.act.state_avg[:] = self.act.state_avg * (1 - tau) + state_avg * tau
        # fix: blend with the actor's own running std. The original read
        # `self.cri.state_std` here (copy-paste inconsistency), which only stayed
        # correct because the critic's stats are re-synchronized just below.
        self.act.state_std[:] = self.act.state_std * (1 - tau) + state_std * tau + 1e-4
        self.cri.state_avg[:] = self.act.state_avg
        self.cri.state_std[:] = self.act.state_std

        returns_avg = returns.mean(dim=0)
        returns_std = returns.std(dim=0)
        self.cri.value_avg[:] = self.cri.value_avg * (1 - tau) + returns_avg * tau
        self.cri.value_std[:] = self.cri.value_std * (1 - tau) + returns_std * tau + 1e-4
class AgentTD3(AgentBase):
    """Twin Delayed DDPG (TD3): twin critics, target policy smoothing, delayed actor updates."""

    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        self.act_class = getattr(self, 'act_class', Actor)  # get the attribute of object `self`
        self.cri_class = getattr(self, 'cri_class', CriticTwin)  # get the attribute of object `self`
        super().__init__(net_dims, state_dim, action_dim, gpu_id, args)
        self.cri_target = deepcopy(self.cri)
        self.act_target = deepcopy(self.act)

        self.explore_noise_std = getattr(args, 'explore_noise_std', 0.06)  # standard deviation of exploration noise
        self.policy_noise_std = getattr(args, 'policy_noise_std', 0.12)  # std of target policy smoothing noise
        self.act.action_std = self.explore_noise_std
        self.update_freq = getattr(args, 'update_freq', 2)  # actor is updated once per `update_freq` critic updates
        self.horizon_len = 0

    def update_net(self, buffer: ReplayBuffer) -> [float]:
        """One round of critic/actor updates; returns (avg critic objective, avg actor objective)."""
        # Use the larger smoothing noise while computing critic targets.
        self.act.action_std = self.act_target.action_std = self.policy_noise_std
        with torch.no_grad():
            # Refresh normalization statistics from the most recently collected transitions.
            add_states = buffer.slice(buffer.states, buffer.add_size)
            add_actions = buffer.slice(buffer.actions, buffer.add_size)
            add_returns = self.cri_target(add_states, add_actions)
            self.update_avg_std_for_state_value_norm(states=add_states, returns=add_returns)
            del add_states, add_actions, add_returns

        obj_critics = obj_actors = 0.0
        update_times = int(buffer.cur_size * self.repeat_times / self.batch_size)
        for t in range(update_times):
            obj_critic, state = self.get_obj_critic(buffer, self.batch_size)
            self.optimizer_update(self.cri_optimizer, obj_critic)
            self.soft_update(self.cri_target, self.cri, self.soft_update_tau)
            obj_critics += obj_critic.item()

            if t % self.update_freq == 0:  # delayed policy update
                action = self.act(state)  # policy gradient
                obj_actor = (self.cri(state, action)).mean()
                self.optimizer_update(self.act_optimizer, -obj_actor)
                self.soft_update(self.act_target, self.act, self.soft_update_tau)
                obj_actors += obj_actor.item()
        # Restore the smaller exploration noise for environment interaction.
        self.act.action_std = self.act_target.action_std = self.explore_noise_std
        return obj_critics / update_times, obj_actors / (update_times / self.update_freq)

    def get_obj_critic(self, buffer, batch_size: int) -> (Tensor, Tensor):
        """Compute the twin-critic TD loss on a sampled batch; also return the sampled states."""
        with torch.no_grad():
            state, action, reward, undone, next_state = buffer.sample(batch_size)
            next_action = self.act_target.get_action(next_state)  # stochastic policy
            next_q = torch.min(*self.cri_target.get_q1_q2(next_state, next_action))  # twin critics
            q_label = reward + undone * self.gamma * next_q

        q1, q2 = self.cri.get_q1_q2(state, action)
        obj_critic = (self.criterion(q1, q_label) + self.criterion(q2, q_label)) / 2.
        return obj_critic, state
class AgentSAC(AgentBase):
    """Soft Actor-Critic: maximum-entropy off-policy algorithm with learned temperature alpha."""

    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        self.act_class = getattr(self, 'act_class', ActorSAC)  # get the attribute of object `self`
        self.cri_class = getattr(self, 'cri_class', CriticTwin)  # get the attribute of object `self`
        super().__init__(net_dims, state_dim, action_dim, gpu_id, args)
        self.cri_target = deepcopy(self.cri)

        self.alpha_log = torch.tensor(-1, dtype=torch.float32, requires_grad=True, device=self.device)  # trainable var
        self.alpha_optim = torch.optim.Adam((self.alpha_log,), lr=args.learning_rate)
        self.target_entropy = -np.log(action_dim)  # entropy target for automatic temperature tuning

    def update_net(self, buffer: ReplayBuffer) -> [float]:
        """One round of critic/temperature/actor updates; returns (avg critic obj, avg actor obj)."""
        with torch.no_grad():
            # Refresh normalization statistics from the most recently collected transitions.
            add_states = buffer.slice(buffer.states, buffer.add_size)
            add_actions = buffer.slice(buffer.actions, buffer.add_size)
            add_returns = self.cri_target(add_states, add_actions)
            self.update_avg_std_for_state_value_norm(states=add_states, returns=add_returns)
            del add_states, add_actions, add_returns

        obj_critics = obj_actors = 0.0
        update_times = int(buffer.cur_size * self.repeat_times / self.batch_size)
        for i in range(update_times):
            obj_critic, state = self.get_obj_critic(buffer, self.batch_size)
            self.optimizer_update(self.cri_optimizer, obj_critic)
            self.soft_update(self.cri_target, self.cri, self.soft_update_tau)
            obj_critics += obj_critic.item()

            action, logprob = self.act.get_action_logprob(state)  # policy gradient
            # Temperature update: drive the policy entropy toward `target_entropy`.
            obj_alpha = (self.alpha_log * (-logprob + self.target_entropy).detach()).mean()
            self.optimizer_update(self.alpha_optim, obj_alpha)

            alpha = self.alpha_log.exp().detach()
            # Actor maximizes the soft Q-value: Q(s, a) - alpha * log pi(a|s).
            obj_actor = (self.cri(state, action) - logprob * alpha).mean()
            self.optimizer_update(self.act_optimizer, -obj_actor)
            obj_actors += obj_actor.item()
        return obj_critics / update_times, obj_actors / update_times

    def get_obj_critic(self, buffer, batch_size: int) -> (Tensor, Tensor):
        """Compute the entropy-regularized twin-critic TD loss on a sampled batch."""
        with torch.no_grad():
            state, action, reward, undone, next_state = buffer.sample(batch_size)
            next_action, next_logprob = self.act.get_action_logprob(next_state)  # stochastic policy
            next_q = torch.min(*self.cri_target.get_q1_q2(next_state, next_action))  # twin critics
            alpha = self.alpha_log.exp()
            q_label = reward + undone * self.gamma * (next_q - next_logprob * alpha)

        q1, q2 = self.cri.get_q1_q2(state, action)
        obj_critic = (self.criterion(q1, q_label) + self.criterion(q2, q_label)) / 2.
        return obj_critic, state
class PendulumEnv(gym.Wrapper):  # a demo of custom gym env
    """Pendulum wrapper that rescales actions/rewards and exposes the four standard env attributes."""

    def __init__(self, gym_env_name=None):
        gym.logger.set_level(40)  # Block warning
        assert '0.18.0' <= gym.__version__ <= '0.25.2'  # pip3 install gym==0.24.0
        if gym_env_name is None:
            # gym renamed the env id at 0.18.0; pick the one matching the installed version.
            gym_env_name = "Pendulum-v0" if gym.__version__ < '0.18.0' else "Pendulum-v1"
        super().__init__(env=gym.make(gym_env_name))

        '''the necessary env information when you design a custom env'''
        self.env_name = gym_env_name  # the name of this env.
        self.state_dim = self.observation_space.shape[0]  # feature number of state
        self.action_dim = self.action_space.shape[0]  # feature number of action
        self.if_discrete = False  # discrete action or continuous action

    def reset(self) -> np.ndarray:  # reset the agent in env
        """Reset the episode and return the initial observation."""
        return self.env.reset()

    def step(self, action: np.ndarray) -> (np.ndarray, float, bool, dict):  # agent interacts in env
        """Step with an action in [-1, 1]; rescale to Pendulum's native [-2, 2] and halve the reward."""
        # OpenAI Pendulum env set its action space as (-2, +2). It is bad.
        # We suggest that adjust action space to (-1, +1) when designing a custom env.
        state, reward, done, info_dict = self.env.step(action * 2)
        state = state.reshape(self.state_dim)
        return state, float(reward * 0.5), done, info_dict
def train_agent(args: Config):
    """Off-policy training loop: warm up the replay buffer, then explore-update-evaluate.

    Stops when `break_step` is exceeded or a `{cwd}/stop` file exists.
    """
    args.init_before_training()
    gpu_id = args.gpu_id

    env = build_env(args.env_class, args.env_args)
    agent = args.agent_class(args.net_dims, args.state_dim, args.action_dim, gpu_id=gpu_id, args=args)
    agent.last_state = torch.as_tensor(env.reset(), dtype=torch.float32, device=agent.device)

    buffer = ReplayBuffer(gpu_id=gpu_id, max_size=args.buffer_size,
                          state_dim=args.state_dim, action_dim=1 if args.if_discrete else args.action_dim, )
    # Pre-fill the buffer with random transitions so the first updates have data.
    buffer_items = agent.explore_env(env, args.horizon_len * args.eval_times, if_random=True)
    buffer.update(buffer_items)  # warm up for ReplayBuffer

    evaluator = Evaluator(eval_env=build_env(args.env_class, args.env_args),
                          eval_per_step=args.eval_per_step, eval_times=args.eval_times, cwd=args.cwd)

    torch.set_grad_enabled(False)  # gradients are enabled only around `update_net` below
    while True:  # start training
        buffer_items = agent.explore_env(env, args.horizon_len)
        buffer.update(buffer_items)

        torch.set_grad_enabled(True)
        logging_tuple = agent.update_net(buffer)
        torch.set_grad_enabled(False)

        evaluator.evaluate_and_save(agent.act, args.horizon_len, logging_tuple)
        if (evaluator.total_step > args.break_step) or os.path.exists(f"{args.cwd}/stop"):
            break  # stop training when reach `break_step` or `mkdir cwd/stop`
class Evaluator:
    """Periodically evaluates the actor and logs a progress table (no checkpointing here)."""

    def __init__(self, eval_env, eval_per_step: int = 1e4, eval_times: int = 8, cwd: str = '.'):
        self.cwd = cwd
        self.env_eval = eval_env
        self.eval_step = 0  # training step at which the last evaluation happened
        self.total_step = 0  # cumulative number of environment steps seen so far
        self.start_time = time.time()
        self.eval_times = eval_times  # number of times that get episodic cumulative return
        self.eval_per_step = eval_per_step  # evaluate the agent per training steps

        self.recorder = list()  # list of (total_step, used_time, avg_return) tuples
        print("\n| `step`: Number of samples, or total training steps, or running times of `env.step()`."
              "\n| `time`: Time spent from the start of training to this moment."
              "\n| `avgR`: Average value of cumulative rewards, which is the sum of rewards in an episode."
              "\n| `stdR`: Standard dev of cumulative rewards, which is the sum of rewards in an episode."
              "\n| `avgS`: Average of steps in an episode."
              "\n| `objC`: Objective of Critic network. Or call it loss function of critic network."
              "\n| `objA`: Objective of Actor network. It is the average Q value of the critic network."
              f"\n| {'step':>8} {'time':>8} | {'avgR':>8} {'stdR':>6} {'avgS':>6} | {'objC':>8} {'objA':>8}")

    def evaluate_and_save(self, actor, horizon_len: int, logging_tuple: tuple):
        """Account for `horizon_len` new steps; evaluate and log a table row when due."""
        self.total_step += horizon_len
        if self.eval_step + self.eval_per_step > self.total_step:
            return  # not yet time for the next evaluation
        self.eval_step = self.total_step

        rewards_steps_ary = [get_rewards_and_steps(self.env_eval, actor) for _ in range(self.eval_times)]
        rewards_steps_ary = np.array(rewards_steps_ary, dtype=np.float32)
        avg_r = rewards_steps_ary[:, 0].mean()  # average of cumulative rewards
        std_r = rewards_steps_ary[:, 0].std()  # std of cumulative rewards
        avg_s = rewards_steps_ary[:, 1].mean()  # average of steps in an episode

        used_time = time.time() - self.start_time
        self.recorder.append((self.total_step, used_time, avg_r))

        print(f"| {self.total_step:8.2e} {used_time:8.0f} "
              f"| {avg_r:8.2f} {std_r:6.2f} {avg_s:6.0f} "
              f"| {logging_tuple[0]:8.2f} {logging_tuple[1]:8.2f}")
def get_rewards_and_steps(env, actor, if_render: bool = False) -> (float, int):  # cumulative_rewards and episode_steps
    """Roll out one episode with `actor` and return its cumulative reward and length."""
    device = next(actor.parameters()).device  # input tensors must share the actor's device

    returns = 0.0  # sum of rewards over the episode
    step_count = 0
    state = env.reset()
    for step_count in range(12345):
        state_tensor = torch.as_tensor(state, dtype=torch.float32, device=device).unsqueeze(0)
        action = actor(state_tensor).detach().cpu().numpy()[0]  # no grad tracking needed outside training
        state, reward, done, _ = env.step(action)
        returns += reward
        if if_render:
            env.render()
        if done:
            break
    return returns, step_count + 1
def train_sac_td3_for_pendulum():
    """Train SAC (default, index 0) or TD3 (index 1) on the custom PendulumEnv."""
    agent_class = [AgentSAC, AgentTD3][0]  # DRL algorithm name
    env_class = PendulumEnv  # run a custom env: PendulumEnv, which based on OpenAI pendulum
    env_args = {
        'env_name': 'Pendulum',  # Apply torque on the free end to swing a pendulum into an upright position
        'state_dim': 3,  # the x-y coordinates of the pendulum's free end and its angular velocity.
        'action_dim': 1,  # the torque applied to free end of the pendulum
        'if_discrete': False  # continuous action space, symbols → direction, value → force
    }
    get_gym_env_args(env=PendulumEnv(), if_print=True)  # prints the env_args dict for reference

    args = Config(agent_class, env_class, env_args)  # see `config.py Arguments()` for hyperparameter explanation
    args.break_step = int(4e4)  # break training if 'total_step > break_step'
    args.net_dims = (64, 32)  # the middle layer dimension of MultiLayer Perceptron
    args.gamma = 0.97  # discount factor of future rewards
    args.horizon_len = 64  # collect horizon_len step while exploring, then update network
    args.repeat_times = 1.0  # repeatedly update network using ReplayBuffer to keep critic's loss small
    args.state_value_tau = 0.02  # EMA rate for state/value normalization statistics
    args.explore_noise_std = 0.10  # TD3 exploration noise std
    args.policy_noise_std = 0.15  # TD3 target policy smoothing noise std

    train_agent(args)
"""
cumulative returns range: -2000 < -1000 < -200 < -80
SAC
| step time | avgR stdR avgS | objC objA
| 1.00e+04 135 | -211.21 55.50 200 | 0.88 -69.34
| 2.01e+04 479 | -74.14 56.91 200 | 0.62 -22.68
| 3.01e+04 1029 | -69.16 36.39 200 | 0.36 -16.79
TD3
| step time | avgR stdR avgS | objC objA
| 1.00e+04 103 | -771.30 38.15 200 | 1.03 -98.23
| 2.01e+04 380 | -89.88 62.76 200 | 0.73 -50.82
| 3.01e+04 813 | -91.69 42.66 200 | 0.45 -30.01
"""
def train_sac_td3_for_lunar_lander():
    """Train TD3 (default) or SAC on gym's LunarLanderContinuous-v2.

    Reads the module-level ``GPU_ID`` (set in ``__main__``) for both the device
    id and the random seed, then hands off to `train_agent`.
    """
    agent_class = [AgentSAC, AgentTD3][1]  # DRL algorithm: index 0 -> SAC, 1 -> TD3
    env_class = gym.make
    env_args = {
        'env_name': 'LunarLanderContinuous-v2',  # A lander learns to land on a landing pad
        'state_dim': 8,  # coordinates xy, linear velocities xy, angle, angular velocity, two booleans
        'action_dim': 2,  # fire main engine or side engine.
        'if_discrete': False  # continuous action space, symbols → direction, value → force
    }
    get_gym_env_args(env=gym.make('LunarLanderContinuous-v2'), if_print=True)  # prints (and returns) env_args
    args = Config(agent_class, env_class, env_args)  # see `config.py Arguments()` for hyperparameter explanation
    args.break_step = int(8e4)  # break training if 'total_step > break_step'
    args.net_dims = (128, 128)  # the middle layer dimension of MultiLayer Perceptron
    args.horizon_len = 128  # collect horizon_len step while exploring, then update network
    args.repeat_times = 1.0  # repeatedly update network using ReplayBuffer to keep critic's loss small
    # Only the last of the duplicated assignments took effect in the original
    # (0.1 was immediately overwritten); keep the effective value. Previously
    # tried values: 0.1, 0.001, 0.000.
    # TODO(YonV1943 2022-10-31): something wrong with the state_std and value_std
    args.state_value_tau = 0.01
    args.gpu_id = GPU_ID
    args.random_seed = GPU_ID
    train_agent(args)
"""
cumulative returns range: -1500 < -140 < 200 < 280
SAC
| step time | avgR stdR avgS | objC objA
| 1.01e+04 88 | 19.53 148.64 362 | 1.93 23.59
| 2.02e+04 294 | -60.15 120.83 805 | 2.59 60.84
| 3.03e+04 617 | -50.82 46.35 965 | 3.53 104.68
| 4.04e+04 1051 | -55.18 22.74 972 | 2.58 90.86
| 5.06e+04 1560 | 172.70 84.48 664 | 2.06 66.80
| 6.07e+04 2175 | 211.03 90.33 511 | 2.07 55.08
TD3
"""
if __name__ == '__main__':
    # GPU id comes from the first command-line argument when given; default to 0
    # instead of crashing with IndexError when the script is run without args.
    GPU_ID = int(sys.argv[1]) if len(sys.argv) > 1 else 0
    # train_sac_td3_for_pendulum()
    train_sac_td3_for_lunar_lander()
| 32,173 | 47.971081 | 119 | py |
ElegantRL | ElegantRL-master/helloworld/config.py | import os
import gym
import torch
import numpy as np
class Config:
    """Hyperparameter and bookkeeping container for a training run.

    Holds the agent/env classes, reward-shaping, network/optimizer, device and
    evaluation settings. `init_before_training` seeds the RNGs and prepares the
    working directory (optionally removing a previous run's folder).
    """
    def __init__(self, agent_class=None, env_class=None, env_args=None):
        self.agent_class = agent_class  # agent = agent_class(...)
        self.if_off_policy = self.get_if_off_policy()  # whether off-policy or on-policy of DRL algorithm
        self.env_class = env_class  # env = env_class(**env_args)
        self.env_args = env_args  # env = env_class(**env_args)
        if env_args is None:  # dummy env_args so the lookups below yield None
            env_args = {'env_name': None, 'state_dim': None, 'action_dim': None, 'if_discrete': None}
        self.env_name = env_args['env_name']  # the name of environment. Be used to set 'cwd'.
        self.state_dim = env_args['state_dim']  # vector dimension (feature number) of state
        self.action_dim = env_args['action_dim']  # vector dimension (feature number) of action
        self.if_discrete = env_args['if_discrete']  # discrete or continuous action space
        '''Arguments for reward shaping'''
        self.gamma = 0.99  # discount factor of future rewards
        self.reward_scale = 1.0  # an approximate target reward usually be closed to 256
        '''Arguments for training'''
        self.net_dims = (64, 32)  # the middle layer dimension of MLP (MultiLayer Perceptron)
        self.learning_rate = 6e-5  # 2 ** -14 ~= 6e-5
        self.soft_update_tau = 5e-3  # 2 ** -8 ~= 5e-3
        if self.if_off_policy:  # off-policy
            self.batch_size = int(64)  # num of transitions sampled from replay buffer.
            self.horizon_len = int(512)  # collect horizon_len step while exploring, then update network
            self.buffer_size = int(1e6)  # ReplayBuffer size. First in first out for off-policy.
            self.repeat_times = 1.0  # repeatedly update network using ReplayBuffer to keep critic's loss small
        else:  # on-policy
            self.batch_size = int(128)  # num of transitions sampled from replay buffer.
            self.horizon_len = int(2000)  # collect horizon_len step while exploring, then update network
            self.buffer_size = None  # ReplayBuffer size. Empty the ReplayBuffer for on-policy.
            self.repeat_times = 8.0  # repeatedly update network using ReplayBuffer to keep critic's loss small
        '''Arguments for device'''
        self.gpu_id = int(0)  # `int` means the ID of single GPU, -1 means CPU
        self.thread_num = int(8)  # cpu_num for pytorch, `torch.set_num_threads(self.num_threads)`
        self.random_seed = int(0)  # initialize random seed in self.init_before_training()
        '''Arguments for evaluate'''
        self.cwd = None  # current working directory to save model. None means set automatically
        self.if_remove = True  # remove the cwd folder? (True, False, None:ask me)
        self.break_step = +np.inf  # break training if 'total_step > break_step'
        self.eval_times = int(32)  # number of times that get episodic cumulative return
        self.eval_per_step = int(2e4)  # evaluate the agent per training steps
    def init_before_training(self):
        """Seed all RNGs, set torch defaults, and (re)create the working dir `self.cwd`."""
        np.random.seed(self.random_seed)
        torch.manual_seed(self.random_seed)
        torch.set_num_threads(self.thread_num)
        torch.set_default_dtype(torch.float32)
        if self.cwd is None:  # set cwd (current working directory) for saving model
            # e.g. './Pendulum_SAC_0' — strips the leading 'Agent' from the class name
            self.cwd = f'./{self.env_name}_{self.agent_class.__name__[5:]}_{self.random_seed}'
        if self.if_remove is None:  # ask interactively whether to remove the history files
            self.if_remove = bool(input(f"| Arguments PRESS 'y' to REMOVE: {self.cwd}? ") == 'y')
        if self.if_remove:
            import shutil
            shutil.rmtree(self.cwd, ignore_errors=True)
            print(f"| Arguments Remove cwd: {self.cwd}")
        else:
            print(f"| Arguments Keep cwd: {self.cwd}")
        os.makedirs(self.cwd, exist_ok=True)
    def get_if_off_policy(self) -> bool:
        """Return True when the agent class name contains no on-policy algorithm keyword."""
        agent_name = self.agent_class.__name__ if self.agent_class else ''
        on_policy_names = ('SARSA', 'VPG', 'A2C', 'A3C', 'TRPO', 'PPO', 'MPO')
        return all([agent_name.find(s) == -1 for s in on_policy_names])
def get_gym_env_args(env, if_print: bool) -> dict:
    """Collect a standard env-description dict from *env*.

    param env: either a standard OpenAI gym env (inspected through its spaces)
        or any object that already carries the four attributes below.
    param if_print: print the resulting dict when True.
    return: {'env_name': str, 'state_dim': int, 'action_dim': int, 'if_discrete': bool}
    """
    gym_markers = {'unwrapped', 'observation_space', 'action_space', 'spec'}
    if gym_markers.issubset(dir(env)):  # duck-typed check for a gym.Env
        env_name = env.unwrapped.spec.id
        obs_shape = env.observation_space.shape
        state_dim = obs_shape[0] if len(obs_shape) == 1 else obs_shape  # sometimes state_dim is a list
        if_discrete = isinstance(env.action_space, gym.spaces.Discrete)
        if if_discrete:  # discrete: the number of actions
            action_dim = env.action_space.n
        elif isinstance(env.action_space, gym.spaces.Box):  # continuous: the action vector length
            action_dim = env.action_space.shape[0]
            # warn when the action range is not the expected (-1, 1)
            if any(env.action_space.high - 1):
                print('WARNING: env.action_space.high', env.action_space.high)
            if any(env.action_space.low + 1):
                print('WARNING: env.action_space.low', env.action_space.low)
        else:
            raise RuntimeError('\n| Error in get_gym_env_info(). Please set these value manually:'
                               '\n `state_dim=int; action_dim=int; if_discrete=bool;`'
                               '\n And keep action_space in range (-1, 1).')
    else:  # a custom env that already exposes the four attributes
        env_name = env.env_name
        state_dim = env.state_dim
        action_dim = env.action_dim
        if_discrete = env.if_discrete
    env_args = {'env_name': env_name,
                'state_dim': state_dim,
                'action_dim': action_dim,
                'if_discrete': if_discrete, }
    if if_print:
        pretty = repr(env_args).replace(',', ",\n" + " " * 11)
        print(f"env_args = {pretty}")
    return env_args
def kwargs_filter(function, kwargs: dict) -> dict:
    """Keep only the entries of *kwargs* whose keys name a parameter of *function*."""
    import inspect
    accepted = {param.name for param in inspect.signature(function).parameters.values()}
    return {name: kwargs[name] for name in accepted.intersection(kwargs)}
def build_env(env_class=None, env_args=None):
    """Instantiate an environment and stamp the four standard attributes onto it.

    `gym.make` needs its `id` keyword; any other env class receives only the
    kwargs its `__init__` actually accepts.
    """
    if env_class.__module__ == 'gym.envs.registration':  # special rule for gym's registry `make`
        import gym
        assert '0.18.0' <= gym.__version__ <= '0.25.2'  # pip3 install gym==0.24.0
        gym.logger.set_level(40)  # Block warning
        env = env_class(id=env_args['env_name'])
    else:
        env = env_class(**kwargs_filter(env_class.__init__, env_args.copy()))
    for key in ('env_name', 'state_dim', 'action_dim', 'if_discrete'):
        setattr(env, key, env_args[key])
    return env
| 7,290 | 48.938356 | 114 | py |
ElegantRL | ElegantRL-master/helloworld/agent.py | from copy import deepcopy
import torch
from torch import Tensor
from config import Config
from net import QNet # DQN
from net import Actor, Critic # DDPG
from net import ActorPPO, CriticPPO # PPO
class AgentBase:
    """Base class for agents: builds actor/critic networks, optimizers and update helpers.

    NOTE(review): the default `args: Config = Config()` is a shared mutable default
    instance — it is only read here, but confirm no caller mutates it.
    """
    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        self.state_dim = state_dim
        self.action_dim = action_dim
        # training hyperparameters copied from the Config
        self.gamma = args.gamma
        self.batch_size = args.batch_size
        self.repeat_times = args.repeat_times
        self.reward_scale = args.reward_scale
        self.learning_rate = args.learning_rate
        self.if_off_policy = args.if_off_policy
        self.soft_update_tau = args.soft_update_tau
        self.last_state = None  # save the last state of the trajectory for training. `last_state.shape == (state_dim)`
        self.device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
        # subclasses set `act_class`/`cri_class` BEFORE calling this __init__
        act_class = getattr(self, "act_class", None)
        cri_class = getattr(self, "cri_class", None)
        # act_target/cri_target start as ALIASES of act/cri; subclasses that need real
        # target networks replace them with deepcopy(...) after this __init__ returns.
        self.act = self.act_target = act_class(net_dims, state_dim, action_dim).to(self.device)
        self.cri = self.cri_target = cri_class(net_dims, state_dim, action_dim).to(self.device) \
            if cri_class else self.act
        self.act_optimizer = torch.optim.Adam(self.act.parameters(), self.learning_rate)
        self.cri_optimizer = torch.optim.Adam(self.cri.parameters(), self.learning_rate) \
            if cri_class else self.act_optimizer
        self.criterion = torch.nn.SmoothL1Loss()
    @staticmethod
    def optimizer_update(optimizer, objective: Tensor):
        """Zero gradients, backpropagate *objective*, and take one optimizer step."""
        optimizer.zero_grad()
        objective.backward()
        optimizer.step()
    @staticmethod
    def soft_update(target_net: torch.nn.Module, current_net: torch.nn.Module, tau: float):
        """Polyak-average: tar <- tau * cur + (1 - tau) * tar, parameter by parameter."""
        # assert target_net is not current_net
        for tar, cur in zip(target_net.parameters(), current_net.parameters()):
            tar.data.copy_(cur.data * tau + tar.data * (1.0 - tau))
class AgentDQN(AgentBase):
    """Deep Q-Network agent: epsilon-greedy exploration, soft-updated target network."""
    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        self.act_class = getattr(self, "act_class", QNet)
        self.cri_class = getattr(self, "cri_class", None)  # means `self.cri = self.act`
        AgentBase.__init__(self, net_dims, state_dim, action_dim, gpu_id, args)
        # replace the aliases created by AgentBase with real target networks
        self.act_target = deepcopy(self.act)
        self.cri_target = deepcopy(self.cri)
        # epsilon of epsilon-greedy: the probability of choosing an action randomly
        self.act.explore_rate = getattr(args, "explore_rate", 0.25)  # set for `self.act.get_action()`
    def explore_env(self, env, horizon_len: int, if_random: bool = False) -> [Tensor]:
        """Collect `horizon_len` transitions from *env*, resetting on episode end.

        Continues from `self.last_state`; uses uniform-random actions when `if_random`.
        Returns (states, actions(int), scaled rewards, undones), each with a leading
        horizon_len dimension.
        """
        states = torch.zeros((horizon_len, self.state_dim), dtype=torch.float32).to(self.device)
        actions = torch.zeros((horizon_len, 1), dtype=torch.int32).to(self.device)
        rewards = torch.ones(horizon_len, dtype=torch.float32).to(self.device)  # overwritten every step
        dones = torch.zeros(horizon_len, dtype=torch.bool).to(self.device)
        ary_state = self.last_state
        get_action = self.act.get_action  # hoist the bound method out of the loop
        for i in range(horizon_len):
            state = torch.as_tensor(ary_state, dtype=torch.float32, device=self.device)
            if if_random:
                action = torch.randint(self.action_dim, size=(1,))[0]
            else:
                action = get_action(state.unsqueeze(0))[0, 0]
            ary_action = action.detach().cpu().numpy()
            ary_state, reward, done, _ = env.step(ary_action)
            if done:
                ary_state = env.reset()
            states[i] = state
            actions[i] = action
            rewards[i] = reward
            dones[i] = done
        self.last_state = ary_state
        rewards = (rewards * self.reward_scale).unsqueeze(1)
        undones = (1.0 - dones.type(torch.float32)).unsqueeze(1)
        return states, actions, rewards, undones
    def update_net(self, buffer) -> [float]:
        """Run critic updates over the buffer; returns (mean critic loss, mean Q value)."""
        obj_critics = 0.0
        q_values = 0.0
        update_times = int(buffer.cur_size * self.repeat_times / self.batch_size)
        assert update_times >= 1
        for i in range(update_times):
            obj_critic, q_value = self.get_obj_critic(buffer, self.batch_size)
            self.optimizer_update(self.cri_optimizer, obj_critic)
            self.soft_update(self.cri_target, self.cri, self.soft_update_tau)
            obj_critics += obj_critic.item()
            q_values += q_value.item()
        return obj_critics / update_times, q_values / update_times
    def get_obj_critic(self, buffer, batch_size: int) -> (Tensor, Tensor):
        """TD loss for a sampled batch; target = r + gamma * max_a' Q_target(s', a')."""
        with torch.no_grad():
            state, action, reward, undone, next_state = buffer.sample(batch_size)
            next_q = self.cri_target(next_state).max(dim=1, keepdim=True)[0]
            q_label = reward + undone * self.gamma * next_q
        q_value = self.cri(state).gather(1, action.long())
        obj_critic = self.criterion(q_value, q_label)
        return obj_critic, q_value.mean()
class AgentDDPG(AgentBase):
    """Deep Deterministic Policy Gradient: deterministic actor + Q critic, both with target nets."""
    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        self.act_class = getattr(self, 'act_class', Actor)  # get the attribute of object `self`, set Actor in default
        self.cri_class = getattr(self, 'cri_class', Critic)  # get the attribute of object `self`, set Critic in default
        AgentBase.__init__(self, net_dims, state_dim, action_dim, gpu_id, args)
        # replace the aliases created by AgentBase with real target networks
        self.act_target = deepcopy(self.act)
        self.cri_target = deepcopy(self.cri)
        self.act.explore_noise_std = getattr(args, 'explore_noise', 0.1)  # set for `self.act.get_action()`
    def explore_env(self, env, horizon_len: int, if_random: bool = False) -> [Tensor]:
        """Collect `horizon_len` transitions; uniform actions in (-1, 1) when `if_random`.

        Returns (states, actions, rewards, undones).
        NOTE(review): unlike AgentDQN/AgentPPO, `reward_scale` is NOT applied to
        rewards here — confirm whether that is intentional.
        """
        states = torch.zeros((horizon_len, self.state_dim), dtype=torch.float32).to(self.device)
        actions = torch.zeros((horizon_len, self.action_dim), dtype=torch.float32).to(self.device)
        rewards = torch.zeros(horizon_len, dtype=torch.float32).to(self.device)
        dones = torch.zeros(horizon_len, dtype=torch.bool).to(self.device)
        ary_state = self.last_state
        get_action = self.act.get_action  # hoist the bound method out of the loop
        for i in range(horizon_len):
            state = torch.as_tensor(ary_state, dtype=torch.float32, device=self.device)
            action = torch.rand(self.action_dim) * 2 - 1.0 if if_random else get_action(state.unsqueeze(0)).squeeze(0)
            ary_action = action.detach().cpu().numpy()
            ary_state, reward, done, _ = env.step(ary_action)
            if done:
                ary_state = env.reset()
            states[i] = state
            actions[i] = action
            rewards[i] = reward
            dones[i] = done
        self.last_state = ary_state
        rewards = rewards.unsqueeze(1)
        undones = (1.0 - dones.type(torch.float32)).unsqueeze(1)
        return states, actions, rewards, undones
    def update_net(self, buffer) -> [float]:
        """Alternate critic and actor updates; returns (mean critic loss, mean actor objective)."""
        obj_critics = obj_actors = 0.0
        update_times = int(buffer.cur_size * self.repeat_times / self.batch_size)
        assert update_times > 0
        for i in range(update_times):
            obj_critic, state = self.get_obj_critic(buffer, self.batch_size)
            self.optimizer_update(self.cri_optimizer, obj_critic)
            self.soft_update(self.cri_target, self.cri, self.soft_update_tau)
            obj_critics += obj_critic.item()
            action = self.act(state)
            obj_actor = self.cri_target(state, action).mean()  # maximize Q of the actor's action
            self.optimizer_update(self.act_optimizer, -obj_actor)  # negate: the optimizer minimizes
            self.soft_update(self.act_target, self.act, self.soft_update_tau)
            obj_actors += obj_actor.item()
        return obj_critics / update_times, obj_actors / update_times
    def get_obj_critic(self, buffer, batch_size: int) -> (Tensor, Tensor):
        """TD loss on a sampled batch; target uses the target actor and target critic."""
        with torch.no_grad():
            states, actions, rewards, undones, next_states = buffer.sample(batch_size)
            next_actions = self.act_target(next_states)
            next_q_values = self.cri_target(next_states, next_actions)
            q_labels = rewards + undones * self.gamma * next_q_values
        q_values = self.cri(states, actions)
        obj_critic = self.criterion(q_values, q_labels)
        return obj_critic, states
class AgentPPO(AgentBase):
    """Proximal Policy Optimization: clipped surrogate objective with GAE advantages."""
    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        self.if_off_policy = False
        self.act_class = getattr(self, "act_class", ActorPPO)
        self.cri_class = getattr(self, "cri_class", CriticPPO)
        AgentBase.__init__(self, net_dims, state_dim, action_dim, gpu_id, args)
        self.ratio_clip = getattr(args, "ratio_clip", 0.25)  # `ratio.clamp(1 - clip, 1 + clip)`
        self.lambda_gae_adv = getattr(args, "lambda_gae_adv", 0.95)  # could be 0.80~0.99
        self.lambda_entropy = getattr(args, "lambda_entropy", 0.01)  # could be 0.00~0.10
        self.lambda_entropy = torch.tensor(self.lambda_entropy, dtype=torch.float32, device=self.device)
    def explore_env(self, env, horizon_len: int) -> [Tensor]:
        """Roll out `horizon_len` on-policy steps.

        Returns (states, raw actions, log-probs, scaled rewards, undones); the action
        is stored unsquashed and converted via `convert_action_for_env` before `env.step`.
        """
        states = torch.zeros((horizon_len, self.state_dim), dtype=torch.float32).to(self.device)
        actions = torch.zeros((horizon_len, self.action_dim), dtype=torch.float32).to(self.device)
        logprobs = torch.zeros(horizon_len, dtype=torch.float32).to(self.device)
        rewards = torch.zeros(horizon_len, dtype=torch.float32).to(self.device)
        dones = torch.zeros(horizon_len, dtype=torch.bool).to(self.device)
        ary_state = self.last_state
        get_action = self.act.get_action  # hoist the bound methods out of the loop
        convert = self.act.convert_action_for_env
        for i in range(horizon_len):
            state = torch.as_tensor(ary_state, dtype=torch.float32, device=self.device)
            action, logprob = [t.squeeze(0) for t in get_action(state.unsqueeze(0))[:2]]
            ary_action = convert(action).detach().cpu().numpy()
            ary_state, reward, done, _ = env.step(ary_action)
            if done:
                ary_state = env.reset()
            states[i] = state
            actions[i] = action
            logprobs[i] = logprob
            rewards[i] = reward
            dones[i] = done
        self.last_state = ary_state
        rewards = (rewards * self.reward_scale).unsqueeze(1)
        undones = (1 - dones.type(torch.float32)).unsqueeze(1)
        return states, actions, logprobs, rewards, undones
    def update_net(self, buffer) -> [float]:
        """PPO update over an on-policy rollout tuple.

        Computes GAE advantages once (normalized), then runs minibatch updates of the
        critic (value regression) and the actor (clipped surrogate + entropy bonus).
        Returns (mean critic loss, mean actor objective, mean action log-std).
        """
        with torch.no_grad():
            states, actions, logprobs, rewards, undones = buffer
            buffer_size = states.shape[0]
            '''get advantages reward_sums'''
            bs = 2 ** 10  # set a smaller 'batch_size' when out of GPU memory.
            values = [self.cri(states[i:i + bs]) for i in range(0, buffer_size, bs)]
            values = torch.cat(values, dim=0).squeeze(1)  # values.shape == (buffer_size, )
            advantages = self.get_advantages(rewards, undones, values)  # advantages.shape == (buffer_size, )
            reward_sums = advantages + values  # reward_sums.shape == (buffer_size, )
            del rewards, undones, values
            # normalize advantages for a stable surrogate objective
            advantages = (advantages - advantages.mean()) / (advantages.std(dim=0) + 1e-5)
        assert logprobs.shape == advantages.shape == reward_sums.shape == (buffer_size,)
        '''update network'''
        obj_critics = 0.0
        obj_actors = 0.0
        update_times = int(buffer_size * self.repeat_times / self.batch_size)
        assert update_times >= 1
        for _ in range(update_times):
            indices = torch.randint(buffer_size, size=(self.batch_size,), requires_grad=False)
            state = states[indices]
            action = actions[indices]
            logprob = logprobs[indices]
            advantage = advantages[indices]
            reward_sum = reward_sums[indices]
            value = self.cri(state).squeeze(1)  # critic network predicts the reward_sum (Q value) of state
            obj_critic = self.criterion(value, reward_sum)
            self.optimizer_update(self.cri_optimizer, obj_critic)
            new_logprob, obj_entropy = self.act.get_logprob_entropy(state, action)
            ratio = (new_logprob - logprob.detach()).exp()
            surrogate1 = advantage * ratio
            surrogate2 = advantage * ratio.clamp(1 - self.ratio_clip, 1 + self.ratio_clip)
            obj_surrogate = torch.min(surrogate1, surrogate2).mean()
            obj_actor = obj_surrogate + obj_entropy.mean() * self.lambda_entropy
            self.optimizer_update(self.act_optimizer, -obj_actor)  # negate: the optimizer minimizes
            obj_critics += obj_critic.item()
            obj_actors += obj_actor.item()
        # NOTE(review): ActorPPO names its parameter `action_std_log`, so this getattr
        # always falls back to zeros — confirm whether `a_std_log` is a stale name.
        a_std_log = getattr(self.act, 'a_std_log', torch.zeros(1)).mean()
        return obj_critics / update_times, obj_actors / update_times, a_std_log.item()
    def get_advantages(self, rewards: Tensor, undones: Tensor, values: Tensor) -> Tensor:
        """Generalized Advantage Estimation, computed backwards over the rollout."""
        advantages = torch.empty_like(values)  # advantage value
        masks = undones * self.gamma
        horizon_len = rewards.shape[0]
        # bootstrap from the value of the state following the rollout
        next_state = torch.tensor(self.last_state, dtype=torch.float32).to(self.device)
        next_value = self.cri(next_state.unsqueeze(0)).detach().squeeze(1).squeeze(0)
        advantage = 0  # last_gae_lambda
        for t in range(horizon_len - 1, -1, -1):
            delta = rewards[t] + masks[t] * next_value - values[t]
            advantages[t] = advantage = delta + masks[t] * self.lambda_gae_adv * advantage
            next_value = values[t]
        return advantages
class ReplayBuffer:  # for off-policy
    """FIFO replay buffer stored as preallocated torch tensors on the target device."""
    def __init__(self, max_size: int, state_dim: int, action_dim: int, gpu_id: int = 0):
        self.p = 0  # pointer: index of the next slot to write
        self.if_full = False
        self.cur_size = 0
        self.max_size = max_size
        self.device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
        self.states = torch.empty((max_size, state_dim), dtype=torch.float32, device=self.device)
        self.actions = torch.empty((max_size, action_dim), dtype=torch.float32, device=self.device)
        self.rewards = torch.empty((max_size, 1), dtype=torch.float32, device=self.device)
        self.undones = torch.empty((max_size, 1), dtype=torch.float32, device=self.device)
    def update(self, items: [Tensor]):
        """Append a batch of transitions, wrapping around (FIFO) when full."""
        states, actions, rewards, undones = items
        p = self.p + rewards.shape[0]  # pointer position after writing this batch
        if p > self.max_size:
            self.if_full = True
            p0 = self.p  # first segment: fill up to the end of the buffer
            p1 = self.max_size
            p2 = self.max_size - self.p  # number of items that fit before wrapping
            p = p - self.max_size  # second segment: overwrite from the start
            self.states[p0:p1], self.states[0:p] = states[:p2], states[-p:]
            self.actions[p0:p1], self.actions[0:p] = actions[:p2], actions[-p:]
            self.rewards[p0:p1], self.rewards[0:p] = rewards[:p2], rewards[-p:]
            self.undones[p0:p1], self.undones[0:p] = undones[:p2], undones[-p:]
        else:
            self.states[self.p:p] = states
            self.actions[self.p:p] = actions
            self.rewards[self.p:p] = rewards
            self.undones[self.p:p] = undones
        self.p = p
        self.cur_size = self.max_size if self.if_full else self.p
    def sample(self, batch_size: int) -> [Tensor]:
        """Uniformly sample transitions; next_state is the row at index + 1.

        NOTE(review): pairing `ids` with `ids + 1` can cross an episode boundary or
        the FIFO write pointer after wrap-around, yielding a mismatched next_state
        for a few samples — presumably an accepted simplification; confirm.
        """
        ids = torch.randint(self.cur_size - 1, size=(batch_size,), requires_grad=False)
        return self.states[ids], self.actions[ids], self.rewards[ids], self.undones[ids], self.states[ids + 1]
| 15,617 | 46.327273 | 120 | py |
ElegantRL | ElegantRL-master/helloworld/net.py | import torch
import torch.nn as nn
from torch import Tensor
from torch.distributions.normal import Normal
class QNet(nn.Module):
    """Q-value network for DQN: maps a state to one Q value per discrete action."""
    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__()
        self.net = build_mlp(dims=[state_dim, *dims, action_dim])
        self.explore_rate = None  # epsilon of epsilon-greedy, assigned by the agent
        self.action_dim = action_dim

    def forward(self, state: Tensor) -> Tensor:
        """Q values for every discrete action."""
        return self.net(state)

    def get_action(self, state: Tensor) -> Tensor:
        """Epsilon-greedy action index (shape (batch, 1)) for exploration."""
        if torch.rand(1) > self.explore_rate:  # greedy branch
            return self.net(state).argmax(dim=1, keepdim=True)
        return torch.randint(self.action_dim, size=(state.shape[0], 1))  # random branch
class Actor(nn.Module):
    """Deterministic policy network for DDPG: state -> action in (-1, 1)."""
    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__()
        self.net = build_mlp(dims=[state_dim, *dims, action_dim])
        self.explore_noise_std = None  # std of exploration noise, assigned by the agent

    def forward(self, state: Tensor) -> Tensor:
        """Deterministic action, squashed by tanh."""
        raw = self.net(state)
        return raw.tanh()

    def get_action(self, state: Tensor) -> Tensor:
        """Gaussian-noised action for exploration, clipped to the valid range."""
        mean = self.net(state).tanh()
        noisy = Normal(mean, self.explore_noise_std).sample()
        return noisy.clip(-1.0, 1.0)
class Critic(nn.Module):
    """Q-value network for DDPG: scores a (state, action) pair."""
    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__()
        self.net = build_mlp(dims=[state_dim + action_dim, *dims, 1])

    def forward(self, state: Tensor, action: Tensor) -> Tensor:
        """Q(state, action), computed on the concatenated input."""
        state_action = torch.cat((state, action), dim=1)
        return self.net(state_action)
class ActorPPO(nn.Module):
    """Gaussian policy for PPO with a state-independent, learnable log-std."""
    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__()
        self.net = build_mlp(dims=[state_dim, *dims, action_dim])
        # learnable log standard deviation, shared across states
        self.action_std_log = nn.Parameter(torch.zeros((1, action_dim)), requires_grad=True)

    def forward(self, state: Tensor) -> Tensor:
        """Mean action, squashed to (-1, 1)."""
        return self.net(state).tanh()

    def get_action(self, state: Tensor) -> (Tensor, Tensor):
        """Sample an unsquashed action and its summed log-probability (exploration)."""
        dist = Normal(self.net(state), self.action_std_log.exp())
        sampled = dist.sample()
        return sampled, dist.log_prob(sampled).sum(1)

    def get_logprob_entropy(self, state: Tensor, action: Tensor) -> (Tensor, Tensor):
        """Log-probability of *action* under the current policy, plus the entropy."""
        dist = Normal(self.net(state), self.action_std_log.exp())
        return dist.log_prob(action).sum(1), dist.entropy().sum(1)

    @staticmethod
    def convert_action_for_env(action: Tensor) -> Tensor:
        """Squash a raw sampled action into the env's (-1, 1) range."""
        return action.tanh()
class CriticPPO(nn.Module):
    """State-value network for PPO; the action-dim argument is unused."""
    def __init__(self, dims: [int], state_dim: int, _action_dim: int):
        super().__init__()
        self.net = build_mlp(dims=[state_dim, *dims, 1])

    def forward(self, state: Tensor) -> Tensor:
        """Estimated value of *state*."""
        return self.net(state)
def build_mlp(dims: [int]) -> nn.Sequential:
    """Build an MLP with a Linear+ReLU pair per width step; the output layer stays linear."""
    layers = []
    for in_dim, out_dim in zip(dims[:-1], dims[1:]):
        layers.append(nn.Linear(in_dim, out_dim))
        layers.append(nn.ReLU())
    return nn.Sequential(*layers[:-1])  # drop the trailing activation
| 3,587 | 35.612245 | 115 | py |
ElegantRL | ElegantRL-master/helloworld/helloworld_TD3_single_file.py | import os
import sys
import time
from copy import deepcopy
import gym
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
class Config:  # for off-policy
    """Hyperparameter container for the single-file off-policy (TD3/SAC) demo.

    Unlike the full `config.py` version, `if_off_policy` is fixed to True and
    `init_before_training` only prepares the working directory.
    """
    def __init__(self, agent_class=None, env_class=None, env_args=None):
        self.agent_class = agent_class  # agent = agent_class(...)
        self.if_off_policy = True  # whether off-policy or on-policy of DRL algorithm
        self.env_class = env_class  # env = env_class(**env_args)
        self.env_args = env_args  # env = env_class(**env_args)
        if env_args is None:  # dummy env_args so the lookups below yield None
            env_args = {'env_name': None, 'state_dim': None, 'action_dim': None, 'if_discrete': None}
        self.env_name = env_args['env_name']  # the name of environment. Be used to set 'cwd'.
        self.state_dim = env_args['state_dim']  # vector dimension (feature number) of state
        self.action_dim = env_args['action_dim']  # vector dimension (feature number) of action
        self.if_discrete = env_args['if_discrete']  # discrete or continuous action space
        '''Arguments for reward shaping'''
        self.gamma = 0.99  # discount factor of future rewards
        self.reward_scale = 1.0  # an approximate target reward usually be closed to 256
        '''Arguments for training'''
        self.net_dims = (64, 32)  # the middle layer dimension of MLP (MultiLayer Perceptron)
        self.learning_rate = 6e-5  # 2 ** -14 ~= 6e-5
        self.soft_update_tau = 5e-3  # 2 ** -8 ~= 5e-3
        self.state_value_tau = 0.1  # smoothing rate of normalization statistics, 0.05 ~ 0.50
        self.batch_size = int(64)  # num of transitions sampled from replay buffer.
        self.horizon_len = int(256)  # collect horizon_len step while exploring, then update network
        self.buffer_size = int(1e6)  # ReplayBuffer size. First in first out for off-policy.
        self.repeat_times = 1.0  # repeatedly update network using ReplayBuffer to keep critic's loss small
        '''Arguments for device'''
        self.gpu_id = int(0)  # `int` means the ID of single GPU, -1 means CPU
        self.thread_num = int(8)  # cpu_num for pytorch, `torch.set_num_threads(self.num_threads)`
        self.random_seed = int(0)  # initialize random seed in self.init_before_training()
        '''Arguments for evaluate'''
        self.cwd = None  # current working directory to save model. None means set automatically
        self.if_remove = True  # remove the cwd folder? (True, False, None:ask me)
        self.break_step = +np.inf  # break training if 'total_step > break_step'
        self.eval_times = int(16)  # number of times that get episodic cumulative return
        self.eval_per_step = int(1e4)  # evaluate the agent per training steps
    def init_before_training(self):
        """Choose a default working directory from the env/agent names and create it."""
        if self.cwd is None:  # set cwd (current working directory) for saving model
            # strips the leading 'Agent' from the class name, e.g. './Pendulum_TD3'
            self.cwd = f'./{self.env_name}_{self.agent_class.__name__[5:]}'
        os.makedirs(self.cwd, exist_ok=True)
class ActorBase(nn.Module):
    """Shared base for actors: dimensions, exploration settings and state normalization."""
    def __init__(self, state_dim: int, action_dim: int):
        super().__init__()
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.net = None  # subclasses assign build_mlp(...)
        self.explore_noise_std = None  # std of exploration action noise, assigned by the agent
        self.ActionDist = torch.distributions.normal.Normal
        # running statistics for state normalization; frozen w.r.t. the optimizer
        self.state_avg = nn.Parameter(torch.zeros((state_dim,)), requires_grad=False)
        self.state_std = nn.Parameter(torch.ones((state_dim,)), requires_grad=False)

    def state_norm(self, state: Tensor) -> Tensor:
        """Normalize a raw state with the stored running mean/std."""
        return (state - self.state_avg) / self.state_std
class Actor(ActorBase):
    """Deterministic TD3/SAC-demo actor with input state normalization."""
    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.net = build_mlp(dims=[state_dim, *dims, action_dim])
        layer_init_with_orthogonal(self.net[-1], std=0.5)

    def forward(self, state: Tensor) -> Tensor:
        """Deterministic action in (-1, 1) from a normalized state."""
        return self.net(self.state_norm(state)).tanh()

    def get_action(self, state: Tensor) -> Tensor:
        """Gaussian-noised action for exploration, clipped to (-1, 1)."""
        mean = self.net(self.state_norm(state)).tanh()
        sampled = self.ActionDist(mean, self.explore_noise_std).sample()
        return sampled.clip(-1.0, 1.0)
class CriticBase(nn.Module):
    """Shared base for critics: running statistics for state and value normalization."""
    def __init__(self, state_dim: int, action_dim: int):
        super().__init__()
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.net = None  # subclasses assign the actual network
        # running statistics; frozen w.r.t. the optimizer, updated by the agent
        self.state_avg = nn.Parameter(torch.zeros((state_dim,)), requires_grad=False)
        self.state_std = nn.Parameter(torch.ones((state_dim,)), requires_grad=False)
        self.value_avg = nn.Parameter(torch.zeros((1,)), requires_grad=False)
        self.value_std = nn.Parameter(torch.ones((1,)), requires_grad=False)

    def state_norm(self, state: Tensor) -> Tensor:
        """Normalize a raw state with the stored running mean/std."""
        return (state - self.state_avg) / self.state_std

    def value_re_norm(self, value: Tensor) -> Tensor:
        """Map a normalized value prediction back to the reward scale."""
        return value * self.value_std + self.value_avg
class CriticTwin(CriticBase):
    """Twin-Q critic (TD3 style): shared state-action encoder with two Q-value heads."""
    def __init__(self, dims: [int], state_dim: int, action_dim: int):
        super().__init__(state_dim=state_dim, action_dim=action_dim)
        self.enc_sa = build_mlp(dims=[state_dim + action_dim, *dims])  # encoder of state and action
        # NOTE(review): the decoders output `action_dim` values rather than 1. For
        # Pendulum (action_dim == 1) this is the expected scalar Q value — confirm
        # the intent for environments with action_dim > 1.
        self.dec_q1 = build_mlp(dims=[dims[-1], action_dim])  # decoder of Q value 1
        self.dec_q2 = build_mlp(dims=[dims[-1], action_dim])  # decoder of Q value 2
        layer_init_with_orthogonal(self.dec_q1[-1], std=0.5)
        layer_init_with_orthogonal(self.dec_q2[-1], std=0.5)
    def forward(self, state: Tensor, action: Tensor) -> Tensor:
        """Q value from the first head only (e.g. for the actor objective)."""
        state = self.state_norm(state)
        sa_tmp = self.enc_sa(torch.cat((state, action), dim=1))
        value = self.dec_q1(sa_tmp)
        value = self.value_re_norm(value)
        return value  # Q value
    def get_q1_q2(self, state, action):
        """Both Q estimates, for the clipped-double-Q target."""
        state = self.state_norm(state)
        sa_tmp = self.enc_sa(torch.cat((state, action), dim=1))
        value1 = self.value_re_norm(self.dec_q1(sa_tmp))
        value2 = self.value_re_norm(self.dec_q2(sa_tmp))
        return value1, value2  # two Q values
def layer_init_with_orthogonal(layer, std=1.0, bias_const=1e-6):
    """Orthogonal weight initialization with gain *std*; constant bias *bias_const*."""
    nn.init.orthogonal_(layer.weight, std)
    nn.init.constant_(layer.bias, bias_const)
def build_mlp(dims: [int]) -> nn.Sequential:
    """Stack a Linear+ReLU pair per consecutive width in *dims*; no output activation."""
    modules = []
    for din, dout in zip(dims[:-1], dims[1:]):
        modules.append(nn.Linear(din, dout))
        modules.append(nn.ReLU())
    modules.pop()  # the output layer stays linear
    return nn.Sequential(*modules)
def get_gym_env_args(env, if_print: bool) -> dict:
    """Build the {'env_name', 'state_dim', 'action_dim', 'if_discrete'} dict for *env*.

    Works for a standard gym env (inspected through its spaces) or any object
    that already carries those four attributes. Optionally prints the result.
    """
    looks_like_gym = {'unwrapped', 'observation_space', 'action_space', 'spec'}.issubset(dir(env))
    if looks_like_gym:
        env_name = env.unwrapped.spec.id
        obs_shape = env.observation_space.shape
        state_dim = obs_shape[0] if len(obs_shape) == 1 else obs_shape  # sometimes state_dim is a list
        if_discrete = isinstance(env.action_space, gym.spaces.Discrete)
        action_dim = env.action_space.n if if_discrete else env.action_space.shape[0]
    else:
        env_name = env.env_name
        state_dim = env.state_dim
        action_dim = env.action_dim
        if_discrete = env.if_discrete
    env_args = {'env_name': env_name, 'state_dim': state_dim, 'action_dim': action_dim, 'if_discrete': if_discrete}
    if if_print:
        print(f"env_args = {repr(env_args)}")
    return env_args
def kwargs_filter(function, kwargs: dict) -> dict:
    """Drop the entries of *kwargs* whose keys are not parameters of *function*."""
    import inspect
    param_names = {p.name for p in inspect.signature(function).parameters.values()}
    return {key: value for key, value in kwargs.items() if key in param_names}
def build_env(env_class=None, env_args=None):
    """Instantiate an env; gym's registration entry point needs a special call signature."""
    if env_class.__module__ == 'gym.envs.registration':  # special rule for `gym.make`
        assert '0.18.0' <= gym.__version__ <= '0.25.2'  # pip3 install gym==0.24.0
        env = env_class(id=env_args['env_name'])
    else:
        # pass along only the kwargs the env constructor actually accepts
        env = env_class(**kwargs_filter(env_class.__init__, env_args.copy()))
    # attach the standard attributes so downstream code can rely on them
    for attr_str in ('env_name', 'state_dim', 'action_dim', 'if_discrete'):
        setattr(env, attr_str, env_args[attr_str])
    return env
class AgentBase:
    """Base class of DRL agents: builds actor/critic nets, optimizers, and update helpers."""

    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        self.state_dim = state_dim
        self.action_dim = action_dim

        self.gamma = args.gamma
        self.batch_size = args.batch_size
        self.repeat_times = args.repeat_times
        self.reward_scale = args.reward_scale
        self.learning_rate = args.learning_rate
        self.if_off_policy = args.if_off_policy
        self.soft_update_tau = args.soft_update_tau
        self.state_value_tau = args.state_value_tau

        self.last_state = None  # save the last state of the trajectory for training. `last_state.shape == (state_dim)`
        self.device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")

        # subclasses set `act_class`/`cri_class` before calling this __init__;
        # when `cri_class` is None, actor and critic share one network/optimizer.
        act_class = getattr(self, "act_class", None)
        cri_class = getattr(self, "cri_class", None)
        self.act = self.act_target = act_class(net_dims, state_dim, action_dim).to(self.device)
        self.cri = self.cri_target = cri_class(net_dims, state_dim, action_dim).to(self.device) \
            if cri_class else self.act

        self.act_optimizer = torch.optim.Adam(self.act.parameters(), self.learning_rate)
        self.cri_optimizer = torch.optim.Adam(self.cri.parameters(), self.learning_rate) \
            if cri_class else self.act_optimizer

        self.criterion = torch.nn.SmoothL1Loss()

    @staticmethod
    def optimizer_update(optimizer, objective: Tensor):
        """Minimize `objective`: zero grads, backward, one optimizer step."""
        optimizer.zero_grad()
        objective.backward()
        optimizer.step()

    @staticmethod
    def soft_update(target_net: torch.nn.Module, current_net: torch.nn.Module, tau: float):
        """Polyak-average `current_net` parameters into `target_net` with coefficient `tau`."""
        # assert target_net is not current_net
        for tar, cur in zip(target_net.parameters(), current_net.parameters()):
            tar.data.copy_(cur.data * tau + tar.data * (1.0 - tau))

    def update_avg_std_for_state_value_norm(self, states: Tensor, returns: Tensor):
        """Exponential-moving-average update of state/value normalization statistics."""
        tau = self.state_value_tau
        if tau == 0:
            return
        state_avg = states.mean(dim=0, keepdim=True)
        state_std = states.std(dim=0, keepdim=True)
        # BUG FIX: the actor's EMA must be based on its own running std, and the
        # critic must receive the updated std. The old code read the critic's std
        # as the EMA base and then wrote `self.cri.state_std[:] = self.cri.state_std`
        # (a no-op), which let the critic's statistics go permanently stale.
        self.act.state_avg[:] = self.act.state_avg * (1 - tau) + state_avg * tau
        self.act.state_std[:] = self.act.state_std * (1 - tau) + state_std * tau + 1e-4
        self.cri.state_avg[:] = self.act.state_avg  # keep critic in sync with actor
        self.cri.state_std[:] = self.act.state_std

        returns_avg = returns.mean(dim=0)
        returns_std = returns.std(dim=0)
        self.cri.value_avg[:] = self.cri.value_avg * (1 - tau) + returns_avg * tau
        self.cri.value_std[:] = self.cri.value_std * (1 - tau) + returns_std * tau + 1e-4
class AgentTD3(AgentBase):
    """Twin Delayed DDPG (TD3): twin critics, policy-smoothing noise, delayed actor updates."""

    def __init__(self, net_dims: [int], state_dim: int, action_dim: int, gpu_id: int = 0, args: Config = Config()):
        self.act_class = getattr(self, 'act_class', Actor)  # get the attribute of object `self`
        self.cri_class = getattr(self, 'cri_class', CriticTwin)  # get the attribute of object `self`
        super().__init__(net_dims, state_dim, action_dim, gpu_id, args)
        self.cri_target = deepcopy(self.cri)
        self.act_target = deepcopy(self.act)

        self.explore_noise_std = getattr(args, 'explore_noise_std', 0.06)  # std of exploration action noise
        self.policy_noise_std = getattr(args, 'policy_noise_std', 0.12)  # std of target policy smoothing noise
        self.update_freq = getattr(args, 'update_freq', 2)  # delayed update frequency of the policy
        self.horizon_len = 0  # number of steps collected by the latest `explore_env` call

    def explore_env(self, env, horizon_len: int, if_random: bool = False) -> [Tensor]:
        """Collect `horizon_len` transitions; random uniform actions when `if_random` (warm-up)."""
        self.act.explore_noise_std = self.act_target.explore_noise_std = self.explore_noise_std
        # BUG FIX: remember how many steps were collected. The old code reset this
        # to 0, so `update_net` sliced `[-0:]` == the whole (partly uninitialized)
        # replay buffer when computing normalization statistics.
        self.horizon_len = horizon_len

        states = torch.zeros((horizon_len, self.state_dim), dtype=torch.float32).to(self.device)
        actions = torch.zeros((horizon_len, self.action_dim), dtype=torch.float32).to(self.device)
        rewards = torch.zeros(horizon_len, dtype=torch.float32).to(self.device)
        dones = torch.zeros(horizon_len, dtype=torch.bool).to(self.device)

        ary_state = self.last_state
        get_action = self.act.get_action
        for i in range(horizon_len):
            state = torch.as_tensor(ary_state, dtype=torch.float32, device=self.device)
            action = torch.rand(self.action_dim) * 2 - 1.0 if if_random else get_action(state.unsqueeze(0))[0]
            states[i] = state
            actions[i] = action

            ary_action = action.detach().cpu().numpy()
            ary_state, reward, done, _ = env.step(ary_action)
            if done:
                ary_state = env.reset()
            rewards[i] = reward
            dones[i] = done
        self.last_state = ary_state  # carry the trajectory over to the next call

        rewards = rewards.unsqueeze(1)
        undones = (1.0 - dones.type(torch.float32)).unsqueeze(1)
        return states, actions, rewards, undones

    def update_net(self, buffer) -> [float]:
        """One round of TD3 updates; returns (avg critic objective, avg actor objective)."""
        self.act.explore_noise_std = self.act_target.explore_noise_std = self.policy_noise_std

        # Normalize with the freshest `horizon_len` transitions only. Restricting
        # the view to `[:buffer.cur_size]` first avoids reading uninitialized rows
        # of the `torch.empty` buffer before it has filled up.
        states = buffer.states[:buffer.cur_size][-self.horizon_len:]
        reward_sums = buffer.rewards[:buffer.cur_size][-self.horizon_len:] * (1 / (1 - self.gamma))
        self.update_avg_std_for_state_value_norm(
            states=states.reshape((-1, self.state_dim)),
            returns=reward_sums.reshape((-1,))
        )

        obj_critics = obj_actors = 0.0
        update_times = int(buffer.cur_size * self.repeat_times / self.batch_size)
        for t in range(update_times):
            obj_critic, state = self.get_obj_critic(buffer, self.batch_size)
            self.optimizer_update(self.cri_optimizer, obj_critic)
            self.soft_update(self.cri_target, self.cri, self.soft_update_tau)
            obj_critics += obj_critic.item()

            if t % self.update_freq == 0:  # delayed policy update
                action = self.act(state)  # policy gradient
                obj_actor = (self.cri(state, action)).mean()
                self.optimizer_update(self.act_optimizer, -obj_actor)
                self.soft_update(self.act_target, self.act, self.soft_update_tau)
                obj_actors += obj_actor.item()
        return obj_critics / update_times, obj_actors / (update_times / self.update_freq)

    def get_obj_critic(self, buffer, batch_size: int) -> (Tensor, Tensor):
        """Compute the twin-critic TD objective on one sampled batch; also return the states."""
        with torch.no_grad():
            state, action, reward, undone, next_state = buffer.sample(batch_size)
            # NOTE(review): canonical TD3 samples the smoothed next action from
            # `act_target`; this uses `act` — confirm this is intentional.
            next_action = self.act.get_action(next_state)  # stochastic policy
            next_q = torch.min(*self.cri_target.get_q1_q2(next_state, next_action))  # twin critics
            q_label = reward + undone * self.gamma * next_q
        q1, q2 = self.cri.get_q1_q2(state, action)
        obj_critic = (self.criterion(q1, q_label) + self.criterion(q2, q_label)) / 2.
        return obj_critic, state
class ReplayBuffer:  # for off-policy
    """Fixed-size FIFO replay buffer on `device`; overwrites the oldest data once full."""

    def __init__(self, max_size: int, state_dim: int, action_dim: int, gpu_id: int = 0):
        self.p = 0  # pointer: next write position
        self.if_full = False
        self.cur_size = 0
        self.max_size = max_size
        self.device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")

        self.states = torch.empty((max_size, state_dim), dtype=torch.float32, device=self.device)
        self.actions = torch.empty((max_size, action_dim), dtype=torch.float32, device=self.device)
        self.rewards = torch.empty((max_size, 1), dtype=torch.float32, device=self.device)
        self.undones = torch.empty((max_size, 1), dtype=torch.float32, device=self.device)

    def update(self, items: [Tensor]):
        """Append one batch of (states, actions, rewards, undones), wrapping around when full."""
        states, actions, rewards, undones = items
        next_p = self.p + rewards.shape[0]  # pointer after the write
        if next_p > self.max_size:  # the batch wraps around the end of the buffer
            self.if_full = True
            head_len = self.max_size - self.p  # how much fits before the end
            next_p = next_p - self.max_size  # remainder goes to the front
            for buf, src in ((self.states, states), (self.actions, actions),
                             (self.rewards, rewards), (self.undones, undones)):
                buf[self.p:self.max_size] = src[:head_len]
                buf[0:next_p] = src[-next_p:]
        else:
            self.states[self.p:next_p] = states
            self.actions[self.p:next_p] = actions
            self.rewards[self.p:next_p] = rewards
            self.undones[self.p:next_p] = undones
        self.p = next_p
        self.cur_size = self.max_size if self.if_full else self.p

    def sample(self, batch_size: int) -> [Tensor]:
        """Uniformly sample transitions; `states[ids + 1]` serves as the next state."""
        ids = torch.randint(self.cur_size - 1, size=(batch_size,), requires_grad=False)
        return self.states[ids], self.actions[ids], self.rewards[ids], self.undones[ids], self.states[ids + 1]
class PendulumEnv(gym.Wrapper):  # a demo of custom gym env
    """gym Pendulum with the action rescaled to (-1, +1) and the reward halved."""

    def __init__(self, gym_env_name=None):
        gym.logger.set_level(40)  # Block warning
        assert '0.18.0' <= gym.__version__ <= '0.25.2'  # pip3 install gym==0.24.0
        if gym_env_name is None:
            gym_env_name = "Pendulum-v0" if gym.__version__ < '0.18.0' else "Pendulum-v1"
        super().__init__(env=gym.make(gym_env_name))

        '''the necessary env information when you design a custom env'''
        self.env_name = gym_env_name  # the name of this env.
        self.state_dim = self.observation_space.shape[0]  # feature number of state
        self.action_dim = self.action_space.shape[0]  # feature number of action
        self.if_discrete = False  # discrete action or continuous action

    def reset(self) -> np.ndarray:  # reset the agent in env
        return self.env.reset()

    def step(self, action: np.ndarray) -> (np.ndarray, float, bool, dict):  # agent interacts in env
        # OpenAI Pendulum env set its action space as (-2, +2). It is bad.
        # We suggest that adjust action space to (-1, +1) when designing a custom env.
        state, reward, done, info_dict = self.env.step(action * 2)
        return state.reshape(self.state_dim), float(reward * 0.5), done, info_dict
def train_agent(args: Config):
    """Off-policy training loop: explore -> store -> update -> evaluate, until stopped."""
    args.init_before_training()
    gpu_id = args.gpu_id
    env = build_env(args.env_class, args.env_args)
    agent = args.agent_class(args.net_dims, args.state_dim, args.action_dim, gpu_id=gpu_id, args=args)
    agent.last_state = env.reset()
    # a discrete action is stored as one index column, hence action_dim=1
    buffer = ReplayBuffer(gpu_id=gpu_id, max_size=args.buffer_size,
                          state_dim=args.state_dim, action_dim=1 if args.if_discrete else args.action_dim, )
    buffer_items = agent.explore_env(env, args.horizon_len * args.eval_times, if_random=True)
    buffer.update(buffer_items)  # warm up for ReplayBuffer
    evaluator = Evaluator(eval_env=build_env(args.env_class, args.env_args),
                          eval_per_step=args.eval_per_step, eval_times=args.eval_times, cwd=args.cwd)
    # gradients are only needed inside `agent.update_net`; keep them off elsewhere
    torch.set_grad_enabled(False)
    while True:  # start training
        buffer_items = agent.explore_env(env, args.horizon_len)
        buffer.update(buffer_items)
        torch.set_grad_enabled(True)
        logging_tuple = agent.update_net(buffer)
        torch.set_grad_enabled(False)
        evaluator.evaluate_and_save(agent.act, args.horizon_len, logging_tuple)
        if (evaluator.total_step > args.break_step) or os.path.exists(f"{args.cwd}/stop"):
            break  # stop training when reach `break_step` or `mkdir cwd/stop`
class Evaluator:
    """Runs periodic evaluation episodes and prints one row of a progress table."""

    def __init__(self, eval_env, eval_per_step: int = 1e4, eval_times: int = 8, cwd: str = '.'):
        self.cwd = cwd
        self.env_eval = eval_env
        self.eval_step = 0
        self.total_step = 0
        self.start_time = time.time()
        self.eval_times = eval_times  # number of evaluation episodes per evaluation round
        self.eval_per_step = eval_per_step  # evaluate the agent per training steps
        self.recorder = list()
        print("\n| `step`: Number of samples, or total training steps, or running times of `env.step()`."
              "\n| `time`: Time spent from the start of training to this moment."
              "\n| `avgR`: Average value of cumulative rewards, which is the sum of rewards in an episode."
              "\n| `stdR`: Standard dev of cumulative rewards, which is the sum of rewards in an episode."
              "\n| `avgS`: Average of steps in an episode."
              "\n| `objC`: Objective of Critic network. Or call it loss function of critic network."
              "\n| `objA`: Objective of Actor network. It is the average Q value of the critic network."
              f"\n| {'step':>8} {'time':>8} | {'avgR':>8} {'stdR':>6} {'avgS':>6} | {'objC':>8} {'objA':>8}")

    def evaluate_and_save(self, actor, horizon_len: int, logging_tuple: tuple):
        """Advance the step counter; once `eval_per_step` steps passed, evaluate and log."""
        self.total_step += horizon_len
        if self.eval_step + self.eval_per_step > self.total_step:
            return  # not yet time for the next evaluation round
        self.eval_step = self.total_step

        rewards_steps_ary = np.array([get_rewards_and_steps(self.env_eval, actor)
                                      for _ in range(self.eval_times)], dtype=np.float32)
        avg_r = rewards_steps_ary[:, 0].mean()  # average of cumulative rewards
        std_r = rewards_steps_ary[:, 0].std()  # std of cumulative rewards
        avg_s = rewards_steps_ary[:, 1].mean()  # average of steps in an episode

        used_time = time.time() - self.start_time
        self.recorder.append((self.total_step, used_time, avg_r))
        print(f"| {self.total_step:8.2e} {used_time:8.0f} "
              f"| {avg_r:8.2f} {std_r:6.2f} {avg_s:6.0f} "
              f"| {logging_tuple[0]:8.2f} {logging_tuple[1]:8.2f}")
def get_rewards_and_steps(env, actor, if_render: bool = False) -> (float, int):  # cumulative_rewards and episode_steps
    """Run one evaluation episode; return (cumulative reward, number of steps)."""
    device = next(actor.parameters()).device  # net.parameters() is a Python generator.
    state = env.reset()
    cumulative_returns = 0.0  # sum of rewards in an episode
    episode_steps = 0
    for episode_steps in range(12345):  # hard cap on episode length
        tensor_state = torch.as_tensor(state, dtype=torch.float32, device=device).unsqueeze(0)
        action = actor(tensor_state).detach().cpu().numpy()[0]  # detach() is redundant under torch.no_grad()
        state, reward, done, _ = env.step(action)
        cumulative_returns += reward
        if if_render:
            env.render()
        if done:
            break
    # some envs track their own episode return; prefer it when present
    cumulative_returns = getattr(env, 'cumulative_returns', cumulative_returns)
    return cumulative_returns, episode_steps + 1
def train_sac_for_pendulum(gpu_id=0):
    """Train an agent on the custom Pendulum env.

    NOTE(review): the function name says `sac` but the configured agent is
    AgentTD3 — presumably copied from a SAC demo; confirm the intended agent.
    """
    env_args = {
        'env_name': 'Pendulum',  # Apply torque on the free end to swing a pendulum into an upright position
        'state_dim': 3,  # the x-y coordinates of the pendulum's free end and its angular velocity.
        'action_dim': 1,  # the torque applied to free end of the pendulum
        'if_discrete': False  # continuous action space, symbols → direction, value → force
    }  # env_args = get_gym_env_args(env=PendulumEnv(), if_print=True)
    args = Config(agent_class=AgentTD3, env_class=PendulumEnv, env_args=env_args)  # see `Config` for explanation
    args.break_step = int(1e5)  # break training if 'total_step > break_step'
    args.net_dims = (64, 32)  # the middle layer dimension of MultiLayer Perceptron
    args.gpu_id = gpu_id  # the ID of single GPU, -1 means CPU
    args.gamma = 0.97  # discount factor of future rewards
    train_agent(args)
if __name__ == '__main__':  # guard: importing this module must not start training
    # the first CLI argument selects the GPU id; default -1 runs on CPU
    train_sac_for_pendulum(gpu_id=int(sys.argv[1]) if len(sys.argv) > 1 else -1)
"""
| step time | avgR stdR avgS | objC objA
| 1.02e+04 108 | -745.94 26.63 200 | 0.76 -54.69
| 2.05e+04 302 | -409.87 22.87 200 | 1.14 -72.69
| 3.07e+04 501 | -309.10 31.90 200 | 0.74 -58.23
| 4.10e+04 800 | -83.88 46.36 200 | 0.68 -43.74
| 5.12e+04 1103 | -79.66 53.86 200 | 0.48 -32.24
"""
| 24,568 | 47.364173 | 119 | py |
ElegantRL | ElegantRL-master/helloworld/unit_tests/check_agent.py | import gym
import torch
from env import PendulumEnv
from agent import *
def check_agent_base(state_dim=4, action_dim=2, batch_size=3, net_dims=(64, 32), gpu_id=0):
    """Smoke-test AgentBase plumbing: optimizer_update() and soft_update()."""
    device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
    state = torch.rand(size=(batch_size, state_dim), dtype=torch.float32, device=device).detach()
    action = torch.rand(size=(batch_size, action_dim), dtype=torch.float32, device=device).detach()

    '''check AgentBase'''
    agent = AgentDDPG(net_dims, state_dim, action_dim, gpu_id=gpu_id, args=Config())
    AgentBase.__init__(agent, net_dims, state_dim, action_dim, gpu_id=gpu_id, args=Config())

    '''check for run.render_agent'''
    # actor objective: maximize Q of the actor's own action
    obj_act = -agent.cri(state, agent.act(state)).mean()
    assert agent.optimizer_update(agent.act_optimizer, obj_act) is None

    # critic objective: regression toward a zero target
    q_value = agent.cri(state, action)
    obj_cri = agent.criterion(q_value, torch.zeros_like(q_value).detach()).mean()
    assert agent.optimizer_update(agent.cri_optimizer, obj_cri) is None

    # soft_update mutates the target in place and returns None
    assert agent.soft_update(target_net=deepcopy(agent.cri), current_net=agent.cri, tau=3e-5) is None
def check_agent_dqn(batch_size=3, horizon_len=16, net_dims=(64, 32), gpu_id=0):
    """Check AgentDQN on CartPole: exploration outputs and one network update."""
    from config import build_env
    env_args = {'env_name': 'CartPole-v1', 'state_dim': 4, 'action_dim': 2, 'if_discrete': True}
    env = build_env(env_class=gym.make, env_args=env_args)
    state_dim = env_args['state_dim']
    action_dim = env_args['action_dim']

    '''init agent'''
    from agent import ReplayBuffer
    buffer = ReplayBuffer(gpu_id=gpu_id, max_size=int(1e4), state_dim=state_dim, action_dim=1, )
    args = Config()
    args.batch_size = batch_size
    agent = AgentDQN(net_dims=net_dims, state_dim=state_dim, action_dim=action_dim, gpu_id=gpu_id, args=args)
    agent.last_state = env.reset()

    def assert_exploration_items(items):
        # shared shape/dtype checks for one batch of exploration data
        exp_states, exp_actions, exp_rewards, exp_undones = items
        assert exp_states.shape == (horizon_len, state_dim)
        assert exp_states.dtype in {torch.float, torch.int}
        assert exp_actions.shape == (horizon_len, 1)  # a discrete action is one index
        assert exp_actions.dtype in {torch.int, torch.long}
        assert exp_rewards.shape == (horizon_len, 1)
        assert exp_rewards.dtype == torch.float
        assert exp_undones.shape == (horizon_len, 1)
        assert exp_undones.dtype == torch.float  # undones is float, instead of int
        assert set(exp_undones.squeeze(1).cpu().data.tolist()).issubset({0.0, 1.0})  # undones in {0.0, 1.0}

    '''check for agent.explore_env'''
    buffer_items = None
    for if_random in (True, False):
        buffer_items = agent.explore_env(env=env, horizon_len=horizon_len, if_random=if_random)
        buffer.update(buffer_items)
        assert_exploration_items(buffer_items)

    '''check for agent.update_net'''
    buffer.update(buffer_items)
    obj_critic, state = agent.get_obj_critic(buffer=buffer, batch_size=batch_size)
    assert obj_critic.shape == ()
    states = buffer_items[0]
    assert states.shape == (horizon_len, state_dim)
    assert states.dtype in {torch.float, torch.int}
    logging_tuple = agent.update_net(buffer=buffer)
    assert isinstance(logging_tuple, tuple)
    assert any(isinstance(item, float) for item in logging_tuple)
    assert len(logging_tuple) >= 2
def check_agent_ddpg(batch_size=3, horizon_len=16, net_dims=(64, 32), gpu_id=0):
    """Check AgentDDPG on Pendulum: exploration outputs and one network update."""
    from config import build_env
    env_args = {'env_name': 'Pendulum', 'state_dim': 3, 'action_dim': 1, 'if_discrete': False}
    env = build_env(env_class=PendulumEnv, env_args=env_args)
    state_dim = env_args['state_dim']
    action_dim = env_args['action_dim']

    '''init agent'''
    from agent import ReplayBuffer
    buffer = ReplayBuffer(gpu_id=gpu_id, max_size=int(1e4), state_dim=state_dim, action_dim=action_dim, )
    args = Config()
    args.batch_size = batch_size
    agent = AgentDDPG(net_dims=net_dims, state_dim=state_dim, action_dim=action_dim, gpu_id=gpu_id, args=args)
    agent.last_state = env.reset()

    def assert_exploration_items(items):
        # shared shape/dtype checks for one batch of exploration data
        exp_states, exp_actions, exp_rewards, exp_undones = items
        assert exp_states.shape == (horizon_len, state_dim)
        assert exp_states.dtype in {torch.float, torch.int}
        assert exp_actions.shape == (horizon_len, action_dim)
        assert exp_actions.dtype == torch.float
        assert exp_rewards.shape == (horizon_len, 1)
        assert exp_rewards.dtype == torch.float
        assert exp_undones.shape == (horizon_len, 1)
        assert exp_undones.dtype == torch.float  # undones is float, instead of int
        assert set(exp_undones.squeeze(1).cpu().data.tolist()).issubset({0.0, 1.0})  # undones in {0.0, 1.0}

    '''check for agent.explore_env'''
    buffer_items = None
    for if_random in (True, False):
        buffer_items = agent.explore_env(env=env, horizon_len=horizon_len, if_random=if_random)
        assert_exploration_items(buffer_items)

    '''check for agent.update_net'''
    buffer.update(buffer_items)
    obj_critic, state = agent.get_obj_critic(buffer=buffer, batch_size=batch_size)
    assert obj_critic.shape == ()
    states = buffer_items[0]
    assert states.shape == (horizon_len, state_dim)
    assert states.dtype in {torch.float, torch.int}
    logging_tuple = agent.update_net(buffer=buffer)
    assert isinstance(logging_tuple, tuple)
    assert any(isinstance(item, float) for item in logging_tuple)
    assert len(logging_tuple) >= 2
def check_agent_ppo(batch_size=3, horizon_len=16, net_dims=(64, 32), gpu_id=0):
    """Check AgentPPO on Pendulum: action conversion, exploration, advantages, update."""
    from config import build_env
    env_args = {'env_name': 'Pendulum', 'state_dim': 3, 'action_dim': 1, 'if_discrete': False}
    env = build_env(env_class=PendulumEnv, env_args=env_args)
    state_dim = env_args['state_dim']
    action_dim = env_args['action_dim']

    '''init agent'''
    args = Config()
    args.batch_size = batch_size
    agent = AgentPPO(net_dims=net_dims, state_dim=state_dim, action_dim=action_dim, gpu_id=gpu_id, args=args)
    agent.last_state = env.reset()

    # actions outside (-1, +1) must be squashed back into range by the converter
    raw_action = torch.rand(size=(batch_size, action_dim), dtype=torch.float32).detach() * 6 - 3
    assert torch.any((raw_action < -1.0) | (+1.0 < raw_action))
    env_action = agent.act.convert_action_for_env(raw_action)
    assert torch.any((-1.0 <= env_action) & (env_action <= +1.0))

    '''check for agent.explore_env'''
    buffer_items = agent.explore_env(env=env, horizon_len=horizon_len)
    states, actions, logprobs, rewards, undones = buffer_items
    assert states.shape == (horizon_len, state_dim)
    assert states.dtype in {torch.float, torch.int}
    assert actions.shape == (horizon_len, action_dim)
    assert actions.dtype == torch.float
    assert logprobs.shape == (horizon_len,)
    assert logprobs.dtype == torch.float
    assert rewards.shape == (horizon_len, 1)
    assert rewards.dtype == torch.float
    assert undones.shape == (horizon_len, 1)
    assert undones.dtype == torch.float  # undones is float, instead of int
    assert set(undones.squeeze(1).cpu().data.tolist()).issubset({0.0, 1.0})  # undones in {0.0, 1.0}

    '''check for agent.update_net'''
    values = agent.cri(states).squeeze(1)
    assert values.shape == (horizon_len,)
    advantages = agent.get_advantages(rewards=rewards, undones=undones, values=values)
    assert advantages.shape == (horizon_len,)
    assert advantages.dtype in {torch.float, torch.int}
    logging_tuple = agent.update_net(buffer=buffer_items)
    assert isinstance(logging_tuple, tuple)
    assert any(isinstance(item, float) for item in logging_tuple)
    assert len(logging_tuple) >= 2
if __name__ == '__main__':
    # run every agent check in sequence
    for check_fn in (check_agent_base, check_agent_dqn, check_agent_ddpg, check_agent_ppo):
        check_fn()
    print('| Finish checking.')
| 8,660 | 44.109375 | 110 | py |
ElegantRL | ElegantRL-master/helloworld/unit_tests/check_net.py | import torch.nn
from net import *
def check_q_net(state_dim=4, action_dim=2, batch_size=3, net_dims=(64, 32), gpu_id=0):
    """Check QNet: forward() returns Q values, get_action() returns action indices."""
    device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
    state = torch.rand(size=(batch_size, state_dim), dtype=torch.float32, device=device)

    '''check for agent.AgentDQN'''
    act = QNet(dims=net_dims, state_dim=state_dim, action_dim=action_dim).to(device)
    act.explore_rate = 0.1

    '''check for run.get_rewards_and_steps'''
    q_values = act(state=state)
    assert isinstance(q_values, Tensor)
    assert q_values.dtype in {torch.float}
    assert q_values.shape == (batch_size, action_dim)

    '''check for agent.AgentDQN.explore_env'''
    action_ids = act.get_action(state=state)
    assert isinstance(action_ids, Tensor)
    assert action_ids.dtype in {torch.int, torch.long}
    assert action_ids.shape == (batch_size, 1)
def check_actor(state_dim=4, action_dim=2, batch_size=3, net_dims=(64, 32), gpu_id=0):
    """Check Actor: both forward() and noisy get_action() yield in-range actions."""
    device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
    state = torch.rand(size=(batch_size, state_dim), dtype=torch.float32, device=device)

    '''check'''
    act = Actor(dims=net_dims, state_dim=state_dim, action_dim=action_dim).to(device)
    act.explore_noise_std = 0.1  # standard deviation of exploration action noise

    # the deterministic and the noisy action must share shape/dtype/range checks
    for action in (act(state=state), act.get_action(state=state)):
        assert isinstance(action, Tensor)
        assert action.dtype in {torch.float}
        assert action.shape == (batch_size, action_dim)
        assert torch.any((-1.0 <= action) & (action <= +1.0))
def check_critic(state_dim=4, action_dim=2, batch_size=3, net_dims=(64, 32), gpu_id=0):
    """Check Critic: forward() maps (state, action) to one Q value per sample."""
    device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
    state = torch.rand(size=(batch_size, state_dim), dtype=torch.float32, device=device)
    action = torch.rand(size=(batch_size, action_dim), dtype=torch.float32, device=device)

    '''check'''
    cri = Critic(dims=net_dims, state_dim=state_dim, action_dim=action_dim).to(device)
    q_value = cri(state=state, action=action)
    assert isinstance(q_value, Tensor)
    assert q_value.dtype in {torch.float}
    assert q_value.shape == (batch_size, 1)
def check_actor_ppo(state_dim=4, action_dim=2, batch_size=3, net_dims=(64, 32), gpu_id=0):
    """Check ActorPPO: learnable log-std, forward(), get_action(), get_logprob_entropy()."""
    device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
    state = torch.rand(size=(batch_size, state_dim), dtype=torch.float32, device=device)

    '''check'''
    act = ActorPPO(dims=net_dims, state_dim=state_dim, action_dim=action_dim).to(device)
    # the action std must be a trainable parameter
    assert isinstance(act.action_std_log, nn.Parameter)
    assert act.action_std_log.requires_grad

    mean_action = act(state=state)
    assert isinstance(mean_action, Tensor)
    assert mean_action.dtype in {torch.float}
    assert mean_action.shape == (batch_size, action_dim)
    mean_action = act.convert_action_for_env(mean_action)
    assert torch.any((-1.0 <= mean_action) & (mean_action <= +1.0))

    sampled_action, logprob = act.get_action(state=state)
    assert isinstance(sampled_action, Tensor)
    assert sampled_action.dtype in {torch.float}
    assert sampled_action.shape == (batch_size, action_dim)
    assert torch.any((-1.0 <= sampled_action) & (sampled_action <= +1.0))
    assert isinstance(logprob, Tensor)
    assert logprob.shape == (batch_size,)

    rand_action = torch.rand(size=(batch_size, action_dim), dtype=torch.float32, device=device)
    logprob, entropy = act.get_logprob_entropy(state=state, action=rand_action)
    assert isinstance(logprob, Tensor)
    assert logprob.shape == (batch_size,)
    assert isinstance(entropy, Tensor)
    assert entropy.shape == (batch_size,)
def check_critic_ppo(state_dim=4, action_dim=2, batch_size=3, net_dims=(64, 32), gpu_id=0):
    """Check CriticPPO: forward() maps a state batch to one value per sample."""
    device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
    state = torch.rand(size=(batch_size, state_dim), dtype=torch.float32, device=device)

    '''check'''
    cri = CriticPPO(dims=net_dims, state_dim=state_dim, _action_dim=action_dim).to(device)
    value = cri(state=state)
    assert isinstance(value, Tensor)
    assert value.dtype in {torch.float}
    assert value.shape == (batch_size, 1)
def check_build_mlp():
    """Check build_mlp: a dims tuple of length k yields a Sequential of 2*k - 3 modules."""
    for net_dims in ((64, 32), (64, 32, 16), (64, 32, 16, 8)):
        net = build_mlp(dims=net_dims)
        assert isinstance(net, nn.Sequential)
        # Linear/ReLU pairs minus the removed output-layer activation
        assert len(net) == len(net_dims) * 2 - 3
if __name__ == '__main__':
    # run every network check in sequence
    for check_fn in (check_q_net, check_actor, check_critic,
                     check_actor_ppo, check_critic_ppo, check_build_mlp):
        check_fn()
    print('| Finish checking.')
| 5,050 | 37.265152 | 103 | py |
ElegantRL | ElegantRL-master/unit_tests/agents/test_net.py | import torch
import torch.nn as nn
from torch import Tensor
def check_net_base(state_dim=4, action_dim=2, batch_size=3, gpu_id=0):
    """Check the base nets' state/value normalization buffers and helpers."""
    print("\n| check_net_base()")
    device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
    state = torch.rand(size=(batch_size, state_dim), dtype=torch.float32, device=device)

    '''check for agent.AgentBase.update_avg_std_for_normalization()'''
    from elegantrl.agents.net import QNetBase, ActorBase, CriticBase
    # every base net owns non-trainable state normalization buffers
    for net_base in (QNetBase, ActorBase, CriticBase):
        print(f"  net_base = {net_base.__name__}")
        net = net_base(state_dim=state_dim, action_dim=action_dim).to(device)

        for norm_buffer in (net.state_avg, net.state_std):
            assert isinstance(norm_buffer, Tensor)
            assert not norm_buffer.requires_grad

        normed_state = net.state_norm(state)
        assert isinstance(normed_state, Tensor)
        assert normed_state.shape == (batch_size, state_dim)

    # value-estimating nets additionally own value de-normalization buffers
    for net_base in (QNetBase, CriticBase):
        print(f"  net_base = {net_base.__name__}")
        net = net_base(state_dim=state_dim, action_dim=action_dim).to(device)

        for norm_buffer in (net.value_avg, net.value_std):
            assert isinstance(norm_buffer, Tensor)
            assert not norm_buffer.requires_grad

        value = torch.rand((batch_size, 2), dtype=torch.float32, device=device)
        re_normed_value = net.value_re_norm(value)
        assert isinstance(re_normed_value, Tensor)
        assert re_normed_value.shape == value.shape
def check_q_net(state_dim=4, action_dim=2, batch_size=3, net_dims=(64, 32), gpu_id=0):
    """Check every Q-network variant: forward(), get_action(), and twin heads."""
    print("\n| check_q_net()")
    device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
    state = torch.rand(size=(batch_size, state_dim), dtype=torch.float32, device=device)

    '''check for agent.AgentDQN, ...'''
    from elegantrl.agents.net import QNet, QNetDuel
    from elegantrl.agents.net import QNetTwin, QNetTwinDuel
    for net_class in (QNet, QNetDuel, QNetTwin, QNetTwinDuel):
        print(f"  net_class = {net_class.__name__}")
        net = net_class(dims=net_dims, state_dim=state_dim, action_dim=action_dim).to(device)
        net.explore_rate = 0.1

        '''check for run.get_rewards_and_steps'''
        q_values = net(state=state)
        assert isinstance(q_values, Tensor)
        assert q_values.dtype in {torch.float}
        assert q_values.shape == (batch_size, action_dim)

        '''check for agent.AgentDQN.explore_env'''
        action_ids = net.get_action(state=state)
        assert isinstance(action_ids, Tensor)
        assert action_ids.dtype in {torch.int, torch.long}
        assert action_ids.shape == (batch_size, 1)

    '''check for agent.AgentDoubleDQN, agent.AgentD3DQN'''
    for net_class in (QNetTwin, QNetTwinDuel):
        print(f"  net_class = {net_class.__name__}")
        net = net_class(dims=net_dims, state_dim=state_dim, action_dim=action_dim).to(device)

        '''check for run.get_rewards_and_steps'''
        q_values = net(state=state)
        assert isinstance(q_values, Tensor)
        assert q_values.dtype in {torch.float}
        assert q_values.shape == (batch_size, action_dim)

        '''check for agent.AgentDQN.explore_env'''
        # both critic heads must agree on shape and dtype
        for q_head in net.get_q1_q2(state=state):
            assert isinstance(q_head, Tensor)
            assert q_head.dtype is torch.float
            assert q_head.shape == (batch_size, action_dim)
def check_actor(state_dim=4, action_dim=2, batch_size=3, net_dims=(64, 32), gpu_id=0):
    """Smoke-test the actor networks: forward pass, exploration action sampling,
    and the log-prob/entropy interfaces used during network updates.

    Raises AssertionError on any dtype/shape mismatch.
    """
    print("\n| check_actor()")
    device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
    state = torch.rand(size=(batch_size, state_dim), dtype=torch.float32, device=device)
    from elegantrl.agents.net import Actor, ActorSAC, ActorFixSAC, ActorPPO, ActorDiscretePPO
    '''check for agent.explore_env()'''
    for actor_class in (Actor, ActorSAC, ActorFixSAC):
        print(f" actor_class = {actor_class.__name__}")
        act = actor_class(dims=net_dims, state_dim=state_dim, action_dim=action_dim).to(device)
        act.explore_noise_std = 0.1  # standard deviation of exploration action noise
        action = act(state=state)
        assert isinstance(action, Tensor)
        assert action.dtype in {torch.float}
        assert action.shape == (batch_size, action_dim)
        # NOTE(review): torch.any only needs one in-range element; torch.all may
        # have been intended to check the whole action tensor — confirm.
        assert torch.any((-1.0 <= action) & (action <= +1.0))
        # NOTE(review): this loop only iterates off-policy actors, so the
        # on-policy branch below is never taken here — confirm intent.
        if actor_class in {ActorPPO, ActorDiscretePPO}:  # on-policy
            action, logprob = act.get_action(state=state)
            assert isinstance(logprob, Tensor)
            assert logprob.dtype in {torch.float}
            assert logprob.shape == (batch_size, action_dim)
        else:  # if actor_class in {Actor, ActorSAC, ActorFixSAC}:  # off-policy
            action = act.get_action(state=state)
            assert isinstance(action, Tensor)
            assert action.dtype in {torch.float}
            assert action.shape == (batch_size, action_dim)
            assert torch.any((-1.0 <= action) & (action <= +1.0))
    '''check for agent.update_net()'''
    for actor_class in (ActorSAC, ActorFixSAC):
        print(f" actor_class = {actor_class.__name__}")
        act = actor_class(dims=net_dims, state_dim=state_dim, action_dim=action_dim).to(device)
        # SAC-style actors return per-dimension log-probs and a scalar entropy column
        logprob, entropy = act.get_action_logprob(state)
        assert isinstance(logprob, Tensor)
        assert logprob.dtype in {torch.float}
        assert logprob.shape == (batch_size, action_dim)
        assert isinstance(entropy, Tensor)
        assert entropy.dtype in {torch.float}
        assert entropy.shape == (batch_size, 1)
    for actor_class in (ActorPPO, ActorDiscretePPO):
        print(f" actor_class = {actor_class.__name__}")
        act = actor_class(dims=net_dims, state_dim=state_dim, action_dim=action_dim).to(device)
        action = act(state)
        if actor_class in {ActorDiscretePPO}:
            # discrete actions need an explicit trailing dim before get_logprob_entropy
            action = action.unsqueeze(1)
        logprob, entropy = act.get_logprob_entropy(state, action)
        convert = act.convert_action_for_env
        if actor_class in {ActorDiscretePPO}:
            # NOTE(review): action was already unsqueezed above; this second
            # unsqueeze yields shape (batch, 1, 1) — confirm this is intended.
            action = action.unsqueeze(1)
            assert action.dtype in {torch.int, torch.long}
            _action = convert(action)
            assert _action.dtype in {torch.int, torch.long}
        else:
            assert torch.any((-torch.inf < action) | (action < torch.inf))
            _action = convert(action)
            assert torch.any((-1.0 <= _action) & (_action <= +1.0))
        assert isinstance(logprob, Tensor)
        assert logprob.dtype in {torch.float}
        assert logprob.shape == (batch_size,)
        assert isinstance(entropy, Tensor)
        assert entropy.dtype in {torch.float}
        assert entropy.shape == (batch_size,)
def check_critic(state_dim=4, action_dim=2, batch_size=3, net_dims=(64, 32), gpu_id=0):
    """Smoke-test the critic networks Critic, CriticTwin and CriticPPO:
    every value head must return a float tensor of shape (batch_size,)."""
    print("\n| check_critic()")
    if torch.cuda.is_available() and gpu_id >= 0:
        device = torch.device(f"cuda:{gpu_id}")
    else:
        device = torch.device("cpu")
    state = torch.rand(size=(batch_size, state_dim), dtype=torch.float32, device=device)
    action = torch.rand(size=(batch_size, action_dim), dtype=torch.float32, device=device)

    def _assert_value(tensor):
        # shared shape/dtype check for every critic output
        assert isinstance(tensor, Tensor)
        assert tensor.dtype in {torch.float}
        assert tensor.shape == (batch_size,)

    '''check Critic'''
    from elegantrl.agents.net import Critic
    cri = Critic(dims=net_dims, state_dim=state_dim, action_dim=action_dim).to(device)
    _assert_value(cri(state=state, action=action))

    '''check CriticTwin'''
    from elegantrl.agents.net import CriticTwin
    cri = CriticTwin(dims=net_dims, state_dim=state_dim, action_dim=action_dim).to(device)
    _assert_value(cri(state=state, action=action))
    _assert_value(cri.get_q_min(state=state, action=action))
    q1, q2 = cri.get_q1_q2(state=state, action=action)
    _assert_value(q1)
    _assert_value(q2)

    '''check CriticPPO'''
    from elegantrl.agents.net import CriticPPO
    cri = CriticPPO(dims=net_dims, state_dim=state_dim, action_dim=action_dim).to(device)
    _assert_value(cri(state=state))
def check_build_mlp(net_dims: tuple = (64, 32)):
    """Check that build_mlp(dims) produces a Sequential with
    len(dims) * 2 - 3 layers (Linear + activation pairs, no trailing activation).

    Fix: the original hard-coded `assert len(net) == 1` against the `net_dims`
    parameter, so calling with any non-default argument failed spuriously;
    the expected length is now derived from each dims tuple.
    """
    print("\n| check_build_mlp()")
    from elegantrl.agents.net import build_mlp
    # exercise the caller-supplied dims plus two deeper configurations
    for dims in (tuple(net_dims), (64, 32, 16), (64, 32, 16, 8)):
        net = build_mlp(dims=dims)
        assert isinstance(net, nn.Sequential)
        assert len(net) == len(dims) * 2 - 3
def check_cnn():
    """Smoke-test ConvNet on uint8 HWC image batches of the two supported sizes."""
    print("\n| check_cnn()")
    from elegantrl.agents.net import ConvNet
    inp_dim, out_dim, batch_size = 3, 32, 5
    for image_size in (112, 224):
        print(f" image_size={image_size}")
        conv_net = ConvNet(inp_dim=inp_dim, out_dim=out_dim, image_size=image_size)
        # saturated white image batch, channels-last, as a typical env would emit
        image = torch.full(
            (batch_size, image_size, image_size, inp_dim), 255, dtype=torch.uint8
        )
        output = conv_net(image)
        assert output.dtype in {torch.float}
        assert output.shape == (batch_size, out_dim)
if __name__ == '__main__':
    # run every network smoke test when this file is executed as a script
    print('\n| check_net.py')
    check_net_base()
    check_q_net()
    check_actor()
    check_critic()
    check_build_mlp()
    check_cnn()
| 10,200 | 37.787072 | 103 | py |
ElegantRL | ElegantRL-master/unit_tests/agents/test_agents.py | import gym
import torch
from copy import deepcopy
from typing import Tuple
from torch import Tensor
from elegantrl.train.config import Config, build_env
from elegantrl.train.replay_buffer import ReplayBuffer
from elegantrl.envs.CustomGymEnv import PendulumEnv
def _check_buffer_items_for_off_policy(
buffer_items: Tuple[Tensor, ...], if_discrete: bool,
horizon_len: int, num_envs: int, state_dim: int, action_dim: int
):
states, actions, rewards, undones = buffer_items
assert states.shape == (horizon_len, num_envs, state_dim)
assert states.dtype in {torch.float, torch.int}
if if_discrete:
actions_shape = (horizon_len, num_envs, 1)
actions_dtypes = {torch.int, torch.long}
else:
actions_shape = (horizon_len, num_envs, action_dim)
actions_dtypes = {torch.float, }
assert actions.shape == actions_shape
assert actions.dtype in actions_dtypes
assert rewards.shape == (horizon_len, num_envs)
assert rewards.dtype == torch.float
assert undones.shape == (horizon_len, num_envs)
assert undones.dtype == torch.float # undones is float, instead of int
assert set(undones.squeeze(1).cpu().data.tolist()).issubset({0.0, 1.0}) # undones in {0.0, 1.0}
def _check_buffer_items_for_ppo_style(
buffer_items: Tuple[Tensor, ...], if_discrete: bool,
horizon_len: int, num_envs: int, state_dim: int, action_dim: int
):
states, actions, logprobs, rewards, undones = buffer_items
assert states.shape == (horizon_len, num_envs, state_dim)
assert states.dtype in {torch.float, torch.int}
if if_discrete:
actions_shape = (horizon_len, num_envs, 1)
actions_dtypes = {torch.int, torch.long}
else:
actions_shape = (horizon_len, num_envs, action_dim)
actions_dtypes = {torch.float, }
assert actions.shape == actions_shape
assert actions.dtype in actions_dtypes
assert logprobs.shape == (horizon_len, num_envs)
assert logprobs.dtype == torch.float
assert rewards.shape == (horizon_len, num_envs)
assert rewards.dtype == torch.float
assert undones.shape == (horizon_len, num_envs)
assert undones.dtype == torch.float # undones is float, instead of int
assert set(undones.squeeze(1).cpu().data.tolist()).issubset({0.0, 1.0}) # undones in {0.0, 1.0}
def check_agent_base(state_dim=4, action_dim=2, batch_size=3, net_dims=(64, 32), gpu_id=0):
    """Smoke-test AgentBase via AgentDDPG: construction, expected attributes,
    optimizer updates for actor and critic, and the soft (Polyak) target update.

    Raises AssertionError on any missing attribute or failed update call.
    """
    print("\n| check_agent_base()")
    device = torch.device(f"cuda:{gpu_id}" if (torch.cuda.is_available() and (gpu_id >= 0)) else "cpu")
    state = torch.rand(size=(batch_size, state_dim), dtype=torch.float32, device=device).detach()
    action = torch.rand(size=(batch_size, action_dim), dtype=torch.float32, device=device).detach()
    '''check AgentBase.__init__'''
    from elegantrl.agents.AgentBase import AgentBase
    from elegantrl.agents.AgentDDPG import AgentDDPG
    # build a concrete agent, then re-run the base initializer on it directly
    agent = AgentDDPG(net_dims, state_dim, action_dim, gpu_id=gpu_id, args=Config())
    AgentBase.__init__(agent, net_dims, state_dim, action_dim, gpu_id=gpu_id, args=Config())
    '''check AgentBase attribution'''
    assert hasattr(agent, 'explore_env')
    assert hasattr(agent, 'explore_one_env')
    assert hasattr(agent, 'explore_vec_env')
    assert hasattr(agent, 'update_net')
    assert hasattr(agent, 'get_obj_critic')
    assert hasattr(agent, 'get_obj_critic_raw')
    assert hasattr(agent, 'get_obj_critic_per')
    assert hasattr(agent, 'update_avg_std_for_normalization')
    assert hasattr(agent, 'get_returns')
    # normalization statistics live on the actor and critic networks
    assert hasattr(agent.act, 'state_avg')
    assert hasattr(agent.act, 'state_std')
    assert hasattr(agent.cri, 'state_avg')
    assert hasattr(agent.cri, 'state_std')
    assert hasattr(agent.cri, 'value_avg')
    assert hasattr(agent.cri, 'value_std')
    '''check agent.optimizer'''
    # actor objective: maximize Q of the actor's own action (minimize its negation)
    action_grad = agent.act(state)
    q_value = agent.cri(state, action_grad)
    obj_act = -q_value.mean()
    assert agent.optimizer_update(agent.act_optimizer, obj_act) is None
    # critic objective: regression loss against a zero target
    q_value = agent.cri(state, action)
    obj_cri = agent.criterion(q_value, torch.zeros_like(q_value).detach()).mean()
    assert agent.optimizer_update(agent.cri_optimizer, obj_cri) is None
    current_net = agent.cri
    target_net = deepcopy(agent.cri)
    assert agent.soft_update(target_net=target_net, current_net=current_net, tau=3e-5) is None
def check_agent_dqn_style(batch_size=3, horizon_len=16, net_dims=(64, 32), gpu_id=0):
    """Smoke-test the DQN-family agents on CartPole-v1: explore the env (random
    and greedy), fill a ReplayBuffer, then run one critic/network update.

    Fixes: `env_args['num_envs']` raised KeyError because the literal dict below
    has no such key — read it with a default of 1. The header print also said
    "check_agent_dqn()"; aligned with the function name.
    """
    print("\n| check_agent_dqn_style()")
    env_args = {'env_name': 'CartPole-v1', 'state_dim': 4, 'action_dim': 2, 'if_discrete': True}
    env = build_env(env_class=gym.make, env_args=env_args)
    num_envs = env_args.get('num_envs', 1)  # key absent from the dict above; default single env
    state_dim = env_args['state_dim']
    action_dim = env_args['action_dim']
    if_discrete = env_args['if_discrete']

    '''init agent'''
    from elegantrl.agents.AgentDQN import AgentDQN, AgentDuelingDQN, AgentDoubleDQN, AgentD3QN
    for agent_class in (AgentDQN, AgentDuelingDQN, AgentDoubleDQN, AgentD3QN):
        print(f" agent_class = {agent_class.__name__}")
        # action_dim=1 in the buffer: discrete actions are stored as one index
        buffer = ReplayBuffer(gpu_id=gpu_id, max_size=int(1e4), state_dim=state_dim, action_dim=1)
        args = Config()
        args.batch_size = batch_size
        agent = agent_class(net_dims=net_dims, state_dim=state_dim, action_dim=action_dim, gpu_id=gpu_id, args=args)
        state = torch.tensor(env.reset(), dtype=torch.float32, device=agent.device).unsqueeze(0)
        assert isinstance(state, Tensor)
        assert state.shape == (num_envs, state_dim)
        agent.last_state = state

        '''check for agent.explore_env'''
        for if_random in (True, False):
            print(f" if_random = {if_random}")
            buffer_items = agent.explore_env(env=env, horizon_len=horizon_len, if_random=if_random)
            assert isinstance(agent.last_state, Tensor)
            assert agent.last_state.shape == (num_envs, state_dim)
            _check_buffer_items_for_off_policy(
                buffer_items=buffer_items, if_discrete=if_discrete,
                horizon_len=horizon_len, num_envs=num_envs,
                state_dim=state_dim, action_dim=action_dim
            )
            buffer.update(buffer_items)

        '''check for agent.update_net'''
        buffer.update(buffer_items)
        obj_critic, q_value = agent.get_obj_critic(buffer=buffer, batch_size=batch_size)
        assert obj_critic.shape == ()  # scalar loss
        assert q_value.shape == (batch_size,)
        assert q_value.dtype == torch.float32
        logging_tuple = agent.update_net(buffer=buffer)
        assert isinstance(logging_tuple, tuple)
        assert any(isinstance(item, float) for item in logging_tuple)
        assert len(logging_tuple) >= 2
def check_agent_ddpg_style(batch_size=3, horizon_len=16, net_dims=(64, 32), gpu_id=0):
    """Smoke-test the continuous-action off-policy agents (DDPG, TD3, SAC,
    ModSAC) on Pendulum: explore (random and policy-driven), fill a
    ReplayBuffer, then run one network update.

    Fixes: `env_args['num_envs']` raised KeyError because the literal dict below
    has no such key — read it with a default of 1. The two near-identical
    exploration passes are unified into one loop (adding the last_state asserts
    to the random pass too), matching check_agent_dqn_style.
    """
    print("\n| check_agent_ddpg_style()")
    env_args = {'env_name': 'Pendulum', 'state_dim': 3, 'action_dim': 1, 'if_discrete': False}
    env = build_env(env_class=PendulumEnv, env_args=env_args)
    num_envs = env_args.get('num_envs', 1)  # key absent from the dict above; default single env
    state_dim = env_args['state_dim']
    action_dim = env_args['action_dim']
    if_discrete = env_args['if_discrete']

    '''init agent'''
    from elegantrl.agents.AgentDDPG import AgentDDPG
    from elegantrl.agents.AgentTD3 import AgentTD3
    from elegantrl.agents.AgentSAC import AgentSAC, AgentModSAC
    for agent_class in (AgentDDPG, AgentTD3, AgentSAC, AgentModSAC):
        print(f" agent_class = {agent_class.__name__}")
        buffer = ReplayBuffer(gpu_id=gpu_id, max_size=int(1e4), state_dim=state_dim, action_dim=action_dim)
        args = Config()
        args.batch_size = batch_size
        agent = agent_class(net_dims=net_dims, state_dim=state_dim, action_dim=action_dim, gpu_id=gpu_id, args=args)
        state = torch.tensor(env.reset(), dtype=torch.float32, device=agent.device).unsqueeze(0)
        assert isinstance(state, Tensor)
        assert state.shape == (num_envs, state_dim)
        agent.last_state = state

        '''check for agent.explore_env (random, then policy-driven)'''
        for if_random in (True, False):
            buffer_items = agent.explore_env(env=env, horizon_len=horizon_len, if_random=if_random)
            assert isinstance(agent.last_state, Tensor)
            assert agent.last_state.shape == (num_envs, state_dim)
            _check_buffer_items_for_off_policy(
                buffer_items=buffer_items, if_discrete=if_discrete,
                horizon_len=horizon_len, num_envs=num_envs,
                state_dim=state_dim, action_dim=action_dim
            )
            buffer.update(buffer_items)

        '''check for agent.update_net'''
        buffer.update(buffer_items)
        obj_critic, state = agent.get_obj_critic(buffer=buffer, batch_size=batch_size)
        assert obj_critic.shape == ()  # scalar loss
        assert state.shape == (batch_size, state_dim)
        assert state.dtype in {torch.float, torch.int}
        logging_tuple = agent.update_net(buffer=buffer)
        assert isinstance(logging_tuple, tuple)
        assert any(isinstance(item, float) for item in logging_tuple)
        assert len(logging_tuple) >= 2
def check_agent_ppo_style(batch_size=3, horizon_len=16, net_dims=(64, 32), gpu_id=0):
    """Smoke-test the continuous-action on-policy agents (PPO, A2C) on
    Pendulum: explore, validate the collected trajectory tuple, compute values
    and advantages, then run one network update.

    Fixes: `env_args['num_envs']` raised KeyError because the literal dict below
    has no such key — read it with a default of 1. The header print wrongly said
    "check_agent_ddpg_style()"; aligned with the function name.
    """
    print("\n| check_agent_ppo_style()")
    env_args = {'env_name': 'Pendulum', 'state_dim': 3, 'action_dim': 1, 'if_discrete': False}
    env = build_env(env_class=PendulumEnv, env_args=env_args)
    num_envs = env_args.get('num_envs', 1)  # key absent from the dict above; default single env
    state_dim = env_args['state_dim']
    action_dim = env_args['action_dim']
    if_discrete = env_args['if_discrete']

    '''init agent'''
    from elegantrl.agents.AgentPPO import AgentPPO  # , AgentDiscretePPO
    from elegantrl.agents.AgentA2C import AgentA2C  # , AgentDiscreteA2C
    for agent_class in (AgentPPO, AgentA2C):
        print(f" agent_class = {agent_class.__name__}")
        args = Config()
        args.batch_size = batch_size
        agent = agent_class(net_dims=net_dims, state_dim=state_dim, action_dim=action_dim, gpu_id=gpu_id, args=args)
        state = torch.tensor(env.reset(), dtype=torch.float32, device=agent.device).unsqueeze(0)
        assert isinstance(state, Tensor)
        assert state.shape == (num_envs, state_dim)
        agent.last_state = state

        '''check for agent.explore_env'''
        buffer_items = agent.explore_env(env=env, horizon_len=horizon_len)
        assert isinstance(agent.last_state, Tensor)
        assert agent.last_state.shape == (num_envs, state_dim)
        _check_buffer_items_for_ppo_style(
            buffer_items=buffer_items, if_discrete=if_discrete,
            horizon_len=horizon_len, num_envs=num_envs,
            state_dim=state_dim, action_dim=action_dim,
        )

        '''check for agent.update_net'''
        states, actions, logprobs, rewards, undones = buffer_items
        values = agent.cri(states)
        assert values.shape == (horizon_len, num_envs)
        advantages = agent.get_advantages(rewards, undones, values)
        assert advantages.shape == (horizon_len, num_envs)
        logging_tuple = agent.update_net(buffer=buffer_items)
        assert isinstance(logging_tuple, tuple)
        assert any(isinstance(item, float) for item in logging_tuple)
        assert len(logging_tuple) >= 2
def check_agent_ppo_discrete_style(batch_size=3, horizon_len=16, net_dims=(64, 32), gpu_id=0):
    """Smoke-test the discrete-action on-policy agents (DiscretePPO,
    DiscreteA2C) on CartPole-v1: explore, validate the trajectory tuple,
    compute values and advantages, then run one network update.

    Fix: `env_args['num_envs']` raised KeyError because the literal dict below
    has no such key — read it with a default of 1.
    """
    print("\n| check_agent_ppo_discrete_style()")
    env_args = {'env_name': 'CartPole-v1', 'state_dim': 4, 'action_dim': 2, 'if_discrete': True}
    env = build_env(env_class=gym.make, env_args=env_args)
    num_envs = env_args.get('num_envs', 1)  # key absent from the dict above; default single env
    state_dim = env_args['state_dim']
    action_dim = env_args['action_dim']
    if_discrete = env_args['if_discrete']

    '''init agent'''
    from elegantrl.agents.AgentPPO import AgentDiscretePPO
    from elegantrl.agents.AgentA2C import AgentDiscreteA2C
    for agent_class in (AgentDiscretePPO, AgentDiscreteA2C):
        print(f" agent_class = {agent_class.__name__}")
        args = Config()
        args.batch_size = batch_size
        agent = agent_class(net_dims=net_dims, state_dim=state_dim, action_dim=action_dim, gpu_id=gpu_id, args=args)
        state = torch.tensor(env.reset(), dtype=torch.float32, device=agent.device).unsqueeze(0)
        assert isinstance(state, Tensor)
        assert state.shape == (num_envs, state_dim)
        agent.last_state = state

        '''check for agent.explore_env'''
        buffer_items = agent.explore_env(env=env, horizon_len=horizon_len)
        _check_buffer_items_for_ppo_style(
            buffer_items=buffer_items, if_discrete=if_discrete,
            horizon_len=horizon_len, num_envs=num_envs,
            state_dim=state_dim, action_dim=action_dim,
        )

        '''check for agent.update_net'''
        states, actions, logprobs, rewards, undones = buffer_items
        values = agent.cri(states)
        assert values.shape == (horizon_len, num_envs)
        advantages = agent.get_advantages(rewards, undones, values)
        assert advantages.shape == (horizon_len, num_envs)
        logging_tuple = agent.update_net(buffer=buffer_items)
        assert isinstance(logging_tuple, tuple)
        assert any(isinstance(item, float) for item in logging_tuple)
        assert len(logging_tuple) >= 2
if __name__ == '__main__':
    # run every agent smoke test when this file is executed as a script
    print('\n| check_agents.py.')
    check_agent_base()
    check_agent_dqn_style()
    check_agent_ddpg_style()
    check_agent_ppo_style()
    check_agent_ppo_discrete_style()
| 13,828 | 40.653614 | 116 | py |
ElegantRL | ElegantRL-master/unit_tests/train/test_config.py | import os
import gym
import torch
import numpy as np
from unittest.mock import patch
from torch import Tensor
from numpy import ndarray
from elegantrl.train.config import Config
from elegantrl.envs.CustomGymEnv import PendulumEnv
from elegantrl.envs.PointChasingEnv import PointChasingEnv
from elegantrl.agents.AgentDQN import AgentDQN
from elegantrl.agents.AgentSAC import AgentSAC
from elegantrl.agents.AgentPPO import AgentPPO
EnvArgsPendulum = {'env_name': 'Pendulum-v1', 'state_dim': 3, 'action_dim': 1, 'if_discrete': False}
EnvArgsCartPole = {'env_name': 'CartPole-v1', 'state_dim': 4, 'action_dim': 2, 'if_discrete': True}
def test_config():
    """Check Config.get_if_off_policy() for several agent/env combinations and
    the create/remove behaviour of init_before_training()."""
    print("\n| test_config()")
    assert Config().get_if_off_policy() is True  # dummy Config defaults to off-policy

    # (agent_class, env_class, env_args, expected off-policy flag)
    combos = (
        (AgentDQN, gym.make, EnvArgsCartPole, True),
        (AgentSAC, PendulumEnv, EnvArgsPendulum, True),
        (AgentPPO, PendulumEnv, EnvArgsPendulum, False),
    )
    for agent_class, env_class, env_args, if_off_policy in combos:
        args = Config(agent_class=agent_class, env_class=env_class, env_args=env_args)
        assert args.get_if_off_policy() is if_off_policy

    # `args` now holds the PPO config; exercise cwd creation twice per flag
    for if_remove in (False, True):
        args.if_remove = if_remove
        args.init_before_training()  # os.path.exists(args.cwd) == False
        args.init_before_training()  # os.path.exists(args.cwd) == True
        assert os.path.exists(args.cwd)
        os.rmdir(args.cwd)
@patch('builtins.input', lambda *args: 'input_str')
def _tutorial_unittest_mock_patch():
print('Print_input():', input())
@patch('builtins.input', lambda *args: 'y')
def _config_init_before_training_yes():
    """With if_remove=None, init_before_training() asks via input(); the mocked
    answer 'y' takes the remove-existing-cwd path. cwd must exist afterwards."""
    print("\n| test_config_init_before_training_yes()")
    env_args = EnvArgsPendulum
    env_class = gym.make
    args = Config(agent_class=AgentSAC, env_class=env_class, env_args=env_args)
    args.if_remove = None
    args.init_before_training()
    assert os.path.exists(args.cwd)
    os.rmdir(args.cwd)  # clean up the directory created by the test
@patch('builtins.input', lambda *args: 'n')
def _config_init_before_training_no():
    """With if_remove=None, init_before_training() asks via input(); the mocked
    answer 'n' takes the keep-existing-cwd path. cwd must exist afterwards."""
    print("\n| test_config_init_before_training_no()")
    env_args = EnvArgsPendulum
    env_class = PendulumEnv
    args = Config(agent_class=AgentSAC, env_class=env_class, env_args=env_args)
    args.if_remove = None
    args.init_before_training()
    assert os.path.exists(args.cwd)
    os.rmdir(args.cwd)  # clean up the directory created by the test
def test_config_init_before_training():
    """Run the three input()-mocking variants of Config.init_before_training()."""
    print("\n| test_config_init_before_training()")
    for check_func in (_tutorial_unittest_mock_patch,
                       _config_init_before_training_yes,
                       _config_init_before_training_no):
        check_func()
def test_kwargs_filter():
    """kwargs_filter should keep only the keys that env_class.__init__ accepts,
    so the filtered dict can be splatted into the constructor."""
    print("\n| test_kwargs_filter()")
    from elegantrl.train.config import kwargs_filter
    dim = 2
    env_args = {'env_name': 'PointChasingEnv',
                'state_dim': 2 * dim,
                'action_dim': dim,
                'if_discrete': False,
                'dim': dim}
    env_class = PointChasingEnv
    filtered_kwargs = kwargs_filter(env_class.__init__, env_args.copy())
    env = env_class(**filtered_kwargs)
    for attr_name in ('reset', 'step'):
        assert hasattr(env, attr_name)
def test_build_env():
    """Check build_env() for single envs (numpy interface) and for vectorized
    envs built with if_build_vec_env=True (torch tensor interface)."""
    print("\n| test_build_env()")
    from elegantrl.train.config import build_env
    '''check single env '''
    env_args_env_class_list = (
        (EnvArgsCartPole, gym.make),  # discrete action space
        (EnvArgsPendulum, PendulumEnv),  # continuous action space
    )
    for env_args, env_class in env_args_env_class_list:
        env_name = env_args['env_name']
        state_dim = env_args['state_dim']
        action_dim = env_args['action_dim']
        if_discrete = env_args['if_discrete']
        print(f" env_name = {env_name}")
        env = build_env(env_class=env_class, env_args=env_args)
        # build_env must attach the standard metadata attributes
        assert isinstance(env.env_name, str)
        assert isinstance(env.state_dim, int)
        assert isinstance(env.action_dim, int)
        assert isinstance(env.if_discrete, bool)
        state = env.reset()
        assert isinstance(state, ndarray)
        assert state.shape == (state_dim,)
        for _ in range(4):
            # random action: integer index for discrete, uniform in [-1, 1) otherwise
            if if_discrete:
                action = np.random.randint(action_dim)
            else:
                action = np.random.rand(action_dim) * 2. - 1.
            state, reward, done, info_dict = env.step(action)
            assert isinstance(state, ndarray)
            assert state.shape == (state_dim,)
            assert isinstance(reward, float)
            assert isinstance(done, bool)
            assert not done
    '''check vectorized env (if_build_vec_env=True)'''
    gpu_id = -1
    num_envs = 4
    env_args_env_class_list = (
        (EnvArgsCartPole, gym.make),  # discrete action space
        (EnvArgsPendulum, PendulumEnv),  # continuous action space
    )
    for env_args, env_class in env_args_env_class_list:
        _env_args = env_args.copy()
        _env_args['num_envs'] = num_envs
        _env_args['if_build_vec_env'] = True
        env_name = _env_args['env_name']
        state_dim = _env_args['state_dim']
        action_dim = _env_args['action_dim']
        if_discrete = _env_args['if_discrete']
        print(f" env_name = {env_name} if_build_vec_env = True")
        env = build_env(env_class=env_class, env_args=_env_args, gpu_id=gpu_id)
        assert isinstance(env.env_name, str)
        assert isinstance(env.state_dim, int)
        assert isinstance(env.action_dim, int)
        assert isinstance(env.if_discrete, bool)
        # vectorized envs speak torch tensors with a leading num_envs dim
        states = env.reset()
        assert isinstance(states, Tensor)
        assert states.shape == (num_envs, state_dim)
        for _ in range(4):
            if if_discrete:
                action = torch.randint(action_dim, size=(num_envs, 1))
            else:
                action = torch.rand(num_envs, action_dim)
            state, reward, done, info_dict = env.step(action)
            assert isinstance(state, Tensor)
            assert state.dtype is torch.float
            assert state.shape == (num_envs, state_dim,)
            assert isinstance(reward, Tensor)
            assert reward.dtype is torch.float
            assert reward.shape == (num_envs,)
            assert isinstance(done, Tensor)
            assert done.dtype is torch.bool
            assert done.shape == (num_envs,)
        env.close()  # shut down the sub-processes of the vectorized env
def test_get_gym_env_args():
    """Check that get_gym_env_args() reports name/dims/discreteness for a built
    env.

    Improvement: the original duplicated the same five statements verbatim for
    each env; loop over the same (env_args, env_class) pairs the sibling tests
    use, for consistency.
    """
    print("\n| test_get_gym_env_args()")
    from elegantrl.train.config import build_env
    from elegantrl.train.config import get_gym_env_args
    for env_args, env_class in ((EnvArgsCartPole, gym.make),  # discrete action space
                                (EnvArgsPendulum, PendulumEnv)):  # continuous action space
        env = build_env(env_class=env_class, env_args=env_args)
        env_info = get_gym_env_args(env, if_print=True)
        assert isinstance(env_info['env_name'], str)
        assert isinstance(env_info['state_dim'], int)
        assert isinstance(env_info['action_dim'], int)
        assert isinstance(env_info['if_discrete'], bool)
def test_sub_env():
    """Check SubEnv, the per-environment worker process: drive it over its two
    pipes (command in, result out) through reset and step cycles."""
    print("\n| test_sub_env()")
    from elegantrl.train.config import SubEnv
    from multiprocessing import Pipe
    sub_pipe0, sub_pipe1 = Pipe(duplex=False)  # recv, send
    vec_pipe0, vec_pipe1 = Pipe(duplex=False)  # recv, send
    env_args = EnvArgsPendulum
    env_class = PendulumEnv
    env_id = 0
    state_dim = env_args['state_dim']
    action_dim = env_args['action_dim']
    if_discrete = env_args['if_discrete']
    '''build sub_env'''
    sub_env = SubEnv(sub_pipe0=sub_pipe0, vec_pipe1=vec_pipe1,
                     env_class=env_class, env_args=env_args, env_id=env_id)
    sub_env.start()  # launch the worker process
    '''check reset'''
    for i in range(2):
        print(f" test_sub_env() loop:{i}")
        sub_pipe1.send(None)  # sending None commands a reset
        _env_id, state = vec_pipe0.recv()
        assert _env_id == env_id
        assert isinstance(state, ndarray)
        assert state.shape == (state_dim,)
        '''check step loop'''
        for _ in range(2):
            action = torch.ones(action_dim, dtype=torch.float32).detach().numpy()
            # NOTE(review): if_discrete is False for Pendulum, so this branch is
            # dead here; squeeze(1) on a 1-D action would raise — confirm before
            # reusing this test with a discrete env.
            if if_discrete:
                action = action.squeeze(1)
            sub_pipe1.send(action)  # sending an array commands a step
            _env_id, state, reward, done, info_dict = vec_pipe0.recv()
            assert _env_id == env_id
            assert isinstance(state, ndarray)
            assert state.shape == (state_dim,)
            assert isinstance(reward, float)
            assert isinstance(done, bool)
            assert not done
    sub_env.terminate()  # stop the worker process
def test_vec_env():
    """Check VecEnv directly (without going through build_env): tensor-based
    reset/step for both a discrete and a continuous env, vectorized 4 ways."""
    print("\n| test_vec_env()")
    from elegantrl.train.config import VecEnv
    '''check for elegantrl.train.config build_env()'''
    gpu_id = -1
    num_envs = 4
    env_args_env_class_list = (
        (EnvArgsCartPole, gym.make),  # discrete action space
        (EnvArgsPendulum, PendulumEnv),  # continuous action space
    )
    for env_args, env_class in env_args_env_class_list:
        _env_args = env_args.copy()
        _env_args['num_envs'] = num_envs
        _env_args['if_build_vec_env'] = True
        env_name = _env_args['env_name']
        state_dim = _env_args['state_dim']
        action_dim = _env_args['action_dim']
        if_discrete = _env_args['if_discrete']
        print(f" env_name = {env_name} if_build_vec_env = True")
        # env = build_env(env_class=env_class, env_args=_env_args, gpu_id=gpu_id)
        env = VecEnv(env_class=env_class, env_args=_env_args, num_envs=num_envs, gpu_id=gpu_id)
        assert isinstance(env.env_name, str)
        assert isinstance(env.state_dim, int)
        assert isinstance(env.action_dim, int)
        assert isinstance(env.if_discrete, bool)
        # vectorized envs speak torch tensors with a leading num_envs dim
        states = env.reset()
        assert isinstance(states, Tensor)
        assert states.shape == (num_envs, state_dim)
        for _ in range(4):
            if if_discrete:
                action = torch.randint(action_dim, size=(num_envs, 1))
            else:
                action = torch.rand(num_envs, action_dim)
            state, reward, done, info_dict = env.step(action)
            assert isinstance(state, Tensor)
            assert state.dtype is torch.float
            assert state.shape == (num_envs, state_dim,)
            assert isinstance(reward, Tensor)
            assert reward.dtype is torch.float
            assert reward.shape == (num_envs,)
            assert isinstance(done, Tensor)
            assert done.dtype is torch.bool
            assert done.shape == (num_envs,)
        env.close()  # shut down the sub-processes of the vectorized env
if __name__ == '__main__':
    # run every config/env-builder test when this file is executed as a script
    print("\n| test_config.py")
    test_config()
    test_config_init_before_training()
    test_build_env()
    test_kwargs_filter()
    test_get_gym_env_args()
    test_sub_env()
    test_vec_env()
| 11,194 | 32.71988 | 100 | py |
SE_unified | SE_unified-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# Spectral Ewald documentation build configuration file, created by
# sphinx-quickstart on Sun Jan 31 11:17:54 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
matlab_src_dir = os.path.abspath('..')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.mathjax',
'sphinx.ext.autodoc',
'sphinxcontrib.matlab']
primary_domain = 'mat'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Spectral Ewald'
copyright = u'2016, Ludvig af Klinteberg, Davoud Saffar Shamshirgar, Dag Lindbo'
author = u'Ludvig af Klinteberg, Davoud Saffar Shamshirgar, Dag Lindbo'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'SpectralEwalddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'SpectralEwald.tex', u'Spectral Ewald Documentation',
u'Ludvig af Klinteberg, Davoud Saffar Shamshirgar, Dag Lindbo', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'spectralewald', u'Spectral Ewald Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'SpectralEwald', u'Spectral Ewald Documentation',
author, 'SpectralEwald', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 9,559 | 31.852234 | 80 | py |
pysptk | pysptk-master/setup.py | import os
import subprocess
from distutils.version import LooseVersion
from glob import glob
from os.path import join
import setuptools.command.build_py
import setuptools.command.develop
from setuptools import Extension, find_packages, setup
# Base version; dev builds get a local "+<short-sha>" suffix appended below.
version = "0.2.0"
# Adapted from https://github.com/pytorch/pytorch
cwd = os.path.dirname(os.path.abspath(__file__))
if os.getenv("PYSPTK_BUILD_VERSION"):
    # Release/CI builds pin the exact version through the environment.
    version = os.getenv("PYSPTK_BUILD_VERSION")
else:
    try:
        # Tag development builds with the current commit, e.g. "0.2.0+abc1234".
        sha = (
            subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cwd)
            .decode("ascii")
            .strip()
        )
        version += "+" + sha[:7]
    except subprocess.CalledProcessError:
        # Not a git checkout (e.g. building from an sdist): keep base version.
        pass
    except IOError:  # FileNotFoundError for python 3: git binary not on PATH
        pass
class build_py(setuptools.command.build_py.build_py):
    """``build_py`` variant that records the resolved version first.

    Writing ``pysptk/version.py`` before the normal build guarantees the
    built distribution ships the exact version string (including any
    git-sha suffix) computed at build time.
    """

    def run(self):
        self.create_version_file()
        super().run()

    @staticmethod
    def create_version_file():
        # Reads the module-level `version` and `cwd` resolved above.
        global version, cwd
        print("-- Building version " + version)
        target = os.path.join(cwd, "pysptk", "version.py")
        with open(target, "w") as fh:
            fh.write(f"__version__ = '{version}'\n")
class develop(setuptools.command.develop.develop):
    """``develop`` variant that records the resolved version.

    Keeps ``pip install -e .`` consistent with regular builds by emitting
    ``pysptk/version.py`` before deferring to the stock develop command.
    """

    def run(self):
        build_py.create_version_file()
        super().run()
cmdclass = {"build_py": build_py, "develop": develop}
# Minimum Cython version able to regenerate the C sources from the .pyx files.
min_cython_ver = "0.28.0"
try:
    import Cython

    ver = Cython.__version__
    # NOTE(review): compares a version *string* against a LooseVersion; this
    # works via LooseVersion's reflected comparison, but LooseVersion is
    # deprecated (PEP 632) -- packaging.version would be the modern choice.
    _CYTHON_INSTALLED = ver >= LooseVersion(min_cython_ver)
except ImportError:
    _CYTHON_INSTALLED = False
try:
    if not _CYTHON_INSTALLED:
        raise ImportError("No supported version of Cython installed.")
    from Cython.Distutils import build_ext

    cython = True
except ImportError:
    # Fall back to compiling the pre-generated C file (see `ext` below).
    cython = False
# Imported unconditionally: the build_ext subclass below derives from it
# regardless of whether Cython is available.
from setuptools.command.build_ext import build_ext as _build_ext
class build_ext(_build_ext):
    """``build_ext`` that adds numpy's C header directory at build time.

    numpy is imported lazily inside ``finalize_options`` so that this
    setup.py can be *parsed* (and ``setup_requires`` resolved) before
    numpy has been installed.
    """

    # https://stackoverflow.com/questions/19919905/how-to-bootstrap-numpy-installation-in-setup-py # noqa
    def finalize_options(self):
        _build_ext.finalize_options(self)
        # Prevent numpy from thinking it is still in its setup process:
        __builtins__.__NUMPY_SETUP__ = False
        import numpy

        self.include_dirs.append(numpy.get_include())
# Header search path for the bundled SPTK sources.
include_dirs = [join(os.getcwd(), "lib", "SPTK", "include")]
cmdclass["build_ext"] = build_ext
if cython:
    # Build from the Cython source; numpy headers go first on the path.
    ext = ".pyx"
    import numpy as np

    include_dirs.insert(0, np.get_include())
else:
    # No Cython: require the pre-generated C file shipped in sdists.
    ext = ".c"
    if not os.path.exists(join("pysptk", "_sptk" + ext)):
        raise RuntimeError("Cython is required to generate C code.")
# SPTK sources
src_top = join("lib", "SPTK")
src_bin_top = join(src_top, "bin")
# Pitch extractors bundled with SPTK (SWIPE' and Snack/RAPT).
swipe_src = [
    join(src_bin_top, "pitch", "swipe", "swipe.c"),
    join(src_bin_top, "pitch", "swipe", "vector.c"),
]
rapt_src = [
    join(src_bin_top, "pitch", "snack", "jkGetF0.c"),
    join(src_bin_top, "pitch", "snack", "sigproc.c"),
]
sptklib_src = glob(join(src_top, "lib", "*.c"))
sptk_src = glob(join(src_bin_top, "*", "_*.c"))
# collect all sources
sptk_all_src = sptk_src + sptklib_src + swipe_src + rapt_src
# Filter ignore list
ignore_bin_list = [
    join(src_bin_top, "wavjoin"),
    join(src_bin_top, "wavsplit"),
    join(src_bin_top, "vc"),
]
for ignore in ignore_bin_list:
    # `ig=ignore` binds the loop variable eagerly (avoids late-binding closure).
    sptk_all_src = list(filter(lambda s, ig=ignore: not s.startswith(ig), sptk_all_src))
# define core cython module
ext_modules = [
    Extension(
        name="pysptk._sptk",
        # One extension bundles the Cython wrapper plus every SPTK C source.
        sources=[join("pysptk", "_sptk" + ext)] + sptk_all_src,
        include_dirs=include_dirs,
        language="c",
        extra_compile_args=["-std=c99"],
    )
]
# PyPI long description is taken verbatim from the README.
with open("README.md", "r") as fh:
    LONG_DESC = fh.read()
# Package metadata and build configuration.
setup(
    name="pysptk",
    version=version,
    description="A python wrapper for Speech Signal Processing Toolkit (SPTK)",
    long_description=LONG_DESC,
    long_description_content_type="text/markdown",
    author="Ryuichi Yamamoto",
    author_email="zryuichi@gmail.com",
    url="https://github.com/r9y9/pysptk",
    license="MIT",
    packages=find_packages(exclude=["tests", "examples"]),
    package_data={"": ["example_audio_data/*"]},
    ext_modules=ext_modules,
    cmdclass=cmdclass,
    # numpy must be available at build time for build_ext.finalize_options().
    setup_requires=["numpy >= 1.20.0"],
    install_requires=[
        "scipy",
        "decorator",
        "cython >= " + min_cython_ver,
    ],
    tests_require=["pytest", "pytest-cov", "coverage"],
    extras_require={
        "docs": ["numpydoc", "sphinx_rtd_theme", "seaborn"],
        "test": ["pytest", "pytest-cov", "coverage", "flake8"],
        "lint": [
            "pysen",
            "types-setuptools",
            "mypy<=0.910",
            "black>=19.19b0,<=20.8",
            "click<8.1.0",
            "flake8>=3.7,<4",
            "flake8-bugbear",
            "isort>=4.3,<5.2.0",
            "types-decorator",
            "importlib-metadata<5.0",
        ],
    },
    classifiers=[
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX",
        "Operating System :: Unix",
        "Operating System :: MacOS",
        "Programming Language :: Cython",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "License :: OSI Approved :: MIT License",
        "Topic :: Scientific/Engineering",
        "Topic :: Software Development",
        "Intended Audience :: Science/Research",
        "Intended Audience :: Developers",
    ],
    keywords=["SPTK"],
)
| 5,741 | 28.751295 | 109 | py |
pysptk | pysptk-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# pysptk documentation build configuration file, created by
# sphinx-quickstart on Fri Sep 4 18:38:55 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import pkg_resources
__version__ = pkg_resources.get_distribution("pysptk").version
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
ON_RTD = os.environ.get("READTHEDOCS", None) == "True"
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"numpydoc",
"matplotlib.sphinxext.plot_directive",
]
if ON_RTD:
# Remove extensions not currently supported on RTD
extensions.remove("matplotlib.sphinxext.plot_directive")
autosummary_generate = True
numpydoc_show_class_members = False
# Most of plotting settings are copy and pasted from librosa
# https://github.com/bmcfee/librosa
if not ON_RTD:
    # Determine if the matplotlib has a recent enough version of the
    # plot_directive.
    try:
        from matplotlib.sphinxext import plot_directive
    except ImportError:
        use_matplotlib_plot_directive = False
    else:
        try:
            # NOTE(review): recent matplotlib no longer exposes __version__ on
            # this module, so this falls into the AttributeError branch -- verify.
            print("plot_directive.__version__:", plot_directive.__version__)
            use_matplotlib_plot_directive = plot_directive.__version__ >= 2
        except AttributeError:
            use_matplotlib_plot_directive = False
    if use_matplotlib_plot_directive:
        # NOTE(review): "matplotlib.sphinxext.plot_directive" is already listed
        # in `extensions` above; this append appears to duplicate it -- confirm.
        extensions.append("matplotlib.sphinxext.plot_directive")
    else:
        raise RuntimeError("You need a recent enough version of matplotlib")
# ------------------------------------------------------------------------------
# Plot
# ------------------------------------------------------------------------------
plot_pre_code = """
import seaborn
seaborn.set(style='ticks')
import numpy as np
import pysptk
np.random.seed(123)
np.set_printoptions(precision=3, linewidth=64, edgeitems=2, threshold=200)
"""
plot_include_source = True
plot_formats = [("png", 96), "pdf"]
plot_html_show_formats = False
font_size = 13 * 72 / 96.0 # 13 px
plot_rcparams = {
"font.size": font_size,
"axes.titlesize": font_size,
"axes.labelsize": font_size,
"xtick.labelsize": font_size,
"ytick.labelsize": font_size,
"legend.fontsize": font_size,
"figure.subplot.bottom": 0.2,
"figure.subplot.left": 0.2,
"figure.subplot.right": 0.9,
"figure.subplot.top": 0.85,
"figure.subplot.wspace": 0.4,
"text.usetex": False,
}
if not ON_RTD:
import matplotlib
matplotlib.rcParams.update(plot_rcparams)
# Generate plots for example sections
numpydoc_use_plots = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "pysptk"
copyright = "2015, Ryuichi YAMAMOTO"
author = "Ryuichi YAMAMOTO"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"collapse_navigation": False,
"display_version": True,
"logo_only": True,
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "pysptkdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "pysptk.tex", "pysptk Documentation", "Ryuichi YAMAMOTO", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pysptk", "pysptk Documentation", [author], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"pysptk",
"pysptk Documentation",
author,
"pysptk",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
| 11,435 | 30.67867 | 85 | py |
cutgeneratingfunctionology | cutgeneratingfunctionology-master/docs/source/conf.py | # -*- coding: utf-8 -*-
#
# documentation build configuration file,
# from sage_sample, which was in turn
# inspired by slabbe configuration file created sphinx-quickstart
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# General information about the project.
import six
project = u"cutgeneratingfunctionology"
copyright = u'2013-2019, Matthias Koeppe, Yuan Zhou, Chun Yu Hong, Jiawei Wang'
package_name = 'cutgeneratingfunctionology'
package_folder = "../../"
authors = u"2013-2019, Matthias Koeppe, Yuan Zhou, Chun Yu Hong, Jiawei Wang"
import sys
import os
from sage.env import SAGE_DOC_SRC, SAGE_DOC, SAGE_SRC
try:
import sage.all
except ImportError:
raise RuntimeError("to build the documentation you need to be inside a Sage shell (run first the command 'sage -sh' in a shell")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(package_folder))
sys.path.append(os.path.join(SAGE_SRC, "sage_setup", "docbuild", "ext"))
print("Using sys.path = {}".format(sys.path))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
#'sage_autodoc', ## Not available on conda-forge sage!
'sage_package.sphinx',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.extlinks',
'matplotlib.sphinxext.plot_directive',
'sphinxcontrib.bibtex'
]
### from Sage src/doc/common/conf.py
# This code is executed before each ".. PLOT::" directive in the Sphinx
# documentation. It defines a 'sphinx_plot' function that displays a Sage object
# through matplotlib, so that it will be displayed in the HTML doc.
plot_html_show_source_link = False
plot_pre_code = """
def sphinx_plot(graphics, **kwds):
import matplotlib.image as mpimg
from sage.misc.temporary_file import tmp_filename
import matplotlib.pyplot as plt
## Option handling is taken from Graphics.save
try:
from sage.plot.multigraphics import GraphicsArray
except ImportError:
from sage.plot.graphics import GraphicsArray
options = dict()
if not isinstance(graphics, GraphicsArray):
options.update(graphics.SHOW_OPTIONS)
options.update(graphics._extra_kwds)
options.update(kwds)
dpi = options.pop('dpi', None)
transparent = options.pop('transparent', None)
fig_tight = options.pop('fig_tight', None)
figsize = options.pop('figsize', None)
## figsize handling is taken from Graphics.matplotlib()
if figsize is not None and not isinstance(figsize, (list, tuple)):
# in this case, figsize is a number and should be positive
try:
figsize = float(figsize) # to pass to mpl
except TypeError:
raise TypeError("figsize should be a positive number, not {0}".format(figsize))
if figsize > 0:
default_width, default_height=rcParams['figure.figsize']
figsize=(figsize, default_height*figsize/default_width)
else:
raise ValueError("figsize should be positive, not {0}".format(figsize))
if figsize is not None:
# then the figsize should be two positive numbers
if len(figsize) != 2:
raise ValueError("figsize should be a positive number "
"or a list of two positive numbers, not {0}".format(figsize))
figsize = (float(figsize[0]),float(figsize[1])) # floats for mpl
if not (figsize[0] > 0 and figsize[1] > 0):
raise ValueError("figsize should be positive numbers, "
"not {0} and {1}".format(figsize[0],figsize[1]))
plt.figure(figsize=figsize)
if isinstance(graphics, GraphicsArray):
## from GraphicsArray.save
figure = plt.gcf()
rows = graphics.nrows()
cols = graphics.ncols()
for i, g in enumerate(graphics):
subplot = figure.add_subplot(rows, cols, i + 1)
g_options = copy(options)
g_options.update(g.SHOW_OPTIONS)
g_options.update(g._extra_kwds)
g_options.pop('dpi', None)
g_options.pop('transparent', None)
g_options.pop('fig_tight', None)
g.matplotlib(figure=figure, sub=subplot, **g_options)
else:
figure = graphics.matplotlib(figure=plt.gcf(), figsize=figsize, **options)
plt.tight_layout(pad=0)
plt.margins(0)
plt.show()
from sage.all_cmdline import *
"""
plot_html_show_formats = False
plot_formats = ['svg', 'pdf', 'png']
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
templates_path = [os.path.join(SAGE_DOC_SRC, 'common', 'templates'), '_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
from pkg_resources import get_distribution, DistributionNotFound

# The full version, including alpha/beta/rc tags.
try:
    release = get_distribution('cutgeneratingfunctionology').version
except DistributionNotFound:
    # Package not installed (e.g. docs built from a bare checkout):
    # fall back to a placeholder release string.
    release = "1.4.xyz"
print("############# release reported: {} ##################".format(release))
# The short X.Y version.
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'math'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sage'
html_theme_path = ['../themes']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = package_name + "doc"
# -- Options for LaTeX output ---------------------------------------------
# Extra settings injected into the generated LaTeX document.
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# (Extended further below with index tweaks and Sage macros.)
'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', package_name + '.tex', u'Documentation of ' + six.text_type(package_name),
 authors, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', package_name, six.text_type(package_name) + u" documentation",
 [authors], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', package_name, six.text_type(package_name) + u" documentation",
 authors, package_name, project,
 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options copied from Sagemath conf.py file -------------------------------
# We use MathJax to build the documentation unless the environment
# variable SAGE_DOC_MATHJAX is set to "no" or "False". (Note that if
# the user does not set this variable, then the script sage-env sets
# it to "True".)
if (os.environ.get('SAGE_DOC_MATHJAX', 'no') != 'no'
    and os.environ.get('SAGE_DOC_MATHJAX', 'no') != 'False'):
    extensions.append('sphinx.ext.mathjax')
    mathjax_path = 'MathJax.js?config=TeX-AMS_HTML-full,../mathjax_sage.js'
    from sage.misc.latex_macros import sage_mathjax_macros
    # this is broken for now
    # html_theme_options['mathjax_macros'] = sage_mathjax_macros()
    # Locate the MathJax copy shipped inside the installed sagenb package.
    from pkg_resources import Requirement, working_set
    sagenb_path = working_set.find(Requirement.parse('sagenb')).location
    mathjax_relative = os.path.join('sagenb','data','mathjax')
    # It would be really nice if sphinx would copy the entire mathjax directory,
    # (so we could have a _static/mathjax directory), rather than the contents of the directory
    mathjax_static = os.path.join(sagenb_path, mathjax_relative)
    html_static_path.append(mathjax_static)
    # Do not copy MathJax's docs/tests/etc. into the build output.
    exclude_patterns=['**/'+os.path.join(mathjax_relative, i) for i in ('docs', 'README*', 'test',
                                                                       'unpacked', 'LICENSE')]
    # Also look in the places conda / a Sage distribution install MathJax.
    from sage.env import SAGE_LOCAL, SAGE_SHARE
    html_static_path.append(SAGE_LOCAL + "/lib/mathjax")  # conda
    html_static_path.append(SAGE_SHARE + "/mathjax")  # sage distribution
else:
    # No MathJax: render formulas to PNG images instead.
    extensions.append('sphinx.ext.pngmath')
class CustomLatexFormatter(LatexFormatter):
    """Pygments LaTeX formatter that renders verbatim blocks in \\footnotesize."""
    def __init__(self, **options):
        super(CustomLatexFormatter, self).__init__(**options)
        # Passed through to the LaTeX Verbatim environment for every
        # highlighted code block (the Verbatim env does not break long lines,
        # so a smaller font keeps them on the page).
        self.verboptions = r"formatcom=\footnotesize"
# Make Sphinx use this smaller-font formatter for all highlighted code.
PygmentsBridge.latex_formatter = CustomLatexFormatter
# Use a one-column index and the txtt typewriter font in the PDF output.
latex_elements['preamble'] += r'''
% One-column index
\makeatletter
\renewenvironment{theindex}{
\chapter*{\indexname}
\markboth{\MakeUppercase\indexname}{\MakeUppercase\indexname}
\setlength{\parskip}{0.1em}
\relax
\let\item\@idxitem
}{}
\makeatother
\renewcommand{\ttdefault}{txtt}
'''
#####################################################
# Add the Sage LaTeX macros to both output paths.
from sage.misc.latex_macros import sage_latex_macros
try:
    pngmath_latex_preamble  # check whether this is already defined
except NameError:
    pngmath_latex_preamble = ""
for macro in sage_latex_macros():
    # used when building latex and pdf versions
    latex_elements['preamble'] += macro + '\n'
    # used when building html version
    pngmath_latex_preamble += macro + '\n'
## The following is needed on conda-forge sagemath
from sage.repl.user_globals import initialize_globals
import sage.all
my_globs = dict()
initialize_globals(sage.all, my_globs)
| 15,484 | 33.564732 | 132 | py |
sam-mmrotate | sam-mmrotate-master/engine.py | import os
import torch
from pathlib import Path
from copy import deepcopy
import matplotlib.pyplot as plt
import numpy as np
import cv2
from mmrotate.structures import RotatedBoxes
from mmdet.models.utils import samplelist_boxtype2tensor
from mmengine.runner import load_checkpoint
from utils import show_box, show_mask
import matplotlib.pyplot as plt
from mmengine.structures import InstanceData
from data import build_visualizer
# Attach the predicted SAM masks to the result InstanceData (used by the
# mask-then-box visualizer).
RESULT_WITH_MASK = True
# SAM box prompts are processed in chunks of this size to avoid GPU OOM.
MAX_BATCH_NUM_PRED = 100
# Minimum score an instance needs to be drawn in the saved visualizations.
VIS_SCORE_THR = 0.3
@torch.no_grad()
def single_sample_step(img_id, data, model, predictor, evaluator, dataloader, device, SHOW):
    """Run the two-stage pipeline (rotated detector -> SAM) on one sample.

    Stage 1 runs the rotated detector; its rotated boxes are converted to
    horizontal boxes and used as SAM prompts in stage 2. Each SAM mask is
    then fitted with a minimum-area rotated rectangle, and the results are
    fed into ``evaluator``.

    Args:
        img_id (int): Running index of the sample (used to limit vis output).
        data (dict): Dataloader batch with ``'inputs'`` and ``'data_samples'``.
        model: Rotated detector; ``model.test_step`` yields stage-1 boxes.
        predictor: SAM predictor refining box prompts into masks.
        evaluator: Metric object fed via ``evaluator.process``.
        dataloader: Source dataloader (dataset metainfo for visualization).
        device: Torch device the models live on.
        SHOW (bool): If True, save visualizations for the first 100 images.

    Returns:
        The evaluator with this sample's predictions accumulated.
    """
    copied_data = deepcopy(data)  # untouched copy kept for the SAM stage
    for item in data.values():
        # NOTE(review): Tensor.to() is not in-place and the result is
        # discarded here — presumably the model's data preprocessor moves
        # the batch itself; confirm this line is intentional.
        item[0].to(device)
    # Stage 1: rotated detector proposes rotated boxes.
    # data['inputs'][0] = torch.flip(data['inputs'][0], dims=[0])
    with torch.no_grad():
        pred_results = model.test_step(data)
    pred_r_bboxes = pred_results[0].pred_instances.bboxes
    pred_r_bboxes = RotatedBoxes(pred_r_bboxes)
    # SAM is prompted with axis-aligned boxes, so convert rbox -> hbox.
    h_bboxes = pred_r_bboxes.convert_to('hbox').tensor
    labels = pred_results[0].pred_instances.labels
    scores = pred_results[0].pred_instances.scores
    # Stage 2: SAM turns every horizontal box prompt into a mask.
    if len(h_bboxes) == 0:
        qualities = h_bboxes[:, 0]  # empty placeholder tensor
        # NOTE(review): this creates a 3-element tensor (0, H, W), not an
        # empty (0, H, W)-shaped mask stack — verify downstream handling.
        masks = h_bboxes.new_tensor((0, *data['inputs'][0].shape[:2]))
        data_samples = data['data_samples']
        r_bboxes = []
    else:
        # CHW tensor -> HWC numpy with channel order flipped
        # (presumably BGR -> RGB for SAM — confirm against the dataloader).
        img = copied_data['inputs'][0].permute(1, 2, 0).numpy()[:, :, ::-1]
        data_samples = copied_data['data_samples']
        data_sample = data_samples[0]
        data_sample = data_sample.to(device=device)
        predictor.set_image(img)
        # Too many predictions may result in OOM, hence,
        # we process the predictions in multiple batches.
        masks = []
        num_pred = len(h_bboxes)
        num_batches = int(np.ceil(num_pred / MAX_BATCH_NUM_PRED))
        for i in range(num_batches):
            left_index = i * MAX_BATCH_NUM_PRED
            right_index = (i + 1) * MAX_BATCH_NUM_PRED
            if i == num_batches - 1:
                batch_boxes = h_bboxes[left_index:]
            else:
                batch_boxes = h_bboxes[left_index: right_index]
            transformed_boxes = predictor.transform.apply_boxes_torch(batch_boxes, img.shape[:2])
            batch_masks, qualities, lr_logits = predictor.predict_torch(
                point_coords=None,
                point_labels=None,
                boxes=transformed_boxes,
                multimask_output=False)
            batch_masks = batch_masks.squeeze(1).cpu()
            masks.extend([*batch_masks])
        masks = torch.stack(masks, dim=0)
        # Fit a minimum-area rotated rectangle around every SAM mask.
        r_bboxes = [mask2rbox(mask.numpy()) for mask in masks]
    results_list = get_instancedata_resultlist(r_bboxes, labels, masks, scores)
    data_samples = add_pred_to_datasample(results_list, data_samples)
    evaluator.process(data_samples=data_samples, data_batch=data)
    if SHOW:
        # Only visualize non-empty predictions for the first 100 samples.
        if len(h_bboxes) != 0 and img_id < 100:
            img_name = data_samples[0].img_id
            show_results(img, masks, h_bboxes, results_list, img_id, img_name, dataloader)
    return evaluator
def mask2rbox(mask):
    """Fit a minimum-area rotated rectangle to a binary mask.

    Args:
        mask (np.ndarray): 2-D boolean/0-1 mask.

    Returns:
        np.ndarray: ``[cx, cy, w, h, angle]`` with the angle in radians.
    """
    ys, xs = np.nonzero(mask)
    pts = np.stack([xs, ys], axis=-1)
    (cx, cy), (w, h), angle_deg = cv2.minAreaRect(pts)
    return np.array([cx, cy, w, h, angle_deg / 180 * np.pi])
def show_results(img, masks, h_bboxes, results_list, i, img_name, dataloader):
    """Save a visualization of the final rotated-box (+mask) predictions.

    Writes ``./output_vis/rdet-sam_<img_name>.png``. The stage-1 overlay
    (SAM masks + horizontal prompt boxes via matplotlib) is kept below as
    commented-out code for easy re-enabling.

    Args:
        img (np.ndarray): HWC image the predictions refer to.
        masks: SAM masks (only used by the commented-out stage-1 overlay).
        h_bboxes: Horizontal prompt boxes (only used by the overlay).
        results_list (list): Single-element list of per-image InstanceData.
        i (int): Sample index (only used by the commented-out overlay name).
        img_name (str): Image id used in the saved filename.
        dataloader: Provides dataset metainfo (class names / palette).
    """
    output_dir = './output_vis/'
    Path(output_dir).mkdir(exist_ok=True, parents=True)
    results = results_list[0]
    # vis first stage
    # plt.figure(figsize=(10, 10))
    # plt.imshow(img)
    # for mask in masks:
    #     show_mask(mask.cpu().numpy(), plt.gca(), random_color=True)
    # for box in h_bboxes:
    #     show_box(box.cpu().numpy(), plt.gca())
    # plt.axis('off')
    # # plt.show()
    # plt.savefig(f'./out_mask_{i}.png')
    # plt.close()
    # draw rbox with mmrotate
    visualizer = build_visualizer()
    visualizer.dataset_meta = dataloader.dataset.metainfo
    # Drop low-confidence instances before drawing.
    scores = results.scores
    keep_results = results[scores >= VIS_SCORE_THR]
    out_img = visualizer._draw_instances(
        img, keep_results,
        dataloader.dataset.metainfo['classes'],
        dataloader.dataset.metainfo['palette'],
        box_alpha=0.9, mask_alpha=0.3)
    # visualizer.show()
    # cv2.imwrite(os.path.join(output_dir, f'out_rbox_{i}.png'), out_img[:, :, ::-1])
    # Visualizer returns RGB; cv2 expects BGR, hence the channel flip.
    cv2.imwrite(os.path.join(output_dir, f'rdet-sam_{img_name}.png'),
                out_img[:, :, ::-1])
def add_pred_to_datasample(results_list, data_samples):
    """Attach predicted instances to their data samples.

    Pairs each sample with its predictions, then normalizes all box types
    to plain tensors via ``samplelist_boxtype2tensor``.
    """
    for sample, instances in zip(data_samples, results_list):
        sample.pred_instances = instances
    samplelist_boxtype2tensor(data_samples)
    return data_samples
def get_instancedata_resultlist(r_bboxes, labels, masks, scores):
    """Pack per-image predictions into a single-element ``InstanceData`` list.

    Boxes are wrapped as ``RotatedBoxes``; masks are attached (as numpy)
    only when ``RESULT_WITH_MASK`` is enabled. Detector scores are kept as
    the instance scores (SAM mask qualities are intentionally not used).
    """
    instances = InstanceData()
    instances.bboxes = RotatedBoxes(r_bboxes)
    instances.labels = labels
    instances.scores = scores
    if RESULT_WITH_MASK:
        instances.masks = masks.cpu().numpy()
    return [instances]
| 5,108 | 33.288591 | 97 | py |
sam-mmrotate | sam-mmrotate-master/data.py | import copy
import logging
from functools import partial
from typing import Dict, Optional, Union, List
from mmengine.runner import Runner
from mmengine.evaluator import Evaluator
from mmengine.dataset import worker_init_fn
from mmengine.dist import get_rank
from mmengine.logging import print_log
from mmengine.registry import DATA_SAMPLERS, FUNCTIONS, EVALUATOR, VISUALIZERS
from mmengine.utils import digit_version
from mmengine.utils.dl_utils import TORCH_VERSION
import transforms
import visualizer
from torch.utils.data import DataLoader
from mmrotate.registry import DATASETS
def build_data_loader(data_name=None):
    """Build a DOTA dataloader for the requested split.

    Args:
        data_name (str | None): ``'trainval_with_hbox'`` (also used when
            None) or ``'test_without_hbox'``.

    Returns:
        torch.utils.data.DataLoader: Dataloader for the chosen split.

    Raises:
        NotImplementedError: For any other ``data_name``; the message now
            names the offending value instead of being empty.
    """
    if data_name is None or data_name == 'trainval_with_hbox':
        return MMEngine_build_dataloader(dataloader=naive_trainval_dataloader)
    if data_name == 'test_without_hbox':
        return MMEngine_build_dataloader(dataloader=naive_test_dataloader)
    raise NotImplementedError(
        f"Unknown data_name {data_name!r}; expected None, "
        "'trainval_with_hbox' or 'test_without_hbox'")
def build_evaluator(merge_patches=True, format_only=False):
    """Build the DOTA metric evaluator.

    Args:
        merge_patches (bool): Merge per-patch results back into full images.
        format_only (bool): Only format/dump results without computing mAP.

    Returns:
        Evaluator built from the ``naive_evaluator`` template.
    """
    # Build from a copy: the previous implementation mutated the shared
    # module-level ``naive_evaluator`` dict on every call, so options from
    # one call silently leaked into later calls.
    cfg = dict(naive_evaluator,
               merge_patches=merge_patches, format_only=format_only)
    return MMEngine_build_evaluator(evaluator=cfg)
def build_visualizer():
    """Create the rotated-box visualizer backed by a local vis backend."""
    cfg = dict(
        type='RotLocalVisualizerMaskThenBox',
        vis_backends=[dict(type='LocalVisBackend')],
        name='sammrotate',
        save_dir='./rbbox_vis')
    return VISUALIZERS.build(cfg)
# dataset settings -----------------------------------------------------------
# Config templates (mmengine-style dicts) consumed by build_data_loader /
# build_evaluator above; built lazily through the mm registries.
dataset_type = 'DOTADataset'
data_root = 'data/split_ss_dota/'
backend_args = None
# Trainval pipeline: loads quadrilateral GT, keeps a horizontal copy of the
# boxes (h_gt_bboxes) to prompt SAM, and converts GT to rotated boxes.
naive_trainval_pipeline = [
    dict(type='mmdet.LoadImageFromFile', backend_args=backend_args),
    dict(type='mmdet.Resize', scale=(1024, 1024), keep_ratio=True),
    # avoid bboxes being resized
    dict(type='mmdet.LoadAnnotations', with_bbox=True, box_type='qbox'),
    # Horizontal GTBox, (x1,y1,x2,y2)
    dict(type='AddConvertedGTBox', box_type_mapping=dict(h_gt_bboxes='hbox')),
    dict(type='ConvertBoxType', box_type_mapping=dict(gt_bboxes='rbox')),
    # # Horizontal GTBox, (x,y,w,h,theta)
    # dict(type='ConvertBoxType', box_type_mapping=dict(gt_bboxes='rbox')),
    dict(
        type='mmdet.PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor', 'h_gt_bboxes'))
]
# Test pipeline: no annotations are available for the DOTA test split.
naive_test_pipeline = [
    dict(type='mmdet.LoadImageFromFile', backend_args=backend_args),
    dict(type='mmdet.Resize', scale=(1024, 1024), keep_ratio=True),
    dict(
        type='mmdet.PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]
naive_trainval_dataset = dict(
    type=dataset_type,
    data_root=data_root,
    # ann_file='trainval/annfiles/',
    # ann_file='trainval/annfiles-1sample/',
    # ann_file='trainval/annfiles-3sample/',
    # ann_file='trainval/annfiles-10sample/',
    # ann_file='trainval/annfiles-30sample/',
    # ann_file='trainval/annfiles-100sample/',
    ann_file='trainval/annfiles-1000sample/',
    data_prefix=dict(img_path='trainval/images/'),
    test_mode=True,  # we only run inference with SAM, never training
    pipeline=naive_trainval_pipeline)
naive_test_dataset = dict(
    type=dataset_type,
    data_root=data_root,
    data_prefix=dict(img_path='test/images/'),
    test_mode=True,
    pipeline=naive_test_pipeline)
naive_trainval_dataloader = dict(
    batch_size=1,
    # num_workers=0,  # For debug
    num_workers=2,
    # persistent_workers=False,  # For debug
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=naive_trainval_dataset)
naive_test_dataloader = dict(
    batch_size=1,
    # num_workers=0,  # For debug
    num_workers=2,
    # persistent_workers=False,  # For debug
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=naive_test_dataset)
# Template for the DOTA mAP metric; merge_patches/format_only are filled in
# by build_evaluator().
naive_evaluator = dict(
    type='DOTAMetric', metric='mAP', outfile_prefix='./work_dirs/dota/Task1')
def MMEngine_build_dataloader(dataloader: Union[DataLoader, Dict],
                              seed: Optional[int] = None,
                              diff_rank_seed: bool = False) -> DataLoader:
    """Build dataloader.

    NOTE(review): this looks like a vendored copy of mmengine's
    ``Runner.build_dataloader`` — if so, keep it in sync with upstream.

    The method builds three components:

    - Dataset
    - Sampler
    - Dataloader

    An example of ``dataloader``::

        dataloader = dict(
            dataset=dict(type='ToyDataset'),
            sampler=dict(type='DefaultSampler', shuffle=True),
            batch_size=1,
            num_workers=9
        )

    Args:
        dataloader (DataLoader or dict): A Dataloader object or a dict to
            build Dataloader object. If ``dataloader`` is a Dataloader
            object, just returns itself.
        seed (int, optional): Random seed. Defaults to None.
        diff_rank_seed (bool): Whether or not set different seeds to
            different ranks. If True, the seed passed to sampler is set
            to None, in order to synchronize the seeds used in samplers
            across different ranks.

    Returns:
        Dataloader: DataLoader build from ``dataloader_cfg``.
    """
    if isinstance(dataloader, DataLoader):
        return dataloader
    # Work on a copy so the caller's config dict is not consumed by pops.
    dataloader_cfg = copy.deepcopy(dataloader)
    # build dataset
    dataset_cfg = dataloader_cfg.pop('dataset')
    if isinstance(dataset_cfg, dict):
        dataset = DATASETS.build(dataset_cfg)
        if hasattr(dataset, 'full_init'):
            dataset.full_init()
    else:
        # fallback to raise error in dataloader
        # if `dataset_cfg` is not a valid type
        dataset = dataset_cfg
    # build sampler
    sampler_cfg = dataloader_cfg.pop('sampler')
    if isinstance(sampler_cfg, dict):
        sampler_seed = None if diff_rank_seed else seed
        sampler = DATA_SAMPLERS.build(
            sampler_cfg,
            default_args=dict(dataset=dataset, seed=sampler_seed))
    else:
        # fallback to raise error in dataloader
        # if `sampler_cfg` is not a valid type
        sampler = sampler_cfg
    # build batch sampler (optional; wraps the plain sampler when present)
    batch_sampler_cfg = dataloader_cfg.pop('batch_sampler', None)
    if batch_sampler_cfg is None:
        batch_sampler = None
    elif isinstance(batch_sampler_cfg, dict):
        batch_sampler = DATA_SAMPLERS.build(
            batch_sampler_cfg,
            default_args=dict(
                sampler=sampler,
                batch_size=dataloader_cfg.pop('batch_size')))
    else:
        # fallback to raise error in dataloader
        # if `batch_sampler_cfg` is not a valid type
        batch_sampler = batch_sampler_cfg
    # build dataloader
    init_fn: Optional[partial]
    if seed is not None:
        disable_subprocess_warning = dataloader_cfg.pop(
            'disable_subprocess_warning', False)
        assert isinstance(
            disable_subprocess_warning,
            bool), ('disable_subprocess_warning should be a bool, but got '
                    f'{type(disable_subprocess_warning)}')
        # Seed every worker process deterministically per rank.
        init_fn = partial(
            worker_init_fn,
            num_workers=dataloader_cfg.get('num_workers'),
            rank=get_rank(),
            seed=seed,
            disable_subprocess_warning=disable_subprocess_warning)
    else:
        init_fn = None
    # `persistent_workers` requires pytorch version >= 1.7
    if ('persistent_workers' in dataloader_cfg
            and digit_version(TORCH_VERSION) < digit_version('1.7.0')):
        print_log(
            '`persistent_workers` is only available when '
            'pytorch version >= 1.7',
            logger='current',
            level=logging.WARNING)
        dataloader_cfg.pop('persistent_workers')
    # The default behavior of `collate_fn` in dataloader is to
    # merge a list of samples to form a mini-batch of Tensor(s).
    # However, in mmengine, if `collate_fn` is not defined in
    # dataloader_cfg, `pseudo_collate` will only convert the list of
    # samples into a dict without stacking the batch tensor.
    collate_fn_cfg = dataloader_cfg.pop('collate_fn',
                                        dict(type='pseudo_collate'))
    collate_fn_type = collate_fn_cfg.pop('type')
    collate_fn = FUNCTIONS.get(collate_fn_type)
    collate_fn = partial(collate_fn, **collate_fn_cfg)  # type: ignore
    data_loader = DataLoader(
        dataset=dataset,
        sampler=sampler if batch_sampler is None else None,
        batch_sampler=batch_sampler,
        collate_fn=collate_fn,
        worker_init_fn=init_fn,
        **dataloader_cfg)
    return data_loader
def MMEngine_build_evaluator(evaluator: Union[Dict, List, Evaluator]) -> Evaluator:
    """Build evaluator.

    Examples of ``evaluator``::

        # evaluator could be a built Evaluator instance
        evaluator = Evaluator(metrics=[ToyMetric()])

        # evaluator can also be a list of dict
        evaluator = [
            dict(type='ToyMetric1'),
            dict(type='ToyEvaluator2')
        ]

        # evaluator can also be a list of built metric
        evaluator = [ToyMetric1(), ToyMetric2()]

        # evaluator can also be a dict with key metrics
        evaluator = dict(metrics=ToyMetric())

        # metric is a list
        evaluator = dict(metrics=[ToyMetric()])

    Args:
        evaluator (Evaluator or dict or list): An Evaluator object or a
            config dict or list of config dict used to build an Evaluator.

    Returns:
        Evaluator: Evaluator build from ``evaluator``.
    """
    if isinstance(evaluator, Evaluator):
        return evaluator
    elif isinstance(evaluator, dict):
        # if `metrics` is in the dict keys, build a customized evaluator
        if 'metrics' in evaluator:
            evaluator.setdefault('type', 'Evaluator')
            return EVALUATOR.build(evaluator)
        # otherwise, the default evaluator will be built
        else:
            return Evaluator(evaluator)  # type: ignore
    elif isinstance(evaluator, list):
        # use the default `Evaluator`
        return Evaluator(evaluator)  # type: ignore
    else:
        raise TypeError(
            'evaluator should be one of dict, list of dict, and Evaluator'
            f', but got {evaluator}')
| 10,157 | 33.317568 | 83 | py |
sam-mmrotate | sam-mmrotate-master/visualizer.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional
import numpy as np
import torch
from torch import Tensor
from mmdet.structures.mask import BitmapMasks, PolygonMasks, bitmap_to_polygon
from mmdet.visualization import DetLocalVisualizer, jitter_color
from mmdet.visualization.palette import _get_adaptive_scales
from mmengine.structures import InstanceData
from mmrotate.registry import VISUALIZERS
from mmrotate.structures.bbox import QuadriBoxes, RotatedBoxes
from mmrotate.visualization.palette import get_palette
@VISUALIZERS.register_module()
class RotLocalVisualizerMaskThenBox(DetLocalVisualizer):
    """MMRotate Local Visualizer that draws masks first, then rotated boxes.

    Args:
        name (str): Name of the instance. Defaults to 'visualizer'.
        image (np.ndarray, optional): the origin image to draw. The format
            should be RGB. Defaults to None.
        vis_backends (list, optional): Visual backend config list.
            Defaults to None.
        save_dir (str, optional): Save file dir for all storage backends.
            If it is None, the backend storage will not save any data.
        bbox_color (str, tuple(int), optional): Color of bbox lines.
            The tuple of color should be in BGR order. Defaults to None.
        text_color (str, tuple(int), optional): Color of texts.
            The tuple of color should be in BGR order.
            Defaults to (200, 200, 200).
        mask_color (str, tuple(int), optional): Color of masks.
            The tuple of color should be in BGR order.
            Defaults to None.
        line_width (int, float): The linewidth of lines.
            Defaults to 3.
        alpha (int, float): The transparency of bboxes or mask.
            Defaults to 0.8.
    """
    def _draw_instances(self, image: np.ndarray, instances: ['InstanceData'],
                        classes: Optional[List[str]],
                        palette: Optional[List[tuple]],
                        box_alpha=None, mask_alpha=None) -> np.ndarray:
        """Draw instances of GT or prediction.

        Args:
            image (np.ndarray): The image to draw.
            instances (:obj:`InstanceData`): Data structure for
                instance-level annotations or predictions.
            classes (List[str], optional): Category information.
            palette (List[tuple], optional): Palette information
                corresponding to the category.
            box_alpha (float, optional): Transparency for box/polygon edges;
                falls back to ``self.alpha``.
            mask_alpha (float, optional): Transparency for masks; falls back
                to ``self.alpha``.

        Returns:
            np.ndarray: the drawn image which channel is RGB.
        """
        if box_alpha is None:
            box_alpha = self.alpha
        if mask_alpha is None:
            mask_alpha = self.alpha
        self.set_image(image)
        if 'masks' in instances:
            labels = instances.labels
            masks = instances.masks
            # Normalize masks to a boolean ndarray regardless of input type.
            if isinstance(masks, torch.Tensor):
                masks = masks.numpy()
            elif isinstance(masks, (PolygonMasks, BitmapMasks)):
                masks = masks.to_ndarray()
            masks = masks.astype(bool)
            max_label = int(max(labels) if len(labels) > 0 else 0)
            mask_color = palette if self.mask_color is None \
                else self.mask_color
            mask_palette = get_palette(mask_color, max_label + 1)
            colors = [jitter_color(mask_palette[label]) for label in labels]
            # (The previous version also computed text colors here; they were
            # never used in this branch and have been removed.)
            polygons = []
            for mask in masks:
                contours, _ = bitmap_to_polygon(mask)
                polygons.extend(contours)
            # White contour outlines first, then the filled masks.
            self.draw_polygons(polygons, edge_colors='w', alpha=mask_alpha)
            self.draw_binary_masks(masks, colors=colors, alphas=mask_alpha)
        if 'bboxes' in instances:
            bboxes = instances.bboxes
            labels = instances.labels
            max_label = int(max(labels) if len(labels) > 0 else 0)
            text_palette = get_palette(self.text_color, max_label + 1)
            text_colors = [text_palette[label] for label in labels]
            bbox_color = palette if self.bbox_color is None \
                else self.bbox_color
            bbox_palette = get_palette(bbox_color, max_label + 1)
            colors = [bbox_palette[label] for label in labels]
            # Accept raw tensors in either (n, 5) rbox or (n, 8) qbox layout.
            if isinstance(bboxes, Tensor):
                if bboxes.size(-1) == 5:
                    bboxes = RotatedBoxes(bboxes)
                elif bboxes.size(-1) == 8:
                    bboxes = QuadriBoxes(bboxes)
                else:
                    raise TypeError(
                        'Require the shape of `bboxes` to be (n, 5) '
                        'or (n, 8), but get `bboxes` with shape being '
                        f'{bboxes.shape}.')
            bboxes = bboxes.cpu()
            # Draw every rotated box as a 4-point polygon.
            polygons = bboxes.convert_to('qbox').tensor
            polygons = polygons.reshape(-1, 4, 2)
            polygons = [p for p in polygons]
            self.draw_polygons(
                polygons,
                edge_colors=colors,
                alpha=box_alpha,
                line_widths=self.line_width)
            # Label text near each box center, scaled by box area.
            positions = bboxes.centers + self.line_width
            scales = _get_adaptive_scales(bboxes.areas)
            for i, (pos, label) in enumerate(zip(positions, labels)):
                label_text = classes[
                    label] if classes is not None else f'class {label}'
                if 'scores' in instances:
                    score = round(float(instances.scores[i]) * 100, 1)
                    label_text += f': {score}'
                self.draw_texts(
                    label_text,
                    pos,
                    colors=text_colors[i],
                    font_sizes=int(13 * scales[i]),
                    bboxes=[{
                        'facecolor': 'black',
                        'alpha': 0.8,
                        'pad': 0.7,
                        'edgecolor': 'none'
                    }])
        return self.get_image()
| 6,060 | 39.952703 | 78 | py |
sam-mmrotate | sam-mmrotate-master/main_rdet-sam_dota.py | import torch
from tqdm import tqdm
from mmrotate.utils import register_all_modules
from data import build_data_loader, build_evaluator, build_visualizer
from segment_anything import sam_model_registry, SamPredictor
from mmrotate.registry import MODELS
from mmengine import Config
from mmengine.runner.checkpoint import _load_checkpoint
from engine import single_sample_step
register_all_modules(init_default_scope=True)
# Script switches: save visualizations / only format results (no mAP, for
# DOTA test-server submission) / merge per-patch results / raise the
# detector's minimum box size.
SHOW = True
FORMAT_ONLY = True
MERGE_PATCHES = True
SET_MIN_BOX = False
if __name__ == '__main__':
    # SAM checkpoint + rotated-FCOS detector checkpoint/config.
    sam_checkpoint = r"../segment-anything/checkpoints/sam_vit_b_01ec64.pth"
    model_type = "vit_b"
    device = "cuda"
    ckpt_path = './rotated_fcos_sep_angle_r50_fpn_1x_dota_le90-0be71a0c.pth'
    model_cfg_path = 'configs/rotated_fcos/rotated-fcos-hbox-le90_r50_fpn_1x_dota.py'
    # ckpt_path = './rotated_fcos_kld_r50_fpn_1x_dota_le90-ecafdb2b.pth'
    # model_cfg_path = 'configs/rotated_fcos/rotated-fcos-le90_r50_fpn_kld_1x_dota.py'
    model_cfg = Config.fromfile(model_cfg_path).model
    if SET_MIN_BOX:
        model_cfg.test_cfg['min_bbox_size'] = 10
    model = MODELS.build(model_cfg)
    model.init_weights()
    # Load the detector weights (checkpoint may or may not wrap a
    # 'state_dict' key); print the load result to surface any mismatches.
    checkpoint = _load_checkpoint(ckpt_path, map_location='cpu')
    sd = checkpoint.get('state_dict', checkpoint)
    print(model.load_state_dict(sd))
    dataloader = build_data_loader('test_without_hbox')
    # dataloader = build_data_loader('trainval_with_hbox')
    evaluator = build_evaluator(MERGE_PATCHES, FORMAT_ONLY)
    evaluator.dataset_meta = dataloader.dataset.metainfo
    sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
    model = model.to(device=device)
    sam = sam.to(device=device)
    predictor = SamPredictor(sam)
    model.eval()
    # Run the two-stage pipeline (detector -> SAM) over the whole split.
    for i, data in tqdm(enumerate(dataloader), total=len(dataloader)):
        evaluator = single_sample_step(i, data, model, predictor, evaluator, dataloader, device, SHOW)
    # Persist accumulated results before the (possibly slow) evaluation.
    torch.save(evaluator, './evaluator.pth')
    metrics = evaluator.evaluate(len(dataloader.dataset))
| 2,017 | 30.046154 | 102 | py |
sam-mmrotate | sam-mmrotate-master/main_sam_dota.py | import torch
from tqdm import tqdm
import numpy as np
import cv2
from mmrotate.utils import register_all_modules
from data import build_data_loader, build_evaluator, build_visualizer
from utils import show_box, show_mask
import matplotlib.pyplot as plt
from mmengine.structures import InstanceData
from segment_anything import sam_model_registry, SamPredictor
from mmrotate.structures import RotatedBoxes
from mmengine import ProgressBar
from mmdet.models.utils import samplelist_boxtype2tensor
register_all_modules(init_default_scope=True)
# Script switches: save visualizations / only format results / merge patches.
SHOW = False
FORMAT_ONLY = False
MERGE_PATCHES = False
if __name__ == '__main__':
    # SAM-only pipeline: prompt SAM with ground-truth horizontal boxes and
    # evaluate the rotated boxes fitted to the resulting masks.
    dataloader = build_data_loader('trainval_with_hbox')
    evaluator = build_evaluator(MERGE_PATCHES, FORMAT_ONLY)
    evaluator.dataset_meta = dataloader.dataset.metainfo
    sam_checkpoint = r"../segment-anything/checkpoints/sam_vit_b_01ec64.pth"
    model_type = "vit_b"
    device = "cuda"
    sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
    sam = sam.to(device=device)
    predictor = SamPredictor(sam)
    for i, data in tqdm(enumerate(dataloader), total=len(dataloader)):
        # CHW tensor -> HWC numpy with channel order flipped for SAM.
        img = data['inputs'][0].permute(1, 2, 0).numpy()[:, :, ::-1]
        data_samples = data['data_samples']
        data_sample = data_samples[0]
        data_sample = data_sample.to(device=device)
        # Ground-truth horizontal boxes serve as the SAM prompts.
        h_bboxes = data_sample.h_gt_bboxes.tensor.to(device=device)
        labels = data_sample.gt_instances.labels.to(device=device)
        r_bboxes = []
        if len(h_bboxes) == 0:
            qualities = h_bboxes[:, 0]
            masks = h_bboxes.new_tensor((0, *img.shape[:2]))
        else:
            predictor.set_image(img)
            transformed_boxes = predictor.transform.apply_boxes_torch(h_bboxes, img.shape[:2])
            masks, qualities, lr_logits = predictor.predict_torch(
                point_coords=None,
                point_labels=None,
                boxes=transformed_boxes,
                multimask_output=False)
            masks = masks.squeeze(1)
            qualities = qualities.squeeze(-1)
            # Fit a minimum-area rotated rectangle to every SAM mask.
            for mask in masks:
                y, x = np.nonzero(mask.cpu().numpy())
                points = np.stack([x, y], axis=-1)
                (cx, cy), (w, h), a = cv2.minAreaRect(points)
                r_bboxes.append(np.array([cx, cy, w, h, a/180*np.pi]))
        # SAM mask qualities act as the instance scores here.
        results = InstanceData()
        results.bboxes = RotatedBoxes(r_bboxes)
        results.scores = qualities
        results.labels = labels
        results.masks = masks.cpu().numpy()
        results_list = [results]
        # add_pred_to_datasample
        for data_sample, pred_instances in zip(data_samples, results_list):
            data_sample.pred_instances = pred_instances
        samplelist_boxtype2tensor(data_samples)
        evaluator.process(data_samples=data_samples, data_batch=data)
        if SHOW:
            # Overlay SAM masks and prompt boxes with matplotlib.
            plt.figure(figsize=(10, 10))
            plt.imshow(img)
            for mask in masks:
                show_mask(mask.cpu().numpy(), plt.gca(), random_color=True)
            for box in h_bboxes:
                show_box(box.cpu().numpy(), plt.gca())
            plt.axis('off')
            # plt.show()
            plt.savefig(f'./out_mask_{i}.png')
            # draw rbox with mmrotate
            visualizer = build_visualizer()
            visualizer.dataset_meta = dataloader.dataset.metainfo
            out_img = visualizer._draw_instances(
                img, results,
                dataloader.dataset.metainfo['classes'],
                dataloader.dataset.metainfo['palette'])
            # visualizer.show()
            cv2.imwrite(f'./out_rbox_{i}.png', out_img[:, :, ::-1])
    metrics = evaluator.evaluate(len(dataloader.dataset))
| 3,739 | 34.283019 | 94 | py |
sam-mmrotate | sam-mmrotate-master/configs/rotated_fcos/rotated-fcos-le90_r50_fpn_1x_dota.py | _base_ = [
'../_base_/datasets/dota.py', '../_base_/schedules/schedule_1x.py',
'../_base_/default_runtime.py'
]
angle_version = 'le90'
# model settings: rotated FCOS (ResNet-50 + FPN) for DOTA (15 classes).
model = dict(
    type='mmdet.FCOS',
    data_preprocessor=dict(
        type='mmdet.DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=32,
        boxtype2tensor=False),
    backbone=dict(
        type='mmdet.ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='mmdet.FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5,
        relu_before_extra_convs=True),
    bbox_head=dict(
        type='RotatedFCOSHead',
        num_classes=15,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        strides=[8, 16, 32, 64, 128],
        center_sampling=True,
        center_sample_radius=1.5,
        norm_on_bbox=True,
        centerness_on_reg=True,
        use_hbbox_loss=False,
        scale_angle=True,
        bbox_coder=dict(
            type='DistanceAnglePointCoder', angle_version=angle_version),
        loss_cls=dict(
            type='mmdet.FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='RotatedIoULoss', loss_weight=1.0),
        loss_angle=None,
        loss_centerness=dict(
            type='mmdet.CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    # training and testing settings
    train_cfg=None,
    test_cfg=dict(
        nms_pre=2000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms_rotated', iou_threshold=0.1),
        max_per_img=2000))
| 2,054 | 29.220588 | 79 | py |
sam-mmrotate | sam-mmrotate-master/configs/rotated_fcos/rotated-fcos-le90_r50_fpn_rr-6x_hrsc.py | _base_ = [
'../_base_/datasets/hrsc.py', '../_base_/schedules/schedule_6x.py',
'../_base_/default_runtime.py'
]
angle_version = 'le90'
# model settings: rotated FCOS (ResNet-50 + FPN) for HRSC (single class).
model = dict(
    type='mmdet.FCOS',
    data_preprocessor=dict(
        type='mmdet.DetDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True,
        pad_size_divisor=32,
        boxtype2tensor=False),
    backbone=dict(
        type='mmdet.ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='mmdet.FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5,
        relu_before_extra_convs=True),
    bbox_head=dict(
        type='RotatedFCOSHead',
        num_classes=1,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        strides=[8, 16, 32, 64, 128],
        center_sampling=True,
        center_sample_radius=1.5,
        norm_on_bbox=True,
        centerness_on_reg=True,
        use_hbbox_loss=False,
        scale_angle=True,
        bbox_coder=dict(
            type='DistanceAnglePointCoder', angle_version=angle_version),
        loss_cls=dict(
            type='mmdet.FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='RotatedIoULoss', loss_weight=1.0),
        loss_angle=None,
        loss_centerness=dict(
            type='mmdet.CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    # training and testing settings
    train_cfg=None,
    test_cfg=dict(
        nms_pre=2000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms_rotated', iou_threshold=0.1),
        max_per_img=2000))
# Training pipeline with random flip + rotation augmentation.
train_pipeline = [
    dict(type='mmdet.LoadImageFromFile', backend_args={{_base_.backend_args}}),
    dict(type='mmdet.LoadAnnotations', with_bbox=True, box_type='qbox'),
    dict(type='ConvertBoxType', box_type_mapping=dict(gt_bboxes='rbox')),
    dict(type='mmdet.Resize', scale=(800, 512), keep_ratio=True),
    dict(
        type='mmdet.RandomFlip',
        prob=0.75,
        direction=['horizontal', 'vertical', 'diagonal']),
    dict(type='RandomRotate', prob=0.5, angle_range=180),
    dict(type='mmdet.PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
| 2,648 | 30.915663 | 79 | py |
ContinualContrastiveLearning | ContinualContrastiveLearning-main/lincls_eval.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import builtins
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from moco.loader import split_images_labels
from moco.loader import merge_images_labels
import numpy as np
# All lowercase, callable, non-dunder names in torchvision.models are valid
# architecture choices (resnet50, vgg16, ...).
model_names = sorted(name for name in models.__dict__
                     if name.islower() and not name.startswith("__")
                     and callable(models.__dict__[name]))


def _str2bool(value):
    """Parse common boolean spellings from the command line.

    Using ``type=bool`` in argparse is a classic bug: ``bool('False')`` is
    True, so any non-empty string (including ``--flag False``) was parsed as
    True. This converter keeps the same CLI shape (``--flag True/False``)
    while parsing the value correctly.
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ('yes', 'true', 't', '1'):
        return True
    if value.lower() in ('no', 'false', 'f', '0'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected, got %r' % value)


parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--imagenetsub', default=False, action='store_true',
                    help='use imagenet-sub')
# BUG FIX: was ``type=bool`` which treated any non-empty string as True.
parser.add_argument('--use_teacher_weight', default=False, type=_str2bool,
                    help='use teacher weight')
parser.add_argument('--data', metavar='DIR',
                    help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
                    choices=model_names,
                    help='model architecture: ' +
                        ' | '.join(model_names) +
                        ' (default: resnet50)')
parser.add_argument('-j', '--workers', default=32, type=int, metavar='N',
                    help='number of data loading workers (default: 32)')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N',
                    help='mini-batch size (default: 256), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=30., type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--schedule', default=[60, 80], nargs='*', type=int,
                    help='learning rate schedule (when to drop lr by a ratio)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight-decay', default=0., type=float,
                    metavar='W', help='weight decay (default: 0.)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--world-size', default=-1, type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
                    help='Use multi-processing distributed training to launch '
                         'N processes per node, which has N GPUs. This is the '
                         'fastest way to use PyTorch for either single node or '
                         'multi node data parallel training')
parser.add_argument('--pretrained', default='', type=str,
                    help='path to moco pretrained checkpoint')

# Best top-1 validation accuracy seen so far (updated by main_worker).
best_acc1 = 0
def main():
    """Entry point: parse CLI arguments and launch worker process(es).

    In multiprocessing-distributed mode one process per GPU is spawned;
    otherwise main_worker runs inline in this process.
    """
    args = parser.parse_args()

    if args.seed is not None:
        # Deterministic runs: seed both RNGs and force deterministic cuDNN.
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])

    args.distributed = args.world_size > 1 or args.multiprocessing_distributed

    # NOTE(review): the GPU count is hard-coded to 8 (torch.cuda.device_count()
    # was commented out upstream) -- confirm this matches the target machine.
    ngpus_per_node = 8
    if not args.multiprocessing_distributed:
        # Single-process path: run the worker inline on the selected GPU.
        main_worker(args.gpu, ngpus_per_node, args)
        return

    # One worker process per local GPU; the global world size grows from
    # node count to total process count before spawning.
    args.world_size = ngpus_per_node * args.world_size
    mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
def main_worker(gpu, ngpus_per_node, args):
    """Per-process worker: linear classification on top of a frozen backbone.

    Builds the torchvision architecture, freezes everything except the final
    fc layer, loads MoCo/CCL pre-trained weights into the backbone, then
    trains and validates the linear head.

    Args:
        gpu: local GPU index for this process (or None).
        ngpus_per_node: GPUs per node, used to derive the global rank and to
            split batch size/workers across processes.
        args: parsed namespace from the module-level parser.
    """
    global best_acc1
    args.gpu = gpu
    # suppress printing if not master
    if args.multiprocessing_distributed and args.gpu != 0:
        def print_pass(*args):
            pass
        builtins.print = print_pass
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)
    # create model
    print("=> creating model '{}'".format(args.arch))
    model = models.__dict__[args.arch]()
    # freeze all layers but the last fc
    for name, param in model.named_parameters():
        if name not in ['fc.weight', 'fc.bias']:
            param.requires_grad = False
    # init the fc layer
    model.fc.weight.data.normal_(mean=0.0, std=0.01)
    model.fc.bias.data.zero_()
    # load from pre-trained, before DistributedDataParallel constructor
    if args.pretrained:
        if os.path.isfile(args.pretrained):
            print("=> loading checkpoint '{}'".format(args.pretrained))
            checkpoint = torch.load(args.pretrained, map_location="cpu")
            # rename moco pre-trained keys
            state_dict = checkpoint['state_dict']
            if args.use_teacher_weight:
                # Evaluate the momentum ("teacher") branch of the checkpoint.
                for k in list(state_dict.keys()):
                    # retain only encoder_q up to before the embedding layer
                    if k.startswith('module.teacher') and not k.startswith('module.teacher.fc'):
                        # remove prefix
                        state_dict[k[len("module.teacher."):]] = state_dict[k]
                    # delete renamed or unused k
                    del state_dict[k]
            else:
                for k in list(state_dict.keys()):
                    # retain only encoder_q up to before the embedding layer
                    if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):
                        # remove prefix
                        state_dict[k[len("module.encoder_q."):]] = state_dict[k]
                    # delete renamed or unused k
                    del state_dict[k]
            args.start_epoch = 0
            msg = model.load_state_dict(state_dict, strict=False)
            # Only the freshly-initialized fc head may be missing.
            assert set(msg.missing_keys) == {"fc.weight", "fc.bias"}
            print("=> loaded pre-trained model '{}'".format(args.pretrained))
        else:
            print("=> no checkpoint found at '{}'".format(args.pretrained))
    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    # optimize only the linear classifier
    parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
    assert len(parameters) == 2 # fc.weight, fc.bias
    optimizer = torch.optim.SGD(parameters, args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    # Keep only the first 100 (imagenet-sub) or all 1000 class labels; the
    # dataset's sample list is rewritten in place to the filtered subset.
    if args.imagenetsub:
        order = np.arange(100)
    else:
        order = np.arange(1000)
    X_train_total, Y_train_total = split_images_labels(train_dataset.imgs)
    indices_train = np.array([i in order for i in Y_train_total])
    X_train = X_train_total[indices_train]
    Y_train = Y_train_total[indices_train]
    current_train_imgs = merge_images_labels(X_train, Y_train)
    train_dataset.imgs = train_dataset.samples = current_train_imgs
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_dataset = datasets.ImageFolder(valdir, transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ]))
    # Apply the same class filtering to the validation split.
    X_val_total, Y_val_total = split_images_labels(val_dataset.imgs)
    indices_val = np.array([i in order for i in Y_val_total])
    X_val = X_val_total[indices_val]
    Y_val = Y_val_total[indices_val]
    current_val_imgs = merge_images_labels(X_val, Y_val)
    val_dataset.imgs = val_dataset.samples = current_val_imgs
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return
    print("=> begin training")
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args)
        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args)
        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        if not args.multiprocessing_distributed or (args.multiprocessing_distributed
                and args.rank % ngpus_per_node == 0):
            # Checkpoint path is derived from the pretrained filename
            # ('*.pth.tar' -> '*_linear.pth.tar').
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer' : optimizer.state_dict(),
            }, is_best, filename=args.pretrained.replace('.pth.tar', '_linear.pth.tar'))
            # if epoch == args.start_epoch:
            #     sanity_check(model.state_dict(), args.pretrained)
    print('best acc', best_acc1)
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Train the linear head for one epoch over ``train_loader``.

    Only the fc parameters receive gradient updates (the backbone was frozen
    by the caller); the model stays in eval mode so BN statistics are fixed.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(train_loader),
        [batch_time, data_time, losses, top1, top5],
        prefix="Epoch: [{}]".format(epoch))
    """
    Switch to eval mode:
    Under the protocol of linear classification on frozen features/models,
    it is not legitimate to change any part of the pre-trained model.
    BatchNorm in train mode may revise running mean/std (even if it receives
    no gradient), which are part of the model parameters too.
    """
    model.eval()
    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if args.gpu is not None:
            images = images.cuda(args.gpu, non_blocking=True)
        target = target.cuda(args.gpu, non_blocking=True)
        # compute output
        output = model(images)
        loss = criterion(output, target)
        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            progress.display(i)
def validate(val_loader, model, criterion, args):
    """Evaluate the model on ``val_loader`` and return the average top-1
    accuracy (as tracked by the Acc@1 AverageMeter)."""
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(val_loader),
        [batch_time, losses, top1, top5],
        prefix='Test: ')
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
            # compute output
            output = model(images)
            loss = criterion(output, target)
            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                progress.display(i)
        # TODO: this should also be done with the ProgressMeter
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))
    return top1.avg
def save_checkpoint(state, is_best, filename='./linear_checkpoint.pth.tar'):
    """Persist a checkpoint; when it is the best so far, mirror it to a
    sibling file whose name has 'linear' replaced by 'linear_best'."""
    torch.save(state, filename)
    if not is_best:
        return
    best_copy = filename.replace('linear', 'linear_best')
    shutil.copyfile(filename, best_copy)
def sanity_check(state_dict, pretrained_weights):
    """
    Linear classifier should not change any weights other than the linear layer.
    This sanity check asserts nothing wrong happens (e.g., BN stats updated).

    Args:
        state_dict: state dict of the (possibly DataParallel-wrapped) model
            after linear training; keys may carry a 'module.' prefix.
        pretrained_weights: path to the original MoCo checkpoint, whose
            backbone keys live under 'module.encoder_q.'.

    Raises:
        AssertionError: if any non-fc tensor differs from the checkpoint.
    """
    print("=> loading '{}' for sanity check".format(pretrained_weights))
    checkpoint = torch.load(pretrained_weights, map_location="cpu")
    state_dict_pre = checkpoint['state_dict']
    for k in list(state_dict.keys()):
        # only ignore fc layer
        if 'fc.weight' in k or 'fc.bias' in k:
            continue
        # name in pretrained model
        k_pre = 'module.encoder_q.' + k[len('module.'):] \
            if k.startswith('module.') else 'module.encoder_q.' + k
        assert ((state_dict[k].cpu() == state_dict_pre[k_pre]).all()), \
            '{} is changed in linear classifier training.'.format(k)
    print("=> sanity check passed.")
class AverageMeter(object):
    """Track the most recent value alongside a running average.

    Attributes (all public, read by callers): val, avg, sum, count.
    """

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Zero out every accumulated statistic."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        # Build e.g. '{name} {val:6.3f} ({avg:6.3f})' from the stored fmt spec.
        template = '{name} {val%s} ({avg%s})' % (self.fmt, self.fmt)
        return template.format(name=self.name, val=self.val, avg=self.avg)
class ProgressMeter(object):
    """Print training progress lines like 'Epoch: [0][ 12/500]\tLoss ...'."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print the prefix, current/total batch counter, and every meter."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        print('\t'.join(parts))

    def _get_batch_fmtstr(self, num_batches):
        # Pad the running counter to the width of the total, e.g. '[ 12/500]'.
        width = len(str(num_batches // 1))
        slot = '{:' + str(width) + 'd}'
        return '[' + slot + '/' + slot.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
    """Step-decay schedule: multiply args.lr by 0.1 for each milestone in
    args.schedule that the current epoch has reached, then write the result
    into every parameter group."""
    lr = args.lr
    for milestone in args.schedule:
        if epoch >= milestone:
            lr *= 0.1
    for group in optimizer.param_groups:
        group['lr'] = lr
def accuracy(output, target, topk=(1,)):
    """Return top-k accuracies (in percent) of ``output`` logits vs ``target``.

    One 1-element tensor is returned per entry in ``topk``.
    """
    with torch.no_grad():
        k_max = max(topk)
        n_samples = target.size(0)
        # (batch, k_max) indices of the highest-scoring classes, transposed
        # to (k_max, batch) so row r holds every sample's r-th guess.
        _, guesses = output.topk(k_max, 1, True, True)
        guesses = guesses.t()
        hits = guesses.eq(target.view(1, -1).expand_as(guesses))
        results = []
        for k in topk:
            # A sample counts as correct if the label is in its first k guesses.
            n_correct = hits[:k].reshape(-1).float().sum(0, keepdim=True)
            results.append(n_correct.mul_(100.0 / n_samples))
        return results
# Standard CLI entry-point guard.
if __name__ == '__main__':
    main()
| 21,472 | 38.54512 | 100 | py |
ContinualContrastiveLearning | ContinualContrastiveLearning-main/train.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import builtins
import math
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
from torch.utils.data import DataLoader, Dataset, ConcatDataset
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import torch.nn.functional as F
import moco.loader
from moco.loader import split_images_labels
from moco.loader import merge_images_labels
from moco.loader import ImageFolder_with_id
import moco.builder
from moco.builder import concat_all_gather
from tqdm import tqdm
import numpy as np
import random
from sklearn.cluster import KMeans
# All lowercase, callable, non-dunder names in torchvision.models are valid
# backbone choices for the contrastive encoder.
model_names = sorted(name for name in models.__dict__
                     if name.islower() and not name.startswith("__")
                     and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# incremental setting
parser.add_argument('--method', default='CCL', type=str,
                    help='choice of method')
parser.add_argument('--n-tasks', default=10, type=int,
                    help='number of tasks')
parser.add_argument('--n-save', default=20, type=int,
                    help='number of saved images for each class')
parser.add_argument('--imagenetsub', default=False, action='store_true',
                    help='use imagenet-sub')
# original MoCo setting
parser.add_argument('--data', metavar='DIR', default='/data/public_data/ImageNet/imagenet/',
                    help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
                    choices=model_names,
                    help='model architecture: ' +
                        ' | '.join(model_names) +
                        ' (default: resnet50)')
parser.add_argument('-j', '--workers', default=32, type=int, metavar='N',
                    help='number of data loading workers (default: 32)')
parser.add_argument('--epochs', default=200, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N',
                    help='mini-batch size (default: 256), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.03, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--schedule', default=[120, 160], nargs='*', type=int,
                    help='learning rate schedule (when to drop lr by 10x)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum of SGD solver')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('--world-size', default=-1, type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
                    help='Use multi-processing distributed training to launch '
                         'N processes per node, which has N GPUs. This is the '
                         'fastest way to use PyTorch for either single node or '
                         'multi node data parallel training')
# moco specific configs:
parser.add_argument('--moco-dim', default=128, type=int,
                    help='feature dimension (default: 128)')
parser.add_argument('--moco-k', default=65536, type=int,
                    help='queue size; number of negative keys (default: 65536)')
parser.add_argument('--moco-m', default=0.999, type=float,
                    help='moco momentum of updating key encoder (default: 0.999)')
parser.add_argument('--moco-t', default=0.07, type=float,
                    help='softmax temperature (default: 0.07)')
# CCL-specific configs: teacher EMA momentum and replay-sample queue size.
parser.add_argument('--ccl-teacher-m', default=0.996, type=float,
                    help='momentum of updating teacher (default: 0.996)')
parser.add_argument('--ccl-k', default=256, type=int,
                    help='extra sample queue size; number of negative keys (default: 256)')
# options for moco v2
parser.add_argument('--mlp', action='store_true',
                    help='use mlp head')
parser.add_argument('--aug-plus', action='store_true',
                    help='use moco v2 data augmentation')
parser.add_argument('--cos', action='store_true',
                    help='use cosine lr schedule')
def main():
    """Entry point for continual contrastive pre-training: parse arguments,
    then either spawn one worker process per GPU or run a single worker."""
    args = parser.parse_args()

    if args.seed is not None:
        # Seed all RNGs and make cuDNN deterministic for reproducibility.
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])

    args.distributed = args.world_size > 1 or args.multiprocessing_distributed

    # NOTE(review): hard-coded GPU count (torch.cuda.device_count() was
    # commented out upstream) -- confirm this matches the actual hardware.
    ngpus_per_node = 8
    if not args.multiprocessing_distributed:
        # Run the worker directly in this process.
        main_worker(args.gpu, ngpus_per_node, args)
        return

    # Spawn one main_worker per local GPU; world size becomes the total
    # number of processes across all nodes.
    args.world_size = ngpus_per_node * args.world_size
    mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
def main_worker(gpu, ngpus_per_node, args):
    """Per-process worker for continual contrastive pre-training.

    Builds a MoCo (or MoCoCCL) model, then iterates over ``args.n_tasks``
    class-incremental tasks: each task trains on the current classes plus a
    replay buffer of images saved from earlier tasks (unless the method is
    'Finetuning'), checkpoints the model, and selects new replay images.

    Args:
        gpu: local GPU index for this process.
        ngpus_per_node: GPUs per node (used to derive global rank and to
            split batch size/workers).
        args: parsed namespace from the module-level parser.
    """
    args.gpu = gpu
    # suppress printing if not master
    if args.multiprocessing_distributed and args.gpu != 0:
        def print_pass(*args):
            pass
        builtins.print = print_pass
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)
    # create model
    assert args.method in ['CCL', 'Finetuning', 'SimpleReplay']
    print("=> creating model '{}', Method: {}".format(args.arch, args.method))
    if args.method == 'CCL':
        # CCL adds a momentum teacher and an extra replay-sample queue on
        # top of the standard MoCo architecture.
        model = moco.builder.MoCoCCL(
            models.__dict__[args.arch],
            dim=args.moco_dim, K=args.moco_k, m=args.moco_m, T=args.moco_t, mlp=args.mlp,
            extra_sample_K=args.ccl_k, teacher_m=args.ccl_teacher_m)
    else:
        model = moco.builder.MoCo(
            models.__dict__[args.arch],
            dim=args.moco_dim, K=args.moco_k, m=args.moco_m, T=args.moco_t, mlp=args.mlp)
    # print(model)
    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu],find_unused_parameters=True)
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
        # comment out the following line for debugging
        raise NotImplementedError("Only DistributedDataParallel is supported.")
    else:
        # AllGather implementation (batch shuffle, queue update, etc.) in
        # this code only supports DistributedDataParallel.
        raise NotImplementedError("Only DistributedDataParallel is supported.")
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    # NOTE(review): this creates 'checkpoints/<method>' (and requires
    # 'checkpoints/' to already exist, since os.mkdir is not recursive), but
    # checkpoints below are written to 'checkpoints/<method>_ntask_<n>/' --
    # verify the save directory actually exists before a long run.
    if not os.path.isdir('checkpoints/{}'.format(args.method)):
        os.mkdir('checkpoints/{}'.format(args.method))
    cudnn.benchmark = True
    # Data loading code
    traindir = os.path.join(args.data, 'train')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    # MoCo v2's aug
    augmentation = [
        transforms.RandomResizedCrop(224, scale=(0.2, 1.)),
        transforms.RandomApply([
            transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)  # not strengthened
        ], p=0.8),
        transforms.RandomGrayscale(p=0.2),
        transforms.RandomApply([moco.loader.GaussianBlur([.1, 2.])], p=0.5),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize
    ]
    # Deterministic center-crop view used as the non-augmented branch.
    base_augmentation = [
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize
    ]
    train_dataset_current = ImageFolder_with_id(
        traindir,
        moco.loader.TwoCropsTransform(transforms.Compose(augmentation), transforms.Compose(base_augmentation), is_old_sample=False))
    train_dataset_old = ImageFolder_with_id(
        traindir,
        moco.loader.TwoCropsTransform(transforms.Compose(augmentation), transforms.Compose(base_augmentation), is_old_sample=True))
    train_dataset_current_multi_view = ImageFolder_with_id(
        traindir,
        moco.loader.MultiViewTransform(transforms.Compose(augmentation), transforms.Compose(base_augmentation)))
    if args.imagenetsub:
        # Get the first 100 categories for simplicity
        order = np.arange(100)  # imagenet-sub
        nb_cl = int(100/args.n_tasks)
    else:
        order = np.arange(1000)  # imagenet-full
        nb_cl = int(1000/args.n_tasks)
    # Fixed shuffle of the class order so task splits are reproducible.
    seed = 1
    np.random.seed(seed)
    np.random.shuffle(order)
    X_train_total, Y_train_total = split_images_labels(train_dataset_current.imgs)
    # Replay buffers: one (images, labels) entry appended per finished task.
    X_train_saved, Y_train_saved = [], []
    for t in range(args.n_tasks):
        # NOTE(review): actual_cl is computed but never used below.
        actual_cl = order[range(t*nb_cl, (t+1)*nb_cl)]
        indices_train = np.array([i in order[range(t*nb_cl, (t+1)*nb_cl)] for i in Y_train_total])
        X_train = X_train_total[indices_train]
        Y_train = Y_train_total[indices_train]
        current_train_imgs = merge_images_labels(X_train, Y_train)
        train_dataset_current.imgs = train_dataset_current.samples = current_train_imgs
        train_dataset_current_multi_view.imgs = train_dataset_current_multi_view.samples = current_train_imgs
        if t>0 and args.method != 'Finetuning':
            # Concatenate the current task's data with the saved replay images.
            X_protoset = np.concatenate(X_train_saved, axis=0)
            Y_protoset = np.concatenate(Y_train_saved)
            old_train_imgs = merge_images_labels(X_protoset, Y_protoset)
            train_dataset_old.imgs = train_dataset_old.samples = old_train_imgs
            train_dataset_ensemble = ConcatDataset([train_dataset_current, train_dataset_old])
        else:
            train_dataset_ensemble = train_dataset_current
        if args.distributed:
            train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset_ensemble)
        else:
            train_sampler = None
        train_loader = torch.utils.data.DataLoader(
            train_dataset_ensemble, batch_size=args.batch_size, shuffle=(train_sampler is None),
            num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)
        # NOTE(review): args.start_epoch is shared across tasks; after a
        # resume, every subsequent task also starts at that epoch -- confirm
        # this is intended.
        for epoch in range(args.start_epoch, args.epochs):
            if args.distributed:
                train_sampler.set_epoch(epoch)
            adjust_learning_rate(optimizer, epoch, args)
            # train for one epoch
            train(train_loader, model, criterion, optimizer, epoch, args, t)
            if args.method == 'CCL':
                # EMA-update the teacher branch once per epoch.
                model.module.update_teacher()
        if not args.multiprocessing_distributed or (args.multiprocessing_distributed
                and args.rank % ngpus_per_node == 0):
            # One checkpoint per task, including the replay buffer contents.
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'optimizer' : optimizer.state_dict(),
                'saved_X': X_train_saved,
                'saved_Y': Y_train_saved
            }, is_best=False, filename='./checkpoints/{}_ntask_{}/moco_checkpoint_{}.pth.tar'.format(args.method,args.n_tasks,t))
        if args.method != 'Finetuning':
            # Select replay images for the classes of the task just finished.
            print('Image Saving ...')
            if args.distributed:
                train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset_current_multi_view)
            else:
                train_sampler = None
            train_loader_current_multi = torch.utils.data.DataLoader(
                train_dataset_current_multi_view, batch_size=args.batch_size, shuffle=(train_sampler is None),
                num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=False)
            if args.distributed:
                # NOTE(review): 'epoch' here is the leaked loop variable from
                # the training loop above (== args.epochs - 1).
                train_sampler.set_epoch(epoch)
            if args.method == 'CCL':
                # CCL picks minimum-variance samples per k-means cluster.
                X_saved, Y_saved = save_replay_image(train_loader_current_multi, [X_train,Y_train], model, args, indicator='min_var')
            elif args.method == 'SimpleReplay':
                # SimpleReplay picks a uniform random subset.
                X_saved, Y_saved = save_replay_image(train_loader_current_multi, [X_train,Y_train], model, args, indicator='random')
            X_train_saved.append(X_saved)
            Y_train_saved.append(Y_saved)
def save_replay_image(val_loader, img_set, model, args, indicator='random'):
    """Select a replay subset (images, labels) of the current task's data.

    Args:
        val_loader: loader yielding ``(images, target, im_id)`` where ``images``
            is a list of augmented views and ``im_id`` is the dataset index
            (see ``ImageFolder_with_id``); only used for ``indicator='min_var'``.
        img_set: ``[X_train, Y_train]`` numpy arrays of image paths/labels.
        model: encoder supporting ``model(x, mode='feature')``.
        args: needs ``imagenetsub``, ``n_tasks``, ``n_save``, ``gpu``.
        indicator (str): ``'random'`` picks uniformly; ``'min_var'`` keeps, per
            k-means cluster, the samples whose multi-view features have the
            smallest variance around the (normalized) cluster mean.

    Returns:
        (saved_X, saved_Y): selected images and labels.
    """
    # classes per task: 100 classes for the ImageNet subset, 1000 for full
    if args.imagenetsub:
        n_cls = 100//args.n_tasks
    else:
        n_cls = 1000//args.n_tasks
    if indicator=='random':
        X_train,Y_train = img_set
        # NOTE(review): sampling with replacement — duplicates possible.
        idx = np.random.randint(X_train.shape[0], size=args.n_save*n_cls)
        return X_train[idx], Y_train[idx]
    else:
        assert indicator=='min_var'
        model.eval()
        X_train,Y_train = img_set
        feature_bank = []
        idx = []
        with torch.no_grad():
            # collect features of every view of every sample, plus its index
            for (images, _, im_id) in val_loader:
                if args.gpu is not None:
                    im_id = im_id.cuda(args.gpu, non_blocking=True)
                feature = []
                for i in range(len(images)):
                    if args.gpu is not None:
                        images[i] = images[i].cuda(args.gpu, non_blocking=True)
                    f = model(images[i], mode='feature')
                    feature.append(f.unsqueeze(dim=-1))
                # feature: (batch, dim, num_views)
                feature = torch.cat(feature, dim=-1)
                feature_bank.append(feature)
                idx.append(im_id)
            feature_bank = torch.cat(feature_bank, dim=0)
            idx = torch.cat(idx, dim=0)
            # merge the shards held by each DDP rank
            feature_bank = concat_all_gather(feature_bank)
            idx = concat_all_gather(idx)
        feature_bank = feature_bank.cpu().numpy()
        idx = idx.cpu().numpy()
        idx = np.squeeze(idx).astype('int')
        # DistributedSampler pads/duplicates samples; deduplicate by index
        idx, indices = np.unique(idx, return_index=True)
        feature_bank = feature_bank[indices]
        idx_sort = np.argsort(idx)
        feature_bank = feature_bank[idx_sort]
        feature_bank = np.squeeze(feature_bank)
        if feature_bank.shape[0]>X_train.shape[0]:
            feature_bank = feature_bank[:X_train.shape[0]]
            idx = idx[:X_train.shape[0]]
        if feature_bank.shape[0]<X_train.shape[0]:
            # some samples were never seen (drop_last etc.) — restrict arrays
            X_train = X_train[idx]
            Y_train = Y_train[idx]
        # t1 = time.time()
        # cluster on the last view's features only (the base/un-augmented view)
        kmeans=KMeans(n_clusters=n_cls)
        kmeans.fit(feature_bank[:,:,-1])
        # t2 = time.time()
        # print("time = ",t2-t1)
        prototypes = torch.from_numpy(kmeans.cluster_centers_)
        kmeans_label = torch.from_numpy(kmeans.labels_)
        feature_bank = torch.from_numpy(feature_bank)
        saved_X = []
        saved_Y = []
        for i in range(torch.min(kmeans_label), torch.max(kmeans_label)+1):
            index = kmeans_label==i
            f = feature_bank[index]
            # per-sample mean over views, normalized to the unit sphere
            m = f.mean(dim=-1, keepdim=True)
            x = X_train[index]
            y = Y_train[index]
            m = F.normalize(m, dim=1)
            # total squared deviation of each sample's views from its mean
            std = torch.pow(f - m, 2).sum(dim=-1, keepdim=False).sum(-1, keepdim=False)
            # keep the n_save most view-consistent samples of this cluster
            ind = std.argsort(dim=-1, descending=False)[:args.n_save]
            saved_X.append(x[ind])
            saved_Y.append(y[ind])
        saved_X = np.concatenate(saved_X, axis=0)
        saved_Y = np.concatenate(saved_Y)
        return saved_X, saved_Y
def train(train_loader, model, criterion, optimizer, epoch, args, t=0):
    """Run one training epoch of (continual) MoCo.

    Args:
        train_loader: yields ``(images, target, index)``; ``images`` is the
            list produced by ``TwoCropsTransform`` — two augmented views, an
            un-augmented anchor, and a 1-element old-task flag tensor.
            (assumes the 4-element form — TODO confirm for non-CCL methods)
        model: (DDP-wrapped) MoCo/MoCoCCL model.
        criterion: cross-entropy over the InfoNCE logits.
        optimizer: SGD-style optimizer.
        epoch (int): current epoch, used for logging.
        args: needs ``gpu``, ``method``, ``print_freq``.
        t (int): current task index (CCL incremental state / log prefix).
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(train_loader),
        [batch_time, data_time, losses, top1, top5],
        prefix="Task {},Epoch: [{}]".format(t+1, epoch))
    # switch to train mode
    model.train()
    end = time.time()
    for i, (images, _, _) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if args.gpu is not None:
            images[0] = images[0].cuda(args.gpu, non_blocking=True)
            images[1] = images[1].cuda(args.gpu, non_blocking=True)
            images[2] = images[2].cuda(args.gpu, non_blocking=True)
            is_from_old = images[3].cuda(args.gpu, non_blocking=True)
        # compute output
        if args.method == 'CCL':
            # model returns the incremental loss plus the InfoNCE logits/labels
            loss, output, target = model(im_q=images[0], im_k=images[1], im_raw=images[2], is_from_old=is_from_old, loss_fun=criterion, t=t)
            loss += criterion(output, target)
        else:
            output, target = model(im_q=images[0], im_k=images[1])
            loss = criterion(output, target)
        # acc1/acc5 are (K+1)-way contrast classifier accuracy
        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images[0].size(0))
        top1.update(acc1[0], images[0].size(0))
        top5.update(acc5[0], images[0].size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            progress.display(i)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize ``state`` to ``filename``; mirror it to ``model_best.pth.tar`` when best."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
    """Tracks the most recent value and the running average of a metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Discard all accumulated statistics."""
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        spec = self.fmt
        template = '{name} {val' + spec + '} ({avg' + spec + '})'
        return template.format(**vars(self))
class ProgressMeter(object):
    """Formats and prints one line of meter readings per displayed batch.

    Args:
        num_batches (int): total batches per epoch; sizes the ``[cur/total]``
            counter so columns line up.
        meters (list): objects whose ``str()`` is appended to each line
            (e.g. :class:`AverageMeter`).
        prefix (str): text prepended to every displayed line.
    """

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print ``prefix[batch/total]\\tmeter1\\tmeter2...`` for this batch."""
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        print('\t'.join(entries))

    def _get_batch_fmtstr(self, num_batches):
        """Build a ``[{:Nd}/total]`` template where N is total's digit count.

        (The previous version computed ``num_batches // 1`` — a no-op integer
        division — before taking the digit count; removed for clarity.)
        """
        num_digits = len(str(num_batches))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
    """Set the learning rate of every param group for this epoch.

    Uses cosine annealing when ``args.cos`` is set, otherwise a stepwise
    schedule that multiplies ``args.lr`` by 0.1 at each passed milestone.
    """
    if args.cos:  # cosine lr schedule: args.lr -> 0 over args.epochs
        lr = args.lr * 0.5 * (1. + math.cos(math.pi * epoch / args.epochs))
    else:  # stepwise lr schedule
        decays = sum(1 for milestone in args.schedule if epoch >= milestone)
        lr = args.lr * 0.1 ** decays
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
    """Return the top-k accuracies (as percentages) for each k in ``topk``.

    ``output`` is a (N, C) score matrix, ``target`` a length-N label vector;
    each returned entry is a 1-element tensor.
    """
    with torch.no_grad():
        k_max = max(topk)
        n = target.size(0)
        # indices of the k_max best-scoring classes, then transpose so that
        # row r of `hits` marks samples whose label is the (r+1)-th guess
        _, top_idx = output.topk(k_max, 1, True, True)
        top_idx = top_idx.t()
        hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))
        return [
            hits[:k].reshape(-1).float().sum(0, keepdim=True).mul_(100.0 / n)
            for k in topk
        ]
if __name__ == '__main__':
    # makedirs(..., exist_ok=True) replaces the isdir()+mkdir() pair: it is a
    # no-op when the directory exists and avoids the check-then-create race.
    os.makedirs('checkpoints', exist_ok=True)
    main()
| 24,783 | 40.306667 | 140 | py |
ContinualContrastiveLearning | ContinualContrastiveLearning-main/moco/builder.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torch.nn as nn
import torch.nn.functional as F
class MoCoCCL(nn.Module):
    """MoCo extended for Continual Contrastive Learning (CCL).

    On top of the standard MoCo query/key encoders and negative-key queue it
    maintains:
      * a slowly EMA-updated ``teacher`` encoder, used to distill similarity
        structure on replayed old-task samples, and
      * a small ``extra_sample_queue`` of keys from old-task samples that act
        as additional negatives once incremental training starts (``t > 0``).
    """

    def __init__(self, base_encoder, dim=128, K=65536, extra_sample_K=256, m=0.999, teacher_m=0.996, T=0.07, mlp=False):
        """
        Args:
            base_encoder: encoder factory, called as ``base_encoder(num_classes=dim)``.
            dim (int): feature (projection) dimension.
            K (int): size of the main negative-key queue.
            extra_sample_K (int): size of the old-sample key queue.
            m (float): EMA momentum for the key encoder.
            teacher_m (float): EMA momentum for the teacher encoder.
            T (float): softmax temperature for the contrastive logits.
            mlp (bool): if True, replace each encoder's fc with a 2-layer MLP head.
        """
        super(MoCoCCL, self).__init__()
        self.K = K
        self.extra_sample_K = extra_sample_K
        self.m = m
        self.T = T
        # index of the current task; bumped by forward_train() on task switch
        self.t = 0
        self.teacher_m = teacher_m
        # create the encoders
        # num_classes is the output fc dimension
        self.encoder_q = base_encoder(num_classes=dim)
        self.encoder_k = base_encoder(num_classes=dim)
        self.teacher = base_encoder(num_classes=dim)
        if mlp:  # hack: brute-force replacement
            dim_mlp = self.encoder_q.fc.weight.shape[1]
            self.encoder_q.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_q.fc)
            self.encoder_k.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_k.fc)
            self.teacher.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.teacher.fc)
        # the key encoder starts as an exact copy of the query encoder and is
        # only ever updated by EMA, never by gradients
        for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
            param_k.data.copy_(param_q.data)  # initialize
            param_k.requires_grad = False
        # create the queue (each column is an L2-normalized key vector)
        self.register_buffer("queue", torch.randn(dim, K))
        self.queue = nn.functional.normalize(self.queue, dim=0)
        self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
        self.register_buffer("extra_sample_queue", torch.randn(dim, self.extra_sample_K))
        self.extra_sample_queue = nn.functional.normalize(self.extra_sample_queue, dim=0)
        self.register_buffer("extra_sample_queue_ptr", torch.zeros(1, dtype=torch.long))

    @torch.no_grad()
    def update_teacher(self):
        """EMA-update the teacher from the query encoder (called once per epoch)."""
        for param_q, param_t in zip(self.encoder_q.parameters(), self.teacher.parameters()):
            param_t.data = param_t.data * self.teacher_m + param_q.data * (1 - self.teacher_m)
            param_t.requires_grad = False

    @torch.no_grad()
    def reset_teacher(self):
        """Hard-copy the query encoder's weights into the teacher (on task switch)."""
        for param_q, param_t in zip(self.encoder_q.parameters(), self.teacher.parameters()):
            param_t.data.copy_(param_q.data)
            param_t.requires_grad = False

    @torch.no_grad()
    def reset_k(self):
        """Hard-copy the query encoder's weights into the key encoder (on task switch)."""
        for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
            param_k.data.copy_(param_q.data)
            # param_t.requires_grad = False

    def begin_incremental(self):
        """Seed the old-sample queue from the first ``extra_sample_K`` main-queue keys."""
        self.extra_sample_queue[:, :] = self.queue[:, :self.extra_sample_K]

    @torch.no_grad()
    def _momentum_update_key_encoder(self):
        """
        Momentum update of the key encoder
        """
        for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
            param_k.data = param_k.data * self.m + param_q.data * (1. - self.m)

    @torch.no_grad()
    def _dequeue_and_enqueue(self, keys, is_from_old):
        """Push this batch's keys into the main queue; keys flagged as old-task
        samples are additionally written into the ``extra_sample_queue`` ring
        buffer."""
        # update queue
        # gather keys before updating queue
        keys = concat_all_gather(keys)
        batch_size = keys.shape[0]
        ptr = int(self.queue_ptr)
        assert self.K % batch_size == 0  # for simplicity
        # replace the keys at ptr (dequeue and enqueue)
        self.queue[:, ptr:ptr + batch_size] = keys.T
        ptr = (ptr + batch_size) % self.K  # move pointer
        self.queue_ptr[0] = ptr
        # update extra sample queue
        is_from_old = concat_all_gather(is_from_old)
        is_from_old = is_from_old.squeeze()
        idx = is_from_old==1
        if self.t>0 and idx.sum()>0:
            keys = keys[idx, :]
            bs = keys.shape[0]
            p1 = int(self.extra_sample_queue_ptr)
            if bs>=self.extra_sample_K:
                # more old keys than slots: overwrite the whole queue with the
                # newest extra_sample_K keys and rewind the pointer
                self.extra_sample_queue[:, :] = keys[bs-self.extra_sample_K:, :].t()
                self.extra_sample_queue_ptr[0] = 0
            else:
                # ring-buffer write that may wrap past the end of the queue
                carry = (p1+bs)//self.extra_sample_K
                remain = (p1+bs)%self.extra_sample_K
                if carry:
                    # fill the tail, then wrap the remainder to the front
                    self.extra_sample_queue[:, p1:] = keys[:self.extra_sample_K-p1, :].t()
                    if remain:
                        self.extra_sample_queue[:, :remain] = keys[self.extra_sample_K-p1:, :].t()
                    self.extra_sample_queue_ptr[0] = remain
                else:
                    if remain:
                        self.extra_sample_queue[:, p1:remain] = keys.t()
                        self.extra_sample_queue_ptr[0] = remain

    @torch.no_grad()
    def _batch_shuffle_ddp(self, x):
        """
        Batch shuffle, for making use of BatchNorm.
        *** Only support DistributedDataParallel (DDP) model. ***
        """
        # gather from all gpus
        batch_size_this = x.shape[0]
        x_gather = concat_all_gather(x)
        batch_size_all = x_gather.shape[0]
        num_gpus = batch_size_all // batch_size_this
        # random shuffle index
        idx_shuffle = torch.randperm(batch_size_all).cuda()
        # broadcast to all gpus
        torch.distributed.broadcast(idx_shuffle, src=0)
        # index for restoring
        idx_unshuffle = torch.argsort(idx_shuffle)
        # shuffled index for this gpu
        gpu_idx = torch.distributed.get_rank()
        idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
        return x_gather[idx_this], idx_unshuffle

    @torch.no_grad()
    def _batch_unshuffle_ddp(self, x, idx_unshuffle):
        """
        Undo batch shuffle.
        *** Only support DistributedDataParallel (DDP) model. ***
        """
        # gather from all gpus
        batch_size_this = x.shape[0]
        x_gather = concat_all_gather(x)
        batch_size_all = x_gather.shape[0]
        num_gpus = batch_size_all // batch_size_this
        # restored index for this gpu
        gpu_idx = torch.distributed.get_rank()
        idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]
        return x_gather[idx_this]

    def forward_encoder_q(self, images):
        """Return L2-normalized query-encoder features for ``images``."""
        q = self.encoder_q(images)  # queries: NxC
        q = nn.functional.normalize(q, dim=1)
        return q

    def forward(self, im_q, im_k=None, im_raw=None, is_from_old=None, mode='train', loss_fun=None, t=0):
        """Dispatch: ``mode='feature'`` returns query features only;
        ``mode='train'`` runs the full contrastive + incremental step."""
        assert mode in ['train', 'feature']
        if mode == 'feature':
            return self.forward_encoder_q(im_q)
        else:
            return self.forward_train(im_q, im_k, im_raw, is_from_old, loss_fun, t)

    def forward_train(self, im_q, im_k, im_raw, is_from_old, criterion, t):
        """One CCL training step.

        Returns ``(incremental_loss, logits, labels)`` where ``logits``/``labels``
        form the standard MoCo InfoNCE pair and ``incremental_loss`` carries the
        old-sample-negative and distillation terms (0.0 on the base task).
        """
        # set up for incremental learning: on the first batch of a new task,
        # seed the old-sample queue (first switch only) and snapshot q into
        # both the teacher and the key encoder
        if self.t<t:
            if self.t==0:
                self.begin_incremental()
            self.t = t
            self.reset_teacher()
            self.reset_k()
        self.teacher.eval()
        ###### Original MoCo ######
        # compute query features
        q = self.encoder_q(im_q)  # queries: NxC
        q = nn.functional.normalize(q, dim=1)
        # compute key features
        with torch.no_grad():  # no gradient to keys
            self._momentum_update_key_encoder()  # update the key encoder
            # shuffle for making use of BN
            im_k, idx_unshuffle = self._batch_shuffle_ddp(im_k)
            k = self.encoder_k(im_k)  # keys: NxC
            k = nn.functional.normalize(k, dim=1)
            # undo shuffle
            k = self._batch_unshuffle_ddp(k, idx_unshuffle)
        # compute logits
        # Einstein sum is more intuitive
        # positive logits: Nx1
        l_pos = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)
        # negative logits: NxK
        l_neg = torch.einsum('nc,ck->nk', [q, self.queue.clone().detach()])
        # logits: Nx(1+K)
        logits = torch.cat([l_pos, l_neg], dim=1)
        # apply temperature
        logits /= self.T
        # labels: positive key indicators
        labels = torch.zeros(logits.shape[0], dtype=torch.long).cuda()
        ###### incremental version of MoCo ######
        incremental_loss = 0.0
        if self.t>0:
            # extra-sample loss: old-task keys serve as additional negatives
            l_neg_extra_sample = torch.einsum('nc,ck->nk', [q, self.extra_sample_queue.clone().detach()])
            logits_extra_sample = torch.cat([l_pos, l_neg_extra_sample], dim=1)
            logits_extra_sample /= self.T
            labels_extra_sample = torch.zeros(logits_extra_sample.shape[0], dtype=torch.long).cuda()
            incremental_loss += 0.1 * criterion(logits_extra_sample, labels_extra_sample)
            # self-supervised knowledge distillation loss on replayed samples
            idx = is_from_old.squeeze()==1
            if idx.sum()>0:
                s_anchor = self.encoder_q(im_raw)
                s_anchor = nn.functional.normalize(s_anchor, dim=1)
                t_q, t_anchor = self.teacher(im_q), self.teacher(im_raw)
                t_q = nn.functional.normalize(t_q, dim=1)
                t_anchor = nn.functional.normalize(t_anchor, dim=1)
                # restrict student/teacher features to old-task samples
                s_q, s_anchor = q[idx,:], s_anchor[idx,:]
                t_q, t_anchor = t_q[idx,:].detach(), t_anchor[idx,:].detach()
                # s_q, s_anchor = q, s_anchor
                # t_q, t_anchor = t_q.detach(), t_anchor.detach()
                # similarity between augmented views and raw anchors; the
                # sharper teacher temperature (0.04 vs 0.07) sharpens targets
                s_simi = torch.mm(s_q, s_anchor.t())
                t_simi = torch.mm(t_q, t_anchor.t())
                log_s_simi = F.log_softmax(s_simi / 0.07, dim=1)
                simi_knowledge = F.softmax(t_simi / 0.04, dim=1)
                kl_loss = F.kl_div(log_s_simi, simi_knowledge, \
                    reduction='batchmean')
                incremental_loss += 0.1 * kl_loss
        # update queue and extra sample queue
        self._dequeue_and_enqueue(k, is_from_old)
        return incremental_loss, logits, labels
class MoCo(nn.Module):
    """
    Build a MoCo model with: a query encoder, a key encoder, and a queue
    https://arxiv.org/abs/1911.05722
    """

    def __init__(self, base_encoder, dim=128, K=65536, m=0.999, T=0.07, mlp=False):
        """
        dim: feature dimension (default: 128)
        K: queue size; number of negative keys (default: 65536)
        m: moco momentum of updating key encoder (default: 0.999)
        T: softmax temperature (default: 0.07)
        """
        super(MoCo, self).__init__()
        self.K = K
        self.m = m
        self.T = T
        # create the encoders
        # num_classes is the output fc dimension
        self.encoder_q = base_encoder(num_classes=dim)
        self.encoder_k = base_encoder(num_classes=dim)
        if mlp:  # hack: brute-force replacement
            dim_mlp = self.encoder_q.fc.weight.shape[1]
            self.encoder_q.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_q.fc)
            self.encoder_k.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_k.fc)
        for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
            param_k.data.copy_(param_q.data)  # initialize
            param_k.requires_grad = False  # not update by gradient
        # create the queue (each column is an L2-normalized key vector)
        self.register_buffer("queue", torch.randn(dim, K))
        self.queue = nn.functional.normalize(self.queue, dim=0)
        self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))

    @torch.no_grad()
    def _momentum_update_key_encoder(self):
        """
        Momentum update of the key encoder
        """
        for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
            param_k.data = param_k.data * self.m + param_q.data * (1. - self.m)

    @torch.no_grad()
    def _dequeue_and_enqueue(self, keys):
        """Overwrite the oldest ``batch_size`` columns of the queue with ``keys``."""
        # gather keys before updating queue
        keys = concat_all_gather(keys)
        batch_size = keys.shape[0]
        ptr = int(self.queue_ptr)
        assert self.K % batch_size == 0  # for simplicity
        # replace the keys at ptr (dequeue and enqueue)
        self.queue[:, ptr:ptr + batch_size] = keys.T
        ptr = (ptr + batch_size) % self.K  # move pointer
        self.queue_ptr[0] = ptr

    @torch.no_grad()
    def _batch_shuffle_ddp(self, x):
        """
        Batch shuffle, for making use of BatchNorm.
        *** Only support DistributedDataParallel (DDP) model. ***
        """
        # gather from all gpus
        batch_size_this = x.shape[0]
        x_gather = concat_all_gather(x)
        batch_size_all = x_gather.shape[0]
        num_gpus = batch_size_all // batch_size_this
        # random shuffle index
        idx_shuffle = torch.randperm(batch_size_all).cuda()
        # broadcast to all gpus
        torch.distributed.broadcast(idx_shuffle, src=0)
        # index for restoring
        idx_unshuffle = torch.argsort(idx_shuffle)
        # shuffled index for this gpu
        gpu_idx = torch.distributed.get_rank()
        idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
        return x_gather[idx_this], idx_unshuffle

    @torch.no_grad()
    def _batch_unshuffle_ddp(self, x, idx_unshuffle):
        """
        Undo batch shuffle.
        *** Only support DistributedDataParallel (DDP) model. ***
        """
        # gather from all gpus
        batch_size_this = x.shape[0]
        x_gather = concat_all_gather(x)
        batch_size_all = x_gather.shape[0]
        num_gpus = batch_size_all // batch_size_this
        # restored index for this gpu
        gpu_idx = torch.distributed.get_rank()
        idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]
        return x_gather[idx_this]

    def forward(self, im_q, im_k):
        """
        Input:
            im_q: a batch of query images
            im_k: a batch of key images
        Output:
            logits, targets
        """
        # compute query features
        q = self.encoder_q(im_q)  # queries: NxC
        q = nn.functional.normalize(q, dim=1)
        # compute key features
        with torch.no_grad():  # no gradient to keys
            self._momentum_update_key_encoder()  # update the key encoder
            # shuffle for making use of BN
            im_k, idx_unshuffle = self._batch_shuffle_ddp(im_k)
            k = self.encoder_k(im_k)  # keys: NxC
            k = nn.functional.normalize(k, dim=1)
            # undo shuffle
            k = self._batch_unshuffle_ddp(k, idx_unshuffle)
        # compute logits
        # Einstein sum is more intuitive
        # positive logits: Nx1
        l_pos = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)
        # negative logits: NxK
        l_neg = torch.einsum('nc,ck->nk', [q, self.queue.clone().detach()])
        # logits: Nx(1+K)
        logits = torch.cat([l_pos, l_neg], dim=1)
        # apply temperature
        logits /= self.T
        # labels: positive key indicators
        labels = torch.zeros(logits.shape[0], dtype=torch.long).cuda()
        # dequeue and enqueue
        self._dequeue_and_enqueue(k)
        return logits, labels
# utils
@torch.no_grad()
def concat_all_gather(tensor):
    """
    Performs all_gather operation on the provided tensors.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    # one receive buffer per rank, then concatenate along the batch dimension
    tensors_gather = [torch.ones_like(tensor)
        for _ in range(torch.distributed.get_world_size())]
    torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
    output = torch.cat(tensors_gather, dim=0)
    return output
ContinualContrastiveLearning | ContinualContrastiveLearning-main/moco/loader.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from PIL import ImageFilter
import random
import argparse
import os
import shutil
import time
import numpy as np
import torch
import torchvision.datasets as datasets
class ImageFolder_with_id(datasets.ImageFolder):
    """ImageFolder variant whose items also carry their dataset index.

    Each item is ``(sample, target, index_tensor)`` so downstream code can map
    a batch entry back to its position in the dataset.
    """

    def __getitem__(self, index):
        img_path, label = self.samples[index]
        img = self.loader(img_path)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            label = self.target_transform(label)
        return img, label, torch.ones(1) * index
def split_images_labels(imgs):
    """Split ImageFolder-style ``(image, label)`` pairs (trainset.imgs) into
    two parallel numpy arrays."""
    images = np.array([pair[0] for pair in imgs])
    labels = np.array([pair[1] for pair in imgs])
    return images, labels
def merge_images_labels(images, labels):
    """Zip parallel image/label sequences back into a trainset.imgs-style
    list of ``(image, label)`` tuples; lengths must match."""
    images = list(images)
    labels = list(labels)
    assert(len(images)==len(labels))
    return [(img, lab) for img, lab in zip(images, labels)]
class TwoCropsTransform:
    """Produce two augmented views (query and key) of one image.

    When ``base_transform`` is supplied, a third un-augmented anchor view is
    appended together with a 1-element flag tensor: 1 for samples replayed
    from an old task, 0 for current-task samples.
    """

    def __init__(self, view_transform, base_transform=None, is_old_sample=False):
        self.view_transform = view_transform
        self.base_transform = base_transform
        self.is_old_sample = is_old_sample

    def __call__(self, x):
        views = [self.view_transform(x), self.view_transform(x)]
        if self.base_transform is None:
            return views
        flag = torch.ones(1) if self.is_old_sample else torch.zeros(1)
        return views + [self.base_transform(x), flag]
class MultiViewTransform:
    """Generate ``num`` augmented views of an image, followed by one base view."""

    def __init__(self, view_transform, base_transform=None, num=6):
        self.view_transform = view_transform
        self.base_transform = base_transform
        self.num = num

    def __call__(self, x):
        views = [self.view_transform(x) for _ in range(self.num)]
        views.append(self.base_transform(x))
        return views
class GaussianBlur(object):
    """Random-radius Gaussian blur, as used by SimCLR
    (https://arxiv.org/abs/2002.05709)."""

    def __init__(self, sigma=[.1, 2.]):
        # [low, high] range from which the blur radius is drawn
        self.sigma = sigma

    def __call__(self, x):
        radius = random.uniform(self.sigma[0], self.sigma[1])
        return x.filter(ImageFilter.GaussianBlur(radius=radius))
| 2,561 | 27.153846 | 81 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/varnet/functions/mri_model.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from collections import defaultdict
import numpy as np
import pytorch_lightning as pl
import torch
import torchvision
from torch.utils.data import DistributedSampler, DataLoader
from .common import evaluate
from .common.utils import save_reconstructions
from .data.mri_data import SliceData
from .data import transforms
class MRIModel(pl.LightningModule):
    """
    Abstract super class for Deep Learning based reconstruction models.
    This is a subclass of the LightningModule class from pytorch_lightning, with
    some additional functionality specific to fastMRI:
        - fastMRI data loaders
        - Evaluating reconstructions
        - Visualization
        - Saving test reconstructions
    To implement a new reconstruction model, inherit from this class and implement the
    following methods:
        - train_data_transform, val_data_transform, test_data_transform:
            Create and return data transformer objects for each data split
        - training_step, validation_step, test_step:
            Define what happens in one step of training, validation and testing respectively
        - configure_optimizers:
            Create and return the optimizers
    Other methods from LightningModule can be overridden as needed.
    """

    def __init__(self, hparams):
        super().__init__()
        self.hparams = hparams

    def _create_data_loader(self, data_transform, data_partition, sample_rate=None):
        """Build a DistributedSampler-backed DataLoader for one data split."""
        sample_rate = sample_rate or self.hparams.sample_rate
        dataset = SliceData(
            root=self.hparams.data_path / f'{self.hparams.challenge}_{data_partition}',
            transform=data_transform,
            sample_rate=sample_rate,
            challenge=self.hparams.challenge
        )
        sampler = DistributedSampler(dataset)
        return DataLoader(
            dataset=dataset,
            batch_size=self.hparams.batch_size,
            num_workers=0,
            pin_memory=True,
            sampler=sampler,
        )

    def train_data_transform(self):
        raise NotImplementedError

    @pl.data_loader
    def train_dataloader(self):
        return self._create_data_loader(self.train_data_transform(), data_partition='train')

    def val_data_transform(self):
        raise NotImplementedError

    @pl.data_loader
    def val_dataloader(self):
        return self._create_data_loader(self.val_data_transform(), data_partition='val')

    def test_data_transform(self):
        raise NotImplementedError

    @pl.data_loader
    def test_dataloader(self):
        # evaluate the test split at full sample rate
        return self._create_data_loader(self.test_data_transform(), data_partition='test', sample_rate=1.)

    def _evaluate(self, val_logs):
        """Aggregate per-slice validation outputs into per-volume NMSE/SSIM/PSNR."""
        losses = []
        outputs = defaultdict(list)
        targets = defaultdict(list)
        # group slices by file name so metrics are computed per volume
        for log in val_logs:
            losses.append(log['val_loss'].cpu().numpy())
            for i, (fname, slice) in enumerate(zip(log['fname'], log['slice'])):
                outputs[fname].append((slice, log['output'][i]))
                targets[fname].append((slice, log['target'][i]))
        metrics = dict(val_loss=losses, nmse=[], ssim=[], psnr=[])
        for fname in outputs:
            # sort slices by index, move channel axis last, take complex magnitude
            output = np.stack([transforms.complex_abs_np(np.moveaxis(out,0,2)) for _, out in sorted(outputs[fname])])  ## MZD
            target = np.stack([transforms.complex_abs_np(np.moveaxis(tgt,0,2)) for _, tgt in sorted(targets[fname])])  ## MZD
            #print(target.shape,output.shape)
            metrics['nmse'].append(evaluate.nmse(target, output))
            metrics['ssim'].append(evaluate.ssim(target, output))
            metrics['psnr'].append(evaluate.psnr(target, output))
        metrics = {metric: np.mean(values) for metric, values in metrics.items()}
        print(metrics, '\n')
        return dict(log=metrics, **metrics)

    def _visualize(self, val_logs):
        """Log grids of target / reconstruction / error images to TensorBoard."""
        def _normalize(image):
            # shift to [0, max] then scale to [0, 1] for display
            image = image[np.newaxis]
            image -= image.min()
            return image / image.max()

        def _save_image(image, tag):
            grid = torchvision.utils.make_grid(torch.Tensor(image), nrow=4, pad_value=1)
            self.logger.experiment.add_image(tag, grid)

        # Only process first size to simplify visualization.
        visualize_size = val_logs[0]['output'].shape
        val_logs = [x for x in val_logs if x['output'].shape == visualize_size]
        num_logs = len(val_logs)
        num_viz_images = 16
        # stride so that at most num_viz_images slices are shown
        step = (num_logs + num_viz_images - 1) // num_viz_images
        outputs, targets = [], []
        for i in range(0, num_logs, step):
            #print(val_logs[i]['output'][0].shape)
            outputs.append(_normalize( transforms.complex_abs_np(np.moveaxis(val_logs[i]['output'][0],0,2)) ))  ######### MZD
            targets.append(_normalize( transforms.complex_abs_np(np.moveaxis(val_logs[i]['target'][0],0,2)) ))  ######### MZD
        outputs = np.stack(outputs)
        targets = np.stack(targets)
        #print(targets.shape,outputs.shape)
        _save_image(targets, 'Target')
        _save_image(outputs, 'Reconstruction')
        _save_image(np.abs(targets - outputs), 'Error')

    def validation_end(self, val_logs):
        """Lightning hook: visualize, then score the whole validation epoch."""
        self._visualize(val_logs)
        return self._evaluate(val_logs)

    def test_end(self, test_logs):
        """Lightning hook: stack per-slice outputs per volume and write them to disk."""
        outputs = defaultdict(list)
        for log in test_logs:
            for i, (fname, slice) in enumerate(zip(log['fname'], log['slice'])):
                outputs[fname].append((slice, log['output'][i]))
        for fname in outputs:
            outputs[fname] = np.stack([out for _, out in sorted(outputs[fname])])
        save_reconstructions(outputs, self.hparams.exp_dir / self.hparams.exp / 'reconstructions')
        return dict()
| 5,915 | 39.244898 | 124 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/varnet/functions/unet_model.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from torch.nn import functional as F
class ConvBlock(nn.Module):
    """Two successive (3x3 conv -> InstanceNorm -> LeakyReLU -> Dropout) stages."""

    def __init__(self, in_chans, out_chans, drop_prob):
        """
        Args:
            in_chans (int): Number of channels in the input.
            out_chans (int): Number of channels in the output.
            drop_prob (float): Dropout probability.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.drop_prob = drop_prob

        def _stage(chans_in):
            # one conv stage; `layers` keeps the same module order as before so
            # state_dict keys are unchanged
            return [
                nn.Conv2d(chans_in, out_chans, kernel_size=3, padding=1, bias=False),
                nn.InstanceNorm2d(out_chans),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Dropout2d(drop_prob),
            ]

        self.layers = nn.Sequential(*(_stage(in_chans) + _stage(out_chans)))

    def forward(self, input):
        """Map a [N, in_chans, H, W] tensor to [N, out_chans, H, W]."""
        return self.layers(input)

    def __repr__(self):
        return f'ConvBlock(in_chans={self.in_chans}, out_chans={self.out_chans}, ' \
            f'drop_prob={self.drop_prob})'
class TransposeConvBlock(nn.Module):
    """
    A Transpose Convolutional Block that consists of one convolution transpose layer
    followed by instance normalization and LeakyReLU activation.
    """

    def __init__(self, in_chans, out_chans):
        """
        Args:
            in_chans (int): Number of channels in the input.
            out_chans (int): Number of channels in the output.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.layers = nn.Sequential(
            # kernel 2 / stride 2 doubles the spatial resolution
            nn.ConvTranspose2d(in_chans, out_chans, kernel_size=2, stride=2, bias=False),
            nn.InstanceNorm2d(out_chans),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
        )

    def forward(self, input):
        """
        Args:
            input (torch.Tensor): Input tensor of shape [batch_size, self.in_chans, height, width]
        Returns:
            (torch.Tensor): Output tensor of shape [batch_size, self.out_chans, height*2, width*2]
        """
        return self.layers(input)

    def __repr__(self):
        # Bug fix: this previously reported itself as 'ConvBlock', which made
        # printed model summaries ambiguous.
        return f'TransposeConvBlock(in_chans={self.in_chans}, out_chans={self.out_chans})'
class UnetModel(nn.Module):
    """
    PyTorch implementation of a U-Net model.
    This is based on:
        Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks
        for biomedical image segmentation. In International Conference on Medical image
        computing and computer-assisted intervention, pages 234–241. Springer, 2015.
    """

    def __init__(self, in_chans, out_chans, chans, num_pool_layers, drop_prob):
        """
        Args:
            in_chans (int): Number of channels in the input to the U-Net model.
            out_chans (int): Number of channels in the output to the U-Net model.
            chans (int): Number of output channels of the first convolution layer.
            num_pool_layers (int): Number of down-sampling and up-sampling layers.
            drop_prob (float): Dropout probability.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.chans = chans
        self.num_pool_layers = num_pool_layers
        self.drop_prob = drop_prob
        # encoder: channel count doubles at every pooling level
        self.down_sample_layers = nn.ModuleList([ConvBlock(in_chans, chans, drop_prob)])
        ch = chans
        for i in range(num_pool_layers - 1):
            self.down_sample_layers += [ConvBlock(ch, ch * 2, drop_prob)]
            ch *= 2
        # bottleneck at the lowest resolution
        self.conv = ConvBlock(ch, ch * 2, drop_prob)
        # decoder: transpose-conv up-sampling + conv over skip-concatenated features
        self.up_conv = nn.ModuleList()
        self.up_transpose_conv = nn.ModuleList()
        for i in range(num_pool_layers - 1):
            self.up_transpose_conv += [TransposeConvBlock(ch * 2, ch)]
            self.up_conv += [ConvBlock(ch * 2, ch, drop_prob)]
            ch //= 2
        self.up_transpose_conv += [TransposeConvBlock(ch * 2, ch)]
        # final stage maps back to out_chans with a 1x1 convolution
        self.up_conv += [
            nn.Sequential(
                ConvBlock(ch * 2, ch, drop_prob),
                nn.Conv2d(ch, self.out_chans, kernel_size=1, stride=1),
            )]

    def forward(self, input):
        """
        Args:
            input (torch.Tensor): Input tensor of shape [batch_size, self.in_chans, height, width]
        Returns:
            (torch.Tensor): Output tensor of shape [batch_size, self.out_chans, height, width]
        """
        stack = []
        output = input
        # Apply down-sampling layers
        for i, layer in enumerate(self.down_sample_layers):
            output = layer(output)
            # keep the pre-pool activation for the matching skip connection
            stack.append(output)
            output = F.avg_pool2d(output, kernel_size=2, stride=2, padding=0)
        output = self.conv(output)
        #print(output.shape,input.shape)
        # Apply up-sampling layers
        for transpose_conv, conv in zip(self.up_transpose_conv, self.up_conv):
            downsample_layer = stack.pop()
            output = transpose_conv(output)
            # Reflect pad on the right/botton if needed to handle odd input dimensions.
            padding = [0, 0, 0, 0]
            if output.shape[-1] != downsample_layer.shape[-1]:
                padding[1] = 1  # Padding right
            if output.shape[-2] != downsample_layer.shape[-2]:
                padding[3] = 1  # Padding bottom
            if sum(padding) != 0:
                output = F.pad(output, padding, "reflect")
            output = torch.cat([output, downsample_layer], dim=1)
            output = conv(output)
        return output
| 6,318 | 33.911602 | 98 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/varnet/functions/varnet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
import pathlib
import os
import random
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from pytorch_lightning import Trainer
from torch import nn
from torch.nn import functional as F
from .common.args import Args
from .common.subsample import create_mask_for_mask_type
from .data import transforms as T
from .mri_model import MRIModel
from .unet_model import UnetModel
# Hard-coded CUDA device handles in preferred usage order.
# NOTE(review): assumes a machine with at least 4 GPUs — confirm before use.
devices = [torch.device("cuda:2"),
           torch.device("cuda:1"),
           torch.device("cuda:0"),
           torch.device("cuda:3"),]
class DataTransform:
    """
    Data Transformer for training Var Net models.
    """

    def __init__(self, resolution, mask_func=None, use_seed=True):
        """
        Args:
            mask_func (common.subsample.MaskFunc): A function that can create a mask of
                appropriate shape.
            resolution (int): Resolution of the image.
            use_seed (bool): If true, this class computes a pseudo random number generator seed
                from the filename. This ensures that the same mask is used for all the slices of
                a given volume every time.
        """
        self.mask_func = mask_func
        self.resolution = resolution
        self.use_seed = use_seed

    def __call__(self, kspace, mask, target, attrs, fname, slice):
        """
        Args:
            kspace (numpy.array): Input k-space of shape (num_coils, rows, cols, 2) for multi-coil
                data or (rows, cols, 2) for single coil data.
            mask (numpy.array): Mask from the test dataset
            target (numpy.array): Target image
            attrs (dict): Acquisition related information stored in the HDF5 object.
            fname (str): File name
            slice (int): Serial number of the slice.
        Returns:
            (tuple): tuple containing:
                masked_kspace (torch.Tensor): Masked k-space
                mask (torch.Tensor): Mask
                target (torch.Tensor): Target image converted to a torch Tensor.
                fname (str): File name
                slice (int): Serial number of the slice.
                max_value (numpy.array): Maximum value in the image volume
        """
        if target is not None:
            target = T.to_tensor(target)
            max_value = attrs['max']
        else:
            # test split: no ground truth available
            target = torch.tensor(0)
            max_value = 0.0
        kspace = T.to_tensor(kspace)
        # seeding by filename keeps the sampled mask identical across all
        # slices of one volume
        seed = None if not self.use_seed else tuple(map(ord, fname))
        acq_start = attrs['padding_left']
        acq_end = attrs['padding_right']
        if self.mask_func:
            masked_kspace, mask = T.apply_mask(
                kspace, self.mask_func, seed, (acq_start, acq_end))
        else:
            # test data already comes masked: reshape the provided 1-D column
            # mask so it broadcasts over (coils, rows, cols, 2)
            masked_kspace = kspace
            shape = np.array(kspace.shape)
            num_cols = shape[-2]
            shape[:-3] = 1
            mask_shape = [1 for _ in shape]
            mask_shape[-2] = num_cols
            mask = torch.from_numpy(mask.reshape(
                *mask_shape).astype(np.float32))
            # zero out the unacquired padding columns on both sides
            mask[:, :, :acq_start] = 0
            mask[:, :, acq_end:] = 0
        return masked_kspace, mask.byte(), target, fname, slice, max_value
class SSIM(nn.Module):
    """Differentiable SSIM loss: returns ``1 - mean(SSIM(X, Y))`` computed
    with a uniform square window, as used for fastMRI training."""
    def __init__(self, win_size=7, k1=0.01, k2=0.03):
        super().__init__()
        self.win_size = win_size
        self.k1, self.k2 = k1, k2
        # Uniform averaging kernel, registered as a buffer so it moves with
        # the module across devices without being trainable.
        kernel = torch.ones(1, 1, win_size, win_size) / win_size ** 2
        self.register_buffer('w', kernel)
        n_pix = win_size ** 2
        # Bessel correction factor for the windowed (co)variances.
        self.cov_norm = n_pix / (n_pix - 1)
    def forward(self, X, Y, data_range):
        # data_range: per-sample dynamic range, broadcast over C/H/W.
        rng = data_range[:, None, None, None]
        C1 = (self.k1 * rng) ** 2
        C2 = (self.k2 * rng) ** 2
        # Windowed first and second moments via convolution with the kernel.
        mean_x = F.conv2d(X, self.w)
        mean_y = F.conv2d(Y, self.w)
        mean_xx = F.conv2d(X * X, self.w)
        mean_yy = F.conv2d(Y * Y, self.w)
        mean_xy = F.conv2d(X * Y, self.w)
        var_x = self.cov_norm * (mean_xx - mean_x * mean_x)
        var_y = self.cov_norm * (mean_yy - mean_y * mean_y)
        cov_xy = self.cov_norm * (mean_xy - mean_x * mean_y)
        # Standard SSIM decomposition: luminance/contrast numerators and denominators.
        A1 = 2 * mean_x * mean_y + C1
        A2 = 2 * cov_xy + C2
        B1 = mean_x ** 2 + mean_y ** 2 + C1
        B2 = var_x + var_y + C2
        ssim_map = (A1 * A2) / (B1 * B2)
        return 1 - ssim_map.mean()
class NormUnet(nn.Module):
    """U-Net wrapper that maps a complex image (..., 2) to channel form,
    normalizes per (real/imag) group, pads to a U-Net-friendly size, runs
    the U-Net, and undoes padding/normalization/layout."""
    def __init__(self, chans, num_pools):
        super().__init__()
        self.unet = UnetModel(
            in_chans=2,
            out_chans=2,
            chans=chans,
            num_pool_layers=num_pools,
            drop_prob=0
        )
    def complex_to_chan_dim(self, x):
        # (b, c, h, w, 2) -> (b, 2c, h, w): move the real/imag axis into channels.
        b, c, h, w, two = x.shape
        assert two == 2
        return x.permute(0, 4, 1, 2, 3).contiguous().view(b, 2 * c, h, w)
    def chan_complex_to_last_dim(self, x):
        # Inverse of complex_to_chan_dim: (b, 2c, h, w) -> (b, c, h, w, 2).
        b, c2, h, w = x.shape
        assert c2 % 2 == 0
        c = c2 // 2
        return x.view(b, 2, c, h, w).permute(0, 2, 3, 4, 1)
    def norm(self, x):
        # Group norm
        # Normalizes the real-part channels and imag-part channels as two groups.
        b, c, h, w = x.shape
        x = x.contiguous().view(b, 2, c // 2 * h * w)
        mean = x.mean(dim=2).view(b, 2, 1, 1, 1).expand(
            b, 2, c // 2, 1, 1).contiguous().view(b, c, 1, 1)
        std = x.std(dim=2).view(b, 2, 1, 1, 1).expand(
            b, 2, c // 2, 1, 1).contiguous().view(b, c, 1, 1)
        x = x.view(b, c, h, w)
        return (x - mean) / std, mean, std
    def unnorm(self, x, mean, std):
        # Invert norm(): restore the original scale and offset.
        return x * std + mean
    def pad(self, x):
        def floor_ceil(n):
            return math.floor(n), math.ceil(n)
        b, c, h, w = x.shape
        # ((n - 1) | 15) + 1 rounds n up to the next multiple of 16, so the
        # U-Net's pooling stages divide evenly.
        w_mult = ((w - 1) | 15) + 1
        h_mult = ((h - 1) | 15) + 1
        w_pad = floor_ceil((w_mult - w) / 2)
        h_pad = floor_ceil((h_mult - h) / 2)
        x = F.pad(x, w_pad + h_pad)
        return x, (h_pad, w_pad, h_mult, w_mult)
    def unpad(self, x, h_pad, w_pad, h_mult, w_mult):
        # Crop back to the pre-pad() spatial size.
        return x[..., h_pad[0]:h_mult - h_pad[1], w_pad[0]:w_mult - w_pad[1]]
    def forward(self, x):
        x = self.complex_to_chan_dim(x)
        x, mean, std = self.norm(x)
        x, pad_sizes = self.pad(x)
        x = self.unet(x)
        x = self.unpad(x, *pad_sizes)
        x = self.unnorm(x, mean, std)
        x = self.chan_complex_to_last_dim(x)
        return x
'''
def forward(self, X):
return torch.moveaxis( self.unet(torch.moveaxis(X[0],-1,1)),1,-1 )[None,:]
'''
class VarNetBlock(nn.Module):
    """One unrolled cascade of the variational network: a learned soft
    data-consistency term plus a model-based refinement, applied in k-space."""
    def __init__(self, model):
        super(VarNetBlock, self).__init__()
        self.model = model
        # Learned weight for the data-consistency term.
        self.dc_weight = nn.Parameter(torch.ones(1))
        self.register_buffer('zero', torch.zeros(1, 1, 1, 1, 1))
    def forward(self, current_kspace, ref_kspace, mask, sens_maps):
        # NOTE(review): sens_expand/sens_reduce (the coil-combined path) are
        # defined but unused — the active return below refines per-coil
        # k-space directly; the sensitivity-map path is commented out.
        def sens_expand(x):
            return T.fft2(T.complex_mul(x, sens_maps))
        def sens_reduce(x):
            x = T.ifft2(x)
            return T.complex_mul(x, T.complex_conj(sens_maps)).sum(dim=1, keepdim=True)
        def soft_dc(x):
            # Penalize deviation from the measured samples only where the mask is set.
            return torch.where(mask, x - ref_kspace, self.zero) * self.dc_weight
        return current_kspace - \
            soft_dc(current_kspace) - \
            T.fft2(self.model(T.ifft2(current_kspace)))
        #sens_expand(self.model(sens_reduce(current_kspace)))
class SensitivityModel(nn.Module):
    """
    Estimates per-coil sensitivity maps from the fully-sampled center
    (ACS region) of masked k-space: crop the calibration lines, go to image
    space, refine each coil image with a normalizing U-Net, and normalize so
    the root-sum-of-squares magnitude is one everywhere.
    """
    def __init__(self, chans, num_pools):
        super().__init__()
        # Fix: forward() calls self.norm_unet, so leaving this assignment
        # commented out made the module raise AttributeError when used.
        self.norm_unet = NormUnet(chans, num_pools)
    def chans_to_batch_dim(self, x):
        # (b, c, ...) -> (b * c, 1, ...): treat each coil as its own batch item.
        b, c, *other = x.shape
        return x.contiguous().view(b * c, 1, *other), b
    def batch_chans_to_chan_dim(self, x, batch_size):
        # Inverse of chans_to_batch_dim.
        bc, one, *other = x.shape
        c = bc // batch_size
        return x.view(batch_size, c, *other)
    def divide_root_sum_of_squares(self, x):
        # Normalize so the RSS magnitude across coils equals 1 at every pixel.
        return x / T.root_sum_of_squares_complex(x, dim=1).unsqueeze(-1).unsqueeze(1)
    def forward(self, masked_kspace, mask):
        def get_low_frequency_lines(mask):
            # Walk outward from the center column until the mask turns off on
            # each side; assumes a contiguous block of fully-sampled center lines.
            l = r = mask.shape[-2] // 2
            while mask[..., r, :]:
                r += 1
            while mask[..., l, :]:
                l -= 1
            return l + 1, r
        l, r = get_low_frequency_lines(mask)
        num_low_freqs = r - l
        pad = (mask.shape[-2] - num_low_freqs + 1) // 2
        # Keep only the calibration region, transform to image space, and
        # refine each coil image independently.
        x = T.mask_center(masked_kspace, pad, pad + num_low_freqs)
        x = T.ifft2(x)
        x, b = self.chans_to_batch_dim(x)
        x = self.norm_unet(x)
        x = self.batch_chans_to_chan_dim(x, b)
        x = self.divide_root_sum_of_squares(x)
        return x
class VariationalNetworkModel(MRIModel):
    """End-to-end variational network: a stack of VarNetBlock cascades
    trained with an SSIM loss inside the pytorch-lightning MRIModel harness."""
    def __init__(self, hparams):
        super().__init__(hparams)
        #self.sens_net = SensitivityModel(hparams.sens_chans, hparams.sens_pools) ###################### MZD: use espirit
        self.cascades = nn.ModuleList([
            VarNetBlock(NormUnet(hparams.chans, hparams.pools))
            for _ in range(hparams.num_cascades)
        ])
        self.ssim_loss = SSIM()
    def forward(self, masked_kspace, mask, sens_maps):
        # NOTE(review): sens_maps is expected from the caller (ESPIRiT) since
        # the learned sensitivity network was disabled below.
        #sens_maps = self.sens_net(masked_kspace, mask) ###################### MZD: use espirit
        kspace_pred = masked_kspace.clone()
        for i,cascade in enumerate(self.cascades):############
            #kspace_pred = kspace_pred.to(devices[i//3])
            kspace_pred = cascade(kspace_pred, masked_kspace, mask, sens_maps)
        # NOTE(review): returns an (image, kspace[0]) tuple, unlike the
        # upstream fastMRI model which returned the RSS magnitude (below).
        return T.ifft2(kspace_pred),kspace_pred[0]
        #return T.root_sum_of_squares(T.complex_abs(T.ifft2(kspace_pred)), dim=1)
    def training_step(self, batch, batch_idx):
        # NOTE(review): calls forward() with two args, but forward() above now
        # requires sens_maps and returns a tuple — this training path looks
        # out of sync with the modified forward(); confirm before training.
        masked_kspace, mask, target, fname, _, max_value = batch
        output = self.forward(masked_kspace, mask)
        target, output = T.center_crop_to_smallest(target, output)
        ssim_loss = self.ssim_loss(output.unsqueeze(
            1), target.unsqueeze(1), data_range=max_value)
        return {'loss': ssim_loss, 'log': {'train_loss': ssim_loss.item()}}
    def validation_step(self, batch, batch_idx):
        # NOTE(review): same two-argument forward() call as training_step.
        masked_kspace, mask, target, fname, slice, max_value = batch
        output = self.forward(masked_kspace, mask)
        target, output = T.center_crop_to_smallest(target, output)
        return {
            'fname': fname,
            'slice': slice,
            'output': output.cpu().numpy(),
            'target': target.cpu().numpy(),
            'val_loss': self.ssim_loss(output.unsqueeze(1), target.unsqueeze(1), data_range=max_value),
        }
    def test_step(self, batch, batch_idx):
        masked_kspace, mask, _, fname, slice, _ = batch
        output = self.forward(masked_kspace, mask)
        b, h, w = output.shape
        # Crop to the configured resolution (or the data width, if smaller).
        crop_size = min(w, self.hparams.resolution)
        output = T.center_crop(output, (crop_size, crop_size))
        return {
            'fname': fname,
            'slice': slice,
            'output': output.cpu().numpy(),
        }
    def configure_optimizers(self):
        # Adam with a step learning-rate schedule, all driven by hparams.
        optim = torch.optim.Adam(
            self.parameters(), lr=self.hparams.lr, weight_decay=self.hparams.weight_decay)
        scheduler = torch.optim.lr_scheduler.StepLR(
            optim, self.hparams.lr_step_size, self.hparams.lr_gamma)
        return [optim], [scheduler]
    def train_data_transform(self):
        mask = create_mask_for_mask_type(self.hparams.mask_type, self.hparams.center_fractions,
                                         self.hparams.accelerations)
        # use_seed=False: a fresh random mask per training example.
        return DataTransform(self.hparams.resolution, mask, use_seed=False)
    def val_data_transform(self):
        mask = create_mask_for_mask_type(self.hparams.mask_type, self.hparams.center_fractions,
                                         self.hparams.accelerations)
        return DataTransform(self.hparams.resolution, mask)
    def test_data_transform(self):
        mask = create_mask_for_mask_type(self.hparams.mask_type, self.hparams.center_fractions,
                                         self.hparams.accelerations)
        return DataTransform(self.hparams.resolution, mask)
    @staticmethod
    def add_model_specific_args(parser):
        # Model-specific CLI flags; shared trainer flags live in main().
        parser.add_argument('--num-cascades', type=int,
                            default=12, help='Number of U-Net channels')
        parser.add_argument('--pools', type=int, default=4,
                            help='Number of U-Net pooling layers')
        parser.add_argument('--chans', type=int, default=18,
                            help='Number of U-Net channels')
        parser.add_argument('--sens-pools', type=int, default=4,
                            help='Number of U-Net pooling layers')
        parser.add_argument('--sens-chans', type=int,
                            default=8, help='Number of U-Net channels')
        parser.add_argument('--batch-size', default=1,
                            type=int, help='Mini batch size')
        parser.add_argument('--lr', type=float,
                            default=0.0003, help='Learning rate')
        parser.add_argument('--lr-step-size', type=int, default=40,
                            help='Period of learning rate decay')
        parser.add_argument('--lr-gamma', type=float, default=0.1,
                            help='Multiplicative factor of learning rate decay')
        parser.add_argument('--weight-decay', type=float, default=0.,
                            help='Strength of weight decay regularization')
        parser.add_argument('--mask_type',default='equispaced')
        return parser
def create_trainer(args):
    """Build a pytorch-lightning Trainer from parsed CLI arguments,
    using distributed data-parallel on GPUs when available."""
    if args.gpus > 0:
        backend = 'ddp'
    else:
        backend = 'ddp_cpu'
    return Trainer(
        default_save_path=args.exp_dir,
        max_epochs=args.num_epochs,
        gpus=args.gpus,
        num_nodes=args.nodes,
        weights_summary=None,
        distributed_backend=backend,
        replace_sampler_ddp=False,
    )
def run(args):
    """Train a new model or evaluate a checkpoint, depending on args.mode."""
    cudnn.enabled = True
    cudnn.benchmark = True
    if args.mode == 'train':
        trainer = create_trainer(args)
        net = VariationalNetworkModel(args)
        trainer.fit(net)
    else:  # args.mode == 'test' or args.mode == 'challenge'
        assert args.checkpoint is not None
        net = VariationalNetworkModel.load_from_checkpoint(
            str(args.checkpoint))
        net.hparams = args
        net.hparams.sample_rate = 1.
        trainer = create_trainer(args)
        trainer.test(net)
def main(args=None):
    """Parse CLI/keyword arguments, seed all RNGs, and dispatch to run().

    Args:
        args (dict, optional): overrides applied as parser defaults; when
            None, values come purely from the command line.
    """
    parser = Args()
    parser.add_argument('--mode', choices=['train', 'test'], default='train')
    parser.add_argument('--num-epochs', type=int, default=50,
                        help='Number of training epochs')
    parser.add_argument('--gpus', type=int, default=1)
    parser.add_argument('--nodes', type=int, default=1)
    parser.add_argument('--exp-dir', type=pathlib.Path, default='experiments',
                        help='Path where model and results should be saved')
    parser.add_argument('--exp', type=str,
                        help='Name of the experiment', default='default')
    parser.add_argument('--checkpoint', type=pathlib.Path,
                        help='Path to pre-trained model. Use with --mode test')
    parser = VariationalNetworkModel.add_model_specific_args(parser)
    if args is not None:
        parser.set_defaults(**args)
    args, _ = parser.parse_known_args()
    # Seed every RNG source for reproducible masks and initialization.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    run(args)
if __name__ == '__main__':
main() | 15,436 | 36.928747 | 121 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/varnet/functions/train_unet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import pathlib
import random
import numpy as np
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.logging import TestTubeLogger
from torch.nn import functional as F
from torch.optim import RMSprop
from .common.args import Args
from .common.subsample import create_mask_for_mask_type
from .data import transforms
from .mri_model import MRIModel
from .unet_model import UnetModel
#torch.backends.cudnn.enabled = True
#torch.backends.cudnn.benchmark = True
#torch.cuda.set_device(3)
import os
#os.environ['CUDA_VISIBLE_DEVICES']='3'
#_Trainer__set_random_port()
class DataTransform:
    """
    Data Transformer for training U-Net models.
    """
    def __init__(self, resolution, which_challenge, mask_func=None, use_seed=True):
        """
        Args:
            mask_func (common.subsample.MaskFunc): A function that can create a mask of
                appropriate shape.
            resolution (int): Resolution of the image.
            which_challenge (str): Either "singlecoil" or "multicoil" denoting the dataset.
            use_seed (bool): If true, this class computes a pseudo random number generator seed
                from the filename. This ensures that the same mask is used for all the slices of
                a given volume every time.
        """
        if which_challenge not in ('singlecoil', 'multicoil'):
            raise ValueError(f'Challenge should either be "singlecoil" or "multicoil"')
        self.mask_func = mask_func
        self.resolution = resolution
        self.which_challenge = which_challenge
        self.use_seed = use_seed
    def __call__(self, kspace, target, attrs, fname, slice):
        """
        Args:
            kspace (numpy.array): Input k-space of shape (num_coils, rows, cols, 2) for multi-coil
                data or (rows, cols, 2) for single coil data.
            target (numpy.array): Target image
            attrs (dict): Acquisition related information stored in the HDF5 object.
            fname (str): File name
            slice (int): Serial number of the slice.
        Returns:
            (tuple): tuple containing:
                image (torch.Tensor): Zero-filled input image.
                target (torch.Tensor): Target image converted to a torch Tensor.
                mean (float): Mean value used for normalization.
                std (float): Standard deviation value used for normalization.
        """
        kspace = transforms.to_tensor(kspace)
        # Apply mask
        if self.mask_func:
            seed = None if not self.use_seed else tuple(map(ord, fname))
            masked_kspace, mask = transforms.apply_mask(kspace, self.mask_func, seed)
        else:
            masked_kspace = kspace
        # Inverse Fourier Transform to get zero filled solution
        image = transforms.ifft2(masked_kspace)
        # Crop input image to given resolution if larger
        # NOTE(review): crop_size is computed below but no longer used — the
        # cropping branch was disabled (dead string) by the MZD edits.
        smallest_width = min(self.resolution, image.shape[-2])
        smallest_height = min(self.resolution, image.shape[-3])
        if target is not None:
            smallest_width = min(smallest_width, target.shape[-1])
            smallest_height = min(smallest_height, target.shape[-2])
        crop_size = (smallest_height, smallest_width)
        ######################################## NO CROP ################################################## MZD
        '''
        image = transforms.complex_center_crop(image, crop_size)
        ##############
        temp = image.clone()
        temp = torch.zeros([image.shape[0],self.resolution,self.resolution,image.shape[-1]])
        width_diff = (self.resolution-image.shape[-2])//2
        height_diff = (self.resolution-image.shape[-3])//2
        ws = width_diff + int(image.shape[-2]%2)
        we = temp.shape[-2]-width_diff
        #print(ws,we,width_diff,image.shape)
        hs = height_diff + int(image.shape[-3]%2)
        he = temp.shape[-3]-height_diff
        temp[:,hs:he,ws:we,:] = image
        # Absolute value
        image = transforms.complex_abs(temp) ############
        '''
        ################################################################################################### MZD
        # Apply Root-Sum-of-Squares if multicoil data
        if self.which_challenge == 'multicoil':
            image = transforms.root_sum_of_squares(image)
        # Move the real/imag axis to the front so it becomes the channel dim.
        image = torch.moveaxis(image , 2 , 0) ############################# MZD
        # Normalize input
        image, mean, std = transforms.normalize_instance(image, eps=1e-11)
        image = image.clamp(-6, 6)
        #print(image.shape)
        # Normalize target
        if target is not None:
            # Target is rebuilt from the fully-sampled k-space (no crop).
            target = transforms.ifft2(kspace) ############################# MZD
            target = torch.moveaxis( transforms.root_sum_of_squares(target) , 2 , 0) ############################# MZD
            #print(target.shape)
            #im = transform.complex_abs(kspace)
            ############################### NO CROP - TARGET IS IFFT2(KSPACE) ##################################### MZD
            '''
            target = transforms.to_tensor(target)
            target = transforms.center_crop(target, crop_size)
            #print(target.shape)
            ##############
            temp = target.clone()
            temp = torch.zeros([self.resolution,self.resolution])
            width_diff = (self.resolution-target.shape[-1])//2
            height_diff = (self.resolution-target.shape[-2])//2
            ws = width_diff + int(target.shape[-1]%2)
            we = temp.shape[-1]-width_diff
            hs = height_diff + int(target.shape[-2]%2)
            he = temp.shape[-2]-height_diff
            temp[hs:he,ws:we] = target
            ###############
            '''
            ##################################################################################### MZD
            # Target shares the input's normalization statistics.
            target = transforms.normalize(target, mean, std, eps=1e-11)
            target = target.clamp(-6, 6)
        else:
            target = torch.Tensor([0])
        return image, target, mean, std, fname, slice
class UnetMRIModel(MRIModel):
    """U-Net baseline reconstruction model trained with an L1 loss inside
    the pytorch-lightning MRIModel harness."""
    def __init__(self, hparams):
        super().__init__(hparams)
        self.unet = UnetModel(
            in_chans=hparams.in_chans, ############################################################## MZD
            out_chans=hparams.in_chans, ############################################################## MZD
            chans=hparams.num_chans,
            num_pool_layers=hparams.num_pools,
            drop_prob=hparams.drop_prob
        )
    def forward(self, input):
        # NOTE(review): parameter `input` shadows the builtin; kept for API
        # compatibility with the lightning harness.
        return self.unet(input) #(input.unsqueeze(1)).squeeze(1) ############## MZD
    def training_step(self, batch, batch_idx):
        input, target, mean, std, _, _ = batch
        #print(input.shape,target.shape)
        output = self.forward(input)
        loss = F.l1_loss(output, target)
        logs = {'loss': loss.item()}
        return dict(loss=loss, log=logs)
    def validation_step(self, batch, batch_idx):
        input, target, mean, std, fname, slice = batch
        output = self.forward(input)
        #print(output.shape)
        # Undo the per-instance normalization before exporting for metrics.
        mean = mean.unsqueeze(1).unsqueeze(2)
        std = std.unsqueeze(1).unsqueeze(2)
        return {
            'fname': fname,
            'slice': slice,
            'output': (output * std + mean).cpu().numpy(),
            'target': (target * std + mean).cpu().numpy(),
            'val_loss': F.l1_loss(output, target),
        }
    def test_step(self, batch, batch_idx):
        input, _, mean, std, fname, slice = batch
        output = self.forward(input)
        mean = mean.unsqueeze(1).unsqueeze(2)
        std = std.unsqueeze(1).unsqueeze(2)
        return {
            'fname': fname,
            'slice': slice,
            'output': (output * std + mean).cpu().numpy(),
        }
    def configure_optimizers(self):
        # RMSprop with a step learning-rate schedule, all driven by hparams.
        optim = RMSprop(self.parameters(), lr=self.hparams.lr, weight_decay=self.hparams.weight_decay)
        scheduler = torch.optim.lr_scheduler.StepLR(optim, self.hparams.lr_step_size, self.hparams.lr_gamma)
        return [optim], [scheduler]
    def train_data_transform(self):
        mask = create_mask_for_mask_type(self.hparams.mask_type, self.hparams.center_fractions,
                                         self.hparams.accelerations)
        # use_seed=False: a fresh random mask per training example.
        return DataTransform(self.hparams.resolution, self.hparams.challenge, mask, use_seed=False)
    def val_data_transform(self):
        mask = create_mask_for_mask_type(self.hparams.mask_type, self.hparams.center_fractions,
                                         self.hparams.accelerations)
        return DataTransform(self.hparams.resolution, self.hparams.challenge, mask)
    def test_data_transform(self):
        # Test data is already masked, so no mask function is supplied.
        return DataTransform(self.hparams.resolution, self.hparams.challenge)
    @staticmethod
    def add_model_specific_args(parser):
        # Model-specific CLI flags; shared trainer flags live in __main__.
        parser.add_argument('--num-pools', type=int, default=4, help='Number of U-Net pooling layers')
        parser.add_argument('--drop-prob', type=float, default=0.0, help='Dropout probability')
        parser.add_argument('--num-chans', type=int, default=32, help='Number of U-Net channels')
        parser.add_argument('--batch-size', default=16, type=int, help='Mini batch size')
        parser.add_argument('--lr', type=float, default=0.001, help='Learning rate')
        parser.add_argument('--lr-step-size', type=int, default=40,
                            help='Period of learning rate decay')
        parser.add_argument('--lr-gamma', type=float, default=0.1,
                            help='Multiplicative factor of learning rate decay')
        parser.add_argument('--weight-decay', type=float, default=0.,
                            help='Strength of weight decay regularization')
        parser.add_argument('--mask_type',default='random')
        parser.add_argument('--in-chans', type=int, default=2, help='Number of U-Net input (and output) channels')
        return parser
def create_trainer(args, logger):
    """Build a pytorch-lightning Trainer for U-Net training from parsed CLI
    arguments and an optional experiment logger."""
    trainer_kwargs = dict(
        logger=logger,
        default_save_path=args.exp_dir,
        checkpoint_callback=True,
        max_nb_epochs=args.num_epochs,
        gpus=args.gpus,
        distributed_backend='ddp',
        check_val_every_n_epoch=1,
        val_check_interval=1.,
        early_stop_callback=False,
    )
    return Trainer(**trainer_kwargs)
def main(args):
    """Train a new U-Net model or evaluate a checkpoint, depending on args.mode."""
    if args.mode == 'train':
        # Reuse logger version 0 when resuming so logs continue in place.
        load_version = 0 if args.resume else None
        logger = TestTubeLogger(save_dir=args.exp_dir, name=args.exp, version=load_version)
        trainer = create_trainer(args, logger)
        model = UnetMRIModel(args)
        trainer.fit(model)
    else:  # args.mode == 'test'
        assert args.checkpoint is not None
        model = UnetMRIModel.load_from_checkpoint(str(args.checkpoint))
        # Evaluate on the full dataset regardless of the training sample rate.
        model.hparams.sample_rate = 1.
        trainer = create_trainer(args, logger=False)
        trainer.test(model)
if __name__ == '__main__':
    # Assemble shared CLI flags, then the model-specific ones.
    parser = Args()
    parser.add_argument('--mode', choices=['train', 'test'], default='train')
    parser.add_argument('--num-epochs', type=int, default=50, help='Number of training epochs')
    parser.add_argument('--gpus', type=int, default=1)
    parser.add_argument('--exp-dir', type=pathlib.Path, default='experiments',
                        help='Path where model and results should be saved')
    parser.add_argument('--exp', type=str, help='Name of the experiment')
    parser.add_argument('--checkpoint', type=pathlib.Path,
                        help='Path to pre-trained model. Use with --mode test')
    parser.add_argument('--resume', action='store_true',
                        help='If set, resume the training from a previous model checkpoint. ')
    parser = UnetMRIModel.add_model_specific_args(parser)
    args = parser.parse_args()
    # Seed every RNG source for reproducible masks and initialization.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    main(args)
| 12,100 | 41.609155 | 119 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/varnet/functions/helpers.py | import torch
import numpy as np
from torch.autograd import Variable
# Default tensor type for these helpers: single-precision CUDA tensors.
# NOTE(review): helpers that `.type(dtype)` therefore require a GPU at runtime.
dtype = torch.cuda.FloatTensor
class MaskFunc:
    """
    ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
    Generates a Cartesian k-space sub-sampling mask of a requested shape.
    For data with N columns the mask keeps:
      1. N_low_freqs = round(N * center_fraction) contiguous center columns
         (the low frequencies), always on; and
      2. each remaining column independently with probability
         prob = (N / acceleration - N_low_freqs) / (N - N_low_freqs),
    so the expected number of kept columns is N / acceleration.
    """
    def __init__(self, center_fractions, accelerations):
        """
        Args:
            center_fractions (List[float]): fraction of low-frequency columns
                to retain; one entry is drawn uniformly per call.
            accelerations (List[int]): under-sampling factors, paired
                index-wise with center_fractions.
        Raises:
            ValueError: if the two lists differ in length.
        """
        if len(center_fractions) != len(accelerations):
            raise ValueError('Number of center fractions should match number of accelerations')
        self.center_fractions = center_fractions
        self.accelerations = accelerations
        self.rng = np.random.RandomState()
    def __call__(self, shape, seed=None):
        """
        Args:
            shape (iterable[int]): target mask shape (>= 3 dims); columns are
                drawn along the second-to-last dimension.
            seed (int, optional): RNG seed for a reproducible mask.
        Returns:
            torch.Tensor: float32 mask broadcastable to ``shape``.
        Raises:
            ValueError: if ``shape`` has fewer than 3 dimensions.
        """
        if len(shape) < 3:
            raise ValueError('Shape should have 3 or more dimensions')
        self.rng.seed(seed)
        num_cols = shape[-2]
        # Draw one (center_fraction, acceleration) pair at random.
        choice = self.rng.randint(0, len(self.accelerations))
        center_fraction = self.center_fractions[choice]
        acceleration = self.accelerations[choice]
        num_low_freqs = int(round(num_cols * center_fraction))
        # Keep-probability for the non-center columns so that the expected
        # total equals num_cols / acceleration.
        prob = (num_cols / acceleration - num_low_freqs) / (num_cols - num_low_freqs)
        column_mask = self.rng.uniform(size=num_cols) < prob
        pad = (num_cols - num_low_freqs + 1) // 2
        column_mask[pad:pad + num_low_freqs] = True
        # Reshape to all-singleton dims except the column axis.
        broadcast_shape = [1] * len(shape)
        broadcast_shape[-2] = num_cols
        return torch.from_numpy(column_mask.reshape(*broadcast_shape).astype(np.float32))
def np_to_var(img_np, dtype = torch.cuda.FloatTensor):
    '''
    ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
    Wrap a numpy image in a torch Variable with a leading batch axis:
    C x W x H [0..1] -> 1 x C x W x H [0..1].
    (``dtype`` is kept for API compatibility; it is not applied here.)
    '''
    tensor = torch.from_numpy(img_np)
    return Variable(tensor[None, :])
def var_to_np(img_var):
    '''
    ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
    Convert a torch Variable/tensor with a leading batch axis back to a
    numpy array: 1 x C x W x H [0..1] -> C x W x H [0..1].
    '''
    batch = img_var.data.cpu().numpy()
    return batch[0]
def ksp2measurement(ksp):
    # Stack real and imaginary parts, move them to the trailing axis, and
    # wrap as a (1, slices, x, y, 2) Variable.
    parts = np.array([np.real(ksp), np.imag(ksp)])
    return np_to_var(np.transpose(parts, (1, 2, 3, 0)))
def root_sum_of_squares(data, dim=0):
    """
    Root Sum of Squares (RSS) reduction of ``data`` along one dimension.
    Args:
        data (torch.Tensor): the input tensor
        dim (int): the dimension along which to apply the RSS transform
    Returns:
        torch.Tensor: the RSS values, with ``dim`` reduced away
    """
    return (data ** 2).sum(dim).sqrt()
def rss_torch(im):
    '''
    Coil-combine complex coil images via root-sum-of-squares over dim 0.
    '''
    return (torch.abs(im) ** 2).sum(0).sqrt()
def crop_center(img,cropx,cropy):
    """Return the central ``cropy`` x ``cropx`` window of a 2-D array."""
    height, width = img.shape
    col0 = width // 2 - cropx // 2
    row0 = height // 2 - cropy // 2
    return img[row0:row0 + cropy, col0:col0 + cropx]
def my_crop(data,shape):
    """
    Center-crop ``data`` along its first two axes to ``shape``.
    Args:
        data (torch.Tensor): input with at least 2 dimensions; cropping uses
            bounds taken from dimensions -3 and -2.
        shape (int, int): output size; each entry must be positive and no
            larger than the corresponding data dimension.
    Returns:
        torch.Tensor: the center-cropped view
    """
    assert 0 < shape[0] <= data.shape[-3]
    assert 0 < shape[1] <= data.shape[-2]
    row_start = (data.shape[-3] - shape[0]) // 2
    col_start = (data.shape[-2] - shape[1]) // 2
    return data[row_start:row_start + shape[0], col_start:col_start + shape[1], ...]
def channels2imgs(out):
    """
    Collapse interleaved (real, imag) channel pairs into magnitude images:
    (2*C, H, W) -> (C, H, W). An odd trailing channel is dropped.
    """
    num_pairs = out.shape[0] // 2
    imgs = np.zeros((num_pairs, out.shape[1], out.shape[2]))
    for k in range(num_pairs):
        re, im = out[2 * k], out[2 * k + 1]
        imgs[k] = np.sqrt(re ** 2 + im ** 2)
    return imgs
def forwardm(img,mask):
    # img has dimension (2*num_slices, x,y)
    # output has dimension (1, num_slices, x, y, 2)
    # Forward MRI measurement operator: re-pack interleaved (real, imag)
    # channels into complex pairs, FFT each slice, and apply the k-space
    # undersampling mask.
    # NOTE(review): the indexing below treats img as (1, 2*num_slices, x, y),
    # not (2*num_slices, x, y) as the comment above says — confirm callers.
    # NOTE(review): uses the module-level CUDA `dtype`, so this requires a GPU.
    mask = np_to_var(mask)[0].type(dtype)
    s = img.shape
    ns = int(s[1]/2) # number of slices
    fimg = Variable( torch.zeros( (s[0],ns,s[2],s[3],2 ) ) ).type(dtype)
    for i in range(ns):
        # Channel 2i is the real part, channel 2i+1 the imaginary part.
        fimg[0,i,:,:,0] = img[0,2*i,:,:]
        fimg[0,i,:,:,1] = img[0,2*i+1,:,:]
    Fimg = fft2(fimg) # dim: (1,num_slices,x,y,2)
    for i in range(ns):
        # Zero out the k-space samples that were not acquired.
        Fimg[0,i,:,:,0] *= mask
        Fimg[0,i,:,:,1] *= mask
    return Fimg
def get_mask(slice_ksp_torchtensor, slice_ksp,factor=4,cent=0.07):
    # Build (or reuse) an undersampling mask whose realized acceleration is
    # within a tolerance of `factor`, then return it as a 2-D mask padded to
    # the data width plus its 1-D and 2-D numpy forms.
    # NOTE(review): `f` and `to_tensor` are not defined in this module — the
    # try-branch always raises NameError (swallowed by the bare `except`), and
    # the final to_tensor(...) call will fail unless the caller's namespace
    # provides it. The bare `except` also hides real errors; confirm imports.
    try: # if the file already has a mask
        temp = np.array([1 if e else 0 for e in f["mask"]])
        temp = temp[np.newaxis].T
        temp = np.array([[temp]])
        mask = to_tensor(temp).type(dtype).detach().cpu()
    except: # if we need to create a mask
        desired_factor = factor # desired under-sampling factor
        undersampling_factor = 0
        tolerance = 0.03
        # Re-sample random masks until the realized factor is close enough.
        while undersampling_factor < desired_factor - tolerance or undersampling_factor > desired_factor + tolerance:
            mask_func = MaskFunc(center_fractions=[cent], accelerations=[desired_factor]) # Create the mask function object
            masked_kspace, mask = apply_mask(slice_ksp_torchtensor, mask_func=mask_func) # Apply the mask to k-space
            mask1d = var_to_np(mask)[0,:,0]
            undersampling_factor = len(mask1d) / sum(mask1d)
    mask1d = var_to_np(mask)[0,:,0]
    # The provided mask and data have last dim of 368, but the actual data is smaller.
    # To prevent forcing the network to learn outside the data region, we force the mask to 0 there.
    mask1d[:mask1d.shape[-1]//2-160] = 0
    mask1d[mask1d.shape[-1]//2+160:] =0
    mask2d = np.repeat(mask1d[None,:], slice_ksp.shape[1], axis=0).astype(int) # Turning 1D Mask into 2D that matches data dimensions
    mask2d = np.pad(mask2d,((0,),((slice_ksp.shape[-1]-mask2d.shape[-1])//2,)),mode='constant') # Zero padding to make sure dimensions match up
    mask = to_tensor( np.array( [[mask2d[0][np.newaxis].T]] ) ).type(dtype).detach().cpu()
    return mask, mask1d, mask2d
def apply_mask(data, mask_func = None, mask = None, seed=None):
    """
    ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
    Subsample k-space by elementwise multiplication with a mask.
    Args:
        data (torch.Tensor): k-space with at least 3 dims; dims -3 and -2 are
            spatial and the last dim has size 2 (real/imag).
        mask_func (callable): maps (shape, seed) -> mask; used only when
            ``mask`` is not supplied.
        mask (torch.Tensor, optional): a pre-built mask to apply directly.
        seed (int or 1-d array_like, optional): RNG seed for ``mask_func``.
    Returns:
        (tuple): (masked k-space, mask)
    """
    mask_shape = np.array(data.shape)
    mask_shape[:-3] = 1  # broadcast over every leading (batch/coil) axis
    if mask is None:
        mask = mask_func(mask_shape, seed)
    return data * mask, mask
def fft(input, signal_ndim, normalized=False):
    """Replacement for the removed ``torch.fft`` function: n-dimensional FFT
    over the last ``signal_ndim`` axes of a real-view complex tensor (last
    dimension of size 2). Prints a message and returns None when
    ``signal_ndim`` is outside 1..3."""
    if signal_ndim < 1 or signal_ndim > 3:
        print("Signal ndim out of range, was", signal_ndim, "but expected a value between 1 and 3, inclusive")
        return
    axes = {1: -1, 2: (-2, -1), 3: (-3, -2, -1)}[signal_ndim]
    norm = "ortho" if normalized else "backward"
    spectrum = torch.fft.fftn(torch.view_as_complex(input), dim=axes, norm=norm)
    return torch.view_as_real(spectrum)
def ifft(input, signal_ndim, normalized=False):
    """Replacement for the removed ``torch.ifft`` function: n-dimensional
    inverse FFT over the last ``signal_ndim`` axes of a real-view complex
    tensor (last dimension of size 2). Prints a message and returns None when
    ``signal_ndim`` is outside 1..3."""
    if signal_ndim < 1 or signal_ndim > 3:
        print("Signal ndim out of range, was", signal_ndim, "but expected a value between 1 and 3, inclusive")
        return
    axes = {1: -1, 2: (-2, -1), 3: (-3, -2, -1)}[signal_ndim]
    norm = "ortho" if normalized else "backward"
    spectrum = torch.fft.ifftn(torch.view_as_complex(input), dim=axes, norm=norm)
    return torch.view_as_real(spectrum)
def fft2(data):
    """
    ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
    Apply centered 2 dimensional Fast Fourier Transform. It calls the fft function above to make it compatible with the latest version of pytorch.
    Args:
        data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
            -3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
            assumed to be batch dimensions.
    Returns:
        torch.Tensor: The FFT of the input.
    """
    assert data.size(-1) == 2
    data = ifftshift(data, dim=(-3, -2))  # move the image center to the corner
    data = fft(data, 2, normalized=True)  # orthonormal 2-D FFT
    data = fftshift(data, dim=(-3, -2))   # move DC back to the center
    return data
def ifft2(data):
    """
    ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
    Apply centered 2-dimensional Inverse Fast Fourier Transform. It calls the ifft function above to make it compatible with the latest version of pytorch.
    Args:
        data (torch.Tensor): Complex valued input data containing at least 3 dimensions: dimensions
            -3 & -2 are spatial dimensions and dimension -1 has size 2. All other dimensions are
            assumed to be batch dimensions.
    Returns:
        torch.Tensor: The IFFT of the input.
    """
    assert data.size(-1) == 2
    data = ifftshift(data, dim=(-3, -2))  # move DC to the corner
    data = ifft(data, 2, normalized=True) # orthonormal 2-D inverse FFT
    data = fftshift(data, dim=(-3, -2))   # move the image center back
    return data
def complex_abs(data):
    """
    ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
    Magnitude of a complex tensor stored as (..., 2) real/imag pairs.
    Args:
        data (torch.Tensor): complex tensor whose final dimension has size 2.
    Returns:
        torch.Tensor: elementwise absolute value, with the last dim reduced.
    """
    assert data.size(-1) == 2
    return data.pow(2).sum(dim=-1).sqrt()
def fftshift(x, dim=None):
    """
    ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
    Similar to np.fft.fftshift but applies to PyTorch Tensors.
    Shifts the zero-frequency component to the center of the given axes.
    """
    if dim is None:
        dim = tuple(range(x.dim()))
        shift = [size // 2 for size in x.shape]
    elif isinstance(dim, int):
        shift = x.shape[dim] // 2
    else:
        shift = [x.shape[axis] // 2 for axis in dim]
    return roll(x, shift, dim)
def ifftshift(x, dim=None):
    """
    ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
    Similar to np.fft.ifftshift but applies to PyTorch Tensors.

    Args:
        x (torch.Tensor): Input tensor.
        dim (int or iterable of int, optional): Dimension(s) along which to
            shift. Defaults to all dimensions.

    Returns:
        torch.Tensor: ``x`` cyclically shifted so that fftshift is undone;
        the shift is ``(n + 1) // 2`` per axis (differs from fftshift for
        odd-length axes).
    """
    if dim is None:
        dim = tuple(range(x.dim()))
        # Use a distinct loop variable: the original comprehension shadowed
        # the `dim` parameter it had just assigned.
        shift = [(n + 1) // 2 for n in x.shape]
    elif isinstance(dim, int):
        shift = (x.shape[dim] + 1) // 2
    else:
        shift = [(x.shape[i] + 1) // 2 for i in dim]
    return roll(x, shift, dim)
def roll(x, shift, dim):
    """
    ref: https://github.com/facebookresearch/fastMRI/tree/master/fastmri
    Similar to np.roll but applies to PyTorch Tensors.

    Args:
        x (torch.Tensor): Input tensor.
        shift (int or sequence of int): Number of places to roll; a sequence
            is paired element-wise with ``dim``.
        dim (int or sequence of int): Dimension(s) along which to roll.

    Returns:
        torch.Tensor: ``x`` rolled cyclically along the given dimension(s).
    """
    if isinstance(shift, (tuple, list)):
        # Roll one axis at a time, recursing on the scalar case.
        assert len(shift) == len(dim)
        for amount, axis in zip(shift, dim):
            x = roll(x, amount, axis)
        return x
    shift = shift % x.size(dim)
    if not shift:
        return x
    split = x.size(dim) - shift
    head = x.narrow(dim, 0, split)
    tail = x.narrow(dim, split, shift)
    return torch.cat((tail, head), dim=dim)
| 13,056 | 36.412607 | 155 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/varnet/functions/common/utils.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import json
import h5py
def save_reconstructions(reconstructions, out_dir):
    """
    Saves the reconstructions from a model into h5 files that is appropriate for submission
    to the leaderboard.

    Args:
        reconstructions (dict[str, np.array]): Maps input filenames to the
            corresponding reconstruction volumes
            (shape: num_slices x height x width).
        out_dir (pathlib.Path): Directory in which one .h5 file is written
            per input filename.
    """
    out_dir.mkdir(exist_ok=True)
    for name in reconstructions:
        # One HDF5 file per input volume, holding a single dataset.
        with h5py.File(out_dir / name, 'w') as out_file:
            out_file.create_dataset('reconstruction', data=reconstructions[name])
def tensor_to_complex_np(data):
    """
    Converts a complex torch tensor to numpy array.

    Args:
        data (torch.Tensor): Tensor whose trailing dimension holds the real
            and imaginary parts (size 2).

    Returns:
        np.array: Complex numpy version of data.
    """
    as_np = data.numpy()
    real_part = as_np[..., 0]
    imag_part = as_np[..., 1]
    return real_part + 1j * imag_part
| 1,187 | 28.7 | 91 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/varnet/functions/common/test_subsample.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import pytest
import torch
from common.subsample import MaskFunc
@pytest.mark.parametrize("center_fracs, accelerations, batch_size, dim", [
    ([0.2], [4], 4, 320),
    ([0.2, 0.4], [4, 8], 2, 368),
])
def test_mask_reuse(center_fracs, accelerations, batch_size, dim):
    """Masks generated with the same seed must be identical across calls."""
    mask_func = MaskFunc(center_fracs, accelerations)
    shape = (batch_size, dim, dim, 2)
    # Generate three masks with one fixed seed; all must agree element-wise.
    masks = [mask_func(shape, seed=123) for _ in range(3)]
    assert torch.all(masks[0] == masks[1])
    assert torch.all(masks[1] == masks[2])
@pytest.mark.parametrize("center_fracs, accelerations, batch_size, dim", [
    ([0.2], [4], 4, 320),
    ([0.2, 0.4], [4, 8], 2, 368),
])
def test_mask_low_freqs(center_fracs, accelerations, batch_size, dim):
    """The mask must keep an all-ones band of low-frequency (center) columns."""
    mask_func = MaskFunc(center_fracs, accelerations)
    shape = (batch_size, dim, dim, 2)
    mask = mask_func(shape, seed=123)
    mask_shape = [1 for _ in shape]
    mask_shape[-2] = dim
    assert list(mask.shape) == mask_shape
    # Flatten the (1, ..., dim, 1) mask so the slice below indexes the
    # column (second-to-last) axis. The original sliced axis 0, which has
    # size 1: for pad > 0 that slice is empty and np.all(empty) is
    # vacuously True, so the check could never fail.
    mask_1d = mask.reshape(dim)
    num_low_freqs_matched = False
    for center_frac in center_fracs:
        num_low_freqs = int(round(dim * center_frac))
        pad = (dim - num_low_freqs + 1) // 2
        if np.all(mask_1d[pad:pad + num_low_freqs].numpy() == 1):
            num_low_freqs_matched = True
    assert num_low_freqs_matched
| 1,506 | 30.395833 | 74 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/varnet/functions/common/subsample.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch
def create_mask_for_mask_type(mask_type_str, center_fractions, accelerations):
if mask_type_str == 'random':
return RandomMaskFunc(center_fractions, accelerations)
elif mask_type_str == 'equispaced':
return EquispacedMaskFunc(center_fractions, accelerations)
else:
raise Exception(f"{mask_type_str} not supported")
class MaskFunc():
    """Base class for k-space sub-sampling mask generators.

    Holds parallel lists of (center_fraction, acceleration) settings and a
    private RandomState used to pick one pairing per call.
    """

    def __init__(self, center_fractions, accelerations):
        """
        Args:
            center_fractions (List[float]): Fraction of low-frequency columns
                to be retained. If multiple values are provided, one of them
                is chosen uniformly at random each time.
            accelerations (List[int]): Amount of under-sampling; must have the
                same length as center_fractions. If multiple values are
                provided, one of them is chosen uniformly at random each time.

        Raises:
            ValueError: If the two lists differ in length.
        """
        if len(center_fractions) != len(accelerations):
            raise ValueError('Number of center fractions should match number of accelerations')
        self.center_fractions = center_fractions
        self.accelerations = accelerations
        self.rng = np.random.RandomState()

    def choose_acceleration(self):
        """Pick one (center_fraction, acceleration) pair uniformly at random."""
        idx = self.rng.randint(0, len(self.accelerations))
        return self.center_fractions[idx], self.accelerations[idx]
class RandomMaskFunc(MaskFunc):
    """
    RandomMaskFunc creates a sub-sampling mask of a given shape.
    The mask selects a subset of columns from the input k-space data. If the k-space data has N
    columns, the mask picks out:
        1. N_low_freqs = (N * center_fraction) columns in the center corresponding to
           low-frequencies
        2. The other columns are selected uniformly at random with a probability equal to:
           prob = (N / acceleration - N_low_freqs) / (N - N_low_freqs).
    This ensures that the expected number of columns selected is equal to (N / acceleration).
    It is possible to use multiple center_fractions and accelerations, in which case one possible
    (center_fraction, acceleration) pair is chosen uniformly at random each time the
    RandomMaskFunc object is called.
    For example, if accelerations = [4, 8] and center_fractions = [0.08, 0.04], then there
    is a 50% probability that 4-fold acceleration with 8% center fraction is selected and a 50%
    probability that 8-fold acceleration with 4% center fraction is selected.
    """

    def __init__(self, center_fractions, accelerations):
        """
        Args:
            center_fractions (List[float]): Fraction of low-frequency columns to be retained.
                If multiple values are provided, then one of these numbers is chosen uniformly
                each time.
            accelerations (List[int]): Amount of under-sampling. This should have the same length
                as center_fractions. If multiple values are provided, then one of these is chosen
                uniformly each time. An acceleration of 4 retains 25% of the columns, but they may
                not be spaced evenly.
        """
        # The validation and attribute set-up are identical to the base
        # class's; delegate instead of duplicating MaskFunc.__init__ verbatim.
        super().__init__(center_fractions, accelerations)

    def __call__(self, shape, seed=None):
        """
        Args:
            shape (iterable[int]): The shape of the mask to be created. The shape should have
                at least 3 dimensions. Samples are drawn along the second last dimension.
            seed (int, optional): Seed for the random number generator. Setting the seed
                ensures the same mask is generated each time for the same shape.
        Returns:
            torch.Tensor: A mask of the specified shape.
        """
        if len(shape) < 3:
            raise ValueError('Shape should have 3 or more dimensions')

        # Re-seeding here makes the mask reproducible for a fixed seed.
        self.rng.seed(seed)
        num_cols = shape[-2]

        center_fraction, acceleration = self.choose_acceleration()

        # Keep each non-central column with probability `prob`, chosen so the
        # expected number of kept columns equals num_cols / acceleration.
        num_low_freqs = int(round(num_cols * center_fraction))
        prob = (num_cols / acceleration - num_low_freqs) / (num_cols - num_low_freqs)
        mask = self.rng.uniform(size=num_cols) < prob
        # Always keep the centered band of low-frequency columns.
        pad = (num_cols - num_low_freqs + 1) // 2
        mask[pad:pad + num_low_freqs] = True

        # Reshape so the mask broadcasts along every axis except the columns.
        mask_shape = [1 for _ in shape]
        mask_shape[-2] = num_cols
        mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))

        return mask
class EquispacedMaskFunc(MaskFunc):
    """
    EquispacedMaskFunc creates a sub-sampling mask of a given shape.
    The mask selects a subset of columns from the input k-space data. If the k-space data has N
    columns, the mask picks out:
        1. N_low_freqs = (N * center_fraction) columns in the center corresponding to
           low-frequencies
        2. The other columns are selected with equal spacing at a proportion that reaches the
           desired acceleration rate taking into consideration the number of low frequencies. This
           ensures that the expected number of columns selected is equal to (N / acceleration)
    It is possible to use multiple center_fractions and accelerations, in which case one possible
    (center_fraction, acceleration) is chosen uniformly at random each time the EquispacedMaskFunc
    object is called.
    """
    def __call__(self, shape, seed):
        """
        Args:
            shape (iterable[int]): The shape of the mask to be created. The shape should have
                at least 3 dimensions. Samples are drawn along the second last dimension.
            seed (int, optional): Seed for the random number generator. Setting the seed
                ensures the same mask is generated each time for the same shape.
        Returns:
            torch.Tensor: A mask of the specified shape.
        """
        if len(shape) < 3:
            raise ValueError('Shape should have 3 or more dimensions')
        # Re-seeding makes the mask reproducible for a fixed seed; the RNG
        # call order below (choose_acceleration, then randint) must not change.
        self.rng.seed(seed)
        center_fraction, acceleration = self.choose_acceleration()
        num_cols = shape[-2]
        num_low_freqs = int(round(num_cols * center_fraction))
        # Create the mask: start empty, then force the centered band of
        # low-frequency columns on.
        mask = np.zeros(num_cols, dtype=np.float32)
        pad = (num_cols - num_low_freqs + 1) // 2
        mask[pad:pad + num_low_freqs] = True
        # Determine acceleration rate by adjusting for the number of low frequencies,
        # so that the overall kept fraction still works out to 1/acceleration
        # after the always-on center band is accounted for.
        adjusted_accel = (acceleration * (num_low_freqs - num_cols)) / (num_low_freqs * acceleration - num_cols)
        # Random starting offset, then columns every `adjusted_accel` apart.
        offset = self.rng.randint(0, round(adjusted_accel))
        accel_samples = np.arange(offset, num_cols - 1, adjusted_accel)
        accel_samples = np.around(accel_samples).astype(np.uint)
        mask[accel_samples] = True
        # Reshape the mask so it broadcasts along every axis except the
        # column (second-to-last) dimension.
        mask_shape = [1 for _ in shape]
        mask_shape[-2] = num_cols
        mask = torch.from_numpy(mask.reshape(*mask_shape).astype(np.float32))
        return mask
| 7,423 | 42.415205 | 112 | py |
ttt_for_deep_learning_cs | ttt_for_deep_learning_cs-master/varnet/functions/data/mri_data.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import pathlib
import random
import h5py
from torch.utils.data import Dataset
class SliceData(Dataset):
    """
    A PyTorch Dataset that provides access to MR image slices.
    """

    def __init__(self, root, transform, challenge, sample_rate=1):
        """
        Args:
            root (pathlib.Path): Path to the dataset.
            transform (callable): A callable object that pre-processes the raw data into
                appropriate form. The transform function should take 'kspace', 'target',
                'attributes', 'filename', and 'slice' as inputs. 'target' may be null
                for test data.
            challenge (str): "singlecoil" or "multicoil" depending on which challenge to use.
            sample_rate (float, optional): A float between 0 and 1. This controls what fraction
                of the volumes should be loaded.

        Raises:
            ValueError: If ``challenge`` is not one of the two known values.
        """
        if challenge not in ('singlecoil', 'multicoil'):
            raise ValueError('challenge should be either "singlecoil" or "multicoil"')

        self.transform = transform
        self.recons_key = 'reconstruction_esc' if challenge == 'singlecoil' \
            else 'reconstruction_rss'

        self.examples = []
        files = list(pathlib.Path(root).iterdir())
        if sample_rate < 1:
            # Shuffle before truncating so the retained subset is random.
            random.shuffle(files)
            num_files = round(len(files) * sample_rate)
            files = files[:num_files]
        for fname in sorted(files):
            # Open each HDF5 file in a context manager so its handle is
            # closed immediately; the original kept every file open for the
            # dataset's lifetime (a file-handle leak on large datasets).
            with h5py.File(fname, 'r') as data:
                num_slices = data['kspace'].shape[0]
            # `slice_idx` instead of `slice` to avoid shadowing the builtin.
            self.examples += [(fname, slice_idx) for slice_idx in range(num_slices)]

    def __len__(self):
        """Return the total number of slices across all volumes."""
        return len(self.examples)

    def __getitem__(self, i):
        """Load slice ``i`` lazily from disk and apply the transform."""
        fname, slice_idx = self.examples[i]
        with h5py.File(fname, 'r') as data:
            kspace = data['kspace'][slice_idx]
            target = data[self.recons_key][slice_idx] if self.recons_key in data else None
            return self.transform(kspace, target, data.attrs, fname.name, slice_idx)
| 2,181 | 35.983051 | 95 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.