| repo (string, 2–99 chars) | file (string, 13–225 chars) | code (string, 0–18.3M chars) | file_length (int64, 0–18.3M) | avg_line_length (float64, 0–1.36M) | max_line_length (int64, 0–4.26M) | extension_type (stringclasses, 1 value) |
|---|---|---|---|---|---|---|
| deep_direct_stat | deep_direct_stat-master/models/__init__.py | | 0 | 0 | 0 | py |
| deep_direct_stat | deep_direct_stat-master/datasets/pascal3d.py |
import os
from os.path import dirname
import h5py
import numpy as np
PASCAL_CLASSES = ['aeroplane', 'bicycle', 'boat', 'bottle', 'bus', 'car',
'chair', 'diningtable', 'motorbike', 'sofa', 'train', 'tvmonitor']
def help(path):
    print("File %s not found!\n\n"
          "Download the preprocessed PASCAL3D+ dataset first:\n\n"
          "https://drive.google.com/open?id=1bDcISYXmCcTqZhhCX-bhTuUCmEH1Q8YF\n\n" % path)
def train_val_split(x, y, val_split=0.2, canonical_split=True):
if canonical_split:
val_split = 0.2
np.random.seed(13)
n_samples = x.shape[0]
shuffled_samples = np.random.choice(n_samples, n_samples, replace=False)
n_train = int((1-val_split)*n_samples)
train_samples = shuffled_samples[0:n_train]
val_samples = shuffled_samples[n_train:]
x_train, y_train = x[train_samples], y[train_samples]
x_val, y_val = x[val_samples], y[val_samples]
np.random.seed(None)
return x_train, y_train, x_val, y_val
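# Worked example: with n_samples=10 and val_split=0.2, n_train = int(0.8 * 10) = 8,
# so the first 8 shuffled indices form the train split and the remaining 2 the validation split.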
def get_class_data(data_h5, cls_name):
images = np.asarray(data_h5[cls_name]['images'])
azimuth_bit = np.asarray(data_h5[cls_name]['azimuth_bit'])
elevation_bit = np.asarray(data_h5[cls_name]['elevation_bit'])
tilt_bit = np.asarray(data_h5[cls_name]['tilt_bit'])
angles = np.hstack([azimuth_bit, elevation_bit, tilt_bit])
return images, angles
def merge_all_classes(data):
images = []
angles = []
for cls_key in data.keys():
cls_images, cls_angles = get_class_data(data, cls_key)
images.append(cls_images)
angles.append(cls_angles)
images = np.vstack(images)
angles = np.vstack(angles)
return images, angles
def load_pascal_data(pascaldb_path, cls=None, val_split=0.2, canonical_split=True):
""" Load cropped ground truth images from PASCAL3D+ dataset (augmented with Imagenet)
Original data:
http://cvgl.stanford.edu/projects/pascal3d.html
The following repository was also used to create dataset:
https://github.com/ShapeNet/RenderForCNN
Parameters
----------
cls: str
PASCAL object class to load (see PASCAL_CLASSES for available options). If None, all classes will be loaded.
val_split: float
amount of data used for validation
canonical_split: bool
whether to fix a random seed used for validation
Returns
-------
x_train: numpy array of shape [n_images, 224, 224, 3]
train cropped object images
y_train: numpy array of shape [n_images, 6]
        train ground truth object orientation angles (pan-tilt-roll) in biternion form:
[pan_cos, pan_sin, tilt_cos, tilt_sin, roll_cos, roll_sin]
x_val: numpy array of shape [n_images, 224, 224, 3]
validation images
y_val: numpy array of shape [n_images, 6]
        validation ground truth angles
x_test: numpy array of shape [n_images, 224, 224, 3]
test images
y_test: numpy array of shape [n_images, 6]
        test ground truth angles
"""
    if not os.path.exists(pascaldb_path):
        help(pascaldb_path)
        raise FileNotFoundError(pascaldb_path)
train_test_data_db = h5py.File(pascaldb_path, 'r')
train_data = train_test_data_db['train']
test_data = train_test_data_db['test']
if cls is None:
x_train, y_train = merge_all_classes(train_data)
x_test, y_test = merge_all_classes(test_data)
else:
x_train, y_train = get_class_data(train_data, cls)
x_test, y_test = get_class_data(test_data, cls)
x_train, y_train, x_val, y_val = train_val_split(x_train, y_train,
val_split=val_split,
canonical_split=canonical_split)
return x_train, y_train, x_val, y_val, x_test, y_test
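# Usage sketch (the path matches the one used in training_scripts/train_pascal3d.py):
# x_train, y_train, x_val, y_val, x_test, y_test = load_pascal_data(
#     'data/pascal3d+_imagenet_train_test.h5', cls='car')
# Columns of y_* are [pan_cos, pan_sin, tilt_cos, tilt_sin, roll_cos, roll_sin].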
| 3,758 | 30.066116 | 116 | py |
| deep_direct_stat | deep_direct_stat-master/datasets/__init__.py | | 0 | 0 | 0 | py |
| deep_direct_stat | deep_direct_stat-master/datasets/caviar.py |
import pickle
import gzip
import numpy as np
def load_caviar(data_path,
val_split=0.5,
canonical_split=True,
verbose=0):
(xtr, ytr_deg, *info_tr), (xvalte, yvalte_deg, *info_valte) = pickle.load(gzip.open(data_path, 'rb'))
def _parse_info(info):
parsed_info = {}
parsed_info['x_coord'] = info[0]
parsed_info['y_coord'] = info[1]
parsed_info['size'] = info[2]
parsed_info['image_name'] = np.asarray(info[3])
return parsed_info
info_tr = _parse_info(info_tr)
info_valte = _parse_info(info_valte)
# [channels, height, width] -> [height, width, channels]
xtr = xtr.transpose([0, 2, 3, 1])
xvalte = xvalte.transpose([0, 2, 3, 1])
n_valtest_images = xvalte.shape[0]
if canonical_split:
val_split = 0.5
np.random.seed(13)
val_size = int(n_valtest_images * val_split)
rix = np.random.choice(n_valtest_images, n_valtest_images, replace=False)
np.random.seed(None)
val_ix = rix[0:val_size]
te_ix = rix[val_size:]
xval = xvalte[val_ix]
yval_deg = yvalte_deg[val_ix]
info_val = _parse_info([info_valte[key][val_ix] for key in info_valte.keys()])
xte = xvalte[te_ix]
yte_deg = yvalte_deg[te_ix]
info_te = _parse_info([info_valte[key][te_ix] for key in info_valte.keys()])
return (xtr, ytr_deg, info_tr), (xval, yval_deg, info_val), (xte, yte_deg, info_te)
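# Usage sketch (the gzipped pickle path is illustrative):
# (xtr, ytr_deg, info_tr), (xval, yval_deg, info_val), (xte, yte_deg, info_te) = \
#     load_caviar('data/CAVIAR.pkl.gz')
# Each info_* dict holds 'x_coord', 'y_coord', 'size' and 'image_name' arrays.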
| 1,454 | 26.980769 | 105 | py |
| deep_direct_stat | deep_direct_stat-master/datasets/idiap.py |
import numpy as np
import joblib
def load_idiap(data_path,
val_split=0.5,
canonical_split=True,
verbose=0):
""" Load, preprocess and perform val-test split for IDIAP headpose dataset
You can download
Parameters
----------
data_path: str
path to joblib pickle containing IDIAP data
val_split: float
ratio of test data that will be used for validation
canonical_split: bool
whether to perform canonical split used to get results for the paper
Returns
-------
xtr: array of shape [n_samples, 75, 75, 3]
images
ptr: array of shape [n_samples, 1]
pan angles (in radians) for head pose
ttr: array of shape [n_samples, 1]
tilt angles (in radians) for head pose
rtr: array of shape [n_samples, 1]
roll angles (in radians) for head pose
    names_tr: list of length n_samples
list containing image names
xval, pval, tval, rval, names_val - same for validation part
xte, pte, tte, rte, names_te - same for test part
"""
(xtr, ptr, ttr, rtr, names_tr), (xvalte, pvalte, tvalte, rvalte, names_valte) = joblib.load(data_path)
# [channels, height, width] -> [height, width, channels]
xtr = xtr.transpose([0, 2, 3, 1])
xvalte = xvalte.transpose([0, 2, 3, 1])
n_valtest_images = xvalte.shape[0]
if canonical_split:
val_split = 0.5
np.random.seed(13)
val_size = int(n_valtest_images * val_split)
rix = np.random.choice(n_valtest_images, n_valtest_images, replace=False)
np.random.seed(None)
val_ix = rix[0:val_size]
te_ix = rix[val_size:]
xval = xvalte[val_ix]
pval = pvalte[val_ix]
tval = tvalte[val_ix]
rval = rvalte[val_ix]
names_val = [names_valte[ix] for ix in val_ix]
xte = xvalte[te_ix]
pte = pvalte[te_ix]
tte = tvalte[te_ix]
rte = rvalte[te_ix]
names_te = [names_valte[ix] for ix in te_ix]
return (xtr, ptr, ttr, rtr, names_tr), (xval, pval, tval, rval, names_val), (xte, pte, tte, rte, names_te)
def load_idiap_part(data_path,
data_part,
val_split=0.5,
canonical_split=True):
(xtr, ptr_rad, ttr_rad, rtr_rad, names_tr),\
(xval, pval_rad, tval_rad, rval_rad, names_val),\
    (xte, pte_rad, tte_rad, rte_rad, names_te) = load_idiap(data_path,
                                                            val_split=val_split,
                                                            canonical_split=canonical_split)
if data_part == 'pan':
return (xtr, ptr_rad), (xval, pval_rad), (xte, pte_rad)
elif data_part == 'tilt':
return (xtr, ttr_rad), (xval, tval_rad), (xte, tte_rad)
elif data_part == 'roll':
return (xtr, rtr_rad), (xval, rval_rad), (xte, rte_rad)
else:
        raise ValueError("data_part should be 'pan', 'tilt' or 'roll'")
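# Usage sketch (the pickle path is illustrative):
# (xtr, ptr), (xval, pval), (xte, pte) = load_idiap_part('data/IDIAP.pkl', 'pan')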
| 2,995 | 31.215054 | 110 | py |
| deep_direct_stat | deep_direct_stat-master/datasets/towncentre.py |
import numpy as np
import pickle, gzip
def split_dataset(X, y, img_names, split=0.1):
itr, ival, ite, trs, vals, tes = [], [], [], set(), set(), set()
for i, name in enumerate(img_names):
# Extract the person's ID.
pid = int(name.split('_')[1])
# Decide where to put that person.
if pid in trs:
itr.append(i)
elif pid in vals:
ival.append(i)
elif pid in tes:
ite.append(i)
else:
rid = np.random.rand()
if rid < 0.8:
itr.append(i)
trs.add(pid)
elif (rid >= 0.8) and (rid < 0.9):
ival.append(i)
vals.add(pid)
else:
ite.append(i)
tes.add(pid)
return itr, ival, ite
def prepare_data(x, y):
    x, y = x.astype(float) / 255, y.astype(float)
x = x.transpose([0, 2, 3, 1]) # [channels, height, width] -> [height, width, channels]
# y = y.reshape(-1,1)
return x, y
def load_towncentre(data_path,
val_test_split=0.1,
canonical_split=True,
verbose=1):
x, y, img_names = pickle.load(gzip.open(data_path, 'rb'))
img_names = np.asarray(img_names)
x, y = prepare_data(x, y)
if canonical_split:
val_test_split = 0.1
np.random.seed(13)
person_ids = np.asarray([int(name.split('_')[1]) for name in img_names])
unique_pid_set = np.unique(person_ids)
rands = np.random.rand(unique_pid_set.shape[0])
np.random.seed(None)
train_pids = unique_pid_set[rands < 1-val_test_split*2]
val_pids = unique_pid_set[(rands >= 1-val_test_split*2) & (rands < 1-val_test_split)]
    test_pids = unique_pid_set[rands >= 1-val_test_split]
ixtr = np.where(np.in1d(person_ids, train_pids))[0]
ixval = np.where(np.in1d(person_ids, val_pids))[0]
ixte = np.where(np.in1d(person_ids, test_pids))[0]
xtr, ytr, img_names_tr = x[ixtr], y[ixtr], img_names[ixtr]
xval, yval, img_names_val = x[ixval], y[ixval], img_names[ixval]
xte, yte, img_names_te = x[ixte], y[ixte], img_names[ixte]
if verbose:
print("Number of train samples: %s" % xtr.shape[0])
print("Number of validation samples: %s" % xval.shape[0])
print("Number of test samples: %s" % xte.shape[0])
return (xtr, ytr, img_names_tr), (xval, yval, img_names_val), (xte, yte, img_names_te)
def aug_data(x, y_deg, n_times=2, randomize_labels=True):
n_points = y_deg.shape[0]
x_aug = np.tile(x, [n_times, 1, 1, 1])
y_deg_aug = np.tile(y_deg, [n_times])
if randomize_labels:
y_deg_aug[0:n_points] = y_deg
y_deg_aug[n_points:n_points*2] = y_deg - 90
# y_deg_aug = np.random.randint(0, 359, y_deg_aug.shape[0]).astype('float')
# y_deg_aug[0:y_deg.shape[0]] = y_deg
return x_aug, y_deg_aug
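# Example: n_times=2 duplicates every image; with randomize_labels=True the first
# copy keeps y_deg and the second copy is labeled y_deg - 90.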
| 2,899 | 32.333333 | 91 | py |
| deep_direct_stat | deep_direct_stat-master/utils/losses.py |
import numpy as np
import tensorflow as tf
from scipy.special import i0 as mod_bessel0
from scipy.special import i1 as mod_bessel1
from keras import backend as K
from scipy.stats import multivariate_normal
def cosine_loss_np(y_target, y_pred):
    return 1 - np.sum(np.multiply(y_target, y_pred), axis=1)
def mad_loss_tf(y_target, y_pred):
loss = tf.abs(y_target - y_pred)
return tf.reduce_mean(loss)
def cosine_loss_tf(y_target, y_pred):
loss = 1 - tf.reduce_sum(tf.multiply(y_target, y_pred), axis=1)
mean_loss = tf.reduce_mean(loss, name='cosine_loss')
return mean_loss
def von_mises_loss_np(y_target, y_pred, kappa=1):
cosine_dist = np.sum(np.multiply(y_target, y_pred), axis=1) - 1
vm_loss = 1 - np.exp(kappa*cosine_dist)
return vm_loss
def von_mises_loss_tf(y_target, y_pred, kappa=1):
cosine_dist = tf.reduce_sum(tf.multiply(y_target, y_pred), axis=1) - 1
vm_loss = 1 - tf.exp(kappa*cosine_dist)
mean_loss = tf.reduce_mean(vm_loss, name='von_mises_loss')
return mean_loss
# bessel_taylor_coefs = np.asarray([1.00000000e+00, 2.50000000e-01, 1.56250000e-02,
# 4.34027778e-04, 6.78168403e-06, 6.78168403e-08,
# 4.70950280e-10, 2.40280755e-12, 9.38596699e-15,
# 2.89690339e-17, 7.24225848e-20, 1.49633440e-22,
# 2.59780277e-25, 3.84290351e-28, 4.90166264e-31,
# 5.44629182e-34, 5.31864436e-37, 4.60090342e-40,
# 3.55007980e-43, 2.45850402e-46], dtype='float64')
bessel_taylor_coefs = np.asarray([1.00000000e+00, 2.50000000e-01, 1.56250000e-02,
4.34027778e-04, 6.78168403e-06], dtype='float32')
def bessel_approx_np_0(x, m=5):
x = np.asarray(x).reshape(-1, 1)
deg = np.arange(0, m, 1)*2
x_tiled = np.tile(x, [1, m])
deg_tiled = np.tile(deg, [x.shape[0], 1])
coef_tiled = np.tile(bessel_taylor_coefs[0:m].reshape(1, m), [x.shape[0], 1])
return np.sum(np.power(x_tiled, deg_tiled)*coef_tiled, axis=1)
def bessel_approx_tf(x, m=5):
deg = tf.reshape(tf.range(0, m, 1)*2, [1, -1])
n_rows = tf.shape(x)[0]
x_tiled = tf.tile(x, [1, m])
deg_tiled = tf.tile(deg, [n_rows, 1])
coef_tiled = tf.tile(bessel_taylor_coefs[0:m].reshape(1, m), [n_rows, 1])
return tf.reduce_sum(tf.pow(x_tiled, tf.to_float(deg_tiled))*coef_tiled, axis=1)
def log_bessel_approx_np(x):
x = np.asarray(x).reshape([-1, 1])
def _log_bessel_approx_0(x):
x = x.reshape([-1,1])
bessel_taylor_coefs = np.asarray([1.00000000e+00, 2.50000000e-01, 1.56250000e-02,
4.34027778e-04, 6.78168403e-06], dtype='float32')
m = bessel_taylor_coefs.shape[0]
deg = np.reshape(np.arange(0, m, 1)*2, [1, -1])
n_rows = np.shape(x)[0]
x_tiled = np.tile(x, [1, m])
deg_tiled = np.tile(deg, [n_rows, 1])
coef_tiled = np.tile(bessel_taylor_coefs[0:m].reshape(1, m), [n_rows, 1])
val = np.log(np.sum(np.power(x_tiled, deg_tiled)*coef_tiled, axis=1))
return np.squeeze(val)
def _log_bessel_approx_large(x):
x = x.reshape([-1,1])
val = x - 0.5*np.log(2*np.pi*x)
return np.squeeze(val)
res = np.zeros(x.shape)
res[np.where(x > 5.0)] = _log_bessel_approx_large(x[x > 5.0])
res[np.where(x <= 5.0)] = _log_bessel_approx_0(x[x <= 5.0])
return res
def log_bessel_approx_tf(x):
x = tf.reshape(x, [-1, 1])
def _log_bessel_approx_0(x):
bessel_taylor_coefs = np.asarray([1.00000000e+00, 2.50000000e-01, 1.56250000e-02,
4.34027778e-04, 6.78168403e-06], dtype='float32')
m = bessel_taylor_coefs.shape[0]
deg = tf.reshape(tf.range(0, m, 1)*2, [1, -1])
n_rows = tf.shape(x)[0]
x_tiled = tf.tile(x, [1, m])
deg_tiled = tf.tile(deg, [n_rows, 1])
coef_tiled = tf.tile(bessel_taylor_coefs[0:m].reshape(1, m), [n_rows, 1])
val = tf.log(tf.reduce_sum(tf.pow(x_tiled, tf.to_float(deg_tiled))*coef_tiled, axis=1))
return tf.reshape(val, [-1, 1])
def _log_bessel_approx_large(x):
return x - 0.5*tf.log(2*np.pi*x)
res = tf.where(x > 5.0, _log_bessel_approx_large(x), _log_bessel_approx_0(x))
return res
def von_mises_log_likelihood_np(y, mu, kappa):
""" Compute log-likelihood for multiple Von-Mises distributions
Parameters
----------
y: numpy array of shape [n_points, 2]
        angles in biternion (cos, sin) representation that will be used to compute likelihood
mu: numpy array of shape [n_points, 2]
mean values of Von-Mises distributions in biternion representation
kappa: numpy array of shape [n_points, 1]
kappa values (inverse variance) of multiple Von-Mises distributions
Returns
-------
log_likelihood: numpy array of shape [n_points, 1]
log-likelihood values for each sample
"""
# if input_type == 'degree':
# scaler = 0.0174533
# cosin_dist = np.cos(scaler * (y - mu))
# elif input_type == 'radian':
# cosin_dist = np.cos(y - mu)
# elif input_type == 'biternion':
cosin_dist = np.reshape(np.sum(np.multiply(y, mu), axis=1), [-1, 1])
log_likelihood = kappa * cosin_dist - \
np.log(2 * np.pi) - log_bessel_approx_np(kappa)
return np.reshape(log_likelihood, [-1, 1])
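# A minimal sanity-check sketch (not part of the original module): via the identity
# cos(a)cos(b) + sin(a)sin(b) = cos(a - b), the biternion dot product above equals
# cos(theta - mu), so for moderate kappa the result should match the exact density
# log( exp(kappa*cos(theta - mu)) / (2*pi*I0(kappa)) ) up to the Taylor-series error.
def _check_von_mises_log_likelihood():
    theta = np.array([[0.3], [1.2]])
    mu = np.array([[0.1], [1.0]])
    kappa = np.array([[2.0], [3.0]])
    y_bit = np.hstack([np.cos(theta), np.sin(theta)])
    mu_bit = np.hstack([np.cos(mu), np.sin(mu)])
    approx = von_mises_log_likelihood_np(y_bit, mu_bit, kappa)
    exact = kappa * np.cos(theta - mu) - np.log(2 * np.pi * mod_bessel0(kappa))
    assert np.allclose(approx, exact, atol=1e-2)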
def von_mises_log_likelihood_tf(y_true, mu_pred, kappa_pred, input_type='biternion'):
'''
Compute log-likelihood given data samples and predicted Von-Mises model parameters
:param y_true: true values of an angle in biternion (cos, sin) representation
:param mu_pred: predicted mean values of an angle in biternion (cos, sin) representation
:param kappa_pred: predicted kappa (inverse variance) values of an angle in biternion (cos, sin) representation
    :param input_type: one of 'degree', 'radian' or 'biternion' (the format of y_true and mu_pred)
:return:
log_likelihood
'''
if input_type == 'degree':
scaler = 0.0174533
cosin_dist = tf.cos(scaler * (y_true - mu_pred))
elif input_type == 'radian':
cosin_dist = tf.cos(y_true - mu_pred)
elif input_type == 'biternion':
        cosin_dist = tf.reshape(tf.reduce_sum(tf.multiply(y_true, mu_pred), axis=1), [-1, 1])
# log_likelihood = tf.exp(log_kappa) * cosin_dist - \
# tf.log(2 * np.pi) + tf.log(bessel_approx_tf(tf.exp(log_kappa)))
log_likelihood = kappa_pred * cosin_dist - \
tf.log(2 * np.pi) - log_bessel_approx_tf(kappa_pred)
return tf.reshape(log_likelihood, [-1, 1])
# return tf.reduce_mean(log_likelihood)
def kappa_to_stddev(kappa, output='radians'):
'''
:param kappa: vector (numpy array) of kappa values
:param output: output format (radians or degrees)
:return: vector (numpy array) of corresponding standard deviation values
'''
rad_stddev = np.sqrt(1 - (mod_bessel1(kappa)/mod_bessel0(kappa)))
if output == 'radians':
return rad_stddev
else:
return np.rad2deg(rad_stddev)
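# Example: kappa=1 gives I1(1)/I0(1) ~= 0.4464, so stddev = sqrt(0.5536) ~= 0.744 rad
# (about 42.6 degrees with output='degrees').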
def gaussian_kl_divergence_np(mu1, ln_var1, mu2, ln_var2):
shape = mu1.shape
n = shape[1]
log_var_diff = ln_var1 - ln_var2
var_diff_trace = np.sum(np.exp(log_var_diff), axis=1)
mudiff = np.sum(np.square(mu1-mu2) / np.exp(ln_var2), axis=1)
kl_div = 0.5*(-np.sum(log_var_diff, axis=1) - n + var_diff_trace + mudiff)
return np.reshape(kl_div, [-1, 1])
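# Quick check: the KL divergence between identical diagonal Gaussians is zero, e.g.
# gaussian_kl_divergence_np(mu, ln_var, mu, ln_var) returns all zeros.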
def gaussian_kl_divergence_tf(mu1, ln_var1, mu2, ln_var2):
shape = tf.to_float(tf.shape(mu1))
batch_size = shape[0]
n = shape[1]
log_var_diff = ln_var1 - ln_var2
var_diff_trace = tf.reduce_sum(tf.exp(log_var_diff), axis=1)
mudiff = tf.reduce_sum(tf.square(mu1-mu2) / tf.exp(ln_var2), axis=1)
kl_div = 0.5*(-tf.reduce_sum(log_var_diff, axis=1) - n + var_diff_trace + mudiff)
return tf.reshape(kl_div, [-1, 1])
def gaussian_log_likelihood_scipy(mu, std, samples):
""" Compute likelihood for multiple multivariate gaussians
(Slow SciPy implementation, for TESTS ONLY!)
Parameters
----------
mu: numpy array of shape [n_points, n_dims]
mean values of multiple multivariate gaussians
std: numpy array of shape [n_points, n_dims]
stdev values of multiple multivariate gaussians
samples: numpy array of shape [n_points, n_samples, n_dims]
points to compute likelihood
Returns
-------
likelihood: numpy array of shape [n_points, n_samples]
likelihood values for each sample
"""
n_points, n_samples, n_dims = samples.shape
log_likelihood = np.zeros([n_points, n_samples])
for pid in range(0, n_points):
cov = np.diag(np.square(std[pid]))
log_likelihood[pid, :] = np.log(multivariate_normal.pdf(samples[pid, :], mean=mu[pid], cov=cov,
allow_singular=True))
return log_likelihood
def gaussian_log_likelihood_np(mu, std, samples):
""" Compute likelihood for multiple multivariate gaussians
Parameters
----------
mu: numpy array of shape [n_points, n_dims]
mean values of multiple multivariate gaussians
std: numpy array of shape [n_points, n_dims]
stdev values of multiple multivariate gaussians
samples: numpy array of shape [n_points, n_samples, n_dims]
points to compute likelihood
Returns
-------
likelihood: numpy array of shape [n_points, n_samples]
likelihood values for each sample
"""
n_points, n_samples, n_dims = samples.shape
mu_tiled = np.tile(mu.reshape([n_points, 1, n_dims]), [1, n_samples, 1])
std_tiled = np.tile(std.reshape([n_points, 1, n_dims]), [1, n_samples, 1])
var_tiled = np.square(std_tiled)
diff = np.sum(np.square(samples-mu_tiled)/var_tiled, axis=2)
log_var = np.sum(np.log(var_tiled), axis=2)
log_2pi = np.ones([n_points, n_samples])*n_dims*np.log(2*np.pi)
log_likelihood = -0.5*(log_var + diff + log_2pi)
return log_likelihood
def gaussian_log_likelihood_tf(mu, std, samples):
""" Compute likelihood for multiple multivariate gaussians
Parameters
----------
mu: numpy array of shape [n_points, n_dims]
mean values of multiple multivariate gaussians
std: numpy array of shape [n_points, n_dims]
stdev values of multiple multivariate gaussians
samples: numpy array of shape [n_points, n_samples, n_dims]
points to compute likelihood
Returns
-------
likelihood: numpy array of shape [n_points, n_samples]
likelihood values for each sample
"""
samples_shape = tf.shape(samples)
n_points = samples_shape[0]
n_samples = samples_shape[1]
n_dims = samples_shape[2]
mu_tiled = tf.tile(tf.reshape(mu, shape=[n_points, 1, n_dims]), [1, n_samples, 1])
std_tiled = tf.tile(tf.reshape(std, [n_points, 1, n_dims]), [1, n_samples, 1])
var_tiled = tf.square(std_tiled)
diff = tf.reduce_sum(tf.square(samples-mu_tiled)/var_tiled, axis=2)
log_var = tf.reduce_sum(tf.log(var_tiled), axis=2)
log_2pi = tf.ones(shape=[n_points, n_samples], dtype=tf.float32)*tf.to_float(n_dims)*np.log(2*np.pi)
log_likelihood = -0.5*(log_var + diff + log_2pi)
return log_likelihood
def von_mises_neg_log_likelihood_keras(y_true, y_pred):
'''
:param y_true : array with ground truth angle in biternion representation (cos, sin) of shape [n_samples, 1]
:param y_pred : array with predicted mean angle (cos, sin) and kappa of shape [n_samples, 3]
:return: mean negative log likelihood
'''
mu_pred = y_pred[:, 0:2]
kappa_pred = y_pred[:, 2:]
return -K.mean(von_mises_log_likelihood_tf(y_true, mu_pred, kappa_pred, input_type='biternion'))
def importance_loglikelihood(mu_encoder, log_sigma_encoder,
mu_prior, log_sigma_prior,
u_encoder_samples,
mu_vm, kappa_vm,
ytrue_bit):
n_points, n_samples, n_u = u_encoder_samples.shape
vm_likelihood = np.zeros([n_points, n_samples])
for sid in range(0, n_samples):
vm_likelihood[:, sid] = np.squeeze(np.exp(
von_mises_log_likelihood_np(ytrue_bit, mu_vm[:,sid], kappa_vm[:, sid])))
enc_log_likelihood = gaussian_log_likelihood_np(mu_encoder, np.exp(log_sigma_encoder/2), u_encoder_samples)
prior_log_likelihood = gaussian_log_likelihood_np(mu_prior, np.exp(log_sigma_prior/2), u_encoder_samples)
weight = np.exp(prior_log_likelihood - enc_log_likelihood)
importance_loglikelihoods = np.log(np.mean(vm_likelihood*weight, axis=1))
return importance_loglikelihoods
def maad_from_deg(y_pred, y_target):
return np.rad2deg(np.abs(np.arctan2(np.sin(np.deg2rad(y_target - y_pred)), np.cos(np.deg2rad(y_target - y_pred)))))
def show_errs_deg(y_pred, y_target, epoch=-1):
errs = maad_from_deg(y_pred, y_target)
mean_errs = np.mean(errs, axis=1)
std_errs = np.std(errs, axis=1)
print("Error: {:5.2f}°±{:5.2f}°".format(np.mean(mean_errs), np.mean(std_errs)))
print("Stdev: {:5.2f}°±{:5.2f}°".format(np.std(mean_errs), np.std(std_errs)))
def maximum_expected_utility(y_deg):
""" Summarize multiple predictions to one via Maximum Expected Utility estimation
Parameters
----------
y_deg: numpy array of shape [n_points, n_predictions]
multiple predictions (in degrees)
Returns
-------
    mae_preds: numpy array of shape [n_points]
        maximum expected utility predictions (in degrees)
"""
def _point_mae(y):
y_tiled = np.tile(y.reshape(-1, 1), [1, y.shape[0]])
maad_dist = maad_from_deg(y_tiled.T, y_tiled)
ix = np.argmin(np.sum(maad_dist, axis=1))
return y[ix]
n_points = y_deg.shape[0]
mae_preds = np.asarray([_point_mae(y_deg[i]) for i in range(0, n_points)])
return np.squeeze(mae_preds)
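# Worked example: for one point with predictions [350, 10, 30] (degrees), the summed
# circular distances are 60, 40 and 60 respectively, so MEU returns 10.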
| 14,052 | 33.27561 | 119 | py |
| deep_direct_stat | deep_direct_stat-master/utils/sampling.py |
import numpy as np
def sample_multiple_gauassians_np(mu, std, n_samples=10):
"""Sample points from multiple multivariate gaussian distributions
Parameters
----------
mu: numpy array of shape [n_points, n_dims]
mean values of multiple multivariate gaussians
std: numpy array of shape [n_points, n_dims]
stdev values of multiple multivariate gaussians
n_samples: int
number of samples to draw from each distribution
Returns
-------
samples: numpy array of shape [n_points, n_samples, n_dims]
samples from each gaussian
"""
n_points, n_dims = mu.shape
eps = np.random.normal(size=[n_points, n_samples, n_dims])
mu_tiled = np.tile(mu.reshape([n_points, 1, n_dims]), [1, n_samples, 1])
std_tiled = np.tile(std.reshape([n_points, 1, n_dims]), [1, n_samples, 1])
samples = mu_tiled + eps*std_tiled
return samples
def sample_von_mises_mixture(mus_rad, kappas, component_probs, n_samples=100):
""" Sample from Von-Mises mixture model
Parameters
----------
mus_rad: array of shape [n_components]
mean values for each distribution in mixture (in radians)
kappas: array of shape [n_components]
kappa values for each distribution in mixture
component_probs: array of shape [n_components]
        probability of each component
n_samples: int
number of samples to draw
Returns
-------
samples: array of shape [n_samples]
sampled angles (in radians)
"""
component_probs = np.clip(component_probs-0.0001, 0.0, 0.9999)
sample_comps_id = np.nonzero(np.random.multinomial(1, component_probs, n_samples))[1]
sample_mus = mus_rad[sample_comps_id]
sample_kappas = kappas[sample_comps_id]
    samples = np.asarray([np.random.vonmises(sample_mus[sid], sample_kappas[sid]) for sid in range(0, n_samples)])
return samples
def sample_von_mises_mixture_multi(mus_rad, kappas, component_probs, n_samples=100):
""" Sample from multiple Von-Mises mixture model
Parameters
----------
mus_rad: array of shape [n_mixtures, n_components]
mean values for each distribution in mixture (in radians)
kappas: array of shape [n_mixtures, n_components, n_dims]
kappa values for each distribution in mixture
component_probs: array of shape [n_mixtures, n_components]
        probability of each component
n_samples: int
number of samples to draw
Returns
-------
samples: array of shape [n_mixtures, n_samples]
sampled angles (in radians)
"""
samples = [np.reshape(sample_von_mises_mixture(mus_rad[fid], kappas[fid], component_probs[fid],
n_samples=n_samples), [1, -1])
for fid in range(0, len(component_probs))]
return np.concatenate(samples, axis=0)
| 2,883 | 30.010753 | 118 | py |
| deep_direct_stat | deep_direct_stat-master/utils/angles.py |
import numpy as np
def rad2bit(angles_rad):
""" radians to biternion ([cos, sin])
"""
return np.array([np.cos(angles_rad), np.sin(angles_rad)]).T
def deg2bit(angles_deg):
""" degrees to biternion ([cos, sin])
"""
angles_rad = np.deg2rad(angles_deg)
return np.array([np.cos(angles_rad), np.sin(angles_rad)]).T
def bit2deg(angles_bit):
""" biternion ([cos, sin]) -> degrees
"""
return (np.rad2deg(np.arctan2(angles_bit[:, 1], angles_bit[:, 0])) + 360) % 360
def bit2rad(angles_bit):
""" biternion ([cos, sin]) -> radians
"""
return np.deg2rad(bit2deg(angles_bit))
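# Round-trip sanity check (illustrative values):
# bit2deg(deg2bit(np.array([0., 90., 270.]))) -> array([  0.,  90., 270.])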
def bit2deg_multi(angles_bit):
""" Convert biternion representation to degree for multiple samples
Parameters
----------
angles_bit: numpy array of shape [n_points, n_predictions, 2]
multiple predictions
Returns
-------
deg_angles: numpy array of shape [n_points, n_predictions]
multiple predictions converted to degree representation
"""
deg_angles = np.asarray([bit2deg(angles_bit[:, i, :]) for i in range(0, angles_bit.shape[1])]).T
return deg_angles
def cart_to_spherical(xyz):
r_phi_theta = np.zeros(xyz.shape)
xy = xyz[:, 0]**2 + xyz[:, 1]**2
r_phi_theta[:, 0] = np.sqrt(xy + xyz[:, 2]**2)
    r_phi_theta[:, 1] = np.arctan2(xyz[:, 1], xyz[:, 0])  # phi = azimuth angle in the XY-plane
    r_phi_theta[:, 2] = np.arccos(xyz[:, 2] / r_phi_theta[:, 0])  # theta = arccos(z/r)
return r_phi_theta
| 1,490 | 26.109091 | 104 | py |
| deep_direct_stat | deep_direct_stat-master/utils/__init__.py | | 0 | 0 | 0 | py |
| deep_direct_stat | deep_direct_stat-master/utils/custom_keras_callbacks.py |
import keras
import numpy as np
import pandas as pd
import warnings
class SideModelCheckpoint(keras.callbacks.Callback):
def __init__(self, model_name, model_to_save, save_path, save_weights_only=False):
        self.model_name = model_name
        # keep a separate reference: Keras overwrites self.model via set_model() during fit
        self.model_to_save = model_to_save
        self.save_path = save_path
        self.save_weights_only = save_weights_only
    def on_train_begin(self, logs=None):
        self.epoch_id = 0
        self.min_val_loss = float("inf")
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        self.epoch_id += 1
        self.curr_val_loss = logs.get('val_loss')
if self.curr_val_loss < self.min_val_loss:
filepath = self.save_path.format(epoch=self.epoch_id, val_loss=self.curr_val_loss)
print("val_loss improved from %f to %f, saving %s to %s" %
(self.min_val_loss, self.curr_val_loss, self.model_name, filepath))
self.min_val_loss = self.curr_val_loss
            if self.save_weights_only:
                self.model_to_save.save_weights(filepath)
            else:
                self.model_to_save.save(filepath)
class EvalCVAEModel(keras.callbacks.Callback):
""" Run CVAE evaluation on selected data
"""
def __init__(self, x, y_deg, data_title, cvae_model, ckpt_path):
self.x = x
self.y_deg = y_deg
self.data_title = data_title
self.cvae_model = cvae_model
self.ckpt_path = ckpt_path
self.max_log_likelihood = float('-inf')
def on_epoch_end(self, epoch, logs=None):
results = self.cvae_model.evaluate(self.x, self.y_deg, self.data_title)
if results['importance_log_likelihood'] > self.max_log_likelihood:
print('max log likelihood improved from %f to %f' % (self.max_log_likelihood,
results['importance_log_likelihood']))
self.max_log_likelihood = results['importance_log_likelihood']
self.model.save_weights(self.ckpt_path)
print("Evaluation is done.")
class ModelCheckpointEveryNBatch(keras.callbacks.Callback):
"""Save the model after every n batches, based on validation loss
`filepath` can contain named formatting options,
which will be filled the value of `epoch` and
keys in `logs` (passed in `on_epoch_end`).
For example: if `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`,
then the model checkpoints will be saved with the epoch number and
the validation loss in the filename.
# Arguments
filepath: string, path to save the model file.
verbose: verbosity mode, 0 or 1.
save_best_only: if `save_best_only=True`,
the latest best model according to
the quantity monitored will not be overwritten.
mode: one of {auto, min, max}.
If `save_best_only=True`, the decision
to overwrite the current save file is made
based on either the maximization or the
minimization of the monitored quantity. For `val_acc`,
this should be `max`, for `val_loss` this should
be `min`, etc. In `auto` mode, the direction is
automatically inferred from the name of the monitored quantity.
save_weights_only: if True, then only the model's weights will be
saved (`model.save_weights(filepath)`), else the full model
is saved (`model.save(filepath)`).
period: Interval (number of batches) between checkpoints.
"""
def __init__(self, ckpt_path, log_path, xval, yval, verbose=0,
save_best_only=False, save_weights_only=False, period=1, patience=50):
super(ModelCheckpointEveryNBatch, self).__init__()
self.xval = xval
self.yval = yval
self.verbose = verbose
self.ckpt_path = ckpt_path
self.log_path = log_path
self.save_best_only = save_best_only
self.save_weights_only = save_weights_only
self.period = period
self.batches_since_last_save = 0
self.min_val_loss = float('inf')
self.n_steps = 0
self.log_cols = ['train_step', 'val_loss', 'batch_loss']
self.log_df = pd.DataFrame(columns=self.log_cols)
self.n_epochs_no_improvement = 0
self.patience = patience
def on_batch_end(self, batch, logs=None):
logs = logs or {}
self.n_steps += 1
self.batches_since_last_save += 1
if self.batches_since_last_save >= self.period:
self.batches_since_last_save = 0
filepath = self.ckpt_path
if self.save_best_only:
curr_batch_loss = logs.get('loss')
curr_val_loss = self.model.evaluate(self.xval, self.yval, verbose=0)
log_entry_np = np.asarray([self.n_steps, curr_val_loss, curr_batch_loss]).reshape([1, -1])
log_entry_df = pd.DataFrame(log_entry_np, columns=self.log_cols)
                self.log_df = pd.concat([self.log_df, log_entry_df], ignore_index=True)
self.log_df.to_csv(self.log_path, sep=';')
if curr_val_loss < self.min_val_loss:
if self.verbose > 0:
print('Batch %05d: val_loss improved from %0.5f to %0.5f,'
' saving model to %s'
% (batch, self.min_val_loss,
curr_val_loss, filepath))
self.min_val_loss = curr_val_loss
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
self.n_epochs_no_improvement = 0
else:
                    self.n_epochs_no_improvement += 1
                    if self.n_epochs_no_improvement > self.patience:
                        self.model.stop_training = True
                    if self.verbose > 0:
                        print('Batch %05d: val_loss did not improve' % batch)
                        print('number of steps with no improvement: %d' % self.n_epochs_no_improvement)
else:
if self.verbose > 0:
print('Batch %05d: saving model to %s' % (batch, filepath))
if self.save_weights_only:
self.model.save_weights(filepath, overwrite=True)
else:
self.model.save(filepath, overwrite=True)
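# Usage sketch (hypothetical file names; assumes a compiled Keras model and
# validation arrays xval, yval):
# ckpt = ModelCheckpointEveryNBatch('best.h5', 'train_log.csv', xval, yval,
#                                   save_best_only=True, period=100, patience=50)
# model.fit(xtr, ytr, callbacks=[ckpt])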
| 6,614 | 43.695946 | 106 | py |
| deep_direct_stat | deep_direct_stat-master/training_scripts/train_towncentre.py |
import sys
import os
from os.path import dirname
from datasets.towncentre import load_towncentre
from models.infinite_mixture import BiternionMixture
import datetime
from utils import angles
import numpy as np
def log_step(mess):
dtstr = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print(' '.join([dtstr, mess]))
def convert_and_pad_angle_data(y):
    """ The model expects pan, tilt and roll, but TownCentre only provides pan,
    so we pad the data with fake (zero) tilt and roll components
    """
y_pan_bit = angles.deg2bit(y)
y_tilt_bit = angles.deg2bit(np.zeros_like(y))
y_roll_bit = angles.deg2bit(np.zeros_like(y))
return np.hstack([y_pan_bit, y_tilt_bit, y_roll_bit])
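# Example: y = np.array([90.]) maps to [[0, 1, 1, 0, 1, 0]] (up to floating-point
# rounding), i.e. (cos, sin) of 90 degrees for pan followed by (cos, sin) of
# 0 degrees for the fake tilt and roll.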
def main():
project_dir = dirname(os.path.dirname(os.path.realpath(__file__)))
log_dir = os.path.join(project_dir, 'logs')
if not os.path.exists(log_dir):
os.mkdir(log_dir)
log_step("loading dataset")
data_path = os.path.join(project_dir, 'data/TownCentre.pkl.gz')
(xtr, ytr, img_names_tr), (xval, yval, img_names_val), (xte, yte, img_names_te) = \
load_towncentre(data_path, canonical_split=True, verbose=1)
ytr = convert_and_pad_angle_data(ytr)
yval = convert_and_pad_angle_data(yval)
yte = convert_and_pad_angle_data(yte)
log_step("defining the model..")
model = BiternionMixture(z_size=2, input_shape=xtr.shape[1:],
backbone_cnn='mobilenet', backbone_weights=None, debug=False,
hlayer_size=512, n_samples=2)
log_step("training started")
ckpt_path = os.path.join(log_dir, 'towncentre.h5')
model.fit(xtr, ytr, validation_data=[xval, yval], ckpt_path=ckpt_path, epochs=50, patience=5)
log_step("training finished. loading weights..")
model.model.load_weights(ckpt_path)
log_step("evaluating on train set..")
model.evaluate(xtr, ytr)
log_step("evaluating on validation set..")
model.evaluate(xval, yval)
log_step("evaluating on test set..")
model.evaluate(xte, yte)
log_step("all done. Model checkpoint: %s" % ckpt_path)
if __name__ == '__main__':
main()
| 2,094 | 30.742424 | 97 | py |
| deep_direct_stat | deep_direct_stat-master/training_scripts/train_pascal3d.py |
import sys
import os
from os.path import dirname
from datasets import pascal3d
from models.infinite_mixture import BiternionMixture
import datetime
def log_step(mess):
dtstr = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print(' '.join([dtstr, mess]))
def main():
    cls = sys.argv[1]  # PASCAL class to train on; load_pascal_data loads all classes when cls is None
project_dir = dirname(os.path.dirname(os.path.realpath(__file__)))
log_dir = os.path.join(project_dir, 'logs')
if not os.path.exists(log_dir):
os.mkdir(log_dir)
log_step("loading dataset")
pascaldb_path = os.path.join(project_dir, 'data/pascal3d+_imagenet_train_test.h5')
x_train, y_train, x_val, y_val, x_test, y_test = pascal3d.load_pascal_data(pascaldb_path, cls=cls)
log_step("defining the model..")
model = BiternionMixture(z_size=8, backbone_cnn='inception', hlayer_size=512, n_samples=10, learning_rate=1.0e-4)
ckpt_path = os.path.join(log_dir, '%s.h5' % cls)
log_step("training on class :%s" % cls)
model.fit(x_train, y_train, validation_data=[x_val, y_val], ckpt_path=ckpt_path, epochs=200,
patience=10, batch_size=1)
log_step("training finished. loading weights..")
model.model.load_weights(ckpt_path)
log_step("evaluating on train set..")
model.evaluate(x_train, y_train)
log_step("evaluating on validation set..")
model.evaluate(x_val, y_val)
log_step("evaluating on test set..")
model.evaluate(x_test, y_test)
log_step("saving predictions for Matlab eval..")
save_dir = os.path.join(log_dir, 'vp_test_results')
if not os.path.exists(save_dir):
os.mkdir(save_dir)
model.save_detections_for_official_eval(x_test, os.path.join(save_dir, '%s_pred_view.txt' % cls))
log_step("all done. Model checkpoint: %s" % ckpt_path)
if __name__ == '__main__':
main()
| 1,894 | 31.672414 | 117 | py |
| KG-DQN | KG-DQN-master/scripts/format_to_drqa.py |
import re
from nltk.tokenize import sent_tokenize
from fuzzywuzzy import fuzz
import json
import random
data_list = []
with open('./oracle.txt', 'r') as f:
cur = []
cur_admissible_actions = "N, S, E, W, look, examine"
cur_taken_action = ""
id = 0
for line in f:
line = line.replace('\n', '')
if line != '---------' and "Actions:" not in str(line) and "Taken action:" not in str(line):
cur.append(line)
elif "Actions:" in str(line):
cur_admissible_actions = str(line).split(':')[1].replace("'", '').replace("[", '').replace("]", '')
#print(cur_admissible_actions)
elif "Taken action:" in str(line):
cur_taken_action = str(line).split(':')[1]
#print(cur_taken_action)
elif line == '---------':
cur = [a.strip() for a in cur]
cur = ' '.join(cur).strip().replace('\n', '').replace('---------', '')
try:
title = re.findall("(?<=-\=).*?(?=\=-)", cur)[0].strip()
except IndexError:
title = "UNK"
cur = re.sub("(?<=-\=).*?(?=\=-)", '', cur)
cur = cur.replace("-==-", '').strip()
cur = '. '.join([a.strip() for a in cur.split('.')])
#print(cur)
answer_list = []
for sent in sent_tokenize(cur):
if fuzz.token_set_ratio(cur_taken_action, sent) > 60:
#print(fuzz.token_set_ratio(cur_taken_action, sent), fuzz.partial_ratio(cur_taken_action, sent), cur_taken_action, sent)
answer_start = cur.find(sent)
answer_item = {"answer_start": answer_start, "text": sent}
answer_list.append(answer_item)
cur = cur + "The actions are: " + str(cur_admissible_actions) + "."
answer_list.append({"answer_start": cur.find(cur_taken_action), "text": cur_taken_action})
qa_item = {"answers": answer_list, "question": "What action should I take?", "id": str(id)}
id += 1
qa_list = [qa_item]
            paragraph_item = {"context": cur, "qas": qa_list}
paragraph_list = [paragraph_item]
data_item = {"title": title, "paragraphs": paragraph_list}
data_list.append(data_item)
#print(data_item)
cur = []
cur_admissible_actions = ""
cur_taken_action = ""
out_train = open('./cleaned_qa_train.json', 'w')
out_dev = open('./cleaned_qa_dev.json', 'w')
llist = len(data_list)
random.shuffle(data_list)
data_train = {"data": data_list[:int(llist * 0.9)]}
data_dev = {"data": data_list[int(llist * 0.9):]}
json.dump(data_train, out_train)
json.dump(data_dev, out_dev)
out_train.close()
out_dev.close()
| 2,845 | 35.961039 | 140 | py |
| KG-DQN | KG-DQN-master/scripts/datacollector.py |
import numpy as np
import textworld
import re
import sys
import glob
import requests
import json
class NaiveAgent(textworld.Agent):
def __init__(self, seed=1234):
self.seed = seed
self.rng = np.random.RandomState(self.seed)
self.actions = ["north", "south", "east", "west", "up", "down",
"look", "inventory", "take all", "YES", "wait",
"take", "drop", "eat", "attack"]
def reset(self, env):
env.display_command_during_render = True
env.activate_state_tracking()
def act(self, game_state, reward, done):
action = self.rng.choice(self.actions)
if action in ["take", "drop", "eat", "attack"]:
words = game_state.feedback.split() # Observed words.
words = [w for w in words if len(w) > 3] # Ignore most stop words.
if len(words) > 0:
action += " " + self.rng.choice(words)
return action
class RandomAgent(textworld.Agent):
""" Agent that randomly selects commands from the admissible ones. """
def __init__(self, seed=1234):
self.seed = seed
self.rng = np.random.RandomState(self.seed)
def reset(self, env):
# Activate state tracking in order to get the admissible commands.
env.activate_state_tracking()
env.compute_intermediate_reward() # Needed to detect if a game is lost.
def act(self, game_state, reward, done):
# print("Admissible actions: " + str(game_state.admissible_commands))
return self.rng.choice(game_state.admissible_commands)
class WalkthroughDone(NameError):
pass
class WalkthroughAgent(textworld.Agent):
""" Agent that simply follows a list of commands. """
def __init__(self, commands=None):
self.commands = commands
def reset(self, env):
env.activate_state_tracking()
env.display_command_during_render = True
if self.commands is not None:
self._commands = iter(self.commands)
return # Commands already specified.
if not hasattr(env, "game"):
msg = "WalkthroughAgent is only supported for generated games."
raise NameError(msg)
# Load command from the generated game.
self._commands = iter(env.game.quests[0].commands)
def act(self, game_state, reward, done):
try:
action = next(self._commands)
except StopIteration:
raise WalkthroughDone()
action = action.strip() # Remove trailing \n, if any.
return action
def test_agent(agent, game, out, max_step=1000, nb_episodes=5):
env = textworld.start(game) # Start the game.
#print(game.split("/")[-1], end="")
env.enable_extra_info('description')
# Collect some statistics: nb_steps, final reward.
avg_moves, avg_scores = [], []
acts = set()
for no_episode in range(nb_episodes):
agent.reset(env) # Tell the agent a new episode is starting.
game_state = env.reset() # Start new episode.
reward = 0
done = False
for no_step in range(max_step):
# print(game_state.description)
command = agent.act(game_state, reward, done)
out.write(game_state.description)
out.write("Actions: " + str(game_state.admissible_commands) + '\n')
acts.update(game_state.admissible_commands)
out.write("Taken action:" + str(command))
out.write('\n' + "---------" + '\n')
game_state, reward, done = env.step(command)
#env.render()
# if no_step % 10 == 0:
# print(no_step, no_episode)
if done:
break
# print("Done after {} steps. Score {}/1.".format(game_state.nb_moves, game_state.score))
# print(".", end="")
avg_moves.append(game_state.nb_moves)
avg_scores.append(game_state.score)
env.close()
# print(" \tavg. steps: {:5.1f}; avg. score: {:4.1f} / 1.".format(np.mean(avg_moves), np.mean(avg_scores)))
# print(avg_moves)
# exit()
return acts
def call_stanford_openie(sentence):
url = "http://localhost:9000/"
querystring = {
"properties": "%7B%22annotators%22%3A%20%22openie%22%7D",
"pipelineLanguage": "en"}
response = requests.request("POST", url, data=sentence, params=querystring)
response = json.JSONDecoder().decode(response.text)
return response
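# The returned JSON mirrors the CoreNLP server output consumed in generate_data()
# below: response['sentences'][i]['openie'] is a list of triples, each a dict with
# 'subject', 'relation' and 'object' keys.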
def generate_data(games, type):
if type == 'collect':
out = open("./random.txt", 'w')
acts = set()
for g in games:
acts.update(test_agent(WalkthroughAgent(), game=g, out=out))
acts.update(test_agent(RandomAgent(), game=g, out=out))
out.close()
out = open('./cleaned_random.txt', 'w')
with open('./random.txt', 'r') as f:
cur = []
for line in f:
# print(line)
                if line != '---------' and "Actions:" not in str(line) and "Taken action:" not in str(
                        line):
cur.append(line)
else:
cur = [a.strip() for a in cur]
cur = ' '.join(cur).strip().replace('\n', '').replace('---------', '')
cur = re.sub("(?<=-\=).*?(?=\=-)", '', cur)
cur = cur.replace("-==-", '').strip()
cur = '. '.join([a.strip() for a in cur.split('.')])
out.write(cur + '\n')
cur = []
out.close()
input_file = open("./cleaned_random.txt", 'r')
entities = set()
relations = set()
sents = input_file.read()
try:
# triple = callStanfordReq(sent)['sentences'][0]['openie']
for ov in call_stanford_openie(sents)['sentences']:
triple = ov['openie']
# print(triple)
# print(sent,)
for tr in triple:
h, r, t = tr['subject'], tr['relation'], tr['object']
entities.add(h)
entities.add(t)
relations.add(r)
# print(' | ' + h + ', ' + r + ', ' + t,)
    except Exception as e:
        print("OpenIE error:", e)
act_out = open('./act2id.txt', 'w')
act_out.write(str({k: i for i, k in enumerate(acts)}))
act_out.close()
ent_out = open('./entity2id.tsv', 'w')
rel_out = open('./relation2id.tsv', 'w')
for i, e in enumerate(entities):
ent_out.write('_'.join(e.split()) + '\t' + str(i) + '\n')
ent_out.close()
for i, r in enumerate(relations):
rel_out.write('_'.join(r.split()) + '\t' + str(i) + '\n')
rel_out.close()
elif type == 'oracle':
out = open("./oracle.txt", 'w')
for g in games:
test_agent(WalkthroughAgent(), game=g, out=out)
out.close()
if __name__ == "__main__":
if len(sys.argv) < 3:
print("Please supply directory with games and type.")
exit()
games = glob.glob(sys.argv[1] + '*.ulx')[:2]
print(games)
generate_data(games, sys.argv[2])
| 7,454 | 33.041096 | 117 | py |
| KG-DQN | KG-DQN-master/dqn/dqn.py |
import math, random
import textworld
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import torch.nn.functional as F
from collections import deque
from nltk.tokenize import word_tokenize
#from matplotlib import use
#use('Agg')
import matplotlib.pyplot as plt
import logging
from utils.replay import *
from utils.schedule import *
#from memory_profiler import profile
USE_CUDA = torch.cuda.is_available()
class DQN(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_dim):
super(DQN, self).__init__()
self.num_inputs = num_inputs
self.num_actions = num_actions
self.layers = nn.Sequential(
nn.Linear(num_inputs, hidden_dim[0]),
nn.ReLU(),
nn.Linear(hidden_dim[0], hidden_dim[1]),
nn.ReLU(),
nn.Linear(hidden_dim[1], num_actions)
)
def forward(self, x):
return self.layers(x)
def act(self, state, epsilon):
if random.random() > epsilon:
with torch.no_grad():
state = (torch.FloatTensor(state).unsqueeze(0)).cuda()
q_value = self.forward(state)
action = q_value.max(1)[1].data[0]
else:
action = random.randrange(self.num_actions)
return action
class DQNTrainer(object):
#@profile()
def __init__(self, game, params):
self.num_episodes = params['num_episodes']
self.update_freq = params['update_frequency']
self.filename = 'dqn_' + '_'.join([str(v) for k, v in params.items()])
logging.basicConfig(filename='logs/' + self.filename + '.log', level=logging.WARN, filemode='w')
logging.warning("Parameters", params)
self.game = game
self.env = textworld.start(self.game)
self.params = params
if params['replay_buffer_type'] == 'priority':
self.replay_buffer = PriorityReplayBuffer(params['replay_buffer_size'])
elif params['replay_buffer_type'] == 'standard':
self.replay_buffer = ReplayBuffer(params['replay_buffer_size'])
self.vocab = self.load_vocab()
self.all_actions = self.load_action_dictionary()
self.model = DQN(len(self.vocab.items()), len(self.all_actions.items()), params['hidden_dims']).cuda()
# model = nn.DataParallel(model)
self.optimizer = optim.Adam(self.model.parameters(), lr=params['lr'])
self.env.compute_intermediate_reward()
self.env.activate_state_tracking()
self.num_frames = params['num_frames']
self.batch_size = params['batch_size']
self.gamma = params['gamma']
self.losses = []
self.all_rewards = []
self.completion_steps = []
self.rho = params['rho']
if params['scheduler_type'] == 'exponential':
self.e_scheduler = ExponentialSchedule(self.num_frames, params['e_decay'], params['e_final'])
elif params['scheduler_type'] == 'linear':
self.e_scheduler = LinearSchedule(self.num_frames, params['e_final'])
def load_vocab(self):
vocab = eval(open('../w2id.txt', 'r').readline())
return vocab
def load_action_dictionary(self):
all_actions = eval(open('../id2act.txt', 'r').readline())
return all_actions
def state_rep_generator(self, state_description):
bag_of_words = np.zeros(len(self.vocab))
for token in word_tokenize(state_description):
if token not in self.vocab.keys():
token = '<UNK>'
bag_of_words[self.vocab[token]] += 1
return bag_of_words # torch.FloatTensor(bag_of_words).cuda()
def plot(self, frame_idx, rewards, losses, completion_steps):
fig = plt.figure(figsize=(20, 5))
plt.subplot(131)
plt.title('frame %s. reward: %s' % (frame_idx, np.mean(rewards[-10:])))
plt.plot(rewards)
plt.subplot(132)
plt.title('frame %s. steps: %s' % (frame_idx, np.mean(completion_steps[-10:])))
plt.plot(completion_steps)
plt.subplot(133)
plt.title('loss-dqn')
plt.plot(losses)
#txt = "Gamma:" + str(self.gamma) + ", Num Frames:" + str(self.num_frames) + ", E Decay:" + str(epsilon_decay)
plt.figtext(0.5, 0.01, self.filename, wrap=True, horizontalalignment='center', fontsize=12)
#plt.show()
fig.savefig('plots/' + self.filename + '_' + str(frame_idx) + '.png')
def compute_td_loss(self):
state, action, reward, next_state, done = self.replay_buffer.sample(self.batch_size, self.rho)
state = torch.FloatTensor(state).cuda()
with torch.no_grad():
next_state = torch.FloatTensor(next_state).cuda()
action = torch.LongTensor(action).cuda()
reward = torch.FloatTensor(reward).cuda()
done = torch.FloatTensor(1 * done).cuda()
q_values = self.model(state)
next_q_values = self.model(next_state)
q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)
next_q_value = next_q_values.max(1)[0]
expected_q_value = reward + self.gamma * next_q_value * (1 - done)
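        # Bellman target: r + gamma * max_a' Q(s', a'), zeroed past terminal states.
        # E.g. reward=1, gamma=0.5, max next-state Q=2.0 gives a target of 2.0
        # (just 1.0 when done=1).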
#print(q_value.size())
#print(done)
#print(q_value, next_q_value, expected_q_value)
        # Clip the TD error (not the already-reduced scalar loss) before squaring.
        td_error = (q_value - expected_q_value.detach()).clamp(-1.0, 1.0)
        loss = td_error.pow(2).mean()
self.optimizer.zero_grad()
#loss.backward(right_gradient.data.unsqueeze(1)[:, 0])
loss.backward()
self.optimizer.step()
return loss
#@profile()
def train(self):
total_frames = 0
for e_idx in range(1, self.num_episodes + 1):
state = self.env.reset()
state_text = state.description
state_rep = self.state_rep_generator(state_text)
episode_reward = 0
completion_steps = 0
episode_done = False
for frame_idx in range(1, self.num_frames + 1):
epsilon = self.e_scheduler.value(total_frames)
action = self.model.act(state_rep, epsilon)
action_text = self.all_actions[int(action)]
logging.info('-------')
logging.info(state_text)
logging.info(action_text)
next_state, reward, done = self.env.step(action_text)
reward += next_state.intermediate_reward
reward = max(-1.0, min(reward, 1.0))
#if reward != 0:
logging.warning('--------')
logging.warning(frame_idx)
logging.warning(state_text)
#print(next_state_text)
logging.warning(action_text)
logging.warning(reward)
#print(reward)
next_state_text = next_state.description
next_state_rep = self.state_rep_generator(next_state_text)
self.replay_buffer.push(state_rep, action, reward, next_state_rep, done)
state = next_state
state_text = next_state_text
state_rep = next_state_rep
episode_reward += reward
completion_steps += 1
total_frames += 1
if len(self.replay_buffer) > self.batch_size:
if frame_idx % self.update_freq == 0:
loss = self.compute_td_loss()
                        self.losses.append(loss.item())
if done:
logging.warning("Done")
state = self.env.reset()
state_text = state.description
state_rep = self.state_rep_generator(state_text)
self.all_rewards.append(episode_reward)
self.completion_steps.append(completion_steps)
episode_reward = 0
completion_steps = 0
episode_done = True
elif frame_idx == self.num_frames:
self.all_rewards.append(episode_reward)
self.completion_steps.append(completion_steps)
episode_reward = 0
completion_steps = 0
if episode_done:
break
if e_idx % (int(self.num_episodes / 10)) == 0:
logging.info("Episode:" + str(e_idx))
self.plot(e_idx, self.all_rewards, self.losses, self.completion_steps)
self.plot(e_idx, self.all_rewards, self.losses, self.completion_steps)
parameters = {
'model': self.model,
'replay_buffer': self.replay_buffer,
'action_dict': self.all_actions,
'vocab': self.vocab,
'params': self.params,
'stats': {
'losses': self.losses,
'rewards': self.all_rewards,
'completion_steps': self.completion_steps
}
}
torch.save(parameters, 'models/' + self.filename + '_' + str(e_idx) + '.pt')
parameters = {
'model': self.model,
'replay_buffer': self.replay_buffer,
'action_dict': self.all_actions,
'vocab': self.vocab,
'params': self.params,
'stats': {
'losses': self.losses,
'rewards': self.all_rewards,
'completion_steps': self.completion_steps
}
}
torch.save(parameters, 'models/' + self.filename + '_final.pt')
self.env.close()
| 9,801 | 34.132616 | 118 | py |
| KG-DQN | KG-DQN-master/dqn/train.py |
from dqn import DQNTrainer
from utils.grid_search import RandomGridSearch
from joblib import Parallel, delayed
import multiprocessing
import gc
#from guppy import hpy
#from memory_profiler import profile
#@profile
def parallelize(game, params):
print(params)
#game = "/home/eilab/Raj/tw-drl/Games/obj_20_qlen_5_room_10/train/game_" + str(10) + ".ulx"
trainer = DQNTrainer(game, params)
trainer.train()
#del trainer.model
#del trainer
#gc.collect()
"""
while not grid_search.is_done():
params = grid_search.get_config()
#trainer = DQNTrainer(game, params)
#trainer.train()
"""
if __name__ == "__main__":
param_grid = {
'num_episodes': [1000, 5000],
'num_frames': [500, 1000, 5000],
'replay_buffer_type': ['priority', 'standard'],
'replay_buffer_size': [10000, 50000],
#'num_frames': [100000, 500000],
'batch_size': [64],
'lr': [0.01, 0.001],
'gamma': [0.5, 0.2, 0.05],
'rho': [0.25],
'scheduler_type': ['exponential', 'linear'],
'e_decay': [500, 100],
'e_final': [0.01, 0.1, 0.2],
'hidden_dims': [[64, 32], [128, 64], [256, 128]],
'update_frequency': [1, 4, 10]
}
grid_search = RandomGridSearch(param_grid, 0.2, 21)
game = "/home/eilab/Raj/tw-drl/Games/obj_20_qlen_5_room_10/train/game_" + str(10) + ".ulx"
all_params = grid_search.get_configs()#[:4]
#print(len(all_params))
#pool = multiprocessing.Pool(processes=4)
#pool.map(parallelize, all_params)
#pool.close()
#pool.join()
#@profile
#def run():
Parallel(n_jobs=2, prefer='processes')(delayed(parallelize)(game, params) for params in all_params)
#run()
| 1,746 | 28.610169 | 103 | py |
| KG-DQN | KG-DQN-master/utils/graph_replay.py |
from collections import deque
import numpy as np
import random
class GraphReplayBuffer(object):
def __init__(self, capacity):
self.buffer = deque(maxlen=capacity)
def push(self, state, action, reward, next_state, done):
self.buffer.append((state, action, reward, next_state, done))
def sample(self, batch_size, rho=0):
state, action, reward, next_state, done = zip(*random.sample(self.buffer, batch_size))
return state, action, reward, next_state, done
def __len__(self):
return len(self.buffer)
class GraphPriorityReplayBuffer(object):
def __init__(self, capacity):
self.buffer = deque(maxlen=capacity)
self.priority_buffer = deque(maxlen=capacity)
def push(self, state, action, reward, next_state, done):
if reward > 0:
self.priority_buffer.append((state, action, reward, next_state, done))
else:
self.buffer.append((state, action, reward, next_state, done))
def sample(self, batch_size, rho):
pbatch = int(batch_size * rho)
batch = int(batch_size * (1 - rho))
if pbatch > len(self.priority_buffer):
pbatch = len(self.priority_buffer)
batch = batch_size - len(self.priority_buffer)
elif batch > len(self.buffer):
batch = len(self.buffer)
pbatch = batch_size - len(self.buffer)
if pbatch == 0:
state, action, reward, next_state, done = zip(*random.sample(self.buffer, batch))
return list(state), action, reward, list(next_state), done
if batch == 0:
pstate, paction, preward, pnext_state, pdone = zip(*random.sample(self.priority_buffer, pbatch))
return list(pstate), paction, preward, list(pnext_state), pdone
state, action, reward, next_state, done = zip(*random.sample(self.buffer, batch))
pstate, paction, preward, pnext_state, pdone = zip(*random.sample(self.priority_buffer, pbatch))
return pstate + state, paction + action, preward + reward, pnext_state + next_state, pdone + done
def __len__(self):
return len(self.buffer)
| 2,149 | 37.392857 | 108 | py |
| KG-DQN | KG-DQN-master/utils/replay.py |
from collections import deque
import numpy as np
import random
class ReplayBuffer(object):
def __init__(self, capacity):
self.buffer = deque(maxlen=capacity)
def push(self, state, action, reward, next_state, done):
state = np.expand_dims(state, 0)
next_state = np.expand_dims(next_state, 0)
self.buffer.append((state, action, reward, next_state, done))
def sample(self, batch_size, rho=0):
state, action, reward, next_state, done = zip(*random.sample(self.buffer, batch_size))
return np.concatenate(state), action, reward, np.concatenate(next_state), done
def __len__(self):
return len(self.buffer)
class PriorityReplayBuffer(object):
def __init__(self, capacity):
self.buffer = deque(maxlen=capacity)
self.priority_buffer = deque(maxlen=capacity)
def push(self, state, action, reward, next_state, done):
state = np.expand_dims(state, 0)
next_state = np.expand_dims(next_state, 0)
#state = state.unsqueeze(0)
#next_state = next_state.unsqueeze(0)
if reward > 0:
self.priority_buffer.append((state, action, reward, next_state, done))
else:
self.buffer.append((state, action, reward, next_state, done))
def sample(self, batch_size, rho):
pbatch = int(batch_size * rho)
batch = int(batch_size * (1 - rho))
if pbatch > len(self.priority_buffer):
pbatch = len(self.priority_buffer)
batch = batch_size - len(self.priority_buffer)
elif batch > len(self.buffer):
batch = len(self.buffer)
pbatch = batch_size - len(self.buffer)
if pbatch == 0:
state, action, reward, next_state, done = zip(*random.sample(self.buffer, batch))
return np.concatenate(list(state)), action, reward, np.concatenate(list(next_state)), done
if batch == 0:
pstate, paction, preward, pnext_state, pdone = zip(*random.sample(self.priority_buffer, pbatch))
return np.concatenate(list(pstate)), paction, preward, np.concatenate(list(pnext_state)), pdone
pstate, paction, preward, pnext_state, pdone = zip(*random.sample(self.priority_buffer, pbatch))
pstate, paction, preward, pnext_state, pdone = np.concatenate(list(pstate)), paction, preward, np.concatenate(list(pnext_state)), pdone
state, action, reward, next_state, done = zip(*random.sample(self.buffer, batch))
state, action, reward, next_state, done = np.concatenate(list(state)), action, reward, np.concatenate(list(next_state)), done
return np.concatenate([pstate, state]), np.concatenate([paction, action]), np.concatenate([preward, reward]), np.concatenate([pnext_state, next_state]), np.concatenate([pdone, done])
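    # Example: batch_size=64, rho=0.25 draws int(64*0.25)=16 transitions from the
    # priority buffer and int(64*0.75)=48 from the standard buffer, with the
    # fallbacks above when either buffer holds fewer transitions than requested.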
def __len__(self):
return len(self.buffer)
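if __name__ == "__main__":
    # Illustrative sketch (not part of the original file): rho controls the
    # fraction of each batch drawn from the positive-reward buffer. The toy
    # transitions below are made up for demonstration.
    buf = PriorityReplayBuffer(capacity=100)
    for i in range(20):
        s = np.random.rand(4)
        buf.push(s, i % 3, float(i % 2), s, False)  # odd i -> reward 1 -> priority
    states, actions, rewards, next_states, dones = buf.sample(batch_size=8, rho=0.25)
    print(states.shape)  # -> (8, 4)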
| 2,867
| 41.80597
| 190
|
py
|
KG-DQN
|
KG-DQN-master/utils/grid_search.py
|
import utils.schedule
import itertools
import random
def generate_cartesian_product(grid):
    # `grid` maps each parameter name to a list of candidate values; renamed
    # from `dict` to avoid shadowing the builtin
    result = []
    values = []
    keys = []
    for key in grid:
        keys.append(key)
        values.append(grid[key])
    for p in itertools.product(*values):
pending_object = {}
for index in range(len(p)):
pending_object[keys[index]] = p[index]
result.append(pending_object)
return result
class RandomGridSearch(object):
def __init__(self, grid, per_params, seed):
self.grid = grid
self.all_configs = generate_cartesian_product(self.grid)
print(len(self.all_configs))
self.max_params = int(per_params * len(self.all_configs))
self.finished_params = 0
random.seed(seed)
def get_config(self):
        # random.randint is inclusive on both ends, so it could index one past the end
        idx = random.randrange(len(self.all_configs))
self.finished_params += 1
return self.all_configs.pop(idx)
def get_configs(self):
idxs = random.sample(range(len(self.all_configs)), k=self.max_params)
return [self.all_configs[i] for i in idxs]
def is_done(self):
return self.finished_params == self.max_params
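if __name__ == "__main__":
    # Illustrative sketch (not part of the original file): the grid below is
    # made up. generate_cartesian_product expands it into every combination,
    # and RandomGridSearch samples a fraction of those combinations.
    toy_grid = {'lr': [0.01, 0.001], 'batch_size': [32, 64]}
    print(generate_cartesian_product(toy_grid))
    # -> [{'lr': 0.01, 'batch_size': 32}, {'lr': 0.01, 'batch_size': 64}, ...]
    search = RandomGridSearch(toy_grid, per_params=0.5, seed=42)
    print(search.get_configs())  # 2 of the 4 configs, chosen at random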
| 1,188
| 26.022727
| 77
|
py
|
KG-DQN
|
KG-DQN-master/utils/schedule.py
|
import math
"""
Adapted from https://github.com/berkeleydeeprlcourse/homework
"""
class Schedule(object):
def value(self, t):
"""Value of the schedule at time t"""
raise NotImplementedError()
class ConstantSchedule(object):
def __init__(self, value):
"""Value remains constant over time.
Parameters
----------
value: float
Constant value of the schedule
"""
self._v = value
def value(self, t):
"""See Schedule.value"""
return self._v
def linear_interpolation(l, r, alpha):
return l + alpha * (r - l)
class PiecewiseSchedule(object):
def __init__(self, endpoints, interpolation=linear_interpolation, outside_value=None):
"""Piecewise schedule.
endpoints: [(int, int)]
            list of pairs `(time, value)` meaning that the schedule should output
`value` when `t==time`. All the values for time must be sorted in
an increasing order. When t is between two times, e.g. `(time_a, value_a)`
and `(time_b, value_b)`, such that `time_a <= t < time_b` then value outputs
`interpolation(value_a, value_b, alpha)` where alpha is a fraction of
time passed between `time_a` and `time_b` for time `t`.
interpolation: lambda float, float, float: float
a function that takes value to the left and to the right of t according
to the `endpoints`. Alpha is the fraction of distance from left endpoint to
right endpoint that t has covered. See linear_interpolation for example.
outside_value: float
            if the value is requested outside of all the intervals specified in
`endpoints` this value is returned. If None then AssertionError is
raised when outside value is requested.
"""
idxes = [e[0] for e in endpoints]
assert idxes == sorted(idxes)
self._interpolation = interpolation
self._outside_value = outside_value
self._endpoints = endpoints
def value(self, t):
"""See Schedule.value"""
for (l_t, l), (r_t, r) in zip(self._endpoints[:-1], self._endpoints[1:]):
if l_t <= t and t < r_t:
alpha = float(t - l_t) / (r_t - l_t)
return self._interpolation(l, r, alpha)
# t does not belong to any of the pieces, so doom.
assert self._outside_value is not None
return self._outside_value
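def _piecewise_example():
    # Illustrative sketch (not part of the original file; endpoints are made up):
    # anneal 1.0 -> 0.1 over the first 100 steps, then hold at outside_value.
    sched = PiecewiseSchedule([(0, 1.0), (100, 0.1)], outside_value=0.1)
    return sched.value(50), sched.value(500)  # -> (0.55, 0.1)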
class LinearSchedule(object):
def __init__(self, schedule_timesteps, final_e, initial_e=1.0):
"""Linear interpolation between initial_p and final_p over
schedule_timesteps. After this many timesteps pass final_p is
returned.
Parameters
----------
schedule_timesteps: int
Number of timesteps for which to linearly anneal initial_p
to final_p
        initial_e: float
            initial output value
        final_e: float
            final output value
"""
self.schedule_timesteps = schedule_timesteps
self.final_e = final_e
self.initial_e = initial_e
def value(self, t):
"""See Schedule.value"""
fraction = min(float(t) / self.schedule_timesteps, 1.0)
return self.initial_e + fraction * (self.final_e - self.initial_e)
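def _linear_example():
    # Illustrative sketch (not part of the original file; numbers are made up):
    # anneal epsilon from 1.0 to 0.1 over 1000 steps, then stay at 0.1.
    sched = LinearSchedule(schedule_timesteps=1000, final_e=0.1)
    return sched.value(500), sched.value(5000)  # -> (0.55, 0.1)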
class ExponentialSchedule(object):
def __init__(self, schedule_timesteps, decay, final_e, initial_e=1.0):
self.decay = decay
self.initial_e = initial_e
self.final_e = final_e
self.schedule_timesteps = schedule_timesteps
def value(self, t):
return self.final_e + (self.initial_e - self.final_e) * math.exp(-1. * t / self.decay)
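def _exponential_example():
    # Illustrative sketch (not part of the original file; numbers are made up):
    # epsilon decays from 1.0 towards 0.1 with time constant `decay`.
    sched = ExponentialSchedule(schedule_timesteps=10000, decay=1000, final_e=0.1)
    return sched.value(0), sched.value(1000)  # -> (1.0, ~0.43)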
| 3,728
| 35.203883
| 94
|
py
|
KG-DQN
|
KG-DQN-master/utils/drqa_utils.py
|
import argparse
class AverageMeter(object):
"""Keep exponential weighted averages."""
def __init__(self, beta=0.99):
self.beta = beta
self.moment = 0
self.value = 0
self.t = 0
def state_dict(self):
return vars(self)
def load(self, state_dict):
for k, v in state_dict.items():
self.__setattr__(k, v)
def update(self, val):
self.t += 1
self.moment = self.beta * self.moment + (1 - self.beta) * val
# bias correction
self.value = self.moment / (1 - self.beta ** self.t)
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
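def _average_meter_example():
    # Illustrative sketch (not part of the original file): with beta=0.9 the
    # meter tracks a bias-corrected exponential moving average, so after a
    # single update of 5.0 it reports 5.0 rather than 0.5.
    meter = AverageMeter(beta=0.9)
    meter.update(5.0)
    return meter.value  # -> 5.0 (moment is 0.5, divided by 1 - 0.9**1)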
| 829
| 23.411765
| 69
|
py
|
KG-DQN
|
KG-DQN-master/kgdqn/gdqn.py
|
import networkx as nx
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import torch.nn.functional as F
import spacy
import logging
import textworld
import matplotlib.pyplot as plt
from representations import StateNAction
from utils.schedule import *
#from utils.priority_replay import PriorityReplayBuffer
#from utils.replay import ReplayBuffer
from utils.graph_replay import *
from models import KGDQN
import numpy as np
import itertools
class KGDQNTrainer(object):
def __init__(self, game, params):
self.num_episodes = params['num_episodes']
self.state = StateNAction()
self.update_freq = params['update_frequency']
self.filename = 'kgdqn_' + '_'.join([str(v) for k, v in params.items() if 'file' not in str(k)])
logging.basicConfig(filename='logs/' + self.filename + '.log', filemode='w')
logging.warning("Parameters", params)
self.env = textworld.start(game)
self.params = params
if params['replay_buffer_type'] == 'priority':
self.replay_buffer = GraphPriorityReplayBuffer(params['replay_buffer_size'])
elif params['replay_buffer_type'] == 'standard':
self.replay_buffer = GraphReplayBuffer(params['replay_buffer_size'])
params['vocab_size'] = len(self.state.vocab_drqa)
self.model = KGDQN(params, self.state.all_actions).cuda()
if self.params['preload_weights']:
self.model = torch.load(self.params['preload_file'])['model']
# model = nn.DataParallel(model)
self.optimizer = optim.Adam(self.model.parameters(), lr=params['lr'])
self.env.compute_intermediate_reward()
self.env.activate_state_tracking()
self.num_frames = params['num_frames']
self.batch_size = params['batch_size']
self.gamma = params['gamma']
self.losses = []
self.all_rewards = []
self.completion_steps = []
#priority fraction
self.rho = params['rho']
if params['scheduler_type'] == 'exponential':
self.e_scheduler = ExponentialSchedule(self.num_frames, params['e_decay'], params['e_final'])
elif params['scheduler_type'] == 'linear':
self.e_scheduler = LinearSchedule(self.num_frames, params['e_final'])
def plot(self, frame_idx, rewards, losses, completion_steps):
fig = plt.figure(figsize=(20, 5))
plt.subplot(131)
plt.title('frame %s. avg reward: %s' % (frame_idx, np.mean(rewards[-10:])))
plt.plot(rewards)
plt.subplot(132)
plt.title('frame %s. avg steps: %s' % (frame_idx, np.mean(completion_steps[-10:])))
plt.plot(completion_steps)
plt.subplot(133)
plt.title('loss-kgdqn')
plt.plot(losses)
plt.figtext(0.5, 0.01, self.filename, wrap=True, horizontalalignment='center', fontsize=12)
fig.savefig('plots/' + self.filename + '_' + str(frame_idx) + '.png')
#plt.show()
def compute_td_loss(self):
state, action, reward, next_state, done = self.replay_buffer.sample(self.batch_size, self.rho)
reward = torch.FloatTensor(reward).cuda()
        done = torch.FloatTensor([float(d) for d in done]).cuda()
action_t = torch.LongTensor(action).cuda()
q_value = self.model.forward_td_init(state, action_t)[0][0]
with torch.no_grad():
#Loop through all feasible actions for fwd
actions = torch.LongTensor([a.pruned_actions_rep for a in list(next_state)]).cuda()
fwd_init, sts = self.model.forward_td_init(next_state, actions[:, 0, :])#.unsqueeze_(0)
next_q_values = fwd_init[0].unsqueeze_(0)
for i in range(1, actions.size(1)):
act = actions[:, i, :]#.squeeze()
sts = sts.new_tensor(sts.data)
cat_q = self.model.forward_td(sts, next_state, act)[0].unsqueeze_(0)
next_q_values = torch.cat((next_q_values, cat_q), dim=0)
next_q_values = next_q_values.transpose(0, 1)
next_q_value = next_q_values.max(1)[0]
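        # Bellman target: r + gamma * max_a' Q(s', a'), with the bootstrap term
        # zeroed out at terminal states via (1 - done)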
expected_q_value = reward + self.gamma * next_q_value * (1 - done)
loss = (q_value - (expected_q_value.data)).pow(2).mean()
# clipped_loss = loss.clamp(-1.0, 1.0)
#loss = loss.clamp(-1.0, 1.0)
# right_gradient = clipped_loss * -1.0
# loss.backward(right_gradient.data.unsqueeze(1)[:, 0])
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
return loss
def train(self):
total_frames = 0
for e_idx in range(1, self.num_episodes + 1):
print("Episode:", e_idx)
logging.info("Episode:" + str(e_idx))
state = self.env.reset()
self.state.step(state.description, pruned=self.params['pruned'])
self.model.train()
# print(state)
episode_reward = 0
completion_steps = 0
episode_done = False
prev_action = None
for frame_idx in range(1, self.num_frames + 1):
epsilon = self.e_scheduler.value(total_frames)
action, picked = self.model.act(self.state, epsilon)
action_text = self.state.get_action_text(action)
logging.info('-------')
logging.info(self.state.visible_state)
logging.info('picked:' + str(picked))
logging.info(action_text)
next_state, reward, done = self.env.step(action_text)
#if next_state.intermediate_reward == 0:
# reward += -0.1
#else:
# reward += next_state.intermediate_reward
reward += next_state.intermediate_reward
reward = max(-1.0, min(reward, 1.0))
if reward != 0:
print(action_text, reward)
logging.warning('--------')
logging.warning(frame_idx)
logging.warning(self.state.visible_state)
logging.warning(action_text)
logging.warning(reward)
episode_reward += reward
completion_steps += 1
total_frames += 1
if done:
logging.warning("Done")
self.all_rewards.append(episode_reward)
self.completion_steps.append(completion_steps)
episode_reward = 0
completion_steps = 0
break
elif frame_idx == self.num_frames:
self.all_rewards.append(episode_reward)
self.completion_steps.append(completion_steps)
episode_reward = 0
completion_steps = 0
state = self.state
self.state.step(next_state.description, prev_action=prev_action, pruned=self.params['pruned'])
prev_action = action_text
self.replay_buffer.push(state, action, reward, self.state, done)
if len(self.replay_buffer) > self.batch_size:
if frame_idx % self.update_freq == 0:
loss = self.compute_td_loss()
self.losses.append(loss.item())
# """
self.plot(e_idx, self.all_rewards, self.losses, self.completion_steps)
            if e_idx % max(1, self.num_episodes // 500) == 0:
logging.info("Episode:" + str(e_idx))
# self.plot(frame_idx, self.all_rewards, self.losses, self.completion_steps)
parameters = {
'model': self.model,
'replay_buffer': self.replay_buffer,
'action_dict': self.state.all_actions,
'vocab_drqa': self.state.vocab_drqa,
'vocab_kge': self.state.vocab_kge,
'params': self.params,
'stats': {
'losses': self.losses,
'rewards': self.all_rewards,
'completion_steps': self.completion_steps
}
}
torch.save(parameters, 'models/' + self.filename + '_' + str(e_idx) + '.pt')
parameters = {
'model': self.model,
'replay_buffer': self.replay_buffer,
'action_dict': self.state.all_actions,
'vocab_drqa': self.state.vocab_drqa,
'vocab_kge': self.state.vocab_kge,
'params': self.params,
'stats': {
'losses': self.losses,
'rewards': self.all_rewards,
'completion_steps': self.completion_steps
}
}
torch.save(parameters, 'models/' + self.filename + '_final.pt')
self.env.close()
| 8,916
| 36.624473
| 110
|
py
|
KG-DQN
|
KG-DQN-master/kgdqn/layers.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=False):
super(GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(nn.init.xavier_uniform_(torch.Tensor(in_features, out_features).type(torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor), gain=np.sqrt(2.0)), requires_grad=True)
self.a = nn.Parameter(nn.init.xavier_uniform_(torch.Tensor(2*out_features, 1).type(torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor), gain=np.sqrt(2.0)), requires_grad=True)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, input, adj):
h = torch.mm(input, self.W)
N = h.size()[0]
a_input = torch.cat([h.repeat(1, N).view(N * N, -1), h.repeat(N, 1)], dim=1).view(N, -1, 2 * self.out_features)
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))
zero_vec = torch.zeros_like(e)
zero_vec = zero_vec.fill_(9e-15)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, h)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
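def _gat_layer_example():
    # Illustrative sketch (not part of the original file): one attention pass
    # over a random 4-node graph. All dimensions here are made up; the layer
    # allocates its weights on GPU when CUDA is available, as above.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    layer = GraphAttentionLayer(in_features=8, out_features=4, dropout=0.1, alpha=0.2).to(device)
    x = torch.randn(4, 8, device=device)                   # node features
    adj = (torch.rand(4, 4, device=device) > 0.5).float()  # random adjacency
    return layer(x, adj).shape  # -> torch.Size([4, 4])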
class EncoderLSTM(nn.Module):
def __init__(self, vocab_size, embedding_size, hidden_size, padding_idx,
dropout_ratio, embeddings, bidirectional=False, num_layers=1):
super(EncoderLSTM, self).__init__()
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.drop = nn.Dropout(p=dropout_ratio)
self.num_directions = 2 if bidirectional else 1
self.num_layers = num_layers
self.embedding = embeddings#nn.Embedding(vocab_size, embedding_size, padding_idx)
self.lstm = nn.LSTM(embedding_size, hidden_size, self.num_layers,
batch_first=True, dropout=dropout_ratio,
bidirectional=bidirectional)
self.encoder2decoder = nn.Linear(hidden_size * self.num_directions,
hidden_size * self.num_directions
)
def init_state(self, inputs):
batch_size = inputs.size(0)
h0 = Variable(torch.zeros(
self.num_layers * self.num_directions,
batch_size,
self.hidden_size
), requires_grad=False)
c0 = Variable(torch.zeros(
self.num_layers * self.num_directions,
batch_size,
self.hidden_size
), requires_grad=False)
return h0.cuda(), c0.cuda()
def forward(self, inputs, lengths=0):
embeds = self.embedding(inputs) # (batch, seq_len, embedding_size)
embeds = self.drop(embeds)
h0, c0 = self.init_state(inputs)
enc_h, (enc_h_t, enc_c_t) = self.lstm(embeds, (h0, c0))
if self.num_directions == 2:
h_t = torch.cat((enc_h_t[-1], enc_h_t[-2]), 1)
c_t = torch.cat((enc_c_t[-1], enc_c_t[-2]), 1)
else:
h_t = enc_h_t[-1]
c_t = enc_c_t[-1] # (batch, hidden_size)
decoder_init = nn.Tanh()(self.encoder2decoder(h_t))
ctx = self.drop(enc_h)
return ctx,decoder_init,c_t # (batch, seq_len, hidden_size*num_directions)
# (batch, hidden_size)
class StackedBRNN(nn.Module):
def __init__(self, input_size, hidden_size, num_layers,
dropout_rate=0, dropout_output=False, rnn_type=nn.LSTM,
concat_layers=False, padding=False):
super(StackedBRNN, self).__init__()
self.padding = padding
self.dropout_output = dropout_output
self.dropout_rate = dropout_rate
self.num_layers = num_layers
self.concat_layers = concat_layers
self.rnns = nn.ModuleList()
for i in range(num_layers):
input_size = input_size if i == 0 else 2 * hidden_size
self.rnns.append(rnn_type(input_size, hidden_size,
num_layers=1,
bidirectional=True))
def forward(self, x, x_mask):
return self._forward_unpadded(x, x_mask)
def _forward_unpadded(self, x, x_mask):
batch = x.size(0)
x = x.transpose(0, 1)
outputs = [x]
hst = []
for i in range(self.num_layers):
rnn_input = outputs[-1]
if self.dropout_rate > 0:
rnn_input = F.dropout(rnn_input,
p=self.dropout_rate,
training=self.training)
# Forward
rnn_output = self.rnns[i](rnn_input)#[1][0]#.view(-1, batch)
outputs.append(rnn_output[0])
hst.append(rnn_output[1][0].view(batch, 1, -1).squeeze_(1))
# Concat hidden layers
if self.concat_layers:
output = torch.cat(outputs[1:], 2)
hst_opt = torch.cat(hst, 1)
else:
output = outputs[-1]
hst_opt = hst[-1]
# Transpose back
output = output.transpose(0, 1)
# Dropout on output layer
if self.dropout_output and self.dropout_rate > 0:
output = F.dropout(output,
p=self.dropout_rate,
training=self.training)
hst_opt = F.dropout(hst_opt, p=self.dropout_rate, training=self.training)
return output, hst_opt
def uniform_weights(x, x_mask):
alpha = Variable(torch.ones(x.size(0), x.size(1)))
if x.data.is_cuda:
alpha = alpha.cuda()
alpha = alpha * x_mask.eq(0).float()
alpha = alpha / alpha.sum(1).expand(alpha.size())
return alpha
def weighted_avg(x, weights):
return weights.unsqueeze(1).bmm(x).squeeze(1)
| 6,418
| 35.68
| 215
|
py
|
KG-DQN
|
KG-DQN-master/kgdqn/rnn_reader.py
|
import torch
import torch.nn as nn
import layers
from layers import *
class RnnDocReader(nn.Module):
"""Network for the Document Reader module of DrQA."""
RNN_TYPES = {'lstm': nn.LSTM, 'gru': nn.GRU, 'rnn': nn.RNN}
def __init__(self, opt, padding_idx=0, embedding=None):
super(RnnDocReader, self).__init__()
# Store config
self.opt = opt
# Word embeddings
if opt['pretrained_words']:
assert embedding is not None
self.embedding = nn.Embedding.from_pretrained(embedding, freeze=False)
if opt['fix_embeddings']:
assert opt['tune_partial'] == 0
self.embedding.weight.requires_grad = False
elif opt['tune_partial'] > 0:
print(embedding.size())
assert opt['tune_partial'] + 2 < embedding.size(0)
offset = self.opt['tune_partial'] + 2
def embedding_hook(grad, offset=offset):
grad[offset:] = 0
return grad
self.embedding.weight.register_hook(embedding_hook)
else: # random initialized
self.embedding = nn.Embedding(opt['vocab_size'],
opt['embedding_dim'],
padding_idx=padding_idx)
# Projection for attention weighted question
if opt['use_qemb']:
self.qemb_match = SeqAttnMatch(opt['embedding_dim'])
# Input size to RNN: word emb + question emb + manual features
doc_input_size = opt['embedding_dim'] + opt['num_features']
if opt['use_qemb']:
doc_input_size += opt['embedding_dim']
if opt['pos']:
doc_input_size += opt['pos_size']
if opt['ner']:
doc_input_size += opt['ner_size']
# RNN document encoder
self.doc_rnn = StackedBRNN(
input_size=doc_input_size,
hidden_size=opt['hidden_size'],
num_layers=opt['doc_layers'],
dropout_rate=opt['dropout_rnn'],
dropout_output=opt['dropout_rnn_output'],
concat_layers=opt['concat_rnn_layers'],
rnn_type=self.RNN_TYPES[opt['rnn_type']],
padding=opt['rnn_padding'],
)
# RNN question encoder
self.question_rnn = StackedBRNN(
input_size=opt['embedding_dim'],
hidden_size=opt['hidden_size'],
num_layers=opt['question_layers'],
dropout_rate=opt['dropout_rnn'],
dropout_output=opt['dropout_rnn_output'],
concat_layers=opt['concat_rnn_layers'],
rnn_type=self.RNN_TYPES[opt['rnn_type']],
padding=opt['rnn_padding'],
)
# Output sizes of rnn encoders
doc_hidden_size = 2 * opt['hidden_size']
question_hidden_size = 2 * opt['hidden_size']
if opt['concat_rnn_layers']:
doc_hidden_size *= opt['doc_layers']
question_hidden_size *= opt['question_layers']
# Question merging
if opt['question_merge'] not in ['avg', 'self_attn']:
raise NotImplementedError('question_merge = %s' % opt['question_merge'])
if opt['question_merge'] == 'self_attn':
self.self_attn = layers.LinearSeqAttn(question_hidden_size)
# Bilinear attention for span start/end
self.start_attn = layers.BilinearSeqAttn(
doc_hidden_size,
question_hidden_size,
)
self.end_attn = layers.BilinearSeqAttn(
doc_hidden_size,
question_hidden_size,
)
def forward(self, x1, x1_f, x1_pos, x1_ner, x1_mask, x2, x2_mask):
"""Inputs:
x1 = document word indices [batch * len_d]
x1_f = document word features indices [batch * len_d * nfeat]
x1_pos = document POS tags [batch * len_d]
x1_ner = document entity tags [batch * len_d]
x1_mask = document padding mask [batch * len_d]
x2 = question word indices [batch * len_q]
x2_mask = question padding mask [batch * len_q]
"""
# Embed both document and question
#print(x1[0])
#print(x2[0])
x1_emb = self.embedding(x1)
x2_emb = self.embedding(x2)
#print(x2_emb)
# Dropout on embeddings
if self.opt['dropout_emb'] > 0:
x1_emb = nn.functional.dropout(x1_emb, p=self.opt['dropout_emb'],
training=self.training)
x2_emb = nn.functional.dropout(x2_emb, p=self.opt['dropout_emb'],
training=self.training)
drnn_input_list = [x1_emb, x1_f]
# Add attention-weighted question representation
if self.opt['use_qemb']:
x2_weighted_emb = self.qemb_match(x1_emb, x2_emb, x2_mask)
drnn_input_list.append(x2_weighted_emb)
if self.opt['pos']:
drnn_input_list.append(x1_pos)
if self.opt['ner']:
drnn_input_list.append(x1_ner)
drnn_input = torch.cat(drnn_input_list, 2)
# Encode document with RNN
doc_hiddens = self.doc_rnn(drnn_input, x1_mask)
# Encode question with RNN + merge hiddens
question_hiddens = self.question_rnn(x2_emb, x2_mask)
if self.opt['question_merge'] == 'avg':
q_merge_weights = layers.uniform_weights(question_hiddens, x2_mask)
elif self.opt['question_merge'] == 'self_attn':
q_merge_weights = self.self_attn(question_hiddens, x2_mask)
question_hidden = layers.weighted_avg(question_hiddens, q_merge_weights)
# Predict start and end positions
start_scores = self.start_attn(doc_hiddens, question_hidden, x1_mask)
end_scores = self.end_attn(doc_hiddens, question_hidden, x1_mask)
return start_scores, end_scores
| 5,938
| 39.958621
| 84
|
py
|
KG-DQN
|
KG-DQN-master/kgdqn/models.py
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import torch.nn.functional as F
import spacy
import numpy as np
from layers import *
from drqa import *
import random  # used in KGDQN.act; previously relied on the star import above
class GAT(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
super(GAT, self).__init__()
self.dropout = dropout
self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=False) for _ in
range(nheads)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout=dropout, alpha=alpha, concat=False)
def forward(self, x, adj):
x = F.dropout(x, self.dropout, training=self.training)
x = torch.cat([att(x, adj) for att in self.attentions], dim=1)
x = F.dropout(x, self.dropout, training=self.training)
return x
class KGDQN(nn.Module):
def __init__(self, params, actions):
super(KGDQN, self).__init__()
self.params = params
if self.params['qa_init']:
pretrained_action_embs = torch.load(params['act_emb_init_file'])['state_dict']['embeddings']['weight']
self.action_emb = nn.Embedding.from_pretrained(pretrained_action_embs, freeze=False)
self.action_drqa = ActionDrQA(params, pretrained_action_embs)
self.state_gat = StateNetwork(actions, params, pretrained_action_embs)
else:
self.action_emb = nn.Embedding(params['vocab_size'], params['embedding_size'])
self.action_drqa = ActionDrQA(params, self.action_emb.weight)
self.state_gat = StateNetwork(actions, params, self.action_emb.weight)
self.action_enc = EncoderLSTM(params['vocab_size'], params['embedding_size'], params['hidden_size'],
params['padding_idx'], params['dropout_ratio'],
self.action_emb) # , params['bidirectional'],
self.state_fc = nn.Linear(params['drqa_emb_size'] + params['hidden_size'], 100)
def forward(self, s_t, emb_a_t, encoded_doc):
batch = emb_a_t.size(0)
state_emb = torch.cat((s_t, encoded_doc), dim=1)
state_emb = self.state_fc(state_emb)
q = torch.bmm(state_emb.view(batch, 1, 100), emb_a_t.view(batch, 100, 1)).view(batch)
return q, emb_a_t#action_embedding
def forward_td_init(self, state, a_t):
state = list(state)
drqa_input = torch.LongTensor(state[0].drqa_input).unsqueeze_(0).cuda()
sts = self.state_gat(state[0].graph_state_rep).unsqueeze_(0)
for i in range(1, len(state)):
sts = torch.cat((sts, self.state_gat(state[i].graph_state_rep).unsqueeze_(0)), dim=0)
drqa_input = torch.cat((drqa_input, torch.LongTensor(state[i].drqa_input).unsqueeze_(0).cuda()), dim=0)
encoded_doc = self.action_drqa(drqa_input, state)[1]
_, emb_a_t, _ = self.action_enc(a_t)
return self.forward(sts, emb_a_t, encoded_doc), sts#.squeeze()
def forward_td(self, state_rep, state, a_t):
drqa_input = torch.LongTensor(state[0].drqa_input).unsqueeze_(0).cuda()
for i in range(1, len(state)):
drqa_input = torch.cat((drqa_input, torch.LongTensor(state[i].drqa_input).unsqueeze_(0).cuda()), dim=0)
encoded_doc = self.action_drqa(drqa_input, state)[1]
_, emb_a_t, _ = self.action_enc(a_t)
return self.forward(state_rep, emb_a_t, encoded_doc)
def act(self, state, epsilon, epsilon2=0.15):
graph_state_rep = state.graph_state_rep
if not self.params['pruned']:
epsilon2 = 0
if random.random() > epsilon:
feasible_actions_rep = state.all_actions_rep
with torch.no_grad():
drqa_input = torch.LongTensor(state.drqa_input).unsqueeze_(0).cuda()
s_t = self.state_gat(graph_state_rep).unsqueeze_(0).repeat(len(feasible_actions_rep), 1).cuda()
encoded_doc = self.action_drqa(drqa_input, state)[1]
a_t = torch.LongTensor(feasible_actions_rep).cuda()#unsqueeze_(0).cuda()
encoded_doc = encoded_doc.repeat(len(feasible_actions_rep), 1)
_, emb_a_t, _ = self.action_enc(a_t)
fwd, fwd_at = self.forward(s_t, emb_a_t, encoded_doc)
max_q, max_idx = torch.max(fwd, 0)
                action_ids = feasible_actions_rep[max_idx.item()]
picked = True
else:
if self.params['pruned']:
if random.random() > epsilon2:
feasible_actions_rep = state.all_actions_rep
else:
feasible_actions_rep = state.pruned_actions_rep
else:
feasible_actions_rep = state.pruned_actions_rep
action_ids = feasible_actions_rep[random.randrange(len(feasible_actions_rep))]
picked = False
return action_ids, picked#, s_t[0].squeeze_(), fwd_at[max_idx].squeeze_()
class StateNetwork(nn.Module):
def __init__(self, action_set, params, embeddings=None):
super(StateNetwork, self).__init__()
        self.action_set = action_set
        self.params = params  # init_state_ent_emb below reads self.params
        self.gat = GAT(params['gat_emb_size'], 3, len(action_set), params['dropout_ratio'], 0.2, 1)
        if params['qa_init']:
            self.pretrained_embeds = nn.Embedding.from_pretrained(embeddings, freeze=False)
        else:
            # wrap the raw weight tensor so it can be called like an embedding layer
            self.pretrained_embeds = nn.Embedding.from_pretrained(embeddings.data.clone(), freeze=False)
        self.vocab_kge = self.load_vocab_kge()
        self.vocab = self.load_vocab()
        self.init_state_ent_emb()
self.fc1 = nn.Linear(self.state_ent_emb.weight.size()[0] * 3 * 1, 100)
def init_state_ent_emb(self):
embeddings = torch.zeros((len(self.vocab_kge), self.params['embedding_size']))
for i in range(len(self.vocab_kge)):
graph_node_text = self.vocab_kge[i].split('_')
graph_node_ids = []
for w in graph_node_text:
if w in self.vocab.keys():
if self.vocab[w] < len(self.vocab) - 2:
graph_node_ids.append(self.vocab[w])
else:
graph_node_ids.append(1)
else:
graph_node_ids.append(1)
graph_node_ids = torch.LongTensor(graph_node_ids).cuda()
cur_embeds = self.pretrained_embeds(graph_node_ids)
cur_embeds = cur_embeds.mean(dim=0)
embeddings[i, :] = cur_embeds
self.state_ent_emb = nn.Embedding.from_pretrained(embeddings, freeze=False)
def load_vocab_kge(self):
ent = {}
with open('initialize_double/state/entity2id.tsv', 'r') as f:
for line in f:
e, eid = line.split('\t')
ent[int(eid.strip())] = e.strip()
return ent
def load_vocab(self):
vocab = eval(open('../w2id.txt', 'r').readline())
return vocab
def forward(self, graph_rep):
node_feats, adj = graph_rep
adj = torch.IntTensor(adj).cuda()
x = self.gat(self.state_ent_emb.weight, adj).view(-1)
out = self.fc1(x)
return out
class ActionDrQA(nn.Module):
def __init__(self, opt, embeddings):
super(ActionDrQA, self).__init__()
doc_input_size = opt['embedding_size']
if opt['qa_init']:
self.embeddings = nn.Embedding.from_pretrained(embeddings, freeze=False)
else:
self.embeddings = nn.Embedding(opt['vocab_size'], opt['embedding_size'])
self.doc_rnn = StackedBRNN(
input_size=doc_input_size,
hidden_size=opt['doc_hidden_size'],
num_layers=opt['doc_layers'],
dropout_rate=opt['doc_dropout_rnn'],
dropout_output=opt['doc_dropout_rnn_output'],
concat_layers=opt['doc_concat_rnn_layers'],
rnn_type=nn.LSTM,
padding=opt['doc_rnn_padding'],
)
if opt['qa_init']:
inter = torch.load(opt['act_emb_init_file'])['state_dict']['doc_encoder']#['weight']
self.doc_rnn.load_state_dict(inter)
def forward(self, vis_state_tensor, state):
mask = torch.IntTensor([80] * vis_state_tensor.size(0)).cuda()
emb_tensor = self.embeddings(vis_state_tensor)
return self.doc_rnn(emb_tensor, mask)
| 8,466
| 40.915842
| 115
|
py
|
KG-DQN
|
KG-DQN-master/kgdqn/train.py
|
from gdqn import KGDQNTrainer
from utils.grid_search import RandomGridSearch
from joblib import Parallel, delayed
def parallelize(game, params):
print(params)
trainer = KGDQNTrainer(game, params)
trainer.train()
if __name__ == "__main__":
#Example for random grid search on the parameter space
"""
param_grid = {
'replay_buffer_type': ['priority', 'standard'],
'replay_buffer_size': [10000, 50000],
'num_frames': [100000, 500000],
'batch_size': [64],
'lr': [0.01, 0.001],
        'gamma': [0.5, 0.2],
'rho': [0.25],
'scheduler_type': ['exponential', 'linear'],
'e_decay': [500, 10000, 20000, 50000],
'e_final': [0.01, 0.1, 0.2],
'hidden_dims': [[64, 32], [128, 64], [256, 128]],
'update_frequency': [1, 4, 10]
}
"""
#grid_search = RandomGridSearch(param_grid, 0.2, 21)
#insert one ulx file generated by tw-make here
game = "*.ulx"
#all_params = grid_search.get_configs()
# parallelize(game, all_params[0])
#Uncomment and define cuda visible device to parallelize across multiple processes
#Parallel(n_jobs=2, prefer='processes', backend='multiprocessing')(
# delayed(parallelize)(game, params) for params in all_params)
params = {
'replay_buffer_type': 'priority',
'replay_buffer_size': 100000,
'num_episodes': 5000,
'num_frames': 5000,
'batch_size': 32,
'lr': 0.001,
'gamma': 0.5,
'rho': 0.25,
'scheduler_type': 'exponential',
'e_decay': 10000,
'e_final': 0.2,
'hidden_dims': 0,
'update_frequency': 5,
'padding_idx': 0,
'embedding_size': 50,
'dropout_ratio': 0.2,
'hidden_size': 100,
'gat_emb_size': 50,
'drqa_emb_size': 384,
'gat_emb_init_file': '',
'act_emb_init_file': '',
'preload_weights': False,
'preload_file': '',
'pruned': False,
'max_actions': 40,
'init_graph_embeds': True,
'qa_init': True,
'vocab_size': 1000,
'cuda_device': 1,
'gameid': 0,
}
drqa_params = {
'doc_hidden_size': 64,
'doc_layers': 3,
'doc_dropout_rnn': 0.2,
'doc_dropout_rnn_output': True,
'doc_concat_rnn_layers': True,
'doc_rnn_padding': True
}
params.update(drqa_params)
parallelize(game, params)
| 2,461
| 27.298851
| 86
|
py
|
KG-DQN
|
KG-DQN-master/kgdqn/representations.py
|
import networkx as nx
import requests
from nltk import sent_tokenize, word_tokenize
import json
import numpy as np
import re
import matplotlib.pyplot as plt
import itertools
import random
def call_stanford_openie(sentence):
url = "http://localhost:9000/"
querystring = {
"properties": "%7B%22annotators%22%3A%20%22openie%22%7D",
"pipelineLanguage": "en"}
response = requests.request("POST", url, data=sentence, params=querystring)
response = json.JSONDecoder().decode(response.text)
return response
class StateNAction(object):
def __init__(self):
self.graph_state = nx.DiGraph()
self.graph_state_rep = []
self.visible_state = ""
self.drqa_input = ""
self.vis_pruned_actions = []
self.pruned_actions_rep = []
self.vocab_drqa = self.load_vocab()
self.rev_vocab_drqa = {v: k for k, v in self.vocab_drqa.items()}
self.all_actions = self.load_action_dictionary()
self.vocab_kge = self.load_vocab_kge()
self.adj_matrix = np.zeros((len(self.vocab_kge['entity']), len(self.vocab_kge['entity'])))
self.all_actions_rep = [self.get_action_rep_drqa(x) for x in list(self.all_actions.keys())]
self.room = ""
def visualize(self):
pos = nx.spring_layout(self.graph_state)
edge_labels = {e: self.graph_state.edges[e]['rel'] for e in self.graph_state.edges}
nx.draw_networkx_edge_labels(self.graph_state, pos, edge_labels)
nx.draw(self.graph_state, pos=pos, with_labels=True, node_size=200, font_size=10)
plt.show()
def load_vocab_kge(self):
ent = {}
with open('initialize/state/entity2id.tsv', 'r') as f:
for line in f:
e, eid = line.split('\t')
ent[e.strip()] = int(eid.strip())
rel = {}
with open('initialize/state/relation2id.tsv', 'r') as f:
for line in f:
r, rid = line.split('\t')
rel[r.strip()] = int(rid.strip())
return {'entity': ent, 'relation': rel}
def load_vocab(self):
vocab = eval(open('../w2id.txt', 'r').readline())
return vocab
def load_action_dictionary(self):
all_actions = eval(open('../act2id.txt', 'r').readline())
return all_actions
def update_state_base(self, visible_state):
visible_state = visible_state.split('-')
if len(visible_state) > 1:
visible_state = visible_state[2]
self.visible_state = visible_state
try:
sents = call_stanford_openie(self.visible_state)['sentences']
for ov in sents:
triple = ov['openie']
for tr in triple:
h, r, t = tr['subject'], tr['relation'], tr['object']
self.graph_state.add_edge(h, t, rel=r)
        except Exception:
print(self.visible_state)
return
def update_state(self, visible_state, prev_action=None):
remove = []
prev_remove = []
link = []
visible_state = visible_state.split('-')
if len(visible_state) > 1:
visible_state = visible_state[2]
dirs = ['north', 'south', 'east', 'west']
self.visible_state = str(visible_state)
rules = []
sents = call_stanford_openie(self.visible_state)['sentences']
for ov in sents:
triple = ov['openie']
for tr in triple:
h, r, t = tr['subject'].lower(), tr['relation'].lower(), tr['object'].lower()
if h == 'we':
h = 'you'
if r == 'are in':
r = "'ve entered"
if h == 'it':
break
rules.append((h, r, t))
room = ""
room_set = False
for rule in rules:
h, r, t = rule
if 'entered' in r or 'are in' in r:
prev_remove.append(r)
if not room_set:
room = t
room_set = True
if 'should' in r:
prev_remove.append(r)
if 'see' in r or 'make out' in r:
link.append((r, t))
remove.append(r)
#else:
# link.append((r, t))
prev_room = self.room
self.room = room
add_rules = []
if prev_action is not None:
for d in dirs:
if d in prev_action and self.room != "":
add_rules.append((prev_room, d + ' of', room))
prev_room_subgraph = None
prev_you_subgraph = None
for sent in sent_tokenize(self.visible_state):
if 'exit' in sent or 'entranceway' in sent:
for d in dirs:
if d in sent:
rules.append((self.room, 'has', 'exit to ' + d))
if prev_room != "":
graph_copy = self.graph_state.copy()
graph_copy.remove_edge('you', prev_room)
con_cs = [graph_copy.subgraph(c) for c in nx.weakly_connected_components(graph_copy)]
for con_c in con_cs:
if prev_room in con_c.nodes:
prev_room_subgraph = nx.induced_subgraph(graph_copy, con_c.nodes)
if 'you' in con_c.nodes:
prev_you_subgraph = nx.induced_subgraph(graph_copy, con_c.nodes)
for l in link:
add_rules.append((room, l[0], l[1]))
for rule in rules:
h, r, t = rule
if r not in remove:
add_rules.append(rule)
edges = list(self.graph_state.edges)
print("add", add_rules)
for edge in edges:
r = self.graph_state[edge[0]][edge[1]]['rel']
if r in prev_remove:
self.graph_state.remove_edge(*edge)
if prev_you_subgraph is not None:
self.graph_state.remove_edges_from(prev_you_subgraph.edges)
for rule in add_rules:
u = '_'.join(str(rule[0]).split())
v = '_'.join(str(rule[2]).split())
if u in self.vocab_kge['entity'].keys() and v in self.vocab_kge['entity'].keys():
if u != 'it' and v != 'it':
self.graph_state.add_edge(rule[0], rule[2], rel=rule[1])
print("pre", self.graph_state.edges)
if prev_room_subgraph is not None:
self.graph_state.add_edges_from(prev_room_subgraph.edges)
print(self.graph_state.edges)
return
def get_state_rep_kge(self):
ret = []
self.adj_matrix = np.zeros((len(self.vocab_kge['entity']), len(self.vocab_kge['entity'])))
#adj = []
#for g in self.graph_state.nodes:
# ret.append(self.vocab_kge['entity']['_'.join(str(g).split())])
for u, v in self.graph_state.edges:
u = '_'.join(str(u).split())
v = '_'.join(str(v).split())
if u not in self.vocab_kge['entity'].keys() or v not in self.vocab_kge['entity'].keys():
                continue  # skip edges whose endpoints are not in the KG vocabulary
u_idx = self.vocab_kge['entity'][u]
v_idx = self.vocab_kge['entity'][v]
self.adj_matrix[u_idx][v_idx] = 1
ret.append(self.vocab_kge['entity'][u])
ret.append(self.vocab_kge['entity'][v])
return list(set(ret))
def get_visible_state_rep_drqa(self, state_description):
state_desc_num = []#120 * [0]
for i, token in enumerate(word_tokenize(state_description)[:80]):
if token not in self.vocab_drqa.keys():
token = '<UNK>'
state_desc_num.append(self.vocab_drqa[token])
return state_desc_num
def get_action_rep_drqa(self, action):
action_desc_num = 20 * [0]
for i, token in enumerate(word_tokenize(action)[:20]):
if token not in self.vocab_drqa.keys():
token = '<UNK>'
action_desc_num[i] = self.vocab_drqa[token]
return action_desc_num
def get_cur_actions(self):
return list(self.all_actions.keys())
def get_cur_actions_pruned(self):
action_ents = {a:[] for a in self.all_actions.keys()}
action_scores = {a:0 for a in self.all_actions.keys()}
for action in self.all_actions.keys():
for n in self.graph_state.nodes:
if str(n) in action:
action_ents[action] += [n]
action_scores[action] += 1
for a in action_ents.keys():
if len(action_ents[a]) < 2:
continue
            ent_pairs = itertools.combinations(action_ents[a], 2)
try:
for pair in ent_pairs:
if nx.has_path(self.graph_state, pair[0], pair[1]):
action_scores[a] += 1
if nx.has_path(self.graph_state, pair[1], pair[0]):
action_scores[a] += 1
except nx.NodeNotFound:
continue
sorted_scores = sorted(action_scores.items(), key=lambda kv: kv[1], reverse=True)
max_score = max([a[1] for a in sorted_scores])
max_actions = 36
if max_score == 0:
ret = random.sample(list(self.all_actions.keys()), max_actions)
return ret
partitions = {s: [] for s in range(0, max_score + 1)}
for act, score in sorted_scores:
partitions[score] += [act]
ret = []
left = max_actions
for s in range(max_score, 0, -1):
sample_no = min(left, len(partitions[s]))
left -= sample_no
to_add = random.sample(partitions[s], sample_no)
ret += to_add
if len(ret) > max_actions:
ret = ret[:max_actions]
break
for dir in ['north', 'south', 'east', 'west']:
ret.append('go ' + dir)
return ret
def get_action_text(self, action_ids):
ret = ""
for ids in action_ids:
if ids != 0:
if self.rev_vocab_drqa[ids] != "'s":
ret += ' ' + self.rev_vocab_drqa[ids]
else:
ret += self.rev_vocab_drqa[ids]
#ret = " ".join([self.rev_vocab_drqa[i] for i in action_ids if i != 0])
ret = ret.strip()
return ret
def step_pruned(self, visible_state, prev_action=None):
self.update_state(visible_state, prev_action)
self.vis_pruned_actions = self.get_cur_actions_pruned()
self.pruned_actions_rep = [self.get_action_rep_drqa(a) for a in self.vis_pruned_actions]
inter = self.visible_state + "The actions are:" + ",".join(self.vis_pruned_actions) + "."
self.drqa_input = self.get_visible_state_rep_drqa(inter)
self.graph_state_rep = self.get_state_rep_kge(), self.adj_matrix
def step(self, visible_state, prev_action=None, pruned=True):
if pruned:
self.step_pruned(visible_state, prev_action)
return
self.update_state(visible_state, prev_action)
self.vis_pruned_actions = self.get_cur_actions()
self.pruned_actions_rep = [self.get_action_rep_drqa(a) for a in self.vis_pruned_actions]
inter = self.visible_state + "The actions are:" + ",".join(self.vis_pruned_actions) + "."
self.drqa_input = self.get_visible_state_rep_drqa(inter)
self.graph_state_rep = self.get_state_rep_kge(), self.adj_matrix
#return graph_state_rep, vis_feasible_actions, feasible_action_rep, drqa_input
| 11,697
| 34.7737
| 109
|
py
|
KG-DQN
|
KG-DQN-master/kgdqn/drqa.py
|
import random
import torch
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import logging
from torch.autograd import Variable
from utils.drqa_utils import AverageMeter
from rnn_reader import RnnDocReader
class DocReaderModel(object):
"""High level model that handles intializing the underlying network
architecture, saving, updating examples, and predicting examples.
"""
def __init__(self, opt, embedding=None, state_dict=None):
# Book-keeping.
self.opt = opt
self.device = torch.cuda.current_device() if opt['cuda'] else torch.device('cpu')
self.updates = state_dict['updates'] if state_dict else 0
self.train_loss = AverageMeter()
if state_dict:
self.train_loss.load(state_dict['loss'])
# Building network.
self.network = RnnDocReader(opt, embedding=embedding)
if state_dict:
new_state = set(self.network.state_dict().keys())
for k in list(state_dict['network'].keys()):
if k not in new_state:
del state_dict['network'][k]
self.network.load_state_dict(state_dict['network'])
self.network.to(self.device)
# Building optimizer.
self.opt_state_dict = state_dict['optimizer'] if state_dict else None
self.build_optimizer()
def build_optimizer(self):
parameters = [p for p in self.network.parameters() if p.requires_grad]
if self.opt['optimizer'] == 'sgd':
self.optimizer = optim.SGD(parameters, self.opt['learning_rate'],
momentum=self.opt['momentum'],
weight_decay=self.opt['weight_decay'])
elif self.opt['optimizer'] == 'adamax':
self.optimizer = optim.Adamax(parameters,
weight_decay=self.opt['weight_decay'])
else:
raise RuntimeError('Unsupported optimizer: %s' % self.opt['optimizer'])
if self.opt_state_dict:
self.optimizer.load_state_dict(self.opt_state_dict)
def update(self, ex):
#print("======")
#print(ex)
# Train mode
self.network.train()
# Transfer to GPU
inputs = [e.to(self.device) for e in ex[:7]]
target_s = ex[7].to(self.device)
target_e = ex[8].to(self.device)
# Run forward
score_s, score_e = self.network(*inputs)
# Compute loss and accuracies
loss = F.nll_loss(score_s, target_s) + F.nll_loss(score_e, target_e)
self.train_loss.update(loss.item())
# Clear gradients and run backward
self.optimizer.zero_grad()
loss.backward()
# Clip gradients
torch.nn.utils.clip_grad_norm_(self.network.parameters(),
self.opt['grad_clipping'])
# Update parameters
self.optimizer.step()
self.updates += 1
def predict(self, ex):
# Eval mode
self.network.eval()
# Transfer to GPU
if self.opt['cuda']:
            # `async` became a reserved word in Python 3.7; use non_blocking instead
            inputs = [Variable(e.cuda(non_blocking=True)) for e in ex[:7]]
else:
inputs = [Variable(e) for e in ex[:7]]
# Run forward
with torch.no_grad():
score_s, score_e = self.network(*inputs)
# Transfer to CPU/normal tensors for numpy ops
score_s = score_s.data.cpu()
score_e = score_e.data.cpu()
# Get argmax text spans
text = ex[-2]
spans = ex[-1]
predictions = []
max_len = self.opt['max_len'] or score_s.size(1)
for i in range(score_s.size(0)):
scores = torch.ger(score_s[i], score_e[i])
scores.triu_().tril_(max_len - 1)
scores = scores.numpy()
s_idx, e_idx = np.unravel_index(np.argmax(scores), scores.shape)
s_offset, e_offset = spans[i][s_idx][0], spans[i][e_idx][1]
predictions.append(text[i][s_offset:e_offset])
return predictions
def save(self, filename, epoch, scores):
em, f1, best_eval = scores
params = {
'state_dict': {
'network': self.network.state_dict(),
'embeddings': self.network.embedding.state_dict(),
'doc_encoder': self.network.doc_rnn.state_dict(),
'q_encoder': self.network.question_rnn.state_dict(),
'self_attn_layer': self.network.self_attn.state_dict(),
'qemb_layer': self.network.qemb_match.state_dict(),
'optimizer': self.optimizer.state_dict(),
'updates': self.updates,
'loss': self.train_loss.state_dict()
},
'config': self.opt,
'epoch': epoch,
'em': em,
'f1': f1,
'best_eval': best_eval,
'random_state': random.getstate(),
'torch_state': torch.random.get_rng_state(),
'torch_cuda_state': torch.cuda.get_rng_state()
}
try:
torch.save(params, filename)
#logger.info('model saved to {}'.format(filename))
except BaseException:
#logger.warning('[ WARN: Saving failed... continuing anyway. ]')
pass
| 5,300
| 35.061224
| 89
|
py
|
Slic
|
Slic-master/utils.py
|
import torch
from dataset import SolarDataset
from model import SolarClassifier
from torch.utils.data import DataLoader
import numpy as np
from tqdm import tqdm
import sunpy.cm as cm
import matplotlib.pyplot as plt
import torch.nn.functional as F
import os, html
from astropy.io import fits
import sunpy.map as m
from skimage.transform import resize
class Classification:
def __init__(self,fits_pth,weights):
'''
This is a class for classifying solar images quickly to be used with the pre-trained Slic network.
Parameters
----------
fits_pth : str
This is the path to the fits files to be classified.
weights : str
This is the path to the pretrained model.
Attributes
----------
files : list
A list of the image files to be classified.
weights : str
The path to the pretrained model.
class_dict : dict
A dictionary to store the classifications of the image in. The numbers in the entries of the dictionary correspond to the index in the file list meaning that file contains its associated feature.
hist_dict : dict
A dictionary to store the probability histograms in the same format as class_dict.
label_dict : dict
A dictionary to store the corresponding numerical class labels with what they correspond to physically.
'''
self.files = sorted([fits_pth+x for x in os.listdir(fits_pth)]) #this assumes that the fits files are named sensibly
self.weights = weights
self.class_dict = {}
self.hist_dict = {}
self.label_dict = {
"filaments" : 0,
"flares" : 1,
"prominences" : 2,
"quiet" : 3,
"sunspots" : 4
}
def solar_classification(self,features=None,freedom=False):
'''
        This class method does the classification of the images in the files attribute and updates the class_dict and hist_dict attributes with the classification of the images and their probability histograms respectively.
Parameters
----------
features : None, list
The features we would like to keep after classifying. The default is None which will lead to all feature data being saved.
freedom : bool
            This garbage collects the model after usage to free up memory. Default is False.
'''
im_arr = np.zeros((len(self.files),1,256,256)) #this sets up the array of images to be classified
for i, image in enumerate(self.files):
tmp = fits.getdata(image).astype(np.float64)
tmp = resize(tmp,(256,256),anti_aliasing=True)
tmp = tmp.reshape(1,256,256)
im_arr[i] = tmp
dataset = SolarDataset(source="numpy",data_arr=im_arr,test=True)
idxs = np.zeros(dataset.__len__())
labels = np.zeros(dataset.__len__())
hists = np.zeros((dataset.__len__(),5))
data_loader = DataLoader(dataset,batch_size=1)
device = ("cuda:0" if torch.cuda.is_available() else "cpu")
model = SolarClassifier()
model.to(device)
model.load_state_dict(torch.load(self.weights,map_location=device))
model.eval()
with torch.no_grad():
for idx, images in tqdm(enumerate(data_loader),desc="Classifying images"):
images = images.float().to(device)
output = model(images)
_, predicted = torch.max(output.data,1)
idxs[idx] = idx
labels[idx] = predicted.item()
                hists[idx] = F.softmax(output.cpu(), dim=1).numpy()
        if features is None:
self.class_dict.update({
"filaments" : idxs[np.where(labels==0)].astype(np.int16),
"flares" : idxs[np.where(labels==1)].astype(np.int16),
"prominences" : idxs[np.where(labels==2)].astype(np.int16),
"quiet" : idxs[np.where(labels==3)].astype(np.int16),
"sunspots" : idxs[np.where(labels==4)].astype(np.int16)
})
self.hist_dict.update({
"filaments" : hists[np.where(labels==0)],
"flares" : hists[np.where(labels==1)],
"prominences" : hists[np.where(labels==2)],
"quiet" : hists[np.where(labels==3)],
"sunspots" : hists[np.where(labels==4)]
})
else:
for f in features:
self.class_dict.update({f : idxs[np.where(labels==self.label_dict[f])].astype(np.int16)})
self.hist_dict.update({f : hists[np.where(labels==self.label_dict[f])]})
if freedom:
del(model)
def plot_image(self,feature,idx=None):
'''
This is a class method to plot the images that we are interested in after classification.
Parameters
----------
feature : str
The feature we want to look at.
idx : None, list
A list of the indices we want to look at. These are the numerical values assigned to the files via their index in the files attribute. Default is None which means the code will plot all of a single class.
'''
fig = plt.figure()
        if idx is None:
idx = list(self.class_dict[feature])
if type(idx) == list:
fig_side = np.sqrt(len(idx))
for j, i in enumerate(idx):
ax = fig.add_subplot(np.floor(fig_side),np.ceil(fig_side),j+1)
im = m.Map(self.files[i])
im.plot_settings["title"] = im.meta["detector"] + " " + im.meta["wave"][3:] + html.unescape("Å") +" " + im.meta["date-obs"][:10] + " " + im.meta["date-obs"][11:-4]
im.plot()
ax.set_ylabel("Solar-Y [arcsec]")
ax.set_xlabel("Solar-X [arcsec]")
else:
raise TypeError("Indices should be a list.")
fig.tight_layout()
    def plot_hist(self,feature,idx=None):
'''
This is a class method to plot the probability distributions that we are interested in after classification.
Parameters
----------
feature : str
The feature we want to see the distribution for.
idx : None, list
A list of the indices we want to look at. These are the numerical values assigned to the files via their index in the files attribute. Default is None which means the code will plot all of the probability distributions for a single class.
'''
fig = plt.figure()
        if idx is None:
idx = list(self.class_dict[feature])
if type(idx) == list:
fig_side = np.sqrt(len(idx))
for j, i in enumerate(idx):
hist = np.where(self.class_dict[feature] == i)[0]
ax = fig.add_subplot(np.floor(fig_side),np.ceil(fig_side),j+1)
ax.bar(range(5),self.hist_dict[feature][hist][0],tick_label=list(self.label_dict.keys()))
ax.set_yscale("log")
ax.tick_params(axis="x",labelrotation=90)
else:
raise TypeError("Indices should be a list.")
fig.tight_layout()
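if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original file): the paths
    # below are placeholders for a directory of fits files and a weights file.
    clf = Classification(fits_pth="path/to/fits/", weights="path/to/weights.pth")
    clf.solar_classification(features=["flares", "sunspots"], freedom=True)
    print(clf.class_dict["flares"])  # indices of files classified as flares
    clf.plot_hist("flares", idx=list(clf.class_dict["flares"]))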
| 7,245
| 41.623529
| 250
|
py
|
Slic
|
Slic-master/model.py
|
import torch
import torch.nn as nn
from torch.nn.init import kaiming_normal_
class SolarClassifier(nn.Module):
def __init__(self):
super().__init__()
self.max_pool = nn.MaxPool2d(kernel_size=2,stride=2)
self.layer1 = nn.Sequential(
nn.Conv2d(1,64,kernel_size=3,padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True)
)
self.layer2 = nn.Sequential(
nn.Conv2d(64,64,kernel_size=3,padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True)
)
self.layer3 = nn.Sequential(
nn.Conv2d(64,128,kernel_size=3,padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True)
)
self.layer4 = nn.Sequential(
nn.Conv2d(128,128,kernel_size=3,padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True)
)
self.layer5 = nn.Sequential(
nn.Conv2d(128,256,kernel_size=3,padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True)
)
self.layer6 = nn.Sequential(
nn.Conv2d(256,256,kernel_size=3,padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True)
)
self.layer7 = nn.Sequential(
nn.Conv2d(256,512,kernel_size=3,padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True)
)
self.layer8 = nn.Sequential(
nn.Conv2d(512,512,kernel_size=3,padding=1),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True)
)
self.classifier = nn.Sequential(
nn.Linear(512*8*8,4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096,4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096,5)
)
        for m in self.modules():
            # initialise conv and linear weights (the original guard referenced
            # an undefined name `Solar_Classifier`)
            if isinstance(m,(nn.Conv2d,nn.Linear)):
                kaiming_normal_(m.weight,nonlinearity="relu")
def forward(self,x):
out = self.layer1(x)
out = self.layer2(out)
out = self.max_pool(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.max_pool(out)
out = self.layer5(out)
out = self.layer6(out)
out = self.max_pool(out)
out = self.layer7(out)
out = self.layer8(out)
out = self.max_pool(out)
out = self.layer8(out)
out = self.layer8(out)
out = self.max_pool(out)
out = out.view(out.size(0),-1)
out = self.classifier(out)
return out
def graph(self):
        return nn.Sequential(self.layer1,self.layer2,self.max_pool,self.layer3,self.layer4,self.max_pool,self.layer5,self.layer6,self.max_pool,self.layer7,self.layer8,self.max_pool,self.layer8,self.layer8,self.max_pool,self.classifier)
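if __name__ == "__main__":
    # Illustrative sketch (not part of the original file): five max-pools take
    # a 256x256 input down to 8x8, matching the 512*8*8 classifier input, and
    # the network emits one logit per class.
    net = SolarClassifier()
    logits = net(torch.randn(1, 1, 256, 256))
    print(logits.shape)  # -> torch.Size([1, 5])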
| 2,842
| 32.05814
| 231
|
py
|
Slic
|
Slic-master/dataset.py
|
import numpy as np
from torch.utils.data import Dataset
class SolarDataset(Dataset):
def __init__(self,source="from_file",dat_file=None,data_arr=None,label_arr=None,test=False):
super().__init__()
self.test = test
if not self.test:
if source == "from_file":
if dat_file is None:
raise TypeError("dat_file should not be done when initialising a class instance from a file!")
f = np.load(dat_file)
if len(f.files) == 2:
self.header = f["header"]
self.label = f["data"][:,0]
self.length = f["data"].shape[0]
self.data = f["data"][:,1:].reshape(self.length,1,256,256)
del f
elif source == "numpy":
self.label = label_arr
self.length = label_arr.shape[0]
self.data = data_arr.reshape(self.length,1,256,256)
else:
raise TypeError("Invalid source format.")
else:
self.data = data_arr
def __len__(self):
if not self.test:
return len(self.label)
else:
return self.data.shape[0]
def __getitem__(self,idx):
if not self.test:
if hasattr(self,"header"):
item = self.data[idx]
label = self.label[idx]
header = self.header[idx]
return (item,label,header)
else:
item = self.data[idx]
label = self.label[idx]
return (item,label)
else:
item = self.data[idx]
return item
def parse_dataset(dataset):
'''
A function for splitting the dataset into the different classes for use in the confusion matrix.
Parameters
----------
    dataset : SolarDataset or str
A solar dataset class instance or path to the file containing images of the different classes.
Returns
-------
sol_list : list
        A list of SolarDataset instances with dependent length on the length of np.unique(dataset.label).
'''
sol_list = []
    if isinstance(dataset, SolarDataset):
        for l in np.unique(dataset.label):
            args = np.argwhere(dataset.label == l) #finds all of the indices where the given label is l and returns an array of these indices
            sol_list.append(SolarDataset(source="numpy",data_arr=dataset.data[args],label_arr=dataset.label[args]))
        return sol_list
    elif isinstance(dataset, str):
        s_dataset = SolarDataset(source="from_file",dat_file=dataset)
        for l in np.unique(s_dataset.label):
            args = np.argwhere(s_dataset.label == l)
            sol_list.append(SolarDataset(source="numpy",data_arr=s_dataset.data[args],label_arr=s_dataset.label[args]))
        return sol_list
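if __name__ == "__main__":
    # Illustrative sketch (not part of the original file): split a toy dataset
    # of six random "images" with labels 0/1 into one SolarDataset per class.
    data = np.random.rand(6, 1, 256, 256)
    labels = np.array([0, 1, 0, 1, 0, 1])
    toy = SolarDataset(source="numpy", data_arr=data, label_arr=labels)
    print([len(s) for s in parse_dataset(toy)])  # -> [3, 3]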
| 2,892
| 33.035294
| 141
|
py
|
Slic
|
Slic-master/data.py
|
import numpy as np
from astropy.io.fits import getdata
import os,argparse
from scipy.misc import imresize
from tqdm import tqdm
def train_test_data(dataset,percentage_split=10,save_dir="./"):
'''
Parameters
----------
dataset : str
The path to the dataset to be prepped.
percentage_split : int
The percentage to be used in the validation. Default is 10.
save_dir : str
The directory to save the files to. Default is the current working directory.
'''
if dataset is None:
raise TypeError("Please tell us what dataset you would like prepped!")
class bad_data(Exception): pass
dir_list = sorted([dataset+x for x in os.listdir(dataset) if not x.startswith(".")]) #generates a list of the dataset class folders each containing the images pertaining to the eponymous class
train_list, test_list = [], []
for (i,direc) in enumerate(dir_list):
data_list = sorted([direc+"/"+x for x in os.listdir(direc) if not x.startswith(".")])
for (j, image) in tqdm(enumerate(data_list),desc=str(dir_list[i])):
tmp = getdata(image).astype(np.float64)
try:
for (x,y), pixel in np.ndenumerate(tmp):
if tmp[x,y] == 0:
raise bad_data() #skip over images with faults in the data
except bad_data:
continue
tmp = imresize(tmp,(256,256),interp="bicubic") #resizes the images to 256x256 pixels using bicubic interpolation
tmp = tmp.flatten() #flatten the image to a 1D vector for easier storage in the .npz file
tmp = np.insert(tmp,0,i)
if (j % percentage_split) == 0:
test_list.append(tmp)
else:
train_list.append(tmp)
print("There are %d training images." % len(train_list))
print("There are %d test images." % len(test_list))
train_arr = np.array(train_list)
test_arr = np.array(test_list)
del(train_list,test_list)
np.savez_compressed(save_dir+"solar_train_data.npz",data=train_arr)
np.savez_compressed(save_dir+"solar_test_data.npz",data=test_arr)
print("The training and testing data files have been created.")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--dataset",help="The path to the dataset to be loaded in.",default=None)
parser.add_argument("--percent_split",help="The percentage of the dataset to put in the validation.",default=10)
parser.add_argument("--save_dir",help="The directory to save the prepped datasets to.",default="./")
args = parser.parse_args()
train_test_data(dataset=args.dataset,percentage_split=args.percent_split,save_dir=args.save_dir)
| 2,750
| 42.666667
| 196
|
py
|
Slic
|
Slic-master/confusion_matrix.py
|
import numpy as np
import pandas as pd
from dataset import *
from model import SolarClassifier
import torch
from torch.utils.data import DataLoader
class ConfusionMatrix():
'''
A class to store the confusion matrix, its features and the associated statistics that go along with it.
Parameters
----------
val_set : SolarDataset
A SolarDataset instance for the data to calculate the confusion matrix for.
    model : torch.nn.Module
The model to calculate the confusion matrix for.
model_pth : str
The path to the trained weights of the model to calculate the confusion matrix for.
labels : list
        The names of the class labels.
'''
def __init__(self, val_set, model, model_pth, labels):
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(self.device)
model.load_state_dict(torch.load(model_pth)["model"])
model.eval()
label_sets = self.parse_dataset(val_set)
self.labels = labels
self.feature_dict = {self.labels[j] : j for j in range(len(self.labels))}
misclassified = []
cm_diag = []
for x in label_sets:
loader = DataLoader(dataset=x, batch_size=1)
correct = 0
with torch.no_grad():
for j, (image, label) in enumerate(loader):
image, label = image.float().to(self.device), label.long().to(self.device)
output = model(image)
_, predicted = torch.max(output.data, 1)
correct += (predicted == label).sum().item()
if predicted != label:
misclassified.append((j, predicted.item(), label.item()))
#save the index of the misclassification and what was predicted and what the true label is
cm_diag.append(correct)
cm = torch.diag(torch.tensor(cm_diag)) #this creates a square tensor with the diagonal elements filled
for m in misclassified:
# in confusion matrix, y-axis is true class, x-axis is predicted class
# thus, elements with true class l and predicted class k will add one count to cm[l,k]
cm[m[2], m[1]] += 1
self.cm_pd = pd.DataFrame(data=cm.numpy(), index=self.labels, columns=self.labels, dtype=np.uint16)
self.cm = cm
def true_pos(self, feature):
'''
A function for obtaining the true positives of a specific feature from the confusion matrix.
Parameters
----------
feature : str
The name of the feature to get the true positive result for.
Returns
-------
int
The true positive of the desired feature.
'''
feature = feature.lower()
idx = self.feature_dict[feature]
return self.cm[idx, idx].item()
def false_pos(self, feature):
'''
        A function for obtaining the false positives of a specific feature from the confusion matrix. The false positives for a feature are the entries down that feature's column excluding the diagonal element (i.e. where the row and column indices are equal).
Parameters
----------
feature : str
The name of the feature to get the false positive result for.
Returns
-------
int
The false positive of the desired feature.
'''
        feature = feature.lower()
idx = self.feature_dict[feature]
tp = self.true_pos(feature=feature)
return self.cm[:, idx].sum().item() - tp
def false_neg(self, feature):
'''
        A function for obtaining the false negatives of a specific feature from the confusion matrix. The false negatives for a feature are the entries across that feature's row excluding the diagonal element (i.e. where the row and column indices are equal).
Parameters
----------
feature : str
The name of the feature to get the false negative result for.
Returns
-------
int
The false negative of the desired feature.
'''
feature = feature.lower()
idx = self.feature_dict[feature]
tp = self.true_pos(feature=feature)
return self.cm[idx].sum().item() - tp
def true_neg(self, feature):
'''
A function for obtaining the true negatives of a specific feature from the confusion matrix. The true negatives are the number of samples that are not attributed to the feature in question and are not classified as that feature.
Parameters
----------
feature : str
The name of the feature to get the true negative for.
Returns
-------
int
The true negative of the desired feature.
'''
feature = feature.lower()
idx = self.feature_dict[feature]
tp = self.true_pos(feature=feature)
fp = self.false_pos(feature=feature)
fn = self.false_neg(feature=feature)
        return self.cm.sum().item() - tp - fp - fn
def precision(self, feature):
'''
The precision of the classifier is the ratio
TP_j / (TP_j + FP_j)
i.e. it is the ratio of correct predictions for a certain feature to the total number of predictions for a certain feature.
Parameters
----------
feature : str
The name of the feature to get the precision value for.
Returns
-------
        float
The precision of the desired feature.
'''
feature = feature.lower()
idx = self.feature_dict[feature]
tp = self.true_pos(feature=feature)
fp = self.false_pos(feature=feature)
return tp / (tp + fp)
def recall(self, feature):
'''
The recall of the classifier is the ratio
TP_j / (TP_j + FN_j)
i.e. it is the ratio of the correct predictions for a certain feature to the total number of instances containing that feature regardless of correct classifications.
Parameters
----------
feature : str
The name of the feature to get the recall value for.
Returns
-------
        float
The recall of the desired feature.
'''
feature = feature.lower()
idx = self.feature_dict[feature]
tp = self.true_pos(feature=feature)
fn = self.false_neg(feature=feature)
return tp / (tp + fn)
def F1_score(self, feature):
'''
The F1 score of the classifier describes the balance between the precision and recall. It is given by
2 * P * R / (P + R)
Parameters
----------
feature : str
The name of the feature to get the F1 score value for.
Returns
-------
        float
The F1 score of the desired feature.
'''
feature = feature.lower()
idx = self.feature_dict[feature]
p = self.precision(feature=feature)
r = self.recall(feature=feature)
return 2 * p * r / (p + r)
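    # Worked example (illustrative counts, hypothetical labels ['filament', 'sunspot']):
    #   cm = [[50, 10],
    #         [ 5, 35]]   (rows = true class, columns = predicted class)
    # For 'filament' (index 0): TP = 50, FP = 5 (column sum minus the diagonal),
    # FN = 10 (row sum minus the diagonal), TN = 35, so
    #   precision = 50/55 ~ 0.909, recall = 50/60 ~ 0.833,
    #   F1 = 2*0.909*0.833/(0.909+0.833) ~ 0.870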
@staticmethod
def parse_dataset(dataset):
'''
A function for splitting the dataset into the different classes for use in the confusion matrix.
Parameters
----------
        dataset : EODataset
            An EODataset object containing the images and labels for classification.
        Returns
        -------
        tens_list : list
            A list of EODataset objects, one per unique label, i.e. of length len(torch.unique(dataset.labels)).
'''
tens_list = []
for l in torch.unique(dataset.labels):
args = torch.nonzero(dataset.labels == l).squeeze()
tens_list.append(EODataset(dataset.data[args], dataset.labels[args], transform=dataset.transform, transform_prob=dataset.transform_prob))
return tens_list
def plot_confusion_matrix(self, title=None, cmap=plt.cm.Blues, normalise=False):
'''
This function plots the confusion matrix.
Parameters
----------
title : str or None
The title of the plot.
cmap : str or cm
The colour map to use for the plot of the confusion matrix.
normalise : bool
Whether or not to normalise the data in the confusion matrix based on the number of samples from each label. Default is False.
'''
if normalise:
cm = self.cm.numpy() / self.cm.numpy().sum(axis=1)[:, np.newaxis]
else:
            cm = self.cm.numpy()
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation="nearest", cmap=cmap)
ax.figure.colorbar(im, ax=ax)
ax.set(xticks=np.arange(self.cm.size(1)),
yticks=np.arange(self.cm.size(0)),
xticklabels=self.labels, yticklabels=self.labels,
title=title,
ylabel="True label",
xlabel="Predicted label")
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
fmt = ".3f" if normalise else "d"
        thresh = cm.max() / 2
for i in range(self.cm.shape[0]):
for j in range(self.cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
return ax
| 9,563
| 32.093426
| 279
|
py
|
Slic
|
Slic-master/train.py
|
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
from dataset import SolarDataset
from model import SolarClassifier
import argparse
from tqdm import tqdm
def train(model,device,data_loader,optimiser,epoch,criterion):
model.to(device)
model.train()
for i, (images, labels) in tqdm(enumerate(data_loader),desc="Epoch no."+str(epoch)):
images, labels = images.float().to(device), labels.long().to(device) #casts the tensors to the GPU if available
optimiser.zero_grad() #must zero the gradients in the optimiser since backward() accumulates gradients and this stops mixing of values between batches
output = model(images) #feeds the data through the network
loss = criterion(output,labels) #finds the distance in the loss space between predicted values and actual values
loss.backward()
optimiser.step()
def validate(model,device,data_loader,epoch,test_losses):
model.to(device)
model.eval()
total, correct = 0, 0
with torch.no_grad():
for images, labels in data_loader:
images, labels = images.float().to(device), labels.long().to(device)
output = model(images)
_, predicted = torch.max(output.data,1)
total += labels.size(0)
                correct += (predicted == labels).sum().item()
print("Test Accuracy of the model on the test images: %f %% on epoch %d" % (100 * correct / total, epoch))
test_losses.append(correct / total)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--lr",help="The learning rate for the network.",default=0.0001,type=float)
parser.add_argument("--n_epochs",help="The number of epochs to train for.",default=100,type=int)
parser.add_argument("--batch_size",help="The batch size to use for training and validation.",default=2,type=int)
parser.add_argument("--no_gpu",help="Don't use the GPU.",dest='use_gpu',action='store_false')
parser.add_argument("--use_dataparallel",help="Use DataParallel to parallelise across multiple GPUs.",dest='use_dataparallel',action='store_true')
parser.add_argument("--train_data",help="The path to the training data.",default="./solar_train_data.npz")
parser.add_argument("--val_data",help="The path to the validation data.",default="./solar_test_data.npz")
parser.add_argument("--save_dir",help="The directory to save the models from each epoch.",default="./")
parser.set_defaults(use_gpu=True, use_dataparallel=False)
args = parser.parse_args()
    device = torch.device("cuda:0" if torch.cuda.is_available() and args.use_gpu else "cpu")
    sol_clas = SolarClassifier() #creates an instance of the solar classification network
    if args.use_gpu and torch.cuda.device_count() > 1 and args.use_dataparallel:
        print("Using %d GPUs!" % torch.cuda.device_count())
        sol_clas = nn.DataParallel(sol_clas)
criterion = nn.CrossEntropyLoss()
optimiser = optim.SGD(sol_clas.parameters(),args.lr,momentum=0.9,nesterov=True)
test_losses = []
train_dataset = SolarDataset(dat_file=args.train_data)
train_loader = DataLoader(dataset=train_dataset,batch_size=args.batch_size,shuffle=True)
val_dataset = SolarDataset(dat_file=args.val_data)
val_loader = DataLoader(dataset=val_dataset,batch_size=args.batch_size,shuffle=True)
del(train_dataset,val_dataset)
for i in tqdm(range(1,args.n_epochs+1)):
train(sol_clas,device,train_loader,optimiser,i,criterion)
torch.save(sol_clas.state_dict(),args.save_dir+"sol_class_"+str(i)+".pth")
validate(sol_clas,"cpu",val_loader,i,test_losses)
np.save("loss"+str(args.lr)+".npy",np.array(test_losses))
| 4,003
| 51.684211
| 158
|
py
|
dancin_seq2seq
|
dancin_seq2seq-master/adversarial.py
|
"""
adversarial.py - Adversarial classes.
Classes:
Autoencoder: a general autoencoder interface.
SpamSeq2SeqAutoencoder: a sequence to sequence autoencoder interface.
"""
from __future__ import division
import gc
import logging
import math
import numpy as np
import os
import scipy
import scipy.stats
import sklearn
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from dataset import SpamDataset
from discriminator import MultinomialNBDiscriminator
from seq2seq.model import Seq2Seq, Seq2SeqAutoencoder
use_cuda = torch.cuda.is_available()
print "Use CUDA:" + str(use_cuda)
class DancinSeq2SeqAdversarial():
"""
    Dancin Seq2Seq Adversarial Text Generation.
Implementation from: https://github.com/MaximumEntropy/Seq2Seq-PyTorch
Uses the following config: config_en_autoencoder_1_billion.json
"""
def __init__(self,
truncation_len=30,
adversarial_weight=.5,
baseline_weight = 0.99,
adversarial_checkpoint=None,
autoencoder_checkpoint=None,
discriminator_checkpoint=None,
dataset=None):
self.truncation_len = truncation_len
self.adversarial_weight = adversarial_weight
self.autoencoder_weight = 1 - adversarial_weight
self.baseline_weight = baseline_weight
self.dataset = dataset
self.vocab_size = len(self.dataset.vocab_encoder.word2index)
self.pad_token_ind = self.dataset.vocab_encoder.word2index['<PAD>']
self.batch_size = 10
# Initialize the models.
self.adversarial_model = Seq2SeqAutoencoder(
src_emb_dim=256,
trg_emb_dim=256,
src_vocab_size=self.vocab_size,
src_hidden_dim=512,
trg_hidden_dim=512,
batch_size=self.batch_size,
bidirectional=True,
pad_token_src=self.pad_token_ind,
nlayers=2,
nlayers_trg=1,
dropout=0.,
).cuda()
self.autoencoder_model = Seq2SeqAutoencoder(
src_emb_dim=256,
trg_emb_dim=256,
src_vocab_size=self.vocab_size,
src_hidden_dim=512,
trg_hidden_dim=512,
batch_size=self.batch_size,
bidirectional=True,
pad_token_src=self.pad_token_ind,
nlayers=2,
nlayers_trg=1,
dropout=0.,
).cuda()
# Restore the autoencoder and discriminator from the checkpoint if provided.
self.autoencoder_model.load_state_dict(torch.load(open(autoencoder_checkpoint)))
self.autoencoder_model.eval()
self.discriminator_model = MultinomialNBDiscriminator(checkpoint=discriminator_checkpoint,
truncation_len=truncation_len)
# Restore adversarial model from checkpoint if provided.
if adversarial_checkpoint:
self.restore_model(adversarial_checkpoint)
# Initialize the distance module.
self.cos = nn.CosineSimilarity(dim=1, eps=1e-6)
# Initialize the optimizer.
self.lr = 0.0002
self.clip_c = 1
self.optimizer = optim.Adam(self.adversarial_model.parameters(), lr=self.lr)
# Loss criterion.
weight_mask = torch.ones(self.vocab_size).cuda()
weight_mask[self.pad_token_ind] = 0
self.reward_baseline = None
self.loss_criterion = self.adversarial_loss_criterion
# Save the initial adversarial model.
self.save_model()
def adversarial_loss_criterion(self, decoder_logit, reward):
# Baseline: use a simple EMA baseline.
if self.reward_baseline is None:
self.reward_baseline = reward.mean()
else:
self.reward_baseline = (self.baseline_weight * self.reward_baseline) + ((1 - self.baseline_weight) * reward.mean())
# Advantage = reward - baseline.
advantage = (reward - self.reward_baseline).detach() # Detach to avoid accumulating gradients.
        # Sum the per-timestep maxima of the logits (the scores of the greedily decoded tokens):
max_probs, _unused = decoder_logit.max(dim=-1)
sum_max_probs = max_probs.sum(dim=-1)
reduced_loss = advantage * sum_max_probs
return reduced_loss.mean()
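    # Illustrative EMA baseline update (made-up rewards, baseline_weight=0.99):
    #   step 1: mean reward 0.40 -> baseline initialised to 0.40
    #   step 2: mean reward 0.60 -> baseline = 0.99*0.40 + 0.01*0.60 = 0.402
    # The advantage (reward - baseline) is then positive only for samples scoring
    # above this running average, so only those get reinforced.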
def clip_gradient(self, model, clip):
"""Compute a gradient clipping coefficient based on gradient norm."""
totalnorm = 0
        for p in model.parameters():
modulenorm = p.grad.data.norm()
totalnorm += modulenorm ** 2
totalnorm = math.sqrt(totalnorm)
return min(1, clip / (totalnorm + 1e-6))
def get_dataset_minibatch(self, examples, iter_ind, batch_size):
"""
Iterator over the dataset split and get autoencoder minibatches.
"""
minibatch = examples[iter_ind:iter_ind+batch_size]
# Create the Pytorch variables.
input_lines = Variable(torch.LongTensor(np.fliplr(minibatch).copy()),
requires_grad=False).cuda() # Reverse the input lines.
return input_lines
def perplexity(self):
"""Calculate the BLEU score."""
pass
def l2_normalize(self, x):
norm = x.norm(p=2, dim=1, keepdim=True)
x_normalized = x.div(norm.expand_as(x))
return x_normalized
def reward_function(self, input_lines_src, decoder_indices):
"""Calculate the reward."""
# Get and penalize the autoencoder differences.
src_encoding = self.autoencoder_model.encode(input_lines_src.detach())
trg_encoding = self.autoencoder_model.encode(decoder_indices.detach())
# Normalize the encodings and calculate the cos. similarity.
autoencoder_difference = self.cos(src_encoding, trg_encoding)
# Get the discriminator probs.
discriminator_examples = decoder_indices.data.cpu().numpy()
discriminator_probs = Variable(torch.FloatTensor(
self.discriminator_model.get_adversarial_probs(discriminator_examples)), requires_grad=False).cuda()
return (self.adversarial_weight * discriminator_probs) + (self.autoencoder_weight * autoencoder_difference)
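    # Illustrative reward (made-up numbers): with adversarial_weight=0.8, a generated
    # sentence the discriminator scores as ham with probability 0.9 and whose encoding
    # has cosine similarity 0.5 to the source gets reward = 0.8*0.9 + 0.2*0.5 = 0.82.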
def train(self, dataset, epochs=2, write_checkpoint=1, monitor_loss=1, print_samples=1):
examples, _ = dataset.examples(split="val", shuffled=True)
num_examples, max_len = examples.shape
for epoch in xrange(epochs):
losses = []
for iter_ind in xrange(0, num_examples, self.batch_size):
self.adversarial_model.zero_grad()
self.autoencoder_model.zero_grad()
# Get a minibatch.
input_lines_src = self.get_dataset_minibatch(examples, iter_ind, self.batch_size)
# Get the adversarial decoder logits.
decoder_logit = self.adversarial_model(input_lines_src)
### Calculate the reward from the inputs and outputs.
decoder_indices = self.adversarial_model.decode_argmax(decoder_logit)
reward = self.reward_function(input_lines_src, decoder_indices)
# Update the adversarial model.
self.optimizer.zero_grad()
loss = self.loss_criterion(decoder_logit, reward)
losses.append(loss.data[0])
loss.backward()
self.optimizer.step()
if iter_ind % monitor_loss == 0:
logging.info('Epoch : %d Minibatch : %d Loss : %.5f' % (epoch, iter_ind, np.mean(losses)))
losses = []
# Memory check
#print "NUM OBJECTS: " + str(len([obj for obj in gc.get_objects() if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data))]))
if iter_ind % print_samples == 0:
# Print samples.
word_probs = decoder_indices.data.cpu().numpy()
output_lines_trg = input_lines_src.data.cpu().numpy()
real_probs = self.discriminator_model.get_adversarial_probs(output_lines_trg[:10])
pred_probs = self.discriminator_model.get_adversarial_probs(word_probs[:10])
for sentence_pred, sentence_real, real_prob, pred_prob in zip(word_probs[:10],
output_lines_trg[:10],
real_probs,
pred_probs):
if pred_prob > real_prob:
decoded_real = dataset.vocab_encoder.decode_numpy(sentence_real[::-1])
decoded_pred = dataset.vocab_encoder.decode_numpy(sentence_pred)
logging.info('===============================================')
logging.info("REAL: " + str(real_prob) + " " + decoded_real)
logging.info("PREDICTED: " + str(pred_prob) + " " + decoded_pred)
logging.info('===============================================')
del decoded_real, decoded_pred
# Evaluate the samples and print ones where the probability increased.
logging.info("Mean real probs: " + str(np.mean(real_probs)))
logging.info("Mean pred probs: " + str(np.mean(pred_probs)))
del real_probs, pred_probs, word_probs, output_lines_trg
del input_lines_src, reward, decoder_logit, decoder_indices
gc.collect()
if epoch % write_checkpoint == 0:
self.save_model()
def evaluate(self, dataset, split, verbose=True):
raise Exception("Not implemented")
def save_model(self,
checkpoint_dir='/cvgl2/u/catwong/cs332_final_project/checkpoints',
checkpoint_name='dancin_seq2seq_adversarial'):
# Outputs a path that can be passed into the restore.
checkpoint_file = checkpoint_name + '.model'
full_checkpoint_path = os.path.join(checkpoint_dir, checkpoint_file)
torch.save(
self.adversarial_model.state_dict(),
open(full_checkpoint_path, 'wb')
)
return full_checkpoint_path
def restore_model(self, checkpoint):
self.adversarial_model.load_state_dict(torch.load(open(checkpoint)))
# Demo
if __name__ == "__main__":
adversarial_weights = [.5, .8, .9, .95]
truncation_len=30
adversarial_weight = adversarial_weights[1] # The weight to give to "fooling" the discriminator
autoencoder_weight = 1 - adversarial_weight
easy_dataset = True # Whether to use lower confidence spam.
experiment_name = "%d_trunc_%d_adv_%d_auto_%d_easy_dancin" % (truncation_len,
adversarial_weight*100,
autoencoder_weight*100,
easy_dataset)
print "Experiment: " + experiment_name
# Initialize logging.
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
filename='log/%s' % (experiment_name),
filemode='w'
)
# define a new Handler to log to console as well
console = logging.StreamHandler()
    # optional: set the logging level
console.setLevel(logging.INFO)
# set a format which is the same for console use
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
# If True, use the easy spam dataset composed of lower confidence scores.
if easy_dataset:
spam_dataset = SpamDataset(truncation_len=truncation_len, encoded_files=['encoded_spam_low_conf.txt'])
else:
spam_dataset = SpamDataset(truncation_len=truncation_len, encoded_files=['encoded_spam.txt'])
# Get all of the checkpoints.
checkpoints_dir = '/cvgl2/u/catwong/cs332_final_project/checkpoints'
discriminator_checkpoint_files = ["_multinomial_nb_model.pkl", "_multinomial_nb_tf_transformer.pkl"]
discriminator_checkpoint = [os.path.join(checkpoints_dir, str(truncation_len) + filename)
for filename in discriminator_checkpoint_files]
autoencoder_checkpoint_file = str(truncation_len) + "_seq2seq_autoencoder.model"
autoencoder_checkpoint = os.path.join(checkpoints_dir, autoencoder_checkpoint_file)
adversarial_checkpoint_file = experiment_name + ".model"
    adversarial_checkpoint = os.path.join(checkpoints_dir, adversarial_checkpoint_file)
adversarial = DancinSeq2SeqAdversarial(truncation_len=30,
adversarial_weight=adversarial_weight,
adversarial_checkpoint=adversarial_checkpoint,
autoencoder_checkpoint=autoencoder_checkpoint,
discriminator_checkpoint=discriminator_checkpoint,
dataset=spam_dataset)
adversarial.train(
dataset=spam_dataset,
epochs=1000,
write_checkpoint=100,
monitor_loss=100,
print_samples=10)
#checkpoint = adversarial.save_model(checkpoint_name=experiment_name)
| 14,259
| 43.5625
| 168
|
py
|
dancin_seq2seq
|
dancin_seq2seq-master/discriminator.py
|
"""
discriminator.py - Discriminator classes.
Classes:
Discriminator: a general discriminator interface.
MultinomialNBDiscriminator: a multinomial NaiveBayes subclass.
"""
from __future__ import division
import os
import numpy as np
import scipy
import scipy.stats
import sklearn
import sklearn.feature_extraction, sklearn.naive_bayes, sklearn.metrics, sklearn.externals
from collections import defaultdict, Counter
from dataset import SpamDataset
# The number of terms, including special tokens, in the final vocabulary.
TRAINING_VOCAB_SIZE = {
100: 4480,
30: 4628
}
class Discriminator(object):
"""
Discriminator: a general discriminator class.
"""
def __init__(self, checkpoint=None):
pass
def train(self, dataset):
raise Exception("Not implemented")
def evaluate(self, dataset, split, verbose=True):
raise Exception("Not implemented")
def save_model(self):
# Outputs a path that can be passed into the restore.
raise Exception("Not implemented")
def restore_model(self, model_checkpoint):
raise Exception("Not implemented")
class MultinomialNBDiscriminator(Discriminator):
"""
MultinomialNB: Multinomial Naive Bayes Classifier w. alpha=1.0
Trained using TF-IDF features.
"""
def __init__(self, truncation_len=100, checkpoint=None):
Discriminator.__init__(self, checkpoint)
self.truncation_len=truncation_len
if not checkpoint:
self.model = sklearn.naive_bayes.MultinomialNB()
else:
self.restore_model(checkpoint)
def examples_to_term_doc(self, examples):
"""
Converts a numerically-encoded examples matrix into a sparse term-documents matrix.
"""
num_terms = TRAINING_VOCAB_SIZE[self.truncation_len]
all_row_inds = all_col_inds = all_data = None
for row_ind, example in enumerate(examples):
itemfreqs = scipy.stats.itemfreq(example).T
# Column indices: the term indices in that document.
col_inds = itemfreqs[0]
# Data: the counts of the terms in that document.
data = itemfreqs[1]
# Row indices: the current document, for each of the terms in that document.
row_inds = np.ones(itemfreqs.shape[1], dtype=np.int) * row_ind
# Concatenate to the existing data.
if all_row_inds is None:
all_row_inds = row_inds
all_col_inds = col_inds
all_data = data
else:
all_row_inds = np.append(all_row_inds, row_inds)
all_col_inds = np.append(all_col_inds, col_inds)
all_data = np.append(all_data, data)
num_docs = len(examples)
return scipy.sparse.csr_matrix((all_data, (all_row_inds, all_col_inds)), shape=(num_docs, num_terms))
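    # Illustrative example (made-up term indices): examples = [[2, 5, 5, 3]] yields
    # row_inds = [0, 0, 0], col_inds = [2, 3, 5] and data = [1, 1, 2], i.e. a
    # (1, num_terms) CSR matrix recording that document 0 contains term 2 once,
    # term 3 once and term 5 twice.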
def train(self, dataset):
examples, labels = dataset.examples(split='train', shuffled=True)
# Silly way to compute sparse doc term matrix from examples matrix by converting it back into "strings".
self.train_counts = self.examples_to_term_doc(examples)
# Featurize using TFIDF.
self.tf_transformer = sklearn.feature_extraction.text.TfidfTransformer()
X_transformed = self.tf_transformer.fit_transform(self.train_counts)
# Fit the model to TFIDF counts.
self.model.fit(X_transformed, labels)
def calculate_roc_auc(self, probs, labels):
# Probability estimates of the positive class.
pos_probs = probs[:, 1]
return sklearn.metrics.roc_auc_score(labels, pos_probs)
def evaluate(self, dataset, split, verbose=True):
# Get the test or validation examples.
examples, labels = dataset.examples(split=split, shuffled=True)
doc_terms = self.examples_to_term_doc(examples)
X_transformed = self.tf_transformer.transform(doc_terms)
# Evaluate the model.
        probs = self.model.predict_proba(X_transformed)
predicted = np.argmax(probs, axis=1)
# Mean accuracy.
mean_accuracy = np.mean(predicted == labels)
print "Mean_accuracy: %f" % mean_accuracy
# ROC-AUC Score.
#roc_auc = self.calculate_roc_auc(probs, labels)
#print "ROC AUC: %f" % roc_auc
return examples, labels, probs
def get_adversarial_probs(self, examples):
doc_terms = self.examples_to_term_doc(examples)
X_transformed = self.tf_transformer.transform(doc_terms)
        probs = self.model.predict_proba(X_transformed)
return probs[:, 0]
def save_model(self,
checkpoint_dir='/cvgl2/u/catwong/cs332_final_project/checkpoints',
checkpoint_name='multinomial_nb'):
# Separately pickles the model and the transformer.
checkpoint = os.path.join(checkpoint_dir, str(self.truncation_len) + "_" + checkpoint_name)
sklearn.externals.joblib.dump(self.model, checkpoint + "_model.pkl")
sklearn.externals.joblib.dump(self.tf_transformer, checkpoint + "_tf_transformer.pkl")
return [checkpoint + "_model.pkl", checkpoint + "_tf_transformer.pkl"]
def restore_model(self, model_checkpoints):
self.model = sklearn.externals.joblib.load(model_checkpoints[0])
self.tf_transformer = sklearn.externals.joblib.load(model_checkpoints[1])
# Demonstration models.
if __name__ == "__main__":
for truncation_len in [30, 100]:
print "Now on truncation_len: " + str(truncation_len)
spam_dataset = SpamDataset(truncation_len=truncation_len)
checkpoints_dir = '/cvgl2/u/catwong/cs332_final_project/checkpoints'
checkpoint_files = ["_multinomial_nb_model.pkl", "_multinomial_nb_tf_transformer.pkl"]
checkpoint = [os.path.join(checkpoints_dir, str(truncation_len) + filename) for filename in checkpoint_files]
new_discriminator = MultinomialNBDiscriminator(checkpoint=checkpoint, truncation_len=truncation_len)
new_discriminator.evaluate(spam_dataset, 'val')
| 6,178
| 38.608974
| 117
|
py
|
dancin_seq2seq
|
dancin_seq2seq-master/dataset.py
|
"""
dataset.py
Classes:
DatasetEncoderDecoder: encodes and decodes sentences according to a fixed, written vocabulary.
SpamDataset: utility functions to read and write dataset files.
"""
import os
import numpy as np
import sklearn
class DatasetEncoderDecoder(object):
"""
Encodes and decodes sentences according to a vocabulary.
Sentences are truncated. OOV words are assigned an <UNK> token, and <SOS>, <PAD>, and <EOS> tokens are added.
    truncation_len: the maximum number of word tokens retained from each sentence before padding.
"""
def __init__(self, vocab_file, truncation_len=100):
self.truncation_len = truncation_len
# Create index to word and word to index dicts from the vocab_file.
num_default_tokens = 4
self.index2word = {0:'<SOS>', 1:'<EOS>', 2: '<UNK>', 3: '<PAD>'}
self.word2index = {'<SOS>':0, '<EOS>':1, '<UNK>': 2, '<PAD>': 3}
with open(vocab_file) as f:
all_lines = [line.strip() for line in f.readlines()]
for idx, token in enumerate(all_lines):
self.index2word[idx + num_default_tokens] = token
self.word2index[token] = idx + num_default_tokens
def encode(self, sentence):
"""
Encodes a sentence according to the vocabulary.
Returns:
normalized: the normalized sentence, as it would be decoded.
encoded: the space-separated numerical sentence.
"""
truncated = sentence.lower().split()[:self.truncation_len]
truncated += ['<PAD>'] * max(self.truncation_len - len(truncated), 0)
truncated = ['<SOS>'] + truncated + ['<EOS>']
normalized = []
encoded = []
        # Encode, mapping out-of-vocabulary tokens to <UNK>
for token in truncated:
token = token if token in self.word2index else '<UNK>'
normalized.append(token)
encoded.append(str(self.word2index[token]))
normalized = " ".join(normalized)
encoded = " ".join(encoded)
return normalized, encoded
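    # Illustrative example (token indices other than the four specials depend on the
    # vocab file; assume both words are in the vocabulary): with truncation_len=3,
    # encode("Hello world") gives
    #   normalized = "<SOS> hello world <PAD> <EOS>"
    #   encoded    = "0 <idx(hello)> <idx(world)> 3 1"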
def decode_numpy(self, numerical_encoded):
"""Returns the decoded sentence."""
return " ".join([self.index2word[token] for token in numerical_encoded])
def decode(self, encoded):
"""Returns the decoded sentence."""
numerical_encoded = [int(token) for token in encoded.split()]
return " ".join([self.index2word[token] for token in numerical_encoded])
class SpamDataset(object):
"""
Dataset: encapsulates utility functions to get the dataset files.
"""
def __init__(self,
base_data_dir="/cvgl2/u/catwong/cs332_final_project/data/",
splits=['train', 'val', 'test'],
label_names=['ham', 'spam'],
truncation_len=100,
encoded_files=['encoded_ham.txt', 'encoded_spam.txt'],
vocab_file='email_train_vocab.txt',
random_seed=10,
spam_only=False):
self.base_data_dir = base_data_dir
self.splits = splits
self.label_names = label_names
self.encoded_files = [str(truncation_len) + "_" + f for f in encoded_files]
self.vocab_file = os.path.join(base_data_dir, str(truncation_len) + "_" + vocab_file)
self.vocab_encoder = DatasetEncoderDecoder(self.vocab_file, truncation_len=truncation_len)
self.random_seed = random_seed
# Read in all of the lines from the files.
self.examples_dict = {}
self.labels_dict = {}
for split in splits:
all_examples = []
all_labels = []
for label, encoded_file in enumerate(self.encoded_files):
data_file = os.path.join(base_data_dir, split, encoded_file)
with open(data_file) as f:
all_lines = [line.strip().split() for line in f.readlines()]
all_examples += all_lines
all_labels += [label] * len(all_lines)
self.examples_dict[split] = all_examples
self.labels_dict[split] = all_labels
def examples(self,
split,
shuffled=False):
"""
Args:
split: one of the splits (ex. train, val, test) with labels.
            shuffled: whether to shuffle the examples. (default: False)
Returns:
            examples: (np.ndarray of shape [num_examples, max_len])
            labels: (np.ndarray)
"""
examples = np.array(self.examples_dict[split]).astype(int)
labels = np.array(self.labels_dict[split])
if shuffled:
examples, labels = sklearn.utils.shuffle(examples, labels, random_state=self.random_seed)
return examples, labels
def dataset_stats(self):
"""Prints useful stats about the dataset."""
for split in self.splits:
labels = self.labels_dict[split]
num_pos = np.sum(labels)
num_neg = len(labels) - num_pos
print "Total %s examples: %d, %s: %d, %s: %d" % (split,
len(labels),
self.label_names[0],
num_neg,
self.label_names[1],
num_pos)
if __name__ == "__main__":
# SpamDataset demonstration.
print "SpamDataset demo:"
for truncation_len in [30, 100]:
dataset = SpamDataset(truncation_len=truncation_len)
examples, labels = dataset.examples(split='train', shuffled=True)
print examples[0]
print labels[0]
print dataset.vocab_encoder.decode(" ".join(examples[0].astype(str)))
dataset.dataset_stats()
| 5,820
| 40.283688
| 113
|
py
|
dancin_seq2seq
|
dancin_seq2seq-master/autoencoder.py
|
"""
autoencoder.py - Autoencoder classes.
Classes:
Autoencoder: a general autoencoder interface.
SpamSeq2SeqAutoencoder: a sequence to sequence autoencoder interface.
"""
from __future__ import division
import logging
import math
import numpy as np
import os
import scipy
import scipy.stats
import sklearn
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from dataset import SpamDataset
from seq2seq.model import Seq2Seq, Seq2SeqAutoencoder
use_cuda = torch.cuda.is_available()
print "Use CUDA:" + str(use_cuda)
class Autoencoder(object):
"""
    Autoencoder: a general autoencoder class.
"""
def __init__(self, checkpoint=None, dataset=None):
pass
def train(self, dataset):
raise Exception("Not implemented")
def evaluate(self, dataset, split, verbose=True):
raise Exception("Not implemented")
def save_model(self):
# Outputs a path that can be passed into the restore.
raise Exception("Not implemented")
def restore_model(self, model_checkpoint):
raise Exception("Not implemented")
class SpamSeq2SeqAutoencoder(Autoencoder):
"""
SpamSeq2Seq Autoencoder.
Implementation from: https://github.com/MaximumEntropy/Seq2Seq-PyTorch
Uses the following config: config_en_autoencoder_1_billion.json
"""
def __init__(self, truncation_len=100, checkpoint=None, dataset=None):
Autoencoder.__init__(self, checkpoint, dataset)
self.truncation_len = truncation_len
self.dataset = dataset
self.vocab_size = len(self.dataset.vocab_encoder.word2index)
self.pad_token_ind = self.dataset.vocab_encoder.word2index['<PAD>']
self.batch_size = 50
# Initialize the model.
self.model = Seq2SeqAutoencoder(
src_emb_dim=256,
trg_emb_dim=256,
src_vocab_size=self.vocab_size,
src_hidden_dim=512,
trg_hidden_dim=512,
batch_size=self.batch_size,
bidirectional=True,
pad_token_src=self.pad_token_ind,
nlayers=2,
nlayers_trg=1,
dropout=0.,
).cuda()
# Restore from checkpoint if provided.
if checkpoint:
self.restore_model(checkpoint)
# Initialize the optimizer.
self.lr = 0.0002
self.clip_c = 1
self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
# Loss criterion.
weight_mask = torch.ones(self.vocab_size).cuda()
weight_mask[self.pad_token_ind] = 0
self.loss_criterion = nn.CrossEntropyLoss(weight=weight_mask).cuda()
# Save the initial model.
self.save_model()
def clip_gradient(self, model, clip):
"""Compute a gradient clipping coefficient based on gradient norm."""
totalnorm = 0
for p in self.model.parameters():
modulenorm = p.grad.data.norm()
totalnorm += modulenorm ** 2
totalnorm = math.sqrt(totalnorm)
return min(1, clip / (totalnorm + 1e-6))
def get_dataset_minibatch(self, examples, iter_ind, batch_size):
"""
Iterator over the dataset split and get autoencoder minibatches.
"""
minibatch = examples[iter_ind:iter_ind+batch_size]
# Create the Pytorch variables.
input_lines = Variable(torch.LongTensor(np.fliplr(minibatch).copy())).cuda() # Reverse the input lines.
output_lines = Variable(torch.LongTensor(minibatch)).cuda()
return input_lines, output_lines
def perplexity(self):
"""Calculate the BLEU score."""
def train(self, dataset, epochs=2, write_checkpoint=1, monitor_loss=1, print_samples=1):
examples, _ = dataset.examples(split="train", shuffled=True)
num_examples, max_len = examples.shape
for epoch in xrange(epochs):
losses = []
for iter_ind in xrange(0, num_examples, self.batch_size):
# Get a minibatch.
input_lines_src, output_lines_src = self.get_dataset_minibatch(examples, iter_ind, self.batch_size)
# Run a training step.
decoder_logit = self.model(input_lines_src)
self.optimizer.zero_grad()
loss = self.loss_criterion(
decoder_logit.contiguous().view(-1, self.vocab_size),
output_lines_src.view(-1)
)
losses.append(loss.data[0])
loss.backward()
self.optimizer.step()
if iter_ind % monitor_loss == 0:
logging.info('Epoch : %d Minibatch : %d Loss : %.5f' % (epoch, iter_ind, np.mean(losses)))
del losses
losses = []
if iter_ind % print_samples == 0:
# Print samples.
word_probs = self.model.decode(decoder_logit).data.cpu().numpy().argmax(axis=-1)
output_lines_trg = input_lines_src.data.cpu().numpy()
for sentence_pred, sentence_real in zip(word_probs[:5], output_lines_trg[:5]):
decoded_real = dataset.vocab_encoder.decode_numpy(sentence_real[::-1])
decoded_pred = dataset.vocab_encoder.decode_numpy(sentence_pred)
logging.info('===============================================')
logging.info("REAL: " + decoded_real)
logging.info("PREDICTED: " + decoded_pred)
logging.info('===============================================')
if epoch % write_checkpoint == 0:
self.save_model()
def evaluate(self, dataset, split, verbose=True):
raise Exception("Not implemented")
def save_model(self,
checkpoint_dir='/cvgl2/u/catwong/cs332_final_project/checkpoints',
checkpoint_name='seq2seq_autoencoder'):
# Outputs a path that can be passed into the restore.
checkpoint_file = str(self.truncation_len) + "_" + checkpoint_name + '.model'
full_checkpoint_path = os.path.join(checkpoint_dir, checkpoint_file)
torch.save(
self.model.state_dict(),
open(full_checkpoint_path, 'wb')
)
return full_checkpoint_path
def restore_model(self, checkpoint):
self.model.load_state_dict(torch.load(open(checkpoint)))
# Demo
if __name__ == "__main__":
truncation_len=100
experiment_name = str(truncation_len) + "_autoencoder"
# Initialize logging.
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
filename='log/%s' % (experiment_name),
filemode='w'
)
# define a new Handler to log to console as well
console = logging.StreamHandler()
    # optional: set the logging level
console.setLevel(logging.INFO)
# set a format which is the same for console use
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
# Train the autoencoder.
spam_dataset = SpamDataset(truncation_len=truncation_len)
autoencoder = SpamSeq2SeqAutoencoder(truncation_len=truncation_len, dataset=spam_dataset)
autoencoder.train(
dataset=spam_dataset,
epochs=1000,
write_checkpoint=1,
monitor_loss=5000,
print_samples=10000)
checkpoint = autoencoder.save_model()
| 7,919
| 35.837209
| 115
|
py
|
dancin_seq2seq
|
dancin_seq2seq-master/__init__.py
| 0
| 0
| 0
|
py
|
|
Rail-Detection
|
Rail-Detection-main/train.py
|
import torch, os, datetime, copy, json, scipy, cv2
import numpy as np
from model.model import parsingNet
from data.dataloader import get_train_loader
from data.dataset import raildb_row_anchor
from utils.evaluation import LaneEval, grid_2_inter
from utils.dist_utils import dist_print, dist_tqdm, is_main_process
from utils.factory import get_metric_dict, get_loss_dict, get_optimizer, get_scheduler
from utils.metrics import update_metrics, reset_metrics
from utils.common import merge_config, save_model, cp_projects
from utils.common import get_work_dir, get_logger
import time
from IPython import embed
color_list = [(0,0,225), (255,0,0), (0,225,0), (255,0,225), (255,255,225), (0,255,255), (255,255,0), (125,255,255)]
thickness_list = [1, 3, 5, 7, 9, 11, 13, 15]
thickness_list.reverse()
def inference(net, data_label):
img, cls_label, _, _, _ = data_label
img, cls_label = img.cuda(), cls_label.long().cuda()
cls_out = net(img)
return {'cls_out': cls_out, 'cls_label': cls_label}
def resolve_val_data(results):
# input: (batch_size, num_gridding, num_cls_per_lane, num_of_lanes)
# output: (batch_size, num_cls_per_lane, num_of_lanes)
results['cls_out'] = torch.argmax(results['cls_out'], dim=1)
return results
def calc_loss(loss_dict, results, logger, global_step):
loss = 0
for i in range(len(loss_dict['name'])):
data_src = loss_dict['data_src'][i]
datas = [results[src] for src in data_src]
loss_cur = loss_dict['op'][i](*datas)
if global_step % 20 == 0:
# print(loss_cur)
logger.add_scalar('loss/'+loss_dict['name'][i], loss_cur, global_step)
loss += loss_cur * loss_dict['weight'][i]
return loss
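# Illustrative example (hypothetical config): with loss_dict = {'name': ['cls_loss', 'sim_loss'],
# 'weight': [1.0, 0.5], ...}, calc_loss returns cls_loss + 0.5 * sim_loss and logs each
# unweighted term to TensorBoard every 20 global steps.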
def train(net, train_loader, loss_dict, optimizer, scheduler, logger, epoch, metric_dict):
dist_print('***************** Training ***********************')
net.train(mode=True)
progress_bar = dist_tqdm(train_loader)
t_data_0 = time.time()
for b_idx, data_label in enumerate(progress_bar):
t_data_1 = time.time()
reset_metrics(metric_dict)
global_step = epoch * len(train_loader) + b_idx
t_net_0 = time.time()
results = inference(net, data_label)
loss = calc_loss(loss_dict, results, logger, global_step)
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step(global_step)
t_net_1 = time.time()
results = resolve_val_data(results)
update_metrics(metric_dict, results)
if global_step % 20 == 0:
for me_name, me_op in zip(metric_dict['name'], metric_dict['op']):
logger.add_scalar('metric/' + me_name, me_op.get(), global_step=global_step)
logger.add_scalar('meta/lr', optimizer.param_groups[0]['lr'], global_step=global_step)
if hasattr(progress_bar, 'set_postfix'):
kwargs = {me_name: '%.3f' % me_op.get() for me_name, me_op in zip(metric_dict['name'], metric_dict['op'])}
progress_bar.set_postfix(loss = '%.3f' % float(loss),
data_time = '%.3f' % float(t_data_1 - t_data_0),
net_time = '%.3f' % float(t_net_1 - t_net_0),
**kwargs)
t_data_0 = time.time()
def validate(net, val_loader, logger, metric_dict, savefig=[]):
dist_print('***************** Validating ***********************')
net.train(mode=False)
progress_bar = dist_tqdm(val_loader)
t_data_0 = time.time()
reset_metrics(metric_dict)
preds = []; gts = []
for b_idx, data_label in enumerate(progress_bar):
t_data_1 = time.time()
global_step = b_idx
results = inference(net, data_label)
preds_inter = [grid_2_inter(out, cfg.griding_num) for out in results['cls_out']]
# print(pred)
gt = data_label[2].cpu().numpy()
# print(gt)
if len(savefig)!=0:
for idx, item in enumerate(data_label[-1]):
vis = cv2.resize(cv2.imread(os.path.join(savefig[0], item)), (1280, 720))
vis_mask = np.zeros_like(vis).astype(np.uint8)
for i in range(preds_inter[idx].shape[0]):
points = [[int(x),int(y)] for (x,y) in zip(preds_inter[idx][i], raildb_row_anchor) if x>=0]
cv2.polylines(vis, (np.asarray([points])).astype(np.int32), False, color_list[i], thickness=thickness_list[i])
cv2.polylines(vis_mask, (np.asarray([points])).astype(np.int32), False, color_list[i], thickness=thickness_list[i])
vis_path = os.path.join(savefig[0], 'row_based/vis', item).replace('pic', savefig[1])
if not os.path.exists(os.path.dirname(vis_path)): os.makedirs(os.path.dirname(vis_path))
cv2.imwrite(vis_path, vis)
pred_path = os.path.join(savefig[0], 'row_based/pred', item).replace('pic', savefig[1])
if not os.path.exists(os.path.dirname(pred_path)): os.makedirs(os.path.dirname(pred_path))
                cv2.imwrite(pred_path, vis_mask)
results = resolve_val_data(results)
update_metrics(metric_dict, results)
t_data_0 = time.time()
for me_name, me_op in zip(metric_dict['name'], metric_dict['op']):
logger.add_scalar('metric/' + me_name, me_op.get(), global_step=global_step)
acc_top1 = metric_dict['op'][0].get()
if hasattr(progress_bar, 'set_postfix'):
kwargs = {me_name: '%.3f' % me_op.get() for me_name, me_op in zip(metric_dict['name'], metric_dict['op'])}
progress_bar.set_postfix(**kwargs,
data_time = '%.3f' % float(t_data_1 - t_data_0),
)
preds.append(preds_inter)
gts.append(gt)
preds = np.concatenate(preds); gts = np.concatenate(gts)
res = LaneEval.bench_all(preds, gts, raildb_row_anchor)
res = json.loads(res)
for r in res:
dist_print(r['name'], r['value'])
# for i in range(1, 21):
# LaneEval.pixel_thresh = i
# res = LaneEval.bench_all(preds, gts, raildb_row_anchor)
# res = json.loads(res)
# for r in res:
# dist_print(r['name'], r['value'])
return acc_top1
def validateplus(cfg, distributed, best_model, logger, metric_dict):
dist_print('************* validate sun ***************')
val_sun_loader, _ = get_train_loader(cfg.batch_size, cfg.data_root, cfg.griding_num, distributed, cfg.num_lanes, mode='val', type='sun')
validate(best_model, val_sun_loader, logger, metric_dict,) # savefig=[cfg.data_root, 'sun'])
dist_print('************* validate rain ***************')
val_rain_loader, _ = get_train_loader(cfg.batch_size, cfg.data_root, cfg.griding_num, distributed, cfg.num_lanes, mode='val', type='rain')
validate(best_model, val_rain_loader, logger, metric_dict,) # savefig=[cfg.data_root, 'rain'])
dist_print('************* validate night ***************')
val_night_loader, _ = get_train_loader(cfg.batch_size, cfg.data_root, cfg.griding_num, distributed, cfg.num_lanes, mode='val', type='night')
validate(best_model, val_night_loader, logger, metric_dict,) # savefig=[cfg.data_root, 'night'])
dist_print('************* validate line ***************')
val_line_loader, _ = get_train_loader(cfg.batch_size, cfg.data_root, cfg.griding_num, distributed, cfg.num_lanes, mode='val', type='line')
validate(best_model, val_line_loader, logger, metric_dict,) # savefig=[cfg.data_root, 'line'])
dist_print('************* validate cross ***************')
val_cross_loader, _ = get_train_loader(cfg.batch_size, cfg.data_root, cfg.griding_num, distributed, cfg.num_lanes, mode='val', type='cross')
validate(best_model, val_cross_loader, logger, metric_dict,) # savefig=[cfg.data_root, 'cross'])
dist_print('************* validate curve ***************')
val_curve_loader, _ = get_train_loader(cfg.batch_size, cfg.data_root, cfg.griding_num, distributed, cfg.num_lanes, mode='val', type='curve')
validate(best_model, val_curve_loader, logger, metric_dict,) # savefig=[cfg.data_root, 'curve'])
dist_print('************* validate slope ***************')
val_slope_loader, _ = get_train_loader(cfg.batch_size, cfg.data_root, cfg.griding_num, distributed, cfg.num_lanes, mode='val', type='slope')
validate(best_model, val_slope_loader, logger, metric_dict,) # savefig=[cfg.data_root, 'slope'])
dist_print('************* validate near ***************')
val_near_loader, _ = get_train_loader(cfg.batch_size, cfg.data_root, cfg.griding_num, distributed, cfg.num_lanes, mode='val', type='near')
validate(best_model, val_near_loader, logger, metric_dict,) # savefig=[cfg.data_root, 'near'])
dist_print('************* validate far ***************')
val_far_loader, _ = get_train_loader(cfg.batch_size, cfg.data_root, cfg.griding_num, distributed, cfg.num_lanes, mode='val', type='far')
validate(best_model, val_far_loader, logger, metric_dict,) # savefig=[cfg.data_root, 'far'])
if __name__ == "__main__":
torch.backends.cudnn.benchmark = True
args, cfg = merge_config()
work_dir = get_work_dir(cfg)
distributed = False
if 'WORLD_SIZE' in os.environ:
distributed = int(os.environ['WORLD_SIZE']) > 1
if distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
dist_print(datetime.datetime.now().strftime('[%Y/%m/%d %H:%M:%S]') + ' start training...')
dist_print(cfg)
assert cfg.backbone in ['18','34','50','mobilenet_v2', 'squeezenet1_0', 'vit_b_16',]
train_loader, cls_num_per_lane = get_train_loader(cfg.batch_size, cfg.data_root, cfg.griding_num, distributed, cfg.num_lanes, mode='train', type=cfg.type)
val_loader, _ = get_train_loader(cfg.batch_size, cfg.data_root, cfg.griding_num, distributed, cfg.num_lanes, mode='val', type='all')
net = parsingNet(pretrained = True, backbone=cfg.backbone, cls_dim = (cfg.griding_num+1, cls_num_per_lane, cfg.num_lanes)).cuda()
if distributed:
net = torch.nn.parallel.DistributedDataParallel(net, device_ids = [args.local_rank])
optimizer = get_optimizer(net, cfg)
if cfg.finetune is not None:
dist_print('finetune from ', cfg.finetune)
state_all = torch.load(cfg.finetune)['model']
state_clip = {} # only use backbone parameters
for k,v in state_all.items():
if 'model' in k:
state_clip[k] = v
net.load_state_dict(state_clip, strict=False)
if cfg.resume is not None:
dist_print('==> Resume model from ' + cfg.resume)
resume_dict = torch.load(cfg.resume, map_location='cpu')
net.load_state_dict(resume_dict['model'])
if 'optimizer' in resume_dict.keys():
optimizer.load_state_dict(resume_dict['optimizer'])
resume_epoch = int(os.path.split(cfg.resume)[1][2:5]) + 1
else:
resume_epoch = 0
scheduler = get_scheduler(optimizer, cfg, len(train_loader))
dist_print(len(train_loader))
metric_dict = get_metric_dict(cfg)
loss_dict = get_loss_dict(cfg)
logger = get_logger(work_dir, cfg)
cp_projects(args.auto_backup, work_dir)
best_acc = 0; best_epoch = 0; best_model = None
for epoch in range(resume_epoch, cfg.epoch):
train(net, train_loader, loss_dict, optimizer, scheduler, logger, epoch, metric_dict)
acc = validate(net, val_loader, logger, metric_dict)
if acc > best_acc: best_acc, best_epoch, best_model = acc, epoch, copy.deepcopy(net)
save_model(net, optimizer, epoch, work_dir, distributed)
# net.load_state_dict(torch.load('/home/ssd7T/lxpData/RAIL-DB/log/rail/best_0.893.pth', map_location='cpu'))
# best_model = copy.deepcopy(net)
dist_print('************* validate all ***************')
validate(best_model, val_loader, logger, metric_dict,) # savefig=[cfg.data_root, 'all'])
# validateplus(cfg, distributed, best_model, logger, metric_dict)
logger.close()
dist_print(best_acc, best_epoch)
if is_main_process(): torch.save(best_model.state_dict(), os.path.join(work_dir, 'best_{:.3f}.pth'.format(best_acc)))
| 12,493
| 46.325758
| 158
|
py
|
Rail-Detection
|
Rail-Detection-main/segmentation/backbone.py
|
import torch,pdb
import torchvision
import torch.nn.modules
class vgg16bn(torch.nn.Module):
def __init__(self,pretrained = False):
super(vgg16bn,self).__init__()
model = list(torchvision.models.vgg16_bn(pretrained=pretrained).features.children())
model = model[:33]+model[34:43]
self.model = torch.nn.Sequential(*model)
def forward(self,x):
return self.model(x)
class resnet(torch.nn.Module):
def __init__(self,layers,pretrained = False):
super(resnet,self).__init__()
if layers == '18':
model = torchvision.models.resnet18(pretrained=pretrained)
elif layers == '34':
model = torchvision.models.resnet34(pretrained=pretrained)
elif layers == '50':
model = torchvision.models.resnet50(pretrained=pretrained)
elif layers == '101':
model = torchvision.models.resnet101(pretrained=pretrained)
elif layers == '152':
model = torchvision.models.resnet152(pretrained=pretrained)
elif layers == '50next':
model = torchvision.models.resnext50_32x4d(pretrained=pretrained)
elif layers == '101next':
model = torchvision.models.resnext101_32x8d(pretrained=pretrained)
elif layers == '50wide':
model = torchvision.models.wide_resnet50_2(pretrained=pretrained)
elif layers == '101wide':
model = torchvision.models.wide_resnet101_2(pretrained=pretrained)
else:
raise NotImplementedError
self.conv1 = model.conv1
self.bn1 = model.bn1
self.relu = model.relu
self.maxpool = model.maxpool
self.layer1 = model.layer1
self.layer2 = model.layer2
self.layer3 = model.layer3
self.layer4 = model.layer4
def forward(self,x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x2 = self.layer2(x)
x3 = self.layer3(x2)
x4 = self.layer4(x3)
return x,x2,x3,x4
| 2,097
| 35.172414
| 92
|
py
|
Rail-Detection
|
Rail-Detection-main/segmentation/speed_simple.py
|
import torch
import time, sys
import numpy as np
from model_seg import parsingNet
torch.backends.cudnn.benchmark = True
net = parsingNet(pretrained = False, backbone='18', cls_dim=(200, 52, 4)).cuda()
net.eval()
x = torch.zeros((1,3,288,800)).cuda() + 1
for i in range(10):
y = net(x)
t_all = []
for i in range(100):
t1 = time.time()
y = net(x)
t2 = time.time()
t_all.append(t2 - t1)
print('average time:', np.mean(t_all) / 1)
print('average fps:',1 / np.mean(t_all))
print('fastest time:', min(t_all) / 1)
print('fastest fps:',1 / min(t_all))
print('slowest time:', max(t_all) / 1)
print('slowest fps:',1 / max(t_all))
from torchinfo import summary
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net.to(device)
summary(net, input_size=(1, 3, 288, 800))
from torchstat import stat
stat(net.cpu(), (3, 288, 800))
| 1,764
| 22.851351
| 80
|
py
|
Rail-Detection
|
Rail-Detection-main/segmentation/__init__.py
| 0
| 0
| 0
|
py
|
|
Rail-Detection
|
Rail-Detection-main/segmentation/model_seg.py
|
import torch, sys
from backbone import resnet
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class conv_bn_relu(nn.Module):
def __init__(self, in_channels, out_channels, upsample=0):
super(conv_bn_relu,self).__init__()
self.conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, dilation = 1, bias = False)
self.bn = torch.nn.BatchNorm2d(out_channels)
self.relu = torch.nn.ReLU()
self.upsample = upsample
def forward(self,x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
if self.upsample:
x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
return x
class SegmentationBlock(nn.Module):
def __init__(self, in_channels, out_channels, n_upsamples=0):
super().__init__()
blocks = [conv_bn_relu(in_channels, out_channels, upsample=bool(n_upsamples))]
if n_upsamples > 1:
for _ in range(1, n_upsamples):
blocks.append(conv_bn_relu(out_channels, out_channels, upsample=True))
self.block = nn.Sequential(*blocks)
def forward(self, x):
return self.block(x)
class FPNBlock(nn.Module):
def __init__(self, pyramid_channels, skip_channels):
super().__init__()
self.skip_conv = nn.Conv2d(skip_channels, pyramid_channels, kernel_size=1)
def forward(self, x, skip=None):
x = F.interpolate(x, scale_factor=2, mode="nearest")
skip = self.skip_conv(skip)
x = x + skip
return x
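# Shape sketch for FPNBlock (assuming a resnet18 backbone and a 288x800 input):
# for self.p4, x = p5 output of shape (N, 128, 9, 25) is upsampled to (N, 128, 18, 50)
# and added to skip_conv(c4), where c4 = (N, 256, 18, 50) is projected down to 128
# channels by the 1x1 convolution.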
class SegmentationHead(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, upsampling=4):
super(SegmentationHead, self).__init__()
self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=kernel_size // 2)
self.upsampling = nn.UpsamplingBilinear2d(scale_factor=upsampling) if upsampling > 1 else nn.Identity()
def forward(self, x):
x = self.conv2d(x)
x = self.upsampling(x)
return x
class parsingNet(torch.nn.Module):
def __init__(self, size=(288, 800), pretrained=True, backbone='50', cls_dim=(37, 10, 4)):
super(parsingNet, self).__init__()
self.size = size
self.w = size[0]
self.h = size[1]
self.cls_dim = cls_dim # (num_gridding, num_cls_per_lane, num_of_lanes)
# num_cls_per_lane is the number of row anchors
self.total_dim = np.prod(cls_dim)
# input : nchw,
# output: (w+1) * sample_rows * 4
self.model = resnet(backbone, pretrained=pretrained)
# Lateral layers
self.p5 = conv_bn_relu(512, 128) if backbone in ['34','18'] else conv_bn_relu(2048, 128)
self.p4 = FPNBlock(128, 256) if backbone in ['34','18'] else FPNBlock(128, 1024)
self.p3 = FPNBlock(128, 128) if backbone in ['34','18'] else FPNBlock(128, 512)
self.p2 = FPNBlock(128, 64) if backbone in ['34','18'] else FPNBlock(128, 256)
# Smooth layers
self.smooth5 = SegmentationBlock(128, 128, n_upsamples=3)
self.smooth4 = SegmentationBlock(128, 128, n_upsamples=2)
self.smooth3 = SegmentationBlock(128, 128, n_upsamples=1)
self.smooth2 = SegmentationBlock(128, 128, n_upsamples=0)
# Final layers output : n, num_of_lanes+1, h, w
self.finallayer = SegmentationHead(128*4, cls_dim[-1]+1)
initialize_weights(self.p5, self.p4, self.p3, self.p2,
self.smooth5, self.smooth4, self.smooth3, self.smooth2, self.finallayer)
def forward(self, x):
# n c h w - > n 2048 sh sw
# Bottom-up
c2, c3, c4, c5 = self.model(x)
# Top-down
p5 = self.p5(c5)
p4 = self.p4(p5, c4)
p3 = self.p3(p4, c3)
p2 = self.p2(p3, c2)
# Smooth
p5 = self.smooth5(p5)
p4 = self.smooth4(p4)
p3 = self.smooth3(p3)
p2 = self.smooth2(p2)
seg = self.finallayer(torch.cat([p5, p4, p3, p2], dim=1))
return seg
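# Output shape sketch (assuming num_lanes=4 and a 288x800 input): the four smoothed
# pyramid levels all sit at stride 4, so the concatenation is (N, 512, 72, 200); the
# segmentation head maps it to (N, 5, 72, 200) and its 4x bilinear upsampling restores
# a full-resolution (N, 5, 288, 800) mask.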
def initialize_weights(*models):
for model in models:
real_init_weights(model)
def real_init_weights(m):
if isinstance(m, list):
for mini_m in m:
real_init_weights(mini_m)
else:
if isinstance(m, torch.nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.Linear):
m.weight.data.normal_(0.0, std=0.01)
elif isinstance(m, torch.nn.BatchNorm2d):
torch.nn.init.constant_(m.weight, 1)
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m,torch.nn.Module):
for mini_m in m.children():
real_init_weights(mini_m)
else:
            print('unknown module', m)
| 5,456
| 33.980769
| 126
|
py
|
Rail-Detection
|
Rail-Detection-main/segmentation/train.py
|
import torch, os, datetime, copy, json, scipy, time, sys, cv2
import numpy as np
from IPython import embed
from model_seg import parsingNet
sys.path.append("..")
from data.dataloader import get_train_loader
from data.constant import raildb_row_anchor
from utils.evaluation import LaneEval, mask_2_inter
from utils.dist_utils import dist_print, dist_tqdm, is_main_process
from utils.factory import get_metric_dict_seg, get_loss_dict_seg, get_optimizer, get_scheduler
from utils.metrics import update_metrics, reset_metrics
from utils.common import merge_config, save_model, cp_projects
from utils.common import get_work_dir, get_logger
color_list = [(0,0,225), (255,0,0), (0,225,0), (255,0,225), (255,255,225), (0,255,255), (255,255,0), (125,255,255)]
thickness_list = [1, 3, 5, 7, 9, 11, 13, 15]
thickness_list.reverse()
def inference(net, data_label):
img, _, _, seg_label, _ = data_label
img, seg_label = img.cuda(), seg_label.long().cuda()
seg_out = net(img)
return {'seg_out': seg_out, 'seg_label': seg_label}
def resolve_val_data(results):
    # input : (batch_size, num_of_lanes+1, height, width)
    # output: (batch_size, height, width)
results['seg_out'] = torch.argmax(results['seg_out'], dim=1)
return results
def calc_loss(loss_dict, results, logger, global_step):
loss = 0
for i in range(len(loss_dict['name'])):
data_src = loss_dict['data_src'][i]
datas = [results[src] for src in data_src]
loss_cur = loss_dict['op'][i](*datas)
if global_step % 20 == 0:
# print(loss_cur)
logger.add_scalar('loss/'+loss_dict['name'][i], loss_cur, global_step)
loss += loss_cur * loss_dict['weight'][i]
return loss
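# Assumed layout of loss_dict as produced by get_loss_dict_seg (a sketch, not the
# actual factory): four parallel lists indexed together, e.g.
# loss_dict = {
#     'name':     ['seg_loss'],
#     'op':       [torch.nn.CrossEntropyLoss()],
#     'weight':   [1.0],
#     'data_src': [('seg_out', 'seg_label')],
# }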
def train(net, train_loader, loss_dict, optimizer, scheduler, logger, epoch, metric_dict):
dist_print('***************** Training ***********************')
net.train(mode=True)
progress_bar = dist_tqdm(train_loader)
t_data_0 = time.time()
for b_idx, data_label in enumerate(progress_bar):
t_data_1 = time.time()
reset_metrics(metric_dict)
global_step = epoch * len(train_loader) + b_idx
t_net_0 = time.time()
results = inference(net, data_label)
loss = calc_loss(loss_dict, results, logger, global_step)
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step(global_step)
t_net_1 = time.time()
results = resolve_val_data(results)
update_metrics(metric_dict, results)
if global_step % 20 == 0:
for me_name, me_op in zip(metric_dict['name'], metric_dict['op']):
logger.add_scalar('metric/' + me_name, me_op.get(), global_step=global_step)
logger.add_scalar('meta/lr', optimizer.param_groups[0]['lr'], global_step=global_step)
if hasattr(progress_bar, 'set_postfix'):
kwargs = {me_name: '%.3f' % me_op.get() for me_name, me_op in zip(metric_dict['name'], metric_dict['op'])}
progress_bar.set_postfix(loss = '%.3f' % float(loss),
data_time = '%.3f' % float(t_data_1 - t_data_0),
net_time = '%.3f' % float(t_net_1 - t_net_0),
**kwargs)
t_data_0 = time.time()
def validate(net, val_loader, logger, metric_dict, savefig=[]):
dist_print('***************** Validating ***********************')
net.train(mode=False)
progress_bar = dist_tqdm(val_loader)
t_data_0 = time.time()
reset_metrics(metric_dict)
preds = []; gts = []
for b_idx, data_label in enumerate(progress_bar):
t_data_1 = time.time()
global_step = b_idx
results = inference(net, data_label)
preds_out = (torch.argmax(torch.nn.functional.interpolate(results['seg_out'], (720, 1280)), dim=1)).cpu().numpy()
preds_inter = [mask_2_inter(out, raildb_row_anchor) for out in preds_out]
gt = data_label[-3].cpu().numpy()
if len(savefig)!=0:
for idx, item in enumerate(data_label[-1]):
vis = cv2.resize(cv2.imread(os.path.join(savefig[0], item)), (1280, 720))
vis_mask = np.zeros_like(vis).astype(np.uint8)
for i in range(preds_inter[idx].shape[0]):
points = [[int(x),int(y)] for (x,y) in zip(preds_inter[idx][i], raildb_row_anchor) if x>=0]
cv2.polylines(vis, (np.asarray([points])).astype(np.int32), False, color_list[i], thickness=thickness_list[i])
vis_mask[(preds_out[idx])==(i+1)] = color_list[i]
vis_path = os.path.join(savefig[0], 'segmentation/vis', item).replace('pic', savefig[1])
if not os.path.exists(os.path.dirname(vis_path)): os.makedirs(os.path.dirname(vis_path))
cv2.imwrite(vis_path, vis)
pred_path = os.path.join(savefig[0], 'segmentation/pred', item).replace('pic', savefig[1])
if not os.path.exists(os.path.dirname(pred_path)): os.makedirs(os.path.dirname(pred_path))
                cv2.imwrite(pred_path, vis_mask)  # save the class mask, not the overlay
results = resolve_val_data(results)
update_metrics(metric_dict, results)
t_data_0 = time.time()
for me_name, me_op in zip(metric_dict['name'], metric_dict['op']):
logger.add_scalar('metric/' + me_name, me_op.get(), global_step=global_step)
acc_top1 = metric_dict['op'][0].get()
if hasattr(progress_bar, 'set_postfix'):
kwargs = {me_name: '%.3f' % me_op.get() for me_name, me_op in zip(metric_dict['name'], metric_dict['op'])}
progress_bar.set_postfix(**kwargs,
data_time = '%.3f' % float(t_data_1 - t_data_0),
)
preds.append(preds_inter)
gts.append(gt)
preds = np.concatenate(preds); gts = np.concatenate(gts)
for i in range(1, 21):
LaneEval.pixel_thresh = i
res = LaneEval.bench_all(preds, gts, raildb_row_anchor)
res = json.loads(res)
for r in res:
dist_print(r['name'], r['value'])
return acc_top1
if __name__ == "__main__":
torch.backends.cudnn.benchmark = True
args, cfg = merge_config()
work_dir = get_work_dir(cfg)
distributed = False
if 'WORLD_SIZE' in os.environ:
distributed = int(os.environ['WORLD_SIZE']) > 1
if distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
dist_print(datetime.datetime.now().strftime('[%Y/%m/%d %H:%M:%S]') + ' start training...')
dist_print(cfg)
assert cfg.backbone in ['18','34','50','101','152','50next','101next','50wide','101wide']
train_loader, cls_num_per_lane = get_train_loader(cfg.batch_size, cfg.data_root, cfg.griding_num, distributed, cfg.num_lanes, mode='train', type=cfg.type)
val_loader, _ = get_train_loader(cfg.batch_size, cfg.data_root, cfg.griding_num, distributed, cfg.num_lanes, mode='val', type='all')
val_sun_loader, _ = get_train_loader(cfg.batch_size, cfg.data_root, cfg.griding_num, distributed, cfg.num_lanes, mode='val', type='sun')
val_rain_loader, _ = get_train_loader(cfg.batch_size, cfg.data_root, cfg.griding_num, distributed, cfg.num_lanes, mode='val', type='rain')
val_night_loader, _ = get_train_loader(cfg.batch_size, cfg.data_root, cfg.griding_num, distributed, cfg.num_lanes, mode='val', type='night')
val_line_loader, _ = get_train_loader(cfg.batch_size, cfg.data_root, cfg.griding_num, distributed, cfg.num_lanes, mode='val', type='line')
val_cross_loader, _ = get_train_loader(cfg.batch_size, cfg.data_root, cfg.griding_num, distributed, cfg.num_lanes, mode='val', type='cross')
val_curve_loader, _ = get_train_loader(cfg.batch_size, cfg.data_root, cfg.griding_num, distributed, cfg.num_lanes, mode='val', type='curve')
val_slope_loader, _ = get_train_loader(cfg.batch_size, cfg.data_root, cfg.griding_num, distributed, cfg.num_lanes, mode='val', type='slope')
val_near_loader, _ = get_train_loader(cfg.batch_size, cfg.data_root, cfg.griding_num, distributed, cfg.num_lanes, mode='val', type='near')
val_far_loader, _ = get_train_loader(cfg.batch_size, cfg.data_root, cfg.griding_num, distributed, cfg.num_lanes, mode='val', type='far')
net = parsingNet(pretrained = True, backbone=cfg.backbone, cls_dim = (cfg.griding_num+1, cls_num_per_lane, cfg.num_lanes)).cuda()
if distributed:
net = torch.nn.parallel.DistributedDataParallel(net, device_ids = [args.local_rank])
optimizer = get_optimizer(net, cfg)
if cfg.finetune is not None:
dist_print('finetune from ', cfg.finetune)
state_all = torch.load(cfg.finetune)['model']
state_clip = {} # only use backbone parameters
for k,v in state_all.items():
if 'model' in k:
state_clip[k] = v
net.load_state_dict(state_clip, strict=False)
if cfg.resume is not None:
dist_print('==> Resume model from ' + cfg.resume)
resume_dict = torch.load(cfg.resume, map_location='cpu')
net.load_state_dict(resume_dict['model'])
if 'optimizer' in resume_dict.keys():
optimizer.load_state_dict(resume_dict['optimizer'])
resume_epoch = int(os.path.split(cfg.resume)[1][2:5]) + 1
else:
resume_epoch = 0
scheduler = get_scheduler(optimizer, cfg, len(train_loader))
dist_print(len(train_loader))
metric_dict = get_metric_dict_seg(cfg)
loss_dict = get_loss_dict_seg(cfg)
logger = get_logger(work_dir, cfg)
cp_projects(args.auto_backup, work_dir)
# best_acc = 0; best_epoch = 0; best_model = None
# for epoch in range(resume_epoch, cfg.epoch):
# train(net, train_loader, loss_dict, optimizer, scheduler, logger, epoch, metric_dict)
# acc = validate(net, val_loader, logger, metric_dict)
# if acc > best_acc: best_acc, best_epoch, best_model = acc, epoch, copy.deepcopy(net)
# save_model(net, optimizer, epoch, work_dir, distributed)
net.load_state_dict(torch.load('/home/ssd7T/lxpData/rail/log/rail_seg/best_0.747.pth', map_location='cpu'))
best_model = copy.deepcopy(net)
dist_print('************* validate all ***************')
validate(best_model, val_loader, logger, metric_dict, ) # savefig=[cfg.data_root, 'all'])
# dist_print('************* validate sun ***************')
# validate(best_model, val_sun_loader, logger, metric_dict, ) # savefig=[cfg.data_root, 'sun'])
# dist_print('************* validate rain ***************')
# validate(best_model, val_rain_loader, logger, metric_dict, ) # savefig=[cfg.data_root, 'rain'])
# dist_print('************* validate night ***************')
# validate(best_model, val_night_loader, logger, metric_dict, ) # savefig=[cfg.data_root, 'night'])
# dist_print('************* validate line ***************')
# validate(best_model, val_line_loader, logger, metric_dict, ) # savefig=[cfg.data_root, 'line'])
# dist_print('************* validate cross ***************')
# validate(best_model, val_cross_loader, logger, metric_dict, ) # savefig=[cfg.data_root, 'cross'])
# dist_print('************* validate curve ***************')
# validate(best_model, val_curve_loader, logger, metric_dict, ) # savefig=[cfg.data_root, 'curve'])
# dist_print('************* validate slope ***************')
# validate(best_model, val_slope_loader, logger, metric_dict, ) # savefig=[cfg.data_root, 'slope'])
# dist_print('************* validate near ***************')
# validate(best_model, val_near_loader, logger, metric_dict, ) # savefig=[cfg.data_root, 'near'])
# dist_print('************* validate far ***************')
# validate(best_model, val_far_loader, logger, metric_dict, ) # savefig=[cfg.data_root, 'far'])
logger.close()
# dist_print(best_acc, best_epoch)
# if is_main_process(): torch.save(best_model.state_dict(), os.path.join(work_dir, 'best_{:.3f}.pth'.format(best_acc)))
| 24,730
| 48.860887
| 158
|
py
|
Rail-Detection
|
Rail-Detection-main/hand-crafted/hand_crafted.py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 8 21:49:26 2017
@author: zander
"""
import os, random, sys, json
import cv2
import hand_utils
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import time, tqdm
from IPython import embed
import pandas as pd
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
sys.path.append("..")
import torchvision.transforms as transforms
from data.dataloader import get_train_loader
from data.constant import raildb_row_anchor
sys.path.append("..")
from utils.evaluation import mask_2_inter, LaneEval
data_root = '/home/ssd7T/lxpData/rail/dataset/'
img_transforms = transforms.Compose([
transforms.ToTensor(),
])
resolution_src = np.float32([1280, 720])
ROI_src = np.float32([[80, 720], [400, 250], [1200, 720], [800, 250]])
ROI_dst = np.float32([(0, 720), (0, 0), (1280, 720), (1280, 0)])
def validate(data_type):
print(data_type)
val_loader, _ = get_train_loader(1, data_root, griding_num=200, distributed=False, num_lanes=4, mode='val', type=data_type)
t_all = []; preds = []; gts = []
for i, data_label in enumerate(tqdm.tqdm(val_loader)):
# if i==5: break
_, _, inter_labels, _, names = data_label
frame = cv2.resize(cv2.imread(os.path.join(data_root, names[0])), (1280, 720))
t1 = time.time()
# perform perspective transform
M, Minv = hand_utils.get_M_Minv(ROI_src, ROI_dst)
img_warped = cv2.warpPerspective(frame.copy(), M, frame.shape[1::-1], flags=cv2.INTER_LINEAR)
# get the thresholded binary image
img_bin = hand_utils.thresholding(img_warped)
# perform detection
left_fit, right_fit, [lefty, leftx], [righty, rightx] = hand_utils.find_line(img_bin)
t2 = time.time()
t_all.append(t2 - t1)
# draw the detected laneline and the information
image = Image.fromarray(frame)
img_draw, warp_draw = hand_utils.draw_area(image, img_bin, ROI_src, Minv, left_fit, right_fit, lefty, leftx, righty, rightx)
# plt.imshow(img_draw)
# plt.pause(2)
vis_path = os.path.join(data_root, 'hand-crafted/vis', names[0]).replace('pic', data_type)
if not os.path.exists(os.path.dirname(vis_path)): os.makedirs(os.path.dirname(vis_path))
cv2.imwrite(vis_path, img_draw)
pred_path = os.path.join(data_root, 'hand-crafted/pred', names[0]).replace('pic', data_type)
if not os.path.exists(os.path.dirname(pred_path)): os.makedirs(os.path.dirname(pred_path))
cv2.imwrite(pred_path, warp_draw)
# evaluation
pred = [mask_2_inter(warp_draw, raildb_row_anchor)]
gt = inter_labels.cpu().numpy()
preds.append(pred)
gts.append(gt)
preds = np.concatenate(preds); gts = np.concatenate(gts)
for i in range(1,21):
LaneEval.pixel_thresh = i
res = LaneEval.bench_all(preds, gts, raildb_row_anchor)
res = json.loads(res)
for r in res:
print(r['name'], r['value'])
    print('average time:', np.mean(t_all))
    print('average fps:', 1 / np.mean(t_all))
    print('fastest time:', min(t_all))
    print('fastest fps:', 1 / min(t_all))
    print('slowest time:', max(t_all))
    print('slowest fps:', 1 / max(t_all))
validate('all')
# for i in ['all','sun','rain','night','line','cross','curve','slope','near','far']:
# data_type = i
# validate(data_type)
| 3,461
| 34.690722
| 132
|
py
|
Rail-Detection
|
Rail-Detection-main/hand-crafted/hand_utils.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 6 23:37:10 2017
@author: yang
"""
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from IPython import embed
import pdb
color_list = [(0,0,225), (255,0,0), (0,225,0), (255,0,225), (255,255,225), (0,255,255), (255,255,0), (125,255,255)]
thickness_list = [1, 3, 5, 7, 9, 11, 13, 15]
thickness_list.reverse()
# get all images in the given directory; presumes the directory contains only image files
def get_images_by_dir(dirname):
img_names = os.listdir(dirname)
img_paths = [dirname+'/'+img_name for img_name in img_names]
imgs = [cv2.imread(path) for path in img_paths]
return imgs
# take the chessboard images and return the object points and image points
def calibrate(images,grid=(9,6)):
object_points=[]
img_points = []
for img in images:
object_point = np.zeros( (grid[0]*grid[1],3),np.float32 )
object_point[:,:2]= np.mgrid[0:grid[0],0:grid[1]].T.reshape(-1,2)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, grid, None)
if ret:
object_points.append(object_point)
img_points.append(corners)
return object_points,img_points
def get_M_Minv(src, dst):
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
return M, Minv
# function takes an image, object points, and image points
# performs the camera calibration, image distortion correction and
# returns the undistorted image
def cal_undistort(img, objpoints, imgpoints):
# Use cv2.calibrateCamera() and cv2.undistort()
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img.shape[1::-1], None, None)
dst = cv2.undistort(img, mtx, dist, None, mtx)
return dst
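# Usage sketch for the calibration helpers (assumption: ./camera_cal holds 9x6
# chessboard photos; the rail pipeline in hand_crafted.py does not actually calibrate):
# imgs = get_images_by_dir('./camera_cal')
# objpoints, imgpoints = calibrate(imgs, grid=(9, 6))
# undistorted = cal_undistort(imgs[0], objpoints, imgpoints)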
def abs_sobel_thresh(img, orient='x', thresh_min=0, thresh_max=255):
# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Apply x or y gradient with the OpenCV Sobel() function
# and take the absolute value
if orient == 'x':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0))
if orient == 'y':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1))
# Rescale back to 8 bit integer
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
# Create a copy and apply the threshold
binary_output = np.zeros_like(scaled_sobel)
# Here I'm using inclusive (>=, <=) thresholds, but exclusive is ok too
binary_output[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
# Return the result
return binary_output
def mag_thresh(img, sobel_kernel=3, mag_thresh_=(30, 170)):
# Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Take both Sobel x and y gradients
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# Calculate the gradient magnitude
gradmag = np.sqrt(sobelx**2 + sobely**2)
# Rescale to 8 bit
scale_factor = np.max(gradmag)/255
gradmag = (gradmag/scale_factor).astype(np.uint8)
# Create a binary image of ones where threshold is met, zeros otherwise
binary_output = np.zeros_like(gradmag)
binary_output[(gradmag >= mag_thresh_[0]) & (gradmag <= mag_thresh_[1])] = 1
# Return the binary image
return binary_output
def dir_threshold(img, sobel_kernel=3, thresh=(0.7, 1.3)):
# Grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Calculate the x and y gradients
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# Take the absolute value of the gradient direction,
# apply a threshold, and create a binary image result
absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
binary_output = np.zeros_like(absgraddir)
binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1
# Return the binary image
return binary_output
def hls_select(img, channel='s',thresh=(160, 255)):
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
if channel=='h':
channel = hls[:,:,0]
elif channel=='l':
channel=hls[:,:,1]
else:
channel=hls[:,:,2]
binary_output = np.zeros_like(channel)
binary_output[(channel > thresh[0]) & (channel <= thresh[1])] = 1
return binary_output
def luv_select(img, thresh=(225, 255)):
luv = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
l_channel = luv[:,:,0]
binary_output = np.zeros_like(l_channel)
binary_output[(l_channel > thresh[0]) & (l_channel <= thresh[1])] = 1
return binary_output
def lab_select(img, thresh=(155, 210)):
lab = cv2.cvtColor(img, cv2.COLOR_RGB2Lab)
b_channel = lab[:,:,2]
binary_output = np.zeros_like(b_channel)
binary_output[(b_channel > thresh[0]) & (b_channel <= thresh[1])] = 1
return binary_output
def find_line(binary_warped):
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
    midpoint = int(histogram.shape[0]/2)  # np.int was removed from NumPy; use builtin int
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# Choose the number of sliding windows
nwindows = 9
# Set height of windows
    window_height = int(binary_warped.shape[0]/nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
# pdb.set_trace()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
leftx_current = leftx_base
rightx_current = rightx_base
# Set the width of the windows +/- margin
margin = 50
# Set minimum number of pixels found to recenter window
minpix = 25
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window+1)*window_height
win_y_high = binary_warped.shape[0] - window*window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
# print(good_left_inds)
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
        # If more than minpix pixels were found, recenter the next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
# pdb.set_trace()
# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
    # Fit a third order polynomial to each line
left_fit = np.polyfit(lefty, leftx, 3)
right_fit = np.polyfit(righty, rightx, 3)
return left_fit, right_fit, [lefty, leftx], [righty, rightx]
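# End-to-end sketch of the hand-crafted detection pipeline (assumptions: `frame` is a
# BGR 1280x720 image; src/dst are the ROI points defined in hand_crafted.py):
# M, Minv = get_M_Minv(src, dst)
# warped = cv2.warpPerspective(frame, M, frame.shape[1::-1], flags=cv2.INTER_LINEAR)
# binary = thresholding(warped)                      # defined later in this file
# left_fit, right_fit, left_pts, right_pts = find_line(binary)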
def find_line_by_previous(binary_warped, left_fit, right_fit):
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
margin = 50
left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**3) + left_fit[1]*(nonzeroy**2) +
left_fit[2]*nonzeroy +left_fit[3] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**3) +
left_fit[1]*(nonzeroy**2) + left_fit[2]*nonzeroy + left_fit[3] + margin)))
right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**3) + right_fit[1]*(nonzeroy**2) +
right_fit[2]*nonzeroy + right_fit[3] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**3) +
right_fit[1]*(nonzeroy**2) + right_fit[2]*nonzeroy + right_fit[3] + margin)))
# Again, extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
    # Fit a third order polynomial to each line
left_fit = np.polyfit(lefty, leftx, 3)
right_fit = np.polyfit(righty, rightx, 3)
return left_fit, right_fit, [lefty, leftx], [righty, rightx]
def expand(img):
image = img
# embed()
h, w, _ = img.shape
_, green, _ = cv2.split(image)
s = np.sum(green, axis=1)
a = range(h)
for i in reversed(a):
if s[i] < 200:
break
for j in range(w): # min x
if green[i][j] == 255: break
for k in reversed(range(w)): #max x
if green[i][k] == 255: break
    for l in range(int(s[i]/255)):  # s[i]/255 = number of lit pixels in this row
image[i,j-l,2] = 255
for l in range(int(s[i]/255)):
# embed()
# print(k,l)
image[i,k+l,2] = 255
return image
def draw_area(undist, binary_warped, ROI_src, Minv, left_fit, right_fit, lefty, leftx, righty, rightx):
# Generate x and y values for curve
ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
left_fitx = left_fit[0]*ploty**3 + left_fit[1]*ploty**2 + left_fit[2]*ploty+left_fit[3]
right_fitx = right_fit[0]*ploty**3 + right_fit[1]*ploty**2 + right_fit[2]*ploty+right_fit[3]
# Create an image to draw the lines on
warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# draw lines on image
draw_points = (np.asarray([left_fitx, ploty]).T).astype(np.int32)
cv2.polylines(color_warp, [draw_points], False, color_list[0], thickness=thickness_list[0])
draw_points = (np.asarray([right_fitx, ploty]).T).astype(np.int32)
cv2.polylines(color_warp, [draw_points], False, color_list[1], thickness=thickness_list[1])
# warp back and combine the result with the original image
undist = np.array(undist)
newwarp = cv2.warpPerspective(color_warp, Minv, (undist.shape[1], undist.shape[0]))
result = cv2.addWeighted(undist, 1, newwarp, 1, 0)
return result, color_warp
def thresholding(img):
# setting all sorts of thresholds
    x_thresh = abs_sobel_thresh(img, orient='x', thresh_min=30, thresh_max=280)  # >255, so effectively no upper cut
# mag_thresh_ = mag_thresh(img, sobel_kernel=3, mag_thresh_=(30, 170))
# dir_thresh = dir_threshold(img, sobel_kernel=3, thresh=(0.7, 1.3))
# hls_thresh = hls_select(img, thresh=(160, 255))
# lab_thresh = lab_select(img, thresh=(155, 210))
# luv_thresh = luv_select(img, thresh=(225, 255))
# thresholding combination
    thresholded = np.zeros_like(x_thresh)
    thresholded[x_thresh == 1] = 1
    return thresholded
def calculate_curv_and_pos(binary_warped, left_fit, right_fit):
# Define y-value where we want radius of curvature
ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
    # find_line returns cubic fits (4 coefficients), so evaluate the full cubic here
    leftx = left_fit[0]*ploty**3 + left_fit[1]*ploty**2 + left_fit[2]*ploty + left_fit[3]
    rightx = right_fit[0]*ploty**3 + right_fit[1]*ploty**2 + right_fit[2]*ploty + right_fit[3]
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
y_eval = np.max(ploty)
# Fit new polynomials to x,y in world space
left_fit_cr = np.polyfit(ploty*ym_per_pix, leftx*xm_per_pix, 2)
right_fit_cr = np.polyfit(ploty*ym_per_pix, rightx*xm_per_pix, 2)
# Calculate the new radii of curvature
left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
curvature = ((left_curverad + right_curverad) / 2)
#print(curvature)
mid_pos = int(binary_warped.shape[0]/2)
lane_width = np.absolute(leftx[mid_pos] - rightx[mid_pos])
lane_xm_per_pix = 3.7 / lane_width
veh_pos = (((leftx[mid_pos] + rightx[mid_pos]) * lane_xm_per_pix) / 2.)
cen_pos = ((binary_warped.shape[1] * lane_xm_per_pix) / 2.)
distance_from_center = cen_pos - veh_pos
return curvature, distance_from_center
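# Radius-of-curvature formula used above, for a quadratic world-space fit
# x = f(y) = A*y**2 + B*y + C evaluated at the image bottom y_eval:
#   R = (1 + (2*A*y_eval + B)**2) ** 1.5 / abs(2*A)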
def select_yellow(image):
hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
lower = np.array([20,60,60])
upper = np.array([38,174, 250])
mask = cv2.inRange(hsv, lower, upper)
return mask
def select_white(image):
lower = np.array([170,170,170])
upper = np.array([255,255,255])
mask = cv2.inRange(image, lower, upper)
return mask
def draw_values(img,curvature,distance_from_center):
font = cv2.FONT_HERSHEY_SIMPLEX
radius_text = "Radius of Curvature: %sm"%(round(curvature))
if distance_from_center>0:
pos_flag = 'right'
else:
pos_flag= 'left'
cv2.putText(img,radius_text,(100,100), font, 1,(255,255,255),2)
#center_text = "Vehicle is %.3fm %s of center"%(abs(distance_from_center),pos_flag)
#cv2.putText(img,center_text,(100,150), font, 1,(255,255,255),2)
return img
if __name__=='__main__':
    # get_M_Minv requires explicit source/destination points; reuse the ROI from hand_crafted.py
    src = np.float32([[80, 720], [400, 250], [1200, 720], [800, 250]])
    dst = np.float32([(0, 720), (0, 0), (1280, 720), (1280, 0)])
    M, M_I = get_M_Minv(src, dst)
    print(M, M_I)
| 14,160
| 38.555866
| 125
|
py
|
Rail-Detection
|
Rail-Detection-main/hand-crafted/__init__.py
| 0
| 0
| 0
|
py
|
|
Rail-Detection
|
Rail-Detection-main/hand-crafted/line.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 10 19:38:04 2017
@author: yang
"""
import numpy as np
# Define a class to receive the characteristics of each line detection
class Line():
def __init__(self):
# was the line detected in the last iteration?
self.detected = False
# x values of the last n fits of the line
self.recent_fitted = [np.array([False])]
# average x values of the fitted line over the last n iterations
self.bestx = None
# polynomial coefficients averaged over the last n iterations
self.best_fit = None
# polynomial coefficients for the most recent fit
self.current_fit = [np.array([False])]
# radius of curvature of the line in some units
self.radius_of_curvature = None
# distance in meters of vehicle center from the line
self.line_base_pos = None
# difference in fit coefficients between last and new fits
self.diffs = np.array([0,0,0,0], dtype='float')
# x values for detected line pixels
self.allx = None
# y values for detected line pixels
self.ally = None
def check_detected(self):
if (self.diffs[0] < 0.01 and self.diffs[1] < 10.0 and self.diffs[2] < 1000.) and len(self.recent_fitted) > 0:
return True
else:
return False
def update(self,fit):
if fit is not None:
if self.best_fit is not None:
self.diffs = abs(fit - self.best_fit)
if self.check_detected():
self.detected =True
if len(self.recent_fitted)>10:
self.recent_fitted = self.recent_fitted[1:]
self.recent_fitted.append(fit)
else:
self.recent_fitted.append(fit)
self.best_fit = np.average(self.recent_fitted, axis=0)
self.current_fit = fit
else:
self.detected = False
else:
self.best_fit = fit
self.current_fit = fit
self.detected=True
self.recent_fitted.append(fit)
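# Usage sketch (assumption: `fit` holds the cubic coefficients returned by
# hand_utils.find_line for one rail):
# line = Line()
# line.update(np.array([1e-6, 1e-4, 0.05, 300.0]))
# print(line.detected, line.best_fit)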
| 2,265
| 36.147541
| 117
|
py
|
Rail-Detection
|
Rail-Detection-main/configs/raildb.py
|
# DATA
dataset = 'raildb'
data_root = '/home/xinpeng/Rail-DB/'
# TRAIN
epoch = 50
batch_size = 64
optimizer = 'Adam' #['SGD','Adam']
# learning_rate = 0.1
learning_rate = 4e-4
weight_decay = 1e-4
momentum = 0.9
scheduler = 'cos' #['multi', 'cos']
# steps = [50,75]
gamma = 0.1
warmup = 'linear'
warmup_iters = 100
# NETWORK
backbone = '18'
griding_num = 200
cls_num_per_lane = 52
# EXP
note = 'test'
log_path = '/home/xinpeng/Rail-DB/log'
# FINETUNE or RESUME MODEL PATH
finetune = None
resume = None
# TEST
test_model = '/home/xinpeng/Rail-DB/log/test_model.pth'
test_work_dir = '/home/xinpeng/Rail-DB/test/'
num_lanes = 4
type = 'all'
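# Typical launch (a sketch; every key above can be overridden on the command line via
# the flags defined in utils/common.py):
# python train.py configs/raildb.py --batch_size 32 --backbone 18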
| 653
| 15.35
| 55
|
py
|
Rail-Detection
|
Rail-Detection-main/configs/__init__.py
| 0
| 0
| 0
|
py
|
|
Rail-Detection
|
Rail-Detection-main/utils/deploy.py
|
import torch, os, cv2, sys
import scipy.special, tqdm
import numpy as np
import torchvision.transforms as transforms
from PIL import Image
sys.path.append("..")
from model.model import parsingNet
from utils.common import merge_config
from utils.dist_utils import dist_print
from utils.evaluation import grid_2_inter
from IPython import embed
color_list = [(0,0,225), (255,0,0), (0,225,0), (255,0,225), (255,255,225), (0,255,255), (255,255,0), (125,255,255)]
thickness_list = [1, 3, 5, 7, 9, 11, 13, 15]
thickness_list.reverse()
raildb_row_anchor = [200, 210, 220, 230, 240, 250, 260, 270, 280, 290, 300, 310, 320,
330, 340, 350, 360, 370, 380, 390, 400, 410, 420, 430, 440, 450,
460, 470, 480, 490, 500, 510, 520, 530, 540, 550, 560, 570, 580,
590, 600, 610, 620, 630, 640, 650, 660, 670, 680, 690, 700, 710]
griding_num = 200
def deploy_image(file_name, net):
frame = cv2.imread(file_name)
image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
img_transforms = transforms.Compose([
transforms.Resize((288, 800)),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
image = img_transforms(image).unsqueeze(0)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
image = image.to(device)
net.to(device)
outs = net(image)
vis = cv2.resize(frame, (1280, 720))
preds_inter = [grid_2_inter(out, griding_num) for out in outs]
for i in range(preds_inter[0].shape[0]):
points = [[int(x),int(y)] for (x,y) in zip(preds_inter[0][i], raildb_row_anchor) if x>=0]
cv2.polylines(vis, (np.asarray([points])).astype(np.int32), False, color_list[i], thickness=thickness_list[i])
cv2.imwrite(file_name[:-4]+'_output.jpg', vis)
def deploy_videos(video_path, net):
cap = cv2.VideoCapture(video_path)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(video_path[:-4]+'_output.avi', fourcc, 20.0, (1280, 720))
while cap.isOpened():
# get a frame
ret, frame = cap.read()
if frame is None: break
# get a prediction
image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
img_transforms = transforms.Compose([
transforms.Resize((288, 800)),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),])
image = img_transforms(image).unsqueeze(0)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
image = image.to(device)
net.to(device)
output = net(image)[0]
# plot a prediction
vis = cv2.resize(frame, (1280, 720))
preds_inter = grid_2_inter(output, griding_num)
for i in range(preds_inter.shape[0]):
points = [[int(x),int(y)] for (x,y) in zip(preds_inter[i], raildb_row_anchor) if x>=0]
cv2.polylines(vis, (np.asarray([points])).astype(np.int32), False, color_list[i], thickness=thickness_list[i])
# show and save a frame
# cv2.imshow("capture", vis)
out.write(vis)
if cv2.waitKey(100) & 0xFF == ord('q'):
break
out.release()
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
torch.backends.cudnn.benchmark = True
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
dist_print('start testing...')
net = parsingNet(pretrained = False, backbone='18', cls_dim = (griding_num+1, len(raildb_row_anchor), 4),).cuda()
state_dict = torch.load('../best_model.pth', map_location='cpu')
compatible_state_dict = {}
for k, v in state_dict.items():
if 'module.' in k:
compatible_state_dict[k[7:]] = v
else:
compatible_state_dict[k] = v
net.load_state_dict(compatible_state_dict, strict=False)
net.eval()
deploy_videos('./example.mp4', net)
# deploy_image('./example.jpg', net)
# python deploy.py
| 4,050
| 35.827273
| 122
|
py
|
Rail-Detection
|
Rail-Detection-main/utils/loss.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from IPython import embed
class OhemCELoss(nn.Module):
def __init__(self, thresh, n_min, ignore_lb=255, *args, **kwargs):
super(OhemCELoss, self).__init__()
self.thresh = -torch.log(torch.tensor(thresh, dtype=torch.float)).cuda()
self.n_min = n_min
self.ignore_lb = ignore_lb
self.criteria = nn.CrossEntropyLoss(ignore_index=ignore_lb, reduction='none')
def forward(self, logits, labels):
N, C, H, W = logits.size()
loss = self.criteria(logits, labels).view(-1)
loss, _ = torch.sort(loss, descending=True)
if loss[self.n_min] > self.thresh:
loss = loss[loss>self.thresh]
else:
loss = loss[:self.n_min]
return torch.mean(loss)
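# Instantiation sketch (assumption: 288x800 inputs with batch size 8; n_min is the
# number of hardest pixels that are always kept by the online hard example mining):
# criterion = OhemCELoss(thresh=0.7, n_min=8 * 288 * 800 // 16)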
class SoftmaxFocalLoss(nn.Module):
def __init__(self, gamma, ignore_lb=255, *args, **kwargs):
super(SoftmaxFocalLoss, self).__init__()
self.gamma = gamma
self.nll = nn.NLLLoss(ignore_index=ignore_lb)
def forward(self, logits, labels):
scores = F.softmax(logits, dim=1)
factor = torch.pow(1.-scores, self.gamma)
log_score = F.log_softmax(logits, dim=1)
log_score = factor * log_score
loss = self.nll(log_score, labels)
return loss
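# SoftmaxFocalLoss implements FL(p_t) = -(1 - p_t)**gamma * log(p_t): the
# (1 - p_t)**gamma factor down-weights well-classified pixels relative to plain
# cross-entropy, focusing training on the hard ones.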
class ParsingRelationLoss(nn.Module):
def __init__(self):
super(ParsingRelationLoss, self).__init__()
def forward(self,logits):
n,c,h,w = logits.shape
loss_all = []
for i in range(0,h-1):
loss_all.append(logits[:,:,i,:] - logits[:,:,i+1,:])
        # each difference term has shape (n, c, w)
loss = torch.cat(loss_all)
return torch.nn.functional.smooth_l1_loss(loss,torch.zeros_like(loss))
class ParsingRelationDis(nn.Module):
def __init__(self):
super(ParsingRelationDis, self).__init__()
self.l1 = torch.nn.L1Loss()
# self.l1 = torch.nn.MSELoss()
def forward(self, x):
n,dim,num_rows,num_cols = x.shape
x = torch.nn.functional.softmax(x[:,:dim-1,:,:],dim=1)
embedding = torch.Tensor(np.arange(dim-1)).float().to(x.device).view(1,-1,1,1)
pos = torch.sum(x*embedding,dim = 1)
diff_list1 = []
for i in range(0,num_rows // 2):
diff_list1.append(pos[:,i,:] - pos[:,i+1,:])
loss = 0
for i in range(len(diff_list1)-1):
loss += self.l1(diff_list1[i],diff_list1[i+1])
loss /= len(diff_list1) - 1
return loss
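# ParsingRelationDis converts the per-row classification logits into expected lane
# x-positions via a soft-argmax over the grid dimension, then penalizes changes in the
# row-to-row displacement, i.e. a smoothness prior on the detected rails.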
| 2,528
| 34.125
| 86
|
py
|
Rail-Detection
|
Rail-Detection-main/utils/dist_utils.py
|
import torch
import torch.distributed as dist
import pickle
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def to_python_float(t):
if hasattr(t, 'item'):
return t.item()
else:
return t[0]
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def can_log():
return is_main_process()
def dist_print(*args, **kwargs):
if can_log():
print(*args, **kwargs)
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def dist_cat_reduce_tensor(tensor):
if not dist.is_available():
return tensor
if not dist.is_initialized():
return tensor
# dist_print(tensor)
rt = tensor.clone()
all_list = [torch.zeros_like(tensor) for _ in range(get_world_size())]
dist.all_gather(all_list,rt)
# dist_print(all_list[0][1],all_list[1][1],all_list[2][1],all_list[3][1])
# dist_print(all_list[0][2],all_list[1][2],all_list[2][2],all_list[3][2])
# dist_print(all_list[0][3],all_list[1][3],all_list[2][3],all_list[3][3])
# dist_print(all_list[0].shape)
return torch.cat(all_list,dim = 0)
def dist_sum_reduce_tensor(tensor):
if not dist.is_available():
return tensor
if not dist.is_initialized():
return tensor
if not isinstance(tensor, torch.Tensor):
return tensor
rt = tensor.clone()
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)  # reduce_op was deprecated in favor of ReduceOp
return rt
def dist_mean_reduce_tensor(tensor):
rt = dist_sum_reduce_tensor(tensor)
rt /= get_world_size()
return rt
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.LongTensor([tensor.numel()]).to("cuda")
size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
if local_size != max_size:
padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
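# Usage sketch (each rank passes its local list; every rank receives all of them):
# gathered = all_gather(local_preds)   # list with one entry per rank
# merged = [p for rank_preds in gathered for p in rank_preds]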
from torch.utils.tensorboard import SummaryWriter
class DistSummaryWriter(SummaryWriter):
def __init__(self, *args, **kwargs):
if can_log():
super(DistSummaryWriter, self).__init__(*args, **kwargs)
def add_scalar(self, *args, **kwargs):
if can_log():
super(DistSummaryWriter, self).add_scalar(*args, **kwargs)
def add_figure(self, *args, **kwargs):
if can_log():
super(DistSummaryWriter, self).add_figure(*args, **kwargs)
def add_graph(self, *args, **kwargs):
if can_log():
super(DistSummaryWriter, self).add_graph(*args, **kwargs)
def add_histogram(self, *args, **kwargs):
if can_log():
super(DistSummaryWriter, self).add_histogram(*args, **kwargs)
def add_image(self, *args, **kwargs):
if can_log():
super(DistSummaryWriter, self).add_image(*args, **kwargs)
def close(self):
if can_log():
super(DistSummaryWriter, self).close()
import tqdm
def dist_tqdm(obj, *args, **kwargs):
if can_log():
return tqdm.tqdm(obj, *args, **kwargs)
else:
return obj
| 4,623
| 25.574713
| 77
|
py
|
Rail-Detection
|
Rail-Detection-main/utils/config.py
|
import json
import os.path as osp
import shutil
import sys
import tempfile
from argparse import Action, ArgumentParser
from collections import abc
from importlib import import_module
from addict import Dict
BASE_KEY = '_base_'
DELETE_KEY = '_delete_'
class ConfigDict(Dict):
def __missing__(self, name):
raise KeyError(name)
def __getattr__(self, name):
try:
value = super(ConfigDict, self).__getattr__(name)
except KeyError:
ex = AttributeError(f"'{self.__class__.__name__}' object has no "
f"attribute '{name}'")
except Exception as e:
ex = e
else:
return value
raise ex
def add_args(parser, cfg, prefix=''):
for k, v in cfg.items():
if isinstance(v, str):
parser.add_argument('--' + prefix + k)
elif isinstance(v, int):
parser.add_argument('--' + prefix + k, type=int)
elif isinstance(v, float):
parser.add_argument('--' + prefix + k, type=float)
elif isinstance(v, bool):
parser.add_argument('--' + prefix + k, action='store_true')
elif isinstance(v, dict):
add_args(parser, v, prefix + k + '.')
elif isinstance(v, abc.Iterable):
parser.add_argument('--' + prefix + k, type=type(v[0]), nargs='+')
else:
print(f'cannot parse key {prefix + k} of type {type(v)}')
return parser
class Config(object):
"""A facility for config and config files.
It supports common file formats as configs: python/json/yaml. The interface
is the same as a dict object and also allows access config values as
attributes.
Example:
>>> cfg = Config(dict(a=1, b=dict(b1=[0, 1])))
>>> cfg.a
1
>>> cfg.b
{'b1': [0, 1]}
>>> cfg.b.b1
[0, 1]
>>> cfg = Config.fromfile('tests/data/config/a.py')
>>> cfg.filename
"/home/kchen/projects/mmcv/tests/data/config/a.py"
>>> cfg.item4
'test'
>>> cfg
"Config [path: /home/kchen/projects/mmcv/tests/data/config/a.py]: "
"{'item1': [1, 2], 'item2': {'a': 0}, 'item3': True, 'item4': 'test'}"
"""
@staticmethod
def _file2dict(filename):
filename = osp.abspath(osp.expanduser(filename))
if filename.endswith('.py'):
with tempfile.TemporaryDirectory() as temp_config_dir:
temp_config_file = tempfile.NamedTemporaryFile(
dir=temp_config_dir, suffix='.py')
temp_config_name = osp.basename(temp_config_file.name)
# close temp file
temp_config_file.close()
shutil.copyfile(filename,
osp.join(temp_config_dir, temp_config_name))
temp_module_name = osp.splitext(temp_config_name)[0]
sys.path.insert(0, temp_config_dir)
mod = import_module(temp_module_name)
sys.path.pop(0)
cfg_dict = {
name: value
for name, value in mod.__dict__.items()
if not name.startswith('__')
}
# delete imported module
del sys.modules[temp_module_name]
elif filename.endswith(('.yml', '.yaml', '.json')):
import mmcv
cfg_dict = mmcv.load(filename)
else:
            raise IOError('Only py/yml/yaml/json types are supported!')
cfg_text = filename + '\n'
with open(filename, 'r') as f:
cfg_text += f.read()
if BASE_KEY in cfg_dict:
cfg_dir = osp.dirname(filename)
base_filename = cfg_dict.pop(BASE_KEY)
base_filename = base_filename if isinstance(
base_filename, list) else [base_filename]
cfg_dict_list = list()
cfg_text_list = list()
for f in base_filename:
_cfg_dict, _cfg_text = Config._file2dict(osp.join(cfg_dir, f))
cfg_dict_list.append(_cfg_dict)
cfg_text_list.append(_cfg_text)
base_cfg_dict = dict()
for c in cfg_dict_list:
if len(base_cfg_dict.keys() & c.keys()) > 0:
raise KeyError('Duplicate key is not allowed among bases')
base_cfg_dict.update(c)
base_cfg_dict = Config._merge_a_into_b(cfg_dict, base_cfg_dict)
cfg_dict = base_cfg_dict
# merge cfg_text
cfg_text_list.append(cfg_text)
cfg_text = '\n'.join(cfg_text_list)
return cfg_dict, cfg_text
@staticmethod
def _merge_a_into_b(a, b):
# merge dict `a` into dict `b` (non-inplace). values in `a` will
# overwrite `b`.
# copy first to avoid inplace modification
b = b.copy()
for k, v in a.items():
if isinstance(v, dict) and k in b and not v.pop(DELETE_KEY, False):
if not isinstance(b[k], dict):
raise TypeError(
f'{k}={v} in child config cannot inherit from base '
f'because {k} is a dict in the child config but is of '
f'type {type(b[k])} in base config. You may set '
f'`{DELETE_KEY}=True` to ignore the base config')
b[k] = Config._merge_a_into_b(v, b[k])
else:
b[k] = v
return b
@staticmethod
def fromfile(filename):
cfg_dict, cfg_text = Config._file2dict(filename)
return Config(cfg_dict, cfg_text=cfg_text, filename=filename)
@staticmethod
def auto_argparser(description=None):
"""Generate argparser from config file automatically (experimental)
"""
partial_parser = ArgumentParser(description=description)
partial_parser.add_argument('config', help='config file path')
cfg_file = partial_parser.parse_known_args()[0].config
cfg = Config.fromfile(cfg_file)
parser = ArgumentParser(description=description)
parser.add_argument('config', help='config file path')
add_args(parser, cfg)
return parser, cfg
def __init__(self, cfg_dict=None, cfg_text=None, filename=None):
if cfg_dict is None:
cfg_dict = dict()
elif not isinstance(cfg_dict, dict):
raise TypeError('cfg_dict must be a dict, but '
f'got {type(cfg_dict)}')
super(Config, self).__setattr__('_cfg_dict', ConfigDict(cfg_dict))
super(Config, self).__setattr__('_filename', filename)
if cfg_text:
text = cfg_text
elif filename:
with open(filename, 'r') as f:
text = f.read()
else:
text = ''
super(Config, self).__setattr__('_text', text)
@property
def filename(self):
return self._filename
@property
def text(self):
return self._text
@property
def pretty_text(self):
indent = 4
def _indent(s_, num_spaces):
s = s_.split('\n')
if len(s) == 1:
return s_
first = s.pop(0)
s = [(num_spaces * ' ') + line for line in s]
s = '\n'.join(s)
s = first + '\n' + s
return s
def _format_basic_types(k, v):
if isinstance(v, str):
v_str = f"'{v}'"
else:
v_str = str(v)
attr_str = f'{str(k)}={v_str}'
attr_str = _indent(attr_str, indent)
return attr_str
def _format_list(k, v):
# check if all items in the list are dict
if all(isinstance(_, dict) for _ in v):
v_str = '[\n'
v_str += '\n'.join(
f'dict({_indent(_format_dict(v_), indent)}),'
for v_ in v).rstrip(',')
attr_str = f'{str(k)}={v_str}'
attr_str = _indent(attr_str, indent) + ']'
else:
attr_str = _format_basic_types(k, v)
return attr_str
def _format_dict(d, outest_level=False):
r = ''
s = []
for idx, (k, v) in enumerate(d.items()):
is_last = idx >= len(d) - 1
end = '' if outest_level or is_last else ','
if isinstance(v, dict):
v_str = '\n' + _format_dict(v)
attr_str = f'{str(k)}=dict({v_str}'
attr_str = _indent(attr_str, indent) + ')' + end
elif isinstance(v, list):
attr_str = _format_list(k, v) + end
else:
attr_str = _format_basic_types(k, v) + end
s.append(attr_str)
r += '\n'.join(s)
return r
cfg_dict = self._cfg_dict.to_dict()
text = _format_dict(cfg_dict, outest_level=True)
return text
def __repr__(self):
return f'Config (path: {self.filename}): {self._cfg_dict.__repr__()}'
def __len__(self):
return len(self._cfg_dict)
def __getattr__(self, name):
return getattr(self._cfg_dict, name)
def __getitem__(self, name):
return self._cfg_dict.__getitem__(name)
def __setattr__(self, name, value):
if isinstance(value, dict):
value = ConfigDict(value)
self._cfg_dict.__setattr__(name, value)
def __setitem__(self, name, value):
if isinstance(value, dict):
value = ConfigDict(value)
self._cfg_dict.__setitem__(name, value)
def __iter__(self):
return iter(self._cfg_dict)
def dump(self):
cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
format_text = json.dumps(cfg_dict, indent=2)
return format_text
def merge_from_dict(self, options):
"""Merge list into cfg_dict
Merge the dict parsed by MultipleKVAction into this cfg.
Examples:
>>> options = {'model.backbone.depth': 50,
... 'model.backbone.with_cp':True}
>>> cfg = Config(dict(model=dict(backbone=dict(type='ResNet'))))
>>> cfg.merge_from_dict(options)
>>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
>>> assert cfg_dict == dict(
... model=dict(backbone=dict(depth=50, with_cp=True)))
Args:
options (dict): dict of configs to merge from.
"""
option_cfg_dict = {}
for full_key, v in options.items():
d = option_cfg_dict
key_list = full_key.split('.')
for subkey in key_list[:-1]:
d.setdefault(subkey, ConfigDict())
d = d[subkey]
subkey = key_list[-1]
d[subkey] = v
cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
super(Config, self).__setattr__(
'_cfg_dict', Config._merge_a_into_b(option_cfg_dict, cfg_dict))
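def _demo_merge_a_into_b():
    # Illustrative usage sketch (added for clarity; not part of the original
    # file): child values override base values recursively during the merge,
    # so unrelated base keys survive.
    base = dict(model=dict(depth=18, type='ResNet'))
    child = dict(model=dict(depth=50))
    merged = Config._merge_a_into_b(child, base)
    assert merged == dict(model=dict(depth=50, type='ResNet'))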
class DictAction(Action):
"""
    argparse action to split an argument into KEY=VALUE form
    on the first '=' and append the pair to a dictionary. List options
    should be passed as comma-separated values, i.e. KEY=V1,V2,V3.
"""
@staticmethod
def _parse_int_float_bool(val):
try:
return int(val)
except ValueError:
pass
try:
return float(val)
except ValueError:
pass
        if val.lower() in ['true', 'false']:
            return val.lower() == 'true'
return val
def __call__(self, parser, namespace, values, option_string=None):
options = {}
for kv in values:
key, val = kv.split('=', maxsplit=1)
val = [self._parse_int_float_bool(v) for v in val.split(',')]
if len(val) == 1:
val = val[0]
options[key] = val
setattr(namespace, self.dest, options)
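def _demo_dict_action():
    # Illustrative usage sketch (added; not part of the original file):
    # DictAction turns repeated KEY=VALUE tokens into a single dict, parsing
    # ints/floats/bools and splitting comma-separated values into lists.
    parser = ArgumentParser()
    parser.add_argument('--cfg-options', nargs='+', action=DictAction)
    args = parser.parse_args(['--cfg-options', 'lr=0.01', 'steps=10,20', 'flag=true'])
    assert args.cfg_options == {'lr': 0.01, 'steps': [10, 20], 'flag': True}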
| 12,100
| 33.377841
| 79
|
py
|
Rail-Detection
|
Rail-Detection-main/utils/common.py
|
import os, argparse
from utils.dist_utils import is_main_process, dist_print, DistSummaryWriter
from utils.config import Config
import torch
import time
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('config', help = 'path to config file')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--dataset', default = None, type = str)
parser.add_argument('--data_root', default = None, type = str)
parser.add_argument('--epoch', default = None, type = int)
parser.add_argument('--batch_size', default = None, type = int)
parser.add_argument('--optimizer', default = None, type = str)
parser.add_argument('--learning_rate', default = None, type = float)
parser.add_argument('--weight_decay', default = None, type = float)
parser.add_argument('--momentum', default = None, type = float)
parser.add_argument('--scheduler', default = None, type = str)
parser.add_argument('--steps', default = None, type = int, nargs='+')
parser.add_argument('--gamma', default = None, type = float)
parser.add_argument('--warmup', default = None, type = str)
parser.add_argument('--warmup_iters', default = None, type = int)
parser.add_argument('--backbone', default = None, type = str)
parser.add_argument('--griding_num', default = None, type = int)
parser.add_argument('--note', default = None, type = str)
parser.add_argument('--log_path', default = None, type = str)
parser.add_argument('--finetune', default = None, type = str)
parser.add_argument('--resume', default = None, type = str)
parser.add_argument('--test_model', default = None, type = str)
parser.add_argument('--test_work_dir', default = None, type = str)
parser.add_argument('--num_lanes', default = None, type = int)
parser.add_argument('--auto_backup', action='store_true', help='automatically backup current code in the log path')
return parser
def merge_config():
args = get_args().parse_args()
cfg = Config.fromfile(args.config)
items = ['dataset','data_root','epoch','batch_size','optimizer','learning_rate',
'weight_decay','momentum','scheduler','steps','gamma','warmup','warmup_iters'
,'griding_num','backbone','note','log_path',
'finetune','resume', 'test_model','test_work_dir', 'num_lanes']
for item in items:
if getattr(args, item) is not None:
dist_print('merge ', item, ' config')
setattr(cfg, item, getattr(args, item))
return args, cfg
def save_model(net, optimizer, epoch, save_path, distributed):
if is_main_process():
model_state_dict = net.state_dict()
state = {'model': model_state_dict, 'optimizer': optimizer.state_dict()}
# state = {'model': model_state_dict}
assert os.path.exists(save_path)
model_path = os.path.join(save_path, 'ep%03d.pth' % epoch)
torch.save(state, model_path)
import pathspec
def cp_projects(auto_backup, to_path):
if is_main_process() and auto_backup:
with open('./.gitignore','r') as fp:
ign = fp.read()
ign += '\n.git'
spec = pathspec.PathSpec.from_lines(pathspec.patterns.GitWildMatchPattern, ign.splitlines())
all_files = {os.path.join(root,name) for root,dirs,files in os.walk('./') for name in files}
matches = spec.match_files(all_files)
matches = set(matches)
to_cp_files = all_files - matches
dist_print('Copying projects to '+ to_path + ' for backup')
t0 = time.time()
warning_flag = True
for f in to_cp_files:
dirs = os.path.join(to_path,'code',os.path.split(f[2:])[0])
if not os.path.exists(dirs):
os.makedirs(dirs)
os.system('cp %s %s'%(f,os.path.join(to_path,'code',f[2:])))
elapsed_time = time.time() - t0
if elapsed_time > 5 and warning_flag:
                dist_print('If the program is stuck, it might be copying large files in this directory. Please don\'t set --auto_backup, or keep your working directory clean, i.e., don\'t place large files such as datasets or log results under this directory.')
warning_flag = False
import datetime, os
def get_work_dir(cfg):
now = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
hyper_param_str = '_lr_%1.0e_b_%d' % (cfg.learning_rate, cfg.batch_size)
work_dir = os.path.join(cfg.log_path, now + hyper_param_str + cfg.note)
return work_dir
def get_logger(work_dir, cfg):
logger = DistSummaryWriter(work_dir)
config_txt = os.path.join(work_dir, 'cfg.txt')
if is_main_process():
with open(config_txt, 'w') as fp:
fp.write(str(cfg))
return logger
| 5,025
| 43.477876
| 260
|
py
|
Rail-Detection
|
Rail-Detection-main/utils/factory.py
|
from utils.loss import SoftmaxFocalLoss, ParsingRelationLoss, ParsingRelationDis
from utils.metrics import MultiLabelAcc, AccTopk, Metric_mIoU
from utils.dist_utils import DistSummaryWriter
import torch
def get_optimizer(net,cfg):
training_params = filter(lambda p: p.requires_grad, net.parameters())
if cfg.optimizer == 'Adam':
optimizer = torch.optim.Adam(training_params, lr=cfg.learning_rate, weight_decay=cfg.weight_decay)
elif cfg.optimizer == 'SGD':
optimizer = torch.optim.SGD(training_params, lr=cfg.learning_rate, momentum=cfg.momentum,
weight_decay=cfg.weight_decay)
else:
raise NotImplementedError
return optimizer
def get_scheduler(optimizer, cfg, iters_per_epoch):
if cfg.scheduler == 'multi':
scheduler = MultiStepLR(optimizer, cfg.steps, cfg.gamma, iters_per_epoch, cfg.warmup, iters_per_epoch if cfg.warmup_iters is None else cfg.warmup_iters)
elif cfg.scheduler == 'cos':
scheduler = CosineAnnealingLR(optimizer, cfg.epoch * iters_per_epoch, eta_min = 0, warmup = cfg.warmup, warmup_iters = cfg.warmup_iters)
else:
raise NotImplementedError
return scheduler
def get_loss_dict(cfg):
loss_dict = {
'name': ['cls_loss', ],
'op': [SoftmaxFocalLoss(2), ],
'weight': [1.0, ],
'data_src': [('cls_out', 'cls_label'), ]
}
return loss_dict
def get_loss_dict_seg(cfg):
loss_dict = {
'name': ['seg_loss', ],
'op': [torch.nn.CrossEntropyLoss(), ],
'weight': [1.0, ],
'data_src': [('seg_out', 'seg_label'), ]
}
return loss_dict
def get_metric_dict(cfg):
metric_dict = {
'name': ['top1', 'top2', 'top3'],
'op': [MultiLabelAcc(), AccTopk(cfg.griding_num, 2), AccTopk(cfg.griding_num, 3)],
'data_src': [('cls_out', 'cls_label'), ('cls_out', 'cls_label'), ('cls_out', 'cls_label')]
}
return metric_dict
def get_metric_dict_seg(cfg):
metric_dict = {
'name': ['iou'],
'op': [Metric_mIoU(cfg.num_lanes+1)],
'data_src': [('seg_out', 'seg_label')]
}
return metric_dict
class MultiStepLR:
def __init__(self, optimizer, steps, gamma = 0.1, iters_per_epoch = None, warmup = None, warmup_iters = None):
self.warmup = warmup
self.warmup_iters = warmup_iters
self.optimizer = optimizer
self.steps = steps
self.steps.sort()
self.gamma = gamma
self.iters_per_epoch = iters_per_epoch
self.iters = 0
self.base_lr = [group['lr'] for group in optimizer.param_groups]
def step(self, external_iter = None):
self.iters += 1
if external_iter is not None:
self.iters = external_iter
if self.warmup == 'linear' and self.iters < self.warmup_iters:
rate = self.iters / self.warmup_iters
for group, lr in zip(self.optimizer.param_groups, self.base_lr):
group['lr'] = lr * rate
return
# multi policy
if self.iters % self.iters_per_epoch == 0:
epoch = int(self.iters / self.iters_per_epoch)
power = -1
for i, st in enumerate(self.steps):
if epoch < st:
power = i
break
if power == -1:
power = len(self.steps)
# print(self.iters, self.iters_per_epoch, self.steps, power)
for group, lr in zip(self.optimizer.param_groups, self.base_lr):
group['lr'] = lr * (self.gamma ** power)
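def _demo_multistep_schedule():
    # Illustrative usage sketch (added; not part of the original file): with
    # steps=[2, 4], gamma=0.1 and one iteration per epoch, the lr stays at
    # base_lr for epochs < 2, drops to base_lr * 0.1 for 2 <= epoch < 4, and
    # to base_lr * 0.01 afterwards.
    opt = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=1.0)
    sched = MultiStepLR(opt, steps=[2, 4], gamma=0.1, iters_per_epoch=1)
    sched.step(external_iter=3)  # epoch 3 -> one decay applied
    assert abs(opt.param_groups[0]['lr'] - 0.1) < 1e-9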
import math
class CosineAnnealingLR:
def __init__(self, optimizer, T_max , eta_min = 0, warmup = None, warmup_iters = None):
self.warmup = warmup
self.warmup_iters = warmup_iters
self.optimizer = optimizer
self.T_max = T_max
self.eta_min = eta_min
self.iters = 0
self.base_lr = [group['lr'] for group in optimizer.param_groups]
def step(self, external_iter = None):
self.iters += 1
if external_iter is not None:
self.iters = external_iter
if self.warmup == 'linear' and self.iters < self.warmup_iters:
rate = self.iters / self.warmup_iters
for group, lr in zip(self.optimizer.param_groups, self.base_lr):
group['lr'] = lr * rate
return
# cos policy
for group, lr in zip(self.optimizer.param_groups, self.base_lr):
group['lr'] = self.eta_min + (lr - self.eta_min) * (1 + math.cos(math.pi * self.iters / self.T_max)) / 2
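def _demo_cosine_schedule():
    # Illustrative usage sketch (added; not part of the original file): the
    # update above implements
    #   lr(t) = eta_min + (base_lr - eta_min) * (1 + cos(pi * t / T_max)) / 2,
    # i.e. a smooth decay from base_lr at t = 0 to eta_min at t = T_max.
    opt = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=0.1)
    sched = CosineAnnealingLR(opt, T_max=100, eta_min=0.0)
    sched.step(external_iter=50)  # halfway -> lr = 0.05
    assert abs(opt.param_groups[0]['lr'] - 0.05) < 1e-9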
| 4,637
| 33.61194
| 160
|
py
|
Rail-Detection
|
Rail-Detection-main/utils/speed_simple.py
|
import torch
import time, sys
import numpy as np
sys.path.append("..")
from model.model import parsingNet
# from segmentation.model_seg import parsingNet
torch.backends.cudnn.benchmark = True
net = parsingNet(pretrained = False, backbone='34', cls_dim=(200, 52, 4)).cuda()
net.eval()
x = torch.zeros((1,3,288,800)).cuda() + 1
for i in range(10):
y = net(x)
t_all = []
for i in range(300):
    torch.cuda.synchronize()  # wait for pending GPU work so the timing is accurate
    t1 = time.time()
    y = net(x)
    torch.cuda.synchronize()
    t2 = time.time()
    t_all.append(t2 - t1)
print('average time:', np.mean(t_all))
print('average fps:', 1 / np.mean(t_all))
print('fastest time:', min(t_all))
print('fastest fps:', 1 / min(t_all))
print('slowest time:', max(t_all))
print('slowest fps:', 1 / max(t_all))
from torchinfo import summary
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net.to(device)
summary(net, input_size=(1, 3, 288, 800))
from torchstat import stat
stat(net.cpu(), (3, 288, 800))
| 934
| 23.605263
| 80
|
py
|
Rail-Detection
|
Rail-Detection-main/utils/metrics.py
|
import numpy as np
import torch
import time,pdb
def converter(data):
    # accept torch tensors or numpy arrays and return a flat numpy array
    if isinstance(data, torch.Tensor):
        data = data.cpu().data.numpy()
    return data.flatten()
def fast_hist(label_pred, label_true, num_classes):
hist = np.bincount(num_classes * label_true.astype(int) + label_pred, minlength=num_classes ** 2)
hist = hist.reshape(num_classes, num_classes)
return hist
class Metric_mIoU():
def __init__(self,class_num):
self.class_num = class_num
self.hist = np.zeros((self.class_num,self.class_num))
def update(self,predict,target):
predict,target = converter(predict),converter(target)
self.hist += fast_hist(predict,target,self.class_num)
def reset(self):
self.hist = np.zeros((self.class_num,self.class_num))
def get_miou(self):
miou = np.diag(self.hist) / (
np.sum(self.hist, axis=1) + np.sum(self.hist, axis=0) -
np.diag(self.hist))
miou = np.nanmean(miou)
return miou
def get_acc(self):
acc = np.diag(self.hist) / self.hist.sum(axis=1)
acc = np.nanmean(acc)
return acc
def get(self):
return self.get_miou()
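def _demo_miou():
    # Illustrative usage sketch (added; not part of the original file): with
    # fast_hist, hist[i, j] counts pixels of true class i predicted as class
    # j, and per-class IoU is diag / (row_sum + col_sum - diag).
    m = Metric_mIoU(2)
    m.update(np.array([0, 0, 1, 1]), np.array([0, 1, 1, 1]))
    # hist = [[1, 0], [1, 2]] -> IoU_0 = 1/2, IoU_1 = 2/3 -> mIoU = 7/12
    assert abs(m.get() - 7.0 / 12.0) < 1e-9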
class MultiLabelAcc():
def __init__(self):
self.cnt = 0
self.correct = 0
def reset(self):
self.cnt = 0
self.correct = 0
def update(self,predict,target):
predict,target = converter(predict),converter(target)
self.cnt += len(predict)
self.correct += np.sum(predict==target)
def get_acc(self):
return self.correct * 1.0 / self.cnt
def get(self):
return self.get_acc()
class AccTopk():
def __init__(self,background_classes,k):
self.background_classes = background_classes
self.k = k
self.cnt = 0
self.top5_correct = 0
def reset(self):
self.cnt = 0
self.top5_correct = 0
def update(self,predict,target):
predict,target = converter(predict),converter(target)
self.cnt += len(predict)
background_idx = (predict == self.background_classes) + (target == self.background_classes)
self.top5_correct += np.sum(predict[background_idx] == target[background_idx])
not_background_idx = np.logical_not(background_idx)
self.top5_correct += np.sum(np.absolute(predict[not_background_idx]-target[not_background_idx])<self.k)
def get(self):
return self.top5_correct * 1.0 / self.cnt
def update_metrics(metric_dict, pair_data):
for i in range(len(metric_dict['name'])):
metric_op = metric_dict['op'][i]
data_src = metric_dict['data_src'][i]
metric_op.update(pair_data[data_src[0]], pair_data[data_src[1]])
def reset_metrics(metric_dict):
for op in metric_dict['op']:
op.reset()
if __name__ == '__main__':
# p = np.random.randint(5, size=(800, 800))
# t = np.zeros((800, 800))
# me = Metric_mIoU(5)
# me.update(p,p)
# me.update(p,t)
# me.update(p,p)
# me.update(p,t)
# print(me.get_miou())
# print(me.get_acc())
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
b = np.array([1, 1, 2, 2, 2, 3, 3, 4, 4, 0])
me = AccTopk(0,5)
me.update(b,a)
print(me.get())
| 3,271
| 31.39604
| 111
|
py
|
Rail-Detection
|
Rail-Detection-main/utils/__init__.py
| 0
| 0
| 0
|
py
|
|
Rail-Detection
|
Rail-Detection-main/utils/evaluation.py
|
import numpy as np
import json
from scipy import special
from IPython import embed
color_list = [(0,0,225), (255,0,0), (0,225,0), (255,0,225), (255,255,225), (0,255,255), (255,255,0), (125,255,255)]
thickness_list = [1, 3, 5, 7, 9, 11, 13, 15]
thickness_list.reverse()
def grid_2_inter(out, griding_num):
    # out: (griding_num + 1, num_rows, num_lanes) logits; the extra class
    # (index griding_num) means "no lane point on this row"
    out = out.data.cpu().numpy()
    out_loc = np.argmax(out, axis=0)
    # soft argmax over the real grid cells: a probability-weighted average of
    # the grid indices gives a sub-cell location estimate
    prob = special.softmax(out[:-1, :, :], axis=0)
    idx = np.arange(griding_num)
    idx = idx.reshape(-1, 1, 1)
    loc = np.sum(prob * idx, axis=0)
    # rows whose hard argmax is the "absent" class keep the sentinel value
    loc[out_loc == griding_num] = griding_num
    out_loc = loc
    lanes = []
    for i in range(out_loc.shape[1]):
        out_i = out_loc[:, i]
        # map grid locations back to x-coordinates in the 1280-wide frame;
        # -2 marks rows without a lane point
        lane = [int(round((loc + 0.5) * 1280.0 / (griding_num - 1))) if loc != griding_num else -2 for loc in out_i]
        lanes.append(lane)
    return np.array(lanes)
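def _demo_grid_2_inter():
    # Illustrative usage sketch (added; not part of the original file; torch
    # is imported locally just for the demo): with griding_num=2, a strong
    # vote for grid cell 1 on the only row decodes to x ~ round(1.5 * 1280).
    import torch
    out = torch.zeros(3, 1, 1)   # (griding_num + 1, num_rows, num_lanes)
    out[1, 0, 0] = 10.0          # confident vote for grid cell 1
    lanes = grid_2_inter(out, 2)
    assert lanes.tolist() == [[1920]]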
def mask_2_inter(mask, row_anchor, num_lanes=4):
all_idx = np.zeros((num_lanes, len(row_anchor)))
for i, r in enumerate(row_anchor):
label_r = np.asarray(mask)[int(round(r))]
for lane_idx in range(1, num_lanes + 1):
pos = np.where(label_r == lane_idx)[0]
# pos = np.where(label_r == color_list[lane_idx])[0]
if len(pos) == 0:
all_idx[lane_idx - 1, i] = -1
continue
pos = np.mean(pos)
all_idx[lane_idx - 1, i] = pos
return all_idx
class LaneEval(object):
pixel_thresh = 6
pt_thresh = 0.85
@staticmethod
def line_accuracy(pred, gt, thresh):
pred = np.array([p if p >= 0 else -100 for p in pred])
gt = np.array([g if g >= 0 else -100 for g in gt])
return np.sum(np.where(np.abs(pred - gt) < thresh, 1., 0.)) / len(gt)
@staticmethod
def bench(pred, gt, y_samples):
# embed()
if any(len(p) != len(y_samples) for p in pred):
raise Exception('Format of lanes error.')
line_accs = []
fp, fn = 0., 0.
matched = 0.
for x_gts, x_preds in zip(gt, pred):
acc = LaneEval.line_accuracy(np.array(x_preds), np.array(x_gts), LaneEval.pixel_thresh)
if acc < LaneEval.pt_thresh:
fn += 1
else:
matched += 1
line_accs.append(acc)
fp = len(pred) - matched
if len(gt) > 4 and fn > 0:
fn -= 1
s = sum(line_accs)
if len(gt) > 4:
s -= min(line_accs)
return s / max(min(4.0, len(gt)), 1.), fp / len(pred) if len(pred) > 0 else 0., fn / max(min(len(gt), 4.) , 1.)
@staticmethod
def bench_all(preds, gts, y_samples):
accuracy, fp, fn = 0., 0., 0.
for pred, gt in zip(preds, gts):
try:
a, p, n = LaneEval.bench(pred, gt, y_samples)
except BaseException as e:
raise Exception('Format of lanes error.')
accuracy += a
fp += p
fn += n
num = len(gts)
# the first return parameter is the default ranking parameter
return json.dumps([
{'name': 'Accuracy', 'value': accuracy / num, 'order': 'desc'},
{'name': 'FP', 'value': fp / num, 'order': 'asc'},
{'name': 'FN', 'value': fn / num, 'order': 'asc'}
])
if __name__ == '__main__':
from data.constant import raildb_row_anchor
preds = np.random.randint(0, high=1280, size=(4, 4, len(raildb_row_anchor)))
gts = np.random.randint(0, high=1280, size=(4, 4, len(raildb_row_anchor)))
res = LaneEval.bench_all(preds, gts, raildb_row_anchor)
res = json.loads(res)
for r in res:
print(r['name'], r['value'])
| 3,658
| 32.568807
| 119
|
py
|
Rail-Detection
|
Rail-Detection-main/data/mytransforms.py
|
import numbers
import random
import numpy as np
from PIL import Image, ImageOps, ImageFilter
#from config import cfg
import torch
import pdb
import cv2
# ===============================img transforms============================
class Compose2(object):
# compose all transforms for image and label
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img, mask, bbx=None):
if bbx is None:
for t in self.transforms:
img, mask = t(img, mask)
return img, mask
for t in self.transforms:
img, mask, bbx = t(img, mask, bbx)
return img, mask, bbx
class FreeScale(object):
# resize image and mask
def __init__(self, size):
self.size = size # (h, w)
def __call__(self, img, mask):
return img.resize((self.size[1], self.size[0]), Image.BILINEAR), mask.resize((self.size[1], self.size[0]), Image.NEAREST)
class FreeScaleMask(object):
# resize mask
def __init__(self,size):
self.size = size
def __call__(self,mask):
return mask.resize((self.size[1], self.size[0]), Image.NEAREST)
class Scale(object):
# resize based on the smaller size
def __init__(self, size):
self.size = size
def __call__(self, img, mask):
if img.size != mask.size:
print(img.size)
print(mask.size)
assert img.size == mask.size
w, h = img.size
if (w <= h and w == self.size) or (h <= w and h == self.size):
return img, mask
if w < h:
ow = self.size
oh = int(self.size * h / w)
return img.resize((ow, oh), Image.BILINEAR), mask.resize((ow, oh), Image.NEAREST)
else:
oh = self.size
ow = int(self.size * w / h)
return img.resize((ow, oh), Image.BILINEAR), mask.resize((ow, oh), Image.NEAREST)
class RandomRotate(object):
"""Crops the given PIL.Image at a random location to have a region of
the given size. size can be a tuple (target_height, target_width)
or an integer, in which case the target will be of a square shape (size, size)
"""
def __init__(self, angle):
self.angle = angle
def __call__(self, image, label):
assert label is None or image.size == label.size
angle = random.randint(0, self.angle * 2) - self.angle
label = label.rotate(angle, resample=Image.NEAREST)
image = image.rotate(angle, resample=Image.BILINEAR)
return image, label
# ===============================label transforms============================
class DeNormalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
return tensor
class MaskToTensor(object):
def __call__(self, img):
return torch.from_numpy(np.array(img, dtype=np.int32)).long()
def find_start_pos(row_sample, start_line):
# row_sample = row_sample.sort()
# for i,r in enumerate(row_sample):
# if r >= start_line:
# return i
l, r = 0, len(row_sample)-1
while True:
mid = int((l+r)/2)
if r - l == 1:
return r
if row_sample[mid] < start_line:
l = mid
if row_sample[mid] > start_line:
r = mid
if row_sample[mid] == start_line:
return mid
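def _demo_find_start_pos():
    # Illustrative usage sketch (added; not part of the original file): the
    # binary search above returns the index of the first row sample at or
    # above start_line, for start lines inside the sampled range.
    assert find_start_pos([10, 20, 30, 40], 25) == 2
    assert find_start_pos([10, 20, 30, 40], 30) == 2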
class RandomLROffsetLABEL(object):
def __init__(self,max_offset):
self.max_offset = max_offset
def __call__(self,img,label):
offset = np.random.randint(-self.max_offset,self.max_offset)
w, h = img.size
img = np.array(img)
if offset > 0:
img[:,offset:,:] = img[:,0:w-offset,:]
img[:,:offset,:] = 0
if offset < 0:
real_offset = -offset
img[:,0:w-real_offset,:] = img[:,real_offset:,:]
img[:,w-real_offset:,:] = 0
label = np.array(label)
if offset > 0:
label[:,offset:] = label[:,0:w-offset]
label[:,:offset] = 0
if offset < 0:
offset = -offset
label[:,0:w-offset] = label[:,offset:]
label[:,w-offset:] = 0
return Image.fromarray(img),Image.fromarray(label)
class RandomUDoffsetLABEL(object):
def __init__(self,max_offset):
self.max_offset = max_offset
def __call__(self,img,label):
offset = np.random.randint(-self.max_offset,self.max_offset)
w, h = img.size
img = np.array(img)
if offset > 0:
img[offset:,:,:] = img[0:h-offset,:,:]
img[:offset,:,:] = 0
if offset < 0:
real_offset = -offset
img[0:h-real_offset,:,:] = img[real_offset:,:,:]
img[h-real_offset:,:,:] = 0
label = np.array(label)
if offset > 0:
label[offset:,:] = label[0:h-offset,:]
label[:offset,:] = 0
if offset < 0:
offset = -offset
label[0:h-offset,:] = label[offset:,:]
label[h-offset:,:] = 0
return Image.fromarray(img),Image.fromarray(label)
| 5,217
| 30.245509
| 129
|
py
|
Rail-Detection
|
Rail-Detection-main/data/constant.py
|
# row anchors are a series of pre-defined coordinates in image height to detect lanes
# the row anchors are defined according to the evaluation protocol of CULane and Tusimple
# the anchors below are defined in the original 720-pixel image height and are rescaled when the image is resized to 288x800 for training
# you can modify these row anchors according to your training image resolution
# start from 200 with step 10
raildb_row_anchor = [200, 210, 220, 230, 240, 250, 260, 270, 280, 290, 300, 310, 320,
330, 340, 350, 360, 370, 380, 390, 400, 410, 420, 430, 440, 450,
460, 470, 480, 490, 500, 510, 520, 530, 540, 550, 560, 570, 580,
590, 600, 610, 620, 630, 640, 650, 660, 670, 680, 690, 700, 710]
| 745
| 66.818182
| 116
|
py
|
Rail-Detection
|
Rail-Detection-main/data/dataloader.py
|
import torch, os
import numpy as np
import torchvision.transforms as transforms
import data.mytransforms as mytransforms
from data.dataset import raildb_row_anchor
from data.dataset import RailClsDataset, RailTestDataset
def get_train_loader(batch_size, data_root, griding_num=56, distributed=True, num_rails=4, mode='train', type='all'):
target_transform = transforms.Compose([
mytransforms.FreeScaleMask((288, 800)),
mytransforms.MaskToTensor(),
])
img_transform = transforms.Compose([
transforms.Resize((288, 800)),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
if mode=='train':
simu_transform = mytransforms.Compose2([
mytransforms.RandomRotate(6),
mytransforms.RandomUDoffsetLABEL(100),
mytransforms.RandomLROffsetLABEL(200)
])
else:
simu_transform = None
train_dataset = RailClsDataset(
data_root,
data_root+'meta.csv',
img_transform = img_transform,
target_transform = target_transform,
simu_transform = simu_transform,
griding_num = griding_num,
row_anchor = raildb_row_anchor,
num_rails = num_rails,
mode = mode,
type = type,
)
if distributed:
sampler_train = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
sampler_train = torch.utils.data.RandomSampler(train_dataset)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, sampler=sampler_train, num_workers=4)
return train_loader, len(raildb_row_anchor)
class SeqDistributedSampler(torch.utils.data.distributed.DistributedSampler):
    '''
    Change the behavior of DistributedSampler to sequential distributed sampling.
    Sequential sampling helps the stability of multi-threaded testing, which needs multi-threaded file io.
    Without sequential sampling, the file io on one thread may interfere with other threads.
    '''
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=False):
super().__init__(dataset, num_replicas, rank, shuffle)
def __iter__(self):
g = torch.Generator()
g.manual_seed(self.epoch)
if self.shuffle:
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = list(range(len(self.dataset)))
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
num_per_rank = int(self.total_size // self.num_replicas)
# sequential sampling
indices = indices[num_per_rank * self.rank : num_per_rank * (self.rank + 1)]
assert len(indices) == self.num_samples
return iter(indices)
| 3,045
| 37.075
| 122
|
py
|
Rail-Detection
|
Rail-Detection-main/data/dataset.py
|
import torch
from PIL import Image
import os
import pdb
import numpy as np
import cv2
import random
import csv
import pandas as pd
import data.mytransforms as mytransforms
# import mytransforms as mytransforms
import torchvision.transforms as transforms
from IPython import embed
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
raildb_row_anchor = [200, 210, 220, 230, 240, 250, 260, 270, 280, 290, 300, 310, 320,
330, 340, 350, 360, 370, 380, 390, 400, 410, 420, 430, 440, 450,
460, 470, 480, 490, 500, 510, 520, 530, 540, 550, 560, 570, 580,
590, 600, 610, 620, 630, 640, 650, 660, 670, 680, 690, 700, 710]
def loader_func(path):
return Image.open(path).resize((1280,720), Image.NEAREST)
def inter_2_grid(intersections, num_cols, w):
    # quantize per-row rail x-positions (in pixels) into num_cols grid cells;
    # -1 (no rail) maps to the extra "absent" class index num_cols
    num_rail, num_row = intersections.shape
    col_sample = np.linspace(0, w - 1, num_cols)
    to_pts = np.zeros((num_row, num_rail))
    for i in range(num_rail):
        pti = intersections[i, :]
        to_pts[:, i] = np.asarray(
            [int(pt // (col_sample[1] - col_sample[0])) if pt != -1 else num_cols for pt in pti])
    return to_pts.astype(int)
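def _demo_inter_2_grid():
    # Illustrative usage sketch (added; not part of the original file):
    # pixel x-positions are quantized into grid cells of width
    # (w - 1) / (num_cols - 1); -1 ("no rail") maps to the extra class
    # index num_cols.
    inter = np.array([[0.0, 639.5, -1.0]])   # one rail, three row anchors
    grid = inter_2_grid(inter, num_cols=5, w=1281)
    assert grid.tolist() == [[0], [1], [5]]  # cell width is 320 pixels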
def mask_2_inter(mask, row_anchor, num_rails=4):
all_idx = np.zeros((num_rails, len(row_anchor)))
for i, r in enumerate(row_anchor):
label_r = np.asarray(mask)[int(round(r))]
for rail_idx in range(1, num_rails + 1):
pos = np.where(label_r == rail_idx)[0]
# pos = np.where(label_r == color_list[rail_idx])[0]
if len(pos) == 0:
all_idx[rail_idx - 1, i] = -1
continue
pos = np.mean(pos)
all_idx[rail_idx - 1, i] = pos
return all_idx
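def _demo_mask_2_inter():
    # Illustrative usage sketch (added; not part of the original file): for
    # each row anchor, a rail's position is the mean column index of the
    # pixels carrying that rail's label; -1 marks rows where it is absent.
    mask = np.zeros((4, 6), dtype=int)
    mask[2, 1:4] = 1                   # rail 1 occupies columns 1..3 at row 2
    idx = mask_2_inter(mask, row_anchor=[2], num_rails=1)
    assert idx.tolist() == [[2.0]]     # mean of columns 1, 2, 3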
class RailTestDataset(torch.utils.data.Dataset):
def __init__(self, data_path, meta_file, img_transform = None, type='all'):
super(RailTestDataset, self).__init__()
self.data_path = data_path
self.meta_file = meta_file
self.img_transform = img_transform
self.type = type
pd_reader = pd.read_csv(meta_file)
random.seed(2022)
rd_ind = list(range(len(pd_reader)))
random.shuffle(rd_ind)
self.pd_reader = pd_reader.reindex(index=rd_ind)
len_image = int(len(self.pd_reader)*0.2)
self.pd_reader = self.pd_reader.iloc[len(self.pd_reader)-len_image:]
if self.type != 'all': self.img_list = list(self.pd_reader['name'][self.pd_reader[self.type].astype(bool)])
else: self.img_list = list(self.pd_reader['name'])
        print(str(self.type) + ' has {} testing images'.format(len(self.img_list)))
def __getitem__(self, index):
# parse label and image name
img_name = self.img_list[index]
jpeg_name = 'pic/' + img_name[:-12] + '/' + img_name
# read label and image
img_path = os.path.join(self.data_path, jpeg_name)
img = loader_func(img_path)
img = self.img_transform(img)
return img, jpeg_name
def __len__(self):
return len(self.img_list)
class RailClsDataset(torch.utils.data.Dataset):
def __init__(self, data_path, meta_file, img_transform = None, target_transform = None, simu_transform = None,
griding_num = 100, row_anchor = None, num_rails = 4, mode='train', type='all'):
super(RailClsDataset, self).__init__()
self.img_transform = img_transform
self.target_transform = target_transform
self.simu_transform = simu_transform
self.data_path = data_path
self.griding_num = griding_num
self.num_rails = num_rails
self.mode = mode
self.type = type
pd_reader = pd.read_csv(meta_file)
random.seed(2022)
rd_ind = list(range(len(pd_reader)))
random.shuffle(rd_ind)
self.pd_reader = pd_reader.reindex(index=rd_ind)
        print('we have {} images in total'.format(len(self.pd_reader['name'])))
# embed()
if self.mode == 'train':
len_image = int(len(self.pd_reader)*0.8)
self.pd_reader_train = self.pd_reader.iloc[:len_image]
if self.type != 'all': self.img_list = list(self.pd_reader_train['name'][self.pd_reader_train[self.type].astype(bool)])
else: self.img_list = list(self.pd_reader_train['name'])
            print(self.type + ' has {} training images'.format(len(self.img_list)))
elif self.mode == 'val':
len_image = int(len(self.pd_reader)*0.2)
self.pd_reader_val = self.pd_reader.iloc[len(self.pd_reader)-len_image:]
if self.type != 'all': self.img_list = list(self.pd_reader_val['name'][self.pd_reader_val[self.type].astype(bool)])
else: self.img_list = list(self.pd_reader_val['name'])
            print(str(self.type) + ' has {} validation images'.format(len(self.img_list)))
self.row_anchor = row_anchor
self.row_anchor.sort()
def __getitem__(self, index):
# parse label and image name
img_name = self.img_list[index]
jpeg_name = 'pic/' + img_name[:-12] + '/' + img_name
label_name = 'mask/' + img_name[:-12] + '/' + img_name.replace('jpeg', 'png')
# read label and image
label_path = os.path.join(self.data_path, label_name)
label = loader_func(label_path)
# print(label.size)
img_path = os.path.join(self.data_path, jpeg_name)
img = loader_func(img_path)
        # get the positions of intersections between polyline and rowline (num_rails, num_rows)
        inter_label = mask_2_inter(label, self.row_anchor, num_rails=self.num_rails)
# print(inter_label.shape)
# get the coordinates of rails at row anchors (num_rows, num_rails)
grid_label = inter_2_grid(inter_label, self.griding_num, label.size[0])
# print(grid_label.shape)
if self.simu_transform:
img, label = self.simu_transform(img, label)
img = self.img_transform(img)
seg_label = self.target_transform(label)
seg_label[seg_label>self.num_rails] = 0
assert (seg_label >= 0).all() & (seg_label < self.num_rails+1).all()
return img, grid_label, inter_label, seg_label, jpeg_name
def __len__(self):
return len(self.img_list)
if __name__ == "__main__":
data_path = '/home/ssd7T/lxpData/rail/dataset/'
meta_file = '/home/ssd7T/lxpData/rail/dataset/meta.csv'
img_transform = transforms.Compose([
transforms.Resize((288, 800)),
transforms.ToTensor(),
# transforms.Normalize(0.5, 0.5),
])
target_transform = transforms.Compose([
mytransforms.FreeScaleMask((288, 800)),
mytransforms.MaskToTensor(),
])
simu_transform = mytransforms.Compose2([
# mytransforms.RandomRotate(6),
# mytransforms.RandomUDoffsetLABEL(100),
# mytransforms.RandomLROffsetLABEL(200)
])
all_dataset = RailClsDataset(data_path, meta_file, img_transform = img_transform, target_transform = target_transform,
simu_transform = simu_transform, griding_num=56, row_anchor = raildb_row_anchor, num_rails=4, mode='val', type='far')
all_loader = torch.utils.data.DataLoader(all_dataset, batch_size=1, shuffle=False, num_workers=1)
col_sample = np.linspace(0, 800 - 1, 56)
col_sample_w = col_sample[1] - col_sample[0]
color_list = [(0,0,225), (255,0,0), (0,225,0), (255,0,225), (255,255,225), (0,255,255)]
for ind, (img, grid_label, inter_label, label, jpeg_name) in enumerate(all_loader):
image = (img[0].permute(1, 2, 0).numpy() * 255).astype(int)
label = (label[0].numpy()).astype(int)
label = np.repeat(np.expand_dims(label, axis=2), 3, axis=2)
canvas = image.copy().astype(np.uint8)
grid_label[grid_label == 56] = 0
for i in range(grid_label[0].shape[0]):
for j in range(grid_label[0].shape[1]):
if grid_label[0][i,j] > 0:
point = (int(grid_label[0][i,j]*col_sample_w)-1, int(raildb_row_anchor[j]/2.5)-1)
cv2.circle(canvas, point, 5, color_list[i], -1)
for lb_value in range(4):
label[label[:,:,0]==(lb_value+1)] = color_list[lb_value]
arr_output = np.concatenate([image, label, canvas], axis=0)
vis_path = data_path+jpeg_name[0].replace('pic', 'check')
print(vis_path)
if not os.path.exists(os.path.dirname(vis_path)): os.makedirs(os.path.dirname(vis_path))
cv2.imwrite(vis_path, arr_output)
break
| 8,571
| 38.141553
| 137
|
py
|
Rail-Detection
|
Rail-Detection-main/data/__init__.py
| 0
| 0
| 0
|
py
|
|
Rail-Detection
|
Rail-Detection-main/model/hubconf.py
|
# Optional list of dependencies required by the package
dependencies = ["torch"]
from torchvision.models.alexnet import alexnet
from torchvision.models.convnext import convnext_tiny, convnext_small, convnext_base, convnext_large
from torchvision.models.densenet import densenet121, densenet169, densenet201, densenet161
from torchvision.models.efficientnet import (
efficientnet_b0,
efficientnet_b1,
efficientnet_b2,
efficientnet_b3,
efficientnet_b4,
efficientnet_b5,
efficientnet_b6,
efficientnet_b7,
# efficientnet_v2_s,
# efficientnet_v2_m,
# efficientnet_v2_l,
)
from torchvision.models.googlenet import googlenet
from torchvision.models.inception import inception_v3
from torchvision.models.mnasnet import mnasnet0_5, mnasnet0_75, mnasnet1_0, mnasnet1_3
from torchvision.models.mobilenetv2 import mobilenet_v2
from torchvision.models.mobilenetv3 import mobilenet_v3_large, mobilenet_v3_small
from torchvision.models.optical_flow import raft_large, raft_small
from torchvision.models.regnet import (
regnet_y_400mf,
regnet_y_800mf,
regnet_y_1_6gf,
regnet_y_3_2gf,
regnet_y_8gf,
regnet_y_16gf,
regnet_y_32gf,
regnet_y_128gf,
regnet_x_400mf,
regnet_x_800mf,
regnet_x_1_6gf,
regnet_x_3_2gf,
regnet_x_8gf,
regnet_x_16gf,
regnet_x_32gf,
)
from torchvision.models.resnet import (
resnet18,
resnet34,
resnet50,
resnet101,
resnet152,
resnext50_32x4d,
resnext101_32x8d,
wide_resnet50_2,
wide_resnet101_2,
)
from torchvision.models.segmentation import (
fcn_resnet50,
fcn_resnet101,
deeplabv3_resnet50,
deeplabv3_resnet101,
deeplabv3_mobilenet_v3_large,
lraspp_mobilenet_v3_large,
)
from torchvision.models.shufflenetv2 import shufflenet_v2_x0_5, shufflenet_v2_x1_0
from torchvision.models.squeezenet import squeezenet1_0, squeezenet1_1
from torchvision.models.vgg import vgg11, vgg13, vgg16, vgg19, vgg11_bn, vgg13_bn, vgg16_bn, vgg19_bn
from torchvision.models.vision_transformer import (
vit_b_16,
vit_b_32,
vit_l_16,
vit_l_32,
# vit_h_14,
)
| 4,315
| 28.972222
| 101
|
py
|
Rail-Detection
|
Rail-Detection-main/model/model.py
|
import torch
from model.backbone import resnet, mobilenet, squeezenet, VisionTransformer
import numpy as np
class conv_bn_relu(torch.nn.Module):
def __init__(self,in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1,bias=False):
super(conv_bn_relu,self).__init__()
self.conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size,
stride = stride, padding = padding, dilation = dilation,bias = bias)
self.bn = torch.nn.BatchNorm2d(out_channels)
self.relu = torch.nn.ReLU()
def forward(self,x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class parsingNet(torch.nn.Module):
def __init__(self, size=(288, 800), pretrained=True, backbone='50', cls_dim=(100, 52, 4)):
# cls_dim: (num_gridding, num_cls_per_lane, num_of_lanes)
super(parsingNet, self).__init__()
self.size = size
self.w = size[1]
self.h = size[0]
self.cls_dim = cls_dim
# input : nchw,
# 1/32,
# 288,800 -> 9,25
if backbone in ['34','18']:
self.model = resnet(backbone, pretrained=pretrained)
self.pool = torch.nn.Conv2d(512,8,1)
if backbone in ['50','101']:
self.model = resnet(backbone, pretrained=pretrained)
self.pool = torch.nn.Conv2d(2048,8,1)
if backbone in ['mobilenet_v2', 'mobilenet_v3_large', 'mobilenet_v3_small']:
self.model = mobilenet(backbone, pretrained=pretrained)
self.pool = torch.nn.Conv2d(1280,8,1)
if backbone in ['squeezenet1_0', 'squeezenet1_1',]:
self.model = squeezenet(backbone, pretrained=pretrained)
self.pool = torch.nn.Sequential(
torch.nn.Conv2d(512,8,1),
torch.nn.AdaptiveAvgPool2d((9, 25)),
)
if backbone in ['vit_b_16', ]:
self.model = VisionTransformer(backbone, pretrained=pretrained)
self.pool = torch.nn.Sequential(
torch.nn.Linear(768, 1800),
)
# input: 9,25,8 = 1800
# output: (gridding_num+1) * sample_rows * 4
        # e.g. (56+1) * 52 * 4 for griding_num=56 and 52 row anchors
self.cls_cat = torch.nn.Sequential(
torch.nn.Linear(1800, 2048),
torch.nn.ReLU(),
torch.nn.Linear(2048, np.prod(cls_dim)),
)
initialize_weights(self.cls_cat)
def forward(self, x):
# n c h w - > n 2048 sh sw
# -> n 2048
x4 = self.model(x)
fea = self.pool(x4).view(-1, 1800)
group_cat = self.cls_cat(fea).view(-1, *self.cls_dim)
return group_cat
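def _demo_parsing_net():
    # Illustrative usage sketch (added; not part of the original file): the
    # classification head reshapes the pooled 1800-d feature into
    # (num_gridding, num_cls_per_lane, num_of_lanes) logits per image.
    net = parsingNet(pretrained=False, backbone='18', cls_dim=(57, 52, 4))
    out = net(torch.zeros(1, 3, 288, 800))
    assert out.shape == (1, 57, 52, 4)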
def initialize_weights(*models):
for model in models:
real_init_weights(model)
def real_init_weights(m):
if isinstance(m, list):
for mini_m in m:
real_init_weights(mini_m)
else:
if isinstance(m, torch.nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
if m.bias is not None:
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m, torch.nn.Linear):
m.weight.data.normal_(0.0, std=0.01)
elif isinstance(m, torch.nn.BatchNorm2d):
torch.nn.init.constant_(m.weight, 1)
torch.nn.init.constant_(m.bias, 0)
elif isinstance(m,torch.nn.Module):
for mini_m in m.children():
real_init_weights(mini_m)
else:
            print('unknown module', m)
| 7,248
| 33.850962
| 106
|
py
|
Rail-Detection
|
Rail-Detection-main/model/backbone.py
|
import torch, pdb
import torchvision
import torch.nn.modules
from IPython import embed
from model.hubconf import *
# from hubconf import *
class mobilenet(torch.nn.Module):
def __init__(self, backbone, pretrained = False):
super(mobilenet, self).__init__()
features = list(mobilenet_v2(pretrained=pretrained).features.children())
self.features = torch.nn.Sequential(*features)
def forward(self, x):
return self.features(x)
class squeezenet(torch.nn.Module):
def __init__(self, backbone, pretrained = False):
super(squeezenet, self).__init__()
features = list(squeezenet1_0(pretrained=pretrained).features.children())
self.features = torch.nn.Sequential(*features)
def forward(self, x):
return self.features(x)
class vgg16bn(torch.nn.Module):
def __init__(self,pretrained = False):
super(vgg16bn,self).__init__()
model = list(torchvision.models.vgg16_bn(pretrained=pretrained).features.children())
model = model[:33]+model[34:43]
self.model = torch.nn.Sequential(*model)
def forward(self,x):
return self.model(x)
class resnet(torch.nn.Module):
def __init__(self, layers, pretrained = False):
super(resnet,self).__init__()
if layers == '18':
model = torchvision.models.resnet18(pretrained=pretrained)
elif layers == '34':
model = torchvision.models.resnet34(pretrained=pretrained)
elif layers == '50':
model = torchvision.models.resnet50(pretrained=pretrained)
else:
raise NotImplementedError
self.conv1 = model.conv1
self.bn1 = model.bn1
self.relu = model.relu
self.maxpool = model.maxpool
self.layer1 = model.layer1
self.layer2 = model.layer2
self.layer3 = model.layer3
self.layer4 = model.layer4
def forward(self,x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x2 = self.layer2(x)
x3 = self.layer3(x2)
x4 = self.layer4(x3)
return x4
class VisionTransformer(torch.nn.Module):
def __init__(self, layers, pretrained = False):
super(VisionTransformer, self).__init__()
model = vit_b_16(pretrained=pretrained)
self.class_token = model.class_token
self.encoder = model.encoder
self.image_size = model.image_size
self.conv_proj = model.conv_proj
self.hidden_dim = model.hidden_dim
self.patch_size = model.patch_size
def _process_input(self, x: torch.Tensor) -> torch.Tensor:
x = torch.nn.functional.adaptive_avg_pool2d(x, (224, 224))
n, c, h, w = x.shape
p = self.patch_size
torch._assert(h == self.image_size, "Wrong image height!")
torch._assert(w == self.image_size, "Wrong image width!")
n_h = h // p
n_w = w // p
# (n, c, h, w) -> (n, hidden_dim, n_h, n_w)
x = self.conv_proj(x)
# (n, hidden_dim, n_h, n_w) -> (n, hidden_dim, (n_h * n_w))
x = x.reshape(n, self.hidden_dim, n_h * n_w)
# (n, hidden_dim, (n_h * n_w)) -> (n, (n_h * n_w), hidden_dim)
# The self attention layer expects inputs in the format (N, S, E)
# where S is the source sequence length, N is the batch size, E is the
# embedding dimension
x = x.permute(0, 2, 1)
return x
def forward(self, x):
# Reshape and permute the input tensor
x = self._process_input(x)
n = x.shape[0]
# Expand the class token to the full batch
batch_class_token = self.class_token.expand(n, -1, -1)
x = torch.cat([batch_class_token, x], dim=1)
x = self.encoder(x)
# Classifier "token" as used by standard language architectures
x = x[:, 0]
# x = self.heads(x)
return x
if __name__ == "__main__":
torch.backends.cudnn.benchmark = True
model = VisionTransformer('vit_b_16').cuda()
x = torch.zeros((1,3,288,800)).cuda() + 1
out = model(x)
print(out.size())
| 8,429
| 31.929688
| 92
|
py
|
Rail-Detection
|
Rail-Detection-main/model/__init__.py
| 0
| 0
| 0
|
py
|
|
git_unordered_points_plane
|
git_unordered_points_plane-main/library.py
|
# ****************************************************************************
# Copyright (C) 2022 Patricio Gallardo, Benjamin Schmidt
# Contact: <pgallard@ucr.edu, schmbe@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# ****************************************************************************
from sage.all import var, vector
x, y, z = var('x'), var('y'), var('z')
# This is the basis labelled e_0 to e_14 in the paper.
e = [vector([x**2 * y, -x**3, 0]),
vector([x * y**2, -x**2 * y, 0]),
vector([y**3, -x * y**2, 0]),
vector([x**2 * z, 0, -x**3]),
vector([x * z**2, 0, -x**2 * z]),
vector([z**3, 0, -x * z**2]),
vector([x * y * z, -x**2 * z, 0]),
vector([x * y * z, 0, -x**2 * y]),
vector([y**2 * z, -x * y * z, 0]),
vector([y**2 * z, 0, -x * y**2]),
vector([y * z**2, -x * z**2, 0]),
vector([y * z**2, 0, -x * y * z]),
vector([0, y**2 * z, -y**3]),
vector([0, y * z**2, -y**2 * z]),
vector([0, z**3, -y * z**2])]
def coeff_to_point(coeff):
r"""Returns a vector in `math:H^0(\Omega(4))` defined through the
    coefficients `coeff` for our standard basis `math:e_0, \ldots, e_{14}`.
"""
return sum(coeff[j] * e[j] for j in range(len(e)))
def group_action(coeff, matrix):
r"""Computes the action of `matrix` on a point in the final model for
`math: n = 7`. The point is determined through its coefficients in front
of our standard basis (see paper) in the form of the list `coeff`.
"""
# Check the remarks below Section 5.1 in the paper to understand the
# group action.
v = vector([x, y, z]) * matrix
point = coeff_to_point(coeff)
w = (matrix * point).subs(x=v[0], y=v[1], z=v[2])
w = vector([entry.full_simplify().expand() for entry in w])
# Determine the coefficients in front of the basis vectors after the
# group action with matrix.
return [w[0].coefficient(x, 2).coefficient(y, 1), # e0
w[0].coefficient(x, 1).coefficient(y, 2), # e1
w[0].coefficient(y, 3), # e2
w[0].coefficient(x, 2).coefficient(z, 1), # e3
w[0].coefficient(x, 1).coefficient(z, 2), # e4
w[0].coefficient(z, 3), # e5
-w[1].coefficient(x, 2).coefficient(z, 1), # e6
-w[2].coefficient(x, 2).coefficient(y, 1), # e7
-w[1].coefficient(x, 1).coefficient(y, 1).coefficient(z, 1), # e8
-w[2].coefficient(x, 1).coefficient(y, 2), # e9
-w[1].coefficient(x, 1).coefficient(z, 2), # e10
-w[2].coefficient(x, 1).coefficient(y, 1).coefficient(z, 1), # e11
w[1].coefficient(y, 2).coefficient(z, 1), # e12
w[1].coefficient(y, 1).coefficient(z, 2), # e13
w[1].coefficient(z, 3)] # e14
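def _demo_group_action():
    # Illustrative usage sketch (added; not part of the original file and
    # assuming a working Sage environment): acting with the identity matrix
    # must leave the coefficient vector unchanged.
    from sage.all import identity_matrix, QQ
    coeff = [1] + [0] * 14
    assert group_action(coeff, identity_matrix(QQ, 3)) == coeff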
| 3,386
| 42.987013
| 79
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# CXX=c++ python3 setup.py build develop
# or
# CXX=c++ pip install . -vv
#
# Potentially also set TORCHBEAST_LIBS_PREFIX.
import os
import subprocess
import sys
import unittest
import numpy as np
import setuptools
from torch.utils import cpp_extension
PREFIX = os.getenv("CONDA_PREFIX")
if os.getenv("TORCHBEAST_LIBS_PREFIX"):
PREFIX = os.getenv("TORCHBEAST_LIBS_PREFIX")
if not PREFIX:
PREFIX = "/usr/local"
def build_pb():
protoc = f"{PREFIX}/bin/protoc"
    # Hard-code rpcenv.proto for now.
source = os.path.join(os.path.dirname(__file__), "libtorchbeast", "rpcenv.proto")
output = source.replace(".proto", ".pb.cc")
if os.path.exists(output) and (
os.path.exists(source) and os.path.getmtime(source) < os.path.getmtime(output)
):
return
print("calling protoc")
if (
subprocess.call(
[protoc, "--cpp_out=libtorchbeast", "-Ilibtorchbeast", "rpcenv.proto"]
)
!= 0
):
sys.exit(-1)
if (
subprocess.call(
protoc + " --grpc_out=libtorchbeast -Ilibtorchbeast"
" --plugin=protoc-gen-grpc=`which grpc_cpp_plugin`"
" rpcenv.proto",
shell=True,
)
!= 0
):
sys.exit(-1)
def test_suite():
test_loader = unittest.TestLoader()
test_suite = test_loader.discover("tests", pattern="*_test.py")
return test_suite
class build_ext(cpp_extension.BuildExtension):
def run(self):
build_pb()
cpp_extension.BuildExtension.run(self)
def main():
extra_compile_args = []
extra_link_args = []
grpc_objects = [
f"{PREFIX}/lib/libgrpc++.a",
f"{PREFIX}/lib/libgrpc.a",
f"{PREFIX}/lib/libgpr.a",
f"{PREFIX}/lib/libaddress_sorting.a",
]
include_dirs = cpp_extension.include_paths() + [
np.get_include(),
f"{PREFIX}/include",
]
libraries = []
if sys.platform == "darwin":
extra_compile_args += ["-stdlib=libc++", "-mmacosx-version-min=10.14"]
extra_link_args += ["-stdlib=libc++", "-mmacosx-version-min=10.14"]
# Relevant only when c-cares is not embedded in grpc, e.g. when
# installing grpc via homebrew.
libraries.append("cares")
elif sys.platform == "linux":
libraries.append("z")
grpc_objects.append(f"{PREFIX}/lib/libprotobuf.a")
libtorchbeast = cpp_extension.CppExtension(
name="libtorchbeast._C",
sources=[
"libtorchbeast/libtorchbeast.cc",
"libtorchbeast/actorpool.cc",
"libtorchbeast/rpcenv.cc",
"libtorchbeast/rpcenv.pb.cc",
"libtorchbeast/rpcenv.grpc.pb.cc",
],
include_dirs=include_dirs,
libraries=libraries,
language="c++",
extra_compile_args=["-std=c++17"] + extra_compile_args,
extra_link_args=extra_link_args,
extra_objects=grpc_objects,
)
setuptools.setup(
name="libtorchbeast",
packages=["libtorchbeast"],
version="0.0.14",
ext_modules=[libtorchbeast],
cmdclass={"build_ext": build_ext},
test_suite="setup.test_suite",
)
if __name__ == "__main__":
main()
| 3,827
| 25.957746
| 86
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/atari_data.py
|
# Taken from https://github.com/deepmind/dqn_zoo/blob/master/dqn_zoo/atari_data.py
#
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to compute human-normalized Atari scores.
The data used in this module is human and random performance data on Atari-57.
It comprises evaluation scores (undiscounted returns), each averaged
over at least 3 episode runs, on each of the 57 Atari games. Each episode begins
with the environment already stepped with a uniform random number (between 1 and
30 inclusive) of noop actions.
The two agents are:
* 'random' (agent choosing its actions uniformly randomly on each step)
* 'human' (professional human game tester)
Scores are obtained by averaging returns over the episodes played by each agent,
with episode length capped to 108,000 frames (i.e. timeout after 30 minutes).
The term 'human-normalized' here means a linear per-game transformation of
a game score in such a way that 0 corresponds to random performance and 1
corresponds to human performance.
"""
# pylint: disable=g-bad-import-order
import math
# Game: score-tuple dictionary. Each score tuple contains
# 0: score random (float) and 1: score human (float).
_ATARI_DATA = {
'alien': (227.8, 7127.7),
'amidar': (5.8, 1719.5),
'assault': (222.4, 742.0),
'asterix': (210.0, 8503.3),
'asteroids': (719.1, 47388.7),
'atlantis': (12850.0, 29028.1),
'bank_heist': (14.2, 753.1),
'battle_zone': (2360.0, 37187.5),
'beam_rider': (363.9, 16926.5),
'berzerk': (123.7, 2630.4),
'bowling': (23.1, 160.7),
'boxing': (0.1, 12.1),
'breakout': (1.7, 30.5),
'centipede': (2090.9, 12017.0),
'chopper_command': (811.0, 7387.8),
'crazy_climber': (10780.5, 35829.4),
'defender': (2874.5, 18688.9),
'demon_attack': (152.1, 1971.0),
'double_dunk': (-18.6, -16.4),
'enduro': (0.0, 860.5),
'fishing_derby': (-91.7, -38.7),
'freeway': (0.0, 29.6),
'frostbite': (65.2, 4334.7),
'gopher': (257.6, 2412.5),
'gravitar': (173.0, 3351.4),
'hero': (1027.0, 30826.4),
'ice_hockey': (-11.2, 0.9),
'jamesbond': (29.0, 302.8),
'kangaroo': (52.0, 3035.0),
'krull': (1598.0, 2665.5),
'kung_fu_master': (258.5, 22736.3),
'montezuma_revenge': (0.0, 4753.3),
'ms_pacman': (307.3, 6951.6),
'name_this_game': (2292.3, 8049.0),
'phoenix': (761.4, 7242.6),
'pitfall': (-229.4, 6463.7),
'pong': (-20.7, 14.6),
'private_eye': (24.9, 69571.3),
'qbert': (163.9, 13455.0),
'riverraid': (1338.5, 17118.0),
'road_runner': (11.5, 7845.0),
'robotank': (2.2, 11.9),
'seaquest': (68.4, 42054.7),
'skiing': (-17098.1, -4336.9),
'solaris': (1236.3, 12326.7),
'space_invaders': (148.0, 1668.7),
'star_gunner': (664.0, 10250.0),
'surround': (-10.0, 6.5),
'tennis': (-23.8, -8.3),
'time_pilot': (3568.0, 5229.2),
'tutankham': (11.4, 167.6),
'up_n_down': (533.4, 11693.2),
'venture': (0.0, 1187.5),
# Note the random agent score on Video Pinball is sometimes greater than the
# human score under other evaluation methods.
'video_pinball': (16256.9, 17667.9),
'wizard_of_wor': (563.5, 4756.5),
'yars_revenge': (3092.9, 54576.9),
'zaxxon': (32.5, 9173.3),
}
_RANDOM_COL = 0
_HUMAN_COL = 1
ATARI_GAMES = tuple(sorted(_ATARI_DATA.keys()))
def get_human_normalized_score(game: str, raw_score: float) -> float:
"""Converts game score to human-normalized score."""
game_scores = _ATARI_DATA.get(game, (math.nan, math.nan))
random, human = game_scores[_RANDOM_COL], game_scores[_HUMAN_COL]
return (raw_score - random) / (human - random)
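
# Editor's sketch (not part of the original module): a quick sanity check of
# the normalization above, using the 'pong' row from _ATARI_DATA. Runs only
# when this file is executed directly.
if __name__ == "__main__":
    # (10.0 - (-20.7)) / (14.6 - (-20.7)) = 30.7 / 35.3
    assert abs(get_human_normalized_score("pong", 10.0) - 30.7 / 35.3) < 1e-12
    # Unknown games fall back to (nan, nan), so the result is nan.
    assert math.isnan(get_human_normalized_score("no_such_game", 1.0))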
| 4,282
| 36.902655
| 82
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/nest/setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# CXX=c++ python3 setup.py build develop
# or
# CXX=c++ pip install . -vv
#
import sys
import setuptools
import setuptools.command.build_ext
class get_pybind_include(object):
"""Helper class to determine the pybind11 include path
The purpose of this class is to postpone importing pybind11
until it is actually installed, so that the ``get_include()``
method can be invoked. """
def __init__(self, user=False):
self.user = user
def __str__(self):
import pybind11
return pybind11.get_include(self.user)
ext_modules = [
setuptools.Extension(
"nest",
["nest/nest_pybind.cc"],
include_dirs=[
# Path to pybind11 headers
get_pybind_include(),
get_pybind_include(user=True),
],
depends=["nest/nest.h", "nest/nest_pybind.h"],
language="c++",
extra_compile_args=["-std=c++17"],
)
]
class BuildExt(setuptools.command.build_ext.build_ext):
"""A custom build extension for adding compiler-specific options."""
c_opts = {"msvc": ["/EHsc"], "unix": []}
if sys.platform == "darwin":
c_opts["unix"] += ["-stdlib=libc++", "-mmacosx-version-min=10.14"]
def build_extensions(self):
ct = self.compiler.compiler_type
opts = self.c_opts.get(ct, [])
if ct == "unix":
opts.append('-DVERSION_INFO="%s"' % self.distribution.get_version())
opts.append("-std=c++17")
opts.append("-fvisibility=hidden")
elif ct == "msvc":
opts.append('/DVERSION_INFO=\\"%s\\"' % self.distribution.get_version())
for ext in self.extensions:
ext.extra_compile_args += opts
if sys.platform == "darwin":
ext.extra_link_args = ["-stdlib=libc++"]
super().build_extensions()
setuptools.setup(
name="nest",
version="0.0.3",
author="TorchBeast team",
ext_modules=ext_modules,
headers=["nest/nest.h", "nest/nest_pybind.h"],
cmdclass={"build_ext": BuildExt},
install_requires=["pybind11>=2.3"],
setup_requires=["pybind11>=2.3"],
)
| 2,736
| 28.117021
| 84
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/nest/nest_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import nest
import torch
class NestTest(unittest.TestCase):
def setUp(self):
self.n1 = ("Test", ["More", 32], {"h": 4})
self.n2 = ("Test", ("More", 32, (None, 43, ())), {"h": 4})
def test_nest_flatten_no_asserts(self):
t = torch.tensor(1)
t2 = torch.tensor(2)
n = (t, t2)
d = {"hey": t}
nest.flatten((t, t2))
nest.flatten(d)
nest.flatten((d, t))
nest.flatten((d, n, t))
nest.flatten(((t, t2), (t, t2)))
nest.flatten(self.n1)
nest.flatten(self.n2)
d2 = {"hey": t2, "there": d, "more": t2}
nest.flatten(d2) # Careful here, order not necessarily as above.
def test_nest_map(self):
t1 = torch.tensor(0)
t2 = torch.tensor(1)
d = {"hey": t2}
n = nest.map(lambda t: t + 42, (t1, t2))
self.assertSequenceEqual(n, [t1 + 42, t2 + 42])
self.assertSequenceEqual(n, nest.flatten(n))
n1 = (d, n, t1)
n2 = nest.map(lambda t: t * 2, n1)
self.assertEqual(n2[0], {"hey": torch.tensor(2)})
self.assertEqual(n2[1], (torch.tensor(84), torch.tensor(86)))
self.assertEqual(n2[2], torch.tensor(0))
t = torch.tensor(42)
# Doesn't work with pybind11/functional.h, but does with py::function.
self.assertEqual(nest.map(t.add, t2), torch.tensor(43))
def test_nest_flatten(self):
self.assertEqual(nest.flatten(None), [None])
self.assertEqual(nest.flatten(self.n1), ["Test", "More", 32, 4])
def test_nest_pack_as(self):
self.assertEqual(self.n2, nest.pack_as(self.n2, nest.flatten(self.n2)))
with self.assertRaisesRegex(ValueError, "didn't exhaust sequence"):
nest.pack_as(self.n2, nest.flatten(self.n2) + [None])
with self.assertRaisesRegex(ValueError, "Too few elements"):
nest.pack_as(self.n2, nest.flatten(self.n2)[1:])
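    # Editor's miniature of the roundtrip above (values from setUp):
    #   nest.flatten(self.n2) -> ["Test", "More", 32, None, 43, 4]
    #   nest.pack_as(self.n2, that_list) -> self.n2 again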
def test_nest_map_many2(self):
def f(a, b):
return (b, a)
self.assertEqual(nest.map_many2(f, (1, 2), (3, 4)), ((3, 1), (4, 2)))
with self.assertRaisesRegex(ValueError, "got 2 vs 1"):
nest.map_many2(f, (1, 2), (3,))
self.assertEqual(nest.map_many2(f, {"a": 1}, {"a": 2}), {"a": (2, 1)})
with self.assertRaisesRegex(ValueError, "same keys"):
nest.map_many2(f, {"a": 1}, {"b": 2})
with self.assertRaisesRegex(ValueError, "1 vs 0"):
nest.map_many2(f, {"a": 1}, {})
with self.assertRaisesRegex(ValueError, "nests don't match"):
nest.map_many2(f, {"a": 1}, ())
def test_nest_map_many(self):
def f(a):
return (a[1], a[0])
self.assertEqual(nest.map_many(f, (1, 2), (3, 4)), ((3, 1), (4, 2)))
with self.assertRaisesRegex(ValueError, "got 2 vs 1"):
nest.map_many(f, (1, 2), (3,))
self.assertEqual(nest.map_many(f, {"a": 1}, {"a": 2}), {"a": (2, 1)})
with self.assertRaisesRegex(ValueError, "same keys"):
nest.map_many(f, {"a": 1}, {"b": 2})
with self.assertRaisesRegex(ValueError, "1 vs 0"):
nest.map_many(f, {"a": 1}, {})
with self.assertRaisesRegex(ValueError, "nests don't match"):
nest.map_many(f, {"a": 1}, ())
def test_front(self):
self.assertEqual(nest.front((1, 2, 3)), 1)
self.assertEqual(nest.front((2, 3)), 2)
self.assertEqual(nest.front((3,)), 3)
def test_refcount(self):
obj = "my very large and random string with numbers 1234"
rc = sys.getrefcount(obj)
# Test nest.front. This doesn't involve returning nests
# from C++ to Python.
nest.front((None, obj))
self.assertEqual(rc, sys.getrefcount(obj))
nest.front(obj)
self.assertEqual(rc, sys.getrefcount(obj))
nest.front((obj,))
self.assertEqual(rc, sys.getrefcount(obj))
nest.front((obj, obj, [obj, {"obj": obj}, obj]))
self.assertEqual(rc, sys.getrefcount(obj))
# Test returning nests of Nones.
nest.map(lambda x: None, (obj, obj, [obj, {"obj": obj}, obj]))
self.assertEqual(rc, sys.getrefcount(obj))
# Test returning actual nests.
nest.map(lambda s: s, obj)
self.assertEqual(rc, sys.getrefcount(obj))
nest.map(lambda x: x, {"obj": obj})
self.assertEqual(rc, sys.getrefcount(obj))
nest.map(lambda x: x, (obj,))
self.assertEqual(rc, sys.getrefcount(obj))
nest.map(lambda s: s, (obj, obj))
nest.map(lambda s: s, (obj, obj))
self.assertEqual(rc, sys.getrefcount(obj))
n = nest.map(lambda s: s, (obj,))
self.assertEqual(rc + 1, sys.getrefcount(obj))
del n
self.assertEqual(rc, sys.getrefcount(obj))
if __name__ == "__main__":
unittest.main()
| 5,498
| 31.157895
| 79
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/tests/vtrace_test.py
|
# This file taken from
# https://github.com/deepmind/scalable_agent/blob/
# d24bd74bd53d454b7222b7f0bea57a358e4ca33e/vtrace_test.py
# and modified.
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for V-trace.
For details and theory see:
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
"""
import unittest
import numpy as np
import torch
from torchbeast.core import vtrace
def _shaped_arange(*shape):
"""Runs np.arange, converts to float and reshapes."""
return np.arange(np.prod(shape), dtype=np.float32).reshape(*shape)
def _softmax(logits):
"""Applies softmax non-linearity on inputs."""
return np.exp(logits) / np.sum(np.exp(logits), axis=-1, keepdims=True)
def _ground_truth_calculation(
discounts,
log_rhos,
rewards,
values,
bootstrap_value,
clip_rho_threshold,
clip_pg_rho_threshold,
):
"""Calculates the ground truth for V-trace in Python/Numpy."""
vs = []
seq_len = len(discounts)
rhos = np.exp(log_rhos)
cs = np.minimum(rhos, 1.0)
clipped_rhos = rhos
if clip_rho_threshold:
clipped_rhos = np.minimum(rhos, clip_rho_threshold)
clipped_pg_rhos = rhos
if clip_pg_rho_threshold:
clipped_pg_rhos = np.minimum(rhos, clip_pg_rho_threshold)
# This is a very inefficient way to calculate the V-trace ground truth.
# We calculate it this way because it is close to the mathematical notation
# of V-trace.
# v_s = V(x_s)
# + \sum^{T-1}_{t=s} \gamma^{t-s}
# * \prod_{i=s}^{t-1} c_i
# * \rho_t (r_t + \gamma V(x_{t+1}) - V(x_t))
# Note that when we take the product over c_i, we write `s:t` as the
# notation of the paper is inclusive of the `t-1`, but Python is exclusive.
# Also note that np.prod([]) == 1.
values_t_plus_1 = np.concatenate([values, bootstrap_value[None, :]], axis=0)
for s in range(seq_len):
v_s = np.copy(values[s]) # Very important copy.
for t in range(s, seq_len):
v_s += (
np.prod(discounts[s:t], axis=0)
* np.prod(cs[s:t], axis=0)
* clipped_rhos[t]
* (rewards[t] + discounts[t] * values_t_plus_1[t + 1] - values[t])
)
vs.append(v_s)
vs = np.stack(vs, axis=0)
pg_advantages = clipped_pg_rhos * (
rewards
+ discounts * np.concatenate([vs[1:], bootstrap_value[None, :]], axis=0)
- values
)
return vtrace.VTraceReturns(vs=vs, pg_advantages=pg_advantages)
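
# Editor's sketch (illustrative, not part of the original tests): the same
# v_s targets computed with the O(T) backward recursion from the paper,
#   v_s = V(x_s) + delta_s + gamma_s * c_s * (v_{s+1} - V(x_{s+1})),
# where delta_s = rho_s * (r_s + gamma_s * V(x_{s+1}) - V(x_s)). It should
# agree with _ground_truth_calculation above up to floating point error.
def _ground_truth_recursive(
    discounts, log_rhos, rewards, values, bootstrap_value, clip_rho_threshold
):
    rhos = np.exp(log_rhos)
    cs = np.minimum(rhos, 1.0)
    clipped_rhos = np.minimum(rhos, clip_rho_threshold) if clip_rho_threshold else rhos
    values_t_plus_1 = np.concatenate([values, bootstrap_value[None, :]], axis=0)
    deltas = clipped_rhos * (rewards + discounts * values_t_plus_1[1:] - values)
    acc = np.zeros_like(bootstrap_value)
    vs_minus_v = np.zeros_like(values)
    for t in reversed(range(len(discounts))):
        acc = deltas[t] + discounts[t] * cs[t] * acc
        vs_minus_v[t] = acc
    return values + vs_minus_v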
def assert_allclose(actual, desired):
return np.testing.assert_allclose(actual, desired, rtol=1e-06, atol=1e-05)
class ActionLogProbsTest(unittest.TestCase):
def test_action_log_probs(self, batch_size=2):
seq_len = 7
num_actions = 3
policy_logits = _shaped_arange(seq_len, batch_size, num_actions) + 10
actions = np.random.randint(
0, num_actions, size=(seq_len, batch_size), dtype=np.int64
)
action_log_probs_tensor = vtrace.action_log_probs(
torch.from_numpy(policy_logits), torch.from_numpy(actions)
)
# Ground Truth
# Using broadcasting to create a mask that indexes action logits
action_index_mask = actions[..., None] == np.arange(num_actions)
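        # Editor's miniature of the broadcast (hypothetical values): with
        # num_actions == 3 and actions == [[1, 0]] (shape (T=1, B=2)),
        # actions[..., None] == np.arange(3) yields
        #   [[[False, True, False],
        #     [True, False, False]]]   # shape (1, 2, 3)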
def index_with_mask(array, mask):
return array[mask].reshape(*array.shape[:-1])
# Note: Normally log(softmax) is not a good idea because it's not
# numerically stable. However, in this test we have well-behaved values.
ground_truth_v = index_with_mask(
np.log(_softmax(policy_logits)), action_index_mask
)
assert_allclose(ground_truth_v, action_log_probs_tensor)
def test_action_log_probs_batch_1(self):
self.test_action_log_probs(1)
class VtraceTest(unittest.TestCase):
def test_vtrace(self, batch_size=5):
"""Tests V-trace against ground truth data calculated in python."""
seq_len = 5
# Create log_rhos such that rho will span from near-zero to above the
# clipping thresholds. In particular, calculate log_rhos in [-2.5, 2.5),
# so that rho is in approx [0.08, 12.2).
log_rhos = _shaped_arange(seq_len, batch_size) / (batch_size * seq_len)
log_rhos = 5 * (log_rhos - 0.5) # [0.0, 1.0) -> [-2.5, 2.5).
values = {
"log_rhos": log_rhos,
# T, B where B_i: [0.9 / (i+1)] * T
"discounts": np.array(
[[0.9 / (b + 1) for b in range(batch_size)] for _ in range(seq_len)],
dtype=np.float32,
),
"rewards": _shaped_arange(seq_len, batch_size),
"values": _shaped_arange(seq_len, batch_size) / batch_size,
"bootstrap_value": _shaped_arange(batch_size) + 1.0,
"clip_rho_threshold": 3.7,
"clip_pg_rho_threshold": 2.2,
}
ground_truth = _ground_truth_calculation(**values)
values = {key: torch.tensor(value) for key, value in values.items()}
output = vtrace.from_importance_weights(**values)
for a, b in zip(ground_truth, output):
assert_allclose(a, b)
def test_vtrace_batch_1(self):
self.test_vtrace(1)
def test_vtrace_from_logits(self, batch_size=2):
"""Tests V-trace calculated from logits."""
seq_len = 5
num_actions = 3
clip_rho_threshold = None # No clipping.
clip_pg_rho_threshold = None # No clipping.
values = {
"behavior_policy_logits": _shaped_arange(seq_len, batch_size, num_actions),
"target_policy_logits": _shaped_arange(seq_len, batch_size, num_actions),
"actions": np.random.randint(
0, num_actions - 1, size=(seq_len, batch_size)
),
"discounts": np.array( # T, B where B_i: [0.9 / (i+1)] * T
[[0.9 / (b + 1) for b in range(batch_size)] for _ in range(seq_len)],
dtype=np.float32,
),
"rewards": _shaped_arange(seq_len, batch_size),
"values": _shaped_arange(seq_len, batch_size) / batch_size,
"bootstrap_value": _shaped_arange(batch_size) + 1.0, # B
}
values = {k: torch.from_numpy(v) for k, v in values.items()}
from_logits_output = vtrace.from_logits(
clip_rho_threshold=clip_rho_threshold,
clip_pg_rho_threshold=clip_pg_rho_threshold,
**values,
)
target_log_probs = vtrace.action_log_probs(
values["target_policy_logits"], values["actions"]
)
behavior_log_probs = vtrace.action_log_probs(
values["behavior_policy_logits"], values["actions"]
)
log_rhos = target_log_probs - behavior_log_probs
# Calculate V-trace using the ground truth logits.
from_iw = vtrace.from_importance_weights(
log_rhos=log_rhos,
discounts=values["discounts"],
rewards=values["rewards"],
values=values["values"],
bootstrap_value=values["bootstrap_value"],
clip_rho_threshold=clip_rho_threshold,
clip_pg_rho_threshold=clip_pg_rho_threshold,
)
assert_allclose(from_iw.vs, from_logits_output.vs)
assert_allclose(from_iw.pg_advantages, from_logits_output.pg_advantages)
assert_allclose(
behavior_log_probs, from_logits_output.behavior_action_log_probs
)
assert_allclose(target_log_probs, from_logits_output.target_action_log_probs)
assert_allclose(log_rhos, from_logits_output.log_rhos)
def test_vtrace_from_logits_batch_1(self):
self.test_vtrace_from_logits(1)
def test_higher_rank_inputs_for_importance_weights(self):
"""Checks support for additional dimensions in inputs."""
T = 3 # pylint: disable=invalid-name
B = 2 # pylint: disable=invalid-name
values = {
"log_rhos": torch.zeros(T, B, 1),
"discounts": torch.zeros(T, B, 1),
"rewards": torch.zeros(T, B, 42),
"values": torch.zeros(T, B, 42),
"bootstrap_value": torch.zeros(B, 42),
}
output = vtrace.from_importance_weights(**values)
self.assertSequenceEqual(output.vs.shape, (T, B, 42))
def test_inconsistent_rank_inputs_for_importance_weights(self):
"""Test one of many possible errors in shape of inputs."""
T = 3 # pylint: disable=invalid-name
B = 2 # pylint: disable=invalid-name
values = {
"log_rhos": torch.zeros(T, B, 1),
"discounts": torch.zeros(T, B, 1),
"rewards": torch.zeros(T, B, 42),
"values": torch.zeros(T, B, 42),
# Should be [B, 42].
"bootstrap_value": torch.zeros(B),
}
with self.assertRaisesRegex(
RuntimeError, "same number of dimensions: got 3 and 2"
):
vtrace.from_importance_weights(**values)
if __name__ == "__main__":
unittest.main()
| 9,701
| 35.611321
| 87
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/tests/polybeast_net_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for polybeast Net class implementation."""
import unittest
import torch
from torchbeast import polybeast_learner as polybeast
class NetTest(unittest.TestCase):
def setUp(self):
self.unroll_length = 4 # Arbitrary.
self.batch_size = 4 # Arbitrary.
        self.frame_dimension = 84  # Has to match what is expected by the model.
        self.num_actions = 6  # Specific to each environment.
        self.num_channels = 4  # Has to match the first conv layer of the net.
        self.core_output_size = 256  # Has to match what is expected by the model.
self.num_lstm_layers = 1 # As in the model.
self.inputs = dict(
frame=torch.ones(
self.unroll_length,
self.batch_size,
self.num_channels,
self.frame_dimension,
self.frame_dimension,
),
reward=torch.ones(self.batch_size, self.unroll_length),
done=torch.zeros(self.batch_size, self.unroll_length, dtype=torch.uint8),
)
def test_forward_return_signature_no_lstm(self):
model = polybeast.Net(num_actions=self.num_actions, use_lstm=False)
core_state = ()
(action, policy_logits, baseline), core_state = model(self.inputs, core_state)
self.assertSequenceEqual(action.shape, (self.batch_size, self.unroll_length))
self.assertSequenceEqual(
policy_logits.shape, (self.batch_size, self.unroll_length, self.num_actions)
)
self.assertSequenceEqual(baseline.shape, (self.batch_size, self.unroll_length))
self.assertSequenceEqual(core_state, ())
def test_forward_return_signature_with_lstm(self):
model = polybeast.Net(num_actions=self.num_actions, use_lstm=True)
core_state = model.initial_state(self.batch_size)
(action, policy_logits, baseline), core_state = model(self.inputs, core_state)
self.assertSequenceEqual(action.shape, (self.batch_size, self.unroll_length))
self.assertSequenceEqual(
policy_logits.shape, (self.batch_size, self.unroll_length, self.num_actions)
)
self.assertSequenceEqual(baseline.shape, (self.batch_size, self.unroll_length))
self.assertEqual(len(core_state), 2)
for core_state_element in core_state:
self.assertSequenceEqual(
core_state_element.shape,
(self.num_lstm_layers, self.batch_size, self.core_output_size),
)
def test_initial_state(self):
model_no_lstm = polybeast.Net(num_actions=self.num_actions, use_lstm=False)
initial_state_no_lstm = model_no_lstm.initial_state(self.batch_size)
self.assertSequenceEqual(initial_state_no_lstm, ())
model_with_lstm = polybeast.Net(num_actions=self.num_actions, use_lstm=True)
initial_state_with_lstm = model_with_lstm.initial_state(self.batch_size)
self.assertEqual(len(initial_state_with_lstm), 2)
for core_state_element in initial_state_with_lstm:
self.assertSequenceEqual(
core_state_element.shape,
(self.num_lstm_layers, self.batch_size, self.core_output_size),
)
if __name__ == "__main__":
unittest.main()
| 3,863
| 41.933333
| 88
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/tests/batching_queue_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for actorpool.BatchingQueue.
Basic functionality of actorpool.BatchingQueue is tested
in libtorchbeast/actorpool_test.cc.
"""
import threading
import time
import unittest
import numpy as np
import torch
import libtorchbeast
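
# Editor's sketch of the handshake exercised below (names from this file):
#   producer threads: queue.enqueue(tensor)  # tensors concatenated on batch_dim
#   consumer threads: batch = next(queue)    # blocks until >= minimum_batch_size
#   shutdown:         queue.close()          # ends iteration in all consumers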
class BatchingQueueTest(unittest.TestCase):
def test_bad_construct(self):
with self.assertRaisesRegex(ValueError, "Min batch size must be >= 1"):
libtorchbeast.BatchingQueue(
batch_dim=3, minimum_batch_size=0, maximum_batch_size=1
)
with self.assertRaisesRegex(
ValueError, "Max batch size must be >= min batch size"
):
libtorchbeast.BatchingQueue(
batch_dim=3, minimum_batch_size=1, maximum_batch_size=0
)
def test_multiple_close_calls(self):
queue = libtorchbeast.BatchingQueue()
queue.close()
with self.assertRaisesRegex(RuntimeError, "Queue was closed already"):
queue.close()
def test_check_inputs(self):
queue = libtorchbeast.BatchingQueue(batch_dim=2)
with self.assertRaisesRegex(
ValueError, "Enqueued tensors must have more than batch_dim =="
):
queue.enqueue(torch.ones(5))
with self.assertRaisesRegex(
ValueError, "Cannot enqueue empty vector of tensors"
):
queue.enqueue([])
with self.assertRaisesRegex(
libtorchbeast.ClosedBatchingQueue, "Enqueue to closed queue"
):
queue.close()
queue.enqueue(torch.ones(1, 1, 1))
def test_simple_run(self):
queue = libtorchbeast.BatchingQueue(
batch_dim=0, minimum_batch_size=1, maximum_batch_size=1
)
inputs = torch.zeros(1, 2, 3)
queue.enqueue(inputs)
batch = next(queue)
np.testing.assert_array_equal(batch, inputs)
def test_batched_run(self, batch_size=2):
queue = libtorchbeast.BatchingQueue(
batch_dim=0, minimum_batch_size=batch_size, maximum_batch_size=batch_size
)
inputs = [torch.full((1, 2, 3), i) for i in range(batch_size)]
def enqueue_target(i):
while queue.size() < i:
# Make sure thread i calls enqueue before thread i + 1.
time.sleep(0.05)
queue.enqueue(inputs[i])
enqueue_threads = []
for i in range(batch_size):
enqueue_threads.append(
threading.Thread(
target=enqueue_target, name=f"enqueue-thread-{i}", args=(i,)
)
)
for t in enqueue_threads:
t.start()
batch = next(queue)
np.testing.assert_array_equal(batch, torch.cat(inputs))
for t in enqueue_threads:
t.join()
class BatchingQueueProducerConsumerTest(unittest.TestCase):
def test_many_consumers(
self, enqueue_threads_number=16, repeats=100, dequeue_threads_number=64
):
queue = libtorchbeast.BatchingQueue(batch_dim=0)
lock = threading.Lock()
total_batches_consumed = 0
def enqueue_target(i):
for _ in range(repeats):
queue.enqueue(torch.full((1, 2, 3), i))
def dequeue_target():
nonlocal total_batches_consumed
for batch in queue:
batch_size, *_ = batch.shape
with lock:
total_batches_consumed += batch_size
enqueue_threads = []
for i in range(enqueue_threads_number):
enqueue_threads.append(
threading.Thread(
target=enqueue_target, name=f"enqueue-thread-{i}", args=(i,)
)
)
dequeue_threads = []
for i in range(dequeue_threads_number):
dequeue_threads.append(
threading.Thread(target=dequeue_target, name=f"dequeue-thread-{i}")
)
for t in enqueue_threads + dequeue_threads:
t.start()
for t in enqueue_threads:
t.join()
queue.close()
for t in dequeue_threads:
t.join()
self.assertEqual(total_batches_consumed, repeats * enqueue_threads_number)
if __name__ == "__main__":
unittest.main()
| 4,880
| 30.490323
| 85
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/tests/polybeast_loss_functions_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for polybeast loss functions implementation."""
import unittest
import numpy as np
import torch
from torch.nn import functional as F
from torchbeast import polybeast_learner as polybeast
def _softmax(logits):
"""Applies softmax non-linearity on inputs."""
return np.exp(logits) / np.sum(np.exp(logits), axis=-1, keepdims=True)
def _softmax_grad(logits):
"""Compute the gradient of softmax function."""
s = np.expand_dims(_softmax(logits), 0)
return s.T * (np.eye(s.size) - s)
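
# Editor's sketch (not part of the original tests): a numerical check of the
# Jacobian above against PyTorch autograd. J[i, j] = s_i * (delta_ij - s_j),
# so backpropagating an arbitrary upstream vector v through softmax should
# give v @ J. The logits and v values below are illustrative only.
def _softmax_grad_autograd_check():
    logits = np.array([0.1, 0.2, 0.3])
    v = np.array([1.0, 2.0, 3.0])
    t = torch.tensor(logits, requires_grad=True)
    (torch.softmax(t, dim=-1) * torch.from_numpy(v)).sum().backward()
    np.testing.assert_allclose(
        t.grad.numpy(), v @ _softmax_grad(logits), rtol=1e-06, atol=1e-05
    )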
def assert_allclose(actual, desired):
return np.testing.assert_allclose(actual, desired, rtol=1e-06, atol=1e-05)
class ComputeBaselineLossTest(unittest.TestCase):
def setUp(self):
# Floating point constants are randomly generated.
self.advantages = np.array([1.4, 3.43, 5.2, 0.33])
def test_compute_baseline_loss(self):
ground_truth_value = 0.5 * np.sum(self.advantages ** 2)
assert_allclose(
ground_truth_value,
polybeast.compute_baseline_loss(torch.from_numpy(self.advantages)),
)
def test_compute_baseline_loss_grad(self):
advantages_tensor = torch.from_numpy(self.advantages)
advantages_tensor.requires_grad_()
calculated_value = polybeast.compute_baseline_loss(advantages_tensor)
calculated_value.backward()
# Manually computed gradients:
# 0.5 * d(xˆ2)/dx == x
# hence the expected gradient is the same as self.advantages.
assert_allclose(advantages_tensor.grad, self.advantages)
class ComputeEntropyLossTest(unittest.TestCase):
def setUp(self):
# Floating point constants are randomly generated.
self.logits = np.array([0.0012, 0.321, 0.523, 0.109, 0.416])
def test_compute_entropy_loss(self):
        # The entropy of the distribution is:
        #   H(s) = - sum(prob(x) * ln(prob(x)) for each x in s)
        # compute_entropy_loss returns the negative entropy -H(s), hence no
        # leading minus sign in the ground truth below.
        softmax_logits = _softmax(self.logits)
        ground_truth_value = np.sum(softmax_logits * np.log(softmax_logits))
calculated_value = polybeast.compute_entropy_loss(torch.from_numpy(self.logits))
assert_allclose(ground_truth_value, calculated_value)
def test_compute_entropy_loss_grad(self):
logits_tensor = torch.from_numpy(self.logits)
logits_tensor.requires_grad_()
calculated_value = polybeast.compute_entropy_loss(logits_tensor)
calculated_value.backward()
expected_grad = np.matmul(
np.ones_like(self.logits),
np.matmul(
np.diag(1 + np.log(_softmax(self.logits))), _softmax_grad(self.logits)
),
)
assert_allclose(logits_tensor.grad, expected_grad)
class ComputePolicyGradientLossTest(unittest.TestCase):
def setUp(self):
# Floating point constants are randomly generated.
self.logits = np.array(
[
[
[0.206, 0.738, 0.125, 0.484, 0.332],
[0.168, 0.504, 0.523, 0.496, 0.626],
[0.236, 0.186, 0.627, 0.441, 0.533],
],
[
[0.015, 0.904, 0.583, 0.651, 0.855],
[0.811, 0.292, 0.061, 0.597, 0.590],
[0.999, 0.504, 0.464, 0.077, 0.143],
],
]
)
self.actions = np.array([[3, 0, 1], [4, 2, 2]])
self.advantages = np.array([[1.4, 0.31, 0.75], [2.1, 1.5, 0.03]])
def test_compute_policy_gradient_loss(self):
T, B, N = self.logits.shape
        # Calculate the cross entropy loss, with the formula:
        # loss = -sum_over_j(y_j * log(p_j))
        # Where:
        # - `y_j` is whether the action corresponding to index j has been taken
        #   (hence y is a one-hot array of size == number of actions).
        # - `p_j` is the value of the softmax logit corresponding to the jth action.
        # In our implementation, we also multiply by the advantages.
labels = F.one_hot(torch.from_numpy(self.actions), num_classes=N).numpy()
cross_entropy_loss = -labels * np.log(_softmax(self.logits))
ground_truth_value = np.sum(
cross_entropy_loss * self.advantages.reshape(T, B, 1)
)
calculated_value = polybeast.compute_policy_gradient_loss(
torch.from_numpy(self.logits),
torch.from_numpy(self.actions),
torch.from_numpy(self.advantages),
)
assert_allclose(ground_truth_value, calculated_value.item())
def test_compute_policy_gradient_loss_grad(self):
T, B, N = self.logits.shape
logits_tensor = torch.from_numpy(self.logits)
logits_tensor.requires_grad_()
calculated_value = polybeast.compute_policy_gradient_loss(
logits_tensor,
torch.from_numpy(self.actions),
torch.from_numpy(self.advantages),
)
self.assertSequenceEqual(calculated_value.shape, [])
calculated_value.backward()
# The gradient of the cross entropy loss function for the jth logit
# can be expressed as:
# p_j - y_j
# where:
# - `p_j` is the value of the softmax logit corresponding to the jth action.
        # - `y_j` is whether the action corresponding to index j has been taken
        #   (hence y is a one-hot array of size == number of actions).
        # In our implementation, we also multiply by the advantages.
softmax = _softmax(self.logits)
labels = F.one_hot(torch.from_numpy(self.actions), num_classes=N).numpy()
expected_grad = (softmax - labels) * self.advantages.reshape(T, B, 1)
assert_allclose(logits_tensor.grad, expected_grad)
def test_compute_policy_gradient_loss_grad_flow(self):
logits_tensor = torch.from_numpy(self.logits)
logits_tensor.requires_grad_()
advantages_tensor = torch.from_numpy(self.advantages)
advantages_tensor.requires_grad_()
loss = polybeast.compute_policy_gradient_loss(
logits_tensor, torch.from_numpy(self.actions), advantages_tensor
)
loss.backward()
self.assertIsNotNone(logits_tensor.grad)
self.assertIsNone(advantages_tensor.grad)
if __name__ == "__main__":
unittest.main()
| 6,870
| 36.752747
| 88
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/tests/inference_speed_profiling.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import threading
import time
import timeit
import torch
sys.path.append("..")
import experiment # noqa: E402
logging.basicConfig(
format=(
"[%(levelname)s:%(process)d %(module)s:%(lineno)d %(asctime)s] " "%(message)s"
),
level=0,
)
batch_size = int(sys.argv[1]) if len(sys.argv) > 1 else 4
num_inference_threads = int(sys.argv[2]) if len(sys.argv) > 2 else 2
def main():
filename = "inference_speed_test.json"
with torch.autograd.profiler.profile() as prof:
run()
logging.info("Collecting trace and writing to '%s.gz'", filename)
prof.export_chrome_trace(filename)
os.system("gzip %s" % filename)
def run():
size = (4, 84, 84)
num_actions = 6
if torch.cuda.is_available():
device = torch.device("cuda:0")
else:
device = torch.device("cpu")
model = experiment.Net(observation_size=size, num_actions=num_actions)
model = model.to(device=device)
should_stop = threading.Event()
step = 0
def stream_inference(frame):
nonlocal step
T, B, *_ = frame.shape
stream = torch.cuda.Stream()
with torch.no_grad():
with torch.cuda.stream(stream):
while not should_stop.is_set():
                    # Pin host memory so the copy below can be asynchronous,
                    # then reuse the pinned tensor for the device transfer.
                    input = frame.pin_memory()
                    input = input.to(device, non_blocking=True)
outputs = model(input)
outputs = [t.cpu() for t in outputs]
stream.synchronize()
step += B
def inference(frame, lock=threading.Lock()): # noqa: B008
nonlocal step
T, B, *_ = frame.shape
with torch.no_grad():
while not should_stop.is_set():
input = frame.to(device)
with lock:
outputs = model(input)
step += B
outputs = [t.cpu() for t in outputs]
def direct_inference(frame):
nonlocal step
frame = frame.to(device)
T, B, *_ = frame.shape
with torch.no_grad():
while not should_stop.is_set():
model(frame)
step += B
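    # Editor's note: the three variants above make different trade-offs.
    # stream_inference pins host memory and overlaps the H2D copy with
    # compute on a side CUDA stream; inference serializes model calls
    # behind a lock while copying synchronously; direct_inference moves
    # the frame to the device once and skips per-step copies entirely.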
frame = 255 * torch.rand((1, batch_size) + size)
work_threads = [
threading.Thread(target=stream_inference, args=(frame,))
for _ in range(num_inference_threads)
]
for thread in work_threads:
thread.start()
try:
while step < 10000:
start_time = timeit.default_timer()
start_step = step
time.sleep(3)
end_step = step
logging.info(
"Step %i @ %.1f SPS.",
end_step,
(end_step - start_step) / (timeit.default_timer() - start_time),
)
except KeyboardInterrupt:
pass
should_stop.set()
for thread in work_threads:
thread.join()
if __name__ == "__main__":
main()
| 3,562
| 25.992424
| 86
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/tests/dynamic_batcher_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for actorpool.DynamicBatcher."""
import threading
import time
import unittest
import numpy as np
import torch
import libtorchbeast
_BROKEN_PROMISE_MESSAGE = (
"The associated promise has been destructed prior"
" to the associated state becoming ready."
)
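
# Editor's sketch of the compute/consume handshake exercised below:
#   compute thread: outputs = batcher.compute(inputs)  # blocks until served
#   consume thread: batch = next(batcher)
#                   inputs = batch.get_inputs()        # batched on batch_dim
#                   batch.set_outputs(outputs)         # unblocks compute()
# Dropping `batch` without calling set_outputs breaks the promise (see
# test_dropped_batch below).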
class DynamicBatcherTest(unittest.TestCase):
def test_simple_run(self):
batcher = libtorchbeast.DynamicBatcher(
batch_dim=0, minimum_batch_size=1, maximum_batch_size=1
)
inputs = torch.zeros(1, 2, 3)
outputs = torch.ones(1, 42, 3)
def target():
np.testing.assert_array_equal(batcher.compute(inputs), outputs)
t = threading.Thread(target=target, name="compute-thread")
t.start()
batch = next(batcher)
np.testing.assert_array_equal(batch.get_inputs(), inputs)
batch.set_outputs(outputs)
t.join()
def test_timeout(self):
timeout_ms = 300
batcher = libtorchbeast.DynamicBatcher(
batch_dim=0,
minimum_batch_size=5,
maximum_batch_size=5,
timeout_ms=timeout_ms,
)
inputs = torch.zeros(1, 2, 3)
outputs = torch.ones(1, 42, 3)
def compute_target():
batcher.compute(inputs)
compute_thread = threading.Thread(target=compute_target, name="compute-thread")
compute_thread.start()
start_waiting_time = time.time()
# Wait until approximately timeout_ms.
batch = next(batcher)
waiting_time_ms = (time.time() - start_waiting_time) * 1000
# Timeout has expired and the batch of size 1 (< minimum_batch_size)
# has been consumed.
batch.set_outputs(outputs)
compute_thread.join()
self.assertTrue(timeout_ms <= waiting_time_ms <= timeout_ms + timeout_ms / 10)
def test_batched_run(self, batch_size=10):
batcher = libtorchbeast.DynamicBatcher(
batch_dim=0, minimum_batch_size=batch_size, maximum_batch_size=batch_size
)
inputs = [torch.full((1, 2, 3), i) for i in range(batch_size)]
outputs = torch.ones(batch_size, 42, 3)
def target(i):
while batcher.size() < i:
# Make sure thread i calls compute before thread i + 1.
time.sleep(0.05)
np.testing.assert_array_equal(
batcher.compute(inputs[i]), outputs[i : i + 1]
)
threads = []
for i in range(batch_size):
threads.append(
threading.Thread(target=target, name=f"compute-thread-{i}", args=(i,))
)
for t in threads:
t.start()
batch = next(batcher)
batched_inputs = batch.get_inputs()
np.testing.assert_array_equal(batched_inputs, torch.cat(inputs))
batch.set_outputs(outputs)
for t in threads:
t.join()
def test_dropped_batch(self):
batcher = libtorchbeast.DynamicBatcher(
batch_dim=0, minimum_batch_size=1, maximum_batch_size=1
)
inputs = torch.zeros(1, 2, 3)
def target():
with self.assertRaisesRegex(
libtorchbeast.AsyncError, _BROKEN_PROMISE_MESSAGE
):
batcher.compute(inputs)
t = threading.Thread(target=target, name="compute-thread")
t.start()
next(batcher) # Retrieves but doesn't keep the batch object.
t.join()
def test_check_outputs1(self):
batcher = libtorchbeast.DynamicBatcher(
batch_dim=2, minimum_batch_size=1, maximum_batch_size=1
)
inputs = torch.zeros(1, 2, 3)
def target():
batcher.compute(inputs)
t = threading.Thread(target=target, name="compute-thread")
t.start()
batch = next(batcher)
with self.assertRaisesRegex(ValueError, "output shape must have at least"):
outputs = torch.ones(1)
batch.set_outputs(outputs)
# Set correct outputs so the thread can join.
batch.set_outputs(torch.ones(1, 1, 1))
t.join()
def test_check_outputs2(self):
batcher = libtorchbeast.DynamicBatcher(
batch_dim=2, minimum_batch_size=1, maximum_batch_size=1
)
inputs = torch.zeros(1, 2, 3)
def target():
batcher.compute(inputs)
t = threading.Thread(target=target, name="compute-thread")
t.start()
batch = next(batcher)
with self.assertRaisesRegex(
ValueError,
"Output shape must have the same batch dimension as the input batch size.",
):
            # Dimension two of the outputs differs from the batch size (3 != 1).
batch.set_outputs(torch.ones(1, 42, 3))
# Set correct outputs so the thread can join.
batch.set_outputs(torch.ones(1, 1, 1))
t.join()
def test_multiple_set_outputs_calls(self):
batcher = libtorchbeast.DynamicBatcher(
batch_dim=0, minimum_batch_size=1, maximum_batch_size=1
)
inputs = torch.zeros(1, 2, 3)
outputs = torch.ones(1, 42, 3)
def target():
batcher.compute(inputs)
t = threading.Thread(target=target, name="compute-thread")
t.start()
batch = next(batcher)
batch.set_outputs(outputs)
with self.assertRaisesRegex(RuntimeError, "set_outputs called twice"):
batch.set_outputs(outputs)
t.join()
class DynamicBatcherProducerConsumerTest(unittest.TestCase):
def test_many_consumers(
self,
minimum_batch_size=1,
compute_thread_number=64,
repeats=100,
consume_thread_number=16,
):
batcher = libtorchbeast.DynamicBatcher(
batch_dim=0, minimum_batch_size=minimum_batch_size
)
lock = threading.Lock()
total_batches_consumed = 0
def compute_thread_target(i):
for _ in range(repeats):
inputs = torch.full((1, 2, 3), i)
batcher.compute(inputs)
def consume_thread_target():
nonlocal total_batches_consumed
for batch in batcher:
inputs = batch.get_inputs()
batch_size, *_ = inputs.shape
batch.set_outputs(torch.ones_like(inputs))
with lock:
total_batches_consumed += batch_size
compute_threads = []
for i in range(compute_thread_number):
compute_threads.append(
threading.Thread(
target=compute_thread_target, name=f"compute-thread-{i}", args=(i,)
)
)
consume_threads = []
for i in range(consume_thread_number):
consume_threads.append(
threading.Thread(
target=consume_thread_target, name=f"consume-thread-{i}"
)
)
for t in compute_threads + consume_threads:
t.start()
for t in compute_threads:
t.join()
# Stop iteration in all consume_threads.
batcher.close()
for t in consume_threads:
t.join()
self.assertEqual(total_batches_consumed, compute_thread_number * repeats)
if __name__ == "__main__":
unittest.main()
| 7,972
| 28.639405
| 87
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/tests/contiguous_arrays_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that non-contiguous arrays are handled properly."""
import subprocess
import threading
import unittest
import numpy as np
import torch
import libtorchbeast
class ContiguousArraysTest(unittest.TestCase):
def setUp(self):
self.server_proc = subprocess.Popen(
["python", "tests/contiguous_arrays_env.py"]
)
server_address = ["unix:/tmp/contiguous_arrays_test"]
self.learner_queue = libtorchbeast.BatchingQueue(
batch_dim=1, minimum_batch_size=1, maximum_batch_size=10, check_inputs=True
)
self.inference_batcher = libtorchbeast.DynamicBatcher(
batch_dim=1,
minimum_batch_size=1,
maximum_batch_size=10,
timeout_ms=100,
check_outputs=True,
)
actor = libtorchbeast.ActorPool(
unroll_length=1,
learner_queue=self.learner_queue,
inference_batcher=self.inference_batcher,
env_server_addresses=server_address,
initial_agent_state=(),
)
def run():
actor.run()
self.actor_thread = threading.Thread(target=run)
self.actor_thread.start()
self.target = np.arange(3 * 4 * 5)
self.target = self.target.reshape(3, 4, 5)
self.target = self.target.transpose(2, 1, 0)
def check_inference_inputs(self):
batch = next(self.inference_batcher)
batched_env_outputs, _ = batch.get_inputs()
frame, *_ = batched_env_outputs
self.assertTrue(np.array_equal(frame.shape, (1, 1, 5, 4, 3)))
frame = frame.reshape(5, 4, 3)
self.assertTrue(np.array_equal(frame, self.target))
# Set an arbitrary output.
batch.set_outputs(((torch.ones(1, 1),), ()))
def test_contiguous_arrays(self):
self.check_inference_inputs()
# Stop actor thread.
self.inference_batcher.close()
self.learner_queue.close()
self.actor_thread.join()
def tearDown(self):
self.server_proc.terminate()
| 2,650
| 31.728395
| 87
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/tests/contiguous_arrays_env.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock environment for the test contiguous_arrays_test.py."""
import numpy as np
import libtorchbeast
class Env:
def __init__(self):
self.frame = np.arange(3 * 4 * 5)
self.frame = self.frame.reshape(3, 4, 5)
self.frame = self.frame.transpose(2, 1, 0)
assert not self.frame.flags.c_contiguous
def reset(self):
return self.frame
def step(self, action):
return self.frame, 0.0, False, {}
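
# Editor's miniature of why the transpose above is not C-contiguous: a
# transposed array is a strided view of the same buffer, e.g.
#   a = np.arange(6).reshape(2, 3)   # a.flags.c_contiguous -> True
#   a.T.flags.c_contiguous           # -> False (strides reversed)
# The companion test checks that such views survive the actor-pool round trip.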
if __name__ == "__main__":
server_address = "unix:/tmp/contiguous_arrays_test"
server = libtorchbeast.Server(Env, server_address=server_address)
server.run()
| 1,220
| 31.131579
| 74
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/tests/core_agent_state_env.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock environment for the test core_agent_state_test.py."""
import numpy as np
import libtorchbeast
class Env:
def __init__(self):
self.frame = np.zeros((1, 1))
self.count = 0
self.done_after = 5
def reset(self):
self.frame = np.zeros((1, 1))
return self.frame
def step(self, action):
self.frame += 1
done = self.frame.item() == self.done_after
return self.frame, 0.0, done, {}
if __name__ == "__main__":
server_address = "unix:/tmp/core_agent_state_test"
server = libtorchbeast.Server(Env, server_address=server_address)
server.run()
| 1,230
| 29.02439
| 74
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/tests/polybeast_learn_function_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for polybeast learn function implementation."""
import copy
import unittest
from unittest import mock
import numpy as np
import torch
from torchbeast import polybeast_learner as polybeast
def _state_dict_to_numpy(state_dict):
return {key: value.numpy() for key, value in state_dict.items()}
class LearnTest(unittest.TestCase):
def setUp(self):
unroll_length = 2 # Arbitrary.
batch_size = 4 # Arbitrary.
frame_dimension = 84 # Has to match what expected by the model.
num_actions = 6 # Specific to each environment.
num_channels = 4 # Has to match with the first conv layer of the net.
        # The following hyperparameters are arbitrary.
self.lr = 0.1
total_steps = 100000
# Set the random seed manually to get reproducible results.
torch.manual_seed(0)
self.model = polybeast.Net(num_actions=num_actions, use_lstm=False)
self.actor_model = polybeast.Net(num_actions=num_actions, use_lstm=False)
self.initial_model_dict = copy.deepcopy(self.model.state_dict())
self.initial_actor_model_dict = copy.deepcopy(self.actor_model.state_dict())
optimizer = torch.optim.SGD(self.model.parameters(), lr=self.lr)
scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, step_size=total_steps // 10
)
self.stats = {}
# The call to plogger.log will not perform any action.
plogger = mock.Mock()
plogger.log = mock.Mock()
# Mock flags.
mock_flags = mock.Mock()
mock_flags.learner_device = torch.device("cpu")
mock_flags.reward_clipping = "abs_one" # Default value from cmd.
mock_flags.discounting = 0.99 # Default value from cmd.
mock_flags.baseline_cost = 0.5 # Default value from cmd.
mock_flags.entropy_cost = 0.0006 # Default value from cmd.
mock_flags.unroll_length = unroll_length
mock_flags.batch_size = batch_size
mock_flags.grad_norm_clipping = 40
# Prepare content for mock_learner_queue.
frame = torch.ones(
unroll_length, batch_size, num_channels, frame_dimension, frame_dimension
)
rewards = torch.ones(unroll_length, batch_size)
done = torch.zeros(unroll_length, batch_size, dtype=torch.uint8)
episode_step = torch.ones(unroll_length, batch_size)
episode_return = torch.ones(unroll_length, batch_size)
env_outputs = (frame, rewards, done, episode_step, episode_return)
actor_outputs = (
# Actions taken.
torch.randint(low=0, high=num_actions, size=(unroll_length, batch_size)),
# Logits.
torch.randn(unroll_length, batch_size, num_actions),
# Baseline.
torch.rand(unroll_length, batch_size),
)
initial_agent_state = () # No lstm.
tensors = ((env_outputs, actor_outputs), initial_agent_state)
# Mock learner_queue.
mock_learner_queue = mock.MagicMock()
mock_learner_queue.__iter__.return_value = iter([tensors])
self.learn_args = (
mock_flags,
mock_learner_queue,
self.model,
self.actor_model,
optimizer,
scheduler,
self.stats,
plogger,
)
def test_parameters_copied_to_actor_model(self):
"""Check that the learner model copies the parameters to the actor model."""
# Reset models.
self.model.load_state_dict(self.initial_model_dict)
self.actor_model.load_state_dict(self.initial_actor_model_dict)
polybeast.learn(*self.learn_args)
np.testing.assert_equal(
_state_dict_to_numpy(self.actor_model.state_dict()),
_state_dict_to_numpy(self.model.state_dict()),
)
def test_weights_update(self):
"""Check that trainable parameters get updated after one iteration."""
# Reset models.
self.model.load_state_dict(self.initial_model_dict)
self.actor_model.load_state_dict(self.initial_actor_model_dict)
polybeast.learn(*self.learn_args)
model_state_dict = self.model.state_dict(keep_vars=True)
actor_model_state_dict = self.actor_model.state_dict(keep_vars=True)
for key, initial_tensor in self.initial_model_dict.items():
model_tensor = model_state_dict[key]
actor_model_tensor = actor_model_state_dict[key]
# Assert that the gradient is not zero for the learner.
self.assertGreater(torch.norm(model_tensor.grad), 0.0)
# Assert actor has no gradient.
# Note that even though actor model tensors have no gradient,
# they have requires_grad == True. No gradients are ever calculated
# for these tensors because the inference function in polybeast.py
# (that performs forward passes with the actor_model) uses torch.no_grad
# context manager.
self.assertIsNone(actor_model_tensor.grad)
# Assert that the weights are updated in the expected way.
# We manually perform a gradient descent step,
# and check that they are the same as the calculated ones
# (ignoring floating point errors).
expected_tensor = (
initial_tensor.detach().numpy() - self.lr * model_tensor.grad.numpy()
)
np.testing.assert_almost_equal(
model_tensor.detach().numpy(), expected_tensor
)
np.testing.assert_almost_equal(
actor_model_tensor.detach().numpy(), expected_tensor
)
def test_gradients_update(self):
"""Check that gradients get updated after one iteration."""
# Reset models.
self.model.load_state_dict(self.initial_model_dict)
self.actor_model.load_state_dict(self.initial_actor_model_dict)
# There should be no calculated gradient yet.
for p in self.model.parameters():
self.assertIsNone(p.grad)
for p in self.actor_model.parameters():
self.assertIsNone(p.grad)
polybeast.learn(*self.learn_args)
# Check that every parameter for the learner model has a gradient, and that
        # there is at least some non-zero gradient for each set of parameters.
for p in self.model.parameters():
self.assertIsNotNone(p.grad)
self.assertFalse(torch.equal(p.grad, torch.zeros_like(p.grad)))
# Check that the actor model has no gradients associated with it.
for p in self.actor_model.parameters():
self.assertIsNone(p.grad)
def test_non_zero_loss(self):
"""Check that the loss is not zero after one iteration."""
# Reset models.
self.model.load_state_dict(self.initial_model_dict)
self.actor_model.load_state_dict(self.initial_actor_model_dict)
polybeast.learn(*self.learn_args)
self.assertNotEqual(self.stats["total_loss"], 0.0)
self.assertNotEqual(self.stats["pg_loss"], 0.0)
self.assertNotEqual(self.stats["baseline_loss"], 0.0)
self.assertNotEqual(self.stats["entropy_loss"], 0.0)
if __name__ == "__main__":
unittest.main()
| 7,901
| 39.111675
| 85
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/tests/core_agent_state_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that the core state is handled correctly by the batching mechanism."""
import unittest
import threading
import subprocess
import torch
from torch import nn
import libtorchbeast
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
def initial_state(self):
return torch.zeros(1, 1)
def forward(self, inputs, core_state):
x = inputs["frame"]
notdone = (~inputs["done"]).float()
T, B, *_ = x.shape
for nd in notdone.unbind():
            # Reshape to (1, B) so the reset-by-done mask lines up with core_state.
            nd = nd.view(1, -1)
core_state = nd * core_state
core_state = core_state + 1
# Arbitrarily return action 1.
action = torch.ones((T, B), dtype=torch.int32)
return (action,), core_state
class CoreAgentStateTest(unittest.TestCase):
def setUp(self):
self.server_proc = subprocess.Popen(["python", "tests/core_agent_state_env.py"])
self.B = 2
self.T = 3
self.model = Net()
server_address = ["unix:/tmp/core_agent_state_test"]
self.learner_queue = libtorchbeast.BatchingQueue(
batch_dim=1,
minimum_batch_size=self.B,
maximum_batch_size=self.B,
check_inputs=True,
)
self.inference_batcher = libtorchbeast.DynamicBatcher(
batch_dim=1,
minimum_batch_size=1,
maximum_batch_size=1,
timeout_ms=100,
check_outputs=True,
)
self.actor = libtorchbeast.ActorPool(
unroll_length=self.T,
learner_queue=self.learner_queue,
inference_batcher=self.inference_batcher,
env_server_addresses=server_address,
initial_agent_state=self.model.initial_state(),
)
def inference(self):
for batch in self.inference_batcher:
batched_env_outputs, agent_state = batch.get_inputs()
frame, _, done, *_ = batched_env_outputs
# Check that when done is set we reset the environment.
# Since we only have one actor producing experience we will always
# have batch_size == 1, hence we can safely use item().
if done.item():
self.assertEqual(frame.item(), 0.0)
outputs = self.model(dict(frame=frame, done=done), agent_state)
batch.set_outputs(outputs)
def learn(self):
for i, tensors in enumerate(self.learner_queue):
batch, initial_agent_state = tensors
env_outputs, actor_outputs = batch
frame, _, done, *_ = env_outputs
# Make sure the last env_outputs of a rollout equals the first of the
# following one.
# This is guaranteed to be true if there is only one actor filling up
# the learner queue.
self.assertEqual(frame[self.T][0].item(), frame[0][1].item())
self.assertEqual(done[self.T][0].item(), done[0][1].item())
# Make sure the initial state equals the value of the frame at the beginning
# of the rollout. This has to be the case in our test since:
# - every call to forward increments the core state by one.
# - every call to step increments the value in the frame by one (modulo 5).
env_done_after = 5 # Matches self.done_after in core_agent_state_env.py.
self.assertEqual(
frame[0][0].item(), initial_agent_state[0][0].item() % env_done_after
)
self.assertEqual(
frame[0][1].item(), initial_agent_state[0][1].item() % env_done_after
)
if i >= 10:
# Stop execution.
self.learner_queue.close()
self.inference_batcher.close()
def test_core_agent_state(self):
def run():
self.actor.run()
threads = [
threading.Thread(target=self.inference),
threading.Thread(target=run),
]
# Start actor and inference thread.
for thread in threads:
thread.start()
self.learn()
for thread in threads:
thread.join()
def tearDown(self):
self.server_proc.terminate()
self.server_proc.wait()
if __name__ == "__main__":
unittest.main()
| 4,910
| 33.584507
| 88
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/tests/polybeast_inference_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for polybeast inference implementation."""
import unittest
import warnings
from unittest import mock
import torch
from torchbeast import polybeast_learner as polybeast
class InferenceTest(unittest.TestCase):
def setUp(self):
self.unroll_length = 1 # Inference called for every step.
self.batch_size = 4 # Arbitrary.
        self.frame_dimension = 84  # Has to match what the model expects.
self.num_actions = 6 # Specific to each environment.
        self.num_channels = 4  # Has to match the first conv layer of the net.
        self.core_output_size = 256  # Has to match what the model expects.
self.num_lstm_layers = 1 # As in the model.
self.frame = torch.ones(
self.unroll_length,
self.batch_size,
self.num_channels,
self.frame_dimension,
self.frame_dimension,
)
self.rewards = torch.ones(self.unroll_length, self.batch_size)
self.done = torch.zeros(self.unroll_length, self.batch_size, dtype=torch.uint8)
        self.episode_return = torch.ones(
            self.unroll_length, self.batch_size
        )  # Not used in the current implementation of inference.
        self.episode_step = torch.ones(
            self.unroll_length, self.batch_size
        )  # Not used in the current implementation of inference.
self.mock_batch = mock.Mock()
# Set the mock inference batcher to be iterable and return a mock_batch.
self.mock_inference_batcher = mock.MagicMock()
self.mock_inference_batcher.__iter__.return_value = iter([self.mock_batch])
def _test_inference(self, use_lstm, device):
model = polybeast.Net(num_actions=self.num_actions, use_lstm=use_lstm)
model.to(device)
agent_state = model.initial_state()
inputs = (
(
self.frame,
self.rewards,
self.done,
self.episode_return,
                self.episode_step,
),
agent_state,
)
# Set the behaviour of the methods of the mock batch.
self.mock_batch.get_inputs = mock.Mock(return_value=inputs)
self.mock_batch.set_outputs = mock.Mock()
# Preparing the mock flags. Could do with just a dict but using
# a Mock object for consistency.
mock_flags = mock.Mock()
mock_flags.actor_device = device
mock_flags.use_lstm = use_lstm
polybeast.inference(mock_flags, self.mock_inference_batcher, model)
# Assert the batch is used only once.
self.mock_batch.get_inputs.assert_called_once()
self.mock_batch.set_outputs.assert_called_once()
        # Check that set_outputs has been called with parameters of the expected shape.
batch_args, batch_kwargs = self.mock_batch.set_outputs.call_args
self.assertEqual(batch_kwargs, {})
model_outputs, *other_args = batch_args
self.assertEqual(other_args, [])
(action, policy_logits, baseline), core_state = model_outputs
self.assertSequenceEqual(action.shape, (self.unroll_length, self.batch_size))
self.assertSequenceEqual(
policy_logits.shape, (self.unroll_length, self.batch_size, self.num_actions)
)
self.assertSequenceEqual(baseline.shape, (self.unroll_length, self.batch_size))
for tensor in (action, policy_logits, baseline) + core_state:
self.assertEqual(tensor.device, torch.device("cpu"))
self.assertEqual(len(core_state), 2 if use_lstm else 0)
for core_state_element in core_state:
self.assertSequenceEqual(
core_state_element.shape,
(self.num_lstm_layers, self.batch_size, self.core_output_size),
)
def test_inference_cpu_no_lstm(self):
self._test_inference(use_lstm=False, device=torch.device("cpu"))
def test_inference_cuda_no_lstm(self):
if not torch.cuda.is_available():
warnings.warn("Not testing cuda as it's not available")
return
self._test_inference(use_lstm=False, device=torch.device("cuda"))
def test_inference_cpu_with_lstm(self):
self._test_inference(use_lstm=True, device=torch.device("cpu"))
def test_inference_cuda_with_lstm(self):
if not torch.cuda.is_available():
warnings.warn("Not testing cuda as it's not available")
return
self._test_inference(use_lstm=True, device=torch.device("cuda"))
if __name__ == "__main__":
unittest.main()
| 5,191
| 39.248062
| 89
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/torchbeast/layer.py
|
import math
import torch
from torch import nn
from torch.nn import functional as F
from torchbeast.fast_weight import fast_weight_delta
# fast_weight_memory is referenced by FastFastFFlayer and SMFWPlayer below but
# was not imported; the module path here is assumed to match fast_weight_delta.
from torchbeast.fast_weight import fast_weight_memory
from torchbeast.fast_transformers import fast_weight_sum
from torchbeast.rec_update_fwm_tanh import rec_update_fwm_tanh
from torchbeast.fast_weight_rnn_v2 import fast_rnn_v2
from torchbeast.self_ref_v0 import self_ref_v0
from torchbeast.self_ref_v1 import self_ref_v1
@torch.jit.script
def elu_p1(x):
return F.elu(x, 1., False) + 1.
@torch.jit.script
def sum_norm(x):
return x / x.sum(-1, keepdim=True)
@torch.jit.script
def sum_norm_eps(x):
return x / (x.sum(-1, keepdim=True) + 1e-5)
@torch.jit.script
def elu_p1_sum_norm_eps(x):
y = F.elu(x, 1., False) + 1.
return y / (y.sum(-1, keepdim=True) + 1e-5)
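# The helpers above are positive feature maps for linear attention:
# elu(x) + 1 keeps activations strictly positive, and the sum normalization
# bounds the resulting attention weights, which stabilizes the fast weight
# updates in the layers below.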
# A block of residual feed-forward layers in Transformer
class TransformerFFlayers(nn.Module):
def __init__(self, ff_dim, res_dim, dropout, use_layernorm=True):
super(TransformerFFlayers, self).__init__()
self.res_dim = res_dim
self.ff_dim = ff_dim
self.dropout = dropout
self.use_layernorm = use_layernorm
self.ff_layers = nn.Sequential(
nn.Linear(res_dim, ff_dim), nn.ReLU(inplace=False),
nn.Dropout(dropout),
nn.Linear(ff_dim, res_dim),
nn.Dropout(dropout),
)
if use_layernorm:
self.layer_norm = nn.LayerNorm(res_dim)
def forward(self, x):
out = self.layer_norm(x) if self.use_layernorm else x
out = self.ff_layers(out) + x
return out
# Fast weight layer with feed-forward fast net
# (linear transformer with the additive/sum update rule)
class AdditiveFastFFlayer(nn.Module):
def __init__(self, num_head, dim_head, in_dim, dropout):
super(AdditiveFastFFlayer, self).__init__()
self.num_head = num_head
self.dim_head = dim_head
self.in_dim = in_dim
self.fw_layer = fast_weight_sum
self.slow_net = nn.Linear(
in_dim, num_head * (3 * dim_head), bias=False)
self.layer_norm = nn.LayerNorm(in_dim)
self.out_linear = nn.Linear(num_head * dim_head, in_dim, bias=False)
self.drop = nn.Dropout(dropout)
def forward(self, x, state=None):
# x shape: (len, B, n_head * d_head)
slen, bsz, _ = x.size()
out = self.layer_norm(x)
qkv = self.slow_net(out)
qkv = qkv.view(slen, bsz, self.num_head, 3 * self.dim_head)
head_q, head_k, head_v = torch.split(qkv, (self.dim_head,) * 3, -1)
# reshape to (B, heads, len, dim)
head_q = head_q.permute(1, 2, 0, 3)
head_k = head_k.permute(1, 2, 0, 3)
head_v = head_v.permute(1, 2, 0, 3)
head_q = elu_p1_sum_norm_eps(head_q)
head_k = elu_p1_sum_norm_eps(head_k)
if state is not None:
fast_weights = state
else:
            assert False, "state must always be provided by the caller"
fast_weights = torch.zeros(
bsz, self.num_head, self.dim_head, self.dim_head,
device=head_k.device)
        assert torch.isnan(
            fast_weights).sum().item() == 0, "Before NaN: fast weights"
        out = self.fw_layer(head_q, head_k, head_v, fast_weights)
        assert torch.isnan(
            fast_weights).sum().item() == 0, "NaN: fast weights"
out = out.transpose(1, 2)
out = out.reshape(bsz, slen, self.num_head * self.dim_head)
out = out.transpose(0, 1)
# expect [qlen, B, n_head * d_head]
# linear projection
out = self.out_linear(out)
out = self.drop(out)
out = x + out
return out, fast_weights.clone()
# Fast weight layer with feed-forward fast net
class FastFFlayer(nn.Module):
def __init__(self, num_head, dim_head, in_dim, dropout):
super(FastFFlayer, self).__init__()
self.num_head = num_head
self.dim_head = dim_head
self.in_dim = in_dim
self.fw_layer = fast_weight_delta
self.slow_net = nn.Linear(
in_dim, num_head * (3 * dim_head + 1), bias=False)
self.layer_norm = nn.LayerNorm(in_dim)
self.out_linear = nn.Linear(num_head * dim_head, in_dim, bias=False)
self.drop = nn.Dropout(dropout)
def forward(self, x, state=None):
# x shape: (len, B, n_head * d_head)
slen, bsz, _ = x.size()
out = self.layer_norm(x)
qkvb = self.slow_net(out)
qkvb = qkvb.view(slen, bsz, self.num_head, 3 * self.dim_head + 1)
head_q, head_k, head_v, head_beta = torch.split(
qkvb, (self.dim_head,) * 3 + (1,), -1)
head_beta = torch.sigmoid(head_beta)
# reshape to (B, heads, len, dim)
head_q = head_q.permute(1, 2, 0, 3)
head_k = head_k.permute(1, 2, 0, 3)
head_v = head_v.permute(1, 2, 0, 3)
head_beta = head_beta.permute(1, 2, 0, 3)
head_q = elu_p1_sum_norm_eps(head_q)
head_k = elu_p1_sum_norm_eps(head_k)
if state is not None:
fast_weights = state
else:
            assert False, "state must always be provided by the caller"
fast_weights = torch.zeros(
bsz, self.num_head, self.dim_head, self.dim_head,
device=head_k.device)
        assert torch.isnan(
            fast_weights).sum().item() == 0, "Before NaN: fast weights"
        out = self.fw_layer(head_q, head_k, head_v, head_beta, fast_weights)
        assert torch.isnan(
            fast_weights).sum().item() == 0, "NaN: fast weights"
out = out.transpose(1, 2)
out = out.reshape(bsz, slen, self.num_head * self.dim_head)
out = out.transpose(0, 1)
# expect [qlen, B, n_head * d_head]
# linear projection
out = self.out_linear(out)
out = self.drop(out)
out = x + out
return out, fast_weights.clone()
# Fast weight layer with feed-forward fast net
class FastFastFFlayer(nn.Module):
def __init__(self, num_head, dim_head, in_dim, dropout):
super(FastFastFFlayer, self).__init__()
self.num_head = num_head
self.dim_head = dim_head
self.in_dim = in_dim
self.fw_layer = fast_weight_memory
self.very_fw_layer = fast_weight_memory
self.cached_fast_weights = nn.Parameter(
torch.zeros(1, self.num_head, self.dim_head,
3 * self.dim_head + 1),
requires_grad=False)
self.slow_net = nn.Linear(
in_dim, num_head * (5 * dim_head + 2), bias=False)
self.layer_norm = nn.LayerNorm(in_dim)
self.out_linear = nn.Linear(num_head * dim_head, in_dim, bias=False)
self.drop = nn.Dropout(dropout)
def forward(self, x, state=None):
# x shape: (len, B, n_head * d_head)
slen, bsz, _ = x.size()
out = self.layer_norm(x)
qkvb = self.slow_net(out)
qkvb = qkvb.view(slen, bsz, self.num_head, 5 * self.dim_head + 2)
head_q, head_k, head_v, head_beta = torch.split(
qkvb, (self.dim_head, self.dim_head, 3 * self.dim_head + 1, 1), -1)
head_beta = torch.sigmoid(head_beta)
# reshape to (B, heads, len, dim)
head_q = head_q.permute(1, 2, 0, 3)
head_k = head_k.permute(1, 2, 0, 3)
head_v = head_v.permute(1, 2, 0, 3)
head_beta = head_beta.permute(1, 2, 0, 3)
head_q = elu_p1_sum_norm_eps(head_q)
head_k = elu_p1_sum_norm_eps(head_k)
if state is not None:
fast_weights, very_fast_weights = state
else:
            assert False, "state must always be provided by the caller"
# For the delta-delta, this one is expected to be carried
# over across episodes.
fast_weights = torch.zeros(
bsz, self.num_head, self.dim_head, 3 * self.dim_head + 1,
device=head_k.device)
# For delta
very_fast_weights = torch.zeros(
bsz, self.num_head, self.dim_head, self.dim_head,
device=head_k.device)
assert torch.isnan(
fast_weights).sum().item() == 0, "Before NaN: fast weights"
# forward fast weight
out = self.fw_layer(head_q, head_k, head_v, head_beta, fast_weights)
assert torch.isnan(
fast_weights).sum().item() == 0, "NaN: fast weights"
fast_head_q, fast_head_k, fast_head_v, fast_beta = torch.split(
out, (self.dim_head,) * 3 + (1,), -1)
fast_head_q = elu_p1_sum_norm_eps(fast_head_q)
fast_head_k = elu_p1_sum_norm_eps(fast_head_k)
fast_beta = torch.sigmoid(fast_beta)
# forward very fast weight
out = self.very_fw_layer(fast_head_q, fast_head_k, fast_head_v,
fast_beta, very_fast_weights)
out = out.transpose(1, 2)
out = out.reshape(bsz, slen, self.num_head * self.dim_head)
out = out.transpose(0, 1)
# expect [qlen, B, n_head * d_head]
# linear projection
out = self.out_linear(out)
out = self.drop(out)
out = x + out
return out, (fast_weights.clone(), very_fast_weights.clone())
# self referential weight matrix
class PseudoSRlayer(nn.Module):
def __init__(self, num_head, dim_head, in_dim, dropout):
super(PseudoSRlayer, self).__init__()
self.num_head = num_head
self.dim_head = dim_head
self.in_dim = in_dim
n_head = num_head
d_head = dim_head
self.W_y = nn.Parameter(torch.Tensor(n_head, d_head, d_head),
requires_grad=True)
self.layer_norm = nn.LayerNorm(in_dim)
self.out_linear = nn.Linear(num_head * dim_head, in_dim, bias=False)
self.drop = nn.Dropout(dropout)
self.reset_rec_parameters()
def reset_rec_parameters(self):
std = 1.0 / math.sqrt(self.dim_head)
nn.init.normal_(self.W_y, mean=0., std=std)
def forward(self, h, state=None):
# x shape: (len, B, n_head * d_head)
slen, bsz, _ = h.size()
x = h.reshape(slen, bsz, self.num_head, self.dim_head)
x = x.reshape(slen * bsz, self.num_head, self.dim_head)
x = x.permute(1, 0, 2) # (H, len*B, dim)
out = torch.bmm(x, self.W_y) # (H, len*B, dim)
out = out.permute(1, 0, 2) # (len*B, H, dim)
out = out.reshape(slen, bsz, self.num_head, self.dim_head)
out = out.reshape(slen, bsz, self.num_head * self.dim_head)
# expect [qlen, B, n_head * d_head]
# linear projection
out = self.out_linear(out)
out = self.drop(out)
out = h + out
        # PseudoSRlayer only applies a fixed slow weight matrix, so there is no
        # weight shift to compute and no extra state to carry over.
return out, state
# self referential weight matrix
class SRlayer(nn.Module):
def __init__(self, num_head, dim_head, in_dim, dropout):
super(SRlayer, self).__init__()
self.num_head = num_head
self.dim_head = dim_head
self.in_dim = in_dim
self.fw_layer = self_ref_v0
n_head = num_head
d_head = dim_head
self.W_y = nn.Parameter(torch.Tensor(1, n_head, d_head, d_head),
requires_grad=True)
self.W_q = nn.Parameter(torch.Tensor(1, n_head, d_head, d_head),
requires_grad=True)
self.W_k = nn.Parameter(torch.Tensor(1, n_head, d_head, d_head),
requires_grad=True)
self.w_b = nn.Parameter(torch.Tensor(1, n_head, d_head, 4),
requires_grad=True)
self.layer_norm = nn.LayerNorm(in_dim)
self.out_linear = nn.Linear(num_head * dim_head, in_dim, bias=False)
self.drop = nn.Dropout(dropout)
self.reset_rec_parameters()
def reset_rec_parameters(self):
std = 1.0 / math.sqrt(self.dim_head)
nn.init.normal_(self.W_y, mean=0., std=std)
nn.init.normal_(self.W_q, mean=0., std=std)
nn.init.normal_(self.W_k, mean=0., std=std)
# tried -1 for beta but 0 seems to be better
# nn.init.normal_(self.w_b, mean=-5., std=std)
nn.init.normal_(self.w_b, mean=0., std=std)
def forward(self, h, state=None):
# x shape: (len, B, n_head * d_head)
slen, bsz, _ = h.size()
# out = self.layer_norm(x)
x = h.reshape(slen, bsz, self.num_head, self.dim_head)
x = F.softmax(x, dim=-1)
# reshape to (B, heads, len, dim)
x = x.permute(1, 2, 0, 3)
        if state is not None:  # state stores the shift from the current base weights.
W_y_bc, W_q_bc, W_k_bc, w_b_bc = state
else:
            assert False, "state must always be provided by the caller"
W_y_bc = W_y_bc + self.W_y.repeat(bsz, 1, 1, 1)
W_q_bc = W_q_bc + self.W_q.repeat(bsz, 1, 1, 1)
W_k_bc = W_k_bc + self.W_k.repeat(bsz, 1, 1, 1)
w_b_bc = w_b_bc + self.w_b.repeat(bsz, 1, 1, 1)
out = self.fw_layer(x, W_y_bc, W_q_bc, W_k_bc, w_b_bc)
out = out.transpose(1, 2)
out = out.reshape(bsz, slen, self.num_head * self.dim_head)
out = out.transpose(0, 1)
# expect [qlen, B, n_head * d_head]
# linear projection
out = self.out_linear(out)
out = self.drop(out)
out = h + out
# out = self.layer_norm(h) + out
        # Compute the new shift: keep only the (detached) difference from the
        # base weights, so the base parameters receive fresh gradients every
        # segment while the self-modifications carry over across segments.
        # (Not very efficient; ideally the kernel would return this directly.)
W_y_bc = W_y_bc.detach() - self.W_y.repeat(bsz, 1, 1, 1)
W_q_bc = W_q_bc.detach() - self.W_q.repeat(bsz, 1, 1, 1)
W_k_bc = W_k_bc.detach() - self.W_k.repeat(bsz, 1, 1, 1)
w_b_bc = w_b_bc.detach() - self.w_b.repeat(bsz, 1, 1, 1)
state = (
W_y_bc.detach(), W_q_bc.detach(), W_k_bc.detach(), w_b_bc.detach())
return out, state
# self referential weight matrix, without carrying weights over segment
class NoCarryOverSRlayer(nn.Module):
def __init__(self, num_head, dim_head, in_dim, dropout):
super(NoCarryOverSRlayer, self).__init__()
self.num_head = num_head
self.dim_head = dim_head
self.in_dim = in_dim
self.fw_layer = self_ref_v0
n_head = num_head
d_head = dim_head
self.W_y = nn.Parameter(torch.Tensor(1, n_head, d_head, d_head),
requires_grad=True)
self.W_q = nn.Parameter(torch.Tensor(1, n_head, d_head, d_head),
requires_grad=True)
self.W_k = nn.Parameter(torch.Tensor(1, n_head, d_head, d_head),
requires_grad=True)
self.w_b = nn.Parameter(torch.Tensor(1, n_head, d_head, 4),
requires_grad=True)
self.layer_norm = nn.LayerNorm(in_dim)
self.out_linear = nn.Linear(num_head * dim_head, in_dim, bias=False)
self.drop = nn.Dropout(dropout)
self.reset_rec_parameters()
def reset_rec_parameters(self):
std = 1.0 / math.sqrt(self.dim_head)
nn.init.normal_(self.W_y, mean=0., std=std)
nn.init.normal_(self.W_q, mean=0., std=std)
nn.init.normal_(self.W_k, mean=0., std=std)
# tried -1 for beta but 0 seems to be better
nn.init.normal_(self.w_b, mean=0., std=std)
def forward(self, h, state=None):
# x shape: (len, B, n_head * d_head)
slen, bsz, _ = h.size()
# out = self.layer_norm(x)
x = h.reshape(slen, bsz, self.num_head, self.dim_head)
x = F.softmax(x, dim=-1)
# reshape to (B, heads, len, dim)
x = x.permute(1, 2, 0, 3)
W_y_bc = self.W_y.repeat(bsz, 1, 1, 1)
W_q_bc = self.W_q.repeat(bsz, 1, 1, 1)
W_k_bc = self.W_k.repeat(bsz, 1, 1, 1)
w_b_bc = self.w_b.repeat(bsz, 1, 1, 1)
out = self.fw_layer(x, W_y_bc, W_q_bc, W_k_bc, w_b_bc)
out = out.transpose(1, 2)
out = out.reshape(bsz, slen, self.num_head * self.dim_head)
out = out.transpose(0, 1)
# expect [qlen, B, n_head * d_head]
# linear projection
out = self.out_linear(out)
out = self.drop(out)
out = h + out
# there is no need for extra state here...
return out, state
# self modifying FWP layer
class SMFWPlayer(nn.Module):
def __init__(self, num_head, dim_head, in_dim, dropout):
super(SMFWPlayer, self).__init__()
self.num_head = num_head
self.dim_head = dim_head
self.in_dim = in_dim
self.fw_layer = self_ref_v1
n_head = num_head
d_head = dim_head
y_d_head = 3 * dim_head + 1
self.y_d_head = y_d_head
self.W_y = nn.Parameter(torch.Tensor(1, n_head, d_head, y_d_head),
requires_grad=True)
self.W_q = nn.Parameter(torch.Tensor(1, n_head, d_head, d_head),
requires_grad=True)
self.W_k = nn.Parameter(torch.Tensor(1, n_head, d_head, d_head),
requires_grad=True)
self.w_b = nn.Parameter(torch.Tensor(1, n_head, d_head, 4),
requires_grad=True)
self.layer_norm = nn.LayerNorm(in_dim)
self.out_linear = nn.Linear(num_head * dim_head, in_dim, bias=False)
self.drop = nn.Dropout(dropout)
self.reset_rec_parameters()
def reset_rec_parameters(self):
std = 1.0 / math.sqrt(self.dim_head)
nn.init.normal_(self.W_y, mean=0., std=std)
nn.init.normal_(self.W_q, mean=0., std=std)
nn.init.normal_(self.W_k, mean=0., std=std)
# tried -1 for beta but 0 seems to be better
nn.init.normal_(self.w_b, mean=0., std=std)
def forward(self, h, state=None):
# x shape: (len, B, n_head * d_head)
slen, bsz, _ = h.size()
# out = self.layer_norm(x)
x = h.reshape(slen, bsz, self.num_head, self.dim_head)
x = F.softmax(x, dim=-1)
# reshape to (B, heads, len, dim)
x = x.permute(1, 2, 0, 3)
        if state is not None:  # state stores the shift from the current base weights.
W_y_bc, W_q_bc, W_k_bc, w_b_bc, fast_weights = state
else:
            assert False, "state must always be provided by the caller"
assert torch.isnan(
fast_weights).sum().item() == 0, "Before NaN: fast weights"
W_y_bc = W_y_bc + self.W_y.repeat(bsz, 1, 1, 1)
W_q_bc = W_q_bc + self.W_q.repeat(bsz, 1, 1, 1)
W_k_bc = W_k_bc + self.W_k.repeat(bsz, 1, 1, 1)
w_b_bc = w_b_bc + self.w_b.repeat(bsz, 1, 1, 1)
fast_qkvb = self.fw_layer(x, W_y_bc, W_q_bc, W_k_bc, w_b_bc)
fast_head_q, fast_head_k, fast_head_v, fast_beta = torch.split(
fast_qkvb, (self.dim_head,) * 3 + (1,), -1)
fast_head_q = F.softmax(fast_head_q, dim=-1)
fast_head_k = F.softmax(fast_head_k, dim=-1)
fast_beta = torch.sigmoid(fast_beta)
out = fast_weight_memory(
fast_head_q, fast_head_k, fast_head_v, fast_beta, fast_weights)
out = out.transpose(1, 2)
out = out.reshape(bsz, slen, self.num_head * self.dim_head)
out = out.transpose(0, 1)
# expect [qlen, B, n_head * d_head]
# linear projection
out = self.out_linear(out)
out = self.drop(out)
out = h + out
# out = self.layer_norm(h) + out
        # Compute the new shift: keep only the (detached) difference from the
        # base weights (see SRlayer above); fast_weights are carried over
        # detached as well.
W_y_bc = W_y_bc.detach() - self.W_y.repeat(bsz, 1, 1, 1)
W_q_bc = W_q_bc.detach() - self.W_q.repeat(bsz, 1, 1, 1)
W_k_bc = W_k_bc.detach() - self.W_k.repeat(bsz, 1, 1, 1)
w_b_bc = w_b_bc.detach() - self.w_b.repeat(bsz, 1, 1, 1)
state = (
W_y_bc.detach(), W_q_bc.detach(), W_k_bc.detach(), w_b_bc.detach(),
fast_weights.detach())
return out, state
# Fast weight layer with RNN fast net
class FastRNNlayer(nn.Module):
def __init__(self, num_head, dim_head, in_dim, dropout):
super(FastRNNlayer, self).__init__()
self.num_head = num_head
self.dim_head = dim_head
self.in_dim = in_dim
self.fw_layer = fast_weight_delta
self.rec_fw_layer = fast_rnn_v2
self.slow_net = nn.Linear(
in_dim, num_head * (5 * dim_head + 2), bias=False)
self.layer_norm = nn.LayerNorm(in_dim)
self.out_linear = nn.Linear(num_head * dim_head, in_dim, bias=False)
self.drop = nn.Dropout(dropout)
def forward(self, x, state=None):
# x shape: (len, B, n_head * d_head)
slen, bsz, _ = x.size()
out = self.layer_norm(x)
qkvb = self.slow_net(out)
qkvb = qkvb.view(slen, bsz, self.num_head, 5 * self.dim_head + 2)
(head_q, head_k, head_v, rec_head_k, rec_head_v, head_beta,
rec_beta) = torch.split(qkvb, (self.dim_head,) * 5 + (1,) * 2, -1)
head_beta = torch.sigmoid(head_beta)
rec_beta = torch.sigmoid(rec_beta)
# reshape to (B, heads, len, dim)
head_q = head_q.permute(1, 2, 0, 3)
head_k = head_k.permute(1, 2, 0, 3)
head_v = head_v.permute(1, 2, 0, 3)
head_beta = head_beta.permute(1, 2, 0, 3)
rec_head_k = rec_head_k.permute(1, 2, 0, 3)
rec_head_v = rec_head_v.permute(1, 2, 0, 3)
rec_beta = rec_beta.permute(1, 2, 0, 3)
head_q = F.softmax(head_q, dim=-1)
head_k = F.softmax(head_k, dim=-1)
# make recurrent key consistent with rec activation
rec_head_k = F.softmax(rec_head_k, dim=-1)
# # normalize k and q, crucial for stable training.
# head_k = sum_norm(head_k)
# head_q = sum_norm(head_q)
if state is None:
fast_weights = torch.zeros(
bsz, self.num_head, self.dim_head, self.dim_head,
device=head_k.device)
rec_fast_weights = torch.zeros(
bsz, self.num_head, self.dim_head, self.dim_head,
device=head_k.device)
state0 = torch.zeros(
bsz, self.num_head, 1, self.dim_head, device=head_k.device)
else:
fast_weights, rec_fast_weights, state0 = state
        assert torch.isnan(
            fast_weights).sum().item() == 0, "Before NaN: fast weights"
z_out = self.fw_layer(
head_q, head_k, head_v, head_beta, fast_weights)
out = self.rec_fw_layer(
z_out, rec_head_k, rec_head_v, rec_fast_weights, rec_beta, state0)
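        # Carry the output at the last position over as the initial RNN state
        # for the next segment.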
state0_next = out[:, :, -1, :].clone()
state0_next = state0_next.unsqueeze(2)
out = out.transpose(1, 2)
out = out.reshape(bsz, slen, self.num_head * self.dim_head)
out = out.transpose(0, 1)
# expect [qlen, B, n_head * d_head]
# linear projection
out = self.out_linear(out)
out = self.drop(out)
out = x + out
return out, (
fast_weights.clone(), rec_fast_weights.clone(), state0_next)
class RecUpdateTanhFastFFlayer(nn.Module):
def __init__(self, num_head, dim_head, in_dim, dropout):
super(RecUpdateTanhFastFFlayer, self).__init__()
self.num_head = num_head
self.dim_head = dim_head
self.in_dim = in_dim
self.fw_layer = rec_update_fwm_tanh
self.slow_net = nn.Linear(
in_dim, num_head * (3 * dim_head + 1), bias=False)
self.R_q = nn.Parameter(torch.Tensor(1, num_head, dim_head, dim_head),
requires_grad=True)
self.R_k = nn.Parameter(torch.Tensor(1, num_head, dim_head, dim_head),
requires_grad=True)
self.R_v = nn.Parameter(torch.Tensor(1, num_head, dim_head, dim_head),
requires_grad=True)
self.r_b = nn.Parameter(torch.Tensor(1, num_head, 1, dim_head),
requires_grad=True)
self.layer_norm = nn.LayerNorm(in_dim)
self.out_linear = nn.Linear(num_head * dim_head, in_dim, bias=False)
self.drop = nn.Dropout(dropout)
self.reset_rec_parameters()
def reset_rec_parameters(self):
std = 1.0 / math.sqrt(self.dim_head)
nn.init.normal_(self.R_q, mean=0., std=std)
nn.init.normal_(self.R_k, mean=0., std=std)
nn.init.normal_(self.R_v, mean=0., std=std)
nn.init.normal_(self.r_b, mean=0., std=std)
def forward(self, x, state=None):
# x shape: (len, B, n_head * d_head)
slen, bsz, _ = x.size()
out = self.layer_norm(x)
qkvb = self.slow_net(out)
qkvb = qkvb.view(slen, bsz, self.num_head, 3 * self.dim_head + 1)
head_q, head_k, head_v, head_beta = torch.split(
qkvb, (self.dim_head,) * 3 + (1,), -1)
# reshape to (B, heads, len, dim)
head_q = head_q.permute(1, 2, 0, 3)
head_k = head_k.permute(1, 2, 0, 3)
head_v = head_v.permute(1, 2, 0, 3)
head_beta = head_beta.permute(1, 2, 0, 3)
if state is None:
fast_weights = torch.zeros(
bsz, self.num_head, self.dim_head, self.dim_head,
device=head_k.device)
state0 = torch.zeros(
bsz, self.num_head, 1, self.dim_head, device=head_k.device)
else:
fast_weights, state0 = state
out = self.fw_layer(head_q, head_k, head_v, head_beta,
self.R_q, self.R_k, self.R_v, self.r_b,
fast_weights, state0)
state0_next = out[:, :, -1, :].clone()
state0_next = state0_next.unsqueeze(2)
out = out.transpose(1, 2)
out = out.reshape(bsz, slen, self.num_head * self.dim_head)
out = out.transpose(0, 1)
# expect [qlen, B, n_head * d_head]
# linear projection
out = self.out_linear(out)
out = self.drop(out)
out = x + out
return out, (fast_weights.clone(), state0_next)
# Linear Transformer with the additive (sum) fast weight update rule.
class LinearTransformerLayer(nn.Module):
def __init__(self, input_dim, hidden_size, num_layers, num_head, dim_head,
dim_ff, dropout):
super(LinearTransformerLayer, self).__init__()
assert num_head * dim_head == hidden_size
self.input_dim = input_dim
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_head = num_head
self.dim_head = dim_head
self.dim_ff = dim_ff
self.dropout = dropout
self.input_proj = nn.Linear(input_dim, hidden_size)
fwm_layers = []
ff_layers = []
for _ in range(num_layers): # each "layer" consists of two sub-layers
fwm_layers.append(
AdditiveFastFFlayer(num_head, dim_head, hidden_size, dropout))
ff_layers.append(
TransformerFFlayers(dim_ff, hidden_size, dropout))
self.fwm_layers = nn.ModuleList(fwm_layers)
self.ff_layers = nn.ModuleList(ff_layers)
def forward(self, x, core_state):
# core_state is a tuple with self.num_layers elements
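        # Each element carries a leading singleton dimension, removed before
        # the fwm layer call and restored afterwards; this lets the states be
        # stacked along dim 0 by the surrounding actor/learner machinery
        # (inferred from the squeeze(0)/unsqueeze(0) pattern below).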
state_list = []
out = self.input_proj(x) # shape (len, B, dim)
for i in range(self.num_layers):
out, out_state = self.fwm_layers[i](out, state=core_state[i].squeeze(0))
state_list.append(out_state.unsqueeze(0).clone())
out = self.ff_layers[i](out)
state_tuple = tuple(state_list)
return out, state_tuple
# DeeperNet, Transformer block without self-attention
class DeeperNetLayer(nn.Module):
def __init__(self, input_dim, hidden_size, num_layers, dim_ff, dropout):
super(DeeperNetLayer, self).__init__()
self.input_dim = input_dim
self.hidden_size = hidden_size
self.num_layers = num_layers
self.dim_ff = dim_ff
self.dropout = dropout
self.input_proj = nn.Linear(input_dim, hidden_size)
ff_layers = []
for _ in range(num_layers): # each "layer" consists of two sub-layers
ff_layers.append(
TransformerFFlayers(dim_ff, hidden_size, dropout))
self.ff_layers = nn.ModuleList(ff_layers)
def forward(self, x):
out = self.input_proj(x) # shape (len, B, dim)
for i in range(self.num_layers):
out = self.ff_layers[i](out)
return out
# DeltaNet: linear Transformer with the delta fast weight update rule.
class DeltaNetLayer(nn.Module):
def __init__(self, input_dim, hidden_size, num_layers, num_head, dim_head,
dim_ff, dropout):
super(DeltaNetLayer, self).__init__()
assert num_head * dim_head == hidden_size
self.input_dim = input_dim
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_head = num_head
self.dim_head = dim_head
self.dim_ff = dim_ff
self.dropout = dropout
self.input_proj = nn.Linear(input_dim, hidden_size)
fwm_layers = []
ff_layers = []
for _ in range(num_layers): # each "layer" consists of two sub-layers
fwm_layers.append(
FastFFlayer(num_head, dim_head, hidden_size, dropout))
ff_layers.append(
TransformerFFlayers(dim_ff, hidden_size, dropout))
self.fwm_layers = nn.ModuleList(fwm_layers)
self.ff_layers = nn.ModuleList(ff_layers)
def forward(self, x, core_state):
# core_state is a tuple with self.num_layers elements
state_list = []
out = self.input_proj(x) # shape (len, B, dim)
for i in range(self.num_layers):
out, out_state = self.fwm_layers[i](out, state=core_state[i].squeeze(0))
state_list.append(out_state.unsqueeze(0).clone())
out = self.ff_layers[i](out)
state_tuple = tuple(state_list)
return out, state_tuple
class FastFFRecUpdateTanhLayer(nn.Module):
def __init__(self, input_dim, hidden_size, num_layers, num_head, dim_head,
dim_ff, dropout):
super(FastFFRecUpdateTanhLayer, self).__init__()
assert num_head * dim_head == hidden_size
self.input_dim = input_dim
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_head = num_head
self.dim_head = dim_head
self.dim_ff = dim_ff
self.dropout = dropout
self.input_proj = nn.Linear(input_dim, hidden_size)
fwm_layers = []
ff_layers = []
for _ in range(num_layers): # each "layer" consists of two sub-layers
fwm_layers.append(
RecUpdateTanhFastFFlayer(
num_head, dim_head, hidden_size, dropout))
ff_layers.append(
TransformerFFlayers(dim_ff, hidden_size, dropout))
self.fwm_layers = nn.ModuleList(fwm_layers)
self.ff_layers = nn.ModuleList(ff_layers)
def forward(self, x, core_state):
fw_states, rnn_states = core_state
        # Each of fw_states and rnn_states is a tuple with self.num_layers elements.
fw_state_list = []
rnn_state_list = []
out = self.input_proj(x) # shape (len, B, dim)
for i in range(self.num_layers):
out, out_state = self.fwm_layers[i](
out, state=(fw_states[i].squeeze(0), rnn_states[i].squeeze(0)))
fw_state_list.append(out_state[0].unsqueeze(0).clone())
rnn_state_list.append(out_state[1].unsqueeze(0).clone())
out = self.ff_layers[i](out)
fw_state_tuple = tuple(fw_state_list)
rnn_state_tuple = tuple(rnn_state_list)
state_tuple = (fw_state_tuple, rnn_state_tuple)
return out, state_tuple
class FastRNNModelLayer(nn.Module):
def __init__(self, input_dim, hidden_size, num_layers, num_head, dim_head,
dim_ff, dropout):
super(FastRNNModelLayer, self).__init__()
assert num_head * dim_head == hidden_size
self.input_dim = input_dim
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_head = num_head
self.dim_head = dim_head
self.dim_ff = dim_ff
self.dropout = dropout
self.input_proj = nn.Linear(input_dim, hidden_size)
fwm_layers = []
ff_layers = []
for _ in range(num_layers): # each "layer" consists of two sub-layers
fwm_layers.append(
FastRNNlayer(num_head, dim_head, hidden_size, dropout))
ff_layers.append(
TransformerFFlayers(dim_ff, hidden_size, dropout))
self.fwm_layers = nn.ModuleList(fwm_layers)
self.ff_layers = nn.ModuleList(ff_layers)
def forward(self, x, core_state):
fw_states, rec_fw_states, rnn_states = core_state
        # Each of the three state tuples has self.num_layers elements.
fw_state_list = []
rec_fw_state_list = []
rnn_state_list = []
out = self.input_proj(x) # shape (len, B, dim)
for i in range(self.num_layers):
out, out_state = self.fwm_layers[i](
out,
state=(fw_states[i].squeeze(0), rec_fw_states[i].squeeze(0),
rnn_states[i].squeeze(0)))
fw_state_list.append(out_state[0].unsqueeze(0).clone())
rec_fw_state_list.append(out_state[1].unsqueeze(0).clone())
rnn_state_list.append(out_state[2].unsqueeze(0).clone())
out = self.ff_layers[i](out)
fw_state_tuple = tuple(fw_state_list)
rec_fw_state_tuple = tuple(rec_fw_state_list)
rnn_state_tuple = tuple(rnn_state_list)
state_tuple = (fw_state_tuple, rec_fw_state_tuple, rnn_state_tuple)
return out, state_tuple
# Delta-delta: a delta-rule fast net whose queries/keys/values are themselves
# produced by another delta-rule fast weight memory (see FastFastFFlayer).
class DeltaDeltaNetLayer(nn.Module):
def __init__(self, input_dim, hidden_size, num_layers, num_head, dim_head,
dim_ff, dropout):
super(DeltaDeltaNetLayer, self).__init__()
assert num_head * dim_head == hidden_size
self.input_dim = input_dim
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_head = num_head
self.dim_head = dim_head
self.dim_ff = dim_ff
self.dropout = dropout
self.input_proj = nn.Linear(input_dim, hidden_size)
fwm_layers = []
ff_layers = []
for _ in range(num_layers): # each "layer" consists of two sub-layers
fwm_layers.append(
FastFastFFlayer(num_head, dim_head, hidden_size, dropout))
ff_layers.append(
TransformerFFlayers(dim_ff, hidden_size, dropout))
self.fwm_layers = nn.ModuleList(fwm_layers)
self.ff_layers = nn.ModuleList(ff_layers)
def forward(self, x, core_state):
fw_states, very_fw_states = core_state
        # Each of fw_states and very_fw_states is a tuple with self.num_layers elements.
fw_state_list = []
very_fw_state_list = []
out = self.input_proj(x) # shape (len, B, dim)
for i in range(self.num_layers):
out, out_state = self.fwm_layers[i](
out,
state=(fw_states[i].squeeze(0), very_fw_states[i].squeeze(0)))
fw_state_list.append(out_state[0].unsqueeze(0).clone())
very_fw_state_list.append(out_state[1].unsqueeze(0).clone())
out = self.ff_layers[i](out)
fw_state_tuple = tuple(fw_state_list)
very_fw_state_tuple = tuple(very_fw_state_list)
state_tuple = (fw_state_tuple, very_fw_state_tuple)
return out, state_tuple
# Simple Self-Referential layer
class SRNetLayer(nn.Module):
def __init__(self, input_dim, hidden_size, num_layers, num_head, dim_head,
dim_ff, dropout):
super(SRNetLayer, self).__init__()
assert num_head * dim_head == hidden_size
self.input_dim = input_dim
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_head = num_head
self.dim_head = dim_head
self.dim_ff = dim_ff
self.dropout = dropout
self.input_proj = nn.Linear(input_dim, hidden_size)
fwm_layers = []
ff_layers = []
for _ in range(num_layers): # each "layer" consists of two sub-layers
fwm_layers.append(
SRlayer(num_head, dim_head, hidden_size, dropout))
ff_layers.append(
TransformerFFlayers(dim_ff, hidden_size, dropout))
self.fwm_layers = nn.ModuleList(fwm_layers)
self.ff_layers = nn.ModuleList(ff_layers)
def forward(self, x, core_state):
Wy_states, Wq_states, Wk_states, wb_states = core_state
        # Each of the four weight-state tuples has self.num_layers elements.
Wy_state_list = []
Wq_state_list = []
Wk_state_list = []
wb_state_list = []
out = self.input_proj(x) # shape (len, B, dim)
for i in range(self.num_layers):
out, out_state = self.fwm_layers[i](
out,
state=(Wy_states[i].squeeze(0), Wq_states[i].squeeze(0),
Wk_states[i].squeeze(0), wb_states[i].squeeze(0)))
Wy_state_list.append(out_state[0].unsqueeze(0).clone())
Wq_state_list.append(out_state[1].unsqueeze(0).clone())
Wk_state_list.append(out_state[2].unsqueeze(0).clone())
wb_state_list.append(out_state[3].unsqueeze(0).clone())
out = self.ff_layers[i](out)
Wy_state_tuple = tuple(Wy_state_list)
Wq_state_tuple = tuple(Wq_state_list)
Wk_state_tuple = tuple(Wk_state_list)
wb_state_tuple = tuple(wb_state_list)
state_tuple = (
Wy_state_tuple, Wq_state_tuple, Wk_state_tuple, wb_state_tuple)
return out, state_tuple
# Pseudo self-referential layer (static slow weights; no self-modification)
class PseudoSRNetLayer(nn.Module):
def __init__(self, input_dim, hidden_size, num_layers, num_head, dim_head,
dim_ff, dropout):
super(PseudoSRNetLayer, self).__init__()
assert num_head * dim_head == hidden_size
self.input_dim = input_dim
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_head = num_head
self.dim_head = dim_head
self.dim_ff = dim_ff
self.dropout = dropout
self.input_proj = nn.Linear(input_dim, hidden_size)
fwm_layers = []
ff_layers = []
for _ in range(num_layers): # each "layer" consists of two sub-layers
fwm_layers.append(
PseudoSRlayer(num_head, dim_head, hidden_size, dropout))
ff_layers.append(
TransformerFFlayers(dim_ff, hidden_size, dropout))
self.fwm_layers = nn.ModuleList(fwm_layers)
self.ff_layers = nn.ModuleList(ff_layers)
def forward(self, x, core_state):
out = self.input_proj(x) # shape (len, B, dim)
for i in range(self.num_layers):
out, _ = self.fwm_layers[i](out)
out = self.ff_layers[i](out)
return out, core_state
# Simple Self-Referential layer, no carry over weights
class NoCarryOverSRNetLayer(nn.Module):
def __init__(self, input_dim, hidden_size, num_layers, num_head, dim_head,
dim_ff, dropout):
super(NoCarryOverSRNetLayer, self).__init__()
assert num_head * dim_head == hidden_size
self.input_dim = input_dim
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_head = num_head
self.dim_head = dim_head
self.dim_ff = dim_ff
self.dropout = dropout
self.input_proj = nn.Linear(input_dim, hidden_size)
fwm_layers = []
ff_layers = []
for _ in range(num_layers): # each "layer" consists of two sub-layers
fwm_layers.append(
NoCarryOverSRlayer(num_head, dim_head, hidden_size, dropout))
ff_layers.append(
TransformerFFlayers(dim_ff, hidden_size, dropout))
self.fwm_layers = nn.ModuleList(fwm_layers)
self.ff_layers = nn.ModuleList(ff_layers)
def forward(self, x, core_state):
out = self.input_proj(x) # shape (len, B, dim)
for i in range(self.num_layers):
out, _ = self.fwm_layers[i](out, state=core_state)
out = self.ff_layers[i](out)
return out, core_state
# Simple self-modifying FWP layer
class SMFWPNetLayer(nn.Module):
def __init__(self, input_dim, hidden_size, num_layers, num_head, dim_head,
dim_ff, dropout):
super(SMFWPNetLayer, self).__init__()
assert num_head * dim_head == hidden_size
self.input_dim = input_dim
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_head = num_head
self.dim_head = dim_head
self.dim_ff = dim_ff
self.dropout = dropout
self.input_proj = nn.Linear(input_dim, hidden_size)
fwm_layers = []
ff_layers = []
for _ in range(num_layers): # each "layer" consists of two sub-layers
fwm_layers.append(
SMFWPlayer(num_head, dim_head, hidden_size, dropout))
ff_layers.append(
TransformerFFlayers(dim_ff, hidden_size, dropout))
self.fwm_layers = nn.ModuleList(fwm_layers)
self.ff_layers = nn.ModuleList(ff_layers)
def forward(self, x, core_state):
Wy_states, Wq_states, Wk_states, wb_states, fw_states = core_state
        # Each of the five state tuples has self.num_layers elements.
Wy_state_list = []
Wq_state_list = []
Wk_state_list = []
wb_state_list = []
fw_state_list = []
out = self.input_proj(x) # shape (len, B, dim)
for i in range(self.num_layers):
out, out_state = self.fwm_layers[i](
out,
state=(Wy_states[i].squeeze(0), Wq_states[i].squeeze(0),
Wk_states[i].squeeze(0), wb_states[i].squeeze(0),
fw_states[i].squeeze(0))
)
Wy_state_list.append(out_state[0].unsqueeze(0).clone())
Wq_state_list.append(out_state[1].unsqueeze(0).clone())
Wk_state_list.append(out_state[2].unsqueeze(0).clone())
wb_state_list.append(out_state[3].unsqueeze(0).clone())
fw_state_list.append(out_state[4].unsqueeze(0).clone())
out = self.ff_layers[i](out)
Wy_state_tuple = tuple(Wy_state_list)
Wq_state_tuple = tuple(Wq_state_list)
Wk_state_tuple = tuple(Wk_state_list)
wb_state_tuple = tuple(wb_state_list)
fw_state_tuple = tuple(fw_state_list)
state_tuple = (
Wy_state_tuple, Wq_state_tuple, Wk_state_tuple, wb_state_tuple,
fw_state_tuple)
return out, state_tuple
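# Minimal smoke test (added for illustration, not part of the original file).
# PseudoSRNetLayer only uses plain torch ops, so this should run on CPU,
# assuming the kernel imports at the top of this module succeed; all shapes
# below are arbitrary assumptions.
if __name__ == "__main__":
    torch.manual_seed(0)
    slen, bsz, num_head, dim_head = 5, 2, 4, 8
    hidden = num_head * dim_head
    net = PseudoSRNetLayer(input_dim=16, hidden_size=hidden, num_layers=2,
                           num_head=num_head, dim_head=dim_head,
                           dim_ff=64, dropout=0.0)
    x = torch.randn(slen, bsz, 16)  # (len, B, input_dim)
    out, _ = net(x, core_state=tuple())
    print(out.shape)  # expected: torch.Size([5, 2, 32])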
| 42,905
| 33.573731
| 85
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/torchbeast/atari_wrappers.py
|
# The MIT License
#
# Copyright (c) 2017 OpenAI (http://openai.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Taken from
# https://raw.githubusercontent.com/openai/baselines/7c520852d9cf4eaaad326a3d548efc915dc60c10/baselines/common/atari_wrappers.py
# and slightly modified.
import numpy as np
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condition for a few frames
# so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
"""
Warp frames to 84x84 as done in the Nature paper and later work.
If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which
observation should be warped.
"""
super().__init__(env)
self._width = width
self._height = height
self._grayscale = grayscale
self._key = dict_space_key
if self._grayscale:
num_colors = 1
else:
num_colors = 3
new_space = gym.spaces.Box(
low=0,
high=255,
shape=(self._height, self._width, num_colors),
dtype=np.uint8,
)
if self._key is None:
original_space = self.observation_space
self.observation_space = new_space
else:
original_space = self.observation_space.spaces[self._key]
self.observation_space.spaces[self._key] = new_space
assert original_space.dtype == np.uint8 and len(original_space.shape) == 3
def observation(self, obs):
if self._key is None:
frame = obs
else:
frame = obs[self._key]
if self._grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(
frame, (self._width, self._height), interpolation=cv2.INTER_AREA
)
if self._grayscale:
frame = np.expand_dims(frame, -1)
if self._key is None:
obs = frame
else:
obs = obs.copy()
obs[self._key] = frame
return obs
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=(shp[:-1] + (shp[-1] * k,)), dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=-1)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
def count(self):
frames = self._force()
return frames.shape[frames.ndim - 1]
def frame(self, i):
return self._force()[..., i]
def make_atari(env_id, max_episode_steps=None):
env = gym.make(env_id)
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
    assert max_episode_steps is None  # TimeLimit wrapping from the upstream version is not supported here.
return env
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
return env
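# Typical composition (illustrative sketch; mirrors how torchbeast is expected
# to build its Atari environments):
#   env = wrap_pytorch(wrap_deepmind(make_atari("PongNoFrameskip-v4"),
#                                    clip_rewards=False, frame_stack=True))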
class ImageToPyTorch(gym.ObservationWrapper):
"""
    Image shape to channels x height x width
"""
def __init__(self, env):
super(ImageToPyTorch, self).__init__(env)
old_shape = self.observation_space.shape
self.observation_space = gym.spaces.Box(
low=0,
high=255,
shape=(old_shape[-1], old_shape[0], old_shape[1]),
dtype=np.uint8,
)
def observation(self, observation):
return np.transpose(observation, axes=(2, 0, 1))
def wrap_pytorch(env):
return ImageToPyTorch(env)
| 11,424
| 32.902077
| 130
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/torchbeast/polybeast.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import multiprocessing as mp
import numpy as np
from torchbeast import polybeast_learner
from torchbeast import polybeast_env
def run_env(flags, actor_id):
np.random.seed() # Get new random seed in forked process.
polybeast_env.main(flags)
def run_learner(flags):
polybeast_learner.main(flags)
def main():
flags = argparse.Namespace()
flags, argv = polybeast_learner.parser.parse_known_args(namespace=flags)
flags, argv = polybeast_env.parser.parse_known_args(args=argv, namespace=flags)
if argv:
# Produce an error message.
polybeast_learner.parser.print_usage()
print("")
polybeast_env.parser.print_usage()
print("Unkown args:", " ".join(argv))
return -1
flags.num_servers = flags.num_actors
env_processes = []
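    # Note: a single environment process is started here; polybeast_env is
    # expected to host all flags.num_servers servers within that process.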
for actor_id in range(1):
p = mp.Process(target=run_env, args=(flags, actor_id))
p.start()
env_processes.append(p)
run_learner(flags)
for p in env_processes:
p.join()
if __name__ == "__main__":
main()
| 1,674
| 26.916667
| 83
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/torchbeast/noneg_polybeast_learner.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
from datetime import datetime  # used below to build default wandb run names
import logging
import os
import threading
import time
import timeit
import traceback
import nest
import torch
import libtorchbeast
from torch import nn
from torch.nn import functional as F
from torchbeast.core import file_writer
from torchbeast.core import vtrace
from torchbeast.model import DeltaNetModel as DeltaNet
from torchbeast.model import LinearTransformerModel as LT
from torchbeast.model import RecDeltaModel as RecDelta
from torchbeast.model import FastRNNModel as FastRNN
# Necessary for multithreading: pin OpenMP to one thread per process so the
# learner/actor threads do not oversubscribe cores.
os.environ["OMP_NUM_THREADS"] = "1"
# yapf: disable
parser = argparse.ArgumentParser(description="PyTorch Scalable Agent")
parser.add_argument("--pipes_basename", default="unix:/tmp/polybeast",
help="Basename for the pipes for inter-process communication. "
"Has to be of the type unix:/some/path.")
parser.add_argument("--mode", default="train",
choices=["train", "test", "test_render"],
help="Training or test mode.")
parser.add_argument("--xpid", default=None,
help="Experiment id (default: None).")
# Training settings.
parser.add_argument("--disable_checkpoint", action="store_true",
help="Disable saving checkpoint.")
parser.add_argument("--save_extra_checkpoint", default=50000000, type=int,
help="Save an extra checkpoint at .")
parser.add_argument("--eval_extra", action="store_true",
help="Eval extra checkpoint.")
parser.add_argument("--savedir", default="~/palaas/torchbeast",
help="Root dir where experiment data will be saved.")
parser.add_argument("--num_actors", default=4, type=int, metavar="N",
help="Number of actors.")
parser.add_argument("--total_steps", default=100000, type=int, metavar="T",
help="Total environment steps to train for.")
parser.add_argument("--batch_size", default=8, type=int, metavar="B",
help="Learner batch size.")
parser.add_argument("--unroll_length", default=80, type=int, metavar="T",
help="The unroll length (time dimension).")
parser.add_argument("--num_learner_threads", default=2, type=int,
metavar="N", help="Number learner threads.")
parser.add_argument("--num_inference_threads", default=2, type=int,
metavar="N", help="Number learner threads.")
parser.add_argument("--disable_cuda", action="store_true",
help="Disable CUDA.")
parser.add_argument("--num_actions", default=6, type=int, metavar="A",
help="Number of actions.")
parser.add_argument("--use_lstm", action="store_true",
help="Use LSTM in agent model.")
parser.add_argument("--use_delta_rnn", action="store_true",
help="Use Delta RNN in agent model.")
parser.add_argument("--use_delta", action="store_true",
help="Use Delta Net in agent model.")
parser.add_argument("--use_lt", action="store_true",
help="Use Linear Trafo in agent model.")
parser.add_argument("--use_rec_delta", action="store_true",
help="Use Recurrent Delta Net in agent model.")
parser.add_argument("--max_learner_queue_size", default=None, type=int, metavar="N",
help="Optional maximum learner queue size. Defaults to batch_size.")
# Model settings.
parser.add_argument("--hidden_size", default=128, type=int,
help="transformer hidden size.")
parser.add_argument("--dim_ff", default=512, type=int,
help="transformer hidden size.")
parser.add_argument("--dim_head", default=32, type=int,
help="transformer head size.")
parser.add_argument("--num_layers", default=2, type=int,
help="tranformer num layers.")
parser.add_argument("--num_head", default=4, type=int,
help="tranformer num heads.")
parser.add_argument("--dropout", default=0.0, type=float,
help="tranformer dropout.")
# Loss settings.
parser.add_argument("--entropy_cost", default=0.0006, type=float,
help="Entropy cost/multiplier.")
parser.add_argument("--baseline_cost", default=0.5, type=float,
help="Baseline cost/multiplier.")
parser.add_argument("--discounting", default=0.99, type=float,
help="Discounting factor.")
parser.add_argument("--reward_clipping", default="abs_one",
choices=["abs_one", "none"],
help="Reward clipping.")
# Optimizer settings.
parser.add_argument("--learning_rate", default=0.00048, type=float,
metavar="LR", help="Learning rate.")
parser.add_argument("--alpha", default=0.99, type=float,
help="RMSProp smoothing constant.")
parser.add_argument("--momentum", default=0, type=float,
help="RMSProp momentum.")
parser.add_argument("--epsilon", default=0.01, type=float,
help="RMSProp epsilon.")
parser.add_argument("--grad_norm_clipping", default=40.0, type=float,
help="Global gradient norm clip.")
# Misc settings.
parser.add_argument("--write_profiler_trace", action="store_true",
help="Collect and write a profiler trace "
"for chrome://tracing/.")
# yapf: enable
parser.add_argument('--num_servers', default=4, type=int, metavar='N',
help='Number of environment servers.')
parser.add_argument('--env', type=str, default='PongNoFrameskip-v4',
help='Gym environment.')
# Wandb settings
parser.add_argument('--project_name', type=str, default=None,
help='project name for wandb.')
parser.add_argument('--job_name', type=str, default=None,
help='job name for wandb.')
parser.add_argument('--use_wandb', action='store_true',
help='use wandb.')
args = parser.parse_args()
if args.use_wandb: # configure wandb.
import wandb
use_wandb = True
if args.project_name is None:
project_name = (os.uname()[1]
+ datetime.now().strftime("%Y-%m-%d-%H-%M-%S"))
else:
project_name = args.project_name
wandb.init(project=project_name)
if args.job_name is None:
# wandb.run.name = (os.uname()[1]
# + datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
# + args.work_dir)
wandb.run.name = f"{os.uname()[1]}" \
f"-{args.mode}" \
f"-{args.xpid}" \
f"-{args.disable_checkpoint}" \
f"-{args.savedir}" \
f"-{args.num_actors}" \
f"-{args.total_steps}" \
f"-{args.batch_size}" \
f"-{args.unroll_length}" \
f"-{args.entropy_cost}" \
f"-{args.baseline_cost}" \
f"-{args.discounting}" \
f"-{args.reward_clipping}" \
f"-{args.learning_rate}" \
f"-{args.alpha}" \
f"-{args.momentum}" \
f"-{args.epsilon}" \
f"-{args.grad_norm_clipping}"
else:
wandb.run.name = f"{os.uname()[1]}//{args.job_name}"
config = wandb.config
config.host = os.uname()[1] # host node name
    config.mode = args.mode
    config.xpid = args.xpid
    config.disable_checkpoint = args.disable_checkpoint
    config.savedir = args.savedir
    config.num_actors = args.num_actors
    config.total_steps = args.total_steps
    config.batch_size = args.batch_size
    config.unroll_length = args.unroll_length
    config.disable_cuda = args.disable_cuda
    config.use_lstm = args.use_lstm
    config.entropy_cost = args.entropy_cost
    config.baseline_cost = args.baseline_cost
    config.discounting = args.discounting
    config.reward_clipping = args.reward_clipping
    config.learning_rate = args.learning_rate
    config.alpha = args.alpha
    config.momentum = args.momentum
    config.epsilon = args.epsilon
    config.grad_norm_clipping = args.grad_norm_clipping
else:
use_wandb = False
logging.basicConfig(
format=(
"[%(levelname)s:%(process)d %(module)s:%(lineno)d %(asctime)s] " "%(message)s"
),
level=0,
)
def compute_baseline_loss(advantages):
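# Squared-error loss on the value baseline: 0.5 * sum of squared advantages.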
return 0.5 * torch.sum(advantages ** 2)
def compute_entropy_loss(logits):
"""Return the entropy loss, i.e., the negative entropy of the policy."""
policy = F.softmax(logits, dim=-1)
log_policy = F.log_softmax(logits, dim=-1)
return torch.sum(policy * log_policy)
def compute_policy_gradient_loss(logits, actions, advantages):
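# Cross-entropy of the taken actions weighted by detached advantages:
# the standard score-function policy-gradient surrogate.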
cross_entropy = F.nll_loss(
F.log_softmax(torch.flatten(logits, 0, 1), dim=-1),
target=torch.flatten(actions, 0, 1),
reduction="none",
)
cross_entropy = cross_entropy.view_as(advantages)
return torch.sum(cross_entropy * advantages.detach())
class Net(nn.Module):
def __init__(self, num_actions, use_lstm=False):
super(Net, self).__init__()
self.num_actions = num_actions
self.use_lstm = use_lstm
self.feat_convs = []
self.resnet1 = []
self.resnet2 = []
self.convs = []
input_channels = 4
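# 4 input channels: the Atari observation is a stack of 4 frames.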
for num_ch in [16, 32, 32]:
feats_convs = []
feats_convs.append(
nn.Conv2d(
in_channels=input_channels,
out_channels=num_ch,
kernel_size=3,
stride=1,
padding=1,
)
)
feats_convs.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
self.feat_convs.append(nn.Sequential(*feats_convs))
input_channels = num_ch
for i in range(2):
resnet_block = []
resnet_block.append(nn.ReLU())
resnet_block.append(
nn.Conv2d(
in_channels=input_channels,
out_channels=num_ch,
kernel_size=3,
stride=1,
padding=1,
)
)
resnet_block.append(nn.ReLU())
resnet_block.append(
nn.Conv2d(
in_channels=input_channels,
out_channels=num_ch,
kernel_size=3,
stride=1,
padding=1,
)
)
if i == 0:
self.resnet1.append(nn.Sequential(*resnet_block))
else:
self.resnet2.append(nn.Sequential(*resnet_block))
self.feat_convs = nn.ModuleList(self.feat_convs)
self.resnet1 = nn.ModuleList(self.resnet1)
self.resnet2 = nn.ModuleList(self.resnet2)
self.fc = nn.Linear(3872, 256)
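# 3872 = 32 channels * 11 * 11 after three stride-2 max pools on 84x84 frames.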
# FC output size + last reward.
core_output_size = self.fc.out_features + 1
if use_lstm:
self.core = nn.LSTM(core_output_size, 256, num_layers=1)
core_output_size = 256
self.policy = nn.Linear(core_output_size, self.num_actions)
self.baseline = nn.Linear(core_output_size, 1)
def initial_state(self, batch_size=1):
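# Without an LSTM the agent is feed-forward and carries no recurrent
# state; otherwise return zeroed (h, c) LSTM states.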
if not self.use_lstm:
return tuple()
return tuple(
torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size)
for _ in range(2)
)
def forward(self, inputs, core_state):
x = inputs["frame"]
T, B, *_ = x.shape
x = torch.flatten(x, 0, 1) # Merge time and batch.
x = x.float() / 255.0
res_input = None
for i, fconv in enumerate(self.feat_convs):
x = fconv(x)
res_input = x
x = self.resnet1[i](x)
x += res_input
res_input = x
x = self.resnet2[i](x)
x += res_input
x = F.relu(x)
x = x.view(T * B, -1)
x = F.relu(self.fc(x))
clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1)
core_input = torch.cat([x, clipped_reward], dim=-1)
if self.use_lstm:
core_input = core_input.view(T, B, -1)
core_output_list = []
notdone = (~inputs["done"]).float()
for input, nd in zip(core_input.unbind(), notdone.unbind()):
# Reset core state to zero whenever an episode ended.
# Make `done` broadcastable with (num_layers, B, hidden_size)
# states:
nd = nd.view(1, -1, 1)
core_state = nest.map(nd.mul, core_state)
output, core_state = self.core(input.unsqueeze(0), core_state)
core_output_list.append(output)
core_output = torch.flatten(torch.cat(core_output_list), 0, 1)
else:
core_output = core_input
policy_logits = self.policy(core_output)
baseline = self.baseline(core_output)
if self.training:
action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1)
else:
# Don't sample when testing.
action = torch.argmax(policy_logits, dim=1)
policy_logits = policy_logits.view(T, B, self.num_actions)
baseline = baseline.view(T, B)
action = action.view(T, B)
return (action, policy_logits, baseline), core_state
def inference(flags, inference_batcher, model, lock=threading.Lock()): # noqa: B008
with torch.no_grad():
for batch in inference_batcher:
batched_env_outputs, agent_state = batch.get_inputs()
frame, reward, done, *_ = batched_env_outputs
frame = frame.to(flags.actor_device, non_blocking=True)
reward = reward.to(flags.actor_device, non_blocking=True)
done = done.to(flags.actor_device, non_blocking=True)
agent_state = nest.map(
lambda t: t.to(flags.actor_device, non_blocking=True), agent_state
)
with lock:
outputs = model(
dict(frame=frame, reward=reward, done=done), agent_state
)
outputs = nest.map(lambda t: t.cpu(), outputs)
batch.set_outputs(outputs)
EnvOutput = collections.namedtuple(
"EnvOutput", "frame rewards done episode_step episode_return"
)
AgentOutput = collections.namedtuple("AgentOutput", "action policy_logits baseline")
Batch = collections.namedtuple("Batch", "env agent")
def learn(
flags,
learner_queue,
model,
actor_model,
optimizer,
scheduler,
stats,
plogger,
lock=threading.Lock(),
):
for tensors in learner_queue:
tensors = nest.map(lambda t: t.to(flags.learner_device), tensors)
batch, initial_agent_state = tensors
env_outputs, actor_outputs = batch
frame, reward, done, *_ = env_outputs
lock.acquire() # Only one thread learning at a time.
learner_outputs, unused_state = model(
dict(frame=frame, reward=reward, done=done), initial_agent_state
)
# Take final value function slice for bootstrapping.
learner_outputs = AgentOutput._make(learner_outputs)
bootstrap_value = learner_outputs.baseline[-1]
# Move from obs[t] -> action[t] to action[t] -> obs[t].
batch = nest.map(lambda t: t[1:], batch)
learner_outputs = nest.map(lambda t: t[:-1], learner_outputs)
# Turn into namedtuples again.
env_outputs, actor_outputs = batch
env_outputs = EnvOutput._make(env_outputs)
actor_outputs = AgentOutput._make(actor_outputs)
learner_outputs = AgentOutput._make(learner_outputs)
if flags.reward_clipping == "abs_one":
clipped_rewards = torch.clamp(env_outputs.rewards, -1, 1)
elif flags.reward_clipping == "none":
clipped_rewards = env_outputs.rewards
discounts = (~env_outputs.done).float() * flags.discounting
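# V-trace (IMPALA) off-policy correction: importance-weight the returns
# using the behavior (actor) vs. target (learner) policy logits.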
vtrace_returns = vtrace.from_logits(
behavior_policy_logits=actor_outputs.policy_logits,
target_policy_logits=learner_outputs.policy_logits,
actions=actor_outputs.action,
discounts=discounts,
rewards=clipped_rewards,
values=learner_outputs.baseline,
bootstrap_value=bootstrap_value,
)
pg_loss = compute_policy_gradient_loss(
learner_outputs.policy_logits,
actor_outputs.action,
vtrace_returns.pg_advantages,
)
baseline_loss = flags.baseline_cost * compute_baseline_loss(
vtrace_returns.vs - learner_outputs.baseline
)
entropy_loss = flags.entropy_cost * compute_entropy_loss(
learner_outputs.policy_logits
)
total_loss = pg_loss + baseline_loss + entropy_loss
optimizer.zero_grad()
total_loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), flags.grad_norm_clipping)
optimizer.step()
scheduler.step()
actor_model.load_state_dict(model.state_dict())
episode_returns = env_outputs.episode_return[env_outputs.done]
stats["step"] = stats.get("step", 0) + flags.unroll_length * flags.batch_size
stats["episode_returns"] = tuple(episode_returns.cpu().numpy())
stats["mean_episode_return"] = torch.mean(episode_returns).item()
stats["mean_episode_step"] = torch.mean(env_outputs.episode_step.float()).item()
stats["total_loss"] = total_loss.item()
stats["pg_loss"] = pg_loss.item()
stats["baseline_loss"] = baseline_loss.item()
stats["entropy_loss"] = entropy_loss.item()
stats["learner_queue_size"] = learner_queue.size()
if use_wandb:
wandb.log({"episode_returns": stats["episode_returns"]})
wandb.log({"mean_episode_step": stats["mean_episode_step"]})
wandb.log({"mean_episode_return": stats["mean_episode_return"]})
wandb.log({"total_loss": stats["total_loss"]})
wandb.log({"pg_loss": stats["pg_loss"]})
wandb.log({"baseline_loss": stats["baseline_loss"]})
wandb.log({"entropy_loss": stats["entropy_loss"]})
plogger.log(stats)
if not len(episode_returns):
# Hide the mean-of-empty-tuple NaN as it scares people.
stats["mean_episode_return"] = None
lock.release()
def train(flags):
if flags.xpid is None:
flags.xpid = "torchbeast-%s" % time.strftime("%Y%m%d-%H%M%S")
plogger = file_writer.FileWriter(
xpid=flags.xpid, xp_args=flags.__dict__, rootdir=flags.savedir
)
checkpointpath = os.path.expandvars(
os.path.expanduser("%s/%s/%s" % (flags.savedir, flags.xpid, "model.tar"))
)
if flags.save_extra_checkpoint > 0:
extra_checkpointpath = os.path.expandvars(os.path.expanduser(
"%s/%s/%s" % (flags.savedir, flags.xpid, "model_extra.tar")))
if not flags.disable_cuda and torch.cuda.is_available():
logging.info("Using CUDA.")
flags.learner_device = torch.device("cuda:0")
flags.actor_device = torch.device("cuda:1")
else:
logging.info("Not using CUDA.")
flags.learner_device = torch.device("cpu")
flags.actor_device = torch.device("cpu")
if flags.max_learner_queue_size is None:
flags.max_learner_queue_size = flags.batch_size
# The queue the learner threads will get their data from.
# Setting `minimum_batch_size == maximum_batch_size`
# makes the batch size static.
learner_queue = libtorchbeast.BatchingQueue(
batch_dim=1,
minimum_batch_size=flags.batch_size,
maximum_batch_size=flags.batch_size,
check_inputs=True,
maximum_queue_size=flags.max_learner_queue_size,
)
# The "batcher", a queue for the inference call. Will yield
# "batch" objects with `get_inputs` and `set_outputs` methods.
# The batch size of the tensors will be dynamic.
inference_batcher = libtorchbeast.DynamicBatcher(
batch_dim=1,
minimum_batch_size=1,
maximum_batch_size=512,
timeout_ms=100,
check_outputs=True,
)
addresses = []
connections_per_server = 1
pipe_id = 0
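# One pipe address per actor connection: <pipes_basename>.0, .1, ...,
# cycling pipe ids until every actor has an address.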
while len(addresses) < flags.num_actors:
for _ in range(connections_per_server):
addresses.append(f"{flags.pipes_basename}.{pipe_id}")
if len(addresses) == flags.num_actors:
break
pipe_id += 1
if flags.use_delta:
model = DeltaNet(num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
logging.info(model)
model = model.to(device=flags.learner_device)
actor_model = DeltaNet(
num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
actor_model.to(device=flags.actor_device)
elif flags.use_lt:
model = LT(num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
logging.info(model)
model = model.to(device=flags.learner_device)
actor_model = LT(
num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
actor_model.to(device=flags.actor_device)
elif flags.use_delta_rnn:
model = FastRNN(num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
model = model.to(device=flags.learner_device)
actor_model = FastRNN(
num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
actor_model.to(device=flags.actor_device)
elif flags.use_rec_delta:
model = RecDelta(num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
model = model.to(device=flags.learner_device)
actor_model = RecDelta(
num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
actor_model.to(device=flags.actor_device)
else:
model = Net(num_actions=flags.num_actions, use_lstm=flags.use_lstm)
model = model.to(device=flags.learner_device)
actor_model = Net(
num_actions=flags.num_actions, use_lstm=flags.use_lstm)
actor_model.to(device=flags.actor_device)
# The ActorPool that will run `flags.num_actors` many loops.
actors = libtorchbeast.ActorPool(
unroll_length=flags.unroll_length,
learner_queue=learner_queue,
inference_batcher=inference_batcher,
env_server_addresses=addresses,
initial_agent_state=actor_model.initial_state(),
)
def run():
try:
actors.run()
except Exception as e:
logging.error("Exception in actorpool thread!")
traceback.print_exc()
print()
raise e
actorpool_thread = threading.Thread(target=run, name="actorpool-thread")
optimizer = torch.optim.RMSprop(
model.parameters(),
lr=flags.learning_rate,
momentum=flags.momentum,
eps=flags.epsilon,
alpha=flags.alpha,
)
def lr_lambda(epoch):
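# Linear decay: scale the learning rate from 1 down to 0 over total_steps
# environment steps (epoch here counts optimizer updates).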
return (
1
- min(epoch * flags.unroll_length * flags.batch_size, flags.total_steps)
/ flags.total_steps
)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
stats = {}
# Load state from a checkpoint, if possible.
if os.path.exists(checkpointpath):
checkpoint_states = torch.load(
checkpointpath, map_location=flags.learner_device
)
model.load_state_dict(checkpoint_states["model_state_dict"])
optimizer.load_state_dict(checkpoint_states["optimizer_state_dict"])
scheduler.load_state_dict(checkpoint_states["scheduler_state_dict"])
stats = checkpoint_states["stats"]
logging.info(f"Resuming preempted job, current stats:\n{stats}")
# Initialize actor model like learner model.
actor_model.load_state_dict(model.state_dict())
learner_threads = [
threading.Thread(
target=learn,
name="learner-thread-%i" % i,
args=(
flags,
learner_queue,
model,
actor_model,
optimizer,
scheduler,
stats,
plogger,
),
)
for i in range(flags.num_learner_threads)
]
inference_threads = [
threading.Thread(
target=inference,
name="inference-thread-%i" % i,
args=(flags, inference_batcher, actor_model),
)
for i in range(flags.num_inference_threads)
]
actorpool_thread.start()
for t in learner_threads + inference_threads:
t.start()
def checkpoint():
if flags.disable_checkpoint:
return
logging.info("Saving checkpoint to %s", checkpointpath)
torch.save(
{
"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"scheduler_state_dict": scheduler.state_dict(),
"stats": stats,
"flags": vars(flags),
},
checkpointpath,
)
def format_value(x):
return f"{x:1.5}" if isinstance(x, float) else str(x)
try:
if flags.save_extra_checkpoint > 0:
saved_extra = False
last_checkpoint_time = timeit.default_timer()
while True:
start_time = timeit.default_timer()
start_step = stats.get("step", 0)
if start_step >= flags.total_steps:
break
time.sleep(5)
end_step = stats.get("step", 0)
if timeit.default_timer() - last_checkpoint_time > 10 * 60:
# Save every 10 min.
checkpoint()
last_checkpoint_time = timeit.default_timer()
logging.info(
"Step %i @ %.1f SPS. Inference batcher size: %i."
" Learner queue size: %i."
" Other stats: (%s)",
end_step,
(end_step - start_step) / (timeit.default_timer() - start_time),
inference_batcher.size(),
learner_queue.size(),
", ".join(
f"{key} = {format_value(value)}" for key, value in stats.items()
),
)
if flags.save_extra_checkpoint > 0:
if not saved_extra and end_step > flags.save_extra_checkpoint:
logging.info(f"Step {end_step} Saving EXTRA checkpoint to {extra_checkpointpath}")
torch.save(
{
"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"scheduler_state_dict": scheduler.state_dict(),
"stats": stats,
"flags": vars(flags),
},
extra_checkpointpath,
)
saved_extra = True
except KeyboardInterrupt:
pass # Close properly.
else:
logging.info("Learning finished after %i steps.", stats["step"])
checkpoint()
# Done with learning. Stop all the ongoing work.
inference_batcher.close()
learner_queue.close()
actorpool_thread.join()
for t in learner_threads + inference_threads:
t.join()
def test(flags, num_episodes=30, num_runs=5, device='cuda'):
if flags.xpid is None:
checkpointpath = "./latest/model.tar"
elif flags.eval_extra:
checkpointpath = os.path.expandvars(
os.path.expanduser(
"%s/%s/%s" % (flags.savedir, flags.xpid, "model_extra.tar"))
)
else:
checkpointpath = os.path.expandvars(
os.path.expanduser(
"%s/%s/%s" % (flags.savedir, flags.xpid, "model.tar"))
)
from torchbeast import atari_wrappers
from torchbeast.core import environment
def create_test_env(flags):
return atari_wrappers.wrap_pytorch(
atari_wrappers.wrap_deepmind(
atari_wrappers.make_atari(flags.env),
clip_rewards=False,
frame_stack=True,
scale=False,
)
)
gym_env = create_test_env(flags)
env = environment.Environment(gym_env)
if flags.use_delta:
model = DeltaNet(num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
elif flags.use_lt:
model = LT(num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
elif flags.use_delta_rnn:
model = FastRNN(num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
elif flags.use_rec_delta:
model = RecDelta(num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
else:
model = Net(num_actions=flags.num_actions, use_lstm=flags.use_lstm)
print(model)
print(f"# params: "
f"{sum(p.numel() for p in model.parameters() if p.requires_grad)}")
model = model.to(device)
model.eval()
checkpoint = torch.load(checkpointpath, map_location=device)
model.load_state_dict(checkpoint["model_state_dict"])
observation = env.initial()
all_returns = []
core_state = model.initial_state()
core_state = nest.map(lambda t: t.to(device), core_state)
logging.info("Start eval")
with torch.no_grad():
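# model.eval() was set above, so forward() takes argmax actions (no sampling).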
while len(all_returns) < num_runs:
returns = []
while len(returns) < num_episodes:
if flags.mode == "test_render":
env.gym_env.render()
# (action, policy_logits, baseline), core_state
observation = nest.map(lambda t: t.to(device), observation)
agent_outputs, core_state = model(observation, core_state)
action, _, _ = agent_outputs
observation = env.step(action)
if observation["episode_return"].item() < 0:
print("rejecting episode for unexpected reward range")
observation = env.initial()
core_state = model.initial_state()
core_state = nest.map(lambda t: t.to(device), core_state)
continue
if observation["done"].item():
returns.append(observation["episode_return"].item())
logging.info(
"Episode ended after %d steps. Return: %.1f",
observation["episode_step"].item(),
observation["episode_return"].item(),
)
# restart from the initial states for both env and model
# (this is normally not needed, done elsewhere)
observation = env.initial()
core_state = model.initial_state()
core_state = nest.map(lambda t: t.to(device), core_state)
logging.info(
"Average returns over %i episodes: %.1f",
num_episodes, sum(returns) / len(returns)
)
all_returns.append(sum(returns) / len(returns))
env.close()
import numpy as np
logging.info(f"Average returns over {num_episodes} for {num_runs} runs: {all_returns}")
logging.info(f"Mean return: {np.mean(all_returns):.1f}, std: {np.std(all_returns):.1f}")
def main(flags):
if not flags.pipes_basename.startswith("unix:"):
raise Exception("--pipes_basename has to be of the form unix:/some/path.")
if flags.mode == "train":
if flags.write_profiler_trace:
logging.info("Running with profiler.")
with torch.autograd.profiler.profile() as prof:
train(flags)
filename = "chrome-%s.trace" % time.strftime("%Y%m%d-%H%M%S")
logging.info("Writing profiler trace to '%s.gz'", filename)
prof.export_chrome_trace(filename)
os.system("gzip %s" % filename)
else:
train(flags)
else:
test(flags)
if __name__ == "__main__":
flags = parser.parse_args()
main(flags)
| 35,307
| 37.088457
| 130
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/torchbeast/polybeast_env.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import multiprocessing as mp
import threading
import time
import numpy as np
import libtorchbeast
from torchbeast import atari_wrappers
# yapf: disable
parser = argparse.ArgumentParser(description='Remote Environment Server')
parser.add_argument("--pipes_basename", default="unix:/tmp/polybeast",
help="Basename for the pipes for inter-process communication. "
"Has to be of the type unix:/some/path.")
parser.add_argument('--num_servers', default=4, type=int, metavar='N',
help='Number of environment servers.')
parser.add_argument('--env', type=str, default='PongNoFrameskip-v4',
help='Gym environment.')
# yapf: enable
class Env:
def reset(self):
print("reset called")
return np.ones((4, 84, 84), dtype=np.uint8)
def step(self, action):
frame = np.zeros((4, 84, 84), dtype=np.uint8)
return frame, 0.0, False, {} # First three mandatory.
def create_env(env_name, lock=threading.Lock()):
with lock: # Atari isn't threadsafe at construction time.
return atari_wrappers.wrap_pytorch(
atari_wrappers.wrap_deepmind(
atari_wrappers.make_atari(env_name),
clip_rewards=False,
frame_stack=True,
scale=False,
)
)
def serve(env_name, server_address):
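# "Mock" serves the dummy Env above; real env names are built lazily via
# create_env when the server needs them.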
init = Env if env_name == "Mock" else lambda: create_env(env_name)
server = libtorchbeast.Server(init, server_address=server_address)
server.run()
def main(flags):
if not flags.pipes_basename.startswith("unix:"):
raise Exception("--pipes_basename has to be of the form unix:/some/path.")
processes = []
for i in range(flags.num_servers):
p = mp.Process(
target=serve, args=(flags.env, f"{flags.pipes_basename}.{i}"), daemon=True
)
p.start()
processes.append(p)
try:
# We are only here to listen to the interrupt.
while True:
time.sleep(10)
except KeyboardInterrupt:
pass
if __name__ == "__main__":
flags = parser.parse_args()
print(f"Env: {flags.env}")
main(flags)
| 2,801
| 29.791209
| 86
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/torchbeast/model.py
|
import nest
import torch
from torch import nn
from torch.nn import functional as F
from torchbeast.layer import DeltaNetLayer
from torchbeast.layer import LinearTransformerLayer
from torchbeast.layer import FastFFRecUpdateTanhLayer
from torchbeast.layer import FastRNNModelLayer
from torchbeast.layer import DeltaDeltaNetLayer
# Baseline model from torchbeast
class Net(nn.Module):
def __init__(self, num_actions, use_lstm=False):
super(Net, self).__init__()
self.num_actions = num_actions
self.use_lstm = use_lstm
self.feat_convs = []
self.resnet1 = []
self.resnet2 = []
self.convs = []
input_channels = 4
for num_ch in [16, 32, 32]:
feats_convs = []
feats_convs.append(
nn.Conv2d(
in_channels=input_channels,
out_channels=num_ch,
kernel_size=3,
stride=1,
padding=1,
)
)
feats_convs.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
self.feat_convs.append(nn.Sequential(*feats_convs))
input_channels = num_ch
for i in range(2):
resnet_block = []
resnet_block.append(nn.ReLU())
resnet_block.append(
nn.Conv2d(
in_channels=input_channels,
out_channels=num_ch,
kernel_size=3,
stride=1,
padding=1,
)
)
resnet_block.append(nn.ReLU())
resnet_block.append(
nn.Conv2d(
in_channels=input_channels,
out_channels=num_ch,
kernel_size=3,
stride=1,
padding=1,
)
)
if i == 0:
self.resnet1.append(nn.Sequential(*resnet_block))
else:
self.resnet2.append(nn.Sequential(*resnet_block))
self.feat_convs = nn.ModuleList(self.feat_convs)
self.resnet1 = nn.ModuleList(self.resnet1)
self.resnet2 = nn.ModuleList(self.resnet2)
self.fc = nn.Linear(3872, 256)
# FC output size + last reward.
core_output_size = self.fc.out_features + 1
if use_lstm:
self.core = nn.LSTM(core_output_size, 256, num_layers=1)
core_output_size = 256
self.policy = nn.Linear(core_output_size, self.num_actions)
self.baseline = nn.Linear(core_output_size, 1)
def initial_state(self, batch_size=1):
if not self.use_lstm:
return tuple()
return tuple(
torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size)
for _ in range(2)
)
def forward(self, inputs, core_state):
x = inputs["frame"]
T, B, *_ = x.shape
x = torch.flatten(x, 0, 1) # Merge time and batch.
x = x.float() / 255.0
res_input = None
for i, fconv in enumerate(self.feat_convs):
x = fconv(x)
res_input = x
x = self.resnet1[i](x)
x += res_input
res_input = x
x = self.resnet2[i](x)
x += res_input
x = F.relu(x)
x = x.view(T * B, -1)
x = F.relu(self.fc(x))
clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1)
core_input = torch.cat([x, clipped_reward], dim=-1)
if self.use_lstm:
core_input = core_input.view(T, B, -1)
core_output_list = []
notdone = (~inputs["done"]).float()
for input, nd in zip(core_input.unbind(), notdone.unbind()):
# Reset core state to zero whenever an episode ended.
# Make `done` broadcastable with (num_layers, B, hidden_size)
# states:
nd = nd.view(1, -1, 1)
core_state = nest.map(nd.mul, core_state)
output, core_state = self.core(input.unsqueeze(0), core_state)
core_output_list.append(output)
core_output = torch.flatten(torch.cat(core_output_list), 0, 1)
else:
core_output = core_input
policy_logits = self.policy(core_output)
baseline = self.baseline(core_output)
if self.training:
action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1)
else:
# Don't sample when testing.
action = torch.argmax(policy_logits, dim=1)
policy_logits = policy_logits.view(T, B, self.num_actions)
baseline = baseline.view(T, B)
action = action.view(T, B)
return (action, policy_logits, baseline), core_state
class DeltaNetModel(nn.Module):
def __init__(self, num_actions, hidden_size=128, num_layers=2,
num_head=4, dim_head=32, dim_ff=512, dropout=0.0):
super(DeltaNetModel, self).__init__()
self.num_actions = num_actions # output vocab size.
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_head = num_head
self.dim_head = dim_head
self.dim_ff = dim_ff
self.dropout = dropout
self.feat_convs = []
self.resnet1 = []
self.resnet2 = []
self.convs = []
input_channels = 4
for num_ch in [16, 32, 32]:
feats_convs = []
feats_convs.append(
nn.Conv2d(
in_channels=input_channels,
out_channels=num_ch,
kernel_size=3,
stride=1,
padding=1,
)
)
feats_convs.append(
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
self.feat_convs.append(nn.Sequential(*feats_convs))
input_channels = num_ch
for i in range(2):
resnet_block = []
resnet_block.append(nn.ReLU())
resnet_block.append(
nn.Conv2d(
in_channels=input_channels,
out_channels=num_ch,
kernel_size=3,
stride=1,
padding=1,
)
)
resnet_block.append(nn.ReLU())
resnet_block.append(
nn.Conv2d(
in_channels=input_channels,
out_channels=num_ch,
kernel_size=3,
stride=1,
padding=1,
)
)
if i == 0:
self.resnet1.append(nn.Sequential(*resnet_block))
else:
self.resnet2.append(nn.Sequential(*resnet_block))
self.feat_convs = nn.ModuleList(self.feat_convs)
self.resnet1 = nn.ModuleList(self.resnet1)
self.resnet2 = nn.ModuleList(self.resnet2)
self.fc = nn.Linear(3872, 256)
# FC output size + last reward.
# core_output_size = self.fc.out_features + 1
self.core = DeltaNetLayer(self.fc.out_features + 1,
hidden_size, num_layers, num_head, dim_head, dim_ff, dropout)
core_output_size = hidden_size
# if use_lstm:
# self.core = nn.LSTM(core_output_size, 256, num_layers=1)
# core_output_size = 256
self.policy = nn.Linear(core_output_size, self.num_actions)
self.baseline = nn.Linear(core_output_size, 1)
def initial_state(self, batch_size=1):
# why not create on device?
# add dummy dim0 for inference code compat.
state_tuple = tuple(torch.zeros(
1, batch_size, self.num_head, self.dim_head, self.dim_head)
for _ in range(self.num_layers))
return state_tuple
def forward(self, inputs, core_state):
x = inputs["frame"]
assert x.device.type != 'cpu'
T, B, *_ = x.shape
x = torch.flatten(x, 0, 1) # Merge time and batch.
x = x.float() / 255.0
res_input = None
for i, fconv in enumerate(self.feat_convs):
x = fconv(x)
res_input = x
x = self.resnet1[i](x)
x += res_input
res_input = x
x = self.resnet2[i](x)
x += res_input
x = F.relu(x)
x = x.view(T * B, -1)
x = F.relu(self.fc(x))
clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1)
core_input = torch.cat([x, clipped_reward], dim=-1)
# recurrent component
core_input = core_input.view(T, B, -1)
core_output_list = []
notdone = (~inputs["done"]).float()
for input, nd in zip(core_input.unbind(), notdone.unbind()):
# Reset core state to zero whenever an episode ended.
# Make `done` broadcastable with (num_layers, B, D, D)
# states:
# nd = nd.view(1, -1, 1)
# needs extra dim0 for compat w/ inference code
nd = nd.view(1, -1, 1, 1, 1)
core_state = nest.map(nd.mul, core_state)
output, core_state = self.core(input.unsqueeze(0), core_state)
core_output_list.append(output)
core_output = torch.flatten(torch.cat(core_output_list), 0, 1)
policy_logits = self.policy(core_output)
baseline = self.baseline(core_output)
if self.training:
action = torch.multinomial(F.softmax(policy_logits, dim=1),
num_samples=1)
else:
# Don't sample when testing.
action = torch.argmax(policy_logits, dim=1)
policy_logits = policy_logits.view(T, B, self.num_actions)
baseline = baseline.view(T, B)
action = action.view(T, B)
return (action, policy_logits, baseline), core_state
class LinearTransformerModel(nn.Module):
def __init__(self, num_actions, hidden_size=128, num_layers=2,
num_head=4, dim_head=32, dim_ff=512, dropout=0.0):
super(LinearTransformerModel, self).__init__()
self.num_actions = num_actions # output vocab size.
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_head = num_head
self.dim_head = dim_head
self.dim_ff = dim_ff
self.dropout = dropout
self.feat_convs = []
self.resnet1 = []
self.resnet2 = []
self.convs = []
input_channels = 4
for num_ch in [16, 32, 32]:
feats_convs = []
feats_convs.append(
nn.Conv2d(
in_channels=input_channels,
out_channels=num_ch,
kernel_size=3,
stride=1,
padding=1,
)
)
feats_convs.append(
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
self.feat_convs.append(nn.Sequential(*feats_convs))
input_channels = num_ch
for i in range(2):
resnet_block = []
resnet_block.append(nn.ReLU())
resnet_block.append(
nn.Conv2d(
in_channels=input_channels,
out_channels=num_ch,
kernel_size=3,
stride=1,
padding=1,
)
)
resnet_block.append(nn.ReLU())
resnet_block.append(
nn.Conv2d(
in_channels=input_channels,
out_channels=num_ch,
kernel_size=3,
stride=1,
padding=1,
)
)
if i == 0:
self.resnet1.append(nn.Sequential(*resnet_block))
else:
self.resnet2.append(nn.Sequential(*resnet_block))
self.feat_convs = nn.ModuleList(self.feat_convs)
self.resnet1 = nn.ModuleList(self.resnet1)
self.resnet2 = nn.ModuleList(self.resnet2)
self.fc = nn.Linear(3872, 256)
# FC output size + last reward.
# core_output_size = self.fc.out_features + 1
self.core = LinearTransformerLayer(self.fc.out_features + 1,
hidden_size, num_layers, num_head, dim_head, dim_ff, dropout)
core_output_size = hidden_size
# if use_lstm:
# self.core = nn.LSTM(core_output_size, 256, num_layers=1)
# core_output_size = 256
self.policy = nn.Linear(core_output_size, self.num_actions)
self.baseline = nn.Linear(core_output_size, 1)
def initial_state(self, batch_size=1):
# why not create on device?
# add dummy dim0 for inference code compat.
state_tuple = tuple(torch.zeros(
1, batch_size, self.num_head, self.dim_head, self.dim_head)
for _ in range(self.num_layers))
return state_tuple
def forward(self, inputs, core_state):
x = inputs["frame"]
assert x.device.type != 'cpu'
T, B, *_ = x.shape
x = torch.flatten(x, 0, 1) # Merge time and batch.
x = x.float() / 255.0
res_input = None
for i, fconv in enumerate(self.feat_convs):
x = fconv(x)
res_input = x
x = self.resnet1[i](x)
x += res_input
res_input = x
x = self.resnet2[i](x)
x += res_input
x = F.relu(x)
x = x.view(T * B, -1)
x = F.relu(self.fc(x))
clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1)
core_input = torch.cat([x, clipped_reward], dim=-1)
# recurrent component
core_input = core_input.view(T, B, -1)
core_output_list = []
notdone = (~inputs["done"]).float()
for input, nd in zip(core_input.unbind(), notdone.unbind()):
# Reset core state to zero whenever an episode ended.
# Make `done` broadcastable with (num_layers, B, D, D)
# states:
# nd = nd.view(1, -1, 1)
# needs extra dim0 for compat w/ inference code
nd = nd.view(1, -1, 1, 1, 1)
core_state = nest.map(nd.mul, core_state)
output, core_state = self.core(input.unsqueeze(0), core_state)
core_output_list.append(output)
core_output = torch.flatten(torch.cat(core_output_list), 0, 1)
policy_logits = self.policy(core_output)
baseline = self.baseline(core_output)
if self.training:
action = torch.multinomial(F.softmax(policy_logits, dim=1),
num_samples=1)
else:
# Don't sample when testing.
action = torch.argmax(policy_logits, dim=1)
policy_logits = policy_logits.view(T, B, self.num_actions)
baseline = baseline.view(T, B)
action = action.view(T, B)
return (action, policy_logits, baseline), core_state
class RecDeltaModel(nn.Module):
def __init__(self, num_actions, hidden_size=128, num_layers=2,
num_head=4, dim_head=32, dim_ff=512, dropout=0.0):
super(RecDeltaModel, self).__init__()
self.num_actions = num_actions # output vocab size.
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_head = num_head
self.dim_head = dim_head
self.dim_ff = dim_ff
self.dropout = dropout
self.feat_convs = []
self.resnet1 = []
self.resnet2 = []
self.convs = []
input_channels = 4
for num_ch in [16, 32, 32]:
feats_convs = []
feats_convs.append(
nn.Conv2d(
in_channels=input_channels,
out_channels=num_ch,
kernel_size=3,
stride=1,
padding=1,
)
)
feats_convs.append(
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
self.feat_convs.append(nn.Sequential(*feats_convs))
input_channels = num_ch
for i in range(2):
resnet_block = []
resnet_block.append(nn.ReLU())
resnet_block.append(
nn.Conv2d(
in_channels=input_channels,
out_channels=num_ch,
kernel_size=3,
stride=1,
padding=1,
)
)
resnet_block.append(nn.ReLU())
resnet_block.append(
nn.Conv2d(
in_channels=input_channels,
out_channels=num_ch,
kernel_size=3,
stride=1,
padding=1,
)
)
if i == 0:
self.resnet1.append(nn.Sequential(*resnet_block))
else:
self.resnet2.append(nn.Sequential(*resnet_block))
self.feat_convs = nn.ModuleList(self.feat_convs)
self.resnet1 = nn.ModuleList(self.resnet1)
self.resnet2 = nn.ModuleList(self.resnet2)
self.fc = nn.Linear(3872, 256)
# FC output size + last reward.
# core_output_size = self.fc.out_features + 1
self.core = FastFFRecUpdateTanhLayer(self.fc.out_features + 1,
hidden_size, num_layers, num_head, dim_head, dim_ff, dropout)
core_output_size = hidden_size
# if use_lstm:
# self.core = nn.LSTM(core_output_size, 256, num_layers=1)
# core_output_size = 256
self.policy = nn.Linear(core_output_size, self.num_actions)
self.baseline = nn.Linear(core_output_size, 1)
def initial_state(self, batch_size=1):
# why not create on device?
# add dummy dim0 for inference code compat.
fw_state_tuple = tuple(
torch.zeros(
1, batch_size, self.num_head, self.dim_head, self.dim_head)
for _ in range(self.num_layers)
)
rnn_state_tuple = tuple(
torch.zeros(1, batch_size, self.num_head, 1, self.dim_head)
for _ in range(self.num_layers)
)
return (fw_state_tuple, rnn_state_tuple)
def forward(self, inputs, core_state):
x = inputs["frame"]
assert x.device.type != 'cpu'
T, B, *_ = x.shape
x = torch.flatten(x, 0, 1) # Merge time and batch.
x = x.float() / 255.0
res_input = None
for i, fconv in enumerate(self.feat_convs):
x = fconv(x)
res_input = x
x = self.resnet1[i](x)
x += res_input
res_input = x
x = self.resnet2[i](x)
x += res_input
x = F.relu(x)
x = x.view(T * B, -1)
x = F.relu(self.fc(x))
clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1)
core_input = torch.cat([x, clipped_reward], dim=-1)
# recurrent component
core_input = core_input.view(T, B, -1)
core_output_list = []
notdone = (~inputs["done"]).float()
for input, nd in zip(core_input.unbind(), notdone.unbind()):
# Reset core state to zero whenever an episode ended.
# Make `done` broadcastable with (num_layers, B, D, D)
# states:
# nd = nd.view(1, -1, 1)
# needs extra dim0 for compat w/ inference code
fw_state, rnn_state = core_state
nd = nd.view(1, -1, 1, 1, 1)
fw_state = nest.map(nd.mul, fw_state)
rnn_state = nest.map(nd.mul, rnn_state)
core_state = (fw_state, rnn_state)
output, core_state = self.core(input.unsqueeze(0), core_state)
core_output_list.append(output)
core_output = torch.flatten(torch.cat(core_output_list), 0, 1)
policy_logits = self.policy(core_output)
baseline = self.baseline(core_output)
if self.training:
action = torch.multinomial(F.softmax(policy_logits, dim=1),
num_samples=1)
else:
# Don't sample when testing.
action = torch.argmax(policy_logits, dim=1)
policy_logits = policy_logits.view(T, B, self.num_actions)
baseline = baseline.view(T, B)
action = action.view(T, B)
return (action, policy_logits, baseline), core_state
class FastRNNModel(nn.Module):
def __init__(self, num_actions, hidden_size=128, num_layers=2,
num_head=4, dim_head=32, dim_ff=512, dropout=0.0):
super(FastRNNModel, self).__init__()
self.num_actions = num_actions # output vocab size.
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_head = num_head
self.dim_head = dim_head
self.dim_ff = dim_ff
self.dropout = dropout
self.feat_convs = []
self.resnet1 = []
self.resnet2 = []
self.convs = []
input_channels = 4
for num_ch in [16, 32, 32]:
feats_convs = []
feats_convs.append(
nn.Conv2d(
in_channels=input_channels,
out_channels=num_ch,
kernel_size=3,
stride=1,
padding=1,
)
)
feats_convs.append(
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
self.feat_convs.append(nn.Sequential(*feats_convs))
input_channels = num_ch
for i in range(2):
resnet_block = []
resnet_block.append(nn.ReLU())
resnet_block.append(
nn.Conv2d(
in_channels=input_channels,
out_channels=num_ch,
kernel_size=3,
stride=1,
padding=1,
)
)
resnet_block.append(nn.ReLU())
resnet_block.append(
nn.Conv2d(
in_channels=input_channels,
out_channels=num_ch,
kernel_size=3,
stride=1,
padding=1,
)
)
if i == 0:
self.resnet1.append(nn.Sequential(*resnet_block))
else:
self.resnet2.append(nn.Sequential(*resnet_block))
self.feat_convs = nn.ModuleList(self.feat_convs)
self.resnet1 = nn.ModuleList(self.resnet1)
self.resnet2 = nn.ModuleList(self.resnet2)
self.fc = nn.Linear(3872, 256)
# FC output size + last reward.
# core_output_size = self.fc.out_features + 1
self.core = FastRNNModelLayer(self.fc.out_features + 1,
hidden_size, num_layers, num_head, dim_head, dim_ff, dropout)
core_output_size = hidden_size
# if use_lstm:
# self.core = nn.LSTM(core_output_size, 256, num_layers=1)
# core_output_size = 256
self.policy = nn.Linear(core_output_size, self.num_actions)
self.baseline = nn.Linear(core_output_size, 1)
def initial_state(self, batch_size=1):
# why not create on device?
# add dummy dim0 for inference code compat.
fw_state_tuple = tuple(
torch.zeros(
1, batch_size, self.num_head, self.dim_head, self.dim_head)
for _ in range(self.num_layers)
)
rec_fw_state_tuple = tuple(
torch.zeros(
1, batch_size, self.num_head, self.dim_head, self.dim_head)
for _ in range(self.num_layers)
)
rnn_state_tuple = tuple(
torch.zeros(1, batch_size, self.num_head, 1, self.dim_head)
for _ in range(self.num_layers)
)
return (fw_state_tuple, rec_fw_state_tuple, rnn_state_tuple)
def forward(self, inputs, core_state):
x = inputs["frame"]
assert x.device.type != 'cpu'
T, B, *_ = x.shape
x = torch.flatten(x, 0, 1) # Merge time and batch.
x = x.float() / 255.0
res_input = None
for i, fconv in enumerate(self.feat_convs):
x = fconv(x)
res_input = x
x = self.resnet1[i](x)
x += res_input
res_input = x
x = self.resnet2[i](x)
x += res_input
x = F.relu(x)
x = x.view(T * B, -1)
x = F.relu(self.fc(x))
clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1)
core_input = torch.cat([x, clipped_reward], dim=-1)
# recurrent component
core_input = core_input.view(T, B, -1)
core_output_list = []
notdone = (~inputs["done"]).float()
for input, nd in zip(core_input.unbind(), notdone.unbind()):
# Reset core state to zero whenever an episode ended.
# Make `done` broadcastable with (num_layers, B, D, D)
# states:
# nd = nd.view(1, -1, 1)
# needs extra dim0 for compat w/ inference code
fw_state, rec_fw_state, rnn_state = core_state
nd = nd.view(1, -1, 1, 1, 1)
fw_state = nest.map(nd.mul, fw_state)
rec_fw_state = nest.map(nd.mul, rec_fw_state)
rnn_state = nest.map(nd.mul, rnn_state)
core_state = (fw_state, rec_fw_state, rnn_state)
output, core_state = self.core(input.unsqueeze(0), core_state)
core_output_list.append(output)
core_output = torch.flatten(torch.cat(core_output_list), 0, 1)
policy_logits = self.policy(core_output)
baseline = self.baseline(core_output)
if self.training:
action = torch.multinomial(F.softmax(policy_logits, dim=1),
num_samples=1)
else:
# Don't sample when testing.
action = torch.argmax(policy_logits, dim=1)
policy_logits = policy_logits.view(T, B, self.num_actions)
baseline = baseline.view(T, B)
action = action.view(T, B)
return (action, policy_logits, baseline), core_state
class DeltaDeltaNetModel(nn.Module):
def __init__(self, num_actions, hidden_size=128, num_layers=2,
num_head=4, dim_head=32, dim_ff=512, dropout=0.0,
use_xem=False):
# use_xem: use cross episode memory
super(DeltaDeltaNetModel, self).__init__()
self.num_actions = num_actions # output vocab size.
self.use_xem = use_xem
self.hidden_size = hidden_size
self.num_layers = num_layers
self.num_head = num_head
self.dim_head = dim_head
self.dim_ff = dim_ff
self.dropout = dropout
self.feat_convs = []
self.resnet1 = []
self.resnet2 = []
self.convs = []
input_channels = 4
for num_ch in [16, 32, 32]:
feats_convs = []
feats_convs.append(
nn.Conv2d(
in_channels=input_channels,
out_channels=num_ch,
kernel_size=3,
stride=1,
padding=1,
)
)
feats_convs.append(
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
self.feat_convs.append(nn.Sequential(*feats_convs))
input_channels = num_ch
for i in range(2):
resnet_block = []
resnet_block.append(nn.ReLU())
resnet_block.append(
nn.Conv2d(
in_channels=input_channels,
out_channels=num_ch,
kernel_size=3,
stride=1,
padding=1,
)
)
resnet_block.append(nn.ReLU())
resnet_block.append(
nn.Conv2d(
in_channels=input_channels,
out_channels=num_ch,
kernel_size=3,
stride=1,
padding=1,
)
)
if i == 0:
self.resnet1.append(nn.Sequential(*resnet_block))
else:
self.resnet2.append(nn.Sequential(*resnet_block))
self.feat_convs = nn.ModuleList(self.feat_convs)
self.resnet1 = nn.ModuleList(self.resnet1)
self.resnet2 = nn.ModuleList(self.resnet2)
self.fc = nn.Linear(3872, 256)
# FC output size + last reward.
# core_output_size = self.fc.out_features + 1
self.core = DeltaDeltaNetLayer(
self.fc.out_features + 1, hidden_size, num_layers, num_head,
dim_head, dim_ff, dropout)
core_output_size = hidden_size
self.policy = nn.Linear(core_output_size, self.num_actions)
self.baseline = nn.Linear(core_output_size, 1)
def initial_state(self, batch_size=1):
# why not create on device?
# add dummy dim0 for inference code compat.
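# The self-modifying fast weight maps each head's input (dim_head) to
# slots for output, query, key, and a learning-rate scalar, hence the
# 3 * dim_head + 1 last dimension.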
fw_state_tuple = tuple(
torch.zeros(
1, batch_size, self.num_head, self.dim_head,
3 * self.dim_head + 1)
for _ in range(self.num_layers)
)
very_fw_state_tuple = tuple(
torch.zeros(
1, batch_size, self.num_head, self.dim_head, self.dim_head)
for _ in range(self.num_layers)
)
return (fw_state_tuple, very_fw_state_tuple)
def forward(self, inputs, core_state):
x = inputs["frame"]
assert x.device.type != 'cpu'
T, B, *_ = x.shape
x = torch.flatten(x, 0, 1) # Merge time and batch.
x = x.float() / 255.0
res_input = None
for i, fconv in enumerate(self.feat_convs):
x = fconv(x)
res_input = x
x = self.resnet1[i](x)
x += res_input
res_input = x
x = self.resnet2[i](x)
x += res_input
x = F.relu(x)
x = x.view(T * B, -1)
x = F.relu(self.fc(x))
clipped_reward = torch.clamp(inputs["reward"], -1, 1).view(T * B, 1)
core_input = torch.cat([x, clipped_reward], dim=-1)
# recurrent component
core_input = core_input.view(T, B, -1)
core_output_list = []
notdone = (~inputs["done"]).float()
for input, nd in zip(core_input.unbind(), notdone.unbind()):
# if not use_xem
# Reset core state to zero whenever an episode ended.
# Make `done` broadcastable with (num_layers, B, D, D)
# states:
# nd = nd.view(1, -1, 1)
# needs extra dim0 for compat w/ inference code
fw_state, very_fw_state = core_state
nd = nd.view(1, -1, 1, 1, 1)
if not self.use_xem:
fw_state = nest.map(nd.mul, fw_state)
else:
# save cross episodic fast weights
for layer_id, fw_layer in enumerate(self.core.fwm_layers):
fw_layer.cached_fast_weights = fw_state[layer_id][0]
very_fw_state = nest.map(nd.mul, very_fw_state)
core_state = (fw_state, very_fw_state)
output, core_state = self.core(input.unsqueeze(0), core_state)
core_output_list.append(output)
core_output = torch.flatten(torch.cat(core_output_list), 0, 1)
policy_logits = self.policy(core_output)
baseline = self.baseline(core_output)
if self.training:
action = torch.multinomial(F.softmax(policy_logits, dim=1),
num_samples=1)
else:
# Don't sample when testing.
action = torch.argmax(policy_logits, dim=1)
policy_logits = policy_logits.view(T, B, self.num_actions)
baseline = baseline.view(T, B)
action = action.view(T, B)
return (action, policy_logits, baseline), core_state
| 32,836
| 33.895855
| 86
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/torchbeast/polybeast_learner.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import logging
import os
import threading
import time
import timeit
import traceback
import nest
import torch
import libtorchbeast
from torch import nn
from torch.nn import functional as F
from torchbeast.core import file_writer
from torchbeast.core import vtrace
from torchbeast.model import Net
from torchbeast.model import DeltaNetModel as DeltaNet
from torchbeast.model import DeltaDeltaNetModel as DDNet
from torchbeast.model import LinearTransformerModel as LT
from torchbeast.model import RecDeltaModel as RecDelta
from torchbeast.model import FastRNNModel as FastRNN
# Necessary for multithreading.
os.environ["OMP_NUM_THREADS"] = "1"
# yapf: disable
parser = argparse.ArgumentParser(description="PyTorch Scalable Agent")
parser.add_argument("--pipes_basename", default="unix:/tmp/polybeast",
help="Basename for the pipes for inter-process communication. "
"Has to be of the type unix:/some/path.")
parser.add_argument("--mode", default="train",
choices=["train", "test", "test_render"],
help="Training or test mode.")
parser.add_argument("--xpid", default=None,
help="Experiment id (default: None).")
# Training settings.
parser.add_argument("--disable_checkpoint", action="store_true",
help="Disable saving checkpoint.")
parser.add_argument("--save_extra_checkpoint", default=50000000, type=int,
help="Save an extra checkpoint at .")
parser.add_argument("--eval_extra", action="store_true",
help="Eval extra checkpoint.")
parser.add_argument("--savedir", default="~/palaas/torchbeast",
help="Root dir where experiment data will be saved.")
parser.add_argument("--num_actors", default=4, type=int, metavar="N",
help="Number of actors.")
parser.add_argument("--total_steps", default=100000, type=int, metavar="T",
help="Total environment steps to train for.")
parser.add_argument("--batch_size", default=8, type=int, metavar="B",
help="Learner batch size.")
parser.add_argument("--unroll_length", default=80, type=int, metavar="T",
help="The unroll length (time dimension).")
parser.add_argument("--num_learner_threads", default=2, type=int,
metavar="N", help="Number learner threads.")
parser.add_argument("--num_inference_threads", default=2, type=int,
metavar="N", help="Number learner threads.")
parser.add_argument("--disable_cuda", action="store_true",
help="Disable CUDA.")
parser.add_argument("--num_actions", default=6, type=int, metavar="A",
help="Number of actions.")
parser.add_argument("--use_lstm", action="store_true",
help="Use LSTM in agent model.")
parser.add_argument("--use_delta_rnn", action="store_true",
help="Use Delta RNN in agent model.")
parser.add_argument("--use_delta", action="store_true",
help="Use Delta Net in agent model.")
parser.add_argument("--use_lt", action="store_true",
help="Use Linear Trafo in agent model.")
parser.add_argument("--use_rec_delta", action="store_true",
help="Use Recurrent Delta Net in agent model.")
parser.add_argument("--use_dd", action="store_true",
help="Use Delta Delta in agent model.")
parser.add_argument("--max_learner_queue_size", default=None, type=int, metavar="N",
help="Optional maximum learner queue size. Defaults to batch_size.")
# Model settings.
parser.add_argument("--hidden_size", default=128, type=int,
help="transformer hidden size.")
parser.add_argument("--dim_ff", default=512, type=int,
help="transformer hidden size.")
parser.add_argument("--dim_head", default=32, type=int,
help="transformer head size.")
parser.add_argument("--num_layers", default=2, type=int,
help="tranformer num layers.")
parser.add_argument("--num_head", default=4, type=int,
help="tranformer num heads.")
parser.add_argument("--dropout", default=0.0, type=float,
help="tranformer dropout.")
# Loss settings.
parser.add_argument("--entropy_cost", default=0.0006, type=float,
help="Entropy cost/multiplier.")
parser.add_argument("--baseline_cost", default=0.5, type=float,
help="Baseline cost/multiplier.")
parser.add_argument("--discounting", default=0.99, type=float,
help="Discounting factor.")
parser.add_argument("--reward_clipping", default="abs_one",
choices=["abs_one", "none"],
help="Reward clipping.")
# Optimizer settings.
parser.add_argument("--learning_rate", default=0.00048, type=float,
metavar="LR", help="Learning rate.")
parser.add_argument("--alpha", default=0.99, type=float,
help="RMSProp smoothing constant.")
parser.add_argument("--momentum", default=0, type=float,
help="RMSProp momentum.")
parser.add_argument("--epsilon", default=0.01, type=float,
help="RMSProp epsilon.")
parser.add_argument("--grad_norm_clipping", default=40.0, type=float,
help="Global gradient norm clip.")
# Misc settings.
parser.add_argument("--write_profiler_trace", action="store_true",
help="Collect and write a profiler trace "
"for chrome://tracing/.")
# yapf: enable
parser.add_argument('--num_servers', default=4, type=int, metavar='N',
help='Number of environment servers.')
parser.add_argument('--env', type=str, default='PongNoFrameskip-v4',
help='Gym environment.')
# Wandb settings
parser.add_argument('--project_name', type=str, default=None,
help='project name for wandb.')
parser.add_argument('--job_name', type=str, default=None,
help='job name for wandb.')
parser.add_argument('--use_wandb', action='store_true',
help='use wandb.')
args = parser.parse_args()
if args.use_wandb: # configure wandb.
import wandb
from datetime import datetime  # needed by the default project name below
use_wandb = True
if args.project_name is None:
project_name = (os.uname()[1]
+ datetime.now().strftime("%Y-%m-%d-%H-%M-%S"))
else:
project_name = args.project_name
wandb.init(project=project_name)
if args.job_name is None:
# wandb.run.name = (os.uname()[1]
# + datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
# + args.work_dir)
wandb.run.name = f"{os.uname()[1]}" \
f"-{args.mode}" \
f"-{args.xpid}" \
f"-{args.disable_checkpoint}" \
f"-{args.savedir}" \
f"-{args.num_actors}" \
f"-{args.total_steps}" \
f"-{args.batch_size}" \
f"-{args.unroll_length}" \
f"-{args.entropy_cost}" \
f"-{args.baseline_cost}" \
f"-{args.discounting}" \
f"-{args.reward_clipping}" \
f"-{args.learning_rate}" \
f"-{args.alpha}" \
f"-{args.momentum}" \
f"-{args.epsilon}" \
f"-{args.grad_norm_clipping}"
else:
wandb.run.name = f"{os.uname()[1]}//{args.job_name}"
config = wandb.config
config.host = os.uname()[1] # host node name
config.mode = args.mode
config.xpid = args.xpid
config.disable_checkpoint = args.disable_checkpoint
config.savedir = args.savedir
config.num_actors = args.num_actors
config.total_steps = args.total_steps
config.batch_size = args.batch_size
config.unroll_length = args.unroll_length
config.disable_cuda = args.disable_cuda
config.use_lstm = args.use_lstm
config.entropy_cost = args.entropy_cost
config.baseline_cost = args.baseline_cost
config.discounting = args.discounting
config.reward_clipping = args.reward_clipping
config.learning_rate = args.learning_rate
config.alpha = args.alpha
config.momentum = args.momentum
config.epsilon = args.epsilon
config.grad_norm_clipping = args.grad_norm_clipping
else:
use_wandb = False
logging.basicConfig(
format=(
"[%(levelname)s:%(process)d %(module)s:%(lineno)d %(asctime)s] " "%(message)s"
),
level=0,
)
def compute_baseline_loss(advantages):
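# Squared-error loss on the value baseline: 0.5 * sum of squared advantages.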
return 0.5 * torch.sum(advantages ** 2)
def compute_entropy_loss(logits):
"""Return the entropy loss, i.e., the negative entropy of the policy."""
policy = F.softmax(logits, dim=-1)
log_policy = F.log_softmax(logits, dim=-1)
return torch.sum(policy * log_policy)
def compute_policy_gradient_loss(logits, actions, advantages):
cross_entropy = F.nll_loss(
F.log_softmax(torch.flatten(logits, 0, 1), dim=-1),
target=torch.flatten(actions, 0, 1),
reduction="none",
)
cross_entropy = cross_entropy.view_as(advantages)
return torch.sum(cross_entropy * advantages.detach())
def inference(flags, inference_batcher, model, lock=threading.Lock()): # noqa: B008
with torch.no_grad():
for batch in inference_batcher:
batched_env_outputs, agent_state = batch.get_inputs()
frame, reward, done, *_ = batched_env_outputs
frame = frame.to(flags.actor_device, non_blocking=True)
reward = reward.to(flags.actor_device, non_blocking=True)
done = done.to(flags.actor_device, non_blocking=True)
agent_state = nest.map(
lambda t: t.to(flags.actor_device, non_blocking=True), agent_state
)
with lock:
outputs = model(
dict(frame=frame, reward=reward, done=done), agent_state
)
outputs = nest.map(lambda t: t.cpu(), outputs)
batch.set_outputs(outputs)
EnvOutput = collections.namedtuple(
"EnvOutput", "frame rewards done episode_step episode_return"
)
AgentOutput = collections.namedtuple("AgentOutput", "action policy_logits baseline")
Batch = collections.namedtuple("Batch", "env agent")
def learn(
flags,
learner_queue,
model,
actor_model,
optimizer,
scheduler,
stats,
plogger,
lock=threading.Lock(),
):
for tensors in learner_queue:
tensors = nest.map(lambda t: t.to(flags.learner_device), tensors)
batch, initial_agent_state = tensors
env_outputs, actor_outputs = batch
frame, reward, done, *_ = env_outputs
lock.acquire() # Only one thread learning at a time.
learner_outputs, unused_state = model(
dict(frame=frame, reward=reward, done=done), initial_agent_state
)
# Take final value function slice for bootstrapping.
learner_outputs = AgentOutput._make(learner_outputs)
bootstrap_value = learner_outputs.baseline[-1]
# Move from obs[t] -> action[t] to action[t] -> obs[t].
batch = nest.map(lambda t: t[1:], batch)
learner_outputs = nest.map(lambda t: t[:-1], learner_outputs)
# Turn into namedtuples again.
env_outputs, actor_outputs = batch
env_outputs = EnvOutput._make(env_outputs)
actor_outputs = AgentOutput._make(actor_outputs)
learner_outputs = AgentOutput._make(learner_outputs)
if flags.reward_clipping == "abs_one":
clipped_rewards = torch.clamp(env_outputs.rewards, -1, 1)
elif flags.reward_clipping == "none":
clipped_rewards = env_outputs.rewards
discounts = (~env_outputs.done).float() * flags.discounting
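# V-trace (IMPALA) off-policy correction: importance-weight the returns
# using the behavior (actor) vs. target (learner) policy logits.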
vtrace_returns = vtrace.from_logits(
behavior_policy_logits=actor_outputs.policy_logits,
target_policy_logits=learner_outputs.policy_logits,
actions=actor_outputs.action,
discounts=discounts,
rewards=clipped_rewards,
values=learner_outputs.baseline,
bootstrap_value=bootstrap_value,
)
pg_loss = compute_policy_gradient_loss(
learner_outputs.policy_logits,
actor_outputs.action,
vtrace_returns.pg_advantages,
)
baseline_loss = flags.baseline_cost * compute_baseline_loss(
vtrace_returns.vs - learner_outputs.baseline
)
entropy_loss = flags.entropy_cost * compute_entropy_loss(
learner_outputs.policy_logits
)
total_loss = pg_loss + baseline_loss + entropy_loss
optimizer.zero_grad()
total_loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), flags.grad_norm_clipping)
optimizer.step()
scheduler.step()
actor_model.load_state_dict(model.state_dict())
episode_returns = env_outputs.episode_return[env_outputs.done]
stats["step"] = stats.get("step", 0) + flags.unroll_length * flags.batch_size
stats["episode_returns"] = tuple(episode_returns.cpu().numpy())
stats["mean_episode_return"] = torch.mean(episode_returns).item()
stats["mean_episode_step"] = torch.mean(env_outputs.episode_step.float()).item()
stats["total_loss"] = total_loss.item()
stats["pg_loss"] = pg_loss.item()
stats["baseline_loss"] = baseline_loss.item()
stats["entropy_loss"] = entropy_loss.item()
stats["learner_queue_size"] = learner_queue.size()
if use_wandb:
wandb.log({"episode_returns": stats["episode_returns"]})
wandb.log({"mean_episode_step": stats["mean_episode_step"]})
wandb.log({"mean_episode_return": stats["mean_episode_return"]})
wandb.log({"total_loss": stats["total_loss"]})
wandb.log({"pg_loss": stats["pg_loss"]})
wandb.log({"baseline_loss": stats["baseline_loss"]})
wandb.log({"entropy_loss": stats["entropy_loss"]})
plogger.log(stats)
if not len(episode_returns):
# Hide the mean-of-empty-tuple NaN as it scares people.
stats["mean_episode_return"] = None
lock.release()
def train(flags):
if flags.xpid is None:
flags.xpid = "torchbeast-%s" % time.strftime("%Y%m%d-%H%M%S")
plogger = file_writer.FileWriter(
xpid=flags.xpid, xp_args=flags.__dict__, rootdir=flags.savedir
)
checkpointpath = os.path.expandvars(
os.path.expanduser("%s/%s/%s" % (flags.savedir, flags.xpid, "model.tar"))
)
if flags.save_extra_checkpoint > 0:
        extra_checkpointpath = os.path.expandvars(
            os.path.expanduser(
                "%s/%s/%s" % (flags.savedir, flags.xpid, "model_extra.tar")
            )
        )
if not flags.disable_cuda and torch.cuda.is_available():
logging.info("Using CUDA.")
flags.learner_device = torch.device("cuda:0")
flags.actor_device = torch.device("cuda:1")
else:
logging.info("Not using CUDA.")
flags.learner_device = torch.device("cpu")
flags.actor_device = torch.device("cpu")
if flags.max_learner_queue_size is None:
flags.max_learner_queue_size = flags.batch_size
# The queue the learner threads will get their data from.
# Setting `minimum_batch_size == maximum_batch_size`
# makes the batch size static.
learner_queue = libtorchbeast.BatchingQueue(
batch_dim=1,
minimum_batch_size=flags.batch_size,
maximum_batch_size=flags.batch_size,
check_inputs=True,
maximum_queue_size=flags.max_learner_queue_size,
)
# The "batcher", a queue for the inference call. Will yield
# "batch" objects with `get_inputs` and `set_outputs` methods.
# The batch size of the tensors will be dynamic.
inference_batcher = libtorchbeast.DynamicBatcher(
batch_dim=1,
minimum_batch_size=1,
maximum_batch_size=512,
timeout_ms=100,
check_outputs=True,
)
addresses = []
connections_per_server = 1
pipe_id = 0
while len(addresses) < flags.num_actors:
for _ in range(connections_per_server):
addresses.append(f"{flags.pipes_basename}.{pipe_id}")
if len(addresses) == flags.num_actors:
break
pipe_id += 1
if flags.use_delta:
model = DeltaNet(
num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
logging.info(model)
model = model.to(device=flags.learner_device)
actor_model = DeltaNet(
num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
actor_model.to(device=flags.actor_device)
elif flags.use_lt:
model = LT(num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
logging.info(model)
model = model.to(device=flags.learner_device)
actor_model = LT(
num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
actor_model.to(device=flags.actor_device)
elif flags.use_delta_rnn:
model = FastRNN(num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
model = model.to(device=flags.learner_device)
actor_model = FastRNN(
num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
actor_model.to(device=flags.actor_device)
elif flags.use_rec_delta:
model = RecDelta(num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
model = model.to(device=flags.learner_device)
actor_model = RecDelta(
num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
actor_model.to(device=flags.actor_device)
elif flags.use_dd:
model = DDNet(num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
logging.info(model)
model = model.to(device=flags.learner_device)
actor_model = DDNet(
num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
actor_model.to(device=flags.actor_device)
else:
model = Net(num_actions=flags.num_actions, use_lstm=flags.use_lstm)
logging.info(model)
model = model.to(device=flags.learner_device)
actor_model = Net(
num_actions=flags.num_actions, use_lstm=flags.use_lstm)
actor_model.to(device=flags.actor_device)
# The ActorPool that will run `flags.num_actors` many loops.
actors = libtorchbeast.ActorPool(
unroll_length=flags.unroll_length,
learner_queue=learner_queue,
inference_batcher=inference_batcher,
env_server_addresses=addresses,
initial_agent_state=actor_model.initial_state(),
)
def run():
try:
actors.run()
except Exception as e:
logging.error("Exception in actorpool thread!")
traceback.print_exc()
print()
raise e
actorpool_thread = threading.Thread(target=run, name="actorpool-thread")
optimizer = torch.optim.RMSprop(
model.parameters(),
lr=flags.learning_rate,
momentum=flags.momentum,
eps=flags.epsilon,
alpha=flags.alpha,
)
def lr_lambda(epoch):
return (
1
- min(epoch * flags.unroll_length * flags.batch_size, flags.total_steps)
/ flags.total_steps
)
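    # lr_lambda linearly anneals the learning rate to zero over
    # flags.total_steps environment steps; each scheduler.step() accounts for
    # the unroll_length * batch_size env steps consumed per optimizer update.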
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
stats = {}
# Load state from a checkpoint, if possible.
if os.path.exists(checkpointpath):
checkpoint_states = torch.load(
checkpointpath, map_location=flags.learner_device
)
model.load_state_dict(checkpoint_states["model_state_dict"])
optimizer.load_state_dict(checkpoint_states["optimizer_state_dict"])
scheduler.load_state_dict(checkpoint_states["scheduler_state_dict"])
stats = checkpoint_states["stats"]
logging.info(f"Resuming preempted job, current stats:\n{stats}")
# Initialize actor model like learner model.
actor_model.load_state_dict(model.state_dict())
learner_threads = [
threading.Thread(
target=learn,
name="learner-thread-%i" % i,
args=(
flags,
learner_queue,
model,
actor_model,
optimizer,
scheduler,
stats,
plogger,
),
)
for i in range(flags.num_learner_threads)
]
inference_threads = [
threading.Thread(
target=inference,
name="inference-thread-%i" % i,
args=(flags, inference_batcher, actor_model),
)
for i in range(flags.num_inference_threads)
]
actorpool_thread.start()
for t in learner_threads + inference_threads:
t.start()
def checkpoint():
if flags.disable_checkpoint:
return
logging.info("Saving checkpoint to %s", checkpointpath)
torch.save(
{
"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"scheduler_state_dict": scheduler.state_dict(),
"stats": stats,
"flags": vars(flags),
},
checkpointpath,
)
def format_value(x):
return f"{x:1.5}" if isinstance(x, float) else str(x)
try:
if flags.save_extra_checkpoint > 0:
saved_extra = False
last_checkpoint_time = timeit.default_timer()
while True:
start_time = timeit.default_timer()
start_step = stats.get("step", 0)
if start_step >= flags.total_steps:
break
time.sleep(5)
end_step = stats.get("step", 0)
if timeit.default_timer() - last_checkpoint_time > 10 * 60:
# Save every 10 min.
checkpoint()
last_checkpoint_time = timeit.default_timer()
logging.info(
"Step %i @ %.1f SPS. Inference batcher size: %i."
" Learner queue size: %i."
" Other stats: (%s)",
end_step,
(end_step - start_step) / (timeit.default_timer() - start_time),
inference_batcher.size(),
learner_queue.size(),
", ".join(
f"{key} = {format_value(value)}" for key, value in stats.items()
),
)
if flags.save_extra_checkpoint > 0:
                if not saved_extra and end_step > flags.save_extra_checkpoint:
logging.info(f"Step {end_step} Saving EXTRA checkpoint to {extra_checkpointpath}")
torch.save(
{
"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"scheduler_state_dict": scheduler.state_dict(),
"stats": stats,
"flags": vars(flags),
},
extra_checkpointpath,
)
saved_extra = True
except KeyboardInterrupt:
pass # Close properly.
else:
logging.info("Learning finished after %i steps.", stats["step"])
checkpoint()
# Done with learning. Stop all the ongoing work.
inference_batcher.close()
learner_queue.close()
actorpool_thread.join()
for t in learner_threads + inference_threads:
t.join()
def test(flags, num_episodes=30, num_runs=5, device='cuda'):
if flags.xpid is None:
checkpointpath = "./latest/model.tar"
elif flags.eval_extra:
checkpointpath = os.path.expandvars(
os.path.expanduser(
"%s/%s/%s" % (flags.savedir, flags.xpid, "model_extra.tar"))
)
else:
checkpointpath = os.path.expandvars(
os.path.expanduser(
"%s/%s/%s" % (flags.savedir, flags.xpid, "model.tar"))
)
from torchbeast import atari_wrappers
from torchbeast.core import environment
def create_test_env(flags):
return atari_wrappers.wrap_pytorch(
atari_wrappers.wrap_deepmind(
atari_wrappers.make_atari(flags.env),
clip_rewards=False,
frame_stack=True,
scale=False,
)
)
gym_env = create_test_env(flags)
env = environment.Environment(gym_env)
if flags.use_delta:
model = DeltaNet(
num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
elif flags.use_lt:
model = LT(num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
elif flags.use_delta_rnn:
model = FastRNN(num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
elif flags.use_rec_delta:
model = RecDelta(num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
elif flags.use_dd:
model = DDNet(num_actions=flags.num_actions, dim_head=flags.dim_head,
hidden_size=flags.hidden_size, dim_ff=flags.dim_ff,
num_layers=flags.num_layers, num_head=flags.num_head,
dropout=flags.dropout)
else:
model = Net(num_actions=flags.num_actions, use_lstm=flags.use_lstm)
print(model)
print(f"# params: "
f"{sum(p.numel() for p in model.parameters() if p.requires_grad)}")
model = model.to(device)
model.eval()
checkpoint = torch.load(checkpointpath, map_location=device)
model.load_state_dict(checkpoint["model_state_dict"])
observation = env.initial()
all_returns = []
core_state = model.initial_state()
core_state = nest.map(lambda t: t.to(device), core_state)
logging.info("Start eval")
with torch.no_grad():
while len(all_returns) < num_runs:
returns = []
while len(returns) < num_episodes:
if flags.mode == "test_render":
env.gym_env.render()
# (action, policy_logits, baseline), core_state
observation = nest.map(lambda t: t.to(device), observation)
agent_outputs, core_state = model(observation, core_state)
action, _, _ = agent_outputs
observation = env.step(action)
if observation["done"].item():
returns.append(observation["episode_return"].item())
logging.info(
"Episode ended after %d steps. Return: %.1f",
observation["episode_step"].item(),
observation["episode_return"].item(),
)
logging.info(
"Average returns over %i episodes: %.1f",
num_episodes, sum(returns) / len(returns)
)
all_returns.append(sum(returns) / len(returns))
env.close()
import numpy as np
logging.info(f"Average returns over {num_episodes} for {num_runs} runs: {all_returns}")
logging.info(f"Mean return: {np.mean(all_returns):.1f}, std: {np.std(all_returns):.1f}")
def main(flags):
if not flags.pipes_basename.startswith("unix:"):
raise Exception("--pipes_basename has to be of the form unix:/some/path.")
if flags.mode == "train":
if flags.write_profiler_trace:
logging.info("Running with profiler.")
with torch.autograd.profiler.profile() as prof:
train(flags)
filename = "chrome-%s.trace" % time.strftime("%Y%m%d-%H%M%S")
logging.info("Writing profiler trace to '%s.gz'", filename)
prof.export_chrome_trace(filename)
os.system("gzip %s" % filename)
else:
train(flags)
else:
test(flags)
if __name__ == "__main__":
flags = parser.parse_args()
main(flags)
| 31,262
| 37.596296
| 130
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/torchbeast/core/vtrace.py
|
# This file taken from
# https://github.com/deepmind/scalable_agent/blob/
# cd66d00914d56c8ba2f0615d9cdeefcb169a8d70/vtrace.py
# and modified.
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to compute V-trace off-policy actor critic targets.
For details and theory see:
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
See https://arxiv.org/abs/1802.01561 for the full paper.
"""
import collections
import torch
import torch.nn.functional as F
VTraceFromLogitsReturns = collections.namedtuple(
"VTraceFromLogitsReturns",
[
"vs",
"pg_advantages",
"log_rhos",
"behavior_action_log_probs",
"target_action_log_probs",
],
)
VTraceReturns = collections.namedtuple("VTraceReturns", "vs pg_advantages")
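# Note (added for clarity): F.nll_loss with reduction="none" returns the
# negative log-probability of each target action, so the leading minus sign
# below recovers log pi(a | x) for every (time, batch) entry.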
def action_log_probs(policy_logits, actions):
return -F.nll_loss(
F.log_softmax(torch.flatten(policy_logits, 0, -2), dim=-1),
torch.flatten(actions),
reduction="none",
).view_as(actions)
def from_logits(
behavior_policy_logits,
target_policy_logits,
actions,
discounts,
rewards,
values,
bootstrap_value,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0,
):
"""V-trace for softmax policies."""
target_action_log_probs = action_log_probs(target_policy_logits, actions)
behavior_action_log_probs = action_log_probs(behavior_policy_logits, actions)
log_rhos = target_action_log_probs - behavior_action_log_probs
vtrace_returns = from_importance_weights(
log_rhos=log_rhos,
discounts=discounts,
rewards=rewards,
values=values,
bootstrap_value=bootstrap_value,
clip_rho_threshold=clip_rho_threshold,
clip_pg_rho_threshold=clip_pg_rho_threshold,
)
return VTraceFromLogitsReturns(
log_rhos=log_rhos,
behavior_action_log_probs=behavior_action_log_probs,
target_action_log_probs=target_action_log_probs,
**vtrace_returns._asdict(),
)
@torch.no_grad()
def from_importance_weights(
log_rhos,
discounts,
rewards,
values,
bootstrap_value,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0,
):
"""V-trace from log importance weights."""
with torch.no_grad():
rhos = torch.exp(log_rhos)
if clip_rho_threshold is not None:
clipped_rhos = torch.clamp(rhos, max=clip_rho_threshold)
else:
clipped_rhos = rhos
cs = torch.clamp(rhos, max=1.0)
# Append bootstrapped value to get [v1, ..., v_t+1]
values_t_plus_1 = torch.cat(
[values[1:], torch.unsqueeze(bootstrap_value, 0)], dim=0
)
deltas = clipped_rhos * (rewards + discounts * values_t_plus_1 - values)
acc = torch.zeros_like(bootstrap_value)
result = []
for t in range(discounts.shape[0] - 1, -1, -1):
acc = deltas[t] + discounts[t] * cs[t] * acc
result.append(acc)
result.reverse()
vs_minus_v_xs = torch.stack(result)
# Add V(x_s) to get v_s.
vs = torch.add(vs_minus_v_xs, values)
# Advantage for policy gradient.
broadcasted_bootstrap_values = torch.ones_like(vs[0]) * bootstrap_value
vs_t_plus_1 = torch.cat(
[vs[1:], broadcasted_bootstrap_values.unsqueeze(0)], dim=0
)
if clip_pg_rho_threshold is not None:
clipped_pg_rhos = torch.clamp(rhos, max=clip_pg_rho_threshold)
else:
clipped_pg_rhos = rhos
pg_advantages = clipped_pg_rhos * (rewards + discounts * vs_t_plus_1 - values)
# Make sure no gradients backpropagated through the returned values.
return VTraceReturns(vs=vs, pg_advantages=pg_advantages)
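# A minimal smoke test added for illustration (hedged sketch, not part of the
# original file). It assumes the time-major (T, B, ...) tensor layout used by
# the functions above; all inputs here are random dummies.
if __name__ == "__main__":
    T, B, A = 5, 2, 4  # unroll length, batch size, number of actions
    behavior_logits = torch.randn(T, B, A)
    target_logits = torch.randn(T, B, A)
    actions = torch.randint(A, (T, B))
    discounts = torch.full((T, B), 0.99)
    rewards = torch.randn(T, B)
    values = torch.randn(T, B)
    bootstrap_value = torch.randn(B)
    returns = from_logits(
        behavior_policy_logits=behavior_logits,
        target_policy_logits=target_logits,
        actions=actions,
        discounts=discounts,
        rewards=rewards,
        values=values,
        bootstrap_value=bootstrap_value,
    )
    # Both outputs share the (T, B) layout of the inputs.
    print(returns.vs.shape, returns.pg_advantages.shape)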
| 4,350
| 30.078571
| 86
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/torchbeast/core/environment.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The environment class for MonoBeast."""
import torch
def _format_frame(frame):
frame = torch.from_numpy(frame)
return frame.view((1, 1) + frame.shape) # (...) -> (T,B,...).
class Environment:
def __init__(self, gym_env):
self.gym_env = gym_env
self.episode_return = None
self.episode_step = None
def initial(self):
initial_reward = torch.zeros(1, 1)
# This supports only single-tensor actions ATM.
initial_last_action = torch.zeros(1, 1, dtype=torch.int64)
self.episode_return = torch.zeros(1, 1)
self.episode_step = torch.zeros(1, 1, dtype=torch.int32)
initial_done = torch.ones(1, 1, dtype=torch.uint8)
initial_frame = _format_frame(self.gym_env.reset())
return dict(
frame=initial_frame,
reward=initial_reward,
done=initial_done,
episode_return=self.episode_return,
episode_step=self.episode_step,
last_action=initial_last_action,
)
def step(self, action):
frame, reward, done, unused_info = self.gym_env.step(action.item())
self.episode_step += 1
self.episode_return += reward
episode_step = self.episode_step
episode_return = self.episode_return
if done:
frame = self.gym_env.reset()
self.episode_return = torch.zeros(1, 1)
self.episode_step = torch.zeros(1, 1, dtype=torch.int32)
frame = _format_frame(frame)
reward = torch.tensor(reward).view(1, 1)
done = torch.tensor(done).view(1, 1)
return dict(
frame=frame,
reward=reward,
done=done,
episode_return=episode_return,
episode_step=episode_step,
last_action=action,
)
def close(self):
self.gym_env.close()
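# Illustrative usage sketch (hedged; not part of the original file, and the
# env constructor is assumed, e.g. any Gym-style Atari wrapper):
#
#   env = Environment(gym.make("PongNoFrameskip-v4"))
#   obs = env.initial()  # dict of (T=1, B=1, ...) tensors
#   obs = env.step(torch.zeros(1, 1, dtype=torch.int64))
#   env.close()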
| 2,470
| 32.849315
| 75
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/torchbeast/core/prof.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Naive profiling using timeit. (Used in MonoBeast.)"""
import collections
import timeit
class Timings:
"""Not thread-safe."""
def __init__(self):
self._means = collections.defaultdict(int)
self._vars = collections.defaultdict(int)
self._counts = collections.defaultdict(int)
self.reset()
def reset(self):
self.last_time = timeit.default_timer()
def time(self, name):
"""Save an update for event `name`.
Nerd alarm: We could just store a
collections.defaultdict(list)
and compute means and standard deviations at the end. But thanks to the
clever math in Sutton-Barto
(http://www.incompleteideas.net/book/first/ebook/node19.html) and
https://math.stackexchange.com/a/103025/5051 we can update both the
means and the stds online. O(1) FTW!
"""
now = timeit.default_timer()
x = now - self.last_time
self.last_time = now
n = self._counts[name]
mean = self._means[name] + (x - self._means[name]) / (n + 1)
var = (
n * self._vars[name] + n * (self._means[name] - mean) ** 2 + (x - mean) ** 2
) / (n + 1)
self._means[name] = mean
self._vars[name] = var
self._counts[name] += 1
def means(self):
return self._means
def vars(self):
return self._vars
def stds(self):
return {k: v ** 0.5 for k, v in self._vars.items()}
def summary(self, prefix=""):
means = self.means()
stds = self.stds()
total = sum(means.values())
result = prefix
for k in sorted(means, key=means.get, reverse=True):
result += f"\n %s: %.6fms +- %.6fms (%.2f%%) " % (
k,
1000 * means[k],
1000 * stds[k],
100 * means[k] / total,
)
result += "\nTotal: %.6fms" % (1000 * total)
return result
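if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file): measure a few
    # iterations of dummy work and print the online mean/std summary.
    timings = Timings()
    for _ in range(3):
        timings.reset()
        _ = sum(range(100_000))  # simulated work
        timings.time("work")
    print(timings.summary("Example timings:"))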
| 2,572
| 30.378049
| 88
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/torchbeast/core/file_writer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import csv
import datetime
import json
import logging
import os
import time
from typing import Dict
def gather_metadata() -> Dict:
date_start = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
# Gathering git metadata.
try:
import git
try:
repo = git.Repo(search_parent_directories=True)
git_sha = repo.commit().hexsha
git_data = dict(
commit=git_sha,
branch=None if repo.head.is_detached else repo.active_branch.name,
is_dirty=repo.is_dirty(),
path=repo.git_dir,
)
except git.InvalidGitRepositoryError:
git_data = None
except ImportError:
git_data = None
# Gathering slurm metadata.
if "SLURM_JOB_ID" in os.environ:
slurm_env_keys = [k for k in os.environ if k.startswith("SLURM")]
slurm_data = {}
for k in slurm_env_keys:
d_key = k.replace("SLURM_", "").replace("SLURMD_", "").lower()
slurm_data[d_key] = os.environ[k]
else:
slurm_data = None
return dict(
date_start=date_start,
date_end=None,
successful=False,
git=git_data,
slurm=slurm_data,
env=os.environ.copy(),
)
class FileWriter:
def __init__(
self,
xpid: str = None,
xp_args: dict = None,
rootdir: str = "~/logs",
symlink_to_latest: bool = True,
):
if not xpid:
# Make unique id.
xpid = "{proc}_{unixtime}".format(
proc=os.getpid(), unixtime=int(time.time())
)
self.xpid = xpid
self._tick = 0
# Metadata gathering.
if xp_args is None:
xp_args = {}
self.metadata = gather_metadata()
# We need to copy the args, otherwise when we close the file writer
# (and rewrite the args) we might have non-serializable objects (or
# other unwanted side-effects).
self.metadata["args"] = copy.deepcopy(xp_args)
self.metadata["xpid"] = self.xpid
formatter = logging.Formatter("%(message)s")
self._logger = logging.getLogger("logs/out")
# To stdout handler.
shandle = logging.StreamHandler()
shandle.setFormatter(formatter)
self._logger.addHandler(shandle)
self._logger.setLevel(logging.INFO)
rootdir = os.path.expandvars(os.path.expanduser(rootdir))
# To file handler.
self.basepath = os.path.join(rootdir, self.xpid)
if not os.path.exists(self.basepath):
self._logger.info("Creating log directory: %s", self.basepath)
os.makedirs(self.basepath, exist_ok=True)
else:
self._logger.info("Found log directory: %s", self.basepath)
if symlink_to_latest:
# Add 'latest' as symlink unless it exists and is no symlink.
symlink = os.path.join(rootdir, "latest")
try:
if os.path.islink(symlink):
os.remove(symlink)
if not os.path.exists(symlink):
os.symlink(self.basepath, symlink)
self._logger.info("Symlinked log directory: %s", symlink)
except OSError:
# os.remove() or os.symlink() raced. Don't do anything.
pass
self.paths = dict(
msg="{base}/out.log".format(base=self.basepath),
logs="{base}/logs.csv".format(base=self.basepath),
fields="{base}/fields.csv".format(base=self.basepath),
meta="{base}/meta.json".format(base=self.basepath),
)
self._logger.info("Saving arguments to %s", self.paths["meta"])
if os.path.exists(self.paths["meta"]):
self._logger.warning(
"Path to meta file already exists. " "Not overriding meta."
)
else:
self._save_metadata()
self._logger.info("Saving messages to %s", self.paths["msg"])
if os.path.exists(self.paths["msg"]):
self._logger.warning(
"Path to message file already exists. " "New data will be appended."
)
fhandle = logging.FileHandler(self.paths["msg"])
fhandle.setFormatter(formatter)
self._logger.addHandler(fhandle)
self._logger.info("Saving logs data to %s", self.paths["logs"])
self._logger.info("Saving logs' fields to %s", self.paths["fields"])
self.fieldnames = ["_tick", "_time"]
if os.path.exists(self.paths["logs"]):
self._logger.warning(
"Path to log file already exists. " "New data will be appended."
)
# Override default fieldnames.
with open(self.paths["fields"], "r") as csvfile:
reader = csv.reader(csvfile)
lines = list(reader)
if len(lines) > 0:
self.fieldnames = lines[-1]
# Override default tick: use the last tick from the logs file plus 1.
with open(self.paths["logs"], "r") as csvfile:
reader = csv.reader(csvfile)
lines = list(reader)
# Need at least two lines in order to read the last tick:
# the first is the csv header and the second is the first line
# of data.
if len(lines) > 1:
self._tick = int(lines[-1][0]) + 1
self._fieldfile = open(self.paths["fields"], "a")
self._fieldwriter = csv.writer(self._fieldfile)
self._logfile = open(self.paths["logs"], "a")
self._logwriter = csv.DictWriter(self._logfile, fieldnames=self.fieldnames)
def log(self, to_log: Dict, tick: int = None, verbose: bool = False) -> None:
if tick is not None:
raise NotImplementedError
else:
to_log["_tick"] = self._tick
self._tick += 1
to_log["_time"] = time.time()
old_len = len(self.fieldnames)
for k in to_log:
if k not in self.fieldnames:
self.fieldnames.append(k)
if old_len != len(self.fieldnames):
self._fieldwriter.writerow(self.fieldnames)
self._logger.info("Updated log fields: %s", self.fieldnames)
if to_log["_tick"] == 0:
self._logfile.write("# %s\n" % ",".join(self.fieldnames))
if verbose:
self._logger.info(
"LOG | %s",
", ".join(["{}: {}".format(k, to_log[k]) for k in sorted(to_log)]),
)
self._logwriter.writerow(to_log)
self._logfile.flush()
def close(self, successful: bool = True) -> None:
self.metadata["date_end"] = datetime.datetime.now().strftime(
"%Y-%m-%d %H:%M:%S.%f"
)
self.metadata["successful"] = successful
self._save_metadata()
for f in [self._logfile, self._fieldfile]:
f.close()
def _save_metadata(self) -> None:
with open(self.paths["meta"], "w") as jsonfile:
json.dump(self.metadata, jsonfile, indent=4, sort_keys=True)
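# Illustrative usage sketch (hedged; not part of the original file, and the
# `flags` namespace is assumed to come from the caller):
#
#   plogger = FileWriter(xpid="my-run", xp_args=vars(flags), rootdir="~/logs")
#   plogger.log({"step": 0, "mean_episode_return": 0.0})
#   plogger.close(successful=True)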
| 7,787
| 35.055556
| 84
|
py
|
modern-srwm
|
modern-srwm-main/reinforcement_learning/torchbeast/self_ref_v0/__init__.py
|
# Adaptation of the original code from
# https://github.com/idiap/fast-transformers/blob/master/fast_transformers/causal_product/__init__.py
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <angelos.katharopoulos@idiap.ch>,
# Apoorv Vyas <avyas@idiap.ch>
# Modifications Copyright (c) 2021 Kazuki Irie
import sys
import os
import torch
import torch.nn.functional as F
from torch.utils.cpp_extension import load
# Just-in-time compilation of the CUDA extension; see:
# https://pytorch.org/tutorials/advanced/cpp_extens
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'self_ref_v0.cu')
fwd_cuda = load(
extra_cuda_cflags=['--ftemplate-depth=1024'],
name="self_ref_forward",
sources=[filename], verbose=True)
bwd_cuda = load(
extra_cuda_cflags=['--ftemplate-depth=1024'],
name="self_ref_backward",
sources=[filename], verbose=True)
self_ref_fwd_cuda = fwd_cuda.self_ref_forward
self_ref_bwd_cuda = bwd_cuda.self_ref_backward
class SelfRefv0(torch.autograd.Function):
dot = {
# "cpu": causal_dot_product_cpu,
"cuda": self_ref_fwd_cuda
}
dot_backward = {
# "cpu": causal_dot_backward_cpu,
"cuda": self_ref_bwd_cuda
}
@staticmethod
def forward(ctx, x, W_y, W_q, W_k, w_b):
        # Shape of x: (B, n_head, len, E)
        # Shape of W_q: (B, n_head, E, E) where n_head * E = D (typically)
device = x.device
N, H, L, E = x.shape
assert W_y.shape == (N, H, E, E), "Reshape/unsqueeze if needed."
assert W_q.shape == (N, H, E, E), "Reshape/unsqueeze if needed."
assert W_k.shape == (N, H, E, E), "Reshape/unsqueeze if needed."
assert w_b.shape == (N, H, E, 4), "Reshape/unsqueeze if needed."
out = torch.zeros((N, H, L, E), device=device, dtype=x.dtype) # y
q_main = torch.zeros((N, H, L, E), device=device, dtype=x.dtype)
k_main = torch.zeros((N, H, L, E), device=device, dtype=x.dtype)
beta_main = torch.zeros((N, H, L, 4), device=device, dtype=x.dtype)
y_diff = torch.zeros((N, H, L, E), device=device, dtype=x.dtype)
q_diff = torch.zeros((N, H, L, E), device=device, dtype=x.dtype)
k_diff = torch.zeros((N, H, L, E), device=device, dtype=x.dtype)
beta_diff = torch.zeros((N, H, L, 4), device=device, dtype=x.dtype)
# x = F.softmax(x, dim=-1) # apply already softmax to input
SelfRefv0.dot[device.type](
x,
W_y,
W_q,
W_k,
w_b,
q_main,
k_main,
beta_main,
y_diff,
q_diff,
k_diff,
beta_diff,
out
)
ctx.save_for_backward(
x, q_main, k_main, beta_main, y_diff, q_diff, k_diff, beta_diff,
W_y, W_q, W_k, w_b)
return out
@staticmethod
def backward(ctx, grad_out):
# Extract the saved tensors
(x, q, k, beta, y_diff, q_diff, k_diff, beta_diff,
W_y, W_q, W_k, w_b) = ctx.saved_tensors
# Allocate memory for the gradients
grad_x = torch.zeros_like(x)
grad_W_y = torch.zeros_like(W_y)
grad_W_q = torch.zeros_like(W_q)
grad_W_k = torch.zeros_like(W_k)
grad_w_b = torch.zeros_like(w_b)
# out_delayed = torch.tanh(torch.cat([h0, out[:, :, :-1]], dim=2))
# x, W_y, W_q, W_k, w_b
# W_y_copy = W_y.detach().clone()
# W_q_copy = W_q.detach().clone()
# W_k_copy = W_k.detach().clone()
# w_b_copy = w_b.detach().clone()
# Compute the gradients
SelfRefv0.dot_backward[x.device.type](
x,
q,
k,
beta,
y_diff,
q_diff,
k_diff,
beta_diff,
grad_out,
# W_y_copy,
# W_q_copy,
# W_k_copy,
# w_b_copy,
W_y,
W_q,
W_k,
w_b,
grad_x,
grad_W_y,
grad_W_q,
grad_W_k,
grad_w_b
)
return grad_x, grad_W_y, grad_W_q, grad_W_k, grad_w_b
# Alias the autograd functions to python style snake case naming
self_ref_v0 = SelfRefv0.apply
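# Usage note (added for clarity): with B=batch, H=heads, L=sequence length,
# and E=head dimension,
#   out = self_ref_v0(x, W_y, W_q, W_k, w_b)
# expects x: (B, H, L, E), W_y/W_q/W_k: (B, H, E, E), w_b: (B, H, E, 4), and
# returns out: (B, H, L, E), matching the assertions in forward(). The
# __main__ block below cross-checks the CUDA kernel against a pure-PyTorch
# reference implementation.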
if __name__ == '__main__':
import torch
import torch.nn.functional as F
torch.manual_seed(111)
# Tests pass if the relative difference compared with
# the corresponding torch autograd computation
# is smaller than a threshold.
# Ideally should be tested with double...
rel_threshold = 1e-3
# from https://github.com/idiap/fast-transformers/blob/master/tests/causal_product/test_causal_product_gpu.py
def max_relative_error(a, b, eps=1e-6):
return float(torch.abs((b - a) / (torch.abs(b) + eps)).max().item())
print('##########################')
print('# Test forward pass')
print('##########################')
# bsz, n_head, slen, d_head = 3, 5, 11, 8
bsz, n_head, slen, d_head = 3, 5, 11, 8
v_dim = d_head * 3 + 4
print(f"value dim: {v_dim}")
# W0 = torch.cuda.FloatTensor(
# 1, n_head, d_head, v_dim, device='cuda').uniform_(-1., 1.)
# W0 = W0.repeat(bsz, 1, 1, 1)
Wy0 = torch.cuda.FloatTensor(
bsz, n_head, d_head, d_head, device='cuda').uniform_(-1., 1.)
Wq0 = torch.cuda.FloatTensor(
bsz, n_head, d_head, d_head, device='cuda').uniform_(-1., 1.)
Wk0 = torch.cuda.FloatTensor(
bsz, n_head, d_head, d_head, device='cuda').uniform_(-1., 1.)
wb0 = torch.cuda.FloatTensor(
bsz, n_head, d_head, 4, device='cuda').uniform_(-1., 1.)
x0 = torch.rand(bsz, n_head, slen, d_head, device='cuda')
W_y1 = torch.zeros(
bsz, n_head, d_head, d_head, requires_grad=True, device='cuda')
W_q1 = torch.zeros(
bsz, n_head, d_head, d_head, requires_grad=True, device='cuda')
W_k1 = torch.zeros(
bsz, n_head, d_head, d_head, requires_grad=True, device='cuda')
w_b1 = torch.zeros(
bsz, n_head, d_head, 4, requires_grad=True, device='cuda')
W_y1 = Wy0.detach().clone().requires_grad_(True)
W_q1 = Wq0.detach().clone().requires_grad_(True)
W_k1 = Wk0.detach().clone().requires_grad_(True)
w_b1 = wb0.detach().clone().requires_grad_(True)
W_y2_slow = torch.zeros(
bsz, n_head, d_head, d_head, requires_grad=True, device='cuda')
W_q2_slow = torch.zeros(
bsz, n_head, d_head, d_head, requires_grad=True, device='cuda')
W_k2_slow = torch.zeros(
bsz, n_head, d_head, d_head, requires_grad=True, device='cuda')
w_b2_slow = torch.zeros(
bsz, n_head, d_head, 4, requires_grad=True, device='cuda')
W_y2_slow = Wy0.detach().clone().requires_grad_(True)
W_q2_slow = Wq0.detach().clone().requires_grad_(True)
W_k2_slow = Wk0.detach().clone().requires_grad_(True)
w_b2_slow = wb0.detach().clone().requires_grad_(True)
x1 = torch.zeros(
bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
x1 = x0.detach().clone().requires_grad_(True)
print("Forwarding custom kernel...")
# softmax done inside self_ref_v0
out1 = self_ref_v0(x1, W_y1, W_q1, W_k1, w_b1)
print("done.")
x2 = torch.zeros(
bsz, n_head, slen, d_head, requires_grad=True, device='cuda')
# apply softmax here
x2 = F.softmax(x0.detach(), dim=-1).clone().requires_grad_(True)
x2 = x2.permute(2, 0, 1, 3) # (len, B, H, dim)
x2 = x2.reshape(slen, bsz * n_head, d_head) # (len, B*H, dim)
# W2 = W2.reshape(bsz * n_head, d_head, v_dim)
W_y2 = W_y2_slow.view(bsz * n_head, d_head, d_head)
W_q2 = W_q2_slow.view(bsz * n_head, d_head, d_head)
W_k2 = W_k2_slow.view(bsz * n_head, d_head, d_head)
w_b2 = w_b2_slow.view(bsz * n_head, d_head, 4)
out_list = []
# out = x2[0] # (B * H, D)
print("Forwarding PyTorch code...")
for pos in range(slen):
out = x2[pos].unsqueeze(1)
# out = F.softmax(x2[pos], dim=-1).unsqueeze(1)
# out: (B * H, 1, D)
# W2: (B * H, D, v_dim)
# bmm (b,n,M) x (b,M,p) -> (b,n,p)
# ykqb before squeeze: (B * H, 1, v_dim)
y = torch.bmm(out, W_y2).squeeze(1)
out_t = y.reshape(bsz, n_head, d_head)
out_list.append(out_t.clone())
if pos < slen - 1: # no need to update weights at the last time step
q = torch.bmm(out, W_q2).squeeze(1)
k = torch.bmm(out, W_k2).squeeze(1)
beta = torch.bmm(out, w_b2).squeeze(1)
beta = torch.sigmoid(beta)
beta_y, beta_q, beta_k, beta_beta = torch.split(
beta, [1, 1, 1, 1], dim=-1)
k = F.softmax(k, dim=-1)
q = F.softmax(q, dim=-1)
# retrieve currently stored value
y_old = torch.bmm(k.unsqueeze(1), W_y2).squeeze(1)
q_old = torch.bmm(k.unsqueeze(1), W_q2).squeeze(1)
k_old = torch.bmm(k.unsqueeze(1), W_k2).squeeze(1)
beta_old = torch.bmm(k.unsqueeze(1), w_b2).squeeze(1)
y_new = torch.bmm(q.unsqueeze(1), W_y2).squeeze(1)
q_new = torch.bmm(q.unsqueeze(1), W_q2).squeeze(1)
k_new = torch.bmm(q.unsqueeze(1), W_k2).squeeze(1)
beta_new = torch.bmm(q.unsqueeze(1), w_b2).squeeze(1)
# update all weights
y_insert = beta_y * (y_new - y_old)
q_insert = beta_q * (q_new - q_old)
k_insert = beta_k * (k_new - k_old)
beta_insert = beta_beta * (beta_new - beta_old)
W_y2 = W_y2.clone() + torch.bmm(
k.unsqueeze(2), y_insert.unsqueeze(1))
W_q2 = W_q2.clone() + torch.bmm(
k.unsqueeze(2), q_insert.unsqueeze(1))
W_k2 = W_k2.clone() + torch.bmm(
k.unsqueeze(2), k_insert.unsqueeze(1))
w_b2 = w_b2.clone() + torch.bmm(
k.unsqueeze(2), beta_insert.unsqueeze(1))
print("done.")
out2 = torch.stack(out_list)
out2 = out2.view(slen, bsz, n_head, d_head)
out1 = out1.permute(2, 0, 1, 3)
for s in range(slen):
for b in range(bsz):
for h in range(n_head):
print(f"s={s}, b={b}, h={h}")
print(f"out: {out1[s][b][h]}")
print(f"ref: {out2[s][b][h]}")
assert max_relative_error(
out1[s][b][h], out2[s][b][h]) < rel_threshold
print("pass!")
print("==> Forward pass test done.")
# sys.exit(0)
print('##########################')
print('# Test Backward pass')
print('##########################')
# grad
loss1 = out1.sum()
W_y1.retain_grad()
W_q1.retain_grad()
W_k1.retain_grad()
w_b1.retain_grad()
x1.retain_grad()
loss1.backward()
loss2 = out2.sum()
W_y2_slow.retain_grad()
W_q2_slow.retain_grad()
W_k2_slow.retain_grad()
w_b2_slow.retain_grad()
x2.retain_grad()
loss2.backward()
print('##########################')
print('# Gradients input')
print('##########################')
x2_grad = x2.grad.reshape(slen, bsz, n_head, d_head)
x2_grad = x2_grad.permute(1, 2, 0, 3)
for s in reversed(range(slen)):
for b in range(bsz):
for h in range(n_head):
print(f"s={s}, b={b}, h={h}")
print(f"grad x out: {x1.grad[b][h][s]}")
print(f"grad x ref: {x2_grad[b][h][s]}")
assert max_relative_error(
x1.grad[b][h][s], x2_grad[b][h][s]) < rel_threshold
print("pass!")
print('##########################')
print('# Gradients weights')
print('##########################')
W_y2_grad = W_y2_slow.grad.reshape(bsz, n_head, d_head, d_head)
W_q2_grad = W_q2_slow.grad.reshape(bsz, n_head, d_head, d_head)
W_k2_grad = W_k2_slow.grad.reshape(bsz, n_head, d_head, d_head)
w_b2_grad = w_b2_slow.grad.reshape(bsz, n_head, d_head, 4)
print('##########################')
print('# Gradient Wy')
print('##########################')
for b in range(bsz):
for h in range(n_head):
for d in range(d_head):
print(f"b={b} h={h} d={d} ------------------------")
print(f"grad Wy out: {W_y1.grad[b][h][d]}")
print(f"grad Wy ref: {W_y2_grad[b][h][d]}")
assert max_relative_error(
W_y1.grad[b][h][d], W_y2_grad[b][h][d]) < rel_threshold
print("pass!")
print('##########################')
print('# Gradient Wq')
print('##########################')
for b in range(bsz):
for h in range(n_head):
for d in range(d_head):
print(f"b={b} h={h} d={d} ------------------------")
print(f"grad Wq out: {W_q1.grad[b][h][d]}")
print(f"grad Wq ref: {W_q2_grad[b][h][d]}")
assert max_relative_error(
W_q1.grad[b][h][d], W_q2_grad[b][h][d]) < rel_threshold
print("pass!")
print('##########################')
print('# Gradient Wk')
print('##########################')
for b in range(bsz):
for h in range(n_head):
for d in range(d_head):
print(f"b={b} h={h} d={d} ------------------------")
print(f"grad Wk out: {W_k1.grad[b][h][d]}")
print(f"grad Wk ref: {W_k2_grad[b][h][d]}")
assert max_relative_error(
W_k1.grad[b][h][d], W_k2_grad[b][h][d]) < rel_threshold
print("pass!")
print('##########################')
print('# Gradient wb')
print('##########################')
for b in range(bsz):
for h in range(n_head):
for d in range(d_head):
print(f"b={b} h={h} d={d} ------------------------")
print(f"grad wb out: {w_b1.grad[b][h][d]}")
print(f"grad wb ref: {w_b2_grad[b][h][d]}")
assert max_relative_error(
w_b1.grad[b][h][d], w_b2_grad[b][h][d]) < rel_threshold
print("pass!")
print("==> All tests pass!")
| 14,184
| 33.85258
| 113
|
py
|