code stringlengths 17 6.64M |
|---|
def plms_mixer(old_eps, order=1):
    """
    Combine a history of eps predictions into one estimate using the
    Adams-Bashforth coefficients of the PLMS (pseudo linear multistep) sampler.

    :param old_eps: list of previous eps predictions, oldest first. Mutated in
        place: the oldest entry is popped once the history reaches `order`.
    :param order: requested multistep order (1-4); the effective order is
        capped by the number of available history entries.
    :return: the combined eps estimate.
    :raises ValueError: if old_eps is empty or the effective order exceeds 4.
    """
    cur_order = min(order, len(old_eps))
    if cur_order == 1:
        eps_prime = old_eps[-1]
    elif cur_order == 2:
        eps_prime = (3 * old_eps[-1] - old_eps[-2]) / 2
    elif cur_order == 3:
        eps_prime = (23 * old_eps[-1] - 16 * old_eps[-2] + 5 * old_eps[-3]) / 12
    elif cur_order == 4:
        eps_prime = (55 * old_eps[-1] - 59 * old_eps[-2] + 37 * old_eps[-3] - 9 * old_eps[-4]) / 24
    else:
        # Previously fell through and raised NameError on eps_prime when
        # old_eps was empty or order > 4; fail loudly instead.
        raise ValueError(f'unsupported PLMS order: {cur_order} (need 1-4 with non-empty history)')
    if len(old_eps) >= order:
        old_eps.pop(0)
    return eps_prime
|
class KVWriter(object):
    """Abstract sink for key/value diagnostic dictionaries."""

    def writekvs(self, kvs):
        """Write one mapping of diagnostic name -> value. Must be overridden."""
        raise NotImplementedError
|
class SeqWriter(object):
    """Abstract sink for sequences of log tokens."""

    def writeseq(self, seq):
        """Write one iterable of string tokens. Must be overridden."""
        raise NotImplementedError
|
class HumanOutputFormat(KVWriter, SeqWriter):
    """
    Renders key/value diagnostics as an aligned ASCII table and sequences as
    space-separated tokens, writing to a file path or an open stream.
    """
    def __init__(self, filename_or_file):
        """
        :param filename_or_file: a path to open (owned; closed by close())
            or an already-open writable stream (not closed by close()).
        """
        if isinstance(filename_or_file, str):
            self.file = open(filename_or_file, 'wt')
            self.own_file = True
        else:
            # BUG FIX: this writer only writes, so require a writable stream
            # (previously checked hasattr(..., 'read')).
            assert hasattr(filename_or_file, 'write'), ('expected file or str, got %s' % filename_or_file)
            self.file = filename_or_file
            self.own_file = False
    def writekvs(self, kvs):
        """Write kvs as a dash-bordered, column-aligned table."""
        # Format and truncate everything first so column widths are known.
        key2str = {}
        for (key, val) in sorted(kvs.items()):
            if hasattr(val, '__float__'):
                valstr = ('%-8.3g' % val)
            else:
                valstr = str(val)
            key2str[self._truncate(key)] = self._truncate(valstr)
        if (len(key2str) == 0):
            print('WARNING: tried to write empty key-value dict')
            return
        else:
            keywidth = max(map(len, key2str.keys()))
            valwidth = max(map(len, key2str.values()))
        dashes = ('-' * ((keywidth + valwidth) + 7))
        lines = [dashes]
        # Case-insensitive sort for stable, human-friendly row order.
        for (key, val) in sorted(key2str.items(), key=(lambda kv: kv[0].lower())):
            lines.append(('| %s%s | %s%s |' % (key, (' ' * (keywidth - len(key))), val, (' ' * (valwidth - len(val))))))
        lines.append(dashes)
        self.file.write(('\n'.join(lines) + '\n'))
        self.file.flush()
    def _truncate(self, s):
        """Clip s to 30 characters, replacing the tail with '...'."""
        maxlen = 30
        return ((s[:(maxlen - 3)] + '...') if (len(s) > maxlen) else s)
    def writeseq(self, seq):
        """Write seq elements separated by single spaces, ending with a newline."""
        seq = list(seq)
        for (i, elem) in enumerate(seq):
            self.file.write(elem)
            if (i < (len(seq) - 1)):
                self.file.write(' ')
        self.file.write('\n')
        self.file.flush()
    def close(self):
        """Close the underlying file only if this writer opened it."""
        if self.own_file:
            self.file.close()
|
class JSONOutputFormat(KVWriter):
    """Writes each diagnostics dict as one JSON object per line (JSONL)."""
    def __init__(self, filename):
        self.file = open(filename, 'wt')
    def writekvs(self, kvs):
        """Serialize kvs to one JSON line, coercing array-like scalars to float."""
        # BUG FIX: convert into a copy instead of mutating the caller's dict.
        # hasattr(v, 'dtype') catches numpy/tensor scalars that json can't encode.
        serializable = {k: (float(v) if hasattr(v, 'dtype') else v) for (k, v) in kvs.items()}
        self.file.write((json.dumps(serializable) + '\n'))
        self.file.flush()
    def close(self):
        self.file.close()
|
class CSVOutputFormat(KVWriter):
    """
    Writes diagnostics to a CSV file, rewriting the whole file to extend the
    header when keys appear that were not present in earlier rows.
    """
    def __init__(self, filename):
        # 'w+t' so the file can be re-read and rewritten when columns are added.
        self.file = open(filename, 'w+t')
        self.keys = []
        self.sep = ','
    def writekvs(self, kvs):
        """Append one row; new keys trigger an in-place rewrite of the file."""
        # Keys we have never seen before.
        extra_keys = list((kvs.keys() - self.keys))
        extra_keys.sort()
        if extra_keys:
            self.keys.extend(extra_keys)
            # Re-read the existing content, then rewrite from the start.
            self.file.seek(0)
            lines = self.file.readlines()
            self.file.seek(0)
            # Rewrite the header with the extended key set.
            for (i, k) in enumerate(self.keys):
                if (i > 0):
                    self.file.write(',')
                self.file.write(k)
            self.file.write('\n')
            # Re-emit prior data rows, padding each with one empty cell per new key.
            for line in lines[1:]:
                self.file.write(line[:(- 1)])
                self.file.write((self.sep * len(extra_keys)))
                self.file.write('\n')
        # Write the current row; keys missing from kvs become empty cells.
        for (i, k) in enumerate(self.keys):
            if (i > 0):
                self.file.write(',')
            v = kvs.get(k)
            if (v is not None):
                self.file.write(str(v))
        self.file.write('\n')
        self.file.flush()
    def close(self):
        self.file.close()
|
class TensorBoardOutputFormat(KVWriter):
    """
    Dumps key/value pairs into TensorBoard's numeric format.
    """
    def __init__(self, dir):
        os.makedirs(dir, exist_ok=True)
        self.dir = dir
        # Monotonically increasing global step attached to each event.
        self.step = 1
        prefix = 'events'
        path = osp.join(osp.abspath(dir), prefix)
        # TF is imported lazily so the logger works without TensorFlow installed.
        import tensorflow as tf
        from tensorflow.python import pywrap_tensorflow
        from tensorflow.core.util import event_pb2
        from tensorflow.python.util import compat
        self.tf = tf
        self.event_pb2 = event_pb2
        self.pywrap_tensorflow = pywrap_tensorflow
        # NOTE(review): uses TF1-era private APIs (pywrap_tensorflow.EventsWriter);
        # presumably requires an older TensorFlow -- confirm before upgrading TF.
        self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
    def writekvs(self, kvs):
        """Write every (k, v) in kvs as simple_value summaries for one step."""
        def summary_val(k, v):
            kwargs = {'tag': k, 'simple_value': float(v)}
            return self.tf.Summary.Value(**kwargs)
        summary = self.tf.Summary(value=[summary_val(k, v) for (k, v) in kvs.items()])
        event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
        event.step = self.step
        self.writer.WriteEvent(event)
        self.writer.Flush()
        self.step += 1
    def close(self):
        if self.writer:
            self.writer.Close()
            self.writer = None
|
def make_output_format(format, ev_dir, log_suffix=''):
    """
    Build the writer for the named output format, creating ev_dir if needed.

    :param format: one of 'stdout', 'log', 'json', 'csv', 'tensorboard'.
    :param ev_dir: directory that file-based writers write into.
    :param log_suffix: suffix inserted into output file names.
    :raises ValueError: for an unrecognized format name.
    """
    os.makedirs(ev_dir, exist_ok=True)
    if format == 'stdout':
        return HumanOutputFormat(sys.stdout)
    if format == 'log':
        return HumanOutputFormat(osp.join(ev_dir, 'log%s.txt' % log_suffix))
    if format == 'json':
        return JSONOutputFormat(osp.join(ev_dir, 'progress%s.json' % log_suffix))
    if format == 'csv':
        return CSVOutputFormat(osp.join(ev_dir, 'progress%s.csv' % log_suffix))
    if format == 'tensorboard':
        return TensorBoardOutputFormat(osp.join(ev_dir, 'tb%s' % log_suffix))
    raise ValueError('Unknown format specified: %s' % (format,))
|
def logkv(key, val):
    """
    Log a value of some diagnostic.
    Call this once for each diagnostic quantity, each iteration.
    If called many times, last value will be used.
    """
    get_current().logkv(key, val)
|
def logkv_mean(key, val):
    """The same as logkv(), but if called many times, values averaged."""
    get_current().logkv_mean(key, val)
|
def logkvs(d):
    """Log a dictionary of key-value pairs."""
    for (k, v) in d.items():
        logkv(k, v)
|
def dumpkvs():
    """
    Write all of the diagnostics from the current iteration.
    Returns the dict that was written (possibly MPI-averaged).
    """
    return get_current().dumpkvs()
|
def getkvs():
    """Return the current logger's pending key -> value dict (not a copy)."""
    return get_current().name2val
|
def log(*args, level=INFO):
    """
    Write the sequence of args, with no separators, to the console and output
    files (if you've configured an output file).
    """
    get_current().log(*args, level=level)
|
def debug(*args):
    """Log args at DEBUG level."""
    log(*args, level=DEBUG)
|
def info(*args):
    """Log args at INFO level."""
    log(*args, level=INFO)
|
def warn(*args):
    """Log args at WARN level."""
    log(*args, level=WARN)
|
def error(*args):
    """Log args at ERROR level."""
    log(*args, level=ERROR)
|
def set_level(level):
    """Set logging threshold on current logger."""
    get_current().set_level(level)
|
def set_comm(comm):
    """Set the communicator used by dumpkvs() to average stats across ranks."""
    get_current().set_comm(comm)
|
def get_dir():
    """
    Get directory that log files are being written to.
    Will be None if there is no output directory (i.e., if you didn't call start).
    """
    return get_current().get_dir()
|
@contextmanager
def profile_kv(scopename):
    """Context manager accumulating elapsed wall time under 'wait_<scopename>'."""
    logkey = ('wait_' + scopename)
    tstart = time.time()
    try:
        (yield)
    finally:
        # Accumulate (+=) so repeated scopes add up within one logging interval.
        get_current().name2val[logkey] += (time.time() - tstart)
|
def profile(n):
    """
    Decorator that times each call of the wrapped function via profile_kv.

    Usage:
    @profile("my_func")
    def my_func(): code
    """
    def decorator_with_name(func):
        # BUG FIX: preserve the wrapped function's name/docstring/signature.
        @functools.wraps(func)
        def func_wrapper(*args, **kwargs):
            with profile_kv(n):
                return func(*args, **kwargs)
        return func_wrapper
    return decorator_with_name
|
def get_current():
    """Return the active Logger, lazily configuring a default one on first use."""
    if (Logger.CURRENT is None):
        _configure_default_logger()
    return Logger.CURRENT
|
class Logger(object):
    """
    Accumulates key/value diagnostics in memory and periodically dumps them to
    the configured output formats.
    """
    # Logger configured with defaults on first use; reset() falls back to it.
    DEFAULT = None
    # Logger currently in use by the module-level helper functions.
    CURRENT = None
    def __init__(self, dir, output_formats, comm=None):
        # name2val: latest (or running-mean) value per key.
        # name2cnt: sample counts for keys logged via logkv_mean.
        self.name2val = defaultdict(float)
        self.name2cnt = defaultdict(int)
        self.level = INFO
        self.dir = dir
        self.output_formats = output_formats
        self.comm = comm
    def logkv(self, key, val):
        """Record the latest value for a diagnostic; overwrites earlier calls."""
        self.name2val[key] = val
    def logkv_mean(self, key, val):
        """Record a value, averaging it with previous values for the same key."""
        (oldval, cnt) = (self.name2val[key], self.name2cnt[key])
        # Incremental running mean: new_mean = (old*cnt + val) / (cnt + 1).
        self.name2val[key] = (((oldval * cnt) / (cnt + 1)) + (val / (cnt + 1)))
        self.name2cnt[key] = (cnt + 1)
    def dumpkvs(self):
        """Write accumulated diagnostics to every KVWriter, then clear them."""
        if (self.comm is None):
            d = self.name2val
        else:
            # Average values across ranks, weighted by logkv_mean sample counts.
            d = mpi_weighted_mean(self.comm, {name: (val, self.name2cnt.get(name, 1)) for (name, val) in self.name2val.items()})
            if (self.comm.rank != 0):
                # NOTE(review): non-root ranks get {} back from mpi_weighted_mean;
                # presumably this sentinel avoids the empty-dict warning in the
                # writers -- confirm.
                d['dummy'] = 1
        out = d.copy()
        for fmt in self.output_formats:
            if isinstance(fmt, KVWriter):
                fmt.writekvs(d)
        self.name2val.clear()
        self.name2cnt.clear()
        return out
    def log(self, *args, level=INFO):
        """Forward args to SeqWriter outputs if level passes the threshold."""
        if (self.level <= level):
            self._do_log(args)
    def set_level(self, level):
        self.level = level
    def set_comm(self, comm):
        self.comm = comm
    def get_dir(self):
        return self.dir
    def close(self):
        for fmt in self.output_formats:
            fmt.close()
    def _do_log(self, args):
        for fmt in self.output_formats:
            if isinstance(fmt, SeqWriter):
                fmt.writeseq(map(str, args))
|
def get_rank_without_mpi_import():
    """
    Determine this process's MPI rank from launcher environment variables
    (MPICH/Hydra or OpenMPI) without importing mpi4py. Defaults to 0.
    """
    for env_key in ('PMI_RANK', 'OMPI_COMM_WORLD_RANK'):
        value = os.environ.get(env_key)
        if value is not None:
            return int(value)
    return 0
|
def mpi_weighted_mean(comm, local_name2valcount):
    """
    Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
    Perform a weighted average over dicts that are each on a different node
    Input: local_name2valcount: dict mapping key -> (value, count)
    Returns: key -> mean (only on rank 0; other ranks get an empty dict)
    """
    all_name2valcount = comm.gather(local_name2valcount)
    if (comm.rank == 0):
        name2sum = defaultdict(float)
        name2count = defaultdict(float)
        for n2vc in all_name2valcount:
            for (name, (val, count)) in n2vc.items():
                try:
                    val = float(val)
                except ValueError:
                    # Non-numeric values cannot be averaged; warn and skip them.
                    if (comm.rank == 0):
                        warnings.warn('WARNING: tried to compute mean on non-float {}={}'.format(name, val))
                else:
                    # Only reached when float() succeeded.
                    name2sum[name] += (val * count)
                    name2count[name] += count
        return {name: (name2sum[name] / name2count[name]) for name in name2sum}
    else:
        # Non-root ranks only contribute data via gather() above.
        return {}
|
def configure(dir=None, format_strs=None, comm=None, log_suffix=''):
    """
    Configure the global logger.

    :param dir: output directory; falls back to $OPENAI_LOGDIR, then to a
        timestamped directory under the system temp dir.
    :param format_strs: list of output format names; defaults depend on rank
        ($OPENAI_LOG_FORMAT on rank 0, $OPENAI_LOG_FORMAT_MPI otherwise).
    :param comm: if comm is provided, average all numerical stats across that comm.
    :param log_suffix: suffix appended to output file names.
    """
    if (dir is None):
        dir = os.getenv('OPENAI_LOGDIR')
    if (dir is None):
        dir = osp.join(tempfile.gettempdir(), datetime.datetime.now().strftime('openai-%Y-%m-%d-%H-%M-%S-%f'))
    assert isinstance(dir, str)
    dir = os.path.expanduser(dir)
    os.makedirs(os.path.expanduser(dir), exist_ok=True)
    # Non-zero ranks write to rank-suffixed files so outputs do not collide.
    rank = get_rank_without_mpi_import()
    if (rank > 0):
        log_suffix = (log_suffix + ('-rank%03i' % rank))
    if (format_strs is None):
        if (rank == 0):
            format_strs = os.getenv('OPENAI_LOG_FORMAT', 'stdout,log,csv').split(',')
        else:
            format_strs = os.getenv('OPENAI_LOG_FORMAT_MPI', 'log').split(',')
    # Drop empty strings (e.g. from OPENAI_LOG_FORMAT='').
    format_strs = filter(None, format_strs)
    output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
    Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
    if output_formats:
        log(('Logging to %s' % dir))
|
def _configure_default_logger():
    """Configure with defaults and remember the result as the fallback logger."""
    configure()
    Logger.DEFAULT = Logger.CURRENT
|
def reset():
    """Close the current logger (if not the default) and fall back to the default."""
    if (Logger.CURRENT is not Logger.DEFAULT):
        Logger.CURRENT.close()
        Logger.CURRENT = Logger.DEFAULT
        log('Reset logger')
|
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
    """Temporarily install a freshly configured logger; restore the old one on exit."""
    prevlogger = Logger.CURRENT
    configure(dir=dir, format_strs=format_strs, comm=comm)
    try:
        (yield)
    finally:
        Logger.CURRENT.close()
        Logger.CURRENT = prevlogger
|
def normal_kl(mean1, logvar1, mean2, logvar2):
    """
    Compute the KL divergence between two gaussians.

    Shapes are automatically broadcasted, so batches can be compared to
    scalars, among other use cases.
    """
    # Find any tensor argument to borrow device/dtype from.
    tensor = next((obj for obj in (mean1, logvar1, mean2, logvar2) if isinstance(obj, th.Tensor)), None)
    assert tensor is not None, 'at least one argument must be a Tensor'
    # Force the log-variances to be tensors so th.exp below always works;
    # the means may stay scalars thanks to broadcasting.
    logvar1, logvar2 = (
        x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor)
        for x in (logvar1, logvar2)
    )
    return 0.5 * (
        -1.0
        + logvar2
        - logvar1
        + th.exp(logvar1 - logvar2)
        + ((mean1 - mean2) ** 2) * th.exp(-logvar2)
    )
|
def approx_standard_normal_cdf(x):
    """
    A fast approximation of the cumulative distribution function of the
    standard normal.
    """
    inner = np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))
    return 0.5 * (1.0 + th.tanh(inner))
|
def discretized_gaussian_log_likelihood(x, *, means, log_scales):
    """
    Compute the log-likelihood of a Gaussian distribution discretizing to a
    given image.

    :param x: the target images. It is assumed that this was uint8 values,
              rescaled to the range [-1, 1].
    :param means: the Gaussian mean Tensor.
    :param log_scales: the Gaussian log stddev Tensor.
    :return: a tensor like x of log probabilities (in nats).
    """
    assert x.shape == means.shape == log_scales.shape
    # Evaluate the approximate normal CDF at both edges of each pixel bin
    # (bins are +/- 1/255 around x in normalized coordinates).
    centered = x - means
    inv_std = th.exp(-log_scales)
    cdf_upper = approx_standard_normal_cdf(inv_std * (centered + (1.0 / 255.0)))
    cdf_lower = approx_standard_normal_cdf(inv_std * (centered - (1.0 / 255.0)))
    # Clamp before log to avoid -inf on vanishing probabilities.
    log_cdf_upper = th.log(cdf_upper.clamp(min=1e-12))
    log_one_minus_cdf_lower = th.log((1.0 - cdf_lower).clamp(min=1e-12))
    log_bin_prob = th.log((cdf_upper - cdf_lower).clamp(min=1e-12))
    # The extreme bins absorb the full tails of the distribution.
    log_probs = th.where(
        x < -0.999,
        log_cdf_upper,
        th.where(x > 0.999, log_one_minus_cdf_lower, log_bin_prob),
    )
    assert log_probs.shape == x.shape
    return log_probs
|
def space_timesteps(num_timesteps, section_counts):
    """
    Create a list of timesteps to use from an original diffusion process,
    given the number of timesteps we want to take from equally-sized portions
    of the original process.

    For example, if there's 300 timesteps and the section counts are [10,15,20]
    then the first 100 timesteps are strided to be 10 timesteps, the second 100
    are strided to be 15 timesteps, and the final 100 are strided to be 20.

    If the stride is a string starting with "ddim", then the fixed striding
    from the DDIM paper is used, and only one section is allowed.

    :param num_timesteps: the number of diffusion steps in the original
                          process to divide up.
    :param section_counts: either a list of numbers, or a string containing
                           comma-separated numbers, indicating the step count
                           per section. As a special case, use "ddimN" where N
                           is a number of steps to use the striding from the
                           DDIM paper.
    :return: a set of diffusion steps from the original process to use.
    """
    if isinstance(section_counts, str):
        if section_counts.startswith('ddim'):
            desired_count = int(section_counts[len('ddim'):])
            # Search for an integer stride that yields exactly desired_count steps.
            for i in range(1, num_timesteps):
                if len(range(0, num_timesteps, i)) == desired_count:
                    return set(range(0, num_timesteps, i))
            # BUG FIX: the message previously interpolated num_timesteps, but
            # the count we failed to reach is the requested ddim step count.
            raise ValueError(f'cannot create exactly {desired_count} steps with an integer stride')
        section_counts = [int(x) for x in section_counts.split(',')]
    size_per = num_timesteps // len(section_counts)
    extra = num_timesteps % len(section_counts)
    start_idx = 0
    all_steps = []
    for i, section_count in enumerate(section_counts):
        # Distribute the remainder across the first `extra` sections.
        size = size_per + (1 if i < extra else 0)
        if size < section_count:
            raise ValueError(f'cannot divide section of {size} steps into {section_count}')
        if section_count <= 1:
            frac_stride = 1
        else:
            # Fractional stride so both the first and last step of the
            # section are included.
            frac_stride = (size - 1) / (section_count - 1)
        cur_idx = 0.0
        taken_steps = []
        for _ in range(section_count):
            taken_steps.append(start_idx + round(cur_idx))
            cur_idx += frac_stride
        all_steps += taken_steps
        start_idx += size
    return set(all_steps)
|
class SpacedDiffusion(GaussianDiffusion):
    """
    A diffusion process which can skip steps in a base diffusion process.

    :param use_timesteps: a collection (sequence or set) of timesteps from the
                          original diffusion process to retain.
    :param kwargs: the kwargs to create the base diffusion process.
    """
    def __init__(self, use_timesteps, **kwargs):
        self.use_timesteps = set(use_timesteps)
        # timestep_map[i] is the original-process index of spaced step i.
        self.timestep_map = []
        self.original_num_steps = len(kwargs['betas'])
        base_diffusion = GaussianDiffusion(**kwargs)
        # Recompute betas so the retained steps reproduce the base process's
        # cumulative alpha products: 1 - a_t/a_{prev kept} gives the spaced beta.
        last_alpha_cumprod = 1.0
        new_betas = []
        for (i, alpha_cumprod) in enumerate(base_diffusion.alphas_cumprod):
            if (i in self.use_timesteps):
                new_betas.append((1 - (alpha_cumprod / last_alpha_cumprod)))
                last_alpha_cumprod = alpha_cumprod
                self.timestep_map.append(i)
        kwargs['betas'] = np.array(new_betas)
        super().__init__(**kwargs)
    def p_mean_variance(self, model, *args, **kwargs):
        # Wrap the model so spaced timesteps are remapped before each call.
        return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)
    def training_losses(self, model, *args, **kwargs):
        return super().training_losses(self._wrap_model(model), *args, **kwargs)
    def condition_mean(self, cond_fn, *args, **kwargs):
        return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)
    def condition_score(self, cond_fn, *args, **kwargs):
        return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)
    def condition_score2(self, cond_fn, *args, **kwargs):
        return super().condition_score2(self._wrap_model(cond_fn), *args, **kwargs)
    def condition_score3(self, cond_fn, *args, **kwargs):
        return super().condition_score3(self._wrap_model(cond_fn), *args, **kwargs)
    def _wrap_model(self, model):
        if isinstance(model, _WrappedModel):
            return model
        return _WrappedModel(model, self.timestep_map, self.rescale_timesteps, self.original_num_steps)
    def _scale_timesteps(self, t):
        # Scaling is handled by _WrappedModel instead.
        return t
|
class _WrappedModel():
def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps):
self.model = model
self.timestep_map = timestep_map
self.rescale_timesteps = rescale_timesteps
self.original_num_steps = original_num_steps
def __call__(self, x, ts, **kwargs):
map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
new_ts = map_tensor[ts]
if self.rescale_timesteps:
new_ts = (new_ts.float() * (1000.0 / self.original_num_steps))
return self.model(x, new_ts, **kwargs)
|
def diffusion_defaults():
    """
    Defaults for image and classifier training.
    """
    return {
        'learn_sigma': False,
        'diffusion_steps': 1000,
        'noise_schedule': 'linear',
        'timestep_respacing': '',
        'use_kl': False,
        'predict_xstart': False,
        'rescale_timesteps': False,
        'rescale_learned_sigmas': False,
    }
|
def classifier_defaults():
    """
    Defaults for classifier models.
    """
    return {
        'image_size': 64,
        'classifier_use_fp16': False,
        'classifier_width': 128,
        'classifier_depth': 2,
        'classifier_attention_resolutions': '32,16,8',
        'classifier_use_scale_shift_norm': True,
        'classifier_resblock_updown': True,
        'classifier_pool': 'attention',
    }
|
def model_and_diffusion_defaults():
    """
    Defaults for image training (UNet hyperparameters merged with diffusion defaults).
    """
    defaults = {
        'image_size': 64,
        'num_channels': 128,
        'num_res_blocks': 2,
        'num_heads': 4,
        'num_heads_upsample': -1,
        'num_head_channels': -1,
        'attention_resolutions': '16,8',
        'channel_mult': '',
        'dropout': 0.0,
        'class_cond': False,
        'use_checkpoint': False,
        'use_scale_shift_norm': True,
        'resblock_updown': False,
        'use_fp16': False,
        'use_new_attention_order': False,
    }
    defaults.update(diffusion_defaults())
    return defaults
|
def classifier_and_diffusion_defaults():
    """Classifier defaults merged with diffusion defaults."""
    merged = dict(classifier_defaults())
    merged.update(diffusion_defaults())
    return merged
|
def create_model_and_diffusion(image_size, class_cond, learn_sigma, num_channels, num_res_blocks, channel_mult, num_heads, num_head_channels, num_heads_upsample, attention_resolutions, dropout, diffusion_steps, noise_schedule, timestep_respacing, use_kl, predict_xstart, rescale_timesteps, rescale_learned_sigmas, use_checkpoint, use_scale_shift_norm, resblock_updown, use_fp16, use_new_attention_order):
    """
    Construct a UNet model and its Gaussian diffusion process from the flat
    keyword arguments produced by model_and_diffusion_defaults().
    """
    model = create_model(image_size, num_channels, num_res_blocks, channel_mult=channel_mult, learn_sigma=learn_sigma, class_cond=class_cond, use_checkpoint=use_checkpoint, attention_resolutions=attention_resolutions, num_heads=num_heads, num_head_channels=num_head_channels, num_heads_upsample=num_heads_upsample, use_scale_shift_norm=use_scale_shift_norm, dropout=dropout, resblock_updown=resblock_updown, use_fp16=use_fp16, use_new_attention_order=use_new_attention_order)
    diffusion = create_gaussian_diffusion(steps=diffusion_steps, learn_sigma=learn_sigma, noise_schedule=noise_schedule, use_kl=use_kl, predict_xstart=predict_xstart, rescale_timesteps=rescale_timesteps, rescale_learned_sigmas=rescale_learned_sigmas, timestep_respacing=timestep_respacing)
    return (model, diffusion)
|
def create_model(image_size, num_channels, num_res_blocks, channel_mult='', learn_sigma=False, class_cond=False, use_checkpoint=False, attention_resolutions='16', num_heads=1, num_head_channels=(- 1), num_heads_upsample=(- 1), use_scale_shift_norm=False, dropout=0, resblock_updown=False, use_fp16=False, use_new_attention_order=False):
    """
    Build a UNetModel for image generation.

    channel_mult may be '' (use a preset chosen by image_size) or a
    comma-separated list of per-resolution channel multipliers.
    """
    if channel_mult == '':
        presets = {
            512: (0.5, 1, 1, 2, 2, 4, 4),
            256: (1, 1, 2, 2, 4, 4),
            128: (1, 1, 2, 3, 4),
            64: (1, 2, 3, 4),
        }
        if image_size not in presets:
            raise ValueError(f'unsupported image size: {image_size}')
        channel_mult = presets[image_size]
    else:
        channel_mult = tuple(int(ch_mult) for ch_mult in channel_mult.split(','))
    # Convert attention resolutions (in pixels) into downsample factors.
    attention_ds = [image_size // int(res) for res in attention_resolutions.split(',')]
    return UNetModel(image_size=image_size, in_channels=3, model_channels=num_channels, out_channels=(6 if learn_sigma else 3), num_res_blocks=num_res_blocks, attention_resolutions=tuple(attention_ds), dropout=dropout, channel_mult=channel_mult, num_classes=(NUM_CLASSES if class_cond else None), use_checkpoint=use_checkpoint, use_fp16=use_fp16, num_heads=num_heads, num_head_channels=num_head_channels, num_heads_upsample=num_heads_upsample, use_scale_shift_norm=use_scale_shift_norm, resblock_updown=resblock_updown, use_new_attention_order=use_new_attention_order)
|
def create_classifier_and_diffusion(image_size, classifier_use_fp16, classifier_width, classifier_depth, classifier_attention_resolutions, classifier_use_scale_shift_norm, classifier_resblock_updown, classifier_pool, learn_sigma, diffusion_steps, noise_schedule, timestep_respacing, use_kl, predict_xstart, rescale_timesteps, rescale_learned_sigmas):
    """
    Construct a noisy-image classifier and its Gaussian diffusion process from
    the flat keyword arguments produced by classifier_and_diffusion_defaults().
    """
    classifier = create_classifier(image_size, classifier_use_fp16, classifier_width, classifier_depth, classifier_attention_resolutions, classifier_use_scale_shift_norm, classifier_resblock_updown, classifier_pool)
    diffusion = create_gaussian_diffusion(steps=diffusion_steps, learn_sigma=learn_sigma, noise_schedule=noise_schedule, use_kl=use_kl, predict_xstart=predict_xstart, rescale_timesteps=rescale_timesteps, rescale_learned_sigmas=rescale_learned_sigmas, timestep_respacing=timestep_respacing)
    return (classifier, diffusion)
|
def create_classifier(image_size, classifier_use_fp16, classifier_width, classifier_depth, classifier_attention_resolutions, classifier_use_scale_shift_norm, classifier_resblock_updown, classifier_pool):
    """
    Build an EncoderUNetModel classifier over 1000 classes, with channel
    multipliers chosen by image_size.
    """
    presets = {
        512: (0.5, 1, 1, 2, 2, 4, 4),
        256: (1, 1, 2, 2, 4, 4),
        128: (1, 1, 2, 3, 4),
        64: (1, 2, 3, 4),
    }
    if image_size not in presets:
        raise ValueError(f'unsupported image size: {image_size}')
    channel_mult = presets[image_size]
    # Convert attention resolutions (in pixels) into downsample factors.
    attention_ds = [image_size // int(res) for res in classifier_attention_resolutions.split(',')]
    return EncoderUNetModel(image_size=image_size, in_channels=3, model_channels=classifier_width, out_channels=1000, num_res_blocks=classifier_depth, attention_resolutions=tuple(attention_ds), channel_mult=channel_mult, use_fp16=classifier_use_fp16, num_head_channels=64, use_scale_shift_norm=classifier_use_scale_shift_norm, resblock_updown=classifier_resblock_updown, pool=classifier_pool)
|
def sr_model_and_diffusion_defaults():
    """
    Defaults for super-resolution training, restricted to the arguments that
    sr_create_model_and_diffusion actually accepts.
    """
    res = model_and_diffusion_defaults()
    res['large_size'] = 256
    res['small_size'] = 64
    arg_names = inspect.getfullargspec(sr_create_model_and_diffusion)[0]
    # Drop keys (e.g. image_size) that the SR factory does not take.
    return {k: v for (k, v) in res.items() if k in arg_names}
|
def sr_create_model_and_diffusion(large_size, small_size, class_cond, learn_sigma, num_channels, num_res_blocks, num_heads, num_head_channels, num_heads_upsample, attention_resolutions, dropout, diffusion_steps, noise_schedule, timestep_respacing, use_kl, predict_xstart, rescale_timesteps, rescale_learned_sigmas, use_checkpoint, use_scale_shift_norm, resblock_updown, use_fp16):
    """
    Construct a super-resolution model and its Gaussian diffusion process from
    the flat keyword arguments produced by sr_model_and_diffusion_defaults().
    """
    model = sr_create_model(large_size, small_size, num_channels, num_res_blocks, learn_sigma=learn_sigma, class_cond=class_cond, use_checkpoint=use_checkpoint, attention_resolutions=attention_resolutions, num_heads=num_heads, num_head_channels=num_head_channels, num_heads_upsample=num_heads_upsample, use_scale_shift_norm=use_scale_shift_norm, dropout=dropout, resblock_updown=resblock_updown, use_fp16=use_fp16)
    diffusion = create_gaussian_diffusion(steps=diffusion_steps, learn_sigma=learn_sigma, noise_schedule=noise_schedule, use_kl=use_kl, predict_xstart=predict_xstart, rescale_timesteps=rescale_timesteps, rescale_learned_sigmas=rescale_learned_sigmas, timestep_respacing=timestep_respacing)
    return (model, diffusion)
|
def sr_create_model(large_size, small_size, num_channels, num_res_blocks, learn_sigma, class_cond, use_checkpoint, attention_resolutions, num_heads, num_head_channels, num_heads_upsample, use_scale_shift_norm, dropout, resblock_updown, use_fp16):
    """
    Build a SuperResModel; small_size is accepted for interface symmetry but unused.
    """
    _ = small_size
    presets = {
        512: (1, 1, 2, 2, 4, 4),
        256: (1, 1, 2, 2, 4, 4),
        64: (1, 2, 3, 4),
    }
    if large_size not in presets:
        raise ValueError(f'unsupported large size: {large_size}')
    channel_mult = presets[large_size]
    # Convert attention resolutions (in pixels) into downsample factors.
    attention_ds = [large_size // int(res) for res in attention_resolutions.split(',')]
    return SuperResModel(image_size=large_size, in_channels=3, model_channels=num_channels, out_channels=(6 if learn_sigma else 3), num_res_blocks=num_res_blocks, attention_resolutions=tuple(attention_ds), dropout=dropout, channel_mult=channel_mult, num_classes=(NUM_CLASSES if class_cond else None), use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=num_head_channels, num_heads_upsample=num_heads_upsample, use_scale_shift_norm=use_scale_shift_norm, resblock_updown=resblock_updown, use_fp16=use_fp16)
|
def create_gaussian_diffusion(*, steps=1000, learn_sigma=False, sigma_small=False, noise_schedule='linear', use_kl=False, predict_xstart=False, rescale_timesteps=False, rescale_learned_sigmas=False, timestep_respacing=''):
    """
    Build a SpacedDiffusion with the requested beta schedule, loss type,
    variance type, and timestep respacing.
    """
    betas = gd.get_named_beta_schedule(noise_schedule, steps)
    # Loss priority: KL > rescaled MSE > plain MSE.
    if use_kl:
        loss_type = gd.LossType.RESCALED_KL
    elif rescale_learned_sigmas:
        loss_type = gd.LossType.RESCALED_MSE
    else:
        loss_type = gd.LossType.MSE
    if (not timestep_respacing):
        # Empty respacing means "use every step".
        timestep_respacing = [steps]
    return SpacedDiffusion(use_timesteps=space_timesteps(steps, timestep_respacing), betas=betas, model_mean_type=(gd.ModelMeanType.EPSILON if (not predict_xstart) else gd.ModelMeanType.START_X), model_var_type=((gd.ModelVarType.FIXED_LARGE if (not sigma_small) else gd.ModelVarType.FIXED_SMALL) if (not learn_sigma) else gd.ModelVarType.LEARNED_RANGE), loss_type=loss_type, rescale_timesteps=rescale_timesteps)
|
def add_dict_to_argparser(parser, default_dict):
    """Register one --key argument per dict entry, inferring each type from its default."""
    for key, default in default_dict.items():
        if default is None:
            arg_type = str
        elif isinstance(default, bool):
            # Plain bool() would treat any non-empty string as True.
            arg_type = str2bool
        else:
            arg_type = type(default)
        parser.add_argument(f'--{key}', default=default, type=arg_type)
|
def args_to_dict(args, keys):
    """Extract the named attributes of args into a plain dict."""
    out = {}
    for key in keys:
        out[key] = getattr(args, key)
    return out
|
def str2bool(v):
    """
    Parse a human-friendly boolean string for argparse.
    https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected')
|
class TrainLoop():
    """
    Training loop for diffusion models: data iteration, micro-batching,
    mixed-precision training, EMA parameter tracking, LR annealing, periodic
    logging, and checkpointing, with DistributedDataParallel when CUDA is
    available.
    """
    def __init__(self, *, model, diffusion, data, batch_size, microbatch, lr, ema_rate, log_interval, save_interval, resume_checkpoint, use_fp16=False, fp16_scale_growth=0.001, schedule_sampler=None, weight_decay=0.0, lr_anneal_steps=0):
        self.model = model
        self.diffusion = diffusion
        self.data = data
        self.batch_size = batch_size
        # Non-positive microbatch disables micro-batching.
        self.microbatch = (microbatch if (microbatch > 0) else batch_size)
        self.lr = lr
        # ema_rate: a single float or a comma-separated string of decay rates.
        self.ema_rate = ([ema_rate] if isinstance(ema_rate, float) else [float(x) for x in ema_rate.split(',')])
        self.log_interval = log_interval
        self.save_interval = save_interval
        self.resume_checkpoint = resume_checkpoint
        self.use_fp16 = use_fp16
        self.fp16_scale_growth = fp16_scale_growth
        self.schedule_sampler = (schedule_sampler or UniformSampler(diffusion))
        self.weight_decay = weight_decay
        self.lr_anneal_steps = lr_anneal_steps
        self.step = 0
        self.resume_step = 0
        self.global_batch = (self.batch_size * dist.get_world_size())
        self.sync_cuda = th.cuda.is_available()
        self._load_and_sync_parameters()
        self.mp_trainer = MixedPrecisionTrainer(model=self.model, use_fp16=self.use_fp16, fp16_scale_growth=fp16_scale_growth)
        self.opt = AdamW(self.mp_trainer.master_params, lr=self.lr, weight_decay=self.weight_decay)
        if self.resume_step:
            self._load_optimizer_state()
            # Model was resumed in _load_and_sync_parameters; restore EMA too.
            self.ema_params = [self._load_ema_parameters(rate) for rate in self.ema_rate]
        else:
            # Fresh run: each EMA copy starts as the current master params.
            self.ema_params = [copy.deepcopy(self.mp_trainer.master_params) for _ in range(len(self.ema_rate))]
        if th.cuda.is_available():
            self.use_ddp = True
            self.ddp_model = DDP(self.model, device_ids=[dist_util.dev()], output_device=dist_util.dev(), broadcast_buffers=False, bucket_cap_mb=128, find_unused_parameters=False)
        else:
            if (dist.get_world_size() > 1):
                logger.warn('Distributed training requires CUDA. Gradients will not be synchronized properly!')
            self.use_ddp = False
            self.ddp_model = self.model
    def _load_and_sync_parameters(self):
        """Load model weights from a resume checkpoint (rank 0), then broadcast to all ranks."""
        resume_checkpoint = (find_resume_checkpoint() or self.resume_checkpoint)
        if resume_checkpoint:
            self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
            if (dist.get_rank() == 0):
                logger.log(f'loading model from checkpoint: {resume_checkpoint}...')
                self.model.load_state_dict(dist_util.load_state_dict(resume_checkpoint, map_location=dist_util.dev()))
        dist_util.sync_params(self.model.parameters())
    def _load_ema_parameters(self, rate):
        """Load EMA params for one decay rate, falling back to current master params."""
        ema_params = copy.deepcopy(self.mp_trainer.master_params)
        main_checkpoint = (find_resume_checkpoint() or self.resume_checkpoint)
        ema_checkpoint = find_ema_checkpoint(main_checkpoint, self.resume_step, rate)
        if ema_checkpoint:
            if (dist.get_rank() == 0):
                logger.log(f'loading EMA from checkpoint: {ema_checkpoint}...')
                state_dict = dist_util.load_state_dict(ema_checkpoint, map_location=dist_util.dev())
                ema_params = self.mp_trainer.state_dict_to_master_params(state_dict)
        dist_util.sync_params(ema_params)
        return ema_params
    def _load_optimizer_state(self):
        """Restore optimizer state saved next to the main checkpoint, if present."""
        main_checkpoint = (find_resume_checkpoint() or self.resume_checkpoint)
        opt_checkpoint = bf.join(bf.dirname(main_checkpoint), f'opt{self.resume_step:06}.pt')
        if bf.exists(opt_checkpoint):
            logger.log(f'loading optimizer state from checkpoint: {opt_checkpoint}')
            state_dict = dist_util.load_state_dict(opt_checkpoint, map_location=dist_util.dev())
            self.opt.load_state_dict(state_dict)
    def run_loop(self):
        """Train until lr_anneal_steps is reached (forever if it is 0)."""
        while ((not self.lr_anneal_steps) or ((self.step + self.resume_step) < self.lr_anneal_steps)):
            (batch, cond) = next(self.data)
            self.run_step(batch, cond)
            if ((self.step % self.log_interval) == 0):
                logger.dumpkvs()
            if ((self.step % self.save_interval) == 0):
                self.save()
                # Allow integration tests to stop after the first real checkpoint.
                if (os.environ.get('DIFFUSION_TRAINING_TEST', '') and (self.step > 0)):
                    return
            self.step += 1
        # Save the final state unless the last completed step just saved it.
        if (((self.step - 1) % self.save_interval) != 0):
            self.save()
    def run_step(self, batch, cond):
        """One training step: forward/backward, optimizer step, EMA, LR, logging."""
        self.forward_backward(batch, cond)
        took_step = self.mp_trainer.optimize(self.opt)
        if took_step:
            # Skipped optimizer steps (e.g. fp16 overflow) do not update the EMA.
            self._update_ema()
        self._anneal_lr()
        self.log_step()
    def forward_backward(self, batch, cond):
        """Accumulate gradients over microbatches of (batch, cond)."""
        self.mp_trainer.zero_grad()
        for i in range(0, batch.shape[0], self.microbatch):
            micro = batch[i:(i + self.microbatch)].to(dist_util.dev())
            micro_cond = {k: v[i:(i + self.microbatch)].to(dist_util.dev()) for (k, v) in cond.items()}
            last_batch = ((i + self.microbatch) >= batch.shape[0])
            (t, weights) = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())
            compute_losses = functools.partial(self.diffusion.training_losses, self.ddp_model, micro, t, model_kwargs=micro_cond)
            if (last_batch or (not self.use_ddp)):
                losses = compute_losses()
            else:
                # Defer DDP gradient sync until the last microbatch.
                with self.ddp_model.no_sync():
                    losses = compute_losses()
            if isinstance(self.schedule_sampler, LossAwareSampler):
                # Feed per-timestep losses back into the importance sampler.
                self.schedule_sampler.update_with_local_losses(t, losses['loss'].detach())
            loss = (losses['loss'] * weights).mean()
            log_loss_dict(self.diffusion, t, {k: (v * weights) for (k, v) in losses.items()})
            self.mp_trainer.backward(loss)
    def _update_ema(self):
        """Move each EMA parameter set toward the current master params."""
        for (rate, params) in zip(self.ema_rate, self.ema_params):
            update_ema(params, self.mp_trainer.master_params, rate=rate)
    def _anneal_lr(self):
        """Linearly decay the LR to zero over lr_anneal_steps (no-op when 0)."""
        if (not self.lr_anneal_steps):
            return
        frac_done = ((self.step + self.resume_step) / self.lr_anneal_steps)
        lr = (self.lr * (1 - frac_done))
        for param_group in self.opt.param_groups:
            param_group['lr'] = lr
    def log_step(self):
        """Log the global step and total samples seen across all ranks."""
        logger.logkv('step', (self.step + self.resume_step))
        logger.logkv('samples', (((self.step + self.resume_step) + 1) * self.global_batch))
    def save(self):
        """Save model, EMA, and optimizer checkpoints (rank 0 writes; all ranks barrier)."""
        def save_checkpoint(rate, params):
            # rate == 0 marks the non-EMA (master) parameters.
            state_dict = self.mp_trainer.master_params_to_state_dict(params)
            if (dist.get_rank() == 0):
                logger.log(f'saving model {rate}...')
                if (not rate):
                    filename = f'model{(self.step + self.resume_step):06d}.pt'
                else:
                    filename = f'ema_{rate}_{(self.step + self.resume_step):06d}.pt'
                with bf.BlobFile(bf.join(get_blob_logdir(), filename), 'wb') as f:
                    th.save(state_dict, f)
        save_checkpoint(0, self.mp_trainer.master_params)
        for (rate, params) in zip(self.ema_rate, self.ema_params):
            save_checkpoint(rate, params)
        if (dist.get_rank() == 0):
            with bf.BlobFile(bf.join(get_blob_logdir(), f'opt{(self.step + self.resume_step):06d}.pt'), 'wb') as f:
                th.save(self.opt.state_dict(), f)
        dist.barrier()
|
def parse_resume_step_from_filename(filename):
    """
    Parse filenames of the form ``path/to/modelNNNNNN.pt``, where NNNNNN is
    the checkpoint's number of steps.

    :param filename: checkpoint path to parse.
    :return: (int) the step count, or 0 when the name does not match.
    """
    _, marker, tail = filename.rpartition('model')
    if not marker:
        return 0
    digits = tail.split('.')[0]
    try:
        return int(digits)
    except ValueError:
        return 0
|
def get_blob_logdir():
    """Return the directory checkpoints are written to (the logger's output dir)."""
    return logger.get_dir()
|
def find_resume_checkpoint():
    # Placeholder hook: on cluster/blob-storage setups this would locate the
    # latest checkpoint to resume from. Returning None disables auto-resume.
    return None
|
def find_ema_checkpoint(main_checkpoint, step, rate):
    """
    Look for an EMA checkpoint next to the main checkpoint.

    :param main_checkpoint: path of the main model checkpoint, or None.
    :param step: (int) step count encoded in the filename.
    :param rate: EMA decay rate encoded in the filename.
    :return: the EMA checkpoint path if it exists, otherwise None.
    """
    if main_checkpoint is None:
        return None
    candidate = bf.join(bf.dirname(main_checkpoint), f'ema_{rate}_{step:06d}.pt')
    return candidate if bf.exists(candidate) else None
|
def log_loss_dict(diffusion, ts, losses):
    """
    Log running means for each loss term, plus per-quartile means bucketed
    by the diffusion timestep of each sample.

    :param diffusion: diffusion process (provides ``num_timesteps``).
    :param ts: tensor of sampled timesteps, one per sample.
    :param losses: dict mapping loss name -> per-sample loss tensor.
    """
    num_steps = diffusion.num_timesteps
    for name, values in losses.items():
        logger.logkv_mean(name, values.mean().item())
        # bucket each sample into one of four timestep quartiles
        for step, sample_loss in zip(ts.cpu().numpy(), values.detach().cpu().numpy()):
            bucket = int((4 * step) / num_steps)
            logger.logkv_mean(f'{name}_q{bucket}', sample_loss)
|
def main():
    """
    Classifier-guided diffusion sampling entry point.

    Loads a diffusion model and a noisy classifier, picks a sampler
    implementation based on ``--method``, generates ``--num_samples``
    class-conditional images across all distributed ranks, and saves the
    gathered samples plus labels as a single ``.npz`` on rank 0.
    """
    args = create_argparser().parse_args()
    dist_util.setup_dist()
    logger.configure()
    out_dir = os.path.join('symlink/output/', args.model_name, args.cond_name, args.method)
    if (dist.get_rank() == 0):
        print(out_dir)
        os.makedirs(out_dir, exist_ok=True)
    (config, model_config0, class_config) = create_config(args.model_name, args.timestep_rp)
    batch_size = config['batch_size']
    logger.log('creating model and diffusion...')
    model_config = model_and_diffusion_defaults()
    model_config.update(model_config0)
    (model, diffusion) = create_model_and_diffusion(**model_config)
    model.load_state_dict(dist_util.load_state_dict(config['model_path'], map_location='cpu'))
    # NOTE(review): .cuda() followed by .to(dist_util.dev()) looks redundant --
    # confirm dev() always resolves to the intended CUDA device
    model.requires_grad_(False).eval().cuda()
    model.to(dist_util.dev())
    if model_config['use_fp16']:
        model.convert_to_fp16()
    logger.log('loading classifier...')
    classifier_config = classifier_defaults()
    classifier_config.update(class_config)
    classifier = create_classifier(**classifier_config)
    classifier.load_state_dict(dist_util.load_state_dict(config['classifier_path'], map_location='cpu'))
    classifier.to(dist_util.dev())
    classifier.requires_grad_(False).eval().cuda()
    if classifier_config['classifier_use_fp16']:
        classifier.convert_to_fp16()
    def cond_fn(x, t, y=None, **kwargs):
        # classifier guidance: gradient of log p(y|x_t) w.r.t. x_t, scaled
        assert (y is not None)
        with th.enable_grad():
            x_in = x.detach().requires_grad_(True)
            logits = classifier(x_in, t)
            log_probs = F.log_softmax(logits, dim=(- 1))
            selected = log_probs[(range(len(logits)), y.view((- 1)))]
            grad = th.autograd.grad(selected.sum(), x_in)[0]
            return (grad * config['classifier_scale'])
    def model_fn(x, t, y=None):
        # drop the label for models trained without class conditioning
        assert (y is not None)
        return model(x, t, (y if model_config['class_cond'] else None))
    # pick the sampler; the 5th character of the method name encodes the
    # solver order, e.g. "plms4" -> order 4
    if (args.method == 'ddim'):
        sample_fn = diffusion.ddim_sample_loop
    elif (args.method[:4] in ['plms', 'pndm']):
        sample_fn = partial(diffusion.plms_sample_loop, order=int(args.method[4]))
    elif (args.method[:4] in ['ltsp', 'ours', 'ltts']):
        sample_fn = partial(diffusion.ltsp_sample_loop, order=int(args.method[4]))
    elif (args.method[:4] in ['stsp', 'bchf']):
        sample_fn = partial(diffusion.stsp_sample_loop, order=int(args.method[4]))
    else:
        sample_fn = diffusion.p_sample_loop
    # 'cond1' enables classifier guidance; anything else samples unguided
    if (args.cond_name == 'cond1'):
        cond_fn0 = cond_fn
    else:
        cond_fn0 = None
    logger.log('sampling...')
    all_images = []
    all_labels = []
    while ((len(all_images) * batch_size) < args.num_samples):
        model_kwargs = {}
        classes = th.randint(low=0, high=NUM_CLASSES, size=(batch_size,), device=dist_util.dev())
        model_kwargs['y'] = classes
        sample = sample_fn(model_fn, (batch_size, 3, config['image_size'], config['image_size']), clip_denoised=args.clip_denoised, model_kwargs=model_kwargs, cond_fn=cond_fn0, impu_fn=None, progress=True, device=dist_util.dev())
        # map from [-1, 1] float to uint8 [0, 255], NCHW -> NHWC
        sample = ((sample + 1) * 127.5).clamp(0, 255).to(th.uint8)
        sample = sample.permute(0, 2, 3, 1)
        sample = sample.contiguous()
        # gather samples/labels from every rank so each process holds all of them
        gathered_samples = [th.zeros_like(sample) for _ in range(dist.get_world_size())]
        dist.all_gather(gathered_samples, sample)
        all_images.extend([sample.cpu().numpy() for sample in gathered_samples])
        gathered_labels = [th.zeros_like(classes) for _ in range(dist.get_world_size())]
        dist.all_gather(gathered_labels, classes)
        all_labels.extend([labels.cpu().numpy() for labels in gathered_labels])
        logger.log(f'created {(len(all_images) * batch_size)} samples')
    arr = np.concatenate(all_images, axis=0)
    arr = arr[:args.num_samples]
    label_arr = np.concatenate(all_labels, axis=0)
    label_arr = label_arr[:args.num_samples]
    if (dist.get_rank() == 0):
        shape_str = 'x'.join([str(x) for x in arr.shape])
        out_path = os.path.join(out_dir, f'samples_{shape_str}.npz')
        logger.log(f'saving to {out_path}')
        np.savez(out_path, arr, label_arr)
    dist.barrier()
    logger.log('sampling complete')
|
def create_argparser():
    """
    Build the argument parser for the sampling script, seeded with the
    script defaults plus all model/diffusion and classifier defaults.

    :return: (argparse.ArgumentParser) configured parser.
    """
    defaults = dict(
        clip_denoised=True,
        num_samples=100,
        use_ddim=True,
        model_name='u256',
        method='ddim',
        cond_name='cond1',
        timestep_rp=25,
    )
    defaults.update(model_and_diffusion_defaults())
    defaults.update(classifier_defaults())
    parser = argparse.ArgumentParser()
    add_dict_to_argparser(parser, defaults)
    return parser
|
class BaseActorPolicy(object):
    """
    Policy wrapper for an actor. Subclasses fill in the methods to load a
    policy so the robot can use it to act in the environment.
    """

    def __init__(self, identifier=None):
        """
        :param identifier: (str) the name of the actor policy.
        """
        self.identifier = identifier

    def get_identifier(self):
        """
        :return: (str) the name of the actor policy.
        """
        return self.identifier

    def act(self, obs):
        """
        Called for the agent to act in the world.

        :param obs: (nd.array) observations received at time step t.
        :return: (nd.array) action to execute at time step t.
        """
        raise NotImplementedError()

    def reset(self):
        """
        Called to clear the controller state.

        :return:
        """
        return
|
class DummyActorPolicy(BaseActorPolicy):
    """
    Policy wrapper for a dummy actor: exposes the actor-policy interface
    but simply replays actions that are fed in externally via
    :meth:`add_action`.
    """

    def __init__(self):
        super(DummyActorPolicy, self).__init__(identifier='dummy_policy')
        self.action = None

    def act(self, obs):
        """
        Return the externally supplied action, ignoring the observation.

        :param obs: (nd.array) observations received at time step t.
        :return: (nd.array) action to execute at time step t.
        """
        return self.action

    def add_action(self, action):
        """
        Store the action that subsequent :meth:`act` calls will return.

        :param action: (nd.array) action to execute at time step t.
        :return:
        """
        self.action = action
|
class GraspingPolicy(BaseActorPolicy):
    """
    Hand-designed pick-and-place policy, expected to run @25 Hz, for blocks
    of size 6.5cm weighing 20 grams (best results). Outputs desired
    normalized end-effector positions for the three fingers (r, g, b).

    Phases (per instruction):
      0: move finger-center above the current cube's center.
      1: lower finger-center to encircle the cube, close the grip.
      2: move finger-center up, keeping the grip tight (lift).
      3: move toward the goal xy at constant height.
      4: move vertically to the goal height while loosening the grip.
      5: move finger-center up again.
      (6: interpolate back toward the next cube.)

    Variables:
      alpha: interpolation value between two positions.
      ds: distances of finger tips to the grip center.
      t: time in [0, 1) within the current phase.
      program_counter: index of the current instruction; incremented once
          all phases complete.

    Hyperparameters:
      phase_velocity_k: speed at which phase k progresses.
      d0_r, d0_gb / d1_r, d1_gb: tip distance from grip center while
          gripping / not gripping.
      h0_*, h1_*: grip-center heights while grasping / moving.
      gb_angle_spread: angle between green and blue tips on the grip circle.
      fall_trigger_h: if the box is detected below this height while it
          should be gripped, restart grasping (phase back to 0).
    """

    def __init__(self, tool_blocks_order):
        """
        :param tool_blocks_order: (nd.array) the program: block indices in
                                  the order they should be moved, each in
                                  [0, number of blocks in the arena).
        """
        super(GraspingPolicy, self).__init__(identifier='grasping_policy')
        self._program_counter = 0
        self._program = tool_blocks_order
        self._phase = 0
        self._t = 0
        # grip-center heights: h0 = lowered (grasp), h1 = raised (travel)
        self._h0_r = (- 0.98)
        self._h1_r = (- 0.4)
        self._h0_gb = (- 0.98)
        self._h1_gb = (- 0.4)
        # tip-to-center distances: d0 = closed grip, d1 = open grip
        self._d0_r = 0.038
        self._d0_gb = 0.038
        self._d1_r = 0.1
        self._d1_gb = 0.1
        # angular placement of the three tips on the grip circle
        self._a1 = (np.pi / 2)
        self._gb_angle_spread = (0.8 * np.pi)
        self._a2 = (((3 * np.pi) / 2) + (self._gb_angle_spread / 2))
        self._a3 = (((3 * np.pi) / 2) - (self._gb_angle_spread / 2))
        self._fall_trigger_h = (- 0.7)
        # per-phase progression speed of self._t
        self._phase_velocities = [0.008, 0.01, 0.02, 0.005, 0.005, 0.01, 0.01]
        self.current_target_x = None
        self.current_target_y = None

    def act(self, obs):
        """
        Advance the grasping state machine by one tick.

        :param obs: (nd.array) observation vector; block/goal fields are read
                    at fixed offsets (28 + 17 per block for block state,
                    then 11 per block for goal state) -- assumes the
                    structured observation layout; TODO confirm offsets
                    against the observation spec.
        :return: (nd.array) 9-vector of desired (r, g, b) tip positions.
        """
        # program finished: hold the current end-effector positions
        if (self._program_counter == len(self._program)):
            return obs[19:28]
        block_idx = self._program[self._program_counter]
        number_of_blocks = len(self._program)
        # goal pose of the current block
        target_height = obs[(((28 + (number_of_blocks * 17)) + (block_idx * 11)) + 6)]
        target_x = obs[(((28 + (number_of_blocks * 17)) + (block_idx * 11)) + 4)]
        target_y = obs[(((28 + (number_of_blocks * 17)) + (block_idx * 11)) + 5)]
        # latch the cube's xy at the start of a grasp attempt
        if (self._phase == 0):
            self.current_target_x = obs[((28 + (block_idx * 17)) + 4)]
            self.current_target_y = obs[((28 + (block_idx * 17)) + 5)]
        if (self._program_counter < (len(self._program) - 1)):
            next_block_idx = self._program[(self._program_counter + 1)]
        else:
            next_block_idx = self._program[(- 1)]
        next_cube_x = obs[((28 + (next_block_idx * 17)) + 4)]
        next_cube_y = obs[((28 + (next_block_idx * 17)) + 5)]
        # dropped the cube while carrying it: restart the grasp
        if ((self._phase == 3) and (obs[((28 + (block_idx * 17)) + 6)] < self._fall_trigger_h)):
            self._phase = 0
            self._t = 0
        interpolated_xy = self._get_interpolated_xy(target_x, target_y, self.current_target_x, self.current_target_y, next_cube_x, next_cube_y)
        (target_h_r, target_h_g, target_h_b) = self._get_target_hs(target_height)
        (d_r, d_g, d_b) = self._get_ds()
        # place each tip on the grip circle at its fixed angle
        pos_r = np.array([(interpolated_xy[0] + (d_r * np.cos(self._a1))), (interpolated_xy[1] + (d_r * np.sin(self._a1))), target_h_r])
        pos_g = np.array([(interpolated_xy[0] + (d_g * np.cos(self._a2))), (interpolated_xy[1] + (d_g * np.sin(self._a2))), target_h_g])
        pos_b = np.array([(interpolated_xy[0] + (d_b * np.cos(self._a3))), (interpolated_xy[1] + (d_b * np.sin(self._a3))), target_h_b])
        # advance phase time; roll over into the next phase / instruction
        self._t += self._phase_velocities[self._phase]
        if (self._t >= 1.0):
            self._phase += 1
            self._t -= 1.0
        if (self._phase >= 7):
            self._phase = 0
            self._program_counter += 1
            self._t = 0
        return np.concatenate((pos_r, pos_g, pos_b), axis=0)

    def _get_ds(self):
        """
        :return: distances of finger tips to the grip center for the
                 current phase (open d1 -> closed d0 -> open d1).
        """
        if (self._phase == 0):
            d_r = self._d1_r
            d_gb = self._d1_gb
        elif (self._phase == 1):
            # close the grip during the second half of phase 1
            a = self._mix_sin(max(0, (2 * (self._t - 0.5))))
            d_r = self._combine_convex(self._d1_r, self._d0_r, a)
            d_gb = self._combine_convex(self._d1_gb, self._d0_gb, a)
        elif (self._phase in [2, 3]):
            d_r = self._d0_r
            d_gb = self._d0_gb
        elif (self._phase == 4):
            d_r = self._d0_r
            d_gb = self._d0_gb
        elif (self._phase in [5, 6]):
            d_r = self._d1_r
            d_gb = self._d1_gb
        else:
            raise ValueError()
        return [d_r, d_gb, d_gb]

    def _get_interpolated_xy(self, target_x, target_y, current_cube_x, current_cube_y, next_cube_x, next_cube_y):
        """
        Interpolate the grip-center xy between the cube being handled
        (phases < 4) or the next cube (phases >= 4) and the goal position.

        :param target_x: target x of the grip center.
        :param target_y: target y of the grip center.
        :param current_cube_x: x of current cube to be gripped.
        :param current_cube_y: y of current cube to be gripped.
        :param next_cube_x: x of next cube to be gripped.
        :param next_cube_y: y of next cube to be gripped.
        :return: interpolated (x, y) of the grip center.
        """
        if (self._phase < 4):
            current_x = current_cube_x
            current_y = current_cube_y
        else:
            current_x = next_cube_x
            current_y = next_cube_y
        alpha = self._get_alpha()
        xy_target = (((1 - alpha) * np.array([current_x, current_y])) + (alpha * np.array([target_x, target_y])))
        return xy_target

    def _get_alpha(self):
        """
        :return: interpolation factor toward the goal xy: 0 while grasping,
                 ramping to 1 over phase 3, 1 while placing, ramping back
                 to 0 over phase 6.
        """
        if (self._phase < 3):
            return 0
        elif (self._phase == 3):
            return self._mix_sin(self._t)
        elif (self._phase == 4):
            return 1.0
        elif (self._phase == 5):
            return 1.0
        elif (self._phase == 6):
            return (1 - self._mix_sin(self._t))
        else:
            raise ValueError()

    def _get_target_hs(self, target_height):
        """
        :param target_height: target height to be reached.
        :return: (nd.array) target heights [r, gb, gb] for the three tips.
        """
        if (self._phase == 0):
            h_r = self._h1_r
            h_gb = self._h1_gb
        elif (self._phase == 1):
            # lower toward the grasp heights
            a = self._mix_sin(max(0, self._t))
            h_r = self._combine_convex(self._h1_r, self._h0_r, a)
            h_gb = self._combine_convex(self._h1_gb, self._h0_gb, a)
        elif (self._phase == 2):
            # lift back up with the cube
            a = self._mix_sin(max(0, self._t))
            h_r = self._combine_convex(self._h0_r, self._h1_r, a)
            h_gb = self._combine_convex(self._h0_gb, self._h1_gb, a)
        elif (self._phase == 3):
            h_r = self._h1_r
            h_gb = self._h1_gb
        elif (self._phase == 4):
            # descend to the goal height, keeping the r/gb height offset
            h_target_r = target_height
            h_target_gb = (h_target_r + (self._h0_gb - self._h0_r))
            # NOTE(review): h_r interpolates toward h_target_gb, not
            # h_target_r -- looks like a possible typo; confirm intent
            h_r = self._combine_convex(self._h1_r, h_target_gb, self._mix_sin(self._t))
            h_gb = self._combine_convex(self._h1_gb, h_target_gb, self._mix_sin(self._t))
        elif (self._phase == 5):
            h_target_r = target_height
            h_target_gb = (h_target_r + (self._h0_gb - self._h0_r))
            h_r = self._combine_convex(h_target_r, self._h1_r, self._mix_sin(self._t))
            h_gb = self._combine_convex(h_target_gb, self._h1_gb, self._mix_sin(self._t))
        elif (self._phase == 6):
            h_r = self._h1_r
            h_gb = self._h1_gb
        else:
            raise ValueError()
        return np.array([h_r, h_gb, h_gb])

    def reset(self):
        """
        Reset the controller state machine to the start of the program.

        :return:
        """
        self._phase = 0
        self._t = 0
        self._program_counter = 0

    def _mix_sin(self, t):
        """
        :param t: time ranging from 0 to 1.
        :return: smooth (cosine) ramp from 0 to 1.
        """
        return (0.5 * (1 - np.cos((t * np.pi))))

    def _combine_convex(self, a, b, alpha):
        """
        :param a: start value.
        :param b: end value.
        :param alpha: interpolation factor in [0, 1].
        :return: convex combination of a and b.
        """
        return (((1 - alpha) * a) + (alpha * b))
|
class PickAndPlaceActorPolicy(BaseActorPolicy):
    """
    PPO-based pick-and-place policy, expected to run @83.3 Hz. Expects
    normalized observations and outputs desired joint positions.
    Trained with one goal position only.
    """

    def __init__(self):
        super(PickAndPlaceActorPolicy, self).__init__('pick_and_place_policy')
        weights_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            '../assets/baseline_actors/pick_and_place_ppo_curr0.zip')
        self._policy = PPO2.load(weights_path)

    def act(self, obs):
        """
        Return the deterministic PPO action for the given observation.

        :param obs: (nd.array) observations received at time step t.
        :return: (nd.array) action to execute at time step t.
        """
        action, _ = self._policy.predict(obs, deterministic=True)
        return action
|
class PickingActorPolicy(BaseActorPolicy):
    """
    PPO-based picking policy, expected to run @83.3 Hz. Expects normalized
    observations and outputs desired joint positions.
    Trained with several goal heights.
    """

    def __init__(self):
        super(PickingActorPolicy, self).__init__('picking_policy')
        weights_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            '../assets/baseline_actors/picking_ppo_curr1.zip')
        self._policy = PPO2.load(weights_path)

    def act(self, obs):
        """
        Return the deterministic PPO action for the given observation.

        :param obs: (nd.array) observations received at time step t.
        :return: (nd.array) action to execute at time step t.
        """
        action, _ = self._policy.predict(obs, deterministic=True)
        return action
|
class PushingActorPolicy(BaseActorPolicy):
    """
    PPO-based pushing policy, expected to run @83.3 Hz. Expects normalized
    observations and outputs desired joint positions.
    Trained with several goals.
    """

    def __init__(self):
        super(PushingActorPolicy, self).__init__('pushing_policy')
        weights_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            '../assets/baseline_actors/pushing_ppo_curr1.zip')
        self._policy = PPO2.load(weights_path)

    def act(self, obs):
        """
        Return the deterministic PPO action for the given observation.

        :param obs: (nd.array) observations received at time step t.
        :return: (nd.array) action to execute at time step t.
        """
        action, _ = self._policy.predict(obs, deterministic=True)
        return action
|
class RandomActorPolicy(BaseActorPolicy):
    """
    Policy wrapper for a random actor: samples actions uniformly within
    the given bounds, ignoring the observation.
    """

    def __init__(self, low_bound, upper_bound):
        super(RandomActorPolicy, self).__init__(identifier='random_policy')
        self._low_bound = low_bound
        self._upper_bound = upper_bound

    def act(self, obs):
        """
        Sample a uniform random action within [low_bound, upper_bound].

        :param obs: (nd.array) observations received at time step t (unused).
        :return: (nd.array) action to execute at time step t.
        """
        return np.random.uniform(self._low_bound, self._upper_bound)
|
class ReacherActorPolicy(BaseActorPolicy):
    """
    PPO-based reaching policy, expected to run @83.33 Hz. Expects
    normalized observations and outputs desired joint positions.
    Trained with several goals.
    """

    def __init__(self):
        # Fix: every sibling policy registers an identifier; this one
        # previously called super().__init__() without one, leaving
        # get_identifier() returning None.
        super(ReacherActorPolicy, self).__init__(identifier='reaching_policy')
        weights_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            '../assets/baseline_actors/reaching_ppo_curr1.zip')
        self._policy = PPO2.load(weights_path)

    def act(self, obs):
        """
        Return the deterministic PPO action for the given observation.

        :param obs: (nd.array) observations received at time step t.
        :return: (nd.array) action to execute at time step t.
        """
        return self._policy.predict(obs, deterministic=True)[0]
|
class Stacking2ActorPolicy(BaseActorPolicy):
    """
    PPO-based two-block stacking policy, expected to run @83.3 Hz. Expects
    normalized observations and outputs desired joint positions.
    Trained with several goal positions.
    """

    def __init__(self):
        super(Stacking2ActorPolicy, self).__init__('stacking2_policy')
        weights_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            '../assets/baseline_actors/stacking2_ppo_curr1.zip')
        self._policy = PPO2.load(weights_path)

    def act(self, obs):
        """
        Return the deterministic PPO action for the given observation.

        :param obs: (nd.array) observations received at time step t.
        :return: (nd.array) action to execute at time step t.
        """
        action, _ = self._policy.predict(obs, deterministic=True)
        return action
|
class WorldConstants():
    """Constants describing the tri-finger robot world."""
    # body ids in the simulation (presumably pybullet body unique ids -- confirm)
    ROBOT_ID = 1
    FLOOR_ID = 2
    STAGE_ID = 3
    # heights in meters
    FLOOR_HEIGHT = 0.011
    ROBOT_HEIGHT = 0.34
    # axis-aligned arena bounding box: [[x_min, y_min, z_min], [x_max, y_max, z_max]]
    ARENA_BB = np.array([[(- 0.15), (- 0.15), 0], [0.15, 0.15, 0.3]])
    # finger link name -> link index (one gap index between fingers)
    LINK_IDS = {'robot_finger_60_link_0': 1, 'robot_finger_60_link_1': 2, 'robot_finger_60_link_2': 3, 'robot_finger_60_link_3': 4, 'robot_finger_120_link_0': 6, 'robot_finger_120_link_1': 7, 'robot_finger_120_link_2': 8, 'robot_finger_120_link_3': 9, 'robot_finger_300_link_0': 11, 'robot_finger_300_link_1': 12, 'robot_finger_300_link_2': 13, 'robot_finger_300_link_3': 14}
    # finger link name -> visual shape index
    VISUAL_SHAPE_IDS = {'robot_finger_60_link_0': 0, 'robot_finger_60_link_1': 4, 'robot_finger_60_link_2': 5, 'robot_finger_60_link_3': 6, 'robot_finger_120_link_0': 7, 'robot_finger_120_link_1': 11, 'robot_finger_120_link_2': 12, 'robot_finger_120_link_3': 13, 'robot_finger_300_link_0': 14, 'robot_finger_300_link_1': 15, 'robot_finger_300_link_2': 16, 'robot_finger_300_link_3': 17}
    # actuated joints, grouped per finger (upper, middle, lower)
    JOINT_NAMES = ['finger_upper_link_0', 'finger_middle_link_0', 'finger_lower_link_0', 'finger_upper_link_120', 'finger_middle_link_120', 'finger_lower_link_120', 'finger_upper_link_240', 'finger_middle_link_240', 'finger_lower_link_240']
    # end-effector (tip) links, one per finger
    TIP_LINK_NAMES = ['finger_tip_link_0', 'finger_tip_link_120', 'finger_tip_link_240']
|
class Curriculum(object):
    """
    A curriculum: pairs intervention actors with activation windows that
    decide when each actor is allowed to intervene.
    """

    def __init__(self, intervention_actors, actives):
        """
        :param intervention_actors: (list) intervention actors, one per
                                    entry in ``actives``.
        :param actives: (list of tuples) each tuple is (episode_start,
                        episode_end, episode_periodicity,
                        time_step_for_intervention).
        """
        self.intervention_actors = intervention_actors
        self.actives = actives

    def get_interventions(self, current_task_params, episode, time_step):
        """
        Collect interventions from every actor whose activation window
        matches the given episode and time step.

        :param current_task_params: (dict) current world variables/values.
        :param episode: (int) current episode number.
        :param time_step: (int) current time step within the episode.
        :return: (dict) merged interventions, or None if no actor fired.
        """
        collected = dict()
        for idx, (start, end, period, at_step) in enumerate(self.actives):
            if not (start <= episode <= end):
                continue
            if ((episode - start) % period) != 0:
                continue
            if time_step != at_step:
                continue
            collected.update(self.intervention_actors[idx].act(current_task_params))
        return collected if collected else None

    def initialize_actors(self, env):
        """
        Let each actor inspect the env (action space, etc.) before the run.

        :param env: (causal_world.CausalWorld) the env used.
        :return:
        """
        for actor in self.intervention_actors:
            actor.initialize(env)

    def get_params(self):
        """
        :return: (dict) current status of the curriculum: the merged actor
                 params plus the activation windows.
        """
        actor_params = dict()
        for actor in self.intervention_actors:
            actor_params.update(actor.get_params())
        return {'actor_params': actor_params, 'actives': self.actives}
|
class TriFingerAction(object):
    """
    Encapsulates the tri-finger robot's action limits and action spaces.
    """

    def __init__(self, action_mode='joint_positions', normalize_actions=True):
        """
        :param action_mode: (str) one of "joint_positions", "joint_torques"
                            or "end_effector_positions".
        :param normalize_actions: (bool) true if actions should be normalized.
        """
        self.normalize_actions = normalize_actions
        self.max_motor_torque = 0.36
        self.low = None
        self.high = None
        num_fingers = 3
        self.action_mode = action_mode
        # per-finger joint limits, repeated for the three fingers
        self.joint_positions_lower_bounds = np.array([-1.57, -1.2, -3.0] * 3)
        self.joint_positions_upper_bounds = np.array([1.0, 1.57, 3.0] * 3)
        self.joint_positions_raised = np.array([-1.56, -0.08, -2.7] * 3)
        if action_mode == 'joint_positions':
            bounds = (self.joint_positions_lower_bounds,
                      self.joint_positions_upper_bounds)
        elif action_mode == 'joint_torques':
            bounds = (np.array([-self.max_motor_torque] * 3 * num_fingers),
                      np.array([self.max_motor_torque] * 3 * num_fingers))
        elif action_mode == 'end_effector_positions':
            bounds = (np.array([-0.5, -0.5, 0] * 3),
                      np.array([0.5, 0.5, 0.5] * 3))
        else:
            raise ValueError('No valid action_mode specified: {}'.format(action_mode))
        self.set_action_space(*bounds)

    def set_action_space(self, lower_bounds, upper_bounds):
        """
        :param lower_bounds: (list) lower bounds of actions.
        :param upper_bounds: (list) upper bounds of actions.
        :return:
        """
        assert len(lower_bounds) == len(upper_bounds)
        self.low = lower_bounds
        self.high = upper_bounds

    def get_action_space(self):
        """
        :return: (gym.spaces.Box) the current action space.
        """
        if self.normalize_actions:
            return spaces.Box(low=-np.ones(len(self.low)),
                              high=np.ones(len(self.high)),
                              dtype=np.float64)
        return spaces.Box(low=self.low, high=self.high, dtype=np.float64)

    def is_normalized(self):
        """
        :return: (bool) true if actions are normalized, false otherwise.
        """
        return self.normalize_actions

    def satisfy_constraints(self, action):
        """
        :param action: (nd.array) action to check against the limits.
        :return: (bool) true iff every component is strictly inside them.
        """
        if self.normalize_actions:
            lower, upper = -1.0, 1.0
        else:
            lower, upper = self.low, self.high
        return (action > lower).all() and (action < upper).all()

    def clip_action(self, action):
        """
        :param action: (nd.array) action to clip to the limits.
        :return: (nd.array) clipped action.
        """
        if self.normalize_actions:
            return clip(action, -1.0, 1.0)
        return clip(action, self.low, self.high)

    def normalize_action(self, action):
        """
        :param action: (nd.array) action to normalize into [-1, 1].
        :return: (nd.array) normalized action.
        """
        return 2.0 * (action - self.low) / (self.high - self.low) - 1.0

    def denormalize_action(self, action):
        """
        :param action: (nd.array) normalized action in [-1, 1].
        :return: (nd.array) action mapped back to [low, high].
        """
        return self.low + (action + 1.0) / 2.0 * (self.high - self.low)
|
class TriFingerObservations(object):
def __init__(self, observation_mode='structured', normalize_observations=True, observation_keys=None, cameras=None, camera_indicies=None):
    """
    Represents the observation limits of the robot and takes care of
    normalization of the observation values.

    :param observation_mode: (str) either "structured" or "pixel".
    :param normalize_observations: (bool) true if normalized observations.
    :param observation_keys: (list) observation keys of the observation
                             space if known, None otherwise.
    :param cameras: (list) Camera objects used for the pixel observation
                    if needed, None otherwise.
    :param camera_indicies: (nd.array) indicies of selected cameras in
                            "pixel" mode - 0, 1 and 2; defaults to all
                            three cameras.

    :raises ValueError: if any provided observation key is unknown.
    """
    if camera_indicies is None:
        # Resolved here instead of in the signature: an ndarray default
        # argument is a mutable object shared by every instance.
        camera_indicies = np.array([0, 1, 2])
    num_fingers = 3
    self._normalized_observations = normalize_observations
    self._observation_mode = observation_mode
    self._camera_indicies = camera_indicies
    self._lower_bounds = dict()
    self._upper_bounds = dict()
    self._lower_bounds['action_joint_positions'] = ([(- 1.57), (- 1.2), (- 3.0)] * num_fingers)
    self._upper_bounds['action_joint_positions'] = ([1.0, 1.57, 3.0] * num_fingers)
    self._lower_bounds['end_effector_positions'] = ([(- 0.5), (- 0.5), 0.0] * num_fingers)
    self._upper_bounds['end_effector_positions'] = ([0.5, 0.5, 0.5] * num_fingers)
    self._lower_bounds['joint_torques'] = ([(- 0.36), (- 0.36), (- 0.36)] * num_fingers)
    self._upper_bounds['joint_torques'] = ([0.36, 0.36, 0.36] * num_fingers)
    self._lower_bounds['joint_positions'] = ([(- 1.57), (- 1.2), (- 3.0)] * num_fingers)
    self._upper_bounds['joint_positions'] = ([1.0, 1.57, 3.0] * num_fingers)
    self._lower_bounds['joint_velocities'] = (([(- 50)] * 3) * num_fingers)
    self._upper_bounds['joint_velocities'] = (([50] * 3) * num_fingers)
    num_of_cameras = self._camera_indicies.shape[0]
    self._lower_bounds['pixel'] = np.zeros(shape=(num_of_cameras, 128, 128, 3), dtype=np.float64)
    self._upper_bounds['pixel'] = np.full(shape=(num_of_cameras, 128, 128, 3), fill_value=255, dtype=np.float64)
    self._observation_functions = dict()
    self._low_norm = (- 1)
    self._high_norm = 1
    if (observation_mode == 'pixel'):
        self._observations_keys = ['pixel']
        self._low = np.zeros(shape=(num_of_cameras, 128, 128, 3), dtype=np.float64)
        self._high = np.full(shape=(num_of_cameras, 128, 128, 3), fill_value=255, dtype=np.float64)
        # pixels normalize into [0, 1] rather than [-1, 1]
        self._low_norm = 0
        self._high_norm = 1
        self._cameras = cameras
    elif (observation_mode == 'structured'):
        if (observation_keys is None):
            self._observations_keys = []
        elif all(((key in self._lower_bounds.keys()) for key in observation_keys)):
            self._observations_keys = observation_keys
        else:
            raise ValueError('One of the provided observation_keys is unknown')
    if (self._observation_mode == 'structured'):
        # np.bool was removed from NumPy (deprecated 1.20, removed 1.24);
        # the builtin bool is the equivalent dtype
        self._observation_is_not_normalized = np.array([], dtype=bool)
    self._low = np.array([])
    self._high = np.array([])
    self.set_observation_spaces()
def get_observation_spaces(self):
    """
    :return: (gym.spaces.Box) returns the current observation space:
             normalized bounds when normalization is on (except for
             structured dimensions whose raw bounds coincide, which keep
             their raw limits), otherwise raw float bounds for
             "structured" mode or uint8 image bounds for "pixel" mode.
    """
    if self._normalized_observations:
        observations_low_values = np.full(shape=self._low.shape, fill_value=self._low_norm, dtype=np.float64)
        observations_high_values = np.full(shape=self._low.shape, fill_value=self._high_norm, dtype=np.float64)
        if (self._observation_mode == 'structured'):
            # dimensions flagged as not-normalizable keep their raw bounds
            observations_low_values[self._observation_is_not_normalized] = self._low[self._observation_is_not_normalized]
            observations_high_values[self._observation_is_not_normalized] = self._high[self._observation_is_not_normalized]
        return spaces.Box(low=observations_low_values, high=observations_high_values, dtype=np.float64)
    elif (self._observation_mode == 'structured'):
        return spaces.Box(low=self._low, high=self._high, dtype=np.float64)
    else:
        # raw pixel observations are 8-bit images
        return spaces.Box(low=self._low, high=self._high, dtype=np.uint8)
def set_observation_spaces(self):
'\n sets the observation space properly given that the observation keys are\n added..etc\n\n :return:\n '
self._low = np.array([])
self._high = np.array([])
self._observation_is_not_normalized = np.array([], dtype=np.bool)
if (self._observation_mode == 'pixel'):
self._low = np.array(self._lower_bounds['pixel'])
self._high = np.array(self._lower_bounds['pixel'])
else:
for key in self._observations_keys:
self._low = np.append(self._low, np.array(self._lower_bounds[key]))
self._high = np.append(self._high, np.array(self._upper_bounds[key]))
if np.array_equal(self._lower_bounds[key], self._upper_bounds[key]):
self._observation_is_not_normalized = np.append(self._observation_is_not_normalized, np.full(shape=np.array(self._upper_bounds[key]).shape, fill_value=True, dtype=np.bool))
else:
self._observation_is_not_normalized = np.append(self._observation_is_not_normalized, np.full(shape=np.array(self._upper_bounds[key]).shape, fill_value=False, dtype=np.bool))
return
def is_normalized(self):
    """
    :return: (bool) true if observations are normalized.
    """
    return self._normalized_observations
def reset_observation_keys(self):
    """
    Clear every observation key and rebuild the (now empty) observation
    space accordingly.

    :return:
    """
    self._observations_keys = []
    self.set_observation_spaces()
def normalize_observation(self, observation):
'\n\n :param observation: (nd.array) full observation vector to normalize.\n\n :return: (nd.array) normalized observation vector.\n '
return ((((self._high_norm - self._low_norm) * (observation - self._low)) / (self._high - self._low)) + self._low_norm)
def normalize_observation_for_key(self, observation, key):
'\n\n :param observation: (nd.array) observation vector to normalize.\n :param key: (str) key corresponding to the observation vector.\n\n :return: (nd.array) normalized observation vector.\n '
lower_key = np.array(self._lower_bounds[key])
higher_key = np.array(self._upper_bounds[key])
return ((((self._high_norm - self._low_norm) * (observation - lower_key)) / (higher_key - lower_key)) + self._low_norm)
def denormalize_observation(self, observation):
'\n\n :param observation:\n\n :return:\n '
return (self._low + (((observation - self._low_norm) / (self._high_norm - self._low_norm)) * (self._high - self._low)))
def denormalize_observation_for_key(self, observation, key):
'\n\n :param observation: (nd.array) observation vector to denormalize.\n :param key: (str) key corresponding to the observation vector.\n\n :return: (nd.array) denormalized observation vector.\n '
lower_key = np.array(self._lower_bounds[key])
higher_key = np.array(self._upper_bounds[key])
return (lower_key + (((observation - self._low_norm) / (self._high_norm - self._low_norm)) * (higher_key - lower_key)))
def satisfy_constraints(self, observation):
'\n\n :param observation: (nd.array) observation vector to check if it\n satisfies the constraints.\n\n :return: (bool) returns true if the constraints are satisified, false\n otherwise.\n '
if self._normalized_observations:
return ((observation > self._low_norm).all() and (observation < self._high_norm).all())
else:
return ((observation > self._low).all() and (observation < self._high).all())
def clip_observation(self, observation):
'\n\n :param observation: (nd.array) observation vector to clip.\n\n :return: (nd.array) clipped observation vector to satisfy the limits.\n '
if self._normalized_observations:
return clip(observation, self._low_norm, self._high_norm)
else:
return clip(observation, self._low, self._high)
def add_observation(self, observation_key, lower_bound=None, upper_bound=None, observation_fn=None):
'\n\n :param observation_key: (str) observation key to be added.\n :param lower_bound: (nd.array) lower bound corresponding to the\n observation key if not known.\n :param upper_bound: (nd.array) upper bound corresponding to the\n observation key if not known.\n :param observation_fn: (func) function to use in calculating the\n observation, the robot state should be\n expected to be passed to this function.\n\n :return: None\n '
if ((observation_key not in self._lower_bounds.keys()) and ((lower_bound is None) or (upper_bound is None))):
raise Exception('Observation key {} is not known please specify the low and upper found'.format(observation_key))
if ((lower_bound is not None) and (upper_bound is not None)):
self._lower_bounds[observation_key] = lower_bound
self._upper_bounds[observation_key] = upper_bound
if (observation_fn is not None):
self._observation_functions[observation_key] = observation_fn
self._observations_keys.append(observation_key)
self.set_observation_spaces()
return
def is_observation_key_known(self, observation_key):
'\n\n :param observation_key: (str) observation key to check if its added\n to the space.\n\n :return: (bool) true if its known and added to the space,\n false otherwise.\n '
if (observation_key not in self._lower_bounds.keys()):
return False
else:
return True
def remove_observations(self, observations):
'\n\n :param observations: (list) list of observation keys to remove from\n the observation space.\n\n :return: None\n '
for observation in observations:
if (observation not in self._observations_keys):
raise Exception('Observation key {} is not known'.format(observation))
self._observations_keys.remove(observation)
self.set_observation_spaces()
return
def get_current_observations(self, robot_state, helper_keys):
'\n\n :param robot_state: (dict) the current robot state, with joint positions,\n velocities and torques.\n :param helper_keys: (list) observation keys that are needed but not in\n the observation space for further calculation\n of custom observations or reward function\n calculation.\n\n :return: (dict) returns a dict for all the observation keys and helper\n keys as well to be processed accordingly. Also\n normalization takes effect here if needed.\n '
observations_dict = dict()
for observation in self._observations_keys:
if (observation == 'joint_positions'):
observations_dict['joint_positions'] = robot_state['positions']
elif (observation == 'joint_torques'):
observations_dict['joint_torques'] = robot_state['torques']
elif (observation == 'joint_velocities'):
observations_dict['joint_velocities'] = robot_state['velocities']
elif (observation == 'end_effector_positions'):
observations_dict['end_effector_positions'] = robot_state['end_effector_positions']
elif (observation == 'pixel'):
camera_obs = np.stack((self._cameras[0].get_image(), self._cameras[1].get_image(), self._cameras[2].get_image()), axis=0)
observations_dict['pixel'] = camera_obs
elif (observation in self._observation_functions):
observations_dict[observation] = self._observation_functions[observation](robot_state)
for observation in helper_keys:
if (observation == 'joint_positions'):
observations_dict['joint_positions'] = robot_state['positions']
elif (observation == 'joint_torques'):
observations_dict['joint_torques'] = robot_state['torques']
elif (observation == 'joint_velocities'):
observations_dict['joint_velocities'] = robot_state['velocities']
elif (observation == 'end_effector_positions'):
observations_dict['end_effector_positions'] = robot_state['end_effector_positions']
elif (observation == 'pixel'):
images = []
for i in self._camera_indicies:
images.append(self._cameras[i].get_image())
camera_obs = np.stack(images, axis=0)
observations_dict['pixel'] = camera_obs
elif (observation in self._observation_functions):
observations_dict[observation] = self._observation_functions[observation](robot_state)
else:
raise Exception("The robot doesn't know about observation key {}".format(observation))
if self._normalized_observations:
for key in observations_dict.keys():
observations_dict[key] = self.normalize_observation_for_key(observations_dict[key], key)
return observations_dict
def get_current_camera_observations(self):
'\n\n :return: (nd.array) returns observations from the cameras if in "pixel"\n mode, normalization takes place here.\n '
images = []
for i in self._camera_indicies:
images.append(self._cameras[i].get_image())
camera_obs = np.stack(images, axis=0)
if self._normalized_observations:
camera_obs = self.normalize_observation_for_key(camera_obs, 'pixel')
return camera_obs
|
class TriFingerRobot(object):
    def __init__(self, action_mode, observation_mode, skip_frame, normalize_actions, normalize_observations, simulation_time, pybullet_client_full_id, pybullet_client_w_goal_id, pybullet_client_w_o_goal_id, revolute_joint_ids, finger_tip_ids, cameras=None, camera_indicies=np.array([0, 1, 2])):
        """
        Provides the functionalities of the trifinger robot itself.

        :param action_mode: (str) 'joint_positions', 'end_effector_positions'
                            or 'joint_torques'.
        :param observation_mode: (str) 'structured' or camera-based ('pixel').
        :param skip_frame: (int) low-level steps per env step; the low level
                           controller runs at 250Hz, so skip_frame=250 means 1Hz.
        :param normalize_actions: (bool) whether actions passed to step are
                                  normalized.
        :param normalize_observations: (bool) whether returned observations
                                       are normalized.
        :param simulation_time: (float) time of one pybullet simulation step.
        :param pybullet_client_full_id: (int) pybullet client id, full mode.
        :param pybullet_client_w_goal_id: (int) pybullet client id, with goal.
        :param pybullet_client_w_o_goal_id: (int) pybullet client id, without goal.
        :param revolute_joint_ids: (list) joint ids in the urdf.
        :param finger_tip_ids: (list) finger tip ids in the urdf.
        :param cameras: (list) Camera objects.
        :param camera_indicies: (list) up to 3 camera indices selecting which
            cameras appear in the observations and in which order.
            NOTE(review): mutable (np.array) default argument — shared across
            instances that do not pass their own value.
        """
        self._pybullet_client_full_id = pybullet_client_full_id
        self._pybullet_client_w_goal_id = pybullet_client_w_goal_id
        self._pybullet_client_w_o_goal_id = pybullet_client_w_o_goal_id
        self._revolute_joint_ids = revolute_joint_ids
        self._finger_tip_ids = finger_tip_ids
        self._normalize_actions = normalize_actions
        self._normalize_observations = normalize_observations
        self._action_mode = action_mode
        self._observation_mode = observation_mode
        self._skip_frame = skip_frame
        self._simulation_time = simulation_time
        # effective duration of one env step
        self._dt = (self._simulation_time * self._skip_frame)
        # -1 means no control step has been applied yet
        self._control_index = (- 1)
        # PD controller gains, one triple per finger
        self._position_gains = np.array(([10.0, 10.0, 10.0] * 3))
        self._velocity_gains = np.array(([0.1, 0.3, 0.001] * 3))
        # damping used by the torque safety check
        self._safety_kd = np.array(([0.08, 0.08, 0.04] * 3))
        self._max_motor_torque = 0.36
        self._robot_actions = TriFingerAction(action_mode, normalize_actions)
        if (self._pybullet_client_w_goal_id is not None):
            self._set_finger_state_in_goal_image()
        self._tool_cameras = cameras
        self._camera_indicies = camera_indicies
        self._robot_observations = TriFingerObservations(observation_mode, normalize_observations, cameras=self._tool_cameras, camera_indicies=self._camera_indicies)
        self._last_action = None
        self._last_clipped_action = None
        # only position-control modes track the last applied joint positions
        if (action_mode != 'joint_torques'):
            self._last_applied_joint_positions = None
        self._latest_full_state = None
        # 9 joint positions + 9 joint velocities
        self._state_size = 18
        self._disable_velocity_control()
        return
def get_link_names(self):
'\n :return: (list) returns the link names in the urdf\n '
return WorldConstants.LINK_IDS
def get_control_index(self):
'\n\n :return: (int) returns the current control index\n '
return self._control_index
def get_full_env_state(self):
'\n\n :return: returns the current state variables and their values in the\n environment wrt to the robot.\n '
return self.get_current_variable_values()
def set_full_env_state(self, env_state):
'\n This function is used to set the env state through interventions on\n the environment itself\n\n :param env_state: (dict) specifies the state variables and its values\n to intervene on.\n\n :return: None\n '
self.apply_interventions(env_state)
return
def update_latest_full_state(self):
'\n Updates the latest full state in terms of joint positions, velocities,\n torques..etc\n\n :return: None\n '
if (self._pybullet_client_full_id is not None):
current_joint_states = pybullet.getJointStates(WorldConstants.ROBOT_ID, self._revolute_joint_ids, physicsClientId=self._pybullet_client_full_id)
else:
current_joint_states = pybullet.getJointStates(WorldConstants.ROBOT_ID, self._revolute_joint_ids, physicsClientId=self._pybullet_client_w_o_goal_id)
current_position = np.array([joint[0] for joint in current_joint_states])
current_velocity = np.array([joint[1] for joint in current_joint_states])
current_torques = np.array([joint[3] for joint in current_joint_states])
self._latest_full_state = {'positions': current_position, 'velocities': current_velocity, 'torques': current_torques, 'end_effector_positions': self._compute_end_effector_positions(current_position)}
return
def compute_pd_control_torques(self, joint_positions):
'\n Compute torque command to reach given target position using a PD\n controller.\n\n :param joint_positions: (list) Desired joint positions.\n\n :return: (list) torques to be sent to the joints of the finger in order to\n reach the specified joint_positions.\n '
position_error = (joint_positions - self._latest_full_state['positions'])
position_feedback = (np.asarray(self._position_gains) * position_error)
velocity_feedback = (np.asarray(self._velocity_gains) * self._latest_full_state['velocities'])
joint_torques = (position_feedback - velocity_feedback)
return joint_torques
def set_action_mode(self, action_mode):
'\n Sets the action mode\n\n :param action_mode: (str) specifies the action mode of the robot.\n\n :return: None\n '
self._action_mode = action_mode
self._robot_actions = TriFingerAction(action_mode, self._normalize_actions)
def get_joint_positions_raised(self):
'\n :return: (list) returns the upper joint positions limit.\n '
return self._robot_actions.joint_positions_raised
def get_action_mode(self):
'\n\n :return: (str) returns the current action mode.\n '
return self._action_mode
def set_observation_mode(self, observation_mode):
'\n Sets the observation mode\n\n :param observation_mode: (str) sets the observation mode of the robot\n itself.\n\n :return: None\n '
self._observation_mode = observation_mode
self._robot_observations = TriFingerObservations(observation_mode, self._normalize_observations, cameras=self._tool_cameras, camera_indicies=self._camera_indicies)
return
def get_observation_mode(self):
'\n\n :return: (str) returns the observation mode of the robot.\n '
return self._observation_mode
def get_skip_frame(self):
'\n\n :return: (int) returns the current skip frame.\n '
return self._skip_frame
def get_full_state(self):
'\n\n :return: (nd.array) return the positions and velocities of the three\n fingers concatenated.\n '
return np.append(self._latest_full_state['positions'], self._latest_full_state['velocities'])
def set_full_state(self, state):
'\n\n :param state: (nd.array) sets the positions and velocities, shape (18,).\n :return: None\n '
self._set_finger_state(state[:9], state[9:])
self._last_action = np.zeros(9)
self._last_clipped_action = np.zeros(9)
if (self._action_mode != 'joint_torques'):
self._last_applied_joint_positions = list(state[:9])
return
def get_last_action(self):
'\n\n :return: (nd.array) returns the last action passed to the robot.\n '
return self._last_action
def get_last_clipped_action(self):
'\n\n :return: (nd.array) returns the last clipped action passed to the\n robot, based on the range of the action space.\n '
return self._last_clipped_action
def get_last_applied_joint_positions(self):
'\n\n :return: (nd.array) returns the last applied joint positions passed to\n the pd controller. This is not valid if the action\n mode is "joint_torques".\n '
return self._last_applied_joint_positions
def get_observation_spaces(self):
'\n\n :return: (gym.Spaces) returns the current observation space of the\n robot.\n '
return self._robot_observations.get_observation_spaces()
def get_action_spaces(self):
'\n\n :return: (gym.Spaces) returns the current action space of the\n robot.\n '
return self._robot_actions.get_action_space()
def get_state_size(self):
'\n\n :return: (int) returns the state size of the robot, mainly joint\n positions and joint velocities that defines the state\n of the robot, ignoring interventions.\n '
return self._state_size
def step_simulation(self):
'\n steps through the simulation function of the pybullet backend.\n\n :return:\n '
if (self._pybullet_client_full_id is not None):
pybullet.stepSimulation(physicsClientId=self._pybullet_client_full_id)
if (self._pybullet_client_w_o_goal_id is not None):
pybullet.stepSimulation(physicsClientId=self._pybullet_client_w_o_goal_id)
self.update_latest_full_state()
return
    def apply_action(self, action):
        """
        Apply the given action for ``self._skip_frame`` low-level steps.

        :param action: (nd.array) action adhering to the current action mode
                       (normalized if the robot was configured that way).

        :return: None

        :raises Exception: if the current action mode is unsupported.
        """
        self._control_index += 1
        # clip to the action space, then undo normalization if needed
        clipped_action = self._robot_actions.clip_action(action)
        action_to_apply = clipped_action
        if self._normalize_actions:
            action_to_apply = self._robot_actions.denormalize_action(clipped_action)
        if (self._action_mode == 'joint_positions'):
            self._last_applied_joint_positions = action_to_apply
            # track the position target with the PD controller every frame
            for _ in range(self._skip_frame):
                desired_torques = self.compute_pd_control_torques(action_to_apply)
                self.send_torque_commands(desired_torque_commands=desired_torques)
                self.step_simulation()
        elif (self._action_mode == 'joint_torques'):
            # raw torque control: re-send the same torques every frame
            for _ in range(self._skip_frame):
                self.send_torque_commands(desired_torque_commands=action_to_apply)
                self.step_simulation()
        elif (self._action_mode == 'end_effector_positions'):
            # reuse the previous IK solution when the tips are already at
            # the requested positions
            if np.isclose(self._latest_full_state['end_effector_positions'], action_to_apply).all():
                joint_positions = self._last_applied_joint_positions
            else:
                joint_positions = self.get_joint_positions_from_tip_positions(action_to_apply, list(self._latest_full_state['positions']))
            self._last_applied_joint_positions = joint_positions
            for _ in range(self._skip_frame):
                desired_torques = self.compute_pd_control_torques(joint_positions)
                self.send_torque_commands(desired_torque_commands=desired_torques)
                self.step_simulation()
        else:
            raise Exception('The action mode {} is not supported'.format(self._action_mode))
        # remember both the raw and the clipped action for the observations
        self._last_action = action
        self._last_clipped_action = clipped_action
        return
def get_dt(self):
'\n\n :return: (float) returns the current dt of one step. How much time is\n equivilant to one step function.\n '
return self._dt
def get_latest_full_state(self):
'\n\n :return: (dict) returns a dict with joint velocities, joint positions and joint torques\n of the robot.\n '
return self._latest_full_state
def send_torque_commands(self, desired_torque_commands):
'\n\n :param desired_torque_commands: (nd.array) the desired torque commands to be applied\n to the robot.\n\n :return: (nd.array) the actual torque commands sent to the robot after applying a safety\n check.\n '
torque_commands = self._safety_torque_check(desired_torque_commands)
if (self._pybullet_client_w_o_goal_id is not None):
pybullet.setJointMotorControlArray(bodyUniqueId=WorldConstants.ROBOT_ID, jointIndices=self._revolute_joint_ids, controlMode=pybullet.TORQUE_CONTROL, forces=torque_commands, physicsClientId=self._pybullet_client_w_o_goal_id)
if (self._pybullet_client_full_id is not None):
pybullet.setJointMotorControlArray(bodyUniqueId=WorldConstants.ROBOT_ID, jointIndices=self._revolute_joint_ids, controlMode=pybullet.TORQUE_CONTROL, forces=torque_commands, physicsClientId=self._pybullet_client_full_id)
return torque_commands
def _safety_torque_check(self, desired_torques):
'\n limits desired_torques to max_motor_torque \n and lowers amplitude of desired_torques in velocity direction\n \n :param desired_torques: (nd.array) the desired torque commands to be applied\n to the robot.\n\n :return: (list) the modified torque commands after applying a safety check.\n '
applied_torques = clip(np.asarray(desired_torques), (- self._max_motor_torque), self._max_motor_torque)
applied_torques = clip((applied_torques - (self._safety_kd * self._latest_full_state['velocities'])), (- self._max_motor_torque), self._max_motor_torque)
return list(applied_torques)
    def inverse_kinematics(self, desired_tip_positions, rest_pose):
        """
        Solve inverse kinematics for the three finger tips.

        :param desired_tip_positions: (list) desired tip positions in world
                                      frame (floor-relative z coordinates).
        :param rest_pose: (list) initial IK solution to start from.

        :return: (nd.array) 9 joint positions; falls back to ``rest_pose``
                 when the solver produces NaNs.
        """
        desired = np.array(desired_tip_positions)
        # convert floor-relative z coordinates to absolute world z
        desired[2] += WorldConstants.FLOOR_HEIGHT
        desired[5] += WorldConstants.FLOOR_HEIGHT
        desired[8] += WorldConstants.FLOOR_HEIGHT
        if (self._pybullet_client_w_o_goal_id is not None):
            client = self._pybullet_client_w_o_goal_id
        else:
            client = self._pybullet_client_full_id
        joint_pos = np.zeros([9])
        finger_tip_ids = self._finger_tip_ids
        # One IK solve per finger; only that finger's 3 joints are kept from
        # each solution. NOTE(review): the tip ordering differs per call —
        # presumably so the solved finger comes first — confirm intent.
        final_joint_pose = pybullet.calculateInverseKinematics2(WorldConstants.ROBOT_ID, [finger_tip_ids[0], finger_tip_ids[1], finger_tip_ids[2]], [desired[0:3], desired[3:6], desired[6:]], solver=pybullet.IK_DLS, currentPositions=rest_pose, physicsClientId=client)
        joint_pos[:3] = final_joint_pose[:3]
        final_joint_pose = pybullet.calculateInverseKinematics2(WorldConstants.ROBOT_ID, [finger_tip_ids[1], finger_tip_ids[0], finger_tip_ids[2]], [desired[3:6], desired[0:3], desired[6:]], solver=pybullet.IK_DLS, currentPositions=rest_pose, physicsClientId=client)
        joint_pos[3:6] = final_joint_pose[3:6]
        final_joint_pose = pybullet.calculateInverseKinematics2(WorldConstants.ROBOT_ID, [finger_tip_ids[2], finger_tip_ids[0], finger_tip_ids[1]], [desired[6:], desired[0:3], desired[3:6]], solver=pybullet.IK_DLS, currentPositions=rest_pose, physicsClientId=client)
        joint_pos[6:] = final_joint_pose[6:]
        # the solver can return NaN for unreachable targets
        if np.isnan(joint_pos).any():
            joint_pos = rest_pose
        return joint_pos
    def get_joint_positions_from_tip_positions(self, tip_positions, default_pose=None):
        """
        Compute joint positions achieving the given tip positions.

        :param tip_positions: (list) desired tip positions in world frame.
            NOTE(review): mutated in place (floor height is added to the z
            entries) — callers reusing the array should pass a copy.
        :param default_pose: (list) initial IK solution to start from.

        :return: (nd.array) 9 joint positions.
        """
        # NOTE(review): inverse_kinematics() also adds FLOOR_HEIGHT to its
        # own copy of the targets, so the offset appears to be applied twice
        # on this code path — confirm whether the double shift is intended.
        tip_positions[2] += WorldConstants.FLOOR_HEIGHT
        tip_positions[5] += WorldConstants.FLOOR_HEIGHT
        tip_positions[8] += WorldConstants.FLOOR_HEIGHT
        if (default_pose is None):
            positions = self.inverse_kinematics(tip_positions, list(self.get_rest_pose()[0]))
        else:
            positions = self.inverse_kinematics(tip_positions, list(default_pose))
        return positions
def get_current_camera_observations(self):
'\n\n :return: (nd.array) returns the current camera observations from the cameras selected on the robot\n in case the observation mode was "pixel"\n '
return self._robot_observations.get_current_camera_observations()
def get_rest_pose(self):
'\n :return: (tuple) returns the rest pose that the robot usually start from, the first in the tuple\n being the joint positions and the second is the end effector positions.\n '
deg45 = (np.pi / 4)
positions = [0, deg45, (- deg45)]
joint_positions = (positions * 3)
end_effector_positions = [0.05142966, 0.03035857, 0.32112874, 0.00057646, (- 0.05971867), 0.32112874, (- 0.05200612), 0.02936011, 0.32112874]
return (joint_positions, end_effector_positions)
def get_default_state(self):
'\n\n :return: (nd.array) returns the default state of the robot, (18,) first 9 positions occupy the\n joint positions and the second 9 positions occupy the joint velocities which is zero.\n '
return np.append(self.get_rest_pose()[0], np.zeros(9))
    def get_current_variable_values(self):
        """
        Collect all exposed robot variables and their current values.

        :return: (dict) joint positions/velocities, control index, robot
                 height and, per link, its color and mass.
        """
        variable_params = dict()
        variable_params['joint_positions'] = self._latest_full_state['positions']
        variable_params['control_index'] = self._control_index
        variable_params['joint_velocities'] = self._latest_full_state['velocities']
        # prefer the without-goal client when available
        if (self._pybullet_client_w_o_goal_id is not None):
            client = self._pybullet_client_w_o_goal_id
        else:
            client = self._pybullet_client_full_id
        (position, _) = pybullet.getBasePositionAndOrientation(WorldConstants.ROBOT_ID, physicsClientId=client)
        # base z plus the nominal robot height
        variable_params['robot_height'] = (position[(- 1)] + WorldConstants.ROBOT_HEIGHT)
        for robot_finger_link in WorldConstants.LINK_IDS:
            variable_params[robot_finger_link] = dict()
            # visual shape entry index 7 holds RGBA; keep only RGB
            variable_params[robot_finger_link]['color'] = pybullet.getVisualShapeData(WorldConstants.ROBOT_ID, physicsClientId=client)[WorldConstants.VISUAL_SHAPE_IDS[robot_finger_link]][7][:3]
            # dynamics info entry 0 is the link mass
            variable_params[robot_finger_link]['mass'] = pybullet.getDynamicsInfo(WorldConstants.ROBOT_ID, WorldConstants.LINK_IDS[robot_finger_link], physicsClientId=client)[0]
        return variable_params
def get_current_observations(self, helper_keys):
'\n\n :param helper_keys: (list) list of observation keys not part of the default observation space but needed\n to compute part of a reward function or to compute custom observations.\n\n :return: (dict) returns the full observations of the robot itself with the values of the helper keys\n as well.\n '
return self._robot_observations.get_current_observations(self._latest_full_state, helper_keys)
def _compute_end_effector_positions(self, joint_positions):
'\n\n :param joint_positions: (nd.array) the current joint positions of the robot (not used for now, might be used\n for pinnochio)\n\n :return: (nd.array) the current end effector positions of the robot.\n '
if (self._pybullet_client_full_id is not None):
position_1 = pybullet.getLinkState(WorldConstants.ROBOT_ID, linkIndex=5, computeForwardKinematics=True, physicsClientId=self._pybullet_client_full_id)[0]
position_2 = pybullet.getLinkState(WorldConstants.ROBOT_ID, linkIndex=10, computeForwardKinematics=True, physicsClientId=self._pybullet_client_full_id)[0]
position_3 = pybullet.getLinkState(WorldConstants.ROBOT_ID, linkIndex=15, computeForwardKinematics=True, physicsClientId=self._pybullet_client_full_id)[0]
else:
position_1 = pybullet.getLinkState(WorldConstants.ROBOT_ID, linkIndex=5, computeForwardKinematics=True, physicsClientId=self._pybullet_client_w_o_goal_id)[0]
position_2 = pybullet.getLinkState(WorldConstants.ROBOT_ID, linkIndex=10, computeForwardKinematics=True, physicsClientId=self._pybullet_client_w_o_goal_id)[0]
position_3 = pybullet.getLinkState(WorldConstants.ROBOT_ID, linkIndex=15, computeForwardKinematics=True, physicsClientId=self._pybullet_client_w_o_goal_id)[0]
result = ((list(position_1) + list(position_2)) + list(position_3))
result[2] -= WorldConstants.FLOOR_HEIGHT
result[5] -= WorldConstants.FLOOR_HEIGHT
result[(- 1)] -= WorldConstants.FLOOR_HEIGHT
return np.array(result)
def _process_action_joint_positions(self, robot_state):
'\n This returns the absolute joint positions command sent in position control mode\n (end effector and joint positions), this observation shouldnt be used in torque control\n\n :param robot_state: (dict) the current robot state.\n\n :return: (nd.array) returns te last joint actions applied to be sued as part\n of the observations.\n '
last_joints_action_applied = self.get_last_applied_joint_positions()
if self._normalize_observations:
last_joints_action_applied = self.normalize_observation_for_key(observation=last_joints_action_applied, key='action_joint_positions')
return last_joints_action_applied
def clear(self):
'\n clears the robot for a reset for instance.\n\n :return: None.\n '
self._last_action = np.zeros(9)
self._last_clipped_action = np.zeros(9)
self._last_applied_joint_positions = self._latest_full_state['positions']
self._control_index = (- 1)
return
def reset_state(self, joint_positions=None, joint_velocities=None, end_effector_positions=None):
"\n\n :param joint_positions: (nd.array) the joint positions for the root to be reset in.\n :param joint_velocities: (nd.array) the joint velocities for the root to be reset in.\n :param end_effector_positions: (nd.array) the end effector positions for the root to be reset in,\n this shouldn't be used in combination with the other args.\n\n :return:\n "
self._latest_full_state = None
self._control_index = (- 1)
if (end_effector_positions is not None):
joint_positions = self.get_joint_positions_from_tip_positions(end_effector_positions, list(self.get_rest_pose()[0]))
if (joint_positions is None):
joint_positions = list(self.get_rest_pose()[0])
if (joint_velocities is None):
joint_velocities = np.zeros(9)
self._set_finger_state(joint_positions, joint_velocities)
self._last_action = np.zeros(9)
self._last_clipped_action = np.zeros(9)
if (self._action_mode != 'joint_torques'):
self._last_applied_joint_positions = list(joint_positions)
return
def sample_joint_positions(self, sampling_strategy='uniform'):
'\n\n :param sampling_strategy: (str) this only supports "uniform" strategy for now.\n\n :return: (nd.array) returns the sampled joint positions.\n '
if (sampling_strategy == 'uniform'):
positions = np.random.uniform(self._robot_actions.joint_positions_lower_bounds, self._robot_actions.joint_positions_upper_bounds)
else:
raise Exception('not yet implemented')
return positions
def sample_end_effector_positions(self, sampling_strategy='middle_stage'):
'\n\n :param sampling_strategy: (str) this only supports "middle_stage" strategy for now.\n\n :return: (nd.array) returns the sampled end effector positions.\n '
if (sampling_strategy == 'middle_stage'):
tip_positions = np.random.uniform([0.1, 0.1, 0.15, 0.1, (- 0.15), 0.15, (- 0.15), (- 0.15), 0.15], [0.15, 0.15, 0.15, 0.15, (- 0.1), 0.15, (- 0.1), (- 0.1), 0.15])
else:
raise Exception('not yet implemented')
return tip_positions
def forward_simulation(self, time=1):
'\n\n :param time: (float) forwards the simulation by the time specified.\n\n :return:\n '
old_action_mode = self.get_action_mode()
self.set_action_mode('joint_positions')
n_steps = int((time / self._simulation_time))
action_to_apply = self._latest_full_state['positions']
for _ in range(n_steps):
desired_torques = self.compute_pd_control_torques(action_to_apply)
self.send_torque_commands(desired_torque_commands=desired_torques)
self.step_simulation()
self.set_action_mode(old_action_mode)
return
def select_observations(self, observation_keys):
'\n\n :param observation_keys: (list) the observations keys for the robot to be added\n in the observation space itself.\n\n :return: None.\n '
self._robot_observations.reset_observation_keys()
for key in observation_keys:
if (key == 'action_joint_positions'):
self._robot_observations.add_observation('action_joint_positions', observation_fn=self._process_action_joint_positions)
else:
self._robot_observations.add_observation(key)
return
def close(self):
'\n closes the pybullet clients connected.\n\n :return: None.\n '
if (self._pybullet_client_full_id is not None):
pybullet.disconnect(physicsClientId=self._pybullet_client_full_id)
if (self._pybullet_client_w_o_goal_id is not None):
pybullet.disconnect(physicsClientId=self._pybullet_client_w_o_goal_id)
if (self._pybullet_client_w_goal_id is not None):
pybullet.disconnect(physicsClientId=self._pybullet_client_w_goal_id)
return
def add_observation(self, observation_key, lower_bound=None, upper_bound=None, observation_fn=None):
'\n\n :param observation_key: (str) observation name to be added.\n :param lower_bound: (nd.array) the lower bound of this observation when added to the space unnormalized.\n :param upper_bound: (nd.array) the upper bound of this observation when added to the space unnormalized.\n :param observation_fn: (function) a callable function that when passed the robot stat, it calculates\n this custom observation.\n\n :return: None.\n '
self._robot_observations.add_observation(observation_key, lower_bound, upper_bound, observation_fn)
return
def normalize_observation_for_key(self, observation, key):
'\n\n :param observation: (nd.array) the observation to be normalized.\n :param key: (str) the key corresponding to this observation.\n\n :return: (nd.array) observation after normalization.\n '
return self._robot_observations.normalize_observation_for_key(observation, key)
def denormalize_observation_for_key(self, observation, key):
'\n\n :param observation: (nd.array) the observation to be denormalized.\n :param key: (str) the key corresponding to this observation.\n\n :return: (nd.array) observation after de-normalization.\n '
return self._robot_observations.denormalize_observation_for_key(observation, key)
def apply_interventions(self, interventions_dict):
    """Apply a do-intervention on variables that belong to the robot only.

    :param interventions_dict: (dict) a dictionary specifying which variables and values for a do intervention
                                      on variables that belong to the robot only.

    :return: None.
    """
    # Full-state layout: entries 0..8 are joint positions, 9..17 are
    # joint velocities (see get_full_state slicing below).
    old_state = self.get_full_state()
    if ('joint_positions' in interventions_dict):
        new_joint_positions = interventions_dict['joint_positions']
    else:
        new_joint_positions = old_state[:9]
    if ('joint_velocities' in interventions_dict):
        # NOTE(review): local name keeps its historical typo ('velcoities').
        new_joint_velcoities = interventions_dict['joint_velocities']
    else:
        new_joint_velcoities = old_state[9:]
    if (('joint_positions' in interventions_dict) or ('joint_velocities' in interventions_dict)):
        # Force the finger state and reset the action bookkeeping, since the
        # joints were set externally rather than by an applied action.
        self._set_finger_state(new_joint_positions, new_joint_velcoities)
        self._last_action = np.zeros(9)
        self._last_clipped_action = np.zeros(9)
        if (self._action_mode != 'joint_torques'):
            self._last_applied_joint_positions = list(new_joint_positions)
    for intervention in interventions_dict:
        if ((intervention == 'joint_velocities') or (intervention == 'joint_positions')):
            # Already handled above.
            continue
        if (intervention == 'robot_height'):
            # Move the robot base in every active pybullet client; the new
            # height is expressed relative to the default ROBOT_HEIGHT.
            if (self._pybullet_client_w_goal_id is not None):
                pybullet.resetBasePositionAndOrientation(WorldConstants.ROBOT_ID, [0, 0, (interventions_dict[intervention] - WorldConstants.ROBOT_HEIGHT)], [0, 0, 0, 1], physicsClientId=self._pybullet_client_w_goal_id)
            if (self._pybullet_client_w_o_goal_id is not None):
                pybullet.resetBasePositionAndOrientation(WorldConstants.ROBOT_ID, [0, 0, (interventions_dict[intervention] - WorldConstants.ROBOT_HEIGHT)], [0, 0, 0, 1], physicsClientId=self._pybullet_client_w_o_goal_id)
            if (self._pybullet_client_full_id is not None):
                pybullet.resetBasePositionAndOrientation(WorldConstants.ROBOT_ID, [0, 0, (interventions_dict[intervention] - WorldConstants.ROBOT_HEIGHT)], [0, 0, 0, 1], physicsClientId=self._pybullet_client_full_id)
            self.update_latest_full_state()
            continue
        if ('robot_finger' in intervention):
            # Per-finger-link interventions; only 'color' and 'mass' are
            # supported sub-variables.
            for sub_intervention_variable in interventions_dict[intervention]:
                if (sub_intervention_variable == 'color'):
                    # Color is visual only, so it is applied in all clients
                    # including the goal-rendering one.
                    if (self._pybullet_client_w_goal_id is not None):
                        pybullet.changeVisualShape(WorldConstants.ROBOT_ID, WorldConstants.LINK_IDS[intervention], rgbaColor=np.append(interventions_dict[intervention][sub_intervention_variable], 1), physicsClientId=self._pybullet_client_w_goal_id)
                    if (self._pybullet_client_w_o_goal_id is not None):
                        pybullet.changeVisualShape(WorldConstants.ROBOT_ID, WorldConstants.LINK_IDS[intervention], rgbaColor=np.append(interventions_dict[intervention][sub_intervention_variable], 1), physicsClientId=self._pybullet_client_w_o_goal_id)
                    if (self._pybullet_client_full_id is not None):
                        pybullet.changeVisualShape(WorldConstants.ROBOT_ID, WorldConstants.LINK_IDS[intervention], rgbaColor=np.append(interventions_dict[intervention][sub_intervention_variable], 1), physicsClientId=self._pybullet_client_full_id)
                elif (sub_intervention_variable == 'mass'):
                    # Mass affects dynamics only, so the goal-rendering
                    # client is deliberately skipped here.
                    if (self._pybullet_client_w_o_goal_id is not None):
                        pybullet.changeDynamics(WorldConstants.ROBOT_ID, WorldConstants.LINK_IDS[intervention], mass=interventions_dict[intervention][sub_intervention_variable], physicsClientId=self._pybullet_client_w_o_goal_id)
                    if (self._pybullet_client_full_id is not None):
                        pybullet.changeDynamics(WorldConstants.ROBOT_ID, WorldConstants.LINK_IDS[intervention], mass=interventions_dict[intervention][sub_intervention_variable], physicsClientId=self._pybullet_client_full_id)
                else:
                    raise Exception('The intervention state variable specified is not allowed')
        elif (intervention == 'control_index'):
            self._control_index = interventions_dict['control_index']
        else:
            raise Exception('The intervention state variable specified is not allowed', intervention)
    return
def check_feasibility_of_robot_state(self):
    """Check whether the robot's current state is feasible.

    A state is infeasible if the robot is penetrating anything beyond a
    small tolerance.

    :return: (bool) True if the robot is NOT in a penetrating collision,
             False otherwise.
    """
    client = self._pybullet_client_full_id
    if client is None:
        client = self._pybullet_client_w_o_goal_id
    # contact[8] is the contact distance; a strongly negative value means
    # the bodies are inter-penetrating rather than merely touching.
    for contact in pybullet.getContactPoints(physicsClientId=client):
        robot_involved = (contact[1] == WorldConstants.ROBOT_ID
                          or contact[2] == WorldConstants.ROBOT_ID)
        if robot_involved and contact[8] < -0.0095:
            return False
    return True
def is_self_colliding(self):
    """Check whether the robot is colliding with itself.

    :return: (bool) True if any contact point has the robot as both bodies.
    """
    client = self._pybullet_client_full_id
    if client is None:
        client = self._pybullet_client_w_o_goal_id
    # A self-collision contact lists the robot id for both bodies A and B.
    return any(contact[1] == WorldConstants.ROBOT_ID
               and contact[2] == WorldConstants.ROBOT_ID
               for contact in pybullet.getContactPoints(physicsClientId=client))
def is_colliding_with_stage(self):
    """Check whether the robot is colliding with the stage.

    :return: (bool) True if the robot and the stage share a contact point.
    """
    client = self._pybullet_client_full_id
    if client is None:
        client = self._pybullet_client_w_o_goal_id
    expected_pair = {WorldConstants.ROBOT_ID, WorldConstants.STAGE_ID}
    for contact in pybullet.getContactPoints(physicsClientId=client):
        # Either body can be the robot, so compare as an unordered pair.
        if {contact[1], contact[2]} == expected_pair:
            return True
    return False
def is_in_contact_with_block(self, block):
    """Check whether the robot is in contact with the given block.

    :param block: (causal_world.envs.RigidObject) rigid object to query
                  collision with the robot.

    :return: (bool) True if the robot and the block share a contact point.
    """
    client = self._pybullet_client_full_id
    if client is None:
        client = self._pybullet_client_w_o_goal_id
    expected_pair = {WorldConstants.ROBOT_ID, block._block_ids[0]}
    for contact in pybullet.getContactPoints(physicsClientId=client):
        # Either body can be the robot, so compare as an unordered pair.
        if {contact[1], contact[2]} == expected_pair:
            return True
    return False
def get_normal_interaction_force_with_block(self, block, finger_tip_number):
    """Return the normal interaction force between a block and a finger tip.

    BUGFIX: a previous version contained an unfiltered loop that returned
    the force of ANY robot/block contact before the finger-tip filter was
    ever reached, making `finger_tip_number` effectively ignored. That dead
    loop has been removed so the result is specific to the requested tip.

    :param block: (causal_world.envs.RigidObject) rigid object to query
                  collision with the robot.
    :param finger_tip_number: (int) should be 60, 120 or 300.

    :return: (nd.array) normal interaction force vector between the block
             and the given finger tip, or None if no interaction exists.

    :raises Exception: if finger_tip_number is not one of 60, 120, 300.
    """
    if self._pybullet_client_full_id is not None:
        client = self._pybullet_client_full_id
    else:
        client = self._pybullet_client_w_o_goal_id
    if finger_tip_number == 60:
        idx = WorldConstants.LINK_IDS['robot_finger_60_link_3']
    elif finger_tip_number == 120:
        idx = WorldConstants.LINK_IDS['robot_finger_120_link_3']
    elif finger_tip_number == 300:
        idx = WorldConstants.LINK_IDS['robot_finger_300_link_3']
    else:
        raise Exception('finger tip number doesnt exist')
    # contact[9] is the normal force magnitude and contact[7] the contact
    # normal on body B; their product is the normal force vector.
    # contact[3]/contact[4] are the link indices on body A/body B.
    for contact in pybullet.getContactPoints(physicsClientId=client):
        if ((contact[1] == WorldConstants.ROBOT_ID
             and contact[2] == block._block_ids[0]
             and contact[3] == idx)
                or (contact[2] == WorldConstants.ROBOT_ID
                    and contact[1] == block._block_ids[0]
                    and contact[4] == idx)):
            return contact[9] * np.array(contact[7])
    return None
def get_tip_contact_states(self):
    """Return per-finger-tip contact flags.

    BUGFIXES:
    - the client id used to be assigned the boolean of the None-check
      (`client = (self._pybullet_client_w_o_goal_id is not None)`) instead
      of the client id itself, so pybullet was queried with `True` as the
      physicsClientId;
    - one branch checked 'robot_finger_180_link_3' although the fingers are
      named 60, 120 and 300 everywhere else; it now checks 120.

    :return: (list) list of 3 flags, one per finger tip (60, 120, 300);
             0 when the corresponding tip is not in contact with anything
             and 1 otherwise.
    """
    if self._pybullet_client_w_o_goal_id is not None:
        client = self._pybullet_client_w_o_goal_id
    else:
        client = self._pybullet_client_full_id
    contact_tips = [0, 0, 0]
    for contact in pybullet.getContactPoints(physicsClientId=client):
        # contact[3]/contact[4] are the link indices on body A/body B;
        # pick the link index of whichever body is the robot.
        if contact[1] == WorldConstants.ROBOT_ID:
            link = contact[3]
        elif contact[2] == WorldConstants.ROBOT_ID:
            link = contact[4]
        else:
            continue
        if link == WorldConstants.LINK_IDS['robot_finger_60_link_3']:
            contact_tips[0] = 1
        elif link == WorldConstants.LINK_IDS['robot_finger_120_link_3']:
            contact_tips[1] = 1
        elif link == WorldConstants.LINK_IDS['robot_finger_300_link_3']:
            contact_tips[2] = 1
    return contact_tips
def _disable_velocity_control(self):
    """Disable the default high-friction velocity motors.

    Pybullet creates velocity motors at all revolute and prismatic joints
    when loading a urdf; zeroing their target velocities and forces
    disables them.

    :return: None
    """
    zeros = [0] * len(self._revolute_joint_ids)
    for client_id in (self._pybullet_client_full_id,
                      self._pybullet_client_w_o_goal_id):
        if client_id is None:
            continue
        pybullet.setJointMotorControlArray(
            bodyUniqueId=WorldConstants.ROBOT_ID,
            jointIndices=self._revolute_joint_ids,
            controlMode=pybullet.VELOCITY_CONTROL,
            targetVelocities=zeros,
            forces=zeros,
            physicsClientId=client_id)
    return
def _set_finger_state(self, joint_positions, joint_velocities=None):
    """Reset the finger joints in all physics clients.

    :param joint_positions: (nd.array) joint positions for setting the
                            finger state.
    :param joint_velocities: (nd.array) joint velocities for setting the
                             finger state; if None, only positions are set.

    :return: None.
    """
    for client_id in (self._pybullet_client_full_id,
                      self._pybullet_client_w_o_goal_id):
        if client_id is None:
            continue
        for joint_index, joint_id in enumerate(self._revolute_joint_ids):
            if joint_velocities is None:
                pybullet.resetJointState(WorldConstants.ROBOT_ID,
                                         joint_id,
                                         joint_positions[joint_index],
                                         physicsClientId=client_id)
            else:
                pybullet.resetJointState(WorldConstants.ROBOT_ID,
                                         joint_id,
                                         joint_positions[joint_index],
                                         joint_velocities[joint_index],
                                         physicsClientId=client_id)
    # Keep the cached full state consistent with the new joint state.
    self.update_latest_full_state()
    return
def _set_finger_state_in_goal_image(self):
    """Raise the fingers out of the way in the goal-image client.

    The joints are reset to their lower bounds in the goal-rendering
    pybullet client only.

    :return:
    """
    target_positions = self._robot_actions.joint_positions_lower_bounds
    for joint_index, joint_id in enumerate(self._revolute_joint_ids):
        pybullet.resetJointState(
            WorldConstants.ROBOT_ID,
            joint_id,
            target_positions[joint_index],
            physicsClientId=self._pybullet_client_w_goal_id)
    return
|
class RigidObject(object):
    """Base class of any rigid object in the arena, fixed or free.

    Subclasses must implement `_create_object`, `_define_type_id` and
    `get_recreation_params`.
    """

    def __init__(self, pybullet_client_ids, name, size, initial_position, initial_orientation, mass, color, lateral_friction, spinning_friction, restitution, initial_linear_velocity, initial_angular_velocity, fixed_bool):
        """
        :param pybullet_client_ids: (list) pybullet client ids where this
                                           object will be created.
        :param name: (str) unique name of the object.
        :param size: (float list) 3 dimensional size of the object.
        :param initial_position: (float list) x, y, z position in the arena.
        :param initial_orientation: (float list) quaternion orientation.
        :param mass: (float) mass of the object, 0 if fixed.
        :param color: (float list) RGB values of the object.
        :param lateral_friction: (float) lateral friction of the object.
        :param spinning_friction: (float) spinning friction of the object.
        :param restitution: (float) restitution of the object.
        :param initial_linear_velocity: (float list) velocity in x, y, z.
        :param initial_angular_velocity: (float list) angular velocity in
                                         yaw, roll, pitch.
        :param fixed_bool: (bool) whether the object is fixed.
        """
        self._pybullet_client_ids = pybullet_client_ids
        self._name = name
        self._type_id = None
        self._mass = mass
        self._size = size
        self._not_fixed = (not fixed_bool)
        self._color = color
        self._initial_position = initial_position
        self._initial_orientation = initial_orientation
        self._initial_linear_velocity = initial_linear_velocity
        self._initial_angular_velocity = initial_angular_velocity
        self._lateral_friction = lateral_friction
        self._spinning_friction = spinning_friction
        self._restitution = restitution
        self._block_ids = []
        self._shape_ids = []
        self._define_type_id()
        self._volume = None
        self._set_volume()
        self._init_object()
        # Lower/upper bounds of every state variable, keyed '<name>_<var>'.
        self._lower_bounds = dict()
        self._upper_bounds = dict()
        self._lower_bounds[self._name + '_type'] = np.array([self._type_id])
        self._lower_bounds[self._name + '_cartesian_position'] = np.array([-0.5, -0.5, 0])
        self._lower_bounds[self._name + '_cylindrical_position'] = np.array([0, 0, 0])
        self._lower_bounds[self._name + '_orientation'] = np.array([-10] * 4)
        self._lower_bounds[self._name + '_friction'] = np.array([0])
        self._lower_bounds[self._name + '_size'] = np.array([0.03, 0.03, 0.03])
        self._lower_bounds[self._name + '_color'] = np.array([0] * 3)
        if self.is_not_fixed():
            self._lower_bounds[self._name + '_linear_velocity'] = np.array([-0.5] * 3)
            self._lower_bounds[self._name + '_angular_velocity'] = np.array([-np.pi] * 3)
            self._lower_bounds[self._name + '_mass'] = np.array([0.02])
        self._upper_bounds[self._name + '_type'] = np.array([self._type_id])
        self._upper_bounds[self._name + '_friction'] = np.array([10])
        self._upper_bounds[self._name + '_cartesian_position'] = np.array([0.5] * 3)
        self._upper_bounds[self._name + '_cylindrical_position'] = np.array([0.2, np.pi, 0.5])
        self._upper_bounds[self._name + '_orientation'] = np.array([10] * 4)
        self._upper_bounds[self._name + '_size'] = np.array([0.1, 0.1, 0.1])
        self._upper_bounds[self._name + '_color'] = np.array([1] * 3)
        if self.is_not_fixed():
            self._upper_bounds[self._name + '_linear_velocity'] = np.array([0.5] * 3)
            self._upper_bounds[self._name + '_angular_velocity'] = np.array([np.pi] * 3)
            self._upper_bounds[self._name + '_mass'] = np.array([0.2])
        self._state_variable_names = []
        # NOTE(review): 'type' appears twice in these lists historically;
        # it is kept as-is to preserve the state vector layout and
        # _state_size that callers depend on.
        if self.is_not_fixed():
            self._state_variable_names = ['type', 'cartesian_position', 'cylindrical_position', 'orientation', 'linear_velocity', 'angular_velocity', 'mass', 'size', 'color', 'friction', 'type']
        else:
            self._state_variable_names = ['type', 'cartesian_position', 'cylindrical_position', 'orientation', 'size', 'color', 'friction', 'type']
        self._state_variable_sizes = []
        self._state_size = 0
        for state_variable_name in self._state_variable_names:
            self._state_variable_sizes.append(self._upper_bounds[self._name + '_' + state_variable_name].shape[0])
            self._state_size += self._state_variable_sizes[-1]
        self._add_state_variables()
        return

    def get_initial_position(self):
        """
        :return: (nd.array) initial position where the object was created.
        """
        return self._initial_position

    def _set_volume(self):
        """Set the volume as the product of the three size dimensions.

        :return:
        """
        self._volume = self._size[0] * self._size[1] * self._size[2]
        return

    def _add_state_variables(self):
        """Hook for subclasses to register extra state variables.

        :return:
        """
        return

    def _create_object(self, pybullet_client_id, **kwargs):
        """Create the object in the given pybullet client.

        :param pybullet_client_id: (int) pybullet client id to create the
                                         object in.
        :param kwargs: (params) parameters of the object to be created.
        :return: (tuple) (shape_id, block_id).
        """
        raise NotImplementedError('the creation function is not defined yet')

    def _define_type_id(self):
        """Define the type id of the object.

        :return:
        """
        raise NotImplementedError('the define type id function is not defined yet')

    def get_recreation_params(self):
        """Return the params needed to recreate the same object again.

        :return:
        """
        raise NotImplementedError('the define type id function is not defined yet')

    def _init_object(self):
        """Create the object in every client and apply material properties.

        :return:
        """
        for pybullet_client_id in self._pybullet_client_ids:
            (shape_id, block_id) = self._create_object(pybullet_client_id)
            self._block_ids.append(block_id)
            self._shape_ids.append(shape_id)
        self._set_color(self._color)
        self._set_lateral_friction(self._lateral_friction)
        self._set_restitution(self._restitution)
        self._set_spinning_friction(self._spinning_friction)
        return

    def get_variable_state(self, variable_name):
        """Return the current value of one state variable.

        :param variable_name: (str) variable name to query about the object.
        :return: (nd.array, float or int) corresponding value of the variable.

        :raises Exception: if the variable name is unknown (previously the
                           method silently returned None).
        """
        if variable_name == 'type':
            return self._type_id
        elif variable_name == 'cartesian_position':
            (position, orientation) = pybullet.getBasePositionAndOrientation(self._block_ids[0], physicsClientId=self._pybullet_client_ids[0])
            position = np.array(position)
            # Positions are reported relative to the floor.
            position[-1] -= WorldConstants.FLOOR_HEIGHT
            return position
        elif variable_name == 'cylindrical_position':
            (position, orientation) = pybullet.getBasePositionAndOrientation(self._block_ids[0], physicsClientId=self._pybullet_client_ids[0])
            position = np.array(position)
            position[-1] -= WorldConstants.FLOOR_HEIGHT
            return cart2cyl(position)
        elif variable_name == 'orientation':
            (position, orientation) = pybullet.getBasePositionAndOrientation(self._block_ids[0], physicsClientId=self._pybullet_client_ids[0])
            return orientation
        elif variable_name == 'linear_velocity':
            (linear_velocity, angular_velocity) = pybullet.getBaseVelocity(self._block_ids[0], physicsClientId=self._pybullet_client_ids[0])
            return linear_velocity
        elif variable_name == 'angular_velocity':
            (linear_velocity, angular_velocity) = pybullet.getBaseVelocity(self._block_ids[0], physicsClientId=self._pybullet_client_ids[0])
            return angular_velocity
        elif variable_name == 'mass':
            return self._mass
        elif variable_name == 'size':
            return self._size
        elif variable_name == 'color':
            return self._color
        elif variable_name == 'friction':
            return self._lateral_friction
        else:
            raise Exception('The variable name specified is not supported', variable_name)

    def reinit_object(self):
        """Remove the object and reinitialize it again.

        :return:
        """
        self.remove()
        self._init_object()
        return

    def remove(self):
        """Remove the object from all pybullet clients.

        :return:
        """
        for i in range(0, len(self._pybullet_client_ids)):
            pybullet.removeBody(self._block_ids[i], physicsClientId=self._pybullet_client_ids[i])
        self._block_ids = []
        self._shape_ids = []
        return

    def _set_color(self, color):
        """
        :param color: (nd.array) the normalized RGB color, shape is (3,)
        :return:
        """
        for i in range(len(self._pybullet_client_ids)):
            pybullet.changeVisualShape(self._block_ids[i], -1, rgbaColor=np.append(color, 1), physicsClientId=self._pybullet_client_ids[i])
        return

    def _set_lateral_friction(self, lateral_friction):
        """
        :param lateral_friction: (float) specifies the lateral friction.
        :return:
        """
        for i in range(len(self._pybullet_client_ids)):
            pybullet.changeDynamics(bodyUniqueId=self._block_ids[i], linkIndex=-1, lateralFriction=lateral_friction, physicsClientId=self._pybullet_client_ids[i])

    def _set_restitution(self, restitution):
        """
        :param restitution: (float) specifies the restitution.
        :return:
        """
        for i in range(len(self._pybullet_client_ids)):
            pybullet.changeDynamics(bodyUniqueId=self._block_ids[i], linkIndex=-1, restitution=restitution, physicsClientId=self._pybullet_client_ids[i])

    def _set_spinning_friction(self, spinning_friction):
        """
        :param spinning_friction: (float) specifies the spinning friction.
        :return:
        """
        for i in range(len(self._pybullet_client_ids)):
            pybullet.changeDynamics(bodyUniqueId=self._block_ids[i], linkIndex=-1, spinningFriction=spinning_friction, physicsClientId=self._pybullet_client_ids[i])

    def _set_velocities(self):
        """Apply the initial velocities specified at construction time.

        :return:
        """
        for i in range(0, len(self._pybullet_client_ids)):
            pybullet.resetBaseVelocity(self._block_ids[i], self._initial_linear_velocity, self._initial_angular_velocity, physicsClientId=self._pybullet_client_ids[i])

    def set_pose(self, position, orientation):
        """Set the object's pose in every client.

        :param position: (nd.array) cartesian position of the object
                         (relative to the floor).
        :param orientation: (nd.array) quaternion orientation of the object.
        :return:
        """
        # BUGFIX: the floor-height offset used to be added to position[-1]
        # once PER client inside the loop, corrupting the pose for the 2nd+
        # client (and mutating the caller's array). Shift a copy once.
        shifted_position = np.array(position, dtype=float)
        shifted_position[-1] += WorldConstants.FLOOR_HEIGHT
        for i in range(0, len(self._pybullet_client_ids)):
            pybullet.resetBasePositionAndOrientation(self._block_ids[i], shifted_position, orientation, physicsClientId=self._pybullet_client_ids[i])
        return

    def get_state(self, state_type='dict'):
        """Return the full state of the object.

        :param state_type: 'list' or 'dict'.
        :return: (dict or list) the state of the object.
        """
        if state_type == 'dict':
            state = dict()
            (position, orientation) = pybullet.getBasePositionAndOrientation(self._block_ids[0], physicsClientId=self._pybullet_client_ids[0])
            position = np.array(position)
            position[-1] -= WorldConstants.FLOOR_HEIGHT
            state['type'] = self._type_id
            state['cartesian_position'] = np.array(position)
            state['cylindrical_position'] = cart2cyl(np.array(position))
            state['orientation'] = np.array(orientation)
            state['size'] = self._size
            state['color'] = self._color
            state['friction'] = self._lateral_friction
            if self.is_not_fixed():
                (linear_velocity, angular_velocity) = pybullet.getBaseVelocity(self._block_ids[0], physicsClientId=self._pybullet_client_ids[0])
                state['linear_velocity'] = np.array(linear_velocity)
                state['angular_velocity'] = np.array(angular_velocity)
                state['mass'] = self._mass
        elif state_type == 'list':
            state = []
            (position, orientation) = pybullet.getBasePositionAndOrientation(self._block_ids[0], physicsClientId=self._pybullet_client_ids[0])
            position = np.array(position)
            position[-1] -= WorldConstants.FLOOR_HEIGHT
            if self.is_not_fixed():
                (linear_velocity, angular_velocity) = pybullet.getBaseVelocity(self._block_ids[0], physicsClientId=self._pybullet_client_ids[0])
            # Flatten the state variables in the canonical order; note that
            # 'cylindrical_position' is intentionally NOT emitted here
            # (kept for backward compatibility of the state vector).
            for name in self._state_variable_names:
                if name == 'type':
                    state.append(self._type_id)
                elif name == 'cartesian_position':
                    state.extend(position)
                elif name == 'orientation':
                    state.extend(orientation)
                elif name == 'linear_velocity':
                    state.extend(linear_velocity)
                elif name == 'angular_velocity':
                    state.extend(angular_velocity)
                elif name == 'mass':
                    state.append(self._mass)
                elif name == 'size':
                    state.extend(self._size)
                elif name == 'color':
                    state.extend(self._color)
                elif name == 'friction':
                    state.append(self._lateral_friction)
        return state

    def set_full_state(self, new_state):
        """Set the full state of the object from a flat list.

        :param new_state: (list) the state of the object to be set.
        :return:
        """
        new_state_dict = dict()
        current_state = self.get_state()
        start = 0
        # Only variables whose value actually changed become interventions.
        for i in range(len(self._state_variable_sizes)):
            end = start + self._state_variable_sizes[i]
            if not np.all(current_state[self._state_variable_names[i]] == new_state[start:end]):
                if end == start + 1:
                    new_state_dict[self._state_variable_names[i]] = new_state[start:end][0]
                else:
                    new_state_dict[self._state_variable_names[i]] = new_state[start:end]
            start = end
        self.apply_interventions(new_state_dict)
        return

    def apply_interventions(self, interventions_dict):
        """Perform a do-intervention on the object's variables.

        :param interventions_dict: (dict) interventions to be performed on
                                          the various variables.
        :return:
        """
        # Convert derived representations to the canonical ones first.
        if 'cylindrical_position' in interventions_dict:
            interventions_dict['cartesian_position'] = cyl2cart(interventions_dict['cylindrical_position'])
        if 'euler_orientation' in interventions_dict:
            interventions_dict['orientation'] = euler_to_quaternion(interventions_dict['euler_orientation'])
        if ('cartesian_position' not in interventions_dict) or ('orientation' not in interventions_dict):
            # Fetch the current pose for whichever part is not intervened on.
            (position, orientation) = pybullet.getBasePositionAndOrientation(self._block_ids[0], physicsClientId=self._pybullet_client_ids[0])
            position = np.array(position)
            position[-1] -= WorldConstants.FLOOR_HEIGHT
        if 'cartesian_position' in interventions_dict:
            position = interventions_dict['cartesian_position']
        if 'orientation' in interventions_dict:
            orientation = interventions_dict['orientation']
        if 'mass' in interventions_dict:
            self._mass = interventions_dict['mass']
        if 'friction' in interventions_dict:
            self._lateral_friction = interventions_dict['friction']
        if 'size' in interventions_dict:
            # A size change requires recreating the object entirely; the
            # recreated object picks up the new mass/friction as well.
            self._size = interventions_dict['size']
            self._set_volume()
            self.reinit_object()
        elif 'mass' in interventions_dict:
            for i in range(0, len(self._pybullet_client_ids)):
                pybullet.changeDynamics(self._block_ids[i], -1, mass=self._mass, physicsClientId=self._pybullet_client_ids[i])
        elif 'friction' in interventions_dict:
            self._set_lateral_friction(self._lateral_friction)
        if ('cartesian_position' in interventions_dict) or ('orientation' in interventions_dict):
            # BUGFIX: the floor-height offset used to be added once per
            # client inside the loop, corrupting the pose for the 2nd+
            # pybullet client. Shift a copy exactly once.
            position = np.array(position, dtype=float)
            position[-1] += WorldConstants.FLOOR_HEIGHT
            for i in range(0, len(self._pybullet_client_ids)):
                pybullet.resetBasePositionAndOrientation(self._block_ids[i], position, orientation, physicsClientId=self._pybullet_client_ids[i])
        if 'color' in interventions_dict:
            self._color = interventions_dict['color']
            self._set_color(self._color)
        if ('linear_velocity' in interventions_dict) or ('angular_velocity' in interventions_dict):
            # BUGFIX: this condition used to be an xor ('^'), which left
            # linear_velocity/angular_velocity undefined (NameError) when
            # BOTH velocities were intervened on at the same time.
            (linear_velocity, angular_velocity) = pybullet.getBaseVelocity(self._block_ids[0], physicsClientId=self._pybullet_client_ids[0])
            if 'linear_velocity' in interventions_dict:
                linear_velocity = interventions_dict['linear_velocity']
            if 'angular_velocity' in interventions_dict:
                angular_velocity = interventions_dict['angular_velocity']
            for i in range(0, len(self._pybullet_client_ids)):
                pybullet.resetBaseVelocity(self._block_ids[i], linear_velocity, angular_velocity, physicsClientId=self._pybullet_client_ids[i])
        return

    def get_state_variable_names(self):
        """
        :return: (list) the state variable names.
        """
        return self._state_variable_names

    def is_not_fixed(self):
        """
        :return: (bool) True if it is not a fixed object.
        """
        return self._not_fixed

    def get_bounds(self):
        """
        :return: (tuple) (lower bounds dict, upper bounds dict) of the
                 object's state variables.
        """
        return (self._lower_bounds, self._upper_bounds)

    def get_state_size(self):
        """
        :return: (int) size of the flat state vector of the object.
        """
        return self._state_size

    def get_bounding_box(self):
        """
        :return: (nd.array) axis-aligned bounding box; index 0 is the lower
                 corner and index 1 the upper corner (floor-relative).
        """
        bb = pybullet.getAABB(self._block_ids[0], physicsClientId=self._pybullet_client_ids[0])
        bb = np.array(bb)
        bb[0][-1] -= WorldConstants.FLOOR_HEIGHT
        bb[1][-1] -= WorldConstants.FLOOR_HEIGHT
        return bb

    def get_vertices(self):
        """
        :return: (nd.array) the current vertices of the object.
        """
        (position, orientation) = pybullet.getBasePositionAndOrientation(self._block_ids[0], physicsClientId=self._pybullet_client_ids[0])
        position = np.array(position)
        position[-1] -= WorldConstants.FLOOR_HEIGHT
        # Unit cube corners in homogeneous coordinates, scaled by half-size.
        vertices = [[1, 1, -1, 1], [1, -1, -1, 1], [-1, 1, -1, 1], [-1, -1, -1, 1], [1, 1, 1, 1], [1, -1, 1, 1], [-1, 1, 1, 1], [-1, -1, 1, 1]]
        temp_size = np.array([self._size[0], self._size[1], self._size[2], 2])
        vertices = [(point * temp_size) / 2.0 for point in vertices]
        return rotate_points(np.array(vertices), orientation, position)

    def world_to_cube_r_matrix(self):
        """
        :return: (nd.array) the world-to-object transformation matrix.
        """
        (position, orientation) = pybullet.getBasePositionAndOrientation(self._block_ids[0], physicsClientId=self._pybullet_client_ids[0])
        position = np.array(position)
        position[-1] -= WorldConstants.FLOOR_HEIGHT
        return get_transformation_matrix(position, orientation)

    def get_rotation_matrix(self):
        """
        :return: (nd.array) the rotation matrix of the object.
        """
        (position, orientation) = pybullet.getBasePositionAndOrientation(self._block_ids[0], physicsClientId=self._pybullet_client_ids[0])
        return get_rotation_matrix(orientation)

    def get_size(self):
        """
        :return: (nd.array) the size of the object.
        """
        return self._size

    def get_volume(self):
        """
        :return: (nd.array) the volume of the object.
        """
        return self._volume

    def get_name(self):
        """
        :return: (str) the name of the object.
        """
        return self._name

    def get_block_ids(self):
        """
        :return: (list) the block ids in the active pybullet clients.
        """
        return self._block_ids
|
class Cuboid(RigidObject):
def __init__(self, pybullet_client_ids, name, size=np.array([0.065, 0.065, 0.065]), initial_position=np.array([0.0, 0.0, 0.0425]), initial_orientation=np.array([0, 0, 0, 1]), mass=0.08, color=np.array([1, 0, 0]), initial_linear_velocity=np.array([0, 0, 0]), initial_angular_velocity=np.array([0, 0, 0]), lateral_friction=1):
'\n This specifies the moving cuboid object in the arena.\n\n :param pybullet_client_ids: (list) specifies the pybullet client ids.\n :param name: (str) specifies the name of the object.\n :param size: (list float) specifies the size in the three directions.\n :param initial_position: (list float) specifies the position in x,y,z.\n :param initial_orientation: (list float) specifies the quaternion of\n the object.\n :param mass: (float) specifies the mass of the object.\n :param color: (list float) specifies the RGB values of the cuboid.\n :param initial_linear_velocity: (list float) specifies the initial\n linear velocity vx, vy, vz.\n :param initial_angular_velocity: (list float) specifies the initial\n angular velocities.\n :param lateral_friction: (float) specifies the lateral friction.\n '
super(Cuboid, self).__init__(pybullet_client_ids=pybullet_client_ids, name=name, size=size, initial_position=initial_position, initial_orientation=initial_orientation, mass=mass, color=color, fixed_bool=False, lateral_friction=lateral_friction, spinning_friction=0.001, restitution=0, initial_linear_velocity=initial_linear_velocity, initial_angular_velocity=initial_angular_velocity)
def _create_object(self, pybullet_client_id, **kwargs):
'\n\n :param pybullet_client_id: (int) corresponding pybullet client to create\n the object in.\n :param kwargs: (params) parameters for the object creation.\n\n :return: (tuple) the first position specifies the shape_id and the\n second specifies the block id for pybullet.\n '
shape_id = pybullet.createCollisionShape(shapeType=pybullet.GEOM_BOX, halfExtents=(np.array(self._size) / 2), physicsClientId=pybullet_client_id)
position = np.array(self._initial_position)
position[(- 1)] += WorldConstants.FLOOR_HEIGHT
block_id = pybullet.createMultiBody(baseCollisionShapeIndex=shape_id, basePosition=position, baseOrientation=self._initial_orientation, baseMass=self._mass, physicsClientId=pybullet_client_id)
return (shape_id, block_id)
def _define_type_id(self):
'\n Sets the type id.\n\n :return:\n '
self._type_id = 1
return
def get_recreation_params(self):
    """
    Collect the constructor arguments needed to recreate this cuboid in
    its current pose and motion state.

    :return: (dict) the creation parameters needed to recreate the object.
    """
    client = self._pybullet_client_ids[0]
    body = self._block_ids[0]
    linear_velocity, angular_velocity = pybullet.getBaseVelocity(
        body, physicsClientId=client)
    position, orientation = pybullet.getBasePositionAndOrientation(
        body, physicsClientId=client)
    # report the position relative to the arena floor
    position = np.array(position)
    position[-1] -= WorldConstants.FLOOR_HEIGHT
    recreation_params = {
        'name': self._name,
        'size': self._size,
        'initial_position': position,
        'initial_orientation': orientation,
        'mass': self._mass,
        'color': self._color,
        'lateral_friction': self._lateral_friction,
        'initial_linear_velocity': linear_velocity,
        'initial_angular_velocity': angular_velocity,
    }
    return copy.deepcopy(recreation_params)
|
class StaticCuboid(RigidObject):

    def __init__(self, pybullet_client_ids, name,
                 size=np.array([0.065, 0.065, 0.065]),
                 position=np.array([0.0, 0.0, 0.0425]),
                 orientation=np.array([0, 0, 0, 1]),
                 color=np.array([1, 0, 0]),
                 lateral_friction=1):
        """
        An immovable cuboid fixed in the arena (zero mass, fixed base).

        :param pybullet_client_ids: (list) specifies the pybullet clients.
        :param name: (str) specifies the name of the object.
        :param size: (list float) specifies the size in the three directions.
        :param position: (list float) specifies the position in x,y,z.
        :param orientation: (list float) specifies the quaternion of
                            the object.
        :param color: (list float) specifies the RGB values of the cuboid.
        :param lateral_friction: (float) specifies the lateral friction.
        """
        super(StaticCuboid, self).__init__(
            pybullet_client_ids=pybullet_client_ids,
            name=name,
            size=size,
            initial_position=position,
            initial_orientation=orientation,
            mass=0,
            color=color,
            fixed_bool=True,
            lateral_friction=lateral_friction,
            spinning_friction=0.001,
            restitution=0,
            initial_linear_velocity=[0, 0, 0],
            initial_angular_velocity=[0, 0, 0])

    def _create_object(self, pybullet_client_id, **kwargs):
        """
        Build the collision shape and the multi body in one pybullet client.

        :param pybullet_client_id: (int) pybullet client to create the
                                   object in.
        :param kwargs: (params) extra creation parameters (unused here).

        :return: (tuple) (shape_id, block_id) as returned by pybullet.
        """
        # z is stored relative to the arena floor, shift before spawning
        spawn_position = np.array(self._initial_position)
        spawn_position[-1] += WorldConstants.FLOOR_HEIGHT
        shape_id = pybullet.createCollisionShape(
            shapeType=pybullet.GEOM_BOX,
            halfExtents=np.array(self._size) / 2,
            physicsClientId=pybullet_client_id)
        block_id = pybullet.createMultiBody(
            baseCollisionShapeIndex=shape_id,
            basePosition=spawn_position,
            baseOrientation=self._initial_orientation,
            baseMass=self._mass,
            physicsClientId=pybullet_client_id)
        return (shape_id, block_id)

    def _define_type_id(self):
        """
        Set the type id that identifies static cuboids.

        :return:
        """
        self._type_id = 10
        return

    def get_recreation_params(self):
        """
        Collect the constructor arguments needed to recreate this static
        cuboid in its current pose.

        :return: (dict) the creation parameters needed to recreate the object.
        """
        position, orientation = pybullet.getBasePositionAndOrientation(
            self._block_ids[0],
            physicsClientId=self._pybullet_client_ids[0])
        # report the position relative to the arena floor
        position = np.array(position)
        position[-1] -= WorldConstants.FLOOR_HEIGHT
        recreation_params = {
            'name': self._name,
            'size': self._size,
            'position': position,
            'orientation': orientation,
            'color': self._color,
            'lateral_friction': self._lateral_friction,
        }
        return copy.deepcopy(recreation_params)
|
class MeshObject(RigidObject):

    def __init__(self, pybullet_client_ids, name, filename,
                 scale=np.array([0.01, 0.01, 0.01]),
                 initial_position=np.array([0.0, 0.0, 0.0425]),
                 initial_orientation=np.array([0, 0, 0, 1]),
                 color=np.array([1, 0, 0]),
                 mass=0.08,
                 initial_linear_velocity=np.array([0, 0, 0]),
                 initial_angular_velocity=np.array([0, 0, 0]),
                 lateral_friction=1):
        """
        A rigid object whose geometry is loaded from a mesh file.

        :param pybullet_client_ids: (list) specifies the pybullet clients.
        :param name: (str) specifies the name of the object.
        :param filename: (str) specifies the name of the file itself.
        :param scale: (list float) specifies the scale of the mesh object.
        :param initial_position: (list float) specifies the positions in x,y,z.
        :param initial_orientation: (list float) specifies the quaternion of
                                    the object.
        :param color: (list float) specifies the RGB values.
        :param mass: (float) specifies the object mass.
        :param initial_linear_velocity: (list float) specifies the velocity
                                        in vx, vy, vz.
        :param initial_angular_velocity: (list float) specifies the velocity
                                         in yaw, roll, pitch.
        :param lateral_friction: (float) specifies the lateral friction.
        """
        self._scale = scale
        self._filename = filename
        super(MeshObject, self).__init__(
            pybullet_client_ids=pybullet_client_ids,
            name=name,
            size=[0, 0, 0],
            initial_position=initial_position,
            initial_orientation=initial_orientation,
            mass=mass,
            color=color,
            fixed_bool=False,
            lateral_friction=lateral_friction,
            spinning_friction=0.001,
            restitution=0,
            initial_linear_velocity=initial_linear_velocity,
            initial_angular_velocity=initial_angular_velocity)
        # the real extent is only known after the mesh is loaded, so
        # derive the size from the axis-aligned bounding box
        bb = self.get_bounding_box()
        self._size = np.array([bb[1][0] - bb[0][0],
                               bb[1][1] - bb[0][1],
                               bb[1][2] - bb[0][2]])

    def _create_object(self, pybullet_client_id, **kwargs):
        """
        Build the mesh collision shape and the multi body in one
        pybullet client.

        :param pybullet_client_id: (int) pybullet client to create the
                                   object in.
        :param kwargs: (params) extra creation parameters (unused here).

        :return: (tuple) (shape_id, block_id) as returned by pybullet.
        """
        # z is stored relative to the arena floor, shift before spawning
        spawn_position = np.array(self._initial_position)
        spawn_position[-1] += WorldConstants.FLOOR_HEIGHT
        shape_id = pybullet.createCollisionShape(
            shapeType=pybullet.GEOM_MESH,
            meshScale=self._scale,
            fileName=self._filename,
            physicsClientId=pybullet_client_id)
        block_id = pybullet.createMultiBody(
            baseCollisionShapeIndex=shape_id,
            basePosition=spawn_position,
            baseOrientation=self._initial_orientation,
            baseMass=self._mass,
            physicsClientId=pybullet_client_id)
        return (shape_id, block_id)

    def _define_type_id(self):
        """
        Set the type id that identifies mesh objects.

        :return:
        """
        self._type_id = 2
        return

    def get_recreation_params(self):
        """
        Collect the constructor arguments needed to recreate this mesh
        object in its current pose and motion state.

        :return: (dict) the creation parameters needed to recreate the object.
        """
        client = self._pybullet_client_ids[0]
        body = self._block_ids[0]
        position, orientation = pybullet.getBasePositionAndOrientation(
            body, physicsClientId=client)
        linear_velocity, angular_velocity = pybullet.getBaseVelocity(
            body, physicsClientId=client)
        # report the position relative to the arena floor
        position = np.array(position)
        position[-1] -= WorldConstants.FLOOR_HEIGHT
        recreation_params = {
            'name': self._name,
            'filename': self._filename,
            'scale': self._scale,
            'initial_position': position,
            'initial_orientation': orientation,
            'mass': self._mass,
            'initial_linear_velocity': linear_velocity,
            'initial_angular_velocity': angular_velocity,
            'color': self._color,
            'lateral_friction': self._lateral_friction,
        }
        return copy.deepcopy(recreation_params)
|
class StageObservations(object):
    """
    Maintains the observation space for everything in the arena and
    handles normalization/denormalization of observations.
    """

    def __init__(self, rigid_objects, visual_objects,
                 observation_mode='structured',
                 normalize_observations=True,
                 cameras=None,
                 camera_indicies=np.array([0, 1, 2])):
        """

        :param rigid_objects: (dict) dict of rigid objects in the arena.
        :param visual_objects: (dict) dict of visual objects in the arena.
        :param observation_mode: (str) specifies the observation mode
                                 if structured or cameras.
        :param normalize_observations: (bool) specifies if the observations are
                                       normalized or not.
        :param cameras: (list) a list of cameras mounted on the stage.
        :param camera_indicies: (list) specifies the indicies of the cameras
                                to be specified.
        """
        self._normalized_observations = normalize_observations
        self._observation_mode = observation_mode
        self._camera_indicies = camera_indicies
        # structured observations are normalized to [-1, 1];
        # pixel observations to [0, 1]
        self._low_norm = -1
        self._high_norm = 1
        if observation_mode == 'pixel':
            self._low_norm = 0
            self._high_norm = 1
        self._lower_bounds = dict()
        self._upper_bounds = dict()
        num_of_cameras = self._camera_indicies.shape[0]
        self._lower_bounds['goal_image'] = np.zeros(
            shape=(num_of_cameras, 128, 128, 3), dtype=np.float64)
        self._upper_bounds['goal_image'] = np.full(
            shape=(num_of_cameras, 128, 128, 3), fill_value=255,
            dtype=np.float64)
        self._goal_cameras = cameras
        self._rigid_objects = rigid_objects
        self._visual_objects = visual_objects
        self._observations_keys = []
        # FIX: np.bool was a deprecated alias removed in NumPy 1.24;
        # the builtin bool is the correct dtype here.
        self._observation_is_not_normalized = np.array([], dtype=bool)
        self._low = np.array([])
        self._high = np.array([])
        self.initialize_observations()
        self.set_observation_spaces()
        return

    def get_observation_spaces(self):
        """

        :return: (gym.Spaces) observation space as a gym box space.
        """
        if self._normalized_observations:
            observations_low_values = np.full(shape=self._low.shape,
                                              fill_value=self._low_norm,
                                              dtype=np.float64)
            observations_high_values = np.full(shape=self._low.shape,
                                               fill_value=self._high_norm,
                                               dtype=np.float64)
            # dimensions whose bounds coincide cannot be rescaled, so
            # expose their raw bounds instead of the normalized range
            observations_low_values[self._observation_is_not_normalized] = \
                self._low[self._observation_is_not_normalized]
            observations_high_values[self._observation_is_not_normalized] = \
                self._high[self._observation_is_not_normalized]
            return spaces.Box(low=observations_low_values,
                              high=observations_high_values,
                              dtype=np.float64)
        elif self._observation_mode == 'structured':
            return spaces.Box(low=self._low, high=self._high,
                              dtype=np.float64)
        else:
            return spaces.Box(low=self._low, high=self._high,
                              dtype=np.uint8)

    def initialize_observations(self):
        """
        Creates the upper bound and lower bound of the observation space
        from every object's state bounds.

        :return:
        """
        all_objects = (list(self._rigid_objects.values()) +
                       list(self._visual_objects.values()))
        for scene_object in all_objects:
            state_keys = scene_object.get_state().keys()
            object_lower_bounds, object_upper_bounds = \
                scene_object.get_bounds()
            prefix = scene_object.get_name() + '_'
            for state_key in state_keys:
                self._lower_bounds[prefix + state_key] = \
                    object_lower_bounds[prefix + state_key]
                self._upper_bounds[prefix + state_key] = \
                    object_upper_bounds[prefix + state_key]
        return

    def reset_observation_keys(self):
        """
        Resets the observation keys and the lower bound as well as
        the upper bound.

        :return:
        """
        self._observations_keys = []
        self._low = np.array([])
        self._high = np.array([])

    def _is_observation_key_known(self, observation_key):
        """

        :param observation_key: (str) specifies the observation key to query.

        :return: (bool) returns true if the observation key is
                 known to the space.
        """
        return observation_key in self._lower_bounds

    def set_observation_spaces(self):
        """
        Recomputes self._low / self._high (and the not-normalized mask)
        from the currently selected observation keys.

        :return:
        """
        self._low = np.array([])
        self._high = np.array([])
        # FIX: builtin bool instead of the removed np.bool alias
        self._observation_is_not_normalized = np.array([], dtype=bool)
        if 'goal_image' in self._observations_keys:
            self._low = self._lower_bounds['goal_image']
            self._high = self._upper_bounds['goal_image']
        else:
            for key in self._observations_keys:
                self._low = np.append(self._low,
                                      np.array(self._lower_bounds[key]))
                self._high = np.append(self._high,
                                       np.array(self._upper_bounds[key]))
                # a dimension whose bounds are equal cannot be normalized
                is_degenerate = np.array_equal(self._lower_bounds[key],
                                               self._upper_bounds[key])
                self._observation_is_not_normalized = np.append(
                    self._observation_is_not_normalized,
                    np.full(shape=np.array(self._upper_bounds[key]).shape,
                            fill_value=is_degenerate,
                            dtype=bool))
        return

    def is_normalized(self):
        """

        :return: (bool) returns true if the observations are normalized or not.
        """
        return self._normalized_observations

    def normalize_observation(self, observation):
        """

        :param observation: (nd.array) represents the observation to
                            be normalized.

        :return: (nd.array) normalized observation.
        """
        return (self._high_norm - self._low_norm) * \
            (observation - self._low) / (self._high - self._low) + \
            self._low_norm

    def denormalize_observation(self, observation):
        """

        :param observation: (nd.array) represents the observation to
                            be denormalized.

        :return: (nd.array) denormalized observation.
        """
        return self._low + (observation - self._low_norm) / \
            (self._high_norm - self._low_norm) * (self._high - self._low)

    def normalize_observation_for_key(self, observation, key):
        """

        :param observation: (nd.array) observation vector to normalize.
        :param key: (str) key corresponding to the observation vector.

        :return: (nd.array) normalized observation vector.
        """
        lower_key = np.array(self._lower_bounds[key])
        higher_key = np.array(self._upper_bounds[key])
        # degenerate bounds cannot be rescaled; return as-is
        if (lower_key == higher_key).all():
            return observation
        return (self._high_norm - self._low_norm) * \
            (observation - lower_key) / (higher_key - lower_key) + \
            self._low_norm

    def denormalize_observation_for_key(self, observation, key):
        """
        :param observation: (nd.array) observation vector to denormalize.
        :param key: (str) key corresponding to the observation vector.

        :return: (nd.array) denormalized observation vector.
        """
        lower_key = np.array(self._lower_bounds[key])
        higher_key = np.array(self._upper_bounds[key])
        if (lower_key == higher_key).all():
            return observation
        return lower_key + (observation - self._low_norm) / \
            (self._high_norm - self._low_norm) * (higher_key - lower_key)

    def satisfy_constraints(self, observation):
        """

        :param observation: (nd.array) observation vector to check if it
                            satisfies the constraints.

        :return: (bool) returns true if the constraints are satisified, false
                 otherwise.
        """
        if self._normalized_observations:
            return (observation > self._low_norm).all() and \
                (observation < self._high_norm).all()
        else:
            return (observation > self._low).all() and \
                (observation < self._high).all()

    def clip_observation(self, observation):
        """

        :param observation: (nd.array) observation vector to clip.

        :return: (nd.array) clipped observation vector to satisfy the limits.
        """
        if self._normalized_observations:
            return clip(observation, self._low_norm, self._high_norm)
        else:
            return clip(observation, self._low, self._high)

    def get_current_observations(self, helper_keys):
        """
        :param helper_keys: (list) observation keys that are needed but not in
                            the observation space for further calculation
                            of custom observations or reward function
                            calculation.

        :return: (dict) returns a dict for all the observation keys and helper
                 keys as well to be processed accordingly. Also
                 normalization takes effect here if needed.
        """
        observations_dict = dict()
        for rigid_object in self._rigid_objects.values():
            observations_dict.update(
                {rigid_object.get_name() + '_' + k: v
                 for k, v in rigid_object.get_state().items()})
        for visual_object in self._visual_objects.values():
            observations_dict.update(
                {visual_object.get_name() + '_' + k: v
                 for k, v in visual_object.get_state().items()})
        # drop everything not selected and not requested as a helper
        observation_dict_keys = list(observations_dict.keys())
        for observation in observation_dict_keys:
            if observation not in self._observations_keys and \
                    observation not in helper_keys:
                del observations_dict[observation]
        if self._normalized_observations:
            for key in observations_dict.keys():
                observations_dict[key] = self.normalize_observation_for_key(
                    observations_dict[key], key)
        return observations_dict

    def remove_observations(self, observations):
        """

        :param observations: (list) list of observation keys to remove from
                             the observation space.

        :return: None
        """
        for observation in observations:
            if observation not in self._observations_keys:
                raise Exception(
                    'Observation key {} is not known'.format(observation))
            self._observations_keys.remove(observation)
        self.set_observation_spaces()

    def add_observation(self, observation_key, lower_bound=None,
                        upper_bound=None):
        """

        :param observation_key: (str) observation key to be added.
        :param lower_bound: (nd.array) lower bound corresponding to the
                            observation key if not known.
        :param upper_bound: (nd.array) upper bound corresponding to the
                            observation key if not known.

        :return: None
        """
        if observation_key not in self._lower_bounds.keys() and \
                (lower_bound is None or upper_bound is None):
            raise Exception(
                'Observation key {} is not known please specify '
                'the low and upper found'.format(observation_key))
        if lower_bound is not None and upper_bound is not None:
            self._lower_bounds[observation_key] = lower_bound
            self._upper_bounds[observation_key] = upper_bound
        self._observations_keys.append(observation_key)
        self.set_observation_spaces()

    def get_current_goal_image(self):
        """

        :return: (nd.array) returns observations from the cameras in the goal
                 pybullet instance if in "pixel" mode,
                 normalization takes place here.
        """
        images = []
        for i in self._camera_indicies:
            images.append(self._goal_cameras[i].get_image())
        camera_obs = np.stack(images, axis=0)
        if self._normalized_observations:
            camera_obs = self.normalize_observation_for_key(
                camera_obs, 'goal_image')
        return camera_obs
|
class Stage(object):

    def __init__(self, observation_mode, normalize_observations,
                 pybullet_client_full_id, pybullet_client_w_goal_id,
                 pybullet_client_w_o_goal_id, cameras, camera_indicies):
        """
        Represents the stage object: handles all arena functionality
        including the objects and silhouettes existing in the arena.

        :param observation_mode: (str) should be "structured" or "pixel"
        :param normalize_observations: (bool) to normalize the observations
                                       or not.
        :param pybullet_client_full_id: (int) pybullet client if visualization
                                        is enabled.
        :param pybullet_client_w_goal_id: (int) pybullet client with the goal
                                          in the image without tool objects.
        :param pybullet_client_w_o_goal_id: (int) pybullet client without the
                                            goal, only tool blocks.
        :param cameras: (list) list of causal_world.robot.Camera object
                        specifying the cameras mounted on top of the
                        trifinger robot.
        :param camera_indicies: (list) list of integers of the order of
                                cameras to be used.
        """
        self._rigid_objects = OrderedDict()
        self._visual_objects = OrderedDict()
        self._observation_mode = observation_mode
        self._pybullet_client_full_id = pybullet_client_full_id
        self._pybullet_client_w_goal_id = pybullet_client_w_goal_id
        self._pybullet_client_w_o_goal_id = pybullet_client_w_o_goal_id
        self._camera_indicies = camera_indicies
        self._normalize_observations = normalize_observations
        self._stage_observations = None
        self._name_keys = []
        self._default_gravity = [0, 0, -9.81]
        self._current_gravity = np.array(self._default_gravity)
        # rigid (tool) objects live in the "full" and "without goal"
        # clients; visual (goal) objects in the "full" and "with goal" ones
        self._visual_object_client_instances = []
        self._rigid_objects_client_instances = []
        if self._pybullet_client_full_id is not None:
            self._visual_object_client_instances.append(
                self._pybullet_client_full_id)
            self._rigid_objects_client_instances.append(
                self._pybullet_client_full_id)
        if self._pybullet_client_w_o_goal_id is not None:
            self._rigid_objects_client_instances.append(
                self._pybullet_client_w_o_goal_id)
        if self._pybullet_client_w_goal_id is not None:
            self._visual_object_client_instances.append(
                self._pybullet_client_w_goal_id)
        self._cameras = cameras
        self._goal_image = None
        return
def get_floor_height(self):
    """
    :return: (float) the height of the arena floor.
    """
    return WorldConstants.FLOOR_HEIGHT
def get_arena_bb(self):
    """
    :return: (list) the lower bound (x, y, z) and the upper bound
             (x, y, z) of the arena, so shape (2, 3).
    """
    return WorldConstants.ARENA_BB
def get_rigid_objects(self):
    """
    :return: (dict) the rigid objects currently in the arena.
    """
    return self._rigid_objects
def get_visual_objects(self):
    """
    :return: (dict) the visual objects currently in the arena.
    """
    return self._visual_objects
def get_full_env_state(self):
    """
    Snapshot everything needed to rebuild the current arena.

    :return: (dict) a dict with 'rigid_objects', 'visual_objects' (lists of
             [shape_tag, recreation_params] pairs) and
             'arena_variable_values'.
    """
    env_state = {}
    env_state['rigid_objects'] = []
    # separate isinstance checks (not elif) to mirror the object taxonomy
    for rigid_object in self._rigid_objects.values():
        if isinstance(rigid_object, Cuboid):
            env_state['rigid_objects'].append(
                ['cube', rigid_object.get_recreation_params()])
        if isinstance(rigid_object, StaticCuboid):
            env_state['rigid_objects'].append(
                ['static_cube', rigid_object.get_recreation_params()])
        if isinstance(rigid_object, MeshObject):
            env_state['rigid_objects'].append(
                ['mesh', rigid_object.get_recreation_params()])
    env_state['visual_objects'] = []
    for visual_object in self._visual_objects.values():
        if isinstance(visual_object, SCuboid):
            env_state['visual_objects'].append(
                ['cube', visual_object.get_recreation_params()])
        if isinstance(visual_object, SSphere):
            env_state['visual_objects'].append(
                ['sphere', visual_object.get_recreation_params()])
        if isinstance(visual_object, SMeshObject):
            env_state['visual_objects'].append(
                ['mesh', visual_object.get_recreation_params()])
    env_state['arena_variable_values'] = \
        self.get_current_variable_values_for_arena()
    return env_state
def set_full_env_state(self, env_state):
    """
    Rebuild the arena from a snapshot produced by get_full_env_state.

    :param env_state: (dict) dict specifying everything about the current
                      state of the arena, usually obtained through the
                      get_full_env_state function.

    :return:
    """
    self.remove_everything()
    for rigid_object_info in env_state['rigid_objects']:
        if rigid_object_info[0] == 'mesh':
            self.add_rigid_mesh_object(**rigid_object_info[1])
        else:
            self.add_rigid_general_object(shape=rigid_object_info[0],
                                          **rigid_object_info[1])
    for visual_object_info in env_state['visual_objects']:
        if visual_object_info[0] == 'mesh':
            self.add_silhoutte_mesh_object(**visual_object_info[1])
        else:
            self.add_silhoutte_general_object(shape=visual_object_info[0],
                                              **visual_object_info[1])
    self.apply_interventions(env_state['arena_variable_values'])
    # FIX: point the observations helper at the private dicts it actually
    # reads; the previous code assigned non-existent public attributes
    # (rigid_objects / visual_objects), which had no effect on
    # StageObservations and left it referencing the old dicts.
    self._stage_observations._rigid_objects = self._rigid_objects
    self._stage_observations._visual_objects = self._visual_objects
    return
def add_rigid_general_object(self, name, shape, **object_params):
    """
    Create a new rigid object in the arena.

    :param name: (str) a str specifying a unique name of the rigid object.
    :param shape: (str) specifying "cube" or "static_cube" for now.
    :param object_params: (params) depends on the parameters used for
                          constructing the corresponding object.

    :return:
    """
    if name in self._name_keys:
        raise Exception('name already exists as key for scene objects')
    self._name_keys.append(name)
    if shape == 'cube':
        self._rigid_objects[name] = Cuboid(
            self._rigid_objects_client_instances, name, **object_params)
    elif shape == 'static_cube':
        self._rigid_objects[name] = StaticCuboid(
            self._rigid_objects_client_instances, name, **object_params)
    else:
        raise Exception('shape is not yet implemented')
    return
def remove_general_object(self, name):
    """
    Remove a single object (rigid or visual) from the arena by name.

    :param name: (str) a str specifying a unique name of the object
                 to remove from the arena.

    :return:
    """
    if name not in self._name_keys:
        raise Exception('name does not exists as key for scene objects')
    self._name_keys.remove(name)
    if name in self._rigid_objects.keys():
        self._rigid_objects[name].remove()
        del self._rigid_objects[name]
    elif name in self._visual_objects.keys():
        self._visual_objects[name].remove()
        del self._visual_objects[name]
    return
def remove_everything(self):
    """
    Remove all rigid and visual objects from the arena.

    :return:
    """
    all_names = (list(self._rigid_objects.keys()) +
                 list(self._visual_objects.keys()))
    # remove in reverse creation order
    for name in reversed(all_names):
        self.remove_general_object(name)
    return
def add_rigid_mesh_object(self, name, filename, **object_params):
    """
    Create a new rigid mesh object in the arena.

    :param name: (str) a str specifying a unique name of the mesh object.
    :param filename: (str) a str specifying the location of the .obj file.
    :param object_params: (params) depends on the parameters used for
                          constructing the corresponding object.

    :return:
    """
    if name in self._name_keys:
        raise Exception('name already exists as key for scene objects')
    self._name_keys.append(name)
    self._rigid_objects[name] = MeshObject(
        self._rigid_objects_client_instances, name, filename,
        **object_params)
    return
def add_silhoutte_general_object(self, name, shape, **object_params):
    """
    Create a new visual (silhouette) object in the arena.

    :param name: (str) specifying a unique name of the visual object.
    :param shape: (str) specifying "cube" or "sphere" for now.
    :param object_params: (params) depends on the parameters used for
                          constructing the corresponding object.

    :return:
    """
    if name in self._name_keys:
        raise Exception('name already exists as key for scene objects')
    self._name_keys.append(name)
    if shape == 'cube':
        self._visual_objects[name] = SCuboid(
            self._visual_object_client_instances, name, **object_params)
    elif shape == 'sphere':
        self._visual_objects[name] = SSphere(
            self._visual_object_client_instances, name, **object_params)
    else:
        raise Exception('shape is not implemented yet')
    return
def add_silhoutte_mesh_object(self, name, filename, **object_params):
    """
    Create a new visual (silhouette) mesh object in the arena.

    :param name: (str) specifying a unique name of the mesh visual object.
    :param filename: (str) a str specifying the location of the .obj file.
    :param object_params: (params) depends on the parameters used for
                          constructing the corresponding object.

    :return:
    """
    if name in self._name_keys:
        raise Exception('name already exists as key for scene objects')
    self._name_keys.append(name)
    self._visual_objects[name] = SMeshObject(
        self._visual_object_client_instances, name, filename,
        **object_params)
    return
def finalize_stage(self):
    """
    Finalize the observation space of the environment after adding all
    the objects and visuals in the stage.

    :return:
    """
    if self._observation_mode == 'pixel':
        self._stage_observations = StageObservations(
            self._rigid_objects,
            self._visual_objects,
            self._observation_mode,
            self._normalize_observations,
            cameras=self._cameras,
            camera_indicies=self._camera_indicies)
        # pixel mode also needs the goal rendering refreshed
        self.update_goal_image()
    else:
        self._stage_observations = StageObservations(
            self._rigid_objects,
            self._visual_objects,
            self._observation_mode,
            self._normalize_observations)
    return
def select_observations(self, observation_keys):
    """
    Select the observations to be returned by the environment.

    :param observation_keys: (list) list of str that specifies the
                             observation keys to be returned and
                             calculated by the environment.

    :return:
    """
    self._stage_observations.reset_observation_keys()
    self._stage_observations.initialize_observations()
    for observation_key in observation_keys:
        self._stage_observations.add_observation(observation_key)
    self._stage_observations.set_observation_spaces()
def get_full_state(self, state_type='list'):
    """
    Return the state of every object in the arena, in creation order.

    :param state_type: (str) 'list' or 'dict' specifying to return the
                       state as a dict with the object name as a key
                       or just a concatenated list.

    :return: (list or dict) depending on the arg state_type, returns
             the full state of the stage itself.
    """
    if state_type == 'list':
        stage_state = []
    elif state_type == 'dict':
        stage_state = dict()
    else:
        raise Exception('type is not supported')
    for name in self._name_keys:
        if name in self._rigid_objects:
            scene_object = self._rigid_objects[name]
        elif name in self._visual_objects:
            scene_object = self._visual_objects[name]
        else:
            raise Exception('possible error here')
        if state_type == 'list':
            stage_state.extend(scene_object.get_state(state_type='list'))
        elif state_type == 'dict':
            stage_state[name] = scene_object.get_state(state_type='dict')
    return stage_state
def set_full_state(self, new_state):
    """
    Restore the state of all objects in the arena from a flat state
    vector ordered like get_full_state(state_type='list').

    :param new_state: (list) specifies the full state of all the objects
                      in the arena.

    :return:

    :raises Exception: if a name key is neither a rigid nor a visual
                       object.
    """
    start = 0
    for name in self._name_keys:
        if name in self._rigid_objects:
            scene_object = self._rigid_objects[name]
        elif name in self._visual_objects:
            scene_object = self._visual_objects[name]
        else:
            # FIX: previously an unknown name left `end` stale (or raised
            # a confusing NameError on the first iteration); fail loudly
            # and consistently with get_full_state instead.
            raise Exception('possible error here')
        end = start + scene_object.get_state_size()
        scene_object.set_full_state(new_state[start:end])
        start = end
    if self._observation_mode == 'pixel':
        self.update_goal_image()
    return
def set_objects_pose(self, names, positions, orientations):
    """
    Set the pose of several objects at once.

    :param names: (list) list of object names to set their positions and
                  orientations.
    :param positions: (list) corresponding list of positions of
                      objects to be set.
    :param orientations: (list) corresponding list of orientations of
                         objects to be set.

    :return:
    """
    for idx in range(len(names)):
        target_name = names[idx]
        if target_name in self._rigid_objects:
            self._rigid_objects[target_name].set_pose(
                positions[idx], orientations[idx])
        elif target_name in self._visual_objects:
            self._visual_objects[target_name].set_pose(
                positions[idx], orientations[idx])
        else:
            raise Exception('Object {} doesnt exist'.format(target_name))
    if self._observation_mode == 'pixel':
        self.update_goal_image()
    return
def get_current_observations(self, helper_keys):
    """
    Delegate to the stage observations helper.

    :param helper_keys: (list) list of observation keys that are not
                        part of the observation space but still needed for
                        custom observations or dense reward calculation.

    :return: (dict) the current observations keyed by observation key.
    """
    return self._stage_observations.get_current_observations(helper_keys)
def get_observation_spaces(self):
    """
    :return: (gym.spaces.Box) the current observation space of the
             environment.
    """
    return self._stage_observations.get_observation_spaces()
def random_position(self, height_limits=(0.05, 0.15),
                    angle_limits=(-2 * math.pi, 2 * math.pi),
                    radius_limits=(0.0, 0.15),
                    allowed_section=np.array([[-0.5, -0.5, 0],
                                              [0.5, 0.5, 0.5]])):
    """
    Sample a random cartesian position inside the arena using polar
    coordinates, rejecting samples outside allowed_section.

    :param height_limits: (tuple) low and upper bound for z; a single
                          number fixes the height.
    :param angle_limits: (tuple) low and upper bound for theta in
                         polar coordinates.
    :param radius_limits: (tuple) low and upper bound for the radius in
                          polar coordinates.
    :param allowed_section: (nd.array) lower bound and upper bound
                            (x, y, z) of the area allowed for sampling,
                            shape (2, 3).

    :return: (list) a cartesian random position in the arena (x, y, z).
    """
    while True:
        theta = np.random.uniform(*angle_limits)
        # sample the radius from the sqrt of a uniform draw over r^2 so
        # points are uniform over the disc, not clustered at the center
        radius = np.sqrt(np.random.uniform(radius_limits[0] ** 2,
                                           radius_limits[1] ** 2))
        if isinstance(height_limits, (int, float)):
            height_z = height_limits
        else:
            height_z = np.random.uniform(*height_limits)
        candidate = [radius * math.cos(theta),
                     radius * math.sin(theta),
                     height_z]
        if np.all(candidate > allowed_section[0]) and \
                np.all(candidate < allowed_section[1]):
            return candidate
def get_current_object_keys(self):
    """
    :return: (list) the names of the rigid objects followed by the
             names of the visual objects.
    """
    return list(self._rigid_objects.keys()) + \
        list(self._visual_objects.keys())
def object_intervention(self, key, interventions_dict):
    """Apply an intervention to a single rigid or visual object.

    :param key: (str) the unique name of the rigid or visual object to
                intervene on.
    :param interventions_dict: (dict) dict specifying the intervention to
                               be performed.

    :raises Exception: if ``key`` is neither a rigid nor a visual object.

    :return:
    """
    # Fix: local renamed from `object`, which shadowed the builtin.
    if key in self._rigid_objects:
        stage_object = self._rigid_objects[key]
    elif key in self._visual_objects:
        stage_object = self._visual_objects[key]
    else:
        raise Exception(
            "The key {} passed doesn't exist in the stage yet".format(key))
    stage_object.apply_interventions(interventions_dict)
    # Pixel observations render the goal, so refresh the cached goal image.
    if self._observation_mode == 'pixel':
        self.update_goal_image()
    return
def get_current_variable_values_for_arena(self):
    """Return the exposed arena variables (everything except objects).

    :return: (dict) floor/stage color and friction plus gravity, read back
             from the active pybullet client.
    """
    # Prefer the client without goal rendering when available; presumably
    # both clients are kept in sync so either reports the same values.
    if (self._pybullet_client_w_o_goal_id is not None):
        client = self._pybullet_client_w_o_goal_id
    else:
        client = self._pybullet_client_full_id
    variable_params = dict()
    # getVisualShapeData(...)[0][7] is the RGBA color; drop the alpha.
    variable_params['floor_color'] = pybullet.getVisualShapeData(WorldConstants.FLOOR_ID, physicsClientId=client)[0][7][:3]
    # getDynamicsInfo(...)[1] is the lateral friction coefficient.
    variable_params['floor_friction'] = pybullet.getDynamicsInfo(WorldConstants.FLOOR_ID, (- 1), physicsClientId=client)[1]
    variable_params['stage_color'] = pybullet.getVisualShapeData(WorldConstants.STAGE_ID, physicsClientId=client)[0][7][:3]
    variable_params['stage_friction'] = pybullet.getDynamicsInfo(WorldConstants.STAGE_ID, (- 1), physicsClientId=client)[1]
    # Gravity is cached locally when set, not read back from pybullet.
    variable_params['gravity'] = self._current_gravity
    return variable_params
def get_current_variable_values_for_objects(self):
    """Return the exposed variables of the stage objects only.

    :return: (dict) full state of all objects, keyed by variable name.
    """
    full_state = self.get_full_state(state_type='dict')
    return full_state
def get_current_variable_values(self):
    """Return all exposed variables of the stage (arena and objects).

    :return: (dict) merged arena and object variable values.
    """
    combined = dict(self.get_current_variable_values_for_arena())
    combined.update(self.get_current_variable_values_for_objects())
    return combined
def apply_interventions(self, interventions_dict):
    """Apply a dict of interventions to the stage (arena and objects).

    A dict-valued entry is treated as an intervention on the named
    rigid/visual object; otherwise the key must be one of the supported
    arena variables: 'floor_color', 'stage_color', 'stage_friction',
    'floor_friction', 'gravity'.

    :param interventions_dict: (dict) dict specifying the intervention to
                               be performed.

    :raises Exception: if an arena variable key is not supported.

    :return:
    """
    for intervention in interventions_dict:
        if isinstance(interventions_dict[intervention], dict):
            # Nested dict -> intervention on a named rigid/visual object.
            self.object_intervention(intervention, interventions_dict[intervention])
        elif (intervention == 'floor_color'):
            # Colors arrive as RGB; append alpha=1 for pybullet's RGBA
            # argument. Applied to every client so they stay in sync.
            for client in self._visual_object_client_instances:
                pybullet.changeVisualShape(WorldConstants.FLOOR_ID, (- 1), rgbaColor=np.append(interventions_dict[intervention], 1), physicsClientId=client)
            for client in self._rigid_objects_client_instances:
                pybullet.changeVisualShape(WorldConstants.FLOOR_ID, (- 1), rgbaColor=np.append(interventions_dict[intervention], 1), physicsClientId=client)
        elif (intervention == 'stage_color'):
            for client in self._visual_object_client_instances:
                pybullet.changeVisualShape(WorldConstants.STAGE_ID, (- 1), rgbaColor=np.append(interventions_dict[intervention], 1), physicsClientId=client)
            for client in self._rigid_objects_client_instances:
                pybullet.changeVisualShape(WorldConstants.STAGE_ID, (- 1), rgbaColor=np.append(interventions_dict[intervention], 1), physicsClientId=client)
        elif (intervention == 'stage_friction'):
            # Dynamics only matter in the physics (rigid-object) clients.
            for client in self._rigid_objects_client_instances:
                pybullet.changeDynamics(bodyUniqueId=WorldConstants.STAGE_ID, linkIndex=(- 1), lateralFriction=interventions_dict[intervention], physicsClientId=client)
        elif (intervention == 'floor_friction'):
            for client in self._rigid_objects_client_instances:
                pybullet.changeDynamics(bodyUniqueId=WorldConstants.FLOOR_ID, linkIndex=(- 1), lateralFriction=interventions_dict[intervention], physicsClientId=client)
        elif (intervention == 'gravity'):
            for client in self._rigid_objects_client_instances:
                pybullet.setGravity(interventions_dict[intervention][0], interventions_dict[intervention][1], interventions_dict[intervention][2], physicsClientId=client)
            # Cache gravity locally; it is reported by
            # get_current_variable_values_for_arena().
            self._current_gravity = interventions_dict[intervention]
        else:
            raise Exception('The intervention on stage is not supported yet')
    # Pixel observations render the goal, so refresh the cached goal image.
    if (self._observation_mode == 'pixel'):
        self.update_goal_image()
    return
def get_object_full_state(self, key):
    """Return the full state of one object in the stage.

    :param key: (str) name of the object to query.

    :raises Exception: if the key is unknown to the stage.

    :return: (dict) the state of the queried object.
    """
    for registry in (self._rigid_objects, self._visual_objects):
        if key in registry:
            return registry[key].get_state('dict')
    raise Exception(
        "The key {} passed doesn't exist in the stage yet".format(key))
def get_object_state(self, key, state_variable):
    """Return one state variable of a stage object as an array.

    :param key: (str) name of the object to query.
    :param state_variable: (str) name of the variable to read.

    :raises Exception: if the key is unknown to the stage.

    :return: (nd.array) the value of the queried variable.
    """
    if key in self._rigid_objects:
        target = self._rigid_objects[key]
    elif key in self._visual_objects:
        target = self._visual_objects[key]
    else:
        raise Exception(
            "The key {} passed doesn't exist in the stage yet".format(key))
    return np.array(target.get_variable_state(state_variable))
def get_object(self, key):
    """Return the stage object registered under ``key``.

    :param key: (str) name of the object to return.

    :raises Exception: if the key is unknown to the stage.

    :return: (causal_world.RigidObject or causal_world.SilhouetteObject)
             the requested object.
    """
    for registry in (self._rigid_objects, self._visual_objects):
        if key in registry:
            return registry[key]
    raise Exception(
        "The key {} passed doesn't exist in the stage yet".format(key))
def are_blocks_colliding(self, block1, block2):
    """Check whether two rigid blocks are currently in contact.

    :param block1: (causal_world.RigidObject) first block.
    :param block2: (causal_world.RigidObject) second block.

    :return: (bool) True if the two blocks passed are colliding.
    """
    # contact[1]/contact[2] are the unique ids of body A and body B of the
    # contact point (pybullet getContactPoints); check both orderings.
    for contact in pybullet.getContactPoints(physicsClientId=self._rigid_objects_client_instances[0]):
        if (((contact[1] == block1._block_ids[0]) and (contact[2] == block2._block_ids[0])) or ((contact[2] == block1._block_ids[0]) and (contact[1] == block2._block_ids[0]))):
            return True
    return False
def check_stage_free_of_colliding_blocks(self):
    """Check that no two blocks are in contact with each other.

    :return: (bool) True if the stage is free of block-block collisions.
    """
    for contact in pybullet.getContactPoints(physicsClientId=self._rigid_objects_client_instances[0]):
        # Body ids > 3 are treated as blocks; ids <= 3 are presumably the
        # robot, stage and floor -- TODO confirm against WorldConstants.
        if ((contact[1] > 3) and (contact[2] > 3)):
            return False
    return True
def is_colliding_with_stage(self, block1):
    """Check whether a block is in contact with the stage.

    :param block1: (causal_world.RigidObject) block to check.

    :return: (bool) True if the block is colliding with the stage.
    """
    # contact[1]/contact[2] are the two body ids of the contact point;
    # check both orderings against the block and the stage.
    for contact in pybullet.getContactPoints(physicsClientId=self._rigid_objects_client_instances[0]):
        if (((contact[1] == block1._block_ids[0]) and (contact[2] == WorldConstants.STAGE_ID)) or ((contact[2] == block1._block_ids[0]) and (contact[1] == WorldConstants.STAGE_ID))):
            return True
    return False
def is_colliding_with_floor(self, block1):
    """Check whether a block is in contact with the floor.

    :param block1: (causal_world.RigidObject) block to check.

    :return: (bool) True if the block is colliding with the floor.
    """
    # Same contact-pair scan as is_colliding_with_stage, against FLOOR_ID.
    for contact in pybullet.getContactPoints(physicsClientId=self._rigid_objects_client_instances[0]):
        if (((contact[1] == block1._block_ids[0]) and (contact[2] == WorldConstants.FLOOR_ID)) or ((contact[2] == block1._block_ids[0]) and (contact[1] == WorldConstants.FLOOR_ID))):
            return True
    return False
def get_normal_interaction_force_between_blocks(self, block1, block2):
    """Return the normal interaction force vector between two blocks.

    :param block1: (causal_world.RigidObject) first block.
    :param block2: (causal_world.RigidObject) second block.

    :return: (nd.array or None) normal force vector at the first contact
             point found, or None if the blocks are not in contact.
    """
    for contact in pybullet.getContactPoints(physicsClientId=self._rigid_objects_client_instances[0]):
        if (((contact[1] == block1._block_ids[0]) and (contact[2] == block2._block_ids[0])) or ((contact[2] == block1._block_ids[0]) and (contact[1] == block2._block_ids[0]))):
            # contact[9] is the normal force magnitude and contact[7] the
            # contact normal direction (pybullet getContactPoints).
            return (contact[9] * np.array(contact[7]))
    return None
def add_observation(self, observation_key, lower_bound=None, upper_bound=None):
    """Register a new observation key with optional bounds.

    :param observation_key: (str) new observation key to be added.
    :param lower_bound: (nd.array) lower bound of the observation.
    :param upper_bound: (nd.array) upper bound of the observation.

    :return:
    """
    observations_helper = self._stage_observations
    observations_helper.add_observation(observation_key,
                                        lower_bound,
                                        upper_bound)
def normalize_observation_for_key(self, observation, key):
    """Normalize an observation using the named key's bounds.

    :param observation: (nd.array) observation to normalize.
    :param key: (str) observation key whose bounds are used.

    :return: (nd.array) normalized observation.
    """
    observations_helper = self._stage_observations
    return observations_helper.normalize_observation_for_key(observation, key)
def denormalize_observation_for_key(self, observation, key):
    """Denormalize an observation using the named key's bounds.

    :param observation: (nd.array) observation to denormalize.
    :param key: (str) observation key whose bounds are used.

    :return: (nd.array) denormalized observation.
    """
    observations_helper = self._stage_observations
    return observations_helper.denormalize_observation_for_key(observation, key)
def get_current_goal_image(self):
    """Return the cached goal image.

    :return: (nd.array) the concatenated goal images; only populated when
             the 'pixel' observation mode is enabled.
    """
    goal_image = self._goal_image
    return goal_image
def update_goal_image(self):
    """Re-render the goal image and cache it on the stage.

    :return:
    """
    observations_helper = self._stage_observations
    self._goal_image = observations_helper.get_current_goal_image()
def check_feasiblity_of_stage(self):
    """Check the feasibility of the current state of the stage.

    A state is infeasible if any simulated bodies penetrate each other
    beyond a small tolerance, or if a visual (goal) object lies mostly
    outside the arena bounding box or below the floor.

    :return: (bool) True if the stage is in a feasible state.
    """
    for contact in pybullet.getContactPoints(physicsClientId=self._rigid_objects_client_instances[0]):
        # contact[8] is the contact distance (pybullet getContactPoints);
        # values below -0.03 indicate deep penetration.
        if (contact[8] < (- 0.03)):
            return False
    for visual_object in self._visual_objects:
        # Require at least half of each goal shape's volume to overlap
        # with the arena bounding box.
        if ((get_intersection(self._visual_objects[visual_object].get_bounding_box(), self.get_stage_bb()) / self._visual_objects[visual_object].get_volume()) < 0.5):
            return False
        # Reject goal shapes whose lower corner's z coordinate is below
        # the floor (small -0.01 tolerance).
        if (self._visual_objects[visual_object].get_bounding_box()[0][(- 1)] < (- 0.01)):
            return False
    return True
def get_stage_bb(self):
    """Return the bounding box of the stage arena.

    :return: (tuple) (lower, upper) where each is an (x, y, z) tuple of
             the arena bounding-box corner.
    """
    arena_bb = WorldConstants.ARENA_BB
    return (tuple(arena_bb[0]), tuple(arena_bb[1]))
|
class EvaluationPipeline(object):
    """Evaluate a trained policy on a set of evaluation protocols.

    :param evaluation_protocols: (list) the protocols that will be
                                 evaluated in this pipeline.
    :param tracker_path: (str) directory containing a 'tracker' file saved
                         during training; if given, task and world params
                         are restored from it and the explicit
                         world_params/task_params are ignored.
    :param world_params: (dict) world params to set up the environment
                         (skip_frame, normalization params, ...).
    :param task_params: (dict) params of the task the policy is evaluated
                        on.
    :param visualize_evaluation: (bool) whether the evaluation is
                                 visualized in the GUI.
    :param initial_seed: (int) random seed of the evaluation, for
                         reproducibility.
    """

    def __init__(self, evaluation_protocols, tracker_path=None, world_params=None, task_params=None, visualize_evaluation=False, initial_seed=0):
        self.initial_seed = initial_seed
        # Episodes are kept in memory only (no output directory).
        self.data_recorder = DataRecorder(output_directory=None)
        if (tracker_path is not None):
            self.tracker = Tracker(file_path=os.path.join(tracker_path, 'tracker'))
            task_stats = self.tracker.task_stats_log[0]
            # These two are passed explicitly to generate_task below, so
            # drop them from the stored kwargs to avoid duplicates.
            del task_stats.task_params['variables_space']
            del task_stats.task_params['task_name']
            self.task = generate_task(task_generator_id=task_stats.task_name, **task_stats.task_params, variables_space='space_a_b')
        else:
            # NOTE(review): 'task_name' is only deleted when
            # 'variables_space' is present -- looks asymmetric; confirm
            # both keys always co-occur in task_params.
            if ('variables_space' in task_params):
                del task_params['task_name']
                del task_params['variables_space']
            self.task = generate_task(**task_params, variables_space='space_a_b')
        if tracker_path:
            # Seed and wrappers are re-specified here, so strip any stored
            # ones before forwarding the world params.
            if ('seed' in self.tracker.world_params):
                del self.tracker.world_params['seed']
            if ('wrappers' in self.tracker.world_params):
                del self.tracker.world_params['wrappers']
            self.env = CausalWorld(self.task, **self.tracker.world_params, seed=self.initial_seed, data_recorder=self.data_recorder, enable_visualization=visualize_evaluation)
        elif (world_params is not None):
            if ('seed' in world_params):
                del world_params['seed']
            self.env = CausalWorld(self.task, **world_params, seed=self.initial_seed, data_recorder=self.data_recorder, enable_visualization=visualize_evaluation)
        else:
            self.env = CausalWorld(self.task, seed=self.initial_seed, data_recorder=self.data_recorder, enable_visualization=visualize_evaluation)
        # Episode length in timesteps = default episode seconds / env dt.
        evaluation_episode_length_in_secs = self.task.get_default_max_episode_length()
        self.time_steps_for_evaluation = int((evaluation_episode_length_in_secs / self.env.dt))
        self.evaluation_env = self.env
        self.evaluation_protocols = evaluation_protocols
        self.metrics_list = []
        self.metrics_list.append(MeanFullIntegratedFractionalSuccess())
        self.metrics_list.append(MeanLastIntegratedFractionalSuccess())
        self.metrics_list.append(MeanLastFractionalSuccess())
        return

    def run_episode(self, policy_fn):
        """Run one full episode with the given policy.

        :param policy_fn: (func) maps an observation to an action.
        :return: (causal_world.loggers.Episode) the recorded episode.
        """
        obs = self.evaluation_env.reset()
        done = False
        while (not done):
            desired_action = policy_fn(obs)
            (obs, rew, done, info) = self.evaluation_env.step(desired_action)
        return self.data_recorder.get_current_episode()

    def process_metrics(self, episode):
        """Feed an episode to every metric in the pipeline.

        :param episode: (causal_world.loggers.Episode) episode to process.
        :return: (None)
        """
        for metric in self.metrics_list:
            metric.process_episode(episode)
        return

    def get_metric_scores(self):
        """Return mean/std scores for every metric in the pipeline.

        :return: (dict) maps 'mean_<metric.name>' and 'std_<metric.name>'
                 to the corresponding score values.
        """
        metrics = dict()
        for metric in self.metrics_list:
            (mean, std) = metric.get_metric_score()
            metrics[('mean_' + metric.name)] = mean
            metrics[('std_' + metric.name)] = std
        return metrics

    def reset_metric_scores(self):
        """Reset the accumulated scores of every metric.

        :return:
        """
        for metric in self.metrics_list:
            metric.reset()

    def evaluate_policy(self, policy, fraction=1):
        """Evaluate a policy on every protocol of the pipeline.

        :param policy: (func) maps an observation to an action.
        :param fraction: (float) fraction of the protocol's default number
                         of episodes to run (can be higher than one).
        :return: (dict) per-protocol dict of metric scores.
        """
        pipeline_scores = dict()
        for evaluation_protocol in self.evaluation_protocols:
            logging.info(('Applying the following protocol now, ' + str(evaluation_protocol.get_name())))
            # Wrap the env so the protocol can inject its interventions.
            self.evaluation_env = ProtocolWrapper(self.env, evaluation_protocol)
            evaluation_protocol.init_protocol(env=self.env, tracker=self.env.get_tracker(), fraction=fraction)
            episodes_in_protocol = evaluation_protocol.get_num_episodes()
            for _ in range(episodes_in_protocol):
                current_episode = self.run_episode(policy)
                self.process_metrics(current_episode)
                # Drop the episode once processed to bound memory usage.
                self.data_recorder.clear_recorder()
            scores = self.get_metric_scores()
            scores['total_intervention_steps'] = self.env.get_tracker().get_total_intervention_steps()
            scores['total_interventions'] = self.env.get_tracker().get_total_interventions()
            scores['total_timesteps'] = self.env.get_tracker().get_total_time_steps()
            scores['total_resets'] = self.env.get_tracker().get_total_resets()
            pipeline_scores[evaluation_protocol.get_name()] = scores
            # Metric objects are reused across protocols; reset in between.
            self.reset_metric_scores()
        self.evaluation_env.close()
        self.pipeline_scores = pipeline_scores
        return pipeline_scores

    def save_scores(self, evaluation_path, prefix=None):
        """Save the last pipeline scores as a JSON file.

        :param evaluation_path: (str) directory the scores are saved in
                                (created if missing).
        :param prefix: (str) optional prefix for the file name.

        :return:
        """
        if (not os.path.isdir(evaluation_path)):
            os.makedirs(evaluation_path)
        if (prefix is None):
            file_path = os.path.join(evaluation_path, 'scores.json')
        else:
            file_path = os.path.join(evaluation_path, '{}_scores.json'.format(prefix))
        with open(file_path, 'w') as json_file:
            json.dump(self.pipeline_scores, json_file, indent=4)
|
class FullyRandomProtocol(ProtocolBase):
    """Protocol that, at the start of every episode, samples an
    intervention for every exposed variable uniformly from the chosen
    intervention space.
    """

    def __init__(self, name, variable_space='space_a_b'):
        """
        :param name: (str) name of the protocol to be reported.
        :param variable_space: (str) "space_a", "space_b" or "space_a_b".
        """
        super().__init__(name)
        self._variable_space = variable_space

    def get_intervention(self, episode, timestep):
        """Return the interventions applied at a given timestep.

        :param episode: (int) episode number of the protocol.
        :param timestep: (int) time step within the episode.
        :return: (dict or None) intervention dictionary at timestep 0,
                 None at every later timestep.
        """
        if (timestep == 0):
            if (self._variable_space == 'space_a_b'):
                intervention_space = self.env.get_intervention_space_a_b()
            elif (self._variable_space == 'space_a'):
                intervention_space = self.env.get_intervention_space_a()
            elif (self._variable_space == 'space_b'):
                intervention_space = self.env.get_intervention_space_b()
            interventions_dict = dict()
            # Per episode, intervene on either 'size' or
            # 'cylindrical_position' of nested variables (mutually
            # exclusive below), chosen with probability 0.5 each.
            intervene_on_size = np.random.choice([0, 1], p=[0.5, 0.5])
            # NOTE(review): p=[1, 0] means joint positions are never
            # intervened on; presumably deliberate (the draw may be kept
            # for random-stream compatibility) -- confirm.
            intervene_on_joint_positions = np.random.choice([0, 1], p=[1, 0])
            for variable in intervention_space:
                if isinstance(intervention_space[variable], dict):
                    # Nested (object) variable: sample each allowed
                    # subvariable within its [low, high] bounds.
                    interventions_dict[variable] = dict()
                    for subvariable_name in intervention_space[variable]:
                        if ((subvariable_name == 'cylindrical_position') and intervene_on_size):
                            continue
                        if ((subvariable_name == 'size') and (not intervene_on_size)):
                            continue
                        interventions_dict[variable][subvariable_name] = np.random.uniform(intervention_space[variable][subvariable_name][0], intervention_space[variable][subvariable_name][1])
                else:
                    if ((not intervene_on_joint_positions) and (variable == 'joint_positions')):
                        continue
                    interventions_dict[variable] = np.random.uniform(intervention_space[variable][0], intervention_space[variable][1])
            return interventions_dict
        else:
            return None

    def _init_protocol_helper(self):
        """Set the env's intervention space to this protocol's space.

        Called by init_protocol() after the environment is attached.

        :return:
        """
        if (self._variable_space == 'space_a_b'):
            self.env.set_intervention_space(variables_space='space_a_b')
        elif (self._variable_space == 'space_a'):
            self.env.set_intervention_space(variables_space='space_a')
        elif (self._variable_space == 'space_b'):
            self.env.set_intervention_space(variables_space='space_b')
        return
|
class ProtocolBase(object):
    """Base class every evaluation protocol inherits from.

    The default number of evaluation episodes is 200; it can be scaled by
    the ``fraction`` argument of :meth:`init_protocol`.

    :param name: (str) name of the protocol.
    """

    def __init__(self, name):
        self.name = name
        self.num_evaluation_episodes_default = 200
        self.num_evaluation_episodes = self.num_evaluation_episodes_default

    def init_protocol(self, env, tracker, fraction=1):
        """Attach the environment and configure the episode count.

        :param env: (CausalWorld) environment to evaluate in.
        :param tracker: (Tracker) tracker of the environment.
        :param fraction: (float) fraction of the default number of
                         episodes to run (can be higher than one).

        :raises ValueError: if fraction is not strictly positive.

        :return:
        """
        self.env = env
        self.env.set_intervention_space(variables_space='space_a_b')
        self.tracker = tracker
        if not fraction > 0:
            raise ValueError(
                'fraction of episodes for evaluation needs to be strictly positive')
        self.num_evaluation_episodes = int(
            self.num_evaluation_episodes_default * fraction)
        self._init_protocol_helper()
        return

    def _init_protocol_helper(self):
        """Hook for subclasses to finish initialization once the
        environment is attached; the base implementation does nothing.

        :return:
        """
        return

    def get_name(self):
        """Return the name of the protocol.

        :return: (str) protocol name.
        """
        return self.name

    def get_num_episodes(self):
        """Return the number of evaluation episodes in this protocol.

        :return: (int) number of episodes in the protocol.
        """
        return self.num_evaluation_episodes

    def get_intervention(self, episode, timestep):
        """Return the interventions applied at a given timestep.

        :param episode: (int) episode number of the protocol.
        :param timestep: (int) time step within the episode.
        :return: (dict) intervention dictionary.
        """
        raise NotImplementedError()
|
class ProtocolGenerator(ProtocolBase):
    """Protocol that intervenes only on regex-selected variables.

    At the start of every episode it samples, uniformly from the chosen
    intervention space, a value for each variable fully matching
    ``first_level_regex`` (and, for nested variables, each subvariable
    fully matching ``second_level_regex``).
    """

    def __init__(self, name, first_level_regex, second_level_regex, variable_space='space_a_b'):
        """
        :param name: (str) name of the protocol to be reported.
        :param first_level_regex: (str) regex for the first level of
                                  variables.
        :param second_level_regex: (str) regex for the second level of
                                   variables.
        :param variable_space: (str) "space_a", "space_b" or "space_a_b".
        """
        super().__init__(name)
        self._first_level_regex = first_level_regex
        self._second_level_regex = second_level_regex
        self._variable_space = variable_space

    def get_intervention(self, episode, timestep):
        """Return the interventions applied at a given timestep.

        :param episode: (int) episode number of the protocol.
        :param timestep: (int) time step within the episode.
        :return: (dict or None) intervention dictionary at timestep 0,
                 None at every later timestep.
        """
        if (timestep == 0):
            intervention_dict = dict()
            if (self._variable_space == 'space_a_b'):
                intervention_space = self.env.get_intervention_space_a_b()
            elif (self._variable_space == 'space_a'):
                intervention_space = self.env.get_intervention_space_a()
            elif (self._variable_space == 'space_b'):
                intervention_space = self.env.get_intervention_space_b()
            for variable in intervention_space:
                # Only variables whose full name matches the regex.
                if re.fullmatch(self._first_level_regex, variable):
                    if (not isinstance(intervention_space[variable], dict)):
                        # Flat variable: sample within [low, high].
                        intervention_dict[variable] = np.random.uniform(intervention_space[variable][0], intervention_space[variable][1])
                    else:
                        # Nested variable: sample each matching subvariable.
                        intervention_dict[variable] = dict()
                        for subvariable in intervention_space[variable]:
                            if re.fullmatch(self._second_level_regex, subvariable):
                                intervention_dict[variable][subvariable] = np.random.uniform(intervention_space[variable][subvariable][0], intervention_space[variable][subvariable][1])
            return intervention_dict
        else:
            return None

    def _init_protocol_helper(self):
        """Set the env's intervention space to this protocol's space.

        Called by init_protocol() after the environment is attached.

        :return:
        """
        if (self._variable_space == 'space_a_b'):
            self.env.set_intervention_space(variables_space='space_a_b')
        elif (self._variable_space == 'space_a'):
            self.env.set_intervention_space(variables_space='space_a')
        elif (self._variable_space == 'space_b'):
            self.env.set_intervention_space(variables_space='space_b')
        return
|
def bar_plots(output_path, data):
    """Save one grouped bar plot per metric comparing experiments.

    :param output_path: (str) directory the .png files are written to.
    :param data: (list) aggregated data as produced by
                 aggregated_data_from_experiments():
                 [protocol_labels, experiment_labels, metric_labels,
                 scores-per-metric].
    :return:
    """
    protocol_labels = data[0]
    experiment_labels = data[1]
    metric_labels = data[2]
    # One group of bars per protocol, placed at integer tick positions.
    x = np.arange(len(protocol_labels))
    # NOTE(review): only 3 colors, so more than 3 experiments would raise
    # IndexError on colors[index] -- confirm intended limit.
    colors = ['blue', 'orange', 'green']
    for metric_label in data[3]:
        metric_scores = data[3][metric_label]
        num_groups = len(metric_scores)
        # Total group width of 0.7 split evenly between the experiments.
        width = (0.7 / num_groups)
        (fig, ax) = plt.subplots()

        def autolabel(rects):
            'Attach a text label above each bar in *rects*, displaying its height.'
            for rect in rects:
                height = rect.get_height()
                ax.annotate('{}'.format(round(height, 2)), xy=((rect.get_x() + (rect.get_width() / 2)), height), xytext=(0, 3), textcoords='offset points', ha='center', va='bottom', rotation=90)

        for (index, experiment_label) in enumerate(metric_scores):
            (experiment_scores_mean, experiment_scores_err) = metric_scores[experiment_label]
            # Center the group of bars around each protocol tick.
            rects = ax.bar(((x - (((num_groups - 1) * width) / 2)) + (width * index)), experiment_scores_mean, width, label=experiment_labels[index], color=colors[index])
            autolabel(rects)
        ax.set_ylabel('fractional success')
        # Strip the 'mean_' prefix from the metric name for the title.
        ax.set_title(metric_label[5:])
        ax.set_xticks(x)
        ax.set_ylim((0, 1.2))
        ax.set_xticklabels(protocol_labels, rotation='vertical')
        ax.legend()
        fig.tight_layout()
        plt.savefig(os.path.join(output_path, 'bar_plots_{}.png'.format(metric_label)))
|
def bar_plots_with_protocol_table(output_path, data, protocol_settings, task):
    """Save one grouped bar plot per metric with a protocol-settings table.

    :param output_path: (str) directory the .pdf files are written to.
    :param data: (list) aggregated data as produced by
                 aggregated_data_from_experiments():
                 [protocol_labels, experiment_labels, metric_labels,
                 scores-per-metric].
    :param protocol_settings: (dict) per-protocol settings rendered as the
                              table below the plot; all protocols must
                              share the same setting keys.
    :param task: unused; kept for interface compatibility.
    :return:
    """
    protocol_labels = data[0]
    # Protocols are shown as short ids (P0, P1, ...) in the table header.
    protocol_ids = ['P{}'.format(i) for i in range(len(protocol_labels))]
    experiment_labels = data[1]
    x = np.arange(len(protocol_labels))
    # LaTeX text rendering with serif fonts for publication-style output.
    mpl.rc('text', usetex=True)
    tex_fonts = {'text.usetex': True, 'font.family': 'serif', 'font.serif': ['computer modern'], 'axes.labelsize': 10, 'font.size': 10, 'axes.titlesize': 10, 'legend.fontsize': 8, 'xtick.labelsize': 8, 'ytick.labelsize': 8}
    mpl.rcParams.update(tex_fonts)
    # 3x3 palette grouped by hue (blue, red, green shades). The previous
    # duplicate assignment that was immediately overwritten was removed.
    colors = ['#3182bd', '#9ecae1', '#deebf7', '#de2d26', '#fc9272', '#fee0d2', '#31a354', '#a1d99b', '#e5f5e0']
    # Table rows/cells depend only on protocol_settings; build them once
    # instead of rebuilding inside the per-experiment loop.
    row_labels = list(protocol_settings[list(protocol_settings.keys())[0]].keys())
    cell_text = list()
    for row_label in row_labels:
        cell_text.append(['{}'.format(protocol_settings[protocol_label][row_label]) for protocol_label in protocol_settings])
    for metric_label in data[3]:
        metric_scores = data[3][metric_label]
        num_groups = len(metric_scores)
        fig_width = 5.5
        fig_height = (fig_width / 3)
        (fig, ax) = plt.subplots(figsize=(fig_width, fig_height))
        fig.set_size_inches(fig_width, fig_height)
        # Total group width of 0.7 split evenly between the experiments.
        width = (0.7 / num_groups)
        spare_width = 0.5
        ax.set_xlim((- spare_width), (len(protocol_labels) - spare_width))
        for (index, experiment_label) in enumerate(metric_scores):
            (experiment_scores_mean, experiment_scores_err) = metric_scores[experiment_label]
            # Clip the upper error bar so mean + err never exceeds 1.0.
            experiment_scores_std_list_upper = [min(std, (1.0 - mean)) for (mean, std) in zip(experiment_scores_mean, experiment_scores_err)]
            plt.bar(((x - (((num_groups - 1) * width) / 2)) + (width * index)), experiment_scores_mean, width, yerr=(experiment_scores_err, experiment_scores_std_list_upper), error_kw=dict(lw=1, capsize=1, capthick=1), label=experiment_labels[index], color=colors[index])
        ax.set_ylabel('fractional success', fontsize=8)
        plt.legend(ncol=3, loc='upper right', prop={'size': 6})
        ax.set_ylim((0, 1.2))
        plt.yticks(fontsize=8)
        # Protocol ids are shown as table column headers instead of ticks.
        ax.get_xaxis().set_visible(False)
        table = plt.table(cellText=cell_text, rowLabels=row_labels, colLabels=protocol_ids, loc='bottom')
        table.auto_set_font_size(False)
        table.set_fontsize(8)
        cellDict = table.get_celld()
        # Column -1 holds the row labels; row 0 holds the column headers.
        for i in range((- 1), len(protocol_ids)):
            if (i != (- 1)):
                cellDict[(0, i)].set_height(0.11)
            for j in range(1, (len(row_labels) + 1)):
                if (j == 2):
                    cellDict[(j, i)].set_height(0.15)
                else:
                    cellDict[(j, i)].set_height(0.11)
                cellDict[(j, i)].set_fontsize(6)
        fig.subplots_adjust(bottom=0.33, left=0.11, right=0.99, top=0.98)
        plt.savefig(os.path.join(output_path, 'bar_plots_protocol_table_{}.pdf'.format(metric_label)), dpi=300)
|
def radar_factory(num_vars, frame='circle'):
    """Create a radar chart projection with `num_vars` axes.

    Registers a 'radar' projection with matplotlib (usable via
    ``subplot_kw=dict(projection='radar')``) and returns the axis angles.

    NOTE(review): the patch/spine override style used here
    (``draw_patch`` attribute, ``_gen_axes_spines`` returning a dict with
    a manually built Spine) targets an older matplotlib API -- confirm
    against the installed matplotlib version.

    :param num_vars: (int) number of variables (axes) for the radar chart.
    :param frame: (str) shape of frame surrounding axes,
                  {'circle' | 'polygon'}.
    :return: (np.ndarray) evenly spaced axis angles in radians.
    """
    # Evenly spaced axis angles, one per variable.
    theta = np.linspace(0, (2 * np.pi), num_vars, endpoint=False)

    def draw_poly_patch(self):
        # Rotated by pi/2 so the polygon's first vertex points up.
        verts = unit_poly_verts((theta + (np.pi / 2)))
        return plt.Polygon(verts, closed=True, edgecolor='k')

    def draw_circle_patch(self):
        # Unit circle centered on (0.5, 0.5) in axes coordinates.
        return plt.Circle((0.5, 0.5), 0.5)

    patch_dict = {'polygon': draw_poly_patch, 'circle': draw_circle_patch}
    if (frame not in patch_dict):
        raise ValueError(('unknown value for `frame`: %s' % frame))

    class RadarAxes(PolarAxes):
        # Projection name registered with matplotlib.
        name = 'radar'
        # Number of interpolation steps per line segment (straight lines).
        RESOLUTION = 1
        draw_patch = patch_dict[frame]

        def __init__(self, *args, **kwargs):
            super(RadarAxes, self).__init__(*args, **kwargs)
            # Rotate the plot so the first axis is at the top.
            self.set_theta_zero_location('N')

        def fill(self, *args, **kwargs):
            'Override fill so that line is closed by default'
            closed = kwargs.pop('closed', True)
            return super(RadarAxes, self).fill(*args, closed=closed, **kwargs)

        def plot(self, *args, **kwargs):
            'Override plot so that line is closed by default'
            lines = super(RadarAxes, self).plot(*args, **kwargs)
            for line in lines:
                self._close_line(line)

        def _close_line(self, line):
            # Append the first point again so the polyline closes.
            (x, y) = line.get_data()
            if (x[0] != x[(- 1)]):
                x = np.concatenate((x, [x[0]]))
                y = np.concatenate((y, [y[0]]))
                line.set_data(x, y)

        def set_varlabels(self, labels):
            # One label per axis angle, in degrees.
            self.set_thetagrids(np.degrees(theta), labels)

        def _gen_axes_patch(self):
            return self.draw_patch()

        def _gen_axes_spines(self):
            if (frame == 'circle'):
                # The default polar spines already draw a circle.
                return PolarAxes._gen_axes_spines(self)
            # Build a polygonal spine matching the polygon patch.
            spine_type = 'circle'
            verts = unit_poly_verts((theta + (np.pi / 2)))
            # Close the polygon path.
            verts.append(verts[0])
            path = Path(verts)
            spine = Spine(self, spine_type, path)
            spine.set_transform(self.transAxes)
            return {'polar': spine}

    register_projection(RadarAxes)
    return theta
|
def unit_poly_verts(theta):
    """Return vertices of a regular polygon for the radar subplot axes.

    The polygon is circumscribed by a circle of radius 0.5 centered at
    (0.5, 0.5) in axes coordinates.

    :param theta: iterable of vertex angles in radians.
    :return: (list) list of (x, y) vertex tuples.
    """
    center_x = center_y = radius = 0.5
    return [(center_x + radius * np.cos(angle),
             center_y + radius * np.sin(angle)) for angle in theta]
|
def radar_plots(output_path, data):
    """Save one radar (spider) plot per metric comparing experiments.

    :param output_path: (str) directory the .png files are written to.
    :param data: (list) aggregated data as produced by
                 aggregated_data_from_experiments():
                 [protocol_labels, experiment_labels, metric_labels,
                 scores-per-metric].
    :return:
    """
    protocol_labels = data[0]
    experiment_labels = data[1]
    N = len(protocol_labels)
    # One radar axis per protocol; registers the 'radar' projection.
    theta = radar_factory(N, frame='circle')
    # NOTE(review): only 3 colors; zip() silently drops experiments past
    # the third -- confirm intended limit. The previous duplicate colors
    # assignment that was immediately overwritten was removed.
    colors = ['blue', 'orange', 'green']
    for metric_label in data[3]:
        (fig, ax) = plt.subplots(figsize=(9, 9), nrows=1, ncols=1, subplot_kw=dict(projection='radar'))
        fig.subplots_adjust(wspace=0.25, hspace=0.2, top=0.85, bottom=0.05)
        metric_scores = data[3][metric_label]
        ax.set_rgrids([0.2, 0.4, 0.6, 0.8])
        ax.set_title(metric_label, weight='bold', size='medium', position=(0.5, 1.1), horizontalalignment='center', verticalalignment='center')
        for (experiment_label, color) in zip(metric_scores, colors[:len(metric_scores)]):
            (experiment_scores_mean, experiment_scores_err) = metric_scores[experiment_label]
            ax.plot(theta, experiment_scores_mean, color=color)
            # alpha=0 keeps the closed outline without a visible fill.
            ax.fill(theta, experiment_scores_mean, facecolor=color, alpha=0.0)
        ax.set_varlabels(protocol_labels)
        ax.set_ylim(0, 1.0)
        ax.legend(experiment_labels, loc=(0.85, 0.95), labelspacing=0.1, fontsize='small')
        fig.text(0.5, 0.965, 'radar_plots_automatic_evaluation_causal_world', horizontalalignment='center', color='black', weight='bold', size='large')
        plt.savefig(os.path.join(output_path, 'radar_plots_{}.png'.format(metric_label)))
|
def aggregated_data_from_experiments(experiments, contains_err=False):
    """
    experiments: Is a dict of score dicts with each key being the scores of
    an experiment.
    contains_err: If True, for each metric score an error is expected under
    the key metric_label + '_std'.

    Returns: a structured list that can be processed by the plotters:
        first element is a list of the protocol_names
        second element is a list of the experiment_labels
        third element is a list of the metric_labels
        fourth element is the dict of the scores:
            per metric label (key) the value are the metric_scores (a dict)
            per experiment_label (key) the value is a tuple of two lists:
                a list of scores for each protocol
                a list of stds of the scores for each protocol
    """
    experiment_labels = list(experiments.keys())
    first_experiment = experiments[experiment_labels[0]]
    protocol_labels = list(first_experiment.keys())
    # metric labels are the '*mean*' keys, excluding their '*_std' partners
    metric_labels = [label
                     for label in first_experiment[protocol_labels[0]]
                     if 'mean' in label and 'std' not in label]
    scores = dict()
    for metric_label in metric_labels:
        metric_scores = dict()
        for experiment_label in experiment_labels:
            means = [experiments[experiment_label][protocol][metric_label]
                     for protocol in protocol_labels]
            if contains_err:
                stds = [experiments[experiment_label][protocol][metric_label +
                                                                '_std']
                        for protocol in protocol_labels]
            else:
                stds = [0.0] * len(protocol_labels)
            metric_scores[experiment_label] = (means, stds)
        scores[metric_label] = metric_scores
    return [protocol_labels, experiment_labels, metric_labels, scores]
|
def generate_visual_analysis(output_path, experiments):
    """
    saves bar plots as well as radar plots for quick comparisons of the
    policies passed.

    :param output_path: (str) specifies the output path for saving the plot
                        results.
    :param experiments: (dict) specifies the experiment name as a key and the
                        scores json data as the value.
    :return:
    """
    # makedirs (instead of mkdir) also creates missing parent directories
    # and, with exist_ok=True, avoids the check-then-create race.
    os.makedirs(output_path, exist_ok=True)
    data = aggregated_data_from_experiments(experiments)
    radar_plots(output_path, data)
    bar_plots(output_path, data)
    return
|
class BaseInterventionActorPolicy(object):

    def __init__(self, **kwargs):
        """
        This class indicates the interface of an intervention actor.

        :param kwargs: (params) parameters for the construction of the actor.
        """
        return

    def initialize(self, env):
        """
        This function allows the intervention actor to query things from the
        env, such as intervention spaces or to have access to sampling funcs
        for goals..etc

        :param env: (causal_world.env.CausalWorld) the environment used for
                    the intervention actor to query different methods from it.

        :return:
        """
        return

    def act(self, variables_dict):
        """
        This function enables the intervention actor to decide on specific
        interventions.

        :param variables_dict: (dict) The current dict of variables that it
                               can intervene on with their current values.
                               (this can be a two level dict)

        :return: (dict) interventions decided by the intervention actor to
                 intervene on.
        """
        interventions_dict = self._act(variables_dict)
        self.__validate_intervention_dict(variables_dict, interventions_dict)
        return interventions_dict

    def _act(self, variables_dict):
        """
        Subclass hook computing the interventions; the base implementation
        intervenes on nothing.

        :param variables_dict: (dict) variables available for intervention.

        :return: (dict) the chosen interventions.
        """
        return {}

    def __validate_intervention_dict(self, variables_dict, intervention_dict):
        """
        Raises if the actor tried to intervene on a variable that is not
        part of its input.

        :param variables_dict: (dict) variables available for intervention.
        :param intervention_dict: (dict) interventions chosen by the actor.

        :return:
        """
        for intervention in intervention_dict:
            if (intervention not in variables_dict):
                raise Exception('the meta actor performed an invalid intervention on a variable that is not part of its input')

    def get_params(self):
        """
        returns parameters that could be used in recreating this intervention
        actor.

        :return: (dict) specifying parameters to create this intervention
                 actor again.
        """
        # NotImplementedError (instead of a bare Exception) matches the
        # convention used by the other abstract interfaces in this file and
        # is still caught by any caller handling Exception.
        raise NotImplementedError('get params is not implemented')
|
class GoalInterventionActorPolicy(BaseInterventionActorPolicy):

    def __init__(self, **kwargs):
        """
        Intervention actor that intervenes by sampling a new goal from the
        environment.

        :param kwargs: (params) parameters for the construction of the actor.
        """
        super(GoalInterventionActorPolicy, self).__init__()
        self.goal_sampler_function = None

    def initialize(self, env):
        """
        Hooks this actor up to the environment's goal sampler.

        :param env: (causal_world.env.CausalWorld) the environment used for
                    the intervention actor to query different methods from it.

        :return:
        """
        self.goal_sampler_function = env.sample_new_goal
        return

    def _act(self, variables_dict):
        """
        :param variables_dict: (dict) unused; a fresh goal is sampled instead.
        :return: (dict) interventions describing the newly sampled goal.
        """
        return self.goal_sampler_function()

    def get_params(self):
        """
        returns parameters that could be used in recreating this intervention
        actor.

        :return: (dict) specifying parameters to create this intervention
                 actor again.
        """
        return dict(goal_actor=dict())
|
class JointsInterventionActorPolicy(BaseInterventionActorPolicy):

    def __init__(self, **kwargs):
        """
        Intervention actor that randomizes the robot joints by sampling
        random fingertip targets and solving inverse kinematics for them.

        :param kwargs:
        """
        super(JointsInterventionActorPolicy, self).__init__()
        self.task_intervention_space = None
        self._inverse_kinemetics_func = None
        self._stage_bb = None

    def initialize(self, env):
        """
        Queries the variable space, the IK solver and the stage bounding box
        from the environment.

        :param env: (causal_world.env.CausalWorld) the environment used for
                    the intervention actor to query different methods from it.

        :return:
        """
        self.task_intervention_space = env.get_variable_space_used()
        self._inverse_kinemetics_func = env.get_robot().inverse_kinematics
        self._stage_bb = env.get_stage().get_stage_bb()
        return

    def _act(self, variables_dict):
        """
        :param variables_dict: (dict) unused; targets are sampled instead.
        :return: (dict) a 'joint_positions' intervention obtained by IK on
                 random fingertip targets inside the stage bounding box.
        """
        lower_bound, upper_bound = self._stage_bb
        # one random (x, y, z) target per finger, flattened to length 9
        tip_targets = np.random.uniform(lower_bound, upper_bound,
                                        size=[3, 3]).flatten()
        joint_positions = self._inverse_kinemetics_func(
            tip_targets, rest_pose=np.zeros(9).tolist())
        return {'joint_positions': joint_positions}

    def get_params(self):
        """
        returns parameters that could be used in recreating this intervention
        actor.

        :return: (dict) specifying parameters to create this intervention
                 actor again.
        """
        return dict(joints_actor=dict())
|
class PhysicalPropertiesInterventionActorPolicy(BaseInterventionActorPolicy):

    def __init__(self, group, **kwargs):
        """
        Intervention actor that randomizes physical properties such as
        friction and mass.

        :param group: (str) the object that the actor will intervene on.
                      floor, stage, robot..etc
        :param kwargs:
        """
        super(PhysicalPropertiesInterventionActorPolicy, self).__init__()
        self.task_intervention_space = None
        self.group = group

    def initialize(self, env):
        """
        Queries the variable space used from the environment.

        :param env: (causal_world.env.CausalWorld) the environment used for
                    the intervention actor to query different methods from it.

        :return:
        """
        self.task_intervention_space = env.get_variable_space_used()
        return

    def _act(self, variables_dict):
        """
        :param variables_dict: (dict) unused; the stored variable space is
                               sampled instead.

        :return: (dict) uniformly sampled mass/friction interventions for
                 every variable belonging to the configured group.
        """
        space = self.task_intervention_space
        interventions_dict = dict()
        for variable in space:
            if not variable.startswith(self.group):
                continue
            bounds = space[variable]
            if isinstance(bounds, dict):
                # two-level variable: mass takes precedence over friction,
                # mirroring the original if/elif ordering
                for prop in ('mass', 'friction'):
                    if prop in bounds:
                        interventions_dict[variable] = {
                            prop: np.random.uniform(bounds[prop][0],
                                                    bounds[prop][1])
                        }
                        break
            elif 'mass' in variable or 'friction' in variable:
                interventions_dict[variable] = np.random.uniform(bounds[0],
                                                                 bounds[1])
        return interventions_dict

    def get_params(self):
        """
        returns parameters that could be used in recreating this intervention
        actor.

        :return: (dict) specifying parameters to create this intervention
                 actor again.
        """
        return {'physical_properties_actor': dict(group=self.group)}
|
class RandomInterventionActorPolicy(BaseInterventionActorPolicy):

    def __init__(self, **kwargs):
        """
        A random intervention actor which intervenes randomly on all
        available state variables except joint positions, since that is a
        trickier space.

        :param kwargs:
        """
        super(RandomInterventionActorPolicy, self).__init__()
        self.task_intervention_space = None

    def initialize(self, env):
        """
        Queries the variable space used from the environment.

        :param env: (causal_world.env.CausalWorld) the environment used for
                    the intervention actor to query different methods from it.

        :return:
        """
        self.task_intervention_space = env.get_variable_space_used()
        return

    def _act(self, variables_dict):
        """
        :param variables_dict: (dict) unused; the stored variable space is
                               sampled instead.
        :return: (dict) uniformly sampled interventions over the space.
        """
        # coin flip: intervene on either object sizes or positions, not both
        intervene_on_size = np.random.choice([0, 1], p=[0.5, 0.5])
        # joint positions are deliberately disabled (probability 1 for 0),
        # but the draw is kept so the RNG stream is unchanged
        intervene_on_joint_positions = np.random.choice([0, 1], p=[1, 0])
        interventions_dict = dict()
        for variable, bounds in self.task_intervention_space.items():
            if isinstance(bounds, dict):
                interventions_dict[variable] = dict()
                for subvariable_name, sub_bounds in bounds.items():
                    if subvariable_name == 'cylindrical_position' and \
                            intervene_on_size:
                        continue
                    if subvariable_name == 'size' and not intervene_on_size:
                        continue
                    interventions_dict[variable][subvariable_name] = \
                        np.random.uniform(sub_bounds[0], sub_bounds[1])
            else:
                if variable == 'joint_positions' and \
                        not intervene_on_joint_positions:
                    continue
                interventions_dict[variable] = np.random.uniform(bounds[0],
                                                                 bounds[1])
        return interventions_dict

    def get_params(self):
        """
        returns parameters that could be used in recreating this intervention
        actor.

        :return: (dict) specifying parameters to create this intervention
                 actor again.
        """
        return dict(random_actor=dict())
|
class RigidPoseInterventionActorPolicy(BaseInterventionActorPolicy):

    def __init__(self, positions=True, orientations=True, **kwargs):
        """
        Intervention actor that randomizes the pose of the blocks available
        in the arena.

        :param positions: (bool) True if interventions on positions should be
                          allowed.
        :param orientations: (bool) True if interventions on orientations
                             should be allowed.
        :param kwargs:
        """
        super(RigidPoseInterventionActorPolicy, self).__init__()
        self.task_intervention_space = None
        self.positions = positions
        self.orientations = orientations

    def initialize(self, env):
        """
        Queries the variable space used from the environment.

        :param env: (causal_world.env.CausalWorld) the environment used for
                    the intervention actor to query different methods from it.

        :return:
        """
        self.task_intervention_space = env.get_variable_space_used()
        return

    def _act(self, variables_dict):
        """
        :param variables_dict: (dict) unused; the stored variable space is
                               sampled instead.
        :return: (dict) pose interventions for every 'tool*' variable,
                 honoring the positions/orientations flags.
        """
        interventions_dict = dict()
        for variable in self.task_intervention_space:
            if not variable.startswith('tool'):
                continue
            bounds = self.task_intervention_space[variable]
            sampled_pose = dict()
            if self.positions:
                sampled_pose['cylindrical_position'] = np.random.uniform(
                    bounds['cylindrical_position'][0],
                    bounds['cylindrical_position'][1])
            if self.orientations:
                sampled_pose['euler_orientation'] = np.random.uniform(
                    bounds['euler_orientation'][0],
                    bounds['euler_orientation'][1])
            interventions_dict[variable] = sampled_pose
        return interventions_dict

    def get_params(self):
        """
        returns parameters that could be used in recreating this intervention
        actor.

        :return: (dict) specifying parameters to create this intervention
                 actor again.
        """
        return {'rigid_pose_actor': {'positions': self.positions,
                                     'orientations': self.orientations}}
|
class VisualInterventionActorPolicy(BaseInterventionActorPolicy):

    def __init__(self, **kwargs):
        """
        Intervention actor that randomizes all visual components of the
        robot (i.e: colors).

        :param kwargs:
        """
        super(VisualInterventionActorPolicy, self).__init__()
        self.task_intervention_space = None

    def initialize(self, env):
        """
        Queries the variable space used from the environment.

        :param env: (causal_world.env.CausalWorld) the environment used for
                    the intervention actor to query different methods from it.

        :return:
        """
        self.task_intervention_space = env.get_variable_space_used()
        return

    def _act(self, variables_dict):
        """
        :param variables_dict: (dict) unused; the stored variable space is
                               sampled instead.

        :return: (dict) uniformly sampled color interventions for every
                 color-related variable in the space.
        """
        space = self.task_intervention_space
        interventions_dict = dict()
        for variable in space:
            bounds = space[variable]
            if isinstance(bounds, dict):
                if 'color' in bounds:
                    interventions_dict[variable] = {
                        'color': np.random.uniform(bounds['color'][0],
                                                   bounds['color'][1])
                    }
            elif 'color' in variable:
                interventions_dict[variable] = np.random.uniform(bounds[0],
                                                                 bounds[1])
        return interventions_dict

    def get_params(self):
        """
        returns parameters that could be used in recreating this intervention
        actor.

        :return: (dict) specifying parameters to create this intervention
                 actor again.
        """
        return dict(visual_actor=dict())
|
class DataLoader():

    def __init__(self, episode_directory):
        """
        Loads episodes previously recorded with a
        causal_world.loggers.DataRecorder object.

        :param episode_directory: (str) directory where it holds all the
                                  logged episodes.
        """
        if not os.path.isdir(episode_directory):
            raise ValueError('data_path does not exist')
        self.episode_directory = episode_directory
        info_path = os.path.join(self.episode_directory, 'info.json')
        with open(info_path, 'r') as json_file:
            info_dict = json.load(json_file)
        self.max_episode_index = info_dict['max_episode_index']
        # number of episodes stored per pickle file
        self.dumb_frequency = info_dict['dumb_frequency']

    def get_episodes(self, indices):
        """
        :param indices: (list) list of indices of the episodes.

        :return: (list) list of causal_world.loggers.Episode objects
                 corresponding to the specified episode indices.
        """
        return [self.get_episode(index) for index in indices]

    def get_episode(self, index):
        """
        :param index: (int) index of the episode requested.

        :return: (causal_world.loggers.Episode) corresponding episode
                 requested.
        """
        if index > self.max_episode_index:
            raise Exception('Episode doesnt exist')
        # position of the requested episode inside its pickle file
        offset_in_file = index % self.dumb_frequency
        first_index = index - offset_in_file
        # the last file may hold fewer than dumb_frequency episodes
        last_index = min(first_index + self.dumb_frequency - 1,
                         self.max_episode_index)
        episodes_path = os.path.join(
            self.episode_directory,
            'episode_{}_{}'.format(first_index, last_index))
        if not os.path.isfile(episodes_path):
            raise Exception('Error: Log file with requested episode does not exist')
        with open(episodes_path, 'rb') as file:
            return pickle.load(file)[offset_in_file]
|
class DataRecorder():

    def __init__(self, output_directory=None, rec_dumb_frequency=100):
        """
        This class logs the full histories of a world across multiple
        episodes.

        :param output_directory: (str) specifies the output directory to save
                                 the episodes in. If None, episodes are kept
                                 in memory only and never dumped to disk.
        :param rec_dumb_frequency: (int) specifies the periodicity of saving
                                   the episodes.
        """
        self.rec_dumb_frequency = rec_dumb_frequency
        # BUG FIX: self.path was previously only assigned when an output
        # directory was given, so new_episode()/save() raised AttributeError
        # for an in-memory recorder. It is now always defined.
        self.path = output_directory
        if self.path is not None and not os.path.isdir(self.path):
            os.makedirs(self.path)
        self.episodes = []
        self.last_episode_number_dumbed = len(self.episodes) - 1
        self._curr = None

    def new_episode(self, initial_full_state, task_name, task_params=None,
                    world_params=None):
        """
        Closes the current episode (if any) and starts recording a new one.

        :param initial_full_state: (dict) dict specifying the full state
                                   variables of the environment.
        :param task_name: (str) task generator name.
        :param task_params: (dict) task generator parameters.
        :param world_params: (dict) causal world parameters.
        :return:
        """
        if self._curr:
            self.episodes.append(self._curr)
        self._curr = Episode(task_name,
                             initial_full_state,
                             task_params=task_params,
                             world_params=world_params)
        # periodically flush buffered episodes to disk
        if (self.path is not None and
                len(self.episodes) % self.rec_dumb_frequency == 0 and
                len(self.episodes) != 0):
            self.save()
        return

    def append(self, robot_action, observation, reward, info, done,
               timestamp):
        """
        Appends one transition to the episode currently being recorded.

        :param robot_action: (nd.array) action passed to step function.
        :param observation: (nd.array) observations returned after stepping
                            through the environment.
        :param reward: (float) reward received from the environment.
        :param info: (dict) dictionary specifying all the extra information
                     after stepping through the environment.
        :param done: (bool) true if the environment returns done.
        :param timestamp: (float) time stamp with respect to the beginning of
                          the episode.

        :return:
        """
        self._curr.append(robot_action, observation, reward, info, done,
                          timestamp)
        return

    def save(self):
        """
        dumps the current episodes to disk and updates info.json.

        :return:
        """
        if self.path is None:
            return
        # guard added: save() before any episode was started is a no-op
        if self._curr is not None and len(self._curr.observations):
            # NOTE(review): _curr is not cleared after being appended here;
            # a following new_episode() may append it a second time —
            # confirm this is intended.
            self.episodes.append(self._curr)
        new_episode_number_dumbed = (self.last_episode_number_dumbed +
                                     len(self.episodes))
        file_path = os.path.join(
            self.path, 'episode_{}_{}'.format(
                self.last_episode_number_dumbed + 1,
                new_episode_number_dumbed))
        with open(file_path, 'wb') as file_handle:
            pickle.dump(self.episodes, file_handle)
        self.last_episode_number_dumbed = new_episode_number_dumbed
        self.episodes = []
        info_path = os.path.join(self.path, 'info.json')
        with open(info_path, 'w') as json_file:
            info_dict = {
                'dumb_frequency': self.rec_dumb_frequency,
                'max_episode_index': new_episode_number_dumbed
            }
            json.dump(info_dict, json_file)

    def get_number_of_logged_episodes(self):
        """
        :return: (int) number of logged episodes (dumped plus buffered).
        """
        return self.last_episode_number_dumbed + len(self.episodes) + 1

    def get_current_episode(self):
        """
        :return: (causal_world.loggers.Episode) current episode being
                 recorded, or None if none was started.
        """
        return self._curr

    def clear_recorder(self):
        """
        Clears the data recorder's in-memory episode buffer.

        :return:
        """
        self.episodes = []
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.